From 647a161cfa938c87017e603579fef8c639d59e85 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Thu, 8 Dec 2022 17:30:39 -0600 Subject: [PATCH 001/231] added doc. warning for H5Oflush in parallel --- src/H5Opublic.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/H5Opublic.h b/src/H5Opublic.h index 5f40d6ce303..65e140f59f9 100644 --- a/src/H5Opublic.h +++ b/src/H5Opublic.h @@ -1376,6 +1376,9 @@ H5_DLL herr_t H5Oclose_async(hid_t object_id, hid_t es_id); * The object associated with \p object_id can be any named object in an * HDF5 file including a dataset, a group, or a committed datatype. * + * \warning H5Oflush doesn't work correctly with parallel. It causes an assertion + * failure in metadata cache during H5Fclose(). + * * \note HDF5 does not possess full control over buffering. H5Oflush() * flushes the internal HDF5 buffers and then asks the operating * system (the OS) to flush the system buffers for the open From 898b37bfcdcf75593c6a2e16236f5fa72a1e3864 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Fri, 9 Dec 2022 14:38:42 -0600 Subject: [PATCH 002/231] fixed unclosed objects in test --- fortran/test/tH5O.F90 | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/fortran/test/tH5O.F90 b/fortran/test/tH5O.F90 index 3b29e54626b..5fdb066fa32 100644 --- a/fortran/test/tH5O.F90 +++ b/fortran/test/tH5O.F90 @@ -60,7 +60,7 @@ SUBROUTINE test_h5o_link(total_error) INTEGER, INTENT(INOUT) :: total_error INTEGER(HID_T) :: file_id - INTEGER(HID_T) :: group_id + INTEGER(HID_T) :: group_id, group_id1, group_id2, group_id3 INTEGER(HID_T) :: space_id INTEGER(HID_T) :: dset_id INTEGER(HID_T) :: type_id @@ -256,11 +256,11 @@ SUBROUTINE test_h5o_link(total_error) ! ! Create intermediate groups ! - CALL h5gcreate_f(file_id,"/G1",group_id,error) + CALL h5gcreate_f(file_id,"/G1",group_id1,error) CALL check("h5gcreate_f", error, total_error) - CALL h5gcreate_f(file_id,"/G1/G2",group_id,error) + CALL h5gcreate_f(file_id,"/G1/G2",group_id2,error) CALL check("h5gcreate_f", error, total_error) - CALL h5gcreate_f(file_id,"/G1/G2/G3",group_id,error) + CALL h5gcreate_f(file_id,"/G1/G2/G3",group_id3,error) CALL check("h5gcreate_f", error, total_error) ! Try putting a comment on the group /G1/G2/G3 by name @@ -299,7 +299,7 @@ SUBROUTINE test_h5o_link(total_error) ! ! Create the dataset ! - CALL h5dcreate_f(group_id, dataset, H5T_STD_I32LE, space_id, dset_id, error) + CALL h5dcreate_f(group_id3, dataset, H5T_STD_I32LE, space_id, dset_id, error) CALL check("h5dcreate_f", error, total_error) ! Putting a comment on the dataset @@ -477,7 +477,11 @@ SUBROUTINE test_h5o_link(total_error) CALL check(" h5dclose_f", error, total_error) CALL h5sclose_f(space_id, error) CALL check("h5sclose_f", error, total_error) - CALL h5gclose_f(group_id, error) + CALL h5gclose_f(group_id1, error) + CALL check("h5gclose_f", error, total_error) + CALL h5gclose_f(group_id2, error) + CALL check("h5gclose_f", error, total_error) + CALL h5gclose_f(group_id3, error) CALL check("h5gclose_f", error, total_error) ! 
Test opening an object by index, note From efb1065c999ecdda8023f2e5169ea1412ebe35b2 Mon Sep 17 00:00:00 2001 From: vchoi-hdfgroup <55293060+vchoi-hdfgroup@users.noreply.github.com> Date: Tue, 13 Dec 2022 11:40:59 -0600 Subject: [PATCH 003/231] =?UTF-8?q?Fix=20for=20HDFFV-11052:=20h5debug=20fa?= =?UTF-8?q?ils=20on=20a=20corrupted=20file=20(h5=5Fnrefs=5FPOC)=E2=80=A6?= =?UTF-8?q?=20(#2291)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix for HDFFV-11052: h5debug fails on a corrupted file (h5_nrefs_POC) producing a core dump. When h5debug closes the corrupted file, the library calls H5F__dest() which performs all the closing operations for the file "f" (H5F_t *) but just keeping note of errors in "ret_value" all the way till the end of the routine. The user-provided corrupted file has an illegal file size causing failure when reading the image during the closing process. At the end of this routine it sets f->shared to NULL and then frees "f". This is done whether there is error or not in "ret_value". Due to the failure in reading the file earlier, the routine then returns error. The error return from H5F__dest() causes the file object "f" not being removed from the ID node table. When the library finally exits, it will try to close the file objects in the table. This causes assert failure when H5F_ID_EXISTS(f) or H5F_NREFS(f). Fix: a) H5F_dest(): free the f only when there is no error in "ret_value" at the end of the routine. b) H5VL__native_file_close(): if f->shared is NULL, free "f"; otherwise, perform closing on "f" as before. c) h5debug.c main(): track error return from H5Fclose(). * Committing clang-format changes Co-authored-by: vchoi Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- src/H5Fint.c | 4 +++- src/H5VLnative_file.c | 52 ++++++++++++++++++++++------------------ tools/src/misc/h5debug.c | 8 +++++-- 3 files changed, 38 insertions(+), 26 deletions(-) diff --git a/src/H5Fint.c b/src/H5Fint.c index 2c1b4b2c3c0..7ad35fc552d 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -1615,7 +1615,9 @@ H5F__dest(H5F_t *f, hbool_t flush) if (H5FO_top_dest(f) < 0) HDONE_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "problems closing file") f->shared = NULL; - f = H5FL_FREE(H5F_t, f); + + if (ret_value >= 0) + f = H5FL_FREE(H5F_t, f); FUNC_LEAVE_NOAPI(ret_value) } /* end H5F__dest() */ diff --git a/src/H5VLnative_file.c b/src/H5VLnative_file.c index 907a12dedce..f2f0ea7d19e 100644 --- a/src/H5VLnative_file.c +++ b/src/H5VLnative_file.c @@ -753,29 +753,35 @@ H5VL__native_file_close(void *file, hid_t H5_ATTR_UNUSED dxpl_id, void H5_ATTR_U FUNC_ENTER_PACKAGE /* This routine should only be called when a file ID's ref count drops to zero */ - HDassert(H5F_ID_EXISTS(f)); - - /* Flush file if this is the last reference to this id and we have write - * intent, unless it will be flushed by the "shared" file being closed. - * This is only necessary to replicate previous behaviour, and could be - * disabled by an option/property to improve performance. 
- */ - if ((H5F_NREFS(f) > 1) && (H5F_INTENT(f) & H5F_ACC_RDWR)) { - /* Get the file ID corresponding to the H5F_t struct */ - if (H5I_find_id(f, H5I_FILE, &file_id) < 0 || H5I_INVALID_HID == file_id) - HGOTO_ERROR(H5E_ID, H5E_CANTGET, FAIL, "invalid ID") - - /* Get the number of references outstanding for this file ID */ - if ((nref = H5I_get_ref(file_id, FALSE)) < 0) - HGOTO_ERROR(H5E_ID, H5E_CANTGET, FAIL, "can't get ID ref count") - if (nref == 1) - if (H5F__flush(f) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush cache") - } /* end if */ - - /* Close the file */ - if (H5F__close(f) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTDEC, FAIL, "can't close file") + HDassert(f->shared == NULL || H5F_ID_EXISTS(f)); + + if (f->shared == NULL) + f = H5FL_FREE(H5F_t, f); + + else { + + /* Flush file if this is the last reference to this id and we have write + * intent, unless it will be flushed by the "shared" file being closed. + * This is only necessary to replicate previous behaviour, and could be + * disabled by an option/property to improve performance. + */ + if ((H5F_NREFS(f) > 1) && (H5F_INTENT(f) & H5F_ACC_RDWR)) { + /* Get the file ID corresponding to the H5F_t struct */ + if (H5I_find_id(f, H5I_FILE, &file_id) < 0 || H5I_INVALID_HID == file_id) + HGOTO_ERROR(H5E_ID, H5E_CANTGET, FAIL, "invalid ID") + + /* Get the number of references outstanding for this file ID */ + if ((nref = H5I_get_ref(file_id, FALSE)) < 0) + HGOTO_ERROR(H5E_ID, H5E_CANTGET, FAIL, "can't get ID ref count") + if (nref == 1) + if (H5F__flush(f) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush cache") + } /* end if */ + + /* Close the file */ + if (H5F__close(f) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTDEC, FAIL, "can't close file") + } done: FUNC_LEAVE_NOAPI(ret_value) diff --git a/tools/src/misc/h5debug.c b/tools/src/misc/h5debug.c index 05d37b84362..b15ae09a888 100644 --- a/tools/src/misc/h5debug.c +++ b/tools/src/misc/h5debug.c @@ -815,8 +815,12 @@ main(int argc, char *argv[]) done: if (fapl > 0) H5Pclose(fapl); - if (fid > 0) - H5Fclose(fid); + if (fid > 0) { + if (H5Fclose(fid) < 0) { + HDfprintf(stderr, "Error in closing file!\n"); + exit_value = 1; + } + } /* Pop API context */ if (api_ctx_pushed) From 2df00297888c303fb126d12a1224a1142f5ee4e7 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Tue, 13 Dec 2022 12:56:48 -0600 Subject: [PATCH 004/231] Updated H5ES documenation (#2293) --- src/H5ESpublic.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/H5ESpublic.h b/src/H5ESpublic.h index 3d46e827539..ecfd08f7a27 100644 --- a/src/H5ESpublic.h +++ b/src/H5ESpublic.h @@ -189,7 +189,7 @@ H5_DLL herr_t H5ESwait(hid_t es_id, uint64_t timeout, size_t *num_in_progress, h * \param[out] err_occurred Status indicating if error is present in the event set * \returns \herr_t * - * \details H5ESget_count() attempts to cancel operations in an event set specified + * \details H5EScancel() attempts to cancel operations in an event set specified * by \p es_id. H5ES_NONE is a valid value for \p es_id, but functions as a no-op. 
* * \since 1.13.0 @@ -217,14 +217,14 @@ H5_DLL herr_t H5ESget_count(hid_t es_id, size_t *count); /** * \ingroup H5ES * - * \brief Retrieves the next operation counter to be assigned in an event set + * \brief Retrieves the accumulative operation counter for an event set * * \es_id - * \param[out] counter The next counter value to be assigned to an event + * \param[out] counter The accumulative counter value for an event set * \returns \herr_t * - * \details H5ESget_op_counter() retrieves the \p counter that will be assigned - * to the next operation inserted into the event set \p es_id. + * \details H5ESget_op_counter() retrieves the current accumulative count of + * event set operations since the event set creation of \p es_id. * * \note This is designed for wrapper libraries mainly, to use as a mechanism * for matching operations inserted into the event set with possible From 2e8d1c32ec2262b8d151655f4f9743126d80eda3 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 14 Dec 2022 08:58:20 -0600 Subject: [PATCH 005/231] Update windows worker compilers (#2286) * Update windows worker compilers * Update bin and test issues * Update script and revert java test --- .github/workflows/main.yml | 2 +- bin/h5vers | 4 ++-- bin/release | 32 ++++++++++++++++---------------- release_docs/RELEASE.txt | 5 ++++- 4 files changed, 23 insertions(+), 20 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 96e2c83954e..f41b7c8cf57 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -5,7 +5,7 @@ on: workflow_dispatch: push: pull_request: - branches: [ develop, hdf5_1_12, hdf5_1_10, hdf5_1_8 ] + branches: [ develop, hdf5_1_14, hdf5_1_12, hdf5_1_10, hdf5_1_8 ] paths-ignore: - '.github/CODEOWNERS' - '.github/FUNDING.yml' diff --git a/bin/h5vers b/bin/h5vers index a78c24d71a5..6716794098f 100755 --- a/bin/h5vers +++ b/bin/h5vers @@ -376,7 +376,7 @@ if ($H5_JAVA) { my $version_string2 = sprintf("%d, %d, %d", @newver[0,1,2]); $data =~ s/\@version HDF5 .*
/\@version HDF5 $version_string1
/; - $data =~ s/ public final static int LIB_VERSION\[\] = \{ \d*, \d*, \d* \};/ public final static int LIB_VERSION[] = \{ $version_string2 \};/; + $data =~ s/ public final static int LIB_VERSION\[\] = \{\d*,.\d*,.\d*\};/ public final static int LIB_VERSION[] = \{$version_string2\};/; write_file($H5_JAVA, $data); } @@ -393,7 +393,7 @@ if ($TESTH5_JAVA) { my $version_string1 = sprintf("%d, %d, %d", @newver[0,1,2]); my $version_string2 = sprintf("int majnum = %d, minnum = %d, relnum = %d", @newver[0,1,2]); - $data =~ s/ int libversion\[\] = \{ .* \};/ int libversion\[\] = \{ $version_string1 \};/; + $data =~ s/ int libversion\[\] = \{.*\};/ int libversion\[\] = \{$version_string1\};/; $data =~ s/ int majnum = \d*, minnum = \d*, relnum = \d*;/ $version_string2;/; write_file($TESTH5_JAVA, $data); diff --git a/bin/release b/bin/release index 4c471b03b83..6beab5704a2 100755 --- a/bin/release +++ b/bin/release @@ -52,23 +52,23 @@ for compressing the resulting tar archive (if none are given then information is available in the README_HPC file. doc -- produce the latest doc tree in addition to the archive. -An md5 checksum is produced for each archive created and stored in the md5 file. +An sha256 checksum is produced for each archive created and stored in the sha256 file. Examples: $ bin/release -d /tmp /tmp/hdf5-1.8.13-RELEASE.txt - /tmp/hdf5-1.8.13.md5 + /tmp/hdf5-1.8.13.sha256 /tmp/hdf5-1.8.13.tar $ bin/release -d /tmp gzip /tmp/hdf5-1.8.13-RELEASE.txt - /tmp/hdf5-1.8.13.md5 + /tmp/hdf5-1.8.13.sha256 /tmp/hdf5-1.8.13.tar.gz $ bin/release -d /tmp tar gzip zip /tmp/hdf5-1.8.13-RELEASE.txt - /tmp/hdf5-1.8.13.md5 + /tmp/hdf5-1.8.13.sha256 /tmp/hdf5-1.8.13.tar /tmp/hdf5-1.8.13.tar.gz /tmp/hdf5-1.8.13.tar.zip @@ -205,7 +205,7 @@ tar2cmakezip() # step 3: add LIBAEC.tar.gz, ZLib.tar.gz and cmake files cp /mnt/scr1/pre-release/hdf5/CMake/LIBAEC.tar.gz $cmziptmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/ZLib.tar.gz $cmziptmpsubdir - cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.1-Source.zip $cmziptmpsubdir + cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.2-Source.zip $cmziptmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/hdf5_plugins-master.zip $cmziptmpsubdir cp $cmziptmpsubdir/$version/config/cmake/scripts/CTestScript.cmake $cmziptmpsubdir cp $cmziptmpsubdir/$version/config/cmake/scripts/HDF5config.cmake $cmziptmpsubdir @@ -297,7 +297,7 @@ tar2cmaketgz() # step 3: add LIBAEC.tar.gz, ZLib.tar.gz and cmake files cp /mnt/scr1/pre-release/hdf5/CMake/LIBAEC.tar.gz $cmgztmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/ZLib.tar.gz $cmgztmpsubdir - cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.1-Source.tar.gz $cmgztmpsubdir + cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.2-Source.tar.gz $cmgztmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/hdf5_plugins-master.tar.gz $cmgztmpsubdir cp $cmgztmpsubdir/$version/config/cmake/scripts/CTestScript.cmake $cmgztmpsubdir cp $cmgztmpsubdir/$version/config/cmake/scripts/HDF5config.cmake $cmgztmpsubdir @@ -376,7 +376,7 @@ tar2hpccmaketgz() # step 3: add LIBAEC.tar.gz, ZLib.tar.gz and cmake files cp /mnt/scr1/pre-release/hdf5/CMake/LIBAEC.tar.gz $cmgztmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/ZLib.tar.gz $cmgztmpsubdir - cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.1-Source.tar.gz $cmgztmpsubdir + cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.2-Source.tar.gz $cmgztmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/hdf5_plugins-master.tar.gz $cmgztmpsubdir cp $cmgztmpsubdir/$version/config/cmake/scripts/CTestScript.cmake $cmgztmpsubdir cp 
$cmgztmpsubdir/$version/config/cmake/scripts/HDF5config.cmake $cmgztmpsubdir @@ -531,43 +531,43 @@ test "$verbose" && echo " Running tar..." 1>&2 (cd "$tmpdir" && exec tar -ch --exclude-vcs -f "$HDF5_VERS.tar" "./$HDF5_VERS" || exit 1 ) # Compress -MD5file=$HDF5_VERS.md5 -cp /dev/null $DEST/$MD5file +SHA256=$HDF5_VERS.sha256 +cp /dev/null $DEST/$SHA256 for comp in $methods; do case $comp in tar) cp -p $tmpdir/$HDF5_VERS.tar $DEST/$HDF5_VERS.tar - (cd $DEST; md5sum $HDF5_VERS.tar >> $MD5file) + (cd $DEST; sha256 $HDF5_VERS.tar >> $SHA256) ;; gzip) test "$verbose" && echo " Running gzip..." 1>&2 gzip -9 <$tmpdir/$HDF5_VERS.tar >$DEST/$HDF5_VERS.tar.gz - (cd $DEST; md5sum $HDF5_VERS.tar.gz >> $MD5file) + (cd $DEST; sha256 $HDF5_VERS.tar.gz >> $SHA256) ;; cmake-tgz) test "$verbose" && echo " Creating CMake tar.gz file..." 1>&2 tar2cmaketgz $HDF5_VERS $tmpdir/$HDF5_VERS.tar $DEST/CMake-$HDF5_VERS.tar.gz 1>&2 - (cd $DEST; md5sum CMake-$HDF5_VERS.tar.gz >> $MD5file) + (cd $DEST; sha256 CMake-$HDF5_VERS.tar.gz >> $SHA256) ;; hpc-cmake-tgz) test "$verbose" && echo " Creating HPC-CMake tar.gz file..." 1>&2 tar2hpccmaketgz $HDF5_VERS $tmpdir/$HDF5_VERS.tar $DEST/HPC-CMake-$HDF5_VERS.tar.gz 1>&2 - (cd $DEST; md5sum HPC-CMake-$HDF5_VERS.tar.gz >> $MD5file) + (cd $DEST; sha256 HPC-CMake-$HDF5_VERS.tar.gz >> $SHA256) ;; bzip2) test "$verbose" && echo " Running bzip2..." 1>&2 bzip2 -9 <$tmpdir/$HDF5_VERS.tar >$DEST/$HDF5_VERS.tar.bz2 - (cd $DEST; md5sum $HDF5_VERS.tar.bz2 >> $MD5file) + (cd $DEST; sha256 $HDF5_VERS.tar.bz2 >> $SHA256) ;; zip) test "$verbose" && echo " Creating zip ball..." 1>&2 tar2zip $HDF5_VERS $tmpdir/$HDF5_VERS.tar $DEST/$HDF5_VERS.zip 1>&2 - (cd $DEST; md5sum $HDF5_VERS.zip >> $MD5file) + (cd $DEST; sha256 $HDF5_VERS.zip >> $SHA256) ;; cmake-zip) test "$verbose" && echo " Creating CMake-zip ball..." 1>&2 tar2cmakezip $HDF5_VERS $tmpdir/$HDF5_VERS.tar $DEST/CMake-$HDF5_VERS.zip 1>&2 - (cd $DEST; md5sum CMake-$HDF5_VERS.zip >> $MD5file) + (cd $DEST; sha256 CMake-$HDF5_VERS.zip >> $SHA256) ;; doc) if [ "${DOCVERSION}" = "" ]; then diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index b12068c0b6e..682addd6de8 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -467,7 +467,10 @@ Platforms Tested Visual Studio 2017 w/ Intel C/C++/Fortran 19 (cmake) Visual Studio 2019 w/ clang 12.0.0 with MSVC-like command-line (C/C++ only - cmake) - Visual Studio 2019 w/ Intel C/C++/Fortran oneAPI 2021 (cmake) + Visual Studio 2019 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) + Visual Studio 2022 w/ clang 15.0.1 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2022 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake) From e9d7173ea1b9779f89a496c6b40baf2822ef5b6f Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Wed, 14 Dec 2022 15:22:37 -0600 Subject: [PATCH 006/231] added doc. 
warning for H5Literate_async return value (#2295) --- src/H5Lpublic.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/H5Lpublic.h b/src/H5Lpublic.h index 6f6c6382123..60e83f2f200 100644 --- a/src/H5Lpublic.h +++ b/src/H5Lpublic.h @@ -923,6 +923,11 @@ H5_DLL herr_t H5Literate2(hid_t grp_id, H5_index_t idx_type, H5_iter_order_t ord /** * -------------------------------------------------------------------------- * \ingroup ASYNC + * + * \warning The returned value of the callback routine op will not be set + * in the return value for H5Literate_async(), so the \p herr_t value + * should not be used for determining the return state of the callback routine. + * * \async_variant_of{H5Literate} */ #ifndef H5_DOXYGEN From fea59741b9712442346226a63351538fe4970379 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 14 Dec 2022 14:54:51 -0800 Subject: [PATCH 007/231] Adds 'unused' hints for MDS io_info parameters (#2301) * Adds 'unused' hints for MDS io_info parameters * Committing clang-format changes * Fix issue with formatter * Committing clang-format changes * Final fix for formatter * Committing clang-format changes Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- src/H5Dchunk.c | 19 +++++++++---------- src/H5Defl.c | 12 ++++++------ src/H5Dvirtual.c | 4 ++-- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index e90fb327a6a..40f8359f2cf 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -2456,8 +2456,8 @@ H5D__chunk_mdio_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) *------------------------------------------------------------------------- */ htri_t -H5D__chunk_cacheable(const H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, haddr_t caddr, - hbool_t write_op) +H5D__chunk_cacheable(const H5D_io_info_t H5_ATTR_PARALLEL_USED *io_info, H5D_dset_io_info_t *dset_info, + haddr_t caddr, hbool_t write_op) { const H5D_t *dataset = NULL; /* Local pointer to dataset info */ hbool_t has_filters = FALSE; /* Whether there are filters on the chunk or not */ @@ -2466,7 +2466,6 @@ H5D__chunk_cacheable(const H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info FUNC_ENTER_PACKAGE /* Sanity check */ - HDassert(io_info); HDassert(dset_info); dataset = dset_info->dset; HDassert(dataset); @@ -4266,8 +4265,8 @@ H5D__chunk_cache_prune(const H5D_t *dset, size_t size) *------------------------------------------------------------------------- */ static void * -H5D__chunk_lock(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, H5D_chunk_ud_t *udata, - hbool_t relax, hbool_t prev_unfilt_chunk) +H5D__chunk_lock(const H5D_io_info_t H5_ATTR_NDEBUG_UNUSED *io_info, const H5D_dset_io_info_t *dset_info, + H5D_chunk_ud_t *udata, hbool_t relax, hbool_t prev_unfilt_chunk) { const H5D_t *dset; /* Convenience pointer to the dataset */ H5O_pline_t *pline; /* I/O pipeline info - always equal to the pline passed to H5D__chunk_mem_alloc */ @@ -4674,7 +4673,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_inf *------------------------------------------------------------------------- */ static herr_t -H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, +H5D__chunk_unlock(const H5D_io_info_t H5_ATTR_NDEBUG_UNUSED *io_info, const H5D_dset_io_info_t *dset_info, const H5D_chunk_ud_t *udata, hbool_t dirty, void *chunk, uint32_t naccessed) { const H5O_layout_t *layout; /* Dataset layout */ @@ -7453,10 
+7452,10 @@ H5D__nonexistent_readvv_cb(hsize_t H5_ATTR_UNUSED dst_off, hsize_t src_off, size *------------------------------------------------------------------------- */ static ssize_t -H5D__nonexistent_readvv(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, - size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], - hsize_t chunk_off_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, - size_t mem_len_arr[], hsize_t mem_off_arr[]) +H5D__nonexistent_readvv(const H5D_io_info_t H5_ATTR_NDEBUG_UNUSED *io_info, + const H5D_dset_io_info_t *dset_info, size_t chunk_max_nseq, size_t *chunk_curr_seq, + size_t chunk_len_arr[], hsize_t chunk_off_arr[], size_t mem_max_nseq, + size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_off_arr[]) { H5D_chunk_readvv_ud_t udata; /* User data for H5VM_opvv() operator */ ssize_t ret_value = -1; /* Return value */ diff --git a/src/H5Defl.c b/src/H5Defl.c index 7966c5e3659..faa5e4def18 100644 --- a/src/H5Defl.c +++ b/src/H5Defl.c @@ -441,9 +441,9 @@ H5D__efl_readvv_cb(hsize_t dst_off, hsize_t src_off, size_t len, void *_udata) *------------------------------------------------------------------------- */ static ssize_t -H5D__efl_readvv(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, size_t dset_max_nseq, - size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_off_arr[], size_t mem_max_nseq, - size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_off_arr[]) +H5D__efl_readvv(const H5D_io_info_t H5_ATTR_NDEBUG_UNUSED *io_info, const H5D_dset_io_info_t *dset_info, + size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_off_arr[], + size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_off_arr[]) { H5D_efl_readvv_ud_t udata; /* User data for H5VM_opvv() operator */ ssize_t ret_value = -1; /* Return value (Total size of sequence in bytes) */ @@ -522,9 +522,9 @@ H5D__efl_writevv_cb(hsize_t dst_off, hsize_t src_off, size_t len, void *_udata) *------------------------------------------------------------------------- */ static ssize_t -H5D__efl_writevv(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, size_t dset_max_nseq, - size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_off_arr[], size_t mem_max_nseq, - size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_off_arr[]) +H5D__efl_writevv(const H5D_io_info_t H5_ATTR_NDEBUG_UNUSED *io_info, const H5D_dset_io_info_t *dset_info, + size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_off_arr[], + size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_off_arr[]) { H5D_efl_writevv_ud_t udata; /* User data for H5VM_opvv() operator */ ssize_t ret_value = -1; /* Return value (Total size of sequence in bytes) */ diff --git a/src/H5Dvirtual.c b/src/H5Dvirtual.c index ee16a178d1b..c40642fad43 100644 --- a/src/H5Dvirtual.c +++ b/src/H5Dvirtual.c @@ -2785,7 +2785,7 @@ H5D__virtual_read_one(H5D_dset_io_info_t *dset_info, H5O_storage_virtual_srcdset *------------------------------------------------------------------------- */ static herr_t -H5D__virtual_read(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) +H5D__virtual_read(H5D_io_info_t H5_ATTR_NDEBUG_UNUSED *io_info, H5D_dset_io_info_t *dset_info) { H5O_storage_virtual_t *storage; /* Convenient pointer into layout struct */ hsize_t tot_nelmts; /* Total number of elements mapped to mem_space */ @@ -2988,7 +2988,7 @@ H5D__virtual_write_one(H5D_dset_io_info_t *dset_info, H5O_storage_virtual_srcdse 
*------------------------------------------------------------------------- */ static herr_t -H5D__virtual_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) +H5D__virtual_write(H5D_io_info_t H5_ATTR_NDEBUG_UNUSED *io_info, H5D_dset_io_info_t *dset_info) { H5O_storage_virtual_t *storage; /* Convenient pointer into layout struct */ hsize_t tot_nelmts; /* Total number of elements mapped to mem_space */ From f04316ee06a29207db7ea18b21b0d8f8276ec9cb Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 14 Dec 2022 16:05:15 -0800 Subject: [PATCH 008/231] Fixes broken CI matrix actions involving -Werror and API versions (#2302) --- .github/workflows/main.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f41b7c8cf57..08807c3d999 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -177,7 +177,7 @@ jobs: - enabled: false text: "" build_mode: - - text: "DBG" + - text: " DBG" cmake: "Debug" autotools: "debug" @@ -199,7 +199,7 @@ jobs: - enabled: false text: "" build_mode: - - text: "DBG" + - text: " DBG" cmake: "Debug" autotools: "debug" @@ -221,7 +221,7 @@ jobs: - enabled: false text: "" build_mode: - - text: "DBG" + - text: " DBG" cmake: "Debug" autotools: "debug" @@ -243,7 +243,7 @@ jobs: - enabled: false text: "" build_mode: - - text: "DBG" + - text: " DBG" cmake: "Debug" autotools: "debug" @@ -265,7 +265,7 @@ jobs: - enabled: false text: "" build_mode: - - text: "DBG" + - text: " DBG" cmake: "Debug" autotools: "debug" @@ -287,7 +287,7 @@ jobs: - enabled: false text: "" build_mode: - - text: "DBG" + - text: " DBG" cmake: "Debug" autotools: "debug" @@ -309,7 +309,7 @@ jobs: - enabled: false text: "" build_mode: - - text: "REL" + - text: " REL" cmake: "Release" autotools: "production" From 4591d559380fd839c4709f13647f0de8dfa0c360 Mon Sep 17 00:00:00 2001 From: vchoi-hdfgroup <55293060+vchoi-hdfgroup@users.noreply.github.com> Date: Thu, 15 Dec 2022 07:32:23 -0600 Subject: [PATCH 009/231] Hdffv 11052 (#2303) * Fix for HDFFV-11052: h5debug fails on a corrupted file (h5_nrefs_POC) producing a core dump. When h5debug closes the corrupted file, the library calls H5F__dest() which performs all the closing operations for the file "f" (H5F_t *) but just keeping note of errors in "ret_value" all the way till the end of the routine. The user-provided corrupted file has an illegal file size causing failure when reading the image during the closing process. At the end of this routine it sets f->shared to NULL and then frees "f". This is done whether there is error or not in "ret_value". Due to the failure in reading the file earlier, the routine then returns error. The error return from H5F__dest() causes the file object "f" not being removed from the ID node table. When the library finally exits, it will try to close the file objects in the table. This causes assert failure when H5F_ID_EXISTS(f) or H5F_NREFS(f). Fix: a) H5F_dest(): free the f only when there is no error in "ret_value" at the end of the routine. b) H5VL__native_file_close(): if f->shared is NULL, free "f"; otherwise, perform closing on "f" as before. c) h5debug.c main(): track error return from H5Fclose(). * Committing clang-format changes * Add test and release note info for fix to HDFFV-11052 which is merged via PR#2291. * Committing clang-format changes * Add the test file to Cmake. 
Co-authored-by: vchoi Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- release_docs/RELEASE.txt | 19 +++++++++++++++++++ test/CMakeTests.cmake | 1 + test/cve_2020_10812.h5 | Bin 0 -> 2565 bytes test/tmisc.c | 39 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 59 insertions(+) create mode 100755 test/cve_2020_10812.h5 diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 682addd6de8..a1117a0c9df 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -171,6 +171,25 @@ Bug Fixes since HDF5-1.13.3 release =================================== Library ------- + - Seg fault on file close + + h5debug fails at file close with core dump on a file that has an + illegal file size in its cache image. In H5F_dest(), the library + performs all the closing operations for the file and keeps track of + the error encountered when reading the file cache image. + At the end of the routine, it frees the file's file structure and + returns error. Due to the error return, the file object is not removed + from the ID node table. This eventually causes assertion failure in + H5VL__native_file_close() when the library finally exits and tries to + access that file object in the table for closing. + + The closing routine, H5F_dest(), will not free the file structure if + there is error, keeping a valid file structure in the ID node table. + It will be freed later in H5VL__native_file_close() when the + library exits and terminates the file package. + + (VC - 2022/12/14, HDFFV-11052, CVE-2020-10812) + - Fix CVE-2018-13867 / GHSA-j8jr-chrh-qfrf Validate location (offset) of the accumulated metadata when comparing. diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake index 74f63f43aed..508619430cc 100644 --- a/test/CMakeTests.cmake +++ b/test/CMakeTests.cmake @@ -126,6 +126,7 @@ set (HDF5_REFERENCE_TEST_FILES btree_idx_1_6.h5 btree_idx_1_8.h5 corrupt_stab_msg.h5 + cve_2020_10812.h5 deflate.h5 family_v16-000000.h5 family_v16-000001.h5 diff --git a/test/cve_2020_10812.h5 b/test/cve_2020_10812.h5 new file mode 100755 index 0000000000000000000000000000000000000000..a20369da0db50a0d12400e9f886f2a431fbdfe6e GIT binary patch literal 2565 zcmeD5aB<`1lHy|G;9!6O11N))3&J=7<-f7G)6K{Lf(#5DP%#OH1_l!_n-L@o1_BU@ zi(vvMgaxDj(+B`_7|ufp*`h{i7i;8UmvsK Date: Thu, 15 Dec 2022 08:49:19 -0600 Subject: [PATCH 010/231] Update hdf5_header.html Use less awkward language. --- doxygen/hdf5_header.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doxygen/hdf5_header.html b/doxygen/hdf5_header.html index 23f41f9b501..36a32653ab9 100644 --- a/doxygen/hdf5_header.html +++ b/doxygen/hdf5_header.html @@ -21,7 +21,7 @@ -
Please, help us to better know about our user community by answering the following short survey: https://www.hdfgroup.org/website-survey/
+Please, help us to better serve our user community by answering the following short survey: https://www.hdfgroup.org/website-survey/
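The h5debug change in the HDFFV-11052 fix above reflects a pattern that applies equally to application code: H5Fclose() can return an error, for example when a corrupted cache image cannot be read back during the close, and that return value should be checked. A minimal sketch of the pattern, using a hypothetical file name:

#include "hdf5.h"
#include <stdio.h>

int
main(void)
{
    /* "corrupt.h5" is a hypothetical example file name */
    hid_t fid = H5Fopen("corrupt.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

    if (fid < 0)
        return 1;

    /* ... read from the file ... */

    /* H5Fclose() reports failures encountered while tearing the file down,
     * such as an unreadable cache image in a corrupted file
     */
    if (H5Fclose(fid) < 0) {
        fprintf(stderr, "Error in closing file!\n");
        return 1;
    }

    return 0;
}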
From 93b9540c4cf3609e6458d17b49147b1fcc09d84f Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Thu, 15 Dec 2022 23:03:02 -0800 Subject: [PATCH 011/231] Converts the YAML build mode arrays to objects in special runs (#2308) * Converts the YAML build mode arrays to objects in special runs * Adds a dump of the matrix context for each test This would have made it a LOT easier to debug the build_mode issues... * Disable the mirror VFD in the -Werror checks We can re-enable this after we fix the warnings, but I don't want to conflate code and GitHub changes, so this is a better way to get the CI to pass for now. --- .github/workflows/main.yml | 78 ++++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 37 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 08807c3d999..556fb132888 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -174,12 +174,12 @@ jobs: flags: "" run_tests: false thread_safety: - - enabled: false - text: "" + enabled: false + text: "" build_mode: - - text: " DBG" - cmake: "Debug" - autotools: "debug" + text: " DBG" + cmake: "Debug" + autotools: "debug" - name: "Ubuntu gcc Autotools v1.8 default API (build only)" os: ubuntu-latest @@ -196,12 +196,12 @@ jobs: flags: "" run_tests: false thread_safety: - - enabled: false - text: "" + enabled: false + text: "" build_mode: - - text: " DBG" - cmake: "Debug" - autotools: "debug" + text: " DBG" + cmake: "Debug" + autotools: "debug" - name: "Ubuntu gcc Autotools v1.10 default API (build only)" os: ubuntu-latest @@ -218,12 +218,12 @@ jobs: flags: "" run_tests: false thread_safety: - - enabled: false - text: "" + enabled: false + text: "" build_mode: - - text: " DBG" - cmake: "Debug" - autotools: "debug" + text: " DBG" + cmake: "Debug" + autotools: "debug" - name: "Ubuntu gcc Autotools v1.12 default API (build only)" os: ubuntu-latest @@ -240,12 +240,12 @@ jobs: flags: "" run_tests: false thread_safety: - - enabled: false - text: "" + enabled: false + text: "" build_mode: - - text: " DBG" - cmake: "Debug" - autotools: "debug" + text: " DBG" + cmake: "Debug" + autotools: "debug" - name: "Ubuntu gcc Autotools no deprecated symbols (build only)" os: ubuntu-latest @@ -262,12 +262,12 @@ jobs: flags: "" run_tests: false thread_safety: - - enabled: false - text: "" + enabled: false + text: "" build_mode: - - text: " DBG" - cmake: "Debug" - autotools: "debug" + text: " DBG" + cmake: "Debug" + autotools: "debug" - name: "Ubuntu gcc Autotools -Werror (build only) DBG" os: ubuntu-latest @@ -275,7 +275,7 @@ jobs: fortran: disable java: disable parallel: disable - mirror_vfd: enable + mirror_vfd: disable direct_vfd: enable deprec_sym: enable default_api: v114 @@ -284,12 +284,12 @@ jobs: flags: "CFLAGS=-Werror" run_tests: false thread_safety: - - enabled: false - text: "" + enabled: false + text: "" build_mode: - - text: " DBG" - cmake: "Debug" - autotools: "debug" + text: " DBG" + cmake: "Debug" + autotools: "debug" - name: "Ubuntu gcc Autotools -Werror (build only) REL" os: ubuntu-latest @@ -297,7 +297,7 @@ jobs: fortran: disable java: disable parallel: disable - mirror_vfd: enable + mirror_vfd: disable direct_vfd: enable deprec_sym: enable default_api: v114 @@ -306,12 +306,12 @@ jobs: flags: "CFLAGS=-Werror" run_tests: false thread_safety: - - enabled: false - text: "" + enabled: false + text: "" build_mode: - - text: " REL" - cmake: "Release" - autotools: "production" + text: " REL" + cmake: "Release" + autotools: "production" # 
Sets the job's name from the properties name: "${{ matrix.name }}${{ matrix.build_mode.text }}${{ matrix.thread_safety.text }}" @@ -329,6 +329,10 @@ jobs: # SETUP # + #Useful for debugging + - name: Dump matrix context + run: echo '${{ toJSON(matrix) }}' + - name: Install CMake Dependencies (Linux) run: sudo apt-get install ninja-build if: matrix.os == 'ubuntu-latest' From c9d6c438d60eeaa7aeae4cf0f058486e0c3a9424 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 16 Dec 2022 12:05:01 -0800 Subject: [PATCH 012/231] Removes duplicated build mode from -Werror GitHub actions (#2314) --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 556fb132888..cd5e2214d04 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -269,7 +269,7 @@ jobs: cmake: "Debug" autotools: "debug" - - name: "Ubuntu gcc Autotools -Werror (build only) DBG" + - name: "Ubuntu gcc Autotools -Werror (build only)" os: ubuntu-latest cpp: enable fortran: disable @@ -291,7 +291,7 @@ jobs: cmake: "Debug" autotools: "debug" - - name: "Ubuntu gcc Autotools -Werror (build only) REL" + - name: "Ubuntu gcc Autotools -Werror (build only)" os: ubuntu-latest cpp: enable fortran: disable From bb9575145eaf6cebb874c9a644889ee5d059abff Mon Sep 17 00:00:00 2001 From: vchoi-hdfgroup <55293060+vchoi-hdfgroup@users.noreply.github.com> Date: Fri, 16 Dec 2022 15:47:29 -0600 Subject: [PATCH 013/231] Hdffv 11052 (#2315) * Fix for HDFFV-11052: h5debug fails on a corrupted file (h5_nrefs_POC) producing a core dump. When h5debug closes the corrupted file, the library calls H5F__dest() which performs all the closing operations for the file "f" (H5F_t *) but just keeping note of errors in "ret_value" all the way till the end of the routine. The user-provided corrupted file has an illegal file size causing failure when reading the image during the closing process. At the end of this routine it sets f->shared to NULL and then frees "f". This is done whether there is error or not in "ret_value". Due to the failure in reading the file earlier, the routine then returns error. The error return from H5F__dest() causes the file object "f" not being removed from the ID node table. When the library finally exits, it will try to close the file objects in the table. This causes assert failure when H5F_ID_EXISTS(f) or H5F_NREFS(f). Fix: a) H5F_dest(): free the f only when there is no error in "ret_value" at the end of the routine. b) H5VL__native_file_close(): if f->shared is NULL, free "f"; otherwise, perform closing on "f" as before. c) h5debug.c main(): track error return from H5Fclose(). * Committing clang-format changes * Add test and release note info for fix to HDFFV-11052 which is merged via PR#2291. * Committing clang-format changes * Add the test file to Cmake. * Skip test_misc37() for drivers that is not default compatible as it is using a pre-generated file. 
* Committing clang-format changes Co-authored-by: vchoi Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- test/tmisc.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/test/tmisc.c b/test/tmisc.c index 8a70a2921e8..5fb44d46fa5 100644 --- a/test/tmisc.c +++ b/test/tmisc.c @@ -6060,15 +6060,23 @@ test_misc36(void) static void test_misc37(void) { - const char *fname; + const char *testfile = H5_get_srcdir_filename(CVE_2020_10812_FILENAME); + hbool_t driver_is_default_compatible; hid_t fid; herr_t ret; /* Output message about test being performed */ MESSAGE(5, ("Fix for HDFFV-11052/CVE-2020-10812")); - fname = H5_get_srcdir_filename(CVE_2020_10812_FILENAME); - fid = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT); + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); + CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); + + if (!driver_is_default_compatible) { + HDprintf("-- SKIPPED --\n"); + return; + } + + fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fopen"); /* This should fail due to the illegal file size. From 77d2bd21859c48db764e4acfabc0652aa507a41b Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Fri, 16 Dec 2022 21:51:52 -0600 Subject: [PATCH 014/231] moved onion VFD to FAPL group (#2321) --- src/H5FDonion.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/H5FDonion.h b/src/H5FDonion.h index 63c2d778a35..0e605d0fd80 100644 --- a/src/H5FDonion.h +++ b/src/H5FDonion.h @@ -142,11 +142,11 @@ H5_DLL hid_t H5FD_onion_init(void); /** * -------------------------------------------------------------------------- - * \ingroup H5P + * \ingroup FAPL * * \brief get the onion info from the file access property list * - * \param[in] fapl_id The ID of the file access property list + * \fapl_id * \param[out] fa_out The pointer to the structure H5FD_onion_fapl_info_t * * \return \herr_t @@ -159,11 +159,11 @@ H5_DLL herr_t H5Pget_fapl_onion(hid_t fapl_id, H5FD_onion_fapl_info_t *fa_out); /** * -------------------------------------------------------------------------- - * \ingroup H5P + * \ingroup FAPL * * \brief set the onion info for the file access property list * - * \param[in] fapl_id The ID of the file access property list + * \fapl_id * \param[in] fa The pointer to the structure H5FD_onion_fapl_info_t * * \return \herr_t From 73d4219bdd1dd9ca0f7ef4eac4771308a62978be Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Fri, 16 Dec 2022 21:52:08 -0600 Subject: [PATCH 015/231] Only document Fortran functions (#2319) * Only document Fortran functions * Only document Fortran functions * Only document Fortran functions * Only document Fortran functions --- doxygen/Doxyfile.in | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/doxygen/Doxyfile.in b/doxygen/Doxyfile.in index f45f7461592..28c73272dda 100644 --- a/doxygen/Doxyfile.in +++ b/doxygen/Doxyfile.in @@ -897,7 +897,15 @@ EXCLUDE_SYMLINKS = NO # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* -EXCLUDE_PATTERNS = +EXCLUDE_PATTERNS = */fortran/test/* +EXCLUDE_PATTERNS += */fortran/testpar/* +EXCLUDE_PATTERNS += */fortran/examples/* +EXCLUDE_PATTERNS += */fortran/src/*.c +EXCLUDE_PATTERNS += */fortran/src/*.h +EXCLUDE_PATTERNS += */hl/fortran/examples/* +EXCLUDE_PATTERNS += */hl/fortran/test/* +EXCLUDE_PATTERNS += */hl/fortran/src/*.c +EXCLUDE_PATTERNS += 
*/hl/fortran/src/*.h # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the From cdb420664e3d85aea0f3a325c633633bcf7f66c6 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Fri, 16 Dec 2022 21:56:07 -0600 Subject: [PATCH 016/231] Disable hl tools by default (#2313) * Disable hl tools by default * identify the tools * Only GIF tools are depecated * Add new option * Update autotools --- configure.ac | 17 +++++++++++++++++ hl/tools/CMakeLists.txt | 10 +++++++++- hl/tools/Makefile.am | 8 +++++++- release_docs/INSTALL_CMake.txt | 1 + release_docs/RELEASE.txt | 18 ++++++++++++++---- 5 files changed, 48 insertions(+), 6 deletions(-) diff --git a/configure.ac b/configure.ac index a7d11293b36..5f62140e95e 100644 --- a/configure.ac +++ b/configure.ac @@ -833,6 +833,7 @@ AC_LANG_POP(C++) ## library is disabled. AC_SUBST([HDF5_HL]) AC_SUBST([HDF5_HL_TOOLS]) +AC_SUBST([HDF5_HL_GIF_TOOLS]) ## The high-level library and high-level tools are enabled unless the build mode ## is clean. @@ -888,6 +889,21 @@ else AC_MSG_RESULT([no]) fi +AC_MSG_CHECKING([if the high-level GIF tools are enabled]) +AC_ARG_ENABLE([hltools], + [AS_HELP_STRING([--enable-hlgiftools], + [Enable the high-level GIF tools. + [default=no] + ])], + [HDF5_HL_GIF_TOOLS=$enableval]) + +if test "X${HDF5_GIF_HL}" = "Xyes" -a "X-$HDF5_HL_TOOLS" = "X-yes" -a "X-$HDF5_HL_GIF_TOOLS" = "X-yes"; then + AC_MSG_RESULT([yes]) + HL_GIF_TOOLS="tools" +else + AC_MSG_RESULT([no]) +fi + ## ---------------------------------------------------------------------- ## Enable new references for dimension scales @@ -3907,6 +3923,7 @@ AM_CONDITIONAL([BUILD_TESTS_CONDITIONAL], [test "X$HDF5_TESTS" = "Xyes"]) AM_CONDITIONAL([BUILD_TESTS_PARALLEL_CONDITIONAL], [test -n "$TESTPARALLEL"]) AM_CONDITIONAL([BUILD_TOOLS_CONDITIONAL], [test "X$HDF5_TOOLS" = "Xyes"]) AM_CONDITIONAL([BUILD_TOOLS_HL_CONDITIONAL], [test "X$HDF5_HL_TOOLS" = "Xyes"]) +AM_CONDITIONAL([BUILD_TOOLS_HL_GIF_CONDITIONAL], [test "X$HDF5_HL_GIF_TOOLS" = "Xyes"]) AM_CONDITIONAL([BUILD_DOXYGEN_CONDITIONAL], [test "X$HDF5_DOXYGEN" = "Xyes"]) ## ---------------------------------------------------------------------- diff --git a/hl/tools/CMakeLists.txt b/hl/tools/CMakeLists.txt index a209fbb7107..7df2b796a04 100644 --- a/hl/tools/CMakeLists.txt +++ b/hl/tools/CMakeLists.txt @@ -1,6 +1,14 @@ cmake_minimum_required (VERSION 3.18) project (HDF5_HL_TOOLS C) -add_subdirectory (gif2h5) + #----------------------------------------------------------------------------- + #-- Option to build the High level GIF Tools + #----------------------------------------------------------------------------- + if (EXISTS "${HDF5_HL_SOURCE_DIR}/gif2h5" AND IS_DIRECTORY "${HDF5_HL_SOURCE_DIR}/gif2h5") + option (HDF5_BUILD_HL_GIF_TOOLS "Build HDF5 HL GIF Tools" OFF) + if (HDF5_BUILD_HL_GIF_TOOLS) + add_subdirectory (gif2h5) + endif () + endif () add_subdirectory (h5watch) diff --git a/hl/tools/Makefile.am b/hl/tools/Makefile.am index 6687f40f871..f3fe0d352ae 100644 --- a/hl/tools/Makefile.am +++ b/hl/tools/Makefile.am @@ -18,7 +18,13 @@ include $(top_srcdir)/config/commence.am +if BUILD_TOOLS_HL_GIF_CONDITIONAL + TOOLS_GIF_DIR = gif2h5 +else + TOOLS_GIF_DIR = +endif + # All subdirectories -SUBDIRS=gif2h5 h5watch +SUBDIRS=$(TOOLS_DIR) h5watch include $(top_srcdir)/config/conclude.am diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index 94b14d9ddad..b17a7e58123 100644 --- 
a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -741,6 +741,7 @@ HDF5_BUILD_JAVA "Build JAVA support" OFF HDF5_BUILD_HL_LIB "Build HIGH Level HDF5 Library" ON HDF5_BUILD_TOOLS "Build HDF5 Tools" ON HDF5_BUILD_HL_TOOLS "Build HIGH Level HDF5 Tools" ON +HDF5_BUILD_HL_GIF_TOOLS "Build HIGH Level HDF5 GIF Tools" OFF ---------------- HDF5 Folder Build Options --------------------- Defaults relative to $ diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index a1117a0c9df..16c8a329c8b 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -301,6 +301,16 @@ Bug Fixes since HDF5-1.13.3 release Configuration ------------- + - Change the default for building the high-level tools + + The gif2hdf5 and hdf2gif high-level tools are deprecated and will be removed + in a future release. The default build setting for them have been changed from enabled + to disabled. A user can enable the build of these tools if needed. + autotools: --enable-hlgiftools + cmake: HDF5_BUILD_HL_GIF_TOOLS=ON + + (ADB - 2022/12/16) + - Change the settings of the *pc files to use the correct format The pkg-config files generated by CMake uses incorrect syntax for the 'Requires' @@ -583,7 +593,7 @@ The issues with the gif tool are: HDFFV-10592 CVE-2018-17433 HDFFV-10593 CVE-2018-17436 HDFFV-11048 CVE-2020-10809 -These CVE issues have not yet been addressed and can be avoided by not building -the gif tool. Disable building the High-Level tools with these options: - autotools: --disable-hltools - cmake: HDF5_BUILD_HL_TOOLS=OFF +These CVE issues have not yet been addressed and are avoided by not building +the gif tool by default. Enable building the High-Level tools with these options: + autotools: --enable-hltools + cmake: HDF5_BUILD_HL_TOOLS=ON From a97581e716992225cacd473e1e9efa9d0bfb5758 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Fri, 16 Dec 2022 21:56:52 -0600 Subject: [PATCH 017/231] Removed idioms and misc. text clean-up (#2320) * removed idioms and misc. text clean-up, Issue #2135 * removed idioms and misc. text clean-up, Issue #2135 --- doxygen/dox/ReferenceManual.dox | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/doxygen/dox/ReferenceManual.dox b/doxygen/dox/ReferenceManual.dox index e79de679f10..b9bcd498357 100644 --- a/doxygen/dox/ReferenceManual.dox +++ b/doxygen/dox/ReferenceManual.dox @@ -145,23 +145,19 @@ Functions with \ref ASYNC
- -Mind the gap - Follow these simple rules and stay out of trouble: -\li \Bold{Handle discipline:} The HDF5 C-API is rife with handles or +\li \Bold{Handle discipline:} The HDF5 API is rife with handles or identifiers, which you typically obtain by creating new HDF5 items, copying - items, or retrieving facets of items. \Emph{You acquire a handle, you own it!} - (Colin Powell) In other words, you are responsible for releasing the underlying + items, or retrieving facets of items. Consequently, \Bold{and most importantly}, you are + responsible for releasing the underlying resources via the matching \Code{H5*close()} call, or deal with the consequences of resource leakage. \li \Bold{Closed means closed:} Do not pass identifiers that were previously \Code{H5*close()}-d to other API functions! It will generate an error. \li \Bold{Dynamic memory allocation:} The API contains a few functions in which the HDF5 library dynamically allocates memory on the caller's behalf. The caller owns - this memory and eventually must free it by calling H5free_memory(). (\Bold{Not} - the `free` function \Emph{du jour}!) + this memory and eventually must free it by calling H5free_memory() and not language-explicit memory functions. \li \Bold{Be careful with that saw:} Do not modify the underlying collection when an iteration is in progress! \li \Bold{Use of locations:} Certain API functions, typically called \Code{H5***_by_name} @@ -169,7 +165,6 @@ Follow these simple rules and stay out of trouble: If the identifier fully specifies the object in question, pass \Code{'.'} (a dot) for the name! -Break a leg! From c7683ce3ca84daca089c3b321b7b747c4f7dbbea Mon Sep 17 00:00:00 2001 From: Mark Kittisopikul Date: Mon, 19 Dec 2022 18:13:43 -0500 Subject: [PATCH 018/231] Align arg types of H5D_chunk_iter_op_t with H5Dget_chunk_info (#2074) * Align arg types of H5D_chunk_iter_op_t with H5Dget_chunk_info * Modify chunk_info test to for unsigned / hsize_t types * Fix types in test --- src/H5D.c | 16 ++++++++-------- src/H5Dchunk.c | 4 ++-- src/H5Dpublic.h | 2 +- test/chunk_info.c | 18 +++++++++--------- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/H5D.c b/src/H5D.c index d9213566cb0..006f4a9d88f 100644 --- a/src/H5D.c +++ b/src/H5D.c @@ -2515,18 +2515,18 @@ H5Dget_chunk_info_by_coord(hid_t dset_id, const hsize_t *offset, unsigned *filte * * typedef int (*H5D_chunk_iter_op_t)( * const hsize_t *offset, - * uint32_t filter_mask, + * unsigned filter_mask, * haddr_t addr, - * uint32_t nbytes, + * hsize_t size, * void *op_data); * * H5D_chunk_iter_op_t parameters: - * hsize_t *offset; IN/OUT: Array of starting logical coordinates of chunk. - * uint32_t filter_mask; IN: Filter mask of chunk. - * haddr_t addr; IN: Offset in file of chunk data. - * uint32_t nbytes; IN: Size in number of bytes of chunk data in file. - * void *op_data; IN/OUT: Pointer to any user-defined data - * associated with the operation. + * hsize_t *offset; IN/OUT: Logical position of the chunk’s first element in units of dataset + * elements + * unsigned filter_mask; IN: Bitmask indicating the filters used when the chunk was written haddr_t + * addr; IN: Chunk address in the file + * hsize_t; IN: Chunk size in bytes, 0 if the chunk does not exist + * void *op_data; IN/OUT: Pointer to any user-defined data associated with the operation. 
* * The return values from an operator are: * Zero (H5_ITER_CONT) causes the iterator to continue, returning zero when all diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 40f8359f2cf..830560d454a 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -8278,8 +8278,8 @@ H5D__chunk_iter_cb(const H5D_chunk_rec_t *chunk_rec, void *udata) FUNC_ENTER_PACKAGE_NOERR /* Check for callback failure and pass along return value */ - if ((ret_value = (data->op)(offset, chunk_rec->filter_mask, chunk_rec->chunk_addr, chunk_rec->nbytes, - data->op_data)) < 0) + if ((ret_value = (data->op)(offset, (unsigned)chunk_rec->filter_mask, chunk_rec->chunk_addr, + (hsize_t)chunk_rec->nbytes, data->op_data)) < 0) HERROR(H5E_DATASET, H5E_CANTNEXT, "iteration operator failed"); FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index ac76bc8a8c8..f7d208dd9f0 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -238,7 +238,7 @@ typedef herr_t (*H5D_gather_func_t)(const void *dst_buf, size_t dst_buf_bytes_us * \li A negative (#H5_ITER_ERROR) causes the iterator to immediately * return that value, indicating failure. */ -typedef int (*H5D_chunk_iter_op_t)(const hsize_t *offset, uint32_t filter_mask, haddr_t addr, uint32_t size, +typedef int (*H5D_chunk_iter_op_t)(const hsize_t *offset, unsigned filter_mask, haddr_t addr, hsize_t size, void *op_data); //! diff --git a/test/chunk_info.c b/test/chunk_info.c index e38752f7eb0..5651b26cb6c 100644 --- a/test/chunk_info.c +++ b/test/chunk_info.c @@ -1508,9 +1508,9 @@ test_chunk_info_version2_btrees(const char *filename, hid_t fapl) typedef struct chunk_iter_info_t { hsize_t offset[2]; - uint32_t filter_mask; + unsigned filter_mask; haddr_t addr; - uint32_t nbytes; + hsize_t size; } chunk_iter_info_t; typedef struct chunk_iter_udata_t { @@ -1519,7 +1519,7 @@ typedef struct chunk_iter_udata_t { } chunk_iter_udata_t; static int -iter_cb(const hsize_t *offset, uint32_t filter_mask, haddr_t addr, uint32_t nbytes, void *op_data) +iter_cb(const hsize_t *offset, unsigned filter_mask, haddr_t addr, hsize_t size, void *op_data) { chunk_iter_udata_t *cidata = (chunk_iter_udata_t *)op_data; int idx = cidata->last_index + 1; @@ -1528,7 +1528,7 @@ iter_cb(const hsize_t *offset, uint32_t filter_mask, haddr_t addr, uint32_t nbyt cidata->chunk_info[idx].offset[1] = offset[1]; cidata->chunk_info[idx].filter_mask = filter_mask; cidata->chunk_info[idx].addr = addr; - cidata->chunk_info[idx].nbytes = nbytes; + cidata->chunk_info[idx].size = size; cidata->last_index++; @@ -1536,8 +1536,8 @@ iter_cb(const hsize_t *offset, uint32_t filter_mask, haddr_t addr, uint32_t nbyt } static int -iter_cb_stop(const hsize_t H5_ATTR_UNUSED *offset, uint32_t H5_ATTR_UNUSED filter_mask, - haddr_t H5_ATTR_UNUSED addr, uint32_t H5_ATTR_UNUSED nbytes, void *op_data) +iter_cb_stop(const hsize_t H5_ATTR_UNUSED *offset, unsigned H5_ATTR_UNUSED filter_mask, + haddr_t H5_ATTR_UNUSED addr, hsize_t H5_ATTR_UNUSED size, void *op_data) { chunk_iter_info_t **chunk_info = (chunk_iter_info_t **)op_data; *chunk_info += 1; @@ -1545,8 +1545,8 @@ iter_cb_stop(const hsize_t H5_ATTR_UNUSED *offset, uint32_t H5_ATTR_UNUSED filte } static int -iter_cb_fail(const hsize_t H5_ATTR_UNUSED *offset, uint32_t H5_ATTR_UNUSED filter_mask, - haddr_t H5_ATTR_UNUSED addr, uint32_t H5_ATTR_UNUSED nbytes, void *op_data) +iter_cb_fail(const hsize_t H5_ATTR_UNUSED *offset, unsigned H5_ATTR_UNUSED filter_mask, + haddr_t H5_ATTR_UNUSED addr, hsize_t H5_ATTR_UNUSED size, void *op_data) { chunk_iter_info_t **chunk_info = 
(chunk_iter_info_t **)op_data; *chunk_info += 1; @@ -1718,7 +1718,7 @@ test_basic_query(hid_t fapl) FAIL_PUTS_ERROR("offset[1] mismatch"); if (chunk_infos[0].filter_mask != 0) FAIL_PUTS_ERROR("filter mask mismatch"); - if (chunk_infos[0].nbytes != 96) + if (chunk_infos[0].size != 96) FAIL_PUTS_ERROR("size mismatch"); if (chunk_infos[1].offset[0] != CHUNK_NX) From 5ef7c9b0a2f6cfc15d2edf206fc60d660645c3cb Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Tue, 20 Dec 2022 16:59:40 -0600 Subject: [PATCH 019/231] Fix doxygen warnings and remove javadocs (#2324) * Fix doxygen warnings and remove javadocs * fix typo --- doxygen/Doxyfile.in | 2 +- doxygen/dox/ViewTools.dox | 8 +++----- doxygen/dox/cookbook/Accessibility.dox | 2 +- doxygen/dox/cookbook/Attributes.dox | 2 +- doxygen/dox/cookbook/Files.dox | 2 +- doxygen/dox/cookbook/Performance.dox | 2 +- doxygen/hdf5doxy_layout.xml | 8 ++++---- fortran/src/H5Pff.F90 | 25 ------------------------- java/src/Makefile.am | 8 ++------ java/src/hdf/hdf5lib/CMakeLists.txt | 12 ------------ release_docs/RELEASE.txt | 8 ++++++++ src/H5Amodule.h | 2 +- src/H5Gmodule.h | 2 +- src/H5Lpublic.h | 4 ++-- src/H5Tpublic.h | 2 +- src/H5VLconnector_passthru.h | 2 +- 16 files changed, 28 insertions(+), 63 deletions(-) diff --git a/doxygen/Doxyfile.in b/doxygen/Doxyfile.in index 28c73272dda..d9b0fe9aa55 100644 --- a/doxygen/Doxyfile.in +++ b/doxygen/Doxyfile.in @@ -854,7 +854,7 @@ INPUT_ENCODING = UTF-8 # *.vhdl, *.ucf, *.qsf and *.ice. FILE_PATTERNS = H5*public.h H5*module.h H5*develop.h H5FD*.h \ - H5VLconnector.h H5VLconnector_passthru.h H5VLnative.h \ + H5VLconnector.h H5VLconnector_passthru.h H5VLnative.h H5PLextern.h \ H5Zdevelop.h \ H5version.h \ H5*.java \ diff --git a/doxygen/dox/ViewTools.dox b/doxygen/dox/ViewTools.dox index 0b685a0bb3d..2212d4ba3f2 100644 --- a/doxygen/dox/ViewTools.dox +++ b/doxygen/dox/ViewTools.dox @@ -829,6 +829,7 @@ by simply viewing the specified dataset with the -d option must be specified - -before -\par subsetting options (if not using the shorthand method). +Where, the -d option must be specified +before subsetting options (if not using the shorthand method). The -A 0 option suppresses the printing of attributes. 
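The H5D_chunk_iter_op_t change in patch 018 above aligns the callback argument types (unsigned filter mask, hsize_t chunk size) with H5Dget_chunk_info(). A minimal sketch of a callback matching the revised signature and of how it would be passed to H5Dchunk_iter(); the callback name and the rank handling are illustrative only:

#include "hdf5.h"
#include <stdio.h>

/* Matches the revised H5D_chunk_iter_op_t signature */
static int
print_chunk_cb(const hsize_t *offset, unsigned filter_mask, haddr_t addr, hsize_t size, void *op_data)
{
    int *rank = (int *)op_data; /* dataset rank supplied by the caller */
    int  i;

    printf("chunk at [");
    for (i = 0; i < *rank; i++)
        printf("%s%llu", i ? ", " : "", (unsigned long long)offset[i]);
    printf("] addr=%llu size=%llu filter_mask=0x%x\n", (unsigned long long)addr,
           (unsigned long long)size, filter_mask);

    return H5_ITER_CONT;
}

/* Usage, assuming dset_id is an open chunked dataset of rank 2:
 *
 *     int rank = 2;
 *     if (H5Dchunk_iter(dset_id, H5P_DEFAULT, print_chunk_cb, &rank) < 0)
 *         ... handle the error ...
 */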
diff --git a/doxygen/dox/cookbook/Accessibility.dox b/doxygen/dox/cookbook/Accessibility.dox index f10028367e0..28009be71d2 100644 --- a/doxygen/dox/cookbook/Accessibility.dox +++ b/doxygen/dox/cookbook/Accessibility.dox @@ -1,6 +1,6 @@ /** \page Accessibility -\section Accessibility +\section secAccessibility Accessibility \subsection CB_MaintainCompat Maintaining Compatibility with other HDF5 Library Versions diff --git a/doxygen/dox/cookbook/Attributes.dox b/doxygen/dox/cookbook/Attributes.dox index 68fd15906d9..59149099579 100644 --- a/doxygen/dox/cookbook/Attributes.dox +++ b/doxygen/dox/cookbook/Attributes.dox @@ -1,6 +1,6 @@ /** \page Attributes -\section Attributes +\section secAttributes Attributes \subsection CB_LargeAttributes Creating "Large" HDF5 Attributes diff --git a/doxygen/dox/cookbook/Files.dox b/doxygen/dox/cookbook/Files.dox index 169d6387251..489377153a0 100644 --- a/doxygen/dox/cookbook/Files.dox +++ b/doxygen/dox/cookbook/Files.dox @@ -1,6 +1,6 @@ /** \page Files -\section Files +\section secFiles Files \subsection CB_FreeSpace Tracking Free Space in HDF5 Files diff --git a/doxygen/dox/cookbook/Performance.dox b/doxygen/dox/cookbook/Performance.dox index 7ac3a182ad0..5e945b232c4 100644 --- a/doxygen/dox/cookbook/Performance.dox +++ b/doxygen/dox/cookbook/Performance.dox @@ -1,6 +1,6 @@ /** \page Performance -\section Performance +\section secPerformance Performance \subsection CB_MDCPerf Assessing HDF5 Metadata Cache Performance diff --git a/doxygen/hdf5doxy_layout.xml b/doxygen/hdf5doxy_layout.xml index 588052bcaae..d156f2c1785 100644 --- a/doxygen/hdf5doxy_layout.xml +++ b/doxygen/hdf5doxy_layout.xml @@ -4,13 +4,13 @@ - - - - + + + + diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90 index 9c30637427b..75d73659abc 100644 --- a/fortran/src/H5Pff.F90 +++ b/fortran/src/H5Pff.F90 @@ -4305,31 +4305,6 @@ SUBROUTINE h5pset_f(prp_id, name, value, hdferr) CHARACTER(LEN=*), INTENT(IN) :: name INTEGER , INTENT(IN) :: value INTEGER , INTENT(OUT) :: hdferr - END SUBROUTINE h5pset - -!> -!! \ingroup FH5P -!! -!! \brief Sets a property list value. -!! -!! \attention \fortran_obsolete -!! -!! \param prp_id Property list identifier to modify. -!! \param name Name of property to modify. -!! \param value Property value, supported types are: -!! \li INTEGER -!! \li REAL -!! \li DOUBLE PRECISION -!! \li CHARACTER(LEN=*) -!! \param hdferr \fortran_error -!! -!! See C API: @ref H5Pset() -!! - SUBROUTINE h5pset_f(prp_id, name, value, hdferr) - INTEGER(HID_T), INTENT(IN) :: prp_id - CHARACTER(LEN=*), INTENT(IN) :: name - INTEGER, INTENT(IN) :: value - INTEGER, INTENT(OUT) :: hdferr END SUBROUTINE h5pset_f !> !! \ingroup FH5P diff --git a/java/src/Makefile.am b/java/src/Makefile.am index 007693280db..36fca3a81c3 100644 --- a/java/src/Makefile.am +++ b/java/src/Makefile.am @@ -111,26 +111,22 @@ hdf5_java_JAVA = \ ${pkgpath}/HDFArray.java \ ${pkgpath}/HDFNativeData.java -$(jarfile): classhdf5_java.stamp classes docs +$(jarfile): classhdf5_java.stamp classes $(JAR) cvf $@ -C $(JAVAROOT)/ $(pkgpath) hdf5_java_DATA = $(jarfile) -.PHONY: docs classes +.PHONY: classes WINDOWTITLE = 'HDF5 Java' DOCTITLE = '

HDF5 Java Wrapper

' SRCDIR = '$(pkgpath)' -docs: - $(JAVADOC) -sourcepath $(srcdir) -d javadoc -Xdoclint:none -use -splitIndex -windowtitle $(WINDOWTITLE) -doctitle $(DOCTITLE) -J-Xmx180m -verbose -overview $(top_srcdir)/java/src/hdf/overview.html -classpath $(CLASSPATH_ENV) hdf.hdf5lib - CLEANFILES = classhdf5_java.stamp $(jarfile) $(JAVAROOT)/$(pkgpath)/callbacks/*.class $(JAVAROOT)/$(pkgpath)/exceptions/*.class $(JAVAROOT)/$(pkgpath)/structs/*.class $(JAVAROOT)/$(pkgpath)/*.class clean: rm -rf $(JAVAROOT)/* rm -f $(jarfile) - rm -rf javadoc rm -f classhdf5_java.stamp diff --git a/java/src/hdf/hdf5lib/CMakeLists.txt b/java/src/hdf/hdf5lib/CMakeLists.txt index 4e340f19a89..41cf4e9ab60 100644 --- a/java/src/hdf/hdf5lib/CMakeLists.txt +++ b/java/src/hdf/hdf5lib/CMakeLists.txt @@ -134,15 +134,3 @@ set_target_properties (${HDF5_JAVA_HDF5_LIB_TARGET} PROPERTIES FOLDER libraries/ if (HDF5_ENABLE_FORMATTERS) clang_format (HDF5_JAVA_SRC_FORMAT ${HDF5_JAVA_HDF_HDF5_CALLBACKS_SOURCES} ${HDF5_JAVA_HDF_HDF5_EXCEPTIONS_SOURCES} ${HDF5_JAVA_HDF_HDF5_STRUCTS_SOURCES} ${HDF5_JAVA_HDF_HDF5_SOURCES}) endif () - -create_javadoc(hdf5_java_doc - FILES ${HDF5_JAVADOC_HDF_HDF5_CALLBACKS_SOURCES} ${HDF5_JAVADOC_HDF_HDF5_EXCEPTIONS_SOURCES} ${HDF5_JAVADOC_HDF_HDF5_STRUCTS_SOURCES} ${HDF5_JAVADOC_HDF_HDF5_SOURCES} - OVERVIEW ${HDF5_JAVA_HDF5_SRC_DIR}/overview.html - CLASSPATH ${CMAKE_JAVA_INCLUDE_PATH} - WINDOWTITLE "HDF5 Java" - DOCTITLE "

HDF5 Java Wrapper

" - INSTALLPATH ${HDF5_INSTALL_DOC_DIR} - AUTHOR TRUE - USE TRUE - VERSION TRUE -) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 16c8a329c8b..5dea822c7fb 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -301,6 +301,14 @@ Bug Fixes since HDF5-1.13.3 release Configuration ------------- + - Remove Javadoc generation + + The use of doxygen now supersedes the requirement to build javadocs. We do not + have the resources to continue to support two documentation methods and have + chosen doxygen as our standard. + + (ADB - 2022/12/19) + - Change the default for building the high-level tools The gif2hdf5 and hdf2gif high-level tools are deprecated and will be removed diff --git a/src/H5Amodule.h b/src/H5Amodule.h index 7f88a22ebcc..4823d0d6e7a 100644 --- a/src/H5Amodule.h +++ b/src/H5Amodule.h @@ -363,7 +363,7 @@ * will be ignored by HDF5. * * The use of ASCII or UTF-8 characters is determined by the character encoding property. See - * #H5Pset_char_encoding in the \ref RM. + * #H5Pset_char_encoding in the \ref RM. * *

No Special I/O or Storage

* diff --git a/src/H5Gmodule.h b/src/H5Gmodule.h index 4b58ee6c9ac..d4738f63006 100644 --- a/src/H5Gmodule.h +++ b/src/H5Gmodule.h @@ -923,7 +923,7 @@ * containing thousands to millions of members. Links are stored in * a fractal heap and indexed with an improved B-tree. * \li The new implementation also enables the use of link names consisting of - * non-ASCII character sets (see H5Pset_char_encoding()) and is + * non-ASCII character sets (see #H5Pset_char_encoding) and is * required for all link types other than hard or soft links, e.g., * external and user-defined links (see the \ref H5L APIs). * diff --git a/src/H5Lpublic.h b/src/H5Lpublic.h index 60e83f2f200..d2d9e9ded46 100644 --- a/src/H5Lpublic.h +++ b/src/H5Lpublic.h @@ -729,7 +729,7 @@ H5_DLL herr_t H5Lexists_async(hid_t loc_id, const char *name, hbool_t *exists, h * \p cset specifies the character set in which the link name is * encoded. Valid values include the following: * \csets - * This value is set with H5Pset_char_encoding(). + * This value is set with #H5Pset_char_encoding. * * \c token is the location that a hard link points to, and * \c val_size is the size of a soft link or user defined link value. @@ -1525,7 +1525,7 @@ typedef herr_t (*H5L_iterate1_t)(hid_t group, const char *name, const H5L_info1_ * \c cset specifies the character set in which the link name is * encoded. Valid values include the following: * \csets - * This value is set with H5Pset_char_encoding(). + * This value is set with #H5Pset_char_encoding. * * \c address and \c val_size are returned for hard and symbolic * links, respectively. Symbolic links include soft and external links diff --git a/src/H5Tpublic.h b/src/H5Tpublic.h index dcaffd80188..16172a872b0 100644 --- a/src/H5Tpublic.h +++ b/src/H5Tpublic.h @@ -1035,7 +1035,7 @@ H5_DLLVAR hid_t H5T_NATIVE_UINT_FAST64_g; * When creating a fixed-length string datatype, \p size will * be the length of the string in bytes. The length of the * string in characters will depend on i the encoding used; see - * H5Pset_char_encoding(). + * #H5Pset_char_encoding. * * ENUMs created with this function have a signed native integer * base datatype. Use H5Tenum_create() if a different integer base diff --git a/src/H5VLconnector_passthru.h b/src/H5VLconnector_passthru.h index 68dd33ad194..d10bac4697c 100644 --- a/src/H5VLconnector_passthru.h +++ b/src/H5VLconnector_passthru.h @@ -71,7 +71,7 @@ H5_DLL herr_t H5VLcmp_connector_cls(int *cmp, hid_t connector_id1, hid_t connect * - #H5I_MAP * - #H5I_ATTR * - * \return \hid_t + * \return \hid_t{VOL connector} * * \note This routine is mainly targeted toward wrapping objects for * iteration routine callbacks (i.e. the callbacks from H5Aiterate*, From f6a1041c9f87c3d22b97db7aef958aaa60fc8d1c Mon Sep 17 00:00:00 2001 From: vchoi-hdfgroup <55293060+vchoi-hdfgroup@users.noreply.github.com> Date: Tue, 20 Dec 2022 18:48:56 -0600 Subject: [PATCH 020/231] Fix up the H5Pencode2 test for virtual layout. (#2325) * Fix up the H5Pencode2 test for virtual layout. * Committing clang-format changes * Remove VERIFY and use TEST_ERROR. 
Co-authored-by: vchoi Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- test/enc_dec_plist.c | 178 +++++++++++++++++++++---------------------- 1 file changed, 87 insertions(+), 91 deletions(-) diff --git a/test/enc_dec_plist.c b/test/enc_dec_plist.c index 4a8762095dc..fd4ae5eedfc 100644 --- a/test/enc_dec_plist.c +++ b/test/enc_dec_plist.c @@ -22,13 +22,12 @@ #define SRC_DSET "src_dset" static int -test_encode_decode(hid_t orig_pl, H5F_libver_t low, H5F_libver_t high, hbool_t support_virtual) +test_encode_decode(hid_t orig_pl, H5F_libver_t low, H5F_libver_t high) { hid_t pl = (-1); /* Decoded property list */ hid_t fapl = -1; /* File access property list */ void *temp_buf = NULL; /* Pointer to encoding buffer */ size_t temp_size = 0; /* Size of encoding buffer */ - herr_t ret; /* Return value */ /* Create file access property list */ if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) @@ -38,68 +37,58 @@ test_encode_decode(hid_t orig_pl, H5F_libver_t low, H5F_libver_t high, hbool_t s if (H5Pset_libver_bounds(fapl, low, high) < 0) TEST_ERROR; - H5E_BEGIN_TRY - { - ret = H5Pencode2(orig_pl, NULL, &temp_size, fapl); - } - H5E_END_TRY; - - if (support_virtual && high < H5F_LIBVER_V110) - VERIFY(ret, FAIL, "H5Pencode2"); - else { - - VERIFY(ret, SUCCEED, "H5Pencode2"); + if (H5Pencode2(orig_pl, NULL, &temp_size, fapl) < 0) + TEST_ERROR; - /* Allocate the buffer for encoding */ - if (NULL == (temp_buf = (void *)HDmalloc(temp_size))) - TEST_ERROR; + /* Allocate the buffer for encoding */ + if (NULL == (temp_buf = (void *)HDmalloc(temp_size))) + TEST_ERROR; - /* Encode the property list to the buffer */ - if (H5Pencode2(orig_pl, temp_buf, &temp_size, fapl) < 0) - TEST_ERROR; + /* Encode the property list to the buffer */ + if (H5Pencode2(orig_pl, temp_buf, &temp_size, fapl) < 0) + TEST_ERROR; - /* Decode the buffer */ - if ((pl = H5Pdecode(temp_buf)) < 0) - STACK_ERROR; + /* Decode the buffer */ + if ((pl = H5Pdecode(temp_buf)) < 0) + STACK_ERROR; - /* Check if the original and the decoded property lists are equal */ - if (!H5Pequal(orig_pl, pl)) - PUTS_ERROR("encoding-decoding cycle failed\n"); + /* Check if the original and the decoded property lists are equal */ + if (!H5Pequal(orig_pl, pl)) + PUTS_ERROR("encoding-decoding cycle failed\n"); - /* Close the decoded property list */ - if ((H5Pclose(pl)) < 0) - TEST_ERROR; + /* Close the decoded property list */ + if ((H5Pclose(pl)) < 0) + TEST_ERROR; - /* Free the buffer */ - if (temp_buf) - HDfree(temp_buf); + /* Free the buffer */ + if (temp_buf) + HDfree(temp_buf); #ifndef H5_NO_DEPRECATED_SYMBOLS - /* Test H5Pencode1() */ + /* Test H5Pencode1() */ - /* first call to encode returns only the size of the buffer needed */ - if (H5Pencode1(orig_pl, NULL, &temp_size) < 0) - STACK_ERROR; + /* first call to encode returns only the size of the buffer needed */ + if (H5Pencode1(orig_pl, NULL, &temp_size) < 0) + STACK_ERROR; - if (NULL == (temp_buf = (void *)HDmalloc(temp_size))) - TEST_ERROR; + if (NULL == (temp_buf = (void *)HDmalloc(temp_size))) + TEST_ERROR; - if (H5Pencode1(orig_pl, temp_buf, &temp_size) < 0) - STACK_ERROR; + if (H5Pencode1(orig_pl, temp_buf, &temp_size) < 0) + STACK_ERROR; - if ((pl = H5Pdecode(temp_buf)) < 0) - STACK_ERROR; + if ((pl = H5Pdecode(temp_buf)) < 0) + STACK_ERROR; - if (!H5Pequal(orig_pl, pl)) - PUTS_ERROR("encoding-decoding cycle failed\n"); + if (!H5Pequal(orig_pl, pl)) + PUTS_ERROR("encoding-decoding cycle failed\n"); - if ((H5Pclose(pl)) < 0) - STACK_ERROR; + if ((H5Pclose(pl)) < 
0) + STACK_ERROR; - if (temp_buf) - HDfree(temp_buf); + if (temp_buf) + HDfree(temp_buf); #endif /* H5_NO_DEPRECATED_SYMBOLS */ - } if ((H5Pclose(fapl)) < 0) TEST_ERROR; @@ -211,7 +200,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if (test_encode_decode(dcpl, low, high, FALSE) < 0) + if (test_encode_decode(dcpl, low, high) < 0) FAIL_PUTS_ERROR("Default DCPL encoding/decoding failed\n"); PASSED(); @@ -241,7 +230,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if (test_encode_decode(dcpl, low, high, FALSE) < 0) + if (test_encode_decode(dcpl, low, high) < 0) FAIL_PUTS_ERROR("DCPL encoding/decoding failed\n"); /* release resource */ @@ -252,31 +241,38 @@ main(void) /******* ENCODE/DECODE DCPLS *****/ TESTING("DCPL Encoding/Decoding for virtual layout"); - if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + if (high < H5F_LIBVER_V110) + HDprintf(" SKIPPED: virtual layout not supported yet\n"); - /* Set virtual layout */ - if (H5Pset_layout(dcpl, H5D_VIRTUAL) < 0) - TEST_ERROR; + else { + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; - /* Create source dataspace */ - if ((srcspace = H5Screate_simple(1, dims, NULL)) < 0) - TEST_ERROR; + /* Set virtual layout */ + if (H5Pset_layout(dcpl, H5D_VIRTUAL) < 0) + TEST_ERROR; - /* Create virtual dataspace */ - if ((vspace = H5Screate_simple(1, dims, NULL)) < 0) - TEST_ERROR; + /* Create source dataspace */ + if ((srcspace = H5Screate_simple(1, dims, NULL)) < 0) + TEST_ERROR; - /* Add virtual layout mapping */ - if (H5Pset_virtual(dcpl, vspace, SRC_FNAME, SRC_DSET, srcspace) < 0) - TEST_ERROR; + /* Create virtual dataspace */ + if ((vspace = H5Screate_simple(1, dims, NULL)) < 0) + TEST_ERROR; - if (test_encode_decode(dcpl, low, high, TRUE) < 0) - FAIL_PUTS_ERROR("DCPL encoding/decoding failed\n"); + /* Add virtual layout mapping */ + if (H5Pset_virtual(dcpl, vspace, SRC_FNAME, SRC_DSET, srcspace) < 0) + TEST_ERROR; - /* release resource */ - if ((H5Pclose(dcpl)) < 0) - FAIL_STACK_ERROR; + if (test_encode_decode(dcpl, low, high) < 0) + FAIL_PUTS_ERROR("DCPL encoding/decoding failed\n"); + + /* release resource */ + if ((H5Pclose(dcpl)) < 0) + FAIL_STACK_ERROR; + + PASSED(); + } /******* ENCODE/DECODE DAPLS *****/ TESTING("Default DAPL Encoding/Decoding"); @@ -284,7 +280,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if (test_encode_decode(dapl, low, high, FALSE) < 0) + if (test_encode_decode(dapl, low, high) < 0) FAIL_PUTS_ERROR("Default DAPL encoding/decoding failed\n"); PASSED(); @@ -295,7 +291,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if (test_encode_decode(dapl, low, high, FALSE) < 0) + if (test_encode_decode(dapl, low, high) < 0) FAIL_PUTS_ERROR("DAPL encoding/decoding failed\n"); /* release resource */ @@ -310,7 +306,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if (test_encode_decode(ocpl, low, high, FALSE) < 0) + if (test_encode_decode(ocpl, low, high) < 0) FAIL_PUTS_ERROR("Default OCPL encoding/decoding failed\n"); PASSED(); @@ -327,7 +323,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if (test_encode_decode(ocpl, low, high, FALSE) < 0) + if (test_encode_decode(ocpl, low, high) < 0) FAIL_PUTS_ERROR("OCPL encoding/decoding failed\n"); /* release resource */ @@ -342,7 +338,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if 
(test_encode_decode(dxpl, low, high, FALSE) < 0) + if (test_encode_decode(dxpl, low, high) < 0) FAIL_PUTS_ERROR("Default DXPL encoding/decoding failed\n"); PASSED(); @@ -372,7 +368,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if (test_encode_decode(dxpl, low, high, FALSE) < 0) + if (test_encode_decode(dxpl, low, high) < 0) FAIL_PUTS_ERROR("DXPL encoding/decoding failed\n"); /* release resource */ @@ -387,7 +383,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if (test_encode_decode(gcpl, low, high, FALSE) < 0) + if (test_encode_decode(gcpl, low, high) < 0) FAIL_PUTS_ERROR("Default GCPL encoding/decoding failed\n"); PASSED(); @@ -411,7 +407,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if (test_encode_decode(gcpl, low, high, FALSE) < 0) + if (test_encode_decode(gcpl, low, high) < 0) FAIL_PUTS_ERROR("GCPL encoding/decoding failed\n"); /* release resource */ @@ -426,7 +422,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if (test_encode_decode(lcpl, low, high, FALSE) < 0) + if (test_encode_decode(lcpl, low, high) < 0) FAIL_PUTS_ERROR("Default LCPL encoding/decoding failed\n"); PASSED(); @@ -437,7 +433,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if (test_encode_decode(lcpl, low, high, FALSE) < 0) + if (test_encode_decode(lcpl, low, high) < 0) FAIL_PUTS_ERROR("LCPL encoding/decoding failed\n"); /* release resource */ @@ -452,7 +448,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if (test_encode_decode(lapl, low, high, FALSE) < 0) + if (test_encode_decode(lapl, low, high) < 0) FAIL_PUTS_ERROR("Default LAPL encoding/decoding failed\n"); PASSED(); @@ -482,7 +478,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if (test_encode_decode(lapl, low, high, FALSE) < 0) + if (test_encode_decode(lapl, low, high) < 0) FAIL_PUTS_ERROR("LAPL encoding/decoding failed\n"); /* release resource */ @@ -497,7 +493,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if (test_encode_decode(ocpypl, low, high, FALSE) < 0) + if (test_encode_decode(ocpypl, low, high) < 0) FAIL_PUTS_ERROR("Default OCPYPL encoding/decoding failed\n"); PASSED(); @@ -513,7 +509,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if (test_encode_decode(ocpypl, low, high, FALSE) < 0) + if (test_encode_decode(ocpypl, low, high) < 0) FAIL_PUTS_ERROR("OCPYPL encoding/decoding failed\n"); /* release resource */ @@ -528,7 +524,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if (test_encode_decode(fapl, low, high, FALSE) < 0) + if (test_encode_decode(fapl, low, high) < 0) FAIL_PUTS_ERROR("Default FAPL encoding/decoding failed\n"); PASSED(); @@ -563,7 +559,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if (test_encode_decode(fapl, low, high, FALSE) < 0) + if (test_encode_decode(fapl, low, high) < 0) FAIL_PUTS_ERROR("FAPL encoding/decoding failed\n"); /* release resource */ @@ -579,7 +575,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if (test_encode_decode(fcpl, low, high, FALSE) < 0) + if (test_encode_decode(fcpl, low, high) < 0) FAIL_PUTS_ERROR("Default FCPL encoding/decoding failed\n"); PASSED(); @@ -608,7 +604,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if 
(test_encode_decode(fcpl, low, high, FALSE) < 0) + if (test_encode_decode(fcpl, low, high) < 0) FAIL_PUTS_ERROR("FCPL encoding/decoding failed\n"); /* release resource */ @@ -624,7 +620,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if (test_encode_decode(strcpl, low, high, FALSE) < 0) + if (test_encode_decode(strcpl, low, high) < 0) FAIL_PUTS_ERROR("Default STRCPL encoding/decoding failed\n"); PASSED(); @@ -635,7 +631,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if (test_encode_decode(strcpl, low, high, FALSE) < 0) + if (test_encode_decode(strcpl, low, high) < 0) FAIL_PUTS_ERROR("STRCPL encoding/decoding failed\n"); /* release resource */ @@ -651,7 +647,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding default property list */ - if (test_encode_decode(acpl, low, high, FALSE) < 0) + if (test_encode_decode(acpl, low, high) < 0) FAIL_PUTS_ERROR("Default ACPL encoding/decoding failed\n"); PASSED(); @@ -662,7 +658,7 @@ main(void) FAIL_STACK_ERROR; /* Test encoding & decoding property list */ - if (test_encode_decode(acpl, low, high, FALSE) < 0) + if (test_encode_decode(acpl, low, high) < 0) FAIL_PUTS_ERROR("ACPL encoding/decoding failed\n"); /* release resource */ From da6c19846736b076053aaa294c6a3cb6b9625aa4 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 21 Dec 2022 12:07:25 -0600 Subject: [PATCH 021/231] H5F_LIBVER_LATEST changes for move to 1.15 (#2288) * H5F_LIBVER_LATEST changes for move to 1.15 * Add new default api check * Format fixes * Fix default configure * fix lib version tests * Fix another version variable * Add 1.14 doc link --- .github/workflows/main.yml | 24 ++++++++++++++++++- CMakeLists.txt | 11 +++++++++ README.md | 2 +- bin/make_vers | 4 ++-- c++/src/cpp_doc_config | 2 +- config/cmake/H5pubconf.h.in | 3 +++ config/cmake/scripts/HDF5config.cmake | 4 ++-- configure.ac | 9 +++++-- doxygen/dox/Overview.dox | 1 + examples/testh5cc.sh.in | 14 ++++++++++- java/src/hdf/hdf5lib/H5.java | 4 ++-- java/src/hdf/hdf5lib/HDF5Constants.java | 4 ++++ java/src/jni/h5Constants.c | 5 ++++ java/test/TestH5.java | 4 ++-- release_docs/RELEASE.txt | 2 +- src/H5.c | 4 ++-- src/H5Aint.c | 1 + src/H5Dlayout.c | 12 ++++++---- src/H5Fpublic.h | 3 ++- src/H5Fsuper.c | 1 + src/H5Ofill.c | 1 + src/H5Ofsinfo.c | 1 + src/H5Oint.c | 1 + src/H5Opline.c | 1 + src/H5S.c | 1 + src/H5Shyper.c | 1 + src/H5Spoint.c | 1 + src/H5T.c | 1 + src/H5public.h | 8 +++---- src/H5trace.c | 6 ++++- test/chunk_info.c | 9 +++++-- test/dtypes.c | 6 ++--- test/h5test.c | 3 ++- test/tfile.c | 1 + tools/src/h5repack/h5repack_main.c | 3 ++- .../test/h5repack/testfiles/h5repack-help.txt | 3 ++- ...h5repack_layout.h5-plugin_version_test.ddl | 14 +++++------ 37 files changed, 132 insertions(+), 43 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index cd5e2214d04..8abe0fd44a3 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -247,6 +247,28 @@ jobs: cmake: "Debug" autotools: "debug" + - name: "Ubuntu gcc Autotools v1.14 default API (build only)" + os: ubuntu-latest + cpp: enable + fortran: enable + java: enable + parallel: disable + mirror_vfd: enable + direct_vfd: enable + deprec_sym: enable + default_api: v114 + toolchain: "" + generator: "autogen" + flags: "" + run_tests: false + thread_safety: + - enabled: false + text: "" + build_mode: + - text: "DBG" + cmake: "Debug" + autotools: "debug" + - name: "Ubuntu gcc Autotools no deprecated 
symbols (build only)" os: ubuntu-latest cpp: enable @@ -256,7 +278,7 @@ jobs: mirror_vfd: enable direct_vfd: enable deprec_sym: disable - default_api: v114 + default_api: v116 toolchain: "" generator: "autogen" flags: "" diff --git a/CMakeLists.txt b/CMakeLists.txt index 31cae2e6cff..1c85965fa05 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -812,6 +812,17 @@ if (DEFAULT_API_VERSION MATCHES "v114") set (H5_USE_114_API_DEFAULT 1) endif () +#----------------------------------------------------------------------------- +# Option to use 1.16.x API +#----------------------------------------------------------------------------- +if (NOT DEFAULT_API_VERSION) + set (DEFAULT_API_VERSION "v116") +endif () +set (H5_USE_116_API_DEFAULT 0) +if (DEFAULT_API_VERSION MATCHES "v116") + set (H5_USE_116_API_DEFAULT 1) +endif () + #----------------------------------------------------------------------------- # Include user macros #----------------------------------------------------------------------------- diff --git a/README.md b/README.md index ecf4b6657f0..40947ab32a2 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -HDF5 version 1.13.4-1 currently under development +HDF5 version 1.15.0 currently under development ![HDF5 Logo](doxygen/img/HDF5.png) diff --git a/bin/make_vers b/bin/make_vers index 956fbfd3317..1e21bf74cbb 100755 --- a/bin/make_vers +++ b/bin/make_vers @@ -8,8 +8,8 @@ use warnings; # is added (like support for 1.4, etc), the min_sup_idx parameter will # need to be decremented.) -# Max. library "index" (0 = v1.0, 1 = 1.2, 2 = 1.4, 3 = 1.6, 4 = 1.8, 5 = 1.10, 6 = 1.12, 7 = 1.14, etc) -$max_idx = 7; +# Max. library "index" (0 = v1.0, 1 = 1.2, 2 = 1.4, 3 = 1.6, 4 = 1.8, 5 = 1.10, 6 = 1.12, 7 = 1.14, 8 = 1.16, etc) +$max_idx = 8; # Min. supported previous library version "index" (0 = v1.0, 1 = 1.2, etc) $min_sup_idx = 3; diff --git a/c++/src/cpp_doc_config b/c++/src/cpp_doc_config index 42bac70a48f..a3595bf5ab9 100644 --- a/c++/src/cpp_doc_config +++ b/c++/src/cpp_doc_config @@ -38,7 +38,7 @@ PROJECT_NAME = # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = "1.13.4-1, currently under development" +PROJECT_NUMBER = "1.15.0" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/config/cmake/H5pubconf.h.in b/config/cmake/H5pubconf.h.in index c1b051ff666..da23cb361bb 100644 --- a/config/cmake/H5pubconf.h.in +++ b/config/cmake/H5pubconf.h.in @@ -632,6 +632,9 @@ /* Define using v1.14 public API symbols by default */ #cmakedefine H5_USE_114_API_DEFAULT @H5_USE_114_API_DEFAULT@ +/* Define using v1.16 public API symbols by default */ +#cmakedefine H5_USE_116_API_DEFAULT @H5_USE_116_API_DEFAULT@ + /* Define if the library will use file locking */ #cmakedefine H5_USE_FILE_LOCKING @H5_USE_FILE_LOCKING@ diff --git a/config/cmake/scripts/HDF5config.cmake b/config/cmake/scripts/HDF5config.cmake index 71223cb61b8..2fbb26cdd7b 100644 --- a/config/cmake/scripts/HDF5config.cmake +++ b/config/cmake/scripts/HDF5config.cmake @@ -37,8 +37,8 @@ cmake_minimum_required (VERSION 3.18) # CTEST_SOURCE_NAME - source folder ############################################################################## -set (CTEST_SOURCE_VERSION "1.13.4") -set (CTEST_SOURCE_VERSEXT "-1") +set (CTEST_SOURCE_VERSION "1.15.0") +set (CTEST_SOURCE_VERSEXT "") ############################################################################## # handle input parameters to script. diff --git a/configure.ac b/configure.ac index 5f62140e95e..3d82c20db26 100644 --- a/configure.ac +++ b/configure.ac @@ -22,7 +22,7 @@ AC_PREREQ([2.69]) ## NOTE: Do not forget to change the version number here when we do a ## release!!! ## -AC_INIT([HDF5], [1.13.4-1], [help@hdfgroup.org]) +AC_INIT([HDF5], [1.15.0], [help@hdfgroup.org]) AC_CONFIG_SRCDIR([src/H5.c]) AC_CONFIG_HEADERS([src/H5config.h]) @@ -3789,6 +3789,11 @@ elif test "X$withval" = "Xv114"; then DEFAULT_API_VERSION=v114 AC_DEFINE([USE_114_API_DEFAULT], [1], [Define using v1.14 public API symbols by default]) +elif test "X$withval" = "Xv116"; then + AC_MSG_RESULT([v116]) + DEFAULT_API_VERSION=v116 + AC_DEFINE([USE_116_API_DEFAULT], [1], + [Define using v1.16 public API symbols by default]) else AC_MSG_ERROR([invalid version of public symbols given]) fi @@ -3798,7 +3803,7 @@ fi ## if the user insists on doing this via the --enable-unsupported configure ## flag, we'll let them. if test "X${ALLOW_UNSUPPORTED}" != "Xyes"; then - if test "X${DEFAULT_API_VERSION}" != "Xv114" -a "X${DEPRECATED_SYMBOLS}" = "Xno" ; then + if test "X${DEFAULT_API_VERSION}" != "Xv116" -a "X${DEPRECATED_SYMBOLS}" = "Xno" ; then AC_MSG_ERROR([Removing old public API symbols not allowed when using them as default public API symbols. 
Use --enable-unsupported to override this error.]) fi fi diff --git a/doxygen/dox/Overview.dox b/doxygen/dox/Overview.dox index 040769c859c..3d171f7e2b5 100644 --- a/doxygen/dox/Overview.dox +++ b/doxygen/dox/Overview.dox @@ -24,6 +24,7 @@ documents cover a mix of tasks, concepts, and reference, to help a specific Version-specific documentation (see the version in the title area) can be found here: - HDF5 develop branch (this site) + - HDF5 1.14.x - HDF5 1.12.x - HDF5 1.10.x - HDF5 1.8.x diff --git a/examples/testh5cc.sh.in b/examples/testh5cc.sh.in index a958b663883..ff8f83f89d2 100644 --- a/examples/testh5cc.sh.in +++ b/examples/testh5cc.sh.in @@ -51,6 +51,7 @@ H5_USE_18_API_DEFAULT=`grep '#define H5_USE_18_API_DEFAULT ' ../src/H5pubconf.h` H5_USE_110_API_DEFAULT=`grep '#define H5_USE_110_API_DEFAULT ' ../src/H5pubconf.h` H5_USE_112_API_DEFAULT=`grep '#define H5_USE_112_API_DEFAULT ' ../src/H5pubconf.h` H5_USE_114_API_DEFAULT=`grep '#define H5_USE_114_API_DEFAULT ' ../src/H5pubconf.h` +H5_USE_116_API_DEFAULT=`grep '#define H5_USE_116_API_DEFAULT ' ../src/H5pubconf.h` # setup my machine information. myos=`uname -s` @@ -415,6 +416,8 @@ elif [ -n "$H5_USE_112_API_DEFAULT" ]; then echo "H5_USE_112_API_DEFAULT is defined." elif [ -n "$H5_USE_114_API_DEFAULT" ]; then echo "H5_USE_114_API_DEFAULT is defined." +elif [ -n "$H5_USE_116_API_DEFAULT" ]; then + echo "H5_USE_116_API_DEFAULT is defined." else echo "No H5 API_DEFAULT is defined." fi @@ -447,15 +450,24 @@ elif [ -n "$H5_USE_112_API_DEFAULT" ]; then TOOLTEST -DH5_USE_18_API_DEFAULT $v18main TOOLTEST -DH5_USE_110_API_DEFAULT $v110main TOOLTEST $v112main -else +elif [ -n "$H5_USE_114_API_DEFAULT" ]; then echo "Testing HDF5 with 114_API_DEFAULT" TOOLTEST -DH5_USE_16_API_DEFAULT $v16main TOOLTEST -DH5_USE_18_API_DEFAULT $v18main TOOLTEST -DH5_USE_110_API_DEFAULT $v110main TOOLTEST -DH5_USE_112_API_DEFAULT $v112main + TOOLTEST $v114main +else + echo "Testing HDF5 with 116_API_DEFAULT" + TOOLTEST -DH5_USE_16_API_DEFAULT $v16main + TOOLTEST -DH5_USE_18_API_DEFAULT $v18main + TOOLTEST -DH5_USE_110_API_DEFAULT $v110main + TOOLTEST -DH5_USE_112_API_DEFAULT $v112main + TOOLTEST -DH5_USE_114_API_DEFAULT $v114main TOOLTEST $v18main TOOLTEST $v110main TOOLTEST $v112main + TOOLTEST $v114main fi ############################################################################## diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java index e7b04dc84f8..7a3ad891278 100644 --- a/java/src/hdf/hdf5lib/H5.java +++ b/java/src/hdf/hdf5lib/H5.java @@ -228,7 +228,7 @@ * which prints out the HDF5 error stack, as described in the HDF5 C API @ref H5Eprint(). This * may be used by Java exception handlers to print out the HDF5 error stack.
* - * @version HDF5 1.13.4
+ * @version HDF5 1.15.0
* See also: * @ref HDFARRAY hdf.hdf5lib.HDFArray
* @ref HDF5CONST hdf.hdf5lib.HDF5Constants
@@ -270,7 +270,7 @@ public class H5 implements java.io.Serializable { * * Make sure to update the versions number when a different library is used. */ - public final static int LIB_VERSION[] = {1, 13, 3}; + public final static int LIB_VERSION[] = {1, 15, 0}; /** * @ingroup JH5 diff --git a/java/src/hdf/hdf5lib/HDF5Constants.java b/java/src/hdf/hdf5lib/HDF5Constants.java index 57f88eba83d..25b65fa016a 100644 --- a/java/src/hdf/hdf5lib/HDF5Constants.java +++ b/java/src/hdf/hdf5lib/HDF5Constants.java @@ -573,6 +573,8 @@ public class HDF5Constants { /** */ public static final int H5F_LIBVER_V114 = H5F_LIBVER_V114(); /** */ + public static final int H5F_LIBVER_V116 = H5F_LIBVER_V116(); + /** */ public static final int H5F_LIBVER_NBOUNDS = H5F_LIBVER_NBOUNDS(); /** */ public static final int H5F_LIBVER_LATEST = H5F_LIBVER_LATEST(); @@ -2050,6 +2052,8 @@ public class HDF5Constants { private static native final int H5F_LIBVER_V114(); + private static native final int H5F_LIBVER_V116(); + private static native final int H5F_LIBVER_NBOUNDS(); private static native final int H5F_LIBVER_LATEST(); diff --git a/java/src/jni/h5Constants.c b/java/src/jni/h5Constants.c index 18a74b3d3d0..c69ba09fbf6 100644 --- a/java/src/jni/h5Constants.c +++ b/java/src/jni/h5Constants.c @@ -1310,6 +1310,11 @@ Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1V114(JNIEnv *env, jclass cls) return H5F_LIBVER_V114; } JNIEXPORT jint JNICALL +Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1V116(JNIEnv *env, jclass cls) +{ + return H5F_LIBVER_V116; +} +JNIEXPORT jint JNICALL Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1NBOUNDS(JNIEnv *env, jclass cls) { return H5F_LIBVER_NBOUNDS; diff --git a/java/test/TestH5.java b/java/test/TestH5.java index bb9258289ac..9457ffdfcf1 100644 --- a/java/test/TestH5.java +++ b/java/test/TestH5.java @@ -313,7 +313,7 @@ public void testH5set_free_list_limits() @Test public void testH5get_libversion() { - int libversion[] = {1, 13, 3}; + int libversion[] = {1, 15, 0}; try { H5.H5get_libversion(libversion); @@ -354,7 +354,7 @@ public void testH5get_libversion_null_param() @Test public void testH5check_version() { - int majnum = 1, minnum = 13, relnum = 4; + int majnum = 1, minnum = 15, relnum = 0; try { H5.H5check_version(majnum, minnum, relnum); diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 5dea822c7fb..fe063011ee2 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -1,4 +1,4 @@ -HDF5 version 1.13.4-1 currently under development +HDF5 version 1.15.0 currently under development ================================================================================ diff --git a/src/H5.c b/src/H5.c index 89330fe98d9..259e2409588 100644 --- a/src/H5.c +++ b/src/H5.c @@ -70,8 +70,8 @@ static int H5__mpi_delete_cb(MPI_Comm comm, int keyval, void *attr_val, int *fla /*****************************/ /* Library incompatible release versions, develop releases are incompatible by design */ -const unsigned VERS_RELEASE_EXCEPTIONS[] = {0, 1, 2, 3, 4}; -const unsigned VERS_RELEASE_EXCEPTIONS_SIZE = 5; +const unsigned VERS_RELEASE_EXCEPTIONS[] = {0}; +const unsigned VERS_RELEASE_EXCEPTIONS_SIZE = 1; /* statically initialize block for pthread_once call used in initializing */ /* the first global mutex */ diff --git a/src/H5Aint.c b/src/H5Aint.c index 8662b5fd0a0..0b504be6b70 100644 --- a/src/H5Aint.c +++ b/src/H5Aint.c @@ -104,6 +104,7 @@ const unsigned H5O_attr_ver_bounds[] = { H5O_ATTR_VERSION_3, /* H5F_LIBVER_V18 */ H5O_ATTR_VERSION_3, /* H5F_LIBVER_V110 */ H5O_ATTR_VERSION_3, /* 
H5F_LIBVER_V112 */ + H5O_ATTR_VERSION_3, /* H5F_LIBVER_V114 */ H5O_ATTR_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c index 8a10a37c0ad..26bdc55efe7 100644 --- a/src/H5Dlayout.c +++ b/src/H5Dlayout.c @@ -42,11 +42,13 @@ /* Format version bounds for layout */ const unsigned H5O_layout_ver_bounds[] = { - H5O_LAYOUT_VERSION_1, /* H5F_LIBVER_EARLIEST */ - H5O_LAYOUT_VERSION_3, /* H5F_LIBVER_V18 */ /* H5O_LAYOUT_VERSION_DEFAULT */ - H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V110 */ - H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V112 */ - H5O_LAYOUT_VERSION_LATEST /* H5F_LIBVER_LATEST */ + H5O_LAYOUT_VERSION_1, /* H5F_LIBVER_EARLIEST */ + H5O_LAYOUT_VERSION_3, + /* H5F_LIBVER_V18 */ /* H5O_LAYOUT_VERSION_DEFAULT */ + H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V110 */ + H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V112 */ + H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V114 */ + H5O_LAYOUT_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; /*****************************/ diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h index 7d2a5da7fbc..9bee5c4c848 100644 --- a/src/H5Fpublic.h +++ b/src/H5Fpublic.h @@ -189,10 +189,11 @@ typedef enum H5F_libver_t { H5F_LIBVER_V110 = 2, /**< Use the latest v110 format for storing objects */ H5F_LIBVER_V112 = 3, /**< Use the latest v112 format for storing objects */ H5F_LIBVER_V114 = 4, /**< Use the latest v114 format for storing objects */ + H5F_LIBVER_V116 = 5, /**< Use the latest v116 format for storing objects */ H5F_LIBVER_NBOUNDS /**< Sentinel */ } H5F_libver_t; -#define H5F_LIBVER_LATEST H5F_LIBVER_V114 +#define H5F_LIBVER_LATEST H5F_LIBVER_V116 /** * File space handling strategy diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c index cf18fb0e5fb..d590119574c 100644 --- a/src/H5Fsuper.c +++ b/src/H5Fsuper.c @@ -70,6 +70,7 @@ static const unsigned HDF5_superblock_ver_bounds[] = { HDF5_SUPERBLOCK_VERSION_2, /* H5F_LIBVER_V18 */ HDF5_SUPERBLOCK_VERSION_3, /* H5F_LIBVER_V110 */ HDF5_SUPERBLOCK_VERSION_3, /* H5F_LIBVER_V112 */ + HDF5_SUPERBLOCK_VERSION_3, /* H5F_LIBVER_V114 */ HDF5_SUPERBLOCK_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Ofill.c b/src/H5Ofill.c index 094edacad3e..45877d25bd3 100644 --- a/src/H5Ofill.c +++ b/src/H5Ofill.c @@ -155,6 +155,7 @@ const unsigned H5O_fill_ver_bounds[] = { H5O_FILL_VERSION_3, /* H5F_LIBVER_V18 */ H5O_FILL_VERSION_3, /* H5F_LIBVER_V110 */ H5O_FILL_VERSION_3, /* H5F_LIBVER_V112 */ + H5O_FILL_VERSION_3, /* H5F_LIBVER_V114 */ H5O_FILL_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Ofsinfo.c b/src/H5Ofsinfo.c index 4d5934d08da..b3766060e54 100644 --- a/src/H5Ofsinfo.c +++ b/src/H5Ofsinfo.c @@ -69,6 +69,7 @@ static const unsigned H5O_fsinfo_ver_bounds[] = { H5O_INVALID_VERSION, /* H5F_LIBVER_V18 */ H5O_FSINFO_VERSION_1, /* H5F_LIBVER_V110 */ H5O_FSINFO_VERSION_1, /* H5F_LIBVER_V112 */ + H5O_FSINFO_VERSION_1, /* H5F_LIBVER_V114 */ H5O_FSINFO_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; #define N_FSINFO_VERSION_BOUNDS H5F_LIBVER_NBOUNDS diff --git a/src/H5Oint.c b/src/H5Oint.c index 0499cd9e508..cdcf6c82157 100644 --- a/src/H5Oint.c +++ b/src/H5Oint.c @@ -125,6 +125,7 @@ const unsigned H5O_obj_ver_bounds[] = { H5O_VERSION_2, /* H5F_LIBVER_V18 */ H5O_VERSION_2, /* H5F_LIBVER_V110 */ H5O_VERSION_2, /* H5F_LIBVER_V112 */ + H5O_VERSION_2, /* H5F_LIBVER_V114 */ H5O_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Opline.c b/src/H5Opline.c index 58c729f3f3c..4ccd96cf45d 100644 --- a/src/H5Opline.c +++ b/src/H5Opline.c @@ -91,6 +91,7 @@ const unsigned H5O_pline_ver_bounds[] = { H5O_PLINE_VERSION_2, /* 
H5F_LIBVER_V18 */ H5O_PLINE_VERSION_2, /* H5F_LIBVER_V110 */ H5O_PLINE_VERSION_2, /* H5F_LIBVER_V112 */ + H5O_PLINE_VERSION_2, /* H5F_LIBVER_V114 */ H5O_PLINE_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5S.c b/src/H5S.c index 84f2f82a2ca..bb5028d482a 100644 --- a/src/H5S.c +++ b/src/H5S.c @@ -60,6 +60,7 @@ const unsigned H5O_sdspace_ver_bounds[] = { H5O_SDSPACE_VERSION_2, /* H5F_LIBVER_V18 */ H5O_SDSPACE_VERSION_2, /* H5F_LIBVER_V110 */ H5O_SDSPACE_VERSION_2, /* H5F_LIBVER_V112 */ + H5O_SDSPACE_VERSION_2, /* H5F_LIBVER_V114 */ H5O_SDSPACE_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Shyper.c b/src/H5Shyper.c index 84cef8044a7..2399937d5e0 100644 --- a/src/H5Shyper.c +++ b/src/H5Shyper.c @@ -243,6 +243,7 @@ const unsigned H5O_sds_hyper_ver_bounds[] = { H5S_HYPER_VERSION_1, /* H5F_LIBVER_V18 */ H5S_HYPER_VERSION_2, /* H5F_LIBVER_V110 */ H5S_HYPER_VERSION_3, /* H5F_LIBVER_V112 */ + H5S_HYPER_VERSION_3, /* H5F_LIBVER_V114 */ H5S_HYPER_VERSION_3 /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Spoint.c b/src/H5Spoint.c index 884418152e4..b10b7da166b 100644 --- a/src/H5Spoint.c +++ b/src/H5Spoint.c @@ -128,6 +128,7 @@ const unsigned H5O_sds_point_ver_bounds[] = { H5S_POINT_VERSION_1, /* H5F_LIBVER_V18 */ H5S_POINT_VERSION_1, /* H5F_LIBVER_V110 */ H5S_POINT_VERSION_2, /* H5F_LIBVER_V112 */ + H5S_POINT_VERSION_2, /* H5F_LIBVER_V114 */ H5S_POINT_VERSION_2 /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5T.c b/src/H5T.c index 7e852271e86..277fb06bc9a 100644 --- a/src/H5T.c +++ b/src/H5T.c @@ -551,6 +551,7 @@ const unsigned H5O_dtype_ver_bounds[] = { H5O_DTYPE_VERSION_3, /* H5F_LIBVER_V18 */ H5O_DTYPE_VERSION_3, /* H5F_LIBVER_V110 */ H5O_DTYPE_VERSION_4, /* H5F_LIBVER_V112 */ + H5O_DTYPE_VERSION_4, /* H5F_LIBVER_V114 */ H5O_DTYPE_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5public.h b/src/H5public.h index 345191cf9f0..5ed54d9fee2 100644 --- a/src/H5public.h +++ b/src/H5public.h @@ -79,19 +79,19 @@ /** * For minor interface/format changes */ -#define H5_VERS_MINOR 13 +#define H5_VERS_MINOR 15 /** * For tweaks, bug-fixes, or development */ -#define H5_VERS_RELEASE 4 +#define H5_VERS_RELEASE 0 /** * For pre-releases like \c snap0. Empty string for official releases. 
*/ -#define H5_VERS_SUBRELEASE "1" +#define H5_VERS_SUBRELEASE "" /** * Full version string */ -#define H5_VERS_INFO "HDF5 library version: 1.13.4-1" +#define H5_VERS_INFO "HDF5 library version: 1.15.0" #define H5check() H5check_version(H5_VERS_MAJOR, H5_VERS_MINOR, H5_VERS_RELEASE) diff --git a/src/H5trace.c b/src/H5trace.c index 03eaf11f545..3be5b9181bf 100644 --- a/src/H5trace.c +++ b/src/H5trace.c @@ -1441,7 +1441,11 @@ H5_trace_args(H5RS_str_t *rs, const char *type, va_list ap) break; case H5F_LIBVER_V114: - HDcompile_assert(H5F_LIBVER_LATEST == H5F_LIBVER_V114); + H5RS_acat(rs, "H5F_LIBVER_V114"); + break; + + case H5F_LIBVER_V116: + HDcompile_assert(H5F_LIBVER_LATEST == H5F_LIBVER_V116); H5RS_acat(rs, "H5F_LIBVER_LATEST"); break; diff --git a/test/chunk_info.c b/test/chunk_info.c index 5651b26cb6c..6f3359e7caf 100644 --- a/test/chunk_info.c +++ b/test/chunk_info.c @@ -47,8 +47,13 @@ #endif /* Test file names, using H5F_libver_t as indices */ -const char *FILENAME[] = {"tchunk_info_earliest", "tchunk_info_v18", "tchunk_info_v110", - "tchunk_info_v112", "tchunk_info_v114", NULL}; +const char *FILENAME[] = {"tchunk_info_earliest", + "tchunk_info_v18", + "tchunk_info_v110", + "tchunk_info_v112", + "tchunk_info_v114", + "tchunk_info_v116", + NULL}; /* File to be used in test_failed_attempts */ #define FILTERMASK_FILE "tflt_msk" diff --git a/test/dtypes.c b/test/dtypes.c index cffc08fe681..2d0dc887dfa 100644 --- a/test/dtypes.c +++ b/test/dtypes.c @@ -8695,9 +8695,9 @@ test_versionbounds(void) H5T_t *dtypep = NULL; /* Pointer to internal structure of a datatype */ hsize_t arr_dim[] = {ARRAY_LEN}; /* Length of the array */ int low, high; /* Indices for iterating over versions */ - H5F_libver_t versions[] = {H5F_LIBVER_EARLIEST, H5F_LIBVER_V18, H5F_LIBVER_V110, H5F_LIBVER_V112, - H5F_LIBVER_V114}; - int versions_count = 5; /* Number of version bounds in the array */ + H5F_libver_t versions[] = {H5F_LIBVER_EARLIEST, H5F_LIBVER_V18, H5F_LIBVER_V110, + H5F_LIBVER_V112, H5F_LIBVER_V114, H5F_LIBVER_V114}; + int versions_count = 6; /* Number of version bounds in the array */ unsigned highest_version; /* Highest version in nested datatypes */ color_t enum_val; /* Enum type index */ herr_t ret = 0; /* Generic return value */ diff --git a/test/h5test.c b/test/h5test.c index b4bf102a43a..0138c6927ad 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -109,7 +109,8 @@ const char *LIBVER_NAMES[] = {"earliest", /* H5F_LIBVER_EARLIEST = 0 */ "v18", /* H5F_LIBVER_V18 = 1 */ "v110", /* H5F_LIBVER_V110 = 2 */ "v112", /* H5F_LIBVER_V112 = 3 */ - "latest", /* H5F_LIBVER_V114 = 4 */ + "v114", /* H5F_LIBVER_V114 = 4 */ + "latest", /* H5F_LIBVER_V116 = 5 */ NULL}; /* Previous error reporting function */ diff --git a/test/tfile.c b/test/tfile.c index 16e55c6794f..dd72da64d07 100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -5915,6 +5915,7 @@ test_libver_bounds_super_create(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t n case H5F_LIBVER_V110: case H5F_LIBVER_V112: case H5F_LIBVER_V114: + case H5F_LIBVER_V116: ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_3); VERIFY(ok, TRUE, "HDF5_superblock_ver_bounds"); break; diff --git a/tools/src/h5repack/h5repack_main.c b/tools/src/h5repack/h5repack_main.c index 5ecb423ce98..9578f7abeaf 100644 --- a/tools/src/h5repack/h5repack_main.c +++ b/tools/src/h5repack/h5repack_main.c @@ -224,8 +224,9 @@ usage(const char *prog) PRINTVALSTREAM(rawoutstream, " 2: This is H5F_LIBVER_V110 in H5F_libver_t struct\n"); PRINTVALSTREAM(rawoutstream, " 3: This is H5F_LIBVER_V112 
in H5F_libver_t struct\n"); PRINTVALSTREAM(rawoutstream, " 4: This is H5F_LIBVER_V114 in H5F_libver_t struct\n"); + PRINTVALSTREAM(rawoutstream, " 5: This is H5F_LIBVER_V116 in H5F_libver_t struct\n"); PRINTVALSTREAM(rawoutstream, - " (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V114 for this release\n"); + " (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V116 for this release\n"); PRINTVALSTREAM(rawoutstream, "\n"); PRINTVALSTREAM(rawoutstream, " FS_STRATEGY is a string indicating the file space strategy used:\n"); PRINTVALSTREAM(rawoutstream, " FSM_AGGR:\n"); diff --git a/tools/test/h5repack/testfiles/h5repack-help.txt b/tools/test/h5repack/testfiles/h5repack-help.txt index bff70afdb08..b291a6881dd 100644 --- a/tools/test/h5repack/testfiles/h5repack-help.txt +++ b/tools/test/h5repack/testfiles/h5repack-help.txt @@ -91,7 +91,8 @@ usage: h5repack [OPTIONS] file1 file2 2: This is H5F_LIBVER_V110 in H5F_libver_t struct 3: This is H5F_LIBVER_V112 in H5F_libver_t struct 4: This is H5F_LIBVER_V114 in H5F_libver_t struct - (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V114 for this release + 5: This is H5F_LIBVER_V116 in H5F_libver_t struct + (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V116 for this release FS_STRATEGY is a string indicating the file space strategy used: FSM_AGGR: diff --git a/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl b/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl index 6afabbcd03f..d7aa1c3b0c5 100644 --- a/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl +++ b/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl @@ -11,7 +11,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 13 4 } + PARAMS { 9 1 15 0 } } } FILLVALUE { @@ -33,7 +33,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 13 4 } + PARAMS { 9 1 15 0 } } } FILLVALUE { @@ -55,7 +55,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 13 4 } + PARAMS { 9 1 15 0 } } } FILLVALUE { @@ -77,7 +77,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 13 4 } + PARAMS { 9 1 15 0 } } } FILLVALUE { @@ -99,7 +99,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 13 4 } + PARAMS { 9 1 15 0 } } } FILLVALUE { @@ -121,7 +121,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 13 4 } + PARAMS { 9 1 15 0 } } } FILLVALUE { @@ -143,7 +143,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 13 4 } + PARAMS { 9 1 15 0 } } } FILLVALUE { From 6b8b32941a61f7130b25a3dd3d3872881befafb0 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 21 Dec 2022 10:46:05 -0800 Subject: [PATCH 022/231] Onion VFD header cleanup (#2337) * Partial work towards cleaning up the onion VFD code * Committing clang-format changes * A few minor tweaks to the onion VFD public header Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- src/H5FDonion.c | 10 +-- src/H5FDonion.h | 178 ++++++++++++++++++----------------------- src/H5FDonion_header.h | 9 +-- 3 files changed, 85 insertions(+), 112 deletions(-) diff --git a/src/H5FDonion.c b/src/H5FDonion.c index 904f2cc7808..1c81870694d 100644 --- a/src/H5FDonion.c +++ b/src/H5FDonion.c @@ -577,8 +577,6 @@ H5FD__onion_close(H5FD_t *_file) HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "Can't write updated header to backing store") } } - 
else if (H5FD_ONION_STORE_TARGET_H5 == file->fa.store_target) - HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL, "hdf5 store-target not supported") else HGOTO_ERROR(H5E_VFL, H5E_BADVALUE, FAIL, "invalid history target") @@ -704,8 +702,6 @@ H5FD__onion_create_truncate_onion(H5FD_onion_t *file, const char *filename, cons rec = &file->curr_rev_record; hdr->flags = H5FD_ONION_HEADER_FLAG_WRITE_LOCK; - if (H5FD_ONION_FAPL_INFO_CREATE_FLAG_ENABLE_DIVERGENT_HISTORY & file->fa.creation_flags) - hdr->flags |= H5FD_ONION_HEADER_FLAG_DIVERGENT_HISTORY; if (H5FD_ONION_FAPL_INFO_CREATE_FLAG_ENABLE_PAGE_ALIGNMENT & file->fa.creation_flags) hdr->flags |= H5FD_ONION_HEADER_FLAG_PAGE_ALIGNMENT; @@ -955,9 +951,7 @@ H5FD__onion_open(const char *filename, unsigned flags, hid_t fapl_id, haddr_t ma } /* Check for unsupported target values */ - if (H5FD_ONION_STORE_TARGET_H5 == fa->store_target) - HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, NULL, "same-file storage not implemented") - else if (H5FD_ONION_STORE_TARGET_ONION != fa->store_target) + if (H5FD_ONION_STORE_TARGET_ONION != fa->store_target) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid store target") /* Allocate space for the file struct */ @@ -1056,8 +1050,6 @@ H5FD__onion_open(const char *filename, unsigned flags, hid_t fapl_id, haddr_t ma new_open = true; - if (H5FD_ONION_FAPL_INFO_CREATE_FLAG_ENABLE_DIVERGENT_HISTORY & file->fa.creation_flags) - hdr->flags |= H5FD_ONION_HEADER_FLAG_DIVERGENT_HISTORY; if (H5FD_ONION_FAPL_INFO_CREATE_FLAG_ENABLE_PAGE_ALIGNMENT & file->fa.creation_flags) { hdr->flags |= H5FD_ONION_HEADER_FLAG_PAGE_ALIGNMENT; file->align_history_on_pages = TRUE; diff --git a/src/H5FDonion.h b/src/H5FDonion.h index 0e605d0fd80..09b290e2a9e 100644 --- a/src/H5FDonion.h +++ b/src/H5FDonion.h @@ -21,117 +21,93 @@ #define H5FD_ONION (H5FDperform_init(H5FD_onion_init)) #define H5FD_ONION_VALUE H5_VFD_ONION -/* Current version of the fapl info struct */ +/** + * Current version of the onion VFD fapl info struct. + */ #define H5FD_ONION_FAPL_INFO_VERSION_CURR 1 -/* Flag to open a file that has a locked header (after crashes, for example) */ -#define H5FD_ONION_FAPL_INFO_FLAG_FORCE_OPEN 1 - -/* Flag to enable opening older revisions in write mode, creating a tree */ -#define H5FD_ONION_FAPL_INFO_CREATE_FLAG_ENABLE_DIVERGENT_HISTORY 0x1 - -/* Flag to require page alignment of onion revision data */ -#define H5FD_ONION_FAPL_INFO_CREATE_FLAG_ENABLE_PAGE_ALIGNMENT 0x2 +#define H5FD_ONION_FAPL_INFO_CREATE_FLAG_ENABLE_PAGE_ALIGNMENT \ + (0x0001u) /**< \ + * Onion history metadata will align to page_size. \ + * Partial pages of unused space will occur in the file, \ + * but may improve read performance from the backing store \ + * on some systems. \ + * If disabled (0), padding will not be inserted to align \ + * to page boundaries. \ + */ -/* Max length of a comment - * The buffer is defined to be this size + 1 to handle the NUL +/** + * Max length of a comment. + * The buffer is defined to be this size + 1 to handle the NUL. */ #define H5FD_ONION_FAPL_INFO_COMMENT_MAX_LEN 255 -/* Indicates that you want the latest revision */ +/** + * Indicates that you want the latest revision. + */ #define H5FD_ONION_FAPL_INFO_REVISION_ID_LATEST UINT64_MAX +/** + * Indicates how the new onion data will be stored. 
+ */ typedef enum H5FD_onion_target_file_constant_t { - H5FD_ONION_STORE_TARGET_H5, /* Onion history as part of HDF5 file */ - H5FD_ONION_STORE_TARGET_ONION, /* Separate, single "onion" file */ + H5FD_ONION_STORE_TARGET_ONION, /**< + * Onion history is stored in a single, separate "onion + * file". Shares filename and path as hdf5 file (if any), + * with only a different filename extension. + */ } H5FD_onion_target_file_constant_t; -/*----------------------------------------------------------------------------- - * Structure H5FD_onion_fapl_info_t - * - * Purpose: Encapsulate info for the Onion driver FAPL entry. - * - * version: Future-proofing identifier. Informs struct membership. - * Must equal H5FD_ONION_FAPL_VERSION_CURR to be considered valid. - * - * backing_fapl_id: - * Backing or 'child' FAPL ID to handle I/O with the - * underlying backing store. If the onion data is stored as a - * separate file, it must use the same backing driver as the - * original file. - * - * page_size: Size of the amended data pages. If opening an existing file, - * must equal the existing page size or zero. If creating a new - * file or an initial revision of an existing file, must be a - * power of 2. - * - * store_target: - * Enumerated/defined value identifying where the history data is - * stored, either in the same file (appended to HDF5 data) or a - * separate file. Other options may be added in later versions. - * - * + H5FD_ONION_FAPL_STORE_MODE_SEPARATE_SINGLE (1) - * Onion history is stored in a single, separate "onion - * file". Shares filename and path as hdf5 file (if any), - * with only a different filename extension. - * - * revision_num: Which revision to open. Must be 0 (the original file) or the - * revision number of an existing revision. - * Revision ID -1 is reserved to open the most recently-created - * revision in history. - * - * force_write_open: - * Flag to ignore the write-lock flag in the onion data - * and attempt to open the file write-only anyway. - * This may be relevant if, for example, the library crashed - * while the file was open in write mode and the write-lock - * flag was not cleared. - * Must equal H5FD_ONION_FAPL_FLAG_FORCE_OPEN to enable. - * - * creation_flags: - * Flag used only when instantiating an Onion file. - * If the relevant bit is set to a nonzero value, its feature - * will be enabled. - * - * + H5FD_ONION_FAPL_CREATE_FLAG_ENABLE_DIVERGENT_HISTORY - * (1, bit 1) - * User will be allowed to open arbitrary revisions - * in write mode. - * If disabled (0), only the most recent revision may be - * opened for amendment. - * - * + H5FD_ONION_FAPL_CREATE_FLAG_ENABLE_PAGE_ALIGNMENT (2, bit 2) - * Onion history metadata will align to page_size. - * Partial pages of unused space will occur in the file, - * but may improve read performance from the backing store - * on some systems. - * If disabled (0), padding will not be inserted to align - * to page boundaries. - * - * + - * - * comment: User-supplied NULL-terminated comment for a revision to be - * written. - * Cannot be longer than H5FD_ONION_FAPL_COMMENT_MAX_LEN. - * Ignored if part of a FAPL used to open in read mode. - * - * The comment for a revision may be modified prior to committing - * to the revision (closing the file and writing the record) - * with a call to H5FDfctl(). - * This H5FDfctl overwrite may be used to exceed constraints of - * maximum string length and the NULL-terminator requirement. 
- * - *----------------------------------------------------------------------------- +/** + * Stores fapl information for creating onion VFD files. */ typedef struct H5FD_onion_fapl_info_t { - uint8_t version; - hid_t backing_fapl_id; - uint32_t page_size; - H5FD_onion_target_file_constant_t store_target; - uint64_t revision_num; - uint8_t force_write_open; - uint8_t creation_flags; - char comment[H5FD_ONION_FAPL_INFO_COMMENT_MAX_LEN + 1]; + uint8_t version; /**< + * Future-proofing identifier. Informs struct membership. + * Must equal H5FD_ONION_FAPL_VERSION_CURR to be considered valid. + */ + hid_t backing_fapl_id; /**< + * Backing or 'child' FAPL ID to handle I/O with the + * underlying backing store. It must use the same backing driver as the + * original file. + */ + uint32_t page_size; /**< + * page_size: Size of the amended data pages. If opening an existing file, + * must equal the existing page size or zero. If creating a new + * file or an initial revision of an existing file, must be a + * power of 2. + * + */ + H5FD_onion_target_file_constant_t store_target; /**< + * Identifies where the history data is stored. + */ + uint64_t revision_num; /**< + * Which revision to open. Valid values are 0 (the original file) or the + * revision number of an existing revision. + * H5FD_ONION_FAPL_INFO_REVISION_ID_LATEST refers to the most + * recently-created revision in the history. + */ + uint8_t force_write_open; /**< + * Flag to ignore the write-lock flag in the onion data + * and attempt to open the file write-only anyway. + * This may be relevant if, for example, the library crashed + * while the file was open in write mode and the write-lock + * flag was not cleared. + * Must equal H5FD_ONION_FAPL_FLAG_FORCE_OPEN to enable. + * + */ + uint8_t creation_flags; /**< + * Flag used only when instantiating an onion file. + * If the relevant bit is set to a nonzero value, its feature + * will be enabled. + */ + char comment[H5FD_ONION_FAPL_INFO_COMMENT_MAX_LEN + + 1]; /**< + * User-supplied NULL-terminated comment for a revision to be + * written. + * Cannot be longer than H5FD_ONION_FAPL_COMMENT_MAX_LEN. + * Ignored if part of a FAPL used to open in read mode. + */ } H5FD_onion_fapl_info_t; #ifdef __cplusplus @@ -154,6 +130,8 @@ H5_DLL hid_t H5FD_onion_init(void); * \details H5Pget_fapl_onion() retrieves the structure H5FD_onion_fapl_info_t * from the file access property list that is set for the onion VFD * driver. + * + * \since 1.14.0 */ H5_DLL herr_t H5Pget_fapl_onion(hid_t fapl_id, H5FD_onion_fapl_info_t *fa_out); @@ -171,6 +149,8 @@ H5_DLL herr_t H5Pget_fapl_onion(hid_t fapl_id, H5FD_onion_fapl_info_t *fa_out); * \details H5Pset_fapl_onion() sets the structure H5FD_onion_fapl_info_t * for the file access property list that is set for the onion VFD * driver. + * + * \since 1.14.0 */ H5_DLL herr_t H5Pset_fapl_onion(hid_t fapl_id, const H5FD_onion_fapl_info_t *fa); @@ -190,6 +170,8 @@ H5_DLL herr_t H5Pset_fapl_onion(hid_t fapl_id, const H5FD_onion_fapl_info_t *fa) * for an onion file. It takes the file name and file access property * list that is set for the onion VFD driver. 
* + * + * \since 1.14.0 */ H5_DLL herr_t H5FDonion_get_revision_count(const char *filename, hid_t fapl_id, uint64_t *revision_count); diff --git a/src/H5FDonion_header.h b/src/H5FDonion_header.h index deb0d968c9e..f8dd739d13e 100644 --- a/src/H5FDonion_header.h +++ b/src/H5FDonion_header.h @@ -23,11 +23,10 @@ #define H5FD_ONION_ENCODED_SIZE_HEADER 40 /* Flags must align exactly one per bit, up to 24 bits */ -#define H5FD_ONION_HEADER_FLAG_WRITE_LOCK 0x1 -#define H5FD_ONION_HEADER_FLAG_DIVERGENT_HISTORY 0x2 -#define H5FD_ONION_HEADER_FLAG_PAGE_ALIGNMENT 0x4 -#define H5FD_ONION_HEADER_SIGNATURE "OHDH" -#define H5FD_ONION_HEADER_VERSION_CURR 1 +#define H5FD_ONION_HEADER_FLAG_WRITE_LOCK 0x1 +#define H5FD_ONION_HEADER_FLAG_PAGE_ALIGNMENT 0x2 +#define H5FD_ONION_HEADER_SIGNATURE "OHDH" +#define H5FD_ONION_HEADER_VERSION_CURR 1 /* In-memory representation of the on-store onion history file header. */ From bcbf750315b048a8a8123fbdbc9f687a7d5e1e7b Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 21 Dec 2022 13:44:50 -0800 Subject: [PATCH 023/231] Adds Doxygen VOL flag markup (#2340) * Initial doxygen markup * Committing clang-format changes * Adds Doxygen comments to VOL flags * Committing clang-format changes Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- src/H5VLpublic.h | 118 +++++++++++++++++++++++++---------------------- 1 file changed, 63 insertions(+), 55 deletions(-) diff --git a/src/H5VLpublic.h b/src/H5VLpublic.h index c4bc5a74559..2683e0ac9c8 100644 --- a/src/H5VLpublic.h +++ b/src/H5VLpublic.h @@ -60,64 +60,72 @@ */ #define H5_VOL_MAX 65535 -/* Capability flags for connector */ -#define H5VL_CAP_FLAG_NONE 0x0000000000000000 /* No special connector capabilities */ -#define H5VL_CAP_FLAG_THREADSAFE 0x0000000000000001 /* Connector is threadsafe */ -#define H5VL_CAP_FLAG_ASYNC 0x0000000000000002 /* Connector performs operations asynchronously*/ -#define H5VL_CAP_FLAG_NATIVE_FILES 0x0000000000000004 /* Connector produces native file format */ -#define H5VL_CAP_FLAG_ATTR_BASIC 0x0000000000000008 -#define H5VL_CAP_FLAG_ATTR_MORE 0x0000000000000010 -#define H5VL_CAP_FLAG_DATASET_BASIC 0x0000000000000020 -#define H5VL_CAP_FLAG_DATASET_MORE 0x0000000000000040 -#define H5VL_CAP_FLAG_FILE_BASIC 0x0000000000000080 -#define H5VL_CAP_FLAG_FILE_MORE 0x0000000000000100 -#define H5VL_CAP_FLAG_GROUP_BASIC 0x0000000000000200 -#define H5VL_CAP_FLAG_GROUP_MORE 0x0000000000000400 -#define H5VL_CAP_FLAG_LINK_BASIC 0x0000000000000800 -#define H5VL_CAP_FLAG_LINK_MORE 0x0000000000001000 -#define H5VL_CAP_FLAG_MAP_BASIC 0x0000000000002000 -#define H5VL_CAP_FLAG_MAP_MORE 0x0000000000004000 -#define H5VL_CAP_FLAG_OBJECT_BASIC 0x0000000000008000 -#define H5VL_CAP_FLAG_OBJECT_MORE 0x0000000000010000 -#define H5VL_CAP_FLAG_REF_BASIC 0x0000000000020000 -#define H5VL_CAP_FLAG_REF_MORE 0x0000000000040000 -#define H5VL_CAP_FLAG_OBJ_REF 0x0000000000080000 -#define H5VL_CAP_FLAG_REG_REF 0x0000000000100000 -#define H5VL_CAP_FLAG_ATTR_REF 0x0000000000200000 -#define H5VL_CAP_FLAG_STORED_DATATYPES 0x0000000000400000 -#define H5VL_CAP_FLAG_CREATION_ORDER 0x0000000000800000 -#define H5VL_CAP_FLAG_ITERATE 0x0000000001000000 -#define H5VL_CAP_FLAG_STORAGE_SIZE 0x0000000002000000 -#define H5VL_CAP_FLAG_BY_IDX 0x0000000004000000 -#define H5VL_CAP_FLAG_GET_PLIST 0x0000000008000000 -#define H5VL_CAP_FLAG_FLUSH_REFRESH 0x0000000010000000 -#define H5VL_CAP_FLAG_EXTERNAL_LINKS 0x0000000020000000 -#define H5VL_CAP_FLAG_HARD_LINKS 0x0000000040000000 
-#define H5VL_CAP_FLAG_SOFT_LINKS 0x0000000080000000 -#define H5VL_CAP_FLAG_UD_LINKS 0x0000000100000000 -#define H5VL_CAP_FLAG_TRACK_TIMES 0x0000000200000000 -#define H5VL_CAP_FLAG_MOUNT 0x0000000400000000 -#define H5VL_CAP_FLAG_FILTERS 0x0000000800000000 -#define H5VL_CAP_FLAG_FILL_VALUES 0x0000001000000000 +/* + * Capability flags for VOL connectors + */ +#define H5VL_CAP_FLAG_NONE 0x0000000000000000 /**< No special connector capabilities */ +#define H5VL_CAP_FLAG_THREADSAFE 0x0000000000000001 /**< Connector is threadsafe */ +#define H5VL_CAP_FLAG_ASYNC 0x0000000000000002 /**< Connector performs operations asynchronously*/ +#define H5VL_CAP_FLAG_NATIVE_FILES 0x0000000000000004 /**< Connector produces native file format */ +#define H5VL_CAP_FLAG_ATTR_BASIC 0x0000000000000008 /**< H5A create/delete/exists/open/close/read/write */ +#define H5VL_CAP_FLAG_ATTR_MORE 0x0000000000000010 /**< All other H5A API calls */ +#define H5VL_CAP_FLAG_DATASET_BASIC 0x0000000000000020 /**< H5D create/open/close/read/write */ +#define H5VL_CAP_FLAG_DATASET_MORE 0x0000000000000040 /**< All other H5D API calls */ +#define H5VL_CAP_FLAG_FILE_BASIC 0x0000000000000080 /**< H5F create/open/close/read/write */ +#define H5VL_CAP_FLAG_FILE_MORE 0x0000000000000100 /**< All other H5F API calls */ +#define H5VL_CAP_FLAG_GROUP_BASIC 0x0000000000000200 /**< H5G create/open/close */ +#define H5VL_CAP_FLAG_GROUP_MORE 0x0000000000000400 /**< All other H5G API calls*/ +#define H5VL_CAP_FLAG_LINK_BASIC 0x0000000000000800 /**< H5L exists/delete */ +#define H5VL_CAP_FLAG_LINK_MORE 0x0000000000001000 /**< All other H5L API calls */ +#define H5VL_CAP_FLAG_MAP_BASIC \ + 0x0000000000002000 /**< H5M create/open/close/get*type/get_count/put/get/exists/delete */ +#define H5VL_CAP_FLAG_MAP_MORE 0x0000000000004000 /**< All other H5M API calls */ +#define H5VL_CAP_FLAG_OBJECT_BASIC 0x0000000000008000 /**< H5O open/close/exists */ +#define H5VL_CAP_FLAG_OBJECT_MORE 0x0000000000010000 /**< All other H5O API calls */ +#define H5VL_CAP_FLAG_REF_BASIC 0x0000000000020000 /**< H5Rdestroy */ +#define H5VL_CAP_FLAG_REF_MORE 0x0000000000040000 /**< All other H5R API calls */ +#define H5VL_CAP_FLAG_OBJ_REF 0x0000000000080000 /**< Connector supports object references */ +#define H5VL_CAP_FLAG_REG_REF 0x0000000000100000 /**< Connector supports regional references */ +#define H5VL_CAP_FLAG_ATTR_REF 0x0000000000200000 /**< Connector supports attribute references */ +#define H5VL_CAP_FLAG_STORED_DATATYPES 0x0000000000400000 /**< Connector supports stored datatypes */ +#define H5VL_CAP_FLAG_CREATION_ORDER 0x0000000000800000 /**< Connector tracks creation order */ +#define H5VL_CAP_FLAG_ITERATE 0x0000000001000000 /**< Connector supports iteration functions */ +#define H5VL_CAP_FLAG_STORAGE_SIZE 0x0000000002000000 /**< Connector can return a meaningful storage size */ +#define H5VL_CAP_FLAG_BY_IDX 0x0000000004000000 /**< "by index" API calls are supported */ +#define H5VL_CAP_FLAG_GET_PLIST \ + 0x0000000008000000 /**< Connector can return the property lists used to create an object */ +#define H5VL_CAP_FLAG_FLUSH_REFRESH 0x0000000010000000 /**< flush/refresh calls are supported */ +#define H5VL_CAP_FLAG_EXTERNAL_LINKS 0x0000000020000000 /**< External links are supported */ +#define H5VL_CAP_FLAG_HARD_LINKS 0x0000000040000000 /**< Hard links are supported */ +#define H5VL_CAP_FLAG_SOFT_LINKS 0x0000000080000000 /**< Soft links are supported */ +#define H5VL_CAP_FLAG_UD_LINKS 0x0000000100000000 /**< User-defined links are supported */ +#define 
H5VL_CAP_FLAG_TRACK_TIMES 0x0000000200000000 /**< Connector tracks creation, etc. times */ +#define H5VL_CAP_FLAG_MOUNT 0x0000000400000000 /**< H5Fmount/unmount supported */ +#define H5VL_CAP_FLAG_FILTERS 0x0000000800000000 /**< Connector implements a filter pipeline */ +#define H5VL_CAP_FLAG_FILL_VALUES 0x0000001000000000 /**< Connector allows fill values to be set */ -/* Flags to return from H5VLquery_optional API and 'opt_query' callbacks */ -/* Note: Operations which access multiple objects' data or metadata in a - * container should be registered as file-level optional operations. - * (e.g. "H5Dwrite_multi" takes a list of datasets to write data to, so - * a VOL connector that implemented it should register it as an optional - * file operation, and pass-through VOL connectors that are stacked above - * the connector that registered it should assume that dataset elements - * for _any_ dataset in the file could be written to) +/** + * \ingroup H5VLDEF + * + * Flags to return from H5VLquery_optional API and 'opt_query' callbacks + * + * \details Operations which access multiple objects' data or metadata in a + * container should be registered as file-level optional operations. + * (e.g. "H5Dwrite_multi" takes a list of datasets to write data to, so + * a VOL connector that implemented it should register it as an optional + * file operation, and pass-through VOL connectors that are stacked above + * the connector that registered it should assume that dataset elements + * for _any_ dataset in the file could be written to) */ -#define H5VL_OPT_QUERY_SUPPORTED 0x0001 /* VOL connector supports this operation */ -#define H5VL_OPT_QUERY_READ_DATA 0x0002 /* Operation reads data for object */ -#define H5VL_OPT_QUERY_WRITE_DATA 0x0004 /* Operation writes data for object */ -#define H5VL_OPT_QUERY_QUERY_METADATA 0x0008 /* Operation reads metadata for object */ -#define H5VL_OPT_QUERY_MODIFY_METADATA 0x0010 /* Operation modifies metadata for object */ +#define H5VL_OPT_QUERY_SUPPORTED 0x0001 /**< VOL connector supports this operation */ +#define H5VL_OPT_QUERY_READ_DATA 0x0002 /**< Operation reads data for object */ +#define H5VL_OPT_QUERY_WRITE_DATA 0x0004 /**< Operation writes data for object */ +#define H5VL_OPT_QUERY_QUERY_METADATA 0x0008 /**< Operation reads metadata for object */ +#define H5VL_OPT_QUERY_MODIFY_METADATA 0x0010 /**< Operation modifies metadata for object */ #define H5VL_OPT_QUERY_COLLECTIVE \ - 0x0020 /* Operation is collective (operations without this flag are assumed to be independent) */ -#define H5VL_OPT_QUERY_NO_ASYNC 0x0040 /* Operation may NOT be executed asynchronously */ -#define H5VL_OPT_QUERY_MULTI_OBJ 0x0080 /* Operation involves multiple objects */ + 0x0020 /**< Operation is collective (operations without this flag are assumed to be independent) */ +#define H5VL_OPT_QUERY_NO_ASYNC 0x0040 /**< Operation may NOT be executed asynchronously */ +#define H5VL_OPT_QUERY_MULTI_OBJ 0x0080 /**< Operation involves multiple objects */ /*******************/ /* Public Typedefs */ From fd753bf673e0cd39d11157ece05b8eb1da4e93fc Mon Sep 17 00:00:00 2001 From: Larry Knox Date: Thu, 22 Dec 2022 13:38:51 -0600 Subject: [PATCH 024/231] Update testh5cc.sh.in for new major version 1.14. (#2349) * Update testh5cc.sh.in for new major version 1.14. * iFix sha256sum commands in release script. 
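
The capability flags documented in the H5VLpublic.h hunk above are meant to be tested at run time rather than assumed, since they describe whatever connector stack a file access property list selects. Below is a minimal sketch of that pattern, using the H5Pget_vol_cap_flags() call that also appears later in this patch series; the particular flag choices are illustrative and error handling is abbreviated, so treat this as an example under those assumptions rather than code from any patch here.

/* Query the VOL capability flags carried by a file access property list
 * and gate optional behavior on them.  Uses the default (native) connector
 * unless the fapl is changed before the query. */
#include <stdio.h>
#include "hdf5.h"

int
main(void)
{
    hid_t    fapl;
    uint64_t flags = H5VL_CAP_FLAG_NONE;

    if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        return 1;
    if (H5Pget_vol_cap_flags(fapl, &flags) < 0) {
        fprintf(stderr, "unable to query VOL capability flags\n");
        H5Pclose(fapl);
        return 1;
    }

    if (flags & H5VL_CAP_FLAG_ASYNC)
        printf("connector stack supports asynchronous operations\n");
    if (flags & H5VL_CAP_FLAG_FLUSH_REFRESH)
        printf("connector stack supports flush/refresh calls\n");
    if (!(flags & H5VL_CAP_FLAG_EXTERNAL_LINKS))
        printf("external links unsupported; skipping that feature\n");

    H5Pclose(fapl);
    return 0;
}
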
--- bin/release | 16 ++++++------- examples/testh5cc.sh.in | 52 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 59 insertions(+), 9 deletions(-) diff --git a/bin/release b/bin/release index 6beab5704a2..ca9a47ce3c3 100755 --- a/bin/release +++ b/bin/release @@ -52,7 +52,7 @@ for compressing the resulting tar archive (if none are given then information is available in the README_HPC file. doc -- produce the latest doc tree in addition to the archive. -An sha256 checksum is produced for each archive created and stored in the sha256 file. +A sha256 checksum is produced for each archive created and stored in the sha256 file. Examples: @@ -537,37 +537,37 @@ for comp in $methods; do case $comp in tar) cp -p $tmpdir/$HDF5_VERS.tar $DEST/$HDF5_VERS.tar - (cd $DEST; sha256 $HDF5_VERS.tar >> $SHA256) + (cd $DEST; sha256sum $HDF5_VERS.tar >> $SHA256) ;; gzip) test "$verbose" && echo " Running gzip..." 1>&2 gzip -9 <$tmpdir/$HDF5_VERS.tar >$DEST/$HDF5_VERS.tar.gz - (cd $DEST; sha256 $HDF5_VERS.tar.gz >> $SHA256) + (cd $DEST; sha256sum $HDF5_VERS.tar.gz >> $SHA256) ;; cmake-tgz) test "$verbose" && echo " Creating CMake tar.gz file..." 1>&2 tar2cmaketgz $HDF5_VERS $tmpdir/$HDF5_VERS.tar $DEST/CMake-$HDF5_VERS.tar.gz 1>&2 - (cd $DEST; sha256 CMake-$HDF5_VERS.tar.gz >> $SHA256) + (cd $DEST; sha256sum CMake-$HDF5_VERS.tar.gz >> $SHA256) ;; hpc-cmake-tgz) test "$verbose" && echo " Creating HPC-CMake tar.gz file..." 1>&2 tar2hpccmaketgz $HDF5_VERS $tmpdir/$HDF5_VERS.tar $DEST/HPC-CMake-$HDF5_VERS.tar.gz 1>&2 - (cd $DEST; sha256 HPC-CMake-$HDF5_VERS.tar.gz >> $SHA256) + (cd $DEST; sha256sum HPC-CMake-$HDF5_VERS.tar.gz >> $SHA256) ;; bzip2) test "$verbose" && echo " Running bzip2..." 1>&2 bzip2 -9 <$tmpdir/$HDF5_VERS.tar >$DEST/$HDF5_VERS.tar.bz2 - (cd $DEST; sha256 $HDF5_VERS.tar.bz2 >> $SHA256) + (cd $DEST; sha256sum $HDF5_VERS.tar.bz2 >> $SHA256) ;; zip) test "$verbose" && echo " Creating zip ball..." 1>&2 tar2zip $HDF5_VERS $tmpdir/$HDF5_VERS.tar $DEST/$HDF5_VERS.zip 1>&2 - (cd $DEST; sha256 $HDF5_VERS.zip >> $SHA256) + (cd $DEST; sha256sum $HDF5_VERS.zip >> $SHA256) ;; cmake-zip) test "$verbose" && echo " Creating CMake-zip ball..." 1>&2 tar2cmakezip $HDF5_VERS $tmpdir/$HDF5_VERS.tar $DEST/CMake-$HDF5_VERS.zip 1>&2 - (cd $DEST; sha256 CMake-$HDF5_VERS.zip >> $SHA256) + (cd $DEST; sha256sum CMake-$HDF5_VERS.zip >> $SHA256) ;; doc) if [ "${DOCVERSION}" = "" ]; then diff --git a/examples/testh5cc.sh.in b/examples/testh5cc.sh.in index ff8f83f89d2..3eb8949c4bd 100644 --- a/examples/testh5cc.sh.in +++ b/examples/testh5cc.sh.in @@ -69,6 +69,8 @@ v110main=${H5TOOL}_v110main.$suffix v110main_o=${H5TOOL}_v110main.o v112main=${H5TOOL}_v112main.$suffix v112main_o=${H5TOOL}_v112main.o +v114main=${H5TOOL}_v114main.$suffix +v114main_o=${H5TOOL}_v114main.o appmain=${H5TOOL}_appmain.$suffix appmain_o=${H5TOOL}_appmain.o prog1=${H5TOOL}_prog1.$suffix @@ -82,7 +84,7 @@ applib=libapp${H5TOOL}.a # Don't use the wildcard form of *.h5 as it will wipe out even *.h5 generated # by other test programs. This will cause a racing condition error when # parallel make (e.g., gmake -j 4) is used. -temp_SRC="$hdf5main $v16main $v18main $v110main $v112main $appmain $prog1 $prog2" +temp_SRC="$hdf5main $v16main $v18main $v110main $v112main $v114main $appmain $prog1 $prog2" temp_OBJ=`echo $temp_SRC | sed -e "s/\.${suffix}/.o/g"` temp_FILES="a.out $applib" @@ -286,6 +288,53 @@ main (void) } EOF +# Generate HDF5 v1.14 Main Program: +# This makes unique V1.14 API calls. 
+cat > $v114main < Date: Fri, 23 Dec 2022 16:05:31 -0800 Subject: [PATCH 025/231] HL GIF tools changes (#2360) * Fixes the broken Autotools option * Removes the "build HL tools" option --- config/cmake/libhdf5.settings.cmake.in | 2 +- configure.ac | 53 ++++++++++---------------- hl/CMakeLists.txt | 10 +---- hl/Makefile.am | 4 -- hl/tools/CMakeLists.txt | 16 ++++---- hl/tools/Makefile.am | 2 +- src/libhdf5.settings.in | 1 + 7 files changed, 32 insertions(+), 56 deletions(-) diff --git a/config/cmake/libhdf5.settings.cmake.in b/config/cmake/libhdf5.settings.cmake.in index 625b0c02601..f2cf6c073f7 100644 --- a/config/cmake/libhdf5.settings.cmake.in +++ b/config/cmake/libhdf5.settings.cmake.in @@ -72,7 +72,7 @@ Features: Dimension scales w/ new references: @DIMENSION_SCALES_WITH_NEW_REF@ Build HDF5 Tests: @BUILD_TESTING@ Build HDF5 Tools: @HDF5_BUILD_TOOLS@ - Build High-level HDF5 Tools: @HDF5_BUILD_HL_TOOLS@ + Build GIF Tools: @HDF5_BUILD_HL_GIF_TOOLS@ Threadsafety: @HDF5_ENABLE_THREADSAFE@ Default API mapping: @DEFAULT_API_VERSION@ With deprecated public symbols: @HDF5_ENABLE_DEPRECATED_SYMBOLS@ diff --git a/configure.ac b/configure.ac index 3d82c20db26..35db84b111c 100644 --- a/configure.ac +++ b/configure.ac @@ -874,37 +874,6 @@ else AC_MSG_RESULT([no]) fi -AC_MSG_CHECKING([if the high-level tools are enabled]) -AC_ARG_ENABLE([hltools], - [AS_HELP_STRING([--enable-hltools], - [Enable the high-level tools. - [default=yes] - ])], - [HDF5_HL_TOOLS=$enableval]) - -if test "X${HDF5_HL}" = "Xyes" -a "X-$HDF5_HL_TOOLS" = "X-yes"; then - AC_MSG_RESULT([yes]) - HL_TOOLS="tools" -else - AC_MSG_RESULT([no]) -fi - -AC_MSG_CHECKING([if the high-level GIF tools are enabled]) -AC_ARG_ENABLE([hltools], - [AS_HELP_STRING([--enable-hlgiftools], - [Enable the high-level GIF tools. - [default=no] - ])], - [HDF5_HL_GIF_TOOLS=$enableval]) - -if test "X${HDF5_GIF_HL}" = "Xyes" -a "X-$HDF5_HL_TOOLS" = "X-yes" -a "X-$HDF5_HL_GIF_TOOLS" = "X-yes"; then - AC_MSG_RESULT([yes]) - HL_GIF_TOOLS="tools" -else - AC_MSG_RESULT([no]) -fi - - ## ---------------------------------------------------------------------- ## Enable new references for dimension scales ## @@ -1195,6 +1164,27 @@ AC_ARG_ENABLE([tools], AC_MSG_RESULT([$HDF5_TOOLS]) +## ---------------------------------------------------------------------- +## Check if they would like to disable building the high-level GIF +## tools (they have unfixed CVE issues) +## + +AC_MSG_CHECKING([if the high-level GIF tools are enabled]) +AC_ARG_ENABLE([hlgiftools], + [AS_HELP_STRING([--enable-hlgiftools], + [Enable the high-level GIF tools. NOTE: These have unfixed CVE issues! 
+ [default=no] + ])], + [HDF5_HL_GIF_TOOLS=$enableval]) + +if test "X-$HDF5_TOOLS" = "X-yes" -a "X-$HDF5_HL" = "X-yes" -a "X-$HDF5_HL_GIF_TOOLS" = "X-yes"; then + AC_MSG_RESULT([yes]) +else + AC_MSG_RESULT([no]) + HDF5_HL_GIF_TOOLS="no" +fi + + ## ---------------------------------------------------------------------- ## Check if they would like to enable building doxygen files ## @@ -3927,7 +3917,6 @@ AM_CONDITIONAL([BUILD_HDF5_HL_CONDITIONAL], [test "X$HDF5_HL" = "Xyes"]) AM_CONDITIONAL([BUILD_TESTS_CONDITIONAL], [test "X$HDF5_TESTS" = "Xyes"]) AM_CONDITIONAL([BUILD_TESTS_PARALLEL_CONDITIONAL], [test -n "$TESTPARALLEL"]) AM_CONDITIONAL([BUILD_TOOLS_CONDITIONAL], [test "X$HDF5_TOOLS" = "Xyes"]) -AM_CONDITIONAL([BUILD_TOOLS_HL_CONDITIONAL], [test "X$HDF5_HL_TOOLS" = "Xyes"]) AM_CONDITIONAL([BUILD_TOOLS_HL_GIF_CONDITIONAL], [test "X$HDF5_HL_GIF_TOOLS" = "Xyes"]) AM_CONDITIONAL([BUILD_DOXYGEN_CONDITIONAL], [test "X$HDF5_DOXYGEN" = "Xyes"]) diff --git a/hl/CMakeLists.txt b/hl/CMakeLists.txt index 6c5ad70e31b..a777b72f540 100644 --- a/hl/CMakeLists.txt +++ b/hl/CMakeLists.txt @@ -17,15 +17,7 @@ add_subdirectory (src) # Build HDF5 Tools if (HDF5_BUILD_TOOLS) - #----------------------------------------------------------------------------- - #-- Option to build the High level Tools - #----------------------------------------------------------------------------- - if (EXISTS "${HDF5_HL_SOURCE_DIR}/tools" AND IS_DIRECTORY "${HDF5_HL_SOURCE_DIR}/tools") - option (HDF5_BUILD_HL_TOOLS "Build HDF5 HL Tools" ON) - if (HDF5_BUILD_HL_TOOLS) - add_subdirectory (tools) - endif () - endif () + add_subdirectory (tools) endif () #-- Add High Level Examples diff --git a/hl/Makefile.am b/hl/Makefile.am index 3cf8778c3ea..5660b9bc5fe 100644 --- a/hl/Makefile.am +++ b/hl/Makefile.am @@ -36,14 +36,10 @@ else TEST_DIR = endif if BUILD_TOOLS_CONDITIONAL -if BUILD_TOOLS_HL_CONDITIONAL TOOLS_DIR = tools else TOOLS_DIR = endif -else - TOOLS_DIR = -endif ## Don't recurse into any subdirectories if HDF5 is not configured to ## use the HL library diff --git a/hl/tools/CMakeLists.txt b/hl/tools/CMakeLists.txt index 7df2b796a04..a369d9efa18 100644 --- a/hl/tools/CMakeLists.txt +++ b/hl/tools/CMakeLists.txt @@ -1,14 +1,12 @@ cmake_minimum_required (VERSION 3.18) project (HDF5_HL_TOOLS C) - #----------------------------------------------------------------------------- - #-- Option to build the High level GIF Tools - #----------------------------------------------------------------------------- - if (EXISTS "${HDF5_HL_SOURCE_DIR}/gif2h5" AND IS_DIRECTORY "${HDF5_HL_SOURCE_DIR}/gif2h5") - option (HDF5_BUILD_HL_GIF_TOOLS "Build HDF5 HL GIF Tools" OFF) - if (HDF5_BUILD_HL_GIF_TOOLS) - add_subdirectory (gif2h5) - endif () - endif () +#----------------------------------------------------------------------------- +#-- Option to build the High level GIF Tools +#----------------------------------------------------------------------------- +option (HDF5_BUILD_HL_GIF_TOOLS "Build HDF5 HL GIF Tools" OFF) +if (HDF5_BUILD_HL_GIF_TOOLS) + add_subdirectory (gif2h5) +endif () add_subdirectory (h5watch) diff --git a/hl/tools/Makefile.am b/hl/tools/Makefile.am index f3fe0d352ae..9f93f676dce 100644 --- a/hl/tools/Makefile.am +++ b/hl/tools/Makefile.am @@ -25,6 +25,6 @@ else endif # All subdirectories -SUBDIRS=$(TOOLS_DIR) h5watch +SUBDIRS=h5watch $(TOOLS_GIF_DIR) include $(top_srcdir)/config/conclude.am diff --git a/src/libhdf5.settings.in b/src/libhdf5.settings.in index 9e95bd393e8..32c82b93147 100644 --- a/src/libhdf5.settings.in +++ 
b/src/libhdf5.settings.in @@ -75,6 +75,7 @@ Features: Dimension scales w/ new references: @DIMENSION_SCALES_WITH_NEW_REF@ Build HDF5 Tests: @HDF5_TESTS@ Build HDF5 Tools: @HDF5_TOOLS@ + Build GIF Tools: @HDF5_HL_GIF_TOOLS@ Threadsafety: @THREADSAFE@ Default API mapping: @DEFAULT_API_VERSION@ With deprecated public symbols: @DEPRECATED_SYMBOLS@ From 2e5eabf4c529bce519bdde2ae0b24c4c5baa395c Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sat, 24 Dec 2022 11:36:38 -0800 Subject: [PATCH 026/231] Fix for Autotools --disable-deprecated-symbols (#2363) * Fix for Autotools --disable-deprecated-symbols When we added v116 as a valid option, we left the default as v114 so using --disable-deprecated-symbols leads configure to complain that you can't set a default API that was deprecated. The GitHub action didn't catch this because it explicitly specifies v116 This only affects develop w/ the Autotools when --disable-deprecated-symbols is requested. * Added v116 & default to the --with-default-api-version help string The Autotools --with-default-api-version help string was missing v116. This has been added, as well as a "default" target so the CI can catch version problems when we forget to update the defaults upon creating a new major version. The GitHub CI also now uses "default" as the target for the deprecated symbols build. * Fixes the 1.14 API GitHub CI threadsafe/build_mode This action still had the bug we fixed where we set the build mode and threadsafety using an array of size one instead of an object. --- .github/workflows/main.yml | 13 ++++++------- configure.ac | 10 ++++++---- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 8abe0fd44a3..77c53578b28 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -261,13 +261,12 @@ jobs: generator: "autogen" flags: "" run_tests: false - thread_safety: - - enabled: false - text: "" + enabled: false + text: "" build_mode: - - text: "DBG" - cmake: "Debug" - autotools: "debug" + text: " DBG" + cmake: "Debug" + autotools: "debug" - name: "Ubuntu gcc Autotools no deprecated symbols (build only)" os: ubuntu-latest @@ -278,7 +277,7 @@ jobs: mirror_vfd: enable direct_vfd: enable deprec_sym: disable - default_api: v116 + default_api: default toolchain: "" generator: "autogen" flags: "" diff --git a/configure.ac b/configure.ac index 35db84b111c..b34355c9714 100644 --- a/configure.ac +++ b/configure.ac @@ -3749,11 +3749,13 @@ esac AC_SUBST([DEFAULT_API_VERSION]) AC_MSG_CHECKING([which version of public symbols to use by default]) AC_ARG_WITH([default-api-version], - [AS_HELP_STRING([--with-default-api-version=(v16|v18|v110|v112|v114)], + [AS_HELP_STRING([--with-default-api-version=(default|v16|v18|v110|v112|v114|v116)], [Specify default release version of public symbols - [default=v114]])],, - [withval=v114]) + [default=v116]])],, + [withval=v116]) +## Allowing "default" allows the GitHub CI to check that we didn't forget +## to change the defaults when creating a new major version if test "X$withval" = "Xv16"; then AC_MSG_RESULT([v16]) DEFAULT_API_VERSION=v16 @@ -3779,7 +3781,7 @@ elif test "X$withval" = "Xv114"; then DEFAULT_API_VERSION=v114 AC_DEFINE([USE_114_API_DEFAULT], [1], [Define using v1.14 public API symbols by default]) -elif test "X$withval" = "Xv116"; then +elif test "X$withval" = "Xv116" -o "X$withval" = "Xdefault"; then AC_MSG_RESULT([v116]) DEFAULT_API_VERSION=v116 AC_DEFINE([USE_116_API_DEFAULT], [1], 
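
The --with-default-api-version switch handled in the configure.ac hunk above only controls which versioned symbols the unversioned API names map to at compile time; applications can typically pin a different mapping themselves with the H5_USE_<version>_API macro family. Either way, a run-time check that the headers used for compilation match the library actually linked is cheap insurance. The sketch below uses the public H5check_version()/H5get_libversion() calls for that; it is an illustrative example, not part of any patch in this series.

/* Confirm that the HDF5 headers this program was compiled against are
 * compatible with the library it is linked against at run time. */
#include <stdio.h>
#include "hdf5.h"

int
main(void)
{
    unsigned maj = 0, min = 0, rel = 0;

    /* Aborts with a diagnostic if the compile-time and run-time
     * versions are incompatible */
    H5check_version(H5_VERS_MAJOR, H5_VERS_MINOR, H5_VERS_RELEASE);

    H5get_libversion(&maj, &min, &rel);
    printf("compiled against %u.%u.%u, running against %u.%u.%u\n",
           (unsigned)H5_VERS_MAJOR, (unsigned)H5_VERS_MINOR,
           (unsigned)H5_VERS_RELEASE, maj, min, rel);

    return 0;
}
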
From 4967e510a4a316d607329cf3721b9c8ddcc5abd3 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 27 Dec 2022 11:42:31 -0800 Subject: [PATCH 027/231] Adds missing Doxygen for H5ESpublic.h (#2364) * Adds missing Doxygen for H5ESpublic.h * Committing clang-format changes Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- src/H5ESpublic.h | 93 +++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 77 insertions(+), 16 deletions(-) diff --git a/src/H5ESpublic.h b/src/H5ESpublic.h index ecfd08f7a27..768cfd5a0d5 100644 --- a/src/H5ESpublic.h +++ b/src/H5ESpublic.h @@ -40,30 +40,34 @@ /* Public Typedefs */ /*******************/ -/* Asynchronous operation status */ +/** + * Asynchronous operation status + */ typedef enum H5ES_status_t { - H5ES_STATUS_IN_PROGRESS, /* Operation(s) have not yet completed */ - H5ES_STATUS_SUCCEED, /* Operation(s) have completed, successfully */ - H5ES_STATUS_CANCELED, /* Operation(s) has been canceled */ - H5ES_STATUS_FAIL /* An operation has completed, but failed */ + H5ES_STATUS_IN_PROGRESS, /**< Operation(s) have not yet completed */ + H5ES_STATUS_SUCCEED, /**< Operation(s) have completed, successfully */ + H5ES_STATUS_CANCELED, /**< Operation(s) has been canceled */ + H5ES_STATUS_FAIL /**< An operation has completed, but failed */ } H5ES_status_t; -/* Information about operations in an event set */ +/** + * Information about operations in an event set + */ typedef struct H5ES_op_info_t { /* API call info */ - const char *api_name; /* Name of HDF5 API routine called */ - char *api_args; /* "Argument string" for arguments to HDF5 API routine called */ + const char *api_name; /**< Name of HDF5 API routine called */ + char *api_args; /**< "Argument string" for arguments to HDF5 API routine called */ /* Application info */ - const char *app_file_name; /* Name of source file where the HDF5 API routine was called */ - const char *app_func_name; /* Name of function where the HDF5 API routine was called */ - unsigned app_line_num; /* Line # of source file where the HDF5 API routine was called */ + const char *app_file_name; /**< Name of source file where the HDF5 API routine was called */ + const char *app_func_name; /**< Name of function where the HDF5 API routine was called */ + unsigned app_line_num; /**< Line # of source file where the HDF5 API routine was called */ /* Operation info */ - uint64_t op_ins_count; /* Counter of operation's insertion into event set */ - uint64_t op_ins_ts; /* Timestamp for when the operation was inserted into the event set */ - uint64_t op_exec_ts; /* Timestamp for when the operation began execution */ - uint64_t op_exec_time; /* Execution time for operation (in ns) */ + uint64_t op_ins_count; /**< Counter of operation's insertion into event set */ + uint64_t op_ins_ts; /**< Timestamp for when the operation was inserted into the event set */ + uint64_t op_exec_ts; /**< Timestamp for when the operation began execution */ + uint64_t op_exec_time; /**< Execution time for operation (in ns) */ } H5ES_op_info_t; //! @@ -115,7 +119,14 @@ How to Trace Async Operations? 
*/ +/** + * Callback for H5ESregister_insert_func() + */ typedef int (*H5ES_event_insert_func_t)(const H5ES_op_info_t *op_info, void *ctx); + +/** + * Callback for H5ESregister_complete_func() + */ typedef int (*H5ES_event_complete_func_t)(const H5ES_op_info_t *op_info, H5ES_status_t status, hid_t err_stack, void *ctx); @@ -136,7 +147,7 @@ extern "C" { * * \brief Creates an event set * - * \returns \hid_ti{event set} + * \returns \hid_t{event set} * * \details H5EScreate() creates a new event set and returns a corresponding * event set identifier. @@ -297,8 +308,58 @@ H5_DLL herr_t H5ESget_err_count(hid_t es_id, size_t *num_errs); */ H5_DLL herr_t H5ESget_err_info(hid_t es_id, size_t num_err_info, H5ES_err_info_t err_info[], size_t *err_cleared); +/** + * \ingroup H5ES + * + * \brief Convenience routine to free an array of H5ES_err_info_t structs + * + * \param[in] num_err_info The number of elements in \p err_info array + * \param[in] err_info Array of structures + * \returns \herr_t + * + * \since 1.13.0 + * + */ H5_DLL herr_t H5ESfree_err_info(size_t num_err_info, H5ES_err_info_t err_info[]); + +/** + * \ingroup H5ES + * + * \brief Registers a callback to invoke when a new operation is inserted into + * an event set + * + * \es_id + * \param[in] func The insert function to register + * \param[in] ctx User-specified information (context) to pass to \p func + * \returns \herr_t + * + * \details Only one insert callback can be registered for each event set. + * Registering a new callback will replace the existing one. + * H5ES_NONE is a valid value for 'es_id', but functions as a no-op + * + * \since 1.13.0 + * + */ H5_DLL herr_t H5ESregister_insert_func(hid_t es_id, H5ES_event_insert_func_t func, void *ctx); + +/** + * \ingroup H5ES + * + * \brief Registers a callback to invoke when an operation completes within an + * event set + * + * \es_id + * \param[in] func The completion function to register + * \param[in] ctx User-specified information (context) to pass to \p func + * \returns \herr_t + * + * \details Only one complete callback can be registered for each event set. + * Registering a new callback will replace the existing one. + * H5ES_NONE is a valid value for 'es_id', but functions as a no-op + * + * \since 1.13.0 + * + */ H5_DLL herr_t H5ESregister_complete_func(hid_t es_id, H5ES_event_complete_func_t func, void *ctx); /** From 5b962fab29e4a775352beba345efe1eb0c474e78 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 27 Dec 2022 12:33:14 -0800 Subject: [PATCH 028/231] Purged references to HDF5 1.13.x from the Doxygen documentation (#2365) --- src/H5Dpublic.h | 6 +++--- src/H5ESmodule.h | 2 +- src/H5ESpublic.h | 24 ++++++++++++------------ src/H5Epublic.h | 2 +- src/H5FDsubfiling/H5FDioc.h | 4 ++-- src/H5FDsubfiling/H5FDsubfiling.h | 4 ++-- src/H5Ppublic.h | 4 ++-- src/H5VLmodule.h | 16 ++++------------ src/H5VLpublic.h | 2 +- src/H5public.h | 4 ++-- 10 files changed, 30 insertions(+), 38 deletions(-) diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index f7d208dd9f0..fbce3d6c7a4 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -694,7 +694,7 @@ H5_DLL herr_t H5Dget_chunk_info_by_coord(hid_t dset_id, const hsize_t *offset, u * Iterate over all chunked datasets and chunks in a file. 
* \snippet H5D_examples.c H5Ovisit_cb * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5Dchunk_iter(hid_t dset_id, hid_t dxpl_id, H5D_chunk_iter_op_t cb, void *op_data); @@ -900,7 +900,7 @@ H5_DLL herr_t H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, hid_ * parallel, each rank must pass exactly the same list of datasets in * \p dset_id , though the other parameters may differ. * - * \since 1.13.3 + * \since 1.14.0 * * \see H5Dread() * @@ -1091,7 +1091,7 @@ H5_DLL herr_t H5Dwrite(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, hid * parallel, each rank must pass exactly the same list of datasets in * \p dset_id , though the other parameters may differ. * - * \since 1.13.3 + * \since 1.14.0 * * \see H5Dwrite() * diff --git a/src/H5ESmodule.h b/src/H5ESmodule.h index b05b7f4797d..1e333f71ab4 100644 --- a/src/H5ESmodule.h +++ b/src/H5ESmodule.h @@ -39,7 +39,7 @@ * while the application is performing other tasks. * * To support AIO capabilities for the HDF5 VOL connectors, the AIO versions for the functions - * listed in the table below were added to HDF5 library version 1.13.0 and later. The async version + * listed in the table below were added to HDF5 library version 1.14.0 and later. The async version * of the function has “_async” suffix added to the function name. For example, the async version * for H5Fcreate is H5Fcreate_async. * diff --git a/src/H5ESpublic.h b/src/H5ESpublic.h index 768cfd5a0d5..f2c7cb518cb 100644 --- a/src/H5ESpublic.h +++ b/src/H5ESpublic.h @@ -152,7 +152,7 @@ extern "C" { * \details H5EScreate() creates a new event set and returns a corresponding * event set identifier. * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL hid_t H5EScreate(void); @@ -185,7 +185,7 @@ H5_DLL hid_t H5EScreate(void); * immediately if an operation fails. If a failure occurs, the value * returned for the number of operations in progress may be inaccurate. * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5ESwait(hid_t es_id, uint64_t timeout, size_t *num_in_progress, hbool_t *err_occurred); @@ -203,7 +203,7 @@ H5_DLL herr_t H5ESwait(hid_t es_id, uint64_t timeout, size_t *num_in_progress, h * \details H5EScancel() attempts to cancel operations in an event set specified * by \p es_id. H5ES_NONE is a valid value for \p es_id, but functions as a no-op. * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5EScancel(hid_t es_id, size_t *num_not_canceled, hbool_t *err_occurred); @@ -220,7 +220,7 @@ H5_DLL herr_t H5EScancel(hid_t es_id, size_t *num_not_canceled, hbool_t *err_occ * \details H5ESget_count() retrieves number of events in an event set specified * by \p es_id. * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5ESget_count(hid_t es_id, size_t *count); @@ -241,7 +241,7 @@ H5_DLL herr_t H5ESget_count(hid_t es_id, size_t *count); * for matching operations inserted into the event set with possible * errors that occur. * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5ESget_op_counter(hid_t es_id, uint64_t *counter); @@ -259,7 +259,7 @@ H5_DLL herr_t H5ESget_op_counter(hid_t es_id, uint64_t *counter); * \details H5ESget_err_status() checks if event set specified by es_id has * failed operations. * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5ESget_err_status(hid_t es_id, hbool_t *err_occurred); @@ -279,7 +279,7 @@ H5_DLL herr_t H5ESget_err_status(hid_t es_id, hbool_t *err_occurred); * The function does not wait for active operations to complete, so * count may not include all failures. 
* - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5ESget_err_count(hid_t es_id, size_t *num_errs); @@ -303,7 +303,7 @@ H5_DLL herr_t H5ESget_err_count(hid_t es_id, size_t *num_errs); * \snippet this H5ES_err_info_t_snip * \click4more * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5ESget_err_info(hid_t es_id, size_t num_err_info, H5ES_err_info_t err_info[], @@ -317,7 +317,7 @@ H5_DLL herr_t H5ESget_err_info(hid_t es_id, size_t num_err_info, H5ES_err_info_t * \param[in] err_info Array of structures * \returns \herr_t * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5ESfree_err_info(size_t num_err_info, H5ES_err_info_t err_info[]); @@ -337,7 +337,7 @@ H5_DLL herr_t H5ESfree_err_info(size_t num_err_info, H5ES_err_info_t err_info[]) * Registering a new callback will replace the existing one. * H5ES_NONE is a valid value for 'es_id', but functions as a no-op * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5ESregister_insert_func(hid_t es_id, H5ES_event_insert_func_t func, void *ctx); @@ -357,7 +357,7 @@ H5_DLL herr_t H5ESregister_insert_func(hid_t es_id, H5ES_event_insert_func_t fun * Registering a new callback will replace the existing one. * H5ES_NONE is a valid value for 'es_id', but functions as a no-op * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5ESregister_complete_func(hid_t es_id, H5ES_event_complete_func_t func, void *ctx); @@ -372,7 +372,7 @@ H5_DLL herr_t H5ESregister_complete_func(hid_t es_id, H5ES_event_complete_func_t * * \details H5ESclose() terminates access to an event set specified by \p es_id. * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5ESclose(hid_t es_id); diff --git a/src/H5Epublic.h b/src/H5Epublic.h index c01fa2b0c30..dbc3457879a 100644 --- a/src/H5Epublic.h +++ b/src/H5Epublic.h @@ -314,7 +314,7 @@ H5_DLL hid_t H5Eget_current_stack(void); * If \p close_source_stack is \c TRUE, the source error stack * will be closed. * - * \since 1.13.0 + * \since 1.14.0 */ H5_DLL herr_t H5Eappend_stack(hid_t dst_stack_id, hid_t src_stack_id, hbool_t close_source_stack); /** diff --git a/src/H5FDsubfiling/H5FDioc.h b/src/H5FDsubfiling/H5FDioc.h index 516f483fa7a..bcacd52252d 100644 --- a/src/H5FDsubfiling/H5FDioc.h +++ b/src/H5FDsubfiling/H5FDioc.h @@ -146,7 +146,7 @@ H5_DLL hid_t H5FD_ioc_init(void); * If the two drivers differ in configuration settings, application behavior * may not be as expected. * - * \since 1.13.2 + * \since 1.14.0 * */ H5_DLL herr_t H5Pset_fapl_ioc(hid_t fapl_id, H5FD_ioc_config_t *vfd_config); @@ -170,7 +170,7 @@ H5_DLL herr_t H5Pset_fapl_ioc(hid_t fapl_id, H5FD_ioc_config_t *vfd_config); * values and then calling H5Pset_fapl_ioc() with the configured * H5FD_ioc_config_t structure. * - * \since 1.13.2 + * \since 1.14.0 * */ H5_DLL herr_t H5Pget_fapl_ioc(hid_t fapl_id, H5FD_ioc_config_t *config_out); diff --git a/src/H5FDsubfiling/H5FDsubfiling.h b/src/H5FDsubfiling/H5FDsubfiling.h index c2543fa91af..22dd9067cb6 100644 --- a/src/H5FDsubfiling/H5FDsubfiling.h +++ b/src/H5FDsubfiling/H5FDsubfiling.h @@ -336,7 +336,7 @@ H5_DLL hid_t H5FD_subfiling_init(void); * H5FD_subfiling_config_t documentation for information about configuration * for the #H5FD_SUBFILING driver. * - * \since 1.13.2 + * \since 1.14.0 * */ H5_DLL herr_t H5Pset_fapl_subfiling(hid_t fapl_id, const H5FD_subfiling_config_t *vfd_config); @@ -370,7 +370,7 @@ H5_DLL herr_t H5Pset_fapl_subfiling(hid_t fapl_id, const H5FD_subfiling_config_t * environment variables to get accurate values for the #H5FD_SUBFILING driver * properties. 
* - * \since 1.13.2 + * \since 1.14.0 * */ H5_DLL herr_t H5Pget_fapl_subfiling(hid_t fapl_id, H5FD_subfiling_config_t *config_out); diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index a4993ab107f..5d3774b659f 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -5329,7 +5329,7 @@ H5_DLL herr_t H5Pset_vol(hid_t plist_id, hid_t new_vol_id, const void *new_vol_i * \note The H5VL_CAP_FLAG_ASYNC flag can be checked to see if asynchronous * operations are supported by the VOL connector stack. * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5Pget_vol_cap_flags(hid_t plist_id, uint64_t *cap_flags); @@ -8194,7 +8194,7 @@ H5_DLL herr_t H5Pget_mpio_no_collective_cause(hid_t plist_id, uint32_t *local_no * please see the documentation for that routine for details about * their use. * - * \since 1.13.0 + * \since 1.14.0 * */ H5_DLL herr_t H5Pset_dataset_io_hyperslab_selection(hid_t plist_id, unsigned rank, H5S_seloper_t op, diff --git a/src/H5VLmodule.h b/src/H5VLmodule.h index d6e65db0f6c..169134a296b 100644 --- a/src/H5VLmodule.h +++ b/src/H5VLmodule.h @@ -122,22 +122,14 @@ * specifying a name in the VOL plugin environment variable. * * \subsubsection subsubsec_vol_quick_use Use A VOL-Enabled HDF5 Library - * The virtual object layer was introduced in HDF5 1.12.0, however that version of the VOL is deprecated. - * VOL users should target HDF5 1.13.X, which is currently under development. The 1.13.X releases are - * considered ”unstable” in the sense that API calls, interfaces, and the file format may change in the - * 1.13.X release branches and we do not guarantee binary compatibility (”unstable” does NOT mean buggy). - * The next stable version of the library will be HDF5 1.14.0 which will release in 2023. The particular - * configuration of the library (serial vs parallel, thread-safe, debug vs production/release) does not - * matter. The VOL is a fundamental part of the library and cannot be disabled, so any build will do. + * The virtual object layer was introduced in HDF5 1.12.0, however that version of the VOL is deprecated + * due to inadequate support for pass-through connectors. These deficiencies have been addressed + * in HDF5 1.14.0, so VOL users and connector authors should target the 1.14.0 VOL API. * * On Windows, it’s probably best to use the same debug vs release configuration for the application and * all libraries in order to avoid C runtime (CRT) issues. Pre-2015 versions of Visual Studio are not * supported. * - * When working with a debug HDF5 library, it’s probably also wise to build with the ”memory sanity checking” - * feature disabled to avoid accidentally clobbering our memory tracking infrastructure when dealing with - * buffers obtained from the HDF5 library. This feature should be disabled by default in HDF5 1.13.X. - * * \subsubsection subsubsec_vol_quick_set Determine How You Will Set The VOL Connector * Fundamentally, setting a VOL connector involves modifying the file access property list (fapl) that will * be used to open or create the file. @@ -505,7 +497,7 @@ * \endcode * * \subsubsection subsubsec_vol_adapt_native Protect Native-Only API Calls - * In HDF5 1.13.0, a way to determine support for optional calls has been added. + * In HDF5 1.14.0, a way to determine support for optional calls has been added. 
* \code * herr_t H5VLquery_optional(hid_t obj_id, H5VL_subclass_t subcls, int opt_type, uint64_t *flags) * \endcode diff --git a/src/H5VLpublic.h b/src/H5VLpublic.h index 2683e0ac9c8..f32abdabfd9 100644 --- a/src/H5VLpublic.h +++ b/src/H5VLpublic.h @@ -414,7 +414,7 @@ H5_DLL herr_t H5VLquery_optional(hid_t obj_id, H5VL_subclass_t subcls, int opt_t * VOL connector object * \return \herr_t * - * \since 1.13.0 + * \since 1.14.0 */ H5_DLL herr_t H5VLobject_is_native(hid_t obj_id, hbool_t *is_native); diff --git a/src/H5public.h b/src/H5public.h index 5ed54d9fee2..5b6bce8bf17 100644 --- a/src/H5public.h +++ b/src/H5public.h @@ -449,7 +449,7 @@ H5_DLL herr_t H5open(void); * If the HDF5 library is initialized and closed more than once, the * \p func callback must be registered within each open/close cycle. * - * \since 1.13.0 + * \since 1.14.0 */ H5_DLL herr_t H5atclose(H5_atclose_func_t func, void *ctx); /** @@ -652,7 +652,7 @@ H5_DLL herr_t H5check_version(unsigned majnum, unsigned minnum, unsigned relnum) * after it has been closed. The value of \p is_terminating is * undefined if this routine fails. * - * \since 1.13.0 + * \since 1.14.0 */ H5_DLL herr_t H5is_library_terminating(hbool_t *is_terminating); /** From 581df9bfd16d91dcc08fcfc3c494d0ac9a06aaec Mon Sep 17 00:00:00 2001 From: Larry Knox Date: Wed, 28 Dec 2022 12:30:57 -0600 Subject: [PATCH 029/231] Restore line 264 of main.yml: "thread_safety:" (#2374) --- .github/workflows/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 77c53578b28..e2146185c2e 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -261,6 +261,7 @@ jobs: generator: "autogen" flags: "" run_tests: false + thread_safety: enabled: false text: "" build_mode: From 18ea6c02f1db7eaa747661852c76e168c28af6a3 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 28 Dec 2022 13:07:39 -0800 Subject: [PATCH 030/231] Adds Doxygen markup for H5FDdriver_query() (#2376) --- src/H5FDpublic.h | 22 +++++++++++++++++++++- src/H5VLpublic.h | 5 ++--- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h index 3b956de038d..422cd182ab9 100644 --- a/src/H5FDpublic.h +++ b/src/H5FDpublic.h @@ -397,7 +397,27 @@ extern "C" { #endif /* Function prototypes */ -/* Allows querying a VFD ID for features before the file is opened */ + +/** + * \ingroup H5FD + * + * \brief Allows querying a VFD ID for features before the file is opened + * + * \param[in] driver_id Virtual File Driver (VFD) ID + * \param[out] flags VFD flags supported + * + * \return \herr_t + * + * \details Queries a virtual file driver (VFD) for feature flags. Takes a + * VFD hid_t so it can be used before the file is opened. For example, + * this could be used to check if a VFD supports SWMR. + * + * \note The flags obtained here are just those of the base driver and + * do not take any configuration options (e.g., set via a fapl + * call) into consideration. + * + * \since 1.10.2 + */ H5_DLL herr_t H5FDdriver_query(hid_t driver_id, unsigned long *flags /*out*/); #ifdef __cplusplus diff --git a/src/H5VLpublic.h b/src/H5VLpublic.h index f32abdabfd9..c0a0e68394b 100644 --- a/src/H5VLpublic.h +++ b/src/H5VLpublic.h @@ -409,9 +409,8 @@ H5_DLL herr_t H5VLquery_optional(hid_t obj_id, H5VL_subclass_t subcls, int opt_t * \brief Determines whether an object ID represents a native * VOL connector object. 
* - * \param[in] obj_id Object identifier - * \param[in] is_native Boolean determining whether object is a native - * VOL connector object + * \obj_id + * \param[out] is_native Boolean determining whether object is a native VOL connector object * \return \herr_t * * \since 1.14.0 From 52b2d209d42e7429910aa121aa0a33497e3bf405 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Thu, 29 Dec 2022 12:58:54 -0800 Subject: [PATCH 031/231] Adds RELEASE.txt notes and updates Doxygen (#2377) (#2379) --- release_docs/RELEASE.txt | 32 ++++++++++++++++++++++++++++++++ src/H5Ipublic.h | 14 ++++++++------ src/H5VLpublic.h | 4 ++-- 3 files changed, 42 insertions(+), 8 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index fe063011ee2..472932cf4fc 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -89,6 +89,38 @@ New Features Library: -------- + - Overhauled the Virtual Object Layer (VOL) + + The virtual object layer (VOL) was added in HDF5 1.12.0 but the initial + implementation required API-breaking changes to better support optional + operations and pass-through VOL connectors. The original VOL API is + now considered deprecated and VOL users and connector authors should + target the 1.14 VOL API. + + The specific changes are too extensive to document in a release note, so + VOL users and connector authors should consult the updated VOL connector + author's guide and the 1.12-1.14 VOL migration guide. + + (DER - 2022/12/28) + + - H5VLquery_optional() signature change + + The last parameter of this API call has changed from a pointer to hbool_t + to a pointer to uint64_t. Due to the changes in how optional operations + are handled in the 1.14 VOL API, we cannot make the old API call work + with the new scheme, so there is no API compatibility macro for it. + + (DER - 2022/12/28) + + - H5I_free_t callback signature change + + In order to support asynchronous operations and future IDs, the signature + of the H5I_free_t callback has been modified to take a second 'request' + parameter. Due to the nature of the internal library changes, no API + compatibility macro is available for this change. + + (DER - 2022/12/28) + - Fix for CVE-2019-8396 Malformed HDF5 files may have truncated content which does not match diff --git a/src/H5Ipublic.h b/src/H5Ipublic.h index c0ad6be6880..d699c92252b 100644 --- a/src/H5Ipublic.h +++ b/src/H5Ipublic.h @@ -76,13 +76,15 @@ typedef int64_t hid_t; #define H5I_INVALID_HID (-1) /** - * A function for freeing objects. This function will be called with an object - * ID type number and a pointer to the object. The function should free the - * object and return non-negative to indicate that the object - * can be removed from the ID type. If the function returns negative - * (failure) then the object will remain in the ID type. + * A function for freeing objects. This function will be called with a pointer + * to the object and a pointer to a pointer to the asynchronous request object. + * The function should free the object and return non-negative to indicate that + * the object can be removed from the ID type. If the function returns negative + * (failure) then the object will remain in the ID type. For asynchronous + * operations and handling the request parameter, see the HDF5 user guide and + * VOL connector author guide. 
*/ -typedef herr_t (*H5I_free_t)(void *, void **); +typedef herr_t (*H5I_free_t)(void *obj, void **request); /** * The type of a function to compare objects & keys diff --git a/src/H5VLpublic.h b/src/H5VLpublic.h index c0a0e68394b..09b31afc101 100644 --- a/src/H5VLpublic.h +++ b/src/H5VLpublic.h @@ -401,7 +401,7 @@ H5_DLL herr_t H5VLunregister_connector(hid_t connector_id); * \param[out] flags Operation flags * \return \herr_t * - * \since 1.12.0 + * \since 1.12.1 */ H5_DLL herr_t H5VLquery_optional(hid_t obj_id, H5VL_subclass_t subcls, int opt_type, uint64_t *flags); /** @@ -413,7 +413,7 @@ H5_DLL herr_t H5VLquery_optional(hid_t obj_id, H5VL_subclass_t subcls, int opt_t * \param[out] is_native Boolean determining whether object is a native VOL connector object * \return \herr_t * - * \since 1.14.0 + * \since 1.12.2 */ H5_DLL herr_t H5VLobject_is_native(hid_t obj_id, hbool_t *is_native); From c502bb422f511c38ba1fbbf3d41df01949ae4c24 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 3 Jan 2023 06:17:56 -0800 Subject: [PATCH 032/231] Adds 'make install' and 'make check-install' to the CI (Autotools only) (#2381) --- .github/workflows/main.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index e2146185c2e..a31317997a0 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -468,3 +468,17 @@ jobs: working-directory: ${{ runner.workspace }}/build # Skip Debug MSVC while we investigate H5L Java test timeouts if: (matrix.generator != 'autogen') && (matrix.run_tests) && ! ((matrix.name == 'Windows MSVC CMake') && (matrix.build_mode.cmake == 'Debug')) + + # + # INSTALL (note that this runs even when we don't run the tests) + # + + - name: Autotools Install + run: make install + working-directory: ${{ runner.workspace }}/build + if: (matrix.generator == 'autogen') + + - name: Autotools Verify Install + run: make check-install + working-directory: ${{ runner.workspace }}/build + if: (matrix.generator == 'autogen') From f25957dcec634b9d3ef03573ace084d4cac3255e Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 3 Jan 2023 06:18:48 -0800 Subject: [PATCH 033/231] Adds -Warray-temporaries to the developer warnings (#2385) These generate a LOT of noise that we are not going to address anytime soon. 
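
Returning to the H5I_free_t change noted in the RELEASE.txt and H5Ipublic.h hunks of patch 031 above: the free callback now receives a second 'request' pointer for asynchronous use and there is no compatibility macro, so existing callbacks must be updated even if they ignore the new argument. A minimal sketch of a callback written against the new signature follows; the registration calls are shown with their long-standing H5Iregister_type()/H5Iregister()/H5Idestroy_type() prototypes as an assumption, and the whole example is illustrative rather than taken from any patch here.

/* A user-defined ID type whose free callback uses the new
 * (void *obj, void **request) signature. */
#include <stdlib.h>
#include "hdf5.h"

typedef struct my_obj_t {
    int payload;
} my_obj_t;

static herr_t
my_obj_free(void *obj, void **request)
{
    (void)request; /* synchronous code may ignore the async request pointer */
    free(obj);
    return 0;      /* non-negative: the object may be removed from the ID type */
}

int
main(void)
{
    my_obj_t  *obj = malloc(sizeof(my_obj_t));
    H5I_type_t my_type;
    hid_t      id;

    if (!obj)
        return 1;
    obj->payload = 42;

    my_type = H5Iregister_type(64, 0, my_obj_free); /* assumed prototype */
    id      = H5Iregister(my_type, obj);

    H5Idec_ref(id);           /* count reaches zero -> my_obj_free() runs */
    H5Idestroy_type(my_type);

    return 0;
}
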
--- config/cmake/HDFFortranCompilerFlags.cmake | 5 +++++ config/gnu-fflags | 2 ++ config/gnu-warnings/gfort-4.8 | 1 - config/gnu-warnings/gfort-developer-4.8 | 3 +++ config/gnu-warnings/gfort-no-developer-4.8 | 3 +++ 5 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 config/gnu-warnings/gfort-developer-4.8 create mode 100644 config/gnu-warnings/gfort-no-developer-4.8 diff --git a/config/cmake/HDFFortranCompilerFlags.cmake b/config/cmake/HDFFortranCompilerFlags.cmake index 108a4e97fb1..86d04315720 100644 --- a/config/cmake/HDFFortranCompilerFlags.cmake +++ b/config/cmake/HDFFortranCompilerFlags.cmake @@ -79,6 +79,11 @@ if (NOT MSVC AND NOT MINGW) # Append more extra warning flags that only gcc 4.8+ knows about if (NOT CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 4.8) ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/gfort-4.8") + if (HDF5_ENABLE_DEV_WARNINGS) + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/gfort-developer-4.8") + else () + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/gfort-no-developer-4.8") + endif () endif () # Append more extra warning flags that only gcc 4.9+ knows about diff --git a/config/gnu-fflags b/config/gnu-fflags index c43f416dcc2..b3385ec8836 100644 --- a/config/gnu-fflags +++ b/config/gnu-fflags @@ -159,6 +159,8 @@ if test "X-gfortran" = "X-$f9x_vendor"; then # gfortran >= 4.8 if test $f9x_vers_major -ge 5 -o $f9x_vers_major -eq 4 -a $f9x_vers_minor -ge 8; then H5_FCFLAGS="$H5_FCFLAGS $(load_gnu_arguments gfort-4.8)" + DEVELOPER_WARNING_FCFLAGS="$DEVELOPER_WARNING_FCFLAGS $(load_gnu_arguments gfort-developer-4.8)" + NO_DEVELOPER_WARNING_FCFLAGS="$NO_DEVELOPER_WARNING_FCFLAGS $(load_gnu_arguments gfort-no-developer-4.8)" fi # gfortran 4.9 (nothing new) diff --git a/config/gnu-warnings/gfort-4.8 b/config/gnu-warnings/gfort-4.8 index 9d880dea0af..f986072fa90 100644 --- a/config/gnu-warnings/gfort-4.8 +++ b/config/gnu-warnings/gfort-4.8 @@ -1,5 +1,4 @@ # warning flags added for gfortran >= 4.4 --Warray-temporaries -Wintrinsics-std # warning flag added for gfortran >= 4.5 diff --git a/config/gnu-warnings/gfort-developer-4.8 b/config/gnu-warnings/gfort-developer-4.8 new file mode 100644 index 00000000000..20effdc04dc --- /dev/null +++ b/config/gnu-warnings/gfort-developer-4.8 @@ -0,0 +1,3 @@ +# warning flags added for gfortran >= 4.4 +-Warray-temporaries + diff --git a/config/gnu-warnings/gfort-no-developer-4.8 b/config/gnu-warnings/gfort-no-developer-4.8 new file mode 100644 index 00000000000..82274ef89fd --- /dev/null +++ b/config/gnu-warnings/gfort-no-developer-4.8 @@ -0,0 +1,3 @@ +# warning flags added for gfortran >= 4.4 +-Wno-array-temporaries + From ae93477b6a000010161ef56355eacc4850b0a7ce Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Wed, 4 Jan 2023 13:04:07 -0600 Subject: [PATCH 034/231] Correct VOL connector env string parsing issue (#2350) * Correct concurrency bugs when running tests, along with a bugfix & small warning cleanup. * Committing clang-format changes * Allow spaces (and tabs) in VOL connector info string from environment variable. 
Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: AWS ParallelCluster user Co-authored-by: Koziol --- src/H5VLint.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/H5VLint.c b/src/H5VLint.c index 07798c537c0..e1d7005f913 100644 --- a/src/H5VLint.c +++ b/src/H5VLint.c @@ -425,7 +425,7 @@ H5VL__set_def_conn(void) } /* end else */ /* Was there any connector info specified in the environment variable? */ - if (NULL != (tok = HDstrtok_r(NULL, " \t\n\r", &lasts))) + if (NULL != (tok = HDstrtok_r(NULL, "\n\r", &lasts))) if (H5VL__connector_str_to_info(tok, connector_id, &vol_info) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTDECODE, FAIL, "can't deserialize connector info") From a42fb0753ff2fe6bc0577f2a47f8b993a5ec1bc4 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 4 Jan 2023 12:44:51 -0800 Subject: [PATCH 035/231] Updates README.md (#2387) * Updates release schedule for 2023 * Fixes broken build badge * Another build badge tweak * Even more badge tweaking * More branch tweaking * More badge tweaking * More badge tweaks * Even more badge tweaking * Adds badges for other branches --- README.md | 18 ++++++++++++---- doc/img/release-schedule.plantuml | 33 +++++++++++------------------- doc/img/release-schedule.png | Bin 14979 -> 13977 bytes 3 files changed, 26 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index 40947ab32a2..38e48ed1c8d 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,11 @@ HDF5 version 1.15.0 currently under development ![HDF5 Logo](doxygen/img/HDF5.png) -[![Build](https://img.shields.io/github/workflow/status/HDFGroup/hdf5/hdf5%20dev%20CI/develop)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Adevelop) +[![develop build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=develop&label=develop)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Adevelop) +[![1.14 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_14&label=1.14)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_14) +[![1.12 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_12&label=1.12)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_12) +[![1.10 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_10&label=1.10)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_10) +[![1.8 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_8&label=1.8)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_8) [![BSD](https://img.shields.io/badge/License-BSD-blue.svg)](https://github.com/HDFGroup/hdf5/blob/develop/COPYING) *Please refer to the release_docs/INSTALL file for installation instructions.* @@ -79,12 +83,18 @@ RELEASE SCHEDULE HDF5 does not release on a regular schedule. Instead, releases are driven by new features and bug fixes, though we try to have at least one release of each maintenance branch per year. Future HDF5 releases indicated on this schedule -are tentative. +are tentative. + +**NOTE**: HDF5 1.12 is being retired early due to its incomplete and incompatible VOL +layer. 
| Release | New Features | | ------- | ------------ | -| 1.13.2 | Onion VFD, Selection I/O, Subfiling | -| 1.13.3 | Multi-Dataset I/O | +| 1.8.23 | last HDF5 1.8 release | +| 1.10.10 | CVE fixes, performance improvements, H5Dchunk\_iter() | +| 1.12.3 | CVE fixes, performance improvements, H5Dchunk\_iter(), last HDF5 1.12 release | +| 1.14.1 | selection I/O with datatype conversion | +| 2.0.0 | TBD | | TBD | VFD SWMR | This list of feature release versions is also tentative, and the specific release diff --git a/doc/img/release-schedule.plantuml b/doc/img/release-schedule.plantuml index f51f9d4ae2a..c724dc98802 100644 --- a/doc/img/release-schedule.plantuml +++ b/doc/img/release-schedule.plantuml @@ -2,7 +2,8 @@ The release timeline was generated on PlantUML (https://plantuml.com) The current script: -@startuml +@startgantt + title HDF5 Release Schedule projectscale monthly @@ -10,34 +11,24 @@ Project starts 2022-01-01 [1.8] starts 2022-01-01 and lasts 57 weeks [1.8.23] happens 2023-01-31 -[1.8] is colored in #CC6677 +[1.8] is colored in #F76969 [1.10] starts 2022-01-01 and lasts 104 weeks [1.10.9] happens 2022-05-31 -[1.10.10] happens 2023-03-31 +[1.10.10] happens 2023-02-28 [1.10.10] displays on same row as [1.10.9] -[1.10] is colored in #DDCC77 +[1.10] is colored in #F6DD60 -[1.12] starts 2022-01-01 and lasts 61 weeks +[1.12] starts 2022-01-01 and lasts 65 weeks [1.12.2] happens 2022-04-30 -[1.12.3] happens 2023-02-28 +[1.12.3] happens 2023-03-31 [1.12.3] displays on same row as [1.12.2] [1.12] is colored in #88CCEE -[1.13] starts 2022-01-01 and lasts 53 weeks -[1.13.1] happens 2022-03-02 -[1.13.2] happens 2022-08-15 -[1.13.3] happens 2022-10-31 -[1.13.2] displays on same row as [1.13.1] -[1.13.3] displays on same row as [1.13.1] -[1.13] is colored in #44AA99 - [1.14] starts at 2023-01-01 and lasts 52 weeks -[1.14.0] happens at 2023-01-01 -[1.14.1] happens at 2023-06-30 -[1.14] displays on same row as [1.13] -[1.14.0] displays on same row as [1.13.1] -[1.14.1] displays on same row as [1.13.1] -[1.14] is colored in #AA4499 -@enduml +[1.14.0] happens at 2022-12-31 +[1.14.1] happens at 2023-04-30 +[1.14.1] displays on same row as [1.14.0] +[1.14] is colored in #B187CF +@endgantt diff --git a/doc/img/release-schedule.png b/doc/img/release-schedule.png index 42f065849dd6c6d8ace5ab4a5427c9599f65c248..b96f741a02953245d03f6420552621181d8aa608 100644 GIT binary patch literal 13977 zcmeHuWn5L=*6*T}5D+OrNrKC$*xebR*p#^N_UGi(jA+WymRAu z&U>D7-!J#x-}~kM4jeU1!ssCW!a?8K24-Psg{w6tK+x3QV=%?}4HM>yS2M}t_~iYVHRH&Z#w$PF{XE9I`<7kl*1QWceG99{ z-YL_$Hwvq4Oqh_vW#ktTQL#chwKq3Jzkgh!O=}z8p&*@N%@P?g&w4A=U{;2*@TD`D zWh==S#Ti}g+>)!W>pnJ@pbT? zH&vMu^^%)ed|CJAX%l;iLgJpT2iB~WuAa(hQu4yxBw-|PC=AJ8E)KsE*xV@^+{vu- z!P&ZT5%l2w_)w-$R!^gsLs{omw^f4=jrz-nf+_$UT&KbLB{b zNfz~Mrx&xLu!w*>qM*gta`PatewQX->?Q9kO7+KIldhOXdpUBP*|ojBAIBhPOBY+!D9S6n?c=58IL7@b&#Hac$h z3Qp-JcR>_E(ARg8+t+XMCxs~xfd#$r=0}7p z^a+BYB3@r3!O;<~Uy&E_5U<{79W;nn2rCjy9()l|MO?o!{dWW8^HZ=o&w5mCR_EuJ zHe{fa$vCa%m*zka_wEMQ4M>%5eOSr0+AcX?x5?AO!eXT_b*E+}?fZAp$2zyz7Ur}o z&0=?x9!FqcVVRGYEy;0DJFE`eho#`85mNP^3=IvDup4Q)uH{w`M@2;%#z=TD7b*mmj852tB;V)q^F)<@0ud38bUhX|u%aKdwl16b&xIL(9Eh6$3 zaTi+4Z^;@uuv~umo47cU#{31mI=JMeJv}`ixUOF1DWt34lzQscD@EgVbv8R}^z7NYyNb%nPdrW?E>%Ck z_Lr-sohF0u=_~BXB|PDs^i5~eJNf0F7pDaEVgWei#{Ddh9_jD&rSk2=;aS<)Vq#+W zckY}XtnJUZ?fed99IUq0f7x?5rpxNeu*N;>PQxX1b-s^BBcXV^>kct7x6PdJ_DVzb zLO9=c{eE9U!rh?^T%q$%2}||fXjnu8KBi0W8*CVyq;IyvhIz1{4F9+M$Qe$NY2p5? 
z#cO}$TwB0S?q7m-1+B^(1D~qo#SwkgC)?pxVP&B22L=TOs=AC8>2VF5Cz`mpAYWg( z#*b>ac8grr$B2_;OVHk+{-p!ZkHQIr*#~!k;tHmog@YqU+y=VXE27ony7Co>>uG$z z>l$VRyn6@n9FJ?RI7oQ*gc9CY^U?iiD0dA29?$=g&;Lh0|NkbROP=jqp@IRD9r)?u zNvu+!Yu}kt-KAklYAV1qoxsFm$KnwV%PEROlD8Y>*4D~8#h)myQ=*CuNKN0j?+xorUFKBpShs+l_D@v z=ZexHskwuW$*jDsfrF|Q|8&-S?~#WqHE=u4vNoL|RT?V^tZ)|#a)Yt~YFJH(TcDBf zejgzOtdhWUI&e-V_44KC@v0rt%hrG!`}&=dn+b!_4fkZ;_W|$1YVA_J)J!$7F?6GH zp)j~!Z8t5vq1H7Z8+jZ>65zt{Z^jDb$^;kfrEq9SM8xu<*sq$mU&qi+?i%MsJyV}J z22$grs;3jtL(9Z_L#B~hLk(-M-v$mQ34}%*94;&@0EuESRXmwm>+`ekyf4d|&v36*&EVfN)`X9z6K1OTNN<&^MmeQHL?t;KM0 zanbOYUwyHhz)x1^;&ZZ}fx}SEf;r$1)*@qt)l-S}eZ0)I{j!d&uz{Xk7mcacnm`0I z+;);xHESr&&SHA_zRZ2sJVbsl07*8KyJYl$EUcc);mfsp;TW>vfSOUn9O>7_RTrG&*fo<&Ddp zV1(zaX-J)iMz$_zJBoW9W=rhP+2TDX6T#BaoKZO`O(Si-@?h8Qt}8H$0%jO+E45Ky z=P8^TzI0)_crS)Oyh8y5FJSWe=!I~gFJ4WM(u>q0Ef_>7x+Wa3PS-LkggwfL9Q3ZyRSvBfqM6OtN$n`Y-;#5s>il^G@|{jd z(jriy;Vcc`Lj>K!wh7>`_+es>91 z)6qSCemAg#+!|aE$}SxGWi5;UG{z3|T&ld4Y{sfH0?A?O4g@Hj>DDuGk{#Z__F2Nt z@%ZynJn#gDnn<9gT;05vx(P%hLPLx4^UG-dc3o@3*W>8V7c(m=cwaUG%jFa%1gNc0 zbrX}X2$@SJ8PH8V;yO-S7lmCwyUshB0^u&q66&3 zJ1xMhUR#02w~xU$Yt|Sq&zDpZILr0i52F=47BPWCe3DiVo)nwa-~Z4aeAR&M(&{=S z3`mP&oF)xmZ#fmxf-f|TQpiY4?|W(+81%fb0XpSj7PJmB1dP)Hz3Upl8_EuxC`LfS zJlhk7xPilr+FbGxl-~i52%J`flQV@nY!h*6l?Q4c51N&21^#=c=3{)1A7K^F80o+l zZTYNF;h=XIAotd5HeXZBJU%&2i#lTXfhNa(HIZFJE+dZw9EtE1aK9gaL|H)?{9fh zGK%(NU_1fV@GyNbd!q<>IQ4X`>k5Tq4V|cHooTagn$KY(^)0l+`v2L+ymyJgk4&q{ TD@$-fcWFv;s Date: Thu, 5 Jan 2023 17:42:33 -0600 Subject: [PATCH 036/231] Update FORTRAN VOL connector test for external pass-through testing (#2393) * Correct concurrency bugs when running tests, along with a bugfix & small warning cleanup. * Committing clang-format changes * Allow spaces (and tabs) in VOL connector info string from environment variable. * Parse connector name from HDF5_PLUGIN_PATH environment variable better * Correct H5VLquery_optional to use H5VL routine instead of H5I. Also add an error message to the failure return value from not finding a plugin. Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: AWS ParallelCluster user Co-authored-by: Koziol --- fortran/test/vol_connector.F90 | 4 +++- src/H5PLint.c | 2 ++ src/H5VL.c | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/fortran/test/vol_connector.F90 b/fortran/test/vol_connector.F90 index 2cc6cee3acd..bfea2e1551d 100644 --- a/fortran/test/vol_connector.F90 +++ b/fortran/test/vol_connector.F90 @@ -237,6 +237,7 @@ PROGRAM vol_connector LOGICAL :: cleanup, status CHARACTER(LEN=12) :: VOL_CONNECTOR_ENV INTEGER :: LEN = 0 + INTEGER :: CONN_NAME_LEN CALL h5open_f(error) cleanup = .TRUE. @@ -251,8 +252,9 @@ PROGRAM vol_connector ! 
Check to see if the VOL connector was set with an env variable CALL GET_ENVIRONMENT_VARIABLE("HDF5_VOL_CONNECTOR", VOL_CONNECTOR_ENV, LEN) + CONN_NAME_LEN = INDEX(VOL_CONNECTOR_ENV, ' ') IF(LEN.NE.0)THEN - NATIVE_VOL_CONNECTOR_NAME = TRIM(VOL_CONNECTOR_ENV) + NATIVE_VOL_CONNECTOR_NAME = TRIM(VOL_CONNECTOR_ENV(1:CONN_NAME_LEN)) ELSE NATIVE_VOL_CONNECTOR_NAME = "native" ENDIF diff --git a/src/H5PLint.c b/src/H5PLint.c index 4995adb7d27..4c970c5bd16 100644 --- a/src/H5PLint.c +++ b/src/H5PLint.c @@ -259,6 +259,8 @@ H5PL_load(H5PL_type_t type, const H5PL_key_t *key) /* Set the return value we found the plugin */ if (found) ret_value = plugin_info; + else + HGOTO_ERROR(H5E_PLUGIN, H5E_NOTFOUND, NULL, "unable to locate plugin") done: FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5VL.c b/src/H5VL.c index fb7af972996..8ff5f26445e 100644 --- a/src/H5VL.c +++ b/src/H5VL.c @@ -967,7 +967,7 @@ H5VLquery_optional(hid_t obj_id, H5VL_subclass_t subcls, int opt_type, uint64_t /* Check args */ if (NULL == flags) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid 'flags' pointer") - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(obj_id))) + if (NULL == (vol_obj = H5VL_vol_object(obj_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid object identifier") /* Query the connector */ From b36aa566396f8590fddae3381f357772eb223a88 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Fri, 6 Jan 2023 11:08:16 -0600 Subject: [PATCH 037/231] Play nice with existing plugin paths (#2394) * Correct concurrency bugs when running tests, along with a bugfix & small warning cleanup. * Committing clang-format changes * Allow spaces (and tabs) in VOL connector info string from environment variable. * Parse connector name from HDF5_PLUGIN_PATH environment variable better * Correct H5VLquery_optional to use H5VL routine instead of H5I. Also add an error message to the failure return value from not finding a plugin. * Play nice with existing plugin paths Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: AWS ParallelCluster user Co-authored-by: Koziol --- test/test_plugin.sh.in | 2 +- tools/test/h5diff/h5diff_plugin.sh.in | 2 +- tools/test/h5dump/h5dump_plugin.sh.in | 2 +- tools/test/h5ls/h5ls_plugin.sh.in | 2 +- tools/test/h5repack/h5repack_plugin.sh.in | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/test_plugin.sh.in b/test/test_plugin.sh.in index 02ac21f0eb8..1d04760fb3a 100644 --- a/test/test_plugin.sh.in +++ b/test/test_plugin.sh.in @@ -109,7 +109,7 @@ if [ $? != 0 ]; then fi # Set plugin path -ENVCMD="env HDF5_PLUGIN_PATH=${TEMP_PLUGIN_DIR}:${TEMP_FILTER_DIR1}:${TEMP_FILTER_DIR2}" +ENVCMD="env HDF5_PLUGIN_PATH=${TEMP_PLUGIN_DIR}:${TEMP_FILTER_DIR1}:${TEMP_FILTER_DIR2}:${HDF5_PLUGIN_PATH}" # Run the tests $ENVCMD $FILTER_TEST_BIN diff --git a/tools/test/h5diff/h5diff_plugin.sh.in b/tools/test/h5diff/h5diff_plugin.sh.in index ee7bc8bdf73..e3f0f01314b 100644 --- a/tools/test/h5diff/h5diff_plugin.sh.in +++ b/tools/test/h5diff/h5diff_plugin.sh.in @@ -82,7 +82,7 @@ if [ $? != 0 ]; then fi # setup plugin path -ENVCMD="env HDF5_PLUGIN_PATH=../${PLUGIN_LIBDIR}" +ENVCMD="env HDF5_PLUGIN_PATH=../${PLUGIN_LIBDIR}:${HDF5_PLUGIN_PATH}" # # copy test files and expected output files from source dirs to test dir diff --git a/tools/test/h5dump/h5dump_plugin.sh.in b/tools/test/h5dump/h5dump_plugin.sh.in index b2f9f31b4a8..d9b77ee1017 100644 --- a/tools/test/h5dump/h5dump_plugin.sh.in +++ b/tools/test/h5dump/h5dump_plugin.sh.in @@ -88,7 +88,7 @@ if [ $? 
!= 0 ]; then fi # setup plugin path -ENVCMD="env HDF5_PLUGIN_PATH=../${PLUGIN_LIBDIR}" +ENVCMD="env HDF5_PLUGIN_PATH=../${PLUGIN_LIBDIR}:${HDF5_PLUGIN_PATH}" # # copy test files and expected output files from source dirs to test dir diff --git a/tools/test/h5ls/h5ls_plugin.sh.in b/tools/test/h5ls/h5ls_plugin.sh.in index 48f6e1eb15c..02e0cf7c499 100644 --- a/tools/test/h5ls/h5ls_plugin.sh.in +++ b/tools/test/h5ls/h5ls_plugin.sh.in @@ -88,7 +88,7 @@ if [ $? != 0 ]; then fi # setup plugin path -ENVCMD="env HDF5_PLUGIN_PATH=../${PLUGIN_LIBDIR}" +ENVCMD="env HDF5_PLUGIN_PATH=../${PLUGIN_LIBDIR}:${HDF5_PLUGIN_PATH}" # # copy test files and expected output files from source dirs to test dir diff --git a/tools/test/h5repack/h5repack_plugin.sh.in b/tools/test/h5repack/h5repack_plugin.sh.in index 14bcbbf8179..bd7c3a1229c 100644 --- a/tools/test/h5repack/h5repack_plugin.sh.in +++ b/tools/test/h5repack/h5repack_plugin.sh.in @@ -98,7 +98,7 @@ if [ $? != 0 ]; then fi # setup plugin path -ENVCMD="env HDF5_PLUGIN_PATH=../${PLUGIN_LIBDIR}" +ENVCMD="env HDF5_PLUGIN_PATH=../${PLUGIN_LIBDIR}:${HDF5_PLUGIN_PATH}" COPY_TESTFILES_TO_TESTDIR() { From 6482525232036033ed6f30d75af74ca7e3886538 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Mon, 9 Jan 2023 15:49:29 -0600 Subject: [PATCH 038/231] Determine if native connector is terminal (#2397) * Correct concurrency bugs when running tests, along with a bugfix & small warning cleanup. * Committing clang-format changes * Allow spaces (and tabs) in VOL connector info string from environment variable. * Parse connector name from HDF5_PLUGIN_PATH environment variable better * Correct H5VLquery_optional to use H5VL routine instead of H5I. Also add an error message to the failure return value from not finding a plugin. * Play nice with existing plugin paths * Use API routine to determine if native connector is terminal. * Committing clang-format changes Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: AWS ParallelCluster user Co-authored-by: Koziol --- tools/lib/h5tools.c | 18 +++++++++++------- tools/lib/h5tools.h | 2 +- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/tools/lib/h5tools.c b/tools/lib/h5tools.c index 078ae5a0b63..189aafdab63 100644 --- a/tools/lib/h5tools.c +++ b/tools/lib/h5tools.c @@ -817,10 +817,11 @@ h5tools_get_fapl(hid_t prev_fapl_id, h5tools_vol_info_t *vol_info, h5tools_vfd_i *------------------------------------------------------------------------- */ herr_t -h5tools_get_vfd_name(hid_t fapl_id, char *drivername, size_t drivername_size) +h5tools_get_vfd_name(hid_t fid, hid_t fapl_id, char *drivername, size_t drivername_size) { - hid_t fapl_vol_id = H5I_INVALID_HID; - herr_t ret_value = SUCCEED; + hid_t fapl_vol_id = H5I_INVALID_HID; + hbool_t is_native = FALSE; + herr_t ret_value = SUCCEED; if (fapl_id < 0) H5TOOLS_GOTO_ERROR(FAIL, "invalid FAPL"); @@ -839,9 +840,11 @@ h5tools_get_vfd_name(hid_t fapl_id, char *drivername, size_t drivername_size) if (H5Pget_vol_id(fapl_id, &fapl_vol_id) < 0) H5TOOLS_ERROR(FAIL, "failed to retrieve VOL ID from FAPL"); - /* TODO: For now, we have no way of determining if an arbitrary - * VOL connector is native-terminal. 
*/ - if (fapl_vol_id == H5VL_NATIVE || fapl_vol_id == H5VL_PASSTHRU) { + /* Query if the file ID is native-terminal */ + if (H5VLobject_is_native(fid, &is_native) < 0) + H5TOOLS_ERROR(FAIL, "failed to determine if file ID is native-terminal"); + + if (is_native) { const char *driver_name; hid_t driver_id; @@ -1072,7 +1075,8 @@ h5tools_fopen(const char *fname, unsigned flags, hid_t fapl_id, hbool_t use_spec done: /* Save the driver name if using a native-terminal VOL connector */ if (drivername && drivername_size && ret_value >= 0) - if (used_fapl_id >= 0 && h5tools_get_vfd_name(used_fapl_id, drivername, drivername_size) < 0) + if (used_fapl_id >= 0 && + h5tools_get_vfd_name(ret_value, used_fapl_id, drivername, drivername_size) < 0) H5TOOLS_ERROR(H5I_INVALID_HID, "failed to retrieve name of VFD used to open file"); if (tmp_fapl_id >= 0) diff --git a/tools/lib/h5tools.h b/tools/lib/h5tools.h index 2082f2d691a..753a83be480 100644 --- a/tools/lib/h5tools.h +++ b/tools/lib/h5tools.h @@ -667,7 +667,7 @@ H5TOOLS_DLL int h5tools_set_error_file(const char *fname, int is_bin); H5TOOLS_DLL hid_t h5tools_get_fapl(hid_t prev_fapl_id, h5tools_vol_info_t *vol_info, h5tools_vfd_info_t *vfd_info); -H5TOOLS_DLL herr_t h5tools_get_vfd_name(hid_t fapl_id, char *drivername, size_t drivername_size); +H5TOOLS_DLL herr_t h5tools_get_vfd_name(hid_t fid, hid_t fapl_id, char *drivername, size_t drivername_size); H5TOOLS_DLL hid_t h5tools_fopen(const char *fname, unsigned flags, hid_t fapl, hbool_t use_specific_driver, char *drivername, size_t drivername_size); H5TOOLS_DLL hid_t h5tools_get_little_endian_type(hid_t type); From c4eccab0080afc715d1e6c75bcf8f62cdb0810e6 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Mon, 9 Jan 2023 17:18:28 -0600 Subject: [PATCH 039/231] Pass-through connector testing flexibility (#2399) * Correct concurrency bugs when running tests, along with a bugfix & small warning cleanup. * Committing clang-format changes * Allow spaces (and tabs) in VOL connector info string from environment variable. * Parse connector name from HDF5_PLUGIN_PATH environment variable better * Correct H5VLquery_optional to use H5VL routine instead of H5I. Also add an error message to the failure return value from not finding a plugin. * Play nice with existing plugin paths * Use API routine to determine if native connector is terminal. * Committing clang-format changes * Make string size larger, to allow for connectors with longer names. * Be more flexible about testing external pass through connectors, especially if they have registered new optional operations. 
Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: AWS ParallelCluster user Co-authored-by: Koziol --- fortran/test/vol_connector.F90 | 2 +- test/vol.c | 12 +++++------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/fortran/test/vol_connector.F90 b/fortran/test/vol_connector.F90 index bfea2e1551d..e2235f4bbbf 100644 --- a/fortran/test/vol_connector.F90 +++ b/fortran/test/vol_connector.F90 @@ -235,7 +235,7 @@ PROGRAM vol_connector INTEGER :: error INTEGER :: ret_total_error LOGICAL :: cleanup, status - CHARACTER(LEN=12) :: VOL_CONNECTOR_ENV + CHARACTER(LEN=32) :: VOL_CONNECTOR_ENV INTEGER :: LEN = 0 INTEGER :: CONN_NAME_LEN diff --git a/test/vol.c b/test/vol.c index d3696692151..e50476f189c 100644 --- a/test/vol.c +++ b/test/vol.c @@ -1664,7 +1664,7 @@ exercise_reg_opt_oper(hid_t fake_vol_id, hid_t reg_opt_vol_id, H5VL_subclass_t s /* Verify that the reserved amount of optional operations is obeyed */ /* (The first optional operation registered should be at the lower limit) */ - if (op_val != H5VL_RESERVED_NATIVE_OPTIONAL) + if (op_val < H5VL_RESERVED_NATIVE_OPTIONAL) TEST_ERROR; /* Look up 1st registered optional operation */ @@ -1683,7 +1683,7 @@ exercise_reg_opt_oper(hid_t fake_vol_id, hid_t reg_opt_vol_id, H5VL_subclass_t s /* Verify that the reserved amount of optional operations is obeyed */ /* (The 2nd optional operation registered should be at the lower limit + 1) */ - if (op_val2 != (H5VL_RESERVED_NATIVE_OPTIONAL + 1)) + if (op_val2 < (H5VL_RESERVED_NATIVE_OPTIONAL + 1)) TEST_ERROR; /* Look up 2nd registered optional operation */ @@ -2242,10 +2242,9 @@ test_get_vol_name(void) conn_env_str = "native"; /* Skip the connectors other than the native and pass_through connector */ - if (HDstrcmp(conn_env_str, "native") && - HDstrncmp(conn_env_str, "pass_through", HDstrlen("pass_through"))) { + if (HDstrcmp(conn_env_str, "native") && HDstrcmp(conn_env_str, "pass_through")) { SKIPPED(); - HDprintf(" only test the native or pass_through connector\n"); + HDprintf(" only test the native or internal pass_through connector\n"); return SUCCEED; } @@ -2262,8 +2261,7 @@ test_get_vol_name(void) /* When comparing the pass_through connector, ignore the rest information (under_vol=0;under_info={}) */ if ((!HDstrcmp(conn_env_str, "native") && HDstrcmp(vol_name, "native")) || - (!HDstrncmp(conn_env_str, "pass_through", HDstrlen("pass_through")) && - HDstrcmp(vol_name, "pass_through"))) + (!HDstrcmp(conn_env_str, "pass_through") && HDstrcmp(vol_name, "pass_through"))) TEST_ERROR; if (H5Fclose(file_id) < 0) From 178cb1d39b0e70c7d4c5a427ae9821cc5a0ae4fa Mon Sep 17 00:00:00 2001 From: raylu-hdf <60487644+raylu-hdf@users.noreply.github.com> Date: Tue, 10 Jan 2023 11:29:54 -0600 Subject: [PATCH 040/231] HDFFV-11208 (OESS-320): H5VLquery_optional had an assertion failure with a committed datatype (#2398) * HDFFV-11208 (OESS-320): H5VLquery_optional had an assertion failure with a committed datatype. Added a test case for the fix that Quincey checked in. * Committing clang-format changes * Fixed a typo in a comment. * Fixed a typo in a comment. * Minor change: changed H5Tcommit to H5Tcommit2. 
Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- test/vol.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/test/vol.c b/test/vol.c index e50476f189c..4cfb3de6648 100644 --- a/test/vol.c +++ b/test/vol.c @@ -2362,6 +2362,85 @@ test_wrap_register(void) return FAIL; } /* end test_wrap_register() */ +/*------------------------------------------------------------------------- + * Function: test_query_optional + * + * Purpose: Tests the bug fix (HDFFV-11208) that a committed datatype + * triggered an assertion failure in H5VLquery_optional + * + * Return: SUCCEED/FAIL + * + *------------------------------------------------------------------------- + */ +static herr_t +test_query_optional(void) +{ + hid_t fapl_id = H5I_INVALID_HID; + hid_t file_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t dtype_id = H5I_INVALID_HID; + char filename[NAME_LEN]; + uint64_t supported = 0; + + TESTING("H5VLquery_optional"); + + /* Retrieve the file access property for testing */ + fapl_id = h5_fileaccess(); + + h5_fixname(FILENAME[0], fapl_id, filename, sizeof filename); + + if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) + TEST_ERROR; + + if ((group_id = H5Gcreate2(file_id, "test_group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Test H5VLquery_optional with a group */ + if (H5VLquery_optional(group_id, H5VL_SUBCLS_OBJECT, H5VL_NATIVE_OBJECT_GET_COMMENT, &supported) < 0) + TEST_ERROR; + + if ((dtype_id = H5Tcopy(H5T_NATIVE_INT)) < 0) + TEST_ERROR; + + /* Commit the datatype into the file */ + if (H5Tcommit2(file_id, "test_dtype", dtype_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + /* Test H5VLquery_optional with a committed datatype where the assertion failure happened in the past */ + if (H5VLquery_optional(dtype_id, H5VL_SUBCLS_OBJECT, H5VL_NATIVE_OBJECT_GET_COMMENT, &supported) < 0) + TEST_ERROR; + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + + if (H5Tclose(dtype_id) < 0) + TEST_ERROR; + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + h5_delete_test_file(FILENAME[0], fapl_id); + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Tclose(dtype_id); + H5Fclose(file_id); + H5Pclose(fapl_id); + } + H5E_END_TRY; + + return FAIL; +} /* end test_query_optional() */ + /*------------------------------------------------------------------------- * Function: main * @@ -2400,6 +2479,7 @@ main(void) nerrors += test_vol_cap_flags() < 0 ? 1 : 0; nerrors += test_get_vol_name() < 0 ? 1 : 0; nerrors += test_wrap_register() < 0 ? 1 : 0; + nerrors += test_query_optional() < 0 ? 1 : 0; if (nerrors) { HDprintf("***** %d Virtual Object Layer TEST%s FAILED! *****\n", nerrors, nerrors > 1 ? "S" : ""); From be8891c05bfb479ee4ba7197c25f648dc5b10b34 Mon Sep 17 00:00:00 2001 From: Mark Kittisopikul Date: Tue, 17 Jan 2023 10:27:25 -0500 Subject: [PATCH 041/231] Include shlwapi.h explicitly on Windows (#2407) * Include shlwapi.h explicitly * Add shlwapi library to LINK_LIB when header is detected Looking for StrStrIA in the shlwapi library is not reliable due to stdcall on mingw32. 
--- config/cmake/ConfigureChecks.cmake | 6 ++---- src/H5private.h | 1 + 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake index ef434b37e07..70a210404e2 100644 --- a/config/cmake/ConfigureChecks.cmake +++ b/config/cmake/ConfigureChecks.cmake @@ -131,6 +131,8 @@ CHECK_INCLUDE_FILE_CONCAT ("netdb.h" ${HDF_PREFIX}_HAVE_NETDB_H) CHECK_INCLUDE_FILE_CONCAT ("arpa/inet.h" ${HDF_PREFIX}_HAVE_ARPA_INET_H) if (WINDOWS) CHECK_INCLUDE_FILE_CONCAT ("shlwapi.h" ${HDF_PREFIX}_HAVE_SHLWAPI_H) + # Checking for StrStrIA in the library is not relaible for mingw32 to stdcall + set (LINK_LIBS ${LINK_LIBS} "shlwapi") endif () ## Check for non-standard extension quadmath.h @@ -156,10 +158,6 @@ if (MINGW OR NOT WINDOWS) CHECK_LIBRARY_EXISTS_CONCAT ("wsock32" gethostbyname ${HDF_PREFIX}_HAVE_LIBWSOCK32) endif () -if (WINDOWS) - CHECK_LIBRARY_EXISTS_CONCAT ("shlwapi" StrStrIA ${HDF_PREFIX}_HAVE_SHLWAPI) -endif () - # UCB (BSD) compatibility library CHECK_LIBRARY_EXISTS_CONCAT ("ucb" gethostname ${HDF_PREFIX}_HAVE_LIBUCB) diff --git a/src/H5private.h b/src/H5private.h index 0b683054836..6ed0aa2a98a 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -125,6 +125,7 @@ #include /* For _getcwd() */ #include /* POSIX I/O */ #include /* For GetUserName() */ +#include /* For StrStrIA */ #ifdef H5_HAVE_THREADSAFE #include /* For _beginthread() */ From a9564b743709b93b0d0cfc80534c5a32ee6bdfd8 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 18 Jan 2023 03:04:41 -0600 Subject: [PATCH 042/231] Port VOL connector Guide to doxygen (#2333) * Port VOL connector Guide to doxygen * Fix spelling * Updated VOL UG ref and added release note --- doxygen/dox/VOLConnGuide.dox | 4908 ++++++++++++++++++++++++++++++++++ doxygen/hdf5doxy_layout.xml | 2 + release_docs/RELEASE.txt | 8 +- 3 files changed, 4916 insertions(+), 2 deletions(-) create mode 100644 doxygen/dox/VOLConnGuide.dox diff --git a/doxygen/dox/VOLConnGuide.dox b/doxygen/dox/VOLConnGuide.dox new file mode 100644 index 00000000000..fb662a0ef5a --- /dev/null +++ b/doxygen/dox/VOLConnGuide.dox @@ -0,0 +1,4908 @@ +/** \page VOL_Connector HDF5 Virtual Object Layer (VOL) Connector Author Guide + +Navigate back: \ref index "Main" +
+ +\section secVOLIntro Introduction +The Virtual Object Layer (VOL) is an abstraction layer in the HDF5 library which intercepts all API +calls that could potentially access objects in an HDF5 container and forwards those calls to object drivers +referred to as VOL connectors. The architecture of this feature is described in the \ref H5VL_UG +and VOL Architecture and Internals Documentation and will not be duplicated here. + +This guide is for people who are interested in developing their own VOL connector for the HDF5 library. +It is assumed that the reader has good knowledge of the VOL architecture obtained by reading the VOL +architectural design document. + +\section secVOLCreate Creating a New Connector + +\subsection subsecVOLOverview Overview +Creating a new VOL connector can be a complicated process. You will need to map your storage system +to the HDF5 data model through the lens of the VOL and this may involve some impedance mismatch that +you will have to work around. The good news is that the HDF5 library has been re-engineered to handle +arbitrary, connector-specific data structures via the VOL callbacks, so no knowledge of the library internals +is necessary to write a VOL connector. + +Writing a VOL connector requires these things: +\li Decide on library vs plugin vs internal. +\li Set up your build/test files (CMake, Autotools, etc.). +\li Fill in some boilerplate information in yourH5VLclasststruct. +\li Decide how you will perform any necessary initialization needed by your storage system. +\li Map Storage to HDF5 File Objects +\li Create implementations for the callbacks you need to support. +\li Test the connector. + +Each of the steps listed above is described in more detail in this section of the document. + +The "model then implement" steps can be performed iteratively. You might begin by only supporting files, +datasets, and groups and only allowing basic operations on them. In some cases, this may be all that is +needed. As your needs grow, you can repeat those steps and increase the connector's HDF5 API coverage +at a pace that makes sense for your users. + +Also, note that this document only covers writing VOL connectors using the C programming language. It +is often possible to write connectors in other programming languages (e.g.; Python) via the language's C +interop facilities, but that topic is out of scope for this document. + +\subsection subsecVOL112dep The HDF5 1.12.x VOL Interface Is DEPRECATED +Important changes were made to the VOL interface for HDF5 1.13.0 and, due to binary compatibility issues, +these cannot be merged to HDF5 1.12.x. For this reason, VOL connector development should be shifted to +target 1.13.0 as no further development of the VOL interface will take place on the 1.12.x branch. Unlike the +other development branches of the library, there is no hdf5_1_13 branch - all HDF5 1.13.0 development is +taking place in the develop branch of the HDF5 repository and 1.13.x branches will split off from that. + +Note also that HDF5 1.13.0 is considered an unstable branch, and the API and file format are subject to +change ("unstable" means "not yet finalized", not "buggy"). The VOL feature is under active development +and, although it is nearing its final form, may change further before the stable HDF5 1.14.0 release targeted +for 2022. + +\subsection subsecVOLRelated VOL-Related HDF5 Header Files +Use of the VOL, including topics such as registration and loading VOL plugins, is described in the \ref H5VL_UG. 
+ +Public header Files you will need to be familiar with include: + + + + + + + + + + + + + + + + + + + + + +
H5VLpublic.h +Public VOL header. +
H5VLconnector.h +Main header for connector authors. Contains definitions for the main VOL struct and callbacks, enum values, etc. +
H5VLconnector_passthru.h +Helper routines for passthrough connector authors. +
H5VLnative.h +Native VOL connector header. May be useful if your connector will attempt to implement native HDF5 API calls that are handled via the optional callbacks. +
H5PLextern.h +Needed if your connector will be built as a plugin. +
+ +Many VOL connectors are listed on The HDF Group's VOL plugin registration page, located at: +Registered VOL Connectors. +Not all of these VOL connectors are supported by The HDF Group and the level of completeness varies, but the +connectors found there can serve as examples of working implementations + +\subsection subsecVOLLPI Library vs Plugin vs Internal +When building a VOL connector, you have several options: + +

Library

+The connector can be built as a normal shared or static library. Software that uses your connector will have +to link to it just like any other library. This can be convenient since you don't have to deal with plugin paths +and searching for the connector at runtime, but it also means that software which uses your connector will +have to be built and linked against it. + +
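For example, an application that links against the connector library can register the connector class directly
and attach it to a file access property list. This is only a sketch; the header and class symbol names
(my_vol_connector.h, my_vol_class_g) are hypothetical:
\code
#include "hdf5.h"
#include "my_vol_connector.h" // hypothetical header that declares my_vol_class_g

static hid_t
make_fapl_with_connector(void)
{
    // Register the linked-in connector and attach it to a FAPL
    hid_t vol_id  = H5VLregister_connector(&my_vol_class_g, H5P_DEFAULT);
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);

    H5Pset_vol(fapl_id, vol_id, NULL);

    // vol_id and fapl_id should be released (H5VLclose / H5Pclose) when no longer needed
    return fapl_id;
}
\endcode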

Plugin

+You can also build your connector as a dynamically loaded plugin. The mechanism for this is the same +mechanism used to dynamically load HDF5 filter plugins. This can allow use of your connector via the +VOL environment variable, without modifying the application, but requires your plugin to be discoverable +at runtime. See the \ref H5VL_UG for more information about using HDF5 plugins. + +To build your connector as a plugin, you will have to include H5PLextern.h +(a public header distributed with the library) and implement the #H5PLget_plugin_type +#H5PLget_plugin_info calls, both of which are trivial to code up. It also often requires your connector +to be built with certain compile/link options. The VOL connector template does all of these things. + +The HDF5 library's plugin loading code will call #H5PLget_plugin_type +to determine the type of plugin(e.g.; filter, VOL) and #H5PLget_plugin_info +to get the class struct, which allows the library to query the plugin for its name and value to see if it has found +a requested plugin. When a match is found, the library will use the class struct to register the connector and map its callbacks. + +For the HDF5 library to be able to load an external plugin dynamically, the plugin developer has to define +two public routines with the following name and signature: +\code + H5PL_type_t H5PLget_plugin_type(void); + const void *H5PLget_plugin_info(void); +\endcode + +To show how easy this is to accomplish, here is the complete implementation of those functions in the +template VOL connector: +\code +H5PL_type_t H5PLget_plugin_type(void) { return H5PL_TYPE_VOL; } +const void *H5PLget_plugin_info(void) { return &template_class_g; } +\endcode + +\ref H5PLget_plugin_type should return the library type which should always be #H5PL_TYPE_VOL. +#H5PLget_plugin_info should return a pointer to the plugin structure defining the VOL plugin with all the callbacks. +For example, consider an external plugin defined as: +\code + static const H5VL_class_t H5VL_foo_g = { + 2, // version + 12345, // value + "foo", // name + ... + } +\endcode + +The plugin would implement the two routines as: +\code + H5PL_type_t H5PLget_plugin_type(void) + {return H5PL_TYPE_VOL;} + const void *H5PLget_plugin_info(void) + {return &H5VL_foo_g;} +\endcode + +
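When the connector is built as a plugin, the application does not link against it at all; as long as the shared
library is discoverable on the HDF5 plugin path, the connector can be registered by name at runtime. A
minimal sketch reusing the hypothetical "foo" connector from the example above (error checking omitted):
\code
#include "hdf5.h"

int
main(void)
{
    hid_t vol_id  = H5VLregister_connector_by_name("foo", H5P_DEFAULT);
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file_id;

    H5Pset_vol(fapl_id, vol_id, NULL); // route file access through the "foo" connector
    file_id = H5Fcreate("foo_test.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);

    H5Fclose(file_id);
    H5Pclose(fapl_id);
    H5VLclose(vol_id);
    return 0;
}
\endcode
The same selection can also be made without modifying the application by setting the HDF5_VOL_CONNECTOR
and HDF5_PLUGIN_PATH environment variables, as described in the \ref H5VL_UG.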

Internal

+Your VOL connector can also be constructed as a part of the HDF5 library. This works in the same way +as the stdio and multi virtual file drivers (VFDs) and does not require knowledge of HDF5 internals or +use of non-public API calls. You simply have to add your connector's files to the +Makefile.am and/or CMakeLists.txt files in the source distribution's src directory. This requires maintaining +a private build of the library, though, and is not recommended. + +\subsection subsecVOLBuild Build Files / VOL Template +We have created a template terminal VOL connector that includes both Autotools and CMake build files. The +constructed VOL connector includes no real functionality, but can be registered and loaded as a plugin. + +The VOL template can be found here: +VOL template + +The purpose of this template is to quickly get you to the point where you can begin filling in the callback +functions and writing tests. You can copy this code to your own repository to serve as the basis for your +new connector. + +A template passthrough VOL is also available. This will be discussed in the section on passthrough connectors. + +\subsection subsecVOLBoil H5VL_class_t Boilerplate +Several fields in the H5VLclasststruct will need to be filled in. + +In HDF5 1.13.0, the version field will be 2, indicating the connector targets version 2 of the +#H5VL_class_t struct. Version 1 of the struct was never formally released and only available in the +develop branch of the HDF5 git repository. Version 0 is used in the deprecated HDF5 1.12.x branch. + +Every connector needs a name and value. The library will use these when loading and registering the +connector (as described in the \ref H5VL_UG), so they should be unique in your ecosystem. + +VOL connector values are integers, with a maximum value of 65535. Values from 0 to 255 are reserved +for internal use by The HDF Group. The native VOL connector has a value of 0. Values of 256 to 511 +are for connector testing and should not be found in the wild. Values of 512 to 65535 are for external +connectors. + +As is the case with HDF5 filters, The HDF Group can assign you an official VOL connector value. Please +contact help@hdfgroup.org for help with this. We currently do not register connector names, though the +name you've chosen will appear on the registered VOL connectors page. + +As noted above, registered VOL connectors will be listed at: +Registered VOL Connectors + +A new \b conn_version field has been added to the class struct for 1.13. This field is currently not used by +the library so its use is determined by the connector author. Best practices for this field will be determined +in the near future and this part of the guide will be updated. + +The \b cap_flags field is used to determine the capabilities of the VOL connector. At this time, the use of this +field is limited to indicating thread-safety, asynchronous capabilities, and ability to produce native HDF5 +files. Supported flags can be found in \ref H5VLconnector.h. +\code +// Capability flags for connector +#define H5VL_CAP_FLAG_NONE 0 // No special connector capabilities +#define H5VL_CAP_FLAG_THREADSAFE 0x01 // Connector is threadsafe +#define H5VL_CAP_FLAG_ASYNC 0x02 // Connector performs operations asynchronously +#define H5VL_CAP_FLAG_NATIVE_FILES 0x04 // Connector produces native file format +\endcode + +\subsection subsecVOLInit Initialization and Shutdown +You'll need to decide how to perform any initialization and shutdown tasks that are required by your +connector. 
There are initialize and terminate callbacks in the #H5VL_class_t struct to handle this. They
are invoked when the connector is registered and unregistered, respectively. The initialize callback can take
a VOL initialization property list (vipl), so any properties you need for initialization can be applied to it. The
HDF5 library currently makes no use of the vipl, so there are no default vipl properties.

If this is unsuitable, you may have to create custom connector-specific API calls to handle initialization and
termination. It may also be useful to perform operations in a custom API call used to set the VOL connector
in the fapl.

The initialization and terminate callbacks:
\code
 herr_t (*initialize)(hid_t vipl_id); // Connector initialization callback
 herr_t (*terminate)(void); // Connector termination callback
\endcode

\subsection subsecVOLMap Map Storage to HDF5 File Objects
The most difficult part of designing a new VOL connector is going to be determining how to support HDF5
file objects and operations using your storage system. There isn't much specific advice to give here, as each
connector will have unique needs, but a forthcoming "tutorial" connector will set up a simple connector and
demonstrate this process.

\subsection subsecVOLFillIn Fill In VOL Callbacks
For each file object you support in your connector (including the file itself), you will need to create a
data struct to hold whatever file object metadata is needed by your connector. For example, a data
structure for a VOL connector based on text files might have a file struct that contains a file pointer for the
text file, buffers used for caching data, etc. Pointers to these data structures are where your connector's
state is stored and are returned to the HDF5 library from the create/open/etc. callbacks such as
dataset create.

Once you have your data structures, you'll need to create your own implementations of the callback functions
and map them via your #H5VL_class_t struct.

\subsection subsecVOLOpt Handling Optional Operations
Handling optional operations has changed significantly in HDF5 1.13.0. In the past, optional operations were
specified using an integer opt_type parameter. This proved to be a problem with pass-through connectors,
though, as it was possible to have opt_type values clash when two connectors used the same values.

The new scheme allows a connector to register an optional operation with the library and receive a
dynamically-allocated opt_type value for the operation.

The following API calls can be used to manage the optional operations:
\code
 herr_t H5VLregister_opt_operation(H5VL_subclass_t subcls, const char *op_name, int *op_val);
 herr_t H5VLfind_opt_operation(H5VL_subclass_t subcls, const char *op_name, int *op_val);
 herr_t H5VLunregister_opt_operation(H5VL_subclass_t subcls, const char *op_name);
\endcode

The register call is used to register an operation for a subclass (file, etc.), and the opt_type value
that the library assigned to the operation is returned via the op_val parameter. This value can then
be passed to one of the subclass-specific API calls (listed below). If you need to find an existing optional
call's assigned opt_type value by name, you can use the find call.

One recommended way to handle optional calls is to register all the optional calls at startup, saving the
values in connector state, then use these cached values in your optional calls.
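For illustration only, here is a rough sketch of that register-and-cache pattern; the subclass, the operation
name "example.get_footer", and the function names are hypothetical and not part of any real connector:
\code
#include "hdf5.h"
#include "H5VLconnector.h"

// op value assigned by the library, cached in connector state at startup
static int example_get_footer_op_g = -1;

static herr_t
example_initialize(hid_t vipl_id)
{
    (void)vipl_id; // no initialization properties used in this sketch

    // Ask the library to assign an op value for a connector-specific dataset operation
    if (H5VLregister_opt_operation(H5VL_SUBCLS_DATASET, "example.get_footer",
                                   &example_get_footer_op_g) < 0)
        return -1;
    return 0;
}

static herr_t
example_dataset_optional(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req)
{
    (void)dxpl_id;
    (void)req;

    // Compare the incoming op_type against the cached value
    if (args->op_type == example_get_footer_op_g) {
        // ... operate on obj using the parameters pointed to by args->args ...
        (void)obj;
        return 0;
    }
    return -1; // unknown optional operation
}
\endcode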
The assigned values should be +unregistered using the unregister call when the connector shuts down. + +Subclass-specific optional calls: +\code +herr_t H5VLattr_optional_op(const char *app_file, const char *app_func, unsigned app_line, + hid_t attr_id, H5VL_optional_args_t *args, hid_t dxpl_id, hid_t es_id); +herr_t H5VLdataset_optional_op(const char *app_file, const char *app_func, unsigned app_line, + hid_t dset_id, H5VL_optional_args_t *args, hid_t dxpl_id, hid_t es_id); +herr_t H5VLdatatype_optional_op(const char *app_file, const char *app_func, unsigned app_line, + hid_t type_id, H5VL_optional_args_t *args, hid_t dxpl_id, hid_tes_id); +herr_t H5VLfile_optional_op(const char *app_file, const char *app_func, unsigned app_line, + hid_t file_id, H5VL_optional_args_t *args, hid_t dxpl_id, hid_t es_id); +herr_t H5VLgroup_optional_op(const char *app_file, const char *app_func, unsigned app_line, + hid_t group_id, H5VL_optional_args_t *args, hid_t dxpl_id, hid_t es_id); +herr_t H5VLlink_optional_op(const char *app_file, const char *app_func, unsigned app_line, + hid_t loc_id, const char *name, hid_t lapl_id, H5VL_optional_args_t *args, + hid_t dxpl_id, hid_t es_id); +herr_t H5VLobject_optional_op(const char *app_file, const char *app_func, unsigned app_line, + hid_t loc_id, const char *name, hid_t lapl_id, + H5VL_optional_args_t *args, hid_t dxpl_id, hid_t es_id); +herr_t H5VLrequest_optional_op(void *req, hid_t connector_id, H5VL_optional_args_t *args); +\endcode + +\subsection subsecVOLTest Testing Your Connector +At the time of writing, some of the HDF5 library tests have been abstracted out of the library with their +native-file-format-only sections removed and added to a VOL test suite available here: +vol-tests + +This is an evolving set of tests, so see the documentation in that repository for instructions as to its use. +You may want to clone and modify and/or extend these tests for use with your own connector. + +In the future, we plan to modify the HDF5 test suite that ships with the library to use a future VOL +capabilities flags scheme to selectively run tests that a particular connector supports. As this is a large task, +it may be some time before that work is complete. + +\subsection subsecVOLPassthrough Passthrough Connectors +Coming Soon + +\subsection subsecVOLAsync Asynchronous Operations +Coming Soon + +\section secVOLRef VOL Connector Interface Reference +Each VOL connector should be of type #H5VL_class_t: + + + + +
+VOL connector class, H5VLpublic.h +\snippet H5VLconnector.h H5VL_class_t_snip +
+ +The version field is the version of the #H5VL_class_t struct. This is identical to how the version field is +used in the #H5Z_class2_t struct for filters. + +The value field is a unique integer identifier that should be between 512 and 65535 for external, non-library +connectors. + +The name field is a string that uniquely identifies the VOL connector name. + +The conn_version is the connector version. This is currently not used by the library. + +The cap_flags holds bitwise capability/feature flags that determine which operations and capabilities are +supported by a the VOL connector. These fields were enumerated in the previous section. + +The initialize field is a function pointer to a routine that a connector implements to set up or initialize +access to the connector. Implementing this function by the connector is not required since some connectors +do not require any set up to start accessing the connector. In that case, the value of the function pointer +should be set to NULL. Connector specific variables that are required to be passed from users should be +passed through the VOL initialize property list. Generic properties can be added to this property class +for user-defined connectors that cannot modify the HDF5 library to add internal properties. For more +information consult the property list reference manual pages. + +The terminate field is a function pointer to a routine that a connector implements to terminate or finalize +access to the connector. Implementing this function by the connector is not required since some connectors +do not require any termination phase to the connector. In that case, the value of the function pointer should +be set to NULL. + +The rest of the fields in the #H5VL_class_t struct are "subclasses" that define all the VOL function callbacks +that are mapped to from the HDF5 API layer. Those subclasses are categorized into three categories, VOL +Framework, Data Model, and Infrastructure / Services. + +VOL Framework classes provide functionality for working with the VOL connectors themselves (e.g., working +with connector strings) and with wrapping and unwrapping objects for passthrough connectors. + +Data Model classes are those that provide functionality for accessing an HDF5 container and objects in that +container as defined by the HDF5 data model. + +Infrastructure / Service classes are those that provide services for users that are not related to the data model +specifically. Asynchronous operations, for example, are a service that most connectors can implement, so we +add a class for it in the VOL structure. + +If a service becomes generic enough and common among many connectors, a class for it should be added +to the VOL structure. However, many connectors can/will provide services that are not shared by other +connectors. A good way to support these services is through an optional callback in the VOL structure which +can be a hook from the API to the connector that provides those services, passing any necessary arguments +needed without the HDF5 library having to worry about supporting that service. A similar API operation +to allow users to use that service will be added. This API call would be similar to an "ioctl" call where any +kind of operation can be supported and passed down to the connector that has enough knowledge from the +user to interpret the type of the operation. All classes and their defined callbacks will be detailed in the +following sub-sections. 
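Before moving on to the individual classes, here is a sketch of how the overall fields above might be filled
in; the names are borrowed from the template connector and the value is just a placeholder from the
256-511 testing range:
\code
#include "H5VLconnector.h"

static herr_t
template_initialize(hid_t vipl_id)
{
    (void)vipl_id; // no initialization properties needed in this sketch
    return 0;
}

static herr_t
template_terminate(void)
{
    return 0;
}

static const H5VL_class_t template_class_g = {
    2,                       // version of the H5VL_class_t struct
    (H5VL_class_value_t)257, // connector value (placeholder)
    "template",              // connector name
    0,                       // connector version (conn_version)
    H5VL_CAP_FLAG_NONE,      // capability flags
    template_initialize,     // initialize callback
    template_terminate,      // terminate callback
    // the remaining subclass callback structures are left zero-initialized in this sketch
};
\endcode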
+ +To handle that large set of API routines, each class in the Data Model category has three generic callbacks, +get, specific, and optional to handle the three set of API operations outline above respectively. To +handle the varying parameters that can be passed to the callback, each callback will take a struct parameter +that includes an enum get/specific or integer optional field indicating the operation and a union of the +possible parameters get/specific or void pointer to the parameters optional. + +The optional args struct used for all optional operations: +\code +// Struct for all 'optional' callbacks +typedef struct H5VL_optional_args_t { + int op_type; // Operation to perform + void *args; // Pointer to operation's argument struct +} H5VL_optional_args_t; +\endcode + +The opt_type member is the value assigned by the library when the optional operation was registered (or +defined in the case of the native VOL connector) and the args member is a pointer to the optional +operation's parameters (usually passed in as a struct). + +Note that this differs from the HDF5 1.12.x scheme, which used va_lists. + +The optional callback is a free for all callback where anything from the API layer is passed in directly. +This callback is used to support connector specific operations in the API that other connectors should or +would not know about. More information about types and the arguments for each type will be detailed in +the corresponding class arguments. + +\subsection subsecVOLRefMap Mapping the API to the Callbacks +The callback interface defined for the VOL has to be general enough to handle all the HDF5 API operations +that would access the file. Furthermore, it has to capture future additions to the HDF5 library with little to +no changes to the callback interface. Changing the interface often whenever new features are added would +be discouraging to connector developers since that would mean reworking their VOL connector structure. +To remedy this issue, every callback will contain two parameters: +
    +
  • A data transfer property list (DXPL), which allows the API layer to attach properties that the connector
can retrieve when it needs them for a particular operation, without adding extra arguments to the VOL
callback function.
  • +
  • A pointer to a request (void **req) to handle asynchronous operations if the HDF5 library adds
support for them in future releases. That pointer is set by the VOL connector to a request object it
creates to manage progress on that asynchronous operation. If req is NULL, the API operation is
blocking, so the connector will not execute the operation asynchronously. If the connector does not
support asynchronous operations, it need not worry about this field and can leave it unset.
  • +
+ +In order to keep the number of the VOL object classes and callbacks concise and readable, it was decided +not to have a one-to-one mapping between API operation and callbacks. The parameter names and types +will be detailed when describing each callback in their respective sections. + +The HDF5 library provides several routines to access an object in the container. For example, to open an +attribute on a group object, the user could use #H5Aopen and pass the group identifier directly where the +attribute needs to be opened. Alternatively, the user could use #H5Aopen_by_name or #H5Aopen_by_idx +to open the attribute, which provides a more flexible way of locating the attribute, whether by a starting +object location and a path or an index type and traversal order. All those types of accesses usually map to +one VOL callback with a parameter that indicates the access type. In the example of opening an attribute, +the three API open routine will map to the same VOL open callback but with a different location parameter. +The same applies to all types of routines that have multiple types of accesses. The location parameter is a +structure defined in: + +Structure to hold parameters for object locations, H5VLconnector.h +\code +// +// Structure to hold parameters for object locations. +// either: BY_SELF, BY_NAME, BY_IDX, BY_TOKEN + +typedef struct H5VL_loc_params_t { + H5I_type_t obj_type; // The object type of the location object + H5VL_loc_type_t type; // The location type + union { // parameters of the location + H5VL_loc_by_token_t loc_by_token; + H5VL_loc_by_name_t loc_by_name; + H5VL_loc_by_idx_t loc_by_idx; + }loc_data; + } H5VL_loc_params_t + +// +// Types for different ways that objects are located in an +// HDF5 container. +typedef enum H5VL_loc_type_t { + // starting location is the target object + H5VL_OBJECT_BY_SELF, + + // location defined by object and path in H5VL_loc_by_name_t + H5VL_OBJECT_BY_NAME, + + // location defined by object, path, and index in H5VL_loc_by_idx_t + H5VL_OBJECT_BY_IDX, + + // location defined by token (e.g. physical address) in H5VL_loc_by_token_t + H5VL_OBJECT_BY_TOKEN, +} H5VL_loc_type_t; + +typedef struct H5VL_loc_by_name { + const char *name; // The path relative to the starting location + hid_t lapl_id; // The link access property list +}H5VL_loc_by_name_t; + +typedef struct H5VL_loc_by_idx { + const char *name; // The path relative to the starting location + H5_index_t idx_type; // Type of index + H5_iter_order_t order; // Index traversal order + hsize_t n; // Position in index + hid_t lapl_id; // The link access property list +}H5VL_loc_by_idx_t; + +typedef struct H5VL_loc_by_token { + void *token; // arbitrary token (physical address of location in native VOL) +}H5VL_loc_by_token_t; +\endcode + +\subsection subsecVOLRefConn Connector Information Callbacks +This section's callbacks involve the connector-specific information that will be associated with the VOL in +the fapl via H5Pset_fapl_ et al. This data is copied into the fapl so the library needs these functions to +manage this in a way that prevents resource leaks. + +The to_str and from_str callbacks are used to convert the connector-specific data to and from a configuration +string. There is no official way to construct VOL configuration strings, so the format used (JSON, +XML, getopt-style processing, etc.) is up to the connector author. These connector configuration strings +can be used to set up a VOL connector via mechanisms like command-line parameters and environment +variables. 
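As a concrete illustration of the info callbacks described above, here is a minimal sketch of a connector-specific
info struct together with its to_str/from_str pair; the struct layout and the "role=<n>" string syntax are
invented for this example, and a real connector is free to use any format it likes:
\code
#include <stdio.h>
#include <stdlib.h>
#include "hdf5.h"

// Connector-specific information carried in the FAPL (hypothetical contents)
typedef struct example_info_t {
    int role; // e.g. 0 = reader, 1 = writer
} example_info_t;

static herr_t
example_info_to_str(const void *_info, char **str)
{
    const example_info_t *info = (const example_info_t *)_info;

    // Serialize the info into a newly allocated configuration string
    if (NULL == (*str = (char *)malloc(32)))
        return -1;
    snprintf(*str, 32, "role=%d", info->role);
    return 0;
}

static herr_t
example_info_from_str(const char *str, void **_info)
{
    example_info_t *info = (example_info_t *)calloc(1, sizeof(example_info_t));

    if (NULL == info)
        return -1;
    if (str)
        sscanf(str, "role=%d", &info->role);
    *_info = info;
    return 0;
}
\endcode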
+ +Info class for connector information routines, H5VLconnector.h +\code +// VOL connector info fields & callbacks +typedef struct H5VL_info_class_t { + size_t size; // Size of the VOL info + void *(*copy)(const void *info); // Callback to create a copy of the VOL info + herr_t (*cmp)(int *cmp_value, const void *info1, const void *info2); // Callback to compare VOL info + herr_t (*free)(void *info); // Callback to release a VOL info + herr_t (*to_str)(const void *info, char **str); // Callback to serialize connector's info into a string + herr_t (*from_str)(const char *str, void **info); // Callback to deserialize a string into connector's info +} H5VL_info_class_t; +\endcode + +\subsubsection subsubsecVOLRefConnsize info: size +The size field indicates the size required to store any special information that the connector needs. + +If the connector requires no special information, set this field to zero. + + + + + + + +
Signature:
+\code + size_t size; +\endcode +
+ +\subsubsection subsubsecVOLRefConncopy info: copy +The copy callback is invoked when the connector is selected for use with H5Pset_fapl_, the +connector-specific set call, etc. Where possible, the information should be deep copied in such a way that the original +data can be freed. + + + + + + + + + + + + + +
Signature:
+\code + void * (*copy)(const void *info); +\endcode +
Arguments:
+\code + info (IN): The connector-specific info to copy. +\endcode +
+ +\subsubsection subsubsecVOLRefConncmp info: cmp +The cmp callback is used to determine if two connector-specific data structs are identical and helps the library +manage connector resources. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*cmp)(int *cmp_value, const void *info1, const void *info2); +\endcode +
Arguments:
+\code
+    cmp_value (OUT): A strcmp-like compare value.
+    info1 (IN): The first connector-specific info to compare.
+    info2 (IN): The second connector-specific info to compare.
+\endcode
+
+ +\subsubsection subsubsecVOLRefConnfree info: free +The free callback is used to clean up the connector-specific information that was copied when set in the +fapl via the copy callback. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*free)(void *info); +\endcode +
Arguments:
+\code + info (IN): The connector-specific info to free. +\endcode +
+ +\subsubsection subsubsecVOLRefConnto info: to_str +The to_str callback converts a connector-specific information structure to a connector-specific configuration +string. It is the opposite of the from_str callback. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*to_str)(const void *info, char **str); +\endcode +
Arguments:
+\code + info (IN): The connector-specific info to convert to a configuration string. + str (OUT): The constructed configuration string. +\endcode +
+ +\subsubsection subsubsecVOLRefConnfrom info: from_str +The from_str callback converts a connector-specific configuration string to a connector-specific information +structure. It is the opposite of the to_str callback. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*from_str)(const char *str, void **info); +\endcode +
Arguments:
+\code + str (IN): The connector-specific configuration string. + info (OUT): The connector-specific info generated from the configuration string. +\endcode +
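+
+As an illustration of how these callbacks fit together, the following is a minimal sketch of an info class
+for a hypothetical connector whose only connector-specific setting is a target string. The my_* names are
+illustrative assumptions, not part of the HDF5 API, and the allocation scheme is simplified.
+\code
+// Sketch of the info callbacks for a hypothetical connector. All my_* names are
+// illustrative only; the allocation scheme is simplified for the sketch.
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+typedef struct my_info_t {
+    char target[256]; // hypothetical connector setting
+} my_info_t;
+
+static void *my_info_copy(const void *_info) {
+    my_info_t *new_info = (my_info_t *)malloc(sizeof(my_info_t));
+    if (new_info)
+        memcpy(new_info, _info, sizeof(my_info_t)); // deep copy (no pointers inside)
+    return new_info;
+}
+
+static herr_t my_info_cmp(int *cmp_value, const void *_info1, const void *_info2) {
+    const my_info_t *i1 = (const my_info_t *)_info1;
+    const my_info_t *i2 = (const my_info_t *)_info2;
+    *cmp_value = strcmp(i1->target, i2->target); // strcmp-like result
+    return 0;
+}
+
+static herr_t my_info_free(void *_info) {
+    free(_info);
+    return 0;
+}
+
+// Serialize the info into a configuration string
+static herr_t my_info_to_str(const void *_info, char **str) {
+    const my_info_t *info = (const my_info_t *)_info;
+    *str = (char *)malloc(strlen(info->target) + 1);
+    if (*str == NULL)
+        return -1;
+    strcpy(*str, info->target);
+    return 0;
+}
+
+// Rebuild the info from a configuration string (e.g. from an environment variable)
+static herr_t my_info_from_str(const char *str, void **_info) {
+    my_info_t *info = (my_info_t *)calloc(1, sizeof(my_info_t));
+    if (info == NULL)
+        return -1;
+    snprintf(info->target, sizeof(info->target), "%s", str);
+    *_info = info;
+    return 0;
+}
+
+static const H5VL_info_class_t my_info_cls = {
+    sizeof(my_info_t), // size
+    my_info_copy,      // copy
+    my_info_cmp,       // cmp
+    my_info_free,      // free
+    my_info_to_str,    // to_str
+    my_info_from_str   // from_str
+};
+\endcode
+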
+ +\subsection subsecVOLRefWrap Object Wrap Callbacks +The object wrap callbacks are used by passthrough connectors to wrap/unwrap objects and contexts when +passing them up and down the VOL chain. + +Wrap class for object wrapping routines, H5VLconnector.h +\code +typedef struct H5VL_wrap_class_t { + void *(*get_object)(const void *obj); // Callback to retrieve underlying object + herr_t (*get_wrap_ctx)(const void *obj, void **wrap_ctx); // Callback to retrieve the object wrapping context for the connector + void *(*wrap_object)(void *obj, H5I_type_t obj_type, void *wrap_ctx); // Callback to wrap a library object + void *(*unwrap_object)(void *obj); // Callback to unwrap a library object + herr_t (*free_wrap_ctx)(void *wrap_ctx); // Callback to release the object wrapping context for the connector +} H5VL_wrap_class_t; +\endcode + +\subsubsection subsubsecVOLRefWrapobj wrap: get_object +Retrieves an underlying object. + + + + + + + + + + + + + +
Signature:
+\code + void * (*get_object)(const void *obj); +\endcode +
Arguments:
+\code + obj (IN): Object being unwrapped. +\endcode +
+ +\subsubsection subsubsecVOLRefWrapctx wrap: get_wrap_ctx +Get a VOL connector's object wrapping context. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*get_wrap_ctx)(const void *obj, void **wrap_ctx); +\endcode +
Arguments:
+\code + obj (IN): Object for which we need a context. + wrap_ctx (OUT): Context. +\endcode +
+ +\subsubsection subsubsecVOLRefWrapwrap wrap: wrap_object +Asks a connector to wrap an underlying object. + + + + + + + + + + + + + +
Signature:
+\code + void * (*wrap_object)(void *obj, H5I_type_t obj_type, void *wrap_ctx); +\endcode +
Arguments:
+\code + obj (IN): Object being wrapped. + obj_type (IN): Object type (see H5Ipublic.h). + wrap_ctx (IN): Context. +\endcode +
+
+\subsubsection subsubsecVOLRefWrapunwrap wrap: unwrap_object
+Unwraps an object from the connector.
+
Signature:
+\code + void * (*unwrap_object)(void *obj); +\endcode +
Arguments:
+\code + obj (IN): Object being unwrapped. +\endcode +
+ +\subsubsection subsubsecVOLRefWrapfree wrap: free_wrap_ctx +Release a VOL connector's object wrapping context. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*free_wrap_ctx)(void *wrap_ctx); +\endcode +
Arguments:
+\code + wrap_ctx (IN): Context to be freed. +\endcode +
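+
+For a passthrough-style connector that keeps the underlying VOL object inside its own wrapper struct, the
+wrap callbacks can be thin shims, as in the sketch below. The my_obj_t and my_wrap_ctx_t types are
+hypothetical; a real passthrough connector would also forward wrap requests to the next connector in the chain.
+\code
+// Sketch of wrap callbacks for a passthrough-style connector. my_obj_t and
+// my_wrap_ctx_t are hypothetical connector-internal types.
+#include <stdlib.h>
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+typedef struct my_obj_t {
+    void *under_object; // object belonging to the underlying (next) connector
+    hid_t under_vol_id; // ID of the underlying connector
+} my_obj_t;
+
+typedef struct my_wrap_ctx_t {
+    hid_t under_vol_id;   // underlying connector to forward wrap requests to
+    void *under_wrap_ctx; // wrap context of the underlying connector
+} my_wrap_ctx_t;
+
+static void *my_get_object(const void *obj) {
+    return ((const my_obj_t *)obj)->under_object; // hand back the wrapped object
+}
+
+static herr_t my_get_wrap_ctx(const void *obj, void **wrap_ctx) {
+    const my_obj_t *o = (const my_obj_t *)obj;
+    my_wrap_ctx_t *ctx = (my_wrap_ctx_t *)calloc(1, sizeof(my_wrap_ctx_t));
+    if (ctx == NULL)
+        return -1;
+    ctx->under_vol_id = o->under_vol_id;
+    ctx->under_wrap_ctx = NULL; // a real connector would query the next connector here
+    *wrap_ctx = ctx;
+    return 0;
+}
+
+static void *my_wrap_object(void *obj, H5I_type_t obj_type, void *wrap_ctx) {
+    my_wrap_ctx_t *ctx = (my_wrap_ctx_t *)wrap_ctx;
+    my_obj_t *wrapped = (my_obj_t *)calloc(1, sizeof(my_obj_t));
+    (void)obj_type; // object type is unused in this trivial sketch
+    if (wrapped == NULL)
+        return NULL;
+    wrapped->under_object = obj;
+    wrapped->under_vol_id = ctx->under_vol_id;
+    return wrapped;
+}
+
+static void *my_unwrap_object(void *obj) {
+    my_obj_t *o = (my_obj_t *)obj;
+    void *under = o->under_object;
+    free(o);
+    return under;
+}
+
+static herr_t my_free_wrap_ctx(void *wrap_ctx) {
+    free(wrap_ctx);
+    return 0;
+}
+\endcode
+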
+ +\subsection subsecVOLRefAttr The Attribute Function Callbacks +The attribute API routines (\ref H5A) allow HDF5 users to create and manage HDF5 attributes. All the \ref H5A +API routines that modify the HDF5 container map to one of the attribute callback routines in this class +that the connector needs to implement. + +Structure for attribute callback routines, H5VLconnector.h +\code +typedef struct H5VL_attr_class_t { + void *(*create)(void *obj, const H5VL_loc_params_t *loc_params, const char *attr_name, hid_t type_id, + hid_t space_id, hid_t acpl_id, hid_t aapl_id, hid_t dxpl_id, void **req); + void *(*open)(void *obj, const H5VL_loc_params_t *loc_params, const char *attr_name, hid_t aapl_id, + hid_t dxpl_id, void **req); + herr_t (*read)(void *attr, hid_t mem_type_id, void *buf, hid_t dxpl_id, void **req); + herr_t (*write)(void *attr, hid_t mem_type_id, const void *buf, hid_t dxpl_id, void **req); + herr_t (*get)(void *obj, H5VL_attr_get_args_t *args, hid_t dxpl_id, void **req); + herr_t (*specific)(void *obj, const H5VL_loc_params_t *loc_params, H5VL_attr_specific_args_t *args, + hid_t dxpl_id, void **req); + herr_t (*optional)(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req); + herr_t (*close)(void *attr, hid_t dxpl_id, void **req); +} H5VL_attr_class_t; +\endcode + +\subsubsection subsubsecVOLRefAttrcreate attr: create +The create callback in the attribute class creates an attribute object in the container of the location object +and returns a pointer to the attribute structure containing information to access the attribute in future calls. + + + + + + + + + + + + + +
Signature:
+\code + void *(*create)(void *obj, H5VL_loc_params_t *loc_params, const char *attr_name, hid_t type_id, hid_t space_id, hid_t acpl_id, hid_t aapl_id, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): Pointer to an object where the attribute needs to be created or where the look-up + of the target object needs to start. + loc_params (IN): Pointer to the location parameters as explained in "Mapping the API to the Callbacks". + attr_name (IN): The name of the attribute to be created. + type_id (IN): The datatype of the attribute. + space_id (IN): The dataspace of the attribute. + acpl_id (IN): The attribute creation property list. + aapl_id (IN): The attribute access property list. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+ +\subsubsection subsubsecVOLRefAttropen attr: open +The open callback in the attribute class opens an attribute object in the container of the location object and +returns a pointer to the attribute structure containing information to access the attribute in future calls. + + + + + + + + + + + + +
Signature:
+\code + void *(*open)(void *obj, H5VL_loc_params_t *loc_params, const char *attr_name, hid_t aapl_id, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): Pointer to an object where the attribute needs to be opened or where the look-up + of the target object needs to start. + loc_params (IN): Pointer to the location parameters as explained in "Mapping the API to the Callbacks". + attr_name (IN): The name of the attribute to be opened. + aapl_id (IN): The attribute access property list. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
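+
+As a sketch of how a connector might honor the location parameters in this callback, the fragment below
+branches on loc_params->type for the H5VL_OBJECT_BY_SELF and H5VL_OBJECT_BY_NAME cases. The my_* types
+and helper functions are hypothetical.
+\code
+// Sketch of an attribute open callback that dispatches on loc_params->type.
+// my_object_t, my_attr_t and the my_* helpers are hypothetical.
+#include <stddef.h>
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+typedef struct my_object_t my_object_t;
+typedef struct my_attr_t   my_attr_t;
+
+// Hypothetical connector-internal helpers
+my_attr_t   *my_attr_open_internal(my_object_t *parent, const char *attr_name, hid_t aapl_id);
+my_object_t *my_object_resolve_path(my_object_t *start, const char *path, hid_t lapl_id);
+
+static void *my_attr_open(void *obj, const H5VL_loc_params_t *loc_params, const char *attr_name,
+                          hid_t aapl_id, hid_t dxpl_id, void **req) {
+    my_object_t *parent = NULL;
+    (void)dxpl_id;
+    (void)req; // synchronous connector: the request pointer is left untouched
+
+    switch (loc_params->type) {
+        case H5VL_OBJECT_BY_SELF: // H5Aopen: obj is already the attribute's parent
+            parent = (my_object_t *)obj;
+            break;
+        case H5VL_OBJECT_BY_NAME: // H5Aopen_by_name: resolve the path first
+            parent = my_object_resolve_path((my_object_t *)obj,
+                                            loc_params->loc_data.loc_by_name.name,
+                                            loc_params->loc_data.loc_by_name.lapl_id);
+            break;
+        default: // H5VL_OBJECT_BY_IDX etc. omitted in this sketch
+            return NULL;
+    }
+    if (parent == NULL)
+        return NULL;
+    return my_attr_open_internal(parent, attr_name, aapl_id);
+}
+\endcode
+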
+ +\subsubsection subsubsecVOLRefAttrread attr: read +The read callback in the attribute class reads data from the attribute object and returns an herr_t indicating +success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*read)(void *attr, hid_t mem_type_id, void *buf, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + attr (IN): Pointer to the attribute object. + mem_type_id (IN): The memory datatype of the attribute. + buf (OUT): Data buffer to be read into. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+ +\subsubsection subsubsecVOLRefAttrwrite attr: write +The write callback in the attribute class writes data to the attribute object and returns an herr_t indicating +success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*write)(void *attr, hid_t mem_type_id, const void *buf, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + attr (IN): Pointer to the attribute object. + mem_type_id (IN): The memory datatype of the attribute. + buf (IN): Data buffer to be written. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+
+\subsubsection subsubsecVOLRefAttrget attr: get
+The get callback in the attribute class retrieves information about the attribute as specified in the get_type
+parameter. It returns an herr_t indicating success or failure.
+
Signature:
+\code + herr_t (*get)(void *obj, H5VL_attr_get_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): An attribute or location object where information needs to be retrieved from. + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+ +\code +/* Values for attribute 'get' operations */ +typedef enum H5VL_attr_get_t { + H5VL_ATTR_GET_ACPL, /* creation property list */ + H5VL_ATTR_GET_INFO, /* info */ + H5VL_ATTR_GET_NAME, /* access property list */ + H5VL_ATTR_GET_SPACE, /* dataspace */ + H5VL_ATTR_GET_STORAGE_SIZE, /* storage size */ + H5VL_ATTR_GET_TYPE /* datatype */ +} H5VL_attr_get_t; + +/* Parameters for attribute 'get_name' operation */ +typedef struct H5VL_attr_get_name_args_t { + H5VL_loc_params_t loc_params; /* Location parameters for object access */ + size_t buf_size; /* Size of attribute name buffer */ + char *buf; /* Buffer for attribute name (OUT) */ + size_t *attr_name_len; /* Actual length of attribute name (OUT) */ +} H5VL_attr_get_name_args_t; + +/* Parameters for attribute 'get_info' operation */ +typedef struct H5VL_attr_get_info_args_t { + H5VL_loc_params_t loc_params; /* Location parameters for object access */ + const char *attr_name; /* Attribute name (for get_info_by_name) */ + H5A_info_t *ainfo; /* Attribute info (OUT) */ +} H5VL_attr_get_info_args_t; + +/* Parameters for attribute 'get' operations */ +typedef struct H5VL_attr_get_args_t { + H5VL_attr_get_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_ATTR_GET_ACPL */ + struct { + hid_t acpl_id; /* Attribute creation property list ID (OUT) */ + } get_acpl; + + /* H5VL_ATTR_GET_INFO */ + H5VL_attr_get_info_args_t get_info; /* Attribute info */ + + /* H5VL_ATTR_GET_NAME */ + H5VL_attr_get_name_args_t get_name; /* Attribute name */ + + /* H5VL_ATTR_GET_SPACE */ + struct { + hid_t space_id; /* Dataspace ID (OUT) */ + } get_space; + + /* H5VL_ATTR_GET_STORAGE_SIZE */ + struct { + hsize_t *data_size; /* Size of attribute in file (OUT) */ + } get_storage_size; + + /* H5VL_ATTR_GET_TYPE */ + struct { + hid_t type_id; /* Datatype ID (OUT) */ + } get_type; + } args; +} H5VL_attr_get_args_t; +\endcode + +\subsubsection subsubsecVOLRefAttrspec attr: specific +The specific callback in the attribute class implements specific operations on HDF5 attributes as specified +in the specific_type parameter. It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*specific)(void *obj, H5VL_loc_params_t *loc_params, H5VL_attr_specific_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): The location object where the operation needs to happen. + loc_params (IN): A pointer to the location parameters as explained in "Mapping the API to the Callbacks". + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+ +\code +/* Values for attribute 'specific' operation */ +typedef enum H5VL_attr_specific_t { + H5VL_ATTR_DELETE, /* H5Adelete(_by_name) */ + H5VL_ATTR_DELETE_BY_IDX, /* H5Adelete_by_idx */ + H5VL_ATTR_EXISTS, /* H5Aexists(_by_name) */ + H5VL_ATTR_ITER, /* H5Aiterate(_by_name) */ + H5VL_ATTR_RENAME /* H5Arename(_by_name) */ +} H5VL_attr_specific_t; + +/* Parameters for attribute 'iterate' operation */ +typedef struct H5VL_attr_iterate_args_t { + H5_index_t idx_type; /* Type of index to iterate over */ + H5_iter_order_t order; /* Order of index iteration */ + hsize_t *idx; /* Start/stop iteration index (IN/OUT) */ + H5A_operator2_t op; /* Iteration callback function */ + void *op_data; /* Iteration callback context */ +} H5VL_attr_iterate_args_t; + +/* Parameters for attribute 'delete_by_idx' operation */ +typedef struct H5VL_attr_delete_by_idx_args_t { + H5_index_t idx_type; /* Type of index to iterate over */ + H5_iter_order_t order; /* Order of index iteration */ + hsize_t n; /* Iteration index */ +} H5VL_attr_delete_by_idx_args_t; + +/* Parameters for attribute 'specific' operations */ +typedef struct H5VL_attr_specific_args_t { + H5VL_attr_specific_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_ATTR_DELETE */ + struct { + const char *name; /* Name of attribute to delete */ + } del; + + /* H5VL_ATTR_DELETE_BY_IDX */ + H5VL_attr_delete_by_idx_args_t delete_by_idx; + + /* H5VL_ATTR_EXISTS */ + struct { + const char *name; /* Name of attribute to check */ + hbool_t *exists; /* Whether attribute exists (OUT) */ + } exists; + + /* H5VL_ATTR_ITER */ + H5VL_attr_iterate_args_t iterate; + + /* H5VL_ATTR_RENAME */ + struct { + const char *old_name; /* Name of attribute to rename */ + const char *new_name; /* New attribute name */ + } rename; + } args; +} H5VL_attr_specific_args_t; +\endcode + +\subsubsection subsubsecVOLRefAttropt attr: optional +The optional callback in the attribute class implements connector specific operations on an HDF5 attribute. +It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*optional)(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): The container or object where the operation needs to happen. + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+Each connector that requires connector-specific operations should compare the value of the op_type field of
+the #H5VL_optional_args_t struct with the values returned from calling #H5VLregister_opt_operation to
+determine how to handle the optional call and interpret the arguments passed in the struct.
+
+\subsubsection subsubsecVOLRefAttrclose attr: close
+The close callback in the attribute class terminates access to the attribute object and frees all resources it
+was consuming, and returns an herr_t indicating success or failure.
+
Signature:
+\code + herr_t (*close)(void *attr, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + attr (IN): Pointer to the attribute object. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
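+
+To tie the attribute callbacks together, the fragment below sketches how a connector's get callback might
+dispatch on the H5VL_attr_get_args_t struct shown earlier. The my_attr_t type and its fields are hypothetical
+connector-internal state.
+\code
+// Sketch of an attribute get callback dispatching on args->op_type.
+#include <stdio.h>
+#include <string.h>
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+typedef struct my_attr_t {
+    char  name[256]; // attribute name kept by the connector (hypothetical)
+    hid_t space_id;  // cached dataspace
+    hid_t type_id;   // cached datatype
+} my_attr_t;
+
+static herr_t my_attr_get(void *obj, H5VL_attr_get_args_t *args, hid_t dxpl_id, void **req) {
+    my_attr_t *attr = (my_attr_t *)obj;
+    (void)dxpl_id;
+    (void)req;
+
+    switch (args->op_type) {
+        case H5VL_ATTR_GET_NAME: // H5Aget_name and friends
+            if (args->args.get_name.buf && args->args.get_name.buf_size > 0)
+                snprintf(args->args.get_name.buf, args->args.get_name.buf_size, "%s", attr->name);
+            *args->args.get_name.attr_name_len = strlen(attr->name);
+            return 0;
+        case H5VL_ATTR_GET_SPACE: // H5Aget_space
+            args->args.get_space.space_id = H5Scopy(attr->space_id);
+            return 0;
+        case H5VL_ATTR_GET_TYPE:  // H5Aget_type
+            args->args.get_type.type_id = H5Tcopy(attr->type_id);
+            return 0;
+        default:                  // remaining operations omitted from this sketch
+            return -1;
+    }
+}
+\endcode
+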
+ +\subsection subsecVOLRefDset Dataset Callbacks +The dataset API routines (\ref H5D) allow HDF5 users to create and manage HDF5 datasets. All the \ref H5D API +routines that modify the HDF5 container map to one of the dataset callback routines in this class that the +connector needs to implement. + +Structure for dataset callback routines, H5VLconnector.h +\code +typedef struct H5VL_dataset_class_t { + void *(*create)(void *obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t lcpl_id, + hid_t type_id, hid_t space_id, hid_t dcpl_id, hid_t dapl_id, hid_t dxpl_id, void **req); + void *(*open)(void *obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t dapl_id, + hid_t dxpl_id, void **req); + herr_t (*read)(size_t count, void *dset[], hid_t mem_type_id[], hid_t mem_space_id[], + hid_t file_space_id[], hid_t dxpl_id, void *buf[], void **req); + herr_t (*write)(size_t count, void *dset[], hid_t mem_type_id[], hid_t mem_space_id[], + hid_t file_space_id[], hid_t dxpl_id, const void *buf[], void **req); + herr_t (*get)(void *obj, H5VL_dataset_get_args_t *args, hid_t dxpl_id, void **req); + herr_t (*specific)(void *obj, H5VL_dataset_specific_args_t *args, hid_t dxpl_id, void **req); + herr_t (*optional)(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req); + herr_t (*close)(void *dset, hid_t dxpl_id, void **req); +} H5VL_dataset_class_t; +\endcode + +\subsubsection subsubsecVOLRefDsetcreate dataset: create +The create callback in the dataset class creates a dataset object in the container of the location object and +returns a pointer to the dataset structure containing information to access the dataset in future calls. + + + + + + + + + + + + + +
Signature:
+\code
+    void *(*create)(void *obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t lcpl_id,
+                    hid_t type_id, hid_t space_id, hid_t dcpl_id, hid_t dapl_id, hid_t dxpl_id, void **req);
+\endcode
+
Arguments:
+\code + obj (IN): Pointer to an object where the dataset needs to be created or where the look-up of + the target object needs to start. + loc_params (IN): Pointer to the location parameters as explained in "Mapping the API to the Callbacks". + The type can be only H5VL_OBJECT_BY_SELF in this callback. + name (IN): The name of the dataset to be created. + lcpl_id (IN): The link creation property list. + type_id (IN): The datatype of the dataset. + space_id (IN): The dataspace of the dataset. + dcpl_id (IN): The dataset creation property list. + dapl_id (IN): The dataset access property list. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+ +\subsubsection subsubsecVOLRefDsetopen dataset: open +The open callback in the dataset class opens a dataset object in the container of the location object and +returns a pointer to the dataset structure containing information to access the dataset in future calls. + + + + + + + + + + + + + +
Signature:
+\code + void *(*open)(void *obj, H5VL_loc_params_t *loc_params, const char *name, hid_t dapl_id, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): Pointer to an object where the dataset needs to be opened or where the look-up of the target object needs to start. + loc_params (IN): Pointer to the location parameters as explained in "Mapping the API to the Callbacks". + The type can be only H5VL_OBJECT_BY_SELF in this callback. + name (IN): The name of the dataset to be opened. + dapl_id (IN): The dataset access property list. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+
+\subsubsection subsubsecVOLRefDsetread dataset: read
+The read callback in the dataset class reads data from one or more dataset objects and returns an herr_t
+indicating success or failure.
+
Signature:
+\code
+    herr_t (*read)(size_t count, void *dset[], hid_t mem_type_id[], hid_t mem_space_id[],
+                   hid_t file_space_id[], hid_t dxpl_id, void *buf[], void **req);
+\endcode
+
Arguments:
+\code
+    count (IN): The number of datasets in the I/O operation.
+    dset (IN): Array of pointers to the dataset objects.
+    mem_type_id (IN): Array of memory datatypes, one per dataset.
+    mem_space_id (IN): Array of memory dataspace selections, one per dataset.
+    file_space_id (IN): Array of file dataspace selections, one per dataset.
+    dxpl_id (IN): The data transfer property list.
+    buf (OUT): Array of data buffers to be read into, one per dataset.
+    req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
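+
+A connector's read callback simply services each element of these arrays. The sketch below assumes a
+hypothetical my_dset_read_one() helper that performs the actual I/O for one dataset.
+\code
+// Sketch of a dataset read callback for the multi-dataset form shown in
+// H5VL_dataset_class_t. my_dset_t and my_dset_read_one() are hypothetical.
+#include <stddef.h>
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+typedef struct my_dset_t my_dset_t;
+
+// Hypothetical helper that reads one dataset's selection into buf
+herr_t my_dset_read_one(my_dset_t *dset, hid_t mem_type_id, hid_t mem_space_id,
+                        hid_t file_space_id, hid_t dxpl_id, void *buf);
+
+static herr_t my_dataset_read(size_t count, void *dset[], hid_t mem_type_id[], hid_t mem_space_id[],
+                              hid_t file_space_id[], hid_t dxpl_id, void *buf[], void **req) {
+    size_t u;
+    (void)req; // synchronous connector
+
+    // Each element of the arrays describes one dataset in the I/O request
+    for (u = 0; u < count; u++)
+        if (my_dset_read_one((my_dset_t *)dset[u], mem_type_id[u], mem_space_id[u],
+                             file_space_id[u], dxpl_id, buf[u]) < 0)
+            return -1;
+    return 0;
+}
+\endcode
+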
+
+\subsubsection subsubsecVOLRefDsetwrite dataset: write
+The write callback in the dataset class writes data to one or more dataset objects and returns an herr_t
+indicating success or failure.
+
Signature:
+\code
+    herr_t (*write)(size_t count, void *dset[], hid_t mem_type_id[], hid_t mem_space_id[],
+                    hid_t file_space_id[], hid_t dxpl_id, const void *buf[], void **req);
+\endcode
+
Arguments:
+\code
+    count (IN): The number of datasets in the I/O operation.
+    dset (IN): Array of pointers to the dataset objects.
+    mem_type_id (IN): Array of memory datatypes, one per dataset.
+    mem_space_id (IN): Array of memory dataspace selections, one per dataset.
+    file_space_id (IN): Array of file dataspace selections, one per dataset.
+    dxpl_id (IN): The data transfer property list.
+    buf (IN): Array of data buffers to be written from, one per dataset.
+    req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+
+\subsubsection subsubsecVOLRefDsetget dataset: get
+The get callback in the dataset class retrieves information about the dataset as specified in the get_type
+parameter. It returns an herr_t indicating success or failure.
+
Signature:
+\code + herr_t (*get)(void *dset, H5VL_dataset_get_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + dset (IN): The dataset object where information needs to be retrieved from. + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+\code +/* Values for dataset 'get' operation */ +typedef enum H5VL_dataset_get_t { + H5VL_DATASET_GET_DAPL, /* access property list */ + H5VL_DATASET_GET_DCPL, /* creation property list */ + H5VL_DATASET_GET_SPACE, /* dataspace */ + H5VL_DATASET_GET_SPACE_STATUS, /* space status */ + H5VL_DATASET_GET_STORAGE_SIZE, /* storage size */ + H5VL_DATASET_GET_TYPE /* datatype */ +} H5VL_dataset_get_t; + +/* Parameters for dataset 'get' operations */ +typedef struct H5VL_dataset_get_args_t { + H5VL_dataset_get_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_DATASET_GET_DAPL */ + struct { + hid_t dapl_id; /* Dataset access property list ID (OUT) */ + } get_dapl; + + /* H5VL_DATASET_GET_DCPL */ + struct { + hid_t dcpl_id; /* Dataset creation property list ID (OUT) */ + } get_dcpl; + + /* H5VL_DATASET_GET_SPACE */ + struct { + hid_t space_id; /* Dataspace ID (OUT) */ + } get_space; + + /* H5VL_DATASET_GET_SPACE_STATUS */ + struct { + H5D_space_status_t *status; /* Storage space allocation status (OUT) */ + } get_space_status; + + /* H5VL_DATASET_GET_STORAGE_SIZE */ + struct { + hsize_t *storage_size; /* Size of dataset's storage (OUT) */ + } get_storage_size; + + /* H5VL_DATASET_GET_TYPE */ + struct { + hid_t type_id; /* Datatype ID (OUT) */ + } get_type; + } args; +} H5VL_dataset_get_args_t; +\endcode + +\subsubsection subsubsecVOLRefDsetspec dataset: specific +The specific callback in the dataset class implements specific operations on HDF5 datasets as specified in +the specific_type parameter. It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code
+    herr_t (*specific)(void *obj, H5VL_dataset_specific_args_t *args, hid_t dxpl_id, void **req);
+\endcode
+
Arguments:
+\code
+    obj (IN): The dataset object where the operation needs to happen.
+    args (IN/OUT): A pointer to the arguments struct.
+    dxpl_id (IN): The data transfer property list.
+    req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+\code +/* Values for dataset 'specific' operation */ +typedef enum H5VL_dataset_specific_t { + H5VL_DATASET_SET_EXTENT, /* H5Dset_extent */ + H5VL_DATASET_FLUSH, /* H5Dflush */ + H5VL_DATASET_REFRESH /* H5Drefresh */ +} H5VL_dataset_specific_t; + +/* Parameters for dataset 'specific' operations */ +typedef struct H5VL_dataset_specific_args_t { + H5VL_dataset_specific_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_DATASET_SET_EXTENT */ + struct { + const hsize_t *size; /* New dataspace extent */ + } set_extent; + + /* H5VL_DATASET_FLUSH */ + struct { + hid_t dset_id; /* Dataset ID (IN) */ + } flush; + + /* H5VL_DATASET_REFRESH */ + struct { + hid_t dset_id; /* Dataset ID (IN) */ + } refresh; + } args; +} H5VL_dataset_specific_args_t; +\endcode + +\subsubsection subsubsecVOLRefDsetopt dataset: optional +The optional callback in the dataset class implements connector specific operations on an HDF5 dataset. +It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*optional)(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): The container or object where the operation needs to happen. + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+Each connector that requires connector-specific operations should compare the value of the op_type field of
+the #H5VL_optional_args_t struct with the values returned from calling #H5VLregister_opt_operation to
+determine how to handle the optional call and interpret the arguments passed in the struct.
+
+\subsubsection subsubsecVOLRefDsetclose dataset: close
+The close callback in the dataset class terminates access to the dataset object and frees all resources it was
+consuming, and returns an herr_t indicating success or failure.
+
Signature:
+\code + herr_t (*close)(void *dset, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + dset (IN): Pointer to the dataset object. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
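+
+The following sketch shows how a connector might dispatch on the dataset get and specific argument structs
+shown above; my_dset_t and my_dset_resize() are hypothetical.
+\code
+// Sketch of dataset get and specific callbacks dispatching on the argument structs.
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+typedef struct my_dset_t {
+    hid_t space_id; // cached dataspace (hypothetical connector state)
+    hid_t type_id;  // cached datatype
+} my_dset_t;
+
+herr_t my_dset_resize(my_dset_t *dset, const hsize_t *new_size); // hypothetical helper
+
+static herr_t my_dataset_get(void *obj, H5VL_dataset_get_args_t *args, hid_t dxpl_id, void **req) {
+    my_dset_t *dset = (my_dset_t *)obj;
+    (void)dxpl_id; (void)req;
+    switch (args->op_type) {
+        case H5VL_DATASET_GET_SPACE: // H5Dget_space
+            args->args.get_space.space_id = H5Scopy(dset->space_id);
+            return 0;
+        case H5VL_DATASET_GET_TYPE:  // H5Dget_type
+            args->args.get_type.type_id = H5Tcopy(dset->type_id);
+            return 0;
+        default:                     // other get operations omitted from this sketch
+            return -1;
+    }
+}
+
+static herr_t my_dataset_specific(void *obj, H5VL_dataset_specific_args_t *args, hid_t dxpl_id, void **req) {
+    my_dset_t *dset = (my_dset_t *)obj;
+    (void)dxpl_id; (void)req;
+    switch (args->op_type) {
+        case H5VL_DATASET_SET_EXTENT: // H5Dset_extent
+            return my_dset_resize(dset, args->args.set_extent.size);
+        case H5VL_DATASET_FLUSH:      // H5Dflush: nothing buffered in this sketch
+            return 0;
+        default:
+            return -1;
+    }
+}
+\endcode
+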
+
+\subsection subsecVOLRefDType Datatype Callbacks
+The HDF5 datatype routines (\ref H5T) allow users to create and manage HDF5 datatypes. Those routines
+fall into two categories: routines that operate on all datatypes but do not modify the contents of the
+container (they work entirely in memory), and routines that operate on named datatypes by accessing the
+container. When a user creates an HDF5 datatype, it is still an object in memory (a transient datatype)
+that has not been added to an HDF5 container. Only when the user commits the datatype does it become
+persistent in the container; such datatypes are called named (committed) datatypes. The transient H5T
+routines nevertheless also work on named datatypes.
+
+All the \ref H5T API routines that modify the HDF5 container map to one of the named datatype callback
+routines in this class that the connector needs to implement.
+
+Structure for datatype callback routines, H5VLconnector.h
+\code
+typedef struct H5VL_datatype_class_t {
+    void *(*commit)(void *obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t type_id,
+                    hid_t lcpl_id, hid_t tcpl_id, hid_t tapl_id, hid_t dxpl_id, void **req);
+    void *(*open)(void *obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t tapl_id,
+                  hid_t dxpl_id, void **req);
+    herr_t (*get)(void *obj, H5VL_datatype_get_args_t *args, hid_t dxpl_id, void **req);
+    herr_t (*specific)(void *obj, H5VL_datatype_specific_args_t *args, hid_t dxpl_id, void **req);
+    herr_t (*optional)(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req);
+    herr_t (*close)(void *dt, hid_t dxpl_id, void **req);
+} H5VL_datatype_class_t;
+\endcode
+
+\subsubsection subsubsecVOLRefDTypecommit datatype: commit
+The commit callback in the named datatype class creates a datatype object in the container of the location
+object and returns a pointer to the datatype structure containing information to access the datatype in
+future calls.
+
Signature:
+\code + void *(*commit)(void *obj, H5VL_loc_params_t *loc_params, const char *name, hid_t type_id, hid_t lcpl_id, hid_t tcpl_id, hid_t tapl_id, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code
+    obj (IN): Pointer to an object where the datatype needs to be committed or where the look-up of the target object needs to start.
+    loc_params (IN): Pointer to location parameters as explained in "Mapping the API to the Callbacks".
+                     In this call, the location type is always H5VL_OBJECT_BY_SELF.
+    name (IN): The name of the datatype to be created.
+    type_id (IN): The transient datatype identifier to be committed.
+    lcpl_id (IN): The link creation property list.
+    tcpl_id (IN): The datatype creation property list.
+    tapl_id (IN): The datatype access property list.
+    dxpl_id (IN): The data transfer property list.
+    req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+ +\subsubsection subsubsecVOLRefDTypeopen datatype: open +The open callback in the named datatype class opens a previously committed datatype object in the container +of the location object and returns a pointer to the datatype structure containing information to access the +datatype in future calls. + + + + + + + + + + + + + +
Signature:
+\code + void *(*open) (void *obj, H5VL_loc_params_t *loc_params, const char * name, hid_t tapl_id, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): Pointer to an object where the datatype needs to be opened or where the look-up + of the target object needs to start. + loc_params (IN): Pointer to location parameters as explained in "Mapping the API to the Callbacks". + In this call, the location type is always H5VL_OBJECT_BY_SELF. + name (IN): The name of the datatype to be opened. + tapl_id (IN): The datatype access property list. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+
+\subsubsection subsubsecVOLRefDTypeget datatype: get
+The get callback in the named datatype class retrieves information about the named datatype as specified
+in the get_type parameter. It returns an herr_t indicating success or failure.
+
Signature:
+\code + herr_t (*get) (void *obj, H5VL_datatype_get_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): The named datatype to retrieve information from. + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+\code +/* Values for datatype 'get' operation */ +typedef enum H5VL_datatype_get_t { + H5VL_DATATYPE_GET_BINARY_SIZE, /* Get size of serialized form of transient type */ + H5VL_DATATYPE_GET_BINARY, /* Get serialized form of transient type */ + H5VL_DATATYPE_GET_TCPL /* Datatype creation property list */ +} H5VL_datatype_get_t; + +/* Parameters for datatype 'get' operations */ +typedef struct H5VL_datatype_get_args_t { + H5VL_datatype_get_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_DATATYPE_GET_BINARY_SIZE */ + struct { + size_t *size; /* Size of serialized form of datatype (OUT) */ + } get_binary_size; + + /* H5VL_DATATYPE_GET_BINARY */ + struct { + void *buf; /* Buffer to store serialized form of datatype (OUT) */ + size_t buf_size; /* Size of serialized datatype buffer */ + } get_binary; + + /* H5VL_DATATYPE_GET_TCPL */ + struct { + hid_t tcpl_id; /* Named datatype creation property list ID (OUT) */ + } get_tcpl; + } args; +} H5VL_datatype_get_args_t; +\endcode + +\subsubsection subsubsecVOLRefDTypespec datatype: specific +The specific callback in the datatype class implements specific operations on HDF5 named datatypes as +specified in the specific_type parameter. It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code
+    herr_t (*specific)(void *obj, H5VL_datatype_specific_args_t *args, hid_t dxpl_id, void **req);
+\endcode
+
Arguments:
+\code
+    obj (IN): The container or object where the operation needs to happen.
+    args (IN/OUT): A pointer to the arguments struct.
+    dxpl_id (IN): The data transfer property list.
+    req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+\code +/* Values for datatype 'specific' operation */ +typedef enum H5VL_datatype_specific_t { + H5VL_DATATYPE_FLUSH, /* H5Tflush */ + H5VL_DATATYPE_REFRESH /* H5Trefresh */ +} H5VL_datatype_specific_t; + +/* Parameters for datatype 'specific' operations */ +typedef struct H5VL_datatype_specific_args_t { + H5VL_datatype_specific_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_DATATYPE_FLUSH */ + struct { + hid_t type_id; /* Named datatype ID (IN) */ + } flush; + + /* H5VL_DATATYPE_REFRESH */ + struct { + hid_t type_id; /* Named datatype ID (IN) */ + } refresh; + } args; +} H5VL_datatype_specific_args_t; +\endcode + +\subsubsection subsubsecVOLRefDTypeopt datatype: optional +The optional callback in the datatype class implements connector specific operations on an HDF5 datatype. +It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*optional)(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): The container or object where the operation needs to happen. + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+
+Each connector that requires connector-specific operations should compare the value of the op_type field of
+the #H5VL_optional_args_t struct with the values returned from calling #H5VLregister_opt_operation to
+determine how to handle the optional call and interpret the arguments passed in the struct.
+
+\subsubsection subsubsecVOLRefDTypeclose datatype: close
+The close callback in the named datatype class terminates access to the datatype object and frees all
+resources it was consuming, and returns an herr_t indicating success or failure.
+
Signature:
+\code + herr_t (*close) (void *dt, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + dt (IN): Pointer to the datatype object. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
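+
+As one possible way to service the binary and creation property list queries listed above, a connector that
+keeps an in-memory datatype ID can lean on H5Tencode() and H5Pcopy(), as in the sketch below; my_dtype_t is
+hypothetical.
+\code
+// Sketch of a named datatype get callback using H5Tencode() to produce the
+// serialized form requested by H5VL_DATATYPE_GET_BINARY(_SIZE).
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+typedef struct my_dtype_t {
+    hid_t type_id; // in-memory datatype kept by the connector (hypothetical)
+    hid_t tcpl_id; // creation property list recorded at commit time
+} my_dtype_t;
+
+static herr_t my_datatype_get(void *obj, H5VL_datatype_get_args_t *args, hid_t dxpl_id, void **req) {
+    my_dtype_t *dt = (my_dtype_t *)obj;
+    (void)dxpl_id; (void)req;
+    switch (args->op_type) {
+        case H5VL_DATATYPE_GET_BINARY_SIZE: // size of serialized form
+            return H5Tencode(dt->type_id, NULL, args->args.get_binary_size.size);
+        case H5VL_DATATYPE_GET_BINARY: {    // serialized form itself
+            size_t nalloc = args->args.get_binary.buf_size;
+            return H5Tencode(dt->type_id, args->args.get_binary.buf, &nalloc);
+        }
+        case H5VL_DATATYPE_GET_TCPL:        // H5Tget_create_plist
+            args->args.get_tcpl.tcpl_id = H5Pcopy(dt->tcpl_id);
+            return 0;
+        default:
+            return -1;
+    }
+}
+\endcode
+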
+
+\subsection subsecVOLRefFile File Callbacks
+The file API routines (\ref H5F) allow HDF5 users to create and manage HDF5 containers. All the \ref H5F API
+routines that modify the HDF5 container map to one of the file callback routines in this class that the
+connector needs to implement.
+
+File class for file API routines, H5VLconnector.h
+\code
+typedef struct H5VL_file_class_t {
+    void *(*create)(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, hid_t dxpl_id,
+                    void **req);
+    void *(*open)(const char *name, unsigned flags, hid_t fapl_id, hid_t dxpl_id, void **req);
+    herr_t (*get)(void *obj, H5VL_file_get_args_t *args, hid_t dxpl_id, void **req);
+    herr_t (*specific)(void *obj, H5VL_file_specific_args_t *args, hid_t dxpl_id, void **req);
+    herr_t (*optional)(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req);
+    herr_t (*close)(void *file, hid_t dxpl_id, void **req);
+} H5VL_file_class_t;
+\endcode
+
+\subsubsection subsubsecVOLRefFilecreate file: create
+The create callback in the file class should create a container and return a pointer to the file structure
+created by the connector containing information to access the container in future calls.
+
Signature:
+\code
+    void *(*create)(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, hid_t dxpl_id, void **req);
+\endcode
+
Arguments:
+\code + name (IN): The name of the container to be created. + flags (IN): The creation flags of the container. + fcpl_id (IN): The file creation property list. + fapl_id (IN): The file access property list. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+
+\subsubsection subsubsecVOLRefFileopen file: open
+The open callback in the file class should open a container and return a pointer to the file structure created
+by the connector containing information to access the container in future calls.
+
Signature:
+\code + void *(*open)(const char *name, unsigned flags, hid_t fapl_id, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + name (IN): The name of the container to open. + flags (IN): The open flags of the container. + fapl_id (IN): The file access property list. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
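+
+From the application side, the create and open callbacks are reached whenever a file is created or opened
+with a fapl that selects the connector. A minimal sketch, assuming a connector registered under the
+hypothetical name "my_vol":
+\code
+// Application-side sketch: route H5Fcreate()/H5Fclose() through a registered
+// connector. The connector name "my_vol" is an assumption.
+#include "hdf5.h"
+
+int main(void) {
+    hid_t fapl_id, vol_id, file_id;
+
+    // Load/register the connector by name (e.g. built as a plugin)
+    vol_id = H5VLregister_connector_by_name("my_vol", H5P_DEFAULT);
+
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+    H5Pset_vol(fapl_id, vol_id, NULL); // NULL: no connector-specific info
+
+    // This call ends up in the connector's file create callback
+    file_id = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+    H5Fclose(file_id); // ends up in the connector's file close callback
+    H5Pclose(fapl_id);
+    H5VLunregister_connector(vol_id);
+    return 0;
+}
+\endcode
+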
+ +\subsubsection subsubsecVOLRefFileget file: get +The get callback in the file class should retrieve information about the container as specified in the get_type +parameter. It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*get)(void *obj, H5VL_file_get_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): The container or object where information needs to be retrieved from. + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+\code +/* Info for H5VL_FILE_GET_CONT_INFO */ +typedef struct H5VL_file_cont_info_t { + unsigned version; /* version information (keep first) */ + uint64_t feature_flags; /* Container feature flags */ + /* (none currently defined) */ + size_t token_size; /* Size of tokens */ + size_t blob_id_size; /* Size of blob IDs */ +} H5VL_file_cont_info_t; + +/* Values for file 'get' operation */ +typedef enum H5VL_file_get_t { + H5VL_FILE_GET_CONT_INFO, /* file get container info */ + H5VL_FILE_GET_FAPL, /* file access property list */ + H5VL_FILE_GET_FCPL, /* file creation property list */ + H5VL_FILE_GET_FILENO, /* file number */ + H5VL_FILE_GET_INTENT, /* file intent */ + H5VL_FILE_GET_NAME, /* file name */ + H5VL_FILE_GET_OBJ_COUNT, /* object count in file */ + H5VL_FILE_GET_OBJ_IDS /* object ids in file */ +} H5VL_file_get_t; + +/* Parameters for file 'get_name' operation */ +typedef struct H5VL_file_get_name_args_t { + H5I_type_t type; /* ID type of object pointer */ + size_t buf_size; /* Size of file name buffer (IN) */ + char *buf; /* Buffer for file name (OUT) */ + size_t *file_name_len; /* Actual length of file name (OUT) */ +} H5VL_file_get_name_args_t; + +/* Parameters for file 'get_obj_ids' operation */ +typedef struct H5VL_file_get_obj_ids_args_t { + unsigned types; /* Type of objects to count */ + size_t max_objs; /* Size of array of object IDs */ + hid_t *oid_list; /* Array of object IDs (OUT) */ + size_t *count; /* # of objects (OUT) */ +} H5VL_file_get_obj_ids_args_t; + +/* Parameters for file 'get' operations */ +typedef struct H5VL_file_get_args_t { + H5VL_file_get_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_FILE_GET_CONT_INFO */ + struct { + H5VL_file_cont_info_t *info; /* Container info (OUT) */ + } get_cont_info; + + /* H5VL_FILE_GET_FAPL */ + struct { + hid_t fapl_id; /* File access property list (OUT) */ + } get_fapl; + + /* H5VL_FILE_GET_FCPL */ + struct { + hid_t fcpl_id; /* File creation property list (OUT) */ + } get_fcpl; + + /* H5VL_FILE_GET_FILENO */ + struct { + unsigned long *fileno; /* File "number" (OUT) */ + } get_fileno; + + /* H5VL_FILE_GET_INTENT */ + struct { + unsigned *flags; /* File open/create intent flags (OUT) */ + } get_intent; + + /* H5VL_FILE_GET_NAME */ + H5VL_file_get_name_args_t get_name; + + /* H5VL_FILE_GET_OBJ_COUNT */ + struct { + unsigned types; /* Type of objects to count */ + size_t *count; /* # of objects (OUT) */ + } get_obj_count; + + /* H5VL_FILE_GET_OBJ_IDS */ + H5VL_file_get_obj_ids_args_t get_obj_ids; + } args; +} H5VL_file_get_args_t; +\endcode + +\subsubsection subsubsecVOLRefFilespec file: specific +The specific callback in the file class implements specific operations on HDF5 files as specified in the +specific_type parameter. It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*specific)(void *obj, H5VL_file_specific_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): The container or object where the operation needs to happen. + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+\code +/* Values for file 'specific' operation */ +typedef enum H5VL_file_specific_t { + H5VL_FILE_FLUSH, /* Flush file */ + H5VL_FILE_REOPEN, /* Reopen the file */ + H5VL_FILE_IS_ACCESSIBLE, /* Check if a file is accessible */ + H5VL_FILE_DELETE, /* Delete a file */ + H5VL_FILE_IS_EQUAL /* Check if two files are the same */ +} H5VL_file_specific_t; + +/* Parameters for file 'specific' operations */ +typedef struct H5VL_file_specific_args_t { + H5VL_file_specific_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_FILE_FLUSH */ + struct { + H5I_type_t obj_type; /* Type of object to use */ + H5F_scope_t scope; /* Scope of flush operation */ + } flush; + + /* H5VL_FILE_REOPEN */ + struct { + void **file; /* File object for new file (OUT) */ + } reopen; + + /* H5VL_FILE_IS_ACCESSIBLE */ + struct { + const char *filename; /* Name of file to check */ + hid_t fapl_id; /* File access property list to use */ + hbool_t *accessible; /* Whether file is accessible with FAPL settings (OUT) */ + } is_accessible; + + /* H5VL_FILE_DELETE */ + struct { + const char *filename; /* Name of file to delete */ + hid_t fapl_id; /* File access property list to use */ + } del; + + /* H5VL_FILE_IS_EQUAL */ + struct { + void *obj2; /* Second file object to compare against */ + hbool_t *same_file; /* Whether files are the same (OUT) */ + } is_equal; + } args; +} H5VL_file_specific_args_t; +\endcode + +\subsubsection subsubsecVOLRefFileopt file: optional +The optional callback in the file class implements connector specific operations on an HDF5 container. It +returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*optional)(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): The container or object where the operation needs to happen. + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+Each connector that requires connector-specific operations should compare the value of the op_type field of
+the #H5VL_optional_args_t struct with the values returned from calling #H5VLregister_opt_operation to
+determine how to handle the optional call and interpret the arguments passed in the struct.
+
+\subsubsection subsubsecVOLRefFileclose file: close
+The close callback in the file class should terminate access to the file object, free all resources it was
+consuming, and return an herr_t indicating success or failure.
+
Signature:
+\code + herr_t (*close)(void *file, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + file (IN): Pointer to the file. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
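+
+The sketch below shows how a connector might handle a subset of the file specific operations listed above;
+my_file_t and the my_file_*() helpers are hypothetical. Note that for H5VL_FILE_IS_ACCESSIBLE and
+H5VL_FILE_DELETE the operation is driven by the filename in the arguments rather than by an open file object.
+\code
+// Sketch of a file specific callback handling a subset of the operations in
+// H5VL_file_specific_args_t. my_file_t and the my_file_*() helpers are hypothetical.
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+typedef struct my_file_t my_file_t;
+herr_t  my_file_flush(my_file_t *f);           // hypothetical
+hbool_t my_file_exists(const char *filename);  // hypothetical
+herr_t  my_file_delete(const char *filename);  // hypothetical
+
+static herr_t my_file_specific(void *obj, H5VL_file_specific_args_t *args, hid_t dxpl_id, void **req) {
+    (void)dxpl_id; (void)req;
+    switch (args->op_type) {
+        case H5VL_FILE_FLUSH:         // H5Fflush
+            return my_file_flush((my_file_t *)obj);
+        case H5VL_FILE_IS_ACCESSIBLE: // H5Fis_accessible: obj may be NULL here
+            *args->args.is_accessible.accessible = my_file_exists(args->args.is_accessible.filename);
+            return 0;
+        case H5VL_FILE_DELETE:        // H5Fdelete: also driven by the filename
+            return my_file_delete(args->args.del.filename);
+        default:                      // REOPEN and IS_EQUAL omitted from this sketch
+            return -1;
+    }
+}
+\endcode
+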
+ +\subsection subsecVOLRefGrp Group Callbacks +The group API routines (\ref H5G) allow HDF5 users to create and manage HDF5 groups. All the \ref H5G API +routines that modify the HDF5 container map to one of the group callback routines in this class that the +connector needs to implement. + +Structure for group callback routines, H5VLconnector.h +\code +typedef struct H5VL_group_class_t { + void *(*create)(void *obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t lcpl_id, + hid_t gcpl_id, hid_t gapl_id, hid_t dxpl_id, void **req); + void *(*open)(void *obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t gapl_id, + hid_t dxpl_id, void **req); + herr_t (*get)(void *obj, H5VL_group_get_args_t *args, hid_t dxpl_id, void **req); + herr_t (*specific)(void *obj, H5VL_group_specific_args_t *args, hid_t dxpl_id, void **req); + herr_t (*optional)(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req); + herr_t (*close)(void *grp, hid_t dxpl_id, void **req); +} H5VL_group_class_t; +\endcode + +\subsubsection subsubsecVOLRefGrpcreate group: create +The create callback in the group class creates a group object in the container of the location object and +returns a pointer to the group structure containing information to access the group in future calls. + + + + + + + + + + + + + +
Signature:
+\code
+    void *(*create)(void *obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t lcpl_id,
+                    hid_t gcpl_id, hid_t gapl_id, hid_t dxpl_id, void **req);
+\endcode
+
Arguments:
+\code
+    obj (IN): Pointer to an object where the group needs to be created or where the look-up of
+              the target object needs to start.
+    loc_params (IN): Pointer to the location parameters as explained in "Mapping the API to the Callbacks".
+                     The type can be only H5VL_OBJECT_BY_SELF in this callback.
+    name (IN): The name of the group to be created.
+    lcpl_id (IN): The link creation property list of the create operation.
+    gcpl_id (IN): The group creation property list. It contains all the group creation properties.
+    gapl_id (IN): The group access property list.
+    dxpl_id (IN): The data transfer property list.
+    req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+ +\subsubsection subsubsecVOLRefGrpopen group: open +The open callback in the group class opens a group object in the container of the location object and returns +a pointer to the group structure containing information to access the group in future calls. + + + + + + + + + + + + + +
Signature:
+\code + void *(*open)(void *obj, H5VL_loc_params_t *loc_params, const char *name, hid_t gapl_id, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): Pointer to an object where the group needs to be opened or where the look-up of the target object needs to start. + loc_params (IN): Pointer to the location parameters as explained in "Mapping the API to the Callbacks". + The type can be only H5VL_OBJECT_BY_SELF in this callback. + name (IN): The name of the group to be opened. + gapl_id (IN): The group access property list. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+ +\subsubsection subsubsecVOLRefGrpget group: get +The get callback in the group class retrieves information about the group as specified in the get_type +parameter. It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code
+    herr_t (*get)(void *obj, H5VL_group_get_args_t *args, hid_t dxpl_id, void **req);
+\endcode
+
Arguments:
+\code + obj (IN): The group object where information needs to be retrieved from. + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+\code +/* Values for group 'get' operation */ +typedef enum H5VL_group_get_t { + H5VL_GROUP_GET_GCPL, /* group creation property list */ + H5VL_GROUP_GET_INFO /* group info */ +} H5VL_group_get_t; + +/* Parameters for group 'get_info' operation */ +typedef struct H5VL_group_get_info_args_t { + H5VL_loc_params_t loc_params; /* Location parameters for object access */ + H5G_info_t *ginfo; /* Group info (OUT) */ +} H5VL_group_get_info_args_t; + +/* Parameters for group 'get' operations */ +typedef struct H5VL_group_get_args_t { + H5VL_group_get_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_GROUP_GET_GCPL */ + struct { + hid_t gcpl_id; /* Group creation property list (OUT) */ + } get_gcpl; + + /* H5VL_GROUP_GET_INFO */ + H5VL_group_get_info_args_t get_info; /* Group info */ + } args; +} H5VL_group_get_args_t; +\endcode + +\subsubsection subsubsecVOLRefGrpspec group: specific +The specific callback in the group class implements specific operations on HDF5 groups as specified in the +specific_type parameter. It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code
+    herr_t (*specific)(void *obj, H5VL_group_specific_args_t *args, hid_t dxpl_id, void **req);
+\endcode
+
Arguments:
+\code
+    obj (IN): The container or object where the operation needs to happen.
+    args (IN/OUT): A pointer to the arguments struct.
+    dxpl_id (IN): The data transfer property list.
+    req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+\code +/* Values for group 'specific' operation */ +typedef enum H5VL_group_specific_t { + H5VL_GROUP_MOUNT, /* Mount a file on a group */ + H5VL_GROUP_UNMOUNT, /* Unmount a file on a group */ + H5VL_GROUP_FLUSH, /* H5Gflush */ + H5VL_GROUP_REFRESH /* H5Grefresh */ +} H5VL_group_specific_t; + +/* Parameters for group 'mount' operation */ +typedef struct H5VL_group_spec_mount_args_t { + const char *name; /* Name of location to mount child file */ + void *child_file; /* Pointer to child file object */ + hid_t fmpl_id; /* File mount property list to use */ +} H5VL_group_spec_mount_args_t; + +/* Parameters for group 'specific' operations */ +typedef struct H5VL_group_specific_args_t { + H5VL_group_specific_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_GROUP_MOUNT */ + H5VL_group_spec_mount_args_t mount; + + /* H5VL_GROUP_UNMOUNT */ + struct { + const char *name; /* Name of location to unmount child file */ + } unmount; + + /* H5VL_GROUP_FLUSH */ + struct { + hid_t grp_id; /* Group ID (IN) */ + } flush; + + /* H5VL_GROUP_REFRESH */ + struct { + hid_t grp_id; /* Group ID (IN) */ + } refresh; + } args; +} H5VL_group_specific_args_t; +\endcode + +\subsubsection subsubsecVOLRefGrpopt group: optional +The optional callback in the group class implements connector specific operations on an HDF5 group. It +returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*optional)(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + obj (IN): The container or object where the operation needs to happen. + args (IN/OUT): A pointer to the arguments struct. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
+Each connector that requires connector-specific operations should compare the value of the op_type field of +the #H5VL_optional_args_t struct with the values returned from calling #H5VLregister_opt_operation to +determine how to handle the optional call and interpret the arguments passed in the struct. + +\subsubsection subsubsecVOLRefGrpclose group: close +The close callback in the group class terminates access to the group object and frees all resources it was +consuming, and returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*close)(void *group, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + group (IN): Pointer to the group object. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
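+
+The sketch below shows how a connector might answer the group get operations listed above; my_group_t and
+my_group_count_links() are hypothetical.
+\code
+// Sketch of a group get callback for the operations in H5VL_group_get_args_t.
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+typedef struct my_group_t {
+    hid_t gcpl_id; // creation property list recorded at create time (hypothetical)
+} my_group_t;
+
+hsize_t my_group_count_links(my_group_t *grp); // hypothetical helper
+
+static herr_t my_group_get(void *obj, H5VL_group_get_args_t *args, hid_t dxpl_id, void **req) {
+    my_group_t *grp = (my_group_t *)obj;
+    (void)dxpl_id; (void)req;
+    switch (args->op_type) {
+        case H5VL_GROUP_GET_GCPL: // H5Gget_create_plist
+            args->args.get_gcpl.gcpl_id = H5Pcopy(grp->gcpl_id);
+            return 0;
+        case H5VL_GROUP_GET_INFO: // H5Gget_info(_by_name/_by_idx)
+            // Only the BY_SELF case is sketched here
+            if (args->args.get_info.loc_params.type != H5VL_OBJECT_BY_SELF)
+                return -1;
+            args->args.get_info.ginfo->storage_type = H5G_STORAGE_TYPE_UNKNOWN;
+            args->args.get_info.ginfo->nlinks       = my_group_count_links(grp);
+            args->args.get_info.ginfo->max_corder   = 0;
+            args->args.get_info.ginfo->mounted      = 0;
+            return 0;
+        default:
+            return -1;
+    }
+}
+\endcode
+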
+ +\subsection subsecVOLRefLink Link Callbacks +The link API routines (\ref H5L) allow HDF5 users to create and manage HDF5 links. All the \ref H5L API routines +that modify the HDF5 container map to one of the link callback routines in this class that the connector +needs to implement. + +Structure for link callback routines, H5VLconnector.h +\code +typedef struct H5VL_link_class_t { + herr_t (*create)(H5VL_link_create_args_t *args, void *obj, const H5VL_loc_params_t *loc_params, + hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, void **req); + herr_t (*copy)(void *src_obj, const H5VL_loc_params_t *loc_params1, void *dst_obj, + const H5VL_loc_params_t *loc_params2, hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, + void **req); + herr_t (*move)(void *src_obj, const H5VL_loc_params_t *loc_params1, void *dst_obj, + const H5VL_loc_params_t *loc_params2, hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, + void **req); + herr_t (*get)(void *obj, const H5VL_loc_params_t *loc_params, H5VL_link_get_args_t *args, hid_t dxpl_id, + void **req); + herr_t (*specific)(void *obj, const H5VL_loc_params_t *loc_params, H5VL_link_specific_args_t *args, + hid_t dxpl_id, void **req); + herr_t (*optional)(void *obj, const H5VL_loc_params_t *loc_params, H5VL_optional_args_t *args, + hid_t dxpl_id, void **req); +} H5VL_link_class_t; +\endcode + +\subsubsection subsubsecVOLRefLinkcreate link: create +The create callback in the group class creates a hard, soft, external, or user-defined link in the container. +It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code
+    herr_t (*create)(H5VL_link_create_args_t *args, void *obj, const H5VL_loc_params_t *loc_params,
+                     hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, void **req);
+\endcode
+
Arguments:
+\code
+    args (IN/OUT): A pointer to the arguments struct.
+    obj (IN): Pointer to the object from which the link needs to be created.
+    loc_params (IN): Pointer to the location parameters as explained in "Mapping the API to the Callbacks" for the source object.
+    lcpl_id (IN): The link creation property list. It contains all the link creation properties in
+                  addition to other API parameters depending on the creation type, which will be detailed next.
+    lapl_id (IN): The link access property list.
+    dxpl_id (IN): The data transfer property list.
+    req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+\code +/* Link create types for VOL */ +typedef enum H5VL_link_create_t { + H5VL_LINK_CREATE_HARD, + H5VL_LINK_CREATE_SOFT, + H5VL_LINK_CREATE_UD +} H5VL_link_create_t; + +/* Parameters for link 'create' operations */ +typedef struct H5VL_link_create_args_t { + H5VL_link_create_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_LINK_CREATE_HARD */ + struct { + void *curr_obj; /* Current object */ + H5VL_loc_params_t curr_loc_params; /* Location parameters for current object */ + } hard; + + /* H5VL_LINK_CREATE_SOFT */ + struct { + const char *target; /* Target of soft link */ + } soft; + + /* H5VL_LINK_CREATE_UD */ + struct { + H5L_type_t type; /* Type of link to create */ + const void *buf; /* Buffer that contains link info */ + size_t buf_size; /* Size of link info buffer */ + } ud; + } args; +} H5VL_link_create_args_t; +\endcode + +\subsubsection subsubsecVOLRefLinkcopy link: copy +The copy callback in the link class copies a link within the HDF5 container. It returns an herr_t indicating +success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*copy)(void *src_obj, H5VL_loc_params_t *loc_params1, void *dst_obj, H5VL_loc_params_t *loc_params2, hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + src_obj (IN): original/source object or file. + loc_params1 (IN): Pointer to the location parameters for the source object as explained in "Mapping the API to the Callbacks". + The type can be only H5VL_OBJECT_BY_NAME in this callback. + dst_obj (IN): destination object or file. + loc_params2 (IN): Pointer to the location parameters for the destination object as explained in "Mapping the API to the Callbacks". + The type can be only H5VL_OBJECT_BY_NAME in this callback. + lcpl_id (IN): The link creation property list. + lapl_id (IN): The link access property list. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
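+
+As an illustration, the sketch below shows how a pass-through connector might implement this callback by
+unwrapping its own object structure and forwarding the call to the underlying connector with #H5VLlink_copy
+(see Appendix B). The pt_obj_t wrapper type and its field names are hypothetical and not part of the HDF5 API;
+wrapping of the asynchronous request is omitted for brevity.
+\code
+#include "hdf5.h"
+#include "H5VLconnector.h"
+#include "H5VLconnector_passthru.h"
+
+/* Hypothetical wrapper object used by a pass-through connector */
+typedef struct pt_obj_t {
+    void *under_obj;    /* object belonging to the underlying connector */
+    hid_t under_vol_id; /* registered ID of the underlying connector */
+} pt_obj_t;
+
+/* Sketch of a pass-through 'link copy' callback */
+static herr_t
+pt_link_copy(void *src_obj, const H5VL_loc_params_t *loc_params1, void *dst_obj,
+             const H5VL_loc_params_t *loc_params2, hid_t lcpl_id, hid_t lapl_id,
+             hid_t dxpl_id, void **req)
+{
+    pt_obj_t *src = (pt_obj_t *)src_obj;
+    pt_obj_t *dst = (pt_obj_t *)dst_obj;
+
+    /* Forward the operation to the underlying connector */
+    return H5VLlink_copy(src->under_obj, loc_params1, dst->under_obj, loc_params2,
+                         src->under_vol_id, lcpl_id, lapl_id, dxpl_id, req);
+}
+\endcode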
+ +\subsubsection subsubsecVOLRefLinkmove link: move +The move callback in the link class moves a link within the HDF5 container. It returns an herr_t indicating +success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*move)(void *src_obj, H5VL_loc_params_t *loc_params1, void *dst_obj, H5VL_loc_params_t *loc_params2, hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code + src_obj (IN): original/source object or file. + loc_params1 (IN): Pointer to the location parameters for the source object as explained in "Mapping the API to the Callbacks". + The type can be only H5VL_OBJECT_BY_NAME in this callback. + dst_obj (IN): destination object or file. + loc_params2 (IN): Pointer to the location parameters for the destination object as explained in "Mapping the API to the Callbacks". + The type can be only H5VL_OBJECT_BY_NAME in this callback. + lcpl_id (IN): The link creation property list. + lapl_id (IN): The link access property list. + dxpl_id (IN): The data transfer property list. + req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector. +\endcode +
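+
+For a terminal connector, much of the work in this callback is decoding the two location parameters. The
+sketch below shows how the source and destination link names might be extracted when both locations are
+specified by name, which is the only case this callback receives. The field access follows the public
+H5VL_loc_params_t definition in H5VLconnector.h; the my_link_move name and the omitted storage update are
+hypothetical connector-internal details.
+\code
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+/* Sketch: decode the by-name location parameters passed to a 'link move' callback */
+static herr_t
+my_link_move(void *src_obj, const H5VL_loc_params_t *loc_params1, void *dst_obj,
+             const H5VL_loc_params_t *loc_params2, hid_t lcpl_id, hid_t lapl_id,
+             hid_t dxpl_id, void **req)
+{
+    (void)lcpl_id; (void)lapl_id; (void)dxpl_id; (void)req;
+
+    /* This callback is only invoked with H5VL_OBJECT_BY_NAME locations */
+    if (loc_params1->type != H5VL_OBJECT_BY_NAME || loc_params2->type != H5VL_OBJECT_BY_NAME)
+        return -1;
+
+    const char *src_name = loc_params1->loc_data.loc_by_name.name;
+    const char *dst_name = loc_params2->loc_data.loc_by_name.name;
+
+    /* ... rename the link from src_name (relative to src_obj) to dst_name
+     *     (relative to dst_obj) in the connector's own storage ... */
+    (void)src_obj; (void)dst_obj; (void)src_name; (void)dst_name;
+
+    return 0;
+}
+\endcode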
+ +\subsubsection subsubsecVOLRefLinkget link: get +The get callback in the link class retrieves information about links as specified in the get_type parameter. +It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*get)(void *obj, H5VL_loc_params_t *loc_params, H5VL_link_get_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code
+ obj (IN): The file or group object from which information is to be retrieved.
+ loc_params (IN): Pointer to the location parameters for the object as explained in "Mapping the API to the Callbacks".
+                  The type can be only H5VL_OBJECT_BY_NAME or H5VL_OBJECT_BY_IDX in this callback.
+ args (IN/OUT): A pointer to the arguments struct.
+ dxpl_id (IN): The data transfer property list.
+ req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+\code +/* Values for link 'get' operation */ +typedef enum H5VL_link_get_t { + H5VL_LINK_GET_INFO, /* link info */ + H5VL_LINK_GET_NAME, /* link name */ + H5VL_LINK_GET_VAL /* link value */ +} H5VL_link_get_t; + +/* Parameters for link 'get' operations */ +typedef struct H5VL_link_get_args_t { + H5VL_link_get_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_LINK_GET_INFO */ + struct { + H5L_info2_t *linfo; /* Pointer to link's info (OUT) */ + } get_info; + + /* H5VL_LINK_GET_NAME */ + struct { + size_t name_size; /* Size of link name buffer (IN) */ + char *name; /* Buffer for link name (OUT) */ + size_t *name_len; /* Actual length of link name (OUT) */ + } get_name; + + /* H5VL_LINK_GET_VAL */ + struct { + size_t buf_size; /* Size of link value buffer (IN) */ + void *buf; /* Buffer for link value (OUT) */ + } get_val; + } args; +} H5VL_link_get_args_t; +\endcode + +\subsubsection subsubsecVOLRefLinkspec link: specific +The specific callback in the link class implements specific operations on HDF5 links as specified in +the specific_type parameter. It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*specific)(void *obj, H5VL_loc_params_t *loc_params, H5VL_link_specific_args_t *args, hid_t dxpl_id, void **req) +\endcode +
Arguments:
+\code
+ obj (IN): The location object where the operation needs to happen.
+ loc_params (IN): Pointer to the location parameters for the object as explained in "Mapping the API to the Callbacks".
+ args (IN/OUT): A pointer to the arguments struct.
+ dxpl_id (IN): The data transfer property list.
+ req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+\code +/* Values for link 'specific' operation */ +typedef enum H5VL_link_specific_t { + H5VL_LINK_DELETE, /* H5Ldelete(_by_idx) */ + H5VL_LINK_EXISTS, /* link existence */ + H5VL_LINK_ITER /* H5Literate/visit(_by_name) */ +} H5VL_link_specific_t; + +/* Parameters for link 'iterate' operation */ +typedef struct H5VL_link_iterate_args_t { + hbool_t recursive; /* Whether iteration is recursive */ + H5_index_t idx_type; /* Type of index to iterate over */ + H5_iter_order_t order; /* Order of index iteration */ + hsize_t *idx_p; /* Start/stop iteration index (OUT) */ + H5L_iterate2_t op; /* Iteration callback function */ + void *op_data; /* Iteration callback context */ +} H5VL_link_iterate_args_t; + +/* Parameters for link 'specific' operations */ +typedef struct H5VL_link_specific_args_t { + H5VL_link_specific_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_LINK_DELETE */ + /* No args */ + + /* H5VL_LINK_EXISTS */ + struct { + hbool_t *exists; /* Whether link exists (OUT) */ + } exists; + + /* H5VL_LINK_ITER */ + H5VL_link_iterate_args_t iterate; + } args; +} H5VL_link_specific_args_t; +\endcode + +\subsubsection subsubsecVOLRefLinkopt link: optional +The optional callback in the link class implements connector specific operations on an HDF5 link. It returns +an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*optional)(void *obj, const H5VL_loc_params_t *loc_params, H5VL_optional_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code
+ obj (IN): The container or object where the operation needs to happen.
+ loc_params (IN): Pointer to the location parameters for the object as explained in "Mapping the API to the Callbacks".
+ args (IN/OUT): A pointer to the arguments struct.
+ dxpl_id (IN): The data transfer property list.
+ req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+Each connector that requires connector-specific operations should compare the value of the op_type field of +the #H5VL_optional_args_t struct with the values returned from calling #H5VLregister_opt_operation to +determine how to handle the optional call and interpret the arguments passed in the struct. + +\subsection subsecVOLRefObj Object Callbacks +The object API routines (\ref H5O) allow HDF5 users to manage HDF5 group, dataset, and named datatype +objects. All the \ref H5O API routines that modify the HDF5 container map to one of the object callback +routines in this class that the connector needs to implement. + +Structure for object callback routines, H5VLconnector.h +\code +typedef struct H5VL_object_class_t { + void *(*open)(void *obj, const H5VL_loc_params_t *loc_params, H5I_type_t *opened_type, hid_t dxpl_id, + void **req); + herr_t (*copy)(void *src_obj, const H5VL_loc_params_t *loc_params1, const char *src_name, void *dst_obj, + const H5VL_loc_params_t *loc_params2, const char *dst_name, hid_t ocpypl_id, hid_t lcpl_id, + hid_t dxpl_id, void **req); + herr_t (*get)(void *obj, const H5VL_loc_params_t *loc_params, H5VL_object_get_args_t *args, hid_t dxpl_id, + void **req); + herr_t (*specific)(void *obj, const H5VL_loc_params_t *loc_params, H5VL_object_specific_args_t *args, + hid_t dxpl_id, void **req); + herr_t (*optional)(void *obj, const H5VL_loc_params_t *loc_params, H5VL_optional_args_t *args, + hid_t dxpl_id, void **req); +} H5VL_object_class_t; +\endcode + +\subsubsection subsubsecVOLRefObjopen object: open +The open callback in the object class opens the object in the container of the location object and returns a +pointer to the object structure containing information to access the object in future calls. + + + + + + + + + + + + + +
Signature:
+\code + void *(*open)(void *obj, H5VL_loc_params_t *loc_params, H5I_type_t *opened_type, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code
+ obj (IN): The container or object where the operation needs to happen.
+ loc_params (IN): Pointer to the location parameters for the object to open as explained in "Mapping the API to the Callbacks".
+ opened_type (OUT): Buffer to return the type of the object opened (H5I_GROUP or H5I_DATASET or H5I_DATATYPE).
+ dxpl_id (IN): The data transfer property list.
+ req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
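+
+For example, a simple terminal connector might look up its own object representation and report the opened
+type back to the library, as sketched below. The my_obj_t type and the my_lookup helper are hypothetical
+placeholders for connector-internal code.
+\code
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+/* Hypothetical connector-internal object representation */
+typedef struct my_obj_t {
+    H5I_type_t kind; /* H5I_GROUP, H5I_DATASET, or H5I_DATATYPE */
+    /* ... connector-specific state ... */
+} my_obj_t;
+
+/* Hypothetical lookup in the connector's own metadata (not an HDF5 API) */
+extern my_obj_t *my_lookup(void *container, const H5VL_loc_params_t *loc_params);
+
+/* Sketch of an 'object open' callback for a terminal connector */
+static void *
+my_object_open(void *obj, const H5VL_loc_params_t *loc_params, H5I_type_t *opened_type,
+               hid_t dxpl_id, void **req)
+{
+    (void)dxpl_id; (void)req;
+
+    my_obj_t *found = my_lookup(obj, loc_params);
+    if (!found)
+        return NULL; /* failure is reported by returning NULL */
+
+    /* Tell the library what kind of object was opened */
+    *opened_type = found->kind;
+
+    /* The returned pointer is what the library passes to later callbacks */
+    return (void *)found;
+}
+\endcode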
+ +\subsubsection subsubsecVOLRefObjcopy object: copy +The copy callback in the object class copies the object from the source object to the destination object. It +returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*copy)(void *src_obj, H5VL_loc_params_t *loc_params1, const char *src_name, void *dst_obj, H5VL_loc_params_t *loc_params2, const char *dst_name, hid_t ocpypl_id, hid_t lcpl_id, hid_t dxpl_id, void **req) +\endcode +
Arguments:
+\code
+ src_obj (IN): Pointer to location of the source object to be copied.
+ loc_params1 (IN): Pointer to the location parameters for the source object as explained in "Mapping the API to the Callbacks".
+                   The type can be only H5VL_OBJECT_BY_SELF in this callback.
+ src_name (IN): Name of the source object to be copied.
+ dst_obj (IN): Pointer to location of the destination object or file.
+ loc_params2 (IN): Pointer to the location parameters for the destination object as explained in "Mapping the API to the Callbacks".
+                   The type can be only H5VL_OBJECT_BY_SELF in this callback.
+ dst_name (IN): Name to be assigned to the new copy.
+ ocpypl_id (IN): The object copy property list.
+ lcpl_id (IN): The link creation property list.
+ dxpl_id (IN): The data transfer property list.
+ req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+
+\subsubsection subsubsecVOLRefObjget object: get
+The get callback in the object class retrieves information about the object as specified in
+the get_type parameter. It returns an herr_t indicating success or failure.
+
Signature:
+\code + herr_t (*get)(void *obj, H5VL_loc_params_t *loc_params, H5VL_object_get_args_t *args, hid_t dxpl_id, void **req) +\endcode +
Arguments:
+\code
+ obj (IN): A location object from which information is to be retrieved.
+ loc_params (IN): Pointer to the location parameters for the object as explained in "Mapping the API to the Callbacks".
+ args (IN/OUT): A pointer to the arguments struct.
+ dxpl_id (IN): The data transfer property list.
+ req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+\code +/* Values for object 'get' operation */ +typedef enum H5VL_object_get_t { + H5VL_OBJECT_GET_FILE, /* object file */ + H5VL_OBJECT_GET_NAME, /* object name */ + H5VL_OBJECT_GET_TYPE, /* object type */ + H5VL_OBJECT_GET_INFO /* H5Oget_info(_by_idx|name) */ +} H5VL_object_get_t; + +/* Parameters for object 'get' operations */ +typedef struct H5VL_object_get_args_t { + H5VL_object_get_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_OBJECT_GET_FILE */ + struct { + void **file; /* File object (OUT) */ + } get_file; + + /* H5VL_OBJECT_GET_NAME */ + struct { + size_t buf_size; /* Size of name buffer (IN) */ + char *buf; /* Buffer for name (OUT) */ + size_t *name_len; /* Actual length of name (OUT) */ + } get_name; + + /* H5VL_OBJECT_GET_TYPE */ + struct { + H5O_type_t *obj_type; /* Type of object (OUT) */ + } get_type; + + /* H5VL_OBJECT_GET_INFO */ + struct { + unsigned fields; /* Flags for fields to retrieve */ + H5O_info2_t *oinfo; /* Pointer to object info (OUT) */ + } get_info; + } args; +} H5VL_object_get_args_t; +\endcode + +\subsubsection subsubsecVOLRefObjspec object: specific +The specific callback in the object class implements specific operations on HDF5 objects as specified in +the specific_type parameter. It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*specific)(void *obj, H5VL_loc_params_t *loc_params, H5VL_object_specific_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code
+ obj (IN): A location object where the operation needs to happen.
+ loc_params (IN): Pointer to the location parameters for the object as explained in "Mapping the API to the Callbacks".
+ args (IN/OUT): A pointer to the arguments struct.
+ dxpl_id (IN): The data transfer property list.
+ req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+\code +/* Values for object 'specific' operation */ +typedef enum H5VL_object_specific_t { + H5VL_OBJECT_CHANGE_REF_COUNT, /* H5Oincr/decr_refcount */ + H5VL_OBJECT_EXISTS, /* H5Oexists_by_name */ + H5VL_OBJECT_LOOKUP, /* Lookup object */ + H5VL_OBJECT_VISIT, /* H5Ovisit(_by_name) */ + H5VL_OBJECT_FLUSH, /* H5{D|G|O|T}flush */ + H5VL_OBJECT_REFRESH /* H5{D|G|O|T}refresh */ +} H5VL_object_specific_t; + +/* Parameters for object 'visit' operation */ +typedef struct H5VL_object_visit_args_t { + H5_index_t idx_type; /* Type of index to iterate over */ + H5_iter_order_t order; /* Order of index iteration */ + unsigned fields; /* Flags for fields to provide in 'info' object for 'op' callback */ + H5O_iterate2_t op; /* Iteration callback function */ + void *op_data; /* Iteration callback context */ +} H5VL_object_visit_args_t; + +/* Parameters for object 'specific' operations */ +typedef struct H5VL_object_specific_args_t { + H5VL_object_specific_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_OBJECT_CHANGE_REF_COUNT */ + struct { + int delta; /* Amount to modify object's refcount */ + } change_rc; + + /* H5VL_OBJECT_EXISTS */ + struct { + hbool_t *exists; /* Whether object exists (OUT) */ + } exists; + + /* H5VL_OBJECT_LOOKUP */ + struct { + H5O_token_t *token_ptr; /* Pointer to token for lookup (OUT) */ + } lookup; + + /* H5VL_OBJECT_VISIT */ + H5VL_object_visit_args_t visit; + + /* H5VL_OBJECT_FLUSH */ + struct { + hid_t obj_id; /* Object ID (IN) */ + } flush; + + /* H5VL_OBJECT_REFRESH */ + struct { + hid_t obj_id; /* Object ID (IN) */ + } refresh; + } args; +} H5VL_object_specific_args_t; +\endcode + +\subsubsection subsubsecVOLRefObjopt object: optional +The optional callback in the object class implements connector specific operations on an HDF5 object. It +returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*optional)(void *obj, const H5VL_loc_params_t *loc_params, H5VL_optional_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code
+ obj (IN): A container or object where the operation needs to happen.
+ loc_params (IN): Pointer to the location parameters for the object as explained in "Mapping the API to the Callbacks".
+ args (IN/OUT): A pointer to the arguments struct.
+ dxpl_id (IN): The data transfer property list.
+ req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
+Each connector that requires connector-specific operations should compare the value of the op_type field of +the #H5VL_optional_args_t struct with the values returned from calling #H5VLregister_opt_operation to +determine how to handle the optional call and interpret the arguments passed in the struct. + +\subsection subsecVOLRefIntrospect Introspection Callbacks +Structure for VOL connector introspection callback routines, H5VLconnector.h +\code +typedef struct H5VL_introspect_class_t { + herr_t (*get_conn_cls)(void *obj, H5VL_get_conn_lvl_t lvl, const struct H5VL_class_t **conn_cls); + herr_t (*get_cap_flags)(const void *info, uint64_t *cap_flags); + herr_t (*opt_query)(void *obj, H5VL_subclass_t cls, int opt_type, uint64_t *flags); +} H5VL_introspect_class_t; +\endcode + +\subsubsection subsubsecVOLRefIntrospectcls introspect: get_conn_cls +Get a connector's #H5VL_class_t struct. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*get_conn_cls)(void *obj, H5VL_get_conn_lvl_t lvl, const struct H5VL_class_t **conn_cls); +\endcode +
Arguments:
+\code
+ obj (IN): The VOL object.
+ lvl (IN): Current or terminal connector.
+ conn_cls (OUT): A const pointer to the connector's class struct.
+\endcode
+
+The "lvl" argument is an enum: +\code +/* "Levels" for 'get connector class' introspection callback */ +typedef enum H5VL_get_conn_lvl_t { + H5VL_GET_CONN_LVL_CURR, /* Get "current" connector (for this object) */ + H5VL_GET_CONN_LVL_TERM /* Get "terminal" connector (for this object) */ + /* (Recursively called, for pass-through connectors) */ + /* (Connectors that "split" must choose which connector to return) */ +} H5VL_get_conn_lvl_t; +\endcode + +\subsubsection subsubsecVOLRefIntrospecflags introspect: get_cap_flags +Get a connector's capability flags. + + + + + + + + + + + + + +
Signature:
+\code
+ herr_t (*get_cap_flags)(const void *info, uint64_t *cap_flags)
+\endcode
+
Arguments:
+\code + info (IN): A const pointer to pertinent VOL info. + cap_flags (OUT): A pointer to capability flags. +\endcode +
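+
+As an example, a pass-through connector might report the capabilities of the connector underneath it,
+combined with any capabilities it adds itself, using the #H5VLintrospect_get_cap_flags wrapper listed in
+Appendix B. The pt_info_t type and the PT_EXTRA_CAP_FLAGS value below are hypothetical.
+\code
+#include "hdf5.h"
+#include "H5VLconnector.h"
+#include "H5VLconnector_passthru.h"
+
+/* Hypothetical pass-through connector info: identifies the underlying connector */
+typedef struct pt_info_t {
+    hid_t under_vol_id; /* registered ID of the underlying connector */
+    void *under_info;   /* info object for the underlying connector */
+} pt_info_t;
+
+#define PT_EXTRA_CAP_FLAGS 0 /* capabilities added by this connector (none here) */
+
+/* Sketch of an 'introspect get_cap_flags' callback for a pass-through connector */
+static herr_t
+pt_get_cap_flags(const void *info, uint64_t *cap_flags)
+{
+    const pt_info_t *pinfo = (const pt_info_t *)info;
+
+    /* Start with the capabilities of the underlying connector... */
+    if (H5VLintrospect_get_cap_flags(pinfo->under_info, pinfo->under_vol_id, cap_flags) < 0)
+        return -1;
+
+    /* ...and add anything this connector provides on top */
+    *cap_flags |= PT_EXTRA_CAP_FLAGS;
+
+    return 0;
+}
+\endcode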
+ +\subsubsection subsubsecVOLRefIntrospecquery introspect: opt_query +Query a class for a capability or functionality. + + + + + + + + + + + + + +
Signature:
+\code
+ herr_t (*opt_query)(void *obj, H5VL_subclass_t cls, int opt_type, uint64_t *flags);
+\endcode
+
Arguments:
+\code
+ obj (IN): The VOL object.
+ cls (IN): The VOL 'class' to query.
+ opt_type (IN): The specific option to query.
+ flags (OUT): Bitwise flags indicating whether the operation is supported and how it behaves.
+\endcode
+
+The "cls" argument is an enum: +\code +// Enum type for each VOL subclass +// (Used for various queries, etc) +typedef enum H5VL_subclass_t { + H5VL_SUBCLS_NONE, // Operations outside of a subclass + H5VL_SUBCLS_INFO, // 'Info' subclass + H5VL_SUBCLS_WRAP, // 'Wrap' subclass + H5VL_SUBCLS_ATTR, // 'Attribute' subclass + H5VL_SUBCLS_DATASET, // 'Dataset' subclass + H5VL_SUBCLS_DATATYPE, // 'Named datatype' subclass + H5VL_SUBCLS_FILE, // 'File' subclass + H5VL_SUBCLS_GROUP, // 'Group' subclass + H5VL_SUBCLS_LINK, // 'Link' subclass + H5VL_SUBCLS_OBJECT, // 'Object' subclass + H5VL_SUBCLS_REQUEST, // 'Request' subclass + H5VL_SUBCLS_BLOB, // 'Blob' subclass + H5VL_SUBCLS_TOKEN // 'Token' subclass +} H5VL_subclass_t; +\endcode + +\subsection subsecVOLRefReq Request (Async) Callbacks +Structure for async request callback routines, H5VLconnector.h +\code +typedef struct H5VL_request_class_t { + herr_t (*wait)(void *req, uint64_t timeout, H5VL_request_status_t *status); + herr_t (*notify)(void *req, H5VL_request_notify_t cb, void *ctx); + herr_t (*cancel)(void *req, H5VL_request_status_t *status); + herr_t (*specific)(void *req, H5VL_request_specific_args_t *args); + herr_t (*optional)(void *req, H5VL_optional_args_t *args); + herr_t (*free)(void *req); +} H5VL_request_class_t; +\endcode + +\subsubsection subsubsecVOLRefReqwait request: wait +Wait (with a timeout) for an async operation to complete. Releases the request if the operation has completed +and the connector callback succeeds. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*wait)(void *req, uint64_t timeout, H5VL_request_status_t *status); +\endcode +
Arguments:
+\code
+ req (IN): The async request on which to wait.
+ timeout (IN): The timeout value.
+ status (OUT): The status of the operation (see below).
+\endcode
+
+The "status" argument is an enum: +\code +/* Status values for async request operations */ +typedef enum H5VL_request_status_t { + H5VL_REQUEST_STATUS_IN_PROGRESS, /* Operation has not yet completed */ + H5VL_REQUEST_STATUS_SUCCEED, /* Operation has completed, successfully */ + H5VL_REQUEST_STATUS_FAIL, /* Operation has completed, but failed */ + H5VL_REQUEST_STATUS_CANT_CANCEL, /* An attempt to cancel this operation was made, but it */ + /* can't be canceled immediately. The operation has */ + /* not completed successfully or failed, and is not yet */ + /* in progress. Another attempt to cancel it may be */ + /* attempted and may (or may not) succeed. */ + H5VL_REQUEST_STATUS_CANCELED /* Operation has not completed and was canceled */ +} H5VL_request_status_t; +\endcode + +\subsubsection subsubsecVOLRefReqnotify request: notify +Registers a user callback to be invoked when an asynchronous operation completes. Releases the request if +connector callback succeeds. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*notify)(void *req, H5VL_request_notify_t cb, void *ctx); +\endcode +
Arguments:
+\code + req (IN): The async request that will receive the notify callback. + cb (IN): The notify callback for the request. + ctx (IN): The request's context. +\endcode +
+
+The "cb" argument is a callback function pointer type:
+\code
+ typedef herr_t (*H5VL_request_notify_t)(void *ctx, H5ES_status_t status)
+\endcode
+
+\subsubsection subsubsecVOLRefReqcancel request: cancel
+Attempts to cancel an asynchronous operation.
+
Signature:
+\code + herr_t (*cancel)(void *req, H5VL_request_status_t *status); +\endcode +
Arguments:
+\code
+ req (IN): The async request to be cancelled.
+ status (OUT): The status of the operation (see below).
+\endcode
+
+The "status" argument is an enum: +\code +/* Status values for async request operations */ +typedef enum H5VL_request_status_t { + H5VL_REQUEST_STATUS_IN_PROGRESS, /* Operation has not yet completed */ + H5VL_REQUEST_STATUS_SUCCEED, /* Operation has completed, successfully */ + H5VL_REQUEST_STATUS_FAIL, /* Operation has completed, but failed */ + H5VL_REQUEST_STATUS_CANT_CANCEL, /* An attempt to cancel this operation was made, but it */ + /* can't be canceled immediately. The operation has */ + /* not completed successfully or failed, and is not yet */ + /* in progress. Another attempt to cancel it may be */ + /* attempted and may (or may not) succeed. */ + H5VL_REQUEST_STATUS_CANCELED /* Operation has not completed and was canceled */ +} H5VL_request_status_t; +\endcode + +\subsubsection subsubsecVOLRefReqspec request: specific +Perform a specific operation on an asynchronous request. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*specific)(void *req, H5VL_request_specific_args_t *args); +\endcode +
Arguments:
+\code + req (IN): The async request on which to perform the operation. + args (IN/OUT): A pointer to the arguments struct. +\endcode +
+\code +/* Values for async request 'specific' operation */ +typedef enum H5VL_request_specific_t { + H5VL_REQUEST_GET_ERR_STACK, /* Retrieve error stack for failed operation */ + H5VL_REQUEST_GET_EXEC_TIME /* Retrieve execution time for operation */ +} H5VL_request_specific_t; + +/* Parameters for request 'specific' operations */ +typedef struct H5VL_request_specific_args_t { + H5VL_request_specific_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_REQUEST_GET_ERR_STACK */ + struct { + hid_t err_stack_id; /* Error stack ID for operation (OUT) */ + } get_err_stack; + + /* H5VL_REQUEST_GET_EXEC_TIME */ + struct { + uint64_t *exec_ts; /* Timestamp for start of task execution (OUT) */ + uint64_t *exec_time; /* Duration of task execution (in ns) (OUT) */ + } get_exec_time; + } args; +} H5VL_request_specific_args_t; +\endcode + +\subsubsection subsubsecVOLRefReqopt request: optional +Perform a connector-specific operation for a request. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*optional)(void *req, H5VL_optional_args_t *args); +\endcode +
Arguments:
+\code + req (IN): The async request on which to perform the operation. + args (IN/OUT): A pointer to the arguments struct. +\endcode +
+Each connector that requires connector-specific operations should compare the value of the op_type field of +the #H5VL_optional_args_t struct with the values returned from calling #H5VLregister_opt_operation to +determine how to handle the optional call and interpret the arguments passed in the struct. + +\subsubsection subsubsecVOLRefReqfree request: free +Frees an asynchronous request. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*free)(void *req); +\endcode +
Arguments:
+\code + req (IN): The async request to be freed. +\endcode +
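+
+For instance, a connector whose request objects are heap-allocated task descriptors might implement the
+free callback as sketched below; the my_request_t type and its fields are hypothetical.
+\code
+#include <stdlib.h>
+#include "hdf5.h"
+
+/* Hypothetical connector-internal request descriptor */
+typedef struct my_request_t {
+    int   state;     /* connector-specific progress state */
+    void *task_data; /* connector-specific payload */
+} my_request_t;
+
+/* Sketch of a 'request free' callback: release whatever the connector
+ * allocated when the asynchronous operation was started. */
+static herr_t
+my_request_free(void *req)
+{
+    my_request_t *r = (my_request_t *)req;
+
+    if (r) {
+        free(r->task_data);
+        free(r);
+    }
+    return 0;
+}
+\endcode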
+ +\subsection subsecVOLRefBlob Blob Callbacks +Structure for blob callback routines, H5VLconnector.h +\code +typedef struct H5VL_blob_class_t { + herr_t (*put)(void *obj, const void *buf, size_t size, void *blob_id, void *ctx); + herr_t (*get)(void *obj, const void *blob_id, void *buf, size_t size, void *ctx); + herr_t (*specific)(void *obj, void *blob_id, H5VL_blob_specific_args_t *args); + herr_t (*optional)(void *obj, void *blob_id, H5VL_optional_args_t *args); +} H5VL_blob_class_t; +\endcode + +\subsubsection subsubsecVOLRefBlobput blob: put +Put a blob through the VOL. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*put)(void *obj, const void *buf, size_t size, void *blob_id, void *ctx); +\endcode +
Arguments:
+\code + obj (IN): Pointer to the blob container. + buf (IN): Pointer to the blob. + size (IN): Size of the blob. + blob_id (OUT): Pointer to the blob's connector-specific ID. + ctx (IN): Connector-specific blob context. +\endcode +
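+
+To make the data flow concrete, the sketch below shows a toy put implementation for a connector that keeps
+blob data in process memory and uses the heap address of its blob record as the connector-specific ID.
+Real connectors will usually store the blob in the container instead; my_blob_t and the assumption that the
+blob_id buffer is at least pointer-sized are purely illustrative.
+\code
+#include <stdlib.h>
+#include <string.h>
+#include "hdf5.h"
+
+/* Hypothetical in-memory blob record */
+typedef struct my_blob_t {
+    size_t size;
+    void  *data;
+} my_blob_t;
+
+/* Sketch of a 'blob put': copy the caller's bytes and hand back an ID.
+ * Here the ID is simply the heap address of the blob record, written into
+ * the connector-sized blob_id buffer. */
+static herr_t
+my_blob_put(void *obj, const void *buf, size_t size, void *blob_id, void *ctx)
+{
+    (void)obj; (void)ctx;
+
+    my_blob_t *blob = (my_blob_t *)malloc(sizeof(my_blob_t));
+    if (!blob)
+        return -1;
+
+    blob->size = size;
+    blob->data = (size > 0) ? malloc(size) : NULL;
+    if (size > 0 && !blob->data) {
+        free(blob);
+        return -1;
+    }
+    if (size > 0)
+        memcpy(blob->data, buf, size);
+
+    /* Store the record's address as the blob ID */
+    memcpy(blob_id, &blob, sizeof(blob));
+    return 0;
+}
+\endcode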
+ +\subsubsection subsubsecVOLRefBlobget blob: get +Get a blob through the VOL. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*get)(void *obj, const void *blob_id, void *buf, size_t size, void *ctx); +\endcode +
Arguments:
+\code + obj (IN): Pointer to the blob container. + blob_id (IN): Pointer to the blob's connector-specific ID. + buf (IN/OUT): Pointer to the blob. + size (IN): Size of the blob. + ctx (IN): Connector-specific blob context. +\endcode +
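+
+Continuing the toy in-memory example from the put callback above, the matching get recovers the record
+address from blob_id and copies the stored bytes back into the caller's buffer; my_blob_t and the
+pointer-sized blob ID are again illustrative assumptions.
+\code
+#include <string.h>
+#include "hdf5.h"
+
+/* Hypothetical in-memory blob record (see the put sketch above) */
+typedef struct my_blob_t {
+    size_t size;
+    void  *data;
+} my_blob_t;
+
+/* Sketch of a 'blob get': decode the ID written by the put callback and
+ * copy the stored bytes into the caller's buffer. */
+static herr_t
+my_blob_get(void *obj, const void *blob_id, void *buf, size_t size, void *ctx)
+{
+    (void)obj; (void)ctx;
+
+    my_blob_t *blob = NULL;
+    memcpy(&blob, blob_id, sizeof(blob));
+    if (!blob)
+        return -1;
+
+    /* Copy no more than the caller asked for (or than was stored) */
+    size_t n = (size < blob->size) ? size : blob->size;
+    if (n > 0)
+        memcpy(buf, blob->data, n);
+
+    return 0;
+}
+\endcode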
+ +\subsubsection subsubsecVOLRefBlobspec blob: specific +Perform a defined operation on a blob via the VOL. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*specific)(void *obj, void *blob_id, H5VL_blob_specific_args_t *args); +\endcode +
Arguments:
+\code + obj (IN): Pointer to the blob container. + blob_id (IN): Pointer to the blob's connector-specific ID. + args (IN/OUT): A pointer to the arguments struct. +\endcode +
+\code +/* Values for 'blob' 'specific' operation */ +typedef enum H5VL_blob_specific_t { + H5VL_BLOB_DELETE, /* Delete a blob (by ID) */ + H5VL_BLOB_ISNULL, /* Check if a blob ID is "null" */ + H5VL_BLOB_SETNULL /* Set a blob ID to the connector's "null" blob ID value */ +} H5VL_blob_specific_t; + +/* Parameters for blob 'specific' operations */ +typedef struct H5VL_blob_specific_args_t { + H5VL_blob_specific_t op_type; /* Operation to perform */ + + /* Parameters for each operation */ + union { + /* H5VL_BLOB_DELETE */ + /* No args */ + + /* H5VL_BLOB_ISNULL */ + struct { + hbool_t *isnull; /* Whether blob ID is "null" (OUT) */ + } is_null; + + /* H5VL_BLOB_SETNULL */ + /* No args */ + } args; +} H5VL_blob_specific_args_t; +\endcode + +\subsubsection subsubsecVOLRefBlobopt blob: optional +Perform a connector-specific operation on a blob via the VOL + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*optional)(void *obj, void *blob_id, H5VL_optional_args_t *args); +\endcode +
Arguments:
+\code + obj (IN): Pointer to the blob container. + blob_id (IN): Pointer to the blob's connector-specific ID. + args (IN/OUT): A pointer to the arguments struct. +\endcode +
+Each connector that requires connector-specific operations should compare the value of the op_type field of +the #H5VL_optional_args_t struct with the values returned from calling #H5VLregister_opt_operation to +determine how to handle the optional call and interpret the arguments passed in the struct + +\subsection subsecVOLRefToken Token Callbacks +Structure for token callback routines, H5VLconnector.h +\code +typedef struct H5VL_token_class_t { + herr_t (*cmp)(void *obj, const H5O_token_t *token1, const H5O_token_t *token2, int *cmp_value); + herr_t (*to_str)(void *obj, H5I_type_t obj_type, const H5O_token_t *token, char **token_str); + herr_t (*from_str)(void *obj, H5I_type_t obj_type, const char *token_str, H5O_token_t *token); +} H5VL_token_class_t; +\endcode + +\subsubsection subsubsecVOLRefTokencmp token: cmp +Compares two tokens and outputs a value like strcmp. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*cmp)(void *obj, const H5O_token_t *token1, const H5O_token_t *token2, int *cmp_value); +\endcode +
Arguments:
+\code
+ obj (IN): The underlying VOL object.
+ token1 (IN): The first token to compare.
+ token2 (IN): The second token to compare.
+ cmp_value (OUT): A comparison value, with the same semantics as the return value of strcmp.
+\endcode
+
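+
+Connectors that use opaque, fixed-size tokens can often implement this callback as a byte comparison, as in
+the sketch below; this mirrors what a simple connector might do and is not required behavior.
+\code
+#include <string.h>
+#include "hdf5.h"
+
+/* Sketch of a 'token cmp' callback: treat tokens as opaque byte strings
+ * and compare them the way strcmp compares strings. */
+static herr_t
+my_token_cmp(void *obj, const H5O_token_t *token1, const H5O_token_t *token2, int *cmp_value)
+{
+    (void)obj;
+
+    if (!token1 || !token2 || !cmp_value)
+        return -1;
+
+    *cmp_value = memcmp(token1, token2, sizeof(H5O_token_t));
+    return 0;
+}
+\endcode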
+ +\subsubsection subsubsecVOLRefTokento token: to_str +Converts a token to a string representation. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*to_str)(void *obj, H5I_type_t obj_type, const H5O_token_t *token, char **token_str) +\endcode +
Arguments:
+\code + obj (IN): The underlying VOL object. + obj_type (IN): The type of the object. + token (IN): The token to turn into a string representation. + token_str (OUT): The string representation of the token. +\endcode +
+The "obj_type" argument is an enum: (from H5Ipublic.h) +\snippet H5Ipublic.h H5I_type_t_snip +The only values which should be used for this call are: +\li #H5I_GROUP +\li #H5I_DATATYPE +\li #H5I_DATASET +\li #H5I_MAP + +as these are the only objects for which tokens are valid. + +\subsubsection subsubsecVOLRefTokenfrom token: from_str +Converts a string representation of a token to a token. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*from_str)(void *obj, H5I_type_t obj_type, const char *token_str, H5O_token_t *token); +\endcode +
Arguments:
+\code
+ obj (IN): The underlying VOL object.
+ obj_type (IN): The type of the object.
+ token_str (IN): The string representation of the token.
+ token (OUT): The token created from the string representation.
+\endcode
+
+The "obj_type" argument is an enum: (from H5Ipublic.h) +\snippet H5Ipublic.h H5I_type_t_snip +The only values which should be used for this call are: +\li #H5I_GROUP +\li #H5I_DATATYPE +\li #H5I_DATASET +\li #H5I_MAP + +as these are the only objects for which tokens are valid. + +\subsection subsecVOLRefOpt Optional Generic Callback +A generic optional callback is provided for services that are specific to a connector. +The optional callback has the following definition. It returns an herr_t indicating success or failure. + + + + + + + + + + + + + +
Signature:
+\code + herr_t (*optional)(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req); +\endcode +
Arguments:
+\code
+ obj (IN): A container or object where the operation needs to happen.
+ args (IN/OUT): A pointer to the arguments struct.
+ dxpl_id (IN): The data transfer property list.
+ req (IN/OUT): A pointer to the asynchronous request of the operation created by the connector.
+\endcode
+
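+
+The sketch below shows one way a connector might tie these pieces together: it registers a connector-specific
+dataset operation with #H5VLregister_opt_operation at startup (assuming its subclass / operation-name /
+returned-value signature) and then matches the stored value inside its optional callback. The operation name
+and all my_* names are hypothetical.
+\code
+#include "hdf5.h"
+#include "H5VLconnector.h"
+
+/* Value assigned to this connector's hypothetical "prefetch" operation when it
+ * is registered; -1 means "not registered yet". */
+static int my_op_prefetch_g = -1;
+
+/* Called once at connector startup to register the optional operation */
+static herr_t
+my_register_optional_ops(void)
+{
+    return H5VLregister_opt_operation(H5VL_SUBCLS_DATASET, "my_connector.prefetch",
+                                      &my_op_prefetch_g);
+}
+
+/* Hypothetical argument struct passed through the args field of H5VL_optional_args_t */
+typedef struct my_prefetch_args_t {
+    size_t nbytes; /* how much data to prefetch */
+} my_prefetch_args_t;
+
+/* Sketch of a dataset 'optional' callback dispatching on the registered value */
+static herr_t
+my_dataset_optional(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void **req)
+{
+    (void)obj; (void)dxpl_id; (void)req;
+
+    if (args->op_type == my_op_prefetch_g) {
+        my_prefetch_args_t *pa = (my_prefetch_args_t *)args->args;
+        /* ... start prefetching pa->nbytes bytes for this dataset ... */
+        (void)pa;
+        return 0;
+    }
+
+    return -1; /* unknown optional operation */
+}
+\endcode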
+ +\section secVOLNew New VOL API Routines +API routines have been added to the HDF5 library to manage VOL connectors. This section details each +new API call and explains its intended usage. Additionally, a set of API calls that map directly to the VOL +callbacks themselves have been added to aid in the development of passthrough connectors which can be +stacked and/or split. A list of these API calls is given in an appendix. + +\subsection subsecVOLNewPub H5VLpublic.h +The API calls in this header are for VOL management and general use (i.e., not limited to VOL connector authors). + +\subsubsection subsubsecVOLNewPubregname H5VLregister_connector_by_name + + + + + + + + + + + + + +
Signature:
+\code + hid_t H5VLregister_connector_by_name(const char *connector_name, hid_t vipl_id); +\endcode +
Arguments:
+\code + connector_name (IN): The connector name to search for and register. + vipl_id (IN): An ID for a VOL initialization property list (vipl). +\endcode +
+
+Registers a VOL connector with the HDF5 library given the name of the connector and returns an identifier
+for it (#H5I_INVALID_HID on errors). If the connector is already registered, the library will create a new
+identifier for it and return it to the user; otherwise the library will search the plugin path for a connector
+of that name, loading and registering it and returning an ID for it, if found. See the \ref H5VL_UG for more
+information on loading plugins and the search paths.
+
+\subsubsection subsubsecVOLNewPubregval H5VLregister_connector_by_value
+
Signature:
+\code + hid_t H5VLregister_connector_by_value(H5VL_class_value_t connector_value, hid_t vipl_id); +\endcode +
Arguments:
+\code + connector_value (IN): The connector value to search for and register. + vipl_id (IN): An ID for a VOL initialization property list (vipl). +\endcode +
+
+Registers a VOL connector with the HDF5 library given a value for the connector and returns an identifier
+for it (#H5I_INVALID_HID on errors). If the connector is already registered, the library will create a new
+identifier for it and return it to the user; otherwise the library will search the plugin path for a connector
+with that value, loading and registering it and returning an ID for it, if found. See the VOL User Guide for more
+information on loading plugins and the search paths.
+
+\subsubsection subsubsecVOLNewPubis_name H5VLis_connector_registered_by_name
+
Signature:
+\code + htri_t H5VLis_connector_registered_by_name(const char *name); +\endcode +
Arguments:
+\code + name (IN): The connector name to check for. +\endcode +
+Checks if a VOL connector is registered with the library given the connector name and returns TRUE/FALSE +on success, otherwise it returns a negative value. + +\subsubsection subsubsecVOLNewPubis_value H5VLis_connector_registered_by_value + + + + + + + + + + + + + +
Signature:
+\code + htri_t H5VLis_connector_registered_by_value(H5VL_class_value_t connector_value); +\endcode +
Arguments:
+\code + connector_value (IN): The connector value to check for. +\endcode +
+Checks if a VOL connector is registered with the library given the connector value and returns TRUE/FALSE +on success, otherwise it returns a negative value. + +\subsubsection subsubsecVOLNewPubget_id H5VLget_connector_id + + + + + + + + + + + + + +
Signature:
+\code + hid_t H5VLget_connector_id(hid_t obj_id); +\endcode +
Arguments:
+\code + obj_id (IN): An ID for an HDF5 VOL object. +\endcode +
+Given a VOL object such as a dataset or an attribute, this function returns an identifier for its associated +connector. If the ID is not a VOL object (such as a dataspace or uncommitted datatype), #H5I_INVALID_HID +is returned. The identifier must be released with a call to #H5VLclose. + +\subsubsection subsubsecVOLNewPubget_by_name H5VLget_connector_id_by_name + + + + + + + + + + + + + +
Signature:
+\code + hid_t H5VLget_connector_id_by_name(const char *name); +\endcode +
Arguments:
+\code + name (IN): The connector name to check for. +\endcode +
+
+Given a connector name that is registered with the library, this function returns an identifier for the connector.
+If the connector is not registered with the library, #H5I_INVALID_HID is returned. The identifier must be
+released with a call to #H5VLclose.
+
+\subsubsection subsubsecVOLNewPubget_by_value H5VLget_connector_id_by_value
+
Signature:
+\code + hid_t H5VLget_connector_id_by_value(H5VL_class_value_t connector_value); +\endcode +
Arguments:
+\code + connector_value (IN): The connector value to check for. +\endcode +
+
+Given a connector value that is registered with the library, this function returns an identifier for the connector.
+If the connector is not registered with the library, #H5I_INVALID_HID is returned. The identifier must be
+released with a call to #H5VLclose.
+
+\subsubsection subsubsecVOLNewPubget_name H5VLget_connector_name
+
Signature:
+\code + ssize_t H5VLget_connector_name(hid_t id, char *name /*out*/, size_t size); +\endcode +
Arguments:
+\code
+ id (IN): The object identifier to check.
+ name (OUT): Buffer pointer to put the connector name. If NULL, the library just returns the size
+             required to store the connector name.
+ size (IN): The size of the passed-in buffer.
+\endcode
+
+
+Retrieves the name of a VOL connector given an object identifier that was created/opened with it. On
+success, the name length is returned.
+
+\subsubsection subsubsecVOLNewPubclose H5VLclose
+
Signature:
+\code + herr_t H5VLclose(hid_t connector_id); +\endcode +
Arguments:
+\code + connector_id (IN): A valid identifier of the connector to close. +\endcode +
+
+Shuts down access to the connector that the identifier points to and releases resources associated with it.
+
+\subsubsection subsubsecVOLNewPubunreg H5VLunregister_connector
+
Signature:
+\code + herr_t H5VLunregister_connector(hid_t connector_id); +\endcode +
Arguments:
+\code + connector_id (IN): A valid identifier of the connector to unregister. +\endcode +
+
+Unregisters a connector from the library, returning a positive value on success and a negative value
+otherwise. The native VOL connector cannot be unregistered (this will return a negative #herr_t value).
+
+\subsubsection subsubsecVOLNewPubquery H5VLquery_optional
+
Signature:
+\code + herr_t H5VLquery_optional(hid_t obj_id, H5VL_subclass_t subcls, int opt_type, uint64_t *flags); +\endcode +
Arguments:
+\code + obj_id (IN): A valid identifier of a VOL-managed object. + subcls (IN): The subclass of the optional operation. + opt_type (IN): The optional operation. The native VOL connector uses hard-coded values. Other + VOL connectors get this value when the optional operations are registered. + flags (OUT): Bitwise flags indicating support and behavior. +\endcode +
+Determines if a connector or connector stack (determined from the passed-in object) supports an optional +operation. The returned flags (listed below) not only indicate whether the operation is supported or not, +but also give a sense of the option's behavior (useful for pass-through connectors). + +Bitwise query flag values: +\code + #define H5VL_OPT_QUERY_SUPPORTED 0x0001 /* VOL connector supports this operation */ + #define H5VL_OPT_QUERY_READ_DATA 0x0002 /* Operation reads data for object */ + #define H5VL_OPT_QUERY_WRITE_DATA 0x0004 /* Operation writes data for object */ + #define H5VL_OPT_QUERY_QUERY_METADATA 0x0008 /* Operation reads metadata for object */ + #define H5VL_OPT_QUERY_MODIFY_METADATA 0x0010 /* Operation modifies metadata for object */ + #define H5VL_OPT_QUERY_COLLECTIVE 0x0020 /* Operation is collective (operations without this flag are assumed to be independent) */ + #define H5VL_OPT_QUERY_NO_ASYNC 0x0040 /* Operation may NOT be executed asynchronously */ + #define H5VL_OPT_QUERY_MULTI_OBJ 0x0080 /* Operation involves multiple objects */ +\endcode + +\subsection subsecVOLNewConn H5VLconnector.h +This functionality is intended for VOL connector authors and includes helper functions that are useful for +writing connectors. + +API calls to manage optional operations are also found in this header file. These are discussed in the section +on optional operations, above. + +\subsubsection subsubsecVOLNewConnreg H5VLregister_connector + + + + + + + + + + + + + +
Signature:
+\code + hid_t H5VLregister_connector(const H5VL_class_t *cls, hid_t vipl_id); +\endcode +
Arguments:
+\code + cls (IN): A pointer to the connector structure to register. + vipl_id (IN): An ID for a VOL initialization property list (vipl). +\endcode +
+Registers a user-defined VOL connector with the HDF5 library and returns an identifier for that connector +(#H5I_INVALID_HID on errors). This function is used when the application has direct access to the connector +it wants to use and is able to obtain a pointer for the connector structure to pass to the HDF5 library. + +\subsubsection subsubsecVOLNewConnobj H5VLobject + + + + + + + + + + + + + +
Signature:
+\code + void *H5VLobject(hid_t obj_id); +\endcode +
Arguments:
+\code + obj_id (IN): identifier of the object to dereference. +\endcode +
+Retrieves a pointer to the VOL object from an HDF5 file or object identifier. + +\subsubsection subsubsecVOLNewConnget H5VLget_file_type + + + + + + + + + + + + + +
Signature:
+\code + hid_t H5VLget_file_type(void *file_obj, hid_t connector_id, hid_t dtype_id); +\endcode +
Arguments:
+\code + file_obj (IN): pointer to a file or file object's connector-specific data. + connector_id (IN): A valid identifier of the connector to use. + dtype_id (IN): A valid identifier for the type. +\endcode +
+Returns a copy of the dtype_id parameter but with the location set to be in the file. Returns a negative +value (#H5I_INVALID_HID) on errors. + +\subsubsection subsubsecVOLNewConnpeek_name H5VLpeek_connector_id_by_name + + + + + + + + + + + + + +
Signature:
+\code + hid_t H5VLpeek_connector_id_by_name(const char *name); +\endcode +
Arguments:
+\code + name (IN): name of the connector to query. +\endcode +
+
+Retrieves the ID for a registered VOL connector based on a connector name. This is done without duplicating
+the ID and transferring ownership to the caller (as is normally the case in the HDF5 library). The ID returned
+from this operation should not be closed. This is intended for use by VOL connectors to find their own ID.
+Returns a negative value (#H5I_INVALID_HID) on errors.
+
+\subsubsection subsubsecVOLNewConnpeek_value H5VLpeek_connector_id_by_value
+
Signature:
+\code + hid_t H5VLpeek_connector_id_by_value(H5VL_class_value_t value); +\endcode +
Arguments:
+\code + value (IN): value of the connector to query. +\endcode +
+
+Retrieves the ID for a registered VOL connector based on a connector value. This is done without duplicating
+the ID and transferring ownership to the caller (as is normally the case in the HDF5 library). The ID returned
+from this operation should not be closed. This is intended for use by VOL connectors to find their own ID.
+Returns a negative value (#H5I_INVALID_HID) on errors.
+
+\subsection subsecVOLNewPass H5VLconnector_passthru.h
+This functionality is intended for VOL connector authors who are writing pass-through connectors and
+includes helper functions that are useful for writing such connectors. Callback-equivalent functions can be
+found in this header as well. A list of these functions is included as an appendix to this document.
+
+\subsubsection subsubsecVOLNewPasscmp H5VLcmp_connector_cls
+
Signature:
+\code + herr_t H5VLcmp_connector_cls(int *cmp, hid_t connector_id1, hid_t connector_id2); +\endcode +
Arguments:
+\code + cmp (OUT): a value like strcmp. + connector_id1 (IN): the ID of the first connector to compare. + connector_id2 (IN): the ID of the second connector to compare +\endcode +
+Compares two connectors (given by the connector IDs) to see if they refer to the same connector underneath. +Returns a positive value on success and a negative value on errors. + +\subsubsection subsubsecVOLNewPasswrap H5VLwrap_register + + + + + + + + + + + + + +
Signature:
+\code + hid_t H5VLwrap_register(void *obj, H5I_type_t type); +\endcode +
Arguments:
+\code + obj (IN): an object to wrap. + type (IN): the type of the object (see below). +\endcode +
+
+Wraps an internal object with a "wrap context" and registers and returns an hid_t for the resulting object.
+This routine is mainly targeted toward wrapping objects for iteration routine callbacks
+(i.e. the callbacks from H5Aiterate*, H5Literate* / H5Lvisit*, and H5Ovisit* ). Using it in an application
+will return an error indicating the API context isn't available or can't be retrieved.
+The type must be a VOL-managed object class:
+\li #H5I_FILE
+\li #H5I_GROUP
+\li #H5I_DATATYPE
+\li #H5I_DATASET
+\li #H5I_MAP
+\li #H5I_ATTR
+
+\subsubsection subsubsecVOLNewPassretrieve H5VLretrieve_lib_state
+
Signature:
+\code + herr_t H5VLretrieve_lib_state(void **state); +\endcode +
Arguments:
+\code + state (OUT): the library state. +\endcode +
+Retrieves a copy of the internal state of the HDF5 library, so that it can be restored later. Returns a positive +value on success and a negative value on errors. + +\subsubsection subsubsecVOLNewPassstar H5VLstart_lib_state + + + + + + + +
Signature:
+\code + herr_t H5VLstart_lib_state(void); +\endcode +
+Opens a new internal state for the HDF5 library. Returns a positive value on success and a negative value +on errors. + +\subsubsection subsubsecVOLNewPassrestore H5VLrestore_lib_state + + + + + + + + + + + + + +
Signature:
+\code + herr_t H5VLrestore_lib_state(const void *state); +\endcode +
Arguments:
+\code + state (IN): the library state. +\endcode +
+Restores the internal state of the HDF5 library. Returns a positive value on success and a negative value on errors. + +\subsubsection subsubsecVOLNewPassfinish H5VLfinish_lib_state + + + + + + + +
Signature:
+\code + herr_t H5VLfinish_lib_state(void); +\endcode +
+Closes the state of the library, undoing the effects of #H5VLstart_lib_state. Returns a positive value on +success and a negative value on errors. + +\subsubsection subsubsecVOLNewPassfree H5VLfree_lib_state + + + + + + + + + + + + + +
Signature:
+\code + herr_t H5VLfree_lib_state(void *state); +\endcode +
Arguments:
+\code + state (IN): the library state. +\endcode +
+Free a retrieved library state. Returns a positive value on success and a negative value on errors. + +\section secVOLAppA Appendix A Mapping of VOL Callbacks to HDF5 API Calls + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
VOL CallbackHDF5 API Call
FILE
create\li #H5Fcreate
open +\li #H5Fopen +
get +\li #H5Fget_access_plist +\li #H5Fget_create_plist +\li #H5Fget_fileno +\li #H5Fget_intent +\li #H5Fget_name +\li #H5Fget_obj_count +\li #H5Fget_obj_ids +
specific +\li #H5Fdelete +\li #H5Fflush +\li #H5Fis_accessible +\li #H5Fis_hdf5 (deprecated, hard-coded to use native connector) +\li #H5Freopen +
close +\li #H5Fclose +
GROUP
create +\li #H5Gcreate1 (deprecated) +\li #H5Gcreate2 +\li #H5Gcreate_anon +
open +\li #H5Gopen1 (deprecated) +\li #H5Gopen2 +
get +\li #H5Gget_create_plist +\li #H5Gget_info +\li #H5Gget_info_by_idx +\li #H5Gget_info_by_name +\li #H5Gget_num_objs (deprecated) +
specific +\li #H5Fmount +\li #H5Funmount +\li #H5Gflush +\li #H5Grefresh +
close +\li #H5Gclose +
DATASET
create +\li #H5Dcreate1 (deprecated) +\li #H5Dcreate2 +
open +\li #H5Dopen1 (deprecated) +\li #H5Dopen2 +
read +\li #H5Dread +
write +\li #H5Dwrite +
get +\li #H5Dget_access_plist +\li #H5Dget_create_plist +\li #H5Dget_space +\li #H5Dget_space_status +\li #H5Dget_storage_size +\li #H5Dget_type +
specific +\li #H5Dextend (deprecated) +\li #H5Dflush +\li #H5Drefresh +\li #H5Dset_extent +
close +\li #H5Dclose +
OBJECT
open +\li #H5Oopen +\li #H5Oopen_by_addr (deprecated) +\li #H5Oopen_by_idx +\li #H5Oopen_by_token +
copy +\li #H5Ocopy +
get +\li #H5Oget_info1 (deprecated) +\li #H5Oget_info2 (deprecated) +\li #H5Oget_info3 +
specific +\li #H5Odecr_refcount +\li #H5Oexists_by_name +\li #H5Oflush +\li #H5Oincr_refcount +\li #H5Orefresh +\li #H5Ovisit_by_name1 (deprecated) +\li #H5Ovisit_by_name2 (deprecated) +\li #H5Ovisit_by_name3 +\li #H5Ovisit1 (deprecated) +\li #H5Ovisit2 (deprecated) +\li #H5Ovisit3 +
close +\li #H5Oclose +
LINK
create +\li #H5Glink (deprecated) +\li #H5Glink2 (deprecated) +\li #H5Lcreate_hard +\li #H5Lcreate_soft +\li #H5Lcreate_ud +\li #H5Olink +
copy +\li #H5Lcopy +
move +\li #H5Gmove (deprecated) +\li #H5Gmove2 (deprecated) +\li #H5Lmove +
get +\li #H5Gget_linkval (deprecated) +\li #H5Lget_info1 (deprecated) +\li #H5Lget_info2 +\li #H5Lget_info_by_idx +\li #H5Lget_name_by_idx +\li #H5Lget_val +\li #H5Lget_val_by_idx +
specific +\li #H5Gunlink (deprecated) +\li #H5Ldelete +\li #H5Ldelete_by_idx +\li #H5Lexists +\li #H5Literate1 (deprecated) +\li #H5Literate2 +\li #H5Literate_by_name1 (deprecated) +\li #H5Literate_by_name2 +\li #H5Lvisit1 (deprecated) +\li #H5Lvisit2 +\li #H5Lvisit_by_name1 (deprecated) +\li #H5Lvisit_by_name2 +
DATATYPE
commit
+\li #H5Tcommit1 (deprecated)
+\li #H5Tcommit2
+\li #H5Tcommit_anon
+
open +\li #H5Topen1 (deprecated) +\li #H5Topen2 +
get +\li #H5Tget_create_plist +
specific +\li #H5Tflush +\li #H5Trefresh +
close +\li #H5Tclose +
ATTRIBUTE
create +\li #H5Acreate1 (deprecated) +\li #H5Acreate2 +\li #H5Acreate_by_name +
open +\li #H5Aopen +\li #H5Aopen_by_idx +\li #H5Aopen_by_name +\li #H5Aopen_idx (deprecated) +\li #H5Aopen_name (deprecated) +
read +\li #H5Aread +
write +\li #H5Awrite +
get +\li #H5Aget_create_plist +\li #H5Aget_info +\li #H5Aget_info_by_idx +\li #H5Aget_info_by_name +\li #H5Aget_name +\li #H5Aget_name_by_idx +\li #H5Aget_space +\li #H5Aget_storage_size +\li #H5Aget_type +
specific +\li #H5Adelete +\li #H5Adelete_by_idx +\li #H5Adelete_by_name +\li #H5Aexists +\li #H5Aexists_by_name +\li #H5Aiterate1 (deprecated) +\li #H5Aiterate2 +\li #H5Aiterate_by_name +\li #H5Arename +\li #H5Arename_by_name +
close +\li #H5Aclose +
+ +\section secVOLAppB Appendix B Callback Wrapper API Calls for Passthrough Connector Authors +\code +/* Pass-through callbacks */ +H5_DLL void *H5VLget_object(void *obj, hid_t connector_id); +H5_DLL herr_t H5VLget_wrap_ctx(void *obj, hid_t connector_id, void **wrap_ctx); +H5_DLL void *H5VLwrap_object(void *obj, H5I_type_t obj_type, hid_t connector_id, void *wrap_ctx); +H5_DLL void *H5VLunwrap_object(void *obj, hid_t connector_id); +H5_DLL herr_t H5VLfree_wrap_ctx(void *wrap_ctx, hid_t connector_id); + +/* Public wrappers for generic callbacks */ +H5_DLL herr_t H5VLinitialize(hid_t connector_id, hid_t vipl_id); +H5_DLL herr_t H5VLterminate(hid_t connector_id); +H5_DLL herr_t H5VLget_cap_flags(hid_t connector_id, uint64_t *cap_flags); +H5_DLL herr_t H5VLget_value(hid_t connector_id, H5VL_class_value_t *conn_value); + +/* Public wrappers for info fields and callbacks */ +H5_DLL herr_t H5VLcopy_connector_info(hid_t connector_id, void **dst_vol_info, void *src_vol_info); +H5_DLL herr_t H5VLcmp_connector_info(int *cmp, hid_t connector_id, const void *info1, const void *info2); +H5_DLL herr_t H5VLfree_connector_info(hid_t connector_id, void *vol_info); +H5_DLL herr_t H5VLconnector_info_to_str(const void *info, hid_t connector_id, char **str); +H5_DLL herr_t H5VLconnector_str_to_info(const char *str, hid_t connector_id, void **info); + +/* Public wrappers for attribute callbacks */ +H5_DLL void *H5VLattr_create(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + const char *attr_name, hid_t type_id, hid_t space_id, hid_t acpl_id, + hid_t aapl_id, hid_t dxpl_id, void **req); +H5_DLL void *H5VLattr_open(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + const char *name, hid_t aapl_id, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLattr_read(void *attr, hid_t connector_id, hid_t dtype_id, void *buf, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLattr_write(void *attr, hid_t connector_id, hid_t dtype_id, const void *buf, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLattr_get(void *obj, hid_t connector_id, H5VL_attr_get_args_t *args, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLattr_specific(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + H5VL_attr_specific_args_t *args, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLattr_optional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLattr_close(void *attr, hid_t connector_id, hid_t dxpl_id, void **req); + +/* Public wrappers for dataset callbacks */ +H5_DLL void *H5VLdataset_create(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + const char *name, hid_t lcpl_id, hid_t type_id, hid_t space_id, hid_t dcpl_id, + hid_t dapl_id, hid_t dxpl_id, void **req); +H5_DLL void *H5VLdataset_open(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + const char *name, hid_t dapl_id, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLdataset_read(size_t count, void *dset[], hid_t connector_id, hid_t mem_type_id[], + hid_t mem_space_id[], hid_t file_space_id[], hid_t plist_id, void *buf[], + void **req); +H5_DLL herr_t H5VLdataset_write(size_t count, void *dset[], hid_t connector_id, hid_t mem_type_id[], + hid_t mem_space_id[], hid_t file_space_id[], hid_t plist_id, + const void *buf[], void **req); +H5_DLL herr_t H5VLdataset_get(void *dset, hid_t connector_id, H5VL_dataset_get_args_t *args, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLdataset_specific(void *obj, hid_t connector_id, H5VL_dataset_specific_args_t 
*args, + hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLdataset_optional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLdataset_close(void *dset, hid_t connector_id, hid_t dxpl_id, void **req); + +/* Public wrappers for named datatype callbacks */ +H5_DLL void *H5VLdatatype_commit(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + const char *name, hid_t type_id, hid_t lcpl_id, hid_t tcpl_id, hid_t tapl_id, + hid_t dxpl_id, void **req); +H5_DLL void *H5VLdatatype_open(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + const char *name, hid_t tapl_id, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLdatatype_get(void *dt, hid_t connector_id, H5VL_datatype_get_args_t *args, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLdatatype_specific(void *obj, hid_t connector_id, H5VL_datatype_specific_args_t *args, + hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLdatatype_optional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLdatatype_close(void *dt, hid_t connector_id, hid_t dxpl_id, void **req); + +/* Public wrappers for file callbacks */ +H5_DLL void *H5VLfile_create(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, hid_t dxpl_id, + void **req); +H5_DLL void *H5VLfile_open(const char *name, unsigned flags, hid_t fapl_id, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLfile_get(void *file, hid_t connector_id, H5VL_file_get_args_t *args, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLfile_specific(void *obj, hid_t connector_id, H5VL_file_specific_args_t *args, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLfile_optional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLfile_close(void *file, hid_t connector_id, hid_t dxpl_id, void **req); + +/* Public wrappers for group callbacks */ +H5_DLL void *H5VLgroup_create(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + const char *name, hid_t lcpl_id, hid_t gcpl_id, hid_t gapl_id, hid_t dxpl_id, + void **req); +H5_DLL void *H5VLgroup_open(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + const char *name, hid_t gapl_id, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLgroup_get(void *obj, hid_t connector_id, H5VL_group_get_args_t *args, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLgroup_specific(void *obj, hid_t connector_id, H5VL_group_specific_args_t *args, + hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLgroup_optional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, + void **req); +H5_DLL herr_t H5VLgroup_close(void *grp, hid_t connector_id, hid_t dxpl_id, void **req); + +/* Public wrappers for link callbacks */ +H5_DLL herr_t H5VLlink_create(H5VL_link_create_args_t *args, void *obj, const H5VL_loc_params_t *loc_params, + hid_t connector_id, hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLlink_copy(void *src_obj, const H5VL_loc_params_t *loc_params1, void *dst_obj, + const H5VL_loc_params_t *loc_params2, hid_t connector_id, hid_t lcpl_id, + hid_t lapl_id, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLlink_move(void *src_obj, const H5VL_loc_params_t *loc_params1, void *dst_obj, + const H5VL_loc_params_t *loc_params2, hid_t connector_id, hid_t lcpl_id, + hid_t lapl_id, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLlink_get(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + H5VL_link_get_args_t *args, hid_t 
dxpl_id, void **req); +H5_DLL herr_t H5VLlink_specific(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + H5VL_link_specific_args_t *args, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLlink_optional(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + H5VL_optional_args_t *args, hid_t dxpl_id, void **req); + +/* Public wrappers for object callbacks */ +H5_DLL void *H5VLobject_open(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + H5I_type_t *opened_type, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLobject_copy(void *src_obj, const H5VL_loc_params_t *loc_params1, const char *src_name, + void *dst_obj, const H5VL_loc_params_t *loc_params2, const char *dst_name, + hid_t connector_id, hid_t ocpypl_id, hid_t lcpl_id, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLobject_get(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + H5VL_object_get_args_t *args, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLobject_specific(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + H5VL_object_specific_args_t *args, hid_t dxpl_id, void **req); +H5_DLL herr_t H5VLobject_optional(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, + H5VL_optional_args_t *args, hid_t dxpl_id, void **req); + +/* Public wrappers for connector/container introspection callbacks */ +H5_DLL herr_t H5VLintrospect_get_conn_cls(void *obj, hid_t connector_id, H5VL_get_conn_lvl_t lvl, + const H5VL_class_t **conn_cls); +H5_DLL herr_t H5VLintrospect_get_cap_flags(const void *info, hid_t connector_id, uint64_t *cap_flags); +H5_DLL herr_t H5VLintrospect_opt_query(void *obj, hid_t connector_id, H5VL_subclass_t subcls, int opt_type, + uint64_t *flags); + +/* Public wrappers for asynchronous request callbacks */ +H5_DLL herr_t H5VLrequest_wait(void *req, hid_t connector_id, uint64_t timeout, + H5VL_request_status_t *status); +H5_DLL herr_t H5VLrequest_notify(void *req, hid_t connector_id, H5VL_request_notify_t cb, void *ctx); +H5_DLL herr_t H5VLrequest_cancel(void *req, hid_t connector_id, H5VL_request_status_t *status); +H5_DLL herr_t H5VLrequest_specific(void *req, hid_t connector_id, H5VL_request_specific_args_t *args); +H5_DLL herr_t H5VLrequest_optional(void *req, hid_t connector_id, H5VL_optional_args_t *args); +H5_DLL herr_t H5VLrequest_free(void *req, hid_t connector_id); + +/* Public wrappers for blob callbacks */ +H5_DLL herr_t H5VLblob_put(void *obj, hid_t connector_id, const void *buf, size_t size, void *blob_id, + void *ctx); +H5_DLL herr_t H5VLblob_get(void *obj, hid_t connector_id, const void *blob_id, void *buf, size_t size, + void *ctx); +H5_DLL herr_t H5VLblob_specific(void *obj, hid_t connector_id, void *blob_id, + H5VL_blob_specific_args_t *args); +H5_DLL herr_t H5VLblob_optional(void *obj, hid_t connector_id, void *blob_id, H5VL_optional_args_t *args); + +/* Public wrappers for token callbacks */ +H5_DLL herr_t H5VLtoken_cmp(void *obj, hid_t connector_id, const H5O_token_t *token1, + const H5O_token_t *token2, int *cmp_value); +H5_DLL herr_t H5VLtoken_to_str(void *obj, H5I_type_t obj_type, hid_t connector_id, const H5O_token_t *token, + char **token_str); +H5_DLL herr_t H5VLtoken_from_str(void *obj, H5I_type_t obj_type, hid_t connector_id, const char *token_str, + H5O_token_t *token); + +/* Public wrappers for generic 'optional' callback */ +H5_DLL herr_t H5VLoptional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, + void **req); +\endcode + +\section secVOLAppC Appendix C Native VOL 
Connector Optional Values By Subclass +\code +/* H5VL_SUBCLS_ATTR */ +#define H5VL_NATIVE_ATTR_ITERATE_OLD 0 /* H5Aiterate (deprecated routine) */ + +/* H5VL_SUBCLS_DATASET */ +#define H5VL_NATIVE_DATASET_FORMAT_CONVERT 0 /* H5Dformat_convert (internal) */ +#define H5VL_NATIVE_DATASET_GET_CHUNK_INDEX_TYPE 1 /* H5Dget_chunk_index_type */ +#define H5VL_NATIVE_DATASET_GET_CHUNK_STORAGE_SIZE 2 /* H5Dget_chunk_storage_size */ +#define H5VL_NATIVE_DATASET_GET_NUM_CHUNKS 3 /* H5Dget_num_chunks */ +#define H5VL_NATIVE_DATASET_GET_CHUNK_INFO_BY_IDX 4 /* H5Dget_chunk_info */ +#define H5VL_NATIVE_DATASET_GET_CHUNK_INFO_BY_COORD 5 /* H5Dget_chunk_info_by_coord */ +#define H5VL_NATIVE_DATASET_CHUNK_READ 6 /* H5Dchunk_read */ +#define H5VL_NATIVE_DATASET_CHUNK_WRITE 7 /* H5Dchunk_write */ +#define H5VL_NATIVE_DATASET_GET_VLEN_BUF_SIZE 8 /* H5Dvlen_get_buf_size */ +#define H5VL_NATIVE_DATASET_GET_OFFSET 9 /* H5Dget_offset */ +#define H5VL_NATIVE_DATASET_CHUNK_ITER 10 /* H5Dchunk_iter */ + +/* H5VL_SUBCLS_FILE */ +#define H5VL_NATIVE_FILE_CLEAR_ELINK_CACHE 0 /* H5Fclear_elink_file_cache */ +#define H5VL_NATIVE_FILE_GET_FILE_IMAGE 1 /* H5Fget_file_image */ +#define H5VL_NATIVE_FILE_GET_FREE_SECTIONS 2 /* H5Fget_free_sections */ +#define H5VL_NATIVE_FILE_GET_FREE_SPACE 3 /* H5Fget_freespace */ +#define H5VL_NATIVE_FILE_GET_INFO 4 /* H5Fget_info1/2 */ +#define H5VL_NATIVE_FILE_GET_MDC_CONF 5 /* H5Fget_mdc_config */ +#define H5VL_NATIVE_FILE_GET_MDC_HR 6 /* H5Fget_mdc_hit_rate */ +#define H5VL_NATIVE_FILE_GET_MDC_SIZE 7 /* H5Fget_mdc_size */ +#define H5VL_NATIVE_FILE_GET_SIZE 8 /* H5Fget_filesize */ +#define H5VL_NATIVE_FILE_GET_VFD_HANDLE 9 /* H5Fget_vfd_handle */ +#define H5VL_NATIVE_FILE_RESET_MDC_HIT_RATE 10 /* H5Freset_mdc_hit_rate_stats */ +#define H5VL_NATIVE_FILE_SET_MDC_CONFIG 11 /* H5Fset_mdc_config */ +#define H5VL_NATIVE_FILE_GET_METADATA_READ_RETRY_INFO 12 /* H5Fget_metadata_read_retry_info */ +#define H5VL_NATIVE_FILE_START_SWMR_WRITE 13 /* H5Fstart_swmr_write */ +#define H5VL_NATIVE_FILE_START_MDC_LOGGING 14 /* H5Fstart_mdc_logging */ +#define H5VL_NATIVE_FILE_STOP_MDC_LOGGING 15 /* H5Fstop_mdc_logging */ +#define H5VL_NATIVE_FILE_GET_MDC_LOGGING_STATUS 16 /* H5Fget_mdc_logging_status */ +#define H5VL_NATIVE_FILE_FORMAT_CONVERT 17 /* H5Fformat_convert */ +#define H5VL_NATIVE_FILE_RESET_PAGE_BUFFERING_STATS 18 /* H5Freset_page_buffering_stats */ +#define H5VL_NATIVE_FILE_GET_PAGE_BUFFERING_STATS 19 /* H5Fget_page_buffering_stats */ +#define H5VL_NATIVE_FILE_GET_MDC_IMAGE_INFO 20 /* H5Fget_mdc_image_info */ +#define H5VL_NATIVE_FILE_GET_EOA 21 /* H5Fget_eoa */ +#define H5VL_NATIVE_FILE_INCR_FILESIZE 22 /* H5Fincrement_filesize */ +#define H5VL_NATIVE_FILE_SET_LIBVER_BOUNDS 23 /* H5Fset_latest_format/libver_bounds */ +#define H5VL_NATIVE_FILE_GET_MIN_DSET_OHDR_FLAG 24 /* H5Fget_dset_no_attrs_hint */ +#define H5VL_NATIVE_FILE_SET_MIN_DSET_OHDR_FLAG 25 /* H5Fset_dset_no_attrs_hint */ +#ifdef H5_HAVE_PARALLEL +#define H5VL_NATIVE_FILE_GET_MPI_ATOMICITY 26 /* H5Fget_mpi_atomicity */ +#define H5VL_NATIVE_FILE_SET_MPI_ATOMICITY 27 /* H5Fset_mpi_atomicity */ +#endif +#define H5VL_NATIVE_FILE_POST_OPEN 28 /* Adjust file after open, with wrapping context */ + +/* H5VL_SUBCLS_GROUP */ +#define H5VL_NATIVE_GROUP_ITERATE_OLD 0 /* HG5Giterate (deprecated routine) */ +#define H5VL_NATIVE_GROUP_GET_OBJINFO 1 /* HG5Gget_objinfo (deprecated routine) */ + +/* H5VL_SUBCLS_OBJECT */ +#define H5VL_NATIVE_OBJECT_GET_COMMENT 0 /* H5G|H5Oget_comment, H5Oget_comment_by_name */ +#define H5VL_NATIVE_OBJECT_SET_COMMENT 1 /* 
H5G|H5Oset_comment, H5Oset_comment_by_name */ +#define H5VL_NATIVE_OBJECT_DISABLE_MDC_FLUSHES 2 /* H5Odisable_mdc_flushes */ +#define H5VL_NATIVE_OBJECT_ENABLE_MDC_FLUSHES 3 /* H5Oenable_mdc_flushes */ +#define H5VL_NATIVE_OBJECT_ARE_MDC_FLUSHES_DISABLED 4 /* H5Oare_mdc_flushes_disabled */ +#define H5VL_NATIVE_OBJECT_GET_NATIVE_INFO 5 /* H5Oget_native_info(_by_idx, _by_name) */ +\endcode + +
+Navigate back: \ref index "Main" / \ref VOL_Connector +*/ diff --git a/doxygen/hdf5doxy_layout.xml b/doxygen/hdf5doxy_layout.xml index d156f2c1785..77900a35784 100644 --- a/doxygen/hdf5doxy_layout.xml +++ b/doxygen/hdf5doxy_layout.xml @@ -11,6 +11,8 @@ + + diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 472932cf4fc..baf85fbaa40 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -71,7 +71,7 @@ New Features HDF5_ENABLE_USING_DMALLOC (DER - 2022/11/08) - + - Removal of memory allocation sanity checks configure options With the removal of the memory allocation sanity checks feature, the @@ -192,7 +192,11 @@ New Features Documentation: -------------- - - + - Ported the existing VOL Connector Author Guide document to doxygen. + + Added new dox file, VOLConnGuide.dox. + + (ADB - 2022/12/20) Support for new platforms, languages and compilers From 4200ea9bd47da9ca65cf350786560f3747f7bf27 Mon Sep 17 00:00:00 2001 From: kwryankrattiger <80296582+kwryankrattiger@users.noreply.github.com> Date: Wed, 18 Jan 2023 16:26:35 -0600 Subject: [PATCH 043/231] CMake: Find MPI in HDF5 CMake config (#2400) --- config/cmake/hdf5-config.cmake.in | 2 ++ 1 file changed, 2 insertions(+) diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in index 35cee4f6062..1a3fb7bbf2f 100644 --- a/config/cmake/hdf5-config.cmake.in +++ b/config/cmake/hdf5-config.cmake.in @@ -63,6 +63,8 @@ if (${HDF5_PACKAGE_NAME}_ENABLE_PARALLEL) set (${HDF5_PACKAGE_NAME}_MPI_Fortran_INCLUDE_PATH "@MPI_Fortran_INCLUDE_DIRS@") set (${HDF5_PACKAGE_NAME}_MPI_Fortran_LIBRARIES "@MPI_Fortran_LIBRARIES@") endif () + + find_package(MPI QUIET REQUIRED) endif () if (${HDF5_PACKAGE_NAME}_BUILD_JAVA) From 6127a5ba4277a8b58231d0a3ec6fe92003662e2d Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Thu, 9 Feb 2023 05:46:16 -0800 Subject: [PATCH 044/231] Add szip/libaec to GitHub CI and fix warnings (#2438) szip (or libaec) is currently not tested in CI. This adds szip to the the Autotools GitHub CI actions on Linux when building with the Autotools. This PR also cleans up a few warnings that remained in the szip- related code so the -Werror check will pass. 
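
As a quick sanity check of what such a CI job actually built, the filter
configuration can also be queried from application code at run time. The
minimal sketch below is illustrative only (it is not part of this patch);
it uses the public filter-introspection calls H5Zfilter_avail() and
H5Zget_filter_info() to report whether the szip filter was compiled in
and whether its encoder is enabled:

    #include <stdio.h>
    #include "hdf5.h"

    int
    main(void)
    {
        unsigned filter_config = 0;

        /* Is the szip filter compiled into this build at all? */
        if (H5Zfilter_avail(H5Z_FILTER_SZIP) <= 0) {
            printf("szip filter not available\n");
            return 1;
        }

        /* Decode support comes with the filter; encode support depends
         * on the szip/libaec library that was linked in.
         */
        if (H5Zget_filter_info(H5Z_FILTER_SZIP, &filter_config) < 0)
            return 1;

        printf("szip encoder: %s, decoder: %s\n",
               (filter_config & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ? "yes" : "no",
               (filter_config & H5Z_FILTER_CONFIG_DECODE_ENABLED) ? "yes" : "no");

        return 0;
    }
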
--- .github/workflows/main.yml | 16 ++++++++++++++-- src/H5Z.c | 12 +++++++++--- src/H5Zszip.c | 14 +++++++------- test/dsets.c | 8 ++------ test/tmisc.c | 4 ++-- 5 files changed, 34 insertions(+), 20 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a31317997a0..01baf4dda27 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -109,6 +109,7 @@ jobs: direct_vfd: enable deprec_sym: enable default_api: v114 + szip: yes toolchain: "" generator: "autogen" flags: "" @@ -129,6 +130,7 @@ jobs: direct_vfd: disable deprec_sym: enable default_api: v114 + szip: yes toolchain: "" generator: "autogen" flags: "CC=mpicc" @@ -169,6 +171,7 @@ jobs: direct_vfd: enable deprec_sym: enable default_api: v16 + szip: yes toolchain: "" generator: "autogen" flags: "" @@ -191,6 +194,7 @@ jobs: direct_vfd: enable deprec_sym: enable default_api: v18 + szip: yes toolchain: "" generator: "autogen" flags: "" @@ -213,6 +217,7 @@ jobs: direct_vfd: enable deprec_sym: enable default_api: v110 + szip: yes toolchain: "" generator: "autogen" flags: "" @@ -235,6 +240,7 @@ jobs: direct_vfd: enable deprec_sym: enable default_api: v112 + szip: yes toolchain: "" generator: "autogen" flags: "" @@ -257,6 +263,7 @@ jobs: direct_vfd: enable deprec_sym: enable default_api: v114 + szip: yes toolchain: "" generator: "autogen" flags: "" @@ -279,6 +286,7 @@ jobs: direct_vfd: enable deprec_sym: disable default_api: default + szip: yes toolchain: "" generator: "autogen" flags: "" @@ -301,6 +309,7 @@ jobs: direct_vfd: enable deprec_sym: enable default_api: v114 + szip: yes toolchain: "" generator: "autogen" flags: "CFLAGS=-Werror" @@ -323,6 +332,7 @@ jobs: direct_vfd: enable deprec_sym: enable default_api: v114 + szip: yes toolchain: "" generator: "autogen" flags: "CFLAGS=-Werror" @@ -367,6 +377,7 @@ jobs: echo "CC=gcc-11" >> $GITHUB_ENV echo "CXX=g++-11" >> $GITHUB_ENV echo "FC=gfortran-11" >> $GITHUB_ENV + sudo apt install libaec0 libaec-dev if: (matrix.generator == 'autogen') && (matrix.parallel != 'enable') - name: Install Autotools Dependencies (Linux, parallel) @@ -376,6 +387,7 @@ jobs: sudo apt install openmpi-bin openmpi-common mpi-default-dev echo "CC=mpicc" >> $GITHUB_ENV echo "FC=mpif90" >> $GITHUB_ENV + sudo apt install libaec0 libaec-dev if: (matrix.generator == 'autogen') && (matrix.parallel == 'enable') - name: Install Dependencies (Windows) @@ -406,7 +418,7 @@ jobs: sh ./autogen.sh mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" - ${{ matrix.flags }} $GITHUB_WORKSPACE/configure --enable-build-mode=${{ matrix.build_mode.autotools }} --${{ matrix.deprec_sym }}-deprecated-symbols --with-default-api-version=${{ matrix.default_api }} --enable-shared --${{ matrix.parallel }}-parallel --${{ matrix.cpp }}-cxx --${{ matrix.fortran }}-fortran --${{ matrix.java }}-java --${{ matrix.mirror_vfd }}-mirror-vfd --${{ matrix.direct_vfd }}-direct-vfd + ${{ matrix.flags }} $GITHUB_WORKSPACE/configure --enable-build-mode=${{ matrix.build_mode.autotools }} --${{ matrix.deprec_sym }}-deprecated-symbols --with-default-api-version=${{ matrix.default_api }} --enable-shared --${{ matrix.parallel }}-parallel --${{ matrix.cpp }}-cxx --${{ matrix.fortran }}-fortran --${{ matrix.java }}-java --${{ matrix.mirror_vfd }}-mirror-vfd --${{ matrix.direct_vfd }}-direct-vfd --with-szlib=${{ matrix.szip }} shell: bash if: (matrix.generator == 'autogen') && (! 
matrix.thread_safe.enabled) @@ -415,7 +427,7 @@ jobs: sh ./autogen.sh mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" - ${{ matrix.flags }} $GITHUB_WORKSPACE/configure --enable-build-mode=${{ matrix.build_mode.autotools }} --enable-shared --enable-threadsafe --disable-hl --${{ matrix.parallel }}-parallel --${{ matrix.mirror_vfd }}-mirror-vfd --${{ matrix.direct_vfd }}-direct-vfd + ${{ matrix.flags }} $GITHUB_WORKSPACE/configure --enable-build-mode=${{ matrix.build_mode.autotools }} --enable-shared --enable-threadsafe --disable-hl --${{ matrix.parallel }}-parallel --${{ matrix.mirror_vfd }}-mirror-vfd --${{ matrix.direct_vfd }}-direct-vfd --with-szlib=${{ matrix.szip }} shell: bash if: (matrix.generator == 'autogen') && (matrix.thread_safe.enabled) diff --git a/src/H5Z.c b/src/H5Z.c index 8ac9b9cc2c1..8631b049e86 100644 --- a/src/H5Z.c +++ b/src/H5Z.c @@ -104,9 +104,15 @@ H5Z_init(void) HGOTO_ERROR(H5E_PLINE, H5E_CANTINIT, FAIL, "unable to register deflate filter") #endif /* H5_HAVE_FILTER_DEFLATE */ #ifdef H5_HAVE_FILTER_SZIP - H5Z_SZIP->encoder_present = SZ_encoder_enabled(); - if (H5Z_register(H5Z_SZIP) < 0) - HGOTO_ERROR(H5E_PLINE, H5E_CANTINIT, FAIL, "unable to register szip filter") + { + int encoder_enabled = SZ_encoder_enabled(); + if (encoder_enabled < 0) + HGOTO_ERROR(H5E_PLINE, H5E_CANTINIT, FAIL, "check for szip encoder failed") + + H5Z_SZIP->encoder_present = (unsigned)encoder_enabled; + if (H5Z_register(H5Z_SZIP) < 0) + HGOTO_ERROR(H5E_PLINE, H5E_CANTINIT, FAIL, "unable to register szip filter") + } #endif /* H5_HAVE_FILTER_SZIP */ done: diff --git a/src/H5Zszip.c b/src/H5Zszip.c index 18ae248f23d..b03ca389b6e 100644 --- a/src/H5Zszip.c +++ b/src/H5Zszip.c @@ -72,7 +72,7 @@ static htri_t H5Z__can_apply_szip(hid_t H5_ATTR_UNUSED dcpl_id, hid_t type_id, hid_t H5_ATTR_UNUSED space_id) { const H5T_t *type; /* Datatype */ - unsigned dtype_size; /* Datatype's size (in bits) */ + size_t dtype_size; /* Datatype's size (in bits) */ H5T_order_t dtype_order; /* Datatype's endianness order */ htri_t ret_value = TRUE; /* Return value */ @@ -130,7 +130,7 @@ H5Z__set_local_szip(hid_t dcpl_id, hid_t type_id, hid_t space_id) H5T_order_t dtype_order; /* Datatype's endianness order */ size_t dtype_size; /* Datatype's size (in bits) */ size_t dtype_precision; /* Datatype's precision (in bits) */ - size_t dtype_offset; /* Datatype's offset (in bits) */ + int dtype_offset; /* Datatype's offset (in bits) */ hsize_t scanline; /* Size of dataspace's fastest changing dimension */ herr_t ret_value = SUCCEED; /* Return value */ @@ -160,16 +160,16 @@ H5Z__set_local_szip(hid_t dcpl_id, hid_t type_id, hid_t space_id) dtype_offset = H5T_get_offset(type); if (dtype_offset != 0) dtype_precision = dtype_size; - } /* end if */ + } if (dtype_precision > 24) { if (dtype_precision <= 32) dtype_precision = 32; else if (dtype_precision <= 64) dtype_precision = 64; - } /* end if */ + } /* Set "local" parameter for this dataset's "bits-per-pixel" */ - cd_values[H5Z_SZIP_PARM_BPP] = dtype_precision; + cd_values[H5Z_SZIP_PARM_BPP] = (unsigned)dtype_precision; /* Get dataspace */ if (NULL == (ds = (H5S_t *)H5I_object_verify(space_id, H5I_DATASPACE))) @@ -199,7 +199,7 @@ H5Z__set_local_szip(hid_t dcpl_id, hid_t type_id, hid_t space_id) if (npoints < cd_values[H5Z_SZIP_PARM_PPB]) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "pixels per block greater than total number of elements in the chunk") - scanline = MIN((cd_values[H5Z_SZIP_PARM_PPB] * SZ_MAX_BLOCKS_PER_SCANLINE), npoints); + scanline = 
(hsize_t)MIN((cd_values[H5Z_SZIP_PARM_PPB] * SZ_MAX_BLOCKS_PER_SCANLINE), npoints); } else { if (scanline <= SZ_MAX_PIXELS_PER_SCANLINE) @@ -217,7 +217,7 @@ H5Z__set_local_szip(hid_t dcpl_id, hid_t type_id, hid_t space_id) /* Set the correct endianness flag for szip */ /* (Note: this may not handle non-atomic datatypes well) */ - cd_values[H5Z_SZIP_PARM_MASK] &= ~(SZ_LSB_OPTION_MASK | SZ_MSB_OPTION_MASK); + cd_values[H5Z_SZIP_PARM_MASK] &= ~((unsigned)SZ_LSB_OPTION_MASK | (unsigned)SZ_MSB_OPTION_MASK); switch (dtype_order) { case H5T_ORDER_LE: /* Little-endian byte order */ cd_values[H5Z_SZIP_PARM_MASK] |= SZ_LSB_OPTION_MASK; diff --git a/test/dsets.c b/test/dsets.c index 6592d17b6ef..fc3de086f1e 100644 --- a/test/dsets.c +++ b/test/dsets.c @@ -2495,11 +2495,7 @@ test_get_filter_info(void) *------------------------------------------------------------------------- */ static herr_t -test_filters(hid_t file, hid_t -#ifndef H5_HAVE_FILTER_SZIP - H5_ATTR_UNUSED -#endif /* H5_HAVE_FILTER_SZIP */ - fapl) +test_filters(hid_t file) { hid_t dc; /* Dataset creation property list ID */ const hsize_t chunk_size[2] = {FILTER_CHUNK_DIM1, FILTER_CHUNK_DIM2}; /* Chunk dimensions */ @@ -15728,7 +15724,7 @@ main(void) nerrors += (test_compact_open_close_dirty(my_fapl) < 0 ? 1 : 0); nerrors += (test_conv_buffer(file) < 0 ? 1 : 0); nerrors += (test_tconv(file) < 0 ? 1 : 0); - nerrors += (test_filters(file, my_fapl) < 0 ? 1 : 0); + nerrors += (test_filters(file) < 0 ? 1 : 0); nerrors += (test_onebyte_shuffle(file) < 0 ? 1 : 0); nerrors += (test_nbit_int(file) < 0 ? 1 : 0); nerrors += (test_nbit_float(file) < 0 ? 1 : 0); diff --git a/test/tmisc.c b/test/tmisc.c index 5fb44d46fa5..92b441e6558 100644 --- a/test/tmisc.c +++ b/test/tmisc.c @@ -3948,7 +3948,7 @@ test_misc22(void) unsigned int flags; size_t cd_nelmts = 32; unsigned int cd_values[32]; - unsigned correct; + size_t correct; if (h5_szip_can_encode() != 1) return; @@ -4042,7 +4042,7 @@ test_misc22(void) NULL); CHECK(ret, FAIL, "H5Pget_filter_by_id2"); - VERIFY(cd_values[2], correct, "SZIP filter returned value for precision"); + VERIFY(cd_values[2], (unsigned)correct, "SZIP filter returned value for precision"); ret = H5Dclose(dsid); CHECK(ret, FAIL, "H5Dclose"); From 8211133594cd2fa859ac3b87e6a356a3c6d99bbd Mon Sep 17 00:00:00 2001 From: Larry Knox Date: Fri, 10 Feb 2023 21:28:57 -0600 Subject: [PATCH 045/231] Remove duplicated "help@hdfgroup.org" lines in file headers. (#2441) --- java/src/hdf/hdf5lib/callbacks/package-info.java | 1 - src/H5FDhdfs.h | 1 - test/accum.c | 1 - test/accum_swmr_reader.c | 1 - test/efc.c | 1 - 5 files changed, 5 deletions(-) diff --git a/java/src/hdf/hdf5lib/callbacks/package-info.java b/java/src/hdf/hdf5lib/callbacks/package-info.java index 1f1503416f7..bf7bf3c79c1 100644 --- a/java/src/hdf/hdf5lib/callbacks/package-info.java +++ b/java/src/hdf/hdf5lib/callbacks/package-info.java @@ -8,7 +8,6 @@ * distribution tree, or in https://www.hdfgroup.org/licenses. * * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. * - * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /** diff --git a/src/H5FDhdfs.h b/src/H5FDhdfs.h index 9c2cb70a11e..e3888d256e7 100644 --- a/src/H5FDhdfs.h +++ b/src/H5FDhdfs.h @@ -8,7 +8,6 @@ * distribution tree, or in https://www.hdfgroup.org/licenses. * * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. * - * help@hdfgroup.org. 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* diff --git a/test/accum.c b/test/accum.c index 16f351151ad..816d9c08beb 100644 --- a/test/accum.c +++ b/test/accum.c @@ -8,7 +8,6 @@ * distribution tree, or in https://www.hdfgroup.org/licenses. * * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. * - * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* Programmer: Mike McGreevy diff --git a/test/accum_swmr_reader.c b/test/accum_swmr_reader.c index 957ab1435fb..1269c5a059d 100644 --- a/test/accum_swmr_reader.c +++ b/test/accum_swmr_reader.c @@ -8,7 +8,6 @@ * distribution tree, or in https://www.hdfgroup.org/licenses. * * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. * - * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include "h5test.h" diff --git a/test/efc.c b/test/efc.c index 7bddb6c0705..90450753019 100644 --- a/test/efc.c +++ b/test/efc.c @@ -8,7 +8,6 @@ * distribution tree, or in https://www.hdfgroup.org/licenses. * * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. * - * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* Programmer: Neil Fortner From c22befe304680acd40b41905b0f2faed9c1c4518 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sat, 11 Feb 2023 04:50:30 -0800 Subject: [PATCH 046/231] Update CODEOWNERS given personnel changes (#2453) --- .github/CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bd56708ad14..0a8d1d2a8af 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,10 +2,10 @@ # Each line is a file pattern followed by one or more owners. # These owners will be the default owners for everything in the repo. -* @lrknox @derobins @byrnHDF @fortnern @jhendersonHDF @qkoziol @vchoi-hdfgroup @bmribler @raylu-hdf +* @lrknox @derobins @byrnHDF @fortnern @jhendersonHDF @qkoziol @vchoi-hdfgroup @bmribler @raylu-hdf @mattjala @brtnfld # Order is important. The last matching pattern has the most precedence. # So if a pull request only touches javascript files, only these owners # will be requested to review. -/fortran/ @brtnfld +/fortran/ @brtnfld @derobins /java/ @jhendersonHDF @byrnHDF @derobins From 461f37417e1e0be3f7755d17718b435e35fd2c2f Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Sat, 11 Feb 2023 08:34:58 -0600 Subject: [PATCH 047/231] corrected/added note about closing creation property list identifier (#2425) * corrected/added note about closing creation property list identifier * removed immaterial \see APIs * added details to H5Literate --- src/H5Apublic.h | 3 +++ src/H5Dpublic.h | 3 +++ src/H5Gpublic.h | 18 +++--------------- src/H5Lpublic.h | 12 ++++++++---- src/H5Mpublic.h | 3 +++ 5 files changed, 20 insertions(+), 19 deletions(-) diff --git a/src/H5Apublic.h b/src/H5Apublic.h index 7ed29386c76..e21e8085c0e 100644 --- a/src/H5Apublic.h +++ b/src/H5Apublic.h @@ -392,6 +392,9 @@ H5_DLL herr_t H5Aexists_by_name_async(hid_t loc_id, const char *obj_name, const * creation property list associated with the attribute specified * by \p attr_id. * + * The creation property list identifier should be released with + * H5Pclose() to prevent resource leaks. 
+ * * \since 1.8.0 * */ diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index fbce3d6c7a4..45b941acbb0 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -501,6 +501,9 @@ H5_DLL hid_t H5Dget_type(hid_t dset_id); * a copy of the dataset creation property list associated with * the dataset specified by \p dset_id. * + * The creation property list identifier should be released with + * H5Pclose() to prevent resource leaks. + * */ H5_DLL hid_t H5Dget_create_plist(hid_t dset_id); diff --git a/src/H5Gpublic.h b/src/H5Gpublic.h index d6cebf37b33..ace2071dde5 100644 --- a/src/H5Gpublic.h +++ b/src/H5Gpublic.h @@ -119,7 +119,7 @@ extern "C" { * * \since 1.8.0 * - * \see H5Gopen2(), H5Gclose() + * \see H5Gopen2() * */ H5_DLL hid_t H5Gcreate2(hid_t loc_id, const char *name, hid_t lcpl_id, hid_t gcpl_id, hid_t gapl_id); @@ -211,7 +211,7 @@ H5_DLL hid_t H5Gcreate_anon(hid_t loc_id, hid_t gcpl_id, hid_t gapl_id); * * \since 1.8.0 * - * \see H5Gcreate2(), H5Gclose() + * \see H5Gcreate2() * */ H5_DLL hid_t H5Gopen2(hid_t loc_id, const char *name, hid_t gapl_id); @@ -242,12 +242,10 @@ H5_DLL hid_t H5Gopen_async(hid_t loc_id, const char *name, hid_t gapl_id, hid_t * property list associated with the group specified by \p group_id. * * The creation property list identifier should be released with - * H5Gclose() to prevent resource leaks. + * H5Pclose() to prevent resource leaks. * * \since 1.8.0 * - * \see H5Gcreate2(), H5Gclose() - * */ H5_DLL hid_t H5Gget_create_plist(hid_t group_id); @@ -274,8 +272,6 @@ H5_DLL hid_t H5Gget_create_plist(hid_t group_id); * * \since 1.8.0 * - * \see H5Gcreate2(), H5Gclose() - * */ H5_DLL herr_t H5Gget_info(hid_t loc_id, H5G_info_t *ginfo); @@ -320,8 +316,6 @@ H5_DLL herr_t H5Gget_info_async(hid_t loc_id, H5G_info_t *ginfo /*out*/, hid_t e * * \since 1.8.0 * - * \see H5Gcreate2(), H5Gclose() - * */ H5_DLL herr_t H5Gget_info_by_name(hid_t loc_id, const char *name, H5G_info_t *ginfo, hid_t lapl_id); @@ -381,8 +375,6 @@ H5_DLL herr_t H5Gget_info_by_name_async(hid_t loc_id, const char *name, H5G_info * * \since 1.8.0 * - * \see H5Gcreate2(), H5Gclose() - * */ H5_DLL herr_t H5Gget_info_by_idx(hid_t loc_id, const char *group_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, H5G_info_t *ginfo, hid_t lapl_id); @@ -426,8 +418,6 @@ H5_DLL herr_t H5Gget_info_by_idx_async(hid_t loc_id, const char *group_name, H5_ * * \since 1.8.0 * - * \see H5Gcreate2(), H5Gclose() - * */ H5_DLL herr_t H5Gflush(hid_t group_id); @@ -451,8 +441,6 @@ H5_DLL herr_t H5Gflush(hid_t group_id); * * \since 1.8.0 * - * \see H5Gcreate2(), H5Gclose() - * */ H5_DLL herr_t H5Grefresh(hid_t group_id); diff --git a/src/H5Lpublic.h b/src/H5Lpublic.h index d2d9e9ded46..6feefcd433f 100644 --- a/src/H5Lpublic.h +++ b/src/H5Lpublic.h @@ -884,10 +884,12 @@ H5_DLL ssize_t H5Lget_name_by_idx(hid_t loc_id, const char *group_name, H5_index * not been indexed by the index type, they will first be sorted by * that index then the iteration will begin; if the links have been * so indexed, the sorting step will be unnecessary, so the iteration - * may begin more quickly. + * may begin more quickly. Valid values include the following: + * \indexes * * \p order specifies the order in which objects are to be inspected - * along the index \p idx_type. + * along the index \p idx_type. Valid values include the following: + * \orders * * \p idx_p tracks the iteration and allows an iteration to be * resumed if it was stopped before all members were processed. 
It is @@ -1641,10 +1643,12 @@ H5_DLL herr_t H5Lget_info_by_idx1(hid_t loc_id, const char *group_name, H5_index * not been indexed by the index type, they will first be sorted by * that index then the iteration will begin; if the links have been * so indexed, the sorting step will be unnecessary, so the iteration - * may begin more quickly. + * may begin more quickly. Valid values include the following: + * \indexes * * \p order specifies the order in which objects are to be inspected - * along the index \p idx_type. + * along the index \p idx_type. Valid values include the following: + * \orders * * \p idx_p tracks the iteration and allows an iteration to be * resumed if it was stopped before all members were processed. It is diff --git a/src/H5Mpublic.h b/src/H5Mpublic.h index 2cea579c476..86fe433c09f 100644 --- a/src/H5Mpublic.h +++ b/src/H5Mpublic.h @@ -352,6 +352,9 @@ H5_DLL hid_t H5Mget_val_type(hid_t map_id); * \details H5Mget_create_plist() returns an identifier for a copy of the * creation property list for a map object specified by \p map_id. * + * The creation property list identifier should be released with + * H5Pclose() to prevent resource leaks. + * * \since 1.12.0 * */ From a253cd1aee6ca971f42fee378bffff97bac887e7 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sun, 12 Feb 2023 07:15:38 -0800 Subject: [PATCH 048/231] Drop non-develop branches in main.yml (#2446) There's no reason to list 1.10, etc. in the list of branches where this flavor of main.yml applies. Those branches have their own main.yml files. --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 01baf4dda27..de33669d33c 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -5,7 +5,7 @@ on: workflow_dispatch: push: pull_request: - branches: [ develop, hdf5_1_14, hdf5_1_12, hdf5_1_10, hdf5_1_8 ] + branches: [ develop ] paths-ignore: - '.github/CODEOWNERS' - '.github/FUNDING.yml' From dcdaa7a4b189e30a18c72d812f46b68c8a6e8119 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 13 Feb 2023 07:13:38 -0800 Subject: [PATCH 049/231] Fix parallel warnings in H5Dmpio.c (#2457) * Mark a parameter as unused when not using a special debug define * Check for a chunk_entry NULL pointer after using HASH_FIND These should be the last parallel warnings so we can start building parallel with -Werror. 
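
The first of these changes is the usual idiom for a parameter that is only
referenced under a compile-time debug define. A self-contained sketch of
the pattern is shown below; the names (ATTR_UNUSED, DEBUG_TRACE,
process_chunk) are generic stand-ins and not the library's own symbols:

    #include <stdio.h>

    /* Stand-in for the library's H5_ATTR_UNUSED macro, which expands to
     * __attribute__((unused)) for GCC/Clang builds.
     */
    #if defined(__GNUC__) || defined(__clang__)
    #define ATTR_UNUSED __attribute__((unused))
    #else
    #define ATTR_UNUSED
    #endif

    /* mpi_rank is only used when DEBUG_TRACE is defined, so the non-debug
     * signature must mark it unused to stay clean under -Werror.
     */
    #ifdef DEBUG_TRACE
    static int
    process_chunk(int value, int mpi_rank)
    #else
    static int
    process_chunk(int value, int ATTR_UNUSED mpi_rank)
    #endif
    {
    #ifdef DEBUG_TRACE
        fprintf(stderr, "rank %d: processing %d\n", mpi_rank, value);
    #endif
        return value * 2;
    }

    int
    main(void)
    {
        printf("%d\n", process_chunk(21, 0));
        return 0;
    }

The second change replaces assertions on the HASH_FIND() result with the
library's normal error handling, so a failed lookup is reported as an error
instead of dereferencing a NULL chunk entry in release (NDEBUG) builds.
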
--- src/H5Dmpio.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 1e66f80e2e2..8e413c14bad 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -1404,7 +1404,11 @@ H5D__collective_write(H5D_io_info_t *io_info) *------------------------------------------------------------------------- */ static herr_t +#ifdef H5Dmpio_DEBUG H5D__link_piece_collective_io(H5D_io_info_t *io_info, int mpi_rank) +#else +H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_rank) +#endif { MPI_Datatype chunk_final_mtype; /* Final memory MPI datatype for all chunks with selection */ hbool_t chunk_final_mtype_is_derived = FALSE; @@ -4697,8 +4701,10 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch /* Find the chunk entry according to its chunk index */ HASH_FIND(hh, chunk_hash_table, &chunk_idx, sizeof(hsize_t), chunk_entry); - HDassert(chunk_entry); - HDassert(mpi_rank == chunk_entry->new_owner); + if (chunk_entry == NULL) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFIND, FAIL, "unable to find chunk entry") + if (mpi_rank != chunk_entry->new_owner) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk owner set to incorrect MPI rank") /* * Only process the chunk if its data buffer is allocated. From ea2a5ed79f2c764435a8c7b881d8b8129ad79efd Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 13 Feb 2023 08:10:47 -0800 Subject: [PATCH 050/231] Fix a possible uninitialized variable in pio_perf (#2461) --- tools/src/h5perf/pio_perf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/src/h5perf/pio_perf.c b/tools/src/h5perf/pio_perf.c index 1460772d272..26e90f8a5b6 100644 --- a/tools/src/h5perf/pio_perf.c +++ b/tools/src/h5perf/pio_perf.c @@ -311,7 +311,7 @@ run_test_loop(struct options *opts) { parameters parms; int num_procs; - int doing_pio; /* if this process is doing PIO */ + int doing_pio = 0; /* if this process is doing parallel IO */ parms.num_files = opts->num_files; parms.num_dsets = opts->num_dsets; From 7375e6c928ef4f9584375e00620a4a5adb14507b Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 13 Feb 2023 10:36:17 -0800 Subject: [PATCH 051/231] Fix uninitialized variable in pio_engine.c (#2463) --- tools/src/h5perf/pio_engine.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tools/src/h5perf/pio_engine.c b/tools/src/h5perf/pio_engine.c index 26968e330a6..5c994436c65 100644 --- a/tools/src/h5perf/pio_engine.c +++ b/tools/src/h5perf/pio_engine.c @@ -505,18 +505,18 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets, off_t nby void *buffer) { int ret_code = SUCCESS; - int rc; /*routine return code */ + int rc; /* Return code */ long ndset; size_t blk_size; /* The block size to subdivide the xfer buffer into */ off_t nbytes_xfer; /* Total number of bytes transferred so far */ size_t nbytes_xfer_advance; /* Number of bytes transferred in a single I/O operation */ size_t nbytes_toxfer; /* Number of bytes to transfer a particular time */ char dname[64]; - off_t dset_offset = 0; /*dataset offset in a file */ - off_t bytes_begin[2]; /*first elmt this process transfer */ - off_t bytes_count; /*number of elmts this process transfer */ - off_t snbytes = 0; /*size of a side of the dataset square */ - unsigned char *buf_p; /* Current buffer pointer */ + off_t dset_offset = 0; /* Dataset offset in a file */ + off_t bytes_begin[2] = {0, 0}; 
/* First elmt this process transfer */ + off_t bytes_count; /* Number of elmts this process transfer */ + off_t snbytes = 0; /* Size of a side of the dataset square */ + unsigned char *buf_p; /* Current buffer pointer */ /* POSIX variables */ off_t file_offset; /* File offset of the next transfer */ @@ -1530,11 +1530,11 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets, off_t nbyt size_t nbytes_xfer_advance; /* Number of bytes transferred in a single I/O operation */ size_t nbytes_toxfer; /* Number of bytes to transfer a particular time */ char dname[64]; - off_t dset_offset = 0; /*dataset offset in a file */ - off_t bytes_begin[2]; /*first elmt this process transfer */ - off_t bytes_count; /*number of elmts this process transfer */ - off_t snbytes = 0; /*size of a side of the dataset square */ - unsigned char *buf_p; /* Current buffer pointer */ + off_t dset_offset = 0; /* Dataset offset in a file */ + off_t bytes_begin[2] = {0, 0}; /* First elmt this process transfer */ + off_t bytes_count; /* Number of elmts this process transfer */ + off_t snbytes = 0; /* Size of a side of the dataset square */ + unsigned char *buf_p; /* Current buffer pointer */ /* POSIX variables */ off_t file_offset; /* File offset of the next transfer */ From 992146c12b00578529e2e21465866d3d07409d3a Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 13 Feb 2023 11:57:43 -0800 Subject: [PATCH 052/231] Add parallel Autotools -Werror checks (#2384) New actions added to GitHub CI to build parallel HDF5 w/ CFLAGS=-Werror. Since the GitHub runners are so underpowered, we just configure, build, and install but do not run tests. This covers Autotools only, both debug and release. --- .github/workflows/main.yml | 46 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index de33669d33c..f827ed96fe6 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -345,6 +345,52 @@ jobs: cmake: "Release" autotools: "production" + # Parallel Debug -Werror + - name: "Ubuntu gcc Autotools parallel -Werror (build only)" + os: ubuntu-latest + cpp: disable + fortran: disable + java: disable + parallel: enable + mirror_vfd: disable + direct_vfd: enable + deprec_sym: enable + default_api: v114 + toolchain: "" + generator: "autogen" + flags: "CFLAGS=-Werror" + run_tests: false + thread_safety: + enabled: false + text: "" + build_mode: + text: " DBG" + cmake: "Debug" + autotools: "debug" + + # Parallel production/release -Werror + - name: "Ubuntu gcc Autotools parallel -Werror (build only)" + os: ubuntu-latest + cpp: disable + fortran: disable + java: disable + parallel: enable + mirror_vfd: disable + direct_vfd: enable + deprec_sym: enable + default_api: v114 + toolchain: "" + generator: "autogen" + flags: "CFLAGS=-Werror" + run_tests: false + thread_safety: + enabled: false + text: "" + build_mode: + text: " REL" + cmake: "Release" + autotools: "production" + # Sets the job's name from the properties name: "${{ matrix.name }}${{ matrix.build_mode.text }}${{ matrix.thread_safety.text }}" From 7a9a948ea350b2e3194cbb1ebe1b0fd7fba54104 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 15 Feb 2023 11:11:42 -0600 Subject: [PATCH 053/231] Add build options for CMake packaging #2347 (#2462) --- CMakeInstallation.cmake | 54 +++++++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 24 deletions(-) diff --git 
a/CMakeInstallation.cmake b/CMakeInstallation.cmake index f643afcfb04..d13f8bb781a 100644 --- a/CMakeInstallation.cmake +++ b/CMakeInstallation.cmake @@ -395,24 +395,29 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES) set (CPACK_PACKAGING_INSTALL_PREFIX "/${CPACK_PACKAGE_INSTALL_DIRECTORY}") set (CPACK_COMPONENTS_ALL_IN_ONE_PACKAGE ON) - list (APPEND CPACK_GENERATOR "DEB") - set (CPACK_DEBIAN_PACKAGE_SECTION "Libraries") - set (CPACK_DEBIAN_PACKAGE_MAINTAINER "${HDF5_PACKAGE_BUGREPORT}") - - list (APPEND CPACK_GENERATOR "RPM") - set (CPACK_RPM_PACKAGE_RELEASE "1") - set (CPACK_RPM_PACKAGE_RELEASE_DIST ON) - set (CPACK_RPM_COMPONENT_INSTALL ON) - set (CPACK_RPM_PACKAGE_RELOCATABLE ON) - set (CPACK_RPM_FILE_NAME "RPM-DEFAULT") - set (CPACK_RPM_PACKAGE_NAME "${CPACK_PACKAGE_NAME}") - set (CPACK_RPM_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION}") - set (CPACK_RPM_PACKAGE_VENDOR "${CPACK_PACKAGE_VENDOR}") - set (CPACK_RPM_PACKAGE_LICENSE "BSD-style") - set (CPACK_RPM_PACKAGE_GROUP "Development/Libraries") - set (CPACK_RPM_PACKAGE_URL "${HDF5_PACKAGE_URL}") - set (CPACK_RPM_PACKAGE_SUMMARY "HDF5 is a unique technology suite that makes possible the management of extremely large and complex data collections.") - set (CPACK_RPM_PACKAGE_DESCRIPTION + find_program (DPKGSHLIB_EXE dpkg-shlibdeps) + if (DPKGSHLIB_EXE) + list (APPEND CPACK_GENERATOR "DEB") + set (CPACK_DEBIAN_PACKAGE_SECTION "Libraries") + set (CPACK_DEBIAN_PACKAGE_MAINTAINER "${HDF5_PACKAGE_BUGREPORT}") + endif () + + find_program (RPMBUILD_EXE rpmbuild) + if (RPMBUILD_EXE) + list (APPEND CPACK_GENERATOR "RPM") + set (CPACK_RPM_PACKAGE_RELEASE "1") + set (CPACK_RPM_PACKAGE_RELEASE_DIST ON) + set (CPACK_RPM_COMPONENT_INSTALL ON) + set (CPACK_RPM_PACKAGE_RELOCATABLE ON) + set (CPACK_RPM_FILE_NAME "RPM-DEFAULT") + set (CPACK_RPM_PACKAGE_NAME "${CPACK_PACKAGE_NAME}") + set (CPACK_RPM_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION}") + set (CPACK_RPM_PACKAGE_VENDOR "${CPACK_PACKAGE_VENDOR}") + set (CPACK_RPM_PACKAGE_LICENSE "BSD-style") + set (CPACK_RPM_PACKAGE_GROUP "Development/Libraries") + set (CPACK_RPM_PACKAGE_URL "${HDF5_PACKAGE_URL}") + set (CPACK_RPM_PACKAGE_SUMMARY "HDF5 is a unique technology suite that makes possible the management of extremely large and complex data collections.") + set (CPACK_RPM_PACKAGE_DESCRIPTION "The HDF5 technology suite includes: * A versatile data model that can represent very complex data objects and a wide variety of metadata. @@ -427,13 +432,14 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES) The HDF5 data model, file format, API, library, and tools are open and distributed without charge. 
" - ) + ) - #----------------------------------------------------------------------------- - # Configure the spec file for the install RPM - #----------------------------------------------------------------------------- -# configure_file ("${HDF5_RESOURCES_DIR}/hdf5.spec.in" "${CMAKE_CURRENT_BINARY_DIR}/${HDF5_PACKAGE_NAME}.spec" @ONLY IMMEDIATE) -# set (CPACK_RPM_USER_BINARY_SPECFILE "${CMAKE_CURRENT_BINARY_DIR}/${HDF5_PACKAGE_NAME}.spec") + #----------------------------------------------------------------------------- + # Configure the spec file for the install RPM + #----------------------------------------------------------------------------- +# configure_file ("${HDF5_RESOURCES_DIR}/hdf5.spec.in" "${CMAKE_CURRENT_BINARY_DIR}/${HDF5_PACKAGE_NAME}.spec" @ONLY IMMEDIATE) +# set (CPACK_RPM_USER_BINARY_SPECFILE "${CMAKE_CURRENT_BINARY_DIR}/${HDF5_PACKAGE_NAME}.spec") + endif () endif () # By default, do not warn when built on machines using only VS Express: From a136ce8539db84e8dada2e95b344e137a811c2cb Mon Sep 17 00:00:00 2001 From: raylu-hdf <60487644+raylu-hdf@users.noreply.github.com> Date: Fri, 17 Feb 2023 16:15:48 -0600 Subject: [PATCH 054/231] GitHub #2417: to avoid the pass-through VOL failing in unexpected places, make sure the underneath VOL ID is specified. (#2475) * GitHub #2417: to avoid the pass-through VOL failing in unexpected places, make sure the underneath VOL ID is specified. * Committing clang-format changes --------- Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- src/H5VLpassthru.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/src/H5VLpassthru.c b/src/H5VLpassthru.c index d7e730af061..4ceac7ad5d3 100644 --- a/src/H5VLpassthru.c +++ b/src/H5VLpassthru.c @@ -388,6 +388,7 @@ H5VL_pass_through_new_obj(void *under_obj, hid_t under_vol_id) new_obj = (H5VL_pass_through_t *)calloc(1, sizeof(H5VL_pass_through_t)); new_obj->under_object = under_obj; new_obj->under_vol_id = under_vol_id; + H5Iinc_ref(new_obj->under_vol_id); return new_obj; @@ -520,12 +521,27 @@ H5VL_pass_through_info_copy(const void *_info) printf("------- PASS THROUGH VOL INFO Copy\n"); #endif + /* Make sure the underneath VOL of this pass-through VOL is specified */ + if (!info) { + printf("\nH5VLpassthru.c line %d in %s: info for pass-through VOL can't be null\n", __LINE__, + __func__); + return NULL; + } + + if (H5Iis_valid(info->under_vol_id) <= 0) { + printf("\nH5VLpassthru.c line %d in %s: not a valid underneath VOL ID for pass-through VOL\n", + __LINE__, __func__); + return NULL; + } + /* Allocate new VOL info struct for the pass through connector */ new_info = (H5VL_pass_through_info_t *)calloc(1, sizeof(H5VL_pass_through_info_t)); /* Increment reference count on underlying VOL ID, and copy the VOL info */ new_info->under_vol_id = info->under_vol_id; + H5Iinc_ref(new_info->under_vol_id); + if (info->under_vol_info) H5VLcopy_connector_info(new_info->under_vol_id, &(new_info->under_vol_info), info->under_vol_info); @@ -753,7 +769,9 @@ H5VL_pass_through_get_wrap_ctx(const void *obj, void **wrap_ctx) /* Increment reference count on underlying VOL ID, and copy the VOL info */ new_wrap_ctx->under_vol_id = o->under_vol_id; + H5Iinc_ref(new_wrap_ctx->under_vol_id); + H5VLget_wrap_ctx(o->under_object, o->under_vol_id, &new_wrap_ctx->under_wrap_ctx); /* Set wrap context to return */ @@ -2605,6 +2623,19 @@ H5VL_pass_through_introspect_get_cap_flags(const void *_info, uint64_t *cap_flag printf("------- PASS THROUGH VOL INTROSPECT 
GetCapFlags\n"); #endif + /* Make sure the underneath VOL of this pass-through VOL is specified */ + if (!info) { + printf("\nH5VLpassthru.c line %d in %s: info for pass-through VOL can't be null\n", __LINE__, + __func__); + return -1; + } + + if (H5Iis_valid(info->under_vol_id) <= 0) { + printf("\nH5VLpassthru.c line %d in %s: not a valid underneath VOL ID for pass-through VOL\n", + __LINE__, __func__); + return -1; + } + /* Invoke the query on the underlying VOL connector */ ret_value = H5VLintrospect_get_cap_flags(info->under_vol_info, info->under_vol_id, cap_flags); From 8ccd0acee7f10d24fb5b0432b0cce705cf567e83 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Fri, 17 Feb 2023 16:17:17 -0600 Subject: [PATCH 055/231] CMake generated pkg-config file is incorrect #2259 (#2476) * CMake generated pkg-config file is incorrect #2259 * Fix fortran pc template * hdf5.pc is incorrect for debug builds #1546 * Correct pkg name and lib name * Fix typo * Fix missing space --- c++/src/CMakeLists.txt | 8 ++++++-- config/libhdf5.fpc.in | 2 +- config/libhdf5.pc.in | 2 +- fortran/src/CMakeLists.txt | 8 ++++++-- hl/c++/src/CMakeLists.txt | 8 ++++++-- hl/fortran/src/CMakeLists.txt | 8 ++++++-- hl/src/CMakeLists.txt | 8 ++++++-- release_docs/RELEASE.txt | 13 +++++++++++++ src/CMakeLists.txt | 18 ++++++++++++++---- 9 files changed, 59 insertions(+), 16 deletions(-) diff --git a/c++/src/CMakeLists.txt b/c++/src/CMakeLists.txt index 490f3e0f706..23c6f01fb2a 100644 --- a/c++/src/CMakeLists.txt +++ b/c++/src/CMakeLists.txt @@ -177,14 +177,18 @@ set (_PKG_CONFIG_LIBDIR \${exec_prefix}/${HDF5_INSTALL_LIB_DIR}) set (_PKG_CONFIG_INCLUDEDIR \${prefix}/${HDF5_INSTALL_INCLUDE_DIR}) set (_PKG_CONFIG_LIBNAME "${HDF5_CPP_LIB_CORENAME}") set (_PKG_CONFIG_VERSION "${HDF5_PACKAGE_VERSION}") +set (PKG_CONFIG_LIBNAME "${HDF5_CPP_LIB_CORENAME}") +if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") + set (PKG_CONFIG_LIBNAME "${PKG_CONFIG_LIBNAME}${CMAKE_DEBUG_POSTFIX}") +endif () set (_PKG_CONFIG_LIBS_PRIVATE) if (NOT ONLY_SHARED_LIBS) - set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${HDF5_CPP_LIB_CORENAME}") + set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) - set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${HDF5_CPP_LIB_CORENAME}") + set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () set (_PKG_CONFIG_REQUIRES "${HDF5_LIB_CORENAME} = ${HDF5_PACKAGE_VERSION}") diff --git a/config/libhdf5.fpc.in b/config/libhdf5.fpc.in index c28c2f9c62a..d4ac61508fd 100644 --- a/config/libhdf5.fpc.in +++ b/config/libhdf5.fpc.in @@ -12,5 +12,5 @@ Cflags: -I${includedir} Fflags: -I${moddir} Libs: -L${libdir} @_PKG_CONFIG_SH_LIBS@ Requires: @_PKG_CONFIG_REQUIRES@ -Libs.private: @_PKG_CONFIG_LIBS_PRIVATE@ @_PKG_CONFIG_LIBS@ +Libs.private: @_PKG_CONFIG_LIBS@ @_PKG_CONFIG_LIBS_PRIVATE@ Requires.private: @_PKG_CONFIG_REQUIRES_PRIVATE@ diff --git a/config/libhdf5.pc.in b/config/libhdf5.pc.in index 4a2ebaab474..3cb42d280e9 100644 --- a/config/libhdf5.pc.in +++ b/config/libhdf5.pc.in @@ -10,5 +10,5 @@ Version: @_PKG_CONFIG_VERSION@ Cflags: -I${includedir} Libs: -L${libdir} @_PKG_CONFIG_SH_LIBS@ Requires: @_PKG_CONFIG_REQUIRES@ -Libs.private: @_PKG_CONFIG_LIBS_PRIVATE@ @_PKG_CONFIG_LIBS@ +Libs.private: @_PKG_CONFIG_LIBS@ @_PKG_CONFIG_LIBS_PRIVATE@ Requires.private: @_PKG_CONFIG_REQUIRES_PRIVATE@ diff --git a/fortran/src/CMakeLists.txt b/fortran/src/CMakeLists.txt index df246fc6024..35b47a53c18 100644 --- 
a/fortran/src/CMakeLists.txt +++ b/fortran/src/CMakeLists.txt @@ -545,14 +545,18 @@ set (_PKG_CONFIG_INCLUDEDIR \${prefix}/${HDF5_INSTALL_INCLUDE_DIR}) set (_PKG_CONFIG_MODULEDIR \${prefix}/${HDF5_INSTALL_MODULE_DIR}) set (_PKG_CONFIG_LIBNAME "${HDF5_F90_LIB_CORENAME}") set (_PKG_CONFIG_VERSION "${HDF5_PACKAGE_VERSION}") +set (PKG_CONFIG_LIBNAME "${HDF5_F90_LIB_CORENAME}") +if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") + set (PKG_CONFIG_LIBNAME "${PKG_CONFIG_LIBNAME}${CMAKE_DEBUG_POSTFIX}") +endif () set (_PKG_CONFIG_LIBS_PRIVATE) if (NOT ONLY_SHARED_LIBS) - set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${HDF5_F90_LIB_CORENAME}") + set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) - set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${HDF5_F90_LIB_CORENAME}") + set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () set (_PKG_CONFIG_REQUIRES "${HDF5_LIB_CORENAME} = ${HDF5_PACKAGE_VERSION}") diff --git a/hl/c++/src/CMakeLists.txt b/hl/c++/src/CMakeLists.txt index 3949624a989..5e1db579a94 100644 --- a/hl/c++/src/CMakeLists.txt +++ b/hl/c++/src/CMakeLists.txt @@ -95,14 +95,18 @@ set (_PKG_CONFIG_LIBDIR \${exec_prefix}/${HDF5_INSTALL_LIB_DIR}) set (_PKG_CONFIG_INCLUDEDIR \${prefix}/${HDF5_INSTALL_INCLUDE_DIR}) set (_PKG_CONFIG_LIBNAME "${HDF5_HL_CPP_LIB_CORENAME}") set (_PKG_CONFIG_VERSION "${HDF5_PACKAGE_VERSION}") +set (PKG_CONFIG_LIBNAME "${HDF5_HL_CPP_LIB_CORENAME}") +if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") + set (PKG_CONFIG_LIBNAME "${PKG_CONFIG_LIBNAME}${CMAKE_DEBUG_POSTFIX}") +endif () set (_PKG_CONFIG_LIBS_PRIVATE) if (NOT ONLY_SHARED_LIBS) - set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${HDF5_HL_CPP_LIB_CORENAME}") + set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) - set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${HDF5_HL_CPP_LIB_CORENAME}") + set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () set (_PKG_CONFIG_REQUIRES "${HDF5_HL_LIB_CORENAME} = ${HDF5_PACKAGE_VERSION}") diff --git a/hl/fortran/src/CMakeLists.txt b/hl/fortran/src/CMakeLists.txt index 3eae8100735..e999751f5e2 100644 --- a/hl/fortran/src/CMakeLists.txt +++ b/hl/fortran/src/CMakeLists.txt @@ -329,14 +329,18 @@ set (_PKG_CONFIG_INCLUDEDIR \${prefix}/${HDF5_INSTALL_INCLUDE_DIR}) set (_PKG_CONFIG_MODULEDIR \${prefix}/${HDF5_INSTALL_MODULE_DIR}) set (_PKG_CONFIG_LIBNAME "${HDF5_HL_F90_LIB_CORENAME}") set (_PKG_CONFIG_VERSION "${HDF5_PACKAGE_VERSION}") +set (PKG_CONFIG_LIBNAME "${HDF5_HL_F90_LIB_CORENAME}") +if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") + set (PKG_CONFIG_LIBNAME "${PKG_CONFIG_LIBNAME}${CMAKE_DEBUG_POSTFIX}") +endif () set (_PKG_CONFIG_LIBS_PRIVATE) if (NOT ONLY_SHARED_LIBS) - set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${HDF5_HL_F90_LIB_CORENAME}") + set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) - set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${HDF5_HL_F90_LIB_CORENAME}") + set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () set (_PKG_CONFIG_REQUIRES "${HDF5_F90_LIB_CORENAME} = ${HDF5_PACKAGE_VERSION}") diff --git a/hl/src/CMakeLists.txt b/hl/src/CMakeLists.txt index 837af4812f2..2155f930674 100644 --- a/hl/src/CMakeLists.txt +++ b/hl/src/CMakeLists.txt @@ -127,14 +127,18 @@ set (_PKG_CONFIG_LIBDIR \${exec_prefix}/${HDF5_INSTALL_LIB_DIR}) set 
(_PKG_CONFIG_INCLUDEDIR \${prefix}/${HDF5_INSTALL_INCLUDE_DIR}) set (_PKG_CONFIG_LIBNAME "${HDF5_HL_LIB_CORENAME}") set (_PKG_CONFIG_VERSION "${HDF5_PACKAGE_VERSION}") +set (PKG_CONFIG_LIBNAME "${HDF5_HL_LIB_CORENAME}") +if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") + set (PKG_CONFIG_LIBNAME "${PKG_CONFIG_LIBNAME}${CMAKE_DEBUG_POSTFIX}") +endif () set (_PKG_CONFIG_LIBS_PRIVATE) if (NOT ONLY_SHARED_LIBS) - set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${HDF5_HL_LIB_CORENAME}") + set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) - set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${HDF5_HL_LIB_CORENAME}") + set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () set (_PKG_CONFIG_REQUIRES "${HDF5_LIB_CORENAME} = ${HDF5_PACKAGE_VERSION}") diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index baf85fbaa40..d229a423c17 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -337,6 +337,19 @@ Bug Fixes since HDF5-1.13.3 release Configuration ------------- + - Correct the CMake generated pkg-config file + + The pkg-config file generated by CMake had the order and placement of the + libraries wrong. Also added support for debug library names. + + Changed the order of Libs.private libraries so that dependencies come after + dependents. Did not move the compression libraries into Requires.private + because there was not a way to determine if the compression libraries had + supported pkconfig files. Still recommend that the CMake config file method + be used for building projects with CMake. + + (ADB - 2023/02/16 GH-1546,GH-2259) + - Remove Javadoc generation The use of doxygen now supersedes the requirement to build javadocs. 
We do not diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 355881d72f3..585367210e9 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1359,20 +1359,30 @@ set (_PKG_CONFIG_LIBDIR \${exec_prefix}/${HDF5_INSTALL_LIB_DIR}) set (_PKG_CONFIG_INCLUDEDIR \${prefix}/${HDF5_INSTALL_INCLUDE_DIR}) set (_PKG_CONFIG_LIBNAME "${HDF5_LIB_CORENAME}") set (_PKG_CONFIG_VERSION "${HDF5_PACKAGE_VERSION}") +set (PKGCONFIG_LIBNAME "${HDF5_LIB_CORENAME}") +if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") + set (PKGCONFIG_LIBNAME "${PKGCONFIG_LIBNAME}${CMAKE_DEBUG_POSTFIX}") +endif () + +foreach (libs ${LINK_LIBS}) + set (_PKG_CONFIG_LIBS_PRIVATE "${_PKG_CONFIG_LIBS_PRIVATE} -l${libs}") +endforeach () -foreach (libs ${LINK_LIBS} ${LINK_COMP_LIBS}) +# The settings for the compression libs depends on if they have pkconfig support +# Assuming they don't +foreach (libs ${LINK_COMP_LIBS}) +# set (_PKG_CONFIG_REQUIRES_PRIVATE "${_PKG_CONFIG_REQUIRES_PRIVATE} -l${libs}") set (_PKG_CONFIG_LIBS_PRIVATE "${_PKG_CONFIG_LIBS_PRIVATE} -l${libs}") endforeach () if (NOT ONLY_SHARED_LIBS) - set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${HDF5_LIB_CORENAME}") + set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKGCONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) - set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${HDF5_LIB_CORENAME}") + set (_PKG_CONFIG_SH_LIBS "${_PKG_CONFIG_SH_LIBS} -l${PKGCONFIG_LIBNAME}") endif () set (_PKG_CONFIG_REQUIRES) -set (_PKG_CONFIG_REQUIRES_PRIVATE) configure_file ( ${HDF_CONFIG_DIR}/libhdf5.pc.in From 9c9d76d33cad744f047892340de30db3ef2232b1 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Fri, 17 Feb 2023 16:37:26 -0600 Subject: [PATCH 056/231] Develop toolchain lowercase names and updates (#2426) * lowercase the toolchain filenames * Change name of option define * Update CI with changed toolchain name * Correct Intel compiler names * Account for Intels name for compiler based on clang * Make icx default and rename classic to icc --- .github/workflows/main.yml | 2 +- config/cmake/HDFCXXCompilerFlags.cmake | 6 +++--- config/cmake/HDFCompilerFlags.cmake | 6 +++--- config/sanitizer/code-coverage.cmake | 12 ++++++------ config/sanitizer/sanitizers.cmake | 2 +- config/toolchain/ icc.cmake | 11 +++++++++++ config/toolchain/{GCC.cmake => gcc.cmake} | 0 config/toolchain/intel.cmake | 13 +++---------- config/toolchain/{PGI.cmake => pgi.cmake} | 0 9 files changed, 28 insertions(+), 24 deletions(-) create mode 100644 config/toolchain/ icc.cmake rename config/toolchain/{GCC.cmake => gcc.cmake} (100%) rename config/toolchain/{PGI.cmake => pgi.cmake} (100%) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f827ed96fe6..0b09a947e10 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -91,7 +91,7 @@ jobs: parallel: OFF mirror_vfd: ON direct_vfd: ON - toolchain: "config/toolchain/GCC.cmake" + toolchain: "config/toolchain/gcc.cmake" generator: "-G Ninja" run_tests: true diff --git a/config/cmake/HDFCXXCompilerFlags.cmake b/config/cmake/HDFCXXCompilerFlags.cmake index b1e4b3c8012..e8a55ba779b 100644 --- a/config/cmake/HDFCXXCompilerFlags.cmake +++ b/config/cmake/HDFCXXCompilerFlags.cmake @@ -133,7 +133,7 @@ else () ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/cxx-general") ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/cxx-error-general") endif () - elseif (CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_CXX_COMPILER_ID 
MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/general") elseif (CMAKE_CXX_COMPILER_ID STREQUAL "PGI") list (APPEND HDF5_CMAKE_CXX_FLAGS "-Minform=inform") @@ -152,14 +152,14 @@ if (HDF5_ENABLE_DEV_WARNINGS) elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # Use the C warnings as CXX warnings are the same ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/developer-general") - elseif (CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/developer-general") endif () else () if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # Use the C warnings as CXX warnings are the same ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/no-developer-general") - elseif (CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/no-developer-general") endif () endif () diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake index 504c163f44d..7a9a62ebbdb 100644 --- a/config/cmake/HDFCompilerFlags.cmake +++ b/config/cmake/HDFCompilerFlags.cmake @@ -155,7 +155,7 @@ else () # gcc automatically inlines based on the optimization level # this is just a failsafe list (APPEND H5_CFLAGS "-finline-functions") - elseif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/general") ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/error-general") elseif (CMAKE_C_COMPILER_ID STREQUAL "PGI") @@ -183,13 +183,13 @@ if (HDF5_ENABLE_DEV_WARNINGS) endif () elseif (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 4.8) ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/developer-general") - elseif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/developer-general") endif () else () if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 4.8) ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/no-developer-general") - elseif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/no-developer-general") endif () endif () diff --git a/config/sanitizer/code-coverage.cmake b/config/sanitizer/code-coverage.cmake index 3a99024cf84..4a927af5ee8 100644 --- a/config/sanitizer/code-coverage.cmake +++ b/config/sanitizer/code-coverage.cmake @@ -106,7 +106,7 @@ if(CODE_COVERAGE AND NOT CODE_COVERAGE_ADDED) ${CMAKE_COVERAGE_OUTPUT_DIRECTORY} DEPENDS ccov-clean) - if(CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + if(CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") # Messages message(STATUS "Building with llvm Code Coverage Tools") @@ -212,7 +212,7 @@ function(target_code_coverage TARGET_NAME) 
if(CODE_COVERAGE) # Add code coverage instrumentation to the target's linker command - if(CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + if(CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") target_compile_options(${TARGET_NAME} PRIVATE -fprofile-instr-generate -fcoverage-mapping --coverage) set_property( @@ -234,7 +234,7 @@ function(target_code_coverage TARGET_NAME) # Add shared library to processing for 'all' targets if(target_type STREQUAL "SHARED_LIBRARY" AND target_code_coverage_ALL) - if(CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + if(CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") add_custom_target( ccov-run-${TARGET_NAME} COMMAND echo "-object=$" >> @@ -254,7 +254,7 @@ function(target_code_coverage TARGET_NAME) # For executables add targets to run and produce output if(target_type STREQUAL "EXECUTABLE") - if(CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?Cc]lang") + if(CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?Cc]lang") # If there are shared objects to also work with, generate the string to # add them here @@ -412,7 +412,7 @@ endfunction() # any subdirectories. To add coverage instrumentation to only specific targets, # use `target_code_coverage`. function(add_code_coverage) - if(CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + if(CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") add_compile_options(-fprofile-instr-generate -fcoverage-mapping --coverage) add_link_options(-fprofile-instr-generate -fcoverage-mapping --coverage) elseif(CMAKE_C_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") @@ -437,7 +437,7 @@ function(add_code_coverage_all_targets) "${multi_value_keywords}" ${ARGN}) if(CODE_COVERAGE) - if(CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + if(CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") # Merge the profile data for all of the run executables add_custom_target( diff --git a/config/sanitizer/sanitizers.cmake b/config/sanitizer/sanitizers.cmake index b06992fa600..4ba043bac08 100644 --- a/config/sanitizer/sanitizers.cmake +++ b/config/sanitizer/sanitizers.cmake @@ -30,7 +30,7 @@ endfunction() message(STATUS "USE_SANITIZER=${USE_SANITIZER}, CMAKE_C_COMPILER_ID=${CMAKE_C_COMPILER_ID}") if(USE_SANITIZER) - if(INTEL_CLANG OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + if(CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(UNIX) diff --git a/config/toolchain/ icc.cmake b/config/toolchain/ icc.cmake new file mode 100644 index 00000000000..97f6a64985f --- /dev/null +++ b/config/toolchain/ icc.cmake @@ -0,0 +1,11 @@ +# Uncomment the following to use cross-compiling +#set(CMAKE_SYSTEM_NAME Linux) + +set(CMAKE_COMPILER_VENDOR "intel") + +set(CMAKE_C_COMPILER icc) +set(CMAKE_CXX_COMPILER icpc) 
+set(CMAKE_Fortran_COMPILER ifort) + +# the following is used if cross-compiling +set(CMAKE_CROSSCOMPILING_EMULATOR "") diff --git a/config/toolchain/GCC.cmake b/config/toolchain/gcc.cmake similarity index 100% rename from config/toolchain/GCC.cmake rename to config/toolchain/gcc.cmake diff --git a/config/toolchain/intel.cmake b/config/toolchain/intel.cmake index ae1d2f8fa86..f8f60b28b46 100644 --- a/config/toolchain/intel.cmake +++ b/config/toolchain/intel.cmake @@ -3,16 +3,9 @@ set(CMAKE_COMPILER_VENDOR "intel") -if(USE_SANITIZER) - set(CMAKE_C_COMPILER icl) - set(CMAKE_CXX_COMPILER icl++) - set(CMAKE_Fortran_COMPILER ifort) - set(INTEL_CLANG ON) -else () - set(CMAKE_C_COMPILER icc) - set(CMAKE_CXX_COMPILER icpc) - set(CMAKE_Fortran_COMPILER ifort) -endif () +set(CMAKE_C_COMPILER icx) +set(CMAKE_CXX_COMPILER icpx) +set(CMAKE_Fortran_COMPILER ifx) # the following is used if cross-compiling set(CMAKE_CROSSCOMPILING_EMULATOR "") diff --git a/config/toolchain/PGI.cmake b/config/toolchain/pgi.cmake similarity index 100% rename from config/toolchain/PGI.cmake rename to config/toolchain/pgi.cmake From 3d5798bd55e1f3167447f3a9273ff2081e639226 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 20 Feb 2023 08:49:34 -0600 Subject: [PATCH 057/231] Fix issue with collective metadata writes of global heap data (#2480) --- release_docs/RELEASE.txt | 15 +++++ src/H5Cmpio.c | 8 +++ testpar/CMakeLists.txt | 2 +- testpar/Makefile.am | 2 +- testpar/{t_coll_md_read.c => t_coll_md.c} | 79 ++++++++++++++++++++++- testpar/testphdf5.c | 2 + testpar/testphdf5.h | 1 + 7 files changed, 105 insertions(+), 4 deletions(-) rename testpar/{t_coll_md_read.c => t_coll_md.c} (88%) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index d229a423c17..dfad6c2cc2a 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -207,6 +207,21 @@ Bug Fixes since HDF5-1.13.3 release =================================== Library ------- + - Fixed an issue with collective metadata writes of global heap data + + New test failures in parallel netCDF started occurring with debug + builds of HDF5 due to an assertion failure and this was reported in + GitHub issue #2433. The assertion failure began happening after the + collective metadata write pathway in the library was updated to use + vector I/O so that parallel-enabled HDF5 Virtual File Drivers (other + than the existing MPI I/O VFD) can support collective metadata writes. + + The assertion failure was fixed by updating collective metadata writes + to treat global heap metadata as raw data, as done elsewhere in the + library. 
+ + (JTH - 2023/02/16, GH #2433) + - Seg fault on file close h5debug fails at file close with core dump on a file that has an diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index 6af346c4b07..cfd078019bb 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -1003,6 +1003,10 @@ H5C__collective_write(H5F_t *f) bufs[0] = base_buf; types[0] = entry_ptr->type->mem_type; + /* Treat global heap as raw data */ + if (types[0] == H5FD_MEM_GHEAP) + types[0] = H5FD_MEM_DRAW; + node = H5SL_next(node); i = 1; while (node) { @@ -1016,6 +1020,10 @@ H5C__collective_write(H5F_t *f) bufs[i] = entry_ptr->image_ptr; types[i] = entry_ptr->type->mem_type; + /* Treat global heap as raw data */ + if (types[i] == H5FD_MEM_GHEAP) + types[i] = H5FD_MEM_DRAW; + /* Advance to next node & array location */ node = H5SL_next(node); i++; diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt index c950b1b6e48..d876a215126 100644 --- a/testpar/CMakeLists.txt +++ b/testpar/CMakeLists.txt @@ -17,7 +17,7 @@ set (testphdf5_SOURCES ${HDF5_TEST_PAR_SOURCE_DIR}/t_chunk_alloc.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_filter_read.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_prop.c - ${HDF5_TEST_PAR_SOURCE_DIR}/t_coll_md_read.c + ${HDF5_TEST_PAR_SOURCE_DIR}/t_coll_md.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_oflush.c ) diff --git a/testpar/Makefile.am b/testpar/Makefile.am index 0506961a269..539750a9e46 100644 --- a/testpar/Makefile.am +++ b/testpar/Makefile.am @@ -44,7 +44,7 @@ check_PROGRAMS = $(TEST_PROG_PARA) t_pflush1 t_pflush2 testphdf5_SOURCES=testphdf5.c t_dset.c t_file.c t_file_image.c t_mdset.c \ t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c \ - t_prop.c t_coll_md_read.c t_oflush.c + t_prop.c t_coll_md.c t_oflush.c # The tests all depend on the hdf5 library and the test library LDADD = $(LIBH5TEST) $(LIBHDF5) diff --git a/testpar/t_coll_md_read.c b/testpar/t_coll_md.c similarity index 88% rename from testpar/t_coll_md_read.c rename to testpar/t_coll_md.c index e4024286205..aa724867d34 100644 --- a/testpar/t_coll_md_read.c +++ b/testpar/t_coll_md.c @@ -11,8 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * A test suite to test HDF5's collective metadata read capabilities, as enabled - * by making a call to H5Pset_all_coll_metadata_ops(). + * A test suite to test HDF5's collective metadata read and write capabilities, + * as enabled by making a call to H5Pset_all_coll_metadata_ops() and/or + * H5Pset_coll_metadata_write(). */ #include "testphdf5.h" @@ -38,6 +39,10 @@ #define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue" #define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1 +#define COLL_GHEAP_WRITE_ATTR_NELEMS 10 +#define COLL_GHEAP_WRITE_ATTR_NAME "coll_gheap_write_attr" +#define COLL_GHEAP_WRITE_ATTR_DIMS 1 + /* * A test for issue HDFFV-10501. A parallel hang was reported which occurred * in linked-chunk I/O when collective metadata reads are enabled and some ranks @@ -524,3 +529,73 @@ test_link_chunk_io_sort_chunk_issue(void) VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); } + +/* + * A test for GitHub issue #2433 which causes a collective metadata write + * of global heap data. This test is meant to ensure that global heap data + * gets correctly mapped as raw data during a collective metadata write + * using vector I/O. + * + * An assertion exists in the library that should be triggered if global + * heap data is not correctly mapped as raw data. 
+ */ +void +test_collective_global_heap_write(void) +{ + const char *filename; + hsize_t attr_dims[COLL_GHEAP_WRITE_ATTR_DIMS]; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t vl_type = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hvl_t vl_data; + int mpi_rank, mpi_size; + int data_buf[COLL_GHEAP_WRITE_ATTR_NELEMS]; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + filename = GetTestParameters(); + + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + + /* + * Even though the testphdf5 framework currently sets collective metadata + * writes on the FAPL, we call it here just to be sure this is futureproof, + * since demonstrating this issue relies upon it. + */ + VRFY((H5Pset_coll_metadata_write(fapl_id, true) >= 0), "Set collective metadata writes succeeded"); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + attr_dims[0] = 1; + + fspace_id = H5Screate_simple(COLL_GHEAP_WRITE_ATTR_DIMS, attr_dims, NULL); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + vl_type = H5Tvlen_create(H5T_NATIVE_INT); + VRFY((vl_type >= 0), "H5Tvlen_create succeeded"); + + vl_data.len = COLL_GHEAP_WRITE_ATTR_NELEMS; + vl_data.p = data_buf; + + /* + * Create a variable-length attribute that will get written to the global heap + */ + attr_id = H5Acreate2(file_id, COLL_GHEAP_WRITE_ATTR_NAME, vl_type, fspace_id, H5P_DEFAULT, H5P_DEFAULT); + VRFY((attr_id >= 0), "H5Acreate2 succeeded"); + + for (size_t i = 0; i < COLL_GHEAP_WRITE_ATTR_NELEMS; i++) + data_buf[i] = (int)i; + + VRFY((H5Awrite(attr_id, vl_type, &vl_data) >= 0), "H5Awrite succeeded"); + + VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Tclose(vl_type) >= 0), "H5Sclose succeeded"); + VRFY((H5Aclose(attr_id) >= 0), "H5Aclose succeeded"); + VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); +} diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index cc32deeada2..e7befd63e27 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -502,6 +502,8 @@ main(int argc, char **argv) "Collective MD read with multi chunk I/O (H5D__chunk_addrmap)", PARATESTFILE); AddTest("LC_coll_MD_read", test_link_chunk_io_sort_chunk_issue, NULL, "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE); + AddTest("GH_coll_MD_wr", test_collective_global_heap_write, NULL, + "Collective MD write of global heap data", PARATESTFILE); /* Display testing information */ TestInfo(argv[0]); diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h index 14b82970b10..2a21ee6af0f 100644 --- a/testpar/testphdf5.h +++ b/testpar/testphdf5.h @@ -293,6 +293,7 @@ void test_dense_attr(void); void test_partial_no_selection_coll_md_read(void); void test_multi_chunk_io_addrmap_issue(void); void test_link_chunk_io_sort_chunk_issue(void); +void test_collective_global_heap_write(void); void test_oflush(void); /* commonly used prototypes */ From e1e820eae350939a6645a73258a2d898fdcf5b6c Mon Sep 17 00:00:00 2001 From: raylu-hdf <60487644+raylu-hdf@users.noreply.github.com> Date: Mon, 20 Feb 2023 08:53:52 -0600 Subject: [PATCH 058/231] OESS-330/HDFFV-11282: Improve failure message when a VOL connector can't be loaded (#2481) * OESS-330/HDFFV-11282: Improve failure message when a VOL connector can't be loaded. 
Made the error message clearer when the library can't find a plugin in the path table that contains the paths set by HDF5_PLUGIN_PATH and the default location and maybe set by H5PLxxx functions. * Committing clang-format changes --------- Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- src/H5PLint.c | 8 ++++++-- src/H5PLpath.c | 3 ++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/H5PLint.c b/src/H5PLint.c index 4c970c5bd16..62e00748d57 100644 --- a/src/H5PLint.c +++ b/src/H5PLint.c @@ -254,13 +254,17 @@ H5PL_load(H5PL_type_t type, const H5PL_key_t *key) /* If not found, try iterating through the path table to find an appropriate plugin */ if (!found) if (H5PL__find_plugin_in_path_table(&search_params, &found, &plugin_info) < 0) - HGOTO_ERROR(H5E_PLUGIN, H5E_CANTGET, NULL, "search in path table failed") + HGOTO_ERROR(H5E_PLUGIN, H5E_CANTGET, NULL, + "can't find plugin in the paths either set by HDF5_PLUGIN_PATH, or default location, " + "or set by H5PLxxx functions") /* Set the return value we found the plugin */ if (found) ret_value = plugin_info; else - HGOTO_ERROR(H5E_PLUGIN, H5E_NOTFOUND, NULL, "unable to locate plugin") + HGOTO_ERROR(H5E_PLUGIN, H5E_NOTFOUND, NULL, + "can't find plugin. Check either HDF5_VOL_CONNECTOR, HDF5_PLUGIN_PATH, default location, " + "or path set by H5PLxxx functions") done: FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5PLpath.c b/src/H5PLpath.c index 98428704d23..8ad00c3b4af 100644 --- a/src/H5PLpath.c +++ b/src/H5PLpath.c @@ -854,7 +854,8 @@ H5PL__find_plugin_in_path(const H5PL_search_params_t *search_params, hbool_t *fo /* Open the directory */ if (!(dirp = HDopendir(dir))) - HGOTO_ERROR(H5E_PLUGIN, H5E_OPENERROR, FAIL, "can't open directory: %s", dir) + HGOTO_ERROR(H5E_PLUGIN, H5E_OPENERROR, FAIL, "can't open directory (%s). Please verify its existence", + dir) /* Iterate through all entries in the directory */ while (NULL != (dp = HDreaddir(dirp))) { From 1f4b12d669f462d27935624b6357dd53b5d9bc92 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Tue, 21 Feb 2023 08:24:53 -0600 Subject: [PATCH 059/231] Remove space from toolchain name (#2482) * Remove space from toolchain name * Another space removed --- config/toolchain/{ icc.cmake => icc.cmake} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename config/toolchain/{ icc.cmake => icc.cmake} (100%) diff --git a/config/toolchain/ icc.cmake b/config/toolchain/icc.cmake similarity index 100% rename from config/toolchain/ icc.cmake rename to config/toolchain/icc.cmake From ab265e72f501d688d1242980ebbd4c9dcbf9f65f Mon Sep 17 00:00:00 2001 From: raylu-hdf <60487644+raylu-hdf@users.noreply.github.com> Date: Tue, 21 Feb 2023 10:38:00 -0600 Subject: [PATCH 060/231] HDFFV-10368 (OESS-319): Making two useful macros (H5L_EXT_FLAGS_ALL and H5L_EXT_VERSION) public (#2386) * HDFFV-10368 (OESS-319): Move two useful macros (H5L_EXT_FLAGS_ALL and H5L_EXT_VERSION) from H5Lpkg.h to H5Lpublic.h for public usage. The test in vol.c only envokes H5Lcreate_external using the passthru VOL. To test if the macros are public, one can use them in the link creation of the passthru VOL, such as printing out their values. * Committing clang-format changes * Moved the test for the external link public macros (H5L_EXT_FLAGS_ALL and H5L_EXT_VERSION) from vol.c to links.c. * Committing clang-format changes * Minor change: changed a pointer variable to an integer. 
* Committing clang-format changes * Moved two macros (H5L_EXT_VERSION and H5L_EXT_FLAGS_ALL) to H5Ldevelop.h for only developers to use. --------- Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- src/H5Ldevelop.h | 10 +++++ src/H5Lpkg.h | 6 --- test/links.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++ test/vol.c | 4 +- 4 files changed, 119 insertions(+), 8 deletions(-) diff --git a/src/H5Ldevelop.h b/src/H5Ldevelop.h index 34e50a0a3f6..148a5a57468 100644 --- a/src/H5Ldevelop.h +++ b/src/H5Ldevelop.h @@ -30,6 +30,16 @@ */ #define H5L_LINK_CLASS_T_VERS 1 +/** + * \brief Version of external link format + */ +#define H5L_EXT_VERSION 0 + +/** + * \brief Valid flags for external links + */ +#define H5L_EXT_FLAGS_ALL 0 + /*******************/ /* Public Typedefs */ /*******************/ diff --git a/src/H5Lpkg.h b/src/H5Lpkg.h index 6fd15e71e7b..25d9978df46 100644 --- a/src/H5Lpkg.h +++ b/src/H5Lpkg.h @@ -34,12 +34,6 @@ /* Package Private Macros */ /**************************/ -/* Version of external link format */ -#define H5L_EXT_VERSION 0 - -/* Valid flags for external links */ -#define H5L_EXT_FLAGS_ALL 0 - /****************************/ /* Package Private Typedefs */ /****************************/ diff --git a/test/links.c b/test/links.c index 0b815e26d8d..56912fe3c53 100644 --- a/test/links.c +++ b/test/links.c @@ -13716,6 +13716,112 @@ external_link_with_committed_datatype(hid_t fapl, hbool_t new_format) return FAIL; } /* end external_link_with_committed_datatype() */ +/*------------------------------------------------------------------------- + * Function: external_link_public_macros + * + * Purpose: Test public macros for external links + * + * Return: Success: 0 + * Failure: -1 + *------------------------------------------------------------------------- + */ +static int +external_link_public_macros(hid_t fapl, hbool_t new_format) +{ + hid_t fid = -1; /* File ID */ + H5L_info2_t linfo; /* Link information */ + char objname[NAME_BUF_SIZE]; /* Object name */ + char filename1[NAME_BUF_SIZE]; + char filename2[NAME_BUF_SIZE]; + unsigned flags; /* External link flags, packed as a bitmap */ + const char *file; /* File from external link */ + const char *path; /* Path from external link */ + + if (new_format) + TESTING("external link public macros (w/new group format)"); + else + TESTING("external link public macros"); + + /* Set up filenames */ + h5_fixname(FILENAME[3], fapl, filename1, sizeof filename1); + h5_fixname(FILENAME[4], fapl, filename2, sizeof filename2); + + /* Create file to point to */ + if ((fid = H5Fcreate(filename1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + TEST_ERROR; + + /* Close file */ + if (H5Fclose(fid) < 0) + TEST_ERROR; + + /* Check that external links are registered with the library */ + if (H5Lis_registered(H5L_TYPE_EXTERNAL) != TRUE) + TEST_ERROR; + + /* Create file with link to first file */ + if ((fid = H5Fcreate(filename2, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + TEST_ERROR; + + /* Create external link to object in first file */ + if (H5Lcreate_external(filename1, "/", fid, "ext_link", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + /* Check information for external link */ + if (H5Lget_info2(fid, "ext_link", &linfo, H5P_DEFAULT) < 0) + goto error; + + if (H5L_TYPE_EXTERNAL != linfo.type) { + H5_FAILED(); + HDputs(" Unexpected object type - should have been an external link"); + goto error; + } + + if (H5Lget_val(fid, "ext_link", objname, sizeof(objname), H5P_DEFAULT) < 0) + TEST_ERROR; + + if 
(H5Lunpack_elink_val(objname, linfo.u.val_size, &flags, &file, &path) < 0) + TEST_ERROR; + + if (HDstrcmp(file, filename1) != 0) { + H5_FAILED(); + HDputs(" External link file name incorrect"); + goto error; + } + + if (HDstrcmp(path, "/") != 0) { + H5_FAILED(); + HDputs(" External link path incorrect"); + goto error; + } + + /* External link version & flags */ + if (flags != ((H5L_EXT_VERSION << 4) | H5L_EXT_FLAGS_ALL)) { + H5_FAILED(); + HDputs(" External link version or flags are incorrect"); + goto error; + } + + /* Close first file */ + if (H5Fclose(fid) < 0) + TEST_ERROR; + + /* Check that all file IDs have been closed */ + if (H5I_nmembers(H5I_FILE) != 0) + TEST_ERROR; + H5F_sfile_assert_num(0); + + PASSED(); + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Fclose(fid); + } + H5E_END_TRY; + return FAIL; +} /* end external_link_public_macros() */ + /*------------------------------------------------------------------------- * Function: ud_hard_links * @@ -22717,6 +22823,7 @@ main(void) nerrors += external_dont_fail_to_source(my_fapl, new_format) < 0 ? 1 : 0; nerrors += external_open_twice(my_fapl, new_format) < 0 ? 1 : 0; nerrors += external_link_with_committed_datatype(my_fapl, new_format) < 0 ? 1 : 0; + nerrors += external_link_public_macros(my_fapl, new_format) < 0 ? 1 : 0; } /* with/without external file cache */ } diff --git a/test/vol.c b/test/vol.c index 4cfb3de6648..776248b96a9 100644 --- a/test/vol.c +++ b/test/vol.c @@ -28,7 +28,7 @@ #include "H5VLpkg.h" /* Virtual Object Layer */ /* Filename */ -const char *FILENAME[] = {"native_vol_test", NULL}; +const char *FILENAME[] = {"vol_test_file", NULL}; #define NATIVE_VOL_TEST_GROUP_NAME "test_group" #define NATIVE_VOL_TEST_DATASET_NAME "test_dataset" @@ -2285,7 +2285,7 @@ test_get_vol_name(void) H5E_END_TRY; return FAIL; -} /* end test_vol_cap_flags() */ +} /* end test_get_vol_name() */ /*------------------------------------------------------------------------- * Function: test_wrap_register() From 79aa46767cb64c98dedecf953b150f12fd426e78 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Sun, 26 Feb 2023 12:12:00 -0600 Subject: [PATCH 061/231] Subfiling VFD - add option to specify config file prefix (#2495) --- release_docs/RELEASE.txt | 12 +++ src/H5FDsubfiling/H5FDioc.c | 9 ++- src/H5FDsubfiling/H5FDsubfiling.c | 95 ++++++++++++++++++++++- src/H5FDsubfiling/H5FDsubfiling.h | 21 +++++ src/H5FDsubfiling/H5subfiling_common.c | 57 ++++++++++---- src/H5FDsubfiling/H5subfiling_common.h | 1 + testpar/t_subfiling_vfd.c | 102 +++++++++++++++++++++++-- 7 files changed, 273 insertions(+), 24 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index dfad6c2cc2a..5de8fc9fe32 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -89,6 +89,18 @@ New Features Library: -------- + - Added a Subfiling VFD configuration file prefix environment variable + + The Subfiling VFD now checks for values set in a new environment + variable "H5FD_SUBFILING_CONFIG_FILE_PREFIX" to determine if the + application has specified a pathname prefix to apply to the file + path for its configuration file. For example, this can be useful + for cases where the application wishes to write subfiles to a + machine's node-local storage while placing the subfiling configuration + file on a file system readable by all machine nodes. 
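+
+      For illustration only (not taken from the HDF5 sources), a minimal
+      sketch of how an application might combine this environment variable
+      with the Subfiling VFD. It assumes an MPI-enabled HDF5 build with the
+      Subfiling VFD available; the prefix path and file name are
+      placeholders, error checking is omitted, and passing NULL to
+      H5Pset_fapl_subfiling() selects the driver's default configuration:
+
+          #include <stdlib.h>          /* setenv() (POSIX) */
+          #include <mpi.h>
+          #include "hdf5.h"
+          #include "H5FDsubfiling.h"   /* H5Pset_fapl_subfiling() */
+
+          int
+          main(int argc, char **argv)
+          {
+              hid_t fapl_id, file_id;
+              int   provided = 0;
+
+              /* The Subfiling VFD requires MPI_THREAD_MULTIPLE */
+              MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
+
+              /* Place the subfiling configuration file on a file system
+               * readable by all nodes, while subfiles may still be written
+               * to node-local storage (placeholder path) */
+              setenv("H5FD_SUBFILING_CONFIG_FILE_PREFIX", "/shared/scratch/subf-configs", 1);
+
+              fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+              H5Pset_mpi_params(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);
+              H5Pset_fapl_subfiling(fapl_id, NULL);   /* NULL => default subfiling config */
+
+              file_id = H5Fcreate("subf_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+              /* ... create datasets and write data ... */
+
+              H5Fclose(file_id);
+              H5Pclose(fapl_id);
+              MPI_Finalize();
+              return 0;
+          }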
+ + (JTH - 2023/02/22) + - Overhauled the Virtual Object Layer (VOL) The virtual object layer (VOL) was added in HDF5 1.12.0 but the initial diff --git a/src/H5FDsubfiling/H5FDioc.c b/src/H5FDsubfiling/H5FDioc.c index 152c045b0bd..2fd8b64104c 100644 --- a/src/H5FDsubfiling/H5FDioc.c +++ b/src/H5FDsubfiling/H5FDioc.c @@ -1455,6 +1455,7 @@ H5FD__ioc_del(const char *name, hid_t fapl) if (mpi_rank == 0) { int64_t read_n_subfiles = 0; int32_t n_subfiles = 0; + char *prefix_env = NULL; int num_digits = 0; if (HDstat(name, &st) < 0) @@ -1470,9 +1471,13 @@ H5FD__ioc_del(const char *name, hid_t fapl) H5_SUBFILING_GOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't allocate config file name buffer"); + /* Check if a prefix has been set for the configuration file name */ + prefix_env = HDgetenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX); + /* TODO: No support for subfile directory prefix currently */ - HDsnprintf(tmp_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, file_dirname, - base_filename, (uint64_t)st.st_ino); + /* TODO: Possibly try loading config file prefix from file before deleting */ + HDsnprintf(tmp_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, + prefix_env ? prefix_env : file_dirname, base_filename, (uint64_t)st.st_ino); if (NULL == (config_file = HDfopen(tmp_filename, "r"))) { if (ENOENT == errno) { diff --git a/src/H5FDsubfiling/H5FDsubfiling.c b/src/H5FDsubfiling/H5FDsubfiling.c index 5fcaddb52aa..afdf0739a6b 100644 --- a/src/H5FDsubfiling/H5FDsubfiling.c +++ b/src/H5FDsubfiling/H5FDsubfiling.c @@ -106,6 +106,8 @@ typedef struct H5FD_subfiling_t { uint64_t file_id; int64_t context_id; /* The value used to lookup a subfiling context for the file */ + hbool_t fail_to_encode; /* Used to check for failures from sb_get_size routine */ + char *file_dir; /* Directory where we find files */ char *file_path; /* The user defined filename */ @@ -145,6 +147,12 @@ typedef struct H5FD_subfiling_t { #define REGION_OVERFLOW(A, Z) \ (ADDR_OVERFLOW(A) || SIZE_OVERFLOW(Z) || HADDR_UNDEF == (A) + (Z) || (HDoff_t)((A) + (Z)) < (HDoff_t)(A)) +/* + * NOTE: Must be kept in sync with the private + * H5F_MAX_DRVINFOBLOCK_SIZE macro value for now + */ +#define H5FD_SUBFILING_MAX_DRV_INFO_SIZE 1024 + /* Prototypes */ static herr_t H5FD__subfiling_term(void); static hsize_t H5FD__subfiling_sb_size(H5FD_t *_file); @@ -679,8 +687,9 @@ H5FD__subfiling_validate_config(const H5FD_subfiling_config_t *fa) static hsize_t H5FD__subfiling_sb_size(H5FD_t *_file) { - H5FD_subfiling_t *file = (H5FD_subfiling_t *)_file; - hsize_t ret_value = 0; + subfiling_context_t *sf_context = NULL; + H5FD_subfiling_t *file = (H5FD_subfiling_t *)_file; + hsize_t ret_value = 0; HDassert(file); @@ -699,6 +708,24 @@ H5FD__subfiling_sb_size(H5FD_t *_file) /* Subfiling stripe count (encoded as int64_t for future) */ ret_value += sizeof(int64_t); + /* Subfiling config file prefix string length */ + ret_value += sizeof(uint64_t); + + /* + * Since this callback currently can't return any errors, we + * will set the "fail to encode" flag on the file if we fail + * to retrieve the context object here so we can check for + * errors later. 
+ */ + if (NULL == (sf_context = H5_get_subfiling_object(file->context_id))) { + file->fail_to_encode = TRUE; + } + else { + if (sf_context->config_file_prefix) { + ret_value += HDstrlen(sf_context->config_file_prefix) + 1; + } + } + /* Add superblock information from IOC file if necessary */ if (file->sf_file) { /* Encode the IOC's name into the subfiling information */ @@ -707,6 +734,16 @@ H5FD__subfiling_sb_size(H5FD_t *_file) ret_value += H5FD_sb_size(file->sf_file); } + /* + * Since the library doesn't currently properly check this, + * set the "fail to encode" flag if the message size is + * larger than the library's currently accepted max message + * size so that we don't try to encode the message and overrun + * a buffer. + */ + if (ret_value > H5FD_SUBFILING_MAX_DRV_INFO_SIZE) + file->fail_to_encode = TRUE; + H5_SUBFILING_FUNC_LEAVE; } /* end H5FD__subfiling_sb_size() */ @@ -725,9 +762,17 @@ H5FD__subfiling_sb_encode(H5FD_t *_file, char *name, unsigned char *buf) subfiling_context_t *sf_context = NULL; H5FD_subfiling_t *file = (H5FD_subfiling_t *)_file; uint8_t *p = (uint8_t *)buf; + uint64_t tmpu64; int64_t tmp64; int32_t tmp32; - herr_t ret_value = SUCCEED; + size_t prefix_len = 0; + herr_t ret_value = SUCCEED; + + /* Check if the "fail to encode flag" is set */ + if (file->fail_to_encode) + H5_SUBFILING_GOTO_ERROR( + H5E_VFL, H5E_CANTENCODE, FAIL, + "can't encode subfiling driver info message - message was too large or internal error occurred"); if (NULL == (sf_context = H5_get_subfiling_object(file->context_id))) H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get subfiling context object"); @@ -753,6 +798,21 @@ H5FD__subfiling_sb_encode(H5FD_t *_file, char *name, unsigned char *buf) tmp64 = sf_context->sf_num_subfiles; INT64ENCODE(p, tmp64); + /* Encode config file prefix string length */ + if (sf_context->config_file_prefix) { + prefix_len = HDstrlen(sf_context->config_file_prefix) + 1; + H5_CHECKED_ASSIGN(tmpu64, uint64_t, prefix_len, size_t); + } + else + tmpu64 = 0; + UINT64ENCODE(p, tmpu64); + + /* Encode config file prefix string */ + if (sf_context->config_file_prefix) { + HDmemcpy(p, sf_context->config_file_prefix, prefix_len); + p += prefix_len; + } + /* Encode IOC VFD configuration information if necessary */ if (file->sf_file) { char ioc_name[9]; @@ -786,10 +846,17 @@ H5FD__subfiling_sb_decode(H5FD_t *_file, const char *name, const unsigned char * subfiling_context_t *sf_context = NULL; H5FD_subfiling_t *file = (H5FD_subfiling_t *)_file; const uint8_t *p = (const uint8_t *)buf; + uint64_t tmpu64; int64_t tmp64; int32_t tmp32; herr_t ret_value = SUCCEED; + /* Check if we previously failed to encode the info */ + if (file->fail_to_encode) + H5_SUBFILING_GOTO_ERROR( + H5E_VFL, H5E_CANTDECODE, FAIL, + "can't decode subfiling driver info message - message wasn't encoded (or encoded improperly)"); + if (NULL == (sf_context = H5_get_subfiling_object(file->context_id))) H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get subfiling context object"); @@ -814,6 +881,25 @@ H5FD__subfiling_sb_decode(H5FD_t *_file, const char *name, const unsigned char * H5_CHECK_OVERFLOW(tmp64, int64_t, int32_t); file->fa.shared_cfg.stripe_count = (int32_t)tmp64; + /* Decode config file prefix string length */ + UINT64DECODE(p, tmpu64); + + /* Decode config file prefix string */ + if (tmpu64 > 0) { + if (!sf_context->config_file_prefix) { + if (NULL == (sf_context->config_file_prefix = HDmalloc(tmpu64))) + H5_SUBFILING_GOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "can't 
allocate space for config file prefix string"); + + HDmemcpy(sf_context->config_file_prefix, p, tmpu64); + + /* Just in case.. */ + sf_context->config_file_prefix[tmpu64 - 1] = '\0'; + } + + p += tmpu64; + } + if (file->sf_file) { char ioc_name[9]; @@ -1051,6 +1137,7 @@ H5FD__subfiling_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t ma file_ptr->context_id = -1; file_ptr->fa.ioc_fapl_id = H5I_INVALID_HID; file_ptr->ext_comm = MPI_COMM_NULL; + file_ptr->fail_to_encode = FALSE; /* Get the driver-specific file access properties */ if (NULL == (plist_ptr = (H5P_genplist_t *)H5I_object(fapl_id))) @@ -1240,6 +1327,8 @@ H5FD__subfiling_close_int(H5FD_subfiling_t *file_ptr) if (H5_mpi_comm_free(&file_ptr->ext_comm) < 0) H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "can't free MPI communicator"); + file_ptr->fail_to_encode = FALSE; + done: HDfree(file_ptr->file_path); file_ptr->file_path = NULL; diff --git a/src/H5FDsubfiling/H5FDsubfiling.h b/src/H5FDsubfiling/H5FDsubfiling.h index 22dd9067cb6..b1cf53a645f 100644 --- a/src/H5FDsubfiling/H5FDsubfiling.h +++ b/src/H5FDsubfiling/H5FDsubfiling.h @@ -148,6 +148,27 @@ * pathname. */ #define H5FD_SUBFILING_SUBFILE_PREFIX "H5FD_SUBFILING_SUBFILE_PREFIX" +/** + * \def H5FD_SUBFILING_CONFIG_FILE_PREFIX + * Macro for name of the environment variable that specifies a prefix + * to apply to the subfiling configuration filename. Useful for cases + * where the application wants to place the configuration file in a + * different directory than the default of putting it alongside the + * generated subfiles. For example, when writing to node-local storage + * one may wish to place the configuration file on a scratch file + * system readable by all nodes, while the subfiles are initially + * written to the node-local storage. + * + * The value set for this environment variable is interpreted as a + * pathname that must already exist. + * + * NOTE: As this prefix string will be encoded in the driver info + * message that gets written to the file, there is an upper + * limit of about ~900 single-byte characters for this string, + * though possibly less due to other information the driver + * may encode. Avoid long prefix names where possible. 
+ */ +#define H5FD_SUBFILING_CONFIG_FILE_PREFIX "H5FD_SUBFILING_CONFIG_FILE_PREFIX" /** * \enum H5FD_subfiling_ioc_select_t diff --git a/src/H5FDsubfiling/H5subfiling_common.c b/src/H5FDsubfiling/H5subfiling_common.c index 3e83cf53c41..b58c4d3c2b1 100644 --- a/src/H5FDsubfiling/H5subfiling_common.c +++ b/src/H5FDsubfiling/H5subfiling_common.c @@ -73,8 +73,8 @@ static herr_t record_fid_to_subfile(uint64_t file_id, int64_t subfile_context_id static void clear_fid_map_entry(uint64_t file_id, int64_t sf_context_id); static herr_t ioc_open_files(int64_t file_context_id, int file_acc_flags); static herr_t create_config_file(subfiling_context_t *sf_context, const char *base_filename, - const char *subfile_dir, hbool_t truncate_if_exists); -static herr_t open_config_file(const char *base_filename, const char *subfile_dir, uint64_t file_id, + const char *config_dir, const char *subfile_dir, hbool_t truncate_if_exists); +static herr_t open_config_file(const char *base_filename, const char *config_dir, uint64_t file_id, const char *mode, FILE **config_file_out); /*------------------------------------------------------------------------- @@ -383,6 +383,9 @@ H5_free_subfiling_object_int(subfiling_context_t *sf_context) HDfree(sf_context->subfile_prefix); sf_context->subfile_prefix = NULL; + HDfree(sf_context->config_file_prefix); + sf_context->config_file_prefix = NULL; + HDfree(sf_context->h5_filename); sf_context->h5_filename = NULL; @@ -721,6 +724,7 @@ init_subfiling(const char *base_filename, uint64_t file_id, H5FD_subfiling_param FILE *config_file = NULL; char *file_basename = NULL; char *subfile_dir = NULL; + char *prefix_env = NULL; int mpi_rank; int mpi_size; int mpi_code; @@ -748,6 +752,13 @@ init_subfiling(const char *base_filename, uint64_t file_id, H5FD_subfiling_param new_context->sf_node_comm = MPI_COMM_NULL; new_context->sf_group_comm = MPI_COMM_NULL; + /* Check if a prefix has been set for the configuration file name */ + prefix_env = HDgetenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX); + if (prefix_env) { + if (NULL == (new_context->config_file_prefix = HDstrdup(prefix_env))) + H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTCOPY, FAIL, "couldn't copy config file prefix string"); + } + /* * If there's an existing subfiling configuration file for * this file, read the stripe size and number of subfiles @@ -767,7 +778,12 @@ init_subfiling(const char *base_filename, uint64_t file_id, H5FD_subfiling_param } if (config[0] >= 0) { - if (open_config_file(file_basename, subfile_dir, file_id, "r", &config_file) < 0) + /* + * If a prefix has been specified, try to read the config file + * from there, otherwise look for it next to the generated subfiles. + */ + if (open_config_file(file_basename, prefix_env ? prefix_env : subfile_dir, file_id, "r", + &config_file) < 0) config[0] = -1; } @@ -2133,7 +2149,19 @@ ioc_open_files(int64_t file_context_id, int file_acc_flags) * check if we also need to create a config file. */ if ((file_acc_flags & O_CREAT) && (sf_context->topology->ioc_idx == 0)) { - if (create_config_file(sf_context, base, subfile_dir, (file_acc_flags & O_TRUNC)) < 0) + char *config_dir = NULL; + + /* + * If a config file prefix has been specified, place the + * config file there, otherwise place it next to the + * generated subfiles. 
+ */ + if (sf_context->config_file_prefix) + config_dir = sf_context->config_file_prefix; + else + config_dir = subfile_dir; + + if (create_config_file(sf_context, base, config_dir, subfile_dir, (file_acc_flags & O_TRUNC)) < 0) H5_SUBFILING_GOTO_ERROR(H5E_FILE, H5E_CANTCREATE, FAIL, "couldn't create subfiling configuration file"); } @@ -2174,8 +2202,8 @@ ioc_open_files(int64_t file_context_id, int file_acc_flags) *------------------------------------------------------------------------- */ static herr_t -create_config_file(subfiling_context_t *sf_context, const char *base_filename, const char *subfile_dir, - hbool_t truncate_if_exists) +create_config_file(subfiling_context_t *sf_context, const char *base_filename, const char *config_dir, + const char *subfile_dir, hbool_t truncate_if_exists) { hbool_t config_file_exists = FALSE; FILE *config_file = NULL; @@ -2186,6 +2214,7 @@ create_config_file(subfiling_context_t *sf_context, const char *base_filename, c HDassert(sf_context); HDassert(base_filename); + HDassert(config_dir); HDassert(subfile_dir); if (sf_context->h5_file_id == UINT64_MAX) @@ -2194,6 +2223,8 @@ create_config_file(subfiling_context_t *sf_context, const char *base_filename, c if (*base_filename == '\0') H5_SUBFILING_GOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "invalid base HDF5 filename '%s'", base_filename); + if (*config_dir == '\0') + config_dir = "."; if (*subfile_dir == '\0') subfile_dir = "."; @@ -2201,7 +2232,7 @@ create_config_file(subfiling_context_t *sf_context, const char *base_filename, c H5_SUBFILING_GOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate space for subfiling configuration filename"); - HDsnprintf(config_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, subfile_dir, + HDsnprintf(config_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir, base_filename, sf_context->h5_file_id); /* Determine whether a subfiling configuration file exists */ @@ -2226,7 +2257,7 @@ create_config_file(subfiling_context_t *sf_context, const char *base_filename, c if (NULL == (config_file = HDfopen(config_filename, "w+"))) H5_SUBFILING_SYS_GOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, - "couldn't open subfiling configuration file"); + "couldn't create/truncate subfiling configuration file"); if (NULL == (line_buf = HDmalloc(PATH_MAX))) H5_SUBFILING_GOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, @@ -2302,7 +2333,7 @@ create_config_file(subfiling_context_t *sf_context, const char *base_filename, c *------------------------------------------------------------------------- */ static herr_t -open_config_file(const char *base_filename, const char *subfile_dir, uint64_t file_id, const char *mode, +open_config_file(const char *base_filename, const char *config_dir, uint64_t file_id, const char *mode, FILE **config_file_out) { hbool_t config_file_exists = FALSE; @@ -2312,7 +2343,7 @@ open_config_file(const char *base_filename, const char *subfile_dir, uint64_t fi herr_t ret_value = SUCCEED; HDassert(base_filename); - HDassert(subfile_dir); + HDassert(config_dir); HDassert(file_id != UINT64_MAX); HDassert(mode); HDassert(config_file_out); @@ -2322,14 +2353,14 @@ open_config_file(const char *base_filename, const char *subfile_dir, uint64_t fi if (*base_filename == '\0') H5_SUBFILING_GOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "invalid base HDF5 filename '%s'", base_filename); - if (*subfile_dir == '\0') - subfile_dir = "."; + if (*config_dir == '\0') + config_dir = "."; if (NULL == (config_filename = HDmalloc(PATH_MAX))) 
H5_SUBFILING_GOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate space for subfiling configuration filename"); - HDsnprintf(config_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, subfile_dir, + HDsnprintf(config_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir, base_filename, file_id); /* Determine whether a subfiling configuration file exists */ diff --git a/src/H5FDsubfiling/H5subfiling_common.h b/src/H5FDsubfiling/H5subfiling_common.h index a5d6f612442..51d8b2289aa 100644 --- a/src/H5FDsubfiling/H5subfiling_common.h +++ b/src/H5FDsubfiling/H5subfiling_common.h @@ -208,6 +208,7 @@ typedef struct { int sf_group_size; /* IOC count (in sf_group_comm) */ int sf_group_rank; /* IOC rank (in sf_group_comm) */ char *subfile_prefix; /* If subfiles are node-local */ + char *config_file_prefix; /* Prefix added to config file name */ char *h5_filename; /* The user supplied file name */ void *ioc_data; /* Private data for underlying IOC */ sf_topology_t *topology; /* Pointer to our topology */ diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c index b65aef0b6c5..e1f9e05e061 100644 --- a/testpar/t_subfiling_vfd.c +++ b/testpar/t_subfiling_vfd.c @@ -35,6 +35,9 @@ /* The smallest Subfiling stripe size used for testing */ #define SUBFILING_MIN_STRIPE_SIZE 128 +/* Temporary test directory */ +#define SUBFILING_CONFIG_FILE_DIR "subfiling_config_file_dir" + #ifndef PATH_MAX #define PATH_MAX 4096 #endif @@ -76,6 +79,8 @@ static long long stripe_size_g = -1; static long ioc_per_node_g = -1; static int ioc_thread_pool_size_g = -1; +static char *config_dir = NULL; + int nerrors = 0; int curr_nerrors = 0; @@ -279,8 +284,8 @@ test_config_file(void) config_filename = HDmalloc(PATH_MAX); VRFY(config_filename, "HDmalloc succeeded"); - HDsnprintf(config_filename, PATH_MAX, H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, SUBF_FILENAME, - (uint64_t)file_info.st_ino); + HDsnprintf(config_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir, + SUBF_FILENAME, (uint64_t)file_info.st_ino); config_file = HDfopen(config_filename, "r"); VRFY(config_file, "HDfopen succeeded"); @@ -1636,8 +1641,8 @@ test_subfiling_h5fuse(void) VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); /* Generate name for configuration file */ - HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, SUBF_FILENAME, - (uint64_t)file_info.st_ino); + HDsnprintf(tmp_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir, + SUBF_FILENAME, (uint64_t)file_info.st_ino); args[0] = HDstrdup("env"); args[1] = HDstrdup("sh"); @@ -1751,14 +1756,29 @@ parse_subfiling_env_vars(void) if (ioc_thread_pool_size_g <= 0) ioc_thread_pool_size_g = -1; } + + if (NULL != (env_value = HDgetenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX))) { + HDassert(config_dir); + + HDstrncpy(config_dir, env_value, PATH_MAX); + + /* Just in case.. 
*/ + config_dir[PATH_MAX - 1] = '\0'; + + if (*config_dir == '\0') { + *config_dir = '.'; + *(config_dir + 1) = '\0'; + } + } } int main(int argc, char **argv) { unsigned seed; - int required = MPI_THREAD_MULTIPLE; - int provided = 0; + char *env_value = NULL; + int required = MPI_THREAD_MULTIPLE; + int provided = 0; HDcompile_assert(SUBFILING_MIN_STRIPE_SIZE <= H5FD_SUBFILING_DEFAULT_STRIPE_SIZE); @@ -1885,6 +1905,18 @@ main(int argc, char **argv) if (MAINPROCESS) HDprintf("Using seed: %u\n\n", seed); + /* Allocate buffer for possible config file directory specified */ + config_dir = HDmalloc(PATH_MAX); + if (!config_dir) { + if (MAINPROCESS) + HDprintf("couldn't allocate space for subfiling config file directory buffer\n"); + nerrors++; + goto exit; + } + + /* Initialize to current working directory for now */ + HDsnprintf(config_dir, PATH_MAX, "."); + /* Grab values from environment variables if set */ parse_subfiling_env_vars(); @@ -1967,6 +1999,57 @@ main(int argc, char **argv) } } + if (!(env_value = HDgetenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX))) { + int rand_value = 0; + + if (MAINPROCESS) + rand_value = rand() % 2; + + mpi_code_g = MPI_Bcast(&rand_value, 1, MPI_INT, 0, comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Bcast succeeded"); + + /* Randomly set config file prefix to either "." or a real + * directory to test both cases + */ + if (rand_value == 0) { + int mkdir_success = 0; + + if (MAINPROCESS) { + if ((HDmkdir(SUBFILING_CONFIG_FILE_DIR, (mode_t)0755) < 0) && (errno != EEXIST)) { + HDprintf("couldn't create temporary testing directory\n"); + mkdir_success = 0; + } + else + mkdir_success = 1; + } + + mpi_code_g = MPI_Bcast(&mkdir_success, 1, MPI_INT, 0, comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Bcast succeeded"); + + if (!mkdir_success) { + if (MAINPROCESS) + HDprintf("HDmkdir failed\n"); + nerrors++; + goto exit; + } + + if (HDsetenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX, SUBFILING_CONFIG_FILE_DIR, 1) < 0) { + if (MAINPROCESS) + HDprintf("HDsetenv failed\n"); + nerrors++; + goto exit; + } + } + else { + if (HDsetenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX, ".", 1) < 0) { + if (MAINPROCESS) + HDprintf("HDsetenv failed\n"); + nerrors++; + goto exit; + } + } + } + /* Grab values from environment variables */ parse_subfiling_env_vars(); @@ -2004,6 +2087,13 @@ main(int argc, char **argv) HDputs("All Subfiling VFD tests passed\n"); exit: + if (MAINPROCESS) { + if (HDrmdir(SUBFILING_CONFIG_FILE_DIR) < 0 && (errno != ENOENT)) { + HDprintf("couldn't remove temporary testing directory\n"); + nerrors++; + } + } + if (nerrors) { if (MAINPROCESS) HDprintf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors, nerrors > 1 ? 
"S" : ""); From d77c0482a43de88b8aecb11bd187353f0ca6c64b Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Sun, 26 Feb 2023 17:06:14 -0600 Subject: [PATCH 062/231] Add fetchcontent for compression libs and fix cmake config (#2487) * Add fetchcontent for compression libs and fix cmake config * MSDOS is a reserved define name * Add release note and update install doc for FetchContent * Add CI test for FetchContent * Use LINK_COMP_LIBS instead of STATIC_LIBRARY for depends * Use general link --- .github/workflows/codespell.yml | 2 +- .github/workflows/main.yml | 8 +- CMakeFilters.cmake | 117 ++-- CMakeLists.txt | 18 +- config/cmake/HDFLibMacros.cmake | 137 ++--- config/cmake/HDFMacros.cmake | 3 + config/cmake/LIBAEC/CMakeLists.txt | 519 ++++++++++++++++ config/cmake/LIBAEC/CPack.Info.plist.in | 26 + config/cmake/LIBAEC/config.h.in | 36 ++ .../LIBAEC/libaec-config-version.cmake.in | 42 ++ config/cmake/LIBAEC/libaec-config.cmake.in | 59 ++ config/cmake/ZLIB/CMakeLists.txt | 572 ++++++++++++++++++ config/cmake/ZLIB/CPack.Info.plist.in | 26 + config/cmake/ZLIB/zconf.h.in | 536 ++++++++++++++++ .../cmake/ZLIB/zlib-config-version.cmake.in | 42 ++ config/cmake/ZLIB/zlib-config.cmake.in | 58 ++ config/cmake/cacheinit.cmake | 15 +- config/toolchain/aarch64.cmake | 2 +- release_docs/INSTALL_CMake.txt | 48 +- release_docs/RELEASE.txt | 25 + 20 files changed, 2150 insertions(+), 141 deletions(-) create mode 100644 config/cmake/LIBAEC/CMakeLists.txt create mode 100644 config/cmake/LIBAEC/CPack.Info.plist.in create mode 100644 config/cmake/LIBAEC/config.h.in create mode 100644 config/cmake/LIBAEC/libaec-config-version.cmake.in create mode 100644 config/cmake/LIBAEC/libaec-config.cmake.in create mode 100644 config/cmake/ZLIB/CMakeLists.txt create mode 100644 config/cmake/ZLIB/CPack.Info.plist.in create mode 100644 config/cmake/ZLIB/zconf.h.in create mode 100644 config/cmake/ZLIB/zlib-config-version.cmake.in create mode 100644 config/cmake/ZLIB/zlib-config.cmake.in diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index 74f193a282f..db20884d29c 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -12,4 +12,4 @@ jobs: - uses: codespell-project/actions-codespell@master with: skip: ./bin/trace,./hl/tools/h5watch/h5watch.c,./tools/test/h5jam/tellub.c,./config/sanitizer/LICENSE,./tools/test/h5repack/testfiles/*.dat - ignore_words_list: isnt,inout,nd,parms,parm,ba,offsetP,ser,ois,had,fiter,fo,clude,refere,minnum,offsetp,creat,ans:,eiter,lastr,ans,isn't,ifset,sur,trun,dne,tthe,hda,filname,te,htmp,minnum,ake,gord,numer,ro,oce + ignore_words_list: isnt,inout,nd,parms,parm,ba,offsetP,ser,ois,had,fiter,fo,clude,refere,minnum,offsetp,creat,ans:,eiter,lastr,ans,isn't,ifset,sur,trun,dne,tthe,hda,filname,te,htmp,minnum,ake,gord,numer,ro,oce,msdos diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 0b09a947e10..07104f40efe 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -73,6 +73,8 @@ jobs: cpp: ON fortran: OFF java: ON + libaecfc: ON + zlibfc: ON parallel: OFF mirror_vfd: OFF direct_vfd: OFF @@ -88,6 +90,8 @@ jobs: cpp: ON fortran: ON java: ON + libaecfc: ON + zlibfc: ON parallel: OFF mirror_vfd: ON direct_vfd: ON @@ -145,6 +149,8 @@ jobs: cpp: ON fortran: OFF java: ON + libaecfc: ON + zlibfc: ON parallel: OFF mirror_vfd: ON direct_vfd: OFF @@ -485,7 +491,7 @@ jobs: run: | mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" - cmake ${{ 
matrix.generator }} -DCMAKE_BUILD_TYPE=${{ matrix.build_mode.cmake }} -DCMAKE_TOOLCHAIN_FILE=${{ matrix.toolchain }} -DBUILD_SHARED_LIBS=ON -DHDF5_ENABLE_ALL_WARNINGS=ON -DHDF5_ENABLE_PARALLEL:BOOL=${{ matrix.parallel }} -DHDF5_BUILD_CPP_LIB:BOOL=${{ matrix.cpp }} -DHDF5_BUILD_FORTRAN=${{ matrix.fortran }} -DHDF5_BUILD_JAVA=${{ matrix.java }} -DHDF5_ENABLE_MIRROR_VFD:BOOL=${{ matrix.mirror_vfd }} -DHDF5_ENABLE_DIRECT_VFD:BOOL=${{ matrix.direct_vfd }} $GITHUB_WORKSPACE + cmake ${{ matrix.generator }} -DCMAKE_BUILD_TYPE=${{ matrix.build_mode.cmake }} -DCMAKE_TOOLCHAIN_FILE=${{ matrix.toolchain }} -DBUILD_SHARED_LIBS=ON -DHDF5_ENABLE_ALL_WARNINGS=ON -DHDF5_ENABLE_PARALLEL:BOOL=${{ matrix.parallel }} -DHDF5_BUILD_CPP_LIB:BOOL=${{ matrix.cpp }} -DHDF5_BUILD_FORTRAN=${{ matrix.fortran }} -DHDF5_BUILD_JAVA=${{ matrix.java }} -DBUILD_SZIP_WITH_FETCHCONTENT=${{ matrix.libaecfc }} -DBUILD_ZLIB_WITH_FETCHCONTENT=${{ matrix.zlibfc }} -DHDF5_ENABLE_MIRROR_VFD:BOOL=${{ matrix.mirror_vfd }} -DHDF5_ENABLE_DIRECT_VFD:BOOL=${{ matrix.direct_vfd }} $GITHUB_WORKSPACE shell: bash if: (matrix.generator != 'autogen') && (! matrix.thread_safe.enabled) diff --git a/CMakeFilters.cmake b/CMakeFilters.cmake index 15221bf72bf..eb62071e073 100644 --- a/CMakeFilters.cmake +++ b/CMakeFilters.cmake @@ -11,16 +11,44 @@ # option (USE_LIBAEC "Use AEC library as SZip Filter" OFF) option (USE_LIBAEC_STATIC "Use static AEC library " OFF) +option (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 0) +option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0) -include (ExternalProject) -include (FetchContent) +option (BUILD_ZLIB_WITH_FETCHCONTENT "Use FetchContent to use original source files" OFF) +if (BUILD_ZLIB_WITH_FETCHCONTENT) + set (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 1) + if (NOT ZLIB_USE_LOCALCONTENT) + set (ZLIB_URL ${ZLIB_TGZ_ORIGPATH}/${ZLIB_TGZ_ORIGNAME}) + else () + set (ZLIB_URL ${TGZPATH}/${ZLIB_TGZ_ORIGNAME}) + endif () + if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0") + message (VERBOSE "Filter ZLIB file is ${ZLIB_URL}") + endif () +endif () + +option (BUILD_SZIP_WITH_FETCHCONTENT "Use FetchContent to use original source files" OFF) +if (BUILD_SZIP_WITH_FETCHCONTENT) + # Only libaec library is usable + set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE) + set (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 1) + if (NOT LIBAEC_USE_LOCALCONTENT) + set (SZIP_URL ${LIBAEC_TGZ_ORIGPATH}/${LIBAEC_TGZ_ORIGNAME}) + else () + set (SZIP_URL ${TGZPATH}/${LIBAEC_TGZ_ORIGNAME}) + endif () + if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0") + message (VERBOSE "Filter SZIP file is ${SZIP_URL}") + endif () +endif () +include (ExternalProject) #option (HDF5_ALLOW_EXTERNAL_SUPPORT "Allow External Library Building (NO GIT TGZ)" "NO") set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)") set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - option (ZLIB_USE_EXTERNAL "Use External Library Building for HDF5_ZLIB" 1) - option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 1) + set (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 1) + set (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 1) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") set (ZLIB_URL ${ZLIB_GIT_URL} CACHE STRING "Path to zlib git repository") set (ZLIB_BRANCH ${ZLIB_GIT_BRANCH}) @@ -30,14 +58,18 @@ if 
(HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MAT if (NOT TGZPATH) set (TGZPATH ${HDF5_SOURCE_DIR}) endif () - set (ZLIB_URL ${TGZPATH}/${ZLIB_TGZ_NAME}) + if (NOT BUILD_ZLIB_WITH_FETCHCONTENT) + set (ZLIB_URL ${TGZPATH}/${ZLIB_TGZ_NAME}) + endif () if (NOT EXISTS "${ZLIB_URL}") set (HDF5_ENABLE_Z_LIB_SUPPORT OFF CACHE BOOL "" FORCE) message (VERBOSE "Filter ZLIB file ${ZLIB_URL} not found") endif () - set (SZIP_URL ${TGZPATH}/${SZIP_TGZ_NAME}) - if (USE_LIBAEC) - set (SZIP_URL ${TGZPATH}/${SZAEC_TGZ_NAME}) + if (NOT BUILD_SZIP_WITH_FETCHCONTENT) + set (SZIP_URL ${TGZPATH}/${SZIP_TGZ_NAME}) + if (USE_LIBAEC) + set (SZIP_URL ${TGZPATH}/${SZAEC_TGZ_NAME}) + endif () endif () if (NOT EXISTS "${SZIP_URL}") set (HDF5_ENABLE_SZIP_SUPPORT OFF CACHE BOOL "" FORCE) @@ -59,25 +91,32 @@ if (HDF5_ENABLE_Z_LIB_SUPPORT) find_package (ZLIB NAMES ${ZLIB_PACKAGE_NAME}${HDF_PACKAGE_EXT} COMPONENTS static shared) if (NOT ZLIB_FOUND) find_package (ZLIB) # Legacy find - if (ZLIB_FOUND) - set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${ZLIB_LIBRARIES}) - endif () endif () - endif () - if (ZLIB_FOUND) - set (H5_HAVE_FILTER_DEFLATE 1) - set (H5_HAVE_ZLIB_H 1) - set (H5_HAVE_LIBZ 1) - set (H5_ZLIB_HEADER "zlib.h") - set (ZLIB_INCLUDE_DIR_GEN ${ZLIB_INCLUDE_DIR}) - set (ZLIB_INCLUDE_DIRS ${ZLIB_INCLUDE_DIRS} ${ZLIB_INCLUDE_DIR}) + if (ZLIB_FOUND) + set (H5_HAVE_FILTER_DEFLATE 1) + set (H5_HAVE_ZLIB_H 1) + set (H5_HAVE_LIBZ 1) + set (H5_ZLIB_HEADER "zlib.h") + set (ZLIB_INCLUDE_DIR_GEN ${ZLIB_INCLUDE_DIR}) + set (ZLIB_INCLUDE_DIRS ${ZLIB_INCLUDE_DIRS} ${ZLIB_INCLUDE_DIR}) + set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${ZLIB_LIBRARIES}) + endif () else () - if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") + if (BUILD_ZLIB_WITH_FETCHCONTENT) + # Only tgz files available + ORIGINAL_ZLIB_LIBRARY ("TGZ") + set (H5_HAVE_FILTER_DEFLATE 1) + set (H5_HAVE_ZLIB_H 1) + set (H5_HAVE_LIBZ 1) + message (VERBOSE "HDF5_ZLIB is built from fetch content") + set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${ZLIB_STATIC_LIBRARY}) + elseif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") EXTERNAL_ZLIB_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT}) set (H5_HAVE_FILTER_DEFLATE 1) set (H5_HAVE_ZLIB_H 1) set (H5_HAVE_LIBZ 1) message (VERBOSE "Filter HDF5_ZLIB is built") + set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${ZLIB_STATIC_LIBRARY}) else () message (FATAL_ERROR " ZLib is Required for ZLib support in HDF5") endif () @@ -91,7 +130,6 @@ if (HDF5_ENABLE_Z_LIB_SUPPORT) if (H5_HAVE_FILTER_DEFLATE) set (EXTERNAL_FILTERS "${EXTERNAL_FILTERS} DEFLATE") endif () - set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${ZLIB_STATIC_LIBRARY}) INCLUDE_DIRECTORIES (${ZLIB_INCLUDE_DIRS}) message (VERBOSE "Filter HDF5_ZLIB is ON") endif () @@ -111,25 +149,36 @@ if (HDF5_ENABLE_SZIP_SUPPORT) set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_LIBRARIES}) endif () endif () - if (NOT SZIP_FOUND) find_package (SZIP NAMES ${SZIP_PACKAGE_NAME}${HDF_PACKAGE_EXT} COMPONENTS static shared) if (NOT SZIP_FOUND) find_package (SZIP) # Legacy find - if (SZIP_FOUND) - set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_LIBRARIES}) - endif () + endif () + if (SZIP_FOUND) + set (H5_HAVE_FILTER_SZIP 1) + set (H5_HAVE_SZLIB_H 1) + set (H5_HAVE_LIBSZ 1) + set (SZIP_INCLUDE_DIR_GEN ${SZIP_INCLUDE_DIR}) + set (SZIP_INCLUDE_DIRS ${SZIP_INCLUDE_DIRS} ${SZIP_INCLUDE_DIR}) + set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_LIBRARIES}) endif () endif () - endif () - if (SZIP_FOUND) - set (H5_HAVE_FILTER_SZIP 1) - set (H5_HAVE_SZLIB_H 1) 
- set (H5_HAVE_LIBSZ 1) - set (SZIP_INCLUDE_DIR_GEN ${SZIP_INCLUDE_DIR}) - set (SZIP_INCLUDE_DIRS ${SZIP_INCLUDE_DIRS} ${SZIP_INCLUDE_DIR}) else () - if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") + if (BUILD_SZIP_WITH_FETCHCONTENT) + # Only tgz files available + ORIGINAL_SZIP_LIBRARY ("TGZ" ${HDF5_ENABLE_SZIP_ENCODING}) + set (H5_HAVE_FILTER_SZIP 1) + set (H5_HAVE_SZLIB_H 1) + set (H5_HAVE_LIBSZ 1) + message (VERBOSE "SZIP is built from fetch content") + if (USE_LIBAEC) + message (VERBOSE "... with library AEC") + set (SZIP_PACKAGE_NAME ${LIBAEC_PACKAGE_NAME}) + else () + set (SZIP_PACKAGE_NAME ${SZIP_PACKAGE_NAME}) + endif () + set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_STATIC_LIBRARY}) + elseif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") EXTERNAL_SZIP_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT} ${HDF5_ENABLE_SZIP_ENCODING}) set (H5_HAVE_FILTER_SZIP 1) set (H5_HAVE_SZLIB_H 1) @@ -141,11 +190,11 @@ if (HDF5_ENABLE_SZIP_SUPPORT) else () set (SZIP_PACKAGE_NAME ${SZIP_PACKAGE_NAME}) endif () + set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_STATIC_LIBRARY}) else () message (FATAL_ERROR "SZIP is Required for SZIP support in HDF5") endif () endif () - set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_STATIC_LIBRARY}) INCLUDE_DIRECTORIES (${SZIP_INCLUDE_DIRS}) message (VERBOSE "Filter SZIP is ON") if (H5_HAVE_FILTER_SZIP) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1c85965fa05..46ef959e55a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -840,11 +840,11 @@ option (HDF5_PACKAGE_EXTLIBS "CPACK - include external libraries" OFF) if (NOT HDF5_EXTERNALLY_CONFIGURED) if (HDF5_PACKAGE_EXTLIBS) set (HDF5_NO_PACKAGES OFF CACHE BOOL "CPACK - Disable packaging" FORCE) - if (HDF5_ENABLE_Z_LIB_SUPPORT AND ZLIB_FOUND) + if (HDF5_ENABLE_Z_LIB_SUPPORT AND ZLIB_FOUND AND NOT BUILD_ZLIB_WITH_FETCHCONTENT) PACKAGE_ZLIB_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT}) endif () - if (HDF5_ENABLE_SZIP_SUPPORT AND SZIP_FOUND) + if (HDF5_ENABLE_SZIP_SUPPORT AND SZIP_FOUND AND NOT BUILD_SZIP_WITH_FETCHCONTENT) PACKAGE_SZIP_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT}) endif () endif () @@ -910,20 +910,12 @@ endif () add_subdirectory (src) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - if (ZLIB_FOUND AND ZLIB_USE_EXTERNAL) + if ((ZLIB_FOUND AND ZLIB_USE_EXTERNAL) OR (SZIP_FOUND AND SZIP_USE_EXTERNAL)) if (NOT ONLY_SHARED_LIBS) - add_dependencies (${HDF5_LIB_TARGET} HDF5_ZLIB) + add_dependencies (${HDF5_LIB_TARGET} ${LINK_COMP_LIBS}) endif () if (BUILD_SHARED_LIBS) - add_dependencies (${HDF5_LIBSH_TARGET} HDF5_ZLIB) - endif () - endif () - if (SZIP_FOUND AND SZIP_USE_EXTERNAL) - if (NOT ONLY_SHARED_LIBS) - add_dependencies (${HDF5_LIB_TARGET} SZIP) - endif () - if (BUILD_SHARED_LIBS) - add_dependencies (${HDF5_LIBSH_TARGET} SZIP) + add_dependencies (${HDF5_LIBSH_TARGET} ${LINK_COMP_LIBS}) endif () endif () endif () diff --git a/config/cmake/HDFLibMacros.cmake b/config/cmake/HDFLibMacros.cmake index 51e16c169f0..d2f2660d0a2 100644 --- a/config/cmake/HDFLibMacros.cmake +++ b/config/cmake/HDFLibMacros.cmake @@ -10,90 +10,77 @@ # help@hdfgroup.org. 
# #------------------------------------------------------------------------------- -macro (EXTERNAL_JPEG_LIBRARY compress_type jpeg_pic) - # May need to build JPEG with PIC on x64 machines with gcc - # Need to use CMAKE_ANSI_CFLAGS define so that compiler test works - +macro (ORIGINAL_ZLIB_LIBRARY compress_type) if (${compress_type} MATCHES "GIT") - EXTERNALPROJECT_ADD (JPEG - GIT_REPOSITORY ${JPEG_URL} - GIT_TAG ${JPEG_BRANCH} - INSTALL_COMMAND "" - CMAKE_ARGS - -DBUILD_SHARED_LIBS:BOOL=OFF - -DJPEG_PACKAGE_EXT:STRING=${HDF_PACKAGE_EXT} - -DJPEG_EXTERNALLY_CONFIGURED:BOOL=OFF - -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} - -DCMAKE_DEBUG_POSTFIX:STRING=${CMAKE_DEBUG_POSTFIX} - -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_INSTALL_PREFIX} - -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} - -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} - -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} - -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY} - -DCMAKE_ANSI_CFLAGS:STRING=${jpeg_pic} - -DHDF_USE_GNU_DIRS:STRING=${HDF5_USE_GNU_DIRS} - -DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES} - -DCMAKE_TOOLCHAIN_FILE:STRING=${CMAKE_TOOLCHAIN_FILE} - -DPACKAGE_NAMESPACE=${HDF_PACKAGE_NAMESPACE} + FetchContent_Declare (HDF5_ZLIB + GIT_REPOSITORY ${ZLIB_URL} + GIT_TAG ${ZLIB_BRANCH} ) elseif (${compress_type} MATCHES "TGZ") - EXTERNALPROJECT_ADD (JPEG - URL ${JPEG_URL} - URL_MD5 "" - INSTALL_COMMAND "" - CMAKE_ARGS - -DBUILD_SHARED_LIBS:BOOL=OFF - -DJPEG_PACKAGE_EXT:STRING=${HDF_PACKAGE_EXT} - -DJPEG_EXTERNALLY_CONFIGURED:BOOL=OFF - -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} - -DCMAKE_DEBUG_POSTFIX:STRING=${CMAKE_DEBUG_POSTFIX} - -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_INSTALL_PREFIX} - -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} - -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} - -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} - -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY} - -DCMAKE_ANSI_CFLAGS:STRING=${jpeg_pic} - -DHDF_USE_GNU_DIRS:STRING=${HDF5_USE_GNU_DIRS} - -DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES} - -DCMAKE_TOOLCHAIN_FILE:STRING=${CMAKE_TOOLCHAIN_FILE} - -DPACKAGE_NAMESPACE=${HDF_PACKAGE_NAMESPACE} + FetchContent_Declare (HDF5_ZLIB + URL ${ZLIB_URL} + URL_HASH "" ) endif () - externalproject_get_property (JPEG BINARY_DIR SOURCE_DIR) + FetchContent_GetProperties(HDF5_ZLIB) + if(NOT zlib_POPULATED) + FetchContent_Populate(HDF5_ZLIB) + + # Copy an additional/replacement files into the populated source + file(COPY ${HDF_RESOURCES_DIR}/ZLIB/CMakeLists.txt DESTINATION ${hdf5_zlib_SOURCE_DIR}) + + add_subdirectory(${hdf5_zlib_SOURCE_DIR} ${hdf5_zlib_BINARY_DIR}) + endif() -##include (${BINARY_DIR}/${JPEG_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake) -# Create imported target jpeg-static - add_library(${HDF_PACKAGE_NAMESPACE}jpeg-static STATIC IMPORTED) - HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}jpeg-static "jpeg" STATIC "") - add_dependencies (${HDF_PACKAGE_NAMESPACE}jpeg-static JPEG) - set (JPEG_STATIC_LIBRARY "${HDF_PACKAGE_NAMESPACE}jpeg-static") - set (JPEG_LIBRARIES ${JPEG_STATIC_LIBRARY}) + add_library(${HDF_PACKAGE_NAMESPACE}zlib-static ALIAS zlib-static) + set (ZLIB_STATIC_LIBRARY "${HDF_PACKAGE_NAMESPACE}zlib-static") + set (ZLIB_LIBRARIES ${ZLIB_STATIC_LIBRARY}) - set (JPEG_INCLUDE_DIR_GEN "${BINARY_DIR}") - set (JPEG_INCLUDE_DIR "${SOURCE_DIR}/src") - set (JPEG_FOUND 1) - set 
(JPEG_INCLUDE_DIRS ${JPEG_INCLUDE_DIR_GEN} ${JPEG_INCLUDE_DIR}) + set (ZLIB_INCLUDE_DIR_GEN "${hdf5_zlib_BINARY_DIR}") + set (ZLIB_INCLUDE_DIR "${hdf5_zlib_SOURCE_DIR}") + set (ZLIB_FOUND 1) + set (ZLIB_INCLUDE_DIRS ${ZLIB_INCLUDE_DIR_GEN} ${ZLIB_INCLUDE_DIR}) endmacro () #------------------------------------------------------------------------------- -macro (PACKAGE_JPEG_LIBRARY compress_type) - add_custom_target (JPEG-GenHeader-Copy ALL - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${JPEG_INCLUDE_DIR_GEN}/jconfig.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ - COMMENT "Copying ${JPEG_INCLUDE_DIR_GEN}/jconfig.h to ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/" - ) - set (EXTERNAL_HEADER_LIST ${EXTERNAL_HEADER_LIST} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/jconfig.h) - if (${compress_type} MATCHES "GIT" OR ${compress_type} MATCHES "TGZ") - add_dependencies (JPEG-GenHeader-Copy JPEG) +macro (ORIGINAL_SZIP_LIBRARY compress_type encoding) + # Only libaec library is usable + if (${compress_type} MATCHES "GIT") + FetchContent_Declare (SZIP + GIT_REPOSITORY ${SZIP_URL} + GIT_TAG ${SZIP_BRANCH} + ) + elseif (${compress_type} MATCHES "TGZ") + FetchContent_Declare (SZIP + URL ${SZIP_URL} + URL_HASH "" + ) endif () + FetchContent_GetProperties(SZIP) + if(NOT szip_POPULATED) + FetchContent_Populate(SZIP) + + # Copy an additional/replacement files into the populated source + file(COPY ${HDF_RESOURCES_DIR}/LIBAEC/CMakeLists.txt DESTINATION ${szip_SOURCE_DIR}) + + add_subdirectory(${szip_SOURCE_DIR} ${szip_BINARY_DIR}) + endif() + + set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE) + add_library (${HDF_PACKAGE_NAMESPACE}szaec-static ALIAS szaec-static) + add_library (${HDF_PACKAGE_NAMESPACE}aec-static ALIAS aec-static) + set (SZIP_STATIC_LIBRARY "${HDF_PACKAGE_NAMESPACE}szaec-static;${HDF_PACKAGE_NAMESPACE}aec-static") + set (SZIP_LIBRARIES ${SZIP_STATIC_LIBRARY}) + + set (SZIP_INCLUDE_DIR_GEN "${szip_BINARY_DIR}") + set (SZIP_INCLUDE_DIR "${szip_SOURCE_DIR}/include") + set (SZIP_FOUND 1) + set (SZIP_INCLUDE_DIRS ${SZIP_INCLUDE_DIR_GEN} ${SZIP_INCLUDE_DIR}) endmacro () #------------------------------------------------------------------------------- macro (EXTERNAL_SZIP_LIBRARY compress_type encoding) if (${compress_type} MATCHES "GIT") -# FetchContent_Declare (SZIP -# GIT_REPOSITORY ${SZIP_URL} -# GIT_TAG ${SZIP_BRANCH} -# ) EXTERNALPROJECT_ADD (SZIP GIT_REPOSITORY ${SZIP_URL} GIT_TAG ${SZIP_BRANCH} @@ -109,7 +96,6 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding) -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY} - -DCMAKE_ANSI_CFLAGS:STRING=${CMAKE_ANSI_CFLAGS} -DSZIP_ENABLE_ENCODING:BOOL=${encoding} -DHDF_USE_GNU_DIRS:STRING=${HDF5_USE_GNU_DIRS} -DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES} @@ -117,10 +103,6 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding) -DPACKAGE_NAMESPACE=${HDF_PACKAGE_NAMESPACE} ) elseif (${compress_type} MATCHES "TGZ") -# FetchContent_Declare (SZIP -# URL ${SZIP_URL} -# URL_HASH "" -# ) EXTERNALPROJECT_ADD (SZIP URL ${SZIP_URL} URL_MD5 "" @@ -136,7 +118,6 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding) -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY} - -DCMAKE_ANSI_CFLAGS:STRING=${CMAKE_ANSI_CFLAGS} -DSZIP_ENABLE_ENCODING:BOOL=${encoding} 
-DHDF_USE_GNU_DIRS:STRING=${HDF5_USE_GNU_DIRS} -DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES} @@ -145,11 +126,6 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding) ) endif () externalproject_get_property (SZIP BINARY_DIR SOURCE_DIR) -# FetchContent_GetProperties(SZIP) -# if(NOT SZIP_POPULATED) -# FetchContent_Populate(SZIP) -# add_subdirectory(${szip_SOURCE_DIR} ${szip_BINARY_DIR}) -# endif() # ##include (${BINARY_DIR}/${SZIP_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake) # Create imported target szip-static @@ -215,7 +191,6 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type) -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY} - -DCMAKE_ANSI_CFLAGS:STRING=${CMAKE_ANSI_CFLAGS} -DHDF_USE_GNU_DIRS:STRING=${HDF5_USE_GNU_DIRS} -DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES} -DCMAKE_TOOLCHAIN_FILE:STRING=${CMAKE_TOOLCHAIN_FILE} @@ -237,7 +212,6 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type) -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY} - -DCMAKE_ANSI_CFLAGS:STRING=${CMAKE_ANSI_CFLAGS} -DHDF_USE_GNU_DIRS:STRING=${HDF5_USE_GNU_DIRS} -DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES} -DCMAKE_TOOLCHAIN_FILE:STRING=${CMAKE_TOOLCHAIN_FILE} @@ -252,7 +226,6 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type) ##include (${BINARY_DIR}/${ZLIB_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake) # Create imported target zlib-static add_library(${HDF_PACKAGE_NAMESPACE}zlib-static STATIC IMPORTED) -# add_library(${HDF_PACKAGE_NAMESPACE}zlib-static ALIAS zlib-static) HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}zlib-static ${ZLIB_LIB_NAME} STATIC "") add_dependencies (${HDF_PACKAGE_NAMESPACE}zlib-static HDF5_ZLIB) set (ZLIB_STATIC_LIBRARY "${HDF_PACKAGE_NAMESPACE}zlib-static") diff --git a/config/cmake/HDFMacros.cmake b/config/cmake/HDFMacros.cmake index 1af513b479f..fc04d176cd8 100644 --- a/config/cmake/HDFMacros.cmake +++ b/config/cmake/HDFMacros.cmake @@ -479,7 +479,10 @@ macro (HDF_DIR_PATHS package_prefix) CACHE PATH "Install path prefix, prepended onto install directories." 
FORCE) set (CMAKE_GENERIC_PROGRAM_FILES) endif () + set (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT 0 CACHE PATH "" FORCE) endif () + + include (FetchContent) endmacro () macro (ADD_H5_FLAGS h5_flag_var infile) diff --git a/config/cmake/LIBAEC/CMakeLists.txt b/config/cmake/LIBAEC/CMakeLists.txt new file mode 100644 index 00000000000..212c9bfe886 --- /dev/null +++ b/config/cmake/LIBAEC/CMakeLists.txt @@ -0,0 +1,519 @@ +cmake_minimum_required (VERSION 3.10) +PROJECT (LIBAEC C) + +#----------------------------------------------------------------------------- +# Basic LIBAEC stuff here +#----------------------------------------------------------------------------- +set(CMAKE_C_STANDARD 99) + +set (LIBAEC_PACKAGE_EXT ${HDF_PACKAGE_EXT}) +set (HDF_USE_GNU_DIRS ${HDF5_USE_GNU_DIRS}) +set (CMAKE_OSX_ARCHITECTURES ${CMAKE_OSX_ARCHITECTURES}) +set (CMAKE_TOOLCHAIN_FILE ${CMAKE_TOOLCHAIN_FILE}) +set (PACKAGE_NAMESPACE ${HDF_PACKAGE_NAMESPACE}) +if (MINGW) + set (WINDOWS 1) # MinGW tries to imitate Windows +endif () +if (WINDOWS) + set (HAVE_SYS_STAT_H 1) + set (HAVE_SYS_TYPES_H 1) +endif () + +if (NOT WINDOWS) + TEST_BIG_ENDIAN (WORDS_BIGENDIAN) +endif () + +# Check for __builtin_clzll for faster decoding +check_c_source_compiles( + "int main(void)\n{return __builtin_clzll(1LL);}" + HAVE_DECL___BUILTIN_CLZLL) + +if(NOT HAVE_DECL___BUILTIN_CLZLL) + # With MSVC we can use _BitScanReverse64 + check_c_source_compiles( + "int main(void){unsigned long foo; unsigned __int64 bar=1LL; +return _BitScanReverse64(&foo, bar);}" + HAVE_BSR64) +endif() + +#----------------------------------------------------------------------------- +# Define some CMake variables for use later in the project +#----------------------------------------------------------------------------- +set (LIBAEC_RESOURCES_DIR ${HDF_RESOURCES_DIR}/LIBAEC) +set (LIBAEC_SRC_DIR ${LIBAEC_SOURCE_DIR}/src) +set (LIBAEC_INC_DIR ${LIBAEC_SOURCE_DIR}/include) + +#----------------------------------------------------------------------------- +# Set the core names of all the libraries +#----------------------------------------------------------------------------- +set (LIBAEC_LIB_CORENAME "aec") +set (SZIP_LIB_CORENAME "szaec") + +#----------------------------------------------------------------------------- +# Set the true names of all the libraries if customized by external project +#----------------------------------------------------------------------------- +set (LIBAEC_LIB_NAME "${LIBAEC_EXTERNAL_LIB_PREFIX}${LIBAEC_LIB_CORENAME}") +set (SZIP_LIB_NAME "${LIBAEC_EXTERNAL_LIB_PREFIX}${SZIP_LIB_CORENAME}") + +#----------------------------------------------------------------------------- +# Set the target names of all the libraries +#----------------------------------------------------------------------------- +set (LIBAEC_LIB_TARGET "${LIBAEC_LIB_CORENAME}-static") +set (SZIP_LIB_TARGET "${SZIP_LIB_CORENAME}-static") + +set (libaec_VERS_MAJOR 1) +set (libaec_VERS_MINOR 0) +set (libaec_VERS_RELEASE 6) + +#----------------------------------------------------------------------------- +set (LIBAEC_PACKAGE "libaec") +set (LIBAEC_PACKAGE_NAME "LIBAEC") +set (LIBAEC_PACKAGE_VERSION "${libaec_VERS_MAJOR}.${libaec_VERS_MINOR}") +set (LIBAEC_PACKAGE_VERSION_MAJOR "${libaec_VERS_MAJOR}.${libaec_VERS_MINOR}") +set (LIBAEC_PACKAGE_VERSION_MINOR "${libaec_VERS_RELEASE}") +set (LIBAEC_PACKAGE_STRING "${LIBAEC_PACKAGE_NAME} ${LIBAEC_PACKAGE_VERSION}") +set (LIBAEC_PACKAGE_TARNAME "${LIBAEC_PACKAGE_NAME}${LIBAEC_PACKAGE_EXT}") +set (LIBAEC_PACKAGE_URL 
"http://www.hdfgroup.org") +set (LIBAEC_PACKAGE_BUGREPORT "help@hdfgroup.org") +set (LIBAEC_PACKAGE_SOVERSION "${libaec_VERS_MAJOR}.${libaec_VERS_MINOR}.${libaec_VERS_RELEASE}") +set (LIBAEC_PACKAGE_SOVERSION_MAJOR "${libaec_VERS_MAJOR}") + + +HDF_DIR_PATHS(${LIBAEC_PACKAGE_NAME}) + +#----------------------------------------------------------------------------- +# Targets built within this project are exported at Install time for use +# by other projects +#----------------------------------------------------------------------------- +if (NOT LIBAEC_EXPORTED_TARGETS) + set (LIBAEC_EXPORTED_TARGETS "libaec-targets") +endif () + +#----------------------------------------------------------------------------- +# To include a library in the list exported by the project AT BUILD TIME, +# add it to this variable. This is NOT used by Make Install, but for projects +# which include SZIP as a sub-project within their build tree +#----------------------------------------------------------------------------- +set_global_variable (LIBAEC_LIBRARIES_TO_EXPORT "") + +#----------------------------------------------------------------------------- +# Mac OS X Options +#----------------------------------------------------------------------------- +if (LIBAEC_BUILD_FRAMEWORKS AND NOT BUILD_SHARED_LIBS) + set (BUILD_SHARED_LIBS ON CACHE BOOL "Build Shared Libraries") +endif () + +set (CMAKE_POSITION_INDEPENDENT_CODE ON) + +#----------------------------------------------------------------------------- +# When building utility executables that generate other (source) files : +# we make use of the following variables defined in the root CMakeLists. +# Certain systems may add /Debug or /Release to output paths +# and we need to call the executable from inside the CMake configuration +#----------------------------------------------------------------------------- +set (EXE_EXT "") +if (WIN32) + set (EXE_EXT ".exe") + add_definitions (-D_BIND_TO_CURRENT_VCLIBS_VERSION=1) + add_definitions (-D_CRT_SECURE_NO_WARNINGS) + add_definitions (-D_CONSOLE) +endif () + +if (MSVC) + set (CMAKE_MFC_FLAG 0) +endif () + +set (MAKE_SYSTEM) +if (CMAKE_BUILD_TOOL MATCHES "make") + set (MAKE_SYSTEM 1) +endif () + +set (CFG_INIT "/${CMAKE_CFG_INTDIR}") +if (MAKE_SYSTEM) + set (CFG_INIT "") +endif () + +#----------------------------------------------------------------------------- +# Compiler specific flags : Shouldn't there be compiler tests for these +#----------------------------------------------------------------------------- +if (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + set (CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS} -Wno-deprecated-non-prototype") +endif () + +#----------------------------------------------------------------------------- +# This is in here to help some of the GCC based IDES like Eclipse +# and code blocks parse the compiler errors and warnings better. 
+#----------------------------------------------------------------------------- +if (CMAKE_COMPILER_IS_GNUCC) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fmessage-length=0") +endif () + +#----------------------------------------------------------------------------- +# Generate the aec_config.h file containing user settings needed by compilation +#----------------------------------------------------------------------------- +configure_file (${LIBAEC_RESOURCES_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h) + +#----------------------------------------------------------------------------- +# All libs/tests/examples need the main include directories +#----------------------------------------------------------------------------- +INCLUDE_DIRECTORIES (${LIBAEC_BINARY_DIR} ${LIBAEC_SOURCE_DIR}/src ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) + +#----------------------------------------------------------------------------- +# Define LIBAEC Library +#----------------------------------------------------------------------------- +set(LIBAEC_SRCS + ${LIBAEC_SRC_DIR}/encode.c + ${LIBAEC_SRC_DIR}/encode_accessors.c + ${LIBAEC_SRC_DIR}/decode.c +) + +set (LIBAEC_PUBLIC_HEADERS + ${LIBAEC_INC_DIR}/libaec.h +) + +add_library (${LIBAEC_LIB_TARGET} STATIC ${LIBAEC_SRCS} ${LIBAEC_PUBLIC_HEADERS}) +target_include_directories (${LIBAEC_LIB_TARGET} PUBLIC + "$" + "$" + "$" + "$") +TARGET_C_PROPERTIES (${LIBAEC_LIB_TARGET} STATIC) +target_link_libraries (${LIBAEC_LIB_TARGET} PRIVATE ${LINK_LIBS}) +H5_SET_LIB_OPTIONS (${LIBAEC_LIB_TARGET} ${LIBAEC_LIB_NAME} STATIC 0) +set_target_properties (${LIBAEC_LIB_TARGET} PROPERTIES + VERSION 0.0.12 SOVERSION 0 + PUBLIC_HEADER "${LIBAEC_PUBLIC_HEADERS}" + LINKER_LANGUAGE C + INTERFACE_INCLUDE_DIRECTORIES "$/include>" +) +set_global_variable (LIBAEC_LIBRARIES_TO_EXPORT ${LIBAEC_LIB_TARGET}) +set (install_targets ${LIBAEC_LIB_TARGET}) + +set(SZIP_SRCS + ${LIBAEC_SRC_DIR}/sz_compat.c +) + +set (SZIP_PUBLIC_HEADERS + ${LIBAEC_INC_DIR}/szlib.h +) + +add_library (${SZIP_LIB_TARGET} STATIC ${SZIP_SRCS} ${SZIP_PUBLIC_HEADERS}) +target_include_directories (${SZIP_LIB_TARGET} PUBLIC "${LIBAEC_SOURCE_DIR};${LIBAEC_SOURCE_DIR}/include;${CMAKE_BINARY_DIR}") +TARGET_C_PROPERTIES (${SZIP_LIB_TARGET} STATIC) +target_link_libraries (${SZIP_LIB_TARGET} PRIVATE ${LIBAEC_LIB_TARGET}) +H5_SET_LIB_OPTIONS (${SZIP_LIB_TARGET} ${SZIP_LIB_NAME} STATIC 0) +set_target_properties (${SZIP_LIB_TARGET} PROPERTIES + VERSION 2.0.1 SOVERSION 2 + PUBLIC_HEADER "${SZIP_PUBLIC_HEADERS}" + LINKER_LANGUAGE C + INTERFACE_INCLUDE_DIRECTORIES "$/include>" +) +set_global_variable (LIBAEC_LIBRARIES_TO_EXPORT "${LIBAEC_LIBRARIES_TO_EXPORT};${SZIP_LIB_TARGET}") +set (install_targets ${install_targets} ${SZIP_LIB_TARGET}) + +#----------------------------------------------------------------------------- +# Add Target(s) to CMake Install for import into other projects +#----------------------------------------------------------------------------- +if (LIBAEC_EXPORTED_TARGETS) + INSTALL_TARGET_PDB (${LIBAEC_LIB_TARGET} ${LIBAEC_INSTALL_BIN_DIR} libraries) + + install ( + TARGETS + ${install_targets} + EXPORT + ${LIBAEC_EXPORTED_TARGETS} + LIBRARY DESTINATION ${LIBAEC_INSTALL_LIB_DIR} COMPONENT libraries + ARCHIVE DESTINATION ${LIBAEC_INSTALL_LIB_DIR} COMPONENT libraries + RUNTIME DESTINATION ${LIBAEC_INSTALL_BIN_DIR} COMPONENT libraries + FRAMEWORK DESTINATION ${LIBAEC_INSTALL_FWRK_DIR} COMPONENT libraries + PUBLIC_HEADER DESTINATION ${LIBAEC_INSTALL_INCLUDE_DIR} COMPONENT headers + ) +endif () + +include 
(CMakePackageConfigHelpers) + +#----------------------------------------------------------------------------- +# Check for Installation Utilities +#----------------------------------------------------------------------------- +if (WIN32) + set (PF_ENV_EXT "(x86)") + find_program (NSIS_EXECUTABLE NSIS.exe PATHS "$ENV{ProgramFiles}\\NSIS" "$ENV{ProgramFiles${PF_ENV_EXT}}\\NSIS") + if(NOT CPACK_WIX_ROOT) + file(TO_CMAKE_PATH "$ENV{WIX}" CPACK_WIX_ROOT) + endif () + find_program (WIX_EXECUTABLE candle PATHS "${CPACK_WIX_ROOT}/bin") +endif () + +#----------------------------------------------------------------------------- +# Add file(s) to CMake Install +#----------------------------------------------------------------------------- +#if (NOT LIBAEC_INSTALL_NO_DEVELOPMENT) +# install ( +# FILES ${PROJECT_BINARY_DIR}/aec_config.h +# DESTINATION ${LIBAEC_INSTALL_INCLUDE_DIR} +# COMPONENT headers +# ) +#endif () + +#----------------------------------------------------------------------------- +# Add Target(s) to CMake Install for import into other projects +#----------------------------------------------------------------------------- +if (NOT LIBAEC_EXTERNALLY_CONFIGURED) + install ( + EXPORT ${LIBAEC_EXPORTED_TARGETS} + DESTINATION ${LIBAEC_INSTALL_CMAKE_DIR} + FILE ${LIBAEC_PACKAGE}${LIBAEC_PACKAGE_EXT}-targets.cmake + NAMESPACE ${PACKAGE_NAMESPACE} + COMPONENT configinstall + ) +endif () + +#----------------------------------------------------------------------------- +# Export all exported targets to the build tree for use by parent project +#----------------------------------------------------------------------------- +if (NOT LIBAEC_EXTERNALLY_CONFIGURED) + export ( + TARGETS ${LIBAEC_LIBRARIES_TO_EXPORT} ${LIBAEC_LIB_DEPENDENCIES} + FILE ${LIBAEC_PACKAGE}${LIBAEC_PACKAGE_EXT}-targets.cmake + NAMESPACE ${PACKAGE_NAMESPACE} + ) + export (PACKAGE ${LIBAEC_PACKAGE}${LIBAEC_PACKAGE_EXT}) +endif () + +#----------------------------------------------------------------------------- +# Set includes needed for build +#----------------------------------------------------------------------------- +set (LIBAEC_INCLUDES_BUILD_TIME + ${LIBAEC_SRC_DIR} ${LIBAEC_INC_DIR} ${LIBAEC_BINARY_DIR} +) + +#----------------------------------------------------------------------------- +# Set variables needed for installation +#----------------------------------------------------------------------------- +set (LIBAEC_VERSION_STRING ${LIBAEC_PACKAGE_VERSION}) +set (LIBAEC_VERSION_MAJOR ${LIBAEC_PACKAGE_VERSION_MAJOR}) +set (LIBAEC_VERSION_MINOR ${LIBAEC_PACKAGE_VERSION_MINOR}) + +#----------------------------------------------------------------------------- +# Configure the libaec-config.cmake file for the build directory +#----------------------------------------------------------------------------- +set (INCLUDE_INSTALL_DIR ${LIBAEC_INSTALL_INCLUDE_DIR}) +set (SHARE_INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/${LIBAEC_INSTALL_CMAKE_DIR}" ) +set (CURRENT_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}" ) +configure_package_config_file ( + ${LIBAEC_RESOURCES_DIR}/libaec-config.cmake.in + "${LIBAEC_BINARY_DIR}/${LIBAEC_PACKAGE}${LIBAEC_PACKAGE_EXT}-config.cmake" + INSTALL_DESTINATION "${LIBAEC_INSTALL_CMAKE_DIR}" + PATH_VARS INCLUDE_INSTALL_DIR SHARE_INSTALL_DIR CURRENT_BUILD_DIR + INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}" +) + +#----------------------------------------------------------------------------- +# Configure the libaec-config.cmake file for the install directory 
+#----------------------------------------------------------------------------- +set (INCLUDE_INSTALL_DIR ${LIBAEC_INSTALL_INCLUDE_DIR}) +set (SHARE_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/${LIBAEC_INSTALL_CMAKE_DIR}" ) +set (CURRENT_BUILD_DIR "${CMAKE_INSTALL_PREFIX}") +configure_package_config_file ( + ${LIBAEC_RESOURCES_DIR}/libaec-config.cmake.in + "${LIBAEC_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${LIBAEC_PACKAGE}${LIBAEC_PACKAGE_EXT}-config.cmake" + INSTALL_DESTINATION "${LIBAEC_INSTALL_CMAKE_DIR}" + PATH_VARS INCLUDE_INSTALL_DIR SHARE_INSTALL_DIR CURRENT_BUILD_DIR +) +if (NOT LIBAEC_EXTERNALLY_CONFIGURED) + install ( + FILES ${LIBAEC_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${LIBAEC_PACKAGE}${LIBAEC_PACKAGE_EXT}-config.cmake + DESTINATION ${LIBAEC_INSTALL_CMAKE_DIR} + COMPONENT configinstall + ) +endif () + +#----------------------------------------------------------------------------- +# Configure the libaec-config-version.cmake file for the install directory +#----------------------------------------------------------------------------- +if (NOT LIBAEC_EXTERNALLY_CONFIGURED) + configure_file ( + ${LIBAEC_RESOURCES_DIR}/libaec-config-version.cmake.in + ${LIBAEC_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${LIBAEC_PACKAGE}${LIBAEC_PACKAGE_EXT}-config-version.cmake @ONLY + ) + install ( + FILES ${LIBAEC_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${LIBAEC_PACKAGE}${LIBAEC_PACKAGE_EXT}-config-version.cmake + DESTINATION ${LIBAEC_INSTALL_CMAKE_DIR} + COMPONENT configinstall + ) +endif () + +#----------------------------------------------------------------------------- +# Add Document File(s) to CMake Install +#----------------------------------------------------------------------------- +if (NOT LIBAEC_EXTERNALLY_CONFIGURED) + install ( + FILES + ${LIBAEC_SOURCE_DIR}/README.md + ${LIBAEC_SOURCE_DIR}/INSTALL.md + ${LIBAEC_SOURCE_DIR}/CHANGELOG.md + DESTINATION ${LIBAEC_INSTALL_DATA_DIR} + COMPONENT documents + ) +endif () + +#----------------------------------------------------------------------------- +# Check for Installation Utilities +#----------------------------------------------------------------------------- +if (WIN32) + set (PF_ENV_EXT "(x86)") + find_program (NSIS_EXECUTABLE NSIS.exe PATHS "$ENV{ProgramFiles}\\NSIS" "$ENV{ProgramFiles${PF_ENV_EXT}}\\NSIS") + if(NOT CPACK_WIX_ROOT) + file(TO_CMAKE_PATH "$ENV{WIX}" CPACK_WIX_ROOT) + endif() + find_program (WIX_EXECUTABLE candle PATHS "${CPACK_WIX_ROOT}/bin") +endif () + +#----------------------------------------------------------------------------- +# Set the cpack variables +#----------------------------------------------------------------------------- +if (NOT LIBAEC_EXTERNALLY_CONFIGURED) + set (CPACK_PACKAGE_VENDOR "HDF_Group") + set (CPACK_PACKAGE_NAME "${LIBAEC_PACKAGE_NAME}") + if (CDASH_LOCAL) + set (CPACK_PACKAGE_VERSION "${LIBAEC_PACKAGE_VERSION}") + else () + set (CPACK_PACKAGE_VERSION "${LIBAEC_PACKAGE_VERSION_STRING}") + endif () + set (CPACK_PACKAGE_VERSION_MAJOR "${LIBAEC_PACKAGE_VERSION_MAJOR}") + set (CPACK_PACKAGE_VERSION_MINOR "${LIBAEC_PACKAGE_VERSION_MINOR}") + set (CPACK_PACKAGE_VERSION_PATCH "") + set (CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE.txt") + set (CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.SZIP") + set (CPACK_RESOURCE_FILE_README "${CMAKE_CURRENT_SOURCE_DIR}/README.md") + set (CPACK_PACKAGE_RELOCATABLE TRUE) + set (CPACK_PACKAGE_DESCRIPTION_SUMMARY "libaec - Adaptive Entropy Coding library by Deutsches Klimarechenzentrum GmbH") + set (CPACK_PACKAGE_INSTALL_DIRECTORY 
"${CPACK_PACKAGE_VENDOR}/${CPACK_PACKAGE_NAME}/${CPACK_PACKAGE_VERSION}") + + set (CPACK_GENERATOR "TGZ") + if (WIN32) + set (CPACK_GENERATOR "ZIP") + + if (NSIS_EXECUTABLE) + list (APPEND CPACK_GENERATOR "NSIS") + endif () + # Installers for 32- vs. 64-bit CMake: + # - Root install directory (displayed to end user at installer-run time) + # - "NSIS package/display name" (text used in the installer GUI) + # - Registry key used to store info about the installation + set (CPACK_NSIS_PACKAGE_NAME "${LIBAEC_PACKAGE_STRING}") + if (CMAKE_CL_64) + set (CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES64") + set (CPACK_PACKAGE_INSTALL_REGISTRY_KEY "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION} (Win64)") + else () + set (CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES") + set (CPACK_PACKAGE_INSTALL_REGISTRY_KEY "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}") + endif () + set (CPACK_NSIS_DISPLAY_NAME "${CPACK_NSIS_PACKAGE_NAME}") + set (CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_VENDOR}\\\\${CPACK_PACKAGE_NAME}\\\\${CPACK_PACKAGE_VERSION}") + set (CPACK_NSIS_CONTACT "${LIBAEC_PACKAGE_BUGREPORT}") + set (CPACK_NSIS_MODIFY_PATH ON) + set (CPACK_NSIS_PACKAGE_NAME "LIBAEC ${LIBAEC_PACKAGE_VERSION}") + if (WIX_EXECUTABLE) + list (APPEND CPACK_GENERATOR "WIX") + endif () +#WiX variables + set (CPACK_WIX_UNINSTALL "1") + set (CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE.txt") + elseif (APPLE) + list (APPEND CPACK_GENERATOR "STGZ") + list (APPEND CPACK_GENERATOR "DragNDrop") + set (CPACK_COMPONENTS_ALL_IN_ONE_PACKAGE ON) + set (CPACK_PACKAGING_INSTALL_PREFIX "/${CPACK_PACKAGE_INSTALL_DIRECTORY}") + #set (CPACK_PACKAGE_ICON "${LIBAEC_RESOURCES_DIR}/hdf.icns") + + option (LIBAEC_PACK_MACOSX_FRAMEWORK "Package the LIBAEC Library in a Framework" OFF) + if (LIBAEC_PACK_MACOSX_FRAMEWORK AND LIBAEC_BUILD_FRAMEWORKS) + set (CPACK_BUNDLE_NAME "${LIBAEC_PACKAGE_STRING}") + set (CPACK_BUNDLE_LOCATION "/") # make sure CMAKE_INSTALL_PREFIX ends in / + set (CMAKE_INSTALL_PREFIX "/${CPACK_BUNDLE_NAME}.framework/Versions/${CPACK_PACKAGE_VERSION}/${CPACK_PACKAGE_NAME}/") + set (CPACK_SHORT_VERSION_STRING "${CPACK_PACKAGE_VERSION}") + #----------------------------------------------------------------------------- + # Configure the Info.plist file for the install bundle + #----------------------------------------------------------------------------- + configure_file ( + ${LIBAEC_RESOURCES_DIR}/CPack.Info.plist.in + ${LIBAEC_BINARY_DIR}/CMakeFiles/Info.plist @ONLY + ) + configure_file ( + ${LIBAEC_RESOURCES_DIR}/PkgInfo.in + ${LIBAEC_BINARY_DIR}/CMakeFiles/PkgInfo @ONLY + ) + install ( + FILES ${LIBAEC_BINARY_DIR}/CMakeFiles/PkgInfo + DESTINATION .. 
+ ) + endif () + else () + list (APPEND CPACK_GENERATOR "STGZ") + set (CPACK_PACKAGING_INSTALL_PREFIX "/${CPACK_PACKAGE_INSTALL_DIRECTORY}") + set (CPACK_COMPONENTS_ALL_IN_ONE_PACKAGE ON) + + set (CPACK_DEBIAN_PACKAGE_SECTION "Libraries") + set (CPACK_DEBIAN_PACKAGE_MAINTAINER "${LIBAEC_PACKAGE_BUGREPORT}") + +# list (APPEND CPACK_GENERATOR "RPM") + set (CPACK_RPM_PACKAGE_RELEASE "1") + set (CPACK_RPM_COMPONENT_INSTALL ON) + set (CPACK_RPM_PACKAGE_RELOCATABLE ON) + endif () + + # By default, do not warn when built on machines using only VS Express: + if (NOT DEFINED CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS) + set (CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS ON) + endif () + include (InstallRequiredSystemLibraries) + + set (CPACK_INSTALL_CMAKE_PROJECTS "${LIBAEC_BINARY_DIR};LIBAEC;libraries;/") + set (CPACK_INSTALL_CMAKE_PROJECTS "${LIBAEC_BINARY_DIR};LIBAEC;headers;/") + set (CPACK_INSTALL_CMAKE_PROJECTS "${LIBAEC_BINARY_DIR};LIBAEC;configinstall;/") + + set (CPACK_ALL_INSTALL_TYPES Full Developer User) + set (CPACK_INSTALL_TYPE_FULL_DISPLAY_NAME "Everything") + + set(CPACK_COMPONENTS_ALL libraries headers documents configinstall) + + include (CPack) + + cpack_add_component_group(Runtime) + + cpack_add_component_group(Documents + EXPANDED + DESCRIPTION "Release notes for libaec" + ) + + cpack_add_component_group(Development + EXPANDED + DESCRIPTION "All of the tools you'll need to develop applications" + ) + + cpack_add_component (libraries + DISPLAY_NAME "LIBAEC Libraries" + REQUIRED + GROUP Runtime + INSTALL_TYPES Full Developer User + ) + cpack_add_component (headers + DISPLAY_NAME "LIBAEC Headers" + DEPENDS libraries + GROUP Development + INSTALL_TYPES Full Developer + ) + cpack_add_component (documents + DISPLAY_NAME "LIBAEC Documents" + GROUP Documents + INSTALL_TYPES Full Developer + ) + cpack_add_component (configinstall + DISPLAY_NAME "LIBAEC CMake files" + DEPENDS libraries + GROUP Development + INSTALL_TYPES Full Developer User + ) + +endif () diff --git a/config/cmake/LIBAEC/CPack.Info.plist.in b/config/cmake/LIBAEC/CPack.Info.plist.in new file mode 100644 index 00000000000..08d371bd5d9 --- /dev/null +++ b/config/cmake/LIBAEC/CPack.Info.plist.in @@ -0,0 +1,26 @@ + + + + + CFBundleDevelopmentRegion + English + CFBundleExecutable + @CPACK_PACKAGE_FILE_NAME@ + CFBundleIconFile + @CPACK_BUNDLE_ICON@ + CFBundleIdentifier + org.@CPACK_PACKAGE_VENDOR@.@CPACK_PACKAGE_NAME@@CPACK_MODULE_VERSION_SUFFIX@ + CFBundleInfoDictionaryVersion + 6.0 + CFBundlePackageType + FMWK + CFBundleSignature + ???? + CFBundleVersion + @CPACK_PACKAGE_VERSIO@ + CFBundleShortVersionString + @CPACK_SHORT_VERSION_STRING@ + CSResourcesFileMapped + + + diff --git a/config/cmake/LIBAEC/config.h.in b/config/cmake/LIBAEC/config.h.in new file mode 100644 index 00000000000..04425480729 --- /dev/null +++ b/config/cmake/LIBAEC/config.h.in @@ -0,0 +1,36 @@ +#cmakedefine WORDS_BIGENDIAN +#cmakedefine01 HAVE_DECL___BUILTIN_CLZLL +#cmakedefine01 HAVE_BSR64 +#cmakedefine HAVE_SNPRINTF +#cmakedefine HAVE__SNPRINTF +#cmakedefine HAVE__SNPRINTF_S + +/* Name of package */ +#cmakedefine LIBAEC_PACKAGE "@LIBAEC_PACKAGE@" + +/* Define to the address where bug reports for this package should be sent. */ +#cmakedefine LIBAEC_PACKAGE_BUGREPORT "@LIBAEC_PACKAGE_BUGREPORT@" + +/* Define to the full name of this package. */ +#cmakedefine LIBAEC_PACKAGE_NAME "@LIBAEC_PACKAGE_NAME@" + +/* Define to the full name and version of this package. 
*/ +#cmakedefine LIBAEC_PACKAGE_STRING "@LIBAEC_PACKAGE_STRING@" + +/* Define to the one symbol short name of this package. */ +#cmakedefine LIBAEC_PACKAGE_TARNAME "@LIBAEC_PACKAGE_TARNAME@" + +/* Define to the version of this package. */ +#cmakedefine LIBAEC_PACKAGE_VERSION "@LIBAEC_PACKAGE_VERSION@" + +/* Define to 1 if you have the ANSI C header files. */ +#cmakedefine STDC_HEADERS @STDC_HEADERS@ + +/* Version number of package */ +#define VERSION "@LIBAEC_PACKAGE_VERSION@" + +/* Define to empty if `const' does not conform to ANSI C. */ +#cmakedefine const + +/* Define to `unsigned int' if does not define. */ +#cmakedefine size_t diff --git a/config/cmake/LIBAEC/libaec-config-version.cmake.in b/config/cmake/LIBAEC/libaec-config-version.cmake.in new file mode 100644 index 00000000000..4f0e7ae9ec7 --- /dev/null +++ b/config/cmake/LIBAEC/libaec-config-version.cmake.in @@ -0,0 +1,42 @@ +#----------------------------------------------------------------------------- +# LIBAEC Version file for install directory +#----------------------------------------------------------------------------- + +set (PACKAGE_VERSION "@LIBAEC_VERSION_STRING@") + +if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}" ) + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + if ("${PACKAGE_FIND_VERSION_MAJOR}" STREQUAL "@LIBAEC_VERSION_MAJOR@") + + # exact match for version @LIBAEC_VERSION_MAJOR@.@LIBAEC_VERSION_MINOR@ + if ("${PACKAGE_FIND_VERSION_MINOR}" STREQUAL "@LIBAEC_VERSION_MINOR@") + + # compatible with any version @LIBAEC_VERSION_MAJOR@.@LIBAEC_VERSION_MINOR@.x + set (PACKAGE_VERSION_COMPATIBLE TRUE) + + if ("${PACKAGE_FIND_VERSION_PATCH}" STREQUAL "@LIBAEC_VERSION_RELEASE@") + set (PACKAGE_VERSION_EXACT TRUE) + + if ("${PACKAGE_FIND_VERSION_TWEAK}" STREQUAL "@LIBAEC_VERSION_SUBRELEASE@") + # not using this yet + endif () + endif () + else () + set (PACKAGE_VERSION_COMPATIBLE FALSE) + endif () + endif () +endif () + +# if the installed or the using project don't have CMAKE_SIZEOF_VOID_P set, ignore it: +if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "@CMAKE_SIZEOF_VOID_P@" STREQUAL "") + return() +endif() + +# check that the installed version has the same 32/64bit-ness as the one which is currently searching: +if(NOT "${CMAKE_SIZEOF_VOID_P}" STREQUAL "@CMAKE_SIZEOF_VOID_P@") + math(EXPR installedBits "@CMAKE_SIZEOF_VOID_P@ * 8") + set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)") + set(PACKAGE_VERSION_UNSUITABLE TRUE) +endif() + diff --git a/config/cmake/LIBAEC/libaec-config.cmake.in b/config/cmake/LIBAEC/libaec-config.cmake.in new file mode 100644 index 00000000000..36a116a67f0 --- /dev/null +++ b/config/cmake/LIBAEC/libaec-config.cmake.in @@ -0,0 +1,59 @@ +#----------------------------------------------------------------------------- +# LIBAEC Config file for compiling against LIBAEC build directory +#----------------------------------------------------------------------------- +@PACKAGE_INIT@ + +string(TOUPPER @LIBAEC_PACKAGE@ LIBAEC_PACKAGE_NAME) + +set (${LIBAEC_PACKAGE_NAME}_VALID_COMPONENTS static shared) + +#----------------------------------------------------------------------------- +# User Options +#----------------------------------------------------------------------------- +set (${LIBAEC_PACKAGE_NAME}_ENABLE_ENCODING @LIBAEC_ENABLE_ENCODING@) +set (${LIBAEC_PACKAGE_NAME}_BUILD_SHARED_LIBS @BUILD_SHARED_LIBS@) +set (${LIBAEC_PACKAGE_NAME}_EXPORT_LIBRARIES @LIBAEC_LIBRARIES_TO_EXPORT@) + +#----------------------------------------------------------------------------- +# Directories 
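# [Illustrative note, not part of this patch] With the 1.0.x version numbers
# set in the replacement CMakeLists.txt, the config-version template above
# exports PACKAGE_VERSION "1.0", so a consumer request behaves roughly as:
#
#   find_package (libaec 1.0)   # accepted: same major.minor, any patch level
#   find_package (libaec 1.1)   # rejected: installed version is lower
#   find_package (libaec 2.0)   # rejected: different major version
#
# and a 32-bit consumer against a 64-bit install (or vice versa) is marked
# unsuitable by the pointer-size check at the end of the template.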
+#----------------------------------------------------------------------------- +set (${LIBAEC_PACKAGE_NAME}_INCLUDE_DIR "@PACKAGE_INCLUDE_INSTALL_DIR@") + +set (${LIBAEC_PACKAGE_NAME}_SHARE_DIR "@PACKAGE_SHARE_INSTALL_DIR@") +set_and_check (${LIBAEC_PACKAGE_NAME}_BUILD_DIR "@PACKAGE_CURRENT_BUILD_DIR@") + +#----------------------------------------------------------------------------- +# Version Strings +#----------------------------------------------------------------------------- +set (${LIBAEC_PACKAGE_NAME}_VERSION_STRING @LIBAEC_VERSION_STRING@) +set (${LIBAEC_PACKAGE_NAME}_VERSION_MAJOR @LIBAEC_VERSION_MAJOR@) +set (${LIBAEC_PACKAGE_NAME}_VERSION_MINOR @LIBAEC_VERSION_MINOR@) + +#----------------------------------------------------------------------------- +# Don't include targets if this file is being picked up by another +# project which has already build LIBAEC as a subproject +#----------------------------------------------------------------------------- +if (NOT TARGET "@LIBAEC_PACKAGE@") + include (@PACKAGE_SHARE_INSTALL_DIR@/@LIBAEC_PACKAGE@@LIBAEC_PACKAGE_EXT@-targets.cmake) +endif () + +# Handle default component(static) : +if (NOT ${LIBAEC_PACKAGE_NAME}_FIND_COMPONENTS) + set (${LIBAEC_PACKAGE_NAME}_FIND_COMPONENTS static) + set (${LIBAEC_PACKAGE_NAME}_FIND_REQUIRED_static true) +endif () + +# Handle requested components: +list (REMOVE_DUPLICATES ${LIBAEC_PACKAGE_NAME}_FIND_COMPONENTS) +foreach (comp IN LISTS ${LIBAEC_PACKAGE_NAME}_FIND_COMPONENTS) + list (FIND ${LIBAEC_PACKAGE_NAME}_EXPORT_LIBRARIES "@LIBAEC_LIB_CORENAME@-${comp}" HAVE_COMP) + if (${HAVE_COMP} LESS 0) + set (${LIBAEC_PACKAGE_NAME}_${comp}_FOUND 0) + else () + set (${LIBAEC_PACKAGE_NAME}_${comp}_FOUND 1) + string(TOUPPER ${LIBAEC_PACKAGE_NAME}_${comp}_LIBRARY COMP_LIBRARY) + set (${COMP_LIBRARY} ${${COMP_LIBRARY}} @LIBAEC_LIB_CORENAME@-${comp}) + endif () +endforeach () + +check_required_components (${LIBAEC_PACKAGE_NAME}) diff --git a/config/cmake/ZLIB/CMakeLists.txt b/config/cmake/ZLIB/CMakeLists.txt new file mode 100644 index 00000000000..c74ecea9dd9 --- /dev/null +++ b/config/cmake/ZLIB/CMakeLists.txt @@ -0,0 +1,572 @@ +cmake_minimum_required (VERSION 3.12) +PROJECT (ZLIB C) + +#----------------------------------------------------------------------------- +# Basic ZLIB stuff here +#----------------------------------------------------------------------------- +set (ZLIB_PACKAGE_EXT ${HDF_PACKAGE_EXT}) +set (HDF_USE_GNU_DIRS ${HDF5_USE_GNU_DIRS}) +set (CMAKE_OSX_ARCHITECTURES ${CMAKE_OSX_ARCHITECTURES}) +set (CMAKE_TOOLCHAIN_FILE ${CMAKE_TOOLCHAIN_FILE}) +set (PACKAGE_NAMESPACE ${HDF_PACKAGE_NAMESPACE}) +if (MINGW) + set (WINDOWS 1) # MinGW tries to imitate Windows +endif () +if (WINDOWS) + set (HAVE_STDDEF_H 1) + set (HAVE_SYS_TYPES_H 1) +endif () +# +# Check for unistd.h +# +check_include_file(unistd.h Z_HAVE_UNISTD_H) +CHECK_FUNCTION_EXISTS (memcpy HAVE_MEMCPY) +CHECK_FUNCTION_EXISTS (vsnprintf HAVE_VSNPRINTF) + +#----------------------------------------------------------------------------- +# Define some CMake variables for use later in the project +#----------------------------------------------------------------------------- +set (ZLIB_RESOURCES_DIR ${HDF_RESOURCES_DIR}/ZLIB) +set (ZLIB_SRC_DIR ${ZLIB_SOURCE_DIR}) + +#----------------------------------------------------------------------------- +# Set the core names of all the libraries +#----------------------------------------------------------------------------- +set (ZLIB_LIB_CORENAME "zlib") + 
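# [Illustrative note, not part of this patch] The libaec-config.cmake.in
# template completed just above (before this replacement ZLIB CMakeLists.txt
# begins) resolves the requested component, static by default, into a
# LIBAEC_<comp>_FOUND flag and an upper-cased LIBAEC_<COMP>_LIBRARY variable.
# A consumer sketch, assuming the default names in that template and a
# hypothetical target "myapp":
find_package (libaec COMPONENTS static)
if (LIBAEC_static_FOUND)
  target_link_libraries (myapp PRIVATE ${LIBAEC_STATIC_LIBRARY})
endif ()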
+#----------------------------------------------------------------------------- +# Set the true names of all the libraries if customized by external project +#----------------------------------------------------------------------------- +set (ZLIB_LIB_NAME "${ZLIB_EXTERNAL_LIB_PREFIX}${ZLIB_LIB_CORENAME}") + +#----------------------------------------------------------------------------- +# Set the target names of all the libraries +#----------------------------------------------------------------------------- +set (ZLIB_LIB_TARGET "${ZLIB_LIB_CORENAME}-static") + +set(ZLIB_PC ${CMAKE_CURRENT_BINARY_DIR}/zlib.pc) +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/zlib.pc.cmakein ${ZLIB_PC} @ONLY) + +#----------------------------------------------------------------------------- +# Generate the zconf.h file containing user settings needed by compilation +#----------------------------------------------------------------------------- +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/zconf.h.cmakein ${CMAKE_CURRENT_BINARY_DIR}/zconf.h @ONLY) + +if (DEFINED ADDITIONAL_CMAKE_PREFIX_PATH AND EXISTS "${ADDITIONAL_CMAKE_PREFIX_PATH}") + set (CMAKE_PREFIX_PATH ${ADDITIONAL_CMAKE_PREFIX_PATH} ${CMAKE_PREFIX_PATH}) +endif () + +#----------------------------------------------------------------------------- +# parse the full version number from zlib.h and include in ZLIB_VERS_INFO +#----------------------------------------------------------------------------- +file (READ ${ZLIB_SRC_DIR}/zlib.h _zlib_h_contents) +string (REGEX REPLACE ".*#define[ \t]+ZLIB_VER_MAJOR[ \t]+([0-9]*).*$" + "\\1" ZLIB_VERS_MAJOR ${_zlib_h_contents}) +string (REGEX REPLACE ".*#define[ \t]+ZLIB_VER_MINOR[ \t]+([0-9]*).*$" + "\\1" ZLIB_VERS_MINOR ${_zlib_h_contents}) +string (REGEX REPLACE ".*#define[ \t]+ZLIB_VER_REVISION[ \t]+([0-9]*).*$" + "\\1" ZLIB_VERS_RELEASE ${_zlib_h_contents}) +string (REGEX REPLACE ".*#define[ \t]+ZLIB_VER_SUBREVISION[ \t]+([0-9]*).*$" + "\\1" ZLIB_VERS_SUBRELEASE ${_zlib_h_contents}) +#message (STATUS "VERSION: ${ZLIB_VERS_MAJOR}.${ZLIB_VERS_MINOR}.${ZLIB_VERS_RELEASE}-${ZLIB_VERS_SUBRELEASE}") +string (REGEX REPLACE ".*#define[ \t]+ZLIB_VERSION[ \t]+\"([0-9A-Za-z.]+)\".*" + "\\1" ZLIB_FULL_VERSION ${_zlib_h_contents}) +#message (STATUS "VERSION: ${ZLIB_FULL_VERSION}") + +#----------------------------------------------------------------------------- +set (ZLIB_PACKAGE "zlib") +set (ZLIB_PACKAGE_NAME "ZLIB") +set (ZLIB_PACKAGE_VERSION "${ZLIB_VERS_MAJOR}.${ZLIB_VERS_MINOR}") +set (ZLIB_PACKAGE_VERSION_MAJOR "${ZLIB_VERS_MAJOR}.${ZLIB_VERS_MINOR}") +set (ZLIB_PACKAGE_VERSION_MINOR "${ZLIB_VERS_RELEASE}") +set (ZLIB_PACKAGE_STRING "${ZLIB_PACKAGE_NAME} ${ZLIB_PACKAGE_VERSION}-${ZLIB_VERS_SUBRELEASE}") +set (ZLIB_PACKAGE_TARNAME "zlib") +set (ZLIB_PACKAGE_URL "http://www.hdfgroup.org") +set (ZLIB_PACKAGE_BUGREPORT "help@hdfgroup.org") +set (ZLIB_PACKAGE_SOVERSION "${ZLIB_VERS_MAJOR}.${ZLIB_VERS_MINOR}.${ZLIB_VERS_RELEASE}") +set (ZLIB_PACKAGE_SOVERSION_MAJOR "${ZLIB_VERS_MAJOR}") + + +HDF_DIR_PATHS(${ZLIB_PACKAGE_NAME}) + +#----------------------------------------------------------------------------- +# Targets built within this project are exported at Install time for use +# by other projects +#----------------------------------------------------------------------------- +if (NOT ZLIB_EXPORTED_TARGETS) + set (ZLIB_EXPORTED_TARGETS "zlib-targets") +endif () + +#----------------------------------------------------------------------------- +# To include a library in the list exported by the project AT BUILD TIME, +# add 
it to this variable. This is NOT used by Make Install, but for projects +# which include zlib as a sub-project within their build tree +#----------------------------------------------------------------------------- +set_global_variable (ZLIB_LIBRARIES_TO_EXPORT "") + +set (CMAKE_POSITION_INDEPENDENT_CODE ON) + +#----------------------------------------------------------------------------- +# When building utility executables that generate other (source) files : +# we make use of the following variables defined in the root CMakeLists. +# Certain systems may add /Debug or /Release to output paths +# and we need to call the executable from inside the CMake configuration +#----------------------------------------------------------------------------- +set (EXE_EXT "") +if (WIN32) + set (EXE_EXT ".exe") + add_definitions (-D_BIND_TO_CURRENT_VCLIBS_VERSION=1) + add_definitions (-D_CRT_SECURE_NO_WARNINGS) + add_definitions (-D_CONSOLE) + add_definitions (-D_CRT_NONSTDC_NO_DEPRECATE) +endif () + +if (MSVC) + set (CMAKE_MFC_FLAG 0) +endif () + +set (MAKE_SYSTEM) +if (CMAKE_BUILD_TOOL MATCHES "make") + set (MAKE_SYSTEM 1) +endif () + +set (CFG_INIT "/${CMAKE_CFG_INTDIR}") +if (MAKE_SYSTEM) + set (CFG_INIT "") +endif () + +#----------------------------------------------------------------------------- +# Compiler specific flags : Shouldn't there be compiler tests for these +#----------------------------------------------------------------------------- +if (CMAKE_COMPILER_IS_GNUCC) + set (CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS} -Wno-strict-prototypes") +endif () +if (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + set (CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS} -Wno-deprecated-non-prototype -Wno-implicit-function-declaration") +endif () + +#----------------------------------------------------------------------------- +# This is in here to help some of the GCC based IDES like Eclipse +# and code blocks parse the compiler errors and warnings better. +#----------------------------------------------------------------------------- +if (CMAKE_COMPILER_IS_GNUCC) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fmessage-length=0") +endif () + +#----------------------------------------------------------------------------- +# All libs/tests/examples need the main include directories +#----------------------------------------------------------------------------- +INCLUDE_DIRECTORIES (${ZLIB_BINARY_DIR} ${ZLIB_SOURCE_DIR} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) + +#============================================================================ +# zlib +#============================================================================ + +#----------------------------------------------------------------------------- +# Define zlib Library +#----------------------------------------------------------------------------- +set(ZLIB_PUBLIC_HDRS + ${CMAKE_CURRENT_BINARY_DIR}/zconf.h + zlib.h +) +set(ZLIB_PRIVATE_HDRS + crc32.h + deflate.h + gzguts.h + inffast.h + inffixed.h + inflate.h + inftrees.h + trees.h + zutil.h +) +set(ZLIB_SRCS + adler32.c + compress.c + crc32.c + deflate.c + gzclose.c + gzlib.c + gzread.c + gzwrite.c + inflate.c + infback.c + inftrees.c + inffast.c + trees.c + uncompr.c + zutil.c +) + +if(NOT MINGW) + set(ZLIB_DLL_SRCS + win32/zlib1.rc # If present will override custom build rule below. 
+ ) +endif() + +if(CMAKE_COMPILER_IS_GNUCC) + if(ASM686) + set(ZLIB_ASMS contrib/asm686/match.S) + elseif (AMD64) + set(ZLIB_ASMS contrib/amd64/amd64-match.S) + endif () + + if(ZLIB_ASMS) + add_definitions(-DASMV) + set_source_files_properties (${ZLIB_ASMS} PROPERTIES LANGUAGE C COMPILE_FLAGS -DNO_UNDERLINE) + endif() +endif() + +if(MSVC) + if(ASM686) + enable_language(ASM_MASM) + set(ZLIB_ASMS + contrib/masmx86/inffas32.asm + contrib/masmx86/match686.asm + ) + elseif (AMD64) + enable_language(ASM_MASM) + set(ZLIB_ASMS + contrib/masmx64/gvmat64.asm + contrib/masmx64/inffasx64.asm + ) + endif() + + if(ZLIB_ASMS) + add_definitions(-DASMV -DASMINF) + endif() +endif() + +if(MINGW) + add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/zlib1rc.obj + COMMAND ${CMAKE_RC_COMPILER} + -D GCC_WINDRES + -I ${CMAKE_CURRENT_SOURCE_DIR} + -I ${CMAKE_CURRENT_BINARY_DIR} + -o ${CMAKE_CURRENT_BINARY_DIR}/zlib1rc.obj + -i ${CMAKE_CURRENT_SOURCE_DIR}/win32/zlib1.rc) + set(ZLIB_DLL_SRCS ${CMAKE_CURRENT_BINARY_DIR}/zlib1rc.obj) +endif() + +add_library (${ZLIB_LIB_TARGET} STATIC ${ZLIB_SRCS} ${ZLIB_PRIVATE_HDRS} ${ZLIB_PUBLIC_HDRS}) +if (MSVC AND CMAKE_CL_64) + set_target_properties (${ZLIB_LIB_TARGET} PROPERTIES STATIC_LIBRARY_FLAGS "/machine:x64") +endif () +target_include_directories(${ZLIB_LIB_TARGET} PRIVATE "${CMAKE_BINARY_DIR}") +TARGET_C_PROPERTIES (${ZLIB_LIB_TARGET} STATIC) +target_link_libraries (${ZLIB_LIB_TARGET} PRIVATE ${LINK_LIBS}) +H5_SET_LIB_OPTIONS (${ZLIB_LIB_TARGET} ${ZLIB_LIB_NAME} STATIC 0) +set_target_properties(${ZLIB_LIB_TARGET} PROPERTIES + PUBLIC_HEADER "${ZLIB_PUBLIC_HEADERS}" + LINKER_LANGUAGE C + INTERFACE_INCLUDE_DIRECTORIES "$/include>" +) +set_global_variable (ZLIB_LIBRARIES_TO_EXPORT ${ZLIB_LIB_TARGET}) +set (install_targets ${ZLIB_LIB_TARGET}) + +#----------------------------------------------------------------------------- +# Add Target(s) to CMake Install for import into other projects +#----------------------------------------------------------------------------- +if (ZLIB_EXPORTED_TARGETS) + INSTALL_TARGET_PDB (${ZLIB_LIB_TARGET} ${ZLIB_INSTALL_BIN_DIR} libraries) + + install ( + TARGETS + ${install_targets} + EXPORT + ${ZLIB_EXPORTED_TARGETS} + LIBRARY DESTINATION ${ZLIB_INSTALL_LIB_DIR} COMPONENT libraries + ARCHIVE DESTINATION ${ZLIB_INSTALL_LIB_DIR} COMPONENT libraries + RUNTIME DESTINATION ${ZLIB_INSTALL_BIN_DIR} COMPONENT libraries + FRAMEWORK DESTINATION ${ZLIB_INSTALL_FWRK_DIR} COMPONENT libraries + PUBLIC_HEADER DESTINATION ${ZLIB_INSTALL_INCLUDE_DIR} COMPONENT headers + ) +endif () + +include (CMakePackageConfigHelpers) + +#----------------------------------------------------------------------------- +# Check for Installation Utilities +#----------------------------------------------------------------------------- +if (WIN32) + set (PF_ENV_EXT "(x86)") + find_program (NSIS_EXECUTABLE NSIS.exe PATHS "$ENV{ProgramFiles}\\NSIS" "$ENV{ProgramFiles${PF_ENV_EXT}}\\NSIS") + if(NOT CPACK_WIX_ROOT) + file(TO_CMAKE_PATH "$ENV{WIX}" CPACK_WIX_ROOT) + endif () + find_program (WIX_EXECUTABLE candle PATHS "${CPACK_WIX_ROOT}/bin") +endif () + +#----------------------------------------------------------------------------- +# Add file(s) to CMake Install +#----------------------------------------------------------------------------- +if (NOT ZLIB_INSTALL_NO_DEVELOPMENT) + install ( + FILES ${PROJECT_BINARY_DIR}/zconf.h + DESTINATION ${ZLIB_INSTALL_INCLUDE_DIR} + COMPONENT headers + ) +endif () + +#----------------------------------------------------------------------------- +# 
Add Target(s) to CMake Install for import into other projects +#----------------------------------------------------------------------------- +if (NOT ZLIB_EXTERNALLY_CONFIGURED) + install ( + EXPORT ${ZLIB_EXPORTED_TARGETS} + DESTINATION ${ZLIB_INSTALL_CMAKE_DIR} + FILE ${ZLIB_PACKAGE}${ZLIB_PACKAGE_EXT}-targets.cmake + NAMESPACE ${PACKAGE_NAMESPACE} + COMPONENT configinstall + ) +endif () + +#----------------------------------------------------------------------------- +# Export all exported targets to the build tree for use by parent project +#----------------------------------------------------------------------------- +if (NOT ZLIB_EXTERNALLY_CONFIGURED) + export ( + TARGETS ${ZLIB_LIBRARIES_TO_EXPORT} ${ZLIB_LIB_DEPENDENCIES} + FILE ${ZLIB_PACKAGE}${ZLIB_PACKAGE_EXT}-targets.cmake + NAMESPACE ${PACKAGE_NAMESPACE} + ) + export (PACKAGE ${ZLIB_PACKAGE}${ZLIB_PACKAGE_EXT}) +endif () + +#----------------------------------------------------------------------------- +# Set includes needed for build +#----------------------------------------------------------------------------- +set (ZLIB_INCLUDES_BUILD_TIME + ${ZLIB_SRC_DIR} ${ZLIB_BINARY_DIR} +) + +#----------------------------------------------------------------------------- +# Set variables needed for installation +#----------------------------------------------------------------------------- +set (ZLIB_VERSION_STRING ${ZLIB_PACKAGE_VERSION}) +set (ZLIB_VERSION_MAJOR ${ZLIB_PACKAGE_VERSION_MAJOR}) +set (ZLIB_VERSION_MINOR ${ZLIB_PACKAGE_VERSION_MINOR}) + +#----------------------------------------------------------------------------- +# Configure the zlib-config.cmake file for the build directory +#----------------------------------------------------------------------------- +set (INCLUDE_INSTALL_DIR ${ZLIB_INSTALL_INCLUDE_DIR}) +set (SHARE_INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/${ZLIB_INSTALL_CMAKE_DIR}" ) +set (CURRENT_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}" ) +configure_package_config_file ( + ${ZLIB_RESOURCES_DIR}/zlib-config.cmake.in + "${ZLIB_BINARY_DIR}/${ZLIB_PACKAGE}${ZLIB_PACKAGE_EXT}-config.cmake" + INSTALL_DESTINATION "${ZLIB_INSTALL_CMAKE_DIR}" + PATH_VARS INCLUDE_INSTALL_DIR SHARE_INSTALL_DIR CURRENT_BUILD_DIR + INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}" +) + +#----------------------------------------------------------------------------- +# Configure the zlib-config.cmake file for the install directory +#----------------------------------------------------------------------------- +set (INCLUDE_INSTALL_DIR ${ZLIB_INSTALL_INCLUDE_DIR}) +set (SHARE_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/${ZLIB_INSTALL_CMAKE_DIR}" ) +set (CURRENT_BUILD_DIR "${CMAKE_INSTALL_PREFIX}" ) +configure_package_config_file ( + ${ZLIB_RESOURCES_DIR}/zlib-config.cmake.in + "${ZLIB_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${ZLIB_PACKAGE}${ZLIB_PACKAGE_EXT}-config.cmake" + INSTALL_DESTINATION "${ZLIB_INSTALL_CMAKE_DIR}" + PATH_VARS INCLUDE_INSTALL_DIR SHARE_INSTALL_DIR CURRENT_BUILD_DIR +) +if (NOT ZLIB_EXTERNALLY_CONFIGURED) + install ( + FILES ${ZLIB_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${ZLIB_PACKAGE}${ZLIB_PACKAGE_EXT}-config.cmake + DESTINATION ${ZLIB_INSTALL_CMAKE_DIR} + COMPONENT configinstall + ) +endif () + +#----------------------------------------------------------------------------- +# Configure the ZLIB-config-version.cmake file for the install directory +#----------------------------------------------------------------------------- +if (NOT ZLIB_EXTERNALLY_CONFIGURED) + configure_file ( + ${ZLIB_RESOURCES_DIR}/zlib-config-version.cmake.in + 
${ZLIB_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${ZLIB_PACKAGE}${ZLIB_PACKAGE_EXT}-config-version.cmake @ONLY + ) + install ( + FILES ${ZLIB_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${ZLIB_PACKAGE}${ZLIB_PACKAGE_EXT}-config-version.cmake + DESTINATION ${ZLIB_INSTALL_CMAKE_DIR} + COMPONENT configinstall + ) +endif () + +#----------------------------------------------------------------------------- +# Add Document File(s) to CMake Install +#----------------------------------------------------------------------------- +if (NOT ZLIB_EXTERNALLY_CONFIGURED) + install ( + FILES + ${ZLIB_SOURCE_DIR}/FAQ + ${ZLIB_SOURCE_DIR}/README + ${ZLIB_SOURCE_DIR}/INDEX + DESTINATION ${ZLIB_INSTALL_DATA_DIR} + COMPONENT documents + ) +endif () + +#----------------------------------------------------------------------------- +# Check for Installation Utilities +#----------------------------------------------------------------------------- +if (WIN32) + set (PF_ENV_EXT "(x86)") + find_program (NSIS_EXECUTABLE NSIS.exe PATHS "$ENV{ProgramFiles}\\NSIS" "$ENV{ProgramFiles${PF_ENV_EXT}}\\NSIS") + if(NOT CPACK_WIX_ROOT) + file(TO_CMAKE_PATH "$ENV{WIX}" CPACK_WIX_ROOT) + endif() + find_program (WIX_EXECUTABLE candle PATHS "${CPACK_WIX_ROOT}/bin") +endif () + +#----------------------------------------------------------------------------- +# Set the cpack variables +#----------------------------------------------------------------------------- +if (NOT ZLIB_EXTERNALLY_CONFIGURED) + set (CPACK_PACKAGE_VENDOR "HDF_Group") + set (CPACK_PACKAGE_NAME "${ZLIB_PACKAGE_NAME}") + if (CDASH_LOCAL) + set (CPACK_PACKAGE_VERSION "${ZLIB_PACKAGE_VERSION}") + else () + set (CPACK_PACKAGE_VERSION "${ZLIB_PACKAGE_VERSION_STRING}") + endif () + set (CPACK_PACKAGE_VERSION_MAJOR "${ZLIB_PACKAGE_VERSION_MAJOR}") + set (CPACK_PACKAGE_VERSION_MINOR "${ZLIB_PACKAGE_VERSION_MINOR}") + set (CPACK_PACKAGE_VERSION_PATCH "") + set (CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/README") + set (CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README") + set (CPACK_RESOURCE_FILE_README "${CMAKE_CURRENT_SOURCE_DIR}/README") + set (CPACK_PACKAGE_RELOCATABLE TRUE) + set (CPACK_PACKAGE_DESCRIPTION_SUMMARY "zlib Installation") + set (CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_VENDOR}/${CPACK_PACKAGE_NAME}/${CPACK_PACKAGE_VERSION}") + + set (CPACK_GENERATOR "TGZ") + if (WIN32) + set (CPACK_GENERATOR "ZIP") + + if (NSIS_EXECUTABLE) + list (APPEND CPACK_GENERATOR "NSIS") + endif () + # Installers for 32- vs. 
64-bit CMake: + # - Root install directory (displayed to end user at installer-run time) + # - "NSIS package/display name" (text used in the installer GUI) + # - Registry key used to store info about the installation + set (CPACK_NSIS_PACKAGE_NAME "${ZLIB_PACKAGE_STRING}") + if (CMAKE_CL_64) + set (CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES64") + set (CPACK_PACKAGE_INSTALL_REGISTRY_KEY "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION} (Win64)") + else () + set (CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES") + set (CPACK_PACKAGE_INSTALL_REGISTRY_KEY "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}") + endif () + set (CPACK_NSIS_DISPLAY_NAME "${CPACK_NSIS_PACKAGE_NAME}") + set (CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_VENDOR}\\\\${CPACK_PACKAGE_NAME}\\\\${CPACK_PACKAGE_VERSION}") + set (CPACK_NSIS_CONTACT "${ZLIB_PACKAGE_BUGREPORT}") + set (CPACK_NSIS_MODIFY_PATH ON) + set (CPACK_NSIS_PACKAGE_NAME "ZLIB ${ZLIB_PACKAGE_VERSION}") + if (WIX_EXECUTABLE) + list (APPEND CPACK_GENERATOR "WIX") + endif () +#WiX variables + set (CPACK_WIX_UNINSTALL "1") + set (CPACK_RESOURCE_FILE_LICENSE "${JPEG_BINARY_DIR}/README") + elseif (APPLE) + list (APPEND CPACK_GENERATOR "STGZ") + list (APPEND CPACK_GENERATOR "DragNDrop") + set (CPACK_COMPONENTS_ALL_IN_ONE_PACKAGE ON) + set (CPACK_PACKAGING_INSTALL_PREFIX "/${CPACK_PACKAGE_INSTALL_DIRECTORY}") + #set (CPACK_PACKAGE_ICON "${ZLIB_RESOURCES_DIR}/hdf.icns") + + option (ZLIB_PACK_MACOSX_FRAMEWORK "Package the ZLIB Library in a Framework" OFF) + if (ZLIB_PACK_MACOSX_FRAMEWORK AND ZLIB_BUILD_FRAMEWORKS) + set (CPACK_BUNDLE_NAME "${ZLIB_PACKAGE_STRING}") + set (CPACK_BUNDLE_LOCATION "/") # make sure CMAKE_INSTALL_PREFIX ends in / + set (CMAKE_INSTALL_PREFIX "/${CPACK_BUNDLE_NAME}.framework/Versions/${CPACK_PACKAGE_VERSION}/${CPACK_PACKAGE_NAME}/") + set (CPACK_SHORT_VERSION_STRING "${CPACK_PACKAGE_VERSION}") + #----------------------------------------------------------------------------- + # Configure the Info.plist file for the install bundle + #----------------------------------------------------------------------------- + configure_file ( + ${ZLIB_RESOURCES_DIR}/CPack.Info.plist.in + ${ZLIB_BINARY_DIR}/CMakeFiles/Info.plist @ONLY + ) + configure_file ( + ${ZLIB_RESOURCES_DIR}/PkgInfo.in + ${ZLIB_BINARY_DIR}/CMakeFiles/PkgInfo @ONLY + ) + install ( + FILES ${ZLIB_BINARY_DIR}/CMakeFiles/PkgInfo + DESTINATION .. 
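# [Illustrative note, not part of this patch] After configuring with the
# CPack settings above, binary packages for the generators selected for the
# current platform can be produced from the build directory with, for
# example:
#
#   cpack -G TGZ -C Release
#
# or with the standard "package" build target added by include (CPack).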
+ ) + endif () + else () + list (APPEND CPACK_GENERATOR "STGZ") + set (CPACK_PACKAGING_INSTALL_PREFIX "/${CPACK_PACKAGE_INSTALL_DIRECTORY}") + set (CPACK_COMPONENTS_ALL_IN_ONE_PACKAGE ON) + + set (CPACK_DEBIAN_PACKAGE_SECTION "Libraries") + set (CPACK_DEBIAN_PACKAGE_MAINTAINER "${ZLIB_PACKAGE_BUGREPORT}") + +# list (APPEND CPACK_GENERATOR "RPM") + set (CPACK_RPM_PACKAGE_RELEASE "1") + set (CPACK_RPM_COMPONENT_INSTALL ON) + set (CPACK_RPM_PACKAGE_RELOCATABLE ON) + endif () + + # By default, do not warn when built on machines using only VS Express: + if (NOT DEFINED CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS) + set (CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS ON) + endif () + include (InstallRequiredSystemLibraries) + + set (CPACK_INSTALL_CMAKE_PROJECTS "${ZLIB_BINARY_DIR};ZLIB;libraries;/") + set (CPACK_INSTALL_CMAKE_PROJECTS "${ZLIB_BINARY_DIR};ZLIB;headers;/") + set (CPACK_INSTALL_CMAKE_PROJECTS "${ZLIB_BINARY_DIR};ZLIB;configinstall;/") + + set (CPACK_ALL_INSTALL_TYPES Full Developer User) + set (CPACK_INSTALL_TYPE_FULL_DISPLAY_NAME "Everything") + + set(CPACK_COMPONENTS_ALL libraries headers documents configinstall) + + include (CPack) + + cpack_add_component_group(Runtime) + + cpack_add_component_group(Documents + EXPANDED + DESCRIPTION "Release notes for zlib" + ) + + cpack_add_component_group(Development + EXPANDED + DESCRIPTION "All of the tools you'll need to develop applications" + ) + + cpack_add_component (libraries + DISPLAY_NAME "ZLIB Libraries" + REQUIRED + GROUP Runtime + INSTALL_TYPES Full Developer User + ) + cpack_add_component (headers + DISPLAY_NAME "ZLIB Headers" + DEPENDS libraries + GROUP Development + INSTALL_TYPES Full Developer + ) + cpack_add_component (documents + DISPLAY_NAME "ZLIB Documents" + GROUP Documents + INSTALL_TYPES Full Developer + ) + cpack_add_component (configinstall + DISPLAY_NAME "ZLIB CMake files" + DEPENDS libraries + GROUP Development + INSTALL_TYPES Full Developer User + ) + +endif () diff --git a/config/cmake/ZLIB/CPack.Info.plist.in b/config/cmake/ZLIB/CPack.Info.plist.in new file mode 100644 index 00000000000..08d371bd5d9 --- /dev/null +++ b/config/cmake/ZLIB/CPack.Info.plist.in @@ -0,0 +1,26 @@ + + + + + CFBundleDevelopmentRegion + English + CFBundleExecutable + @CPACK_PACKAGE_FILE_NAME@ + CFBundleIconFile + @CPACK_BUNDLE_ICON@ + CFBundleIdentifier + org.@CPACK_PACKAGE_VENDOR@.@CPACK_PACKAGE_NAME@@CPACK_MODULE_VERSION_SUFFIX@ + CFBundleInfoDictionaryVersion + 6.0 + CFBundlePackageType + FMWK + CFBundleSignature + ???? + CFBundleVersion + @CPACK_PACKAGE_VERSIO@ + CFBundleShortVersionString + @CPACK_SHORT_VERSION_STRING@ + CSResourcesFileMapped + + + diff --git a/config/cmake/ZLIB/zconf.h.in b/config/cmake/ZLIB/zconf.h.in new file mode 100644 index 00000000000..a7f24cce60f --- /dev/null +++ b/config/cmake/ZLIB/zconf.h.in @@ -0,0 +1,536 @@ +/* zconf.h -- configuration of the zlib compression library + * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#ifndef ZCONF_H +#define ZCONF_H +#cmakedefine Z_PREFIX +#cmakedefine Z_HAVE_UNISTD_H + +/* + * If you *really* need a unique prefix for all types and library functions, + * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. + * Even better than compiling with -DZ_PREFIX would be to use configure to set + * this permanently in zconf.h using "./configure --zprefix". 
+ */ +#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ +# define Z_PREFIX_SET + +/* all linked symbols and init macros */ +# define _dist_code z__dist_code +# define _length_code z__length_code +# define _tr_align z__tr_align +# define _tr_flush_bits z__tr_flush_bits +# define _tr_flush_block z__tr_flush_block +# define _tr_init z__tr_init +# define _tr_stored_block z__tr_stored_block +# define _tr_tally z__tr_tally +# define adler32 z_adler32 +# define adler32_combine z_adler32_combine +# define adler32_combine64 z_adler32_combine64 +# define adler32_z z_adler32_z +# ifndef Z_SOLO +# define compress z_compress +# define compress2 z_compress2 +# define compressBound z_compressBound +# endif +# define crc32 z_crc32 +# define crc32_combine z_crc32_combine +# define crc32_combine64 z_crc32_combine64 +# define crc32_z z_crc32_z +# define deflate z_deflate +# define deflateBound z_deflateBound +# define deflateCopy z_deflateCopy +# define deflateEnd z_deflateEnd +# define deflateGetDictionary z_deflateGetDictionary +# define deflateInit z_deflateInit +# define deflateInit2 z_deflateInit2 +# define deflateInit2_ z_deflateInit2_ +# define deflateInit_ z_deflateInit_ +# define deflateParams z_deflateParams +# define deflatePending z_deflatePending +# define deflatePrime z_deflatePrime +# define deflateReset z_deflateReset +# define deflateResetKeep z_deflateResetKeep +# define deflateSetDictionary z_deflateSetDictionary +# define deflateSetHeader z_deflateSetHeader +# define deflateTune z_deflateTune +# define deflate_copyright z_deflate_copyright +# define get_crc_table z_get_crc_table +# ifndef Z_SOLO +# define gz_error z_gz_error +# define gz_intmax z_gz_intmax +# define gz_strwinerror z_gz_strwinerror +# define gzbuffer z_gzbuffer +# define gzclearerr z_gzclearerr +# define gzclose z_gzclose +# define gzclose_r z_gzclose_r +# define gzclose_w z_gzclose_w +# define gzdirect z_gzdirect +# define gzdopen z_gzdopen +# define gzeof z_gzeof +# define gzerror z_gzerror +# define gzflush z_gzflush +# define gzfread z_gzfread +# define gzfwrite z_gzfwrite +# define gzgetc z_gzgetc +# define gzgetc_ z_gzgetc_ +# define gzgets z_gzgets +# define gzoffset z_gzoffset +# define gzoffset64 z_gzoffset64 +# define gzopen z_gzopen +# define gzopen64 z_gzopen64 +# ifdef _WIN32 +# define gzopen_w z_gzopen_w +# endif +# define gzprintf z_gzprintf +# define gzputc z_gzputc +# define gzputs z_gzputs +# define gzread z_gzread +# define gzrewind z_gzrewind +# define gzseek z_gzseek +# define gzseek64 z_gzseek64 +# define gzsetparams z_gzsetparams +# define gztell z_gztell +# define gztell64 z_gztell64 +# define gzungetc z_gzungetc +# define gzvprintf z_gzvprintf +# define gzwrite z_gzwrite +# endif +# define inflate z_inflate +# define inflateBack z_inflateBack +# define inflateBackEnd z_inflateBackEnd +# define inflateBackInit z_inflateBackInit +# define inflateBackInit_ z_inflateBackInit_ +# define inflateCodesUsed z_inflateCodesUsed +# define inflateCopy z_inflateCopy +# define inflateEnd z_inflateEnd +# define inflateGetDictionary z_inflateGetDictionary +# define inflateGetHeader z_inflateGetHeader +# define inflateInit z_inflateInit +# define inflateInit2 z_inflateInit2 +# define inflateInit2_ z_inflateInit2_ +# define inflateInit_ z_inflateInit_ +# define inflateMark z_inflateMark +# define inflatePrime z_inflatePrime +# define inflateReset z_inflateReset +# define inflateReset2 z_inflateReset2 +# define inflateResetKeep z_inflateResetKeep +# define inflateSetDictionary z_inflateSetDictionary +# define 
inflateSync z_inflateSync +# define inflateSyncPoint z_inflateSyncPoint +# define inflateUndermine z_inflateUndermine +# define inflateValidate z_inflateValidate +# define inflate_copyright z_inflate_copyright +# define inflate_fast z_inflate_fast +# define inflate_table z_inflate_table +# ifndef Z_SOLO +# define uncompress z_uncompress +# define uncompress2 z_uncompress2 +# endif +# define zError z_zError +# ifndef Z_SOLO +# define zcalloc z_zcalloc +# define zcfree z_zcfree +# endif +# define zlibCompileFlags z_zlibCompileFlags +# define zlibVersion z_zlibVersion + +/* all zlib typedefs in zlib.h and zconf.h */ +# define Byte z_Byte +# define Bytef z_Bytef +# define alloc_func z_alloc_func +# define charf z_charf +# define free_func z_free_func +# ifndef Z_SOLO +# define gzFile z_gzFile +# endif +# define gz_header z_gz_header +# define gz_headerp z_gz_headerp +# define in_func z_in_func +# define intf z_intf +# define out_func z_out_func +# define uInt z_uInt +# define uIntf z_uIntf +# define uLong z_uLong +# define uLongf z_uLongf +# define voidp z_voidp +# define voidpc z_voidpc +# define voidpf z_voidpf + +/* all zlib structs in zlib.h and zconf.h */ +# define gz_header_s z_gz_header_s +# define internal_state z_internal_state + +#endif + +#if defined(__MSDOS__) && !defined(MSDOS) +# define MSDOS +#endif +#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) +# define OS2 +#endif +#if defined(_WINDOWS) && !defined(WINDOWS) +# define WINDOWS +#endif +#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) +# ifndef WIN32 +# define WIN32 +# endif +#endif +#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) +# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) +# ifndef SYS16BIT +# define SYS16BIT +# endif +# endif +#endif + +/* + * Compile with -DMAXSEG_64K if the alloc function cannot allocate more + * than 64k bytes at a time (needed on systems with 16-bit int). + */ +#ifdef SYS16BIT +# define MAXSEG_64K +#endif +#ifdef MSDOS +# define UNALIGNED_OK +#endif + +#ifdef __STDC_VERSION__ +# ifndef STDC +# define STDC +# endif +# if __STDC_VERSION__ >= 199901L +# ifndef STDC99 +# define STDC99 +# endif +# endif +#endif +#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) +# define STDC +#endif +#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) +# define STDC +#endif +#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) +# define STDC +#endif +#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) +# define STDC +#endif + +#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ +# define STDC +#endif + +#ifndef STDC +# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ +# define const /* note: need a more gentle solution here */ +# endif +#endif + +#if defined(ZLIB_CONST) && !defined(z_const) +# define z_const const +#else +# define z_const +#endif + +#ifdef Z_SOLO + typedef unsigned long z_size_t; +#else +# define z_longlong long long +# if defined(NO_SIZE_T) + typedef unsigned NO_SIZE_T z_size_t; +# elif defined(STDC) +# include + typedef size_t z_size_t; +# else + typedef unsigned long z_size_t; +# endif +# undef z_longlong +#endif + +/* Maximum value for memLevel in deflateInit2 */ +#ifndef MAX_MEM_LEVEL +# ifdef MAXSEG_64K +# define MAX_MEM_LEVEL 8 +# else +# define MAX_MEM_LEVEL 9 +# endif +#endif + +/* Maximum value for windowBits in deflateInit2 and inflateInit2. 
+ * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files + * created by gzip. (Files created by minigzip can still be extracted by + * gzip.) + */ +#ifndef MAX_WBITS +# define MAX_WBITS 15 /* 32K LZ77 window */ +#endif + +/* The memory requirements for deflate are (in bytes): + (1 << (windowBits+2)) + (1 << (memLevel+9)) + that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + plus a few kilobytes for small objects. For example, if you want to reduce + the default memory requirements from 256K to 128K, compile with + make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + Of course this will generally degrade compression (there's no free lunch). + + The memory requirements for inflate are (in bytes) 1 << windowBits + that is, 32K for windowBits=15 (default value) plus about 7 kilobytes + for small objects. +*/ + + /* Type declarations */ + +#ifndef OF /* function prototypes */ +# ifdef STDC +# define OF(args) args +# else +# define OF(args) () +# endif +#endif + +#ifndef Z_ARG /* function prototypes for stdarg */ +# if defined(STDC) || defined(Z_HAVE_STDARG_H) +# define Z_ARG(args) args +# else +# define Z_ARG(args) () +# endif +#endif + +/* The following definitions for FAR are needed only for MSDOS mixed + * model programming (small or medium model with some far allocations). + * This was tested only with MSC; for other MSDOS compilers you may have + * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, + * just define FAR to be empty. + */ +#ifdef SYS16BIT +# if defined(M_I86SM) || defined(M_I86MM) + /* MSC small or medium model */ +# define SMALL_MEDIUM +# ifdef _MSC_VER +# define FAR _far +# else +# define FAR far +# endif +# endif +# if (defined(__SMALL__) || defined(__MEDIUM__)) + /* Turbo C small or medium model */ +# define SMALL_MEDIUM +# ifdef __BORLANDC__ +# define FAR _far +# else +# define FAR far +# endif +# endif +#endif + +#if defined(WINDOWS) || defined(WIN32) + /* If building or using zlib as a DLL, define ZLIB_DLL. + * This is not mandatory, but it offers a little performance increase. + */ +# ifdef ZLIB_DLL +# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) +# ifdef ZLIB_INTERNAL +# define ZEXTERN extern __declspec(dllexport) +# else +# define ZEXTERN extern __declspec(dllimport) +# endif +# endif +# endif /* ZLIB_DLL */ + /* If building or using zlib with the WINAPI/WINAPIV calling convention, + * define ZLIB_WINAPI. + * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. + */ +# ifdef ZLIB_WINAPI +# ifdef FAR +# undef FAR +# endif +# include + /* No need for _export, use ZLIB.DEF instead. */ + /* For complete Windows compatibility, use WINAPI, not __stdcall. 
*/ +# define ZEXPORT WINAPI +# ifdef WIN32 +# define ZEXPORTVA WINAPIV +# else +# define ZEXPORTVA FAR CDECL +# endif +# endif +#endif + +#if defined (__BEOS__) +# ifdef ZLIB_DLL +# ifdef ZLIB_INTERNAL +# define ZEXPORT __declspec(dllexport) +# define ZEXPORTVA __declspec(dllexport) +# else +# define ZEXPORT __declspec(dllimport) +# define ZEXPORTVA __declspec(dllimport) +# endif +# endif +#endif + +#ifndef ZEXTERN +# define ZEXTERN extern +#endif +#ifndef ZEXPORT +# define ZEXPORT +#endif +#ifndef ZEXPORTVA +# define ZEXPORTVA +#endif + +#ifndef FAR +# define FAR +#endif + +#if !defined(__MACTYPES__) +typedef unsigned char Byte; /* 8 bits */ +#endif +typedef unsigned int uInt; /* 16 bits or more */ +typedef unsigned long uLong; /* 32 bits or more */ + +#ifdef SMALL_MEDIUM + /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ +# define Bytef Byte FAR +#else + typedef Byte FAR Bytef; +#endif +typedef char FAR charf; +typedef int FAR intf; +typedef uInt FAR uIntf; +typedef uLong FAR uLongf; + +#ifdef STDC + typedef void const *voidpc; + typedef void FAR *voidpf; + typedef void *voidp; +#else + typedef Byte const *voidpc; + typedef Byte FAR *voidpf; + typedef Byte *voidp; +#endif + +#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) +# include +# if (UINT_MAX == 0xffffffffUL) +# define Z_U4 unsigned +# elif (ULONG_MAX == 0xffffffffUL) +# define Z_U4 unsigned long +# elif (USHRT_MAX == 0xffffffffUL) +# define Z_U4 unsigned short +# endif +#endif + +#ifdef Z_U4 + typedef Z_U4 z_crc_t; +#else + typedef unsigned long z_crc_t; +#endif + +#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_UNISTD_H +#endif + +#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_STDARG_H +#endif + +#ifdef STDC +# ifndef Z_SOLO +# include /* for off_t */ +# endif +#endif + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +# include /* for va_list */ +# endif +#endif + +#ifdef _WIN32 +# ifndef Z_SOLO +# include /* for wchar_t */ +# endif +#endif + +/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and + * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even + * though the former does not conform to the LFS document), but considering + * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as + * equivalently requesting no 64-bit operations + */ +#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 +# undef _LARGEFILE64_SOURCE +#endif + +#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) +# define Z_HAVE_UNISTD_H +#endif +#ifndef Z_SOLO +# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) +# include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ +# ifdef VMS +# include /* for off_t */ +# endif +# ifndef z_off_t +# define z_off_t off_t +# endif +# endif +#endif + +#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0 +# define Z_LFS64 +#endif + +#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) +# define Z_LARGE64 +#endif + +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) +# define Z_WANT64 +#endif + +#if !defined(SEEK_SET) && !defined(Z_SOLO) +# define SEEK_SET 0 /* Seek from beginning of file. */ +# define SEEK_CUR 1 /* Seek from current position. 
*/ +# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ +#endif + +#ifndef z_off_t +# define z_off_t long +#endif + +#if !defined(_WIN32) && defined(Z_LARGE64) +# define z_off64_t off64_t +#else +# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) +# define z_off64_t __int64 +# else +# define z_off64_t z_off_t +# endif +#endif + +/* MVS linker does not support external names larger than 8 bytes */ +#if defined(__MVS__) + #pragma map(deflateInit_,"DEIN") + #pragma map(deflateInit2_,"DEIN2") + #pragma map(deflateEnd,"DEEND") + #pragma map(deflateBound,"DEBND") + #pragma map(inflateInit_,"ININ") + #pragma map(inflateInit2_,"ININ2") + #pragma map(inflateEnd,"INEND") + #pragma map(inflateSync,"INSY") + #pragma map(inflateSetDictionary,"INSEDI") + #pragma map(compressBound,"CMBND") + #pragma map(inflate_table,"INTABL") + #pragma map(inflate_fast,"INFA") + #pragma map(inflate_copyright,"INCOPY") +#endif + +#endif /* ZCONF_H */ diff --git a/config/cmake/ZLIB/zlib-config-version.cmake.in b/config/cmake/ZLIB/zlib-config-version.cmake.in new file mode 100644 index 00000000000..38bcde858aa --- /dev/null +++ b/config/cmake/ZLIB/zlib-config-version.cmake.in @@ -0,0 +1,42 @@ +#----------------------------------------------------------------------------- +# ZLIB Version file for install directory +#----------------------------------------------------------------------------- + +set (PACKAGE_VERSION "@ZLIB_VERSION_STRING@") + +if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}" ) + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + if ("${PACKAGE_FIND_VERSION_MAJOR}" STREQUAL "@ZLIB_VERSION_MAJOR@") + + # exact match for version @ZLIB_VERSION_MAJOR@.@ZLIB_VERSION_MINOR@ + if ("${PACKAGE_FIND_VERSION_MINOR}" STREQUAL "@ZLIB_VERSION_MINOR@") + + # compatible with any version @ZLIB_VERSION_MAJOR@.@ZLIB_VERSION_MINOR@.x + set (PACKAGE_VERSION_COMPATIBLE TRUE) + + if ("${PACKAGE_FIND_VERSION_PATCH}" STREQUAL "@ZLIB_VERSION_RELEASE@") + set (PACKAGE_VERSION_EXACT TRUE) + + if ("${PACKAGE_FIND_VERSION_TWEAK}" STREQUAL "@ZLIB_VERSION_SUBRELEASE@") + # not using this yet + endif () + endif () + else () + set (PACKAGE_VERSION_COMPATIBLE FALSE) + endif () + endif () +endif () + +# if the installed or the using project don't have CMAKE_SIZEOF_VOID_P set, ignore it: +if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "@CMAKE_SIZEOF_VOID_P@" STREQUAL "") + return() +endif() + +# check that the installed version has the same 32/64bit-ness as the one which is currently searching: +if(NOT "${CMAKE_SIZEOF_VOID_P}" STREQUAL "@CMAKE_SIZEOF_VOID_P@") + math(EXPR installedBits "@CMAKE_SIZEOF_VOID_P@ * 8") + set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)") + set(PACKAGE_VERSION_UNSUITABLE TRUE) +endif() + diff --git a/config/cmake/ZLIB/zlib-config.cmake.in b/config/cmake/ZLIB/zlib-config.cmake.in new file mode 100644 index 00000000000..307896b61a9 --- /dev/null +++ b/config/cmake/ZLIB/zlib-config.cmake.in @@ -0,0 +1,58 @@ +#----------------------------------------------------------------------------- +# ZLIB Config file for compiling against ZLIB build directory +#----------------------------------------------------------------------------- +@PACKAGE_INIT@ + +string(TOUPPER @ZLIB_PACKAGE@ ZLIB_PACKAGE_NAME) + +set (${ZLIB_PACKAGE_NAME}_VALID_COMPONENTS static shared) + +#----------------------------------------------------------------------------- +# User Options +#----------------------------------------------------------------------------- +set (${ZLIB_PACKAGE_NAME}_BUILD_SHARED_LIBS 
@BUILD_SHARED_LIBS@) +set (${ZLIB_PACKAGE_NAME}_EXPORT_LIBRARIES @ZLIB_LIBRARIES_TO_EXPORT@) + +#----------------------------------------------------------------------------- +# Directories +#----------------------------------------------------------------------------- +set (${ZLIB_PACKAGE_NAME}_INCLUDE_DIR "@PACKAGE_INCLUDE_INSTALL_DIR@") + +set (${ZLIB_PACKAGE_NAME}_SHARE_DIR "@PACKAGE_SHARE_INSTALL_DIR@") +set_and_check (${ZLIB_PACKAGE_NAME}_BUILD_DIR "@PACKAGE_CURRENT_BUILD_DIR@") + +#----------------------------------------------------------------------------- +# Version Strings +#----------------------------------------------------------------------------- +set (${ZLIB_PACKAGE_NAME}_VERSION_STRING @ZLIB_VERSION_STRING@) +set (${ZLIB_PACKAGE_NAME}_VERSION_MAJOR @ZLIB_VERSION_MAJOR@) +set (${ZLIB_PACKAGE_NAME}_VERSION_MINOR @ZLIB_VERSION_MINOR@) + +#----------------------------------------------------------------------------- +# Don't include targets if this file is being picked up by another +# project which has already build ZLIB as a subproject +#----------------------------------------------------------------------------- +if (NOT TARGET "@ZLIB_PACKAGE@") + include (@PACKAGE_SHARE_INSTALL_DIR@/@ZLIB_PACKAGE@@ZLIB_PACKAGE_EXT@-targets.cmake) +endif () + +# Handle default component(static) : +if (NOT ${ZLIB_PACKAGE_NAME}_FIND_COMPONENTS) + set (${ZLIB_PACKAGE_NAME}_FIND_COMPONENTS static) + set (${ZLIB_PACKAGE_NAME}_FIND_REQUIRED_static true) +endif () + +# Handle requested components: +list (REMOVE_DUPLICATES ${ZLIB_PACKAGE_NAME}_FIND_COMPONENTS) +foreach (comp IN LISTS ${ZLIB_PACKAGE_NAME}_FIND_COMPONENTS) + list (FIND ${ZLIB_PACKAGE_NAME}_EXPORT_LIBRARIES "@ZLIB_LIB_CORENAME@-${comp}" HAVE_COMP) + if (${HAVE_COMP} LESS 0) + set (${ZLIB_PACKAGE_NAME}_${comp}_FOUND 0) + else () + set (${ZLIB_PACKAGE_NAME}_${comp}_FOUND 1) + string(TOUPPER ${ZLIB_PACKAGE_NAME}_${comp}_LIBRARY COMP_LIBRARY) + set (${COMP_LIBRARY} ${${COMP_LIBRARY}} @ZLIB_LIB_CORENAME@-${comp}) + endif () +endforeach () + +check_required_components (${ZLIB_PACKAGE_NAME}) diff --git a/config/cmake/cacheinit.cmake b/config/cmake/cacheinit.cmake index faa05339260..41293ef723f 100644 --- a/config/cmake/cacheinit.cmake +++ b/config/cmake/cacheinit.cmake @@ -47,14 +47,21 @@ set (HDF5_MINGW_STATIC_GCC_LIBS ON CACHE BOOL "Statically link libgcc/libstdc++" set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE) set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) + +set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use HDF5_ZLib from compressed file" FORCE) +set (ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.2.13" CACHE STRING "Use ZLIB from original location" FORCE) +set (ZLIB_TGZ_ORIGNAME "zlib-1.2.13.tar.gz" CACHE STRING "Use ZLIB from original compressed file" FORCE) +set (ZLIB_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for ZLIB FetchContent" FORCE) + +set (SZIP_PACKAGE_NAME "szip" CACHE STRING "Name of SZIP package" FORCE) +set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) set (SZIP_TGZ_NAME "SZip.tar.gz" CACHE STRING "Use SZip from compressed file" FORCE) set (SZAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE) - -set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of HDF5_ZLIB package" FORCE) -set 
(LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) -set (SZIP_PACKAGE_NAME "szip" CACHE STRING "Name of SZIP package" FORCE) +set (LIBAEC_TGZ_ORIGPATH "https://gitlab.dkrz.de/k202009/libaec/-/archive/v1.0.6" CACHE STRING "Use LIBAEC from original location" FORCE) +set (LIBAEC_TGZ_ORIGNAME "libaec-v1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original compressed file" FORCE) +set (LIBAEC_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for LIBAEC FetchContent" FORCE) ######################## # filter plugin options diff --git a/config/toolchain/aarch64.cmake b/config/toolchain/aarch64.cmake index adb86390e46..aa84a742654 100644 --- a/config/toolchain/aarch64.cmake +++ b/config/toolchain/aarch64.cmake @@ -1,5 +1,5 @@ set(TOOLCHAIN_PREFIX aarch64-linux-gnu) -set(ANDROID_NDK /opt/android-ndk-r25b-linux/android-ndk-r25b) +set(ANDROID_NDK /opt/android-ndk-linux) set (CMAKE_SYSTEM_NAME Android) set (CMAKE_ANDROID_ARCH_ABI x86_64) #set (CMAKE_ANDROID_STANDALONE_TOOLCHAIN ${ANDROID_NDK}/build/cmake/andriod.toolchain.cmake) diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index b17a7e58123..a8d86b07ff7 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -296,8 +296,8 @@ IV. Further considerations B. Use source packages from an GIT server by adding the following CMake options: HDF5_ALLOW_EXTERNAL_SUPPORT:STRING="GIT" - ZLIB_GIT_URL:STRING="http://some_location/zlib" - SZIP_GIT_URL:STRING="http://some_location/szip" + ZLIB_GIT_URL:STRING="https://some_location/zlib" + SZIP_GIT_URL:STRING="https://some_location/szip" where "some_location" is the URL to the GIT repository. Also set CMAKE_BUILD_TYPE to the configuration type. @@ -312,6 +312,29 @@ IV. Further considerations to the configuration type during configuration. See the settings in the config/cmake/cacheinit.cmake file HDF uses for testing. + D. Use original source packages from a compressed file by adding the following + CMake options: + BUILD_SZIP_WITH_FETCHCONTENT:BOOL=ON + LIBAEC_TGZ_ORIGNAME:STRING="szip_src.ext" + LIBAEC_TGZ_ORIGPATH:STRING="some_location" + + BUILD_ZLIB_WITH_FETCHCONTENT:BOOL=ON + ZLIB_TGZ_ORIGNAME:STRING="zlib_src.ext" + ZLIB_TGZ_ORIGPATH:STRING="some_location" + + HDF5_ALLOW_EXTERNAL_SUPPORT:STRING="TGZ" + where "some_location" is the URL or full path to the compressed + file and ext is the type of compression file. The individual filters are + enabled by setting the BUILD__WITH_FETCHCONTENT CMake variable to ON. + Also set CMAKE_BUILD_TYPE to the configuration type during configuration. + See the settings in the config/cmake/cacheinit.cmake file HDF uses for testing. + + The files can also be retrieved from a local path if necessary + TGZPATH:STRING="some_location" + by setting + ZLIB_USE_LOCALCONTENT:BOOL=ON + LIBAEC_USE_LOCALCONTENT:BOOL=ON + 3. If you plan to use compression plugins: A. Use source packages from an GIT server by adding the following CMake options: @@ -472,6 +495,7 @@ These five steps are described in detail below. * Visual Studio 15 2017 * Visual Studio 15 2017 Win64 * Visual Studio 16 2019 + * Visual Studio 17 2022 is: * SZIP_INCLUDE_DIR:PATH= @@ -496,13 +520,19 @@ These five steps are described in detail below. 
set (HDF_TEST_EXPRESS "2" CACHE STRING "Control testing framework (0-3)" FORCE) set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE) set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) + set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use ZLib from compressed file" FORCE) + set (ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.2.13" CACHE STRING "Use ZLIB from original location" FORCE) + set (ZLIB_TGZ_ORIGNAME "zlib-1.2.13.tar.gz" CACHE STRING "Use ZLIB from original compressed file" FORCE) + set (ZLIB_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for ZLIB FetchContent" FORCE) + set (SZIP_PACKAGE_NAME "szip" CACHE STRING "Name of SZIP package" FORCE) + set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) set (SZIP_TGZ_NAME "SZip.tar.gz" CACHE STRING "Use SZip from compressed file" FORCE) set (SZAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE) - set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) - set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) - set (SZIP_PACKAGE_NAME "szip" CACHE STRING "Name of SZIP package" FORCE) + set (LIBAEC_TGZ_ORIGPATH "https://gitlab.dkrz.de/k202009/libaec/-/archive/v1.0.6" CACHE STRING "Use LIBAEC from original location" FORCE) + set (LIBAEC_TGZ_ORIGNAME "libaec-v1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original compressed file" FORCE) + set (LIBAEC_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for LIBAEC FetchContent" FORCE) ####################### # filter plugin options ####################### @@ -837,6 +867,14 @@ if (WINDOWS) else () H5_DEFAULT_PLUGINDIR "/usr/local/hdf5/lib/plugin" endif () +if (BUILD_SZIP_WITH_FETCHCONTENT) + LIBAEC_TGZ_ORIGPATH "Use LIBAEC from original location" "https://gitlab.dkrz.de/k202009/libaec/-/archive/v1.0.6" + LIBAEC_TGZ_ORIGNAME "Use LIBAEC from original compressed file" "libaec-v1.0.6.tar.gz" + LIBAEC_USE_LOCALCONTENT "Use local file for LIBAEC FetchContent" OFF +if (BUILD_ZLIB_WITH_FETCHCONTENT) + ZLIB_TGZ_ORIGPATH "Use ZLIB from original location" "https://github.com/madler/zlib/releases/download/v1.2.13" + ZLIB_TGZ_ORIGNAME "Use ZLIB from original compressed file" "zlib-1.2.13.tar.gz" + ZLIB_USE_LOCALCONTENT "Use local file for ZLIB FetchContent" OFF NOTE: The BUILD_STATIC_EXECS ("Build Static Executables") option is only valid diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 5de8fc9fe32..72134ef2718 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -47,6 +47,31 @@ New Features Configuration: ------------- + - Added new option to build libaec and zlib inline with CMake. + + Using the CMake FetchContent module, the external filters can populate + content at configure time via any method supported by the ExternalProject + module. Whereas ExternalProject_Add() downloads at build time, the + FetchContent module makes content available immediately, allowing the + configure step to use the content in commands like add_subdirectory(), + include() or file() operations. 
+ + The HDF options (and defaults) for using this are: + BUILD_SZIP_WITH_FETCHCONTENT:BOOL=OFF + LIBAEC_USE_LOCALCONTENT:BOOL=OFF + BUILD_ZLIB_WITH_FETCHCONTENT:BOOL=OFF + ZLIB_USE_LOCALCONTENT:BOOL=OFF + + The CMake variables to control the path and file names: + LIBAEC_TGZ_ORIGPATH:STRING + LIBAEC_TGZ_ORIGNAME:STRING + ZLIB_TGZ_ORIGPATH:STRING + ZLIB_TGZ_ORIGNAME:STRING + + See the CMakeFilters.cmake and config/cmake/cacheinit.cmake files for usage. + + (ADB - 2023/02/21) + - Removal of MPE support The ability to build with MPE instrumentation has been removed along with From 71fb06f0682ff40845ac7d00362a7ab036ab2a76 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 1 Mar 2023 08:07:37 -0600 Subject: [PATCH 063/231] Missing or misplaced entries in API reference tables #2470 (#2472) * Missing or misplaced entries in API reference tables #2470 * Doxy corrections --- doxygen/dox/Overview.dox | 4 - doxygen/examples/tables/propertyLists.dox | 267 +++++++++++++++++----- src/H5Pmodule.h | 2 + 3 files changed, 208 insertions(+), 65 deletions(-) diff --git a/doxygen/dox/Overview.dox b/doxygen/dox/Overview.dox index 3d171f7e2b5..fb6231ce36a 100644 --- a/doxygen/dox/Overview.dox +++ b/doxygen/dox/Overview.dox @@ -37,10 +37,6 @@ documents cover a mix of tasks, concepts, and reference, to help a specific \par Offline reading You can download it as a tgz archive for offline reading. -\par History - A snapshot (~April 2017) of the pre-Doxygen HDF5 documentation can be found - here. - \par ToDo List There is plenty of unfinished business. diff --git a/doxygen/examples/tables/propertyLists.dox b/doxygen/examples/tables/propertyLists.dox index 498a820245b..375fd509702 100644 --- a/doxygen/examples/tables/propertyLists.dox +++ b/doxygen/examples/tables/propertyLists.dox @@ -86,7 +86,7 @@ Iterates over properties in a property class or list -#H5Pregister/H5Punregister +#H5Pregister/#H5Punregister Registers/removes a permanent property with/from a property list class @@ -124,14 +124,6 @@ Sets/retrieves size of parameter used to control B-trees for indexing chunked datasets. -#H5Pset_file_image -Sets an initial file image in a memory buffer. - - -#H5Pget_file_image -Retrieves a copy of the file image designated as the initial content and structure of a file. - - #H5Pset_file_space_page_size/#H5Pget_file_space_page_size Sets or retrieves the file space page size used in paged aggregation and paged buffering. @@ -179,11 +171,19 @@ creation property list. Sets/retrieves metadata cache and raw data chunk cache parameters. +#H5Pset_core_write_tracking/#H5Pget_core_write_tracking +Sets/retrieves write tracking information for core driver. + + #H5Pset_elink_file_cache_size/#H5Pget_elink_file_cache_size Sets/retrieves the size of the external link open file cache from the specified file access property list. +#H5Pset_evict_on_close/#H5Pget_evict_on_close +Set/get the file access property list setting that determines whether an HDF5 object will be evicted from the library's metadata cache when it is closed. + + #H5Pset_gc_references/#H5Pget_gc_references Sets/retrieves garbage collecting references flag. @@ -196,18 +196,56 @@ file access property list. Retrieves a data offset from the file access property list. +#H5Pset_fclose_degree/#H5Pget_fclose_degree +Sets/retrieves file close degree property. + + +#H5Pset_file_image +Sets an initial file image in a memory buffer. 
+ + +#H5Pget_file_image +Retrieves a copy of the file image designated as the initial content and structure of a file. + + +#H5Pset_file_image_callbacks/#H5Pget_file_image_callbacks +Sets/gets the callbacks for working with file images. + + +#H5Pset_file_locking/#H5Pget_file_locking +Sets/retrieves file locking property values. + + #H5Pset_meta_block_size/#H5Pget_meta_block_size Sets the minimum metadata blocksize or retrieves the current metadata block size setting. -#H5Pset_mdc_config -Set the initial metadata cache configuration in the indicated File Access Property List -to the supplied value. +#H5Pset_metadata_read_attempts/#H5Pget_metadata_read_attempts +Sets/gets the number of read attempts from a file access property list. + + +#H5Pset_mdc_config/#H5Pget_mdc_config +Set/get the initial metadata cache configuration in the indicated file access property list. + + +#H5Pset_mdc_image_config/#H5Pget_mdc_image_config +Set/get the metadata cache image option for a file access property list. + + +#H5Pset_mdc_log_options/#H5Pget_mdc_log_options +Set/get the metadata cache logging options. + + +#H5Pset_multi_type/#H5Pget_multi_type +Sets/gets the type of data property for the MULTI driver. + + +#H5Pset_object_flush_cb/#H5Pget_object_flush_cb +Set/get the object flush property values from the file access property list. -#H5Pget_mdc_config -Get the current initial metadata cache config-uration from the indicated File Access -Property List. +#H5Pset_page_buffer_size/#H5Pget_page_buffer_size +Set/get the the maximum size for the page buffer. #H5Pset_sieve_buf_size/#H5Pget_sieve_buf_size @@ -231,6 +269,30 @@ versions used when creating objects. #H5Pget_small_data_block_size Retrieves the current small data block size setting. + +#H5Pset_vol +Sets the file VOL connector for a file access property list. + + +#H5Pget_vol_cap_flags +Retrieves the capability flags for the VOL connector that will be used with a file access property list. + + +#H5Pget_vol_id +Retrieves the identifier of the current VOL connector. + + +#H5Pget_vol_info +Retrieves a copy of the VOL information for a connector. + + +#H5Pset_mpi_params/#H5Pget_mpi_params +Sets/retrieves the MPI communicator and info. + + +#H5Pset_coll_metadata_write/#H5Pget_coll_metadata_write +Sets/retrieves metadata write mode setting. + //! [fapl_table] * @@ -254,6 +316,18 @@ versions used when creating objects. Returns a pointer to file driver information. +#H5Pset_driver_by_name +Sets a file driver according to a given driver name. + + +#H5Pset_driver_by_value +Sets a file driver according to a given driver value. + + +#H5Pget_driver_config_str +Retrieves a string representation of the configuration for the driver. + + #H5Pset_fapl_core/#H5Pget_fapl_core Sets the driver for buffered memory files (in RAM) or retrieves information regarding the driver. @@ -268,10 +342,22 @@ the driver. larger than 2 gigabytes, or retrieves information regarding driver. +#H5Pset_fapl_hdfs/#H5Pget_fapl_hdfs +. + + +#H5Pset_fapl_ioc/#H5Pget_fapl_ioc +Modifies/queries the file driver properties of the I/O concentrator driver. + + #H5Pset_fapl_log Sets logging driver. +#H5Pset_fapl_mirror/#H5Pget_fapl_mirror +Modifies/queries the file driver properties of the mirror driver. + + #H5Pset_fapl_mpio/#H5Pget_fapl_mpio Sets driver for files on parallel file systems (MPI I/O) or retrieves information regarding the driver. @@ -286,6 +372,10 @@ regarding the driver. or retrieves information regarding driver. 
+#H5Pset_fapl_onion/#H5Pget_fapl_onion +Modifies/queries the file driver properties of the onion driver. + + #H5Pset_fapl_sec2 Sets driver for unbuffered permanent files or retrieves information regarding driver. @@ -299,6 +389,10 @@ and one raw data file. Sets driver for buffered permanent files. +#H5Pset_fapl_subfiling/#H5Pget_fapl_subfiling +Modifies/queries the file driver properties of the subfiling driver. + + #H5Pset_fapl_windows Sets the Windows I/O driver. @@ -337,6 +431,10 @@ and one raw data file. Retrieves the size of chunks for the raw data of a chunked layout dataset. +#H5Pset_chunk_opts/#H5Pget_chunk_opts +Sets/gets the edge chunk option setting from a dataset creation property list. + + #H5Pset_deflate Sets compression method and compression level. @@ -439,6 +537,34 @@ encoding for object names. #H5Pget_char_encoding Retrieves the character encoding used to create a string. + +#H5Pset_virtual +Sets the mapping between virtual and source datasets. + + +#H5Pget_virtual_count +Gets the number of mappings for the virtual dataset. + + +#H5Pget_virtual_dsetname +Gets the name of a source dataset used in the mapping. + + +#H5Pget_virtual_filename +Gets the filename of a source dataset used in the mapping. + + +#H5Pget_virtual_srcspace +Gets a dataspace identifier for the selection within the source dataset used in the mapping. + + +#H5Pget_virtual_vspace +Gets a dataspace identifier for the selection within the virtual dataset used in the mapping. + + +#H5Pset_dset_no_attrs_hint/#H5Pget_dset_no_attrs_hint +Sets/gets the flag to create minimized dataset object headers. + //! [dcpl_table] * @@ -458,32 +584,77 @@ encoding for object names. Reads buffer settings. +#H5Pset_append_flush/#H5Pget_append_flush +Sets/gets the values of the append property that is set up in the dataset access property list. + + #H5Pset_chunk_cache/#H5Pget_chunk_cache Sets/gets the raw data chunk cache parameters. -#H5Pset_edc_check/#H5Pget_edc_check -Sets/gets whether to enable error-detection when reading a dataset. +#H5Pset_efile_prefix/#H5Pget_efile_prefix +Sets/gets the prefix for external raw data storage files as set in the dataset access property list. -#H5Pset_filter_callback -Sets user-defined filter callback function. +#H5Pset_virtual_prefix/#H5Pget_virtual_prefix +Sets/gets the prefix to be applied to VDS source file paths. + + +#H5Pset_virtual_printf_gap/#H5Pget_virtual_printf_gap +Sets/gets the maximum number of missing source files and/or datasets with the printf-style names when getting the extent for an unlimited virtual dataset. + + +#H5Pset_virtual_view/#H5Pget_virtual_view +Sets/gets the view of the virtual dataset (VDS) to include or exclude missing mapped elements. + + +//! [dapl_table] + * +//! [dxpl_table] + + + + + + + + + + + + + - - + + + + + + + + + + + + + + - - + + @@ -511,38 +682,20 @@ encoding for object names. - - - - - - - -
Data transfer property list functions (H5P)
C Function Purpose
#H5Pset_btree_ratios/#H5Pget_btree_ratios Sets/gets B-tree split ratios for a dataset transfer property list.
#H5Pset_buffer Maximum size for the type conversion buffer and the background buffer. May also supply pointers to application-allocated buffers.
#H5Pset_data_transform/#H5Pget_data_transform Sets/gets a data transform expression.
#H5Pset_type_conv_cb/#H5Pget_type_conv_cb Sets/gets user-defined datatype conversion callback function.
#H5Pset_dataset_io_hyperslab_selection Sets a hyperslab file selection for a dataset I/O operation.
#H5Pset_edc_check/#H5Pget_edc_check Sets/gets whether to enable error-detection when reading a dataset.
#H5Pset_hyper_vector_size set the number of "I/O vectors" (offset and length pairs) which are to be accumulated in memory before being issued to the lower levels of the library for reading or writing the actual data.
#H5Pset_filter_callback Sets user-defined filter callback function.
#H5Pset_hyper_vector_size/#H5Pget_hyper_vector_size Sets/gets number of I/O vectors to be read/written in hyperslab I/O.
#H5Pset_btree_ratios/#H5Pget_btree_ratios Sets/gets B-tree split ratios for a dataset transfer property list.
#H5Pset_vlen_mem_manager/#H5Pget_vlen_mem_manager Sets/gets the memory manager used for variable-length datatype allocation.
#H5Pset_multi_type/#H5Pget_multi_type Sets/gets the type of data property for the MULTI driver.
#H5Pset_small_data_block_size/#H5Pget_small_data_block_size Sets/gets the size of a contiguous block reserved for small data.
-//! [dapl_table]
- *
-//! [dxpl_table]
Data transfer property list functions (H5P)
C Function Purpose
#H5Pget_mpio_actual_chunk_opt_mode Gets the type of chunk optimization that HDF5 actually performed on the last parallel I/O call.
#H5Pset_buffer Maximum size for the type conversion buffer and the background buffer. May also supply pointers to application-allocated buffers.
#H5Pget_mpio_actual_io_mode Gets the type of I/O that HDF5 actually performed on the last parallel I/O call.
#H5Pset_hyper_vector_size set the number of "I/O vectors" (offset and length pairs) which are to be accumulated in memory before being issued to the lower levels of the library for reading or writing the actual data.
#H5Pget_mpio_no_collective_cause Gets local and global causes that broke collective I/O on the last parallel I/O call.
#H5Pset_btree_ratios Set the B-tree split ratios for a dataset transfer property list. The split ratios determine what percent of children go in the first node when a node splits.
H5Pset_preserve/H5Pget_preserve No longer available, deprecated as it no longer has any effect.
//! [dxpl_table] @@ -593,6 +746,10 @@ C function is a macro: \see \ref api-compat-macros. Sets up use of the Fletcher32 checksum filter. +#H5Pset_local_heap_size_hint#H5Pget_local_heap_size_hint/ +Sets/gets the anticipated maximum size of a local heap. + + #H5Pset_link_phase_change Sets the parameters for conversion between compact and dense groups. @@ -625,14 +782,6 @@ C function is a macro: \see \ref api-compat-macros. Queries whether link creation order is tracked and/or indexed in a group. -#H5Pset_create_intermediate_group -Specifies in the property list whether to create missing intermediate groups. - - -#H5Pget_create_intermediate_group -Determines whether the property is set to enable creating missing intermediate groups. - - #H5Pset_char_encoding Sets the character encoding used to encode a string. Use to set ASCII or UTF-8 character encoding for object names. @@ -771,10 +920,6 @@ encoding for object names. #H5Pset_char_encoding/#H5Pget_char_encoding Sets/gets the character encoding used to encode link and attribute names. - -#H5Pset_create_intermediate_group/#H5Pget_create_intermediate_group -Specifies/retrieves whether to create missing intermediate groups. - //! [strcpl_table] * diff --git a/src/H5Pmodule.h b/src/H5Pmodule.h index 38d06e2292b..f2a1e5aa7fe 100644 --- a/src/H5Pmodule.h +++ b/src/H5Pmodule.h @@ -904,6 +904,7 @@ * * \ref PLCR * \snippet{doc} tables/propertyLists.dox fapl_table + * \snippet{doc} tables/propertyLists.dox fd_pl_table * * \ref PLCR * \snippet{doc} tables/propertyLists.dox lapl_table @@ -1007,6 +1008,7 @@ * can be adjusted at runtime before a file is created or opened. * * \snippet{doc} tables/propertyLists.dox fapl_table + * \snippet{doc} tables/propertyLists.dox fd_pl_table * * \defgroup FCPL File Creation Properties * \ingroup GCPL From a674222a2899c45612ca3c009e4bca77f2d42790 Mon Sep 17 00:00:00 2001 From: Egbert Eich Date: Thu, 2 Mar 2023 18:17:49 +0100 Subject: [PATCH 064/231] Check for overflow when calculating on-disk attribute data size (#2459) * Remove duplicate code Signed-off-by: Egbert Eich * Add test case for CVE-2021-37501 Bogus sizes in this test case causes the on-disk data size calculation in H5O__attr_decode() to overflow so that the calculated size becomes 0. This causes the read to overflow and h5dump to segfault. This test case was crafted, the test file was not directly generated by HDF5. Test case from: https://github.com/ST4RF4LL/Something_Found/blob/main/HDF5_v1.13.0_h5dump_heap_overflow.md --- release_docs/RELEASE.txt | 13 +++++++++++++ src/H5Oattr.c | 7 +++---- tools/test/h5dump/CMakeTests.cmake | 5 +++++ tools/test/h5dump/testh5dump.sh.in | 5 +++++ tools/testfiles/tCVE-2021-37501_attr_decode.h5 | Bin 0 -> 48544 bytes 5 files changed, 26 insertions(+), 4 deletions(-) create mode 100644 tools/testfiles/tCVE-2021-37501_attr_decode.h5 diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 72134ef2718..d6ab00e3ee2 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -244,6 +244,19 @@ Bug Fixes since HDF5-1.13.3 release =================================== Library ------- + - Fix CVE-2021-37501 / GHSA-rfgw-5vq3-wrjf + + Check for overflow when calculating on-disk attribute data size. + + A bogus hdf5 file may contain dataspace messages with sizes + which lead to the on-disk data sizes to exceed what is addressable. + When calculating the size, make sure, the multiplication does not + overflow. + The test case was crafted in a way that the overflow caused the + size to be 0. 
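To make the guard concrete, here is a small standalone C sketch (illustration only, not HDF5 library code; the function and variable names are invented for this note) of the same wrap-around check that the src/H5Oattr.c hunk below applies to the decoded dataspace and datatype sizes: after the unsigned multiplication, dividing the product by one factor must give back the other factor, otherwise the multiplication wrapped and the value is rejected.

    #include <stdint.h>
    #include <stdio.h>

    /* Compute nelmts * elmt_size into *out, failing instead of wrapping.
     * Returns 0 on success, -1 if the product does not fit in a size_t. */
    static int
    checked_data_size(uint64_t nelmts, size_t elmt_size, size_t *out)
    {
        size_t total;

        if (nelmts == 0 || elmt_size == 0) {
            *out = 0;
            return 0;
        }

        total = (size_t)nelmts * elmt_size;

        /* If the multiplication wrapped, dividing the truncated product by
         * one factor cannot give back the other factor. */
        if (total / elmt_size != nelmts)
            return -1;

        *out = total;
        return 0;
    }

    int
    main(void)
    {
        size_t size = 0;

        /* Element count chosen so the product wraps to exactly 0, mimicking
         * the corrupted file in which the computed attribute data size
         * became 0. */
        if (checked_data_size((uint64_t)1 << 60, 16, &size) < 0)
            printf("rejected: data size exceeds addressable range\n");

        if (checked_data_size(1000, 16, &size) == 0)
            printf("ok: %zu bytes\n", size);

        return 0;
    }

The division test avoids the need for a wider integer type and is the same idiom used in the H5O__attr_decode() change shown in the patch below.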
+ + (EFE - 2023/02/11 GH-2458) + - Fixed an issue with collective metadata writes of global heap data New test failures in parallel netCDF started occurring with debug diff --git a/src/H5Oattr.c b/src/H5Oattr.c index 638686587dc..e431cd2e620 100644 --- a/src/H5Oattr.c +++ b/src/H5Oattr.c @@ -221,10 +221,6 @@ H5O__attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, u else p += attr->shared->ds_size; - /* Get the datatype's size */ - if (0 == (dt_size = H5T_get_size(attr->shared->dt))) - HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, NULL, "unable to get datatype size") - /* Get the datatype & dataspace sizes */ if (0 == (dt_size = H5T_get_size(attr->shared->dt))) HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, NULL, "unable to get datatype size") @@ -234,6 +230,9 @@ H5O__attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, u /* Compute the size of the data */ H5_CHECKED_ASSIGN(attr->shared->data_size, size_t, ds_size * (hsize_t)dt_size, hsize_t); + /* Check if multiplication has overflown */ + if ((attr->shared->data_size / dt_size) != ds_size) + HGOTO_ERROR(H5E_RESOURCE, H5E_OVERFLOW, NULL, "data size exceeds addressable range") /* Go get the data */ if (attr->shared->data_size) { diff --git a/tools/test/h5dump/CMakeTests.cmake b/tools/test/h5dump/CMakeTests.cmake index bcbb1c1afbc..c328ef129e2 100644 --- a/tools/test/h5dump/CMakeTests.cmake +++ b/tools/test/h5dump/CMakeTests.cmake @@ -344,6 +344,7 @@ ${HDF5_TOOLS_DIR}/testfiles/tCVE_2018_11206_fill_old.h5 ${HDF5_TOOLS_DIR}/testfiles/tCVE_2018_11206_fill_new.h5 ${HDF5_TOOLS_DIR}/testfiles/zerodim.h5 + ${HDF5_TOOLS_DIR}/testfiles/tCVE-2021-37501_attr_decode.h5 #STD_REF_OBJ files ${HDF5_TOOLS_DIR}/testfiles/trefer_attr.h5 ${HDF5_TOOLS_DIR}/testfiles/trefer_compat.h5 @@ -1340,6 +1341,10 @@ ADD_H5_TEST (tCVE_2018_11206_fill_old 1 tCVE_2018_11206_fill_old.h5) ADD_H5_TEST (tCVE_2018_11206_fill_new 1 tCVE_2018_11206_fill_new.h5) + # test to verify fix for CVE-2021-37501: multiplication overflow in H5O__attr_decode() + # https://github.com/ST4RF4LL/Something_Found/blob/main/HDF5_v1.13.0_h5dump_heap_overflow.assets/poc + ADD_H5_TEST (tCVE-2021-37501_attr_decode 1 tCVE-2021-37501_attr_decode.h5) + # onion VFD tests ADD_H5_TEST (tst_onion_objs 0 --enable-error-stack --vfd-name onion --vfd-info 3 tst_onion_objs.h5) ADD_H5_TEST (tst_onion_dset_ext 0 --enable-error-stack --vfd-name onion --vfd-info 1 tst_onion_dset_ext.h5) diff --git a/tools/test/h5dump/testh5dump.sh.in b/tools/test/h5dump/testh5dump.sh.in index 24807cb5300..6ea410bb851 100644 --- a/tools/test/h5dump/testh5dump.sh.in +++ b/tools/test/h5dump/testh5dump.sh.in @@ -183,6 +183,7 @@ $SRC_H5DUMP_TESTFILES/tvms.h5 $SRC_H5DUMP_TESTFILES/err_attr_dspace.h5 $SRC_H5DUMP_TESTFILES/tCVE_2018_11206_fill_old.h5 $SRC_H5DUMP_TESTFILES/tCVE_2018_11206_fill_new.h5 +$SRC_H5DUMP_TESTFILES/tCVE-2021-37501_attr_decode.h5 $SRC_H5DUMP_TESTFILES/tst_onion_objs.h5 $SRC_H5DUMP_TESTFILES/tst_onion_objs.h5.onion $SRC_H5DUMP_TESTFILES/tst_onion_dset_ext.h5 @@ -1495,6 +1496,10 @@ TOOLTEST err_attr_dspace.ddl err_attr_dspace.h5 TOOLTEST_FAIL tCVE_2018_11206_fill_old.h5 TOOLTEST_FAIL tCVE_2018_11206_fill_new.h5 +# test to verify fix for CVE-2021-37501: multiplication overflow in H5O__attr_decode() +# https://github.com/ST4RF4LL/Something_Found/blob/main/HDF5_v1.13.0_h5dump_heap_overflow.assets/poc +TOOLTEST_FAIL tCVE-2021-37501_attr_decode.h5 + # test Onion VFD TOOLTEST tst_onion_objs.ddl --enable-error-stack --vfd-name onion --vfd-info 3 tst_onion_objs.h5 TOOLTEST tst_onion_dset_ext.ddl 
--enable-error-stack --vfd-name onion --vfd-info 1 tst_onion_dset_ext.h5 diff --git a/tools/testfiles/tCVE-2021-37501_attr_decode.h5 b/tools/testfiles/tCVE-2021-37501_attr_decode.h5 new file mode 100644 index 0000000000000000000000000000000000000000..331b05b59362a661b81364da3d7357312ad1e57c GIT binary patch literal 48544 zcmeFZ30#g{w=mwMNrRFmMIwjkR(7IpC9Bqu$U3BOE#tqJo+G zznW*Wrgh@CchUbnx>e4d1V_5)gg~ilzWzF|3^G&)+7p?GiTW&Jn&3olKK~Yz3mACf_VQ%T(ib~rLY!v4{sm$`M%yBYrGb2Smoos*wbx|r}xUAJ(3f#?&`0!h>D=c zix#>qcVD?!L_^TMpA!G`@6SE$OpA#8R16EcI{mXuPQg!YyKU@CW=kNVav!ps{w|BkLsW(J^>y5u<;qMXw2ZVZKRu?yf@+Z%hmww!KstXu)*`0e zMNEHmeJ6!_q_Y+Q^535SnEY64b-s9{>wQsy)c+j2@cS(6ce{2cK@Xiy`1j&Q1HU=&n*+Z&@S6j_Iq;hUzd7)m1HU=&n*+Z&@S6j_Iq-i42c}JN7$YjM zXb9~u?Ohvup&g*6%Rc*YMrU`0){L4i`;3q-O`{WE(Yq7sbOI6mKiMXK7U>&xBJ8^C z62@J0*26jz!upnWAIkNhK{I-PXFb~|&w|NpCS;8#1R(4N=aB`-og{MFMS zwE6$EHvV&u&|dhfe9DO|?eY`I3#9*V?VLaTBfSJo3hkOgyPpvLxu3%XPKK54D=dxw zQn<)-p|7P-yahG&_+Jh{=zr{TZU}(gA9SJ;of0XuGYX+lPP@eR-|Uco4=*kl+s}8D zbo%}=UJ(%;5jANMwQXI!=_VNcPrR6z*dJB@m+2qj^<#Bz=c|6~LH$W?)zPzXUpkdj*l5L$T~p!?dq;6W zI(^8=X_;8}?KytaUB&hDL*};=$bPn*>w6=F%NcwejKT_Vu1hAj!t^ebuQ%niTGR08 zz$skXoMt*cCFP)w{y?m>OfSz8ndM~*sr9BYc6U=1(Wma?I>*))!{x9%?rO!>3bVEt-5+HId%uW zw0_7L1n=giceHSQy{2%Aw>7QwyhqU((BtnLo|t>^6QTwUg&?IV)sAX_P&PjXXrv#*ctX-*{@{aD+TMQ$l37ab#C@4y=5h zf>)QHr~V6)EVmjD1f3ZT(-3utoq}~PWX@XO`%yQ-EV#K zpZ-bypRC|leWTMg3iYn>9ee7tq>&sj`w`duhdUu~hWqIPw=oo(RtT?`yuQODcEQYq#R#4sd z6g<#07T8Kd@yqENJYA*9de^=sdKZ>4=ZKAv+;buKEYK0=alNp=*m=B>myMfGyo900 zpP*EeGK^3u0EunMcp`ofoVxi5tcJdz_lm!R4*v*qoEJ;ApNnuMpWJEMr4KOS$5Zk& zScS#!Q6#(O^kQ4(Ge~o64b5`u%XMt9MWz03tUl`o@6G!r`Z;?tds1M;X52z7J5@~z zZ1*5$igTv!1KGD3$;EfbbzKE0cwGeh zjJvaOp}CN>IFpu3y(CBZwQ%Qp19-U%VSYCHsO@Hl*Q}~>O>P#Ztu$wmKN$DCuL>7o z){3e#mDzxzZ2qXL{bBFHK=9f90CkTqVB^m}r9TGSu%Z6DQOfT<x z`_1=oFS!mkOngDidfdjFZAw_vp}=}82BDbFV|W!fmfdnKg!r-us*~7*E#yBW)>U%EL4a(u~t3A z)BPv1jgj*3L1!xSIEOgPWf>=<+k5G6?*J=-+g%CtSa|lq$x~V7Q)C4QLe3E1S~%A z2`f}g*zD&!L4NUcc8V8@sgh>J{g!9KePhCvkADo63UBfC?d_bo%~5(&{VLuc0^Dd> z4K7}W!JzUU>`DIzu*k**B@f*sIhXmwLgWlIl_sFX#-6a)@Bu%uuNfSje2VfMfL$3P z!$K!5;0{H$6X)$QppkJ1%YyXjRU1qGa+k;OTxJYe_M!l`)Ya4IixS*Yul02M!)aXq zALSS^azA(MY7yiHzegi45i}X`7U%h%z+FYN=&VW!Y#eCE7PQZT1BO1}JZTDq5Ap%= z8P8!|qY^hNpTgHUyUFAY^^my8ftyxT35ygSfmgC2Hn_In;I~sCWn(fHcC&-T;W4mC zL>(swOOOi#RN1UI(=p%IiG0C>67)Q7#=VhQ&GCo^YggL=*|!&!G17HVmJ< zAHNwdWFPMK;ygacaD7s*fYh$uY|%n3?vT+*tO%>XtWrBrxV4vd-=xKL>{rBRnj@U4%r z=bJse?K&#>tm89oNM_)ZKLXB)j%1l(VHo##1dHaYa&Ff`!04qjbd0xQ)rA&Z_rzp) zD7zgG`pa?-^&)J&)d>i$KMcFiUPp@&irnx`0i4Mm8??*l!%?!*)0?dA5NvQTkyzwmnt6Jo_ zYG!~dFO1<%&NL^(52#{eV@&S zBL9%S>lX#PCQjs5|FGwzE5q4)CI`GDrrgmnAmiANI5-a4A=JvA75H%PDt zVhqMSplxb>QJOy~*2#u{-cYbP-G)rA;XR}XuQ z$1&@hzC43V$WM}TC-sJ2@NHiLmW)|~j~<$EN-@=Vr*$u0R0r1dNYP4m`8r$|x{)=$ zD1@AWL0DuR3Cim^QWv7e`EHWurf#x_F@?eWlO2+r{@lyd<=j=8UgeCpckhM?a%-W( zA`H)+kmSfTIc`y}-i(cU2NQ}WaboSm&{p9q`qj>4ZvKvN=d?DlY>C7=m0+^u#~}zT z$;bAr4EnOU0G^46a9>l+xPv!V0Kf5vhXhH?QNj_L0mp!Ev+IRDWw95|pCmt3XE^^mIqeUAa0ef&r= z_JlNN9NV8|$GW2FZHHphSF+r7xhCF}y~ZqWz7vG(>CJ`B&}DO-?n34CI~D`?rt{6! 
z;>k?uYjl-L6tPyU<(<;5g`3j)Oe&K{+xmo}?a(6F4 zo4)a_zXX6{Z+$Mp+*r_`AuMEvGS|l|3bx;RZqaST8o0NA81r^@1kGpffm`woueaRd zt@Sm6r&kX_^hz0S!eUX*Xn;KHo-!LYT3Ai$k-KR88A2bLu+=R~S!}?3)DlaDJ9$xfe!wAYxS-FarOSiFoK_UQ9uH?XJVLPS z1(iFGlE~MROxoR@hAmpoJ)Ha$&n&o)J4`=exzlGzcz>6OT=VCg3_fFpvmET7-j6e- za@?DX*ICeegyIm3QaVgLa_uYDKoBMUK<`eilaDPlTfj zAHWMsL*TvAV3Vpn@x~5&)=}7>D$P|O>%+6ihfQHHW9(JfZ8(`FwmgK#{wCa`gzdys z)_~2u+78!zH{sm?CC*5+8T6f}v03eRaL}q&qGfK63yx}{g>(##v+U1q`l@ql`@ZHM ztCfQW+g_acn~iWcU0OT1+YWA`#sj49f?c2gSxJ zw0k)PDiaLYj<8IunY@ie49Ktc-Pw*!&*{94 zZ}F@&!#xjkU^SV|o_kHD!5PCj?=#nMNTdYmrZpQLzBU7kQMGvaP#bAkSkf*rpn1K)bIg^vKz6o1k z8unkOkLF_c$c?RTsH&leE&-v?bHf~PyA?pXY1hyy^%Pd^x&=>fOGgQ*Y%sKH;YGS3 z?6Mln4SJS`(?aec(do~f32mc0)9r|3Q4$+cXo#O25LWCBc>p_gw|kms+T>^Cc-xKFKS2W{)}bugHvJ zk}Q9>HLG8r&T?J6vB6i|YPWA4F@N@zj?vO%CpH}<;U#r&)gM5$?L17YGQyd@itOST z73^pA1!o>r!S1WHn9RKmAe%7{-#i-)_vCugGmXh~%re0Eyh~7EJ`xumP{-q+hH__8 zm$DJ56MimxiClJV3sE$;{1?;l8ykNkpS{c>SR zr77L@+aU}MTo`nr1)riNIO$eAzE$G96i)JL%B zze6$Y?tOTWGl*|^;}CR6-=@mzx3gF4lVM`yDf%j?5PInE=3E-L!_)kFa_^-jSo>Xp zGdgbYFgq8%hwGrt>B+d)FBY!TZcO`HCQYIOD-IP z#k`SR)9b?^uKFGdhLsbg@3&|kR}NcNuVCkQNZ?^n$~z*m%uJ>u8hJ@8QTd28sJd0) z`>FAGU0#CdXZ2!Q4;8tV;cEo@t$x@y9q{1~S2#c99G=R4L&}t9!M4SRS*zg-SaSav z*_N)x4u=+jLdIgyiK67$_j5E}z7Ync?}y^02=$qMuk_S{8Zs=Sp8>!Rn(1Y`qJggyqe|=|^O_ zljhd&QL-6Q#AUfSaSk1#f5Q zUjpwc)}miS47PpLWHM3G+?vE&`0V=`qF{Qm*l=|sH23*HD~gY!zj6gGX*TB4CX^P7 zQw1=1HI_LkOyU|mB-o(I*)V;01TiTHfcv*YvHI=+&a1f!S~tvNs<%RUM(x!QZD+xh zwTE(w9eTX8(~MZTRwGi|8BkA4f;peJ;@iO8fFCue;xkZf zxmF3 z&x#;=f&@2AJ_9c%cEc(yY1-c=1DX$2!!6k#M0)KuSY*|V)*+=3c>OYz&A7`OSnXB( zvB()#uPDKBjYjN_Twk0V^pu_oZbp%7*LkDIgoBPp7`YUggw@+mK$Y@Hl-$$8H(OLs z!+ip9+S6KaG5LzENsD;(UXC#4vp39FQ|4ZdGs4#zav(RfzlxdVON0xw%gyqNu#v5fuRRMbnPX` zsfxzT6%Q%l@##N`QxN(=gz%sDcNBKTDH#97RQR|4j{n&>g}>*IweE5_Z|~~;KZ|c!h@5!-9*SS+6 zJN~QXSST02o(ugyD;IzNKFx;Nog)|OH6b5J?K-~T)DMrwk#gz>k+bD~1{*M9zc#L-_qd&B-5bM(I(Z>0an2t_-OZ;DLn zYEXE8-LLt7eto@aZYL`nT>s3{zg=H1{WJQ%J^lCMjqGN1HodeKmP|UWRtfA7Q8eyW@>?y5_A=kNrB2h5h&q|H^@18BAFEA@#hQZOh=%MN%SUTPZme=gVt!B303{V>-~O)%b*jzCNa&)CM7K?hOHGJF?M-KB z*wfSKqL2h%JVof!{0T6p=sHc2xd-Q(T!Ho6#(TayvDj>RCPuXOqLO!Av0XVCBg)gz z)>+leL%$r$_3L2JvwGq_s0JVL!eP|iCaRM33JXMJ(a^{mh*uR@sf3G}10Q?tg_xIf;Ky1AOm(tCnOV6!(*^o4J!>+AUk^dCQ#Y~h_+=W< z;Y92%OEBNWWPDN!yiYQ((M4?|9MbHd`6*jTR@povzq$i`c0`l2Xa!ky!@#fk8Q8Y_ zL&>v8q<6JH7L3`1y)5!EF~|<5cqriw_w%l`-OB0Dt2XQ`omx74%Dof=+R7 z@bEo_^+~c=6SNz2^3%v2FL`vT9D`9A_bvBJ@4Na8*EHgSqCMf08JsAe047DZ=C#K|a_cr&__+kF8c*<--;)EoglW*^>;W;?<8XRN3Es<8hUh{q+;AWm3=^|a zL0ShtXhq}PF@ku8q=iIbP$}%wiRST{0v9}18r@7L!Z^+Q__f&=l6Q(QH*F`7pRylk zWY$8*os;Oqdk$N-Uxh6Oil8uQBXyBF3Ulg|a9vMTv@!Yys@Ah1Z*?ud^-?_P?iPvX zJsn_nV1?jZ^&lAPH5FRJ??Kd)yYOJkal(y~PO z;y%GiA2m36N1IJuIfjKF83SUuEoA1rb}~NlBx*^o0uoX{3b(K27o2?y_AXm7D0?0= z=pn-v_qu`(IWsvIsr%q_c``1ad>&(a*x^>6EqLR=cl_FdkWuvbHxqf1=!vujg{jPpm4k%Ne;BZgHMj)9*NPM#zYzB z5+cUkk4h!u7mKm?ONwaR(xE){Ys2sWw+|feDWj^RJ>`8i!F0(YqBO#es?4&(sp6q% zH)00Nw5%asr#C=?pB`0xQAkT>eIsS2Vq{*$R&EXdk7m{Yn@i-+Q6Xn(_F)LGvOXS`_QO=9OT{=xuEQ=foA z{o63)at_oEv_dlt4aS!#B8EEMSytV2Zj-|?oO_f{dxX`%Ch^6%FgP4!&TYeqB~~md z$N@GF8i5CQ>2o=IL)j_Th|Zt7v(#gadQ&Fz`I_I=lRHhYJ%+s{H_yXbIkM8hCf(li71 z3?Bm_*FK=cv!QUdUmq~4dB&e3y#S7H$bi$AZDHcu{g^1RkZAPWjwwU0py#bnn67f3 zPNNOvcJEwX=%^zkY=RR!_;eVj=p+!Ohk^LOJ&Pz!x&$ANhEo4}Cwy3WmHsH&3?o}U z@#oDsPx;ZOFs7=SCTK`7^>j_}pOwhl`)LK7RZ}1hA4OpL**U!2n>=(EaRCG2DzyaYESSpyi z`p1Ld6M928CduiH&&IH{TI#Pf3hxG;rjG>oKWv(I77M$}b3>YLU_jDSypVSoRr7~I z)vH*(?Uh~BW7|A>b%qIxzP%XteVxzMqG`m5=q4jYW~e+_%B=5K> zCG(29p8@-nO5*Y|noRc}j;0sWQSnVCX0h-1J)k#tx94uCGn2$o-o|kIUJ%R{#Dg@c zRMU*=cHT+W!9$;s8>?p*HoEQLoyDzaIA#*Gu9oB-vVH{%51Bx# 
z({ZY>!2=z4Y0$jd{n)r78>iUu;7a;ldi&&H+ygrxZq*s;nHWoZZE?fC5+VZM^J;SJ zUJy2j-sO#POu**r$tZHp5FeDeL(`WJa4b=oZm-#a%Q^;FUQ{S2J{n~>$YTi%i<4lb zCN1QZiazsH(k{B`zm0_NItkaU?qS;WHjuB9r^j^;kk3t7ML6kDiDl9|5iV%dJQO!70=2?adU1m}z6%ILULOtiNV&Tp?(!oBnN`EK z2UAJ6uV+wAZyMgHk0!G20etY;1Lm`O;e+N}&>MJ|?wOR!7th^93jNe@l-pG3P+h@S zaU6+<3KLLsuL+kOAH+XgaFj$Fhu{}k4r1M9@PLU06;q!LSs|6M8pGYvrC8<9bgGe~cZKy*-t2X>-I2$&~y+resfL>94 zMEiIQcScjBNm=R>Az7{Q)C2 zQt|1h1t9k^9@e)O&^0$sQ=56R+@N|nRvzGqg|8&><3L52o;r@5?%Tv4SlJDN=Fh|q z>1DXdrwO8zZedY?CRz_Yjm3-Zkd4w7G;c^bJ(O|+w71;BFYYPcXF*!FH>Qu-j|UUV$k@?G!QdqagU8k5hOeSyz`DwjWa8Q#0J6RzhP;=G;{@cCn1_QrNQq)17@x$9Y&@3IHKaq-YQ zwVk%rlt9pW6^64I#D_{UW3kQHw0;Kk+^)SEyi^%U()Uk~&xo?X5AAcoFyvM_b znkCRvl*6Ns>L6m^L!OO!Tb6&4uyw$B!?hTfkC$wRjJ18y|-X zch=zJp&_{H#ZJ(3w}HuyJK#;}EtKyY4%5jD@N)^oXM@+#BTwSN;Pgi_rY!<5`Bjs` zj2STIA;Q3kP4wu;@nEkq1Pue4kvLUIvpm|;k@1fBnlzcvhB_uz_<_*Vi*x^+0kA6ut8Vw+= zERLQye-PWwYH&p%a`-joJF0HWCxv#wu<3dNzi6)>_PMzh%LOq=HF3J|JXi-N6^P>H z`!1Msb3N?Hl4HXT*5C_;4M4q*p=EJ4_9_{JKc*O4j%q)NuWwol`jAvCqBjtn#bse= zT^4nGu#F#;VhuJ%-Vl`02woH9iF5M}RFob>+*fr#6WYQ1CEdAWA`fV^eo-+taqMj% za2xH#;hK>j8xbMl|bnINaa&fP|P&<2_z+4bF-UC25Oec-x;o;gyf8Kwm+upoiiRn41*@ ziI1nl`W8=kSRg_}3zK*?IVU0I$rEI1*>p-z6}I2zESi1RzMo4kLDBq8}Gq(ix@X5XgIUQhbSp`h4hJEA$7*nU;X{&^?Szev;VeJffsRl6fgSv8e7Q!Cv^{UemM?wqY>!0v zYII4)@fIB`1wP&`b$w@b&G7i8hx|FMB)xyJtH}IP2h# zi_gJeUIx#={2-iFd`w$nv#@u-Vis7ki0obGPvpLovpcP`aLJ%2r0&*0{1EjVYh8Vc zpT3JA(Q_4HprbWLng_#+oMc#gs}eKcJjIC)Z=f-66O^@fgDGq%7O!i;CT$T;)X)Ti zul#^}kp{SGO9Q@(ybZ+?A0XRbjWzT!#Vb**7@Tv1T#nH}mDC52v{4GXKYvXM4UVFb z+HN}W;~{7>9|sc^L--Y2jzF%2HET_@VVj=XL%8F8tclx)?`Fzz8PV;?hMYj@J_=0y z-6=tw?kHT-><^~9w4hO81YO*51tkW@kWq?S478`fi&lLsI`|n^U*ceOr4~J47*A-4 zJ~zC-DzAC@F7S60oIS`)#>?{e>AXZ!+_X&x{hVg8Z{`C@n71WM4jRj?ESKdrEKFl- zGIY4`VNTRfq6$QJK1bcw1k6xTPOe)PC~2(&YI!P6dtz+|@zH_e5x z5-I^o>rR0FY;$t|hB#L_#sxkXt-}4j`s_im8to3vR{_}yG@w($4 zc4y>p*qHMK<0h0sK+!#zr5{3lM|uiuE25mZ@)I&NAsu#@-6uhOc_!`klpnV)8{+G4 zBX87tzCp}%?6vGNtvlMt&)aYiw8y-_=3Wx4M=ik_y+-5LY9lzXe>F@{?1>)AmAso5 zeQ}^{Hg%o4A2-D&fzpvON(|GAbK^(T0V(p_%qTwa)y{yZMjMPfKAiOPI!=sVsS3_$ z^kY2Ta%yK9iTz@ocx7oR=&o~;ZA+{MFa60ZGGi?}`oa%*FSc-YpN!e@>`6={We09) zc7i1N51=vb9?GBkPCrch1}mP#qvqf|+*09;Uo3uL#2ZWKr9p7p$eDuJy*hf=Z5xJr zWb<qGB7)54)cpF!_(?_v2|7v@;}tkRa;g{p9&5meBfTxZaD2!2K7OE@KnZH+_tv|SdZXMWZ9M#{VMPeol z3VDiGW_rVkgz+d_f19qgW_Wksa>$K2hA%YYp?h8cTYpW1+ji{=)Qw1mgx7+&^0)vC ziTx%dRihuQww;8F65Bv;<}&zETaEM6^5~6;+2FKEl1nRAfc+zO^QUdIgp+m^yvkB3 z`rwW-JT9{YU4u8&OAr_D5cC#K8W_Xsjx?AjI-B=aDwODMO$WmOBb={SLM+3q_&bi~ z!=-DY+))>IA~oeLB-d1s+0jdI&8vA9dWOr0ugDoZw@eHAm4}i;^Kz)!t|k~fDui|O z*a_a@<5~9dDeP2kFXkMwg(H>UIPKo+aKkNuEw$w?Ry7@hey0rS!EYY0VsbY9;ra>Q zmD!;cdf}_?WneTb3|j0OVCA@X)LeHT=zLlY-Cv~9hUdLWT2TT@EqM$sAC8gK*`fI0 z_%+%v`U86GjfMLzBhc^B9P(x4Fm9Ta67Pog4rm@w3^VF37Y$bJ%lh;=3E@Zju(sM6 zWcZMqL_}MQ)zo(5KE`IjqI-Msb?J3j-yjO({2M__bvH^z3f6yOTBIg;4P+)h8S5w#ahEeSYvA7Y>B?_U0feIOE!{c?>um8x8}_Q}D%&mw0m4 zU`$vSi2FB8qlWA4;mXHVkZU`NBxzE5qSsq&l`5ca>K>#I9DuF&+UUDwfmCmP0bXcz zC#w^;@NO#}g;9dJt9+q@s{5YjU6}n5v@;D4)egd!^UE1SqzZ* z`n;fgfoyEGp}`Bo@qvXMoU2U0^`~;_oJEfKbu6D5#}2T3eJcX}225ezMm@M5>FHoL zVJtUoZy;XUp~J+BD+Ky!9$7Q|BtE^d0o%UZ!@F}-U{!4)Uq$mP^2%F@(zg}hU-g5Q zZ;uAY*A4XT$QsN%6vcl!*b#b|=`G{X_UsXu_i`)m+#qo_In{`6-d;<~Z>)p|QZjV5f(Dqm>jGTShK%ny znCDS|-__*toBs@)_C*3$c?$01tXKf;OquoK{K*=Xd%W&ub!5hs)3E;X5Dc#7L7y*b zxM{ID==$}81KW)XuU9;zA>ZQYl*dwJ-@=!elU$7cS_N37Je>4WdP*t`O{hz394J=l zFqO&4;TrU}k}URX;s+^(RS`!*b4x)V2iw4hZceIemZ z7Ur6Jpqx%r@mSa0v~RZ~)PAP{3r)zx=OU+R%%umQXA;c6zu$oDim4^H7S&^b$8{Pk zu-o{be@W}_Ohbp-{ zPwftRO_S+Z-8aPkb{oDtmcp~Do`hL*YAhezxs4VU;yf7%WtLfHgl5rUSgw1JhSW1u 
zyO4@4lC~sP@f?0$#OF;EY?E#6H3ethv~j{758j0pr!Zii87vNR=EV%03AqbC(C3ARW-fkPvt zV8xFw=wls?{;$^K8Zif$IAo8R{Pe~2z|=NK8=He^Cmq1`(Q8x;8DU5w_hSc0& z06Vkn@$K00V8!D&XV+}V*!Gw_%f7(N9cTixD|*8k>nw09ECj<czFBx2U=WlB`r5MW13O~8Jl{Ow=+QlmK|;4WxrR# zy(#h-sV$HJ>pn0(e<+useTazLq`AEQK>YF)k+_D-O;ERhP%8R1z&%OzvV=L=Exjni6=Sd62L9 zu^#U#*}?MN$yD6)AxWMfj+MztpdBBESbi1{)aTJbJOl1n){z9aQaV7Y8tf_( zp`mFF9FfU~_vtUOmqIOFfAb`L7kU^htTV8EM>_PLcMpRCwt<+~L68se$Fg(1saZd1 z?!c+@G&??%>fXKwA0n#pqf9y0^7i7YC09VTbRkZ=)QU6a*wT?N_OJueBe>dEm7ugL z9gjp-5)ZfWZ2sWI?1KON+LlPht@I;#2Nsjs842j8 zQB3QL5E{;%N34o~T7jLkHewepp5_d8R;yuXavzkPgtS1p4@=hl4n28+5c(~Grq~rw zrPq|+aY@0#k@M*u=SBFaA%}cg>5P{1hOtLx!5DXaD{D{}<aK*QC zv}og1>@B7M#&?vc$^du%p6~}ScHJP*(B4Y}J9x0-%neM}4I*0h1?Y7n5+WxRf|Ouy zJ*(duXwcF?vA2{Yy_4l8zHFo|74vCa!bLE!^~2stZ73&ikEZXIldRM-G>}x|Ru7hB zKHs*$gR%}fa@1~Sl$eQcro}O_frmNYkHPG?$SyMbt0+lz7o0H{*tK?rzruu#2O-lz z0WHcL!Fyi}-};gQy|E(|lhsauSf)8#^<4qeHwKZ7p(FTO`N5cM=#PhEZi0tk|6=3U zhNJUu5xxEzT+L-!99>gN=h`L1vL!#zYIOm5c)l;U*G8QUw`#-$wX>i+B^+(?8sW3F z4n)3^WT)%wvERFStXFTs^|37=>g@;N+lge<+kFr`hZmBB?_=PsmM%n0PsbIDuk#90 zqi}kqJ}wiJ!0jgrU{psvh?>RXpcjkqyTL_jY!Co%Ri&U??mohq+`@_353t~K2YD0v z+H!^4F1YSA29?z+Fv9Qx1ip}E+qS<0gOA(c#g{j9)t&vkmsz5mhQ}P@_0}I<6bU}5 zzC%g|>>##d1Nh5ony?}-9tO;C#M8U;>8iAqsD9ZWjdzH$;MzLSk1c`*#ZzEBsT`uK z%1HXtJy=$F1nl0a(dqN9VcioMHYRQ(n5p_ukI_*ucg+bHbS|BGI>plwL*zN5smH*q zh2veU(qJm73)!pHy;$10L8R=C0b4Wko_R)K0Fc&&c=>t>JQ8<;xQS1h6gvVV3~M9jCDhfmjjp|vOk9)N=mCjTIMHIsGFz3{bj`=$TbGBUW*xwi`4Po& zI%{!Sc>wJ;pb^Tnbg}2zb`-B^=l6NA5(;mxXr%x)=2~20>k;A1rOzO`KwkFjA`!lsl?X zsvlRJp&E!S1Fys72^Zm1gCCx5K0zSHi(KOCz!EYWn_Pq)l?*A^z>7So&XwD2WQ z?QJn`N%X~yy`oU9cR4IzJ5c{oA;?)iAR6Bn0(U46m*?eTfn+lse6yead2<-9tB($dVqO*^9k+Ay@z_#9iH=r~Qzolmje4^^?$>dw8E%5VS zZHk4_uGY|AcN`9nibS=ssVF;r5NE%Q5VzQBa9h|Pdnvu62M#Nch{PD;l<3c^KG6s3 z3@l*6#3RM)7tX@l(NgR{fC^VL`zzKey@Ge?Eo9TxKunJx19M$RFw>QiFjdxr7Ef#^ zJ7t&hAMc8V+hHo~aKDXk!uk;`>m$l!Ko+D&6yw12uSs5W0+E+`Lqm68qut&a;AEvx z)Cu&*BQc(6IxG)HE>pwHZvw!3^9%ydTfj#43^-T~U_Fb(S)%GI;%Jsb&J9&$6`v&p z-_)5*Lvk!zjM;ccFB?VUEV(TU?!$10X_zN#kAo-j@od})P%)9m#>DnH+VYvaXj= zDyJ3aS??pUeXgU{Cm9Ut`;zB%{RB+ybC}#+AHj?J5<(ux6;p?7efpaJ7*vys_^@G# zU_FvdkMqSrRsJzn?)VB@1#40l*9dZZP63e+tihete$a;f`XJ??jZYh=;IYN-VDMoz zte5aWhlZXw>GEmPTcZq$WFpDU6>sUjZzU*xYd4vhdKB7u{`@0fPl8onf2fY14c>*j z`P#ED;M&~XsOuAlL49&DS|%J^4;MhdtUNH@QVxl0^stB8BIa9Zf-KpVwIAM8tl!p1 zD`x0$`C&Qe5mAn9QqQ5EyEsdakE6TyzC=Z>^%$bZW8$J!c74Ddx|>!S5`&ZJ&!yS|O;JkW8K#ouoUBAd(C!x5E%1@IP9=;p6rW3sirOxY0-q zMm9LIwf5q?B}F>SB1DgsZd2tnmQUgm^A_Q8Jwuu%HisUI?#_%oPNQ>#Cpl@p3?myo ziY>35r56rNL5r$=FvD3L8zQ6eO;t3%xJi#{XI&+YwX-4jxINnIzoT3O$t{_$)qNyx%$4M|{a z9zu?-x`$eVJ8I&imf-7WN%Ta$BBzpE2{%>E*>%C4KHo+?1vME0xerFtnY|o=2<$YE zXLry>?R{iIM-1flpNJn8$YAIOZ;*eu3|;S!#FX0O7Pnk?BB z6;xESCk_t-=$`+tz4r==Dtfyq@nhzLlM)cZMr0TdJzGbSVmA|fV0QT)!-`~ELxrl!8Csj2zqVy^n4FV3#+KE2Og z&u^`DE(k@IoWg&*O|e^pkmWU{Ah|^!AD3@Hjf(qxMMqccH#?7hgUUGjvnW2fm%#aE zA7bAD1?nO228nJmzNniHek+5)W1c_Ij2LRIz6RfRB-5IWo&4c*n?bbg0n9M+qSCL< z!^L8Kh@2wEcd|_<)l%Py-c~D+-tJBk$q+A7JP)S#J|IVPV`%(GdGgP=lccm<;APyO zP~GBhLi4|L)1IqQRW?i@6spgL0v`>cZ(k07bDp3;@ozZyhmf;n+%(@P#2~-zro8j)4+2fLpGZ_(lZC1LxG1A6lr}S zTX#H$MfZ~66i!BA^yU1vFUWA90kd-A2mYSpFV0V%$_7}Ok^aNWajTgPT{HO^ zVGnhn?WvEbAvhZfuP?%B$zQ;Br3v^xG$1!EH^6|74Aa27je?il*@4dwuqi@_L@s#; zQYW{-%ij}N!!arNC-{rj_9}zylg)4~Lj>g{O<8tA6#a5qkl9iD1ns0%n7Cs-^m0rx zD)udA*6l8UAM3-RvgH%~lYNOk6|^E6&mN<$$ubPx(?UMps)K)%CzG3*4)8`R4!5aF z!no7oZ0EWzPKUZf(pS6y+3am(woN=<(9%GMt6Yu@mk$*YGKW1Iwu72TD}mTOGjidC z3>%Dbct80YJ>ne#)kD+qeMLR)vc%aEIpo_`fc$CnP~Sf4mOY53_1s=5dS^Fcxn7J zyn9|B0?9YrEm?vi94XUy-vw7B*1_5N85lL!4b7&V$GhvNFh>e@lWo#-aPRCmD3QEM zJ_N6$e`=HP&>aq)(5@h&8W}b7o7~$NUeWuG9`giSf7!WmN 
zf}ERhw|G2`s_C)Hz{_x$X;sS z#m$~qNU*_1$+Yp|WY(%a47Sl%kg~6t@20Vy&8{5678!Yl`K!ph`>D@&zIYyew<@yv z({iboRW1KX*gR5yAQW4B1tEG)IT-!C1l$%6{Qo|B@7UStCtevSeDl4v3mLE3w_ z!-}9GVyW)JS2>pi^Cf&p@&Ykja{U-}Fc)Do6)MrD=Pj1S{^HIhZ=m+62UZ2N!S!jU zk$0^NKFE(}?=OjlC$CEIQSnXuvAf#jLD4O0QWF8Ud7Y%faxnzO$}v)Rbg{~67dC!U zBI9x@`GMphR9IG*B4d2WA zAc!nY1?NZE7;&Z)Tz`F{U0)5MNzRt8@6N&{j;ZjocNM+0@Gg!V)xdgA8|XOYiFt*; zcqe!f)OJHS%6MNVPtH}+NNXjWdT9dlJfQ$5q@STRtO?FE`GEGHCD?I)#27#GN)qXo z1skUbnf@(K!K{ECxC`FH9a4zu2a(^Dj}miULD;gh{22cQq;QcV z-kk2m&UF{SweC^$Z=NDEuX!pvZg7Y^2#$xznSp4S;9A{Rx0a{vI~&&jnT3Wo6Yx-8 z5#7?ILDO?QVBV!1+~;>0WuNHds`gW`{;52i8%xL7E!h;>2YHKjc;V^g383IILhq^8 zV|u74Q`Y*$boO~4lI3H{Mkl9G+sJru-uDq(tp*_IZ$1QAXkq(F0e<9;0+8AN3%!@$ zAm&fnP`5Z4N@tb8mTYd`l#vR#Lt!}kTL!$gnuZ>e){-JV3aHnjmYmp{jE)r}yi>4ewMy@HO`M(Wml8Ip@lXqcBj zfBv+5=-yS2gG_yw*h@StFJ`c_$NbT*_~L zlkg5y4iLcHYf{*B&XSF;+-{oC!$ZHo`Bc$q1+zVE5Wn55aOO&M1H;kt+Uslym=f^e9Q3~Hfb2rCQUz&WnzqL3xL z12YRpYLsNnd?|PcMn{_AFu#*`B4i!AWT_cWh#5rF`WSS%SxiE1he1}X0Q=%i08M+- zM^euHg>gZ5$u(trvZ_u1>PD_$&2=9<{W>2R>oh2(e<0Z-4nnL>X`J-l=D2k}&k;IXDkp4NB7{p(|{KC7Yi4tZYU7=4b zour|$dlMdeZ~+YbpTODWugH?BGpKm3iX?<20V7^P!+Oootg#l3CF{e&T_zdwj~i;-tbdOJb% z%6=TQDtywL!DpQz!75xihr#;RZ`yl55sEGfgMiOjR5ee5S&^K^b^8-pzh4v$mj5A+;_IP*QxwkO&ijvF zMS}47K=9!5RQqdgk^~F_`4Cy$7p0C=bRn{t1b+ z^iefGptGJVf7Ofm5#{*vdlQ3uD6#;6tVX>ime}vi)S)PY-J8 zq)mWbl4r0%KZ(|=7_lxE*!sNXXK&%Zn!Rx2uRXczAPTR(%wV)P+kr~+ zELKj)0?#z+v1W7o=)x73cunIX$nf_J=+k0O89if30$==((U7~QLBnX*f!@3 z5l_F1#cK|eJRJ#;TgUBth1MVxevdpJcL9tFcH-dsP1|nn~VK_c0scD zKQf^ImoD-&z{*NXe5@mf+HG5biMT_or+bkHI@ciZqc)7Hmt#Rk4aBZ|j~(msd8@7( zW6+``I(c3Kt+{=V?@!M1UtF<;6`=^3Kjy=cu2_7?7sG~iO03#xmZWW;41GfHP^ zfz9qym}mJ09@kFCYEOGmD^}t)CK;iB_Xvnd)Y8?_c67VAG3YkiV*87XYX5D>zi@OW zuIkr8<(NmX#Pk&GIN$+M=I79{W)yq4j91wfSvI6=1s#l?fPV&G!O0a-^#147lxYv7 zQxAWDs{G}2)cPt8+)sg$$@aK*?jfWuoR0TAkL-33WIfkOGg-6$LXG!0R&()EeD26a zX*G*7y2nQ#&h-?1|7j*BP7J}G%0?0;ITf!xoq~2(exOyQH;UNh;Tyk8yjd$+;rp;9 z^v!Yu5-dY~NhdvAMCgKPFL+8`6IdtnFlb$>fOneD5K(VI=ExaUw2m&IS7ocH7lfhZ zF@OV`E#R>4S!`MN-Spwz8XlMkvJ>rWiAT*J+`p%v)au^C>3AJ=#)`;tCrRj(R7Qu- zRb;8+Cp0u}#*i7c;HqVcdTm*F%j+Bz%Zagz?w^H#buGARmpJ^mu?v4aXu-v0`b1Pg z4eQ%YsLT#Q_MMF%t{k|?|I#6cQ~KVL_umX*-rp=t3J3PnBRur672J}QjBOz5YO&BN4_$Q82p4qlY+rz$!T=m`U|(@ttMJi1eso+Xma+bIZS&L%zG@8iB&NL(7fV0 zPA==9&9@pbiRTX8IVBjrSci4sAEN#T8z4351h$^IhR>ScLBt&qs&n`NOs~HMvU7$| z%vhJUZa1W=3BJUWW7!{$>m%7h&G<=n7q#^cCdXVXz(VvPDG-!~dFqknDPMgI6Qv4Ss`itlPb1_;W0d_$L)YPT76%VDFo5Zz)FI zO$(@8(hTg4U5Ra*y~$P27`U6p`2_3jd9$C_;Y`lUAho`gsBjw8&qZ8rphFW`H|$D? 
zI>J{%@|jDoK_#PNB2F|}drdB(mX_;#EKbM_yXeY`Ihob7J&+r__;&Q>W#c2OYh zeH~6@UWTH1ksQptG>N&RyN3Oo(*b)r{y~6h2>ZGB5^O5hMO%*y-qeA$yjuALZV#0} zv&?D~o?`{}?+j?cKq_YP4Qb4g-?(^o8vL-Bgn_wI%%1nYq%QRid8wfVon__t%vqWB zFZ~XCF6~DNu{zE-mj{;D-{2npI(Ysp2bD4;*^l0xP$n^rS?1me^->F9$aDs3HO62_ zbqH!o8ZdX}VTj3Uj%b&wI@5R`^lcLIw zYnZb~g{^yH08b2~KsZ5+b?L1H7uCI(?Hz&UJEq}QQcG17%V63A9%hJs1TtqDx_K4g zk#EvCeib!cQ2Yiq^_R?bso zG?p}BCA*9~S*ygP*%C5+UJg!L-%L{mHsXitx;XuyJ5dzlG(g4qFt}5Jn+4S2<@?RV zBXkkGd^>_8hQIk$<~v|{tQsyaQDm-cxlNNfFK?{bA2_{DoONxR3xA%<}bjM%`#+bM8TYcGr)=X@KkQ`m%X3#^;4g8nt%$R?P(;oZ4wSkneU=Jt(u z=uxLg1tcAzKJ^tGwmJl>TM~)pQAvnd9tZN?4v?7qfc|`EfO4&}Y?XH4#)YghE#2C&ao;Q9V5mu6Q1A1ajE+;>KhLmr#P{h zJMTlzyK7MRTAa1O14KBZl9s-0rV2HyAz=;+lxK+g(oRqsd4diGdE=YmBh=|*HXeD; zhyK++>3r0Iq3X-v?!Oc1wW%OG9?_{I9~;!Hi00&Y;`_x0le*kciJJ>WcrGU^TJP{m zHyOgOl<)Y_@D>!-pQ2e3ip&9#hgjKF0#0u7px^HVUb_r%Y}H3x!e#w?bl;~}c(E9J zT@}QRy@AcAM~UY09+(z21!wJF0h_O!g6L2O(m&-YNQqXno3ankQGu`Iv+H=6p*IH& z0t&&xA_i`L--xOuhWM%X2vPpN2`{OZo3<)?R;fiI| zlh}uKFX6B)$1-J?@p)?%*y>qk%=Jm zMYs7AHpH=Ct%HCU6aqV^7UJ5v6;L&CKUrselO)Y5gIQNvu`zZV>IlxFVP^vAf+OeY zzJz!vYj?wuq$g;+aywe)8>7+M_ZWHb9ldfj8TZUS$9L}OA)fvO&n9xL#@u3HB)Tx? zyf2PP{h%2|@i_i!2FkYvQKileylW-Z)q*q$g-;|A!`-g1yrLYm4m}|W*H6P<-S_-k z=^gmc7Kq0}1$zHiHkb!JvJpm-nf6HQLTSS|lNsCvSkofa>JOqU*1=^4bd7YW#-upe}OE~a+7 zZ=&;~gSh8yHxyi(Mn7A>gp0wmuwdP58u_FG-WGS_-9p3{-m$!--JT>nyc%p@IKfZ# zLb!8w0~`sS%)~p};`;ctd^(bXMNbr=c4ispiSq|*pH-x2U^?Wkwd1v)yGm0VOR+mr z0!3}I@Op(L3@qwLhYj4^ZFMVp?$Kw+%qngVNhM=;3-OR&C-#WTvWah}(tzW)aYDWb z`lf1QqQ@!tG;SBJsJA9+-%Uw}y*f>kS%87R7vr<^0jyh+4gY@3Kq2lqp!QZX^>qEr z8@6nuU;asRS;05aJ3s~cUliiPf-*dCFc#F>b|SBT5LC!TSn)^&`>$RH^R@I}I#Zy$a2;tZ0tKtVmNhmY?swBM%l0_%MlT`Q zOC#0KrJ+A5iAlcBGymp@Z@%55Q4{wp)VJ3_X)8@4H1Zm#yEO2;qi~ItI4WyI@`VHr z;ig?g*aQb@-hT#Xffh zyf$?)G40w8@O&zI1PxLNTEj7$qRg8cR;2H-4oE-mf*Ggp(YX#AvHW8yjA}lkGTY+u zi_i@`Y3_@!TPBjAl=0A?(idgm=fpc` zT-ifH^lQqvaa7tph~$ihLrJRvuH;xtS#4>i+PDiZY6>%p z@8?y&jE#c`b~k?OUyKXB7^AYC0EuYN;peohp!}fA=ob?S|4!xOu0xzRTkRjLIw``q zMmyt@tnYA;b82oPn(V^9S&VJX16ak=;dHn$va~?~T^kuxc)A$#rsm*p|EcIz_!QOD zyYPOQH#X~aK(%rdbk0zMsN4id5aibWON5jNCgSicA+XemC2MXM(M5^j=xC+|;S-W@ zLAEr;n+mZ{oK)~>2bWi|^9-5&fw1`x8gccPL{^jaAv2 zgJI^?v|@%589gt+9BI0UTKh#<*cOTb|8Ag{jXdnMp2?Vb&1Z|K64N!$0J_`Caqp(* zc>bXn*hSlr2KE9tn@FO#pC}52-olRW$#60EH~Bf?G4Zx@M&83m^dc2zb}vw5qFXBH zU`YW_-9HUpmQG_&ZApQ3@00j97A?f(mqb{}M?$>J72D}zQ3a-{mV$`2q5~4cS8r z&(dqVKjDUb<3KxYAN47}N$xIiM0mJ|NCgUzH=#|`vONwG?blQ#e_;XQ7c z#?2vG*5P!?EBv^GDBL3@NCm52lViT}^h@gy?)e@~wjN`vmnCM>1qG>Cf6NGrz2kB3 z=@8V}JwklX`QY=qIn2IhIrd1?Zt!u9XD_M(31O?;c#CGUPxZUMt@ErPX_~0rSl6~JNA-Cn?;zCAuFOb z?hb_HHqrIfoz!fA56{xI*%e!2Oxwmk=gk=7dZrwz$Uuc8&tuOHR2$7e8k&w@e_aIZ zFGBu;lNi?!4qc}hLOhG;Qa4F#j#gk_dzC})0|sx@X+U(tC%UXa6O?9#qMgr5$eqwg zWP29D>9rThEstUt9}o!rS+eX)L5d4LP6xH`+u)j^BzubHVmQZyzB8XeVeM(Wv9%p% zsHEb!jmJQAEDYj)|G?8L^ck?4&C2?iFWNiW$Lo{ufkDL;=yGN|D3$u7=OY`AVLyl> zmoDS4;g@8pl_BVs74pNE+^3%#qhQtoHMXp5kSvt7BqQb5P?zTk?g4u+ZTo9-DQ+ng zsss_kvUKA2MI5%4?!qUby6E;!2e(X508{Hb*xexx(o%jfN7WN2|Ez{a`{NK16pW{L z+oEnlHnmiZqoogS(0-}!w6FLfWXe{7!ZZn{?8Z1|YnvLIrc_b={nZ$MeZqGdyG0B) z{eDkxZ3yIx-M6wJ59@!nk_YQ1L8M(b zPu?{N+eaPfJnb>EFYN%LrXipCQ%WQsr%|hWk-T|FSCGP4O&sqp!K{=ug}8_6pm#P2 zKe;I~&KC#KyiO0zLgulXKX;Kidd;}q?l9(d6~P~KEmD$kk1wU#jYm>bX~kG79+)S{ zY>)cFt5R*mMgEz*lcWNE9FnE!KQEdp3vYlq9Cy5*jpKAmeRRrSE zFC#xtS1(=k?Tg13-8mpTa0U()rb1Uw0_Tm{3h(};(*cFuSYdSmwKt^lPDr1IjxsCQ z=~so@8b@iH>%A z_~G!2?63?0^$<&JEBk<>PZVI;s$X=W)Mv^|oWX2etpT_7;(3a%qoCbslv=foam>X8 zcK0V;V(P_`y+8Ky&7*tiyYUiC$b=g5F6Aj|-fpEa#tUf0k5sfD)TX-SvDg?;Lz;e^ zr4E+kSc?;Pz;C1#OY28T+WuI&cWev!`O*bu%pV|PPdR_Ra2qV-Sd<>E&E%f)5~%Jx 
zXS!m`7D)JTk8Iy}7<%`Y;kXPB66R7z)o#7TAeCOwdn1GQ3!}k%Rw?AZEyGoJb;+CT zhj`4U4R-c6fZKIfrgYvll>VlIS<;U%#q1?5_7#F;$!Ijx??+#b5t3eMg9RgxaFvxU zE}lCd`k9-s+(`$tWcavhngEll=mi(ttx!#G7bcyrhsBxPY%k0m-``Z^N$9y$;cQBuZK3L0j9i-4t5hc~;7J201^=mM{@EFSc^1?Nb$1xF& z+3+z~fY}`SmsDiAldrlPu-DrLOb_;xXYK?Tjwuf=*Mlp{-LT0>9VdOT2C3p2j944T zpBo+nS#c|IgJUcdo1H)#(;cv5xjN)N6@w#3r(lNL^NOgA#kivCJQg%Pgq<6U`SXhE z5pucyps(lQw&+~;bd?itcb^cu{GkY>zAU7V%vZBxw;K2h#gouB&=X7-9fgR~jiB%_ z9D~F3aLTqb-1p)HPn#T2o^OY9+gr(Im1_|1RZj1vh%!sB>fxt;agrbRg?{65in>Zv z=)5g&(fbMF%K$O9WLh@Y$@Yk92lD9TBTis&aX%5Wl*W-qJb=E{wE4nhY+UpKbKFCr zOht=%_AVCk^bV7n9n)D2llL^T;5JDuodo-Wc@Va=fvOrufbx#TrY~ix$clUFp#1L- z`W@pkP}s?+)gg!jtB=F2+5N=XCY)n-#xuv<`S__|F z^?$IC2*83hQJ5q35)uvTaZ=`2>XQ@?dyQtm>}8?6{>6zPZ|s9J4S&QF%@Vrg{f62m=I3Rcsuf6a<)37u2n@BIeT9TVN@a37CHQIEFlm*x4 z1fW`b1y_k@;GM!o-q8siylYEy$$2j$%)d7emfZKl)-fBBV(}RNbaPryh7a7PlDN56 z5OiP(x)kg1TPCYOiuX_MJgfr21L91W_at`KtZWdRRYpg4-++xnr(w-IZB(uML}g~I zz_566OmJ|d^21W>!J!h8VDg?MMIi=lSj^Rr6kTQO$t3qef1}a&ObYt@k$Z@@c%z_W_V8<|NT+Fd+w!%zq zxiRjZDMOqVPr{p{Z^)(VQJ7dMfzJx#L9^u*c^ntTD|xb=Iw(Ei=HT`yH_IG{T0Rlw z8}r%n!AG$AO9{S}7NXu3qiD|M_pMseLz?1NkUeij(R>NVFTFE{mYuIDe^L>??O9zd zrGFj6YajAMw<|C;o*ytP)d(!td_ePwY1Cvb*LT$Z41Ub$B8Ll#aAoO6yuR@m?p<~l zm7c0V(lr5ex&0YWPrr>f4n(77$`3eu_XFM6z5sfj&tWGWOXfQ%oWlv4gz7E&3tmtA z&}fnt$y7Ckx;g_Gw`x7_qqifY_O+8AXl4PM_RJ(cKEkZiq( znyRvHX*R6V>4{A9w{gtr8eLp@(uhs}{SEeYII`^*q?kE}PLWO%3pf`y9~_Mw@!+Zk z`cO_BPn?(pYkZ7x=H}T@=ROWk%evFba0}+TumpSH0mT666ryqQ0nQTfW`~4lG0)bR zun&D^(q+e!;r7~KDssu4e|GpdYKgVshR1{C@ZmG0Y{7WeV_PX-TV)1XPthj90>chob>){GR8LSW{4fsVlYEk*s;l(`RKEC_EF6 z4;!$H$M#`=_FQ&WQ9fC3Sm3kY;GM6JRaIQZ`}oiA`1*G$ZU^OoPS zWc6+Ayyt~~?DS#2?Rb`18wqB5e{uHpZY-T`z{qoK;N|u2fM4YUEI02R7G4TfYu|!d zku)#LSsYYeBv2tzi*p{%M$hE~_*%_}9C_GEJxmH9UG)cunQG(V?=s9#wK{u6>HygL zn;9J@z*7Id_9V1Vg2`q5-5Q=-|9mOcZRvwHz^vPzU)Qd3Rm?yH1N6aEtG zU%}M%f*_{REg+JNWJ*9WNEhFrgHK|qpqC)64j9D6?h?4Sbt79oClWMw&Vb~z+3;bi z2^)O02?Pb*`RY$47{?PHuz8{tw1G2Dyz&IrEziX(X3e~%dMW(X{`I^Jyi7dDm%`Tu zWmX-}!R2vr>_XijREeF&C}tGV@vYlgZ*5m*?nejKwD>iIa~VrFNh`G8cE#cnXACd6 zh@z<*@odoqYFPOl{Ww06>aloltc}wZ!pJX=YE0s^tlMiPdCSr|;B?+O^lpsD^@*H! 
z!163Dxe$Xl_On#>tR(xax)#2FJBtxsHcYxF*RkdijMMrSvzjr>LAfFlL^fDa+;bQt zOJDOkckko>a8AI{$Yv5y{|US9M8l4M>cCD_V-sH+~c%3+IaAsh0-k%Q-ukSLLWPu zfL7H~wr<>8`tRH+92i&zx_4zJdC#-9Yaw}i6xfXwy&!W^f>#<` zfvI`N>BiFi6r57wTeNx&l``Ib`#NzIb@ySHe5R827W)1PTXy6F-&+H)mK{# zCM8jDkA%bb7gwO`dInE?yAh;Ha$L;6l_=uqO2w^{(Z^bn#5AuWc8gz9{sS}igx?tu zEfU3A4?Bp6)?qjOx`Vss=|Z}|OYrW@p)>bALi`hglUB6gh6Wj^zbVF^S}TY1=E&j` z-(I}_ZIs3}6cYiHvphq!BbYtZ03Wuukk#rOlM*e9KU|mbhc+i+@rDjEIM0GHvdQOs z!t=m8EE}b#x#O#^9B(}ogY#=UAxe5L7GLeFTIlzP?|bw6^}>&nEm zKl@U*S5}_T#dR+?nQ(5?tZ_BNt|fvmVzzkQO@jdAKk45*1WgM^}0L#1d^8xE3A+hc<;`zVb`{jq*VlK3NHqhTmfh zch6axAHYdsp4I0kRbpnI8YtV=!qVIXT%sBebCteg;g)t>Z<2$H4(x%I%R{lAyDx1L z`S7{4jLUnifFtt;(fi96cyD2k{-ihPN+{+)6fz#n2KNX^~@0xm-+{9iRNBrdD09m3>&^P)cni_PGj;#?Onsbp} z8gc<&s|nN~P#ylfZpHI`m*ByO2>1?LVTnQr8dX<=x`z;Yq$yL^Iv*@a?E~+QNW3Kd z2%?%ii0`d!=-YV~&K^+2=)Hbq-=3$YM^t^#b-Es2GbzDo6I9{K4|`g-vkFblU#Ig& z{r;zYZ2#$v_|NPA{l19a|L$Y^uYQXEq^kd)`y%}Q^Va{8*Z-?NHg){3<(&NQRsGL; zoc{m*i2o&D-~VSW`hVC-=Xv_y1Op)BgZD CR{P-q literal 0 HcmV?d00001 From 713aa316d32d60b9c05a2785118f3163e82f061d Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Thu, 2 Mar 2023 11:18:13 -0600 Subject: [PATCH 065/231] Update cross compile checks and files (#2497) --- config/cmake/ConfigureChecks.cmake | 110 +++++++++++++++-------------- config/toolchain/aarch64.cmake | 1 + release_docs/INSTALL_CMake.txt | 81 +++++++++++++++++++++ 3 files changed, 138 insertions(+), 54 deletions(-) diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake index 70a210404e2..b535396ab7f 100644 --- a/config/cmake/ConfigureChecks.cmake +++ b/config/cmake/ConfigureChecks.cmake @@ -768,56 +768,57 @@ if (HDF5_BUILD_FORTRAN) set (${HDF_PREFIX}_SIZEOF__QUAD ${_SIZEOF__QUAD}) endif () -#----------------------------------------------------------------------------- -# The provided CMake C macros don't provide a general compile/run function -# so this one is used. -#----------------------------------------------------------------------------- - set (RUN_OUTPUT_PATH_DEFAULT ${CMAKE_BINARY_DIR}) - macro (C_RUN FUNCTION_NAME SOURCE_CODE RETURN_VAR RETURN_OUTPUT_VAR) - message (VERBOSE "Detecting C ${FUNCTION_NAME}") - file (WRITE - ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/testCCompiler1.c - ${SOURCE_CODE} - ) - TRY_RUN (RUN_RESULT_VAR COMPILE_RESULT_VAR - ${CMAKE_BINARY_DIR} - ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/testCCompiler1.c - COMPILE_DEFINITIONS "-D_SIZEOF___FLOAT128=${H5_SIZEOF___FLOAT128};-D_HAVE_QUADMATH_H=${H5_HAVE_QUADMATH_H}" - COMPILE_OUTPUT_VARIABLE COMPILEOUT - RUN_OUTPUT_VARIABLE OUTPUT_VAR - ) + if (NOT CMAKE_CROSSCOMPILING) + #----------------------------------------------------------------------------- + # The provided CMake C macros don't provide a general compile/run function + # so this one is used. 
+ #----------------------------------------------------------------------------- + set (RUN_OUTPUT_PATH_DEFAULT ${CMAKE_BINARY_DIR}) + macro (C_RUN FUNCTION_NAME SOURCE_CODE RETURN_VAR RETURN_OUTPUT_VAR) + message (VERBOSE "Detecting C ${FUNCTION_NAME}") + file (WRITE + ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/testCCompiler1.c + ${SOURCE_CODE} + ) + TRY_RUN (RUN_RESULT_VAR COMPILE_RESULT_VAR + ${CMAKE_BINARY_DIR} + ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/testCCompiler1.c + COMPILE_DEFINITIONS "-D_SIZEOF___FLOAT128=${H5_SIZEOF___FLOAT128};-D_HAVE_QUADMATH_H=${H5_HAVE_QUADMATH_H}" + COMPILE_OUTPUT_VARIABLE COMPILEOUT + RUN_OUTPUT_VARIABLE OUTPUT_VAR + ) - set (${RETURN_OUTPUT_VAR} ${OUTPUT_VAR}) - - message (VERBOSE "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ") - message (VERBOSE "Test COMPILE_RESULT_VAR ${COMPILE_RESULT_VAR} ") - message (VERBOSE "Test COMPILE_OUTPUT ${COMPILEOUT} ") - message (VERBOSE "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ") - message (VERBOSE "Test RUN_RESULT_VAR ${RUN_RESULT_VAR} ") - message (VERBOSE "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ") - - if (COMPILE_RESULT_VAR) - if (RUN_RESULT_VAR EQUAL "0") - set (${RETURN_VAR} 1 CACHE INTERNAL "Have C function ${FUNCTION_NAME}") - message (VERBOSE "Testing C ${FUNCTION_NAME} - OK") - file (APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log - "Determining if the C ${FUNCTION_NAME} exists passed with the following output:\n" - "${OUTPUT_VAR}\n\n" - ) + set (${RETURN_OUTPUT_VAR} ${OUTPUT_VAR}) + + message (VERBOSE "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ") + message (VERBOSE "Test COMPILE_RESULT_VAR ${COMPILE_RESULT_VAR} ") + message (VERBOSE "Test COMPILE_OUTPUT ${COMPILEOUT} ") + message (VERBOSE "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ") + message (VERBOSE "Test RUN_RESULT_VAR ${RUN_RESULT_VAR} ") + message (VERBOSE "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ") + + if (COMPILE_RESULT_VAR) + if (RUN_RESULT_VAR EQUAL "0") + set (${RETURN_VAR} 1 CACHE INTERNAL "Have C function ${FUNCTION_NAME}") + message (VERBOSE "Testing C ${FUNCTION_NAME} - OK") + file (APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log + "Determining if the C ${FUNCTION_NAME} exists passed with the following output:\n" + "${OUTPUT_VAR}\n\n" + ) + else () + message (VERBOSE "Testing C ${FUNCTION_NAME} - Fail") + set (${RETURN_VAR} 0 CACHE INTERNAL "Have C function ${FUNCTION_NAME}") + file (APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log + "Determining if the C ${FUNCTION_NAME} exists failed with the following output:\n" + "${OUTPUT_VAR}\n\n") + endif () else () - message (VERBOSE "Testing C ${FUNCTION_NAME} - Fail") - set (${RETURN_VAR} 0 CACHE INTERNAL "Have C function ${FUNCTION_NAME}") - file (APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log - "Determining if the C ${FUNCTION_NAME} exists failed with the following output:\n" - "${OUTPUT_VAR}\n\n") + message (FATAL_ERROR "Compilation of C ${FUNCTION_NAME} - Failed") endif () - else () - message (FATAL_ERROR "Compilation of C ${FUNCTION_NAME} - Failed") - endif () - endmacro () + endmacro () - set (PROG_SRC - " + set (PROG_SRC + " #include \n\ #include \n\ #define CHECK_FLOAT128 _SIZEOF___FLOAT128\n\ @@ -838,17 +839,18 @@ if (HDF5_BUILD_FORTRAN) #else\n\ #define C_LDBL_DIG LDBL_DIG\n\ #endif\n\nint main() {\nprintf(\"\\%d\\\;\\%d\\\;\", C_LDBL_DIG, C_FLT128_DIG)\\\;\n\nreturn 0\\\;\n}\n - " - ) + 
" + ) - C_RUN ("maximum decimal precision for C" ${PROG_SRC} PROG_RES PROG_OUTPUT4) - message (STATUS "Testing maximum decimal precision for C - ${PROG_OUTPUT4}") + C_RUN ("maximum decimal precision for C" ${PROG_SRC} PROG_RES PROG_OUTPUT4) + message (STATUS "Testing maximum decimal precision for C - ${PROG_OUTPUT4}") - # dnl The output from the above program will be: - # dnl -- long double decimal precision -- __float128 decimal precision + # dnl The output from the above program will be: + # dnl -- long double decimal precision -- __float128 decimal precision - list (GET PROG_OUTPUT4 0 H5_LDBL_DIG) - list (GET PROG_OUTPUT4 1 H5_FLT128_DIG) + list (GET PROG_OUTPUT4 0 H5_LDBL_DIG) + list (GET PROG_OUTPUT4 1 H5_FLT128_DIG) + endif () if (${HDF_PREFIX}_SIZEOF___FLOAT128 EQUAL "0" OR FLT128_DIG EQUAL "0") set (${HDF_PREFIX}_HAVE_FLOAT128 0) diff --git a/config/toolchain/aarch64.cmake b/config/toolchain/aarch64.cmake index aa84a742654..b0504823e3c 100644 --- a/config/toolchain/aarch64.cmake +++ b/config/toolchain/aarch64.cmake @@ -11,6 +11,7 @@ set (CMAKE_FIND_ROOT_PATH /usr/${TOOLCHAIN_PREFIX}) set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set (CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) set (CMAKE_CROSSCOMPILING_EMULATOR qemu-aarch64) include_directories(/usr/${TOOLCHAIN_PREFIX}/include) diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index a8d86b07ff7..8d9a8db75f0 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -13,6 +13,7 @@ Section V: Options for building HDF5 Libraries with CMake command line Section VI: CMake option defaults for HDF5 Section VII: User Defined Options for HDF5 Libraries with CMake Section VIII: User Defined Compile Flags for HDF5 Libraries with CMake +Section IX: Considerations for cross-compiling ************************************************************************ @@ -938,6 +939,86 @@ The HDF5_ENABLE_COVERAGE option will add "-g -O0 -fprofile-arcs -ftest-coverage" to CMAKE_C_FLAGS. +======================================================================== +IX: Considerations for cross-compiling +======================================================================== + +Cross-compiling has several consequences for CMake: + CMake cannot automatically detect the target platform. + CMake cannot find libraries and headers in the default system directories. + Executables built during cross compiling cannot be executed. + +Cross-compiling support means that CMake separates information about the +build platform and target platform and gives the user mechanisms to solve +cross-compiling issues without additional requirements such as running +virtual machines, etc. + +CMake uses a toolchain of utilities to compile, link libraries and create +archives, and other tasks to drive the build. The toolchain utilities +available are determined by the languages enabled. + +CMake stores info about the current toolchain in the following variables: + CMAKE_C_COMPILER, + CMAKE_CXX_COMPILER. +They contain paths to the C and C++ compilers respectively. This is usually +enough on desktop platforms. In the case of embedded systems, a custom +linker and assembler setting may be needed. In more complex projects +you may need to additionally specify binaries to other parts of the toolchain +(size, ranlib, objcopy…). 
All these tools should be set in the corresponding +variables: + CMAKE_AR, + CMAKE_ASM_COMPILER, + CMAKE_LINKER, + CMAKE_OBJCOPY, + CMAKE_RANLIB + +As for the host and target operating systems, CMake stores their names in the +following variables: + CMAKE_HOST_SYSTEM_NAME – name of the platform, on which CMake is running (host platform). + On major operating systems this is set to the Linux, Windows or + Darwin (MacOS) value. + CMAKE_SYSTEM_NAME – name of the platform, for which we are building (target platform). + By default, this value is the same as CMAKE_HOST_SYSTEM_NAME, which + means that we are building for the local platform (no cross-compilation). + +Put the toolchain variables into a separate file (e.g. .cmake) +and set CMAKE_TOOLCHAIN_FILE variable to the path of that file. +If cmake is invoked with the command line parameter: + --toolchain path/to/file +or + -DCMAKE_TOOLCHAIN_FILE=path/to/file +the file will be loaded early to set values for the compilers. The +CMAKE_CROSSCOMPILING variable is set to true when CMake is cross-compiling. + +Structure of the toolchain file +------------------------------- +In fact, the toolchain file doesn’t have any structure. You can put anything you +want there. But the best practice is to define at least these settings: +path to the toolchain binaries (C compiler, C++ compiler, linker, etc.) +name of the target platform (and optionally target processor architecture) +required compilation and linking flags on that particular platform +toolchain sysroot settings + +It is recommended that you set the CMAKE_FIND_ROOT_PATH variable to a path where +you have an exact copy of the root filesystem you have on your target device (with +libraries and binaries pre-compiled for the target processor). + +References: + https://cmake.org/cmake/help/latest/manual/cmake-toolchains.7.html + https://gitlab.com/embeddedlinux/libs/platform + https://discourse.cmake.org/t/cross-compile-for-aarch64-on-ubuntu/2161/10 + https://stackoverflow.com/questions/54539682/how-to-set-up-cmake-to-cross-compile-with-clang-for-arm-embedded-on-windows?rq=1 + https://developer.android.com/ndk/guides/cmake + +Predefine H5Tinit.c file +------------------------------- +The one file that needs to be pre-generated is the H5Tinit.c file. The variables +indicated in the error log (see above) are the variables that need to match the target system. 
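
For orientation, a minimal toolchain file along the lines described above might
look like the following sketch. It is loosely modeled on the aarch64 toolchain
file under config/toolchain/; the target triplet, the paths, and the file name
my-toolchain.cmake are illustrative assumptions rather than a definitive recipe.

    # my-toolchain.cmake (hypothetical name) -- describe the target platform
    set (CMAKE_SYSTEM_NAME Linux)
    set (TOOLCHAIN_PREFIX aarch64-linux-gnu)

    # toolchain binaries used to compile and link for the target
    set (CMAKE_C_COMPILER ${TOOLCHAIN_PREFIX}-gcc)
    set (CMAKE_CXX_COMPILER ${TOOLCHAIN_PREFIX}-g++)

    # sysroot settings: search the target root filesystem for headers and
    # libraries, but never for programs to run on the build host
    set (CMAKE_FIND_ROOT_PATH /usr/${TOOLCHAIN_PREFIX})
    set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
    set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
    set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
    set (CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)

Such a file would then be passed to CMake with --toolchain path/to/my-toolchain.cmake
or -DCMAKE_TOOLCHAIN_FILE=path/to/my-toolchain.cmake, as described above.
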
+ +The HDF5 CMake variables; + HDF5_USE_PREGEN: set this to true + HDF5_USE_PREGEN_DIR: set this path to the preset H5Tinit.c file + ======================================================================== For further assistance, send email to help@hdfgroup.org ======================================================================== From 1122570104bace94ae67e4aa506303f6dec5ba51 Mon Sep 17 00:00:00 2001 From: Mark Kittisopikul Date: Wed, 8 Mar 2023 15:42:47 -0500 Subject: [PATCH 066/231] Fix new codespell issues (#2521) * Fix new codespell issues * Have codespell ignore ./config/sanitizer/sanitizers.cmake --- .github/workflows/codespell.yml | 2 +- config/toolchain/aarch64.cmake | 2 +- configure.ac | 2 +- doxygen/dox/LearnBasics2.dox | 2 +- doxygen/examples/FileFormat.html | 2 +- doxygen/examples/IOFlow.html | 2 +- fortran/src/H5Pf.c | 2 +- fortran/test/tH5O_F03.F90 | 2 +- src/H5FScache.c | 2 +- src/H5PLpath.c | 2 +- src/H5S.c | 2 +- testpar/t_bigio.c | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index db20884d29c..a4edb0f3e7b 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -11,5 +11,5 @@ jobs: - uses: actions/checkout@v3 - uses: codespell-project/actions-codespell@master with: - skip: ./bin/trace,./hl/tools/h5watch/h5watch.c,./tools/test/h5jam/tellub.c,./config/sanitizer/LICENSE,./tools/test/h5repack/testfiles/*.dat + skip: ./bin/trace,./hl/tools/h5watch/h5watch.c,./tools/test/h5jam/tellub.c,./config/sanitizer/LICENSE,./config/sanitizer/sanitizers.cmake,./tools/test/h5repack/testfiles/*.dat ignore_words_list: isnt,inout,nd,parms,parm,ba,offsetP,ser,ois,had,fiter,fo,clude,refere,minnum,offsetp,creat,ans:,eiter,lastr,ans,isn't,ifset,sur,trun,dne,tthe,hda,filname,te,htmp,minnum,ake,gord,numer,ro,oce,msdos diff --git a/config/toolchain/aarch64.cmake b/config/toolchain/aarch64.cmake index b0504823e3c..69968336f78 100644 --- a/config/toolchain/aarch64.cmake +++ b/config/toolchain/aarch64.cmake @@ -2,7 +2,7 @@ set(TOOLCHAIN_PREFIX aarch64-linux-gnu) set(ANDROID_NDK /opt/android-ndk-linux) set (CMAKE_SYSTEM_NAME Android) set (CMAKE_ANDROID_ARCH_ABI x86_64) -#set (CMAKE_ANDROID_STANDALONE_TOOLCHAIN ${ANDROID_NDK}/build/cmake/andriod.toolchain.cmake) +#set (CMAKE_ANDROID_STANDALONE_TOOLCHAIN ${ANDROID_NDK}/build/cmake/android.toolchain.cmake) set (CMAKE_C_COMPILER ${TOOLCHAIN_PREFIX}-gcc) set (CMAKE_CXX_COMPILER ${TOOLCHAIN_PREFIX}-g++) #set (CMAKE_RC_COMPILER ${TOOLCHAIN_PREFIX}-windres) diff --git a/configure.ac b/configure.ac index b34355c9714..7c8c958bf3e 100644 --- a/configure.ac +++ b/configure.ac @@ -2332,7 +2332,7 @@ AC_ARG_ENABLE([optimization], details. Alternatively, optimization options can be specified directly by specifying them as a - string value. These custom optimzation flags will + string value. These custom optimization flags will completely replace all other optimization flags. [default depends on build mode: debug=debug, production=high, clean=none] diff --git a/doxygen/dox/LearnBasics2.dox b/doxygen/dox/LearnBasics2.dox index ffcb9718409..6f94c7f7eb5 100644 --- a/doxygen/dox/LearnBasics2.dox +++ b/doxygen/dox/LearnBasics2.dox @@ -906,7 +906,7 @@ can be used to obtain information about the selection. The dataset with the region references was read by #H5Dread with the #H5T_STD_REF_DSETREG datatype specified. 
The read reference can be used to obtain the dataset identifier by calling #H5Rdereference or by obtaining -obtain spacial information (dataspace and selection) with the call to #H5Rget_region. +obtain spatial information (dataspace and selection) with the call to #H5Rget_region. The reference to the dataset region has information for both the dataset itself and its selection. In both functions: \li The first parameter is an identifier of the dataset with the region references. diff --git a/doxygen/examples/FileFormat.html b/doxygen/examples/FileFormat.html index fc35357f8ae..133bbc8c260 100644 --- a/doxygen/examples/FileFormat.html +++ b/doxygen/examples/FileFormat.html @@ -30,7 +30,7 @@
       <li>Document's Audience:</li>
 
       <ul>
-        <li>Current H5 library designers and knowledgable external developers.</li>
+        <li>Current H5 library designers and knowledgeable external developers.</li>
       <li>Background Reading:</li>
 
       <ul>
    diff --git a/doxygen/examples/IOFlow.html b/doxygen/examples/IOFlow.html index 6b2c27e0827..e890edbb766 100644 --- a/doxygen/examples/IOFlow.html +++ b/doxygen/examples/IOFlow.html @@ -24,7 +24,7 @@
       <li>Document's Audience:</li>
 
       <ul>
-        <li>Current H5 library designers and knowledgable external developers.</li>
+        <li>Current H5 library designers and knowledgeable external developers.</li>
       <li>Background Reading:</li>
 
       <ul>
    diff --git a/fortran/src/H5Pf.c b/fortran/src/H5Pf.c index 2cec2cedbb7..7371814df0e 100644 --- a/fortran/src/H5Pf.c +++ b/fortran/src/H5Pf.c @@ -1755,7 +1755,7 @@ h5pset_filter_c(hid_t_f *prp_id, int_f *filter, int_f *flags, size_t_f *cd_nelmt * INPUTS * prp_id - property list identifier * OUTPUTS - * nfilters - number of filters defined in the filter pipline + * nfilters - number of filters defined in the filter pipeline * RETURNS * 0 on success, -1 on failure * AUTHOR diff --git a/fortran/test/tH5O_F03.F90 b/fortran/test/tH5O_F03.F90 index eab2e196029..6b6c730236a 100644 --- a/fortran/test/tH5O_F03.F90 +++ b/fortran/test/tH5O_F03.F90 @@ -440,7 +440,7 @@ SUBROUTINE test_obj_visit(total_error) ! Construct "interesting" file to visit CALL build_visit_file(fid) - ! Inialize udata for testing purposes + ! Initialize udata for testing purposes udata%info(1)%path(1:1) ="." udata%info(1)%type_obj = H5O_TYPE_GROUP_F udata%info(2)%path(1:12) = & diff --git a/src/H5FScache.c b/src/H5FScache.c index d7271d7fa76..313439ef37b 100644 --- a/src/H5FScache.c +++ b/src/H5FScache.c @@ -435,7 +435,7 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t H5_AT * * H5F_addr_defined(fspace->addr) * - * will both be TRUE. If this contition does not hold, then + * will both be TRUE. If this condition does not hold, then * either the free space info is not persistent * (!H5F_addr_defined(fspace->addr)???) or the section info * contains no free space data that must be written to file diff --git a/src/H5PLpath.c b/src/H5PLpath.c index 8ad00c3b4af..9619b1392a8 100644 --- a/src/H5PLpath.c +++ b/src/H5PLpath.c @@ -819,7 +819,7 @@ H5PL__find_plugin_in_path_table(const H5PL_search_params_t *search_params, hbool /*------------------------------------------------------------------------- * Function: H5PL__find_plugin_in_path * - * Purpose: Given a path, this function opens the directory and envokes + * Purpose: Given a path, this function opens the directory and invokes * another function to go through all files to find the right * plugin library. Two function definitions are for Unix and * Windows. diff --git a/src/H5S.c b/src/H5S.c index bb5028d482a..22de41f97fa 100644 --- a/src/H5S.c +++ b/src/H5S.c @@ -1609,7 +1609,7 @@ H5S_decode(const unsigned char **p) { H5F_t *f = NULL; /* Fake file structure*/ H5S_t *ds; /* Decoded dataspace */ - H5S_extent_t *extent; /* Entent of decoded dataspace */ + H5S_extent_t *extent; /* Extent of decoded dataspace */ const unsigned char *pp = (*p); /* Local pointer for decoding */ size_t extent_size; /* size of the extent message*/ uint8_t sizeof_size; /* 'Size of sizes' for file */ diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c index 1c6674862ca..4ecb09eb2af 100644 --- a/testpar/t_bigio.c +++ b/testpar/t_bigio.c @@ -1858,7 +1858,7 @@ main(int argc, char **argv) /* Having set the bigio handling to a size that is manageable, * we'll set our 'bigcount' variable to be 2X that limit so * that we try to ensure that our bigio handling is actually - * envoked and tested. + * invoked and tested. 
*/ if (newsize != oldsize) bigcount = newsize * 2; From bb8f15179d361d6fa9e455e01c97b6a2c6e6eba5 Mon Sep 17 00:00:00 2001 From: Mark Kittisopikul Date: Wed, 8 Mar 2023 15:46:47 -0500 Subject: [PATCH 067/231] Remove redundant and flawed documentation of H5D_chunk_iter_op_t from H5D.c (#2512) Forward port of 9f252f1 from #2329 --- src/H5D.c | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/src/H5D.c b/src/H5D.c index 006f4a9d88f..ee6e0f35c6b 100644 --- a/src/H5D.c +++ b/src/H5D.c @@ -2510,32 +2510,6 @@ H5Dget_chunk_info_by_coord(hid_t dset_id, const hsize_t *offset, unsigned *filte * H5D_chunk_iter_op_t cb IN: User callback function, called for every chunk. * void *op_data IN/OUT: Optional user data passed on to user callback. * - * Callback information: - * H5D_chunk_iter_op_t is defined as: - * - * typedef int (*H5D_chunk_iter_op_t)( - * const hsize_t *offset, - * unsigned filter_mask, - * haddr_t addr, - * hsize_t size, - * void *op_data); - * - * H5D_chunk_iter_op_t parameters: - * hsize_t *offset; IN/OUT: Logical position of the chunk’s first element in units of dataset - * elements - * unsigned filter_mask; IN: Bitmask indicating the filters used when the chunk was written haddr_t - * addr; IN: Chunk address in the file - * hsize_t; IN: Chunk size in bytes, 0 if the chunk does not exist - * void *op_data; IN/OUT: Pointer to any user-defined data associated with the operation. - * - * The return values from an operator are: - * Zero (H5_ITER_CONT) causes the iterator to continue, returning zero when all - * elements have been processed. - * Positive (H5_ITER_STOP) causes the iterator to immediately return that positive - * value, indicating short-circuit success. - * Negative (H5_ITER_ERROR) causes the iterator to immediately return that value, - * indicating failure. - * * Return: Non-negative on success, negative on failure * * Programmer: Gaute Hope From 0720eff26659ff381067522df753524f05ba5b5a Mon Sep 17 00:00:00 2001 From: Mark Kittisopikul Date: Wed, 8 Mar 2023 16:06:17 -0500 Subject: [PATCH 068/231] Fix H5Dchunk_iter doxygen example, cherry-pick of ef3bed6 (#2519) --- doxygen/examples/H5D_examples.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doxygen/examples/H5D_examples.c b/doxygen/examples/H5D_examples.c index 7af3cc452b1..ae483ee5566 100644 --- a/doxygen/examples/H5D_examples.c +++ b/doxygen/examples/H5D_examples.c @@ -7,10 +7,10 @@ //! int -chunk_cb(const hsize_t *offset, uint32_t filter_mask, haddr_t addr, uint32_t nbytes, void *op_data) +chunk_cb(const hsize_t *offset, unsigned filter_mask, haddr_t addr, hsize_t size, void *op_data) { // only print the allocated chunk size only - printf("%d\n", nbytes); + printf("%ld\n", size); return EXIT_SUCCESS; } //! 
@@ -67,7 +67,7 @@ H5Ovisit_cb(hid_t obj, const char *name, const H5O_info2_t *info, void *op_data) retval = -1; goto fail_fig; } - +fail_fig: fail_shape: H5Sclose(dspace); fail_dspace: From 5011364686d6a1a6a65d106c527b38f00e48fc0f Mon Sep 17 00:00:00 2001 From: mattjala <124107509+mattjala@users.noreply.github.com> Date: Wed, 8 Mar 2023 15:08:04 -0600 Subject: [PATCH 069/231] Update ReferenceManual.dox (#2517) --- doxygen/dox/ReferenceManual.dox | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doxygen/dox/ReferenceManual.dox b/doxygen/dox/ReferenceManual.dox index b9bcd498357..40f8b8af183 100644 --- a/doxygen/dox/ReferenceManual.dox +++ b/doxygen/dox/ReferenceManual.dox @@ -158,7 +158,7 @@ Follow these simple rules and stay out of trouble: \li \Bold{Dynamic memory allocation:} The API contains a few functions in which the HDF5 library dynamically allocates memory on the caller's behalf. The caller owns this memory and eventually must free it by calling H5free_memory() and not language-explicit memory functions. -\li \Bold{Be careful with that saw:} Do not modify the underlying collection when an +\li \Bold{Don't modify while iterating:} Do not modify the underlying collection when an iteration is in progress! \li \Bold{Use of locations:} Certain API functions, typically called \Code{H5***_by_name} use a combination of identifiers and path names to refer to HDF5 objects. From 5e7fb82ee945ef9b30d94d67f53406893a207ec6 Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Wed, 8 Mar 2023 15:09:06 -0600 Subject: [PATCH 070/231] docs: in -> out (#2514) (#2516) --- src/H5Ppublic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index 5d3774b659f..55d7641ff09 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -4017,7 +4017,7 @@ H5_DLL herr_t H5Pget_page_buffer_size(hid_t plist_id, size_t *buf_size, unsigned * \brief Returns maximum data sieve buffer size * * \fapl_id{fapl_id} - * \param[in] size Maximum size, in bytes, of data sieve buffer + * \param[out] size Maximum size, in bytes, of data sieve buffer * * \return \herr_t * From 511a7baddd255242dadb800430a96102d2f6086e Mon Sep 17 00:00:00 2001 From: "Mark (he/his) C. Miller" Date: Wed, 8 Mar 2023 14:46:10 -0800 Subject: [PATCH 071/231] Elaborate how cd_values get stored (#2522) --- src/H5Ppublic.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index 55d7641ff09..24dc7544185 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -2321,10 +2321,13 @@ H5_DLL herr_t H5Pset_deflate(hid_t plist_id, unsigned level); * (#H5Z_FILTER_DEFLATE) and the Fletcher32 error detection filter * (#H5Z_FILTER_FLETCHER32). * - * The array \p c_values contains \p cd_nelmts integers which are - * auxiliary data for the filter. The integer values will be - * stored in the dataset object header as part of the filter - * information. + * The array \p cd_values contains \p cd_nelmts unsigned integers + * which are auxiliary data for the filter. The values are typically + * used as parameters to control the filter. In a filter's + * \p set_local method (called from \p H5Dcreate), the values are + * interpreted and possibly modified before they are used to control + * the filter. These, possibly modified values, are then stored in + * the dataset object header as auxiliary data for the filter. 
* * The \p flags argument is a bit vector with the following * fields specifying certain general properties of the filter: From e6e324ead512bab691d8194be51e0af2a910ffdd Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Mon, 13 Mar 2023 11:53:08 -0500 Subject: [PATCH 072/231] Addressed various Doxygen grammar issues. (#2524) * reviewed H5A * Addressed various grammar issues. * remove double words, spelling fixes * replace the use of rawdata with raw data --- doxygen/aliases | 2 +- .../H5Pget_metadata_read_attempts.2.c | 4 +- .../H5Pget_metadata_read_attempts.3.c | 4 +- .../examples/H5Pset_metadata_read_attempts.c | 4 +- src/H5ACpublic.h | 2 +- src/H5Apublic.h | 30 ++++++------- src/H5C.c | 6 +-- src/H5Cpkg.h | 4 +- src/H5Cprivate.h | 2 +- src/H5Dint.c | 4 +- src/H5Doh.c | 2 +- src/H5Dpublic.h | 36 +++++++-------- src/H5ESmodule.h | 2 +- src/H5Epublic.h | 10 ++--- src/H5FDhdfs.c | 8 ++-- src/H5FDmulti.h | 8 ++-- src/H5FDros3.c | 8 ++-- src/H5Fpublic.h | 14 +++--- src/H5Gmodule.h | 12 ++--- src/H5Goh.c | 2 +- src/H5Gpublic.h | 16 +++---- src/H5HFcache.c | 2 +- src/H5Ipublic.h | 16 +++---- src/H5Lpublic.h | 28 ++++++------ src/H5MF.c | 2 +- src/H5Mpublic.h | 2 +- src/H5Oalloc.c | 2 +- src/H5Omodule.h | 2 +- src/H5Opublic.h | 44 +++++++++---------- src/H5Oshared.h | 2 +- src/H5PLpublic.h | 6 +-- src/H5Pfapl.c | 4 +- src/H5Ppublic.h | 24 +++++----- src/H5Rpublic.h | 12 ++--- src/H5Sall.c | 4 +- src/H5Shyper.c | 2 +- src/H5Spublic.h | 8 ++-- src/H5Tenum.c | 4 +- src/H5Tpublic.h | 22 +++++----- src/H5Zdevelop.h | 28 ++++++------ src/H5Zmodule.h | 2 +- src/H5Zpublic.h | 6 +-- src/H5public.h | 18 ++++---- 43 files changed, 210 insertions(+), 210 deletions(-) diff --git a/doxygen/aliases b/doxygen/aliases index 3fc7e1028d1..bb31325315e 100644 --- a/doxygen/aliases +++ b/doxygen/aliases @@ -15,7 +15,7 @@ ALIASES += Code{1}="\1" ALIASES += success{1}="\Bold{Success:} \1" ALIASES += failure{1}="\Bold{Failure:} \1" -ALIASES += herr_t="Returns a non-negative value if successful; otherwise returns a negative value." +ALIASES += herr_t="Returns a non-negative value if successful; otherwise, returns a negative value." ALIASES += herr_t_iter="\li Zero causes the iterator to continue, returning zero when the iteration is complete. \li A positive value causes the iterator to immediately return that positive value, indicating short-circuit success. \li A negative value causes the iterator to immediately return that value, indicating failure." ALIASES += hid_t{1}="Returns a \1 identifier if successful; otherwise returns #H5I_INVALID_HID. " ALIASES += hid_ti{1}="Returns an \1 identifier if successful; otherwise returns #H5I_INVALID_HID. 
" diff --git a/doxygen/examples/H5Pget_metadata_read_attempts.2.c b/doxygen/examples/H5Pget_metadata_read_attempts.2.c index 2cd12dbca57..44b26e9d8ed 100644 --- a/doxygen/examples/H5Pget_metadata_read_attempts.2.c +++ b/doxygen/examples/H5Pget_metadata_read_attempts.2.c @@ -1,7 +1,7 @@ /* Open the file with SWMR access and default file access property list */ fid = H5Fopen(FILE, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), H5P_DEFAULT); -/* Get the file's file access roperty list */ +/* Get the file's file access property list */ file_fapl = H5Fget_access_plist(fid); /* Retrieve the # of read attempts from the file's file access property list */ @@ -26,7 +26,7 @@ H5Pset_metadata_read_attempts(fapl, 20); /* Open the file with SWMR access and the non-default file access property list */ fid = H5Fopen(FILE, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl); -/* Get the file's file access roperty list */ +/* Get the file's file access property list */ file_fapl = H5Fget_access_plist(fid); /* Retrieve the # of read attempts from the file's file access property list */ diff --git a/doxygen/examples/H5Pget_metadata_read_attempts.3.c b/doxygen/examples/H5Pget_metadata_read_attempts.3.c index 4b5ea3a6208..8edda9f1383 100644 --- a/doxygen/examples/H5Pget_metadata_read_attempts.3.c +++ b/doxygen/examples/H5Pget_metadata_read_attempts.3.c @@ -1,7 +1,7 @@ /* Open the file with non-SWMR access and default file access property list */ fid = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT); -/* Get the file's file access roperty list */ +/* Get the file's file access property list */ file_fapl = H5Fget_access_plist(fid); /* Retrieve the # of read attempts from the file's file access property list */ @@ -26,7 +26,7 @@ H5Pset_metadata_read_attempts(fapl, 20); /* Open the file with non-SWMR access and the non-default file access property list */ fid = H5Fopen(FILE, H5F_ACC_RDONLY, fapl); -/* Get the file's file access roperty list */ +/* Get the file's file access property list */ file_fapl = H5Fget_access_plist(fid); /* Retrieve the # of read attempts from the file's file access property list */ diff --git a/doxygen/examples/H5Pset_metadata_read_attempts.c b/doxygen/examples/H5Pset_metadata_read_attempts.c index 7c2f65d3208..db3573b9315 100644 --- a/doxygen/examples/H5Pset_metadata_read_attempts.c +++ b/doxygen/examples/H5Pset_metadata_read_attempts.c @@ -8,7 +8,7 @@ H5Pset_metadata_read_attempts(fapl, 20); /* Open the file with SWMR access and the non-default file access property list */ fid = H5Fopen(FILE, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl); -/* Get the file's file access roperty list */ +/* Get the file's file access property list */ file_fapl = H5Fget_access_plist(fid); /* Retrieve the # of read attempts from the file's file access property list */ @@ -38,7 +38,7 @@ H5Pset_metadata_read_attempts(fapl, 20); /* Open the file with SWMR access and the non-default file access property list */ fid = H5Fopen(FILE, H5F_ACC_RDONLY, fapl); -/* Get the file's file access roperty list */ +/* Get the file's file access property list */ file_fapl = H5Fget_access_plist(fid); /* Retrieve the # of read attempts from the file's file access property list */ diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h index 0967f77dc26..5dc65c7a191 100644 --- a/src/H5ACpublic.h +++ b/src/H5ACpublic.h @@ -466,7 +466,7 @@ typedef struct H5AC_cache_config_t { /* general configuration fields: */ //! 
int version; - /**< Integer field indicating the the version of the H5AC_cache_config_t + /**< Integer field indicating the version of the H5AC_cache_config_t * in use. This field should be set to #H5AC__CURR_CACHE_CONFIG_VERSION * (defined in H5ACpublic.h). */ diff --git a/src/H5Apublic.h b/src/H5Apublic.h index e21e8085c0e..92df94a51ed 100644 --- a/src/H5Apublic.h +++ b/src/H5Apublic.h @@ -127,7 +127,7 @@ H5_DLL herr_t H5Aclose_async(hid_t attr_id, hid_t es_id); * The attribute identifier returned by this function must be released * with H5Aclose() resource leaks will develop. * - * \note If \p loc_id is a file identifier, the attribute will be attached + * \note If \p loc_id is a file identifier, the attribute will be attached to * that file’s root group. * * \par Example @@ -246,8 +246,8 @@ H5_DLL herr_t H5Adelete(hid_t loc_id, const char *attr_name); * \param[in] obj_name Name of object, relative to location, from which * attribute is to be removed * \param[in] idx_type Type of index - * \param[in] order Order in which to iterate over index - * \param[in] n Offset within index + * \param[in] order Order in which to iterate over the index + * \param[in] n Offset within the index * \lapl_id * * \return \herr_t @@ -264,7 +264,7 @@ H5_DLL herr_t H5Adelete(hid_t loc_id, const char *attr_name); * The order in which the index is to be traversed is specified by * \p order. For example, if \p idx_type, \p order, * and \p n are set to #H5_INDEX_NAME, #H5_ITER_INC, and 5, - * respectively, the fifth attribute in lexicographic order of + * respectively, the fifth attribute in the lexicographic order of * attribute names will be removed. * * The link access property list, \p lapl_id, may provide @@ -354,7 +354,7 @@ H5_DLL herr_t H5Aexists_async(hid_t obj_id, const char *attr_name, hbool_t *exis * \p loc_id specifies a location in the file containing the object. * \p obj_name is the name of the object to which the attribute is * attached and can be a relative name, relative to \p loc_id, - * or an absolute name, based in the root group of the file. + * or an absolute name, based on the root group of the file. * * The link access property list, \p lapl_id, may provide * information regarding the properties of links required to access @@ -403,7 +403,7 @@ H5_DLL hid_t H5Aget_create_plist(hid_t attr_id); /** * \ingroup H5A * - * \brief Retrieves attribute information, by attribute identifier + * \brief Retrieves attribute information by attribute identifier * * \attr_id * \param[out] ainfo Attribute information struct @@ -438,7 +438,7 @@ H5_DLL herr_t H5Aget_info(hid_t attr_id, H5A_info_t *ainfo /*out*/); * \details H5Aget_info_by_idx() retrieves information for an attribute * that is attached to an object, which is specified by its * location and name, \p loc_id and \p obj_name, respectively. - * The attribute is located by its index position and the attribute + * The attribute is located by its index position, and the attribute * information is returned in the \p ainfo struct. 
* * The attribute is located by means of an index type, an index @@ -458,7 +458,7 @@ H5_DLL herr_t H5Aget_info_by_idx(hid_t loc_id, const char *obj_name, H5_index_t /** * \ingroup H5A * - * \brief Retrieves attribute information, by attribute name + * \brief Retrieves attribute information by attribute name * * \fgdt_loc_id * \param[in] obj_name Name of the object to which an attribute is attached, @@ -494,7 +494,7 @@ H5_DLL herr_t H5Aget_info_by_name(hid_t loc_id, const char *obj_name, const char * \param[out] buf Buffer to store name in * * \return Returns the length of the attribute's name, which may be longer - * than \p buf_size, if successful. Otherwise returns a negative + * than \p buf_size, if successful. Otherwise, returns a negative * value. * * \details H5Aget_name() retrieves the name of an attribute specified by @@ -529,7 +529,7 @@ H5_DLL ssize_t H5Aget_name(hid_t attr_id, size_t buf_size, char *buf); * \lapl_id * * \return Returns attribute name size, in bytes, if successful; - * otherwise returns a negative value. + * otherwise, returns a negative value. * * \details H5Aget_name_by_idx() retrieves the name of an attribute that is * attached to an object, which is specified by its location and @@ -584,7 +584,7 @@ H5_DLL hid_t H5Aget_space(hid_t attr_id); * \attr_id * * \return Returns the amount of storage size allocated for the attribute; - * otherwise returns 0 (zero). + * otherwise, returns 0 (zero). * * \details H5Aget_storage_size() returns the amount of storage that is * required for the specified attribute, \p attr_id. @@ -687,7 +687,7 @@ H5_DLL herr_t H5Aiterate2(hid_t loc_id, H5_index_t idx_type, H5_iter_order_t ord * * \return \herr_t * Further note that this function returns the return value of - * the last operator if it was non-zero, which can be a negative + * the last operator if it is non-zero, which can be a negative * value, zero if all attributes were processed, or a positive value * indicating short-circuit success. * @@ -1150,7 +1150,7 @@ H5_DLL hid_t H5Acreate1(hid_t loc_id, const char *name, hid_t type_id, hid_t spa * * \fgdt_loc_id * - * \return Returns the number of attributes if successful; otherwise returns + * \return Returns the number of attributes if successful; otherwise, returns * a negative value. * * \deprecation_note{H5Oget_info(), H5Oget_info_by_name(), and H5Oget_info_by_idx()} @@ -1207,8 +1207,8 @@ H5_DLL herr_t H5Aiterate1(hid_t loc_id, unsigned *idx, H5A_operator1_t op, void * * \deprecation_note{H5Aopen_by_idx()} * - * \details H5Aopen_idx() opens an attribute which is attached to the - * object specified with \p loc_id . The location object may be + * \details H5Aopen_idx() opens an attribute that is attached to the + * object specified with \p loc_id. The location object may be * either a group, dataset, or named datatype, all of which may * have any sort of attribute. The attribute specified by the index, * \p idx , indicates the attribute to access. The value of \p idx diff --git a/src/H5C.c b/src/H5C.c index 4262f008afa..ae03d6913ab 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -4331,7 +4331,7 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) * must run the marker maintenance code, whether we run the size * reduction code or not. We do this in two places -- here we * insert a new marker if the number of active epoch markers is - * is less than the the current epochs before eviction, and after + * is less than the current epochs before eviction, and after * the ageout call, we cycle the markers. 
* * However, we can't call the ageout code or cycle the markers @@ -5027,7 +5027,7 @@ H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr) * * Purpose: Remove epoch markers from the end of the LRU list and * mark them as inactive until the number of active markers - * equals the the current value of + * equals the current value of * (cache_ptr->resize_ctl).epochs_before_eviction. * * Return: SUCCEED on success/FAIL on failure. @@ -8861,7 +8861,7 @@ H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry HDassert(!entry_ptr->flush_in_progress); HDassert(entry_ptr->type); - /* Set entry_ptr->flush_in_progress to TRUE so the the target entry + /* Set entry_ptr->flush_in_progress to TRUE so the target entry * will not be evicted out from under us. Must set it back to FALSE * when we are done. */ diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 4f826d11e6d..23c2b782f6f 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -4034,7 +4034,7 @@ typedef struct H5C_tag_info_t { * * 2) A pinned entry can be accessed or modified at any time. * This places an additional burden on the associated pre-serialize - * and serialize callbacks, which must ensure the the entry is in + * and serialize callbacks, which must ensure the entry is in * a consistent state before creating an image of it. * * 3) A pinned entry can be marked as dirty (and possibly @@ -4398,7 +4398,7 @@ typedef struct H5C_tag_info_t { * order, we scan the index repeatedly, once for each flush dependency * height in increasing order. * - * This operation is complicated by the fact that entries other the the + * This operation is complicated by the fact that entries other than the * target may be inserted, loaded, relocated, or removed from the cache * (either by eviction or the take ownership flag) as the result of a * pre_serialize or serialize callback. While entry removals are not diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 109f5f6ae80..6a661d298fa 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -1125,7 +1125,7 @@ typedef int H5C_ring_t; * flag will only be applied to one entry, the superblock, * and the code utilizing these flags is protected with HDasserts * to enforce this. This restraint can certainly be relaxed in - * the future if the the need for multiple entries getting flushed + * the future if the need for multiple entries getting flushed * last or collectively arises, though the code allowing for that * will need to be expanded and tested appropriately if that * functionality is desired. diff --git a/src/H5Dint.c b/src/H5Dint.c index f55b8299daa..34a9d75a606 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -1016,7 +1016,7 @@ H5D__update_oh_info(H5F_t *file, H5D_t *dset, hid_t dapl_id) #endif /* H5O_ENABLE_BOGUS */ /* Add a modification time message, if using older format. 
*/ - /* (If using v18 format versions and above, the the modification time is part of the object + /* (If using v18 format versions and above, the modification time is part of the object * header and doesn't use a separate message -QAK) */ if (!use_at_least_v18) @@ -1506,7 +1506,7 @@ H5D_open(const H5G_loc_t *loc, hid_t dapl_id) if (H5FO_top_incr(dataset->oloc.file, dataset->oloc.addr) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "can't increment object count") - /* We're the first dataset to use the the shared info */ + /* We're the first dataset to use the shared info */ dataset->shared->fo_count = 1; /* Set the external file prefix */ diff --git a/src/H5Doh.c b/src/H5Doh.c index 1e498bc059e..42cb54315a9 100644 --- a/src/H5Doh.c +++ b/src/H5Doh.c @@ -282,7 +282,7 @@ H5O__dset_create(H5F_t *f, void *_crt_info, H5G_loc_t *obj_loc) HDassert(crt_info); HDassert(obj_loc); - /* Create the the dataset */ + /* Create the dataset */ if (NULL == (dset = H5D__create(f, crt_info->type_id, crt_info->space, crt_info->dcpl_id, crt_info->dapl_id))) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to create dataset") diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index 45b941acbb0..9ec6f708abb 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -279,7 +279,7 @@ extern "C" { * * \p loc_id may specify a file, group, dataset, named datatype, * or attribute. If an attribute, dataset, or named datatype is - * specified then the dataset will be created at the location + * specified, then the dataset will be created at the location * where the attribute, dataset, or named datatype is attached. * * \p name may be either an absolute path in the file or a relative @@ -290,7 +290,7 @@ extern "C" { * file location where the dataset will be created, the datatype * is copied and converted to a transient type. * - * The link creation property list, \p lcpl_id, governs creation + * The link creation property list, \p lcpl_id, governs the creation * of the link(s) by which the new dataset is accessed and the * creation of any intermediate groups that may be missing. * @@ -348,12 +348,12 @@ H5_DLL hid_t H5Dcreate_async(hid_t loc_id, const char *name, hid_t type_id, hid * * \p loc_id may specify a file, group, dataset, named datatype, * or attribute. If an attribute, dataset, or named datatype is - * specified then the dataset will be created at the location + * specified, then the dataset will be created at the location * where the attribute, dataset, or named datatype is attached. * * The dataset’s datatype and dataspace are specified by * \p type_id and \p space_id, respectively. These are the - * datatype and dataspace of the dataset as it will exist in + * datatype and dataspace of the dataset as they will exist in * the file, which may differ from the datatype and dataspace * in application memory. * @@ -692,7 +692,7 @@ H5_DLL herr_t H5Dget_chunk_info_by_coord(hid_t dset_id, const hsize_t *offset, u * context \p op_data. * * \par Example - * For each chunk, print the allocated chunk size (0 for un-allocated chunks). + * For each chunk, print the allocated chunk size (0 for unallocated chunks). * \snippet H5D_examples.c H5Dchunk_iter_cb * Iterate over all chunked datasets and chunks in a file. * \snippet H5D_examples.c H5Ovisit_cb @@ -729,17 +729,17 @@ H5_DLL herr_t H5Dchunk_iter(hid_t dset_id, hid_t dxpl_id, H5D_chunk_iter_op_t cb * * \p chk_idx is the chunk index in the selection. 
The index value * may have a value of 0 up to the number of chunks stored in - * the file that have a nonempty intersection with the file - * dataspace selection + * the file that has a nonempty intersection with the file + * dataspace selection. * * \note As of 1.10.5, the dataspace intersection is not yet - * supported, hence, the index is of all the written chunks. + * supported. Hence, the index is of all the written chunks. * * \p fspace_id specifies the file dataspace selection. It is - * intended to take #H5S_ALL for specifying the current selection. + * intended to take #H5S_ALL to specify the current selection. * * \note Please be aware that this function currently does not - * support non-trivial selections, thus \p fspace_id has no + * support non-trivial selections; thus \p fspace_id has no * effect. Also, the implementation does not handle the #H5S_ALL * macro correctly. As a workaround, an application can get * the dataspace for the dataset using H5Dget_space() and pass that @@ -893,7 +893,7 @@ H5_DLL herr_t H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, hid_ * * This function will produce the same results as \p count calls to * H5Dread(). Information listed in that function about the specifics - * of its behaviour also apply to H5Dread_multi(). By calling + * of its behavior also applies to H5Dread_multi(). By calling * H5Dread_multi() instead of multiple calls to H5Dread(), however, the * library can in some cases pass information about the entire I/O * operation to the file driver, which can improve performance. @@ -1156,7 +1156,7 @@ H5_DLL herr_t H5Dwrite_multi_async(size_t count, hid_t dset_id[], hid_t mem_type * the file. Only one chunk can be written with this function. * * \p filters is a mask providing a record of which filters are - * used with the the chunk. The default value of the mask is + * used with the chunk. The default value of the mask is * zero (0), indicating that all enabled filters are applied. A * filter is skipped if the bit corresponding to the filter’s * position in the pipeline (0 ≤ position < 32) is turned on. @@ -1489,7 +1489,7 @@ H5_DLL herr_t H5Drefresh(hid_t dset_id); * \p op and scatters it to the supplied buffer \p dst_buf in a * manner similar to data being written to a dataset. * - * \p dst_space_id is a dataspace which defines the extent of \p + * \p dst_space_id is a dataspace that defines the extent of \p * dst_buf and the selection within it to scatter the data to. * * \p type_id is the datatype of the data to be scattered in both @@ -1546,7 +1546,7 @@ H5_DLL herr_t H5Dscatter(H5D_scatter_func_t op, void *op_data, hid_t type_id, hi * enough to hold all the data if the callback function \p op is * not provided. * - * \p op is a callback function which handles the gathered data. + * \p op is a callback function that handles the gathered data. * It is optional if \p dst_buf is large enough to hold all of the * gathered data; required otherwise. * @@ -1715,10 +1715,10 @@ H5_DLL herr_t H5Dget_chunk_index_type(hid_t did, H5D_chunk_index_t *idx_type); * * H5Dcreate() and H5Dcreate_anon() return a dataset identifier for * success or a negative value for failure. The dataset identifier - * should eventually be closed by calling H5Dclose() to release + * should eventually be closed by calling H5Dclose() to release the * resources it uses. * - * See H5Dcreate_anon() for discussion of the differences between + * See H5Dcreate_anon() for a discussion of the differences between * H5Dcreate() and H5Dcreate_anon(). 
* * The HDF5 library provides flexible means of specifying a fill value, @@ -1794,7 +1794,7 @@ H5_DLL hid_t H5Dopen1(hid_t loc_id, const char *name); * * This function ensures that the dataset dimensions are of at least * the sizes specified in size. The function H5Dset_extent() must be - * used if the dataset dimension sizes are are to be reduced. + * used if the dataset dimension sizes are to be reduced. * * \version 1.8.0 Function deprecated in this release. Parameter size * syntax changed to \Code{const hsize_t size[]} in this release. @@ -1822,7 +1822,7 @@ H5_DLL herr_t H5Dextend(hid_t dset_id, const hsize_t size[]); * The \p type_id must be the datatype stored in the buffer. The \p * space_id describes the selection for the memory buffer to free the * VL datatypes within. The \p dxpl_id is the dataset transfer property - * list which was used for the I/O transfer to create the buffer. And + * list that was used for the I/O transfer to create the buffer. And * \p buf is the pointer to the buffer to be reclaimed. * * The VL structures (\ref hvl_t) in the user's buffer are modified to diff --git a/src/H5ESmodule.h b/src/H5ESmodule.h index 1e333f71ab4..d945b702bcf 100644 --- a/src/H5ESmodule.h +++ b/src/H5ESmodule.h @@ -120,7 +120,7 @@ * * \brief Event Set Interface * - * \details \Bold{This interface can be only used with the HDF5 VOL connectors that + * \details \Bold{This interface can only be used with the HDF5 VOL connectors that * enable the asynchronous feature in HDF5.} The native HDF5 library has * only synchronous operations. * diff --git a/src/H5Epublic.h b/src/H5Epublic.h index dbc3457879a..c02049a6e3c 100644 --- a/src/H5Epublic.h +++ b/src/H5Epublic.h @@ -203,7 +203,7 @@ typedef herr_t (*H5E_auto2_t)(hid_t estack, void *client_data); * \param[in] cls_name Name of the error class * \param[in] lib_name Name of the client library or application to which the error class belongs * \param[in] version Version of the client library or application to which the - error class belongs. Can be \c NULL. + error class belongs. It can be \c NULL. * \return Returns a class identifier on success; otherwise returns H5I_INVALID_ID. * * \details H5Eregister_class() registers a client library or application @@ -341,12 +341,12 @@ H5_DLL herr_t H5Eclose_stack(hid_t stack_id); * * \param[in] class_id Error class identifier * \param[out] name Buffer for the error class name - * \param[in] size The maximum number of characters the class name to be returned - * by this function in\p name. + * \param[in] size The maximum number of characters of the class name to be returned + * by this function in \p name. * \return Returns non-negative value as on success; otherwise returns negative value. * * \details H5Eget_class_name() retrieves the name of the error class specified - * by the class identifier. If non-NULL pointer is passed in for \p + * by the class identifier. If a non-NULL pointer is passed in for \p * name and \p size is greater than zero, the class name of \p size * long is returned. The length of the error class name is also * returned. If NULL is passed in as \p name, only the length of class @@ -817,7 +817,7 @@ H5_DLL herr_t H5Epush1(const char *file, const char *func, unsigned line, H5E_ma * \deprecated 1.8.0 Function H5Eprint() renamed to H5Eprint1() and * deprecated in this release. * - * \details H5Eprint1() prints prints the error stack for the current thread + * \details H5Eprint1() prints the error stack for the current thread * on the specified stream, \p stream. 
Even if the error stack is empty, a * one-line message of the following form will be printed: * \code{.unparsed} diff --git a/src/H5FDhdfs.c b/src/H5FDhdfs.c index d81e8a59a93..2c64237cbaa 100644 --- a/src/H5FDhdfs.c +++ b/src/H5FDhdfs.c @@ -924,7 +924,7 @@ H5FD__hdfs_open(const char *path, unsigned flags, hid_t fapl_id, haddr_t maxaddr * or "meta" (any other flag) * * Prints filename and listing of total number of reads and bytes read, - * both as a grand total and separate meta- and rawdata reads. + * both as a grand total and separate meta- and raw data reads. * * If any reads were done, prints out two tables: * @@ -1153,11 +1153,11 @@ hdfs__fprint_stats(FILE *stream, const H5FD_hdfs_t *file) HDfprintf(stream, " %8.3f%c %7d %7d %8.3f%c %8.3f%c %8.3f%c %8.3f%c\n", re_dub, suffixes[suffix_i], /* bin ceiling */ m->count, /* metadata reads */ - r->count, /* rawdata reads */ + r->count, /* raw data reads */ bm_val, bm_suffix, /* metadata bytes */ - br_val, br_suffix, /* rawdata bytes */ + br_val, br_suffix, /* raw data bytes */ am_val, am_suffix, /* metadata average */ - ar_val, ar_suffix); /* rawdata average */ + ar_val, ar_suffix); /* raw data average */ HDfflush(stream); } diff --git a/src/H5FDmulti.h b/src/H5FDmulti.h index e5975d3664b..1765c6ad15b 100644 --- a/src/H5FDmulti.h +++ b/src/H5FDmulti.h @@ -226,17 +226,17 @@ H5_DLL herr_t H5Pget_fapl_multi(hid_t fapl_id, H5FD_mem_t *memb_map /*out*/, hid * * \par Example: * \code - * // Example 1: Both metadata and rawdata files are in the same + * // Example 1: Both metadata and raw data files are in the same * // directory. Use Station1-m.h5 and Station1-r.h5 as - * // the metadata and rawdata files. + * // the metadata and raw data files. * hid_t fapl, fid; * fapl = H5Pcreate(H5P_FILE_ACCESS); * H5Pset_fapl_split(fapl, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT); * fid=H5Fcreate("Station1",H5F_ACC_TRUNC,H5P_DEFAULT,fapl); * - * // Example 2: metadata and rawdata files are in different + * // Example 2: metadata and raw data files are in different * // directories. Use PointA-m.h5 and /pfs/PointA-r.h5 as - * // the metadata and rawdata files. + * // the metadata and raw data files. * hid_t fapl, fid; * fapl = H5Pcreate(H5P_FILE_ACCESS); * H5Pset_fapl_split(fapl, "-m.h5", H5P_DEFAULT, "/pfs/%s-r.h5", H5P_DEFAULT); diff --git a/src/H5FDros3.c b/src/H5FDros3.c index 156da680508..9a529d68a0e 100644 --- a/src/H5FDros3.c +++ b/src/H5FDros3.c @@ -793,7 +793,7 @@ H5FD__ros3_open(const char *url, unsigned flags, hid_t fapl_id, haddr_t maxaddr) * or "meta" (any other flag) * * Prints filename and listing of total number of reads and bytes read, - * both as a grand total and separate meta- and rawdata reads. + * both as a grand total and separate meta- and raw data reads. 
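As a companion to the split-driver examples quoted above, the following sketch reopens a file that is assumed to have been created as in Example 1, that is, with the "-m.h5" and "-r.h5" extensions; the same extension pair has to be supplied again on reopen. The file name is taken from that example and is otherwise arbitrary.

#include "hdf5.h"

int main(void)
{
    /* The metadata and raw data extensions must match the ones used
     * when the split file was created. */
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_split(fapl, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT);

    hid_t fid = H5Fopen("Station1", H5F_ACC_RDONLY, fapl);

    H5Fclose(fid);
    H5Pclose(fapl);
    return 0;
}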
* * If any reads were done, prints out two tables: * @@ -1042,11 +1042,11 @@ ros3_fprint_stats(FILE *stream, const H5FD_ros3_t *file) HDfprintf(stream, " %8.3f%c %7d %7d %8.3f%c %8.3f%c %8.3f%c %8.3f%c\n", re_dub, suffixes[suffix_i], /* bin ceiling */ m->count, /* metadata reads */ - r->count, /* rawdata reads */ + r->count, /* raw data reads */ bm_val, bm_suffix, /* metadata bytes */ - br_val, br_suffix, /* rawdata bytes */ + br_val, br_suffix, /* raw data bytes */ am_val, am_suffix, /* metadata average */ - ar_val, ar_suffix); /* rawdata average */ + ar_val, ar_suffix); /* raw data average */ HDfflush(stream); } diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h index 9bee5c4c848..59f8dec268a 100644 --- a/src/H5Fpublic.h +++ b/src/H5Fpublic.h @@ -398,7 +398,7 @@ H5_DLL hid_t H5Fcreate_async(const char *filename, unsigned flags, hid_t fcpl_id * opened. * * The \p fapl_id parameter specifies the file access property list. - * Use of #H5P_DEFAULT specifies that default I/O access properties + * The use of #H5P_DEFAULT specifies that default I/O access properties * are to be used. * * The \p flags parameter specifies whether the file will be opened in @@ -530,7 +530,7 @@ H5_DLL hid_t H5Freopen_async(hid_t file_id, hid_t es_id); * \snippet H5F_examples.c flush * * \attention HDF5 does not possess full control over buffering. H5Fflush() - * flushes the internal HDF5 buffers then asks the operating system + * flushes the internal HDF5 buffers and then asks the operating system * (the OS) to flush the system buffers for the open files. After * that, the OS is responsible for ensuring that the data is * actually flushed to disk. @@ -568,7 +568,7 @@ H5_DLL herr_t H5Fflush_async(hid_t object_id, H5F_scope_t scope, hid_t es_id); * \snippet H5F_examples.c minimal * * \note \Bold{Delayed close:} Note the following deviation from the - * above-described behavior. If H5Fclose() is called for a file but one + * above-described behavior. If H5Fclose() is called for a file, but one * or more objects within the file remain open, those objects will remain * accessible until they are individually closed. Thus, if the dataset * \c data_sample is open when H5Fclose() is called for the file @@ -577,7 +577,7 @@ H5_DLL herr_t H5Fflush_async(hid_t object_id, H5F_scope_t scope, hid_t es_id); * automatically closed once all objects in the file have been closed.\n * Be warned, however, that there are circumstances where it is not * possible to delay closing a file. For example, an MPI-IO file close is - * a collective call; all of the processes that opened the file must + * a collective call; all of the processes that open the file must * close it collectively. The file cannot be closed at some time in the * future by each process in an independent fashion. Another example is * that an application using an AFS token-based file access privilege may @@ -1360,7 +1360,7 @@ H5_DLL herr_t H5Fstart_swmr_write(hid_t file_id); * \snippet this H5F_sect_info_t_snip * * This routine retrieves free-space section information for \p nsects - * sections or at most the maximum number of sections in the specified + * sections or, at most, the maximum number of sections in the specified * free-space manager. If the number of sections is not known, a * preliminary H5Fget_free_sections() call can be made by setting \p * sect_info to NULL and the total number of free-space sections for @@ -1662,7 +1662,7 @@ H5_DLL herr_t H5Fget_mdc_image_info(hid_t file_id, haddr_t *image_addr, hsize_t * file_id. 
This setting is used to inform the library to create * minimized dataset object headers when \c TRUE. * - * The setting's value is returned in the boolean pointer minimize. + * The setting's value is returned in the boolean pointer minimized. * * \since 1.10.5 * @@ -1888,7 +1888,7 @@ H5_DLL herr_t H5Fget_info1(hid_t obj_id, H5F_info1_t *file_info); /** * \ingroup H5F * - * \brief Sets thelatest version of the library to be used for writing objects + * \brief Sets the latest version of the library to be used for writing objects * * \file_id * \param[in] latest_format Latest format flag diff --git a/src/H5Gmodule.h b/src/H5Gmodule.h index d4738f63006..f09486d650f 100644 --- a/src/H5Gmodule.h +++ b/src/H5Gmodule.h @@ -886,24 +886,24 @@ * objects appear in at least one group (with the possible exception of * the root object) and since objects can have names in more than one * group, the set of all objects in an HDF5 file is a directed - * graph. The internal nodes (nodes with out-degree greater than zero) - * must be groups while the leaf nodes (nodes with out-degree zero) are + * graph. The internal nodes (nodes with an out-degree greater than zero) + * must be groups, while the leaf nodes (nodes with an out-degree zero) are * either empty groups or objects of some other type. Exactly one * object in every non-empty file is the root object. The root object * always has a positive in-degree because it is pointed to by the file - * super block. + * superblock. * * \Bold{Locating objects in the HDF5 file hierarchy:} An object name * consists of one or more components separated from one another by - * slashes. An absolute name begins with a slash and the object is + * slashes. An absolute name begins with a slash, and the object is * located by looking for the first component in the root object, then * looking for the second component in the first object, etc., until * the entire name is traversed. A relative name does not begin with a - * slash and the traversal begins at the location specified by the + * slash, and the traversal begins at the location specified by the * create or access function. * * \Bold{Group implementations in HDF5:} The original HDF5 group - * implementation provided a single indexed structure for link + * implementation provided a single-indexed structure for link * storage. A new group implementation, in HDF5 Release 1.8.0, enables * more efficient compact storage for very small groups, improved link * indexing for large groups, and other advanced features. diff --git a/src/H5Goh.c b/src/H5Goh.c index 5e8ab02a46a..94d51f066c3 100644 --- a/src/H5Goh.c +++ b/src/H5Goh.c @@ -246,7 +246,7 @@ H5O__group_create(H5F_t *f, void *_crt_info, H5G_loc_t *obj_loc) HDassert(crt_info); HDassert(obj_loc); - /* Create the the group */ + /* Create the group */ if (NULL == (grp = H5G__create(f, crt_info))) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, NULL, "unable to create group") diff --git a/src/H5Gpublic.h b/src/H5Gpublic.h index ace2071dde5..0e0a58b1986 100644 --- a/src/H5Gpublic.h +++ b/src/H5Gpublic.h @@ -406,7 +406,7 @@ H5_DLL herr_t H5Gget_info_by_idx_async(hid_t loc_id, const char *group_name, H5_ * \return \herr_t * * \details H5Gflush() causes all buffers associated with a group to be - * immediately flushed to disk without removing the data from + * immediately flushed to the disk without removing the data from * the cache. 
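A small sketch of the H5Gflush() call documented above; the file and group names are invented and error checking is omitted for brevity.

#include "hdf5.h"

int main(void)
{
    hid_t file  = H5Fcreate("flush_demo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t group = H5Gcreate2(file, "/G1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /* Push the group's buffered metadata out to the file; the group
     * stays open and its entries stay in the cache. */
    H5Gflush(group);

    H5Gclose(group);
    H5Fclose(file);
    return 0;
}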
* * \attention @@ -414,7 +414,7 @@ H5_DLL herr_t H5Gget_info_by_idx_async(hid_t loc_id, const char *group_name, H5_ * flushes the internal HDF5 buffers and then asks the operating * system (the OS) to flush the system buffers for the open * files. After that, the OS is responsible for ensuring that - * the data is actually flushed to disk. + * the data is actually flushed to the disk. * * \since 1.8.0 * @@ -435,7 +435,7 @@ H5_DLL herr_t H5Gflush(hid_t group_id); * cleared and immediately re-loaded with updated contents from disk. * * This function essentially closes the group, evicts all - * metadata associated with it from the cache, and then re-opens + * metadata associated with it from the cache, and then reopens * the group. The reopened group is automatically re-registered * with the same identifier. * @@ -454,7 +454,7 @@ H5_DLL herr_t H5Grefresh(hid_t group_id); * * \return \herr_t * - * \details H5Gclose() releases resources used by a group which was + * \details H5Gclose() releases resources used by a group that was * opened by H5Gcreate() or H5Gopen(). After closing a group, * \p group_id cannot be used again until another H5Gcreate() * or H5Gopen() is called on it. @@ -926,7 +926,7 @@ H5_DLL herr_t H5Gset_comment(hid_t loc_id, const char *name, const char *comment * \deprecated This function is deprecated in favor of the function * H5Oget_comment(). * - * \details H5Gget_comment() retrieves the comment for the the object specified + * \details H5Gget_comment() retrieves the comment for the object specified * by \p loc_id and \p name. The comment is returned in the buffer \p * buf. * @@ -987,7 +987,7 @@ H5_DLL int H5Gget_comment(hid_t loc_id, const char *name, size_t bufsize, char * * The operation receives the group identifier for the group being * iterated over, \p group, the name of the current object within * the group, \p name, and the pointer to the operator data - * passed in to H5Giterate(), \p op_data. + * passed into H5Giterate(), \p op_data. * * The return values from an operator are: * \li Zero causes the iterator to continue, returning zero when all @@ -1096,7 +1096,7 @@ H5_DLL herr_t H5Gget_objinfo(hid_t loc_id, const char *name, hbool_t follow_link *------------------------------------------------------------------------- * \ingroup H5G * - * \brief Returns a name of an object specified by an index + * \brief Returns the name of an object specified by an index * * \fg_loc_id * \param[in] idx Transient index identifying object @@ -1109,7 +1109,7 @@ H5_DLL herr_t H5Gget_objinfo(hid_t loc_id, const char *name, hbool_t follow_link * * \deprecated This function is deprecated in favor of the function H5Lget_name_by_idx(). * - * \details H5Gget_objname_by_idx() returns a name of the object specified by + * \details H5Gget_objname_by_idx() returns the name of the object specified by * the index \p idx in the group \p loc_id. * * The group is specified by a group identifier \p loc_id. If diff --git a/src/H5HFcache.c b/src/H5HFcache.c index 3cb067ccc42..ab01c1e7aa6 100644 --- a/src/H5HFcache.c +++ b/src/H5HFcache.c @@ -3385,7 +3385,7 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr, H5 * If the entry is unprotected and unpinned, we simply * protect it. * - * If, however, the the child iblock is already protected, + * If, however, the child iblock is already protected, * but not pinned, we have a bit of a problem, as we have * no legitimate way of looking up its pointer in memory. 
* diff --git a/src/H5Ipublic.h b/src/H5Ipublic.h index d699c92252b..69b2450644e 100644 --- a/src/H5Ipublic.h +++ b/src/H5Ipublic.h @@ -235,7 +235,7 @@ H5_DLL hid_t H5Iget_file_id(hid_t id); * \brief Retrieves a name of an object based on the object identifier * * \obj_id{id} - * \param[out] name A buffer for thename associated with the identifier + * \param[out] name A buffer for the name associated with the identifier * \param[in] size The size of the \p name buffer; usually the size of * the name in bytes plus 1 for a NULL terminator * @@ -339,7 +339,7 @@ H5_DLL int H5Iinc_ref(hid_t id); * with H5Iinc_ref(). When an object identifier’s reference count * reaches zero, the object will be closed. Calling an object * identifier’s \c close function decrements the reference count for - * the identifier which normally closes the object, but if the + * the identifier, which normally closes the object, but if the * reference count for the identifier has been incremented with * H5Iinc_ref(), the object will only be closed when the reference * count reaches zero with further calls to this function or the @@ -348,15 +348,15 @@ H5_DLL int H5Iinc_ref(hid_t id); * If the object ID was created by a collective parallel call (such as * H5Dcreate(), H5Gopen(), etc.), the reference count should be * modified by all the processes which have copies of the ID. - * Generally this means that group, dataset, attribute, file and named + * Generally, this means that group, dataset, attribute, file and named * datatype IDs should be modified by all the processes and that all * other types of IDs are safe to modify by individual processes. * - * This function is of particular value when an application is - * maintaining multiple copies of an object ID. The object ID can be + * This function is of particular value when an application + * maintains multiple copies of an object ID. The object ID can be * incremented when a copy is made. Each copy of the ID can then be * safely closed or decremented and the HDF5 object will be closed - * when the reference count for that that object drops to zero. + * when the reference count for that object drops to zero. * * \since 1.6.2 * @@ -643,7 +643,7 @@ H5_DLL htri_t H5Itype_exists(H5I_type_t type); * \details Valid identifiers are those that have been obtained by an * application and can still be used to access the original target. * Examples of invalid identifiers include: - * \li Out of range values: negative, for example + * \li Out-of-range values: negative, for example * \li Previously-valid identifiers that have been released: * for example, a dataset identifier for which the dataset has * been closed @@ -651,7 +651,7 @@ H5_DLL htri_t H5Itype_exists(H5I_type_t type); * H5Iis_valid() can be used with any type of identifier: object * identifier, property list identifier, attribute identifier, error * message identifier, etc. When necessary, a call to H5Iget_type() - * can determine the type of the object that \p id identifies. + * can determine the type of object that the \p id identifies. * * \since 1.8.3 * diff --git a/src/H5Lpublic.h b/src/H5Lpublic.h index 6feefcd433f..653bf2738cb 100644 --- a/src/H5Lpublic.h +++ b/src/H5Lpublic.h @@ -209,7 +209,7 @@ H5_DLL herr_t H5Lmove(hid_t src_loc, const char *src_name, hid_t dst_loc, const * * H5Lcopy() retains the creation time and the target of the original * link. 
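The H5Iinc_ref()/H5Idec_ref() behavior described above, keeping multiple copies of an object identifier alive, can be sketched as follows. The helper keep_copy() and all object names are hypothetical.

#include "hdf5.h"

/* Account for a second "copy" of a dataset identifier by bumping its
 * reference count; each copy is then closed normally. */
static hid_t keep_copy(hid_t dset)
{
    H5Iinc_ref(dset);
    return dset;
}

int main(void)
{
    hsize_t dims[1] = {10};
    hid_t file  = H5Fcreate("refcount_demo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t space = H5Screate_simple(1, dims, NULL);
    hid_t dset  = H5Dcreate2(file, "/d", H5T_NATIVE_INT, space,
                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    hid_t copy  = keep_copy(dset);

    H5Dclose(dset); /* dataset stays open: the copy still holds a reference */
    H5Dclose(copy); /* reference count reaches zero, dataset really closes */

    H5Sclose(space);
    H5Fclose(file);
    return 0;
}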
However, since the link may be renamed, the character - * encoding is that specified in \p lcpl_id rather than that of the + * encoding is specified in \p lcpl_id rather than in that of the * original link. Other link creation properties are ignored. * * If the link is a soft link, also known as a symbolic link, its @@ -337,7 +337,7 @@ H5_DLL herr_t H5Lcreate_hard_async(hid_t cur_loc_id, const char *cur_name, hid_t * * For instance, if target_path is \c ./foo, \p link_loc_id specifies * \c ./x/y/bar, and the name of the new link is \c new_link, then a - * subsequent request for \c ./x/y/bar/new_link will return same the + * subsequent request for \c ./x/y/bar/new_link will return the same * object as would be found at \c ./foo. * * \note H5Lcreate_soft() is for use only if the target object is in the @@ -480,7 +480,7 @@ H5_DLL herr_t H5Ldelete_by_idx_async(hid_t loc_id, const char *group_name, H5_in * * \return \herr_t * - * \details H5Lget_val() returns the value of link \p name. For smbolic links, + * \details H5Lget_val() returns the value of link \p name. For symbolic links, * this is the path to which the link points, including the null * terminator. For external and user-defined links, it is the link * buffer. @@ -607,7 +607,7 @@ H5_DLL herr_t H5Lget_val_by_idx(hid_t loc_id, const char *group_name, H5_index_t * the link \c datasetD in the \c group group1/group2/softlink_to_group3/, * where \c group1 is a member of the group specified by \c loc_id: * - * 1. First use H5Lexists() to verify that \c group1 exists. + * 1. First, use H5Lexists() to verify that the \c group1 exists. * 2. If \c group1 exists, use H5Lexists() again, this time with name * set to \c group1/group2, to verify that \c group2 exists. * 3. If \c group2 exists, use H5Lexists() with name set to @@ -624,7 +624,7 @@ H5_DLL herr_t H5Lget_val_by_idx(hid_t loc_id, const char *group_name, H5_index_t * \c /group1/group2/softlink_to_group3, the first call to H5Lexists() * would have name set to \c /group1. * - * Note that this is an outline and does not include all necessary + * Note that this is an outline and does not include all the necessary * details. Depending on circumstances, for example, you may need to * verify that an intermediate link points to a group and that a soft * link points to an existing target. @@ -713,7 +713,7 @@ H5_DLL herr_t H5Lexists_async(hid_t loc_id, const char *name, hbool_t *exists, h * There will be additional valid values if user-defined links have * been registered. * - * \p corder specifies the link’s creation order position while + * \p corder specifies the link’s creation order position, while * \p corder_valid indicates whether the value in corder is valid. * * If \p corder_valid is \c TRUE, the value in \p corder is known to @@ -732,7 +732,7 @@ H5_DLL herr_t H5Lexists_async(hid_t loc_id, const char *name, hbool_t *exists, h * This value is set with #H5Pset_char_encoding. * * \c token is the location that a hard link points to, and - * \c val_size is the size of a soft link or user defined link value. + * \c val_size is the size of a soft link or user-defined link value. * H5O_token_t is used in the VOL layer. It is defined in H5public.h * as: * \snippet H5public.h H5O_token_t_snip @@ -1118,7 +1118,7 @@ H5_DLL herr_t H5Lvisit2(hid_t grp_id, H5_index_t idx_type, H5_iter_order_t order * (with an absolute name based in the file’s root group) or a group * relative to \p loc_id. If \p loc_id fully specifies the group that * is to serve as the root of the iteration, group_name should be '.' 
- * (a dot). (Note that when \p loc_id fully specifies the the group + * (a dot). (Note that when \p loc_id fully specifies the group * that is to serve as the root of the iteration, the user may wish to * consider using H5Lvisit2() instead of H5Lvisit_by_name2().) * @@ -1363,7 +1363,7 @@ H5_DLL herr_t H5Lunpack_elink_val(const void *ext_linkval /*in*/, size_t link_si * If that target file does not exist, the new \p file_name after * stripping will be \c A.h5. * - For Windows, there are 6 cases: - * -# \p file_name is an absolute drive with absolute pathname. + * -# \p file_name is an absolute drive with an absolute pathname. * For example, consider a \p file_name of \c /tmp/A.h5. If that * target file does not exist, the new \p file_name after * stripping will be \c A.h5. @@ -1371,16 +1371,16 @@ H5_DLL herr_t H5Lunpack_elink_val(const void *ext_linkval /*in*/, size_t link_si * name. For example, consider a \p file_name of \c /tmp/A.h5. * If that target file does not exist, the new \p file_name after * stripping will be \c A.h5. - * -# \p file_name is an absolute drive with relative pathname. + * -# \p file_name is an absolute drive with a relative pathname. * For example, consider a \p file_name of \c /tmp/A.h5. If that * target file does not exist, the new \p file_name after * stripping will be \c tmp\A.h5. * -# \p file_name is in UNC (Uniform Naming Convention) format with - * server name, share name, and pathname. For example, consider + * a server name, share name, and pathname. For example, consider * a \p file_name of \c /tmp/A.h5. If that target file does not * exist, the new \p file_name after stripping will be \c A.h5. * -# \p file_name is in Long UNC (Uniform Naming Convention) format - * with server name, share name, and pathname. For example, + * with a server name, share name, and pathname. For example, * consider a \p file_name of \c /tmp/A.h5. If that target file * does not exist, the new \p file_name after stripping will be * \c A.h5. @@ -1390,7 +1390,7 @@ H5_DLL herr_t H5Lunpack_elink_val(const void *ext_linkval /*in*/, size_t link_si * does not exist, the new \p file_name after stripping will be * \c A.h5. * - * The library opens target file \p file_name with the file access + * The library opens the target file \p file_name with the file access * property list that is set via H5Pset_elink_fapl() when the external * link link_name is accessed. If no such property list is set, the * library uses the file access property list associated with the file @@ -1879,7 +1879,7 @@ H5_DLL herr_t H5Lvisit1(hid_t grp_id, H5_index_t idx_type, H5_iter_order_t order * (with an absolute name based in the file’s root group) or a group * relative to \p loc_id. If \p loc_id fully specifies the group that * is to serve as the root of the iteration, group_name should be '.' - * (a dot). (Note that when \p loc_id fully specifies the the group + * (a dot). (Note that when \p loc_id fully specifies the group * that is to serve as the root of the iteration, the user may wish to * consider using H5Lvisit1() instead of H5Lvisit_by_name1().) 
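The H5Lvisit2()/H5Lvisit_by_name2() traversal described above boils down to supplying an operator callback; a minimal sketch follows, with a placeholder file name and a callback that only prints each link name.

#include <stdio.h>
#include "hdf5.h"

/* Link-visit operator: print the link name and return 0 so the
 * traversal continues. */
static herr_t print_link(hid_t group, const char *name, const H5L_info2_t *info, void *op_data)
{
    (void)group; (void)info; (void)op_data;
    printf("link: %s\n", name);
    return 0;
}

int main(void)
{
    hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

    /* Visit every link reachable from the root group, letting the
     * library pick the fastest traversal order. */
    H5Lvisit2(file, H5_INDEX_NAME, H5_ITER_NATIVE, print_link, NULL);

    H5Fclose(file);
    return 0;
}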
* diff --git a/src/H5MF.c b/src/H5MF.c index be71b2fc854..9f430c96042 100644 --- a/src/H5MF.c +++ b/src/H5MF.c @@ -3236,7 +3236,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled) * multi file drivers, as the self referential free space * manager header and section info can be stored in up to * two different files -- requiring that up to two EOA's - * be stored in the the free space managers super block + * be stored in the free space manager's superblock * extension message. * * As of this writing, we are solving this problem by diff --git a/src/H5Mpublic.h b/src/H5Mpublic.h index 86fe433c09f..f5874e8ea78 100644 --- a/src/H5Mpublic.h +++ b/src/H5Mpublic.h @@ -211,7 +211,7 @@ extern "C" { * \details H5Mcreate() creates a new map object for storing key-value * pairs. The in-file datatype for keys is defined by \p key_type_id * and the in-file datatype for values is defined by \p val_type_id. \p - * loc_id specifies the location to create the the map object and \p + * loc_id specifies the location to create the map object and \p * name specifies the name of the link to the map object relative to * \p loc_id. * diff --git a/src/H5Oalloc.c b/src/H5Oalloc.c index 17c1535ca15..84846788bbf 100644 --- a/src/H5Oalloc.c +++ b/src/H5Oalloc.c @@ -517,7 +517,7 @@ H5O__alloc_extend_chunk(H5F_t *f, H5O_t *oh, unsigned chunkno, size_t size, size HDassert(H5F_addr_defined(oh->chunk[chunkno].addr)); /* Test to see if the specified chunk ends with a null messages. - * If successful, set the index of the the null message in extend_msg. + * If successful, set the index of the null message in extend_msg. */ for (u = 0; u < oh->nmesgs; u++) { /* Check for null message at end of proper chunk */ diff --git a/src/H5Omodule.h b/src/H5Omodule.h index afb005b0c30..deb00bda57a 100644 --- a/src/H5Omodule.h +++ b/src/H5Omodule.h @@ -52,7 +52,7 @@ * * HDF5 objects are deleted as a side effect of rendering them unreachable * from the root group. The net effect is the diminution of the object's - * reference count to zero, which can (but should not usually) be effected + * reference count to zero, which can (but should not usually) be affected * by a function in this module. * * diff --git a/src/H5Opublic.h b/src/H5Opublic.h index 65e140f59f9..6fba5085948 100644 --- a/src/H5Opublic.h +++ b/src/H5Opublic.h @@ -246,7 +246,7 @@ extern "C" { * * H5Oopen() cannot be used to open a dataspace, attribute, property list, or file. * - * Once an object of unknown type has been opened with H5Oopen(), + * Once an object of an unknown type has been opened with H5Oopen(), * the type of that object can be determined by means of an H5Iget_type() call. * * \p loc_id may be a file, group, dataset, named datatype, or attribute. @@ -384,7 +384,7 @@ H5_DLL hid_t H5Oopen_by_idx_async(hid_t loc_id, const char *group_name, H5_inde * the \p loc_id and \p name combination exists. * \return Returns 0 if the object pointed to by * the \p loc_id and \p name combination does not exist. - * \return Returns a negatvie value when the function fails. + * \return Returns a negative value when the function fails. * * \details H5Oexists_by_name() allows an application to determine whether * the link \p name in the group or file specified with \p loc_id @@ -408,22 +408,22 @@ H5_DLL hid_t H5Oopen_by_idx_async(hid_t loc_id, const char *group_name, H5_inde * where \c group1 is a member of the group specified by \c loc_id: * * \par - * - First use H5Lexists() to verify that a link named \c group1 exists. 
+ * - First, use H5Lexists() to verify that a link named \c group1 exists. * - If \c group1 exists, use H5Oexists_by_name() to verify that the * link \c group1 resolves to an object. - * - If \c group1 exists, use H5Lexists() again, this time with name + * - If \c group1 exists, use H5Lexists() again, this time with the name * set to \c group1/group2, to verify that the link \c group2 exists * in \c group1. * - If the \c group2 link exists, use H5Oexists_by_name() to verify * that \c group1/group2 resolves to an object. - * - If \c group2 exists, use H5Lexists() again, this time with name + * - If \c group2 exists, use H5Lexists() again, this time with the name * set to \c group1/group2/softlink_to_group3, to verify that the * link \c softlink_to_group3 exists in \c group2. * - If the \c softlink_to_group3 link exists, use H5Oexists_by_name() * to verify that \c group1/group2/softlink_to_group3 resolves to * an object. * - If \c softlink_to_group3 exists, you can now safely use H5Lexists - * with name set to \c group1/group2/softlink_to_group3/datasetD to + * with the name set to \c group1/group2/softlink_to_group3/datasetD to * verify that the target link, \c datasetD, exists. * - And finally, if the link \c datasetD exists, use H5Oexists_by_name * to verify that \c group1/group2/softlink_to_group3/datasetD @@ -437,12 +437,12 @@ H5_DLL hid_t H5Oopen_by_idx_async(hid_t loc_id, const char *group_name, H5_inde * H5Lexists() would have name set to \c /group1. * * \par - * Note that this is an outline and does not include all necessary + * Note that this is an outline and does not include all the necessary * details. Depending on circumstances, for example, an application * may need to verify the type of an object also. * * \warning \Bold{Failure Modes:} - * \warning If \p loc_id and \p name both exist but the combination does not + * \warning If \p loc_id and \p name both exist, but the combination does not * resolve to an object, the function will return 0 (zero); * the function does not fail in this case. * \warning If either the location or the link specified by the \p loc_id @@ -492,7 +492,7 @@ H5_DLL htri_t H5Oexists_by_name(hid_t loc_id, const char *name, hid_t lapl_id); * \note If you are iterating through a lot of different objects to * retrieve information via the H5Oget_info() family of routines, * you may see memory building up. This can be due to memory - * allocation for metadata such as object headers and messages + * allocation for metadata, such as object headers and messages, * when the iterated objects are put into the metadata cache. * \note * If the memory buildup is not desirable, you can configure a @@ -722,7 +722,7 @@ H5_DLL herr_t H5Oget_native_info_by_idx(hid_t loc_id, const char *group_name, H5 * * \details H5Olink() creates a new hard link to an object in an HDF5 file. * \p new_loc_id and \p \p new_link_name specify the location and name of the - * new link while \p object_id identifies the object that the link + * new link, while \p object_id identifies the object that the link * points to. * * H5Olink() is designed for two purposes: @@ -792,7 +792,7 @@ H5_DLL herr_t H5Olink(hid_t obj_id, hid_t new_loc_id, const char *new_name, hid_ * An object’s reference count is the number of hard links in the * file that point to that object. See the “Programming Model” * section of the HDF5 Groups chapter in the -- \ref UG - * for a more complete discussion of reference counts. + * for a complete discussion of reference counts. 
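The step-by-step H5Lexists()/H5Oexists_by_name() verification outlined above can be wrapped in a small helper; link_resolves() and the group names below are hypothetical.

#include "hdf5.h"

/* Returns 1 only when the link exists and also resolves to an object,
 * mirroring the paired checks described above. */
static int link_resolves(hid_t loc, const char *path)
{
    if (H5Lexists(loc, path, H5P_DEFAULT) <= 0)
        return 0; /* no such link, or an error occurred */
    return H5Oexists_by_name(loc, path, H5P_DEFAULT) > 0;
}

int main(void)
{
    hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

    if (link_resolves(file, "group1") && link_resolves(file, "group1/group2")) {
        /* ... safe to continue with the deeper path components ... */
    }

    H5Fclose(file);
    return 0;
}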
* * If a user application needs to determine an object’s reference * count, an H5Oget_info() call is required; the reference count @@ -965,7 +965,7 @@ H5_DLL herr_t H5Ocopy_async(hid_t src_loc_id, const char *src_name, hid_t dst_lo * is overwritten. * * The target object is specified by an identifier, \p obj_id. - * If \p comment is the empty string or a null pointer, any existing + * If \p comment is an empty string or a null pointer, any existing * comment message is removed from the object. * * Comments should be relatively short, null-terminated, ASCII strings. @@ -1010,7 +1010,7 @@ H5_DLL herr_t H5Oset_comment(hid_t obj_id, const char *comment); * - An absolute name of the object, starting from \c /, the file’s root group * - A dot (\c .), if \p loc_id fully specifies the object * - * If \p comment is the empty string or a null pointer, any existing + * If \p comment is an empty string or a null pointer, any existing * comment message is removed from the object. * * Comments should be relatively short, null-terminated, ASCII strings. @@ -1064,7 +1064,7 @@ H5_DLL herr_t H5Oset_comment_by_name(hid_t loc_id, const char *name, const char * only \p bufsize bytes of the comment, without a \c NULL terminator, * are returned in \p comment. * - * If an object does not have a comment, the empty string is + * If an object does not have a comment, an empty string is * returned in \p comment. * * \version 1.8.11 Fortran subroutine introduced in this release. @@ -1112,7 +1112,7 @@ H5_DLL ssize_t H5Oget_comment(hid_t obj_id, char *comment, size_t bufsize); * only \p bufsize bytes of the comment, without a \c NULL terminator, * are returned in \p comment. * - * If an object does not have a comment, the empty string is + * If an object does not have a comment, an empty string is * returned in \p comment. * * \p lapl_id contains a link access property list identifier. A @@ -1259,7 +1259,7 @@ H5_DLL herr_t H5Ovisit3(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order * a file or an object in a file; if \p loc_id is an attribute identifier, * the object where the attribute is attached will be used. * \p obj_name specifies either an object in the file (with an absolute - * name based in the file’s root group) or an object name relative + * name based on the file’s root group) or an object name relative * to \p loc_id. If \p loc_id fully specifies the object that is to serve * as the root of the iteration, \p obj_name should be '\c .' (a dot). * (Note that when \p loc_id fully specifies the object that is to serve @@ -1374,7 +1374,7 @@ H5_DLL herr_t H5Oclose_async(hid_t object_id, hid_t es_id); * flushed to disk without removing the data from the cache. * * The object associated with \p object_id can be any named object in an - * HDF5 file including a dataset, a group, or a committed datatype. + * HDF5 file, including a dataset, a group, or a committed datatype. * * \warning H5Oflush doesn't work correctly with parallel. It causes an assertion * failure in metadata cache during H5Fclose(). @@ -1781,7 +1781,7 @@ typedef herr_t (*H5O_iterate1_t)(hid_t obj, const char *name, const H5O_info1_t * * The object’s address within the file, \p addr, is the byte offset of the first byte * of the object header from the beginning of the HDF5 file space, i.e., from the - * beginning of the super block (see the “HDF5 Storage Model” section of the The + * beginning of the superblock (see the “HDF5 Storage Model” section of the The * HDF5 Data Model and File Structure chapter of the HDF5 User's Guide.) 
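A short round trip through H5Oset_comment() and H5Oget_comment() as documented above; the file name, group name, and comment text are made up, and the 64-byte buffer is simply assumed to be large enough.

#include <stdio.h>
#include "hdf5.h"

int main(void)
{
    char comment[64];

    hid_t file  = H5Fcreate("comment_demo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t group = H5Gcreate2(file, "/G1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /* Attach a short ASCII comment, then read it back. */
    H5Oset_comment(group, "raw sensor data, station 1");
    if (H5Oget_comment(group, comment, sizeof(comment)) > 0)
        printf("comment: %s\n", comment);

    H5Gclose(group);
    H5Fclose(file);
    return 0;
}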
* * \p addr can be obtained via either of two function calls. H5Gget_objinfo() returns @@ -1800,7 +1800,7 @@ typedef herr_t (*H5O_iterate1_t)(hid_t obj, const char *name, const H5O_info1_t * overcome by retrieving the object address with H5Gget_objinfo() or H5Lget_info() * immediately before calling H5Oopen_by_addr(). The immediacy of the operation can be * important; if time has elapsed and the object has been deleted from the file, - * the address will be invalid and file corruption can result. + * the address will be invalid, and file corruption can result. * * \version 1.8.4 Fortran subroutine added in this release. * @@ -1829,7 +1829,7 @@ H5_DLL hid_t H5Oopen_by_addr(hid_t loc_id, haddr_t addr); * \note If you are iterating through a lot of different objects to * retrieve information via the H5Oget_info() family of routines, * you may see memory building up. This can be due to memory - * allocation for metadata such as object headers and messages + * allocation for metadata, such as object headers and messages, * when the iterated objects are put into the metadata cache. * \note * If the memory buildup is not desirable, you can configure a @@ -1967,7 +1967,7 @@ H5_DLL herr_t H5Oget_info_by_idx1(hid_t loc_id, const char *group_name, H5_index * \note If you are iterating through a lot of different objects to * retrieve information via the H5Oget_info() family of routines, * you may see memory building up. This can be due to memory - * allocation for metadata such as object headers and messages + * allocation for metadata, such as object headers and messages, * when the iterated objects are put into the metadata cache. * \note * If the memory buildup is not desirable, you can configure a @@ -2197,7 +2197,7 @@ H5_DLL herr_t H5Ovisit1(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order * a file or an object in a file; if \p loc_id is an attribute identifier, * the object where the attribute is attached will be used. * \p obj_name specifies either an object in the file (with an absolute - * name based in the file’s root group) or an object name relative + * name based on the file’s root group) or an object name relative * to \p loc_id. If \p loc_id fully specifies the object that is to serve * as the root of the iteration, \p obj_name should be '\c .' (a dot). 
* (Note that when \p loc_id fully specifies the object that is to serve diff --git a/src/H5Oshared.h b/src/H5Oshared.h index 07bc4a1b9ba..3280c4befdd 100644 --- a/src/H5Oshared.h +++ b/src/H5Oshared.h @@ -418,7 +418,7 @@ H5O_SHARED_POST_COPY_FILE(const H5O_loc_t H5_ATTR_NDEBUG_UNUSED *oloc_src, const HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, FAIL, "unable to update native message") #endif /* H5O_SHARED_POST_COPY_FILE_UPD */ - /* Make sure that if the the source or destination is committed, both are + /* Make sure that if the source or destination is committed, both are * committed */ HDassert((shared_src->type == H5O_SHARE_TYPE_COMMITTED) == (shared_dst->type == H5O_SHARE_TYPE_COMMITTED)); diff --git a/src/H5PLpublic.h b/src/H5PLpublic.h index 4595d22cb5f..c53053b1538 100644 --- a/src/H5PLpublic.h +++ b/src/H5PLpublic.h @@ -95,9 +95,9 @@ H5_DLL herr_t H5PLset_loading_state(unsigned int plugin_control_mask); * \brief Queries the loadability of dynamic plugin types * * \param[out] plugin_control_mask List of dynamic plugin types that are enabled or disabled.\n - * A plugin bit set to 0 (zero) indicates that that the dynamic plugin type is + * A plugin bit set to 0 (zero) indicates that the dynamic plugin type is * disabled.\n - * A plugin bit set to 1 (one) indicates that that the dynamic plugin type is + * A plugin bit set to 1 (one) indicates that the dynamic plugin type is * enabled.\n * If the value of \p plugin_control_mask is negative, all dynamic plugin * types are enabled.\n @@ -105,7 +105,7 @@ H5_DLL herr_t H5PLset_loading_state(unsigned int plugin_control_mask); * are disabled. * \return \herr_t * - * \details H5PLget_loading_state() retrieves the bitmask that controls whether a certain type of plugins + * \details H5PLget_loading_state() retrieves the bitmask that controls whether a certain type of plugin * (e.g.: filters, VOL drivers) will be loaded by the HDF5 library. * * Bit positions allocated to date are specified in \ref H5PL_type_t as follows: diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index 07d64bc2a95..8e9b680f270 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -5957,7 +5957,7 @@ H5Pset_page_buffer_size(hid_t plist_id, size_t buf_size, unsigned min_meta_perc, "Minimum metadata fractions must be between 0 and 100 inclusive") if (min_raw_perc > 100) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, - "Minimum rawdata fractions must be between 0 and 100 inclusive") + "Minimum raw data fractions must be between 0 and 100 inclusive") if (min_meta_perc + min_raw_perc > 100) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, @@ -5969,7 +5969,7 @@ H5Pset_page_buffer_size(hid_t plist_id, size_t buf_size, unsigned min_meta_perc, if (H5P_set(plist, H5F_ACS_PAGE_BUFFER_MIN_META_PERC_NAME, &min_meta_perc) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set percentage of min metadata entries") if (H5P_set(plist, H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_NAME, &min_raw_perc) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set percentage of min rawdata entries") + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set percentage of min raw data entries") done: FUNC_LEAVE_API(ret_value) diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index 24dc7544185..bb5d421a8e3 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -2419,7 +2419,7 @@ H5_DLL herr_t H5Pset_deflate(hid_t plist_id, unsigned level); * format. The filters will come into play only when dense storage * is used (see H5Pset_link_phase_change()) and will be applied to * the group’s fractal heap. 
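The H5Pset_page_buffer_size() checks in the H5Pfapl.c hunk above (the metadata and raw data minimum percentages must each stay between 0 and 100 and may not exceed 100 in total) correspond to a call along the following lines. The buffer size, the two percentages, and the file name are arbitrary, and paged aggregation is assumed to be enabled in the file creation property list via H5Pset_file_space_strategy().

#include "hdf5.h"

int main(void)
{
    /* Paged aggregation has to be selected at file creation time for
     * the page buffer to be used at all. */
    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1);

    /* 1 MiB page buffer; reserve at least 30% of it for metadata pages
     * and 30% for raw data pages (the two minimums may not add up to
     * more than 100). */
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_page_buffer_size(fapl, 1024 * 1024, 30, 30);

    hid_t file = H5Fcreate("paged.h5", H5F_ACC_TRUNC, fcpl, fapl);

    H5Fclose(file);
    H5Pclose(fapl);
    H5Pclose(fcpl);
    return 0;
}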
The fractal heap will contain most of - * the the group’s link metadata, including link names. + * the group’s link metadata, including link names. * * \note When working with group creation property lists, if you are * adding a filter that is not in HDF5’s set of predefined filters, @@ -3268,7 +3268,7 @@ H5_DLL herr_t H5Pget_cache(hid_t plist_id, int *mdc_nelmts, /* out */ * after the call to H5Pset_fapl_core(). It is an error to use this * function with any other VFD. * - * \note This function only applies to the backing store write operation + * \note This function only applies to the backing store write operation, * which typically occurs when the file is flushed or closed. This * function has no relationship to the increment parameter passed * to H5Pset_fapl_core(). @@ -3315,7 +3315,7 @@ H5_DLL herr_t H5Pget_core_write_tracking(hid_t fapl_id, hbool_t *is_enabled, siz * * * - * * @@ -3366,7 +3366,7 @@ H5_DLL herr_t H5Pget_core_write_tracking(hid_t fapl_id, hbool_t *is_enabled, siz * * * * @@ -3947,7 +3947,7 @@ H5_DLL herr_t H5Pget_metadata_read_attempts(hid_t plist_id, unsigned *attempts); * for more information. * * The type of data returned in \p type will be one of those - * listed in the discussion of the \p type parameter in the the + * listed in the discussion of the \p type parameter in the * description of the function H5Pset_multi_type(). * * Use of this function is only appropriate for an HDF5 file @@ -4106,7 +4106,7 @@ H5_DLL herr_t H5Pget_vol_info(hid_t plist_id, void **vol_info); * \details H5Pset_alignment() sets the alignment properties of a * file access property list so that any file object greater * than or equal in size to \p threshold bytes will be aligned - * on an address which is a multiple of \p alignment. The + * on an address that is a multiple of \p alignment. The * addresses are relative to the end of the user block; the * alignment is calculated by subtracting the user block size * from the absolute file address and then adjusting the address @@ -4116,7 +4116,7 @@ H5_DLL herr_t H5Pget_vol_info(hid_t plist_id, void **vol_info); * implying no alignment. Generally the default values will * result in the best performance for single-process access to * the file. For MPI IO and other parallel systems, choose an - * alignment which is a multiple of the disk block size. + * alignment that is a multiple of the disk block size. * * If the file space handling strategy is set to * #H5F_FSPACE_STRATEGY_PAGE, then the alignment set via this @@ -4154,7 +4154,7 @@ H5_DLL herr_t H5Pset_alignment(hid_t fapl_id, hsize_t threshold, hsize_t alignme * penalized when determining which chunks to flush * from cache. A value of 0 means fully read or * written chunks are treated no differently than - * other chunks (the preemption is strictly LRU) + * other chunks (the preemption is strictly LRU), * while a value of 1 means fully read or written * chunks are always preempted before other chunks. * If your application only reads or writes data once, @@ -4176,7 +4176,7 @@ H5_DLL herr_t H5Pset_alignment(hid_t fapl_id, hsize_t threshold, hsize_t alignme * computing a hash value using the address of a chunk and then by * using that hash value as the chunk’s index into the table of * cached chunks. In other words, the size of this hash table and the - * number of possible hash values is determined by the \p rdcc_nslots + * number of possible hash values are determined by the \p rdcc_nslots * parameter. 
If a different chunk in the cache has the same hash value, * a collision will occur, which will reduce efficiency. If inserting * the chunk into the cache would cause the cache to be too big, then @@ -5341,7 +5341,7 @@ H5_DLL herr_t H5Pget_vol_cap_flags(hid_t plist_id, uint64_t *cap_flags); /** * \ingroup GAPL * - * \brief Sets metadata I/O mode for read operations to collective or independent (default) + * \brief Sets metadata I/O mode for read operations to be collective or independent (default) * * \gacpl_id * \param[in] is_collective Boolean value indicating whether metadata reads are collective @@ -5430,7 +5430,7 @@ H5_DLL herr_t H5Pget_all_coll_metadata_ops(hid_t plist_id, hbool_t *is_collectiv /** * \ingroup FAPL * - * \brief Sets metadata write mode to collective or independent (default) + * \brief Sets metadata write mode to be collective or independent (default) * * \fapl_id{plist_id} * \param[out] is_collective Boolean value indicating whether metadata @@ -6699,7 +6699,7 @@ H5_DLL herr_t H5Pset_scaleoffset(hid_t plist_id, H5Z_SO_scale_type_t scale_type, * Specifically, a dataset with a datatype that is 8-, 16-, 32-, or * 64-bit signed or unsigned integer; char; or 32- or 64-bit float * can be compressed with SZIP. See Note, below, for further - * discussion of the the SZIP \p bits_per_pixel setting. + * discussion of the SZIP \p bits_per_pixel setting. * * SZIP options are passed in an options mask, \p options_mask, * as follows. diff --git a/src/H5Rpublic.h b/src/H5Rpublic.h index 856a445bf41..dfeffda1600 100644 --- a/src/H5Rpublic.h +++ b/src/H5Rpublic.h @@ -259,7 +259,7 @@ H5_DLL herr_t H5Rdestroy(H5R_ref_t *ref_ptr); * \snippet this H5R_type_t_snip * * Note that #H5R_OBJECT1 and #H5R_DATASET_REGION1 can never be - * associated to an \ref H5R_ref_t reference and can therefore never be + * associated with an \ref H5R_ref_t reference and can, therefore, never be * returned through that function. * * \ref H5R_ref_t is defined in H5Rpublic.h as: @@ -302,7 +302,7 @@ H5_DLL htri_t H5Requal(const H5R_ref_t *ref1_ptr, const H5R_ref_t *ref2_ptr); * \return \herr_t * * \details H5Rcopy() creates a copy of an existing reference. - * \p src_ref_ptr points to the reference to copy and \p dst_ref_ptr is the + * \p src_ref_ptr points to the reference to copy, and \p dst_ref_ptr is the * pointer to the destination reference. * */ @@ -336,7 +336,7 @@ H5_DLL herr_t H5Rcopy(const H5R_ref_t *src_ref_ptr, H5R_ref_t *dst_ref_ptr); * * The object opened with this function should be closed when it * is no longer needed so that resource leaks will not develop. Use - * the appropriate close function such as H5Oclose() or H5Dclose() + * the appropriate close function, such as H5Oclose() or H5Dclose() * for datasets. * */ @@ -628,7 +628,7 @@ H5_DLL ssize_t H5Rget_attr_name(const H5R_ref_t *ref_ptr, char *name, size_t siz * * A \Emph{reference type} is the type of reference, either an object * reference or a dataset region reference. An \Emph{object reference} - * points to an HDF5 object while a \Emph{dataset region reference} + * points to an HDF5 object, while a \Emph{dataset region reference} * points to a defined region within a dataset. * * The \Emph{referenced object} is the object the reference points @@ -700,7 +700,7 @@ H5_DLL H5G_obj_t H5Rget_obj_type1(hid_t id, H5R_type_t ref_type, const void *ref * * The object opened with this function should be closed when it is no * longer needed so that resource leaks will not develop. 
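The rdcc_nslots/rdcc_w0 discussion above maps onto an H5Pset_cache() call along these lines; the slot count, cache size, and w0 value are arbitrary examples, and the first argument (mdc_nelmts) is ignored by recent library versions.

#include "hdf5.h"

int main(void)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    /* 10007 hash slots (a prime, to spread chunk addresses across the
     * table), a 32 MiB raw data chunk cache, and w0 = 0.75 so fully
     * read or written chunks are preempted before partially used ones. */
    H5Pset_cache(fapl, 0, 10007, 32 * 1024 * 1024, 0.75);

    hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, fapl);

    H5Fclose(file);
    H5Pclose(fapl);
    return 0;
}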
Use the - * appropriate close function such as H5Oclose() or H5Dclose() for + * appropriate close function, such as H5Oclose() or H5Dclose() for * datasets. * * \version 1.10.0 Function H5Rdereference() renamed to H5Rdereference1() and @@ -837,7 +837,7 @@ H5_DLL herr_t H5Rget_obj_type2(hid_t id, H5R_type_t ref_type, const void *ref, H * * The object opened with this function should be closed when it is no * longer needed so that resource leaks will not develop. Use the - * appropriate close function such as H5Oclose() or H5Dclose() for + * appropriate close function, such as H5Oclose() or H5Dclose() for * datasets. * * \since 1.10.0 diff --git a/src/H5Sall.c b/src/H5Sall.c index eb9a4d786f5..20c9a20a332 100644 --- a/src/H5Sall.c +++ b/src/H5Sall.c @@ -1109,7 +1109,7 @@ H5S__all_project_simple(const H5S_t H5_ATTR_UNUSED *base_space, H5S_t *new_space NAME H5S_select_all PURPOSE - Specify the the entire extent is selected + Specify the entire extent is selected USAGE herr_t H5S_select_all(dsid) hid_t dsid; IN: Dataspace ID of selection to modify @@ -1152,7 +1152,7 @@ H5S_select_all(H5S_t *space, hbool_t rel_prev) NAME H5Sselect_all PURPOSE - Specify the the entire extent is selected + Specify the entire extent is selected USAGE herr_t H5Sselect_all(dsid) hid_t dsid; IN: Dataspace ID of selection to modify diff --git a/src/H5Shyper.c b/src/H5Shyper.c index 2399937d5e0..e749ee91d9d 100644 --- a/src/H5Shyper.c +++ b/src/H5Shyper.c @@ -9230,7 +9230,7 @@ H5S__check_spans_overlap(const H5S_hyper_span_info_t *spans1, const H5S_hyper_sp owned by the result. If not, the 2nd span list has to be copied. hbool_t *span2_owned; OUT: Indicates if the 2nd span list is actually owned - H5S_t **result; OUT: The dataspace containing the the new selection. It + H5S_t **result; OUT: The dataspace containing the new selection. It could be same with the 1st dataspace. RETURNS Non-negative on success, negative on failure diff --git a/src/H5Spublic.h b/src/H5Spublic.h index f8fed47aef3..871a8e7e035 100644 --- a/src/H5Spublic.h +++ b/src/H5Spublic.h @@ -1006,7 +1006,7 @@ H5_DLL herr_t H5Sselect_copy(hid_t dst_id, hid_t src_id); * The \p coord parameter is a pointer to a buffer containing a * serialized 2-dimensional array of size \p num_elements by the * rank of the dataspace. The array lists dataset elements in the - * point selection; that is, it’s a list of of zero-based values + * point selection; that is, it’s a list of zero-based values * specifying the coordinates in the dataset of the selected * elements. The order of the element coordinates in the \p coord * array specifies the order in which the array elements are @@ -1048,7 +1048,7 @@ H5_DLL herr_t H5Sselect_copy(hid_t dst_id, hid_t src_id); * * In the 1D case, we will be selecting five points and a 1D * dataspace has rank 1, so the selection will be described in a - * 5-by-1 array. To select the 1st, 14th, 17th, 23rd, 8th elements + * 5-by-1 array. To select the 1st, 14th, 17th, 23rd and 8th elements * of the dataset, the selection array would be as follows * (remembering that point coordinates are zero-based): * \n 0 @@ -1320,9 +1320,9 @@ H5_DLL herr_t H5Sset_extent_none(hid_t space_id); * \details H5Sset_extent_simple() sets or resets the size of an existing * dataspace. * - * \p dims is an array of size \p rank which contains the new size + * \p dims is an array of size \p rank that contains the new size * of each dimension in the dataspace. 
\p max is an array of size - * \p rank which contains the maximum size of each dimension in + * \p rank that contains the maximum size of each dimension in * the dataspace. * * Any previous extent is removed from the dataspace, the dataspace diff --git a/src/H5Tenum.c b/src/H5Tenum.c index af5812dfea7..bf9b6710aa5 100644 --- a/src/H5Tenum.c +++ b/src/H5Tenum.c @@ -333,7 +333,7 @@ H5Tenum_nameof(hid_t type, const void *value, char *name /*out*/, size_t size) /*------------------------------------------------------------------------- * Function: H5T__enum_nameof * - * Purpose: Finds the symbol name that corresponds the the specified + * Purpose: Finds the symbol name that corresponds to the specified * VALUE of an enumeration data type DT. At most SIZE characters * of the symbol name are copied into the NAME buffer. If the * entire symbol name and null terminator do not fit in the NAME @@ -470,7 +470,7 @@ H5Tenum_valueof(hid_t type, const char *name, void *value /*out*/) /*------------------------------------------------------------------------- * Function: H5T__enum_valueof * - * Purpose: Finds the value that corresponds the the specified symbol + * Purpose: Finds the value that corresponds to the specified symbol * NAME of an enumeration data type DT and copy it to the VALUE * result buffer. The VALUE should be allocated by the caller to * be large enough for the result. diff --git a/src/H5Tpublic.h b/src/H5Tpublic.h index 16172a872b0..92ec134efa6 100644 --- a/src/H5Tpublic.h +++ b/src/H5Tpublic.h @@ -1069,7 +1069,7 @@ H5_DLL hid_t H5Tcreate(H5T_class_t type, size_t size); * dataset's datatype. * * The returned datatype identifier should be released with H5Tclose() - * to prevent resource leak. + * to prevent resource leaks. * */ H5_DLL hid_t H5Tcopy(hid_t type_id); @@ -1158,7 +1158,7 @@ H5_DLL herr_t H5Tlock(hid_t type_id); * * \p loc_id may be a file identifier, or a group identifier within * that file. \p name may be either an absolute path in the file or - * a relative path from \p loc_id naming the newly-commited datatype. + * a relative path from \p loc_id naming the newly-committed datatype. * * The link creation property list, \p lcpl_id, governs creation of * the link(s) by which the new committed datatype is accessed and @@ -1235,7 +1235,7 @@ H5_DLL hid_t H5Topen_async(hid_t loc_id, const char *name, hid_t tapl_id, hid_t /** * \ingroup H5T * - * \brief Commits a transient datatype to a file, creating a new named + * \brief Commits a transient datatype to a file, creating a newly named * datatype, but does not link it into the file structure * * \fg_loc_id @@ -1365,7 +1365,7 @@ H5_DLL herr_t H5Tencode(hid_t obj_id, void *buf, size_t *nalloc); /** * \ingroup H5T * - * \brief Decodes a binary object description of datatype and return a new + * \brief Decodes a binary object description of datatype and returns a new * object handle * * \param[in] buf Buffer for the datatype object to be decoded @@ -1437,7 +1437,7 @@ H5_DLL herr_t H5Tflush(hid_t type_id); * contents from disk. * * This function essentially closes the datatype, evicts all - * metadata associated with it from the cache, and then re-opens the + * metadata associated with it from the cache, and then reopens the * datatype. The reopened datatype is automatically re-registered * with the same identifier. * @@ -2263,7 +2263,7 @@ H5_DLL htri_t H5Tis_variable_str(hid_t type_id); * \details H5Tget_native_type() returns the equivalent native datatype * identifier for the datatype specified by \p type_id. 
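A sketch of the H5Tget_native_type() pattern documented above, converting the stored datatype before an H5Dread() call; the file and dataset names are placeholders and error checking is omitted.

#include <stdlib.h>
#include "hdf5.h"

int main(void)
{
    hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    hid_t dset = H5Dopen2(file, "/dset", H5P_DEFAULT);

    /* Ask for the in-memory equivalent of the on-disk datatype and
     * read with it, instead of hand-picking a native type. */
    hid_t    ftype   = H5Dget_type(dset);
    hid_t    mtype   = H5Tget_native_type(ftype, H5T_DIR_ASCEND);
    hid_t    space   = H5Dget_space(dset);
    hssize_t npoints = H5Sget_simple_extent_npoints(space);
    void    *buf     = malloc((size_t)npoints * H5Tget_size(mtype));

    H5Dread(dset, mtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);

    free(buf);
    H5Sclose(space);
    H5Tclose(mtype);
    H5Tclose(ftype);
    H5Dclose(dset);
    H5Fclose(file);
    return 0;
}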
* - * H5Tget_native_type() is designed primarily to facilitate use of + * H5Tget_native_type() is designed primarily to facilitate the use of * the H5Dread() function, for which users otherwise must undertake a * multi-step process to determine the native datatype of a dataset * prior to reading it into memory. This function can be used for @@ -2321,7 +2321,7 @@ H5_DLL htri_t H5Tis_variable_str(hid_t type_id); * * \note Please note that a datatype is actually an object * identifier or handle returned from opening the datatype. It - * is not persistent and its value can be different from one HDF5 + * is not persistent, and its value can be different from one HDF5 * session to the next. * * \note H5Tequal() can be used to compare datatypes. @@ -2342,14 +2342,14 @@ H5_DLL hid_t H5Tget_native_type(hid_t type_id, H5T_direction_t direction); * \brief Sets size for a datatype. * * \type_id - * \param[in] size New datatype size is bytes or #H5T_VARIABLE + * \param[in] size New datatype size in bytes or #H5T_VARIABLE * * \return \herr_t * * \details H5Tset_size() sets the total size, \p size, in bytes, for a * datatype. * - * \p size must have a positive value, unless it is passed in as + * \p size must have a positive value unless it is passed in as * #H5T_VARIABLE and the datatype is a string datatype. * * \li Numeric datatypes: If the datatype is atomic and the size @@ -2361,7 +2361,7 @@ H5_DLL hid_t H5Tget_native_type(hid_t type_id, H5T_direction_t direction); * * \li String or character datatypes: The size set for a string * datatype should include space for the null-terminator character, - * otherwise it will not be stored on (or retrieved from) + * otherwise it will not be stored on (or retrieved from) the * disk. Adjusting the size of a string automatically sets the * precision to \p 8*size. * @@ -2767,7 +2767,7 @@ H5_DLL herr_t H5Treclaim(hid_t type_id, hid_t space_id, hid_t plist_id, void *bu /** * \ingroup H5T * - * \brief Commits a transient datatype to a file, creating a new named datatype + * \brief Commits a transient datatype to a file, creating a newly named datatype * * \fg_loc_id * \param[in] name Name given to committed datatype diff --git a/src/H5Zdevelop.h b/src/H5Zdevelop.h index 346eb0e9c43..cc53824a714 100644 --- a/src/H5Zdevelop.h +++ b/src/H5Zdevelop.h @@ -227,12 +227,12 @@ extern "C" { * descriptive name for the filter, and may be the null pointer. * * \c can_apply, described in detail below, is a user-defined callback - * function which determines whether the combination of the dataset + * function that determines whether the combination of the dataset * creation property list values, the datatype, and the dataspace * represent a valid combination to apply this filter to. * * \c set_local, described in detail below, is a user-defined callback - * function which sets any parameters that are specific to this + * function that sets any parameters that are specific to this * dataset, based on the combination of the dataset creation property * list values, the datatype, and the dataspace. * @@ -242,9 +242,9 @@ extern "C" { * The statistics associated with a filter are not reset by this * function; they accumulate over the life of the library. * - * #H5Z_class_t is a macro which maps to either H5Z_class1_t or + * #H5Z_class_t is a macro that maps to either H5Z_class1_t or * H5Z_class2_t, depending on the needs of the application. To affect - * only this macro, H5Z_class_t_vers may be defined to either 1 or 2. 
+ * only this macro, H5Z_class_t_vers may be defined as either 1 or 2. * Otherwise, it will behave in the same manner as other API * compatibility macros. See API Compatibility Macros in HDF5 for more * information. H5Z_class1_t matches the #H5Z_class_t structure that is @@ -261,15 +261,15 @@ extern "C" { * defined as described in the HDF5 library header file H5Zpublic.h. * * When a filter is applied to the fractal heap for a group (e.g., - * when compressing group metadata) and if the can apply and set local - * callback functions have been defined for that filter, HDF5 passes + * when compressing group metadata) and if they can apply and set local + * callback functions that have been defined for that filter, HDF5 passes * the value -1 for all parameters for those callback functions. This * is done to ensure that the filter will not be applied to groups if * it relies on these parameters, as they are not applicable to group * fractal heaps; to operate on group fractal heaps, a filter must be * capable of operating on an opaque block of binary data. * - * The \Emph{can apply} callback function must return a positive value + * The \Emph{can-apply} callback function must return a positive value * for a valid combination, zero for an invalid combination, and a * negative value for an error. * \snippet this H5Z_can_apply_func_t_snip @@ -302,9 +302,9 @@ extern "C" { * \Emph{set local} callback functions for any filters used in the * dataset creation property list are called. These callbacks receive * \c dcpl_id, the dataset's private copy of the dataset creation - * property list passed in to H5Dcreate() (i.e. not the actual - * property list passed in to H5Dcreate()); \c type_id, the datatype - * identifier passed in to H5Dcreate(), which is not copied and should + * property list passed into H5Dcreate() (i.e. not the actual + * property list passed into H5Dcreate()); \c type_id, the datatype + * identifier passed into H5Dcreate(), which is not copied and should * not be modified; and \c space_id, a dataspace describing the chunk * (for chunked dataset storage), which should also not be modified. * @@ -345,13 +345,13 @@ extern "C" { * will work in many cases, but if there is a mismatch between the * memory allocators used in the library and any filter that * reallocates a buffer, there could be problems. This is most often - * the case with Windows and/or when debug memory allocators are being + * the case with Windows and/or when debugging memory allocators are being * used. In both cases, the "state" of the memory allocator lies in * different libraries and will get corrupted if you allocate in one * library and free in another. Windows adds the C standard library - * via dlls that can vary with Visual Studio version and debug vs + * via dlls that can vary with Visual Studio version and debug vs. * release builds. Static links to the MSVC CRT can also introduce - * new memory allocator state. + * a new memory allocator state. * * The library does provide H5allocate_memory() and H5free_memory() * functions that will use the library's allocation and free functions, @@ -398,7 +398,7 @@ H5_DLL herr_t H5Zregister(const void *cls); * sure that all cached data that may use this filter are written out. * * If the application is a parallel program, all processes that - * participate in collective data write should call this function to + * participate in collective data writing should call this function to * ensure that all data is flushed. 
* * After a call to H5Zunregister(), the filter specified in filter diff --git a/src/H5Zmodule.h b/src/H5Zmodule.h index ec21e50f77e..57e08c1dd50 100644 --- a/src/H5Zmodule.h +++ b/src/H5Zmodule.h @@ -84,7 +84,7 @@ * * The HDF5 library does not support filters for contiguous datasets because of * the difficulty of implementing random access for partial I/O. Compact dataset - * filters are not supported because it would not produce significant results. + * filters are not supported because they would not produce significant results. * * Filter identifiers for the filters distributed with the HDF5 * Library are as follows: diff --git a/src/H5Zpublic.h b/src/H5Zpublic.h index ce67cd732d5..a63729e5745 100644 --- a/src/H5Zpublic.h +++ b/src/H5Zpublic.h @@ -268,7 +268,7 @@ H5_DLL htri_t H5Zfilter_avail(H5Z_filter_t id); * \details H5Zget_filter_info() retrieves information about a filter. At * present, this means that the function retrieves a filter's * configuration flags, indicating whether the filter is configured to - * decode data, to encode data, neither, or both. + * decode data, encode data, neither, or both. * * If \p filter_config_flags is not set to NULL prior to the function * call, the returned parameter contains a bit field specifying the @@ -305,8 +305,8 @@ H5_DLL htri_t H5Zfilter_avail(H5Z_filter_t id); * to read an existing file encoded with that filter. * * This function should be called, and the returned \p - * filter_config_flags analyzed, before calling any other function, - * such as H5Pset_szip() , that might require a particular filter + * filter_config_flags should be analyzed, before calling any other function, + * such as H5Pset_szip(), that might require a particular filter * configuration. * * \since 1.6.3 diff --git a/src/H5public.h b/src/H5public.h index 5b6bce8bf17..895c07028bc 100644 --- a/src/H5public.h +++ b/src/H5public.h @@ -421,7 +421,7 @@ extern "C" { * issued. If one finds that an HDF5 library function is failing * inexplicably, H5open() can be called first. It is safe to call * H5open() before an application issues any other function calls to - * the HDF5 library as there are no damaging side effects in calling + * the HDF5 library, as there are no damaging side effects in calling * it more than once. */ H5_DLL herr_t H5open(void); @@ -434,13 +434,13 @@ H5_DLL herr_t H5open(void); * * \details H5atclose() registers a callback that the HDF5 library will invoke * when closing. The full capabilities of the HDF5 library are - * available to callbacks invoked through this mechanism, library + * available to callbacks invoked through this mechanism, and library * shutdown will only begin in earnest when all callbacks have been * invoked and have returned. * * Registered callbacks are invoked in LIFO order, similar to the * Standard C 'atexit' routine. For example, if 'func1' is registered, - * then 'func2', when the library is closing 'func2' will + * then 'func2', when the library is closing 'func2', will * be invoked first, then 'func1'. * * The \p ctx pointer will be passed to \p func when it's invoked. @@ -474,13 +474,13 @@ H5_DLL herr_t H5close(void); * function is in situations where the library is dynamically linked * into an application and is un-linked from the application before * exit() gets called. In those situations, a routine installed with - * atexit() would jump to a routine which was no longer in memory, + * atexit() would jump to a routine that was no longer in memory, * causing errors. 
* * \attention In order to be effective, this routine \Emph{must} be called * before any other HDF5 function calls, and must be called each * time the library is loaded/linked into the application (the first - * time and after it's been un-loaded). + * time and after it's been unloaded). */ H5_DLL herr_t H5dont_atexit(void); /** @@ -492,7 +492,7 @@ H5_DLL herr_t H5dont_atexit(void); * of the library, freeing any unused memory. * * It is not required that H5garbage_collect() be called at any - * particular time; it is only necessary in certain situations where + * particular time; it is only necessary for certain situations where * the application has performed actions that cause the library to * allocate many objects. The application should call * H5garbage_collect() if it eventually releases those objects and @@ -678,7 +678,7 @@ H5_DLL herr_t H5is_library_threadsafe(hbool_t *is_ts); * \param[in] mem Buffer to be freed. Can be NULL * \return \herr_t * - * \details H5free_memory() frees memory that has been allocated by the caller + * \details H5free_memory() frees the memory that has been allocated by the caller * with H5allocate_memory() or by the HDF5 library on behalf of the * caller. * @@ -728,7 +728,7 @@ H5_DLL herr_t H5free_memory(void *mem); * initialized. * * This function is intended to have the semantics of malloc() and - * calloc(). However, unlike malloc() and calloc() which allow for a + * calloc(). However, unlike malloc() and calloc(), which allow for a * "special" pointer to be returned instead of NULL, this function * always returns NULL on failure or when size is set to 0 (zero). * @@ -740,7 +740,7 @@ H5_DLL herr_t H5free_memory(void *mem); * the same library that initially allocated it. In most cases, the * HDF5 API uses resources that are allocated and freed either * entirely by the user or entirely by the library, so this is not a - * problem. In rare cases, however, HDF5 API calls will free memory + * problem. In rare cases, however, HDF5 API calls will free the memory * that the user allocated. 
This function allows the user to safely * allocate this memory.\n * It is particularly important to use this function to allocate From 668c23bb293e3fa30306632836ebe3cc7b100fc9 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Mon, 13 Mar 2023 16:23:33 -0500 Subject: [PATCH 073/231] GH issue #1142, print must have verbose on first (#2540) --- tools/src/h5repack/h5repack.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/src/h5repack/h5repack.c b/tools/src/h5repack/h5repack.c index 21a5883dadc..c58943b6948 100644 --- a/tools/src/h5repack/h5repack.c +++ b/tools/src/h5repack/h5repack.c @@ -530,10 +530,12 @@ copy_attr(hid_t loc_in, hid_t loc_out, named_dt_t **named_dt_head_p, trav_table_ buf = NULL; } /*H5T_REFERENCE*/ - if (options->verbose == 2) - HDprintf(FORMAT_OBJ_ATTR_TIME, "attr", read_time, write_time, name); - else - HDprintf(FORMAT_OBJ_ATTR, "attr", name); + if (options->verbose > 0) { + if (options->verbose == 2) + HDprintf(FORMAT_OBJ_ATTR_TIME, "attr", read_time, write_time, name); + else + HDprintf(FORMAT_OBJ_ATTR, "attr", name); + } /*--------------------------------------------------------------------- * close From a1078be80164f9ae48102acc1c59bef4bd3d17ef Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Mon, 13 Mar 2023 16:24:15 -0500 Subject: [PATCH 074/231] Add concurrency option to cancel in-progress jobs (#2539) --- .github/workflows/main.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 07104f40efe..35c0d6430b7 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -14,7 +14,12 @@ on: - 'ACKNOWLEDGEMENTS' - 'COPYING**' - '**.md' - + +# Using concurrency to cancel any in-progress job or run +concurrency: + group: ${{ github.ref }} + cancel-in-progress: true + # A workflow run is made up of one or more jobs that can run sequentially or # in parallel. We just have one job, but the matrix items defined below will # run in parallel. 
From f44d66f9e4dac6511f05f939e586babc5cc6fab7 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Mon, 13 Mar 2023 16:24:49 -0500 Subject: [PATCH 075/231] Make sure that ONLY_SHARED_LIBS option is correctly set (#2544) --- CMakeLists.txt | 28 ++++++++++++---------- c++/src/CMakeLists.txt | 8 +++---- fortran/examples/CMakeLists.txt | 4 ++-- fortran/src/CMakeLists.txt | 24 +++++++++---------- hl/c++/src/CMakeLists.txt | 8 +++---- hl/fortran/src/CMakeLists.txt | 18 +++++++------- hl/src/CMakeLists.txt | 8 +++---- hl/test/CMakeLists.txt | 2 +- hl/tools/gif2h5/CMakeLists.txt | 10 ++++---- hl/tools/h5watch/CMakeLists.txt | 8 +++---- src/CMakeLists.txt | 8 +++---- test/CMakeLists.txt | 4 ++-- test/CMakeTests.cmake | 2 +- tools/lib/CMakeLists.txt | 6 ++--- tools/libtest/CMakeLists.txt | 2 +- tools/src/h5copy/CMakeLists.txt | 4 ++-- tools/src/h5diff/CMakeLists.txt | 6 ++--- tools/src/h5dump/CMakeLists.txt | 4 ++-- tools/src/h5format_convert/CMakeLists.txt | 4 ++-- tools/src/h5import/CMakeLists.txt | 4 ++-- tools/src/h5jam/CMakeLists.txt | 4 ++-- tools/src/h5ls/CMakeLists.txt | 4 ++-- tools/src/h5perf/CMakeLists.txt | 4 ++-- tools/src/h5repack/CMakeLists.txt | 4 ++-- tools/src/h5stat/CMakeLists.txt | 4 ++-- tools/src/misc/CMakeLists.txt | 4 ++-- tools/test/h5copy/CMakeLists.txt | 2 +- tools/test/h5diff/CMakeLists.txt | 2 +- tools/test/h5dump/CMakeLists.txt | 2 +- tools/test/h5format_convert/CMakeLists.txt | 4 ++-- tools/test/h5import/CMakeLists.txt | 2 +- tools/test/h5jam/CMakeLists.txt | 6 ++--- tools/test/h5repack/CMakeLists.txt | 6 ++--- tools/test/h5stat/CMakeLists.txt | 2 +- tools/test/misc/CMakeLists.txt | 24 ++++++------------- tools/test/misc/vds/CMakeLists.txt | 2 +- utils/tools/h5dwalk/CMakeLists.txt | 4 ++-- 37 files changed, 117 insertions(+), 125 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 46ef959e55a..ba0e51b01ed 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -456,28 +456,30 @@ endif () #----------------------------------------------------------------------------- # Option to Build Shared and Static libs, default is both #----------------------------------------------------------------------------- +option (ONLY_SHARED_LIBS "Only Build Shared Libraries" OFF) +mark_as_advanced (ONLY_SHARED_LIBS) option (BUILD_STATIC_LIBS "Build Static Libraries" ON) set (H5_ENABLE_STATIC_LIB NO) option (BUILD_SHARED_LIBS "Build Shared Libraries" ON) set (H5_ENABLE_SHARED_LIB NO) -option (ONLY_SHARED_LIBS "Only Build Shared Libraries" OFF) -mark_as_advanced (ONLY_SHARED_LIBS) -if (BUILD_STATIC_LIBS) - set (H5_ENABLE_STATIC_LIB YES) -endif () -if (BUILD_SHARED_LIBS) - set (H5_ENABLE_SHARED_LIB YES) -endif () - -# Force only shared libraries if all OFF -if (NOT BUILD_STATIC_LIBS AND NOT BUILD_SHARED_LIBS) +# only shared libraries is true if user forces static OFF +if (NOT BUILD_STATIC_LIBS) set (ONLY_SHARED_LIBS ON CACHE BOOL "Only Build Shared Libraries" FORCE) endif () +# only shared libraries is set ON by user then force settings if (ONLY_SHARED_LIBS) set (H5_ENABLE_STATIC_LIB NO) - set (BUILD_SHARED_LIBS ON CACHE BOOL "Build Shared Libraries") + set (BUILD_SHARED_LIBS ON CACHE BOOL "Build Shared Libraries" FORCE) + set (BUILD_STATIC_LIBS OFF CACHE BOOL "Build Static Libraries" FORCE) +endif () + +if (BUILD_STATIC_LIBS) + set (H5_ENABLE_STATIC_LIB YES) +endif () +if (BUILD_SHARED_LIBS) + set (H5_ENABLE_SHARED_LIB YES) endif () set (CMAKE_POSITION_INDEPENDENT_CODE ON) @@ -911,7 +913,7 @@ add_subdirectory (src) if 
(HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") if ((ZLIB_FOUND AND ZLIB_USE_EXTERNAL) OR (SZIP_FOUND AND SZIP_USE_EXTERNAL)) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) add_dependencies (${HDF5_LIB_TARGET} ${LINK_COMP_LIBS}) endif () if (BUILD_SHARED_LIBS) diff --git a/c++/src/CMakeLists.txt b/c++/src/CMakeLists.txt index 23c6f01fb2a..ab1baed3f80 100644 --- a/c++/src/CMakeLists.txt +++ b/c++/src/CMakeLists.txt @@ -77,7 +77,7 @@ set (CPP_HDRS ${HDF5_CPP_SRC_SOURCE_DIR}/H5VarLenType.h ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_library (${HDF5_CPP_LIB_TARGET} STATIC ${CPP_SOURCES} ${CPP_HDRS}) target_include_directories (${HDF5_CPP_LIB_TARGET} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>" @@ -125,7 +125,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_CPP_SRC_FORMAT ${HDF5_CPP_LIB_TARGET}) else () clang_format (HDF5_CPP_SRC_FORMAT ${HDF5_CPP_LIBSH_TARGET}) @@ -151,7 +151,7 @@ if (HDF5_EXPORTED_TARGETS) if (BUILD_SHARED_LIBS) INSTALL_TARGET_PDB (${HDF5_CPP_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} cpplibraries) endif () - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) INSTALL_TARGET_PDB (${HDF5_CPP_LIB_TARGET} ${HDF5_INSTALL_LIB_DIR} cpplibraries) endif () @@ -184,7 +184,7 @@ endif () set (_PKG_CONFIG_LIBS_PRIVATE) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) diff --git a/fortran/examples/CMakeLists.txt b/fortran/examples/CMakeLists.txt index 6f1b8253c8b..e7edf40bae5 100644 --- a/fortran/examples/CMakeLists.txt +++ b/fortran/examples/CMakeLists.txt @@ -82,7 +82,7 @@ foreach (example ${F2003_examples}) if(MSVC) set_property(TARGET f03_ex_${example} PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS}") endif() - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) target_include_directories (f03_ex_${example} PRIVATE "${CMAKE_Fortran_MODULE_DIRECTORY}/static" @@ -123,7 +123,7 @@ if (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND) if(MSVC) set_property(TARGET f90_ex_ph5example PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS}") endif() - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) target_include_directories (f90_ex_ph5example PRIVATE "${CMAKE_Fortran_MODULE_DIRECTORY}/static" diff --git a/fortran/src/CMakeLists.txt b/fortran/src/CMakeLists.txt index 35b47a53c18..c93e9438924 100644 --- a/fortran/src/CMakeLists.txt +++ b/fortran/src/CMakeLists.txt @@ -64,7 +64,7 @@ if (BUILD_SHARED_LIBS) file (MAKE_DIRECTORY "${HDF5_F90_BINARY_DIR}/shared") set (MODSH_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/shared/${HDF_CFG_BUILD_TYPE}) endif () -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) file (MAKE_DIRECTORY "${HDF5_F90_BINARY_DIR}/static") set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static/${HDF_CFG_BUILD_TYPE}) endif () @@ -83,7 +83,7 @@ add_custom_command (TARGET H5match_types POST_BUILD DEPENDS H5match_types ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_custom_command (TARGET H5match_types POST_BUILD BYPRODUCTS ${HDF5_F90_BINARY_DIR}/static/H5f90i_gen.h ${HDF5_F90_BINARY_DIR}/static/H5fortran_types.F90 COMMAND ${CMAKE_COMMAND} @@ -139,7 +139,7 @@ set (f90CStub_C_HDRS ${HDF5_F90_SRC_SOURCE_DIR}/H5f90proto.h ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (f90CStub_CGEN_HDRS # 
generated files ${HDF5_F90_BINARY_DIR}/static/H5f90i_gen.h @@ -159,7 +159,7 @@ if (BUILD_SHARED_LIBS) ) endif () -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_library (${HDF5_F90_C_LIB_TARGET} STATIC ${f90CStub_C_SOURCES} ${f90CStub_C_HDRS} ${f90CStub_CGEN_HDRS}) target_include_directories (${HDF5_F90_C_LIB_TARGET} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};${HDF5_F90_BINARY_DIR};${HDF5_F90_BINARY_DIR}/static;$<$:${MPI_C_INCLUDE_DIRS}>" @@ -202,7 +202,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_F90_C_SRC_FORMAT ${HDF5_F90_C_LIB_TARGET} ${HDF5_F90_SRC_SOURCE_DIR}/H5match_types.c @@ -236,7 +236,7 @@ add_custom_command (TARGET H5_buildiface POST_BUILD DEPENDS H5_buildiface ${f90_F_GEN_SOURCES} COMMENT "Generating the H5_gen.F90 file" ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_custom_command (TARGET H5_buildiface POST_BUILD BYPRODUCTS ${HDF5_F90_BINARY_DIR}/static/H5_gen.F90 COMMAND ${CMAKE_COMMAND} @@ -287,7 +287,7 @@ set (f90_F_BASE_SOURCES ${HDF5_F90_SRC_SOURCE_DIR}/H5Zff.F90 ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (f90_F_SOURCES # generated file ${HDF5_F90_BINARY_DIR}/static/H5fortran_types.F90 @@ -319,7 +319,7 @@ endif () #----------------------------------------------------------------------------- # Add Main fortran library #----------------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_library (${HDF5_F90_LIB_TARGET} STATIC ${f90_F_SOURCES}) target_include_directories (${HDF5_F90_LIB_TARGET} PRIVATE "${HDF5_F90_SRC_SOURCE_DIR};${CMAKE_Fortran_MODULE_DIRECTORY}/static;${HDF5_F90_BINARY_DIR};$<$:${MPI_Fortran_INCLUDE_DIRS}>" @@ -401,7 +401,7 @@ install ( COMPONENT fortheaders ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) install ( FILES ${HDF5_F90_BINARY_DIR}/static/H5f90i_gen.h @@ -423,7 +423,7 @@ else () ) endif () -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (mod_files ${MOD_BUILD_DIR}/h5fortran_types.mod ${MOD_BUILD_DIR}/hdf5.mod @@ -517,7 +517,7 @@ if (HDF5_EXPORTED_TARGETS) INSTALL_TARGET_PDB (${HDF5_F90_C_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} fortlibraries) #INSTALL_TARGET_PDB (${HDF5_F90_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} fortlibraries) endif () - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) INSTALL_TARGET_PDB (${HDF5_F90_C_LIB_TARGET} ${HDF5_INSTALL_LIB_DIR} fortlibraries) #INSTALL_TARGET_PDB (${HDF5_F90_LIB_TARGET} ${HDF5_INSTALL_LIB_DIR} fortlibraries) endif () @@ -552,7 +552,7 @@ endif () set (_PKG_CONFIG_LIBS_PRIVATE) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) diff --git a/hl/c++/src/CMakeLists.txt b/hl/c++/src/CMakeLists.txt index 5e1db579a94..c6e43c06973 100644 --- a/hl/c++/src/CMakeLists.txt +++ b/hl/c++/src/CMakeLists.txt @@ -8,7 +8,7 @@ project (HDF5_HL_CPP_SRC CXX) set (HDF5_HL_CPP_SOURCES ${HDF5_HL_CPP_SRC_SOURCE_DIR}/H5PacketTable.cpp) set (HDF5_HL_CPP_HDRS ${HDF5_HL_CPP_SRC_SOURCE_DIR}/H5PacketTable.h) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_library (${HDF5_HL_CPP_LIB_TARGET} STATIC ${HDF5_HL_CPP_SOURCES} ${HDF5_HL_CPP_HDRS}) target_include_directories (${HDF5_HL_CPP_LIB_TARGET} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>" @@ -43,7 +43,7 @@ endif () # Add Target to clang-format 
#----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_HL_CPP_SRC_FORMAT ${HDF5_HL_CPP_LIB_TARGET}) else () clang_format (HDF5_HL_CPP_SRC_FORMAT ${HDF5_HL_CPP_LIBSH_TARGET}) @@ -69,7 +69,7 @@ if (HDF5_EXPORTED_TARGETS) if (BUILD_SHARED_LIBS) INSTALL_TARGET_PDB (${HDF5_HL_CPP_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} hlcpplibraries) endif () - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) INSTALL_TARGET_PDB (${HDF5_HL_CPP_LIB_TARGET} ${HDF5_INSTALL_LIB_DIR} hlcpplibraries) endif () @@ -102,7 +102,7 @@ endif () set (_PKG_CONFIG_LIBS_PRIVATE) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) diff --git a/hl/fortran/src/CMakeLists.txt b/hl/fortran/src/CMakeLists.txt index e999751f5e2..705a37a1bb1 100644 --- a/hl/fortran/src/CMakeLists.txt +++ b/hl/fortran/src/CMakeLists.txt @@ -37,7 +37,7 @@ if (BUILD_SHARED_LIBS) file (MAKE_DIRECTORY "${HDF5_HL_F90_BINARY_DIR}/shared") set (MODSH_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/shared/${HDF_CFG_BUILD_TYPE}) endif () -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) file (MAKE_DIRECTORY "${HDF5_HL_F90_BINARY_DIR}/static") set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static/${HDF_CFG_BUILD_TYPE}) endif () @@ -56,7 +56,7 @@ set_source_files_properties (${HDF5_HL_F90_C_SOURCES} PROPERTIES LANGUAGE C) set (HDF5_HL_F90_HEADERS ${HDF5_HL_F90_SRC_SOURCE_DIR}/H5LTf90proto.h ${HDF5_HL_F90_SRC_SOURCE_DIR}/H5IMcc.h) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_library (${HDF5_HL_F90_C_LIB_TARGET} STATIC ${HDF5_HL_F90_C_SOURCES} ${HDF5_HL_F90_HEADERS}) target_include_directories (${HDF5_HL_F90_C_LIB_TARGET} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};${HDF5_F90_BINARY_DIR}/static;$<$:${MPI_C_INCLUDE_DIRS}>" @@ -96,7 +96,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_HL_F90_C_SRC_FORMAT ${HDF5_HL_F90_C_LIB_TARGET}) else () clang_format (HDF5_HL_F90_C_SRC_FORMAT ${HDF5_HL_F90_C_LIBSH_TARGET}) @@ -113,7 +113,7 @@ set (HDF5_HL_F90_F_BASE_SOURCES ${HDF5_HL_F90_SRC_SOURCE_DIR}/H5IMff.F90 ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_custom_command (TARGET H5HL_buildiface POST_BUILD BYPRODUCTS ${HDF5_HL_F90_BINARY_DIR}/static/H5LTff_gen.F90 ${HDF5_HL_F90_BINARY_DIR}/static/H5TBff_gen.F90 COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $ @@ -148,7 +148,7 @@ if (BUILD_SHARED_LIBS) ) endif () -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (HDF5_HL_F90_F_SOURCES ${HDF5_HL_F90_F_BASE_SOURCES} @@ -169,7 +169,7 @@ if (BUILD_SHARED_LIBS) set_source_files_properties (${HDF5_HL_F90_F_SOURCES_SHARED} PROPERTIES LANGUAGE Fortran) endif () -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_library (${HDF5_HL_F90_LIB_TARGET} STATIC ${HDF5_HL_F90_F_SOURCES}) target_include_directories (${HDF5_HL_F90_LIB_TARGET} PRIVATE "${HDF5_F90_BINARY_DIR};${CMAKE_Fortran_MODULE_DIRECTORY}/static;$<$:${MPI_Fortran_INCLUDE_DIRS}>" @@ -236,7 +236,7 @@ endif () #----------------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (mod_files ${MOD_BUILD_DIR}/h5ds.mod ${MOD_BUILD_DIR}/h5tb.mod @@ -301,7 +301,7 @@ if (HDF5_EXPORTED_TARGETS) INSTALL_TARGET_PDB (${HDF5_HL_F90_C_LIBSH_TARGET} 
${HDF5_INSTALL_BIN_DIR} hlfortlibraries) #INSTALL_TARGET_PDB (${HDF5_HL_F90_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} hlfortlibraries) endif () - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) INSTALL_TARGET_PDB (${HDF5_HL_F90_C_LIB_TARGET} ${HDF5_INSTALL_LIB_DIR} hlfortlibraries) #INSTALL_TARGET_PDB (${HDF5_HL_F90_LIB_TARGET} ${HDF5_INSTALL_LIB_DIR} hlfortlibraries) endif () @@ -336,7 +336,7 @@ endif () set (_PKG_CONFIG_LIBS_PRIVATE) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) diff --git a/hl/src/CMakeLists.txt b/hl/src/CMakeLists.txt index 2155f930674..71fd6e60a65 100644 --- a/hl/src/CMakeLists.txt +++ b/hl/src/CMakeLists.txt @@ -40,7 +40,7 @@ set (HL_PRIVATE_HEADERS ${HDF5_HL_SRC_SOURCE_DIR}/H5LDprivate.h ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_library (${HDF5_HL_LIB_TARGET} STATIC ${HL_SOURCES} ${HL_HEADERS} ${HL_PRIVATE_HEADERS}) target_include_directories (${HDF5_HL_LIB_TARGET} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>" @@ -75,7 +75,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_HL_SRC_FORMAT ${HDF5_HL_LIB_TARGET}) else () clang_format (HDF5_HL_SRC_FORMAT ${HDF5_HL_LIBSH_TARGET}) @@ -101,7 +101,7 @@ if (HDF5_EXPORTED_TARGETS) if (BUILD_SHARED_LIBS) INSTALL_TARGET_PDB (${HDF5_HL_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} hllibraries) endif () - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) INSTALL_TARGET_PDB (${HDF5_HL_LIB_TARGET} ${HDF5_INSTALL_LIB_DIR} hllibraries) endif () @@ -134,7 +134,7 @@ endif () set (_PKG_CONFIG_LIBS_PRIVATE) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKG_CONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) diff --git a/hl/test/CMakeLists.txt b/hl/test/CMakeLists.txt index 9674457d0cb..a5615c0d2cb 100644 --- a/hl/test/CMakeLists.txt +++ b/hl/test/CMakeLists.txt @@ -87,7 +87,7 @@ endif () # This executable is used to generate test files for the test_ds test. 
# It should only be run during development when new test files are needed # -------------------------------------------------------------------- -if (HDF5_BUILD_GENERATORS AND NOT ONLY_SHARED_LIBS) +if (HDF5_BUILD_GENERATORS AND BUILD_STATIC_LIBS) add_executable (hl_gen_test_ds gen_test_ds.c) target_compile_options(hl_gen_test_ds PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_include_directories (hl_gen_test_ds PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") diff --git a/hl/tools/gif2h5/CMakeLists.txt b/hl/tools/gif2h5/CMakeLists.txt index 3a9c708c749..f3b1fef0567 100644 --- a/hl/tools/gif2h5/CMakeLists.txt +++ b/hl/tools/gif2h5/CMakeLists.txt @@ -14,7 +14,7 @@ set (GIF2H5_SOURCES ) #-- Add gif2hdf5 program -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (gif2h5 ${GIF2H5_SOURCES}) target_compile_options(gif2h5 PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_include_directories (gif2h5 PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") @@ -44,7 +44,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_HL_TOOLS_GIF2H5_FORMAT gif2h5) else () clang_format (HDF5_HL_TOOLS_GIF2H5_FORMAT gif2h5-shared) @@ -56,7 +56,7 @@ set (hdf2gif_SOURCES ${HDF5_HL_TOOLS_GIF2H5_SOURCE_DIR}/hdf2gif.c ${HDF5_HL_TOOLS_GIF2H5_SOURCE_DIR}/hdfgifwr.c ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h52gif ${hdf2gif_SOURCES}) target_compile_options(h52gif PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_include_directories (h52gif PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") @@ -86,7 +86,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_HL_TOOLS_H52GIF_FORMAT h52gif) else () clang_format (HDF5_HL_TOOLS_H52GIF_FORMAT h52gif-shared) @@ -99,7 +99,7 @@ if (BUILD_TESTING AND HDF5_TEST_SERIAL) # used in the CMake Build system as we rely on the test files that are # shipped with HDF5 source archives # -------------------------------------------------------------------- - if (HDF5_BUILD_GENERATORS AND NOT ONLY_SHARED_LIBS) + if (HDF5_BUILD_GENERATORS AND BUILD_STATIC_LIBS) add_executable (hl_h52gifgentest ${HDF5_HL_TOOLS_GIF2H5_SOURCE_DIR}/h52gifgentst.c) target_include_directories (hl_h52gifgentest PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") TARGET_C_PROPERTIES (hl_h52gifgentest STATIC) diff --git a/hl/tools/h5watch/CMakeLists.txt b/hl/tools/h5watch/CMakeLists.txt index fc4c3a0d7aa..1eb361898b3 100644 --- a/hl/tools/h5watch/CMakeLists.txt +++ b/hl/tools/h5watch/CMakeLists.txt @@ -9,7 +9,7 @@ set (H5WATCH_SOURCES ) #-- Add h5watch program -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5watch ${H5WATCH_SOURCES}) target_compile_options(h5watch PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_include_directories (h5watch PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") @@ -32,7 +32,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_HL_TOOLS_H5WATCH_FORMAT h5watch) else () clang_format 
(HDF5_HL_TOOLS_H5WATCH_FORMAT h5watch-shared) @@ -47,7 +47,7 @@ if (BUILD_TESTING AND HDF5_TEST_SWMR AND HDF5_TEST_SERIAL) add_executable (extend_dset ${extend_dset_SOURCES}) target_compile_options(extend_dset PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_include_directories (extend_dset PRIVATE "${HDF5_HL_SRC_DIR}/test;${HDF5_HL_SRC_DIR}/src;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (extend_dset STATIC) target_link_libraries (extend_dset PRIVATE ${HDF5_HL_LIB_TARGET} ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} ${HDF5_TOOLS_LIB_TARGET}) else () @@ -66,7 +66,7 @@ if (BUILD_TESTING AND HDF5_TEST_SWMR AND HDF5_TEST_SERIAL) add_executable (h5watchgentest ${HDF5_HL_TOOLS_H5WATCH_SOURCE_DIR}/h5watchgentest.c) target_compile_options(h5watchgentest PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_include_directories (h5watchgentest PRIVATE "${HDF5_HL_SRC_DIR}/src;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (h5watchgentest STATIC) target_link_libraries (h5watchgentest PRIVATE ${HDF5_HL_LIB_TARGET} ${HDF5_LIB_TARGET}) else () diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 585367210e9..e002e4f98ae 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1220,7 +1220,7 @@ endif () #----------------------------------------------------------------------------- # Add H5Tinit source to build - generated by H5detect/CMake at configure time #----------------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (gen_SRCS ${HDF5_GENERATED_SOURCE_DIR}/H5Tinit.c ${HDF5_SRC_BINARY_DIR}/H5lib_settings.c) add_custom_target (gen_${HDF5_LIB_TARGET} ALL DEPENDS ${lib_prog_deps} ${HDF5_GENERATED_SOURCE_DIR}/gen_SRCS.stamp1 ${HDF5_SRC_BINARY_DIR}/gen_SRCS.stamp2 @@ -1303,7 +1303,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_SRC_FORMAT ${HDF5_LIB_TARGET}) else () clang_format (HDF5_SRC_FORMAT ${HDF5_LIBSH_TARGET}) @@ -1333,7 +1333,7 @@ if (HDF5_EXPORTED_TARGETS) if (BUILD_SHARED_LIBS) INSTALL_TARGET_PDB (${HDF5_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} libraries) endif () - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) INSTALL_TARGET_PDB (${HDF5_LIB_TARGET} ${HDF5_INSTALL_LIB_DIR} libraries) endif () @@ -1375,7 +1375,7 @@ foreach (libs ${LINK_COMP_LIBS}) set (_PKG_CONFIG_LIBS_PRIVATE "${_PKG_CONFIG_LIBS_PRIVATE} -l${libs}") endforeach () -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) set (_PKG_CONFIG_LIBS "${_PKG_CONFIG_LIBS} -l${PKGCONFIG_LIBNAME}") endif () if (BUILD_SHARED_LIBS) diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index a9412f0894a..da98f15a020 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -27,7 +27,7 @@ set (TEST_LIB_HEADERS ${HDF5_TEST_SOURCE_DIR}/swmr_common.h ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_library (${HDF5_TEST_LIB_TARGET} STATIC ${TEST_LIB_SOURCES} ${TEST_LIB_HEADERS}) target_include_directories (${HDF5_TEST_LIB_TARGET} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};${HDF5_TEST_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>" @@ -81,7 +81,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT 
ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_TEST_SRC_FORMAT ${HDF5_TEST_LIB_TARGET}) else () clang_format (HDF5_TEST_SRC_FORMAT ${HDF5_TEST_LIBSH_TARGET}) diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake index 508619430cc..fa38a76de5d 100644 --- a/test/CMakeTests.cmake +++ b/test/CMakeTests.cmake @@ -925,7 +925,7 @@ endif () ############################################################################## ############################################################################## -if (HDF5_BUILD_GENERATORS AND NOT ONLY_SHARED_LIBS) +if (HDF5_BUILD_GENERATORS AND BUILD_STATIC_LIBS) macro (ADD_H5_GENERATOR genfile) add_executable (${genfile} ${HDF5_TEST_SOURCE_DIR}/${genfile}.c) target_include_directories (${genfile} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") diff --git a/tools/lib/CMakeLists.txt b/tools/lib/CMakeLists.txt index 7d139ee68cd..a831ed52050 100644 --- a/tools/lib/CMakeLists.txt +++ b/tools/lib/CMakeLists.txt @@ -34,7 +34,7 @@ set (H5_TOOLS_LIB_HDRS ${HDF5_TOOLS_LIB_SOURCE_DIR}/io_timer.h ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_library (${HDF5_TOOLS_LIB_TARGET} STATIC ${H5_TOOLS_LIB_SOURCES} ${H5_TOOLS_LIB_HDRS}) target_include_directories (${HDF5_TOOLS_LIB_TARGET} PRIVATE "${HDF5_TOOLS_LIB_SOURCE_DIR};${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>" @@ -81,7 +81,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_TOOLS_SRC_FORMAT ${HDF5_TOOLS_LIB_TARGET}) else () clang_format (HDF5_TOOLS_SRC_FORMAT ${HDF5_TOOLS_LIBSH_TARGET}) @@ -101,7 +101,7 @@ if (HDF5_EXPORTED_TARGETS) if (BUILD_SHARED_LIBS) INSTALL_TARGET_PDB (${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} toolslibraries) endif () - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) INSTALL_TARGET_PDB (${HDF5_TOOLS_LIB_TARGET} ${HDF5_INSTALL_LIB_DIR} toolslibraries) endif () diff --git a/tools/libtest/CMakeLists.txt b/tools/libtest/CMakeLists.txt index 1b0983b910e..f6d8912dcbf 100644 --- a/tools/libtest/CMakeLists.txt +++ b/tools/libtest/CMakeLists.txt @@ -7,7 +7,7 @@ project (HDF5_TOOLS_LIBTEST C) add_executable (h5tools_test_utils ${HDF5_TOOLS_LIBTEST_SOURCE_DIR}/h5tools_test_utils.c) target_compile_options(h5tools_test_utils PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_include_directories(h5tools_test_utils PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (h5tools_test_utils STATIC) target_link_libraries (h5tools_test_utils PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET}) else () diff --git a/tools/src/h5copy/CMakeLists.txt b/tools/src/h5copy/CMakeLists.txt index 64acf7ef502..efd38cdd73b 100644 --- a/tools/src/h5copy/CMakeLists.txt +++ b/tools/src/h5copy/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_SRC_H5COPY C) # -------------------------------------------------------------------- # Add the h5copy and test executables # -------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5copy ${HDF5_TOOLS_SRC_H5COPY_SOURCE_DIR}/h5copy.c) target_include_directories (h5copy PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") target_compile_options(h5copy 
PRIVATE "${HDF5_CMAKE_C_FLAGS}") @@ -32,7 +32,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_H5COPY_SRC_FORMAT h5copy) else () clang_format (HDF5_H5COPY_SRC_FORMAT h5copy-shared) diff --git a/tools/src/h5diff/CMakeLists.txt b/tools/src/h5diff/CMakeLists.txt index 44921cbdd59..f01d1aa5c7d 100644 --- a/tools/src/h5diff/CMakeLists.txt +++ b/tools/src/h5diff/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_SRC_H5DIFF C) # -------------------------------------------------------------------- # Add the h5diff executables # -------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5diff ${HDF5_TOOLS_SRC_H5DIFF_SOURCE_DIR}/h5diff_common.c ${HDF5_TOOLS_SRC_H5DIFF_SOURCE_DIR}/h5diff_main.c @@ -41,7 +41,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_H5DIFF_SRC_FORMAT h5diff) else () clang_format (HDF5_H5DIFF_SRC_FORMAT h5diff-shared) @@ -49,7 +49,7 @@ if (HDF5_ENABLE_FORMATTERS) endif () if (H5_HAVE_PARALLEL) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) add_executable (ph5diff ${HDF5_TOOLS_SRC_H5DIFF_SOURCE_DIR}/h5diff_common.c ${HDF5_TOOLS_SRC_H5DIFF_SOURCE_DIR}/ph5diff_main.c diff --git a/tools/src/h5dump/CMakeLists.txt b/tools/src/h5dump/CMakeLists.txt index df521a9bb0f..f382020e736 100644 --- a/tools/src/h5dump/CMakeLists.txt +++ b/tools/src/h5dump/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_SRC_H5DUMP C) # -------------------------------------------------------------------- # Add the h5dump executables # -------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5dump ${HDF5_TOOLS_SRC_H5DUMP_SOURCE_DIR}/h5dump.c ${HDF5_TOOLS_SRC_H5DUMP_SOURCE_DIR}/h5dump_ddl.c @@ -50,7 +50,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_H5DUMP_SRC_FORMAT h5dump) else () clang_format (HDF5_H5DUMP_SRC_FORMAT h5dump-shared) diff --git a/tools/src/h5format_convert/CMakeLists.txt b/tools/src/h5format_convert/CMakeLists.txt index c4e412b7d67..d1e21582d0f 100644 --- a/tools/src/h5format_convert/CMakeLists.txt +++ b/tools/src/h5format_convert/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_SRC_H5FC C) # -------------------------------------------------------------------- # Add the h5format_convert executables # -------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5format_convert ${HDF5_TOOLS_SRC_H5FC_SOURCE_DIR}/h5format_convert.c) target_include_directories (h5format_convert PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") target_compile_options(h5format_convert PRIVATE "${HDF5_CMAKE_C_FLAGS}") @@ -31,7 +31,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_H5FORMAT_SRC_FORMAT h5format_convert) else () clang_format 
(HDF5_H5FORMAT_SRC_FORMAT h5format_convert-shared) diff --git a/tools/src/h5import/CMakeLists.txt b/tools/src/h5import/CMakeLists.txt index 472a8948418..b2337cd51d0 100644 --- a/tools/src/h5import/CMakeLists.txt +++ b/tools/src/h5import/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_SRC_H5IMPORT C) # -------------------------------------------------------------------- # Add the h5import executables # -------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5import ${HDF5_TOOLS_SRC_H5IMPORT_SOURCE_DIR}/h5import.c ${HDF5_TOOLS_SRC_H5IMPORT_SOURCE_DIR}/h5import.h) target_include_directories (h5import PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") TARGET_C_PROPERTIES (h5import STATIC) @@ -32,7 +32,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_H5IMPORT_SRC_FORMAT h5import) else () clang_format (HDF5_H5IMPORT_SRC_FORMAT h5import-shared) diff --git a/tools/src/h5jam/CMakeLists.txt b/tools/src/h5jam/CMakeLists.txt index b037f809b1e..8642d6f7999 100644 --- a/tools/src/h5jam/CMakeLists.txt +++ b/tools/src/h5jam/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_SRC_H5JAM C) # -------------------------------------------------------------------- # Add the h5jam executables # -------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5jam ${HDF5_TOOLS_SRC_H5JAM_SOURCE_DIR}/h5jam.c) target_include_directories (h5jam PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") TARGET_C_PROPERTIES (h5jam STATIC) @@ -50,7 +50,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_H5JAM_SRC_FORMAT h5jam) clang_format (HDF5_H5UNJAM_SRC_FORMAT h5unjam) else () diff --git a/tools/src/h5ls/CMakeLists.txt b/tools/src/h5ls/CMakeLists.txt index 8805448dc71..94992538a46 100644 --- a/tools/src/h5ls/CMakeLists.txt +++ b/tools/src/h5ls/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_SRC_H5LS C) #----------------------------------------------------------------------------- # Add the h5ls executable #----------------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5ls ${HDF5_TOOLS_SRC_H5LS_SOURCE_DIR}/h5ls.c) target_include_directories (h5ls PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") target_compile_options(h5ls PRIVATE "${HDF5_CMAKE_C_FLAGS}") @@ -34,7 +34,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_H5LS_SRC_FORMAT h5ls) else () clang_format (HDF5_H5LS_SRC_FORMAT h5ls-shared) diff --git a/tools/src/h5perf/CMakeLists.txt b/tools/src/h5perf/CMakeLists.txt index 6531ccdbae0..6ff52002bb3 100644 --- a/tools/src/h5perf/CMakeLists.txt +++ b/tools/src/h5perf/CMakeLists.txt @@ -10,7 +10,7 @@ set (h5perf_serial_SOURCES ) add_executable (h5perf_serial ${h5perf_serial_SOURCES}) target_include_directories 
(h5perf_serial PRIVATE "${HDF5_TEST_SRC_DIR};${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (h5perf_serial STATIC) target_link_libraries (h5perf_serial PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET}) else () @@ -36,7 +36,7 @@ if (H5_HAVE_PARALLEL) ) add_executable (h5perf ${h5perf_SOURCES}) target_include_directories (h5perf PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (h5perf STATIC) target_link_libraries (h5perf PRIVATE ${LINK_LIBS} ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$:MPI::MPI_C>") else () diff --git a/tools/src/h5repack/CMakeLists.txt b/tools/src/h5repack/CMakeLists.txt index 637d24f5c3c..360fb0f1bd0 100644 --- a/tools/src/h5repack/CMakeLists.txt +++ b/tools/src/h5repack/CMakeLists.txt @@ -15,7 +15,7 @@ set (REPACK_COMMON_SOURCES ${HDF5_TOOLS_SRC_H5REPACK_SOURCE_DIR}/h5repack.h ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5repack ${REPACK_COMMON_SOURCES} ${HDF5_TOOLS_SRC_H5REPACK_SOURCE_DIR}/h5repack_main.c) target_include_directories (h5repack PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") target_compile_options(h5repack PRIVATE "${HDF5_CMAKE_C_FLAGS}") @@ -43,7 +43,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_H5REPACK_SRC_FORMAT h5repack) else () clang_format (HDF5_H5REPACK_SRC_FORMAT h5repack-shared) diff --git a/tools/src/h5stat/CMakeLists.txt b/tools/src/h5stat/CMakeLists.txt index 662f9352145..c0c0b32bfab 100644 --- a/tools/src/h5stat/CMakeLists.txt +++ b/tools/src/h5stat/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_SRC_H5STAT C) # -------------------------------------------------------------------- # Add the h5stat executables # -------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5stat ${HDF5_TOOLS_SRC_H5STAT_SOURCE_DIR}/h5stat.c) target_include_directories (h5stat PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") target_compile_options(h5stat PRIVATE "${HDF5_CMAKE_C_FLAGS}") @@ -32,7 +32,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_H5STAT_SRC_FORMAT h5stat) else () clang_format (HDF5_H5STAT_SRC_FORMAT h5stat-shared) diff --git a/tools/src/misc/CMakeLists.txt b/tools/src/misc/CMakeLists.txt index 054c888e54e..d2b1f51ef2c 100644 --- a/tools/src/misc/CMakeLists.txt +++ b/tools/src/misc/CMakeLists.txt @@ -5,7 +5,7 @@ project (HDF5_TOOLS_SRC_MISC C) # Add the misc executables # -------------------------------------------------------------------- #-- Misc Executables -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5debug ${HDF5_TOOLS_SRC_MISC_SOURCE_DIR}/h5debug.c) target_include_directories (h5debug PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") target_compile_options(h5debug PRIVATE "${HDF5_CMAKE_C_FLAGS}") @@ -108,7 +108,7 @@ endif () # Add Target to clang-format 
#----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_H5DEBUG_SRC_FORMAT h5debug) clang_format (HDF5_H5REPART_SRC_FORMAT h5repart) clang_format (HDF5_H5MKGRP_SRC_FORMAT h5mkgrp) diff --git a/tools/test/h5copy/CMakeLists.txt b/tools/test/h5copy/CMakeLists.txt index e8b2df47392..0fb6da4ca30 100644 --- a/tools/test/h5copy/CMakeLists.txt +++ b/tools/test/h5copy/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_TEST_H5COPY C) # -------------------------------------------------------------------- # Add the h5copy test executables # -------------------------------------------------------------------- -if (HDF5_BUILD_GENERATORS AND NOT ONLY_SHARED_LIBS) +if (HDF5_BUILD_GENERATORS AND BUILD_STATIC_LIBS) add_executable (h5copygentest ${HDF5_TOOLS_TEST_H5COPY_SOURCE_DIR}/h5copygentest.c) target_include_directories (h5copygentest PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") TARGET_C_PROPERTIES (h5copygentest STATIC) diff --git a/tools/test/h5diff/CMakeLists.txt b/tools/test/h5diff/CMakeLists.txt index aaf12d8bfd1..5f82e9b42bc 100644 --- a/tools/test/h5diff/CMakeLists.txt +++ b/tools/test/h5diff/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_TEST_H5DIFF C) # -------------------------------------------------------------------- # Add the h5diff and test executables # -------------------------------------------------------------------- -if (HDF5_BUILD_GENERATORS AND NOT ONLY_SHARED_LIBS) +if (HDF5_BUILD_GENERATORS AND BUILD_STATIC_LIBS) add_executable (h5diffgentest ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/h5diffgentest.c) target_include_directories (h5diffgentest PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") TARGET_C_PROPERTIES (h5diffgentest STATIC) diff --git a/tools/test/h5dump/CMakeLists.txt b/tools/test/h5dump/CMakeLists.txt index 4367fed5f8d..95f690fc11f 100644 --- a/tools/test/h5dump/CMakeLists.txt +++ b/tools/test/h5dump/CMakeLists.txt @@ -40,7 +40,7 @@ endif () # -------------------------------------------------------------------- # Add the h5dump test executable # -------------------------------------------------------------------- -if (HDF5_BUILD_GENERATORS AND NOT ONLY_SHARED_LIBS) +if (HDF5_BUILD_GENERATORS AND BUILD_STATIC_LIBS) add_executable (h5dumpgentest ${HDF5_TOOLS_TEST_H5DUMP_SOURCE_DIR}/h5dumpgentest.c) target_include_directories (h5dumpgentest PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_TEST_SRC_DIR};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") TARGET_C_PROPERTIES (h5dumpgentest STATIC) diff --git a/tools/test/h5format_convert/CMakeLists.txt b/tools/test/h5format_convert/CMakeLists.txt index 4b81e529294..853009351fe 100644 --- a/tools/test/h5format_convert/CMakeLists.txt +++ b/tools/test/h5format_convert/CMakeLists.txt @@ -6,7 +6,7 @@ project (HDF5_TOOLS_TEST_H5FC C) # -------------------------------------------------------------------- add_executable (h5fc_chk_idx ${HDF5_TOOLS_TEST_H5FC_SOURCE_DIR}/h5fc_chk_idx.c) target_include_directories (h5fc_chk_idx PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (h5fc_chk_idx STATIC) target_link_libraries (h5fc_chk_idx PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET}) else () @@ -22,7 +22,7 @@ if (HDF5_ENABLE_FORMATTERS) clang_format (HDF5_TOOLS_TEST_H5FC_h5fc_chk_idx_FORMAT h5fc_chk_idx) endif () -if (HDF5_BUILD_GENERATORS 
AND NOT ONLY_SHARED_LIBS) +if (HDF5_BUILD_GENERATORS AND BUILD_STATIC_LIBS) add_executable (h5fc_gentest ${HDF5_TOOLS_TEST_H5FC_SOURCE_DIR}/h5fc_gentest.c) target_include_directories (h5fc_gentest PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") TARGET_C_PROPERTIES (h5fc_gentest STATIC) diff --git a/tools/test/h5import/CMakeLists.txt b/tools/test/h5import/CMakeLists.txt index c0198856aa6..4413d06c10d 100644 --- a/tools/test/h5import/CMakeLists.txt +++ b/tools/test/h5import/CMakeLists.txt @@ -6,7 +6,7 @@ project (HDF5_TOOLS_TEST_H5IMPORT C) # -------------------------------------------------------------------- add_executable (h5importtest ${HDF5_TOOLS_TEST_H5IMPORT_SOURCE_DIR}/h5importtest.c) target_include_directories (h5importtest PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (h5importtest STATIC) target_link_libraries (h5importtest PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET}) else () diff --git a/tools/test/h5jam/CMakeLists.txt b/tools/test/h5jam/CMakeLists.txt index 4edf54fe559..160ecdf2914 100644 --- a/tools/test/h5jam/CMakeLists.txt +++ b/tools/test/h5jam/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_TEST_H5JAM C) # -------------------------------------------------------------------- # Add the h5jam test executables # -------------------------------------------------------------------- -if (HDF5_BUILD_GENERATORS AND NOT ONLY_SHARED_LIBS) +if (HDF5_BUILD_GENERATORS AND BUILD_STATIC_LIBS) add_executable (h5jamgentest ${HDF5_TOOLS_TEST_H5JAM_SOURCE_DIR}/h5jamgentest.c) target_include_directories (h5jamgentest PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") TARGET_C_PROPERTIES (h5jamgentest STATIC) @@ -23,7 +23,7 @@ endif () add_executable (getub ${HDF5_TOOLS_TEST_H5JAM_SOURCE_DIR}/getub.c) target_include_directories (getub PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (getub STATIC) target_link_libraries (getub PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET}) else () @@ -34,7 +34,7 @@ set_target_properties (getub PROPERTIES FOLDER tools) add_executable (tellub ${HDF5_TOOLS_TEST_H5JAM_SOURCE_DIR}/tellub.c) target_include_directories (tellub PRIVATE "${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (tellub STATIC) target_link_libraries (tellub PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET}) else () diff --git a/tools/test/h5repack/CMakeLists.txt b/tools/test/h5repack/CMakeLists.txt index 54a5f5361cc..3e7c6c20945 100644 --- a/tools/test/h5repack/CMakeLists.txt +++ b/tools/test/h5repack/CMakeLists.txt @@ -8,7 +8,7 @@ add_executable (testh5repack_detect_szip ${HDF5_TOOLS_TEST_H5REPACK_SOURCE_DIR}/ target_include_directories (testh5repack_detect_szip PRIVATE "${HDF5_TOOLS_SRC_H5REPACK_SOURCE_DIR};${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>" ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (testh5repack_detect_szip STATIC) target_link_libraries (testh5repack_detect_szip PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET}) else () @@ -31,7 +31,7 @@ add_executable (h5repacktest ${REPACK_COMMON_SOURCES} 
${HDF5_TOOLS_TEST_H5REPACK target_include_directories (h5repacktest PRIVATE "${HDF5_TOOLS_SRC_H5REPACK_SOURCE_DIR};${HDF5_TOOLS_DIR}/lib;${HDF5_SRC_INCLUDE_DIRS};${HDF5_TEST_SRC_DIR};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>" ) -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (h5repacktest STATIC) target_link_libraries (h5repacktest PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_TEST_LIB_TARGET}) else () @@ -97,7 +97,7 @@ endif () # -------------------------------------------------------------------- # Add the h5repack test executable # -------------------------------------------------------------------- -if (HDF5_BUILD_GENERATORS AND NOT ONLY_SHARED_LIBS) +if (HDF5_BUILD_GENERATORS AND BUILD_STATIC_LIBS) add_executable (h5repackgentest ${HDF5_TOOLS_TEST_H5REPACK_SOURCE_DIR}/h5repackgentest.c) target_include_directories (h5repackgentest PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") TARGET_C_PROPERTIES (h5repackgentest STATIC) diff --git a/tools/test/h5stat/CMakeLists.txt b/tools/test/h5stat/CMakeLists.txt index 3883ff32324..a2dca46b0c2 100644 --- a/tools/test/h5stat/CMakeLists.txt +++ b/tools/test/h5stat/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_TEST_H5STAT C) # -------------------------------------------------------------------- # Add the h5stat test executables # -------------------------------------------------------------------- -if (HDF5_BUILD_GENERATORS AND NOT ONLY_SHARED_LIBS) +if (HDF5_BUILD_GENERATORS AND BUILD_STATIC_LIBS) add_executable (h5stat_gentest ${HDF5_TOOLS_TEST_H5STAT_SOURCE_DIR}/h5stat_gentest.c) target_include_directories (h5stat_gentest PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") TARGET_C_PROPERTIES (h5stat_gentest STATIC) diff --git a/tools/test/misc/CMakeLists.txt b/tools/test/misc/CMakeLists.txt index 6d01d998581..a4032c84efe 100644 --- a/tools/test/misc/CMakeLists.txt +++ b/tools/test/misc/CMakeLists.txt @@ -4,16 +4,11 @@ project (HDF5_TOOLS_TEST_MISC C) # -------------------------------------------------------------------- # Add the misc test executables # -------------------------------------------------------------------- -if (HDF5_BUILD_GENERATORS AND NOT ONLY_SHARED_LIBS) +if (HDF5_BUILD_GENERATORS AND BUILD_STATIC_LIBS) add_executable (h5repart_gentest ${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/h5repart_gentest.c) target_include_directories (h5repart_gentest PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") - if (NOT ONLY_SHARED_LIBS) - TARGET_C_PROPERTIES (h5repart_gentest STATIC) - target_link_libraries (h5repart_gentest PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_TEST_LIB_TARGET}) - else () - TARGET_C_PROPERTIES (h5repart_gentest SHARED) - target_link_libraries (h5repart_gentest PRIVATE ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_TEST_LIBSH_TARGET}) - endif () + TARGET_C_PROPERTIES (h5repart_gentest STATIC) + target_link_libraries (h5repart_gentest PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_TEST_LIB_TARGET}) set_target_properties (h5repart_gentest PROPERTIES FOLDER generator/tools) #add_test (NAME h5repart_gentest COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $) @@ -26,13 +21,8 @@ if (HDF5_BUILD_GENERATORS AND NOT ONLY_SHARED_LIBS) add_executable (h5clear_gentest ${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/h5clear_gentest.c) target_include_directories (h5clear_gentest PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_TEST_SRC_DIR};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") - if (NOT ONLY_SHARED_LIBS) - TARGET_C_PROPERTIES (h5clear_gentest 
STATIC) - target_link_libraries (h5clear_gentest PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_TEST_LIB_TARGET}) - else () - TARGET_C_PROPERTIES (h5clear_gentest SHARED) - target_link_libraries (h5clear_gentest PRIVATE ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_TEST_LIBSH_TARGET}) - endif () + TARGET_C_PROPERTIES (h5clear_gentest STATIC) + target_link_libraries (h5clear_gentest PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_TEST_LIB_TARGET}) set_target_properties (h5clear_gentest PROPERTIES FOLDER tools) #add_test (NAME H5CLEAR-h5clear_gentest COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $) @@ -49,7 +39,7 @@ endif () add_executable (h5repart_test ${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/repart_test.c) target_include_directories (h5repart_test PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (h5repart_test STATIC) target_link_libraries (h5repart_test PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET}) else () @@ -67,7 +57,7 @@ endif () add_executable (clear_open_chk ${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/clear_open_chk.c) target_include_directories (clear_open_chk PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (clear_open_chk STATIC) target_link_libraries (clear_open_chk PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET}) else () diff --git a/tools/test/misc/vds/CMakeLists.txt b/tools/test/misc/vds/CMakeLists.txt index a459098c308..026df6fce18 100644 --- a/tools/test/misc/vds/CMakeLists.txt +++ b/tools/test/misc/vds/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_TOOLS_TEST_MISC_VDS C) macro (ADD_H5_GENERATOR genfile) add_executable (${genfile} ${HDF5_TOOLS_TEST_MISC_VDS_SOURCE_DIR}/${genfile}.c) target_include_directories (${genfile} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) TARGET_C_PROPERTIES (${genfile} STATIC) target_link_libraries (${genfile} PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET}) else () diff --git a/utils/tools/h5dwalk/CMakeLists.txt b/utils/tools/h5dwalk/CMakeLists.txt index 8c8ed6210fd..9e4eb6d8b55 100644 --- a/utils/tools/h5dwalk/CMakeLists.txt +++ b/utils/tools/h5dwalk/CMakeLists.txt @@ -4,7 +4,7 @@ project (HDF5_UTILS_TOOLS_H5DWALK C) # -------------------------------------------------------------------- # Add the h5dwalk and test executables # -------------------------------------------------------------------- -if (NOT ONLY_SHARED_LIBS) +if (BUILD_STATIC_LIBS) add_executable (h5dwalk ${HDF5_UTILS_TOOLS_H5DWALK_SOURCE_DIR}/h5dwalk.c) # add_custom_target(generate_demo ALL # DEPENDS "${HDF5_TOOLS_DIR}/test/demo_destfiles.test" @@ -35,7 +35,7 @@ endif () # Add Target to clang-format #----------------------------------------------------------------------------- if (HDF5_ENABLE_FORMATTERS) - if (NOT ONLY_SHARED_LIBS) + if (BUILD_STATIC_LIBS) clang_format (HDF5_H5DWALK_SRC_FORMAT h5dwalk) else () clang_format (HDF5_H5DWALK_SRC_FORMAT h5dwalk-shared) From 8c5a2848b51bbfbba37f9b7fc5e580be4b22611f Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 14 Mar 2023 00:57:19 -0700 Subject: [PATCH 076/231] Add Glenn Song to the CODEOWNERS file (#2551) Also removes Ray Lu --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 0a8d1d2a8af..506c668b94d 100644 --- a/.github/CODEOWNERS 
+++ b/.github/CODEOWNERS @@ -2,7 +2,7 @@ # Each line is a file pattern followed by one or more owners. # These owners will be the default owners for everything in the repo. -* @lrknox @derobins @byrnHDF @fortnern @jhendersonHDF @qkoziol @vchoi-hdfgroup @bmribler @raylu-hdf @mattjala @brtnfld +* @lrknox @derobins @byrnHDF @fortnern @jhendersonHDF @qkoziol @vchoi-hdfgroup @bmribler @glennsong09 @mattjala @brtnfld # Order is important. The last matching pattern has the most precedence. # So if a pull request only touches javascript files, only these owners From bc61a837e92da276b7556d45baf402ad51d883b4 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 14 Mar 2023 00:57:38 -0700 Subject: [PATCH 077/231] Rename clang-format fix action (#2550) The clang-format fix and check actions have the same name. This also makes some small changes to the action's text fields. --- .github/workflows/clang-format-check.yml | 2 +- .github/workflows/clang-format-fix.yml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/clang-format-check.yml b/.github/workflows/clang-format-check.yml index e8251f2ce7a..70809a1156a 100644 --- a/.github/workflows/clang-format-check.yml +++ b/.github/workflows/clang-format-check.yml @@ -8,7 +8,7 @@ jobs: if: "!contains(github.event.head_commit.message, 'skip-ci')" steps: - uses: actions/checkout@v3 - - name: Run clang-format style check for C and Java programs. + - name: Run clang-format style check for C and Java code uses: DoozyX/clang-format-lint-action@v0.13 with: source: '.' diff --git a/.github/workflows/clang-format-fix.yml b/.github/workflows/clang-format-fix.yml index c1110cf2b98..bb4d685480b 100644 --- a/.github/workflows/clang-format-fix.yml +++ b/.github/workflows/clang-format-fix.yml @@ -1,15 +1,15 @@ -name: clang-format Check +name: clang-format Commit Changes on: workflow_dispatch: push: jobs: formatting-check: - name: Formatting Check + name: Commit Format Changes runs-on: ubuntu-latest if: "!contains(github.event.head_commit.message, 'skip-ci')" steps: - uses: actions/checkout@v3 - - name: Run clang-format style check for C and Java programs. + - name: Fix C and Java formatting issues detected by clang-format uses: DoozyX/clang-format-lint-action@v0.13 with: source: '.' 
From c33a50b2e10fdbc94096cbcecd4bde63a2d5d045 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 15 Mar 2023 18:48:54 -0500 Subject: [PATCH 078/231] Change libaec URL to actively maintained repo GH#2552 (#2559) --- config/cmake/README.md.cmake.in | 4 ++-- config/cmake/cacheinit.cmake | 2 +- release_docs/INSTALL_CMake.txt | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/config/cmake/README.md.cmake.in b/config/cmake/README.md.cmake.in index b60e729e072..d7e4bc443a9 100644 --- a/config/cmake/README.md.cmake.in +++ b/config/cmake/README.md.cmake.in @@ -25,8 +25,8 @@ The official ZLIB and SZIP/Libaec pages are at: ZLIB: https://git.savannah.gnu.org/cgit/gzip.git/ https://git.savannah.gnu.org/cgit/gzip.git/tree/COPYING - SZIP/Libaec: https://gitlab.dkrz.de/k202009/libaec - https://gitlab.dkrz.de/k202009/libaec/-/blob/master/Copyright.txt + SZIP/Libaec: https://github.com/MathisRosenhauer/libaec + https://github.com/MathisRosenhauer/libaec/blob/master/LICENSE.txt Installation diff --git a/config/cmake/cacheinit.cmake b/config/cmake/cacheinit.cmake index 41293ef723f..7c5cc1e2bb5 100644 --- a/config/cmake/cacheinit.cmake +++ b/config/cmake/cacheinit.cmake @@ -59,7 +59,7 @@ set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) set (SZIP_TGZ_NAME "SZip.tar.gz" CACHE STRING "Use SZip from compressed file" FORCE) set (SZAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE) -set (LIBAEC_TGZ_ORIGPATH "https://gitlab.dkrz.de/k202009/libaec/-/archive/v1.0.6" CACHE STRING "Use LIBAEC from original location" FORCE) +set (LIBAEC_TGZ_ORIGPATH "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6/libaec-1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original location" FORCE) set (LIBAEC_TGZ_ORIGNAME "libaec-v1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original compressed file" FORCE) set (LIBAEC_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for LIBAEC FetchContent" FORCE) diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index 8d9a8db75f0..b5521f8f6f7 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -531,7 +531,7 @@ These five steps are described in detail below. set (SZIP_TGZ_NAME "SZip.tar.gz" CACHE STRING "Use SZip from compressed file" FORCE) set (SZAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE) - set (LIBAEC_TGZ_ORIGPATH "https://gitlab.dkrz.de/k202009/libaec/-/archive/v1.0.6" CACHE STRING "Use LIBAEC from original location" FORCE) + set (LIBAEC_TGZ_ORIGPATH "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6/libaec-1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original location" FORCE) set (LIBAEC_TGZ_ORIGNAME "libaec-v1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original compressed file" FORCE) set (LIBAEC_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for LIBAEC FetchContent" FORCE) ####################### @@ -568,7 +568,7 @@ These five steps are described in detail below. 
####### # fpzip ####### - set (FPZIP_GIT_URL "https://https://github.com/LLNL/fpzip" CACHE STRING "Use FPZIP from github repository" FORCE) + set (FPZIP_GIT_URL "https://github.com/LLNL/fpzip" CACHE STRING "Use FPZIP from github repository" FORCE) set (FPZIP_GIT_BRANCH "master" CACHE STRING "" FORCE) set (FPZIP_TGZ_NAME "fpzip.tar.gz" CACHE STRING "Use FPZIP from compressed file" FORCE) set (FPZIP_PACKAGE_NAME "fpzip" CACHE STRING "Name of FPZIP package" FORCE) @@ -869,7 +869,7 @@ else () H5_DEFAULT_PLUGINDIR "/usr/local/hdf5/lib/plugin" endif () if (BUILD_SZIP_WITH_FETCHCONTENT) - LIBAEC_TGZ_ORIGPATH "Use LIBAEC from original location" "https://gitlab.dkrz.de/k202009/libaec/-/archive/v1.0.6" + LIBAEC_TGZ_ORIGPATH "Use LIBAEC from original location" "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6/libaec-1.0.6.tar.gz" LIBAEC_TGZ_ORIGNAME "Use LIBAEC from original compressed file" "libaec-v1.0.6.tar.gz" LIBAEC_USE_LOCALCONTENT "Use local file for LIBAEC FetchContent" OFF if (BUILD_ZLIB_WITH_FETCHCONTENT) From 7fb49b7d4380abe2ec4e1db203493badcbf89e2a Mon Sep 17 00:00:00 2001 From: glennsong09 <43005495+glennsong09@users.noreply.github.com> Date: Fri, 17 Mar 2023 11:41:54 -0500 Subject: [PATCH 079/231] Enclose MESG in do...while loop (#2576) Enclose MSG macro in a do...while loop --- testpar/testpar.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/testpar/testpar.h b/testpar/testpar.h index 6c380a989c4..58bcab469e9 100644 --- a/testpar/testpar.h +++ b/testpar/testpar.h @@ -30,8 +30,11 @@ * mesg is not an empty string. */ #define MESG(mesg) \ - if (VERBOSE_MED && *mesg != '\0') \ - HDprintf("%s\n", mesg) + do { \ + if (VERBOSE_MED && *mesg != '\0') { \ + HDprintf("%s\n", mesg); \ + } \ + } while (0) /* * VRFY: Verify if the condition val is true. From af028a03e06d427688e85c8b6a6784e50b4d9486 Mon Sep 17 00:00:00 2001 From: Mark Kittisopikul Date: Fri, 17 Mar 2023 13:25:56 -0400 Subject: [PATCH 080/231] Fix 2nd argument of Java H5Fis_accessible (#2535) --- java/src/hdf/hdf5lib/H5.java | 6 +++--- java/src/jni/h5fImp.c | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java index 7a3ad891278..0e1c0e62ab0 100644 --- a/java/src/hdf/hdf5lib/H5.java +++ b/java/src/hdf/hdf5lib/H5.java @@ -5112,8 +5112,8 @@ public synchronized static native boolean H5Fis_hdf5(String name) * * @param name * IN: File name to check. - * @param file_id - * IN: File identifier for a currently-open HDF5 file + * @param fapl_id + * IN: File access property list identifier * * @return true if file is accessible, false if not. * @@ -5122,7 +5122,7 @@ public synchronized static native boolean H5Fis_hdf5(String name) * @exception NullPointerException * name is null. 
**/ - public synchronized static native boolean H5Fis_accessible(String name, long file_id) + public synchronized static native boolean H5Fis_accessible(String name, long fapl_id) throws HDF5LibraryException, NullPointerException; /** diff --git a/java/src/jni/h5fImp.c b/java/src/jni/h5fImp.c index b915993a617..af06bdcf55a 100644 --- a/java/src/jni/h5fImp.c +++ b/java/src/jni/h5fImp.c @@ -188,7 +188,7 @@ Java_hdf_hdf5lib_H5_H5Fis_1hdf5(JNIEnv *env, jclass clss, jstring name) * Signature: (Ljava/lang/String;J)Z */ JNIEXPORT jboolean JNICALL -Java_hdf_hdf5lib_H5_H5Fis_1accessible(JNIEnv *env, jclass clss, jstring name, jlong file_id) +Java_hdf_hdf5lib_H5_H5Fis_1accessible(JNIEnv *env, jclass clss, jstring name, jlong fapl_id) { const char *fileName = NULL; htri_t bval = JNI_FALSE; @@ -200,7 +200,7 @@ Java_hdf_hdf5lib_H5_H5Fis_1accessible(JNIEnv *env, jclass clss, jstring name, jl PIN_JAVA_STRING(ENVONLY, name, fileName, NULL, "H5Fis_accessible: file name not pinned"); - if ((bval = H5Fis_accessible(fileName, (hid_t)file_id)) < 0) + if ((bval = H5Fis_accessible(fileName, (hid_t)fapl_id)) < 0) H5_LIBRARY_ERROR(ENVONLY); bval = (bval > 0) ? JNI_TRUE : JNI_FALSE; From 237137eadd24b50b37937870ba2345f6921e052e Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 17 Mar 2023 10:55:32 -0700 Subject: [PATCH 081/231] Remove an obsolete comment from the MDS test (#2578) The seed is now broadcast from rank 0, so the warning about multiple machines having different seeds is unnecessary. --- testpar/t_pmulti_dset.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/testpar/t_pmulti_dset.c b/testpar/t_pmulti_dset.c index 52d0aa7d765..dd9a71a01d3 100644 --- a/testpar/t_pmulti_dset.c +++ b/testpar/t_pmulti_dset.c @@ -17,10 +17,6 @@ * Purpose: Test H5Dwrite_multi() and H5Dread_multi using randomized * parameters in parallel. Also tests H5Dwrite() and H5Dread() * using a similar method. - * - * Note that this test currently relies on all processes generating - * the same sequence of random numbers after using a shared seed - * value, therefore it may not work across multiple machines. */ #include "h5test.h" From e98784d50a272848da8b54095c98839fb3f6f81b Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 17 Mar 2023 10:56:14 -0700 Subject: [PATCH 082/231] Add a clang-format comment about permissions (#2577) --- .github/workflows/clang-format-fix.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/clang-format-fix.yml b/.github/workflows/clang-format-fix.yml index bb4d685480b..feaa3d0014e 100644 --- a/.github/workflows/clang-format-fix.yml +++ b/.github/workflows/clang-format-fix.yml @@ -1,3 +1,12 @@ +# NOTE: This action requires write permissions to be set in your GitHub +# repo/fork for it to be able to commit changes. 
+#
+# This is currently enabled via:
+#
+# settings > Actions > General > Workflow permissions
+#
+# which you will need to set to "Read and write permissions"
+#
 name: clang-format Commit Changes
 on:
   workflow_dispatch:
   push:

From 1f3f7fbb2de6caa02bdf8e438d5099398b2ab2f1 Mon Sep 17 00:00:00 2001
From: jhendersonHDF
Date: Fri, 17 Mar 2023 15:45:07 -0500
Subject: [PATCH 083/231] Subfiling VFD - fix issues with I/O concentrator selection strategies (#2571)

Fix multiple bugs with the SELECT_IOC_EVERY_NTH_RANK and SELECT_IOC_TOTAL
I/O concentrator selection strategies and add a regression test for them
---
 release_docs/RELEASE.txt               |  20 +++
 src/H5FDsubfiling/H5subfiling_common.c | 110 +++++++++-----
 testpar/t_subfiling_vfd.c              | 195 +++++++++++++++++++++++++
 3 files changed, 286 insertions(+), 39 deletions(-)

diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index d6ab00e3ee2..c8637f09787 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -244,6 +244,26 @@ Bug Fixes since HDF5-1.13.3 release
 ===================================
     Library
     -------
+    - Fixed issues in the Subfiling VFD when using the SELECT_IOC_EVERY_NTH_RANK
+      or SELECT_IOC_TOTAL I/O concentrator selection strategies
+
+      Multiple bugs involving these I/O concentrator selection strategies
+      were fixed, including:
+
+        * A bug that caused the selection strategy to be altered when
+          criteria for the strategy was specified in the
+          H5FD_SUBFILING_IOC_SELECTION_CRITERIA environment variable as
+          a single value, rather than in the old and undocumented
+          'integer:integer' format
+        * Two bugs which caused a request for 'N' I/O concentrators to
+          result in 'N - 1' I/O concentrators being assigned, which also
+          led to issues if only 1 I/O concentrator was requested
+
+      Also added a regression test for these two I/O concentrator selection
+      strategies to prevent future issues.
+
+      (JTH - 2023/03/15)
+
     - Fix CVE-2021-37501 / GHSA-rfgw-5vq3-wrjf

       Check for overflow when calculating on-disk attribute data size.
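
The release note above describes the criteria string only in prose, so a minimal stand-alone sketch follows; it is illustrative only and is not part of this changeset. It assumes an MPI-enabled HDF5 build with the Subfiling VFD available; the file name, the use of a NULL (default) subfiling configuration, and the particular criteria value "1:64" are assumptions made for the example.

    /*
     * Illustrative sketch (not from this changeset): select the
     * "every Nth rank" I/O concentrator strategy through the
     * environment variable parsed by the Subfiling VFD.
     */
    #include <stdlib.h>
    #include <mpi.h>
    #include "hdf5.h"
    #include "H5FDsubfiling.h"

    int
    main(int argc, char **argv)
    {
        hid_t fapl_id, file_id;

        MPI_Init(&argc, &argv);

        /* "1:64" = strategy 1 (every Nth rank) with a criteria of 64, i.e.
         * every 64th MPI rank is assigned as an I/O concentrator; a single
         * value such as "64" is now treated as criteria for the currently
         * selected strategy instead of altering the strategy itself. */
        setenv("H5FD_SUBFILING_IOC_SELECTION_CRITERIA", "1:64", 1);

        fapl_id = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_mpi_params(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);

        /* Passing NULL requests the default subfiling configuration; the
         * environment variable set above is then honored when the file is
         * created (assumption based on the default-configuration path). */
        H5Pset_fapl_subfiling(fapl_id, NULL);

        file_id = H5Fcreate("subfiling_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
        /* ... dataset creation and I/O ... */
        H5Fclose(file_id);
        H5Pclose(fapl_id);

        MPI_Finalize();
        return 0;
    }

The "1:64" form corresponds to the 'integer:integer' format mentioned above and to the comment added to get_ioc_selection_criteria_from_env() in the hunk that follows; a plain "64" would be treated as criteria for the strategy already selected in the configuration.
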
diff --git a/src/H5FDsubfiling/H5subfiling_common.c b/src/H5FDsubfiling/H5subfiling_common.c index b58c4d3c2b1..e4dcf251236 100644 --- a/src/H5FDsubfiling/H5subfiling_common.c +++ b/src/H5FDsubfiling/H5subfiling_common.c @@ -992,17 +992,24 @@ init_app_topology(H5FD_subfiling_params_t *subfiling_config, MPI_Comm comm, MPI_ HDprintf("invalid IOC selection strategy string '%s' for strategy " "SELECT_IOC_EVERY_NTH_RANK; defaulting to SELECT_IOC_ONE_PER_NODE\n", ioc_sel_str); + ioc_select_val = 1; ioc_selection_type = SELECT_IOC_ONE_PER_NODE; + + H5_CHECK_OVERFLOW(iocs_per_node, long, int); + ioc_count = (int)iocs_per_node; + + break; } } + if (ioc_select_val > comm_size) + ioc_select_val = comm_size; + H5_CHECK_OVERFLOW(ioc_select_val, long, int); - ioc_count = (comm_size / (int)ioc_select_val); + ioc_count = ((comm_size - 1) / (int)ioc_select_val) + 1; - if ((comm_size % ioc_select_val) != 0) { - ioc_count++; - } + rank_multiple = (int)ioc_select_val; break; } @@ -1017,19 +1024,31 @@ init_app_topology(H5FD_subfiling_params_t *subfiling_config, MPI_Comm comm, MPI_ if (ioc_sel_str) { errno = 0; ioc_select_val = HDstrtol(ioc_sel_str, NULL, 0); - if ((ERANGE == errno) || (ioc_select_val <= 0) || (ioc_select_val >= comm_size)) { + if ((ERANGE == errno) || (ioc_select_val <= 0)) { HDprintf("invalid IOC selection strategy string '%s' for strategy SELECT_IOC_TOTAL; " "defaulting to SELECT_IOC_ONE_PER_NODE\n", ioc_sel_str); + ioc_select_val = 1; ioc_selection_type = SELECT_IOC_ONE_PER_NODE; + + H5_CHECK_OVERFLOW(iocs_per_node, long, int); + ioc_count = (int)iocs_per_node; + + break; } } + if (ioc_select_val > comm_size) + ioc_select_val = comm_size; + H5_CHECK_OVERFLOW(ioc_select_val, long, int); ioc_count = (int)ioc_select_val; - rank_multiple = (comm_size / ioc_count); + if (ioc_select_val > 1) + rank_multiple = (comm_size - 1) / ((int)ioc_select_val - 1); + else + rank_multiple = 1; break; } @@ -1145,34 +1164,48 @@ get_ioc_selection_criteria_from_env(H5FD_subfiling_ioc_select_t *ioc_selection_t *ioc_sel_info_str = NULL; if (env_value) { - long check_value; - /* - * For non-default options, the environment variable - * should have the following form: integer:[integer|string] - * In particular, EveryNthRank == 1:64 or every 64 ranks assign an IOC - * or WithConfig == 2:/ + * Parse I/O Concentrator selection strategy criteria as + * either a single value or two colon-separated values of + * the form 'integer:[integer|string]'. If two values are + * given, the first value specifies the I/O Concentrator + * selection strategy to use (given as the integer value + * corresponding to the H5FD_subfiling_ioc_select_t enum + * value for that strategy) and the second value specifies + * the criteria for that strategy. + * + * For example, to assign every 64th MPI rank as an I/O + * Concentrator, the criteria string should have the format + * '1:64' to specify the "every Nth rank" strategy with a + * criteria of '64'. 
*/ - if ((opt_value = HDstrchr(env_value, ':'))) + opt_value = HDstrchr(env_value, ':'); + if (opt_value) { + long check_value; + *opt_value++ = '\0'; - errno = 0; - check_value = HDstrtol(env_value, NULL, 0); + errno = 0; + check_value = HDstrtol(env_value, NULL, 0); - if (errno == ERANGE) - H5_SUBFILING_SYS_GOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, - "couldn't parse value from " H5FD_SUBFILING_IOC_SELECTION_CRITERIA - " environment variable"); + if (errno == ERANGE) + H5_SUBFILING_SYS_GOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, + "couldn't parse value from " H5FD_SUBFILING_IOC_SELECTION_CRITERIA + " environment variable"); - if ((check_value < 0) || (check_value >= ioc_selection_options)) - H5_SUBFILING_GOTO_ERROR( - H5E_VFL, H5E_BADVALUE, FAIL, - "invalid IOC selection type value %ld from " H5FD_SUBFILING_IOC_SELECTION_CRITERIA - " environment variable", - check_value); + if ((check_value < 0) || (check_value >= ioc_selection_options)) + H5_SUBFILING_GOTO_ERROR( + H5E_VFL, H5E_BADVALUE, FAIL, + "invalid IOC selection type value %ld from " H5FD_SUBFILING_IOC_SELECTION_CRITERIA + " environment variable", + check_value); - *ioc_selection_type = (H5FD_subfiling_ioc_select_t)check_value; - *ioc_sel_info_str = opt_value; + *ioc_selection_type = (H5FD_subfiling_ioc_select_t)check_value; + *ioc_sel_info_str = opt_value; + } + else { + *ioc_sel_info_str = env_value; + } } done: @@ -1562,8 +1595,8 @@ identify_ioc_ranks(sf_topology_t *app_topology, int rank_stride) max_iocs = app_topology->n_io_concentrators; - if (NULL == (app_topology->io_concentrators = HDmalloc((size_t)app_topology->n_io_concentrators * - sizeof(*app_topology->io_concentrators)))) + if (NULL == (app_topology->io_concentrators = + HDmalloc((size_t)max_iocs * sizeof(*app_topology->io_concentrators)))) H5_SUBFILING_GOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate array of I/O concentrator ranks"); @@ -1622,30 +1655,29 @@ identify_ioc_ranks(sf_topology_t *app_topology, int rank_stride) case SELECT_IOC_EVERY_NTH_RANK: case SELECT_IOC_TOTAL: { - int world_size = app_layout->world_size; - int ioc_next = 0; + int num_iocs_assigned = 0; + int world_size = app_layout->world_size; HDassert(rank_stride > 0); - for (int i = 0; ioc_next < app_topology->n_io_concentrators; ioc_next++) { + for (int i = 0; num_iocs_assigned < max_iocs; num_iocs_assigned++) { int ioc_index = rank_stride * i++; + if (num_iocs_assigned >= max_iocs) + break; if (ioc_index >= world_size) break; - io_concentrators[ioc_next] = app_layout->layout[ioc_index].rank; + io_concentrators[num_iocs_assigned] = app_layout->layout[ioc_index].rank; - if (app_layout->world_rank == io_concentrators[ioc_next]) { - app_topology->ioc_idx = ioc_next; + if (app_layout->world_rank == io_concentrators[num_iocs_assigned]) { + app_topology->ioc_idx = num_iocs_assigned; app_topology->rank_is_ioc = TRUE; } - - if (ioc_next + 1 >= max_iocs) - break; } /* Set final number of I/O concentrators after adjustments */ - app_topology->n_io_concentrators = ioc_next; + app_topology->n_io_concentrators = num_iocs_assigned; break; } diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c index e1f9e05e061..380c06817d7 100644 --- a/testpar/t_subfiling_vfd.c +++ b/testpar/t_subfiling_vfd.c @@ -95,6 +95,7 @@ static hid_t create_subfiling_ioc_fapl(MPI_Comm comm, MPI_Info info, hbool_t cus static void test_create_and_close(void); static void test_config_file(void); static void test_stripe_sizes(void); +static void test_selection_strategies(void); static void 
test_read_different_stripe_size(void); static void test_subfiling_precreate_rank_0(void); static void test_subfiling_write_many_read_one(void); @@ -105,6 +106,7 @@ static test_func tests[] = { test_create_and_close, test_config_file, test_stripe_sizes, + test_selection_strategies, test_read_different_stripe_size, test_subfiling_precreate_rank_0, test_subfiling_write_many_read_one, @@ -794,6 +796,199 @@ test_stripe_sizes(void) #undef SUBF_FILENAME #undef SUBF_NITER +/* + * Test the different I/O Concentator selection strategies + * for the Subfiling VFD + */ +#define SUBF_FILENAME "test_subfiling_selection_strategies.h5" +#define NUM_RANKS_CHOICES 2 +#define NUM_CRITERIA_FORMATS 2 +static void +test_selection_strategies(void) +{ + H5FD_subfiling_params_t cfg; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + char *tmp_filename = NULL; + + curr_nerrors = nerrors; + + if (MAINPROCESS) + TESTING_2("I/O concentrator selection strategies"); + + tmp_filename = HDmalloc(PATH_MAX); + VRFY(tmp_filename, "HDmalloc succeeded"); + + for (H5FD_subfiling_ioc_select_t strategy = 0; strategy < ioc_selection_options; strategy++) { + /* Skip 1 IOC per node strategy since we assume it's + * the default strategy tested in this file. Skip + * "with config" strategy since it isn't supported. + */ + if (strategy == SELECT_IOC_ONE_PER_NODE || strategy == SELECT_IOC_WITH_CONFIG) + continue; + + /* Test with 1 MPI rank and then all MPI ranks */ + for (size_t num_ranks_choice = 0; num_ranks_choice < NUM_RANKS_CHOICES; num_ranks_choice++) { + int num_active_ranks = mpi_size; + + if (num_ranks_choice == 0) + num_active_ranks = 1; + + /* Test with a selection strategy criteria string + * in the 'integer:[integer|string]' form and in + * the form of just a single value. 
+ */ + for (size_t criteria_format_choice = 0; criteria_format_choice < NUM_CRITERIA_FORMATS; + criteria_format_choice++) { + MPI_Comm file_comm = comm_g; + char criteria_buf[256]; + char sel_criteria[128]; /* Use char buffer for criteria as we may support + the "with config" strategy in the future */ + int expected_num_subfiles; + + cfg.ioc_selection = strategy; + cfg.stripe_size = H5FD_SUBFILING_DEFAULT_STRIPE_SIZE; + cfg.stripe_count = H5FD_SUBFILING_DEFAULT_STRIPE_COUNT; + + switch (strategy) { + case SELECT_IOC_EVERY_NTH_RANK: { + int stride; + + /* Try to select a reasonable stride value */ + if (num_active_ranks <= 2) + stride = 1; + else if (num_active_ranks <= 8) + stride = 2; + else if (num_active_ranks <= 32) + stride = 4; + else if (num_active_ranks <= 128) + stride = 8; + else + stride = 16; + + HDsnprintf(sel_criteria, 128, "%d", stride); + + expected_num_subfiles = ((num_active_ranks - 1) / stride) + 1; + + break; + } + + case SELECT_IOC_TOTAL: { + int n_iocs; + + /* Try to select a reasonable number of IOCs */ + if (num_active_ranks <= 2) + n_iocs = 1; + else if (num_active_ranks <= 8) + n_iocs = 2; + else if (num_active_ranks <= 32) + n_iocs = 4; + else if (num_active_ranks <= 128) + n_iocs = 8; + else + n_iocs = 16; + + HDsnprintf(sel_criteria, 128, "%d", n_iocs); + + expected_num_subfiles = n_iocs; + + break; + } + + case SELECT_IOC_ONE_PER_NODE: + case SELECT_IOC_WITH_CONFIG: + default: + HDprintf("invalid IOC selection strategy\n"); + MPI_Abort(comm_g, -1); + } + + if (criteria_format_choice == 0) { + HDsnprintf(criteria_buf, 256, "%d:%s", strategy, sel_criteria); + } + else if (criteria_format_choice == 1) { + HDsnprintf(criteria_buf, 256, "%s", sel_criteria); + } + + VRFY(HDsetenv(H5FD_SUBFILING_IOC_SELECTION_CRITERIA, criteria_buf, 1) >= 0, + "HDsetenv succeeded"); + + HDassert(num_active_ranks == mpi_size || num_active_ranks == 1); + + if ((num_active_ranks == mpi_size) || (mpi_rank == 0)) { + h5_stat_t file_info; + FILE *subfile_ptr; + int num_digits; + + if (num_active_ranks < mpi_size) + file_comm = MPI_COMM_SELF; + + fapl_id = create_subfiling_ioc_fapl(file_comm, info_g, TRUE, &cfg, + H5FD_IOC_DEFAULT_THREAD_POOL_SIZE); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + /* + * Get the file inode value so we can construct the subfile names + */ + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + + num_digits = (int)(HDlog10(expected_num_subfiles) + 1); + + /* Ensure all the subfiles are present */ + for (int i = 0; i < expected_num_subfiles; i++) { + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, i + 1, expected_num_subfiles); + + /* Ensure file exists */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr, "HDfopen on subfile succeeded"); + VRFY((HDfclose(subfile_ptr) >= 0), "HDfclose on subfile succeeded"); + } + + /* Ensure no extra subfiles are present */ + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, expected_num_subfiles + 1, + expected_num_subfiles); + + /* Ensure file doesn't exist */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr == NULL, "HDfopen on subfile correctly failed"); + + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + mpi_code_g = MPI_Barrier(file_comm); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier 
succeeded"); + + H5E_BEGIN_TRY + { + H5Fdelete(SUBF_FILENAME, fapl_id); + } + H5E_END_TRY; + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + + VRFY(HDunsetenv(H5FD_SUBFILING_IOC_SELECTION_CRITERIA) >= 0, "HDunsetenv succeeded"); + } + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + } + } + } + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + HDfree(tmp_filename); + + CHECK_PASSED(); +} +#undef SUBF_FILENAME +#undef NUM_RANKS_CHOICES +#undef NUM_CRITERIA_FORMATS + /* * Test that opening a file with a different stripe * size/count than was used when creating the file From fb6ada22eb140662f4aaf16a5cdeff7d99cfd121 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 20 Mar 2023 08:04:27 -0700 Subject: [PATCH 084/231] Fix test script names in cleanup code (#2590) When the test scripts were renamed, DISTCLEANFILES in Makefile.am was not updated. --- test/Makefile.am | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/test/Makefile.am b/test/Makefile.am index 73cb96e72ec..bad52c8b24d 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -35,9 +35,9 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_builddir)/src # test_vds_swmr.sh: vds_swmr* # test_use_cases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes TEST_SCRIPT = test_abort_fail.sh test_check_version.sh test_error.sh \ - test_flush_refresh.sh test_external_env.sh test_libinfo.sh \ - test_links_env.sh test_swmr.sh test_vds_env.sh test_vds_swmr.sh \ - test_use_cases.sh + test_flush_refresh.sh test_external_env.sh test_libinfo.sh \ + test_links_env.sh test_swmr.sh test_vds_env.sh test_vds_swmr.sh \ + test_use_cases.sh SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT) \ external_env$(EXEEXT) filenotclosed$(EXEEXT) del_many_dense_attrs$(EXEEXT) \ flushrefresh$(EXEEXT) use_append_chunk$(EXEEXT) use_append_mchunks$(EXEEXT) use_disable_mdc_flushes$(EXEEXT) \ @@ -241,8 +241,11 @@ use_append_mchunks_SOURCES=use_append_mchunks.c use_common.c use_disable_mdc_flushes_SOURCES=use_disable_mdc_flushes.c # Temporary files. 
-DISTCLEANFILES=testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh test_filter_plugin.sh \ - testexternal_env.sh testswmr.sh testvds_env.sh testvdsswmr.sh test_usecases.sh testflushrefresh.sh \ - testabort_fail.sh test_vol_plugin.sh test_mirror.sh +DISTCLEANFILES=test_abort_fail.sh test_check_version.sh test_error.sh test_external_env.sh \ + test_flush_refresh.sh test_libinfo.sh test_links_env.sh test_plugin.sh \ + test_swmr.sh test_use_cases.sh test_vds_env.sh test_vds_swmr.sh +if MIRROR_VFD_CONDITIONAL + DISTCLEANFILES+= test_mirror.sh +endif include $(top_srcdir)/config/conclude.am From 8e4267b6086a18e2617c76c4077f3da90c9e4b0f Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 22 Mar 2023 18:41:00 -0700 Subject: [PATCH 085/231] Bring new release_docs scheme from 1.14 (#2614) --- ..._0-1_8_0_rc3.txt => HISTORY-1_0-1_8_0.txt} | 0 release_docs/HISTORY-1_10.txt | 6562 ------- release_docs/HISTORY-1_12.txt | 628 - ...ORY-1_13.txt => HISTORY-1_12_0-1_14_0.txt} | 753 + release_docs/HISTORY-1_8.txt | 14439 ---------------- release_docs/NEWSLETTER.txt | 25 + release_docs/README.md | 102 + release_docs/RELEASE.txt | 272 +- 8 files changed, 884 insertions(+), 21897 deletions(-) rename release_docs/{HISTORY-1_0-1_8_0_rc3.txt => HISTORY-1_0-1_8_0.txt} (100%) delete mode 100644 release_docs/HISTORY-1_10.txt delete mode 100644 release_docs/HISTORY-1_12.txt rename release_docs/{HISTORY-1_13.txt => HISTORY-1_12_0-1_14_0.txt} (81%) delete mode 100644 release_docs/HISTORY-1_8.txt create mode 100644 release_docs/NEWSLETTER.txt create mode 100644 release_docs/README.md diff --git a/release_docs/HISTORY-1_0-1_8_0_rc3.txt b/release_docs/HISTORY-1_0-1_8_0.txt similarity index 100% rename from release_docs/HISTORY-1_0-1_8_0_rc3.txt rename to release_docs/HISTORY-1_0-1_8_0.txt diff --git a/release_docs/HISTORY-1_10.txt b/release_docs/HISTORY-1_10.txt deleted file mode 100644 index 1fc3c60b1b1..00000000000 --- a/release_docs/HISTORY-1_10.txt +++ /dev/null @@ -1,6562 +0,0 @@ -HDF5 History -============ - -This file contains development history of the HDF5 1.10 branch - -09. Release Information for hdf5-1.10.7 -08. Release Information for hdf5-1.10.6 -07. Release Information for hdf5-1.10.5 -06. Release Information for hdf5-1.10.4 -05. Release Information for hdf5-1.10.3 -04. Release Information for hdf5-1.10.2 -03. Release Information for hdf5-1.10.1 -02. Release Information for hdf5-1.10.0-patch1 -01. Release Information for hdf5-1.10.0 - -[Search on the string '%%%%' for section breaks of each release.] - -%%%%1.10.7%%%% - -HDF5 version 1.10.7 released on 2020-09-11 -================================================================================ - - -INTRODUCTION - -This document describes the differences between this release and the previous -HDF5 release. It contains information on the platforms tested and known -problems in this release. For more details check the HISTORY*.txt files in the -HDF5 source. - -Note that documentation in the links below will be updated at the time of each -final release. - -Links to HDF5 documentation can be found on The HDF5 web page: - - https://portal.hdfgroup.org/display/HDF5/HDF5 - -The official HDF5 releases can be obtained from: - - https://www.hdfgroup.org/downloads/hdf5/ - - HDF5 binaries provided are fully tested with ZLIB and the free - Open Source SZIP successor Libaec (with BSD license). 
- The official ZLIB and SZIP/Libaec pages are at: - - ZLIB: http://www.zlib.net/ - http://www.zlib.net/zlib_license.html - SZIP/Libaec: https://gitlab.dkrz.de/k202009/libaec - https://gitlab.dkrz.de/k202009/libaec/-/blob/master/Copyright.txt - -Changes from Release to Release and New Features in the HDF5-1.10.x release series -can be found at: - - https://portal.hdfgroup.org/display/HDF5/HDF5+Application+Developer%27s+Guide - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS - -- New Features -- Support for new platforms and languages -- Bug Fixes since HDF5-1.10.6 -- Supported Platforms -- Tested Configuration Features Summary -- More Tested Platforms -- Known Problems -- CMake vs. Autotools installations - - -New Features -============ - - Configuration: - ------------- - - Disable memory sanity checks in the Autotools in release branches - - The library can be configured to use internal memory sanity checking, - which replaces C API calls like malloc(3) and free(3) with our own calls - which add things like heap canaries. These canaries can cause problems - when external filter plugins reallocate canary-marked buffers. - - For this reason, the default will be to not use the memory allocation - sanity check feature in release branches (e.g., hdf5_1_10_7). - Debug builds in development branches (e.g., develop, hdf5_1_10) will - still use them by default. - - This change only affects Autotools debug builds. Non-debug autotools - builds and all CMake builds do not enable this feature by default. - - (DER - 2020/08/19) - - - Add file locking configure and CMake options - - HDF5 1.10.0 introduced a file locking scheme, primarily to help - enforce SWMR setup. Formerly, the only user-level control of the scheme - was via the HDF5_USE_FILE_LOCKING environment variable. - - This change introduces configure-time options that control whether - or not file locking will be used and whether or not the library - ignores errors when locking has been disabled on the file system - (useful on some HPC Lustre installations). - - In both the Autotools and CMake, the settings have the effect of changing - the default property list settings (see the H5Pset/get_file_locking() - entry, below). - - The yes/no/best-effort file locking configure setting has also been - added to the libhdf5.settings file. - - Autotools: - - An --enable-file-locking=(yes|no|best-effort) option has been added. - - yes: Use file locking. - no: Do not use file locking. - best-effort: Use file locking and ignore "disabled" errors. - - CMake: - - Two self-explanatory options have been added: - - HDF5_USE_FILE_LOCKING - HDF5_IGNORE_DISABLED_FILE_LOCKS - - Setting both of these to ON is the equivalent to the Autotools' - best-effort setting. - - NOTE: - The precedence order of the various file locking control mechanisms is: - - 1) HDF5_USE_FILE_LOCKING environment variable (highest) - - 2) H5Pset_file_locking() - - 3) configure/CMake options (which set the property list defaults) - - 4) library defaults (currently best-effort) - - (DER - 2020/07/30, HDFFV-11092) - - - CMake option to link the generated Fortran MOD files into the include - directory. - - The Fortran generation of MOD files by a Fortran compile can produce - different binary files between SHARED and STATIC compiles with different - compilers and/or different platforms. Note that it has been found that - different versions of Fortran compilers will produce incompatible MOD - files. 
Currently, CMake will locate these MOD files in subfolders of - the include directory and add that path to the Fortran library target - in the CMake config file, which can be used by the CMake find library - process. For other build systems using the binary from a CMake install, - a new CMake configuration can be used to copy the pre-chosen version - of the Fortran MOD files into the install include directory. - - The default will depend on the configuration of - BUILD_STATIC_LIBS and BUILD_SHARED_LIBS: - YES YES Default to SHARED - YES NO Default to STATIC - NO YES Default to SHARED - NO NO Default to SHARED - The defaults can be overridden by setting the config option - HDF5_INSTALL_MOD_FORTRAN to one of NO, SHARED, or STATIC - - (ADB - 2020/07/09, HDFFV-11116) - - - CMake option to use AEC (open source SZip) library instead of SZip - - The open source AEC library is a replacement library for SZip. In - order to use it for hdf5, the libaec CMake source was changed to add - "-fPIC" and exclude test files. A new option USE_LIBAEC is required - to compensate for the different files produced by AEC build. - - Autotools does not build the compression libraries within hdf5 builds, - but will use an installed libaec when configured as before with the - option --with-libsz=. - - (ADB - 2020/04/22, OESS-65) - - - CMake ConfigureChecks.cmake file now uses CHECK_STRUCT_HAS_MEMBER - - Some handcrafted tests in HDFTests.c have been removed and the CMake - CHECK_STRUCT_HAS_MEMBER module has been used. - - (ADB - 2020/03/24, TRILAB-24) - - - Both build systems use same set of warnings flags - - GNU C, C++ and gfortran warnings flags were moved to files in a config - sub-folder named gnu-warnings. Flags that only are available for a specific - version of the compiler are in files named with that version. - Clang C warnings flags were moved to files in a config sub-folder - named clang-warnings. - Intel C, Fortran warnings flags were moved to files in a config sub-folder - named intel-warnings. - - There are flags in named "error-xxx" files with warnings that may - be promoted to errors. Some source files may still need fixes. - - There are also pairs of files named "developer-xxx" and "no-developer-xxx" - that are chosen by the CMake option:HDF5_ENABLE_DEV_WARNINGS or the - configure option:--enable-developer-warnings. - - In addition, CMake no longer applies these warnings for examples. - - (ADB - 2020/03/24, TRILAB-192) - - - Update CMake minimum version to 3.12 - - Updated CMake minimum version to 3.12 and added version checks - for Windows features. - - (ADB - 2020/02/05, TRILABS-142) - - - Fixed CMake include properties for Fortran libraries - - Corrected the library properties for Fortran to use the - correct path for the Fortran module files. - - (ADB - 2020/02/04, HDFFV-11012) - - - Added common warnings files for gnu and intel - - Added warnings files to use one common set of flags - during configure for both autotools and CMake build - systems. The initial implementation only affects a - general set of flags for gnu and intel compilers. - - (ADB - 2020/01/17) - - - Added new options to CMake for control of testing - - Added CMake options (default ON); - HDF5_TEST_SERIAL AND/OR HDF5_TEST_PARALLEL - combined with: - HDF5_TEST_TOOLS - HDF5_TEST_EXAMPLES - HDF5_TEST_SWMR - HDF5_TEST_FORTRAN - HDF5_TEST_CPP - HDF5_TEST_JAVA - - (ADB - 2020/01/15, HDFFV-11001) - - - Added Clang sanitizers to CMake for analyzer support if compiler is clang. 
- - Added CMake code and files to execute the Clang sanitizers if - HDF5_ENABLE_SANITIZERS is enabled and the USE_SANITIZER option - is set to one of the following: - Address - Memory - MemoryWithOrigins - Undefined - Thread - Leak - 'Address;Undefined' - - (ADB - 2019/12/12, TRILAB-135) - - - Library: - -------- - - Add metadata cache optimization to reduce skip list usage - - On file flush or close, the metadata cache attempts to write out - all dirty entries in increasing address order. To do this, it needs - an address sorted list of metadata entries. Further, since flushing - one metadata cache entry can dirty another, this list must support - efficient insertion and deletion. - - The metadata cache uses a skip list of all dirty entries for this - purpose. Before this release, this skip list was maintained at all - times. However, since profiling indicates that this imposes a - significant cost, we now construct and maintain the skip list only - when needed. Specifically, we enable the skip list and load it with - a list of all dirty entries in the metadata cache just before a flush, - and disable it after the flush. - - (JRM - 2020/08/17, HDFFV-11034) - - - Add BEST_EFFORT value to HDF5_USE_FILE_LOCKING environment variable - - This change adds a BEST_EFFORT to the TRUE/FALSE, 1/0 settings that - were previously accepted. This option turns on file locking but - ignores locking errors when the library detects that file locking - has been disabled on a file system (useful on some HPC Lustre - installations). - - The capitalization of BEST_EFFORT is mandatory. - - See the configure option discussion for HDFFV-11092 (above) for more - information on the file locking feature and how it's controlled. - - (DER - 2020/07/30, HDFFV-11092) - - - - Add H5Pset/get_file_locking() API calls - - This change adds new API calls which can be used to set or get the - file locking parameters. The single API call sets both the "use file - locking" flag and the "ignore disabled file locking" flag. - - When opening a file multiple times without closing, the file MUST be - opened with the same file locking settings. Opening a file with different - file locking settings will fail (this is similar to the behavior of - H5Pset_fclose_degree()). - - See the configure option discussion for HDFFV-11092 (above) for more - information on the file locking feature and how it's controlled. - - (DER - 2020/07/30, HDFFV-11092) - - - Add Mirror VFD - - Use TCP/IP sockets to perform write-only (W/O) file I/O on a remote - machine. Must be used in conjunction with the Splitter VFD. - - (JOS - 2020/03/13, TBD) - - - Add Splitter VFD - - Maintain separate R/W and W/O channels for "concurrent" file writes - to two files using a single HDF5 file handle. - - (JOS - 2020/03/13, TBD) - - - Fixed an assertion failure in the parallel library when collectively - filling chunks. As it is required that chunks be written in - monotonically non-decreasing order of offset in the file, this assertion - was being triggered when the list of chunk file space allocations being - passed to the collective chunk filling routine was not sorted according - to this particular requirement. - - The addition of a sort of the out of order chunks trades a bit of - performance for the elimination of this assertion and of any complaints - from MPI implementations about the file offsets used being out of order. 
- - (JTH - 2019/10/07) - - Fortran Library: - ---------------- - - Add wrappers for H5Pset/get_file_locking() API calls - - h5pget_file_locking_f() - h5pset_file_locking_f() - - See the configure option discussion for HDFFV-11092 (above) for more - information on the file locking feature and how it's controlled. - - (DER - 2020/07/30, HDFFV-11092) - - - Added new Fortran parameters: - - H5F_LIBVER_ERROR_F - H5F_LIBVER_NBOUNDS_F - H5F_LIBVER_V18_F - H5F_LIBVER_V110_F - - - Added new Fortran API: h5pget_libver_bounds_f - - (MSB - 2020/02/11, HDFFV-11018) - - C++ Library: - ------------ - - Add wrappers for H5Pset/get_file_locking() API calls - - FileAccPropList::setFileLocking() - FileAccPropList::getFileLocking() - - See the configure option discussion for HDFFV-11092 (above) for more - information on the file locking feature and how it's controlled. - - (DER - 2020/07/30, HDFFV-11092) - - Java Library: - ---------------- - - Add wrappers for H5Pset/get_file_locking() API calls - - H5Pset_file_locking() - H5Pget_use_file_locking() - H5Pget_ignore_disabled_file_locking() - - Unlike the C++ and Fortran wrappers, there are separate getters for the - two file locking settings, each of which returns a boolean value. - - See the configure option discussion for HDFFV-11092 (above) for more - information on the file locking feature and how it's controlled. - - (DER - 2020/07/30, HDFFV-11092) - - Tools: - ------ - - h5repack added options to control how external links are handled. - - Currently h5repack preserves external links and cannot copy and merge - data from the external files. Two options, merge and prune, were added to - control how to merge data from an external link into the resulting file. - --merge Follow external soft link recursively and merge data. - --prune Do not follow external soft links and remove link. - --merge --prune Follow external link, merge data and remove dangling link. - - (ADB - 2020/08/05, HDFFV-9984) - - High-Level APIs: - --------------- - - None - - C Packet Table API - ------------------ - - None - - Internal header file - -------------------- - - None - - Documentation - ------------- - - None - - -Support for new platforms, languages and compilers. -======================================= - - None - - -Bug Fixes since HDF5-1.10.6 release -================================== - - Library - ------- - - Fix bug and simplify collective metadata write operation when some ranks - have no entries to contribute. This fixes parallel regression test - failures with IBM SpectrumScale MPI on the Summit system at ORNL. - - (QAK - 2020/09/02) - - - Avoid setting up complex MPI types with 0-length vectors, which some - MPI implementations don't handle well. (In particular, IBM - SpectrumScale MPI on the Summit system at ORNL) - - (QAK - 2020/08/21) - - - Fixed use-of-uninitialized-value error - - Appropriate initialization of local structs was added to remove the - use-of-uninitialized-value errors reported by MemorySanitizer. - - (BMR - 2020/8/13, HDFFV-11101) - - - Creation of dataset with optional filter - - When the combination of type, space, etc doesn't work for filter - and the filter is optional, it was supposed to be skipped but it was - not skipped and the creation failed. - - A fix is applied to allow the creation of a dataset in such - situation, as specified in the user documentation. 
- - (BMR - 2020/8/13, HDFFV-10933) - - - Explicitly declared dlopen to use RTLD_LOCAL - - dlopen documentation states that if neither RTLD_GLOBAL nor - RTLD_LOCAL are specified, then the default behavior is unspecified. - The default on linux is usually RTLD_LOCAL while macos will default - to RTLD_GLOBAL. - - (ADB - 2020/08/12, HDFFV-11127) - - - Fixed issues CVE-2018-13870 and CVE-2018-13869 - - When a buffer overflow occurred because a name length was corrupted - and became very large, h5dump crashed on memory access violation. - - A check for reading past the end of the buffer was added to multiple - locations to prevent the crashes and h5dump now simply fails with an - error message when this error condition occurs. - - (BMR - 2020/7/31, HDFFV-11120 and HDFFV-11121) - - - H5Sset_extent_none() sets the dataspace class to H5S_NO_CLASS which - causes asserts/errors when passed to other dataspace API calls. - - H5S_NO_CLASS is an internal class value that should not have been - exposed via a public API call. - - In debug builds of the library, this can cause asserts to trip. In - non-debug builds, it will produce normal library errors. - - The new library behavior is for H5Sset_extent_none() to convert - the dataspace into one of type H5S_NULL, which is better handled - by the library and easier for developers to reason about. - - (DER - 2020/07/27, HDFFV-11027) - - - Fixed the segmentation fault when reading attributes with multiple threads - - It was reported that the reading of attributes with variable length string - datatype will crash with segmentation fault particularly when the number - of threads is high (>16 threads). The problem was due to the file pointer - that was set in the variable length string datatype for the attribute. - That file pointer was already closed when the attribute was accessed. - - The problem was fixed by setting the file pointer to the current opened - file pointer when the attribute was accessed. Similar patch up was done - before when reading dataset with variable length string datatype. - - (VC - 2020/07/13, HDFFV-11080) - - - Fixed issue CVE-2018-17438 - - A division by zero was discovered in H5D__select_io() of H5Dselect.c. - https://security-tracker.debian.org/tracker/CVE-2018-17438 - - A check was added to protect against division by zero. When such - situation occurs again, the normal HDF5 error handling will be invoked, - instead of segmentation fault. - - (BMR, DER - 2020/07/09, HDFFV-10587) - - - Fixed CVE-2018-17435 - - The tool h52gif produced a segfault when the size of an attribute message - was corrupted and caused a buffer overflow. - - The problem was fixed by verifying the attribute message's size against the - buffer size before accessing the buffer. h52gif was also fixed to display - the failure instead of silently exiting after the segfault was eliminated. - - (BMR - 2020/6/19, HDFFV-10591) - - - Don't allocate an empty (0-dimensioned) chunked dataset's chunk - index, until the dataset's dimensions are increased. - - (QAK - 2020/05/07) - - Configuration - ------------- - - Stopped addition of szip header and include directory path for - incompatible libsz - - szlib.h is the same for both 32-bit and 64-bit szip, and the header file - and its path were added to the HDF5 binary even though the configure - check of a function in libsz later failed and szip compression was not - enabled. The header file and include path are now added only when the - libsz function passes the configure check. 
- - (LRK - 2020/08/17, HDFFV-10830) - - - Added -fsanitize=address autotools configure option for Clang compiler - - Clang sanitizer options were also added for Clang compilers with CMake. - - (LRK, 2020/08/05, HDFFV-10836) - - - Updated testh5cc.sh.in for functions versioned in HDF5 1.10. - - testh5cc.sh previously tested that the correct version of a function - versioned in HDF5 1.6 or 1.8 was compiled when one of - H5_NO_DEPRECATED_SYMBOLS or H5_USE_16_API_DEFAULT were defined. This - test was extended for additional testing with H5_USE_18_API_DEFAULT. - - (LRK, 2020/06/22, HDFFV-11000) - - - Fixed CMake include properties for Fortran libraries - - Corrected the library properties for Fortran to use the - correct path for the Fortran module files. - - (ADB - 2020/02/04, HDFFV-11012) - - Performance - ------------- - - None - - Java Library: - ---------------- - - None - - Fortran - -------- - - Corrected INTERFACE INTENT(IN) to INTENT(OUT) for buf_size in h5fget_file_image_f. - - (MSB - 2020/2/18, HDFFV-11029) - - - Fixed configure issue when building HDF5 with NAG Fortran 7.0. - - HDF5 now accounts for the addition of half-precision floating-point - in NAG 7.0 with a KIND=16. - - (MSB - 2020/02/28, HDFFV-11033) - - Tools - ----- - - The tools library was updated by standardizing the error stack process. - - General sequence is: - h5tools_setprogname(PROGRAMNAME); - h5tools_setstatus(EXIT_SUCCESS); - h5tools_init(); - ... process the command-line (check for error-stack enable) ... - h5tools_error_report(); - ... (do work) ... - h5diff_exit(ret); - - (ADB - 2020/07/20, HDFFV-11066) - - - h5diff fixed a command line parsing error. - - h5diff would ignore the argument to -d (delta) if it is smaller than DBL_EPSILON. - The macro H5_DBL_ABS_EQUAL was removed and a direct value comparison was used. - - (ADB - 2020/07/20, HDFFV-10897) - - - h5diff added a command line option to ignore attributes. - - h5diff would ignore all objects with a supplied path if the exclude-path argument is used. - Adding the exclude-attribute argument will only eclude attributes, with the supplied path, - from comparison. - - (ADB - 2020/07/20, HDFFV-5935) - - - h5diff added another level to the verbose argument to print filenames. - - Added verbose level 3 that is level 2 plus the filenames. The levels are: - 0 : Identical to '-v' or '--verbose' - 1 : All level 0 information plus one-line attribute status summary - 2 : All level 1 information plus extended attribute status report - 3 : All level 2 information plus file names - - (ADB - 2020/07/20, HDFFV-10005) - - - h5repack was fixed to repack the reference attributes properly. - The code line that checks if the update of reference inside a compound - datatype is misplaced outside the code block loop that carries out the - check. In consequence, the next attribute that is not the reference - type was repacked again as the reference type and caused the failure of - repacking. The fix is to move the corresponding code line to the correct - code block. - - (KY -2020/02/10, HDFFV-11014) - - High-Level APIs: - ------ - - The H5DSis_scale function was updated to return "not a dimension scale" (0) - instead of failing (-1), when CLASS or DIMENSION_SCALE attributes are - not written according to Dimension Scales Specification. 
- - (EIP - 2020/08/12, HDFFV-10436) - - Fortran High-Level APIs: - ------ - - None - - Documentation - ------------- - - None - - F90 APIs - -------- - - None - - C++ APIs - -------- - - None - - Testing - ------- - - Stopped java/test/junit.sh.in installing libs for testing under ${prefix} - - Lib files needed are now copied to a subdirectory in the java/test - directory, and on Macs the loader path for libhdf5.xxxs.so is changed - in the temporary copy of libhdf5_java.dylib. - - (LRK, 2020/7/2, HDFFV-11063) - - -Supported Platforms -=================== - - Linux 3.10.0-1127.10.1.el7 gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) - (echidna) GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) - - Linux 2.6.32-754.31.1.el6 IBM XL C/C++ V13.1 - #1 SMP ppc64 GNU/Linux IBM XL Fortran V15.1 - (ostrich) - - Linux 3.10.0-327.18.2.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (jelly/kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Version 4.9.3, Version 5.3.0, Version 6.3.0, - Version 7.2.0, Version 8.3.0, Version 9.1.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.0.098 Build 20160721 - MPICH 3.3 compiled with GCC 7.2.0 - OpenMPI 4.0.0 compiled with GCC 7.2.0 - - SunOS 5.11 11.4.5.12.5.0 Sun C 5.15 SunOS_sparc 2017/05/30 - 32- and 64-bit Studio 12.6 Fortran 95 8.8 SunOS_sparc 2017/05/30 - (hedgehog) Sun C++ 5.15 SunOS_sparc 2017/05/30 - - Windows 7 x64 Visual Studio 2015 w/ Intel C, Fortran 2018 (cmake) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 18 (cmake) - Visual Studio 2017 w/ Intel Fortran 19 (cmake) - Visual Studio 2019 w/ Intel Fortran 19 (cmake) - Visual Studio 2019 w/ MSMPI 10.1 (cmake) - - macOS Mojave 10.14.6 Apple LLVM version 10.0.1 (clang-1001.0.46.4) - 64-bit gfortran GNU Fortran (GCC) 6.3.0 - (swallow) Intel icc/icpc/ifort version 19.0.4.233 20190416 - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.11 32-bit n y/y n y y y -Solaris2.11 64-bit n y/n n y y y -Windows 7 x64 y y/y y y y y -Windows 10 y y/y n y y y -Windows 10 x64 y y/y n y y y -MacOS Sierra 10.12.6 64-bit n y/y n y y y -MacOS High Sierra 10.13.6 64-bit n y/y n y y y -MacOS Mojave 10.14.6 64-bit n y/y n y y y -CentOS 7.2 Linux 3.10.0 x86_64 PGI n y/y n y y y -CentOS 7.2 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.2 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-754.31.1.el6.ppc64 XL n y/y n y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.11 32-bit y y y y -Solaris2.11 64-bit y y y y -Windows 7 x64 y y y y -Windows 10 y y y y -Windows 10 x64 y y y y -MacOS Sierra 10.12.6 64-bit y n y y -MacOS High Sierra 10.13.6 64-bit y n y y -MacOS Mojave 10.14.6 64-bit y n y y -CentOS 7.2 Linux 3.10.0 x86_64 PGI y y y n -CentOS 7.2 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.2 Linux 3.10.0 x86_64 Intel y y y n -Linux 2.6.32-754.31.1.el6.ppc64 XL y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. 
- - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus) Version 4.4.7 20120313 - Version 4.9.3, 5.3.0, 6.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 19.10-0 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 2.6.32-754.31.1.el6 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - - Linux 3.10.0-327.18.2.el7 GNU C (gcc) and C++ (g++) compilers - #1 SMP x86_64 GNU/Linux Version 4.8.5 20150623 (Red Hat 4.8.5-4) - (jelly) with NAG Fortran Compiler Release 6.1(Tozai) - GCC Version 7.1.0 - OpenMPI 2.1.6-GCC-7.2.0-2.29, - 3.1.3-GCC-7.2.0-2.29 - Intel(R) C (icc) and C++ (icpc) compilers - Version 17.0.0.098 Build 20160721 - with NAG Fortran Compiler Release 6.1(Tozai) - - Linux 3.10.0-327.10.1.el7 MPICH 3.1.4 compiled with GCC 4.9.3 - #1 SMP x86_64 GNU/Linux - (moohan) - - Linux-3.10.0-1127.0.0.1chaos openmpi-4.0.0 - #1 SMP x86_64 GNU/Linux clang/3.9.0, 8.0.1 - (quartz) gcc/7.3.0, 8.1.0 - intel/16.0.4 - - Linux-4.14.0-115.10.1.1 spectrum-mpi/rolling-release - #1 SMP ppc64le GNU/Linux clang/coral-2018.08.08 - (lassen) gcc/7.3.1 - xl/2019.02.07 - - Linux-4.12.14-150.52-default cray-mpich/7.7.10 - #1 SMP x86_64 GNU/Linux gcc/7.3.0, 8.2.0 - (cori) intel/19.0.3 - - Linux-4.4.180-94.107-default cray-mpich/7.7.6 - # 1SMP x86_64 GNU/Linux gcc/7.2.0, 8.2.0 - (mutrino) intel/17.0.4, 18.0.2, 19.0.4 - - Fedora 32 5.7.15-200.fc32.x86_64 Clang version 10.0.0 (Fedora 10.0.0-2.fc32) - #1 SMP x86_64 GNU/Linux GNU gcc (GCC) 10.2.1 20200723 (Red Hat 10.2.1-1) - GNU Fortran (GCC) 10.2.1 20200723 (Red Hat 10.2.1) - (cmake and autotools) - - Mac OS X El Capitan 10.11.6 Apple clang version 7.3.0 from Xcode 7.3 - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (osx1011test) Intel icc/icpc/ifort version 16.0.2 - - macOS Sierra 10.12.6 Apple LLVM version 9.0.0 (clang-900.39.2) - 64-bit gfortran GNU Fortran (GCC) 7.4.0 - (kite) Intel icc/icpc/ifort version 17.0.2 - - macOS High Sierra 10.13.6 Apple LLVM version 10.0.0 (clang-1000.10.44.4) - 64-bit gfortran GNU Fortran (GCC) 6.3.0 - (bear) Intel icc/icpc/ifort version 19.0.4.233 20190416 - - SunOS 5.11 11.3 Sun C 5.15 SunOS_sparc - 32- and 64-bit Sun Fortran 95 8.8 SunOS_sparc - (emu) Sun C++ 5.15 SunOS_sparc - - -Known Problems -============== - CMake files do not behave correctly with paths containing spaces. - Do not use spaces in paths because the required escaping for handling spaces - results in very complex and fragile build files. - ADB - 2019/05/07 - - At present, metadata cache images may not be generated by parallel - applications. Parallel applications can read files with metadata cache - images, but since this is a collective operation, a deadlock is possible - if one or more processes do not participate. - - Two tests fail attempting collective writes with OpenMPI 3.0.0/GCC-7.2.0-2.29: - testphdf5 (ecdsetw, selnone, cchunk1, cchunk3, cchunk4, and actualio) - t_shapesame (sscontig2) - - CPP ptable test fails on both VS2017 and VS2019 with Intel compiler, JIRA - issue: HDFFV-10628. This test will pass with VS2015 with Intel compiler. - - Known problems in previous releases can be found in the HISTORY*.txt files - in the HDF5 source. Please report any new problems found to - help@hdfgroup.org. - - -CMake vs. Autotools installations -================================= -While both build systems produce similar results, there are differences. 
-Each system produces the same set of folders on linux (only CMake works
-on standard Windows); bin, include, lib and share. Autotools places the
-COPYING and RELEASE.txt file in the root folder, CMake places them in
-the share folder.
-
-The bin folder contains the tools and the build scripts. Additionally, CMake
-creates dynamic versions of the tools with the suffix "-shared". Autotools
-installs one set of tools depending on the "--enable-shared" configuration
-option.
-  build scripts
-  -------------
-  Autotools: h5c++, h5cc, h5fc
-  CMake: h5c++, h5cc, h5hlc++, h5hlcc
-
-The include folder holds the header files and the fortran mod files. CMake
-places the fortran mod files into separate shared and static subfolders,
-while Autotools places one set of mod files into the include folder. Because
-CMake produces a tools library, the header files for tools will appear in
-the include folder.
-
-The lib folder contains the library files, and CMake adds the pkgconfig
-subfolder with the hdf5*.pc files used by the bin/build scripts created by
-the CMake build. CMake separates the C interface code from the fortran code by
-creating C-stub libraries for each Fortran library. In addition, only CMake
-installs the tools library. The names of the szip libraries are different
-between the build systems.
-
-The share folder will have the most differences because CMake builds include
-a number of CMake specific files for support of CMake's find_package and support
-for the HDF5 Examples CMake project.
-
-
-
-%%%%1.10.6%%%%
-
-HDF5 version 1.10.6 released on 2019-12-23
-================================================================================
-
-
-INTRODUCTION
-
-This document describes the differences between this release and the previous
-HDF5 release. It contains information on the platforms tested and known
-problems in this release. For more details check the HISTORY*.txt files in the
-HDF5 source.
-
-Note that documentation in the links below will be updated at the time of each
-final release.
-
-Links to HDF5 documentation can be found on The HDF5 web page:
-
-    https://portal.hdfgroup.org/display/HDF5/HDF5
-
-The official HDF5 releases can be obtained from:
-
-    https://www.hdfgroup.org/downloads/hdf5/
-
-Changes from Release to Release and New Features in the HDF5-1.10.x release series
-can be found at:
-
-    https://portal.hdfgroup.org/display/HDF5/HDF5+Application+Developer%27s+Guide
-
-If you have any questions or comments, please send them to the HDF Help Desk:
-
-    help@hdfgroup.org
-
-
-CONTENTS
-
-- New Features
-- Support for new platforms and languages
-- Bug Fixes since HDF5-1.10.5
-- Supported Platforms
-- Tested Configuration Features Summary
-- More Tested Platforms
-- Known Problems
-- CMake vs. Autotools installations
-
-
-New Features
-============
-
-    Configuration:
-    -------------
-    - Update CMake for VS2019 support
-
-      CMake added support for VS2019 in version 3.15. Changes to the CMake
-      generator setting required changes to scripts. Also updated version
-      references in CMake files as necessary.
-
-      (ADB - 2019/11/18, HDFFV-10962)
-
-    - Update CMake options to match new autotools options
-
-      Add configure options (autotools - CMake):
-            enable-asserts       HDF5_ENABLE_ASSERTS
-            enable-symbols       HDF5_ENABLE_SYMBOLS
-            enable-profiling     HDF5_ENABLE_PROFILING
-            enable-optimization  HDF5_ENABLE_OPTIMIZATION
-      In addition NDEBUG is no longer forced defined and relies on the CMake
-      process.
- - (ADB - 2019/10/07, HDFFV-100901, HDFFV-10637, TRILAB-97) - - - Update CMake tests to use FIXTURES - - CMake test fixtures allow setup/cleanup tests and other dependency - requirements as properties for tests. This is more flexible for - modern CMake code. - - (ADB - 2019/07/23, HDFFV-10529) - - - Windows PDB files are always installed - - There are build configuration or flag settings for Windows that may not - generate PDB files. If those files are not generated then the install - utility will fail because those PDB files are not found. An optional - variable, DISABLE_PDB_FILES, was added to not install PDB files. - - (ADB - 2019/07/17, HDFFV-10424) - - - Add mingw CMake support with a toolchain file - - There have been a number of mingw issues that have been linked under - HDFFV-10845. It has been decided to implement the CMake cross-compiling - technique of toolchain files. We will use a linux platform with the mingw - compiler stack for testing. Only the C language is fully supported, and - the error tests are skipped. The C++ language works for static but shared - builds have a shared library issue with the mingw Standard Exception Handling - library, which is not available on Windows. Fortran has a common cross-compile - problem with the fortran configure tests. - - (ADB - 2019/07/12, HDFFV-10845, HDFFV-10595) - - - Windows PDB files are installed incorrectly - - For static builds, the PDB files for windows should be installed next - to the static libraries in the lib folder. Also the debug versions of - libraries and PDB files are now correctly built using the default - CMAKE_DEBUG_POSTFIX setting. - - (ADB - 2019/07/09, HDFFV-10581) - - - Add option to build only shared libs - - A request was made to prevent building static libraries and only build - shared. A new option was added to CMake, ONLY_SHARED_LIBS, which will - skip building static libraries. Certain utility functions will build with - static libs but are not published. Tests are adjusted to use the correct - libraries depending on SHARED/STATIC settings. - - (ADB - 2019/06/12, HDFFV-10805) - - - Add options to enable or disable building tools and tests - - Configure options --enable-tests and --enable-tools were added for - autotools configure. These options are enabled by default, and can be - disabled with either --disable-tests (or tools) or --enable-tests=no - (or --enable-tools=no). Build time is reduced ~20% when tools are - disabled, 35% when tests are disabled, 45% when both are disabled. - Re-enabling them after the initial build requires running configure - again with the option(s) enabled. - - (LRK - 2019/06/12, HDFFV-9976) - - - Change tools tests to search the error stack - - There are some use cases which can cause the error stack of tools to be - different then the expected output. These tests now use grepTest.cmake; - this was changed to allow the error file to be searched for an expected string. - - (ADB - 2019/04/15, HDFFV-10741) - - - Library: - -------- - - Added S3 and HDFS Virtual File Drivers (VFDs) to HDF5 - - These new VFDs have been introduced in HDF5-1.10.6. Instructions to - enable them when configuring HDF5 on Linux and Mac may be found at - https://portal.hdfgroup.org/display/HDF5/Virtual+File+Drivers+-+S3+and+HDFS. - - Installing on Windows requires CMake 3.13 and the following additional setup. - Install openssl library (with dev files); - from "Shining Light Productions". msi package preferred. - - PATH should have been updated with the installation dir. 
- set ENV variable OPENSSL_ROOT_DIR to the installation dir. - set ENV variable OPENSSL_CONF to the cfg file, likely %OPENSSL_ROOT_DIR%\bin\openssl.cfg - Install libcurl library (with dev files); - download the latest released version using git: https://github.com/curl/curl.git - - Open a Visual Studio Command prompt - change to the libcurl root folder - run the "buildconf.bat" batch file - change to the winbuild directory - nmake /f Makefile.vc mode=dll MACHINE=x64 - copy libcurl-vc-x64-release-dll-ipv6-sspi-winssl dir to C:\curl (installation dir) - set ENV variable CURL_ROOT to C:\curl (installation dir) - update PATH ENV variable to %CURL_ROOT%\bin (installation bin dir). - the aws credentials file should be in %USERPROFILE%\.aws folder - set the ENV variable "HDF5_ROS3_TEST_BUCKET_URL=https://s3.us-east-2.amazonaws.com/hdf5ros3" - - (ADB - 2019/09/12, HDFFV-10854) - - C++ Library: - ------------ - - Added new wrappers for H5Pset/get_create_intermediate_group() - LinkCreatPropList::setCreateIntermediateGroup() - LinkCreatPropList::getCreateIntermediateGroup() - - (BMR - 2019/04/22, HDFFV-10622) - - - Java Library: - ---------------- - - Fixed a failure in JUnit-TestH5P on 32-bit architectures - - (JTH - 2019/04/30) - - -Support for new platforms, languages and compilers. -======================================= - - CMake added support for VS2019 in version 3.15. Updated scripts. - - - macOS 10.13.6 Darwin 17.7.0 with Apple clang LLVM version 10.0.0 - - - macOS 10.14.6 Darwin 18.7.0 with Apple clang LLVM version 10.0.1 - - -Bug Fixes since HDF5-1.10.5 release -================================== - - Library - ------- - - Improved performance when creating a large number of small datasets by - retrieving default property values from the API context instead of doing - skip list searches. More work is required to achieve parity with HDF5 1.8. - - (CJH - 2019/12/10, HDFFV-10658) - - - Fixed user-created data access properties not existing in the property list - returned by H5Dget_access_plist. Thanks to Steven Varga for submitting a - reproducer and a patch. - - (CJH - 2019/12/9, HDFFV-10934) - - - Inappropriate linking with deprecated MPI C++ libraries - - HDF5 does not define *_SKIP_MPICXX in the public headers, so applications - can inadvertently wind up linking to the deprecated MPI C++ wrappers. - - MPICH_SKIP_MPICXX and OMPI_SKIP_MPICXX have both been defined in H5public.h - so this should no longer be an issue. HDF5 makes no use of the deprecated - MPI C++ wrappers. - - (DER - 2019/09/17, HDFFV-10893) - - - fcntl(2)-based file locking incorrectly passed the lock argument struct - instead of a pointer to the struct, causing errors on systems where - flock(2) is not available. - - File locking is used when files are opened to enforce SWMR semantics. A - lock operation takes place on all file opens unless the - HDF5_USE_FILE_LOCKING environment variable is set to the string "FALSE". - flock(2) is preferentially used, with fcntl(2) locks as a backup if - flock(2) is unavailable on a system (if neither is available, the lock - operation fails). On these systems, the file lock will often fail, which - causes HDF5 to not open the file and report an error. - - This bug only affects POSIX systems. Win32 builds on Windows use a no-op - locking call which always succeeds. Systems which exhibit this bug will - have H5_HAVE_FCNTL defined but not H5_HAVE_FLOCK in the configure output. - - This bug affects HDF5 1.10.0 through 1.10.5. 
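-
-      For reference, a minimal sketch (illustrative only, not taken from the
-      library source; "lock_whole_file" is a hypothetical helper) of the
-      fcntl(2) locking call involved; the defect described above amounts to
-      passing the struct flock itself where fcntl(2) expects a pointer to it:
-
-          #include <fcntl.h>
-          #include <unistd.h>
-
-          static int lock_whole_file(int fd)
-          {
-              struct flock fl;
-
-              fl.l_type   = F_WRLCK;  /* exclusive (write) lock    */
-              fl.l_whence = SEEK_SET; /* offsets relative to start */
-              fl.l_start  = 0;        /* lock from byte 0 ...      */
-              fl.l_len    = 0;        /* ... through end of file   */
-
-              return fcntl(fd, F_SETLK, &fl); /* note: &fl, not fl */
-          }
-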
- - fcntl(2)-based file locking now correctly passes the struct pointer. - - (DER - 2019/08/27, HDFFV-10892) - - - Fixed a bug caused by a bad tag value when condensing object header - messages - - There was an assertion failure when moving messages from running a - user test program with library release HDF5 1.10.4. It was because - the tag value (object header's address) was not set up when entering - the library routine H5O__chunk_update_idx(), which eventually - verifies the metadata tag value when protecting the object header. - - The problem was fixed by replacing FUNC_ENTER_PACKAGE in H5O__chunk_update_idx() - with FUNC_ENTER_PACKAGE_TAG(oh->cache_info.addr) to set up the metadata tag. - - (VC - 2019/08/23, HDFFV-10873) - - - Fixed the test failure from test_metadata_read_retry_info() in - test/swmr.c - - The test failure is due to an incorrect number of bins returned for - retry info (info.nbins). The # of bins expected for 101 read attempts - is 3 instead of 2. The routine H5F_set_retries() in src/H5Fint.c - calculates the # of bins by first obtaining the log10 value for - (read attempts - 1). For PGI/19, the log10 value for 100 read attempts - is 1.9999999999999998 instead of 2.00000. When casting the log10 value - to unsigned later on, the decimal part is chopped off causing the test - failure. - - This was fixed by obtaining the rounded integer value (HDceil) for the - log10 value of read attempts first before casting the result to unsigned. - - (VC - 2019/8/14, HDFFV-10813) - - - Fixed an issue when creating a file with non-default file space info - together with library high bound setting to H5F_LIBVER_V18. - - When setting non-default file space info in fcpl via - H5Pset_file_space_strategy() and then creating a file with both high and - low library bounds set to H5F_LIBVER_V18 in fapl, the library succeeds in - creating the file. File creation should fail because the feature of - setting non-default file space info does not exist in library release 1.8 - or earlier. - - This was fixed by setting and checking the proper version in the file - space info message based on the library low and high bounds when creating - and opening the HDF5 file. - - (VC - 2019/6/25, HDFFV-10808) - - - Fixed an issue where copying a version 1.8 dataset between files using - H5Ocopy fails due to an incompatible fill version - - When using the HDF5 1.10.x H5Ocopy() API call to copy a version 1.8 - dataset to a file created with both high and low library bounds set to - H5F_LIBVER_V18, the H5Ocopy() call will fail with the error stack indicating - that the fill value version is out of bounds. - - This was fixed by changing the fill value message version to H5O_FILL_VERSION_3 - (from H5O_FILL_VERSION_2) for H5F_LIBVER_V18. - - (VC - 2019/6/14, HDFFV-10800) - - - Fixed a bug that would cause an error or cause fill values to be - incorrectly read from a chunked dataset using the "single chunk" index if - the data was held in cache and there was no data on disk. - - (NAF - 2019/03/06) - - - Fixed a bug that could cause an error or cause fill values to be - incorrectly read from a dataset that was written to using H5Dwrite_chunk - if the dataset was not closed after writing. - - (NAF - 2019/03/06, HDFFV-10716) - - - Fixed memory leak in scale offset filter - - In a special case where the MinBits is the same as the number of bits in - the datatype's precision, the filter's data buffer was not freed, causing - the memory usage to grow. In general the buffer was freed correctly. 
The - Minbits are the minimal number of bits to store the data values. Please - see the reference manual for H5Pset_scaleoffset for the details. - - (RL - 2019/3/4, HDFFV-10705) - - - Configuration - ------------- - - Correct option for default API version - - CMake options for default API version are not mutually exclusive. - Change the multiple BOOL options to a single STRING option with the - strings; v16, v18, v110. - - (ADB - 2019/08/12, HDFFV-10879) - - Tools - ----- - - h5repack was fixed to repack datasets with external storage - to other types of storage. - - New test added to repack files and verify the correct data using h5diff. - - (JS - 2019/09/25, HDFFV-10408) - (ADB - 2019/10/02, HDFFV-10918) - - -Supported Platforms -=================== - - Linux 2.6.32-696.20.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-23) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-23) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-23) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (jelly/kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Version 4.9.3, Version 5.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.0.098 Build 20160721 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2015 w/ Intel Fortran 18 (cmake) - - Windows 7 x64 Visual Studio 2015 w/ Intel C, Fortran 2018 (cmake) - Visual Studio 2015 w/ MSMPI 8 (cmake) - - Windows 10 Visual Studio 2015 w/ Intel Fortran 18 (cmake) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 18 (cmake) - Visual Studio 2017 w/ Intel Fortran 19 (cmake) - Visual Studio 2019 w/ Intel Fortran 19 (cmake) - - macOS 10.13.6, Darwin, Apple clang LLVM version 10.0.0 - 17.7.0, x86_64 gfortran GNU Fortran (GCC) 6.3.0 - (bear) Intel icc/icpc/ifort version 19.0.4 - - macOS 10.14.6, Darwin Apple clang LLVM version 10.0.1 - 18.7.0, x86_64 gfortran GNU Fortran (GCC) 6.3.0 - (bobcat) Intel icc/icpc/ifort version 19.0.4 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.11 32-bit n y/y n y y y -Solaris2.11 64-bit n y/n n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y y y y y -Windows 7 Cygwin n y/n n y y y -Windows 7 x64 Cygwin n y/n n y y y -Windows 10 y y/y n y y y -Windows 10 x64 y y/y n y y y -Mac OS X Yosemite 10.10.5 64-bit n y/y n y y y -Mac OS X El Capitan 10.11.6 64-bit n y/y n y y y -MacOS High Sierra 10.13.6 64-bit n y/y n y y y -CentOS 7.2 Linux 3.10.0 x86_64 PGI n y/y n y y y -CentOS 7.2 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.2 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-573.18.1.el6.ppc64 n y/y n y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.11 32-bit y y y y -Solaris2.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 7 x64 Cygwin n n n y -Windows 10 y y y y -Windows 10 x64 y y y y -Mac OS X Yosemite 10.10.5 64-bit y y y y -Mac OS X El Capitan 10.11.6 64-bit y y y y -MacOS High Sierra 
10.13.6 64-bit y y y y -CentOS 7.2 Linux 3.10.0 x86_64 PGI y y y n -CentOS 7.2 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.2 Linux 3.10.0 x86_64 Intel y y y n -Linux 2.6.32-573.18.1.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following configurations are not supported but have been tested for this release. - - Linux 2.6.32-754.11.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (mayll/platypus) Version 4.4.7 20120313 - Version 4.9.3, 5.3.0, 6.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 17.10-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.4.196 Build 20170411 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 3.10.0-327.18.2.el7 GNU C (gcc) and C++ (g++) compilers - #1 SMP x86_64 GNU/Linux Version 4.8.5 20150623 (Red Hat 4.8.5-4) - (jelly) with NAG Fortran Compiler Release 6.2(Chiyoda) - GCC Version 7.1.0 - MPICH 3.2-GCC-4.9.3 - MPICH 3.2.1-GCC-7.2.0-2.29 - OpenMPI 2.1.5-GCC-7.2.0-2.29 - Intel(R) C (icc) and C++ (icpc) compilers - Version 17.0.0.098 Build 20160721 - with NAG Fortran Compiler Release 6.2(Chiyoda) - - Linux 3.10.0-327.10.1.el7 MPICH 3.2 compiled with GCC 5.3.0 - #1 SMP x86_64 GNU/Linux - (moohan) - - Linux 2.6.32-573.18.1.el6.ppc64 MPICH mpich 3.1.4 compiled with - #1 SMP ppc64 GNU/Linux IBM XL C/C++ for Linux, V13.1 - (ostrich) and IBM XL Fortran for Linux, V15.1 - - Fedora30 5.3.11-200.fc30.x86_64 - #1 SMP x86_64 GNU/Linux GNU gcc (GCC) 9.2.1 20190827 (Red Hat 9.2.1 20190827) - GNU Fortran (GCC) 9.2.1 20190827 (Red Hat 9.2.1 20190827) - (cmake and autotools) - - Mac OS X 10.11.6, Darwin, Apple clang version 7.3.0 from Xcode 7.3 - 15.6.0, x86-64 gfortran GNU Fortran (GCC) 5.2.0 - (osx1011test) Intel icc/icpc/ifort version 16.0.2 - - macOS 10.12.6, Darwin, Apple clang LLVM version 8.1.0 from Xcode 8.3 - 16.6.0, x86_64 gfortran GNU Fortran (GCC) 7.1.0 - (kite) Intel icc/icpc/ifort version 17.0.2 - - Windows 7 x64 Visual Studio 2008 - - -Known Problems -============== - CMake files do not behave correctly with paths containing spaces. - Do not use spaces in paths because the required escaping for handling spaces - results in very complex and fragile build files. - ADB - 2019/05/07 - - At present, metadata cache images may not be generated by parallel - applications. Parallel applications can read files with metadata cache - images, but since this is a collective operation, a deadlock is possible - if one or more processes do not participate. - - Three tests fail with OpenMPI 3.0.0/GCC-7.2.0-2.29: - testphdf5 (ecdsetw, selnone, cchunk1, cchunk3, cchunk4, and actualio) - t_shapesame (sscontig2) - t_pflush1/fails on exit - The first two tests fail attempting collective writes. - - Parallel builds using OpenMPI 3.03 or later and romio fail several tests - with collective writes or compression that will not fail when ompio is used - instead of romio. This can be done by adding "--mca io ompio" to the mpirun - command. For example, in autotools builds RUNPARALLEL can be set to - "mpirun --mca io ompio -n 6" provided ompio is installed. - - CPP ptable test fails on VS2017 with Intel compiler, JIRA issue: HDFFV-10628. - This test will pass with VS2015 with Intel compiler. - - Older MPI libraries such as OpenMPI 2.0.1 and MPICH 2.1.5 were tested - while attempting to resolve the Jira issue: HDFFV-10540. 
- The known problems of reading or writing > 2GBs when using MPI-2 was - partially resolved with the MPICH library. The proposed support recognizes - IO operations > 2GB and if the datatype is not a derived type, the library - breaks the IO into chunks which can be input or output with the existing - MPI 2 limitations, i.e. size reporting and function API size/count - arguments are restricted to be 32 bit integers. For derived types larger - than 2GB, MPICH 2.1.5 fails while attempting to read or write data. - OpenMPI in contrast, implements MPI-3 APIs even in the older releases - and thus does not suffer from the 32 bit size limitation described here. - OpenMPI releases prior to v3.1.3 appear to have other datatype issues however, - e.g. within a single parallel test (testphdf5) the subtests (cdsetr, eidsetr) - report data verification errors before eventually aborting. - The most recent versions of OpenMPI (v3.1.3 or newer) have evidently - resolved these issues and parallel HDF5 testing does not currently report - errors though occasional hangs have been observed. - - Known problems in previous releases can be found in the HISTORY*.txt files - in the HDF5 source. Please report any new problems found to - help@hdfgroup.org. - - -CMake vs. Autotools installations -================================= -While both build systems produce similar results, there are differences. -Each system produces the same set of folders on linux (only CMake works -on standard Windows); bin, include, lib and share. Autotools places the -COPYING and RELEASE.txt file in the root folder, CMake places them in -the share folder. - -The bin folder contains the tools and the build scripts. Additionally, CMake -creates dynamic versions of the tools with the suffix "-shared". Autotools -installs one set of tools depending on the "--enable-shared" configuration -option. - build scripts - ------------- - Autotools: h5c++, h5cc, h5fc - CMake: h5c++, h5cc, h5hlc++, h5hlcc - -The include folder holds the header files and the fortran mod files. CMake -places the fortran mod files into separate shared and static subfolders, -while Autotools places one set of mod files into the include folder. Because -CMake produces a tools library, the header files for tools will appear in -the include folder. - -The lib folder contains the library files, and CMake adds the pkgconfig -subfolder with the hdf5*.pc files used by the bin/build scripts created by -the CMake build. CMake separates the C interface code from the fortran code by -creating C-stub libraries for each Fortran library. In addition, only CMake -installs the tools library. The names of the szip libraries are different -between the build systems. - -The share folder will have the most differences because CMake builds include -a number of CMake specific files for support of CMake's find_package and support -for the HDF5 Examples CMake project. - -%%%%1.10.5%%%% - -HDF5 version 1.10.5 released on 2019-02-25 -================================================================================ - - -INTRODUCTION - -This document describes the differences between this release and the previous -HDF5 release. It contains information on the platforms tested and known -problems in this release. For more details check the HISTORY*.txt files in the -HDF5 source. - -Note that documentation in the links below will be updated at the time of each -final release. 
- -Links to HDF5 documentation can be found on The HDF5 web page: - - https://portal.hdfgroup.org/display/HDF5/HDF5 - -The official HDF5 releases can be obtained from: - - https://www.hdfgroup.org/downloads/hdf5/ - -Changes from Release to Release and New Features in the HDF5-1.10.x release series -can be found at: - - https://portal.hdfgroup.org/display/HDF5/HDF5+Application+Developer%27s+Guide - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS - -- New Features -- Support for new platforms and languages -- Bug Fixes since HDF5-1.10.4 -- Supported Platforms -- Tested Configuration Features Summary -- More Tested Platforms -- Known Problems -- CMake vs. Autotools installations - - -New Features -============ - - Configuration: - ------------- - - Cross compile on mutrino and other Cray XC40 systems. - - Added support for CMake options to use CrayLinuxEnvironment, craype-mic-knl - module for building with craype-haswell module for configuration, and - batch scripts in bin/batch for use with sbatch or bsub to run tests in - batch jobs on compute nodes. An instruction file README_HPC describing - the use of these options was added in release_docs. - - (LRK - 2019/02/18, TRILABS-34) - - - Rework CMake command files to fix MPI testing. - - Added setup fixture to remove any test generated files and added DEPENDS - to test properties to execute tests in order expected. - - (ADB - 2019/02/14, TRILABS-111) - - - Disable SZIP or ZLIB options if TGZ files are not available. - - Changed the TGZ option for SZip and ZLib to disable the options - if the source tar.gz files are not found. - - (ADB - 2019/02/05, HDFFV-10697) - - - Added a new option to enable/disable using pread/pwrite instead of - read/write in the sec2, log, and core VFDs. - - This option is enabled by default when pread/pwrite are detected. - - Autotools: --enable-preadwrite - CMake: HDF5_ENABLE_PREADWRITE - - (DER - 2019/02/03, HDFFV-10696) - - - Rework CMake versioning for OSX platforms. - - Changed the current_version and compatibility_version flags from optional - with HDF5_BUILD_WITH_INSTALL_NAME to always setting the flags for OSX. - - (ADB - 2019/01/22, HDFFV-10685) - - - Rework CMake command files to eliminate developer CMP005 warning - - Use variables without quotes in if () statements. - - (ADB - 2019/01/18, TILABS-105) - - - Rework CMake configure files to eliminate developer CMP0075 warning - - Renamed varname to HDF5_REQUIRED_LIBRARIES as the contents were not - required for configuration. Also moved check includes calls to top of - files. - - (ADB - 2019/01/03, HDFFV-10546) - - - Keep stderr and stdout separate in tests - - Changed test handling of output capture. Tests now keep the stderr - output separate from the stdout output. It is up to the test to decide - which output to check against a reference. Also added the option - to grep for a string in either output. - - (ADB - 2018/12/12, HDFFV-10632) - - - Incorrectly installed private header files were removed from - CMake installs. - - The CMake build files incorrectly flagged the following header files - as public and installed them. They are private and will no longer be - installed. 
-      HDF5 library private package files (H5Xpkg.h)
-      H5Edefin.h
-      H5Einit.h
-      H5Eterm.h
-      H5LTparse.h
-      h5diff.h
-      h5tools_dump.h
-      h5tools.h
-      h5tools_ref.h
-      h5tools_str.h
-      h5tools_utils.h
-      h5trav.h
-
-      (DER - 2018/10/26, HDFFV-10614, 10609)
-
-    - Autotools installs now install H5FDwindows.h
-
-      This is simply to align the installed header files between the
-      autotools and CMake. H5FDwindows.h has no functionality on
-      non-Windows systems.
-
-      (DER - 2018/10/26, HDFFV-10614)
-
-
-    Library:
-    --------
-    - The sec2, log, and core VFDs can now use pread/pwrite instead of
-      read/write.
-
-      pread and pwrite do not change the file offset, a feature that was
-      requested by a user working with a multi-threaded application.
-
-      The option to configure this feature is described above.
-
-      (DER - 2019/02/03, HDFFV-10696)
-
-    - Add ability to minimize dataset object headers.
-
-      Creation of many, very small datasets resulted in extensive file bloat
-      due to extra space in the dataset object headers -- this space is
-      allocated by default to allow for the insertion of a small number of
-      attributes within the object header and not require a continuation
-      block, an unnecessary provision in the target use case.
-
-      Inform the library to expect no attributes on created datasets, and to
-      allocate the least space possible for the object headers.
-      NOTE: A continuation block is created if attributes are added to a
-      'minimized' dataset, which can reduce performance.
-      NOTE: Some extra space is allocated for attributes essential to the
-      correct behavior of the object header (store creation times, e.g.). This
-      does not violate the design principle, as the space is calculated and
-      allocated as needed at the time of dataset object header creation --
-      unused space is not generated.
-      New API calls:
-          H5Fget_dset_no_attrs_hint
-          H5Fset_dset_no_attrs_hint
-          H5Pget_dset_no_attrs_hint
-          H5Pset_dset_no_attrs_hint
-
-      (JOS - 2019/01/04, TRILAB-45)
-
-    - Added new chunk query functions
-
-      The following public functions were added to discover information about
-      the chunks in an HDF5 file.
-          herr_t H5Dget_num_chunks(dset_id, fspace_id, *nchunks)
-          herr_t H5Dget_chunk_info_by_coord(dset_id, *coord, *filter_mask, *addr, *size)
-          herr_t H5Dget_chunk_info(dset_id, fspace_id, index, *coord, *filter_mask, *addr, *size)
-
-      (BMR - 2018/11/07, HDFFV-10615)
-
-    - Several empty public header files were removed from the distribution
-
-      The following files were empty placeholders. They are for internal
-      packages that are unlikely to ever have public functionality and have
-      thus been removed.
-
-      H5Bpublic.h
-      H5B2public.h
-      H5FSpublic.h
-      H5HFpublic.h
-      H5HGpublic.h
-      H5HLpublic.h
-
-      They were only installed in CMake builds.
-
-      (DER - 2018/10/26, HDFFV-10614)
-
-
-    Parallel Library:
-    -----------------
-    - Changed the default behavior in parallel when all processes collectively
-      read the same dataset in its entirety (i.e. an H5S_ALL dataset selection).
-      The dataset must be contiguous, less than 2GB, and of an atomic datatype.
-      The new behavior is that the HDF5 library will use an MPI_Bcast to pass
-      the data read from the disk by the root process to the remaining
-      processes in the MPI communicator associated with the HDF5 file.
-
-      (MSB - 2019/01/02, HDFFV-10652)
-
-    - All MPI-1 API calls have been replaced with MPI-2 equivalents.
-
-      This was done to better support OpenMPI, as default builds no longer
-      include MPI-1 support (as of OpenMPI 4.0).
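-
-      As an illustration of the kind of renaming involved (example calls only,
-      not necessarily the exact set changed in the library), the MPI-2 forms
-      replace their deprecated MPI-1 counterparts like this:
-
-          #include <mpi.h>
-
-          static void mpi2_style(void *buf, MPI_Datatype type)
-          {
-              MPI_Aint addr, lb, extent;
-
-              MPI_Get_address(buf, &addr);             /* was: MPI_Address(buf, &addr)        */
-              MPI_Type_get_extent(type, &lb, &extent); /* was: MPI_Type_extent(type, &extent) */
-          }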
- - (DER - 2018/12/30, HDFFV-10566) - - Fortran Library: - ---------------- - - Added wrappers for dataset object header minimization calls. - (see the note for TRILAB-45, above) - - New API calls: - - h5fget_dset_no_attrs_hint_f - h5fset_dset_no_attrs_hint_f - h5pget_dset_no_attrs_hint_f - h5pset_dset_no_attrs_hint_f - - (DER - 2019/01/09, TRILAB-45) - - - Added new Fortran derived type, c_h5o_info_t, which is interoperable with - C's h5o_info_t. This is needed for callback functions which - pass C's h5o_info_t data type definition. - - (MSB, 2019/01/08, HDFFV-10443) - - - Added new Fortran API, H5gmtime, which converts (C) 'time_t' structure - to Fortran DATE AND TIME storage format. - - (MSB, 2019/01/08, HDFFV-10443) - - - Added new Fortran 'fields' optional parameter to: h5ovisit_f, h5oget_info_by_name_f, - h5oget_info, h5oget_info_by_idx and h5ovisit_by_name_f. - - (MSB, 2019/01/08, HDFFV-10443) - - C++ Library: - ------------ - - Added new function to the C++ interface - - Added wrapper for H5Ovisit2: - H5Object::visit() - - (BMR - 2019/02/14, HDFFV-10532) - - - Java Library: - ---------------- - - Rewrote the JNI error handling to be much cleaner - - (JTH - 2019/02/12) - - - Add new functions to java interface - - Added wrappers for: - H5Fset_libver_bounds - H5Fget_dset_no_attrs_hint/H5Fset_dset_no_attrs_hint - H5Pget_dset_no_attrs_hint/H5Pset_dset_no_attrs_hint - - (ADB - 2019/01/07, HDFFV-10664) - - - Fix java unit tests when Time is a natural number - - Time substitution in java/test/junit.sh.in doesn't - handle the case when Time is a natural number. Fixed - the regular expression. - - (ADB - 2019/01/07, HDFFV-10674) - - - Duplicate the data read/write functions of Datasets for Attributes. - - Region references could not be displayed for attributes as they could - for datasets. Datasets had overloaded read and write functions for different - datatypes that were not available for attributes. After adding similar - functions, attribute region references work normally. - - (ADB - 2018/12/12, HDFVIEW-4) - - - Tools: - ------ - - The h5repart -family-to-sec2 argument was changed to -family-to-single - - In order to better support other single-file VFDs which could work with - h5repart, the -family-to-sec2 argument was renamed to -family-to-single. - This is just a name change and the functionality of the argument has not - changed. - - The -family-to-sec2 argument has been kept for backwards-compatibility. - This argument should be considered deprecated. - - (DER - 2018/11/14, HDFFV-10633) - - -Bug Fixes since HDF5-1.10.4 release -================================== - - Library - ------- - - Fix hangs with collective metadata reads during chunked dataset I/O - - In the parallel library, it was discovered that when a particular - sequence of operations following a pattern of: - - "write to chunked dataset" -> "flush file" -> "read from dataset" - - occurred with collective metadata reads enabled, hangs could be - observed due to certain MPI ranks not participating in the collective - metadata reads. - - To fix the issue, collective metadata reads are now disabled during - chunked dataset raw data I/O. - - (JTH - 2019/02/11, HDFFV-10563, HDFFV-10688) - - - Performance issue when closing an object - - The slow down is due to the search of the "tag_list" to find - out the "corked" status of an object and "uncork" it if so. - - Improve performance by skipping the search of the "tag_list" - if there are no "corked" objects when closing an object. 
- - (VC - 2019/02/06) - - - Uninitialized bytes from a type conversion buffer could be written - to disk in H5Dwrite calls where type conversion takes place - and the type conversion buffer was created by the HDF5 library. - - When H5Dwrite is called and datatype conversion must be performed, - the library will create a temporary buffer for type conversion if - one is not provided by the user via H5Pset_buffer. This internal - buffer is allocated via malloc and contains uninitialized data. In - some datatype conversions (float to long double, possibly others), - some of this uninitialized data could be written to disk. - - This was flagged by valgrind in the dtransform test and does not - appear to be a common occurrence (it is flagged in one test out - of the entire HDF5 test suite). - - Switching to calloc fixed the problem. - - (DER - 2019/02/03, HDFFV-10694) - - - There was missing protection against division by zero reported to - The HDF Group as issue #CVE-2018-17434. - - Protection against division by zero was added to address the issue - #CVE-2018-17434. - - (BMR - 2019/01/29, HDFFV-10586) - - - The issue CVE-2018-17437 was reported to The HDF Group - - Although CVE-2018-17437 reported a memory leak, the actual issue - was invalid read. It was found that the attribute name length - in an attribute message was corrupted, which caused the buffer - pointer to be advanced too far and later caused an invalid read. - - A check was added to detect when the attribute name or its length - was corrupted and report the potential of data corruption. - - (BMR - 2019/01/29, HDFFV-10588) - - - H5Ewalk did not stop when it was supposed to - - H5Ewalk was supposed to stop when the callback function stopped - even though the errors in the stack were not all visited, but it - did not. This problem is now fixed. - - (BMR - 2019/01/29, HDFFV-10684) - - - Revert H5Oget_info* and H5Ovisit* functions - - In 1.10.3 new H5Oget_info*2 and H5Ovisit*2 functions were - added for performance. Inadvertently, the original functions; - H5Oget_info, - H5Oget_info_by_name, - H5Oget_info_by_idx, - H5Ovisit, - H5Ovisit_by_name - were versioned to H5Oget_info*1 and H5Ovisit*1. This - broke the API compatibility for a maintenance release. The - original functions have been restored. - - (ADB - 2019/01/24, HDFFV-10686) - - - Fixed a potential invalid memory access and failure that could occur when - decoding an unknown object header message (from a future version of the - library). - - (NAF - 2019/01/07) - - - Deleting attributes in dense storage - - The library aborts with "infinite loop closing library" after - attributes in dense storage are created and then deleted. - - When deleting the attribute nodes from the name index v2 B-tree, - if an attribute is found in the intermediate B-tree nodes, - which may be merged/redistributed in the process, we need to - free the dynamically allocated spaces for the intermediate - decoded attribute. - - (VC - 2018/12/26, HDFFV-10659) - - - There was missing protection against division by zero reported to - The HDF Group as issue #CVE-2018-17233. - - Protection against division by zero was added to address the issue - #CVE-2018-17233. In addition, several similar occurrences in the same - file were fixed as well. - - (BMR - 2018/12/23, HDFFV-10577) - - - Fixed an issue where the parallel filters tests would fail - if zlib was not available on the system. Until support can - be added in the tests for filters beyond gzip/zlib, the tests - will be skipped if zlib is not available. 
- - (JTH - 2018/12/05) - - - A bug was discovered in the parallel library where an application - would eventually consume all of the available MPI communicators - when continually writing to a compressed dataset in parallel. This - was due to internal copies of an HDF5 File Access Property List, - which each contained a copy of the MPI communicator, not being - closed at the end of each write operation. This problem was - exacerbated by larger numbers of processors. - - (JTH - 2018/12/05, HDFFV-10629) - - - Fortran - -------- - - Fixed issue with Fortran not returning h5o_info_t field values - meta_size%attr%index_size and meta_size%attr%heap_size. - - (MSB, 2019/01/08, HDFFV-10443) - - - Added symbolic links libhdf5_hl_fortran.so to libhdf5hl_fortran.so and - libhdf5_hl_fortran.a to libhdf5hl_fortran.a in hdf5/lib directory for - autotools installs. These were added to match the name of the files - installed by cmake and the general pattern of hl lib files. We will - change the names of the installed lib files to the matching name in - the next major release. - - (LRK - 2019/01/04, HDFFV-10596) - - - Made Fortran specific subroutines PRIVATE in generic procedures. - - Affected generic procedures were functions in H5A, H5D, H5P, H5R and H5T. - - (MSB, 2018/12/04, HDFFV-10511) - - - Testing - ------- - - Fixed a test failure in testpar/t_dset.c caused by - the test trying to use the parallel filters feature - on MPI-2 implementations. - - (JTH, 2019/2/7) - - -Supported Platforms -=================== - - Linux 2.6.32-696.16.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Version 4.9.3, Version 5.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.0.098 Build 20160721 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 7 x64 Visual Studio 2013 - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Visual Studio 2015 w/ Intel C, Fortran 2018 (cmake) - Visual Studio 2015 w/ MSMPI 8 (cmake) - - Windows 10 Visual Studio 2015 w/ Intel Fortran 18 (cmake) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 18 (cmake) - Visual Studio 2017 w/ Intel Fortran 18 (cmake) - - Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.1 from Xcode 7.0 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.6 Apple clang/clang++ version 7.3.0 from Xcode 7.3 - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (osx1011dev/osx1011test) Intel icc/icpc/ifort version 16.0.2 - - MacOS High Sierra 10.13.6 Apple LLVM version 10.0.0 (clang/clang++-1000.10.44.4) - 64-bit gfortran GNU Fortran (GCC) 8.3.0 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel 
-Solaris2.11 32-bit n y/y n y y y -Solaris2.11 64-bit n y/n n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y y y y y -Windows 7 Cygwin n y/n n y y y -Windows 7 x64 Cygwin n y/n n y y y -Windows 10 y y/y n y y y -Windows 10 x64 y y/y n y y y -Mac OS X Yosemite 10.10.5 64-bit n y/y n y y y -Mac OS X El Capitan 10.11.6 64-bit n y/y n y y y -MacOS High Sierra 10.13.6 64-bit n y/y n y y y -CentOS 7.2 Linux 3.10.0 x86_64 PGI n y/y n y y y -CentOS 7.2 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.2 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-573.18.1.el6.ppc64 n y/y n y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.11 32-bit y y y y -Solaris2.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 7 x64 Cygwin n n n y -Windows 10 y y y y -Windows 10 x64 y y y y -Mac OS X Yosemite 10.10.5 64-bit y y y y -Mac OS X El Capitan 10.11.6 64-bit y y y y -MacOS High Sierra 10.13.6 64-bit y y y y -CentOS 7.2 Linux 3.10.0 x86_64 PGI y y y n -CentOS 7.2 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.2 Linux 3.10.0 x86_64 Intel y y y n -Linux 2.6.32-573.18.1.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following configurations are not supported but have been tested for this release. - - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (mayll/platypus) Version 4.4.7 20120313 - Version 4.9.3, 5.3.0, 6.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 17.10-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.4.196 Build 20170411 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 3.10.0-327.18.2.el7 GNU C (gcc) and C++ (g++) compilers - #1 SMP x86_64 GNU/Linux Version 4.8.5 20150623 (Red Hat 4.8.5-4) - (jelly) with NAG Fortran Compiler Release 6.1(Tozai) - GCC Version 7.1.0 - MPICH 3.2-GCC-4.9.3 - MPICH 3.2.1-GCC-7.2.0-2.29 - OpenMPI 2.1.5-GCC-7.2.0-2.29 - Intel(R) C (icc) and C++ (icpc) compilers - Version 17.0.0.098 Build 20160721 - with NAG Fortran Compiler Release 6.1(Tozai) - - Linux 3.10.0-327.10.1.el7 MPICH 3.2 compiled with GCC 5.3.0 - #1 SMP x86_64 GNU/Linux - (moohan) - - Fedora 29 4.20.10-200.fc29.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc, g++ (GCC) 8.2.1 20181215 - (Red Hat 8.2.1-6) - GNU Fortran (GCC) 8.2.1 20181215 - (Red Hat 8.2.1-6) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2008 - - -Known Problems -============== - - At present, metadata cache images may not be generated by parallel - applications. Parallel applications can read files with metadata cache - images, but since this is a collective operation, a deadlock is possible - if one or more processes do not participate. - - Three tests fail with OpenMPI 3.0.0/GCC-7.2.0-2.29: - testphdf5 (ecdsetw, selnone, cchunk1, cchunk3, cchunk4, and actualio) - t_shapesame (sscontig2) - t_pflush1/fails on exit - The first two tests fail attempting collective writes. - - CPP ptable test fails on VS2017 with Intel compiler, JIRA issue: HDFFV-10628. - This test will pass with VS2015 with Intel compiler. - - Older MPI libraries such as OpenMPI 2.0.1 and MPICH 2.1.5 were tested - while attempting to resolve the Jira issue: HDFFV-10540. - The known problems of reading or writing > 2GBs when using MPI-2 was - partially resolved with the MPICH library. 
The proposed support recognizes - IO operations > 2GB and if the datatype is not a derived type, the library - breaks the IO into chunks which can be input or output with the existing - MPI 2 limitations, i.e. size reporting and function API size/count - arguments are restricted to be 32 bit integers. For derived types larger - than 2GB, MPICH 2.1.5 fails while attempting to read or write data. - OpenMPI in contrast, implements MPI-3 APIs even in the older releases - and thus does not suffer from the 32 bit size limitation described here. - OpenMPI releases prior to v3.1.3 appear to have other datatype issues however, - e.g. within a single parallel test (testphdf5) the subtests (cdsetr, eidsetr) - report data verification errors before eventually aborting. - The most recent versions of OpenMPI (v3.1.3 or newer) have evidently - resolved these issues and parallel HDF5 testing does not currently report - errors though occasional hangs have been observed. - - Known problems in previous releases can be found in the HISTORY*.txt files - in the HDF5 source. Please report any new problems found to - help@hdfgroup.org. - - -CMake vs. Autotools installations -================================= -While both build systems produce similar results, there are differences. -Each system produces the same set of folders on linux (only CMake works -on standard Windows); bin, include, lib and share. Autotools places the -COPYING and RELEASE.txt file in the root folder, CMake places them in -the share folder. - -The bin folder contains the tools and the build scripts. Additionally, CMake -creates dynamic versions of the tools with the suffix "-shared". Autotools -installs one set of tools depending on the "--enable-shared" configuration -option. - build scripts - ------------- - Autotools: h5c++, h5cc, h5fc - CMake: h5c++, h5cc, h5hlc++, h5hlcc - -The include folder holds the header files and the fortran mod files. CMake -places the fortran mod files into separate shared and static subfolders, -while Autotools places one set of mod files into the include folder. Because -CMake produces a tools library, the header files for tools will appear in -the include folder. - -The lib folder contains the library files, and CMake adds the pkgconfig -subfolder with the hdf5*.pc files used by the bin/build scripts created by -the CMake build. CMake separates the C interface code from the fortran code by -creating C-stub libraries for each Fortran library. In addition, only CMake -installs the tools library. The names of the szip libraries are different -between the build systems. - -The share folder will have the most differences because CMake builds include -a number of CMake specific files for support of CMake's find_package and support -for the HDF5 Examples CMake project. - -%%%%1.10.4%%%% - -HDF5 version 1.10.4 released on 2018-10-05 -================================================================================ - - -INTRODUCTION - -This document describes the differences between this release and the previous -HDF5 release. It contains information on the platforms tested and known -problems in this release. For more details check the HISTORY*.txt files in the -HDF5 source. - -Note that documentation in the links below will be updated at the time of each -final release. 
- -Links to HDF5 documentation can be found on The HDF5 web page: - - https://portal.hdfgroup.org/display/HDF5/HDF5 - -The official HDF5 releases can be obtained from: - - https://www.hdfgroup.org/downloads/hdf5/ - -Changes from Release to Release and New Features in the HDF5-1.10.x release series -can be found at: - - https://portal.hdfgroup.org/display/HDF5/HDF5+Application+Developer%27s+Guide - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS - -- Bug Fixes since HDF5-1.10.3 -- Supported Platforms -- Tested Configuration Features Summary -- More Tested Platforms -- Known Problems -- CMake vs. Autotools installations - - -New Features -============ - - Configuration: - ------------- - - Add toolchain and cross-compile support - - Added info on using a toolchain file to INSTALL_CMAKE.txt. A - toolchain file is also used in cross-compiling, which requires - CMAKE_CROSSCOMPILING_EMULATOR to be set. To help with cross-compiling - the fortran configure process, the HDF5UseFortran.cmake file macros - were improved. Fixed a Fortran configure file issue that incorrectly - used #cmakedefine instead of #define. - - (ADB - 2018/10/04, HDFFV-10594) - - - Add warning flags for Intel compilers - - Identified Intel compiler specific warnings flags that should be used - instead of GNU flags. - - (ADB - 2018/10/04, TRILABS-21) - - - Add default rpath to targets - - Default rpaths should be set in shared executables and - libraries to allow the use of loading dependent libraries - without requiring LD_LIBRARY_PATH to be set. The default - path should be relative using @rpath on osx and $ORIGIN - on linux. Windows is not affected. - - (ADB - 2018/09/26, HDFFV-10594) - - Library: - -------- - - Allow pre-generated H5Tinit.c and H5make_libsettings.c to be used. - - Rather than always running H5detect and generating H5Tinit.c and - H5make_libsettings.c, supply a location for those files. - - (ADB - 2018/09/18, HDFFV-10332) - - -Bug Fixes since HDF5-1.10.3 release -================================== - - Library - ------- - - Allow H5detect and H5make_libsettings to take a file as an argument. - - Rather than only writing to stdout, add a command argument to name - the file that H5detect and H5make_libsettings will use for output. - Without an argument, stdout is still used, so backwards compatibility - is maintained. - - (ADB - 2018/09/05, HDFFV-9059) - - - A bug was discovered in the parallel library where an application - would hang if a collective read/write of a chunked dataset occurred - when collective metadata reads were enabled and some of the ranks - had no selection in the dataset's dataspace. The ranks which had no - selection in the dataset's dataspace called H5D__chunk_addrmap() to - retrieve the lowest chunk address in the dataset. This is because we - require reads/writes to be performed in strictly non-decreasing order - of chunk address in the file. - - When the chunk index used was a version 1 or 2 B-tree, these - non-participating ranks would issue a collective MPI_Bcast() call - that the participating ranks would not issue, causing the hang. Since - the non-participating ranks are not actually reading/writing anything, - the H5D__chunk_addrmap() call can be safely removed and the address used - for the read/write can be set to an arbitrary number (0 was chosen). 
- - (JTH - 2018/08/25, HDFFV-10501) - - Java Library: - ---------------- - - JNI native library dependencies - - The build for the hdf5_java native library used the wrong - hdf5 target library for CMake builds. Correcting the hdf5_java - library to build with the shared hdf5 library required testing - paths to change also. - - (ADB - 2018/08/31, HDFFV-10568) - - - Java iterator callbacks - - Change global callback object to a small stack structure in order - to fix a runtime crash. This crash was discovered when iterating - through a file with nested group members. The global variable - visit_callback is overwritten when recursion starts. When recursion - completes, visit_callback will be pointing to the wrong callback method. - - (ADB - 2018/08/15, HDFFV-10536) - - - Java HDFLibraryException class - - Change parent class from Exception to RuntimeException. - - (ADB - 2018/07/30, HDFFV-10534) - - - JNI Read and Write - - Refactored variable-length functions, H5DreadVL and H5AreadVL, - to correct dataset and attribute reads. New write functions, - H5DwriteVL and H5AwriteVL, are under construction. - - (ADB - 2018/06/02, HDFFV-10519) - - -Supported Platforms -=================== - - Linux 2.6.32-696.16.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Version 4.9.3, Version 5.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.0.098 Build 20160721 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Visual Studio 2015 w/ Intel C, Fortran 2017 (cmake) - Visual Studio 2015 w/ MSMPI 8 (cmake) - - Windows 10 Visual Studio 2015 w/ Intel Fortran 18 (cmake) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 18 (cmake) - Visual Studio 2017 w/ Intel Fortran 18 (cmake) - - Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.1 from Xcode 7.0 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.6 Apple clang/clang++ version 7.3.0 from Xcode 7.3 - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (osx1011dev/osx1011test) Intel icc/icpc/ifort version 16.0.2 - - Mac OS Sierra 10.12.6 Apple LLVM version 8.1.0 (clang/clang++-802.0.42) - 64-bit gfortran GNU Fortran (GCC) 7.1.0 - (kite) Intel icc/icpc/ifort version 17.0.2 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.11 32-bit n y/y n y y y -Solaris2.11 64-bit n y/n n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y y y y y -Windows 7 Cygwin n y/n n y y y -Windows 7 x64 Cygwin n y/n n y y y -Windows 10 y y/y n y y y -Windows 
10 x64 y y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemite 10.10.5 64-bit n y/y n y y y -Mac OS X El Capitan 10.11.6 64-bit n y/y n y y y -Mac OS Sierra 10.12.6 64-bit n y/y n y y y -CentOS 7.2 Linux 3.10.0 x86_64 PGI n y/y n y y y -CentOS 7.2 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.2 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-573.18.1.el6.ppc64 n y/y n y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.11 32-bit y y y y -Solaris2.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 7 x64 Cygwin n n n y -Windows 10 y y y y -Windows 10 x64 y y y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemite 10.10.5 64-bit y n y y -Mac OS X El Capitan 10.11.6 64-bit y n y y -Mac OS Sierra 10.12.6 64-bit y n y y -CentOS 7.2 Linux 3.10.0 x86_64 PGI y y y n -CentOS 7.2 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.2 Linux 3.10.0 x86_64 Intel y y y n -Linux 2.6.32-573.18.1.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (mayll/platypus) Version 4.4.7 20120313 - Version 4.9.3, 5.3.0, 6.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 17.10-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.4.196 Build 20170411 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 3.10.0-327.18.2.el7 GNU C (gcc) and C++ (g++) compilers - #1 SMP x86_64 GNU/Linux Version 4.8.5 20150623 (Red Hat 4.8.5-4) - (jelly) with NAG Fortran Compiler Release 6.1(Tozai) - GCC Version 7.1.0 - OpenMPI 3.0.0-GCC-7.2.0-2.29, - 3.1.0-GCC-7.2.0-2.29 - Intel(R) C (icc) and C++ (icpc) compilers - Version 17.0.0.098 Build 20160721 - with NAG Fortran Compiler Release 6.1(Tozai) - - Linux 3.10.0-327.10.1.el7 MPICH 3.2 compiled with GCC 5.3.0 - #1 SMP x86_64 GNU/Linux - (moohan) - - Linux 2.6.32-573.18.1.el6.ppc64 MPICH mpich 3.1.4 compiled with - #1 SMP ppc64 GNU/Linux IBM XL C/C++ for Linux, V13.1 - (ostrich) and IBM XL Fortran for Linux, V15.1 - - Debian 8.4 3.16.0-4-amd64 #1 SMP Debian 3.16.36-1 x86_64 GNU/Linux - gcc, g++ (Debian 4.9.2-10) 4.9.2 - GNU Fortran (Debian 4.9.2-10) 4.9.2 - (cmake and autotools) - - Fedora 24 4.7.2-201.fc24.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc, g++ (GCC) 6.1.1 20160621 - (Red Hat 6.1.1-3) - GNU Fortran (GCC) 6.1.1 20160621 - (Red Hat 6.1.1-3) - (cmake and autotools) - - Ubuntu 16.04.1 4.4.0-38-generic #57-Ubuntu SMP x86_64 GNU/Linux - gcc, g++ (Ubuntu 5.4.0-6ubuntu1~16.04.2) - 5.4.0 20160609 - GNU Fortran (Ubuntu 5.4.0-6ubuntu1~16.04.2) - 5.4.0 20160609 - (cmake and autotools) - - -Known Problems -============== - - At present, metadata cache images may not be generated by parallel - applications. Parallel applications can read files with metadata cache - images, but since this is a collective operation, a deadlock is possible - if one or more processes do not participate. - - Three tests fail with OpenMPI 3.0.0/GCC-7.2.0-2.29: - testphdf5 (ecdsetw, selnone, cchunk1, cchunk3, cchunk4, and actualio) - t_shapesame (sscontig2) - t_pflush1/fails on exit - The first two tests fail attempting collective writes. - - Known problems in previous releases can be found in the HISTORY*.txt files - in the HDF5 source. 
Please report any new problems found to - help@hdfgroup.org. - - -CMake vs. Autotools installations -================================= -While both build systems produce similar results, there are differences. -Each system produces the same set of folders on linux (only CMake works -on standard Windows); bin, include, lib and share. Autotools places the -COPYING and RELEASE.txt file in the root folder, CMake places them in -the share folder. - -The bin folder contains the tools and the build scripts. Additionally, CMake -creates dynamic versions of the tools with the suffix "-shared". Autotools -installs one set of tools depending on the "--enable-shared" configuration -option. - build scripts - ------------- - Autotools: h5c++, h5cc, h5fc - CMake: h5c++, h5cc, h5hlc++, h5hlcc - -The include folder holds the header files and the fortran mod files. CMake -places the fortran mod files into separate shared and static subfolders, -while Autotools places one set of mod files into the include folder. Because -CMake produces a tools library, the header files for tools will appear in -the include folder. - -The lib folder contains the library files, and CMake adds the pkgconfig -subfolder with the hdf5*.pc files used by the bin/build scripts created by -the CMake build. CMake separates the C interface code from the fortran code by -creating C-stub libraries for each Fortran library. In addition, only CMake -installs the tools library. The names of the szip libraries are different -between the build systems. - -The share folder will have the most differences because CMake builds include -a number of CMake specific files for support of CMake's find_package and support -for the HDF5 Examples CMake project. - -%%%%1.10.3%%%% - -HDF5 version 1.10.3 released on 2018-08-21 -================================================================================ - - -INTRODUCTION - -This document describes the differences between this release and the previous -HDF5 release. It contains information on the platforms tested and known -problems in this release. For more details check the HISTORY*.txt files in the -HDF5 source. - -Note that documentation in the links below will be updated at the time of each -final release. - -Links to HDF5 documentation can be found on The HDF5 web page: - - https://portal.hdfgroup.org/display/HDF5/HDF5 - -The official HDF5 releases can be obtained from: - - https://www.hdfgroup.org/downloads/hdf5/ - -Changes from Release to Release and New Features in the HDF5-1.10.x release series -can be found at: - - https://portal.hdfgroup.org/display/HDF5/HDF5+Application+Developer%27s+Guide - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS - -- New Features -- Bug Fixes since HDF5-1.10.2 -- Supported Platforms -- Tested Configuration Features Summary -- More Tested Platforms -- Known Problems -- CMake vs. Autotools installations - - -New Features -============ - - Library - ------- - - Moved the H5DOread/write_chunk() API calls to H5Dread/write_chunk() - - The functionality of the direct chunk I/O calls in the high-level - library has been moved to the H5D package in the main library. This - will allow using those functions without building the high-level - library. The parameters and functionality of the H5D calls are - identical to the H5DO calls. - - The original H5DO high-level API calls have been retained, though - they are now just wrappers for the H5D calls. 
They are marked as - deprecated and are only available when the library is built with - deprecated functions. New code should use the H5D calls for this - reason. - - As a part of this work, the following symbols from H5Dpublic.h are no - longer used: - - H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME - H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME - H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_NAME - H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME - H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME - H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME - H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME - - And properties with these names are no longer stored in the dataset - transfer property lists. The symbols are still defined in H5Dpublic.h, - but only when the library is built with deprecated symbols. - - (DER - 2018/05/04) - - Configuration: - ------------- - - Add missing USE_110_API_DEFAULT option. - - Option USE_110_API_DEFAULT sets the default version of - versioned APIs. The bin/makevers perl script did not set - the maxidx variable correctly when the 1.10 branch was - created. This caused the versioning process to always use - the latest version of any API. - - (ADB - 2018/08/17, HDFFV-10552) - - - Added configuration checks for the following MPI functions: - - MPI_Mprobe - Used for the Parallel Compression feature - MPI_Imrecv - Used for the Parallel Compression feature - - MPI_Get_elements_x - Used for the "big Parallel I/O" feature - MPI_Type_size_x - Used for the "big Parallel I/O" feature - - (JTH - 2018/08/02, HDFFV-10512) - - - Added section to the libhdf5.settings file to indicate - the status of the Parallel Compression and "big Parallel I/O" - features. - - (JTH - 2018/08/02, HDFFV-10512) - - - Add option to execute swmr shell scripts from CMake. - - Option TEST_SHELL_SCRIPTS redirects processing into a - separate ShellTests.cmake file for UNIX types. The tests - execute the shell scripts if a SH program is found. - - (ADB - 2018/07/16) - - - C++ Library: - ------------ - - New wrappers - - Added the following items: - - + Class DSetAccPropList for the dataset access property list. - - + Wrapper for H5Dget_access_plist to class DataSet - // Gets the access property list of this dataset. - DSetAccPropList getAccessPlist() const; - - + Wrappers for H5Pset_chunk_cache and H5Pget_chunk_cache to class DSetAccPropList - // Sets the raw data chunk cache parameters. - void setChunkCache(size_t rdcc_nslots, size_t rdcc_nbytes, double rdcc_w0) - - // Retrieves the raw data chunk cache parameters. - void getChunkCache(size_t &rdcc_nslots, size_t &rdcc_nbytes, double &rdcc_w0) - - + New operator!= to class DataType (HDFFV-10472) - // Determines whether two datatypes are not the same. - bool operator!=(const DataType& compared_type) - - + Wrappers for H5Oget_info2, H5Oget_info_by_name2, and H5Oget_info_by_idx2 - (HDFFV-10458) - - // Retrieves information about an HDF5 object. - void getObjinfo(H5O_info_t& objinfo, unsigned fields = H5O_INFO_BASIC) const; - - // Retrieves information about an HDF5 object, given its name. - void getObjinfo(const char* name, H5O_info_t& objinfo, - unsigned fields = H5O_INFO_BASIC, - const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const; - void getObjinfo(const H5std_string& name, H5O_info_t& objinfo, - unsigned fields = H5O_INFO_BASIC, - const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const; - - // Retrieves information about an HDF5 object, given its index. 
- void getObjinfo(const char* grp_name, H5_index_t idx_type, - H5_iter_order_t order, hsize_t idx, H5O_info_t& objinfo, - unsigned fields = H5O_INFO_BASIC, - const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const; - void getObjinfo(const H5std_string& grp_name, H5_index_t idx_type, - H5_iter_order_t order, hsize_t idx, H5O_info_t& objinfo, - unsigned fields = H5O_INFO_BASIC, - const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const; - - (BMR - 2018/07/22, HDFFV-10150, HDFFV-10458, HDFFV-1047) - - - Java Library: - ---------------- - - Java HDFLibraryException class - - Change parent class from Exception to RuntimeException. - - (ADB - 2018/07/30, HDFFV-10534) - - - JNI Read and Write - - Refactored variable-length functions, H5DreadVL and H5AreadVL, - to correct dataset and attribute reads. New write functions, - H5DwriteVL and H5AwriteVL, are under construction. - - (ADB - 2018/06/02, HDFFV-10519) - - -Bug Fixes since HDF5-1.10.2 release -================================== - - Library - ------- - - Performance issue with H5Oget_info - - H5Oget_info family of routines retrieves information for an object such - as object type, access time, number of attributes, and storage space etc. - Retrieving all such information regardless is an overkill and causes - performance issue when doing so for many objects. - - Add an additional parameter "fields" to the the H5Oget_info family of routines - indicating the type of information to be retrieved. The same is done to - the H5Ovisit family of routines which recursively visits an object - returning object information in a callback function. Both sets of routines - are versioned and the corresponding compatibility macros are added. - - The version 2 names of the two sets of routines are: - (1) H5Oget_info2, H5Oget_info_by_idx2, H5Oget_info_by_name2 - (2) H5Ovisit2, H5Ovisit_by_name2 - - (VC - 2018/08/15, HDFFV-10180) - - - Test failure due to metadata size in test/vds.c - - The size of metadata from test_api_get_ex_dcpl() in test/vds.c is not as expected - because the latest format should be used when encoding the layout for VDS. - - Set the latest format in a temporary fapl and pass the setting to the routines that - encode the dataset selection for VDS. - - (VC - 2018/08/14 HDFFV-10469) - - - Java HDF5LibraryException class - - The error minor and major values would be lost after the - constructor executed. - - Created two local class variables to hold the values obtained during - execution of the constructor. Refactored the class functions to retrieve - the class values rather then calling the native functions. - The native functions were renamed and called only during execution - of the constructor. - Added error checking to calling class constructors in JNI classes. - - (ADB - 2018/08/06, HDFFV-10544) - - - Added checks of the defined MPI_VERSION to guard against usage of - MPI-3 functions in the Parallel Compression and "big Parallel I/O" - features when HDF5 is built with MPI-2. Previously, the configure - step would pass but the build itself would fail when it could not - locate the MPI-3 functions used. - - As a result of these new checks, HDF5 can again be built with MPI-2, - but the Parallel Compression feature will be disabled as it relies - on the MPI-3 functions used. 
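      The version guard described above amounts to a compile-time check on
      the MPI standard level. A minimal sketch of the pattern (not HDF5's
      actual internal code; the function name probe_message is hypothetical):

          /* Guarding an MPI-3 call so the code still builds against MPI-2 */
          #include <mpi.h>
          #include <stdio.h>

          int probe_message(MPI_Comm comm, int source, int tag)
          {
          #if MPI_VERSION >= 3
              /* MPI-3 path: matched probe, as used by Parallel Compression */
              MPI_Message msg;
              MPI_Status  status;
              int         count;
              MPI_Mprobe(source, tag, comm, &msg, &status);
              MPI_Get_count(&status, MPI_BYTE, &count);
              /* A real caller would complete the match with MPI_Mrecv()/MPI_Imrecv() */
              return count;
          #else
              /* MPI-2 build: the feature needing MPI_Mprobe is simply disabled */
              (void)comm; (void)source; (void)tag;
              fprintf(stderr, "built against MPI-2: matched-probe path disabled\n");
              return -1;
          #endif
          }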
- - (JTH - 2018/08/02, HDFFV-10512) - - - User's patches: CVEs - - The following patches have been applied: - - CVE-2018-11202 - NULL pointer dereference was discovered in - H5S_hyper_make_spans in H5Shyper.c (HDFFV-10476) - https://security-tracker.debian.org/tracker/CVE-2018-11202 - https://cve.mitre.org/cgi-bin/cvename.cgi?name=3DCVE-2018-11202 - - CVE-2018-11203 - A division by zero was discovered in - H5D__btree_decode_key in H5Dbtree.c (HDFFV-10477) - https://security-tracker.debian.org/tracker/CVE-2018-11203 - https://cve.mitre.org/cgi-bin/cvename.cgi?name=3DCVE-2018-11203 - - CVE-2018-11204 - A NULL pointer dereference was discovered in - H5O__chunk_deserialize in H5Ocache.c (HDFFV-10478) - https://security-tracker.debian.org/tracker/CVE-2018-11204 - https://cve.mitre.org/cgi-bin/cvename.cgi?name=3DCVE-2018-11204 - - CVE-2018-11206 - An out of bound read was discovered in - H5O_fill_new_decode and H5O_fill_old_decode in H5Ofill.c - (HDFFV-10480) - https://security-tracker.debian.org/tracker/CVE-2018-11206 - https://cve.mitre.org/cgi-bin/cvename.cgi?name=3DCVE-2018-11206 - - CVE-2018-11207 - A division by zero was discovered in - H5D__chunk_init in H5Dchunk.c (HDFFV-10481) - https://security-tracker.debian.org/tracker/CVE-2018-11207 - https://cve.mitre.org/cgi-bin/cvename.cgi?name=3DCVE-2018-11207 - - (BMR - 2018/7/22, PR#s: 1134 and 1139, - HDFFV-10476, HDFFV-10477, HDFFV-10478, HDFFV-10480, HDFFV-10481) - - - H5Adelete - - H5Adelete failed when deleting the last "large" attribute that - is stored densely via fractal heap/v2 b-tree. - - After removing the attribute, update the ainfo message. If the - number of attributes goes to zero, remove the message. - - (VC - 2018/07/20, HDFFV-9277) - - - A bug was discovered in the parallel library which caused partial - parallel reads of filtered datasets to return incorrect data. The - library used the incorrect dataspace for each chunk read, causing - the selection used in each chunk to be wrong. - - The bug was not caught during testing because all of the current - tests which do parallel reads of filtered data read all of the data - using an H5S_ALL selection. Several tests were added which exercise - partial parallel reads. - - (JTH - 2018/07/16, HDFFV-10467) - - - A bug was discovered in the parallel library which caused parallel - writes of filtered datasets to trigger an assertion failure in the - file free space manager. - - This occurred when the filter used caused chunks to repeatedly shrink - and grow over the course of several dataset writes. The previous chunk - information, such as the size of the chunk and the offset in the file, - was being cached and not updated after each write, causing the next write - to the chunk to retrieve the incorrect cached information and run into - issues when reallocating space in the file for the chunk. - - (JTH - 2018/07/16, HDFFV-10509) - - - A bug was discovered in the parallel library which caused the - H5D__mpio_array_gatherv() function to allocate too much memory. - - When the function is called with the 'allgather' parameter set - to a non-true value, the function will receive data from all MPI - ranks and gather it to the single rank specified by the 'root' - parameter. However, the bug in the function caused memory for - the received data to be allocated on all MPI ranks, not just the - singular rank specified as the receiver. In some circumstances, - this would cause an application to fail due to the large amounts - of memory being allocated. 
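      The over-allocation followed a common MPI pitfall: sizing the receive
      buffer on every rank instead of only on the root of the gather. Below
      is a generic illustration of the corrected pattern; it is not the
      H5D__mpio_array_gatherv() code itself, and gather_to_root is a
      hypothetical helper.

          /* Gather variable-length contributions; only the root allocates
           * the receive buffer.  Returns the gathered array on the root,
           * NULL on all other ranks. */
          #include <mpi.h>
          #include <stdlib.h>

          int *gather_to_root(int *local, int local_count, int root, MPI_Comm comm)
          {
              int rank, size;
              MPI_Comm_rank(comm, &rank);
              MPI_Comm_size(comm, &size);

              int *counts = NULL, *displs = NULL, *recvbuf = NULL;

              if (rank == root) {
                  counts = malloc((size_t)size * sizeof(int));
                  displs = malloc((size_t)size * sizeof(int));
              }

              /* Every rank reports how much it contributes */
              MPI_Gather(&local_count, 1, MPI_INT, counts, 1, MPI_INT, root, comm);

              if (rank == root) {
                  int total = 0;
                  for (int i = 0; i < size; i++) {
                      displs[i] = total;
                      total    += counts[i];
                  }
                  /* The receive buffer exists only where the data lands */
                  recvbuf = malloc((size_t)total * sizeof(int));
              }

              MPI_Gatherv(local, local_count, MPI_INT,
                          recvbuf, counts, displs, MPI_INT, root, comm);

              if (rank == root) {
                  free(counts);
                  free(displs);
              }
              return recvbuf;
          }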
- - (JTH - 2018/07/16, HDFFV-10467) - - - Error checks in h5stat and when decoding messages - - h5stat exited with seg fault/core dumped when - errors are encountered in the internal library. - - Add error checks and --enable-error-stack option to h5stat. - Add range checks when decoding messages: old fill value, old - layout and refcount. - - (VC - 2018/07/11, HDFFV-10333) - - - If an HDF5 file contains a malformed compound datatype with a - suitably large offset, the type conversion code can run off - the end of the type conversion buffer, causing a segmentation - fault. - - This issue was reported to The HDF Group as issue #CVE-2017-17507. - - NOTE: The HDF5 C library cannot produce such a file. This condition - should only occur in a corrupt (or deliberately altered) file - or a file created by third-party software. - - THE HDF GROUP WILL NOT FIX THIS BUG AT THIS TIME - - Fixing this problem would involve updating the publicly visible - H5T_conv_t function pointer typedef and versioning the API calls - which use it. We normally only modify the public API during - major releases, so this bug will not be fixed at this time. - - (DER - 2018/02/26, HDFFV-10356) - - - Configuration - ------------- - - Applied patches to address Cywin build issues - - There were three issues for Cygwin builds: - - Shared libs were not built. - - The -std=c99 flag caused a SIG_SETMASK undeclared error. - - Undefined errors when buildbing test shared libraries. - - Patches to address these issues were received and incorporated in this version. - - (LRK - 2018/07/18, HDFFV-10475) - - - The --enable-debug/production configure flags are listed as 'deprecated' - when they should really be listed as 'removed'. - - In the autotools overhaul several years ago, we removed these flags and - implemented a new --enable-build-mode= flag. This was done because we - changed the semantics of the modes and didn't want users to silently - be exposed to them. The newer system is also more flexible and us to - add other modes (like 'clean'). - - The --enable-debug/production flags are now listed as removed. - - (DER - 2018/05/31, HDFFV-10505) - - - Moved the location of gcc attribute. - - The gcc attribute(no_sanitize), named as the macro HDF_NO_UBSAN, - was located after the function name. Builds with GCC 7 did not - indicate any problem, but GCC 8 issued errors. Moved the - attribute before the function name, as required. - - (ADB - 2018/05/22, HDFFV-10473) - - - Reworked java test suite into individual JUnit tests. - - Testing the whole suite of java unit tests in a single JUnit run - made it difficult to determine actual failures when tests would fail. - Running each file set of tests individually, allows individual failures - to be diagnosed easier. A side benefit is that tests for optional components - of the library can be disabled if not configured. - - (ADB - 2018/05/16, HDFFV-9739) - - - Converted CMake global commands ADD_DEFINITIONS and INCLUDE_DIRECTORIES - to use target_* type commands. This change modernizes the CMake usage - in the HDF5 library. - - In addition, there is the intention to convert to generator expressions, - where possible. The exception is Fortran FLAGS on Windows Visual Studio. - The HDF macros TARGET_C_PROPERTIES and TARGET_FORTRAN_PROPERTIES have - been removed with this change in usage. - - The additional language (C++ and Fortran) checks have also been localized - to only be checked when that language is enabled. 
- - (ADB - 2018/05/08) - - - Performance - ------------- - - Revamped internal use of DXPLs, improving performance - - (QAK - 2018/05/20) - - - Fortran - -------- - - Fixed issue with h5fget_obj_count_f and using a file id of H5F_OBJ_ALL_F not - returning the correct count. - - (MSB - 2018/5/15, HDFFV-10405) - - - C++ APIs - -------- - - Adding default arguments to existing functions - - Added the following items: - + Two more property list arguments are added to H5Location::createDataSet: - const DSetAccPropList& dapl = DSetAccPropList::DEFAULT - const LinkCreatPropList& lcpl = LinkCreatPropList::DEFAULT - - + One more property list argument is added to H5Location::openDataSet: - const DSetAccPropList& dapl = DSetAccPropList::DEFAULT - - (BMR - 2018/07/21, PR# 1146) - - - Improvement C++ documentation - - Replaced the table in main page of the C++ documentation from mht to htm format - for portability. - - (BMR - 2018/07/17, PR# 1141) - - -Supported Platforms -=================== - - Linux 2.6.32-696.16.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Version 4.9.3, Version 5.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.0.098 Build 20160721 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Visual Studio 2015 w/ Intel C, Fortran 2017 (cmake) - Visual Studio 2015 w/ MSMPI 8 (cmake) - - Windows 10 Visual Studio 2015 w/ Intel Fortran 18 (cmake) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 18 (cmake) - Visual Studio 2017 w/ Intel Fortran 18 (cmake) - - Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.1 from Xcode 7.0 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.6 Apple clang/clang++ version 7.3.0 from Xcode 7.3 - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (osx1011dev/osx1011test) Intel icc/icpc/ifort version 16.0.2 - - Mac OS Sierra 10.12.6 Apple LLVM version 8.1.0 (clang/clang++-802.0.42) - 64-bit gfortran GNU Fortran (GCC) 7.1.0 - (swallow/kite) Intel icc/icpc/ifort version 17.0.2 - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.11 32-bit n y/y n y y y -Solaris2.11 64-bit n y/n n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y y y y y -Windows 7 Cygwin n y/n n y y y -Windows 7 x64 Cygwin n y/n n y y y -Windows 10 y y/y n y y y -Windows 10 x64 y y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemite 10.10.5 64-bit n y/y n y y y -Mac OS X El Capitan 10.11.6 64-bit n y/y n y y y -Mac OS Sierra 10.12.6 
64-bit n y/y n y y y -CentOS 7.2 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.2 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 7.2 Linux 2.6.32 x86_64 Intel n y/y n y y y -Linux 2.6.32-573.18.1.el6.ppc64 n y/y n y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.11 32-bit y y y y -Solaris2.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 7 x64 Cygwin n n n y -Windows 10 y y y y -Windows 10 x64 y y y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemite 10.10.5 64-bit y n y y -Mac OS X El Capitan 10.11.6 64-bit y n y y -Mac OS Sierra 10.12.6 64-bit y n y y -CentOS 7.2 Linux 2.6.32 x86_64 PGI y y y n -CentOS 7.2 Linux 2.6.32 x86_64 GNU y y y y -CentOS 7.2 Linux 2.6.32 x86_64 Intel y y y n -Linux 2.6.32-573.18.1.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (mayll/platypus) Version 4.4.7 20120313 - Version 4.9.3, 5.3.0, 6.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 17.10-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.4.196 Build 20170411 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 3.10.0-327.18.2.el7 GNU C (gcc) and C++ (g++) compilers - #1 SMP x86_64 GNU/Linux Version 4.8.5 20150623 (Red Hat 4.8.5-4) - (jelly) with NAG Fortran Compiler Release 6.1(Tozai) - GCC Version 7.1.0 - OpenMPI 3.0.0-GCC-7.2.0-2.29, - 3.1.0-GCC-7.2.0-2.29 - Intel(R) C (icc) and C++ (icpc) compilers - Version 17.0.0.098 Build 20160721 - with NAG Fortran Compiler Release 6.1(Tozai) - - Linux 3.10.0-327.10.1.el7 MPICH 3.2 compiled with GCC 5.3.0 - #1 SMP x86_64 GNU/Linux - (moohan) - - Linux 2.6.32-573.18.1.el6.ppc64 MPICH mpich 3.1.4 compiled with - #1 SMP ppc64 GNU/Linux IBM XL C/C++ for Linux, V13.1 - (ostrich) and IBM XL Fortran for Linux, V15.1 - - Debian 8.4 3.16.0-4-amd64 #1 SMP Debian 3.16.36-1 x86_64 GNU/Linux - gcc, g++ (Debian 4.9.2-10) 4.9.2 - GNU Fortran (Debian 4.9.2-10) 4.9.2 - (cmake and autotools) - - Fedora 24 4.7.2-201.fc24.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc, g++ (GCC) 6.1.1 20160621 - (Red Hat 6.1.1-3) - GNU Fortran (GCC) 6.1.1 20160621 - (Red Hat 6.1.1-3) - (cmake and autotools) - - Ubuntu 16.04.1 4.4.0-38-generic #57-Ubuntu SMP x86_64 GNU/Linux - gcc, g++ (Ubuntu 5.4.0-6ubuntu1~16.04.2) - 5.4.0 20160609 - GNU Fortran (Ubuntu 5.4.0-6ubuntu1~16.04.2) - 5.4.0 20160609 - (cmake and autotools) - - -Known Problems -============== - - At present, metadata cache images may not be generated by parallel - applications. Parallel applications can read files with metadata cache - images, but since this is a collective operation, a deadlock is possible - if one or more processes do not participate. - - Three tests fail with OpenMPI 3.0.0/GCC-7.2.0-2.29: - testphdf5 (ecdsetw, selnone, cchunk1, cchunk3, cchunk4, and actualio) - t_shapesame (sscontig2) - t_pflush1/fails on exit - The first two tests fail attempting collective writes. - - Known problems in previous releases can be found in the HISTORY*.txt files - in the HDF5 source. Please report any new problems found to - help@hdfgroup.org. - - -CMake vs. Autotools installations -================================= -While both build systems produce similar results, there are differences. 
-Each system produces the same set of folders on linux (only CMake works -on standard Windows); bin, include, lib and share. Autotools places the -COPYING and RELEASE.txt file in the root folder, CMake places them in -the share folder. - -The bin folder contains the tools and the build scripts. Additionally, CMake -creates dynamic versions of the tools with the suffix "-shared". Autotools -installs one set of tools depending on the "--enable-shared" configuration -option. - build scripts - ------------- - Autotools: h5c++, h5cc, h5fc - CMake: h5c++, h5cc, h5hlc++, h5hlcc - -The include folder holds the header files and the fortran mod files. CMake -places the fortran mod files into separate shared and static subfolders, -while Autotools places one set of mod files into the include folder. Because -CMake produces a tools library, the header files for tools will appear in -the include folder. - -The lib folder contains the library files, and CMake adds the pkgconfig -subfolder with the hdf5*.pc files used by the bin/build scripts created by -the CMake build. CMake separates the C interface code from the fortran code by -creating C-stub libraries for each Fortran library. In addition, only CMake -installs the tools library. The names of the szip libraries are different -between the build systems. - -The share folder will have the most differences because CMake builds include -a number of CMake specific files for support of CMake's find_package and support -for the HDF5 Examples CMake project. - - -%%%%1.10.2%%%% - -HDF5 version 1.10.2 released on 2018-03-29 -================================================================================ - - -INTRODUCTION - -This document describes the differences between this release and the previous -HDF5 release. It contains information on the platforms tested and known -problems in this release. For more details check the HISTORY*.txt files in the -HDF5 source. - -Note that documentation in the links below will be updated at the time of each -final release. - -Links to HDF5 documentation can be found on The HDF5 web page: - - https://portal.hdfgroup.org/display/HDF5/HDF5 - -The official HDF5 releases can be obtained from: - - https://www.hdfgroup.org/downloads/hdf5/ - -Changes from Release to Release and New Features in the HDF5-1.10.x release series -can be found at: - - https://portal.hdfgroup.org/display/HDF5/HDF5+Application+Developer%27s+Guide - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS - -- New Features -- Support for new platforms and languages -- Bug Fixes since HDF5-1.10.1 -- Supported Platforms -- Tested Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Configuration and Build Systems: - -------------------------------- - - CMake builds - -------------- - - - Changed minimum CMake required version to 3.10. - - This change removed the need to support a copy of the FindMPI.cmake module, - which has been removed, along with its subfolder in the config/cmake_ext_mod - location. - - (ADB - 2018/03/09) - - - Added pkg-config file generation - - Added pkg-config file generation for the C, C++, HL, and HL C++ libraries. - In addition, builds on Linux will create h5cc, h5c++, h5hlcc, and h5hlc++ scripts in the bin - directory that use the pkg-config files. The scripts can be used to build HDF5 C and C++ - applications (i.e, similar to the compiler scripts produced by the Autotools builds). 
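      As a usage sketch, a trivial C program such as the one below could be
      built against such an installation with one of the generated scripts,
      e.g. "h5cc -o mkfile mkfile.c". The program and file names here are
      illustrative only.

          /* Minimal HDF5 C program; names are illustrative. */
          #include "hdf5.h"
          #include <stdio.h>

          int main(void)
          {
              unsigned maj, min, rel;

              hid_t file = H5Fcreate("mkfile.h5", H5F_ACC_TRUNC,
                                     H5P_DEFAULT, H5P_DEFAULT);
              if (file < 0) {
                  fprintf(stderr, "could not create mkfile.h5\n");
                  return 1;
              }

              H5get_libversion(&maj, &min, &rel);
              printf("created mkfile.h5 with HDF5 %u.%u.%u\n", maj, min, rel);

              H5Fclose(file);
              return 0;
          }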
- - (ADB - 2018/03/08, HDFFV-4359) - - - Refactored use of CMAKE_BUILD_TYPE for new variable, which understands - the type of generator in use. - - Added new configuration macros to use new HDF_BUILD_TYPE variable. This - variable is set correctly for the type of generator being used for the build. - - (ADB - 2018/01/08, HDFFV-10385, HDFFV-10296) - - - Autotools builds - ------------------ - - - Removed version-specific gcc/gfortran flags for version 4.0 (inclusive) - and earlier. - - The config/gnu-flags file, which is sourced as a part of the configure - process, adds version-specific flags for use when building HDF5. Most of - these flags control warnings and do not affect the final product. - - Flags for older versions of the compiler were consolidated into the - common flags section. Moving these flags simplifies maintenance of - the file. - - The upshot of this is that building with ancient versions of gcc - (<= 4.0) will possibly no longer work without hand-hacking the file - to remove the flags not understood by that version of the compiler. - Nothing should change when building with gcc >= 4.1. - - (DER - 2017/05/31, HDFFV-9937) - - - -fno-omit-frame-pointer was added when building with debugging symbols - enabled. - - Debugging symbols can be enabled independently of the overall build - mode in both the autotools and CMake. This allows (limited) debugging - of optimized code. Since many debuggers rely on the frame pointer, - we've disabled this optimization when debugging symbols are requested - (e.g.: via building with --enable-symbols). - - (DER - 2017/05/31, HDFFV-10226) - - - Library: - -------- - - Added an enumerated value to H5F_libver_t for H5Pset_libver_bounds(). - - Currently, the library defines two values for H5F_libver_t and supports - only two pairs of (low, high) combinations as derived from these values. - Thus the bounds setting via H5Pset_libver_bounds() is rather restricted. - - Added an enumerated value (H5F_LIBVER_V18) to H5F_libver_t and - H5Pset_libver_bounds() now supports five pairs of (low, high) combinations - as derived from these values. This addition provides the user more - flexibility in setting bounds for object creation. - - (VC - 2018/03/14) - - - Added prefix option to VDS files. - - Currently, VDS source files must be in the active directory to be - found by the virtual file. Adding the option of a prefix to be set - on the virtual file, using a data access property list (DAPL), - allows the source files to locate at an absolute or relative path - to the virtual file. - Private utility functions in H5D and H5L packages merged into single - function in H5F package. - - New public APIs: - herr_t H5Pset_virtual_prefix(hid_t dapl_id, const char* prefix); - ssize_t H5Pget_virtual_prefix(hid_t dapl_id, char* prefix /*out*/, size_t size); - The prefix can also be set with an environment variable, HDF5_VDS_PREFIX. - - (ADB - 2017/12/12, HDFFV-9724, HDFFV-10361) - - - H5FDdriver_query() API call added to the C library. - - This new library call allows the user to query a virtual file driver - (VFD) for the feature flags it supports (listed in H5FDpublic.h). - This can be useful to determine if a VFD supports SWMR, for example. - - Note that some VFDs have feature flags that may only be present - after a file has been created or opened (e.g.: the core VFD will - have the H5FD_FEAT_POSIX_COMPAT_HANDLE flag set if the backing - store is switched on). 
Since the new API call queries a generic VFD - unassociated with a file, these flags will never be returned. - - (DER - 2017/05/31, HDFFV-10215) - - - H5FD_FEAT_DEFAULT_VFD_COMPATIBLE VFD feature flag added to the C library. - - This new feature flag indicates that the VFD is compatible with the - default VFD. VFDs that set this flag create single files that follow - the canonical HDF5 file format. - - (DER - 2017/05/31, HDFFV-10214) - - - The H5I_REFERENCE value in the H5I_type_t enum (defined in H5Ipublic.h) - has been marked as deprecated. - - This ID type value is not used in the C library. i.e.: There are no - hid_t values that are of ID type H5I_REFERENCE. - - This enum value will be removed in a future major version of the library. - The code will remain unchanged in the HDF5 1.10.x releases and branches. - - (DER - 2017/04/05, HDFFV-10252) - - - Parallel Library: - ----------------- - - Enabled compression for parallel applications. - - With this release parallel applications can create and write compressed - datasets (or the datasets with the filters such as Fletcher32 applied). - - (EIP - 2018/03/29) - - - Addressed slow file close on some Lustre file systems. - - Slow file close has been reported on some Lustre file systems. - While the ultimate cause is not understood fully, the proximate - cause appears to be long delays in MPI_File_set_size() calls at - file close and flush. - - To minimize this problem pending a definitive diagnosis and fix, - PHDF5 has been modified to avoid MPI_File_set_size() calls when - possible. This is done by comparing the library's EOA (End of - Allocation) with the file systems EOF, and skipping the - MPI_File_set_size() call if the two match. - - (JRM - 2018/03/29) - - - Optimized parallel open/location of the HDF5 super-block. - - Previous releases of PHDF5 required all parallel ranks to - search for the HDF5 superblock signature when opening the - file. As this is accomplished more or less as a synchronous - operation, a large number of processes can experience a - slowdown in the file open due to filesystem contention. - - As a first step in improving the startup/file-open performance, - we allow MPI rank 0 of the associated MPI communicator to locate - the base offset of the super-block and then broadcast that result - to the remaining ranks in the parallel group. Note that this - approach is utilized ONLY during file opens which employ the MPIO - file driver in HDF5 by previously having called H5Pset_fapl_mpio(). - - HDF5 parallel file operations which do not employ multiple ranks - e.g. specifying MPI_COMM_SELF (whose MPI_Comm_size == 1) - as opposed to MPI_COMM_WORLD, will not be affected by this - optimization. Conversely, parallel file operations on subgroups - of MPI_COMM_WORLD are allowed to be run in parallel with each - subgroup operating as an independent collection of processes. - - (RAW - 2017/10/10, HDFFV-10294) - - - Added large (>2GB) MPI-IO transfers. - - Previous releases of PHDF5 would fail when attempting to - read or write greater than 2GB of data in a single IO operation. - This issue stems principally from an MPI API whose definitions - utilize 32 bit integers to describe the number of data elements - and datatype that MPI should use to effect a data transfer. - Historically, HDF5 has invoked MPI-IO with the number of - elements in a contiguous buffer represented as the length - of that buffer in bytes. 
- - Resolving the issue and thus enabling larger MPI-IO transfers - is accomplished first, by detecting when a user IO request would - exceed the 2GB limit as described above. Once a transfer request - is identified as requiring special handling, PHDF5 now creates a - derived datatype consisting of a vector of fixed sized blocks - which is in turn wrapped within a single MPI_Type_struct to - contain the vector and any remaining data. The newly created - datatype is then used in place of MPI_BYTE and can be used to - fulfill the original user request without encountering API - errors. - - (RAW - 2017/09/10, HDFFV-8839) - - - C++ Library: - ------------ - - The following C++ API wrappers have been added to the C++ Library: - + H5Lcreate_soft: - // Creates a soft link from link_name to target_name. - void link(const char *target_name, const char *link_name,...) - void link(const H5std_string& target_name,...) - - + H5Lcreate_hard: - // Creates a hard link from new_name to curr_name. - void link(const char *curr_name, const Group& new_loc,...) - void link(const H5std_string& curr_name, const Group& new_loc,...) - - // Creates a hard link from new_name to curr_name in same location. - void link(const char *curr_name, const hid_t same_loc,...) - void link(const H5std_string& curr_name, const hid_t same_loc,...) - - Note: previous version of H5Location::link will be deprecated. - - + H5Lcopy: - // Copy an object from a group of file to another. - void copyLink(const char *src_name, const Group& dst,...) - void copyLink(const H5std_string& src_name, const Group& dst,...) - - // Copy an object from a group of file to the same location. - void copyLink(const char *src_name, const char *dst_name,...) - void copyLink(const H5std_string& src_name,...) - - + H5Lmove: - // Rename an object in a group or file to a new location. - void moveLink(const char* src_name, const Group& dst,...) - void moveLink(const H5std_string& src_name, const Group& dst,...) - - // Rename an object in a group or file to the same location. - void moveLink(const char* src_name, const char* dst_name,...) - void moveLink(const H5std_string& src_name,...) - - Note: previous version H5Location::move will be deprecated. - - + H5Ldelete: - // Removes the specified link from this location. - void unlink(const char *link_name, - const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) - void unlink(const H5std_string& link_name, - const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) - - Note: additional parameter is added to previous H5Location::unlink. - - + H5Tencode and H5Tdecode: - // Creates a binary object description of this datatype. - void DataType::encode() - C API H5Tencode() - - // Returns the decoded type from the binary object description. - DataType::decode() - C API H5Tdecode() - ArrayType::decode() - C API H5Tdecode() - CompType::decode() - C API H5Tdecode() - DataType::decode() - C API H5Tdecode() - EnumType::decode() - C API H5Tdecode() - FloatType::decode() - C API H5Tdecode() - IntType::decode() - C API H5Tdecode() - StrType::decode() - C API H5Tdecode() - VarLenType::decode() - C API H5Tdecode() - - + H5Lget_info: - // Returns the information of the named link. - H5L_info_t getLinkInfo(const H5std_string& link_name,...) - - (BMR - 2018/03/11, HDFFV-10149) - - - Added class LinkCreatPropList for link create property list. - - (BMR - 2018/03/11, HDFFV-10149) - - - Added overloaded functions H5Location::createGroup to take a link - creation property list. 
- Group createGroup(const char* name, const LinkCreatPropList& lcpl) - Group createGroup(const H5std_string& name, const LinkCreatPropList& lcpl) - - (BMR - 2018/03/11, HDFFV-10149) - - - A document is added to the HDF5 C++ API Reference Manual to show the - mapping from a C API to C++ wrappers. It can be found from the main - page of the C++ API Reference Manual. - - (BMR - 2017/10/17, HDFFV-10151) - - - Java Library: - ---------------- - - Wrapper added for enabling the error stack. - - H5error_off would disable the error stack reporting. In order - to re-enable the reporting, the error stack info needs to be - saved so that H5error_on can revert state. - - (ADB - 2018/03/13, HDFFV-10412) - - - Wrappers were added for the following C APIs: - H5Pset_evict_on_close - H5Pget_evict_on_close - H5Pset_chunk_opts - H5Pget_chunk_opts - H5Pset_efile_prefix - H5Pget_efile_prefix - H5Pset_virtual_prefix - H5Pget_virtual_prefix - - (ADB - 2017/12/20) - - - The H5I_REFERENCE value in the H5I_type_t enum (defined in H5Ipublic.h) - has been marked as deprecated. - - JNI code which refers to this value will be removed in a future - major version of the library. The code will remain unchanged in the - 1.10.x releases and branches. - - See the C library section, above, for further information. - - (HDFFV-10252, DER, 2017/04/05) - - - Tools: - ------ - - h5diff has a new option to display error stack. - - Updated h5diff with the --enable-error-stack argument, which - enables the display of the hdf5 error stack. This completes the - improvement to the main tools: h5copy, h5diff, h5dump, h5ls and - h5repack. - - (ADB - 2017/08/30, HDFFV-9774) - - -Support for new platforms, languages and compilers. -======================================= - - None - -Bug Fixes since HDF5-1.10.1 release -================================== - - Library - ------- - - The data read after a direct chunk write to a chunked dataset with - one chunk was incorrect. - - The problem was due to the passing of a null dataset pointer to - the insert callback for the chunk index in the routine - H5D__chunk_direct_write() in H5Dchunk.c - The dataset was a single-chunked dataset which will use the - single chunk index when latest format was enabled on file creation. - The single chunk index was the only index that used this pointer - in the insert callback. - - Passed the dataset pointer to the insert callback for the chunk - index in H5D__chunk_direct_write(). - - (VC - 2018/03/20, HDFFV-10425) - - - Added public routine H5DOread_chunk to the high-level C library. - - The patch for H5DOwrite_chunk() to write an entire chunk to the file - directly was contributed by GE Healthcare and integrated by The HDF Group - developers. - - (VC - 2017/05/19, HDFFV-9934) - - - Freeing of object header after failed checksum verification. - - It was discovered that the object header (in H5Ocache.c) was not released properly - when the checksum verification failed and a re-load of the object - header was needed. - - Freed the object header that failed the chksum verification only - after the new object header is reloaded, deserialized and set up. - - (VC - 2018/03/14, HDFFV-10209) - - - Updated H5Pset_evict_on_close in H5Pfapl.c - - Changed the minor error number from H5E_CANTSET to H5E_UNSUPPORTED for - parallel library. - - (ADB - 2018/03/06, HDFFV-10414) - - - Fixed the problems with the utility function that could not handle lowercase - Windows drive letters. - - Added call to upper function for drive letter. 
- - (ADB - 2017/12/18, HDFFV-10307) - - - Fixed H5Sencode() bug when the number of elements selected was > 2^32. - - H5Sencode() incorrectly encodes dataspace selection with number of - elements exceeding 2^32. When decoding such selection via H5Sdecode(), - the number of elements in the decoded dataspace is not the same as - what is encoded. This problem exists for H5S_SEL_HYPER and - H5S_SEL_POINTS encoding. - - The cause of the problem is due to the fact that the library uses 32 bits to - encode counts and block offsets for the selection. - The solution is to use the original 32 bit encodings if possible, - but use a different way to encode selection if more that 32 bits is needed. - See details in the RFC: H5Sencode/H5Sdecode Format Change i - https://bitbucket.hdfgroup.org/projects/HDFFV/repos/hdf5doc/browse/RFCs/HDF5_Library/H5SencodeFormatChange. - - (VC - 2017/11/28, HDFFV-9947) - - - Fixed filter plugin handling in H5PL.c and H5Z.c to not require i availability of - dependent libraries (e.g., szip or zlib). - - It was discovered that the dynamic loading process used by - filter plugins had issues with library dependencies. - - CMake build process changed to use LINK INTERFACE keywords, which - allowed HDF5 C library to make dependent libraries private. The - filter plugin libraries no longer require dependent libraries - (such as szip or zlib) to be available. - - (ADB - 2017/11/16, HDFFV-10328) - - - Fixed rare object header corruption bug. - - In certain cases, such as when converting large attributes to dense - storage, an error could occur which would either fail an assertion or - cause file corruption. Fixed and added test. - - (NAF - 2017/11/14, HDFFV-10274) - - - Updated H5Zfilter_avail in H5Z.c. - - The public function checked for plugins, while the private - function did not. - - Modified H5Zfilter_avail and private function, H5Z_filter_avail. - Moved check for plugin from public to private function. Updated - H5P__set_filter due to change in H5Z_filter_avail. Updated tests. - - (ADB - 2017/10/10, HDFFV-10297, HDFFV-10319) - - - h5dump produced SEGFAULT when dumping corrypted file. - - The behavior was due to the error in the internal function H5HL_offset_into(). - - (1) Fixed H5HL_offset_into() to return error when offset exceeds heap data - block size. - (2) Fixed other places in the library that call this routine to detect - error routine. - - (VC - 2017/08/30, HDFFV-10216) - - - Fixes for paged aggregation feature. - - Skip test in test/fheap.c when: - (1) multi/split drivers and - (2) persisting free-space or using paged aggregation strategy - - (VC, 2017/07/10) - - Changes made based on RFC review comments: - (1) Added maximum value for file space page size - (2) Dropped check for page end metadata threshold - (3) Removed "can_shrink" and "shrink" callbacks for small section class - - (VC - 2017/06/09) - - - Fixed for infinite loop in H5VM_power2up(). - - The function H5VM_power2up() returns the next power of 2 - for n. When n exceeds 2^63, it overflows and becomes 0 causing - the infinite looping. - - The fix ensures that the function checks for n >= 2^63 - and returns 0. - - (VC - 2017/07/10, HDFFV-10217) - - - Fixed for H5Ocopy doesn't work with open identifiers. - - Changes made so that raw data for dataset objects are copied from - cached info when possible instead of flushing objects to file and - read them back in again. 
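      As an illustration of the usage this fix targets, the hedged sketch
      below copies a dataset while an identifier for it is still open. The
      file layout and the names "/data/original" and "/data/copy" are
      assumptions for the example.

          /* Copy an object while it is open; assumes the file contains a
           * dataset at /data/original and a group /data for the copy. */
          #include "hdf5.h"

          herr_t copy_while_open(const char *filename)
          {
              hid_t file = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
              if (file < 0)
                  return -1;

              /* Keep the source dataset open across the copy */
              hid_t dset = H5Dopen2(file, "/data/original", H5P_DEFAULT);

              /* With this fix, cached raw data is used where possible rather
               * than flushing the open object and reading it back */
              herr_t status = H5Ocopy(file, "/data/original",
                                      file, "/data/copy",
                                      H5P_DEFAULT, H5P_DEFAULT);

              H5Dclose(dset);
              H5Fclose(file);
              return status;
          }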
- - (VC - 2017/07/05, HDFFV-7853) - - - An uninitialized struct could cause a memory access error when using - variable-length or reference types in a compressed, chunked dataset. - - A struct containing a callback function pointer and a pointer to some - associated data was used before initialization. This could cause a - memory access error and system crash. This could only occur under - unusual conditions when using variable-lenth and reference types in - a compressed, chunked dataset. - - On recent versions of Visual Studio, when built in debug mode, the - debug heap will complain and cause a crash if the code in question - is executed (this will cause the objcopy test to fail). - - (DER - 2017/11/21, HDFFV-10330) - - - Fixed collective metadata writes on file close. - - It was discovered that metadata was being written twice as part of - the parallel file close behavior, once independently and once - collectively. - - A fix for this error was included as part of the parallel compression - feature but remained undocumented here. - - (RAW - 2017/12/01, HDFFV-10272) - - - If an HDF5 file contains a filter pipeline message with a 'number of - filters' field that exceeds the maximum number of allowed filters, - the error handling code will attempt to dereference a NULL pointer. - - This issue was reported to The HDF Group as issue #CVE-2017-17505. - https://security-tracker.debian.org/tracker/CVE-2017-17505 - https://cve.mitre.org/cgi-bin/cvename.cgi?name=3DCVE-2017-17505 - - NOTE: The HDF5 C library cannot produce such a file. This condition - should only occur in a corrupt (or deliberately altered) file - or a file created by third-party software. - - This problem arose because the error handling code assumed that - the 'number of filters' field implied that a dynamic array of that - size had already been created and that the cleanup code should - iterate over that array and clean up each element's resources. If - an error occurred before the array has been allocated, this will - not be true. - - This has been changed so that the number of filters is set to - zero on errors. Additionally, the filter array traversal in the - error handling code now requires that the filter array not be NULL. - - (DER - 2018/02/06, HDFFV-10354) - - - If an HDF5 file contains a filter pipeline message which contains - a 'number of filters' field that exceeds the actual number of - filters in the message, the HDF5 C library will read off the end of - the read buffer. - - This issue was reported to The HDF Group as issue #CVE-2017-17506. - https://security-tracker.debian.org/tracker/CVE-2017-17506 - https://cve.mitre.org/cgi-bin/cvename.cgi?name=3DCVE-2017-17506 - - NOTE: The HDF5 C library cannot produce such a file. This condition - should only occur in a corrupt (or deliberately altered) file - or a file created by third-party software. - - The problem was fixed by passing the buffer size with the buffer - and ensuring that the pointer cannot be incremented off the end - of the buffer. A mismatch between the number of filters declared - and the actual number of filters will now invoke normal HDF5 - error handling. - - (DER - 2018/02/26, HDFFV-10355) - - - If an HDF5 file contains a malformed compound datatype with a - suitably large offset, the type conversion code can run off - the end of the type conversion buffer, causing a segmentation - fault. - - This issue was reported to The HDF Group as issue #CVE-2017-17507. 
- https://security-tracker.debian.org/tracker/CVE-2017-17506 - https://cve.mitre.org/cgi-bin/cvename.cgi?name=3DCVE-2017-17506 - - NOTE: The HDF5 C library cannot produce such a file. This condition - should only occur in a corrupt (or deliberately altered) file - or a file created by third-party software. - - THE HDF GROUP WILL NOT FIX THIS BUG AT THIS TIME - - Fixing this problem would involve updating the publicly visible - H5T_conv_t function pointer typedef and versioning the API calls - which use it. We normally only modify the public API during - major releases, so this bug will not be fixed at this time. - - (DER - 2018/02/26, HDFFV-10356) - - - If an HDF5 file contains a malformed compound type which contains - a member of size zero, a division by zero error will occur while - processing the type. - - This issue was reported to The HDF Group as issue #CVE-2017-17508. - https://security-tracker.debian.org/tracker/CVE-2017-17508 - https://cve.mitre.org/cgi-bin/cvename.cgi?name=3DCVE-2017-17508 - - NOTE: The HDF5 C library cannot produce such a file. This condition - should only occur in a corrupt (or deliberately altered) file - or a file created by third-party software. - - Checking for zero before dividing fixes the problem. Instead of the - division by zero, the normal HDF5 error handling is invoked. - - (DER - 2018/02/26, HDFFV-10357) - - - If an HDF5 file contains a malformed symbol table node that declares - it contains more symbols than it actually contains, the library - can run off the end of the metadata cache buffer while processing - the symbol table node. - - This issue was reported to The HDF Group as issue #CVE-2017-17509. - https://security-tracker.debian.org/tracker/CVE-2017-17509 - https://cve.mitre.org/cgi-bin/cvename.cgi?name=3DCVE-2017-17509 - - NOTE: The HDF5 C library cannot produce such a file. This condition - should only occur in a corrupt (or deliberately altered) file - or a file created by third-party software. - - Performing bounds checks on the buffer while processing fixes the - problem. Instead of the segmentation fault, the normal HDF5 error - handling is invoked. - - (DER - 2018/03/12, HDFFV-10358) - - - Fixed permissions passed to open(2) on file create. - - On Windows, the POSIX permissions passed to open(2) when creating files - were only incidentally correct. They are now set to the correct value of - (_S_IREAD | _S_IWRITE). - - On other platforms, the permissions were set to a mix of 666, 644, and - 000. They are now set uniformly to 666. - - (DER - 2017/04/28, HDFFV-9877) - - - The H5FD_FEAT_POSIX_COMPAT_HANDLE flag is no longer used to determine - if a virtual file driver (VFD) is compatible with SWMR. - - Use of this VFD feature flag was not in line with the documentation in - the public H5FDpublic.h file. In particular, it was being used as a - proxy for determining if SWMR I/O is allowed. This is unnecessary as we - already have a feature flag for this (H5FD_SUPPORTS_SWMR_IO). - - (DER - 2017/05/31, HDFFV-10214) - - - Configuration - ------------- - - CMake changes - - - Updated CMake commands configuration. - - A number of improvements were made to the CMake commands. Most - changes simplify usage or eliminate unused constructs. Also, - some changes support better cross-platform support. - - (ADB - 2018/02/01, HDFFV-10398) - - - Corrected usage of CMAKE_BUILD_TYPE variable. - - The use of the CMAKE_BUILD_TYPE is incorrect for multi-config - generators (Visual Studio and XCode) and is optional for single - config generators. 
Created a new macro to check - GLOBAL PROPERTY -> GENERATOR_IS_MULTI_CONFIG - Created two new HDF variable, HDF_BUILD_TYPE and HDF_CFG_BUILD_TYPE. - Defaults for these variables is "Release". - - (ADB - 2018/01/10, HDFFV-10385) - - - Added replacement of fortran flags if using static CRT. - - Added TARGET_STATIC_CRT_FLAGS call to HDFUseFortran.cmake file in - config/cmake_ext_mod folder. - - (ADB - 2018/01/08, HDFFV-10334) - - - - The hdf5 library used shared szip and zlib, which needlessly required - applications to link with the same szip and zlib libraries. - - Changed the target_link_libraries commands to use the static libs. - Removed improper link duplication of szip and zlib. - Adjusted the link dependencies and the link interface values of - the target_link_libraries commands. - - (ADB - 2017/11/14, HDFFV-10329) - - - CMake MPI - - CMake implementation for MPI was problematic and would create incorrect - MPI library references in the hdf5 libraries. - - Reworked the CMake MPI code to properly create CMake targets. Also merged - the latest CMake FindMPI.cmake changes to the local copy. This is necessary - until HDF changes the CMake minimum to 3.9 or greater. - - (ADB - 2017/11/02, HDFFV-10321) - - - Corrected FORTRAN_HAVE_C_LONG_DOUBLE processing in the autotools. - - A bug in the autotools Fortran processing code always set the - FORTRAN_HAVE_C_LONG_DOUBLE variable to be true regardless of - whether or not a C long double type was present. - - This would cause compilation failures on platforms where a C - long double type was not available and the Fortran wrappers - were being built. - - (DER - 2017/07/05, HDFFV-10247) - - - The deprecated --enable-production and --enable-debug configure options - failed to emit errors when passed an empty string - (e.g.: --enable-debug=""). - - Due to the way we checked for these options being set, it was possible - to avoid the error message and continue configuration if an empty string - was passed to the option. - - Any use of --enable-production or --enable-debug will now halt the - configuration step and emit a helpful error message - (use --enable-build-mode=debug|production instead). - - (DER - 2017/07/05, HDFFV-10248) - - - CMake - - Too many commands for POST_BUILD step caused command line to be - too big on windows. - - Changed foreach of copy command to use a custom command with the - use of the HDFTEST_COPY_FILE macro. - - (ADB - 2017/07/12, HDFFV-10254) - - - CMake test execution environment - - The parallel HDF5 test: 't_pread' assumed the use of autotools - and the directory structure associated with that testing approach. - Modified the test code to check whether the 'h5jam' utility can be - found in the same directory as the test executable (which is - preferred directory structure utilized by cmake) and if found - will invoke the tool directly rather than utilizing a relative path. - - (RAW - 2017/11/03, HDFFV-10318) - - - Fortran compilation fails for xlf and CMake builds. - - Fixed CMake shared library build for H5match_types and modules - - (MSB - 2017/12/19, HDFFV-10363) - - - Shared libraries fail test on OSX with Fortran enabled with CMake. - - Fixed by removing the F77 use of EQUIVALENCE and COMMON, replaced - using MODULES. Updated CMake. - - (MSB - 2017/12/07, HDFFV-10223) - - - The bin/trace script now emits an error code on problems and autogen.sh - will fail if bin/trace fails. - - The bin/trace script adds tracing functionality to public HDF5 API calls. 
- It is only of interest to developers who modify the HDF5 source code. - Previously, bin/trace just wrote an error message to stdout when it - encountered problems, so autogen.sh processing did not halt and a broken - version of the library could be built. The script will now return an - error code when it encounters problems, and autogen.sh will fail. - - This only affects users who run autogen.sh to rebuild the Autotools files, - which is not necessary to build HDF5 from source in official releases of the - library. CMake users are unaffected as bin/trace is not run via CMake - at this time. - - (DER - 2017/04/25, HDFFV-10178) - - - FC_BASENAME was changed from gfortran40 to gfortran in a few places. - - In the autotools, FC_BASENAME was set to gfortran40 in a few locations - (config/gnu-fflags and config/freebsd). This was probably a historical - artifact and did not seem to affect many users. - - The value is now correctly set to gfortran. - - (DER - 2017/05/26, HDFFV-10249) - - - The ar flags were changed to -cr (was: -cru) - - The autotools set the flags for ar to -cru by default. The -u flag, - which allows selective replacement of only the members which have - changed, raises warnings on some platforms, so the flags are now set to - -cr via AR_FLAGS in configure.ac. This causes the static library to - always be completely recreated from the object files on each build. - - (DER - 2017/11/15, HDFFV-10428) - - - Fortran - -------- - - Fixed compilation errors when using Intel 18 Fortran compilers - (MSB - 2017/11/3, HDFFV-10322) - - Tools - ----- - - h5clear - - An enhancement to the tool in setting a file's stored EOA. - - It was discovered that a crashed file's stored EOA in the superblock - was smaller than the actual file's EOF. When the file was reopened - and closed, the library truncated the file to the stored EOA. - - Added an option to the tool in setting the file's stored EOA in the - superblock to the maximum of (EOA, EOF) + increment. - An option was also added to print the file's EOA and EOF. - - (VC - 2018/03/14, HDFFV-10360) - - - h5repack - - h5repack changes the chunk parameters when a change of layout is not - specified and a filter is applied. - - HDFFV-10297, HDFFV-10319 reworked code for h5repack and h5diff code - in the tools library. The check for an existing layout was incorrectly - placed into an if block and not executed. The check was moved into - the normal path of the function. - - (ADB - 2018/02/21, HDFFV-10412) - - - h5dump - - The tools library will hide the error stack during file open. - - While this is preferable almost always, there are reasons to enable - display of the error stack when a tool will not open a file. Adding an - optional argument to the --enable-error-stack will provide this use case. - As an optional argument it will not affect the operation of the - --enable-error-stack. h5dump is the only tool to implement this change. - - (ADB - 2018/02/15, HDFFV-10384) - - - h5dump - - h5dump would output an indented blank line in the filters section. - - h5dump overused the h5tools_simple_prefix function, which is a - function intended to account for the data index (x,y,z) option. - Removed the function call for header information. - - (ADB - 2018/01/25, HDFFV-10396) - - - h5repack - - h5repack incorrectly searched internal object table for name. - - h5repack would search the table of objects for a name, if the - name did not match it tried to determine if the name without a - leading slash would match. The logic was flawed! 
     The table stored names (paths) without a leading slash and did a
     strstr of the table path to the name. The assumption was that if
     there was a difference of one then it was a match; however,
     "pressure" would match "/pressure" as well as "/pressure1",
     "/pressure2", etc. Changed the logic to remove any leading slash
     and then do a full compare of the name.

     (ADB - 2018/01/18, HDFFV-10393)

   - h5repack

     h5repack failed to handle command line parameters for custom filters.

     User-defined filter parameter conversions would fail when integers
     were represented on the command line with character strings larger
     than 9 characters. Increased the local variable array for storing
     the current command line parameter to prevent buffer overflows.

     (ADB - 2018/01/17, HDFFV-10392)

   - h5diff

     h5diff segfaulted if comparing VL strings against fixed strings.

     Reworked the solution for HDFFV-8625 and HDFFV-8639. Implemented the
     check for string objects of the same type in the diff_can_type
     function by adding an if(tclass1 == H5T_STRING) block. This "if
     block" moves the same check that was added for attributes to this
     function, which is used by all object types. This function handles
     complex type structures. Also added a new test file in h5diffgentest
     for testing this issue and removed the temporary files used in the
     test scripts.

     (ADB - 2018/01/04, HDFFV-8745)

   - h5repack

     h5repack failed to copy a dataset with an existing filter.

     Reworked code for h5repack and h5diff in the tools library. Added
     improved error handling, cleanup of resources, and checks of calls.
     Modified H5Zfilter_avail and the private function H5Z_filter_avail.
     Moved the check for the plugin from the public to the private
     function. Updated H5P__set_filter due to the change in
     H5Z_filter_avail. Updated tests. Note that the h5repack output
     display has changed to clarify the individual steps of the repack
     process. The output indicates if an operation applies to all
     objects. Lines with notation and no information have been removed.

     (ADB - 2017/10/10, HDFFV-10297, HDFFV-10319)

   - h5repack

     h5repack always set the User Defined filter flag to H5Z_FLAG_MANDATORY.

     Added another parameter to the 'UD=' option to set the flag; the
     default is '0' or H5Z_FLAG_MANDATORY, and the other choice is '1'
     or H5Z_FLAG_OPTIONAL.

     (ADB - 2017/08/31, HDFFV-10269)

   - h5ls

     h5ls generated an error on the stack when it encountered an
     H5S_NULL dataspace.

     Adding checks for H5S_NULL before calling H5Sis_simple (located
     in the h5tools_dump_mem function) fixed the issue.

     (ADB - 2017/08/17, HDFFV-10188)

   - h5repack

     Added tests to h5repack.sh.in to verify that the options added for
     paged aggregation work as expected.

     (VC - 2017/08/03)

   - h5dump

     h5dump segfaulted on output of an XML file.

     The function that escaped strings used the full buffer length
     instead of just the length of the replacement string in a
     strncpy call. Using the correct length fixed the issue.

     (ADB - 2017/08/01, HDFFV-10256)

   - h5diff

     h5diff segfaulted on compare of a NULL variable length string.

     Improved the h5diff compare of strings by adding a check for
     NULL strings and setting the lengths to zero.

     (ADB - 2017/07/25, HDFFV-10246)

   - h5import

     h5import crashed trying to import data from a subset of a dataset.

     Improved h5import by adding the SUBSET keyword; h5import now uses
     the Count times the Block as the size of the dimensions.
     Added the INPUT_B_ORDER keyword to old-style configuration files.
- The import from h5dump function expects the binary files to use native - types (FILE '-b' option) in the binary file. - - (ADB - 2017/06/15, HDFFV-10219) - - - h5repack - - h5repack did not maintain the creation order flag of the root - group. - - Improved h5repack by reading the creation order and applying the - flag to the new root group. Also added arguments to set the - order and index direction, which applies to the traversing of the - original file, on the command line. - - (ADB - 2017/05/26, HDFFV-8611) - - - h5diff - - h5diff failed to account for strpad type and null terminators - of char strings. Also, h5diff failed to account for string length - differences and would give a different result depending on file - order in the command line. - - Improved h5diff compare of strings and arrays by adding a check for - string lengths and if the strpad was null filled. - - (ADB - 2017/05/18, HDFFV-9055, HDFFV-10128) - - High-Level APIs: - ------ - - H5DOwrite_chunk() problems when overwriting an existing chunk with - no filters enabled. - - When overwriting chunks and no filters were being used, the library would - fail (when asserts are enabled, e.g. debug builds) or incorrectly - insert additional chunks instead of overwriting (when asserts are not - enabled, e.g. production builds). - - This has been fixed and a test was added to the hl/test_dset_opt test. - - (DER - 2017/05/11, HDFFV-10187) - - C++ APIs - -------- - - Removal of memory leaks. - - A private function was inadvertently called, causing memory leaks. This - is now fixed. - - (BMR - 2018/03/12 - User's reported in email) - - Testing - ------- - - Memory for three variables in testphdf5's coll_write_test was malloced - but not freed, leaking memory when running the test. - - The variables' memory is now freed. - - (LRK - 2018/03/12, HDFFV-10397) - - - Refactored the testpar/t_bigio.c test to include ALARM macros - - Changed the test to include the ALARM_ON and ALARM_OFF macros which - are intended to prevent nightly test hangs that have been observed - with this particular parallel test example. The code was also modified to - simplify status reporting (only from MPI rank 0) and additional - status checking added. 
- - (RAW - 2017/11/08, HDFFV-10301) - - -Supported Platforms -=================== - - Linux 2.6.32-696.16.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Version 4.9.3, Version 5.2.0, - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.0.098 Build 20160721 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Visual Studio 2015 w/ Intel C, Fortran 2017 (cmake) - Visual Studio 2015 w/ MSMPI 8 (cmake) - Cygwin(CYGWIN_NT-6.1 2.8.0(0.309/5/3) - gcc and gfortran compilers (GCC 5.4.0) - (cmake and autotools) - - Windows 10 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Cygwin(CYGWIN_NT-6.1 2.8.0(0.309/5/3) - gcc and gfortran compilers (GCC 5.4.0) - (cmake and autotools) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.1 from Xcode 7.0 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.6 Apple clang/clang++ version 7.3.0 from Xcode 7.3 - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (osx1011dev/osx1011test) Intel icc/icpc/ifort version 16.0.2 - - Mac OS Sierra 10.12.6 Apple LLVM version 8.1.0 (clang/clang++-802.0.42) - 64-bit gfortran GNU Fortran (GCC) 7.1.0 - (swallow/kite) Intel icc/icpc/ifort version 17.0.2 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.11 32-bit n y/y n y y y -Solaris2.11 64-bit n y/n n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y y y y y -Windows 7 Cygwin n y/n n y y y -Windows 7 x64 Cygwin n y/n n y y y -Windows 10 y y/y n y y y -Windows 10 x64 y y/y n y y y -Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemite 10.10.5 64-bit n y/y n y y y -Mac OS X El Capitan 10.11.6 64-bit n y/y n y y y -CentOS 7.2 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.2 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 7.2 Linux 2.6.32 x86_64 Intel n y/y n y y y -Linux 2.6.32-573.18.1.el6.ppc64 n y/y n y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.11 32-bit y y y y -Solaris2.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 7 x64 Cygwin n n n y -Windows 10 y y y y -Windows 10 x64 y y y y -Mac OS X Mountain Lion 10.8.5 64-bit y n y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemite 10.10.5 64-bit y n y y -Mac OS X El Capitan 
10.11.6 64-bit y n y y -CentOS 7.2 Linux 2.6.32 x86_64 PGI y y y n -CentOS 7.2 Linux 2.6.32 x86_64 GNU y y y y -CentOS 7.2 Linux 2.6.32 x86_64 Intel y y y n -Linux 2.6.32-573.18.1.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (mayll/platypus) Version 4.4.7 20120313 - Version 4.9.3, 5.3.0, 6.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 17.10-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.4.196 Build 20170411 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 3.10.0-327.18.2.el7 GNU C (gcc) and C++ (g++) compilers - #1 SMP x86_64 GNU/Linux Version 4.8.5 20150623 (Red Hat 4.8.5-4) - (jelly) with NAG Fortran Compiler Release 6.1(Tozai) - GCC Version 7.1.0 - OpenMPI 3.0.0-GCC-7.2.0-2.29 - Intel(R) C (icc) and C++ (icpc) compilers - Version 17.0.0.098 Build 20160721 - with NAG Fortran Compiler Release 6.1(Tozai) - - Linux 3.10.0-327.10.1.el7 MPICH 3.2 compiled with GCC 5.3.0 - #1 SMP x86_64 GNU/Linux - (moohan) - - Linux 2.6.32-573.18.1.el6.ppc64 MPICH mpich 3.1.4 compiled with - #1 SMP ppc64 GNU/Linux IBM XL C/C++ for Linux, V13.1 - (ostrich) and IBM XL Fortran for Linux, V15.1 - - Debian 8.4 3.16.0-4-amd64 #1 SMP Debian 3.16.36-1 x86_64 GNU/Linux - gcc, g++ (Debian 4.9.2-10) 4.9.2 - GNU Fortran (Debian 4.9.2-10) 4.9.2 - (cmake and autotools) - - Fedora 24 4.7.2-201.fc24.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc, g++ (GCC) 6.1.1 20160621 - (Red Hat 6.1.1-3) - GNU Fortran (GCC) 6.1.1 20160621 - (Red Hat 6.1.1-3) - (cmake and autotools) - - Ubuntu 16.04.1 4.4.0-38-generic #57-Ubuntu SMP x86_64 GNU/Linux - gcc, g++ (Ubuntu 5.4.0-6ubuntu1~16.04.2) - 5.4.0 20160609 - GNU Fortran (Ubuntu 5.4.0-6ubuntu1~16.04.2) - 5.4.0 20160609 - (cmake and autotools) - - -Known Problems -============== - - At present, metadata cache images may not be generated by parallel - applications. Parallel applications can read files with metadata cache - images, but since this is a collective operation, a deadlock is possible - if one or more processes do not participate. - - Three tests fail with OpenMPI 3.0.0/GCC-7.2.0-2.29: - testphdf5 (ecdsetw, selnone, cchunk1, cchunk3, cchunk4, and actualio) - t_shapesame (sscontig2) - t_pflush1/fails on exit - The first two tests fail attempting collective writes. - - Known problems in previous releases can be found in the HISTORY*.txt files - in the HDF5 source. Please report any new problems found to - help@hdfgroup.org. - - -%%%%1.10.1%%%% - -HDF5 version 1.10.1 released on 2017-04-27 -================================================================================ - -INTRODUCTION - -This document describes the differences between HDF5-1.10.0-patch1 and -HDF5 1.10.1, and contains information on the platforms tested and known -problems in HDF5-1.10.1. For more details check the HISTORY*.txt files -in the HDF5 source. 
- -Links to HDF5 1.10.1 source code, documentation, and additional materials can -be found on The HDF5 web page at: - - https://support.hdfgroup.org/HDF5/ - -The HDF5 1.10.1 release can be obtained from: - - https://support.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for the snapshot can be accessed directly at this location: - - https://support.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.10.x release series, including brief general -descriptions of some new and modified APIs, are described in the "New Features -in HDF5 Release 1.10" document: - - https://support.hdfgroup.org/HDF5/docNewFeatures/index.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 10.1 (current -release) versus Release 1.10.0 - - https://support.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS - -- Major New Features Introduced in HDF5 1.10.1 -- Other New Features and Enhancements -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.10.0-patch1 -- Supported Platforms -- Tested Configuration Features Summary -- More Tested Platforms -- Known Problems - - -Major New Features Introduced in HDF5 1.10.1 -============================================ - -For links to the RFCs and documentation in this section please view -https://support.hdfgroup.org/HDF5/docNewFeatures in a web browser. - -________________________________________ -Metadata Cache Image -________________________________________ - - HDF5 metadata is typically small, and scattered throughout the HDF5 file. - This can affect performance, particularly on large HPC systems. The - Metadata Cache Image feature can improve performance by writing the - metadata cache in a single block on file close, and then populating the - cache with the contents of this block on file open, thus avoiding the many - small I/O operations that would otherwise be required on file open and - close. See the RFC for complete details regarding this feature. Also, - see the Fine Tuning the Metadata Cache documentation. - - At present, metadata cache images may not be generated by parallel - applications. Parallel applications can read files with metadata cache - images, but since this is a collective operation, a deadlock is possible - if one or more processes do not participate. - -________________________________________ -Metadata Cache Evict on Close -________________________________________ - - The HDF5 library's metadata cache is fairly conservative about holding on - to HDF5 object metadata (object headers, chunk index structures, etc.), - which can cause the cache size to grow, resulting in memory pressure on - an application or system. The "evict on close" property will cause all - metadata for an object to be evicted from the cache as long as metadata - is not referenced from any other open object. See the Fine Tuning the - Metadata Cache documentation for information on the APIs. - - At present, evict on close is disabled in parallel builds. - -________________________________________ -Paged Aggregation -________________________________________ - - The current HDF5 file space allocation accumulates small pieces of metadata - and raw data in aggregator blocks which are not page aligned and vary - widely in sizes. 
The paged aggregation feature was implemented to provide - efficient paged access of these small pieces of metadata and raw data. - See the RFC for details. Also, see the File Space Management documentation. - -________________________________________ -Page Buffering -________________________________________ - - Small and random I/O accesses on parallel file systems result in poor - performance for applications. Page buffering in conjunction with paged - aggregation can improve performance by giving an application control of - minimizing HDF5 I/O requests to a specific granularity and alignment. - See the RFC for details. Also, see the Page Buffering documentation. - - At present, page buffering is disabled in parallel builds. - - - -Other New Features and Enhancements -=================================== - - Library - ------- - - Added a mechanism for disabling the SWMR file locking scheme. - - The file locking calls used in HDF5 1.10.0 (including patch1) - will fail when the underlying file system does not support file - locking or where locks have been disabled. To disable all file - locking operations, an environment variable named - HDF5_USE_FILE_LOCKING can be set to the five-character string - 'FALSE'. This does not fundamentally change HDF5 library - operation (aside from initial file open/create, SWMR is lock-free), - but users will have to be more careful about opening files - to avoid problematic access patterns (i.e.: multiple writers) - that the file locking was designed to prevent. - - Additionally, the error message that is emitted when file lock - operations set errno to ENOSYS (typical when file locking has been - disabled) has been updated to describe the problem and potential - resolution better. - - (DER, 2016/10/26, HDFFV-9918) - - - The return type of H5Pget_driver_info() has been changed from void * - to const void *. - - The pointer returned by this function points to internal library - memory and should not be freed by the user. - - (DER, 2016/11/04, HDFFV-10017) - - - The direct I/O VFD has been removed from the list of VFDs that - support SWMR. - - This configuration was never officially tested and several SWMR - tests fail when this VFD is set. - - (DER, 2016/11/03, HDFFV-10169) - - Configuration: - -------------- - - The minimum version of CMake required to build HDF5 is now 3.2.2. - - (ADB, 2017/01/10) - - - An --enable/disable-developer-warnings option has been added to - configure. - - This disables warnings that do not indicate poor code quality such - as -Winline and gcc's -Wsuggest-attribute. Developer warnings are - disabled by default. - - (DER, 2017/01/10) - - - A bin/restore.sh script was added that reverts autogen.sh processing. - - (DER, 2016/11/08) - - - CMake: Added NAMESPACE hdf5:: to package configuration files to allow - projects using installed HDF5 binaries built with CMake to link with - them without specifying the HDF5 library location via IMPORTED_LOCATION. - - (ABD, 2016/10/17, HDFFV-10003) - - - CMake: Changed the CTEST_BUILD_CONFIGURATION option to - CTEST_CONFIGURATION_TYPE as recommended by the CMake documentation. - - (ABD, 2016/10/17, HDFFV-9971) - - - Fortran Library: - ---------------- - - - The HDF5 Fortran library can now be compiled with the NAG compiler. - - (MSB, 2017/2/10, HDFFV-9973) - - - C++ Library: - ------------ - - - The following C++ API wrappers have been added to the C++ Library: - - // Sets/Gets the strategy and the threshold value that the library - // will employ in managing file space. 
- FileCreatPropList::setFileSpaceStrategy - H5Pset_file_space_strategy - FileCreatPropList::getFileSpaceStrategy - H5Pget_file_space_strategy - - // Sets/Gets the file space page size for paged aggregation. - FileCreatPropList::setFileSpacePagesize - H5Pset_file_space_page_size - FileCreatPropList::getFileSpacePagesize - H5Pget_file_space_page_size - - // Checks if the given ID is valid. - IdComponent::isValid - H5Iis_valid - - // Sets/Gets the number of soft or user-defined links that can be - // traversed before a failure occurs. - LinkAccPropList::setNumLinks - H5Pset_nlinks - LinkAccPropList::getNumLinks - H5Pget_nlinks - - // Returns a copy of the creation property list of a datatype. - DataType::getCreatePlist - H5Tget_create_plist - - // Opens/Closes an object within a group or a file, regardless of object - // type - Group::getObjId - H5Oopen - Group::closeObjId - H5Oclose - - // Maps elements of a virtual dataset to elements of the source dataset. - DSetCreatPropList::setVirtual - H5Pset_virtual - - // Gets general information about this file. - H5File::getFileInfo - H5Fget_info2 - - // Returns the number of members in a type. - IdComponent::getNumMembers - H5Inmembers - - // Determines if an element type exists. - IdComponent::typeExists - H5Itype_exists - - // Determines if an object exists. - H5Location::exists - H5Lexists. - - // Returns the header version of an HDF5 object. - H5Object::objVersion - H5Oget_info for version - - (BMR, 2017/03/20, HDFFV-10004, HDFFV-10139, HDFFV-10145) - - - New exception: ObjHeaderIException for H5O interface. - - (BMR, 2017/03/15, HDFFV-10145) - - - New class LinkAccPropList for link access property list, to be used by - wrappers of H5Lexists. - - (BMR, 2017/01/04, HDFFV-10145) - - - New constructors to open datatypes in ArrayType, CompType, DataType, - EnumType, FloatType, IntType, StrType, and VarLenType. - - (BMR, 2016/12/26, HDFFV-10056) - - - New member functions: - - DSetCreatPropList::setNbit() to setup N-bit compression for a dataset. - - ArrayType::getArrayNDims() const - ArrayType::getArrayDims() const - both to replace the non-const versions. - - (BMR, 2016/04/25, HDFFV-8623, HDFFV-9725) - - - Tools: - ------ - - The following options have been added to h5clear: - -s: clear the status_flags field in the file's superblock - -m: Remove the metadata cache image from the file - - (QAK, 2017/03/22, PR#361) - - - High-Level APIs: - --------------- - - Added New Fortran 2003 API for h5tbmake_table_f. - - (MSB, 2017/02/10, HDFFV-8486) - - - -Support for New Platforms, Languages, and Compilers -=================================================== - - - Added NAG compiler - - - -Bug Fixes since HDF5-1.10.0-patch1 release -================================== - - Library - ------- - - Outdated data structure was used in H5D_CHUNK_DEBUG blocks, causing - compilation errors when H5D_CHUNK_DEBUG was defined. This is fixed. - - (BMR, 2017/04/04, HDFFV-8089) - - - SWMR implementation in the HDF5 1.10.0 and 1.10.0-patch1 releases has a - broken metadata flush dependency that manifested itself with the following - error at the end of the HDF5 error stack: - - H5Dint.c line 846 in H5D__swmr_setup(): dataspace chunk index must be 0 - for SWMR access, chunkno = 1 - major: Dataset - minor: Bad value - - It was also reported at https://github.com/areaDetector/ADCore/issues/203 - - The flush dependency is fixed in this release. 
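
     For reference, a minimal sketch of the SWMR writer setup that
     exercises this code path. The file and dataset names are
     illustrative and error checking is omitted:

         /* Minimal SWMR writer sketch (illustrative names only). */
         #include "hdf5.h"

         int main(void)
         {
             hid_t   fapl, file, space, dset, dcpl;
             hsize_t dims[1]    = {0};
             hsize_t maxdims[1] = {H5S_UNLIMITED};
             hsize_t chunk[1]   = {1024};

             /* SWMR requires the latest file format */
             fapl = H5Pcreate(H5P_FILE_ACCESS);
             H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);

             file = H5Fcreate("swmr_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

             /* Chunked, extendible dataset that the writer will append to */
             dcpl = H5Pcreate(H5P_DATASET_CREATE);
             H5Pset_chunk(dcpl, 1, chunk);
             space = H5Screate_simple(1, dims, maxdims);
             dset  = H5Dcreate2(file, "data", H5T_NATIVE_INT, space,
                                H5P_DEFAULT, dcpl, H5P_DEFAULT);

             /* Switch the open file to SWMR-write mode */
             H5Fstart_swmr_write(file);

             /* ... extend the dataset and write new records here ... */

             H5Dclose(dset);
             H5Sclose(space);
             H5Pclose(dcpl);
             H5Pclose(fapl);
             H5Fclose(file);
             return 0;
         }

     A reader would open the same file with
     H5F_ACC_RDONLY | H5F_ACC_SWMR_READ and poll the dataset's extent.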
- - - Changed the plugins dlopen option from RTLD_NOW to RTLD_LAZY - - (ABD, 2016/12/12, PR#201) - - - A number of issues were fixed when reading/writing from/to corrupted - files to ensure that the library fails gracefully in these cases: - - * Writing to a corrupted file that has an object message which is - incorrectly marked as shareable on disk results in a buffer overflow / - invalid write instead of a clean error message. - - * Decoding data from a corrupted file with a dataset encoded with the - H5Z_NBIT decoding can result in a code execution vulnerability under - the context of the application using the HDF5 library. - - * When decoding an array datatype from a corrupted file, the HDF5 library - fails to return an error in production if the number of dimensions - decoded is greater than the maximum rank. - - * When decoding an "old style" array datatype from a corrupted file, the - HDF5 library fails to return an error in production if the number of - dimensions decoded is greater than the maximum rank. - - (NAF, 2016/10/06, HDFFV-9950, HDFFV-9951, HDFFV-9992, HDFFV-9993) - - - Fixed an error that would occur when copying an object with an attribute - which is a compound datatype consisting of a variable length string. - - (VC, 2016/08/24, HDFFV-7991) - - - H5DOappend will no longer fail if a dataset has no append callback - registered. - - (VC, 2016/08/14, HDFFV-9960) - - - Fixed an issue where H5Pset_alignment could result in misaligned blocks - with some input combinations, causing an assertion failure in debug mode. - - (NAF, 2016/08/11, HDFFV-9948) - - - Fixed a problem where a plugin compiled into a DLL in the default plugin - directory could not be found by the HDF5 library at runtime on Windows - when the HDF5_PLUGIN_PATH environment variable was not set. - - (ABD, 2016/08/01, HDFFV-9706) - - - Fixed an error that would occur when calling H5Adelete on an attribute - which is attached to an externally linked object in the target file and - whose datatype is a committed datatype in the main file. - - (VC, 2016/07/06, HDFFV-9940) - - - (a) Throw an error instead of assertion when v1 btree level hits the 1 - byte limit. - (b) Modifications to better handle error recovery when conversion by - h5format_convert fails. - - (VC, 2016/05/29, HDFFV-9434) - - - Fixed a memory leak where an array used by the library to track SWMR - read retries was unfreed. - - The leaked memory was small (on the order of a few tens of ints) and - allocated per-file. The memory was allocated (and lost) only when a - file was opened for SWMR access. - - (DER, 2016/04/27, HDFFV-9786) - - - Fixed a memory leak that could occur when opening a file for the first - time (including creating) and the call fails. - - This occurred when the file-driver-specific info was not cleaned up. - The amount of memory leaked varied with the file driver, but would - normally be less than 1 kB. - - (DER, 2016/12/06, HDFFV-10168) - - - Fixed a failure in collective metadata writes. - - This failure only appeared when collective metadata writes - were enabled (via H5Pset_coll_metadata_write()). - - (JRM, 2017/04/10, HDFFV-10055) - - - Parallel Library - ---------------- - - Fixed a bug that could occur when allocating a chunked dataset in parallel - with an alignment set and an alignment threshold greater than the chunk - size but less than or equal to the raw data aggregator size. 
- - (NAF, 2016/08/11, HDFFV-9969) - - - Configuration - ------------- - - Configuration will check for the strtoll and strtoull functions - before using alternatives - - (ABD, 2017/03/17, PR#340) - - - CMake uses a Windows pdb directory variable if available and - will generate both static and shared pdb files. - - (ABD, 2017/02/06, HDFFV-9875) - - - CMake now builds shared versions of tools. - - (ABD, 2017/02/01, HDFFV-10123) - - - Makefiles and test scripts have been updated to correctly remove files - created when running "make check" and to avoid removing any files under - source control. In-source builds followed by "make clean" and "make - distclean" should result in the original source files. - (LRK, 2017/01/17, HDFFV-10099) - - - The tools directory has been divided into two separate source and test - directories. This resolves a build dependency and, as a result, - 'make check' will no longer fail in the tools directory if 'make' was - not executed first. - - (ABD, 2016/10/27, HDFFV-9719) - - - CMake: Fixed a timeout error that would occasionally occur when running - the virtual file driver tests simultaneously due to test directory - and file name collisions. - - (ABD, 2016/09/19, HDFFV-9431) - - - CMake: Fixed a command length overflow error by converting custom - commands inside CMakeTest.cmake files into regular dependencies and - targets. - - (ABD, 2016/07/12, HDFFV-9939) - - - Fixed a problem preventing HDF5 to be built on 32-bit CYGWIN by - condensing cygwin configuration files into a single file and - removing outdated compiler settings. - - (ABD, 2016/07/12, HDFFV-9946) - - - Fortran - -------- - - Changed H5S_ALL_F from INTEGER to INTEGER(HID_T) - - (MSB, 2016/10/14, HDFFV-9987) - - - Tools - ----- - - h5diff now correctly ignores strpad in comparing strings. - - (ABD, 2017/03/03, HDFFV-10128) - - - h5repack now correctly parses the command line filter options. - - (ABD, 2017/01/24, HDFFV-10046) - - - h5diff now correctly returns an error when it cannot read data due - to an unavailable filter plugin. - - (ADB 2017/01/18, HDFFV-9994 ) - - - Fixed an error in the compiler wrapper scripts (h5cc, h5fc, et al.) - in which they would erroneously drop the file argument specified via - the -o flag when the -o flag was specified before the -c flag on the - command line, resulting in a failure to compile. - - (LRK, 2016/11/04, HDFFV-9938, HDFFV-9530) - - - h5repack User Defined (UD) filter parameters were not parsed correctly. - - The UD filter parameters were not being parsed correctly. Reworked coding - section to parse the correct values and verify number of parameters. - - (ABD, 2016/10/19, HDFFV-9996, HDFFV-9974, HDFFV-9515, HDFFV-9039) - - - h5repack allows the --enable-error-stack option on the command line. - - (ADB, 2016/08/08, HDFFV-9775) - - - C++ APIs - -------- - - The member function H5Location::getNumObjs() is moved to - class Group because the objects are in a group or a file only, - and H5Object::getNumAttrs to H5Location to get the number of - attributes at a given location. - - (BMR, 2017/03/17, PR#466) - - - Due to the change in the C API, the overloaded functions of - PropList::setProperty now need const for some arguments. They are - planned for deprecation and are replaced by new versions with proper - consts. - - (BMR, 2017/03/17, PR#344) - - - The high-level API Packet Table (PT) did not write data correctly when - the datatype is a compound type that has string type as one of the - members. 
This problem started in 1.8.15, after the fix of HDFFV-9042 - was applied, which caused the Packet Table to use native type to access - the data. It should be up to the application to specify whether the - buffer to be read into memory is in the machine's native architecture. - Thus, the PT is fixed to not use native type but to make a copy of the - user's provided datatype during creation or the packet table's datatype - during opening. If an application wishes to use native type to read the - data, then the application will request that. However, the Packet Table - doesn't provide a way to specify memory datatype in this release. This - feature will be available in future releases. - - (BMR, 2016/10/27, HDFFV-9758) - - - The obsolete macros H5_NO_NAMESPACE and H5_NO_STD have been removed from - the HDF5 C++ API library. - - (BMR, 2016/10/23, HDFFV-9532) - - - The problem where a user-defined function cannot access both, attribute - and dataset, using only one argument is now fixed. - - (BMR, 2016/10/11, HDFFV-9920) - - - In-memory array information, ArrayType::rank and - ArrayType::dimensions, were removed. This is an implementation - detail and should not affect applications. - - (BMR, 2016/04/25, HDFFV-9725) - - - Testing - ------- - - Fixed a problem that caused tests using SWMR to occasionally fail when - running "make check" using parallel make. - - (LRK, 2016/03/22, PR#338, PR#346, PR#358) - - -Supported Platforms -=================== - - Linux 2.6.32-573.18.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 - (Red Hat 4.4.7-4) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Version 4.9.3, Version 5.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 Build 20150407 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Visual Studio 2015 w/ MSMPI 8 (cmake) - Cygwin(CYGWIN_NT-6.1 2.8.0(0.309/5/3) - gcc and gfortran compilers (GCC 5.4.0) - (cmake and autotools) - - Windows 10 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Cygwin(CYGWIN_NT-6.1 2.8.0(0.309/5/3) - gcc and gfortran compilers (GCC 5.4.0) - (cmake and autotools) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Mac OS X Mt. 
Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (swallow/kite) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.2 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (wren/quail) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.1 from Xcode 7.0 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.6 Apple clang/clang++ version 7.3 from Xcode 7.3 - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 16.0.2 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.11 32-bit n y/y n y y y -Solaris2.11 64-bit n y/n n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y y y y y -Windows 7 Cygwin n y/n n y y y -Windows 7 x64 Cygwin n y/n n y y y -Windows 10 y y/y n y y y -Windows 10 x64 y y/y n y y y -Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemite 10.10.5 64-bit n y/y n y y y -Mac OS X El Capitan 10.11.6 64-bit n y/y n y y y -CentOS 7.2 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.2 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 7.2 Linux 2.6.32 x86_64 Intel n y/y n y y y -Linux 2.6.32-573.18.1.el6.ppc64 n y/y n y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.11 32-bit y y y y -Solaris2.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 7 x64 Cygwin n n n y -Windows 10 y y y y -Windows 10 x64 y y y y -Mac OS X Mountain Lion 10.8.5 64-bit y n y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemite 10.10.5 64-bit y n y y -Mac OS X El Capitan 10.11.6 64-bit y n y y -CentOS 7.2 Linux 2.6.32 x86_64 PGI y y y n -CentOS 7.2 Linux 2.6.32 x86_64 GNU y y y y -CentOS 7.2 Linux 2.6.32 x86_64 Intel y y y n -Linux 2.6.32-573.18.1.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== - -The following platforms are not supported but have been tested for this release. 
- - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (mayll/platypus) Version 4.4.7 20120313 - Version 4.8.4 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 16.10-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 (Build 20150407) - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 3.10.0-327.18.2.el7 GNU C (gcc) and C++ (g++) compilers - #1 SMP x86_64 GNU/Linux Version 4.8.5 20150623 (Red Hat 4.8.5-4) - (jelly) with NAG Fortran Compiler Release 6.1(Tozai) - Intel(R) C (icc) and C++ (icpc) compilers - Version 15.0.3.187 (Build 20150407) - with NAG Fortran Compiler Release 6.1(Tozai) - - Linux 2.6.32-573.18.1.el6.ppc64 MPICH mpich 3.1.4 compiled with - #1 SMP ppc64 GNU/Linux IBM XL C/C++ for Linux, V13.1 - (ostrich) and IBM XL Fortran for Linux, V15.1 - - Debian 8.4 3.16.0-4-amd64 #1 SMP Debian 3.16.36-1 x86_64 GNU/Linux - gcc, g++ (Debian 4.9.2-10) 4.9.2 - GNU Fortran (Debian 4.9.2-10) 4.9.2 - (cmake and autotools) - - Fedora 24 4.7.2-201.fc24.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc, g++ (GCC) 6.1.1 20160621 - (Red Hat 6.1.1-3) - GNU Fortran (GCC) 6.1.1 20160621 - (Red Hat 6.1.1-3) - (cmake and autotools) - - Ubuntu 16.04.1 4.4.0-38-generic #57-Ubuntu SMP x86_64 GNU/Linux - gcc, g++ (Ubuntu 5.4.0-6ubuntu1~16.04.2) - 5.4.0 20160609 - GNU Fortran (Ubuntu 5.4.0-6ubuntu1~16.04.2) - 5.4.0 20160609 - (cmake and autotools) - - -Known Problems -============== - - At present, metadata cache images may not be generated by parallel - applications. Parallel applications can read files with metadata cache - images, but since this is a collective operation, a deadlock is possible - if one or more processes do not participate. - - Known problems in previous releases can be found in the HISTORY*.txt files - in the HDF5 source. Please report any new problems found to - help@hdfgroup.org. - - -%%%%1.10.0-patch1%%%% - - -HDF5 version 1.10.0-patch1 released on 2016-05-23 -================================================================================ - -INTRODUCTION - -This document describes the differences between HDF5-1.8 series and -HDF5 1.10.0 releases, and contains information on the platforms -tested. - -Links to HDF5 1.10.0 source code can be found on The HDF Group's -development FTP server at the following location: - - https://www.hdfgroup.org/HDF5/release/obtain5110.html - -User documentation can be accessed directly at this location: - - https://www.hdfgroup.org/HDF5/docNewFeatures/ - -For more information, see the HDF5 home page: - - https://www.hdfgroup.org/HDF5/ - -If you have any questions or comments, please send them to the HDF -Help Desk: - - help@hdfgroup.org - - - -CONTENTS - -- New Features -- Issues Addressed in this Release -- Supported Platforms -- Tested Configuration Features Summary -- More Tested Platforms -- Known Problems and Limitations - - - -New Features -============ -This release supports the following features: - - Configuration - ------------- - - API Compatibility with HDF5 1.8 Flag Was Added - - The 1.10 version of the HDF5 Library can be configured to operate - identically to the 1.8 library with the --with-default-api-version=v18 - configure flag. This allows existing code to be compiled with the 1.10 - library without requiring immediate changes to the application source - code. For additional configuration options and other details, see - "API Compatibility Macros in HDF5" at - https://www.hdfgroup.org/HDF5/doc/RM/APICompatMacros.html. 
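
     As a rough sketch of what the v18 setting means for application
     code, the fragment below uses H5Fget_info() and H5F_info_t with
     their HDF5 1.8 shapes. Against a 1.10 library configured with
     --with-default-api-version=v18 (or an application built with an
     API compatibility macro such as H5_USE_18_API, per the document
     referenced above), the unversioned names resolve to their 1.8
     versions, so code like this builds unchanged. The file name is
     illustrative and error checking is omitted:

         #include <stdio.h>
         #include "hdf5.h"

         int main(void)
         {
             hid_t      file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
             H5F_info_t finfo;

             /* Resolves to the 1.8 signature under the v18 mapping */
             H5Fget_info(file, &finfo);
             printf("superblock extension size: %llu\n",
                    (unsigned long long)finfo.super_ext_size);

             H5Fclose(file);
             return 0;
         }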
- - - Autotools Configuration Has Been Extensively Reworked - - The autotools configuration options have been updated to allow more - fine-grained control of the build options and to correct some bugs. - See configure --help for comprehensive information on each option. - - Specific changes: - - * --enable-debug and --enable-production are no longer accepted. - Use --enable-build-mode=(debug | production) instead. These set - appropriate defaults for symbols, optimizations, and other - configuration options. These defaults can be overridden by the - user. - - * Extra debug output messages are no longer enabled with - --enable-debug=. Use --enable-internal-debug= - instead. - - * A new --enable-symbols option allows symbols to be generated - independently of the build mode. --disable-symbols can be used - to strip symbols from the binary. - - * A new --enable-asserts option sets/unsets NDEBUG. This is - independent of the build mode. This also enables some extra - low-overhead debug checks in the library. - - * A new --enable-profiling option sets profiling flags. This is - independent of the build mode. - - * A new --enable-optimization option sets the optimization level. - This is independent of the build mode. - - * Many of these options can take a flags string that will be used - to build the library. This can be useful for specifying custom - optimization flags such as -Os and -Ofast. - - * gnu C++ and Fortran use configure sub-files that update the - build flags and turn on warnings. The increase in warnings when - building these wrapper libraries is due to these flag changes - and not to a decrease in code quality. - - * The option to clear file buffers has been removed. Any buffer that - will eventually be written to disk will now always be memset - to zero. This prevents the previous contents of the buffer from - being written to the disk if the buffer contents are not - completely overwritten, which has security implications. - - - LFS Changes - - The way the autotools handle large file support (LFS) has been - overhauled in this release. - - * We assume ftello and fseeko exist - - * We no longer explicitly use the *64 I/O functions. Instead, we - rely on a mapping provided by _FILE_OFFSET_BITS or its equivalent. - - * _LARGEFILE(64)_SOURCE is no longer exported via AM_CPPFLAGS. - - - - Parallel Library - ----------------- - - Collective Metadata I/O - - Calls for HDF5 metadata can result in many small reads and writes. - On metadata reads, collective metadata I/O can improve performance - by allowing the library to perform optimizations when reading the - metadata by having one rank read the data and broadcasting it to - all other ranks. - - Collective metadata I/O improves metadata write performance through - the construction of an MPI derived datatype that is then written - collectively in a single call. For more information, see - https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesCollectiveMetadataIoDocs.html. - - - - Library - -------- - - Concurrent Access to HDF5 Files - Single Writer/ Multiple Reader (SWMR) - - The Single Writer/ Multiple Reader or SWMR feature enables users to - read data concurrently while writing it. Communications between the - processes and file locking are not required. The processes can run - on the same or on different platforms as long as they share a common - file system that is POSIX compliant. 
For more information, see the - Single-Writer/Multiple-Reader (SWMR) documentation at - https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesSwmrDocs.html. - - - Virtual Dataset (VDS) - - The VDS feature enables data to be accessed across HDF5 files - using standard HDF5 objects such as groups and datasets without - rewriting or rearranging the data. An HDF5 virtual dataset (VDS) - is an HDF5 dataset that is composed of source HDF5 datasets in - a predefined mapping. VDS can be used with the SWMR feature. For - documentation, check - https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesVirtualDatasetDocs.html. - - - Persistent Free File Space Tracking - - Usage patterns when working with an HDF5 file sometimes result in - wasted space within the file. This can also impair access times - when working with the resulting files. The new file space management - feature provides strategies for managing space in a file to improve - performance in both of these areas. For more information, see - https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesFileSpaceMgmtDocs.html. - - - Version 3 Metadata Cache - - The version 3 metadata cache moves management of metadata I/O from - the clients to the metadata cache proper. This change is essential for - SWMR and other features that have yet to be released. - - - - C++ Library - ------------ - - New Member Function Added to H5::ArrayType - - The assignment operator ArrayType::operator= was added because - ArrayType has pointer data members. - - (BMR - 2016/03/07, HDFFV-9562) - - - - Tools - ------ - - h5watch - - The h5watch tool allows users to output new records appended to - a dataset under SWMR access as it grows. The functionality is - similar to the Unix user command "tail" with the follow option, - which outputs appended data as the file grows. For more - information, see - https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesSwmrDocs.html#Tools. - - - h5format_convert - - The h5format_convert tool allows users to convert the indexing - type of a chunked dataset made with a 1.10.x version of the HDF5 - Library when the latest file format is used to the 1.8.x version 1 B-tree indexing - type. For example, datasets created using SWMR access, can be - converted to be accessed by the HDF5 1.18 library and tools. The - tool does not rewrite raw data, but it does rewrite HDF5 metadata. - - - - High-Level APIs - ---------------- - - H5DOappend - - The function appends data to a dataset along a specified dimension. - - - C Packet Table API - ------------------ - - Replacement of a Public Function with H5PTcreate - - The existing function H5PTcreate_fl limits applications so they - can use the deflate compression only. The public function - H5PTcreate has been added to replace H5PTcreate_fl. H5PTcreate - takes a property list identifier to provide flexibility on - creation properties. - - (BMR - 2016/03/04, HDFFV-8623) - - - New Public Functions: H5PTget_dataset and H5PTget_type - - Two accessor functions have been added. H5PTget_dataset returns - the identifier of the dataset associated with the packet table, - and H5PTget_type returns the identifier of the datatype used by - the packet table. 
- - (BMR, 2016/03/04, HDFFV-8623) - - - Regarding #ifdef VLPT_REMOVED - - The #ifdef VLPT_REMOVED blocks have been removed from the packet - table (PT) library source except for the following functions: - + H5PTis_varlen() has been made available again - + H5PTfree_vlen_readbuff() is now H5PTfree_vlen_buff() - - (BMR - 2016/03/04, HDFFV-442) - - C++ Packet Table API - -------------------- - - New Constructor Added to FL_PacketTable - - An overloaded constructor has been added to FL_PacketTable and - takes a property list identifier to provide flexibility on - creation properties. - - (BMR - 2016/03/08, HDFFV-8623) - - - New Public Functions - - Two accessor wrappers are added to class PacketTable. - PacketTable::GetDataset() returns the identifier of the dataset - associated with the packet table, and PacketTable::GetDatatype() - returns the identifier of the datatype that the packet table uses. - - (BMR - 2016/03/04, HDFFV-8623) - - - Member Functions with "char*" as an Argument - - Overloaded functions were added to provide the "const char*" - argument; the existing version will be deprecated in future - releases. - - (BMR - 2016/03/04, HDFFV-8623) - - - Regarding #ifdef VLPT_REMOVED - - The #ifdef VLPT_REMOVED blocks have been removed from the packet - table library source code except for the following functions: - + VL_PacketTable::IsVariableLength() was moved to PacketTable - + VL_PacketTable::FreeReadBuff() is now PacketTable::FreeBuff() - - (BMR - 2016/03/04, HDFFV-442) - - - - Java Wrapper Library - -------------------- - - The Java HDF5 JNI library has been integrated into the HDF5 repository. - The configure option is "--enable-java", and the CMake option is - HDF5_BUILD_JAVA:BOOL=ON. The package hierarchy has changed from the - HDF5 1.8 JNI, which was "ncsa.hdf.hdflib.hdf5", to HDF5 1.10, - "hdf.hdflib.hdf5". - - A number of new APIs were added including some for VDS and SWMR. - - - - Other Important Changes - ----------------------- - - The hid_t type was changed from 32-bit to a 64-bit value. - - - -Issues Addressed in this Release Since 1.10.0 -============================================= - - - h5diff would return from a compare attributes abnormally if one of the datatypes - was a vlen. This resulted in a memory leak as well as an incorrect report of - attribute comparison. - - Fixed. - (ADB - 2016/04/26, HDFFV-9784) - - - The JUnit-interface test may fail on Solaris platforms. The result of - a test for verifying the content of the error stack to stdout is - in a different order on Solaris then other platforms. - - This test is skipped on Solaris - (ADB - 2016/04/21, HDFFV-9734) - - - When building HDF5 with Java using CMake and specifying Debug for CMAKE_BUILD_TYPE, - there was a missing command argument for the tests of the examples. - - Fixed. - (ADB - 2016/04/21, HDFFV-9743) - - - Changed h5diff to print a warning when a dataset is virtual, enabling - the data to be compared. In addition h5repack failed to copy the data - of a virtual dataset to the new file. Function H5D__get_space_status changed - to correctly determine the H5D_space_status_t allocation value. - - CMake added the Fixed Array indexing tests that were only in the autotools - test scripts. - - Fixed and tests added for vds issues. - (ADB,NAF - 2016/04/21, HDFFV-9756) - - - CMake added the h5format_convert tool and tests that were only in the autotools - build and test scripts. The autotools test script was reworked to allow CMake - to execute the test suite in parallel. 
- - Also, h5clear tool and tests were added to the misc folder. - - Fixed. - (ADB - 2016/04/21, HDFFV-9766) - - - CMake added the h5watch tool and argument tests that were only in the autotools - build and test scripts. The POSIX only tests were not added to CMake. - - CMake HL tools files were refactored to move the CMake test scripts into each tool folder. - - Fixed. - (ADB - 2016/04/21, HDFFV-9770) - - - Configure fails to detect valid real KINDs on FreeBSD 9.3 (i386) with Fortran enabled. - - Fixed. Added the exponential option to SELECTED_REAL_KIND to distinguish - KINDs of same precision - (MSB - 2016/05/14,HDFFV-9912) - - - - Corrected the f90 H5AWRITE_F integer interface's buf to be INTENT(IN). - (MSB - 2016/05/14) - - - Configure fails in sed command on FreeBSD 9.3 (i386) with Fortran enabled. - - Fixed. - (MSB - 2016/05/14,HDFFV-9912) - - - Compile time error in H5f90global.F90 with IBM XL Fortran 14.1.0.13 on BG/Q with Fortran - enabled. - - Fixed. - (MSB - 2016/05/16,HDFFV-9917) - - - A cmake build with Fortran enabled does not install module h5fortkit - - Fixed. - (MSB - 2016/05/23,HDFFV-9923) - - -Issues Addressed in this Release Since alpha1 -============================================= - - - H5Pget_virtual_printf_gap, H5Pget_virtual_view, H5Pget_efile_prefix - - The correct access property list settings from the - H5Pget_virtual_printf_gap, H5Pget_virtual_view, and - H5Pget_efile_prefix function calls could not be retrieved - using H5Dget_access_plist(). - - Fixed. - - (DER and NAF - 2016/03/14, HDFFV-9716) - - - h5dump - - When h5dump was provided with the name of a non-existing file or - when optional arguments were the last option on the command line, - h5dump would segfault. - - Fixed. - - (ADB 2016/02/28 HDFFV-9639, HDFFV-9684) - - - No Error Message for Corrupt Metadata - - The HDF5 Library did not propagate an error when it encountered - corrupt metadata in an HDF5 file. The issue was fixed for a - specific file provided by a user. If you still see the problem, - please contact help@hdfgroup.org - - Fixed. - - (MC - 2016/02/18, HDFFV-9670) - - - Problem Reading Chunked Datasets with a String Datatype Larger - Than the Chunk Size in Bytes - - When the latest file format was used and when a chunked dataset - was created with a datatype with the size bigger than a chunk - size, the data could not be read back. The issue was reported - for chunked datasets with a string datatype and was confirmed - for other datatypes with the sizes bigger than the chunk size in - bytes. - - Fixed. - - (JM - 2016/02/13, HDFFV-9672) - - - Control over the Location of External Files - - Users were unable to specify the locations of external files. - - Two APIs - H5Pget_efile_prefix and H5Pset_efile_prefix - were - added so that users could specify the locations of external files. - - (DER - 2016/02/04, HDFFV-8740) - - - -Issues Addressed in this Release Since alpha0 -============================================= - - h5format_convert - - The h5format_convert tool did not downgrade the version of the - superblock. - - Fixed. The tool now will downgrade the version of the superblock. - - (EIP 2016/01/11) - - - Crashes with multiple threads: invalid pointers - - It was reported that alpha0 crashed when used with multiple - threads. The issue exists in the HDF5 Library versions 1.8 and - 1.9. The problem is related to a shared file pointer used in some - miscellaneous data structures. The thread-safe library exposed - paths in the library where a file pointer became invalid. 
- - The alpha1 release contains the fixes for the specific use case - as described in HDFFV-9643. We will keep working on identifying - and fixing other paths in the library with similar problems. - - (EIP - 2016/01/15, HDFFV-9643) - - - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. - - AIX 6.1 xlc/xlc_r 10.1.0.5 - (NASA G-ADA) xlC/xlC_r 10.1.0.5 - xlf90/xlf90_r 12.1.0.6 - - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (mayll/platypus) Version 4.4.7 20120313 (Red Hat 4.4.7-16) - Version 4.9.3, Version 5.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 15.7-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 Build 20150407 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 2.6.32-573.18.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Version 4.9.3, Version 5.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 Build 20150407 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Cygwin(CYGWIN_NT-6.1 2.2.1(0.289/5/3) gcc(4.9.3) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Mac OS X Mt. 
Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (swallow/kite) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.2 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (wren/quail) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.0 from Xcode 7.0 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.4 Apple clang/clang++ version 7.3.0 from Xcode 7.3 - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -SunOS 5.11 32-bit n y/y n y y y -SunOS 5.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/y n y y n -Windows 8.1 n y/y n y y y -Windows 8.1 x64 n y/y n y y y -Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemeti 10.10.5 64-bit n y/y n y y y -AIX 6.1 32- and 64-bit n y/n n y y y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel n y/y n y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -SunOS 5.11 32-bit y y y y -SunOS 5.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 8.1 y y y y -Windows 8.1 x64 y y y y -Mac OS X Mountain Lion 10.8.5 64-bit y n y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemeti 10.10.5 64-bit y n y y -AIX 6.1 32- and 64-bit y n n y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y -Linux 2.6.32-431.11.2.el6.ppc64 y y y y - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.18-431.11.2.el6 g95 (GCC 4.0.3 (g95 0.94!) 
- #1 SMP x86_64 GNU/Linux - (platypus) - - Windows 7 Visual Studio 2008 (cmake) - - Windows 7 x64 Visual Studio 2008 (cmake) - - Windows 7 x64 Visual Studio 2010 (cmake) with SWMR using GPFS - - Windows 10 Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Windows 10 x64 Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux - gcc (Debian 4.7.2-5) 4.7.2 - GNU Fortran (Debian 4.7.2-5) 4.7.2 - (cmake and autotools) - - Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - (cmake and autotools) - - SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.8.1 - GNU Fortran (SUSE Linux) 4.8.1 - (cmake and autotools) - - Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - (cmake and autotools) - - hopper.nersc.gov PrgEnv-gnu/5.2.40 - gcc (GCC) 4.9.2 20141030 (Cray Inc.) - GNU Fortran (GCC) 4.9.2 20141030 (Cray Inc.) - g++ (GCC) 4.9.2 20141030 (Cray Inc.) - - - -Known Problems and Limitations -============================== -This section contains the list of known problems and limitations introduced -in this release of HDF5. - -Note: this list is not exhaustive of all known issues discovered in HDF5 -software to date. For a list of significant problems and known workarounds -identified in past releases, please refer to: - -https://www.hdfgroup.org/HDF5/release/known_problems/ - -The HDF Group also maintains a JIRA issue-tracking database which is used to -capture all known issues which are too numerous to reasonably list in this -document. The HDF Group is taking steps to make our JIRA issue database -open to the public, and this section will refer to that database in a future -release. In the meantime, please contact help@hdfgroup.org if you come across -an issue not listed here or at the link above, and we will provide any -information about known workarounds that we have or add it to our list of -known issues if it is a new issue. - - - The flush/refresh test occasionally fails on OS X platforms. This is - being investigated but no fix or workaround is available at this time. - (DER - 2016/03/22, HDFFV-9731) - - - The VDS/SWMR test will fail with a segmentation fault if the library - is built with --enable-using-memchecker. The is due to a VDS shutdown - procedure freeing a shared resource too early when the memory - checker changes are built. This problem does not arise when the - memory checker changes are not used since the internal library free - lists behave differently. The memory checker configure option should - normally only be used under special circumstances so this should not - affect most users. Users should be aware that the --enable-using-memchecker - + VDS combination may cause a segfault, however, so Valgrind et al. may - have to be used with an HDF5 library built without the feature if this - proves to be a problem. - (DER - 2016/03/21, HDFFV-9732) - - - SWMR feature limitations - The SWMR feature will only work if an HDF5 file under SWMR access resides - on a file system that obeys POSIX write() ordering semantics. Because of - this, SWMR will not work on network file systems such as NFS or SMB/Windows - file shares since those systems do not guarantee write odering. SWMR - regression tests are likely to fail if run on a network file system. 
SWMR - is currently not tested on Windows though it can be tested manually - (some of the SWMR test programs are built by CMake), and there are no - obvious reasons for it to not work on NTFS or GPFS. - (EIP - 2016/03/20, HDFFV-9733) - - - VDS feature limitation - Currently, the path to a VDS source file is interpreted as relative to the - directory where the executable program runs and not to the HDF5 file with - the VDS dataset unless a full path to the source file is specified during - the mapping. - (EIP - 2016/03/20, HDFFV-9724) - - - The H5Lexists API changed behavior in HDF5-1.10 when used with a file handle - and root group name ("/"): - - H5Lexists(fileid, "/") - - In HDF5-1.8 it returns false (0) and in HDF5-1.10 it returns true (1). - The documentation will be updated with information regarding this change. - (LRK - 2016/03/30, HDFFV-8746) - - -%%%%1.10.0%%%% - -HDF5 version 1.10.0 released on 2016-03-30 -================================================================================ - - - -INTRODUCTION - -This document describes the differences between HDF5-1.8 series and -HDF5 1.10.0 releases, and contains information on the platforms -tested. - -Links to HDF5 1.10.0 source code can be found on The HDF Group's -development FTP server at the following location: - - https://www.hdfgroup.org/HDF5/release/obtain5110.html - -User documentation can be accessed directly at this location: - - https://www.hdfgroup.org/HDF5/docNewFeatures/ - -For more information, see the HDF5 home page: - - https://www.hdfgroup.org/HDF5/ - -If you have any questions or comments, please send them to the HDF -Help Desk: - - help@hdfgroup.org - - - -CONTENTS - -- New Features -- Issues Addressed in this Release -- Supported Platforms -- Tested Configuration Features Summary -- More Tested Platforms -- Known Problems and Limitations - - - -New Features -============ -This release supports the following features: - - Configuration - ------------- - - API Compatibility with HDF5 1.8 Flag Was Added - - The 1.10 version of the HDF5 Library can be configured to operate - identically to the 1.8 library with the --with-default-api-version=v18 - configure flag. This allows existing code to be compiled with the 1.10 - library without requiring immediate changes to the application source - code. For additional configuration options and other details, see - "API Compatibility Macros in HDF5" at - https://www.hdfgroup.org/HDF5/doc/RM/APICompatMacros.html. - - - Autotools Configuration Has Been Extensively Reworked - - The autotools configuration options have been updated to allow more - fine-grained control of the build options and to correct some bugs. - See configure --help for comprehensive information on each option. - - Specific changes: - - * --enable-debug and --enable-production are no longer accepted. - Use --enable-build-mode=(debug | production) instead. These set - appropriate defaults for symbols, optimizations, and other - configuration options. These defaults can be overridden by the - user. - - * Extra debug output messages are no longer enabled with - --enable-debug=. Use --enable-internal-debug= - instead. - - * A new --enable-symbols option allows symbols to be generated - independently of the build mode. --disable-symbols can be used - to strip symbols from the binary. - - * A new --enable-asserts option sets/unsets NDEBUG. This is - independent of the build mode. This also enables some extra - low-overhead debug checks in the library. - - * A new --enable-profiling option sets profiling flags. 
This is - independent of the build mode. - - * A new --enable-optimization option sets the optimization level. - This is independent of the build mode. - - * Many of these options can take a flags string that will be used - to build the library. This can be useful for specifying custom - optimization flags such as -Os and -Ofast. - - * gnu C++ and Fortran use configure sub-files that update the - build flags and turn on warnings. The increase in warnings when - building these wrapper libraries is due to these flag changes - and not to a decrease in code quality. - - * The option to clear file buffers has been removed. Any buffer that - will eventually be written to disk will now always be memset - to zero. This prevents the previous contents of the buffer from - being written to the disk if the buffer contents are not - completely overwritten, which has security implications. - - - LFS Changes - - The way the autotools handle large file support (LFS) has been - overhauled in this release. - - * We assume ftello and fseeko exist - - * We no longer explicitly use the *64 I/O functions. Instead, we - rely on a mapping provided by _FILE_OFFSET_BITS or its equivalent. - - * _LARGEFILE(64)_SOURCE is no longer exported via AM_CPPFLAGS. - - - - Parallel Library - ----------------- - - Collective Metadata I/O - - Calls for HDF5 metadata can result in many small reads and writes. - On metadata reads, collective metadata I/O can improve performance - by allowing the library to perform optimizations when reading the - metadata by having one rank read the data and broadcasting it to - all other ranks. - - Collective metadata I/O improves metadata write performance through - the construction of an MPI derived datatype that is then written - collectively in a single call. For more information, see - https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesCollectiveMetadataIoDocs.html. - - - - Library - -------- - - Concurrent Access to HDF5 Files - Single Writer/ Multiple Reader (SWMR) - - The Single Writer/ Multiple Reader or SWMR feature enables users to - read data concurrently while writing it. Communications between the - processes and file locking are not required. The processes can run - on the same or on different platforms as long as they share a common - file system that is POSIX compliant. For more information, see the - Single-Writer/Multiple-Reader (SWMR) documentation at - https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesSwmrDocs.html. - - - Virtual Dataset (VDS) - - The VDS feature enables data to be accessed across HDF5 files - using standard HDF5 objects such as groups and datasets without - rewriting or rearranging the data. An HDF5 virtual dataset (VDS) - is an HDF5 dataset that is composed of source HDF5 datasets in - a predefined mapping. VDS can be used with the SWMR feature. For - documentation, check - https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesVirtualDatasetDocs.html. - - - Persistent Free File Space Tracking - - Usage patterns when working with an HDF5 file sometimes result in - wasted space within the file. This can also impair access times - when working with the resulting files. The new file space management - feature provides strategies for managing space in a file to improve - performance in both of these areas. For more information, see - https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesFileSpaceMgmtDocs.html. 
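      As a minimal sketch of the SWMR usage pattern described above (file
      name hypothetical; error checking omitted), the writer opens the file
      with SWMR write access and the latest file format, while readers open
      it with SWMR read access:

          #include "hdf5.h"

          /* Writer process: SWMR write access requires the latest file format */
          static void swmr_writer(void)
          {
              hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
              H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
              hid_t f = H5Fopen("swmr.h5", H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl);
              /* ... append records, calling H5Dflush() so readers can see them ... */
              H5Fclose(f);
              H5Pclose(fapl);
          }

          /* Reader process: opens the same file concurrently */
          static void swmr_reader(void)
          {
              hid_t f = H5Fopen("swmr.h5", H5F_ACC_RDONLY | H5F_ACC_SWMR_READ,
                                H5P_DEFAULT);
              /* ... re-check the dataset extent with H5Dget_space() and read ... */
              H5Fclose(f);
          }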
- - - Version 3 Metadata Cache - - The version 3 metadata cache moves management of metadata I/O from - the clients to the metadata cache proper. This change is essential for - SWMR and other features that have yet to be released. - - - - C++ Library - ------------ - - New Member Function Added to H5::ArrayType - - The assignment operator ArrayType::operator= was added because - ArrayType has pointer data members. - - (BMR - 2016/03/07, HDFFV-9562) - - - - Tools - ------ - - h5watch - - The h5watch tool allows users to output new records appended to - a dataset under SWMR access as it grows. The functionality is - similar to the Unix user command "tail" with the follow option, - which outputs appended data as the file grows. For more - information, see - https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesSwmrDocs.html#Tools. - - - h5format_convert - - The h5format_convert tool allows users to convert the indexing - type of a chunked dataset made with a 1.10.x version of the HDF5 - Library when the latest file format is used to the 1.8.x version 1 B-tree indexing - type. For example, datasets created using SWMR access, can be - converted to be accessed by the HDF5 1.18 library and tools. The - tool does not rewrite raw data, but it does rewrite HDF5 metadata. - - - - High-Level APIs - ---------------- - - H5DOappend - - The function appends data to a dataset along a specified dimension. - - - C Packet Table API - ------------------ - - Replacement of a Public Function with H5PTcreate - - The existing function H5PTcreate_fl limits applications so they - can use the deflate compression only. The public function - H5PTcreate has been added to replace H5PTcreate_fl. H5PTcreate - takes a property list identifier to provide flexibility on - creation properties. - - (BMR - 2016/03/04, HDFFV-8623) - - - New Public Functions: H5PTget_dataset and H5PTget_type - - Two accessor functions have been added. H5PTget_dataset returns - the identifier of the dataset associated with the packet table, - and H5PTget_type returns the identifier of the datatype used by - the packet table. - - (BMR, 2016/03/04, HDFFV-8623) - - - Regarding #ifdef VLPT_REMOVED - - The #ifdef VLPT_REMOVED blocks have been removed from the packet - table (PT) library source except for the following functions: - + H5PTis_varlen() has been made available again - + H5PTfree_vlen_readbuff() is now H5PTfree_vlen_buff() - - (BMR - 2016/03/04, HDFFV-442) - - C++ Packet Table API - -------------------- - - New Constructor Added to FL_PacketTable - - An overloaded constructor has been added to FL_PacketTable and - takes a property list identifier to provide flexibility on - creation properties. - - (BMR - 2016/03/08, HDFFV-8623) - - - New Public Functions - - Two accessor wrappers are added to class PacketTable. - PacketTable::GetDataset() returns the identifier of the dataset - associated with the packet table, and PacketTable::GetDatatype() - returns the identifier of the datatype that the packet table uses. - - (BMR - 2016/03/04, HDFFV-8623) - - - Member Functions with "char*" as an Argument - - Overloaded functions were added to provide the "const char*" - argument; the existing version will be deprecated in future - releases. 
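      A short C sketch of the new C Packet Table calls noted above
      (file and table names are illustrative; error checking omitted):

          #include "hdf5.h"
          #include "hdf5_hl.h"

          /* Create a packet table with creation properties and query its ids */
          static void packet_table_example(void)
          {
              hid_t file  = H5Fcreate("pt.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
              hid_t plist = H5Pcreate(H5P_DATASET_CREATE);
              H5Pset_deflate(plist, 6);                  /* e.g. deflate level 6 */

              /* H5PTcreate takes a property list id, unlike H5PTcreate_fl */
              hid_t table = H5PTcreate(file, "packets", H5T_NATIVE_INT,
                                       (hsize_t)512, plist);

              hid_t dset  = H5PTget_dataset(table);      /* underlying dataset id */
              hid_t dtype = H5PTget_type(table);         /* datatype of the table */
              (void)dset; (void)dtype;

              H5PTclose(table);
              H5Pclose(plist);
              H5Fclose(file);
          }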
- - (BMR - 2016/03/04, HDFFV-8623) - - - Regarding #ifdef VLPT_REMOVED - - The #ifdef VLPT_REMOVED blocks have been removed from the packet - table library source code except for the following functions: - + VL_PacketTable::IsVariableLength() was moved to PacketTable - + VL_PacketTable::FreeReadBuff() is now PacketTable::FreeBuff() - - (BMR - 2016/03/04, HDFFV-442) - - - - Java Wrapper Library - -------------------- - - The Java HDF5 JNI library has been integrated into the HDF5 repository. - The configure option is "--enable-java", and the CMake option is - HDF5_BUILD_JAVA:BOOL=ON. The package hierarchy has changed from the - HDF5 1.8 JNI, which was "ncsa.hdf.hdflib.hdf5", to HDF5 1.10, - "hdf.hdflib.hdf5". - - A number of new APIs were added including some for VDS and SWMR. - - - - Other Important Changes - ----------------------- - - The hid_t type was changed from 32-bit to a 64-bit value. - - - -Issues Addressed in this Release Since alpha1 -============================================= - - - H5Pget_virtual_printf_gap, H5Pget_virtual_view, H5Pget_efile_prefix - - The correct access property list settings from the - H5Pget_virtual_printf_gap, H5Pget_virtual_view, and - H5Pget_efile_prefix function calls could not be retrieved - using H5Dget_access_plist(). - - Fixed. - - (DER and NAF - 2016/03/14, HDFFV-9716) - - - h5dump - - When h5dump was provided with the name of a non-existing file or - when optional arguments were the last option on the command line, - h5dump would segfault. - - Fixed. - - (ADB 2016/02/28 HDFFV-9639, HDFFV-9684) - - - No Error Message for Corrupt Metadata - - The HDF5 Library did not propagate an error when it encountered - corrupt metadata in an HDF5 file. The issue was fixed for a - specific file provided by a user. If you still see the problem, - please contact help@hdfgroup.org - - Fixed. - - (MC - 2016/02/18, HDFFV-9670) - - - Problem Reading Chunked Datasets with a String Datatype Larger - Than the Chunk Size in Bytes - - When the latest file format was used and when a chunked dataset - was created with a datatype with the size bigger than a chunk - size, the data could not be read back. The issue was reported - for chunked datasets with a string datatype and was confirmed - for other datatypes with the sizes bigger than the chunk size in - bytes. - - Fixed. - - (JM - 2016/02/13, HDFFV-9672) - - - Control over the Location of External Files - - Users were unable to specify the locations of external files. - - Two APIs - H5Pget_efile_prefix and H5Pset_efile_prefix - were - added so that users could specify the locations of external files. - - (DER - 2016/02/04, HDFFV-8740) - - - -Issues Addressed in this Release Since alpha0 -============================================= - - h5format_convert - - The h5format_convert tool did not downgrade the version of the - superblock. - - Fixed. The tool now will downgrade the version of the superblock. - - (EIP 2016/01/11) - - - Crashes with multiple threads: invalid pointers - - It was reported that alpha0 crashed when used with multiple - threads. The issue exists in the HDF5 Library versions 1.8 and - 1.9. The problem is related to a shared file pointer used in some - miscellaneous data structures. The thread-safe library exposed - paths in the library where a file pointer became invalid. - - The alpha1 release contains the fixes for the specific use case - as described in HDFFV-9643. We will keep working on identifying - and fixing other paths in the library with similar problems. 
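      One practical consequence of the hid_t change noted above is that
      identifiers can no longer be stored in 32-bit integers without risk
      of truncation; a small illustrative C fragment (not library code,
      file name hypothetical):

          #include "hdf5.h"

          static void keep_ids_in_hid_t(void)
          {
              hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

              /* Pre-1.10 application code sometimes stored identifiers in a
               * plain int; with the 64-bit hid_t that may truncate the value,
               * so keep identifiers in hid_t variables instead. */
              hid_t saved = file;            /* correct                        */
              /* int bad = (int)file; */     /* may truncate on 1.10 and later */

              H5Fclose(saved);
          }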
- - (EIP - 2016/01/15, HDFFV-9643) - - - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. - - AIX 6.1 xlc/xlc_r 10.1.0.5 - (NASA G-ADA) xlC/xlC_r 10.1.0.5 - xlf90/xlf90_r 12.1.0.6 - - Linux 2.6.32-573.18.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (mayll/platypus) Version 4.4.7 20120313 (Red Hat 4.4.7-16) - Version 4.9.3, Version 5.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 15.7-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 Build 20150407 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 2.6.32-504.8.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Version 4.9.3, Version 5.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 Build 20150407 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Cygwin(CYGWIN_NT-6.1 2.2.1(0.289/5/3) gcc(4.9.3) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Mac OS X Mt. 
Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (swallow/kite) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.2.0 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (wren/quail) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.0 from Xcode 7.0.0 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.3 Apple clang/clang++ version 7.0.2 from Xcode 7.0.2 - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -SunOS 5.11 32-bit n y/y n y y y -SunOS 5.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/y n y y n -Windows 8.1 n y/y n y y y -Windows 8.1 x64 n y/y n y y y -Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemeti 10.10.5 64-bit n y/y n y y y -AIX 6.1 32- and 64-bit n y/n n y y y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel n y/y n y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -SunOS 5.11 32-bit y y y y -SunOS 5.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 8.1 y y y y -Windows 8.1 x64 y y y y -Mac OS X Mountain Lion 10.8.5 64-bit y n y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemeti 10.10.5 64-bit y n y y -AIX 6.1 32- and 64-bit y n n y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y -Linux 2.6.32-431.11.2.el6.ppc64 y y y y - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.18-431.11.2.el6 g95 (GCC 4.0.3 (g95 0.94!) 
- #1 SMP x86_64 GNU/Linux - (platypus) - - Windows 7 Visual Studio 2008 (cmake) - - Windows 7 x64 Visual Studio 2008 (cmake) - - Windows 7 x64 Visual Studio 2010 (cmake) with SWMR using GPFS - - Windows 10 Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Windows 10 x64 Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux - gcc (Debian 4.7.2-5) 4.7.2 - GNU Fortran (Debian 4.7.2-5) 4.7.2 - (cmake and autotools) - - Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - (cmake and autotools) - - SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.8.1 - GNU Fortran (SUSE Linux) 4.8.1 - (cmake and autotools) - - Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - (cmake and autotools) - - hopper.nersc.gov PrgEnv-gnu/5.2.40 - gcc (GCC) 4.9.2 20141030 (Cray Inc.) - GNU Fortran (GCC) 4.9.2 20141030 (Cray Inc.) - g++ (GCC) 4.9.2 20141030 (Cray Inc.) - - - -Known Problems and Limitations -============================== -This section contains the list of known problems and limitations introduced -in this release of HDF5. - -Note: this list is not exhaustive of all known issues discovered in HDF5 -software to date. For a list of significant problems and known workarounds -identified in past releases, please refer to: - -https://www.hdfgroup.org/HDF5/release/known_problems/ - -The HDF Group also maintains a JIRA issue-tracking database which is used to -capture all known issues which are too numerous to reasonably list in this -document. The HDF Group is taking steps to make our JIRA issue database -open to the public, and this section will refer to that database in a future -release. In the meantime, please contact help@hdfgroup.org if you come across -an issue not listed here or at the link above, and we will provide any -information about known workarounds that we have or add it to our list of -known issues if it is a new issue. - - - The JUnit-interface test may fail on Solaris platforms. The result of - a test for verifying the content of the error stack to stdout is - in a different order on Solaris then other platforms. Use make -i option - to test beyond the java/test folder. - (ADB - 2016/03/22, HDFFV-9734) - - - The flush/refresh test occasionally fails on OS X platforms. This is - being investigated but no fix or workaround is available at this time. - (DER - 2016/03/22, HDFFV-9731) - - - The VDS/SWMR test will fail with a segmentation fault if the library - is built with --enable-using-memchecker. The is due to a VDS shutdown - procedure freeing a shared resource too early when the memory - checker changes are built. This problem does not arise when the - memory checker changes are not used since the internal library free - lists behave differently. The memory checker configure option should - normally only be used under special circumstances so this should not - affect most users. Users should be aware that the --enable-using-memchecker - + VDS combination may cause a segfault, however, so Valgrind et al. may - have to be used with an HDF5 library built without the feature if this - proves to be a problem. 
- (DER - 2016/03/21, HDFFV-9732) - - - SWMR feature limitations - The SWMR feature will only work if an HDF5 file under SWMR access resides - on a file system that obeys POSIX write() ordering semantics. Because of - this, SWMR will not work on network file systems such as NFS or SMB/Windows - file shares since those systems do not guarantee write odering. SWMR - regression tests are likely to fail if run on a network file system. SWMR - is currently not tested on Windows though it can be tested manually - (some of the SWMR test programs are built by CMake), and there are no - obvious reasons for it to not work on NTFS or GPFS. - (EIP - 2016/03/20, HDFFV-9733) - - - VDS feature limitation - Currently, the path to a VDS source file is interpreted as relative to the - directory where the executable program runs and not to the HDF5 file with - the VDS dataset unless a full path to the source file is specified during - the mapping. - (EIP - 2016/03/20, HDFFV-9724) - - - When building HDF5 with Java using CMake and specifying Debug for CMAKE_BUILD_TYPE, - there is a missing command argument for the tests of the examples. - - This error can be avoided by not building Java with Debug, HDF5_BUILD_JAVA:BOOL=OFF, - or not building Examples, HDF5_BUILD_EXAMPLES:BOOL=OFF. - (LRK - 2016/03/30, HDFFV-9743) - - - The H5Lexists API changed behavior in HDF5-1.10 when used with a file handle - and root group name ("/"): - - H5Lexists(fileid, "/") - - In HDF5-1.8 it returns false (0) and in HDF5-1.10 it returns true (1). - The documentation will be updated with information regarding this change. - (LRK - 2016/03/30, HDFFV-8746) - diff --git a/release_docs/HISTORY-1_12.txt b/release_docs/HISTORY-1_12.txt deleted file mode 100644 index 6d0c1e23d81..00000000000 --- a/release_docs/HISTORY-1_12.txt +++ /dev/null @@ -1,628 +0,0 @@ -HDF5 History -============ - -This file contains development history of the HDF5 1.12 branch - -01. Release Information for hdf5-1.12.0 - -[Search on the string '%%%%' for section breaks of each release.] - -%%%%1.12.0%%%% - -HDF5 version 1.12.0 released on 2020-02-28 -================================================================================ - - -INTRODUCTION - -This document describes the new features introduced in the HDF5 1.12.0 release. -It contains information on the platforms tested and known problems in this -release. For more details check the HISTORY*.txt files in the HDF5 source. - -Note that documentation in the links below will be updated at the time of the -release. - -Links to HDF5 documentation can be found on The HDF5 web page: - - https://portal.hdfgroup.org/display/HDF5/HDF5 - -The official HDF5 releases can be obtained from: - - https://www.hdfgroup.org/downloads/hdf5/ - -More information about the new features can be found at: - - https://portal.hdfgroup.org/display/HDF5/New+Features+in+HDF5+Release+1.12 - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS - -- New Features -- Support for new platforms and languages -- Bug Fixes since HDF5-1.12.0-alpha1 -- Major Bug Fixes since HDF5-1.10.0 -- Supported Platforms -- Tested Configuration Features Summary -- More Tested Platforms -- Known Problems -- CMake vs. Autotools installations - - -New Features -============ - - Configuration: - ------------- - - Added test script for file size compare - - If CMake minimum version is at least 3.14, the fileCompareTest.cmake - script will compare file sizes. 
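      Relating to the H5Lexists root-group behavior change noted above,
      a small C illustration (file and group names hypothetical):

          #include "hdf5.h"

          static void check_links(void)
          {
              hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

              /* Returns 0 (false) with HDF5 1.8, 1 (true) with HDF5 1.10 */
              htri_t root_exists = H5Lexists(file, "/", H5P_DEFAULT);
              (void)root_exists;

              /* Checking a nested path is unaffected: test each component */
              if (H5Lexists(file, "/G1", H5P_DEFAULT) > 0 &&
                  H5Lexists(file, "/G1/G2", H5P_DEFAULT) > 0) {
                  /* safe to open /G1/G2 */
              }

              H5Fclose(file);
          }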
- - (ADB - 2020/02/24, HDFFV-11036) - - - Update CMake minimum version to 3.12 - - Updated CMake minimum version to 3.12 and added version checks - for Windows features. - - (ADB - 2020/02/05, TRILABS-142) - - - Fixed CMake include properties for Fortran libraries - - Corrected the library properties for Fortran to use the - correct path for the Fortran module files. - - (ADB - 2020/02/04, HDFFV-11012) - - - Added common warnings files for gnu and intel - - Added warnings files to use one common set of flags - during configure for both autotools and CMake build - systems. The initial implementation only affects a - general set of flags for gnu and intel compilers. - - (ADB - 2020/01/17) - - - Added new options to CMake for control of testing - - Added CMake options (default ON); - HDF5_TEST_SERIAL AND/OR HDF5_TEST_PARALLEL - combined with: - HDF5_TEST_TOOLS - HDF5_TEST_EXAMPLES - HDF5_TEST_SWMR - HDF5_TEST_FORTRAN - HDF5_TEST_CPP - HDF5_TEST_JAVA - - (ADB - 2020/01/15, HDFFV-11001) - - - Added Clang sanitizers to CMake for analyzer support if compiler is clang. - - Added CMake code and files to execute the Clang sanitizers if - HDF5_ENABLE_SANITIZERS is enabled and the USE_SANITIZER option - is set to one of the following: - Address - Memory - MemoryWithOrigins - Undefined - Thread - Leak - 'Address;Undefined' - - (ADB - 2019/12/12, TRILAB-135) - - - Update CMake for VS2019 support - - CMake added support for VS2019 in version 3.15. Changes to the CMake - generator setting required changes to scripts. Also updated version - references in CMake files as necessary. - - (ADB - 2019/11/18, HDFFV-10962) - - - Library: - -------- - - Refactored public exposure of haddr_t type in favor of "object tokens" - - To better accommodate HDF5 VOL connectors where "object addresses in a file" - may not make much sense, the following changes were made to the library: - - * Introduced new H5O_token_t "object token" type, which represents a - unique and permanent identifier for referencing an HDF5 object within - a container; these "object tokens" are meant to replace object addresses. - Along with the new type, a new H5Oopen_by_token API call was introduced - to open an object by a token, similar to how object addresses were - previously used with H5Oopen_by_addr. - - * Introduced new H5Lget_info2, H5Lget_info_by_idx2, H5Literate2, H5Literate_by_name2, - H5Lvisit2 and H5Lvisit_by_name2 API calls, along with their associated H5L_info2_t - struct and H5L_iterate2_t callback function, which work with the newly-introduced - object tokens, instead of object addresses. The original functions have been - renamed to version 1 functions and are deprecated in favor of the new version 2 - functions. The H5L_info_t and H5L_iterate_t types have been renamed to version 1 - types and are now deprecated in favor of their version 2 counterparts. For each of - the functions and types, compatibility macros take place of the original symbols. - - * Introduced new H5Oget_info3, H5Oget_info_by_name3, H5Oget_info_by_idx3, - H5Ovisit3 and H5Ovisit_by_name3 API calls, along with their associated H5O_info2_t - struct and H5O_iterate2_t callback function, which work with the newly-introduced - object tokens, instead of object addresses. The version 2 functions are now - deprecated in favor of the version 3 functions. The H5O_info_t and H5O_iterate_t - types have been renamed to version 1 types and are now deprecated in favor of their - version 2 counterparts. For each, compatibility macros take place of the original - symbols. 
- - * Introduced new H5Oget_native_info, H5Oget_native_info_by_name and - H5Oget_native_info_by_idx API calls, along with their associated H5O_native_info_t - struct, which are used to retrieve the native HDF5 file format-specific information - about an object. This information (such as object header info and B-tree/heap info) - has been removed from the new H5O_info2_t struct so that the more generic - H5Oget_info(_by_name/_by_idx)3 routines will not try to retrieve it for non-native - VOL connectors. - - * Added new H5Otoken_cmp, H5Otoken_to_str and H5Otoken_from_str routines to compare - two object tokens, convert an object token into a nicely-readable string format and - to convert an object token string back into a real object token, respectively. - - (DER, QAK, JTH - 2020/01/16) - - - Virtual Object Layer (VOL) - - In this major HDF5 release we introduce HDF5 Virtual Object Layer (VOL). - VOL is an abstraction layer within the HDF5 library that enables different - methods for accessing data and objects that conform to the HDF5 data model. - The VOL layer intercepts all HDF5 API calls that potentially modify data - on disk and forwards those calls to a plugin "object driver". The data on - disk can be a different format than the HDF5 format. For more information - about VOL we refer the reader to the following documents (under review): - - VOL HDF5 APIs - https://portal.hdfgroup.org/display/HDF5/Virtual+Object++Layer - - VOL Documentation - https://bitbucket.hdfgroup.org/projects/HDFFV/repos/hdf5doc/browse/RFCs/HDF5/VOL - - Repository with VOL plugins - https://bitbucket.hdfgroup.org/projects/HDF5VOL - - - Enhancements to HDF5 References - - HDF5 references were extended to support attributes, and object and dataset - selections that reside in another HDF5 file. For more information including - a list of new APIs, see - - https://portal.hdfgroup.org/display/HDF5/Update+to+References - - - Add new public function H5Sselect_adjust. - - This function shifts a dataspace selection by a specified logical offset - within the dataspace extent. This can be useful for VOL developers to - implement chunked datasets. - - (NAF - 2019/11/18) - - - Add new public function H5Sselect_project_intersection. - - This function computes the intersection between two dataspace selections - and projects that intersection into a third selection. This can be useful - for VOL developers to implement chunked or virtual datasets. - - (NAF - 2019/11/13, ID-148) - - - Add new public function H5VLget_file_type. - - This function returns a datatype equivalent to the supplied datatype but - with the location set to be in the file. This datatype can then be used - with H5Tconvert to convert data between file and in-memory representation. - This funcition is intended for use only by VOL connector developers. - - (NAF - 2019/11/08, ID-127) - - - New S3 and HDFS Virtual File Drivers (VFDs) - - This release has two new VFDs. The S3 VFD allows accessing HDF5 files on - AWS S3 buckets. HDFS VFD allows accessing HDF5 files stored on Apache HDFS. - See https://portal.hdfgroup.org/display/HDF5/Virtual+File+Drivers+-+S3+and+HDFS - for information on enabling those drivers and using those APIs. - - Below are specific instructions for enabling S3 VFD on Windows: - - Fix windows requirements and java tests. Windows requires CMake 3.13. - - Install openssl library (with dev files); - from "Shining Light Productions". msi package preferred. - - PATH should have been updated with the installation dir. 
- - set ENV variable OPENSSL_ROOT_DIR to the installation dir. - - set ENV variable OPENSSL_CONF to the cfg file, likely %OPENSSL_ROOT_DIR%\bin\openssl.cfg - - Install libcurl library (with dev files); - - download the latest released version using git: https://github.com/curl/curl.git - - Open a Visual Studio Command prompt - - change to the libcurl root folder - - run the "buildconf.bat" batch file - - change to the winbuild directory - - nmake /f Makefile.vc mode=dll MACHINE=x64 - - copy libcurl-vc-x64-release-dll-ipv6-sspi-winssl dir to C:\curl (installation dir) - - set ENV variable CURL_ROOT to C:\curl (installation dir) - - update PATH ENV variable to %CURL_ROOT%\bin (installation bin dir). - - the aws credentials file should be in %USERPROFILE%\.aws folder - - set the ENV variable HDF5_ROS3_TEST_BUCKET_URL to the s3 url for the - s3 bucket containing the HDF5 files to be accessed. - - FORTRAN Library: - ---------------- - - Added new Fortran parameters: - - H5F_LIBVER_ERROR_F - H5F_LIBVER_NBOUNDS_F - H5F_LIBVER_V18_F - H5F_LIBVER_V110_F - H5F_LIBVER_V112_F - - - Added new Fortran API: h5pget_libver_bounds_f - - (MSB - 2020/02/11, HDFFV-11018) - - Java Library: - ---------------- - - Added ability to test java library with VOLs. - - Created new CMake script that combines the java and vol test scripts. - - (ADB - 2020/02/03, HDFFV-10996) - - - Tests fail for non-English locale. - - In the JUnit tests with a non-English locale, only the part before - the decimal comma is replaced by XXXX and this leads to a comparison - error. Changed the regex for the Time substitution. - - (ADB - 2020/01/09, HDFFV-10995) - - - Tools: - ------ - - h5diff was updated to use the new reference APIs. - - h5diff uses the new reference APIs to compare references. - Attribute references can also be compared. - - (ADB - 2019/12/19, HDFFV-10980) - - - h5dump and h5ls were updated to use the new reference APIs. - - The tools library now use the new reference APIs to inspect a - file. Also the DDL spec was updated to reflect the format - changes produced with the new APIs. The export API and support - functions in the JNI were updated to match. - - - Other improvements and changes: - - - Hyperslab selection code was reworked to improve performance, getting more - than 10x speedup in some cases. - - - The HDF5 Library was enhanced to open files with Unicode names on Windows. - - - Deprecated H5Dvlen_reclaim() and replaced it with H5Treclaim(). - This routine is meant to be used when resources are internally allocated - when reading data, i.e. when using either vlen or new reference types. - This is applicable to both attribute and dataset reads. - - - h5repack was fixed to repack datasets with external storage - to other types of storage. - - -Support for new platforms, languages and compilers. -======================================= - - Added spectrum-mpi with clang, gcc and xl compilers on Linux 3.10.0 - - Added OpenMPI 3.1 and 4.0 with clang, gcc and Intel compilers on Linux 3.10.0 - - Added cray-mpich/PrgEnv with gcc and Intel compilers on Linux 4.14.180 - - Added spectrum mpi with clang, gcc and xl compilers on Linux 4.14.0 - - -Bug Fixes since HDF5-1.12.0-alpha1 release -========================================== - Library - ------- - - Improved performance when creating a large number of small datasets by - retrieving default property values from the API context instead of doing - skip list searches. 
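      As a sketch of the H5Dvlen_reclaim() to H5Treclaim() replacement noted
      above (dataset name hypothetical; the buffer size assumes the dataset
      holds at most 100 variable-length elements; error checking omitted):

          #include "hdf5.h"

          static void read_and_reclaim(hid_t file)
          {
              hid_t dset  = H5Dopen2(file, "/vlen_data", H5P_DEFAULT);
              hid_t space = H5Dget_space(dset);
              hid_t vtype = H5Tvlen_create(H5T_NATIVE_INT);
              hvl_t buf[100];

              H5Dread(dset, vtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);

              /* Previously: H5Dvlen_reclaim(vtype, space, H5P_DEFAULT, buf); */
              H5Treclaim(vtype, space, H5P_DEFAULT, buf);

              H5Tclose(vtype);
              H5Sclose(space);
              H5Dclose(dset);
          }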
- - (CJH - 2019/12/10, HDFFV-10658) - - - Fixed user-created data access properties not existing in the property list - returned by H5Dget_access_plist. Thanks to Steven Varga for submitting a - reproducer and a patch. - - (CJH - 2019/12/09, HDFFV-10934) - - - Fixed an assertion failure in the parallel library when collectively - filling chunks. As it is required that chunks be written in - monotonically non-decreasing order of offset in the file, this assertion - was being triggered when the list of chunk file space allocations being - passed to the collective chunk filling routine was not sorted according - to this particular requirement. - - The addition of a sort of the out of order chunks trades a bit of - performance for the elimination of this assertion and of any complaints - from MPI implementations about the file offsets used being out of order. - - (JTH - 2019/10/07, HDFFV-10792) - - FORTRAN library: - ---------------- - - - Corrected INTERFACE INTENT(IN) to INTENT(OUT) for buf_size in h5fget_file_image_f. - - (MSB - 2020/2/18, HDFFV-11029) - - Java Library: - ---------------- - - Added ability to test java library with VOLs. - - Created new CMake script that combines the java and vol test scripts. - - (ADB - 2020/02/03, HDFFV-10996) - - - Tests fail for non-English locale. - - In the JUnit tests with a non-English locale, only the part before - the decimal comma is replaced by XXXX and this leads to a comparison - error. Changed the regex for the Time substitution. - - (ADB - 2020/01/09, HDFFV-10995) - - Tools: - ------ - - h5repack was fixed to repack the reference attributes properly. - The code line that checks if the update of reference inside a compound - datatype is misplaced outside the code block loop that carries out the - check. In consequence, the next attribute that is not the reference - type was repacked again as the reference type and caused the failure of - repacking. The fix is to move the corresponding code line to the correct - code block. - - (KY -2020/02/10, HDFFV-11014) - - - h5diff was updated to use the new reference APIs. - - h5diff uses the new reference APIs to compare references. - Attribute references can also be compared. - - (ADB - 2019/12/19, HDFFV-10980) - - - h5dump and h5ls were updated to use the new reference APIs. - - The tools library now use the new reference APIs to inspect a - file. Also the DDL spec was updated to reflect the format - changes produced with the new APIs. The export API and support - functions in the JNI were updated to match. 
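      The "new reference APIs" used by the tools above are the H5R_ref_t
      based routines introduced in HDF5 1.12 (H5Rcreate_object,
      H5Ropen_object, H5Rdestroy, ...); a minimal C sketch with a
      hypothetical object path:

          #include "hdf5.h"

          static void reference_example(hid_t file)
          {
              H5R_ref_t ref;

              /* Create an object reference to an existing dataset */
              H5Rcreate_object(file, "/group/dset", H5P_DEFAULT, &ref);

              /* Later: open the referenced object and use it */
              hid_t obj = H5Ropen_object(&ref, H5P_DEFAULT, H5P_DEFAULT);
              /* ... */
              H5Oclose(obj);

              /* H5R_ref_t references hold resources and must be destroyed */
              H5Rdestroy(&ref);
          }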
- - (ADB - 2019/12/06, HDFFV-10876 and HDFFV-10877) - - -Major Bug Fixes since HDF5-1.10.0 release -========================================= - - - For major bug fixes please see HISTORY-1_10_0-1_12_0.txt file - - -Supported Platforms -=================== - - Linux 2.6.32-696.16.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-18) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Version 4.9.3, 5.2.0, 7.1.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.0.098 Build 20160721 - MPICH 3.1.4 - - Linux-3.10.0- spectrum-mpi/rolling-release with cmake>3.10 and - 862.14.4.1chaos.ch6.ppc64le clang/3.9,8.0 - #1 SMP ppc64le GNU/Linux gcc/7.3 - (ray) xl/2016,2019 - - Linux 3.10.0- openmpi/3.1,4.0 with cmake>3.10 and - 957.12.2.1chaos.ch6.x86_64 clang 5.0 - #1 SMP x86_64 GNU/Linux gcc/7.3,8.2 - (serrano) intel/17.0,18.0/19.0 - - Linux 3.10.0- openmpi/3.1/4.0 with cmake>3.10 and - 1062.1.1.1chaos.ch6.x86_64 clang/3.9,5.0,8.0 - #1 SMP x86_64 GNU/Linux gcc/7.3,8.1,8.2 - (chama,quartz) intel/16.0,18.0,19.0 - - Linux 4.4.180-94.100-default cray-mpich/7.7.6 with PrgEnv-*/6.0.5, cmake>3.10 and - #1 SMP x86_64 GNU/Linux gcc/7.2.0,8.2.0 - (mutrino) intel/17.0,18.0 - - Linux 4.14.0- spectrum-mpi/rolling-release with cmake>3.10 and - 49.18.1.bl6.ppc64le clang/6.0,8.0 - #1 SMP ppc64le GNU/Linux gcc/7.3 - (lassen) xl/2019 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 x64 Visual Studio 2015 w/ Intel C, Fortran 2018 (cmake) - Visual Studio 2015 w/ MSMPI 10 (cmake) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 18 (cmake) - Visual Studio 2017 w/ Intel Fortran 19 (cmake) - Visual Studio 2019 w/ Intel Fortran 19 (cmake) - - macOS 10.13.6 High Sierra Apple LLVM version 10.0.0 (clang/clang++-1000.10.44.4) - 64-bit gfortran GNU Fortran (GCC) 6.3.0 - (bear) Intel icc/icpc/ifort version 19.0.4 - - macOS 10.14.6 Mohave Apple LLVM version 10.0.1 (clang/clang++-1001.0.46.4) - 64-bit gfortran GNU Fortran (GCC) 6.3.0 - (bobcat) Intel icc/icpc/ifort version 19.0.4 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -SunOS 5.11 32-bit n y/y n y y y -SunOS 5.11 64-bit n y/n n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y y y y y -Windows 7 Cygwin n y/n n y y y -Windows 7 x64 Cygwin n y/n n y y y -Windows 10 y y/y n y y y -Windows 10 x64 y y/y n y y y -macOS 10.13.6 64-bit n y/y n y y ? -macOS 10.14.6 64-bit n y/y n y y ? 
-CentOS 6.7 Linux 2.6.18 x86_64 GNU n y/y n y y y -CentOS 6.7 Linux 2.6.18 x86_64 Intel n y/y n y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.2 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 7.2 Linux 2.6.32 x86_64 Intel n y/y n y y y -Linux 2.6.32-573.18.1.el6.ppc64 n y/n n y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -SunOS 5.11 32-bit y y y y -SunOS 5.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 7 x64 Cygwin n n n y -Windows 10 y y y y -Windows 10 x64 y y y y -macOS 10.13.6 64-bit y n y y -macOS 10.14.6 64-bit y n y y -CentOS 6.7 Linux 2.6.18 x86_64 GNU y y y y -CentOS 6.7 Linux 2.6.18 x86_64 Intel y y y n -CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y n -CentOS 7.2 Linux 2.6.32 x86_64 GNU y y y n -CentOS 7.2 Linux 2.6.32 x86_64 Intel y y y n -Linux 2.6.32-573.18.1.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (mayll/platypus) Version 4.4.7 20120313 - Version 4.9.3, 5.3.0, 6.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 17.10-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.4.196 Build 20170411 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 3.10.0-327.18.2.el7 GNU C (gcc) and C++ (g++) compilers - #1 SMP x86_64 GNU/Linux Version 4.8.5 20150623 (Red Hat 4.8.5-4) - (jelly) with NAG Fortran Compiler Release 6.1(Tozai) - GCC Version 7.1.0 - OpenMPI 3.0.0-GCC-7.2.0-2.29 - Intel(R) C (icc) and C++ (icpc) compilers - Version 17.0.0.098 Build 20160721 - with NAG Fortran Compiler Release 6.1(Tozai) - PGI C (pgcc), C++ (pgc++), Fortran (pgf90) - compilers: - Version 18.4, 19.4 - MPICH 3.3 - OpenMPI 2.1.5, 3.1.3, 4.0.0 - - Fedora30 5.3.11-200.fc30.x86_64 - #1 SMP x86_64 GNU/Linux GNU gcc (GCC) 9.2.1 20190827 (Red Hat 9.2.1 20190827) - GNU Fortran (GCC) 9.2.1 20190827 (Red Hat 9.2.1 20190827) - (cmake and autotools) - - Mac OS X El Capitan 10.11.6 Apple LLVM version 7.3.0 (clang/clang++-703.0.29) - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (osx1011dev/osx1011test) Intel icc/icpc/ifort version 16.0.2 - - macOS 10.12.6 Sierra Apple LLVM version 9.0.0 (clang/clang++-900.0.39.2) - 64-bit gfortran GNU Fortran (GCC) 7.4.0 - (kite) Intel icc/icpc/ifort version 17.0.8 - - -Known Problems -============== - CMake files do not behave correctly with paths containing spaces. - Do not use spaces in paths because the required escaping for handling spaces - results in very complex and fragile build files. - ADB - 2019/05/07 - - At present, metadata cache images may not be generated by parallel - applications. Parallel applications can read files with metadata cache - images, but since this is a collective operation, a deadlock is possible - if one or more processes do not participate. - - Known problems in previous releases can be found in the HISTORY*.txt files - in the HDF5 source, and in the HDF5 Jira database, available at - https://jira.hdfgroup.org/. Please register at https://www.hdfgroup.org to - create a free account for accessing the Jira database. Please report any - new problems found to help@hdfgroup.org. - - -CMake vs. 
Autotools installations -================================= -While both build systems produce similar results, there are differences. -Each system produces the same set of folders on linux (only CMake works -on standard Windows); bin, include, lib and share. Autotools places the -COPYING and RELEASE.txt file in the root folder, CMake places them in -the share folder. - -The bin folder contains the tools and the build scripts. Additionally, CMake -creates dynamic versions of the tools with the suffix "-shared". Autotools -installs one set of tools depending on the "--enable-shared" configuration -option. - build scripts - ------------- - Autotools: h5c++, h5cc, h5fc - CMake: h5c++, h5cc, h5hlc++, h5hlcc - -The include folder holds the header files and the fortran mod files. CMake -places the fortran mod files into separate shared and static subfolders, -while Autotools places one set of mod files into the include folder. Because -CMake produces a tools library, the header files for tools will appear in -the include folder. - -The lib folder contains the library files, and CMake adds the pkgconfig -subfolder with the hdf5*.pc files used by the bin/build scripts created by -the CMake build. CMake separates the C interface code from the fortran code by -creating C-stub libraries for each Fortran library. In addition, only CMake -installs the tools library. The names of the szip libraries are different -between the build systems. - -The share folder will have the most differences because CMake builds include -a number of CMake specific files for support of CMake's find_package and support -for the HDF5 Examples CMake project. - diff --git a/release_docs/HISTORY-1_13.txt b/release_docs/HISTORY-1_12_0-1_14_0.txt similarity index 81% rename from release_docs/HISTORY-1_13.txt rename to release_docs/HISTORY-1_12_0-1_14_0.txt index 73b7feb54d1..11ca947af9d 100644 --- a/release_docs/HISTORY-1_13.txt +++ b/release_docs/HISTORY-1_12_0-1_14_0.txt @@ -4,6 +4,7 @@ HDF5 History This file contains development history of the HDF5 1.13 releases from the develop branch +05. Release Information for hdf5-1.14.0 04. Release Information for hdf5-1.13.3 03. Release Information for hdf5-1.13.2 02. Release Information for hdf5-1.13.1 @@ -11,6 +12,758 @@ the develop branch [Search on the string '%%%%' for section breaks of each release.] +%%%%1.14.0%%%% + +HDF5 version 1.14.0 released on 2022-12-28 + +*** NOTE *** + +This file reflects the 1.13.x experimental release history. In the 1.14.0 +release, we consolidated the experimental release notes into a single +section. + +*** NOTE **** + +================================================================================ + + +INTRODUCTION +============ + +This document describes the differences between this release and the previous +HDF5 release. It contains information on the platforms tested and known +problems in this release. For more details check the HISTORY*.txt files in the +HDF5 source. + +Note that documentation in the links below will be updated at the time of each +final release. 
+ +Links to HDF5 documentation can be found on The HDF5 web page: + + https://portal.hdfgroup.org/display/HDF5/HDF5 + +The official HDF5 releases can be obtained from: + + https://www.hdfgroup.org/downloads/hdf5/ + +Changes from Release to Release and New Features in the HDF5-1.13.x release series +can be found at: + + https://portal.hdfgroup.org/display/HDF5/Release+Specific+Information + +If you have any questions or comments, please send them to the HDF Help Desk: + + help@hdfgroup.org + + +CONTENTS +======== + +- New Features +- Support for new platforms and languages +- Bug Fixes since HDF5-1.13.3 +- Platforms Tested +- Known Problems +- CMake vs. Autotools installations + + +New Features +============ + + Configuration: + ------------- + - Added new option to build libaec and zlib inline with CMake. + + Using the CMake FetchContent module, the external filters can populate + content at configure time via any method supported by the ExternalProject + module. Whereas ExternalProject_Add() downloads at build time, the + FetchContent module makes content available immediately, allowing the + configure step to use the content in commands like add_subdirectory(), + include() or file() operations. + + The HDF options (and defaults) for using this are: + BUILD_SZIP_WITH_FETCHCONTENT:BOOL=OFF + LIBAEC_USE_LOCALCONTENT:BOOL=OFF + BUILD_ZLIB_WITH_FETCHCONTENT:BOOL=OFF + ZLIB_USE_LOCALCONTENT:BOOL=OFF + + The CMake variables to control the path and file names: + LIBAEC_TGZ_ORIGPATH:STRING + LIBAEC_TGZ_ORIGNAME:STRING + ZLIB_TGZ_ORIGPATH:STRING + ZLIB_TGZ_ORIGNAME:STRING + + See the CMakeFilters.cmake and config/cmake/cacheinit.cmake files for usage. + + (ADB - 2023/02/21) + + - Removal of MPE support + + The ability to build with MPE instrumentation has been removed along with + the following configure options: + + Autotools: + --with-mpe= + + CMake has never supported building with MPE support. + + (DER - 2022/11/08) + + - Removal of dmalloc support + + The ability to build with dmalloc support has been removed along with + the following configure options: + + Autotools: + --with-dmalloc= + + CMake: + HDF5_ENABLE_USING_DMALLOC + + (DER - 2022/11/08) + + - Removal of memory allocation sanity checks configure options + + With the removal of the memory allocation sanity checks feature, the + following configure options are no longer necessary and have been + removed: + + Autotools: + --enable-memory-alloc-sanity-check + + CMake: + HDF5_MEMORY_ALLOC_SANITY_CHECK + HDF5_ENABLE_MEMORY_STATS + + (DER - 2022/11/03) + + Library: + -------- + - Added a Subfiling VFD configuration file prefix environment variable + + The Subfiling VFD now checks for values set in a new environment + variable "H5FD_SUBFILING_CONFIG_FILE_PREFIX" to determine if the + application has specified a pathname prefix to apply to the file + path for its configuration file. For example, this can be useful + for cases where the application wishes to write subfiles to a + machine's node-local storage while placing the subfiling configuration + file on a file system readable by all machine nodes. + + (JTH - 2023/02/22) + + - Overhauled the Virtual Object Layer (VOL) + + The virtual object layer (VOL) was added in HDF5 1.12.0 but the initial + implementation required API-breaking changes to better support optional + operations and pass-through VOL connectors. The original VOL API is + now considered deprecated and VOL users and connector authors should + target the 1.14 VOL API. 
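      As an illustration of the H5FD_SUBFILING_CONFIG_FILE_PREFIX environment
      variable described above (paths are hypothetical; 'fapl' is assumed to
      already be configured for the Subfiling VFD elsewhere):

          #include <stdlib.h>
          #include "hdf5.h"

          static hid_t create_subfiled_file(hid_t fapl)
          {
              /* Keep the subfiling configuration file on a file system that
               * all nodes can read, while the subfiles themselves go to
               * node-local storage. */
              setenv("H5FD_SUBFILING_CONFIG_FILE_PREFIX", "/gpfs/project/config", 1);

              return H5Fcreate("/node_local/output.h5", H5F_ACC_TRUNC,
                               H5P_DEFAULT, fapl);
          }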
+ + The specific changes are too extensive to document in a release note, so + VOL users and connector authors should consult the updated VOL connector + author's guide and the 1.12-1.14 VOL migration guide. + + (DER - 2022/12/28) + + - H5VLquery_optional() signature change + + The last parameter of this API call has changed from a pointer to hbool_t + to a pointer to uint64_t. Due to the changes in how optional operations + are handled in the 1.14 VOL API, we cannot make the old API call work + with the new scheme, so there is no API compatibility macro for it. + + (DER - 2022/12/28) + + - H5I_free_t callback signature change + + In order to support asynchronous operations and future IDs, the signature + of the H5I_free_t callback has been modified to take a second 'request' + parameter. Due to the nature of the internal library changes, no API + compatibility macro is available for this change. + + (DER - 2022/12/28) + + - Fix for CVE-2019-8396 + + Malformed HDF5 files may have truncated content which does not match + the expected size. When H5O__pline_decode() attempts to decode these it + may read past the end of the allocated space leading to heap overflows + as bounds checking is incomplete. + + The fix ensures each element is within bounds before reading. + + (2022/11/09 - HDFFV-10712, CVE-2019-8396, GitHub #2209) + + - Removal of memory allocation sanity checks feature + + This feature added heap canaries and statistics tracking for internal + library memory operations. Unfortunately, the heap canaries caused + problems when library memory operations were mixed with standard C + library memory operations (such as in the filter pipeline, where + buffers may have to be reallocated). Since any platform with a C + compiler also usually has much more sophisticated memory sanity + checking tools than the HDF5 library provided (e.g., valgrind), we + have decided to to remove the feature entirely. + + In addition to the configure changes described above, this also removes + the following from the public API: + H5get_alloc_stats() + H5_alloc_stats_t + + (DER - 2022/11/03) + + Parallel Library: + ----------------- + - + + + Fortran Library: + ---------------- + - + + + C++ Library: + ------------ + - + + + Java Library: + ------------- + - + + + Tools: + ------ + - + + + High-Level APIs: + ---------------- + - + + + C Packet Table API: + ------------------- + - + + + Internal header file: + --------------------- + - + + + Documentation: + -------------- + - Ported the existing VOL Connector Author Guide document to doxygen. + + Added new dox file, VOLConnGuide.dox. 
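+      Returning to the H5I_free_t signature change described earlier in this
+      section: a registered ID type's free callback now receives a second
+      'request' pointer. A minimal sketch only (the type, payload, and function
+      names here are hypothetical, and the request is ignored for synchronous
+      use):
+
+          #include <stdlib.h>
+          #include "hdf5.h"
+
+          /* 1.14-style free callback: note the extra 'request' parameter. */
+          static herr_t my_obj_free(void *obj, void **request)
+          {
+              (void)request;   /* not used by a synchronous application */
+              free(obj);
+              return 0;
+          }
+
+          int main(void)
+          {
+              /* Register a user ID type whose free callback has the new shape. */
+              H5I_type_t my_type = H5Iregister_type(64, 0, my_obj_free);
+              hid_t      id      = H5Iregister(my_type, malloc(16));
+
+              H5Idec_ref(id);           /* ref count hits zero, my_obj_free() runs */
+              H5Idestroy_type(my_type);
+              return 0;
+          }
+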
+ + (ADB - 2022/12/20) + + +Support for new platforms, languages and compilers +================================================== + - + +Bug Fixes since HDF5-1.13.3 release +=================================== + Library + ------- + - Fixed issues in the Subfiling VFD when using the SELECT_IOC_EVERY_NTH_RANK + or SELECT_IOC_TOTAL I/O concentrator selection strategies + + Multiple bugs involving these I/O concentrator selection strategies + were fixed, including: + + * A bug that caused the selection strategy to be altered when + criteria for the strategy was specified in the + H5FD_SUBFILING_IOC_SELECTION_CRITERIA environment variable as + a single value, rather than in the old and undocumented + 'integer:integer' format + * Two bugs which caused a request for 'N' I/O concentrators to + result in 'N - 1' I/O concentrators being assigned, which also + lead to issues if only 1 I/O concentrator was requested + + Also added a regression test for these two I/O concentrator selection + strategies to prevent future issues. + + (JTH - 2023/03/15) + + - Fix CVE-2021-37501 / GHSA-rfgw-5vq3-wrjf + + Check for overflow when calculating on-disk attribute data size. + + A bogus hdf5 file may contain dataspace messages with sizes + which lead to the on-disk data sizes to exceed what is addressable. + When calculating the size, make sure, the multiplication does not + overflow. + The test case was crafted in a way that the overflow caused the + size to be 0. + + (EFE - 2023/02/11 GH-2458) + + - Fixed an issue with collective metadata writes of global heap data + + New test failures in parallel netCDF started occurring with debug + builds of HDF5 due to an assertion failure and this was reported in + GitHub issue #2433. The assertion failure began happening after the + collective metadata write pathway in the library was updated to use + vector I/O so that parallel-enabled HDF5 Virtual File Drivers (other + than the existing MPI I/O VFD) can support collective metadata writes. + + The assertion failure was fixed by updating collective metadata writes + to treat global heap metadata as raw data, as done elsewhere in the + library. + + (JTH - 2023/02/16, GH #2433) + + - Seg fault on file close + + h5debug fails at file close with core dump on a file that has an + illegal file size in its cache image. In H5F_dest(), the library + performs all the closing operations for the file and keeps track of + the error encountered when reading the file cache image. + At the end of the routine, it frees the file's file structure and + returns error. Due to the error return, the file object is not removed + from the ID node table. This eventually causes assertion failure in + H5VL__native_file_close() when the library finally exits and tries to + access that file object in the table for closing. + + The closing routine, H5F_dest(), will not free the file structure if + there is error, keeping a valid file structure in the ID node table. + It will be freed later in H5VL__native_file_close() when the + library exits and terminates the file package. + + (VC - 2022/12/14, HDFFV-11052, CVE-2020-10812) + + - Fix CVE-2018-13867 / GHSA-j8jr-chrh-qfrf + + Validate location (offset) of the accumulated metadata when comparing. + + Initially, the accumulated metadata location is initialized to HADDR_UNDEF + - the highest available address. Bogus input files may provide a location + or size matching this value. Comparing this address against such bogus + values may provide false positives. 
Thus make sure, the value has been + initialized or fail the comparison early and let other parts of the + code deal with the bogus address/size. + Note: To avoid unnecessary checks, it is assumed that if the 'dirty' + member in the same structure is true the location is valid. + + (EFE - 2022/10/10 GH-2230) + + - Fix CVE-2018-16438 / GHSA-9xmm-cpf8-rgmx + + Make sure info block for external links has at least 3 bytes. + + According to the specification, the information block for external links + contains 1 byte of version/flag information and two 0 terminated strings + for the object linked to and the full path. + Although not very useful, the minimum string length for each (with + terminating 0) would be one byte. + Checking this helps to avoid SEGVs triggered by bogus files. + + (EFE - 2022/10/09 GH-2233) + + - CVE-2021-46244 / GHSA-vrxh-5gxg-rmhm + + Compound datatypes may not have members of size 0 + + A member size of 0 may lead to an FPE later on as reported in + CVE-2021-46244. To avoid this, check for this as soon as the + member is decoded. + + (EFE - 2022/10/05 GEH-2242) + + + - Fix CVE-2021-45830 / GHSA-5h2h-fjjr-x9m2 + + Make H5O__fsinfo_decode() more resilient to out-of-bound reads. + + When decoding a file space info message in H5O__fsinfo_decode() make + sure each element to be decoded is still within the message. Malformed + hdf5 files may have trunkated content which does not match the + expected size. Checking this will prevent attempting to decode + unrelated data and heap overflows. So far, only free space manager + address data was checked before decoding. + + (EFE - 2022/10/05 GH-2228) + + - Fix CVE-2021-46242 / GHSA-x9pw-hh7v-wjpf + + When evicting driver info block, NULL the corresponding entry. + + Since H5C_expunge_entry() called (from H5AC_expunge_entry()) sets the flag + H5C__FLUSH_INVALIDATE_FLAG, the driver info block will be freed. NULLing + the pointer in f->shared->drvinfo will prevent use-after-free when it is + used in other functions (like H5F__dest()) - as other places will check + whether the pointer is initialized before using its value. + + (EFE - 2022/09/29 GH-2254) + + - Fix CVE-2021-45833 / GHSA-x57p-jwp6-4v79 + + Report error if dimensions of chunked storage in data layout < 2 + + For Data Layout Messages version 1 & 2 the specification state + that the value stored in the data field is 1 greater than the + number of dimensions in the dataspace. For version 3 this is + not explicitly stated but the implementation suggests it to be + the case. + Thus the set value needs to be at least 2. For dimensionality + < 2 an out-of-bounds access occurs. + + (EFE - 2022/09/28 GH-2240) + + - Fix CVE-2018-14031 / GHSA-2xc7-724c-r36j + + Parent of enum datatype message must have the same size as the + enum datatype message itself. + Functions accessing the enumeration values use the size of the + enumeration datatype to determine the size of each element and + how much data to copy. + Thus the size of the enumeration and its parent need to match. + Check in H5O_dtype_decode_helper() to avoid unpleasant surprises + later. + + (EFE - 2022/09/28 GH-2236) + + - Fix CVE-2018-17439 / GHSA-vcxv-vp43-rch7 + + H5IMget_image_info(): Make sure to not exceed local array size + + Malformed hdf5 files may provide more dimensions than the array dim[] in + H5IMget_image_info() is able to hold. Check number of elements first by calling + H5Sget_simple_extent_dims() with NULL for both 'dims' and 'maxdims' arguments. 
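+      As an illustration of that probing pattern (a sketch of the calling
+      convention only, not the library's internal fix; the MAX_RANK bound and
+      function name are hypothetical):
+
+          #include "hdf5.h"
+
+          #define MAX_RANK 32   /* hypothetical local bound, analogous to the tool's dim[] array */
+
+          static int get_rank_safely(hid_t space_id, hsize_t dims[MAX_RANK])
+          {
+              /* With NULL for both 'dims' and 'maxdims', the call only reports the rank. */
+              int ndims = H5Sget_simple_extent_dims(space_id, NULL, NULL);
+
+              if (ndims < 0 || ndims > MAX_RANK)
+                  return -1;    /* unexpected or corrupt rank: do not touch the fixed-size array */
+
+              /* Only now is it safe to fill the caller's fixed-size array. */
+              return H5Sget_simple_extent_dims(space_id, dims, NULL);
+          }
+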
+      This will cause the function to return only the number of dimensions.
+      The fix addresses a stack overflow on write.
+
+      (EFE - 2022/09/27 HDFFV-10589, GH-2226)
+
+
+    Java Library
+    ------------
+    -
+
+
+    Configuration
+    -------------
+    - Correct the CMake generated pkg-config file
+
+      The pkg-config file generated by CMake had the order and placement of the
+      libraries wrong. Also added support for debug library names.
+
+      Changed the order of Libs.private libraries so that dependencies come after
+      dependents. Did not move the compression libraries into Requires.private
+      because there was not a way to determine if the compression libraries had
+      supported pkg-config files. We still recommend that the CMake config file
+      method be used for building projects with CMake.
+
+      (ADB - 2023/02/16 GH-1546, GH-2259)
+
+    - Remove Javadoc generation
+
+      The use of doxygen now supersedes the requirement to build javadocs. We do not
+      have the resources to continue to support two documentation methods and have
+      chosen doxygen as our standard.
+
+      (ADB - 2022/12/19)
+
+    - Change the default for building the high-level tools
+
+      The gif2hdf5 and hdf2gif high-level tools are deprecated and will be removed
+      in a future release. The default build setting for them has been changed from
+      enabled to disabled. A user can enable the build of these tools if needed.
+        autotools: --enable-hlgiftools
+        cmake: HDF5_BUILD_HL_GIF_TOOLS=ON
+
+      (ADB - 2022/12/16)
+
+    - Change the settings of the *pc files to use the correct format
+
+      The pkg-config files generated by CMake used incorrect syntax for the 'Requires'
+      settings. Changing the setting to use 'lib-name = version' instead of
+      'lib-name-version' fixes the issue.
+
+      (ADB - 2022/12/06 HDFFV-11355)
+
+    - Move MPI libraries link from PRIVATE to PUBLIC
+
+      The install dependencies were not including the need for MPI libraries when
+      an application or library was built with the C library. Also updated the
+      CMake target link command to use the newer style MPI::MPI_C link variable.
+
+      (ADB - 2022/10/27)
+
+
+    Tools
+    -----
+    - Fix h5repack to only print output when verbose option is selected
+
+      When the timing option was added to h5repack, the check for verbose was
+      incorrectly implemented.
+ + (ADB - 2022/12/02, GH #2270) + + + Performance + ------------- + - + + + Fortran API + ----------- + - + + High-Level Library + ------------------ + - + + + Fortran High-Level APIs + ----------------------- + - + + + Documentation + ------------- + - + + + F90 APIs + -------- + - + + + C++ APIs + -------- + - + + + Testing + ------- + - + + +Platforms Tested +=================== + + Linux 5.16.14-200.fc35 GNU gcc (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + Fedora35 clang version 13.0.0 (Fedora 13.0.0-3.fc35) + (cmake and autotools) + + Linux 5.11.0-34-generic GNU gcc (GCC) 9.3.0-17ubuntu1 + #36-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 9.3.0-17ubuntu1 + Ubuntu 20.04 Ubuntu clang version 10.0.0-4 + (cmake and autotools) + + Linux 5.3.18-150300-cray_shasta_c cray-mpich/8.1.16 + #1 SMP x86_64 GNU/Linux Cray clang 14.0.0 + (crusher) GCC 11.2.0 + (cmake) + + Linux 4.14.0-115.35.1.1chaos openmpi 4.0.5 + #1 SMP aarch64 GNU/Linux GCC 9.2.0 (ARM-build-5) + (stria) GCC 7.2.0 (Spack GCC) + (cmake) + + Linux 4.14.0-115.35.1.3chaos spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1 + (vortex) GCC 8.3.1 + XL 16.1.1 + (cmake) + + Linux-4.14.0-115.21.2 spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1, 14.0.5 + (lassen) GCC 8.3.1 + XL 16.1.1.2, 2021,09.22, 2022.08.05 + (cmake) + + Linux-4.12.14-197.99-default cray-mpich/7.7.14 + #1 SMP x86_64 GNU/Linux cce 12.0.3 + (theta) GCC 11.2.0 + llvm 9.0 + Intel 19.1.2 + + Linux 3.10.0-1160.36.2.el7.ppc64 gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + #1 SMP ppc64be GNU/Linux g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + Power8 (echidna) GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + + Linux 3.10.0-1160.24.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos7 Version 4.8.5 20150623 (Red Hat 4.8.5-4) + (jelly/kituo/moohan) Version 4.9.3, Version 5.3.0, Version 6.3.0, + Version 7.2.0, Version 8.3.0, Version 9.1.0 + Intel(R) C (icc), C++ (icpc), Fortran (icc) + compilers: + Version 17.0.0.098 Build 20160721 + GNU C (gcc) and C++ (g++) 4.8.5 compilers + with NAG Fortran Compiler Release 6.1(Tozai) + Intel(R) C (icc) and C++ (icpc) 17.0.0.098 compilers + with NAG Fortran Compiler Release 6.1(Tozai) + MPICH 3.1.4 compiled with GCC 4.9.3 + MPICH 3.3 compiled with GCC 7.2.0 + OpenMPI 2.1.6 compiled with icc 18.0.1 + OpenMPI 3.1.3 and 4.0.0 compiled with GCC 7.2.0 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Version 19.10-0 + (autotools and cmake) + + Linux-3.10.0-1160.0.0.1chaos openmpi-4.1.2 + #1 SMP x86_64 GNU/Linux clang 6.0.0, 11.0.1 + (quartz) GCC 7.3.0, 8.1.0 + Intel 19.0.4, 2022.2, oneapi.2022.2 + + Linux-3.10.0-1160.71.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (skybridge) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.66.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (attaway) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.59.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux Intel/19.1 + (chama) (cmake) + + macOS Apple M1 11.6 Apple clang version 12.0.5 (clang-1205.0.22.11) + Darwin 20.6.0 arm64 gfortran GNU Fortran (Homebrew GCC 11.2.0) 11.1.0 + (macmini-m1) Intel icc/icpc/ifort version 2021.3.0 202106092021.3.0 20210609 + + macOS Big Sur 11.3.1 Apple clang version 12.0.5 (clang-1205.0.22.9) + Darwin 20.4.0 x86_64 gfortran GNU Fortran (Homebrew GCC 10.2.0_3) 10.2.0 + (bigsur-1) Intel icc/icpc/ifort version 2021.2.0 20210228 + + macOS High Sierra 10.13.6 Apple LLVM 
version 10.0.0 (clang-1000.10.44.4) + 64-bit gfortran GNU Fortran (GCC) 6.3.0 + (bear) Intel icc/icpc/ifort version 19.0.4.233 20190416 + + macOS Sierra 10.12.6 Apple LLVM version 9.0.0 (clang-900.39.2) + 64-bit gfortran GNU Fortran (GCC) 7.4.0 + (kite) Intel icc/icpc/ifort version 17.0.2 + + Mac OS X El Capitan 10.11.6 Apple clang version 7.3.0 from Xcode 7.3 + 64-bit gfortran GNU Fortran (GCC) 5.2.0 + (osx1011test) Intel icc/icpc/ifort version 16.0.2 + + + Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos6 Version 4.4.7 20120313 + (platypus) Version 4.9.3, 5.3.0, 6.2.0 + MPICH 3.1.4 compiled with GCC 4.9.3 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Version 19.10-0 + + Windows 10 x64 Visual Studio 2015 w/ Intel C/C++/Fortran 18 (cmake) + Visual Studio 2017 w/ Intel C/C++/Fortran 19 (cmake) + Visual Studio 2019 w/ clang 12.0.0 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2019 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) + Visual Studio 2022 w/ clang 15.0.1 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2022 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) + Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake) + + +Known Problems +============== + + ************************************************************ + * _ * + * (_) * + * __ ____ _ _ __ _ __ _ _ __ __ _ * + * \ \ /\ / / _` | '__| '_ \| | '_ \ / _` | * + * \ V V / (_| | | | | | | | | | | (_| | * + * \_/\_/ \__,_|_| |_| |_|_|_| |_|\__, | * + * __/ | * + * |___/ * + * * + * Please refrain from running any program (including * + * HDF5 tests) which uses the subfiling VFD on Perlmutter * + * at the National Energy Research Scientific Computing * + * Center, NERSC. * + * Doing so may cause a system disruption due to subfiling * + * crashing Lustre. The sytem's Lustre bug is expected * + * to be resolved by 2023. * + * * + ************************************************************ + + CMake files do not behave correctly with paths containing spaces. + Do not use spaces in paths because the required escaping for handling spaces + results in very complex and fragile build files. + ADB - 2019/05/07 + + At present, metadata cache images may not be generated by parallel + applications. Parallel applications can read files with metadata cache + images, but since this is a collective operation, a deadlock is possible + if one or more processes do not participate. + + CPP ptable test fails on both VS2017 and VS2019 with Intel compiler, JIRA + issue: HDFFV-10628. This test will pass with VS2015 with Intel compiler. + + The subsetting option in ph5diff currently will fail and should be avoided. + The subsetting option works correctly in serial h5diff. + + Several tests currently fail on certain platforms: + MPI_TEST-t_bigio fails with spectrum-mpi on ppc64le platforms. + + MPI_TEST-t_subfiling_vfd and MPI_TEST_EXAMPLES-ph5_subfiling fail with + cray-mpich on theta and with XL compilers on ppc64le platforms. + + MPI_TEST_testphdf5_tldsc fails with cray-mpich 7.7 on cori and theta. + + Known problems in previous releases can be found in the HISTORY*.txt files + in the HDF5 source. Please report any new problems found to + help@hdfgroup.org. + + +CMake vs. Autotools installations +================================= +While both build systems produce similar results, there are differences. +Each system produces the same set of folders on linux (only CMake works +on standard Windows); bin, include, lib and share. 
Autotools places the +COPYING and RELEASE.txt file in the root folder, CMake places them in +the share folder. + +The bin folder contains the tools and the build scripts. Additionally, CMake +creates dynamic versions of the tools with the suffix "-shared". Autotools +installs one set of tools depending on the "--enable-shared" configuration +option. + build scripts + ------------- + Autotools: h5c++, h5cc, h5fc + CMake: h5c++, h5cc, h5hlc++, h5hlcc + +The include folder holds the header files and the fortran mod files. CMake +places the fortran mod files into separate shared and static subfolders, +while Autotools places one set of mod files into the include folder. Because +CMake produces a tools library, the header files for tools will appear in +the include folder. + +The lib folder contains the library files, and CMake adds the pkgconfig +subfolder with the hdf5*.pc files used by the bin/build scripts created by +the CMake build. CMake separates the C interface code from the fortran code by +creating C-stub libraries for each Fortran library. In addition, only CMake +installs the tools library. The names of the szip libraries are different +between the build systems. + +The share folder will have the most differences because CMake builds include +a number of CMake specific files for support of CMake's find_package and support +for the HDF5 Examples CMake project. + +The issues with the gif tool are: + HDFFV-10592 CVE-2018-17433 + HDFFV-10593 CVE-2018-17436 + HDFFV-11048 CVE-2020-10809 +These CVE issues have not yet been addressed and are avoided by not building +the gif tool by default. Enable building the High-Level tools with these options: + autotools: --enable-hltools + cmake: HDF5_BUILD_HL_TOOLS=ON %%%%1.13.3%%%% diff --git a/release_docs/HISTORY-1_8.txt b/release_docs/HISTORY-1_8.txt deleted file mode 100644 index 461e0be7394..00000000000 --- a/release_docs/HISTORY-1_8.txt +++ /dev/null @@ -1,14439 +0,0 @@ -HDF5 History -============ - -This file contains development history of HDF5 1.8 branch - -23. Release Information for hdf5-1.8.21 -22. Release Information for hdf5-1.8.20 -21. Release Information for hdf5-1.8.19 -20. Release Information for hdf5-1.8.18 -19. Release Information for hdf5-1.8.17 -18. Release Information for hdf5-1.8.16 -17. Release Information for hdf5-1.8.15 -16. Release Information for hdf5-1.8.14 -15. Release Information for hdf5-1.8.13 -14. Release Information for hdf5-1.8.12 -13. Release Information for hdf5-1.8.11 -12. Release Information for hdf5-1.8.10-patch1 -11. Release Information for hdf5-1.8.10 -10. Release Information for hdf5-1.8.9 -09. Release Information for hdf5-1.8.8 -08. Release Information for hdf5-1.8.7 -07. Release Information for hdf5-1.8.6 -06. Release Information for hdf5-1.8.5 -05. Release Information for hdf5-1.8.4 -04. Release Information for hdf5-1.8.3 -03. Release Information for hdf5-1.8.2 -02. Release Information for hdf5-1.8.1 -01. Release Information for hdf5-1.8.0 - -[Search on the string '%%%%' for section breaks of each release.] - -%%%%1.8.21%%%% - - -HDF5 version 1.8.21 released on 2018-06-04 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.20 and -HDF5-1.8.21, and contains information on the platforms tested and -known problems in HDF5-1.8.21. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. 
- -Links to the HDF5 1.8.21 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - https://support.hdfgroup.org/HDF5/ - -The HDF5 1.8.21 release can be obtained from: - - https://support.hdfgroup.org/HDF5/release/obtain518.html - -User documentation for 1.8.21 can be accessed directly at this location: - - https://support.hdfgroup.org/HDF5/doc1.8/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - https://support.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.21 (current -release) versus Release 1.8.20 - - https://support.hdfgroup.org/HDF5/doc1.8/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.20 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Configuration - ------------- - - CMake - - Change minimum version to 3.10. - - This change removes the need to support a copy of the FindMPI.cmake module, - which has been removed, along with its subfolder in the config/cmake_ext_mod - location. - - (ADB - 2018/03/09) - - - CMake - - Add pkg-config file generation - - Added pkg-config file generation for the C, C++, HL, and HL C++ libraries. - In addition, builds on linux will create h5cXXX scripts that use the pkg-config - files. This is a limited implementation of a script like autotools h5cc. - - (ADB - 2018/03/08, HDFFV-4359) - - - CMake - - Refactor use of CMAKE_BUILD_TYPE for new variable, which understands - the type of generator in use. - - Added new configuration macros to use new HDF_BUILD_TYPE variable. This - variable is set correctly for the type of generator being used for the build. - - (ADB - 2018/01/08, HDFFV-10385, HDFFV-10296) - - C++ API - ------- - - The following C++ API wrappers have been added to class H5Location - + H5Lcreate_soft: - // Creates a soft link from link_name to target_name. - void link(const char *target_name, const char *link_name,...) - void link(const H5std_string& target_name,...) - - + H5Lcreate_hard: - // Creates a hard link from new_name to curr_name. - void link(const char *curr_name, const Group& new_loc,...) - void link(const H5std_string& curr_name, const Group& new_loc,...) - - // Creates a hard link from new_name to curr_name in the same location. - void link(const char *curr_name, const hid_t same_loc,...) - void link(const H5std_string& curr_name, const hid_t same_loc,...) - - Note: previous version CommonFG::link will be deprecated. - - + H5Lcopy: - // Copy an object from a group of file to another. - void copyLink(const char *src_name, const Group& dst,...) - void copyLink(const H5std_string& src_name, const Group& dst,...) - - // Copy an object from a group of file to the same location. - void copyLink(const char *src_name, const char *dst_name,...) - void copyLink(const H5std_string& src_name,...) - - + H5Lmove: - // Rename an object in a group or file to a new location. - void moveLink(const char* src_name, const Group& dst,...) - void moveLink(const H5std_string& src_name, const Group& dst,...) 
- - // Rename an object in a group or file to the same location. - void moveLink(const char* src_name, const char* dst_name,...) - void moveLink(const H5std_string& src_name,...) - - Note: previous version CommonFG::move will be deprecated. - - + H5Ldelete: - // Removes the specified link from this location. - void unlink(const char *link_name, - const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) - void unlink(const H5std_string& link_name, - const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) - - Note: An additional parameter is added to CommonFG::unlink and it - is moved to H5Location. - - (BMR - 2018/05/11 - HDFFV-10445) - - - New property list subclasses - - Property list subclasses StrCreatPropList, LinkCreatPropList, and - AttrCreatPropList are added for the C property list classes - H5P_STRING_CREATE, H5P_LINK_CREATE, and H5P_ATTRIBUTE_CREATE. - - (BMR - 2018/05/11 - HDFFV-10445) - - - Another argument, LinkCreatPropList& lcpl, is added to the following - functions for the use of link creation property list. - Group createGroup(const char* name, size_t size_hint = 0, - const LinkCreatPropList& lcpl = LinkCreatPropList::DEFAULT) - Group createGroup(const H5std_string& name, size_t size_hint = 0, - const LinkCreatPropList& lcpl = LinkCreatPropList::DEFAULT) - - (BMR - 2018/05/11 - HDFFV-10445) - - - -Support for New Platforms, Languages, and Compilers -=================================================== - - - Added support for Visual Studio 2017 w/ Intel Fortran 18 on Windows 10 x64. - - -Bug Fixes since HDF5-1.8.20 -=========================== - - - If an HDF5 file contains a filter pipeline message with a 'number of - filters' field that exceeds the maximum number of allowed filters, - the error handling code will attempt to dereference a NULL pointer. - - This issue was reported to The HDF Group as issue #CVE-2017-17505. - - NOTE: The HDF5 C library cannot produce such a file. This condition - should only occur in a corrupt (or deliberately altered) file - or a file created by third-party software. - - This problem arose because the error handling code assumed that - the 'number of filters' field implied that a dynamic array of that - size had already been created and that the cleanup code should - iterate over that array and clean up each element's resources. If - an error occurred before the array has been allocated, this will - not be true. - - This has been changed so that the number of filters is set to - zero on errors. Additionally, the filter array traversal in the - error handling code now requires that the filter array not be NULL. - - (DER - 2018/02/06, HDFFV-10354) - - - If an HDF5 file contains a filter pipeline message which contains - a 'number of filters' field that exceeds the actual number of - filters in the message, the HDF5 C library will read off the end of - the read buffer. - - This issue was reported to The HDF Group as issue #CVE-2017-17506. - - NOTE: The HDF5 C library cannot produce such a file. This condition - should only occur in a corrupt (or deliberately altered) file - or a file created by third-party software. - - The problem was fixed by passing the buffer size with the buffer - and ensuring that the pointer cannot be incremented off the end - of the buffer. A mismatch between the number of filters declared - and the actual number of filters will now invoke normal HDF5 - error handling. 
- - (DER - 2018/02/26, HDFFV-10355) - - - If an HDF5 file contains a malformed compound datatype with a - suitably large offset, the type conversion code can run off - the end of the type conversion buffer, causing a segmentation - fault. - - This issue was reported to The HDF Group as issue #CVE-2017-17507. - - NOTE: The HDF5 C library cannot produce such a file. This condition - should only occur in a corrupt (or deliberately altered) file - or a file created by third-party software. - - THE HDF GROUP WILL NOT FIX THIS BUG AT THIS TIME - - Fixing this problem would involve updating the publicly visible - H5T_conv_t function pointer typedef and versioning the API calls - which use it. We normally only modify the public API during - major releases, so this bug will not be fixed at this time. - - (DER - 2018/02/26, HDFFV-10356) - - - If an HDF5 file contains a malformed compound type which contains - a member of size zero, a division by zero error will occur while - processing the type. - - This issue was reported to The HDF Group as issue #CVE-2017-17508. - - NOTE: The HDF5 C library cannot produce such a file. This condition - should only occur in a corrupt (or deliberately altered) file - or a file created by third-party software. - - Checking for zero before dividing fixes the problem. Instead of the - division by zero, the normal HDF5 error handling is invoked. - - (DER - 2018/02/26, HDFFV-10357) - - - If an HDF5 file contains a malformed symbol table node that declares - it contains more symbols than it actually contains, the library - can run off the end of the metadata cache buffer while processing - the symbol table node. - - This issue was reported to The HDF Group as issue #CVE-2017-17509. - - NOTE: The HDF5 C library cannot produce such a file. This condition - should only occur in a corrupt (or deliberately altered) file - or a file created by third-party software. - - Performing bounds checks on the buffer while processing fixes the - problem. Instead of the segmentation fault, the normal HDF5 error - handling is invoked. - - (DER - 2018/03/12, HDFFV-10358) - - - Configuration - ------------- - - Library - - Moved the location of gcc attribute. - - The gcc attribute(no_sanitize), named as the macro HDF_NO_UBSAN, - was located after the function name. Builds with GCC 7 did not - indicate any problem, but GCC 8 issued errors. Moved the - attribute before the function name, as required. - - (ADB 2018/05/22, HDFFV-10473) - - - CMake - - Update CMake commands configuration. - - A number of improvements were made to the CMake commands. Most - changes simplify usage or eliminate unused constructs. Also, - some changes support better cross-platform support. - - (ADB - 2018/02/01, HDFFV-10398) - - - CMake - - Correct usage of CMAKE_BUILD_TYPE variable. - - The use of the CMAKE_BUILD_TYPE is incorrect for multi-config - generators (Visual Studio and XCode) and is optional for single - config generators. Created a new macro to check - GLOBAL PROPERTY -> GENERATOR_IS_MULTI_CONFIG - Created two new HDF variable, HDF_BUILD_TYPE and HDF_CFG_BUILD_TYPE. - Defaults for these variables is "Release". - - (ADB - 2018/01/10, HDFFV-10385) - - - CMake - - Add replacement of fortran flags if using static CRT. - - Added TARGET_STATIC_CRT_FLAGS call to HDFUseFortran.cmake file in - config/cmake_ext_mod folder. - - (ADB - 2018/01/08, HDFFV-10334) - - - Library - ------- - - Utility function can not handle lowercase Windows drive letters - - Added call to toupper function for drive letter. 
- - (ADB - 2017/12/18, HDFFV-10307) - - - Tools - ----- - - h5repack - - h5repack changes the chunk parameters when a change of layout is not - specified and a filter is applied. - - HDFFV-10297, HDFFV-10319 reworked code for h5repack and h5diff code - in the tools library. The check for an existing layout was incorrectly - placed into an if block and not executed. The check was moved into - the normal path of the function. - - (ADB - 2018/02/21, HDFFV-10412) - - - h5dump - - the tools library will hide the error stack during file open. - - While this is preferable almost always, there are reasons to enable - display of the error stack when a tool will not open a file. Adding an - optional argument to the --enable-error-stack will provide this use case. - As an optional argument it will not affect the operation of the - --enable-error-stack. h5dump is the only tool to implement this change. - - (ADB - 2018/02/15, HDFFV-10384) - - - h5dump - - h5dump would output an indented blank line in the filters section. - - h5dump overused the h5tools_simple_prefix function, which is a - function intended to account for the data index (x,y,z) option. - Removed the function call for header information. - - (ADB - 2018/01/25, HDFFV-10396) - - - h5repack - - h5repack incorrectly searched internal object table for name. - - h5repack would search the table of objects for a name, if the - name did not match it tried to determine if the name without a - leading slash would match. The logic was flawed! The table - stored names(paths) without a leading slash and did a strstr - of the table path to the name. - The assumption was that if there was a difference of one then - it was a match, however "pressure" would match "/pressure" as - well as "/pressure1", "/pressure2", etc. Changed logic to remove - any leading slash and then do a full compare of the name. - - (ADB - 2018/01/18, HDFFV-10393) - - - h5repack - - h5repack failed to handle command line parameters for customer filters. - - User defined filter parameter conversions would fail when integers - were represented on the command line with character strings - larger than 9 characters. Increased local variable array for storing - the current command line parameter to prevent buffer overflows. - - (ADB - 2018/01/17, HDFFV-10392) - - - h5diff - - h5diff seg faulted if comparing VL strings against fixed strings. - - Reworked solution for HDFFV-8625 and HDFFV-8639. Implemented the check - for string objects of same type in the diff_can_type function by - adding an if(tclass1 == H5T_STRING) block. This "if block" moves the - same check that was added for attributes to this function, which is - used by all object types. This function handles complex type structures. - Also added a new test file in h5diffgentest for testing this issue - and removed the temporary files used in the test scripts. - - (ADB - 2018/01/04, HDFFV-8745) - - - C++ API - ------- - - Removal of memory leaks - - A private function was inadvertently called, causing memory leaks. This - is now fixed. - - (BMR - 2018/04/12 - User reported in email) - - - Changes in exception classes - - Some exception classes are reorganized to reflect the HDF5 object - hierarchy and allow customization. 
- DataSetIException -> LocationException -> Exception - DataTypeIException -> LocationException -> Exception - GroupIException -> LocationException -> Exception - AttributeIException -> LocationException -> Exception - FileIException -> GroupIException -> LocationException -> Exception - Member functions in H5Location and H5Object now throw specific exceptions - associated with the invoking objects. - - (BMR - 2018/05/11) - - - H5Location::closeObjId is made static - (BMR - 2018/05/11) - - - H5A wrappers in H5Location are removed as they have been in H5Object. - (BMR - 2018/05/11) - - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. - - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus/mayll) Version 4.4.7 20120313 - Versions 4.9.3, 5.3.0, 6.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 17.10-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.4.196 Build 20160721 - MPICH 3.1.4 compiled with GCC 4.9.3 - OpenMPI 2.0.1 compiled with GCC 4.9.3 - - Linux 2.6.32-573.18.1.el6 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan/jelly Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Versions 4.9.3, 5.3.0, 6.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.4.196 Build 20170411 - MPICH 3.1.4 compiled with GCC 4.9.3 - NAG Fortran Compiler Release 6.1(Tozai) Build 6116 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Visual Studio 2015 w/ Intel C, Fortran 2017 (cmake) - Visual Studio 2015 w/ MSMPI 8 (cmake) - - Windows 10 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Visual Studio 2017 w/ Intel Fortran 18 (cmake) - - Mac OS X Mavericks 10.9.5 Apple LLVM version 6.0 (clang-600.0.57) - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (wren/quail) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Yosemite 10.10.5 Apple LLVM version 6.1 (clang-602.0.53) - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.6 Apple LLVM version 7.3.0 (clang-703.0.29) - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (VM osx1011dev/osx1011test) Intel icc/icpc/ifort version 16.0.2 - - Mac OS Sierra 10.12.6 Apple LLVM version 8.1 (clang-802.0.42) - 64-bit gfortran GNU Fortran (GCC) 7.1.0 - (kite) Intel icc/icpc/ifort version 17.0.2 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -SunOS 5.11 
32-bit n y/y n y y y -SunOS 5.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/n n y y y -Windows 7 x64 Cygwin n y/n n y y y -Windows 10 y y/y n y y y -Windows 10 x64 y y/y n y y y -Mac OS X Yosemite 10.10.5 64-bit n y/y n y y y -Mac OS X El Capitan 10.11.6 64-bit n y/y n y y y -Mac OS Sierra 10.12.6 64-bit n y/y n y y y -AIX 6.1 32- and 64-bit n y/n n y y y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel n y/y n y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-573.18.1.el6.ppc64 n y/n n y y y - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -SunOS 5.11 32-bit y y y y -SunOS 5.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 7 x64 Cygwin n n n y -Windows 10 y y y y -Windows 10 x64 y y y y -Mac OS X Yosemite 10.10.5 64-bit y n y y -Mac OS X El Capitan 10.11.6 64-bit y n y y -Mac OS Sierra 10.12.6 64-bit y n y y -AIX 6.1 32- and 64-bit y n n y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y -Linux 2.6.32-573.18.1.el6.ppc64 y y y y - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.32-573.22.1.el6 g95 (GCC 4.0.3 (g95 0.94!) - #1 SMP x86_64 GNU/Linux - (mayll) - - Debian8.4.0 3.16.0-4-amd64 #1 SMP Debian 3.16.36-1 x86_64 GNU/Linux - gcc (Debian 4.9.2-10) 4.9.2 - GNU Fortran (Debian 4.9.2-10) 4.9.2 - (cmake and autotools) - - Fedora24 4.7.2-201.fc24.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 6.1.1 20160621 (Red Hat 6.1.1-3) - GNU Fortran (GCC) 6.1.1 20160621 (Red Hat 6.1.1-3) - (cmake and autotools) - - CentOS 7.2 3.10.0-327.28.2.el7.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-4) - GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-4) - (cmake and autotools) - - Ubuntu 16.04 4.4.0-38-generic #62-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 - GNU Fortran (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 - (cmake and autotools) - - -Known Problems -============== - - The dynamically loaded plugin test libraries require undefined references - to HDF5 functions to be resolved at runtime in order to function properly. - With autotools on CYGWIN this results in build errors, and we have not - found a solution that satisfies both. Therefore the dynamically loaded - plugin tests have been disabled on CYGWIN. - - Mac OS X 10.13 added additional subdirectory structure in .libs for shared - libraries. Consequently "make check" will fail testing java and dynamically - loaded plugin test libraries attempting to copy files from the previous - locations in .libs directories. This will be addressed in the next release - when support for the Mac OS X 10.13 platform is added. - - Known problems in previous releases can be found in the HISTORY*.txt files - in the HDF5 source. Please report any new problems found to - help@hdfgroup.org. 
- - -%%%%1.8.20%%%% - - -HDF5 version 1.8.20 released on 2017-11-28 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.19 and -HDF5-1.8.20, and contains information on the platforms tested and -known problems in HDF5-1.8.20. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.20 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - https://support.hdfgroup.org/HDF5/ - -The HDF5 1.8.20 release can be obtained from: - - https://support.hdfgroup.org/HDF5/release/obtain518.html - -User documentation for 1.8.20 can be accessed directly at this location: - - https://support.hdfgroup.org/HDF5/doc1.8/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - https://support.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.20 (current -release) versus Release 1.8.19 - - https://support.hdfgroup.org/HDF5/doc1.8/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.20 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Tools - ----- - - h5diff - - h5diff has new option enable-error-stack. - - Updated h5diff with the --enable-error-stack argument, which - enables the display of the hdf5 error stack. This completes the - improvement to the main tools; h5copy, h5diff, h5dump, h5ls and - h5repack. - - (ADB - 2017/08/30, HDFFV-9774) - - - C++ API - ------- - - The following C++ API wrappers have been added to the C++ Library: - - // Creates a binary object description of this datatype. - void DataType::encode() - C API H5Tencode() - - // Returns the decoded type from the binary object description. - DataType::decode() - C API H5Tdecode() - ArrayType::decode() - C API H5Tdecode() - CompType::decode() - C API H5Tdecode() - DataType::decode() - C API H5Tdecode() - EnumType::decode() - C API H5Tdecode() - FloatType::decode() - C API H5Tdecode() - IntType::decode() - C API H5Tdecode() - StrType::decode() - C API H5Tdecode() - VarLenType::decode() - C API H5Tdecode() - - // Three overloaded functions to retrieve information about an object - H5Location::getObjectInfo() - H5Oget_info()/H5Oget_info_by_name() - - (BMR - 2017/10/17, HDFFV-10175) - - - New constructors to open existing datatypes added in ArrayType, - CompType, DataType, EnumType, FloatType, IntType, StrType, and - VarLenType. - - (BMR - 2017/10/17, HDFFV-10175) - - - A document is added to the HDF5 C++ API Reference Manual to show the - mapping from a C API to C++ wrappers. It can be found from the main - page of the C++ API Reference Manual. - - (BMR - 2017/10/17, HDFFV-10151) - - - High-Level APIs - --------------- - - H5DOread_chunk - - Users wanted to read compressed data directly from a file without any - processing by the HDF5 data transfer pipeline, just as they were able - to write it directly to a file with H5DOwrite_chunk. 
- - New API function, corresponding to existing function H5DOwrite_chunk. - H5DOread_chunk reads a raw data chunk directly from a chunked dataset - in the file into the application buffer, bypassing the library’s internal - data transfer pipeline, including filters. - - (VC - 2017/05/02, HDFFV-9934) - - -Support for New Platforms, Languages, and Compilers -=================================================== - - - Added NAG compiler - - -Bug Fixes since HDF5-1.8.19 -=========================== - - Configuration - ------------- - - cmake - - The hdf5 library used shared szip and zlib, which needlessly required - applications to link with the same szip and zlib libraries. - - Changed the target_link_libraries commands to use the static libs. - Removed improper link duplication of szip and zlib. - Adjusted the link dependencies and the link interface values of - the target_link_libraries commands. - - (ADB - 2017/11/14, HDFFV-10329) - - - cmake MPI - - CMake implementation for MPI was problematic and would create incorrect - MPI library references in the hdf5 libraries. - - Reworked the CMake MPI code to properly create CMake targets.Also merged - the latest CMake FindMPI.cmake changes to the local copy. This is necessary - until HDF changes the CMake minimum to 3.9 or greater. - - (ADB - 2017/11/02, HDFFV-10321) - - - - Fixed Fortran linker flags when using the NAG Fortran compiler (autotools). - - (HDFFV-10037, MSB, 2017/10/21) - - - cmake - - Too many commands for POST_BUILD step caused command line to be - too big on windows. - - Changed foreach of copy command to use a custom command with the - use of the HDFTEST_COPY_FILE macro. - - (ADB - 2017/07/12, HDFFV-10254) - - - Library - ------- - - filter plugin handling in H5PL.c and H5Z.c - - It was discovered that the dynamic loading process used by - filter plugins had issues with library dependencies. - - CMake build process changed to use LINK INTERFACE keywords, which - allowed HDF5 C library to make dependent libraries private. The - filter plugin libraries no longer require dependent libraries - (such as szip or zlib) to be available. - (ADB - 2017/11/16, HDFFV-10328) - - - Fix rare object header corruption bug - - In certain cases, such as when converting large attributes to dense - storage, an error could occur which would either fail an assertion or - cause file corruption. Fixed and added test. - - (NAF - 2017/11/14, HDFFV-10274) - - - H5Zfilter_avail in H5Z.c - - The public function checked for plugins, while the private - function did not. - - Modified H5Zfilter_avail and private function, H5Z_filter_avail. - Moved check for plugin from public to private function. Updated - H5P__set_filter due to change in H5Z_filter_avail. Updated tests. - - (ADB - 2017/10/10, HDFFV-10297, HDFFV-10319) - - - Fix H5Sencode bug when num points selected is >2^32 - - Modified to fail if the 32 bit limit is exceeded when encoding either - offsets or counts in the selection. - - (HDFFV-10323, VC, 2017/09/07) - - - Fix H5HL_offset_into() - - (1) Fix H5HL_offset_into() to return error when offset exceeds heap data - block size. - (2) Fix other places in the library that call this routine to detect - error routine. - - (HDFFV-10216, VC, 2017/09/05) - - - Tools - ----- - - h5repack - - h5repack failed to copy a dataset with existing filter. - - Reworked code for h5repack and h5diff code in tools library. Added - improved error handling, cleanup of resources and checks of calls. - Modified H5Zfilter_avail and private function, H5Z_filter_avail. 
- Moved check for plugin from public to private function. Updated - H5P__set_filter due to change in H5Z_filter_avail. Updated tests. - Note, h5repack output display has changed to clarify the individual - steps of the repack process. The output indicates if an operation - applies to all objects. Lines with notation and no information - have been removed. - - (ADB - 2017/10/10, HDFFV-10297, HDFFV-10319) - - - h5repack - - h5repack always set the User Defined filter flag to H5Z_FLAG_MANDATORY. - - Added another parameter to the 'UD=' option to set the flag by default - to '0' or H5Z_FLAG_MANDATORY, the other choice is '1' or H5Z_FLAG_OPTIONAL. - - (ADB - 2017/08/31, HDFFV-10269) - - - h5ls - - h5ls generated error on stack when it encountered a H5S_NULL - dataspace. - - Adding checks for H5S_NULL before calling H5Sis_simple (located - in the h5tools_dump_mem function) fixed the issue. - - (ADB - 2017/08/17, HDFFV-10188) - - - h5dump - - h5dump segfaulted on output of XML file. - - Function that escape'd strings used the full buffer length - instead of just the length of the replacement string in a - strncpy call. Using the correct length fixed the issue. - - (ADB - 2017/08/01, HDFFV-10256) - - - h5diff - - h5diff segfaulted on compare of a NULL variable length string. - - Improved h5diff compare of strings by adding a check for - NULL strings and setting the lengths to zero. - - (ADB - 2017/07/25, HDFFV-10246) - - - h5import - - h5import crashed trying to import data from a subset of a dataset. - - Improved h5import by adding the SUBSET keyword. h5import understands - to use the Count times the Block as the size of the dimensions. - Added INPUT_B_ORDER keyword to old-style configuration files. - The import from h5dump function expects the binary files to use native - types (FILE '-b' option) in the binary file. - - (ADB - 2017/06/15, HDFFV-10219) - - - C++ API - ------- - - Marked the following functions deprecated because they were moved to - class H5Object: - H5Location::createAttribute() - H5Location::openAttribute() - H5Location::attrExists() - H5Location::removeAttr() - H5Location::renameAttr() - H5Location::getNumAttrs() - - (BMR - 2017/10/17) - - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. 
- - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus/mayll) Version 4.4.7 20120313 - Versions 4.9.3, 5.3.0, 6.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 16.10-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.0.196 Build 20160721 - MPICH 3.1.4 compiled with GCC 4.9.3 - OpenMPI 2.0.1 compiled with GCC 4.9.3 - - Linux 2.6.32-573.18.1.el6 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan/jelly Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Versions 4.9.3, 5.3.0, 6.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.4.196 Build 20170411 - MPICH 3.1.4 compiled with GCC 4.9.3 - NAG Fortran Compiler Release 6.1(Tozai) Build 6116 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Visual Studio 2015 w/ Intel C, Fortran 2017 (cmake) - Visual Studio 2015 w/ MSMPI 8 (cmake) - Cygwin(CYGWIN_NT-6.1 2.8.0(0.309/5/3) - gcc and gfortran compilers (GCC 5.4.0) - (cmake and autotools) - - Windows 10 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Cygwin(CYGWIN_NT-6.1 2.8.0(0.309/5/3) - gcc and gfortran compilers (GCC 5.4.0) - (cmake and autotools) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Mac OS X Mavericks 10.9.5 Apple LLVM version 6.0 (clang-600.0.57) - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (wren/quail) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Yosemite 10.10.5 Apple LLVM version 6.1 (clang-602.0.53) - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.6 Apple LLVM version 7.3.0 (clang-703.0.29) - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (VM osx1011dev/osx1011test) Intel icc/icpc/ifort version 16.0.2 - - Mac OS Sierra 10.12.6 Apple LLVM version 8.1 (clang-802.0.42) - 64-bit gfortran GNU Fortran (GCC) 7.1.0 - (kite) Intel icc/icpc/ifort version 17.0.2 - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -SunOS 5.11 32-bit n y/y n y y y -SunOS 5.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/n n y y y -Windows 7 x64 Cygwin n y/n n y y y -Windows 10 y y/y n y y y -Windows 10 x64 y y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemite 10.10.5 64-bit n y/y n y y y -Mac OS X El Capitan 10.11.6 64-bit n y/y n y y y -Mac OS Sierra 10.12.6 64-bit n y/y n y y y -AIX 6.1 32- and 64-bit n y/n n y y y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 6.7 Linux 
2.6.32 x86_64 Intel n y/y n y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -SunOS 5.11 32-bit y y y y -SunOS 5.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 7 x64 Cygwin n n n y -Windows 10 y y y y -Windows 10 x64 y y y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemite 10.10.5 64-bit y n y y -Mac OS X El Capitan 10.11.6 64-bit y n y y -Mac OS Sierra 10.12.6 64-bit y n y y -AIX 6.1 32- and 64-bit y n n y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y -Linux 2.6.32-431.11.2.el6.ppc64 y y y y - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.32-573.22.1.el6 g95 (GCC 4.0.3 (g95 0.94!) - #1 SMP x86_64 GNU/Linux - (mayll) - - Debian8.4.0 3.16.0-4-amd64 #1 SMP Debian 3.16.36-1 x86_64 GNU/Linux - gcc (Debian 4.9.2-10) 4.9.2 - GNU Fortran (Debian 4.9.2-10) 4.9.2 - (cmake and autotools) - - Fedora24 4.7.2-201.fc24.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 6.1.1 20160621 (Red Hat 6.1.1-3) - GNU Fortran (GCC) 6.1.1 20160621 (Red Hat 6.1.1-3) - (cmake and autotools) - - CentOS 7.2 3.10.0-327.28.2.el7.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-4) - GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-4) - (cmake and autotools) - - Ubuntu 16.04 4.4.0-38-generic #62-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 - GNU Fortran (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 - (cmake and autotools) - - -Known Problems -============== - - The dynamically loaded plugin test libraries require undefined references - to HDF5 functions to be resolved at runtime in order to function properly. - With autotools on CYGWIN this results in build errors, and we have not - found a solution that satisfies both. Therefore the dynamically loaded - plugin tests have been disabled on CYGWIN. - - Mac OS X 10.13 added additional subdirectory structure in .libs for shared - libraries. Consequently "make check" will fail testing java and dynamically - loaded plugin test libraries attempting to copy files from the previous - locations in .libs directories. This will be addressed in the next release - when support for the Mac OS X 10.13 platform is added. - - Known problems in previous releases can be found in the HISTORY*.txt files - in the HDF5 source. Please report any new problems found to - help@hdfgroup.org. - - -%%%%1.8.19%%%% - - -HDF5 version 1.8.19 released on 2017-06-15 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.18 and -HDF5-1.8.19, and contains information on the platforms tested and -known problems in HDF5-1.8.19. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. 
- -Links to the HDF5 1.8.19 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - https://support.hdfgroup.org/HDF5/ - -The HDF5 1.8.19 release can be obtained from: - - https://support.hdfgroup.org/HDF5/release/obtain518.html - -User documentation for 1.8.19 can be accessed directly at this location: - - https://support.hdfgroup.org/HDF5/doc1.8/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - https://support.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.19 (current -release) versus Release 1.8.18 - - https://support.hdfgroup.org/HDF5/doc1.8/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.19 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Configuration - -------------- - - CMake in the Tools - - User-defined filters on Windows require that tools be built - with shared libraries. - - CMake command code added to build tools with - shared libraries as well as with static libraries. - - (ADB - 2017/02/07, HDFFV-10123) - - - Library - ------- - - H5PL package - - Users would like to be able to set the plugin path programmatically - instead of through the environment variable. - - H5PL package obtained new APIs for manipulating the entries of - the plugin path table. The new APIs are; - H5PLappend - Insert a plugin path at the end of the list. - H5PLprepend - Insert a plugin path at the beginning of the list. - H5PLreplace - Replace the path at the specified index. - H5PLinsert - Insert a plugin path at the specified index, moving - other paths after the index. - H5PLremove - Remove the plugin path at the specified index and - compacting the list. - H5PLget - Query the plugin path at the specified index. - H5PLsize - Query the size of the current list of plugin paths. - - (ADB - 2017/04/04, HDFFV-10143) - - - H5Dget_chunk_storage_size - - The storage size of a chunk in the file is needed to determine the size - of the buffer to be allocated for reading a chunk directly from a file. - - New API function gets the size in bytes currently allocated within a - file for a raw data chunk in a dataset. This function was added to get - the chunk size in support of the implementation of H5DOread_chunks, but - may also be useful for other purposes. - - (VC - 2017/05/02, HDFFV-9934) - - C++ API - ------- - - The following C++ API wrappers have been added to the C++ Library: - // Determines if a name exists. - H5Location::nameExists() - C API H5Lexists() - - // Checks if an ID is valid. - IdComponent::isValid() - C API H5Iis_valid() - - // Sets the number of soft or user-defined links that can be - // traversed before a failure occurs. - LinkAccPropList::setNumLinks() - C API H5Pset_nlinks() - - // Gets the number of soft or user-defined link traversals allowed - LinkAccPropList::getNumLinks() - C API H5Pget_nlinks() - - // Returns a copy of the creation property list of a datatype. - DataType::getCreatePlist() - C API H5Tget_create_plist() - - // Opens an object within a location, regardless its type. 
- H5Location::openObjId() - C API H5Oopen() - H5Location::openObjId() - C API H5Oopen() - - // Closes an object opened by openObjId(). - H5Location::closeObjId() - C API H5Oclose() - - // Gets general information about a file. - H5File::getFileInfo() - C API H5Fget_info() - - // Returns the header version of an HDF5 object. - H5Object::objVersion() - C API H5Oget_info() - - (BMR, 2017/05/13, HDFFV-10004, HDFFV-10156) - - - New class LinkAccPropList is added for link access property list - - (BMR, 2017/05/13, HDFFV-10156) - - High-Level APIs - --------------- - - H5DOread_chunk - - Users wanted to read compressed data directly from a file without any - processing by the HDF5 data transfer pipeline, just as they were able - to write it directly to a file with H5DOwrite_chunk. - - New API function, corresponding to existing function H5DOwrite_chunk. - H5DOread_chunk reads a raw data chunk directly from a chunked dataset - in the file into the application buffer, bypassing the library’s internal - data transfer pipeline, including filters. - - (VC - 2017/05/02, HDFFV-9934) - - -Support for New Platforms, Languages, and Compilers -=================================================== - - - Added OpenMPI 2.0.1 compiled with GCC 4.9.3 - - -Bug Fixes since HDF5-1.8.18 -=========================== - - Configuration - ------------- - - Support for Fortran shared libraries on OS X with autotools now - works. Cmake builds still disables Fortran shared libraries on OS X. - (MSB - 2017/04/30, HDFFV-2772) - - - Library - ------- - - bitfield datatypes - - bitfield datatypes did not fully support endianness of the data. - - Improved the endianness of bitfield datatypes by adding missing functional - code. This code used integer types as a template. - - (ADB - 2017/05/12, HDFFV-10186) - - - Newly created datasets with H5D_COMPACT layout failed to open after - several H5Dopen/H5Dclose cycles. - - The layout "dirty" flag for a compact dataset is now properly reset - before flushing the message. - - (VC - 2017/05/11, HDFFV-10051) - - Missing #ifdef __cplusplus macros were added to the generated H5Epubgen.h file. - - (DER - 2017/04/25, HDFFV-9638) - - - Tools - ----- - - h5repack - - h5repack did not maintain the creation order flag of the root - group. - - Improved h5repack by reading the creation order and applying the - flag to the new root group. Also added arguments to set the - order and index direction, which applies to the traversing of the - original file, on the command line. - - (ADB - 2017/05/26, HDFFV-8611) - - - h5diff - - h5diff failed to account for strpad type and null terminators - of char strings. Also, h5diff failed to account for string length - differences and would give a different result depending on file - order in the command line. - - Improved h5diff compare of strings and arrays by adding a check for - string lengths and if the strpad was null filled. - - (ADB - 2017/05/18, HDFFV-9055, HDFFV-10128) - - - h5diff - - h5diff help text about epsilon comparison was confusing. - - Changed h5diff help text to indicate that the 'a' refers to the - datapoint in file1 and 'b' refers to the datapoint value in file2. - - (ADB - 2017/05/16, HDFFV-9995) - - - h5diff - - h5diff did not report user-defined filter data differences correctly. - - Improved h5diff compare of user-defined filter data by reporting an - error if the user-defined filter plugin cannot be found. - - (ADB - 2017/01/18, HDFFV-9994) - C++ API - ------- - - The class hierarchy is revised to better reflect the HDF5 model. 
- Class H5File is now derived from class Group instead of H5Location. - Class Attribute is now derived from H5Location instead of IdComponent. - Wrappers of H5A APIs in H5Location are now duplicated in H5Object, - the original wrappers in H5Location will be deprecated in future - releases. - - (BMR - 2017/05/15, HDFFV-10156) - - - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. - - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus/mayll) Version 4.4.7 20120313 - Versions 4.9.3, 5.2.0, 6.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 16.10-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.0.098 Build 20160721 - MPICH 3.1.4 compiled with GCC 4.9.3 - OpenMPI 2.0.1 compiled with GCC 4.9.3 - - Linux 2.6.32-573.18.1.el6 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Versions 4.9.3, 5.3.0, 6.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 17.0.4.196 Build 20170411 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Visual Studio 2015 w/ Intel C, Fortran 2017 (cmake) - Visual Studio 2015 w/ MSMPI 8 (cmake) - Cygwin(CYGWIN_NT-6.1 2.8.0(0.309/5/3) - gcc and gfortran compilers (GCC 5.4.0) - (cmake and autotools) - - Windows 10 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Cygwin(CYGWIN_NT-6.1 2.8.0(0.309/5/3) - gcc and gfortran compilers (GCC 5.4.0) - (cmake and autotools) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Mac OS X Mt. 
Lion 10.8.5 Apple LLVM version 5.1 (clang-503.0.40) - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (swallow/kite) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Mavericks 10.9.5 Apple LLVM version 6.0 (clang-600.0.57) - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (wren/quail) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Yosemite 10.10.5 Apple LLVM version 6.1 (clang-602.0.53) - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.4 Apple LLVM version 7.3.0 (clang-703.0.29) - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (VM osx1011dev/osx1011test) Intel icc/icpc/ifort version 16.0.2 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -SunOS 5.11 32-bit n y/y n y y y -SunOS 5.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/n n y y y -Windows 7 x64 Cygwin n y/n n y y y -Windows 10 y y/y n y y y -Windows 10 x64 y y/y n y y y -Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemite 10.10.5 64-bit n y/y n y y y -AIX 6.1 32- and 64-bit n y/n n y y y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel n y/y n y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -SunOS 5.11 32-bit y y y y -SunOS 5.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 7 x64 Cygwin n n n y -Windows 10 y y y y -Windows 10 x64 y y y y -Mac OS X Mountain Lion 10.8.5 64-bit y n y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemite 10.10.5 64-bit y n y y -AIX 6.1 32- and 64-bit y n n y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y -Linux 2.6.32-431.11.2.el6.ppc64 y y y y - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.32-573.22.1.el6 g95 (GCC 4.0.3 (g95 0.94!) 
- #1 SMP x86_64 GNU/Linux - (mayll) - - Debian8.4.0 3.16.0-4-amd64 #1 SMP Debian 3.16.36-1 x86_64 GNU/Linux - gcc (Debian 4.9.2-10) 4.9.2 - GNU Fortran (Debian 4.9.2-10) 4.9.2 - (cmake and autotools) - - Fedora24 4.7.2-201.fc24.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 6.1.1 20160621 (Red Hat 6.1.1-3) - GNU Fortran (GCC) 6.1.1 20160621 (Red Hat 6.1.1-3) - (cmake and autotools) - - CentOS 7.2 3.10.0-327.28.2.el7.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-4) - GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-4) - (cmake and autotools) - - Ubuntu 16.04 4.4.0-38-generic #62-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 - GNU Fortran (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 - (cmake and autotools) - - -Known Problems -============== - - The dynamically loaded plugin test libraries require undefined references - to HDF5 functions to be resolved at runtime in order to function properly. - With autotools on CYGWIN this results in build errors, and we have not - found a solution that satisfies both. Therefore the dynamically loaded - plugin tests have been disabled on CYGWIN. - - Known problems in previous releases can be found in the HISTORY*.txt files - in the HDF5 source. Please report any new problems found to - help@hdfgroup.org. - - -%%%%1.8.18%%%% - - -HDF5 version 1.8.18 released on 2016-11-14 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.17 and -HDF5-1.8.18, and contains information on the platforms tested and -known problems in HDF5-1.8.18. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.18 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - https://support.hdfgroup.org/HDF5/ - -The HDF5 1.8.18 release can be obtained from: - - https://support.hdfgroup.org/HDF5/release/obtain518.html - -User documentation for 1.8.18 can be accessed directly at this location: - - https://support.hdfgroup.org/HDF5/doc1.8/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - https://support.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.18 (current -release) versus Release 1.8.17 - - https://support.hdfgroup.org/HDF5/doc1.8/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.17 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Configuration - ------------- - - - CMake: Added NAMESPACE hdf5:: to package configuration files to allow - projects using installed HDF5 binaries built with CMake to link with - them without specifying the HDF5 library location via IMPORTED_LOCATION. - - (ADB, 2016/10/17, HDFFV-10003) - - - - CMake: Changed the CTEST_BUILD_CONFIGURATION option to - CTEST_CONFIGURATION_TYPE as recommended by the CMake documentation. 
- - (ADB, 2016/10/17, HDFFV-9971) - - - CMake: Added support for GIT - - (ADB, 2016/07/12) - - - Library - ------- - - None - - - Parallel Library - ---------------- - - None - - - Tools - ----- - - None - - - High-Level APIs - --------------- - - None - - - Fortran API - ----------- - - None - - - C++ API - ------- - - None - - -Support for New Platforms, Languages, and Compilers -=================================================== - - - -Bug Fixes since HDF5-1.8.17 -=========================== - - Configuration - ------------- - - - Fixed a problem preventing HDF5 to be built on 32-bit CYGWIN by - condensing cygwin configuration files into a single file and - removing outdated compiler settings. - - (ABD, 2016/07/12, HDFFV-9946) - - - - CMake: Fixed a command length overflow error by converting custom - commands inside CMakeTest.cmake files into regular dependencies and - targets. - - (ABD, 2016/07/12, HDFFV-9939) - - - - CMake: Fixed a timeout error that would occasionally occur when running - the virtual file driver tests simultaneously due to test directory and file - name collisions. - - (ABD, 2016/09/19, HDFFV-9431) - - - Library - ------- - - - Fixed a memory leak that would occur when the library allocated memory - for an external file prefix (H5Pset_efile_prefix) and failed to free it. - - (DER, 2016/04/29) - - - - Fixed an error that would occur when calling H5Adelete on an attribute - which is attached to an externally linked object in the target file and - whose datatype is a committed datatype in the main file. - - (VC, 2016-07-04, HDFFV-9940) - - - - Fixed a problem where a plugin compiled into a DLL in the default plugin - directory could not be found by the HDF5 library at runtime on Windows - when the HDF5_PLUGIN_PATH environment variable was not set. - - (ABD, 2016/08/01, HDFFV-9706) - - - - Fixed an issue where H5Pset_alignment could result in misaligned blocks - with some input combinations, causing an assertion failure in debug mode. - - (NAF, 2016/08/11, HDFFV-9948) - - - - A number of issues were fixed when reading/writing from/to corrupted - files to ensure that the library fails gracefully in these cases: - - * Writing to a corrupted file that has an object message which is - incorrectly marked as shareable on disk results in a buffer overflow / - invalid write instead of a clean error message. - - * Decoding data from a corrupted file with a dataset encoded with the - H5Z_NBIT decoding can result in a code execution vulnerability under - the context of the application using the HDF5 library. - - * When decoding an array datatype from a corrupted file, the HDF5 library - fails to return an error in production if the number of dimensions - decoded is greater than the maximum rank. - - * When decoding an "old style" array datatype from a corrupted file, the - HDF5 library fails to return an error in production if the number of - dimensions decoded is greater than the maximum rank. - - (NAF, 2016/10/06, HDFFV-9950, HDFFV-9951, HDFFV-9992, HDFFV-9993) - - - - Fixed an error that would occur when copying an object with an attribute - which is a compound datatype consisting of a variable length string. - - (VC, 2016-10-17, HDFFV-7991) - - - Parallel Library - ---------------- - - - Fixed a bug that could occur when allocating a chunked dataset in parallel - with an alignment set and an alignment threshold greater than the chunk - size but less than or equal to the raw data aggregator size. 
- - (NAF, 2016/08/11, HDFFV-9969) - - - Performance - ------------- - - None - - - Tools - ----- - - - Fixed an error in the compiler wrapper scripts (h5cc, h5fc, et al.) - in which they would erroneously drop the file argument specified via - the -o flag when the -o flag was specified before the -c flag on the - command line, resulting in a failure to compile. - - (LRK, 2016/06/08, HDFFV-9938, HDFFV-9530) - - - - h5repack User Defined (UD) filter parameters were not parsed correctly. - - The UD filter parameters were not being parsed correctly. Reworked coding - section to parse the correct values and verify number of parameters. - - (ADB, 2016/10/19, HDFFV-9996, HDFFV-9974, HDFFV-9515, HDFFV-9039) - - - Fortran API - ----------- - - - Fortran library fails to compile and fails tests with NAG compiler. - - * Removed the non-standard assumption that KIND=SIZEOF, in the HDF5 - configure programs. - * Removed Fortran 66 character/integer conversions from tests. - * Removed the use of C_SIZEOF in the test programs - * Changed to using STORAGE_SIZE in the test programs if available. Otherwise, - uses C_SIZEOF or SIZEOF. - - (MSB, 2016/9/22, HDFFV-9973) - - - - Fortran segfaults for F03 tests with NAG compiler - - * Removed INTENT(OUT) from 'fillvalue' in F2003 interface - for H5Pget_fill_value_f. - - (MSB, 2016/9/22, HDFFV-9980) - - - C++ API - ------- - - - The macro H5_NO_NAMESPACE is deprecated from the HDF5 C++ API library. - In future releases, the macros H5_NO_STD and OLD_HEADER_FILENAME may - also be removed. - - (BMR, 2016/10/27, HDFFV-9532) - - - High-Level APIs: - --------------- - - - The high-level API Packet Table (PT) did not write data correctly when - the datatype is a compound type that has string type as one of the - members. This problem started in 1.8.15, after the fix of HDFFV-9042 - was applied, which caused the Packet Table to use native type to access - the data. It should be up to the application to specify whether the - buffer to be read into memory in the machine’s native architecture. - Thus, the PT is fixed to not use native type but to make a copy of the - user's provided datatype during creation or the packet table's datatype - during opening. If an application wishes to use native type to read the - data, then the application will request that. However, the Packet Table - doesn't provide a way to specify memory datatype in this release. This - feature will be available in future releases, HDFFV-10023. - - (BMR, 2016/10/27, HDFFV-9758) - - - Fortran High-Level APIs: - ------------------------ - - None - - - Testing - ------- - - None - - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. 
- - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus/mayll) Version 4.4.7 20120313 - Versions 4.8.4, 4.9.3, 5.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 15.7-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 Build 20150407 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 2.6.32-573.18.1.el6 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Versions 4.9.3, 5.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 Build 20150407 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Cygwin(CYGWIN_NT-6.1 2.2.1(0.289/5/3) gcc(4.9.3) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 10 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Mac OS X Mt. Lion 10.8.5 Apple LLVM version 5.1 (clang-503.0.40) - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (swallow/kite) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Mavericks 10.9.5 Apple LLVM version 6.0 (clang-600.0.57) - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (wren/quail) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Yosemite 10.10.5 Apple LLVM version 6.1 (clang-602.0.53) - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.4 Apple LLVM version 7.3.0 (clang-703.0.29) - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (VM osx1011dev/osx1011test) Intel icc/icpc/ifort version 16.0.2 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -SunOS 5.11 32-bit n y/y n y y y -SunOS 5.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/y n y y n -Windows 10 n y/y n y y y -Windows 10 x64 n y/y n y y y -Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemite 10.10.5 64-bit n y/y n y y y -AIX 6.1 32- and 64-bit n y/n n y y y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel n y/y n y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -SunOS 5.11 32-bit y y y y -SunOS 5.11 64-bit y y y y -Windows 7 y y y y 
-Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 10 y y y y -Windows 10 x64 y y y y -Mac OS X Mountain Lion 10.8.5 64-bit y n y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemite 10.10.5 64-bit y n y y -AIX 6.1 32- and 64-bit y n n y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y -Linux 2.6.32-431.11.2.el6.ppc64 y y y y - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.32-573.22.1.el6 g95 (GCC 4.0.3 (g95 0.94!) - #1 SMP x86_64 GNU/Linux - (platypus) - - Debian8.4.0 3.16.0-4-amd64 #1 SMP Debian 3.16.36-1 x86_64 GNU/Linux - gcc (Debian 4.9.2-10) 4.9.2 - GNU Fortran (Debian 4.9.2-10) 4.9.2 - (cmake and autotools) - - Fedora24 4.7.2-201.fc24.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 6.1.1 20160621 (Red Hat 6.1.1-3) - GNU Fortran (GCC) 6.1.1 20160621 (Red Hat 6.1.1-3) - (cmake and autotools) - - CentOS 7.2 3.10.0-327.28.2.el7.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-4) - GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-4) - (cmake and autotools) - - Ubuntu 16.04 4.4.0-38-generic #62-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 - GNU Fortran (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 - (cmake and autotools) - - -Known Problems -============== -* On windows platforms in debug configurations, the VFD flush1 tests will fail - with the split and multi VFD drivers. These tests will display a modal debug - dialog which must be answered or wait for the test timeout to expire. - (ADB - 2014/06/23 - HDFFV-8851) - -* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv - catches some undefined behavior in the alignment algorithm of the macro DETECT_I - in H5detect.c (Issue 8147). Since the algorithm is trying to detect the alignment - of integers, ideally the flag -fcatch-undefined-behavior shouldn't to be used for - H5detect.c. In the future, we can separate flags for H5detect.c from the rest of - the library. (SLU - 2013/10/16) - -* Make provided by Solaris fails in "make check". Solaris users should use - gmake to build and install the HDF5 software. (AKC - 2013/10/08 - HDFFV-8534) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD with the - native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the - ports (and probably gcc releases after that). - (QAK - 2012/10/19) - -* The following h5dump test case fails in BG/P machines (and potentially other - machines that use a command script to launch executables): - - h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0" - tno-subset.h5 - - This is due to the embedded spaces in the dataset name being interpreted - by the command script launcher as meta-characters, thus passing three - arguments to h5dump's -d flag. The command passes if run by hand, just - not via the test script. - (AKC - 2012/05/03) - -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. 
The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release. - (DER - 2011/10/14 - HDFFV-8235) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in a future release. - (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. - (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. - (AKC - 2011/05/07) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the fortran and hl/fortran - tests fail. the issue. HL and C++ shared libraries should now be - working as intended, however. - (MAM - 2011/04/20) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. 
If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - - -%%%%1.8.17%%%% - - -HDF5 version 1.8.17 released on 2016-05-10 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.16 and -HDF5-1.8.17-*, and contains information on the platforms tested and -known problems in HDF5-1.8.17-*. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.17 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.17 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.17 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.17 (current -release) versus Release 1.8.16 - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.16 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - - -New Features -============ - - Configuration - ------------- - - Cmakehdf5: Added Ability to Run Multiple Make Commands - - Added option --njobs to specify up to how many jobs to launch during - build (cmake) and testing (ctest). - - (AKC - 2015/12/13, HDFFV-9612) - - - Cmakehdf5: Added Szip Support and Verbose Option - - Added --with-szlib to support the Szip library; and - --enable/disable-verbose to display all CMake process output. - - (AKC - 2015/11/16, HDFFV-8932 and DAILYTEST-195) - - - CMake minimum is now 3.1.0. (ADB - 2015/11/14) - - - Large File System (LFS) Support has Changed in the Autotools - - We assume that fseeko and ftello exist. - - The *64 I/O functions and types are no longer explicitly used. - We now rely on a mapping provided by _FILE_OFFSET_BITS (or its - equivalent). 
- - _LARGEFILE(64)_SOURCE is no longer exposed via AM_CPPFLAGS. - - (DER - 2016/03/29, HDFFV-9626 and HDFFV-9541) - - - - Library - ------- - - New API Calls for Searching for External Dataset Storage - - API calls that determine the search path for dataset external - storage were added. H5Pset/get_efile_prefix() API calls were added - to the library. These functions give control over the search path - for dataset external storage that has been configured with - H5Pset_external(). - - Additionally, the HDF5_EXTFILE_PREFIX environment variable can be - used to control the search path. - - (DER - 2016/04/20, HDFFV-8740) - - - - Parallel Library - ---------------- - - None - - - - Tools - ----- - - None - - - - High-Level APIs - --------------- - - C Packet Table API - ------------------ - - Replacement of a Public Function with H5PTcreate - - The existing function H5PTcreate_fl limits applications so they - can use the deflate compression only. The public function - H5PTcreate has been added to replace H5PTcreate_fl. H5PTcreate - takes a property list identifier to provide flexibility on - creation properties. This also removes the following warning: - "deprecated conversion from string constant to "char*" - [-Wwrite-strings]". - - (BMR - 2016/04/25, HDFFV-9708, HDFFV-8615) - - - New Public Functions: H5PTget_dataset and H5PTget_type - - Two accessor functions have been added. H5PTget_dataset returns - the identifier of the dataset associated with the packet table, - and H5PTget_type returns the identifier of the datatype used by - the packet table. - - (BMR - 2016/04/25, HDFFV-8623 patch 3) - - - Regarding #ifdef VLPT_REMOVED - - The #ifdef VLPT_REMOVED blocks have been removed from the packet - table (PT) library source except for the following functions: - + H5PTis_varlen() has been made available again - + H5PTfree_vlen_readbuff() is now H5PTfree_vlen_buff() - - (BMR - 2016/04/25, HDFFV-442) - - C++ Packet Table API - -------------------- - - New Constructor in FL_PacketTable - - An overloaded constructor has been added to FL_PacketTable and - takes a property list identifier to provide flexibility on - creation properties such as compression. - - FL_PacketTable(hid_t fileID, const char* name, hid_t dtypeID, - hsize_t chunkSize = 0, hid_t plistID = H5P_DEFAULT) - - (BMR - 2016/04/25, HDFFV-8623 patch 5) - - - New Member Functions in PacketTable - - Two accessor wrappers were added to class PacketTable. - - PacketTable::GetDataset() returns the identifier of the dataset - associated with the packet table, and PacketTable::GetDatatype() - returns the identifier of the datatype that the packet table uses. - - (BMR - 2016/04/25, HDFFV-8623 patch 4) - - - New Member Functions with "char*" as an Argument - - Overloaded functions were added to provide the "const char*" - argument; the existing version will be deprecated in future - releases. This also removes the following warning: - "deprecated conversion from string constant to "char*" - [-Wwrite-strings]". 
- - (BMR - 2016/04/25, HDFFV-8623 patch 1, HDFFV-8615) - - - Regarding #ifdef VLPT_REMOVED - - The #ifdef VLPT_REMOVED blocks have been removed from the packet - table library source code except for the following functions: - + VL_PacketTable::IsVariableLength() was moved to PacketTable - + VL_PacketTable::FreeReadBuff() is now PacketTable::FreeBuff() - - (BMR - 2016/04/25, HDFFV-442) - - - - Fortran API - ----------- - - None - - - - C++ API - ------- - - New Member Function in DSetCreatPropList - - DSetCreatPropList::setNbit() was added to setup N-bit compression for - a dataset. - - (BMR - 2016/04/25, HDFFV-8623 patch 7) - - - New Overloaded "const" Member Functions in ArrayType - - The two following functions were added: - ArrayType::getArrayNDims() const - ArrayType::getArrayDims() const - to provide const version, and the non-const version was marked - deprecated. In-memory array information, ArrayType::rank and - ArrayType::dimensions, were removed. This is an implementation - detail and should not affect applications. - - (BMR, 2016/04/25, HDFFV-9725) - - - New member function added - - The assignment operator ArrayType::operator= is added because ArrayType - has pointer data members. - - (BMR, 2016/03/07, HDFFV-9562) - - -Support for New Platforms, Languages, and Compilers -=================================================== - - Mac OS X El Capitan 10.11.4 with compilers Apple clang/clang++ - version 7.3.0 from Xcode 7.3, gfortran GNU Fortran (GCC) 5.2.0 - and Intel icc/icpc/ifort version 16.0.2 - - - -Bug Fixes since HDF5-1.8.16 -=========================== - - Configuration - ------------- - - Updated Linux Language Level Flags to Match the Autotools. Removed - Linux-specific Flags from OS X. - - An addition to the flags simply being out of sync with the Autotools, - the Linux flags were used on OS X builds which led to symbols not being - found. Although this was non-fatal and compilation continued (implicit - definitions were used by the compiler and the symbols resolved at link - time), a large number of warnings were raised. - - Linux changes: - - * CHANGED: _POSIX_C_SOURCE (from 199605 to 200112L) - * ADDED: _GNU_SOURCE - * REMOVED: _BSD_SOURCE - * REMOVED: _DEFAULT_SOURCE - - (DER - 2015/12/08, HDFFV-9627) - - - The --enable-clear-file-buffers configure Option was Non-functional - so the Feature was Always Enabled (its default value). - - Regardless of the configure flag, the setting was always enabled when - the Autotools were used to configure HDF5. This was due to the "no" - option being processed after the "*" option in configure.ac so "*" - matched first. CMake was unaffected. - - The option now works correctly. - - NOTE that builders are always advised to leave this option enabled. - When disabled, buffers that are written to disk may contain the - memory's previous contents, which may include secure information. - The performance overhead of the feature (a single memset call per - allocation) is minimal. - - (DER - 2016/02/03, HDFFV-9676) - - - Added a patch to remove '"'s from arguments for MPI compilers that - were causing errors compiling H5lib_settings.c with SGI MPT. - - (LRK - 2016/04/20, HDFFV-9439) - - Library - ------- - - Fixed shared file pointer problem which caused a crash when running a - program provided by a user. - - (VC - 2016/04/01, HDFFV-9469) - - - Fixed some format string warnings that prevent compiling with - -Werror=format-security on gcc. - - These only appeared in error messages and would not cause problems - under normal operation. 
- - (DER - 2016/01/13, HDFFV-9640) - - - Fixed a library segmentation fault when accessing a corrupted - file provided by a user. - - (MSC - 2016/02/19, HDFFV-9670) - - - - Parallel Library - ---------------- - - None - - - - Performance - ------------- - - None - - - - Tools - ----- - - h5dump: Sub-setting Fixed for Dimensions Greater than Two - - When a dataset has more than two dimensions, sub-setting would - incorrectly calculate the data that needed to be displayed. - Added in block and stride calculations that account for dimensions - greater than two. NOTE: lines that have line breaks inserted - because of display length calculations may have index info that - is incorrect until the next dimension break. - - (ADB - 2016/03/07, HDFFV-9698) - - - h5dump: Issue with Argument Segmentation Fault - - When an argument with an optional value was at the end of the command - line with a value, h5dump would crash. Reworked check for remaining - arguments. - - (ADB - 2016/03/07, HDFFV-9570, HDFFV-9684) - - - h5dump: Issue with Default Fill Value - - Added all default cases of fill value to the display of fill value. - - (ADB -, 2016/03/07, HDFFV-9241) - - - h5dump: Clarified Help - - Clarified usage of -O F option in h5dump utility help. - - (ADB - 2016/03/07, HDFFV-9066) - - - h5dump: Issue with Double Free Fault - - Added a check for filename not null before calling free(). - - (ADB - 2016/01/27, HDFFV-9639) - - - VS2015 Release Changed how Timezone was Handled - - Created a function, HDget_timezone, in H5system.c. Replaced - timezone variable usage with function call. - - (ADB - 2015/11/02, HDFFV-9550) - - - - Fortran API - ----------- - - None - - - - C++ API - ------- - - Removal of Obsolete Methods - - The overloaded methods which had parameters that should be const - but were not have been removed. - - (BMR - 2016/01/13, HDFFV-9789) - - - - High-Level APIs: - --------------- - - Fixed Memory Leak in Packet Table API - - Applied user's patch to fix memory leak in the creation of a - packet table. - - (BMR - 2016/04/25, HDFFV-9700) - - - - Fortran High-Level APIs: - ------------------------ - - None - - - - Testing - ------- - - None - - - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. 
- - AIX 6.1 xlc/xlc_r 10.1.0.5 - (NASA G-ADA) xlC/xlC_r 10.1.0.5 - xlf90/xlf90_r 12.1.0.6 - - Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus) Version 4.4.7 20120313 - Version 4.9.3, Version 5.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 15.7-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 Build 20150407 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 2.6.32-504.8.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-229.14.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4) - Version 5.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 Build 20150407 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 (cmake) - Cygwin(CYGWIN_NT-6.1 2.2.1(0.289/5/3) gcc(4.9.3) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 (cmake) - - Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Mac OS X Mt. 
Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (swallow/kite) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.2 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (wren/quail) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.1 from Xcode 7.0 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X El Capitan 10.11.4 Apple clang/clang++ version 7.3.0 from Xcode 7.3 - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (osx1011dev/ox1011test) Intel icc/icpc/ifort version 16.0.2 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -SunOS 5.11 32-bit n y/y n y y y -SunOS 5.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/y n y y n -Windows 8.1 n y/y n y y y -Windows 8.1 x64 n y/y n y y y -Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemeti 10.10.5 64-bit n y/y n y y y -AIX 6.1 32- and 64-bit n y/n n y y y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel n y/y n y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -SunOS 5.11 32-bit y y y y -SunOS 5.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 8.1 y y y y -Windows 8.1 x64 y y y y -Mac OS X Mountain Lion 10.8.5 64-bit y n y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemeti 10.10.5 64-bit y n y y -AIX 6.1 32- and 64-bit y n n y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y -Linux 2.6.32-431.11.2.el6.ppc64 y y y y - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.18-431.11.2.el6 g95 (GCC 4.0.3 (g95 0.94!) 
- #1 SMP x86_64 GNU/Linux - (platypus) - - Windows 7 Visual Studio 2008 (cmake) - - Windows 7 x64 Visual Studio 2008 (cmake) - - Windows 10 Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Windows 10 x64 Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux - gcc (Debian 4.7.2-5) 4.7.2 - GNU Fortran (Debian 4.7.2-5) 4.7.2 - (cmake and autotools) - - Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - (cmake and autotools) - - SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.8.1 - GNU Fortran (SUSE Linux) 4.8.1 - (cmake and autotools) - - Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - (cmake and autotools) - - hopper.nersc.gov PrgEnv-gnu/5.2.40 - gcc (GCC) 4.9.2 20141030 (Cray Inc.) - GNU Fortran (GCC) 4.9.2 20141030 (Cray Inc.) - g++ (GCC) 4.9.2 20141030 (Cray Inc.) - - -Known Problems -============== -* On windows platforms in debug configurations, the VFD flush1 tests will fail - with the split and multi VFD drivers. These tests will display a modal debug - dialog which must be answered or wait for the test timeout to expire. - (ADB - 2014/06/23 - HDFFV-8851) - -* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv - catches some undefined behavior in the alignment algorithm of the macro DETECT_I - in H5detect.c (Issue 8147). Since the algorithm is trying to detect the alignment - of integers, ideally the flag -fcatch-undefined-behavior shouldn't to be used for - H5detect.c. In the future, we can separate flags for H5detect.c from the rest of - the library. (SLU - 2013/10/16) - -* Make provided by Solaris fails in "make check". Solaris users should use - gmake to build and install the HDF5 software. (AKC - 2013/10/08 - HDFFV-8534) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD with the - native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the - ports (and probably gcc releases after that). - (QAK - 2012/10/19) - -* The following h5dump test case fails in BG/P machines (and potentially other - machines that use a command script to launch executables): - - h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0" - tno-subset.h5 - - This is due to the embedded spaces in the dataset name being interpreted - by the command script launcher as meta-characters, thus passing three - arguments to h5dump's -d flag. The command passes if run by hand, just - not via the test script. - (AKC - 2012/05/03) - -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release. - (DER - 2011/10/14 - HDFFV-8235) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in a future release. - (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. 
- (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. - (AKC - 2011/05/07) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the fortran and hl/fortran - tests fail. the issue. HL and C++ shared libraries should now be - working as intended, however. - (MAM - 2011/04/20) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. 
- However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - - -%%%%1.8.16%%%% - - -HDF5 version 1.8.16 released on 2015-11-10 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.15 and -HDF5-1.8.16, and contains information on the platforms tested and -known problems in HDF5-1.8.16. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.16 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.16 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.16 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.16 (current -release) versus Release 1.8.15": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.15 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - -New Features -============ - - Configuration and Build - ------------- - - The thread-safety + high-level library combination has been marked - as "unsupported" in the Autotools - - The global lock used by the thread-safety feature has never been - raised to the high-level library level, making it possible that - the library state could change if a context switch were to occur in - a high-level library call. Because of this, the combination of - thread-safety and high-level library is officially unsupported by - The HDF Group. - - In the past, although this combination has never been supported, this - was not enforced by the build systems. These changes will cause an - Autotools configure step to fail if --enable-threadsafe and - --enable-hl are combined unless additional options are specified. - Since the high-level library is built by default, this means that - these extra configuration options will need to be used any time - --enable-threadsafe is selected. - - To build with --enable-threadsafe, either: - - 1) Use --disable-hl to disable the high-level library (recommended) - - 2) Use --enable-unsupported to build the high-level library with - the thread-safety feature. - - (DER - 2015/09/10 HDFFV-8719) - - - Using CMake now builds both static and shared libraries. - - The CMake files have been updated to build both static and shared - libraries, with tools only built statically. 
The packaging of the - libraries and tools will include cmake-config files that allows - projects to choose either shared or static (default) libraries - to be found with the find_package command using the COMPONENTS - keyword and a list of components. The imported libraries will - include any interface specific settings and dependent libraries. - - The default setting for BUILD_SHARED_LIBS has changed from OFF - to ON, which builds both static and shared libraries. The static - libraries are always built because of tools requirements. - - (ADB - 2015/08/24 HDFFV-5881) - - - Inline functions now correctly annotated with Autotools builds. - - The method used to detect the appropriate inline markup scheme was - nonfunctional in Autotools builds. The Autotools have been modified - to correctly detect the compiler's inline markup symbol and apply it - to the source. Note that only a very small number of internal - functions are marked inline so this was not a very big change or - likely to significantly affect performance. - - As a part of this change, the H5_inline symbol no longer appears in - H5pubconf.h. - - (DER - 2015/08/13 HDFFV-9119, HDFFV-9421) - - - Removed obsolete/unmaintained files from config/ - - Several files were removed from the config directory. These files - represent old operating systems, were no longer necessary, and/or - were no longer maintained. configure.ac was updated to reflect the - removed files. - - Removed: - - craynv - dec-flags - hpux11.23 - ia64-linux-gnu - nec-superux14.1 - sv1-cray - x86_64-redstorm-linux-gnu - powerpc-ibm-aix5.x - - As a part of this work, a few lines that deal with locating the - sys/fpu.h header on SGI machines and some OSF/1 configure lines - were also removed. The Solaris config was also renamed to not have - a version number since the version number was ignored by configure - as it applies to all Solaris versions. - - (DER - 2015/09/04 HDFFV-9116) - - - Removed the FP_TO_INTEGER_OVERFLOW_WORKS macro/defines from the library - - This was for working around bugs in the Cray X1 compiler, which is no - longer supported. - - (DER - 2015/09/09 HDFFV-9191) - - - Removed the H5_SW_ULONG_TO_FP_BOTTOM_BIT_WORKS and - H5_FP_TO_ULLONG_BOTTOM_BIT_WORKS symbols and associated code. - - H5_SW_ULONG_TO_FP_BOTTOM_BIT_WORKS was a work-around on old 64-bit - SGI and Solaris systems. - - H5_FP_TO_ULLONG_BOTTOM_BIT_WORKS was a work-around for old PGI - compilers on Linux. - - Neither of these were used in any current library code and only appeared - in the dt_arith test. - - (DER - 2015/09/09 HDFFV-9187) - - - Removed CONVERT_DENORMAL_FLOAT symbol and associated code from the - library. - - This was only set in configure files for Cray and NEC computers. These - config files no longer exist so there is no effect on currently - supported platforms. - - (DER - 2015/09/09 HDFFV-9188) - - - Removed _BSD_SOURCE and _DEFAULT_SOURCE from configure.ac - - These are old BSD-compatibility symbols that are no longer needed by - the library. - - (DER - 2015/09/10 HDFFV-9079) - - - Removed HW_FP_TO_LLONG_NOT_WORKS symbol and associated code from the - library. - - This was part of a work-around for the VS.NET 2003 compiler, which is - no longer supported. - - (DER - 2015/09/10 HDFFV-9189) - - - Removed the BAD_LOG2_CODE_GENERATED symbol and associated code from the - library. - - This was an IRIX work-around. 
- - (DER - 2015/09/11 HDFFV-9195) - - - Decoupled shared object version numbers for wrapper libraries from the - shared object version number for the HDF5 library. These will be - maintained on an individual basis according to the interface changes - specific to these wrapper libraries. - - For HDF5 1.8.16 the shared object version numbers were changed from - 10.0.1 to 10.1.0 for the HDF5 library due to added APIs. For the C++ - wrapper library they were changed from 10.0.1 to 11.0.0 due to changes - in existing APIs. For all other wrapper libraries the versions were - changed from 10.0.1 to 10.0.2 because while the APIs had no changes - there have been changes in code that did not result in changes to their - interfaces. - - (LRK - 2015/10/28) - - Library - ------- - - - H5F_ACC_DEBUG flag for H5Fopen/create: functionality removed - - The symbol was used to emit some extra debugging information - for HDF Group developers in the multi VFD. The underlying - functionality has been removed due to disuse. The symbol - remains defined since it was visible in H5Fpublic.h but it - has been set to zero and has no effect anywhere in the library. - - (DER - 2015-05-02, HDFFV-1074) - - - New public API call: H5is_library_threadsafe() - - This API call indicates if the library was built with thread- - safety enabled. - - (DER - 2015-09-01, HDFFV-9496) - - Parallel Library - ---------------- - - None - - Tools - ----- - - None - - High-Level APIs - --------------- - - None - - Fortran API - ----------- - - None - - - C++ API - ------- - - Class H5::ObjCreatPropList is added for the object creation property - list class. - - Class H5::ObjCreatPropList is derived from H5::PropList and is a - baseclass of H5::DSetCreatPropList. Additional property list classes - will be derived from H5::ObjCreatPropList when they are added to the - library in future releases. - - (BMR, 2015/10/13, Part of HDFFV-9169) - - - New Wrappers for C Functions H5P[s/g]et_attr_phase_change and - H5P[s/g]et_attr_creation_order. - - Wrappers were added to class H5::ObjCreatPropList for the C Functions - H5Pset_attr_phase_change: H5::ObjCreatPropList::setAttrPhaseChange - H5Pget_attr_phase_change: H5::ObjCreatPropList::getAttrPhaseChange - H5Pset_attr_creation_order: H5::ObjCreatPropList::setAttrCrtOrder - H5Pget_attr_creation_order: H5::ObjCreatPropList::getAttrCrtOrder - - (BMR, 2015/10/13, Part of HDFFV-9167 and HDFFV-9169) - - -Support for New Platforms, Languages, and Compilers -=================================================== - - Added VS2015 with Intel Fortran 16 to supported Windows 7 platforms - - -Bug Fixes since HDF5-1.8.15 -=========================== - - Configuration - ------------- - - - CMake test for long long printf format improved - - The CMake configuration test for determining the printf format string - for printing a long long integer was fixed. The test would crash - when executed with VS2015. - - (ADB - 2015-10-21 HDFFV-9488) - - Library - ------- - - VS2015 removed global variable timezone - - The usage of the global variable timezone was modified for VS2015 by - adding an alias to Windows builds. - - (ADB - 2015-10-23 HDFFV-9550) - - - Fix potential error in H5Iclear_type - - If the ID type's close callback could close another ID of the same type, - H5Iclear_type could occasionally run into problems due to the inner - workings of the skip list package. This could potentially cause an - error on library shutdown without calling H5Iclear_type directly. This - issue has been fixed. 
- - (NAF - 2015-08-12) - - - Fix uninitialized memory in dataspace selection code - - When creating a dataspace with H5Screate and setting the extent with - H5Sextent_copy, the selection offset was not initialized, potentially - causing invalid I/O. There may be other cases where this happened. - Modified the library to always initialize the offset. - - (NAF - 2015-09-08) - - - Truncate file in H5Fflush() if EOA != EOF to avoid file - corruption in certain scenarios - - In the following scenario, the resulting HDF5 file would be - incorrectly corrupted because the truncate operation - was at some point wrongly moved out of the flush operation: - - Create a new file with a single dataset. - - Write parts of the dataset (make sure that some values at - the end of the dataset are not initialized). - - Flush the file. - - Crash the program. - - Try to open the file with h5dump or h5debug, but the - resulting file is corrupted. - - (MSC - 2015-06-15 HDFFV-9418) - - - Parallel Library - ---------------- - - - (XYZ - YYYY/MM/DD HDFFV-####) - - Performance - ------------- - - None - - Tools - ----- - - VS2015 changed the default format for printing of exponents - - VS2015 default format for exponents changed with the elimination - of the leading '0'. CMake now tests for the VS2015 compiler and - adjusts which reference files are used by tests. - - (ADB - 2015-10-23 HDFFV-9550) - - - Fixed h5repack with user-defined filters - - h5repack would throw a buffer overrun exception on Windows when - parsing a user-defined filter ID of 5 digits. A local variable in - the parse routine was not of sufficient size. - - (ADB - 2015/09/01 HDFFV-9515) - - Fortran API - ------------ - - None - - - C++ API - ------ - - Removed memory leaks - - The static global constant objects were changed to constant references - referencing dynamically allocated objects. This ensures that the clean-up - process in the C++ library occurs before the termination of the C library - and prevents memory leaks because the previous global constants were not - properly deleted before the C library termination. - - (BMR, 2015/10/13, HDFFV-9529) - - - Fixed the problem about identifiers being closed prematurely. - - The C++ library needs to increment the ID's reference counter when it is - duplicated in the form of C++ objects, but not when the ID is obtained - from a C function. With this approach, both problems, prematurely - closing ID's and memory leaks due to ID's not being closed, should be - eliminated. - - (BMR, 2015/10/15, HDFFV-7947) - - - High-Level APIs: - ------ - - None - - - Fortran High-Level APIs: - ------------------------ - - None - - - Testing - ------- - - None - - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. 
- - AIX 6.1 xlc/xlc_r 10.1.0.5 - (NASA G-ADA) xlC/xlC_r 10.1.0.5 - xlf90/xlf90_r 12.1.0.6 - - Linux 2.6.32-573.3.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus) Version 4.4.7 20120313 - Version 4.8.4, Version 5.2.0 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 15.7-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 Build 20150407 - MPICH 3.1.4 compiled with GCC 4.9.3 - - Linux 2.6.32-504.8.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - Linux 3.10.0-229.14.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (kituo/moohan) Version 4.8.3 20140911 (Red Hat 4.8.3-9) - Version 5.2.0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.3.187 Build 20150407 - MPICH 3.1.4 compiled with GCC 4.9.3 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - Cygwin(CYGWIN_NT-6.1 2.2.1(0.289/5/3) gcc(4.9.3) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Visual Studio 2015 w/ Intel Fortran 16 (cmake) - - Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Mac OS X Mt. 
Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (swallow/kite) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.2.0 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (wren/quail) Intel icc/icpc/ifort version 15.0.3 - - Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.0 from Xcode 7.0.0 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3 - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -SunOS 5.11 32-bit n y/y n y y y -SunOS 5.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/y n y y n -Windows 8.1 n y/y n y y y -Windows 8.1 x64 n y/y n y y y -Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemeti 10.10.5 64-bit n y/y n y y y -AIX 6.1 32- and 64-bit n y/n n y y y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel n y/y n y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -SunOS 5.11 32-bit y y y y -SunOS 5.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 8.1 y y y y -Windows 8.1 x64 y y y y -Mac OS X Mountain Lion 10.8.5 64-bit y n y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemeti 10.10.5 64-bit y n y y -AIX 6.1 32- and 64-bit y n n y -CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y -CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y -CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y -CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y -CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y -Linux 2.6.32-431.11.2.el6.ppc64 y y y y - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.18-431.11.2.el6 g95 (GCC 4.0.3 (g95 0.94!) 
- #1 SMP x86_64 GNU/Linux - (platypus) - - Mac OS X El Capitan 10.11 Apple clang/clang++ version 7.0.0 from Xcode 7.0.1 - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (VM) - - Windows 7 Visual Studio 2008 (cmake) - - Windows 7 x64 Visual Studio 2008 (cmake) - - Windows 10 Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Windows 10 x64 Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux - gcc (Debian 4.7.2-5) 4.7.2 - GNU Fortran (Debian 4.7.2-5) 4.7.2 - (cmake and autotools) - - Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - (cmake and autotools) - - SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.8.1 - GNU Fortran (SUSE Linux) 4.8.1 - (cmake and autotools) - - Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - (cmake and autotools) - - hopper.nersc.gov PrgEnv-gnu/5.2.40 - gcc (GCC) 4.9.2 20141030 (Cray Inc.) - GNU Fortran (GCC) 4.9.2 20141030 (Cray Inc.) - g++ (GCC) 4.9.2 20141030 (Cray Inc.) - - -Known Problems -============== -* On Windows platforms in debug configurations, the VFD flush1 tests will fail - with the split and multi VFD drivers. These tests will display a modal debug - dialog which must be answered or wait for the test timeout to expire. - (ADB - 2014/06/23 - HDFFV-8851) - -* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv - catches some undefined behavior in the alignment algorithm of the macro DETECT_I - in H5detect.c (Issue 8147). Since the algorithm is trying to detect the alignment - of integers, ideally the flag -fcatch-undefined-behavior shouldn't to be used for - H5detect.c. In the future, we can separate flags for H5detect.c from the rest of - the library. (SLU - 2013/10/16) - -* Make provided by Solaris fails in "make check". Solaris users should use - gmake to build and install the HDF5 software. (AKC - 2013/10/08 - HDFFV-8534) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD with the - native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the - ports (and probably gcc releases after that). - (QAK - 2012/10/19) - -* The following h5dump test case fails in BG/P machines (and potentially other - machines that use a command script to launch executables): - - h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0" - tno-subset.h5 - - This is due to the embedded spaces in the dataset name being interpreted - by the command script launcher as meta-characters, thus passing three - arguments to h5dump's -d flag. The command passes if run by hand, just - not via the test script. - (AKC - 2012/05/03) - -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release. - (DER - 2011/10/14 - HDFFV-8235) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in a future release. 
- (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. - (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. - (AKC - 2011/05/07) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the Fortran and HL/Fortran - tests fail. HL and C++ shared libraries should now be working as intended, - however. - (MAM - 2011/04/20) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* All of the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. 
- There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian systems. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - - -%%%%1.8.15%%%% - - -HDF5 version 1.8.15 released on 2015-05-04 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.14 and -HDF5-1.8.15, and contains information on the platforms tested and -known problems in HDF5-1.8.15. - -Links to the HDF5 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for HDF5 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document at this location: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.14 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Configuration - ------------- - - CMake - - Improvements made to the CMake build system. - - The default options were changed to align with the Autotools configure - defaults. CMake configure files now support components when packaged - with CPack. Windows CPack supports WiX packaging, and will look for - WiX and NSIS in the standard locations. - - The CMake minimum has been changed to 3.1. - - (ADB - 2015/04/01 HDFFV-8074, 8968, 9006) - - - cmakehdf5 for Cmake building. - Added configure options to support the building of Fortran or CXX API, - to enable/disable testings. Use "cmakehdf5 --help" for details. - (AKC - 2014/12/09 HDFFV-8932) - - - Building Shared and Parallel Made Explicit - - When --enable-parallel is specified, configure used to disable - shared by default. - - Removed the restriction for building shared when parallel is - enabled. --disable-shared has to be used explicitly if user - wishes to disable shared libraries. - - (MSC - 2015/02/19 HDFFV-9069) - - - Inferring Parallel Compilers - - configure was trying to infer if a compiler is a parallel - compiler with MPI support and enable parallel even if the user - did not explicitly enable parallel. This should not happen. - - Disabled inferring parallel compilers to enable parallel HDF5 - build. --enable-parallel has to be used explicitly to build - parallel HDF5 regardless of the compiler type being used. - - (MSC - 2015/02/19 HDFFV-9068) - - - Large File Support Configuration Option - - Removed the option to enable or disable large file support. It will - always be enabled. 
- - (MSC - 2015/02/19 HDFFV-9097) - - - Removed Configuration Feature - - When configure detected that the CodeWarrior compiler was being used it - would define a symbol that caused a test in test/tfile.c to be skipped - due to a broken CodeWarrior open() command. - - Since this only masks the problem instead of fixing it and we don't - support CodeWarrior anyway, this functionality was removed. - - (DER - 2015/02/21, HDFFV-9080) - - - VMS Build/Test Files Have Been Removed - - HDF5 no longer supports VMS, and the files were getting out of date. - Since we have no access to a VMS machine, there is no way for us to - maintain them. - - A Subversion tag was created at: - - https://svn.hdfgroup.uiuc.edu/tags/vms_last_support_1_8 - - immediately before removing the files. - - (DER - 2015-02-26, HDFFV-9147) - - - Removal of --with-default-vfd configure Option - - In theory, this option was intended to allow setting a default - VFD that would be used by the library. In practice, the feature - only accepted the POSIX (SEC2) VFD (already the default) and - the stdio VFD (a demo VFD not intended for production use). The - inability to pass key VFD parameters at configure time limits the - full implementation of this feature, so it was retired. - - (DER - 2015-02-26, HDFFV-9081) - - - Direct VFD configure Behavior - - The configure options for Linux now allow the Direct VFD to build - without passing additional compiler options/defines like _GNU_SOURCE. - Passing --enable-direct-vfd is now all that is needed to enable - the feature. - - The Direct VFD is now disabled by default since it is intended for - specialized audiences. It was previously enabled by default, but the - configure script did not set correct POSIX levels, etc. making this - a moot point. - - Note that the Direct VFD can only be configured on Linux when - the O_DIRECT flag to open()/create() and posix_memalign() function - are available. This is unchanged from previous behavior. - - (DER - 2015-02-26, HDFFV-9057, 7567, 9088, 7566) - - - _POSIX_C_SOURCE, _GNU_SOURCE, and _BSD_SOURCE No Longer Exported - to h5cc and Other Compiler Wrappers - - The _POSIX_C_SOURCE, _GNU_SOURCE, and _BSD_SOURCE definitions are - not required for using API functions and may conflict with user - code requirements. - - (DER - 2015-03-08, HDFFV-9152) - - - Removed the --enable-filters Option from configure - - This option allowed the user to disable selected internal filters, - presumably to make the library smaller. It has been removed since - it saved little space (the internal filters are small with respect - to the overall library size) and was not generally extendible to - the library at large due to the large number of #ifdefs that would - be required. - - Note that this features applied to internal filters such as shuffle - and n-bit and not external filters like gzip or Szip. Those are still - enabled or disabled via their own configure options. - - (DER - 2015-03-08, HDFFV-9086) - - - Removed Obsolete Time Functionality from configure and the C Library - - The library contained some residual functionality from obsolete - time zone handling code. This has been removed, and the configure - checks for the time functions have been cleaned up. - - * Lumped all the time functionality together in configure.ac. - This was previously more spread out due to Solaris issues - with the ordering of certain checks. - - * Removed processing that handles __tm_gmtoff members of struct - tm. (libc-4) - - * Removed BSDgettimeofday(). 
(IRIX 5.3) - - * Removed timezone struct handling in gettimeofday() (considered - harmful). - - Note that the HDF5 Library stores timestamps in a platform-independent - manner, so old files can still be read. This only affects converting - system time to HDF5 timestamps. - - The library currently uses the tm_gmtoff member of the tm struct - (preferred, if available) or the timezone global variable to - construct HDF5 timestamps. - - (DER - 2015-03-09, HDFFV-9083 and 9085) - - - Added -D_DEFAULT_SOURCE to CPPFLAGS on Linux Systems - - This is the replacement for -D_BSD_SOURCE in versions of glibc since 2.19. - Since both are defined, it should work for all versions of glibc. Defining - both suppresses the warning about defining _BSD_SOURCE. - - (NAF - 2015-04-02, HDFFV-9079) - - Library - ------- - - Added Memory Allocation Functions that Use the Library's Allocator - - HDF5 filters may need to allocate or resize the buffer that is passed - to them from the library. If the filter has been compiled separately - from the library, it and the library may use different memory - allocation libraries for the (re)allocation and free calls. This can - cause heap corruption and crashes. This is particularly a problem on - Windows since each C run-time library is implemented as a separate - shared library, but can also show up on POSIX systems when debug or - high-performance allocation libraries are in use. - - Two new functions (H5allocate_memory() and H5resize_memory()) were - added to the HDF5 C library. These functions have the same semantics as - malloc/calloc and realloc, respectively. Their primary purpose is to - allow filter authors to allocate or resize memory using the same - memory allocation library as the HDF5 library. Filter authors are - highly encouraged to use these new functions in place of malloc, - calloc, and realloc. They should also use the H5free_memory() call when - freeing memory. - - Note that the filters provided with the library (zlib, szip, etc.) do - not experience the problems that these new functions are intended to - fix. This work only applies to third-party filters that are compiled - separately from the library. - - (DER - 2015-04-01, HDFFV-9100) - - - H5Pset_istore_k and H5Pset_sym_k - - These two functions didn't check the value of the input parameter "ik". - When 2*ik exceeded 2 bytes of storage, data was lost in the file; - for example, some chunks would be overwritten. - - Added validation of "ik" to not exceed the max v1 btree entries (2 bytes) - to these two routines. - - (VC - 2015-03-24, HDFFV-9173) - - - Added Functions to Control the Value of H5PL_no_plugin_g without - Using an Environment Variable - - Sometimes it is necessary for an application to disable the use of - dynamically loaded plugin libraries without requiring the library to - be built with plugin support disabled or to set an environment - variable to disable plugin support globally. - - Two new functions (H5PLset_loading_state() and H5PLget_loading_state()) - were added to the HDF5 C Library. These functions require a parameter - that indicates which type of dynamically loaded plugin is enabled or - disabled. - - (ADB - 2015-03-17, HDFFV-8520) - - Parallel Library - ---------------- - - MPI_Finalize and HDF5 Library Shutdown - - Calling HDF5 routines after MPI_Finalize has been closed should - not be done, since those routines might call MPI functions that - would not be possible to do after finalizing the MPI library. 
- - Attached an attribute destroy callback to MPI_COMM_SELF that - shuts down the HDF5 library when MPI_COMM_SELF is destroyed, - in other words, on MPI_Finalize. This should fix several issues - that users see when they forget to close HDF5 objects before - calling MPI_Finalize(). - - (MSC - 2015/02/25, HDFFV-883) - - Tools - ----- - - None - - High-Level APIs - --------------- - - None - - Fortran API - ----------- - - Added Global Variables - - These new global variables are equivalent to the C definitions - without the '_F': - - H5G_UDLINK_F - H5G_SAME_LOC_F - H5O_TYPE_UNKNOWN_F - H5O_TYPE_GROUP_F - H5O_TYPE_DATASET_F - H5O_NAMED_DATATYPE_F - H5O_TYPE_NTYPES_F - - (MSB - 2015/02/03, HDFFV-9040) - - - C++ API - ------- - - New Wrappers for C Functions H5P[s/g]et_libver_bounds - - Wrappers were added to class H5::FileAccPropList for the - C Functions H5Pget_libver_bounds and H5Pset_libver_bounds. - - (BMR, 2015/04/06, Part of HDFFV-9167) - - - New Wrappers to Get the Object Header's Version - - The following wrappers are added to class H5::CommonFG - Returns the object header version of an object in a file or group, - given the object's name. - - unsigned childObjVersion(const char* objname) const; - unsigned childObjVersion(const H5std_string& objname) const; - - (BMR, 2015/04/06) - - - New DataType Constructor - - Added a DataType constructor that takes a PredType object, and this - constructor will invoke H5Tcopy to generate another datatype id - from a predefined datatype. - - (BMR, 2015/04/06) - - -Support for New Platforms, Languages, and Compilers -=================================================== - - Support for Linux 3.10.0-123.20.1.el7 added (LK - 2015/04/01) - - Support for Mac OS X Yosemite 10.10 added (AKC - 2015/03/04, HDFFV-9007) - - Support for AIX 6.1 added and AIX 5.3 is retired. (AKC - 2015/01/09) - -Bug Fixes since HDF5-1.8.14 -=========================== - - Configuration - ------------- - - Make uninstall generated "test: argument expected". - The error is due to $EXAMPLETOPDIR is used without setting a value first. - - Fixed by assign it with the proper value. - - (AKC - 2015/04/29, HDFFV-9298) - - - Windows Installer Incorrect Display of PATH Environment Variable - - In the Windows installer, the dialog box where the user can elect to - add the product's bin path to the %PATH% environment variable displayed - an incorrect path. This path was missing the C:\Program Files part - and used the POSIX file separator '/' before the bin (/bin, - instead of \bin). - - The dialog box text was changed to simply say that the product's bin - path would be added instead of explicitly displaying the path. - This is in line with most installers. The reason for not fixing the - displayed path instead is that it is difficult to pass the correct - path from CPack to the NSIS installer for display. - - Note that this was never a code issue - it was just a display - problem. The installer always did the right thing when updating the - environment variable. - - (DER - 2014/11/14, HDFFV-9016) - - Library - ------- - - Incorrect Usage of List in CMake COMPILE_DEFINITIONS set_property - - The CMake command set_property with COMPILE_DEFINITIONS property - needs a quoted semi-colon separated list of values. CMake will - transform the list to a series of -D{value} for the compile. 
- - (ADB - 2014/12/09, HDFFV-9041) - - - Fixed Compile Errors on Windows w/ Visual Studio and CMake When - UNICODE is Defined - - The HDF5 Library could not be built on Windows with Visual Studio when - UNICODE was defined. This was due to the incorrect use of the TEXT() - macro and some Win32 API functions that take TCHAR parameters. The faulty - code was a part of the filter plugin functionality. This was a - compile-time error that only affected users who build HDF5 from source - and define UNICODE, usually when HDF5 is being built as a part of a - larger product. There were no run-time effects. - - These errors caused no problems when UNICODE was not defined. HDF5 is - normally not built with UNICODE defined and the binaries were - unaffected. - - The fix was to remove the TEXT() macro and explicitly use the - 'A' form of the Win32 API calls, which expect char strings instead of - wchar_t strings. - - Note that HDF5 currently does not support Unicode file paths on Windows. - - (DER - 2015/02/22, HDFFV-8927) - - - Addition of Error Tracing Functionality to Several C API Calls - - A bug in a text processing script caused API calls that return a - pointer to not receive error tracing macros/functionality. - - The bug has been corrected and error tracing functionality has been - added to the affected API calls. These functions will now correctly - print trace information when library errors are encountered. - - (DER - 2015/02/26, HDFFV-9141) - - - H5Rdereference Now Checks for HADDR_UNDEF or Uninitialized References - - When passed HADDR_UNDEF or uninitialized references, the previous - behavior of H5Rdereference was to continue to process the reference - as a valid address. - - H5Rdereference was changed to return immediately (with an error - message) if the references are HADDR_UNDEF or uninitialized. - - (MSB - 2015/3/10, HDFFV-7959) - - - Fixed Bugs in H5Sextent_copy - - H5Sextent_copy would not free the previous extent, resulting in a memory - leak. Also, H5Sextent_copy would not update the number of elements - selected if the selection was "all", causing various problems. These - issues have been fixed. - - (NAF - 2015/04/02) - - - Parallel Library - ---------------- - - Fixed a Potential Memory Error - - Fixed a potential memory error when performing parallel I/O on a - dataset with a single chunk, and at least one process has nothing - to do. - - (NAF - 2015/02/16) - - - Parallel Test Problem Fixed - - Fixed problem with parallel tests where they failed beyond a - certain number of ranks. All tests should work for any arbitrary - number of ranks. - - (MSC - 2014/11/06, HDFFV-1027,8962,8963) - - - MPE Support - - Enabling MPE was causing HDF5 build to fail. Support for it was - dropped at some point in time. - - Fixed problem with enabling MPE. Users should use the community - maintained MPE on github (http://git.mpich.org/mpe.git/). - - (MSC - 2015/02/20, HDFFV-9135) - - Performance - ------------- - - None - - Tools - ----- - - h5repack crashed on enumerated 8-bit type. - - Previous version 1.8.14 introduced an error that caused the reading - of enumerated 8-bit type nested in compound type to fail. - - Fixed library code responsible for reading the particular type. - (AKC - 2015/03/31, HDFFV-8667) - - - h52gif crashed on non-8bit images. - - h52gif crashed if instructed to convert images other than 8bit images. - - h52gif could handle only 8bit images. Added code to detect non-8bit - images and flag them as failure. Updated the tool document page to reflect - the limit.
- (AKC - 2015/03/31, HDFFV-8957) - - - perform/benchpar.c retired. - - benchpar.c has not been built for a long time and its original purpose - is not needed any more. - (AKC - 2014/12/19, HDFFV-8156) - - - Source perform/ directory moved to tools/perform. - The perform directory is moved to tools/perform for easier maintenance. - (AKC - 2014/12/17, HDFFV-9046) - - Fortran API - ------------ - - Fortran Fails with --enable-fortran2003 and Intel 15.x Compilers - - Added BIND(C) to the offending APIs. - - The Fortran Library (--enable-fortran2003) now works using Intel 15.x - without the need for any additional compilers flags. - - (MSB - 2015/1/26, HDFFV-9049) - - - h5tenum_insert_f Does Not Work with Default 8 Byte Integers - (xlf compiler) - - In the Fortran 90 API, 'value' is no longer cast into the C int type. - Therefore, if h5tenum_insert_f is passed an 8 byte integer (via -i8) - then 'value' is written as the same type as the default Fortran - integer type (which can be 8 bytes). - - A new Fortran 2003 API was added which is more in line with the C - API and users are strongly encouraged to use the Fortran 2003 API - instead of the Fortran 90 API. - - SUBROUTINE h5tenum_insert_f(type_id, name, value, hdferr) - INTEGER(HID_T) , INTENT(IN) :: type_id - CHARACTER(LEN=*), INTENT(IN) :: name - TYPE(C_PTR) , INTENT(IN) :: value - INTEGER, INTENT(OUT) :: hdferr - - (MSB - 2015/2/19, HDFFV-8908) - - - Some Fortran APIs Never Returned the Error State - - Some Fortran APIs never returned the error state: they - would always return a positive number. The APIs include - the following: - - h5fget_file_image_f - h5lget_name_by_idx_f - h5oget_comment_by_name_f - - They were corrected to return a negative number as described in - the Reference Manual if an error occurred. - - (MSB - 2015/3/19, HDF5-239) - - - Fixed h5pget_class_f - - h5pget_class_f never correlated the class identifier to the property - list class name as indicated in the HDF5 Reference Manual; it instead - returned a property list class identifier as an INTEGER. The INTEGER - needed to be of type INTEGER(HID_T) to be correct. - - The h5pget_class_f API was changed to return an INTEGER(HID_T) - property list class identifier instead of an INTEGER. This mimics the - intended behavior of the C API. - - (MSB - 2015/3/16, HDFFV5-9162) - - C++ API - ------ - - Combined Two H5File::getObjCount Overloaded Methods - - The following two methods - - ssize_t getObjCount(unsigned types) const; - ssize_t getObjCount() const; - - were combined into one: - - ssize_t getObjCount(unsigned types = H5F_OBJ_ALL) const; - - (BMR - 2015/04/06) - - - Many Warnings Were Removed - - Many warnings such as conversion, unused variables, missing base - class initialization, and initializing base classes in wrong order - were removed. - - (BMR, 2015/04/06) - - - Functionality Deprecation - - The following two constructors of classes AbstractDs, IdComponent, - H5Location, and H5Object are no longer appropriate after the data member - "id" had been moved from IdComponent to the sub-classes in previous - releases. - - (const hid_t h5_id); - (const & original); - - The copy constructors were no-op and removed in 1.8.15. The other - constructors will be removed from 1.10 release, and then from 1.8.17 - if their removal does not cause any problems. - - (BMR, 2015-04-06) - - - High-Level APIs: - ------ - - Suppress Warnings from Flex/Bison-generated Code - - Warning suppression #pragmas, etc. have been added to H5LTparse.c and - H5LTanalyze.c. 
We have no control over this code since it's created by - a generator. - - (DER - 2015/03/08 - HDFFV-9149) - - - Changed hdf5_hl.h to Include the HDF5 Main Library "hdf5.h" - - User's no longer need to include both hdf5_hl.h and hdf5.h - - (MSB - 2015/2/14, HDFFV-8685) - - - - H5PTcreate_fl Does Not Convert to Memory Datatype - - H5PTcreate_fl now converts to the table's native memory datatype - to fix the problem of handling BE and LE packet tables. - - (MSB - 2015/2/26 - HDFFV-9042) - - - Fix for H5LT Attribute Functions - - H5LT attribute functions fail to create attributes whose name - is a substring of an existing attribute. - - H5LT attribute functions can now create attributes whose name - is a substring of an existing attribute. - - (MSB - 2015/2/24, HDFFV-9132) - - - Fortran High-Level APIs: - ------------------------ - - - Internal Library Fix for Missing Argument Declaration - - In Interface block for h5tbmake_table_c, "max_char_size_field_names" - is listed as an input, but in the argument definitions it is - "INTEGER :: max_char_size". This caused no known problems with the - Fortran HL API. - - Fixed missing argument definition. - - (MSB - 2015/2/18, HDFFV-8559) - - - Testing - ------- - - None - - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. - - AIX 6.1 xlc/xlc_r 10.1.0.5 - (NASA G-ADA) xlC/xlC_r 10.1.0.5 - xlf90/xlf90_r 12.1.0.6 - - Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP i686 i686 i386 compilers for 32-bit applications; - (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-55) - Version 4.8.4, 4.9.2 - PGI C, Fortran, C++ Compilers for 32-bit - applications; - Version 14.10-0 - Intel(R) C, C++, Fortran Compiler for 32-bit - applications; - Version 15.0.1.133 (Build 20141023) - - Linux 2.6.18-371.6.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers for 64-bit applications; - (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-55) - Version 4.8.4, 4.9.2 - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64; - Version 15.0.1.133 Build 20141023 - - Linux 2.6.32-431.11.2.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus) Version 4.4.7 20120313 - Version 4.8.2, Version 4.9.2 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 14.10-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 15.0.1.133 Build 20141023 - - Linux 3.10.0-123.20.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (moohan) Version 4.8.2 20140120 (Red Hat 4.8.2-16) - Intel(R) C Intel(R) 64 Compiler XE for - applications running on Intel(R) 64, - Version 15.0.1.133 Build 20141023 - - Linux 2.6.32-431.29.2.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2008 (cmake) - Visual Studio 2010 w/ Intel Fortran 14 (cmake) - Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - Cygwin(CYGWIN_NT-6.1 1.7.34(0.285/5/3) gcc(4.9.2) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2008 (cmake) - 
Visual Studio 2010 w/ Intel Fortran 14 (cmake) - Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake) - Visual Studio 2013 w/ Intel Fortran 15 (cmake) - - Mac OS X Mt. Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (swallow/kite) Intel icc/icpc/ifort version 14.0.2 - - Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.1.1 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (wren/quail) Intel icc/icpc/ifort version 14.0.2 - - Mac OS X Yosemite 10.10.2 Apple clang/clang++ version 6.0 from Xcode 6.1.1 - 64-bit gfortran GNU Fortran (GCC) 4.9.2 - (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.1 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -SunOS 5.11 32-bit n y/y n y y y -SunOS 5.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/y n y y n -Windows 8.1 n y/y n y y y -Windows 8.1 x64 n y/y n y y y -Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y -Mac OS X Yosemeti 10.10.2 64-bit n y/y n y y y -AIX 6.1 32- and 64-bit n y/n n y y y -CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y -CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y -CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y -CentOS 5.9 Linux 2.6.18 x86_64 GNU n y/y n y y y -CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y -CentOS 6.4 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y -CentOS 6.4 Linux 2.6.32 x86_64 PGI n y/y n y y y -CentOS 7.0 Linux 3.10.0 x86_64 GNU y y/y y y y y -CentOS 7.0 Linux 3.10.0 x86_64 Intel n y/y n y y y -Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -SunOS 5.11 32-bit y y y y -SunOS 5.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 8.1 y y y y -Windows 8.1 x64 y y y y -Mac OS X Mountain Lion 10.8.5 64-bit y n y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -Mac OS X Yosemeti 10.10.2 64-bit y n y y -AIX 6.1 32- and 64-bit y n n y -CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y -CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n -CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n -CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y -CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n -CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n -CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n -CentOS 6.4 Linux 2.6.32 x86_64 PGI y y y n -CentOS 7.0 Linux 3.10.0 x86_64 GNU y y y n -CentOS 7.0 Linux 3.10.0 x86_64 Intel y y y n -Linux 2.6.32-431.11.2.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.18-308.13.1.el5PAE MPICH mpich 3.1.3 compiled with - #1 SMP i686 i686 i386 gcc 4.9.2 and gfortran 4.9.2 - (jam) g95 (GCC 4.0.3 (g95 0.94!) 
- - Linux 2.6.18-431.11.2.el6 MPICH mpich 3.1.3 compiled with - #1 SMP x86_64 GNU/Linux gcc 4.9.2 and gfortran 4.9.2 - (platypus) g95 (GCC 4.0.3 (g95 0.94!) - - FreeBSD 8.2-STABLE i386 gcc 4.5.4 [FreeBSD] 20110526 - (loyalty) gcc 4.6.1 20110527 - g++ 4.6.1 20110527 - gfortran 4.6.1 20110527 - - FreeBSD 8.2-STABLE amd64 gcc 4.5.4 [FreeBSD] 20110526 - (freedom) gcc 4.6.1 20110527 - g++ 4.6.1 20110527 - gfortran 4.6.1 20110527 - - Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux - gcc (Debian 4.7.2-5) 4.7.2 - GNU Fortran (Debian 4.7.2-5) 4.7.2 - (cmake and autotools) - - Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - (cmake and autotools) - - SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.8.1 - GNU Fortran (SUSE Linux) 4.8.1 - (cmake and autotools) - - Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - (cmake and autotools) - - hopper.nersc.gov PrgEnv-gnu/5.2.40 - gcc (GCC) 4.9.2 20141030 (Cray Inc.) - GNU Fortran (GCC) 4.9.2 20141030 (Cray Inc.) - g++ (GCC) 4.9.2 20141030 (Cray Inc.) - - -Known Problems -============== -* On Windows platforms in debug configurations, the VFD flush1 tests will fail - with the split and multi VFD drivers. These tests will display a modal debug - dialog which must be answered or wait for the test timeout to expire. - (ADB - 2014/06/23 - HDFFV-8851) - -* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv - catches some undefined behavior in the alignment algorithm of the macro DETECT_I - in H5detect.c (Issue 8147). Since the algorithm is trying to detect the alignment - of integers, ideally the flag -fcatch-undefined-behavior shouldn't to be used for - H5detect.c. In the future, we can separate flags for H5detect.c from the rest of - the library. (SLU - 2013/10/16) - -* Make provided by Solaris fails in "make check". Solaris users should use - gmake to build and install the HDF5 software. (AKC - 2013/10/08 - HDFFV-8534) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD with the - native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the - ports (and probably gcc releases after that). - (QAK - 2012/10/19) - -* The following h5dump test case fails in BG/P machines (and potentially other - machines that use a command script to launch executables): - - h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0" - tno-subset.h5 - - This is due to the embedded spaces in the dataset name being interpreted - by the command script launcher as meta-characters, thus passing three - arguments to h5dump's -d flag. The command passes if run by hand, just - not via the test script. - (AKC - 2012/05/03) - -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release. - (DER - 2011/10/14 - HDFFV-8235) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in a future release. 
- (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. - (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. - (AKC - 2011/05/07) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the Fortran and HL/Fortran - tests fail. HL and C++ shared libraries should now be - working as intended, however. - (MAM - 2011/04/20) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is a new parameter added to - the get_eoa and set_eoa callback functions. A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
- There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - - -%%%%1.8.14%%%% - - -HDF5 version 1.8.14 released on 2014-11-12 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.13 and -HDF5-1.8.14, and contains information on the platforms tested and -known problems in HDF5-1.8.14. - -All new and modified APIs are listed in the "HDF5 Software Changes -from Release to Release" document along with details about previous -releases at: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -Links to the HDF5 1.8.14 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.14 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.14 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.13 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Configuration - ------------- - - bin/cmakehdf5 configures, builds and installs C, C++, Fortran and High - level API's. (It used to build the C API only). - (AKC 2014/10/17 HDFFV-8932). - - Library - ------- - - None - - Parallel Library - ---------------- - - Chunk Fill Writes Changed to Collective - - Slow performance in chunk fill writes. Chunk fills - in the past were written independently by rank 0 one block - at a time. - - Optimized the chunk fill write algorithm so that all - chunk fill values will be written collectively in a single MPI-IO - call. This should show a great performance improvement when - creating chunked datasets in parallel when the chunk dimensions - are fairly small. - - (MSC - 2014/08/22, HDFFV-8878) - - Tools - ----- - - None - - High-level APIs - --------------- - - None - - Fortran API - ----------- - - None - - C++ API - ------- - - Initialization of Object IDs - - The data member "id" in classes that represent HDF5 objects were - initialized to 0, which caused problem for some users. - - Replaced 0 with H5I_INVALID_HID to initialize these "id"s. For the - PropList class, H5P_DEFAULT is used instead of H5I_INVALID_HID. - - (BMR - 2014/09/30, HDFFV-4259) - - - -Support for New Platforms, Languages, and Compilers -=================================================== - - None - -Bug Fixes since HDF5-1.8.13 -=========================== - - Configuration - ------------- - - CMake and SVN URLs - - The SVN URLs will be different when the HDF Group domain name changes. - - Removed the SVN URL references in the cacheinit.cmake and release_docs files. - - (ADB - 2014/10/27, HDFFV-8953) - - - CMake Packaging - - A Fortran module was not generated if the compiler was not F2003 - compliant. 
- - Removed the module name from the package list of Fortran modules because - that module was never generated. This was only an issue for Fortran - compilers that are not F2003 compatible. - - (ADB - 2014/10/16, HDFFV-8932) - - - Shared Library Interface Version Number (soname) - - In order to increase the maintainability of HDF5, an architectural - change was made which required the renaming of several public symbols in - H5Ppublic.h. - - The shared library interface version number ("soname") has been increased - on account of these changes. For a full list of the changed symbols, see - the interface compatibility report, which is available as a link off of - the 'HDF5 Software Changes from Release to Release' document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - - (AKC - 2014/10/03, HDFFV-8937) - - - Configure Settings for Mac OSX Need Defaults for PROD_XXX, DEBUG_XXX, - and PROFILE_XXX - - The configure setting files for Mac OSX (config/apple) did not - have the default settings of PROD_XXX, DEBUG_XXX, PROFILE_XXX. - - Added the default settings. Mac platforms now builds library with - "-O3" optimization when the default clang compiler is used. - - (AKC - 2014/10/01, HDFFV-8933) - - - CMake ConfigureChecks - - Two include files were missing from two C tests. - - Propagated the configure test changes to H5_LDOUBLE_TO_INTEGER_WORKS_TEST - and H5_ULLONG_TO_LDOUBLE_PRECISION_TEST to ConfigureChecks.cmake (added - stdlib.h and string.h in the HDFTests.c file). - - (ADB - 2014/09/02 HDFFV-8845) - - - CMake Parallel Test Missing - - The source file was removed in the previous release but the parallel - test t_posix_compliant was not. - - Removed the t_posix_compliant parallel test from the library. - - (ADB - 2014/8/14 HDFFV-8880) - - - Autotools Reconfigure. Bison. Flex. - - The Bison and Flex files were out of date. - - Bison was upgraded to 2.7, and Flex was upgraded to 2.5.37. The - bin/reconfigure script now will execute Bison and Flex and update - the hl/src files. - - (ADB - 2014/06/16 HDFFV-8709) - - - Autotools Reconfigure. m4. - - The m4 macro processor was out of date. - - Reconfigured Autotools with m4 upgraded to 1.4.17. - - (ADB - 2014/06/12 HDFFV-8743) - - - Autotools: Modified configure to add an entry at the beginning of AM_LDFLAGS - for the hdf5 install directory. Without this entry the relink commands - invoked by "make install" to create libraries dependent on libhdf5.so added - a dependency on the first libhdf5.so found in any directory in AM_LDFLAGS - regardless of its version. (LRK - 2014/10/17 HDFFV-8944) - - - Changed Autotools Build Behavior. Fortran High-level Library. - - The Fortran high-level (HL) library did not compile if the default - size of a REAL is DOUBLE PRECISION; the build would fail during - compilation. - - Configure now checks to see if REAL is DOUBLE PRECISION, Fortran is - enabled, and HL library is enabled. If this is true, then configure - will stop with an error message. - - (MSB - 2014/8/11, HDFFV-8883/HDFFV-889) - - - - Library - ------- - - Fixed Identifier Management Code - - Opening an object returns an identifier; closing the object should - free up the identifier. A problem was found where the identifiers - were not being freed up correctly. - - Fixed the problem so that identifiers that have been used can be - used again after their object has been closed. 
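-
-      For illustration, a minimal C sketch of the open/close identifier
-      lifecycle described above; H5Iis_valid() is used to show that the
-      identifier is released by H5Dclose(). The file and dataset names are
-      placeholders and error checking is omitted.
-
-          #include <stdio.h>
-          #include "hdf5.h"
-
-          int main(void)
-          {
-              /* placeholder names; the file is assumed to already exist */
-              hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
-              hid_t dset = H5Dopen2(file, "/dset", H5P_DEFAULT);
-
-              printf("valid while open:  %d\n", (int)H5Iis_valid(dset));
-
-              H5Dclose(dset);            /* releases the identifier */
-              printf("valid after close: %d\n", (int)H5Iis_valid(dset));
-
-              H5Fclose(file);
-              return 0;
-          }
-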
- - (QAK - 2014/10/16, HDFFV-8930) - - - Removal of DllMain() from Static Windows Builds - - A DllMain() function was added in HDF5 1.8.13 in order to handle - win32 thread cleanup. The preprocessor #ifdefs around the DllMain - function allowed it to be compiled when the static library is built, - which is incorrect behavior that can cause linkage problems in - clients. - - The fix was to change the preprocessor #ifdefs to exclude compiling - DllMain() in static builds. Our DllMain function is now only - compiled when the shared, thread-safe library is built on Windows. - - (DER - 2014/06/13, HDFFV-8837) - - - Enforce Constraint on page_size Parameter in H5Pset_core_write_tracking() - - The reference manual states that the page_size parameter cannot be - zero. - - This change checks the page_size parameter to see it is zero and - returns an error code if it is. - - (DER - 2014/08/11, HDFFV-8891) - - - H5Ldelete_by_idx() fails on non-existent group name. - (MSC - 2014/07/31, HDFFV-8888) - - - H5Ldelete_by_idx() Seg Fault on Non-existent Group Name - - If a non-existent group name was used by H5Ldelete_by_idx(), a - segmentation fault would result. - - Bug was fixed. - - (MSC - 2014/07/31, HDFFV-8888) - - - Bug in Test When Building Parallel HDF5 on PVFS2 - - There was a bug in a test when building Parallel HDF5 on PVFS2. - - The build now uses MPI_File_get_size() instead of stat(). - - (MSC - 2014/07/14, HDFFV-8856) - - - MPI-IO Driver Tried to Allocate Space for Zero-length Dataset - - MPI-IO driver tried to allocate space for zero-length dataset - and asserts. - - Fixed driver and added a regression test. - - (MSC - 2014/07/03, HDFFV-8761) - - - Parallel Library - ---------------- - - None - - Performance - ------------- - - None - - Tools - ----- - - None - - Fortran API - ------- - - SIZEOF Replaced by C_SIZEOF and STORAGE_SIZE. - - The intrinsic function SIZEOF is non-standard and should be replaced with a - standard intrinsic function. - - If the F2008 intrinsic C_SIZEOF and STORAGE_SIZE are available, then they will - be used instead of the non-standard SIZEOF intrinsic, even when the SIZEOF - function is available. - - (MSB - 2014/6/16, HDFFV-8653) - - - Non-functional API: h5pget_fill_value_f - - The Fortran wrapper h5pget_fill_value_f was calling the wrong C API. - - The correct C API, H5Pget_fill_value, is now called by the Fortran - wrapper. - - (MSB - 2014/9/25, HDFFV-8879) - - - Interoperability with C HDF5: H5Literate and h5literate_f - - h5literate_f assumes the return value for the callback function to - be of type int (or int_f in C). However, in the C wrapper the return - value of H5Literate is type herr_t, and this could cause - interoperability issues. - - The callback function should be declared INTEGER(C_INT) for - portability. The tests were updated accordingly. - - (MSB - 2014/9/26, HDFFV-8909) - - - Interoperability with C HDF5: Constant INTEGER Parameters with the - H5FD Interface - - Wrong type cast of constant Fortran INTEGER parameters was used. - - The following parameter constant types were changed from INTEGER to - INTEGER(HID_T) to match the C types: H5FD_CORE, H5FD_FAMILY, H5FD_LOG, - H5FD_MPIO, H5FD_MULTI, H5FD_SEC2, and H5FD_STDIO. - - Other internal 'int' types where changed to 'hid_t'; these are - transparent to the user. - - (MSB - 2014/7/18, HDFFV-8748) - - C++ API - ------ - - Memory Leaks - - There were several potential memory leaks in the library due to - dynamically allocated strings not being freed when failure occurs. 
- - Applied user's patches to remove these potential memory leaks. - - (BMR - 2014/09/30, HDFFV-8928) - - - Disallow H5F_ACC_CREAT - - H5F_ACC_CREAT was included in the C++ API but the C library does not - allow it at this time. - - Removed this flag from the functions in H5File class. - - (BMR - 2014/09/29, HDFFV-8852) - - - Missing Flags in Documentation: H5F_ACC_RDONLY and H5F_ACC_RDWR - - The H5F_ACC_RDONLY and H5F_ACC_RDWR flags were missing from the - documentation of the H5File constructors. - - These two flags are now included in the documentation for opening - files. - - (BMR - 2014/09/29, HDFFV-8852) - - High-level APIs: - ------ - - Seg Faults in H5TBread_field_name and H5TBread_field_name_f - - When H5TBread_field_name or H5TBread_field_name_f were used to read a - field and if the name of the field was wrong, a segmentation fault - would result. - - Both C and Fortran APIs were fixed so they no longer seg fault if - the name of the field is wrong, and both APIs return a negative - value if the name of the field is wrong. - - (MSB - 2014/09/29, HDFFV-8912) - - - Possible Buffer Overflow in High-level (HL) APIs - - Multiple HL APIs (H5DSis_scale is one example) had issues: - (1) The datatype from the file was re-used as the memory datatype, - and - (2) No effort was made to ensure that strings were actually - null-terminated. - - All of the HL routines now check for NULL pointers, for null-terminated - strings, and to see if string buffers are short enough not to overflow - the buffer. The minimum length of the buffers is now used in strncmp - to avoid overflow. - - (MSB - 2014/9/29, HDFFV-8670) - - - Behavior Change of H5LTdtype_to_text - - If a user buffer was passed in to H5LTdtype_to_text along with the - length, then the function would not truncate at the end of the - buffer, but would exceed the end of the user buffer. - - H5LTdtype_to_text was changed to truncate the string if the user - buffer is too small. - - (MSB - 2014/9/29, HDFFV-8855) - - Fortran High-level APIs: - ------ - - See entry for HDFFV-8912 above. - - Testing - ------- - - A subtest in parallel h5diff (ph5diff) testing was bypassed for the - local Linux 32 machine due to unknown issue in the previous version of - Mpich. The failure no long exists in the current Mpich. Therefore the - bypass is removed. (AKC - 2014/11/03 HDFFV-8954) - - - Fixed incorrect exit code values (was -1) in testframe which is commonly - used by several test programs. (AKC - 2014/07/22 HDFFV-8881) - - - Fixed Incorrect Exit Code Values in Testframe - The testframe which is commonly used by several test programs - had some incorrect exit code values. Fixed the incorrect exit code - values. (AKC - 2014/07/22, HDFFV-8881) - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. 
- - AIX 5.3 xlc 10.1.0.5 - (NASA G-ADA) xlC 10.1.0.5 - xlf90 12.1.0.6 - - Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP i686 i686 i386 compilers for 32-bit applications; - (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-54) - Version 4.8.2 - PGI C, Fortran, C++ Compilers for 32-bit - applications; - Version 13.7-0 - Intel(R) C, C++, Fortran Compiler for 32-bit - applications; - Version 14.0.2 (Build 20140120) - - Linux 2.6.18-371.6.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers for 64-bit applications; - (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-54) - Version 4.8.2 - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64; - Version 14.0.2 (Build 20140120) - - Linux 2.6.32-431.11.2.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus) Version 4.4.7 20120313 - Version 4.8.2 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 13.7-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 14.0.2 (Build 20140120) - - Linux 2.6.32-431.29.2.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4) - IBM XL C/C++ V13.1 - IBM XL Fortran V15.1 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2008 (cmake) - Visual Studio 2010 w/ Intel Fortran 14 (cmake) - Visual Studio 2012 w/ Intel Fortran 14 (cmake) - Visual Studio 2013 w/ Intel Fortran 14 (cmake) - Cygwin(CYGWIN_NT-6.1 1.7.32(0.274/5/3) gcc(4.8.3) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2008 (cmake) - Visual Studio 2010 w/ Intel Fortran 14 (cmake) - Visual Studio 2012 w/ Intel Fortran 14 (cmake) - Visual Studio 2013 w/ Intel Fortran 14 (cmake) - - Windows 8.1 Visual Studio 2012 w/ Intel Fortran 14 (cmake) - Visual Studio 2013 w/ Intel Fortran 14 (cmake) - - Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 14 (cmake) - Visual Studio 2013 w/ Intel Fortran 14 (cmake) - - Mac OS X Lion 10.7.5 Apple clang/clang++ version 3.0 from Xcode 4.6.1 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (duck) Intel icc/icpc/ifort version 13.0.3 - - Mac OS X Mt. Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (swallow/kite) Intel icc/icpc/ifort version 14.0.2 - - Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.0.1 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (wren) Intel icc/icpc/ifort version 14.0.2 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.11 32-bit n y/y n y y y -Solaris2.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/y n y y n -Windows 8.1 n y/y n y y y -Windows 8.1 x64 n y/y n y y y -Mac OS X Lion 10.7.5 64-bit n y/y n y y y -Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y -Mac OS X Mavericks 10.9.5 64-bit n y/y n y y ? 
-AIX 5.3 32- and 64-bit n y/n n y y y -CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y -CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y -CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y -CentOS 5.9 Linux 2.6.18 x86_64 GNU n y/y n y y y -CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y -CentOS 6.4 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y -CentOS 6.4 Linux 2.6.32 x86_64 PGI n y/y n y y y -Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.11 32-bit y y y y -Solaris2.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 8.1 y y y y -Windows 8.1 x64 y y y y -Mac OS X Lion 10.7.5 64-bit y n y y -Mac OS X Mountain Lion 10.8.5 64-bit y n y y -Mac OS X Mavericks 10.9.5 64-bit y n y y -AIX 5.3 32- and 64-bit y n n y -CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y -CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n -CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n -CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y -CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n -CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n -CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n -CentOS 6.4 Linux 2.6.32 x86_64 PGI y y y n -Linux 2.6.32-431.11.2.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.18-308.13.1.el5PAE MPICH mpich 3.1.2 compiled with - #1 SMP i686 i686 i386 gcc 4.9.1 and gfortran 4.9.1 - (jam) g95 (GCC 4.0.3 (g95 0.94!) - - Linux 2.6.18-431.11.2.el6 MPICH mpich 3.1.2 compiled with - #1 SMP x86_64 GNU/Linux gcc 4.9.1 and gfortran 4.9.1 - (platypus) g95 (GCC 4.0.3 (g95 0.94!) - - FreeBSD 8.2-STABLE i386 gcc 4.5.4 [FreeBSD] 20110526 - (loyalty) gcc 4.6.1 20110527 - g++ 4.6.1 20110527 - gfortran 4.6.1 20110527 - - FreeBSD 8.2-STABLE amd64 gcc 4.5.4 [FreeBSD] 20110526 - (freedom) gcc 4.6.1 20110527 - g++ 4.6.1 20110527 - gfortran 4.6.1 20110527 - - Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux - gcc (Debian 4.7.2-5) 4.7.2 - GNU Fortran (Debian 4.7.2-5) 4.7.2 - (cmake and autotools) - - Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1) - (cmake and autotools) - - SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.8.1 - GNU Fortran (SUSE Linux) 4.8.1 - (cmake and autotools) - - Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1 - (cmake and autotools) - - Cray Linux Environment (CLE) PrgEnv-pgi/4.2.34 - hopper.nersc.gov pgcc 13.6-0 64-bit target on x86-64 Linux -tp istanbul - pgf90 13.6-0 64-bit target on x86-64 Linux -tp istanbul - pgCC 13.6-0 64-bit target on x86-64 Linux -tp istanbul - - -Known Problems -============== -* On cygwin platforms the feature to load dynamic filter libraries only looks - for libraries with the a so extension. Support for cygwin cygxxx.dll libraries - is planned for the next release. - (ADB - 2014/11/04 - HDFFV-8736) - -* On windows platforms in debug configurations, the VFD flush1 tests will fail - with the split and multi VFD drivers. These tests will display a modal debug - dialog which must be answered or wait for the test timeout to expire. 
- The flush1 and flush2 tests will be skipped under debug for this release. - (ADB - 2014/06/23 - HDFFV-8851) - -* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv - catches some undefined behavior in the alignment algorithm of the macro DETECT_I - in H5detect.c (Issue 8147). Since the algorithm is trying to detect the alignment - of integers, ideally the flag -fcatch-undefined-behavior shouldn't to be used for - H5detect.c. In the future, we can separate flags for H5detect.c from the rest of - the library. (SLU - 2013/10/16) - -* Make provided by Solaris fails in "make check". Solaris users should use - gmake to build and install the HDF5 software. (AKC - 2013/10/08 - HDFFV-8534) - -* The h5dump and h5diff utilities occasionally produce different output - between Linux and Windows systems. This is caused by lower-level library - routines that fail to write fill values to the user's buffer when reading - unallocated chunks from datasets that have a fill value set to - H5D_FILL_VALUE_DEFAULT. Due to platform differences the return of - spurious data values has only been encountered on Windows 32-bit systems. - (Issue HDFFV-8247; JP - 2013/03/27) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD with the - native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the - ports (and probably gcc releases after that). - (QAK - 2012/10/19) - -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release. - (DER - 2011/10/14 - HDFFV-8235) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in a future release. - (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. - (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. - (AKC - 2011/05/07) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the fortran and hl/fortran - tests fail. We are looking into the issue. HL and C++ shared libraries - should now be working as intended, however. 
- (MAM - 2011/04/20) - -* The --with-mpe configure option does not work with Mpich2. - (AKC - 2011/03/10) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. - (CMC - 2009/04/28) - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - - -%%%%1.8.13%%%% - - -HDF5 version 1.8.13 released on 2014-05-05 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.12 and -HDF5-1.8.13, and contains information on the platforms tested and -known problems in HDF5-1.8.13. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. 
- -Links to the HDF5 1.8.13 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.13 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.13 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.13 (current -release) versus Release 1.8.12": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.12 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Configuration - ------------- - - Autotools: Automake updated to 1.14.1 (ADB - 2014/04/08) - - - CMake: Moved minimum CMake version to 2.8.11 which enables better library - include processing. (ADB - 2014/03/26) - - - When configuring a thread-safe HDF5 Library it is no longer necessary - to specify --enable-threadsafe with --with-pthreads if the Pthreads - library is in a standard location. (DER - 2014/04/11 HDFFV-8693) - - Library - ------- - - Added an H5free_memory API call. This should be used to free memory - that has been allocated by HDF5 API calls. H5Tget_member_name and - H5Pget_class_name are two examples. The main motivation for this call - is Windows, where it is common for application code and the HDF5 Library - to be using different C run-time libraries (CRT). Using the new call - ensures that the same CRT handles both the allocation and free. This - new function can also be useful in any case where the library uses a - different memory manager than the application, such as when a debug - memory manager is in use or when the HDF5 Library is wrapped for use - in a managed language like Python or Java. Fixes HDFFV-7710, 8519, - and 8851. (DER - 2014/04/11) - - - The Core VFD (aka Memory VFD) can now be configured to track dirty - regions in the file and only write out the changed regions on - flush/close. Additionally, a "page aggregation" size can be set that - will aggregate small writes into larger writes. For example, setting - a 1 MiB page aggregation size will logically partition the the - in-memory file into 1 MiB pages that will be written out in their - entirety if even a single byte is dirtied. The feature is controlled - via the new H5Pset/get_core_write_tracking() API call. A new - "core_paged" target has been added to the check-vfd target in - test/Makefile.am that exercises the feature over all HDF5 VFD-aware - tests. (DER - 2014/04/12) - - Parallel Library - ---------------- - - Removed MPI-POSIX VFD, as it wasn't helping anyone and was just - generating support questions. Application developers performing - parallel I/O should always use the MPI-IO VFD. - (QAK - 2014/03/28 HDFFV-8659) - - - Improved parallel I/O support to allow collective I/O on point - selections. 
(QAK - 2014/03/15) - - Tools - ----- - - None - - High-Level APIs - --------------- - - None - - Fortran API - ----------- - - Wrappers h5pset_file_image_f and h5pget_file_image_f were added to the - library. (MSB - 2014/1/2014) - - C++ API - ------- - - The following new features are added: - + Wrappers to class H5Object to get an object's name (HDFFV-8548). - ssize_t getObjName(char *obj_name, size_t buf_size = 0) - ssize_t getObjName(H5std_string& obj_name, size_t len = 0) - H5std_string getObjName() - + Wrappers to class H5CommonFG to get a child object's type from a - group or file (HDFFV-8367). - H5O_type_t childObjType(const H5std_string& objname) - H5O_type_t childObjType(const char* objname) - H5O_type_t childObjType(hsize_t index, - H5_index_t index_type=H5_INDEX_NAME, - H5_iter_order_t order=H5_ITER_INC, const char* objname=".") - + Wrappers to class DSetMemXferPropList for setting/getting a transform - property list (HDFFV-7907). - DSetMemXferPropList(const char* expression); - void setDataTransform(const char* expression) - void setDataTransform(const H5std_string& expression) - ssize_t getDataTransform(char* exp, size_t buf_size=0) - H5std_string getDataTransform() - + Wrapper to CompType for setting size to compound datatype (HDFFV-8642). - void setSize(size_t size) - + Overloaded functions to provide prototypes that declare constant - arguments const (HDFFV-3384). These include: - DataSet::fillMemBuf - DataSet::getVlenBufSize - DataSpace::extentCopy - DataType::commit - FileAccPropList::setSplit - H5File::getVFDHandle - + Additional overload to class H5Location to get a comment as a char* - ssize_t getComment(const char* name, size_t buf_size, char* comment) - + Additional overloads to class Attribute to get an attribute's name for - convenience: - ssize_t getName(char* attr_name, size_t buf_size=0) - ssize_t getName(H5std_string& attr_name, size_t buf_size=0) - (BMR, 2014/04/15) - + A static wrapper to Exception for printing the error stack without an - instance of Exception - static void printErrorStack(FILE* stream = stderr, - hid_t err_stack = H5E_DEFAULT); - (BMR, 2014/04/25) - - -Support for New Platforms, Languages, and Compilers -=================================================== - Mac OS X 10.6 Snow Leopard is not supported by Apple any more. In view of - the added support of Mac OS X 10.9, Mac OS X 10.6 is retired from HDF5 - supported platforms. (AKC - 2014/03/14 HDFFV-8704) - - Mac OS X 10.9 Mavericks is supported. (AKC - 2014/03/04 HDFFV-8694) - - -Bug Fixes since HDF5-1.8.12 -=========================== - - Configuration - ------------- - - CMake: When CMake commands are executed individually on the command line - and the external filters are being built, the CMAKE_BUILD_TYPE define - must be set to the same value as the configuration - (-DCMAKE_BUILD_TYPE:STRING=Release if using -C Release). This is needed - by the the szip and zlib filter build commands. (ADB - HDFFV-8695) - - - CMake: Removed use of the XLATE_UTILITY program. - (ADB - 2014/03/28 HDFFV-8640) - - - CMake: Added missing quotes in setting the CMAKE_EXE_LINKER_FLAGS for the - MPI option. (ADB - 2014/02/27 HDFFV-8674) - - - CMake: Configuration of the HDF5 C++ or Fortran libraries with the - thread-safety feature. - - C++ and/or Fortran + thread-safe is enforced as a non-supported - configuration. This matches the autotools. (DER - 2014/04/11) - - - CMake: Configuration of static HDF5 C library with the thread-safety - feature. 
- - Static + thread-safe + Win32 threads is not a supported configuration - due to the inability to automatically clean up thread-local storage. - This is expected to be fixed in a future release. In the meantime, a - work-around that uses internal functionality may allow the combination - to be used without resource leaks. Contact the help desk for more - information. (DER - 2014/04/11) - - - Autotools: Several changes were done to configure and installcheck. - - An export of LD_LIBRARY_PATH= was - removed from configure; make installcheck was revised to run - scripts installed in share/hdf5_examples to use the installed h5cc, etc. - to compile and run example source files also installed there. - - Make installcheck will now fail when a shared szip or other external lib - file cannot be found in the same manner that executables compiled and - linked with h5cc will fail to run when those lib files cannot be found - after install. Make installcheck should pass after setting - LD_LIBRARY_PATH to the szip location. (LRK - 2014/04/16) - - Library - ------- - - A Gnu Make directive (.NOTPARALLEL) is added to fortran/test/Makefile. - - AIX native make does not support this directive and would fail if - parallel make (e.g. make -j4) is used to build the library. AIX users - either do not use parallel make or install Gnu Make to build the library. - (AKC 2014/04/08 HDFFV-8738) - - - H5R.c: H5Rget_name gave an assertion failure if the "name" parameter - was NULL. - - Fixed H5Rget_name to return the size of the buffer needed to read a - name of the referenced object in this case. The size doesn't include - the NULL terminator. H5Rget_name returns negative on failure. - (MSB - 2014/01/22 HDFFV-8620) - - - H5Z.c: H5Zfilter_avail didn't check if a filter was available as a - dynamically loaded filter. The error manifested itself in the h5repack - tool when removing user-defined dynamically loaded filter. - - Added a code to find the filter among the dynamically loaded filters - after the function fails to find it among the registered filters. - (ADB - 2014/03/03 HDFFV-8629) - - - Memory leak: a memory leak was observed in conjunction to the - H5TS_errstk_key_g thread-local variable allocated in the H5E_get_stack - function in H5E.c. - - The shared HDF5 thread-safe library now no longer leaks thread-local - storage resources on Windows with Win32 threads. Currently, there is - no solution for this problem when HDF5 is statically built. We - disabled the build of the static HDF5 thread-safe library with - Win32 threads. (DER - 2014/04/11 HDFFV-8518) - - - H5Dio.c: Improved handling of NULL pointers to H5Dread/H5Dwrite - calls. Credit to Jason Newton (nevion@gmail.com) for the original patch. - - H5Dwrite/read failed when a NULL pointer was passed for a data buffer - and 0 elements were selected. Fixed. (QAK - 2014/04/16 HDFFV-8705) - - - Deprecated API (1_6 API): Improved handling of closing the library and - re-accessing it with a deprecated routine. - - When a program used a deprecated API (for example, H5Gcreate1), - closed the library, and reopened it again to access a group, dataset, - datatype, dataspace, attribute, or property list, HDF5 failed to - provide an identifier for the object. Fixed. 
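-
-      For illustration, a hedged C sketch of the pattern this fix addresses:
-      create an object with a deprecated 1.6-style call, close the whole
-      library with H5close(), and then access the object again. It assumes a
-      library built with the deprecated (1.6) API symbols enabled; the file
-      and group names are placeholders and error checking is omitted.
-
-          #include "hdf5.h"
-
-          int main(void)
-          {
-              hid_t file = H5Fcreate("compat.h5", H5F_ACC_TRUNC,
-                                     H5P_DEFAULT, H5P_DEFAULT);
-              hid_t grp  = H5Gcreate1(file, "/g1", 0);   /* deprecated call */
-              H5Gclose(grp);
-              H5Fclose(file);
-
-              H5close();        /* shut the library down completely */
-
-              /* The library re-initializes on the next API call; per the
-               * note above, re-opening the object now returns a valid
-               * identifier. */
-              file = H5Fopen("compat.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
-              grp  = H5Gopen2(file, "/g1", H5P_DEFAULT);
-
-              H5Gclose(grp);
-              H5Fclose(file);
-              return 0;
-          }
-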
- (NAF, QAK - 2014/04/16 HDFFV-8232) - - Parallel Library - ---------------- - - Fixed a missing H5F_Provisional module in HDF5mpio.f90 - (MSB - 2014/2/7 HDFFV-8651) - - Performance - ------------- - - None - - Tools - ----- - - The h5diff tool would report that a datafile compared with an exact - copy of the same datafile had differences. This was due to the issue - below of reading un-written chunks. This problem is also fixed. - (AKC - 2014/05/01 HDFFV-8637) - - - The h5dump and h5diff utilities occasionally produced different output - between Linux and Windows systems. This has been fixed. - - This happened to datasets that used chunked storage, with default fill - values, and some of the chunks had not been written. - When the dataset was read, the library failed to write the default fill - values to parts of the use buffer that were associated with the unwritten - chunks. (JP - 2014/05/01 HDFFV-8247) - - - The compress option is retired from bin/release. - (AKC - 2014/04/25 HDFFV-8755) - - - bin/release has a new option "zip" that produces a release zip file for - the Windows platform. (AKC - 2014/04/24 HDFFV-8433) - - - h5diff: Several failures relating to handling of strings attributes - are fixed. - - The tool crashed or gave an error message when one of the strings had - fixed size type and another variable-length size type. h5diff now flags such - strings as "not comparable". We plan to enhance the tool to handle - strings of the different types in the future releases. - (AKC - 2014/04/18 HDFFV-8625, 8639, 8745) - - - h5repack: h5repack would not remove user-defined filters. - Fixed by modifying h5repack to check if the filter is registered or - can be dynamically loaded. (ADB - 2014/03/03 HDFFV-8629) - - F90 API - ------- - - H5D_CHUNK_CACHE_NSLOTS_DFLT_F and H5D_CHUNK_CACHE_NBYTES_DFLT_F were - changed from the default KIND for INTEGER to INTEGER of KIND size_t. - (MSB - 2014/3/31 HDFFV-8689) - - C++ API - ------ - - Added throw() to all exception destructors. Credit to Jason Newton - (nevion@gmail.com) for the patch. (BMR - 2014/4/15 HDFFV-8623) - - Changed the default value for H5Location::getComment from 256 to 0 - to conform to C function and because it makes more sense. - (BMR - 2014/4/15) - - High-Level APIs: - ------ - - None - - Fortran High-Level APIs: - ------ - - None - - Testing - ------- - - testhdf5 now exits with EXIT_SUCCESS(0) if no errors, else - EXIT_FAILURE(1). (AKC - 2014/01/27 HDFFV-8572) - - - The big test now pays attention to the HDF5_DRIVER environment variable. - Previously, it would run all tests with the family, stdio, and sec2 - virtual file drivers (VFDs) for each VFD in the check-vfd make target, - regardless of the variable setting. It now checks the variable and - either runs the appropriate VFD-specific tests or skips as needed. - This saves much testing time. Fixes HDFFV-8554. (DER - 2014/04/11) - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. 
- - AIX 5.3 xlc 10.1.0.5 - (NASA G-ADA) xlC 10.1.0.5 - xlf90 12.1.0.6 - gmake v3.82 - - Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP i686 i686 i386 compilers for 32-bit applications; - (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-54) - Version 4.8.2 - PGI C, Fortran, C++ Compilers for 32-bit - applications; - Version 13.7-0 - Intel(R) C, C++, Fortran Compiler for 32-bit - applications; - Version 14.0.2 (Build 20140120) - - Linux 2.6.18-371.6.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers for 64-bit applications; - (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-54) - Version 4.8.2 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 13.7-0 - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64; - Version 14.0.2 (Build 20140120) - - Linux 2.6.32-431.11.2.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus) Version 4.4.7 20120313 - Version 4.8.2 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 13.7-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 14.0.2 (Build 20140120) - - Linux 2.6.32-431.11.2.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3) - IBM XL C/C++ V11.1 - IBM XL Fortran V13.1 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2008 w/ Intel Fortran 14 (cmake) - Visual Studio 2010 w/ Intel Fortran 14 (cmake) - Visual Studio 2012 w/ Intel Fortran 14 (cmake) - Cygwin(CYGWIN_NT-6.1 1.7.28(0.271/5/3) gcc(4.8.2) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 14 (cmake) - Visual Studio 2010 w/ Intel Fortran 14 (cmake) - Visual Studio 2012 w/ Intel Fortran 14 (cmake) - - Windows 8.1 Visual Studio 2012 w/ Intel Fortran 14 (cmake) - - Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 14 (cmake) - - Mac OS X Lion 10.7.3 Apple clang/clang++ version 3.0 from Xcode 4.6.1 - 64-bit gfortran GNU Fortran (GCC) 4.6.2 - (duck) Intel icc/icpc/ifort version 13.0.3 - - Mac OS X Mt. 
Lion 10.8.5 Apple clang/clang++ version 5.0 from Xcode 5.0.2 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (swallow/kite) Intel icc/icpc/ifort version 14.0.2 - - Mac OS X Mavericks 10.9.2 Apple clang/clang++ version 5.1 from Xcode 5.1 - 64-bit gfortran GNU Fortran (GCC) 4.8.2 - (wren/quail) Intel icc/icpc/ifort version 14.0.2 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.11 32-bit n y/y n y y y -Solaris2.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/y n y y y -Windows 8.1 n y/y n y y y -Windows 8.1 x64 n y/y n y y y -Mac OS X Lion 10.7.3 64-bit n y/y n y y y -Mac OS X Mountain Lion 10.8.1 64-bit n y/y n y y y -Mac OS X Mavericks 10.9.1 64-bit n y/y n y y y -AIX 5.3 32- and 64-bit n y/n n y y y -CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y -CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y -CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y -CentOS 5.9 Linux 2.6.18 x86_64 GNU n y/y n y y y -CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y -CentOS 6.4 Linux 2.6.32 x86_64 GNU y y/y y y y y -CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y -CentOS 6.4 Linux 2.6.32 x86_64 PGI n y/y n y y y -Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y -OpenVMS IA64 V8.4 n y/n n y y n - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.11 32-bit y y y y -Solaris2.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 8.1 y y y y -Windows 8.1 x64 y y y y -Mac OS X Lion 10.7.3 64-bit y n y y -Mac OS X Mountain Lion 10.8.1 64-bit y n y y -Mac OS X Mavericks 10.9.1 64-bit y n y y -AIX 5.3 32- and 64-bit y n n y -CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y -CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n -CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n -CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y -CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n -CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n -CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n -CentOS 6.4 Linux 2.6.32 x86_64 PGI y y y n -Linux 2.6.32-431.11.2.el6.ppc64 y y y n -OpenVMS IA64 V8.4 n n n n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.18-308.13.1.el5PAE MPICH mpich 3.1 compiled with - #1 SMP i686 i686 i386 gcc 4.8.2 and gfortran 4.8.2 - (jam) g95 (GCC 4.0.3 (g95 0.94!) - - Linux 2.6.18-431.11.2.el6 MPICH mpich 3.1 compiled with - #1 SMP x86_64 GNU/Linux gcc 4.8.2 and gfortran 4.8.2 - (platypus) g95 (GCC 4.0.3 (g95 0.94!) 
- - FreeBSD 8.2-STABLE i386 gcc 4.5.4 [FreeBSD] 20110526 - (loyalty) gcc 4.6.1 20110527 - g++ 4.6.1 20110527 - gfortran 4.6.1 20110527 - - FreeBSD 8.2-STABLE amd64 gcc 4.5.4 [FreeBSD] 20110526 - (freedom) gcc 4.6.1 20110527 - g++ 4.6.1 20110527 - gfortran 4.6.1 20110527 - - Debian7.1.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux - gcc (Debian 4.7.2-5) 4.7.2 - GNU Fortran (Debian 4.7.2-5) 4.7.2 - (cmake and autotools) - - Fedora20 3.11.10-301.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.2 20131212 (Red Hat 4.8.2-7) - GNU Fortran (GCC) 4.8.2 20130603 (Red Hat 4.8.2-7) - (cmake and autotools) - - SUSE 13.1 3.11.6-4-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.8.1 - GNU Fortran (SUSE Linux) 4.8.1 - (cmake and autotools) - - Ubuntu 13.10 3.11.0-13-generic #20-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.8.1-10ubuntu8) 4.8.1 - GNU Fortran (Ubuntu/Linaro 4.8.1-10ubuntu8) 4.8.1 - (cmake and autotools) - - Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46 - hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai - pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai - pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai - - -Known Problems -============== -* When reading or writing a dataset (H5Dread/H5Dwrite) with a large selection - size (e.g., 2GB ~= 500 million of 4 bytes integers or floating point - numbers), some I/O systems may not be able to process it correctly. - We advise users to find out system limits before using large selections. If - I/O size limits exist, application should use HDF5 partial I/O capabilities - (e.g., H5Sselect_hyperslab(...)) to divide large requests into smaller sizes. - In this case we also advise users not to use chunk storage sizes larger that - 2GB since the HDF5 library performs I/O on the entire chunk. We will work - on the HDF5 library to divide large data requests to smaller I/O requests. - (AKC 2014/05/02 HDFFV-8479) - -* Due to an Intel compiler bug introduced in version 14.0.1, the HDF5 FORTRAN - wrappers do not work with configure option --enable-fortran2003. - However, the option --enable-fortran works with Intel 14.0.1. The compiler - bug was fixed in Intel version 14.0.2 and resolved the issue. - (MSB - 2014/4/15) - -* Due to a PGI compiler bug introduced in versions before 13.3 and versions - after 14.2, the FORTRAN test 'Testing get file image' will fail. - (MSB - 2014/4/15) - -* On CYGWIN, when building the library dynamically, testing will fail on - dynamically loaded filters. The test process will build dynamic filter - libraries with the *.dll.a extension, and the HDF5 Library will be looking - for *.so libraries. Entered as issue HDFFV-8736. (ADB - 2014/04/14) - -* A Gnu Make directive (.NOTPARALLEL) is added to fortran/test/Makefile. - AIX native make does not support this directive and would fail if - parallel make (e.g. make -j4) is used to build the library. AIX users - either do not use parallel make or install Gnu Make to build the library. - (AKC 2014/04/08 HDFFV-8738) - -* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv - catches some undefined behavior in the alignment algorithm of the macro - DETECT_I in H5detect.c. Since the algorithm is trying to detect the alignment - of integers, ideally the flag -fcatch-undefined-behavior should not to be - used for H5detect.c. In the future, we can separate flags for H5detect.c - from the rest of the library. 
-  (SLU - 2013/10/16 HDFFV-8147)
-
-* Make provided by Solaris fails in "make check". Solaris users should use
-  gmake to build and install the HDF5 software. (AKC - 2013/10/08 - HDFFV-8534)
-
-* On OpenVMS, two soft conversion functions (H5T__conv_i_f and H5T__conv_f_i)
-  have bugs. They convert data between floating-point numbers and integers.
-  But the library's default is hard conversion. The user should avoid
-  explicitly enabling soft conversion between floating-point numbers and
-  integers. (Issue VMS-8; SLU - 2013/09/19)
-
-* On OpenVMS, ZLIB 1.2.8 library doesn't work properly. ZLIB 1.2.5 works
-  fine. So please use ZLIB 1.2.5 to build HDF5 library. (Issue VMS-5;
-  SLU 2013/09/19)
-
-* When building using the Cray compilers on Cray machines, HDF5
-  configure mistakenly thinks the compiler is an intel compiler and
-  sets the -std=c99 flag which breaks configure on Cray. To build HDF5
-  properly on a Cray machine, please consult with the instructions in
-  INSTALL_parallel for building on Hopper.
-  (MSC - 2013/04/26 - HDFFV-8429)
-
-* The 5.9 C++ compiler on Sun failed to compile a C++ test ttypes.cpp. It
-  complains with this message:
-      "/home/hdf5/src/H5Vprivate.h", line 130: Error: __func__ is not defined.
-
-  The reason is that __func__ is a predefined identifier in C99 standard. The
-  HDF5 C library uses it in H5private.h. The test ttypes.cpp includes
-  H5private.h (H5Tpkg.h<-H5Fprivate.h<-H5Vprivate.h<-H5private.h). Sun's 5.9
-  C++ compiler doesn't support __func__, thus fails to compile the C++ test.
-  But Sun's 5.11 C++ compiler does. To check whether your Sun C++ compiler
-  knows this identifier, try to compile the following simple C++ program:
-      #include <stdio.h>
-
-      int main(void)
-      {
-          printf("%s\n", __func__);
-          return 0;
-      }
-  (SLU - 2012/11/5)
-
-* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
-  native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
-  ports (and probably gcc releases after that).
-  (QAK - 2012/10/19)
-
-* The following h5dump test case fails in BG/P machines (and potentially other
-  machines that use a command script to launch executables):
-
-  h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0"
-  tno-subset.h5
-
-  This is due to the embedded spaces in the dataset name being interpreted
-  by the command script launcher as meta-characters, thus passing three
-  arguments to h5dump's -d flag. The command passes if run by hand, just
-  not via the test script.
-  (AKC - 2012/05/03)
-
-* On hopper, the build failed when RUNSERIAL and RUNPARALLEL are set
-  to aprun -np X, because the H5lib_settings.c file was not generated
-  properly. Not setting those environment variables works, because
-  configure was able to automatically detect that it's a Cray system
-  and used the proper launch commands when necessary.
-  (MSC - 2012/04/18)
-
-* The data conversion test dt_arith.c fails in "long double" to integer
-  conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library
-  is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel
-  (3.2.2 on Fedora) doesn't have the problem. Users should lower the
-  optimization level (-O1 or -O0) by defining CFLAGS in the command line of
-  "configure" like:
-
-      CFLAGS=-O1 ./configure
-
-  This will overwrite the library's default optimization level.
-  (SLU - 2012/02/07 - HDFFV-7829)
-  This issue is no longer present on Ubuntu 12.10 (3.5.0 kernel) with
-  gcc 4.7.2.
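-
-  For reference, the "long double" to integer conversion path exercised by
-  dt_arith.c can also be reached from application code through H5Tconvert(),
-  as in the hedged C sketch below (the values are arbitrary); it can serve
-  as a quick sanity check on a build that may be affected.
-
-      #include <stdio.h>
-      #include <string.h>
-      #include "hdf5.h"
-
-      int main(void)
-      {
-          long double   src[4] = {1.0L, 2.5L, -3.0L, 100.0L};
-          int           dst[4];
-          /* H5Tconvert() converts in place, so the buffer must be big
-           * enough for the larger (source) type; the converted ints end
-           * up packed at the front of the buffer. */
-          unsigned char buf[4 * sizeof(long double)];
-          int           i;
-
-          memcpy(buf, src, sizeof(src));
-          H5Tconvert(H5T_NATIVE_LDOUBLE, H5T_NATIVE_INT, 4, buf, NULL,
-                     H5P_DEFAULT);
-          memcpy(dst, buf, sizeof(dst));
-
-          for (i = 0; i < 4; i++)
-              printf("%d\n", dst[i]);
-          return 0;
-      }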
- -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release. - (DER - 2011/10/14 - HDFFV-8235) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in a future release. - (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. - (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. - (AKC - 2011/05/07) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD. - (QAK - 2011/04/26) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the fortran and hl/fortran - tests fail. We are looking into the issue. HL and C++ shared libraries - should now be working as intended, however. - (MAM - 2011/04/20) - -* The --with-mpe configure option does not work with Mpich2. - (AKC - 2011/03/10) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). 
Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. - (CMC - 2009/04/28) - -* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - - -%%%%1.8.12%%%% - - -HDF5 version 1.8.12 released on 2013-11-04 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.11 and -HDF5-1.8.12, and contains information on the platforms tested and -known problems in HDF5-1.8.12. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.12 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.12 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.12 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.12 (current -release) versus Release 1.8.11": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.11 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Configuration - ------------- - - Added a configuration option to change the default plugin path. 
- The configure option is --with-default-plugindir=location. - The cmake option is -DH5_DEFAULT_PLUGINDIR:PATH=location. - HDFFV-8513. (ADB 2013/09/04) - - Renamed FFLAGS to FCFLAGS in configure. (ADB 2013/08/13) - - CMake can now package a compressed examples file, the default for - Windows binaries from HDF Group. (ADB - 2013/07/22) - - Library - ------- - - None - - Parallel Library - ---------------- - - None - - Tools - ----- - - h5repack: Added the ability to use plugin filters to read and write - files. The option uses the filter number. HDFFV-8345 - (ADB - 2013/09/04). - - h5dump: Added the option -N --any_path, which searches the file for - paths that match the search path. HDFFV-7989 (ADB - 2013/08/12). - - h5dump: Added the optional arg 0 to -A, which excludes attributes - from display. HDFFV-8134 (ADB - 2013/08/01). - - High-Level APIs - --------------- - - None - - Fortran API - ----------- - - None - - C++ API - ------- - - Added tutorial examples to C++/examples. They can be installed by - "make install-examples" and, in the installed directory, they can be - executed by running the script file run-c++-ex.sh. (BMR - 2013/09/28) - - A new class, H5::H5Location, is added to represent the location concept - in the C library. It is a base class to H5::H5File and H5::H5Ojbect, - whose member functions are moved into H5::H5Location. H5::H5File can - now inherent those functions. As a result, an H5::H5File object can have - an attribute. (BMR - 2013/09/27) - - Added wrappers for H5Rget_obj_type2 to retrieve the type of the object - that an object reference points to. (BMR - 2013/09/27) - H5O_type_t H5Location::getRefObjType(void *ref, H5R_type_t ref_type) - - Added wrappers for H5Aexist to check whether an attribute exists given - a name. (BMR - 2013/09/27) - bool H5::H5Location::attrExists(const char* name) - bool H5::H5Location::attrExists(const H5std_string& name) - - Added a number of overloaded functions for convenience. (BMR - 2013/09/27) - - -Support for New Platforms, Languages, and Compilers -=================================================== - - None - -Bug Fixes since HDF5-1.8.11 -=========================== - - Configuration - ------------- - - Modified H5detect.c to scan floating point types for padding bits before - analyzing the type further. This should fix problems with gcc 4.8. - (NAF - 2013/09/19 - HDFFV-8523/HDFFV-8500) - - HDF5 rpaths are no longer encoded in the library files when configured - with --disable-sharedlib-rpath. (LRK-2013-09-23 - HDFFV-8276) - - Library - ------- - - Added const qualifier to source buffer parameters in H5Dgather and - H5D_scatter_func_t (H5Dscatter callback). (NAF - 2013/7/09) - - - CMake now creates *.so.{lt_version} files with the same version as - configure. (ADB - 2013/06/05 HDFFV-8437) - - Parallel Library - ---------------- - - None - - Performance - ------------- - - None - - Tools - ----- - - h5dump: Added the option -N --any_path, which searches the file for - paths that match the search path. HDFFV-7989 (ADB - 2013/08/12). - - h5dump: Added the optional arg 0 to -A, which excludes attributes - from display. HDFFV-8134 (ADB - 2013/08/01). - - h5dump correctly exports subsetted data to a file, using the --output - option. (ADB - 2013/06/07 HDFFV-8447) - - h5cc and other compile scripts now default to linking shared libraries - when HDF5 is configured with the --disable-static option. 
- (LRK - 2013-09-23 - HDFFV-8141) - - F90 API - ------- - - None - - C++ API - ------ - - None - - High-Level APIs: - ------ - - None - - Fortran High-Level APIs: - ------ - - None - - Testing - ------- - - test/big sometimes failed with the message of "file selection+offset not - within extent". This has been fixed. (AKC - 2013/09/28 HDFFV-8271). - - tools/h5diff/testh5diff.sh is run in every "make check", even after it - has passed in the previous run. It should not run again if there are no - code changes. Fixed. (AKC - 2013/07/19 HDFFV-8392) - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. - - AIX 5.3 xlc 10.1.0.5 - (NASA G-ADA) xlC 10.1.0.5 - xlf90 12.1.0.6 - - Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP i686 i686 i386 compilers for 32-bit applications; - (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-54) - Version 4.8.1 - PGI C, Fortran, C++ Compilers for 32-bit - applications; - Version 13.7-0 - Intel(R) C, C++, Fortran Compiler for 32-bit - applications; - Version 13.1.3 (Build 20130607) - - Linux 2.6.18-308.16.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers for 64-bit applications; - (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-54) - Version 4.8.1 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 13.7-0 - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64; - Version 13.1.3 (Build 20130607) - - Linux 2.6.32-358.18.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus) Version 4.4.7 20120313 - Version 4.8.1 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 13.7-0 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 13.1.3 (Build 20130607) - - Linux 2.6.32-358.18.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3) - IBM XL C/C++ V11.1 - IBM XL Fortran V13.1 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2008 w/ Intel Fortran 11 (cmake) - Visual Studio 2010 w/ Intel Fortran 12 (cmake) - Visual Studio 2012 w/ Intel Fortran 13 (cmake) - Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11 (cmake) - Visual Studio 2010 w/ Intel Fortran 12 (cmake) - Visual Studio 2012 w/ Intel Fortran 13 (cmake) - - Windows 8.1 Visual Studio 2012 w/ Intel Fortran 13 (cmake) - - Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 13 (cmake) - - Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - 64-bit gfortran GNU Fortran (GCC) 4.6.2 - (fred) Intel C (icc), Fortran (ifort), C++ (icpc) - 12.1 Build 20120928 - - Mac OS X Lion 10.7.3 Apple clang/clang++ version 3.0 from Xcode 4.6.1 - 64-bit gfortran GNU Fortran (GCC) 4.6.2 - (duck) Intel icc/icpc/ifort version 13.0.3 - - Mac OS X Mountain Lion 10.8.1 Apple clang/clang++ version 4.2 from Xcode 4.6.1 - 64-bit gfortran GNU Fortran (GCC) 4.6.2 - (wren) Intel icc/icpc/ifort version 13.0.3 - - OpenVMS IA64 V8.4 HP C V7.3-018 - HP Fortran V8.2-104939-50H96 - HP C++ V7.4-004 - 
-Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.11 32-bit n y/y n y y y -Solaris2.11 64-bit n y/y n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/n n y y y -Windows 8.1 n y/y n y y y -Windows 8.1 x64 n y/y n y y y -Mac OS X Snow Leopard 10.6.8 64-bit n y/y n y y y -Mac OS X Lion 10.7.3 64-bit n y/y n y y y -Mac OS X Mountain Lion 10.8.1 64-bit n y/y n y y y -AIX 5.3 32- and 64-bit n y/n n y y y -CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y -CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y -CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y -CentOS 5.9 Linux 2.6.18 x86_64 GNU y y/y y y y y -CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y -CentOS 5.9 Linux 2.6.18 x86_64 PGI n y/y n y y y -CentOS 6.4 Linux 2.6.32 x86_64 GNU n y/y n y y y -CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y -Linux 2.6.32-358.2.1.el6.ppc64 n y/n n y y y -OpenVMS IA64 V8.4 n y/n n y y n - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.11 32-bit y y y y -Solaris2.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 8.1 y y y y -Windows 8.1 x64 y y y y -Mac OS X Snow Leopard 10.6.8 64-bit y n y n -Mac OS X Lion 10.7.3 64-bit y n y y -Mac OS X Mountain Lion 10.8.1 64-bit y n y y -AIX 5.3 32- and 64-bit y n n y -CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y -CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n -CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n -CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y -CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n -CentOS 5.9 Linux 2.6.18 x86_64 PGI y y y n -CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n -CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n -Linux 2.6.32-358.2.1.el6.ppc64 y y y n -OpenVMS IA64 V8.4 n n n n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.18-308.13.1.el5PAE MPICH mpich2-1.4.1p1 compiled with - #1 SMP i686 i686 i386 gcc 4.1.2 and gfortran 4.1.2 - (jam) g95 (GCC 4.0.3 (g95 0.94!) - - Linux 2.6.18-308.16.1.el5 MPICH mpich2-1.4.1p1 compiled with - #1 SMP x86_64 GNU/Linux gcc 4.1.2 and gfortran 4.1.2 - (koala) g95 (GCC 4.0.3 (g95 0.94!) 
- - FreeBSD 8.2-STABLE i386 gcc 4.5.4 [FreeBSD] 20110526 - (loyalty) gcc 4.6.1 20110527 - g++ 4.6.1 20110527 - gfortran 4.6.1 20110527 - - FreeBSD 8.2-STABLE amd64 gcc 4.5.4 [FreeBSD] 20110526 - (freedom) gcc 4.6.1 20110527 - g++ 4.6.1 20110527 - gfortran 4.6.1 20110527 - - Debian7.1.0 3.2.0-4-amd64 #1 SMP Debian 3.2.46-1 x86_64 GNU/Linux - gcc (Debian 4.7.2-5) 4.7.2 - GNU Fortran (Debian 4.7.2-5) 4.7.2 - (cmake and autotools) - - Fedora19 3.11.1-200.fc19.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.8.1 20130603 (Red Hat 4.8.1-1) - GNU Fortran (GCC) 4.8.1 20130603 (Red Hat 4.8.1-1) - (cmake and autotools) - - SUSE 12.3 3.7.10-1.16-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.7.2 - GNU Fortran (SUSE Linux) 4.7.2 - (cmake and autotools) - - Ubuntu 13.04 3.8.0-30-generic #44-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.7.3-1ubuntu1) 4.7.3 - GNU Fortran (Ubuntu/Linaro 4.7.3-1ubuntu1) 4.7.3 - (cmake and autotools) - - Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46 - hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai - pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai - pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai - - -Known Problems -============== -* Several HDF5 command-line tools and tests leave behind generated files - that are not cleaned up with "make clean" or "make distclean" when software - is built in place. The issue will be addressed in the 1.8.13 release. We - recommend to use build directory to compile and test HDF5 as described - in the INSTALL file, section 4.2. - -* Source directory names with spaces in them will cause failures in configure - or make on Mac (HDFFV-8152), Linux, and probably all other platforms. If a - configure command with a space is run from a build directory, it will exit - with an error message: "checking whether build environment is sane... - configure: error: unsafe srcdir value: '/scr/lrknox/hdf5 v1.8.12'". If - configure is run inside or below the directory with the space in the name, - libtool will get the directory path from the system, put the part of the - path before the space in the libdir variable in .../src/libhdf5.la, and - then fail to find the nonexistent directory. This is a known libtool issue - and the suggested workaround is to rename the directory without spaces. - (LRK - 2013/10/22) - -* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv - catches some undefined behavior in the alignment algorithm of the macro - DETECT_I in H5detect.c (HDFFV-8147). This issue will be addressed in the - next release. (SLU - 2013/10/16) - -* Running make check for the tools can fail in the tools tests if make was not - run prior. The tests for the tools use other tools in the tests, therefore - all the tools should be built before testing the tools. (ADB - 2013/10/09) - -* Make provided by Solaris fails in "make check". Solaris users should use - gmake to build and install HDF5 software. (AKC - 2013/10/08 - HDFFV-8534) - -* On OpenVMS, two soft conversion functions (H5T__conv_i_f and H5T__conv_f_i) - have bugs. They convert data between floating-point numbers and integers. - But the library's default is hard conversion. The user should avoid - explicitly enabling soft conversion between floating-point numbers and - integers. (Issue VMS-8; SLU - 2013/09/19) - -* On OpenVMS, ZLIB 1.2.8 library doesn't work properly. ZLIB 1.2.5 works - fine. So please use ZLIB 1.2.5 to build HDF5 library. 
(Issue VMS-5; - SLU 2013/09/19) - -* When building using the Cray compilers on Cray machines, HDF5 - configure mistakenly thinks the compiler is an intel compiler and - sets the -std=c99 flag which breaks configure on Cray. To build HDF5 - properly on a Cray machine, please consult with the instructions in - INSTALL_parallel for building on Hopper. - (MSC - 2013/04/26 - HDFFV-8429) - -* The h5dump and h5diff utilities occasionally produce different output - between Linux and Windows systems. This is caused by lower-level library - routines that fail to write fill values to the user's buffer when reading - unallocated chunks from datasets that have a fill value set to - H5D_FILL_VALUE_DEFAULT. Due to platform differences the return of - spurious data values has only been encountered on Windows 32-bit systems. - (Issue HDFFV-8247; JP - 2013/03/27) - -* The 5.9 C++ compiler on Sun failed to compile a C++ test ttypes.cpp. It - complains with this message: - "/home/hdf5/src/H5Vprivate.h", line 130: Error: __func__ is not defined. - - The reason is that __func__ is a predefined identifier in C99 standard. The - HDF5 C library uses it in H5private.h. The test ttypes.cpp includes - H5private.h (H5Tpkg.h<-H5Fprivate.h<-H5Vprivate.h<-H5private.h). Sun's 5.9 - C++ compiler doesn't support __func__, thus fails to compile the C++ test. - But Sun's 5.11 C++ compiler does. To check whether your Sun C++ compiler - knows this identifier, try to compile the following simple C++ program: - #include - - int main(void) - { - printf("%s\n", __func__); - return 0; - } - (SLU - 2012/11/5) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD with the - native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the - ports (and probably gcc releases after that). - (QAK - 2012/10/19) - -* The following h5dump test case fails in BG/P machines (and potentially other - machines that use a command script to launch executables): - - h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0" - tno-subset.h5 - - This is due to the embedded spaces in the dataset name being interpreted - by the command script launcher as meta-characters, thus passing three - arguments to h5dump's -d flag. The command passes if run by hand, just - not via the test script. - (AKC - 2012/05/03) - -* On hopper, the build failed when RUNSERIAL and RUNPARALLEL are set - to aprun -np X, because the H5lib_settings.c file was not generated - properly. Not setting those environment variables works, because - configure was able to automatically detect that it is a Cray system - and used the proper launch commands when necessary. - (MSC - 2012/04/18) - -* The data conversion test dt_arith.c fails in "long double" to integer - conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library - is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel - (3.2.2 on Fedora) do not have the problem. Users should lower the - optimization level (-O1 or -O0) by defining CFLAGS in the command line of - "configure" like: - - CFLAGS=-O1 ./configure - - This will overwrite the library's default optimization level. - (SLU - 2012/02/07 - HDFFV-7829) - This issue is no longer present on Ubuntu 12.10 (3.5.0 kernel) with - gcc 4.7.2. - -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. 
The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release. - (DER - 2011/10/14 - HDFFV-8235) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in a future release. - (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. - (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. - (AKC - 2011/05/07) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD. - (QAK - 2011/04/26) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the fortran and hl/fortran - tests fail. We are looking into the issue. HL and C++ shared libraries - should now be working as intended, however. - (MAM - 2011/04/20) - -* The --with-mpe configure option does not work with Mpich2. - (AKC - 2011/03/10) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. 
If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. - (CMC - 2009/04/28) - -* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - - -%%%%1.8.11%%%% - - -HDF5 version 1.8.11 released on 2013-05-08 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.10 and -HDF5-1.8.11-*, and contains information on the platforms tested and -known problems in HDF5-1.8.11-*. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.11 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.11 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.11 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.11 (current -release) versus Release 1.8.10": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.10 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Configuration - ------------- - - Libtool version number is changed to 8.0.0 because there are API - changes. See below for details. (AKC - 2013/05/07 HDFFV-8435) - - Mac OS X 10.7 (Lion) and 10.8 (Mountain Lion) uses clang/clang++ as the - default C and C++ compilers. 
(AKC - 2013/04/19 HDFFV-8245) - - CMake minimum is now 2.8.10. (ADB 2013/1/14) - - A new tool, cmakehdf5, which is a build command script similar to - buildhdf5 is added and is available in the bin directory. - (AKC - 2013/01/16 HDFFV-8336) - - Library - ------- - - The library can load filter libraries dynamically during runtime. Users - can set the search path through environment variable HDF5_PLUGIN_PATH - and call H5Pset_filter to enable a dynamic filter. (SLU - 2013/04/08) - - Added new API functions H5Dscatter and H5Dgather to scatter data to and - and gather data from a selection within a memory buffer. - (NAF - 2013/02/05) - - The library now supports the data conversion from enumeration to numeric - (integer and floating-point number) datatypes. See Issue HDFFV-8221. - (SLU - 2012/10/23) - - Parallel Library - ---------------- - - None - - Tools - ----- - - h5dump: added new option -O or -ddl to output the ddl text to a file. This - is a complement to the -o or --output option, which redirects the data to - a file. HDFFV-8229 (ADB - 2013/2/25) - - High-Level APIs - --------------- - - A new API function, H5DOwrite_chunk. This function writes a data chunk - directly into a file, bypassing hyperslab selection, data conversion, - and the filter pipeline. The user must be careful with the function and - clearly understand the I/O process of the library. (SLU - 2013/2/11) - - Fortran API - ----------- - - New API functions added (MSB - 2013/3/23): - - h5odecr_refcount_f, h5oexists_by_name_f, h5oget_comment_f, - h5oget_comment_by_name_f, h5oincr_refcount_f, h5oopen_by_idx_f, - h5oset_comment_f, h5oset_comment_by_name_f, h5oset_comment_by_name_f - - F2003: h5oget_info_f, h5oget_info_by_idx_f, h5ovisit_by_name_f - - - C++ API - ------- - - None - - -Support for New Platforms, Languages, and Compilers -=================================================== - - SunOS 5.11 (emu) 32-bit and 64-bit with Sun C/C++ 5.12 compiler and - Sun Fortran 95 8.6 compiler. - - Visual Studio 2012 w/ Intel Fortran 13 on Windows 7 - - g95 released new version recently and is tested in this release. - -Bug Fixes since HDF5-1.8.10 -========================== - - Configuration - ------------- - - Fixed Thread-safe configure failure for the AIX platform. - (AKC - 2013/04/19 HDFFV-8390) - - Configure will check the result of header searches before searching for - the library. - Fixes HDFFV-8257 (ADB 2013/03/04) - - HDF does not support building SHARED Fortran libraries on OSX. Added - CMake code to check for this condition. - Fixes HDFFV-8227 (ADB 2013/03/04) - - CMake builds on Windows will no longer use legacy naming for libraries. - The "dll" tag will no longer be added to the name of *.lib and *.dll. - The option HDF_LEGACY_NAMING is now OFF by default. - Fixes HDFFV-8292 (ADB 2013/01/30) - - Library - ------- - - The library now behaves correctly when performing large I/O operations - on Mac OS-X. Previously, single I/O operations > 2 GB would fail - since the Darwin read/write calls cannot handle the number of bytes - that their parameter types imply. - Fixes HDFFV-7975 and HDFFV-8240 (DER 2013/01/07) - - Fixed a bug in the core VFD that causes failures when opening files - > 2 GB. - Fixes HDFFV-8124 and HDFFV-8158 (DER 2013/01/07) - - Fixed a bug where uninitialized memory was read during variable-length - type conversion. This caused segfaults in netCDF. Fixes HDFFV-8159. - (DER 2013/03/30) - - Removed the H5Pset/get_dxpl_multi functions from the library. 
The - intended functionality for them was never fully implemented, and they - have always been fundamentally broken. NOTE: This does not affect - setting the multi VFD or any expected VFD functionality. Multi VFD - usage remains unchanged. - Fixes HDFFV-8296. (DER 2013/03/30) - - Parallel Library - ---------------- - - None - - Performance - ------------- - - None - - Tools - ----- - - h5redeploy is changed to do this by default: - Installation directories: - prefix architecture-independent files. - exec_prefix architecture-dependent files, default is . - libdir libraries, default is /lib. - includedir header files, default is . - This allows users to just change the first line of prefix=<...> and the - effect will change libdir and includedir too. (AKC 2013/04/05 HDFFV-8358) - - h5repack: Fixed failure to convert the layout of a small chunked - dataset (size < 1K) to contiguous layout. HDFFV-8214 (JKM 2013/03/26) - - h5dump: Fixed displaying compression ratio for unknown or user-defined - filters. HDFFV-8344 (XCAO 2013/03/19) - - h5dump: Changed UNKNOWN_FILTER to USER_DEFINED_FILTER for user defined - filter. HDFFV-8346 (XCAO 2013/03/19) - - h5diff: Fixed to return the correct exit code 1 when the program - detects a unique extra attribute. Prior to this fix, h5diff returned - exit code 0 indicating the two files are identical. - HDFFV-7643 (JKM 2013/02/15) - - h5dump: Fixed writing nulls to a binary file when exporting a dataset - with compound string datatype. HDFFV-8169 (ADB 2013/1/31) - - The following h5stat test case failed in BG/P machines (and potentially - other machines that display extra output if an MPI task returns with a - non-zero code.) - Testing h5stat notexist.h5 - The test script was fixed to ignore the extra output. HDFFV-8233 - (AKC - 2012/11/30) - - h5diff: Improved speed when comparing HDF5 files with lots of - attributes. Much slower performance was identified with release versions - from 1.8.7 to 1.8.10 compared to 1.8.6. (JKM 2012/10/19) - - F90 API - ------- - - The integer type of the 'offset' argument in h5pset_external_f and - h5pget_external_f was changed to INTEGER(KIND=OFF_T) to support 8-byte - integers, matching the C type definition of off_t. (MSB - 2013/3/23) - - h5fc updated to recognize .f95, .f03 and .f08 file extensions. - - C++ API - ------ - - The C++ wrappers DSetMemXferPropList::setMulti/getMulti were removed - because the C functions H5Pset/get_dxpl_multi functions are removed - from the library. Fixes HDFFV-8296 by DER. (BMR 2013/03/30) - - An exception thrown by an internal function was not propagating to the - test program during stack unwinding, so it couldn't be caught by the - test, and the program terminated "without an active exception." It - seemed that the problem happened when c_str() was used to generate - an equivalent const char* from a std::string and the resulting string - was passed to the internal function. As a work-around, we added a - try/catch around the the call to the internal function and when the - exception is caught there, it is re-thrown. Fixes HDFFV-8067. - (BMR 2013/03/30) - - High-Level APIs: - ------ - - Fixed a problem with H5DSget_scale_name including the NULL terminator - in the size calculation returned by the function. 
The API was changed - to NOT include the NULL terminator in the size of name returned - (MSB- 2013/2/10) - - Fortran High-Level APIs: - ------ - - None - - Testing - ------- - - In some Mac systems, testlibinfo.sh failed with this error: - Check file ../src/.libs/libhdf5.7.dylib - strings: object: ../src/.libs/libhdf5.7.dylib malformed object \ - (unknown load command 15) - The strings command of Mac systems inspects library files, and older - versions of strings may not know newer library formats, resulting - in errors. Fixed by sending the library file as stdin to the strings - command to avoid this problem. (AKC - 2013/03/08 HDFFV-8305) - - Fixed a typo in the ERROR macro in test/testhdf5.h. It segmentation - faulted when used before. (AKC - 2013/02/12 HDFFV-8267) - -Supported Platforms -=================== -The following platforms are supported and have been tested for this release. -They are built with the configure process unless specified otherwise. - - AIX 5.3 xlc 10.1.0.5 - (NASA G-ADA) xlC 10.1.0.5 - xlf90 12.1.0.6 - - Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP i686 i686 i386 compilers for 32-bit applications; - (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-54) - Version 4.6.3 - PGI C, Fortran, C++ Compilers for 32-bit - applications; - Version 11.9-0 - Version 12.5-0 - Intel(R) C, C++, Fortran Compiler for 32-bit - applications; - Version 12.1 (Build 20110811) - Version 12.1 (Build 20120212) - - Linux 2.6.18-308.16.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers for 64-bit applications; - (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-52) - Version 4.6.3 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 11.9-0 - Version 12.5-0 - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64; - Version 12.1 (Build 20110811) - Version 12.1 (Build 20120212) - - Linux 2.6.32-358.2.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers: - (platypus) Version 4.4.7 20120313 - Intel(R) C (icc), C++ (icpc), Fortran (icc) - compilers: - Version 12.1 20120212 - - Linux 2.6.32-358.2.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3) - (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3) - IBM XL C/C++ V11.1 - IBM XL Fortran V13.1 - - SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc - (emu) Sun Fortran 95 8.6 SunOS_sparc - Sun C++ 5.12 SunOS_sparc - - Windows 7 Visual Studio 2008 w/ Intel Fortran 11 (cmake) - Visual Studio 2010 w/ Intel Fortran 12 (cmake) - Visual Studio 2012 w/ Intel Fortran 13 (cmake) - Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11 (cmake) - Visual Studio 2010 w/ Intel Fortran 12 (cmake) - Visual Studio 2012 w/ Intel Fortran 13 (cmake) - Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran) - (cmake and autotools) - - Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - 64-bit gfortran GNU Fortran (GCC) 4.6.2 - (fred) Intel C (icc), Fortran (ifort), C++ (icpc) - 12.1 Build 20120928 - - Mac OS X Lion 10.7.3 Apple clang/clang++ version 3.0 from Xcode 4.6.1 - 64-bit gfortran GNU Fortran (GCC) 4.6.2 - (duck) Intel icc/icpc/ifort version 13.0 - - Mac OS X Mountain Lion 10.8.1 Apple clang/clang++ version 4.2 
from Xcode 4.6.1 - 64-bit gfortran GNU Fortran (GCC) 4.6.2 - (wren) Intel icc/icpc/ifort version 13.0.1.119 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.11 32-bit n y/y n y y y -Solaris2.11 64-bit n y/n n y y y -Windows 7 y y/y n y y y -Windows 7 x64 y y/y n y y y -Windows 7 Cygwin n y/n n y y y -Windows 7 x64 Cygwin n y/n n y y y -Mac OS X Snow Leopard 10.6.8 64-bit n y/y n y y y -Mac OS X Lion 10.7.3 64-bit n y/y n y y y -Mac OS X Mountain Lion 10.8.1 64-bit n y/y n y y y -AIX 5.3 32- and 64-bit n y/n n y y y -CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y -CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y -CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y -CentOS 5.9 Linux 2.6.18 x86_64 GNU y y/y y y y y -CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y -CentOS 5.9 Linux 2.6.18 x86_64 PGI n y/y n y y y -CentOS 6.4 Linux 2.6.32 x86_64 GNU n y/n n y y y -CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y -Linux 2.6.32-358.2.1.el6.ppc64 n y/n n y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.11 32-bit y y y y -Solaris2.11 64-bit y y y y -Windows 7 y y y y -Windows 7 x64 y y y y -Windows 7 Cygwin n n n y -Windows 7 x64 Cygwin n n n y -Mac OS X Snow Leopard 10.6.8 64-bit y n y n -Mac OS X Lion 10.7.3 64-bit y n y y -Mac OS X Mountain Lion 10.8.1 64-bit y n y y -AIX 5.3 32- and 64-bit y n n y -CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y -CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n -CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n -CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y -CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n -CentOS 5.9 Linux 2.6.18 x86_64 PGI y y y n -CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n -CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n -Linux 2.6.32-358.2.1.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. - - Linux 2.6.18-308.13.1.el5PAE MPICH mpich2-1.4.1p1 compiled with - #1 SMP i686 i686 i386 gcc 4.1.2 and gfortran 4.1.2 - (jam) g95 (GCC 4.0.3 (g95 0.94!) - - Linux 2.6.18-308.16.1.el5 MPICH mpich2-1.4.1p1 compiled with - #1 SMP x86_64 GNU/Linux gcc 4.1.2 and gfortran 4.1.2 - (koala) g95 (GCC 4.0.3 (g95 0.94!) 
- - FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719 - (loyalty) gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719 - (freedom) gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - Debian6.0.7 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux - gcc (Debian 4.4.5-8) 4.4.5 - GNU Fortran (Debian 4.4.5-8) 4.4.5 - (cmake and autotools) - - Fedora18 3.7.9-205.fc18.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.7.2 20121109 (Red Hat 4.7.2-8) - GNU Fortran (GCC) 4.7.2 20120507 (Red Hat 4.7.2-8) - (cmake and autotools) - - SUSE 12.3 3.7.10-1.1-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.7.2 - GNU Fortran (SUSE Linux) 4.7.2 - (cmake and autotools) - - Ubuntu 12.10 3.5.0-25-generic #39-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.7.2-2ubuntu1) 4.7.2 - GNU Fortran (Ubuntu/Linaro 4.7.2-2ubuntu1) 4.7.2 - (cmake and autotools) - - Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46 - hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai - pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai - pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai - - -Known Problems -============== - -* When building using the Cray compilers on Cray machines, HDF5 - configure mistakenly thinks the compiler is an intel compiler and - sets the -std=c99 flag which breaks configure on Cray. To build HDF5 - properly on a Cray machine, please consult with the instructions in - INSTALL_parallel for building on Hopper. - (MSC - 2013/04/26 - HDFFV-8429) - -* The h5dump and h5diff utilities occasionally produce different output - between Linux and Windows systems. This is caused by lower-level library - routines that fail to write fill values to the user's buffer when reading - unallocated chunks from datasets that have a fill value set to - H5D_FILL_VALUE_DEFAULT. Due to platform differences the return of - spurious data values has only been encountered on Windows 32-bit systems. - (Issue HDFFV-8247; JP - 2013/03/27) - -* The 5.9 C++ compiler on Sun failed to compile a C++ test ttypes.cpp. It - complains with this message: - "/home/hdf5/src/H5Vprivate.h", line 130: Error: __func__ is not defined. - - The reason is that __func__ is a predefined identifier in C99 standard. The - HDF5 C library uses it in H5private.h. The test ttypes.cpp includes - H5private.h (H5Tpkg.h<-H5Fprivate.h<-H5Vprivate.h<-H5private.h). Sun's 5.9 - C++ compiler doesn't support __func__, thus fails to compile the C++ test. - But Sun's 5.11 C++ compiler does. To check whether your Sun C++ compiler - knows this identifier, try to compile the following simple C++ program: - #include - - int main(void) - { - printf("%s\n", __func__); - return 0; - } - (SLU - 2012/11/5) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD with the - native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the - ports (and probably gcc releases after that). - (QAK - 2012/10/19) - -* The following h5dump test case fails in BG/P machines (and potentially other - machines that use a command script to launch executables): - - h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0" - tno-subset.h5 - - This is due to the embedded spaces in the dataset name being interpreted - by the command script launcher as meta-characters, thus passing three - arguments to h5dump's -d flag. The command passes if run by hand, just - not via the test script. 
- (AKC - 2012/05/03) - -* On hopper, the build failed when RUNSERIAL and RUNPARALLEL are set - to aprun -np X, because the H5lib_settings.c file was not generated - properly. Not setting those environment variables works, because - configure was able to automatically detect that it's a Cray system - and used the proper launch commands when necessary. - (MSC - 2012/04/18) - -* The data conversion test dt_arith.c fails in "long double" to integer - conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library - is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel - (3.2.2 on Fedora) doesn't have the problem. Users should lower the - optimization level (-O1 or -O0) by defining CFLAGS in the command line of - "configure" like: - - CFLAGS=-O1 ./configure - - This will overwrite the library's default optimization level. - (SLU - 2012/02/07 - HDFFV-7829) - This issue is no longer present on Ubuntu 12.10 (3.5.0 kernel) with - gcc 4.7.2. - -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release. - (DER - 2011/10/14 - HDFFV-8235) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in a future release. - (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. - (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. - (AKC - 2011/05/07) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD. - (QAK - 2011/04/26) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the fortran and hl/fortran - tests fail. We are looking into the issue. HL and C++ shared libraries - should now be working as intended, however. - (MAM - 2011/04/20) - -* The --with-mpe configure option does not work with Mpich2. - (AKC - 2011/03/10) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. 
We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* The library's test dt_arith.c showed a compiler's rounding problem on - Cygwin when converting from unsigned long long to long double. The - library's own conversion works fine. We defined a macro for Cygwin to - skip this test until we can solve the problem. - (SLU - 2010/05/05 - HDFFV-1264) - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. - (CMC - 2009/04/28) - -* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - - -%%%%1.8.10-patch1%%%% - - -HDF5 version 1.8.10-patch1 released on 2013-01-22 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.9 and -HDF5 1.8.10, and contains information on the platforms tested and -known problems in HDF5-1.8.10. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. 
- -Links to the HDF5 1.8.10 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.10 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.10 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.10 (current -release) versus Release 1.8.9": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.10 -- Bug Fixes since HDF5-1.8.9 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Configuration - ------------- - - A new tool, cmakehdf5, which is a build command script similar to - buildhdf5 is added and is available in the bin directory. - (AKC - 2013/01/21) - - Library - ------- - - Updated to latest autotools and changed all hard *.sh scripts to - configure managed *.sh.in files. Removed overloading of autotools - TESTS variable by examples and tests. Renamed configure.in to - configure.ac. (ADB - 2012/08/23 - HDFFV-8129) - - The data sieve buffer size was set for all the datasets in the file. It - could waste memory if any dataset size is smaller than the sieve buffer - size. Now the library picks the smaller one between the dataset size - and the sieve buffer size from the file access property. See Issue 7934. - (SLU - 2012/4/11) - - Parallel Library - ---------------- - - Added the H5Pget_mpio_no_collective_cause() function that retrieves - reasons why the collective I/O was broken during read/write IO access. - (JKM - 2012/08/30 HDFFV-8143) - - - Added H5Pget_mpio_actual_io_mode_f (MSB - 2012/09/27) - - Tools - ----- - - h5import: Changed to allow the use of h5dump output as input files to - h5import. h5dump must include the "-p" option to print the properties; - configuration file is captured output of h5dump. The restrictions are - that only one dataset with a simple datatype (integer, floating-point, - or string) can be processed. Integers and floating-point imports from - h5dump must use the "binary" option for the data file. The string version - uses the h5dump "-y --width=1" options to disable the indexing printouts, - print single columns, and obviously NOT use the "binary" option. - (ADB - 2012/07/19 HDFFV-721) - - High-Level APIs - --------------- - - None - - Fortran API - ----------- - - Fixed a typo in return value of the nh5dread_f_c function (was 1 - instead of 0 on success); fixed the return value to make it consistent - with other Fortran functions; cleaned debug statements from the code. 
- (EIP - 2012/06/23) - - C++ API - ------- - - None - - -Support for New Platforms, Languages, and Compilers -=================================================== - - None - -Bug Fixes since HDF5-1.8.10 -=========================== - Library - ------- - - The library now behaves correctly when performing large I/O operations on - Mac OS-X. Previously, single I/O operations > 2 GB would fail since the - Darwin read/write calls cannot handle the number of bytes that their - parameter types imply. - Fixes HDFFV-7975 and HDFFV-8240 (DER - 07 JAN 2013) - - Fixed a bug in the core VFD that cause failures when opening files > 2 GB. - Fixes HDFFV-8124 and HDFFV-8158 (DER - 07 JAN 2013) - - Tools - ----- - - The following h5stat test case failed in BG/P machines (and potentially - other machines that display extra output if an MPI task returns with a - non-zero code.) - Testing h5stat notexist.h5 - The test script was fixed to ignore the extra output. - HDFFV-8233 (AKC - 2012/12/17) - - h5diff: Fixed slowness when comparing HDF5 files with many attributes. - Much slower performance was identified with later release version - (from 1.8.7 to 1.8.10) compared to 1.8.6. The issue was introduced - from fixing an attribute related bug for 1.8.7 release in the past. - HDFFV-8145 (JKM 2012/12/13) - - Testing - ------- - - None - -Bug Fixes since HDF5-1.8.9 -========================== - - Configuration - ------------- - - Fixed configure --enable-production to not use -O optimization for Lion - and Mountain Lion systems when gcc (i686-apple-darwin11-llvm-gcc-4.2 - (GCC) 4.2.1) is used. Somehow the -O optimization will cause some of - the hard conversion code in test/dt_arith.c to fail. HDFFV-8017. - (AKC - 2012/10/10) - - Fixed AIX Fortran compiler flags to use appropriate settings for - debugging, profiling, and optimization situations. HDFFV-8069. - (AKC 2012/09/27) - - Library - ------- - - Fixed a memory leak exposed when inserting/removing a property - from a property list several times. HDFFV-8022. (MSC 2012/05/18) - - The file_image test will fail in the "initial file image and callbacks in - the core VFD" sub-test if the source directory is read-only as the test - fails to create its test files in the build directory. This has been - fixed. HDFFV-8009 (AKC - 2012/07/06) - - - Parallel Library - ---------------- - - The MPI-POSIX VFD was updated to include the POSIX and Windows - correctness features added that had already been added to the other VFDs. - HDFFV-8058/7845. (DER 2012/09/17) - - Performance - ------------- - - Removed program perform/benchpar from the enable-build-all list. The - program will be retired or moved to another location. HDFFV-8156 - (AKC 2012/10/01) - - Retired program perform/mpi-perf. Its purpose has been incorporated - into h5perf. (AKC 2012/09/21) - - Tools - ----- - - h5repack: "h5repack -f NONE file1.h5 out.h5" command failed if - source file contains chunked dataset and a chunk dim is bigger than - the dataset dim. Another issue is that the command changed max dims - if chunk dim is smaller than the dataset dim. These issue occurred - when dataset size is smaller than 64k (compact size limit) Fixed both. - HDFFV-8012 (JKM 2012/09/24) - - h5diff: Fixed the counter in verbose mode (-v, -r) so that it will no - longer add together the differences between datasets and the differences - between attributes of those datasets. This change makes the output of - verbose mode consistent for datasets, groups, and committed datatypes. 
- HDFFV-5919 (JKM 2012/09/10) - - h5diff: Fixed the incorrect result when comparing attribute data - values and the data type has the same class but different sizes. - HDFFV-7942 (JKM 2012/08/15) - - h5dump: Replaced single element fwrite with block writes. - HDFFV-1208 (ADB 2012/08/13) - - h5diff: Fixed test failure for "make check" due to failure of - copying test files when performed in HDF5 source tree. Also applied - to other tools. HDFFV-8107 (JKM 2012/08/01) - - ph5diff: Fixed intermittent hang issue on a certain operation in - parallel mode. It was detected by daily test for comparing - non-comparable objects, but it could have occurred in other - operations depending on machine condition. HDFFV-8003 (JKM 2012/08/01) - - h5diff: Fixed the function COPY_TESTFILES_TO_TESTDIR() of testh5diff.sh - to better report when there is an error in the file copying. - HDFFV-8105 (AKC 2012/07/22) - - h5dump: Fixed the sort by name display to maintain correct parent/child - relationships between ascending/descending order. - HDFFV-8095 (ADB 2012/07/12) - - h5dump: Fixed the display by creation order when using option -n - (print contents). - HDFFV-5942 (ADB 2012/07/09) - - h5dump: Changed to allow H5T_CSET_UTF8 to be displayed in h5dump output. - Used technique similar to what was done in h5ls (matches library - options). - HDFFV-7999 (ADB 2012/05/23) - - h5diff: Fixed the tool so that it will not check and display the status - of dangling links without setting the --follow-symlinks option. This - also improved performance when comparing lots of external links without - the --follow-symlinks option. - HDFFV-7998 (JKM 2012/04/26) - - F90 API - ------- - - - Fixed a typo in return value of the nh5dread_f_c function (was 1 - instead of 0 on success); fixed the return value to make it consistent - with other Fortran functions; cleaned debug statements from the code. - (EIP - 2012/06/23) - - - Fixed a problem writing/reading control characters to a dataset; writing - a string containing alerts, backspace, carriage_return, form_feed, - horizontal_tab, vertical_tab, or new_line is now tested and working. - (MSB - 2012/09/01) - - - Corrected the integer type of H5S_UNLIMITED_F to HSIZE_T (MSB - 2012/09/01) - - - Corrected the number of continuation lines in the src files - to be less than 32 lines for F95 compliance. (MSB - 2012/10/01) - - C++ API - ------ - - None - - High-Level APIs: - ------ - - - Fixed problem with H5TBdelete_record destroying all data following the - deletion of a row. (MSB- 2012/7/26) - - - Fixed H5LTget_attribute_string not closing an object identifier when an - error occurs. (MSB- 2012/7/21) - - - Corrected the return type of H5TBAget_fill from herr_t to htri_t to - reflect that a return value of 1 indicates that a fill value is - present, 0 indicates a fill value is not present, and <0 indicates an - error. 
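      A minimal sketch of acting on the new return convention (the helper below
      and its arguments are hypothetical; the usual hdf5_hl prototype
      htri_t H5TBAget_fill(hid_t loc_id, const char *dset_name, hid_t dset_id,
      unsigned char *dst_buf) is assumed):

          #include <stdio.h>
          #include "hdf5.h"
          #include "hdf5_hl.h"

          /* Report whether the table dset_name under loc_id defines a fill
           * value; rec_buf must be large enough for one packed table record. */
          static void report_fill(hid_t loc_id, const char *dset_name,
                                  hid_t dset_id, unsigned char *rec_buf)
          {
              htri_t has_fill = H5TBAget_fill(loc_id, dset_name, dset_id, rec_buf);

              if (has_fill > 0)
                  printf("%s: fill value present\n", dset_name);
              else if (has_fill == 0)
                  printf("%s: no fill value set\n", dset_name);
              else
                  fprintf(stderr, "%s: H5TBAget_fill failed\n", dset_name);
          }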
- - Fortran High-Level APIs: - ------ - - None - -Supported Platforms -=================== - AIX 5.3 xlc 10.1.0.5 - (NASA G-ADA) xlC 10.1.0.5 - xlf90 12.1.0.6 - - Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP i686 i686 i386 compilers for 32-bit applications; - (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-52) - Version 4.6.3 - PGI C, Fortran, C++ Compilers for 32-bit - applications; - Version 11.9-0 - Intel(R) C, C++, Fortran Compiler for 32-bit - applications; - Version 12.1 - MPICH mpich2-1.4.1p1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - Linux 2.6.18-308.16.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers for 32-bit applications; - (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-52) - Version 4.6.3 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 11.9-0 - Version 12.5-0 - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64; - Version 12.1 (Build 20110811) - Version 12.1 (Build 20120212) - MPICH mpich2-1.4.1p1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - Linux 2.6.32-220.7.1.el6.ppc64 gcc (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.6 20110731 - (ostrich) GNU Fortran (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3) - - Linux 2.6.32-220.23.1.1chaos Intel C, C++, Fortran Compilers - ch5.x86_64 GNU/Linux Version 12.1.5.339 - (LLNL Aztec) - - IBM Blue Gene/P XL C for Blue Gene/P, bgxlc V9.0 - (LLNL uDawn) XL C++ for Blue Gene/P, bgxlC V9.0 - XL Fortran for Blue Gene/P, bgxlf90 V11.1 - - SunOS 5.10 32- and 64-bit Sun C 5.9 Sun OS_sparc Patch 124867-16 - (linew) Sun Fortran 95 8.3 Sun OS_sparc Patch 127000-13 - Sun C++ 5.9 Sun OS_sparc Patch 124863-26 - Sun C 5.11 SunOS_sparc - Sun Fortran 95 8.5 SunOS_sparc - Sun C++ 5.11 SunOS_sparc - - Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - - Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - - Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 w/ Intel Fortran 12 (cmake) - Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 w/ Intel Fortran 12 (cmake) - Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran) - (cmake and autotools) - - Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - (fred) gfortran GNU Fortran (GCC) 4.6.2 - Intel C (icc), Fortran (ifort), C++ (icpc) - 12.1.0.038 Build 20110811 - - Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - Intel 32-bit gfortran GNU Fortran (GCC) 4.6.1 - (tejeda) Intel C (icc), Fortran (ifort), C++ (icpc) - 12.1.0.038 Build 20110811 - - Mac OS X Lion 10.7.3 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.2.1 - 32- and 64-bit g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.2.1 - (duck) gfortran GNU Fortran (GCC) 4.6.2 - - Mac OS X Mountain Lion 10.8.1 cc Apple clang version 4.0 from Xcode 4.5.1 - (owl) c++ Apple clang version 4.0 from Xcode 4.5.1 - gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.5.1 - g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.5.1 - gfortran GNU Fortran (GCC) 4.6.2 - - -Tested Configuration 
Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.10 32-bit n y/y n y y y -Solaris2.10 64-bit n y/n n y y y -Windows 7 y y/n n y y y -Windows 7 x64 y y/n n y y y -Mac OS X Snow Leopard 10.6.8 32-bit n y/y n y y n -Mac OS X Snow Leopard 10.6.8 64-bit n y/y n y y y -Mac OS X Lion 10.7.3 32-bit n y/y n y y n -Mac OS X Lion 10.7.3 64-bit n y/y n y y y -Mac OS X Mountain Lion 10.8.1 64-bit n y/n n y y n -AIX 5.3 32- and 64-bit y y/n y y y y -CentOS 5.5 Linux 2.6.18-308 i686 GNU y y/y y y y y -CentOS 5.5 Linux 2.6.18-308 i686 Intel n y/y n y y y -CentOS 5.5 Linux 2.6.18-308 i686 PGI n y/y n y y y -CentOS 5.5 Linux 2.6.18 x86_64 GNU y y/y y y y y -CentOS 5.5 Linux 2.6.18 x86_64 Intel n y/y n y y y -CentOS 5.5 Linux 2.6.18 x86_64 PGI n y/y n y y y -Linux 2.6.32-220.7.1.el6.ppc64 n y/n n y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.10 32-bit y y y y -Solaris2.10 64-bit n n n n -Windows 7 y y y y -Windows 7 x64 y y y y -Mac OS X Snow Leopard 10.6.8 32-bit y n y n -Mac OS X Snow Leopard 10.6.8 64-bit y n y n -Mac OS X Lion 10.7.3 32-bit y n y y -Mac OS X Lion 10.7.3 64-bit y n y y -Mac OS X Mountain Lion 10.8.1 64-bit y n y y -AIX 5.3 32- and 64-bit n n n y -CentOS 5.5 Linux 2.6.18-308 i686 GNU y y y y -CentOS 5.5 Linux 2.6.18-308 i686 Intel y y y n -CentOS 5.5 Linux 2.6.18-308 i686 PGI y y y n -CentOS 5.5 Linux 2.6.18 x86_64 GNU y y y y -CentOS 5.5 Linux 2.6.18 x86_64 Intel y y y n -CentOS 5.5 Linux 2.6.18 x86_64 PGI y y y n -Linux 2.6.32-220.7.1.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. 
- - FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719 - (loyalty) gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719 - (freedom) gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - Debian6.0.3 2.6.32-5-686 #1 SMP i686 GNU/Linux - gcc (Debian 4.4.5-8) 4.4.5 - GNU Fortran (Debian 4.4.5-8) 4.4.5 - (cmake and autotools) - - Debian6.0.3 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux - gcc (Debian 4.4.5-8) 4.4.5 - GNU Fortran (Debian 4.4.5-8) 4.4.5 - (cmake and autotools) - - Fedora17 3.5.2-1.fc17.i6866 #1 SMP i686 i686 i386 GNU/Linux - gcc (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5) - GNU Fortran (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5) - (cmake and autotools) - - Fedora17 3.5.2-1.fc17.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5) - GNU Fortran (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5) - (cmake and autotools) - - SUSE 12.2 3.4.6-2.10-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux - gcc (SUSE Linux) 4.7.1 - GNU Fortran (SUSE Linux) 4.7.1 - (cmake and autotools) - - SUSE 12.2 3.4.6-2.10-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.7.1 - GNU Fortran (SUSE Linux) 4.7.1 - (cmake and autotools) - - Ubuntu 12.04 3.2.0-29-generic #46-Ubuntu SMP i686 GNU/Linux - gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3 - GNU Fortran (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3 - (cmake and autotools) - - Ubuntu 12.04 3.2.0-29-generic #46-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3 - GNU Fortran (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3 - (cmake and autotools) - (Use optimization level -O1) - - Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46 - hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai - pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai - pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai - - -Known Problems -============== -* The C++ and FORTRAN bindings are not currently working on FreeBSD with the - native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the - ports (and probably gcc releases after that). - (QAK - 2012/10/19) - -* The following h5dump test case fails in BG/P machines (and potentially other - machines that use a command script to launch executables): - - h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0" - tno-subset.h5 - - This is due to the embedded spaces in the dataset name being interpreted - by the command script launcher as meta-characters, thus passing three - arguments to h5dump's -d flag. The command passes if run by hand, just - not via the test script. - (AKC - 2012/05/03) - -* On hopper, the build failed when RUNSERIAL and RUNPARALLEL are set - to aprun -np X, because the H5lib_settings.c file was not generated - properly. Not setting those environment variables works, because - configure was able to automatically detect that it's a Cray system - and used the proper launch commands when necessary. - (MSC - 2012/04/18) - -* The data conversion test dt_arith.c fails in "long double" to integer - conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library - is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel - (3.2.2 on Fedora) doesn't have the problem. Users should lower the - optimization level (-O1 or -O0) by defining CFLAGS in the command line of - "configure" like: - - CFLAGS=-O1 ./configure - - This will overwrite the library's default optimization level. 
- (SLU - 2012/02/07 - HDFFV-7829) - -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release. - (DER - 2011/10/14 - HDFFV-8235) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in a future release. - (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. - (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. - (AKC - 2011/05/07) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD. - (QAK - 2011/04/26) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the fortran and hl/fortran - tests fail. We are looking into the issue. HL and C++ shared libraries - should now be working as intended, however. - (MAM - 2011/04/20) - -* The --with-mpe configure option does not work with Mpich2. - (AKC - 2011/03/10) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* The library's test dt_arith.c showed a compiler's rounding problem on - Cygwin when converting from unsigned long long to long double. The - library's own conversion works fine. We defined a macro for Cygwin to - skip this test until we can solve the problem. - (SLU - 2010/05/05 - HDFFV-1264) - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. 
A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. - (CMC - 2009/04/28) - -* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - - -%%%%1.8.10%%%% - - -HDF5 version 1.8.10 released on 2012-10-26 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.9 and -HDF5 1.8.10, and contains information on the platforms tested and -known problems in HDF5-1.8.10. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.10 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.10 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.10 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" 
document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.10 (current -release) versus Release 1.8.9": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.9 -- Supported Platforms -- Supported Configuration Features Summary -- More Tested Platforms -- Known Problems - - -New Features -============ - - Configuration - ------------- - - None - - Library - ------- - - Updated to latest autotools and changed all hard *.sh scripts to - configure managed *.sh.in files. Removed overloading of autotools - TESTS variable by examples and tests. Renamed configure.in to - configure.ac. (ADB - 2012/08/23 - HDFFV-8129) - - The data sieve buffer size was set for all the datasets in the file. It - could waste memory if any dataset size is smaller than the sieve buffer - size. Now the library picks the smaller one between the dataset size - and the sieve buffer size from the file access property. See Issue 7934. - (SLU - 2012/4/11) - - Parallel Library - ---------------- - - Added the H5Pget_mpio_no_collective_cause() function that retrieves - reasons why the collective I/O was broken during read/write IO access. - (JKM - 2012/08/30 HDFFV-8143) - - - Added H5Pget_mpio_actual_io_mode_f (MSB - 2012/09/27) - - Tools - ----- - - h5import: Changed to allow the use of h5dump output as input files to - h5import. h5dump must include the "-p" option to print the properties; - configuration file is captured output of h5dump. The restrictions are - that only one dataset with a simple datatype (integer, floating-point, - or string) can be processed. Integers and floating-point imports from - h5dump must use the "binary" option for the data file. The string version - uses the h5dump "-y --width=1" options to disable the indexing printouts, - print single columns, and obviously NOT use the "binary" option. - (ADB - 2012/07/19 HDFFV-721) - - High-Level APIs - --------------- - - None - - Fortran API - ----------- - - Fixed a typo in return value of the nh5dread_f_c function (was 1 - instead of 0 on success); fixed the return value to make it consistent - with other Fortran functions; cleaned debug statements from the code. - (EIP - 2012/06/23) - - C++ API - ------- - - None - - -Support for New Platforms, Languages, and Compilers -=================================================== - - None - -Bug Fixes since HDF5-1.8.9 -========================== - - Configuration - ------------- - - Fixed configure --enable-production to not use -O optimization for Lion - and Mountain Lion systems when gcc (i686-apple-darwin11-llvm-gcc-4.2 - (GCC) 4.2.1) is used. Somehow the -O optimization will cause some of - the hard conversion code in test/dt_arith.c to fail. HDFFV-8017. - (AKC - 2012/10/10) - - Fixed AIX Fortran compiler flags to use appropriate settings for - debugging, profiling, and optimization situations. HDFFV-8069. - (AKC 2012/09/27) - - Library - ------- - - Fixed a memory leak exposed when inserting/removing a property - from a property list several times. HDFFV-8022. 
(MSC 2012/05/18) - - The file_image test will fail in the "initial file image and callbacks in - the core VFD" sub-test if the source directory is read-only as the test - fails to create its test files in the build directory. This has been - fixed. HDFFV-8009 (AKC - 2012/07/06) - - - Parallel Library - ---------------- - - The MPI-POSIX VFD was updated to include the POSIX and Windows - correctness features added that had already been added to the other VFDs. - HDFFV-8058/7845. (DER 2012/09/17) - - Performance - ------------- - - Removed program perform/benchpar from the enable-build-all list. The - program will be retired or moved to another location. HDFFV-8156 - (AKC 2012/10/01) - - Retired program perform/mpi-perf. Its purpose has been incorporated - into h5perf. (AKC 2012/09/21) - - Tools - ----- - - h5repack: "h5repack -f NONE file1.h5 out.h5" command failed if - source file contains chunked dataset and a chunk dim is bigger than - the dataset dim. Another issue is that the command changed max dims - if chunk dim is smaller than the dataset dim. These issue occurred - when dataset size is smaller than 64k (compact size limit) Fixed both. - HDFFV-8012 (JKM 2012/09/24) - - h5diff: Fixed the counter in verbose mode (-v, -r) so that it will no - longer add together the differences between datasets and the differences - between attributes of those datasets. This change makes the output of - verbose mode consistent for datasets, groups, and committed datatypes. - HDFFV-5919 (JKM 2012/09/10) - - h5diff: Fixed the incorrect result when comparing attribute data - values and the data type has the same class but different sizes. - HDFFV-7942 (JKM 2012/08/15) - - h5dump: Replaced single element fwrite with block writes. - HDFFV-1208 (ADB 2012/08/13) - - h5diff: Fixed test failure for "make check" due to failure of - copying test files when performed in HDF5 source tree. Also applied - to other tools. HDFFV-8107 (JKM 2012/08/01) - - ph5diff: Fixed intermittent hang issue on a certain operation in - parallel mode. It was detected by daily test for comparing - non-comparable objects, but it could have occurred in other - operations depending on machine condition. HDFFV-8003 (JKM 2012/08/01) - - h5diff: Fixed the function COPY_TESTFILES_TO_TESTDIR() of testh5diff.sh - to better report when there is an error in the file copying. - HDFFV-8105 (AKC 2012/07/22) - - h5dump: Fixed the sort by name display to maintain correct parent/child - relationships between ascending/descending order. - HDFFV-8095 (ADB 2012/07/12) - - h5dump: Fixed the display by creation order when using option -n - (print contents). - HDFFV-5942 (ADB 2012/07/09) - - h5dump: Changed to allow H5T_CSET_UTF8 to be displayed in h5dump output. - Used technique similar to what was done in h5ls (matches library - options). - HDFFV-7999 (ADB 2012/05/23) - - h5diff: Fixed the tool so that it will not check and display the status - of dangling links without setting the --follow-symlinks option. This - also improved performance when comparing lots of external links without - the --follow-symlinks option. - HDFFV-7998 (JKM 2012/04/26) - - F90 API - ------- - - - Fixed a typo in return value of the nh5dread_f_c function (was 1 - instead of 0 on success); fixed the return value to make it consistent - with other Fortran functions; cleaned debug statements from the code. 
- (EIP - 2012/06/23) - - - Fixed a problem writing/reading control characters to a dataset; writing - a string containing alerts, backspace, carriage_return, form_feed, - horizontal_tab, vertical_tab, or new_line is now tested and working. - (MSB - 2012/09/01) - - - Corrected the integer type of H5S_UNLIMITED_F to HSIZE_T (MSB - 2012/09/01) - - - Corrected the number of continuation lines in the src files - to be less than 32 lines for F95 compliance. (MSB - 2012/10/01) - - C++ API - ------ - - None - - High-Level APIs: - ------ - - - Fixed problem with H5TBdelete_record destroying all data following the - deletion of a row. (MSB- 2012/7/26) - - - Fixed H5LTget_attribute_string not closing an object identifier when an - error occurs. (MSB- 2012/7/21) - - - Corrected the return type of H5TBAget_fill from herr_t to htri_t to - reflect that a return value of 1 indicates that a fill value is - present, 0 indicates a fill value is not present, and <0 indicates an - error. - - Fortran High-Level APIs: - ------ - - None - -Supported Platforms -=================== - AIX 5.3 xlc 10.1.0.5 - (NASA G-ADA) xlC 10.1.0.5 - xlf90 12.1.0.6 - - Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP i686 i686 i386 compilers for 32-bit applications; - (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-52) - Version 4.6.3 - PGI C, Fortran, C++ Compilers for 32-bit - applications; - Version 11.9-0 - Intel(R) C, C++, Fortran Compiler for 32-bit - applications; - Version 12.1 - MPICH mpich2-1.4.1p1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - Linux 2.6.18-308.16.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers for 32-bit applications; - (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-52) - Version 4.6.3 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 11.9-0 - Version 12.5-0 - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64; - Version 12.1 (Build 20110811) - Version 12.1 (Build 20120212) - MPICH mpich2-1.4.1p1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - Linux 2.6.32-220.7.1.el6.ppc64 gcc (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.6 20110731 - (ostrich) GNU Fortran (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3) - - Linux 2.6.32-220.23.1.1chaos Intel C, C++, Fortran Compilers - ch5.x86_64 GNU/Linux Version 12.1.5.339 - (LLNL Aztec) - - IBM Blue Gene/P XL C for Blue Gene/P, bgxlc V9.0 - (LLNL uDawn) XL C++ for Blue Gene/P, bgxlC V9.0 - XL Fortran for Blue Gene/P, bgxlf90 V11.1 - - SunOS 5.10 32- and 64-bit Sun C 5.9 Sun OS_sparc Patch 124867-16 - (linew) Sun Fortran 95 8.3 Sun OS_sparc Patch 127000-13 - Sun C++ 5.9 Sun OS_sparc Patch 124863-26 - Sun C 5.11 SunOS_sparc - Sun Fortran 95 8.5 SunOS_sparc - Sun C++ 5.11 SunOS_sparc - - Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - - Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - - Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 w/ Intel Fortran 12 (cmake) - Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran) - (cmake and autotools) - - Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 w/ Intel Fortran 12 (cmake) - Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran) - (cmake and autotools) - - Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - 
(fred) gfortran GNU Fortran (GCC) 4.6.2 - Intel C (icc), Fortran (ifort), C++ (icpc) - 12.1.0.038 Build 20110811 - - Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6 - Intel 32-bit gfortran GNU Fortran (GCC) 4.6.1 - (tejeda) Intel C (icc), Fortran (ifort), C++ (icpc) - 12.1.0.038 Build 20110811 - - Mac OS X Lion 10.7.3 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.2.1 - 32- and 64-bit g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.2.1 - (duck) gfortran GNU Fortran (GCC) 4.6.2 - - Mac OS X Mountain Lion 10.8.1 cc Apple clang version 4.0 from Xcode 4.5.1 - (owl) c++ Apple clang version 4.0 from Xcode 4.5.1 - gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.5.1 - g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.5.1 - gfortran GNU Fortran (GCC) 4.6.2 - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90/ F90 C++ zlib SZIP - parallel F2003 parallel -Solaris2.10 32-bit n y/y n y y y -Solaris2.10 64-bit n y/n n y y y -Windows 7 y y/n n y y y -Windows 7 x64 y y/n n y y y -Mac OS X Snow Leopard 10.6.8 32-bit n y/y n y y n -Mac OS X Snow Leopard 10.6.8 64-bit n y/y n y y y -Mac OS X Lion 10.7.3 32-bit n y/y n y y n -Mac OS X Lion 10.7.3 64-bit n y/y n y y y -Mac OS X Mountain Lion 10.8.1 64-bit n y/n n y y n -AIX 5.3 32- and 64-bit y y/n y y y y -CentOS 5.5 Linux 2.6.18-308 i686 GNU y y/y y y y y -CentOS 5.5 Linux 2.6.18-308 i686 Intel n y/y n y y y -CentOS 5.5 Linux 2.6.18-308 i686 PGI n y/y n y y y -CentOS 5.5 Linux 2.6.18 x86_64 GNU y y/y y y y y -CentOS 5.5 Linux 2.6.18 x86_64 Intel n y/y n y y y -CentOS 5.5 Linux 2.6.18 x86_64 PGI n y/y n y y y -Linux 2.6.32-220.7.1.el6.ppc64 n y/n n y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.10 32-bit y y y y -Solaris2.10 64-bit n n n n -Windows 7 y y y y -Windows 7 x64 y y y y -Mac OS X Snow Leopard 10.6.8 32-bit y n y n -Mac OS X Snow Leopard 10.6.8 64-bit y n y n -Mac OS X Lion 10.7.3 32-bit y n y y -Mac OS X Lion 10.7.3 64-bit y n y y -Mac OS X Mountain Lion 10.8.1 64-bit y n y y -AIX 5.3 32- and 64-bit n n n y -CentOS 5.5 Linux 2.6.18-308 i686 GNU y y y y -CentOS 5.5 Linux 2.6.18-308 i686 Intel y y y n -CentOS 5.5 Linux 2.6.18-308 i686 PGI y y y n -CentOS 5.5 Linux 2.6.18 x86_64 GNU y y y y -CentOS 5.5 Linux 2.6.18 x86_64 Intel y y y n -CentOS 5.5 Linux 2.6.18 x86_64 PGI y y y n -Linux 2.6.32-220.7.1.el6.ppc64 y y y n - -Compiler versions for each platform are listed in the preceding -"Supported Platforms" table. - - -More Tested Platforms -===================== -The following platforms are not supported but have been tested for this release. 
- - FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719 - (loyalty) gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719 - (freedom) gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - Debian6.0.3 2.6.32-5-686 #1 SMP i686 GNU/Linux - gcc (Debian 4.4.5-8) 4.4.5 - GNU Fortran (Debian 4.4.5-8) 4.4.5 - (cmake and autotools) - - Debian6.0.3 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux - gcc (Debian 4.4.5-8) 4.4.5 - GNU Fortran (Debian 4.4.5-8) 4.4.5 - (cmake and autotools) - - Fedora17 3.5.2-1.fc17.i6866 #1 SMP i686 i686 i386 GNU/Linux - gcc (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5) - GNU Fortran (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5) - (cmake and autotools) - - Fedora17 3.5.2-1.fc17.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5) - GNU Fortran (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5) - (cmake and autotools) - - SUSE 12.2 3.4.6-2.10-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux - gcc (SUSE Linux) 4.7.1 - GNU Fortran (SUSE Linux) 4.7.1 - (cmake and autotools) - - SUSE 12.2 3.4.6-2.10-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.7.1 - GNU Fortran (SUSE Linux) 4.7.1 - (cmake and autotools) - - Ubuntu 12.04 3.2.0-29-generic #46-Ubuntu SMP i686 GNU/Linux - gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3 - GNU Fortran (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3 - (cmake and autotools) - - Ubuntu 12.04 3.2.0-29-generic #46-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3 - GNU Fortran (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3 - (cmake and autotools) - (Use optimization level -O1) - - Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46 - hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai - pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai - pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai - - -Known Problems -============== -* The following h5stat test case fails in BG/P machines (and potentially other - machines that display extra output if an MPI task returns with a non-zero - code.) - Testing h5stat notexist.h5 - - The test actually runs and passes as expected. It is the extra output from - the MPI process that causes the test script to fail. This will be fixed - in the next release. (AKC - 2012/10/25 - HDFFV-8233) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD with the - native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the - ports (and probably gcc releases after that). - (QAK - 2012/10/19) - -* The following h5dump test case fails in BG/P machines (and potentially other - machines that use a command script to launch executables): - - h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0" - tno-subset.h5 - - This is due to the embedded spaces in the dataset name being interpreted - by the command script launcher as meta-characters, thus passing three - arguments to h5dump's -d flag. The command passes if run by hand, just - not via the test script. - (AKC - 2012/05/03) - -* On hopper, the build failed when RUNSERIAL and RUNPARALLEL are set - to aprun -np X, because the H5lib_settings.c file was not generated - properly. Not setting those environment variables works, because - configure was able to automatically detect that it's a Cray system - and used the proper launch commands when necessary. 
- (MSC - 2012/04/18) - -* The data conversion test dt_arith.c fails in "long double" to integer - conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library - is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel - (3.2.2 on Fedora) doesn't have the problem. Users should lower the - optimization level (-O1 or -O0) by defining CFLAGS in the command line of - "configure" like: - - CFLAGS=-O1 ./configure - - This will overwrite the library's default optimization level. - (SLU - 2012/02/07 - HDFFV-7829) - -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release. - (DER - 2011/10/14 - HDFFV-8235) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in a future release. - (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. - (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. - (AKC - 2011/05/07) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD. - (QAK - 2011/04/26) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the fortran and hl/fortran - tests fail. We are looking into the issue. HL and C++ shared libraries - should now be working as intended, however. - (MAM - 2011/04/20) - -* The --with-mpe configure option does not work with Mpich2. - (AKC - 2011/03/10) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. 
- These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* The library's test dt_arith.c showed a compiler's rounding problem on - Cygwin when converting from unsigned long long to long double. The - library's own conversion works fine. We defined a macro for Cygwin to - skip this test until we can solve the problem. - (SLU - 2010/05/05 - HDFFV-1264) - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. - (CMC - 2009/04/28) - -* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - - -%%%%1.8.9%%%% - - -HDF5 version 1.8.9 released on 2012-05-09 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.8 and -HDF5 1.8.9. It also contains information on the platforms tested and -known problems in HDF5-1.8.9. - -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. 
- -Links to the HDF5 1.8.9 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.9 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.9 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.9 (current -release) versus Release 1.8.8": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.8 -- Platforms Tested -- Supported Configuration Features Summary -- Known Problems - - -New Features -============ - - Configuration - ------------- - - None - - Library - ------- - - Added new feature to merge committed datatypes when copying objects, - using new H5O_COPY_MERGE_COMMITTED_DTYPE_FLAG, modified by new API - routines: H5Padd_merge_committed_dtype_path(), - H5Pfree_merge_committed_dtype_paths(), H5Pset_mcdt_search_cb() and - H5Pget_mcdt_search_cb(). (QAK - 2012/03/30) - - Added new feature which allows working with files in memory in the - same ways files are worked with on disk. New API routines include - H5Pset_file_image, H5Pget_file_image, H5Pset_file_image_callbacks, - H5Pget_file_image_callbacks, H5Fget_file_image, and - H5LTopen_file_image. (QAK - 2012/04/17) - - Parallel Library - ---------------- - - Corrected memory allocation error in MPI datatype construction code. - (QAK - 2012/04/23) - - Add two new routines to set/get the atomicity parameter in the - MPI library to perform atomic operations. Some file systems (for - example PVFS2) do not support atomic updates, so those routines - would not be supported. (MSC - 2012/03/27 - HDFFV-7961) - - Tools - ----- - - h5repack: Added ability to set the metadata block size of the output - file, with the '-M'/'--metadata_block_size' command line parameter. - (QAK - 2012/03/30) - - h5stat: Added ability to display a summary of the file space usage for a - file, with the '-S'/'--summary' command line parameter. (QAK - 2012/03/28) - - h5dump: Added capability for "-a" option to show attributes containing "/" - by using an escape character. For example, for a dataset "/dset" - containing attribute "speed(m/h)", use "h5dump -a "/dset/speed(\/h)" - to show the content of the attribute. (PC - 2012/03/12 - HDFFV-7523) - - h5dump: Added ability to apply command options across multiple files using a - wildcard in the filename. Unix example; "h5dump -H -d Dataset1 tarr*.h5". - Cross platform example; "h5dump -H -d Dataset1 tarray1.h5 tarray2.h5 tarray3.h5". - (ADB - 2012/03/12 - HDFFV-7876). - - h5dump: Added new option --no-compact-subset. This option will not - interpret the '[' character as starting the compact form of - subsetting. This is useful when the "h5dump error: unable to - open dataset "datset_name"" message is output because a dataset - name contains a '[' character. (ADB - 2012/03/05 - HDFFV-7689). 
- - h5repack: Improved performance for big chunked datasets (size > 128MB) - when used with the layout (-l) or compression (-f) options. - Before this change, repacking datasets with chunks with a large first - dimension would take extremely long. For example, repacking a dataset - with chunk dimensions of 1024x5x1 might take many hours to process - while changing a dataset with chunk dimensions set to 1x5x1024 - might take under an hour. After this change, processing the dataset - with chunk dimensions of 1024x5x1 takes about 15 minutes, and processing - a dataset with chunk dimensions of 1x5x1024 takes about 14 minutes. - (JKM - 2012/03/01 - HDFFV-7862) - - High-Level APIs - --------------- - - New API: H5LTpath_valid (Fortran: h5ltpath_valid_f) checks - if a path is correct, determines if a link resolves to a valid - object, and checks that the link does not dangle. (MSB - 2012/03/15) - - Fortran API - ----------- - - - Added for the C API the Fortran wrapper: - h5ocopy_f (MSB - 2012/03/22) - - C++ API - ------- - - None - - -Support for New Platforms, Languages, and Compilers -=================================================== - - None - -Bug Fixes since HDF5-1.8.8 -========================== - - Configuration - ------------- - - Fixed Makefile issue in which "-Wl," was not properly specified - prior to -rpath when building parallel Fortran libraries with - an Intel compiler. (MAM - 2012/03/26) - - Makefiles generated by other packages using h5cc as the compiler - no longer error when 'make' is invoked more than once in order - to 'rebuild' after changes to source. (MAM - 2012/03/26) - - Added code to display the version information of XL Fortran and C++ - in the summary of configure. (AKC - 2012/02/28 - HDFFV-7793) - - Updated all CMakeLists.txt files to indicate the minimum CMake version is - the current standard of 2.8.6 (ADB - 2011/12/05 - HDFFV-7854) - - Library - ------- - - Windows and STDIO correctness changes have been propagated from the SEC2 - and old Windows drivers to the STDIO VFD. (DER - 2012/03/30 - HDFFV-7917) - - Fixed an error that would occur when copying an object with attribute - creation order tracked and indexed. (NAF - 2012/03/28 - HDFFV-7762) - - Fixed a bug in H5Ocopy(): When copying an opened object, call the - object's flush class action to ensure that cached data is flushed so - that H5Ocopy will get the correct data. (VC - 2012/03/27 - HDFFV-7853) - - The istore test will now skip the sparse 50x50x50 test when the VFD does - not support sparse files on that platform. The most important platforms - on which this will be skipped are Windows (NTFS sparse files are not - supported) and Mac OS-X (HFS sparse files are not supported). This - fixes CTest timeout issues on Windows. (DER - 2012/03/27 - HDFFV-7769) - - Windows and POSIX correctness changes have been propagated from the SEC2 - VFD to the Core VFD. This mainly affects file operations on the - driver's backing store and fixes a problem on Windows where large files - could not be read. (DER - 2012/03/27 - HDFFV-7916 - HDFFV-7603) - - When an application tries to write or read many small data chunks and - runs out of memory, the library had a segmentation fault. The fix is to - return the error stack with proper information. - (SLU - 2012/03/23 - HDFFV-7785) - - H5Pset_data_transform had a segmentation fault in some cases like x*-100. - It works correctly now and handles other cases like 100-x or 2/x. 
- (SLU - 2012/03/15 - HDFFV-7922) - - Fixed rare corruption bugs that could occur when using the new object - header format. (NAF - 2012/03/15 - HDFFV-7879) - - Fixed an error that occurred when creating a contiguous dataset with a - zero-sized dataspace and space allocation time set to 'early'. - (QAK - 2012/03/12) - - Changed Windows thread creation to use _beginthread() instead of - CreateThread(). Threads created by the latter can be killed in - low-memory situations. (DER - 2012/02/10 - HDFFV-7780) - - Creating a dataset in a read-only file caused a segmentation fault when - the file is closed. It's fixed. The attempt to create a dataset will - fail with an error indicating the file is read-only. - (SLU - 2012/01/25 - HDFFV-7756) - - Fixed a segmentation fault that could occur when shrinking a dataset - with chunks larger than 1 MB. (NAF - 2011/11/30 - HDFFV-7833) - - Fixed a bug that could cause H5Oget_info to return the wrong address - after copying a committed (named) datatype. (NAF - 2011/11/14) - - The library allowed the conversion of strings between ASCII and UTF8 - We have corrected it to report an error under this situation. - (SLU - 2011/11/8 - HDFFV-7582) - - Fixed a segmentation fault when the library tried to shrink the size - of a compound datatype through H5Tset_size immediately after the - datatype was created. (SLU - 2011/11/4 - HDFFV-7618) - - Parallel Library - ---------------- - - None - - Tools - ----- - - h5unjam: Fixed a segmentation fault that occurred when h5unjam was used - with the -V (show version) option. (JKM - 2012/04/19 - HDFFV-8001) - - h5repack: Fixed a failure that occurred when repacking the chunk size - of a specified chunked dataset with unlimited max dims. - (JKM - 2012/04/11 - HDFFV-7993) - - h5diff: Fixed a failure when comparing groups. Before the fix, if an - object in a group was compared with an object in another group where - both had the same name but the object type was different, then h5diff - would fail. After the fix, h5diff detects such cases as non-comparable - and displays appropriate error messages. - (JKM - 2012/03/28 - HDFFV-7644) - - h5diff: If unique objects exist only in one file and if h5diff is set to - exclude the unique objects with the --exclude-path option, then h5diff - might miss excluding some objects. This was fixed to correctly exclude - objects. (JKM - 2012/03/20 - HDFFV-7837) - - h5diff: When two symbolic dangling links are compared with the - --follow-symlinks option, the result should be the same. This worked when - comparing two files, but didn't work when comparing two objects. - h5diff now works when comparing two objects. - (JKM - 2012/03/09 - HDFFV-7835) - - h5dump: Added the tools library error stack to properly catch error - information generated within the library. (ADB - 2012/03/12 - HDFFV-7958) - - h5dump: Changed the process where an open link used to fail. Now dangling - links no longer throw error messages. (ADB - 2012/03/12 - HDFFV-7839) - - h5dump: Refactored code to remove duplicated functions. Split XML - functions from DDL functions. Corrected indentation and formatting - errors. Also fixed subsetting counting overflow (HDFFV-5874). Verified - all tools call tools_init() in main. The USER_BLOCK data now correctly - displays within the SUPER_BLOCK info. NOTE: WHITESPACE IN THE OUTPUT - HAS CHANGED. (ADB - 2012/02/17 - HDFFV-7560) - - h5diff: Fixed to prevent from displaying error stack message when - comparing two dangling symbolic links with the follow-symlinks option. 
- (JKM - 2012/01/13 - HDFFV-7836) - - h5repack: Fixed a memory leak that occurred with the handling of - variable length strings in attributes. - (JKM - 2012/01/10 - HDFFV-7840) - - h5ls: Fixed a segmentation fault that occurred when accessing region - reference data in an attribute. (JKM - 2012/01/06 - HDFFV-7838) - - F90 API - ------- - - None - - C++ API - ------ - - None - - High-Level APIs: - ------ - - None - - Fortran High-Level APIs: - ------ - - h5ltget_attribute_string_f: The h5ltget_attribute_string_f used to return - the C NULL character in the returned character buffer. The returned - character buffer now does not return the C NULL character; the buffer - is blank-padded if needed. (MSB - 2012/03/23) - - -Platforms Tested -================ -The following platforms and compilers have been tested for this release. - - AIX 5.3 xlc 10.1.0.5 - (NASA G-ADA) xlC 10.1.0.5 - xlf90 12.1.0.6 - - FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719 - (loyalty) g++ 4.2.1 [FreeBSD] 20070719 - gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719 - (freedom) g++ 4.2.1 [FreeBSD] 20070719 - gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - Linux 2.6.18-194.3.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP i686 i686 i386 compilers for 32-bit applications; - (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-52) - Version 4.5.2 - PGI C, Fortran, C++ Compilers for 32-bit - applications; - Version 11.8-0 - Version 11.9-0 - Intel(R) C, C++, Fortran Compiler for 32-bit - applications; - Version 12.0 - Version 12.1 - MPICH mpich2-1.3.1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - Linux 2.6.18-308.1.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++) - #1 SMP x86_64 GNU/Linux compilers for 32-bit applications; - (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-52) - Version 4.5.2 - PGI C, Fortran, C++ for 64-bit target on - x86-64; - Version 11.9-0 (64-bit) - Version 11.8-0 (32-bit) - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64; - Version 12.0 - Version 12.1 - MPICH mpich2-1.3.1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - Linux 2.6.32-220.7.1.el6.ppc64 gcc (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3) - #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.6 20110731 - (ostrich) GNU Fortran (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3) - - Linux 2.6.18-108chaos Intel C, C++, Fortran Compilers Version 11.1 - #1 SMP x86_64 GNU/Linux - (LLNL Aztec) - - IBM Blue Gene/P XL C for Blue Gene/P, bgxlc V9.0 - (LLNL uDawn) XL C++ for Blue Gene/P, bgxlC V9.0 - XL Fortran for Blue Gene/P, bgxlf0 V11.1 - - SunOS 5.10 32- and 64-bit Sun C 5.9 Sun OS_sparc Patch 124867-16 - (linew) Sun Fortran 95 8.3 Sun OS_sparc Patch 127000-13 - Sun C++ 5.9 Sun OS_sparc Patch 124863-26 - Sun C 5.11 SunOS_sparc - Sun Fortran 95 8.5 SunOS_sparc - Sun C++ 5.11 SunOS_sparc - - SGI Altix UV Intel(R) C, Fortran Compilers - SGI ProPack 7 Linux Version 11.1 20100806 - 2.6.32.24-0.2.1.2230.2.PTF- SGI MPT 2.02 - default #1 SMP - (NCSA ember) - - Dell NVIDIA Cluster Intel(R) C, Fortran Compilers - Red Hat Enterprise Linux 6 Version 12.0.4 20110427 - 2.6.32-131.4.1.el6.x86_64 mvapich2 1.7rc1-intel-12.0.4 - (NCSA forge) - - Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 w/ Intel Fortran 12 (cmake) - - Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 w/ Intel 
Fortran 12 (cmake) - - Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 w/ Intel Fortran 12 (cmake) - Cygwin(1.7.9 native gcc(4.5.3) compiler and gfortran) - - Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 w/ Intel Fortran 12 (cmake) - Cygwin(1.7.9 native gcc(4.5.3) compiler and gfortran) - - Mac OS X Snow Leopard 10.6.8 i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (gcc) - Darwin Kernel Version 10.8.0 i686-apple-darwin10-g++-4.2.1 (GCC) 4.2.1 (g++) - Intel 64-bit (Apple Inc. build 5666) (dot 3) - (fred) GNU Fortran (GCC) 4.6.1 (gfortran) - Intel C (icc), Fortran (ifort), C++ (icpc) - 12.1.0.038 Build 20110811 - - Mac OS X Snow Leopard 10.6.8 i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (gcc) - Darwin Kernel Version 10.8.0 i686-apple-darwin10-g++-4.2.1 (GCC) 4.2.1 (g++) - Intel 32-bit (Apple Inc. build 5666) (dot 3) - (tejeda) GNU Fortran (GCC) 4.6.1 (gfortran) - Intel C (icc), Fortran (ifort), C++ (icpc) - 12.1.0.038 Build 20110811 - - Mac OS X Lion 10.7.3 GCC 4.2.1 gcc - 32- and 64-bit GNU Fortran (GCC) 4.6.1 gfortran - (duck) GCC 4.2.1. g++ - - Debian6.0.3 2.6.32-5-686 #1 SMP i686 GNU/Linux - gcc (Debian 4.4.5-8) 4.4.5 - GNU Fortran (Debian 4.4.5-8) 4.4.5 - - Debian6.0.3 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux - gcc (Debian 4.4.5-8) 4.4.5 - GNU Fortran (Debian 4.4.5-8) 4.4.5 - - Fedora16 3.2.9-2.fc16.i6866 #1 SMP i686 i686 i386 GNU/Linux - gcc (GCC) 4.6.2 20111027 (Red Hat 4.6.2-1) - GNU Fortran (GCC) 4.6.2 20111027 (Red Hat 4.6.2-1) - - Fedora16 3.2.9-2.fc16.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.6.2 20111027 (Red Hat 4.6.2-1) - GNU Fortran (GCC) 4.6.2 20111027 (Red Hat 4.6.2-1) - - SUSE 12.1 3.1.9-1.4-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux - gcc (SUSE Linux) 4.6.2 - GNU Fortran (SUSE Linux) 4.6.2 - - SUSE 12.1 3.1.9-1.4-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.6.2 - GNU Fortran (SUSE Linux) 4.6.2 - - Ubuntu 11.10 3.0.0-16-generic #29-Ubuntu SMP i686 GNU/Linux - gcc (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1 - GNU Fortran (Ubuntu/Linaro 4.6.4-9ubuntu3) 4.6.1 - - Ubuntu 11.10 3.0.0-16-generic #29-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1 - GNU Fortran (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1 - - Cray Linux Environment (CLE) PrgEnv-pgi 2.2.74 - hopper.nersc.gov pgcc 11.9-0 64-bit target on x86-64 Linux -tp k8e - pgf90 11.9-0 64-bit target on x86-64 Linux -tp k8e - pgCC 11.9-0 64-bit target on x86-64 Linux -tp k8e - - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90 F90 C++ zlib SZIP - parallel parallel -Solaris2.10 32-bit n y n y y y -Solaris2.10 64-bit n y n y y y -Windows XP n y(4) n y y y -Windows XP x64 n y(4) n y y y -Windows Vista n y(4) n y y y -Windows Vista x64 n y(4) n y y y -Mac OS X Snow Leopard 10.6.8 32-bit n y n y y n -Mac OS X Snow Leopard 10.6.8 64-bit n y n y y y -Mac OS X Lion 10.7.3 32-bit n y n y y n -Mac OS X Lion 10.7.3 64-bit n y n y y y -AIX 5.3 32- and 64-bit y y y y y y -FreeBSD 8.2-STABLE 32&64 bit n x n x y y -CentOS 5.5 Linux 2.6.18-194 i686 GNU (1)W y y(2) y y y y -CentOS 5.5 Linux 2.6.18-194 i686 Intel W n y n y y y -CentOS 5.5 Linux 2.6.18-194 i686 PGI W n y n y y y -CentOS 5.5 Linux 2.6.18 x86_64 GNU (1) W y y(3) y y y 
y -CentOS 5.5 Linux 2.6.18 x86_64 Intel W n y n y y y -CentOS 5.5 Linux 2.6.18 x86_64 PGI W n y n y y y -Linux 2.6.32-220.7.1.el6.ppc64 n y n y y y -SGI ProPack 7 Linux 2.6.32.24 y y y y y y -Red Hat Enterprise Linux 6 y y y y y y -CLE hopper.nersc.gov y y(3) y y y n - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.10 32-bit y y y y -Solaris2.10 64-bit n n n n -Windows XP y y(4) y n -Windows XP x64 y y(4) y n -Windows Vista y y(4) y y -Windows Vista x64 y y(4) y y -Mac OS X Snow Leopard 10.6.8 32-bit y n y n -Mac OS X Snow Leopard 10.6.8 64-bit y n y n -Mac OS X Lion 10.7.3 32-bit y n y y -Mac OS X Lion 10.7.3 64-bit y n y y -AIX 5.3 32- and 64-bit n n n y -FreeBSD 8.2-STABLE 32&64 bit y x x y -CentOS 5.5 Linux 2.6.18-194 i686 GNU (1)W y y(2) y y -CentOS 5.5 Linux 2.6.18-194 i686 Intel W y y y n -CentOS 5.5 Linux 2.6.18-194 i686 PGI W y y y n -CentOS 5.5 Linux 2.6.18 x86_64 GNU (1) W y y y y -CentOS 5.5 Linux 2.6.18 x86_64 Intel W y y y n -CentOS 5.5 Linux 2.6.18 x86_64 PGI W y y y n -Linux 2.6.32-220.7.1.el6.ppc64 y y y n -SGI ProPack 7 Linux 2.6.32.24 y y y n -Red Hat Enterprise Linux 6 y y y n -CLE hopper.nersc.gov n n n n - - (1) Fortran compiled with gfortran. - (2) With PGI and Absoft compilers. - (3) With PGI compiler for Fortran. - (4) Using Visual Studio 2008 w/ Intel Fortran 10.1 (Cygwin shared libraries are not supported) - (5) C and C++ shared libraries will not be built when Fortran is enabled. - Compiler versions for each platform are listed in the preceding - "Platforms Tested" table. - - -Known Problems -============== -* The h5repacktst test fails on AIX 32-bit because the test uses more - memory than the default amount. The failure message typically looks like: - - "time: 0551-010 The process was stopped abnormally. Try again." - - This is an issue with the test only and does not represent a problem with - the library. To allow the test to pass, request more memory when testing - via appropriate command such as: - - $ env LDR_CNRTL=MAXDATA=0x20000000@DSA make check - - (AKC - 2012/05/09 - HDFFV-8016) - -* The file_image test will fail in the "initial file image and callbacks in - the core VFD" sub-test if the source directory is read-only as the test - fails to create its test files in the build directory. This will be - resolved in a future release. - (AKC - 2012/05/05 - HDFFV-8009) - -* The dt_arith test reports several errors involving "long double" on - Mac OS X 10.7 Lion when any level of optimization is enabled. The test does - not fail in debug mode. This will be addressed in a future release. - (SLU - 2012/05/08) - -* The following h5dump test case fails in BG/P machines (and potentially other - machines that use a command script to launch executables): - - h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0" - tno-subset.h5 - - This is due to the embedded spaces in the dataset name being interpreted - by the command script launcher as meta-characters, thus passing three - arguments to h5dump's -d flag. The command passes if run by hand, just - not via the test script. - (AKC - 2012/05/03) - -* The ph5diff (parallel h5diff) tool can intermittently hang in parallel mode - when comparing two HDF5 files that contain objects with the same names but - with different object types. - (JKM - 2012/04/27) - -* On hopper, the build failed when RUNSERIAL and RUNPARALLEL are set - to aprun -np X, because the H5lib_settings.c file was not generated - properly. 
Not setting those environment variables works, because - configure was able to automatically detect that it's a Cray system - and used the proper launch commands when necessary. - (MSC - 2012/04/18) - -* The data conversion test dt_arith.c fails in "long double" to integer - conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library - is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel - (3.2.2 on Fedora) doesn't have the problem. Users should lower the - optimization level (-O1 or -O0) by defining CFLAGS in the command line of - "configure" like: - - CFLAGS=-O1 ./configure - - This will overwrite the library's default optimization level. - (SLU - 2012/02/07 - HDFFV-7829) - -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release. - (DER - 2011/10/14) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in a future release. - (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. - (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. - (AKC - 2011/05/07) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD. - (QAK - 2011/04/26) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the fortran and hl/fortran - tests fail. We are looking into the issue. HL and C++ shared libraries - should now be working as intended, however. - (MAM - 2011/04/20) - -* The --with-mpe configure option does not work with Mpich2. - (AKC - 2011/03/10) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. 
- They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* The library's test dt_arith.c showed a compiler's rounding problem on - Cygwin when converting from unsigned long long to long double. The - library's own conversion works fine. We defined a macro for Cygwin to - skip this test until we can solve the problem. - (SLU - 2010/05/05 - HDFFV-1264) - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* MinGW has a missing libstdc++.dll.a library file and will not successfully link - C++ applications/tests. Do not use the enable-cxx configure option. Read all of - the INSTALL_MINGW.txt file for all restrictions. - (ADB - 2009/11/11) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* The PathScale MPI implementation, accessing a Panasas file system, would - cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not - exist. This is due to the MPI_File_open() call failing if the mode has - the MPI_MODE_EXCL bit set. - (AKC - 2009/08/11 - HDFFV-988) - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. - (CMC - 2009/04/28) - -* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and - tools/h5copy/testh5copy.sh will fail some of its sub-tests. These sub-tests - are expected to fail and should exit with a non-zero code but the yod - command does not propagate the exit code of the executables. Yod always - returns 0 if it can launch the executable. The test suite shell expects - a non-zero for this particular test, therefore it concludes the test has - failed when it receives 0 from yod. Skip all the "failing" test for now - by changing them as following. 
- - ======== Original tools/h5ls/testh5ls.sh ========= - TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ======== Change to =============================== - echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ================================================== - - ======== Original tools/h5copy/testh5copy.sh ========= - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - H5LSTEST $FILEOUT - ======== Change to =============================== - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - echo SKIP H5LSTEST $FILEOUT - ================================================== - (AKC - 2008/11/10) - -* For Red Storm, a Cray XT3 system, the yod command sometimes gives the - message, "yod allocation delayed for node recovery". This interferes with - test suites that do not expect to see this message. See the section of "Red - Storm" in file INSTALL_parallel for a way to deal with this problem. - (AKC - 2008/05/28) - -* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and - the application asks to do collective IO, we have found that when using 4 - processors, a simple collective write will sometimes be hung. This can be - verified with t_mpi test under testpar. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - -* On IBM AIX systems, parallel HDF5 mode will fail some tests with error - messages like "INFO: 0031-XXX ...". This is from the command `poe'. - Set the environment variable MP_INFOLEVEL to 0 to minimize the messages - and run the tests again. - - The tests may fail with messages like "The socket name is already in use", - but HDF5 does not use sockets. This failure is due to problems with the - poe command trying to set up the debug socket. To resolve this problem, - check to see whether there are many old /tmp/s.pedb.* files staying around. - These are sockets used by the poe command and left behind due to failed - commands. First, ask your system administrator to clean them out. - Lastly, request IBM to provide a means to run poe without the debug socket. 
- (AKC - 2004/12/08) - - -%%%%1.8.8%%%% - - -HDF5 version 1.8.8 released on 2011-11-15 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.7 and -HDF5 1.8.8, and contains information on the platforms tested and -known problems in HDF5-1.8.8. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.8 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.8 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.8 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.8 (current -release) versus Release 1.8.7": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.7 -- Platforms Tested -- Supported Configuration Features Summary -- Known Problems - - -New Features -============ - - Configuration - ------------- - - Added the --enable-fortran2003 flag to enable Fortran2003 support - in the HDF5 Fortran library. The flag should be used along with the - --enable-fortran flag and takes affect only when the Fortran compiler - is Fortran2003 compliant. (EIP - 2011/11/14) - - Added checks for clock_gettime and mach/mach_time.h to both configure and - CMake. This will support the move from gettimeofday to clock_gettime's - monotonic timer in the profiling code in a future release. - (DER - 2011/10/12) - - Library - ------- - - The Windows VFD code has been removed with the exception of the functions - which set it (H5Pset_fapl_windows, for example). Setting the Windows - VFD now really sets the SEC2 VFD. The WINDOWS_MAX_BUF and - WINDOWS_USE_STDIO configuration options and #defines have also been - removed. NOTE: Since the Windows VFD was a clone of the SEC2 VFD, this - change should be transparent to users. - (DER - 2011/10/12 - HDFFV-7740, HDFFV-7744) - - H5Tcreate now supports the string type (fixed-length and variable- - length). (SLU - 2011/05/20) - - Parallel Library - ---------------- - - Added new H5Pget_mpio_actual_chunk_opt_mode and - H5Pget_mpio_actual_io_mode API routines for querying whether/how - a collective I/O operation completed. (QAK - 2011/10/12) - - Tools - ----- - - None - - High-Level APIs - --------------- - - Added the following Fortran wrappers for the Dimension Scale APIs: - h5dsset_scale_f - h5dsattach_scale_f - h5dsdetach_scale_f - h5dsis_attached_f - h5dsis_scale_f - h5dsset_label_f - h5dsget_label_f - h5dsget_scale_name_f - h5dsget_num_scales_f - (EIP for SB - 2011/10/13 - HDFFV-3797) - - Fortran API - ----------- - - The HDF5 Fortran library was enhanced to support the Fortran 2003 standard. 
- The following features are available when the HDF5 library is configured - using the --enable-fortran and --enable-fortran2003 configure flags AND - if the Fortran compiler is Fortran 2003 compliant: - - - Subroutines overloaded with the C_PTR derived type: - h5pget_f - h5pget_fill_value_f - h5pinsert_f - h5pregister_f - h5pset_f - h5pset_fill_value_f - h5rcreate_f - h5rderefrence_f - h5rget_name_f - h5rget_obj_type_f - - Subroutines overloaded with the C_PTR derived type - and simplified signatures: - h5aread_f - h5awrite_f - h5dread_f - h5dwrite_f - - New subroutines - h5dvlen_reclaim_f - h5literate_by_name_f - h5literate_f - h5ovisit_f - h5tconvert_f - h5pset_nbit_f - h5pset_scaleoffset_f - - Subroutines with additional optional parameters: - h5pcreate_class_f - (EIP - 2011/10/14) - - C++ API - ------- - - None - - -Support for New Platforms, Languages, and Compilers -=================================================== - - None - -Bug Fixes since HDF5-1.8.7 -========================== - - Configuration - ------------- - - Changed the size of H5_SIZEOF_OFF_T to 4 bytes (was 8) in the VMS - h5pubconf.h based on the output of a test program. (DER - 2011/10/12) - - The Windows and VMS versions of H5pubconf.h were brought into sync with - the linux/posix version. (DER - 2011/10/12) - - Fixed a bug in the bin/trace Perl script where API functions - that take a variable number of arguments were not processed for - trace statement fixup. (DER - 2011/08/25) - - The --enable-h5dump-packed-bits configure option has been removed. - The h5dump code that this option conditionally enabled is now always - compiled into h5dump. Please refer to the h5dump reference manual for - usage of the packed bits feature. (MAM - 2011/06/23 - HDFFV-7592) - - Configure now uses the same flags and symbols in its tests that are - used to build the library. (DER - 2011/05/24) - - Library - ------- - - Corrected the error when copying attributes between files which are using - different versions of the file format. (QAK - 2011/10/20 - HDFFV-7718) - - Corrected the error when loading local heaps from the file, which could - cause the size of the local heap's data block to increase dramatically. - (QAK - 2011/10/14 - HDFFV-7767) - - An application does not need to do H5O_move_msgs_forward() when writing - attributes. Tests were checked into the performance suite. - (VC - 2011/10/13 - HDFFV-7640) - - Fixed a bug that occurred when using H5Ocopy on a committed datatype - containing an attribute using that committed datatype. - (NAF - 2011/10/13 - HDFFV-5854) - - Added generic VFD I/O types to the SEC2 and log VFDs to ensure correct - I/O sizes (and remove compiler warnings) between Windows and true POSIX - systems. (DER - 2011/10/12) - - Corrected some Windows behavior in the SEC2 and log VFDs. This mainly - involved datatype correctness fixes, Windows API call error checks, - and adding the volume serial number to the VFD cmp functions. - (DER - 2011/10/12) - - Converted post-checks for the appropriate POSIX I/O sizes to pre-checks - in order to avoid platform-specific or undefined behavior. - (DER - 2011/10/12) - - #ifdef _WIN32 instances have been changed to #ifdef H5_HAVE_WIN32_API. - H5_HAVE_VISUAL_STUDIO checks have been added where necessary. This is in - CMake only as configure never sets _WIN32. (ADB - 2011/09/12) - - CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv - discovered 3 problems in tests and tools' library: - 1. 
In dsets.c, left shifting an unsigned int for 32 bits or more - caused undefined behavior. - 2. In dt_arith.c, the INIT_INTEGER macro definition has an overflow - when the value is a negative minimal and is being subtracted from one. - 3. In tools/lib/h5tools_str.c, right shifting an int value for 32 bits - or more caused undefined behavior. - All the problems have been corrected. (SLU - 2011/09/02 - HDFFV-7674) - - H5Epush2() now has the correct trace functionality (this is related to the - bin/trace Perl script bug noted in the configure section). - (DER - 2011/08/25) - - Corrected mismatched function name typo of h5pget_dxpl_mpio_c and - h5pfill_value_defined_c. (AKC - 2011/08/22 - HDFFV-7641) - - Corrected an internal error in the library where objects that use committed - (named) datatypes and were accessed from two different file IDs could confuse - the two and cause erroneous failures. (QAK - 2011/07/18 - HDFFV-7638) - - In v1.6 of the library, there was an EOA for the whole MULTI file saved in the - super block. We took it out in v1.8 of the library because it's meaningless - for the MULTI file. v1.8 of the library saves the EOA for the metadata file - instead, but this caused a backward compatibility problem. - A v1.8 library couldn't open the file created with the v1.6 library. We - fixed the problem by checking the EOA value to detect the file - created with v1.6 library. (SLU - 2011/06/22) - - When a dataset had filters and reading data failed, the error message - didn't say which filter wasn't registered. It's fixed now. (SLU - 2011/06/03) - - Parallel Library - ---------------- - - The Special Collective IO (IO when some processes do not contribute to the - IO) and the Complex Derived Datatype MPI functionalities are no longer - conditionally enabled in the library by configure. They are always - enabled in order to take advantage of performance boosts from these - behaviors. Older MPI implementations that do not allow for these - functionalities can no longer by used by HDF5. - (MAM - 2011/07/08 - HDFFV-7639). - - Tools - ----- - - h5diff: fixed segfault over non-comparable attribute with different - dimension or rank, along with '-c' option to display details. - (JKM - 2011/10/24 - HDFFV-7770) - - Fixed h5diff to display all the comparable objects and attributes - regardless of detecting non-comparables. (JKM - 2011/09/16 - HDFFV-7693) - - Fixed h5repack to update the values of references(object and region) of - attributes in h5repack for 1) references, 2) arrays of references, - 3) variable-length references, and 4) compound references. - (PC - 2011/09/14 - HDFFV-5932) - - h5diff: fixed a segfault over a dataset with container types - array and variable-length (vlen) along with multiple nested compound types. - Example: compound->array->compound, compound->vlen->compound. - (JKM - 2011/09/01 - HDFFV-7712) - - h5repack: added macro to handle a failure in H5Dread/write when memory - allocation failed inside the library. (PC - 2011/08/19) - - Fixed h5jam to not to allow the specifying of an HDF5 formatted file as - an input file for the -u (user block file) option. The original HDF5 file - would not be accessible if this behavior was allowed. - (JKM - 2011/08/19 - HDFFV-5941) - - Revised the command help pages of h5jam and h5unjam. The descriptions - were not up to date and some were missing. 
- (JKM - 2011/08/15 - HDFFV-7515) - - Fixed h5dump to correct the schema location: - - (ADB - 2011/08/10) - - h5repack: h5repack failed to copy a dataset if the layout is changed - from chunked with unlimited dimensions to contiguous. - (PC - 2011/07/15 - HDFFV-7649) - - Fixed h5diff: the "--delta" option considers two NaN of the same type - are different. This is wrong based on the h5diff description in the - Reference Manual. (PC - 2011/07/15 - HDFFV-7656) - - Fixed h5diff to display an instructive error message and exit with - an instructive error message when mutually exclusive options - (-d, -p and --use-system-epsilon) are used together. - (JKM - 2011/07/07 - HDFFV-7600) - - Fixed h5dump so that it displays the first line of each element in correct - position for multiple dimension array types. Before this fix, - the first line of each element in an array was - displayed after the last line of previous element without - moving to the next line (+indentation). - (JKM - 2011/06/15 - HDFFV-5878) - - Fixed h5dump so that it will display the correct value for - H5T_STD_I8LE datasets on the Blue-gene system (ppc64, linux, Big-Endian, - clustering). (AKC & JKM - 2011/05/12 - HDFFV-7594) - - Fixed h5diff to compare a file to itself correctly. Previously h5diff - reported either the files were different or not compatible in certain - cases even when comparing a file to itself. This fix also improves - performance when comparing the same target objects through verifying - the object and file addresses before comparing the details - in the objects. Examples of details are datasets and attributes. - (XCAO & JKM - 2011/05/06 - HDFFV-5928) - - F90 API - ------- - - Modified the h5open_f and h5close_f subroutines to not to call H5open - and H5close correspondingly. While the H5open call just adds overhead, - the H5close call called by a Fortran application shuts down the HDF5 - library. This makes the library inaccessible to the application. - (EIP & SB - 2011/10/13 - HDFFV-915) - - Fixed h5tget_tag_f where the length of the C string was used to - repack the C string into the Fortran string. This lead to memory - corruption in the calling program. (SB - 2011/07/26) - - Added defined constants: - H5T_ORDER_MIXED_F (HDFFV-2767) - H5Z_SO_FLOAT_DSCALE_F - H5Z_SO_FLOAT_ESCALE_F - H5Z_SO_INT_F - H5Z_SO_INT_MINBITS_DEFAULT_F - H5O_TYPE_UNKNOWN_F - H5O_TYPE_GROUP_F - H5O_TYPE_DATASET_F - H5O_TYPE_NAMED_DATATYPE_F - H5O_TYPE_NTYPES_F - - C++ API - ------ - - None - - High-Level APIs: - ------ - - Fixed the H5LTdtype_to_text function. It had some memory problems when - dealing with some complicated data types. (SLU - 2011/10/19 - HDFFV-7701) - - Fixed H5DSset_label seg faulting when retrieving the length of a - dimension label that was not set. (SB - 2011/08/07 - HDFFV-7673) - - Fixed a dimension scale bug where if you create a dimscale, attach two - datasets to it, and then unattach them, you get an error if they are - unattached in order, but no error if you unattach them in reverse order. - (SB - 2011/06/07 - HDFFV-7605) - - Fortran High-Level APIs: - ------ - - None - - -Platforms Tested -================ -The following platforms and compilers have been tested for this release. 
- - AIX 5.3 xlc 10.1.0.5 - (NASA G-ADA) xlC 10.1.0.5 - xlf90 12.1.0.6 - - FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719 - (loyalty) g++ 4.2.1 [FreeBSD] 20070719 - gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719 - (freedom) g++ 4.2.1 [FreeBSD] 20070719 - gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - IBM Blue Gene/P bgxlc 9.0.0.9 - (LLNL uDawn) bgxlf90 11.1.0.7 - bgxlC 9.0.0.9 - - Linux 2.6.16.60-0.54.5-smp Intel(R) C, C++, Fortran Compilers - x86_64 Version 11.1 20090630 - (INL Icestorm) - - Linux 2.6.18-194.el5 x86_64 Intel(R) C, C++, Fortran Compilers - (INL Fission) Version 12.0.2 20110112 - - Linux 2.6.18-108chaos x86_64 Intel(R) C, C++, Fortran Compilers - (LLNL Aztec) Version 11.1 20090630 - - Linux 2.6.18-194.3.1.el5PAE gcc (GCC) 4.1.2 and 4.4.2 - #1 SMP i686 i686 i386 GNU Fortran (GCC) 4.1.2 20080704 - (jam) (Red Hat 4.1.2-48) and 4.4.2 - PGI C, Fortran, C++ 10.4-0 32-bit - PGI C, Fortran, C++ 10.6-0 32-bit - Intel(R) C Compiler for 32-bit - applications, Version 11.1 - Intel(R) C++ Compiler for 32-bit - applications, Version 11.1 - Intel(R) Fortran Compiler for 32-bit - applications, Version 11.1 - MPICH mpich2-1.3.1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - Linux 2.6.18-238.12.1.el5 gcc 4.1.2 and 4.4.2 - #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 4.1.2 20080704 - (koala) (Red Hat 4.1.2-46) and 4.4.2 - tested for both 32- and 64-bit binaries - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64, - Version 11.1. - PGI C, Fortran, C++ Version 9.0-4 - for 64-bit target on x86-64 - MPICH mpich2-1.3.1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - SGI Altix UV Intel(R) C, Fortran Compilers - SGI ProPack 7 Linux Version 11.1 20100806 - 2.6.32.24-0.2.1.2230.2.PTF- SGI MPT 2.02 - default #1 SMP - (NCSA ember) - - Dell NVIDIA Cluster Intel(R) C, Fortran Compilers - Red Hat Enterprise Linux 6 Version 12.0.4 20110427 - 2.6.32-131.4.1.el6.x86_64 mvapich2 1.7rc1-intel-12.0.4 - (NCSA forge) - - SunOS 5.10 32- and 64-bit Sun C 5.11 SunOS_sparc 2010/08/13 - Sun Fortran 95 8.5 SunOS_sparc 2010/08/13 - Sun C++ 5.11 SunOS_sparc 2010/08/13 - - Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 (cmake) - Cygwin(1.7.9 native gcc(4.5.3) compiler and gfortran) - - Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 (cmake) - Cygwin(1.7.9 native gcc(4.5.3) compiler and gfortran) - - Windows Vista Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Windows Vista x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Mac OS X 10.8.0 (Intel 64-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (Apple Inc. build 5666) (dot 3) - Darwin Kernel Version 10.8.0 GNU Fortran (GCC) 4.6.1 - Intel C, C++ and Fortran compilers 12.1.0 - - Mac OS X 10.8.0 (Intel 32-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (Apple Inc. 
build 5666) (dot 3) - Darwin Kernel Version 10.8.0 GNU Fortran (GCC) version 4.6.1 - Intel C, C++ and Fortran compilers 12.1.0 - - Fedora 12 2.6.32.16-150.fc12.ppc64 #1 SMP ppc64 GNU/Linux - gcc (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10) - GNU Fortran (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10) - - Debian6.0.3 2.6.32-5-686 #1 SMP i686 GNU/Linux - gcc (Debian 4.4.5-8) 4.4.5 - GNU Fortran (Debian 4.4.5-8) 4.4.5 - - Debian6.0.3 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux - gcc (Debian 4.4.5-8) 4.4.5 - GNU Fortran (Debian 4.4.5-8) 4.4.5 - - Fedora15 2.6.40.6-0.fc15.i686.PAE #1 SMP i686 i686 i386 GNU/Linux - gcc (GCC) 4.6.1 20110908 (Red Hat 4.6.1-9) - GNU Fortran (GCC) 4.6.1 20110908 (Red Hat 4.6.1-9) - - Fedora15 2.6.40.6-0.fc15.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.6.1 20110908 (Red Hat 4.6.1-9) - GNU Fortran (GCC) 4.6.1 20110908 (Red Hat 4.6.1-9) - - SUSE 11.4 2.6.37.6-0.7-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux - gcc (SUSE Linux) 4.5.1 20101208 - GNU Fortran (SUSE Linux) 4.5.1 20101208 - - SUSE 11.4 2.6.37.6-0.7-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.5.1 20101208 - GNU Fortran (SUSE Linux) 4.5.1 20101208 - - Ubuntu 11.10 3.0.0-12-generic #20-Ubuntu SMP i686 GNU/Linux - gcc (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1 - GNU Fortran (Ubuntu/Linaro 4.6.4-9ubuntu3) 4.6.1 - - Ubuntu 11.10 3.0.0-12-generic #20-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1 - GNU Fortran (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1 - - OpenVMS Alpha 8.3 HP C V7.3-009 - HP Fortran V8.2-104679-48H9K - HP C++ V7.3-009 - - Cray Linux Environment (CLE) PrgEnv-pgi 2.2.74 - hopper.nersc.gov pgcc 11.7-0 64-bit target on x86-64 Linux -tp k8e - franklin.nersc.gov pgf90 11.7-0 64-bit target on x86-64 Linux -tp k8e - pgCC 11.7-0 64-bit target on x86-64 Linux -tp k8e - -Tested Configuration Features Summary -===================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90 F90 C++ zlib SZIP - parallel parallel -Solaris2.10 32-bit n y n y y y -Solaris2.10 64-bit n y n y y y -Windows XP n y(4) n y y y -Windows XP x64 n y(4) n y y y -Windows Vista n y(4) n y y y -Windows Vista x64 n y(4) n y y y -OpenVMS Alpha n y n y y n -Mac OS X 10.8 Intel 32-bit n y n y y y -Mac OS X 10.8 Intel 64-bit n y n y y y -AIX 5.3 32- and 64-bit n y n y y y -FreeBSD 8.2-STABLE 32&64 bit n x n x y y -CentOS 5.5 Linux 2.6.18-194 i686 GNU (1)W y y(2) y y y y -CentOS 5.5 Linux 2.6.18-194 i686 Intel W n y n y y n -CentOS 5.5 Linux 2.6.18-194 i686 PGI W n y n y y n -CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y(3) y y y y -CentOS 5.5 Linux 2.6.16 x86_64 Intel W n y n y y n -CentOS 5.5 Linux 2.6.16 x86_64 PGI W n y n y y y -Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 n y n y y y -SGI ProPack 7 Linux 2.6.32.24 y y y y y y -Red Hat Enterprise Linux 6 y y y y y y -CLE hopper.nersc.gov y y(3) y y y n -CLE franklin.nersc.gov y y(3) y y y n - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.10 32-bit y y y y -Solaris2.10 64-bit y y y y -Windows XP y y(4) y n -Windows XP x64 y y(4) y n -Windows Vista y y(4) y y -Windows Vista x64 y y(4) y y -OpenVMS Alpha n n n n -Mac OS X 10.8 Intel 32-bit y(5) n y n -Mac OS X 10.8 Intel 64-bit y(5) n y n -AIX 5.3 32- and 64-bit n n n y -FreeBSD 8.2-STABLE 32&64 bit y x x y -CentOS 5.5 Linux 
2.6.18-128 i686 GNU (1)W y y(2) y y -CentOS 5.5 Linux 2.6.18-128 i686 Intel W y y y n -CentOS 5.5 Linux 2.6.18-128 i686 PGI W y y y n -CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y y y -CentOS 5.5 Linux 2.6.16 x86_64 Intel W y y y n -CentOS 5.5 Linux 2.6.16 x86_64 PGI W y y y n -Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 y y y y -SGI ProPack 7 Linux 2.6.32.24 y y y n -Red Hat Enterprise Linux 6 y y y n -CLE hopper.nersc.gov n n n n -CLE franklin.nersc.gov n n n n - - (1) Fortran compiled with gfortran. - (2) With PGI and Absoft compilers. - (3) With PGI compiler for Fortran. - (4) Using Visual Studio 2008 w/ Intel Fortran 10.1 (Cygwin shared libraries are not supported) - (5) C and C++ shared libraries will not be built when Fortran is enabled. - Compiler versions for each platform are listed in the preceding - "Platforms Tested" table. - - -Known Problems -============== - -* The STDIO VFD does not work on some architectures, possibly due to 32/64 - bit or large file issues. The basic STDIO VFD test is known to fail on - 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin - 10.7.0. The STDIO VFD test has been disabled while we investigate and - a fix should appear in a future release, possibly 1.8.9. - (DER - 2011/10/14) - -* h5diff can report inconsistent results when comparing datasets of enum type - that contain invalid values. This is due to how enum types are handled in - the library and will be addressed in the next release. - (DER - 2011/10/14 - HDFFV-7527) - -* The links test can fail under the stdio VFD due to some issues with external - links. This will be investigated and fixed in a future release. - (DER - 2011/10/14 - HDFFV-7768) - -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 - HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. (AKC - 2011/05/07) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD. - (QAK - 2011/04/26) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the fortran and hl/fortran - tests fail. We are looking into the issue. HL and C++ shared libraries - should now be working as intended, however. (MAM - 2011/04/20) - -* The --with-mpe configure option does not work with Mpich2. (AKC - 2011/03/10) - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. 
- - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - (NAF - 2011/01/19) - -* The library's test dt_arith.c showed a compiler's rounding problem on - Cygwin when converting from unsigned long long to long double. The - library's own conversion works fine. We defined a macro for Cygwin to - skip this test until we can solve the problem. - (SLU - 2010/05/05 - HDFFV-1264) - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - (SLU - 2010/02/02) - -* MinGW has a missing libstdc++.dll.a library file and will not successfully link - C++ applications/tests. Do not use the enable-cxx configure option. Read all of - the INSTALL_MINGW.txt file for all restrictions. (ADB - 2009/11/11) - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - (MAM - 2009/11/04) - -* The PathScale MPI implementation, accessing a Panasas file system, would - cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not - exist. This is due to the MPI_File_open() call failing if the mode has - the MPI_MODE_EXCL bit set. (AKC - 2009/08/11 - HDFFV-988) - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. (CMC - 2009/04/28) - -* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and - tools/h5copy/testh5copy.sh will fail some of its sub-tests. These sub-tests - are expected to fail and should exit with a non-zero code but the yod - command does not propagate the exit code of the executables. Yod always - returns 0 if it can launch the executable. The test suite shell expects - a non-zero for this particular test, therefore it concludes the test has - failed when it receives 0 from yod. Skip all the "failing" test for now - by changing them as following. 
- - ======== Original tools/h5ls/testh5ls.sh ========= - TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ======== Change to =============================== - echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ================================================== - - ======== Original tools/h5copy/testh5copy.sh ========= - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - H5LSTEST $FILEOUT - ======== Change to =============================== - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - echo SKIP H5LSTEST $FILEOUT - ================================================== - (AKC - 2008/11/10) - -* For Red Storm, a Cray XT3 system, the yod command sometimes gives the - message, "yod allocation delayed for node recovery". This interferes with - test suites that do not expect to see this message. See the section of "Red - Storm" in file INSTALL_parallel for a way to deal with this problem. - (AKC - 2008/05/28) - -* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and - the application asks to do collective IO, we have found that when using 4 - processors, a simple collective write will sometimes be hung. This can be - verified with t_mpi test under testpar. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - (SLU - 2005/06/30) - -* On IBM AIX systems, parallel HDF5 mode will fail some tests with error - messages like "INFO: 0031-XXX ...". This is from the command `poe'. - Set the environment variable MP_INFOLEVEL to 0 to minimize the messages - and run the tests again. - - The tests may fail with messages like "The socket name is already in use", - but HDF5 does not use sockets. This failure is due to problems with the - poe command trying to set up the debug socket. To resolve this problem, - check to see whether there are many old /tmp/s.pedb.* files staying around. - These are sockets used by the poe command and left behind due to failed - commands. First, ask your system administrator to clean them out. - Lastly, request IBM to provide a means to run poe without the debug socket. 
- (AKC - 2004/12/08) - - -%%%%1.8.7%%%% - - -HDF5 version 1.8.7 released on Tue May 10 09:24:44 CDT 2011 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.6 and -HDF5 1.8.7, and contains information on the platforms tested and -known problems in HDF5-1.8.7. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.7 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.7 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.7 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.7 (current -release) versus Release 1.8.6": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.6 -- Platforms Tested -- Supported Configuration Features Summary -- Known Problems - - -New Features -============ - - Configuration - ------------- - - Configure now generates Makefiles that build in "silent make mode" - by default in which compile and link lines are significantly - simplified for clarity. To override this and view actual compile and - link lines during building, the --disable-silent-rules flag can be used - at configure time, or the 'make' command can be followed by V=1, to - indicate a "verbose" make. (MAM - 2011/4/14). - - Added mpicc and mpif90 as the default C and Fortran compilers for Linux - systems when --enable-parallel is specified but no $CC or $FC is defined. - (AKC - 2011/2/7) - - Added a new configure option, "--enable-unsupported", which can - be used to stop configure from preventing the use of unsupported - configure option combinations, such as c++ in parallel or Fortran - with threadsafe. Use at your own risk, as it may result in a - library that won't compile or run as expected! - (MAM - 2010/11/17 - Bug 2061) - - Library - ------- - - The library allows the dimension size of a dataspace to be zero. In - the past, the library would allow this only if the maximal dimension - size was unlimited. Now there is no such restriction, but no data - can be written to this kind of dataset. (SLU - 2011/4/20) - - We added two new macros, H5_VERSION_GE and H5_VERSION_LE, to let users - compare certain version numbers with the library being used. (SLU - - 2011/4/20) - - Added ability to cache files opened through external links. Added new - public functions H5Pset_elink_file_cache_size(), - H5Pget_elink_file_cache_size(), and H5Fclear_elink_file_cache(). - (NAF - 2011/02/17) - - Finished implementing all options for 'log' VFD. (QAK - 2011/1/25) - - Removed all old code for Metrowerks compilers, bracketed by - __MWERKS__). Metrowerks compiler is long gone. 
(AKC - 2010/11/17) - - Parallel Library - ---------------- - - None - - Tools - ----- - - h5diff: Added new "verbose with levels" option, '-vN, --verbose=N'. - The old '-v, --verbose' option is deprecated but remains available; - it is exactly equivalent to '-v0, --verbose=0'. - The new levels 1 ('-v1' or '--verbose=1') and 2 ('-v2' or - '--verbose=2') can be specified to view more information regarding - attributes differences. Bug #2121 (JKM 2011/3/23) - - h5dump: Added new option --enable-error-stack. This option will - display error stack information in the output stream. This is - useful when the "h5dump: Unable to print data" message is output. - (ADB - 2011/03/03) - - High-Level APIs - --------------- - - Fortran LT make datasets routines (H5LTmake_dataset_f, - h5ltmake_dataset_int_f, h5ltmake_dataset_float_f, h5ltmake_dataset_double_f) - and LT read datasets routines (h5ltread_dataset_f,h5ltread_dataset_int_f, - h5ltread_dataset_float_f, 5ltread_dataset_double_f) can now handle - 4-dimensional to 7-dimensional rank datasets. HDFFV-1217 (MSB-2011/4/24/2011) - - F90 API - ------- - - None - - C++ API - ------- - - None - - -Support for New Platforms, Languages, and Compilers -=================================================== - - Intel V11.1 uses now -O3 optimization in production mode (EIP - 2010/10/08) - - - -Bug Fixes since HDF5-1.8.6 -========================== - - Configuration - ------------- - - Shared C++ and HL libraries on AIX should now be working correctly. - Note that Fortran shared libraries are still not working on AIX. - (See the Known Problems section, below). (MAM - 2011/4/20) - - Removed config/ibm-aix6.x. All IBM-AIX settings are in one file, - ibm-aix. (AKC - 2011/4/14) - - Shared C libraries are no longer disabled on Mac when Fortran - is enabled. Shared Fortran libraries are still not supported on Mac, - so configure will disable them by default, but this is overridable - with the new --enable-unsupported configure option. The configure - summary has been updated to reflect the fact that the shared-ness of - the C++/Fortran wrapper libraries may not align with the C library. - (MAM - 2011/04/11 - HDFFV-4353). - - Library - ------- - - Changed assertion failure when decoding a compound datatype with no - fields into a normal error failure. Also prohibit using this sort - of datatype for creating an attribute (as is already the case for - datasets and committed (named) datatypes). (QAK - 2011/04/15, Jira - issue #HDFFV-2766) - - Tell the VFL flush call that the file will be closing, allowing - the VFDs to avoid sync'ing the file (particularly valuable in parallel). - (QAK - 2011/03/09) - - The datatype handler created with H5Tencode/decode used to have the - reference count 0 (zero); it now has the reference count 1 (one). - (SLU - 2011/2/18) - - Fixed the definition of H5_HAVE_GETTIMEOFDAY on Windows so that - HDgettimeofday() is defined and works properly. Bug HDFFV-5931 - (DER - 2011/04/14) - - Added basic VFD tests for the Windows, STDIO and log VFD tests. - (DER - 2011/04/11) - - Parallel Library - ---------------- - - None - - Tools - ----- - - Updated h5dump test case script to prevent entire test failure when - source directory is read-only. Bug #HDFFV-4342 (JKM 2011/4/12) - - Fixed h5dump displaying incorrect values for H5T_STD_I8BE type data in - attribute on Big-Endian machine. H5T_STD_I8BE is unsigned 8bit type, - so h5dump is supposed to display -2 instead of 254. It worked correctly - on Little-Endian system , but not on Big-Endian system. 
Bug #HDFFV-4358 - (JKM 04/08/2011) - - Updated some HDF5 tools to standardize the option name as - '--enable-error-stack' for printing HDF5 error stack messages. h5ls and - h5dump have been updated. For h5ls, this replaces "-e/--errors" option, - which is deprecated. For h5dump, this is a new option. Bug #2182 - (JKM 2011/3/30) - - Fixed the h5diff --use-system-epsilon option. The formula used in the - calculation was changed from ( |a - b| / b ) to ( |a - b| ). - This was done to improve performance. Bug #2184 (JKM 2011/3/24) - - Fixed output for H5T_REFERENCE in h5dump. According to the BNF document - the output of a H5T_REFERENCE should be followed by the type; - ::= H5T_REFERENCE { } - ::= H5T_STD_REF_OBJECT | H5T_STD_REF_DSETREG - Previously this was only displayed if the -R option was used. - Bug #1725 (ADB 2011/3/28) - - Fixed two h5diff issues. 1) h5diff compared attributes correctly only - when two objects had the same number of attributes and the attribute - names were identical. 2) h5diff did not display useful information about - attribute differences. Bug #2121 (JKM 2011/3/17) - - Fixed a memory leak in h5diff that occurred when accessing symbolic links - with the --follow-symlink option. Bug #2214 (JKM 2011/3/18) - - Fixed a memory leak in h5diff that occurred when accessing variable length - string data. Bug #2216 (JKM 2011/3/18) - - Fixed and improved the help page for h5ls -a, --address option. - Bug #1904 (JKM 2011/3/11) - - Fixed h5copy to enable copying an object into the same HDF5 file. - Previously h5copy displayed an error message when the target file - was the same as the source file. (XCAO 2011/3/8) - - Fixed an h5dump problem that caused the tool to skip some data elements - in large datasets with a large array datatype on Windows. This issue - arose only on Windows due to the different return behavior of the - _vsnprintf() function. Bug #2161 (JKM 2011/3/3) - - Fixed h5dump which was skipping some array indices in large datasets - with a relatively large array datatype. The interval of skipped indices - varied according to the size of the array. Bug #2092 (JKM 2011/2/15) - - Fixed h5diff which was segfaulting when comparing compound datasets - with a combination of fixed-length string datatypes and variable-length - string datatypes in certain orders. Bug #2089 (JKM 2010/12/28) - - Improved h5diff performance. 1) Now use HDmemcmp() before comparing two - elements. 2) Replace expensive H5Tequals() calls. 3) Retrieve datatype - information at dataset level, not at each element level for compound - datasets. HDFFV-7516 (JKM 2011/4/18) - - Fixed h5ls to display nested compound types with curly brackets - when -S (--simple) option is used with -l (--label), so it shows - which members (in curly brackets) belong to which nested compound type, - making the output clearer. Bug #1979 (JKM 2010/11/09) - - Fixed h5diff to handle variable-length strings in a compound dataset - and variable-length string arrays in a compound dataset correctly. - Garbage values were previously displayed when h5diff compared multiple - variable-length strings in a compound type dataset. - Bug #1989 (JKM 2010/10/28) - - Fixed h5copy to fail gracefully when copying an object to a non- - existing group without the -p option. Bug #2040 (JKM 2010/10/18) - - F90 API - ------ - - None - - C++ API - ------ - - None - - High-Level APIs: - ------ - - None - - Fortran High-Level APIs: - ------ - - h5tbmake_table_f: Fixed error in passing an array of characters with different - length field names. 
- - h5tget_field_info_f: Fixed error with packing the C strings into a Fortran - array of strings. Added optional argument called 'maxlen_out' which returns - the maximum string character length in a field name element. - Bug HDFFV-1255 (MSB- 4/17/2011) - - - - -Platforms Tested -================ -The following platforms and compilers have been tested for this release. - - AIX 6.1 xlc 11.1.0.3 - (NCSA BP) xlC 11.1.0.3 - xlf90 13.1.0.3 - mpcc_r 11.1.0.3 - mpxlf90_r 13.1.0.3 - - FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719 - (loyalty) g++ 4.2.1 [FreeBSD] 20070719 - gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719 - (freedom) g++ 4.2.1 [FreeBSD] 20070719 - gcc 4.6.1 20110422 - g++ 4.6.1 20110422 - gfortran 4.6.1 20110422 - - Linux 2.6.18-194.3.1.el5PAE gcc (GCC) 4.1.2 and 4.4.2 - #1 SMP i686 i686 i386 G95 (GCC 4.0.3 (g95 0.93!) Apr 21 2010) - (jam) GNU Fortran (GCC) 4.1.2 20080704 - (Red Hat 4.1.2-48) and 4.4.2 - PGI C, Fortran, C++ 10.4-0 32-bit - PGI C, Fortran, C++ 10.6-0 32-bit - Intel(R) C Compiler for 32-bit - applications, Version 11.1 - Intel(R) C++ Compiler for 32-bit - applications, Version 11.1 - Intel(R) Fortran Compiler for 32-bit - applications, Version 11.1 - Absoft 32-bit Fortran 95 10.0.7 - MPICH mpich2-1.3.1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - Linux 2.6.18-194.17.1.el5 gcc 4.1.2 and 4.4.2 - #1 SMP x86_64 GNU/Linux G95 (GCC 4.0.3 (g95 0.93!) Apr 21 2010) - (amani) tested for both 32- and 64-bit binaries - GNU Fortran (GCC) 4.1.2 20080704 - (Red Hat 4.1.2-46) and 4.4.2 - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64, - Version 11.1. - PGI C, Fortran, C++ Version 9.0-4 - for 64-bit target on x86-64 - MPICH mpich2-1.3.1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - SGI ProPack 7 Linux Intel(R) C++ Version 11.1 20100806 - 2.6.32.24-0.2.1.2230.2.PTF- Intel(R) Fortran Version 11.1 20100806 - default #1 SMP SGI MPT 2.01 - SGI Altix UV - (NCSA ember) - - SunOS 5.10 32- and 64-bit Sun C 5.9 Sun OS_sparc Patch 124867-16 - (linew) Sun Fortran 95 8.3 Sun OS_sparc Patch 127000-13 - Sun C++ 5.9 Sun OS_sparc Patch 124863-26 - Sun C 5.10 SunOS_sparc Patch 141861-07 - Sun Fortran 95 8.4 SunOS_sparc Patch 128231-06 - Sun C++ 5.10 SunOS_sparc 128228-11 - - Intel Xeon Linux 2.6.18- gcc 4.2.4 - 92.1.10.el5_lustre.1.6.6smp- Intel(R) C++ Version 10.1.017 - perfctr #8 SMP Intel(R) Fortran Compiler Version 10.1.017 - (NCSA abe) Open MPI 1.3.2 - MVAPICH2-1.5.1_pgi-10.8 - - Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 (cmake) - Cygwin(1.7.7 native gcc(4.3.4) compiler and gfortran) - - Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 (cmake) - Cygwin(1.7.7 native gcc(4.3.4) compiler and gfortran) - - Windows Vista Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Windows Vista x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Mac OS X 10.7.0 (Intel 64-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 - Darwin Kernel Version 10.7.0 GNU Fortran (GCC) 4.6.0 20101106 (experimental) - Intel C, C++ and Fortran compilers 12.0.1.122 20101110 - - Mac OS X 10.7.0 (Intel 32-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (Apple Inc. 
build 5666) (dot 3) - Darwin Kernel Version 10.7.0 GNU Fortran (GCC) version 4.4.0 20090123 (experimental) - [trunk revision 143587] - - Fedora 12 2.6.32.16-150.fc12.ppc64 #1 SMP ppc64 GNU/Linux - gcc (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10) - GNU Fortran (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10) - - Debian6.01 2.6.32-5-686 #1 SMP i686 GNU/Linux - gcc (Debian 4.4.5-8) 4.4.5 - GNU Fortran (Debian 4.4.5-8) 4.4.5 - - Debian6.01 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux - gcc (Debian 4.4.5-8) 4.4.5 - GNU Fortran (Debian 4.4.5-8) 4.4.5 - - Fedora14 2.6.35.12-88.fc14.i686.PAE #1 SMP i686 i686 i386 GNU/Linux - gcc (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4) - GNU Fortran (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4) - - Fedora14 2.6.35.12-88.fc14.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4) - GNU Fortran (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4) - - SUSE 11.4 2.6.37.1-1.2-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux - gcc (SUSE Linux) 4.5.1 20101208 - GNU Fortran (SUSE Linux) 4.5.1 20101208 - - SUSE 11.4 2.6.37.1-1.2-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.5.1 20101208 - GNU Fortran (SUSE Linux) 4.5.1 20101208 - - Ubuntu 10.10 2.6.35-28-generic #50-Ubuntu SMP i686 GNU/Linux - gcc (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5 - GNU Fortran (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5 - - Ubuntu 10.10 2.6.35-28-generic #50-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5 - GNU Fortran (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5 - - OpenVMS Alpha 8.3 HP C V7.3-009 - HP Fortran V8.2-104679-48H9K - HP C++ V7.3-009 - -Tested Configuration Features Summary -======================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90 F90 C++ zlib SZIP - parallel parallel -Solaris2.10 32-bit n y n y y y -Solaris2.10 64-bit n y n y y y -Windows XP n y(4) n y y y -Windows XP x64 n y(4) n y y y -Windows Vista n y(4) n y y y -Windows Vista x64 n y(4) n y y y -OpenVMS Alpha n y n y y n -Mac OS X 10.7 Intel 32-bit n y n y y y -Mac OS X 10.7 Intel 64-bit n y n y y y -AIX 6.1 32- and 64-bit y y y y y y -FreeBSD 8.2-STABLE 32&64 bit n x n x y y -CentOS 5.5 Linux 2.6.18-194 i686 GNU (1)W y y(2) y y y y -CentOS 5.5 Linux 2.6.18-194 i686 Intel W n y n y y n -CentOS 5.5 Linux 2.6.18-194 i686 PGI W n y n y y n -CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y(3) y y y y -CentOS 5.5 Linux 2.6.16 x86_64 Intel W n y n y y n -CentOS 5.5 Linux 2.6.16 x86_64 PGI W n y n y y y -RedHat EL4 2.6.18 Xeon Lustre C y y y y y n -Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 n y n y y y -SGI Linux 2.6.32.19 y y y y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.10 32-bit y y y y -Solaris2.10 64-bit y y y y -Windows XP y y(4) y n -Windows XP x64 y y(4) y n -Windows Vista y y(4) y y -Windows Vista x64 y y(4) y y -OpenVMS Alpha n n n n -Mac OS X 10.7 Intel 32-bit y(5) n y n -Mac OS X 10.7 Intel 64-bit y(5) n y n -AIX 6.1 32- and 64-bit n n n y -FreeBSD 8.2-STABLE 32&64 bit y x x y -CentOS 5.5 Linux 2.6.18-128 i686 GNU (1)W y y(2) y y -CentOS 5.5 Linux 2.6.18-128 i686 Intel W y y y n -CentOS 5.5 Linux 2.6.18-128 i686 PGI W y y y n -CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y y y -CentOS 5.5 Linux 2.6.16 x86_64 Intel W y y y n -CentOS 5.5 Linux 2.6.16 x86_64 PGI W y y y n -RedHat EL4 2.6.18 Xeon Lustre C y y y 
n -Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 y y y y -SGI Linux 2.6.32.19 y y y y - - (1) Fortran compiled with gfortran. - (2) With PGI and Absoft compilers. - (3) With PGI compiler for Fortran. - (4) Using Visual Studio 2008 w/ Intel Fortran 10.1 (Cygwin shared libraries are not supported) - (5) C and C++ shared libraries will not be built when Fortran is enabled. - Compiler versions for each platform are listed in the preceding - "Platforms Tested" table. - - -Known Problems -============== -* After the shared library support was fixed for some bugs, it was discovered - that "make prefix=XXX install" no longer works for shared libraries. It - still works correctly for static libraries. Therefore, if you want to - install the HDF5 shared libraries in a location such as /usr/local/hdf5, - you need to specify the location via the --prefix option during configure - time. E.g, ./configure --prefix=/usr/local/hdf5 ... - (AKC - 2011/05/07 HDFFV-7583) - -* The parallel test, t_shapesame, in testpar/, may run for a long time and may - be terminated by the alarm signal. If that happens, one can increase the - alarm seconds (default is 1200 seconds = 20 minutes) by setting the - environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600 - (60 minutes). Note that the t_shapesame test may fail in some systems - (see the "While working on the 1.8.6 release..." problem below). If - it does, it will waste more time if $HDF5_ALARM_SECONDS is set - to a larger value. (AKC - 2011/05/07) - -* The C++ and FORTRAN bindings are not currently working on FreeBSD. - (QAK - 2011/04/26) - -* Shared Fortran libraries are not quite working on AIX. While they are - generated when --enable-shared is specified, the fortran and hl/fortran - tests fail. We are looking into the issue. HL and C++ shared libraries - should now be working as intended, however. (MAM - 2011/04/20) - -* The --with-mpe configure option does not work with Mpich2. AKC - 2011/03/10 - -* If parallel gmake (e.g., gmake -j 4) is used, the "gmake clean" command - sometimes fails in the perform directory due to the attempt to remove the - executable of h5perf or h5perf_serial by two "parallel" commands. This error - has no consequence on the functionality of the HDF5 library or install. It - is fixed in the next release. AKC - 2011/01/25 - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or file systems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - -* The library's test dt_arith.c showed a compiler's rounding problem on - Cygwin when converting from unsigned long long to long double. The - library's own conversion works fine. We defined a macro for Cygwin to - skip this test until we can solve the problem. Please see bug #1813. - SLU - 2010/5/5 - -* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is new parameter added to - get_eoa and set_eoa callback functions. 
A new callback function - get_type_map was added in. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug in 1.8 library. Because there's only one user - complaining about it, we (Elena, Quincey, and I) decided to leave it as - it is (see bug report #1279). Quincey will make a plan for 1.10. - SLU - 2010/2/2 - -* MinGW has a missing libstdc++.dll.a library file and will not successfully link - C++ applications/tests. Do not use the enable-cxx configure option. Read all of - the INSTALL_MINGW.txt file for all restrictions. ADB - 2009/11/11 - -* The PathScale MPI implementation, accessing a Panasas file system, would - cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not - exist. This is due to the MPI_File_open() call failing if the mode has - the MPI_MODE_EXCL bit set. (See bug 1468 for details.) AKC - 2009/8/11 - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. CMC - 2009/04/28 - -* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and - tools/h5copy/testh5copy.sh will fail some of its sub-tests. These sub-tests - are expected to fail and should exit with a non-zero code but the yod - command does not propagate the exit code of the executables. Yod always - returns 0 if it can launch the executable. The test suite shell expects - a non-zero for this particular test, therefore it concludes the test has - failed when it receives 0 from yod. Skip all the "failing" test for now - by changing them as following. - - ======== Original tools/h5ls/testh5ls.sh ========= - TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ======== Change to =============================== - echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ================================================== - - ======== Original tools/h5copy/testh5copy.sh ========= - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - H5LSTEST $FILEOUT - ======== Change to =============================== - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - echo SKIP H5LSTEST $FILEOUT - ================================================== - AKC - 2008/11/10 - -* For Red Storm, a Cray XT3 system, the yod command sometimes gives the - message, "yod allocation delayed for node recovery". This interferes with - test suites that do not expect to see this message. See the section of "Red - Storm" in file INSTALL_parallel for a way to deal with this problem. - AKC - 2008/05/28 - -* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. 
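-  For example, one way to pass these flags in is through configure (a
-  sketch only; the install prefix and any other configure options shown
-  here are illustrative and site-specific):
-
-      CC=icc CFLAGS="-mp -O1" ./configure --prefix=/usr/local/hdf5
-      make && make check
-
-  The Fortran and C++ flag variables (e.g. FCFLAGS, CXXFLAGS) can be set
-  the same way if those language bindings are built.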
- -* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and - the application asks to do collective IO, we have found that when using 4 - processors, a simple collective write will sometimes be hung. This can be - verified with t_mpi test under testpar. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - SLU - 2005/6/30 - -* On IBM AIX systems, parallel HDF5 mode will fail some tests with error - messages like "INFO: 0031-XXX ...". This is from the command `poe'. - Set the environment variable MP_INFOLEVEL to 0 to minimize the messages - and run the tests again. - - The tests may fail with messages like "The socket name is already in use", - but HDF5 does not use sockets. This failure is due to problems with the - poe command trying to set up the debug socket. To resolve this problem, - check to see whether there are many old /tmp/s.pedb.* files staying around. - These are sockets used by the poe command and left behind due to failed - commands. First, ask your system administrator to clean them out. - Lastly, request IBM to provide a means to run poe without the debug socket. - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - -* There is also a configure error on Altix machines that incorrectly reports - when a version of Szip without an encoder is being used. - -* On cobalt, an SGI Altix SMP ia64 system, Intel compiler version 10.1 (which - is the default on that system) does not work properly and results in - failures during make check (in a static build) and make installcheck (during - a shared build). This appears to be a compiler optimization problem. - Reducing optimization by setting CFLAGS to -O1 or below resolves the issue. - Alternatively, using a newer version of the compiler (11.0) also works as - intended. MAM - 2010/06/01 - -* h5diff will not report enum value differences when one or both of the values - is not a valid enumeration value. The source of this bug has been identified - and it will be fixed in 1.8.8. DER - 2011/04/27 - - -%%%%1.8.6%%%% - - -HDF5 version 1.8.6 released on Mon Feb 14 10:26:30 CST 2011 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.5 and -HDF5 1.8.6, and contains information on the platforms tested and -known problems in HDF5-1.8.6. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. 
- -Links to the HDF5 1.8.6 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.6 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.6 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.6 (current -release) versus Release 1.8.5": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.5 -- Platforms Tested -- Supported Configuration Features Summary -- Known Problems - - -New Features -============ - - Configuration - ------------- - - CMake: Improved CPack packaging, added parallel commands, improved - configuration options (better similarity to configure), added more - tests, better support for use in external cmake projects. - (ADB - 2010/10/07) - - The default configuration setting for official releases is - --enable-production. For unofficial releases, the default configuration - setting has been --disable-production. (AKC - 2010/05/28) - Library - ------- - - Added support for thread safety on Windows using the Windows threads - library. Use the HDF5_ENABLE_THREADSAFE option in CMake on a Windows - platform to enable this functionality. This is supported on Windows - Vista and newer Windows operating systems. (MAM - 2010/09/10) - - H5Tset_order and H5Tget_order now support all datatypes. A new byte - order, H5T_ORDER_MIXED, has been added specifically for a compound - datatype and its derived type. (SLU - 2010/8/23) - - Improved performance of metadata I/O by changing the default algorithm - to perform I/O from all processes (instead of just process 0) when using - parallel I/O drivers. (QAK - 2010/07/19) - - Improved performance of I/O on datasets with the same shape, but - different rank. (QAK - 2010/07/19) - - Improved performance of the chunk cache by avoiding unnecessary b-tree - lookups of chunks already in cache. (NAF - 2010/06/15) - - Parallel Library - ---------------- - - None - - Tools - ----- - - h5diff: Added a new flag: --exclude-path. The specified path to an - object will be excluded when comparing two files or two groups. If a - group is specified to be excluded, all member objects of that group - will be excluded. (JKM - 2010/09/16). - - h5ls: Added a new flag: --no-dangling-links. See --help output for - details. (JKM - 2010/06/15) - - h5ls: Added a new flag --follow-symlinks. See --help output for - details. (JKM - 2010/05/25) - - High-Level APIs - --------------- - - None - - F90 API - ------- - - None - - C++ API - ------- - - None - - -Support for New Platforms, Languages, and Compilers -=================================================== - - Sun C and C++ 5.10 and Sun Fortran 95 8.4. 
- - Mac OS X 10.6.4 with gcc 4.2.1 and gfortran 4.6 - - -Bug Fixes since HDF5-1.8.5 -========================== - - Configuration - ------------- - - The default number of MPI processes for testing purposes has been - changed from 3 to 6. (AKC - 2010/11/11) - - Some tests in tools/h5repack may fail on AIX systems when -q32 mode is - used. The error is caused by not requesting enough memory by default. - Added "env LDR_CNTRL=MAXDATA=0x20000000@DSA" into the $RUNSERIAL and - $RUNPARALLEL in the AIX config file so that executables are tested with - more memory. (AKC - 2010/11/11) - - Removed recognition of the parallel compilers of LAM(hcc) and - ChMPIon(cmpicc) since we have no access to these two MPI implementations - and cannot verify their correctness. (AKC - 2010/07/14 - Bug 1921) - - PHDF5 was changed to use "mpiexec" instead of mpirun as the default - MPI application startup command as defined in the MPI-2 definition, - section 4.1. (AKC - 2010/06/11 - Bug 1921) - - Library - ------- - - Fixed a bug that caused big-endian machines to generate corrupt files - when using the scale-offset filter with floating point data or fill - values. Note that such datasets will no longer be readable - by any machine after this patch. (NAF - 2010/02/02 - Bug 2131) - - Retrieving a link's name by index in the case where the link is external - and the file that the link refers to doesn't exist will now fail - gracefully rather than cause a segmentation fault. (MAM - 2010/11/17) - - Modified the metadata accumulator to better track accumulated dirty metadata - in an effort to reduce unnecessary I/O in certain situations and to - fix some other corner cases which were prone to error. (MAM - 2010/10/15) - - Added a new set of unit tests that are run during 'make check' to verify - the behavior of the metadata accumulator. (MAM - 2010/10/15) - - Modified the library to always cache symbol table information. Libraries - from version 1.6.3 and earlier have a bug which causes them to require - this information for some operations. (NAF - 2010/09/21 - Bug 1864) - - Fixed a bug where the library could generate an assertion/core dump when - a file that had been created with H5Pset_libver_bounds(fapl, - H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) but didn't have a superblock - extension was later reopened. (QAK - 2010/09/16 - Bug 1968) - - Fixed a bug that could occur when getting information for a new-style - group that was previously opened through a file handle that was later - closed. (NAF - 2010/09/15) - - Added a define check in H5public.h if stdint.h is supported by the C++ - compiler. This define is only available on Windows with VS2010 and using - CMake to build the library. (ADB - 2010/09/13 - Bug 1938) - - When a mandatory filter failed to write data chunks, the dataset - couldn't close (bug 1260). The fix releases all resources and closes - the dataset but returns a failure. (SLU - 2010/09/08) - - H5Eset_current_stack now also closes the error stack set as the - default. This is to avoid a potential problem. - (SLU - 2010/09/07 - Bug 1799) - - Corrected a situation where a 1-D chunked dataset could get created by an - application without calling H5Pset_chunk(). H5Pset_chunk is now - required for creating all chunked datasets. (QAK - 2010/09/02) - - Fixed many memory issues that valgrind exposed. (QAK - 2010/08/24) - - Fixed a bug in the filter's public CAN_APPLY function. The return - value should be htri_t not herr_t.
(SLU - 2010/08/05 - Bug 1239) - - Fixed the STDIO VFD to use fseeko64 instead of fseek64 for 64-bit I/O - support. (AKC - 2010/7/30) - - Fixed a bug in the direct I/O driver that could render files with certain - kinds of unaligned data unreadable or corrupt them. (NAF - 2010/07/28) - - valgrind reported an error of copying data to itself when a new attribute - is written. Fixed by taking out the memcpy step in the attribute code. - (SLU - 2010/07/28 - Bug 1956) - - Corrected various issues in the MPI datatype creation code which could - cause resource leaks or incorrect behavior (and may improve the - performance as well). (QAK - 2010/07/19) - - Fixed a bug that could cause file corruption when using non-default sizes - of addresses and/or lengths. This bug could also cause uncorrupted files - with this property to be unreadable. This bug was introduced in 1.8.5. - (NAF - 2010/07/16 - Bug 1951) - - Parallel Library - ---------------- - - None - - Tools - ----- - - Fixed h5diff to compare member objects and groups recursively when - two files or groups are compared. (JKM - 2010/9/16 - Bug 1975) - - Fixed h5repack to be able to convert a dataset to COMPACT layout. - (JKM - 2010/09/15 - Bug 1896) - - Changed h5ls to not interpret special characters in object or attribute - names for output. (JKM - 2010/06/28 - Bug 1784) - - Revised the order of arguments for h5cc, h5fc, h5c++, h5pcc and h5pfc. - CPPFLAGS, CFLAGS, LDFLAGS, and LIBS have been duplicated with an H5BLD_ - prefix to put the flags and paths from the hdf5 build in the correct - places and allow the script user to add entries in CPPFLAGS, CFLAGS, - LDFLAGS, and LIBS that will take precedence over those from the hdf5 - build. The user can make these entries persistent by editing - CFLAGSBASE, CPPFLAGSBASE, LDFLAGSBASE, and LIBSBASE near the top of - the script or temporary by setting HDF5_CFLAGS, HDF5_CPPFLAGS, - HDF5_LDFLAGS, or HDF5_LIBS in the environment. The new order of - arguments in these scripts is $CLINKER $H5BLD_CPPFLAGS $CPPFLAGS - $H5BLD_CFLAGS $CFLAGS $LDFLAGS $clibpath $link_objs $LIBS $link_args - $shared_link. (LRK - 2010/10/25 - Bug 1973) - - F90 API - ------ - - None - - C++ API - ------ - - None - - High-Level APIs: - ------ - - None - - Fortran High-Level APIs: - ------ - - None - - -Platforms Tested -================ -The following platforms and compilers have been tested for this release. - - AIX 6.1 xlc 11.1.0.3 - (NCSA BP) xlC 11.1.0.3 - xlf 13.1.0.3 - mpcc_r 11.1.0.3 - mpxlf_r 13.1.0.3 - - FreeBSD 6.3-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305 - (duty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.4.5 20100803 - g++ 4.4.5 20100803 - gfortran 4.4.5 20100803 - - FreeBSD 6.3-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305 - (liberty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.4.5 20100803 - g++ 4.4.5 20100803 - gfortran 4.4.5 20100803 - - Linux 2.6.18-194.3.1.el5PAE gcc (GCC) 4.1.2 and 4.4.2 - #1 SMP i686 i686 i386 G95 (GCC 4.0.3 (g95 0.93!) Apr 21 2010) - (jam) GNU Fortran (GCC) 4.1.2 20080704 - (Red Hat 4.1.2-48) and 4.4.2 - PGI C, Fortran, C++ 10.4-0 32-bit - PGI C, Fortran, C++ 10.6-0 32-bit - Intel(R) C Compiler for 32-bit - applications, Version 11.1 - Intel(R) C++ Compiler for 32-bit - applications, Version 11.1 - Intel(R) Fortran Compiler for 32-bit - applications, Version 11.1 - Absoft 32-bit Fortran 95 10.0.7 - MPICH mpich2-1.3.1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - Linux 2.6.18-194.17.1.el5 gcc 4.1.2 and 4.4.2 - #1 SMP x86_64 GNU/Linux G95 (GCC 4.0.3 (g95 0.93!) 
Apr 21 2010) - (amani) tested for both 32- and 64-bit binaries - GNU Fortran (GCC) 4.1.2 20080704 - (Red Hat 4.1.2-46) and 4.4.2 - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64, - Version 11.1. - PGI C, Fortran, C++ Version 9.0-4 - for 64-bit target on x86-64 - MPICH mpich2-1.3.1 compiled with - gcc 4.1.2 and gfortran 4.1.2 - - SGI ProPack 7 Linux Intel(R) C++ Version 11.1 20100806 - 2.6.32.19-0.3.1.1982.0.PTF- Intel(R) Fortran Version 11.1 20100806 - default #1 SMP SGI MPT 2.01 - SGI Altix UV - (NCSA ember) - - SunOS 5.10 32- and 64-bit Sun C 5.9 Sun OS_sparc Patch 124867-16 - (linew) Sun Fortran 95 8.3 Sun OS_sparc Patch 127000-13 - Sun C++ 5.9 Sun OS_sparc Patch 124863-62 - Sun C 5.10 SunOS_sparc Patch 141861-07 - Sun Fortran 95 8.4 SunOS_sparc Patch 128231-06 - Sun C++ 5.10 SunOS_sparc 128228-11 - - Intel Xeon Linux 2.6.18- gcc 4.2.4 - 92.1.10.el5_lustre.1.6.6smp- Intel(R) C++ Version 10.1.017 - perfctr #8 SMP Intel(R) Fortran Compiler Version 10.1.017 - (NCSA abe) Open MPI 1.3.2 - MVAPICH2-1.5.1_pgi-10.8 - - Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 (cmake) - Cygwin(1.7.7 native gcc(4.3.4) compiler and gfortran) - - Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - Visual Studio 2010 (cmake) - Cygwin(1.7.7 native gcc(4.3.4) compiler and gfortran) - - Windows Vista Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Windows Vista x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files) - Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake) - - Mac OS X 10.6.3 (Intel 64-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 - Darwin Kernel Version 10.3.1 GNU Fortran (GCC) 4.5.0 20090910 - Intel C, C++ and Fortran compilers 11.1 20100806 - - Mac OS X 10.6.4 (Intel 32-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 - Darwin Kernel Version 10.4.0 GNU Fortran (GCC) 4.6.0 20101106 - Intel C, C++ and Fortran compilers 12.0.0 20101110 - - Mac OS X 10.6.4 (Intel 64-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (Apple Inc. 
build 5659) - Darwin Kernel Version 10.6.0 GNU Fortran (GCC) 4.5.0 20090910 - Intel C, C++ and Fortran compilers 11.1 20100806 - - Fedora 12 2.6.32.16-150.fc12.ppc64 #1 SMP ppc64 GNU/Linux - gcc (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10) - GNU Fortran (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10) - - Debian5.06 2.6.26-2-686 #1 SMP i686 GNU/Linux - gcc (Debian 4.3.2-1.1) 4.3.2 - GNU Fortran (Debian 4.3.2-1.1) 4.3.2 - - Debian5.06 2.6.26-2-amd64 #1 SMP x86_64 GNU/Linux - gcc (Debian 4.3.2-1.1) 4.3.2 - GNU Fortran (Debian 4.3.2-1.1) 4.3.2 - - Fedora14 2.6.35.6-48.fc14.i686.PAE #1 SMP i686 i686 i386 GNU/Linux - gcc (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4) - GNU Fortran (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4) - - Fedora14 2.6.35.6-48.fc14.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux - gcc (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4) - GNU Fortran (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4) - - SUSE 11.3 2.6.34.7-0.7-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux - gcc (SUSE Linux) 4.5.0 20100604 [gcc-4_5-branch revision 160292] - GNU Fortran (SUSE Linux) 4.5.0 20100604 [gcc-4_5-branch revision 160292] - - SUSE 11.3 2.6.34.7-0.7-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux - gcc (SUSE Linux) 4.5.0 20100604 [gcc-4_5-branch revision 160292] - GNU Fortran (SUSE Linux) 4.5.0 20100604 [gcc-4_5-branch revision 160292] - - Ubuntu 10.10 2.6.35-25-generic #44-Ubuntu SMP i686 GNU/Linux - gcc (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5 - GNU Fortran (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5 - - Ubuntu 10.10 2.6.35-25-generic #44-Ubuntu SMP x86_64 GNU/Linux - gcc (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5 - GNU Fortran (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5 - - OpenVMS Alpha 8.3 HP C V7.3-009 - HP Fortran V8.2-104679-48H9K - HP C++ V7.3-009 - -Tested Configuration Features Summary -======================================== - - In the tables below - y = tested - n = not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90 F90 C++ zlib SZIP - parallel parallel -Solaris2.10 32-bit n y n y y y -Solaris2.10 64-bit n y n y y y -Windows XP n y(4) n y y y -Windows XP x64 n y(4) n y y y -Windows Vista n y(4) n y y y -Windows Vista x64 n y(4) n y y y -OpenVMS Alpha n y n y y n -Mac OS X 10.6 Intel n y n y y y -AIX 6.1 32- and 64-bit y y y y y y -FreeBSD 6.3-STABLE 32&64 bit n y n y y y -CentOS 5.5 Linux 2.6.18-194 i686 GNU (1)W y y(2) y y y y -CentOS 5.5 Linux 2.6.18-194 i686 Intel W n y n y y n -CentOS 5.5 Linux 2.6.18-194 i686 PGI W n y n y y n -CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y(3) y y y y -CentOS 5.5 Linux 2.6.16 x86_64 Intel W n y n y y n -CentOS 5.5 Linux 2.6.16 x86_64 PGI W n y n y y y -RedHat EL4 2.6.18 Xeon Lustre C y y y y y n -Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 n y n y y y -SGI Linux 2.6.32.19 y y y y y y - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.10 32-bit y y y y -Solaris2.10 64-bit y y y y -Windows XP y y(4) y n -Windows XP x64 y y(4) y n -Windows Vista y y(4) y y -Windows Vista x64 y y(4) y y -OpenVMS Alpha n n n n -Mac OS X 10.6 y(5) n y n -AIX 6.1 32- and 64-bit n n n y -FreeBSD 6.3-STABLE 32&64 bit y n y y -CentOS 5.5 Linux 2.6.18-128 i686 GNU (1)W y y(2) y y -CentOS 5.5 Linux 2.6.18-128 i686 Intel W y y y n -CentOS 5.5 Linux 2.6.18-128 i686 PGI W y y y n -CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y y y -CentOS 5.5 Linux 2.6.16 x86_64 Intel W y y y n -CentOS 5.5 Linux 2.6.16 x86_64 PGI W y y y n 
-RedHat EL4 2.6.18 Xeon Lustre C y y y n -Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 y y y y -SGI Linux 2.6.32.19 y y y y - - (1) Fortran compiled with gfortran. - (2) With PGI and Absoft compilers. - (3) With PGI compiler for Fortran. - (4) Using Visual Studio 2008 w/ Intel Fortran 10.1 (Cygwin shared libraries are not supported) - (5) C and C++ shared libraries will not be built when Fortran is enabled. - Compiler versions for each platform are listed in the preceding - "Platforms Tested" table. - - -Known Problems -============== -* examples/run-all-ex.sh does not work on Cygwin. (NAF - 2011/02/11) - -* The parallel test, t_shapesame in testpar, is rather unstable as it continues to - have occasional errors on AIX and quite often on NCSA Abe. It is being built - but it is not run automatically in the "make check" command. One would have to - run it by hand to see if it works on a particular machine. AKC - 2011/01/28 - -* Although OpenVMS Alpha is supported, there are several problems with the C - test suite - getname.c, lheap.c, mtime.c, and stab.c. The test - suite for h5diff also fails. These failures are from the tests, not the - library. We have fixed these failures, but it was too late to put the fixes - into this release. If you install the 1.8.6 library, it should still work - despite these test failures. If you want the working copy without any - test failures, you can request it from us. SLU - 2011/01/26 - -* If parallel gmake (e.g., gmake -j 4) is used, the "gmake clean" command - sometimes fails in the perform directory due to the attempt to remove the - executable of h5perf or h5perf_serial by two "parallel" commands. This error - has no consequence on the functionality of the HDF5 library or install. It - is fixed in the next release. AKC - 2011/01/25 - -* While working on the 1.8.6 release of HDF5, a bug was discovered that can - occur when reading from a dataset in parallel shortly after it has been - written to collectively. The issue was exposed by a new test in the parallel - HDF5 test suite, but had existed before that. We believe the problem lies with - certain MPI implementations and/or filesystems. - - We have provided a pure MPI test program, as well as a standalone HDF5 - program, that can be used to determine if this is an issue on your system. - They should be run across multiple nodes with a varying number of processes. - These programs can be found at: - http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/ - -* The h5diff tool can display garbage values when variable-length strings in - a compound type dataset are compared. This also occurs with variable-length - string arrays in a compound type dataset. See bug #1989. This will be fixed - in the next release. JKM - 2010/11/05 - -* The AIX --enable-shared setting does not quite work. It can produce a shared - library, but there cannot be more than one shared library that is - interlinked. This means that the high-level APIs will not work, which is not - very useful. We hope to have a solution in the next release. - (AKC - 2010/10/15) - -* H5Eset_auto can cause a seg fault for a library API call if the application - is compiled with -DH5_USE_16_API (see bug 1707). It will be fixed in the - next release. SLU - 2010/10/5 - -* The library's test dt_arith.c showed a compiler's rounding problem on - Cygwin when converting an unsigned long long to a long double. The - library's own conversion works fine. We defined a macro for Cygwin to - skip this test until we can solve the problem. Please see bug #1813.
- SLU - 2010/5/5 - -* All the VFL drivers aren't backwardly compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. A new parameter was added to the - get_eoa and set_eoa callback functions, and a new callback function - get_type_map was added. The public function H5FDrealloc was taken out in - 1.8. The problem only happens when users define their own driver for 1.6 - and try to plug in a 1.8 library. This will be fixed in 1.10. SLU - 2010/2/2 - -* MinGW has a missing libstdc++.dll.a library file and will not successfully link - C++ applications/tests. Do not use the enable-cxx configure option. Read all of - the INSTALL_MINGW.txt file for all restrictions. ADB - 2009/11/11 - -* The PathScale MPI implementation, accessing a Panasas file system, would - cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not - exist. This is due to the MPI_File_open() call failing if the amode has - the MPI_MODE_EXCL bit set. (See bug 1468 for details.) AKC - 2009/8/11 - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. CMC - 2009/04/28 - -* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and - tools/h5copy/testh5copy.sh will fail some of their sub-tests. These - sub-tests are expected to fail and should exit with a non-zero code but - the yod command does not propagate the exit code of the executables. Yod - always returns 0 if it can launch the executable. The test suite shell - expects a non-zero for this particular test. Therefore, it concludes the - test has failed when it receives 0 from yod. To skip all the "failing" - tests for now, change them as shown below. - - ======== Original tools/h5ls/testh5ls.sh ========= - TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ======== Change to =============================== - echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ================================================== - - ======== Original tools/h5copy/testh5copy.sh ========= - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - H5LSTEST $FILEOUT - ======== Change to =============================== - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - echo SKIP H5LSTEST $FILEOUT - ================================================== - AKC - 2008/11/10 - -* For Red Storm, a Cray XT3 system, the yod command sometimes gives the - message "yod allocation delayed for node recovery." This interferes - with test suites that do not expect to see this message. See the "Red Storm" - section in file INSTALL_parallel for a way to deal with this problem. - AKC - 2008/05/28 - -* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use the -mp -O1 compilation flags to build the libraries. A higher level - of optimization causes failures in several HDF5 library tests. 
- -* On mpich 1.2.5 and 1.2.6 on a system using four processors, if more than - two processes contribute no I/O and the application asks to do collective - I/O, we have found that a simple collective write will sometimes hang. This - can be verified with the t_mpi test under testpar. - -* A dataset created or rewritten with a v1.6.3 or later library cannot be - read with the v1.6.2 or earlier library when the Fletcher32 EDC filter - is enabled. There was a bug in the calculation of the Fletcher32 checksum - in the library before v1.6.3; the checksum value was not consistent - between big-endian and little-endian systems. This bug was fixed in - Release 1.6.3. However, after fixing the bug, the checksum value was no - longer the same as before on little-endian system. Library releases after - 1.6.4 can still read datasets created or rewritten with an HDF5 library of - v1.6.2 or earlier. SLU - 2005/6/30 - -* On IBM AIX systems, parallel HDF5 mode will fail some tests with error - messages like "INFO: 0031-XXX ...". This is from the command `poe'. To - work around this, set the environment variable MP_INFOLEVEL to 0 to - minimize the messages and run the tests again. The tests may fail with - messages like "The socket name is already in use", but HDF5 does not use - sockets. This failure is due to problems with the poe command trying to - set up the debug socket. To resolve this problem, check to see whether - there are any old /tmp/s.pedb.* files around. These are sockets used by - the poe command and left behind if the command failed at some point. To - resolve this, ask your system administrator to remove the - old/tmp/s.pedb.* files, and then ask IBM to provide a means to run poe - without the debug socket. - -* The --enable-static-exec configure flag will only statically link - libraries if the static version of that library is present. If only the - shared version of a library exists (i.e., most system libraries on - Solaris, AIX, and Mac, for example, only have shared versions), the flag - should still result in a successful compilation, but note that the - installed executables will not be fully static. Thus, the only guarantee - on these systems is that the executable is statically linked with just - the HDF5 library. - -* On an SGI Altix SMP ia64 system, the Intel compiler version 10.1 (which - is the default on that system) does not work properly and results in - failures during the make check (in a static build) and the make - installcheck (in a shared build). This appears to be a compiler - optimization problem. Reducing the optimization by setting CFLAGS to - -O1 or below resolves the issue. Using a newer version of the compiler - (11.0) avoids the issue. MAM - 2010/06/01 - -* On solaris systems, when running the examples with the scripts installed in - .../share/hdf5_examples, two of the c tests, h5_extlink and h5_elink_unix2win - may fail or generate HDF5 errors because the script commands in c/run-c-ex.sh - fail to create test directories red, blue, and u2w. Moving the '!' in lines - 67, 70, 73 of run-c-ex.sh will fix the problem. For example the script command - "if ! test -d red; then" will work on solaris if changed to - "if test ! -d red; then". 
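-  As an illustration only, the same edit can be applied with GNU sed
-  (the path assumes the installed copy of the script; on Solaris itself,
-  where GNU sed may not be available, edit the three lines by hand):
-
-      sed -i 's/if ! test -d/if test ! -d/' c/run-c-ex.sh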
- - -%%%%1.8.5%%%% - - -HDF5 version 1.8.5 released on Fri Jun 4 13:27:31 CDT 2010 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.4 and HDF5 1.8.5, and -contains information on the platforms tested and known problems in HDF5-1.8.5. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt and HISTORY-1_8.txt -in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.5 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.5 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.5 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.5 (current -release) versus Release 1.8.4": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.4 -- Platforms Tested -- Supported Configuration Features Summary -- Known Problems - - -New Features -============ - - Configuration - ------------- - - CMake Early Access: This release adds support for building HDF5 using - the CMake system. Initial work has targeted Windows, but other platforms - can be used. See the CMake.TXT file for more information. Version - 2.8.1 of CMake is required. - - Configure now adds appropriate defines for supporting large (64-bit) - files on all systems, where supported, by default, instead of only Linux. - This large file support is controllable with the --enable-largefile - configure option. The Linux-specific --enable-linux-lfs option has been - deprecated in favor of this new option. Please note that specifying - --disable-large does NOT attempt to "turn off" largefile support if it - is natively supported by the compiler, but rather just disables - configure from actively trying to add any additional compiler flags. - (MAM - 2010/05/05 - Bug # 1772/1434) - - Fixed an signal handling mask error in H5detect that might result in - SIGBUS or SIGSEGV failures in some platforms such as Linux on Sparc. - (AKC - 2010/4/28 - Bug # 1764) - - Fixed various "strict aliasing" problems, allowing higher levels - of compiler optimization (in particular, allowing '-O3' to work - with recent versions of GCC). (QAK - 2010/04/26) - - Upgraded versions of autotools used to generate configuration suite. - We now use Automake 1.11.1, Autoconf 2.65, and Libtool 2.2.6b. - (MAM - 2010/04/15) - - Added the xlc-* and mpcc_r-* BASENAME patterns to be recognized as IBM - compilers so that the IBM compiler options can be added properly. This - allows non-system-default compiler command names (e.g. xlc-m.n.k.l) be - recognized. (AKC - 2009/11/26) - - Library - ------- - - Performance is substantially improved when extending a dataset with early - allocation. 
(NAF - 2010/03/24 - Bug # 1637) - - Added support for filtering densely stored groups. Many of the API - functions related to filters have been extended to support dense groups - as well as datasets. Pipeline messages can now be stored in a group's - object header. (NAF/QAK - 2009/11/3) - - Parallel Library - ---------------- - - None - - Tools - ----- - - h5dump: Added the new packed bits feature which prints packed bits stored - in an integer dataset. (AKC/ADB - 2010/5/7) - - h5diff: Fixed incorrect behavior (hang) in parallel mode when specifying - invalid options (ex: -v and -q). (JKM - 2010/02/17) - - h5diff: Added new flag --no-dangling-links (see --help for details). - (JKM - 2010/02/10) - - h5diff: Added new flag --follow-symlinks (see --help for details). - (JKM - 2010/01/25) - - h5diff: Added a fix to correct the display of garbage values when - displaying big-endian data on a little-endian machine. (JKM - 2009/11/20) - - High-Level APIs - --------------- - - None - - F90 API - ------- - - None - - C++ API - ------- - - New member functions - + Overloaded CommonFG::getObjnameByIdx to take char* for name. - + Overloaded CommonFG::getObjTypeByIdx to return type name as a char*. - (BMR - 2010/05/10) - + Added DataSet::getInMemDataSize() to simplify getting the dataset's - data size in memory. (BMR - 2009/07/26) - - -Support for New Platforms, Languages, and Compilers -=================================================== - - AIX 6.1 has been added. (AKC - 2010/1/4) - - -Bug Fixes since HDF5-1.8.4 -========================== - - Configuration - ------------- - - Fixed various "strict aliasing" problems, allowing higher levels - of compiler optimization (in particular, allowing '-O3' to work - with recent versions of GCC). (QAK - 2010/04/26) - - Library - ------- - - Fixed a file corruption bug that could happen when shrinking a compressed - dataset. (NAF - 2010/05/20) - - Fixed some memory leaks in VL datatype conversion when strings are - used as fill values. (MAM - 2010/05/12 - Bug # 1826) - - Fixed an H5Rcreate failure when passing in a -1 for the dataspace - identifier. (ADB - 2010/4/28) - - Fixed a bug when copying objects with NULL references with the - H5O_COPY_EXPAND_REFERENCE_FLAG flag set. (NAF - 2010/04/08 - Bug # 1815) - - Added a mechanism to the H5I interface to save returned object identifier - structures for immediate re-use if needed. This addresses a potential - performance issue by delaying the case when the next identifier to be - registered has grown so large that it wraps around and needs to be - checked to see whether it is available for distribution. - (MAM - 2010/03/15 - Bug # 1730) - - Files can now be concurrently opened more than once using the core file - driver, as long as the backing store is used. (NAF - 2010/03/09) - - Added support for H5O_COPY_EXPAND_EXT_LINK_FLAG to H5Ocopy. External - links will now be expanded if this flag is set. - (NAF - 2010/03/05 - Bug # 1733) - - Fixed a bug where the library, when traversing an external link, would - reopen the source file if nothing else worked. (NAF - 2010/03/05) - - Fixed a bug where fractal heap identifiers for attributes and shared - object header messages could be incorrectly encoded in the file for - files created on big-endian platforms. - Please see http://www.hdfgroup.org/HDF5/release/known_problems if you - suspect you have a file with this problem. 
- (QAK - 2010/02/23 - Bug # 1755) - - Fixed an intermittent bug in the b-tree code which could be triggered - by expanding and shrinking chunked datasets in certain ways. - (NAF - 2010/02/16) - - H5Tdetect_class reported a VL string as a string type, but when it was in - a compound type, it reported it as a VL type. This has been fixed to be - consistent; it now always returns a string type. - (SLU - 2009/12/10 - Bug # 1584) - - Allow "child" files from external links to be correctly located when - relative to a "parent" file that is opened through a symbolic link. - (QAK - 2009/12/01) - - Parallel Library - ---------------- - - Parallel mode on AIX will fail some of the testcheck_version.sh tests - where it treats "exit(134)" the same as if process 0 had received an abort - signal. Fixed. (AKC - 2009/11/3) - - Tools - ----- - - Fixed h5ls to return exit code 1 (error) when a non-existent file is - specified. (JKM - 2010/04/27 - Bug # 1793) - - Fixed an h5copy failure when copying a dangling link that is specified - directly. (JKM - 2010/04/22 - Bug # 1817) - - Fixed an h5repack failure that lost attributes from a dataset of - reference type. (JKM - 2010/3/25 - Bug # 1726) - - Fixed an h5repack error that set NULL for object reference values for - datasets, groups, or named datatypes. (JKM - 2010/03/19 - Bug # 1814) - - F90 API - ------ - - None - - C++ API - ------ - - The constructor PropList::PropList(id) was fixed to act properly - according to the nature of 'id'. When 'id' is a property class - identifier, a new property list will be created. When 'id' is a - property list identifier, a copy of the property list will be made. - (BMR - 2010/5/9) - - The parameters 'size' and 'bufsize' in CommonFG::getLinkval and - CommonFG::getComment, respectively, now have default values for the - user's convenience. (BMR - 2009/10/23) - - A NULL pointer access was fixed. (BMR - 2009/10/05 - Bug # 1061) - - Read/write methods of the DataSet and Attribute classes were fixed - to handle strings correctly. (BMR - 2009/07/26) - - High-Level APIs: - ------ - - Fixed a bug in H5DSattach_scale, H5DSis_attached, and H5DSdetach_scale - caused by using the H5Tget_native_type function to determine the native - type for reading the REFERENCE_LIST attribute. This bug was exposed - on Mac PPC. (EIP - 2010/05/22 - Bug # 1851) - - Fixed a bug in the H5DSdetach_scale function when 0 bytes were - allocated after the last reference to a dimension scale was removed - from the list of references in a VL element of the DIMENSION_LIST - attribute. Modified the function to comply with the specification: - the DIMENSION_LIST attribute is now deleted when no dimension scales - are left attached. (EIP - 2010/05/14 - Bug # 1822) - - Fortran High-Level APIs: - ------ - - None - - -Platforms Tested -================ -The following platforms and compilers have been tested for this release.
- - AIX 5.3 xlc 7.0.0.9, 8.0.0.20, 9.0.0.4 - (LLNL Up) xlC 7.0.0.9, 8.0.0.20, 9.0.0.4 - xlf 9.1.0.9, 10.1.0.9, 11.1.0.7 - mpcc_r 7.0.0.9 - mpxlf_r 09.01.0000.0008 - - AIX 6.1 xlc 10.1.0.6 - (NCSA BP) xlC 10.1.0.6 - xlf 12.1.0.7 - - Cray XT3 (2.1.56) cc (pgcc) 10.0-0 - (SNL red storm) ftn (pgf90) 10.0-0 - CC (pgCC) 10.0-0 - - FreeBSD 6.3-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305 - (duty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.3.4 20090419 - g++ 4.3.4 20090419 - gfortran 4.3.4 20090419 - - FreeBSD 6.3-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305 - (liberty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.4.1 20090421 - g++ 4.4.1 20090421 - gfortran 4.4.1 20090421 - - Linux 2.6.18-128.1.6.el5xen gcc (GCC) 4.1.2 20080704 and 4.4.2 - #1 SMP i686 i686 i386 GNU Fortran (GCC) 4.1.2 20080704 and 4.4.2 - (jam) g++ (GCC) 4.1.2 20080704 and 4.4.2 - G95 (GCC 4.0.3 (g95 0.93!) Apr 21 2010) - Absoft 32-bit Fortran 95 10.0.7 - PGI C, Fortran, C++ 10.4-0 32-bit - Intel(R) C, C++, Fortran Compilers for 32-bit - applications, Version 11.1 Build 20090827 - MPICH mpich2-1.0.8 compiled with - gcc 4.1.2 and GNU Fortran (GCC) 4.1.2 - - Linux 2.6.18-164.el5 #1 SMP gcc 4.1.2 20080704 and gcc 4.4.2 - x86_64 GNU/Linux GNU Fortran (GCC) 4.1.2 20080704 and 4.4.2 - (amani) g++ (GCC) 4.1.2 20080704 and 4.4.2 - G95 (GCC 4.0.3 (g95 0.93!) Apr 21 2010) - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64, - Version 11.1 Build 20090827. - PGI C, Fortran, C++ Version 10.4-0 - for 32 & 64-bit target on x86-64 - MPICH mpich2-1.0.8 compiled with - gcc 4.1.2 and GNU Fortran (GCC) 4.1.2 - - Linux 2.6.16.54-0.2.5 #1 Intel(R) C++ Version 11.0.074 - SGI Altix SMP ia64 Intel(R) Fortran Itanium(R) Version 11.0.074 - (cobalt) SGI MPI 1.38 - - SunOS 5.10 32- and 64-bit Sun C 5.9 SunOS_sparc Patch 124867-14 - (linew) Sun Fortran 95 8.3 SunOS_sparc - Patch 127000-13 - Sun C++ 5.9 SunOS_sparc Patch 124863-23 - - Intel Xeon Linux 2.6.18- Intel(R) C++ Version 10.0.026 - 92.1.10.el5_lustre.1.6.6smp- Intel(R) Fortran Compiler Version 10.0.026 - perfctr #7 SMP Open MPI 1.2.2 - (abe) MVAPICH2-0.9.8p28p2patched-intel-ofed-1.2 - compiled with icc v10.0.026 and ifort 10.0.026 - - Linux 2.6.18-76chaos #1 SMP Intel(R) C, C++, Fortran Compilers for - SMP x86_64 GNU/Linux applications running on Intel(R) 64, - (SNL Glory) Versions 11.1. 
- - Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 - Cygwin(1.7.5 native gcc(4.3.4) compiler and - gfortran) - - Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 - - Windows Vista Visual Studio 2008 w/ Intel Fortran 10.1 - - Windows Vista x64 Visual Studio 2008 w/ Intel Fortran 10.1 - - MAC OS 10.6.3 (Intel) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 - (pahra) GNU Fortran (GCC) 4.5.0 20090910 - i686-apple-darwin10-g++-4.2.1 (GCC) 4.2.1 - Intel C, C++ and Fortran compilers 11.1 - - MAC OS 10.5.8 (Intel) i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 - (tejeda) - - MAC OS 10.5 (PPC) powerpc-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 - (juniper-w) - - OpenVMS Alpha V8.3 HP C V7.3-009 - HP C++ V7.3-009 - HP Fortran V8.0-1-104669-48GBT - -Supported Configuration Features Summary -======================================== - - In the tables below - y = tested and supported - n = not supported or not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90 F90 C++ zlib SZIP - parallel parallel -Solaris2.10 32-bit n y n y y y -Solaris2.10 64-bit n y n y y y -Windows XP n y(4) n(4) y y y -Windows XP x64 n y(4) n(4) y y y -Windows Vista n y(4) n(4) y y y -Windows Vista x64 n y(4) n(4) y y y -Mac OS X 10.5 PPC n n n n y n -Mac OS X 10.5 Intel n y n y y y -Mac OS X 10.6 Intel n y n y y y -AIX 5.3 32- and 64-bit n y n y y n -AIX 6.1 32- and 64-bit n y n y y n -FreeBSD 6.3-STABLE 32&64 bit n y n y y y -RedHat EL4 2.6.9-42 i686 GNU (1) W y y y y y y -RedHat EL5 2.6.18-128 i686 GNU (1)W y y(2) y y y y -RedHat EL5 2.6.18-128 i686 Intel W n y n y y n -RedHat EL5 2.6.18-128 i686 PGI W n y n y y n -SuSe Linux 2.6.16 x86_64 GNU (1) W y y(3) y y y y -SuSe Linux 2.6.16 x86_64 Intel W n y n y y n -SuSe Linux 2.6.16 x86_64 PGI W n y n y y y -SuSe Linux 2.6.16 SGI Altix ia64 C y y y y y y -RedHat EL4 2.6.18 Xeon Lustre C y y y y y n -Cray XT3 2.1.56 y y y y y n -OpenVMS Alpha V8.3 n y n y y n - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.10 32-bit y y y y -Solaris2.10 64-bit y y y y -Windows XP y y(4) y n -Windows XP x64 y y(4) y n -Windows Vista y y(4) y n -Windows Vista x64 y y(4) y n -Mac OS X 10.5 PPC y n n n -Mac OS X 10.5 (Intel) y(5) n y n -Mac OS X 10.6 (Intel) y(5) n y n -AIX 5.3 32- and 64-bit n n n n -AIX 6.1 32- and 64-bit n n n n -FreeBSD 6.3-STABLE 32&64 bit y n y y -RedHat EL4 2.6.9-42 i686 GNU (1) W y y y y -RedHat EL5 2.6.18-128 i686 GNU (1)W y y(2) y y -RedHat EL5 2.6.18-128 i686 Intel W y y y n -RedHat EL5 2.6.18-128 i686 PGI W y y y n -SuSe Linux 2.6.16 x86_64 GNU (1) W y y y y -SuSe Linux 2.6.16 x86_64 Intel W y y y n -SuSe Linux 2.6.16 x86_64 PGI W y y y n -SuSe Linux 2.6.16 SGI Altix ia64 C y n -RedHat EL4 2.6.18 Xeon Lustre C y y y n -Cray XT3 2.1.56 n n n n -OpenVMS Alpha V8.3 n n n n - - (1) Fortran compiled with g95. - (2) With PGI and Absoft compilers. - (3) With PGI compiler for Fortran. - (4) Using Visual Studio 2008. (Cygwin shared libraries are not - supported.) - (5) Shared C and C++ are disabled when Fortran is configured in. - Compiler versions for each platform are listed in the preceding - "Platforms Tested" table. - - -Known Problems -============== -* The library's test dt_arith.c exposed a compiler's rounding problem on - Cygwin when converting from unsigned long long to long double. The - library's own conversion works correctly. 
A macro is defined for Cygwin - to skip this test until we can solve the problem. (Please see bug #1813.) - SLU - 2010/5/5 - -* The VFL driver interface is not backward compatible. In H5FDpublic.h, the - structure H5FD_class_t changed in 1.8. There is a new parameter added to - the get_eoa and set_eoa callback functions. A new callback function, - get_type_map, was added. The public function H5FDrealloc was taken - out in 1.8. The problem only happens when users define their own driver - for 1.6 and try to plug it into a 1.8 library. This affects a very small - number of users. (See bug report #1279.) SLU - 2010/2/2 - -* MinGW has a missing libstdc++.dll.a library file and will not successfully - link C++ applications/tests. Do not use the enable-cxx configure option. - Read all of the INSTALL_MINGW.txt file for all restrictions. - ADB - 2009/11/11 - -* Some tests in tools/h5repack may fail on AIX systems when -q32 mode is used. - The error is due to insufficient memory being requested. Request a larger - amount of runtime memory by setting the following environment variable: - LDR_CNTRL=MAXDATA=0x20000000@DSA - AKC - 2009/10/31 - -* The PathScale MPI implementation, accessing a Panasas file system, would - cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not - exist. This is due to the MPI_File_open() call failing if the amode has - the MPI_MODE_EXCL bit set. (See bug 1468 for details.) AKC - 2009/8/11 - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. CMC - 2009/04/28 - -* For gcc v4.3 and v4.4 in production mode, if -O3 is used, H5Tinit.c - would fail to compile; in fact, a bad H5Tinit.c is produced. If -O (same - as -O1) is used, H5Tinit.c compiled okay but test/dt_arith would fail. - When -O0 (no optimization) is used, H5Tinit.c compiled okay and all - tests passed. Therefore, -O0 is imposed for v4.3 and v4.4 of gcc. - AKC - 2009/04/20 - -* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and - tools/h5copy/testh5copy.sh will fail some of their sub-tests. These sub-tests - are expected to fail and should exit with a non-zero code but the yod - command does not propagate the exit code of the executables. Yod always - returns 0 if it can launch the executable. The test suite shell expects - a non-zero for this particular test; therefore, it concludes the test has - failed when it receives 0 from yod. Skip all the "failing" tests for now - by changing them as follows.
- - ======== Original tools/h5ls/testh5ls.sh ========= - TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ======== Change to =============================== - echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ================================================== - - ======== Original tools/h5copy/testh5copy.sh ========= - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - H5LSTEST $FILEOUT - ======== Change to =============================== - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - echo SKIP H5LSTEST $FILEOUT - ================================================== - AKC - 2008/11/10 - -* For Red Storm, a Cray XT3 system, the yod command sometimes gives the - message, "yod allocation delayed for node recovery". This interferes with - test suites that do not expect seeing this message. See the section of "Red - Storm" in file INSTALL_parallel for a way to deal with this problem. - AKC - 2008/05/28 - -* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and - the application asks to do collective IO, we have found that when using 4 - processors, a simple collective write will sometimes be hung. This can be - verified with t_mpi test under testpar. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - SLU - 2005/6/30 - -* On IBM AIX systems, parallel HDF5 mode will fail some tests with error - messages like "INFO: 0031-XXX ...". This is from the command `poe'. - Set the environment variable MP_INFOLEVEL to 0 to minimize the messages - and run the tests again. - - The tests may fail with messages like "The socket name is already in use," - but HDF5 does not use sockets. This failure is due to problems with the - poe command trying to set up the debug socket. To resolve this problem, - check to see whether there are many old /tmp/s.pedb.* files staying around. - These are sockets used by the poe command and left behind due to failed - commands. First, ask your system administrator to clean them out. - Lastly, request IBM to provide a means to run poe without the debug socket. - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. 
If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - -* There is also a configure error on Altix machines that incorrectly reports - when a version of Szip without an encoder is being used. - -* On FREE-BSD systems when shared libraries are disabled, make install fails - in install-examples with the error '"Makefile", line 635: Need an operator'. - When this error occurs removing or commenting out the line "export - LD_LIBRARY_PATH=$(LL_PATH)" (line 635 in examples/Makefile) will allow make - install to finish installing examples. The problem will be fixed in the - next release. LRK - 2010/05/26 - -* On cobalt, an SGI Altix SMP ia64 system, Intel compiler version 10.1 (which - is the default on that system) does not work properly and results in - failures during make check (in a static build) and make installcheck (during - a shared build). This appears to be a compiler optimization problem. - Reducing optimization by setting CFLAGS to -O1 or below resolves the issue. - Alternatively, using a newer version of the compiler (11.0) also works as - intended. MAM - 2010/06/01 - - -%%%%1.8.4%%%% - - -HDF5 version 1.8.4 released on Tue Nov 10 15:33:14 CST 2009 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.3 and -HDF5 1.8.4, and contains information on the platforms tested and -known problems in HDF5-1.8.4 -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.4 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.4 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.4 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.4 (current -release) versus Release 1.8.3": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.3 -- Platforms Tested -- Supported Configuration Features Summary -- Known Problems - - -New Features -============ - - Configuration - ------------- - - Configuration suite now uses Automake 1.11 and Autoconf 2.64. - MAM 2009/08/31. - - Changed default Gnu fortran compiler from g95 to gfortran since - gfortran is more likely installed with gcc now. -AKC 2009/07/19- - - Library - ------- - - The embedded library information is displayed by H5check_version() if a - version mismatch is detected. 
Also changed H5check_version() to
      suppress the warning message entirely if $HDF5_DISABLE_VERSION_CHECK is 2
      or higher. (The old behavior treated 3 or higher the same as 1, that is,
      it printed a warning and allowed the program to continue.) (AKC - 2009/9/28)
    - If a user does not want the extra library information inserted
      in the executables, it can be turned off with --disable-embedded-libinfo
      during configure. (AKC - 2009/9/15)

    Parallel Library
    ----------------
    - None

    Tools
    -----
    - h5diff: h5diff treated two INFINITY values as different. Fixed by checking
      (value==expect) before calling ABS(...) in h5diff_array.c. This makes
      (INF==INF) true (INF is treated as a number instead of a NaN).
      (PC -- 2009/07/28)
    - h5diff: added option "--use-system-epsilon" to print a difference if
      (|a-b| > EPSILON).
      Changed the default to use strict equality. (PC -- 2009/09/12)

    High-Level APIs
    ---------------
    - None

    F90 API
    -------
    - Added H5Oopen_by_addr_f. MSB - 9/14/09

    C++ API
    -------
    - None


Support for New Platforms, Languages, and Compilers
===================================================
    - PathScale compilers are recognized and can build the HDF5 library
      properly. AKC - 2009/7/28


Bug Fixes since HDF5-1.8.3
==========================

    Configuration
    -------------
    - Removed the following config files, as we no longer support them:
      config/dec-osf*, config/hpux11.00, config/irix5.x,
      config/powerpc-ibm-aix4.x, config/rs6000-ibm-aix5.x, config/unicos*
      MAM - 2009/10/08
    - Modified the configure and make process to properly preserve the user's
      CFLAGS (and company) environment variables. The build will now properly
      use automake's AM_CFLAGS for any compiler flags set by the configure
      process. Configure will no longer modify CFLAGS directly, nor will
      setting CFLAGS during make completely replace what configure has set up.
      MAM - 2009/10/08
    - Support for TFLOPS, config/intel-osf1, is removed since the TFLOPS
      machine has long been retired. AKC - 2009/10/06.
    - Added the $(EXEEXT) extension to H5detect when it is executed in the
      src/Makefile to generate H5Tinit.c, so it works correctly on platforms
      that require the full extension when running executables.
      MAM - 2009/10/01 - BZ #1613
    - Configure will now set FC and CXX to "no" when Fortran and C++
      are not being compiled, respectively, so configure will not run
      some of the compiler tests for these languages when they are not
      being used. MAM - 2009/10/01
    - The --enable-static-exec flag will now properly place the -static flag
      on the link line of all installed executables. This will force the
      executables to link with static libraries over shared libraries, provided
      the static libraries are available. MAM - 2009/08/31 - BZ #1583
    - The PathScale compiler (v3.2) was mistaken for gcc v4.2.0, but it fails to
      recognize some gcc options. Fixed (see bug 1301). AKC - 2009/7/28


    Library
    -------
    - Fixed a bug where writing and deleting many global heap objects (i.e.,
      variable-length data) would render the file unreadable. Previously
      created files exhibiting this problem should now be readable.
      NAF - 2009/10/27 - 1483
    - Fixed an error in the library's internal caching mechanisms which could
      cause an assertion failure (and attendant core dump) when encountering an
      unusually formatted file. (QAK - 2009/10/13)
    - Fixed incorrect return value for H5Pget_preserve.
AKC - 2009/10/08 - 1628 - - Fixed an assertion failure that occurred when H5Ocopy was called on a - dataset using a vlen inside a compound. NAF - 2009/10/02 - 1597 - - Fixed incorrect return value for H5Pget_filter_by_id1/2 in H5Ppublic.h. - NAF - 2009/09/25 - 1620 - - Fixed a bug where properties weren't being compared with the registered - compare callback. NAF - 2009/09/25 - 1555 - - Corrected problem where library would re-write the superblock in a file - opened for R/W access, even when no changes were made to the file. - (QAK - 2009/08/20, Bz#1473) - - Fixed a bug where H5Pget_filter_by_id would succeed when called for a - filter that wasn't present. NAF - 2009/06/25 - 1250 - - Fixed an issue with committed compound datatypes containing a vlen. Also - fixed memory leaks involving committed datatypes. NAF - 2009/06/10 - 1593 - - Parallel Library - ---------------- - - None - - Tools - ----- - - h5dump/h5ls display buffer resize fixed in tools library. - ADB - 2009/7/21 - 1520 - - perf_serial test added to Windows projects and check batch file. - ADB - 2009/06/11 -1504 - - - F90 API - ------ - - Fixed bug in h5lget_info_by_idx_f by adding missing arguments, - consequently changing the API. New API is: - - SUBROUTINE h5lget_info_by_idx_f(loc_id, group_name, index_field, order, n, & - link_type, f_corder_valid, corder, cset, address, val_size, hdferr, lapl_id) - - MSB - 2009/9/17 - 1652 - - - Corrected the values for the H5L_flags FORTRAN constants: - H5L_LINK_ERROR_F, H5L_LINK_HARD_F, H5L_LINK_SOFT_F, H5L_LINK_EXTERNAL_F - MSB - 2009-09-17 - 1653 - - - Added FORTRAN equivalent of C constant H5T_ORDER_NONE: H5T_ORDER_NONE_F - MSB - 2009-9-24 - 1471 - - C++ API - ------ - - None - - High-Level APIs: - ------ - - Fixed a bug where the H5TB API would forget the order of fields when added - out of offset order. NAF - 2009/10/27 - 1582 - - H5DSis_attached failed to account for different platform types. Added a - get native type call. ADB - 2009/9/29 - 1562 - - Fortran High-Level APIs: - ------ - - Lite: the h5ltread_dataset_string_f and h5ltget_attribute_string_f functions - had memory problems with the g95 fortran compiler. (PVN � 5/13/2009) 1522 - - - -Platforms Tested -================ -The following platforms and compilers have been tested for this release. - - AIX 5.3 xlc 7.0.0.8 - (LLNL Up) xlf 09.01.0000.0008 - xlC 7.0.0.8 - mpcc_r 7.0.0.8 - mpxlf_r 09.01.0000.0008 - - Cray XT3 (2.0.41) cc (pgcc) 7.1-4 - (SNL red storm) ftn (pgf90) 7.1-4 - CC (pgCC) 7.1-4 - - FreeBSD 6.3-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305 - (duty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.3.5 20091004 - g++ 4.3.5 20091004 - gfortran 4.3.5 20091004 - - FreeBSD 6.3-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305 - (liberty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.4.2 20091006 - g++ 4.4.2 20091006 - gfortran 4.4.2 20091006 - - Linux 2.6.18-164.el5 gcc (GCC) 4.1.2 20080704 - #1 SMP i686 i686 i386 G95 (GCC 4.0.3 (g95 0.92!) Jun 24 2009) - (jam) GNU Fortran (GCC) 4.1.2 20080704 - (Red Hat 4.1.2-46) - PGI C, Fortran, C++ 8.0-5 32-bit - PGI C, Fortran, C++ 8.0-1 32-bit - Intel(R) C Compiler for 32-bit - applications, Versions 11.0, 11.1 - Intel(R) C++ Compiler for 32-bit - applications, Version 11.0, 11.1 - Intel(R) Fortran Compiler for 32-bit - applications, Version 11.0, 11.1 - Absoft 32-bit Fortran 95 10.0.7 - MPICH mpich2-1.0.8 compiled with - gcc (GCC) 4.1.2 and G95 - (GCC 4.0.3 (g95 0.92!) - - Linux 2.6.18-164.el5 #1 SMP gcc 4.1.2 20080704 - x86_64 GNU/Linux G95 (GCC 4.0.3 (g95 0.92!) 
Jun 24 2009) - (amani) tested for both 32- and 64-bit binaries - Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64, - Versions 11.1. - PGI C, Fortran, C++ Version 9.0-4 - for 64-bit target on x86-64 - gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!) - MPICH mpich2-1.0.8 compiled with - gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!) - GNU Fortran (GCC) 4.1.2 20080704 - (Red Hat 4.1.2-46) - - - Linux 2.6.16.60-0.42.5 #1 Intel(R) C++ Version 10.1.017 - SGI Altix SMP ia64 Intel(R) Fortran Itanium(R) Version 10.1.017 - (cobalt) SGI MPI 1.38 - - SunOS 5.10 32- and 64-bit Sun C 5.9 SunOS_sparc Patch 124867-11 2009/04/30 - (linew) Sun Fortran 95 8.3 SunOS_sparc - Patch 127000-11 2009/10/06 - Sun C++ 5.9 SunOS_sparc - Patch 124863-16 2009/09/15 - - Intel Xeon Linux 2.6.18- Intel(R) C++ Version 10.0.026 - 92.1.10.el5_lustre.1.6.6smp- Intel(R) Fortran Compiler Version 10.0.026 - perfctr #6 SMP Open MPI 1.2.2 - (abe) MVAPICH2-0.9.8p28p2patched-intel-ofed-1.2 - compiled with icc v10.0.026 and ifort 10.0.026 - - IA-64 Linux 2.4.21-309.tg1 gcc (GCC) 3.2.2 - #1 SMP ia64 Intel(R) C++ Version 8.1.037 - (NCSA tg-login) Intel(R) Fortran Compiler Version 8.1.033 - mpich-gm-1.2.7p1..16-intel-8.1.037-r1 - - Linux 2.6.9-55.0.9.EL_lustre Intel(R) C, C++, Fortran Compilers for - .1.4.11.1smp #1 SMP applications running on Intel(R) 64, - SMP x86_64 GNU/Linux Versions 10.1. - (SNL Thunderbird) - - Linux 2.6.18-76chaos #1 SMP Intel(R) C, C++, Fortran Compilers for - SMP x86_64 GNU/Linux applications running on Intel(R) 64, - (SNL Glory) Versions 10.1. - - Windows XP Visual Studio 2005 w/ Intel Fortran 9.1 - Cygwin(native gcc compiler and g95) - - Windows XP x64 Visual Studio 2005 w/ Intel Fortran 9.1 - - Windows Vista Visual Studio 2005 w/ Intel Fortran 9.1 - - Windows Vista x64 Visual Studio 2005 w/ Intel Fortran 9.1 - - MAC OS 10.5.6 (Intel) i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 - GNU Fortran (GCC) 4.3.0 20070810 - G95 (GCC 4.0.3 (g95 0.91!) 
Apr 24 2008) - Intel C, C++ and Fortran compilers 10.1 - - -Supported Configuration Features Summary -======================================== - - In the tables below - y = tested and supported - n = not supported or not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90 F90 C++ zlib SZIP - parallel parallel -Solaris2.10 32-bit n y n y y y -Solaris2.10 64-bit n y n y y y -Windows XP n y(4) n(4) y y y -Windows XP x64 n y(4) n(4) y y y -Windows Vista n n n y y y -Mac OS X 10.5 Intel n y n y y y -AIX 5.3 32- and 64-bit n y n y y n -FreeBSD 6.3-STABLE 32&64 bit n y n y y y -RedHat EL5 2.6.18-164 i686 GNU (1)W y y(2) y y y y -RedHat EL5 2.6.18-164 i686 Intel W n y n y y n -RedHat EL5 2.6.18-164 i686 PGI W n y n y y n -RedHat EL5 2.6.18-164 x86_64 GNU(1)W y y(3) y y y y -RedHat EL5 2.6.18-164 x86_64 IntelW n y n y y n -RedHat EL5 2.6.18-164 x86_64 PGI W n y n y y y -SuSe Linux 2.6.16 SGI Altix ia64 C y y y y y y -RedHat EL4 2.6.18 Xeon Lustre C y y y y y n -SuSe Linux 2.4.21 ia64 Intel C y y y y y n -Cray XT3 2.0.62 y y y y y n - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.10 32-bit y y y y -Solaris2.10 64-bit y y y y -Windows XP y y(4) y y -Windows XP x64 y y(4) y y -Windows Vista y n n y -Mac OS X 10.5 y n y n -AIX 5.3 32- and 64-bit n n n n -FreeBSD 6.3-STABLE 32&64 bit y y y y -RedHat EL5 2.6.18-164 i686 GNU (1)W y y(2) y y -RedHat EL5 2.6.18-164 i686 Intel W y y y n -RedHat EL5 2.6.18-164 i686 PGI W y y y n -RedHat EL5 2.6.18-164 x86_64 GNU(1)W y y y y -RedHat EL5 2.6.18-164 x86_64 IntelW y y y n -RedHat EL5 2.6.18-164 x86_64 PGI W y y y n -SuSe Linux 2.6.16 SGI Altix ia64 C y n -RedHat EL4 2.6.18 Xeon Lustre C y y y n -SuSe Linux 2.4.21 ia64 Intel C y y y n -Cray XT3 2.0.62 n n n n - - (1) Fortran compiled with g95. - (2) With PGI and Absoft compilers. - (3) With PGI compiler for Fortran. - (4) Using Visual Studio 2005 or Cygwin - Compiler versions for each platform are listed in the preceding - "Platforms Tested" table. - - -Known Problems -============== -* Parallel mode in AIX will fail some of the testcheck_version.sh tests where - it treats "exit(134) the same as if process 0 had received an abort signal. - This is fixed and will be available in the next release. AKC - 2009/11/3 - -* Some tests in tools/h5repack may fail in AIX systems when -q32 mode is used. - The error is due to insufficient memory requested. Request a large amount - of runtime memory by setting the following environment variable for more - memory. - LDR_CNTRL=MAXDATA=0x20000000@DSA - AKC - 2009/10/31 - -* The PathScale MPI implementation, accessing a Panasas file system, would - cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file is not - existing. This is due to the MPI_File_open() call failing if the amode has - the MPI_MODE_EXCL bit set. (See bug 1468 for details.) AKC - 2009/8/11 - -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. CMC - 2009/04/28 - -* There is a known issue in which HDF5 will change the timestamp on a file - simply by opening it with read/write permissions, even if the file is not - modified in any way. This is due to the way in which HDF5 manages the file - superblock. 
A fix is currently underway and should be included in the 1.8.4
  release of HDF5. MAM - 2009/04/28

* For gcc v4.3 and v4.4, in production mode, if -O3 is used, H5Tinit.c
  fails to compile; in fact, a bad H5Tinit.c is produced. If -O (the same
  as -O1) is used, H5Tinit.c compiles okay, but test/dt_arith fails.
  When -O0 (no optimization) is used, H5Tinit.c compiles okay and all
  tests pass. Therefore, -O0 is imposed for v4.3 and v4.4 of gcc.
  AKC - 2009/04/20

* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and
  tools/h5copy/testh5copy.sh scripts will fail some of their sub-tests.
  These sub-tests are expected to fail and should exit with a non-zero code,
  but the yod command does not propagate the exit code of the executables;
  yod always returns 0 if it can launch the executable. The test suite shell
  expects a non-zero code for these particular tests and therefore concludes
  the tests have failed when it receives 0 from yod. Skip all the "failing"
  tests for now by changing them as follows.

  ======== Original tools/h5ls/testh5ls.sh =========
  TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
  ======== Change to ===============================
  echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
  ==================================================

  ======== Original tools/h5copy/testh5copy.sh =========
  TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
  TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
  TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
  TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
  H5LSTEST $FILEOUT
  ======== Change to ===============================
  echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
  echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
  echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
  echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
  echo SKIP H5LSTEST $FILEOUT
  ==================================================
  AKC - 2008/11/10

* For Red Storm, a Cray XT3 system, the yod command sometimes gives the
  message, "yod allocation delayed for node recovery". This interferes with
  test suites that do not expect to see this message. See the "Red Storm"
  section in the file INSTALL_parallel for a way to deal with this problem.
  AKC - 2008/05/28

* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
  use the -mp -O1 compilation flags to build the libraries. A higher level of
  optimization causes failures in several HDF5 library tests.

* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and
  the application asks to do collective IO, we have found that when using 4
  processors, a simple collective write will sometimes hang. This can be
  verified with the t_mpi test under testpar.

* A dataset created or rewritten with a v1.6.3 or later library cannot be read
  with a v1.6.2 or earlier library when the Fletcher32 EDC filter is enabled.
  There was a bug in the calculation of the Fletcher32 checksum in the
  library before v1.6.3; the checksum value was not consistent between big-
  endian and little-endian systems. This bug was fixed in Release 1.6.3.
  However, after fixing the bug, the checksum value was no longer the same as
  before on little-endian systems.
Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - SLU - 2005/6/30 - -* On IBM AIX systems, parallel HDF5 mode will fail some tests with error - messages like "INFO: 0031-XXX ...". This is from the command `poe'. - Set the environment variable MP_INFOLEVEL to 0 to minimize the messages - and run the tests again. - - The tests may fail with messages like "The socket name is already in use", - but HDF5 does not use sockets. This failure is due to problems with the - poe command trying to set up the debug socket. To resolve this problem, - check to see whether there are many old /tmp/s.pedb.* files staying around. - These are sockets used by the poe command and left behind due to failed - commands. First, ask your system administrator to clean them out. - Lastly, request IBM to provide a means to run poe without the debug socket. - -* The --enable-static-exec configure flag will only statically link libraries - if the static version of that library is present. If only the shared version - of a library exists (i.e., most system libraries on Solaris, AIX, and Mac, - for example, only have shared versions), the flag should still result in a - successful compilation, but note that the installed executables will not be - fully static. Thus, the only guarantee on these systems is that the - executable is statically linked with just the HDF5 library. - -* There is also a configure error on Altix machines that incorrectly reports - when a version of Szip without an encoder is being used. - -%%%%1.8.3%%%% - - -HDF5 version 1.8.3 released on Mon May 4 09:21:00 CDT 2009 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.2 and -HDF5 1.8.3, and contains information on the platforms tested and -known problems in HDF5-1.8.3. -For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.3 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.3 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.3 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.3 (current -release) versus Release 1.8.2": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for New Platforms, Languages, and Compilers -- Bug Fixes since HDF5-1.8.2 -- Platforms Tested -- Supported Configuration Features Summary -- Known Problems - - -New Features -============ - - Configuration - ------------- - - Added libtool version numbers to generated c++, fortran, and - hl libraries. MAM 2009/04/19. - - Regenerated Makefile.ins using Automake 1.10.2. MAM 2009/04/19. 
- - Added a Make target of check-all-install to test the correctness of - installing via the prefix= or $DESTDIR options. AKC - 2009/04/14 - - Library - ------- - - Embed the content of libhdf5.settings into the hdf5 executables - so that an "orphaned" executables can display (via the Unix - strings command, for example) the library settings used to build - the executables. This is a prototype implementation. Improvement will - be added in next release. AKC - 2009/04/20 - - Separated "factory" free list class from block free lists. These free - lists are dynamically created and manage blocks of a fixed size. - H5set_free_list_limits() will use the same settings specified for block - free lists for factory free lists. NAF - 2009/04/08 - - Added support for dense attributes to H5Ocopy. XCao/NAF - 2009/01/29 - - Added H5Pset_elink_cb and H5Pget_elink_cb functions to support a - user-defined callback function for external link traversal. - NAF - 2009/01/08 - - Added H5Pset_elink_acc_flags and H5Pget_elink_acc_flags functions to - allow the user to specify the file access flags used to open the target - file of an external link. NAF - 2009/01/08 - - Added H5Pset_chunk_cache() and H5Pget_chunk_cache() functions to allow - individual rdcc configuration for each dataset. Added - H5Dget_access_plist() function to retrieve a dataset access property - list from a dataset. NAF - 2008/11/12 - - Added H5Iis_valid() function to check if an id is valid without - producing an error message. NAF - 2008/11/5 - - Added code to maintain a min_clean_fraction in the metadata cache when - in serial mode. MAM - 2009/01/9 - - Parallel Library - ---------------- - - Modified parallel tests to run with arbitrary number of processes. The - modified tests are testphdf5 (parallel dataset access), t_chunk_alloc - (chunk allocation), and t_posix_compliant (posix compliance). The rest of - the parallel tests already use in the code the number of processes - available in the communicator. (CMC - 2009/04/28) - - Tools - ----- - - h5diff new flag, -c, --compare, list objects that are not comparable. - PVN - 2009/4/2 - 1368 - - h5diff new flag, -N, --nan, avoids NaNs detection. PVN - 2009/4/2 - - h5dump correctly specifies XML dtd / schema urls ADB - 2009/4/3 - 1519 - - h5repack now handles group creation order. PVN - 2009/4/2 - 1402 - - h5repack: When user doesn't specify a chunk size, h5repack now - defines a default chunk size as the same size of the size of the - hyperslab used to read the chunks. The size of the hyperslabs are - defined as the size of each dimension or a predefined constant, - whatever is smaller. This assures that the chunk read fits in the - chunk cache. PVN - 2008/11/21 - - High-Level APIs - --------------- - - Table: In version 3.0 of Table, the writing of the "NROWS" attribute - (used to store number of records) was deprecated. PVN - 2008/11/24 - - F90 API - ------- - - Added for the C APIs the Fortran wrappers: - h5dget_access_plist_f - h5iis_valid_f - h5pset_chunk_cache_f - h5pget_chunk_cache_f - MSB - 2009/04/17 - - C++ API - ------- - - None - - -Support for New Platforms, Languages, and Compilers -=================================================== - - -Bug Fixes since HDF5-1.8.2 -========================== - - Configuration - ------------- - - The --includedir=DIR configuration option now works as intended, and - can be used to specify the location to install C header files. The - default location remains unchanged, residing at ${prefix}/include. 
- MAM - 2009/03/10 - BZ #1381 - - Configure no longer removes the '-g' flag from CFLAGS when in production - mode if it has been explicitly set in the CFLAGS environment variable - prior to configuration. MAM - 2009/03/09 - BZ #1401 - - Library - ------- - - Added versioning to H5Z_class_t struct to allow compatibility with 1.6 - API. NAF - 2009/04/20 - 1533 - - Fixed a problem with using data transforms with non-native types in the - file. NAF - 2009/04/20 - 1548 - - Added direct.h include file to windows section of H5private.h - to fix _getcwd() warning. ADB - 2009/04/14 - 1536 - - Fixed a bug that prevented external links from working after calling - H5close(). NAF - 2009/04/10 - 1539 - - Modified library to write cached symbol table information to the - superblock, to allow library versions 1.3.0 to 1.6.3 to read files created - by this version. NAF - 2009/04/08 - 1423 - - Changed skip lists to use a deterministic algorithm. The library should - now never call rand() or srand(). NAF - 2009/04/08 - 503 - - Fixed a bug where H5Lcopy and H5Lmove wouldn't create intermediate groups - when that property was set. NAF - 2009/04/07 - 1526 - - Fixed a bug that caused files with a user block to grow by the size of the - user block every time they were opened. NAF - 2009/03/26 - 1499 - - Fixed a rare problem that could occur with files using the old (pre 1.4) - array datatype. NAF - 2009/03/23 - - Modified library to be able to open files with corrupt root group symbol - table messages, and correct these errors if they are found. Such files - can only be successfully opened with write access. NAF - 2009/03/23 - 1189 - - Removed the long_long #define and replaced all instances with - "long long". This caused problems with third party products. All - currently supported compilers support the type. ADB - 2009/03/05 - - Fixed various bugs that could prevent the fill value from being written - in certain rare cases. NAF - 2009/02/26 - 1469 - - Fixed a bug that prevented more than one dataset chunk from being cached - at a time. NAF - 2009/02/12 - 1015 - - Fixed an assertion failure caused by opening an attribute multiple times - through multiple file handles. NAF - 2009/02/12 - 1420 - - Fixed a problem that could prevent the user from adding attributes (or any - object header message) in some circumstances. NAF - 2009/02/12 - 1427 - - Fixed a bug that could cause problems when an attribute was added to a - committed datatype using the committed datatype's datatype. - NAF - 2009/02/12 - - Fixed a bug that could cause problems when copying an object with a shared - message in its own object header. NAF - 2009/01/29 - - Changed H5Tset_order to properly reject H5T_ORDER_NONE for most datatypes. - NAF - 2009/01/27 - 1443 - - Fixed a bug where H5Tpack wouldn't remove trailing space from an otherwise - packed compound type. NAF - 2009/01/14 - - Fixed up some old v2 btree assertions that get run in debug mode that - were previously failing on compilation, and removed some of the - more heavily outdated and non-rewritable ones. MAM - 2008/12/15 - - Fixed a bug that could cause problems when "automatically" unmounting - multiple files. NAF - 2008/11/17 - - H5Dset_extent: when shrinking dimensions, some chunks were not deleted. - PVN - 2009/01/8 - - Parallel Library - ---------------- - - None - - Tools - ----- - - Fixed many problems that could occur when using h5repack with named - datatypes. 
NAF - 2009/4/20 - 1516/1466 - - h5dump, h5diff, h5repack were not reading (by hyperslabs) datasets - that have a datatype datum size greater than H5TOOLS_BUFSIZE, a constant - defined as 1024Kb, such as array types with large dimensions. - PVN - 2009/4/1 - 1501 - - h5import: By selecting a compression type, a big endian byte order - was being selected. PVN - 2009/3/11 - 1462 - - zip_perf.c had missing argument on one of the open() calls. Fixed. - AKC - 2008/12/9 - - F90 API - ------ - - None - - C++ API - ------ - - None - - High-Level APIs: - ------ - - Dimension scales: The scale index return value in H5DSiterate_scales - was not always incremented. PVN - 2009/4/8 - 1538 - - Fortran High-Level APIs: - ------ - - Lite: The h5ltget_dataset_info_f function (gets information about - a dataset) was not correctly returning the dimension array - PVN - 2009/3/23 - - -Platforms Tested -================ -The following platforms and compilers have been tested for this release. - - AIX 5.3 xlc 7.0.0.8 - (LLNL Up) xlf 09.01.0000.0008 - xlC 7.0.0.8 - mpcc_r 7.0.0.8 - mpxlf_r 09.01.0000.0008 - - Cray XT3 (2.0.41) cc (pgcc) 7.1-4 - (SNL red storm) ftn (pgf90) 7.1-4 - CC (pgCC) 7.1-4 - - FreeBSD 6.3-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305 - (duty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.3.4 20090419 - g++ 4.3.4 20090419 - gfortran 4.3.4 20090419 - - FreeBSD 6.3-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305 - (liberty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.4.1 20090421 - g++ 4.4.1 20090421 - gfortran 4.4.1 20090421 - - IRIX64 6.5 (64 & n32) MIPSpro cc 7.4.4m - F90 MIPSpro 7.4.4m - C++ MIPSpro cc 7.4.4m - - Linux 2.6.18-128.1.6.el5xen gcc (GCC) 4.1.2 - #1 SMP i686 i686 i386 G95 (GCC 4.0.3 (g95 0.92!) Feb 4 2009) - (jam) PGI C, Fortran, C++ 7.2-1 32-bit - PGI C, Fortran, C++ 8.0-1 32-bit - Intel(R) C Compiler for 32-bit - applications, Versions 10.1, 11.0 - Intel(R) C++ Compiler for 32-bit - applications, Version 10.1, 11.0 - Intel(R) Fortran Compiler for 32-bit - applications, Version 10.1, 11.0 - Absoft 32-bit Fortran 95 10.0.7 - MPICH mpich2-1.0.8 compiled with - gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!) - - Linux 2.6.9-42.0.10.ELsmp #1 gcc (GCC) 3.4.6 - SMP i686 i686 i386 G95 (GCC 4.0.3 (g95 0.92!) Feb 4 2009) - (kagiso) MPICH mpich2-1.0.8 compiled with - gcc 3.4.6 and G95 (GCC 4.0.3 (g95 0.92!) - - Linux 2.6.16.60-0.37-smp #1 gcc 4.1.2 - SMP x86_64 GNU/Linux G95 (GCC 4.0.3 (g95 0.92!) Feb 4 2009) - (smirom) Intel(R) C, C++, Fortran Compilers for - applications running on Intel(R) 64, - Versions 10.1, 11.0. - PGI C, Fortran, C++ Version 7.2-1, 8.0-1 - for 64-bit target on x86-64 - gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!) - MPICH mpich2-1.0.8 compiled with - gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!) 
- tested for both 32- and 64-bit binaries - - Linux 2.6.16.54-0.2.5 #1 Intel(R) C++ Version 10.1.017 - SGI Altix SMP ia64 Intel(R) Fortran Itanium(R) Version 10.1.017 - (cobalt) SGI MPI 1.38 - - SunOS 5.10 32- and 64-bit Sun WorkShop 6 update 2 C 5.9 Patch 124867-09 - (linew) Sun WorkShop 6 update 2 Fortran 95 8.3 - Patch 127000-07 - Sun WorkShop 6 update 2 C++ 5.8 - Patch 124863-11 - - Intel Xeon Linux 2.6.18- gcc 3.4.6 20060404 - 92.1.10.el5_lustre.1.6.6smp- Intel(R) C++ Version 10.0.026 - perfctr #2 SMP Intel(R) Fortran Compiler Version 10.0.026 - (abe) Open MPI 1.2.2 - MVAPICH2-0.9.8p28p2patched-intel-ofed-1.2 - compiled with icc v10.0.026 and ifort 10.0.026 - - IA-64 Linux 2.4.21-309.tg1 gcc (GCC) 3.2.2 - #1 SMP ia64 Intel(R) C++ Version 8.1.037 - (NCSA tg-login) Intel(R) Fortran Compiler Version 8.1.033 - mpich-gm-1.2.7p1..16-intel-8.1.037-r1 - - Linux 2.6.9-55.0.9.EL_lustre Intel(R) C, C++, Fortran Compilers for - .1.4.11.1smp #1 SMP applications running on Intel(R) 64, - SMP x86_64 GNU/Linux Versions 9.1. - (SNL Spirit) - - Linux 2.6.9-55.0.9.EL_lustre Intel(R) C, C++, Fortran Compilers for - .1.4.11.1smp #1 SMP applications running on Intel(R) 64, - SMP x86_64 GNU/Linux Versions 10.1. - (SNL Thunderbird) - - Linux 2.6.18-63chaos #1 SMP Intel(R) C, C++, Fortran Compilers for - SMP x86_64 GNU/Linux applications running on Intel(R) 64, - (SNL Glory) Versions 10.1. - - Linux 2.6.18-63chaos #1 SMP Intel(R) C, C++, Fortran Compilers for - SMP x86_64 GNU/Linux applications running on Intel(R) 64, - (LLNL Zeus) Versions 9.1. - gcc/gfortran/g++ (GCC) 4.1.2. - - Windows XP Visual Studio .NET - Visual Studio 2005 w/ Intel Fortran 9.1 - Cygwin(native gcc compiler and g95) - - Windows XP x64 Visual Studio 2005 w/ Intel Fortran 9.1 - - Windows Vista Visual Studio 2005 - - MAC OS 10.5.6 (Intel) i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 - GNU Fortran (GCC) 4.3.0 20070810 - G95 (GCC 4.0.3 (g95 0.91!) 
Apr 24 2008) - Intel C, C++ and Fortran compilers 10.1 - - -Supported Configuration Features Summary -======================================== - - In the tables below - y = tested and supported - n = not supported or not tested in this release - C = Cluster - W = Workstation - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90 F90 C++ zlib SZIP - parallel parallel -Solaris2.10 32-bit n y n y y y -Solaris2.10 64-bit n y n y y y -IRIX64_6.5 32-bit n n n n y y -IRIX64_6.5 64-bit n y y y y y -Windows XP n y(4) n(4) y y y -Windows XP x64 n y(4) n(4) y y y -Windows Vista n n n y y y -Mac OS X 10.5 Intel n y n y y y -AIX 5.3 32- and 64-bit n y n y y n -FreeBSD 6.3-STABLE 32&64 bit n y n y y y -RedHat EL4 2.6.9-42 i686 GNU (1) W y y y y y y -RedHat EL5 2.6.18-128 i686 GNU (1)W y y(2) y y y y -RedHat EL5 2.6.18-128 i686 Intel W n y n y y n -RedHat EL5 2.6.18-128 i686 PGI W n y n y y n -SuSe Linux 2.6.16 x86_64 GNU (1) W y y(3) y y y y -SuSe Linux 2.6.16 x86_64 Intel W n y n y y n -SuSe Linux 2.6.16 x86_64 PGI W n y n y y y -SuSe Linux 2.6.16 SGI Altix ia64 C y y y y y y -RedHat EL4 2.6.18 Xeon Lustre C y y y y y n -SuSe Linux 2.4.21 ia64 Intel C y y y y y n -Cray XT3 2.0.41 y y y y y n - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.10 32-bit y y y y -Solaris2.10 64-bit y y y y -IRIX64_6.5 32-bit y dna y y -IRIX64_6.5 64-bit y y n y -Windows XP y y(4) y y -Windows XP x64 y y(4) y y -Windows Vista y n n y -Mac OS X 10.5 y n y n -AIX 5.3 32- and 64-bit n n n n -FreeBSD 6.3-STABLE 32&64 bit y n y y -RedHat EL4 2.6.9-42 i686 GNU (1) W y y y y -RedHat EL5 2.6.18-128 i686 GNU (1)W y y(2) y y -RedHat EL5 2.6.18-128 i686 Intel W y y y n -RedHat EL5 2.6.18-128 i686 PGI W y y y n -SuSe Linux 2.6.16 x86_64 GNU (1) W y y y y -SuSe Linux 2.6.16 x86_64 Intel W y y y n -SuSe Linux 2.6.16 x86_64 PGI W y y y n -SuSe Linux 2.6.16 SGI Altix ia64 C y n -RedHat EL4 2.6.18 Xeon Lustre C y y y n -SuSe Linux 2.4.21 ia64 Intel C y y y n -Cray XT3 2.0.41 n n n n - - (1) Fortran compiled with g95. - (2) With PGI and Absoft compilers. - (3) With PGI compiler for Fortran. - (4) Using Visual Studio 2005 or Cygwin - Compiler versions for each platform are listed in the preceding - "Platforms Tested" table. - - -Known Problems -============== -* Parallel tests failed with 16 processes with data inconsistency at testphdf5 - / dataset_readAll. Parallel tests also failed with 32 and 64 processes with - collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks - with MPI IO. CMC - 2009/04/28 - -* There is a known issue in which HDF5 will change the timestamp on a file - simply by opening it with read/write permissions, even if the file is not - modified in any way. This is due to the way in which HDF5 manages the file - superblock. A fix is currently underway and should be included in the 1.8.4 - release of HDF5. MAM - 2009/04/28 - -* For gcc v4.3 and v4.4, with production mode, if -O3 is used, H5Tinit.c - would fail to compile. Actually bad H5Tinit.c is produced. If -O (same - as -O1) is used, H5Tinit.c compiled okay but test/dt_arith would fail. - When -O0 (no optimizatio) is used, H5Tinit.c compilete okay and all - tests passed. Therefore, -O0 is imposed for v4.3 and v4.4 of gcc. - AKC - 2009/04/20 - -* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and - tools/h5copy/testh5copy.sh will fail some of its sub-tests. 
These sub-tests - are expected to fail and should exit with a non-zero code but the yod - command does not propagate the exit code of the executables. Yod always - returns 0 if it can launch the executable. The test suite shell expects - a non-zero for this particular test, therefore it concludes the test has - failed when it receives 0 from yod. Skip all the "failing" test for now - by changing them as following. - - ======== Original tools/h5ls/testh5ls.sh ========= - TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ======== Change to =============================== - echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ================================================== - - ======== Original tools/h5copy/testh5copy.sh ========= - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - H5LSTEST $FILEOUT - ======== Change to =============================== - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - echo SKIP H5LSTEST $FILEOUT - ================================================== - AKC - 2008/11/10 - -* For Red Storm, a Cray XT3 system, the yod command sometimes gives the - message, "yod allocation delayed for node recovery". This interferes with - test suites that do not expect seeing this message. See the section of "Red - Storm" in file INSTALL_parallel for a way to deal with this problem. - AKC - 2008/05/28 - -* We have discovered two problems when running collective IO parallel HDF5 - tests with chunking storage on the ChaMPIon MPI compiler on tungsten, a - Linux cluster at NCSA. - - Under some complex selection cases: - 1) MPI_Get_element returns the wrong value. - 2) MPI_Type_struct also generates the wrong derived datatype and corrupt - data may be generated. - These issues arise only when turning on collective IO with chunking storage - with some complex selections. We have not found these problems on other - MPI-IO compilers. If you encounter these problems, you may use independent - IO instead. - - To avoid this behavior, change the following line in your code - H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - to - H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT); - KY - 2007/08/24 - -* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* For LLNL, uP: both serial and parallel tests pass. - Zeus: Serial tests pass but parallel tests fail with a known problem in MPI. - ubgl: Serial tests pass but parallel tests fail. - -* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and - the application asks to do collective IO, we have found that when using 4 - processors, a simple collective write will sometimes be hung. This can be - verified with t_mpi test under testpar. - -* On IRIX6.5, when the C compiler version is greater than 7.4, complicated - MPI derived datatype code will work. 
However, the user should increase - the value of the MPI_TYPE_MAX environment variable to some appropriate value - to use collective irregular selection code. For example, the current - parallel HDF5 test needs to raise MPI_TYPE_MAX to 200,000 to pass the test. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - SLU - 2005/6/30 - -* On IBM AIX systems, parallel HDF5 mode will fail some tests with error - messages like "INFO: 0031-XXX ...". This is from the command `poe'. - Set the environment variable MP_INFOLEVEL to 0 to minimize the messages - and run the tests again. - - The tests may fail with messages like "The socket name is already in use", - but HDF5 does not use sockets. This failure is due to problems with the - poe command trying to set up the debug socket. To resolve this problem, - check to see whether there are many old /tmp/s.pedb.* files staying around. - These are sockets used by the poe command and left behind due to failed - commands. First, ask your system administrator to clean them out. - Lastly, request IBM to provide a means to run poe without the debug socket. - -* The --enable-static-exec configure flag fails to compile for Solaris - platforms. This is due to the fact that not all of the system libraries on - Solaris are available in a static format. - - The --enable-static-exec configure flag also fails to correctly compile - on IBM SP2 platforms for serial mode. The parallel mode works fine with - this option. - - It is suggested that you do not use this option on these platforms - during configuration. - -* There is also a configure error on Altix machines that incorrectly reports - when a version of Szip without an encoder is being used. - -* Information about building with PGI and Intel compilers is available in - the INSTALL file sections 4.7 and 4.8. - - -%%%%1.8.2%%%% - - -HDF5 version 1.8.2 released on Mon Nov 10 15:43:09 CST 2008 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between HDF5-1.8.1 and HDF5 1.8.2, -and contains information on the platforms tested and known problems in -HDF5-1.8.2. For more details, see the files HISTORY-1_0-1_8_0_rc3.txt -and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source. - -Links to the HDF5 1.8.2 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.2 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.2 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" 
document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.2 (current -release) versus Release 1.8.1": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for new platforms and languages -- Bug Fixes since HDF5-1.8.1 -- Platforms Tested -- Supported Configuration Features Summary -- Known Problems - - -New Features -============ - - Configuration - ------------- - - Upgraded libtool to version 2.2.6a. (MAM - 2008/10/15). - - Library - ------- - - Added two new public routines: H5Pget_elink_fapl() and - H5Pset_elink_fapl(). (see bug #1247) (VC - 2008/10/13) - - Improved free space tracking in file to be faster. (QAK - 2008/10/06) - - Added 'mounted' field to H5G_info_t struct. (QAK - 2008/07/15) - - Parallel Library - ---------------- - - None - - Tools - ----- - - h5repack: added new options -u and -b to add a userblock to an HDF5 - file during the repack. (PVN - 2008/08/26) - - h5repack: added options -t and -a to call H5Pset_alignment while - creating a repacked file. (PVN - 2008/08/29) - - h5ls: added capability to traverse through external links when the -r - (recursive) flag is given. (NAF - 2008/09/16) - - h5ls: added -E option to enable traversal of external links. - h5ls will not traverse external links without this flag being set. - (NAF - 2008/10/06) - - h5dump: when -b flag is used without a keyword after it, binary - output defaults to NATIVE. MEMORY keyword was deprecated - and replaced by NATIVE keyword. (PVN - 2008/10/30) - - h5diff: returns 1 when file graphs differ by any object. - Error return code was changed to 2 from -1. (PVN - 2008/10/30) - - h5import: TEXTFPE (scientific format) was deprecated. Use TEXTFP - instead (PVN - 2008/10/30) - - - - F90 API - ------ - - Added optional parameter 'mounted' to H5Gget_info_f, - H5Gget_info_by_idx_f, H5Gget_info_by_name_f (MSB - 2008/09/24) - - Added H5Tget_native_type_f (MSB - 2008/09/30) - - - C++ API - ------ - - These member functions were added as wrapper for H5Rdereference to - replace the incorrect IdComponent::dereference(). - void H5Object::dereference(H5Object& obj, void* ref, - H5R_type_t ref_type=H5R_OBJECT) - void H5Object::dereference(H5File& h5file, void* ref, - H5R_type_t ref_type=H5R_OBJECT) - void H5Object::dereference(Attribute& obj, void* ref, - H5R_type_t ref_type=H5R_OBJECT) - - In addition, these constructors were added to create the associated - objects by way of dereference: - DataSet(H5Object& obj, void* ref, H5R_type_t ref_type=H5R_OBJECT) - DataSet(H5File& file, void* ref, H5R_type_t ref_type=H5R_OBJECT) - DataSet(Attribute& attr, void* ref, H5R_type_t ref_type=H5R_OBJECT) - Group(H5Object& obj, void* ref, H5R_type_t ref_type=H5R_OBJECT) - Group(H5File& obj, void* ref, H5R_type_t ref_type=H5R_OBJECT) - Group(Attribute& attr, void* ref, H5R_type_t ref_type=H5R_OBJECT) - DataType(H5Object& obj, void* ref, H5R_type_t ref_type=H5R_OBJECT) - DataType(H5File& file, void* ref, H5R_type_t ref_type=H5R_OBJECT) - DataType(Attribute& attr, void* ref, H5R_type_t ref_type=H5R_OBJECT) - (BMR - 2008/10/29) - - -Support for New Platforms, Languages, and Compilers -=================================================== - - Intel 10.1 is supported on Mac OS X 10.5.4. 
- Note: - When Fortran is enabled, configure automatically - disables the build of shared libraries (i.e., only - static C and C++ HDF5 libraries will be built - along with the static HDF5 Fortran library). - Intel 10.1 C and C++ compilers require - "-no-multibyte-chars" compilation flag due to the known - bug in the compilers. - (EIP - 2008/10/30) - - -Bug Fixes since HDF5-1.8.1 -========================== - - Configuration - ------------- - - Fixed error with 'make check install' failing due to h5dump - needing other tools built first. (MAM - 2008/10/15). - - When using shared szip, it is no longer necessary to specify - the path to the shared szip libraries in LD_LIBRARY_PATH. - (MAM - 2008/10/15). - - The file libhdf5_fortran.settings is not installed since its content - is included in libhdf5.settings now. (AKC - 2008/10/21) - - "make DESTDIR=xxx install" failed to install some tools and files - (e.g., h5cc and fortran modules). Fixed. (AKC - 2008/10/8). - - Library - ------- - - H5Ovisit and H5Ovisit_by_name will now properly terminate when the - callback function returns a positive value on the starting object. - (NAF - 2008/11/03) - - Fixed an error where a null message could be created that was larger - than could be written to the file. (NAF - 2008/10/23) - - Corrected error with family/split/multi VFD not updating driver info - when "latest" version of the file format used. (QAK - 2008/10/14) - - Corrected alignment+threshold errors to work correctly when metadata - aggregation is enabled. (QAK - 2008/10/06) - - Changed H5Fget_obj_count and H5Fget_obj_ids to ignore objects - registered by the library for internal library use. - (NAF - 2008/10/06) - - Fixed potential memory leak during compound conversion. - (NAF - 2008/10/06) - - Changed the return value of H5Fget_obj_count from INT to SSIZE_T. - Also changed the return value of H5Fget_obj_ids from HERR_T to - SSIZE_T and the type of the parameter MAX_OBJS from INT to SIZE_T. - (SLU - 2008/09/26) - - Fixed an issue that could cause data to be improperly overwritten - during compound type conversion. (NAF - 2008/09/19) - - Fixed pointer alignment violations that could occur during vlen - conversion. (NAF - 2008/09/16) - - Fixed problem where library could cause a segmentation fault when - an invalid location ID was given to H5Giterate(). (QAK - 2008/08/19) - - Fixed improper shutdown when objects have reference count > 1. The - library now tracks reference count due to the application separately - from that due to internal library routines. (NAF - 2008/08/19) - - Fixed assertion failure caused by incorrect array datatype version. - (NAF - 2008/08/08) - - Fixed an issue where mount point traversal would fail when using - multiple handles for the child. (NAF - 2008/08/07) - - Fixed an issue where mount points were inaccessible when using - multiple file handles for the parent. The mount table is now in - the shared file structure (the parent pointer is still in the - top structure). (NAF - 2008/08/07) - - Fixed assertion failure caused by incorrect array datatype version. - (NAF - 2008/08/04) - - Fixed issue where a group could have a file mounted on it twice. - (QAK - 2008/07/15) - - When an attribute was opened twice and data was written with - one of the handles, the file didn't have the data. It happened - because each handle had its own object structure, and the empty - one overwrote the data with fill value. This is fixed by making - some attribute information like the data be shared in the - attribute structure. 
(SLU - 2008/07/07)
    - Fixed a Windows-specific issue in the ohdr test which was causing
      users in some timezones to get false errors. This is a deficiency in
      the Windows mktime() function, and it has been handled properly.
      (SJW - 2008/06/19)

    Parallel Library
    ----------------
    - None

    Tools
    -----
    - h5dump now checks for uniqueness of committed datatypes.
      (NAF - 2008/10/15)
    - Fixed unnecessary indentation of committed datatypes in h5dump.
      (NAF - 2008/10/15)
    - Fixed bugs in h5stat: fixed a segmentation fault when printing groups and
      added a warning message when traversal of objects is unsuccessful.
      (see bug #1253) (VC - 2008/10/13)
    - Fixed a bug in h5ls that prevented relative group listings (like
      "h5ls foo.h5/bar") from working correctly. (QAK - 2008/06/03)
    - h5dump: when doing binary output (-b), the stdout printing of
      attributes was done incorrectly. Removed printing of attributes
      when doing binary output. (PVN - 2008/06/05)


    F90 API
    ------
    - h5sselect_elements_f: Added the additional operators H5S_SELECT_APPEND
      and H5S_SELECT_PREPEND. (MSB - 2008/09/30)
    - h5sget_select_elem_pointlist: Fixed the list of returned points by
      rearranging the point list correctly to account for C
      conventions. (MSB - 2008/09/30)
    - h5sget_select_hyper_blocklist_f: Fixed an error in the transposed
      dimension of arrays. (MSB - 2008/9/30)
    - h5sget_select_bounds_f: Swapped array bounds to account for C and
      Fortran reversed array notation. (MSB - 2008/9/30)
    - Changed string initialization to a blank character instead of a
      null type in tH5P.f90 to fix a compile error using AIX 5.3.0.
      (MSB - 2008/7/29)
    - Fixed missing commas in H5test_kind.f90 detected by the NAG compiler.
      (MSB - 2008/7/29)
    - Fixed passing an array to a scalar in tH5A_1_8.f90 detected by the
      NAG compiler. (MSB - 2008/7/29)
    - Added the ability of the test programs to use the status of
      HDF5_NOCLEANUP to determine whether the *.h5 files should be removed
      after the tests are completed. (MSB - 2008/10/1)
    - In nh5tget_offset_c: if the offset was equal to 0, an error code of -1
      was returned; this was changed to return an error code of -1 only when
      the offset value is < 0. (MSB - 9/12/2008)
    - Uses the intrinsic Fortran function SIZEOF, if available, when detecting
      the type of INTEGERs and REALs in H5test_kind.f90. (MSB - 2008/9/3)
    - Put the DOUBLE PRECISION interfaces in a separate module and
      added a USE statement for the module. The interfaces are
      included/excluded depending on whether FORTRAN_DEFAULT_REAL
      is DBLE_F, which detects if the default REAL is DOUBLE PRECISION.
      This allows the library to be compiled with the -r8 Fortran flag
      without the user needing to edit the source code.
      (MSB - 2008/8/27)
    - Enabled building the shared library for Fortran by adding the flag -fPIC
      to the compile flags for versions of the Intel Fortran compiler >= 9.
      (MSB - 2008/8/26)

    C++ API
    ------
    - Fixed a design bug which allowed an Attribute object to create/modify
      attributes (bugzilla #1068). The API class hierarchy was revised
      to address the problem. Classes AbstractDs and Attribute were moved
      out of H5Object. Class Attribute now multiply inherits from
      IdComponent and AbstractDs, and class DataSet from H5Object and
      AbstractDs. In addition, the data member IdComponent::id was
      moved into subclasses: Attribute, DataSet, DataSpace, DataType,
      H5File, Group, and PropList. (BMR - 2008/05/20)
    - IdComponent::dereference was incorrect and was replaced as described
      in the "New Features" section.
- (BMR - 2008/10/29) - - -Platforms Tested -================ -The following platforms and compilers have been tested for this release. - - AIX 5.3 xlc 7.0.0.8 - xlf 09.01.0000.0008 - xlC 7.0.0.8 - mpcc_r 7.0.0.8 - mpxlf_r 09.01.0000.0008 - - Cray XT3 (2.0.41) cc (pgcc) 7.1-4 - (red storm) ftn (pgf90) 7.1-4 - CC (pgCC) 7.1-4 - - FreeBSD 6.3-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305 - (duty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.2.5 20080702 - g++ 4.2.5 20080702 - gfortran 4.2.5 20080702 - - FreeBSD 6.3-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305 - (liberty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.2.5 20080702 - g++ 4.2.5 20080702 - gfortran 4.2.5 20080702 - - IRIX64 6.5 (64 & n32) MIPSpro cc 7.4.4m - F90 MIPSpro 7.4.4m - C++ MIPSpro cc 7.4.4m - - Linux 2.6.9-42.0.10.ELsmp #1 gcc (GCC) 3.4.6 - SMP i686 i386 G95 (GCC 4.0.3 (g95 0.92!) April 18 2007) - (kagiso) PGI C, Fortran, C++ 7.2-1 32-bit - Intel(R) C Compiler for 32-bit - applications, Version 10.1 - Intel(R) C++ Compiler for 32-bit - applications, Version 10.1 - Intel(R) Fortran Compiler for 32-bit - applications, Version 10.1 - Absoft 32-bit Fortran 95 10.0.4 - MPICH mpich-1.2.7 compiled with - gcc 3.4.6 and G95 (GCC 4.0.3 (g95 0.92!) - MPICH mpich2-1.0.6p1 compiled with - gcc 3.4.6 and G95 (GCC 4.0.3 (g95 0.92!) - - Linux 2.6.16.46-0.14-smp #1 Intel(R) C++ for Intel(R) EM64T - SMP x86_64 GNU/Linux Ver. 10.1.013 - (smirom) Intel(R) Fortran Intel(R) EM64T - Ver. 10.1.013 - PGI C, Fortran, C++ Version 7.2-1 - for 64-bit target on x86-64 - MPICH mpich-1.2.7 compiled with - gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!) - MPICH mpich2-1.0.7 compiled with - gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!) - tested for both 32- and 64-bit binaries - - Linux 2.6.16.54-0.2.5 #1 Intel(R) C++ Version 10.1.017 - Altix SMP ia64 Intel(R) Fortran Itanium(R) Version 10.1.017 - (cobalt) SGI MPI 1.16 - - SunOS 5.10 32- and 64-bit Sun WorkShop 6 update 2 C 5.8 - (linew) Sun WorkShop 6 update 2 Fortran 95 8.2 - Sun WorkShop 6 update 2 C++ 5.8 - Patch 121019-06 - - Xeon Linux 2.6.9-42.0.10.EL_lustre-1.4.10.1smp - (abe) Intel(R) C++ Version 10.0.026 - Intel(R) Fortran Compiler Version 10.0.026 - Open MPI 1.2.2 - MVAPICH2-0.9.8p28p2patched-intel-ofed-1.2 - compiled with icc v10.0.026 and - ifort 10.0.026 - - IA-64 Linux 2.4.21-309.tg1 #1 SMP - ia64 gcc (GCC) 3.2.2 - (NCSA tg-login) Intel(R) C++ Version 8.1.037 - Intel(R) Fortran Compiler Version 8.1.033 - mpich-gm-1.2.7p1..16-intel-8.1.037-r1 - - Intel 64 Linux 2.6.9-42.0.10.EL_lustre-1.4.10.1smp - (abe) gcc 3.4.6 20060404 - Intel(R) C++ Version 10.0 - Intel (R) Fortran Compiler Version 10.0 - mvapich2-0.9.8p2patched-intel-ofed-1.2 - - Windows XP Visual Studio .NET - Visual Studio 2005 w/ Intel Fortran 9.1 - Cygwin(native gcc compiler and g95) - - Windows XP x64 Visual Studio 2005 w/ Intel Fortran 9.1 - - Windows Vista Visual Studio 2005 - - MAC OS 10.5.4 (Intel) i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 - GNU Fortran (GCC) 4.3.0 20070810 - G95 (GCC 4.0.3 (g95 0.91!) 
Apr 24 2008) - Intel C, C++ and Fortran compilers 10.1 - - -Supported Configuration Features Summary -======================================== - - In the tables below - y = tested and supported - n = not supported or not tested in this release - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90 F90 C++ zlib SZIP - parallel parallel -Solaris2.10 32-bit n y n y y y -Solaris2.10 64-bit n y n y y y -IRIX64_6.5 32-bit n n n n y y -IRIX64_6.5 64-bit n y y y y y -Windows XP n y(15) n(15) y y y -Windows XP x64 n y(15) n(15) y y y -Windows Vista n n n y y y -Mac OS X 10.5 Intel n y n y y y -AIX 5.3 32- and 64-bit n y n y y n -FreeBSD 6.3-STABLE -32&64 bit n y n y y y -RedHat EL4 (3) W y(1) y(10) y(1) y y y -RedHat EL4 Intel (3) W n y n y y n -RedHat EL4 PGI (3) W n y n y y n -SuSe x86_64 gcc(3,12) W y(2) y(11) y(2) y y y -SuSe x86_64 Int(3,12) W n y(13) n y y n -SuSe x86_64 PGI(3,12) W n y(8) n y y y -Linux 2.6 SuSE ia64 C - Intel (3,7) y y y y y n -Linux 2.6 SGI Altix - ia64 Intel (3) y y y y y y -Linux 2.6 RHEL C - Lustre Intel (5) y(4) y y(4) y y n -Cray XT3 2.0.41 y y y y y n - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.10 32-bit y y y y -Solaris2.10 64-bit y y y y -IRIX64_6.5 32-bit y dna y y -IRIX64_6.5 64-bit y y n y -Windows XP y y(15) y y -Windows XP x64 y y(15) y y -Windows Vista y n n y -Mac OS X 10.5 y n y n -AIX 5.3 32- and 64-bit n n n n -FreeBSD 6.2 32&64 bit y n y y -RedHat EL4 (3) W y y(10) y y -RedHat EL4 Intel (3) W y y y n -RedHat EL4 PGI (3) W y y y n -SuSe x86_64 GNU(3,12) W y y y y -SuSe x86_64 Int(3,12) W y y y n -SuSe x86_64 PGI(3,12) W y y y n -Linux 2.4 SuSE C - ia64 C Intel (7) y y y n -Linux 2.4 SGI Altix C - ia64 Intel y n -Linux 2.6 RHEL C - Lustre Intel (5) y y y n -Cray XT3 2.0.41 n n n n - - Notes: (1) Using mpich2 1.0.6. - (2) Using mpich2 1.0.7. - (3) Linux 2.6 with GNU, Intel, and PGI compilers, as indicated. - W or C indicates workstation or cluster, respectively. - (4) Using mvapich2 0.9.8. - (5) Linux 2.6.9-42.0.10. Xeon cluster with ELsmp_perfctr_lustre - and Intel compilers - (6) Linux 2.4.21-32.0.1. Xeon cluster with ELsmp_perfctr_lustre - and Intel compilers - (7) Linux 2.4.21, SuSE_292.till. Ia64 cluster with Intel compilers - (8) pgf90 - (9) With Compaq Visual Fortran 6.6c compiler. - (10) With PGI and Absoft compilers. - (11) PGI and Intel compilers for both C and Fortran - (12) AMD Opteron x86_64 - (13) ifort - (14) Yes with C and Fortran, but not with C++ - (15) Using Visual Studio 2005 or Cygwin - (16) Not tested for this release. - Compiler versions for each platform are listed in the preceding - "Platforms Tested" table. - - -Known Problems -============== -* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and - tools/h5copy/testh5copy.sh will fail some of its sub-tests. These sub-tests - are expected to fail and should exit with a non-zero code but the yod - command does not propagate the exit code of the executables. Yod always - returns 0 if it can launch the executable. The test suite shell expects - a non-zero for this particular test, therefore it concludes the test has - failed when it receives 0 from yod. Skip all the "failing" test for now - by changing them as following. 
- - ======== Original tools/h5ls/testh5ls.sh ========= - TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ======== Change to =============================== - echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ================================================== - - ======== Original tools/h5copy/testh5copy.sh ========= - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - H5LSTEST $FILEOUT - ======== Change to =============================== - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets - echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested - echo SKIP H5LSTEST $FILEOUT - ================================================== - AKC - 2008/11/10 - -* For Red Storm, a Cray XT3 system, the yod command sometimes gives the - message, "yod allocation delayed for node recovery". This interferes with - test suites that do not expect seeing this message. See the section of "Red - Storm" in file INSTALL_parallel for a way to deal with this problem. - AKC - 2008/05/28 - -* We have discovered two problems when running collective IO parallel HDF5 - tests with chunking storage on the ChaMPIon MPI compiler on tungsten, a - Linux cluster at NCSA. - - Under some complex selection cases: - 1) MPI_Get_element returns the wrong value. - 2) MPI_Type_struct also generates the wrong derived datatype and corrupt - data may be generated. - These issues arise only when turning on collective IO with chunking storage - with some complex selections. We have not found these problems on other - MPI-IO compilers. If you encounter these problems, you may use independent - IO instead. - - To avoid this behavior, change the following line in your code - H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - to - H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT); - - KY - 2007/08/24 - -* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* For LLNL, uP: both serial and parallel tests pass. - Zeus: Serial tests pass but parallel tests fail with a known problem in MPI. - ubgl: Serial tests pass but parallel tests fail. - -* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and - the application asks to do collective IO, we have found that when using 4 - processors, a simple collective write will sometimes be hung. This can be - verified with t_mpi test under testpar. - -* On IRIX6.5, when the C compiler version is greater than 7.4, complicated - MPI derived datatype code will work. However, the user should increase - the value of the MPI_TYPE_MAX environment variable to some appropriate value - to use collective irregular selection code. For example, the current - parallel HDF5 test needs to raise MPI_TYPE_MAX to 200,000 to pass the test. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. 
- There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - SLU - 2005/6/30 - -* On IBM AIX systems, parallel HDF5 mode will fail some tests with error - messages like "INFO: 0031-XXX ...". This is from the command `poe'. - Set the environment variable MP_INFOLEVEL to 0 to minimize the messages - and run the tests again. - - The tests may fail with messages like "The socket name is already in use", - but HDF5 does not use sockets. This failure is due to problems with the - poe command trying to set up the debug socket. To resolve this problem, - check to see whether there are many old /tmp/s.pedb.* files staying around. - These are sockets used by the poe command and left behind due to failed - commands. First, ask your system administrator to clean them out. - Lastly, request IBM to provide a means to run poe without the debug socket. - -* The --enable-static-exec configure flag fails to compile for Solaris - platforms. This is due to the fact that not all of the system libraries on - Solaris are available in a static format. - - The --enable-static-exec configure flag also fails to correctly compile - on IBM SP2 platforms for serial mode. The parallel mode works fine with - this option. - - It is suggested that you do not use this option on these platforms - during configuration. - -* There is also a configure error on Altix machines that incorrectly reports - when a version of Szip without an encoder is being used. - -* Information about building with PGI and Intel compilers is available in - the INSTALL file sections 4.7 and 4.8. - - - - -%%%%1.8.1%%%% - - -HDF5 version 1.8.1 released on Thu May 29 15:28:55 CDT 2008 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between the HDF5-1.8.1 release -and HDF5 1.8.0, and contains information on the platforms tested and known -problems in HDF5-1.8.1. For more details, see the files -HISTORY-1_0-1_8_0_rc3.txt and HISTORY-1_8.txt in the release_docs/ directory -of the HDF5 source. - -Links to the HDF5 1.8.1 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.1 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.1 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in the HDF5-1.8.x release series, including brief general -descriptions of some new and modified APIs, are described in the "What's New -in 1.8.0?" 
document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.1 (current -release) versus Release 1.8.0": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Support for new platforms and languages -- Bug Fixes since HDF5-1.8.0 -- Platforms Tested -- Supported Configuration Features Summary -- Known Problems - - -New Features -============ - - Configuration - ------------- - - The lib/libhdf5.settings file contains much more configure - information. (AKC - 2008/05/18) - - - The new configure option "--disable-sharedlib-rpath" disables - embedding the '-Wl,-rpath' information into executables when - shared libraries are produced, and instead solely relies on the - information in LD_LIBRARY_PATH. (MAM - 2008/05/15) - - - Configuration suite now uses Autoconf 2.61, Automake 1.10.1, and - Libtool 2.2.2 (MAM - 2008/05/01) - - Source code distribution - ======================== - - Library - ------- - - None - - Parallel Library - ---------------- - - None - - Tools - ----- - - h5repack: Reinstated the -i and -o command line flags to specify - input and output files. h5repack now understands both the old - syntax (with -i and -o) and the new syntax introduced in Release - 1.8.0. (PVN - 2008/05/23) - - h5dump: Added support for external links, displaying the object that - an external link points to. (PVN - 2008/05/12) - - h5dump: Added an option, -m, to allow user-defined formatting in the - output of floating point numbers. (PVN - 2008/05/06) - - h5dump, in output of the -p option: Added effective data compression - ratio to the dataset storage layout output when a compression filter - has been applied to a dataset. (PVN - 2008/05/01) - - F90 API - ------ - - New H5A, H5G, H5L, H5O, and H5P APIs to enable 1.8 features were - added. See "Release 1.8.1 (current release) versus Release 1.8.0" in - the document "HDF5 Software Changes from Release to Release" - (http://hdfgroup.org/HDF5/doc/ADGuide/Changes.html) for the - complete list of the new APIs. - - C++ API - ------ - - None - - -Support for New Platforms, Languages, and Compilers -=================================================== - - Both serial and parallel HDF5 are supported for the Red Storm machine - which is a Cray XT3 system. - - - The Fortran library will work correctly if compiled with the -i8 - flag. This has been tested with the g95, PGI and Intel Fortran - compilers. - - -Bug Fixes since HDF5-1.8.0 -========================== - - Configuration - ------------- - - None - - Source code distribution - ======================== - - Library - ------- - - Chunking: Chunks greater than 4GB are disallowed. - (QAK - 2008/05/16) - - Fixed the problem with searching for a target file when following - an external link. The search pattern will depend on whether the - target file's pathname is an absolute or a relative path. - Please see the H5Lcreate_external description in the "HDF5 - Reference Manual" (http://hdfgroup.org/HDF5/doc/RM/RM_H5L.html). - (VC - 2008/04/08) - - Fixed possible file corruption bug when encoding datatype - descriptions for compound datatypes whose size was between - 256 and 511 bytes and the file was opened with the "use the - latest format" property enabled (with H5Pset_libver_bounds). 
- (QAK - 2008/03/13) - - Fixed bug in H5Aget_num_attrs() routine to correctly handle an - invalid location identifier. (QAK - 2008/03/11) - - Parallel Library - ---------------- - - None - - Tools - ----- - - Fixed bug in h5diff that prevented datasets and attributes with - variable-length string elements from comparing correctly. - (QAK - 2008/02/28) - - Fixed bug in h5dump that caused binary output to be made only for - the first dataset, when several datasets were requested. - (PVN - 2008/04/07) - - F90 API - ------ - - The h5tset(get)_fields subroutines were missing the parameter to - specify a sign position; fixed. (EIP - 2008/05/23) - - Many APIs were fixed to work with the 8-byte integers in Fortran vs. - 4-byte integers in C. This change is transparent to user applications. - - C++ API - ------ - - The class hierarchy was revised to address the problem reported - in bugzilla #1068, Attribute should not be derived from base - class H5Object. Classes AbstractDS was moved out of H5Object. - Class Attribute now multiply inherits from IdComponent and - AbstractDs and class DataSet from H5Object and AbstractDs. - In addition, data member IdComponent::id was moved into subclasses: - Attribute, DataSet, DataSpace, DataType, H5File, Group, and PropList. - (BMR - 2008/05/20) - - IdComponent::dereference was incorrect; it was changed from: - void IdComponent::dereference(IdComponent& obj, void* ref) - to: - void H5Object::dereference(H5File& h5file, void* ref) - void H5Object::dereference(H5Object& obj, void* ref) - (BMR - 2008/05/20) - - Revised Attribute::write and Attribute::read wrappers to handle - memory allocation/deallocation properly. (bugzilla 1045) - (BMR - 2008/05/20) - - -Platforms Tested -================ -The following platforms and compilers have been tested for this release. 
- - Cray XT3 (2.0.41) cc (pgcc) 7.1-4 - (red storm) ftn (pgf90) 7.1-4 - CC (pgCC) 7.1-4 - mpicc 1.0.2 - mpif90 1.0.2 - - FreeBSD 6.2-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305 - (duty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.2.1 20080123 - g++ 4.2.1 20080123 - gfortran 4.2.1 20070620 - - FreeBSD 6.2-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305 - (liberty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.2.1 20080123 - g++ 4.2.1 20080123 - gfortran 4.2.1 20080123 - - IRIX64 6.5 (64 & n32) MIPSpro cc 7.4.4m - F90 MIPSpro 7.4.4m - C++ MIPSpro cc 7.4.4m - - Linux 2.6.9 (RHEL4) Intel 10.0 compilers - (abe.ncsa.uiuc.edu) - - Linux 2.4.21-47 gcc 3.2.3 20030502 - (osage) - - Linux 2.6.9-42.0.10 gcc,g++ 3.4.6 20060404, G95 (GCC 4.0.3) - (kagiso) PGI 7.1-6 (pgcc, pgf90, pgCC) - Intel 9.1 (icc, ifort, icpc) - - Linux 2.6.16.27 x86_64 AMD gcc 4.1.0 (SuSE Linux), g++ 4.1.0, - (smirom) g95 (GCC 4.0.3) - PGI 7.1-6 (pgcc, pgf90, pgCC) - Intel 9.1 (icc, ifort, icpc) - - Linux 2.6.5-7.252.1-rtgfx #1 Intel(R) C++ Version 9.0 - SMP ia64 Intel(R) Fortran Itanium(R) Version 9.0 - (cobalt) SGI MPI - - SunOS 5.8 32,46 Sun WorkShop 6 update 2 C 5.3 - (Solaris 2.8) Sun WorkShop 6 update 2 Fortran 95 6.2 - Sun WorkShop 6 update 2 C++ 5.3 - - SunOS 5.10 cc: Sun C 5.8 - (linew) f90: Sun Fortran 95 8.2 - CC: Sun C++ 5.8 - - Xeon Linux 2.4.21-32.0.1.ELsmp-perfctr-lustre - (tungsten) gcc 3.2.2 20030222 - Intel(R) C++ Version 9.0 - Intel(R) Fortran Compiler Version 9.0 - - IA-64 Linux 2.4.21.SuSE_309.tg1 ia64 - (NCSA tg-login) gcc 3.2.2 - Intel(R) C++ Version 8.1 - Intel(R) Fortran Compiler Version 8.1 - mpich-gm-1.2.6..14b-intel-r2 - - Intel 64 Linux 2.6.9-42.0.10.EL_lustre-1.4.10.1smp - (abe) gcc 3.4.6 20060404 - Intel(R) C++ Version 10.0 - Intel (R) Fortran Compiler Version 10.0 - mvapich2-0.9.8p2patched-intel-ofed-1.2 - - Windows XP Visual Studio .NET - Visual Studio 2005 w/ Intel Fortran 9.1 - Cygwin(native gcc compiler and g95) - MinGW(native gcc compiler and g95) - - Windows XP x64 Visual Studio 2005 w/ Intel Fortran 9.1 - - Windows Vista Visual Studio 2005 - - MAC OS 10.5.2 (Intel) i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 - GNU Fortran (GCC) 4.3.0 20070810 - G95 (GCC 4.0.3 (g95 0.91!) 
Apr 24 2008) - - -Supported Configuration Features Summary -======================================== - - In the tables below - y = tested and supported - n = not supported or not tested in this release - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90 F90 C++ zlib SZIP - parallel parallel -SunOS5.10 64-bit n y n y y y -SunOS5.10 32-bit n y n y y y -IRIX64_6.5 64-bit n y y y y y -IRIX64_6.5 32-bit n n n n y y -Windows XP n y(15) n(15) y y y -Windows XP x64 n y(15) n(15) y y y -Windows Vista n n n y y y -Mac OS X 10.5 Intel n y n y y y -FreeBSD 4.11 n n n y y y -RedHat EL3 W (3) y(1) y(10) y(1) y y y -RedHat EL3 W Intel (3) n y n y y n -RedHat EL3 W PGI (3) n y n y y n -SuSe x86_64 gcc (3,12) y(2) y(11) y(2) y y y -SuSe x86_64 Int (3,12) n y(13) n y y n -SuSe x86_64 PGI (3,12) n y(8) n y y y -Linux 2.4 Xeon C - Lustre Intel (3,6) n y n y y n -Linux 2.6 SuSE ia64 C - Intel (3,7) y y y y y n -Linux 2.6 SGI Altix - ia64 Intel (3) y y y y y y -Linux 2.6 RHEL C - Lustre Intel (5) y(4) y y(4) y y n -Cray XT3 2.0.41 y y y y y n - - -Platform Shared Shared Shared Thread- - C libs F90 libs C++ libs safe -Solaris2.10 64-bit y y y y -Solaris2.10 32-bit y y y y -IRIX64_6.5 64-bit y y n y -IRIX64_6.5 32-bit y dna y y -Windows XP y y(15) y y -Windows XP x64 y y(15) y y -Windows Vista y n n y -Mac OS X 10.3 y n -FreeBSD 4.11 y n y y -RedHat EL3 W (3) y y(10) y y -RedHat EL3 W Intel (3) y y y n -RedHat EL3 W PGI (3) y y y n -SuSe x86_64 W GNU (3,12) y y y y -SuSe x86_64 W Int (3,12) y y y n -SuSe x86_64 W PGI (3,12) y y y n -Linux 2.4 Xeon C - Lustre Intel (6) y y y n -Linux 2.4 SuSE - ia64 C Intel (7) y y y n -Linux 2.4 SGI Altix - ia64 Intel y n -Linux 2.6 RHEL C - Lustre Intel (5) y y y n -Cray XT3 2.0.41 n n n n n - - Notes: (1) Using mpich2 1.0.6. - (2) Using mpich2 1.0.7. - (3) Linux 2.6 with GNU, Intel, and PGI compilers, as indicated. - W or C indicates workstation or cluster, respectively. - (4) Using mvapich2 0.9.8. - (5) Linux 2.6.9-42.0.10. Xeon cluster with ELsmp_perfctr_lustre - and Intel compilers - (6) Linux 2.4.21-32.0.1. Xeon cluster with ELsmp_perfctr_lustre - and Intel compilers - (7) Linux 2.4.21, SuSE_292.till. Ia64 cluster with Intel compilers - (8) pgf90 - (9) With Compaq Visual Fortran 6.6c compiler. - (10) With PGI and Absoft compilers. - (11) PGI and Intel compilers for both C and Fortran - (12) AMD Opteron x86_64 - (13) ifort - (14) Yes with C and Fortran, but not with C++ - (15) Using Visual Studio 2005 or Cygwin - (16) Not tested for this release. - Compiler versions for each platform are listed in the preceding - "Platforms Tested" table. - - -Known Problems -============== -* For Red Storm, a Cray XT3 system, the yod command sometimes gives the - message, "yod allocation delayed for node recovery". This interferes with - test suites that do not expect seeing this message. See the section of "Red - Storm" in file INSTALL_parallel for a way to deal with this problem. - AKC - 2008/05/28 - -* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh will fail on - the test "Testing h5ls -w80 -r -g tgroup.h5" fails. This test is - expected to fail and exit with a non-zero code but the yod command does - not propagate the exit code of the executables. Yod always returns 0 if it - can launch the executable. The test suite shell expects a non-zero for - this particular test, therefore it concludes the test has failed when it - receives 0 from yod. 
To bypass this problem for now, change the following - lines in the tools/h5ls/testh5ls.sh. - ======== Original ========= - # The following combination of arguments is expected to return an error message - # and return value 1 - TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ======== Skip the test ========= - echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5 - ======== end of bypass ======== - AKC - 2008/05/28 - -* We have discovered two problems when running collective IO parallel HDF5 - tests with chunking storage on the ChaMPIon MPI compiler on tungsten, a - Linux cluster at NCSA. - - Under some complex selection cases: - 1) MPI_Get_element returns the wrong value. - 2) MPI_Type_struct also generates the wrong derived datatype and corrupt - data may be generated. - These issues arise only when turning on collective IO with chunking storage - with some complex selections. We have not found these problems on other - MPI-IO compilers. If you encounter these problems, you may use independent - IO instead. - - To avoid this behavior, change the following line in your code - H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - to - H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT); - - KY - 2007/08/24 - -* For SNL, spirit/liberty/thunderbird: The serial tests pass but parallel - tests failed with MPI-IO file locking message. AKC - 2007/6/25 - -* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* For LLNL, uP: both serial and parallel tests pass. - Zeus: Serial tests pass but parallel tests fail with a known problem in MPI. - ubgl: Serial tests pass but parallel tests fail. - -* Configuring with --enable-debug=all produces compiler errors on most - platforms: Users who want to run HDF5 in debug mode should use - --enable-debug rather than --enable-debug=all to enable debugging - information on most modules. - -* On Mac OS 10.4, test/dt_arith.c has some errors in conversion from long - double to (unsigned) long long and from (unsigned) long long to long double. - -* On Altix SGI with Intel 9.0, testmeta.c would not compile with -O3 - optimization flag. - -* On VAX, the Scaleoffset filter is not supported. The Scaleoffset filter - supports only the IEEE standard for floating-point data; it cannot be applied - to HDF5 data generated on VAX. - -* On Cray X1, a lone colon on the command line of h5dump --xml (as in - the testh5dumpxml.sh script) is misinterpereted by the operating system - and causes an error. - -* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and - the application asks to do collective IO, we have found that when using 4 - processors, a simple collective write will sometimes be hung. This can be - verified with t_mpi test under testpar. - -* On IRIX6.5, when the C compiler version is greater than 7.4, complicated - MPI derived datatype code will work. However, the user should increase - the value of the MPI_TYPE_MAX environment variable to some appropriate value - to use collective irregular selection code. For example, the current - parallel HDF5 test needs to raise MPI_TYPE_MAX to 200,000 to pass the test. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. 
- There was a bug in the calculation of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - SLU - 2005/6/30 - -* For version 6 (6.02 and 6.04) of the Portland Group compiler on the AMD - Opteron processor, there is a bug in the compiler for optimization(-O2). - The library failed in several tests, all related to the MULTI driver. - The problem has been reported to the vendor. - -* On IBM AIX systems, parallel HDF5 mode will fail some tests with error - messages like "INFO: 0031-XXX ...". This is from the command `poe'. - Set the environment variable MP_INFOLEVEL to 0 to minimize the messages - and run the tests again. - - The tests may fail with messages like "The socket name is already in use", - but HDF5 does not use sockets. This failure is due to problems with the - poe command trying to set up the debug socket. To resolve this problem, - check to see whether there are many old /tmp/s.pedb.* files staying around. - These are sockets used by the poe command and left behind due to failed - commands. First, ask your system administrator to clean them out. - Lastly, request IBM to provide a means to run poe without the debug socket. - -* The --enable-static-exec configure flag fails to compile for Solaris - platforms. This is due to the fact that not all of the system libraries on - Solaris are available in a static format. - - The --enable-static-exec configure flag also fails to correctly compile - on IBM SP2 platforms for serial mode. The parallel mode works fine with - this option. - - It is suggested that you do not use this option on these platforms - during configuration. - -* With the gcc 2.95.2 compiler, HDF5 uses the `-ansi' flag during - compilation. The ANSI version of the compiler complains about not being - able to handle the `long long' datatype with the warning: - - warning: ANSI C does not support `long long' - - This warning is innocuous and can be safely ignored. - -* The ./dsets tests fail on the TFLOPS machine if the test program, - dsets.c, is compiled with the -O option. The HDF5 library still works - correctly with the -O option. The test program works fine if it is - compiled with -O1 or -O0. Only -O (same as -O2) causes the test - program to fail. - -* Not all platforms behave correctly with Szip's shared libraries. Szip is - disabled in these cases, and a message is relayed at configure time. Static - libraries should be working on all systems that support Szip and should be - used when shared libraries are unavailable. - - There is also a configure error on Altix machines that incorrectly reports - when a version of Szip without an encoder is being used. - -* On some platforms that use Intel and Absoft compilers to build the HDF5 - Fortran library, compilation may fail for fortranlib_test.f90, fflush1.f90 - and fflush2.f90 complaining about the exit subroutine. Comment out the line - IF (total_error .ne. 0) CALL exit (total_error). - -* Information about building with PGI and Intel compilers is available in - the INSTALL file sections 4.7 and 4.8. 
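A minimal sketch of the Intel compiler advice given above (building with -mp -O1): the compiler name, flags, and configure invocation below are illustrative assumptions only, not an officially documented recipe.

    # Configure and build with the reduced-optimization Intel flags suggested
    # above; adjust compiler names and configure options for your site.
    CC=icc CFLAGS="-mp -O1" ./configure
    make && make check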
- -* On at least one system, SDSC DataStar, the scheduler (in this case - LoadLeveler) sends job status updates to standard error when you run - any executable that was compiled with the parallel compilers. - - This causes problems when running "make check" on parallel builds, as - many of the tool tests function by saving the output from test runs, - and comparing it to an exemplar. - - The best solution is to reconfigure the target system so it no longer - inserts the extra text. However, this may not be practical. - - In such cases, one solution is to "setenv HDF5_Make_Ignore yes" prior to - the configure and build. This will cause "make check" to continue after - detecting errors in the tool tests. However, in the case of SDSC DataStar, - it also leaves you with some 150 "failed" tests to examine by hand. - - A second solution is to write a script to run serial tests and filter - out the text added by the scheduler. A sample script used on SDSC - DataStar is given below, but you will probably have to customize it - for your installation. - - Observe that the basic idea is to insert the script as the first item - on the command line which executes the the test. The script then - executes the test and filters out the offending text before passing - it on. - - #!/bin/csh - - set STDOUT_FILE=~/bin/serial_filter.stdout - set STDERR_FILE=~/bin/serial_filter.stderr - - rm -f $STDOUT_FILE $STDERR_FILE - - ($* > $STDOUT_FILE) >& $STDERR_FILE - - set RETURN_VALUE=$status - - cat $STDOUT_FILE - - tail +3 $STDERR_FILE - - exit $RETURN_VALUE - - You get the HDF5 make files and test scripts to execute your filter script - by setting the environment variable "RUNSERIAL" to the full path of the - script prior to running configure for parallel builds. Remember to - "unsetenv RUNSERIAL" before running configure for a serial build. - - Note that the RUNSERIAL environment variable exists so that we can - prefix serial runs as necessary on the target system. On DataStar, - no prefix is necessary. However on an MPICH system, the prefix might - have to be set to something like "/usr/local/mpi/bin/mpirun -np 1" to - get the serial tests to run at all. - - In such cases, you will have to include the regular prefix in your - filter script. - -* H5Ocopy() does not copy reg_ref attributes correctly when shared-message - is turn on. The value of the reference in the destination attriubte is - wrong. This H5Ocopy problem will affect the h5copy tool. - -* In the C++ API, it appears that there are bugs in Attribute::write/read - and DataSet::write/read for fixed- and variable-len strings. The problems - are being worked on and a patch will be provided when the fixes are - available. - - -%%%%1.8.0%%%% - - -HDF5 version 1.8.0 released on Tue Feb 12 20:41:19 CST 2008 -================================================================================ - -INTRODUCTION -============ - -This document describes the differences between the HDF5-1.6.x release series -and HDF5 1.8.0, and contains information on the platforms tested and known -problems in HDF5-1.8.0. For more details, see the HISTORY-1_0-1_8_0_rc3.txt -file in the -release_docs/ directory of the HDF5 source. 
- -Links to the HDF5 1.8.0 source code, documentation, and additional materials -can be found on the HDF5 web page at: - - http://www.hdfgroup.org/products/hdf5/ - -The HDF5 1.8.0 release can be obtained from: - - http://www.hdfgroup.org/HDF5/release/obtain5.html - -User documentation for 1.8.0 can be accessed directly at this location: - - http://www.hdfgroup.org/HDF5/doc/ - -New features in 1.8.0, including brief general descriptions of some new -and modified APIs, are described in the "What's New in 1.8.0?" document: - - http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - -All new and modified APIs are listed in detail in the "HDF5 Software Changes -from Release to Release" document, in the section "Release 1.8.0 (current -release) versus Release 1.6.x": - - http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -If you have any questions or comments, please send them to the HDF Help Desk: - - help@hdfgroup.org - - -CONTENTS -======== - -- New Features -- Removed Feature -- Support for new platforms and languages -- Bug Fixes since HDF5-1.6.0 -- Platforms Tested -- Supported Configuration Features Summary -- Known Problems - - -New Features -============ - - HDF5 Release 1.8.0 is a major release with many changes and new features. - - New format and interface features discussed in the "What's New in - HDF5 1.8.0" document include the following: - - Enhanced group object management - Enhanced attribute management and more efficient meta data handling - Expanded datatype features - Creation order tracking and indexing - Improved meta data caching and cache control - UTF-8 encoding - New I/O filters: n-bit and scale+offset compression - New link (H5L) and object (H5O) interfaces and features - External and user-defined links - New high-level APIs: - HDF5 Packet Table (H5PT) and HDF5 Dimension Scale (H5DS) - C++ and Fortran interfaces for older high-level APIs: - H5Lite (H5LT), H5Image (H5IM), and H5Table (H5TB) - New and improved tools - And more... - - http://hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - - - New APIs associated with these features, other interface changes - (e.g., ENUM and struct definitions), and new library configuration flags - are listed in the "Release 1.8.0 (current release) versus Release 1.6.x" - section of "HDF5 Software Changes from Release to Release." - - http://hdfgroup.org/HDF5/doc/ADGuide/Changes.html - -Compatibility -------------- - Many HDF5 users and user communities have existing applications that - they may wish to port to Release 1.8.0. Alternatively, some users may - wish to take advantage of Release 1.8.0's improved performance without - having to port such applications. To facilitate managing application - compatibility and porting applications from release to release, the HDF - Team has implemented the following features: - Individually-configurable macros that selectively map common - interface names to the old and new interfaces - Library configuration options to configure the macro mappings - - Two related documents accompany this release: - "API Compatibility Macros in HDF5" discusses the specifics of the - new individually-configurable macros and library configuration - options. - http://hdfgroup.org/HDF5/doc/RM/APICompatMacros.html - - "New Features in HDF5 Release 1.8.0 and Backward/Forward Format - Compatibility Issues" discusses each new feature with regard to - its impact on format compatibility. 
- http://hdfgroup.org/HDF5/doc/ADGuide/CompatFormat180.html - -Referenced documents --------------------- - http://hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html - "What's New in HDF5 1.8.0" - - http://hdfgroup.org/HDF5/doc/ADGuide/Changes.html - The "Release 1.8.0 (current release) versus Release 1.6.x " - section in "HDF5 Software Changes from Release to Release" - - http://hdfgroup.org/HDF5/doc/RM/APICompatMacros.html - "API Compatibility Macros in HDF5" - - http://hdfgroup.org/HDF5/doc/ADGuide/CompatFormat180.html - "New Features in HDF5 Release 1.8.0 and Backward/Forward Format - Compatibility Issues" - - -Removed Feature -=============== -The stream virtual file driver (H5FD_STREAM) have been removed in this -release. This affects the functions H5Pset_fapl_stream and H5Pget_fapl_stream -and the constant H5FD_STREAM. - -This virtual file driver will be available at -http://hdf5-addons.origo.ethz.ch/. Note that at the time of this release, -the transition is still in progress; the necessary integration tools may -not be available when HDF5 Release 1.8.0 first comes out. - - -Support for New Platforms, Languages, and Compilers -=================================================== - - Support for Open VMS 7.3 was added. - - -Bug Fixes since HDF5-1.6.0 -========================== - This release contains numerous bug fixes. For details, see the - "Changes from 1.6.0 to 1.8.0-rc3" section of the HISTORY.txt file for - this release. - - -Platforms Tested -================ -The following platforms and compilers have been tested for for this release. - - AIX 5.2 (32/64 bit) xlc 8.0.0.11 - xlC 8.0 - xlf 10.01.0000.0 - mpcc_r 6.0.0.8 - mpxlf_r 8.1.1.7 - - FreeBSD 6.2-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305 - (duty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.2.1 20080123 - g++ 4.2.1 20080123 - gfortran 4.2.1 20070620 - - FreeBSD 6.2-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305 - (liberty) g++ 3.4.6 [FreeBSD] 20060305 - gcc 4.2.1 20080123 - g++ 4.2.1 20080123 - gfortran 4.2.1 20080123 - - IRIX64 6.5 (64 & n32) MIPSpro cc 7.4.4m - F90 MIPSpro 7.4.4m - C++ MIPSpro cc 7.4.4m - - Linux 2.6.9 (RHEL4) Intel 10.0 compilers - (abe.ncsa.uiuc.edu) - - Linux 2.4.21-47 gcc 3.2.3 20030502 - (osage) - - Linux 2.6.9-42.0.10 gcc 3.4.6 20060404 - (kagiso) PGI 7.0-7 (pgcc, pgf90, pgCC) - Intel 9.1 (icc, ifort, icpc) - - Linux 2.6.16.27 x86_64 AMD gcc 4.1.0 (SuSE Linux), g++ 4.1.0, - (smirom) g95 (GCC 4.0.3) - PGI 6.2-5 (pgcc, pgf90, pgCC) - Intel 9.1 (icc, iort, icpc) - - Linux 2.6.5-7.252.1-rtgfx #1 Intel(R) C++ Version 9.0 - SMP ia64 Intel(R) Fortran Itanium(R) Version 9.0 - (cobalt) SGI MPI - - SunOS 5.8 32,46 Sun WorkShop 6 update 2 C 5.3 - (Solaris 2.8) Sun WorkShop 6 update 2 Fortran 95 6.2 - Sun WorkShop 6 update 2 C++ 5.3 - - SunOS 5.10 cc: Sun C 5.8 - (linew) f90: Sun Fortran 95 8.2 - CC: Sun C++ 5.8 - - Xeon Linux 2.4.21-32.0.1.ELsmp-perfctr-lustre - (tungsten) gcc 3.2.2 20030222 - Intel(R) C++ Version 9.0 - Intel(R) Fortran Compiler Version 9.0 - - IA-64 Linux 2.4.21.SuSE_292.til1 ia64 - (NCSA tg-login) gcc 3.2.2 - Intel(R) C++ Version 8.1 - Intel(R) Fortran Compiler Version 8.1 - mpich-gm-1.2.5..10-intel-r2 - - Windows XP Visual Studio .NET - Visual Studio 2005 w/ Intel Fortran 9.1 - Cygwin(native gcc compiler and g95) - MinGW(native gcc compiler and g95) - - Windows XP x64 Visual Studio 2005 w/ Intel Fortran 9.1 - - Windows Vista Visual Studio 2005 - - MAC OS 10.4 (Intel) gcc i686-apple-darwin8-gcc-4.0.1 (GCC) 4.0.1 - G95 (GCC 4.0.3 (g95 0.91!) 
Nov 21 2006) - - Alpha Open VMS 7.3 Compaq C V6.5-001-48BCD - HP Fortran V7.6-3276 - Compaq C++ V6.5-004 - - -Supported Configuration Features Summary -======================================== - - In the tables below - y = tested and supported - n = not supported or not tested in this release - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - = testing incomplete on this feature or platform - -Platform C F90 F90 C++ zlib SZIP - parallel parallel -SunOS5.8 64-bit n y n y y y -SunOS5.8 32-bit n y n y y y -SunOS5.10 64-bit y(1) y n y y y -SunOS5.10 32-bit y(1) y n y y y -IRIX64_6.5 64-bit n y y y y y -IRIX64_6.5 32-bit n n n n y y -AIX-5.2 32-bit y y y y y y -AIX-5.2 64-bit y y y y y y -Windows XP n y(15) n(15) y y y -Windows XP x64 n y(15) n(15) y y y -Windows Vista n n n y y y -Mac OS X 10.4 PowerPC n n -Mac OS X 10.4 Intel n y n y y y -FreeBSD 4.11 n n n y y y -RedHat EL3 W (3) y(1a) y(10) y(1a) y y y -RedHat EL3 W Intel (3) n y n y y n -RedHat EL3 W PGI (3) n y n y y n -SuSe x86_64 gcc (3,12) y(1a) y(11) n y y y -SuSe x86_64 Int (3,12) n y(13) n y y n -SuSe x86_64 PGI (3,12) n y(8) n y y y -Linux 2.4 Xeon C - Lustre Intel (3,6) n y n y y n -Linux 2.6 SuSE ia64 C - Intel (3,7) y y y y y n -Linux 2.6 SGI Altix - ia64 Intel (3) y y y y y y -Alpha OpenVMS 7.3.2 n y n y n n - - - -Platform Shared Shared Shared static- Thread- - C libs F90 libs C++ libs exec safe -Solaris2.8 64-bit y y y x y -Solaris2.8 32-bit y y y x y -Solaris2.10 64-bit y x y -Solaris2.10 32-bit y x y -IRIX64_6.5 64-bit y y n y y -IRIX64_6.5 32-bit y dna y y y -AIX-5.2 & 5.3 32-bit n n n y n -AIX-5.2 & 5.3 64-bit n n n y n -Windows XP y y(15) y y y -Windows XP x64 y y(15) y y y -Windows Vista y n n y y -Mac OS X 10.3 y y n -FreeBSD 4.11 y n y y y -RedHat EL3 W (3) y y(10) y y y -RedHat EL3 W Intel (3) y y y y n -RedHat EL3 W PGI (3) y y y y n -SuSe x86_64 W GNU (3,12) y y y y y -SuSe x86_64 W Int (3,12) y y y y(14) n -SuSe x86_64 W PGI (3,12) y y y y(14) n -Linux 2.4 Xeon C - Lustre Intel (6) y y y y n -Linux 2.4 SuSE - ia64 C Intel (7) y y y y n -Linux 2.4 SGI Altix - ia64 Intel y y n -Alpha OpenVMS 7.3.2 n n n y n - - Notes: (1) Using mpich 1.2.6. - (1a) Using mpich2 1.0.6. - (2) Using mpt and mpich 1.2.6. - (3) Linux 2.6 with GNU, Intel, and PGI compilers, as indicated. - W or C indicates workstation or cluster, respectively. - - (6) Linux 2.4.21-32.0.1. Xeon cluster with ELsmp_perfctr_lustre - and Intel compilers - (7) Linux 2.4.21, SuSE_292.till. Ia64 cluster with Intel -compilers - (8) pgf90 - (9) With Compaq Visual Fortran 6.6c compiler. - (10) With PGI and Absoft compilers. - (11) PGI and Intel compilers for both C and Fortran - (12) AMD Opteron x86_64 - (13) ifort - (14) Yes with C and Fortran, but not with C++ - (15) Using Visual Studio 2005 or Cygwin - (16) Not tested for this release. - Compiler versions for each platform are listed in the preceding - "Platforms Tested" table. - - -Known Problems -============== -* We have discovered two problems when running collective IO parallel HDF5 - tests with chunking storage on the ChaMPIon MPI compiler on tungsten, a - Linux cluster at NCSA. - - Under some complex selection cases: - 1) MPI_Get_element returns the wrong value. - 2) MPI_Type_struct also generates the wrong derived datatype and corrupt - data may be generated. - These issues arise only when turning on collective IO with chunking storage - with some complex selections. We have not found these problems on other - MPI-IO compilers. 
If you encounter these problems, you may use independent - IO instead. - - To avoid this behavior, change the following line in your code - H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - - to - H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT); - - KY - 2007/08/24 - -* For SNL, spirit/liberty/thunderbird: The serial tests pass but parallel - tests failed with MPI-IO file locking message. AKC - 2007/6/25 - -* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers, - use -mp -O1 compilation flags to build the libraries. A higher level of - optimization causes failures in several HDF5 library tests. - -* For SNL, Red Storm: Only parallel HDF5 is supported. The serial tests pass - when run against the parallel library; the parallel tests also pass, but - with lots of non-fatal error messages. - -* For LLNL, uP: both serial and parallel tests pass. - Zeus: Serial tests pass but parallel tests fail with a known problem in MPI. - ubgl: Serial tests pass but parallel tests fail. - -* On SUN 5.10 C++, testing fails in the "Testing Shared Datatypes with - Attributes" test. - -* Configuring with --enable-debug=all produces compiler errors on most - platforms: Users who want to run HDF5 in debug mode should use - --enable-debug rather than --enable-debug=all to enable debugging - information on most modules. - -* On Mac OS 10.4, test/dt_arith.c has some errors in conversion from long - double to (unsigned) long long and from (unsigned) long long to long double. - -* On Altix SGI with Intel 9.0, testmeta.c would not compile with -O3 - optimization flag. - -* On VAX, the Scaleoffset filter is not supported. The filter cannot be - applied to HDF5 data generated on VAX. The Scaleoffset filter only supports - the IEEE standard for floating-point data. - -* On Cray X1, a lone colon on the command line of h5dump --xml (as in - the testh5dumpxml.sh script) is misinterpereted by the operating system - and causes an error. - -* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and - the application asks to do collective IO, we have found that when using 4 - processors, a simple collective write will sometimes be hung. This can be - verified with t_mpi test under testpar. - -* On IRIX6.5, when the C compiler version is greater than 7.4, complicated - MPI derived datatype code will work. However, the user should increase - the value of the MPI_TYPE_MAX environment variable to some appropriate value - to use collective irregular selection code. For example, the current - parallel HDF5 test needs to raise MPI_TYPE_MAX to 200,000 to pass the test. - -* A dataset created or rewritten with a v1.6.3 library or after cannot be read - with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled. - There was a bug in the calculating code of the Fletcher32 checksum in the - library before v1.6.3; the checksum value was not consistent between big- - endian and little-endian systems. This bug was fixed in Release 1.6.3. - However, after fixing the bug, the checksum value was no longer the same as - before on little-endian system. Library releases after 1.6.4 can still read - datasets created or rewritten with an HDF5 library of v1.6.2 or before. - SLU - 2005/6/30 - -* For version 6 (6.02 and 6.04) of the Portland Group compiler on the AMD - Opteron processor, there is a bug in the compiler for optimization(-O2). - The library failed in several tests, all related to the MULTI driver. - The problem has been reported to the vendor. 
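As an illustration of the MPI_TYPE_MAX advice in the IRIX6.5 note above, the variable can be raised before running the parallel tests; the value 200000 comes from that note, and the test invocation shown here is only an assumed example.

    # Raise MPI_TYPE_MAX so collective irregular selections pass (see the
    # IRIX6.5 note above), then run the parallel tests.
    MPI_TYPE_MAX=200000
    export MPI_TYPE_MAX
    cd testpar && make check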
- -* On IBM AIX systems, parallel HDF5 mode will fail some tests with error - messages like "INFO: 0031-XXX ...". This is from the command `poe'. - Set the environment variable MP_INFOLEVEL to 0 to minimize the messages - and run the tests again. - - The tests may fail with messages like "The socket name is already in use", - but HDF5 does not use sockets. This failure is due to problems with the - poe command trying to set up the debug socket. To resolve this problem, - check to see whether there are many old /tmp/s.pedb.* files staying around. - These are sockets used by the poe command and left behind due to failed - commands. First, ask your system administrator to clean them out. - Lastly, request IBM to provide a means to run poe without the debug socket. - -* The --enable-static-exec configure flag fails to compile for Solaris - platforms. This is due to the fact that not all of the system libraries on - Solaris are available in a static format. - - The --enable-static-exec configure flag also fails to correctly compile - on IBM SP2 platform for the serial mode. The parallel mode works fine with - this option. - - It is suggested that you do not use this option on these platforms - during configuration. - -* With the gcc 2.95.2 compiler, HDF5 uses the `-ansi' flag during - compilation. The ANSI version of the compiler complains about not being - able to handle the `long long' datatype with the warning: - - warning: ANSI C does not support `long long' - - This warning is innocuous and can be safely ignored. - -* The ./dsets tests fail on the TFLOPS machine if the test program, - dsets.c, is compiled with the -O option. The HDF5 library still works - correctly with the -O option. The test program works fine if it is - compiled with -O1 or -O0. Only -O (same as -O2) causes the test - program to fail. - -* Not all platforms behave correctly with Szip's shared libraries. Szip is - disabled in these cases, and a message is relayed at configure time. Static - libraries should be working on all systems that support Szip and should be - used when shared libraries are unavailable. - - There is also a configure error on Altix machines that incorrectly reports - when a version of Szip without an encoder is being used. - -* On some platforms that use Intel and Absoft compilers to build the HDF5 - Fortran library, compilation may fail for fortranlib_test.f90, fflush1.f90 - and fflush2.f90 complaining about the exit subroutine. Comment out the line - IF (total_error .ne. 0) CALL exit (total_error). - -* Information about building with PGI and Intel compilers is available in - the INSTALL file sections 4.7 and 4.8. - -* On at least one system, SDSC DataStar, the scheduler (in this case - LoadLeveler) sends job status updates to standard error when you run - any executable that was compiled with the parallel compilers. - - This causes problems when running "make check" on parallel builds, as - many of the tool tests function by saving the output from test runs, - and comparing it to an exemplar. - - The best solution is to reconfigure the target system so it no longer - inserts the extra text. However, this may not be practical. - - In such cases, one solution is to "setenv HDF5_Make_Ignore yes" prior to - the configure and build. This will cause "make check" to continue after - detecting errors in the tool tests. However, in the case of SDSC DataStar, - it also leaves you with some 150 "failed" tests to examine by hand. 
- - A second solution is to write a script to run serial tests and filter - out the text added by the scheduler. A sample script used on SDSC - DataStar is given below, but you will probably have to customize it - for your installation. - - Observe that the basic idea is to insert the script as the first item - on the command line which executes the the test. The script then - executes the test and filters out the offending text before passing - it on. - - #!/bin/csh - - set STDOUT_FILE=~/bin/serial_filter.stdout - set STDERR_FILE=~/bin/serial_filter.stderr - - rm -f $STDOUT_FILE $STDERR_FILE - - ($* > $STDOUT_FILE) >& $STDERR_FILE - - set RETURN_VALUE=$status - - cat $STDOUT_FILE - - tail +3 $STDERR_FILE - - exit $RETURN_VALUE - - You get the HDF5 make files and test scripts to execute your filter script - by setting the environment variable "RUNSERIAL" to the full path of the - script prior to running configure for parallel builds. Remember to - "unsetenv RUNSERIAL" before running configure for a serial build. - - Note that the RUNSERIAL environment variable exists so that we can - can prefix serial runs as necessary on the target system. On DataStar, - no prefix is necessary. However on an MPICH system, the prefix might - have to be set to something like "/usr/local/mpi/bin/mpirun -np 1" to - get the serial tests to run at all. - - In such cases, you will have to include the regular prefix in your - filter script. - -* H5Ocopy() does not copy reg_ref attributes correctly when shared-message - is turn on. The value of the reference in the destination attriubte is - wrong. This H5Ocopy problem will affect the h5copy tool. - diff --git a/release_docs/NEWSLETTER.txt b/release_docs/NEWSLETTER.txt new file mode 100644 index 00000000000..f03f710d717 --- /dev/null +++ b/release_docs/NEWSLETTER.txt @@ -0,0 +1,25 @@ +INTRODUCTION +============ + +This purpose of this document is to contain entries that can be used to quickly +produce a release newsletter. When something is added to the library that is +"newsletter worthy" (i.e., new feature, CVE fix, etc.) a summary note should +be added here. + +The format should look like this: + +* SUMMARY OF NEWSLETTER-WORTHY THING + + Here is where you describe the summary. Summarize the feature, fix, or + change in general language. Remember, RELEASE.txt is for communicating + technical specifics. Text entered here is more like advertising. + + (GitHub #123, #125) + +The GitHub #s could be relevant issues or PRs. They will probably not appear +in the final newsletter, but are so that the person writing the newsletter +has easy access to context if they have questions. + +Every entry in RELEASE.txt does NOT require an entry here. The newsletter is +for communicating major changes that are of interest to anyone. Minor bugfixes, +memory leak fixes, etc. do not require entries. diff --git a/release_docs/README.md b/release_docs/README.md new file mode 100644 index 00000000000..1532f1a25bf --- /dev/null +++ b/release_docs/README.md @@ -0,0 +1,102 @@ +# The `release_docs` directory + +## Intro + +This directory contains instructions for building and using the library as +well as the HDF5 history files. + +## HISTORY files + +The `HISTORY` files contain the history of this branch of HDF5. They fall into +three categories. + +### HISTORY-\[VERSION 1\]-\[VERSION 2\].txt + +These files are created when we release a new major version and include all +the changes that were made to the `develop` branch while creating a major release. 
+
+### HISTORY-\[VERSION\].txt
+
+This file contains the changes that were made to a maintenance branch since
+it split off from `develop`. It will also be found in the `develop` branch
+when experimental releases have been created.
+
+### RELEASE.txt
+
+This is the changelog for the current version of the library.
+
+For a MAJOR release (or in `develop`) this file lists all the changes since the
+last major version. For a MINOR release (or in a maintenance branch), this file
+lists all the changes since the last release in the maintenance branch.
+
+Examples:
+
+* The file for HDF5 1.14.0 includes all the changes since HDF5 1.12.0
+* The file for HDF5 1.10.9 includes all the changes since HDF5 1.10.8
+* The file in `develop` includes all the changes since the last major release
+* The file in `hdf5_1_14` includes all the changes since the last minor HDF5 1.14 release
+
+Note that we make no effort to bring maintenance branch `HISTORY` files back to
+develop. If you want to compare, say, 1.10.4 with 1.12.3, you'd have to get
+the history files from those releases and compare them by hand.
+
+## Creating new releases
+
+### MAJOR release
+
+* If there were experimental releases, merge the experimental `HISTORY` file
+  and the current `RELEASE.txt` by category to create a separate, unified
+  file that ignores the experimental releases. Don't check this in yet or
+  clobber any existing `HISTORY`/`RELEASE` files, but put it someplace handy for
+  use in later steps.
+
+* Create the new maintenance branch
+
+In develop:
+* Create the new `HISTORY-\[VERSION 1\]-\[VERSION 2\].txt` file
+  * If there is an experimental `HISTORY` file, add `RELEASE.txt` to the beginning of it and use that
+  * Otherwise, start with `RELEASE.txt`
+  * Add the introduction boilerplate like in the other `HISTORY` files (TOC, etc.)
+* Delete any experimental `HISTORY` file
+* Clear out `RELEASE.txt`
+
+Note that we're KEEPING any experimental release history information in the
+`HISTORY-\[VERSION 1\]-\[VERSION 2\].txt` file, so do NOT use the merged file in
+the above steps!
+
+In the new maintenance branch:
+* Create the new `HISTORY-\[VERSION\].txt` file
+  * If there is an experimental `HISTORY` file, use the combined file you created earlier
+  * Otherwise, start with `RELEASE.txt`
+  * Add the introduction boilerplate like in the other `HISTORY` files (TOC, etc.)
+* Delete any experimental `HISTORY` file
+* Clear out `RELEASE.txt`
+
+* Create the new release branch
+
+In the new release branch:
+* If there were experimental releases, use the combined file you created earlier as `RELEASE.txt`
+* Otherwise the `RELEASE.txt` will be used as-is
+
+### MINOR release
+
+* Create the release branch
+
+In the maintenance branch:
+* Add the contents of `RELEASE.txt` to the beginning of `HISTORY-\[VERSION\].txt`
+* Clear out `RELEASE.txt`
+
+### EXPERIMENTAL release
+
+* Add the contents of `RELEASE.txt` to the beginning of `HISTORY-\[VERSION\].txt`
+* Clear out `RELEASE.txt`
+
+## INSTALL files
+
+These files include instructions for building and installing HDF5 on various
+platforms.
+
+## USING files
+
+These files document how to build HDF5 applications with an installed HDF5
+library.
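For illustration, the roll-up step described above for MINOR and EXPERIMENTAL releases could be scripted along these lines. This is a minimal sketch, not part of the project's release tooling; the history file name HISTORY-1_14.txt is an assumption chosen for the example.

    #!/bin/sh
    # Sketch of "add RELEASE.txt to the beginning of HISTORY-[VERSION].txt,
    # then clear out RELEASE.txt" (run from release_docs/).
    HISTORY=HISTORY-1_14.txt        # assumed maintenance-branch history file

    # Prepend the current changelog to the history file.
    cat RELEASE.txt "$HISTORY" > "$HISTORY.tmp" && mv "$HISTORY.tmp" "$HISTORY"

    # Clear out RELEASE.txt, keeping the file itself in place.
    : > RELEASE.txt

The MAJOR-release variant described above differs only in which HISTORY file receives the text.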
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index c8637f09787..b4d8cb26f19 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -21,7 +21,7 @@ The official HDF5 releases can be obtained from: https://www.hdfgroup.org/downloads/hdf5/ -Changes from Release to Release and New Features in the HDF5-1.13.x release series +Changes from Release to Release and New Features in the HDF5-1.16.x release series can be found at: https://portal.hdfgroup.org/display/HDF5/Release+Specific+Information @@ -36,7 +36,7 @@ CONTENTS - New Features - Support for new platforms and languages -- Bug Fixes since HDF5-1.13.3 +- Bug Fixes since HDF5-1.14.0 - Platforms Tested - Known Problems - CMake vs. Autotools installations @@ -72,45 +72,6 @@ New Features (ADB - 2023/02/21) - - Removal of MPE support - - The ability to build with MPE instrumentation has been removed along with - the following configure options: - - Autotools: - --with-mpe= - - CMake has never supported building with MPE support. - - (DER - 2022/11/08) - - - Removal of dmalloc support - - The ability to build with dmalloc support has been removed along with - the following configure options: - - Autotools: - --with-dmalloc= - - CMake: - HDF5_ENABLE_USING_DMALLOC - - (DER - 2022/11/08) - - - Removal of memory allocation sanity checks configure options - - With the removal of the memory allocation sanity checks feature, the - following configure options are no longer necessary and have been - removed: - - Autotools: - --enable-memory-alloc-sanity-check - - CMake: - HDF5_MEMORY_ALLOC_SANITY_CHECK - HDF5_ENABLE_MEMORY_STATS - - (DER - 2022/11/03) Library: -------- @@ -126,66 +87,6 @@ New Features (JTH - 2023/02/22) - - Overhauled the Virtual Object Layer (VOL) - - The virtual object layer (VOL) was added in HDF5 1.12.0 but the initial - implementation required API-breaking changes to better support optional - operations and pass-through VOL connectors. The original VOL API is - now considered deprecated and VOL users and connector authors should - target the 1.14 VOL API. - - The specific changes are too extensive to document in a release note, so - VOL users and connector authors should consult the updated VOL connector - author's guide and the 1.12-1.14 VOL migration guide. - - (DER - 2022/12/28) - - - H5VLquery_optional() signature change - - The last parameter of this API call has changed from a pointer to hbool_t - to a pointer to uint64_t. Due to the changes in how optional operations - are handled in the 1.14 VOL API, we cannot make the old API call work - with the new scheme, so there is no API compatibility macro for it. - - (DER - 2022/12/28) - - - H5I_free_t callback signature change - - In order to support asynchronous operations and future IDs, the signature - of the H5I_free_t callback has been modified to take a second 'request' - parameter. Due to the nature of the internal library changes, no API - compatibility macro is available for this change. - - (DER - 2022/12/28) - - - Fix for CVE-2019-8396 - - Malformed HDF5 files may have truncated content which does not match - the expected size. When H5O__pline_decode() attempts to decode these it - may read past the end of the allocated space leading to heap overflows - as bounds checking is incomplete. - - The fix ensures each element is within bounds before reading. 
- - (2022/11/09 - HDFFV-10712, CVE-2019-8396, GitHub #2209) - - - Removal of memory allocation sanity checks feature - - This feature added heap canaries and statistics tracking for internal - library memory operations. Unfortunately, the heap canaries caused - problems when library memory operations were mixed with standard C - library memory operations (such as in the filter pipeline, where - buffers may have to be reallocated). Since any platform with a C - compiler also usually has much more sophisticated memory sanity - checking tools than the HDF5 library provided (e.g., valgrind), we - have decided to to remove the feature entirely. - - In addition to the configure changes described above, this also removes - the following from the public API: - H5get_alloc_stats() - H5_alloc_stats_t - - (DER - 2022/11/03) Parallel Library: ----------------- @@ -229,11 +130,7 @@ New Features Documentation: -------------- - - Ported the existing VOL Connector Author Guide document to doxygen. - - Added new dox file, VOLConnGuide.dox. - - (ADB - 2022/12/20) + - Support for new platforms, languages and compilers @@ -292,128 +189,6 @@ Bug Fixes since HDF5-1.13.3 release (JTH - 2023/02/16, GH #2433) - - Seg fault on file close - - h5debug fails at file close with core dump on a file that has an - illegal file size in its cache image. In H5F_dest(), the library - performs all the closing operations for the file and keeps track of - the error encountered when reading the file cache image. - At the end of the routine, it frees the file's file structure and - returns error. Due to the error return, the file object is not removed - from the ID node table. This eventually causes assertion failure in - H5VL__native_file_close() when the library finally exits and tries to - access that file object in the table for closing. - - The closing routine, H5F_dest(), will not free the file structure if - there is error, keeping a valid file structure in the ID node table. - It will be freed later in H5VL__native_file_close() when the - library exits and terminates the file package. - - (VC - 2022/12/14, HDFFV-11052, CVE-2020-10812) - - - Fix CVE-2018-13867 / GHSA-j8jr-chrh-qfrf - - Validate location (offset) of the accumulated metadata when comparing. - - Initially, the accumulated metadata location is initialized to HADDR_UNDEF - - the highest available address. Bogus input files may provide a location - or size matching this value. Comparing this address against such bogus - values may provide false positives. Thus make sure, the value has been - initialized or fail the comparison early and let other parts of the - code deal with the bogus address/size. - Note: To avoid unnecessary checks, it is assumed that if the 'dirty' - member in the same structure is true the location is valid. - - (EFE - 2022/10/10 GH-2230) - - - Fix CVE-2018-16438 / GHSA-9xmm-cpf8-rgmx - - Make sure info block for external links has at least 3 bytes. - - According to the specification, the information block for external links - contains 1 byte of version/flag information and two 0 terminated strings - for the object linked to and the full path. - Although not very useful, the minimum string length for each (with - terminating 0) would be one byte. - Checking this helps to avoid SEGVs triggered by bogus files. - - (EFE - 2022/10/09 GH-2233) - - - CVE-2021-46244 / GHSA-vrxh-5gxg-rmhm - - Compound datatypes may not have members of size 0 - - A member size of 0 may lead to an FPE later on as reported in - CVE-2021-46244. 
To avoid this, check for this as soon as the - member is decoded. - - (EFE - 2022/10/05 GEH-2242) - - - - Fix CVE-2021-45830 / GHSA-5h2h-fjjr-x9m2 - - Make H5O__fsinfo_decode() more resilient to out-of-bound reads. - - When decoding a file space info message in H5O__fsinfo_decode() make - sure each element to be decoded is still within the message. Malformed - hdf5 files may have trunkated content which does not match the - expected size. Checking this will prevent attempting to decode - unrelated data and heap overflows. So far, only free space manager - address data was checked before decoding. - - (EFE - 2022/10/05 GH-2228) - - - Fix CVE-2021-46242 / GHSA-x9pw-hh7v-wjpf - - When evicting driver info block, NULL the corresponding entry. - - Since H5C_expunge_entry() called (from H5AC_expunge_entry()) sets the flag - H5C__FLUSH_INVALIDATE_FLAG, the driver info block will be freed. NULLing - the pointer in f->shared->drvinfo will prevent use-after-free when it is - used in other functions (like H5F__dest()) - as other places will check - whether the pointer is initialized before using its value. - - (EFE - 2022/09/29 GH-2254) - - - Fix CVE-2021-45833 / GHSA-x57p-jwp6-4v79 - - Report error if dimensions of chunked storage in data layout < 2 - - For Data Layout Messages version 1 & 2 the specification state - that the value stored in the data field is 1 greater than the - number of dimensions in the dataspace. For version 3 this is - not explicitly stated but the implementation suggests it to be - the case. - Thus the set value needs to be at least 2. For dimensionality - < 2 an out-of-bounds access occurs. - - (EFE - 2022/09/28 GH-2240) - - - Fix CVE-2018-14031 / GHSA-2xc7-724c-r36j - - Parent of enum datatype message must have the same size as the - enum datatype message itself. - Functions accessing the enumeration values use the size of the - enumeration datatype to determine the size of each element and - how much data to copy. - Thus the size of the enumeration and its parent need to match. - Check in H5O_dtype_decode_helper() to avoid unpleasant surprises - later. - - (EFE - 2022/09/28 GH-2236) - - - Fix CVE-2018-17439 / GHSA-vcxv-vp43-rch7 - - H5IMget_image_info(): Make sure to not exceed local array size - - Malformed hdf5 files may provide more dimensions than the array dim[] in - H5IMget_image_info() is able to hold. Check number of elements first by calling - H5Sget_simple_extent_dims() with NULL for both 'dims' and 'maxdims' arguments. - This will cause the function to return only the number of dimensions. - The fix addresses a stack overflow on write. - - (EFE - 2022/09/27 HDFFV-10589, GH-2226) - Java Library ------------ @@ -435,49 +210,10 @@ Bug Fixes since HDF5-1.13.3 release (ADB - 2023/02/16 GH-1546,GH-2259) - - Remove Javadoc generation - - The use of doxygen now supersedes the requirement to build javadocs. We do not - have the resources to continue to support two documentation methods and have - chosen doxygen as our standard. - - (ADB - 2022/12/19) - - - Change the default for building the high-level tools - - The gif2hdf5 and hdf2gif high-level tools are deprecated and will be removed - in a future release. The default build setting for them have been changed from enabled - to disabled. A user can enable the build of these tools if needed. 
- autotools: --enable-hlgiftools - cmake: HDF5_BUILD_HL_GIF_TOOLS=ON - - (ADB - 2022/12/16) - - - Change the settings of the *pc files to use the correct format - - The pkg-config files generated by CMake uses incorrect syntax for the 'Requires' - settings. Changing the set to use 'lib-name = version' instead 'lib-name-version' - fixes the issue - - (ADB - 2022/12/06 HDFFV-11355) - - - Move MPI libraries link from PRIVATE to PUBLIC - - The install dependencies were not including the need for MPI libraries when - an application or library was built with the C library. Also updated the - CMake target link command to use the newer style MPI::MPI_C link variable. - - (ADB - 2022/10/27) - Tools ----- - - Fix h5repack to only print output when verbose option is selected - - When timing option was added to h5repack, the check for verbose was - incorrectly implemented. - - (ADB - 2022/12/02, GH #2270) + - Performance From dbb90d916d6d4b128746e3b741052e0323fdb293 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Thu, 23 Mar 2023 20:55:18 -0700 Subject: [PATCH 086/231] Fix minor things noted when doing 1.10 merges (#2610) * Duplicated HDF_RESOURCES_DIR from cmake_ext_mod merge * Typos in comments --- CMakeLists.txt | 2 +- bin/checkposix | 4 ++-- java/CMakeLists.txt | 2 +- release_docs/INSTALL_Cygwin.txt | 2 +- src/H5private.h | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ba0e51b01ed..c2679dd6420 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -640,7 +640,7 @@ else () endif () include (${HDF_RESOURCES_DIR}/HDFCompilerFlags.cmake) -set (CMAKE_MODULE_PATH ${HDF_RESOURCES_DIR} ${HDF_RESOURCES_DIR} ${CMAKE_MODULE_PATH}) +set (CMAKE_MODULE_PATH ${HDF_RESOURCES_DIR} ${CMAKE_MODULE_PATH}) #----------------------------------------------------------------------------- # Option to Enable HDFS diff --git a/bin/checkposix b/bin/checkposix index ee12d445e77..bb6b81bae7d 100755 --- a/bin/checkposix +++ b/bin/checkposix @@ -103,7 +103,7 @@ foreach $arg (@ARGV) { # Now find all function calls on this line which don't start with 'H' while (($name)=/\b([a-z_A-GI-Z]\w*)\s*\(/) { $_ = $'; - + # Ignore C statements that look sort of like function # calls. 
next if $name =~ /^(if|for|offsetof|return|sizeof|switch|while|void)$/; @@ -186,7 +186,7 @@ foreach $arg (@ARGV) { } # TESTING (not comprehensive - just noise reduction) - + # Test macros and functions (testhdf5.h) next if $name =~ /^(AddTest|TestErrPrintf|TestSummary|TestCleanup|TestShutdown)$/; next if $name =~ /^(CHECK|CHECK_PTR|CHECK_PTR_NULL|CHECK_PTR_EQ|CHECK_I)$/; diff --git a/java/CMakeLists.txt b/java/CMakeLists.txt index 86fab9d5b54..ae37ceb9ad8 100644 --- a/java/CMakeLists.txt +++ b/java/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required (VERSION 3.18) project (HDF5_JAVA C Java) -set (CMAKE_MODULE_PATH "${HDF_RESOURCES_DIR};${HDF_RESOURCES_DIR}") +set (CMAKE_MODULE_PATH "${HDF_RESOURCES_DIR}") find_package (Java) #----------------------------------------------------------------------------- diff --git a/release_docs/INSTALL_Cygwin.txt b/release_docs/INSTALL_Cygwin.txt index 34d3e5aea74..b3c8a83c2d5 100644 --- a/release_docs/INSTALL_Cygwin.txt +++ b/release_docs/INSTALL_Cygwin.txt @@ -2,7 +2,7 @@ HDF5 Build and Install Instructions for Cygwin ************************************************************************ -This document is a instruction on how to build, test and install HDF5 library on +This document is an instruction on how to build, test and install HDF5 library on Cygwin. See detailed information in hdf5/INSTALL. NOTE: hdf5 can be built with CMake, see the INSTALL_CMake.txt file for more guidance. diff --git a/src/H5private.h b/src/H5private.h index 6ed0aa2a98a..ec77bbbbd0d 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -1932,7 +1932,7 @@ typedef struct H5_api_struct { #define H5_FIRST_THREAD_INIT pthread_once(&H5TS_first_init_g, H5TS_pthread_first_thread_init); #endif -/* Macros for threadsafe HDF-5 Phase I locks */ +/* Macros for threadsafe HDF5 Phase I locks */ #define H5_API_LOCK H5TS_mutex_lock(&H5_g.init_lock); #define H5_API_UNLOCK H5TS_mutex_unlock(&H5_g.init_lock); From a3391f682c33c7f24f254f5677573a2daf1a1dcc Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Thu, 23 Mar 2023 22:55:34 -0500 Subject: [PATCH 087/231] Fix a memory corruption issue in H5S__point_project_simple (#2626) --- release_docs/RELEASE.txt | 14 ++++++++++++++ src/H5Spoint.c | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index b4d8cb26f19..8249a8cd3b8 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -141,6 +141,20 @@ Bug Fixes since HDF5-1.13.3 release =================================== Library ------- + - Fixed a memory corruption issue that can occur when reading + from a dataset using a hyperslab selection in the file + dataspace and a point selection in the memory dataspace + + When reading from a dataset using a hyperslab selection in + the dataset's file dataspace and a point selection in the + dataset's memory dataspace where the file dataspace's "rank" + is greater than the memory dataspace's "rank", memory corruption + could occur due to an incorrect number of selection points + being copied when projecting the point selection onto the + hyperslab selection's dataspace. 
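To picture the access pattern this note describes, here is a small sketch of a read that combines a hyperslab selection in a rank-2 file dataspace with a point selection in a rank-1 memory dataspace. The file name, dataset name and sizes are invented for illustration, error checking is omitted for brevity, and the snippet is not taken from the HDF5 test suite:

    #include "hdf5.h"

    int
    main(void)
    {
        hsize_t file_dims[2] = {10, 10};
        hsize_t mem_dims[1]  = {4};
        hsize_t start[2]     = {2, 2};
        hsize_t count[2]     = {1, 4};                   /* 1 x 4 hyperslab */
        hsize_t points[4][1] = {{0}, {1}, {2}, {3}};     /* 4 points, rank 1 */
        int     buf[4];

        hid_t file   = H5Fcreate("point_project.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        hid_t fspace = H5Screate_simple(2, file_dims, NULL);
        hid_t mspace = H5Screate_simple(1, mem_dims, NULL);
        hid_t dset   = H5Dcreate2(file, "data", H5T_NATIVE_INT, fspace,
                                  H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

        /* Hyperslab selection in the higher-rank file dataspace ... */
        H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);
        /* ... point selection in the lower-rank memory dataspace */
        H5Sselect_elements(mspace, H5S_SELECT_SET, 4, &points[0][0]);

        /* The read projects the point selection onto the file selection's
         * dataspace; this is the combination the note above says could
         * corrupt memory before the fix. */
        H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);

        H5Dclose(dset);
        H5Sclose(mspace);
        H5Sclose(fspace);
        H5Fclose(file);
        return 0;
    }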
+ + (JTH - 2023/03/23) + - Fixed issues in the Subfiling VFD when using the SELECT_IOC_EVERY_NTH_RANK or SELECT_IOC_TOTAL I/O concentrator selection strategies diff --git a/src/H5Spoint.c b/src/H5Spoint.c index b10b7da166b..1c3697cc15a 100644 --- a/src/H5Spoint.c +++ b/src/H5Spoint.c @@ -2319,7 +2319,7 @@ H5S__point_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *of /* Copy over the point's coordinates */ HDmemset(new_node->pnt, 0, sizeof(hsize_t) * rank_diff); H5MM_memcpy(&new_node->pnt[rank_diff], base_node->pnt, - (new_space->extent.rank * sizeof(hsize_t))); + (base_space->extent.rank * sizeof(hsize_t))); /* Keep the order the same when copying */ if (NULL == prev_node) From 602449b0b7c75946cbdc942d2024d5cf94461cae Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Fri, 31 Mar 2023 22:36:28 -0500 Subject: [PATCH 088/231] Avoid suppressing error output for non-tentative file opens (#2632) * Avoid suppressing error output for non-tentative file opens * Update comment about tentative file opens --- src/H5Fint.c | 52 +++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 11 deletions(-) diff --git a/src/H5Fint.c b/src/H5Fint.c index 7ad35fc552d..ee35bddefc4 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -1798,22 +1798,52 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id) else tent_flags = flags; - H5E_BEGIN_TRY - { - lf = H5FD_open(name, tent_flags, fapl_id, HADDR_UNDEF); + /* + * When performing a tentative open of a file where we have stripped away + * flags such as H5F_ACC_CREAT from the specified file access flags, the + * H5E_BEGIN/END_TRY macros are used to suppress error output since there + * is an expectation that the tentative open might fail. Even though we + * explicitly clear the error stack after such a failure, the underlying + * file driver might maintain its own error stack and choose whether to + * display errors based on whether the library has disabled error reporting. + * Since we wish to suppress that error output as well for the case of + * tentative file opens, surrounding the file open call with the + * H5E_BEGIN/END_TRY macros is an explicit instruction to the file driver + * not to display errors. If the tentative file open call fails, another + * attempt at opening the file will be made without error output being + * suppressed. + * + * However, if stripping away the H5F_ACC_CREAT flag and others left us + * with the same file access flags as before, then we will skip this + * tentative file open and only make a single attempt at opening the file. + * In this case, we don't want to suppress error output since the underlying + * file driver might provide more details on why the file open failed. + */ + if (tent_flags != flags) { + /* Make tentative attempt to open file */ + H5E_BEGIN_TRY + { + lf = H5FD_open(name, tent_flags, fapl_id, HADDR_UNDEF); + } + H5E_END_TRY; } - H5E_END_TRY; - if (NULL == lf) { - if (tent_flags == flags) - HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to open file: name = '%s', tent_flags = %x", - name, tent_flags) - H5E_clear_stack(NULL); - tent_flags = flags; + /* + * If a tentative attempt to open the file wasn't necessary, attempt + * to open the file now. Otherwise, if the tentative open failed, clear + * the error stack and reset the file access flags, then make another + * attempt at opening the file. 
+ */ + if ((tent_flags == flags) || (lf == NULL)) { + if (tent_flags != flags) { + H5E_clear_stack(NULL); + tent_flags = flags; + } + if (NULL == (lf = H5FD_open(name, tent_flags, fapl_id, HADDR_UNDEF))) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to open file: name = '%s', tent_flags = %x", name, tent_flags) - } /* end if */ + } /* Is the file already open? */ if ((shared = H5F__sfile_search(lf)) != NULL) { From 18687543bace47f73ca655ca2fc2665cb2f00a0a Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Fri, 31 Mar 2023 22:37:12 -0500 Subject: [PATCH 089/231] chore: make comment consistent (#2654) Use a 3rd person verb to match "Does not" in the next sentence. --- test/h5test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/h5test.c b/test/h5test.c index 0138c6927ad..1467e0b41e0 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -1553,7 +1553,7 @@ h5_verify_cached_stabs_cb(hid_t oid, const char H5_ATTR_UNUSED *name, const H5O_ /*------------------------------------------------------------------------- * Function: h5_verify_cached_stabs * - * Purpose: Verify that all groups in every file in base_name have + * Purpose: Verifies that all groups in every file in base_name have * their symbol table information cached (if present, and if * the parent group also uses a symbol table). Does not * check that the root group's symbol table information is From 2e5a19204964281db26945d954c649f7be02e3ef Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Fri, 31 Mar 2023 22:38:15 -0500 Subject: [PATCH 090/231] CMake - Match Autotools behavior for library instrumentation (#2648) Enable library instrumentation by default for parallel debug builds --- CMakeLists.txt | 8 +++++++- release_docs/RELEASE.txt | 10 ++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c2679dd6420..c46bbb10919 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -611,9 +611,15 @@ option (HDF5_BUILD_GENERATORS "Build Test Generators" OFF) #----------------------------------------------------------------------------- option (HDF5_ENABLE_TRACE "Enable API tracing capability" OFF) mark_as_advanced (HDF5_ENABLE_TRACE) -if (${HDF_CFG_NAME} MATCHES "Debug") +if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") # Enable instrumenting of the library's internal operations option (HDF5_ENABLE_INSTRUMENT "Instrument The library" OFF) + + # Instrumenting is enabled by default for parallel debug builds + if (HDF5_ENABLE_PARALLEL) + set (HDF5_ENABLE_INSTRUMENT ON CACHE BOOL "Instrument The library" FORCE) + endif () + if (HDF5_ENABLE_INSTRUMENT) set (H5_HAVE_INSTRUMENTED_LIBRARY 1) endif () diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 8249a8cd3b8..e02a58d44ec 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -47,6 +47,16 @@ New Features Configuration: ------------- + - Enabled instrumentation of the library by default in CMake for parallel + debug builds + + HDF5 can be configured to instrument portions of the parallel library to + aid in debugging. Autotools builds of HDF5 turn this capability on by + default for parallel debug builds and off by default for other build types. + CMake has been updated to match this behavior. + + (JTH - 2023/03/29) + - Added new option to build libaec and zlib inline with CMake. 
Using the CMake FetchContent module, the external filters can populate From 4fe8376fec797b915f508d8381becb700d6ddcc9 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 3 Apr 2023 11:06:36 -0700 Subject: [PATCH 091/231] Fix memory leaks and aborts in H5O EFL decode (#2656) * Convert asserts to error handling in efl decode The function that decodes external data files object header messages would call assert() when parsing malformed files, causing applications to crash when linked against the debug library. This change converts these assert() calls to HDF5 error checks, so the messages are sanity checked in both release and debug mode and debug mode no longer crashes applications. Also cleaned up some error handling usage and debug checks. * Free memory on H5O efl decode errors * Add buffer size checks to efl msg decode * Add parentheses to math expressions Fixes GitHub #2605 --- src/H5Oefl.c | 109 ++++++++++++++++++++++++++++----------------------- 1 file changed, 61 insertions(+), 48 deletions(-) diff --git a/src/H5Oefl.c b/src/H5Oefl.c index 557dc133f29..35e2d9f145c 100644 --- a/src/H5Oefl.c +++ b/src/H5Oefl.c @@ -67,108 +67,121 @@ const H5O_msg_class_t H5O_MSG_EFL[1] = {{ * Purpose: Decode an external file list message and return a pointer to * the message (and some other data). * - * Return: Success: Ptr to a new message struct. + * We allow zero dimension size starting from the 1.8.7 release. + * The dataset size of external storage can be zero. * + * Return: Success: Pointer to a new message struct * Failure: NULL - * - * Programmer: Robb Matzke - * Tuesday, November 25, 1997 - * - * Modification: - * Raymond Lu - * 11 April 2011 - * We allow zero dimension size starting from the 1.8.7 release. - * The dataset size of external storage can be zero. 
*------------------------------------------------------------------------- */ static void * H5O__efl_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5O_efl_t *mesg = NULL; - int version; - const char *s = NULL; - H5HL_t *heap; - size_t u; /* Local index variable */ - void *ret_value = NULL; /* Return value */ + H5O_efl_t *mesg = NULL; + int version; + const uint8_t *p_end = p + p_size - 1; /* pointer to last byte in p */ + const char *s = NULL; + H5HL_t *heap = NULL; + void *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE /* Check args */ HDassert(f); HDassert(p); + HDassert(p_size > 0); if (NULL == (mesg = (H5O_efl_t *)H5MM_calloc(sizeof(H5O_efl_t)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "memory allocation failed") - /* Version */ + /* Version (1 byte) */ + if ((p + 1 - 1) > p_end) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") version = *p++; if (version != H5O_EFL_VERSION) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for external file list message") - /* Reserved */ + /* Reserved (3 bytes) */ + if ((p + 3 - 1) > p_end) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") p += 3; - /* Number of slots */ + /* Number of slots (2x 2 bytes) */ + if ((p + 4 - 1) > p_end) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, mesg->nalloc); - HDassert(mesg->nalloc > 0); + if (mesg->nalloc <= 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad number of allocated slots when parsing efl msg") UINT16DECODE(p, mesg->nused); - HDassert(mesg->nused <= mesg->nalloc); + if (mesg->nused > mesg->nalloc) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad number of in-use slots when parsing efl msg") /* Heap address */ + if ((p + H5F_SIZEOF_ADDR(f) - 1) > p_end) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->heap_addr)); + if (H5F_addr_defined(mesg->heap_addr) == FALSE) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad local heap address when parsing efl msg") -#ifndef NDEBUG - HDassert(H5F_addr_defined(mesg->heap_addr)); + /* Decode the file list */ + mesg->slot = (H5O_efl_entry_t *)H5MM_calloc(mesg->nalloc * sizeof(H5O_efl_entry_t)); + if (NULL == mesg->slot) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "memory allocation failed") if (NULL == (heap = H5HL_protect(f, mesg->heap_addr, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "unable to read protect link value") + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to protect local heap") +#ifdef H5O_DEBUG + /* Verify that the name at offset 0 in the local heap is the empty string */ s = (const char *)H5HL_offset_into(heap, 0); - - HDassert(s && !*s); - - if (H5HL_unprotect(heap) < 0) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "unable to read unprotect link value") - heap = NULL; + if (s == NULL) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "could not obtain pointer into local heap") + if (*s != '\0') + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "entry at offset 0 in local heap not an empty string") #endif - /* Decode the file list */ - mesg->slot = (H5O_efl_entry_t *)H5MM_calloc(mesg->nalloc * sizeof(H5O_efl_entry_t)); - if (NULL == mesg->slot) - 
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") - - if (NULL == (heap = H5HL_protect(f, mesg->heap_addr, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "unable to read protect link value") - for (u = 0; u < mesg->nused; u++) { + for (size_t u = 0; u < mesg->nused; u++) { /* Name */ + if ((p + H5F_SIZEOF_SIZE(f) - 1) > p_end) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") H5F_DECODE_LENGTH(f, p, mesg->slot[u].name_offset); if ((s = (const char *)H5HL_offset_into(heap, mesg->slot[u].name_offset)) == NULL) - HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "unable to get external file name") - if (*s == (char)'\0') - HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "invalid external file name") + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "unable to get external file name") + if (*s == '\0') + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "invalid external file name") mesg->slot[u].name = H5MM_xstrdup(s); - HDassert(mesg->slot[u].name); + if (mesg->slot[u].name == NULL) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "string duplication failed") /* File offset */ + if ((p + H5F_SIZEOF_SIZE(f) - 1) > p_end) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") H5F_DECODE_LENGTH(f, p, mesg->slot[u].offset); /* Size */ + if ((p + H5F_SIZEOF_SIZE(f) - 1) > p_end) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") H5F_DECODE_LENGTH(f, p, mesg->slot[u].size); - } /* end for */ + } if (H5HL_unprotect(heap) < 0) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "unable to read unprotect link value") - heap = NULL; + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, NULL, "unable to unprotect local heap") /* Set return value */ ret_value = mesg; done: if (ret_value == NULL) - if (mesg != NULL) + if (mesg != NULL) { + if (mesg->slot != NULL) { + for (size_t u = 0; u < mesg->nused; u++) + H5MM_xfree(mesg->slot[u].name); + H5MM_xfree(mesg->slot); + } H5MM_xfree(mesg); + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__efl_decode() */ From 759be9efbcb70bbf8c5e4d4ebdf7abfd76b5d687 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 3 Apr 2023 13:08:39 -0500 Subject: [PATCH 092/231] Synchronize HDF5 tests with VOL tests (#2628) --- test/tarray.c | 6 +- test/tattr.c | 339 ++++++++++++++++++++++++++++++++---------- test/tcoords.c | 6 +- test/tfile.c | 154 +++++++++++++++---- test/tgenprop.c | 46 ++++-- test/th5o.c | 24 +++ test/th5s.c | 28 +++- test/titerate.c | 8 +- test/tmisc.c | 92 +++++++----- test/trefer.c | 186 ++++++++++++++++++----- test/tselect.c | 86 +++++++++-- test/ttime.c | 6 +- test/tunicode.c | 6 +- test/tvlstr.c | 12 +- test/tvltypes.c | 6 +- testpar/t_bigio.c | 11 +- testpar/t_file.c | 16 +- testpar/t_shapesame.c | 3 +- 18 files changed, 799 insertions(+), 236 deletions(-) diff --git a/test/tarray.c b/test/tarray.c index c2cc2791758..06eac90e6fa 100644 --- a/test/tarray.c +++ b/test/tarray.c @@ -2244,5 +2244,9 @@ test_array(void) void cleanup_array(void) { - HDremove(FILENAME); + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY; } /* end cleanup_array() */ diff --git a/test/tattr.c b/test/tattr.c index f77f793ce30..35b42dd6463 100644 --- a/test/tattr.c +++ b/test/tattr.c @@ -247,7 +247,11 @@ test_attr_basic_write(hid_t fapl) CHECK(attr, FAIL, "H5Acreate2"); /* Try to create the same attribute again (should fail) */ - ret_id = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Acreate2(dataset, 
ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Acreate2"); /* Write attribute information */ @@ -388,7 +392,11 @@ test_attr_basic_write(hid_t fapl) VERIFY(attr_size, (ATTR2_DIM1 * ATTR2_DIM2 * sizeof(int)), "H5Aget_storage_size"); /* Try to create the same attribute again (should fail) */ - ret_id = H5Acreate2(group, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Acreate2(group, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Acreate2"); /* Write attribute information */ @@ -541,7 +549,7 @@ test_attr_flush(hid_t fapl) CHECK(att, FAIL, "H5Acreate2"); ret = H5Aread(att, H5T_NATIVE_DOUBLE, &rdata); - CHECK(ret, FAIL, "H5Awrite"); + CHECK(ret, FAIL, "H5Aread"); if (!H5_DBL_ABS_EQUAL(rdata, 0.0)) TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n", rdata, 0.0); @@ -757,7 +765,11 @@ test_attr_compound_write(hid_t fapl) CHECK(attr, FAIL, "H5Acreate2"); /* Try to create the same attribute again (should fail) */ - ret_id = H5Acreate2(dataset, ATTR4_NAME, tid1, sid2, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Acreate2(dataset, ATTR4_NAME, tid1, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Acreate2"); /* Write complex attribute data */ @@ -978,7 +990,11 @@ test_attr_scalar_write(hid_t fapl) CHECK(attr, FAIL, "H5Acreate2"); /* Try to create the same attribute again (should fail) */ - ret_id = H5Acreate2(dataset, ATTR5_NAME, H5T_NATIVE_FLOAT, sid2, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Acreate2(dataset, ATTR5_NAME, H5T_NATIVE_FLOAT, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Acreate2"); /* Write attribute information */ @@ -1121,7 +1137,11 @@ test_attr_mult_write(hid_t fapl) CHECK(attr, FAIL, "H5Acreate2"); /* Try to create the same attribute again (should fail) */ - ret_id = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Acreate2"); /* Write 1st attribute data */ @@ -1145,7 +1165,11 @@ test_attr_mult_write(hid_t fapl) CHECK(attr, FAIL, "H5Acreate2"); /* Try to create the same attribute again (should fail) */ - ret_id = H5Acreate2(dataset, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Acreate2(dataset, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Acreate2"); /* Write 2nd attribute information */ @@ -1169,7 +1193,11 @@ test_attr_mult_write(hid_t fapl) CHECK(attr, FAIL, "H5Acreate2"); /* Try to create the same attribute again (should fail) */ - ret_id = H5Acreate2(dataset, ATTR3_NAME, H5T_NATIVE_DOUBLE, sid2, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Acreate2(dataset, ATTR3_NAME, H5T_NATIVE_DOUBLE, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Acreate2"); /* Write 3rd attribute information */ @@ -1547,7 +1575,7 @@ test_attr_delete(hid_t fapl) herr_t ret; /* Generic return value */ /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Attribute Functions\n")); + MESSAGE(5, ("Testing Basic Attribute Deletion Functions\n")); /* Open file */ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -1563,7 +1591,11 @@ test_attr_delete(hid_t fapl) VERIFY(oinfo.num_attrs, 3, 
"H5Oget_info3"); /* Try to delete bogus attribute */ - ret = H5Adelete(dataset, "Bogus"); + H5E_BEGIN_TRY + { + ret = H5Adelete(dataset, "Bogus"); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Adelete"); /* Verify the correct number of attributes */ @@ -2254,7 +2286,11 @@ test_attr_dense_create(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); /* Attempt to add attribute again, which should fail */ - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(attr, FAIL, "H5Acreate2"); /* Close dataspace */ @@ -4200,7 +4236,11 @@ test_attr_deprec(hid_t fcpl, hid_t fapl) CHECK(dataset, FAIL, "H5Dopen2"); /* Get number of attributes with bad ID */ - ret = H5Aget_num_attrs((hid_t)-1); + H5E_BEGIN_TRY + { + ret = H5Aget_num_attrs((hid_t)-1); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aget_num_attrs"); /* Get number of attributes */ @@ -4408,7 +4448,11 @@ test_attr_corder_create_basic(hid_t fcpl, hid_t fapl) VERIFY(crt_order_flags, 0, "H5Pget_attr_creation_order"); /* Setting invalid combination of a attribute order creation order indexing on should fail */ - ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_INDEXED); + H5E_BEGIN_TRY + { + ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_INDEXED); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Pset_attr_creation_order"); ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags); CHECK(ret, FAIL, "H5Pget_attr_creation_order"); @@ -5886,11 +5930,19 @@ test_attr_info_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); /* Check for query on non-existent attribute */ - ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, &ainfo, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, &ainfo, + H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aget_info_by_idx"); - ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, - tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, + tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aget_name_by_idx"); /* Create attributes, up to limit of compact form */ @@ -5923,14 +5975,26 @@ test_attr_info_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); /* Check for out of bound offset queries */ - ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, &ainfo, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, &ainfo, + H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aget_info_by_idx"); - ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)u, &ainfo, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)u, &ainfo, + H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aget_info_by_idx"); - ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, - tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, + tmpname, 
(size_t)NAME_BUF_SIZE, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aget_name_by_idx"); /* Create more attributes, to push into dense form */ @@ -5976,14 +6040,26 @@ test_attr_info_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) } /* end if */ /* Check for out of bound offset queries */ - ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, &ainfo, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, &ainfo, + H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aget_info_by_idx"); - ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)u, &ainfo, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)u, &ainfo, + H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aget_info_by_idx"); - ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, - tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, + tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aget_name_by_idx"); } /* end for */ @@ -6267,6 +6343,8 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) unsigned u; /* Local index variable */ herr_t ret; /* Generic return value */ + MESSAGE(5, ("Testing Deleting Attribute By Index\n")) + /* Create dataspace for dataset & attributes */ sid = H5Screate(H5S_SCALAR); CHECK(sid, FAIL, "H5Screate"); @@ -6374,7 +6452,11 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); /* Check for deleting non-existent attribute */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Adelete_by_idx"); /* Create attributes, up to limit of compact form */ @@ -6408,7 +6490,11 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); /* Check for out of bound deletions */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Adelete_by_idx"); } /* end for */ @@ -6536,7 +6622,11 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) } /* end if */ /* Check for out of bound deletion */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Adelete_by_idx"); } /* end for */ @@ -6600,7 +6690,11 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); /* Check for deletion on empty attribute storage again */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Adelete_by_idx"); } /* end for */ @@ -6767,7 +6861,11 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) VERIFY(is_empty, 
TRUE, "H5O__is_attr_empty_test"); /* Check for deletion on empty attribute storage again */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Adelete_by_idx"); } /* end for */ @@ -7162,15 +7260,29 @@ attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx /* Check for iteration routine indicating failure */ skip = 0; - ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_fail_cb, NULL); + H5E_BEGIN_TRY + { + ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_fail_cb, NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aiterate2"); skip = 0; - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_fail_cb, NULL, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_fail_cb, NULL, + H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aiterate_by_name"); skip = 0; - ret = H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_fail_cb, NULL, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = + H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_fail_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aiterate_by_name"); /* Retrieve current # of errors */ @@ -7373,17 +7485,29 @@ test_attr_iterate2(hbool_t new_format, hid_t fcpl, hid_t fapl) /* Check for out of bound iteration */ idx = u; - ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); + H5E_BEGIN_TRY + { + ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aiterate2"); idx = u; - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, NULL, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, NULL, + H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aiterate_by_name"); idx = u; - ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb, NULL, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb, + NULL, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aiterate_by_name"); /* Test iteration over attributes stored compactly */ @@ -7460,17 +7584,29 @@ test_attr_iterate2(hbool_t new_format, hid_t fcpl, hid_t fapl) /* Check for out of bound iteration */ idx = u; - ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); + H5E_BEGIN_TRY + { + ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aiterate2"); idx = u; - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, NULL, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, NULL, + H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aiterate_by_name"); idx = u; - ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb, NULL, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb, + NULL, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Aiterate_by_name"); /* Test iteration over attributes stored densely */ @@ -7701,8 +7837,12 @@ test_attr_open_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) 
VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); /* Check for opening an attribute on an object with no attributes */ - ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen_by_idx"); /* Create attributes, up to limit of compact form */ @@ -7736,8 +7876,12 @@ test_attr_open_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); /* Check for out of bound opening an attribute on an object */ - ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen_by_idx"); /* Test opening attributes by index stored compactly */ @@ -7810,8 +7954,12 @@ test_attr_open_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) } /* end if */ /* Check for out of bound opening an attribute on an object */ - ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT, - H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen_by_idx"); /* Test opening attributes by index stored compactly */ @@ -8029,13 +8177,25 @@ test_attr_open_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl) VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); /* Check for opening a non-existent attribute on an object with no attributes */ - ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen"); - ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen_by_name"); - ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen_by_name"); /* Create attributes, up to limit of compact form */ @@ -8068,13 +8228,25 @@ test_attr_open_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl) VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); /* Check for opening a non-existent attribute on an object with compact attribute storage */ - ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen"); - ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen_by_name"); - ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen_by_name"); /* Test opening attributes stored compactly */ @@ -8149,13 +8321,25 @@ test_attr_open_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl) } /* end if */ /* Check for opening a non-existent attribute on an object with dense attribute storage */ - ret_id 
= H5Aopen(my_dataset, "foo", H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen"); - ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen_by_name"); - ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(ret_id, FAIL, "H5Aopen_by_name"); /* Test opening attributes stored compactly */ @@ -10898,8 +11082,8 @@ test_attr_bug8(hid_t fcpl, hid_t fapl) hsize_t dims = 256; /* Attribute dimensions */ H5O_info2_t oinfo; /* Object info */ H5A_info_t ainfo; /* Attribute info */ - haddr_t root_addr; /* Root group address */ - haddr_t link_addr; /* Link (to root group) address */ + H5O_token_t root_token; /* Root group token */ + int cmp_value; /* Result from H5Otoken_cmp */ herr_t ret; /* Generic return status */ /* Output message about test being performed */ @@ -10913,11 +11097,10 @@ test_attr_bug8(hid_t fcpl, hid_t fapl) gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); CHECK(gid, FAIL, "H5Gcreate2"); - /* Get root group address */ + /* Get root group token */ ret = H5Oget_info3(fid, &oinfo, H5O_INFO_BASIC); CHECK(ret, FAIL, "H5Oget_info"); - ret = H5VLnative_token_to_addr(fid, oinfo.token, &root_addr); - CHECK(ret, FAIL, "H5VLnative_token_to_addr"); + root_token = oinfo.token; /* * Create link to root group @@ -10942,11 +11125,9 @@ test_attr_bug8(hid_t fcpl, hid_t fapl) CHECK(oid, FAIL, "H5Oopen"); ret = H5Oget_info3(oid, &oinfo, H5O_INFO_BASIC); CHECK(ret, FAIL, "H5Oget_info"); - ret = H5VLnative_token_to_addr(fid, oinfo.token, &link_addr); - CHECK(ret, FAIL, "H5VLnative_token_to_addr"); - if (link_addr != root_addr) - TestErrPrintf("incorrect link target address: addr: %llu, expected: %llu\n", - (long long unsigned)link_addr, (long long unsigned)root_addr); + ret = H5Otoken_cmp(oid, &oinfo.token, &root_token, &cmp_value); + CHECK(ret, FAIL, "H5Otoken_cmp"); + VERIFY(cmp_value, 0, "H5Otoken_cmp"); /* Close file */ ret = H5Fclose(fid); @@ -10990,11 +11171,9 @@ test_attr_bug8(hid_t fcpl, hid_t fapl) CHECK(oid, FAIL, "H5Oopen"); ret = H5Oget_info3(oid, &oinfo, H5O_INFO_BASIC); CHECK(ret, FAIL, "H5Oget_info"); - ret = H5VLnative_token_to_addr(fid, oinfo.token, &link_addr); - CHECK(ret, FAIL, "H5VLnative_token_to_addr"); - if (link_addr != root_addr) - TestErrPrintf("incorrect link target address: addr: %llu, expected: %llu\n", - (long long unsigned)link_addr, (long long unsigned)root_addr); + ret = H5Otoken_cmp(oid, &oinfo.token, &root_token, &cmp_value); + CHECK(ret, FAIL, "H5Otoken_cmp"); + VERIFY(cmp_value, 0, "H5Otoken_cmp"); ret = H5Aget_info_by_name(gid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); CHECK(ret, FAIL, "H5Aget_info_by_name"); if (ainfo.data_size != dims) @@ -11511,5 +11690,9 @@ test_attr(void) void cleanup_attr(void) { - HDremove(FILENAME); + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY; } diff --git a/test/tcoords.c b/test/tcoords.c index cf84aba98e7..f2bad20811e 100644 --- a/test/tcoords.c +++ b/test/tcoords.c @@ -720,5 +720,9 @@ test_coords(void) void cleanup_coords(void) { - HDremove(FILENAME); + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY; } diff --git a/test/tfile.c b/test/tfile.c index dd72da64d07..03ade923d4c 
100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -213,11 +213,13 @@ static void test_rw_noupdate(void); static void test_file_create(void) { - hid_t fid1, fid2, fid3; /* HDF5 File IDs */ - hid_t tmpl1, tmpl2; /* file creation templates */ - hsize_t ublock; /* sizeof userblock */ - size_t parm; /* file-creation parameters */ - size_t parm2; /* file-creation parameters */ + hid_t fid1 = H5I_INVALID_HID; + hid_t fid2 = H5I_INVALID_HID; + hid_t fid3 = H5I_INVALID_HID; /* HDF5 File IDs */ + hid_t tmpl1, tmpl2; /* file creation templates */ + hsize_t ublock; /* sizeof userblock */ + size_t parm; /* file-creation parameters */ + size_t parm2; /* file-creation parameters */ unsigned iparm; unsigned iparm2; herr_t ret; /*generic return value */ @@ -226,10 +228,18 @@ test_file_create(void) MESSAGE(5, ("Testing Low-Level File Creation I/O\n")); /* First ensure the file does not exist */ - HDremove(FILE1); + H5E_BEGIN_TRY + { + H5Fdelete(FILE1, H5P_DEFAULT); + } + H5E_END_TRY; /* Try opening a non-existent file */ - fid1 = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); + H5E_BEGIN_TRY + { + fid1 = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(fid1, FAIL, "H5Fopen"); /* Test create with various sequences of H5F_ACC_EXCL and */ @@ -243,21 +253,33 @@ test_file_create(void) * try to create the same file with H5F_ACC_TRUNC. This should fail * because fid1 is the same file and is currently open. */ - fid2 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + fid2 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(fid2, FAIL, "H5Fcreate"); /* Close all files */ ret = H5Fclose(fid1); CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(fid2); + H5E_BEGIN_TRY + { + ret = H5Fclose(fid2); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Fclose"); /*file should not have been open */ /* * Try again with H5F_ACC_EXCL. This should fail because the file already * exists from the previous steps. */ - fid1 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + fid1 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(fid1, FAIL, "H5Fcreate"); /* Test create with H5F_ACC_TRUNC. This will truncate the existing file. */ @@ -268,14 +290,22 @@ test_file_create(void) * Try to truncate first file again. This should fail because fid1 is the * same file and is currently open. */ - fid2 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + fid2 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(fid2, FAIL, "H5Fcreate"); /* * Try with H5F_ACC_EXCL. This should fail too because the file already * exists. */ - fid2 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + fid2 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(fid2, FAIL, "H5Fcreate"); /* Get the file-creation template */ @@ -598,11 +628,19 @@ test_file_open(const char *env_h5_drvr) CHECK(ret, FAIL, "H5Fclose"); /* Open file for second time, which should fail. 
*/ - fid2 = H5Fopen(FILE2, H5F_ACC_RDWR, fapl_id); + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE2, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; VERIFY(fid2, FAIL, "H5Fopen"); /* Check that the intent fails for an invalid ID */ - ret = H5Fget_intent(fid1, &intent); + H5E_BEGIN_TRY + { + ret = H5Fget_intent(fid1, &intent); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Fget_intent"); /* Close dataset from first open */ @@ -662,7 +700,12 @@ test_file_reopen(void) CHECK(ret, FAIL, "H5Fclose"); ret = H5Fclose(rfid); CHECK(ret, FAIL, "H5Fclose"); - HDremove(REOPEN_FILE); + + H5E_BEGIN_TRY + { + H5Fdelete(REOPEN_FILE, H5P_DEFAULT); + } + H5E_END_TRY; } /* test_file_reopen() */ @@ -681,6 +724,9 @@ test_file_close(void) H5F_close_degree_t fc_degree; herr_t ret; + /* Output message about test being performed */ + MESSAGE(5, ("Testing File Closing with file close degrees\n")); + /* Test behavior while opening file multiple times with different * file close degree value */ @@ -697,7 +743,11 @@ test_file_close(void) VERIFY(fc_degree, H5F_CLOSE_STRONG, "H5Pget_fclose_degree"); /* should fail */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; VERIFY(fid2, FAIL, "H5Fopen"); ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); @@ -751,7 +801,11 @@ test_file_close(void) CHECK(ret, FAIL, "H5Pset_fclose_degree"); /* should fail */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; VERIFY(fid2, FAIL, "H5Fopen"); ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_STRONG); @@ -784,7 +838,11 @@ test_file_close(void) CHECK(ret, FAIL, "H5Pset_fclose_degree"); /* should fail */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; VERIFY(fid2, FAIL, "H5Fopen"); ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI); @@ -799,12 +857,20 @@ test_file_close(void) /* Close first open, should fail since it is SEMI and objects are * still open. */ - ret = H5Fclose(fid1); + H5E_BEGIN_TRY + { + ret = H5Fclose(fid1); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Fclose"); /* Close second open, should fail since it is SEMI and objects are * still open. */ - ret = H5Fclose(fid2); + H5E_BEGIN_TRY + { + ret = H5Fclose(fid2); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Fclose"); ret = H5Dclose(dataset_id); @@ -822,11 +888,19 @@ test_file_close(void) /* Close second open, should fail since it is SEMI and one group ID is * still open. 
*/ - ret = H5Fclose(fid2); + H5E_BEGIN_TRY + { + ret = H5Fclose(fid2); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Fclose"); /* Same check with H5Idec_ref() (should fail also) */ - ret = H5Idec_ref(fid2); + H5E_BEGIN_TRY + { + ret = H5Idec_ref(fid2); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Idec_ref"); ret = H5Gclose(group_id3); @@ -848,7 +922,11 @@ test_file_close(void) CHECK(ret, FAIL, "H5Pset_fclose_degree"); /* should fail */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; VERIFY(fid2, FAIL, "H5Fopen"); ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); @@ -897,7 +975,11 @@ test_file_close(void) CHECK(ret, FAIL, "H5Pset_fclose_degree"); /* should fail */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; VERIFY(fid2, FAIL, "H5Fopen"); ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); @@ -1101,6 +1183,8 @@ test_get_obj_ids(void) ssize_t oid_list_size = NDSETS; char gname[64], dname[64]; + MESSAGE(5, ("Testing retrieval of object IDs\n")); + /* Create a new file */ fid = H5Fcreate(FILE7, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); @@ -1196,11 +1280,15 @@ test_get_obj_ids(void) /* Get the list of all opened objects */ ret_count = H5Fget_obj_ids((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL, (size_t)oid_count, oid_list); CHECK(ret_count, FAIL, "H5Fget_obj_ids"); - VERIFY(ret_count, NDSETS, "H5Fget_obj_count"); + VERIFY(ret_count, NDSETS, "H5Fget_obj_ids"); - /* Close all open objects with H5Oclose */ - for (n = 0; n < oid_count; n++) - H5Oclose(oid_list[n]); + H5E_BEGIN_TRY + { + /* Close all open objects with H5Oclose */ + for (n = 0; n < oid_count; n++) + H5Oclose(oid_list[n]); + } + H5E_END_TRY; HDfree(oid_list); } @@ -1220,6 +1308,8 @@ test_get_file_id(void) unsigned intent; herr_t ret; + MESSAGE(5, ("Testing H5Iget_file_id\n")); + /* Create a file */ fid = H5Fcreate(FILE4, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); @@ -2870,7 +2960,7 @@ test_file_double_datatype_open(void) herr_t ret; /* Generic return value */ /* Output message about test being performed */ - MESSAGE(5, ("Testing double dataset open\n")); + MESSAGE(5, ("Testing double datatype open\n")); file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(file1_id, FAIL, "H5Fcreate"); @@ -5587,7 +5677,11 @@ test_libver_bounds_copy(void) CHECK(ret, FAIL, "H5Fclose"); /* Remove the destination file */ - HDremove(DST_FILE); + H5E_BEGIN_TRY + { + H5Fdelete(DST_FILE, H5P_DEFAULT); + } + H5E_END_TRY; } /* end test_libver_bounds_copy() */ diff --git a/test/tgenprop.c b/test/tgenprop.c index 9c4495bea16..46bb0ac220d 100644 --- a/test/tgenprop.c +++ b/test/tgenprop.c @@ -194,8 +194,12 @@ test_genprop_basic_class_prop(void) CHECK_I(ret, "H5Pregister2"); /* Try to insert the first property again (should fail) */ - ret = - H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + H5E_BEGIN_TRY + { + ret = H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, + NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Pregister2"); /* Check the existence of the first property */ @@ -218,8 +222,12 @@ test_genprop_basic_class_prop(void) CHECK_I(ret, "H5Pregister2"); /* Try to insert the second property again (should fail) */ - ret = - H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL); + H5E_BEGIN_TRY + { + ret = H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, + NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Pregister2"); /* Check the existence of the second property */ @@ -260,7 +268,11 @@ test_genprop_basic_class_prop(void) CHECK_I(ret, "H5Punregister"); /* Try to check the size of the first property (should fail) */ - ret = H5Pget_size(cid1, PROP1_NAME, &size); + H5E_BEGIN_TRY + { + ret = H5Pget_size(cid1, PROP1_NAME, &size); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Pget_size"); /* Check the number of properties in class */ @@ -1950,7 +1962,11 @@ test_genprop_deprec_class(void) CHECK_I(ret, "H5Pregister1"); /* Try to insert the first property again (should fail) */ - ret = H5Pregister1(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + H5E_BEGIN_TRY + { + ret = H5Pregister1(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Pregister1"); /* Check the existence of the first property */ @@ -1972,7 +1988,11 @@ test_genprop_deprec_class(void) CHECK_I(ret, "H5Pregister1"); /* Try to insert the second property again (should fail) */ - ret = H5Pregister1(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + H5E_BEGIN_TRY + { + ret = H5Pregister1(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Pregister1"); /* Check the existence of the second property */ @@ -2012,7 +2032,11 @@ test_genprop_deprec_class(void) CHECK_I(ret, "H5Punregister"); /* Try to check the size of the first property (should fail) */ - ret = H5Pget_size(cid1, PROP1_NAME, &size); + H5E_BEGIN_TRY + { + ret = H5Pget_size(cid1, PROP1_NAME, &size); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Pget_size"); /* Check the number of properties in class */ @@ -2171,5 +2195,9 @@ test_genprop(void) void cleanup_genprop(void) { - HDremove(FILENAME); + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY; } diff --git a/test/th5o.c b/test/th5o.c index 3e7c80a7eab..027445ec695 100644 --- a/test/th5o.c +++ b/test/th5o.c @@ -49,6 +49,9 @@ test_h5o_open(void) H5T_class_t type_class; /* Class of the datatype */ herr_t ret; /* Value returned from API calls */ + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Oopen\n")); + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); /* Create a new HDF5 file */ @@ -161,6 +164,9 @@ test_h5o_close(void) hsize_t dims[RANK]; herr_t ret; /* Value returned from API calls */ + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Oclose\n")); + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); /* Create a new HDF5 file */ @@ -413,6 +419,9 @@ test_h5o_open_by_token(void) H5T_class_t type_class; /* Class of the datatype */ herr_t ret; /* Value returned from API calls */ + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Oopen_by_token\n")); + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); /* Create a new HDF5 file */ @@ -531,6 +540,9 @@ test_h5o_refcount(void) hsize_t dims[RANK]; herr_t ret; /* Value returned from API calls */ + /* Output message about test being performed */ + MESSAGE(5, ("Testing retrieval of object reference count with H5Oget_info\n")); + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); /* Create a new HDF5 file */ @@ -732,6 +744,9 @@ test_h5o_plist(void) 
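For context on the tgenprop.c changes above, which exercise the generic property-list API, here is a minimal sketch of that API with invented class and property names and error checking trimmed; the second registration is the kind of call the test now expects to fail:

    #include "hdf5.h"

    int
    main(void)
    {
        int    def_val = 10;
        herr_t status;

        /* Create a user-defined property list class and register one property */
        hid_t cls = H5Pcreate_class(H5P_ROOT, "example class",
                                    NULL, NULL, NULL, NULL, NULL, NULL);
        status = H5Pregister2(cls, "prop_a", sizeof(int), &def_val,
                              NULL, NULL, NULL, NULL, NULL, NULL, NULL);

        /* Registering the same property name a second time fails; without
         * error suppression the library will print its error stack here. */
        status = H5Pregister2(cls, "prop_a", sizeof(int), &def_val,
                              NULL, NULL, NULL, NULL, NULL, NULL, NULL);
        if (status >= 0)
            return 1;   /* duplicate registration should not have succeeded */

        H5Punregister(cls, "prop_a");
        H5Pclose_class(cls);
        return 0;
    }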
unsigned max_compact, min_dense; /* Actual phase change parameters */ herr_t ret; /* Value returned from API calls */ + /* Output message about test being performed */ + MESSAGE(5, ("Testing Object creation properties\n")); + /* Make a FAPL that uses the "use the latest version of the format" flag */ fapl = H5Pcreate(H5P_FILE_ACCESS); CHECK(fapl, FAIL, "H5Pcreate"); @@ -935,6 +950,9 @@ test_h5o_link(void) int i, n; herr_t ret; /* Value returned from API calls */ + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Olink\n")); + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); /* Allocate memory buffers */ @@ -1422,6 +1440,9 @@ test_h5o_getinfo_same_file(void) H5O_info2_t oinfo1, oinfo2; /* Object info structs */ herr_t ret; /* Value returned from API calls */ + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Oget_info on objects in same file\n")); + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); /* Create a new HDF5 file */ @@ -1720,6 +1741,9 @@ test_h5o_getinfo_visit(void) int j; /* Local index variable */ herr_t ret; /* Value returned from API calls */ + /* Output message about test being performed */ + MESSAGE(5, ("Testing info returned by H5Oget_info vs H5Ovisit\n")); + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); /* Create an HDF5 file */ diff --git a/test/th5s.c b/test/th5s.c index 77116a63ad3..25ab2906a08 100644 --- a/test/th5s.c +++ b/test/th5s.c @@ -214,14 +214,22 @@ test_h5s_basic(void) /* Verify that incorrect dimensions don't work */ dims1[0] = H5S_UNLIMITED; - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + H5E_BEGIN_TRY + { + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + } + H5E_END_TRY; VERIFY(sid1, FAIL, "H5Screate_simple"); dims1[0] = H5S_UNLIMITED; sid1 = H5Screate(H5S_SIMPLE); CHECK(sid1, FAIL, "H5Screate"); - ret = H5Sset_extent_simple(sid1, SPACE1_RANK, dims1, NULL); + H5E_BEGIN_TRY + { + ret = H5Sset_extent_simple(sid1, SPACE1_RANK, dims1, NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Sset_extent_simple"); ret = H5Sclose(sid1); @@ -1626,7 +1634,7 @@ test_h5s_check_encoding(hid_t in_fapl, hid_t in_sid, uint32_t expected_version, /* Allocate the buffer for encoding */ buf = (char *)HDmalloc(buf_size); - CHECK_PTR(buf, "H5Dmalloc"); + CHECK_PTR(buf, "HDmalloc"); /* Encode according to the setting in in_fapl */ ret = H5Sencode2(in_sid, buf, &buf_size, in_fapl); @@ -3506,9 +3514,13 @@ test_h5s(void) void cleanup_h5s(void) { - HDremove(DATAFILE); - HDremove(NULLFILE); - HDremove(BASICFILE); - HDremove(ZEROFILE); - HDremove(VERBFNAME); + H5E_BEGIN_TRY + { + H5Fdelete(DATAFILE, H5P_DEFAULT); + H5Fdelete(NULLFILE, H5P_DEFAULT); + H5Fdelete(BASICFILE, H5P_DEFAULT); + H5Fdelete(ZEROFILE, H5P_DEFAULT); + H5Fdelete(VERBFNAME, H5P_DEFAULT); + } + H5E_END_TRY; } diff --git a/test/titerate.c b/test/titerate.c index 48e0aa9e979..defed212b7d 100644 --- a/test/titerate.c +++ b/test/titerate.c @@ -491,7 +491,7 @@ test_iter_attr(hid_t fapl, hbool_t new_format) if ((ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info)) > 0) TestErrPrintf("Attribute iteration function didn't return zero correctly!\n"); - /* Test all attributes on dataset, when callback always returns 1 */ + /* Test all attributes on dataset, when callback always returns 2 */ /* This also tests the "restarting" ability, because the index changes */ info.command = RET_TWO; i = 0; @@ -1210,5 +1210,9 @@ test_iterate(void) void cleanup_iterate(void) { - HDremove(DATAFILE); + 
H5E_BEGIN_TRY + { + H5Fdelete(DATAFILE, H5P_DEFAULT); + } + H5E_END_TRY; } diff --git a/test/tmisc.c b/test/tmisc.c index 92b441e6558..0b04b2c5734 100644 --- a/test/tmisc.c +++ b/test/tmisc.c @@ -666,7 +666,7 @@ test_misc4(void) herr_t ret; /* Output message about test being performed */ - MESSAGE(5, ("Testing fileno working in H5O_info_t\n")); + MESSAGE(5, ("Testing fileno working in H5O_info2_t\n")); file1 = H5Fcreate(MISC4_FILE_1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(file1, FAIL, "H5Fcreate"); @@ -1412,7 +1412,11 @@ test_misc8(void) /* Create a contiguous dataset, with space allocation late */ /* Should fail */ - did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + H5E_BEGIN_TRY + { + did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(did, FAIL, "H5Dcreate2"); /* Set the space allocation time to incremental */ @@ -1421,7 +1425,11 @@ test_misc8(void) /* Create a contiguous dataset, with space allocation incremental */ /* Should fail */ - did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + H5E_BEGIN_TRY + { + did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(did, FAIL, "H5Dcreate2"); /* Set the space allocation time to early */ @@ -1762,7 +1770,11 @@ test_misc9(void) ret = H5Pset_fapl_core(fapl, (size_t)1024, 0); CHECK(ret, FAIL, "H5Pset_fapl_core"); - fid = H5Fopen(MISC9_FILE, H5F_ACC_RDWR, fapl); + H5E_BEGIN_TRY + { + fid = H5Fopen(MISC9_FILE, H5F_ACC_RDWR, fapl); + } + H5E_END_TRY; VERIFY(fid, FAIL, "H5Fopen"); ret = H5Pclose(fapl); @@ -6175,42 +6187,46 @@ test_misc(void) void cleanup_misc(void) { - HDremove(MISC1_FILE); - HDremove(MISC2_FILE_1); - HDremove(MISC2_FILE_2); - HDremove(MISC3_FILE); - HDremove(MISC4_FILE_1); - HDremove(MISC4_FILE_2); - HDremove(MISC5_FILE); - HDremove(MISC6_FILE); - HDremove(MISC7_FILE); - HDremove(MISC8_FILE); - HDremove(MISC9_FILE); - HDremove(MISC10_FILE_NEW); - HDremove(MISC11_FILE); - HDremove(MISC12_FILE); - HDremove(MISC13_FILE_1); - HDremove(MISC13_FILE_2); - HDremove(MISC14_FILE); - HDremove(MISC15_FILE); - HDremove(MISC16_FILE); - HDremove(MISC17_FILE); - HDremove(MISC18_FILE); - HDremove(MISC19_FILE); - HDremove(MISC20_FILE); + H5E_BEGIN_TRY + { + H5Fdelete(MISC1_FILE, H5P_DEFAULT); + H5Fdelete(MISC2_FILE_1, H5P_DEFAULT); + H5Fdelete(MISC2_FILE_2, H5P_DEFAULT); + H5Fdelete(MISC3_FILE, H5P_DEFAULT); + H5Fdelete(MISC4_FILE_1, H5P_DEFAULT); + H5Fdelete(MISC4_FILE_2, H5P_DEFAULT); + H5Fdelete(MISC5_FILE, H5P_DEFAULT); + H5Fdelete(MISC6_FILE, H5P_DEFAULT); + H5Fdelete(MISC7_FILE, H5P_DEFAULT); + H5Fdelete(MISC8_FILE, H5P_DEFAULT); + H5Fdelete(MISC9_FILE, H5P_DEFAULT); + H5Fdelete(MISC10_FILE_NEW, H5P_DEFAULT); + H5Fdelete(MISC11_FILE, H5P_DEFAULT); + H5Fdelete(MISC12_FILE, H5P_DEFAULT); + H5Fdelete(MISC13_FILE_1, H5P_DEFAULT); + H5Fdelete(MISC13_FILE_2, H5P_DEFAULT); + H5Fdelete(MISC14_FILE, H5P_DEFAULT); + H5Fdelete(MISC15_FILE, H5P_DEFAULT); + H5Fdelete(MISC16_FILE, H5P_DEFAULT); + H5Fdelete(MISC17_FILE, H5P_DEFAULT); + H5Fdelete(MISC18_FILE, H5P_DEFAULT); + H5Fdelete(MISC19_FILE, H5P_DEFAULT); + H5Fdelete(MISC20_FILE, H5P_DEFAULT); #ifdef H5_HAVE_FILTER_SZIP - HDremove(MISC21_FILE); - HDremove(MISC22_FILE); + H5Fdelete(MISC21_FILE, H5P_DEFAULT); + H5Fdelete(MISC22_FILE, H5P_DEFAULT); #endif /* H5_HAVE_FILTER_SZIP */ - HDremove(MISC23_FILE); - HDremove(MISC24_FILE); - HDremove(MISC25A_FILE); - HDremove(MISC25C_FILE); - 
HDremove(MISC26_FILE); - HDremove(MISC28_FILE); - HDremove(MISC29_COPY_FILE); - HDremove(MISC30_FILE); + H5Fdelete(MISC23_FILE, H5P_DEFAULT); + H5Fdelete(MISC24_FILE, H5P_DEFAULT); + H5Fdelete(MISC25A_FILE, H5P_DEFAULT); + H5Fdelete(MISC25C_FILE, H5P_DEFAULT); + H5Fdelete(MISC26_FILE, H5P_DEFAULT); + H5Fdelete(MISC28_FILE, H5P_DEFAULT); + H5Fdelete(MISC29_COPY_FILE, H5P_DEFAULT); + H5Fdelete(MISC30_FILE, H5P_DEFAULT); #ifndef H5_NO_DEPRECATED_SYMBOLS - HDremove(MISC31_FILE); + H5Fdelete(MISC31_FILE, H5P_DEFAULT); #endif /* H5_NO_DEPRECATED_SYMBOLS */ + } + H5E_END_TRY; } /* end cleanup_misc() */ diff --git a/test/trefer.c b/test/trefer.c index cc0ee2c158f..26a883b1226 100644 --- a/test/trefer.c +++ b/test/trefer.c @@ -205,83 +205,187 @@ test_reference_params(void) CHECK(ret, H5I_INVALID_HID, "H5Dcreate2"); /* Test parameters to H5Rcreate_object */ - ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, NULL); + H5E_BEGIN_TRY + { + ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_object ref"); - ret = H5Rcreate_object(H5I_INVALID_HID, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); + H5E_BEGIN_TRY + { + ret = H5Rcreate_object(H5I_INVALID_HID, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_object loc_id"); - ret = H5Rcreate_object(fid1, NULL, H5P_DEFAULT, &wbuf[0]); + H5E_BEGIN_TRY + { + ret = H5Rcreate_object(fid1, NULL, H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_object name"); - ret = H5Rcreate_object(fid1, "", H5P_DEFAULT, &wbuf[0]); + H5E_BEGIN_TRY + { + ret = H5Rcreate_object(fid1, "", H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_object null name"); /* Test parameters to H5Rcreate_region */ - ret = H5Rcreate_region(fid1, "/Group1/Dataset1", sid1, H5P_DEFAULT, NULL); + H5E_BEGIN_TRY + { + ret = H5Rcreate_region(fid1, "/Group1/Dataset1", sid1, H5P_DEFAULT, NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_region ref"); - ret = H5Rcreate_region(H5I_INVALID_HID, "/Group1/Dataset1", sid1, H5P_DEFAULT, &wbuf[0]); + H5E_BEGIN_TRY + { + ret = H5Rcreate_region(H5I_INVALID_HID, "/Group1/Dataset1", sid1, H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_region loc_id"); - ret = H5Rcreate_region(fid1, NULL, sid1, H5P_DEFAULT, &wbuf[0]); + H5E_BEGIN_TRY + { + ret = H5Rcreate_region(fid1, NULL, sid1, H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_region name"); - ret = H5Rcreate_region(fid1, "/Group1/Dataset1", H5I_INVALID_HID, H5P_DEFAULT, &wbuf[0]); + H5E_BEGIN_TRY + { + ret = H5Rcreate_region(fid1, "/Group1/Dataset1", H5I_INVALID_HID, H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_region dataspace"); /* Test parameters to H5Rcreate_attr */ - ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", "Attr", H5P_DEFAULT, NULL); + H5E_BEGIN_TRY + { + ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", "Attr", H5P_DEFAULT, NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_attr ref"); - ret = H5Rcreate_attr(H5I_INVALID_HID, "/Group1/Dataset2", "Attr", H5P_DEFAULT, &wbuf[0]); + H5E_BEGIN_TRY + { + ret = H5Rcreate_attr(H5I_INVALID_HID, "/Group1/Dataset2", "Attr", H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_attr loc_id"); - ret = H5Rcreate_attr(fid1, NULL, "Attr", H5P_DEFAULT, &wbuf[0]); + H5E_BEGIN_TRY + { + ret = H5Rcreate_attr(fid1, NULL, "Attr", H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_attr name"); - ret = 
H5Rcreate_attr(fid1, "/Group1/Dataset2", NULL, H5P_DEFAULT, &wbuf[0]); + H5E_BEGIN_TRY + { + ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", NULL, H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcreate_attr attr_name"); /* Test parameters to H5Rdestroy */ - ret = H5Rdestroy(NULL); + H5E_BEGIN_TRY + { + ret = H5Rdestroy(NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rdestroy"); /* Test parameters to H5Rget_type */ - type = H5Rget_type(NULL); + H5E_BEGIN_TRY + { + type = H5Rget_type(NULL); + } + H5E_END_TRY; VERIFY(type, H5R_BADTYPE, "H5Rget_type ref"); /* Test parameters to H5Requal */ - ret = H5Requal(NULL, &rbuf[0]); + H5E_BEGIN_TRY + { + ret = H5Requal(NULL, &rbuf[0]); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Requal ref1"); - ret = H5Requal(&rbuf[0], NULL); + H5E_BEGIN_TRY + { + ret = H5Requal(&rbuf[0], NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Requal ref2"); /* Test parameters to H5Rcopy */ - ret = H5Rcopy(NULL, &wbuf[0]); + H5E_BEGIN_TRY + { + ret = H5Rcopy(NULL, &wbuf[0]); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcopy src_ref"); - ret = H5Rcopy(&rbuf[0], NULL); + H5E_BEGIN_TRY + { + ret = H5Rcopy(&rbuf[0], NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rcopy dest_ref"); /* Test parameters to H5Ropen_object */ - dset2 = H5Ropen_object(&rbuf[0], H5I_INVALID_HID, H5I_INVALID_HID); + H5E_BEGIN_TRY + { + dset2 = H5Ropen_object(&rbuf[0], H5I_INVALID_HID, H5I_INVALID_HID); + } + H5E_END_TRY; VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object oapl_id"); - dset2 = H5Ropen_object(NULL, H5P_DEFAULT, dapl_id); + H5E_BEGIN_TRY + { + dset2 = H5Ropen_object(NULL, H5P_DEFAULT, dapl_id); + } + H5E_END_TRY; VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object ref"); /* Test parameters to H5Ropen_region */ - ret_id = H5Ropen_region(NULL, H5I_INVALID_HID, H5I_INVALID_HID); + H5E_BEGIN_TRY + { + ret_id = H5Ropen_region(NULL, H5I_INVALID_HID, H5I_INVALID_HID); + } + H5E_END_TRY; VERIFY(ret_id, H5I_INVALID_HID, "H5Ropen_region ref"); /* Test parameters to H5Ropen_attr */ - ret_id = H5Ropen_attr(NULL, H5P_DEFAULT, aapl_id); + H5E_BEGIN_TRY + { + ret_id = H5Ropen_attr(NULL, H5P_DEFAULT, aapl_id); + } + H5E_END_TRY; VERIFY(ret_id, H5I_INVALID_HID, "H5Ropen_attr ref"); /* Test parameters to H5Rget_obj_type3 */ - ret = H5Rget_obj_type3(NULL, H5P_DEFAULT, NULL); + H5E_BEGIN_TRY + { + ret = H5Rget_obj_type3(NULL, H5P_DEFAULT, NULL); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Rget_obj_type3 ref"); /* Test parameters to H5Rget_file_name */ - name_size = H5Rget_file_name(NULL, NULL, 0); + H5E_BEGIN_TRY + { + name_size = H5Rget_file_name(NULL, NULL, 0); + } + H5E_END_TRY; VERIFY(name_size, (-1), "H5Rget_file_name ref"); /* Test parameters to H5Rget_obj_name */ - name_size = H5Rget_obj_name(NULL, H5P_DEFAULT, NULL, 0); + H5E_BEGIN_TRY + { + name_size = H5Rget_obj_name(NULL, H5P_DEFAULT, NULL, 0); + } + H5E_END_TRY; VERIFY(name_size, (-1), "H5Rget_obj_name ref"); /* Test parameters to H5Rget_attr_name */ - name_size = H5Rget_attr_name(NULL, NULL, 0); + H5E_BEGIN_TRY + { + name_size = H5Rget_attr_name(NULL, NULL, 0); + } + H5E_END_TRY; VERIFY(name_size, (-1), "H5Rget_attr_name ref"); /* Close disk dataspace */ @@ -1920,6 +2024,8 @@ test_reference_obj_deleted(void) H5O_type_t obj_type; /* Object type */ herr_t ret; /* Generic return value */ + MESSAGE(5, ("Testing References to Deleted Objects\n")); + /* Create file */ fid1 = H5Fcreate(FILE_REF_OBJ_DEL, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); @@ -3512,16 +3618,20 @@ test_reference(void) void cleanup_reference(void) { - 
HDremove(FILE_REF_PARAM); - HDremove(FILE_REF_OBJ); - HDremove(FILE_REF_VL_OBJ); - HDremove(FILE_REF_CMPND_OBJ); - HDremove(FILE_REF_REG); - HDremove(FILE_REF_REG_1D); - HDremove(FILE_REF_OBJ_DEL); - HDremove(FILE_REF_GRP); - HDremove(FILE_REF_ATTR); - HDremove(FILE_REF_EXT1); - HDremove(FILE_REF_EXT2); - HDremove(FILE_REF_COMPAT); + H5E_BEGIN_TRY + { + H5Fdelete(FILE_REF_PARAM, H5P_DEFAULT); + H5Fdelete(FILE_REF_OBJ, H5P_DEFAULT); + H5Fdelete(FILE_REF_VL_OBJ, H5P_DEFAULT); + H5Fdelete(FILE_REF_CMPND_OBJ, H5P_DEFAULT); + H5Fdelete(FILE_REF_REG, H5P_DEFAULT); + H5Fdelete(FILE_REF_REG_1D, H5P_DEFAULT); + H5Fdelete(FILE_REF_OBJ_DEL, H5P_DEFAULT); + H5Fdelete(FILE_REF_GRP, H5P_DEFAULT); + H5Fdelete(FILE_REF_ATTR, H5P_DEFAULT); + H5Fdelete(FILE_REF_EXT1, H5P_DEFAULT); + H5Fdelete(FILE_REF_EXT2, H5P_DEFAULT); + H5Fdelete(FILE_REF_COMPAT, H5P_DEFAULT); + } + H5E_END_TRY; } diff --git a/test/tselect.c b/test/tselect.c index 6b63d2bea4c..3dd739c1deb 100644 --- a/test/tselect.c +++ b/test/tselect.c @@ -1025,7 +1025,11 @@ test_select_all_hyper(hid_t xfer_plist) CHECK(ret, FAIL, "H5Sselect_none"); /* Read selection from disk (should fail with no selection defined) */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf); + H5E_BEGIN_TRY + { + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf); + } + H5E_END_TRY; VERIFY(ret, FAIL, "H5Dread"); /* Select entire 15x26 extent for disk dataset */ @@ -5605,31 +5609,63 @@ test_select_hyper_valid_combination(void) /* Test all the selections created */ /* Test the invalid combinations between point and hyperslab */ - tmp_sid = H5Scombine_select(single_pt_sid, H5S_SELECT_AND, single_hyper_sid); + H5E_BEGIN_TRY + { + tmp_sid = H5Scombine_select(single_pt_sid, H5S_SELECT_AND, single_hyper_sid); + } + H5E_END_TRY; VERIFY(tmp_sid, FAIL, "H5Scombine_select"); - tmp_sid = H5Smodify_select(single_pt_sid, H5S_SELECT_AND, single_hyper_sid); + H5E_BEGIN_TRY + { + tmp_sid = H5Smodify_select(single_pt_sid, H5S_SELECT_AND, single_hyper_sid); + } + H5E_END_TRY; VERIFY(tmp_sid, FAIL, "H5Smodify_select"); /* Test the invalid combination between two hyperslab but of different dimension size */ - tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_AND, regular_hyper_sid); + H5E_BEGIN_TRY + { + tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_AND, regular_hyper_sid); + } + H5E_END_TRY; VERIFY(tmp_sid, FAIL, "H5Scombine_select"); - tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_AND, regular_hyper_sid); + H5E_BEGIN_TRY + { + tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_AND, regular_hyper_sid); + } + H5E_END_TRY; VERIFY(tmp_sid, FAIL, "H5Smodify_select"); /* Test invalid operation inputs to the two functions */ - tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_SET, single_hyper_sid); + H5E_BEGIN_TRY + { + tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_SET, single_hyper_sid); + } + H5E_END_TRY; VERIFY(tmp_sid, FAIL, "H5Scombine_select"); - tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_SET, single_hyper_sid); + H5E_BEGIN_TRY + { + tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_SET, single_hyper_sid); + } + H5E_END_TRY; VERIFY(tmp_sid, FAIL, "H5Smodify_select"); /* Test inputs in case of non-existent space ids */ - tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_AND, non_existent_sid); + H5E_BEGIN_TRY + { + tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_AND, non_existent_sid); + } + H5E_END_TRY; VERIFY(tmp_sid, FAIL, "H5Scombine_select"); - tmp_sid = 
H5Smodify_select(single_hyper_sid, H5S_SELECT_AND, non_existent_sid); + H5E_BEGIN_TRY + { + tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_AND, non_existent_sid); + } + H5E_END_TRY; VERIFY(tmp_sid, FAIL, "H5Smodify_select"); /* Close dataspaces */ @@ -6891,10 +6927,18 @@ test_select_valid(void) MESSAGE(8, ("Case 1 : sub_space is not a valid dataspace\n")); dims[0] = dims[1] = H5S_UNLIMITED; - sub_space = H5Screate_simple(2, dims, NULL); + H5E_BEGIN_TRY + { + sub_space = H5Screate_simple(2, dims, NULL); + } + H5E_END_TRY; VERIFY(sub_space, FAIL, "H5Screate_simple"); - valid = H5Sselect_valid(sub_space); + H5E_BEGIN_TRY + { + valid = H5Sselect_valid(sub_space); + } + H5E_END_TRY; VERIFY(valid, FAIL, "H5Sselect_valid"); /* Set arrays and dataspace for the rest of the cases */ @@ -6911,7 +6955,11 @@ test_select_valid(void) error = H5Sclose(sub_space); CHECK(error, FAIL, "H5Sclose"); - valid = H5Sselect_valid(sub_space); + H5E_BEGIN_TRY + { + valid = H5Sselect_valid(sub_space); + } + H5E_END_TRY; VERIFY(valid, FAIL, "H5Sselect_valid"); MESSAGE(8, ("Case 3 : in the dimensions\nTry offset (4,4) and size(6,6), the original space is of size " @@ -13867,7 +13915,7 @@ test_select_bounds(void) ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); } H5E_END_TRY; - VERIFY(ret, FAIL, "H5Sget_select_bo unds"); + VERIFY(ret, FAIL, "H5Sget_select_bounds"); /* Set point selection */ coord[0][0] = 3; @@ -14650,7 +14698,11 @@ test_hyper_unlim(void) VERIFY(ssize_out, (hssize_t)H5S_UNLIMITED, "H5Sget_select_npoints"); /* Test H5Sget_select_hyper_nblocks() */ - ssize_out = H5Sget_select_hyper_nblocks(sid); + H5E_BEGIN_TRY + { + ssize_out = H5Sget_select_hyper_nblocks(sid); + } + H5E_END_TRY; VERIFY(ssize_out, (hssize_t)H5S_UNLIMITED, "H5Sget_select_hyper_nblocks"); /* Test H5Sget_select_bounds() */ @@ -16211,5 +16263,9 @@ test_select(void) void cleanup_select(void) { - HDremove(FILENAME); + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY; } diff --git a/test/ttime.c b/test/ttime.c index 99f38bc7ffd..49ddafdfa77 100644 --- a/test/ttime.c +++ b/test/ttime.c @@ -227,5 +227,9 @@ test_time(void) void cleanup_time(void) { - HDremove(DATAFILE); + H5E_BEGIN_TRY + { + H5Fdelete(DATAFILE, H5P_DEFAULT); + } + H5E_END_TRY; } diff --git a/test/tunicode.c b/test/tunicode.c index bf5f64f1570..27df42d6cfb 100644 --- a/test/tunicode.c +++ b/test/tunicode.c @@ -859,5 +859,9 @@ test_unicode(void) void cleanup_unicode(void) { - HDremove(FILENAME); + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY; } diff --git a/test/tvlstr.c b/test/tvlstr.c index c9204a51f6a..b27b2bede37 100644 --- a/test/tvlstr.c +++ b/test/tvlstr.c @@ -880,6 +880,8 @@ test_write_same_element(void) hsize_t coord[SPACE1_RANK][NUMP]; herr_t ret; + MESSAGE(5, ("Testing writing to same element of VL string dataset twice\n")); + file1 = H5Fcreate(DATAFILE3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(file1, FAIL, "H5Fcreate"); @@ -1000,7 +1002,11 @@ test_vlstrings(void) void cleanup_vlstrings(void) { - HDremove(DATAFILE); - HDremove(DATAFILE2); - HDremove(DATAFILE3); + H5E_BEGIN_TRY + { + H5Fdelete(DATAFILE, H5P_DEFAULT); + H5Fdelete(DATAFILE2, H5P_DEFAULT); + H5Fdelete(DATAFILE3, H5P_DEFAULT); + } + H5E_END_TRY; } diff --git a/test/tvltypes.c b/test/tvltypes.c index c7913503284..d14f70d7e53 100644 --- a/test/tvltypes.c +++ b/test/tvltypes.c @@ -3261,5 +3261,9 @@ test_vltypes(void) void cleanup_vltypes(void) { - HDremove(FILENAME); + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY; 
} diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c index 4ecb09eb2af..af65cda4d24 100644 --- a/testpar/t_bigio.c +++ b/testpar/t_bigio.c @@ -26,7 +26,7 @@ const char *FILENAME[3] = {"bigio_test.h5", "single_rank_independent_io.h5", NUL #define DATASET4 "DSET4" #define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/ #define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */ -#define DXFER_BIGCOUNT (1 < 29) +#define DXFER_BIGCOUNT (1 << 29) #define HYPER 1 #define POINT 2 @@ -1165,9 +1165,16 @@ single_rank_independent_io(void) free(data); H5Sclose(fspace_id); - H5Pclose(fapl_id); H5Dclose(dset_id); H5Fclose(file_id); + + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME[1], fapl_id); + } + H5E_END_TRY; + + H5Pclose(fapl_id); } MPI_Barrier(MPI_COMM_WORLD); } diff --git a/testpar/t_file.c b/testpar/t_file.c index 99ad13c441b..90ae22d098d 100644 --- a/testpar/t_file.c +++ b/testpar/t_file.c @@ -95,19 +95,21 @@ test_split_comm_access(void) fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); VRFY((fid >= 0), "H5Fcreate succeeded"); - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - /* close the file */ ret = H5Fclose(fid); VRFY((ret >= 0), ""); /* delete the test file */ - if (sub_mpi_rank == 0) { - mrc = MPI_File_delete(filename, info); - /*VRFY((mrc==MPI_SUCCESS), ""); */ + H5E_BEGIN_TRY + { + ret = H5Fdelete(filename, acc_tpl); } + H5E_END_TRY; + VRFY((ret >= 0), "H5Fdelete succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); } mrc = MPI_Comm_free(&comm); VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded"); diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c index cbae5e11299..c11f95c29de 100644 --- a/testpar/t_shapesame.c +++ b/testpar/t_shapesame.c @@ -4305,7 +4305,8 @@ main(int argc, char **argv) * calls. By then, MPI calls may not work. */ if (H5dont_atexit() < 0) { - HDprintf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank); + if (MAINPROCESS) + HDprintf("%d: Failed to turn off atexit processing. 
Continue.\n", mpi_rank); }; H5open(); h5_show_hostname(); From a44d32abbcc6ecb17935ff887b0b113480e027fe Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 3 Apr 2023 13:09:08 -0500 Subject: [PATCH 093/231] Minor fixes for Subfiling VFD tests (#2633) --- testpar/CMakeTests.cmake | 1 + testpar/t_subfiling_vfd.c | 134 ++++++++++++++++++++++++++++++-------- 2 files changed, 109 insertions(+), 26 deletions(-) diff --git a/testpar/CMakeTests.cmake b/testpar/CMakeTests.cmake index 26968dedbec..a77535ca659 100644 --- a/testpar/CMakeTests.cmake +++ b/testpar/CMakeTests.cmake @@ -101,6 +101,7 @@ set (test_par_CLEANFILES test_subfiling_basic_create.h5 test_subfiling_config_file.h5 test_subfiling_stripe_sizes.h5 + test_subfiling_selection_strategies.h5 test_subfiling_read_different_stripe_sizes.h5 test_subfiling_precreate_rank_0.h5 test_subfiling_write_many_read_one.h5 diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c index 380c06817d7..0c2bca7a893 100644 --- a/testpar/t_subfiling_vfd.c +++ b/testpar/t_subfiling_vfd.c @@ -30,8 +30,6 @@ #include "H5FDsubfiling.h" #include "H5FDioc.h" -#define SUBFILING_TEST_DIR H5FD_SUBFILING_NAME - /* The smallest Subfiling stripe size used for testing */ #define SUBFILING_MIN_STRIPE_SIZE 128 @@ -897,6 +895,7 @@ test_selection_strategies(void) case SELECT_IOC_ONE_PER_NODE: case SELECT_IOC_WITH_CONFIG: + case ioc_selection_options: default: HDprintf("invalid IOC selection strategy\n"); MPI_Abort(comm_g, -1); @@ -1718,16 +1717,20 @@ test_subfiling_write_many_read_few(void) static void test_subfiling_h5fuse(void) { - hsize_t start[1]; - hsize_t count[1]; - hsize_t dset_dims[1]; - size_t target_size; - hid_t file_id = H5I_INVALID_HID; - hid_t fapl_id = H5I_INVALID_HID; - hid_t dset_id = H5I_INVALID_HID; - hid_t fspace_id = H5I_INVALID_HID; - void *buf = NULL; - int skip_test = 0; +#if defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) + h5_stat_t file_info; + uint64_t file_inode; + hsize_t start[1]; + hsize_t count[1]; + hsize_t dset_dims[1]; + size_t target_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *buf = NULL; + int skip_test = 0; +#endif curr_nerrors = nerrors; @@ -1769,6 +1772,22 @@ test_subfiling_h5fuse(void) file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); VRFY((file_id >= 0), "H5Fcreate succeeded"); + /* + * Retrieve the HDF5 file's inode number before operating on + * it, since it might change after fusing + */ + HDcompile_assert(sizeof(uint64_t) >= sizeof(ino_t)); + if (MAINPROCESS) { + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + + file_inode = (uint64_t)file_info.st_ino; + } + + if (mpi_size > 1) { + mpi_code_g = MPI_Bcast(&file_inode, 1, MPI_UINT64_T, 0, comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Bcast succeeded"); + } + /* Calculate target size for dataset to stripe it across available IOCs */ target_size = (stripe_size_g > 0) ? 
(size_t)stripe_size_g : H5FD_SUBFILING_DEFAULT_STRIPE_SIZE; @@ -1818,10 +1837,9 @@ test_subfiling_h5fuse(void) VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); if (MAINPROCESS) { - h5_stat_t file_info; - pid_t pid = 0; - pid_t tmppid; - int status; + pid_t pid = 0; + pid_t tmppid; + int status; pid = HDfork(); VRFY(pid >= 0, "HDfork succeeded"); @@ -1833,11 +1851,9 @@ test_subfiling_h5fuse(void) tmp_filename = HDmalloc(PATH_MAX); VRFY(tmp_filename, "HDmalloc succeeded"); - VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); - /* Generate name for configuration file */ HDsnprintf(tmp_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir, - SUBF_FILENAME, (uint64_t)file_info.st_ino); + SUBF_FILENAME, file_inode); args[0] = HDstrdup("env"); args[1] = HDstrdup("sh"); @@ -1904,14 +1920,59 @@ test_subfiling_h5fuse(void) mpi_code_g = MPI_Barrier(comm_g); VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); - H5E_BEGIN_TRY - { - H5Fdelete(SUBF_FILENAME, fapl_id); + /* + * Since we've now fused the file back together, the Subfiling + * VFD will no longer be able to work with it. The main HDF5 + * file should be able to be deleted using the sec2 VFD, but we + * have to delete the extra files manually. + */ + if (MAINPROCESS) { + char *filename_buf; + int num_subfiles = num_iocs_g; + int num_digits = (int)(HDlog10(num_subfiles) + 1); + + /* Delete the regular HDF5 file */ + H5Pset_fapl_sec2(fapl_id); + + H5E_BEGIN_TRY + { + H5Fdelete(SUBF_FILENAME, fapl_id); + } + H5E_END_TRY; + + filename_buf = HDmalloc(PATH_MAX); + VRFY(filename_buf, "HDmalloc succeeded"); + + /* Generate name for configuration file */ + HDsnprintf(filename_buf, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir, + SUBF_FILENAME, file_inode); + + /* Delete the configuration file */ + if (HDremove(filename_buf) < 0) { + HDprintf("couldn't remove Subfiling VFD configuration file '%s'\n", filename_buf); + nerrors++; + } + + for (int i = 0; i < num_subfiles; i++) { + /* Generate name for each subfile */ + HDsnprintf(filename_buf, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, file_inode, + num_digits, i + 1, num_subfiles); + + /* Delete the subfile */ + if (HDremove(filename_buf) < 0) { + HDprintf("couldn't remove subfile '%s'\n", filename_buf); + nerrors++; + } + } + + HDfree(filename_buf); } - H5E_END_TRY; VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + CHECK_PASSED(); #else SKIPPED(); @@ -1971,9 +2032,13 @@ int main(int argc, char **argv) { unsigned seed; - char *env_value = NULL; - int required = MPI_THREAD_MULTIPLE; - int provided = 0; + hbool_t must_unset_stripe_size_env = FALSE; + hbool_t must_unset_ioc_per_node_env = FALSE; + hbool_t must_unset_ioc_thread_count_env = FALSE; + hbool_t must_unset_config_dir_env = FALSE; + char *env_value = NULL; + int required = MPI_THREAD_MULTIPLE; + int provided = 0; HDcompile_assert(SUBFILING_MIN_STRIPE_SIZE <= H5FD_SUBFILING_DEFAULT_STRIPE_SIZE); @@ -2169,6 +2234,8 @@ main(int argc, char **argv) nerrors++; goto exit; } + + must_unset_stripe_size_env = TRUE; } if (ioc_per_node_g < 0) { const char *ioc_per_node_str; @@ -2184,6 +2251,8 @@ main(int argc, char **argv) nerrors++; goto exit; } + + must_unset_ioc_per_node_env = TRUE; } if (ioc_thread_pool_size_g < 0) { if (HDsetenv(H5FD_IOC_THREAD_POOL_SIZE, "2", 1) < 0) { @@ -2192,6 +2261,8 @@ main(int argc, char **argv) nerrors++; goto exit; } + + 
must_unset_ioc_thread_count_env = TRUE;
     }
 
     if (!(env_value = HDgetenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX))) {
@@ -2243,6 +2314,8 @@ main(int argc, char **argv)
                 goto exit;
             }
         }
+
+        must_unset_config_dir_env = TRUE;
     }
 
     /* Grab values from environment variables */
@@ -2282,6 +2355,15 @@ main(int argc, char **argv)
         HDputs("All Subfiling VFD tests passed\n");
 
 exit:
+    if (must_unset_stripe_size_env)
+        HDunsetenv(H5FD_SUBFILING_STRIPE_SIZE);
+    if (must_unset_ioc_per_node_env)
+        HDunsetenv(H5FD_SUBFILING_IOC_PER_NODE);
+    if (must_unset_ioc_thread_count_env)
+        HDunsetenv(H5FD_IOC_THREAD_POOL_SIZE);
+    if (must_unset_config_dir_env)
+        HDunsetenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX);
+
     if (MAINPROCESS) {
         if (HDrmdir(SUBFILING_CONFIG_FILE_DIR) < 0 && (errno != ENOENT)) {
            HDprintf("couldn't remove temporary testing directory\n");

From db8f04762c0a50d4d5a67f5e1621e944955483de Mon Sep 17 00:00:00 2001
From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com>
Date: Mon, 3 Apr 2023 16:36:23 -0500
Subject: [PATCH 094/231] Update java logger version (#2643)

---
 .gitattributes                                  |   6 +++---
 CMakeLists.txt                                  |   6 +++---
 config/cmake/hdf5-config.cmake.in               |   4 ++--
 java/examples/datasets/JavaDatasetExample.sh.in |   6 +++---
 java/examples/datasets/Makefile.am              |   2 +-
 .../datatypes/JavaDatatypeExample.sh.in         |   6 +++---
 java/examples/datatypes/Makefile.am             |   2 +-
 java/examples/groups/JavaGroupExample.sh.in     |   6 +++---
 java/examples/groups/Makefile.am                |   2 +-
 java/examples/intro/JavaIntroExample.sh.in      |   6 +++---
 java/examples/intro/Makefile.am                 |   2 +-
 java/lib/ext/slf4j-nop-1.7.33.jar               | Bin 4020 -> 0 bytes
 java/lib/ext/slf4j-nop-2.0.6.jar                | Bin 0 -> 4071 bytes
 java/lib/ext/slf4j-simple-1.7.33.jar            | Bin 15400 -> 0 bytes
 java/lib/ext/slf4j-simple-2.0.6.jar             | Bin 0 -> 15239 bytes
 java/lib/slf4j-api-1.7.33.jar                   | Bin 41473 -> 0 bytes
 java/lib/slf4j-api-2.0.6.jar                    | Bin 0 -> 62531 bytes
 java/src/Makefile.am                            |   2 +-
 java/src/hdf/hdf5lib/H5.java                    |   5 ++++-
 java/test/Makefile.am                           |   2 +-
 java/test/junit.sh.in                           |   6 +++---
 21 files changed, 33 insertions(+), 30 deletions(-)
 delete mode 100644 java/lib/ext/slf4j-nop-1.7.33.jar
 create mode 100644 java/lib/ext/slf4j-nop-2.0.6.jar
 delete mode 100644 java/lib/ext/slf4j-simple-1.7.33.jar
 create mode 100644 java/lib/ext/slf4j-simple-2.0.6.jar
 delete mode 100644 java/lib/slf4j-api-1.7.33.jar
 create mode 100644 java/lib/slf4j-api-2.0.6.jar

diff --git a/.gitattributes b/.gitattributes
index 2ff0dab4b3a..1d1ad38d8c1 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -192,12 +192,12 @@ java/examples/testfiles/examples.intro.H5_CreateGroup.txt -text
 java/examples/testfiles/examples.intro.H5_CreateGroupAbsoluteRelative.txt -text
 java/examples/testfiles/examples.intro.H5_CreateGroupDataset.txt -text
 java/examples/testfiles/examples.intro.H5_ReadWrite.txt -text
-java/lib/ext/slf4j-nop-1.7.33.jar -text svneol=unset#application/zip
-java/lib/ext/slf4j-simple-1.7.33.jar -text svneol=unset#application/zip
+java/lib/ext/slf4j-nop-2.0.6.jar -text svneol=unset#application/zip
+java/lib/ext/slf4j-simple-2.0.6.jar -text svneol=unset#application/zip
 java/lib/hamcrest-core.jar -text svneol=unset#application/java-archive
 java/lib/junit.jar -text svneol=unset#application/java-archive
 java/lib/simplelogger.properties -text
-java/lib/slf4j-api-1.7.33.jar -text svneol=unset#application/zip
+java/lib/slf4j-api-2.0.6.jar -text svneol=unset#application/zip
 java/src/CMakeLists.txt -text
 java/src/Makefile.am -text
 java/src/hdf/CMakeLists.txt -text
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c46bbb10919..8cfa71dcd64 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -226,9 +226,9 @@ set (HDF5_JAVA_JNI_SRC_DIR ${HDF5_SOURCE_DIR}/java/src/jni)
 set (HDF5_JAVA_HDF5_SRC_DIR ${HDF5_SOURCE_DIR}/java/src/hdf)
 set (HDF5_JAVA_TEST_SRC_DIR ${HDF5_SOURCE_DIR}/java/test)
 set (HDF5_JAVA_LIB_DIR ${HDF5_SOURCE_DIR}/java/lib)
-set (HDF5_JAVA_LOGGING_JAR ${HDF5_SOURCE_DIR}/java/lib/slf4j-api-1.7.33.jar)
-set (HDF5_JAVA_LOGGING_NOP_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-nop-1.7.33.jar)
-set (HDF5_JAVA_LOGGING_SIMPLE_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-simple-1.7.33.jar)
+set (HDF5_JAVA_LOGGING_JAR ${HDF5_SOURCE_DIR}/java/lib/slf4j-api-2.0.6.jar)
+set (HDF5_JAVA_LOGGING_NOP_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-nop-2.0.6.jar)
+set (HDF5_JAVA_LOGGING_SIMPLE_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-simple-2.0.6.jar)
 set (HDF5_DOXYGEN_DIR ${HDF5_SOURCE_DIR}/doxygen)
 
 set (HDF5_SRC_INCLUDE_DIRS ${HDF5_SRC_DIR})
diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in
index 1a3fb7bbf2f..b778c426e26 100644
--- a/config/cmake/hdf5-config.cmake.in
+++ b/config/cmake/hdf5-config.cmake.in
@@ -70,8 +70,8 @@ endif ()
 if (${HDF5_PACKAGE_NAME}_BUILD_JAVA)
   set (${HDF5_PACKAGE_NAME}_JAVA_INCLUDE_DIRS
       @PACKAGE_CURRENT_BUILD_DIR@/lib/jarhdf5-@HDF5_VERSION_STRING@.jar
-      @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-api-1.7.33.jar
-      @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-nop-1.7.33.jar
+      @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-api-2.0.6.jar
+      @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-nop-2.0.6.jar
   )
   set (${HDF5_PACKAGE_NAME}_JAVA_LIBRARY "@PACKAGE_CURRENT_BUILD_DIR@/lib")
   set (${HDF5_PACKAGE_NAME}_JAVA_LIBRARIES "${${HDF5_PACKAGE_NAME}_JAVA_LIBRARY}")
diff --git a/java/examples/datasets/JavaDatasetExample.sh.in b/java/examples/datasets/JavaDatasetExample.sh.in
index b299ff207ad..96830763215 100644
--- a/java/examples/datasets/JavaDatasetExample.sh.in
+++ b/java/examples/datasets/JavaDatasetExample.sh.in
@@ -58,8 +58,8 @@ $top_builddir/java/src/jni/.libs/libhdf5_java.*
 $top_builddir/java/src/$JARFILE
 "
 LIST_JAR_TESTFILES="
-$HDFLIB_HOME/slf4j-api-1.7.33.jar
-$HDFLIB_HOME/ext/slf4j-simple-1.7.33.jar
+$HDFLIB_HOME/slf4j-api-2.0.6.jar
+$HDFLIB_HOME/ext/slf4j-simple-2.0.6.jar
 "
 LIST_DATA_FILES="
 $HDFTEST_HOME/../testfiles/examples.datasets.H5Ex_D_Alloc.txt
@@ -222,7 +222,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@
 COPY_LIBFILES_TO_BLDLIBDIR
 COPY_DATAFILES_TO_BLDDIR
 
-CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.33.jar:"$BLDLIBDIR"/slf4j-simple-1.7.33.jar:"$TESTJARFILE""
+CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-2.0.6.jar:"$BLDLIBDIR"/slf4j-simple-2.0.6.jar:"$TESTJARFILE""
 
 TEST=/usr/bin/test
 if [ !
-x /usr/bin/test ] diff --git a/java/examples/datasets/Makefile.am b/java/examples/datasets/Makefile.am index 8b71cedad4c..d4bb6662bb6 100644 --- a/java/examples/datasets/Makefile.am +++ b/java/examples/datasets/Makefile.am @@ -27,7 +27,7 @@ classes: pkgpath = examples/datasets hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar -CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.33.jar:$$CLASSPATH +CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-2.0.6.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-2.0.6.jar:$$CLASSPATH jarfile = jar$(PACKAGE_TARNAME)datasets.jar diff --git a/java/examples/datatypes/JavaDatatypeExample.sh.in b/java/examples/datatypes/JavaDatatypeExample.sh.in index a82d883ebae..fc4a62706be 100644 --- a/java/examples/datatypes/JavaDatatypeExample.sh.in +++ b/java/examples/datatypes/JavaDatatypeExample.sh.in @@ -55,8 +55,8 @@ $top_builddir/java/src/jni/.libs/libhdf5_java.* $top_builddir/java/src/$JARFILE " LIST_JAR_TESTFILES=" -$HDFLIB_HOME/slf4j-api-1.7.33.jar -$HDFLIB_HOME/ext/slf4j-simple-1.7.33.jar +$HDFLIB_HOME/slf4j-api-2.0.6.jar +$HDFLIB_HOME/ext/slf4j-simple-2.0.6.jar " LIST_DATA_FILES=" $HDFTEST_HOME/../testfiles/examples.datatypes.H5Ex_T_Array.txt @@ -218,7 +218,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@ COPY_LIBFILES_TO_BLDLIBDIR COPY_DATAFILES_TO_BLDDIR -CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.33.jar:"$BLDLIBDIR"/slf4j-simple-1.7.33.jar:"$TESTJARFILE"" +CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-2.0.6.jar:"$BLDLIBDIR"/slf4j-simple-2.0.6.jar:"$TESTJARFILE"" TEST=/usr/bin/test if [ ! -x /usr/bin/test ] diff --git a/java/examples/datatypes/Makefile.am b/java/examples/datatypes/Makefile.am index 55ff91f7f42..12e5cb8fadd 100644 --- a/java/examples/datatypes/Makefile.am +++ b/java/examples/datatypes/Makefile.am @@ -27,7 +27,7 @@ classes: pkgpath = examples/datatypes hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar -CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.33.jar:$$CLASSPATH +CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-2.0.6.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-2.0.6.jar:$$CLASSPATH jarfile = jar$(PACKAGE_TARNAME)datatypes.jar diff --git a/java/examples/groups/JavaGroupExample.sh.in b/java/examples/groups/JavaGroupExample.sh.in index 1b84ed36b3e..477357955b8 100644 --- a/java/examples/groups/JavaGroupExample.sh.in +++ b/java/examples/groups/JavaGroupExample.sh.in @@ -57,8 +57,8 @@ $top_builddir/java/src/jni/.libs/libhdf5_java.* $top_builddir/java/src/$JARFILE " LIST_JAR_TESTFILES=" -$HDFLIB_HOME/slf4j-api-1.7.33.jar -$HDFLIB_HOME/ext/slf4j-simple-1.7.33.jar +$HDFLIB_HOME/slf4j-api-2.0.6.jar +$HDFLIB_HOME/ext/slf4j-simple-2.0.6.jar " LIST_ITER_FILES=" $HDFTEST_HOME/h5ex_g_iterate.h5 @@ -257,7 +257,7 @@ COPY_LIBFILES_TO_BLDLIBDIR COPY_DATAFILES_TO_BLDDIR COPY_ITERFILES_TO_BLDITERDIR -CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.33.jar:"$BLDLIBDIR"/slf4j-simple-1.7.33.jar:"$TESTJARFILE"" +CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-2.0.6.jar:"$BLDLIBDIR"/slf4j-simple-2.0.6.jar:"$TESTJARFILE"" TEST=/usr/bin/test if [ ! 
-x /usr/bin/test ] diff --git a/java/examples/groups/Makefile.am b/java/examples/groups/Makefile.am index c5208608a95..a3fb774c5b7 100644 --- a/java/examples/groups/Makefile.am +++ b/java/examples/groups/Makefile.am @@ -27,7 +27,7 @@ classes: pkgpath = examples/groups hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar -CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.33.jar:$$CLASSPATH +CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-2.0.6.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-2.0.6.jar:$$CLASSPATH jarfile = jar$(PACKAGE_TARNAME)groups.jar diff --git a/java/examples/intro/JavaIntroExample.sh.in b/java/examples/intro/JavaIntroExample.sh.in index 41ed6940d98..56b6e572cd2 100644 --- a/java/examples/intro/JavaIntroExample.sh.in +++ b/java/examples/intro/JavaIntroExample.sh.in @@ -55,8 +55,8 @@ $top_builddir/java/src/jni/.libs/libhdf5_java.* $top_builddir/java/src/$JARFILE " LIST_JAR_TESTFILES=" -$HDFLIB_HOME/slf4j-api-1.7.33.jar -$HDFLIB_HOME/ext/slf4j-simple-1.7.33.jar +$HDFLIB_HOME/slf4j-api-2.0.6.jar +$HDFLIB_HOME/ext/slf4j-simple-2.0.6.jar " LIST_DATA_FILES=" $HDFTEST_HOME/../testfiles/examples.intro.H5_CreateDataset.txt @@ -207,7 +207,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@ COPY_LIBFILES_TO_BLDLIBDIR COPY_DATAFILES_TO_BLDDIR -CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.33.jar:"$BLDLIBDIR"/slf4j-simple-1.7.33.jar:"$TESTJARFILE"" +CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-2.0.6.jar:"$BLDLIBDIR"/slf4j-simple-2.0.6.jar:"$TESTJARFILE"" TEST=/usr/bin/test if [ ! -x /usr/bin/test ] diff --git a/java/examples/intro/Makefile.am b/java/examples/intro/Makefile.am index 741f122989f..b6d6a1f3082 100644 --- a/java/examples/intro/Makefile.am +++ b/java/examples/intro/Makefile.am @@ -27,7 +27,7 @@ classes: pkgpath = examples/intro hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar -CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.33.jar:$$CLASSPATH +CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-2.0.6.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-2.0.6.jar:$$CLASSPATH jarfile = jar$(PACKAGE_TARNAME)intro.jar diff --git a/java/lib/ext/slf4j-nop-1.7.33.jar b/java/lib/ext/slf4j-nop-1.7.33.jar deleted file mode 100644 index aa8fc0943b0919f97941745cd623d5c20a0799fb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4020 zcma)9c|6qH8=fIMS&}7@5Rzdm*@bLlUuzVTl--OOV{FYO)YK)}msCW^awD3umWpI+ z2!$*Ok$s&BqhEeA30mL{~jm;uz0`WMLd`=1{_Oq8dY zzO4?($lO5I@<$jW;07g34v?q>1p)x|Gynki&tYad=0*nkHnuWm27d9)=CxoB{iV1C z_HTgwB%HR6G#L`6?f%Youe$SX56_A2S3Y;Y%o>p){XEI(3cjCPl0-$ixzLw*GknEy zF+FFR{m3_q?3l>5>F925RW+I}eotX~E{$$--UCJ$FcQL4<3~?7>((% zA2VNb|6_3elsmJ^{->S9P6OY&HejAg%QS`B7lx{iU#d%D3rh9~p7ZMT-^Y!_%Qo@{ zOT71ai#IGJr+%W*6CoX>MapA68(2e_?;T`P(!SOM6*j(?lNU^uoO%iTkUGn2;F{zU zDVoYnkjl4deaHzY;wP4`qA#{(B`pq2^QI|Fq&bwBjHz{|??s&$T%*e% z8;mwJ+9fl4R#^byx(#_2UecZ#_*##|g9`5Apd)?+D zvfYwflbsoNnxU3pcDwSx-OvOzJRff(m$K6fMKDABa+kH;v@!D`gxgSAj~;A@ODpt1 zQ=sGca!IQDTA=dm2keaM(kD{a>;Wwx{A1955h3H)IL>*j5lhLp#pZt8aeKm@=1S14bp z*4XrnqP&Eo`$PmPp}9fHGw(I8m?|wf9>>8}A#RX7SBnSf82ieOH>Y`A?&{;+F7&bv z!iySly$FJ*XLMt;PRvKaTQa)wdG9>(O>Br=FJ;c?N2R2Tesa`^+}nFn7*fzHVQw#b z%b})*RLHzv<~%(Ue?Y9Oo!}AG&cf=Ma^uIPT6MC+JfqIp;O625i-Cr;>owPEI+LTs 
zT=g{)#)Ci!oaYB0pEN{bZEE|f^G)N<>^6yVs%B>uS+RLnsdjP<)Rd760D4q&J^iF2 zk#l1defVCveEh^IwBH5mZ zXeuH*cwGc+)c<-RemUdH^MxWrxwL;ML^CI}*S128hgs~=;l489QPw~6@Qt-q@yTMI zM+jDy2&nNBNzO|i&H79cDIG4-Hlt&uBI$WUEX=a@FG;^5$Qd|G*6R)S58e?Ev`|63_ zu+{mI=gGBkUIDs7#<_%d`Ictmf-Iz)P9c1sv3*Rzq|{7Bo#`?Otm5tdxOCP5aJCmo zlpfKg#lyOiOw{`f@7^;JyW~*7ZC_p6FIwcBD4`G1hyz}F$Lj>W5DdLsU0-bV&ss_g zjnlM8{>Wisba&FJb6q1kZ`k(Ma>dbpi|CbEB0sW-6B0qIN8jR8tooLUewiMXC_8ZC z5&t01f+YBUjQ(xL05RLM%0VcFJJMy~7&;c;VvI(arEWy#g2nJB?ES?IWe#%{9G$3# zMPbE@h4a#-gp7`Xwn5!6W=%d?=eQ3YwGa+X{PFDQ7S?wSd;EA0dx@AizC}2MA1#U{ zqaug_?!2}pnGl~a@lcR^x0tuUk|e%}U%F0Npge}5F?gh$*4;5yH8-Iyzevyfkq%l z;89|5HAYtiK6Tx5Un2X#B#iYvBaO=ZafckEq`(Th@%g`Bp4WW1)W8mN3Wwm)X8kpi26}e|~qEFwtHs|04ldkcz3ac$X){GLr z8v#*hofT}o4T4v8*GSd&JiB_{E`4NzWGG@w%utX#;6w%-JHP^O?|M&s#_E@L*_=gk z{OskvtcgR@rdd^8XHzhegU1SYPd}Go)u`o4>pRTU9CS@hDC4xE`>1lL53XI`JDcPh zE%b)W@i@G~Li4ESK*raVeXr^YdlUPshn;!NiMn*KG2EQ=Fa>YjMZ-aw4{ znEx@(;y~JjzCxrVtfdaoMpi-EuhDKO$WF&Y?R%S!2jS%J;=NfjJA?Um22*;g%=X^8 z-BpSH3HBWf@ z=e;g(aGS4R6$y&`9H^pg+$@@ZV@Of5eL%4o6Ync7ol_e7^`<|Yo10~%#B&LZR`n{8Eqvrov5D(2 zI0#l@S3>d<`H%Sf(W|^+{8qCsP2?uiwMl-fq7kE~&fOKXHd7LYyX^Cw}6T{|ooS?rwm8ZFo1}l!a1k@Rq0k z4*s3}-CFyv&C3>PF=Z=$NxIX@Y$x5`*0x9|>3>K1?(T|XZ`#=4OKz6Xiol#=4UrTjV4plI0SG)QL(;a<=|1 zr^q#=7@E7|UQ(Tk#H8lm{I@!1Tb*P7o_&6w&u^bS&+|U-`+VQ;^Bo4oFCYVeKp+5d zA!9$lWio&OV252qo0oJ zS<7x6{gFsMnRPh0uLzAFQ}8kwu|J;J^eb5XdWM;L?aGHs1oNkY=Z;CVpEaSTYW*3< zKYFB)C={r&ZN_z~&f2Y75O*JzJ-I8d0>JI)lQ=&qYkZ}Y47aaM>(EZv>bpV8>C$Y) zX6wdeqP|0zg<=o%Owi1$@8ZS)UGwu2vP`Q^3d;>>X^pPV+uRXaF%1<}uH zgm;`po(UsZ@9eZaOx2{498W7QBp;A?HH0J2IG$TLa754BJG=Zf%A?SL_9nYMnjAws zUT>Oz9(7<`bO4fO?byaQO*Oc&Hg_+Fo=ly0EFE+!@st#~v7-BuZYEBNb(k?ucX7+A zl5SqyUtugb>>ir;PVP~P`kNxLh^W%MKO#dBK{e`X(Q80XAt*4z{6%|$7^(e_%Wnr@!Cm_yE$ZZyo-GrGDjc0zo$)pxCAEjm)aS$)-R zTj=?P|0Zh9^TAdbtt<8`8?|m2`n*ng{a-;0N*MHfXa}Ii- zh7ck{AkOH$`nJwr8i~Qa};ke6t?JKg^dvIGLO1(2tON*z{0T z@j58$5bPsKRoq*uG&Q3@IYXddBA}+NEhTq;^C@hhw@R<8x~58Ro9uKtZ1>!&J27xZ zMcs?}qQ^_)W>clB%d4vnfV`*1k>#FmVWp!YcSFa^em>a{KS6e>sq!x%&mD77RvS9QcM~SGf4o_;)k~~= z)BwB#|6)(Uv8f8E)ddsr~={u))py3)pSokineYb~GhMra`!OQ)A zSQ7Uau64alc+fijzV|s}-f_nf%=m zsgC+LH%Zml^G2=NkX!YU2{B!TmNO$y4+@Rx|918X2&WikJ5Rjn9PlZz7+V&iIX>9h zVyC0C=6O#j2b?ac(gMp4ROD97gOC|o`n~Ew|r_P;gB&7_~7-+K>j|UX= zDbMy@R>(}RKAJA)-t6~EL3^2h@zSJ)eMk% zEf*_p(^+9<5~`+Is5Qjyms0{G-g6I+|6GZObq5>MVP zsHHV{96r&$J)x~M@iVRyZ#n3uF!|D2kQc5snYd>2@9OE9Hx5pS zctvZge&Wl+;V^auJO_3N+bL1zQyL7c8Z<_?sx7Uuu}&vF_~MW-t3u@jxjr@f>aqKc^$TCvYCaP0$ar;L16#GnO4_FfK%M*n9WVDo9A|Hvt8M__KaM%<=8K~j^NV1C`Bk{OMEdFo^mVnaM>_DT?kay?`ozEoMO@U6^LOY>4 z!0BhQ*Iy?gYdf9NPs5j%!ZK;%w62=Zs7kRTNx_)Q88Dyq*;1XmAg=Kjjthtm!|H@Y z1meDsCDz_Eq6HzodC4|uF}Rfv!>F_Pkp}vRsLssYsO?M47y2pOs?o^Rd>3$P&n_Yz z;Z|uj^UiJep+*NFP)_QeXO(fuE9Bduc0Q@pH6cGWg3U|$U(>X`9Sr?~-a`~po|hM- z-L+jrbp(Y7oT5U|emmf^#oe^wH^rz0hWH-`S9?_pdepE4#d7LJuiLrNRJ9D;535@) zKbaUhP9=8TZ`Y)jsa8)?9*cYuO@T4a5<9aH66TH%H;Nv72tNY0BkMdb^uM}0|2NUb zR-+V3&Ha~8L|JV!yCq!I&pZP|X6rC0z6~+}XR2`P7?!a9+SqsT8=1|@A_}apc{o|h znRlL@g;y9$197a6JB&SKzdwEh6QS&7*)8W4&(cU7gZ*nfOEIm#@vjdrfSG|cqIpHI zG!(~D|BCoq!K}X$OH*--{8ii+@v?p_OJi|tlzX3lE3Wl}S(=Mu!pt4B5BXcR=CyA% zP7dbVZeD4)71`IwxT1iFKz@`p=i&-=JPUrG=IFX~bnj=;VGkdjk~nYG#xLt)&hO z1auAp1cdrmGiiPqaS>rfC0c0_M+J3RA9_?zvUR>INRs*JR7m;ubp`uZF)X(;Bul&1mgA&F-uD_6Tjlz z4_emxid#!@TihHA-Ky+``i?=?x7`mp%z2!`Bsgo$8+0+SCZ zmt4mlCp;}TH|gnQKxX-9HlP4*==#Jw3#|?BAa!JPz*3x@=wMV(6J;D$8qRk}yapdj zV*@bVRm|WUS$_yVY^ZEp(QNiY1dtOqFgS^(X(bYr$ZR-MTNUEnEuI4U5g3g zl@cc9iMfifs*isWHl@nc8PFl@rCkuk+cxVHFsCDU>?IT)Dp()6F2Ot14?z^$NHdt| z1-eY8b1`tq#@M9{Ceo`nosBlV`qsl4vX6_)nY$veFc|Fg#oAk5&jAO!lJoka-FC-n 
z4R+&=AiLNMy8luP7f|D8zvN5PuCTXbiJTQ^~8u&8SYCu zz?6-b7`Tb2MGr@Gq2^K?2&G5l!p=PaJ6#l=!4;HUhl4pAl4c%(lX%i+^S?>!numP!Hghiq$+8Eb8_DqNsh#BfX z_DsHMPX{v}6HzRk7Hywice%8(xa!sV0#felj*9Gn7OWD*K(Sd6#2(k6Do>vkW$!1- ztw=w5-rx<^hXEb+R-01H)}4fShyVvozpS}xgkA~$akHK7E$dNX&C~vz-Z-n~>;-h# zyoH*yzn?-^(Z2Mq&Yf7@VPDuvBEzLMCmKF^F~$no>WStx;&PC>`Z8?=fu%)U5Gq4e zlhy(SHtc4A=|FSc}C+?o37^F;vOXi+)$=H^%nmtHF<;wB>Ms--eavR}zo# z0U&f*wNSrmR$|J8Z) zTKpl=%orkXcj^Un3yQ3-8Sl*kr3=YR-gU!N##B(xpRg{LNIVihgGe zSJ|KgKmZD?XvRs#T+EmhQ#&qwlN#)@f%=~D1_kFrN3tr*eG;vu61%8kdj&@8`E8{U z7mk+bNvaRT-Z_vyLB;Cdf2YkfM85X+%B0 z?dF#(^IbYn8F!fx&4IXh4Zf4n2z5zVh(G*Il|U<*k$r-)4gU1kQkGc#!e+BH^WV9oc$fO;3p%BOY#Ss>~gI+6<00pP3^f7j+Dl|%$u{L!J`Ap zfu&k3qv=lbWeQ8anL|I>K5^f(9&iA{Sx~#O$pNAB(Qg& ztjQfL$=#QWcW{|?YQd@}`KO2)}>}ywP2Ah(gpE4!A{~go2krj+4>BLb;3hxUuGtx0HHY z8O>^C-WM2MZM6)>_Vjf#hSUj}*W!ybGWL{Skfcd6t0@XLne9PNO*Fgo;=3oY`}Dd` zMX}Y$_w%<-tIcfIE5nbwBgu6(Pw{jffnPtKf8xG!M0nEYZP-h9cX1a~TfLg#xq`!Y zoS-3mQN>%0>ND@Wu*713h_Q573D{)D|( z?FrXvu`4rHK#gW(?x>jcyJ>U-N@(Y!Jvhi4h_05%(Ojb|8*<2u$@YT)^_v=?Yq*IS4)j4E@%B!lf6&s(%36Rb9m#45S;FgD!ka^$oU|W6_Me%{t zOjM#~s7D$Mmp(GFj5RGvgcdl?6w9Z0+ek;wx+#?HZ>pSKqNWo}K3GU-clxrqF#7$ty=5fkoE)X_ym5?> zZjfFKj)l>6b0V5dAW+4hqEEXjeqcG}XqJRt530q(U@s#rNC=^KVl#}Qv|0LM1tjx% zz?Kk1qH};3>=PL%I>D{A@txbj@aZx6V_oHFr_K^0==j4Y@l23OB< z`T2*(eNjhUlIFuO{f4E+=1<~FV(@bE=;tmsEN8zUoi{cJ;{Wgb#L^aor; z624=XCY@FcK7@D(5! z_+=~MP%mQ*u{ayzMp!YhqC-C;rd)?IRw@)~%FjUUs4f$skBkv6DkDQx?hP?kzGn*U zRF#pltC&km^8JR<$(19CsDg)^xzzVLi%-ZJgtdYJ3>Ew9Dn2<%9H_||jRXDFI!PLZ z@*(i%SL{5w6G%bkSA?c;6P}^S%o!6X7q>ia3Y($L;+rGYt6l2MD}#&7RvX9cgSlc1 zy_Hncl~j{XJxIA!%vL+-p*}?QvCbyV(SrTad#iqid%|u|NoQ7}UE^Vk^_tnTFxE`Y zE-vA{H#T`9JkvBhn%JUyvKJhUihAOwg4*=vHNz0t`rZ=XX%HMdUf_p>Ydp7TNQ1`K*C|#Om z@zHNKi}z*Lm?hvz^~kQDDg7+Jc}~<~7~vtZd8uqdvLCGbVBgrb1vrL6j*I8*k1^xr z^l`h3ZDMwLcb=P$cZo7{cG&{Gm;K}-_SH8a8XF-vcx43WTHirpli8U0{;h`=UN#=y z#4S1g<(X}$>>GEoJN|XoH(YkPHRw~R(U+R)%#K2`eh?_);9kt3a;a?K-+R?$h&?7+ z1>O3@Am({K-BKref2iHA_wk?2Yn>kbVO6W!O?8yoX&%0^8} zGEY~acQX44Vl`^oSOY zpGXrNA+U?>%}5?JJ;UZtP~5r7ox1MSmoApw>IXW^6-7Ob9^3~Qgn(!gnkn5kK$C%; z*|5!{ZOnB3ST7!4RvoH!j{TlKxb{3UH5k|xLwcd2C)!!7qn_p?GX;(kdu)&XcN6hg zgCqnJeFYflMpK75e>NFjZm!-DTE7jX4-AXY8aZvnPtnl39PrCF!{u_eTeC}FZpH59 z;;h|GLG=SdRnKI)$+u6=WL5RzIeXw4>%H_;k1{wsNCenahx~$ zX#gO%rzs_>_O2^q)iPW1eV&B_wnMK-_-9_T?C>wVR@t(jc}=tN!JZ;QJrVX^LA61j z4q5L#GVp6B+JNubEU&D;cy^2MNnCFMZl?72?tt88)@feQQnOPOw|EbEWjQd;Kc<85 zecjaU~A%IusOU>ZtGAe&Uz8xwZH&!J?r*lERLasjo)#l@Q z*XXRg2I30R1E7mwYp0X*%=iv|0WMOIQF20w25~?e<)1mlOa(MBHbsqS9E(NdjF%B$ zrBkLgr_E(-mo#@>t0G)LCyis-c%xsbv5BMQXs0|Sv|zTR(vtlbqxhJR$+u_2SO5h) zKxOShFms*33i)!y_Eniy375X923#3Z3siV3xb|}d^F31RL^x8EHR#uLIhZFUQylt_ zY&9bQ(t8g;NkDDVg4whfzHq`$M(D-z_+l*N3imB9q6)plwk)v7M0x!I$cVKA6#^~0 zu=$7V4a#Nt6aNR@p%jnplZ`_8!X;-lPP)k~dnG{lq+XG>bOEn$@OXtbr?2?{`Ld}L zttzE;Pqh?7EHxVhYP45WPLiThMC`jl2yFYIHEWPLc8U8@ZiX;C55v#hcq~{wDO=mUAo`Zych~gf~FwV^kS(x*&Vw; zRYUTiD;>>-N44)M;KP#sqHht+@<%GRDzm^tO`FtoUSY4oC~(@1+}2OSOFKm^5-Vz8 zi!&7jR53`c=}YcF{4?f~(!A!l=~1JFB$JxAm!?!KJirobFdA8w?HLpnM>}cX<3k}d zDqBkMw9Z%>56fbj=!l=bVo;JapQ{(HlvYfSLLyx}oq5ZkaliM(t1d2Uzcn7-N`Mog zi<+xqhqhr8ZDTlyvL?>;Ox6wE;}!j$`ly8Gz(+4JqVGyC+jD_b6Jb=IyrFpk)roqo zn@`ZLtUg~{s-++ysc)t8#UMC?Ta9i!tO`Yn6CygStAnh-7eA-xaeS`S`(9O4S-s<9 z=;bhdRnn*){&xP1YxNTQTJXN>TiphBBfbFr;qhbLkTgJZh;h5p`hJ>`%%1H+iK` zQg%sQ9_kGfQ%0HCN};iuOKEHt3Tk|I-4Q&u`^Mv#2;Yv`sO9%A5;cmCoet+5Vs z){S9VxX2T!*&*o#wFURBjB_Y;Yn@K;WJs~U#bSVE5_5_Q{iH~0Nj~Q8?dLW#&=q}P zuMv>e0Wes%ol7WvX+htPBdHHXtu>?3)jDp^MbFv(#r~&}m!SY=pNZ6kyu5 zV8#&FU^6^j<3F{T#{Sb z>po)cXA_WKG3Udz5#BmU(z3fL9J^erDb_nT>G*uWO63;o9f%g()wXa|ta$4c46+dN zWrvwvIy1+u?IAf-VM$Y 
zMGYy{5ao;)!Cv~U3p4iBUrHf&q?)qgJ%Vw6G;T50uKYa+5^GoYGaDXCx0dE##oEfV>bo#Q02H`Fe@ z=o-1Y78KPhJ2cgT!1d5vO2Ms!^5x1g>+4Z(8`QDb@L?eG{rIxd#(Q7xwa}Wz%sM-Y z><^ta=JHtwk0tbq2@MzSFu7^D4YY^4s4!T~n{ISw>?%Tjnd65EZ(;SjJIE=~(>PuH zNg&70y?2oENOaXI#}(v*sc2j*)&V}sF2`%34ABW$4^vI#unoc4S&GkIA7P0J11ns6 zzxdK5;CutGiG?1Zm%kH-xB!Kl z0Be<}>P#Q~yb7pDAn)>!$`Q>pj>d!jbxWF!r)UBN1_)>j9SDf_|AEiHZO(j!EZx~@jgF}<=S0-AZlX2NPqvy=bEC?1X%$Ok@ z`jORyinW8GmT4_4JXAWHpFXsnKCccNINL+t9VfG$ep+o>eP~?=yzpLAfBrCNLAOzF z%t>ie_W6OEoqX&8oSR^c9~c43MWvk>1Rm-(;1Z?i1!qk`x@ouj#b3octXL};abOILL3 zA7ov48y930QT6miVxtboPdC#6S`w6+iDT&~Y6K78ckorxi7mJG-ZDQSvp=7GYCojA1)V4LeI)kQRK zp&~Koqvw)MSdkNz?DJx!jTOw%hAkMFIH)2kYDq(yOEmh| z=$rCr^ll`wRxi%x!8|<(aYn5hIdS4x9JbKz%&K5kG8t_GTzZ6Uh61$pS?bW^_9nO& z6K;FMMbC|Q;$b8&t3HBp*nHze)br=%&5ClUK%gTzR+Ib@4@%!44y zK-iWfrU^dZ3j?AI0E5(CYd(9lp zR((`6o0L{rk-!;AhH*$@IL>k)wbabYX!h*+2#!W$#1>i>l*8o4yjrI|){D~A!=>FV zMH*5PXrZ>Espt1CIrpqxMVSly0M;(ZQrF0BvcR2H7cc^g_VFt(I$0eZ)qY^P?p}c} z(s1sDhg&bSX7u#KGH#8&glBh$s(<3%ipGY4DyN{8sL6r_yfJh~cFa7*vYk_-`gbZ7 zMZQ+5KE;nx(~W3YO`goNtW96UV-uKErfXK1;dmk`D3h7KRN_)Ok+Yk7`$0L54$rGB zIdHW(Wre076|Vb>cbJM-h0BlYXPaH7KuNpCdefXTU~Pi?ok@I)M<-V`x>8ii)Cx|V z(|8wceol@gKV`;rnRqf8QiMSQS4Z8FYQ&p7orw^EO@hC9xpvp0 z4UM`dw7m+1hGnI4vKmo$6E`f|N=N0fMN%&-t#-9{>OoNNb&8xd`)ca@8O z3J19IzDv4UtFNR9M6&+_51%m#*XAk{J(x`F!YVJF|v+iM*gK?SuX8GUE5u%Nx+3h1F`Jd0hvKXD)A<#3bYedm~SFkd`S zOTqTZ%|PmCOw~;rC2;eh*df9-#!k9GDR+XcC!3;>v7|on%T&z1hGMfo8ryshm2Hhd zWnecJ2s7TZq6|B@eP7t>^V%N6@s& z_#_<80+1-!naLWTJK=GQ$~#&Tr!fcJY2zG8# z@2qC?TGO7I_g+pJK4^TZDUYCX%XHAAhQQm62?Fd2FPK_`P0+LF@S|P7kW!$ z3{&YqaMTbEqvom#{=GMSW^&I5xMjf?3F%xJ-y>CP#xO9>5;((40Rc}fRI5DA?Q;if`c>Yi<}juJx0 z;)(ja4G5f%!xjO&J|ZxiiaHi{Mrv5k-mMYDq1{IXGC>UE&$VkHcGZmO3?Hv#J~|-i z#>KkxP^}q#JcCrNfmAIEP)+qIp3B~1?+A){e~(q1o#2mihHj=mw-jOK0&87ko-1Wl zVP)7Xa3aAwn<}*F1W~^amXzK%k7T&rWs>E?LKRRE^xB(sZ664!Eu7D$zA6jjHeYlG z_3lDY0KyvMH#DXZ)klqau*Z*D(R3Xd*IT+@;)nZHY^|oiqjSfv((oMA`KE+Sm zZx^q+O=63glO9`4j&6^B%PAqWq%p;~bfsuPZrhz(DYb7Y>7;DNWm^#L`BZy&jJ8#< zR>k>#N?8;~+c$3=krF!`b#TsHvLW)kX&?^U<`%|KHPm^I0rkj8&v5H%16@xct2wJF z3Ar?PAuG30FyVx&%ocMOAw=F8#&1b2tF$)b$20E0Y5t%P`oM+6{+pquqoV{@@&?PW zjBxe3uv1BN2h8n?O}9)-0OxG|!tPu_Tc~wi^fPbiQpbviS7r!#nT~j>xi}WrJ^G^U zhgMCECxmm&;-#%7`uC8hLWXyrtxJ4lioV0&(qea*L%3svuLVQ6$A_$4gSJYFp4mh6 zfBZUemmuzfA-b?!eS(KR)RGYVV6x7SKJ-K#0jWZOFj*9!f{IXOi`N0ct;YC)b)Dy= zeAjO!mC}AcUFhhub!ztaD?eww8F8@rj>GWm?XNzy&<8M~`leKs7B=Idlf3KSqvxty z*4b%;2lX~>g~B3Gx9dahSP|@jE_V+JFh%l){gG*yi$hyd*EGm+3&JPPyI?yuQxJU| zZpm?Zd#zl&N8#=JqEnD%&)>2Sq{`aUatrpQ*Q9EXT=mWwo*J4B^)sXvJOM4QjI1;Q zJ1+p7Gi+Agmdm2!wWiP#PpyM+xl^|uBm`xcLVJ$(bMSq0{Qpj?#V+!1^?IOBGl`0@%6;}FPU}! 
diff --git a/java/lib/ext/slf4j-simple-2.0.6.jar b/java/lib/ext/slf4j-simple-2.0.6.jar
new file mode 100644
index 0000000000000000000000000000000000000000..f8913c0a8caf34877b66c316f2c12d3a39cf4a9a
Binary files /dev/null and b/java/lib/ext/slf4j-simple-2.0.6.jar differ
zMOdNB&8R93;18AOW}8#Iko#j@l8sepNQj_rb?nXaL6b`ET9rwmjkkt#2;(%YH`+HviZxi-_NS>eQ%h!`=A!u>Nj8R5iv!xFvjpPOUS($U1Rfta{9# zdRKpnYy7tD(tU+D?YDg%;kS`A#he)MRpw#qY5sC)sxbrogpS!k#nf7VN>5AtuJTl zd!DilsZIobrO~dE;P(IRHH$!8B1)Trm*e-b3H&n9Hr15YGt`lF!+JZo%PzfAD=+3+ zc`?q@Nyf|7QPE`$F`>1@ehN!t!7W@$mjUt7Pui5GQiGcBHBJ7-Tjg3Ht5MMhPSs<`r)iJ*Si&OYIV zIkBm?vvJ*+)PMT<3bJav} zebsShlb4T#csMti^PJ34K;_^@Gf8y{D6N^k8wp<;$x(#@7G(E~Kt8~nHE zbeP<;rr_3b$4kAyvjOb>G&3|rqh z5LF^KRvmEpU3#2bME(>8k!%fHvoOB@^M zEec@J@~mR2cg?G~d_^xy>H%F$@Oo=cMlP(p&|aB!G_(gKJIOxa`>kN_gDE z7bGgAP!>vx1L{o&(?{tiF7oiBPaJ>EE}j^m5xo7Ii`c8HjFrNg+r<6bb>ABwsPrJ7 zE-S9wD{5yVO*rds;Z=8o5&BX=eBb^fk7g2`rH)SL&K3>cxtbXC!uHU95dTwI`!J6qzq7Bh zW1fm3@?7N=kye?3dV}YaG?7=X58`496vBIaetv8{>;JF4vVAMlOq3CT$xdoPLSf( zeDm%>N(~F09a8>77~X9*NTg=qwm^zWg=BK=kY9Jm(<97j0;q`0aH}IBN}3s!q~u8M zGmhKeG`iY6n%)^OB+$*xN@z2B&%aGQ9Kl#MiHHJLGQ*VMvm3Lm#<#a*ay-(j&;AQ{ zf1EzKE}&xK2Wd-0^VjAD_p1WDY>=T!pVo$MjQyXM{HxIF=HdI&9yww-e` zPsrzzNp*|7m6A;feEb@WeYic$s(`HR?;Fr1T7o zb+b*!4W#rN-wt6m){7k;M>Uur70r-}#q!Dp@^WS#-YmwTb09x3B%v=Y(bUjfq|~O+ zp`iS32qpU;S?O_fL8C0HL$XOvp?+WpB_&ah)z#8&SxUdohsgVkxVBnP$KN z9Wmg_olBzNJ0sYSLUvJeZLA zoPrb}Hc z)44QP0LQ{NCWWwRpg2}j_cxaM?SgL{2tmz27|4}Ss@n*{HHGgQ2SM*hP=hYsq?LQf z!MX5lkRYxSk}8+#rbuuqJPJ8PWmcs|r5ckQ4u!|ChM+>~RH2j+t>HL$@LdS^VE>x9 zH6K)X{8)&JKClWEKJ0@l3XdZSA>(zZAt@_>CHnxJ3J*I3QJ-M`gt}%az+;_2@LOio z;L9VQz!il@7lBwo=Bu*)XRHx;D)5jOP%6fjtEEEy^@n>xL!{7S%SqJyqTyP>{gELS z>No|9!can8FSwg8#8SsDXRXyl!W}^&2G(vRW6ehdz7hsP<@hc~t<~DVS7Ja69C0OM zO?BW?%@FE!AO#AVc!u9B_$)7kNDG1^R?hjtY4Ev3h(-#5(Wu{VxQP}5c#)O^)->3{ zwSpTbA!t*?O6V%4N;nj51B9Suk#H!*gTCA%2#3N=Z4fm3!V2iBE&9sw1=-lNc(5WC z7AB&W4HTA4Pf*VtZmA+0pCDm@iz|dd@8Gv7YIO|MRU_+Qu@(lhEG;h}YR0K0%`$=$ u{|xxcRJEK-Z5#=4bz;}ZFQh1$3ymrd>8}SJR0Ki>{Ef085DNf;K>QbV@y^5m diff --git a/java/lib/slf4j-api-2.0.6.jar b/java/lib/slf4j-api-2.0.6.jar new file mode 100644 index 0000000000000000000000000000000000000000..a2cb8020a5afda869b487e2f9d172dcd1e9795bf GIT binary patch literal 62531 zcmbrlV{~oZwk;al=8A3Gwr$%^R+1Ilwr$(CZ97@9le@mX&$;jJd-nd`E9K2n#{4l$ z?Q`_nTPwY_(MPTFQotZ601yxm0Q_l@Dggh-2L%8GKt@DafJQ=AlukxKRzg%nNtsqg z^lKadASlt!Zk--sc!%f~x3e=fG;k`%RR?>pa9*)2(z&wMa$dJ>3{Y3*avjd5=aZHI z3UK*u-fiadBXh=e6qy11;uIKzN*O5@&T7R<4XiaF6>b~IvCN|p1nG{2*f^H74umo8 zOpy2x`l9yU(R?`*?wXW}1L5wY%yEs^_Zx z9PUJhsdj|oJ%B`|G{4VRsw&r3yOE8nY7MMj6JxZj`@B9{axZ zAj-KF-gAA3-{6f-h8f)PKdq^yi;&&xNPb^xa3t)z&|PzM?WglWQQ&lYo1pvmRSaxE z6|AAJQK08L_YtYRz zh9JJWY|oitt#+NbW8>a}AQi0wmHb&#LLCn$*>%tqMDnW&k~nEg$oKwj;;FX7iH~bH zO_32lfVkM~5`=HewqwI4r>VzPmeZZiXMw6|p75RW4lgpgx(EEQGdx->Hmbenm9UH*x5SK{bxMN zzr}O>%?OY`BkUZ_{s!@fz&{X9)~3vse}hB#7o3T!iLLYBp#Ng_*M0x_ujVG!_J7#@ z4Ib`a@J{v?e}jepgZHKV@vdR!~QXt#^Apu`R_NZAOAu~ zLfX#E%*0XDz{uIo(Sz2=+Q7+)>(Oq77Yqyx8BEp{j8qgX>?7a#b3c8CkrASRQx)Er9GeK32$%^Nq84Cl zdc1cS;0~Az7(Deja9$hVAy?8Ce{8T9_#cb#@mA5$_GbYQ0RL_g(EcF)pWOauWd9k` ztmJnmWEGT=o($gkkz_IGBLYJYlzB8P0p;8yh4n_!`NCq7CgnjJ&kWm+E!d8XX0S;A z1HRk?_Q-=5@Kg9mzF(t})2A;Hz5&rauWIB6ZEPh)iLl61lhtaT-)ib;=heT4z({rNN9K#J7h1M8ze}HifUfw(5tw zgelM&p}kzTf3b)#wxqTn6O9R`-s5Txo)&kxxEz8uxLmj;e6m`_#Hb7A+zH$04o(+W z)1G%bW&ag`)ttH=Xk`p)CPz{FfU`U|1|?)z9BTy1;i4=iEZdZ-yP568I_Z3Ix|b#~ z(8M;0vobevZCHKL%8fcafdxm4F(JChc&W{7t3ha{4s2UpD?B%%+CX@d5sf=6QND=W z<*-vETxnN}%ud3#T0!|C4XnF@pt_5@;CL4q-Edm4Wp;SqPda)KH)yJ-ya;vuUKrst zKlCb-T$TnJ_n2d}_A8>1t;S_FdS@@ZGNEBAKlLgJj_wR&Fg6%$hhoEg713iEndZk( zZ6J2uK3GteMIkQ2$}K6aQX1JjZ~%j(N>CP+i>(zX2Md=erDWAOnEha$EdT*+Z? 
zYcwPaJx8qUxL;(ngbA`#iP9N;@hOV!G%#$DQd!tV*y5--wKoq3y`*0wFE)csOj8E2 zkvizyFE;`Ex^@blJQnAvXCEL~l(u-_<=y1Yupg?)aC-+} zx;7>>M-9>BY5XRbYYd`)1|`;5#4A>Gb(|1j3AIL;uB~JBciqg9elC;uXo&;4jgE{M zzmI`^UtN4{toh1nSX8B+{E`Oc_XWb(r2~q+wtdjATpU#K^qb07IZ#Gl$GY`rK+mg8nMtU?|B4bxi;Db zG~}yFz!e7W;wu{B8?g8p4gCT%kQk!yQ=;WeEitbK)kcit&}BVCS}VkaT0hQ_pe;k; zn@!g0@lI^0Acp0zhgNF1`VdJf#m*3PqXrQV4 zroB^zzkc!`3t3HuNlvcMm@oPe9P3q>Mbk^gX@pNI8YdGrprn!XQ;V2VF#h*X8Gh7a z%j$|r>Vg!Ufkx)gMHOae2FY5Jj#71-q-XX!rfFuM*qhIP-QT)p@{@4_+>ctviux>^ zx(9gzZ}86Sk-BSA(J&clrD)KFf<6vAn8tHu zZ8lrX)9937Pb~-1qF6{0$dFz&GD%z#Q_zUF%Gk!|C8W{26_f1xcl0_v^H9*jqeej+ zqQjPr=98DU`gL@l+-YZ%pL<;!=H2*qdzr}y2E^d?OnzQ}cHd`xe$2Sp^?YoW;1TEB zXvWB5EyN2umX1Zk+IDnPBw}FI$6uNHQpVxj$Aw6`P%?>=(Jg#jOc|LfO-91AG!08{ zn5MHC7cH8Q8^Ebc6rZQB%g<4tC^W>R{sc=(=u`GHL8S7aUbizrvn@+A$daPV3Lj2~ zT4ugrjwz#jZX`)#9uZXFIx1*WHkbt}H>TXcj1p7oNR_m-^zv?uMLTO;%_V6>F`VH~ z8^e~-kU*x?*VIm!5t!}T(UzSsZ*%)V;JCV6gXyiJM1;`}X^?7E@ z%!0;xa2y-jSl`Pi5|cN|Jfx9v^0;P%6s2sy|C=X?!=BOH;f| zk!$aYiH(S7>ryfI?yy)+gmE=cbkw{-^Hdn-`T)U;4kHVyeW=TxE$&&JNeLl z2r4J%#qfX?nb%HEE>!YTeW%spBu?1#d~_nrUa5gv(2p8*En3_)dPDh^$){qZX&t=B zd4a5I##01bQTOAYuB+iuni3sl;Ol@P5?0J{(x?_p0|fb}Smm>4B%pwYr=D;?O*6R{ zqab>H(fV$+Bq-dQ^SGCnrxDIA&fux+<#`wXf5*ZRc zM>fG}eTp49EaqOEk%z(=`Y7lVq^Y7AVHml)Wg9XIL7d({WVqnTdjddjoSa5=c1Rx5F^jT!8JlGcq7&#*^?zrSO zu&tu8_cro6?=b_4)fA;*JuuD%%Q4z7MvBf78kwV&hunSa0ufYi^)*7%)MDo@p?Xm!J3PYW^pyM1Mdjq@z>iK&fJXN=HJkS?q z{pp#gFJn}8M8&X+?}g$)}QBz%>3f5^`P&Pt4ULEQPnP z(M{C{%VpEVO;qq!h3PuvOF;LmQ=vo&RG$nb3a<(5DZSafC`MLN;Ds1KoVJ?8GQHSS z->AoYRSelVM>YX0t4NN>Lt3ju2PGe23~)lWFrl$cE7r^2y3o$$%5jpG!Ek8_7bLUi|a{15g z-0!?^lk|k53Fo=SFRe{p7U(fPtOv3;Che(aMVsV8Nr6{>A|=z6C(pAQdD(a8)`=;7 z5%0B_(;F}zp>KXd5Vvt}-OT3s)!e+?VSIR?Vba^xsJE>;;co@|D${@&vonsY=>jq% z)AF<{LH!@tN7^i`zXYAyK-w~ST49O(;vc9}Fe{lnMZNLExHqFbd#HO{o1k7?cb#il zOB>+Jr_P3uJi;#ntCCV{VX(nwOqhM4Gf%^IH6|71RD#oR2})cYgZiK`rklpG!U%^T zx~7dyL0`DcNe{|tStrx&XZv*co^Ek;LCE%p_SqXCu^n)9E$y2Qn}QU$4%otNCmfWZ z$nG5j)I#gg57k}U5|1iibZI6|j+22A#@boHYLeJB2`}okPK*2+2KeO+z9JdMN9|e4*6POwC@x7a;KguK znX6uK9yc$T(1(6+3IVx@m2$^7n=Rf>l_J9u)F9t}S>j&NxAB6+PPQt1oo(uvhwc8V zSgf-(;7@N;B4Ej0o$QX zsL)TxW?~|xAl}TK=SPr6G+DgZXEAK}7~&-Ey;-eBK}rq40ua+=YE~=#a&5%s(gclo z2TkqQC}OJ+tUY`S&@(;f5^4~S2;!tcV=2Jdti82olyprm_mJ2D1r-Gg7UlxVW>Vm# zS6X!xS+r5GQZWg$u=sqC*K;rcq2iywdBH!f4pM$MaXw@I#Ld7gb}Vsj&ct7x zP8$hMuP_rHZBtc6vK*WMmVP${jnFxM7tMbPO*_ZSZVp-SKo}!#@em`7zs9qT(X0bJ zAr1bCy6$*MqBJS<9M5s^9q(;a3fswsv75`nN2?(Z-E(%6=%C)%^}dSotdmQFOX zBqxQ_HNUHNTQOJA1lkTG`<@7HZlk7RphD^jeF5~1r;)mL+{QlJ%E zV10-mdh)|oO@?P8*(b}6<%P8y*Pb~$G-RlSQL3! 
zQ{Cmj_ZVO@=jg~cqiTR12r?G#WJE7-q*wcANFUjY?C)rdTAPP@B%Les@4 z9h=`?FI@o3wd&Ot6d>v|ZuCQLm>;~iec{x#Gn@$b;4s}<{PTnW%ySwBJp|j21OX$j z1JYE06>sZWf_YmsRqyYFe!yLQ%lx_{%gnI5N>aNfVv}D5LzNz{1=1}BQ56e7%knpj zC-rchcW$HwmH%$xfL7;j7dv$4BqKZkw5PBU#&<8fXywu7R)mDiG3?ve4aIS@xV&x=g8~V7@Sm zq0d3?+?-8iu+9<&@O&V;dZ9UZ6O|5maI?UHY`i-HaBcdcd1&)y03|wS?{Rxg?TJTb z`&7xQAZ3WzL~fy_wdlkh&fDNRc{Y*H;ar1&M5uk@EGD4o;6taPv*k z!*kM6{ifhR+68Gsa42^`opVU?1MPWm|iEwa%dq;*dP~TNT-}8Cp+3ZRVvO z<;=DbfK(NwKsc0ltHWruD%K#EG3AWPblv8}I{chAc3XFFuoC?`HF!ybE<3__*_YWm zy4`Uy3?f(@Fo3DceEhvpBY}qLJ&VcsJ4hF7faY>v?P?1nd&@ff`;F+j^SgN{rP{pgJA-4yU%6}{S7=BC?J@$Er8W`w!sH{`;4Iy5!v ztE5R?))c4Gc5QCq(A^P@yRQTI8!FsSyIy!GCF;kXgV>_(po!M&};HMA9s-==93qTz!wniSTUkmFv$a61sg9;TC zg{~t|PV@zzcX1zcYIj8r=p5LWEC;|06QVC<(Zyo-pqXz|{C$Fc9N(KlW)mlRT3nB1 z>U>Bt8dIo^$eL?6M9YG~t{j-Cn&7(vQcnf zMBb|14eJfPTzDMV>3e2-&)!yM$CMEMGM9SzWF(A4^7A*{jS*!QgYNl%iV zRPgih{bG}wQ;2GmO3%+=%Fh0zqV=TD&YMI|e(bAVA|1Is=lDl^%yGyCS!ub^f=0P1 zr@#?x2C2p4%PSvqrZUg$!b?ZE^GB2tCP`_gXj!-|z=njT5bKg+x^F~@3%;Hx_KVwx zSLFEE%=f@I^nsedFTUS+b)SmbbbEYxk8ez-FJ*&Sk13axJ*H-#unk|)8W9p)Ff z10Ol&lKm|FfABanJv=Iw7p5q&bOGgeDTXygNm}rxIs}#lKh4}W5MS+asx7jtZhM^P zEq&nRjIa~sHr&yrC1JhEy769909HtNp=F$7El(97smb?X?FtIJb#{k64(z_(1jMdr>~i&UObwp7+jx;HDO4E9v{y&JbGCAbhyw zetB;8z{+g{*z7>@Y-Kl<#B`>EkI(%s)TCM(w-4${;yLPxNaE25>tKJstXVVjbqZRP z=>E2?auw;9?@k*QLbRY#M?RLkxGt!Wqv(M0EVi1s&aaoKcf?ksBzq%Y{9u@E4LP3g zk_<)KZ<}4`4@Ouq%8$iLFEoHGRo^ODly^%OO}j31aUn4qTOJfNJAJX>^hvfARRsx2 zeZ6zgEIO#aOQM%KsyLdlx;2Z}#VgM#+#oxfI#4Dd-f`gDawxr%H9y{h=OB%gr|X(x zF#s0~I%QsE2j2N4mVTjNANQu;B4(M_Xb-$DnK)%?gtlFfgeNrOmL_+i_cCmJFl2h5K=HX6SxFU!t9J7G)2G#tD<^LOdR8dYIn@7UyG(-PmLsA(rKRMY7 zbsflLq!wu%arU;k7HJl7A{S|ggHr6v15Edt-I2t#9ks4ta39w8&!aI9q^;bIS{bYI zHwI>s8R?nQp2OXeT6S9Q`vAlmxX55ptGjU6szM0JM`_^gtDe4Y>7CT?z>y1Ce|S0_ z$a|h+$rfBVi3g+2PYR^rncXP9lA{MH@lUGqk`ny~y@jI(?8;YMRUX(fU#a)gXUTn? z(AOg6d=(4mz6+92!niIF=VtSW*hzpw^Q8C2sPV?3b)+U=t!&dl0URtThKV#fwdu;p z=JK)!jaM^hH-fyZ0>-Q#Z(LQIRSQNY)#v2~=`E&4jv3hfs0j~C5^uv6Z^t0sZvJ0P z=u@)FUx*pskb)neqMuacKY66~r1S+ktAMTGufDZ|`+ZGleH@Z~l1{X!n7SX8F}@x$ zB;Ity28toy23}rs7I}a^Z+aC0fysRX>P`cma7UC&ZM>&=^*>J)`3e^0hy$E;V zNRF@^mm<;m0HxTw)h`E9PE-P9ucxWl8Y@b}(^=v~zz01V)Y~md| zqS7t6&0;Rm)Z-nyADQumw`vWy`1m-}w!SWgs@(_fIAYE@(yDH23_Y2{s?G(|<#4NKExoP$E<=du?KAwaituq3Kno0ExD|Xnag% z$N_7F1!|SIj5bH#lMLvuSz_In4F3&S1r|1yAY(Q?uBjsTTUzK->CeN1$%#XSa*0`5 z+=MG)rOw#ND0GF*&)`P0So5EE7AaQs*tJE42~94@o%a4InfQ1p^)Q%QZFP5n=8e|z zpC$F%jp{#B)cmI@ONG13HKCI#XFL%~DHJ4hDC$t0ij?bn0dDS+G+7ZOl^wU#LLwa` zn_&QVKEd!~R_G9SR$8aS54tK1-0C%bf!1Rds7r+=_b%n?GM9u3#e2O=&ctkmw+2N z_vmvZvkGq$r{g%50C?|E)2Nnx*n}#p_pf%#!u1NjuH}8*%6mGrbvZ)p+{uy{)(N&j z6LbJ0JxG){fcmLc50JX2qiQ)@hLvxVN@*rY-Q7^Z>s5-wpG$iL++w&(du44 zzu-Tjy;(j=-bq-Yhj61?d*CvwgWTe7WFmwNLU zG^LoFZg*u6@`(5rE%Gg~>2PV*j6-utrJjaF=fiQ9QB`nDjsf!7lxa81=QMx*+8G-SpWzgd7aZaVc4Tj-JgN{WS30`v-?2QH(BmyQZ+UG+%g2Y;H)I>{;w zO4C(!W{i~2pdD5G8kPLMP|0gDa-(;=9jgB)S3ge#h)|SIqJ4dz>2YuS`ca$`$Z4qz~5n5C8z)_$jZX z#X_C(F7djJa-Co*XSz`@APC2-1STsn*JU_0EnUGPi@DdS>hW|$h?d){>R$?KCilcT zJ@?a?{ksPqP#GtV_-g$sJ~ym0Yb)N(L|TT-MA%qdad@`-84*}D90?!Q4KT#Mj2Yfp zSu=uCH+#?R3q7DVqr3s9?bn4%NtO0kwBA2{Wk~q+;M3WaUdO$K*uNKqKe*nIXFH#-i4|r8uCxxsj3Ic9^(1SDtx*|-C26QRmah_(!sLjRzXL&C z7xdzv^M@_p9V`y(6?J{M1nwX^$M(pm-MeiK4|_!?166bIPc;-54yq~cB^fgdIa;S50#L< zPGWWs-2;fhB`MzuAHujDT4wtrI@63k5b)Lj%i;ybZ38y?1TpR;X+{kEYQ}!{U+1N% zbSd98tdio&by0f!2K?)NA#MP!&k;5NKpoHDy)XPrCG>wQLH_G~p{S#UiLLQJWg*`3 zGExKl2;No;!^7qm&ur%+<)s`i4hkS;Wl$2Jusjcn&1afQ^C0o$FAakM`+a!3F-T?! 
z;SlFPOkGWPI@#X0`E+*xE)66G5F9B{a6M2k#(8{ZU~|BA4(hj%ZTscf?||qyOh;8% zv9N*_l=IFo;(o8upJUbeHmopJ)^4CGM0H4D@X#L;Ax0F6|5m^?B4?n^PLI}b(Of=S zOY>MN&sjB{=x%M6%RJ`*wkpRkQ~wI6Me`s$r6*siCBl$QMh|_{>7gMEY4JXnR?nOG z1dHL;fKQinycmL^TJEU~X~e$rq(pkNd(u7eHt=%DA-krTQ%=W*1yccuPi~&A9_ScF zWswV?{1j{>vKho0Kdk2Fpg*GZ(} zRC19Wi@UkB;<P9_G1)+YZX=%TnG z+s}`Z32H4>1@-8IE}|@5Nji)kNr!<BV|1FA-y%xW=rYL;fs%=-f9lX!cX4B=nA z#=~VcBYkpteX{Zn{!6FwER>*{&_EvmY9JDCv1+z!+eW=4WQaN{{~~0wx;;~1&5gUn zrI)^2*K+l_`Dk5ti?Ri1qyn9ll9QQO+8KWL0k`B`R*X6c98=7)wU#GWP6Bm~6*4N)FH zs1`qtp(Duz8Kmd9rJH&&k=VtM&{Ij75mk)98h&X*s)+93fNi9EApvWCVGd&};)>|h zPslq^EJc>mDoXkNoN8f#8`>x+J!I4CGOXeRE=n);+zmaW>h6l zXG{=HZr4B!OnidfL=Dw{GWzOoGaC8-j{Scr(p0sSa70kPupm0DgCmFuAj3?Z zh&l!@v4oh}3TpWU>j5W$!dqO_(@py6nWm@XcOOGeJLoi%PJ#QBEi965@A8XgG{1mp zyc-mg_23h>0MP;HInUOgCtXrH-X%njVtAq+wuJ zkpv2YA@YH2RvtikY|d2~coYRouI7cQ*S=*hi0Q;2BSmY7hmTufzvCwb@s4%On)Xw8N3AYLy0CjWuF z+7y?PI8ULpM#R!zO(GmAh~UOYq}SvY^1^4dPEsb9#u&^FN67;Z>AVD_+x5Wikkq4t z6^$rwAzntju1H^WU7kk9j5Cq|v=N$Jmo*fZ z7HQ*g7wjhD*4h>Z&~b@rny$oa9}}G59`2#3XjQ&^6~;151(-Q+uUWQrm?+iY9g%XB zpUzXUmkC96zA9^S*BNYftG$}x^D3(GwgU=hhtnZ5Lxf-)T4ULT5 zY_8@D5e8cOdcEmWlZ66psM-3MwZ=u2tRW#1(@CykWt#r)`0!oO~4CLAD9Ta5d2QjA~uVDY0(1MmJ~cT}$2tE^cnd z5*jt-ro(!CC628=0w+(*4O^(k z>V$py>P7bsWy8x#7Fa;u85aU;QP}Wf8IGcJ^Ii2)y(KGXk8M*3nPifc2<1=9EJM@Q z+EpX@PI?Im@MCV2xAM>l$d~reloKGbcEx6ky($lLPM}Qe8U?aN>F_qiGV&FfI+;3~K<-*=@E>Hc^z2`@nY3B}vuOqchSY^S+v7b&+3I^P*;(QQE`8k@S; zAd!{YC=Xzlx7{M+>Kky(Wu9)m(;j$C$19M*x6Qk@T^Fchx#{hU*>RC4e(oJF%Y4@& z9S_>L3pR5o(%&5fvi&5%rTR<380Ytn>1QYm-&cFGs`mX(>fA?00A{PRfcH^D)BCB- zWktfSnT0NmyavVx5r#F0C4ntpSm}VuK&4 zfIG&$Ef~K`2!fFP;G2XFA%DE=e&FH7yIcr=$EWJeh18r4=)C==Ie52S@F}D`V-i<> zv~zaOo1Dq4ftJ#^||KNcsN8}n+X?gIRj?$;M^MdQ%?jLO9aM7P^iQl&g3y$QuW49 zjUtENS+^Z%`1Tv^M4COydud1(>6*~sgwmkg;y?+)B?$Uq23%k%UtfBrvgDL-2vLz( z=++sG=ae^1y}~1HtW92GwuPgBTWVYM=IFZP5^|yyU){+D&(Q2WX#>Lo?_dU3TAFrG zL{`nkJJJTpItSLex-+j<@oq9;g`-BPS&-xs45#0vxhAQ$CI9+IvzYy@o$CgtEA!Dp z#-*iY!MXbd`a>DyO)12B{x{HH#YaY)6`ULFPx1L%-Q2$_qW`H1|L=A~M-Ev4g-5Be zqmd8-v`X(6o;tmQp8G&5vkJ1IMm#HP96F;l-A364X4drGuH5}!%`f9NrX~Kb4C(7l zu9M6zv*WL?`+M|XI-TM2i0X{2a>YYu*X$gzPgH9VEv=l%W{ebt(+L9!3?{UKD!GW| z;;q*NcK_*|#Ywo=y|hnTf!L z$K#Tw)sH_K6gX9g-y1lcj^8O)=97{;w%QOMp4x-lq$8OMvXr|CTs^MBI}0ei=}a6@ z?Z1iv5u64JqA?X)1H8Kfx+!c#@sfPk-(GjN-6`P#jNG?GL1lzPJ@{q{lm(*hxl-;x z7u`eA>OlCUpKEdUp|A+54Wk&I_jEaQa5B1pQ-1Fq!2QY~w=#H3$%dJ1^_GCv>sa*QKyFPj?^1s@E|PBy!5 zw2wbN?jN~+G5;uoz%Ruw%!9px+2Z>6b%92Q`nWWOYtRmJg{cvLZ4|t#?8xI!PY9(9 zt6&Zj(5$pbwXEtXrnfg;ZC}6XY;%GubTTez`)YA*<7(hw!_v}6Z^-;JhmB)KY%Yuz zbPLN9!3d*$G>$+K#a3mAp2LJWeySsy8ucV$crEbfZ(i8XACB-rbdV_-8~O4q6@Rqf z{syo_t&7Sr2IvtbsRbx+<8|=HdE?&cWs1q3+oQ#Lhe1yGG7kG4;K6hb>(e@Z1o-8#a`4zWm6wpJTJNd)_WjW^eh_m`d4 z=vd4};wm)bV!3{M$mqZyfDH`BukODmOHsz0K-P#iP$Xf3vSA`s;Z#*q79$!%(5!>@ z&6x(Xjx!EddL7k3dw z$G0%_8<1;1+GQ zaB~Ia#((qR<$V(dZmR}vwn03XwNmz%+5rj%+Ml9tgnA1F@s*~$cMm6V5BEBrgSUs; zC+;yN!ed10aEKBn=O17fvqjSjS=NvU;MwAY=U9iH4hV0DgkTI)pPif*nVnMv$Kp>= zOE^7fdlb1_`Hsfjl-QFKsx(cPz_f@YV zt~xnXcU0J2oN{@n;jK`S?%J+Rj#1Rlu}uV$klbFBOSaXTgXTp6cBxv2%Y_0W1H^y| zQ}|hH!HeMLr7S#Gfv>$-iMmyeaMZY~-2}^&#!Bf*8R-K$H+)=ZVsY^kNdOOwV^#-> z(0E_ab3m2M0I-S%WN_3iFz!T^q9E0o8`DpWO8@`a!ADh>&-?M_%E)0|4x z$#>~ZKZZMn7mj%^#xa88P1R`foqE+M4zjb;5vvl?#mB0f$Y)lfJAtZLI{J-?my08` z%=o#bKG{{Kk*}^S3JPabBcR(^PKe`7!6@REr~_86O-s6r}j+k`wx^IWd)Nw5>NeRF>`rHN^dN~e41mH z2o(`&Il-dg!AN*w7Q>yiSc1}76U4VT5vh+5AB~7*9A6RsLHmT-W#x%+U&foiPc-{P zZqvZfn~q-R;qkIvqf+_^+-{@|7Z#DCe`Jah_uGf#Mtg_-T_%1dAJlH<9VKU z!fUU^+hPZZa(!YqRp*~3nQBD6jR4-Oy_?lxlQPE3y~!KAS$$>m@CxgG-Gzp+)+e)r zwcBq3u&v*Ki`f`}Sb*jX2l=GKK(l?vO>N&Gpw8!)O%5zVD#`(?gMp}#&}1mU?{L?J 
zryyoc0FrZ|ZFP?ho@i_VS}5{Kc3mxzgu1%@?OaQ^e~A&qeY{t){8~nyFDwqPFkBwt z-Y2q2i}2m+Cqqn}J0fv`{6oHG`B@)P{nz(nYn`b$J)B9bA#N1~4iZ$%-ST~V;})S4 zMZ9ZfVbFWl0PZHaA;0{^-qp>yE-!)I1xV*9D-t#q2Rn3-6qa`nsD#raZtq9Lg z7Cm5YqfgZ((}FIrf-jkj;~?F=DYVHcv#fl1nSrtXHRQh0x2aY*1P8xKr#aW22&p{~ z2weh0TtI5xEqWXj&c6_UOgN`LUD(H1SF&;ziqMe1VC{&9W&2PP=lUSE_?ym@@_et^smJfcBOzxdHRC-_{)FyD_}hWQmsb>{U(1s}M_6%7F; zZp08Fji_;mu`K#?k|o6}dKouzP@chuJVfj{PR~pnJ>rTUi zsu$%i-PE@(?~TSXsj*a8+X{ZqX%9Fr7!#$HOLGLuGC~%OG=i-=JdlBM0v-d=8DPYR zmtIr{%=Xxg6Te6g!s4Zb!lLrwZ>t<86l*@H;C;>EnQ`9%v)f;#1ajcujH^RP;f;fg zb3m%0YA5bd?%vT-gGU_O_}iHWH2XG}A9u7K)L)AA?}g@&z!4=k;$0*Z;A0qJp+faT zVS0TEj$4-TD)0X|;I3#f{g8nPP7m#nSO zK0XwXs3sK(i!gnYZuxLNgg_o3RiApTjb}VgaNihARs`C{&g{t-1m1d+<)um0Qx&}T z)s96+`^+Q)Eo4QbM`~un%-7S`(^2N<`wrhH&@NQBB!Pdae`+{863hv;)&$4!dQ#M8 zJ3VXA)nT~cK~q&(`N4YUF&d2wGON&jvhDdbu@Ezps%$>A1SkcnST1AaH3s5L;Mp4)wYK}6?FTs zy%aHods5KT9v0&I5%MWIgP5R#RU&t7f{i*d?KrBG1?DGe(Ltm zl(UE2!;}+!+{aH~XP`)$`czWSj3^sB_~k`RM+^?>vnNqpmz?Ar0uKg;M~pHMlMqVp zzfKyABk$6cxud6GwrjoMe}1Mtb%KRfP*(5p+>Ks5g4*&W{djltf(guff<(=iR-87tkG8-ve{fq=s$AN`pp@)CTq~TvjOeLOgHr)j=n%?+AIVyZ47Z|tUAQA zNrvvsXkw)sU@g1Gx?_kj%DrZCC*UHqxEXub84fQG6VYg^HRG|A8<{zId4w!tt zp1UiA3ST!k+fXx7`j%Mcjk)F8dY;hWVw+ed-fqI1o?)PWk22M0!b!`v#mL5u_?P zyE3|b+?;p*ZTNx5NNU7Y&NcB+9_~+(wftGpbsk|RytJUXZ@ZPov{JeCKC;QGEhrM%D$mtE$!l_j;b69=)yW2APDxn97RE1qcXsbnRG{$jd{D#WZ{ z$}_O)d8(kh;;fK@x#e@LNHWo2WvH4`ekP0Srhqg^AC(B96Ui={7idDyov#C8@;1-x z|1n4W_-%1L`zrSJ3MFfZDyR8|MXVGt9mY=9WM&y1J{ITb)`iH!bK$>2*eJ}g)C0(T5b0?5-P ze$KwQhB%?Xw+wm4C#KuZ8}y8G4|?XU8i?q27B!$(6XFMaU)<`1_h_BYs5o>PE)d5Y zVw(@dVNtaBC&Fk2ZX9+9|Muyx$UTZkfU$gK(jeoc9EQd_EW2&#LS(*OXXJu25LBK7 zS^4cG(&0}LJ>;9n@BhWuJ3vX+WKpBlWxJ}&wr$(C?W!)T%eHOXwr$(Cy3AMKH#6`3 z^ZuDx>)w?&Z^p`uj1@P|Is5FhBglw@erq7n(I7AMT!D9cKnclZKGd-y6!2p(J%#Y2 z*KtpjO6bP5*t9A)y(@l!wz+i7rR4ED0Cp+Gn@`Y4OHCo3dop7dMf!y!NrpHp%p39c zV^Kv`dB*27Sj9I``inv7hW$ZrL3h=bOs&;^SKgoK)PgJjQDz4!&e``PbQ}_98Ldg< zV@ZN|ecMVBJN%|^+wJN_#-h zWCIn3G{gvXOj)U!?8g^V9$&W)m_CA7Y9si+a`X%#h5NL0p#yP4YL2rT-s@=eSKGr|%bNs3zsFh-xYkY)(Bw3PfpMBruse8qCWv9T` zr`Y^CtEBgN8f*PbJzVVU@cU`>x6x^_t|t=~rMXBCqAW?4Ae81qFj-h?h3Vo?lIz?| z0Xg+e0T+RHsReeG7Y>6dS(V7PVR@R2-*p=N@5%F{z%>X|IHuwP5B%GRDp)>Q8IPLN zX7R2@mL*t~KY6%jC~Bta$8YZro>e-4FO=``=AmVg)~#IFI2O|fzv^7T?wo(=x&CHN zdC5TKuqr33VO~vouBRKfr7>L+vA(H+D#$9zU6>b23)*JLH!q^!*~+(f4;I~Gj-XBH z9*jSuz=SoevyGgjw#fE^A~VTJa}?S!DG2Cnu=pbV8$X)Ui|+fjt>i%eYesx$*#D0N z`ai;JsqYu7f1-y<){5&Q@LsUpBdCyQi@>(NY%xqt5NxzD1~(~p1`U9`9I$4?VGa(Mvawmx;H)?V3QE~8o15!C8)0x?80J$W&U zsZalne8tzCCZeCNPh!{0X?Yqcas()6e_o_Lt`*S(0sJKmlD&&6XuH%P?Gm+Xu~%1| zq;+gef>J=&3t#6-o_5JV7fCBcks|Q=0mx2YlB`a&VT|?$Zkc4f;AokvV|gX-Z{MvH z>ACD^1@UA)ud(?vy{OT6!FZz|div#uKW(fQ+%AG(^@;UHgLb~j!anB42^0wWgo6I- zXP5pjdZ{LIDmRF`J|nh4tYX&*x{MhP%%GZ{nI{GtPMZ!JrOY(zPYWw)yRQmA(h+%0 zt>cYir|eEDqm5pM>QnMar*^7R3GpHx@5#tJ=uzk21&uBju(_lGUGRAsxg|LP4B1Nb zoG?LZ1gqvMrEb@i@9L)GJCQ8bhK;Sw<_qJmMZzOO>s8KsihpF3o0lHiMmxD7yDP8l z4zE&>LsN+g`ibjJ&TZ;#Zc?o@v9NKM5e&uRbLdBy0E|Tk_MFT!|FkIBKb*-@zIG)G zm^E3r{n>VxFyV}uK#0_`PkShrW5M{(2w?dL2SzAKpJJisHm@jIZ?$@4z zKH2LZ23Q3YZpQSz7^J#gZTJ)iy0j%j4j072cl*D4Z-Ior5ZaO*(wL@6V%+@)uBlI> zw8U6<5Q1w%%Z@!H3OuYyAJf5GsV=6n-|~cZ@Wm5|1Q{gmQi%BDxc%H+19T7riKGzO zxcW24(65wb=qh=*hK_3uJxZtMhv`veYwlTJUWB~z8)kc)EMC}fc1cX^p#F%>!O-~@ zr*yA!6e?zb+}e~--mlP&tc5k1eiQJH=^_@H4qjU+(pRhO0=6+8lb8$2RDTb~*+VJ$Xk zO+&2=V0PG$=Di7Du$^3Z!(zUi5UjoTtm|xa+4slKJ4gV{8U-I5&_A)LIu$leYdGFJ zxwr3P;OJq3l#nroau)kQ?vR4JnXGFa=4kg&HXeN_{;3v|*An`I>(D{eBBw1p{XUFn zq>r5Hn`UK#D|q_FTv77yibS==!iaKtDg1}?acHGnxWi1A2KH?N>pZFu7g(9G~qyPriy5+ zXD0}lVX$luhGx3HvT~puHgqkfGn$py#f+pOhLo9ZsrNr91|Pf<0;m@puQvD>@@7hC 
z1-}tbm@3+N4HRCByzlCs$}6j*iOPa{D(A1G3F%tJa?wzmJ3@2mLo3_<$81wRZa>eU z6O{*koQq2ezHUu0j&nR{1XZFkupsg1o|1K1FOCQ(A3oVEHsrF3flwue9$UZ0Pwh_sy{Y_H3m>d z>0%6-?ht5e+8cnXPaNN`3OEna zAcPV6;#|~yX|BBRB7f7y{KHxX30FBvS(q=N#w49M&K&S5rbW;{j|Ioce~A7i%b>b zXDKz!thXY+>Y9-l$H}l*hleqIp!GdUT}m}n_ZH*znQ$#Qn@^G6X0=0~I_)>((&qlL- zq1=|&JaW>2mVbsS+v`HOZKfmpxTAHuG~MP}U|gT(c@g}JBdSb+H!Ch2<$|v4;)pcb zFt|wS(8B5Wbc^Yo$IpXKr%cg7qD=+_I*(xFkL?p&kk6T4fmWFJD7{bjMd{irr%R~& zb-dZPqHW%TEjT@!xvZO!eKM*zoxx_2UTODTlJ!)P5ko5*U^P#Gb>i18bVat&TVcmO zxVG6ML0~HuKwrKCcHmqjAiM!b{>!7BeVxZgvN2{wG7#tAE;%AX5H~F^;MW;~JbF{D zprt-Rr#?YEB0&~g^vEL`?`uF5JW6TNkt~AeG+}0*_th_Go^a2eR-}9T`S)zJs2zyn zbz=JnMO3}C8b)MokT001IoT_UDv%Rd<`dan(gQNXtS)k0D(P+Bd^~wa0<950rCCx} z3PnePiW36;Bo4AMSyb6~=xJi1s({c6vCxhlN%UAseFDpX)4JQ)07oczJm;-Zz}HZyRUKf9<-T?ArLVLHqcFw7I&_n+iSWeW?F zkvs4k`POCCV0YColMbiFT#P0t@yeiWmtguwkhr3Ou-901rjjWqj(~88`nI6|l9T4K# zq-{MGLk(G*_O%j>tfH6GFZ4~8?KTR)#H;1fE@tHv0)Peb%7Fb)#T4KJC9o6_5Wyh= zlmdVOePhC6_{6mKzP2JvU7WYZwklBEOr77mp5D5ix?V0)`0JDxF;G0k z<;4Y4pjGnZXU?znYcQ?waM_7uOVO3BAcX}X{zgw^ZH`ctCsuYV)hNVJK(CBO?|pQ2 zbstWBMCaxz$Gm;43U%$9Awlx5)stgIBbTSnW4wbgF~yNa%QdU1$8e}rD2-BKyaRn0 zh(TMK*&x6?%Xg<`E?E$Bp>N|1XQuZA7>WQ9QBE<9eV42MotM=6E*Cqbk@)Tim#>`r zj25^2JAX|`EgO+JyCe^8wo^x=c+RsSOWx2K^dkdDx&dwv-sAus^YrXW*xrBcho3{Y z1l)|j+7UW62g;JH^(lkjtqpa=&Fm4N9eXmI`H;HN1r6o#*h&OK`thnuQ-s5TAcb|_ z5tY3q>dOiw{-jK;>yJc@(n$Ht(e#tUVBA~%L}v|{rF_{5kV??~$_Io*=i9Ek`}-^2xoI$$yWaJeRYHo( zL;c3wXFf-cB%}xV#5UIi2D$~-iD2hHxdbJug*=oeB_=`+emKhJe8AY380dR~aCf-` zCovL&Dt?_o2#of5IEtq=Vl^&CMy>&HIEVhiD+q~0r)2pg4zL@of*ayd~WNgK=zw4kO>v!1?IzW8@G(p6`mHfzZP7GZ+& zUT3t71XzbALOuLz#V7LhSvs0&C_+=^uqfGD5}rBb>vAm>3EjC+d5-VelyUOUw*JJw zS^<_0bvj1Pc7`jb63zg%q1F!PK(Gt`Y);d>+J|>0ifSyymNttwg)^^LSxYC%H)dtz zxhxWf(j%Az=iqI+$Z(;yWO;dqEZVfdX}aZ6D>A+r4GL)mx51|J?lgkFo-p>N%ORCB z$iHtlXiT!x4O;<5-KT@&-Y=U93^!cyh}r4%m3|~9dNlV-d_C&h4eWPlIV={0hlDQ2JgQfGFteIU zS$GrZI6lv0J?Q5mHwMbk4)RuHid_C}79=om0-XgienLh5UJ84h*%CC&-yw2Sg)JJC zPih;!l5SvzT#d44;A7ybJo$r3GP@`C)|5l_hIRDqp<|~qPA?ET%n%uF%LGIf)JEab zS|c8JI~_^ae2{biE8^&eS@yV+DazoJ0n0Px^a@i6(=|Lzjj~-B)0O|wU`m+{5gU&d zyMWnIY{D}c-FR|x8Ws2&0%33od;|n4?}x&bU&CfzJmS4r(uy2KuL zFS)ce%4Q~Cb`$}!MRJiOQQcFsCTZ_m3vH_=(?My#wMqbNdWX1WX%F^w8f=kJt9WMI zTmmeTK$23%BXvV8ZjysL6^M}_&!G6^j{6gHB^kvo6!>#WK z_Zyzha=aoW^$76o&&wQ3k~LS3yEb3XxG`C0m-esCZQjn;M#8IE zFvL~N+X;onD~1L~qZTf4Axg_wxo;Q1%}@E061sYW+l&?~2%EwU|9@z!W(O|I7HE-QOi&n*?Xw z8Q1v*8=|mvmc`xxcX0flcH}$~;+_q|z}N9_C)^G9vNLnM+EvjC3b@Aj47eZAG1)+O z;d`D5aj$04r}>^B9`}+rUV*D#+F^gSVTW{Lo_PcK0uB6Za>Xqy;IZG)N`KI_o&a0Ay>FV|zdh>Nuzf$>1%BUYjG~l9S^1D35=Svbe_p;7+4ob1O1# z;UN!)-fKy>PxK0EO|t^fde?V{$N$k`&R9XPvu)+D=(+DI%EO~|_0NjmPc!az0nXGH%Z|>Y zd*?9h|MyS{dyi<&7qx;>(Af@&y=)kic(a~38?2qI-Y7z9|E}tZHlya{?QHimJ*&R? 
zEwt6Rm$^6LgdJ_Hil1j$P)D>_k6dYJzShIAFjf`bk?Y+c3M2KWq9kXs>i;_;s|W@7 z??qmrSyn4@!p1%YF*_V9B;-$&H6qiHKf7NCaL;)uAD@Q)QR##*G3M3&tyfL|ovZ%u zaLK>(p#J3!3g|n0%l8Qh@*C;fIvU&mtB593QTrF-H|ewAR6R++a`0DS(jFj%byydl zSc1hG@V#$PL3BTZ@z3SJ3mTG7N{<eYX*~B^g%peO!Cd7FR|0$z3jK#s!by0HYLZyXa|T z;R_0gWr%QYNr<5F$o&hLPF>3;CH8udtyHuNm7M{bp9>~XrY5j1v-SKpRMhCf8RirO zqt0|ERqi;Toj)23i756Gg9)9kvCkk=8>Z_NiBEs5EOzRIV!oj#=h)xSW^EG*Os_%K zU1u_4{)Nx9Z34asrtK%iBBj}Z-gymvJdiAkRoKMhB1P`92mNw9Qqzvb)h>V6k5ZMmTuma;dXVKRNRHG;oBrvf-x zEk;~C`MmA?D)2;8W1G>b0P<4y~*|r)kS-kK(N^li87C|$rvb=8+?98~ z%3EGzDR{&AO##IcESynwK^=D}CY~gMRN%tvzhe}&$-aD^@2vdUcbxLyCnf2Bj8Xo7 zlky*#gv_M%Z)Ikz;cilxO_TP{m^s84$(FR5`E?+#ov;Vw(OtHV5 zXK#N3*rHJ~5SYcoE7$zEgzgMK3rnL{s!vGG(p(6G6?|*4Ih!&MYFd!v7XTA1Ge~4; zs| zl%vOp2(9T@U!mTXKZUMA2-$z76*D!O)DP}J8Ci~Lf~FzPOj4m>tW_*I7?{) z)_E`p?Zm{%G`?GkA{ryqu&XSLi&j=+m8rV z6I%~%bq;CXAhRY0S$ucWEq+_OYishm*@LfNPQ-UPR2oB@S$87HN6`T_PWY@C2 z$=K8RFg3k#50?`NlyrPIR}Ms!L<_w?0zn-0fh4NnYj~RU z*q2F!EKNgQ&b#ZBLmkg-sGlufTIpa=eItWy$!4*1ev!MpVQ?sBqY8JH34#0P=6j|G zf?1^t6?x7&$EO>}WgkDT`f{YwicE}Q7MqyUSwdtu4ACJSBmXeQz;3bea*jNbaEsDG z1!kST7oKYxr3xfu`B?W`>h;6nPgdbO!!d9_PQ_7s!F_atPdA^gMY6O-#Z3R0;|Xy& zk+%D|rP<2o%#VDCEB@e&jGFVL`T*93t2Ji%oWp4PVt=Q6y(WAC_gBo9oq2*M4`b&V zoXmfU#gUHY;wyq#c3UX{Z^y)T?Bx6P6Ji$;TQIp6oBbkx(iC}~EsPS-cm?J-_on#C zPH-oZzpxmbw}&p6z<2WZNtH0+IH0;gh~rlzHllS+#Nz|`FA|q$-k)ux-x59}h6D4_ zv)plBKB@$+0r@Vg+PLgAa*#^V=O_6a*Lc`bDUMDa0|mh=p5!1clW66Nw2}9KYT?6G#FO;csbR}#?bHC6riUvE zBzdq;QNxkWh7cl6Swc@9wn>-a!jFFAa`_x#0kX4;LbE7EXgkPP29)vI!+D|(=?asJ zO)PU{9zP0}ZJSb$73aPpT(97JkDa(QJ+pAA0$p1QeMu2+(2R74+@g=6_7Db5U)&?O zhB0#k(W>T?51vpGUoFFKVVGx{r_Jyb;e|)a_gH7oOgMGJI|iN*b$Ka8Nz$I}~oXY2G_d>aHQ-kEY%*dZLJg+x@PjNqWEq{G{?#lvP3>KmZfK!H) znDLkM4R>^vm`b!UC!?olu|ihyS$3PJUU~W2y2$E$?+q}lspK@NP^70BShkOrl#X6E zemqKh}X+9xiV?e_qvP01%JL2M{P3&Y{C2m*-Cqe?ecWv(f21%HCI zfXX$TR!EDc@mGE2ViC+_S&{_3qh7pGm%5(f(@EGk#4>$lYgbmsfLC)^OVfj6FFJ05EUQ-HG431d?=%h*NGa<=}I=!Z$k|^2WehhSj>(hJj!~u);Kt!HE1i3!|y0rQ27xn9hXM(Mj2OH@O z0}}@e{lr>28c9INL~f6Eev$D(8Pt4`gH|?;8;r%z5K&T!uD78OO(ylGr--fFd;N+g z2vypQ(KBQ3{j|sw(#V{^&X7q-;JpR6k`X2QSiXxKdFKefD-2|t^V+h*G$@g+iWF!D zGD_WKQB(vQn;Ao6xDVoGtMYXtD^H)R7SD-u=AuXzlG&uoA=cmSUJv?5Pr!{V#@!W* z{Tby_NMF{LB73@tAAb(p05^GYJgxewI6(Ge$e)~;PUb1K^?v>)0p9vyHpr5aa{LTk zT@NoIHd3FqAJAcC*38u|t(n^W+BT1sZS9)9yJGH|9W>^>1=XO$=8mGT;rly&8841C zM@qZDtvsZVM{pg+QHmIGx#l)|bV1qbC1kwM_GLrFl@9}<0KD)gW=t@xx1pgn;z zk$UI#noc~am-nce|3*%mcJ@I{&1%Ws6BRHEVqJ|Hbd4Eg^Ex$_AAtkiUiBVf>HD(E zIWcq+eoLA`Y$aHw#Yf7Sy6S@Unx@W;_s943&ZSJ2d8u>wI84`NG2#{{+4(@|C5QaP z!sm`|m&fx#zwhtCpQsZEXB*>5h+)1uH-+^Q&e4aU1jrt&_D3`mm3(Wvp2^kIJ8srt z{ZsS+;dzFH@rz)t`{c%Z_7`JQhJ|-7RWuCiUDP0XE(c0_v>%5`R?NB0CI| zEe0H0Br8+pK|$5t!1d4UdE%GM$*(M^NMarXO>$iktf{9ne^UCA1 z6Lum%(rx{b!n>fLvZy~{p&D;Aut%p!A=&lZBtw+<$Z+T^=7{WQv!yFX^3_ZdeIi?D z#4(9tms+Wkc7;{j6jw^;i!cX0_4ap`Mwf%0Q1sMzVk|4%_2kjD_?}a03{lj3!2Lbc z^yoLyr|Hy3pz^b77Ns}gRQEp7R&W#?7m4`Q^{~t}U)&}0xr0VDk}GyMaV~a+57d_Y zOVM-(%acluquK{fCURQuAGo>KrsVo?-tQ0z417j-G<@&7Qd9kZn#_MEGyeyZ`ET1D zL@LeN&wnSrDcMo&69()c;Kf1&@gp;VQ6Q}~NJ|9;Bzl6-H%v&-aoSW+MfsB$;wE~2 zJ_LfLm=6mw&fftudMF0(R#8!wN}J;pEN*o^Wz-*EoqYsLB45B=VZK6Y}RQ_sU-^NLZhyvV&jC`VFZ|mvLQlsJ(^k7T#X<^XQ!5x(SQa zpK7S!rzzTy38kn{<4p6WNNEjohy&WP?N$$nlp{MYWmJYGID`EWo6hj2LN0H?F2Td@ zNO6=Jj9bv?bX#&S@=!qsEsWL>N>H!AO3RCDBTfu#(4g#0wpdKS`&bVZB(Qiobn7MP z)4(5`;5GnG2Jfk!jt0`p@S-Orvz9U2E3prgU)`3@{fGMFKcQV@XRZrBrsmz51%g_9Ne;*rd1stC z{UFRu5+BDPsfpsU$@hVw+9Rdfg;lYool`ET5MEm+=!#ZKHVnKw2TKs!2i9&U)=;jI zlE)sq;iY4inwx`+b^K;rjzdP~H-!Fp_!her=?xcB9!?A%P$lfrC3s}a%nNW}I?{?N zOKIrHu)Rq!eAmSi2Kkj+=p`s|dALSWg}vwQ7-7k^i@`gT7?d`Zgg&Vf-$jKKR58=u 
z)h&gQ5ta6GPOrx8yJ)B;0w#P^?FszX!y3zdoTmKEQAzrKVE;S5LG~Z<4M8(wLrVo? zM<;umznQcDaB>5q+rODg$U)ve*Z5jdh(JM=^iD#OZKOg#eTB#&bD`ofVN3ljRYMg^ ztA~1kZsbCcKq+#?)Hr^=eN7E!>t5pmyt0AMz_U5HQ9%7!n?6XG$hZ^GktTNwA4zV- zrc_k$aq^WZLY2TUR%A~zN_YmK_Psh&RzL5!TT^#?Nr!B})lMaKtb~EU`n2}cS%kt8 zE_`qoG1NW@%~kfqw{=(LG~9`^7g?Gg5A3nyMk?nbo_2SThuNQAa>&gsQ|`z>>aWyC z+l~MQBz<72o~ARMc>LtY$(S9D>&0_#i~bd?FUzMFSs8l91podFV`YC)H+R%B`1;5Q ztPj?{Qf?Cf``G<==rBKNPE|ERAtT(&c*dB$&I>Cqe zA_;%aSVm3*CdX|43HG{AyD77H%9M>lVH2F@hyoT&BURN9&For_OjG5k%mU}-%Vlv8*(c%WnL2}Qen%5dY{m89syhtCMPvlKz0RUP|M@FM8uVLlbak)p z5^l5qHfKLQoC zmB``n4}y26zDU@0hdOB+K&C@xpCJ$PVl_Tk{9>TX%LLTH z#4B)HD>whBm6YB@qH5|7CbZT`8yB@0DZFF$1p}$AJ>~=1uh0ds zsjo-Y4Ng^{BMeNBG>PWVpMO2faTDV-*xz8K;Qs_G|DAO9FUu=sY^rbQ_Md1Kk+IV< z>wNG--ILo(Xo%lLGro69X(ZUmA2cY+g<-%DMlJAu1J+2W^*gS!Yy4;9_Ym&%@K8(eR%0X zN(OaC=smJZAV5UtFrafb6eOBlFAd>1IJc<0-*!9X zGaK#X;g+ZPx~S>zi3+Kye#blTS~=cQJLtx_;Ck$$M6`c9Q=m)0It`D%HN$hHC7@Tr zVkUwm#n{|8py^2ZAvpii?Ri13MA|v5wXM+T#J`S{x-lVs2E9~@ah}T-Gdtss@%jPN zecTG8Aspnz){l60Pfs;Z?T>;?Hl6N}_??7^Fz|Xqxad_|I-yF*3v}YRGzDU*G529~ zMPl{NNui(Qm3vPzqe(v+G!@*yGS+mpG)YpRuNRkcf-H&)BPXSoRHR!}nh-#&X8}=^ z9Hb(l*Hol~+4p_|`)ek!1*gl_e6Kr=|C4_BpM9`@h$o1f+WfNrUVpwp?0*V&6(+wa zVt(%_I337`XM-64s4$zP<^~~Mq9iG3=%zdeDsea~pgAr;qi_av8cFf9*&#`GVhMD+LP0-FC>0+KZl{)bde@fW^OJ6L?I4G=HSBsgWRaN zAS6#JO#NL3wICrM4cJN4COJOzfRZo7Siu*-)hXovveSZurEMTwY+p_tZVvb=FdPEL_nCBz|N5&|Q zj?W<+3-1|BfX^W)Uygf(#x!2en=wBp9z{?uhwkI1cGK?$LenD*>U7s`Bp1K8B&IQG zg3jdVoY=t@8v(k5NcGCEtOt>V4qSK){jMg98P=Qim9S1UL)8LE8PjK7R8{SO{UlO7 zkFye2_ZG6`GvxCvG_6{c^Z6i}qVOzim^*^{Qw^l2=Z$1%|1Sv@mN|H!=FBWEja2 zkR(;3RbW(US0Ca7$no;JLF7Jr6VaLgExps2N|Ty&b-2H;<6x$8dpp3?+_ClDq=OFo zSZaha^x4T|9S|%R>hwcfdge&SNoqrW`w;Izj-S^JQuO6rf+vMQQ3V^9GdaiK0!R*k zB(WO%J2AEF{YWYyqyl1~T4Jdg$k0bmD0aiPv`K)GoB&B`Hd;+ra~ubvqE}T5G79Ge zA0LX+3vKOTAkf_Gx4HWM`Xj*c6ssEm7#w#W;w$j+bq1>FCST%R=y7qkG+l$q{W^W| zmL3NDaW8-2?e8YLkmKg7dz=A>Z`i$Yw!ViERu6>Ef8xQ^5Wh307ymmt?|_^;F#H=Dn; zX`ElLw0-qvALu=Vwcr2V9q`B5MX}yD+}!fLZSjBi8UM38E^X}Kpl@pYPuJ1-pG9ni z>JYBThv;5k-2EnE0(iav5WDd4n=t^O2N6L5Vt5c@bO?RSqcU-WlQtWh06$8>Tq(oW zC_?;J(p1O=%Jmb&M5%EWm+FJL8=4;LTq;#4=AN=Msn}Wc^LaQp|4cr0Z@*=J@!lRz zXLG^k$U(dG0M}!qf~c$jE3lMcE&^7cSyHHJbn{z+L#!ORd1T4O`mmEW5BUN^*Y@Y) zA%yh+d*_tmomf+C$jwx^X*TlJQ*|34HlB}(M4Zro`Mq%1={xZLJ zhsvlE3M9p0n~`kI!uAjvW_e+5VcZ`E-&%I!55_vf)*iYKYt4~;YPAB;bjn2)Fq$BB zj20n&7>iV@=AuUT^>C-Bd}fS#thSstO2|!E>ytw|Y)X_00c^qb9x~Kmf>a9e9*nJ* zwGN%rF^fIF3ePRdO?Hgmw&4h z^LM7+_st1jE6)JphSQa>SRjCfNA8W_O34|GHrG*{J1+S6;h_aAxOZ=tm-1|y{j^c} zYH=zjni2`E=i9SVnTevlFX-Yf7=)?g^V7oTmmnHZMb-)}2hS@e(TPEXu#W^TO~T_u z)(Ss2%1Hb0mD?a&L_7oyRiKf#QLtsSK@hup-Ww)MM&l$NG6=P6_tS3izjFQPDr(_J zm5ga85=#YsN%L#BgWEK}e_eboO%PAuM*LQ=zfiWty)r#+eW;4WEK=&bFok(y{DdQO zh1Vsm;#G**vVlCfzT zZl2xF3F7k7W%*KeTa-3_!>kv)tkiJMyy)|X?4lmd&xFw&B#-wI1sT8qi`wCI_E7CL zbj={Fy+(D1D76m*Zcs@|<;uLrM)|OkfMBB_)KJmVsjd0mJCZrn6;O7~POA6mCZT5r z2Y*+*sP`JG6i3%KbZN_y^Ze{m8F9}XWO_$kCTfWVF&#zl*N*lIJn_z3MRtc3#0wAz z>5bo&O}IB;{=a7z?wgT7HJ}2Vvu;g*(@>c%D=S^rY`@rF1 z<;KPZOJ9__A|e3FP0f5zuDexu4S`7<8{&9_48GsEm*!JTW*O^aU{weF-#WxRngl^f zrz$PKQy2LZWxZjzeA`+syyQtxF|tr!Ql~Ccd51pJFv4ytV^Qm!2*=Uqau@L%cNtg; z5zG|{_gUy^aQ@gElnio24RfH)$3^?&*?^-G$^OjCSVVn@o5tZltoXH$z?8p@QLF({ zoX~!*CKe5pYwzZ0{L%O-_)d3IvNduW-R31TlT-9^&!4zqnd0E}SRpH;OK8vdyKD;c z+3c5CSO{Ce+Wdi{p!E;GmhR_VIvxL3W~NhGSm(?)5G0$U)ohnCpGV@Rwq810rMGoJ z7>!iXAeu@Sq~merlp*WY0XzL(vUkAlfS&4SqrU2wcbn$pr{{Of4qVSH|Ji5j=BI8! 
zj_b;r+t(k&Anz#ejU3SeShLDr)8*1Xm-gX0C=fv25xn<)fqq87MGg)oNGJ$qCKc;x zm^&*6>+^0v9YA6eap#3}$6(&Kckli3pxynn>7gdhzzXfpPywN^~~nHvRcNnY-( z`Qwoo@)qm&(I##ROkiP8^f}1NkX8E3%9AI~>GUO-ijMoolVE_nVC6Oj`)wp>djT|^ zfwbb{4j;QeLC}a3&uZ#tj3YEUWgz7>nLS*)jt>E@I<;Hyr0}p@ zO&n@bp_HyS^6_+Sn(5k{Rn+ajyYA+tY7K1 zI_}8S*eK_yR7x4~^X99EVAD_7w0QtZ&Fp~bl9q?nK%)uK^`Vc+EvTlQ)Lk)NTw&{P zxZ3xCY5Ky|{F0v+-yqvhl(nLkYe1?EIk$S8T#;OMs4sfg*?e4VLts2JsNMZ0-!Rgj zxJ_Pw3_M4$Jo~ERQ#A&xkn0I}`pMP(`j(WTI>HzHI^MKsd#S3}rMVf=Zzl+i++_CY zM}<}P8XiyMNPfC&MWya%9@~7<-Wje$Ttr^PP z7gD%u(%?9TM=G>9ysgI0K58^z)8It*+0vwX5cKr3*H)UvFIEuli866AN}uIt!*Gcw zjHctku=@JcQXP3O=vH|I>v9Z?69y<~5-;OfhF$?gN72d;W|H~iY<3l=!r~|POgDp+ zG!`t<^dAg86VaxID)^}r%|wA=O#3fFg-p-RWGX7T_s#;=Agtktv1pQ=^s3L*pIrz? zPu)R3>1eKdq!Sv@FeiH$BTu)5|N`D6u6QhChBW{5rGG17#1AHcr()@ zdWn4oW^~u1zyDI=_+v!CL06~%Tb%!Fa@I^1a&U;SRfw|}7&!@#xHC`5|0*(MHK3wW)e>?oe{3yhrS7QkbQuH8Xnq@mn{D(Mw_!LPiu;)gE6Qv zI5bM^;-!TtoFL|J->!ZxRT?iyJ^cXOM-mmZ$Cc~~I6PV6giXsZPM_umu%B7QG=9(J zX=SXj@wQ`I#6!xxqQ499Tb0IZ3z$RpC0yZ3N}W)Cy1*SaGav=9ePozU-e=Z;f;YZc zzb<9UnJ3zuMhGEWa`2@DzP?6jLL@RJoY^4j$`OCn#6c)y=~#|rndme=2uCPWXPjY{ z6!F@<*x9;yfu!FZa#g@6E8u|$i-oY299j-O*UX-9{SlgwuWJ9#q;RSsoL%aKO?Ghz19Dh+UZ^tNR!S}t z_Jb8Dwm{!|1iD5izw!w7B?7e^W8vDuq{^yxq3oXeL@qrHiRpNn;@Rj6fr{Bi+ zokKg@ZC92n)w>jAuqOd>^l#qwNN3Q7<>-fky{`CG`ABEdhHCs<8e5C59n00SyjB>Ev3pGYyQWzJ5Tlv8EmFvJ=hul8+(*^ZZ&8AD3@N zTbYyr5!dRJMOEfwhZ2hB4pEFSaNsHEw4WlFW?*k^xF4SQ@o`LI zd#hB#i=S-k`=TEXOj(ewKENSvW;f|iW54UHRt7` zjryctpD(Y^!=GxU^x_ko=YJsK&}r7SEEbleT?f_54WxW#DGa0i%-wLANzXEphkqzD zoWGOOV~j+hG)JC`c3G?>y*q3+UW^e3@Q!QRN7ZrI-nTj18|s+ER@{q%1+;dG5S6q4 zK2rX}6+#RblGxCgxYRV^pdILB&2BSUYM8!zB?&FckQyrqJuRnKvDY2RFcL(p4NWI) zI4H8rV#COe#TeLo(rMaQcgo53=L5wYhHqETth(jH@kpa!uH$3W**h%8?~rBcg@yVvegr$&EH0AyL* zuIo!{XydN=apBz4_nb2)$}jY*q2nS2rb|xm{W--$i<%*$fUJ$Rl4)0+=J}P(c!_v^1a8FSJh+KNdpXy?GtJ8* zS*Y-XkOhiJ*YbW1dGF?vcWht7K)tA=;q?>6uwx2KM@R$-CkhWhcGuJFC5G;p#`Ghr z_4Wq`)QG%G1qNxO>KAyMnyJYK-qs^>NA!bZa7j+yVsQ+U-Cn&qatj4vz6Ba*=$jJI z^<~HBuEP{5nsH|20^a1(W%EwALitN(oNOiMDVR6c=bQ|YoUViC%9uCbK(Zv~e+h3T zYw?%NIW-f`0M1#Pb0!tP38z>65O`K6&k~)tMs~uIc}`*meB6xUFPU}PmYg?d-gKRF z+DCHY3!FP;-joB$lAZq`lqs2a>K4tNH*ec6JatOHay4%waN~r)9$r=>-^{TFsx{Go zVdFjS*|wJciTrAr1Nnl;rFic%T0-vU^Mq&?l!c_v)g5S$_$H0e4&xQl%$Cf6sCmN` zwvrjTNgx|~IpJ=wMZMVD!O_HbZBE&g5;@6g$l+*Rx~N9Fz?#&FGg&r!(g1OZGI@p@sG7ZCBdW1ve$zx;875iTCRlM!|KKrq$ujw( z2WpqC-{wRBy!+tzX_u_>U8eHDqGxgFmCCv|eE z6_q_Y76x;!x>BXU-lMn1MrLQpX@bE@R&Svd7wk?zEIE(;DC;Af(TuD&N-wopOP4w1 zBRV#vV?U0}6qNP3s>LY@m#eYn91f$2Gzu-HilGn}(YvxqyJ;xJB4q9{OXE;1&5g!l z_uhNq6F=)}9xiwe%~^Fa{%hS{*H{m@{()dEbgL+~OW47$0$5Nqb>r7}&+tBu__Mqo zIA*09@pJ}4eMon~V4&oEppd}uavmX{_bEA%_fGO=`tadaNh$*pfS&)YRL)i363vQuC#B**VWtKYJk@{#Q7Pa z%YX_Agm&~}p2{i|I*HiUc#7FU7(;$naVi5zl+BIxq4<&XpN%xRG|nfqOEaYzj~_=( zn+`X|q%1h4CI#X64fc$(Ds$8zi7!Sj;a|U&Ih(gpT1?kuedg3n9#Q=-+=%TZ%fUy>+&F-4-a(Kp!+QJ$!{NX(|c|JK7 zD~u7^*uM^V@kPFG9>wZS(t}5p0~%s4G8{!^fa>Grrk#~!L{X7aZGPisaaIMHU5j3Z zB+UFK9E+U12ThZl6DO3VED7WeHRm|Jl` zHQy>Bdcy$;Cz;T5<1N$ibi$h9cTD+?P#icOlzrD6%4flOK zc91QUWjA1AKk6D`n)`Fq4Y^|;o6ql^pWr(vHXRVB2~$*$)gFV2rWiJA^Vc`#v))+Qd48A+M}V3v$P3j5i9j>Muk6@=*TD0MF=D^@2ie(X`O z;RI*HDj-fd28%n?{vaBoH(fPuH0jdhQkH8`KXj4VT<4pqa7bI{*tDa56M|tR>}&xQ z5zX;mKQUFi3dKEknx{S+QqNvb>DvKsa;A{nFgan;WMU+4F!9ggZ;Xr-2==^u(LG_& zzc_nRACG`t(6v(Qg%B+pb3b<}mHbvjxeqgs+il?LX<1*Wifc1WuR>?1+6zL8X>d%^ zzYT3ttsJy=z(~tfsRvdMLlm;AT3!EAaPf3|)H`KQ;F-F;x@{P0Df8w2$aJX`w?&?}ps zadB7QkiHuuF||QSaCXGtW3IlKnzMd${A6B3$_Y|UwX$Ab8)nzhjCn^?pm~}f_r2sF z^k2S;TYxUA<1W3ku?UN~4`$w5M&6IV4UG_as%OFgGu;w^xcv9U`9CvsWc96$jsBH6 z{2z!s=%uz@euMBQ-N|I;LQ&Z;&nn@3Y*#RNG 
zPTlVcdqC#h$p=xo3zIir3cxBaNH5S(_Y+#3j=1ja99|7zj_;_6*3Kg2IKgxp(OgTAW z$s6Kqji{N|L8;p$Y_!~CMAvc3P&?LQ?3$C+6_o@zi^2fQG$5L)0XaX3@T^jn-r7n$8ksRAJCx z|FCsz4?GkPpM}Oe-!aN_eSu~y{X9q_i$R`ZVbw>j`k*5v?t^Ir%tk*%f#Rh%Q|r=eXiPPe?(X?(SM~UPJq;{x zvOHn;`W@`AI43A!#i_zQC_Q8UAPpJIV}`C2hyJus3}pgZ~ceM5Er| zqW)L}|C;i9{Fg15xc*t+;rIrZ>lBA)j^h*0D8A3vJ4z3Gmv}xT1FSvpq#5FgTJx2r z`|{n`i)Q(`3j=~#>z70u6b>MKN7z@5VLORYZe@hJds+kF z9jh!;Xk003fIMFXX15?Rf2Z`Q`&FnuidrzD!hRB39ce#b^CRoGZTrlu9RPl@16j`lzr!5HZy zwON6`xy!2Sj8ZUAVAAH>tfnyQVGv=_VvX!O`}G!hq*X&3ijI-ys)1;v85J3%6r(&# z6RQoR8Szp^u##p)FP>jGP;CupV$ou{VP8XJa_pFHn{wR+S+q(Yo4*jo8VMXd2YOK^ z@IVpn%V4zlORxqlE%$fnq)f(6MjFs&ZL!$FE`vr6r9)>l_ zeu*6f?Qb4I`u$OA6mCEU9j-@br7CaoX`fd_#{{!O=05wm-~-Bb9eMrrdQ;wh2{`7A2jML`AVxGbD>>JfwU!0V_= zNLIPg!N6gW;DGtSG1!*2a$id>tsTL{20WdkBY@S>2%HT6t^<0(G7nxDvK^H$!GFW3k^EZEzM4#?){KT?$;z$)j+m6eicCm>OSx0`T zES$P9*in@L*!t0#rC)y`R+?C`FMUBAhrmO?))?-{l6jFutO@nh2Xe)TkhU$%jBAPi z4snV26T4$7aZR)+B~hL9@X$R&!rXT1FckFuNz6wvxVmd<=Hm}&Mi#oXIu>UWYop8L z>9d3;nw}YyxpL6@bs7~jBIjJL-C@t~9m?L5f^w*evPfBmYX99eP9Tj>&k`djpuD9d z9)m1I;pvVQQtEbk&(L}CKo;s}=_Vt|w(u7-ufkq685&=lIImca=(n_$5`BZ^g-gBp ziD8W6>vKab6}it-kHBZRh|_a024_Ri914;x1(lqyzxRUp<#4~V02$^QpfCTwN67!2 zVg7F+yZ_k$7HL4bDJ`P%k}-{r-^Hg!)m%%Z`T}F1NW&mN#Ks4b5JM~-8sCY7kz%?r z0ozy?RW?=Cs#w-usUYBL){tV9g)7%yp)EI|ZD=&AtpBdmo_WfA(;-c0y(itz)xDFw z0)(Bf*5i$@t@Ha+I(#pTZj&%ylb zrOp=+q0Yf;GW>&^%RA#|@03PDkP+%0tvyujoB`$t+d%cU=?;7 zk+155Nezp|J6MJwF?J9wZnl$Sd(I4^r;&|$U4g0{n%HF^Bbbo0h{PS!66L_mqlO#9 zbO|`R&3%EW8mL-!!T7x7CR?hJ%&wtifCs)`%f-x~;5&*`X1W1lfd2{K-l{oZXfeP| z%rHb-ND}!oqJ~feg=@n&vK?o4q0r9S^xYDsp3nrAScr}l9F~%~PqG$lDP5VPhY@&_tZsAtQ$Hr@M5F%(TLI--fq)vR@*dje1+y6<+H(~N=h{%p zt|N!0w>CupbQrt^pk)?I(|em@1sM%rPr{C8BxU!W1Qnwmh8F0C{7W!M=!V#uHrU7WW^oE+7&8(XR9U8RC7S_CrKa%&Et{WES#vh8V&vryL z1nora!gfArWce2FB_4{dp9(_V6yEAr(A`i9Xm#&7)VoU1cJDQ!=;)i<{+RNIrylF7 z>2kGJQ-up`U+OE!FDm!@j{qn!w|knRG=vD3sH#vP9V*N4v+ssGl(SbD2+a)i+I{DiAVG z3QBzz@7_68&a;tTAox@_s;HksELuMv`QoKn&Z)Pi z$k-^qco0H`^L}KhlZ|yoKFO2KMxU|4xyd}j z=WejNueUeog+|EzUZFHcgr&XvG$^a6l6G2%&;Y_NZkB(Q?j5aub~%m8n^M_S8+V{K z8Y+0vVD2CH(TtkX3rT6Pa$uB#(_hgGy>EKxuaNcMQVmrF$)C|;4KJ6t{gKzSQw9k7 z0DH%y4H>Bhrk?Q5Sz9LyCuIkDZ!le_3c)=0T-Ex?VVtoH6OtU)F!n~#XAFO;Fi$%| zpRBz?7PXbidGx4<2z685RvCfG`c{E;gGmEfet6n2E9ii!P8BFOo!BjzQhWpR_yS1# zE27!?3ANLFTz#v(swILcqa}>HmjQd?xf%ylYeAXM*xlI_502RQl{JpI(>Z6#1Q!D& zLiyZS+ft|D%H5CHZ1alsJ&8S%?1t~N^|94CzG`FE>^M+=q9vXr`y_@D7g*yzUr8Tb z=jvA)@n@8`fWs|C;Sbt|9{78CEnIiVUwI-~F?=b{+_8)VSPpuD1T{ko$`*~IhZ)$> z{^S_+!Uqk)%mog1;j%|xjBxEGn$%*;ag*%`zfNXuECjq2wqJo8SD*9{GYan`?Jj^w zG-ncSJ5=^Uj}Wv^*gpzNxZ_8P2dIT|erK4tK-~soP+${^Y2Wf;_9MM!-~!!(4^ZWOJ^|pn}}_O|U5)*0Sqwz!ufP zF_4hrk?3@t-f~wrB%7-;3grAtE6_yK&mtc5Ow?lwkcG3XLn}pStu!m>Z)Z2~vydT_ z&L4JM5c+OXKq%FB@>|CWwS)*lkkQCbJ^?*CV|vo*UrTA^F0DW<{@$2R!^deQ-sD(xX72MN;U6ePVezoRq)s9pzvohqy2gxVIya-&yP? zx7;_n!XNUyTz+fB&zBKop|WIHK?UDb^N$_$Fl?=V>RfdzFI5S^7`Ew z4g8ppAe6+%hrfXS`~dZ1ya2Um$LSM8=^K`SPSmH9KOSAlL3k29&tGsKRL^bf5?jh z`L(IJI0Cg&hzsP>HX@f^O8^Ct*dHE3arw1LBa!2vqUWuv6LKng)9HFxrB z)1b0OjrEpEKr6WdzArizAqCN|ie75t>DQ(L*DFIXA*qqub_seW#^?g7KP{t?TLl;? 
zz7$Z{fl$|Z9hk+~lf;F;%)yWh^vOrd~KTG7eB9UwIQgmC%F5=Seo>E!aOQfhgw{ju@-o6I0PVEucoR_(*~ zADfu8d?4;1Pi`C^8<3~bb}s@WRy)-&OeZDi-oJp^I9shIuknxx5)f>IojO2KI#7dm zt?6;DeHq-y68C9S!(2sBbGeZ6u}u{n{?rQ-OyxB(bB96WW=i7W1sDYB#eKwj0A^mzV+G zad^A1BXIXb?(RM@1Z~`vxI<-Eby&FeGXdt4)S11hB|9&Y8=Hg_GcIj9*W5j*B!I6- zQ_VGi(1V7+5sJ^A7bK_B73H(O4E=Kvcv$P1;GyTOouDLp z!in(>9MOe$A{fdE=64)Ac@Mg|LKg_F7pBR+<-$H{YDBdlY8Re9XNpHwWQp$!f#r{P zX}g1FiWB*h$rASIz0Q#Q-8M?cM}s5TRAH282IIZ-rY(i7g$Rywmw>gJjBqzLcI+nw zTTsznHEM+EFg4APSm=Qee8ifh{(R?vSS4dvFWfsq|*7vN?y>#yX^{0nrLaO zjF~YdNK8i=@Kg0D)M^!ah#!eJQT2?Bn=C3UM~us40`^F3!#(MYQm>t*f+JDsVNT-X z>LaJ+;X5ML(fu)fmbK;k{chy}(bXH}uYc1TFx|>m$N){$h=8)ve^0*X{$nBj|DAmO zFO)Ay#oBgV34^Cu;*8X}o)p{D@B1ZdTZeVAd=s3jv9?*W;2a~Pf19n1$I8%b0%B z@)Tr{;#=`fUS9h_(3b{h&A$H>* zPG=XR2OfNxLH)YOTNB#-prdrl-{h39pAk$&)lsS!mf?Y^jc+zA-L*Sl_IAqwC^U-HM1Fn+IG zE!~O{g%;7*tP?C!M^~?!XI`p59RsCrh!JKG`-wO<+_|O438dDAs)lPOOs9sf?p5Vm zj>>w^i8RhODBN-=aD-3{#N?q$Qr9?#f{3IY-NVMm#aiwxYdnLNsrNe2+&cq^nZ5Jw z-J~PddId0b?|CDXD}r#kW9f$8ymr~7(dBM>F+TgO8Xwq9WEt9(3tymo#u#i=fK%om zj|~3QKBQ+Dd_Of797&*?RF4NZYYqJR$p;Z?%DF>OtHsl+__O4+G1`{=O;dyH#}>aL zxpb~SW#!Lwyx<%T5q&9#oLpkNNI6~CeO!1PJW+#*K&7lmo0+B;AKx5!P%U?=9oGh) z{%3;RA+pniSamEA^(bHBSAL5I8JF@71Sd#W)V&J3>f}2gkkmqu`2`^?__#ovK>|UJ z#JJM|r<`M|$Mr$1s0Wffv<@~e{2p}(WR@hAV2C0t%(m@{lh(zu^Ahb*QG$>l zPI&t*FS2BlajlL(ed+D`eag`>+wtrD;{}=@Ge&s;K{Zy~5Jn<+RH(DuvUtnJ!t+tc zrUhn{ET3kh1m;%IUbmNBXf|GEl7TN7z7g1FO^Z{bQqfgw*F{UGFBJ4(y3 z(RH?>xz;fAXp{di%!}`ZECflCdhnnE<#cm1R@H zmVM$ETK`xY-SwL6a1+hx-4Gt+X;sY{T^Gj26DcJcH&+Tqav!ASqU6hA>ltZNC79`m zl1N7PS;LuD*Wx+prBQiuUdCG+%2r!_d2$W4^((qfCrGw}e`K-Ha~IAg8nJH5CyGO#n&fw)H<KF@8i8&5*J5!I zk?h3lIoY<9C~qoeMiluADsyHJQ^O*B`EGb5l8;I0Z+O@}Bo#VfQ?NnThSm_75qb3U9Q#V7Nf)vK%;Quq|Bv9e)byUJLuuh7ggQd?eP zs3=_E7O2pnaA?;`S0{`@0p|mkhtN$5fj$$Z_*pj!yOv&ZA@X_6N|fffdm?pzXg!2j0|ZQKQM)G?!AzT#@Heqg zPBaWX_G{bmxEIJao-kX@8{J$&=nx8GAUbbAcK72D1Gu~2mAC-^_B_EIXa->bNwp22 zmdl;|GVf+^oUd+dvg7;?v!-yt_J^0?2D2bzlqoXPAhN958p`nwS#b0Zq>bAR=B}8A zI9GsoI2TL{nM)Y5v`1`YNCUF4QU|IYGo(rMp`YbOkeY=*y-jEc!m>T|^Pis+JG6KX zNkb^v_p2U3p&&fAz>fS8i;5NEgw?fUNg-(}HNmEZ^g_4ZJLa%_b9_gyOWNDeuip97 z@Qv3vY@HZ9Vk6^Z;=NKvIL2Q`0y6x?Sii=Gi*c|a&Ba@QAby#NXaC5v%XvMS%j)~q zZGVMyvBDf2Yk+P-AFX~%J@@4ca!fLqsy$i6ek_keXe2PvkNh3bqaL=}<-Gjt&l{rl z#Q_wr)8)Ir$_h?ZoOHxhkszy+ISdjAT!puxxlIi9Cw@niIcl0bQ-0 zvRpAvb#Zs6yMyZD{_+)(@{6J=^$L}&*JeXkb{`4Xkov@2?DC?4$i-Smz){e!gFaUS z>+#HR>Dztu9_*>Hx@|`N3lnK=e{q*}UW(l*36972sRkLki8R|1)3+sUU1*-E0^Pa$ zb!$%|NRxl9J?G&wSU&(rhuJ?O9eDq7?NN3zxB4H+Ve3d9D1d=(;BndssRDhHnuc0C zM5`aUT4<1)FAMK|e_h<>$u2dwZwM0G8fU-PJj}fZU-maf7*LNub3^FU2ap5&GCj(p7Y1J6Kz7w2Z@Xn~fdOLLn2r zo&#r>Fq)_;GYwOw_#ti-eY;HIfg!dD2&WIQ@17(J{uZ=db#v0p1dL(;@cQpb1^a&= zr;wGtqa(mI*vZ(e&T}Iyq(0Tgrj?<){+4!onci1Xf#qs|}~y4>^Gq_!LngqkH?}8+YJJFoQ^657wifpxa2Zu2&0|Jdl#P;jVg0KcVS3CZ@jRlmTD^?BPu9^fM}cb5 z{){V1BSnwL1{rPG5TaFfW|_ymF!Hd%+%`=D z?R7l&{(3we^WACp4dl0cB|$z>IF{dQ`H(Y_C8i65=hc|9`VFW>bXwCXKA@sx-CjyS zT4Vb1v)gzyU;nXX@Gi39qbmbc{Z3{$nt&j-W;{8G@$%=Y9DhKS*8(JS!%;G9J0T>sD26mfa^G;pY}>6 zLD9bZg?bbpzhInKwUwHV&#a5qs-@c`VC={=$i*!4&s@YZB*oue8t5D0>9)X zQ+HmB1r=nSC3`i{SR+(xfDTi%s=bmPQ;g~>%F5c>(^5D!XviicPQN8q^tl_Zt-R%A ztFI`R&`>1I!!nt=@;D@J=Z{{Nq&780*rX>z7BhB|8c*%OK%r6y~OkcXs&DWYq( zs;JSZ{V&3lh+7<)9m1^zJBi`cbScf`{b{-ODcJpR5eqf|odJ^P8Vj^+m&UKS(I|&J zW2tcLj{UcxTgsG}V@cZ-o(#pPF)mmYJ)}Tx0?_4`MO7ecds3JL?oU*K30E{tp1E^r zN-vK2WTC?}s<}y31TsfB_inS6Vt`?BsDZZLI+J8ehL>(ay`(!XBamy6YLi-t^LO|F zQHq8(CLA>~oZ&d7>Hg0hjZ6e* zms#{H2Lpc4*`ZPgDdeQ0}p(5;g1&$Q{@wZk21bmi=rri{69ZM~E*-hw? 
zpGl;#TR+BIVJ#y211_fBCdzJ7D7`0fi$B$Rp>#(FN)=t8?-I*yhLk@!6Qg~%Y}e;t zd&H4e-r*Bu`dz6*OktUE4%o~0?zMvgJ&0Q*OM9nmGa0+~5mNWA5W4-h%{;#EmGI|b zXsL{OBKGo1I~J+Z_6{uefNb~9^stp4!OvuRhqsB%48`AqF7P6@J_uQjZHgGtGYywN zn)R?+9V#1pqvv?&@l7@xAS*2Hf19rFn2o-4qp(}W;5Qp$QF~u1Kx5K^T-lhND(+Sy z#&WsE4-`dwhI!0?Qa2EKyr?)xFxCTthv zS3lAJjph(z`v*@p5pe%X*u7sh#BU@lx%HIyNQBnALOwwkY3iMw4c}B~Vg}Kgk*ne# z1hzjN!qyC>AeAJ3!PB~qaZ=^#&Z32Y=a7wkUN0zRX8MFyeJ4%)Gk@?|PU;DvdT$J$U{;(l1sXbt;8qx0aftI=0BurOH+_luM;w8UNGA1>jod0?n0H=a7qvC~z60hVPw$@O=I}?OA?NhqS zJr%>i*X&szbvbyGR_JLX?p%lT)BVQB!P`0*c&ng2J}($UVGP6)szHg*uSb+$3m zcW@VQ)pz)}91+)}?J_SI7#K2`tP2>aDA?e8zSGBECUXMy$DY6Ruj7Ht%mu=d_qmS5 zfqco~yMcTq7cewYFu#g{I^mDZfexo%UxgJ52S}n|y?YBtN{5j!`B=OVLA@j*Y3`>> z>H3i{TD>ntBBNzCX&hydFi2|Dv%uzthDLfudM4k#i{@rkz*zl21Lle~GT8ZRD+LY# zH5rr`4G62PfY*QDTG9Wn!28z_g^f-0ovr@1G6M)MiMSZsI0-tNTLC!J|Ffx#`Z*48 zqDJ9KNI~A8lh;7f$o(#zfl^9AX~i#YzLy`$Ek8G$UmnIJg9FxHmvtjiq4o_)5aRGJ zFKS$>pRAREhiOd56P=C#qWb#>3vf*g4=yAzK=ZONPG9XlAP;ry{-c(pxG$oD)X!){ zx-~O2R=-n=!%_$9fii^J0m}(^S&#mLI=>Hk`FDgPZ=AwBKAg6#S$Ri57!^1n{WFH7 z$#nuYM*&(lJ;ik7{e+vdqYZ)m(n#FuIaqR>sW`rKIZH;KT@ZF-Eh;cu?O5(hZ_gt; zO+2+K7M9H{?~vCR&1h%d;*%$`snZD*M@>Kvyi!uW-@1KL+b_F!6$)&yD{_)WGkljs zyXhVi>P(e(YmyDNn;%oP?VGZy0i&_HN~aIQePI}N)wrh~HV>q%j)E^NK0g2w)>UL{ z2{FGAac;Yajf+mXw6bq{l)Y$a9|^C#at+fRJCEgn#Bkcet#p2kw2wh5*Yb$H638?O zGv9$@Q3v0BZJ`9r-f$x>5W@>09!WXv!=K{TdMRwSV#VD-Xa=qin266ouzy0bzE%2! zJjr}W@!1hAzC4mb0`$^iqI?8ag=P)atuh2(IKuAF;Kmt-T z3mP}`BXn`pq>kFkW&eT}BSwR!$)fVH^zpZ8tvn4>AV{o7Bh*s%Q%wp_MQ1rrL&@XG z^Om%UYkj@`I%)T&`~6Yp_18!D*Vo5(#;@JY;T#Zv8t%9}NDNeUu)!Y`CF>cs(@F&% zG#j20*8oIG${U){0m9LFR5vWs$HaV)BFZu5L?Oi)47UEtFf*U^;H3e*Lg(7q>XQ6X zJ#L)nfj%i#%-Vx|CE_*H)WEPhRb~tSQwf39<*IzaUi(uUp$R7;s(!>$B&BjxW;U?l zX^1-{>*>K}H@P`_Ohpv4Sr}o^;p3hb3>)<15_5JWZ_pwS-EGFr4f2k(_|d+zLJo#k za#*o~>z zQCLU$Zj?(U8AAINu4FQjgsaK9BK=kkNSXeWrLJLO#s0}5j?9_F)Jd~*j6`uW{lVc) z6$ABFp}qIanURX*`oTDmpo-YjuE@gj>2%~QVPmHqXKjsAXGb#0$7uYc9!2Hn>!6WA>z5 z)%jBW>g<}mv&fJ_+nAXQ&z#bVDf!b5w+cP9^EEif@(Fe-8ZzTh)|}M?iHpkh(*xnhGBu*do`F7m-h5{CQ-T|Y3w;p2^ZNa)(N+S)2sGOP7*X>+lFgE3hv z8skqwAG54s9#3Wgijgs?kym^D2XRuYM=nWwf?9=w%QF!OX=oZHSV!cgmM2>{62VJ! 
z^WfO*)dEwNvC>#ZK??FtT`;wIdHB*FNKlr1w~8vybxOeytfecZ1W-^MgU*RSEH+Of z1|CXFni^q-@`Hm|(()O8qXC1?W31;`B z2445`_ZeYR#^H{37=p9CvRb-m*PXShjt%x)EGK z^X2of!yyt0ic1ZKeMKxAHFg}aXV7w)WBLVZ^*`p@_K z7{}UdhQc6oL|9$ExRKAKemUCQS(S#+c=I)iC@Jwq+}bU!mKZ~LmiX9)rFzBCfF^X_ z@J`An^A);Ss(B&K0uO(V;-49Zuispwfj3<5maW%uQRNO`#PZS(0J47ua!{qPdI?H1uF3`Dr~Gcro~=59j7d7biR_ zVNQK3=vyhUA(_8qdlH)AR3MHc4HTItsmp}*PkmvYw$aL`1%HcBP31flVfNX!iCS8% zm;^0W3DJA{QIQ6swge9|B6vs_poS0lXlM+cH+r{;ffD>G>ANPmFkaRGu>NYNuvfy2 zY(Eg=-*AKA9_q<+!zQTJrGJn=oYf-?+E69^Dgs+>0;R3n_)qcrxsAbKN)ZtXGTqst)CVziAvz5j1XDDN z6Zv^$w?zE;V()^0U-!b?!|qn?02-&--geJ|v0-$J-_bWy$^#f)% zka3*XQHstrLB~hkHTPad_?Fs#ogg3Hni|f^-Ji{A$=MZ12V6%oL0^=&`b z+9r+|A`$c{x7Zv-5ts`X+W2l1lekNsP_9vwbHtcE@Hnhshsuore(M3JgJeK69sIid zPx?T4|ML9~ZbE=7>#e{0O_v`o>lWLK^iE~%EhWj8H%_@8%tFF<4_bJ6Dkgp^+!;wP zXgZ|3bt}!f@a*fFM{S7SI{464Rcwkxb*ZKj8lD5Rs$_u?J~(S@7}9{1v}z!?h?(S_$$9qA2)a)wtJ zHpRa`N(I@0n}>QUd*O%6C_TInN?#2r`cr?8TSY8JgL612jVL1&5dkw}jnivfVpO_h zrbY3vI*Grd4VQ2G9W|;Kibq3QeotqP%MBw&SEe-l{))@wy&h@SIT1)559VWljkDL; zQ#Q>oVi{4sTSkp&U;s&5v^#G0#{sKT5DQ*SFZ$MF*p0`VGAd$edgX41a!wp)==%Ba z{*5xdJN>A6=A9+v@$J}c@aYZRWeH}Lp~9LoFx;1r>1e^a6w1hGrmU}&t^dUNy(=5~ z3JhnGZP-cL61Sj{BTUJXY_1%sy!2ZMwH6>SE7umi5}aUm_1mhN=|Pd7lTAY z_K@J5hEkw4Ib;oz>y-oJI@HjlygfAXnM$}PwrtNO=79<4fytigALn#Q9=<(O>InZk z>Nv?i2PEklzxCAb2}-lA$!)yvoSr~lkaiwDD|xG3br{}MYNl1WivHx2$usL(^Fmav z?yK@NzSWFf^MwBF);NE50Z)JL>Qv*}ZpOwxDCAWDm`8i7F=8b)exD23|C2T@N5;2F zND_GLouj)MlNypuC%sKJ_z`ycHo4bD`^%@SrCM@ZcUM58ws8T}i%7+ut=|K_cwdLo zg)iXt2lR$Y!#)A^(L1fxFo#>C(z&1wRphYyj63tBq1Cfuvr&wL|-JPXn$nJ4X#3wKtnJdAipbFNR_lLYFUp zxRFL?ubU;tAh$o%oLYVx){8CBD?PFDF^xM+XqL zm-KjrNSK0ODO!_#g>t+WM~Ob8VYa*5@uB|EMT7BQf9GSVzUUI;P~X0JWBpS;_Ww;e z{{j)6%x!J{PB#Ca;vm*>ld=K)C_&fO%9I+F8egoWS7)NEo3(wCu#kd+fOwGV7YQMY z!6g4?L+kYof6T3n0D&bz-HYtmXo~0l}{5WxG76Ygsj)@S{{__6_O|lQBlg0 zC3}eMdt@t;wMYvg3h6gji_hn}?(=;A`_1#Zy*z)+`*Y^Ze9mX)%$YMD|LD;e1YT)( zD@!w-z54CV=Cb(b4Q7-999>mZcXwF^A|qb&KNHmxH;!|<=6*a~T1R_4dv0ju&4W){ zZpRou)^M3UbLGzSF_qeL@td!>%;+fJh`H8xN8i2o{jETXayy;P{UnKA8MXa2KbWVG zQ-xkiWxXT5N{azxtXWU?7-Q52ackdU)J;(q;B>ABzm3SIS8hCM`qqCs;6dnXA#?g5MW z3B_A(^xTBtM)Z$6#EnLGfh3zxvH8g^EPHNptr%5NL;ys9@S1xfeiZ;(PKdHWSXEN6>n7HyerP_@bm$xb878$vqs% zx{)4~N|lY$_gTI`8D>v+%Xlv56YJ%Z?q^@0vYamHD~`PFf&P|gDwydkvy&-%_ss)e z`x@;XG$(!{m{&dyEl@sS-yViJ-|-%7t*8qr1y zizdib7z#7aWjPhFwpia+UuiwZz&CcoxWU0mCg^MujmgkE45|mG89a);K zu^UQsiIGknb@Vx>PwE8y_JCu3EEtwol9$8^>96ZOb-mdzjaDH^Z7}qIe=w8OS@+xK zvhwN*tr}}H*^t|)7k;-d`TCccXs#*gK5@5u(yU;9byra+DZZf= z4EPzMksI*cQ=CU)U49E)EuA1RARf&t7tC7~?9UP|Z@3@I77<$bB^gbNF3T=fqpEr$ z60s~pyMpk$?&_%;hLOaKRZ0w=n{5sHzPR#@+|Lch+c(#TEn^1rxIY`E3=h4F z!!{m7^-xRiRZNN1A**LQ;N22S-s5$7ieZ*(3;Lc5**F&UejoRsdBcIW;e}W(Qod)W zt5H1djm=8s$)9!$AMMpYy}g@~OQ)_=nN?$6O#M28ZG4)ih{>P|RY*kW2Xg&7(t_vA zqlOvUZV|!-8r{vp1uY>e>1l4Wrjx?mL#}h!gx7h&N+B2b{%EOVe_V9){zS*1R`-1w zmV~D#LT+^4V=K<;{ZVS?L0?y8{ZCe8LZ%I+Tl$O^`E;+pPrBCuHNk)2tdZB7*%0qboXP4`p&$Z(6kGv+j z8Mxjq|4IV?zuV=(n&glFKkf45T>iwr+hur4eB2QIohXN}{fb#l$5Q{iFhnKIc$2t4 z_5tI4PR<+L8o9>#j63XXSnDqyRiO&Lca2|u?_z`!hl_h1$YudDvYb3)6fHYK_Hq8B zLdEQ0o=slGdB303h}}7;00en8GE8Mh7lZ9S=CUcGJ!{C0q0!Bz3-}r9n&l zgQesohIyv^e4G5hJ7b$nQY%U%E!i9S82Q-Q9kwh|Z)&M!j~B6c5veWmSx7y9kQZrN z#h&rRGU?3LW?gT^{drfp(Jnk-|DFe}dYt}Zs*!{x8vQZ8Hko_rjcQ(@ zFyWcNgDf*;MH7@4B8;Ezl3Af+u0B0)?g-l#lSM7&{pHsA)2{}T7EGLzqI&k2mir9^ zE>nMU??1D1$}r-?6|vn#?ZM_LD`GT06q|PFSU$T^v4<--SkwL{H`0yEcY0fm_&*n$ zr98)Um4|6fEDd*m4J;Pxbk;3eB9l?3Ymc1LMoeEA9rRt~9ZBiCzdRg`~(u(%~)a)Zy7SJ?CTRNfvI^ zN9n2#4z-0OHqNk{pL~+(krdLlQ$>#9C0Y4?!O@-9)u(UjExJVLo;ExhxUK0P<$Vqc z7vJ_Qih`}f4mZSDQ;rCqD&i;cX`m}(VlM>$nn>%UE9sd1dN@t0>`^bCxYbs 
z*&ocfdWA7-eJAguFaFT?)#S^=7RJ36UlR-a?QVIH%AaadDZ}P73{#bLpJ3cIt)Rnk z$&i20otO4yBN;25O{2}n>H!f8d99c)W-SKnq6IDe>ZO&1g$+z?QVGpHZxn`VE-;)L zlIAU`q!Fk}n<}SF*5wfDEUFn2FMi5Y=iX5?K5;E!al#>PVL)0%j4UM^DeTNPazYaxOQISwSl8 zpXKx+2ieG_klyW`%^V{=)3ZNo>hv#ZYKdes$EEF^netj1IWPQ$A@smdGn;+ntK?)R zEILcyMa1}LP|fa`<1-e*jn{-GXqX&Y-|fn!FM7wwBV42SBO@~0vqK{Q*`9Q_Yj-eR zzRaea&i7)M_Y4I6I3nP}sk!MQmey=>HFwGr){`XjU>+DkbefAZr)V{OWl_x|hr}Jt z!Pb_zGuB4T&B0pQPJH!WQFEYR7iUK?FGq(019W)51hRa|khQZj@X<%v>-3DGXQ>@D zZ8D!)s8a6goXOVy|&5YO_iZj z$Y{2jv8Xs)!5=9Y7{RE1*xlh}fe}CDql8Teru`j?)inH+@zf^+KP}MQwAp>CR+5FK zmSX<H=Fwy?OS9JT19wBA7=}-=&p>)cW_1H!!v_ioa8*@@Zhm>B_7;xV`eN zp7=#h4KHgs*EusbzkoZmj#};D_REY*+hxVZbu>@AVnkHcMH~b6=Qpw$GJfjG_+Io4 zv&8Ov@C|lzK#=492VojLFEvws1k`0k-d91U8mAn`G8Vso=-GyRdHFy%BaO^>N_wf? zTej3JQ;8DfeDAhkW-knCs=j-==k{ zq;6J|N7|1ni3b-c7;t@~NGaVjUGqIjIB~}pYSnfDrx!(^g*qbn$XMq>h6WT+341=Q zWXD+3?)S_cEY_)b5SXd;1NG>w1lwVwvs^N9XDM~~8?kdQdP=;<`?MuI`Wt7c(-h3q zqD+k}vT5tD>#}~RKXEa+s7j3eLuE|6kB3JOLSOFIWVMrma#3Pb|EVQ=r4Ccvv}d5_ zk@6uh<-zSZZ~Ug?0^|SV=ybt4yI5h}F;;F`BEpT@TH0b?v~xwcMcGe+cZtGl2@v7<3y~?g81s*2@ej5vfTNVBl$P=WA0bU3< zaRqTlXG;$UD^ZM-wezoqlR+d}msAoNV%q$?k~2vD8CkKM>d6VZ6DN$+x^HoAr6E^W z;ghm|{SA}ThBbtZQ^z_BhhgOg9+2!z$M!bL3*eJWg2Ik4J)E+HfK^lh-G55el=KGMYM z(!%O)1uFZir~x-Y+=oB6zDHEZAD`m;J$KDe+0E#)4Y+z;Q+9 zHKD*?mw^RdV?n*C_>3JOBg5P62t@8akkJ&zum*bVTV!)FlfF=I|Y{|SIm1+S_QtUX>J7JUu%%gzvpm?A7; z%XQDgSkOvcASEvhTeN>Yb~W@0gtoi6n>)GxK0Ac3c&5=!^RDvA`rD|>$zH1-m6!x9yQ1O_Lt+!8NIS8LlVfz z0lszMrGk`R_4Pb$pjQXNDy$2)AIJoPNvsp>m^J|r#EVUs_rp8Gs|g6E21c$3L@Z#F zkF4jx&oTdT!Vo6YwKSbC1>bQ=(9`@dva24H{O4;!=(0_FV8A z54{{1KGroH{{=|&WryC3i(754m9H*@xT*v%8pf9vdiyC{r$@1bs08oW#fL&K(u9xJ zDtAKAjjvI~Cqu7#gDbbilMwlrAzptiCq5T?83J6n!RH8a30@nAPlcu$=UVG8@Yj=2 zLe#(M#R(5pXj*KzkdHwG?V7p&^Q!*E5gk|1%*A{#Aq}o>KwW!P2@48M5evSHkV18B z10?YT$oPtdCKiQ{+L;)F4t+Xo-DJ8lw$ZkVAIpA5}O0q;1*L_%b^{ocsX zK+mz`@>#5vsPz9rB|0jgr;2ea{JDOch{2~0Ba!t(Ly(0~*8P)M)(?{vcIfCgB=)zQ(4 z&)zVA1z%ujtO6Xm@A-P@29X;0P-v6@+zP9=6N3Jk)IowC@R88%Ww@N&9mJ5ZU;}(8 zbZZp;a_#NJhyL4V0=l^f7qGi)9cy*Q_1k1YLtgL&gl<>BX9$Dc_)ucIl=xhz>mF{y zM_%D`H{SZdr$Sw>aB0Q**ZEt3S=?w70G|u>oWjj_%fNr;uDNgV@lY=#e6*Upf#4y| bxOL9hqgtE5N*sYu0skcehu+me-2eR_v{?`y literal 0 HcmV?d00001 diff --git a/java/src/Makefile.am b/java/src/Makefile.am index 36fca3a81c3..aaa40525285 100644 --- a/java/src/Makefile.am +++ b/java/src/Makefile.am @@ -36,7 +36,7 @@ jarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar hdf5_javadir = $(libdir) pkgpath = hdf/hdf5lib -CLASSPATH_ENV=CLASSPATH=.:$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$$CLASSPATH +CLASSPATH_ENV=CLASSPATH=.:$(top_srcdir)/java/lib/slf4j-api-2.0.6.jar:$$CLASSPATH AM_JAVACFLAGS = $(H5_JAVACFLAGS) -deprecation diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java index 0e1c0e62ab0..5a17075c1d6 100644 --- a/java/src/hdf/hdf5lib/H5.java +++ b/java/src/hdf/hdf5lib/H5.java @@ -57,6 +57,9 @@ import hdf.hdf5lib.structs.H5O_native_info_t; import hdf.hdf5lib.structs.H5O_token_t; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * @page HDF5LIB HDF5 Java API Package * This class is the Java interface for the HDF5 library. 
@@ -257,7 +260,7 @@ public class H5 implements java.io.Serializable { */ private static final long serialVersionUID = 6129888282117053288L; - private final static org.slf4j.Logger log = org.slf4j.LoggerFactory.getLogger(H5.class); + private final static Logger log = LoggerFactory.getLogger(H5.class); /** * @ingroup JH5 diff --git a/java/test/Makefile.am b/java/test/Makefile.am index 4a6785d29d5..9f39be9ca1c 100644 --- a/java/test/Makefile.am +++ b/java/test/Makefile.am @@ -27,7 +27,7 @@ classes: pkgpath = test hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar -CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/junit.jar:$(top_srcdir)/java/lib/hamcrest-core.jar:$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.33.jar:$$CLASSPATH +CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/junit.jar:$(top_srcdir)/java/lib/hamcrest-core.jar:$(top_srcdir)/java/lib/slf4j-api-2.0.6.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-2.0.6.jar:$$CLASSPATH jarfile = jar$(PACKAGE_TARNAME)test.jar diff --git a/java/test/junit.sh.in b/java/test/junit.sh.in index 4e5152b0e75..0690081386a 100644 --- a/java/test/junit.sh.in +++ b/java/test/junit.sh.in @@ -67,8 +67,8 @@ $top_builddir/java/src/jni/.libs/libhdf5_java.* LIST_JAR_TESTFILES=" $HDFLIB_HOME/hamcrest-core.jar $HDFLIB_HOME/junit.jar -$HDFLIB_HOME/slf4j-api-1.7.33.jar -$HDFLIB_HOME/ext/slf4j-simple-1.7.33.jar +$HDFLIB_HOME/slf4j-api-2.0.6.jar +$HDFLIB_HOME/ext/slf4j-simple-2.0.6.jar " LIST_JAR_FILES=" $top_builddir/java/src/$JARFILE @@ -304,7 +304,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@ COPY_LIBFILES_TO_BLDLIBDIR COPY_DATAFILES_TO_BLDDIR -CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/junit.jar:"$BLDLIBDIR"/hamcrest-core.jar:"$BLDLIBDIR"/slf4j-api-1.7.33.jar:"$BLDLIBDIR"/slf4j-simple-1.7.33.jar:"$TESTJARFILE"" +CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/junit.jar:"$BLDLIBDIR"/hamcrest-core.jar:"$BLDLIBDIR"/slf4j-api-2.0.6.jar:"$BLDLIBDIR"/slf4j-simple-2.0.6.jar:"$TESTJARFILE"" TEST=/usr/bin/test if [ ! 
-x /usr/bin/test ] From f2906fc4d8817994047fafd171c50798c95de4bb Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Mon, 3 Apr 2023 16:38:10 -0500 Subject: [PATCH 095/231] Minor fixes for CMake code and install docs (#2623) --- bin/release | 10 ++-- c++/examples/CMakeTests.cmake | 4 +- config/cmake/scripts/HDF5config.cmake | 30 ++++++++---- config/cmake/scripts/HDF5options.cmake | 2 +- hl/tools/h5watch/CMakeTests.cmake | 2 +- release_docs/INSTALL | 6 ++- release_docs/INSTALL_CMake.txt | 66 +++++++++++++------------- release_docs/INSTALL_Cygwin.txt | 8 ++-- release_docs/README_HDF5_CMake | 6 +-- release_docs/README_HPC | 6 +-- release_docs/USING_CMake_Examples.txt | 2 +- release_docs/USING_HDF5_CMake.txt | 17 ++++--- release_docs/USING_HDF5_VS.txt | 4 +- 13 files changed, 90 insertions(+), 73 deletions(-) diff --git a/bin/release b/bin/release index ca9a47ce3c3..31181f9bcfd 100755 --- a/bin/release +++ b/bin/release @@ -193,19 +193,19 @@ tar2cmakezip() fi # step 2: add batch file for building CMake on window - (cd $cmziptmpsubdir; echo "ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2013 -C Release -V -O hdf5.log" > build-VS2013-32.bat; chmod 755 build-VS2013-32.bat) - (cd $cmziptmpsubdir; echo "ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201364 -C Release -V -O hdf5.log" > build-VS2013-64.bat; chmod 755 build-VS2013-64.bat) (cd $cmziptmpsubdir; echo "ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2015 -C Release -V -O hdf5.log" > build-VS2015-32.bat; chmod 755 build-VS2015-32.bat) (cd $cmziptmpsubdir; echo "ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201564 -C Release -V -O hdf5.log" > build-VS2015-64.bat; chmod 755 build-VS2015-64.bat) (cd $cmziptmpsubdir; echo "ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2017 -C Release -V -O hdf5.log" > build-VS2017-32.bat; chmod 755 build-VS2017-32.bat) (cd $cmziptmpsubdir; echo "ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201764 -C Release -V -O hdf5.log" > build-VS2017-64.bat; chmod 755 build-VS2017-64.bat) (cd $cmziptmpsubdir; echo "ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2019 -C Release -V -O hdf5.log" > build-VS2019-32.bat; chmod 755 build-VS2019-32.bat) (cd $cmziptmpsubdir; echo "ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201964 -C Release -V -O hdf5.log" > build-VS2019-64.bat; chmod 755 build-VS2019-64.bat) + (cd $cmziptmpsubdir; echo "ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2022 -C Release -V -O hdf5.log" > build-VS2022-32.bat; chmod 755 build-VS2022-32.bat) + (cd $cmziptmpsubdir; echo "ctest -S HDF5config.cmake,BUILD_GENERATOR=VS202264 -C Release -V -O hdf5.log" > build-VS2022-64.bat; chmod 755 build-VS2022-64.bat) # step 3: add LIBAEC.tar.gz, ZLib.tar.gz and cmake files cp /mnt/scr1/pre-release/hdf5/CMake/LIBAEC.tar.gz $cmziptmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/ZLib.tar.gz $cmziptmpsubdir - cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.2-Source.zip $cmziptmpsubdir + cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.3-Source.zip $cmziptmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/hdf5_plugins-master.zip $cmziptmpsubdir cp $cmziptmpsubdir/$version/config/cmake/scripts/CTestScript.cmake $cmziptmpsubdir cp $cmziptmpsubdir/$version/config/cmake/scripts/HDF5config.cmake $cmziptmpsubdir @@ -297,7 +297,7 @@ tar2cmaketgz() # step 3: add LIBAEC.tar.gz, ZLib.tar.gz and cmake files cp /mnt/scr1/pre-release/hdf5/CMake/LIBAEC.tar.gz $cmgztmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/ZLib.tar.gz $cmgztmpsubdir - cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.2-Source.tar.gz $cmgztmpsubdir 
+ cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.3-Source.tar.gz $cmgztmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/hdf5_plugins-master.tar.gz $cmgztmpsubdir cp $cmgztmpsubdir/$version/config/cmake/scripts/CTestScript.cmake $cmgztmpsubdir cp $cmgztmpsubdir/$version/config/cmake/scripts/HDF5config.cmake $cmgztmpsubdir @@ -376,7 +376,7 @@ tar2hpccmaketgz() # step 3: add LIBAEC.tar.gz, ZLib.tar.gz and cmake files cp /mnt/scr1/pre-release/hdf5/CMake/LIBAEC.tar.gz $cmgztmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/ZLib.tar.gz $cmgztmpsubdir - cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.2-Source.tar.gz $cmgztmpsubdir + cp /mnt/scr1/pre-release/hdf5/CMake/HDF5Examples-2.0.3-Source.tar.gz $cmgztmpsubdir cp /mnt/scr1/pre-release/hdf5/CMake/hdf5_plugins-master.tar.gz $cmgztmpsubdir cp $cmgztmpsubdir/$version/config/cmake/scripts/CTestScript.cmake $cmgztmpsubdir cp $cmgztmpsubdir/$version/config/cmake/scripts/HDF5config.cmake $cmgztmpsubdir diff --git a/c++/examples/CMakeTests.cmake b/c++/examples/CMakeTests.cmake index f5b2a88e741..f710204a948 100644 --- a/c++/examples/CMakeTests.cmake +++ b/c++/examples/CMakeTests.cmake @@ -104,8 +104,8 @@ foreach (example ${tutr_examples}) -D "TEST_ARGS:STRING=" -D "TEST_EXPECT=0" -D "TEST_SKIP_COMPARE=TRUE" - -D "TEST_OUTPUT=tutr_cpp_ex_${example}.txt" - #-D "TEST_REFERENCE=cpp_ex_tutr_${example}.out" + -D "TEST_OUTPUT=cpp_ex_${example}.txt" + #-D "TEST_REFERENCE=cpp_ex_${example}.out" -D "TEST_FOLDER=${PROJECT_BINARY_DIR}" -P "${HDF_RESOURCES_DIR}/runTest.cmake" ) diff --git a/config/cmake/scripts/HDF5config.cmake b/config/cmake/scripts/HDF5config.cmake index 2fbb26cdd7b..8f7a77d8138 100644 --- a/config/cmake/scripts/HDF5config.cmake +++ b/config/cmake/scripts/HDF5config.cmake @@ -11,7 +11,7 @@ # ############################################################################################# ### ${CTEST_SCRIPT_ARG} is of the form OPTION=VALUE ### -### BUILD_GENERATOR required [Unix, VS2019, VS201964, VS2017, VS201764, VS2015, VS201564] ### +### BUILD_GENERATOR required [Unix, VS2022, VS2019, VS201964, VS2017, VS201764, VS2015, VS201564] ### ### ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201764 -C Release -VV -O hdf5.log ### ############################################################################################# @@ -23,14 +23,14 @@ cmake_minimum_required (VERSION 3.18) # BUILD_GENERATOR - The cmake build generator: # MinGW * MinGW Makefiles # Unix * Unix Makefiles +# VS2022 * Visual Studio 17 2022 +# VS202264 * Visual Studio 17 2022 # VS2019 * Visual Studio 16 2019 # VS201964 * Visual Studio 16 2019 # VS2017 * Visual Studio 15 2017 # VS201764 * Visual Studio 15 2017 Win64 # VS2015 * Visual Studio 14 2015 # VS201564 * Visual Studio 14 2015 Win64 -# VS2013 * Visual Studio 12 2013 -# VS201364 * Visual Studio 12 2013 Win64 # # INSTALLDIR - root folder where hdf5 is installed # CTEST_CONFIGURATION_TYPE - Release, Debug, etc @@ -43,9 +43,9 @@ set (CTEST_SOURCE_VERSEXT "") ############################################################################## # handle input parameters to script. 
#BUILD_GENERATOR - which CMake generator to use, required -#INSTALLDIR - HDF5-1.13.x root folder +#INSTALLDIR - HDF5-1.15.x root folder #CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo -#CTEST_SOURCE_NAME - name of source folder; HDF5-1.13.x +#CTEST_SOURCE_NAME - name of source folder; HDF5-1.15.x #MODEL - CDash group name #HPC - run alternate configurations for HPC machines; sbatch, bsub, raybsub, qsub #MPI - enable MPI @@ -68,7 +68,7 @@ endif () # build generator must be defined if (NOT DEFINED BUILD_GENERATOR) - message (FATAL_ERROR "BUILD_GENERATOR must be defined - Unix, VS2019, VS201964, VS2017, VS201764, VS2015, VS201564") + message (FATAL_ERROR "BUILD_GENERATOR must be defined - Unix, VS2022, VS2019, VS201964, VS2017, VS201764, VS2015, VS201564") endif () ################################################################### @@ -105,12 +105,24 @@ endif () ######### Following describes compiler ############ if (NOT DEFINED HPC) if (NOT DEFINED BUILD_GENERATOR) - message (FATAL_ERROR "BUILD_GENERATOR must be defined - Unix, VS2019, VS201964, VS2017, VS201764, VS2015, VS201564") + message (FATAL_ERROR "BUILD_GENERATOR must be defined - Unix, VS2022, VS2019, VS201964, VS2017, VS201764, VS2015, VS201564") endif () if (WIN32 AND NOT MINGW) set (SITE_OS_NAME "Windows") set (SITE_OS_VERSION "WIN10") - if (BUILD_GENERATOR STREQUAL "VS201964") + if (BUILD_GENERATOR STREQUAL "VS202264") + set (CTEST_CMAKE_GENERATOR "Visual Studio 17 2022") + set (CMAKE_GENERATOR_ARCHITECTURE "x64") + set (SITE_OS_BITS "64") + set (SITE_COMPILER_NAME "vs2022") + set (SITE_COMPILER_VERSION "17") + elseif (BUILD_GENERATOR STREQUAL "VS2022") + set (CTEST_CMAKE_GENERATOR "Visual Studio 17 2022") + set (CMAKE_GENERATOR_ARCHITECTURE "Win32") + set (SITE_OS_BITS "32") + set (SITE_COMPILER_NAME "vs2022") + set (SITE_COMPILER_VERSION "17") + elseif (BUILD_GENERATOR STREQUAL "VS201964") set (CTEST_CMAKE_GENERATOR "Visual Studio 16 2019") set (CMAKE_GENERATOR_ARCHITECTURE "x64") set (SITE_OS_BITS "64") @@ -163,7 +175,7 @@ if (NOT DEFINED HPC) set (SITE_COMPILER_NAME "vs2012") set (SITE_COMPILER_VERSION "11") else () - message (FATAL_ERROR "Invalid BUILD_GENERATOR must be - Unix, VS2019, VS201964, VS2017, VS201764, VS2015, VS201564") + message (FATAL_ERROR "Invalid BUILD_GENERATOR must be - Unix, VS2022, VS2019, VS201964, VS2017, VS201764, VS2015, VS201564") endif () ## Set the following to unique id your computer ## set (CTEST_SITE "WIN7${BUILD_GENERATOR}.XXXX") diff --git a/config/cmake/scripts/HDF5options.cmake b/config/cmake/scripts/HDF5options.cmake index 7a5d3276f5b..92bfd37ecbe 100644 --- a/config/cmake/scripts/HDF5options.cmake +++ b/config/cmake/scripts/HDF5options.cmake @@ -74,7 +74,7 @@ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRIN #set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=OFF") #### package examples #### -#set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACK_EXAMPLES:BOOL=ON -DHDF5_EXAMPLES_COMPRESSED:STRING=HDF5Examples-2.0.1-Source.tar.gz -DHDF5_EXAMPLES_COMPRESSED_DIR:PATH=${CTEST_SCRIPT_DIRECTORY}") +#set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACK_EXAMPLES:BOOL=ON -DHDF5_EXAMPLES_COMPRESSED:STRING=HDF5Examples-2.0.3-Source.tar.gz -DHDF5_EXAMPLES_COMPRESSED_DIR:PATH=${CTEST_SCRIPT_DIRECTORY}") ############################################################################################# ### enable parallel builds diff --git a/hl/tools/h5watch/CMakeTests.cmake b/hl/tools/h5watch/CMakeTests.cmake index 
fa3cb1454b7..a7828b30fa6 100644 --- a/hl/tools/h5watch/CMakeTests.cmake +++ b/hl/tools/h5watch/CMakeTests.cmake @@ -86,7 +86,7 @@ add_custom_target(H5WATCH_files ALL COMMENT "Copying files needed by H5WATCH tes DEPENDS ${last_test} FIXTURES_REQUIRED gen_test_watch ) - set (last_test "H5WATCH_ARGS-h5watch-${resultfile}s") + set (last_test "H5WATCH_ARGS-h5watch-${resultfile}") endif () endmacro () diff --git a/release_docs/INSTALL b/release_docs/INSTALL index 177f3b60873..dd5ea34435c 100644 --- a/release_docs/INSTALL +++ b/release_docs/INSTALL @@ -417,7 +417,9 @@ CONTENTS 4.3.11. Backward compatibility - The 1.13 version of the HDF5 library can be configured to operate + The 1.15 version of the HDF5 library can be configured to operate + identically to the v1.14 library with the + --with-default-api-version=v114 identically to the v1.12 library with the --with-default-api-version=v112 configure flag, or identically to the v1.10 library with the @@ -427,7 +429,7 @@ CONTENTS configure flag, or identically to the v1.6 library with the --with-default-api-version=v16 configure flag. This allows existing code to be compiled with the - v1.13 library without requiring immediate changes to the application + v1.15 library without requiring immediate changes to the application source code. For additional configuration options and other details, see "API Compatibility Macros": diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index b5521f8f6f7..d9eb97d80f3 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -27,11 +27,11 @@ Obtaining HDF5 source code 2. Obtain compressed (*.tar or *.zip) HDF5 source from https://portal.hdfgroup.org/display/support/Building+HDF5+with+CMake and put it in "myhdfstuff". - Uncompress the file. There should be a hdf5-1.13."X" folder. + Uncompress the file. There should be a hdf5-1.15."X" folder. CMake version 1. We suggest you obtain the latest CMake from the Kitware web site. - The HDF5 1.13."X" product requires a minimum CMake version 3.12, + The HDF5 1.15."X" product requires a minimum CMake version 3.12, where "X" is the current HDF5 release version. If you are using VS2022, the minimum version is 3.21. @@ -54,7 +54,7 @@ The following files referenced below are available at the HDF web site: https://portal.hdfgroup.org/display/support/Building+HDF5+with+CMake Single compressed file with all the files needed, including source: - CMake-hdf5-1.13.X.zip or CMake-hdf5-1.13.X.tar.gz + CMake-hdf5-1.15.X.zip or CMake-hdf5-1.15.X.tar.gz Individual files included in the above mentioned compressed files ----------------------------------------------- @@ -83,10 +83,10 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: 1. Change to the development directory "myhdfstuff". - 2. Download the CMake-hdf5-1.13.X.zip(.tar.gz) file to "myhdfstuff". + 2. Download the CMake-hdf5-1.15.X.zip(.tar.gz) file to "myhdfstuff". Uncompress the file. - 3. Change to the source directory "hdf5-1.13.x". + 3. Change to the source directory "hdf5-1.15.x". CTestScript.cmake file should not be modified. 4. Edit the platform configuration file, HDF5options.cmake, if you want to change @@ -96,6 +96,10 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: 5. 
From the "myhdfstuff" directory execute the CTest Script with the following options: + On 32-bit Windows with Visual Studio 2022, execute: + ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2022 -C Release -VV -O hdf5.log + On 64-bit Windows with Visual Studio 2022, execute: + ctest -S HDF5config.cmake,BUILD_GENERATOR=VS202264 -C Release -VV -O hdf5.log On 32-bit Windows with Visual Studio 2019, execute: ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2019 -C Release -VV -O hdf5.log On 64-bit Windows with Visual Studio 2019, execute: @@ -104,17 +108,13 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2017 -C Release -VV -O hdf5.log On 64-bit Windows with Visual Studio 2017, execute: ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201764 -C Release -VV -O hdf5.log - On 32-bit Windows with Visual Studio 2015, execute: - ctest -S HDF5config.cmake,BUILD_GENERATOR=VS2015 -C Release -VV -O hdf5.log - On 64-bit Windows with Visual Studio 2015, execute: - ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201564 -C Release -VV -O hdf5.log On Linux and Mac, execute: ctest -S HDF5config.cmake,BUILD_GENERATOR=Unix -C Release -VV -O hdf5.log The supplied build scripts are versions of the above. The command above will configure, build, test, and create an install package in the myhdfstuff folder. It will have the format: - HDF5-1.13.NN-. + HDF5-1.15.NN-. On Unix, will be "Linux". A similar .sh file will also be created. On Windows, will be "win64" or "win32". If you have an @@ -135,13 +135,13 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: 6. To install, "X" is the current release version On Windows (with WiX installed), execute: - HDF5-1.13."X"-win32.msi or HDF5-1.13."X"-win64.msi + HDF5-1.15."X"-win32.msi or HDF5-1.15."X"-win64.msi By default this program will install the hdf5 library into the "C:\Program Files" directory and will create the following directory structure: HDF_Group --HDF5 - ----1.13."X" + ----1.15."X" ------bin ------include ------lib @@ -149,40 +149,40 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: On Linux, change to the install destination directory (create it if doesn't exist) and execute: - /myhdfstuff/HDF5-1.13."X"-Linux.sh + /myhdfstuff/HDF5-1.15."X"-Linux.sh After accepting the license, the script will prompt: By default the HDF5 will be installed in: - "/HDF5-1.13."X"-Linux" - Do you want to include the subdirectory HDF5-1.13."X"-Linux? + "/HDF5-1.15."X"-Linux" + Do you want to include the subdirectory HDF5-1.15."X"-Linux? Saying no will install in: "" [Yn]: Note that the script will create the following directory structure relative to the install point: HDF_Group --HDF5 - ----1.13."X" + ----1.15."X" ------bin ------include ------lib ------share - On Mac you will find HDF5-1.13."X"-Darwin.dmg in the myhdfstuff folder. Click + On Mac you will find HDF5-1.15."X"-Darwin.dmg in the myhdfstuff folder. Click on the dmg file to proceed with installation. 
After accepting the license, there will be a folder with the following structure: HDF_Group --HDF5 - ----1.13."X" + ----1.15."X" ------bin ------include ------lib ------share By default the installation will create the bin, include, lib and cmake - folders in the /HDF_Group/HDF5/1.13."X" + folders in the /HDF_Group/HDF5/1.15."X" The depends on the build platform; Windows will set the default to: - C:/Program Files/HDF_Group/HDF5/1.13."X" + C:/Program Files/HDF_Group/HDF5/1.15."X" Linux will set the default to: - "myhdfstuff/HDF_Group/HDF5/1.13."X" + "myhdfstuff/HDF_Group/HDF5/1.15."X" The default can be changed by adding ",INSTALLDIR=" to the "ctest -S HDF5config.cmake..." command. For example on linux: ctest -S HDF5config.cmake,INSTALLDIR=/usr/local/myhdf5,BUILD_GENERATOR=Unix -C Release -VV -O hdf5.log @@ -209,13 +209,13 @@ Notes: This short set of instructions is written for users who want to 5. Configure the C library, tools and tests with one of the following commands: On Windows 32 bit - cmake -G "Visual Studio 12 2013" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.13."X" + cmake -G "Visual Studio 12 2013" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.15."X" On Windows 64 bit - cmake -G "Visual Studio 12 2013 Win64" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.13."X" + cmake -G "Visual Studio 12 2013 Win64" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.15."X" On Linux and Mac - cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ../hdf5-1.13."X" + cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ../hdf5-1.15."X" where "X" is the current release version. @@ -230,13 +230,13 @@ Notes: This short set of instructions is written for users who want to 9. To install On Windows (with WiX installed), execute: - HDF5-1.13."X"-win32.msi or HDF5-1.13."X"-win64.msi + HDF5-1.15."X"-win32.msi or HDF5-1.15."X"-win64.msi By default this program will install the hdf5 library into the "C:\Program Files" directory and will create the following directory structure: HDF_Group --HDF5 - ----1.13."X" + ----1.15."X" ------bin ------include ------lib @@ -244,28 +244,28 @@ Notes: This short set of instructions is written for users who want to On Linux, change to the install destination directory (create if doesn't exist) and execute: - /myhdfstuff/build/HDF5-1.13."X"-Linux.sh + /myhdfstuff/build/HDF5-1.15."X"-Linux.sh After accepting the license, the script will prompt: By default the HDF5 will be installed in: - "/HDF5-1.13."X"-Linux" - Do you want to include the subdirectory HDF5-1.13."X"-Linux? + "/HDF5-1.15."X"-Linux" + Do you want to include the subdirectory HDF5-1.15."X"-Linux? Saying no will install in: "" [Yn]: Note that the script will create the following directory structure relative to the install point: HDF_Group --HDF5 - ----1.13."X" + ----1.15."X" ------bin ------include ------lib ------share - On Mac you will find HDF5-1.13."X"-Darwin.dmg in the build folder. Click + On Mac you will find HDF5-1.15."X"-Darwin.dmg in the build folder. Click on the dmg file to proceed with installation. 
After accepting the license, there will be a folder with the following structure: HDF_Group --HDF5 - ----1.13."X" + ----1.15."X" ------bin ------include ------lib @@ -277,7 +277,7 @@ IV. Further considerations ======================================================================== 1. We suggest you obtain the latest CMake for windows from the Kitware - web site. The HDF5 1.13."X" product requires a minimum CMake version 3.18. + web site. The HDF5 1.15."X" product requires a minimum CMake version 3.18. 2. If you plan to use Zlib or Szip: A. Download the binary packages and install them in a central location. diff --git a/release_docs/INSTALL_Cygwin.txt b/release_docs/INSTALL_Cygwin.txt index b3c8a83c2d5..afc5deb4fd3 100644 --- a/release_docs/INSTALL_Cygwin.txt +++ b/release_docs/INSTALL_Cygwin.txt @@ -98,19 +98,19 @@ Build, Test and Install HDF5 on Cygwin The HDF5 source code is distributed in a variety of formats which can be unpacked with the following commands, each of which creates - an `hdf5-1.13.x' directory. + an `hdf5-1.15.x' directory. 2.1 Non-compressed tar archive (*.tar) - $ tar xf hdf5-1.13.x.tar + $ tar xf hdf5-1.15.x.tar 2.2 Gzip'd tar archive (*.tar.gz) - $ gunzip < hdf5-1.13.x.tar.gz | tar xf - + $ gunzip < hdf5-1.15.x.tar.gz | tar xf - 2.3 Bzip'd tar archive (*.tar.bz2) - $ bunzip2 < hdf5-1.13.x.tar.bz2 | tar xf - + $ bunzip2 < hdf5-1.15.x.tar.bz2 | tar xf - 2. Setup Environment diff --git a/release_docs/README_HDF5_CMake b/release_docs/README_HDF5_CMake index 8b301125b6e..619e2513519 100644 --- a/release_docs/README_HDF5_CMake +++ b/release_docs/README_HDF5_CMake @@ -6,18 +6,18 @@ This tar file contains CTestScript.cmake HDF5config.cmake CMake scripts for building HDF5 HDF5options.cmake - hdf5-1.13.2 HDF5 1.13.2 source + hdf5-1.15.0 HDF5 1.15.0 source LIBAEC.tar.gz source for building SZIP replacement ZLib.tar.gz source for building ZLIB hdf5_plugins.tar.gz source for building compression plugins HDF5Examples-2.0.1-Source.tar.gz source for building examples For more information about building HDF5 with CMake, see USING_HDF5_CMake.txt in -hdf5-1.13.2/release_docs, or +hdf5-1.15.0/release_docs, or https://portal.hdfgroup.org/display/support/Building+HDF5+with+CMake. For more information about building HDF5 with CMake on HPC machines, including -cross compiling on Cray XC40, see README_HPC in hdf5-1.13.2/release_docs. +cross compiling on Cray XC40, see README_HPC in hdf5-1.15.0/release_docs. 
diff --git a/release_docs/README_HPC b/release_docs/README_HPC index 02034b4eb18..bea5902a859 100644 --- a/release_docs/README_HPC +++ b/release_docs/README_HPC @@ -154,10 +154,10 @@ If using ctest is undesirable, one can create a build directory and run the cmak configure command, for example "/projects/Mutrino/hpcsoft/cle6.0/common/cmake/3.18/bin/cmake" --C "/hdf5-1.13.3/config/cmake/cacheinit.cmake" +-C "/hdf5-1.15.x/config/cmake/cacheinit.cmake" -DCMAKE_BUILD_TYPE:STRING=Release -DHDF5_BUILD_FORTRAN:BOOL=ON -DHDF5_BUILD_JAVA:BOOL=OFF --DCMAKE_INSTALL_PREFIX:PATH=/HDF_Group/HDF5/1.13.3 +-DCMAKE_INSTALL_PREFIX:PATH=/HDF_Group/HDF5/1.15.x -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=OFF -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF -DHDF5_ENABLE_PARALLEL:BOOL=ON -DHDF5_BUILD_CPP_LIB:BOOL=OFF -DHDF5_BUILD_JAVA:BOOL=OFF -DHDF5_ENABLE_THREADSAFE:BOOL=OFF @@ -168,7 +168,7 @@ configure command, for example -DLOCAL_BATCH_SCRIPT_NAME:STRING=knl_ctestS.sl -DLOCAL_BATCH_SCRIPT_PARALLEL_NAME:STRING=knl_ctestP.sl -DSITE:STRING=mutrino -DBUILDNAME:STRING=par-knl_GCC493-SHARED-Linux-4.4.156-94.61.1.16335.0.PTF.1107299-default-x86_64 -"-GUnix Makefiles" "" "/hdf5-1.13.3" +"-GUnix Makefiles" "" "/hdf5-1.15.x" followed by make and batch jobs to run tests. diff --git a/release_docs/USING_CMake_Examples.txt b/release_docs/USING_CMake_Examples.txt index 853fa4fc350..6598876beda 100644 --- a/release_docs/USING_CMake_Examples.txt +++ b/release_docs/USING_CMake_Examples.txt @@ -21,7 +21,7 @@ I. Preconditions ======================================================================== 1. We suggest you obtain the latest CMake for your platform from the Kitware - web site. The HDF5 1.13.x product requires a minimum CMake version + web site. The HDF5 1.15.x product requires a minimum CMake version of 3.18. If you are using VS2022, the minimum version is 3.21. 2. You have installed the HDF5 library built with CMake, by executing diff --git a/release_docs/USING_HDF5_CMake.txt b/release_docs/USING_HDF5_CMake.txt index e8e50df97fe..27c421b831b 100644 --- a/release_docs/USING_HDF5_CMake.txt +++ b/release_docs/USING_HDF5_CMake.txt @@ -38,7 +38,7 @@ I. Preconditions ======================================================================== 1. We suggest you obtain the latest CMake for your platform from the Kitware - web site. The HDF5 1.13.x product requires a minimum CMake version + web site. The HDF5 1.15.x product requires a minimum CMake version of 3.18. If you are using VS2022, the minimum version is 3.21. 2. You have installed the HDF5 library built with CMake, by executing @@ -50,24 +50,24 @@ I. Preconditions or environment variable, set(ENV{HDF5_ROOT} "") to the installed location of HDF5. On Windows: - HDF5_ROOT=C:/Program Files/HDF_Group/HDF5/1.13.x/ + HDF5_ROOT=C:/Program Files/HDF_Group/HDF5/1.15.x/ On unix: - HDF5_ROOT=/HDF_Group/HDF5/1.13.x/ + HDF5_ROOT=/HDF_Group/HDF5/1.15.x/ If you are using shared libraries, you may need to add to the path environment variable. Set the path environment variable to the installed location of the library files for HDF5. On Windows (*.dll): - PATH=%PATH%;C:/Program Files/HDF_Group/HDF5/1.13.x/bin + PATH=%PATH%;C:/Program Files/HDF_Group/HDF5/1.15.x/bin On unix (*.so): - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/HDF_Group/HDF5/1.13.x/lib + LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/HDF_Group/HDF5/1.15.x/lib If you are using filter plugin libraries, you will need to set the HDF5_PLUGIN_PATH environment variable. 
On Windows: - HDF5_PLUGIN_PATH=C:/Program Files/HDF_Group/HDF5/1.13.x/lib/plugin + HDF5_PLUGIN_PATH=C:/Program Files/HDF_Group/HDF5/1.15.x/lib/plugin On unix: - HDF5_PLUGIN_PATH=/HDF_Group/HDF5/1.13.x/lib/plugin + HDF5_PLUGIN_PATH=/HDF_Group/HDF5/1.15.x/lib/plugin (Note there are no quote characters used on Windows and all platforms use forward slashes) @@ -126,6 +126,9 @@ These steps are described in more detail below. * Visual Studio 16 2019 * ... in addition VS2019 will need to set the "-A" option, * ... [Win32, x64, ARM, ARM64] + * Visual Studio 17 2022 + * ... in addition VS2022 will need to set the "-A" option, + * ... [Win32, x64, ARM, ARM64] is: * BUILD_TESTING:BOOL=ON diff --git a/release_docs/USING_HDF5_VS.txt b/release_docs/USING_HDF5_VS.txt index 5ec9996b1c9..9343d9dcfa4 100644 --- a/release_docs/USING_HDF5_VS.txt +++ b/release_docs/USING_HDF5_VS.txt @@ -54,11 +54,11 @@ Using Visual Studio 2008 with HDF5 Libraries built with Visual Studio 2008 and select "x64". 2.2 Find the box "Show directories for", choose "Include files", add the - header path (i.e. c:\Program Files\HDF_Group\HDF5\1.13.x\include) + header path (i.e. c:\Program Files\HDF_Group\HDF5\1.15.x\include) to the included directories. 2.3 Find the box "Show directories for", choose "Library files", add the - library path (i.e. c:\Program Files\HDF_Group\HDF5\1.13.x\lib) + library path (i.e. c:\Program Files\HDF_Group\HDF5\1.15.x\lib) to the library directories. 2.4 If using Fortran libraries, you will also need to setup the path From c76691de1a2d0319abe311ac972a471bd587c23d Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Fri, 7 Apr 2023 12:35:51 -0500 Subject: [PATCH 096/231] Fix #2598 sanitize leak (#2660) --- release_docs/RELEASE.txt | 7 ++++++- tools/lib/h5trav.c | 8 ++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index e02a58d44ec..be040f49c67 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -237,7 +237,12 @@ Bug Fixes since HDF5-1.13.3 release Tools ----- - - + - In the tools traverse function - an error in either visit call + will bypass the cleanup of the local data variables. + + Replaced the H5TOOLS_GOTO_ERROR with just H5TOOLS_ERROR. 
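To make the note above concrete, here is a minimal, self-contained C sketch of the failure mode. This is not the actual h5trav.c or h5tools_error.h code; traverse_sketch() and visit_links() are hypothetical stand-ins, and it assumes the goto-style macro records the error and then jumps past the in-line cleanup to the function's "done" label, while the plain error macro only records the error and falls through:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for a failing H5Lvisit_by_name2()/H5Literate_by_name2() call */
    static int
    visit_links(void)
    {
        return -1;
    }

    static int
    traverse_sketch(void)
    {
        int   ret_value = 0;
        char *visited   = malloc(64); /* stand-in for the visited-objects table */

        if (visit_links() < 0) {
            fprintf(stderr, "visit failed\n");
            ret_value = -1; /* record the failure and keep going (the H5TOOLS_ERROR behaviour) */
            /* a "goto done;" here (the H5TOOLS_GOTO_ERROR behaviour) would skip the free() below */
        }

        free(visited); /* local cleanup now runs on both the success and the error path */
        return ret_value;
    }

    int
    main(void)
    {
        return traverse_sketch() < 0 ? 1 : 0;
    }

Run as written, the sketch reports the failure but still frees the table and returns, which is the behaviour the macro change restores in traverse().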
+ + (ADB - 2023/04/06 GH-2598) Performance diff --git a/tools/lib/h5trav.c b/tools/lib/h5trav.c index 599cb227a26..564dcba6772 100644 --- a/tools/lib/h5trav.c +++ b/tools/lib/h5trav.c @@ -144,9 +144,9 @@ trav_token_visited(hid_t loc_id, trav_addr_t *visited, H5O_token_t *token) size_t u; /* Local index variable */ int token_cmp; - /* Look for address */ + /* Look for path associated with token */ for (u = 0; u < visited->nused; u++) { - /* Check for address already in array */ + /* Check for token already in array */ if (H5Otoken_cmp(loc_id, &visited->objs[u].token, token, &token_cmp) < 0) return NULL; if (!token_cmp) @@ -281,13 +281,13 @@ traverse(hid_t file_id, const char *grp_name, hbool_t visit_start, hbool_t recur /* Visit all links in group, recursively */ if (H5Lvisit_by_name2(file_id, grp_name, trav_index_by, trav_index_order, traverse_cb, &udata, H5P_DEFAULT) < 0) - H5TOOLS_GOTO_ERROR((-1), "H5Lvisit_by_name failed"); + H5TOOLS_ERROR((-1), "H5Lvisit_by_name failed"); } /* end if */ else { /* Iterate over links in group */ if (H5Literate_by_name2(file_id, grp_name, trav_index_by, trav_index_order, NULL, traverse_cb, &udata, H5P_DEFAULT) < 0) - H5TOOLS_GOTO_ERROR((-1), "H5Literate_by_name failed"); + H5TOOLS_ERROR((-1), "H5Literate_by_name failed"); } /* end else */ /* Free visited addresses table */ From 364b8b42e404c148b131cd2792dcadf3c704bd01 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 10 Apr 2023 11:39:46 -0700 Subject: [PATCH 097/231] Remove dead code hidden behind #ifdef LATER (#2686) --- src/H5FLprivate.h | 3 - src/H5FOprivate.h | 4 -- src/H5RSprivate.h | 3 - src/H5SLprivate.h | 3 - src/H5Sselect.c | 132 ---------------------------------- src/H5UCprivate.h | 3 - src/H5VM.c | 75 ------------------- test/tattr.c | 36 ++-------- test/tfile.c | 6 -- testpar/t_dset.c | 3 - tools/lib/h5tools_str.c | 16 ----- tools/test/perform/zip_perf.c | 20 ------ 12 files changed, 4 insertions(+), 300 deletions(-) diff --git a/src/H5FLprivate.h b/src/H5FLprivate.h index ca16360d3a9..0fb4b327496 100644 --- a/src/H5FLprivate.h +++ b/src/H5FLprivate.h @@ -24,9 +24,6 @@ #define H5FLprivate_H /* Public headers needed by this file */ -#ifdef LATER -#include "H5FLpublic.h" /*API prototypes */ -#endif /* LATER */ /* Private headers needed by this file */ diff --git a/src/H5FOprivate.h b/src/H5FOprivate.h index 9db0b0dd743..1f44ec839bf 100644 --- a/src/H5FOprivate.h +++ b/src/H5FOprivate.h @@ -16,10 +16,6 @@ #ifndef H5FOprivate_H #define H5FOprivate_H -#ifdef LATER -#include "H5FOpublic.h" -#endif /* LATER */ - /* Private headers needed by this file */ #include "H5private.h" /* Generic Functions */ #include "H5Fprivate.h" /* File access */ diff --git a/src/H5RSprivate.h b/src/H5RSprivate.h index 0c8f3eaeffc..a38214039ab 100644 --- a/src/H5RSprivate.h +++ b/src/H5RSprivate.h @@ -19,9 +19,6 @@ /**************************************/ /* Public headers needed by this file */ /**************************************/ -#ifdef LATER -#include "H5RSpublic.h" -#endif /* LATER */ /***************************************/ /* Private headers needed by this file */ diff --git a/src/H5SLprivate.h b/src/H5SLprivate.h index 6bd1b79fe65..d7eb5be8b83 100644 --- a/src/H5SLprivate.h +++ b/src/H5SLprivate.h @@ -19,9 +19,6 @@ /**************************************/ /* Public headers needed by this file */ /**************************************/ -#ifdef LATER -#include "H5SLpublic.h" -#endif /* LATER */ /***************************************/ /* Private headers needed 
by this file */ diff --git a/src/H5Sselect.c b/src/H5Sselect.c index c2fb7fab8a3..9d13cf2972c 100644 --- a/src/H5Sselect.c +++ b/src/H5Sselect.c @@ -49,12 +49,6 @@ /* Local Prototypes */ /********************/ -#ifdef LATER -static herr_t H5S__select_iter_block(const H5S_sel_iter_t *iter, hsize_t *start, hsize_t *end); -static htri_t H5S__select_iter_has_next_block(const H5S_sel_iter_t *iter); -static herr_t H5S__select_iter_next_block(H5S_sel_iter_t *iter); -#endif /* LATER */ - /*****************************/ /* Library Private Variables */ /*****************************/ @@ -1191,50 +1185,6 @@ H5S_select_iter_coords(const H5S_sel_iter_t *sel_iter, hsize_t *coords) FUNC_LEAVE_NOAPI(ret_value) } /* end H5S_select_iter_coords() */ -#ifdef LATER - -/*-------------------------------------------------------------------------- - NAME - H5S__select_iter_block - PURPOSE - Get the block of the current iterator position - USAGE - herr_t H5S__select_iter_block(sel_iter,start,end) - const H5S_sel_iter_t *sel_iter; IN: Selection iterator to query - hsize_t *start; OUT: Array to place iterator start block coordinates - hsize_t *end; OUT: Array to place iterator end block coordinates - RETURNS - Non-negative on success, negative on failure. - DESCRIPTION - The current location of the iterator within the selection is placed in - the COORDS array. - GLOBAL VARIABLES - COMMENTS, BUGS, ASSUMPTIONS - This routine participates in the "Inlining C function pointers" - pattern, don't call it directly, use the appropriate macro - defined in H5Sprivate.h. - EXAMPLES - REVISION LOG ---------------------------------------------------------------------------*/ -static herr_t -H5S__select_iter_block(const H5S_sel_iter_t *iter, hsize_t *start, hsize_t *end) -{ - herr_t ret_value; /* return value */ - - FUNC_ENTER_PACKAGE_NOERR - - /* Check args */ - HDassert(iter); - HDassert(start); - HDassert(end); - - /* Call iter_block routine for selection type */ - ret_value = (*iter->type->iter_block)(iter, start, end); - - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5S__select_iter_block() */ -#endif /* LATER */ - /*-------------------------------------------------------------------------- NAME H5S_select_iter_nelmts @@ -1271,46 +1221,6 @@ H5S_select_iter_nelmts(const H5S_sel_iter_t *sel_iter) FUNC_LEAVE_NOAPI(ret_value) } /* end H5S_select_iter_nelmts() */ -#ifdef LATER - -/*-------------------------------------------------------------------------- - NAME - H5S__select_iter_has_next_block - PURPOSE - Check if there is another block available in the selection iterator - USAGE - htri_t H5S__select_iter_has_next_block(sel_iter) - const H5S_sel_iter_t *sel_iter; IN: Selection iterator to query - RETURNS - Non-negative on success, negative on failure. - DESCRIPTION - Check if there is another block available to advance to in the selection - iterator. - GLOBAL VARIABLES - COMMENTS, BUGS, ASSUMPTIONS - This routine participates in the "Inlining C function pointers" - pattern, don't call it directly, use the appropriate macro - defined in H5Sprivate.h. 
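For readers unfamiliar with the "Inlining C function pointers" pattern that these removed comments refer to, here is a minimal, self-contained C sketch. The type, function, and macro names are hypothetical, not the real H5S internals: each selection type supplies a small table of function pointers, and callers go through a thin wrapper (in the library, a macro defined in H5Sprivate.h) instead of dereferencing the pointer at every call site.

    #include <stdio.h>

    typedef struct sel_iter_t sel_iter_t;

    /* Per-selection-type operations ("vtable") */
    typedef struct {
        int (*iter_has_next_block)(const sel_iter_t *iter);
    } sel_iter_class_t;

    struct sel_iter_t {
        const sel_iter_class_t *type;        /* vtable for this selection type */
        int                     blocks_left; /* toy iterator state */
    };

    /* One concrete implementation, e.g. for a hyperslab-like selection */
    static int
    hyper_iter_has_next_block(const sel_iter_t *iter)
    {
        return iter->blocks_left > 0;
    }

    static const sel_iter_class_t hyper_class = {hyper_iter_has_next_block};

    /* The wrapper callers are expected to use instead of the raw pointer */
    #define SEL_ITER_HAS_NEXT_BLOCK(iter) ((*(iter)->type->iter_has_next_block)(iter))

    int
    main(void)
    {
        sel_iter_t it = {&hyper_class, 2};

        printf("has next block: %d\n", SEL_ITER_HAS_NEXT_BLOCK(&it));
        return 0;
    }

The payoff of the pattern is that every selection type can plug its own iterator behaviour in behind a single call site, which is why the removed routines warn against calling the plain functions directly.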
- EXAMPLES - REVISION LOG ---------------------------------------------------------------------------*/ -static htri_t -H5S__select_iter_has_next_block(const H5S_sel_iter_t *iter) -{ - herr_t ret_value; /* return value */ - - FUNC_ENTER_PACKAGE_NOERR - - /* Check args */ - HDassert(iter); - - /* Call iter_has_next_block routine for selection type */ - ret_value = (*iter->type->iter_has_next_block)(iter); - - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5S__select_iter_has_next_block() */ -#endif /* LATER */ - /*-------------------------------------------------------------------------- NAME H5S_select_iter_next @@ -1353,48 +1263,6 @@ H5S_select_iter_next(H5S_sel_iter_t *iter, size_t nelem) FUNC_LEAVE_NOAPI(ret_value) } /* end H5S_select_iter_next() */ -#ifdef LATER - -/*-------------------------------------------------------------------------- - NAME - H5S__select_iter_next_block - PURPOSE - Advance selection iterator to next block - USAGE - herr_t H5S__select_iter_next_block(iter) - H5S_sel_iter_t *iter; IN/OUT: Selection iterator to change - RETURNS - Non-negative on success, negative on failure. - DESCRIPTION - Move the current element for the selection iterator to the next - block in the selection. - GLOBAL VARIABLES - COMMENTS, BUGS, ASSUMPTIONS - Doesn't maintain the 'elmt_left' field of the selection iterator. - - This routine participates in the "Inlining C function pointers" - pattern, don't call it directly, use the appropriate macro - defined in H5Sprivate.h. - EXAMPLES - REVISION LOG ---------------------------------------------------------------------------*/ -static herr_t -H5S__select_iter_next_block(H5S_sel_iter_t *iter) -{ - herr_t ret_value; /* return value */ - - FUNC_ENTER_PACKAGE_NOERR - - /* Check args */ - HDassert(iter); - - /* Call iter_next_block routine for selection type */ - ret_value = (*iter->type->iter_next_block)(iter); - - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5S__select_iter_next_block() */ -#endif /* LATER */ - /*------------------------------------------------------------------------- * Function: H5S_select_iter_get_seq_list * diff --git a/src/H5UCprivate.h b/src/H5UCprivate.h index 9a4aa2aa66a..c6a6b7a854e 100644 --- a/src/H5UCprivate.h +++ b/src/H5UCprivate.h @@ -22,9 +22,6 @@ /**************************************/ /* Public headers needed by this file */ /**************************************/ -#ifdef LATER -#include "H5UCpublic.h" -#endif /* LATER */ /***************************************/ /* Private headers needed by this file */ diff --git a/src/H5VM.c b/src/H5VM.c index 3abe1d3842f..f67917d43d9 100644 --- a/src/H5VM.c +++ b/src/H5VM.c @@ -35,11 +35,6 @@ static void H5VM__stride_optimize1(unsigned *np /*in,out*/, hsize_t *elmt_size / const hsize_t *size, hsize_t *stride1); static void H5VM__stride_optimize2(unsigned *np /*in,out*/, hsize_t *elmt_size /*in,out*/, const hsize_t *size, hsize_t *stride1, hsize_t *stride2); -#ifdef LATER -static void H5VM__stride_copy2(hsize_t nelmts, hsize_t elmt_size, unsigned dst_n, const hsize_t *dst_size, - const ssize_t *dst_stride, void *_dst, unsigned src_n, const hsize_t *src_size, - const ssize_t *src_stride, const void *_src); -#endif /* LATER */ /*------------------------------------------------------------------------- * Function: H5VM__stride_optimize1 @@ -761,76 +756,6 @@ H5VM_stride_copy_s(unsigned n, hsize_t elmt_size, const hsize_t *size, const hss FUNC_LEAVE_NOAPI(SUCCEED) } -#ifdef LATER - -/*------------------------------------------------------------------------- - * Function: H5VM__stride_copy2 - 
* - * Purpose: Similar to H5VM_stride_copy() except the source and - * destination each have their own dimensionality and size and - * we copy exactly NELMTS elements each of size ELMT_SIZE. The - * size counters wrap if NELMTS is more than a size counter. - * - * Return: void - * - * Programmer: Robb Matzke - * Saturday, October 11, 1997 - * - *------------------------------------------------------------------------- - */ -static void -H5VM__stride_copy2(hsize_t nelmts, hsize_t elmt_size, unsigned dst_n, const hsize_t *dst_size, - const hsize_t *dst_stride, void *_dst, unsigned src_n, const hsize_t *src_size, - const hsize_t *src_stride, const void *_src) -{ - uint8_t *dst = (uint8_t *)_dst; - const uint8_t *src = (const uint8_t *)_src; - hsize_t dst_idx[H5VM_HYPER_NDIMS]; - hsize_t src_idx[H5VM_HYPER_NDIMS]; - hsize_t i; /* Local index variable */ - int j; /* Local index variable */ - hbool_t carry; - - FUNC_ENTER_PACKAGE_NOERR - - HDassert(elmt_size < SIZE_MAX); - HDassert(dst_n > 0); - HDassert(src_n > 0); - - H5VM_vector_cpy(dst_n, dst_idx, dst_size); - H5VM_vector_cpy(src_n, src_idx, src_size); - - for (i = 0; i < nelmts; i++) { - - /* Copy an element */ - H5_CHECK_OVERFLOW(elmt_size, hsize_t, size_t); - H5MM_memcpy(dst, src, (size_t)elmt_size); /*lint !e671 The elmt_size will be OK */ - - /* Decrement indices and advance pointers */ - for (j = (int)(dst_n - 1), carry = TRUE; j >= 0 && carry; --j) { - dst += dst_stride[j]; - if (--dst_idx[j]) - carry = FALSE; - else { - HDassert(dst_size); - dst_idx[j] = dst_size[j]; - } /* end else */ - } - for (j = (int)(src_n - 1), carry = TRUE; j >= 0 && carry; --j) { - src += src_stride[j]; - if (--src_idx[j]) - carry = FALSE; - else { - HDassert(src_size); - src_idx[j] = src_size[j]; - } /* end else */ - } - } - - FUNC_LEAVE_NOAPI_VOID -} -#endif /* LATER */ - /*------------------------------------------------------------------------- * Function: H5VM_array_fill * diff --git a/test/tattr.c b/test/tattr.c index 35b42dd6463..a4ff0880008 100644 --- a/test/tattr.c +++ b/test/tattr.c @@ -5503,13 +5503,9 @@ test_attr_corder_delete(hid_t fcpl, hid_t fapl) hsize_t corder_count; /* # of records in creation order index */ unsigned reopen_file; /* Whether to re-open the file before deleting group */ char attrname[NAME_BUF_SIZE]; /* Name of attribute */ -#ifdef LATER - h5_stat_size_t empty_size; /* Size of empty file */ - h5_stat_size_t file_size; /* Size of file after operating on it */ -#endif /* LATER */ - unsigned curr_dset; /* Current dataset to work on */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ + unsigned curr_dset; /* Current dataset to work on */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Deleting Object w/Dense Attribute Storage and Creation Order Info\n")); @@ -5536,24 +5532,6 @@ test_attr_corder_delete(hid_t fcpl, hid_t fapl) ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); CHECK(ret, FAIL, "H5Pget_attr_phase_change"); -/* XXX: Try to find a way to resize dataset's object header so that the object - * header can have one chunk, then retrieve "empty" file size and check - * that size after everything is deleted -QAK - */ -#ifdef LATER - /* Create empty file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Get the size of an empty file */ - empty_size = 
h5_get_file_size(FILENAME); - CHECK(empty_size, FAIL, "h5_get_file_size"); -#endif /* LATER */ - /* Loop to leave file open when deleting dataset, or to close & re-open file * before deleting dataset */ for (reopen_file = FALSE; reopen_file <= TRUE; reopen_file++) { @@ -5666,13 +5644,7 @@ test_attr_corder_delete(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Fclose"); } /* end if */ -#ifdef LATER - /* Get the size of the file now */ - file_size = h5_get_file_size(FILENAME); - CHECK(file_size, FAIL, "h5_get_file_size"); - VERIFY(file_size, empty_size, "h5_get_file_size"); -#endif /* LATER */ - } /* end for */ + } /* end for */ /* Close property list */ ret = H5Pclose(dcpl); diff --git a/test/tfile.c b/test/tfile.c index 03ade923d4c..bca3da2bfd6 100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -331,12 +331,6 @@ test_file_create(void) ret = H5Pclose(tmpl1); CHECK(ret, FAIL, "H5Pclose"); -#ifdef LATER - /* Double-check that the atom has been vaporized */ - ret = H5Pclose(tmpl1); - VERIFY(ret, FAIL, "H5Pclose"); -#endif - if (h5_using_default_driver(NULL)) { /* Create a new file with a non-standard file-creation template */ diff --git a/testpar/t_dset.c b/testpar/t_dset.c index 5002fb83bdb..34c4d97b8b8 100644 --- a/testpar/t_dset.c +++ b/testpar/t_dset.c @@ -3427,9 +3427,6 @@ actual_io_mode_tests(void) * Programmer: Jonathan Kim * Date: Aug, 2012 */ -#ifdef LATER -#define DSET_NOCOLCAUSE "nocolcause" -#endif #define FILE_EXTERNAL "nocolcause_extern.data" static void test_no_collective_cause_mode(int selection_mode) diff --git a/tools/lib/h5tools_str.c b/tools/lib/h5tools_str.c index d9e26e798f2..223eb616e57 100644 --- a/tools/lib/h5tools_str.c +++ b/tools/lib/h5tools_str.c @@ -1336,22 +1336,6 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai if (i) h5tools_str_append(str, "%s", OPT(info->vlen_sep, "," OPTIONAL_LINE_BREAK)); -#ifdef LATER - /* Need to fix so VL data breaks at correct location on end of line -QAK */ - if (info->arr_linebreak && h5tools_str_len(str) >= info->line_ncols) { - int x; - - h5tools_str_append(str, "%s", "\n"); - - /* need to indent some more here */ - if (ctx->indent_level >= 0) - h5tools_str_append(str, "%s", OPT(info->line_pre, "")); - - for (x = 0; x < ctx->indent_level + 1; x++) - h5tools_str_append(str, "%s", OPT(info->line_indent, "")); - } /* end if */ -#endif /* LATER */ - ctx->indent_level++; /* Dump the array element */ diff --git a/tools/test/perform/zip_perf.c b/tools/test/perform/zip_perf.c index 1265c30dda0..19b29ba2587 100644 --- a/tools/test/perform/zip_perf.c +++ b/tools/test/perform/zip_perf.c @@ -192,26 +192,6 @@ compress_buffer(Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceL } } -#ifdef LATER -/* - * Function: uncompress_buffer - * Purpose: Uncompress the buffer. - * Returns: Z_OK - success - * Z_MEM_ERROR - not enough memory - * Z_BUF_ERROR - not enough room in the output buffer - * Z_DATA_ERROR - the input data was corrupted - * Programmer: Bill Wendling, 05. 
June 2002 - * Modifications: - */ -static int -uncompress_buffer(Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen) -{ - int rc = uncompress(dest, destLen, source, sourceLen); - - return rc; -} -#endif /* LATER */ - /* * Function: get_unique_name * Purpose: Create a new file who's name doesn't conflict with From 427cabb029b679032e1e8a21b521d4b8c9ba8671 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Tue, 11 Apr 2023 09:41:32 -0500 Subject: [PATCH 098/231] Comment cleanup (#2689) * Clean up content and redundant logging in comments. --- c++/test/dsets.cpp | 8 - c++/test/tfile.cpp | 14 - c++/test/tfilter.cpp | 4 - c++/test/th5s.cpp | 45 -- fortran/src/H5If.c | 1 - fortran/src/H5Pf.c | 6 - fortran/test/t.c | 4 - fortran/test/tH5G_1_8.F90 | 10 - fortran/test/tH5MISC_1_8.F90 | 2 - fortran/test/tH5P_F03.F90 | 2 - fortran/test/tH5T.F90 | 2 - fortran/test/tH5T_F03.F90 | 7 - hl/fortran/src/H5IMcc.c | 12 - hl/fortran/src/H5IMfc.c | 36 -- hl/fortran/src/H5LTfc.c | 39 -- hl/fortran/src/H5LTff.F90 | 8 - hl/src/H5IM.c | 29 - hl/src/H5LT.c | 74 --- hl/src/H5PT.c | 50 -- hl/tools/gif2h5/writehdf.c | 3 - src/H5AC.c | 42 +- src/H5ACmpio.c | 2 - src/H5C.c | 477 ++------------ src/H5Cdbg.c | 6 - src/H5Cepoch.c | 3 +- src/H5Cimage.c | 22 +- src/H5Cmpio.c | 56 +- src/H5Cpkg.h | 582 +++--------------- src/H5Cprivate.h | 51 +- src/H5FD.c | 8 - src/H5FDint.c | 16 - src/H5FDmpi.c | 30 - src/H5FDsubfiling/H5FDioc_int.c | 2 - src/H5FDsubfiling/H5FDioc_threads.c | 27 - src/H5FDsubfiling/H5FDsubfile_int.c | 4 - src/H5FDsubfiling/H5FDsubfiling.c | 12 - src/H5FDsubfiling/H5subfiling_common.c | 13 - src/H5detect.c | 2 +- src/H5make_libsettings.c | 2 +- test/big.c | 25 - test/bittests.c | 16 - test/cache.c | 244 -------- test/cache_common.c | 23 - test/cache_common.h | 2 - test/cmpd_dset.c | 21 - test/dangle.c | 12 - test/dt_arith.c | 47 -- test/dtypes.c | 57 +- test/enum.c | 12 - test/file_image.c | 8 - test/fillval.c | 25 - test/filter_fail.c | 7 - test/gen_cross.c | 24 - test/gen_filters.c | 2 - test/gen_new_array.c | 2 - test/gen_new_mtime.c | 2 - test/gen_noencoder.c | 2 - test/gen_old_array.c | 2 - test/gen_old_layout.c | 2 - test/gen_old_mtime.c | 2 - test/gen_sizes_lheap.c | 2 - test/h5test.c | 22 - test/hdfs.c | 12 - test/istore.c | 15 - test/links.c | 2 - test/links_env.c | 2 - test/mount.c | 64 -- test/mtime.c | 7 - test/ntypes.c | 37 -- test/page_buffer.c | 53 -- test/reserved.c | 8 - test/s3comms.c | 4 - test/space_overflow.c | 2 - test/stab.c | 3 - test/tattr.c | 2 - test/tcheck_version.c | 2 - test/tconfig.c | 8 - test/tcoords.c | 2 - test/test_swmr.pwsh.in | 7 - test/test_swmr.sh.in | 7 - test/testframe.c | 3 - test/tfile.c | 2 - test/tgenprop.c | 2 - test/th5s.c | 2 - test/titerate.c | 2 - test/tmeta.c | 2 - test/trefer.c | 2 - test/trefer_deprec.c | 2 - test/tsohm.c | 32 - test/ttime.c | 2 - test/tvlstr.c | 2 - test/tvltypes.c | 2 - test/unlink.c | 32 - test/vfd.c | 28 - testpar/t_bigio.c | 2 - testpar/t_cache.c | 54 -- testpar/t_cache_image.c | 65 -- testpar/t_coll_chunk.c | 36 +- testpar/t_filter_read.c | 2 - testpar/t_pread.c | 4 - testpar/t_vfd.c | 66 -- tools/lib/h5diff.c | 4 - tools/lib/h5tools_dump.c | 17 - tools/lib/h5tools_ref.c | 10 - tools/lib/io_timer.c | 6 - tools/libtest/h5tools_test_utils.c | 8 - tools/src/h5copy/h5copy.c | 8 - tools/src/h5diff/h5diff_main.c | 32 - tools/src/h5diff/ph5diff_main.c | 8 - tools/src/h5dump/h5dump.c | 2 - tools/src/h5import/h5import.c | 3 - tools/src/h5perf/pio_engine.c | 10 - tools/src/h5perf/pio_perf.c | 25 - tools/src/h5perf/sio_engine.c 
| 9 - tools/src/h5perf/sio_perf.c | 15 - tools/src/h5stat/h5stat.c | 27 - tools/src/misc/h5repart.c | 4 - tools/test/h5dump/binread.c | 2 - tools/test/h5dump/h5dumpgentest.c | 4 - .../test/h5repack/testh5repack_detect_szip.c | 4 - tools/test/h5stat/testh5stat.sh.in | 4 - tools/test/perform/chunk.c | 10 - tools/test/perform/overhead.c | 10 - tools/test/perform/perf_meta.c | 26 +- tools/test/perform/zip_perf.c | 8 - utils/tools/h5dwalk/h5dwalk.c | 2 - 126 files changed, 154 insertions(+), 2959 deletions(-) diff --git a/c++/test/dsets.cpp b/c++/test/dsets.cpp index 5c3ce814267..1a23f5ee4cf 100644 --- a/c++/test/dsets.cpp +++ b/c++/test/dsets.cpp @@ -1375,14 +1375,6 @@ test_operator(H5File &file) * * Failure: -1 * - * Modifications: - * Nov 12, 01: - * - moved h5_cleanup to outside of try block because - * dataset.h5 cannot be removed until "file" is out of - * scope and dataset.h5 is closed. - * Feb 20, 05: - * - cleanup_dsets took care of the cleanup now. - * *------------------------------------------------------------------------- */ extern "C" void diff --git a/c++/test/tfile.cpp b/c++/test/tfile.cpp index 56774494815..d683797193c 100644 --- a/c++/test/tfile.cpp +++ b/c++/test/tfile.cpp @@ -65,13 +65,6 @@ const H5std_string FILE4("tfile4.h5"); * Programmer Binh-Minh Ribler (use C version) * January, 2001 * - * Modifications: - * January, 2005: C tests' macro VERIFY casts values to 'long' for all - * cases. Since there are no operator<< for 'long long' - * or int64 in VS C++ ostream, I casted the hsize_t values - * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specifically handled - * with a special routine. *------------------------------------------------------------------------- */ static void @@ -274,13 +267,6 @@ test_file_create() * Programmer Binh-Minh Ribler (use C version) * January, 2001 * - * Modifications: - * January, 2005: C tests' macro VERIFY casts values to 'long' for all - * cases. Since there are no operator<< for 'long long' - * or int64 in VS C++ ostream, I casted the hsize_t values - * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specifically handled - * with a special routine. *------------------------------------------------------------------------- */ static void diff --git a/c++/test/tfilter.cpp b/c++/test/tfilter.cpp index 2d7dbb82b2b..34078a7640f 100644 --- a/c++/test/tfilter.cpp +++ b/c++/test/tfilter.cpp @@ -96,8 +96,6 @@ filter_bogus(unsigned int flags, size_t cd_nelmts, const unsigned int *cd_values * Programmer Binh-Minh Ribler (use C version, from dsets.c/test_filters) * January, 2007 * - * Modifications: - * Note: H5Z interface is not implemented yet. *------------------------------------------------------------------------- */ const hsize_t chunk_size[2] = {FILTER_CHUNK_DIM1, FILTER_CHUNK_DIM2}; @@ -145,8 +143,6 @@ test_null_filter() * Programmer Binh-Minh Ribler (partly from dsets.c/test_filters) * January, 2007 * - * Modifications: - * Note: H5Z interface is not implemented yet. *------------------------------------------------------------------------- */ const H5std_string DSET_SZIP_NAME("szipped dataset"); diff --git a/c++/test/th5s.cpp b/c++/test/th5s.cpp index 3651aa376c9..b9a84e1d5b7 100644 --- a/c++/test/th5s.cpp +++ b/c++/test/th5s.cpp @@ -87,16 +87,6 @@ int space5_data = 7; * Programmer Binh-Minh Ribler (using C version) * Mar 2001 * - * Modifications: - * January, 2005: C tests' macro VERIFY casts values to 'long' for all - * cases. 
Since there are no operator<< for 'long long' - * or int64 in VS C++ ostream, I casted the hssize_t values - * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specifically handled - * with a special routine. - * April 12, 2011: Raymond Lu - * Starting from the 1.8.7 release, we allow dimension - * size to be zero. So I took out the test against it. *------------------------------------------------------------------------- */ static void @@ -223,13 +213,6 @@ test_h5s_basic() * Programmer Binh-Minh Ribler (using C version) * Mar 2001 * - * Modifications: - * January, 2005: C tests' macro VERIFY casts values to 'long' for all - * cases. Since there are no operator<< for 'long long' - * or int64 in VS C++ ostream, I casted the hssize_t values - * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specifically handled - * with a special routine. *------------------------------------------------------------------------- */ static void @@ -287,13 +270,6 @@ test_h5s_scalar_write() * Programmer Binh-Minh Ribler (using C version) * Mar 2001 * - * Modifications: - * January, 2005: C tests' macro VERIFY casts values to 'long' for all - * cases. Since there are no operator<< for 'long long' - * or int64 in VS C++ ostream, I casted the hssize_t values - * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specifically handled - * with a special routine. *------------------------------------------------------------------------- */ static void @@ -348,13 +324,6 @@ test_h5s_scalar_read() * Programmer Raymond Lu (using C version) * May 18, 2004 * - * Modifications: - * January, 2005: C tests' macro VERIFY casts values to 'long' for all - * cases. Since there are no operator<< for 'long long' - * or int64 in VS C++ ostream, I casted the hssize_t values - * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specifically handled - * with a special routine. *------------------------------------------------------------------------- */ static void @@ -402,13 +371,6 @@ test_h5s_null() * Programmer Binh-Minh Ribler (using C version) * Mar 2001 * - * Modifications: - * January, 2005: C tests' macro VERIFY casts values to 'long' for all - * cases. Since there are no operator<< for 'long long' - * or int64 in VS C++ ostream, I casted the hssize_t values - * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specifically handled - * with a special routine. *------------------------------------------------------------------------- */ static void @@ -470,13 +432,6 @@ test_h5s_compound_scalar_write() * Programmer Binh-Minh Ribler (using C version) * Mar 2001 * - * Modifications: - * January, 2005: C tests' macro VERIFY casts values to 'long' for all - * cases. Since there are no operator<< for 'long long' - * or int64 in VS C++ ostream, I casted the hssize_t values - * passed to verify_val to 'long' as well. If problems - * arises later, this will have to be specifically handled - * with a special routine. 
*------------------------------------------------------------------------- */ static void diff --git a/fortran/src/H5If.c b/fortran/src/H5If.c index ef0128ab8d7..ffc6e0861a7 100644 --- a/fortran/src/H5If.c +++ b/fortran/src/H5If.c @@ -273,7 +273,6 @@ h5iget_file_id_c(hid_t_f *obj_id, hid_t_f *file_id) * Returns: 0 on success, -1 on failure * Programmer: Elena Pourmal * Tuesday, August 24, 2004 - * Modifications: *---------------------------------------------------------------------------*/ int_f h5iis_valid_c(hid_t_f *obj_id, int_f *c_valid) diff --git a/fortran/src/H5Pf.c b/fortran/src/H5Pf.c index 7371814df0e..38eba37340f 100644 --- a/fortran/src/H5Pf.c +++ b/fortran/src/H5Pf.c @@ -1802,10 +1802,6 @@ h5pget_nfilters_c(hid_t_f *prp_id, int_f *nfilters) * Returns: 0 on success, -1 on failure * Programmer: Xiangyang Su * Friday, February 25, 2000 - * Modifications: - * Since cd_nelmts has IN/OUT attributes, fixed the input and - * returned value of cd_nelmnts to satisfy this specification. - * MSB January 27, 2009 *---------------------------------------------------------------------------*/ int_f h5pget_filter_c(hid_t_f *prp_id, int_f *filter_number, int_f *flags, size_t_f *cd_nelmts, int_f *cd_values, @@ -5262,7 +5258,6 @@ h5pget_create_inter_group_c(hid_t_f *lcpl_id, int_f *crt_intermed_group) * Returns: 0 on success, -1 on failure * Programmer: M. Scot Breitenfeld * April 13, 2009 - * Modifications: *---------------------------------------------------------------------------*/ int_f @@ -5295,7 +5290,6 @@ h5pset_chunk_cache_c(hid_t_f *dapl_id, size_t_f *rdcc_nslots, size_t_f *rdcc_nby * Returns: 0 on success, -1 on failure * Programmer: M. Scot Breitenfeld * April 13, 2009 - * Modifications: *---------------------------------------------------------------------------*/ int_f diff --git a/fortran/test/t.c b/fortran/test/t.c index f6bc0f91b9d..b89e8ae4ff2 100644 --- a/fortran/test/t.c +++ b/fortran/test/t.c @@ -36,7 +36,6 @@ * Returns: 0 on success, -1 on failure * Programmer: Elena Pourmal * Friday, September 13, 2002 - * Modifications: *---------------------------------------------------------------------------*/ int_f nh5_fixname_c(_fcd base_name, size_t_f *base_namelen, hid_t_f *fapl, _fcd full_name, size_t_f *full_namelen) @@ -78,7 +77,6 @@ nh5_fixname_c(_fcd base_name, size_t_f *base_namelen, hid_t_f *fapl, _fcd full_n * Returns: 0 on success, -1 on failure * Programmer: Elena Pourmal * Thursday, September 19, 2002 - * Modifications: *---------------------------------------------------------------------------*/ int_f nh5_cleanup_c(_fcd base_name, size_t_f *base_namelen, hid_t_f *fapl) @@ -128,7 +126,6 @@ nh5_cleanup_c(_fcd base_name, size_t_f *base_namelen, hid_t_f *fapl) * Returns: none * Programmer: Quincey Koziol * Tuesday, December 14, 2004 - * Modifications: *---------------------------------------------------------------------------*/ void nh5_exit_c(int_f *status) @@ -145,7 +142,6 @@ nh5_exit_c(int_f *status) * Returns: none * Programmer: M.S. Breitenfeld * September 30, 2008 - * Modifications: *---------------------------------------------------------------------------*/ void nh5_env_nocleanup_c(int_f *status) diff --git a/fortran/test/tH5G_1_8.F90 b/fortran/test/tH5G_1_8.F90 index 755c96de853..a4b25f28ac1 100644 --- a/fortran/test/tH5G_1_8.F90 +++ b/fortran/test/tH5G_1_8.F90 @@ -639,8 +639,6 @@ END SUBROUTINE timestamps ! * Programmer: Adapted from C test by: ! * M.S. Breitenfeld ! * -! * Modifications: -! * ! 
*------------------------------------------------------------------------- ! @@ -732,8 +730,6 @@ END SUBROUTINE mklinks ! * Programmer: M.S. Breitenfeld ! * March 3, 2008 ! * -! * Modifications: -! * ! *------------------------------------------------------------------------- ! @@ -1080,8 +1076,6 @@ END SUBROUTINE lifecycle ! * Programmer: M.S. Breitenfeld ! * April 14, 2008 ! * -! * Modifications: Modified original C code -! * ! *------------------------------------------------------------------------- ! @@ -1488,8 +1482,6 @@ END SUBROUTINE link_info_by_idx_check ! * Modified C routine ! * March 12, 2008 ! * -! * Modifications: -! * ! *------------------------------------------------------------------------- ! @@ -1858,8 +1850,6 @@ END SUBROUTINE objcopy ! * Programmer: James Laird ! * Tuesday, June 6, 2006 ! * -! * Modifications: -! * ! *------------------------------------------------------------------------- ! diff --git a/fortran/test/tH5MISC_1_8.F90 b/fortran/test/tH5MISC_1_8.F90 index 2eea6bad0a7..5413169f3d3 100644 --- a/fortran/test/tH5MISC_1_8.F90 +++ b/fortran/test/tH5MISC_1_8.F90 @@ -341,8 +341,6 @@ END SUBROUTINE test_h5s_encode ! Programmer: M. Scot Breitenfeld ! Decemeber 11, 2010 ! -! Modifications: -! !------------------------------------------------------------------------- ! diff --git a/fortran/test/tH5P_F03.F90 b/fortran/test/tH5P_F03.F90 index ad505d47161..0875b81af13 100644 --- a/fortran/test/tH5P_F03.F90 +++ b/fortran/test/tH5P_F03.F90 @@ -85,8 +85,6 @@ MODULE TH5P_F03 ! * Programmer: M. Scot Breitenfeld ! * June 24, 2008 ! * -! * Modifications: -! * ! *------------------------------------------------------------------------- ! diff --git a/fortran/test/tH5T.F90 b/fortran/test/tH5T.F90 index 82a908e4240..1fee0361908 100644 --- a/fortran/test/tH5T.F90 +++ b/fortran/test/tH5T.F90 @@ -953,8 +953,6 @@ END SUBROUTINE enumtest ! * Fortran Programmer: M.S. Breitenfeld ! * September 9, 2008 ! * -! * Modifications: -! * ! *------------------------------------------------------------------------- ! diff --git a/fortran/test/tH5T_F03.F90 b/fortran/test/tH5T_F03.F90 index 9535d3a07a3..2256b50f210 100644 --- a/fortran/test/tH5T_F03.F90 +++ b/fortran/test/tH5T_F03.F90 @@ -2917,13 +2917,6 @@ END SUBROUTINE setup_buffer ! Programmer: M. Scot Breitenfeld ! Decemeber 7, 2010 ! -! Modifications: Moved this subroutine from the 1.8 test file and -! modified it to use F2003 features. -! This routine requires 4 byte reals, so we use F2003 features to -! ensure the requirement is satisfied in a portable way. -! The need for this arises when a user specifies the default real is 8 bytes. -! MSB 7/31/12 -! !------------------------------------------------------------------------- ! diff --git a/hl/fortran/src/H5IMcc.c b/hl/fortran/src/H5IMcc.c index 6e812f8f158..208482653eb 100644 --- a/hl/fortran/src/H5IMcc.c +++ b/hl/fortran/src/H5IMcc.c @@ -37,8 +37,6 @@ herr_t H5IM_get_palette(hid_t loc_id, const char *image_name, int pal_number, hi * The memory datatype is H5T_NATIVE_INT. 
It is supposed to be called from * the FORTRAN interface where the image buffer is defined as type "integer" * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -134,8 +132,6 @@ H5IMmake_image_8bitf(hid_t loc_id, const char *dset_name, hsize_t width, hsize_t * INTERLACE_PLANE [pixel components][height][width] * * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -244,8 +240,6 @@ H5IMmake_image_24bitf(hid_t loc_id, const char *dset_name, hsize_t width, hsize_ * The memory datatype is H5T_NATIVE_INT. It is supposed to be called from * the FORTRAN interface where the image buffer is defined as type "integer" * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -302,8 +296,6 @@ H5IMread_imagef(hid_t loc_id, const char *dset_name, int_f *buf) * * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -395,8 +387,6 @@ H5IMmake_palettef(hid_t loc_id, const char *pal_name, const hsize_t *pal_dims, i * * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -434,8 +424,6 @@ H5IMget_palettef(hid_t loc_id, const char *image_name, int pal_number, int_f *pa * Comments: * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t diff --git a/hl/fortran/src/H5IMfc.c b/hl/fortran/src/H5IMfc.c index 30b70dcba16..cead87144b1 100644 --- a/hl/fortran/src/H5IMfc.c +++ b/hl/fortran/src/H5IMfc.c @@ -29,9 +29,6 @@ * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -84,9 +81,6 @@ h5immake_image_8bit_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, hsize_t_f * * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -134,9 +128,6 @@ h5imread_image_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, int_f *buf) * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -197,9 +188,6 @@ h5immake_image_24bit_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, size_t_f * * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -274,9 +262,6 @@ h5imget_image_info_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, hsize_t_f *w * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -320,9 +305,6 @@ h5imis_image_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name) * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -370,9 +352,6 @@ h5immake_palette_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, hsize_t_f *dim * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -430,9 +409,6 @@ h5imlink_palette_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, size_t_f *ilen * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -490,9 +466,6 @@ h5imunlink_palette_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, size_t_f *il * * Comments: * - * Modifications: - * 
- * *------------------------------------------------------------------------- */ @@ -546,9 +519,6 @@ h5imget_npalettes_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, hsize_t_f *np * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -605,9 +575,6 @@ h5imget_palette_info_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, int_f *pal * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -658,9 +625,6 @@ h5imget_palette_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, int_f *pal_numb * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ diff --git a/hl/fortran/src/H5LTfc.c b/hl/fortran/src/H5LTfc.c index 2819f1b3e93..c888eec0369 100644 --- a/hl/fortran/src/H5LTfc.c +++ b/hl/fortran/src/H5LTfc.c @@ -29,9 +29,6 @@ * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -93,9 +90,6 @@ h5ltmake_dataset_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, int_f *rank, h * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -148,9 +142,6 @@ h5ltread_dataset_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, hid_t_f *type_ * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -208,9 +199,6 @@ h5ltmake_dataset_string_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, size_t_ * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -261,9 +249,6 @@ h5ltread_dataset_string_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, char *b * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -361,9 +346,6 @@ h5ltset_attribute_c(hid_t_f *loc_id, size_t_f *namelen, _fcd dsetname, size_t_f * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -443,9 +425,6 @@ h5ltget_attribute_c(hid_t_f *loc_id, size_t_f *namelen, _fcd dsetname, size_t_f * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -513,9 +492,6 @@ h5ltget_attribute_string_c(hid_t_f *loc_id, size_t_f *namelen, _fcd dsetname, si * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -568,9 +544,6 @@ h5ltget_dataset_ndims_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, int_f *ra * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -614,9 +587,6 @@ h5ltfind_dataset_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name) * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -687,9 +657,6 @@ h5ltget_dataset_info_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, hsize_t_f * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -750,9 +717,6 @@ h5ltget_attribute_ndims_c(hid_t_f *loc_id, size_t_f *namelen, _fcd dsetname, siz * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -830,9 +794,6 @@ h5ltget_attribute_info_c(hid_t_f *loc_id, size_t_f *namelen, 
_fcd name, size_t_f * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ diff --git a/hl/fortran/src/H5LTff.F90 b/hl/fortran/src/H5LTff.F90 index 8ced874e8c2..fe54627ed3c 100644 --- a/hl/fortran/src/H5LTff.F90 +++ b/hl/fortran/src/H5LTff.F90 @@ -615,8 +615,6 @@ END SUBROUTINE h5ltread_dataset_f_int7 ! ! Comments: ! - ! Modifications: - ! !------------------------------------------------------------------------- SUBROUTINE h5ltmake_dataset_int_f_1 (loc_id,& @@ -790,8 +788,6 @@ END SUBROUTINE h5ltmake_dataset_int_f_7 ! ! Comments: ! - ! Modifications: - ! !------------------------------------------------------------------------- SUBROUTINE h5ltread_dataset_int_f_1(loc_id,& @@ -953,8 +949,6 @@ END SUBROUTINE h5ltread_dataset_int_f_7 ! ! Comments: ! - ! Modifications: - ! !------------------------------------------------------------------------- SUBROUTINE h5ltmake_dataset_string_f(loc_id,& @@ -1003,8 +997,6 @@ END SUBROUTINE h5ltmake_dataset_string_f ! ! Comments: ! - ! Modifications: - ! !------------------------------------------------------------------------- SUBROUTINE h5ltread_dataset_string_f(loc_id,& diff --git a/hl/src/H5IM.c b/hl/src/H5IM.c index 43e5bedd62d..25381fd5cf4 100644 --- a/hl/src/H5IM.c +++ b/hl/src/H5IM.c @@ -27,8 +27,6 @@ * Comments: * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -83,9 +81,6 @@ H5IMmake_image_8bit(hid_t loc_id, const char *dset_name, hsize_t width, hsize_t * INTERLACE_PIXEL [height][width][pixel components] * INTERLACE_PLANE [pixel components][height][width] * - * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -154,8 +149,6 @@ H5IMmake_image_24bit(hid_t loc_id, const char *dset_name, hsize_t width, hsize_t * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -197,8 +190,6 @@ find_palette(H5_ATTR_UNUSED hid_t loc_id, const char *name, H5_ATTR_UNUSED const * Comments: * The function uses H5Aiterate2 with the operator function find_palette * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -223,8 +214,6 @@ H5IM_find_palette(hid_t loc_id) * Comments: * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -390,8 +379,6 @@ H5IMget_image_info(hid_t loc_id, const char *dset_name, hsize_t *width, hsize_t * Comments: * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -437,8 +424,6 @@ H5IMread_image(hid_t loc_id, const char *dset_name, unsigned char *buf) * Comments: * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -493,8 +478,6 @@ H5IMmake_palette(hid_t loc_id, const char *pal_name, const hsize_t *pal_dims, co * palettes to be viewed with. The dataset will have an attribute * which contains an array of object reference pointers which refer to palettes in the file. 
* - * Modifications: - * *------------------------------------------------------------------------- */ @@ -660,8 +643,6 @@ H5IMlink_palette(hid_t loc_id, const char *image_name, const char *pal_name) * Comments: * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -756,8 +737,6 @@ H5IMunlink_palette(hid_t loc_id, const char *image_name, const char *pal_name) * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -842,8 +821,6 @@ H5IMget_npalettes(hid_t loc_id, const char *image_name, hssize_t *npals) * Comments: * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -951,8 +928,6 @@ H5IMget_palette_info(hid_t loc_id, const char *image_name, int pal_number, hsize * Comments: * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -1053,8 +1028,6 @@ H5IMget_palette(hid_t loc_id, const char *image_name, int pal_number, unsigned c * Comments: * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -1154,8 +1127,6 @@ H5IMis_image(hid_t loc_id, const char *dset_name) * Comments: * based on HDF5 Image and Palette Specification * - * Modifications: - * *------------------------------------------------------------------------- */ diff --git a/hl/src/H5LT.c b/hl/src/H5LT.c index 6a4975f144c..4c61aa88f9f 100644 --- a/hl/src/H5LT.c +++ b/hl/src/H5LT.c @@ -559,9 +559,6 @@ H5LT_make_dataset_numerical(hid_t loc_id, const char *dset_name, int rank, const * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -585,9 +582,6 @@ H5LTmake_dataset(hid_t loc_id, const char *dset_name, int rank, const hsize_t *d * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -610,9 +604,6 @@ H5LTmake_dataset_char(hid_t loc_id, const char *dset_name, int rank, const hsize * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -635,9 +626,6 @@ H5LTmake_dataset_short(hid_t loc_id, const char *dset_name, int rank, const hsiz * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -660,9 +648,6 @@ H5LTmake_dataset_int(hid_t loc_id, const char *dset_name, int rank, const hsize_ * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -685,9 +670,6 @@ H5LTmake_dataset_long(hid_t loc_id, const char *dset_name, int rank, const hsize * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -710,9 +692,6 @@ H5LTmake_dataset_float(hid_t loc_id, const char *dset_name, int rank, const hsiz * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -736,9 +715,6 @@ H5LTmake_dataset_double(hid_t loc_id, const char *dset_name, int rank, const hsi * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -1293,8 +1269,6 @@ H5LTget_dataset_info(hid_t loc_id, 
const char *dset_name, hsize_t *dims, H5T_cla * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -1380,8 +1354,6 @@ H5_GCC_CLANG_DIAG_ON("cast-qual") * * Comments: If the attribute already exists, it is overwritten * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -2034,8 +2006,6 @@ H5LTget_attribute_info(hid_t loc_id, const char *obj_name, const char *attr_name * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ hid_t @@ -2083,8 +2053,6 @@ H5LTtext_to_dtype(const char *text, H5LT_lang_t lang_type) * * Date: 29 September 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ static char * @@ -2155,8 +2123,6 @@ realloc_and_append(hbool_t _no_user_buf, size_t *len, char *buf, const char *str * * Date: December 6, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static char * @@ -2189,8 +2155,6 @@ indentation(size_t x, char *str, hbool_t no_u_buf, size_t *s_len) * * Programmer: Raymond Lu * - * Modifications: - * *-----------------------------------------------------------------------*/ static char * print_enum(hid_t type, char *str, size_t *str_len, hbool_t no_ubuf, size_t indt) @@ -2323,8 +2287,6 @@ print_enum(hid_t type, char *str, size_t *str_len, hbool_t no_ubuf, size_t indt) * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -2374,8 +2336,6 @@ H5LTdtype_to_text(hid_t dtype, char *str, H5LT_lang_t lang_type, size_t *len) * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ char * @@ -2984,8 +2944,6 @@ H5LT_dtype_to_text(hid_t dtype, char *dt_str, H5LT_lang_t lang, size_t *slen, hb * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -3031,8 +2989,6 @@ H5LTget_attribute_string(hid_t loc_id, const char *obj_name, const char *attr_na * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -3058,8 +3014,6 @@ H5LTget_attribute_char(hid_t loc_id, const char *obj_name, const char *attr_name * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -3085,8 +3039,6 @@ H5LTget_attribute_uchar(hid_t loc_id, const char *obj_name, const char *attr_nam * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -3112,8 +3064,6 @@ H5LTget_attribute_short(hid_t loc_id, const char *obj_name, const char *attr_nam * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -3139,8 +3089,6 @@ H5LTget_attribute_ushort(hid_t loc_id, const char *obj_name, const char *attr_na * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -3166,8 +3114,6 @@ H5LTget_attribute_int(hid_t loc_id, const char *obj_name, const char *attr_name, * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -3193,8 +3139,6 @@ H5LTget_attribute_uint(hid_t loc_id, const char *obj_name, const char *attr_name * * Comments: * 
- * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -3220,8 +3164,6 @@ H5LTget_attribute_long(hid_t loc_id, const char *obj_name, const char *attr_name * * Comments: This function was added to support INTEGER*8 Fortran types * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -3247,8 +3189,6 @@ H5LTget_attribute_long_long(hid_t loc_id, const char *obj_name, const char *attr * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -3274,8 +3214,6 @@ H5LTget_attribute_ulong(hid_t loc_id, const char *obj_name, const char *attr_nam * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -3301,8 +3239,6 @@ H5LTget_attribute_ullong(hid_t loc_id, const char *obj_name, const char *attr_na * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -3329,8 +3265,6 @@ H5LTget_attribute_float(hid_t loc_id, const char *obj_name, const char *attr_nam * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -3357,8 +3291,6 @@ H5LTget_attribute_double(hid_t loc_id, const char *obj_name, const char *attr_na * * Comments: Private function * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -3390,8 +3322,6 @@ H5LTget_attribute(hid_t loc_id, const char *obj_name, const char *attr_name, hid * * Comments: Private function * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -3451,8 +3381,6 @@ H5LT_get_attribute_mem(hid_t loc_id, const char *obj_name, const char *attr_name * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -3500,8 +3428,6 @@ H5LT_get_attribute_disk(hid_t loc_id, const char *attr_name, void *attr_out) * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t diff --git a/hl/src/H5PT.c b/hl/src/H5PT.c index 8f0b323a544..9d5d6275b78 100644 --- a/hl/src/H5PT.c +++ b/hl/src/H5PT.c @@ -58,13 +58,6 @@ static herr_t H5PT_get_index(htbl_t *table_id, hsize_t *pt_index); * currently. Fill data is not necessary because the * table is initially of size 0. * - * Modifications: - * Mar 1, 2016 - * This function is added to replace H5PTcreate_fl and it differs - * from H5PTcreate_fl only because its last argument is plist_id - * instead of compression; this is to allow flexible compression. - * -BMR - * *------------------------------------------------------------------------- */ hid_t @@ -183,8 +176,6 @@ H5PTcreate(hid_t loc_id, const char *dset_name, hid_t dtype_id, hsize_t chunk_si * currently. Fill data is not necessary because the * table is initially of size 0. * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -297,13 +288,6 @@ H5PTcreate_fl(hid_t loc_id, const char *dset_name, hid_t dtype_id, hsize_t chunk * * Comments: * - * Modifications: - * - * John Mainzer -- 4/23/08 - * Added error check on malloc of table, initialized fields - * in table to keep lower level code from choking on bogus - * data in error cases. 
- * *------------------------------------------------------------------------- */ hid_t @@ -422,8 +406,6 @@ H5PT_free_id(void *id, void H5_ATTR_UNUSED **_ctx) * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -470,8 +452,6 @@ H5PT_close(htbl_t *table) * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -524,8 +504,6 @@ H5PTclose(hid_t table_id) * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -574,9 +552,6 @@ H5PTappend(hid_t table_id, size_t nrecords, const void *data) * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ herr_t @@ -618,9 +593,6 @@ H5PTget_next(hid_t table_id, size_t nrecords, void *data) * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ herr_t @@ -667,8 +639,6 @@ H5PTread_packets(hid_t table_id, hsize_t start, size_t nrecords, void *data) * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -720,8 +690,6 @@ H5PT_get_index(htbl_t *table, hsize_t *pt_index) * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t @@ -781,9 +749,6 @@ H5PTget_index(hid_t table_id, hsize_t *pt_index) * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ herr_t @@ -818,9 +783,6 @@ H5PTget_num_packets(hid_t table_id, hsize_t *nrecords) * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ herr_t @@ -848,9 +810,6 @@ H5PTis_valid(hid_t table_id) * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ herr_t @@ -898,9 +857,6 @@ H5PTis_varlen(hid_t table_id) * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -956,9 +912,6 @@ H5PTfree_vlen_buff(hid_t table_id, size_t _bufflen, void *buff) * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ hid_t @@ -991,9 +944,6 @@ H5PTget_dataset(hid_t table_id) * * Comments: * - * Modifications: - * - * *------------------------------------------------------------------------- */ hid_t diff --git a/hl/tools/gif2h5/writehdf.c b/hl/tools/gif2h5/writehdf.c index 17d864b04f0..0a90870d17c 100644 --- a/hl/tools/gif2h5/writehdf.c +++ b/hl/tools/gif2h5/writehdf.c @@ -25,9 +25,6 @@ * * Programmer: Unknown * - * Modifications: pvn - * Use the HDF5 IMAGE API to write the HDF5 image and palette - * * Date: January, 31, 2006 * *------------------------------------------------------------------------- diff --git a/src/H5AC.c b/src/H5AC.c index 49ff0d3478d..b93fca958dc 100644 --- a/src/H5AC.c +++ b/src/H5AC.c @@ -413,14 +413,6 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co * Programmer: Robb Matzke * Jul 9 1997 * - * Changes: - * - * In the parallel case, added code to setup the MDC slist - * before the call to H5AC__flush_entries() and take it down - * afterwards. 
- * - * JRM -- 7/29/20 - * *------------------------------------------------------------------------- */ herr_t @@ -1191,13 +1183,12 @@ H5AC_prep_for_file_close(H5F_t *f) * * Function: H5AC_prep_for_file_flush * - * Purpose: This function should be called just prior to the first - * call to H5AC_flush() during a file flush. + * Purpose: Handle any setup required prior to metadata cache flush. * - * Its purpose is to handly any setup required prior to - * metadata cache flush. + * This function should be called just prior to the first + * call to H5AC_flush() during a file flush. * - * Initially, this means setting up the slist prior to the + * Initially, this means setting up the skip list prior to the * flush. We do this in a separate call because * H5F__flush_phase2() make repeated calls to H5AC_flush(). * Handling this detail in separate calls allows us to avoid @@ -1209,8 +1200,6 @@ H5AC_prep_for_file_close(H5F_t *f) * Programmer: John Mainzer * 5/5/20 * - * Changes: None. - * *------------------------------------------------------------------------- */ herr_t @@ -1242,9 +1231,6 @@ H5AC_prep_for_file_flush(H5F_t *f) * Purpose: This function should be called just after the last * call to H5AC_flush() during a file flush. * - * Its purpose is to perform any necessary cleanup after the - * metadata cache flush. - * * The objective of the call is to allow the metadata cache * to do any necessary necessary cleanup work after a cache * flush. @@ -1261,8 +1247,6 @@ H5AC_prep_for_file_flush(H5F_t *f) * Programmer: John Mainzer * 5/5/20 * - * Changes: None. - * *------------------------------------------------------------------------- */ herr_t @@ -1474,24 +1458,6 @@ H5AC_resize_entry(void *thing, size_t new_size) * amounts of dirty metadata creation in other areas -- which will * cause aux_ptr->dirty_bytes to be incremented. * - * The bottom line is that this code is probably OK, but the above - * points should be kept in mind. - * - * One final observation: This comment is occasioned by a bug caused - * by moving the call to H5AC__log_dirtied_entry() after the call to - * H5C_resize_entry(), and then only calling H5AC__log_dirtied_entry() - * if entry_ptr->is_dirty was false. - * - * Since H5C_resize_entry() marks the target entry dirty unless there - * is not change in size, this had the effect of not calling - * H5AC__log_dirtied_entry() when it should be, and corrupting - * the cleaned and dirtied lists used by rank 0 in the parallel - * version of the metadata cache. - * - * The point here is that you should be very careful when working with - * this code, and not modify it unless you fully understand it. - * - * JRM -- 2/28/22 */ if ((!entry_ptr->is_dirty) && (entry_ptr->size != new_size)) { diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c index 197cc3cb28b..40e68fd131c 100644 --- a/src/H5ACmpio.c +++ b/src/H5ACmpio.c @@ -1860,8 +1860,6 @@ H5AC__rsp__dist_md_write__flush_to_min_clean(H5F_t *f) * Programmer: John Mainzer * April 28, 2010 * - * Changes: None. - * *------------------------------------------------------------------------- */ static herr_t diff --git a/src/H5C.c b/src/H5C.c index ae03d6913ab..c41b143978d 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -185,82 +185,6 @@ H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t); * Programmer: John Mainzer * 6/2/04 * - * Modifications: - * - * JRM -- 7/20/04 - * Updated for the addition of the hash table. - * - * JRM -- 10/5/04 - * Added call to H5C_reset_cache_hit_rate_stats(). 
Also - * added initialization for cache_is_full flag and for - * resize_ctl. - * - * JRM -- 11/12/04 - * Added initialization for the new size_decreased field. - * - * JRM -- 11/17/04 - * Added/updated initialization for the automatic cache - * size control data structures. - * - * JRM -- 6/24/05 - * Added support for the new write_permitted field of - * the H5C_t structure. - * - * JRM -- 7/5/05 - * Added the new log_flush parameter and supporting code. - * - * JRM -- 9/21/05 - * Added the new aux_ptr parameter and supporting code. - * - * JRM -- 1/20/06 - * Added initialization of the new prefix field in H5C_t. - * - * JRM -- 3/16/06 - * Added initialization for the pinned entry related fields. - * - * JRM -- 5/31/06 - * Added initialization for the trace_file_ptr field. - * - * JRM -- 8/19/06 - * Added initialization for the flush_in_progress field. - * - * JRM -- 8/25/06 - * Added initialization for the slist_len_increase and - * slist_size_increase fields. These fields are used - * for sanity checking in the flush process, and are not - * compiled in unless H5C_DO_SANITY_CHECKS is TRUE. - * - * JRM -- 3/28/07 - * Added initialization for the new is_read_only and - * ro_ref_count fields. - * - * JRM -- 7/27/07 - * Added initialization for the new evictions_enabled - * field of H5C_t. - * - * JRM -- 12/31/07 - * Added initialization for the new flash cache size increase - * related fields of H5C_t. - * - * JRM -- 11/5/08 - * Added initialization for the new clean_index_size and - * dirty_index_size fields of H5C_t. - * - * - * Missing entries? - * - * - * JRM -- 4/20/20 - * Added initialization for the slist_enabled field. Recall - * that the slist is used to flush metadata cache entries - * in (roughly) increasing address order. While this is - * needed at flush and close, it is not used elsewhere. - * The slist_enabled field exists to allow us to construct - * the slist when needed, and leave it empty otherwise -- thus - * avoiding the overhead of maintaining it. - * - * JRM -- 4/29/20 - * *------------------------------------------------------------------------- */ H5C_t * @@ -691,10 +615,7 @@ H5C_prep_for_file_close(H5F_t *f) HDassert(cache_ptr); HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - /* For now at least, it is possible to receive the - * close warning more than once -- the following - * if statement handles this. - */ + /* It is possible to receive the close warning more than once */ if (cache_ptr->close_warning_received) HGOTO_DONE(SUCCEED) cache_ptr->close_warning_received = TRUE; @@ -759,27 +680,13 @@ H5C_prep_for_file_close(H5F_t *f) * This function fails if any object are protected since the * resulting file might not be consistent. * - * Note that *cache_ptr has been freed upon successful return. + * Note: *cache_ptr has been freed upon successful return. * * Return: Non-negative on success/Negative on failure * * Programmer: John Mainzer * 6/2/04 * - * Modifications: - * - * JRM -- 5/15/20 - * - * Updated the function to enable the slist prior to the - * call to H5C__flush_invalidate_cache(). - * - * Arguably, it shouldn't be necessary to re-enable the - * slist after the call to H5C__flush_invalidate_cache(), as - * the metadata cache should be discarded. However, in the - * test code, we make multiple calls to H5C_dest(). Thus - * we re-enable the slist on failure if it and the cache - * still exist. 
- * *------------------------------------------------------------------------- */ herr_t @@ -859,9 +766,12 @@ H5C_dest(H5F_t *f) done: if ((ret_value < 0) && (cache_ptr) && (cache_ptr->slist_ptr)) { - - /* need this for test code -- see change note for details */ - + /* Arguably, it shouldn't be necessary to re-enable the slist after + * the call to H5C__flush_invalidate_cache(), as the metadata cache + * should be discarded. However, in the test code, we make multiple + * calls to H5C_dest(). Thus we re-enable the slist on failure if it + * and the cache still exist. JRM -- 5/15/20 + */ if (H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0) HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist on flush dest failure failed") @@ -881,14 +791,6 @@ H5C_dest(H5F_t *f) * Programmer: Vailin Choi * Dec 2013 * - * Modifications: - * - * JRM -- 5/5/20 - * - * Added code to enable the skip list prior to the call - * to H5C__flush_invalidate_cache(), and disable it - * afterwards. - * *------------------------------------------------------------------------- */ herr_t @@ -923,9 +825,9 @@ H5C_evict(H5F_t *f) /*------------------------------------------------------------------------- * Function: H5C_expunge_entry * - * Purpose: Use this function to tell the cache to expunge an entry - * from the cache without writing it to disk even if it is - * dirty. The entry may not be either pinned or protected. + * Purpose: Expunge an entry from the cache without writing it to disk + * even if it is dirty. The entry may not be either pinned or + * protected. * * Return: Non-negative on success/Negative on failure * @@ -1007,39 +909,11 @@ H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flag * function returns failure. * * Return: Non-negative on success/Negative on failure or if there was - * a request to flush all items and something was protected. + * a request to flush all items and an entry was protected. * * Programmer: John Mainzer * 6/2/04 * - * Changes: Modified function to test for slist chamges in - * pre_serialize and serialize callbacks, and re-start - * scans through the slist when such changes occur. - * - * This has been a potential problem for some time, - * and there has been code in this function to deal - * with elements of this issue. However the shift - * to the V3 cache in combination with the activities - * of some of the cache clients (in particular the - * free space manager and the fractal heap) have - * made this re-work necessary. - * - * JRM -- 12/13/14 - * - * Modified function to support rings. Basic idea is that - * every entry in the cache is assigned to a ring. Entries - * in the outermost ring are flushed first, followed by - * those in the next outermost ring, and so on until the - * innermost ring is flushed. See header comment on - * H5C_ring_t in H5Cprivate.h for a more detailed - * discussion. - * - * JRM -- 8/30/15 - * - * Modified function to call the free space manager - * settling functions. - * JRM -- 6/9/16 - * *------------------------------------------------------------------------- */ herr_t @@ -1226,8 +1100,6 @@ H5C_flush_to_min_clean(H5F_t *f) * exist on disk yet, but it must have an address and disk * space reserved. * - * Observe that this function cannot occasion a read. - * * Return: Non-negative on success/Negative on failure * * Programmer: John Mainzer @@ -1443,9 +1315,6 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u * oversized at the end of an unprotect. 
As a result, it is * possible to have a vastly oversized cache with no protected * entries as long as all the protects precede the unprotects. - * - * Since items 1 and 2 are not changing any time soon, I see - * no point in worrying about the third. */ if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0) @@ -1531,12 +1400,6 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u * Programmer: John Mainzer * 5/15/06 * - * JRM -- 11/5/08 - * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY() to - * update the new clean_index_size and dirty_index_size - * fields of H5C_t in the case that the entry was clean - * prior to this call, and is pinned and not protected. - * *------------------------------------------------------------------------- */ herr_t @@ -2092,9 +1955,6 @@ H5C_resize_entry(void *thing, size_t new_size) * Programmer: John Mainzer * 4/26/06 * - * Changes: Added extreme sanity checks on entry and exit. - * JRM -- 4/26/14 - * *------------------------------------------------------------------------- */ herr_t @@ -2242,14 +2102,14 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign if (entry_ptr->type != type) HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type") - /* if this is a collective metadata read, the entry is not - marked as collective, and is clean, it is possible that - other processes will not have it in its cache and will - expect a bcast of the entry from process 0. So process 0 - will bcast the entry to all other ranks. Ranks that _do_ have - the entry in their cache still have to participate in the - bcast. */ #ifdef H5_HAVE_PARALLEL + /* If this is a collective metadata read, the entry is not marked as + * collective, and is clean, it is possible that other processes will + * not have it in its cache and will expect a bcast of the entry from + * process 0. So process 0 will bcast the entry to all other ranks. + * Ranks that _do_ have the entry in their cache still have to + * participate in the bcast. + */ if (coll_access) { if (!(entry_ptr->is_dirty) && !(entry_ptr->coll_access)) { MPI_Comm comm; /* File MPI Communicator */ @@ -2415,24 +2275,16 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign * oversized at the end of an unprotect. As a result, it is * possible to have a vastly oversized cache with no protected * entries as long as all the protects precede the unprotects. - * - * Since items 1, 2, and 3 are not changing any time soon, I - * see no point in worrying about the fourth. */ - if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed") } /* end if */ - /* Insert the entry in the hash table. It can't be dirty yet, so - * we don't even check to see if it should go in the skip list. - * - * This is no longer true -- due to a bug fix, we may modify - * data on load to repair a file. + /* Insert the entry in the hash table. * * ******************************************* * - * Set the flush_last field + * Set the flush_me_last field * of the newly loaded entry before inserting it into the * index. Must do this, as the index tracked the number of * entries with the flush_last field set, but assumes that @@ -2531,7 +2383,6 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign * should also call H5C__make_space_in_cache() to bring us * into compliance. 
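 *
 * As a purely illustrative example (the figures below are hypothetical,
 * not library defaults), the computation that follows amounts to:
 *
 *     empty_space = (index_size >= max_cache_size)
 *                       ? 0
 *                       : max_cache_size - index_size;
 *
 * so with a max_cache_size of 4 MB and an index_size of 3 MB,
 * empty_space comes out to 1 MB; once index_size reaches or exceeds
 * max_cache_size, empty_space is treated as zero.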
*/ - if (cache_ptr->index_size >= cache_ptr->max_cache_size) empty_space = 0; else @@ -2689,7 +2540,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_p HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?") } /* end switch */ - /* logically, this is were configuration for flash cache size increases + /* logically, this is where configuration for flash cache size increases * should go. However, this configuration depends on max_cache_size, so * we wait until the end of the function, when this field is set. */ @@ -2842,8 +2693,7 @@ H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled) /* There is no fundamental reason why we should not permit * evictions to be disabled while automatic resize is enabled. - * However, I can't think of any good reason why one would - * want to, and allowing it would greatly complicate testing + * However, allowing it would greatly complicate testing * the feature. Hence the following: */ if ((evictions_enabled != TRUE) && ((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) || @@ -2912,10 +2762,6 @@ H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled) * Programmer: John Mainzer * 5/1/20 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ herr_t @@ -3035,9 +2881,6 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_sli * Programmer: John Mainzer * 3/22/06 * - * Changes: Added extreme sanity checks on entry and exit. - * JRM -- 4/26/14 - * *------------------------------------------------------------------------- */ herr_t @@ -3098,81 +2941,6 @@ H5C_unpin_entry(void *_entry_ptr) * Programmer: John Mainzer * 6/2/04 * - * Modifications: - * - * JRM -- 7/21/04 - * Updated for the addition of the hash table. - * - * JRM -- 10/28/04 - * Added code to set cache_full to TRUE whenever we try to - * make space in the cache. - * - * JRM -- 11/12/04 - * Added code to call to H5C_make_space_in_cache() after the - * call to H5C__auto_adjust_cache_size() if that function - * sets the size_decreased flag is TRUE. - * - * JRM -- 4/25/05 - * The size_decreased flag can also be set to TRUE in - * H5C_set_cache_auto_resize_config() if a new configuration - * forces an immediate reduction in cache size. Modified - * the code to deal with this eventuallity. - * - * JRM -- 6/24/05 - * Added support for the new write_permitted field of H5C_t. - * - * JRM -- 10/22/05 - * Hand optimizations. - * - * JRM -- 5/3/06 - * Added code to set the new dirtied field in - * H5C_cache_entry_t to FALSE prior to return. - * - * JRM -- 6/23/06 - * Modified code to allow dirty entries to be loaded from - * disk. This is necessary as a bug fix in the object - * header code requires us to modify a header as it is read. - * - * JRM -- 3/28/07 - * Added the flags parameter and supporting code. At least - * for now, this parameter is used to allow the entry to - * be protected read only, thus allowing multiple protects. - * - * Also added code to allow multiple read only protects - * of cache entries. - * - * JRM -- 7/27/07 - * Added code supporting the new evictions_enabled field - * in H5C_t. - * - * JRM -- 1/3/08 - * Added to do a flash cache size increase if appropriate - * when a large entry is loaded. - * - * JRM -- 11/13/08 - * Modified function to call H5C_make_space_in_cache() when - * the min_clean_size is violated, not just when there isn't - * enough space for and entry that has just been loaded. 
- * - * The purpose of this modification is to avoid "metadata - * blizzards" in the write only case. In such instances, - * the cache was allowed to fill with dirty metadata. When - * we finally needed to evict an entry to make space, we had - * to flush out a whole cache full of metadata -- which has - * interesting performance effects. We hope to avoid (or - * perhaps more accurately hide) this effect by maintaining - * the min_clean_size, which should force us to start flushing - * entries long before we actually have to evict something - * to make space. - * - * - * Missing entries? - * - * - * JRM -- 5/8/20 - * Updated for the possibility that the slist will be - * disabled. - * *------------------------------------------------------------------------- */ herr_t @@ -3427,14 +3195,10 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) } } /* end if */ - /* this implementation of the "deleted" option is a bit inefficient, as + /* This implementation of the "deleted" option is a bit inefficient, as * we re-insert the entry to be deleted into the replacement policy * data structures, only to remove them again. Depending on how often * we do this, we may want to optimize a bit. - * - * On the other hand, this implementation is reasonably clean, and - * makes good use of existing code. - * JRM - 5/19/04 */ if (deleted) { @@ -3476,8 +3240,7 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) } /* end if */ #ifdef H5_HAVE_PARALLEL else if (clear_entry) { - - /* verify that the target entry is in the cache. */ + /* Verify that the target entry is in the cache. */ H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) if (test_entry_ptr == NULL) @@ -4704,8 +4467,6 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr) * will be re-calculated, and will be enforced the next time * we have to make space in the cache. * - * Observe that this function cannot occasion a read. - * * Return: Non-negative on success/Negative on failure. * * Programmer: John Mainzer, 11/22/04 @@ -5138,8 +4899,6 @@ H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t n if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) && (cache_ptr->max_cache_size < (cache_ptr->resize_ctl).max_size)) { - /* we have work to do */ - switch ((cache_ptr->resize_ctl).flash_incr_mode) { case H5C_flash_incr__off: HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, @@ -5259,52 +5018,7 @@ H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t n * a request to flush all items and something was protected. * * Programmer: John Mainzer - * 3/24/065 - * - * Modifications: - * - * To support the fractal heap, the cache must now deal with - * entries being dirtied, resized, and/or renamed inside - * flush callbacks. Updated function to support this. - * - * -- JRM 8/27/06 - * - * Added code to detect and manage the case in which a - * flush callback changes the s-list out from under - * the function. The only way I can think of in which this - * can happen is if a flush function loads an entry - * into the cache that isn't there already. Quincey tells - * me that this will never happen, but I'm not sure I - * believe him. - * - * Note that this is a pretty bad scenario if it ever - * happens. The code I have added should allow us to - * handle the situation under all but the worst conditions, - * but one can argue that we should just scream and die if - * we ever detect the condition. - * - * -- JRM 10/13/07 - * - * Missing entries? 
- * - * - * Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG. - * This flag is used to flush and evict all entries in - * the metadata cache that are not pinned -- typically, - * everything other than the superblock. - * - * ??? -- ??/??/?? - * - * Added sanity checks to verify that the skip list is - * enabled on entry. On the face of it, it would make - * sense to enable the slist on entry, and disable it - * on exit, as this function is not called repeatedly. - * However, since this function can be called from - * H5C_flush_cache(), this would create cases in the test - * code where we would have to check the flags to determine - * whether we must setup and take down the slist. - * - * JRM -- 5/5/20 + * 3/24/05 * *------------------------------------------------------------------------- */ @@ -5472,20 +5186,6 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) * Programmer: John Mainzer * 9/1/15 * - * Changes: Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG. - * This flag is used to flush and evict all entries in - * the metadata cache that are not pinned -- typically, - * everything other than the superblock. - * - * ??? -- ??/??/?? - * - * A recent optimization turns off the slist unless a flush - * is in progress. This should not effect this function, as - * it is only called during a flush. Added an assertion to - * verify this. - * - * JRM -- 5/6/20 - * *------------------------------------------------------------------------- */ static herr_t @@ -5546,13 +5246,11 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) * for some other cache entry), we can no longer promise to flush * the cache entries in increasing address order. * - * Instead, we just do the best we can -- making a pass through + * Instead, we make a pass through * the skip list, and then a pass through the "clean" entries, and * then repeating as needed. Thus it is quite possible that an * entry will be evicted from the cache only to be re-loaded later - * in the flush process (From what Quincey tells me, the pin - * mechanism makes this impossible, but even it it is true now, - * we shouldn't count on it in the future.) + * in the flush process. * * The bottom line is that entries will probably be flushed in close * to increasing address order, but there are no guarantees. @@ -5706,8 +5404,7 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) { if (entry_ptr->is_protected) { - - /* we have major problems -- but lets flush + /* We have major problems -- but lets flush * everything we can before we flag an error. */ protected_entries++; @@ -5792,7 +5489,7 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) * Writes to disk are possible here. */ - /* reset the counters so that we can detect insertions, loads, + /* Reset the counters so that we can detect insertions, loads, * and moves caused by the pre_serialize and serialize calls. */ cache_ptr->entries_loaded_counter = 0; @@ -6000,14 +5697,6 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) * Programmer: John Mainzer * 9/1/15 * - * Changes: A recent optimization turns off the slist unless a flush - * is in progress. This should not effect this function, as - * it is only called during a flush. Added an assertion to - * verify this. 
- * - * JRM -- 5/6/20 - * - * *------------------------------------------------------------------------- */ static herr_t @@ -6158,7 +5847,7 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) * dirty, resize, or take ownership of other entries * in the cache. * - * To deal with this, I have inserted code to detect any + * To deal with this, there is code to detect any * change in the skip list not directly under the control * of this function. If such modifications are detected, * we must re-start the scan of the skip list to avoid @@ -6310,69 +5999,6 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) * * Programmer: John Mainzer, 5/5/04 * - * Modifications: - * - * JRM -- 7/21/04 - * Updated function for the addition of the hash table. - * - * QAK -- 11/26/04 - * Updated function for the switch from TBBTs to skip lists. - * - * JRM -- 1/6/05 - * Updated function to reset the flush_marker field. - * Also replace references to H5F_FLUSH_INVALIDATE and - * H5F_FLUSH_CLEAR_ONLY with references to - * H5C__FLUSH_INVALIDATE_FLAG and H5C__FLUSH_CLEAR_ONLY_FLAG - * respectively. - * - * JRM -- 6/24/05 - * Added code to remove dirty entries from the slist after - * they have been flushed. Also added a sanity check that - * will scream if we attempt a write when writes are - * completely disabled. - * - * JRM -- 7/5/05 - * Added code to call the new log_flush callback whenever - * a dirty entry is written to disk. Note that the callback - * is not called if the H5C__FLUSH_CLEAR_ONLY_FLAG is set, - * as there is no write to file in this case. - * - * JRM -- 8/21/06 - * Added code maintaining the flush_in_progress and - * destroy_in_progress fields in H5C_cache_entry_t. - * - * Also added flush_flags parameter to the call to - * type_ptr->flush() so that the flush routine can report - * whether the entry has been resized or renamed. Added - * code using the flush_flags variable to detect the case - * in which the target entry is resized during flush, and - * update the caches data structures accordingly. - * - * JRM -- 3/29/07 - * Added sanity checks on the new is_read_only and - * ro_ref_count fields. - * - * QAK -- 2/07/08 - * Separated "destroy entry" concept from "remove entry from - * cache" concept, by adding the 'take_ownership' flag and - * the "destroy_entry" variable. - * - * JRM -- 11/5/08 - * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN() to - * maintain the new clean_index_size and clean_index_size - * fields of H5C_t. - * - * - * Missing entries?? - * - * - * JRM -- 5/8/20 - * Updated sanity checks for the possibility that the slist - * is disabled. - * - * Also updated main comment to conform more closely with - * the current state of the code. - * *------------------------------------------------------------------------- */ herr_t @@ -6741,8 +6367,6 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * A clear and a flush are the same from the point of * view of the replacement policy and the slist. * Hence no differentiation between them. - * - * JRM -- 7/7/07 */ H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL) @@ -6942,9 +6566,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) HDassert(take_ownership); - /* client is taking ownership of the entry. - * set bad magic here too so the cache will choke - * unless the entry is re-inserted properly + /* Client is taking ownership of the entry. 
Set bad magic here too + * so the cache will choke unless the entry is re-inserted properly */ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; @@ -8002,15 +7625,6 @@ H5C_entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr) * Programmer: Mike McGreevy * November 3, 2010 * - * Changes: Modified function to setup the slist before calling - * H%C_flush_cache(), and take it down afterwards. Note - * that the slist need not be empty after the call to - * H5C_flush_cache() since we are only flushing marked - * entries. Thus must set the clear_slist parameter - * of H5C_set_slist_enabled to TRUE. - * - * JRM -- 5/6/20 - * *------------------------------------------------------------------------- */ @@ -8412,8 +8026,6 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_en * The initial need for this routine is to settle all entries * in the cache prior to construction of the metadata cache * image so that the size of the cache image can be calculated. - * However, I gather that other uses for the routine are - * under consideration. * * Return: Non-negative on success/Negative on failure or if there was * a request to flush all items and something was protected. @@ -8582,16 +8194,16 @@ H5C__serialize_cache(H5F_t *f) * If the cache contains protected entries in the specified * ring, the function will fail, as protected entries cannot * be serialized. However all unprotected entries in the - * target ring should be serialized before the function - * returns failure. + * target ring should be serialized before the function + * returns failure. * * If flush dependencies appear in the target ring, the * function makes repeated passes through the index list - * serializing entries in flush dependency order. + * serializing entries in flush dependency order. * - * All entries outside the H5C_RING_SBE are marked for - * inclusion in the cache image. Entries in H5C_RING_SBE - * and below are marked for exclusion from the image. + * All entries outside the H5C_RING_SBE are marked for + * inclusion in the cache image. Entries in H5C_RING_SBE + * and below are marked for exclusion from the image. * * Return: Non-negative on success/Negative on failure or if there was * a request to flush all items and something was protected. @@ -8910,10 +8522,6 @@ H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry * Programmer: Mohamad Chaarawi * 2/10/16 * - * Changes: Updated sanity checks for the possibility that the skip - * list is disabled. - * JRM 5/16/20 - * *------------------------------------------------------------------------- */ static herr_t @@ -8975,13 +8583,6 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) * in the parallel case, it will not detect an * entry that dirties, resizes, and/or moves * other entries during its flush. - * - * From what Quincey tells me, this test is - * sufficient for now, as any flush routine that - * does the latter will also do the former. - * - * If that ceases to be the case, further - * tests will be necessary. */ if (cache_ptr->aux_ptr != NULL) diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c index 4d74a0ac1bd..0dc975693c0 100644 --- a/src/H5Cdbg.c +++ b/src/H5Cdbg.c @@ -259,12 +259,6 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name) * Programmer: John Mainzer * 11/15/14 * - * Changes: Updated function for the slist_enabled field in H5C_t. 
- * Recall that to minimize slist overhead, the slist is - * empty and not maintained if cache_ptr->slist_enabled is - * false. - * JRM -- 5/6/20 - * *------------------------------------------------------------------------- */ #ifndef NDEBUG diff --git a/src/H5Cepoch.c b/src/H5Cepoch.c index f6de3fff8f7..1b550809ae1 100644 --- a/src/H5Cepoch.c +++ b/src/H5Cepoch.c @@ -45,7 +45,7 @@ * * As a strategy for automatic cache size reduction, the cache may insert * marker entries in the LRU list at the end of each epoch. These markers - * are then used to identify entries that have not been accessed for n + * are then used to identify entries that have not been accessed for 'n' * epochs so that they can be evicted from the cache. * ****************************************************************************/ @@ -98,7 +98,6 @@ const H5AC_class_t H5AC_EPOCH_MARKER[1] = { * * None of these functions should ever be called, so there is no point in * documenting them separately. - * JRM - 11/16/04 * ***************************************************************************/ diff --git a/src/H5Cimage.c b/src/H5Cimage.c index 6fbd9369a50..70944beb6df 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -452,10 +452,6 @@ H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr) * * Programmer: John Mainzer, 8/10/15 * - * Changes: Updated sanity checks for possibility that the slist - * is disabled. - * JRM -- 5/17/20 - * *------------------------------------------------------------------------- */ herr_t @@ -1579,7 +1575,6 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_image_ctl /* The collective metadata write code is not currently compatible * with cache image. Until this is fixed, suppress cache image silently * if there is more than one process. - * JRM -- 11/8/16 */ if (cache_ptr->aux_ptr) { H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL; @@ -2296,22 +2291,7 @@ H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigne /*------------------------------------------------------------------------- * Function: H5C__prep_for_file_close__compute_fd_heights * - * Purpose: Recent modifications to flush dependency support in the - * metadata cache have removed the notion of flush dependency - * height. This is a problem for the cache image feature, - * as flush dependency height is used to order entries in the - * cache image so that flush dependency parents appear before - * flush dependency children. (Recall that the flush dependency - * height of an entry in a flush dependency relationship is the - * length of the longest path from the entry to a leaf entry -- - * that is an entry with flush dependency parents, but no - * flush dependency children. With the introduction of the - * possibility of multiple flush dependency parents, we have - * a flush partial dependency latice, not a flush dependency - * tree. But since the partial latice is acyclic, the concept - * of flush dependency height still makes sense. - * - * The purpose of this function is to compute the flush + * Purpose: The purpose of this function is to compute the flush * dependency height of all entries that appear in the cache * image. * diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index cfd078019bb..f154c8a1d26 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -402,22 +402,11 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha * shouldn't be used elsewhere. 
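 *
 * Note: since the skip list is now maintained only while a flush is
 * in progress, the size of the candidate list constructed here is
 * derived from cache_ptr->dirty_index_size rather than from
 * cache_ptr->slist_size (see the sanity checks in the function body).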
* * Return: Success: SUCCEED - * * Failure: FAIL * * Programmer: John Mainzer * 3/17/10 * - * Changes: With the slist optimization, the slist is not maintained - * unless a flush is in progress. Thus we can not longer use - * cache_ptr->slist_size to determine the total size of - * the entries we must insert in the candidate list. - * - * To address this, we now use cache_ptr->dirty_index_size - * instead. - * - * JRM -- 7/27/20 - * *------------------------------------------------------------------------- */ herr_t @@ -440,15 +429,14 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr) HDassert((!cache_ptr->slist_enabled) || (space_needed == cache_ptr->slist_size)); - /* Recall that while we shouldn't have any protected entries at this - * point, it is possible that some dirty entries may reside on the - * pinned list at this point. + /* We shouldn't have any protected entries at this point, but it is + * possible that some dirty entries may reside on the pinned list. */ HDassert(cache_ptr->dirty_index_size <= (cache_ptr->dLRU_list_size + cache_ptr->pel_size)); HDassert((!cache_ptr->slist_enabled) || (cache_ptr->slist_len <= (cache_ptr->dLRU_list_len + cache_ptr->pel_len))); - if (space_needed > 0) { /* we have work to do */ + if (space_needed > 0) { H5C_cache_entry_t *entry_ptr; unsigned nominated_entries_count = 0; @@ -545,12 +533,6 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr) * Programmer: John Mainzer * 3/17/10 * - * Changes: With the slist optimization, the slist is not maintained - * unless a flush is in progress. Updated sanity checks to - * reflect this. - * - * JRM -- 7/27/20 - * *------------------------------------------------------------------------- */ herr_t @@ -785,14 +767,8 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr * resizes, or removals of other entries can occur as * a side effect of the flush. Hence, there is no need * for the checks for entry removal / status change - * that I ported to H5C_apply_candidate_list(). + * that are in H5C_apply_candidate_list(). * - * However, if (in addition to allowing such operations - * in the parallel case), we allow such operations outside - * of the pre_serialize / serialize routines, this may - * cease to be the case -- requiring a review of this - * point. - * JRM -- 4/7/15 */ entries_cleared = 0; entries_examined = 0; @@ -1086,8 +1062,6 @@ H5C__collective_write(H5F_t *f) * Programmer: John Mainzer * 2/10/17 * - * Changes: None. - * *------------------------------------------------------------------------- */ static herr_t @@ -1464,12 +1438,8 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu cache_ptr->entries_removed_counter = 0; cache_ptr->last_entry_removed_ptr = NULL; - /* Add this entry to the list of entries to collectively write - * - * This comment is misleading -- the entry will be added to the - * collective write list only if said list exists. - * - * JRM -- 2/9/17 + /* Add this entry to the list of entries to collectively + * write, if the list exists. */ if (H5C__flush_single_entry(f, op_ptr, op_flags) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush entry") @@ -1491,12 +1461,6 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu entry_ptr->is_protected || !entry_ptr->is_pinned)) { /* Something has happened to the pinned entry list -- start * over from the head. 
- * - * Recall that this code should be un-reachable at present, - * as all the operations by entries on flush that could cause - * it to be reachable are disallowed in the parallel case at - * present. Hence the following assertion which should be - * removed if the above changes. */ HDassert(!restart_scan); @@ -1505,7 +1469,13 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu HDassert(!entry_ptr->is_protected); HDassert(entry_ptr->is_pinned); - HDassert(FALSE); /* see comment above */ + /* This code should be un-reachable at present, + * as all the operations by entries on flush that could cause + * it to be reachable are disallowed in the parallel case at + * present. Hence the following assertion which should be + * removed if the above changes. + */ + HDassert(FALSE); restart_scan = FALSE; diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 23c2b782f6f..24c0263514d 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -83,89 +83,6 @@ * to the HGOTO_ERROR macro, which may not be appropriate in all cases. * If so, we will need versions of the insertion and deletion macros which * do not reference the sanity checking macros. - * JRM - 5/5/04 - * - * Changes: - * - * - Removed the line: - * - * ( ( (Size) == (entry_ptr)->size ) && ( (len) != 1 ) ) || - * - * from the H5C__DLL_PRE_REMOVE_SC macro. With the addition of the - * epoch markers used in the age out based cache size reduction algorithm, - * this invariant need not hold, as the epoch markers are of size 0. - * - * One could argue that I should have given the epoch markers a positive - * size, but this would break the index_size = LRU_list_size + pl_size - * + pel_size invariant. - * - * Alternatively, I could pass the current decr_mode in to the macro, - * and just skip the check whenever epoch markers may be in use. - * - * However, any size errors should be caught when the cache is flushed - * and destroyed. Until we are tracking such an error, this should be - * good enough. - * JRM - 12/9/04 - * - * - * - In the H5C__DLL_PRE_INSERT_SC macro, replaced the lines: - * - * ( ( (len) == 1 ) && - * ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || - * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) - * ) - * ) || - * - * with: - * - * ( ( (len) == 1 ) && - * ( ( (head_ptr) != (tail_ptr) ) || - * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) - * ) - * ) || - * - * Epoch markers have size 0, so we can now have a non-empty list with - * zero size. Hence the "( (Size) <= 0 )" clause cause false failures - * in the sanity check. Since "Size" is typically a size_t, it can't - * take on negative values, and thus the revised clause "( (Size) < 0 )" - * caused compiler warnings. - * JRM - 12/22/04 - * - * - In the H5C__DLL_SC macro, replaced the lines: - * - * ( ( (len) == 1 ) && - * ( ( (head_ptr) != (tail_ptr) ) || ( (cache_ptr)->size <= 0 ) || - * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) - * ) - * ) || - * - * with - * - * ( ( (len) == 1 ) && - * ( ( (head_ptr) != (tail_ptr) ) || - * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) - * ) - * ) || - * - * Epoch markers have size 0, so we can now have a non-empty list with - * zero size. Hence the "( (Size) <= 0 )" clause cause false failures - * in the sanity check. Since "Size" is typically a size_t, it can't - * take on negative values, and thus the revised clause "( (Size) < 0 )" - * caused compiler warnings. 
- * JRM - 1/10/05 - * - * - Added the H5C__DLL_UPDATE_FOR_SIZE_CHANGE macro and the associated - * sanity checking macros. These macro are used to update the size of - * a DLL when one of its entries changes size. - * - * JRM - 9/8/05 - * - * - Added macros supporting the index list -- a doubly liked list of - * all entries in the index. This list is necessary to reduce the - * cost of visiting all entries in the cache, which was previously - * done via a scan of the hash table. - * - * JRM - 10/15/15 * ****************************************************************************/ @@ -966,28 +883,6 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ * When modifying these macros, remember to modify the similar macros * in tst/cache.c * - * Changes: - * - * - Updated existing index macros and sanity check macros to maintain - * the clean_index_size and dirty_index_size fields of H5C_t. Also - * added macros to allow us to track entry cleans and dirties. - * - * JRM -- 11/5/08 - * - * - Updated existing index macros and sanity check macros to maintain - * the index_ring_len, index_ring_size, clean_index_ring_size, and - * dirty_index_ring_size fields of H5C_t. - * - * JRM -- 9/1/15 - * - * - Updated existing index macros and sanity checks macros to - * maintain an doubly linked list of all entries in the index. - * This is necessary to reduce the computational cost of visiting - * all entries in the index, which used to be done by scanning - * the hash table. - * - * JRM -- 10/15/15 - * ***********************************************************************/ /* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It mut be a power of two. */ @@ -1518,9 +1413,6 @@ if ( ( (cache_ptr)->index_size != \ * * Skip list insertion and deletion macros: * - * These used to be functions, but I converted them to macros to avoid some - * function call overhead. - * **************************************************************************/ /*------------------------------------------------------------------------- @@ -1535,56 +1427,6 @@ if ( ( (cache_ptr)->index_size != \ * * Programmer: John Mainzer, 5/10/04 * - * Modifications: - * - * JRM -- 7/21/04 - * Updated function to set the in_tree flag when inserting - * an entry into the tree. Also modified the function to - * update the tree size and len fields instead of the similar - * index fields. - * - * All of this is part of the modifications to support the - * hash table. - * - * JRM -- 7/27/04 - * Converted the function H5C_insert_entry_in_tree() into - * the macro H5C__INSERT_ENTRY_IN_TREE in the hopes of - * wringing a little more speed out of the cache. - * - * Note that we don't bother to check if the entry is already - * in the tree -- if it is, H5SL_insert() will fail. - * - * QAK -- 11/27/04 - * Switched over to using skip list routines. - * - * JRM -- 6/27/06 - * Added fail_val parameter. - * - * JRM -- 8/25/06 - * Added the H5C_DO_SANITY_CHECKS version of the macro. - * - * This version maintains the slist_len_increase and - * slist_size_increase fields that are used in sanity - * checks in the flush routines. - * - * All this is needed as the fractal heap needs to be - * able to dirty, resize and/or move entries during the - * flush. - * - * JRM -- 12/13/14 - * Added code to set cache_ptr->slist_changed to TRUE - * when an entry is inserted in the slist. - * - * JRM -- 9/1/15 - * Added code to maintain the cache_ptr->slist_ring_len - * and cache_ptr->slist_ring_size arrays. 
- * - * JRM -- 4/29/20 - * Reworked macro to support the slist_enabled field - * of H5C_t. If slist_enabled == TRUE, the macro - * functions as before. Otherwise, the macro is a no-op, - * and the slist must be empty. - * *------------------------------------------------------------------------- */ @@ -1716,33 +1558,6 @@ if ( ( (cache_ptr)->index_size != \ * * Programmer: John Mainzer, 5/10/04 * - * Modifications: - * - * JRM -- 7/21/04 - * Updated function for the addition of the hash table. - * - * JRM - 7/27/04 - * Converted from the function H5C_remove_entry_from_tree() - * to the macro H5C__REMOVE_ENTRY_FROM_TREE in the hopes of - * wringing a little more performance out of the cache. - * - * QAK -- 11/27/04 - * Switched over to using skip list routines. - * - * JRM -- 3/28/07 - * Updated sanity checks for the new is_read_only and - * ro_ref_count fields in H5C_cache_entry_t. - * - * JRM -- 12/13/14 - * Added code to set cache_ptr->slist_changed to TRUE - * when an entry is removed from the slist. - * - * JRM -- 4/29/20 - * Reworked macro to support the slist_enabled field - * of H5C_t. If slist_enabled == TRUE, the macro - * functions as before. Otherwise, the macro is a no-op, - * and the slist must be empty. - * *------------------------------------------------------------------------- */ @@ -1853,33 +1668,6 @@ if ( ( (cache_ptr)->index_size != \ * * Programmer: John Mainzer, 9/07/05 * - * Modifications: - * - * JRM -- 8/27/06 - * Added the H5C_DO_SANITY_CHECKS version of the macro. - * - * This version maintains the slist_size_increase field - * that are used in sanity checks in the flush routines. - * - * All this is needed as the fractal heap needs to be - * able to dirty, resize and/or move entries during the - * flush. - * - * JRM -- 12/13/14 - * Note that we do not set cache_ptr->slist_changed to TRUE - * in this case, as the structure of the slist is not - * modified. - * - * JRM -- 9/1/15 - * Added code to maintain the cache_ptr->slist_ring_len - * and cache_ptr->slist_ring_size arrays. - * - * JRM -- 4/29/20 - * Reworked macro to support the slist_enabled field - * of H5C_t. If slist_enabled == TRUE, the macro - * functions as before. Otherwise, the macro is a no-op, - * and the slist must be empty. - * *------------------------------------------------------------------------- */ @@ -1976,9 +1764,6 @@ if ( ( (cache_ptr)->index_size != \ * * Replacement policy update macros: * - * These used to be functions, but I converted them to macros to avoid some - * function call overhead. - * **************************************************************************/ /*------------------------------------------------------------------------- @@ -2000,18 +1785,6 @@ if ( ( (cache_ptr)->index_size != \ * * Programmer: John Mainzer, 10/13/05 * - * Modifications: - * - * JRM -- 3/20/06 - * Modified macro to ignore pinned entries. Pinned entries - * do not appear in the data structures maintained by the - * replacement policy code, and thus this macro has nothing - * to do if called for such an entry. - * - * JRM -- 3/28/07 - * Added sanity checks using the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. 
- * *------------------------------------------------------------------------- */ @@ -2130,30 +1903,6 @@ if ( ( (cache_ptr)->index_size != \ * * Programmer: John Mainzer, 5/10/04 * - * Modifications: - * - * JRM - 7/27/04 - * Converted the function H5C_update_rp_for_eviction() to the - * macro H5C__UPDATE_RP_FOR_EVICTION in an effort to squeeze - * a bit more performance out of the cache. - * - * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause difficulties with - * the pre-processor, I'll have to remove them. - * - * JRM - 7/28/04 - * Split macro into two version, one supporting the clean and - * dirty LRU lists, and the other not. Yet another attempt - * at optimization. - * - * JRM - 3/20/06 - * Pinned entries can't be evicted, so this entry should never - * be called on a pinned entry. Added assert to verify this. - * - * JRM -- 3/28/07 - * Added sanity checks for the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. - * *------------------------------------------------------------------------- */ @@ -2241,32 +1990,6 @@ if ( ( (cache_ptr)->index_size != \ * * Programmer: John Mainzer, 5/6/04 * - * Modifications: - * - * JRM - 7/27/04 - * Converted the function H5C_update_rp_for_flush() to the - * macro H5C__UPDATE_RP_FOR_FLUSH in an effort to squeeze - * a bit more performance out of the cache. - * - * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause difficulties with - * pre-processor, I'll have to remove them. - * - * JRM - 7/28/04 - * Split macro into two versions, one supporting the clean and - * dirty LRU lists, and the other not. Yet another attempt - * at optimization. - * - * JRM - 3/20/06 - * While pinned entries can be flushed, they don't reside in - * the replacement policy data structures when unprotected. - * Thus I modified this macro to do nothing if the entry is - * pinned. - * - * JRM - 3/28/07 - * Added sanity checks based on the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. - * *------------------------------------------------------------------------- */ @@ -2499,34 +2222,6 @@ if ( ( (cache_ptr)->index_size != \ * * Programmer: John Mainzer, 5/17/04 * - * Modifications: - * - * JRM - 7/27/04 - * Converted the function H5C_update_rp_for_insertion() to the - * macro H5C__UPDATE_RP_FOR_INSERTION in an effort to squeeze - * a bit more performance out of the cache. - * - * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause difficulties with - * pre-processor, I'll have to remove them. - * - * JRM - 7/28/04 - * Split macro into two version, one supporting the clean and - * dirty LRU lists, and the other not. Yet another attempt - * at optimization. - * - * JRM - 3/10/06 - * This macro should never be called on a pinned entry. - * Inserted an assert to verify this. - * - * JRM - 8/9/06 - * Not any more. We must now allow insertion of pinned - * entries. Updated macro to support this. - * - * JRM - 3/28/07 - * Added sanity checks using the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. 
- * *------------------------------------------------------------------------- */ @@ -2637,31 +2332,6 @@ if ( ( (cache_ptr)->index_size != \ * * Programmer: John Mainzer, 5/17/04 * - * Modifications: - * - * JRM - 7/27/04 - * Converted the function H5C_update_rp_for_protect() to the - * macro H5C__UPDATE_RP_FOR_PROTECT in an effort to squeeze - * a bit more performance out of the cache. - * - * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause difficulties with - * pre-processor, I'll have to remove them. - * - * JRM - 7/28/04 - * Split macro into two version, one supporting the clean and - * dirty LRU lists, and the other not. Yet another attempt - * at optimization. - * - * JRM - 3/17/06 - * Modified macro to attempt to remove pinned entriese from - * the pinned entry list instead of from the data structures - * maintained by the replacement policy. - * - * JRM - 3/28/07 - * Added sanity checks based on the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. - * *------------------------------------------------------------------------- */ @@ -2927,12 +2597,6 @@ if ( ( (cache_ptr)->index_size != \ * * Programmer: John Mainzer, 8/23/06 * - * Modifications: - * - * JRM -- 3/28/07 - * Added sanity checks based on the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. - * *------------------------------------------------------------------------- */ @@ -3060,12 +2724,6 @@ if ( ( (cache_ptr)->index_size != \ * * Programmer: John Mainzer, 3/22/06 * - * Modifications: - * - * JRM -- 3/28/07 - * Added sanity checks based on the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. - * *------------------------------------------------------------------------- */ @@ -3181,27 +2839,6 @@ if ( ( (cache_ptr)->index_size != \ * * Programmer: John Mainzer, 5/19/04 * - * Modifications: - * - * JRM - 7/27/04 - * Converted the function H5C_update_rp_for_unprotect() to - * the macro H5C__UPDATE_RP_FOR_UNPROTECT in an effort to - * squeeze a bit more performance out of the cache. - * - * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause difficulties with - * pre-processor, I'll have to remove them. - * - * JRM - 7/28/04 - * Split macro into two version, one supporting the clean and - * dirty LRU lists, and the other not. Yet another attempt - * at optimization. - * - * JRM - 3/17/06 - * Modified macro to put pinned entries on the pinned entry - * list instead of inserting them in the data structures - * maintained by the replacement policy. - * *------------------------------------------------------------------------- */ @@ -3608,24 +3245,9 @@ typedef struct H5C_tag_info_t { * While the cache was designed with multiple replacement policies in mind, * at present only a modified form of LRU is supported. * - * JRM - 4/26/04 - * - * Profiling has indicated that searches in the instance of H5TB_TREE are - * too expensive. To deal with this issue, I have augmented the cache - * with a hash table in which all entries will be stored. Given the - * advantages of flushing entries in increasing address order, the TBBT - * is retained, but only dirty entries are stored in it. At least for - * now, we will leave entries in the TBBT after they are flushed. - * - * Note that index_size and index_len now refer to the total size of - * and number of entries in the hash table. - * - * JRM - 7/19/04 - * - * The TBBT has since been replaced with a skip list. 
This change - * greatly predates this note. - * - * JRM - 9/26/05 + * The cache has a hash table in which all entries are stored. Given the + * advantages of flushing entries in increasing address order, a skip list + * is used to track dirty entries. * * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. * This field is used to validate pointers to instances of @@ -3719,13 +3341,8 @@ typedef struct H5C_tag_info_t { * The cache requires an index to facilitate searching for entries. The * following fields support that index. * - * Addendum: JRM -- 10/14/15 - * - * We sometimes need to visit all entries in the cache. In the past, this - * was done by scanning the hash table. However, this is expensive, and - * we have come to scan the hash table often enough that it has become a - * performance issue. To repair this, I have added code to maintain a - * list of all entries in the index -- call this list the index list. + * We sometimes need to visit all entries in the cache, they are stored in + * the index list. * * The index list is maintained by the same macros that maintain the * index, and must have the same length and size as the index proper. @@ -3759,12 +3376,10 @@ typedef struct H5C_tag_info_t { * dirty_index_size == index_size. * * WARNING: - * - * The value of the clean_index_size must not be mistaken - * for the current clean size of the cache. Rather, the - * clean size of the cache is the current value of - * clean_index_size plus the amount of empty space (if any) - * in the cache. + * The value of the clean_index_size must not be mistaken for + * the current clean size of the cache. Rather, the clean size + * of the cache is the current value of clean_index_size plus + * the amount of empty space (if any) in the cache. * * clean_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to * maintain the sum of the sizes of all clean entries in the @@ -3786,7 +3401,7 @@ typedef struct H5C_tag_info_t { * H5C__HASH_TABLE_LEN. At present, this value is a power * of two, not the usual prime number. * - * I hope that the variable size of cache elements, the large + * Hopefully the variable size of cache elements, the large * hash table size, and the way in which HDF5 allocates space * will combine to avoid problems with periodicity. If so, we * can use a trivial hash function (a bit-and and a 3 bit left @@ -3827,11 +3442,10 @@ typedef struct H5C_tag_info_t { * This field is NULL if the index is empty. * * - * With the addition of the take ownership flag, it is possible that - * an entry may be removed from the cache as the result of the flush of - * a second entry. In general, this causes little trouble, but it is - * possible that the entry removed may be the next entry in the scan of - * a list. In this case, we must be able to detect the fact that the + * It is possible that an entry may be removed from the cache as the result + * of the flush of a second entry. In general, this causes little trouble, + * but it is possible that the entry removed may be the next entry in the + * scan of a list. In this case, we must be able to detect the fact that the * entry has been removed, so that the scan doesn't attempt to proceed with * an entry that is no longer in the cache. * @@ -3859,29 +3473,19 @@ typedef struct H5C_tag_info_t { * one. * * entry_watched_for_removal: Pointer to an instance of H5C_cache_entry_t - * which contains the 'next' entry for an iteration. 
Removing - * this entry must trigger a rescan of the iteration, so each - * entry removed from the cache is compared against this pointer - * and the pointer is reset to NULL if the watched entry is - * removed. - * (This functions similarly to a "dead man's switch") + * which contains the 'next' entry for an iteration. Removing + * this entry must trigger a rescan of the iteration, so each + * entry removed from the cache is compared against this pointer + * and the pointer is reset to NULL if the watched entry is + * removed. (This functions similarly to a "dead man's switch") * * * When we flush the cache, we need to write entries out in increasing * address order. An instance of a skip list is used to store dirty entries in - * sorted order. Whether it is cheaper to sort the dirty entries as needed, - * or to maintain the list is an open question. At a guess, it depends - * on how frequently the cache is flushed. We will see how it goes. - * - * For now at least, I will not remove dirty entries from the list as they - * are flushed. (this has been changed -- dirty entries are now removed from - * the skip list as they are flushed. JRM - 10/25/05) - * - * Update 4/21/20: + * sorted order. * - * Profiling indicates that the cost of maintaining the skip list is - * significant. As it is only used on flush and close, maintaining it - * only when needed is an obvious optimization. + * The cost of maintaining the skip list is significant. As it is only used + * on flush and close, it is maintained only when needed. * * To do this, we add a flag to control maintenanace of the skip list. * This flag is initially set to FALSE, which disables all operations @@ -3940,30 +3544,21 @@ typedef struct H5C_tag_info_t { * order, which results in significant savings. * * b) It facilitates checking for adjacent dirty entries when - * attempting to evict entries from the cache. While we - * don't use this at present, I hope that this will allow - * some optimizations when I get to it. + * attempting to evict entries from the cache. * * num_last_entries: The number of entries in the cache that can only be * flushed after all other entries in the cache have - * been flushed. At this time, this will only ever be - * one entry (the superblock), and the code has been - * protected with HDasserts to enforce this. This restraint - * can certainly be relaxed in the future if the need for - * multiple entries being flushed last arises, though - * explicit tests for that case should be added when said - * HDasserts are removed. - * - * Update: There are now two possible last entries - * (superblock and file driver info message). This - * number will probably increase as we add superblock - * messages. JRM -- 11/18/14 - * - * With the addition of the fractal heap, the cache must now deal with - * the case in which entries may be dirtied, moved, or have their sizes - * changed during a flush. To allow sanity checks in this situation, the - * following two fields have been added. They are only compiled in when - * H5C_DO_SANITY_CHECKS is TRUE. + * been flushed. + * + * Note: At this time, the this field will only be applied to + * two types of entries: the superblock and the file driver info + * message. The code utilizing these flags is protected with + * HDasserts to enforce this. + * + * The cache must deal with the case in which entries may be dirtied, moved, + * or have their sizes changed during a flush. To allow sanity checks in this + * situation, the following two fields have been added. 
They are only + * compiled in when H5C_DO_SANITY_CHECKS is TRUE. * * slist_len_increase: Number of entries that have been added to the * slist since the last time this field was set to zero. @@ -4020,8 +3615,8 @@ typedef struct H5C_tag_info_t { * * * For very frequently used entries, the protect/unprotect overhead can - * become burdensome. To avoid this overhead, I have modified the cache - * to allow entries to be "pinned". A pinned entry is similar to a + * become burdensome. To avoid this overhead, the cache + * allows entries to be "pinned". A pinned entry is similar to a * protected entry, in the sense that it cannot be evicted, and that * the entry can be modified at any time. * @@ -4072,29 +3667,15 @@ typedef struct H5C_tag_info_t { * The cache must have a replacement policy, and the fields supporting this * policy must be accessible from this structure. * - * While there has been interest in several replacement policies for - * this cache, the initial development schedule is tight. Thus I have - * elected to support only a modified LRU (least recently used) policy - * for the first cut. - * - * To further simplify matters, I have simply included the fields needed - * by the modified LRU in this structure. When and if we add support for - * other policies, it will probably be easiest to just add the necessary - * fields to this structure as well -- we only create one instance of this - * structure per file, so the overhead is not excessive. - * - * * Fields supporting the modified LRU policy: * - * See most any OS text for a discussion of the LRU replacement policy. - * * When operating in parallel mode, we must ensure that a read does not * cause a write. If it does, the process will hang, as the write will * be collective and the other processes will not know to participate. * - * To deal with this issue, I have modified the usual LRU policy by adding + * To deal with this issue, the usual LRU policy has been modified by adding * clean and dirty LRU lists to the usual LRU list. In general, these - * lists are only exist in parallel builds. + * lists only exist in parallel builds. * * The clean LRU list is simply the regular LRU list with all dirty cache * entries removed. @@ -4191,7 +3772,7 @@ typedef struct H5C_tag_info_t { * While the default cache size is adequate for most cases, we can run into * cases where the default is too small. Ideally, we will let the user * adjust the cache size as required. However, this is not possible in all - * cases. Thus I have added automatic cache size adjustment code. + * cases, so the cache has automatic cache size adjustment code. * * The configuration for the automatic cache size adjustment is stored in * the structure described below: @@ -4222,10 +3803,9 @@ typedef struct H5C_tag_info_t { * * resize_enabled: This is another convenience flag which is set whenever * a new set of values for resize_ctl are provided. Very - * simply, + * simply: * - * resize_enabled = size_increase_possible || - * size_decrease_possible; + * resize_enabled = size_increase_possible || size_decrease_possible; * * cache_full: Boolean flag used to keep track of whether the cache is * full, so we can refrain from increasing the size of a @@ -4248,11 +3828,6 @@ typedef struct H5C_tag_info_t { * and to prevent the infinite recursion that would otherwise * occur. 
* - * Note that this issue is not hypothetical -- this field - * was added 12/29/15 to fix a bug exposed in the testing - * of changes to the file driver info superblock extension - * management code needed to support rings. - * * msic_in_progress: As the metadata cache has become re-entrant, and as * the free space manager code has become more tightly * integrated with the metadata cache, it is possible that @@ -4265,11 +3840,6 @@ typedef struct H5C_tag_info_t { * and prevent the infinite regression that would otherwise * occur. * - * Note that this is issue is not hypothetical -- this field - * was added 2/16/17 to address this issue when it was - * exposed by modifications to test/fheap.c to cause it to - * use paged allocation. - * * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration * data for automatic cache resizing. * @@ -4362,8 +3932,8 @@ typedef struct H5C_tag_info_t { * call to H5C_protect(). * * image_loaded: Boolean flag indicating that the metadata cache has - * loaded the metadata cache image as directed by the - * MDC cache image superblock extension message. + * loaded the metadata cache image as directed by the + * MDC cache image superblock extension message. * * delete_image: Boolean flag indicating whether the metadata cache image * superblock message should be deleted and the cache image @@ -4476,11 +4046,11 @@ typedef struct H5C_tag_info_t { * free space manager metadata. * * mdfsm_settled: Boolean flag indicating whether the meta data free space - * manager is settled -- i.e. whether the correct space has - * been allocated for it in the file. + * manager is settled -- i.e. whether the correct space has + * been allocated for it in the file. * - * Note that the name of this field is deceptive. In the - * multi file case, the flag applies only to free space + * Note that the name of this field is deceptive. In the + * multi-file case, the flag applies only to free space * managers that are involved in allocating space for free * space managers. * @@ -4699,16 +4269,16 @@ typedef struct H5C_tag_info_t { * close, this field should only be set at that time. * * images_read: Integer field containing the number of cache images - * read from file. Note that reading an image is different - * from loading it -- reading the image means just that, - * while loading the image refers to decoding it and loading - * it into the metadata cache. + * read from file. Note that reading an image is different + * from loading it -- reading the image means just that, + * while loading the image refers to decoding it and loading + * it into the metadata cache. * - * In the serial case, image_read should always equal - * images_loaded. However, in the parallel case, the - * image should only be read by process 0. All other - * processes should receive the cache image via a broadcast - * from process 0. + * In the serial case, image_read should always equal + * images_loaded. However, in the parallel case, the + * image should only be read by process 0. All other + * processes should receive the cache image via a broadcast + * from process 0. * * images_loaded: Integer field containing the number of cache images * loaded since the last time statistics were reset. @@ -4719,21 +4289,19 @@ typedef struct H5C_tag_info_t { * should only change on those events. * * last_image_size: Size of the most recently loaded metadata cache image - * loaded into the cache, or zero if no image has been - * loaded. + * loaded into the cache, or zero if no image has been loaded. 
* - * At present, at most one cache image can be loaded into - * the metadata cache for any given file, and this image - * will be loaded either on the first protect, or on file - * close if no entry is protected before then. + * At present, at most one cache image can be loaded into + * the metadata cache for any given file, and this image + * will be loaded either on the first protect, or on file + * close if no entry is protected before then. * * * Fields for tracking prefetched entries. Note that flushes and evictions * of prefetched entries are tracked in the flushes and evictions arrays * discussed above. * - * prefetches: Number of prefetched entries that are loaded to the - * cache. + * prefetches: Number of prefetched entries that are loaded to the cache. * * dirty_prefetches: Number of dirty prefetched entries that are loaded * into the cache. @@ -4741,9 +4309,9 @@ typedef struct H5C_tag_info_t { * prefetch_hits: Number of prefetched entries that are actually used. * * - * As entries are now capable of moving, loading, dirtying, and deleting - * other entries in their pre_serialize and serialize callbacks, it has - * been necessary to insert code to restart scans of lists so as to avoid + * Entries may move, load, dirty, and delete + * other entries in their pre_serialize and serialize callbacks, there is + * code to restart scans of lists so as to avoid * improper behavior if the next entry in the list is the target of one on * these operations. * @@ -4757,9 +4325,9 @@ typedef struct H5C_tag_info_t { * entry in the scan. * * LRU_scan_restarts: Number of times a scan of the LRU list (that contains - * calls to H5C__flush_single_entry()) has been restarted to - * avoid potential issues with change of status of the next - * entry in the scan. + * calls to H5C__flush_single_entry()) has been restarted to + * avoid potential issues with change of status of the next + * entry in the scan. * * index_scan_restarts: Number of times a scan of the index has been * restarted to avoid potential issues with load, insertion @@ -4794,14 +4362,14 @@ typedef struct H5C_tag_info_t { * flushed in the current epoch. * * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the maximum size of any single entry + * are used to record the maximum size of any single entry * with type id equal to the array index that has resided in * the cache in the current epoch. * * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the maximum number of times that any single - * entry with type id equal to the array index that has been - * marked as pinned in the cache in the current epoch. + * are used to record the maximum number of times that any single + * entry with type id equal to the array index that has been + * marked as pinned in the cache in the current epoch. * * * Fields supporting testing: @@ -4811,9 +4379,9 @@ typedef struct H5C_tag_info_t { * the processes mpi rank. * * get_entry_ptr_from_addr_counter: Counter used to track the number of - * times the H5C_get_entry_ptr_from_addr() function has been - * called successfully. This field is only defined when - * NDEBUG is not #defined. + * times the H5C_get_entry_ptr_from_addr() function has been + * called successfully. This field is only defined when + * NDEBUG is not #defined. 
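As noted above, in the parallel case only process 0 reads the cache image and the other ranks receive it by broadcast, which is why images_read and images_loaded can differ. The following sketch shows that division of labour; the helper name is hypothetical and the file read itself is elided.

    #include <mpi.h>
    #include <stdlib.h>

    /* Illustrative sketch only -- a hypothetical helper, not the library's
     * code; the actual file read on rank 0 is elided.
     */
    int
    read_and_share_cache_image(MPI_Comm comm, long image_len,
                               unsigned char **image_out)
    {
        int            rank;
        unsigned char *buf;

        MPI_Comm_rank(comm, &rank);

        if ((buf = malloc((size_t)image_len)) == NULL)
            return -1;

        if (rank == 0) {
            /* ... read image_len bytes of the cache image from the file;
             * only this rank would bump an images_read style counter ...
             */
        }

        /* All other ranks receive the image from rank 0; every rank then
         * decodes ("loads") it, so an images_loaded style counter advances
         * everywhere.
         */
        MPI_Bcast(buf, (int)image_len, MPI_UNSIGNED_CHAR, 0, comm);

        *image_out = buf;
        return 0;
    }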
* ****************************************************************************/ diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 6a661d298fa..949c3e162cc 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -42,8 +42,6 @@ /* This sanity checking constant was picked out of the air. Increase * or decrease it if appropriate. Its purposes is to detect corrupt * object sizes, so it probably doesn't matter if it is a bit big. - * - * JRM - 5/17/04 */ #define H5C_MAX_ENTRY_SIZE ((size_t)(32 * 1024 * 1024)) @@ -978,8 +976,6 @@ typedef int H5C_ring_t; * * The fields of this structure are discussed individually below: * - * JRM - 4/26/04 - * * magic: Unsigned 32 bit integer that must always be set to * H5C__H5C_CACHE_ENTRY_T_MAGIC when the entry is valid. * The field must be set to H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC @@ -1100,15 +1096,9 @@ typedef int H5C_ring_t; * be unpinned (and possibly unprotected) during the * flush. * - * JRM -- 3/16/06 - * * in_slist: Boolean flag indicating whether the entry is in the skip list - * As a general rule, entries are placed in the list when they - * are marked dirty. However they may remain in the list after - * being flushed. - * - * Update: Dirty entries are now removed from the skip list - * when they are flushed. + * As a general rule, entries are placed in the list when they are + * marked dirty. * * flush_marker: Boolean flag indicating that the entry is to be flushed * the next time H5C_flush_cache() is called with the @@ -1116,24 +1106,13 @@ typedef int H5C_ring_t; * the entry is flushed for whatever reason. * * flush_me_last: Boolean flag indicating that this entry should not be - * flushed from the cache until all other entries without - * the flush_me_last flag set have been flushed. - * - * Note: + * flushed from the cache until all other entries without the + * flush_me_last flag set have been flushed. * - * At this time, the flush_me_last - * flag will only be applied to one entry, the superblock, - * and the code utilizing these flags is protected with HDasserts - * to enforce this. This restraint can certainly be relaxed in - * the future if the need for multiple entries getting flushed - * last or collectively arises, though the code allowing for that - * will need to be expanded and tested appropriately if that - * functionality is desired. - * - * Update: There are now two possible last entries - * (superblock and file driver info message). This - * number will probably increase as we add superblock - * messages. JRM -- 11/18/14 + * Note: At this time, the flush_me_last flag will only be applied to + * two types of entries: the superblock and the file driver info + * message. The code utilizing these flags is protected with + * HDasserts to enforce this. * * clear_on_unprotect: Boolean flag used only in PHDF5. When H5C is used * to implement the metadata cache In the parallel case, only @@ -1228,8 +1207,6 @@ typedef int H5C_ring_t; * If there are multiple entries in any hash bin, they are stored in a doubly * linked list. * - * Addendum: JRM -- 10/14/15 - * * We have come to scan all entries in the cache frequently enough that * the cost of doing so by scanning the hash table has become unacceptable. * To reduce this cost, the index now also maintains a doubly linked list @@ -1691,8 +1668,6 @@ typedef struct H5C_cache_entry_t { * * The fields of this structure are discussed individually below: * - * JRM - 8/5/15 - * * magic: Unsigned 32 bit integer that must always be set to * H5C_IMAGE_ENTRY_T_MAGIC when the entry is valid. 
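The magic field and the H5C_MAX_ENTRY_SIZE bound discussed in the hunks above exist purely for sanity checking: the magic value catches freed or corrupted entries, and the size bound catches implausible object sizes. A sketch of how such checks are typically applied follows; the constants and struct are illustrative, not the library's, though the 32 MB bound mirrors the H5C_MAX_ENTRY_SIZE value shown above.

    #include <assert.h>
    #include <stddef.h>

    /* Illustrative sketch only -- hypothetical constants and struct. */
    #define ENTRY_MAGIC    0x0C0FFEE0U
    #define MAX_ENTRY_SIZE ((size_t)(32 * 1024 * 1024))

    typedef struct cache_entry {
        unsigned magic;   /* set to ENTRY_MAGIC while the entry is valid */
        size_t   size;    /* on-disk size of the entry                   */
    } cache_entry_t;

    void
    sanity_check_entry(const cache_entry_t *entry)
    {
        assert(entry != NULL);
        assert(entry->magic == ENTRY_MAGIC);   /* catches freed/corrupt entries */
        assert(entry->size > 0);
        assert(entry->size <= MAX_ENTRY_SIZE); /* catches corrupt object sizes  */
    }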
* The field must be set to H5C_IMAGE_ENTRY_T_BAD_MAGIC @@ -1855,7 +1830,7 @@ typedef struct H5C_image_entry_t { * H5C_auto_size_ctl_t passed to the cache must have a known * version number, or an error will be flagged. * - * report_fcn: Pointer to the function that is to be called to report + * rpt_fcn: Pointer to the function that is to be called to report * activities each time the auto cache resize code is executed. If the * field is NULL, no call is made. * @@ -1978,10 +1953,6 @@ typedef struct H5C_image_entry_t { * performance, however the above flash increment algorithm will not be * triggered. * - * Hopefully, the add space algorithm detailed above will be sufficient - * for the performance problems encountered to date. However, we should - * expect to revisit the issue. - * * flash_multiple: Double containing the multiple described above in the * H5C_flash_incr__add_space section of the discussion of the * flash_incr_mode section. This field is ignored unless flash_incr_mode @@ -2048,8 +2019,8 @@ typedef struct H5C_image_entry_t { * The field is a double containing the multiplier used to derive the * new cache size from the old if a cache size decrement is triggered. * The decrement must be in the range 0.0 (in which case the cache will - * try to contract to its minimum size) to 1.0 (in which case the - * cache will never shrink). + * try to contract to its minimum size) to 1.0 (in which case the + * cache will never shrink). * * apply_max_decrement: Boolean flag used to determine whether decrements * in cache size are to be limited by the max_decrement field. diff --git a/src/H5FD.c b/src/H5FD.c index fd82217db03..9de4ad924c3 100644 --- a/src/H5FD.c +++ b/src/H5FD.c @@ -1501,8 +1501,6 @@ H5FDwrite(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t siz * * Programmer: JRM -- 6/10/20 * - * Changes: None. - * *------------------------------------------------------------------------- */ herr_t @@ -1581,8 +1579,6 @@ H5FDread_vector(H5FD_t *file, hid_t dxpl_id, uint32_t count, H5FD_mem_t types[], * * Programmer: JRM -- 6/10/20 * - * Changes: None. - * *------------------------------------------------------------------------- */ herr_t @@ -1678,8 +1674,6 @@ H5FDwrite_vector(H5FD_t *file, hid_t dxpl_id, uint32_t count, H5FD_mem_t types[] * * Programmer: NAF -- 5/19/21 * - * Changes: None. - * *------------------------------------------------------------------------- */ herr_t @@ -1779,8 +1773,6 @@ H5FDread_selection(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count, * * Programmer: NAF -- 5/14/21 * - * Changes: None. 
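The decrement multiplier, max_decrement, and apply_max_decrement fields described above combine to derive a new cache size from the old one when a size reduction is triggered. A sketch of that arithmetic, using a hypothetical helper and assuming a configured minimum cache size:

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative sketch only -- hypothetical helper; min_size is assumed
     * to be the configured lower bound on the cache size.
     */
    size_t
    shrink_cache_size(size_t old_size, double decrement, size_t min_size,
                      bool apply_max_decrement, size_t max_decrement)
    {
        /* decrement is expected to lie in [0.0, 1.0]; 1.0 means never shrink. */
        size_t new_size = (size_t)((double)old_size * decrement);

        /* Optionally cap the size of any single reduction. */
        if (apply_max_decrement && (old_size - new_size) > max_decrement)
            new_size = old_size - max_decrement;

        /* Never contract below the configured minimum. */
        if (new_size < min_size)
            new_size = min_size;

        return new_size;
    }

For example, with decrement = 0.9 and an old size of 4 MiB, the new target would be about 3.6 MiB, subject to the two clamps.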
- * *------------------------------------------------------------------------- */ herr_t diff --git a/src/H5FDint.c b/src/H5FDint.c index e1cb5ff94da..c5b87133c68 100644 --- a/src/H5FDint.c +++ b/src/H5FDint.c @@ -346,8 +346,6 @@ H5FD_write(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, const void * * Programmer: JRM -- 6/10/20 * - * Changes: None - * *------------------------------------------------------------------------- */ herr_t @@ -558,8 +556,6 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs * * Programmer: JRM -- 6/10/20 * - * Changes: None - * *------------------------------------------------------------------------- */ herr_t @@ -742,8 +738,6 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr * * Programmer: NAF -- 5/13/21 * - * Changes: None - * *------------------------------------------------------------------------- */ static herr_t @@ -1066,8 +1060,6 @@ H5FD__read_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uin * * Programmer: NAF -- 3/29/21 * - * Changes: None - * *------------------------------------------------------------------------- */ herr_t @@ -1232,8 +1224,6 @@ H5FD_read_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_s * * Programmer: NAF -- 5/19/21 * - * Changes: None - * *------------------------------------------------------------------------- */ herr_t @@ -1387,8 +1377,6 @@ H5FD_read_selection_id(H5FD_t *file, H5FD_mem_t type, uint32_t count, hid_t mem_ * * Programmer: NAF -- 5/13/21 * - * Changes: None - * *------------------------------------------------------------------------- */ static herr_t @@ -1709,8 +1697,6 @@ H5FD__write_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, ui * * Programmer: NAF -- 3/29/21 * - * Changes: None - * *------------------------------------------------------------------------- */ herr_t @@ -1867,8 +1853,6 @@ H5FD_write_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_ * * Programmer: NAF -- 5/19/21 * - * Changes: None - * *------------------------------------------------------------------------- */ herr_t diff --git a/src/H5FDmpi.c b/src/H5FDmpi.c index e71c739ef6e..b7b7489ae64 100644 --- a/src/H5FDmpi.c +++ b/src/H5FDmpi.c @@ -40,16 +40,6 @@ * Programmer: Quincey Koziol * Friday, January 30, 2004 * - * Changes: Reworked function to use the ctl callback so we can get - * rid of H5FD_class_mpi_t. Since there are no real limits - * on what the ctl callback can do, its file parameter can't - * be constant. Thus, I had to remove the const qualifier - * on this functions file parameter as well. Note also the - * circumlocution required to use the ctl callbacks output - * parameter to pass back the rank without introducing - * compiler warnings. - * JRM -- 8/13/21 - * *------------------------------------------------------------------------- */ int @@ -92,16 +82,6 @@ H5FD_mpi_get_rank(H5FD_t *file) * Programmer: Quincey Koziol * Friday, January 30, 2004 * - * Changes: Reworked function to use the ctl callback so we can get - * rid of H5FD_class_mpi_t. Since there are no real limits - * on what the ctl callback can do, its file parameter can't - * be constant. Thus, I had to remove the const qualifier - * on this functions file parameter as well. Note also the - * circumlocution required to use the ctl callbacks output - * parameter to pass back the rank without introducing - * compiler warnings. 
- * JRM -- 8/13/21 - * *------------------------------------------------------------------------- */ int @@ -145,16 +125,6 @@ H5FD_mpi_get_size(H5FD_t *file) * Programmer: Quincey Koziol * Friday, January 30, 2004 * - * Changes: Reworked function to use the ctl callback so we can get - * rid of H5FD_class_mpi_t. Since there are no real limits - * on what the ctl callback can do, its file parameter can't - * be constant. Thus, I had to remove the const qualifier - * on this functions file parameter as well. Note also the - * circumlocution required to use the ctl callbacks output - * parameter to pass back the rank without introducing - * compiler warnings. - * JRM -- 8/13/21 - * *------------------------------------------------------------------------- */ MPI_Comm diff --git a/src/H5FDsubfiling/H5FDioc_int.c b/src/H5FDsubfiling/H5FDioc_int.c index 42f088e0cf5..ce5a000fcaf 100644 --- a/src/H5FDsubfiling/H5FDioc_int.c +++ b/src/H5FDsubfiling/H5FDioc_int.c @@ -91,7 +91,6 @@ cast_to_void(const void *data) * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. *------------------------------------------------------------------------- */ herr_t @@ -263,7 +262,6 @@ ioc__write_independent_async(int64_t context_id, int64_t offset, int64_t element * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. *------------------------------------------------------------------------- */ herr_t diff --git a/src/H5FDsubfiling/H5FDioc_threads.c b/src/H5FDsubfiling/H5FDioc_threads.c index fd6fc01e088..abf816d4439 100644 --- a/src/H5FDsubfiling/H5FDioc_threads.c +++ b/src/H5FDsubfiling/H5FDioc_threads.c @@ -105,8 +105,6 @@ static void ioc_io_queue_add_entry(ioc_data_t *ioc_data, sf_work_request_t *wk_r * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. - * *------------------------------------------------------------------------- */ int @@ -264,8 +262,6 @@ finalize_ioc_threads(void *_sf_context) * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. - * *------------------------------------------------------------------------- */ static HG_THREAD_RETURN_TYPE @@ -339,7 +335,6 @@ ioc_thread_main(void *arg) * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. *------------------------------------------------------------------------- */ static int @@ -493,8 +488,6 @@ translate_opcode(io_op_t op) * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. - * *------------------------------------------------------------------------- */ static HG_THREAD_RETURN_TYPE @@ -591,8 +584,6 @@ handle_work_request(void *arg) * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. - * *------------------------------------------------------------------------- */ void @@ -612,8 +603,6 @@ H5FD_ioc_begin_thread_exclusive(void) * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. - * *------------------------------------------------------------------------- */ void @@ -679,8 +668,6 @@ from the thread pool threads... * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. - * *------------------------------------------------------------------------- */ static int @@ -870,8 +857,6 @@ ioc_file_queue_write_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_ * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. 
- * *------------------------------------------------------------------------- */ static int @@ -1217,8 +1202,6 @@ ioc_file_truncate(sf_work_request_t *msg) * Programmer: John Mainzer * 7/17/2020 * - * Changes: Initial Version/None. - * *------------------------------------------------------------------------- */ @@ -1283,8 +1266,6 @@ ioc_file_report_eof(sf_work_request_t *msg, MPI_Comm comm) * * Programmer: JRM -- 11/6/21 * - * Changes: None. - * *------------------------------------------------------------------------- */ static ioc_io_queue_entry_t * @@ -1338,8 +1319,6 @@ ioc_io_queue_alloc_entry(void) * * Programmer: JRM -- 11/7/21 * - * Changes: None. - * *------------------------------------------------------------------------- */ static void @@ -1461,8 +1440,6 @@ ioc_io_queue_add_entry(ioc_data_t *ioc_data, sf_work_request_t *wk_req_ptr) * * Programmer: JRM -- 11/7/21 * - * Changes: None. - * *------------------------------------------------------------------------- */ /* TODO: Keep an eye on statistics and optimize this algorithm if necessary. While it is O(N) @@ -1629,8 +1606,6 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock) * * Programmer: JRM -- 11/7/21 * - * Changes: None. - * *------------------------------------------------------------------------- */ static void @@ -1715,8 +1690,6 @@ ioc_io_queue_complete_entry(ioc_data_t *ioc_data, ioc_io_queue_entry_t *entry_pt * * Programmer: JRM -- 11/6/21 * - * Changes: None. - * *------------------------------------------------------------------------- */ static void diff --git a/src/H5FDsubfiling/H5FDsubfile_int.c b/src/H5FDsubfiling/H5FDsubfile_int.c index d4aef351f32..4c583e8366a 100644 --- a/src/H5FDsubfiling/H5FDsubfile_int.c +++ b/src/H5FDsubfiling/H5FDsubfile_int.c @@ -65,8 +65,6 @@ * * Programmer: JRM -- 12/13/21 * - * Changes: None. - * *------------------------------------------------------------------------- */ herr_t @@ -280,8 +278,6 @@ H5FD__subfiling__truncate_sub_files(hid_t context_id, int64_t logical_file_eof, * * Programmer: JRM -- 1/18/22 * - * Changes: None. - * *------------------------------------------------------------------------- */ herr_t diff --git a/src/H5FDsubfiling/H5FDsubfiling.c b/src/H5FDsubfiling/H5FDsubfiling.c index afdf0739a6b..e0861908d10 100644 --- a/src/H5FDsubfiling/H5FDsubfiling.c +++ b/src/H5FDsubfiling/H5FDsubfiling.c @@ -429,8 +429,6 @@ H5FD__subfiling_term(void) * Programmer: John Mainzer * 9/10/17 * - * Changes: None. - * *------------------------------------------------------------------------- */ herr_t @@ -945,8 +943,6 @@ H5FD__subfiling_sb_decode(H5FD_t *_file, const char *name, const unsigned char * * Programmer: John Mainzer * 9/8/17 * - * Modifications: - * *------------------------------------------------------------------------- */ static void * @@ -1028,8 +1024,6 @@ H5FD__copy_plist(hid_t fapl_id, hid_t *id_out_ptr) * Programmer: John Mainzer * 9/8/17 * - * Modifications: - * *------------------------------------------------------------------------- */ static void * @@ -1072,8 +1066,6 @@ H5FD__subfiling_fapl_copy(const void *_old_fa) * Programmer: John Mainzer * 9/8/17 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -2055,8 +2047,6 @@ H5FD__subfiling_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_ * * Programmer: RAW -- ??/??/21 * - * Changes: None. - * * Notes: Thus function doesn't actually implement vector read. 
* Instead, it comverts the vector read call into a series * of scalar read calls. Fix this when time permits. @@ -2219,8 +2209,6 @@ H5FD__subfiling_read_vector(H5FD_t *_file, hid_t dxpl_id, uint32_t count, H5FD_m * * Programmer: RAW -- ??/??/21 * - * Changes: None. - * * Notes: Thus function doesn't actually implement vector write. * Instead, it comverts the vector write call into a series * of scalar read calls. Fix this when time permits. diff --git a/src/H5FDsubfiling/H5subfiling_common.c b/src/H5FDsubfiling/H5subfiling_common.c index e4dcf251236..58f36430c31 100644 --- a/src/H5FDsubfiling/H5subfiling_common.c +++ b/src/H5FDsubfiling/H5subfiling_common.c @@ -596,7 +596,6 @@ H5_open_subfiling_stub_file(const char *name, unsigned flags, MPI_Comm file_comm * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. *------------------------------------------------------------------------- */ herr_t @@ -1709,7 +1708,6 @@ identify_ioc_ranks(sf_topology_t *app_topology, int rank_stride) * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. *------------------------------------------------------------------------- */ static herr_t @@ -1898,7 +1896,6 @@ init_subfiling_context(subfiling_context_t *sf_context, const char *base_filenam * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. *------------------------------------------------------------------------- */ static herr_t @@ -1961,8 +1958,6 @@ open_subfile_with_context(subfiling_context_t *sf_context, int file_acc_flags) * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. - * *------------------------------------------------------------------------- */ static herr_t @@ -2038,8 +2033,6 @@ record_fid_to_subfile(uint64_t file_id, int64_t subfile_context_id, int *next_in * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. - * *------------------------------------------------------------------------- */ static void @@ -2091,8 +2084,6 @@ clear_fid_map_entry(uint64_t file_id, int64_t sf_context_id) * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. - * *------------------------------------------------------------------------- */ static herr_t @@ -2653,7 +2644,6 @@ H5_resolve_pathname(const char *filepath, MPI_Comm comm, char **resolved_filepat * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. *------------------------------------------------------------------------- */ /*------------------------------------------------------------------------- @@ -2678,7 +2668,6 @@ H5_resolve_pathname(const char *filepath, MPI_Comm comm, char **resolved_filepat * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. *------------------------------------------------------------------------- */ herr_t @@ -2980,8 +2969,6 @@ H5_subfiling_get_file_id_prop(H5P_genplist_t *plist_ptr, uint64_t *file_id) * Programmer: Richard Warren * 7/17/2020 * - * Changes: Initial Version/None. 
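The notes above for the subfiling VFD's vector read and write routines say that, for now, a vector call is simply converted into a series of scalar calls. A rough sketch of what that conversion amounts to, with hypothetical types and a callback standing in for the driver's scalar read path:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative sketch only -- hypothetical types and callback. */
    typedef int (*scalar_read_fn)(void *file, int type, uint64_t addr,
                                  size_t size, void *buf);

    int
    read_vector_as_scalars(void *file, scalar_read_fn read_one, uint32_t count,
                           int types[], uint64_t addrs[], size_t sizes[],
                           void *bufs[])
    {
        /* One scalar read per (type, address, size, buffer) tuple. */
        for (uint32_t i = 0; i < count; i++)
            if (read_one(file, types[i], addrs[i], sizes[i], bufs[i]) < 0)
                return -1; /* propagate the first failure */

        return 0;
    }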
- * *------------------------------------------------------------------------- */ int64_t diff --git a/src/H5detect.c b/src/H5detect.c index a4913437c87..daf7708ac61 100644 --- a/src/H5detect.c +++ b/src/H5detect.c @@ -839,7 +839,7 @@ bit.\n"; fprintf(rawoutstream, " *\t\t\t"); } - fprintf(rawoutstream, " *\n * Modifications:\n *\n"); + fprintf(rawoutstream, " *\n"); fprintf(rawoutstream, " *\tDO NOT MAKE MODIFICATIONS TO THIS FILE!\n"); fprintf(rawoutstream, " *\tIt was generated by code in `H5detect.c'.\n"); diff --git a/src/H5make_libsettings.c b/src/H5make_libsettings.c index a42806222b9..88a6d7dea4b 100644 --- a/src/H5make_libsettings.c +++ b/src/H5make_libsettings.c @@ -230,7 +230,7 @@ information about the library build configuration\n"; HDfprintf(rawoutstream, " *\t\t\t"); } - HDfprintf(rawoutstream, " *\n * Modifications:\n *\n"); + HDfprintf(rawoutstream, " *\n"); HDfprintf(rawoutstream, " *\tDO NOT MAKE MODIFICATIONS TO THIS FILE!\n"); HDfprintf(rawoutstream, " *\tIt was generated by code in `H5make_libsettings.c'.\n"); diff --git a/test/big.c b/test/big.c index 8185a414d90..a008cefae3c 100644 --- a/test/big.c +++ b/test/big.c @@ -105,8 +105,6 @@ static hsize_t values_used[WRT_N]; * Programmer: Robb Matzke * Tuesday, November 24, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static hsize_t @@ -153,8 +151,6 @@ randll(hsize_t limit, int current_index) * Programmer: Robb Matzke * Wednesday, July 15, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -196,8 +192,6 @@ is_sparse(void) * Programmer: Raymond Lu * Wednesday, April 18, 2007 * - * Modifications: - * *------------------------------------------------------------------------- */ static fsizes_t @@ -266,8 +260,6 @@ supports_big(void) * Programmer: Robb Matzke * Thursday, August 6, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ /* Disable warning for "format not a string literal" here -QAK */ @@ -332,10 +324,6 @@ H5_GCC_CLANG_DIAG_ON("format-nonliteral") * Programmer: Robb Matzke * Wednesday, April 8, 1998 * - * Modifications: - * Robb Matzke, 15 Jul 1998 - * Addresses are written to the file DNAME instead of stdout. - * *------------------------------------------------------------------------- */ static int @@ -485,8 +473,6 @@ writer(char *filename, hid_t fapl, fsizes_t testsize, int wrt_n) * Programmer: Robb Matzke * Friday, April 10, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -591,8 +577,6 @@ reader(char *filename, hid_t fapl) * Programmer: Albert Chent * Mar 28, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -757,15 +741,6 @@ test_family(hid_t fapl) * Programmer: Robb Matzke * Friday, April 10, 1998 * - * Modifications: - * Albert Cheng, 2002/03/28 - * Added command option -fsize. - * Albert Cheng, 2002/04/19 - * Added command option -c. - * - * Raymond Lu, 2007/05/25 - * Added similar tests for SEC2 and STDIO drivers. 
- * *------------------------------------------------------------------------- */ int diff --git a/test/bittests.c b/test/bittests.c index 3fbd096c5c7..285f4045fe0 100644 --- a/test/bittests.c +++ b/test/bittests.c @@ -37,8 +37,6 @@ * Programmer: Robb Matzke * Tuesday, June 16, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -154,8 +152,6 @@ test_find(void) * Programmer: Robb Matzke * Tuesday, June 16, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -280,8 +276,6 @@ test_copy(void) * Programmer: Raymond Lu * Monday, April 12, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -419,8 +413,6 @@ test_shift(void) * Programmer: Raymond Lu * Monday, April 12, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -510,8 +502,6 @@ test_increment(void) * Programmer: Raymond Lu * Monday, April 12, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -584,8 +574,6 @@ test_decrement(void) * Programmer: Raymond Lu * Monday, April 12, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -688,8 +676,6 @@ test_negate(void) * Programmer: Robb Matzke * Tuesday, June 16, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -808,8 +794,6 @@ test_set(void) * Programmer: Robb Matzke * Tuesday, June 16, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t diff --git a/test/cache.c b/test/cache.c index 666615bc2d2..3c107f20af9 100644 --- a/test/cache.c +++ b/test/cache.c @@ -1969,10 +1969,6 @@ smoke_check_8(int express_test, unsigned paged) * Programmer: John Mainzer * 8/1/07 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -2287,10 +2283,6 @@ smoke_check_9(int express_test, unsigned paged) * Programmer: John Mainzer * 8/1/07 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -2600,8 +2592,6 @@ smoke_check_10(int express_test, unsigned paged) * Programmer: John Mainzer * 6/24/04 * - * Modifications: - * *------------------------------------------------------------------------- */ static unsigned @@ -2812,12 +2802,6 @@ write_permitted_check(int * Programmer: John Mainzer * 8/10/06 * - * Modifications: - * - * Updated tests to accommodate the case in which the - * slist is disabled. - * JRM -- 5/14/20 - * *------------------------------------------------------------------------- */ @@ -3093,8 +3077,6 @@ check_insert_entry(unsigned paged) * Programmer: John Mainzer * 1/10/05 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -3183,14 +3165,6 @@ check_flush_cache(unsigned paged) * Programmer: John Mainzer * 1/12/05 * - * Modifications: - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. 
- * - * JRM -- 5/14/20 - * *------------------------------------------------------------------------- */ @@ -3252,11 +3226,6 @@ check_flush_cache__empty_cache(H5F_t *file_ptr) * Programmer: John Mainzer * 1/14/05 * - * Modifications: - * - * JRM -- 4/5/06 - * Added pinned entry tests. - * *------------------------------------------------------------------------- */ @@ -4452,14 +4421,6 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) * Programmer: John Mainzer * 1/13/05 * - * Modifications: - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. - * - * JRM -- 5/14/20 - * *------------------------------------------------------------------------- */ @@ -4637,14 +4598,6 @@ check_flush_cache__multi_entry_test(H5F_t *file_ptr, int test_num, unsigned int * Programmer: John Mainzer * 4/5/06 * - * Modifications: - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. - * - * JRM -- 5/16/20 - * *------------------------------------------------------------------------- */ @@ -4835,8 +4788,6 @@ check_flush_cache__pe_multi_entry_test(H5F_t *file_ptr, int test_num, unsigned i * Programmer: John Mainzer * 9/3/06 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -8202,14 +8153,6 @@ check_flush_cache__flush_ops(H5F_t *file_ptr) * Programmer: John Mainzer * 9/3/06 * - * Modifications: - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. - * - * JRM -- 5/16/20 - * *------------------------------------------------------------------------- */ @@ -8592,19 +8535,6 @@ check_flush_cache__flush_op_test(H5F_t *file_ptr, int test_num, unsigned int flu * Programmer: John Mainzer * 10/3/06 * - * Modifications: - * - * Updated test for minor changes in the behaviour - * of H5C__flush_single_entry(). - * - * JRM -- 2/16/15 - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. - * - * JRM -- 5/16/20 - * *------------------------------------------------------------------------- */ @@ -9854,15 +9784,6 @@ check_flush_cache__flush_op_eviction_test(H5F_t *file_ptr) * Programmer: John Mainzer * 1/12/05 * - * Modifications: - * - * JRM -- 3/29/06 - * Added tests for pinned entries. - * - * JRM -- 5/17/06 - * Complete rewrite of pinned entry tests to accommodate - * the new H5C_mark_entry_dirty() call. - * *------------------------------------------------------------------------- */ @@ -11047,14 +10968,6 @@ check_flush_cache__single_entry(H5F_t *file_ptr) * Programmer: John Mainzer * 1/12/05 * - * Modifications: - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. - * - * JRM -- 5/14/20 - * *------------------------------------------------------------------------- */ @@ -11186,21 +11099,6 @@ check_flush_cache__single_entry_test(H5F_t *file_ptr, int test_num, int entry_ty * Programmer: John Mainzer * 3/28/06 * - * Modifications: - * - * JRM -- 5/17/06 - * Added the pop_mark_dirty_prot and pop_mark_dirty_pinned - * flags and supporting code to allow us to test the - * H5C_mark_entry_dirty() call. 
Use the - * call to mark the entry dirty while the entry is protected - * if pop_mark_dirty_prot is TRUE, and to mark the entry - * dirty while it is pinned if pop_mark_dirty_pinned is TRUE. - * - * JRM -- 5/14/20 - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. - * *------------------------------------------------------------------------- */ @@ -11361,8 +11259,6 @@ check_flush_cache__pinned_single_entry_test(H5F_t *file_ptr, int test_num, int e * Programmer: John Mainzer * 4/28/06 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -11581,9 +11477,6 @@ check_get_entry_status(unsigned paged) * Programmer: John Mainzer * 7/5/06 * - * Modifications: - * - * *------------------------------------------------------------------------- */ @@ -11863,10 +11756,6 @@ check_expunge_entry(unsigned paged) * Programmer: John Mainzer * 4/1/07 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ static unsigned @@ -12353,10 +12242,6 @@ check_move_entry(unsigned paged) * Programmer: John Mainzer * 4/27/06 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -12509,8 +12394,6 @@ check_move_entry__run_test(H5F_t *file_ptr, unsigned test_num, struct move_entry * Programmer: John Mainzer * 4/28/06 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -12612,12 +12495,6 @@ check_pin_protected_entry(unsigned paged) * Programmer: John Mainzer * 7/7/06 * - * Modifications: - * - * Updated function to allow for disabling of the slist. - * - * JRM -- 5/18/20 - * *------------------------------------------------------------------------- */ @@ -13480,12 +13357,6 @@ check_resize_entry(unsigned paged) * Programmer: John Mainzer * 8/2/07 * - * Modifications: - * - * Updated function to allow for disabling of the slist. - * - * JRM -- 5/18/20 - * *------------------------------------------------------------------------- */ @@ -14130,13 +14001,6 @@ check_evictions_enabled(unsigned paged) * Programmer: John Mainzer * 6/24/04 * - * Modifications: - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). - * - * JRM -- 5/14/20 - * *------------------------------------------------------------------------- */ @@ -14229,9 +14093,6 @@ check_flush_protected_err(unsigned paged) * Programmer: John Mainzer * 4/7/06 * - * Modifications: - * - * *------------------------------------------------------------------------- */ static unsigned @@ -14316,8 +14177,6 @@ check_destroy_pinned_err(unsigned paged) * Programmer: John Mainzer * 6/24/04 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -14412,8 +14271,6 @@ check_destroy_protected_err(unsigned paged) * Programmer: John Mainzer * 6/24/04 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -14494,10 +14351,6 @@ check_duplicate_insert_err(unsigned paged) * Programmer: John Mainzer * 4/24/06 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -14583,10 +14436,6 @@ check_double_pin_err(unsigned paged) * Programmer: John Mainzer * 4/24/06 * - * Modifications: - * - * None. 
- * *------------------------------------------------------------------------- */ @@ -14681,10 +14530,6 @@ check_double_unpin_err(unsigned paged) * Programmer: John Mainzer * 4/24/06 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -15035,10 +14880,6 @@ check_mark_entry_dirty_errs(unsigned paged) * Programmer: John Mainzer * 7/6/06 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -15490,10 +15331,6 @@ check_unprotect_ro_dirty_err(unsigned paged) * Programmer: John Mainzer * 4/9/07 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -15706,10 +15543,6 @@ check_protect_retries(unsigned paged) * Programmer: John Mainzer * 8/3/07 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -19710,12 +19543,6 @@ check_auto_cache_resize(hbool_t cork_ageout, unsigned paged) * Programmer: John Mainzer * 12/16/04 * - * Modifications: - * - * Added code to include the flash cache size increment - * code in this test. - * JRM -- 1/10/08 - * *------------------------------------------------------------------------- */ @@ -22312,8 +22139,6 @@ check_auto_cache_resize_disable(unsigned paged) * Programmer: John Mainzer * 12/16/04 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -22999,13 +22824,6 @@ check_auto_cache_resize_epoch_markers(unsigned paged) * Programmer: John Mainzer * 10/29/04 * - * Modifications: - * - * Added code to verify that errors in the flash cache size - * increment related fields are caught as well. - * - * JRM -- 1/17/08 - * *------------------------------------------------------------------------- */ @@ -25236,8 +25054,6 @@ check_auto_cache_resize_input_errs(unsigned paged) * Programmer: John Mainzer * 11/4/04 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -25738,8 +25554,6 @@ check_auto_cache_resize_aux_fcns(unsigned paged) * Programmer: Mike McGreevy * 12/16/08 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -28650,14 +28464,6 @@ check_flush_deps_err(unsigned paged) * Programmer: Quincey Koziol * 3/17/09 * - * Modifications: - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. - * - * JRM -- 5/14/20 - * *------------------------------------------------------------------------- */ @@ -32207,10 +32013,6 @@ check_metadata_cork(hbool_t fill_via_insertion, unsigned paged) * Programmer: John Mainzer * 4/3/15 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ static unsigned @@ -32304,14 +32106,6 @@ check_entry_deletions_during_scans(unsigned paged) * Programmer: John Mainzer * 4/4/15 * - * Modifications: - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. - * - * JRM -- 5/14/20 - * *------------------------------------------------------------------------- */ @@ -32544,14 +32338,6 @@ cedds__expunge_dirty_entry_in_flush_test(H5F_t *file_ptr) * Programmer: John Mainzer * 4/4/15 * - * Modifications: - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). 
Do this via the - * H5C_FLUSH_CACHE macro. - * - * JRM -- 5/14/20 - * *------------------------------------------------------------------------- */ @@ -32906,14 +32692,6 @@ cedds__H5C_make_space_in_cache(H5F_t *file_ptr) * Programmer: John Mainzer * 4/4/15 * - * Modifications: - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. - * - * JRM -- 5/14/20 - * *------------------------------------------------------------------------- */ @@ -33366,14 +33144,6 @@ cedds__H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *file_ptr) * Programmer: John Mainzer * 4/9/15 * - * Modifications: - * - * Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. - * - * JRM -- 5/14/20 - * *------------------------------------------------------------------------- */ @@ -33673,8 +33443,6 @@ cedds__H5C_flush_invalidate_cache__bucket_scan(H5F_t *file_ptr) * Programmer: John Mainzer * 4/12/15 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -33755,18 +33523,6 @@ check_stats(unsigned paged) * Programmer: John Mainzer * 4/22/15 * - * Modifications: - * - * Modified slist stats checks to allow for the case that - * the slist is disabled. - * - * Also added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via the - * H5C_FLUSH_CACHE macro. - * - * JRM -- 5/14/20 - * - * *------------------------------------------------------------------------- */ #if H5C_COLLECT_CACHE_STATS diff --git a/test/cache_common.c b/test/cache_common.c index df08e25ea44..d4bab3b7fbd 100644 --- a/test/cache_common.c +++ b/test/cache_common.c @@ -2664,11 +2664,6 @@ verify_unprotected(void) * Programmer: John Mainzer * 7/6/06 * - * Changes: Added code to set entry_ptr->expunged to TRUE if - * H5C_expunge_entry() returns without error. - * - * JRM -- 8/21/14 - * *------------------------------------------------------------------------- */ @@ -2727,19 +2722,6 @@ expunge_entry(H5F_t *file_ptr, int32_t type, int32_t idx) * Programmer: John Mainzer * 6/23/04 * - * Changes: Added code to setup and take down the skip list before - * and after calls to H5C_flush_cache(). Do this via calls - * to the H5C_FLUSH_CACHE macro. - * - * This is necessary, as H5C_flush() is called repeatedly - * during file flush. If we setup and took down the - * skip list on H5C_flush_cache(), we would find ourselves - * doing this repeatedly -- which is contrary to the - * objective of the exercise (avoiding as many skip list - * operations as possible). - * - * JRM -- 5/14/20 - * *------------------------------------------------------------------------- */ @@ -3512,11 +3494,6 @@ unprotect_entry(H5F_t *file_ptr, int32_t type, int32_t idx, unsigned int flags) * Programmer: John Mainzer * 6/12/04 * - * Changes: Updated slist size == dirty index size checks to - * bypass the test if cache_ptr->slist_enabled is FALSE. - * - * JRM -- 5/8/20 - * *------------------------------------------------------------------------- */ void diff --git a/test/cache_common.h b/test/cache_common.h index 28d3a651047..bdeeded0dca 100644 --- a/test/cache_common.h +++ b/test/cache_common.h @@ -141,8 +141,6 @@ * Programmer: John Mainzer * 5/14/20 * - * Changes: None. 
- * ***********************************************************************/ #define H5C_FLUSH_CACHE(file, flags, fail_mssg) \ diff --git a/test/cmpd_dset.c b/test/cmpd_dset.c index 9f4072e5be4..73d0459275b 100644 --- a/test/cmpd_dset.c +++ b/test/cmpd_dset.c @@ -127,13 +127,6 @@ typedef struct { * Programmer: Robb Matzke * Friday, January 23, 1998 * - * Modifications: - * Robb Matzke, 1999-06-23 - * If the command line switch `--noopt' is present then the fast - * compound datatype conversion is turned off. - * - * Raymond Lu, 15 June 2007 - * Moved this part of code from MAIN to TEST_COMPOUND function. *------------------------------------------------------------------------- */ static unsigned @@ -820,7 +813,6 @@ test_compound(char *filename, hid_t fapl) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ static void @@ -862,7 +854,6 @@ initialize_stype1(unsigned char *buf, size_t num) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ static void @@ -908,7 +899,6 @@ initialize_stype2(unsigned char *buf, size_t num) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ static void @@ -938,7 +928,6 @@ initialize_stype3(unsigned char *buf, size_t num) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ static void @@ -990,7 +979,6 @@ initialize_stype4(unsigned char *buf, size_t num) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ static hid_t @@ -1045,7 +1033,6 @@ create_stype1(void) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ static hid_t @@ -1103,7 +1090,6 @@ create_stype2(void) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ static hid_t @@ -1145,7 +1131,6 @@ create_stype3(void) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ static hid_t @@ -1206,7 +1191,6 @@ create_stype4(void) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ static int @@ -1289,7 +1273,6 @@ compare_data(void *src_data, void *dst_data, hbool_t src_subset) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ static unsigned @@ -1496,7 +1479,6 @@ test_hdf5_src_subset(char *filename, hid_t fapl) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ static unsigned @@ -1705,7 +1687,6 @@ test_hdf5_dst_subset(char *filename, hid_t fapl) * Programmer: Neil Fortner * Thursday, 22 January 2009 * - * Modifications: *------------------------------------------------------------------------- */ static unsigned @@ -1981,7 +1962,6 @@ test_pack_ooo(void) * Programmer: Neil Fortner * Monday, 19 October 2009 * - * Modifications: 
*------------------------------------------------------------------------- */ static unsigned @@ -2222,7 +2202,6 @@ test_ooo_order(char *filename, hid_t fapl_id) * Programmer: Raymond Lu * Friday, 15 June 2007 * - * Modifications: *------------------------------------------------------------------------- */ int diff --git a/test/dangle.c b/test/dangle.c index 660b54fe0ea..36984b82bdc 100644 --- a/test/dangle.c +++ b/test/dangle.c @@ -39,8 +39,6 @@ const char *FILENAME[] = {"dangle", NULL}; * Programmer: Quincey Koziol * Tuesday, May 13, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -138,8 +136,6 @@ test_dangle_dataset(H5F_close_degree_t degree) * Programmer: Quincey Koziol * Tuesday, May 13, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -233,8 +229,6 @@ test_dangle_group(H5F_close_degree_t degree) * Programmer: Quincey Koziol * Tuesday, May 13, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -331,8 +325,6 @@ test_dangle_datatype1(H5F_close_degree_t degree) * Programmer: Quincey Koziol * Thursday, August 25, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -418,8 +410,6 @@ test_dangle_datatype2(H5F_close_degree_t degree) * Programmer: Quincey Koziol * Wednesday, June 18, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -652,8 +642,6 @@ test_dangle_force(void) * Programmer: Quincey Koziol * Tuesday, May 13, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/dt_arith.c b/test/dt_arith.c index 6d6cb4c7214..aa0115e8a84 100644 --- a/test/dt_arith.c +++ b/test/dt_arith.c @@ -403,8 +403,6 @@ static int my_isinf(int endian, const unsigned char *val, size_t size, size_t mp * Programmer: Robb Matzke * Monday, July 6, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -430,8 +428,6 @@ fpe_handler(int H5_ATTR_UNUSED signo) * Programmer: Robb Matzke * Monday, November 16, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -472,8 +468,6 @@ reset_hdf5(void) * Programmer: Raymond Lu * April 19, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static H5T_conv_ret_t @@ -518,8 +512,6 @@ except_func(H5T_conv_except_t except_type, hid_t H5_ATTR_UNUSED src_id, hid_t H5 * Programmer: Raymond Lu * Friday, Sept 2, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -582,8 +574,6 @@ test_hard_query(void) * Programmer: Raymond Lu * Sept 7, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static H5T_conv_ret_t @@ -803,8 +793,6 @@ test_particular_fp_integer(void) * Programmer: Raymond Lu * Thursday, Jan 6, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1254,8 +1242,6 @@ test_derived_flt(void) * Programmer: Raymond Lu * Saturday, Jan 29, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1575,8 +1561,6 @@ 
test_derived_integer(void) * Programmer: Robb Matzke * Monday, November 16, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2628,8 +2612,6 @@ test_conv_int_1(const char *name, hid_t src, hid_t dst) * Programmer: Robb Matzke * Friday, April 30, 1999 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2680,8 +2662,6 @@ test_conv_int_2(void) * Programmer: Robb Matzke * Monday, July 6, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2756,8 +2736,6 @@ my_isnan(dtype_t type, void *val) * Programmer: Raymond Lu * Monday, June 20, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2796,13 +2774,6 @@ my_isinf(int endian, const unsigned char *val, size_t size, size_t mpos, size_t * Programmer: Robb Matzke * Tuesday, June 23, 1998 * - * Modifications: - * Albert Cheng, Apr 16, 2004 - * Check for underflow condition. If the src number is - * smaller than the dst MIN float number, consider it okay - * if the converted sw and hw dst are both less than or - * equal to the dst MIN float number. - * *------------------------------------------------------------------------- */ static int @@ -3404,8 +3375,6 @@ test_conv_flt_1(const char *name, int run_test, hid_t src, hid_t dst) * Programmer: Raymond Lu * Thursday, November 6, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4606,8 +4575,6 @@ test_conv_int_fp(const char *name, int run_test, hid_t src, hid_t dst) * Programmer: Raymond Lu * Monday, Nov 17, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static hbool_t @@ -4692,8 +4659,6 @@ overflows(unsigned char *origin_bits, hid_t src_id, size_t dst_num_bits) * Programmer: Robb Matzke * Tuesday, November 24, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4854,8 +4819,6 @@ run_integer_tests(const char *name) * Programmer: Raymond Lu * Tuesday, March 22, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4939,8 +4902,6 @@ run_fp_tests(const char *name) * Programmer: Raymond Lu * Monday, November 10, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -5047,8 +5008,6 @@ run_int_fp_conv(const char *name) * Programmer: Raymond Lu * Monday, November 10, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -5195,12 +5154,6 @@ run_fp_int_conv(const char *name) * Programmer: Robb Matzke * Tuesday, December 9, 1997 * - * Modifications: - * Raymond Lu - * Monday, April 4, 2005 - * These tests were split from dtypes.c because dtypes.c - * has grown too big. - * *------------------------------------------------------------------------- */ int diff --git a/test/dtypes.c b/test/dtypes.c index 2d0dc887dfa..37fc8c7c243 100644 --- a/test/dtypes.c +++ b/test/dtypes.c @@ -341,11 +341,6 @@ test_copy(void) * Programmer: Quincey Koziol * Saturday, August 30, 2003 * - * Modifications: - * Raymond Lu - * 8 December 2009 - * I added a field of VL string in the compound type to test - * H5Tdetect_class correctly detect it as string type. 
*------------------------------------------------------------------------- */ static int @@ -1347,11 +1342,6 @@ test_compound_6(void) * Programmer: Quincey Koziol * Tuesday, December 18, 2001 * - * Modifications: - * The size of compound datatype can be expanded now. - * Raymond Lu - * Wednesday, September 10, 2003 - * *------------------------------------------------------------------------- */ static int @@ -1496,11 +1486,6 @@ test_compound_7(void) * Programmer: Robb Matzke * Wednesday, January 7, 1998 * - * Modifications: - * Raymond Lu - * 27 June 2008 - * Added verification of compound type size for H5Tpack and - * test for array of nested compound type. *------------------------------------------------------------------------- */ static int @@ -2019,8 +2004,6 @@ test_compound_9(void) * Programmer: Raymond Lu * Tuesday, June 15, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2233,8 +2216,6 @@ test_compound_10(void) * Programmer: Quincey Koziol * Saturday, August 7, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2516,8 +2497,6 @@ test_compound_11(void) * Programmer: Raymond Lu * Wednesday, September 29, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2737,9 +2716,9 @@ test_compound_13(void) * Function: test_compound_14 * * Purpose: Tests compound type conversions where a vlen string will - be misaligned in the conversion buffer and the file. The - two compound types are meant to trigger two different - conversion routines. + * be misaligned in the conversion buffer and the file. The + * two compound types are meant to trigger two different + * conversion routines. * * Return: Success: 0 * @@ -2748,8 +2727,6 @@ test_compound_13(void) * Programmer: Neil Fortner * Monday, August 25, 2008 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -3204,8 +3181,6 @@ test_compound_14(void) * Programmer: Neil Fortner * Friday, September 19, 2008 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -3379,8 +3354,6 @@ test_compound_15(void) * Programmer: Ray Lu * 14 July 2022 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -3507,8 +3480,6 @@ test_compound_15_attr(void) * Programmer: Neil Fortner * Friday, October 3, 2008 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -3621,8 +3592,6 @@ test_compound_16(void) * Programmer: Neil Fortner * Tuesday, January 13, 2009 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -3927,11 +3896,6 @@ test_compound_18(void) * Programmer: Raymond Lu * Thursday, April 4, 2002 * - * Modifications: - * Raymond Lu - * Wednesday, February 9, 2005 - * Added test for H5Tenum_valueof, H5Tenum_nameof, and - * H5Tget_member_value. 
*------------------------------------------------------------------------- */ static int @@ -4174,8 +4138,6 @@ test_query(void) * Programmer: Robb Matzke * Thursday, June 4, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4331,8 +4293,6 @@ test_transient(hid_t fapl) * Programmer: Robb Matzke * Monday, June 1, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -5285,8 +5245,6 @@ test_conv_str_3(void) * Programmer: Robb Matzke * Tuesday, January 5, 1999 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -6014,13 +5972,6 @@ opaque_funcs(void) * Programmer: Raymond Lu * July 14, 2004 * - * Modifications: Raymond Lu - * July 13, 2009 - * Added the test for VL string types. - * - * Raymond Lu - * 17 February 2011 - * I added the test of reference count for decoded datatypes. *------------------------------------------------------------------------- */ static int @@ -6810,8 +6761,6 @@ conv_except(H5T_conv_except_t except_type, hid_t H5_ATTR_UNUSED src_id, hid_t H5 * make it portable to other architectures, but further * input and changes are welcome. -QAK * - * Modifications: - * *------------------------------------------------------------------------- */ static int diff --git a/test/enum.c b/test/enum.c index 780da74bf07..592236a141e 100644 --- a/test/enum.c +++ b/test/enum.c @@ -33,8 +33,6 @@ typedef enum { E1_RED, E1_GREEN, E1_BLUE, E1_WHITE, E1_BLACK } c_e1; * Programmer: Robb Matzke * Wednesday, December 23, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -315,8 +313,6 @@ test_conv(hid_t file) * Programmer: Robb Matzke * Monday, January 4, 1999 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -419,8 +415,6 @@ test_tr1(hid_t file) * Programmer: Robb Matzke * Tuesday, January 5, 1999 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -530,8 +524,6 @@ test_tr2(hid_t file) * Programmer: Elena Pourmal * Wednesday, June 7, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -622,8 +614,6 @@ test_value_dsnt_exist(void) * Programmer: Raymond Lu * Tuesday, April 4, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -757,8 +747,6 @@ test_funcs(void) * Programmer: Robb Matzke * Tuesday, December 22, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/file_image.c b/test/file_image.c index 0398c15007b..e9f50c367a9 100644 --- a/test/file_image.c +++ b/test/file_image.c @@ -699,10 +699,6 @@ test_core(void) * Programmer: John Mainzer * Tuesday, November 15, 2011 * - * Modifications: - * Vailin Choi; July 2013 - * Add the creation of user block to the file as indicated by the parameter "user". - * ****************************************************************************** */ /* Disable warning for "format not a string literal" here -QAK */ @@ -1343,10 +1339,6 @@ test_get_file_image_error_rejection(void) return 1; } /* test_get_file_image_error_rejection() */ -/* - * Modifications: - * Add testing for file image with or without user block in the file. 
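The file_image.c hunks just above refer to retrieving a file image from a file that may or may not carry a user block. A rough sketch of the usual two-call H5Fget_file_image() pattern; the file name and user-block size are placeholders, and the exact interaction between the image and the user block is what the test itself verifies.

#include <stdlib.h>
#include "hdf5.h"

static void
file_image_with_userblock(void)
{
    hid_t   fcpl = H5Pcreate(H5P_FILE_CREATE);
    hid_t   fid;
    ssize_t size;
    void   *image;

    H5Pset_userblock(fcpl, (hsize_t)512);   /* user block size must be 0 or a power of two >= 512 */
    fid = H5Fcreate("img_ub.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
    H5Fflush(fid, H5F_SCOPE_GLOBAL);

    size = H5Fget_file_image(fid, NULL, 0);          /* first call: query the image size */
    if (size > 0) {
        image = malloc((size_t)size);
        H5Fget_file_image(fid, image, (size_t)size); /* second call: copy the image      */
        free(image);
    }

    H5Fclose(fid);
    H5Pclose(fcpl);
}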
- */ int main(void) { diff --git a/test/fillval.c b/test/fillval.c index 008833d68ce..489aa764e17 100644 --- a/test/fillval.c +++ b/test/fillval.c @@ -62,8 +62,6 @@ typedef struct { * Programmer: Raymond Lu * Monday, Jan 26, 2001 * - * Modifications: - * *------------------------------------------------------------------------- */ static hid_t @@ -159,8 +157,6 @@ create_compound_vl_type(void) * Programmer: Robb Matzke * Thursday, October 1, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -404,10 +400,6 @@ test_getset_vl(hid_t fapl) * Programmer: Robb Matzke * Thursday, October 1, 1998 * - * Modifications: - * Many new cases have been added to this test since - * the fill value design has been modified. - * *------------------------------------------------------------------------- */ static int @@ -866,10 +858,6 @@ test_create(hid_t fapl, const char *base_name, H5D_layout_t layout) * Programmer: Robb Matzke * Thursday, October 1, 1998 * - * Modifications: - * This function is called by test_rdwr to write and read - * dataset for different cases. - * *------------------------------------------------------------------------- */ static int @@ -1207,10 +1195,6 @@ test_rdwr_cases(hid_t file, hid_t dcpl, const char *dname, void *_fillval, H5D_f * Programmer: Robb Matzke * Thursday, October 1, 1998 * - * Modifications: - * Many new cases have been added to this test since the - * fill value design is modified. - * *------------------------------------------------------------------------- */ static int @@ -2060,8 +2044,6 @@ test_extend_cases(hid_t file, hid_t _dcpl, const char *dset_name, const hsize_t * Programmer: Robb Matzke * Monday, October 5, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2224,8 +2206,6 @@ test_extend(hid_t fapl, const char *base_name, H5D_layout_t layout) * Programmer: Raymond Lu * Feb 27, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2379,11 +2359,6 @@ test_compatible(void) * Programmer: Joel Plutchak * April 15, 2013 * - * Modifications: - * This function is called by test_rdwr to write and read - * dataset for different cases of chunked datasets with - * unallocated chunks. - * *------------------------------------------------------------------------- */ diff --git a/test/filter_fail.c b/test/filter_fail.c index d413b079aad..172027c6155 100644 --- a/test/filter_fail.c +++ b/test/filter_fail.c @@ -97,11 +97,6 @@ filter_fail(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts, const unsigned * Programmer: Raymond Lu * 25 August 2010 * - * Modifications: - * Raymond Lu - * 5 Oct 2010 - * Test when the chunk cache is enable and disabled to make - * sure the library behaves properly. 
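The filter_fail.c hunk just above refers to exercising the write path with the raw data chunk cache enabled and disabled. One way to set that up is sketched below; the helper name is hypothetical and error checking is omitted.

#include "hdf5.h"

/* Build two file-access property lists: one left at the library defaults,
 * one with the raw data chunk cache shrunk to zero slots / zero bytes so
 * chunks are effectively written straight through without caching. */
static void
make_fapls(hid_t *fapl_cached, hid_t *fapl_nocache)
{
    *fapl_cached  = H5Pcreate(H5P_FILE_ACCESS);
    *fapl_nocache = H5Pcreate(H5P_FILE_ACCESS);

    /* the first argument (mdc_nelmts) is ignored by current library versions */
    H5Pset_cache(*fapl_nocache, 0, 0, 0, 0.0);
}

A pair of property lists like this is what lets the same write routine be run once with caching and once without.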
*------------------------------------------------------------------------- */ static herr_t @@ -237,8 +232,6 @@ test_filter_write(char *file_name, hid_t my_fapl, hbool_t cache_enabled) * Programmer: Raymond Lu * 25 August 2010 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t diff --git a/test/gen_cross.c b/test/gen_cross.c index 98c6fd7cba3..c4680392a67 100644 --- a/test/gen_cross.c +++ b/test/gen_cross.c @@ -88,8 +88,6 @@ int create_nbit_dsets_float(hid_t fid, hid_t fsid, hid_t msid); * Programmer: Raymond Lu * Some time ago * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -194,8 +192,6 @@ create_normal_dset(hid_t fid, hid_t fsid, hid_t msid) * Programmer: Neil Fortner * 27 January 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -284,8 +280,6 @@ create_scale_offset_dsets_float(hid_t fid, hid_t fsid, hid_t msid) * Programmer: Raymond Lu * 21 January 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -374,8 +368,6 @@ create_scale_offset_dsets_double(hid_t fid, hid_t fsid, hid_t msid) * Programmer: Neil Fortner * 27 January 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -472,8 +464,6 @@ create_scale_offset_dsets_char(hid_t fid, hid_t fsid, hid_t msid) * Programmer: Neil Fortner * 27 January 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -570,8 +560,6 @@ create_scale_offset_dsets_short(hid_t fid, hid_t fsid, hid_t msid) * Programmer: Raymond Lu * 21 January 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -669,8 +657,6 @@ create_scale_offset_dsets_int(hid_t fid, hid_t fsid, hid_t msid) * Programmer: Neil Fortner * 27 January 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -767,8 +753,6 @@ create_scale_offset_dsets_long_long(hid_t fid, hid_t fsid, hid_t msid) * Programmer: Raymond Lu * 29 March 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -857,8 +841,6 @@ create_fletcher_dsets_float(hid_t fid, hid_t fsid, hid_t msid) * Programmer: Raymond Lu * 29 March 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -958,8 +940,6 @@ create_deflate_dsets_float(hid_t fid, hid_t fsid, hid_t msid) * Programmer: Raymond Lu * 29 March 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -1049,8 +1029,6 @@ create_szip_dsets_float(hid_t fid, hid_t fsid, hid_t msid) * Programmer: Raymond Lu * 29 March 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -1139,8 +1117,6 @@ create_shuffle_dsets_float(hid_t fid, hid_t fsid, hid_t msid) * Programmer: Raymond Lu * 29 March 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/gen_filters.c b/test/gen_filters.c index d82a3eb6250..c1715389f95 100644 --- a/test/gen_filters.c +++ b/test/gen_filters.c @@ -228,8 +228,6 @@ create_file_with_bogus_filter(void) * * Return: Success: * - * Modifications: - * 
*------------------------------------------------------------------------- */ int diff --git a/test/gen_new_array.c b/test/gen_new_array.c index e04b71a72e8..1db1c69d7da 100644 --- a/test/gen_new_array.c +++ b/test/gen_new_array.c @@ -50,8 +50,6 @@ * Programmer: Robb Matzke * Monday, October 26, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/gen_new_mtime.c b/test/gen_new_mtime.c index 7632769db0b..87a104a4140 100644 --- a/test/gen_new_mtime.c +++ b/test/gen_new_mtime.c @@ -39,8 +39,6 @@ * Programmer: Quincey Koziol * Friday, January 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/gen_noencoder.c b/test/gen_noencoder.c index fe9cea64d35..a0be0ef4d2e 100644 --- a/test/gen_noencoder.c +++ b/test/gen_noencoder.c @@ -24,8 +24,6 @@ * Programmer: James Laird and Nat Furrer * Thursday, July 1, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/gen_old_array.c b/test/gen_old_array.c index ab0ea2051ab..2e6e9535606 100644 --- a/test/gen_old_array.c +++ b/test/gen_old_array.c @@ -49,8 +49,6 @@ * Programmer: Robb Matzke * Monday, October 26, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/gen_old_layout.c b/test/gen_old_layout.c index b7a9b3d756d..a18e7fb9fce 100644 --- a/test/gen_old_layout.c +++ b/test/gen_old_layout.c @@ -45,8 +45,6 @@ * Programmer: Quincey Koziol * Friday, January 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/gen_old_mtime.c b/test/gen_old_mtime.c index 9645a5e40b2..d67d018d387 100644 --- a/test/gen_old_mtime.c +++ b/test/gen_old_mtime.c @@ -39,8 +39,6 @@ * Programmer: Quincey Koziol * Friday, January 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/gen_sizes_lheap.c b/test/gen_sizes_lheap.c index dc55d81a42d..c30b811fac1 100644 --- a/test/gen_sizes_lheap.c +++ b/test/gen_sizes_lheap.c @@ -36,8 +36,6 @@ * Programmer: Neil Fortner * Thursday, July 15, 2010 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/h5test.c b/test/h5test.c index 1467e0b41e0..c667ffdfcb5 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -132,8 +132,6 @@ static char *h5_fixname_real(const char *base_name, hid_t fapl, const char *_su * Programmer: Robb Matzke * Wednesday, March 4, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -918,8 +916,6 @@ h5_get_libver_fapl(hid_t fapl) * Programmer: Robb Matzke * Friday, November 20, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ void @@ -938,8 +934,6 @@ h5_no_hwconv(void) * Programmer: Albert Cheng * 2002/04/22 * - * Modifications: - * *------------------------------------------------------------------------- */ void @@ -1010,12 +1004,6 @@ h5_show_hostname(void) * object. * Return: 0 if all is fine; otherwise non-zero. * Programmer: Albert Cheng, 2002/05/21. - * Modifications: - * Bill Wendling, 2002/05/31 - * Modified so that the HDF5_MPI_INFO environment variable can - * be a semicolon separated list of "key=value" pairings. 
Most - * of the code is to remove any whitespaces which might be - * surrounding the "key=value" pairs. */ int h5_set_info_object(void) @@ -1109,7 +1097,6 @@ h5_set_info_object(void) * Purpose: Display content of an MPI Info object * Return: void * Programmer: Albert Cheng 2002/05/21 - * Modifications: */ void h5_dump_info_object(MPI_Info info) @@ -1339,8 +1326,6 @@ print_func(const char *format, ...) * * Programmer: * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -1391,11 +1376,6 @@ h5_szip_can_encode(void) * Programmer: Leon Arber * 4/4/05 * - * Modifications: - * Use original getenv if MPI is not initialized. This happens - * one uses the PHDF5 library to build a serial nature code. - * Albert 2006/04/07 - * *------------------------------------------------------------------------- */ char * @@ -1566,8 +1546,6 @@ h5_verify_cached_stabs_cb(hid_t oid, const char H5_ATTR_UNUSED *name, const H5O_ * Programmer: Neil Fortner * Tuesday, April 12, 2011 * - * Modifications: - * *------------------------------------------------------------------------- */ herr_t diff --git a/test/hdfs.c b/test/hdfs.c index 06cd7cc2e95..bf8e84058ea 100644 --- a/test/hdfs.c +++ b/test/hdfs.c @@ -404,8 +404,6 @@ static H5FD_hdfs_fapl_t default_fa = { * Programmer: Jacob Smith * 2018-04-25 * - * Changes: None. - * *--------------------------------------------------------------------------- */ static int @@ -614,8 +612,6 @@ test_fapl_config_validation(void) * Programmer: Jacob Smith * 2018-04-25 * - * Changes: None. - * *------------------------------------------------------------------------- */ static int @@ -1383,10 +1379,6 @@ test_read(void) * Programmer: Jacob Smith * 2017-11-06 * - * Changes: - * + modify from S3VFD codebase to HDFS; Minor changes, mostly. - * + Jacob Smith 2018-06-08 - * *--------------------------------------------------------------------------- */ static int @@ -1529,10 +1521,6 @@ test_cmp(void) * Programmer: Jacob Smith * 2017-11-07 * - * Changes: - * + modify from S3VFD codebase to HDFS; Minor changes, mostly. - * + Jacob Smith 2018-06-08 - * *--------------------------------------------------------------------------- */ static int diff --git a/test/istore.c b/test/istore.c index 916cf670493..b688acda528 100644 --- a/test/istore.c +++ b/test/istore.c @@ -94,8 +94,6 @@ is_sparse(void) * Programmer: Robb Matzke * Friday, October 10, 1997 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -137,13 +135,6 @@ print_array(uint8_t *array, size_t nx, size_t ny, size_t nz) * Programmer: Robb Matzke * Wednesday, October 15, 1997 * - * Modifications: - * Converted to use datasets instead of directly messing with - * the istore routines, etc. since the new raw data architecture - * performs hyperslab operations at a higher level than the - * istore routines did and the new istore routines can't handle - * I/O on more than one chunk at a time. 
QAK - 2003/04/16 - * *------------------------------------------------------------------------- */ static hid_t @@ -198,8 +189,6 @@ new_object(hid_t f, const char *name, int ndims, hsize_t dims[], hsize_t cdims[] * Programmer: Robb Matzke * Wednesday, October 15, 1997 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -246,8 +235,6 @@ test_create(hid_t f, const char *prefix) * Programmer: Robb Matzke * Wednesday, October 15, 1997 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -445,8 +432,6 @@ test_extend(hid_t f, const char *prefix, size_t nx, size_t ny, size_t nz) * Programmer: Robb Matzke * Wednesday, October 22, 1997 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t diff --git a/test/links.c b/test/links.c index 56912fe3c53..9e4d2e33f3a 100644 --- a/test/links.c +++ b/test/links.c @@ -7815,8 +7815,6 @@ external_link_self(hid_t fapl, hbool_t new_format) * Programmer: Quincey Koziol * Tuesday, July 26, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int diff --git a/test/links_env.c b/test/links_env.c index 0bd2add237d..27e29ea0ae2 100644 --- a/test/links_env.c +++ b/test/links_env.c @@ -48,8 +48,6 @@ static int external_link_env(hid_t fapl, hbool_t new_format); * Programmer: Vailin Choi * Feb. 20, 2008 * - * Modifications: - * *------------------------------------------------------------------------- */ static int diff --git a/test/mount.c b/test/mount.c index c5a04b5df06..6807f368a23 100644 --- a/test/mount.c +++ b/test/mount.c @@ -41,8 +41,6 @@ int bm[NX][NY], bm_out[NX][NY]; /* Data buffers */ * Programmer: Robb Matzke * Wednesday, October 7, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -116,8 +114,6 @@ setup(hid_t fapl) * Programmer: Robb Matzke * Wednesday, October 7, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -172,8 +168,6 @@ test_basic(hid_t fapl) * Programmer: Robb Matzke * Wednesday, October 7, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -461,8 +455,6 @@ test_samefile(hid_t fapl) * Programmer: Robb Matzke * Wednesday, October 7, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -561,8 +553,6 @@ test_hide(hid_t fapl) * Programmer: Robb Matzke * Tuesday, October 13, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -650,8 +640,6 @@ test_assoc(hid_t fapl) * Programmer: Robb Matzke * Wednesday, October 7, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -720,8 +708,6 @@ test_mntlnk(hid_t fapl) * Programmer: Robb Matzke * Wednesday, October 7, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -790,8 +776,6 @@ test_move(hid_t fapl) * Programmer: Robb Matzke * Wednesday, October 7, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -859,8 +843,6 @@ test_preopen(hid_t fapl) * Programmer: Robb Matzke * Wednesday, October 14, 1998 * - 
* Modifications: - * *------------------------------------------------------------------------- */ static int @@ -934,8 +916,6 @@ test_postopen(hid_t fapl) * Programmer: Robb Matzke * Tuesday, October 13, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1088,8 +1068,6 @@ test_unlink(hid_t fapl) * Programmer: Robb Matzke * Tuesday, October 13, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1151,8 +1129,6 @@ test_mvmpt(hid_t fapl) * Programmer: Robb Matzke * Wednesday, October 14, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1273,8 +1249,6 @@ test_interlink(hid_t fapl) * Programmer: Robb Matzke * Wednesday, October 14, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1352,8 +1326,6 @@ test_uniformity(hid_t fapl) * Programmer: Robb Matzke * Wednesday, October 14, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1442,8 +1414,6 @@ test_close(hid_t fapl) * Programmer: Quincey Koziol * Wednesday, May 4, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1677,8 +1647,6 @@ test_mount_after_close(hid_t fapl) * Programmer: Quincey Koziol * Monday, June 6, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1946,8 +1914,6 @@ test_mount_after_unmount(hid_t fapl) * Programmer: Quincey Koziol * Thursday, June 30, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2098,8 +2064,6 @@ test_missing_unmount(hid_t fapl) * Programmer: Quincey Koziol * Tuesday, July 5, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2228,8 +2192,6 @@ test_hold_open_file(hid_t fapl) * Programmer: Quincey Koziol * Thursday, July 14, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2384,8 +2346,6 @@ test_hold_open_group(hid_t fapl) * Programmer: Quincey Koziol * Tuesday, July 19, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2528,8 +2488,6 @@ test_fcdegree_same(hid_t fapl) * Programmer: Quincey Koziol * Tuesday, July 19, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2671,8 +2629,6 @@ test_fcdegree_semi(hid_t fapl) * Programmer: Quincey Koziol * Tuesday, July 19, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2813,8 +2769,6 @@ test_fcdegree_strong(hid_t fapl) * Programmer: Quincey Koziol * Tuesday, July 19, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -3017,8 +2971,6 @@ test_acc_perm(hid_t fapl) * Programmer: Quincey Koziol * Monday, July 25, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -3240,8 +3192,6 @@ test_mult_mount(hid_t fapl) * Programmer: Quincey Koziol * Monday, July 25, 2005 * - * Modifications: - * 
*------------------------------------------------------------------------- */ static int @@ -3453,8 +3403,6 @@ test_nested_survive(hid_t fapl) * Programmer: Quincey Koziol * Monday, July 25, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -3624,8 +3572,6 @@ test_close_parent(hid_t fapl) * Programmer: Quincey Koziol * Monday, July 25, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4016,8 +3962,6 @@ test_cut_graph(hid_t fapl) * Programmer: Quincey Koziol * Monday, July 25, 2005 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4181,8 +4125,6 @@ test_symlink(hid_t fapl) * Programmer: Neil Fortner * Friday, August 1, 2008 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4287,8 +4229,6 @@ test_sharedacc(hid_t fapl) * Programmer: Neil Fortner * Friday, August 6, 2008 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4446,8 +4386,6 @@ test_sharedclose(hid_t fapl) * Programmer: Neil Fortner * Friday, November 14, 2008 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -4600,8 +4538,6 @@ test_multisharedclose(hid_t fapl) * Programmer: Robb Matzke * Wednesday, October 7, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/mtime.c b/test/mtime.c index 8391def71a5..882189e51e4 100644 --- a/test/mtime.c +++ b/test/mtime.c @@ -39,13 +39,6 @@ const char *FILENAME[] = {"mtime", NULL}; * Programmer: Robb Matzke * Thursday, July 30, 1998 * - * Modifications: - * Added checks for old and new modification time messages - * in pre-created datafiles (generated with gen_old_mtime.c and - * gen_new_mtime.c). 
- * Quincey Koziol - * Friday, January 3, 2003 - * *------------------------------------------------------------------------- */ int diff --git a/test/ntypes.c b/test/ntypes.c index b825ecdb2d5..92b28960a69 100644 --- a/test/ntypes.c +++ b/test/ntypes.c @@ -63,8 +63,6 @@ const char *FILENAME[] = {"ntypes", NULL}; * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -314,8 +312,6 @@ test_atomic_dtype(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -644,8 +640,6 @@ test_compound_dtype2(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -857,8 +851,6 @@ test_compound_dtype(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -1098,8 +1090,6 @@ test_compound_dtype3(hid_t file) * Programmer: Quincey Koziol * January 31, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -1325,8 +1315,6 @@ test_compound_opaque(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -1484,8 +1472,6 @@ test_enum_dtype(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -1672,8 +1658,6 @@ test_array_dtype(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -1824,8 +1808,6 @@ test_array_dtype2(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -2030,8 +2012,6 @@ test_vl_dtype(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -2170,8 +2150,6 @@ test_vlstr_dtype(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -2292,8 +2270,6 @@ test_str_dtype(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -2465,8 +2441,6 @@ test_refer_dtype(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -2679,8 +2653,6 @@ test_refer_dtype2(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -2778,11 +2750,6 @@ test_opaque_dtype(hid_t file) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * Raymond Lu - * 1 December 2009 - * I added the support for bitfield and changed the test to - * compare the data being read back. 
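The ntypes.c hunk just above refers to adding bitfield support to the native-type test. An illustrative sketch of mapping a standard bitfield type to its native counterpart via H5Tget_native_type(); this is not code from the test.

#include "hdf5.h"

static htri_t
native_bitfield_matches(void)
{
    hid_t  file_tid   = H5Tcopy(H5T_STD_B8LE);                        /* 8-bit little-endian bitfield    */
    hid_t  native_tid = H5Tget_native_type(file_tid, H5T_DIR_DEFAULT);
    htri_t same       = H5Tequal(native_tid, H5T_NATIVE_B8);          /* compare with the native bitfield */

    H5Tclose(native_tid);
    H5Tclose(file_tid);
    return same;
}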
*------------------------------------------------------------------------- */ static herr_t @@ -2937,8 +2904,6 @@ test_bitfield_dtype(hid_t file) * Programmer: Pedro Vicente * September 3, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -3145,8 +3110,6 @@ test_ninteger(void) * Programmer: Raymond Lu * October 15, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/page_buffer.c b/test/page_buffer.c index d588d30cfcd..a6f7f0a94e3 100644 --- a/test/page_buffer.c +++ b/test/page_buffer.c @@ -554,21 +554,8 @@ test_args(hid_t orig_fapl, const char *env_h5_drvr) * Programmer: unknown * ?? / ?? / ?? * - * Changes: Added base_page_cnt field as supporting code. This allows - * the test to adjust to the number of page buffer pages - * accessed during file open / create. - * - * The test for the value of base_page_cnt just after file - * open exists detect changes in library behavior. Assuming - * any such change is not indicative of other issues, these - * tests can be modified to reflect the change. - * - * JRM -- 2/23/17 - * *------------------------------------------------------------------------- */ - -/* Changes due to file space page size has a minimum size of 512 */ static unsigned test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr) { @@ -832,18 +819,6 @@ test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr) * Programmer: unknown * ?? / ?? / ?? * - * Changes: Added base_page_cnt field as supporting code. This allows - * the test to adjust to the number of page buffer pages - * accessed during file open / create. - * - * The test for the value of base_page_cnt just after file - * open exists detect changes in library behavior. Assuming - * any such change is not indicative of other issues, these - * tests can be modified to reflect the change. - * - * JRM -- 2/23/17 - * - * *------------------------------------------------------------------------- */ @@ -1086,19 +1061,6 @@ test_lru_processing(hid_t orig_fapl, const char *env_h5_drvr) * Programmer: unknown * ?? / ?? / ?? * - * Changes: Added the base_raw_cnt and base_meta_cnt fields and - * supporting code. This allows the test to adjust to the - * number of page buffer pages accessed during file open / - * create. - * - * The tests for the values of base_raw_cnt and base_meta_cnt - * just after file open exist detect changes in library - * behavior. Assuming any such change is not indicative of - * other issues, these tests can be modified to reflect the - * change. - * - * JRM -- 2/23/17 - * *------------------------------------------------------------------------- */ @@ -1721,19 +1683,6 @@ test_min_threshold(hid_t orig_fapl, const char *env_h5_drvr) * Programmer: unknown * ?? / ?? / ?? * - * Changes: Added the base_raw_cnt and base_meta_cnt fields and - * supporting code. This allows the test to adjust to the - * number of page buffer pages accessed during file open / - * create. - * - * The tests for the values of base_raw_cnt and base_meta_cnt - * just after file open exist detect changes in library - * behavior. Assuming any such change is not indicative of - * other issues, these tests can be modified to reflect the - * change. 
- * - * JRM -- 2/23/17 - * *------------------------------------------------------------------------- */ static unsigned @@ -2024,8 +1973,6 @@ test_stats_collection(hid_t orig_fapl, const char *env_h5_drvr) * Programmer: John Mainzer * 03/21/17 * - * Changes: None. - * *------------------------------------------------------------------------- */ diff --git a/test/reserved.c b/test/reserved.c index d0e0c85283e..a820344f738 100644 --- a/test/reserved.c +++ b/test/reserved.c @@ -30,8 +30,6 @@ const char *FILENAME[] = {"rsrv_heap", "rsrv_ohdr", "rsrv_vlen", NULL}; * Nat Furrer * Friday, May 28, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -165,8 +163,6 @@ rsrv_heap(void) * Nat Furrer * Friday, May 28, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -309,8 +305,6 @@ rsrv_ohdr(void) * Nat Furrer * Thursday, July 1, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -471,8 +465,6 @@ rsrv_vlen(void) * Programmer: Nat Furrer and James Laird * Thursday, July 1, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/s3comms.c b/test/s3comms.c index d68cc847ce4..45978fb2be6 100644 --- a/test/s3comms.c +++ b/test/s3comms.c @@ -1759,8 +1759,6 @@ test_percent_encode_char(void) * * Programmer: Jacob Smith 2018-01-24 * - * Changes: None - * *--------------------------------------------------------------------------- */ static herr_t @@ -1813,8 +1811,6 @@ test_s3r_get_filesize(void) * * Programmer: Jacob Smith 2018-01-?? * - * Changes: None - * *--------------------------------------------------------------------------- */ static herr_t diff --git a/test/space_overflow.c b/test/space_overflow.c index 41530fd4a87..d31b52d66de 100644 --- a/test/space_overflow.c +++ b/test/space_overflow.c @@ -37,8 +37,6 @@ * Programmer: Robb Matzke * Monday, October 26, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/stab.c b/test/stab.c index 26cb015e59e..697bce081a9 100644 --- a/test/stab.c +++ b/test/stab.c @@ -191,7 +191,6 @@ test_misc(hid_t fcpl, hid_t fapl, hbool_t new_format) * * Programmer: Robb Matzke 2002-03-28 * - * Modifications: *------------------------------------------------------------------------- */ static int @@ -1405,8 +1404,6 @@ corrupt_stab_msg(void) * Programmer: Robb Matzke * Tuesday, November 24, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/test/tattr.c b/test/tattr.c index a4ff0880008..5d822340067 100644 --- a/test/tattr.c +++ b/test/tattr.c @@ -11655,8 +11655,6 @@ test_attr(void) * Programmer: Albert Cheng * July 2, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/tcheck_version.c b/test/tcheck_version.c index a528f5e81a0..2d5c239f7b5 100644 --- a/test/tcheck_version.c +++ b/test/tcheck_version.c @@ -22,8 +22,6 @@ * * Programmer: Albert Cheng * September 20, 2009 - * Modifications: - * Added abort signal intercept. 
AKC - 2009/10/16 - */ #include "h5test.h" diff --git a/test/tconfig.c b/test/tconfig.c index fdab5ef5d1d..ec6bb2c4d6f 100644 --- a/test/tconfig.c +++ b/test/tconfig.c @@ -75,8 +75,6 @@ test_configure(void) * Programmer: Albert Cheng * September 25, 2001 * - * Modifications: - * *------------------------------------------------------------------------- */ void @@ -95,10 +93,6 @@ cleanup_configure(void) * Programmer: Albert Cheng * September 25, 2001 * - * Modifications: - * Albert Cheng, 2004/10/14 - * Verified both signed and unsigned int types. - * *------------------------------------------------------------------------- */ void @@ -183,8 +177,6 @@ test_config_ctypes(void) * Programmer: Albert Cheng * October 12, 2009 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/tcoords.c b/test/tcoords.c index f2bad20811e..230db608418 100644 --- a/test/tcoords.c +++ b/test/tcoords.c @@ -713,8 +713,6 @@ test_coords(void) * Programmer: Raymond Lu * 20 Dec. 2007 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/test_swmr.pwsh.in b/test/test_swmr.pwsh.in index de7a57a36de..8f09740c2c5 100644 --- a/test/test_swmr.pwsh.in +++ b/test/test_swmr.pwsh.in @@ -85,13 +85,6 @@ function Wait-Message { ############################################################################### ## Main ## -## Modifications: -## Vailin Choi; July 2013 -## Add waiting of message file before launching the reader(s). -## Due to the implementation of file locking, coordination -## is needed in file opening for the writer/reader tests -## to proceed as expected. -## ############################################################################### # Check to see if the VFD specified by the HDF5_DRIVER environment variable diff --git a/test/test_swmr.sh.in b/test/test_swmr.sh.in index aacd575dbf3..e4c75466b40 100644 --- a/test/test_swmr.sh.in +++ b/test/test_swmr.sh.in @@ -83,13 +83,6 @@ WAIT_MESSAGE() { ############################################################################### ## Main ## -## Modifications: -## Vailin Choi; July 2013 -## Add waiting of message file before launching the reader(s). -## Due to the implementation of file locking, coordination -## is needed in file opening for the writer/reader tests -## to proceed as expected. -## ############################################################################### # Check to see if the VFD specified by the HDF5_DRIVER environment variable diff --git a/test/testframe.c b/test/testframe.c index 9bcccd196aa..f3cf7816b2b 100644 --- a/test/testframe.c +++ b/test/testframe.c @@ -126,9 +126,6 @@ AddTest(const char *TheName, void (*TheCall)(void), void (*Cleanup)(void), const * private_parser: Optional routine provided by test program to parse the * private options. Default to NULL which means none is provided. * - * Modifications: - * Albert Cheng 2004/08/17 - * Added the ProgName, private_usage and private_parser arguments. 
*/ void TestInit(const char *ProgName, void (*private_usage)(void), int (*private_parser)(int ac, char *av[])) diff --git a/test/tfile.c b/test/tfile.c index bca3da2bfd6..f24b55d8c91 100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -8226,8 +8226,6 @@ test_file(void) * Programmer: Albert Cheng * July 2, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/tgenprop.c b/test/tgenprop.c index 46bb0ac220d..5751ce7d7cb 100644 --- a/test/tgenprop.c +++ b/test/tgenprop.c @@ -2188,8 +2188,6 @@ test_genprop(void) * Programmer: Quincey Koziol * June 8, 1999 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/th5s.c b/test/th5s.c index 25ab2906a08..40d8b06eda2 100644 --- a/test/th5s.c +++ b/test/th5s.c @@ -3507,8 +3507,6 @@ test_h5s(void) * Programmer: Albert Cheng * July 2, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/titerate.c b/test/titerate.c index defed212b7d..1e23ade10bf 100644 --- a/test/titerate.c +++ b/test/titerate.c @@ -1203,8 +1203,6 @@ test_iterate(void) * Programmer: Quincey Koziol * April 5, 2000 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/tmeta.c b/test/tmeta.c index f5c5fc1b2d7..d55882d09a8 100644 --- a/test/tmeta.c +++ b/test/tmeta.c @@ -121,8 +121,6 @@ test_metadata(void) * Programmer: Albert Cheng * July 2, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/trefer.c b/test/trefer.c index 26a883b1226..2c622340d9f 100644 --- a/test/trefer.c +++ b/test/trefer.c @@ -3611,8 +3611,6 @@ test_reference(void) * Programmer: Quincey Koziol * September 8, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/trefer_deprec.c b/test/trefer_deprec.c index 75d441df3d4..8d75fed2d6e 100644 --- a/test/trefer_deprec.c +++ b/test/trefer_deprec.c @@ -1853,8 +1853,6 @@ test_reference_deprec(void) * Programmer: Quincey Koziol * September 8, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/tsohm.c b/test/tsohm.c index d923efbc26a..7d9964f97ab 100644 --- a/test/tsohm.c +++ b/test/tsohm.c @@ -480,8 +480,6 @@ make_dtype_1(void) * Programmer: James Laird * Saturday, August 26, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static hid_t @@ -555,8 +553,6 @@ make_dtype_2(void) * Programmer: James Laird * Wednesday, October 4, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static hid_t @@ -1521,8 +1517,6 @@ size2_verify_plist2(hid_t plist) * Programmer: James Laird * Friday, January 26, 2007 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -1565,8 +1559,6 @@ size2_dump_struct(const char *name, size2_helper_struct *sizes) * Programmer: James Laird * Friday, November 17, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2724,8 +2716,6 @@ test_sohm_size2(int close_reopen) * Programmer: James Laird * Tuesday, December 19, 2006 * - * Modifications: - * 
*------------------------------------------------------------------------- */ static void @@ -2763,8 +2753,6 @@ delete_helper_write(hid_t file_id, hid_t *dspace_id, hid_t *dcpl_id, int x) * Programmer: James Laird * Tuesday, December 19, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -2812,8 +2800,6 @@ delete_helper_read(hid_t file_id, hid_t *dspace_id, int x) * Programmer: James Laird * Tuesday, December 19, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -2896,8 +2882,6 @@ delete_helper(hid_t fcpl_id, hid_t *dspace_id, hid_t *dcpl_id) * Programmer: James Laird * Tuesday, December 19, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -3036,8 +3020,6 @@ test_sohm_delete(void) * Programmer: James Laird * Wednesday, January 3, 2007 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -3137,8 +3119,6 @@ verify_dset_create_and_delete_does_not_grow_file(hid_t fcpl_id) * Programmer: James Laird * Wednesday, January 3, 2007 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -3217,8 +3197,6 @@ test_sohm_delete_revert(void) * Programmer: James Laird * Friday, December 22, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -3285,8 +3263,6 @@ verify_dset_create_and_open_through_extlink_with_sohm(hid_t src_fcpl_id, hid_t d * Programmer: James Laird * Friday, December 22, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -3333,8 +3309,6 @@ test_sohm_extlink(void) * Programmer: James Laird * Wednesday, January 10, 2007 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -3600,8 +3574,6 @@ verify_dataset_extension(hid_t fcpl_id, hbool_t close_reopen) * Programmer: James Laird * Wednesday, January 10, 2007 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -3682,8 +3654,6 @@ test_sohm_extend_dset(void) * Programmer: Raymond Lu * 13 October, 2008 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -3875,8 +3845,6 @@ test_sohm(void) * Programmer: James Laird * October 9, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/ttime.c b/test/ttime.c index 49ddafdfa77..81c5015d27e 100644 --- a/test/ttime.c +++ b/test/ttime.c @@ -220,8 +220,6 @@ test_time(void) * Programmer: Quincey Koziol * October 19, 2000 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/tvlstr.c b/test/tvlstr.c index b27b2bede37..3fcc57dd9a2 100644 --- a/test/tvlstr.c +++ b/test/tvlstr.c @@ -995,8 +995,6 @@ test_vlstrings(void) * Programmer: Quincey Koziol * September 10, 1999 * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/tvltypes.c b/test/tvltypes.c index d14f70d7e53..1e0de1c9831 100644 --- a/test/tvltypes.c +++ b/test/tvltypes.c @@ -3254,8 +3254,6 @@ test_vltypes(void) * Programmer: Quincey Koziol * June 8, 1999 * - * 
Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/test/unlink.c b/test/unlink.c index e2cf67f31cc..e18ae73d3fe 100644 --- a/test/unlink.c +++ b/test/unlink.c @@ -78,8 +78,6 @@ const char *FILENAME[] = {"unlink", "new_move_a", "new_move_b", * Programmer: Robb Matzke * Friday, September 25, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -155,8 +153,6 @@ test_one(hid_t file) * Programmer: Robb Matzke * Friday, September 25, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -309,8 +305,6 @@ test_symlink(hid_t file) * Programmer: Robb Matzke * Friday, September 25, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -375,8 +369,6 @@ test_rename(hid_t file) * Programmer: Raymond Lu * Thursday, April 25, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -478,8 +470,6 @@ test_new_move(hid_t fapl) * Programmer: Raymond Lu * Thursday, April 25, 2002 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -540,8 +530,6 @@ check_new_move(hid_t fapl) * Programmer: Quincey Koziol * Saturday, March 22, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1480,8 +1468,6 @@ test_filespace(hid_t fapl) * Programmer: Quincey Koziol * Friday, April 11, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1546,8 +1532,6 @@ test_create_unlink(const char *msg, hid_t fapl) * Programmer: Quincey Koziol * Saturday, August 16, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1627,8 +1611,6 @@ test_link_slashes(hid_t fapl) * Programmer: Quincey Koziol * Saturday, August 16, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1740,8 +1722,6 @@ delete_node(hid_t pid, hid_t id) * Programmer: Quincey Koziol * Monday, January 19, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1829,8 +1809,6 @@ test_unlink_rightleaf(hid_t fid) * Programmer: Quincey Koziol * Monday, January 19, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -1921,8 +1899,6 @@ test_unlink_rightnode(hid_t fid) * Programmer: Quincey Koziol * Monday, January 19, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2299,8 +2275,6 @@ test_unlink_middlenode(hid_t fid) * Programmer: Quincey Koziol * Wednesday, July 14, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2384,8 +2358,6 @@ test_resurrect_dataset(hid_t fapl) * Programmer: James Laird * Wednesday, July 28, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -2466,8 +2438,6 @@ test_resurrect_datatype(hid_t fapl) * Programmer: James Laird * Wednesday, July 28, 2004 * - * Modifications: - * *------------------------------------------------------------------------- 
*/ static int @@ -2545,8 +2515,6 @@ test_resurrect_group(hid_t fapl) * Programmer: Quincey Koziol * Monday, September 27, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static int diff --git a/test/vfd.c b/test/vfd.c index 3be299cd696..4a5351b6d96 100644 --- a/test/vfd.c +++ b/test/vfd.c @@ -4043,10 +4043,6 @@ test_ctl(void) * Programmer: John Mainzer * 6/21/20 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -4149,10 +4145,6 @@ test_vector_io__setup_v(uint32_t count, H5FD_mem_t types[], haddr_t addrs[], siz * Programmer: John Mainzer * 3/10/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -4286,10 +4278,6 @@ test_vector_io__setup_fixed_size_v(uint32_t count, H5FD_mem_t types[], haddr_t a * Programmer: John Mainzer * 6/21/20 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -4341,10 +4329,6 @@ test_vector_io__read_v_indiv(H5FD_t *lf, uint32_t count, H5FD_mem_t types[], had * Programmer: John Mainzer * 6/21/20 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -4394,8 +4378,6 @@ test_vector_io__write_v_indiv(H5FD_t *lf, uint32_t count, H5FD_mem_t types[], ha * Programmer: John Mainzer * 6/21/20 * - * Changes: None. - * *------------------------------------------------------------------------- */ @@ -4465,8 +4447,6 @@ test_vector_io__verify_v(uint32_t count, H5FD_mem_t types[], size_t sizes[], con * Programmer: John Mainzer * 6/21/20 * - * Changes: None. - * *------------------------------------------------------------------------- */ @@ -4541,8 +4521,6 @@ test_vector_io__dump_test_vectors(uint32_t count, H5FD_mem_t types[], haddr_t ad * Programmer: John Mainzer * 6/20/20 * - * Changes: None. - * *------------------------------------------------------------------------- */ #define VECTOR_LEN 16 @@ -4983,8 +4961,6 @@ test_vector_io(const char *vfd_name) * Programmer: Neil Fortner * 7/1/21 * - * Changes: None. - * *------------------------------------------------------------------------- */ /* Array dimensions, used for all selection I/O tests. Currently both must be @@ -5030,8 +5006,6 @@ test_selection_io_write(H5FD_t *lf, H5FD_mem_t type, uint32_t count, hid_t mem_s * Programmer: Neil Fortner * 7/1/21 * - * Changes: None. - * *------------------------------------------------------------------------- */ static herr_t @@ -5106,8 +5080,6 @@ test_selection_io_read_verify(H5FD_t *lf, H5FD_mem_t type, uint32_t count, hid_t * Programmer: Neil Fortner * 7/1/21 * - * Changes: None. - * *------------------------------------------------------------------------- */ static herr_t diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c index af65cda4d24..ad669d81776 100644 --- a/testpar/t_bigio.c +++ b/testpar/t_bigio.c @@ -1296,8 +1296,6 @@ coll_chunk1(void) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ diff --git a/testpar/t_cache.c b/testpar/t_cache.c index ae47a6fb70b..4d3aff53f03 100644 --- a/testpar/t_cache.c +++ b/testpar/t_cache.c @@ -475,10 +475,6 @@ static hbool_t trace_file_check(int metadata_write_strategy); * * Programmer: JRM -- 4/17/06 * - * Modifications: - * - * None. 
- * *****************************************************************************/ static void @@ -507,10 +503,6 @@ print_stats(void) * * Programmer: JRM -- 4/17/06 * - * Modifications: - * - * None. - * *****************************************************************************/ static void @@ -547,10 +539,6 @@ reset_stats(void) * * Programmer: JRM -- 11/16/05 * - * Modifications: - * - * None. - * *****************************************************************************/ static hbool_t @@ -964,10 +952,6 @@ get_max_nerrors(void) * * Programmer: JRM -- 12/22/05 * - * Modifications: - * - * JRM -- 5/10/06 - * Added mssg_tag_offset parameter and supporting code. * *****************************************************************************/ @@ -1043,11 +1027,6 @@ recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset) * * Programmer: JRM -- 12/22/05 * - * Modifications: - * - * JRM -- 5/10/06 - * Added the add_req_to_tag parameter and supporting code. - * *****************************************************************************/ static hbool_t send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag) @@ -1300,11 +1279,6 @@ reset_server_counters(void) * * Programmer: JRM -- 12/22/05 * - * Modifications: - * - * JRM -- 5/10/06 - * Updated for sync message. - * *****************************************************************************/ static hbool_t server_main(void) @@ -2876,13 +2850,6 @@ expunge_entry(H5F_t *file_ptr, int32_t idx) * Programmer: John Mainzer * 01/04/06 * - * Modifications: - * - * JRM -- 8/11/06 - * Updated code to reflect the fact that entries can now be - * inserted pinned. Note that since all inserts are dirty, - * any pins must be global pins. - * *****************************************************************************/ static void insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags) @@ -3245,12 +3212,6 @@ lock_and_unlock_random_entry(H5F_t *file_ptr, int min_idx, int max_idx) * Programmer: John Mainzer * 1/4/06 * - * Modifications: - * - * JRM -- 7/11/06 - * Modified asserts to handle the new local_len field in - * datum. - * *****************************************************************************/ static void lock_entry(H5F_t *file_ptr, int32_t idx) @@ -4089,11 +4050,6 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl) * * Programmer: JRM -- 1/12/06 * - * Modifications: - * - * JRM -- 5/9/06 - * Modified function to facilitate setting predefined seeds. - * *****************************************************************************/ static void setup_rand(void) @@ -4617,11 +4573,6 @@ verify_total_writes(unsigned expected_total_writes) * Programmer: John Mainzer * 1/4/06 * - * Modifications: - * - * 7/11/06 - * Updated for the new local_len field in datum. - * *****************************************************************************/ static void unlock_entry(H5F_t *file_ptr, int32_t idx, unsigned int flags) @@ -4692,11 +4643,6 @@ unlock_entry(H5F_t *file_ptr, int32_t idx, unsigned int flags) * Programmer: John Mainzer * 4/12/06 * - * Modifications: - * - * JRM -- 8/15/06 - * Added assertion that entry is pinned on entry. 
- * *****************************************************************************/ static void unpin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty, hbool_t via_unprotect) diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c index 65c892d4090..1e556d984ee 100644 --- a/testpar/t_cache_image.c +++ b/testpar/t_cache_image.c @@ -125,10 +125,6 @@ static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, * Programmer: John Mainzer * 1/25/17 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -390,13 +386,6 @@ construct_test_file(int test_file_index) * Programmer: John Mainzer * 7/15/15 * - * Modifications: - * - * Added min_dset and max_dset parameters and supporting - * code. This allows the caller to specify a range of - * datasets to create. - * JRM -- 8/20/15 - * *------------------------------------------------------------------------- */ @@ -723,11 +712,6 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset) * Programmer: John Mainzer * 10/31/16 * - * Modifications: - * - * None. - * JRM -- 8/20/15 - * *------------------------------------------------------------------------- */ #if 0 @@ -823,14 +807,6 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset) * Programmer: John Mainzer * 7/14/15 * - * Modifications: - * - * Modified function to handle parallel file creates / opens. - * - * JRM -- 2/1/17 - * - * Modified function to handle - * *------------------------------------------------------------------------- */ @@ -1300,10 +1276,6 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons * Programmer: John Mainzer * 3/4/17 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -1689,10 +1661,6 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size) * Programmer: John Mainzer * 3/6/17 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -1760,10 +1728,6 @@ par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank) * Programmer: John Mainzer * 3/8/17 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -1809,10 +1773,6 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size) * Programmer: John Mainzer * 3/6/17 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -2067,10 +2027,6 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank) * Programmer: John Mainzer * 3/8/17 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -2181,10 +2137,6 @@ serial_insert_cache_image(int file_name_idx, int mpi_size) * Programmer: John Mainzer * 3/6/17 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -2413,13 +2365,6 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size) * Programmer: John Mainzer * 7/15/15 * - * Modifications: - * - * Added min_dset and max_dset parameters and supporting - * code. This allows the caller to specify a range of - * datasets to verify. 
- * JRM -- 8/20/15 - * *------------------------------------------------------------------------- */ @@ -2677,10 +2622,6 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset) * Programmer: John Mainzer * 3/11/17 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -2944,10 +2885,6 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank) * Programmer: John Mainzer * 1/25/17 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -3614,8 +3551,6 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size) * Programmer: John Mainzer * 1/25/17 * - * Modifications: - * *------------------------------------------------------------------------- */ diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c index c6ed9b122f5..5f853e3694d 100644 --- a/testpar/t_coll_chunk.c +++ b/testpar/t_coll_chunk.c @@ -46,8 +46,6 @@ static void coll_chunktest(const char *filename, int chunk_factor, int select_fa * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -97,8 +95,6 @@ coll_chunk1(void) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -148,8 +144,6 @@ coll_chunk2(void) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -202,8 +196,6 @@ coll_chunk3(void) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -254,8 +246,6 @@ coll_chunk4(void) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -308,8 +298,6 @@ coll_chunk5(void) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -360,8 +348,6 @@ coll_chunk6(void) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -412,8 +398,6 @@ coll_chunk7(void) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -464,8 +448,6 @@ coll_chunk8(void) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -516,8 +498,6 @@ coll_chunk9(void) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -559,26 +539,16 @@ coll_chunk10(void) * Function: coll_chunktest * * Purpose: The real testing routine for regular selection of collective - chunking storage - testing both write and read, - If anything fails, it may be read or write. There is no - separation test between read and write. + * chunking storage testing both write and read, + * If anything fails, it may be read or write. There is no + * separation test between read and write. * * Return: Success: 0 - * * Failure: -1 * - * Modifications: - * Remove invalid temporary property checkings for API_LINK_HARD and - * API_LINK_TRUE cases. 
- * Programmer: Jonathan Kim - * Date: 2012-10-10 - * * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c index 0781594698e..8895c159022 100644 --- a/testpar/t_filter_read.c +++ b/testpar/t_filter_read.c @@ -192,8 +192,6 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) * Programmer: Christian Chilan * Tuesday, May 15, 2007 * - * Modifications: - * *------------------------------------------------------------------------- */ diff --git a/testpar/t_pread.c b/testpar/t_pread.c index 9a2493d7747..d05ec46aa3c 100644 --- a/testpar/t_pread.c +++ b/testpar/t_pread.c @@ -77,8 +77,6 @@ static char *test_argv0 = NULL; * Programmer: Richard Warren * 10/1/17 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -483,8 +481,6 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id) * Programmer: Richard Warren * 10/1/17 * - * Modifications: - * *------------------------------------------------------------------------- */ static int diff --git a/testpar/t_vfd.c b/testpar/t_vfd.c index 512aa5b70b6..86cfe2f6358 100644 --- a/testpar/t_vfd.c +++ b/testpar/t_vfd.c @@ -117,10 +117,6 @@ static unsigned vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size * Programmer: John Mainzer * 3/25/26 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -199,10 +195,6 @@ alloc_and_init_file_images(int mpi_size) * Programmer: John Mainzer * 1/25/17 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -261,10 +253,6 @@ free_file_images(void) * Programmer: John Mainzer * 3/25/26 * - * Modifications: - * - * Updated for subfiling VFD 9/29/30 - * *------------------------------------------------------------------------- */ @@ -535,10 +523,6 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x * Programmer: John Mainzer * 3/25/26 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -658,10 +642,6 @@ takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fap * Programmer: John Mainzer * 3/26/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -895,10 +875,6 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_ * Programmer: John Mainzer * 3/26/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -1199,10 +1175,6 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_ * Programmer: John Mainzer * 3/26/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -1559,10 +1531,6 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_ * Programmer: John Mainzer * 3/26/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -1994,10 +1962,6 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_ * Programmer: John Mainzer * 3/26/21 * - * Modifications: - * - * None. 
- * *------------------------------------------------------------------------- */ @@ -2240,10 +2204,6 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_ * Programmer: John Mainzer * 3/26/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -2433,10 +2393,6 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer * Programmer: John Mainzer * 3/28/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -2689,10 +2645,6 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer * Programmer: John Mainzer * 3/31/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -2956,10 +2908,6 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer * Programmer: John Mainzer * 3/31/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -3261,10 +3209,6 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer * Programmer: John Mainzer * 3/31/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -3704,10 +3648,6 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer * Programmer: John Mainzer * 3/26/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -3965,10 +3905,6 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer * Programmer: John Mainzer * 10/10/21 * - * Modifications: - * - * None. - * *------------------------------------------------------------------------- */ @@ -4190,8 +4126,6 @@ vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer * Programmer: John Mainzer * 3/2621/ * - * Modifications: - * *------------------------------------------------------------------------- */ diff --git a/tools/lib/h5diff.c b/tools/lib/h5diff.c index 1ab9236076e..ea268f3746a 100644 --- a/tools/lib/h5diff.c +++ b/tools/lib/h5diff.c @@ -1105,10 +1105,6 @@ h5diff(const char *fname1, const char *fname2, const char *objname1, const char * * Return: Number of differences found * - * Modifications: Compare the graph and make h5diff return 1 for difference if - * 1) the number of objects in file1 is not the same as in file2 - * 2) the graph does not match, i.e same names (absolute path) - * 3) objects with the same name are not of the same type *------------------------------------------------------------------------- */ hsize_t diff --git a/tools/lib/h5tools_dump.c b/tools/lib/h5tools_dump.c index d80ef1f2d4b..435ca87fb5f 100644 --- a/tools/lib/h5tools_dump.c +++ b/tools/lib/h5tools_dump.c @@ -232,24 +232,7 @@ h5tools_dump_init(void) * Failure: FAIL * Programmer: * Robb Matzke, Monday, April 26, 1999 - * Modifications: - * Robb Matzke, 1999-06-04 - * The `container' argument is the optional dataset for reference types. * - * Robb Matzke, 1999-09-29 - * Understands the `per_line' property which indicates that every Nth - * element should begin a new line. - * - * Robb Matzke, LLNL, 2003-06-05 - * Do not dereference the memory for a variable-length string here. - * Deref in h5tools_str_sprint() instead so recursive types are - * handled correctly. 
- * - * Pedro Vicente Nunes, The HDF Group, 2005-10-19 - * pass to the prefix in h5tools_simple_prefix the total position - * instead of the current stripmine position i; this is necessary - * to print the array indices - * new field sm_pos in h5tools_context_t, the current stripmine element position *------------------------------------------------------------------------- */ int diff --git a/tools/lib/h5tools_ref.c b/tools/lib/h5tools_ref.c index d5c1cf905a8..9f12367a6f5 100644 --- a/tools/lib/h5tools_ref.c +++ b/tools/lib/h5tools_ref.c @@ -51,8 +51,6 @@ static int ref_path_table_put(const char *, const H5O_token_t *token); * * Programmer: Quincey Koziol * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -160,8 +158,6 @@ init_ref_path_table(void) * * Programmer: Quincey Koziol * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -187,8 +183,6 @@ term_ref_path_table(void) * * Programmer: REMcG * - * Modifications: - * *------------------------------------------------------------------------- */ int @@ -237,8 +231,6 @@ ref_path_table_lookup(const char *thepath, H5O_token_t *token) * * Programmer: REMcG * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -322,8 +314,6 @@ ref_path_table_gen_fake(const char *path, H5O_token_t *token) * * Programmer: REMcG * - * Modifications: - * *------------------------------------------------------------------------- */ const char * diff --git a/tools/lib/io_timer.c b/tools/lib/io_timer.c index ce21b3eb5f5..49a4a208259 100644 --- a/tools/lib/io_timer.c +++ b/tools/lib/io_timer.c @@ -57,7 +57,6 @@ sub_time(struct timeval *a, struct timeval *b) * SYS_CLOCK for system time). * Return: Pointer to io_time object * Programmer: Bill Wendling, 01. October 2001 - * Modifications: */ io_time_t * io_time_new(clock_type type) @@ -78,7 +77,6 @@ io_time_new(clock_type type) * function. * Return: Nothing * Programmer: Bill Wendling, 01. October 2001 - * Modifications: */ void io_time_destroy(io_time_t *pt) @@ -97,7 +95,6 @@ io_time_destroy(io_time_t *pt) * timer with the pio_timer_new function (shame!). * Return: Nothing * Programmer: Bill Wendling, 04. October 2001 - * Modifications: */ void set_timer_type(io_time_t *pt, clock_type type) @@ -110,7 +107,6 @@ set_timer_type(io_time_t *pt, clock_type type) * Purpose: Get the type of the timer. * Return: MPI_CLOCK or SYS_CLOCK. * Programmer: Bill Wendling, 04. October 2001 - * Modifications: */ clock_type get_timer_type(io_time_t *pt) @@ -124,7 +120,6 @@ get_timer_type(io_time_t *pt) * Purpose: Set the time in a ``io_time_t'' object. * Return: Pointer to the passed in ``io_time_t'' object if SUCCEED; Null otherwise. * Programmer: Bill Wendling, 01. October 2001 - * Modifications: */ io_time_t * io_time_set(io_time_t *pt, timer_type t, int start_stop) @@ -214,7 +209,6 @@ io_time_set(io_time_t *pt, timer_type t, int start_stop) * Purpose: Get the time from a ``io_time_t'' object. * Return: The number of seconds as a DOUBLE. * Programmer: Bill Wendling, 01. 
October 2001 - * Modifications: */ H5_ATTR_PURE double io_time_get(io_time_t *pt, timer_type t) diff --git a/tools/libtest/h5tools_test_utils.c b/tools/libtest/h5tools_test_utils.c index 9bc3f7647b1..f060e2c63c3 100644 --- a/tools/libtest/h5tools_test_utils.c +++ b/tools/libtest/h5tools_test_utils.c @@ -372,8 +372,6 @@ H5_GCC_CLANG_DIAG_OFF("format") * Programmer: Jacob Smith * 2017-11-11 * - * Changes: None. - * *---------------------------------------------------------------------------- */ static unsigned @@ -595,8 +593,6 @@ test_parse_tuple(void) * Programmer: Jacob Smith * 2017-11-13 * - * Changes: None - * *---------------------------------------------------------------------------- */ static unsigned @@ -975,8 +971,6 @@ test_populate_ros3_fa(void) * Programmer: Jacob Smith * 2018-07-12 * - * Changes: None - * *---------------------------------------------------------------------------- */ static unsigned @@ -1229,8 +1223,6 @@ H5_GCC_CLANG_DIAG_ON("format") * Programmer: Jacob Smith * 2017-11-10 * - * Changes: None. - * *---------------------------------------------------------------------------- */ int diff --git a/tools/src/h5copy/h5copy.c b/tools/src/h5copy/h5copy.c index 3f8f8cfc609..9756d11f75d 100644 --- a/tools/src/h5copy/h5copy.c +++ b/tools/src/h5copy/h5copy.c @@ -46,8 +46,6 @@ char *str_flag = NULL; * Programmer: Quincey Koziol * Saturday, 31. January 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -77,8 +75,6 @@ leave(int ret) * * Programmer: Pedro Vicente Nunes, 7/8/2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -159,8 +155,6 @@ usage(void) * * Programmer: Pedro Vicente Nunes, 7/8/2006 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -207,8 +201,6 @@ parse_flag(const char *s_flag, unsigned *flag) * * Programmer: Pedro Vicente Nunes * - * Modifications: - * *------------------------------------------------------------------------- */ diff --git a/tools/src/h5diff/h5diff_main.c b/tools/src/h5diff/h5diff_main.c index 9aa0cddf4ff..561f139849c 100644 --- a/tools/src/h5diff/h5diff_main.c +++ b/tools/src/h5diff/h5diff_main.c @@ -30,36 +30,6 @@ * * Comments: * - * Modifications: July 2004 - * Introduced the four modes: - * Normal mode: print the number of differences found and where they occurred - * Report mode: print the above plus the differences - * Verbose mode: print the above plus a list of objects and warnings - * Quiet mode: do not print output - * - * November 2004: Leon Arber (larber@uiuc.edu) - * Additions that allow h5diff to be run in parallel - * - * February 2005: Leon Arber (larber@uiuc.edu) - * h5diff and ph5diff split into two files, one that is used - * to build a serial h5diff and one used to build a parallel h5diff - * Common functions have been moved to h5diff_common.c - * - * October 2005 - * Introduced a new field 'not_cmp' to 'diff_opt_t' that detects - * if some objects are not comparable and prints the message - * "Some objects are not comparable" - * - * February 2007 - * Added comparison for dataset regions. - * Added support for reading and comparing by hyperslabs for large files. - * Inclusion of a relative error formula to compare floating - * point numbers in order to deal with floating point uncertainty. 
- * Printing of dataset dimensions along with dataset name - * - * November 19, 2007 - * adopted the syntax h5diff [OPTIONS] file1 file2 [obj1[obj2]] - * *------------------------------------------------------------------------- */ @@ -147,8 +117,6 @@ main(int argc, char *argv[]) * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ H5_ATTR_NORETURN void diff --git a/tools/src/h5diff/ph5diff_main.c b/tools/src/h5diff/ph5diff_main.c index ee8669fe2e0..64b1f282957 100644 --- a/tools/src/h5diff/ph5diff_main.c +++ b/tools/src/h5diff/ph5diff_main.c @@ -116,10 +116,6 @@ main(int argc, char *argv[]) * Programmer: Leon Arber * Date: January 2005 * - * Comments: - * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -286,10 +282,6 @@ print_manager_output(void) * Programmer: Albert Cheng * Date: Feb 6, 2005 * - * Comments: - * - * Modifications: - * *------------------------------------------------------------------------- */ void diff --git a/tools/src/h5dump/h5dump.c b/tools/src/h5dump/h5dump.c index 76c95962a08..1c221243f69 100644 --- a/tools/src/h5dump/h5dump.c +++ b/tools/src/h5dump/h5dump.c @@ -1695,8 +1695,6 @@ main(int argc, char *argv[]) * * Return: void * - * Modifications: - * *------------------------------------------------------------------------- */ static void diff --git a/tools/src/h5import/h5import.c b/tools/src/h5import/h5import.c index 0bdd6d01130..e63b6a8c186 100644 --- a/tools/src/h5import/h5import.c +++ b/tools/src/h5import/h5import.c @@ -346,9 +346,6 @@ gtoken(char *s) * * Programmer: pkmat * - * Modifications: pvn - * 7/23/2007. Added support for STR type, extra parameter FILE_ID - * *------------------------------------------------------------------------- */ diff --git a/tools/src/h5perf/pio_engine.c b/tools/src/h5perf/pio_engine.c index 5c994436c65..2238aac4797 100644 --- a/tools/src/h5perf/pio_engine.c +++ b/tools/src/h5perf/pio_engine.c @@ -133,8 +133,6 @@ static off_t sqrto(off_t); * Purpose: PIO Engine where Parallel IO are executed. * Return: results * Programmer: Albert Cheng, Bill Wendling 2001/12/12 - * Modifications: - * Added 2D testing (Christian Chilan, 10. August 2005) */ results do_pio(parameters param) @@ -382,7 +380,6 @@ do_pio(parameters param) * USER or LOGIN are specified in the environment. * Return: Pointer to filename or NULL * Programmer: Bill Wendling, 21. November 2001 - * Modifications: */ static char * pio_create_filename(iotype iot, const char *base_name, char *fullname, size_t size) @@ -497,8 +494,6 @@ pio_create_filename(iotype iot, const char *base_name, char *fullname, size_t si * Purpose: Write the required amount of data to the file. * Return: SUCCESS or FAIL * Programmer: Albert Cheng, Bill Wendling, 2001/12/13 - * Modifications: - * Added 2D testing (Christian Chilan, 10. August 2005) */ static herr_t do_write(results *res, file_descr *fd, parameters *parms, long ndsets, off_t nbytes, size_t buf_size, @@ -1514,8 +1509,6 @@ sqrto(off_t x) * Purpose: read the required amount of data from the file. * Return: SUCCESS or FAIL * Programmer: Albert Cheng 2001/12/13 - * Modifications: - * Added 2D testing (Christian Chilan, 10. August 2005) */ static herr_t do_read(results *res, file_descr *fd, parameters *parms, long ndsets, off_t nbytes, size_t buf_size, @@ -2497,7 +2490,6 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets, off_t nbyt * Purpose: Open the specified file. 
* Return: SUCCESS or FAIL * Programmer: Albert Cheng, Bill Wendling, 2001/12/13 - * Modifications: */ static herr_t do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags) @@ -2607,7 +2599,6 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags) * Purpose: Close the specified file descriptor. * Return: SUCCESS or FAIL * Programmer: Albert Cheng, Bill Wendling, 2001/12/13 - * Modifications: */ static herr_t do_fclose(iotype iot, file_descr *fd /*out*/) @@ -2664,7 +2655,6 @@ do_fclose(iotype iot, file_descr *fd /*out*/) * Other processes just return. * Return: void * Programmer: Albert Cheng 2001/12/12 - * Modifications: */ static void do_cleanupfile(iotype iot, char *fname) diff --git a/tools/src/h5perf/pio_perf.c b/tools/src/h5perf/pio_perf.c index 26e90f8a5b6..d4b302e290d 100644 --- a/tools/src/h5perf/pio_perf.c +++ b/tools/src/h5perf/pio_perf.c @@ -206,7 +206,6 @@ static off_t squareo(off_t); * function. * Return: EXIT_SUCCESS or EXIT_FAILURE * Programmer: Bill Wendling, 30. October 2001 - * Modifications: */ int main(int argc, char *argv[]) @@ -303,8 +302,6 @@ squareo(off_t x) * * Return: Nothing * Programmer: Bill Wendling, 30. October 2001 - * Modifications: - * Added 2D testing (Christian Chilan, 10. August 2005) */ static void run_test_loop(struct options *opts) @@ -399,7 +396,6 @@ run_test_loop(struct options *opts) * Purpose: Inner loop call to actually run the I/O test. * Return: Nothing * Programmer: Bill Wendling, 18. December 2001 - * Modifications: */ static int run_test(iotype iot, parameters parms, struct options *opts) @@ -715,7 +711,6 @@ run_test(iotype iot, parameters parms, struct options *opts) * Purpose: * Return: Nothing * Programmer: Bill Wendling, 29. January 2002 - * Modifications: */ static void output_all_info(minmax *mm, int count, int indent_level) @@ -738,12 +733,6 @@ output_all_info(minmax *mm, int count, int indent_level) * object. * Return: 0 if all is fine; otherwise non-zero. * Programmer: Albert Cheng, 2002/05/21. - * Modifications: - * Bill Wendling, 2002/05/31 - * Modified so that the HDF5_MPI_INFO environment variable can - * be a semicolon separated list of "key=value" pairings. Most - * of the code is to remove any whitespaces which might be - * surrounding the "key=value" pairs. */ int h5_set_info_object(void) @@ -836,7 +825,6 @@ h5_set_info_object(void) * Purpose: Display content of an MPI Info object * Return: void * Programmer: Albert Cheng 2002/05/21 - * Modifications: */ void h5_dump_info_object(MPI_Info info) @@ -866,8 +854,6 @@ h5_dump_info_object(MPI_Info info) * Purpose: Gather all the min, max and total of val. * Return: Nothing * Programmer: Bill Wendling, 21. December 2001 - * Modifications: - * Use MPI_Allreduce to do it. -akc, 2002/01/11 */ static void get_minmax(minmax *mm, double val) @@ -888,8 +874,6 @@ get_minmax(minmax *mm, double val) * across all processes. * Return: TOTAL_MM - the total of all of these. * Programmer: Bill Wendling, 21. December 2001 - * Modifications: - * Changed to use seconds instead of MB/s - QAK, 5/9/02 */ static minmax accumulate_minmax_stuff(minmax *mm, int count) @@ -924,7 +908,6 @@ accumulate_minmax_stuff(minmax *mm, int count) * Return: SUCCESS on success. * FAIL otherwise. * Programmer: Bill Wendling, 19. December 2001 - * Modifications: */ static int create_comm_world(int num_procs, int *doing_pio) @@ -983,7 +966,6 @@ create_comm_world(int num_procs, int *doing_pio) * Return: SUCCESS on success. * FAIL otherwise. * Programmer: Bill Wendling, 19. 
December 2001 - * Modifications: */ static int destroy_comm_world(void) @@ -1003,7 +985,6 @@ destroy_comm_world(void) * minmax & # of iterations. * Return: Nothing * Programmer: Quincey Koziol, 9. May 2002 - * Modifications: */ static void output_results(const struct options *opts, const char *name, minmax *table, int table_size, off_t data_size) @@ -1067,7 +1048,6 @@ output_times(const struct options *opts, const char *name, minmax *table, int ta * Purpose: Print a line of the report. Only do so if I'm the 0 process. * Return: Nothing * Programmer: Bill Wendling, 19. December 2001 - * Modifications: */ static void output_report(const char *fmt, ...) @@ -1266,8 +1246,6 @@ report_parameters(struct options *opts) * structure which will need to be freed by the calling function. * Return: Pointer to an OPTIONS structure * Programmer: Bill Wendling, 31. October 2001 - * Modifications: - * Added 2D testing (Christian Chilan, 10. August 2005) */ static struct options * parse_command_line(int argc, const char *const *argv) @@ -1518,7 +1496,6 @@ parse_command_line(int argc, const char *const *argv) * If an unknown size indicator is used, then the program will * exit with EXIT_FAILURE as the return value. * Programmer: Bill Wendling, 18. December 2001 - * Modifications: */ static off_t parse_size_directive(const char *size) @@ -1559,8 +1536,6 @@ parse_size_directive(const char *size) * Purpose: Print a usage message and then exit. * Return: Nothing * Programmer: Bill Wendling, 31. October 2001 - * Modifications: - * Added 2D testing (Christian Chilan, 10. August 2005) */ static void usage(const char *prog) diff --git a/tools/src/h5perf/sio_engine.c b/tools/src/h5perf/sio_engine.c index 376fc0bf943..b80189b1539 100644 --- a/tools/src/h5perf/sio_engine.c +++ b/tools/src/h5perf/sio_engine.c @@ -282,7 +282,6 @@ do_sio(parameters param, results *res) * USER or LOGIN are specified in the environment. * Return: Pointer to filename or NULL * Programmer: Bill Wendling, 21. November 2001 - * Modifications: Support for file drivers. Christian Chilan, April, 2008 */ static char * sio_create_filename(iotype iot, const char *base_name, char *fullname, size_t size, parameters *param) @@ -403,7 +402,6 @@ sio_create_filename(iotype iot, const char *base_name, char *fullname, size_t si * Purpose: Write the required amount of data to the file. * Return: SUCCESS or FAIL * Programmer: Christian Chilan, April, 2008 - * Modifications: */ static herr_t do_write(results *res, file_descr *fd, parameters *parms, void *buffer) @@ -618,7 +616,6 @@ do_write(results *res, file_descr *fd, parameters *parms, void *buffer) * Purpose: Write buffer into the dataset. * Return: SUCCESS or FAIL * Programmer: Christian Chilan, April, 2008 - * Modifications: */ static herr_t dset_write(int local_dim, file_descr *fd, parameters *parms, void *buffer) @@ -701,7 +698,6 @@ dset_write(int local_dim, file_descr *fd, parameters *parms, void *buffer) * Purpose: Write buffer into the POSIX file considering contiguity. * Return: SUCCESS or FAIL * Programmer: Christian Chilan, April, 2008 - * Modifications: */ static herr_t @@ -762,7 +758,6 @@ posix_buffer_write(int local_dim, file_descr *fd, parameters *parms, void *buffe * Purpose: Read the required amount of data to the file. 
* Return: SUCCESS or FAIL * Programmer: Christian Chilan, April, 2008 - * Modifications: */ static herr_t do_read(results *res, file_descr *fd, parameters *parms, void *buffer) @@ -940,7 +935,6 @@ do_read(results *res, file_descr *fd, parameters *parms, void *buffer) * Purpose: Read buffer into the dataset. * Return: SUCCESS or FAIL * Programmer: Christian Chilan, April, 2008 - * Modifications: */ static herr_t @@ -1002,7 +996,6 @@ dset_read(int local_dim, file_descr *fd, parameters *parms, void *buffer, const * Purpose: Read buffer into the POSIX file considering contiguity. * Return: SUCCESS or FAIL * Programmer: Christian Chilan, April, 2008 - * Modifications: */ static herr_t @@ -1057,7 +1050,6 @@ posix_buffer_read(int local_dim, file_descr *fd, parameters *parms, void *buffer * Purpose: Open the specified file. * Return: SUCCESS or FAIL * Programmer: Albert Cheng, Bill Wendling, 2001/12/13 - * Modifications: Support for file drivers, Christian Chilan, April, 2008 */ static herr_t do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags) @@ -1223,7 +1215,6 @@ set_vfd(parameters *param) * Purpose: Close the specified file descriptor. * Return: SUCCESS or FAIL * Programmer: Albert Cheng, Bill Wendling, 2001/12/13 - * Modifications: */ static herr_t do_fclose(iotype iot, file_descr *fd /*out*/) diff --git a/tools/src/h5perf/sio_perf.c b/tools/src/h5perf/sio_perf.c index fc300fba815..ef9e7dbd035 100644 --- a/tools/src/h5perf/sio_perf.c +++ b/tools/src/h5perf/sio_perf.c @@ -182,7 +182,6 @@ static void report_parameters(struct options *opts); * Purpose: Start things up. * Return: EXIT_SUCCESS or EXIT_FAILURE * Programmer: Bill Wendling, 30. October 2001 - * Modifications: */ int main(int argc, char *argv[]) @@ -237,8 +236,6 @@ main(int argc, char *argv[]) * * Return: Nothing * Programmer: Bill Wendling, 30. October 2001 - * Modifications: - * Added multidimensional testing (Christian Chilan, April, 2008) */ static void run_test_loop(struct options *opts) @@ -292,7 +289,6 @@ run_test_loop(struct options *opts) * Purpose: Inner loop call to actually run the I/O test. * Return: Nothing * Programmer: Bill Wendling, 18. December 2001 - * Modifications: */ static int run_test(iotype iot, parameters parms, struct options *opts) @@ -524,7 +520,6 @@ run_test(iotype iot, parameters parms, struct options *opts) * Purpose: * Return: Nothing * Programmer: Bill Wendling, 29. January 2002 - * Modifications: */ static void output_all_info(minmax *mm, int count, int indent_level) @@ -546,8 +541,6 @@ output_all_info(minmax *mm, int count, int indent_level) * Purpose: Gather all the min, max and total of val. * Return: Nothing * Programmer: Bill Wendling, 21. December 2001 - * Modifications: - * Use MPI_Allreduce to do it. -akc, 2002/01/11 */ static void @@ -564,8 +557,6 @@ get_minmax(minmax *mm, double val) * across all processes. * Return: TOTAL_MM - the total of all of these. * Programmer: Bill Wendling, 21. December 2001 - * Modifications: - * Changed to use seconds instead of MB/s - QAK, 5/9/02 */ static void accumulate_minmax_stuff(const minmax *mm, int count, minmax *total_mm) @@ -596,7 +587,6 @@ accumulate_minmax_stuff(const minmax *mm, int count, minmax *total_mm) * minmax & # of iterations. * Return: Nothing * Programmer: Quincey Koziol, 9. 
May 2002 - * Modifications: */ static void output_results(const struct options *opts, const char *name, minmax *table, int table_size, off_t data_size) @@ -637,7 +627,6 @@ output_results(const struct options *opts, const char *name, minmax *table, int * Purpose: Print a line of the report. Only do so if I'm the 0 process. * Return: Nothing * Programmer: Bill Wendling, 19. December 2001 - * Modifications: */ static void output_report(const char *fmt, ...) @@ -812,8 +801,6 @@ report_parameters(struct options *opts) * structure which will need to be freed by the calling function. * Return: Pointer to an OPTIONS structure * Programmer: Bill Wendling, 31. October 2001 - * Modifications: - * Added multidimensional testing (Christian Chilan, April, 2008) */ static struct options * parse_command_line(int argc, const char *const *argv) @@ -1180,7 +1167,6 @@ parse_command_line(int argc, const char *const *argv) * If an unknown size indicator is used, then the program will * exit with EXIT_FAILURE as the return value. * Programmer: Bill Wendling, 18. December 2001 - * Modifications: */ static hsize_t @@ -1225,7 +1211,6 @@ parse_size_directive(const char *size) * Purpose: Print a usage message and then exit. * Return: Nothing * Programmer: Bill Wendling, 31. October 2001 - * Modifications: */ static void usage(const char *prog) diff --git a/tools/src/h5stat/h5stat.c b/tools/src/h5stat/h5stat.c index d29c6e408e1..04d17236629 100644 --- a/tools/src/h5stat/h5stat.c +++ b/tools/src/h5stat/h5stat.c @@ -335,18 +335,6 @@ attribute_stats(iter_t *iter, const H5O_info2_t *oi, const H5O_native_info_t *na * Programmer: Quincey Koziol * Tuesday, August 16, 2005 * - * Modifications: Refactored code from the walk_function - * EIP, Wednesday, August 16, 2006 - * - * Vailin Choi 12 July 2007 - * 1. Gathered storage info for btree and heap - * (groups and attributes) - * 2. 
Gathered info for attributes - * - * Vailin Choi 14 July 2007 - * Cast "num_objs" and "num_attrs" to size_t - * Due to the -Mbounds problem for the pgi-32 bit compiler on indexing - * *------------------------------------------------------------------------- */ static herr_t @@ -1103,8 +1091,6 @@ iter_free(iter_t *iter) * Programmer: Elena Pourmal * Saturday, August 12, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -1190,11 +1176,6 @@ print_file_metadata(const iter_t *iter) * Programmer: Elena Pourmal * Saturday, August 12, 2006 * - * Modifications: - * bug #1253; Oct 6th 2008; Vailin Choi - * Fixed segmentation fault: print iter->group_bins[0] when - * there is iter->group_nbins - * *------------------------------------------------------------------------- */ static herr_t @@ -1636,8 +1617,6 @@ print_file_statistics(const iter_t *iter) * Programmer: Elena Pourmal * Thursday, August 17, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -1658,8 +1637,6 @@ print_object_statistics(const char *name) * Programmer: Elena Pourmal * Thursday, August 17, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -1674,10 +1651,6 @@ print_statistics(const char *name, const iter_t *iter) /*------------------------------------------------------------------------- * Function: main * - * Modifications: - * 2/2010; Vailin Choi - * Get the size of user block - * *------------------------------------------------------------------------- */ int diff --git a/tools/src/misc/h5repart.c b/tools/src/misc/h5repart.c index ac109440065..c01ec0f8de1 100644 --- a/tools/src/misc/h5repart.c +++ b/tools/src/misc/h5repart.c @@ -45,8 +45,6 @@ * Programmer: Robb Matzke * Wednesday, May 13, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -146,8 +144,6 @@ get_size(const char *progname, int *argno, int argc, char *argv[]) * Programmer: Robb Matzke * Wednesday, May 13, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ H5_GCC_CLANG_DIAG_OFF("format-nonliteral") diff --git a/tools/test/h5dump/binread.c b/tools/test/h5dump/binread.c index 484e3541645..2315e858089 100644 --- a/tools/test/h5dump/binread.c +++ b/tools/test/h5dump/binread.c @@ -39,8 +39,6 @@ * * Programmer: Pedro Vicente Nunes * - * Modifications: - * *------------------------------------------------------------------------- */ static void diff --git a/tools/test/h5dump/h5dumpgentest.c b/tools/test/h5dump/h5dumpgentest.c index 7e8e19b7134..1fd512cc5dd 100644 --- a/tools/test/h5dump/h5dumpgentest.c +++ b/tools/test/h5dump/h5dumpgentest.c @@ -7175,10 +7175,6 @@ gent_fs_strategy_threshold(void) * Create one dataset with (set_chunk, fixed dims, fixed max. dims) * so that Fixed Array indexing will be used. * - * Modifications: - * Fixed Array indexing will be used for chunked dataset - * with fixed max. dims setting. 
- * */ static void gent_dataset_idx(void) diff --git a/tools/test/h5repack/testh5repack_detect_szip.c b/tools/test/h5repack/testh5repack_detect_szip.c index f4e4aec3137..b2be4849a7a 100644 --- a/tools/test/h5repack/testh5repack_detect_szip.c +++ b/tools/test/h5repack/testh5repack_detect_szip.c @@ -30,10 +30,6 @@ * * Date: * - * Comments: - * - * Modifications: - * *------------------------------------------------------------------------- */ diff --git a/tools/test/h5stat/testh5stat.sh.in b/tools/test/h5stat/testh5stat.sh.in index 9cbedce7276..65e40c4d820 100644 --- a/tools/test/h5stat/testh5stat.sh.in +++ b/tools/test/h5stat/testh5stat.sh.in @@ -12,10 +12,6 @@ # # Tests for the h5stat tool # -# Modifications: -# Vailin Choi; July 2013 -# Add tests for -l, -m, -a options -# srcdir=@srcdir@ diff --git a/tools/test/perform/chunk.c b/tools/test/perform/chunk.c index 27ada8762aa..3f4b3d79ef4 100644 --- a/tools/test/perform/chunk.c +++ b/tools/test/perform/chunk.c @@ -97,8 +97,6 @@ const H5Z_class2_t H5Z_COUNTER[1] = {{ * Programmer: Robb Matzke * Thursday, May 14, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static size_t @@ -123,8 +121,6 @@ counter(unsigned H5_ATTR_UNUSED flags, size_t H5_ATTR_UNUSED cd_nelmts, * Programmer: Robb Matzke * Thursday, May 14, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -175,8 +171,6 @@ create_dataset(void) * Programmer: Robb Matzke * Thursday, May 14, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static double @@ -247,8 +241,6 @@ test_rowmaj(int op, size_t cache_size, size_t io_size) * Programmer: Robb Matzke * Friday, May 15, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static double @@ -318,8 +310,6 @@ test_diag(int op, size_t cache_size, size_t io_size, size_t offset) * Programmer: Robb Matzke * Thursday, May 14, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/tools/test/perform/overhead.c b/tools/test/perform/overhead.c index 69a8251d650..257d3ecbdff 100644 --- a/tools/test/perform/overhead.c +++ b/tools/test/perform/overhead.c @@ -62,8 +62,6 @@ typedef enum fill_t { FILL_ALL, FILL_FORWARD, FILL_REVERSE, FILL_INWARD, FILL_OU * Programmer: Robb Matzke * Wednesday, September 30, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -111,8 +109,6 @@ usage(const char *prog) * Programmer: Robb Matzke * Thursday, June 4, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -135,8 +131,6 @@ cleanup(void) * Programmer: Robb Matzke * Wednesday, March 4, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -160,8 +154,6 @@ display_error_cb(hid_t estack, void H5_ATTR_UNUSED *client_data) * Programmer: Robb Matzke * Wednesday, September 30, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -333,8 +325,6 @@ test(fill_t fill_style, const double splits[], hbool_t verbose, hbool_t use_rdcc * Programmer: Robb Matzke * Monday, September 28, 1998 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git 
a/tools/test/perform/perf_meta.c b/tools/test/perform/perf_meta.c index 94fe849a736..6e62f6a3937 100644 --- a/tools/test/perform/perf_meta.c +++ b/tools/test/perform/perf_meta.c @@ -68,13 +68,11 @@ void print_perf(p_time, p_time, p_time); /*------------------------------------------------------------------------- * Function: parse_options * - Purpose: Parse command line options + * Purpose: Parse command line options * * Programmer: Raymond Lu * Friday, Oct 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static int @@ -188,13 +186,11 @@ parse_options(int argc, char **argv) /*------------------------------------------------------------------------- * Function: usage * - Purpose: Prints help page + * Purpose: Prints help page * * Programmer: Raymond Lu * Friday, Oct 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static void @@ -248,8 +244,6 @@ usage(void) * Programmer: Raymond Lu * Friday, Oct 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -288,8 +282,6 @@ create_dspace(void) * Programmer: Raymond Lu * Friday, Oct 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -330,8 +322,6 @@ create_dsets(hid_t file) * Programmer: Raymond Lu * Friday, Oct 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -435,8 +425,6 @@ create_attrs_1(void) * Programmer: Raymond Lu * Friday, Oct 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -538,8 +526,6 @@ create_attrs_2(void) * Programmer: Raymond Lu * Friday, Oct 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ static herr_t @@ -644,8 +630,6 @@ create_attrs_3(void) * Programmer: Raymond Lu * Friday, Oct 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ double @@ -674,8 +658,6 @@ retrieve_time(void) * Programmer: Raymond Lu * Friday, Oct 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ void @@ -728,8 +710,6 @@ perf(p_time *perf_t, double start_t, double end_t) * Programmer: Raymond Lu * Friday, Oct 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ void @@ -756,8 +736,6 @@ print_perf(p_time open_t, p_time close_t, p_time attr_t) * Programmer: Raymond Lu * Friday, Oct 3, 2003 * - * Modifications: - * *------------------------------------------------------------------------- */ int diff --git a/tools/test/perform/zip_perf.c b/tools/test/perform/zip_perf.c index 19b29ba2587..256289c25d3 100644 --- a/tools/test/perform/zip_perf.c +++ b/tools/test/perform/zip_perf.c @@ -77,7 +77,6 @@ static struct h5_long_options l_opts[] = {{"help", no_arg, 'h'}, * Function: error * Purpose: Display error message and exit. * Programmer: Bill Wendling, 05. June 2002 - * Modifications: */ static void error(const char *fmt, ...) @@ -99,7 +98,6 @@ error(const char *fmt, ...) * Purpose: Cleanup the output file. * Returns: Nothing * Programmer: Bill Wendling, 06. 
June 2002 - * Modifications: */ static void cleanup(void) @@ -164,7 +162,6 @@ write_file(Bytef *source, uLongf sourceLen) * Z_BUF_ERROR - not enough room in the output buffer * Z_STREAM_ERROR - level parameter is invalid * Programmer: Bill Wendling, 05. June 2002 - * Modifications: */ static void compress_buffer(Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen) @@ -198,7 +195,6 @@ compress_buffer(Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceL * pre-existing files. * Returns: Nothing * Programmer: Bill Wendling, 06. June 2002 - * Modifications: */ #define ZIP_PERF_FILE "zip_perf.data" static void @@ -235,7 +231,6 @@ get_unique_name(void) * Purpose: Print a usage message and then exit. * Return: Nothing * Programmer: Bill Wendling, 05. June 2002 - * Modifications: */ static void usage(void) @@ -278,7 +273,6 @@ usage(void) * If an unknown size indicator is used, then the program will * exit with EXIT_FAILURE as the return value. * Programmer: Bill Wendling, 05. June 2002 - * Modifications: */ static unsigned long parse_size_directive(const char *size) @@ -467,7 +461,6 @@ do_write_test(unsigned long file_size, unsigned long min_buf_size, unsigned long * Purpose: Run the program * Return: EXIT_SUCCESS or EXIT_FAILURE * Programmer: Bill Wendling, 05. June 2002 - * Modifications: */ int main(int argc, char *argv[]) @@ -555,7 +548,6 @@ main(int argc, char *argv[]) * zlib stuff. * Return: EXIT_SUCCESS * Programmer: Bill Wendling, 10. June 2002 - * Modifications: */ int main(void) diff --git a/utils/tools/h5dwalk/h5dwalk.c b/utils/tools/h5dwalk/h5dwalk.c index e91fd194e18..acb1724277a 100644 --- a/utils/tools/h5dwalk/h5dwalk.c +++ b/utils/tools/h5dwalk/h5dwalk.c @@ -1694,8 +1694,6 @@ main(int argc, char *argv[]) * * Comments: * - * Modifications: - * *------------------------------------------------------------------------- */ H5_ATTR_NORETURN void From e793b4daba2ee09900b31c7d324081dc2c955f51 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Tue, 11 Apr 2023 11:30:02 -0500 Subject: [PATCH 099/231] Dead code removal (#2690) * Correct concurrency bugs when running tests, along with a bugfix & small warning cleanup. * Committing clang-format changes * Allow spaces (and tabs) in VOL connector info string from environment variable. * Parse connector name from HDF5_PLUGIN_PATH environment variable better * Correct H5VLquery_optional to use H5VL routine instead of H5I. Also add an error message to the failure return value from not finding a plugin. * Play nice with existing plugin paths * Use API routine to determine if native connector is terminal. * Committing clang-format changes * Make string size larger, to allow for connectors with longer names. * Be more flexible about testing external pass through connectors, especially if they have registered new optional operations. 
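For context on the VOL connector bullets above: the connector and plugin settings they refer to are normally supplied through environment variables. A minimal sketch of that usage follows; the plugin directory and application name are placeholders, and the pass-through connector string is only an illustrative value, not taken from this patch.

    # Point the library at dynamically loadable VOL connector plugins
    export HDF5_PLUGIN_PATH=/path/to/hdf5/plugins
    # Select a connector (name plus info string) for the default file-access property list
    export HDF5_VOL_CONNECTOR="pass_through under_vol=0;under_info={};"
    ./my_hdf5_app

With HDF5_VOL_CONNECTOR set, the library uses the named connector for files accessed through a default file-access property list; the text after the connector name is handed to that connector as its info string, which is why tolerating spaces and tabs in it (as the bullet above notes) matters.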
* Dead code removal * Committing clang-format changes --------- Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: AWS ParallelCluster user Co-authored-by: Koziol --- test/cache.c | 217 ++--------------------------------------------- test/cache_api.c | 42 --------- 2 files changed, 5 insertions(+), 254 deletions(-) diff --git a/test/cache.c b/test/cache.c index 3c107f20af9..517c0b55e92 100644 --- a/test/cache.c +++ b/test/cache.c @@ -1052,11 +1052,7 @@ smoke_check_5(int express_test, unsigned paged) H5C_t *cache_ptr = NULL; H5C_auto_size_ctl_t auto_size_ctl = { /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, -#if 1 /* H5C_auto_resize_report_fcn rpt_fcn = */ NULL, -#else - /* H5C_auto_resize_report_fcn rpt_fcn = */ H5C_def_auto_resize_rpt_fcn, -#endif /* hbool_t set_initial_size = */ TRUE, /* size_t initial_size = */ (2 * 1024 * 1024), @@ -1093,8 +1089,7 @@ smoke_check_5(int express_test, unsigned paged) /* int32_t epochs_before_eviction = */ 3, /* hbool_t apply_empty_reserve = */ TRUE, - /* double empty_reserve = */ 0.5 - }; + /* double empty_reserve = */ 0.5}; if (paged) TESTING("smoke check #5P -- all clean, ins, prot, unprot, AR cache 1"); @@ -1284,11 +1279,7 @@ smoke_check_6(int express_test, unsigned paged) H5C_t *cache_ptr = NULL; H5C_auto_size_ctl_t auto_size_ctl = { /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, -#if 1 /* H5C_auto_resize_report_fcn rpt_fcn = */ NULL, -#else - /* H5C_auto_resize_report_fcn rpt_fcn = */ H5C_def_auto_resize_rpt_fcn, -#endif /* hbool_t set_initial_size = */ TRUE, /* size_t initial_size = */ (2 * 1024 * 1024), @@ -1325,8 +1316,7 @@ smoke_check_6(int express_test, unsigned paged) /* int32_t epochs_before_eviction = */ 3, /* hbool_t apply_empty_reserve = */ TRUE, - /* double empty_reserve = */ 0.05 - }; + /* double empty_reserve = */ 0.05}; if (paged) TESTING("smoke check #6P -- ~1/2 dirty, ins, prot, unprot, AR cache 1"); @@ -1516,11 +1506,7 @@ smoke_check_7(int express_test, unsigned paged) H5C_t *cache_ptr = NULL; H5C_auto_size_ctl_t auto_size_ctl = { /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, -#if 1 /* H5C_auto_resize_report_fcn rpt_fcn = */ NULL, -#else - /* H5C_auto_resize_report_fcn rpt_fcn = */ H5C_def_auto_resize_rpt_fcn, -#endif /* hbool_t set_initial_size = */ TRUE, /* size_t initial_size = */ (2 * 1024 * 1024), @@ -1558,8 +1544,7 @@ smoke_check_7(int express_test, unsigned paged) /* int32_t epochs_before_eviction = */ 3, /* hbool_t apply_empty_reserve = */ TRUE, - /* double empty_reserve = */ 0.1 - }; + /* double empty_reserve = */ 0.1}; if (paged) TESTING("smoke check #7P -- all clean, ins, prot, unprot, AR cache 2"); @@ -1749,11 +1734,7 @@ smoke_check_8(int express_test, unsigned paged) H5C_t *cache_ptr = NULL; H5C_auto_size_ctl_t auto_size_ctl = { /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, -#if 1 /* H5C_auto_resize_report_fcn rpt_fcn = */ NULL, -#else - /* H5C_auto_resize_report_fcn rpt_fcn = */ H5C_def_auto_resize_rpt_fcn, -#endif /* hbool_t set_initial_size = */ TRUE, /* size_t initial_size = */ (2 * 1024 * 1024), @@ -1791,8 +1772,7 @@ smoke_check_8(int express_test, unsigned paged) /* int32_t epochs_before_eviction = */ 3, /* hbool_t apply_empty_reserve = */ TRUE, - /* double empty_reserve = */ 0.1 - }; + /* double empty_reserve = */ 0.1}; if (paged) TESTING("smoke check #8P -- ~1/2 dirty, ins, prot, unprot, AR cache 2"); @@ -4435,13 +4415,6 @@ check_flush_cache__multi_entry_test(H5F_t *file_ptr, int test_num, unsigned int test_entry_t *base_addr; 
test_entry_t *entry_ptr; -#if 0 /* JRM */ - /* This gets used a lot, so lets leave it in. */ - - HDfprintf(stdout, "check_flush_cache__multi_entry_test: test %d\n", - test_num); -#endif /* JRM */ - if (cache_ptr == NULL) { pass = FALSE; @@ -4517,19 +4490,6 @@ check_flush_cache__multi_entry_test(H5F_t *file_ptr, int test_num, unsigned int (entry_ptr->serialized != spec[u].expected_serialized) || (entry_ptr->destroyed != spec[u].expected_destroyed)) { -#if 0 /* This is useful debugging code. Lets keep it around. */ - - HDfprintf(stdout, - "deslzd = %d(%d), slzd = %d(%d), dest = %d(%d)\n", - (int)(entry_ptr->deserialized), - (int)(spec[u].expected_deserialized), - (int)(entry_ptr->serialized), - (int)(spec[u].expected_serialized), - (int)(entry_ptr->destroyed), - (int)(spec[u].expected_destroyed)); - -#endif - pass = FALSE; HDsnprintf(msg, (size_t)128, "Bad status on entry %u after flush in multi entry test #%d.", u, test_num); @@ -4613,13 +4573,6 @@ check_flush_cache__pe_multi_entry_test(H5F_t *file_ptr, int test_num, unsigned i test_entry_t *base_addr; test_entry_t *entry_ptr; -#if 0 /* JRM */ - /* This is useful debugging code. Leave it in for now. */ - - HDfprintf(stdout, "check_flush_cache__pe_multi_entry_test: test %d\n", - test_num); -#endif /* JRM */ - if (cache_ptr == NULL) { pass = FALSE; @@ -4702,19 +4655,6 @@ check_flush_cache__pe_multi_entry_test(H5F_t *file_ptr, int test_num, unsigned i (entry_ptr->serialized != spec[u].expected_serialized) || (entry_ptr->destroyed != spec[u].expected_destroyed)) { -#if 0 /* This is useful debugging code. Lets keep it around. */ - - HDfprintf(stdout, - "desrlzd = %d(%d), srlzd = %d(%d), dest = %d(%d)\n", - (int)(entry_ptr->deserialized), - (int)(spec[u].expected_deserialized), - (int)(entry_ptr->serialized), - (int)(spec[u].expected_serialized), - (int)(entry_ptr->destroyed), - (int)(spec[u].expected_destroyed)); - -#endif - pass = FALSE; HDsnprintf(msg, (size_t)128, "Bad status on entry %u after flush in pe multi entry test #%d.", u, test_num); @@ -8170,11 +8110,6 @@ check_flush_cache__flush_op_test(H5F_t *file_ptr, int test_num, unsigned int flu test_entry_t *base_addr; test_entry_t *entry_ptr; -#if 0 /* This is useful debugging code -- lets keep it around. */ - HDfprintf(stdout, "check_flush_cache__flush_op_test: test %d\n", - test_num); -#endif - if (cache_ptr == NULL) { pass = FALSE; @@ -8293,21 +8228,6 @@ check_flush_cache__flush_op_test(H5F_t *file_ptr, int test_num, unsigned int flu (entry_ptr->serialized != spec[i].expected_serialized) || (entry_ptr->destroyed != spec[i].expected_destroyed)) { -#if 0 /* This is useful debugging code. Lets keep it around. */ - - HDfprintf(stdout, - "desrlzd = %d(%d), srlzd = %d(%d), dest = %d(%d)\n", - (int)(entry_ptr->deserialized), - (int)(spec[i].expected_deserialized), - (int)(entry_ptr->serialized), - (int)(spec[i].expected_serialized), - (int)(entry_ptr->destroyed), - (int)(spec[i].expected_destroyed)); - - HDfprintf(stdout, "entry_ptr->header.is_dirty = %d\n", - (int)(entry_ptr->header.is_dirty)); -#endif - pass = FALSE; HDsnprintf(msg, (size_t)128, "Bad status on entry %d after flush op test #%d.", i, test_num); failure_mssg = msg; @@ -8345,107 +8265,6 @@ check_flush_cache__flush_op_test(H5F_t *file_ptr, int test_num, unsigned int flu (entry_ptr->serialized != check[i].expected_serialized) || (entry_ptr->destroyed != check[i].expected_destroyed)) { -#if 0 /* This is useful debugging code. Lets keep it around for a while. 
*/ - - if ( entry_ptr->size != check[i].expected_size ) { - - HDfprintf(stdout, "entry_ptr->size (expected) = %d (%d).\n", - (int)(entry_ptr->size), - (int)(check[i].expected_size)); - } - - if ( ( ! entry_ptr->header.destroy_in_progress ) && - ( check[i].in_cache ) && - ( entry_ptr->header.size != check[i].expected_size ) ) { - - HDfprintf(stdout, - "(!destroy in progress and in cache and size (expected) = %d (%d).\n", - (int)(entry_ptr->header.size), - (int)(check[i].expected_size)); - } - - if ( entry_ptr->at_main_addr != check[i].at_main_addr ) { - - HDfprintf(stdout, - "(%d,%d) at main addr (expected) = %d (%d).\n", - (int)(check[i].entry_type), - (int)(check[i].entry_index), - (int)(entry_ptr->at_main_addr), - (int)(check[i].at_main_addr)); - } - - if ( entry_ptr->is_dirty != check[i].is_dirty ) { - - HDfprintf(stdout, - "entry_ptr->is_dirty (expected) = %d (%d).\n", - (int)(entry_ptr->is_dirty), - (int)(check[i].is_dirty)); - } - - if ( entry_ptr->header.is_dirty != check[i].is_dirty ) { - - HDfprintf(stdout, - "entry_ptr->header.is_dirty (expected) = %d (%d).\n", - (int)(entry_ptr->header.is_dirty), - (int)(check[i].is_dirty)); - } - - if ( entry_ptr->is_protected != check[i].is_protected ) { - - HDfprintf(stdout, - "entry_ptr->is_protected (expected) = %d (%d).\n", - (int)(entry_ptr->is_protected), - (int)(check[i].is_protected)); - } - - if ( entry_ptr->header.is_protected != check[i].is_protected ) { - - HDfprintf(stdout, - "entry_ptr->header.is_protected (expected) = %d (%d).\n", - (int)(entry_ptr->is_protected), - (int)(check[i].is_protected)); - } - - if ( entry_ptr->is_pinned != check[i].is_pinned ) { - - HDfprintf(stdout, - "entry_ptr->is_pinned (expected) = %d (%d).\n", - (int)(entry_ptr->is_pinned), - (int)(check[i].is_pinned)); - } - - if ( entry_ptr->header.is_pinned != check[i].is_pinned ) { - - HDfprintf(stdout, - "entry_ptr->header.is_pinned (expected) = %d (%d).\n", - (int)(entry_ptr->header.is_pinned), - (int)(check[i].is_pinned)); - } - - if ( entry_ptr->deserialized != check[i].expected_deserialized ) { - - HDfprintf(stdout, - "entry_ptr->deserialized (expected) = %d (%d).\n", - (int)(entry_ptr->deserialized), - (int)(check[i].expected_deserialized)); - } - - if ( entry_ptr->serialized != check[i].expected_serialized ) { - - HDfprintf(stdout, - "entry_ptr->serialized (expected) = %d (%d).\n", - (int)(entry_ptr->serialized), - (int)(check[i].expected_serialized)); - } - - if ( entry_ptr->destroyed != check[i].expected_destroyed ) { - - HDfprintf(stdout, \ - "entry_ptr->destroyed (expected) = %d (%d).\n", - (int)(entry_ptr->destroyed), - (int)(check[i].expected_destroyed)); - } -#endif pass = FALSE; HDsnprintf(msg, (size_t)128, "Check2 failed on entry %d after flush op test #%d.", i, test_num); @@ -11034,17 +10853,6 @@ check_flush_cache__single_entry_test(H5F_t *file_ptr, int test_num, int entry_ty (entry_ptr->serialized != expected_serialized) || (entry_ptr->destroyed != expected_destroyed)) { -#if 0 /* This is useful debugging code -- lets keep it for a while */ - - HDfprintf(stdout, - "desrlzd = %d(%d), srlzd = %d(%d), dest = %d(%d)\n", - (int)(entry_ptr->deserialized), - (int)expected_deserialized, - (int)(entry_ptr->serialized), - (int)expected_serialized, - (int)(entry_ptr->destroyed), - (int)expected_destroyed); -#endif pass = FALSE; HDsnprintf(msg, (size_t)128, "Unexpected entry status after flush in single entry test #%d.", test_num); @@ -11178,16 +10986,6 @@ check_flush_cache__pinned_single_entry_test(H5F_t *file_ptr, int test_num, int e 
(entry_ptr->serialized != expected_serialized) || (entry_ptr->destroyed != expected_destroyed)) { -#if 0 /* this is useful debugging code -- keep it around */ - HDfprintf(stdout, - "desrlzd = %d(%d), srlzd = %d(%d), dest = %d(%d)\n", - (int)(entry_ptr->deserialized), - (int)expected_deserialized, - (int)(entry_ptr->serialized), - (int)expected_serialized, - (int)(entry_ptr->destroyed), - (int)expected_destroyed); -#endif pass = FALSE; HDsnprintf(msg, (size_t)128, "Unexpected entry status after flush in pinned single entry test #%d.", test_num); @@ -25071,11 +24869,7 @@ check_auto_cache_resize_aux_fcns(unsigned paged) uint32_t cur_num_entries; H5C_auto_size_ctl_t auto_size_ctl = { /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, -#if 1 /* H5C_auto_resize_report_fcn rpt_fcn = */ NULL, -#else - /* H5C_auto_resize_report_fcn rpt_fcn = */ H5C_def_auto_resize_rpt_fcn, -#endif /* hbool_t set_initial_size = */ TRUE, /* size_t initial_size = */ (1 * 1024 * 1024), @@ -25112,8 +24906,7 @@ check_auto_cache_resize_aux_fcns(unsigned paged) /* int32_t epochs_before_eviction = */ 3, /* hbool_t apply_empty_reserve = */ TRUE, - /* double empty_reserve = */ 0.5 - }; + /* double empty_reserve = */ 0.5}; if (paged) TESTING("automatic cache resize auxiliary functions (paged aggregation)"); diff --git a/test/cache_api.c b/test/cache_api.c index e3250691524..a34a6ca723b 100644 --- a/test/cache_api.c +++ b/test/cache_api.c @@ -759,14 +759,6 @@ check_file_mdc_api_calls(unsigned paged, hid_t fcpl_id) pass = FALSE; failure_mssg = "H5Fget_mdc_hit_rate() returned unexpected hit rate.\n"; } -#if 0 /* this may be useful now and then -- keep it around */ - else { - - HDfprintf(stdout, - "H5Fget_mdc_hit_rate() reports hit_rate = %lf:\n", - hit_rate); - } -#endif } if (pass) { @@ -782,16 +774,6 @@ check_file_mdc_api_calls(unsigned paged, hid_t fcpl_id) pass = FALSE; failure_mssg = "H5Fget_mdc_size() returned unexpected value(s).\n"; } -#if 0 /* this may be useful now and then -- keep it around */ - else { - - HDfprintf(stdout, "H5Fget_mdc_size() reports:\n"); - HDfprintf(stdout, " max_size: %ld, min_clean_size: %ld\n", - (long)max_size, (long)min_clean_size); - HDfprintf(stdout, " cur_size: %ld, cur_num_entries: %d\n", - (long)cur_size, cur_num_entries); - } -#endif } /* close the file and delete it */ @@ -1254,27 +1236,13 @@ mdc_api_call_smoke_check(int express_test, unsigned paged, hid_t fcpl_id) if (data_chunk[k][l] != ((DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l)) { valid_chunk = FALSE; -#if 0 /* this will be useful from time to time -- lets keep it*/ - HDfprintf(stdout, - "data_chunk[%0d][%0d] = %0d, expect %0d.\n", - k, l, data_chunk[k][l], - ((DSET_SIZE * DSET_SIZE * m) + - (DSET_SIZE * (i + k)) + j + l)); - HDfprintf(stdout, - "m = %d, i = %d, j = %d, k = %d, l = %d\n", - m, i, j, k, l); -#endif } } } if (!valid_chunk) { -#if 1 pass = FALSE; failure_mssg = "slab validation failed."; -#else /* as above */ - HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, m); -#endif } } @@ -1366,12 +1334,6 @@ mdc_api_call_smoke_check(int express_test, unsigned paged, hid_t fcpl_id) valid_chunk = FALSE; } -#if 0 /* this will be useful from time to time -- lets keep it */ - HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n", - k, l, data_chunk[k][l], - ((DSET_SIZE * DSET_SIZE * m) + - (DSET_SIZE * (i + k)) + j + l)); -#endif } } @@ -1379,10 +1341,6 @@ mdc_api_call_smoke_check(int express_test, unsigned paged, hid_t fcpl_id) pass = FALSE; failure_mssg = "slab validation failed."; 
-#if 0 /* as above */ - HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", - i, j, m); -#endif } } From 1e0dfcd3e8107447624b6ff0c906bb02a5888ad2 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 11 Apr 2023 11:01:54 -0700 Subject: [PATCH 100/231] Update release schedule (#2692) --- README.md | 7 +++---- doc/img/release-schedule.plantuml | 14 +++++++++----- doc/img/release-schedule.png | Bin 13977 -> 15473 bytes 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 38e48ed1c8d..489d3910f88 100644 --- a/README.md +++ b/README.md @@ -90,12 +90,11 @@ layer. | Release | New Features | | ------- | ------------ | -| 1.8.23 | last HDF5 1.8 release | -| 1.10.10 | CVE fixes, performance improvements, H5Dchunk\_iter() | +| 1.10.11 | Last HDF5 1.10 release | | 1.12.3 | CVE fixes, performance improvements, H5Dchunk\_iter(), last HDF5 1.12 release | -| 1.14.1 | selection I/O with datatype conversion | +| 1.14.1 | Selection I/O with datatype conversion | +| 1.14.2 | VFD SWMR (tentative) | | 2.0.0 | TBD | -| TBD | VFD SWMR | This list of feature release versions is also tentative, and the specific release in which a feature is introduced may change. diff --git a/doc/img/release-schedule.plantuml b/doc/img/release-schedule.plantuml index c724dc98802..7bb7452275d 100644 --- a/doc/img/release-schedule.plantuml +++ b/doc/img/release-schedule.plantuml @@ -13,22 +13,26 @@ Project starts 2022-01-01 [1.8.23] happens 2023-01-31 [1.8] is colored in #F76969 -[1.10] starts 2022-01-01 and lasts 104 weeks +[1.10] starts 2022-01-01 and lasts 109 weeks [1.10.9] happens 2022-05-31 -[1.10.10] happens 2023-02-28 +[1.10.10] happens 2023-03-31 +[1.10.11] happens 2024-01-31 [1.10.10] displays on same row as [1.10.9] +[1.10.11] displays on same row as [1.10.9] [1.10] is colored in #F6DD60 -[1.12] starts 2022-01-01 and lasts 65 weeks +[1.12] starts 2022-01-01 and lasts 74 weeks [1.12.2] happens 2022-04-30 -[1.12.3] happens 2023-03-31 +[1.12.3] happens 2023-05-31 [1.12.3] displays on same row as [1.12.2] [1.12] is colored in #88CCEE -[1.14] starts at 2023-01-01 and lasts 52 weeks +[1.14] starts at 2023-01-01 and lasts 64 weeks [1.14.0] happens at 2022-12-31 [1.14.1] happens at 2023-04-30 +[1.14.2] happens at 2023-08-31 [1.14.1] displays on same row as [1.14.0] +[1.14.2] displays on same row as [1.14.0] [1.14] is colored in #B187CF @endgantt diff --git a/doc/img/release-schedule.png b/doc/img/release-schedule.png index b96f741a02953245d03f6420552621181d8aa608..e504dac39f77526a8527ac417c12a775ae5387d5 100644 GIT binary patch literal 15473 zcmcJ$WmuK%)-}8+1tgUa5fJHCDW#=DK#&GONkK|FBveX3kP-m_=|+%7N=oVOlJ0J) zZ!Wy=d++`1=g;>Z@4J5J;#zB+*O_yUImWo&K2(sx#lD7(Kp=2sq{WpH2sA@@KZc14 z|7xevSHpkIb`lzP23FS2=0?VL2q_~=Bb%poMus=^oo_s|v$MA4(kXX@(P{mjNbVKNDhiy_%D^69EXr9sPDc-&Of9vKwwO?<=MkLY2;Z}|4*(>cpZAO48j(ZTWPEx(%G zy(G24cVqR{DV-jFlyyYfO1cR4Lsl`y8I+H&!>pv{9eYQHznhcXmABY;U(KH|T^(Mt ze?GDGiR9b8{q48CgO%A48%%Ag{kJ|T%x}|Yd9*k099p`*cw?zEu{J8kIjPkvn6hQz zg6kS9>Rz%!hUeO|&!@UZnlN$OYFN0-THrCkmfuZNssTRkW#b|*K@xxRduHlgE59yr zC%t)hK)7`yQ1*lEb}d(jwe*FKWjaPplG|!ZcHgUIEqr`uB38*NA`+(rMRs|e^l+)8 z<9VL4?^V3NDVET*{3Db4dqA%eyS+T#%jLJv{qk-_{W6d+J_wG;;7Q7VP&z17Gw1fy z+pwv=*ihTR>-B?_+l)aM0)Bdwi3!QvH=MqRK=6^vh~HOn(p`*UqY%V43jxq^f)qOLn6{}iHC{k zk~|pVgx)CczT#AfdU{EVt~sut&8}vam+;3{CG#Ji#U&})v(5MBXxsnx=vn-A2R0PE z@@nYR|Gcxt@RK-y_XMl{=J}gEbbZ3}HyWtYBIf`#;GgcRvHIhko$go1@LDNw@0?vCWlyJ|UD0ln7PP89 zad6R0Q=sTdC@pnb>diR5@-0<9Mrfl3&7>o)FQ{nou!$jl)M@&iism;53!CvOx1%Ud 
zlMf$0*es7%y4u>>8tCiS*40fC^Lue5jNqp*jN{6}MXSX~4Ev+KRe?_=UOGy= zk@Lz@5)$0c`vb$Ns*e|vf=m01QClK8=G*NgxOn*ZqV=N3v@*mNQ{;&dBJVKt2|cgj zKF462!!u@cIz4vIrJXgIy>FLl(~~OCZ@)xK6nL0sWL(;Je48tR*J`{qhw9}9N}rdv z_v&y_fbQB*f$z~5%>v25S()K|0r%sBrZ5JH&iijJC_M@wVLPCEOY3q^zaG9+Hhp`}qYuhnV&6~#~djzQW-JB!mc9zkafqr6k!C6b?Qmd33gsT;RwS6$uD zPWGo8-_h7>z+UPiqNHqBI`|PKUSQnzdv8@vbkePij-Z2Z|VgM6HDu(1ygS*<*4 z^m^sKZU>?8cfqrHk5k{~NcP85k-@^JrIQw@N=iy9?!R7L9Kv^U-Qa!~D-Fh5(C^;@uT{vZfAC6f_;nNt=yd_otf~lI!6hmWr90F8vRyI$`tXJW**+_0R(>}Ughc8U){bM$SMyfnlTH7`LGHpw}RBK~| zV)^vc)B&9b&RFS-X|#)Y)@^WL%W9|#@3gz4!cJqedP}ZrG7u49Hkd~p;fqTyIXj^1 z+O|?G`mYHLwY$Yyudl4ETw*z*Uy>@&{`a%?E}0nTQvQO?+52Fx-rWJcv!67L)$l*@vqP>mXA{X+EOg`4+uz8j30-Y_w&Qt>WBMf zJW7kZXDdnZyl1vLfe#l$Zy9-un}sIR%Aamb*6F&*w#M*& z(W`c+{j!+JM39j?ALqI@)ew+~cD8t3IqW#i_V@mJ$8NXkfxA-uXqjycmudIs`)`ic zzO8$l?R&J5N2MC+zIuzyuXl&@KQ9w%@ zxmCr}Tzn$9lcI`B@Ao$sGd3W}MU?#N&3II{*rO=?E->)Au)9Enp2vy3S+uZ6)qoO@ zJOcB5kS9qD`jnnz;ub^vOZ|;+pIe9_+z6r&+zkmb-%Ix3k6N6l79J2|eVx?|t59aW zHhORTHJeu9uV_f}T`iHiE^nKqf+AgJaCI`)1DmoHUQrn*4qUU87 z5RGr`3=0@eMw;q_DEJ|iwd@iEd^TfbVwz+dOqD0kHSNnJu6()Mr=Ib0vKGT~tiUAr zgi$XVA;O2cmyVSjN@fb9)$!O{(agZ=wZ1MdwG_AG$}r)6v>L->G3DYX@sQhmydtXP zSFQJ@GWt7rlvjSrMY6xh+Ig58w3UK|NTzub94*2e7Si3*lkpWkn(#QQETcr!P1+@| z$(%WCt&M%NwXwn3w^H+)M7gL>IH;ti)}N)$G3@au^#Nf7At9l@)Q8B(?F)|!jNhFw zevwC9#WOJ>SbBCv+UU;Z+a9Mc&EVWlcN`%I?nVX`x^`E`3K;$*APFC((kIlhV6+dq zdOK0Gz<9mtD7UQ2sO3X0i!d{(@Z~gL@*|}^dBiPgsT%Ao^eIP4{q|4y>crsa(3Ii! znqQZdQgiJNjy*n{k9S-hj%Yb=PgNmKU3BT*D>lK+1uPSUxu*Ll6Hu4V&nwxl}Q*OtEYucOM z1^KcR>tD-`N)m9;bh9*bqnPu`l`Aw_13B6QWw!GxD=Uy-U|J#~BV8%zc%AsrJ@50O zYR&8@Ee3MA>?}YO8?&y%&Ckzw`@J)iZ{WLe0eJ#K9Su_)zfP(}jeT>R{|$YMI*WGv z>m+K75j-x+8CmjtAc$Sl^;xZz!F9$7B{6hb3c}|DxwtD$5`0@|yO(jS zlnUjZzTrWC)HU+dTNN{z=GD>!rk!NB&yy9$@bzb6=*%?v+n(1a1zhu`iHL{>I~qv~ zscAb{)N{GGbsL5FzNHA^UgMIMk-6ux!#EI3&I|aVJt?arov^RzY|XKn{I-6*+x{3s z-^fEXHRFX(QAH!oCCJ6t%Sg*X7yal1uO<0Rr z>kKF5v>|{rAkvQQ?BIv+gVo5*@@K;YT+DoYeB9g-XDhcVaehhO5ipUs$Ii>U56N_Y zMH!`#8eLbt2D-@{AB0mfUlXUpoyg}byeHfp;w2wOl11R7S0Wa`gWerNmg7P5nXHK! zecB%5<#%TB&v!19GJAM!Gt-jDr1hYo2zDN?T6rWz+R>)Jjbr;-Pyl7e>h0Dm7MKO| z7pY#pzjSzbSm%o$4_G^0DXHrB5=&LB^Y&cV_K%uZX!zIfZfqa0SPpdnV6VYvfoQ2! zZkM5fvHHoR{Sz{TR3FcBsBA-((VeN}$G=`-3#otQo$lPs%!WipSUlw3w^tY#(Xkv{ z4hIc_yVlY~ARtc4$SN%mzWbu$`dsx)Qi46Ru+VOyGr`o<)OBy=@OZm}lasU0V^s>5 z+E_HVR3v|}daqb^zhP#PLFFt^p2%{`Xe&?^Q>=`_jJ58>WYem0S55YHu+VQtL50nn z$WEWnIz8!1G%g#&XEzyW*!;>bw#LFV)tz+I@tP05XXr2=DbCR@vGnJ&D*k{aj7xq>Z`%3$aHAv{UwFYZjzJe~s~1KA1S|tqeigOG-@Kg~hcly!xqW@^E_| z3YB`oU@Bn+dA*mL4e)sx8ij;Q6Dgg1-0y6yqGmXe6CS^N;s?{XWSIOl#7wVnss&6R zNf48e=-Re%H#9U51$BE3;V1b#Ort%{Dp9TEFjn_z7dD{seqNyvmyexLNuiwm_3KwJ zFE6NZKQymbpH*o{Vr3CPeU~V3nVfv28giuT{{&{IdeLesoi=GCz>=8R6{c{1Mh-E6Jz)Kl+KW=!dmyXUItY?GEyI7~mxf4!ft( z)U8tAAbWdBK$R-^mZGj9&SbL@w|Zm6b&l%eJAx6RKExLTDesp)DViojo)Ea&ZEe*9 z)I*u&g0sX<@uOR2T8sOOmt~A~Co}gI5!GiW>-XHG)2Vxzu+H?i5+y4+ zD68yF$&MKHnN}D&G`u^<@t8>3;wdM}&UhzY8WgGLh~5e>fB@Q+@rXgJebIHEjR3^87^Wz6`>-^y z(!=NLRr&GGoKP1D=3xR3wzY4>!vw}1rztQ;ng^S+GyeBDmwu@2BqgRey&9OF~ zeO8FIf`WpO^A>HSdajPZr}PWRM!K1IbuB;BN=^4nmC6EhDQ<_6QvFh4xE}>Bpaw^P2(mf zCwDxJN*$$TW&IPh3GDHm$`7$}JdA(uEOv8XRqhQxUz!tu+z>wrfFYnA_HC=2ykf_W zA(JJ`;TaSb=@r1bz}_ZX^KEHueURlk*>?s(f6+P8LrnTH-LC%wOZFsOYJ&XUN1xZU zSqiCp3o_a#mLIoszLn5Z$R$#FOtqW%KB)I-YEo`Kev{6B(qb!+$7|0aJ^y_Wz*Qd; zKUrhiEP_DH4r;JZ*bh7|v~u5$ zndr?@w`%YwPPSOQ3K7GHvbMC*cA+DkdVwS~G-Z<0q&@j-dPheGm7vQGd@g-vIaY4J zHPb|UoF*T$2TQTP4mkJpgq}*cjM)7HhaUTqsXkXco!p13B(^*rZf`018-pn1lw&Cw z7YnmU>FD*8mG8VfuwH8AFuhY441AS5rSw?X#b_)=Wq;AvvAW$?E#H-|LgDQFT4S9G14TmyhPUeMFc}jk8aVNXdaJQyH}q3cU%}b-~<7lH{5bFOzY! 
zI7@fY<@kg!VL;i}j@!T2u#MMwq~Rtd(|SnQjsX7(I)3J5+TR0J}rB-`xL0M zkP&7-RX^FO_u5!rlnU>RI#&GN$DU=XJ6UX^+b@|>X{APW(#BY#JD_Y>o+0BF_Z}>^ zASV?%JuoD9h35+pM2vF?ee*6Q-*^w1B@@4Ja zqQ6=D#RK=v_b1%-0zEh28vhNFr?)iTadvP4#a)BvCvg==ZMu~c7k-f^Mnlpu>NwHH zrd2N?(I76FDf3~aAx`j1z}v;a(idjajqBqMF~cYq-h6*O^jB+K`kSLC`(fy#FZRc@ z@}2Z+z5KL_@B*(%%<^8qGrw{rY46qdCwWhJ{<_^e4MYCBE`@5Wq2cDb)j~2d%pA)j zyd_3*Mff-u-e43sr{0G?I;d!{hMu_oe1A}GAGSPTMNWRS3irQw73abw|?Tl5Ltx{5>6Iu~^TSo%Swr}-MUEOlZxtl2$y^u-Gn zp8eNt;=Y}o4)WOxm2WFBbF2+ovPY)@D?{=-IH4yw_;4Lfokx?SC_RG{^b69?Ef*E+-3;B?*}1E$3oug{Ib0k>$`t9R@0GLFzr?l7 zVqdz{v(yk88ag^UN-rN>Utj;z!txa0HxOt%+}yjQ)dvkexa6(!r0hCZ?^-{Rm6iPw zL;+{M%;U^GQvd?RK<;B;wx3A@>9V!G-6n=W-0(sch$^;T%7%q5`Gtkyw7-DwE7H%; z%e!^+=G4y+8Ws2bF~=|8*7$5$-ZnrP4F&%r=cZ6PNE&Iiz;sD-*!V&sFOZMnnWIJ^ z2t<)1$>a=GF6;^rzkmPL`Bvyk#DJ~vMI&Fo#uLRRm}O<8goBAmCR3-_d`O;mj8+vG zqI6_&nqK@Y;$~7`fSP{DV9|4RQBhHt3?Nh;r|PlF4zUe>1jXKck?1s(rTzuRzqdL< zCG0-hdkKLM#RBeDWH&iI-EZcxFCK+qtdln|pmGzz#g_wSXJ>&fgw%`|A)}8+C8V=9 zX$*{lrlw|S2cSJ5>+s0#)azg&5Y*^U1%Dn6jf^CvqEbXv=znXOBAuh`!IFP4aduh) zrMJ%MQr4HyurR?95B`B47KGx3ettOUS`+OLYgR z$=>GVEHLd!efsoiQ&SU+pL_H*2LpqzHj?Lo@jF6^0n|;r+g?=J?qI&bOtJlP|Dq+7 zy^<09j%&#_T3T8o#TFOwDUZh-C*F#{3Td6MkZ?As%#tP(QX*6Pv-y$@74HveI8A?D2gzLuvzq^snjQ zN46E9%WAAML3_K(QsZNmrhcW8qpteO>F0a=oL!j_D=oo3cFWf%5-e6McuNF|tisE0 zo_7Oo`xJT0t^f$y=X3ip>3p2X$b$SkkNMXBGS#zR{}M4pKr({mJm+dP+L)M`L$hE< zW47Nx^aT>|^%4a1MO9Q(czL1!t{z1oFqy1Bprjjn^(XjfWEXdm1O^1iJ$PU~TKXd0 z9OxqK*olLK10IWEu4kPH6EaUT1cf1AyIuO?Gv9jTFXHl)X?57uL7yvf$xlwE<>f5_ zHSzZC+sa|`%McL4Vq;^Yqg{SbI1-VP#>DtR=x|u*e5JJxLd4dMB2)x3HcJDXr!LK0 z)vOO{99pqzM`|$zD?bPjp22y_$9QeROA?8nKi3#1N#_7F zjfCE;tjSw*Z85+PoZ70_-E}{90#<*%Ao+1aeNEQrsde|{!*FXu8bUrQhr#8)1; zq_mrua9spTI4G?@p;#G1KVM5pMMeBhh+RSCy$ose!(9_fB>Q+WK1HA2|~#N+mEt>)`Q#Vh$k2n8hpie zPP5x9J{}%jFVka%x_Ba(Va0K~%|MR&P?8X{v9Y0=r6zN-%IjZFu{`jl!)VnZ`C=02 z)2V4`!khJ1AigNGAZi?8%~eV)A0J-*marc~s`pZ9iE6OG{D2#7gGQmvk}vHW8&pwN z(8DmCiX6?g#J&KnlxI#OjLKPu7sr5k8K$!11QB|&K$g*e-?BI zjUGotcEAdW>ABv?|?DMesC2dIZ-*T=na{9^wX%rb5coBi;l-Vzq*MN4MSIwWnIMMyUj_snl7MZ8 zQv@=;ee9V#qy)7D1qtP@;yCQKZVO@}2vGlo*p;QIc~=4iph~|6P3_>KZS zM>i2xT~(EopXn|kjEsz(bMlR;_P|BR$jI2N60bmbz3~u<-IgTkcyh>!bM{OvK%U|q zvkQY5utDb6kR_$0M95SNf^d^&k34u3$ayWzPm4dx8*`a!15o9EF(qEMhrO2r5QTs- zhK`o@y-!%!7%>5+}9ndYc}(S?az9VroZKQ0};@Tci!z`bDGmG9P@&EdG6l&0=?ovPV{7qec-7^dI~uZizq~;M7m` zL3&Y5d8T0^DLGPZJy{zV7zmPq+-~4`JJ)lNVW#e84pw1KRdSJ-FCHB|eRB~45&udf z8Q@5ICMKbVrjF_0i`baco}R`xX!^+S=N7 z%`YNU5U_FXBcsd5sHh<@0OjT7{T;AIBg4WB`dn59Z*TPjwzJ~}QiBt}s~8ng(+59+ z0F_$QOj`-EhO*IrAil?M(GcmkV82~hYydp`JOcK#3g>~<8pL3SjTSkuE;w#owapMjcDshlmOvphV`H#W>_O;9*{`i=U?kO3TqOTpP_T8QH zE+(Uy{ZMONZFQV3#DN2e$=++eJHWJTZWupYZa3T*{$Zec=U{{<33<=CzPoHS`3Mt?SzL!0 zipHbe6m{ynyatm?Kh{GL#C&|Rh~KA#jCtE;|7r+?h{``5l={1a$tw1bw}akYtM~Ty zZs2}U$~4WBsVQ`F5;R~}Y({+HJm^GlDYPJKgI}U;YRD$DF8}@OUULkX%{=DY|N9q^ zap*tIYOh}>Cf+Q=){)3N{f@SVfyskhIaNhnU0v|XfRxC<>zRexFYM$50y0=S7J`1_ zB2*}lH)r-4Cxv=M1sKJjKc#bYnV6WG+S)9-lVu(}2ppZuv45Ag)wkDBn#sr4_eUAvnv?RO5;LoM?@Oc?@UDLh4ef^P8Yk$wHc_p& z1kO=pK_{z`0gPS8tYnrth{1n>w%xJzDtkDF291GA%WpwdIy>y}NUeQ?FLW1(^0KnV zhjhzUoXPZcvLRMTqmN4EvuaD895vq8^;3=S$Xi-kGAaz4LP4OGqd+`4AGuGmiun$< z(!+I*&SF)hgb%6Hi!VqpaHdU?+Jd*?2lK1fwZ2(ytI1Cdi%BmXy%{3T(ll zU@^$n!kF8a-hNLb6=Y~+l#-eXQbZ}e$8R-N1J>=;<>mBT2OaMWfmYFB5r1e3A_E~I-Dr8O;fRw0PYD?u7%K| z$fV!+2NE*WsP;&1_Ud@zv&h;D7MQ7tC3VHKo|&b@%;m zgfx;sjJhdT;zHE$X)jCIwnG1%*h?@Rb?|N}KL4$LPjUmZ*W|!zXCm(XjCciBBSmH^ z0jJLWq?E+nj)zO&tjl+j8Q2Q;IIin;C;JkRihDmPF)8WMqeqS_gWoyON<(luG?ju{ z+S-sYN${Hr!UFTv0PzNfg!V+dF1aQ}-n(}XO2Zij`Di;J=dkaC6e0wM z=)xnWPGHrYLY0YJ#HU&M#t;SuIB(y+AnhbJJ9lr}{+ajH?{%z|9x2b|WJ%W_UQI+fdwlFO$lZH@%o`(>%% 
zx!U<9XF9CI@sA$5-F_#eOiP zc>oF#*JJMi*Qe8Z6}0h4*UZ;)i)5C|N$A4_ub(J@3|p|yFd)KL{#rhm7 zF|e>D#0B9P8K~3KXnH^SBqt^L3+xFmg=>A-Ir3Q|`*g^*0|BMVpG}llO%x62w#Wl{ z2YM=6AD7kmsDP{}`D-63Nl68po&poax|5!DQ0Kv{K)$AZ_el+6Y1*c2VMPhn>N$AA zp6}III{QEgU;%&+@N=xl_GwRh)b%*feSmPhz6q}J-W~Agnva!7@OLUDN4`|*p2_F{ zmu~tko0*@&CyRaiwysro(s=AcCR_T9VHiRv?;i*d(Z zr@Pkl{7}?5Oj}j2c6YPIb*4WXZp&Ds0%cOa7YujuR+Rdht)~ND^gOoPc;oNcrUDlM zJoRTbDJz3es=64DIpMQI{4#F4tYqDgt8|?{1Ro7dgB^%d;cP^iEKE1e-lL(&_#gdz zZCCH*!gnY9-Q%c0L*m!BFLqN}RQH5T?h(8XtjCDU43_%Hz;VaNdiHBHi-B%Xq@H_J zoUBZpKbmCl4V}Es~7_V3Us%?G)|VX$+={4+;4$m;(NfR-wKgqw2d=I^mPakMAtdjVPaJFU%xc^qG|}QNWyH@cQT(lv?@2S!&c* zn>9I&TNipW!Ve>n^c?F$>+N@|(ChMd9GN9x6^7nJw0B1&MQ;ilkN zXnP9zgIYDdC#P(_<$cn385kJi1)M`$z=p(n3u_)=6R_h#bEhk62ls4dq`^nyal9#Y zn=0lWCu?8z*{LhD2blcf2?I5bW&vlHerNcrK`wL>sB0AlCW+feDvFUOQwKImKCYZ8 zX=y!OclbdW^*V$61&|ILa)8;!mWng?8|ot4g{S3+>PBY3vqni% z!CPD%P;0x?+!^Hu_g+IMGFk|X9eec)bGrSuTa5J5bzSAQx1|(x3GdhdP$^*o`*toR zHO<83D^W?ru?I*XD&wUu465BvaQ5qrjEz(KDmRwSz@WQR(NayFoC}r%dfvz`9|Av( zC8me_Ibb?i8d8~G-({bVs!!x#P{+>94R%qImRfS#+(_B(IJn%f!Q(7RF6zqPt065F z1lycboQ@ORHInGcim%G8a`LbfC>8Bbb*&1mbrWc^eRxK5b<}K)o@s#ZHFCXtnSGeI z`moIko+ByIZ@}&@w!`Vq))d2ksxy6W2NX=ZOEOK}8xX;jHGAjh=YwHv@OiQa5CnYS z^S^uwW@J}*A_b@gu(Y^`1;>wK3j;~?fE#lwRj z?@|^+#5R{_y62KD!2|GF2u{<6oi!OZdgYf5-OGBWTxV-UhiO?K!HWd?R>Ke6LLbP9 z%#n0RJo4bmxBYR~372KIu1rhvMSph(I}C2GC}cV~lFt=oAYJs!mmylhzS{g}AqU0p z^WC{pzWDRpn@4arJSecnh}J>3iza)Hu5PtCB{u5?9ggW4kg7i|i0l7Rp|!}?_>mIy zR`}%U&P?c$whi+f0ZK&<>h9&2!Bh5If!7j6!O`{Z5|zjCZ4ZHDIN`-JCS z$$zBo=4r5A4iHqCOOS)O?muwlot>8*5dlcd|47s#n4h-=$n&K`9j5Bv?i}c=C7nJc zP7J*}qgMNz$enps&twnn%W0)(iD`2<({QzA%||ixjbiLxxeY=8fhJBfKkR%!c0lfT zak2=CD!u*?Gp0iC#o_w-&Z%4dIa>W&>Q3B3f?sYg4(+ZB(BjmtI1L3g1qM2R;eP6u z6H2=I*n|YA3j!UItQQD;MUg%DKXru~{LRvlsO$bJWB~r+LNX0PyMiks%==v9V+DND{~b2X~rNu@ETTgq`~FBF8_afzPQt z{k`nEvp`5rE@yBUY>M{ek53!7+Tl(CJj4)5z8hbT1q$E)vA6urS;l<)IL6~#h)HW$ zo;82lz#o7srHm4s(W-Opu`S*}3~iE_gK|0$c#Yd%w`!+bPDKd(IuU?fp^{#e8sb8^ z0d`$2^UHo7WHQH$`K6^FWBPL+w{RnFDJ0Xx2rf=2(8JRvT_sj}xxO0cs$w!SSXTEp zFQxzoC&6)upQ2l8{Yra(WynCdIg%a$?sq{m)C@yOtzvVsfp|ezH8WK1>ry=-FklGL*UvN=mARXWp;Gp^R>3ZEs46(jb4angR;Smww9Sqo;0N5yQBrfhX7y^QA z#r=V7P3YM3D+r&uAu=z%VRYrFK=Iai;rQt8?(TzaAq0AR3e9TVjuitw*&Qzox_ua< zI=O5tjbz}n{_LS|>s8Nlt%km)EH?)?Hw4!=RMS~;H&OuE}d3}~IbK}$~ zd12w%sbB+W@7?bu-(8EnXX_L5w{yKZmi0-#UpzdYfNjOk32I!TJpf0b1>4)(6=Y;LtIU!`7EJ8711sYRnb~+Mh`CUC;@mJA!SqWJ78L!ZzE?ZH5UeQ z%Lt@fG}me%H}>$0^F?eVWk&GoUypQvy&QEBGz!`q6j;c5{62a&A>xL#2;XJUHO`A? b#IwV*j~Xu49oDr($O|$O3gWqrKC$*xebR*p#^N_UGi(jA+WymRAu z&U>D7-!J#x-}~kM4jeU1!ssCW!a?8K24-Psg{w6tK+x3QV=%?}4HM>yS2M}t_~iYVHRH&Z#w$PF{XE9I`<7kl*1QWceG99{ z-YL_$Hwvq4Oqh_vW#ktTQL#chwKq3Jzkgh!O=}z8p&*@N%@P?g&w4A=U{;2*@TD`D zWh==S#Ti}g+>)!W>pnJ@pbT? zH&vMu^^%)ed|CJAX%l;iLgJpT2iB~WuAa(hQu4yxBw-|PC=AJ8E)KsE*xV@^+{vu- z!P&ZT5%l2w_)w-$R!^gsLs{omw^f4=jrz-nf+_$UT&KbLB{b zNfz~Mrx&xLu!w*>qM*gta`PatewQX->?Q9kO7+KIldhOXdpUBP*|ojBAIBhPOBY+!D9S6n?c=58IL7@b&#Hac$h z3Qp-JcR>_E(ARg8+t+XMCxs~xfd#$r=0}7p z^a+BYB3@r3!O;<~Uy&E_5U<{79W;nn2rCjy9()l|MO?o!{dWW8^HZ=o&w5mCR_EuJ zHe{fa$vCa%m*zka_wEMQ4M>%5eOSr0+AcX?x5?AO!eXT_b*E+}?fZAp$2zyz7Ur}o z&0=?x9!FqcVVRGYEy;0DJFE`eho#`85mNP^3=IvDup4Q)uH{w`M@2;%#z=TD7b*mmj852tB;V)q^F)<@0ud38bUhX|u%aKdwl16b&xIL(9Eh6$3 zaTi+4Z^;@uuv~umo47cU#{31mI=JMeJv}`ixUOF1DWt34lzQscD@EgVbv8R}^z7NYyNb%nPdrW?E>%Ck z_Lr-sohF0u=_~BXB|PDs^i5~eJNf0F7pDaEVgWei#{Ddh9_jD&rSk2=;aS<)Vq#+W zckY}XtnJUZ?fed99IUq0f7x?5rpxNeu*N;>PQxX1b-s^BBcXV^>kct7x6PdJ_DVzb zLO9=c{eE9U!rh?^T%q$%2}||fXjnu8KBi0W8*CVyq;IyvhIz1{4F9+M$Qe$NY2p5? 
zuxG_DD}(ud=MR(`ANO3VA4^nI;MCuK6@HH1FTPm)_W24xsex&L@h#|^M@4Q|Kq<<0SOY+Wp>0!hmu`@9B4--hK+POcsG zYbj`u;y%b2MZVqr{_}E?k!AUPsGL8GUcg!KU_fjFUEsNp8*SQ!WCVqroZO*{7qz5I zuMHVH%8gqL4+ys)&HkT54Ci~fXDw3qu=wF#mn&ZR9@|qqGxaNJ zZnt!?0z*S{jx4LD+idf{>Da$0p@`;6Er%d83XuF}H$5*CNBgH68@1f_d%0`uf5Vq1 zker?;ada+RlyHpy5^rH-X7*Sw$aC5MkZh&LDpVugo*;f#gr?WtBfO`#cj?>KWYtcl ziHOX_W_f@3J??O=!}ZbZ%b5nZvryBU(kUQGKjh}Z>U9%s^AuoEqRh#6P4xRyb%P( zb6?+5>}0#{%rvz5?RW$p&w8cbjERi}x7lu|^om*Lji9xaD7|N0A2ngzm-2gh$FtoR zws@1$T{y{qGFvj@Nb9zJ9|+mNK-Ructl8Pt_V)b;UNuC-#8t-q6x-u9_VnCO{nV?U zvU)dZWjDWC_lqcB`7Y0CI`nRWMj|xhsOV#(*}u=43(mmESm1WN4W_Z&`}60|2*J}1 z7N5Dy019diJ-w{o#t~^=moQigF=O9yZ*tAbk5|@96qg0RA|oTi$@z>73~Yoi;qplv zY|*jL8e9*DhKG|m%_R!`R^ZRfo=Hj$n+*>QnebW~85y01GJZoc{b*WxvbH1r{=w(5 z^Xzegm9lOYcH`rp=)}#oNV0KEzrN*YG3q~ihH4FO`R03b|CzE8Q*6(hT)BXMS!`ZX zE_lbn1yw*!GYELkF>U8WulurLR8)*?dM$yVvPKbP+_aRJ4{sK_%5Ed!Fo|qGt0eGp zRRRUjez^zK1%E$3rHWGRI{9?Li<7;Db*+FXU5`Vxk2($Co12rlt>xD(yJ8smTCD9W-FK@K8>O&!mO+SdVb;vU#87 zHtc%k1^(V&;ReTn?Lu4ClwJ4L<<3=-bFfpUaeo>|h&pxS-4ZQOJg`CWK%89vZztUU z(Eu*+>oe9H1NVYn_5lRtV=IP3&|7E`Z-5=FAB%SF;_@<=$p95I^Ol0w>2Qtx%DdxK z=b27$2z7R@4P-Xg!}iY;m*$X#z3os{Q;H<%x(0WDHM2II!$Fa5YiSvnB7CGCs0pz} zN(w6&7%YOED%hK{?}ybtPZvzpPWL=Db#Xaq3BoV9m@xksW&X_vP0*O98oHf9hC~>! zu2f_9Y)e7#cgvh)Mms(!X%`LBZBV5I_E-8>+B)CU*OXOQj7Ph*g_4LRhY+)VI0BJU zKu$@itFFGx-~A2E8(Koof2tbM6-%>ct0XOrQTL-*vsnN!ov$}@b91!>C%#yA3m_7_ z7aX;I6X3Ws+5zF5V*Gmn8C?7{Lp|bQTz0N(yxkQzBx#BB7r^}~EvBlY9(8Alhg5@e zCxuJb>%!42ozFSEVW=*ix0{j}VvzibROzakOJ-!!UBbYl_E22q4T zG5&!0#BzdTkc5cn!MYL*#xs^WTBO0(4_`z63NX)+Ns**l7vAUB-;nKc^YZqOZ6S23 z2ybRpR*5ju*p!s>qe<)Jvsaa7OEtTuUrt6&%lifG)0Q@oy+wGCUv3sk=62)RpP$&K z!G8r%b_pt0fpgV0oQ!7(JRyifaz3X}w;Q+=Jz;~{l5WL9SR};6&dWU{E1Ebg5GsSD zaBHTkYAM;Y*VV<;Q`>pe?rg`6AGWyAEf5-FI|kCy(qqX|Qc|zSj!|(;`;%V~YMT48 z?wFR$FXa~)Rpr&>vo=D0lY3zqQoo$sn=c5g+PPWa1l!_7)v4dNPW;apr^2mK#WAZE z?rrDgzdfO-vcFrpjH%Ra&Vz?Pw|xT4DW`F=my?@X-oqpGIZw%Y*GIG&8zGHinQW(;p% zAmO%3S=Iewq3L@Ao5F4L@$d_;L_v?*7$xoV&-(2Qm!OE{c=C01cTWO1Q&&7q&gogX z6BCnFD_a!`Ztz!PSV88r`4XS2;wsO~d$=Lt; z?gzlgPO)bBHh^m&;=5?-2?z;;yx{U7NupY^>_$Cv6t&;iY}B@Qkc#$tx#2yD?DX;} z)pkoVrqBHpRnSvDGF&OCsLJwCgDg{NaAxUJqEmGKwi_$%#y~dk;q^mT-px<4Z|!Im z7c{JSx}4)ApS^kW#;^1q^8|tN;!<~fa>|NrU*8+k=L>CNDk>_t6#N|*_z#{3MnqKI zI=si3U3(&Q^}9b?BCK{P?p2QG&Av1N^HraFwI>peJ26S3b zDdAvRSs%8Z#JFH*vvm3wyEf2Q3Wmy{x;OXrvBEugtG?kQ;OifMC zt!D>~ICZ4_9o1CR;BdH(+ZN|WBxZ)({aqO9xkz%>`clWPMClBlMsGe#ML38i} zn`Gm~(PWL1q9U8!Vh4}?awrM=^2W!eXKTqaESj)=F zP9ijtl~e*_6&b7)S;9XGlnlWhi=U!#3NA$YXeU^R0kSEq_=yIh!ZC+aAZ*|6~ z@?oGGGu;)#60`Oj$n>=(iSzLX61-IFoUbW*Ocw6VG$oDaNaO49mjlwVJzaONtM&I+ z6x`hqCBN`Hlg}z)D~Fe!uZ&PJ)2z!Q2RNhMKS;0OR!2j$#0NPIGvYbb9*c*JX z6I09Bma&UfKTo{*4G+pd!ie8q%Y-LfZ>kjrF| zZqZOEa@Fa{yru2l#)EZyqQ$QtFY3mdqjgX^1u}A{@hp4-d@Fq#ddGo!LR4>tyluW?fQls|sczPDMRfcK{dRQl?SLME1`QUF`&)UT((x@|exlh-^AvVs&qp%4Zy* z3p&x;%aRmejeAjh5^7Im%aB6c|1)*aF!dxe-) z=e(0XJwY#}okvhCH-N}`H9Wdj^5FylvOP=Pt5+$`d=-p6QI9kb!HL(N&ZkkFs1y<5 z`Z$}cV5RkrEOEMRpO0Nj1{ntYB3|;~hM2WRg=|h!j;u_I3%GK`SJsS$j&7+Vie^fu z(H)j4jn;JdDyL;vqZ+zutbM*WapBqX0EFQC_L`_9;Bq$5R+|emoIee!`2&JIIH%m14=F4cw!L zXZ+p2AHWv-Qy*Jf>3~j`cilf zlFCenKNP6sMK>lt)^8Iyo??If)qA?mIa%-gj(Usc7mLKzJikHLGyqteje3`Lb*LP9 zJkMQ}V-ALd$Es>o^!5Opq>&8I`BpC-lF2@nr$8AjY?n+uAmajp*Y#tTobtB{zLAW0 z=51p#obYtMRuqOqK=JOkfjXO@@Mvvy(yDH2ubnc_ft# znHmnx1F4!@JM=v#w#BlFsp)VHW1NGY9DkJ-)hy+M3sp z0HD_DVD|CWWP7T^EnV0Wp_ObtIPae@Z!RZWdjgK_r_o@MBXG(&fHp#4gj_>LGE=6L z$w#~tXv;vbj>i_WuNRaX(*M*zgy)H`UCSb02hBFU2T);NjcbT(Dy!nff0sGw(TgjJ z{c7xa7KJ9La$?WOJICIr;65|RAju#(DvI55{Bw!w0?=OC z==|k)|E@RjS;5r=>feotJoo1)TZ&Kx7tY?v4_BBDS2VeGCa9<`m=fmK&3O@}hZv6# 
z5b6O$^qyYf!|@&0;oNB2x&~D`KcO*UwfVE4Nd%<(nYVit3=h%{E?m$tc-s#q7mRZf zx`UtY%+ZC{rBtbivL|4*!lpHe4W73;we{b@BzOkfs5~3ivaeeR&^e_5@Fxv}@--k8 z2a{h!LC`@6pF;D=HBB3_?CZ`R_ByAzb;EZQuRKl!f+FAY`+R%t*jH?L8-fg|B3`q- zNM$!C0X&ij z$m4(J{E#U^=JKRn-q!X}X=BoaeObA+$(a1V58{Xa;MqHWyR&_z?C9*Qk;iOzZxMAE zG5bOgmC75K1N_G;eQIjK`fXk_AwZ(6?8`k=b_nivxkRZ z9-{KfcieQzpR#d2_#kFs5&!O;C5=RW?#;hlhz@@8`T5^0ZwJlYTk0!4*Gsot|3{;~ z<|AOot~F_4?ab=x7u!I*CPz>cUJ*}EPgT{C># zOhYm+03)M-!X6wrBn<&#>EnaEPXGyfBlHAxyLViA6_^FZ#i7sofrwiP&j*?%6Vv9` zwGh;}-}0vF`y2QE+iEtZ=+K=t~ODNE*Kulo^+WFhDSp@*oJA42W&Q zY&51cH~pRw5D}Rz;RWTzF~5p-$vO&QZF~xpURVp7)7C_6Obo3f&<+l|9(|1AMS|X5 z(=?MoGa8Aa$jwE(&)`rq>P@ON8O#Fj0&pqgrit$gd7kG;zi%-Dq8L6tKA*#?4M0}I z43Kcf^<&5fhlEHEIBfn@QkNh$8Oe`2sIzc_=hxTs18Jh9Y9@;-s4$8~5>S?&WNubI zzE{Dpqs?)!TDbSPdZsWSbQ^>%AR-GkK~xl3nn;rbR*478-E)GkHeFp<1DUC+>GE_V zCWfHsikOJ`WlUlsZ1d;i@i%vYpep#e4 z#p|S2{O-6-n50Ye2z4Bi_(riVFL9RKueP@dPPN#?-OFrI)N zTacxtju-3KCCnaMAA0GAs?mSHzR(w~jHfdjUpKluIJAkw* zd~0_uhGqR3xC0pclQ0)OtmUXzvbhp2wIOiWd(sg&AjT&FkisLBI!AfMysv+asaDQ^ z>eL;kguP+!c6_rr`p&&Vl|O$2Risdtg4677n7-g^L9Lu>0*FS#WZb%&kd}{C>AQFDew4i8HXTCcCWS;$7L{ziPXz9|@T*om2lGVs9$nN~HiC@>K~eZ0ym8v~{mK z$2V-U)$}l-j4WXawEISuEhdD@nwsOMha1k@Qw1dxPBWQe0SIF$uMLbZ=yqgLz5#@+ zgM0=ChKwWNMvVblm)HQIl3ZUKJzcW6=~zAZ@G)=}l&B(nu+{E8e)*?x6mikO`cwIc zNk}wRRiABz-Qzx59b6d8^tT0}9~g*kwKHB$&&M~piAI&6Sc#XdCgpGUNLqDOLLV{O zoDf(daa-HR7&L`_%AH04=t*HppAsfxs%%+qwaTwzISIJ3zh;dV2GPYDM`1 z&f8j@Si%Fs+<2zhplWp;C&UAaH>kZZ6V|2*p8Km8Vv@@AhZ|$@SYL=;KWcy0^W$jD zQmX@w%hZp!*RH19U<)K9+8T~$lB1)kVMyqtKrd|`= z&VNiZQ2oAXWmG1CCPo16E*2&xnc!F5Iw#B7Mvo;O$OpYdbjXZGtR|NBq3!kBU(W$w z`u?~njTmFuokC~e{LHrtQNkAS%0r&I9xz;70nG*AXs^9iVmekx#6Kr;vmz?9F19CM zXXRdR{s;l&eFgT9S@b@SNp^8@F;MDNe_aF$a&wRM$&x*_q2lA?V`jDl z>wP{))zu%x_lR(7}nSs>Y_=Xsf)8Vo8nN~3iy}5D__9#H#lkRC8n~Dnh_a3^k;4b zMz9#?Gn5jR{v_>P-*o`I{yC&_DtvXlGa|>K!)5j2V%-Jtv}&!zx%Z$smeB_hrHN0t z3ofiF+?_x2-bKt@jpNVEbto!VM{;f5%fUhLpf|c!hdtVL+snA0F138Co_)+?Y0qUo z`qg_;YV*Xnf|<$d>Rj<>5fA7KK=$asqlRyMRs9|QRLz<;PDAiD7d&(7e>m+$^x%=)iBCG@r=A4=*<)yCW>Ze zW_j|dAmb|Cj;)4oQR+a#U%yW<@bHW;h!;gxaDE%#(88Dh3&rn$!u}YxMTxrY{UvVB6VbwijPW!7DfgFYv1y-K3RR8)0T{kOtc&8+$I`? zivH5FB_FfXrWnINd#vUG(?76`8xpz`m`eEkyxx#7un6DFrpAN%*xuTj=*82TnL-ji z1CDZqBo68A2JdoWphPP&0+E~wMynBw^-Nq`x~Y3v5&xFtM{T)PKrI1Z1tl1VoR{_t zx*j+N)E*#AbvyRS0;>d+7GsJ{#lBl^I{+z^m6vaM-uyX^1)-C{_xJB{Tan$lLrsho zLC?W)kQ=WP-8MOj+SUga5s0FE9;XMO;&~&*AqgLMwG8NDt)Mh$AFslcXWUvlwYHAy+{wD zMazaI{7WAsC^g`4{`x!I8O0UeMS-dEq5B!+g#kS=2{>t-kLlIW|2LLX_*WY?W-nJ(13;6*A zGVz4v83Gi26)jN*nWe7Z=7)|z-?j48d!~;%a`v@kk!8r5E`Q2C#|FegQbLF;UUb5 zPl`pO_QOAT(aA^a4BGrh)ZO!%4^e{> z6c$8L5pbSq#B7Plfp*6un@DGL>*e*>=a2vEIr>B;;X3c__1)ha_3A~>XXA^BistWq z_od7)2bJu(xmF+o`n~mgS@MuJ!u@(^U)%dW04t&rDHqfhi|74p+XHgxo?Mc4B2R*A zT(5i8FrD^h{r78fNf-Lh#~=hHZZzSqYt1S_Is8P*{$DfsmlXv&@V9myVPQorEhgu; zq^Rwb5IbTECg_EPgjiUtfJI)0GJ^i}5|TokBW5J7T)h@($YbjrLrUEd(LUdf9#)pxs&m!PjAE`h_~=- zfz%Cn6c9K@1m(!a7J0S#d1d4F5PR!-u=zw z8r=U^peGcx4QYUT+fTsM|^wnc9Cd3kFGK zPS3-*YdnX)OeNvEkNlC7|b0XX6Y;Q!}-RlkWXY)x$ zTMm3{*EPELSNwvwNnB{X!NI`*n@rX^KC)I{@H2R! 
zA-=@O%DPiIrc;hn00ALCUGghs1!JKOknk7R8|+6z*XJMEUSyL=|Er^zL6-PAL|1!> zp-B`TxOg{6@J`9q`HGO~dlqE7C{NHAk;vy5)=y7_!KzWss zhAVjDZRIFq3(AMb!Js5r7?-`hJuGjeRUwW4H#mA@d@9&1XFQIl9U2)2fuM;Houhx% z=S^%#hgqu{G_Yx_aBe%f5_uIX~q>&Jo}H!_aB$< zKQ7<@?=D}OdpsTZCx{b_E~z$Kkcr9u)f}Mf`Fjbt#i2n#nKkFuJje*hyc!f5I#uV)%9-Z2 z%@Yw1ypi$^aAIQP;-){frLNQkolbVlFFt);`FsCfe(%-L9BYIyLTHGQM5aQ_Y+3h2 zr3YZw#jYW6q;G@&dbnlDqEnwRaTAv!$I0E@T~jl0|L?=Jw2Ol?c=Khs5%w{J5XC7$ ztB>99+gAospqJ0|{Z&EfaGpZBGr+rj<>$6Ypr4dXu}G4zdvGXMw&n_Sh(=?L*n$p7 zTrSWyA8R<6B|e2uK_Q=fe*7M2$?ZC8)3T{c5m-QL#0?iuOO9ej9ley#^K;v(atE!GppQAC5a{9{MYauo;yuie|QmoxGrWc>QwLnMI zxd$19+(GjiYcvG#EjoI7zz-*vFF|K9jKw{PS+J7GD|Wytt{e-xv4n2&xric`MY0Eo zK(H?GuL2Bb!qbR_=doLqN*DBK3%lnGJf&?Ceit@wi3p0V&zDRFT3TdR_Z0&Ehubbk zg?{3!KLtIwxJ^olkFPLRE6vB8)`r0I&~tJ+g8l~}q6r#z;+P9VcR}YS^;r1B_-r5& zf$pN;!H-G Date: Tue, 11 Apr 2023 14:07:14 -0500 Subject: [PATCH 101/231] Remove -Wunused-but-set-variable warnings in tests (#2661) --- test/dt_arith.c | 4 ---- test/tattr.c | 8 ++++---- test/twriteorder.c | 25 ++----------------------- 3 files changed, 6 insertions(+), 31 deletions(-) diff --git a/test/dt_arith.c b/test/dt_arith.c index aa0115e8a84..c006c49a911 100644 --- a/test/dt_arith.c +++ b/test/dt_arith.c @@ -2799,7 +2799,6 @@ test_conv_flt_1(const char *name, int run_test, hid_t src, hid_t dst) unsigned char *hw = NULL; /*ptr to hardware-conv'd*/ int underflow; /*underflow occurred */ int overflow = 0; /*overflow occurred */ - int uflow = 0; /*underflow debug counters*/ size_t j, k; /*counters */ int sendian; /* source type endianness */ int dendian; /* Destination type endianness */ @@ -3089,9 +3088,6 @@ test_conv_flt_1(const char *name, int run_test, hid_t src, hid_t dst) } #endif } - if (underflow) { - uflow++; - } /* For Intel machines, the size of "long double" is 12 bytes, precision * is 80 bits; for Intel IA64 and AMD processors, the size of "long double" diff --git a/test/tattr.c b/test/tattr.c index 5d822340067..5dd57364515 100644 --- a/test/tattr.c +++ b/test/tattr.c @@ -7082,7 +7082,7 @@ attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx if (iter_info->visited[v] == TRUE) nvisit++; - VERIFY(skip, (max_attrs / 2), "H5Aiterate2"); + VERIFY(nvisit, max_attrs, "H5Aiterate2"); } /* end else */ /* Skip over some attributes on object */ @@ -7113,7 +7113,7 @@ attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx if (iter_info->visited[v] == TRUE) nvisit++; - VERIFY(skip, (max_attrs / 2), "H5Aiterate_by_name"); + VERIFY(nvisit, max_attrs, "H5Aiterate_by_name"); } /* end else */ /* Skip over some attributes on object */ @@ -7144,7 +7144,7 @@ attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx if (iter_info->visited[v] == TRUE) nvisit++; - VERIFY(skip, (max_attrs / 2), "H5Aiterate_by_name"); + VERIFY(nvisit, max_attrs, "H5Aiterate_by_name"); } /* end else */ #ifndef H5_NO_DEPRECATED_SYMBOLS @@ -7176,7 +7176,7 @@ attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx if (iter_info->visited[v] == TRUE) nvisit++; - VERIFY(skip, (max_attrs / 2), "H5Aiterate1"); + VERIFY(nvisit, max_attrs, "H5Aiterate1"); } /* end else */ #endif /* H5_NO_DEPRECATED_SYMBOLS */ diff --git a/test/twriteorder.c b/test/twriteorder.c index c7984e7ff24..165abcf3b3e 100644 --- a/test/twriteorder.c +++ b/test/twriteorder.c @@ -273,9 +273,6 @@ write_wo_file(void) HDmemset(&buffer[4], i & 0xff, (size_t)(BLOCKSIZE_DFT - 4)); /* write the block */ -#ifdef DEBUG - HDprintf("writing block at %d\n", blkaddr); -#endif 
HDlseek(write_fd_g, (HDoff_t)blkaddr, SEEK_SET); if ((bytes_wrote = HDwrite(write_fd_g, buffer, (size_t)blocksize_g)) != blocksize_g) { HDprintf("blkaddr write failed in partition %d\n", i); @@ -295,9 +292,6 @@ write_wo_file(void) } /* all writes done. return success. */ -#ifdef DEBUG - HDprintf("wrote %d blocks\n", nlinkedblock_g); -#endif return 0; } @@ -305,9 +299,8 @@ int read_wo_file(void) { int read_fd; - int blkaddr = 0; - h5_posix_io_ret_t bytes_read = -1; /* # of bytes actually read */ - int linkedblocks_read = 0; + int blkaddr = 0; + h5_posix_io_ret_t bytes_read = -1; /* # of bytes actually read */ char buffer[BLOCKSIZE_DFT]; /* Open the data file */ @@ -324,30 +317,19 @@ read_wo_file(void) return -1; } } - linkedblocks_read++; /* got a non-zero blkaddr. Proceed down the linked blocks. */ -#ifdef DEBUG - HDprintf("got initial block address=%d\n", blkaddr); -#endif while (blkaddr != 0) { HDlseek(read_fd, (HDoff_t)blkaddr, SEEK_SET); if ((bytes_read = HDread(read_fd, buffer, (size_t)blocksize_g)) != blocksize_g) { HDprintf("blkaddr read failed in partition %d\n", 0); return -1; } - linkedblocks_read++; /* retrieve the block address in byte 0-3 */ HDmemcpy(&blkaddr, &buffer[0], sizeof(blkaddr)); -#ifdef DEBUG - HDprintf("got next block address=%d\n", blkaddr); -#endif } -#ifdef DEBUG - HDprintf("read %d blocks\n", linkedblocks_read); -#endif return 0; } @@ -426,9 +408,6 @@ main(int argc, char *argv[]) /* launch writer */ /* ============= */ /* this process continues to launch the writer */ -#ifdef DEBUG - HDprintf("%d: continue as the writer process\n", mypid); -#endif if (write_wo_file() < 0) { HDfprintf(stderr, "write_wo_file encountered error\n"); Hgoto_error(1); From 70827d89279d4667bcbd3ca0b55bb51ddd917207 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 11 Apr 2023 14:08:46 -0500 Subject: [PATCH 102/231] Fix a heap buffer overflow during H5D__compact_readvv (GitHub #2606) (#2664) --- release_docs/RELEASE.txt | 19 +++++++++++++++++++ src/H5Dint.c | 27 +++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index be040f49c67..a33c97bb347 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -151,6 +151,25 @@ Bug Fixes since HDF5-1.13.3 release =================================== Library ------- + - Fixed a heap buffer overflow that occurs when reading from + a dataset with a compact layout within a malformed HDF5 file + + During opening of a dataset that has a compact layout, the + library allocates a buffer that stores the dataset's raw data. + The dataset's object header that gets written to the file + contains information about how large of a buffer the library + should allocate. If this object header is malformed such that + it causes the library to allocate a buffer that is too small + to hold the dataset's raw data, future I/O to the dataset can + result in heap buffer overflows. To fix this issue, an extra + check is now performed for compact datasets to ensure that + the size of the allocated buffer matches the expected size + of the dataset's raw data (as calculated from the dataset's + dataspace and datatype information). If the two sizes do not + match, opening of the dataset will fail. 
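    As a rough illustration of the consistency check described above, a minimal
    standalone sketch might look like the following. The actual check added to
    H5Dint.c appears in the hunk further below; compact_info, check_compact_size
    and the field names here are made up for the example and are not HDF5 API.

        #include <stddef.h>
        #include <stdio.h>

        /* Illustrative stand-ins for the decoded metadata; the real code pulls
         * these from the dataset's dataspace, datatype and layout message. */
        struct compact_info {
            long long nelmts;      /* number of elements in the dataspace      */
            size_t    type_size;   /* size of one datatype element, in bytes   */
            size_t    stored_size; /* compact buffer size recorded in the file */
        };

        /* Return 0 if the stored compact buffer size matches the size implied
         * by the dataspace and datatype, -1 otherwise (treated as corruption). */
        static int check_compact_size(const struct compact_info *info)
        {
            size_t expected;

            if (info->nelmts < 0 || info->type_size == 0)
                return -1;
            expected = (size_t)info->nelmts * info->type_size;
            return (info->stored_size == expected) ? 0 : -1;
        }

        int main(void)
        {
            struct compact_info good = {100, 4, 400};
            struct compact_info bad  = {100, 4, 16}; /* malformed object header */

            printf("good: %d\n", check_compact_size(&good)); /* prints  0 */
            printf("bad:  %d\n", check_compact_size(&bad));  /* prints -1 */
            return 0;
        }

    Refusing the open on a mismatch mirrors the behaviour described above: the
    inconsistency is treated as file corruption up front rather than risking
    out-of-bounds I/O later.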
+ + (JTH - 2023/04/04, GH-2606) + - Fixed a memory corruption issue that can occur when reading from a dataset using a hyperslab selection in the file dataspace and a point selection in the memory dataspace diff --git a/src/H5Dint.c b/src/H5Dint.c index 34a9d75a606..e930c6a92b4 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -1718,6 +1718,33 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id) /* Indicate that the layout information was initialized */ layout_init = TRUE; + /* + * Now that we've read the dataset's datatype, dataspace and + * layout information, perform a quick check for compact datasets + * to ensure that the size of the internal buffer that was + * allocated for the dataset's raw data matches the size of + * the data. A corrupted file can cause a mismatch between the + * two, which might result in buffer overflows during future + * I/O to the dataset. + */ + if (H5D_COMPACT == dataset->shared->layout.type) { + hssize_t dset_nelemts = 0; + size_t dset_type_size = H5T_GET_SIZE(dataset->shared->type); + size_t dset_data_size = 0; + + HDassert(H5D_COMPACT == dataset->shared->layout.storage.type); + + if ((dset_nelemts = H5S_GET_EXTENT_NPOINTS(dataset->shared->space)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get number of elements in dataset's dataspace") + + dset_data_size = (size_t)dset_nelemts * dset_type_size; + + if (dataset->shared->layout.storage.u.compact.size != dset_data_size) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, + "bad value from dataset header - size of compact dataset's data buffer doesn't match " + "size of dataset data"); + } + /* Set up flush append property */ if (H5D__append_flush_setup(dataset, dapl_id)) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set up flush append property") From 5d37ec6e3cb70082c8a603511063f8dbc1f24de3 Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Tue, 11 Apr 2023 14:20:10 -0500 Subject: [PATCH 103/231] ci: add HDF-EOS5 test action (#2415) (#2625) --- .github/workflows/hdfeos5.yml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 .github/workflows/hdfeos5.yml diff --git a/.github/workflows/hdfeos5.yml b/.github/workflows/hdfeos5.yml new file mode 100644 index 00000000000..8378b4c2fe4 --- /dev/null +++ b/.github/workflows/hdfeos5.yml @@ -0,0 +1,30 @@ +name: hdfeos5 + +on: [push, pull_request] + +jobs: + build: + name: Build hdfeos5 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Install Autotools Dependencies (Linux) + run: | + sudo apt update + sudo apt install automake autoconf libtool libtool-bin + - name: Install HDF5 + run: | + ./autogen.sh + ./configure --prefix=/usr/local --with-default-api-version=v16 + make + sudo make install + - name: Install HDF-EOS5 + run: | + wget -O HDF-EOS5.2.0.tar.gz "https://git.earthdata.nasa.gov/projects/DAS/repos/hdfeos5/raw/hdf-eos5-2.0-src.tar.gz?at=refs%2Fheads%2FHDFEOS5_2.0" + tar zxvf HDF-EOS5.2.0.tar.gz + cd hdf-eos5-2.0 + ./configure CC=/usr/local/bin/h5cc --prefix=/usr/local/ --enable-install-include + make + make check + sudo make install From 2f00aa15852be0daa95512896770c66661eb759d Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Tue, 11 Apr 2023 21:28:32 +0200 Subject: [PATCH 104/231] CMake: Let runtime checks through stdout only (#2666) This avoids mixing emulators errors on stderr to configure results while cross-compiling with wine. 
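For context, these configure-time checks work by running a tiny probe program and
parsing what it prints. A minimal C analogue of such a probe is sketched below; it is
illustrative only, since the real probes are the generated C and Fortran snippets in
ConfigureChecks.cmake, HDF5UseFortran.cmake and aclocal_fc.f90.

    /* The value the build system needs is printed on stdout only, so noise an
     * emulator such as wine writes to stderr cannot leak into the captured
     * result when try_run() uses RUN_OUTPUT_STDOUT_VARIABLE. */
    #include <stdio.h>

    int main(void)
    {
        printf("%d\n", (int)sizeof(long double));         /* parsed by the build */
        fprintf(stderr, "diagnostics are ignored here\n"); /* never parsed */
        return 0;
    }

With the capture switched to RUN_OUTPUT_STDOUT_VARIABLE on CMake 3.25 and newer (and
the Fortran probes writing to OUTPUT_UNIT instead of ERROR_UNIT), emulator chatter on
stderr no longer ends up in the parsed result.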
--- config/cmake/ConfigureChecks.cmake | 7 ++++++- config/cmake/HDF5UseFortran.cmake | 29 +++++++++++++++++------------ m4/aclocal_fc.f90 | 20 ++++++++++---------- 3 files changed, 33 insertions(+), 23 deletions(-) diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake index b535396ab7f..47c2438349c 100644 --- a/config/cmake/ConfigureChecks.cmake +++ b/config/cmake/ConfigureChecks.cmake @@ -780,12 +780,17 @@ if (HDF5_BUILD_FORTRAN) ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/testCCompiler1.c ${SOURCE_CODE} ) + if (CMAKE_VERSION VERSION_LESS 3.25) + set (_RUN_OUTPUT_VARIABLE "RUN_OUTPUT_VARIABLE") + else () + set (_RUN_OUTPUT_VARIABLE "RUN_OUTPUT_STDOUT_VARIABLE") + endif() TRY_RUN (RUN_RESULT_VAR COMPILE_RESULT_VAR ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/testCCompiler1.c COMPILE_DEFINITIONS "-D_SIZEOF___FLOAT128=${H5_SIZEOF___FLOAT128};-D_HAVE_QUADMATH_H=${H5_HAVE_QUADMATH_H}" COMPILE_OUTPUT_VARIABLE COMPILEOUT - RUN_OUTPUT_VARIABLE OUTPUT_VAR + ${_RUN_OUTPUT_VARIABLE} OUTPUT_VAR ) set (${RETURN_OUTPUT_VAR} ${OUTPUT_VAR}) diff --git a/config/cmake/HDF5UseFortran.cmake b/config/cmake/HDF5UseFortran.cmake index 79084cb8c90..aae707853fd 100644 --- a/config/cmake/HDF5UseFortran.cmake +++ b/config/cmake/HDF5UseFortran.cmake @@ -39,11 +39,16 @@ macro (FORTRAN_RUN FUNCTION_NAME SOURCE_CODE RUN_RESULT_VAR1 COMPILE_RESULT_VAR1 ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/testFortranCompiler1.f90 "${SOURCE_CODE}" ) + if (CMAKE_VERSION VERSION_LESS 3.25) + set (_RUN_OUTPUT_VARIABLE "RUN_OUTPUT_VARIABLE") + else () + set (_RUN_OUTPUT_VARIABLE "RUN_OUTPUT_STDOUT_VARIABLE") + endif() TRY_RUN (RUN_RESULT_VAR COMPILE_RESULT_VAR ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/testFortranCompiler1.f90 LINK_LIBRARIES "${HDF5_REQUIRED_LIBRARIES}" - RUN_OUTPUT_VARIABLE OUTPUT_VAR + ${_RUN_OUTPUT_VARIABLE} OUTPUT_VAR ) set (${RETURN_OUTPUT_VAR} ${OUTPUT_VAR}) @@ -169,10 +174,10 @@ foreach (KIND ${VAR}) " PROGRAM main USE ISO_C_BINDING - USE, INTRINSIC :: ISO_FORTRAN_ENV, ONLY : stderr=>ERROR_UNIT + USE, INTRINSIC :: ISO_FORTRAN_ENV, ONLY : stdout=>OUTPUT_UNIT IMPLICIT NONE INTEGER (KIND=${KIND}) a - WRITE(stderr,'(I0)') ${FC_SIZEOF_A} + WRITE(stdout,'(I0)') ${FC_SIZEOF_A} END " ) @@ -210,10 +215,10 @@ foreach (KIND ${VAR} ) " PROGRAM main USE ISO_C_BINDING - USE, INTRINSIC :: ISO_FORTRAN_ENV, ONLY : stderr=>ERROR_UNIT + USE, INTRINSIC :: ISO_FORTRAN_ENV, ONLY : stdout=>OUTPUT_UNIT IMPLICIT NONE REAL (KIND=${KIND}) a - WRITE(stderr,'(I0)') ${FC_SIZEOF_A} + WRITE(stdout,'(I0)') ${FC_SIZEOF_A} END " ) @@ -252,17 +257,17 @@ set (PROG_SRC3 " PROGRAM main USE ISO_C_BINDING - USE, INTRINSIC :: ISO_FORTRAN_ENV, ONLY : stderr=>ERROR_UNIT + USE, INTRINSIC :: ISO_FORTRAN_ENV, ONLY : stdout=>OUTPUT_UNIT IMPLICIT NONE INTEGER a REAL b DOUBLE PRECISION c - WRITE(stderr,*) ${FC_SIZEOF_A} - WRITE(stderr,*) kind(a) - WRITE(stderr,*) ${FC_SIZEOF_B} - WRITE(stderr,*) kind(b) - WRITE(stderr,*) ${FC_SIZEOF_C} - WRITE(stderr,*) kind(c) + WRITE(stdout,*) ${FC_SIZEOF_A} + WRITE(stdout,*) kind(a) + WRITE(stdout,*) ${FC_SIZEOF_B} + WRITE(stdout,*) kind(b) + WRITE(stdout,*) ${FC_SIZEOF_C} + WRITE(stdout,*) kind(c) END " ) diff --git a/m4/aclocal_fc.f90 b/m4/aclocal_fc.f90 index bcefab5c8c8..68a8f1b590c 100644 --- a/m4/aclocal_fc.f90 +++ b/m4/aclocal_fc.f90 @@ -82,7 +82,7 @@ END PROGRAM PROG_FC_C_LONG_DOUBLE_EQ_C_DOUBLE !---- START ----- Determine the available KINDs for REALs and INTEGERs PROGRAM FC_AVAIL_KINDS - USE, INTRINSIC :: 
ISO_FORTRAN_ENV, ONLY : stderr=>ERROR_UNIT + USE, INTRINSIC :: ISO_FORTRAN_ENV, ONLY : stdout=>OUTPUT_UNIT IMPLICIT NONE INTEGER :: ik, jk, k, kk, max_decimal_prec INTEGER :: prev_rkind, num_rkinds = 1, num_ikinds = 1 @@ -102,11 +102,11 @@ PROGRAM FC_AVAIL_KINDS ENDDO DO k = 1, num_ikinds - WRITE(stderr,'(I0)', ADVANCE='NO') list_ikinds(k) + WRITE(stdout,'(I0)', ADVANCE='NO') list_ikinds(k) IF(k.NE.num_ikinds)THEN - WRITE(stderr,'(A)',ADVANCE='NO') ',' + WRITE(stdout,'(A)',ADVANCE='NO') ',' ELSE - WRITE(stderr,'()') + WRITE(stdout,'()') ENDIF ENDDO @@ -139,17 +139,17 @@ PROGRAM FC_AVAIL_KINDS ENDDO prec DO k = 1, num_rkinds - WRITE(stderr,'(I0)', ADVANCE='NO') list_rkinds(k) + WRITE(stdout,'(I0)', ADVANCE='NO') list_rkinds(k) IF(k.NE.num_rkinds)THEN - WRITE(stderr,'(A)',ADVANCE='NO') ',' + WRITE(stdout,'(A)',ADVANCE='NO') ',' ELSE - WRITE(stderr,'()') + WRITE(stdout,'()') ENDIF ENDDO - WRITE(stderr,'(I0)') max_decimal_prec - WRITE(stderr,'(I0)') num_ikinds - WRITE(stderr,'(I0)') num_rkinds + WRITE(stdout,'(I0)') max_decimal_prec + WRITE(stdout,'(I0)') num_ikinds + WRITE(stdout,'(I0)') num_rkinds END PROGRAM FC_AVAIL_KINDS !---- END ----- Determine the available KINDs for REALs and INTEGERs From 38404dfffb6602c53c812ce3ba925ce77522d31e Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 11 Apr 2023 14:31:25 -0500 Subject: [PATCH 105/231] Fix improper include of build directory (#2422, #2621) (#2667) --- configure.ac | 6 ++---- release_docs/RELEASE.txt | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/configure.ac b/configure.ac index 7c8c958bf3e..d5f46da7577 100644 --- a/configure.ac +++ b/configure.ac @@ -3012,8 +3012,7 @@ SUBFILING_VFD=no HAVE_MERCURY="no" ## Always include subfiling directory so public header files are available -CPPFLAGS="$CPPFLAGS -I$ac_abs_confdir/src/H5FDsubfiling" -AM_CPPFLAGS="$AM_CPPFLAGS -I$ac_abs_confdir/src/H5FDsubfiling" +H5_CPPFLAGS="$H5_CPPFLAGS -I$ac_abs_confdir/src/H5FDsubfiling" AC_MSG_CHECKING([if the subfiling I/O virtual file driver (VFD) is enabled]) @@ -3061,8 +3060,7 @@ if test "X$SUBFILING_VFD" = "Xyes"; then mercury_dir="$ac_abs_confdir/src/H5FDsubfiling/mercury" mercury_inc="$mercury_dir/src/util" - CPPFLAGS="$CPPFLAGS -I$mercury_inc" - AM_CPPFLAGS="$AM_CPPFLAGS -I$mercury_inc" + H5_CPPFLAGS="$H5_CPPFLAGS -I$mercury_inc" HAVE_STDATOMIC_H="yes" AC_CHECK_HEADERS([stdatomic.h],,[HAVE_STDATOMIC_H="no"]) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index a33c97bb347..96e3e6fd931 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -240,6 +240,24 @@ Bug Fixes since HDF5-1.13.3 release Configuration ------------- + - Fixed improper include of Subfiling VFD build directory + + With the release of the Subfiling Virtual File Driver feature, compiler + flags were added to the Autotools build's CPPFLAGS and AM_CPPFLAGS + variables to always include the Subfiling VFD source code directory, + regardless of whether the VFD is enabled and built or not. These flags + are needed because the header files for the VFD contain macros that are + assumed to always be available, such as H5FD_SUBFILING_NAME, so the + header files are unconditionally included in the HDF5 library. However, + these flags are only needed when building HDF5, so they belong in the + H5_CPPFLAGS variable instead. 
Inclusion in the CPPFLAGS and AM_CPPFLAGS + variables would export these flags to the h5cc and h5c++ wrapper scripts, + as well as the libhdf5.settings file, which would break builds of software + that use HDF5 and try to use or parse information out of these files after + deleting temporary HDF5 build directories. + + (JTH - 2023/04/05 GH-2422, GH-2621) + - Correct the CMake generated pkg-config file The pkg-config file generated by CMake had the order and placement of the From 8fba3b8d871ecc559591d7daeb189b0a34dec748 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Tue, 11 Apr 2023 13:41:06 -0600 Subject: [PATCH 106/231] Perlmutter was fixed (#2677) --- release_docs/RELEASE.txt | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 96e3e6fd931..f00ce32d0ca 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -450,26 +450,6 @@ Platforms Tested Known Problems ============== - ************************************************************ - * _ * - * (_) * - * __ ____ _ _ __ _ __ _ _ __ __ _ * - * \ \ /\ / / _` | '__| '_ \| | '_ \ / _` | * - * \ V V / (_| | | | | | | | | | | (_| | * - * \_/\_/ \__,_|_| |_| |_|_|_| |_|\__, | * - * __/ | * - * |___/ * - * * - * Please refrain from running any program (including * - * HDF5 tests) which uses the subfiling VFD on Perlmutter * - * at the National Energy Research Scientific Computing * - * Center, NERSC. * - * Doing so may cause a system disruption due to subfiling * - * crashing Lustre. The sytem's Lustre bug is expected * - * to be resolved by 2023. * - * * - ************************************************************ - CMake files do not behave correctly with paths containing spaces. Do not use spaces in paths because the required escaping for handling spaces results in very complex and fragile build files. From 60093b87f9b9bd1819bc32f1c7f5f473afd07e59 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 11 Apr 2023 14:47:51 -0500 Subject: [PATCH 107/231] Add buffer overrun checks to H5O__layout_decode and H5O__sdspace_decode (#2679) Co-authored-by: Larry Knox --- release_docs/RELEASE.txt | 8 ++ src/H5Olayout.c | 256 +++++++++++++++++++++++++++++++++------ src/H5Osdspace.c | 62 +++++++--- src/H5private.h | 5 + 4 files changed, 276 insertions(+), 55 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index f00ce32d0ca..b460141adad 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -151,6 +151,14 @@ Bug Fixes since HDF5-1.13.3 release =================================== Library ------- + - Fixed potential buffer overrun issues in some object header decode routines + + Several checks were added to H5O__layout_decode and H5O__sdspace_decode to + ensure that memory buffers don't get overrun when decoding buffers read from + a (possibly corrupted) HDF5 file. 
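    To make the pattern used throughout the hunks below easier to follow, here
    is a small self-contained sketch of decoding with an explicit bounds check.
    The real check is the H5_IS_BUFFER_OVERFLOW macro (presumably the small
    addition to src/H5private.h in this patch, not reproduced in this excerpt);
    BUF_OVERFLOW and decode_u32 below are illustrative stand-ins, not HDF5 API.

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        /* Illustrative equivalent of the bounds check: true if reading `need`
         * bytes at `p` would run past `end`, the last valid byte of the buffer. */
        #define BUF_OVERFLOW(p, need, end) ((p) + (need) - 1 > (end))

        static int decode_u32(const uint8_t **pp, const uint8_t *end, uint32_t *out)
        {
            if (BUF_OVERFLOW(*pp, sizeof(uint32_t), end))
                return -1;                      /* refuse to read past the buffer */
            memcpy(out, *pp, sizeof(uint32_t)); /* byte order ignored in this sketch */
            *pp += sizeof(uint32_t);
            return 0;
        }

        int main(void)
        {
            uint8_t        buf[7] = {1, 0, 0, 0, 2, 0, 0}; /* one u32 fits, a second does not */
            const uint8_t *p      = buf;
            const uint8_t *end    = buf + sizeof(buf) - 1; /* last valid byte */
            uint32_t       v      = 0;

            printf("first read:  %d\n", decode_u32(&p, end, &v)); /* succeeds (0)  */
            printf("second read: %d\n", decode_u32(&p, end, &v)); /* rejected (-1) */
            return 0;
        }

    The important point, which the changes below apply before every read from
    the message buffer, is that the check happens before the pointer is
    advanced or dereferenced, so a truncated or corrupted object header is
    rejected instead of being read past its end.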
+ + (JTH - 2023/04/05) + - Fixed a heap buffer overflow that occurs when reading from a dataset with a compact layout within a malformed HDF5 file diff --git a/src/H5Olayout.c b/src/H5Olayout.c index 595c73efbb4..a58fc0c66ba 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -91,11 +91,11 @@ static void * H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { + const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ H5O_layout_t *mesg = NULL; uint8_t *heap_block = NULL; unsigned u; - const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ - void *ret_value = NULL; /* Return value */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE @@ -105,39 +105,55 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU /* decode */ if (NULL == (mesg = H5FL_CALLOC(H5O_layout_t))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "memory allocation failed") mesg->storage.type = H5D_LAYOUT_ERROR; + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") mesg->version = *p++; + if (mesg->version < H5O_LAYOUT_VERSION_1 || mesg->version > H5O_LAYOUT_VERSION_4) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for layout message") + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad version number for layout message") if (mesg->version < H5O_LAYOUT_VERSION_3) { unsigned ndims; /* Num dimensions in chunk */ /* Dimensionality */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") ndims = *p++; + if (!ndims || ndims > H5O_LAYOUT_NDIMS) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is out of range") + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "dimensionality is out of range") /* Layout class */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") mesg->type = (H5D_layout_t)*p++; - HDassert(H5D_CONTIGUOUS == mesg->type || H5D_CHUNKED == mesg->type || H5D_COMPACT == mesg->type); + + if (H5D_CONTIGUOUS != mesg->type && H5D_CHUNKED != mesg->type && H5D_COMPACT != mesg->type) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad layout type for layout message") /* Set the storage type */ mesg->storage.type = mesg->type; /* Reserved bytes */ + if (H5_IS_BUFFER_OVERFLOW(p, 5, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") p += 5; /* Address */ if (mesg->type == H5D_CONTIGUOUS) { + if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.contig.addr)); /* Set the layout operations */ mesg->ops = H5D_LOPS_CONTIG; } /* end if */ else if (mesg->type == H5D_CHUNKED) { + if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr)); /* Set the layout operations */ @@ -164,29 +180,46 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU * size in the dataset code, where we've got the dataspace * information available also. 
- QAK 5/26/04 */ - p += ndims * 4; /* Skip over dimension sizes (32-bit quantities) */ - } /* end if */ + if (H5_IS_BUFFER_OVERFLOW(p, (ndims * sizeof(uint32_t)), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") + p += ndims * sizeof(uint32_t); /* Skip over dimension sizes */ + } /* end if */ else { if (ndims < 2) - HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "bad dimensions for chunked storage") + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad dimensions for chunked storage") mesg->u.chunk.ndims = ndims; - for (u = 0; u < ndims; u++) + + if (H5_IS_BUFFER_OVERFLOW(p, (ndims * sizeof(uint32_t)), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") + + for (u = 0; u < ndims; u++) { UINT32DECODE(p, mesg->u.chunk.dim[u]); + /* Just in case that something goes very wrong, such as file corruption. */ + if (mesg->u.chunk.dim[u] == 0) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, + "bad chunk dimension value when parsing layout message - chunk dimension " + "must be positive: mesg->u.chunk.dim[%u] = %u", + u, mesg->u.chunk.dim[u]) + } + /* Compute chunk size */ for (u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < ndims; u++) mesg->u.chunk.size *= mesg->u.chunk.dim[u]; } /* end if */ if (mesg->type == H5D_COMPACT) { + if (H5_IS_BUFFER_OVERFLOW(p, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT32DECODE(p, mesg->storage.u.compact.size); + if (mesg->storage.u.compact.size > 0) { /* Ensure that size doesn't exceed buffer size, due to possible data corruption */ - if (p + mesg->storage.u.compact.size - 1 > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "storage size exceeds buffer size") + if (H5_IS_BUFFER_OVERFLOW(p, mesg->storage.u.compact.size, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") if (NULL == (mesg->storage.u.compact.buf = H5MM_malloc(mesg->storage.u.compact.size))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "memory allocation failed for compact data buffer") H5MM_memcpy(mesg->storage.u.compact.buf, p, mesg->storage.u.compact.size); p += mesg->storage.u.compact.size; @@ -195,18 +228,23 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU } /* end if */ else { /* Layout & storage class */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") mesg->type = mesg->storage.type = (H5D_layout_t)*p++; /* Interpret the rest of the message according to the layout class */ switch (mesg->type) { case H5D_COMPACT: /* Compact data size */ + if (H5_IS_BUFFER_OVERFLOW(p, sizeof(uint16_t), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, mesg->storage.u.compact.size); if (mesg->storage.u.compact.size > 0) { /* Ensure that size doesn't exceed buffer size, due to possible data corruption */ - if (p + mesg->storage.u.compact.size - 1 > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "storage size exceeds buffer size") + if (H5_IS_BUFFER_OVERFLOW(p, mesg->storage.u.compact.size, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") /* Allocate space for compact data */ if (NULL == (mesg->storage.u.compact.buf = H5MM_malloc(mesg->storage.u.compact.size))) @@ -224,9 +262,13 @@ H5O__layout_decode(H5F_t *f, H5O_t 
H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU case H5D_CONTIGUOUS: /* Contiguous storage address */ + if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.contig.addr)); /* Contiguous storage size */ + if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_SIZE(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_DECODE_LENGTH(f, p, mesg->storage.u.contig.size); /* Set the layout operations */ @@ -239,24 +281,36 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU mesg->u.chunk.flags = (uint8_t)0; /* Dimensionality */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.ndims = *p++; + if (mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large") + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "dimensionality is too large") if (mesg->u.chunk.ndims < 2) - HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "bad dimensions for chunked storage") + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad dimensions for chunked storage") /* B-tree address */ + if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr)); + if (H5_IS_BUFFER_OVERFLOW(p, (mesg->u.chunk.ndims * sizeof(uint32_t)), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") + /* Chunk dimensions */ for (u = 0; u < mesg->u.chunk.ndims; u++) { UINT32DECODE(p, mesg->u.chunk.dim[u]); /* Just in case that something goes very wrong, such as file corruption. 
*/ if (mesg->u.chunk.dim[u] == 0) - HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, - "chunk dimension must be positive: mesg->u.chunk.dim[%u] = %u", u, - mesg->u.chunk.dim[u]) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, + "bad chunk dimension value when parsing layout message - chunk " + "dimension must be positive: mesg->u.chunk.dim[%u] = %u", + u, mesg->u.chunk.dim[u]) } /* end for */ /* Compute chunk size */ @@ -270,6 +324,9 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU } /* end if */ else { /* Get the chunked layout flags */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.flags = *p++; /* Check for valid flags */ @@ -280,25 +337,50 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad flag value for message") /* Dimensionality */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.ndims = *p++; + if (mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "dimensionality is too large") /* Encoded # of bytes for each chunk dimension */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.enc_bytes_per_dim = *p++; + if (mesg->u.chunk.enc_bytes_per_dim == 0 || mesg->u.chunk.enc_bytes_per_dim > 8) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "encoded chunk dimension size is too large") + if (H5_IS_BUFFER_OVERFLOW(p, (mesg->u.chunk.ndims * mesg->u.chunk.enc_bytes_per_dim), + p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") + /* Chunk dimensions */ - for (u = 0; u < mesg->u.chunk.ndims; u++) + for (u = 0; u < mesg->u.chunk.ndims; u++) { UINT64DECODE_VAR(p, mesg->u.chunk.dim[u], mesg->u.chunk.enc_bytes_per_dim); + /* Just in case that something goes very wrong, such as file corruption. 
*/ + if (mesg->u.chunk.dim[u] == 0) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, + "bad chunk dimension value when parsing layout message - chunk " + "dimension must be positive: mesg->u.chunk.dim[%u] = %u", + u, mesg->u.chunk.dim[u]) + } + /* Compute chunk size */ for (u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++) mesg->u.chunk.size *= mesg->u.chunk.dim[u]; /* Chunk index type */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.idx_type = (H5D_chunk_index_t)*p++; + if (mesg->u.chunk.idx_type >= H5D_CHUNK_IDX_NTYPES) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "unknown chunk index type") mesg->storage.u.chunk.idx_type = mesg->u.chunk.idx_type; @@ -315,6 +397,9 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU case H5D_CHUNK_IDX_SINGLE: /* Single Chunk Index */ if (mesg->u.chunk.flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) { + if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_SIZE(f) + sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") H5F_DECODE_LENGTH(f, p, mesg->storage.u.chunk.u.single.nbytes); UINT32DECODE(p, mesg->storage.u.chunk.u.single.filter_mask); } /* end if */ @@ -325,9 +410,13 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU case H5D_CHUNK_IDX_FARRAY: /* Fixed array creation parameters */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits = *p++; + if (0 == mesg->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid fixed array creation parameter") /* Set the chunk operations */ @@ -336,25 +425,49 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU case H5D_CHUNK_IDX_EARRAY: /* Extensible array creation parameters */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.u.earray.cparam.max_nelmts_bits = *p++; + if (0 == mesg->u.chunk.u.earray.cparam.max_nelmts_bits) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid extensible array creation parameter") + + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.u.earray.cparam.idx_blk_elmts = *p++; + if (0 == mesg->u.chunk.u.earray.cparam.idx_blk_elmts) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid extensible array creation parameter") + + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.u.earray.cparam.sup_blk_min_data_ptrs = *p++; + if (0 == mesg->u.chunk.u.earray.cparam.sup_blk_min_data_ptrs) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid extensible array creation parameter") + + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.u.earray.cparam.data_blk_min_elmts = *p++; + if (0 == mesg->u.chunk.u.earray.cparam.data_blk_min_elmts) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, + 
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid extensible array creation parameter") + + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits = *p++; + if (0 == mesg->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid extensible array creation parameter") /* Set the chunk operations */ @@ -362,10 +475,35 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU break; case H5D_CHUNK_IDX_BT2: /* v2 B-tree index */ + if (H5_IS_BUFFER_OVERFLOW(p, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") UINT32DECODE(p, mesg->u.chunk.u.btree2.cparam.node_size); + + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.u.btree2.cparam.split_percent = *p++; + + if (mesg->u.chunk.u.btree2.cparam.split_percent == 0 || + mesg->u.chunk.u.btree2.cparam.split_percent > 100) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, + "bad value for v2 B-tree split percent value - must be > 0 and " + "<= 100: split percent = %" PRIu8, + mesg->u.chunk.u.btree2.cparam.split_percent) + + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") mesg->u.chunk.u.btree2.cparam.merge_percent = *p++; + if (mesg->u.chunk.u.btree2.cparam.merge_percent == 0 || + mesg->u.chunk.u.btree2.cparam.merge_percent > 100) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, + "bad value for v2 B-tree merge percent value - must be > 0 and " + "<= 100: merge percent = %" PRIu8, + mesg->u.chunk.u.btree2.cparam.merge_percent) + /* Set the chunk operations */ mesg->storage.u.chunk.ops = H5D_COPS_BT2; break; @@ -376,6 +514,9 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU } /* end switch */ /* Chunk index address */ + if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr)); } /* end else */ @@ -389,7 +530,13 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU HGOTO_ERROR(H5E_OHDR, H5E_VERSION, NULL, "invalid layout version with virtual layout") /* Heap information */ + if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.virt.serial_list_hobjid.addr)); + /* NOTE: virtual mapping global heap entry address could be undefined */ + + if (H5_IS_BUFFER_OVERFLOW(p, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT32DECODE(p, mesg->storage.u.virt.serial_list_hobjid.idx); /* Initialize other fields */ @@ -405,54 +552,88 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU /* Decode heap block if it exists */ if (mesg->storage.u.virt.serial_list_hobjid.addr != HADDR_UNDEF) { const uint8_t *heap_block_p; + const uint8_t *heap_block_p_end; uint8_t heap_vers; size_t block_size = 0; size_t tmp_size; hsize_t tmp_hsize; uint32_t stored_chksum; uint32_t computed_chksum; - size_t i; /* Read heap */ if (NULL == (heap_block = (uint8_t 
*)H5HG_read( f, &(mesg->storage.u.virt.serial_list_hobjid), NULL, &block_size))) HGOTO_ERROR(H5E_OHDR, H5E_READERROR, NULL, "Unable to read global heap block") - heap_block_p = (const uint8_t *)heap_block; + heap_block_p = (const uint8_t *)heap_block; + heap_block_p_end = heap_block_p + block_size - 1; /* Decode the version number of the heap block encoding */ + if (H5_IS_BUFFER_OVERFLOW(heap_block_p, 1, heap_block_p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") heap_vers = (uint8_t)*heap_block_p++; + if ((uint8_t)H5O_LAYOUT_VDS_GH_ENC_VERS != heap_vers) HGOTO_ERROR(H5E_OHDR, H5E_VERSION, NULL, "bad version # of encoded VDS heap information, expected %u, got %u", (unsigned)H5O_LAYOUT_VDS_GH_ENC_VERS, (unsigned)heap_vers) /* Number of entries */ + if (H5_IS_BUFFER_OVERFLOW(heap_block_p, H5F_SIZEOF_SIZE(f), heap_block_p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") H5F_DECODE_LENGTH(f, heap_block_p, tmp_hsize) /* Allocate entry list */ if (NULL == (mesg->storage.u.virt.list = (H5O_storage_virtual_ent_t *)H5MM_calloc( (size_t)tmp_hsize * sizeof(H5O_storage_virtual_ent_t)))) - HGOTO_ERROR(H5E_OHDR, H5E_RESOURCE, NULL, "unable to allocate heap block") + HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, NULL, "unable to allocate heap block") mesg->storage.u.virt.list_nalloc = (size_t)tmp_hsize; mesg->storage.u.virt.list_nused = (size_t)tmp_hsize; /* Decode each entry */ - for (i = 0; i < mesg->storage.u.virt.list_nused; i++) { + for (size_t i = 0; i < mesg->storage.u.virt.list_nused; i++) { + ptrdiff_t avail_buffer_space; + + avail_buffer_space = heap_block_p_end - heap_block_p + 1; + if (avail_buffer_space <= 0) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") + /* Source file name */ - tmp_size = HDstrlen((const char *)heap_block_p) + 1; + tmp_size = HDstrnlen((const char *)heap_block_p, (size_t)avail_buffer_space); + if (tmp_size == (size_t)avail_buffer_space) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding - unterminated source " + "file name string") + else + tmp_size += 1; /* Add space for NUL terminator */ + if (NULL == (mesg->storage.u.virt.list[i].source_file_name = (char *)H5MM_malloc(tmp_size))) - HGOTO_ERROR(H5E_OHDR, H5E_RESOURCE, NULL, + HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, NULL, "unable to allocate memory for source file name") H5MM_memcpy(mesg->storage.u.virt.list[i].source_file_name, heap_block_p, tmp_size); heap_block_p += tmp_size; + avail_buffer_space = heap_block_p_end - heap_block_p + 1; + if (avail_buffer_space <= 0) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") + /* Source dataset name */ - tmp_size = HDstrlen((const char *)heap_block_p) + 1; + tmp_size = HDstrnlen((const char *)heap_block_p, (size_t)avail_buffer_space); + if (tmp_size == (size_t)avail_buffer_space) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding - unterminated source " + "dataset name string") + else + tmp_size += 1; /* Add space for NUL terminator */ + if (NULL == (mesg->storage.u.virt.list[i].source_dset_name = (char *)H5MM_malloc(tmp_size))) - HGOTO_ERROR(H5E_OHDR, H5E_RESOURCE, NULL, + HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, NULL, "unable to allocate memory for source dataset name") H5MM_memcpy(mesg->storage.u.virt.list[i].source_dset_name, heap_block_p, tmp_size); heap_block_p += tmp_size; @@ -535,6 +716,9 @@ H5O__layout_decode(H5F_t *f, H5O_t 
H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU } /* end for */ /* Read stored checksum */ + if (H5_IS_BUFFER_OVERFLOW(heap_block_p, sizeof(uint32_t), heap_block_p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") UINT32DECODE(heap_block_p, stored_chksum) /* Compute checksum */ @@ -558,7 +742,7 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU case H5D_LAYOUT_ERROR: case H5D_NLAYOUTS: default: - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "Invalid layout class") + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "Invalid layout class") } /* end switch */ } /* end else */ diff --git a/src/H5Osdspace.c b/src/H5Osdspace.c index c4b723f0e0d..e9a0dc6e322 100644 --- a/src/H5Osdspace.c +++ b/src/H5Osdspace.c @@ -107,11 +107,11 @@ static void * H5O__sdspace_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5S_extent_t *sdim = NULL; /* New extent dimensionality structure */ + const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ + H5S_extent_t *sdim = NULL; /* New extent dimensionality structure */ unsigned flags, version; - unsigned i; /* Local counting variable */ - const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ - void *ret_value = NULL; /* Return value */ + unsigned i; + void *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE @@ -121,25 +121,37 @@ H5O__sdspace_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UN /* decode */ if (NULL == (sdim = H5FL_CALLOC(H5S_extent_t))) - HGOTO_ERROR(H5E_DATASPACE, H5E_NOSPACE, NULL, "dataspace structure allocation failed") + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, NULL, "dataspace structure allocation failed") + sdim->type = H5S_NO_CLASS; /* Check version */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") version = *p++; + if (version < H5O_SDSPACE_VERSION_1 || version > H5O_SDSPACE_VERSION_2) - HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "wrong version number in dataspace message") + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "wrong version number in dataspace message") sdim->version = version; /* Get rank */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") sdim->rank = *p++; + if (sdim->rank > H5S_MAX_RANK) - HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "simple dataspace dimensionality is too large") + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "simple dataspace dimensionality is too large") /* Get dataspace flags for later */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") flags = *p++; /* Get or determine the type of the extent */ if (version >= H5O_SDSPACE_VERSION_2) { + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") sdim->type = (H5S_class_t)*p++; + if (sdim->type != H5S_SIMPLE && sdim->rank > 0) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid rank for scalar or NULL dataspace") } /* end if */ @@ -151,36 +163,48 @@ H5O__sdspace_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UN sdim->type = H5S_SCALAR; /* Increment past reserved byte */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") p++; } /* end else */ 
HDassert(sdim->type != H5S_NULL || sdim->version >= H5O_SDSPACE_VERSION_2); /* Only Version 1 has these reserved bytes */ - if (version == H5O_SDSPACE_VERSION_1) + if (version == H5O_SDSPACE_VERSION_1) { + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") p += 4; /*reserved*/ + } /* Decode dimension sizes */ if (sdim->rank > 0) { - /* Ensure that rank doesn't cause reading passed buffer's end, - due to possible data corruption */ uint8_t sizeof_size = H5F_SIZEOF_SIZE(f); - if (p + (sizeof_size * sdim->rank - 1) > p_end) { - HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "rank might cause reading passed buffer's end") - } + + /* + * Ensure that decoding doesn't cause reading past buffer's end, + * due to possible data corruption - check that we have space to + * decode a "sdim->rank" number of hsize_t values + */ + if (H5_IS_BUFFER_OVERFLOW(p, (sizeof_size * sdim->rank), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") if (NULL == (sdim->size = (hsize_t *)H5FL_ARR_MALLOC(hsize_t, (size_t)sdim->rank))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "memory allocation failed") for (i = 0; i < sdim->rank; i++) H5F_DECODE_LENGTH(f, p, sdim->size[i]); if (flags & H5S_VALID_MAX) { if (NULL == (sdim->max = (hsize_t *)H5FL_ARR_MALLOC(hsize_t, (size_t)sdim->rank))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") - - /* Ensure that rank doesn't cause reading passed buffer's end */ - if (p + (sizeof_size * sdim->rank - 1) > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "rank might cause reading passed buffer's end") + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "memory allocation failed") + + /* + * Ensure that decoding doesn't cause reading past buffer's end, + * due to possible data corruption - check that we have space to + * decode a "sdim->rank" number of hsize_t values + */ + if (H5_IS_BUFFER_OVERFLOW(p, (sizeof_size * sdim->rank), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") for (i = 0; i < sdim->rank; i++) H5F_DECODE_LENGTH(f, p, sdim->max[i]); diff --git a/src/H5private.h b/src/H5private.h index ec77bbbbd0d..eb7d8e0a810 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -325,6 +325,11 @@ /* Raise an integer to a power of 2 */ #define H5_EXP2(n) (1 << (n)) +/* Check if a read of size bytes starting at ptr would overflow past + * the last valid byte, pointed to by buffer_end. + */ +#define H5_IS_BUFFER_OVERFLOW(ptr, size, buffer_end) (((ptr) + (size)-1) > (buffer_end)) + /* * HDF Boolean type. 
*/ From b05dde13b6b952ab6f2ce76a66d15760e24a5edf Mon Sep 17 00:00:00 2001 From: mattjala <124107509+mattjala@users.noreply.github.com> Date: Tue, 11 Apr 2023 15:22:11 -0500 Subject: [PATCH 108/231] Fix invalid memory access in H5O__ginfo_decode (#2663) --- release_docs/RELEASE.txt | 8 ++++++++ src/H5Oginfo.c | 17 ++++++++++++++++- src/H5private.h | 5 +++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index b460141adad..94f3eb3cc23 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -151,6 +151,14 @@ Bug Fixes since HDF5-1.13.3 release =================================== Library ------- + - Fixed potential heap buffer overrun in group info header decoding from malformed file + + H5O__ginfo_decode could sometimes read past allocated memory when parsing a group info message from the header of a malformed file. + + It now checks buffer size before each read to properly throw an error in these cases. + + (ML - 2023/4/6, #2601) + - Fixed potential buffer overrun issues in some object header decode routines Several checks were added to H5O__layout_decode and H5O__sdspace_decode to diff --git a/src/H5Oginfo.c b/src/H5Oginfo.c index 0f9628b5bfa..54d8b8bdcf1 100644 --- a/src/H5Oginfo.c +++ b/src/H5Oginfo.c @@ -89,7 +89,7 @@ H5FL_DEFINE_STATIC(H5O_ginfo_t); */ static void * H5O__ginfo_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { H5O_ginfo_t *ginfo = NULL; /* Pointer to group information message */ unsigned char flags; /* Flags for encoding group info */ @@ -100,7 +100,15 @@ H5O__ginfo_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign /* check args */ HDassert(p); + if (p_size == 0) + HGOTO_ERROR(H5E_OHDR, H5E_ARGS, NULL, "size of given ginfo was zero") + + /* Points at last valid byte in buffer */ + const uint8_t *p_end = p + p_size - 1; + /* Version of message */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") if (*p++ != H5O_GINFO_VERSION) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message") @@ -109,6 +117,9 @@ H5O__ginfo_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Get the flags for the group */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") + flags = *p++; if (flags & ~H5O_GINFO_ALL_FLAGS) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad flag value for message") @@ -117,6 +128,8 @@ H5O__ginfo_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign /* Get the max. # of links to store compactly & the min. 
# of links to store densely */ if (ginfo->store_link_phase_change) { + if (H5_IS_BUFFER_OVERFLOW(p, 2 * 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, ginfo->max_compact) UINT16DECODE(p, ginfo->min_dense) } /* end if */ @@ -127,6 +140,8 @@ H5O__ginfo_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign /* Get the estimated # of entries & name lengths */ if (ginfo->store_est_entry_info) { + if (H5_IS_BUFFER_OVERFLOW(p, 2 * 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, ginfo->est_num_entries) UINT16DECODE(p, ginfo->est_name_len) } /* end if */ diff --git a/src/H5private.h b/src/H5private.h index eb7d8e0a810..d92f7f6da8b 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -2436,6 +2436,11 @@ H5_DLL herr_t H5CX_pop(hbool_t update_dxpl_props); #define HDcompile_assert(e) do { typedef struct { unsigned int b: (e); } x; } while(0) */ +/* Check if a read of size bytes starting at ptr would overflow past + * the last valid byte, pointed to by buffer_end . + */ +#define H5_IS_BUFFER_OVERFLOW(ptr, size, buffer_end) (((ptr) + (size)-1) > (buffer_end)) + /* Private typedefs */ /* Union for const/non-const pointer for use by functions that manipulate From 83575abc2566565129738e68f1f1b9a2e942518f Mon Sep 17 00:00:00 2001 From: glennsong09 <43005495+glennsong09@users.noreply.github.com> Date: Tue, 11 Apr 2023 16:09:05 -0500 Subject: [PATCH 109/231] Clean up memory allocated when reading messages in H5Dlayout on error (#2602) (#2687) --- release_docs/RELEASE.txt | 10 ++++++++++ src/H5Dlayout.c | 20 +++++++++++++++----- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 94f3eb3cc23..c04ead56580 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -151,6 +151,16 @@ Bug Fixes since HDF5-1.13.3 release =================================== Library ------- + - Fixed memory leaks that could occur when reading a dataset from a + malformed file + + When attempting to read layout, pline, and efl information for a + dataset, memory leaks could occur if attempting to read pline/efl + information threw an error, which is due to the memory that was + allocated for pline and efl not being properly cleaned up on error. + + (GS - 2023/4/11 GH#2602) + - Fixed potential heap buffer overrun in group info header decoding from malformed file H5O__ginfo_decode could sometimes read past allocated memory when parsing a group info message from the header of a malformed file. 
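A minimal sketch of the cleanup-on-error idiom that the H5Dlayout.c hunk below applies: each message read sets a "copied" flag, and the error path resets only the messages that were actually read, so nothing allocated for them leaks. All names here (toy_msg_t, msg_read, msg_reset, read_layout_info) are illustrative placeholders, not HDF5 API.

    #include <stdbool.h>
    #include <stdlib.h>

    /* Toy message type owning one heap allocation (placeholder, not HDF5 API) */
    typedef struct {
        void *buf;
    } toy_msg_t;

    /* "Read" a message by allocating its resource; fail if allocation fails */
    int
    msg_read(toy_msg_t *m)
    {
        m->buf = malloc(64);
        return m->buf ? 0 : -1;
    }

    /* Release whatever the message owns */
    void
    msg_reset(toy_msg_t *m)
    {
        free(m->buf);
        m->buf = NULL;
    }

    /* Read two messages; on any failure, reset only the ones already read */
    int
    read_layout_info(toy_msg_t *pline, toy_msg_t *efl)
    {
        bool pline_copied = false;
        bool efl_copied   = false;
        int  ret_value    = 0;

        if (msg_read(pline) < 0) {
            ret_value = -1;
            goto done;
        }
        pline_copied = true;

        if (msg_read(efl) < 0) {
            ret_value = -1;
            goto done;
        }
        efl_copied = true;

    done:
        if (ret_value < 0) {
            if (pline_copied)
                msg_reset(pline);
            if (efl_copied)
                msg_reset(efl);
        }
        return ret_value;
    }

The actual patch below follows the same shape: it tracks pline_copied, layout_copied, and efl_copied in H5D__layout_oh_read() and calls H5O_msg_reset() on each copied message in the done: block when ret_value indicates failure.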
diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c index 26bdc55efe7..dd1d2b7093c 100644 --- a/src/H5Dlayout.c +++ b/src/H5Dlayout.c @@ -589,7 +589,9 @@ herr_t H5D__layout_oh_read(H5D_t *dataset, hid_t dapl_id, H5P_genplist_t *plist) { htri_t msg_exists; /* Whether a particular type of message exists */ + hbool_t pline_copied = FALSE; /* Flag to indicate that dcpl_cache.pline's message was copied */ hbool_t layout_copied = FALSE; /* Flag to indicate that layout message was copied */ + hbool_t efl_copied = FALSE; /* Flag to indicate that the EFL message was copied */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -605,7 +607,7 @@ H5D__layout_oh_read(H5D_t *dataset, hid_t dapl_id, H5P_genplist_t *plist) /* Retrieve the I/O pipeline message */ if (NULL == H5O_msg_read(&(dataset->oloc), H5O_PLINE_ID, &dataset->shared->dcpl_cache.pline)) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve message") - + pline_copied = TRUE; /* Set the I/O pipeline info in the property list */ if (H5P_set(plist, H5O_CRT_PIPELINE_NAME, &dataset->shared->dcpl_cache.pline) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set pipeline") @@ -628,6 +630,7 @@ H5D__layout_oh_read(H5D_t *dataset, hid_t dapl_id, H5P_genplist_t *plist) /* Retrieve the EFL message */ if (NULL == H5O_msg_read(&(dataset->oloc), H5O_EFL_ID, &dataset->shared->dcpl_cache.efl)) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve message") + efl_copied = TRUE; /* Set the EFL info in the property list */ if (H5P_set(plist, H5D_CRT_EXT_FILE_LIST_NAME, &dataset->shared->dcpl_cache.efl) < 0) @@ -659,10 +662,17 @@ H5D__layout_oh_read(H5D_t *dataset, hid_t dapl_id, H5P_genplist_t *plist) HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "unable to set chunk sizes") done: - if (ret_value < 0 && layout_copied) - if (H5O_msg_reset(H5O_LAYOUT_ID, &dataset->shared->layout) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, FAIL, "unable to reset layout info") - + if (ret_value < 0) { + if (pline_copied) + if (H5O_msg_reset(H5O_PLINE_ID, &dataset->shared->dcpl_cache.pline) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, FAIL, "unable to reset pipeline info") + if (layout_copied) + if (H5O_msg_reset(H5O_LAYOUT_ID, &dataset->shared->layout) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, FAIL, "unable to reset layout info") + if (efl_copied) + if (H5O_msg_reset(H5O_EFL_ID, &dataset->shared->dcpl_cache.efl) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, FAIL, "unable to reset efl message") + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__layout_oh_read() */ From 09dbb3771ad676b75763d3b632231287f5f2a12f Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Tue, 11 Apr 2023 22:57:47 -0500 Subject: [PATCH 110/231] Cache tidy (#2693) * Correct concurrency bugs when running tests, along with a bugfix & small warning cleanup. * Committing clang-format changes * Allow spaces (and tabs) in VOL connector info string from environment variable. * Parse connector name from HDF5_PLUGIN_PATH environment variable better * Correct H5VLquery_optional to use H5VL routine instead of H5I. Also add an error message to the failure return value from not finding a plugin. * Play nice with existing plugin paths * Use API routine to determine if native connector is terminal. * Committing clang-format changes * Make string size larger, to allow for connectors with longer names. * Be more flexible about testing external pass through connectors, especially if they have registered new optional operations. 
* Bring style closer to library's agreed coding style * Committing clang-format changes --------- Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: AWS ParallelCluster user Co-authored-by: Koziol --- src/H5AC.c | 65 +-- src/H5ACmpio.c | 25 +- src/H5C.c | 1143 +++++++++++++----------------------------------- src/H5Cdbg.c | 24 +- src/H5Cimage.c | 95 ++-- src/H5Cmpio.c | 52 +-- src/H5Cpkg.h | 309 ++++++------- 7 files changed, 524 insertions(+), 1189 deletions(-) diff --git a/src/H5AC.c b/src/H5AC.c index b93fca958dc..2b4c29770a2 100644 --- a/src/H5AC.c +++ b/src/H5AC.c @@ -312,7 +312,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cleaned entry list") } /* end if */ - /* construct the candidate slist for all processes. + /* construct the candidate skip list for all processes. * when the distributed strategy is selected as all processes * will use it in the case of a flush. */ @@ -439,34 +439,25 @@ H5AC_dest(H5F_t *f) /* Check if log messages are being emitted */ if (H5C_get_logging_status(f->shared->cache, &log_enabled, &curr_logging) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to get logging status") - if (log_enabled && curr_logging) { - - if (H5C_log_write_destroy_cache_msg(f->shared->cache) < 0) - - HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message") - } /* Tear down logging */ if (log_enabled) { + if (curr_logging) + if (H5C_log_write_destroy_cache_msg(f->shared->cache) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message") if (H5C_log_tear_down(f->shared->cache) < 0) - - HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "mdc logging tear-down failed") - } + HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "metadata cache logging tear-down failed") + } /* end if */ #ifdef H5_HAVE_PARALLEL - /* destroying the cache, so clear all collective entries */ if (H5C_clear_coll_entries(f->shared->cache, FALSE) < 0) - - HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed") + HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't clear collective entries") aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache); - if (aux_ptr) { - /* Sanity check */ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); @@ -480,7 +471,7 @@ H5AC_dest(H5F_t *f) * H5AC__flush_entries() and disable it afterwards, as the * skip list will be disabled after the previous flush. * - * Note that H5C_dest() does slist setup and take down as well. + * Note that H5C_dest() does skip list setup and take down as well. * Unfortunately, we can't do the setup and take down just once, * as H5C_dest() is called directly in the test code. * @@ -488,59 +479,45 @@ H5AC_dest(H5F_t *f) * point, so the overhead should be minimal. 
*/ if (H5F_ACC_RDWR & H5F_INTENT(f)) { - - /* enable and load the slist */ + /* enable and load the skip list */ if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0) - - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed") + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't enable skip list") if (H5AC__flush_entries(f) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush") - /* disable the slist -- should be empty */ + /* disable the skip list -- should be empty */ if (H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0) - - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed") - } - } -#endif /* H5_HAVE_PARALLEL */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't disable skip list") + } /* end if */ + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ /* Destroy the cache */ if (H5C_dest(f) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "can't destroy cache") f->shared->cache = NULL; #ifdef H5_HAVE_PARALLEL - if (aux_ptr != NULL) { - if (aux_ptr->d_slist_ptr != NULL) { - HDassert(H5SL_count(aux_ptr->d_slist_ptr) == 0); H5SL_close(aux_ptr->d_slist_ptr); - } /* end if */ if (aux_ptr->c_slist_ptr != NULL) { - HDassert(H5SL_count(aux_ptr->c_slist_ptr) == 0); H5SL_close(aux_ptr->c_slist_ptr); - } /* end if */ if (aux_ptr->candidate_slist_ptr != NULL) { - HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) == 0); H5SL_close(aux_ptr->candidate_slist_ptr); - } /* end if */ aux_ptr->magic = 0; aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr); - } /* end if */ #endif /* H5_HAVE_PARALLEL */ @@ -1215,13 +1192,10 @@ H5AC_prep_for_file_flush(H5F_t *f) HDassert(f->shared->cache); if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0) - - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist enabled failed") + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't enable skip list") done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5AC_prep_for_file_flush() */ /*------------------------------------------------------------------------- @@ -1235,7 +1209,7 @@ H5AC_prep_for_file_flush(H5F_t *f) * to do any necessary necessary cleanup work after a cache * flush. * - * Initially, this means taking down the slist after the + * Initially, this means taking down the skip list after the * flush. We do this in a separate call because * H5F__flush_phase2() make repeated calls to H5AC_flush(). * Handling this detail in separate calls allows us to avoid @@ -1262,13 +1236,10 @@ H5AC_secure_from_file_flush(H5F_t *f) HDassert(f->shared->cache); if (H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0) - - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist enabled failed") + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't disable skip list") done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5AC_secure_from_file_flush() */ /*------------------------------------------------------------------------- diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c index 40e68fd131c..3299a30fcf6 100644 --- a/src/H5ACmpio.c +++ b/src/H5ACmpio.c @@ -1888,12 +1888,9 @@ H5AC__rsp__p0_only__flush(H5F_t *f) * However, when flushing from within the close operation from a file, * it's possible to skip this barrier (on the second flush of the cache). 
*/ - if (!H5CX_get_mpi_file_flushing()) { - + if (!H5CX_get_mpi_file_flushing()) if (MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result) - } /* Flush data to disk, from rank 0 process */ if (aux_ptr->mpi_rank == 0) { @@ -2102,31 +2099,28 @@ H5AC__run_sync_point(H5F_t *f, int sync_point_op) /* Sanity checks */ HDassert(f != NULL); - cache_ptr = f->shared->cache; - HDassert(cache_ptr != NULL); - aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); - HDassert(aux_ptr != NULL); HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) || (sync_point_op == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED)); #ifdef H5AC_DEBUG_DIRTY_BYTES_CREATION - HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/m/mu) = %zu/%u/%zu/%u/%zu/%u\n", aux_ptr->mpi_rank, - aux_ptr->dirty_bytes_propagations, aux_ptr->unprotect_dirty_bytes, + HDfprintf(stdout, "%d:%s...:%u: (u/uu/i/iu/m/mu) = %zu/%u/%zu/%u/%zu/%u\n", aux_ptr->mpi_rank, + __func__ aux_ptr->dirty_bytes_propagations, aux_ptr->unprotect_dirty_bytes, aux_ptr->unprotect_dirty_bytes_updates, aux_ptr->insert_dirty_bytes, aux_ptr->insert_dirty_bytes_updates, aux_ptr->move_dirty_bytes, aux_ptr->move_dirty_bytes_updates); #endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */ - /* clear collective access flag on half of the entries in the - cache and mark them as independent in case they need to be - evicted later. All ranks are guaranteed to mark the same entries - since we don't modify the order of the collectively accessed - entries except through collective access. */ + /* Clear collective access flag on half of the entries in the cache and + * mark them as independent in case they need to be evicted later. All + * ranks are guaranteed to mark the same entries since we don't modify the + * order of the collectively accessed entries except through collective + * access. 
+ */ if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.") @@ -2188,7 +2182,6 @@ H5AC__run_sync_point(H5F_t *f, int sync_point_op) #endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */ done: - FUNC_LEAVE_NOAPI(ret_value) } /* H5AC__run_sync_point() */ diff --git a/src/H5C.c b/src/H5C.c index c41b143978d..7fa43aebfc5 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -93,42 +93,27 @@ typedef H5C_cache_entry_t *H5C_cache_entry_ptr_t; /********************/ static herr_t H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); - static herr_t H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp); - static herr_t H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp); - static herr_t H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted); - static herr_t H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr, size_t *new_max_cache_size_ptr, hbool_t write_permitted); - static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr); - static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted); - static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr); - static herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr); - static herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr); - static herr_t H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size); - static herr_t H5C__flush_invalidate_cache(H5F_t *f, unsigned flags); - static herr_t H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags); - static herr_t H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags); - -static void *H5C__load_entry(H5F_t *f, +static void *H5C__load_entry(H5F_t *f, #ifdef H5_HAVE_PARALLEL hbool_t coll_access, #endif /* H5_HAVE_PARALLEL */ const H5C_class_t *type, haddr_t addr, void *udata); static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry); - static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry); static herr_t H5C__serialize_ring(H5F_t *f, H5C_ring_t ring); @@ -179,7 +164,6 @@ H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t); * flag to determine whether writes are permitted. * * Return: Success: Pointer to the new instance. 
- * * Failure: NULL * * Programmer: John Mainzer @@ -338,33 +322,33 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id, cache_ptr->resize_in_progress = FALSE; cache_ptr->msic_in_progress = FALSE; - (cache_ptr->resize_ctl).version = H5C__CURR_AUTO_SIZE_CTL_VER; - (cache_ptr->resize_ctl).rpt_fcn = NULL; - (cache_ptr->resize_ctl).set_initial_size = FALSE; - (cache_ptr->resize_ctl).initial_size = H5C__DEF_AR_INIT_SIZE; - (cache_ptr->resize_ctl).min_clean_fraction = H5C__DEF_AR_MIN_CLEAN_FRAC; - (cache_ptr->resize_ctl).max_size = H5C__DEF_AR_MAX_SIZE; - (cache_ptr->resize_ctl).min_size = H5C__DEF_AR_MIN_SIZE; - (cache_ptr->resize_ctl).epoch_length = H5C__DEF_AR_EPOCH_LENGTH; - - (cache_ptr->resize_ctl).incr_mode = H5C_incr__off; - (cache_ptr->resize_ctl).lower_hr_threshold = H5C__DEF_AR_LOWER_THRESHHOLD; - (cache_ptr->resize_ctl).increment = H5C__DEF_AR_INCREMENT; - (cache_ptr->resize_ctl).apply_max_increment = TRUE; - (cache_ptr->resize_ctl).max_increment = H5C__DEF_AR_MAX_INCREMENT; - - (cache_ptr->resize_ctl).flash_incr_mode = H5C_flash_incr__off; - (cache_ptr->resize_ctl).flash_multiple = 1.0; - (cache_ptr->resize_ctl).flash_threshold = 0.25; - - (cache_ptr->resize_ctl).decr_mode = H5C_decr__off; - (cache_ptr->resize_ctl).upper_hr_threshold = H5C__DEF_AR_UPPER_THRESHHOLD; - (cache_ptr->resize_ctl).decrement = H5C__DEF_AR_DECREMENT; - (cache_ptr->resize_ctl).apply_max_decrement = TRUE; - (cache_ptr->resize_ctl).max_decrement = H5C__DEF_AR_MAX_DECREMENT; - (cache_ptr->resize_ctl).epochs_before_eviction = H5C__DEF_AR_EPCHS_B4_EVICT; - (cache_ptr->resize_ctl).apply_empty_reserve = TRUE; - (cache_ptr->resize_ctl).empty_reserve = H5C__DEF_AR_EMPTY_RESERVE; + cache_ptr->resize_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + cache_ptr->resize_ctl.rpt_fcn = NULL; + cache_ptr->resize_ctl.set_initial_size = FALSE; + cache_ptr->resize_ctl.initial_size = H5C__DEF_AR_INIT_SIZE; + cache_ptr->resize_ctl.min_clean_fraction = H5C__DEF_AR_MIN_CLEAN_FRAC; + cache_ptr->resize_ctl.max_size = H5C__DEF_AR_MAX_SIZE; + cache_ptr->resize_ctl.min_size = H5C__DEF_AR_MIN_SIZE; + cache_ptr->resize_ctl.epoch_length = H5C__DEF_AR_EPOCH_LENGTH; + + cache_ptr->resize_ctl.incr_mode = H5C_incr__off; + cache_ptr->resize_ctl.lower_hr_threshold = H5C__DEF_AR_LOWER_THRESHHOLD; + cache_ptr->resize_ctl.increment = H5C__DEF_AR_INCREMENT; + cache_ptr->resize_ctl.apply_max_increment = TRUE; + cache_ptr->resize_ctl.max_increment = H5C__DEF_AR_MAX_INCREMENT; + + cache_ptr->resize_ctl.flash_incr_mode = H5C_flash_incr__off; + cache_ptr->resize_ctl.flash_multiple = 1.0; + cache_ptr->resize_ctl.flash_threshold = 0.25; + + cache_ptr->resize_ctl.decr_mode = H5C_decr__off; + cache_ptr->resize_ctl.upper_hr_threshold = H5C__DEF_AR_UPPER_THRESHHOLD; + cache_ptr->resize_ctl.decrement = H5C__DEF_AR_DECREMENT; + cache_ptr->resize_ctl.apply_max_decrement = TRUE; + cache_ptr->resize_ctl.max_decrement = H5C__DEF_AR_MAX_DECREMENT; + cache_ptr->resize_ctl.epochs_before_eviction = H5C__DEF_AR_EPCHS_B4_EVICT; + cache_ptr->resize_ctl.apply_empty_reserve = TRUE; + cache_ptr->resize_ctl.empty_reserve = H5C__DEF_AR_EMPTY_RESERVE; cache_ptr->epoch_markers_active = 0; @@ -485,12 +469,11 @@ H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, break; case increase: - HDassert(hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold); + HDassert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold); HDassert(old_max_cache_size < new_max_cache_size); HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", - 
cache_ptr->prefix, hit_rate, (cache_ptr->resize_ctl).lower_hr_threshold); - + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); HDfprintf(stdout, "%scache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); break; @@ -499,9 +482,7 @@ H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, HDassert(old_max_cache_size < new_max_cache_size); HDfprintf(stdout, "%sflash cache resize(%d) -- size threshold = %zu.\n", cache_ptr->prefix, - (int)((cache_ptr->resize_ctl).flash_incr_mode), - cache_ptr->flash_size_increase_threshold); - + (int)(cache_ptr->resize_ctl.flash_incr_mode), cache_ptr->flash_size_increase_threshold); HDfprintf(stdout, "%s cache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); break; @@ -509,20 +490,19 @@ H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, case decrease: HDassert(old_max_cache_size > new_max_cache_size); - switch ((cache_ptr->resize_ctl).decr_mode) { + switch (cache_ptr->resize_ctl.decr_mode) { case H5C_decr__off: HDfprintf(stdout, "%sAuto cache resize -- decrease off. HR = %lf\n", cache_ptr->prefix, hit_rate); break; case H5C_decr__threshold: - HDassert(hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold); + HDassert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold); HDfprintf(stdout, "%sAuto cache resize -- decrease by threshold. HR = %lf > %6.5lf\n", - cache_ptr->prefix, hit_rate, (cache_ptr->resize_ctl).upper_hr_threshold); - + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold); HDfprintf(stdout, "%sout of bounds high (%6.5lf).\n", cache_ptr->prefix, - (cache_ptr->resize_ctl).upper_hr_threshold); + cache_ptr->resize_ctl.upper_hr_threshold); break; case H5C_decr__age_out: @@ -531,11 +511,11 @@ H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, break; case H5C_decr__age_out_with_threshold: - HDassert(hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold); + HDassert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold); HDfprintf(stdout, "%sAuto cache resize -- decrease by ageout with threshold. 
HR = %lf > %6.5lf\n", - cache_ptr->prefix, hit_rate, (cache_ptr->resize_ctl).upper_hr_threshold); + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold); break; default: @@ -549,7 +529,7 @@ H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, case at_max_size: HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", - cache_ptr->prefix, hit_rate, (cache_ptr->resize_ctl).lower_hr_threshold); + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); HDfprintf(stdout, "%s cache already at maximum size so no change.\n", cache_ptr->prefix); break; @@ -570,10 +550,10 @@ H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, break; case not_full: - HDassert(hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold); + HDassert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold); HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", - cache_ptr->prefix, hit_rate, (cache_ptr->resize_ctl).lower_hr_threshold); + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); HDfprintf(stdout, "%s cache not full so no increase in size.\n", cache_ptr->prefix); break; @@ -628,8 +608,8 @@ H5C_prep_for_file_close(H5F_t *f) HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cache image") #ifdef H5_HAVE_PARALLEL - if ((H5F_INTENT(f) & H5F_ACC_RDWR) && (!image_generated) && (cache_ptr->aux_ptr != NULL) && - (f->shared->fs_persist)) { + if ((H5F_INTENT(f) & H5F_ACC_RDWR) && !image_generated && cache_ptr->aux_ptr != NULL && + f->shared->fs_persist) { /* If persistent free space managers are enabled, flushing the * metadata cache may result in the deletion, insertion, and/or * dirtying of entries. @@ -692,8 +672,10 @@ H5C_prep_for_file_close(H5F_t *f) herr_t H5C_dest(H5F_t *f) { - H5C_t *cache_ptr = f->shared->cache; - herr_t ret_value = SUCCEED; /* Return value */ + H5C_t *cache_ptr = f->shared->cache; + H5C_tag_info_t *item = NULL; + H5C_tag_info_t *tmp = NULL; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -709,21 +691,16 @@ H5C_dest(H5F_t *f) /* Enable the slist, as it is needed in the flush */ if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed") /* Flush and invalidate all cache entries */ if (H5C__flush_invalidate_cache(f, H5C__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache") /* Generate & write cache image if requested */ - if (cache_ptr->image_ctl.generate_image) { - + if (cache_ptr->image_ctl.generate_image) if (H5C__generate_cache_image(f, cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "Can't generate metadata cache image") - } /* Question: Is it possible for cache_ptr->slist be non-null at this * point? If no, shouldn't this if statement be an assert? @@ -733,12 +710,9 @@ H5C_dest(H5F_t *f) HDassert(cache_ptr->slist_size == 0); H5SL_close(cache_ptr->slist_ptr); - cache_ptr->slist_ptr = NULL; } - H5C_tag_info_t *item = NULL; - H5C_tag_info_t *tmp = NULL; HASH_ITER(hh, cache_ptr->tag_list, item, tmp) { HASH_DELETE(hh, cache_ptr->tag_list, item); @@ -750,12 +724,9 @@ H5C_dest(H5F_t *f) #ifndef NDEBUG #ifdef H5C_DO_SANITY_CHECKS - - if (cache_ptr->get_entry_ptr_from_addr_counter > 0) { - + if (cache_ptr->get_entry_ptr_from_addr_counter > 0) HDfprintf(stdout, "*** %" PRId64 " calls to H5C_get_entry_ptr_from_add(). 
***\n", cache_ptr->get_entry_ptr_from_addr_counter); - } #endif /* H5C_DO_SANITY_CHECKS */ cache_ptr->magic = 0; @@ -764,8 +735,7 @@ H5C_dest(H5F_t *f) cache_ptr = H5FL_FREE(H5C_t, cache_ptr); done: - - if ((ret_value < 0) && (cache_ptr) && (cache_ptr->slist_ptr)) { + if (ret_value < 0 && cache_ptr && cache_ptr->slist_ptr) /* Arguably, it shouldn't be necessary to re-enable the slist after * the call to H5C__flush_invalidate_cache(), as the metadata cache * should be discarded. However, in the test code, we make multiple @@ -773,12 +743,9 @@ H5C_dest(H5F_t *f) * and the cache still exist. JRM -- 5/15/20 */ if (H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist on flush dest failure failed") - } FUNC_LEAVE_NOAPI(ret_value) - } /* H5C_dest() */ /*------------------------------------------------------------------------- @@ -805,17 +772,14 @@ H5C_evict(H5F_t *f) /* Enable the slist, as it is needed in the flush */ if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed") /* Flush and invalidate all cache entries except the pinned entries */ if (H5C__flush_invalidate_cache(f, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to evict entries in the cache") /* Disable the slist */ if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist disabled failed") done: @@ -990,7 +954,6 @@ H5C_flush_cache(H5F_t *f, unsigned flags) */ ring = H5C_RING_USER; while (ring < H5C_RING_NTYPES) { - /* Only call the free space manager settle routines when close * warning has been received. */ @@ -1870,35 +1833,29 @@ H5C_resize_entry(void *thing, size_t new_size) /* do a flash cache size increase if appropriate */ if (cache_ptr->flash_size_increase_possible) { - if (new_size > entry_ptr->size) { size_t size_increase; size_increase = new_size - entry_ptr->size; - - if (size_increase >= cache_ptr->flash_size_increase_threshold) { + if (size_increase >= cache_ptr->flash_size_increase_threshold) if (H5C__flash_increase_cache_size(cache_ptr, entry_ptr->size, new_size) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "flash cache increase failed") - } } } /* update the pinned and/or protected entry list */ - if (entry_ptr->is_pinned) { + if (entry_ptr->is_pinned) H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pel_len), (cache_ptr->pel_size), (entry_ptr->size), (new_size)) - } /* end if */ - if (entry_ptr->is_protected) { + if (entry_ptr->is_protected) H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pl_len), (cache_ptr->pl_size), (entry_ptr->size), (new_size)) - } /* end if */ #ifdef H5_HAVE_PARALLEL - if (entry_ptr->coll_access) { + if (entry_ptr->coll_access) H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->coll_list_len), (cache_ptr->coll_list_size), (entry_ptr->size), (new_size)) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ +#endif /* H5_HAVE_PARALLEL */ /* update statistics just before changing the entry size */ H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size); @@ -2111,7 +2068,7 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign * participate in the bcast. 
*/ if (coll_access) { - if (!(entry_ptr->is_dirty) && !(entry_ptr->coll_access)) { + if (!entry_ptr->is_dirty && !entry_ptr->coll_access) { MPI_Comm comm; /* File MPI Communicator */ int mpi_code; /* MPI error code */ int buf_size; @@ -2132,14 +2089,11 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); #endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - if (0 == mpi_rank) { - if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0) { - /* If image generation fails, push an error but - * still participate in the following MPI_Bcast - */ - HDONE_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image") - } - } + if (0 == mpi_rank && H5C__generate_image(f, cache_ptr, entry_ptr) < 0) + /* If image generation fails, push an error but + * still participate in the following MPI_Bcast + */ + HDONE_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image") } /* end if */ HDassert(entry_ptr->image_ptr); @@ -2151,11 +2105,10 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign entry_ptr->coll_access = TRUE; H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL) } /* end if */ - else if (entry_ptr->coll_access) { + else if (entry_ptr->coll_access) H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL) - } /* end else-if */ - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ #ifdef H5C_DO_TAGGING_SANITY_CHECKS { @@ -2182,11 +2135,8 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign thing = (void *)entry_ptr; } else { - /* must try to load the entry from disk. */ - hit = FALSE; - if (NULL == (thing = H5C__load_entry(f, #ifdef H5_HAVE_PARALLEL coll_access, @@ -2210,12 +2160,10 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign /* If the entry is very large, and we are configured to allow it, * we may wish to perform a flash cache size increase. */ - if ((cache_ptr->flash_size_increase_possible) && - (entry_ptr->size > cache_ptr->flash_size_increase_threshold)) { - + if (cache_ptr->flash_size_increase_possible && + (entry_ptr->size > cache_ptr->flash_size_increase_threshold)) if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed") - } if (cache_ptr->index_size >= cache_ptr->max_cache_size) empty_space = 0; @@ -2226,7 +2174,7 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign * that if evictions are enabled, we will call H5C__make_space_in_cache() * regardless if the min_free_space requirement is not met. */ - if ((cache_ptr->evictions_enabled) && + if (cache_ptr->evictions_enabled && (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) || ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size))) { @@ -2296,11 +2244,8 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign entry_ptr->flush_me_last = flush_last; H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, NULL) - - if ((entry_ptr->is_dirty) && (!(entry_ptr->in_slist))) { - + if (entry_ptr->is_dirty && !entry_ptr->in_slist) H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, NULL) - } /* insert the entry in the data structures used by the replacement * policy. 
We are just going to take it out again when we update @@ -2329,28 +2274,23 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL) entry_ptr->is_protected = TRUE; - if (read_only) { entry_ptr->is_read_only = TRUE; entry_ptr->ro_ref_count = 1; } /* end if */ - entry_ptr->dirtied = FALSE; } /* end else */ H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) - H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) ret_value = thing; - if ((cache_ptr->evictions_enabled) && - ((cache_ptr->size_decreased) || - ((cache_ptr->resize_enabled) && - (cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length)))) { + if (cache_ptr->evictions_enabled && + (cache_ptr->size_decreased || + (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)))) { if (!have_write_permitted) { - if (cache_ptr->check_write_permitted != NULL) { if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted") @@ -2358,19 +2298,14 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign have_write_permitted = TRUE; } else { - - write_permitted = cache_ptr->write_permitted; - + write_permitted = cache_ptr->write_permitted; have_write_permitted = TRUE; } } - if (cache_ptr->resize_enabled && - (cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length)) { - + if (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)) if (H5C__auto_adjust_cache_size(f, write_permitted) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed") - } /* end if */ if (cache_ptr->size_decreased) { cache_ptr->size_decreased = FALSE; @@ -2405,14 +2340,13 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign * the cache and protected. We must wait until it is protected so it is not * evicted during the notify callback. */ - if (was_loaded) { + if (was_loaded) /* If the entry's type has a 'notify' callback send a 'after load' * notice now that the entry is fully integrated into the cache. 
*/ if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, NULL, "can't notify client about entry inserted into cache") - } /* end if */ #ifdef H5_HAVE_PARALLEL /* Make sure the size of the collective entries in the cache remain in check */ @@ -2551,21 +2485,21 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_p break; case H5C_decr__threshold: - if ((config_ptr->upper_hr_threshold >= 1.0) || (config_ptr->decrement >= 1.0) || - ((config_ptr->apply_max_decrement) && (config_ptr->max_decrement <= 0))) + if (config_ptr->upper_hr_threshold >= 1.0 || config_ptr->decrement >= 1.0 || + (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) cache_ptr->size_decrease_possible = FALSE; break; case H5C_decr__age_out: - if (((config_ptr->apply_empty_reserve) && (config_ptr->empty_reserve >= 1.0)) || - ((config_ptr->apply_max_decrement) && (config_ptr->max_decrement <= 0))) + if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || + (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) cache_ptr->size_decrease_possible = FALSE; break; case H5C_decr__age_out_with_threshold: - if (((config_ptr->apply_empty_reserve) && (config_ptr->empty_reserve >= 1.0)) || - ((config_ptr->apply_max_decrement) && (config_ptr->max_decrement <= 0)) || - (config_ptr->upper_hr_threshold >= 1.0)) + if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || + (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0) || + config_ptr->upper_hr_threshold >= 1.0) cache_ptr->size_decrease_possible = FALSE; break; @@ -2583,8 +2517,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_p * following: */ cache_ptr->resize_enabled = cache_ptr->size_increase_possible || cache_ptr->size_decrease_possible; - - cache_ptr->resize_ctl = *config_ptr; + cache_ptr->resize_ctl = *config_ptr; /* Resize the cache to the supplied initial value if requested, or as * necessary to force it within the bounds of the current automatic @@ -2603,7 +2536,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_p else new_max_cache_size = cache_ptr->max_cache_size; - new_min_clean_size = (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction)); + new_min_clean_size = (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction)); /* since new_min_clean_size is of type size_t, we have * @@ -2644,7 +2577,6 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_p * If we haven't already ruled out flash cache size increases above, * go ahead and configure it. 
*/ - if (cache_ptr->flash_size_increase_possible) { switch (config_ptr->flash_incr_mode) { case H5C_flash_incr__off: @@ -2654,8 +2586,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_p case H5C_flash_incr__add_space: cache_ptr->flash_size_increase_possible = TRUE; cache_ptr->flash_size_increase_threshold = - (size_t)(((double)(cache_ptr->max_cache_size)) * - ((cache_ptr->resize_ctl).flash_threshold)); + (size_t)(((double)(cache_ptr->max_cache_size)) * (cache_ptr->resize_ctl.flash_threshold)); break; default: /* should be unreachable */ @@ -2773,24 +2704,15 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_sli FUNC_ENTER_NOAPI(FAIL) if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry") #if H5C__SLIST_OPT_ENABLED if (slist_enabled) { - - if (cache_ptr->slist_enabled) { - - HDassert(FALSE); + if (cache_ptr->slist_enabled) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already enabled?") - } - - if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) { - - HDassert(FALSE); - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty (1)?") - } + if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?") /* set cache_ptr->slist_enabled to TRUE so that the slist * maintenance macros will be enabled. @@ -2799,16 +2721,10 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_sli /* scan the index list and insert all dirty entries in the slist */ entry_ptr = cache_ptr->il_head; - while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - - if (entry_ptr->is_dirty) { - + if (entry_ptr->is_dirty) H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - } - entry_ptr = entry_ptr->il_next; } @@ -2821,35 +2737,22 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_sli HDassert(cache_ptr->dirty_index_size == cache_ptr->slist_size); } else { /* take down the skip list */ - - if (!cache_ptr->slist_enabled) { - - HDassert(FALSE); + if (!cache_ptr->slist_enabled) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already disabled?") - } if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) { - if (clear_slist) { - H5SL_node_t *node_ptr; node_ptr = H5SL_first(cache_ptr->slist_ptr); - while (node_ptr != NULL) { - entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE); - node_ptr = H5SL_first(cache_ptr->slist_ptr); } } - else { - - HDassert(FALSE); - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty (2)?") - } + else + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?") } cache_ptr->slist_enabled = FALSE; @@ -2865,9 +2768,7 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_sli #endif /* H5C__SLIST_OPT_ENABLED is FALSE */ done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C_set_slist_enabled() */ /*------------------------------------------------------------------------- @@ -2993,7 +2894,6 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) HDassert(!(free_file_space && take_ownership)); entry_ptr = (H5C_cache_entry_t *)thing; - HDassert(entry_ptr->addr == addr); /* also set the dirtied variable if the dirtied field is set in @@ -3014,13 +2914,11 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) * drops to zero. 
*/ if (entry_ptr->ro_ref_count > 1) { - /* Sanity check */ HDassert(entry_ptr->is_protected); HDassert(entry_ptr->is_read_only); if (dirtied) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??") /* Reduce the RO ref count */ @@ -3028,35 +2926,26 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) /* Pin or unpin the entry as requested. */ if (pin_entry) { - /* Pin the entry from a client */ if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") } else if (unpin_entry) { - /* Unpin the entry from a client */ if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client") - } /* end if */ } else { - if (entry_ptr->is_read_only) { - /* Sanity check */ HDassert(entry_ptr->ro_ref_count == 1); if (dirtied) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??") entry_ptr->is_read_only = FALSE; entry_ptr->ro_ref_count = 0; - } /* end if */ #ifdef H5_HAVE_PARALLEL @@ -3086,32 +2975,20 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) #endif /* H5_HAVE_PARALLEL */ if (!entry_ptr->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??") /* Mark the entry as dirty if appropriate */ entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied); - - if (dirtied) { - - if (entry_ptr->image_up_to_date) { - - entry_ptr->image_up_to_date = FALSE; - - if (entry_ptr->flush_dep_nparents > 0) { - - if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) - - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "Can't propagate serialization status to fd parents") - - } /* end if */ - } /* end if */ - } /* end if */ + if (dirtied && entry_ptr->image_up_to_date) { + entry_ptr->image_up_to_date = FALSE; + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "Can't propagate serialization status to fd parents") + } /* end if */ /* Check for newly dirtied entry */ if (was_clean && entry_ptr->is_dirty) { - /* Update index for newly dirtied entry */ H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr) @@ -3119,20 +2996,16 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) * 'entry dirtied' notice now that the entry is fully * integrated into the cache. */ - if ((entry_ptr->type->notify) && - ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)) - + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set") /* Propagate the flush dep dirty flag up the flush dependency chain * if appropriate */ - if (entry_ptr->flush_dep_nparents > 0) { - + if (entry_ptr->flush_dep_nparents > 0) if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") - } } /* end if */ /* Check for newly clean entry */ else if (!was_clean && !entry_ptr->is_dirty) { @@ -3141,36 +3014,28 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) * 'entry cleaned' notice now that the entry is fully * integrated into the cache. 
*/ - if ((entry_ptr->type->notify) && - ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)) - + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared") /* Propagate the flush dep clean flag up the flush dependency chain * if appropriate */ - if (entry_ptr->flush_dep_nparents > 0) { - + if (entry_ptr->flush_dep_nparents > 0) if (H5C__mark_flush_dep_clean(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") - } } /* end else-if */ /* Pin or unpin the entry as requested. */ if (pin_entry) { - /* Pin the entry from a client */ if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") } else if (unpin_entry) { - /* Unpin the entry from a client */ if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client") } /* end if */ @@ -3185,14 +3050,10 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) * and then add it to the skip list if it isn't there already. */ if (entry_ptr->is_dirty) { - entry_ptr->flush_marker |= set_flush_marker; - - if (!entry_ptr->in_slist) { - + if (!entry_ptr->in_slist) /* this is a no-op if cache_ptr->slist_enabled is FALSE */ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - } } /* end if */ /* This implementation of the "deleted" option is a bit inefficient, as @@ -3201,32 +3062,24 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) * we do this, we may want to optimize a bit. */ if (deleted) { - unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__FLUSH_INVALIDATE_FLAG); /* verify that the target entry is in the cache. 
*/ H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) if (test_entry_ptr == NULL) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?") - else if (test_entry_ptr != entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?") /* Set the 'free file space' flag for the flush, if needed */ - if (free_file_space) { - + if (free_file_space) flush_flags |= H5C__FREE_FILE_SPACE_FLAG; - } /* Set the "take ownership" flag for the flush, if needed */ - if (take_ownership) { - + if (take_ownership) flush_flags |= H5C__TAKE_OWNERSHIP_FLAG; - } /* Delete the entry from the skip list on destroy */ flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; @@ -3234,9 +3087,7 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) HDassert((!cache_ptr->slist_enabled) || (((!was_clean) || dirtied) == (entry_ptr->in_slist))); if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry") - } /* end if */ #ifdef H5_HAVE_PARALLEL else if (clear_entry) { @@ -3244,18 +3095,14 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) if (test_entry_ptr == NULL) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?") - else if (test_entry_ptr != entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?") if (H5C__flush_single_entry(f, entry_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry") - } /* end else if */ #endif /* H5_HAVE_PARALLEL */ } @@ -3263,7 +3110,6 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) done: - #ifdef H5C_DO_EXTREME_SANITY_CHECKS if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) @@ -3272,7 +3118,6 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ FUNC_LEAVE_NOAPI(ret_value) - } /* H5C_unprotect() */ /*------------------------------------------------------------------------- @@ -3444,27 +3289,20 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version") if ((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) { - if (config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big") - if (config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small") - if (config_ptr->min_size > config_ptr->max_size) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size") - if (config_ptr->set_initial_size && ((config_ptr->initial_size < config_ptr->min_size) || (config_ptr->initial_size > config_ptr->max_size))) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "initial_size must be in the interval [min_size, max_size]") - if ((config_ptr->min_clean_fraction < 0.0) || (config_ptr->min_clean_fraction > 1.0)) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_clean_fraction must be in the interval [0.0, 1.0]") - if (config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small") - if (config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH) HGOTO_ERROR(H5E_ARGS, 
H5E_BADVALUE, FAIL, "epoch_length too big") } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */ @@ -3477,7 +3315,6 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests) if ((config_ptr->lower_hr_threshold < 0.0) || (config_ptr->lower_hr_threshold > 1.0)) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "lower_hr_threshold must be in the range [0.0, 1.0]") - if (config_ptr->increment < 1.0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "increment must be greater than or equal to 1.0") @@ -3507,18 +3344,14 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests) } /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */ if ((tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0) { - if ((config_ptr->decr_mode != H5C_decr__off) && (config_ptr->decr_mode != H5C_decr__threshold) && (config_ptr->decr_mode != H5C_decr__age_out) && - (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) { - + (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode") - } if (config_ptr->decr_mode == H5C_decr__threshold) { if (config_ptr->upper_hr_threshold > 1.0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0") - if ((config_ptr->decrement > 1.0) || (config_ptr->decrement < 0.0)) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]") @@ -3529,14 +3362,12 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests) if ((config_ptr->decr_mode == H5C_decr__age_out) || (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) { - if (config_ptr->epochs_before_eviction < 1) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive") if (config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big") - - if ((config_ptr->apply_empty_reserve) && - ((config_ptr->empty_reserve > 1.0) || (config_ptr->empty_reserve < 0.0))) + if (config_ptr->apply_empty_reserve && + (config_ptr->empty_reserve > 1.0 || config_ptr->empty_reserve < 0.0)) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty_reserve must be in the interval [0.0, 1.0]") /* no need to check max_decrement as it is a size_t @@ -3544,12 +3375,11 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests) */ } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */ - if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) { + if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) if ((config_ptr->upper_hr_threshold > 1.0) || (config_ptr->upper_hr_threshold < 0.0)) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be in the interval [0.0, 1.0]") - } /* H5C_decr__age_out_with_threshold */ - } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */ + } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */ if ((tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0) { if ((config_ptr->incr_mode == H5C_incr__threshold) && @@ -4012,9 +3842,9 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) HDassert(f); HDassert(cache_ptr); HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length); - HDassert(0.0 <= (cache_ptr->resize_ctl).min_clean_fraction); - HDassert((cache_ptr->resize_ctl).min_clean_fraction <= 100.0); + HDassert(cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length); + HDassert(0.0 <= cache_ptr->resize_ctl.min_clean_fraction); + HDassert(cache_ptr->resize_ctl.min_clean_fraction <= 
100.0); /* check to see if cache_ptr->resize_in_progress is TRUE. If it, this * is a re-entrant call via a client callback called in the resize @@ -4031,55 +3861,43 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) if (!cache_ptr->resize_enabled) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled") - HDassert(((cache_ptr->resize_ctl).incr_mode != H5C_incr__off) || - ((cache_ptr->resize_ctl).decr_mode != H5C_decr__off)); + HDassert((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) || + (cache_ptr->resize_ctl.decr_mode != H5C_decr__off)); if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate") HDassert((0.0 <= hit_rate) && (hit_rate <= 1.0)); - switch ((cache_ptr->resize_ctl).incr_mode) { + switch (cache_ptr->resize_ctl.incr_mode) { case H5C_incr__off: if (cache_ptr->size_increase_possible) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?") break; case H5C_incr__threshold: - if (hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold) { - - if (!cache_ptr->size_increase_possible) { - + if (hit_rate < cache_ptr->resize_ctl.lower_hr_threshold) { + if (!cache_ptr->size_increase_possible) status = increase_disabled; - } - else if (cache_ptr->max_cache_size >= (cache_ptr->resize_ctl).max_size) { - - HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).max_size); + else if (cache_ptr->max_cache_size >= cache_ptr->resize_ctl.max_size) { + HDassert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.max_size); status = at_max_size; } - else if (!cache_ptr->cache_full) { - + else if (!cache_ptr->cache_full) status = not_full; - } else { - new_max_cache_size = - (size_t)(((double)(cache_ptr->max_cache_size)) * (cache_ptr->resize_ctl).increment); + (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.increment); /* clip to max size if necessary */ - if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) { - - new_max_cache_size = (cache_ptr->resize_ctl).max_size; - } + if (new_max_cache_size > cache_ptr->resize_ctl.max_size) + new_max_cache_size = cache_ptr->resize_ctl.max_size; /* clip to max increment if necessary */ - if (((cache_ptr->resize_ctl).apply_max_increment) && - ((cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment) < - new_max_cache_size)) { - - new_max_cache_size = - cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment; - } + if (cache_ptr->resize_ctl.apply_max_increment && + ((cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment) < + new_max_cache_size)) + new_max_cache_size = cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment; status = increase; } @@ -4102,9 +3920,9 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) * entry. The inserted_epoch_marker flag is used to track this. 
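The H5C_incr__threshold branch above scales max_cache_size by the configured increment and then clips the result twice, first to the hard ceiling and then to the maximum per-step growth. A small standalone sketch of the same arithmetic, with made-up numbers:

#include <stddef.h>
#include <stdio.h>

/* Candidate new maximum cache size when the hit rate falls below the lower
 * threshold: scale by `increment`, then clip to the configured ceiling and
 * to the maximum single-step increment.  All values are hypothetical. */
static size_t
next_max_size(size_t cur_max, double increment, size_t hard_max,
              int apply_max_increment, size_t max_increment)
{
    size_t new_max = (size_t)((double)cur_max * increment);

    if (new_max > hard_max)                      /* clip to max size */
        new_max = hard_max;
    if (apply_max_increment && cur_max + max_increment < new_max)
        new_max = cur_max + max_increment;       /* clip to max step */
    return new_max;
}

int
main(void)
{
    /* 4 MiB cache, doubling factor, 32 MiB ceiling, 1 MiB max step */
    size_t new_max = next_max_size(4u << 20, 2.0, 32u << 20, 1, 1u << 20);

    printf("new max cache size: %zu\n", new_max); /* prints 5242880 (5 MiB) */
    return 0;
}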
*/ - if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) || - ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) && - (cache_ptr->epoch_markers_active < (cache_ptr->resize_ctl).epochs_before_eviction)) { + if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || + (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) && + (cache_ptr->epoch_markers_active < cache_ptr->resize_ctl.epochs_before_eviction)) { if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker") @@ -4118,42 +3936,32 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) */ if (status == in_spec) { - - switch ((cache_ptr->resize_ctl).decr_mode) { + switch (cache_ptr->resize_ctl.decr_mode) { case H5C_decr__off: break; case H5C_decr__threshold: - if (hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold) { - - if (!cache_ptr->size_decrease_possible) { - + if (hit_rate > cache_ptr->resize_ctl.upper_hr_threshold) { + if (!cache_ptr->size_decrease_possible) status = decrease_disabled; - } - else if (cache_ptr->max_cache_size <= (cache_ptr->resize_ctl).min_size) { - - HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).min_size); + else if (cache_ptr->max_cache_size <= cache_ptr->resize_ctl.min_size) { + HDassert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.min_size); status = at_min_size; } else { - - new_max_cache_size = (size_t)(((double)(cache_ptr->max_cache_size)) * - (cache_ptr->resize_ctl).decrement); + new_max_cache_size = + (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.decrement); /* clip to min size if necessary */ - if (new_max_cache_size < (cache_ptr->resize_ctl).min_size) { - - new_max_cache_size = (cache_ptr->resize_ctl).min_size; - } + if (new_max_cache_size < cache_ptr->resize_ctl.min_size) + new_max_cache_size = cache_ptr->resize_ctl.min_size; /* clip to max decrement if necessary */ - if (((cache_ptr->resize_ctl).apply_max_decrement) && - (((cache_ptr->resize_ctl).max_decrement + new_max_cache_size) < - cache_ptr->max_cache_size)) { - + if (cache_ptr->resize_ctl.apply_max_decrement && + ((cache_ptr->resize_ctl.max_decrement + new_max_cache_size) < + cache_ptr->max_cache_size)) new_max_cache_size = - cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement; - } + cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement; status = decrease; } @@ -4179,22 +3987,19 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) } /* cycle the epoch markers here if appropriate */ - if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) || - ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) && - (!inserted_epoch_marker)) { - + if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || + (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) && + !inserted_epoch_marker) /* move last epoch marker to the head of the LRU list */ if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker") - } if ((status == increase) || (status == decrease)) { - old_max_cache_size = cache_ptr->max_cache_size; old_min_clean_size = cache_ptr->min_clean_size; new_min_clean_size = - (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction)); + (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction)); /* new_min_clean_size is of size_t, and thus must be non-negative. 
* Hence we have @@ -4204,25 +4009,20 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) * by definition. */ HDassert(new_min_clean_size <= new_max_cache_size); - HDassert((cache_ptr->resize_ctl).min_size <= new_max_cache_size); - HDassert(new_max_cache_size <= (cache_ptr->resize_ctl).max_size); + HDassert(cache_ptr->resize_ctl.min_size <= new_max_cache_size); + HDassert(new_max_cache_size <= cache_ptr->resize_ctl.max_size); cache_ptr->max_cache_size = new_max_cache_size; cache_ptr->min_clean_size = new_min_clean_size; - if (status == increase) { - + if (status == increase) cache_ptr->cache_full = FALSE; - } - else if (status == decrease) { - + else if (status == decrease) cache_ptr->size_decreased = TRUE; - } /* update flash cache size increase fields as appropriate */ if (cache_ptr->flash_size_increase_possible) { - - switch ((cache_ptr->resize_ctl).flash_incr_mode) { + switch (cache_ptr->resize_ctl.flash_incr_mode) { case H5C_flash_incr__off: HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!") @@ -4231,7 +4031,7 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) case H5C_flash_incr__add_space: cache_ptr->flash_size_increase_threshold = (size_t)(((double)(cache_ptr->max_cache_size)) * - ((cache_ptr->resize_ctl).flash_threshold)); + (cache_ptr->resize_ctl.flash_threshold)); break; default: /* should be unreachable */ @@ -4241,11 +4041,10 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) } } - if ((cache_ptr->resize_ctl).rpt_fcn != NULL) { - (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status, - old_max_cache_size, new_max_cache_size, old_min_clean_size, - new_min_clean_size); - } + if (cache_ptr->resize_ctl.rpt_fcn != NULL) + (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status, + old_max_cache_size, new_max_cache_size, old_min_clean_size, + new_min_clean_size); if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) /* this should be impossible... */ @@ -4297,69 +4096,54 @@ H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *statu HDassert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0)); /* remove excess epoch markers if any */ - if (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction) + if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers") - if (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) || - (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold) && - (hit_rate >= (cache_ptr->resize_ctl).upper_hr_threshold))) { - - if (cache_ptr->max_cache_size > (cache_ptr->resize_ctl).min_size) { + if ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || + ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold) && + (hit_rate >= cache_ptr->resize_ctl.upper_hr_threshold))) { + if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.min_size) { /* evict aged out cache entries if appropriate... */ if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries") /* ... 
and then reduce cache size if appropriate */ if (cache_ptr->index_size < cache_ptr->max_cache_size) { - - if ((cache_ptr->resize_ctl).apply_empty_reserve) { - - test_size = (size_t)(((double)cache_ptr->index_size) / - (1 - (cache_ptr->resize_ctl).empty_reserve)); - + if (cache_ptr->resize_ctl.apply_empty_reserve) { + test_size = + (size_t)(((double)cache_ptr->index_size) / (1 - cache_ptr->resize_ctl.empty_reserve)); if (test_size < cache_ptr->max_cache_size) { - *status_ptr = decrease; *new_max_cache_size_ptr = test_size; } } else { - *status_ptr = decrease; *new_max_cache_size_ptr = cache_ptr->index_size; } if (*status_ptr == decrease) { - /* clip to min size if necessary */ - if (*new_max_cache_size_ptr < (cache_ptr->resize_ctl).min_size) { - - *new_max_cache_size_ptr = (cache_ptr->resize_ctl).min_size; - } + if (*new_max_cache_size_ptr < cache_ptr->resize_ctl.min_size) + *new_max_cache_size_ptr = cache_ptr->resize_ctl.min_size; /* clip to max decrement if necessary */ - if (((cache_ptr->resize_ctl).apply_max_decrement) && - (((cache_ptr->resize_ctl).max_decrement + *new_max_cache_size_ptr) < - cache_ptr->max_cache_size)) { - + if ((cache_ptr->resize_ctl.apply_max_decrement) && + ((cache_ptr->resize_ctl.max_decrement + *new_max_cache_size_ptr) < + cache_ptr->max_cache_size)) *new_max_cache_size_ptr = - cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement; - } + cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement; } } } - else { - + else *status_ptr = at_min_size; - } } done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C__autoadjust__ageout() */ /*------------------------------------------------------------------------- @@ -4393,18 +4177,14 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?") /* remove the last marker from both the ring buffer and the LRU list */ - i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first]; - cache_ptr->epoch_marker_ringbuf_first = (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); - if (cache_ptr->epoch_marker_ringbuf_size <= 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") cache_ptr->epoch_marker_ringbuf_size -= 1; - - if ((cache_ptr->epoch_marker_active)[i] != TRUE) + if (cache_ptr->epoch_marker_active[i] != TRUE) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr, @@ -4413,23 +4193,20 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr) /* now, re-insert it at the head of the LRU list, and at the tail of * the ring buffer. 
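The empty-reserve branch above sizes the cache so that the resident index occupies (1 - empty_reserve) of it, then refuses to go below the configured floor. A worked example of the same formula with hypothetical values:

#include <stddef.h>
#include <stdio.h>

int
main(void)
{
    size_t index_size    = 3u << 20;  /* 3 MiB of live entries       */
    double empty_reserve = 0.1;       /* keep 10% of the cache empty */
    size_t min_size      = 1u << 20;  /* configured floor            */

    /* size the cache so index_size is (1 - empty_reserve) of it */
    size_t test_size = (size_t)((double)index_size / (1.0 - empty_reserve));

    if (test_size < min_size)  /* never shrink below the floor */
        test_size = min_size;

    printf("proposed max cache size: %zu bytes\n", test_size);
    return 0;
}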
*/ - - HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i); - HDassert(((cache_ptr->epoch_markers)[i]).next == NULL); - HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL); + HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); + HDassert(cache_ptr->epoch_markers[i].next == NULL); + HDassert(cache_ptr->epoch_markers[i].prev == NULL); cache_ptr->epoch_marker_ringbuf_last = (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1); - - (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i; - + cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_last] = i; if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow") cache_ptr->epoch_marker_ringbuf_size += 1; - H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr, - (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL)) + H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) done: FUNC_LEAVE_NOAPI(ret_value) @@ -4442,7 +4219,7 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr) * * Purpose: Evict clean entries in the cache that haven't * been accessed for at least - * (cache_ptr->resize_ctl).epochs_before_eviction epochs, + * cache_ptr->resize_ctl.epochs_before_eviction epochs, * and flush dirty entries that haven't been accessed for * that amount of time. * @@ -4498,22 +4275,16 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitte * to the equivalent of infinity. The current size of the index will * do nicely. */ - if ((cache_ptr->resize_ctl).apply_max_decrement) { - - eviction_size_limit = (cache_ptr->resize_ctl).max_decrement; - } - else { - + if (cache_ptr->resize_ctl.apply_max_decrement) + eviction_size_limit = cache_ptr->resize_ctl.max_decrement; + else eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */ - } if (write_permitted) { - restart_scan = FALSE; entry_ptr = cache_ptr->LRU_tail_ptr; - - while ((entry_ptr != NULL) && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) && - (bytes_evicted < eviction_size_limit)) { + while (entry_ptr != NULL && entry_ptr->type->id != H5AC_EPOCH_MARKER_ID && + bytes_evicted < eviction_size_limit) { hbool_t skipping_entry = FALSE; HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); @@ -4554,7 +4325,6 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitte } /* end else */ } /* end if */ else if (!entry_ptr->prefetched_dirty) { - bytes_evicted += entry_ptr->size; if (H5C__flush_single_entry( @@ -4602,7 +4372,6 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitte * min clean space requirement (assuming that requirement was met on * entry). */ - } /* end if */ else /* ! 
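The epoch-marker cycling above pops the oldest marker index off the front of a fixed-size ring buffer and pushes it back on at the tail, which is what lets the marker move to the head of the LRU. A toy ring buffer showing the same first/last/size bookkeeping (the capacity and every name here are placeholders, not HDF5 identifiers):

#include <assert.h>
#include <stdio.h>

#define N_MARKERS 10  /* stands in for the maximum marker count */

struct ring {
    int first, last, size;
    int buf[N_MARKERS + 1];
};

static int
ring_pop_front(struct ring *r)
{
    int i = r->buf[r->first];

    assert(r->size > 0);                        /* underflow check */
    r->first = (r->first + 1) % (N_MARKERS + 1);
    r->size--;
    return i;
}

static void
ring_push_back(struct ring *r, int i)
{
    assert(r->size < N_MARKERS);                /* overflow check */
    r->last = (r->last + 1) % (N_MARKERS + 1);
    r->buf[r->last] = i;
    r->size++;
}

int
main(void)
{
    struct ring r = {.first = 0, .last = -1, .size = 0};

    ring_push_back(&r, 3);    /* two active markers: 3 is the oldest */
    ring_push_back(&r, 7);

    ring_push_back(&r, ring_pop_front(&r));     /* cycle the oldest marker */

    assert(r.buf[r.first] == 7);  /* 7 is now the oldest, 3 the newest */
    printf("oldest marker is now %d\n", r.buf[r.first]);
    return 0;
}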
write_permitted */ { /* Since we are not allowed to write, all we can do is evict @@ -4674,14 +4443,13 @@ H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr) HDassert(cache_ptr); HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - if (cache_ptr->epoch_markers_active >= (cache_ptr->resize_ctl).epochs_before_eviction) + if (cache_ptr->epoch_markers_active >= cache_ptr->resize_ctl.epochs_before_eviction) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers") /* find an unused marker */ i = 0; while ((cache_ptr->epoch_marker_active)[i] && i < H5C__MAX_EPOCH_MARKERS) i++; - if (i >= H5C__MAX_EPOCH_MARKERS) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker") @@ -4693,25 +4461,19 @@ H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr) cache_ptr->epoch_marker_ringbuf_last = (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1); - (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i; - - if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS) { - + if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow") - } cache_ptr->epoch_marker_ringbuf_size += 1; - H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr, - (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL)) + H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) cache_ptr->epoch_markers_active += 1; done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C__autoadjust__ageout__insert_new_marker() */ /*------------------------------------------------------------------------- @@ -4752,23 +4514,21 @@ H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr) if (cache_ptr->epoch_marker_ringbuf_size <= 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") - cache_ptr->epoch_marker_ringbuf_size -= 1; - if ((cache_ptr->epoch_marker_active)[i] != TRUE) + if (cache_ptr->epoch_marker_active[i] != TRUE) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") /* remove the epoch marker from the LRU list */ - H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, - (FAIL)) + H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) /* mark the epoch marker as unused. 
*/ - (cache_ptr->epoch_marker_active)[i] = FALSE; + cache_ptr->epoch_marker_active[i] = FALSE; - HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i); - HDassert(((cache_ptr->epoch_markers)[i]).next == NULL); - HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL); + HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); + HDassert(cache_ptr->epoch_markers[i].next == NULL); + HDassert(cache_ptr->epoch_markers[i].prev == NULL); /* decrement the number of active epoch markers */ cache_ptr->epoch_markers_active -= 1; @@ -4777,9 +4537,7 @@ H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr) } done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C__autoadjust__ageout__remove_all_markers() */ /*------------------------------------------------------------------------- @@ -4789,7 +4547,7 @@ H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr) * Purpose: Remove epoch markers from the end of the LRU list and * mark them as inactive until the number of active markers * equals the current value of - * (cache_ptr->resize_ctl).epochs_before_eviction. + * cache_ptr->resize_ctl.epochs_before_eviction. * * Return: SUCCEED on success/FAIL on failure. * @@ -4809,14 +4567,13 @@ H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr) HDassert(cache_ptr); HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - if (cache_ptr->epoch_markers_active <= (cache_ptr->resize_ctl).epochs_before_eviction) + if (cache_ptr->epoch_markers_active <= cache_ptr->resize_ctl.epochs_before_eviction) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry") - while (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction) { + while (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) { /* get the index of the last epoch marker in the LRU list * and remove it from the ring buffer. */ - ring_buf_index = cache_ptr->epoch_marker_ringbuf_first; i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index]; @@ -4825,23 +4582,21 @@ H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr) if (cache_ptr->epoch_marker_ringbuf_size <= 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") - cache_ptr->epoch_marker_ringbuf_size -= 1; - if ((cache_ptr->epoch_marker_active)[i] != TRUE) + if (cache_ptr->epoch_marker_active[i] != TRUE) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") /* remove the epoch marker from the LRU list */ - H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, - (FAIL)) + H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) /* mark the epoch marker as unused. 
*/ - (cache_ptr->epoch_marker_active)[i] = FALSE; + cache_ptr->epoch_marker_active[i] = FALSE; - HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i); - HDassert(((cache_ptr->epoch_markers)[i]).next == NULL); - HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL); + HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); + HDassert(cache_ptr->epoch_markers[i].next == NULL); + HDassert(cache_ptr->epoch_markers[i].prev == NULL); /* decrement the number of active epoch markers */ cache_ptr->epoch_markers_active -= 1; @@ -4850,9 +4605,7 @@ H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr) } done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C__autoadjust__ageout__remove_excess_markers() */ /*------------------------------------------------------------------------- @@ -4861,7 +4614,7 @@ H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr) * * Purpose: If there is not at least new_entry_size - old_entry_size * bytes of free space in the cache and the current - * max_cache_size is less than (cache_ptr->resize_ctl).max_size, + * max_cache_size is less than cache_ptr->resize_ctl.max_size, * perform a flash increase in the cache size and then reset * the full cache hit rate statistics, and exit. * @@ -4895,11 +4648,9 @@ H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t n HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size") space_needed = new_entry_size - old_entry_size; - if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) && - (cache_ptr->max_cache_size < (cache_ptr->resize_ctl).max_size)) { - - switch ((cache_ptr->resize_ctl).flash_incr_mode) { + (cache_ptr->max_cache_size < cache_ptr->resize_ctl.max_size)) { + switch (cache_ptr->resize_ctl.flash_incr_mode) { case H5C_flash_incr__off: HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!") @@ -4907,14 +4658,11 @@ H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t n case H5C_flash_incr__add_space: if (cache_ptr->index_size < cache_ptr->max_cache_size) { - HDassert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed); space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size; } - space_needed = (size_t)(((double)space_needed) * (cache_ptr->resize_ctl).flash_multiple); - + space_needed = (size_t)(((double)space_needed) * cache_ptr->resize_ctl.flash_multiple); new_max_cache_size = cache_ptr->max_cache_size + space_needed; - break; default: /* should be unreachable */ @@ -4922,16 +4670,11 @@ H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t n break; } - if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) { - - new_max_cache_size = (cache_ptr->resize_ctl).max_size; - } - + if (new_max_cache_size > cache_ptr->resize_ctl.max_size) + new_max_cache_size = cache_ptr->resize_ctl.max_size; HDassert(new_max_cache_size > cache_ptr->max_cache_size); - new_min_clean_size = - (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction)); - + new_min_clean_size = (size_t)((double)new_max_cache_size * cache_ptr->resize_ctl.min_clean_fraction); HDassert(new_min_clean_size <= new_max_cache_size); old_max_cache_size = cache_ptr->max_cache_size; @@ -4943,7 +4686,7 @@ H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t n /* update flash cache size increase fields as appropriate */ HDassert(cache_ptr->flash_size_increase_possible); - switch ((cache_ptr->resize_ctl).flash_incr_mode) { + switch 
(cache_ptr->resize_ctl.flash_incr_mode) { case H5C_flash_incr__off: HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!") @@ -4951,8 +4694,7 @@ H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t n case H5C_flash_incr__add_space: cache_ptr->flash_size_increase_threshold = - (size_t)(((double)(cache_ptr->max_cache_size)) * - ((cache_ptr->resize_ctl).flash_threshold)); + (size_t)((double)cache_ptr->max_cache_size * cache_ptr->resize_ctl.flash_threshold); break; default: /* should be unreachable */ @@ -4965,17 +4707,16 @@ H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t n * we don't. */ - if ((cache_ptr->resize_ctl).rpt_fcn != NULL) { - + if (cache_ptr->resize_ctl.rpt_fcn != NULL) { /* get the hit rate for the reporting function. Should still * be good as we haven't reset the hit rate statistics. */ if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate") - (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, - status, old_max_cache_size, new_max_cache_size, - old_min_clean_size, new_min_clean_size); + (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status, + old_max_cache_size, new_max_cache_size, old_min_clean_size, + new_min_clean_size); } if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) @@ -4984,9 +4725,7 @@ H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t n } done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C__flash_increase_cache_size() */ /*------------------------------------------------------------------------- @@ -5057,7 +4796,6 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0); for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) { - index_len += cache_ptr->index_ring_len[i]; index_size += cache_ptr->index_ring_size[i]; clean_index_size += cache_ptr->clean_index_ring_size[i]; @@ -5065,7 +4803,6 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) slist_len += cache_ptr->slist_ring_len[i]; slist_size += cache_ptr->slist_ring_size[i]; - } /* end for */ HDassert(cache_ptr->index_len == index_len); @@ -5078,63 +4815,48 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) #endif /* H5C_DO_SANITY_CHECKS */ /* remove ageout markers if present */ - if (cache_ptr->epoch_markers_active > 0) { - + if (cache_ptr->epoch_markers_active > 0) if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers") - } /* flush invalidate each ring, starting from the outermost ring and * working inward. 
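The flash-increase hunks above grow the cache by the part of the new entry that does not fit in the current free space, scaled by flash_multiple and clipped to the configured maximum. A standalone sketch of that order of operations, with invented sizes:

#include <stddef.h>
#include <stdio.h>

int
main(void)
{
    size_t max_cache_size = 8u << 20;   /* current limit: 8 MiB   */
    size_t index_size     = 7u << 20;   /* 7 MiB already resident */
    size_t space_needed   = 4u << 20;   /* entry grew by 4 MiB    */
    double flash_multiple = 1.5;
    size_t hard_max       = 32u << 20;

    /* only the part that does not fit in the current free space counts */
    if (index_size < max_cache_size)
        space_needed -= max_cache_size - index_size;

    /* scale it, add it to the current limit, and clip to the ceiling */
    space_needed = (size_t)((double)space_needed * flash_multiple);
    size_t new_max = max_cache_size + space_needed;
    if (new_max > hard_max)
        new_max = hard_max;

    printf("flash-increased max: %zu bytes\n", new_max); /* 8 MiB + 4.5 MiB */
    return 0;
}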
*/ ring = H5C_RING_USER; - while (ring < H5C_RING_NTYPES) { - if (H5C__flush_invalidate_ring(f, ring, flags) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed") ring++; - } /* end while */ +#ifndef NDEBUG /* Invariants, after destroying all entries in the hash table */ if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) { - HDassert(cache_ptr->index_size == 0); HDassert(cache_ptr->clean_index_size == 0); HDassert(cache_ptr->pel_len == 0); HDassert(cache_ptr->pel_size == 0); - } /* end if */ else { - H5C_cache_entry_t *entry_ptr; /* Cache entry */ unsigned u; /* Local index variable */ /* All rings except ring 4 should be empty now */ /* (Ring 4 has the superblock) */ for (u = H5C_RING_USER; u < H5C_RING_SB; u++) { - HDassert(cache_ptr->index_ring_len[u] == 0); HDassert(cache_ptr->index_ring_size[u] == 0); HDassert(cache_ptr->clean_index_ring_size[u] == 0); - } /* end for */ /* Check that any remaining pinned entries are in the superblock ring */ - entry_ptr = cache_ptr->pel_head_ptr; - while (entry_ptr) { - /* Check ring */ HDassert(entry_ptr->ring == H5C_RING_SB); /* Advance to next entry in pinned entry list */ entry_ptr = entry_ptr->next; - } /* end while */ } /* end else */ @@ -5145,11 +4867,10 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) HDassert(cache_ptr->pl_size == 0); HDassert(cache_ptr->LRU_list_len == 0); HDassert(cache_ptr->LRU_list_size == 0); +#endif /* NDEBUG */ done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C__flush_invalidate_cache() */ /*------------------------------------------------------------------------- @@ -5257,25 +4978,19 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) */ /* compute the number of pinned entries in this ring */ - entry_ptr = cache_ptr->pel_head_ptr; cur_ring_pel_len = 0; - while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring >= ring); if (entry_ptr->ring == ring) cur_ring_pel_len++; entry_ptr = entry_ptr->next; - } /* end while */ - old_ring_pel_len = cur_ring_pel_len; while (cache_ptr->index_ring_len[ring] > 0) { - /* first, try to flush-destroy any dirty entries. Do this by * making a scan through the slist. Note that new dirty entries * may be created by the flush call backs. Thus it is possible @@ -5318,32 +5033,25 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) /* this done, start the scan of the slist */ restart_slist_scan = TRUE; - while (restart_slist_scan || (node_ptr != NULL)) { - if (restart_slist_scan) { - restart_slist_scan = FALSE; /* Start at beginning of skip list */ node_ptr = H5SL_first(cache_ptr->slist_ptr); - if (node_ptr == NULL) /* the slist is empty -- break out of inner loop */ break; /* Get cache entry for this node */ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - if (NULL == next_entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(next_entry_ptr->is_dirty); HDassert(next_entry_ptr->in_slist); HDassert(next_entry_ptr->ring >= ring); - } /* end if */ entry_ptr = next_entry_ptr; @@ -5369,13 +5077,9 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) * from the slist. 
*/ node_ptr = H5SL_next(node_ptr); - if (node_ptr != NULL) { - next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - if (NULL == next_entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); @@ -5384,10 +5088,8 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) HDassert(next_entry_ptr->ring >= ring); HDassert(entry_ptr != next_entry_ptr); } /* end if */ - else { - + else next_entry_ptr = NULL; - } /* Note that we now remove nodes from the slist as we flush * the associated entries, instead of leaving them there @@ -5402,22 +5104,17 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) if (((!entry_ptr->flush_me_last) || ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) && (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) { - if (entry_ptr->is_protected) { /* We have major problems -- but lets flush * everything we can before we flag an error. */ protected_entries++; - } /* end if */ else if (entry_ptr->is_pinned) { - if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed") if (cache_ptr->slist_changed) { - /* The slist has been modified by something * other than the simple removal of the * of the flushed entry after the flush. @@ -5428,20 +5125,16 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) restart_slist_scan = TRUE; cache_ptr->slist_changed = FALSE; H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr); - } /* end if */ } /* end else-if */ else { - if (H5C__flush_single_entry(f, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed") if (cache_ptr->slist_changed) { - /* The slist has been modified by something * other than the simple removal of the * of the flushed entry after the flush. @@ -5468,10 +5161,8 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) */ if (node_ptr == NULL) { - HDassert(cache_ptr->slist_len == (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase)); - HDassert(cache_ptr->slist_size == (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase)); } /* end if */ @@ -5497,9 +5188,7 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) cache_ptr->entries_relocated_counter = 0; next_entry_ptr = cache_ptr->il_head; - while (next_entry_ptr != NULL) { - entry_ptr = next_entry_ptr; HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring >= ring); @@ -5512,20 +5201,16 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) { if (entry_ptr->is_protected) { - /* we have major problems -- but lets flush and * destroy everything we can before we flag an * error. */ protected_entries++; - if (!entry_ptr->in_slist) { - + if (!entry_ptr->in_slist) HDassert(!(entry_ptr->is_dirty)); - } } /* end if */ - else if (!(entry_ptr->is_pinned)) { - + else if (!entry_ptr->is_pinned) { /* if *entry_ptr is dirty, it is possible * that one or more other entries may be * either removed from the cache, loaded @@ -5553,12 +5238,10 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) * or three entries. 
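Both flush loops above advance the skip-list cursor before flushing an entry and restart the whole scan whenever a flush callback changes the list underneath them. A compact model of that restart-on-mutation pattern, using an array-backed list instead of a skip list (all names invented):

#include <stdio.h>

#define N 5

static int present[N] = {1, 1, 1, 1, 1}; /* which items are still listed      */
static int changed;                      /* set when a visit mutates the list */

static void
visit(int i)
{
    present[i] = 0;          /* "flushing" removes the visited item ...        */
    if (i == 1 && present[3]) {
        present[3] = 0;      /* ... and this visit also removes a later item,  */
        changed    = 1;      /* so the in-flight scan is no longer valid       */
    }
}

int
main(void)
{
    int restart_scan = 1;
    int visits       = 0;

    while (restart_scan) {
        restart_scan = 0;
        for (int i = 0; i < N; i++) {
            if (!present[i])
                continue;
            visit(i);
            visits++;
            if (changed) {            /* list mutated: restart from the head */
                changed      = 0;
                restart_scan = 1;
                break;
            }
        }
    }
    printf("items visited: %d\n", visits); /* 4: item 3 was removed for us */
    return 0;
}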
*/ cache_ptr->entry_watched_for_removal = next_entry_ptr; - if (H5C__flush_single_entry(f, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed") /* Restart the index list scan if necessary. Must @@ -5581,12 +5264,9 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) cache_ptr->entries_relocated_counter = 0; H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr) - } /* end if */ - else { - + else cache_ptr->entry_watched_for_removal = NULL; - } } /* end if */ } /* end if */ } /* end for loop scanning hash table */ @@ -5604,49 +5284,39 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) cur_ring_pel_len = 0; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring >= ring); - if (entry_ptr->ring == ring) { - + if (entry_ptr->ring == ring) cur_ring_pel_len++; - } entry_ptr = entry_ptr->next; - } /* end while */ /* Check if the number of pinned entries in the ring is positive, and * it is not declining. Scream and die if so. */ if ((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) { - /* Don't error if allowed to have pinned entries remaining */ - if (evict_flags) { - + if (evict_flags) HGOTO_DONE(TRUE) - } - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, - "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = " - "%d, ring = %d", - (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring) + HGOTO_ERROR( + H5E_CACHE, H5E_CANTFLUSH, FAIL, + "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", + (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring) } /* end if */ HDassert(protected_entries == cache_ptr->pl_len); if ((protected_entries > 0) && (protected_entries == cache_ptr->index_len)) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Only protected entries left in cache, protected_entries = %d", (int)protected_entries) - } /* main while loop */ /* Invariants, after destroying all entries in the ring */ for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) { - HDassert(cache_ptr->index_ring_len[i] == 0); HDassert(cache_ptr->index_ring_size[i] == (size_t)0); HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0); @@ -5654,24 +5324,17 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) HDassert(cache_ptr->slist_ring_len[i] == 0); HDassert(cache_ptr->slist_ring_size[i] == (size_t)0); - } /* end for */ HDassert(protected_entries <= cache_ptr->pl_len); - if (protected_entries > 0) { - + if (protected_entries > 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries") - } - else if (cur_ring_pel_len > 0) { - + else if (cur_ring_pel_len > 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring") - } done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C__flush_invalidate_ring() */ /*------------------------------------------------------------------------- @@ -5739,13 +5402,9 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0); flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0); - if (!flush_marked_entries) { - - for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++) { - + if (!flush_marked_entries) + for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++) HDassert(cache_ptr->slist_ring_len[i] == 0); - } - } 
HDassert(cache_ptr->flush_in_progress); @@ -5767,7 +5426,6 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) cache_ptr->slist_changed = FALSE; while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) && (flushed_entries_last_pass)) { - flushed_entries_last_pass = FALSE; #ifdef H5C_DO_SANITY_CHECKS @@ -5810,33 +5468,24 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) #endif /* H5C_DO_SANITY_CHECKS */ restart_slist_scan = TRUE; - while ((restart_slist_scan) || (node_ptr != NULL)) { - if (restart_slist_scan) { - restart_slist_scan = FALSE; /* Start at beginning of skip list */ node_ptr = H5SL_first(cache_ptr->slist_ptr); - - if (node_ptr == NULL) { - + if (node_ptr == NULL) /* the slist is empty -- break out of inner loop */ break; - } /* Get cache entry for this node */ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - if (NULL == next_entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(next_entry_ptr->is_dirty); HDassert(next_entry_ptr->in_slist); - } /* end if */ entry_ptr = next_entry_ptr; @@ -5862,40 +5511,29 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) HDassert(entry_ptr->in_slist); HDassert(entry_ptr->is_dirty); - if ((!flush_marked_entries) || (entry_ptr->flush_marker)) { - + if (!flush_marked_entries || entry_ptr->flush_marker) HDassert(entry_ptr->ring >= ring); - } /* Advance node pointer now, before we delete its target * from the slist. */ node_ptr = H5SL_next(node_ptr); - if (node_ptr != NULL) { - next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - if (NULL == next_entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(next_entry_ptr->is_dirty); HDassert(next_entry_ptr->in_slist); - if (!flush_marked_entries || next_entry_ptr->flush_marker) { - + if (!flush_marked_entries || next_entry_ptr->flush_marker) HDassert(next_entry_ptr->ring >= ring); - } HDassert(entry_ptr != next_entry_ptr); - } /* end if */ - else { - + else next_entry_ptr = NULL; - } if ((!flush_marked_entries || entry_ptr->flush_marker) && ((!entry_ptr->flush_me_last) || @@ -5907,23 +5545,18 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) HDassert(entry_ptr->flush_dep_nunser_children == 0); if (entry_ptr->is_protected) { - /* we probably have major problems -- but lets * flush everything we can before we decide * whether to flag an error. */ tried_to_flush_protected_entry = TRUE; protected_entries++; - } /* end if */ else { - if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry") if (cache_ptr->slist_changed) { - /* The slist has been modified by something * other than the simple removal of the * of the flushed entry after the flush. 
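The outer while loop above keeps flushing only while the ring still has entries, nothing is protected, and the previous pass actually flushed something, so a stuck entry cannot spin the loop forever. A minimal sketch of that progress guard, with pretend work items:

#include <stdio.h>

int
main(void)
{
    int remaining          = 10; /* pretend work items           */
    int stuck              = 3;  /* items that refuse to go away */
    int made_progress_last = 1;
    int passes             = 0;

    while (remaining > stuck && made_progress_last) {
        int before = remaining;

        remaining -= 4;                /* each pass retires a few items */
        if (remaining < stuck)
            remaining = stuck;         /* ... but never the stuck ones  */

        made_progress_last = (remaining < before);
        passes++;
    }

    if (remaining > 0)
        printf("gave up after %d passes with %d items left\n", passes, remaining);
    return 0;
}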
@@ -5934,11 +5567,9 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) restart_slist_scan = TRUE; cache_ptr->slist_changed = FALSE; H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) - } /* end if */ flushed_entries_last_pass = TRUE; - } /* end else */ } /* end if */ } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */ @@ -5950,28 +5581,22 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size); #endif /* H5C_DO_SANITY_CHECKS */ - - } /* while */ + } /* while */ HDassert(protected_entries <= cache_ptr->pl_len); - if (((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry)) - + if (((cache_ptr->pl_len > 0) && !ignore_protected) || tried_to_flush_protected_entry) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items") #ifdef H5C_DO_SANITY_CHECKS if (!flush_marked_entries) { - HDassert(cache_ptr->slist_ring_len[ring] == 0); HDassert(cache_ptr->slist_ring_size[ring] == 0); - } /* end if */ #endif /* H5C_DO_SANITY_CHECKS */ done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C__flush_ring() */ /*------------------------------------------------------------------------- @@ -6045,26 +5670,18 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) /* Set the flag for destroying the entry, based on the 'take ownership' * and 'destroy' flags */ - if (take_ownership) { - + if (take_ownership) destroy_entry = FALSE; - } - else { - + else destroy_entry = destroy; - } /* we will write the entry to disk if it exists, is dirty, and if the * clear only flag is not set. */ - if (entry_ptr->is_dirty && !clear_only) { - + if (entry_ptr->is_dirty && !clear_only) write_entry = TRUE; - } - else { - + else write_entry = FALSE; - } /* if we have received close warning, and we have been instructed to * generate a metadata cache image, and we have actually constructed @@ -6073,8 +5690,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * Set suppress_image_entry_writes to TRUE if indicated by the * image_ctl flags. 
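H5C__flush_single_entry() starts by unpacking the flags word into booleans and deriving a couple of decisions from them, for example that a take-ownership destroy must not free the in-core copy. A toy decoding of the same shape (the flag values here are invented, not the real H5C__ constants):

#include <stdbool.h>
#include <stdio.h>

#define FLUSH_INVALIDATE 0x01u
#define CLEAR_ONLY       0x02u
#define TAKE_OWNERSHIP   0x04u

int
main(void)
{
    unsigned flags    = FLUSH_INVALIDATE | TAKE_OWNERSHIP;
    bool     is_dirty = true;

    bool destroy        = (flags & FLUSH_INVALIDATE) != 0;
    bool clear_only     = (flags & CLEAR_ONLY) != 0;
    bool take_ownership = (flags & TAKE_OWNERSHIP) != 0;

    /* evict from the cache's data structures, but if the caller is taking
     * ownership, don't free the in-core representation itself */
    bool destroy_entry = destroy && !take_ownership;

    /* only write if there is something unsaved and we weren't told to
     * just drop it */
    bool write_entry = is_dirty && !clear_only;

    printf("destroy=%d destroy_entry=%d write_entry=%d\n",
           destroy, destroy_entry, write_entry);
    return 0;
}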
*/ - if ((cache_ptr->close_warning_received) && (cache_ptr->image_ctl.generate_image) && - (cache_ptr->num_entries_in_image > 0) && (cache_ptr->image_entries != NULL)) { + if (cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image && + cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries != NULL) { /* Sanity checks */ HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image)); @@ -6085,58 +5702,37 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) suppress_image_entry_frees = TRUE; - if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES) { - + if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES) suppress_image_entry_writes = TRUE; - - } /* end if */ - } /* end if */ + } /* end if */ /* run initial sanity checks */ #ifdef H5C_DO_SANITY_CHECKS if (cache_ptr->slist_enabled) { - if (entry_ptr->in_slist) { - HDassert(entry_ptr->is_dirty); - - if ((entry_ptr->flush_marker) && (!entry_ptr->is_dirty)) - + if (entry_ptr->flush_marker && !entry_ptr->is_dirty) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks") } /* end if */ else { - HDassert(!entry_ptr->is_dirty); HDassert(!entry_ptr->flush_marker); - - if ((entry_ptr->is_dirty) || (entry_ptr->flush_marker)) - + if (entry_ptr->is_dirty || entry_ptr->flush_marker) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks") - } /* end else */ } else { /* slist is disabled */ - HDassert(!entry_ptr->in_slist); - - if (!entry_ptr->is_dirty) { - + if (!entry_ptr->is_dirty) if (entry_ptr->flush_marker) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?") - } } #endif /* H5C_DO_SANITY_CHECKS */ - if (entry_ptr->is_protected) { - - HDassert(!entry_ptr->is_protected); - + if (entry_ptr->is_protected) /* Attempt to flush a protected entry -- scream and die. */ HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry") - } /* end if */ - /* Set entry_ptr->flush_in_progress = TRUE and set * entry_ptr->flush_marker = FALSE * @@ -6154,13 +5750,9 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * entry. */ if (write_entry || generate_image) { - HDassert(entry_ptr->is_dirty); - if (NULL == entry_ptr->image_ptr) { - if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer") @@ -6171,16 +5763,13 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) } /* end if */ - if (!(entry_ptr->image_up_to_date)) { - + if (!entry_ptr->image_up_to_date) { /* Sanity check */ HDassert(!entry_ptr->prefetched); /* Generate the entry's image */ if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image") - } /* end if ( ! (entry_ptr->image_up_to_date) ) */ } /* end if */ @@ -6191,12 +5780,10 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * flag should only be used in test code. 
*/ if (write_entry) { - HDassert(entry_ptr->is_dirty); #ifdef H5C_DO_SANITY_CHECKS - if ((cache_ptr->check_write_permitted) && (!(cache_ptr->write_permitted))) - + if (cache_ptr->check_write_permitted && !cache_ptr->write_permitted) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!") #endif /* H5C_DO_SANITY_CHECKS */ @@ -6207,48 +5794,37 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * H5AC__CLASS_SKIP_WRITES is set in the entry's type. This * flag should only be used in test code */ - if (((!suppress_image_entry_writes) || (!entry_ptr->include_in_image)) && - (((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0)) { - + if ((!suppress_image_entry_writes || !entry_ptr->include_in_image) && + ((entry_ptr->type->flags & H5C__CLASS_SKIP_WRITES) == 0)) { H5FD_mem_t mem_type = H5FD_MEM_DEFAULT; #ifdef H5_HAVE_PARALLEL if (cache_ptr->coll_write_list) { - if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item") } /* end if */ else { #endif /* H5_HAVE_PARALLEL */ - if (entry_ptr->prefetched) { - HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); - mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type; } /* end if */ - else { - + else mem_type = entry_ptr->type->mem_type; - } if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file") #ifdef H5_HAVE_PARALLEL } -#endif /* H5_HAVE_PARALLEL */ - +#endif /* H5_HAVE_PARALLEL */ } /* end if */ /* if the entry has a notify callback, notify it that we have * just flushed the entry. */ - if ((entry_ptr->type->notify) && - ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0)) - + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush") - } /* if ( write_entry ) */ /* At this point, all pre-serialize and serialize calls have been @@ -6261,21 +5837,15 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) /* start by updating the statistics */ if (clear_only) { - /* only log a clear if the entry was dirty */ - if (was_dirty) { - + if (was_dirty) H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) - - } /* end if */ } else if (write_entry) { - HDassert(was_dirty); /* only log a flush if we actually wrote to disk */ H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) - } /* end else if */ /* Note that the algorithm below is (very) similar to the set of operations @@ -6285,16 +5855,11 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) /* Update the cache internal data structures. */ if (destroy) { - /* Sanity checks */ - if (take_ownership) { - + if (take_ownership) HDassert(!destroy_entry); - } - else { - + else HDassert(destroy_entry); - } HDassert(!entry_ptr->is_pinned); @@ -6305,9 +5870,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * to be removed from the cache, send a 'before eviction' notice while * the entry is still fully integrated in the cache. 
*/ - if ((entry_ptr->type->notify) && - ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0)) - + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict") /* Update the cache internal data structures as appropriate @@ -6328,19 +5892,14 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) */ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL) - if ((entry_ptr->in_slist) && (del_from_slist_on_destroy)) { - + if (entry_ptr->in_slist && del_from_slist_on_destroy) H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) - } #ifdef H5_HAVE_PARALLEL /* Check for collective read access flag */ if (entry_ptr->coll_access) { - entry_ptr->coll_access = FALSE; - H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL) - } /* end if */ #endif /* H5_HAVE_PARALLEL */ @@ -6348,16 +5907,13 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) /* Remove entry from tag list */ if (H5C__untag_entry(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list") /* verify that the entry is no longer part of any flush dependencies */ HDassert(entry_ptr->flush_dep_nparents == 0); HDassert(entry_ptr->flush_dep_nchildren == 0); - } /* end if */ else { - HDassert(clear_only || write_entry); HDassert(entry_ptr->is_dirty); HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist)); @@ -6368,9 +5924,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * view of the replacement policy and the slist. * Hence no differentiation between them. */ - H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL) - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) /* mark the entry as clean and update the index for @@ -6383,31 +5937,23 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) /* Check for entry changing status and do notifications, etc. */ if (was_dirty) { - /* If the entry's type has a 'notify' callback send a * 'entry cleaned' notice now that the entry is fully * integrated into the cache. */ - if ((entry_ptr->type->notify) && - ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)) - + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared") /* Propagate the clean flag up the flush dependency chain * if appropriate */ - if (entry_ptr->flush_dep_ndirty_children != 0) { - + if (entry_ptr->flush_dep_ndirty_children != 0) HDassert(entry_ptr->flush_dep_ndirty_children == 0); - } - - if (entry_ptr->flush_dep_nparents > 0) { - + if (entry_ptr->flush_dep_nparents > 0) if (H5C__mark_flush_dep_clean(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag") - } } /* end if */ } /* end else */ @@ -6425,7 +5971,6 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * Now discard the entry if appropriate. */ if (destroy) { - /* Sanity check */ HDassert(0 == entry_ptr->flush_dep_nparents); @@ -6436,14 +5981,10 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * * Otherwise, free the buffer if it exists. 
*/ - if (suppress_image_entry_frees && entry_ptr->include_in_image) { - + if (suppress_image_entry_frees && entry_ptr->include_in_image) entry_ptr->image_ptr = NULL; - } - else if (entry_ptr->image_ptr != NULL) { - + else if (entry_ptr->image_ptr != NULL) entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); - } /* If the entry is not a prefetched entry, verify that the flush * dependency parents addresses array has been transferred. @@ -6452,17 +5993,14 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * the flush dependency parents addresses array if necessary. */ if (!entry_ptr->prefetched) { - HDassert(0 == entry_ptr->fd_parent_count); HDassert(NULL == entry_ptr->fd_parent_addrs); - } /* end if */ /* Check whether we should free the space in the file that * the entry occupies */ if (free_file_space) { - hsize_t fsf_size; /* Sanity checks */ @@ -6483,22 +6021,15 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * Otherwise use entry_ptr->size. */ if (entry_ptr->type->fsf_size) { - if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size") - - } /* end if */ - else { /* no file space free size callback -- use entry size */ - + } /* end if */ + else /* no file space free size callback -- use entry size */ fsf_size = entry_ptr->size; - } /* Release the space on disk */ if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry") - } /* end if ( free_file_space ) */ /* Reset the pointer to the cache the entry is within. -QAK */ @@ -6522,17 +6053,13 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) cache_ptr->entries_removed_counter++; cache_ptr->last_entry_removed_ptr = entry_ptr; - if (entry_ptr == cache_ptr->entry_watched_for_removal) { - + if (entry_ptr == cache_ptr->entry_watched_for_removal) cache_ptr->entry_watched_for_removal = NULL; - } /* Check for actually destroying the entry in memory */ /* (As opposed to taking ownership of it) */ if (destroy_entry) { - if (entry_ptr->is_dirty) { - /* Reset dirty flag */ entry_ptr->is_dirty = FALSE; @@ -6540,12 +6067,10 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * 'entry cleaned' notice now that the entry is fully * integrated into the cache. */ - if ((entry_ptr->type->notify) && - ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)) - + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared") - } /* end if */ /* we are about to discard the in core representation -- @@ -6558,19 +6083,15 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) HDassert(entry_ptr->image_ptr == NULL); if (entry_ptr->type->free_icr((void *)entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed") - } /* end if */ else { - HDassert(take_ownership); /* Client is taking ownership of the entry. 
Set bad magic here too * so the cache will choke unless the entry is re-inserted properly */ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; - } /* end else */ } /* if (destroy) */ @@ -6578,36 +6099,25 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * so it doesn't go out of date */ if (update_page_buffer) { - /* Sanity check */ HDassert(!destroy); HDassert(entry_ptr->image_ptr); - if ((f->shared->page_buf) && (f->shared->page_buf->page_size >= entry_ptr->size)) { - + if (f->shared->page_buf && (f->shared->page_buf->page_size >= entry_ptr->size)) if (H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) > 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache") - } /* end if */ - } /* end if */ - - if (cache_ptr->log_flush) { + } /* end if */ + if (cache_ptr->log_flush) if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed") - } /* end if */ - done: - HDassert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress)); - HDassert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty)); FUNC_LEAVE_NOAPI(ret_value) - } /* H5C__flush_single_entry() */ /*------------------------------------------------------------------------- @@ -6788,9 +6298,7 @@ H5C__load_entry(H5F_t *f, #ifdef H5_HAVE_PARALLEL if (!coll_access || 0 == mpi_rank) { #endif /* H5_HAVE_PARALLEL */ - if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) { - #ifdef H5_HAVE_PARALLEL if (coll_access) { /* Push an error, but still participate in following MPI_Bcast */ @@ -6849,7 +6357,6 @@ H5C__load_entry(H5F_t *f, */ if (H5F_block_read(f, type->mem_type, addr + len, actual_len - len, image + len) < 0) { - #ifdef H5_HAVE_PARALLEL if (coll_access) { /* Push an error, but still participate in following MPI_Bcast */ @@ -7129,23 +6636,17 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) prev_is_dirty = prev_ptr->is_dirty; if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) { - /* Skip "dirty" corked entries. */ ++num_corked_entries; didnt_flush_entry = TRUE; } - else if (((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) && (!entry_ptr->flush_in_progress) && - (!entry_ptr->prefetched_dirty)) { - + else if ((entry_ptr->type->id != H5AC_EPOCH_MARKER_ID) && !entry_ptr->flush_in_progress && + !entry_ptr->prefetched_dirty) { didnt_flush_entry = FALSE; - if (entry_ptr->is_dirty) { - #if H5C_COLLECT_CACHE_STATS - if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) { - + if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) cache_ptr->entries_scanned_to_make_space++; - } #endif /* H5C_COLLECT_CACHE_STATS */ /* reset entries_removed_counter and @@ -7208,9 +6709,7 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) } if (prev_ptr != NULL) { - - if (didnt_flush_entry) { - + if (didnt_flush_entry) /* epoch markers don't get flushed, and we don't touch * entries that are in the process of being flushed. * Hence no need for sanity checks, as we haven't @@ -7218,10 +6717,8 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) * and go on. 
*/ entry_ptr = prev_ptr; - } - else if ((restart_scan) || (prev_ptr->is_dirty != prev_is_dirty) || - (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) { - + else if (restart_scan || prev_ptr->is_dirty != prev_is_dirty || prev_ptr->next != next_ptr || + prev_ptr->is_protected || prev_ptr->is_pinned) { /* something has happened to the LRU -- start over * from the tail. */ @@ -7229,26 +6726,18 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) entry_ptr = cache_ptr->LRU_tail_ptr; H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) } - else { - + else entry_ptr = prev_ptr; - } } - else { - + else entry_ptr = NULL; - } entries_examined++; - if (cache_ptr->index_size >= cache_ptr->max_cache_size) { - + if (cache_ptr->index_size >= cache_ptr->max_cache_size) empty_space = 0; - } - else { - + else empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - } HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size)); } @@ -7260,18 +6749,14 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped; cache_ptr->total_entries_scanned_in_msic += total_entries_scanned; - if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic) { - + if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic) cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped; - } if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic) cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped; - if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic) { - + if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic) cache_ptr->max_entries_scanned_in_msic = total_entries_scanned; - } #endif /* H5C_COLLECT_CACHE_STATS */ /* NEED: work on a better assert for corked entries */ @@ -7290,7 +6775,6 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ } else { - HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS); #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS @@ -7306,15 +6790,14 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) prev_ptr = entry_ptr->aux_prev; - if ((!(entry_ptr->prefetched_dirty)) + if (!entry_ptr->prefetched_dirty #ifdef H5_HAVE_PARALLEL - && (!(entry_ptr->coll_access)) + && !entry_ptr->coll_access #endif /* H5_HAVE_PARALLEL */ ) { if (H5C__flush_single_entry( f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") - } /* end if */ /* we are scanning the clean LRU, so the serialize function @@ -7393,7 +6876,7 @@ H5C_validate_lru_list(H5C_t *cache_ptr) ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") - if ((entry_ptr->is_pinned) || (entry_ptr->pinned_from_client) || (entry_ptr->pinned_from_cache)) + if (entry_ptr->is_pinned || entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields") len++; @@ -7627,7 +7110,6 @@ H5C_entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr) * *------------------------------------------------------------------------- */ - herr_t H5C__flush_marked_entries(H5F_t *f) { @@ -7640,12 +7122,10 @@ 
H5C__flush_marked_entries(H5F_t *f) /* Enable the slist, as it is needed in the flush */ if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed") /* Flush all marked entries */ if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache") /* Disable the slist. Set the clear_slist parameter to TRUE @@ -7653,13 +7133,10 @@ H5C__flush_marked_entries(H5F_t *f) * H5C__FLUSH_MARKED_ENTRIES_FLAG. */ if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed") done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C__flush_marked_entries */ /*------------------------------------------------------------------------- @@ -8553,15 +8030,12 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) if ((entry_ptr->type->pre_serialize) && ((entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size, &new_addr, &new_len, &serialize_flags) < 0)) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry") /* Check for any flags set in the pre-serialize callback */ if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) { - /* Check for unexpected flags from serialize callback */ if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG)) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)") #ifdef H5_HAVE_PARALLEL @@ -8585,7 +8059,6 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) * other entries during its flush. */ if (cache_ptr->aux_ptr != NULL) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case") #endif @@ -8593,14 +8066,12 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) * data structures */ if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) { - /* Sanity check */ HDassert(new_len > 0); /* Allocate a new image buffer */ if (NULL == (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer") @@ -8636,20 +8107,17 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) /* Finally, update the entry for its new size */ entry_ptr->size = new_len; - } /* end if */ /* If required, udate the entry and the cache data structures * for a move */ if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) { - /* Update stats and entries relocated counter */ H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) /* We must update cache data structures for the change in address */ if (entry_ptr->addr == old_addr) { - /* Delete the entry from the hash table and the slist */ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL); H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE); @@ -8660,18 +8128,14 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) /* And then reinsert in the index and slist */ H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL); H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL); - - } /* end if */ - else { /* move is already done for us -- just do sanity checks */ - + } /* end if */ + else /* move is already done for us -- just do sanity checks */ HDassert(entry_ptr->addr == new_addr); - } } /* end if */ } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */ /* Serialize 
object into buffer */ if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry") #if H5C_DO_MEMORY_SANITY_CHECKS @@ -8689,12 +8153,9 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) */ HDassert(entry_ptr->flush_dep_nunser_children == 0); - if (entry_ptr->flush_dep_nparents > 0) { - + if (entry_ptr->flush_dep_nparents > 0) if (H5C__mark_flush_dep_serialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents") - } done: FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c index 0dc975693c0..ed95bcda691 100644 --- a/src/H5Cdbg.c +++ b/src/H5Cdbg.c @@ -291,49 +291,33 @@ H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn) i = 0; node_ptr = H5SL_first(cache_ptr->slist_ptr); - - if (node_ptr != NULL) { - + if (node_ptr != NULL) entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - } - else { - + else entry_ptr = NULL; - } while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDfprintf(stdout, "%s%d 0x%016llx %4lld %d/%d %d %s\n", cache_ptr->prefix, i, (long long)(entry_ptr->addr), (long long)(entry_ptr->size), (int)(entry_ptr->is_protected), (int)(entry_ptr->is_pinned), (int)(entry_ptr->is_dirty), entry_ptr->type->name); - HDfprintf(stdout, " node_ptr = %p, item = %p\n", (void *)node_ptr, H5SL_item(node_ptr)); /* increment node_ptr before we delete its target */ - node_ptr = H5SL_next(node_ptr); - - if (node_ptr != NULL) { - + if (node_ptr != NULL) entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - } - else { - + else entry_ptr = NULL; - } i++; - } /* end while */ } /* end if */ HDfprintf(stdout, "\n\n"); FUNC_LEAVE_NOAPI(ret_value) - } /* H5C_dump_cache_skip_list() */ #endif /* NDEBUG */ diff --git a/src/H5Cimage.c b/src/H5Cimage.c index 70944beb6df..105272cfef2 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -298,8 +298,8 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr) HDassert(fake_cache_ptr->image_entries); for (u = 0; u < fake_cache_ptr->num_entries_in_image; u++) { - (fake_cache_ptr->image_entries)[u].magic = H5C_IMAGE_ENTRY_T_MAGIC; - (fake_cache_ptr->image_entries)[u].image_ptr = NULL; + fake_cache_ptr->image_entries[u].magic = H5C_IMAGE_ENTRY_T_MAGIC; + fake_cache_ptr->image_entries[u].image_ptr = NULL; /* touch up f->shared->cache to satisfy sanity checks... 
*/ f->shared->cache = fake_cache_ptr; @@ -310,43 +310,42 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr) f->shared->cache = cache_ptr; /* verify expected contents */ - HDassert((cache_ptr->image_entries)[u].addr == (fake_cache_ptr->image_entries)[u].addr); - HDassert((cache_ptr->image_entries)[u].size == (fake_cache_ptr->image_entries)[u].size); - HDassert((cache_ptr->image_entries)[u].type_id == (fake_cache_ptr->image_entries)[u].type_id); - HDassert((cache_ptr->image_entries)[u].lru_rank == (fake_cache_ptr->image_entries)[u].lru_rank); - HDassert((cache_ptr->image_entries)[u].is_dirty == (fake_cache_ptr->image_entries)[u].is_dirty); + HDassert(cache_ptr->image_entries[u].addr == fake_cache_ptr->image_entries[u].addr); + HDassert(cache_ptr->image_entries[u].size == fake_cache_ptr->image_entries[u].size); + HDassert(cache_ptr->image_entries[u].type_id == fake_cache_ptr->image_entries[u].type_id); + HDassert(cache_ptr->image_entries[u].lru_rank == fake_cache_ptr->image_entries[u].lru_rank); + HDassert(cache_ptr->image_entries[u].is_dirty == fake_cache_ptr->image_entries[u].is_dirty); /* don't check image_fd_height as it is not stored in * the metadata cache image block. */ - HDassert((cache_ptr->image_entries)[u].fd_child_count == - (fake_cache_ptr->image_entries)[u].fd_child_count); - HDassert((cache_ptr->image_entries)[u].fd_dirty_child_count == - (fake_cache_ptr->image_entries)[u].fd_dirty_child_count); - HDassert((cache_ptr->image_entries)[u].fd_parent_count == - (fake_cache_ptr->image_entries)[u].fd_parent_count); + HDassert(cache_ptr->image_entries[u].fd_child_count == + fake_cache_ptr->image_entries[u].fd_child_count); + HDassert(cache_ptr->image_entries[u].fd_dirty_child_count == + fake_cache_ptr->image_entries[u].fd_dirty_child_count); + HDassert(cache_ptr->image_entries[u].fd_parent_count == + fake_cache_ptr->image_entries[u].fd_parent_count); - for (v = 0; v < (cache_ptr->image_entries)[u].fd_parent_count; v++) - HDassert((cache_ptr->image_entries)[u].fd_parent_addrs[v] == - (fake_cache_ptr->image_entries)[u].fd_parent_addrs[v]); + for (v = 0; v < cache_ptr->image_entries[u].fd_parent_count; v++) + HDassert(cache_ptr->image_entries[u].fd_parent_addrs[v] == + fake_cache_ptr->image_entries[u].fd_parent_addrs[v]); /* free the fd_parent_addrs array if it exists */ - if ((fake_cache_ptr->image_entries)[u].fd_parent_addrs) { - HDassert((fake_cache_ptr->image_entries)[u].fd_parent_count > 0); - (fake_cache_ptr->image_entries)[u].fd_parent_addrs = - (haddr_t *)H5MM_xfree((fake_cache_ptr->image_entries)[u].fd_parent_addrs); - (fake_cache_ptr->image_entries)[u].fd_parent_count = 0; + if (fake_cache_ptr->image_entries[u].fd_parent_addrs) { + HDassert(fake_cache_ptr->image_entries[u].fd_parent_count > 0); + fake_cache_ptr->image_entries[u].fd_parent_addrs = + (haddr_t *)H5MM_xfree(fake_cache_ptr->image_entries[u].fd_parent_addrs); + fake_cache_ptr->image_entries[u].fd_parent_count = 0; } /* end if */ else - HDassert((fake_cache_ptr->image_entries)[u].fd_parent_count == 0); + HDassert(fake_cache_ptr->image_entries[u].fd_parent_count == 0); - HDassert((cache_ptr->image_entries)[u].image_ptr); - HDassert((fake_cache_ptr->image_entries)[u].image_ptr); - HDassert(!HDmemcmp((cache_ptr->image_entries)[u].image_ptr, - (fake_cache_ptr->image_entries)[u].image_ptr, - (cache_ptr->image_entries)[u].size)); + HDassert(cache_ptr->image_entries[u].image_ptr); + HDassert(fake_cache_ptr->image_entries[u].image_ptr); + HDassert(!HDmemcmp(cache_ptr->image_entries[u].image_ptr, + 
fake_cache_ptr->image_entries[u].image_ptr, cache_ptr->image_entries[u].size)); - (fake_cache_ptr->image_entries)[u].image_ptr = - H5MM_xfree((fake_cache_ptr->image_entries)[u].image_ptr); + fake_cache_ptr->image_entries[u].image_ptr = + H5MM_xfree(fake_cache_ptr->image_entries[u].image_ptr); } /* end for */ HDassert((size_t)(q - (const uint8_t *)cache_ptr->image_buffer) == @@ -692,12 +691,10 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t pf_entry_ptr->image_ptr = NULL; if (pf_entry_ptr->is_dirty) { - HDassert(((cache_ptr->slist_enabled) && (pf_entry_ptr->in_slist)) || ((!cache_ptr->slist_enabled) && (!pf_entry_ptr->in_slist))); flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; - } /* end if */ if (H5C__flush_single_entry(f, pf_entry_ptr, flush_flags) < 0) @@ -826,7 +823,7 @@ H5C__free_image_entries_array(H5C_t *cache_ptr) H5C_image_entry_t *ie_ptr; /* Image entry to release */ /* Get pointer to image entry */ - ie_ptr = &((cache_ptr->image_entries)[u]); + ie_ptr = &(cache_ptr->image_entries[u]); /* Sanity checks */ HDassert(ie_ptr); @@ -987,7 +984,6 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr) int mpi_result; if ((NULL == aux_ptr) || (aux_ptr->mpi_rank == 0)) { - HDassert((NULL == aux_ptr) || (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC)); #endif /* H5_HAVE_PARALLEL */ @@ -1003,21 +999,16 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr) #ifdef H5_HAVE_PARALLEL if (aux_ptr) { - /* Broadcast cache image */ if (MPI_SUCCESS != (mpi_result = MPI_Bcast(cache_ptr->image_buffer, (int)cache_ptr->image_len, MPI_BYTE, 0, aux_ptr->mpi_comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result) - } /* end if */ } /* end if */ else if (aux_ptr) { - /* Retrieve the contents of the metadata cache image from process 0 */ if (MPI_SUCCESS != (mpi_result = MPI_Bcast(cache_ptr->image_buffer, (int)cache_ptr->image_len, MPI_BYTE, 0, aux_ptr->mpi_comm))) - HMPI_GOTO_ERROR(FAIL, "can't receive cache image MPI_Bcast", mpi_result) } /* end else-if */ } /* end block */ @@ -1866,7 +1857,7 @@ H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint HDassert(buf); HDassert(*buf); HDassert(entry_num < cache_ptr->num_entries_in_image); - ie_ptr = &((cache_ptr->image_entries)[entry_num]); + ie_ptr = &(cache_ptr->image_entries[entry_num]); HDassert(ie_ptr); HDassert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC); @@ -2217,7 +2208,7 @@ H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigne HDassert(buf); HDassert(*buf); HDassert(entry_num < cache_ptr->num_entries_in_image); - ie_ptr = &((cache_ptr->image_entries)[entry_num]); + ie_ptr = &(cache_ptr->image_entries[entry_num]); HDassert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC); /* Get pointer to buffer to encode into */ @@ -2924,12 +2915,14 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) HDassert(num_entries_in_image <= num_entries_tentatively_in_image); #ifndef NDEBUG - unsigned j = 0; - for (int i = H5C_MAX_RING_IN_IMAGE + 1; i <= H5C_RING_SB; i++) - j += cache_ptr->index_ring_len[i]; + { + unsigned j = 0; + for (int i = H5C_MAX_RING_IN_IMAGE + 1; i <= H5C_RING_SB; i++) + j += cache_ptr->index_ring_len[i]; - /* This will change */ - HDassert(entries_visited == (num_entries_tentatively_in_image + j)); + /* This will change */ + HDassert(entries_visited == (num_entries_tentatively_in_image + j)); + } #endif cache_ptr->num_entries_in_image = num_entries_in_image; @@ -3127,23 +3120,17 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) i = -1; 
entry_ptr = cache_ptr->LRU_head_ptr; - while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->type != NULL); if (entry_ptr->prefetched) { - HDassert(entry_ptr->lru_rank != 0); HDassert((entry_ptr->lru_rank == -1) || (entry_ptr->lru_rank > i)); if ((entry_ptr->lru_rank > 1) && (entry_ptr->lru_rank > i + 1)) - lru_rank_holes += entry_ptr->lru_rank - (i + 1); - i = entry_ptr->lru_rank; - } /* end if */ entry_ptr = entry_ptr->next; @@ -3168,10 +3155,8 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) */ hbool_t write_permitted = FALSE; - if (cache_ptr->check_write_permitted != NULL) { - if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "Can't get write_permitted") - } /* end if */ + if (cache_ptr->check_write_permitted && (cache_ptr->check_write_permitted)(f, &write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "Can't get write_permitted") else write_permitted = cache_ptr->write_permitted; diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index f154c8a1d26..7d87d454d96 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -212,7 +212,7 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha /* Create skip list of entries for collective write */ if (NULL == (cache_ptr->coll_write_list = H5SL_create(H5SL_TYPE_HADDR, NULL))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for entries") + HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create skip list for entries") } /* end if */ n = num_candidates / (unsigned)mpi_size; @@ -220,8 +220,7 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha if (NULL == (candidate_assignment_table = (unsigned *)H5MM_malloc(sizeof(unsigned) * (size_t)(mpi_size + 1)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, - "memory allocation failed for candidate assignment table") + HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL, "memory allocation failed for candidate assignment table") candidate_assignment_table[0] = 0; candidate_assignment_table[mpi_size] = num_candidates; @@ -437,7 +436,6 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr) (cache_ptr->slist_len <= (cache_ptr->dLRU_list_len + cache_ptr->pel_len))); if (space_needed > 0) { - H5C_cache_entry_t *entry_ptr; unsigned nominated_entries_count = 0; size_t nominated_entries_size = 0; @@ -449,11 +447,9 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr) * entries to free up the necessary space. 
*/ entry_ptr = cache_ptr->dLRU_tail_ptr; - while ((nominated_entries_size < space_needed) && ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) && (entry_ptr != NULL)) { - HDassert(!(entry_ptr->is_protected)); HDassert(!(entry_ptr->is_read_only)); HDassert(entry_ptr->ro_ref_count == 0); @@ -461,15 +457,13 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr) HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist)); nominated_addr = entry_ptr->addr; - if (H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed") nominated_entries_size += entry_ptr->size; nominated_entries_count++; - entry_ptr = entry_ptr->aux_prev; + entry_ptr = entry_ptr->aux_prev; } /* end while */ HDassert(entry_ptr == NULL); @@ -478,13 +472,10 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr) * protected entry list as well -- scan it too if necessary */ entry_ptr = cache_ptr->pel_head_ptr; - while ((nominated_entries_size < space_needed) && ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) && (entry_ptr != NULL)) { - if (entry_ptr->is_dirty) { - HDassert(!(entry_ptr->is_protected)); HDassert(!(entry_ptr->is_read_only)); HDassert(entry_ptr->ro_ref_count == 0); @@ -492,29 +483,22 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr) HDassert(entry_ptr->in_slist); nominated_addr = entry_ptr->addr; - if (H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed") nominated_entries_size += entry_ptr->size; nominated_entries_count++; - } /* end if */ entry_ptr = entry_ptr->next; - } /* end while */ HDassert((!cache_ptr->slist_enabled) || (nominated_entries_count == cache_ptr->slist_len)); HDassert(nominated_entries_size == space_needed); - } /* end if */ done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C_construct_candidate_list__clean_cache() */ /*------------------------------------------------------------------------- @@ -552,30 +536,20 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr) if (cache_ptr->max_cache_size > cache_ptr->index_size) { if (((cache_ptr->max_cache_size - cache_ptr->index_size) + cache_ptr->cLRU_list_size) >= - cache_ptr->min_clean_size) { - + cache_ptr->min_clean_size) space_needed = 0; - } - else { - + else space_needed = cache_ptr->min_clean_size - ((cache_ptr->max_cache_size - cache_ptr->index_size) + cache_ptr->cLRU_list_size); - } } /* end if */ else { - - if (cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size) { - + if (cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size) space_needed = 0; - } - else { - + else space_needed = cache_ptr->min_clean_size - cache_ptr->cLRU_list_size; - } } /* end else */ if (space_needed > 0) { /* we have work to do */ - H5C_cache_entry_t *entry_ptr; unsigned nominated_entries_count = 0; size_t nominated_entries_size = 0; @@ -586,11 +560,9 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr) * entries to free up the necessary space. 
*/ entry_ptr = cache_ptr->dLRU_tail_ptr; - while ((nominated_entries_size < space_needed) && ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) && (entry_ptr != NULL) && (!entry_ptr->flush_me_last)) { - haddr_t nominated_addr; HDassert(!(entry_ptr->is_protected)); @@ -600,15 +572,13 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr) HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist)); nominated_addr = entry_ptr->addr; - if (H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed") nominated_entries_size += entry_ptr->size; nominated_entries_count++; - entry_ptr = entry_ptr->aux_prev; + entry_ptr = entry_ptr->aux_prev; } /* end while */ HDassert((!cache_ptr->slist_enabled) || (nominated_entries_count <= cache_ptr->slist_len)); @@ -833,10 +803,8 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr u = 0; entry_ptr = cache_ptr->pl_head_ptr; while (entry_ptr != NULL) { - if (entry_ptr->clear_on_unprotect) { - + if (entry_ptr->clear_on_unprotect) u++; - } entry_ptr = entry_ptr->next; } HDassert((entries_cleared + u) == ce_array_len); @@ -846,7 +814,7 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr #ifdef H5C_DO_EXTREME_SANITY_CHECKS if (H5C_validate_protected_entry_list(cache_ptr) < 0 || H5C_validate_pinned_entry_list(cache_ptr) < 0 || H5C_validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 24c0263514d..5b3d942fd31 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -212,7 +212,7 @@ if ( ( (new_size) > (dll_size) ) || \ (head_ptr) = (entry_ptr); \ } \ (len)++; \ - (Size) += entry_ptr->size; \ + (Size) += (entry_ptr)->size; \ } /* H5C__DLL_PREPEND() */ #define H5C__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ @@ -236,10 +236,10 @@ if ( ( (new_size) > (dll_size) ) || \ } \ else \ (entry_ptr)->next->prev = (entry_ptr)->prev; \ - entry_ptr->next = NULL; \ - entry_ptr->prev = NULL; \ + (entry_ptr)->next = NULL; \ + (entry_ptr)->prev = NULL; \ (len)--; \ - (Size) -= entry_ptr->size; \ + (Size) -= (entry_ptr)->size; \ } \ } /* H5C__DLL_REMOVE() */ @@ -525,61 +525,55 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ ***********************************************************************/ #define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \ - (cache_ptr->cache_accesses)++; \ - if ( hit ) { \ - (cache_ptr->cache_hits)++; \ - } \ + (cache_ptr)->cache_accesses++; \ + if (hit) \ + (cache_ptr)->cache_hits++; #if H5C_COLLECT_CACHE_STATS #define H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \ - (cache_ptr)->max_index_size = (cache_ptr)->index_size; \ - if ( (cache_ptr)->clean_index_size > \ - (cache_ptr)->max_clean_index_size ) \ - (cache_ptr)->max_clean_index_size = \ - (cache_ptr)->clean_index_size; \ - if ( (cache_ptr)->dirty_index_size > \ - (cache_ptr)->max_dirty_index_size ) \ - (cache_ptr)->max_dirty_index_size = \ - (cache_ptr)->dirty_index_size; + if ((cache_ptr)->index_size > (cache_ptr)->max_index_size) \ + (cache_ptr)->max_index_size = (cache_ptr)->index_size; \ + if ((cache_ptr)->clean_index_size > (cache_ptr)->max_clean_index_size) \ + 
(cache_ptr)->max_clean_index_size = (cache_ptr)->clean_index_size; \ + if ((cache_ptr)->dirty_index_size > (cache_ptr)->max_dirty_index_size) \ + (cache_ptr)->max_dirty_index_size = (cache_ptr)->dirty_index_size; #define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) \ - (((cache_ptr)->dirty_pins)[(entry_ptr)->type->id])++; - -#define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) \ - if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ - (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ - if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ - (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ - if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ - (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ + (cache_ptr)->dirty_pins[(entry_ptr)->type->id]++; + +#define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) \ + if ((cache_ptr)->slist_len > (cache_ptr)->max_slist_len) \ + (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ + if ((cache_ptr)->slist_size > (cache_ptr)->max_slist_size) \ + (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ + if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \ + (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ + if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; -#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) \ - if ( cache_ptr->flush_in_progress ) \ - ((cache_ptr)->cache_flush_moves[(entry_ptr)->type->id])++; \ - if ( entry_ptr->flush_in_progress ) \ - ((cache_ptr)->entry_flush_moves[(entry_ptr)->type->id])++; \ - (((cache_ptr)->moves)[(entry_ptr)->type->id])++; \ - (cache_ptr)->entries_relocated_counter++; +#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) \ + if ((cache_ptr)->flush_in_progress) \ + (cache_ptr)->cache_flush_moves[(entry_ptr)->type->id]++; \ + if ((entry_ptr)->flush_in_progress) \ + (cache_ptr)->entry_flush_moves[(entry_ptr)->type->id]++; \ + (cache_ptr)->moves[(entry_ptr)->type->id]++; \ + (cache_ptr)->entries_relocated_counter++; #define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)\ - if ( cache_ptr->flush_in_progress ) \ - ((cache_ptr)->cache_flush_size_changes[(entry_ptr)->type->id])++; \ - if ( entry_ptr->flush_in_progress ) \ - ((cache_ptr)->entry_flush_size_changes[(entry_ptr)->type->id])++; \ - if ( (entry_ptr)->size < (new_size) ) { \ - ((cache_ptr)->size_increases[(entry_ptr)->type->id])++; \ - H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ - (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ - if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ - (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ - } else if ( (entry_ptr)->size > (new_size) ) { \ - ((cache_ptr)->size_decreases[(entry_ptr)->type->id])++; \ - } + if ((cache_ptr)->flush_in_progress) \ + (cache_ptr)->cache_flush_size_changes[(entry_ptr)->type->id]++; \ + if ((entry_ptr)->flush_in_progress) \ + (cache_ptr)->entry_flush_size_changes[(entry_ptr)->type->id]++; \ + if ((entry_ptr)->size < (new_size)) { \ + (cache_ptr)->size_increases[(entry_ptr)->type->id]++; \ + H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ + if ((cache_ptr)->slist_size > (cache_ptr)->max_slist_size) \ + (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ + if ((cache_ptr)->pl_size > (cache_ptr)->max_pl_size) \ + (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ + } else if ((entry_ptr)->size > (new_size)) \ + 
(cache_ptr)->size_decreases[(entry_ptr)->type->id]++; #define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \ (cache_ptr)->total_ht_insertions++; @@ -588,7 +582,7 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ (cache_ptr)->total_ht_deletions++; #define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) \ - if ( success ) { \ + if (success) { \ (cache_ptr)->successful_ht_searches++; \ (cache_ptr)->total_successful_ht_search_depth += depth; \ } else { \ @@ -597,21 +591,19 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ } #define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \ - ((cache_ptr)->unpins)[(entry_ptr)->type->id]++; + (cache_ptr)->unpins[(entry_ptr)->type->id]++; #define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) \ - ((cache_ptr)->slist_scan_restarts)++; + (cache_ptr)->slist_scan_restarts++; #define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) \ - ((cache_ptr)->LRU_scan_restarts)++; + (cache_ptr)->LRU_scan_restarts++; #define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr) \ - ((cache_ptr)->index_scan_restarts)++; + (cache_ptr)->index_scan_restarts++; #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) \ -{ \ - (cache_ptr)->images_created++; \ -} + (cache_ptr)->images_created++; #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) \ { \ @@ -631,14 +623,12 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ #define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \ { \ (cache_ptr)->prefetches++; \ - if ( dirty ) \ + if (dirty) \ (cache_ptr)->dirty_prefetches++; \ } #define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \ -{ \ - (cache_ptr)->prefetch_hits++; \ -} + (cache_ptr)->prefetch_hits++; #if H5C_COLLECT_CACHE_ENTRY_STATS @@ -652,113 +642,96 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ #define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ { \ - (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \ + (cache_ptr)->clears[(entry_ptr)->type->id]++; \ if((entry_ptr)->is_pinned) \ - (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \ - ((entry_ptr)->clears)++; \ + (cache_ptr)->pinned_clears[(entry_ptr)->type->id]++; \ + (entry_ptr)->clears++; \ } #define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ { \ - (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \ + (cache_ptr)->flushes[(entry_ptr)->type->id]++; \ if((entry_ptr)->is_pinned) \ - (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \ - ((entry_ptr)->flushes)++; \ + (cache_ptr)->pinned_flushes[(entry_ptr)->type->id]++; \ + (entry_ptr)->flushes++; \ } #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \ { \ - if ( take_ownership ) \ - (((cache_ptr)->take_ownerships)[(entry_ptr)->type->id])++; \ + if (take_ownership) \ + (cache_ptr)->take_ownerships[(entry_ptr)->type->id]++; \ else \ - (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \ - if ( (entry_ptr)->accesses > \ - ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] = \ - (entry_ptr)->accesses; \ - if ( (entry_ptr)->accesses < \ - ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] = \ - (entry_ptr)->accesses; \ - if ( (entry_ptr)->clears > \ - ((cache_ptr)->max_clears)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_clears)[(entry_ptr)->type->id] \ - = (entry_ptr)->clears; \ - if ( (entry_ptr)->flushes > \ - ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] ) \ - 
((cache_ptr)->max_flushes)[(entry_ptr)->type->id] \ - = (entry_ptr)->flushes; \ - if ( (entry_ptr)->size > \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] \ - = (entry_ptr)->size; \ - if ( (entry_ptr)->pins > \ - ((cache_ptr)->max_pins)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_pins)[(entry_ptr)->type->id] \ - = (entry_ptr)->pins; \ + (cache_ptr)->evictions[(entry_ptr)->type->id]++; \ + if ((entry_ptr)->accesses > (cache_ptr)->max_accesses[(entry_ptr)->type->id]) \ + (cache_ptr)->max_accesses[(entry_ptr)->type->id] = (entry_ptr)->accesses; \ + if ((entry_ptr)->accesses < (cache_ptr)->min_accesses[(entry_ptr)->type->id]) \ + (cache_ptr)->min_accesses[(entry_ptr)->type->id] = (entry_ptr)->accesses; \ + if ((entry_ptr)->clears > (cache_ptr)->max_clears[(entry_ptr)->type->id]) \ + (cache_ptr)->max_clears[(entry_ptr)->type->id] = (entry_ptr)->clears; \ + if ((entry_ptr)->flushes > (cache_ptr)->max_flushes[(entry_ptr)->type->id]) \ + (cache_ptr)->max_flushes[(entry_ptr)->type->id] = (entry_ptr)->flushes; \ + if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \ + (cache_ptr)->max_size[(entry_ptr)->type->id] = (entry_ptr)->size; \ + if ((entry_ptr)->pins > (cache_ptr)->max_pins[(entry_ptr)->type->id]) \ + (cache_ptr)->max_pins[(entry_ptr)->type->id] = (entry_ptr)->pins; \ } #define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ { \ - (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \ - if ( (entry_ptr)->is_pinned ) { \ - (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \ - ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ + (cache_ptr)->insertions[(entry_ptr)->type->id]++; \ + if ((entry_ptr)->is_pinned) { \ + (cache_ptr)->pinned_insertions[(entry_ptr)->type->id]++; \ + (cache_ptr)->pins[(entry_ptr)->type->id]++; \ (entry_ptr)->pins++; \ - if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ + if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ + if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ } \ - if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ + if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ + if ((cache_ptr)->slist_len > (cache_ptr)->max_slist_len) \ (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ - if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ + if ((cache_ptr)->slist_size > (cache_ptr)->max_slist_size) \ (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ - if ( (entry_ptr)->size > \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] \ - = (entry_ptr)->size; \ - cache_ptr->entries_inserted_counter++; \ + if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \ + (cache_ptr)->max_size[(entry_ptr)->type->id] = (entry_ptr)->size; \ + (cache_ptr)->entries_inserted_counter++; \ } #define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ { \ - if ( hit ) \ - ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \ + if (hit) \ + (cache_ptr)->hits[(entry_ptr)->type->id]++; \ else \ - ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \ - if ( ! 
((entry_ptr)->is_read_only) ) { \ - ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \ - } else { \ - ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \ - if ( ((entry_ptr)->ro_ref_count) > \ - ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \ - ((entry_ptr)->ro_ref_count); \ + (cache_ptr)->misses[(entry_ptr)->type->id]++; \ + if (!(entry_ptr)->is_read_only) \ + (cache_ptr)->write_protects[(entry_ptr)->type->id]++; \ + else { \ + (cache_ptr)->read_protects[(entry_ptr)->type->id]++; \ + if ((entry_ptr)->ro_ref_count > (cache_ptr)->max_read_protects[(entry_ptr)->type->id]) \ + (cache_ptr)->max_read_protects[(entry_ptr)->type->id] = (entry_ptr)->ro_ref_count; \ } \ - if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ + if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \ + if ((cache_ptr)->pl_len > (cache_ptr)->max_pl_len) \ (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ - if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ + if ((cache_ptr)->pl_size > (cache_ptr)->max_pl_size) \ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ - if ( (entry_ptr)->size > \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] = (entry_ptr)->size; \ - ((entry_ptr)->accesses)++; \ + if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \ + (cache_ptr)->max_size[(entry_ptr)->type->id] = (entry_ptr)->size; \ + (entry_ptr)->accesses++; \ } #define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ { \ - ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ + (cache_ptr)->pins[(entry_ptr)->type->id]++; \ (entry_ptr)->pins++; \ - if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ + if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ + if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ } @@ -768,24 +741,24 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ #define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ { \ - (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \ + (cache_ptr)->clears[(entry_ptr)->type->id]++; \ if((entry_ptr)->is_pinned) \ - (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \ + (cache_ptr)->pinned_clears[(entry_ptr)->type->id]++; \ } #define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ { \ - (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \ - if ( (entry_ptr)->is_pinned ) \ - (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \ + (cache_ptr)->flushes[(entry_ptr)->type->id]++; \ + if ((entry_ptr)->is_pinned) \ + (cache_ptr)->pinned_flushes[(entry_ptr)->type->id]++; \ } #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \ { \ - if ( take_ownership ) \ - (((cache_ptr)->take_ownerships)[(entry_ptr)->type->id])++; \ + if (take_ownership) \ + (cache_ptr)->take_ownerships[(entry_ptr)->type->id]++; \ else \ - (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \ + (cache_ptr)->evictions[(entry_ptr)->type->id]++; \ } #define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ @@ -806,7 +779,7 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ if ( 
(cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ - cache_ptr->entries_inserted_counter++; \ + (cache_ptr)->entries_inserted_counter++; \ } #define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ @@ -855,7 +828,7 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ #define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) #define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) #define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) -#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) +#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) {} #define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) #define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) @@ -1238,16 +1211,16 @@ if ( ( (cache_ptr)->index_size != \ ((cache_ptr)->index)[k] = (entry_ptr); \ (cache_ptr)->index_len++; \ (cache_ptr)->index_size += (entry_ptr)->size; \ - ((cache_ptr)->index_ring_len[entry_ptr->ring])++; \ - ((cache_ptr)->index_ring_size[entry_ptr->ring]) \ + ((cache_ptr)->index_ring_len[(entry_ptr)->ring])++; \ + ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) \ += (entry_ptr)->size; \ if((entry_ptr)->is_dirty) { \ (cache_ptr)->dirty_index_size += (entry_ptr)->size; \ - ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \ + ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ += (entry_ptr)->size; \ } else { \ (cache_ptr)->clean_index_size += (entry_ptr)->size; \ - ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \ + ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \ += (entry_ptr)->size; \ } \ if((entry_ptr)->flush_me_last) { \ @@ -1276,16 +1249,16 @@ if ( ( (cache_ptr)->index_size != \ (entry_ptr)->ht_prev = NULL; \ (cache_ptr)->index_len--; \ (cache_ptr)->index_size -= (entry_ptr)->size; \ - ((cache_ptr)->index_ring_len[entry_ptr->ring])--; \ - ((cache_ptr)->index_ring_size[entry_ptr->ring]) \ + ((cache_ptr)->index_ring_len[(entry_ptr)->ring])--; \ + ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) \ -= (entry_ptr)->size; \ if((entry_ptr)->is_dirty) { \ (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \ - ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \ + ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ -= (entry_ptr)->size; \ } else { \ (cache_ptr)->clean_index_size -= (entry_ptr)->size; \ - ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \ + ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \ -= (entry_ptr)->size; \ } \ if((entry_ptr)->flush_me_last) { \ @@ -1358,10 +1331,10 @@ if ( ( (cache_ptr)->index_size != \ { \ H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \ (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \ - ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \ + ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ -= (entry_ptr)->size; \ (cache_ptr)->clean_index_size += (entry_ptr)->size; \ - ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \ + ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \ += (entry_ptr)->size; \ H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \ } @@ -1370,10 +1343,10 @@ if ( ( (cache_ptr)->index_size != \ { \ H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \ (cache_ptr)->clean_index_size -= (entry_ptr)->size; \ - ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \ + ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \ -= (entry_ptr)->size; \ 
(cache_ptr)->dirty_index_size += (entry_ptr)->size; \ - ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \ + ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ += (entry_ptr)->size; \ H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \ } @@ -1385,21 +1358,21 @@ if ( ( (cache_ptr)->index_size != \ entry_ptr, was_clean) \ (cache_ptr)->index_size -= (old_size); \ (cache_ptr)->index_size += (new_size); \ - ((cache_ptr)->index_ring_size[entry_ptr->ring]) -= (old_size); \ - ((cache_ptr)->index_ring_size[entry_ptr->ring]) += (new_size); \ + ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) -= (old_size); \ + ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) += (new_size); \ if(was_clean) { \ (cache_ptr)->clean_index_size -= (old_size); \ - ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])-= (old_size); \ + ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring])-= (old_size); \ } else { \ (cache_ptr)->dirty_index_size -= (old_size); \ - ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])-= (old_size); \ + ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])-= (old_size); \ } \ if((entry_ptr)->is_dirty) { \ (cache_ptr)->dirty_index_size += (new_size); \ - ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])+= (new_size); \ + ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])+= (new_size); \ } else { \ (cache_ptr)->clean_index_size += (new_size); \ - ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])+= (new_size); \ + ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring])+= (new_size); \ } \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->il_len, \ (cache_ptr)->il_size, \ @@ -1595,7 +1568,7 @@ if ( ( (cache_ptr)->index_size != \ HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \ (cache_ptr)->slist_size -= (entry_ptr)->size; \ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \ - HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \ + HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] >= \ (entry_ptr)->size ); \ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\ (cache_ptr)->slist_len_increase--; \ @@ -1642,7 +1615,7 @@ if ( ( (cache_ptr)->index_size != \ HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \ (cache_ptr)->slist_size -= (entry_ptr)->size; \ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \ - HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \ + HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] >= \ (entry_ptr)->size ); \ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\ (entry_ptr)->in_slist = FALSE; \ @@ -1696,7 +1669,7 @@ if ( ( (cache_ptr)->index_size != \ (cache_ptr)->slist_size -= (old_size); \ (cache_ptr)->slist_size += (new_size); \ \ - HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] \ + HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] \ >= (old_size) ); \ \ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \ @@ -1741,7 +1714,7 @@ if ( ( (cache_ptr)->index_size != \ (cache_ptr)->slist_size -= (old_size); \ (cache_ptr)->slist_size += (new_size); \ \ - HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \ + HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] >= \ (old_size) ); \ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \ @@ -2153,7 +2126,7 @@ if ( ( (cache_ptr)->index_size != \ * appropriate. 
\ */ \ \ - if ( entry_ptr->is_dirty ) { \ + if ( (entry_ptr)->is_dirty ) { \ H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ (cache_ptr)->dLRU_tail_ptr, \ (cache_ptr)->dLRU_list_len, \ @@ -2259,7 +2232,7 @@ if ( ( (cache_ptr)->index_size != \ * appropriate. \ */ \ \ - if ( entry_ptr->is_dirty ) { \ + if ( (entry_ptr)->is_dirty ) { \ H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ (cache_ptr)->dLRU_tail_ptr, \ (cache_ptr)->dLRU_list_len, \ @@ -2473,7 +2446,7 @@ if ( ( (cache_ptr)->index_size != \ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ - if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr->is_protected ) ) ) {\ + if ( ! ( (entry_ptr)->is_pinned ) && ! ( ((entry_ptr)->is_protected ) ) ) {\ \ /* modified LRU specific code */ \ \ @@ -2550,7 +2523,7 @@ if ( ( (cache_ptr)->index_size != \ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ - if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr->is_protected ) ) ) {\ + if ( ! ( (entry_ptr)->is_pinned ) && ! ( ((entry_ptr)->is_protected ) ) ) {\ \ /* modified LRU specific code */ \ \ @@ -3162,9 +3135,9 @@ if ( ( (entry_ptr) == NULL ) || \ #define H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ + HDassert((cache_ptr)); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ + HDassert((entry_ptr)); \ \ /* Remove entry and insert at the head of the list. */ \ H5C__COLL_DLL_REMOVE((entry_ptr), (cache_ptr)->coll_head_ptr, \ From 5e29543aac4d40c4db2137a5c4b01e37c603ef19 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 11 Apr 2023 20:58:39 -0700 Subject: [PATCH 111/231] Use additional CPUs in GitHub actions (#2695) * Build: 3 CPUs * Tests: 2 CPUs For both Autotools and CMake --- .github/workflows/main.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 35c0d6430b7..ea63fd8f594 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -514,12 +514,12 @@ jobs: # - name: Autotools Build - run: make + run: make -j3 working-directory: ${{ runner.workspace }}/build if: matrix.generator == 'autogen' - name: CMake Build - run: cmake --build . --config ${{ matrix.build_mode.cmake }} + run: cmake --build . --parallel 3 --config ${{ matrix.build_mode.cmake }} working-directory: ${{ runner.workspace }}/build if: matrix.generator != 'autogen' @@ -528,12 +528,12 @@ jobs: # - name: Autotools Run Tests - run: make check + run: make check -j2 working-directory: ${{ runner.workspace }}/build if: (matrix.generator == 'autogen') && (matrix.run_tests) - name: CMake Run Tests - run: ctest --build . -C ${{ matrix.build_mode.cmake }} -V + run: ctest --build . --parallel 2 -C ${{ matrix.build_mode.cmake }} -V working-directory: ${{ runner.workspace }}/build # Skip Debug MSVC while we investigate H5L Java test timeouts if: (matrix.generator != 'autogen') && (matrix.run_tests) && ! 
((matrix.name == 'Windows MSVC CMake') && (matrix.build_mode.cmake == 'Debug')) From 3ff32e29249d3c46c2336e28b75eac2121c856db Mon Sep 17 00:00:00 2001 From: vchoi-hdfgroup <55293060+vchoi-hdfgroup@users.noreply.github.com> Date: Tue, 11 Apr 2023 23:06:29 -0500 Subject: [PATCH 112/231] Fix for github issue #2599: (#2665) * Fix for github issue #2599: As indicated in the description, a memory leak is detected when running "./h5dump pov". The problem is: when calling H5O__add_cont_msg() from H5O__chunk_deserialize(), memory is allocated for cont_msg_info->msgs. Eventually, when the library tries to load the continuation message via H5AC_protect() in H5O_protect(), an error is encountered due to illegal info in the continuation message. Due to the error, H5O_protect() exits but the memory allocated for cont_msg_info->msgs is not freed. When we figure out how to handle fuzzed files that we didn't generate, a test needs to be added to run h5dump with the provided "pov" file. * Add message to release notes for the fix to github issue #2599. --- release_docs/RELEASE.txt | 15 +++++++++++++++ src/H5Oint.c | 7 ++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index c04ead56580..0fde1ab7099 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -151,6 +151,21 @@ Bug Fixes since HDF5-1.13.3 release =================================== Library ------- + + - Memory leak + + A memory leak was detected when running h5dump with "pov". The memory was allocated + via H5FL__malloc() in hdf5/src/H5FL.c. + + The fuzzed file "pov" was an HDF5 file containing an illegal continuation message. + When deserializing the object header chunks for the file, memory is allocated for the + array of continuation messages (cont_msg_info->msgs) in the continuation message info struct. + As an error is encountered while loading the illegal message, the memory allocated for + cont_msg_info->msgs needs to be freed.
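
      To make the cleanup pattern described above concrete, here is a minimal, self-contained
      C sketch of the same idea. It is not HDF5's actual code; the names msg_info_t,
      load_messages(), and msg_is_valid() are hypothetical stand-ins for the
      continuation-message machinery.

      #include <stdlib.h>

      /* Hypothetical container for messages built up during deserialization */
      typedef struct {
          int    *msgs;
          size_t  nmsgs;
      } msg_info_t;

      /* Hypothetical validity check standing in for loading one continuation message */
      static int msg_is_valid(int msg) { return msg >= 0; }

      /* Build the message array; on any failure, free what was allocated so far
       * before returning, so an aborted load does not leak -- the same pattern the
       * fix below applies to cont_msg_info->msgs in H5O_protect(). */
      static int load_messages(const int *src, size_t n, msg_info_t *info)
      {
          info->nmsgs = 0;
          if (NULL == (info->msgs = malloc(n * sizeof *info->msgs)))
              return -1;
          for (size_t i = 0; i < n; i++) {
              if (!msg_is_valid(src[i])) {
                  free(info->msgs);   /* error path: release the partially built array */
                  info->msgs = NULL;
                  return -1;
              }
              info->msgs[info->nmsgs++] = src[i];
          }
          return 0;
      }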
+ + (VC - 2023/04/11 GH-2599) + + - Fixed memory leaks that could occur when reading a dataset from a malformed file diff --git a/src/H5Oint.c b/src/H5Oint.c index cdcf6c82157..cad27e30029 100644 --- a/src/H5Oint.c +++ b/src/H5Oint.c @@ -1166,9 +1166,14 @@ H5O_protect(const H5O_loc_t *loc, unsigned prot_flags, hbool_t pin_all_chunks) ret_value = oh; done: - if (ret_value == NULL && oh) + if (ret_value == NULL && oh) { + /* Release any continuation messages built up */ + if (cont_msg_info.msgs) + cont_msg_info.msgs = (H5O_cont_t *)H5FL_SEQ_FREE(H5O_cont_t, cont_msg_info.msgs); + if (H5O_unprotect(loc, oh, H5AC__NO_FLAGS_SET) < 0) HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, NULL, "unable to release object header") + } FUNC_LEAVE_NOAPI_TAG(ret_value) } /* end H5O_protect() */ From e1a407c1e7a74c93a28dc42d83b3fe709548a4fb Mon Sep 17 00:00:00 2001 From: bmribler <39579120+bmribler@users.noreply.github.com> Date: Wed, 12 Apr 2023 00:08:27 -0400 Subject: [PATCH 113/231] Added tables of CVE issues (#2685) --- doc/CVEissues-multi-tables.md | 103 ++++++++++++++++++++++++++++++++++ doc/CVEissues-table.md | 71 +++++++++++++++++++++++ 2 files changed, 174 insertions(+) create mode 100644 doc/CVEissues-multi-tables.md create mode 100644 doc/CVEissues-table.md diff --git a/doc/CVEissues-multi-tables.md b/doc/CVEissues-multi-tables.md new file mode 100644 index 00000000000..864c3d2895a --- /dev/null +++ b/doc/CVEissues-multi-tables.md @@ -0,0 +1,103 @@ +\*\*\* ***This is a work-in-progress*** \*\*\* + +| CVE_issue_number | JIRA__number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| [GitHub Vul 11](https://github.com/magicSwordsMan/PAAFS/tree/master/vul11) | HDFFV-10722 | Invalid write in H5O_mtime_encode() | 1.10.4 | only failed | only failed | H5O_mtime_encode_invalid-write-memory-access | ./h5repack $file1 $file2 | +| [GitHub Vul 10](https://github.com/magicSwordsMan/PAAFS/tree/master/vul10) | HDFFV-10721 | Invalid read in H5S_close() | 1.10.4 | only failed | only failed | H5S_close_invalid-read-memory-access | ./h5repack $file1 $file2 | +| [GitHub Vul 9](https://github.com/magicSwordsMan/PAAFS/tree/master/vul9) | HDFFV-10720 | Invalid write in H5F_addr_encode_len() | 1.10.4 | only failed | only failed | H5F_addr_encode_len_invalid-write-memory-access | ./h5repack $file1 $file2 | +| [CVE-2022-26061](https://nvd.nist.gov/vuln/detail/CVE-2022-26061) | SUPPORT-1923 | A heap-based buffer overflow vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | +| [CVE-2022-25972](https://nvd.nist.gov/vuln/detail/CVE-2022-25972) | SUPPORT-1923 | An out-of-bounds write vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | +| [CVE-2022-25942](https://nvd.nist.gov/vuln/detail/CVE-2022-25942) | SUPPORT-1923 | An out-of-bounds read vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | + +| CVE_issue_number | JIRA_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| 
[CVE-2021-46244](https://nvd.nist.gov/vuln/detail/CVE-2021-46244) | [github #1327](https://github.com/HDFGroup/hdf5/issues/1327) | Divide By Zero in H5T__complete_copy () | 1.13.1-1 |Floating exception (core dumped)|failed with unable to open dataset | POC-GH1327 | | +| [CVE-2021-46243](https://nvd.nist.gov/vuln/detail/CVE-2021-46243) | [github #1326](https://github.com/HDFGroup/hdf5/issues/1326) | An untrusted pointer dereference in H5O__dtype_decode_helper () | 1.13.1-1 |Segmentation fault (core dumped)| Dataset *ERROR* | POC-GH1326 | | +| [CVE-2021-46242](https://nvd.nist.gov/vuln/detail/CVE-2021-46242) | [github #1329](https://github.com/HDFGroup/hdf5/issues/1329) | Heap-use-after free via the component H5AC_unpin_entry() | 1.13.1-1 |Segmentation fault (core dumped)| error: unable to open file | POC1-GH1329 | | +| [CVE-2021-45833](https://nvd.nist.gov/vuln/detail/CVE-2021-45833) | [github #1313](https://github.com/HDFGroup/hdf5/issues/1313) | Stack buffer overflow in H5D__create_chunk_file_map_hyper() | 1.13.1-1 |error: unable to open file|error: unable to open file| POC3-GH1313 | | +| [CVE-2021-45832](https://nvd.nist.gov/vuln/detail/CVE-2021-45832) | [github #1315](https://github.com/HDFGroup/hdf5/issues/1315) | Stack overflow in H5I_inc_ref() | 1.13.1-1 |OPEN|OPEN|wrong file provided, POC6-GH1315.md | | +| [CVE-2021-45830](https://nvd.nist.gov/vuln/detail/CVE-2021-45830) | [github #1314](https://github.com/HDFGroup/hdf5/issues/1314) | Heap Buffer Overflow in H5F_addr_decode_len() | 1.13.1-1 | error: unable to open file | error: unable to open file | POC5-GH1314 | | + +| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| CVE-2021-45829 | [github #1317](https://github.com/HDFGroup/hdf5/issues/1317) | Segmentation fault via h5stat | 1.13.1-1 |error: unable to traverse|error: unable to traverse|POC8-GH1317| | +| [CVE-2020-10812](https://nvd.nist.gov/vuln/detail/CVE-2020-10812) | [HDFFV-11052](https://jira.hdfgroup.org/browse/HDFFV-11052) | NULL pointer dereference exists in the function H5F_get_nrefs() | 1.12.0 |Segfault|no SIGSEGV |h5_nrefs_POC|| +| [CVE-2020-10811](https://nvd.nist.gov/vuln/detail/CVE-2020-10811) | [HDFFV-11049](https://jira.hdfgroup.org/browse/HDFFV-11049) | Heap-based buffer over-read exists in the function H5O__layout_decode() | 1.12.0 | fixed | fixed | h5dump_H5O__layout_decode_POC |./h5dump -r -d BAG_root/metadata | +| [CVE-2020-10810](https://nvd.nist.gov/vuln/detail/CVE-2020-10810) | [HDFFV-11053](https://jira.hdfgroup.org/browse/HDFFV-11053) | NULL pointer dereference exists in the function H5AC_unpin_entry()  | 1.13.0 |no SIGSEGV|no SIGSEGV| H5AC_unpin_entry_POC | ./h5clear -s -m | +| [CVE-2020-10809](https://nvd.nist.gov/vuln/detail/CVE-2020-10809) | [HDFFV-11048](https://jira.hdfgroup.org/browse/HDFFV-11048) | Heap-based buffer overflow exists in Decompress() via gif2h5 | 1.12.0 | gif2h5 | gif2h5 | gif2h5_Decompress_POC | gif2h5 | +| [CVE-2019-9152](https://github.com/magicSwordsMan/PAAFS/tree/master/vul8) | [HDFFV-10719](https://jira.hdfgroup.org/browse/HDFFV-10719) | Invalid read in H5MM_xstrdup() | 1.10.4 || OPEN | H5MM_xstrdup_invalid-read-memory-access | | +| 
[CVE-2019-9151](https://github.com/magicSwordsMan/PAAFS/tree/master/vul7) | [HDFFV-10718](https://jira.hdfgroup.org/browse/HDFFV-10718) | Invalid read in H5VM_memcpyvv() | 1.10.4 | fixed | fixed | H5VM_memcpyvv_invalid-read-memory-access | h5repack file1 file2 | + +| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file/Comment | Comments | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| [CVE-2019-8398](https://nvd.nist.gov/vuln/detail/CVE-2019-8398) | [HDFFV-10710](https://jira.hdfgroup.org/browse/HDFFV-10710) | Invalid read H5T_get_size | missing ver | fixed | fixed | H5T_get_size_invalid-read-memory-access | h5repack file1 file2 | +| [CVE-2019-8397](https://nvd.nist.gov/vuln/detail/CVE-2019-8397) | [HDFFV-10711](https://jira.hdfgroup.org/browse/HDFFV-10711) | Out of bounds read in the function H5T_close_real()  | 1.10.4 | 0 errors | 0 errors | H5T_close_real_invalid-read-memory-access | h5repack file1 file2 | +| [CVE-2019-8396](https://nvd.nist.gov/vuln/detail/CVE-2019-8396) | [HDFFV-10712](https://jira.hdfgroup.org/browse/HDFFV-10712) | Invalid read in H5O__pline_decode | 1.10.4 | 16 errors from 8 contexts | 0 errors | H5O__pline_decode_invalid-read-memory-access | | +| [CVE-2018-17439](https://nvd.nist.gov/vuln/detail/CVE-2018-17439) | [HDFFV-10589](https://jira.hdfgroup.org/browse/HDFFV-10589) | Stack-based buffer overflow in H5S_extent_get_dims() via h52gif | 1.10.3 | gif2h5 | gif2h5 | stackoverflow_H5S_extent_get_dims_H5S | gif2h5 | +| [CVE-2018-17438](https://nvd.nist.gov/vuln/detail/CVE-2018-17438) | [HDFFV-10587](https://jira.hdfgroup.org/browse/HDFFV-10587) | Incorrect protection against division by zero in H5D__select_io() | 1.8.20, 1.10.3 | gif2h5 | 1.12.0 | SIGFPE_H5D__select_io_H5Dselect | gif2h5 | +| [CVE-2018-17437](https://nvd.nist.gov/vuln/detail/CVE-2018-17437) | [HDFFV-10588](https://jira.hdfgroup.org/browse/HDFFV-10588) | Memory leak in H5O_dtype_decode_helper | 1.10.3 | fixed 1.10.5 | check 1.14.0 | memleak_H5O_dtype_decode_helper_H5Odtype | h52gif | + +| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| [CVE-2018-17436](https://nvd.nist.gov/vuln/detail/CVE-2018-17436) | [HDFFV-10593](https://jira.hdfgroup.org/browse/HDFFV-10593) | Invalid write memory access in decompress.c | 1.8.20, 1.10.3 | gif2h5 | gif2h5 | ReadCode_decompress_memoryAccess | gif2h5 | +| [CVE-2018-17435](https://nvd.nist.gov/vuln/detail/CVE-2018-17435) | [HDFFV-10591](https://jira.hdfgroup.org/browse/HDFFV-10591) | Heap-buffer-overflow was discovered in H5O_attr_decode() | 1.10.3 | fixed | fixed | Heap_Overflow_H5O_attr_decode | h52gif file image1.gif -i image | +| [CVE-2018-17434](https://nvd.nist.gov/vuln/detail/CVE-2018-17434) | [HDFFV-10586](https://jira.hdfgroup.org/browse/HDFFV-10586) | Divide by zero in h5repack_filters() | 1.10.3 | only fails | only fails | SIGFPE_apply_filters_h5repack_filters | h5repack -f GZIP=8 -l dset1:CHUNK=5x6 file| +| [CVE-2018-17433](https://nvd.nist.gov/vuln/detail/CVE-2018-17433) | 
[HDFFV-10592](https://jira.hdfgroup.org/browse/HDFFV-10592) | Heap overflow in ReadGifImageDesc() | 1.10.3 | gif2h5 | gif2h5 | HeapOverflow_ReadGifImageDesc | gif2h5 | +| [CVE-2018-17432](https://nvd.nist.gov/vuln/detail/CVE-2018-17432) | [HDFFV-10590](https://jira.hdfgroup.org/browse/HDFFV-10590) | NULL pointer dereference was discovered in H5O_sdspace_encode() | 1.10.3 | fixed | fixed | Null_Pointer_H5O_sdspace_encode | h5repack file1 file2 | +| [CVE-2018-17237](https://nvd.nist.gov/vuln/detail/CVE-2018-17237) | [HDFFV-10571](https://jira.hdfgroup.org/browse/HDFFV-10571) | Divide by zero in H5D__chunk_set_info_real() | 1.10.3 | fixed | fixed | H5D__chunk_set_info_real_div_by_zero | h5dump | + +| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file/Comment | Comments | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| [CVE-2018-17234](https://nvd.nist.gov/vuln/detail/CVE-2018-17234) | [HDFFV-10578](https://jira.hdfgroup.org/browse/HDFFV-10578) | Memory Leaks in "H5O__chunk_deserialize()" | 1.10.3 | fixed | fixed | H5O__chunk_deserialize_memory_leak | h5dump | +| [CVE-2018-17233](https://nvd.nist.gov/vuln/detail/CVE-2018-17233) | [HDFFV-10577](https://jira.hdfgroup.org/browse/HDFFV-10577) | Divided by zero in "H5D__create_chunk_file_map_hyper()" | 1.10.3 | fixed | fixed | H5D__create_chunk_file_map_hyper_div_zero | | +| [CVE-2018-16438](https://nvd.nist.gov/vuln/detail/CVE-2018-16438) | no JIRA# | Invalid read in H5L_extern_query | 1.8.20 | Segmentation fault (core dumped) | only failed | H5L_extern_query@H5Lexternal.c_498-10___out-of-bounds-read | h5dump | +| [CVE-2018-15672](https://nvd.nist.gov/vuln/detail/CVE-20) | [HDFFV-10556](https://jira.hdfgroup.org/browse/HDFFV-) | Division by zero in H5D__chunk_init() | 1.10.2 | fixed | fixed | SIGPFE_Crash | h5dump | +| [CVE-2018-15671](https://github.com/SegfaultMasters/covering360/tree/master/HDF5#stack-overflow---stackoverflow_h5p__get_cb) | [HDFFV-10557](https://jira.hdfgroup.org/browse/HDFFV-10557) |Excessive stack consumption H5P__get_cb() | 1.10.2 | Infinite loop | Infinite loop | stackoverflow_H5P__get_cb | h5dump | +| [CVE-2018-14460](https://nvd.nist.gov/vuln/detail/CVE-2018-14460) | [HDFFV-11223](https://jira.hdfgroup.org/browse/HDFFV-11223) | Buffer over-read in the function H5O_sdspace_decode() | 1.8.20 | fixed | fixed | H5O_sdspace_decode-heap-buffer-overflow | h5dump | +| [CVE-2018-14035](https://nvd.nist.gov/vuln/detail/CVE-2018-14035) | no JIRA# | Buffer over-read in the function H5VM_memcpyvv() | 1.8.20 | only failed | only failed | H5VM_memcpyvv-heap-buffer-overflow | h5dump | + +| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| [CVE-2018-14034](https://nvd.nist.gov/vuln/detail/CVE-2018-14034) | no JIRA# | Out-of-bounds read in the function H5O_pline_reset() | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-14033](https://nvd.nist.gov/vuln/detail/CVE-2018-14033) | [HDFFV-11159](https://jira.hdfgroup.org/browse/HDFFV-) | Buffer over-read in the function H5O_layout_decode() | 1.8.20 
|| missing fixed | dev: PR#405 (with test) | | +| [CVE-2018-14032](https://nvd.nist.gov/vuln/detail/CVE-2018-14032) | no JIRA# | Duplicate of CVE-2018-11206 | missing ver || missing fixed | missing cve file | | +| [CVE-2018-14031](https://nvd.nist.gov/vuln/detail/CVE-2018-14031) | no JIRA# | Buffer over-read in the function H5T_copy | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-13876](https://nvd.nist.gov/vuln/detail/CVE-2018-13876) | no JIRA# | Buffer overflow in the function H5FD_sec2_read | 1.8.20 || missing fixed | Related to H5Dread | | +| [CVE-2018-13875](https://nvd.nist.gov/vuln/detail/CVE-2018-13875) | no JIRA# | Out-of-bounds read in the function H5VM_memcpyvv | 1.8.20 || missing fixed | missing cve file | | + +| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| [CVE-2018-13874](https://nvd.nist.gov/vuln/detail/CVE-2018-13874) | no JIRA# | Buffer overflow in the function H5FD_sec2_read | 1.8.20 || missing fixed | Related to Hdmemset | | +| [CVE-2018-13873](https://nvd.nist.gov/vuln/detail/CVE-2018-13873) | [HDFFV-10676](https://jira.hdfgroup.org/browse/HDFFV-10676) |Buffer over-read in H5O_chunk_deserialize | 1.8.20 | only fails | only fails | H5O_chunk_deserialize-global-buffer-overflow | h5dump | +| [CVE-2018-13872](https://nvd.nist.gov/vuln/detail/CVE-2018-13872) | no JIRA# | Buffer overflow in the function H5G_ent_decode | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-13871](https://nvd.nist.gov/vuln/detail/CVE-2018-13871) | no JIRA# | Buffer overflow in the function H5FL_blk_malloc | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-13870](https://nvd.nist.gov/vuln/detail/CVE-2018-13870) | no JIRA# | Buffer over-read in the function H5O_link_decode | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-13869](https://nvd.nist.gov/vuln/detail/CVE-2018-13869) | no JIRA# | Memcpy parameter overlap in the function H5O_link_decode | 1.8.20 || missing fixed | missing cve file | | + +| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| [CVE-2018-13868](https://nvd.nist.gov/vuln/detail/CVE-2018-13868) | [HDFFV-10548](https://jira.hdfgroup.org/browse/HDFFV-10548) | Buffer over-read in the function H5O_fill_old_decode | 1.8.20 | only fails | only fails | h5dump | +| [CVE-2018-13867](https://nvd.nist.gov/vuln/detail/CVE-2018-13867) | no JIRA# | Out-of-bounds read in the function H5F__accum_read | 1.8.20 | Segmentation fault (core dumped) | Assertion, Abort (core dumped) | H5F__accum_read-Out_Of_Bound_Read | | +| [CVE-2018-13866](https://nvd.nist.gov/vuln/detail/CVE-2018-13866) | no JIRA# | Buffer over-read in the function H5F_addr_decode_len | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-11207](https://nvd.nist.gov/vuln/detail/CVE-2018-11207) | [HDFFV-10481](https://jira.hdfgroup.org/browse/HDFFV-10481) | A division by zero was discovered in H5D__chunk_init() | 1.10.2 | fixed | fixed | DivByZero__H5D_chunk_POC | h5stat -A -T -G 
-D -S file | +| [CVE-2018-11206](https://nvd.nist.gov/vuln/detail/CVE-2018-11206) | [HDFFV-10480](https://jira.hdfgroup.org/browse/HDFFV-10480) | Out-of-bounds read in H5O_fill_new_decode and H5O_fill_old_decode | 1.8.20 | fixed | fixed | H5O_fill\_[new/old]\_decode-heap-buffer-overflow | h5dump | +| [CVE-2018-11205](https://nvd.nist.gov/vuln/detail/CVE-2018-11205) | [HDFFV-10479](https://jira.hdfgroup.org/browse/HDFFV-10479) | Out-of-bounds in H5VM_memcpyvv | 1.10.2 | Segmentation fault (core dumped) | Assertion, Abort (core dumped) | H5VM_memcpyvv-H5VM.c_1626-oob_read | h5dump | + +| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| [CVE-2018-11204](https://nvd.nist.gov/vuln/detail/CVE-2018-11204) | [HDFFV-10478](https://jira.hdfgroup.org/browse/HDFFV-10478) | A NULL pointer dereference in H5O__chunk_deserialize | 1.10.2 | fixed | fixed | H5O__chunk_deserialize-H5Ocache.c_1566-null_point_dereference | h5dump | +| [CVE-2018-11203](https://nvd.nist.gov/vuln/detail/CVE-2018-11203) | [HDFFV-10477](https://jira.hdfgroup.org/browse/HDFFV-10477) | A division by zero was discovered in H5D__btree_decode_key | 1.10.2 | fixed | fixed | H5D__btree_decode_key-H5Dbtree.c_697-div_by_zero | h5dump | +| [CVE-2018-11202](https://nvd.nist.gov/vuln/detail/CVE-2018-11202) | [HDFFV-10476](https://jira.hdfgroup.org/browse/HDFFV-10476) | A NULL pointer dereference was discovered in H5S_hyper_make_spans | 1.10.2 | Infinite loop | Assertion, Abort (core dumped) | H5S_hyper_make_spans-H5Shyper.c_6139-null_pointer_dereference | h5dump | +| [CVE-2017-17509](https://nvd.nist.gov/vuln/detail/CVE-2017-17509) | [HDFFV-10358](https://jira.hdfgroup.org/browse/HDFFV-10358) | Out-of-bounds write vulnerability in H5G__ent_decode_vec | 1.10.1 | fixed | fixed | 5-hdf5-heap-overflow-H5G__ent_decode_vec | h5dump | +| [CVE-2017-17508](https://nvd.nist.gov/vuln/detail/CVE-2017-17508) | [HDFFV-10357](https://jira.hdfgroup.org/browse/HDFFV-10357) | Divide-by-zero in H5T_set_loc | 1.10.1 | fixed | fixed | 1-hdf5-divbyzero-H5T_set_loc | h5dump | +| [CVE-2017-17507](https://nvd.nist.gov/vuln/detail/CVE-2017-17507) | [HDFFV-10356](https://jira.hdfgroup.org/browse/HDFFV-10356) | Out-of-bounds read in H5T_conv_struct_opt | 1.10.1 | Dana: | Will not fix | 3-hdf5-outbound-read-H5T_conv_struct_opt | | + +| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| [CVE-2017-17506](https://nvd.nist.gov/vuln/detail/CVE-2017-17506) | [HDFFV-10355](https://jira.hdfgroup.org/browse/HDFFV-10355) | Out-of-bounds read vulnerability in H5Opline_pline_decode | 1.10.1 | fixed | fixed | 4-hdf5-outbound-read-H5Opline_pline_decode | h5dump | +| [CVE-2017-17505](https://nvd.nist.gov/vuln/detail/CVE-2017-17505) | [HDFFV-10354](https://jira.hdfgroup.org/browse/HDFFV-10354) | NULL pointer dereference in the function H5O_pline_decode | 1.10.1 | fixed | fixed | 2-hdf5-null-pointer-H5O_pline_reset.h5 | | +| [CVE-2016-4333](https://nvd.nist.gov/vuln/detail/CVE-2016-4333) | 
[HDFFV-9993](https://jira.hdfgroup.org/browse/HDFFV-9993)  | Out-of-bounds access in H5O_dtype_decode_helper | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | +| [CVE-2016-4332](https://nvd.nist.gov/vuln/detail/CVE-2016-4332) | [HDFFV-9950](https://jira.hdfgroup.org/browse/HDFFV-9950) | Out-of-bounds write in H5O_msg_read_oh | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | +| [CVE-2016-4331](https://nvd.nist.gov/vuln/detail/CVE-2016-4331) | [HDFFV-9951](https://jira.hdfgroup.org/browse/HDFFV-9951) | Out-of-bounds write in H5Z_nbit_decompress_one_atomic | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | +| [CVE-2016-4330](https://nvd.nist.gov/vuln/detail/CVE-2016-4330) | [HDFFV-9992](https://jira.hdfgroup.org/browse/HDFFV-9952) | Out-of-bounds read in H5O_dtype_decode_helper | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | + diff --git a/doc/CVEissues-table.md b/doc/CVEissues-table.md new file mode 100644 index 00000000000..336fa363fe3 --- /dev/null +++ b/doc/CVEissues-table.md @@ -0,0 +1,71 @@ +| CVE_issue_number | JIRA__number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | +| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | +| [GitHub Vul 11](https://github.com/magicSwordsMan/PAAFS/tree/master/vul11) | HDFFV-10722 | Invalid write in H5O_mtime_encode() | 1.10.4 | only failed | only failed | H5O_mtime_encode_invalid-write-memory-access | ./h5repack $file1 $file2 | +| [GitHub Vul 10](https://github.com/magicSwordsMan/PAAFS/tree/master/vul10) | HDFFV-10721 | Invalid read in H5S_close() | 1.10.4 | only failed | only failed | H5S_close_invalid-read-memory-access | ./h5repack $file1 $file2 | +| [GitHub Vul 9](https://github.com/magicSwordsMan/PAAFS/tree/master/vul9) | HDFFV-10720 | Invalid write in H5F_addr_encode_len() | 1.10.4 | only failed | only failed | H5F_addr_encode_len_invalid-write-memory-access | ./h5repack $file1 $file2 | +| [CVE-2022-26061](https://nvd.nist.gov/vuln/detail/CVE-2022-26061) | SUPPORT-1923 | A heap-based buffer overflow vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | +| [CVE-2022-25972](https://nvd.nist.gov/vuln/detail/CVE-2022-25972) | SUPPORT-1923 | An out-of-bounds write vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | +| [CVE-2022-25942](https://nvd.nist.gov/vuln/detail/CVE-2022-25942) | SUPPORT-1923 | An out-of-bounds read vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | +| CVE-2021-46244 | [github #1327](https://github.com/HDFGroup/hdf5/issues/1327) | Divide By Zero in H5T__complete_copy () | 1.13.1-1 |Floating exception (core dumped)|failed with unable to open dataset | POC-GH1327 | | +| CVE-2021-46243 | [github #1326](https://github.com/HDFGroup/hdf5/issues/1326) | An untrusted pointer dereference in H5O__dtype_decode_helper () | 1.13.1-1 |Segmentation fault (core dumped)| Dataset *ERROR* | POC-GH1326 | | +| CVE-2021-46242 | [github 
#1329](https://github.com/HDFGroup/hdf5/issues/1329) | Heap-use-after free via the component H5AC_unpin_entry() | 1.13.1-1 |Segmentation fault (core dumped)| error: unable to open file | POC1-GH1329 | | +| CVE-2021-45833 | [github #1313](https://github.com/HDFGroup/hdf5/issues/1313) | Stack buffer overflow in H5D__create_chunk_file_map_hyper() | 1.13.1-1 |error: unable to open file|error: unable to open file| POC3-GH1313 | | +| CVE-2021-45832 | [github #1315](https://github.com/HDFGroup/hdf5/issues/1315) | Stack overflow in H5I_inc_ref() | 1.13.1-1 |OPEN|OPEN|wrong file provided, POC6-GH1315.md | | +| CVE-2021-45830 | [github #1314](https://github.com/HDFGroup/hdf5/issues/1314) | Heap Buffer Overflow in H5F_addr_decode_len() | 1.13.1-1 |error: unable to open file|error: unable to open file| POC5-GH1314 | | +| CVE-2021-45829 | [github #1317](https://github.com/HDFGroup/hdf5/issues/1317) | Segmentation fault via h5stat | 1.13.1-1 |error: unable to traverse|error: unable to traverse|POC8-GH1317| | +| [CVE-2020-10812](https://nvd.nist.gov/vuln/detail/CVE-2020-10812) | [HDFFV-11052](https://jira.hdfgroup.org/browse/HDFFV-11052) | NULL pointer dereference exists in the function H5F_get_nrefs() | 1.12.0 |Segfault|no SIGSEGV |h5_nrefs_POC|| +| [CVE-2020-10811](https://nvd.nist.gov/vuln/detail/CVE-2020-10811) | [HDFFV-11049](https://jira.hdfgroup.org/browse/HDFFV-11049) | Heap-based buffer over-read exists in the function H5O__layout_decode() | 1.12.0 | fixed | fixed | h5dump_H5O__layout_decode_POC |./h5dump -r -d BAG_root/metadata | +| [CVE-2020-10810](https://nvd.nist.gov/vuln/detail/CVE-2020-10810) | [HDFFV-11053](https://jira.hdfgroup.org/browse/HDFFV-11053) | NULL pointer dereference exists in the function H5AC_unpin_entry()  | 1.13.0 |no SIGSEGV|no SIGSEGV| H5AC_unpin_entry_POC | ./h5clear -s -m | +| [CVE-2020-10809](https://nvd.nist.gov/vuln/detail/CVE-2020-10809) | [HDFFV-11048](https://jira.hdfgroup.org/browse/HDFFV-11048) | Heap-based buffer overflow exists in Decompress() via gif2h5 | 1.12.0 | gif2h5 | gif2h5 | gif2h5_Decompress_POC | gif2h5 | +| [CVE-2019-9152](https://github.com/magicSwordsMan/PAAFS/tree/master/vul8) | [HDFFV-10719](https://jira.hdfgroup.org/browse/HDFFV-10719) | Invalid read in H5MM_xstrdup() | 1.10.4 || OPEN | H5MM_xstrdup_invalid-read-memory-access | | +| [CVE-2019-9151](https://github.com/magicSwordsMan/PAAFS/tree/master/vul7) | [HDFFV-10718](https://jira.hdfgroup.org/browse/HDFFV-10718) | Invalid read in H5VM_memcpyvv() | 1.10.4 | fixed | fixed | H5VM_memcpyvv_invalid-read-memory-access | h5repack file1 file2 | +| [CVE-2019-8398](https://nvd.nist.gov/vuln/detail/CVE-2019-8398) | [HDFFV-10710](https://jira.hdfgroup.org/browse/HDFFV-10710) | Invalid read H5T_get_size | missing ver | fixed | fixed | H5T_get_size_invalid-read-memory-access | h5repack file1 file2 | +| [CVE-2019-8397](https://nvd.nist.gov/vuln/detail/CVE-2019-8397) | [HDFFV-10711](https://jira.hdfgroup.org/browse/HDFFV-10711) | Out of bounds read in the function H5T_close_real()  | 1.10.4 | 0 errors | 0 errors | H5T_close_real_invalid-read-memory-access | h5repack file1 file2 | +| [CVE-2019-8396](https://nvd.nist.gov/vuln/detail/CVE-2019-8396) | [HDFFV-10712](https://jira.hdfgroup.org/browse/HDFFV-10712) | Invalid read in H5O__pline_decode | 1.10.4 | 16 errors from 8 contexts | 0 errors | H5O__pline_decode_invalid-read-memory-access | | +| [CVE-2018-17439](https://nvd.nist.gov/vuln/detail/CVE-2018-17439) | 
[HDFFV-10589](https://jira.hdfgroup.org/browse/HDFFV-10589) | Stack-based buffer overflow in H5S_extent_get_dims() via h52gif | 1.10.3 | gif2h5 | gif2h5 | stackoverflow_H5S_extent_get_dims_H5S | gif2h5 | +| [CVE-2018-17438](https://nvd.nist.gov/vuln/detail/CVE-2018-17438) | [HDFFV-10587](https://jira.hdfgroup.org/browse/HDFFV-10587) | Incorrect protection against division by zero in H5D__select_io() | 1.8.20, 1.10.3 | gif2h5 | 1.12.0 | SIGFPE_H5D__select_io_H5Dselect | gif2h5 | +| [CVE-2018-17437](https://nvd.nist.gov/vuln/detail/CVE-2018-17437) | [HDFFV-10588](https://jira.hdfgroup.org/browse/HDFFV-10588) | Memory leak in H5O_dtype_decode_helper | 1.10.3 | fixed 1.10.5 | check 1.14.0 | memleak_H5O_dtype_decode_helper_H5Odtype | h52gif | +| [CVE-2018-17436](https://nvd.nist.gov/vuln/detail/CVE-2018-17436) | [HDFFV-10593](https://jira.hdfgroup.org/browse/HDFFV-10593) | Invalid write memory access in decompress.c | 1.8.20, 1.10.3 | gif2h5 | gif2h5 | ReadCode_decompress_memoryAccess | gif2h5 | +| [CVE-2018-17435](https://nvd.nist.gov/vuln/detail/CVE-2018-17435) | [HDFFV-10591](https://jira.hdfgroup.org/browse/HDFFV-10591) | Heap-buffer-overflow was discovered in H5O_attr_decode() | 1.10.3 | fixed | fixed | Heap_Overflow_H5O_attr_decode | h52gif file image1.gif -i image | +| [CVE-2018-17434](https://nvd.nist.gov/vuln/detail/CVE-2018-17434) | [HDFFV-10586](https://jira.hdfgroup.org/browse/HDFFV-10586) | Divide by zero in h5repack_filters() | 1.10.3 | only fails | only fails | SIGFPE_apply_filters_h5repack_filters | h5repack -f GZIP=8 -l dset1:CHUNK=5x6 file| +| [CVE-2018-17433](https://nvd.nist.gov/vuln/detail/CVE-2018-17433) | [HDFFV-10592](https://jira.hdfgroup.org/browse/HDFFV-10592) | Heap overflow in ReadGifImageDesc() | 1.10.3 | gif2h5 | gif2h5 | HeapOverflow_ReadGifImageDesc | gif2h5 | +| [CVE-2018-17432](https://nvd.nist.gov/vuln/detail/CVE-2018-17432) | [HDFFV-10590](https://jira.hdfgroup.org/browse/HDFFV-10590) | NULL pointer dereference was discovered in H5O_sdspace_encode() | 1.10.3 | fixed | fixed | Null_Pointer_H5O_sdspace_encode | h5repack file1 file2 | +| [CVE-2018-17237](https://nvd.nist.gov/vuln/detail/CVE-2018-17237) | [HDFFV-10571](https://jira.hdfgroup.org/browse/HDFFV-10571) | Divide by zero in H5D__chunk_set_info_real() | 1.10.3 | fixed | fixed | H5D__chunk_set_info_real_div_by_zero | h5dump | +| [CVE-2018-17234](https://nvd.nist.gov/vuln/detail/CVE-2018-17234) | [HDFFV-10578](https://jira.hdfgroup.org/browse/HDFFV-10578) | Memory Leaks in "H5O__chunk_deserialize()" | 1.10.3 | fixed | fixed | H5O__chunk_deserialize_memory_leak | h5dump | +| [CVE-2018-17233](https://nvd.nist.gov/vuln/detail/CVE-2018-17233) | [HDFFV-10577](https://jira.hdfgroup.org/browse/HDFFV-10577) | Divided by zero in "H5D__create_chunk_file_map_hyper()" | 1.10.3 | fixed | fixed | H5D__create_chunk_file_map_hyper_div_zero | | +| [CVE-2018-16438](https://nvd.nist.gov/vuln/detail/CVE-2018-16438) | no JIRA# | Invalid read in H5L_extern_query | 1.8.20 | Segmentation fault (core dumped) | only failed | H5L_extern_query@H5Lexternal.c_498-10___out-of-bounds-read | h5dump | +| [CVE-2018-15672](https://nvd.nist.gov/vuln/detail/CVE-20) | [HDFFV-10556](https://jira.hdfgroup.org/browse/HDFFV-) | Division by zero in H5D__chunk_init() | 1.10.2 | fixed | fixed | SIGPFE_Crash | h5dump | +| [CVE-2018-15671](https://github.com/SegfaultMasters/covering360/tree/master/HDF5#stack-overflow---stackoverflow_h5p__get_cb) | [HDFFV-10557](https://jira.hdfgroup.org/browse/HDFFV-10557) |Excessive stack consumption 
H5P__get_cb() | 1.10.2 | Infinite loop | Infinite loop | stackoverflow_H5P__get_cb | h5dump | +| [CVE-2018-14460](https://nvd.nist.gov/vuln/detail/CVE-2018-14460) | [HDFFV-11223](https://jira.hdfgroup.org/browse/HDFFV-11223) | Buffer over-read in the function H5O_sdspace_decode() | 1.8.20 | fixed | fixed | H5O_sdspace_decode-heap-buffer-overflow | h5dump | +| [CVE-2018-14035](https://nvd.nist.gov/vuln/detail/CVE-2018-14035) | no JIRA# | Buffer over-read in the function H5VM_memcpyvv() | 1.8.20 | only failed | only failed | H5VM_memcpyvv-heap-buffer-overflow | h5dump | +| [CVE-2018-14034](https://nvd.nist.gov/vuln/detail/CVE-2018-14034) | no JIRA# | Out-of-bounds read in the function H5O_pline_reset() | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-14033](https://nvd.nist.gov/vuln/detail/CVE-2018-14033) | [HDFFV-11159](https://jira.hdfgroup.org/browse/HDFFV-) | Buffer over-read in the function H5O_layout_decode() | 1.8.20 || missing fixed | dev: PR#405 (with test) | | +| [CVE-2018-14032](https://nvd.nist.gov/vuln/detail/CVE-2018-14032) | no JIRA# | Duplicate of CVE-2018-11206 | missing ver || missing fixed | missing cve file | | +| [CVE-2018-14031](https://nvd.nist.gov/vuln/detail/CVE-2018-14031) | no JIRA# | Buffer over-read in the function H5T_copy | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-13876](https://nvd.nist.gov/vuln/detail/CVE-2018-13876) | no JIRA# | Buffer overflow in the function H5FD_sec2_read | 1.8.20 || missing fixed | Related to H5Dread | | +| [CVE-2018-13875](https://nvd.nist.gov/vuln/detail/CVE-2018-13875) | no JIRA# | Out-of-bounds read in the function H5VM_memcpyvv | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-13874](https://nvd.nist.gov/vuln/detail/CVE-2018-13874) | no JIRA# | Buffer overflow in the function H5FD_sec2_read | 1.8.20 || missing fixed | Related to Hdmemset | | +| [CVE-2018-13873](https://nvd.nist.gov/vuln/detail/CVE-2018-13873) | [HDFFV-10676](https://jira.hdfgroup.org/browse/HDFFV-10676) |Buffer over-read in H5O_chunk_deserialize | 1.8.20 | only fails | only fails | H5O_chunk_deserialize-global-buffer-overflow | h5dump | +| [CVE-2018-13872](https://nvd.nist.gov/vuln/detail/CVE-2018-13872) | no JIRA# | Buffer overflow in the function H5G_ent_decode | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-13871](https://nvd.nist.gov/vuln/detail/CVE-2018-13871) | no JIRA# | Buffer overflow in the function H5FL_blk_malloc | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-13870](https://nvd.nist.gov/vuln/detail/CVE-2018-13870) | no JIRA# | Buffer over-read in the function H5O_link_decode | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-13869](https://nvd.nist.gov/vuln/detail/CVE-2018-13869) | no JIRA# | Memcpy parameter overlap in the function H5O_link_decode | 1.8.20 || missing fixed | missing cve file | | +| [CVE-2018-13868](https://nvd.nist.gov/vuln/detail/CVE-2018-13868) | [HDFFV-10548](https://jira.hdfgroup.org/browse/HDFFV-10548) | Buffer over-read in the function H5O_fill_old_decode | 1.8.20 | only fails | only fails | h5dump | +| [CVE-2018-13867](https://nvd.nist.gov/vuln/detail/CVE-2018-13867) | no JIRA# | Out-of-bounds read in the function H5F__accum_read | 1.8.20 | Segmentation fault (core dumped) | Assertion, Abort (core dumped) | H5F__accum_read-Out_Of_Bound_Read | | +| [CVE-2018-13866](https://nvd.nist.gov/vuln/detail/CVE-2018-13866) | no JIRA# | Buffer over-read in the function H5F_addr_decode_len | 1.8.20 || missing fixed | missing cve file | | +| 
[CVE-2018-11207](https://nvd.nist.gov/vuln/detail/CVE-2018-11207) | [HDFFV-10481](https://jira.hdfgroup.org/browse/HDFFV-10481) | A division by zero was discovered in H5D__chunk_init() | 1.10.2 | fixed | fixed | DivByZero__H5D_chunk_POC | h5stat -A -T -G -D -S file | +| [CVE-2018-11206](https://nvd.nist.gov/vuln/detail/CVE-2018-11206) | [HDFFV-10480](https://jira.hdfgroup.org/browse/HDFFV-10480) | Out-of-bounds read in H5O_fill_new_decode and H5O_fill_old_decode | 1.8.20 | fixed | fixed | H5O_fill\_[new/old]\_decode-heap-buffer-overflow | h5dump | +| [CVE-2018-11205](https://nvd.nist.gov/vuln/detail/CVE-2018-11205) | [HDFFV-10479](https://jira.hdfgroup.org/browse/HDFFV-10479) | Out-of-bounds in H5VM_memcpyvv | 1.10.2 | Segmentation fault (core dumped) | Assertion, Abort (core dumped) | H5VM_memcpyvv-H5VM.c_1626-oob_read | h5dump | +| [CVE-2018-11204](https://nvd.nist.gov/vuln/detail/CVE-2018-11204) | [HDFFV-10478](https://jira.hdfgroup.org/browse/HDFFV-10478) | A NULL pointer dereference in H5O__chunk_deserialize | 1.10.2 | fixed | fixed | H5O__chunk_deserialize-H5Ocache.c_1566-null_point_dereference | h5dump | +| [CVE-2018-11203](https://nvd.nist.gov/vuln/detail/CVE-2018-11203) | [HDFFV-10477](https://jira.hdfgroup.org/browse/HDFFV-10477) | A division by zero was discovered in H5D__btree_decode_key | 1.10.2 | fixed | fixed | H5D__btree_decode_key-H5Dbtree.c_697-div_by_zero | h5dump | +| [CVE-2018-11202](https://nvd.nist.gov/vuln/detail/CVE-2018-11202) | [HDFFV-10476](https://jira.hdfgroup.org/browse/HDFFV-10476) | A NULL pointer dereference was discovered in H5S_hyper_make_spans | 1.10.2 | Infinite loop | Assertion, Abort (core dumped) | H5S_hyper_make_spans-H5Shyper.c_6139-null_pointer_dereference | h5dump | +| [CVE-2017-17509](https://nvd.nist.gov/vuln/detail/CVE-2017-17509) | [HDFFV-10358](https://jira.hdfgroup.org/browse/HDFFV-10358) | Out-of-bounds write vulnerability in H5G__ent_decode_vec | 1.10.1 | fixed | fixed | 5-hdf5-heap-overflow-H5G__ent_decode_vec | h5dump | +| [CVE-2017-17508](https://nvd.nist.gov/vuln/detail/CVE-2017-17508) | [HDFFV-10357](https://jira.hdfgroup.org/browse/HDFFV-10357) | Divide-by-zero in H5T_set_loc | 1.10.1 | fixed | fixed | 1-hdf5-divbyzero-H5T_set_loc | h5dump | +| [CVE-2017-17507](https://nvd.nist.gov/vuln/detail/CVE-2017-17507) | [HDFFV-10356](https://jira.hdfgroup.org/browse/HDFFV-10356) | Out-of-bounds read in H5T_conv_struct_opt | 1.10.1 | Dana: | Will not fix | 3-hdf5-outbound-read-H5T_conv_struct_opt | | +| [CVE-2017-17506](https://nvd.nist.gov/vuln/detail/CVE-2017-17506) | [HDFFV-10355](https://jira.hdfgroup.org/browse/HDFFV-10355) | Out-of-bounds read vulnerability in H5Opline_pline_decode | 1.10.1 | fixed | fixed | 4-hdf5-outbound-read-H5Opline_pline_decode | h5dump | +| [CVE-2017-17505](https://nvd.nist.gov/vuln/detail/CVE-2017-17505) | [HDFFV-10354](https://jira.hdfgroup.org/browse/HDFFV-10354) | NULL pointer dereference in the function H5O_pline_decode | 1.10.1 | fixed | fixed | 2-hdf5-null-pointer-H5O_pline_reset.h5 | | +| [CVE-2016-4333](https://nvd.nist.gov/vuln/detail/CVE-2016-4333) | [HDFFV-9993](https://jira.hdfgroup.org/browse/HDFFV-9993)  | Out-of-bounds access in H5O_dtype_decode_helper | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | +| [CVE-2016-4332](https://nvd.nist.gov/vuln/detail/CVE-2016-4332) | [HDFFV-9950](https://jira.hdfgroup.org/browse/HDFFV-9950) | Out-of-bounds write in H5O_msg_read_oh | 1.8.16 | fixed in | 1.8.18, 1.10.1 | 
missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | +| [CVE-2016-4331](https://nvd.nist.gov/vuln/detail/CVE-2016-4331) | [HDFFV-9951](https://jira.hdfgroup.org/browse/HDFFV-9951) | Out-of-bounds write in H5Z_nbit_decompress_one_atomic | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | +| [CVE-2016-4330](https://nvd.nist.gov/vuln/detail/CVE-2016-4330) | [HDFFV-9992](https://jira.hdfgroup.org/browse/HDFFV-9952) | Out-of-bounds read in H5O_dtype_decode_helper | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | + From 82ca3f8a9db0e6e92b6615f512681d08ab50a935 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 11 Apr 2023 21:23:52 -0700 Subject: [PATCH 114/231] Revert "Added tables of CVE issues (#2685)" (#2696) This reverts commit b749a4ef09a6fa2bdb02cf49eddda364dcb9a0cc. --- doc/CVEissues-multi-tables.md | 103 ---------------------------------- doc/CVEissues-table.md | 71 ----------------------- 2 files changed, 174 deletions(-) delete mode 100644 doc/CVEissues-multi-tables.md delete mode 100644 doc/CVEissues-table.md diff --git a/doc/CVEissues-multi-tables.md b/doc/CVEissues-multi-tables.md deleted file mode 100644 index 864c3d2895a..00000000000 --- a/doc/CVEissues-multi-tables.md +++ /dev/null @@ -1,103 +0,0 @@ -\*\*\* ***This is a work-in-progress*** \*\*\* - -| CVE_issue_number | JIRA__number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | -| [GitHub Vul 11](https://github.com/magicSwordsMan/PAAFS/tree/master/vul11) | HDFFV-10722 | Invalid write in H5O_mtime_encode() | 1.10.4 | only failed | only failed | H5O_mtime_encode_invalid-write-memory-access | ./h5repack $file1 $file2 | -| [GitHub Vul 10](https://github.com/magicSwordsMan/PAAFS/tree/master/vul10) | HDFFV-10721 | Invalid read in H5S_close() | 1.10.4 | only failed | only failed | H5S_close_invalid-read-memory-access | ./h5repack $file1 $file2 | -| [GitHub Vul 9](https://github.com/magicSwordsMan/PAAFS/tree/master/vul9) | HDFFV-10720 | Invalid write in H5F_addr_encode_len() | 1.10.4 | only failed | only failed | H5F_addr_encode_len_invalid-write-memory-access | ./h5repack $file1 $file2 | -| [CVE-2022-26061](https://nvd.nist.gov/vuln/detail/CVE-2022-26061) | SUPPORT-1923 | A heap-based buffer overflow vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | -| [CVE-2022-25972](https://nvd.nist.gov/vuln/detail/CVE-2022-25972) | SUPPORT-1923 | An out-of-bounds write vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | -| [CVE-2022-25942](https://nvd.nist.gov/vuln/detail/CVE-2022-25942) | SUPPORT-1923 | An out-of-bounds read vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | - -| CVE_issue_number | JIRA_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | 
:--------------- | :------- | -| [CVE-2021-46244](https://nvd.nist.gov/vuln/detail/CVE-2021-46244) | [github #1327](https://github.com/HDFGroup/hdf5/issues/1327) | Divide By Zero in H5T__complete_copy () | 1.13.1-1 |Floating exception (core dumped)|failed with unable to open dataset | POC-GH1327 | | -| [CVE-2021-46243](https://nvd.nist.gov/vuln/detail/CVE-2021-46243) | [github #1326](https://github.com/HDFGroup/hdf5/issues/1326) | An untrusted pointer dereference in H5O__dtype_decode_helper () | 1.13.1-1 |Segmentation fault (core dumped)| Dataset *ERROR* | POC-GH1326 | | -| [CVE-2021-46242](https://nvd.nist.gov/vuln/detail/CVE-2021-46242) | [github #1329](https://github.com/HDFGroup/hdf5/issues/1329) | Heap-use-after free via the component H5AC_unpin_entry() | 1.13.1-1 |Segmentation fault (core dumped)| error: unable to open file | POC1-GH1329 | | -| [CVE-2021-45833](https://nvd.nist.gov/vuln/detail/CVE-2021-45833) | [github #1313](https://github.com/HDFGroup/hdf5/issues/1313) | Stack buffer overflow in H5D__create_chunk_file_map_hyper() | 1.13.1-1 |error: unable to open file|error: unable to open file| POC3-GH1313 | | -| [CVE-2021-45832](https://nvd.nist.gov/vuln/detail/CVE-2021-45832) | [github #1315](https://github.com/HDFGroup/hdf5/issues/1315) | Stack overflow in H5I_inc_ref() | 1.13.1-1 |OPEN|OPEN|wrong file provided, POC6-GH1315.md | | -| [CVE-2021-45830](https://nvd.nist.gov/vuln/detail/CVE-2021-45830) | [github #1314](https://github.com/HDFGroup/hdf5/issues/1314) | Heap Buffer Overflow in H5F_addr_decode_len() | 1.13.1-1 | error: unable to open file | error: unable to open file | POC5-GH1314 | | - -| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | -| CVE-2021-45829 | [github #1317](https://github.com/HDFGroup/hdf5/issues/1317) | Segmentation fault via h5stat | 1.13.1-1 |error: unable to traverse|error: unable to traverse|POC8-GH1317| | -| [CVE-2020-10812](https://nvd.nist.gov/vuln/detail/CVE-2020-10812) | [HDFFV-11052](https://jira.hdfgroup.org/browse/HDFFV-11052) | NULL pointer dereference exists in the function H5F_get_nrefs() | 1.12.0 |Segfault|no SIGSEGV |h5_nrefs_POC|| -| [CVE-2020-10811](https://nvd.nist.gov/vuln/detail/CVE-2020-10811) | [HDFFV-11049](https://jira.hdfgroup.org/browse/HDFFV-11049) | Heap-based buffer over-read exists in the function H5O__layout_decode() | 1.12.0 | fixed | fixed | h5dump_H5O__layout_decode_POC |./h5dump -r -d BAG_root/metadata | -| [CVE-2020-10810](https://nvd.nist.gov/vuln/detail/CVE-2020-10810) | [HDFFV-11053](https://jira.hdfgroup.org/browse/HDFFV-11053) | NULL pointer dereference exists in the function H5AC_unpin_entry()  | 1.13.0 |no SIGSEGV|no SIGSEGV| H5AC_unpin_entry_POC | ./h5clear -s -m | -| [CVE-2020-10809](https://nvd.nist.gov/vuln/detail/CVE-2020-10809) | [HDFFV-11048](https://jira.hdfgroup.org/browse/HDFFV-11048) | Heap-based buffer overflow exists in Decompress() via gif2h5 | 1.12.0 | gif2h5 | gif2h5 | gif2h5_Decompress_POC | gif2h5 | -| [CVE-2019-9152](https://github.com/magicSwordsMan/PAAFS/tree/master/vul8) | [HDFFV-10719](https://jira.hdfgroup.org/browse/HDFFV-10719) | Invalid read in H5MM_xstrdup() | 1.10.4 || OPEN | H5MM_xstrdup_invalid-read-memory-access | | -| 
[CVE-2019-9151](https://github.com/magicSwordsMan/PAAFS/tree/master/vul7) | [HDFFV-10718](https://jira.hdfgroup.org/browse/HDFFV-10718) | Invalid read in H5VM_memcpyvv() | 1.10.4 | fixed | fixed | H5VM_memcpyvv_invalid-read-memory-access | h5repack file1 file2 | - -| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file/Comment | Comments | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | -| [CVE-2019-8398](https://nvd.nist.gov/vuln/detail/CVE-2019-8398) | [HDFFV-10710](https://jira.hdfgroup.org/browse/HDFFV-10710) | Invalid read H5T_get_size | missing ver | fixed | fixed | H5T_get_size_invalid-read-memory-access | h5repack file1 file2 | -| [CVE-2019-8397](https://nvd.nist.gov/vuln/detail/CVE-2019-8397) | [HDFFV-10711](https://jira.hdfgroup.org/browse/HDFFV-10711) | Out of bounds read in the function H5T_close_real()  | 1.10.4 | 0 errors | 0 errors | H5T_close_real_invalid-read-memory-access | h5repack file1 file2 | -| [CVE-2019-8396](https://nvd.nist.gov/vuln/detail/CVE-2019-8396) | [HDFFV-10712](https://jira.hdfgroup.org/browse/HDFFV-10712) | Invalid read in H5O__pline_decode | 1.10.4 | 16 errors from 8 contexts | 0 errors | H5O__pline_decode_invalid-read-memory-access | | -| [CVE-2018-17439](https://nvd.nist.gov/vuln/detail/CVE-2018-17439) | [HDFFV-10589](https://jira.hdfgroup.org/browse/HDFFV-10589) | Stack-based buffer overflow in H5S_extent_get_dims() via h52gif | 1.10.3 | gif2h5 | gif2h5 | stackoverflow_H5S_extent_get_dims_H5S | gif2h5 | -| [CVE-2018-17438](https://nvd.nist.gov/vuln/detail/CVE-2018-17438) | [HDFFV-10587](https://jira.hdfgroup.org/browse/HDFFV-10587) | Incorrect protection against division by zero in H5D__select_io() | 1.8.20, 1.10.3 | gif2h5 | 1.12.0 | SIGFPE_H5D__select_io_H5Dselect | gif2h5 | -| [CVE-2018-17437](https://nvd.nist.gov/vuln/detail/CVE-2018-17437) | [HDFFV-10588](https://jira.hdfgroup.org/browse/HDFFV-10588) | Memory leak in H5O_dtype_decode_helper | 1.10.3 | fixed 1.10.5 | check 1.14.0 | memleak_H5O_dtype_decode_helper_H5Odtype | h52gif | - -| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | -| [CVE-2018-17436](https://nvd.nist.gov/vuln/detail/CVE-2018-17436) | [HDFFV-10593](https://jira.hdfgroup.org/browse/HDFFV-10593) | Invalid write memory access in decompress.c | 1.8.20, 1.10.3 | gif2h5 | gif2h5 | ReadCode_decompress_memoryAccess | gif2h5 | -| [CVE-2018-17435](https://nvd.nist.gov/vuln/detail/CVE-2018-17435) | [HDFFV-10591](https://jira.hdfgroup.org/browse/HDFFV-10591) | Heap-buffer-overflow was discovered in H5O_attr_decode() | 1.10.3 | fixed | fixed | Heap_Overflow_H5O_attr_decode | h52gif file image1.gif -i image | -| [CVE-2018-17434](https://nvd.nist.gov/vuln/detail/CVE-2018-17434) | [HDFFV-10586](https://jira.hdfgroup.org/browse/HDFFV-10586) | Divide by zero in h5repack_filters() | 1.10.3 | only fails | only fails | SIGFPE_apply_filters_h5repack_filters | h5repack -f GZIP=8 -l dset1:CHUNK=5x6 file| -| [CVE-2018-17433](https://nvd.nist.gov/vuln/detail/CVE-2018-17433) | 
[HDFFV-10592](https://jira.hdfgroup.org/browse/HDFFV-10592) | Heap overflow in ReadGifImageDesc() | 1.10.3 | gif2h5 | gif2h5 | HeapOverflow_ReadGifImageDesc | gif2h5 | -| [CVE-2018-17432](https://nvd.nist.gov/vuln/detail/CVE-2018-17432) | [HDFFV-10590](https://jira.hdfgroup.org/browse/HDFFV-10590) | NULL pointer dereference was discovered in H5O_sdspace_encode() | 1.10.3 | fixed | fixed | Null_Pointer_H5O_sdspace_encode | h5repack file1 file2 | -| [CVE-2018-17237](https://nvd.nist.gov/vuln/detail/CVE-2018-17237) | [HDFFV-10571](https://jira.hdfgroup.org/browse/HDFFV-10571) | Divide by zero in H5D__chunk_set_info_real() | 1.10.3 | fixed | fixed | H5D__chunk_set_info_real_div_by_zero | h5dump | - -| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file/Comment | Comments | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | -| [CVE-2018-17234](https://nvd.nist.gov/vuln/detail/CVE-2018-17234) | [HDFFV-10578](https://jira.hdfgroup.org/browse/HDFFV-10578) | Memory Leaks in "H5O__chunk_deserialize()" | 1.10.3 | fixed | fixed | H5O__chunk_deserialize_memory_leak | h5dump | -| [CVE-2018-17233](https://nvd.nist.gov/vuln/detail/CVE-2018-17233) | [HDFFV-10577](https://jira.hdfgroup.org/browse/HDFFV-10577) | Divided by zero in "H5D__create_chunk_file_map_hyper()" | 1.10.3 | fixed | fixed | H5D__create_chunk_file_map_hyper_div_zero | | -| [CVE-2018-16438](https://nvd.nist.gov/vuln/detail/CVE-2018-16438) | no JIRA# | Invalid read in H5L_extern_query | 1.8.20 | Segmentation fault (core dumped) | only failed | H5L_extern_query@H5Lexternal.c_498-10___out-of-bounds-read | h5dump | -| [CVE-2018-15672](https://nvd.nist.gov/vuln/detail/CVE-20) | [HDFFV-10556](https://jira.hdfgroup.org/browse/HDFFV-) | Division by zero in H5D__chunk_init() | 1.10.2 | fixed | fixed | SIGPFE_Crash | h5dump | -| [CVE-2018-15671](https://github.com/SegfaultMasters/covering360/tree/master/HDF5#stack-overflow---stackoverflow_h5p__get_cb) | [HDFFV-10557](https://jira.hdfgroup.org/browse/HDFFV-10557) |Excessive stack consumption H5P__get_cb() | 1.10.2 | Infinite loop | Infinite loop | stackoverflow_H5P__get_cb | h5dump | -| [CVE-2018-14460](https://nvd.nist.gov/vuln/detail/CVE-2018-14460) | [HDFFV-11223](https://jira.hdfgroup.org/browse/HDFFV-11223) | Buffer over-read in the function H5O_sdspace_decode() | 1.8.20 | fixed | fixed | H5O_sdspace_decode-heap-buffer-overflow | h5dump | -| [CVE-2018-14035](https://nvd.nist.gov/vuln/detail/CVE-2018-14035) | no JIRA# | Buffer over-read in the function H5VM_memcpyvv() | 1.8.20 | only failed | only failed | H5VM_memcpyvv-heap-buffer-overflow | h5dump | - -| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | -| [CVE-2018-14034](https://nvd.nist.gov/vuln/detail/CVE-2018-14034) | no JIRA# | Out-of-bounds read in the function H5O_pline_reset() | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-14033](https://nvd.nist.gov/vuln/detail/CVE-2018-14033) | [HDFFV-11159](https://jira.hdfgroup.org/browse/HDFFV-) | Buffer over-read in the function H5O_layout_decode() | 1.8.20 
|| missing fixed | dev: PR#405 (with test) | | -| [CVE-2018-14032](https://nvd.nist.gov/vuln/detail/CVE-2018-14032) | no JIRA# | Duplicate of CVE-2018-11206 | missing ver || missing fixed | missing cve file | | -| [CVE-2018-14031](https://nvd.nist.gov/vuln/detail/CVE-2018-14031) | no JIRA# | Buffer over-read in the function H5T_copy | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-13876](https://nvd.nist.gov/vuln/detail/CVE-2018-13876) | no JIRA# | Buffer overflow in the function H5FD_sec2_read | 1.8.20 || missing fixed | Related to H5Dread | | -| [CVE-2018-13875](https://nvd.nist.gov/vuln/detail/CVE-2018-13875) | no JIRA# | Out-of-bounds read in the function H5VM_memcpyvv | 1.8.20 || missing fixed | missing cve file | | - -| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | -| [CVE-2018-13874](https://nvd.nist.gov/vuln/detail/CVE-2018-13874) | no JIRA# | Buffer overflow in the function H5FD_sec2_read | 1.8.20 || missing fixed | Related to Hdmemset | | -| [CVE-2018-13873](https://nvd.nist.gov/vuln/detail/CVE-2018-13873) | [HDFFV-10676](https://jira.hdfgroup.org/browse/HDFFV-10676) |Buffer over-read in H5O_chunk_deserialize | 1.8.20 | only fails | only fails | H5O_chunk_deserialize-global-buffer-overflow | h5dump | -| [CVE-2018-13872](https://nvd.nist.gov/vuln/detail/CVE-2018-13872) | no JIRA# | Buffer overflow in the function H5G_ent_decode | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-13871](https://nvd.nist.gov/vuln/detail/CVE-2018-13871) | no JIRA# | Buffer overflow in the function H5FL_blk_malloc | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-13870](https://nvd.nist.gov/vuln/detail/CVE-2018-13870) | no JIRA# | Buffer over-read in the function H5O_link_decode | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-13869](https://nvd.nist.gov/vuln/detail/CVE-2018-13869) | no JIRA# | Memcpy parameter overlap in the function H5O_link_decode | 1.8.20 || missing fixed | missing cve file | | - -| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | -| [CVE-2018-13868](https://nvd.nist.gov/vuln/detail/CVE-2018-13868) | [HDFFV-10548](https://jira.hdfgroup.org/browse/HDFFV-10548) | Buffer over-read in the function H5O_fill_old_decode | 1.8.20 | only fails | only fails | h5dump | -| [CVE-2018-13867](https://nvd.nist.gov/vuln/detail/CVE-2018-13867) | no JIRA# | Out-of-bounds read in the function H5F__accum_read | 1.8.20 | Segmentation fault (core dumped) | Assertion, Abort (core dumped) | H5F__accum_read-Out_Of_Bound_Read | | -| [CVE-2018-13866](https://nvd.nist.gov/vuln/detail/CVE-2018-13866) | no JIRA# | Buffer over-read in the function H5F_addr_decode_len | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-11207](https://nvd.nist.gov/vuln/detail/CVE-2018-11207) | [HDFFV-10481](https://jira.hdfgroup.org/browse/HDFFV-10481) | A division by zero was discovered in H5D__chunk_init() | 1.10.2 | fixed | fixed | DivByZero__H5D_chunk_POC | h5stat -A -T -G 
-D -S file | -| [CVE-2018-11206](https://nvd.nist.gov/vuln/detail/CVE-2018-11206) | [HDFFV-10480](https://jira.hdfgroup.org/browse/HDFFV-10480) | Out-of-bounds read in H5O_fill_new_decode and H5O_fill_old_decode | 1.8.20 | fixed | fixed | H5O_fill\_[new/old]\_decode-heap-buffer-overflow | h5dump | -| [CVE-2018-11205](https://nvd.nist.gov/vuln/detail/CVE-2018-11205) | [HDFFV-10479](https://jira.hdfgroup.org/browse/HDFFV-10479) | Out-of-bounds in H5VM_memcpyvv | 1.10.2 | Segmentation fault (core dumped) | Assertion, Abort (core dumped) | H5VM_memcpyvv-H5VM.c_1626-oob_read | h5dump | - -| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | -| [CVE-2018-11204](https://nvd.nist.gov/vuln/detail/CVE-2018-11204) | [HDFFV-10478](https://jira.hdfgroup.org/browse/HDFFV-10478) | A NULL pointer dereference in H5O__chunk_deserialize | 1.10.2 | fixed | fixed | H5O__chunk_deserialize-H5Ocache.c_1566-null_point_dereference | h5dump | -| [CVE-2018-11203](https://nvd.nist.gov/vuln/detail/CVE-2018-11203) | [HDFFV-10477](https://jira.hdfgroup.org/browse/HDFFV-10477) | A division by zero was discovered in H5D__btree_decode_key | 1.10.2 | fixed | fixed | H5D__btree_decode_key-H5Dbtree.c_697-div_by_zero | h5dump | -| [CVE-2018-11202](https://nvd.nist.gov/vuln/detail/CVE-2018-11202) | [HDFFV-10476](https://jira.hdfgroup.org/browse/HDFFV-10476) | A NULL pointer dereference was discovered in H5S_hyper_make_spans | 1.10.2 | Infinite loop | Assertion, Abort (core dumped) | H5S_hyper_make_spans-H5Shyper.c_6139-null_pointer_dereference | h5dump | -| [CVE-2017-17509](https://nvd.nist.gov/vuln/detail/CVE-2017-17509) | [HDFFV-10358](https://jira.hdfgroup.org/browse/HDFFV-10358) | Out-of-bounds write vulnerability in H5G__ent_decode_vec | 1.10.1 | fixed | fixed | 5-hdf5-heap-overflow-H5G__ent_decode_vec | h5dump | -| [CVE-2017-17508](https://nvd.nist.gov/vuln/detail/CVE-2017-17508) | [HDFFV-10357](https://jira.hdfgroup.org/browse/HDFFV-10357) | Divide-by-zero in H5T_set_loc | 1.10.1 | fixed | fixed | 1-hdf5-divbyzero-H5T_set_loc | h5dump | -| [CVE-2017-17507](https://nvd.nist.gov/vuln/detail/CVE-2017-17507) | [HDFFV-10356](https://jira.hdfgroup.org/browse/HDFFV-10356) | Out-of-bounds read in H5T_conv_struct_opt | 1.10.1 | Dana: | Will not fix | 3-hdf5-outbound-read-H5T_conv_struct_opt | | - -| CVE_issue_number | JIRA_issue_number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | -| [CVE-2017-17506](https://nvd.nist.gov/vuln/detail/CVE-2017-17506) | [HDFFV-10355](https://jira.hdfgroup.org/browse/HDFFV-10355) | Out-of-bounds read vulnerability in H5Opline_pline_decode | 1.10.1 | fixed | fixed | 4-hdf5-outbound-read-H5Opline_pline_decode | h5dump | -| [CVE-2017-17505](https://nvd.nist.gov/vuln/detail/CVE-2017-17505) | [HDFFV-10354](https://jira.hdfgroup.org/browse/HDFFV-10354) | NULL pointer dereference in the function H5O_pline_decode | 1.10.1 | fixed | fixed | 2-hdf5-null-pointer-H5O_pline_reset.h5 | | -| [CVE-2016-4333](https://nvd.nist.gov/vuln/detail/CVE-2016-4333) | 
[HDFFV-9993](https://jira.hdfgroup.org/browse/HDFFV-9993)  | Out-of-bounds access in H5O_dtype_decode_helper | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | -| [CVE-2016-4332](https://nvd.nist.gov/vuln/detail/CVE-2016-4332) | [HDFFV-9950](https://jira.hdfgroup.org/browse/HDFFV-9950) | Out-of-bounds write in H5O_msg_read_oh | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | -| [CVE-2016-4331](https://nvd.nist.gov/vuln/detail/CVE-2016-4331) | [HDFFV-9951](https://jira.hdfgroup.org/browse/HDFFV-9951) | Out-of-bounds write in H5Z_nbit_decompress_one_atomic | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | -| [CVE-2016-4330](https://nvd.nist.gov/vuln/detail/CVE-2016-4330) | [HDFFV-9992](https://jira.hdfgroup.org/browse/HDFFV-9952) | Out-of-bounds read in H5O_dtype_decode_helper | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | - diff --git a/doc/CVEissues-table.md b/doc/CVEissues-table.md deleted file mode 100644 index 336fa363fe3..00000000000 --- a/doc/CVEissues-table.md +++ /dev/null @@ -1,71 +0,0 @@ -| CVE_issue_number | JIRA__number | Summary | Affected version | 1.10.9 | 1.14.0 | CVE file | Reproducing_command | -| :------------------------------------------------------------------------- | :----------- | :------------------------------------------------------------------- | :----------------| :----- | :----- | :--------------- | :------- | -| [GitHub Vul 11](https://github.com/magicSwordsMan/PAAFS/tree/master/vul11) | HDFFV-10722 | Invalid write in H5O_mtime_encode() | 1.10.4 | only failed | only failed | H5O_mtime_encode_invalid-write-memory-access | ./h5repack $file1 $file2 | -| [GitHub Vul 10](https://github.com/magicSwordsMan/PAAFS/tree/master/vul10) | HDFFV-10721 | Invalid read in H5S_close() | 1.10.4 | only failed | only failed | H5S_close_invalid-read-memory-access | ./h5repack $file1 $file2 | -| [GitHub Vul 9](https://github.com/magicSwordsMan/PAAFS/tree/master/vul9) | HDFFV-10720 | Invalid write in H5F_addr_encode_len() | 1.10.4 | only failed | only failed | H5F_addr_encode_len_invalid-write-memory-access | ./h5repack $file1 $file2 | -| [CVE-2022-26061](https://nvd.nist.gov/vuln/detail/CVE-2022-26061) | SUPPORT-1923 | A heap-based buffer overflow vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | -| [CVE-2022-25972](https://nvd.nist.gov/vuln/detail/CVE-2022-25972) | SUPPORT-1923 | An out-of-bounds write vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | -| [CVE-2022-25942](https://nvd.nist.gov/vuln/detail/CVE-2022-25942) | SUPPORT-1923 | An out-of-bounds read vulnerability exists in the gif2h5 | 1.10.4 | gif2h5 | gif2h5 | missing cve file | | -| CVE-2021-46244 | [github #1327](https://github.com/HDFGroup/hdf5/issues/1327) | Divide By Zero in H5T__complete_copy () | 1.13.1-1 |Floating exception (core dumped)|failed with unable to open dataset | POC-GH1327 | | -| CVE-2021-46243 | [github #1326](https://github.com/HDFGroup/hdf5/issues/1326) | An untrusted pointer dereference in H5O__dtype_decode_helper () | 1.13.1-1 |Segmentation fault (core dumped)| Dataset *ERROR* | POC-GH1326 | | -| CVE-2021-46242 | [github 
#1329](https://github.com/HDFGroup/hdf5/issues/1329) | Heap-use-after free via the component H5AC_unpin_entry() | 1.13.1-1 |Segmentation fault (core dumped)| error: unable to open file | POC1-GH1329 | | -| CVE-2021-45833 | [github #1313](https://github.com/HDFGroup/hdf5/issues/1313) | Stack buffer overflow in H5D__create_chunk_file_map_hyper() | 1.13.1-1 |error: unable to open file|error: unable to open file| POC3-GH1313 | | -| CVE-2021-45832 | [github #1315](https://github.com/HDFGroup/hdf5/issues/1315) | Stack overflow in H5I_inc_ref() | 1.13.1-1 |OPEN|OPEN|wrong file provided, POC6-GH1315.md | | -| CVE-2021-45830 | [github #1314](https://github.com/HDFGroup/hdf5/issues/1314) | Heap Buffer Overflow in H5F_addr_decode_len() | 1.13.1-1 |error: unable to open file|error: unable to open file| POC5-GH1314 | | -| CVE-2021-45829 | [github #1317](https://github.com/HDFGroup/hdf5/issues/1317) | Segmentation fault via h5stat | 1.13.1-1 |error: unable to traverse|error: unable to traverse|POC8-GH1317| | -| [CVE-2020-10812](https://nvd.nist.gov/vuln/detail/CVE-2020-10812) | [HDFFV-11052](https://jira.hdfgroup.org/browse/HDFFV-11052) | NULL pointer dereference exists in the function H5F_get_nrefs() | 1.12.0 |Segfault|no SIGSEGV |h5_nrefs_POC|| -| [CVE-2020-10811](https://nvd.nist.gov/vuln/detail/CVE-2020-10811) | [HDFFV-11049](https://jira.hdfgroup.org/browse/HDFFV-11049) | Heap-based buffer over-read exists in the function H5O__layout_decode() | 1.12.0 | fixed | fixed | h5dump_H5O__layout_decode_POC |./h5dump -r -d BAG_root/metadata | -| [CVE-2020-10810](https://nvd.nist.gov/vuln/detail/CVE-2020-10810) | [HDFFV-11053](https://jira.hdfgroup.org/browse/HDFFV-11053) | NULL pointer dereference exists in the function H5AC_unpin_entry()  | 1.13.0 |no SIGSEGV|no SIGSEGV| H5AC_unpin_entry_POC | ./h5clear -s -m | -| [CVE-2020-10809](https://nvd.nist.gov/vuln/detail/CVE-2020-10809) | [HDFFV-11048](https://jira.hdfgroup.org/browse/HDFFV-11048) | Heap-based buffer overflow exists in Decompress() via gif2h5 | 1.12.0 | gif2h5 | gif2h5 | gif2h5_Decompress_POC | gif2h5 | -| [CVE-2019-9152](https://github.com/magicSwordsMan/PAAFS/tree/master/vul8) | [HDFFV-10719](https://jira.hdfgroup.org/browse/HDFFV-10719) | Invalid read in H5MM_xstrdup() | 1.10.4 || OPEN | H5MM_xstrdup_invalid-read-memory-access | | -| [CVE-2019-9151](https://github.com/magicSwordsMan/PAAFS/tree/master/vul7) | [HDFFV-10718](https://jira.hdfgroup.org/browse/HDFFV-10718) | Invalid read in H5VM_memcpyvv() | 1.10.4 | fixed | fixed | H5VM_memcpyvv_invalid-read-memory-access | h5repack file1 file2 | -| [CVE-2019-8398](https://nvd.nist.gov/vuln/detail/CVE-2019-8398) | [HDFFV-10710](https://jira.hdfgroup.org/browse/HDFFV-10710) | Invalid read H5T_get_size | missing ver | fixed | fixed | H5T_get_size_invalid-read-memory-access | h5repack file1 file2 | -| [CVE-2019-8397](https://nvd.nist.gov/vuln/detail/CVE-2019-8397) | [HDFFV-10711](https://jira.hdfgroup.org/browse/HDFFV-10711) | Out of bounds read in the function H5T_close_real()  | 1.10.4 | 0 errors | 0 errors | H5T_close_real_invalid-read-memory-access | h5repack file1 file2 | -| [CVE-2019-8396](https://nvd.nist.gov/vuln/detail/CVE-2019-8396) | [HDFFV-10712](https://jira.hdfgroup.org/browse/HDFFV-10712) | Invalid read in H5O__pline_decode | 1.10.4 | 16 errors from 8 contexts | 0 errors | H5O__pline_decode_invalid-read-memory-access | | -| [CVE-2018-17439](https://nvd.nist.gov/vuln/detail/CVE-2018-17439) | 
[HDFFV-10589](https://jira.hdfgroup.org/browse/HDFFV-10589) | Stack-based buffer overflow in H5S_extent_get_dims() via h52gif | 1.10.3 | gif2h5 | gif2h5 | stackoverflow_H5S_extent_get_dims_H5S | gif2h5 | -| [CVE-2018-17438](https://nvd.nist.gov/vuln/detail/CVE-2018-17438) | [HDFFV-10587](https://jira.hdfgroup.org/browse/HDFFV-10587) | Incorrect protection against division by zero in H5D__select_io() | 1.8.20, 1.10.3 | gif2h5 | 1.12.0 | SIGFPE_H5D__select_io_H5Dselect | gif2h5 | -| [CVE-2018-17437](https://nvd.nist.gov/vuln/detail/CVE-2018-17437) | [HDFFV-10588](https://jira.hdfgroup.org/browse/HDFFV-10588) | Memory leak in H5O_dtype_decode_helper | 1.10.3 | fixed 1.10.5 | check 1.14.0 | memleak_H5O_dtype_decode_helper_H5Odtype | h52gif | -| [CVE-2018-17436](https://nvd.nist.gov/vuln/detail/CVE-2018-17436) | [HDFFV-10593](https://jira.hdfgroup.org/browse/HDFFV-10593) | Invalid write memory access in decompress.c | 1.8.20, 1.10.3 | gif2h5 | gif2h5 | ReadCode_decompress_memoryAccess | gif2h5 | -| [CVE-2018-17435](https://nvd.nist.gov/vuln/detail/CVE-2018-17435) | [HDFFV-10591](https://jira.hdfgroup.org/browse/HDFFV-10591) | Heap-buffer-overflow was discovered in H5O_attr_decode() | 1.10.3 | fixed | fixed | Heap_Overflow_H5O_attr_decode | h52gif file image1.gif -i image | -| [CVE-2018-17434](https://nvd.nist.gov/vuln/detail/CVE-2018-17434) | [HDFFV-10586](https://jira.hdfgroup.org/browse/HDFFV-10586) | Divide by zero in h5repack_filters() | 1.10.3 | only fails | only fails | SIGFPE_apply_filters_h5repack_filters | h5repack -f GZIP=8 -l dset1:CHUNK=5x6 file| -| [CVE-2018-17433](https://nvd.nist.gov/vuln/detail/CVE-2018-17433) | [HDFFV-10592](https://jira.hdfgroup.org/browse/HDFFV-10592) | Heap overflow in ReadGifImageDesc() | 1.10.3 | gif2h5 | gif2h5 | HeapOverflow_ReadGifImageDesc | gif2h5 | -| [CVE-2018-17432](https://nvd.nist.gov/vuln/detail/CVE-2018-17432) | [HDFFV-10590](https://jira.hdfgroup.org/browse/HDFFV-10590) | NULL pointer dereference was discovered in H5O_sdspace_encode() | 1.10.3 | fixed | fixed | Null_Pointer_H5O_sdspace_encode | h5repack file1 file2 | -| [CVE-2018-17237](https://nvd.nist.gov/vuln/detail/CVE-2018-17237) | [HDFFV-10571](https://jira.hdfgroup.org/browse/HDFFV-10571) | Divide by zero in H5D__chunk_set_info_real() | 1.10.3 | fixed | fixed | H5D__chunk_set_info_real_div_by_zero | h5dump | -| [CVE-2018-17234](https://nvd.nist.gov/vuln/detail/CVE-2018-17234) | [HDFFV-10578](https://jira.hdfgroup.org/browse/HDFFV-10578) | Memory Leaks in "H5O__chunk_deserialize()" | 1.10.3 | fixed | fixed | H5O__chunk_deserialize_memory_leak | h5dump | -| [CVE-2018-17233](https://nvd.nist.gov/vuln/detail/CVE-2018-17233) | [HDFFV-10577](https://jira.hdfgroup.org/browse/HDFFV-10577) | Divided by zero in "H5D__create_chunk_file_map_hyper()" | 1.10.3 | fixed | fixed | H5D__create_chunk_file_map_hyper_div_zero | | -| [CVE-2018-16438](https://nvd.nist.gov/vuln/detail/CVE-2018-16438) | no JIRA# | Invalid read in H5L_extern_query | 1.8.20 | Segmentation fault (core dumped) | only failed | H5L_extern_query@H5Lexternal.c_498-10___out-of-bounds-read | h5dump | -| [CVE-2018-15672](https://nvd.nist.gov/vuln/detail/CVE-20) | [HDFFV-10556](https://jira.hdfgroup.org/browse/HDFFV-) | Division by zero in H5D__chunk_init() | 1.10.2 | fixed | fixed | SIGPFE_Crash | h5dump | -| [CVE-2018-15671](https://github.com/SegfaultMasters/covering360/tree/master/HDF5#stack-overflow---stackoverflow_h5p__get_cb) | [HDFFV-10557](https://jira.hdfgroup.org/browse/HDFFV-10557) |Excessive stack consumption 
H5P__get_cb() | 1.10.2 | Infinite loop | Infinite loop | stackoverflow_H5P__get_cb | h5dump | -| [CVE-2018-14460](https://nvd.nist.gov/vuln/detail/CVE-2018-14460) | [HDFFV-11223](https://jira.hdfgroup.org/browse/HDFFV-11223) | Buffer over-read in the function H5O_sdspace_decode() | 1.8.20 | fixed | fixed | H5O_sdspace_decode-heap-buffer-overflow | h5dump | -| [CVE-2018-14035](https://nvd.nist.gov/vuln/detail/CVE-2018-14035) | no JIRA# | Buffer over-read in the function H5VM_memcpyvv() | 1.8.20 | only failed | only failed | H5VM_memcpyvv-heap-buffer-overflow | h5dump | -| [CVE-2018-14034](https://nvd.nist.gov/vuln/detail/CVE-2018-14034) | no JIRA# | Out-of-bounds read in the function H5O_pline_reset() | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-14033](https://nvd.nist.gov/vuln/detail/CVE-2018-14033) | [HDFFV-11159](https://jira.hdfgroup.org/browse/HDFFV-) | Buffer over-read in the function H5O_layout_decode() | 1.8.20 || missing fixed | dev: PR#405 (with test) | | -| [CVE-2018-14032](https://nvd.nist.gov/vuln/detail/CVE-2018-14032) | no JIRA# | Duplicate of CVE-2018-11206 | missing ver || missing fixed | missing cve file | | -| [CVE-2018-14031](https://nvd.nist.gov/vuln/detail/CVE-2018-14031) | no JIRA# | Buffer over-read in the function H5T_copy | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-13876](https://nvd.nist.gov/vuln/detail/CVE-2018-13876) | no JIRA# | Buffer overflow in the function H5FD_sec2_read | 1.8.20 || missing fixed | Related to H5Dread | | -| [CVE-2018-13875](https://nvd.nist.gov/vuln/detail/CVE-2018-13875) | no JIRA# | Out-of-bounds read in the function H5VM_memcpyvv | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-13874](https://nvd.nist.gov/vuln/detail/CVE-2018-13874) | no JIRA# | Buffer overflow in the function H5FD_sec2_read | 1.8.20 || missing fixed | Related to Hdmemset | | -| [CVE-2018-13873](https://nvd.nist.gov/vuln/detail/CVE-2018-13873) | [HDFFV-10676](https://jira.hdfgroup.org/browse/HDFFV-10676) |Buffer over-read in H5O_chunk_deserialize | 1.8.20 | only fails | only fails | H5O_chunk_deserialize-global-buffer-overflow | h5dump | -| [CVE-2018-13872](https://nvd.nist.gov/vuln/detail/CVE-2018-13872) | no JIRA# | Buffer overflow in the function H5G_ent_decode | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-13871](https://nvd.nist.gov/vuln/detail/CVE-2018-13871) | no JIRA# | Buffer overflow in the function H5FL_blk_malloc | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-13870](https://nvd.nist.gov/vuln/detail/CVE-2018-13870) | no JIRA# | Buffer over-read in the function H5O_link_decode | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-13869](https://nvd.nist.gov/vuln/detail/CVE-2018-13869) | no JIRA# | Memcpy parameter overlap in the function H5O_link_decode | 1.8.20 || missing fixed | missing cve file | | -| [CVE-2018-13868](https://nvd.nist.gov/vuln/detail/CVE-2018-13868) | [HDFFV-10548](https://jira.hdfgroup.org/browse/HDFFV-10548) | Buffer over-read in the function H5O_fill_old_decode | 1.8.20 | only fails | only fails | h5dump | -| [CVE-2018-13867](https://nvd.nist.gov/vuln/detail/CVE-2018-13867) | no JIRA# | Out-of-bounds read in the function H5F__accum_read | 1.8.20 | Segmentation fault (core dumped) | Assertion, Abort (core dumped) | H5F__accum_read-Out_Of_Bound_Read | | -| [CVE-2018-13866](https://nvd.nist.gov/vuln/detail/CVE-2018-13866) | no JIRA# | Buffer over-read in the function H5F_addr_decode_len | 1.8.20 || missing fixed | missing cve file | | -| 
[CVE-2018-11207](https://nvd.nist.gov/vuln/detail/CVE-2018-11207) | [HDFFV-10481](https://jira.hdfgroup.org/browse/HDFFV-10481) | A division by zero was discovered in H5D__chunk_init() | 1.10.2 | fixed | fixed | DivByZero__H5D_chunk_POC | h5stat -A -T -G -D -S file | -| [CVE-2018-11206](https://nvd.nist.gov/vuln/detail/CVE-2018-11206) | [HDFFV-10480](https://jira.hdfgroup.org/browse/HDFFV-10480) | Out-of-bounds read in H5O_fill_new_decode and H5O_fill_old_decode | 1.8.20 | fixed | fixed | H5O_fill\_[new/old]\_decode-heap-buffer-overflow | h5dump | -| [CVE-2018-11205](https://nvd.nist.gov/vuln/detail/CVE-2018-11205) | [HDFFV-10479](https://jira.hdfgroup.org/browse/HDFFV-10479) | Out-of-bounds in H5VM_memcpyvv | 1.10.2 | Segmentation fault (core dumped) | Assertion, Abort (core dumped) | H5VM_memcpyvv-H5VM.c_1626-oob_read | h5dump | -| [CVE-2018-11204](https://nvd.nist.gov/vuln/detail/CVE-2018-11204) | [HDFFV-10478](https://jira.hdfgroup.org/browse/HDFFV-10478) | A NULL pointer dereference in H5O__chunk_deserialize | 1.10.2 | fixed | fixed | H5O__chunk_deserialize-H5Ocache.c_1566-null_point_dereference | h5dump | -| [CVE-2018-11203](https://nvd.nist.gov/vuln/detail/CVE-2018-11203) | [HDFFV-10477](https://jira.hdfgroup.org/browse/HDFFV-10477) | A division by zero was discovered in H5D__btree_decode_key | 1.10.2 | fixed | fixed | H5D__btree_decode_key-H5Dbtree.c_697-div_by_zero | h5dump | -| [CVE-2018-11202](https://nvd.nist.gov/vuln/detail/CVE-2018-11202) | [HDFFV-10476](https://jira.hdfgroup.org/browse/HDFFV-10476) | A NULL pointer dereference was discovered in H5S_hyper_make_spans | 1.10.2 | Infinite loop | Assertion, Abort (core dumped) | H5S_hyper_make_spans-H5Shyper.c_6139-null_pointer_dereference | h5dump | -| [CVE-2017-17509](https://nvd.nist.gov/vuln/detail/CVE-2017-17509) | [HDFFV-10358](https://jira.hdfgroup.org/browse/HDFFV-10358) | Out-of-bounds write vulnerability in H5G__ent_decode_vec | 1.10.1 | fixed | fixed | 5-hdf5-heap-overflow-H5G__ent_decode_vec | h5dump | -| [CVE-2017-17508](https://nvd.nist.gov/vuln/detail/CVE-2017-17508) | [HDFFV-10357](https://jira.hdfgroup.org/browse/HDFFV-10357) | Divide-by-zero in H5T_set_loc | 1.10.1 | fixed | fixed | 1-hdf5-divbyzero-H5T_set_loc | h5dump | -| [CVE-2017-17507](https://nvd.nist.gov/vuln/detail/CVE-2017-17507) | [HDFFV-10356](https://jira.hdfgroup.org/browse/HDFFV-10356) | Out-of-bounds read in H5T_conv_struct_opt | 1.10.1 | Dana: | Will not fix | 3-hdf5-outbound-read-H5T_conv_struct_opt | | -| [CVE-2017-17506](https://nvd.nist.gov/vuln/detail/CVE-2017-17506) | [HDFFV-10355](https://jira.hdfgroup.org/browse/HDFFV-10355) | Out-of-bounds read vulnerability in H5Opline_pline_decode | 1.10.1 | fixed | fixed | 4-hdf5-outbound-read-H5Opline_pline_decode | h5dump | -| [CVE-2017-17505](https://nvd.nist.gov/vuln/detail/CVE-2017-17505) | [HDFFV-10354](https://jira.hdfgroup.org/browse/HDFFV-10354) | NULL pointer dereference in the function H5O_pline_decode | 1.10.1 | fixed | fixed | 2-hdf5-null-pointer-H5O_pline_reset.h5 | | -| [CVE-2016-4333](https://nvd.nist.gov/vuln/detail/CVE-2016-4333) | [HDFFV-9993](https://jira.hdfgroup.org/browse/HDFFV-9993)  | Out-of-bounds access in H5O_dtype_decode_helper | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | -| [CVE-2016-4332](https://nvd.nist.gov/vuln/detail/CVE-2016-4332) | [HDFFV-9950](https://jira.hdfgroup.org/browse/HDFFV-9950) | Out-of-bounds write in H5O_msg_read_oh | 1.8.16 | fixed in | 1.8.18, 1.10.1 | 
missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | -| [CVE-2016-4331](https://nvd.nist.gov/vuln/detail/CVE-2016-4331) | [HDFFV-9951](https://jira.hdfgroup.org/browse/HDFFV-9951) | Out-of-bounds write in H5Z_nbit_decompress_one_atomic | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | -| [CVE-2016-4330](https://nvd.nist.gov/vuln/detail/CVE-2016-4330) | [HDFFV-9992](https://jira.hdfgroup.org/browse/HDFFV-9952) | Out-of-bounds read in H5O_dtype_decode_helper | 1.8.16 | fixed in | 1.8.18, 1.10.1 | missing cve file | still need test: [HDFFV-10008](https://jira.hdfgroup.org/browse/HDFFV-10008) | - From 600b64d85981630f4bb0f9a1e337bf61b2a75145 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 12 Apr 2023 14:50:46 -0500 Subject: [PATCH 115/231] Change name of test file to avoid conflict (#2701) --- java/test/TestH5OcopyOld.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/java/test/TestH5OcopyOld.java b/java/test/TestH5OcopyOld.java index 6353df554f9..0fa57e650f2 100644 --- a/java/test/TestH5OcopyOld.java +++ b/java/test/TestH5OcopyOld.java @@ -249,7 +249,7 @@ public void testH5OcopyRefsDatasettodiffFile() try { // create new file - H5fid2 = H5.H5Fcreate("copy.h5", HDF5Constants.H5F_ACC_TRUNC, HDF5Constants.H5P_DEFAULT, + H5fid2 = H5.H5Fcreate("copy_old.h5", HDF5Constants.H5F_ACC_TRUNC, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT); assertTrue("testH5OcopyRefsDatasettodiffFile.H5Fcreate: ", H5fid2 >= 0); H5.H5Fflush(H5fid2, HDF5Constants.H5F_SCOPE_LOCAL); @@ -288,7 +288,7 @@ public void testH5OcopyRefsDatasettodiffFile() catch (Exception ex) { } } - _deleteFile("copy.h5"); + _deleteFile("copy_old.h5"); } @Test From e70e11a29e56f13bcf0e5a295c62b0d4dda61092 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Wed, 12 Apr 2023 14:56:51 -0500 Subject: [PATCH 116/231] Remove duplicated H5_IS_BUFFER_OVERFLOW macro (#2700) --- src/H5private.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/H5private.h b/src/H5private.h index d92f7f6da8b..eb7d8e0a810 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -2436,11 +2436,6 @@ H5_DLL herr_t H5CX_pop(hbool_t update_dxpl_props); #define HDcompile_assert(e) do { typedef struct { unsigned int b: (e); } x; } while(0) */ -/* Check if a read of size bytes starting at ptr would overflow past - * the last valid byte, pointed to by buffer_end . - */ -#define H5_IS_BUFFER_OVERFLOW(ptr, size, buffer_end) (((ptr) + (size)-1) > (buffer_end)) - /* Private typedefs */ /* Union for const/non-const pointer for use by functions that manipulate From 1395f65e23cdd99d5b4f6c1646b519b7699fa35a Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Wed, 12 Apr 2023 19:25:46 -0500 Subject: [PATCH 117/231] Refactor cache validation (#2699) * Correct concurrency bugs when running tests, along with a bugfix & small warning cleanup. * Committing clang-format changes * Allow spaces (and tabs) in VOL connector info string from environment variable. * Parse connector name from HDF5_PLUGIN_PATH environment variable better * Correct H5VLquery_optional to use H5VL routine instead of H5I. Also add an error message to the failure return value from not finding a plugin. * Play nice with existing plugin paths * Use API routine to determine if native connector is terminal. * Committing clang-format changes * Make string size larger, to allow for connectors with longer names. 
* Be more flexible about testing external pass through connectors, especially if they have registered new optional operations. * Make cache validation routines package local --------- Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: AWS ParallelCluster user Co-authored-by: Koziol --- src/H5C.c | 95 +++++++++++++++++++++++------------------------- src/H5Cmpio.c | 20 +++++----- src/H5Cpkg.h | 6 +++ src/H5Cprivate.h | 6 --- 4 files changed, 62 insertions(+), 65 deletions(-) diff --git a/src/H5C.c b/src/H5C.c index 7fa43aebfc5..2a54e5c0c2c 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -933,8 +933,8 @@ H5C_flush_cache(H5F_t *f, unsigned flags) #endif /* H5C_DO_SANITY_CHECKS */ #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -1106,8 +1106,8 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u #ifdef H5C_DO_EXTREME_SANITY_CHECKS /* no need to verify that entry is not already in the index as */ /* we already make that check below. */ - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -1293,8 +1293,8 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL) #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -1332,8 +1332,8 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u done: #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -1651,8 +1651,8 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd HDassert(H5F_addr_ne(old_addr, new_addr)); #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + 
H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -1757,8 +1757,8 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd done: #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -1804,7 +1804,7 @@ H5C_resize_entry(void *thing, size_t new_size) HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??") #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || (H5C_validate_pinned_entry_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -1894,7 +1894,7 @@ H5C_resize_entry(void *thing, size_t new_size) done: #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || (H5C_validate_pinned_entry_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0) HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -1931,8 +1931,8 @@ H5C_pin_protected_entry(void *thing) HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -1946,8 +1946,8 @@ H5C_pin_protected_entry(void *thing) done: #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -2010,8 +2010,8 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign HDassert(H5F_addr_defined(addr)); #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -2366,8 +2366,8 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign done: #ifdef 
H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -2800,8 +2800,8 @@ H5C_unpin_entry(void *_entry_ptr) HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -2811,8 +2811,8 @@ H5C_unpin_entry(void *_entry_ptr) done: #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -2903,9 +2903,8 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) was_clean = !(entry_ptr->is_dirty); #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) - + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -3111,9 +3110,8 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) done: #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) - + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -5393,9 +5391,8 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) HDassert(ring < H5C_RING_NTYPES); #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) - + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -6824,7 +6821,7 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) /*------------------------------------------------------------------------- * - * Function: H5C_validate_lru_list + * Function: H5C__validate_lru_list * * Purpose: 
Debugging function that scans the LRU list for errors. * @@ -6840,14 +6837,14 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) */ #ifdef H5C_DO_EXTREME_SANITY_CHECKS herr_t -H5C_validate_lru_list(H5C_t *cache_ptr) +H5C__validate_lru_list(H5C_t *cache_ptr) { int32_t len = 0; size_t size = 0; H5C_cache_entry_t *entry_ptr = NULL; herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(FAIL) + FUNC_ENTER_PACKAGE HDassert(cache_ptr); HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); @@ -6892,12 +6889,12 @@ H5C_validate_lru_list(H5C_t *cache_ptr) HDassert(0); FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_validate_lru_list() */ +} /* H5C__validate_lru_list() */ #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ /*------------------------------------------------------------------------- * - * Function: H5C_validate_pinned_entry_list + * Function: H5C__validate_pinned_entry_list * * Purpose: Debugging function that scans the pinned entry list for * errors. @@ -6914,14 +6911,14 @@ H5C_validate_lru_list(H5C_t *cache_ptr) */ #ifdef H5C_DO_EXTREME_SANITY_CHECKS herr_t -H5C_validate_pinned_entry_list(H5C_t *cache_ptr) +H5C__validate_pinned_entry_list(H5C_t *cache_ptr) { int32_t len = 0; size_t size = 0; H5C_cache_entry_t *entry_ptr = NULL; herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(FAIL) + FUNC_ENTER_PACKAGE HDassert(cache_ptr); HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); @@ -6969,12 +6966,12 @@ H5C_validate_pinned_entry_list(H5C_t *cache_ptr) HDassert(0); FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_validate_pinned_entry_list() */ +} /* H5C__validate_pinned_entry_list() */ #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ /*------------------------------------------------------------------------- * - * Function: H5C_validate_protected_entry_list + * Function: H5C__validate_protected_entry_list * * Purpose: Debugging function that scans the protected entry list for * errors. 
@@ -6991,14 +6988,14 @@ H5C_validate_pinned_entry_list(H5C_t *cache_ptr) */ #ifdef H5C_DO_EXTREME_SANITY_CHECKS herr_t -H5C_validate_protected_entry_list(H5C_t *cache_ptr) +H5C__validate_protected_entry_list(H5C_t *cache_ptr) { int32_t len = 0; size_t size = 0; H5C_cache_entry_t *entry_ptr = NULL; herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(FAIL) + FUNC_ENTER_PACKAGE HDassert(cache_ptr); HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); @@ -7046,7 +7043,7 @@ H5C_validate_protected_entry_list(H5C_t *cache_ptr) HDassert(0); FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_validate_protected_entry_list() */ +} /* H5C__validate_protected_entry_list() */ #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ /*------------------------------------------------------------------------- @@ -7565,8 +7562,8 @@ H5C__serialize_cache(H5F_t *f) #endif /* H5C_DO_SANITY_CHECKS */ #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || - (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index 7d87d454d96..a92ac104770 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -654,8 +654,8 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr HDassert(ce_array_ptr != NULL); #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C_validate_protected_entry_list(cache_ptr) < 0 || H5C_validate_pinned_entry_list(cache_ptr) < 0 || - H5C_validate_lru_list(cache_ptr) < 0) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -673,8 +673,8 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr } /* end else */ #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C_validate_protected_entry_list(cache_ptr) < 0 || - H5C_validate_pinned_entry_list(cache_ptr) < 0 || H5C_validate_lru_list(cache_ptr) < 0) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || + H5C__validate_pinned_entry_list(cache_ptr) < 0 || H5C__validate_lru_list(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed in for loop") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ #endif /* H5C_DO_SANITY_CHECKS */ @@ -812,8 +812,8 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr done: #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C_validate_protected_entry_list(cache_ptr) < 0 || H5C_validate_pinned_entry_list(cache_ptr) < 0 || - H5C_validate_lru_list(cache_ptr) < 0) + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ @@ -1090,8 +1090,8 @@ H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES #endif /* H5C_DO_SANITY_CHECKS */ #ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C_validate_protected_entry_list(cache_ptr) < 0 || H5C_validate_pinned_entry_list(cache_ptr) < 0 || - H5C_validate_lru_list(cache_ptr) < 0) + if 
(H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+        H5C__validate_lru_list(cache_ptr) < 0)
         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1182,8 +1182,8 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
     HDassert(ring < H5C_RING_NTYPES);
 #ifdef H5C_DO_EXTREME_SANITY_CHECKS
-    if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
-        (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+    if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+        (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 5b3d942fd31..3e99646df68 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -4630,6 +4630,12 @@ H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_
 H5_DLL herr_t H5C__tag_entry(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr);
 H5_DLL herr_t H5C__untag_entry(H5C_t *cache, H5C_cache_entry_t *entry);
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+H5_DLL herr_t H5C__validate_lru_list(H5C_t *cache_ptr);
+H5_DLL herr_t H5C__validate_pinned_entry_list(H5C_t *cache_ptr);
+H5_DLL herr_t H5C__validate_protected_entry_list(H5C_t *cache_ptr);
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
 /* Testing functions */
 #ifdef H5C_TESTING
 H5_DLL herr_t H5C__verify_cork_tag_test(hid_t fid, H5O_token_t tag_token, hbool_t status);
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 949c3e162cc..855557dc5b5 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -2270,12 +2270,6 @@ H5_DLL herr_t H5C_get_mdc_image_info(const H5C_t *cache_ptr, haddr_t *image_ad
 H5_DLL hbool_t H5C_entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr);
 #endif
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
-H5_DLL herr_t H5C_validate_lru_list(H5C_t *cache_ptr);
-H5_DLL herr_t H5C_validate_pinned_entry_list(H5C_t *cache_ptr);
-H5_DLL herr_t H5C_validate_protected_entry_list(H5C_t *cache_ptr);
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
 /* Logging functions */
 H5_DLL herr_t H5C_start_logging(H5C_t *cache);
 H5_DLL herr_t H5C_stop_logging(H5C_t *cache);

From 666a782053ccbeeec0636fd867ec34b52b95be13 Mon Sep 17 00:00:00 2001
From: Quincey Koziol
Date: Thu, 13 Apr 2023 00:19:46 -0500
Subject: [PATCH 118/231] Refactor cache mark flush dep (#2711)

---
 src/H5C.c    | 6 ++++--
 src/H5Cpkg.h | 2 --
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/H5C.c b/src/H5C.c
index 2a54e5c0c2c..3bafc36b45e 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -115,6 +115,8 @@ static void *H5C__load_entry(H5F_t *f,
 static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry);
 static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry);
+static herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry);
+static herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry);
 static herr_t H5C__serialize_ring(H5F_t *f, H5C_ring_t ring);
 static herr_t H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
@@ -7353,7 +7355,7 @@ H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry)
 *
 *-------------------------------------------------------------------------
 */
-herr_t
+static herr_t
 H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr)
 {
     int i; /* Local index variable */
@@ -7403,7 +7405,7 @@ H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr)
 *
 *-------------------------------------------------------------------------
 */
-herr_t
+static herr_t
 H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr)
 {
     unsigned u; /* Local index variable */
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 3e99646df68..2bc6f368ef8 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -4617,8 +4617,6 @@ H5_DLL herr_t H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr,
                                       unsigned flags);
 H5_DLL herr_t H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr);
 H5_DLL herr_t H5C__load_cache_image(H5F_t *f);
-H5_DLL herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t * entry_ptr);
-H5_DLL herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr);
 H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, size_t space_needed, hbool_t write_permitted);
 H5_DLL herr_t H5C__flush_marked_entries(H5F_t * f);

From 028777591f1b3f02fe3a1f1eb47e5c14780445b0 Mon Sep 17 00:00:00 2001
From: Quincey Koziol
Date: Thu, 13 Apr 2023 11:01:14 -0500
Subject: [PATCH 119/231] Fix cache validate (#2718)

---
 src/H5C.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/H5C.c b/src/H5C.c
index 3bafc36b45e..f6bc1ff68f2 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -821,7 +821,7 @@ H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flag
     HDassert(H5F_addr_defined(addr));
 #ifdef H5C_DO_EXTREME_SANITY_CHECKS
-    if (H5C_validate_lru_list(cache_ptr) < 0)
+    if (H5C__validate_lru_list(cache_ptr) < 0)
         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry")
 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -856,7 +856,7 @@ H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flag
 done:
 #ifdef H5C_DO_EXTREME_SANITY_CHECKS
-    if (H5C_validate_lru_list(cache_ptr) < 0)
+    if (H5C__validate_lru_list(cache_ptr) < 0)
         HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit")
 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */

From 11e3172ab5e869c5860aa03d484235e9a6ba7f43 Mon Sep 17 00:00:00 2001
From: Dana Robinson <43805+derobins@users.noreply.github.com>
Date: Thu, 13 Apr 2023 09:04:51 -0700
Subject: [PATCH 120/231] Update HDF-EOS5 workflow to mirror main.yml (#2710)

Adds paths to ignore, concurrency, etc.
--- .github/workflows/hdfeos5.yml | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/.github/workflows/hdfeos5.yml b/.github/workflows/hdfeos5.yml index 8378b4c2fe4..2140fb6e000 100644 --- a/.github/workflows/hdfeos5.yml +++ b/.github/workflows/hdfeos5.yml @@ -1,6 +1,23 @@ name: hdfeos5 -on: [push, pull_request] +on: + workflow_dispatch: + push: + pull_request: + branches: [ develop ] + paths-ignore: + - '.github/CODEOWNERS' + - '.github/FUNDING.yml' + - 'doc/**' + - 'release_docs/**' + - 'ACKNOWLEDGEMENTS' + - 'COPYING**' + - '**.md' + +# Using concurrency to cancel any in-progress job or run +concurrency: + group: ${{ github.ref }} + cancel-in-progress: true jobs: build: From b7bfab2c6d67763340c755e81663dd44e9a10d02 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Thu, 13 Apr 2023 12:36:48 -0500 Subject: [PATCH 121/231] Identify some options as advanced (#2717) * Identify some options as advanced Add explicit option statement to set default for CMake option * Revert advanced setting for file locks --- config/cmake/ConfigureChecks.cmake | 5 +++++ config/cmake/HDF5PluginMacros.cmake | 1 + config/cmake/HDFCompilerFlags.cmake | 1 + config/cmake/HDFMacros.cmake | 2 ++ config/cmake/UserMacros/Windows_MT.cmake | 1 + release_docs/INSTALL_CMake.txt | 2 ++ 6 files changed, 12 insertions(+) diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake index 47c2438349c..869f5ea3c2c 100644 --- a/config/cmake/ConfigureChecks.cmake +++ b/config/cmake/ConfigureChecks.cmake @@ -240,6 +240,7 @@ if (MINGW OR NOT WINDOWS) set (HDF_EXTRA_C_FLAGS ${HDF_EXTRA_C_FLAGS} -D_GNU_SOURCE) option (HDF_ENABLE_LARGE_FILE "Enable support for large (64-bit) files on Linux." ON) + mark_as_advanced (HDF_ENABLE_LARGE_FILE) if (HDF_ENABLE_LARGE_FILE AND NOT DEFINED TEST_LFS_WORKS_RUN) set (msg "Performing TEST_LFS_WORKS") try_run (TEST_LFS_WORKS_RUN TEST_LFS_WORKS_COMPILE @@ -568,6 +569,7 @@ endif () # Option for --enable-strict-format-checks #----------------------------------------------------------------------------- option (HDF5_STRICT_FORMAT_CHECKS "Whether to perform strict file format checks" OFF) +mark_as_advanced (HDF5_STRICT_FORMAT_CHECKS) if (HDF5_STRICT_FORMAT_CHECKS) set (${HDF_PREFIX}_STRICT_FORMAT_CHECKS 1) endif () @@ -580,6 +582,7 @@ MARK_AS_ADVANCED (HDF5_STRICT_FORMAT_CHECKS) # support denormalized floating values) to maximize speed. #----------------------------------------------------------------------------- option (HDF5_WANT_DATA_ACCURACY "IF data accuracy is guaranteed during data conversions" ON) +mark_as_advanced (HDF5_WANT_DATA_ACCURACY) if (HDF5_WANT_DATA_ACCURACY) set (${HDF_PREFIX}_WANT_DATA_ACCURACY 1) endif () @@ -592,6 +595,7 @@ MARK_AS_ADVANCED (HDF5_WANT_DATA_ACCURACY) # actually benefit little. #----------------------------------------------------------------------------- option (HDF5_WANT_DCONV_EXCEPTION "exception handling functions is checked during data conversions" ON) +mark_as_advanced (HDF5_WANT_DCONV_EXCEPTION) if (HDF5_WANT_DCONV_EXCEPTION) set (${HDF_PREFIX}_WANT_DCONV_EXCEPTION 1) endif () @@ -601,6 +605,7 @@ MARK_AS_ADVANCED (HDF5_WANT_DCONV_EXCEPTION) # Check if they would like the function stack support compiled in #----------------------------------------------------------------------------- option (HDF5_ENABLE_CODESTACK "Enable the function stack tracing (for developer debugging)." 
OFF) +mark_as_advanced (HDF5_ENABLE_CODESTACK) if (HDF5_ENABLE_CODESTACK) set (${HDF_PREFIX}_HAVE_CODESTACK 1) endif () diff --git a/config/cmake/HDF5PluginMacros.cmake b/config/cmake/HDF5PluginMacros.cmake index a180d5d8120..da0eab5f903 100644 --- a/config/cmake/HDF5PluginMacros.cmake +++ b/config/cmake/HDF5PluginMacros.cmake @@ -84,6 +84,7 @@ macro (FILTER_OPTION plname) option (ENABLE_${plname} "Enable Library Building for ${plname} plugin" ON) if (ENABLE_${plname}) option (HDF_${plname}_USE_EXTERNAL "Use External Library Building for ${PLUGIN_NAME} plugin" 0) + mark_as_advanced (HDF_${plname}_USE_EXTERNAL) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") set (HDF_${plname}_USE_EXTERNAL 1 CACHE BOOL "Use External Library Building for ${PLUGIN_NAME} plugin" FORCE) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake index 7a9a62ebbdb..1bbf57c9e5d 100644 --- a/config/cmake/HDFCompilerFlags.cmake +++ b/config/cmake/HDFCompilerFlags.cmake @@ -64,6 +64,7 @@ if (CMAKE_COMPILER_IS_GNUCC) # This should NOT be on by default as it can cause process issues. #----------------------------------------------------------------------------- option (HDF5_ENABLE_BUILD_DIAGS "Enable color and URL extended diagnostic messages" OFF) + mark_as_advanced (HDF5_ENABLE_BUILD_DIAGS) if (HDF5_ENABLE_BUILD_DIAGS) message (STATUS "... default color and URL extended diagnostic messages enabled") else () diff --git a/config/cmake/HDFMacros.cmake b/config/cmake/HDFMacros.cmake index fc04d176cd8..71454b62f52 100644 --- a/config/cmake/HDFMacros.cmake +++ b/config/cmake/HDFMacros.cmake @@ -73,6 +73,8 @@ endmacro () #------------------------------------------------------------------------------- macro (INSTALL_TARGET_PDB libtarget targetdestination targetcomponent) + option (DISABLE_PDB_FILES "Do not install PDB files" OFF) + mark_as_advanced (DISABLE_PDB_FILES) if (WIN32 AND MSVC AND NOT DISABLE_PDB_FILES) get_target_property (target_type ${libtarget} TYPE) if (${libtype} MATCHES "SHARED") diff --git a/config/cmake/UserMacros/Windows_MT.cmake b/config/cmake/UserMacros/Windows_MT.cmake index 15cffbaae99..c8edbe4a0d1 100644 --- a/config/cmake/UserMacros/Windows_MT.cmake +++ b/config/cmake/UserMacros/Windows_MT.cmake @@ -47,6 +47,7 @@ endmacro () #----------------------------------------------------------------------------- option (BUILD_STATIC_CRT_LIBS "Build With Static CRT Libraries" OFF) +mark_as_advanced (BUILD_STATIC_CRT_LIBS) if (BUILD_STATIC_CRT_LIBS) TARGET_STATIC_CRT_FLAGS () endif () diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index d9eb97d80f3..59edaf265f8 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -763,6 +763,8 @@ BUILD_SHARED_LIBS "Build Shared Libraries" ON BUILD_STATIC_LIBS "Build Static Libraries" ON BUILD_STATIC_EXECS "Build Static Executables" OFF BUILD_TESTING "Build HDF5 Unit Testing" ON +if (WINDOWS) + DISABLE_PDB_FILES "Do not install PDB files" OFF ---------------- HDF5 Build Options --------------------- HDF5_BUILD_CPP_LIB "Build HDF5 C++ Library" OFF From 4fcedf2b75f139fd9f62385a9f38a1d150ea9655 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Thu, 13 Apr 2023 13:25:50 -0500 Subject: [PATCH 122/231] Refactor H5C_entry_in_skip_list (#2719) * Refactor H5C__entry_in_skip_list to package scope --- src/H5C.c | 16 ++++++++++++---- src/H5Cpkg.h | 14 +++++++++----- src/H5Cprivate.h | 4 ---- 3 files changed, 21 
insertions(+), 13 deletions(-) diff --git a/src/H5C.c b/src/H5C.c index f6bc1ff68f2..2bce704205f 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -7050,7 +7050,7 @@ H5C__validate_protected_entry_list(H5C_t *cache_ptr) /*------------------------------------------------------------------------- * - * Function: H5C_entry_in_skip_list + * Function: H5C__entry_in_skip_list * * Purpose: Debugging function that scans skip list to see if it * is in present. We need this, as it is possible for @@ -7065,11 +7065,15 @@ H5C__validate_protected_entry_list(H5C_t *cache_ptr) */ #ifdef H5C_DO_SLIST_SANITY_CHECKS hbool_t -H5C_entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr) +H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr) { H5SL_node_t *node_ptr; hbool_t in_slist; + hbool_t ret_value; + FUNC_ENTER_PACKAGE + + /* Assertions */ HDassert(cache_ptr); HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->slist_ptr); @@ -7092,8 +7096,12 @@ H5C_entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr) node_ptr = H5SL_next(node_ptr); } - return (in_slist); -} /* H5C_entry_in_skip_list() */ + /* Set return value */ + ret_value = in_slist; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__entry_in_skip_list() */ #endif /* H5C_DO_SLIST_SANITY_CHECKS */ /*------------------------------------------------------------------------- diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 2bc6f368ef8..55c7d03246d 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -1417,12 +1417,12 @@ if ( ( (cache_ptr)->index_size != \ #ifdef H5C_DO_SLIST_SANITY_CHECKS -#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) \ - H5C_entry_in_skip_list((cache_ptr), (entry_ptr)) +#define H5C_ENTRY_IN_SLIST(cache_ptr, entry_ptr) \ + H5C__entry_in_skip_list((cache_ptr), (entry_ptr)) #else /* H5C_DO_SLIST_SANITY_CHECKS */ -#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) FALSE +#define H5C_ENTRY_IN_SLIST(cache_ptr, entry_ptr) FALSE #endif /* H5C_DO_SLIST_SANITY_CHECKS */ @@ -1440,7 +1440,7 @@ if ( ( (cache_ptr)->index_size != \ HDassert( (entry_ptr)->size > 0 ); \ HDassert( H5F_addr_defined((entry_ptr)->addr) ); \ HDassert( !((entry_ptr)->in_slist) ); \ - HDassert( ! ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \ + HDassert( ! H5C_ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ @@ -1483,7 +1483,7 @@ if ( ( (cache_ptr)->index_size != \ \ HDassert( (entry_ptr) ); \ HDassert( (entry_ptr)->size > 0 ); \ - HDassert( ! ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \ + HDassert( ! 
H5C_ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \ HDassert( H5F_addr_defined((entry_ptr)->addr) ); \ HDassert( !((entry_ptr)->in_slist) ); \ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \ @@ -4628,6 +4628,10 @@ H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_ H5_DLL herr_t H5C__tag_entry(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr); H5_DLL herr_t H5C__untag_entry(H5C_t *cache, H5C_cache_entry_t *entry); +#ifdef H5C_DO_SLIST_SANITY_CHECKS +H5_DLL hbool_t H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr); +#endif + #ifdef H5C_DO_EXTREME_SANITY_CHECKS H5_DLL herr_t H5C__validate_lru_list(H5C_t *cache_ptr); H5_DLL herr_t H5C__validate_pinned_entry_list(H5C_t *cache_ptr); diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 855557dc5b5..a5330ce664d 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -2266,10 +2266,6 @@ H5_DLL herr_t H5C_cache_image_status(H5F_t *f, hbool_t *load_ci_ptr, hbool_t * H5_DLL hbool_t H5C_cache_image_pending(const H5C_t *cache_ptr); H5_DLL herr_t H5C_get_mdc_image_info(const H5C_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len); -#ifdef H5C_DO_SLIST_SANITY_CHECKS -H5_DLL hbool_t H5C_entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr); -#endif - /* Logging functions */ H5_DLL herr_t H5C_start_logging(H5C_t *cache); H5_DLL herr_t H5C_stop_logging(H5C_t *cache); From ef577ad6e5370e788798e5dec525618145bd801f Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Thu, 13 Apr 2023 14:10:48 -0500 Subject: [PATCH 123/231] Remove dead code for H5C__SLIST_OPT_ENABLED (#2721) * Remove dead code for H5C__SLIST_OPT_ENABLED --- src/H5C.c | 10 +--------- src/H5Cpkg.h | 6 ------ 2 files changed, 1 insertion(+), 15 deletions(-) diff --git a/src/H5C.c b/src/H5C.c index 2bce704205f..f9a41fd238d 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -261,7 +261,7 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id, cache_ptr->num_objs_corked = 0; /* slist field initializations */ - cache_ptr->slist_enabled = !H5C__SLIST_OPT_ENABLED; + cache_ptr->slist_enabled = FALSE; cache_ptr->slist_changed = FALSE; cache_ptr->slist_len = 0; cache_ptr->slist_size = (size_t)0; @@ -2708,8 +2708,6 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_sli if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry") -#if H5C__SLIST_OPT_ENABLED - if (slist_enabled) { if (cache_ptr->slist_enabled) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already enabled?") @@ -2763,12 +2761,6 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_sli HDassert(0 == cache_ptr->slist_size); } -#else /* H5C__SLIST_OPT_ENABLED is FALSE */ - - HDassert(cache_ptr->slist_enabled); - -#endif /* H5C__SLIST_OPT_ENABLED is FALSE */ - done: FUNC_LEAVE_NOAPI(ret_value) } /* H5C_set_slist_enabled() */ diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 55c7d03246d..08d74db827b 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -59,12 +59,6 @@ #define H5C_FLUSH_DEP_PARENT_INIT 8 -/* Set to TRUE to enable the slist optimization. If this field is TRUE, - * the slist is disabled whenever a flush is not in progress. 
- */ -#define H5C__SLIST_OPT_ENABLED TRUE - - /**************************************************************************** * * We maintain doubly linked lists of instances of H5C_cache_entry_t for a From 2c5e9cc9df3b60362c500412c44279e6216cb324 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Thu, 13 Apr 2023 14:11:35 -0500 Subject: [PATCH 124/231] Fix java depends and pdb lib install (#2720) * Fix java depends and pdb lib install * Correct end of for loop depends * fix location of set last_test --- config/cmake/HDFMacros.cmake | 2 +- java/examples/datasets/CMakeLists.txt | 7 ++++++- java/examples/datatypes/CMakeLists.txt | 5 +++++ java/examples/groups/CMakeLists.txt | 7 +++++++ java/examples/intro/CMakeLists.txt | 5 +++++ 5 files changed, 24 insertions(+), 2 deletions(-) diff --git a/config/cmake/HDFMacros.cmake b/config/cmake/HDFMacros.cmake index 71454b62f52..5ac7316883c 100644 --- a/config/cmake/HDFMacros.cmake +++ b/config/cmake/HDFMacros.cmake @@ -77,7 +77,7 @@ macro (INSTALL_TARGET_PDB libtarget targetdestination targetcomponent) mark_as_advanced (DISABLE_PDB_FILES) if (WIN32 AND MSVC AND NOT DISABLE_PDB_FILES) get_target_property (target_type ${libtarget} TYPE) - if (${libtype} MATCHES "SHARED") + if (${target_type} MATCHES "SHARED") set (targetfilename $) else () get_property (target_name TARGET ${libtarget} PROPERTY $,$>,OUTPUT_NAME_DEBUG,OUTPUT_NAME_RELWITHDEBINFO>) diff --git a/java/examples/datasets/CMakeLists.txt b/java/examples/datasets/CMakeLists.txt index 4403336e280..a9a5643d652 100644 --- a/java/examples/datasets/CMakeLists.txt +++ b/java/examples/datasets/CMakeLists.txt @@ -82,6 +82,7 @@ if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL) get_property (target_name TARGET ${HDF5_JAVA_JNI_LIB_TARGET} PROPERTY OUTPUT_NAME) set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=${target_name}$<$,$>:${CMAKE_DEBUG_POSTFIX}>;") + set (last_test "") foreach (example ${HDF_JAVA_EXAMPLES}) if (example STREQUAL "H5Ex_D_External") add_test ( @@ -97,6 +98,9 @@ if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL) ${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}.h5 ) endif () + if (last_test) + set_tests_properties (JAVA_datasets-${example}-clear-objects PROPERTIES DEPENDS ${last_test}) + endif () add_test ( NAME JAVA_datasets-${example}-copy-objects @@ -137,9 +141,10 @@ if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL) COMMAND ${CMAKE_COMMAND} -E remove ${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}.h5 ) + endif () set_tests_properties (JAVA_datasets-${example}-clean-objects PROPERTIES DEPENDS JAVA_datasets-${example} ) - endif () + set (last_test "JAVA_datasets-${example}-clean-objects") endforeach () endif () diff --git a/java/examples/datatypes/CMakeLists.txt b/java/examples/datatypes/CMakeLists.txt index 1690701889e..5860429ef6e 100644 --- a/java/examples/datatypes/CMakeLists.txt +++ b/java/examples/datatypes/CMakeLists.txt @@ -67,12 +67,16 @@ if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL) get_property (target_name TARGET ${HDF5_JAVA_JNI_LIB_TARGET} PROPERTY OUTPUT_NAME) set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=${target_name}$<$,$>:${CMAKE_DEBUG_POSTFIX}>;") + set (last_test "") foreach (example ${HDF_JAVA_EXAMPLES}) add_test ( NAME JAVA_datatypes-${example}-clear-objects COMMAND ${CMAKE_COMMAND} -E remove ${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}.h5 ) + if (last_test) + set_tests_properties (JAVA_datatypes-${example}-clear-objects PROPERTIES DEPENDS ${last_test}) + endif () add_test ( NAME 
JAVA_datatypes-${example}-copy-objects @@ -107,5 +111,6 @@ if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL) set_tests_properties (JAVA_datatypes-${example}-clean-objects PROPERTIES DEPENDS JAVA_datatypes-${example} ) + set (last_test "JAVA_datatypes-${example}-clean-objects") endforeach () endif () diff --git a/java/examples/groups/CMakeLists.txt b/java/examples/groups/CMakeLists.txt index 1d63c1506f0..fc9eb948a99 100644 --- a/java/examples/groups/CMakeLists.txt +++ b/java/examples/groups/CMakeLists.txt @@ -66,6 +66,7 @@ if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL) get_property (target_name TARGET ${HDF5_JAVA_JNI_LIB_TARGET} PROPERTY OUTPUT_NAME) set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=${target_name}$<$,$>:${CMAKE_DEBUG_POSTFIX}>;") + set (last_test "") foreach (example ${HDF_JAVA_EXAMPLES}) if (NOT example STREQUAL "H5Ex_G_Iterate" AND NOT example STREQUAL "H5Ex_G_Visit") if (example STREQUAL "H5Ex_G_Compact") @@ -88,6 +89,9 @@ if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL) COMMAND ${CMAKE_COMMAND} -E echo "${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}.h5 exists" ) endif () + if (last_test) + set_tests_properties (JAVA_groups-${example}-clear-objects PROPERTIES DEPENDS ${last_test}) + endif () add_test ( NAME JAVA_groups-${example}-copy-objects @@ -132,6 +136,9 @@ if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL) set_tests_properties (JAVA_groups-${example}-clean-objects PROPERTIES DEPENDS JAVA_groups-${example} ) + set (last_test "JAVA_groups-${example}-clean-objects") + else () + set (last_test "JAVA_groups-${example}") endif () endforeach () endif () diff --git a/java/examples/intro/CMakeLists.txt b/java/examples/intro/CMakeLists.txt index a1e2a512d24..685ef901347 100644 --- a/java/examples/intro/CMakeLists.txt +++ b/java/examples/intro/CMakeLists.txt @@ -56,12 +56,16 @@ if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL) get_property (target_name TARGET ${HDF5_JAVA_JNI_LIB_TARGET} PROPERTY OUTPUT_NAME) set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=${target_name}$<$,$>:${CMAKE_DEBUG_POSTFIX}>;") + set (last_test "") foreach (example ${HDF_JAVA_EXAMPLES}) add_test ( NAME JAVA_intro-${example}-clear-objects COMMAND ${CMAKE_COMMAND} -E remove ${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}.h5 ) + if (last_test) + set_tests_properties (JAVA_intro-${example}-clear-objects PROPERTIES DEPENDS ${last_test}) + endif () add_test ( NAME JAVA_intro-${example}-copy-objects @@ -97,6 +101,7 @@ if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL) set_tests_properties (JAVA_intro-${example}-clean-objects PROPERTIES DEPENDS JAVA_intro-${example} ) + set (last_test "JAVA_intro-${example}-clean-objects") endforeach () endif () From 65559f28e09c70055da231279653a4a3d42ad604 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Thu, 13 Apr 2023 15:33:06 -0500 Subject: [PATCH 125/231] Remove unused macros (#2722) * Remove unused macros --- src/H5Cpkg.h | 147 --------------------------------------------------- 1 file changed, 147 deletions(-) diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 08d74db827b..b555fdadcaf 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -1295,32 +1295,6 @@ if ( ( (cache_ptr)->index_size != \ H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \ } -#define H5C__SEARCH_INDEX_NO_STATS(cache_ptr, Addr, entry_ptr, fail_val) \ -{ \ - int k; \ - H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \ - k = H5C__HASH_FCN(Addr); \ - entry_ptr = ((cache_ptr)->index)[k]; \ - while(entry_ptr) { \ - 
if(H5F_addr_eq(Addr, (entry_ptr)->addr)) { \ - H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \ - if(entry_ptr != ((cache_ptr)->index)[k]) { \ - if((entry_ptr)->ht_next) \ - (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ - HDassert((entry_ptr)->ht_prev != NULL); \ - (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ - ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \ - (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \ - (entry_ptr)->ht_prev = NULL; \ - ((cache_ptr)->index)[k] = (entry_ptr); \ - H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \ - } \ - break; \ - } \ - (entry_ptr) = (entry_ptr)->ht_next; \ - } \ -} - #define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr) \ { \ H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \ @@ -1733,127 +1707,6 @@ if ( ( (cache_ptr)->index_size != \ * **************************************************************************/ -/*------------------------------------------------------------------------- - * - * Macro: H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS - * - * Purpose: For efficiency, we sometimes change the order of flushes -- - * but doing so can confuse the replacement policy. This - * macro exists to allow us to specify an entry as the - * most recently touched so we can repair any such - * confusion. - * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the macro - * should switch on the current policy and act accordingly. - * - * Return: N/A - * - * Programmer: John Mainzer, 10/13/05 - * - *------------------------------------------------------------------------- - */ - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( ! ((entry_ptr)->is_pinned) ) { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the head.\ - */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* Use the dirty flag to infer whether the entry is on the clean or \ - * dirty LRU list, and remove it. Then insert it at the head of \ - * the same LRU list. \ - * \ - * At least initially, all entries should be clean. That may \ - * change, so we may as well deal with both cases now. 
\ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - } else { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ - /* End modified LRU specific code. */ \ - } \ -} /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */ - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( ! ((entry_ptr)->is_pinned) ) { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the head \ - */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - } \ -} /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */ - -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - - /*------------------------------------------------------------------------- * * Macro: H5C__UPDATE_RP_FOR_EVICTION From df22d598da57f976de10aa2df038022bbdd48421 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Thu, 13 Apr 2023 14:34:50 -0700 Subject: [PATCH 126/231] Add a RELEASE.txt note for GH #2605 (#2724) --- release_docs/RELEASE.txt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 0fde1ab7099..f64fdd4e26c 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -151,6 +151,16 @@ Bug Fixes since HDF5-1.13.3 release =================================== Library ------- + - Fixed memory leaks, aborts, and overflows in H5O EFL decode + + The external file list code could call assert(), read past buffer + boundaries, and not properly clean up resources when parsing malformed + external data files messages. + + This fix cleans up allocated memory, adds buffer bounds checks, and + converts asserts to HDF5 error checking. + + (DER - 2023/04/13 GH-2605) - Memory leak From b7553a6edc95e8ab789b93ae04e903fd4ddcc120 Mon Sep 17 00:00:00 2001 From: bmribler <39579120+bmribler@users.noreply.github.com> Date: Thu, 13 Apr 2023 17:35:45 -0400 Subject: [PATCH 127/231] Fixed GH-2603, heap-buffer-overflow in H5O__linfo_decode (#2697) * Fixed GH-2603, heap-buffer-overflow in H5O__linfo_decode Verified with valgrind -v --tool=memcheck --leak-check=full h5dump POV-GH-2603 The several invalid reads shown originally are now gone. 
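Both this fix and the EFL note above rely on the same defensive pattern: compute the address of the last valid byte of the message buffer once, then test the remaining space before every decode step and return an HDF5 error instead of asserting or reading past the end. What follows is a minimal, self-contained sketch of that pattern, written with made-up names (DECODE_BOUNDS_CHECK, decode_u16_pair); it is not the library's H5_IS_BUFFER_OVERFLOW macro or any real HDF5 routine, only an illustration of the idea.

/* Sketch only: hypothetical stand-ins for the check-before-decode pattern. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Would reading 'n' bytes at 'p' run past 'p_end' (the last valid byte)? */
#define DECODE_BOUNDS_CHECK(p, n, p_end) ((p) + (n) - 1 > (p_end))

static int
decode_u16_pair(const uint8_t *buf, size_t len, uint16_t out[2])
{
    const uint8_t *p     = buf;
    const uint8_t *p_end = buf + len - 1; /* last valid byte of the message */

    for (int i = 0; i < 2; i++) {
        /* Test the remaining space BEFORE decoding, instead of asserting */
        if (DECODE_BOUNDS_CHECK(p, 2, p_end))
            return -1; /* report an error; never read past the buffer */
        out[i] = (uint16_t)(p[0] | (p[1] << 8));
        p += 2;
    }
    return 0;
}

int
main(void)
{
    const uint8_t msg[3] = {0x01, 0x02, 0x03}; /* deliberately truncated */
    uint16_t      v[2];

    if (decode_u16_pair(msg, sizeof(msg), v) < 0)
        printf("malformed message rejected cleanly\n");
    return 0;
}

Because p_end is the last valid byte rather than one past the end (hence the "- 1" in the patches), a request for n more bytes fails exactly when fewer than n bytes remain, which is what the overflow checks in the H5Olinfo.c hunk below express.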
--- release_docs/RELEASE.txt | 12 ++++++++++-- src/H5Olinfo.c | 28 ++++++++++++++++++++++------ 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index f64fdd4e26c..2dcb057e274 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -147,7 +147,7 @@ Support for new platforms, languages and compilers ================================================== - -Bug Fixes since HDF5-1.13.3 release +Bug Fixes since HDF5-1.14.0 release =================================== Library ------- @@ -162,6 +162,15 @@ Bug Fixes since HDF5-1.13.3 release (DER - 2023/04/13 GH-2605) + - Fixed potential heap buffer overflow in decoding of link info message + + Detections of buffer overflow were added for decoding version, index + flags, link creation order value, and the next three addresses. The + checkings will remove the potential invalid read of any of these + values that could be triggered by a malformed file. + + (BMR - 2023/04/12 GH-2603) + - Memory leak Memory leak was detected when running h5dump with "pov". The memory was allocated @@ -175,7 +184,6 @@ Bug Fixes since HDF5-1.13.3 release (VC - 2023/04/11 GH-2599) - - Fixed memory leaks that could occur when reading a dataset from a malformed file diff --git a/src/H5Olinfo.c b/src/H5Olinfo.c index d4ac3bb131e..11138df2228 100644 --- a/src/H5Olinfo.c +++ b/src/H5Olinfo.c @@ -105,11 +105,13 @@ H5FL_DEFINE_STATIC(H5O_linfo_t); */ static void * H5O__linfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5O_linfo_t *linfo = NULL; /* Link info */ - unsigned char index_flags; /* Flags for encoding link index info */ - void *ret_value = NULL; /* Return value */ + const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ + H5O_linfo_t *linfo = NULL; /* Link info */ + unsigned char index_flags; /* Flags for encoding link index info */ + uint8_t addr_size = H5F_SIZEOF_ADDR(f); /* Temp var */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE @@ -117,6 +119,10 @@ H5O__linfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUS HDassert(f); HDassert(p); + /* Check input buffer before decoding version and index flags */ + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") + /* Version of message */ if (*p++ != H5O_LINFO_VERSION) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message") @@ -136,11 +142,18 @@ H5O__linfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUS linfo->nlinks = HSIZET_MAX; /* Max. 
link creation order value for the group, if tracked */ - if (linfo->track_corder) + if (linfo->track_corder) { + if (H5_IS_BUFFER_OVERFLOW(p, 8, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") INT64DECODE(p, linfo->max_corder) + } else linfo->max_corder = 0; + /* Check input buffer before decoding the next two addresses */ + if (H5_IS_BUFFER_OVERFLOW(p, addr_size + addr_size, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") + /* Address of fractal heap to store "dense" links */ H5F_addr_decode(f, &p, &(linfo->fheap_addr)); @@ -148,8 +161,11 @@ H5O__linfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUS H5F_addr_decode(f, &p, &(linfo->name_bt2_addr)); /* Address of v2 B-tree to index creation order of links, if there is one */ - if (linfo->index_corder) + if (linfo->index_corder) { + if (H5_IS_BUFFER_OVERFLOW(p, addr_size, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(linfo->corder_bt2_addr)); + } else linfo->corder_bt2_addr = HADDR_UNDEF; From 167535342e3451e918b26720e9375f8c4b81ed64 Mon Sep 17 00:00:00 2001 From: Kobrin Eli Date: Fri, 14 Apr 2023 00:37:10 +0300 Subject: [PATCH 128/231] Fix out of bounds in `hdf5/src/H5Fint.c:2859` (#2691) --- release_docs/RELEASE.txt | 12 +++++++++ src/H5Fsuper_cache.c | 58 +++++++++++++++++++++++++++++++++++++--- src/H5Gent.c | 19 +++++++++++-- src/H5Gprivate.h | 2 +- 4 files changed, 84 insertions(+), 7 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 2dcb057e274..63f5a361ded 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -291,6 +291,18 @@ Bug Fixes since HDF5-1.14.0 release (JTH - 2023/02/16, GH #2433) + - Fixed buffer overflow error in image decoding function. + + The error occurred in the function for decoding address from the specified + buffer, which is called many times from the function responsible for image + decoding. The length of the buffer is known in the image decoding function, + but no checks are produced, so the buffer overflow can occur in many places, + including callee functions for address decoding. + + The error was fixed by inserting corresponding checks for buffer overflow. 
+ + (KE - 2023/02/07 GH #2432) + Java Library ------------ diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c index 5d9b62843b1..467e2875ac2 100644 --- a/src/H5Fsuper_cache.c +++ b/src/H5Fsuper_cache.c @@ -433,6 +433,8 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS if (H5F__superblock_prefix_decode(sblock, &image, udata, FALSE) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "can't decode file superblock prefix") + const uint8_t *image_end = image + len - 1; + /* Check for older version of superblock format */ if (sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2) { uint32_t status_flags; /* File status flags */ @@ -440,10 +442,18 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS unsigned snode_btree_k; /* B-tree symbol table internal node 'K' value */ unsigned chunk_btree_k; /* B-tree chunk internal node 'K' value */ + /* Check whether the image pointer is out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + /* Freespace version (hard-wired) */ if (HDF5_FREESPACE_VERSION != *image++) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad free space version number") + /* Check whether the image pointer is out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + /* Root group version number (hard-wired) */ if (HDF5_OBJECTDIR_VERSION != *image++) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad object directory version number") @@ -451,6 +461,10 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS /* Skip over reserved byte */ image++; + /* Check whether the image pointer is out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + /* Shared header version number (hard-wired) */ if (HDF5_SHAREDHEADER_VERSION != *image++) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad shared-header format version number") @@ -466,12 +480,20 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS /* Skip over reserved byte */ image++; + /* Check whether the image pointer is out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, sizeof(uint16_t), image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + /* Various B-tree sizes */ UINT16DECODE(image, sym_leaf_k); if (sym_leaf_k == 0) HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, NULL, "bad symbol table leaf node 1/2 rank") udata->sym_leaf_k = sym_leaf_k; /* Keep a local copy also */ + /* Check whether the image pointer is out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, sizeof(uint16_t), image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + /* Need 'get' call to set other array values */ UINT16DECODE(image, snode_btree_k); if (snode_btree_k == 0) @@ -483,6 +505,10 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS * for the indexed storage B-tree internal 'K' value later. 
*/ + /* Check whether the image pointer is out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, sizeof(uint32_t), image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + /* File status flags (not really used yet) */ UINT32DECODE(image, status_flags); HDassert(status_flags <= 255); @@ -495,16 +521,29 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS * storage B-tree internal 'K' value */ if (sblock->super_vers > HDF5_SUPERBLOCK_VERSION_DEF) { + /* Check whether the image pointer is out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, sizeof(uint16_t), image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + UINT16DECODE(image, chunk_btree_k); /* Reserved bytes are present only in version 1 */ - if (sblock->super_vers == HDF5_SUPERBLOCK_VERSION_1) + if (sblock->super_vers == HDF5_SUPERBLOCK_VERSION_1) { image += 2; /* reserved */ - } /* end if */ + + /* Check whether the image pointer is out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + } + } /* end if */ else chunk_btree_k = HDF5_BTREE_CHUNK_IK_DEF; udata->btree_k[H5B_CHUNK_ID] = chunk_btree_k; + /* Check whether the image pointer will be out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, H5F_SIZEOF_ADDR(udata->f) * 4, image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + /* Remainder of "variable-sized" portion of superblock */ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->base_addr /*out*/); H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->ext_addr /*out*/); @@ -518,7 +557,7 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS "can't allocate space for root group symbol table entry") /* decode the root group symbol table entry */ - if (H5G_ent_decode(udata->f, (const uint8_t **)&image, sblock->root_ent) < 0) + if (H5G_ent_decode(udata->f, (const uint8_t **)&image, sblock->root_ent, image_end) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "can't decode root group symbol table entry") /* Set the root group address to the correct value */ @@ -544,16 +583,23 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS /* Skip over size of file addresses (already decoded) */ image++; udata->f->shared->sizeof_addr = sblock->sizeof_addr; /* Keep a local copy also */ - /* Skip over size of file sizes (already decoded) */ image++; udata->f->shared->sizeof_size = sblock->sizeof_size; /* Keep a local copy also */ + /* Check whether the image pointer is out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + /* File status flags (not really used yet) */ sblock->status_flags = *image++; if (sblock->status_flags & ~H5F_SUPER_ALL_FLAGS) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad flag value for superblock") + /* Check whether the image pointer will be out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, H5F_SIZEOF_ADDR(udata->f) * 4, image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + /* Base, superblock extension, end of file & root group object header addresses */ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->base_addr /*out*/); H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->ext_addr /*out*/); @@ -562,6 +608,10 @@ H5F__cache_superblock_deserialize(const void *_image, size_t 
H5_ATTR_NDEBUG_UNUS /* checksum verification already done in verify_chksum cb */ + /* Check whether the image pointer will be out of bounds */ + if (H5_IS_BUFFER_OVERFLOW(image, sizeof(uint32_t), image_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + /* Decode checksum */ UINT32DECODE(image, read_chksum); diff --git a/src/H5Gent.c b/src/H5Gent.c index f58ef5c63d1..096e13eefd0 100644 --- a/src/H5Gent.c +++ b/src/H5Gent.c @@ -93,7 +93,7 @@ H5G__ent_decode_vec(const H5F_t *f, const uint8_t **pp, const uint8_t *p_end, H5 for (u = 0; u < n; u++) { if (*pp > p_end) HGOTO_ERROR(H5E_SYM, H5E_CANTDECODE, FAIL, "ran off the end of the image buffer") - if (H5G_ent_decode(f, pp, ent + u) < 0) + if (H5G_ent_decode(f, pp, ent + u, p_end) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTDECODE, FAIL, "can't decode") } @@ -117,7 +117,7 @@ H5G__ent_decode_vec(const H5F_t *f, const uint8_t **pp, const uint8_t *p_end, H5 *------------------------------------------------------------------------- */ herr_t -H5G_ent_decode(const H5F_t *f, const uint8_t **pp, H5G_entry_t *ent) +H5G_ent_decode(const H5F_t *f, const uint8_t **pp, H5G_entry_t *ent, const uint8_t *p_end) { const uint8_t *p_ret = *pp; uint32_t tmp; @@ -130,11 +130,22 @@ H5G_ent_decode(const H5F_t *f, const uint8_t **pp, H5G_entry_t *ent) HDassert(pp); HDassert(ent); + if (H5_IS_BUFFER_OVERFLOW(*pp, ent->name_off, p_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "image pointer is out of bounds") + /* decode header */ H5F_DECODE_LENGTH(f, *pp, ent->name_off); + + if (H5_IS_BUFFER_OVERFLOW(*pp, H5F_SIZEOF_ADDR(f) + sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "image pointer is out of bounds") + H5F_addr_decode(f, pp, &(ent->header)); UINT32DECODE(*pp, tmp); *pp += 4; /*reserved*/ + + if (H5_IS_BUFFER_OVERFLOW(*pp, 1, p_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "image pointer is out of bounds") + ent->type = (H5G_cache_type_t)tmp; /* decode scratch-pad */ @@ -144,11 +155,15 @@ H5G_ent_decode(const H5F_t *f, const uint8_t **pp, H5G_entry_t *ent) case H5G_CACHED_STAB: HDassert(2 * H5F_SIZEOF_ADDR(f) <= H5G_SIZEOF_SCRATCH); + if (H5_IS_BUFFER_OVERFLOW(*pp, H5F_SIZEOF_ADDR(f) * 2, p_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "image pointer is out of bounds") H5F_addr_decode(f, pp, &(ent->cache.stab.btree_addr)); H5F_addr_decode(f, pp, &(ent->cache.stab.heap_addr)); break; case H5G_CACHED_SLINK: + if (H5_IS_BUFFER_OVERFLOW(*pp, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "image pointer is out of bounds") UINT32DECODE(*pp, ent->cache.slink.lval_offset); break; diff --git a/src/H5Gprivate.h b/src/H5Gprivate.h index 0042fb2d3bf..2819e4f9f9e 100644 --- a/src/H5Gprivate.h +++ b/src/H5Gprivate.h @@ -248,7 +248,7 @@ H5_DLL herr_t H5G_node_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, i * These functions operate on group object locations. */ H5_DLL herr_t H5G_ent_encode(const H5F_t *f, uint8_t **pp, const H5G_entry_t *ent); -H5_DLL herr_t H5G_ent_decode(const H5F_t *f, const uint8_t **pp, H5G_entry_t *ent); +H5_DLL herr_t H5G_ent_decode(const H5F_t *f, const uint8_t **pp, H5G_entry_t *ent, const uint8_t *p_end); /* * These functions operate on group hierarchy names. 
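The H5G_ent_decode() signature change above means every caller must now derive the end-of-buffer pointer from the image length it already knows and thread it into the nested decode helpers, much as H5F__cache_superblock_deserialize() does with image_end. The sketch below illustrates that calling convention under assumed names (fake_entry_t, decode_fake_entry, deserialize_image); it only mirrors the pattern and is not the actual H5F/H5G code.

/* Sketch only: made-up types and functions showing the p_end threading. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

typedef struct {
    uint32_t name_off;
    uint32_t header_addr;
} fake_entry_t;

/* Nested helper: advances *pp and refuses to read past p_end. */
static int
decode_fake_entry(const uint8_t **pp, const uint8_t *p_end, fake_entry_t *ent)
{
    size_t remaining = (size_t)(p_end - *pp) + 1; /* bytes left in the image */

    if (remaining < 2 * sizeof(uint32_t))
        return -1; /* a truncated image is an error, not an out-of-bounds read */
    memcpy(&ent->name_off, *pp, sizeof(uint32_t));
    *pp += sizeof(uint32_t);
    memcpy(&ent->header_addr, *pp, sizeof(uint32_t));
    *pp += sizeof(uint32_t);
    return 0;
}

/* Top-level deserializer: the only place the image length is known, so it
 * computes p_end once and passes it to every helper it calls. */
static int
deserialize_image(const uint8_t *image, size_t len, fake_entry_t *ent)
{
    const uint8_t *p     = image;
    const uint8_t *p_end = image + len - 1; /* last valid byte */

    return decode_fake_entry(&p, p_end, ent);
}

int
main(void)
{
    uint8_t      image[5] = {0}; /* too short for two 4-byte fields */
    fake_entry_t ent;

    if (deserialize_image(image, sizeof(image), &ent) < 0)
        printf("truncated image rejected instead of reading out of bounds\n");
    return 0;
}

Passing the last-valid-byte pointer down the call chain is what lets each helper bound its own reads without needing to know how large the enclosing image was.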
From 989593da0113e57fa16d43e3f4967d0cb7e2eeea Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Fri, 14 Apr 2023 07:54:00 -0500 Subject: [PATCH 129/231] Cache macro tidy (#2731) * Make cache macro parameter names more consistent --- src/H5Cpkg.h | 196 +++++++++++++++++++++++++-------------------------- 1 file changed, 98 insertions(+), 98 deletions(-) diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index b555fdadcaf..ddecc913f69 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -82,12 +82,12 @@ #ifdef H5C_DO_SANITY_CHECKS -#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ +#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ if ( ( (head_ptr) == NULL ) || \ ( (tail_ptr) == NULL ) || \ ( (entry_ptr) == NULL ) || \ ( (len) <= 0 ) || \ - ( (Size) < (entry_ptr)->size ) || \ + ( (list_size) < (entry_ptr)->size ) || \ ( ( (entry_ptr)->prev == NULL ) && ( (head_ptr) != (entry_ptr) ) ) || \ ( ( (entry_ptr)->next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ ( ( (len) == 1 ) && \ @@ -95,23 +95,23 @@ if ( ( (head_ptr) == NULL ) || \ ( (tail_ptr) == (entry_ptr) ) && \ ( (entry_ptr)->next == NULL ) && \ ( (entry_ptr)->prev == NULL ) && \ - ( (Size) == (entry_ptr)->size ) \ + ( (list_size) == (entry_ptr)->size ) \ ) \ ) \ ) \ ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre remove SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre remove SC failed") \ } -#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ +#define H5C__DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ ( (head_ptr) != (tail_ptr) ) \ ) || \ ( (len) < 0 ) || \ - ( (Size) < 0 ) || \ + ( (list_size) < 0 ) || \ ( ( (len) == 1 ) && \ ( ( (head_ptr) != (tail_ptr) ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ + ( (head_ptr) == NULL ) || ( (head_ptr)->size != (list_size) ) \ ) \ ) || \ ( ( (len) >= 1 ) && \ @@ -120,10 +120,10 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ ) \ ) \ ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL sanity check failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL sanity check failed") \ } -#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ +#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ if ( ( (entry_ptr) == NULL ) || \ ( (entry_ptr)->next != NULL ) || \ ( (entry_ptr)->prev != NULL ) || \ @@ -132,7 +132,7 @@ if ( ( (entry_ptr) == NULL ) || \ ) || \ ( ( (len) == 1 ) && \ ( ( (head_ptr) != (tail_ptr) ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ + ( (head_ptr) == NULL ) || ( (head_ptr)->size != (list_size) ) \ ) \ ) || \ ( ( (len) >= 1 ) && \ @@ -141,7 +141,7 @@ if ( ( (entry_ptr) == NULL ) || \ ) \ ) \ ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre insert SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre insert SC failed") \ } #define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ @@ -162,18 +162,18 @@ if ( ( (new_size) > (dll_size) ) || \ #else /* H5C_DO_SANITY_CHECKS */ -#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) -#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv) -#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) +#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) +#define 
H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) #define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) #define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) #endif /* H5C_DO_SANITY_CHECKS */ -#define H5C__DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ +#define H5C__DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ - H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ + H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ fail_val) \ if ( (head_ptr) == NULL ) \ { \ @@ -187,12 +187,12 @@ if ( ( (new_size) > (dll_size) ) || \ (tail_ptr) = (entry_ptr); \ } \ (len)++; \ - (Size) += (entry_ptr)->size; \ + (list_size) += (entry_ptr)->size; \ } /* H5C__DLL_APPEND() */ -#define H5C__DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ +#define H5C__DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ - H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ + H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ fail_val) \ if ( (head_ptr) == NULL ) \ { \ @@ -206,12 +206,12 @@ if ( ( (new_size) > (dll_size) ) || \ (head_ptr) = (entry_ptr); \ } \ (len)++; \ - (Size) += (entry_ptr)->size; \ + (list_size) += (entry_ptr)->size; \ } /* H5C__DLL_PREPEND() */ -#define H5C__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ +#define H5C__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ - H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ + H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ fail_val) \ { \ if ( (head_ptr) == (entry_ptr) ) \ @@ -233,7 +233,7 @@ if ( ( (new_size) > (dll_size) ) || \ (entry_ptr)->next = NULL; \ (entry_ptr)->prev = NULL; \ (len)--; \ - (Size) -= (entry_ptr)->size; \ + (list_size) -= (entry_ptr)->size; \ } \ } /* H5C__DLL_REMOVE() */ @@ -247,36 +247,36 @@ if ( ( (new_size) > (dll_size) ) || \ #ifdef H5C_DO_SANITY_CHECKS -#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ +#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) \ if ( ( (hd_ptr) == NULL ) || \ ( (tail_ptr) == NULL ) || \ ( (entry_ptr) == NULL ) || \ ( (len) <= 0 ) || \ - ( (Size) < (entry_ptr)->size ) || \ - ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ + ( (list_size) < (entry_ptr)->size ) || \ + ( ( (list_size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ ( ( (entry_ptr)->aux_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \ ( ( (entry_ptr)->aux_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ ( ( (len) == 1 ) && \ ( ! 
( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \ ( (entry_ptr)->aux_next == NULL ) && \ ( (entry_ptr)->aux_prev == NULL ) && \ - ( (Size) == (entry_ptr)->size ) \ + ( (list_size) == (entry_ptr)->size ) \ ) \ ) \ ) \ ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "aux DLL pre remove SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "aux DLL pre remove SC failed") \ } -#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ +#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ ( (head_ptr) != (tail_ptr) ) \ ) || \ ( (len) < 0 ) || \ - ( (Size) < 0 ) || \ + ( (list_size) < 0 ) || \ ( ( (len) == 1 ) && \ - ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ + ( ( (head_ptr) != (tail_ptr) ) || ( (list_size) <= 0 ) || \ + ( (head_ptr) == NULL ) || ( (head_ptr)->size != (list_size) ) \ ) \ ) || \ ( ( (len) >= 1 ) && \ @@ -285,10 +285,10 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ ) \ ) \ ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL sanity check failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "AUX DLL sanity check failed") \ } -#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ +#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) \ if ( ( (entry_ptr) == NULL ) || \ ( (entry_ptr)->aux_next != NULL ) || \ ( (entry_ptr)->aux_prev != NULL ) || \ @@ -296,8 +296,8 @@ if ( ( (entry_ptr) == NULL ) || \ ( (hd_ptr) != (tail_ptr) ) \ ) || \ ( ( (len) == 1 ) && \ - ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ - ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \ + ( ( (hd_ptr) != (tail_ptr) ) || ( (list_size) <= 0 ) || \ + ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (list_size) ) \ ) \ ) || \ ( ( (len) >= 1 ) && \ @@ -306,21 +306,21 @@ if ( ( (entry_ptr) == NULL ) || \ ) \ ) \ ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL pre insert SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "AUX DLL pre insert SC failed") \ } #else /* H5C_DO_SANITY_CHECKS */ -#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) -#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv) -#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) +#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) #endif /* H5C_DO_SANITY_CHECKS */ -#define H5C__AUX_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)\ +#define H5C__AUX_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val)\ { \ - H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ + H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ fail_val) \ if ( (head_ptr) == NULL ) \ { \ @@ -334,12 +334,12 @@ if ( ( (entry_ptr) == NULL ) || \ (tail_ptr) = (entry_ptr); \ } \ (len)++; \ - (Size) += entry_ptr->size; \ + (list_size) += entry_ptr->size; \ } /* H5C__AUX_DLL_APPEND() */ -#define H5C__AUX_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ +#define H5C__AUX_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ - H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ + 
H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ if ( (head_ptr) == NULL ) \ { \ (head_ptr) = (entry_ptr); \ @@ -352,12 +352,12 @@ if ( ( (entry_ptr) == NULL ) || \ (head_ptr) = (entry_ptr); \ } \ (len)++; \ - (Size) += entry_ptr->size; \ + (list_size) += entry_ptr->size; \ } /* H5C__AUX_DLL_PREPEND() */ -#define H5C__AUX_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ +#define H5C__AUX_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ - H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ + H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ if ( (head_ptr) == (entry_ptr) ) \ { \ @@ -378,35 +378,35 @@ if ( ( (entry_ptr) == NULL ) || \ entry_ptr->aux_next = NULL; \ entry_ptr->aux_prev = NULL; \ (len)--; \ - (Size) -= entry_ptr->size; \ + (list_size) -= entry_ptr->size; \ } \ } /* H5C__AUX_DLL_REMOVE() */ #ifdef H5C_DO_SANITY_CHECKS -#define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ +#define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) \ if ( ( (hd_ptr) == NULL ) || \ ( (tail_ptr) == NULL ) || \ ( (entry_ptr) == NULL ) || \ ( (len) <= 0 ) || \ - ( (Size) < (entry_ptr)->size ) || \ - ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ + ( (list_size) < (entry_ptr)->size ) || \ + ( ( (list_size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ ( ( (entry_ptr)->il_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \ ( ( (entry_ptr)->il_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ ( ( (len) == 1 ) && \ ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \ ( (entry_ptr)->il_next == NULL ) && \ ( (entry_ptr)->il_prev == NULL ) && \ - ( (Size) == (entry_ptr)->size ) \ + ( (list_size) == (entry_ptr)->size ) \ ) \ ) \ ) \ ) { \ HDassert(0 && "il DLL pre remove SC failed"); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "il DLL pre remove SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "il DLL pre remove SC failed") \ } -#define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ +#define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) \ if ( ( (entry_ptr) == NULL ) || \ ( (entry_ptr)->il_next != NULL ) || \ ( (entry_ptr)->il_prev != NULL ) || \ @@ -414,8 +414,8 @@ if ( ( (entry_ptr) == NULL ) || \ ( (hd_ptr) != (tail_ptr) ) \ ) || \ ( ( (len) == 1 ) && \ - ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ - ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \ + ( ( (hd_ptr) != (tail_ptr) ) || ( (list_size) <= 0 ) || \ + ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (list_size) ) \ ) \ ) || \ ( ( (len) >= 1 ) && \ @@ -425,16 +425,16 @@ if ( ( (entry_ptr) == NULL ) || \ ) \ ) { \ HDassert(0 && "IL DLL pre insert SC failed"); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "IL DLL pre insert SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "IL DLL pre insert SC failed") \ } -#define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ +#define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ ( (head_ptr) != (tail_ptr) ) \ ) || \ ( ( (len) == 1 ) && \ ( ( (head_ptr) != (tail_ptr) ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ + ( (head_ptr) == NULL ) || ( (head_ptr)->size != (list_size) ) \ ) \ ) || \ ( ( (len) >= 1 ) && \ @@ -444,21 +444,21 @@ if ( ( ( ( (head_ptr) == NULL ) || ( 
(tail_ptr) == NULL ) ) && \ ) \ ) { \ HDassert(0 && "IL DLL sanity check failed"); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "IL DLL sanity check failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "IL DLL sanity check failed") \ } #else /* H5C_DO_SANITY_CHECKS */ -#define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) -#define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) -#define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) +#define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) #endif /* H5C_DO_SANITY_CHECKS */ -#define H5C__IL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)\ +#define H5C__IL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val)\ { \ - H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ + H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ fail_val) \ if ( (head_ptr) == NULL ) \ { \ @@ -472,13 +472,13 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ (tail_ptr) = (entry_ptr); \ } \ (len)++; \ - (Size) += entry_ptr->size; \ - H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fail_val) \ + (list_size) += entry_ptr->size; \ + H5C__IL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ } /* H5C__IL_DLL_APPEND() */ -#define H5C__IL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ +#define H5C__IL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ - H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ + H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ if ( (head_ptr) == (entry_ptr) ) \ { \ @@ -499,9 +499,9 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ entry_ptr->il_next = NULL; \ entry_ptr->il_prev = NULL; \ (len)--; \ - (Size) -= entry_ptr->size; \ + (list_size) -= entry_ptr->size; \ } \ - H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ + H5C__IL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ } /* H5C__IL_DLL_REMOVE() */ @@ -2766,37 +2766,37 @@ if ( ( (cache_ptr)->index_size != \ #ifdef H5C_DO_SANITY_CHECKS -#define H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ +#define H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) \ if ( ( (hd_ptr) == NULL ) || \ ( (tail_ptr) == NULL ) || \ ( (entry_ptr) == NULL ) || \ ( (len) <= 0 ) || \ - ( (Size) < (entry_ptr)->size ) || \ - ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ + ( (list_size) < (entry_ptr)->size ) || \ + ( ( (list_size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ ( ( (entry_ptr)->coll_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \ ( ( (entry_ptr)->coll_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) ||\ ( ( (len) == 1 ) && \ ( ! 
( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \ ( (entry_ptr)->coll_next == NULL ) && \ ( (entry_ptr)->coll_prev == NULL ) && \ - ( (Size) == (entry_ptr)->size ) \ + ( (list_size) == (entry_ptr)->size ) \ ) \ ) \ ) \ ) { \ HDassert(0 && "coll DLL pre remove SC failed"); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "coll DLL pre remove SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "coll DLL pre remove SC failed") \ } -#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ +#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ ( (head_ptr) != (tail_ptr) ) \ ) || \ ( (len) < 0 ) || \ - ( (Size) < 0 ) || \ + ( (list_size) < 0 ) || \ ( ( (len) == 1 ) && \ - ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ + ( ( (head_ptr) != (tail_ptr) ) || ( (list_size) <= 0 ) || \ + ( (head_ptr) == NULL ) || ( (head_ptr)->size != (list_size) ) \ ) \ ) || \ ( ( (len) >= 1 ) && \ @@ -2806,10 +2806,10 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ ) \ ) { \ HDassert(0 && "COLL DLL sanity check failed"); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL sanity check failed")\ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "COLL DLL sanity check failed")\ } -#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)\ +#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val)\ if ( ( (entry_ptr) == NULL ) || \ ( (entry_ptr)->coll_next != NULL ) || \ ( (entry_ptr)->coll_prev != NULL ) || \ @@ -2817,8 +2817,8 @@ if ( ( (entry_ptr) == NULL ) || \ ( (hd_ptr) != (tail_ptr) ) \ ) || \ ( ( (len) == 1 ) && \ - ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ - ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \ + ( ( (hd_ptr) != (tail_ptr) ) || ( (list_size) <= 0 ) || \ + ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (list_size) ) \ ) \ ) || \ ( ( (len) >= 1 ) && \ @@ -2828,21 +2828,21 @@ if ( ( (entry_ptr) == NULL ) || \ ) \ ) { \ HDassert(0 && "COLL DLL pre insert SC failed"); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL pre insert SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "COLL DLL pre insert SC failed") \ } #else /* H5C_DO_SANITY_CHECKS */ -#define H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) -#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) -#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) +#define H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) #endif /* H5C_DO_SANITY_CHECKS */ -#define H5C__COLL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ +#define H5C__COLL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ - H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ + H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ fail_val) \ if ( (head_ptr) == NULL ) \ { \ @@ -2856,12 +2856,12 @@ if ( ( (entry_ptr) == NULL ) || \ (tail_ptr) = (entry_ptr); \ } \ (len)++; \ - (Size) += entry_ptr->size; \ + (list_size) += entry_ptr->size; \ } /* H5C__COLL_DLL_APPEND() */ -#define H5C__COLL_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ +#define 
H5C__COLL_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ - H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)\ + H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val)\ if ( (head_ptr) == NULL ) \ { \ (head_ptr) = (entry_ptr); \ @@ -2874,12 +2874,12 @@ if ( ( (entry_ptr) == NULL ) || \ (head_ptr) = (entry_ptr); \ } \ (len)++; \ - (Size) += entry_ptr->size; \ + (list_size) += entry_ptr->size; \ } /* H5C__COLL_DLL_PREPEND() */ -#define H5C__COLL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ +#define H5C__COLL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ - H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)\ + H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val)\ { \ if ( (head_ptr) == (entry_ptr) ) \ { \ @@ -2902,7 +2902,7 @@ if ( ( (entry_ptr) == NULL ) || \ entry_ptr->coll_next = NULL; \ entry_ptr->coll_prev = NULL; \ (len)--; \ - (Size) -= entry_ptr->size; \ + (list_size) -= entry_ptr->size; \ } \ } /* H5C__COLL_DLL_REMOVE() */ From 36b07fb94d47a53941ce338ad035837eeaaa762e Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Fri, 14 Apr 2023 07:55:06 -0500 Subject: [PATCH 130/231] Move cache image macros (#2732) * Move cache image stats macros to cache image source file * Disable formatting on the macros --- src/H5Cimage.c | 42 ++++++++++++++++++++++++++++++++++++++++++ src/H5Cpkg.h | 33 --------------------------------- 2 files changed, 42 insertions(+), 33 deletions(-) diff --git a/src/H5Cimage.c b/src/H5Cimage.c index 105272cfef2..b8f46f11422 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -80,6 +80,48 @@ /* Maximum ring allowed in image */ #define H5C_MAX_RING_IN_IMAGE H5C_RING_MDFSM +/*********************************************************************** + * + * Stats collection macros + * + * The following macros must handle stats collection when collection + * is enabled, and evaluate to the empty string when it is not. 
+ * + ***********************************************************************/ +#if H5C_COLLECT_CACHE_STATS +/* clang-format off */ +#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) \ + (cache_ptr)->images_created++; +#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) \ +{ \ + /* make sure image len is still good */ \ + HDassert((cache_ptr)->image_len > 0); \ + (cache_ptr)->images_read++; \ +} +#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) \ +{ \ + /* make sure image len is still good */ \ + HDassert((cache_ptr)->image_len > 0); \ + (cache_ptr)->images_loaded++; \ + (cache_ptr)->last_image_size = (cache_ptr)->image_len; \ +} +#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \ +{ \ + (cache_ptr)->prefetches++; \ + if (dirty) \ + (cache_ptr)->dirty_prefetches++; \ +} +#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \ + (cache_ptr)->prefetch_hits++; +/* clang-format on */ +#else /* H5C_COLLECT_CACHE_STATS */ +#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) +#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) +#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) +#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) +#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) +#endif /* H5C_COLLECT_CACHE_STATS */ + /******************/ /* Local Typedefs */ /******************/ diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index ddecc913f69..a0acc66b248 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -596,34 +596,6 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ #define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr) \ (cache_ptr)->index_scan_restarts++; -#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) \ - (cache_ptr)->images_created++; - -#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) \ -{ \ - /* make sure image len is still good */ \ - HDassert((cache_ptr)->image_len > 0); \ - (cache_ptr)->images_read++; \ -} - -#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) \ -{ \ - /* make sure image len is still good */ \ - HDassert((cache_ptr)->image_len > 0); \ - (cache_ptr)->images_loaded++; \ - (cache_ptr)->last_image_size = (cache_ptr)->image_len; \ -} - -#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \ -{ \ - (cache_ptr)->prefetches++; \ - if (dirty) \ - (cache_ptr)->dirty_prefetches++; \ -} - -#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \ - (cache_ptr)->prefetch_hits++; - #if H5C_COLLECT_CACHE_ENTRY_STATS #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \ @@ -831,11 +803,6 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ #define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) #define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) #define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr) -#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) -#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) -#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) -#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) -#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) #endif /* H5C_COLLECT_CACHE_STATS */ From 34d6263b68645974c2d6456f768860c89a1c8016 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Fri, 14 Apr 2023 11:10:32 -0500 Subject: [PATCH 131/231] Cache macro error consistency (#2739) * Pass failure value consistently to all macro errors --- src/H5C.c | 26 +++++------ src/H5Cpkg.h | 130 ++++++++++++++++++++++----------------------------- 2 files changed, 70 insertions(+), 86 deletions(-) diff --git a/src/H5C.c b/src/H5C.c 
index f9a41fd238d..8a97e47ea4d 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -1415,7 +1415,7 @@ H5C_mark_entry_dirty(void *thing) /* Modify cache data structures */ if (was_clean) - H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr) + H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL) if (!entry_ptr->in_slist) H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) @@ -1498,7 +1498,7 @@ H5C_mark_entry_clean(void *_thing) /* Modify cache data structures */ if (was_dirty) - H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr) + H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL) if (entry_ptr->in_slist) H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE) @@ -1847,23 +1847,23 @@ H5C_resize_entry(void *thing, size_t new_size) /* update the pinned and/or protected entry list */ if (entry_ptr->is_pinned) - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pel_len), (cache_ptr->pel_size), (entry_ptr->size), - (new_size)) + H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pel_len, cache_ptr->pel_size, entry_ptr->size, + new_size, FAIL) if (entry_ptr->is_protected) - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pl_len), (cache_ptr->pl_size), (entry_ptr->size), - (new_size)) + H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pl_len, cache_ptr->pl_size, entry_ptr->size, new_size, + FAIL) #ifdef H5_HAVE_PARALLEL if (entry_ptr->coll_access) - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->coll_list_len), (cache_ptr->coll_list_size), - (entry_ptr->size), (new_size)) + H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->coll_list_len, cache_ptr->coll_list_size, + entry_ptr->size, new_size, FAIL) #endif /* H5_HAVE_PARALLEL */ /* update statistics just before changing the entry size */ H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size); /* update the hash table */ - H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size, entry_ptr, was_clean); + H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size, entry_ptr, was_clean, FAIL); /* if the entry is in the skip list, update that too */ if (entry_ptr->in_slist) @@ -2983,7 +2983,7 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) /* Check for newly dirtied entry */ if (was_clean && entry_ptr->is_dirty) { /* Update index for newly dirtied entry */ - H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr) + H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL) /* If the entry's type has a 'notify' callback send a * 'entry dirtied' notice now that the entry is fully @@ -5924,7 +5924,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) */ entry_ptr->is_dirty = FALSE; - H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr); + H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL); /* Check for entry changing status and do notifications, etc. */ if (was_dirty) { @@ -8084,14 +8084,14 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) /* Update the hash table for the size change */ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr, - !(entry_ptr->is_dirty)); + !entry_ptr->is_dirty, FAIL); /* The entry can't be protected since we are in the process of * flushing it. Thus we must update the replacement policy data * structures for the size change. The macro deals with the pinned * case. 
*/ - H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len); + H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len, FAIL); /* As we haven't updated the cache data structures for * for the flush or flush destroy yet, the entry should diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index a0acc66b248..e2f4271b033 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -144,20 +144,20 @@ if ( ( (entry_ptr) == NULL ) || \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre insert SC failed") \ } -#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ +#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ if ( ( (dll_len) <= 0 ) || \ ( (dll_size) <= 0 ) || \ ( (old_size) <= 0 ) || \ ( (old_size) > (dll_size) ) || \ ( (new_size) <= 0 ) || \ ( ( (dll_len) == 1 ) && ( (old_size) != (dll_size) ) ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL pre size update SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre size update SC failed") \ } -#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ +#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ if ( ( (new_size) > (dll_size) ) || \ ( ( (dll_len) == 1 ) && ( (new_size) != (dll_size) ) ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL post size update SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL post size update SC failed") \ } #else /* H5C_DO_SANITY_CHECKS */ @@ -165,8 +165,8 @@ if ( ( (new_size) > (dll_size) ) || \ #define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) #define H5C__DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) #define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) -#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) -#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) +#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) +#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) #endif /* H5C_DO_SANITY_CHECKS */ @@ -237,12 +237,12 @@ if ( ( (new_size) > (dll_size) ) || \ } \ } /* H5C__DLL_REMOVE() */ -#define H5C__DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size) \ +#define H5C__DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size, fail_val) \ { \ - H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ + H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ (dll_size) -= (old_size); \ (dll_size) += (new_size); \ - H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ + H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ } /* H5C__DLL_UPDATE_FOR_SIZE_CHANGE() */ #ifdef H5C_DO_SANITY_CHECKS @@ -402,7 +402,6 @@ if ( ( (hd_ptr) == NULL ) || \ ) \ ) \ ) { \ - HDassert(0 && "il DLL pre remove SC failed"); \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "il DLL pre remove SC failed") \ } @@ -424,7 +423,6 @@ if ( ( (entry_ptr) == NULL ) || \ ) \ ) \ ) { \ - HDassert(0 && "IL DLL pre insert SC failed"); \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "IL DLL pre insert SC failed") \ } @@ -443,7 +441,6 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ ) \ ) \ ) { \ - HDassert(0 && "IL DLL sanity check failed"); \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "IL DLL sanity check failed") \ } @@ -853,7 +850,6 @@ if ( ( (cache_ptr) == NULL ) || \ 
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \ - HDassert(FALSE); \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT insert SC failed") \ } @@ -875,11 +871,10 @@ if ( ( (cache_ptr) == NULL ) || \ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ ( (cache_ptr)->index_size != (cache_ptr)->il_size) ) { \ - HDassert(FALSE); \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT insert SC failed") \ } -#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \ +#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ if ( ( (cache_ptr) == NULL ) || \ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ ( (cache_ptr)->index_len < 1 ) || \ @@ -916,11 +911,10 @@ if ( ( (cache_ptr) == NULL ) || \ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \ - HDassert(FALSE); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT remove SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT remove SC failed") \ } -#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \ +#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ if ( ( (cache_ptr) == NULL ) || \ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ ( (entry_ptr) == NULL ) || \ @@ -942,8 +936,7 @@ if ( ( (cache_ptr) == NULL ) || \ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \ - HDassert(FALSE); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT remove SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT remove SC failed") \ } /* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */ @@ -955,7 +948,7 @@ if ( ( (cache_ptr) == NULL ) || \ ( ! 
H5F_addr_defined(Addr) ) || \ ( H5C__HASH_FCN(Addr) < 0 ) || \ ( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT search SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT search SC failed") \ } /* (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK) */ @@ -977,7 +970,7 @@ if ( ( (cache_ptr) == NULL ) || \ ( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \ ( ( (entry_ptr)->ht_next != NULL ) && \ ( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post successful HT search SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post successful HT search SC failed") \ } /* (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK) */ @@ -985,11 +978,11 @@ if ( ( (cache_ptr) == NULL ) || \ if ( ( (cache_ptr) == NULL ) || \ ( ((cache_ptr)->index)[k] != (entry_ptr) ) || \ ( (entry_ptr)->ht_prev != NULL ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT shift to front SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT shift to front SC failed") \ } #define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean) \ + entry_ptr, was_clean, fail_val) \ if ( ( (cache_ptr) == NULL ) || \ ( (cache_ptr)->index_len <= 0 ) || \ ( (cache_ptr)->index_size <= 0 ) || \ @@ -1019,12 +1012,11 @@ if ( ( (cache_ptr) == NULL ) || \ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \ - HDassert(FALSE); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT entry size change SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT entry size change SC failed") \ } #define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr) \ + entry_ptr, fail_val) \ if ( ( (cache_ptr) == NULL ) || \ ( (cache_ptr)->index_len <= 0 ) || \ ( (cache_ptr)->index_size <= 0 ) || \ @@ -1049,11 +1041,10 @@ if ( ( (cache_ptr) == NULL ) || \ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \ - HDassert(FALSE); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT entry size change SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT entry size change SC failed") \ } -#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \ +#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \ if ( \ ( (cache_ptr) == NULL ) || \ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ @@ -1076,11 +1067,10 @@ if ( \ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \ - HDassert(FALSE); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry clean SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT update for entry clean SC failed") \ } -#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \ +#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \ if ( \ ( (cache_ptr) == NULL ) || \ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ @@ -1103,11 +1093,10 @@ if ( \ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ 
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \ - HDassert(FALSE); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry dirty SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT update for entry dirty SC failed") \ } -#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \ +#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \ if ( ( (cache_ptr)->index_size != \ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ @@ -1119,11 +1108,10 @@ if ( ( (cache_ptr)->index_size != \ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \ - HDassert(FALSE); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry clean SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT update for entry clean SC failed") \ } -#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \ +#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \ if ( ( (cache_ptr)->index_size != \ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ @@ -1135,27 +1123,26 @@ if ( ( (cache_ptr)->index_size != \ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \ - HDassert(FALSE); \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry dirty SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT update for entry dirty SC failed") \ } #else /* H5C_DO_SANITY_CHECKS */ #define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) #define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) -#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) -#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) +#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) +#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) #define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) #define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) #define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) -#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) -#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) +#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) +#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) #define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean) + entry_ptr, was_clean, fail_val) #define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr) -#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) -#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) + entry_ptr, fail_val) +#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) +#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) #endif /* H5C_DO_SANITY_CHECKS */ @@ -1198,7 +1185,7 @@ if ( ( (cache_ptr)->index_size != \ #define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, fail_val) \ { \ int k; \ - H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \ + H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ k = H5C__HASH_FCN((entry_ptr)->addr); 
\ if((entry_ptr)->ht_next) \ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ @@ -1230,7 +1217,7 @@ if ( ( (cache_ptr)->index_size != \ (cache_ptr)->il_tail, (cache_ptr)->il_len, \ (cache_ptr)->il_size, fail_val) \ H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \ - H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \ + H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ } #define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \ @@ -1262,35 +1249,35 @@ if ( ( (cache_ptr)->index_size != \ H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \ } -#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr) \ +#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, fail_val) \ { \ - H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \ + H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \ (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \ ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ -= (entry_ptr)->size; \ (cache_ptr)->clean_index_size += (entry_ptr)->size; \ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \ += (entry_ptr)->size; \ - H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \ + H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \ } -#define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr) \ +#define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, fail_val) \ { \ - H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \ + H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \ (cache_ptr)->clean_index_size -= (entry_ptr)->size; \ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \ -= (entry_ptr)->size; \ (cache_ptr)->dirty_index_size += (entry_ptr)->size; \ ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ += (entry_ptr)->size; \ - H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \ + H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \ } #define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean) \ + entry_ptr, was_clean, fail_val) \ { \ H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean) \ + entry_ptr, was_clean, fail_val) \ (cache_ptr)->index_size -= (old_size); \ (cache_ptr)->index_size += (new_size); \ ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) -= (old_size); \ @@ -1311,9 +1298,9 @@ if ( ( (cache_ptr)->index_size != \ } \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->il_len, \ (cache_ptr)->il_size, \ - (old_size), (new_size)) \ + (old_size), (new_size), (fail_val)) \ H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr) \ + entry_ptr, fail_val) \ } @@ -2389,7 +2376,7 @@ if ( ( (cache_ptr)->index_size != \ #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS -#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \ +#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size, fail_val) \ { \ HDassert( (cache_ptr) ); \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ @@ -2405,7 +2392,7 @@ if ( ( (cache_ptr)->index_size != \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->coll_list_len, \ (cache_ptr)->coll_list_size, \ (entry_ptr)->size, \ - (new_size)); \ + (new_size), (fail_val)); \ \ } \ \ @@ -2414,7 +2401,7 @@ if ( ( (cache_ptr)->index_size != \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \ (cache_ptr)->pel_size, \ (entry_ptr)->size, \ - (new_size)); \ + (new_size), (fail_val)); \ \ } else { \ \ @@ -2425,7 +2412,7 @@ if ( ( 
(cache_ptr)->index_size != \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \ (cache_ptr)->LRU_list_size, \ (entry_ptr)->size, \ - (new_size)); \ + (new_size), (fail_val)); \ \ /* Similarly, update the size of the clean or dirty LRU list as \ * appropriate. At present, the entry must be clean, but that \ @@ -2437,14 +2424,14 @@ if ( ( (cache_ptr)->index_size != \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \ (cache_ptr)->dLRU_list_size, \ (entry_ptr)->size, \ - (new_size)); \ + (new_size), (fail_val)); \ \ } else { \ \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \ (cache_ptr)->cLRU_list_size, \ (entry_ptr)->size, \ - (new_size)); \ + (new_size), (fail_val)); \ } \ \ /* End modified LRU specific code. */ \ @@ -2454,7 +2441,7 @@ if ( ( (cache_ptr)->index_size != \ #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \ +#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size, fail_val) \ { \ HDassert( (cache_ptr) ); \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ @@ -2470,7 +2457,7 @@ if ( ( (cache_ptr)->index_size != \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \ (cache_ptr)->pel_size, \ (entry_ptr)->size, \ - (new_size)); \ + (new_size), (fail_val)); \ \ } else { \ \ @@ -2481,7 +2468,7 @@ if ( ( (cache_ptr)->index_size != \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \ (cache_ptr)->LRU_list_size, \ (entry_ptr)->size, \ - (new_size)); \ + (new_size), (fail_val)); \ \ /* End modified LRU specific code. */ \ } \ @@ -2751,7 +2738,6 @@ if ( ( (hd_ptr) == NULL ) || \ ) \ ) \ ) { \ - HDassert(0 && "coll DLL pre remove SC failed"); \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "coll DLL pre remove SC failed") \ } @@ -2772,7 +2758,6 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ ) \ ) \ ) { \ - HDassert(0 && "COLL DLL sanity check failed"); \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "COLL DLL sanity check failed")\ } @@ -2794,7 +2779,6 @@ if ( ( (entry_ptr) == NULL ) || \ ) \ ) \ ) { \ - HDassert(0 && "COLL DLL pre insert SC failed"); \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "COLL DLL pre insert SC failed") \ } From cbaea1f1485ed7126aae0617969fcf292391aff2 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Fri, 14 Apr 2023 11:40:50 -0500 Subject: [PATCH 132/231] Cache macro parameter name consistency (#2738) * More cache macro parameter consistency --- src/H5Cpkg.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index e2f4271b033..056c1810a17 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -940,14 +940,14 @@ if ( ( (cache_ptr) == NULL ) || \ } /* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */ -#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \ +#define H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) \ if ( ( (cache_ptr) == NULL ) || \ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ ( (cache_ptr)->index_size != \ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ - ( ! H5F_addr_defined(Addr) ) || \ - ( H5C__HASH_FCN(Addr) < 0 ) || \ - ( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) { \ + ( ! 
H5F_addr_defined(entry_addr) ) || \ + ( H5C__HASH_FCN(entry_addr) < 0 ) || \ + ( H5C__HASH_FCN(entry_addr) >= H5C__HASH_TABLE_LEN ) ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT search SC failed") \ } @@ -1132,7 +1132,7 @@ if ( ( (cache_ptr)->index_size != \ #define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) #define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) #define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) -#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) +#define H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) #define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) #define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) #define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) @@ -1220,15 +1220,15 @@ if ( ( (cache_ptr)->index_size != \ H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ } -#define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \ +#define H5C__SEARCH_INDEX(cache_ptr, entry_addr, entry_ptr, fail_val) \ { \ int k; \ int depth = 0; \ - H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \ - k = H5C__HASH_FCN(Addr); \ + H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) \ + k = H5C__HASH_FCN(entry_addr); \ entry_ptr = ((cache_ptr)->index)[k]; \ while(entry_ptr) { \ - if(H5F_addr_eq(Addr, (entry_ptr)->addr)) { \ + if(H5F_addr_eq(entry_addr, (entry_ptr)->addr)) { \ H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \ if(entry_ptr != ((cache_ptr)->index)[k]) { \ if((entry_ptr)->ht_next) \ From d25e9a278eaa217ce4dd2d445afb8ad7b4ce4d6e Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 14 Apr 2023 12:00:03 -0700 Subject: [PATCH 133/231] Add Elena to the list of Fortran developers in CODEOWNERS (#2741) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 506c668b94d..714ae8aba5d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,5 +7,5 @@ # Order is important. The last matching pattern has the most precedence. # So if a pull request only touches javascript files, only these owners # will be requested to review. -/fortran/ @brtnfld @derobins +/fortran/ @brtnfld @derobins @epourmal /java/ @jhendersonHDF @byrnHDF @derobins From 708c154775511751ef0606a7717e3efd2aa8be5b Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 14 Apr 2023 15:17:24 -0700 Subject: [PATCH 134/231] Fix memory leaks when processing OH cont messages (#2723) Malformed object header continuation messages can result in a too-small buffer being passed to the decode function, which could lead to reading past the end of the buffer. Additionally, errors in processing these malformed messages can lead to allocated memory not being cleaned up. This fix adds bounds checking and cleanup code to the object header continuation message processing. 
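The pattern behind this fix is general: track the last valid byte of the raw message buffer, verify there is room before every fixed-size read, and free any partially constructed native message on the error path. A minimal, self-contained sketch of that pattern follows; buffer_has_room() and toy_cont_t are hypothetical stand-ins for HDF5's H5_IS_BUFFER_OVERFLOW macro and H5O_cont_t struct, not library code -- the actual change is the H5Ocont.c hunk further down.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for H5O_cont_t: file address and size of the next chunk */
typedef struct {
    uint64_t addr;
    uint64_t size;
} toy_cont_t;

/* Hypothetical stand-in for H5_IS_BUFFER_OVERFLOW: true if reading `need`
 * bytes starting at `p` stays within `p_end` (the last valid byte) */
static int
buffer_has_room(const uint8_t *p, size_t need, const uint8_t *p_end)
{
    return (p <= p_end) && ((size_t)(p_end - p) + 1 >= need);
}

/* Bounds-checked decode: returns NULL on a short buffer without leaking */
static toy_cont_t *
toy_cont_decode(const uint8_t *p, size_t p_size)
{
    const uint8_t *p_end = p + p_size - 1; /* last valid byte, as in the real decoder */
    toy_cont_t    *cont  = calloc(1, sizeof(*cont));

    if (NULL == cont)
        return NULL;

    /* Check before each read instead of trusting the caller-supplied size */
    if (!buffer_has_room(p, sizeof(uint64_t), p_end))
        goto error;
    memcpy(&cont->addr, p, sizeof(uint64_t));
    p += sizeof(uint64_t);

    if (!buffer_has_room(p, sizeof(uint64_t), p_end))
        goto error;
    memcpy(&cont->size, p, sizeof(uint64_t));

    return cont;

error:
    free(cont); /* mirrors the cleanup added on the error path of the real decoder */
    return NULL;
}

The real H5O__cont_decode() threads the HDF5 error stack (HGOTO_ERROR) and the H5FL free-list allocator through this same structure, as the diff below shows.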
Fixes #2604 --- release_docs/RELEASE.txt | 15 ++++++++++++++- src/H5Ocache.c | 5 +++-- src/H5Ocont.c | 23 +++++++++++++---------- 3 files changed, 30 insertions(+), 13 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 63f5a361ded..7e5332212bf 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -151,6 +151,18 @@ Bug Fixes since HDF5-1.14.0 release =================================== Library ------- + - Fixed memory leaks when processing malformed object header continuation messages + + Malformed object header continuation messages can result in a too-small + buffer being passed to the decode function, which could lead to reading + past the end of the buffer. Additionally, errors in processing these + malformed messages can lead to allocated memory not being cleaned up. + + This fix adds bounds checking and cleanup code to the object header + continuation message processing. + + (DER - 2023/04/13 GH-2604) + - Fixed memory leaks, aborts, and overflows in H5O EFL decode The external file list code could call assert(), read past buffer @@ -196,7 +208,8 @@ Bug Fixes since HDF5-1.14.0 release - Fixed potential heap buffer overrun in group info header decoding from malformed file - H5O__ginfo_decode could sometimes read past allocated memory when parsing a group info message from the header of a malformed file. + H5O__ginfo_decode could sometimes read past allocated memory when parsing a + group info message from the header of a malformed file. It now checks buffer size before each read to properly throw an error in these cases. diff --git a/src/H5Ocache.c b/src/H5Ocache.c index 0851493959d..42d8f3590e0 100644 --- a/src/H5Ocache.c +++ b/src/H5Ocache.c @@ -1510,8 +1510,9 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t H5O_cont_t *cont; /* Decode continuation message */ - cont = (H5O_cont_t *)(H5O_MSG_CONT->decode)(udata->f, NULL, 0, &ioflags, mesg->raw_size, - mesg->raw); + if (NULL == (cont = (H5O_cont_t *)(H5O_MSG_CONT->decode)(udata->f, NULL, 0, &ioflags, + mesg->raw_size, mesg->raw))) + HGOTO_ERROR(H5E_OHDR, H5E_BADMESG, FAIL, "bad continuation message found") H5_CHECKED_ASSIGN(cont->chunkno, unsigned, udata->cont_msg_info->nmsgs + 1, size_t); /* the next continuation message/chunk */ diff --git a/src/H5Ocont.c b/src/H5Ocont.c index 8919ced5850..bbf233da971 100644 --- a/src/H5Ocont.c +++ b/src/H5Ocont.c @@ -74,40 +74,43 @@ H5FL_DEFINE(H5O_cont_t); * Purpose: Decode the raw header continuation message. 
* * Return: Success: Ptr to the new native message - * * Failure: NULL - * - * Programmer: Robb Matzke - * Aug 6 1997 - * *------------------------------------------------------------------------- */ static void * H5O__cont_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5O_cont_t *cont = NULL; - void *ret_value = NULL; /* Return value */ + H5O_cont_t *cont = NULL; + const uint8_t *p_end = p + p_size - 1; + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); /* Allocate space for the message */ if (NULL == (cont = H5FL_MALLOC(H5O_cont_t))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "memory allocation failed"); /* Decode */ + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(cont->addr)); + + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_DECODE_LENGTH(f, p, cont->size); + cont->chunkno = 0; /* Set return value */ ret_value = cont; done: + if (NULL == ret_value && NULL != cont) + H5FL_FREE(H5O_cont_t, cont); FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__cont_decode() */ From 9e6fbdf99fd97f32b396f1ff2442ae368851e0ee Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 14 Apr 2023 18:14:50 -0700 Subject: [PATCH 135/231] Run parallel clang-format tasks (#2740) Adds xargs arguments to enable running clang-format in parallel --- bin/format_source | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/format_source b/bin/format_source index 227d22ab420..1d9e88adede 100755 --- a/bin/format_source +++ b/bin/format_source @@ -21,6 +21,6 @@ find . \( -type d -path ./config -prune -and -not -path ./config \) \ -or -name H5overflow.h \ \) \) \ -and \( -iname *.h -or -iname *.c -or -iname *.cpp -or -iname *.hpp -or -iname *.java \) \) \ - | xargs clang-format -style=file -i -fallback-style=none + | xargs -P0 -n1 clang-format -style=file -i -fallback-style=none exit 0 From aeda6f9f9558a818c29af70e5ab7cc01bef9a363 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sun, 16 Apr 2023 09:09:41 -0700 Subject: [PATCH 136/231] Remove dates and initials from RELEASE.txt entries (#2746) --- release_docs/RELEASE.txt | 32 +++++++++++++------------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 7e5332212bf..99ba1d1ab2a 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -161,7 +161,7 @@ Bug Fixes since HDF5-1.14.0 release This fix adds bounds checking and cleanup code to the object header continuation message processing. - (DER - 2023/04/13 GH-2604) + Fixes GitHub issue #2604 - Fixed memory leaks, aborts, and overflows in H5O EFL decode @@ -172,7 +172,7 @@ Bug Fixes since HDF5-1.14.0 release This fix cleans up allocated memory, adds buffer bounds checks, and converts asserts to HDF5 error checking. 
- (DER - 2023/04/13 GH-2605) + Fixes GitHub issue #2605 - Fixed potential heap buffer overflow in decoding of link info message @@ -181,7 +181,7 @@ Bug Fixes since HDF5-1.14.0 release checkings will remove the potential invalid read of any of these values that could be triggered by a malformed file. - (BMR - 2023/04/12 GH-2603) + Fixes GitHub issue #2603 - Memory leak @@ -194,7 +194,7 @@ Bug Fixes since HDF5-1.14.0 release As error is encountered in loading the illegal message, the memory allocated for cont_msg_info->msgs needs to be freed. - (VC - 2023/04/11 GH-2599) + Fixes GitHub issue #2599 - Fixed memory leaks that could occur when reading a dataset from a malformed file @@ -204,7 +204,7 @@ Bug Fixes since HDF5-1.14.0 release information threw an error, which is due to the memory that was allocated for pline and efl not being properly cleaned up on error. - (GS - 2023/4/11 GH#2602) + Fixes GitHub issue #2602 - Fixed potential heap buffer overrun in group info header decoding from malformed file @@ -213,7 +213,7 @@ Bug Fixes since HDF5-1.14.0 release It now checks buffer size before each read to properly throw an error in these cases. - (ML - 2023/4/6, #2601) + Fixes GitHub issue #2601 - Fixed potential buffer overrun issues in some object header decode routines @@ -221,8 +221,6 @@ Bug Fixes since HDF5-1.14.0 release ensure that memory buffers don't get overrun when decoding buffers read from a (possibly corrupted) HDF5 file. - (JTH - 2023/04/05) - - Fixed a heap buffer overflow that occurs when reading from a dataset with a compact layout within a malformed HDF5 file @@ -240,7 +238,7 @@ Bug Fixes since HDF5-1.14.0 release dataspace and datatype information). If the two sizes do not match, opening of the dataset will fail. - (JTH - 2023/04/04, GH-2606) + Fixes GitHub issue #2606 - Fixed a memory corruption issue that can occur when reading from a dataset using a hyperslab selection in the file @@ -254,8 +252,6 @@ Bug Fixes since HDF5-1.14.0 release being copied when projecting the point selection onto the hyperslab selection's dataspace. - (JTH - 2023/03/23) - - Fixed issues in the Subfiling VFD when using the SELECT_IOC_EVERY_NTH_RANK or SELECT_IOC_TOTAL I/O concentrator selection strategies @@ -274,8 +270,6 @@ Bug Fixes since HDF5-1.14.0 release Also added a regression test for these two I/O concentrator selection strategies to prevent future issues. - (JTH - 2023/03/15) - - Fix CVE-2021-37501 / GHSA-rfgw-5vq3-wrjf Check for overflow when calculating on-disk attribute data size. @@ -287,7 +281,7 @@ Bug Fixes since HDF5-1.14.0 release The test case was crafted in a way that the overflow caused the size to be 0. - (EFE - 2023/02/11 GH-2458) + Fixes GitHub #2458 - Fixed an issue with collective metadata writes of global heap data @@ -302,7 +296,7 @@ Bug Fixes since HDF5-1.14.0 release to treat global heap metadata as raw data, as done elsewhere in the library. - (JTH - 2023/02/16, GH #2433) + Fixes GitHub issue #2433 - Fixed buffer overflow error in image decoding function. @@ -314,7 +308,7 @@ Bug Fixes since HDF5-1.14.0 release The error was fixed by inserting corresponding checks for buffer overflow. - (KE - 2023/02/07 GH #2432) + Fixes GitHub issue #2432 Java Library @@ -340,7 +334,7 @@ Bug Fixes since HDF5-1.14.0 release that use HDF5 and try to use or parse information out of these files after deleting temporary HDF5 build directories. 
- (JTH - 2023/04/05 GH-2422, GH-2621) + Fixes GitHub issue #2621 - Correct the CMake generated pkg-config file @@ -353,7 +347,7 @@ Bug Fixes since HDF5-1.14.0 release supported pkconfig files. Still recommend that the CMake config file method be used for building projects with CMake. - (ADB - 2023/02/16 GH-1546,GH-2259) + Fixes GitHub issues #1546 and #2259 Tools @@ -363,7 +357,7 @@ Bug Fixes since HDF5-1.14.0 release Replaced the H5TOOLS_GOTO_ERROR with just H5TOOLS_ERROR. - (ADB - 2023/04/06 GH-2598) + Fixes GitHub issue #2598 Performance From 36429d1c2e4cb39c381b3757200cd256cdd5906a Mon Sep 17 00:00:00 2001 From: Dave Allured Date: Sun, 16 Apr 2023 14:07:02 -0600 Subject: [PATCH 137/231] H5Spoint.c: Fix mistake in comment (#2750) --- src/H5Spoint.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/H5Spoint.c b/src/H5Spoint.c index 1c3697cc15a..9cac3c4fdd0 100644 --- a/src/H5Spoint.c +++ b/src/H5Spoint.c @@ -122,7 +122,7 @@ const H5S_select_class_t H5S_sel_point[1] = {{ H5S__point_iter_init, }}; -/* Format version bounds for dataspace hyperslab selection */ +/* Format version bounds for dataspace point selection */ const unsigned H5O_sds_point_ver_bounds[] = { H5S_POINT_VERSION_1, /* H5F_LIBVER_EARLIEST */ H5S_POINT_VERSION_1, /* H5F_LIBVER_V18 */ From a0da034e03f348ee25153e478da842d02fe425f1 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sun, 16 Apr 2023 18:01:33 -0700 Subject: [PATCH 138/231] Sanitize the attribute info message decode fxn (#2748) Adds bounds checking on the buffer in the attribute info message's decode function (H5O__ainfo_decode). --- src/H5Oainfo.c | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/src/H5Oainfo.c b/src/H5Oainfo.c index 3364a674f8d..f2ce582573e 100644 --- a/src/H5Oainfo.c +++ b/src/H5Oainfo.c @@ -13,8 +13,6 @@ /*------------------------------------------------------------------------- * * Created: H5Oainfo.c - * Mar 6 2007 - * Quincey Koziol * * Purpose: Attribute Information messages. * @@ -88,27 +86,28 @@ H5FL_DEFINE_STATIC(H5O_ainfo_t); * * Return: Success: Ptr to new message in native form. 
* Failure: NULL - * - * Programmer: Quincey Koziol - * Mar 6 2007 - * *------------------------------------------------------------------------- */ static void * H5O__ainfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5O_ainfo_t *ainfo = NULL; /* Attribute info */ - unsigned char flags; /* Flags for encoding attribute info */ - void *ret_value = NULL; /* Return value */ + const uint8_t *p_end = p + p_size - 1; /* End of input buffer */ + H5O_ainfo_t *ainfo = NULL; /* Attribute info */ + unsigned char flags; /* Flags for encoding attribute info */ + uint8_t sizeof_addr; /* Size of addresses in this file */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); + sizeof_addr = H5F_sizeof_addr(f); + /* Version of message */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (*p++ != H5O_AINFO_VERSION) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message") @@ -117,6 +116,8 @@ H5O__ainfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUS HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Get the flags for the message */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); flags = *p++; if (flags & ~H5O_AINFO_ALL_FLAGS) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad flag value for message") @@ -127,20 +128,30 @@ H5O__ainfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUS ainfo->nattrs = HSIZET_MAX; /* Max. 
creation order value for the object */ - if (ainfo->track_corder) + if (ainfo->track_corder) { + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT16DECODE(p, ainfo->max_crt_idx) + } else ainfo->max_crt_idx = H5O_MAX_CRT_ORDER_IDX; /* Address of fractal heap to store "dense" attributes */ + if (H5_IS_BUFFER_OVERFLOW(p, sizeof_addr, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(ainfo->fheap_addr)); /* Address of v2 B-tree to index names of attributes (names are always indexed) */ + if (H5_IS_BUFFER_OVERFLOW(p, sizeof_addr, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(ainfo->name_bt2_addr)); /* Address of v2 B-tree to index creation order of links, if there is one */ - if (ainfo->index_corder) + if (ainfo->index_corder) { + if (H5_IS_BUFFER_OVERFLOW(p, sizeof_addr, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(ainfo->corder_bt2_addr)); + } else ainfo->corder_bt2_addr = HADDR_UNDEF; From 5b9cc2fe5db13de3c4c1b538d612fe1e1e588888 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Sun, 16 Apr 2023 20:37:06 -0500 Subject: [PATCH 139/231] Update hdfeos5 workflow concurrency group (#2766) Changes concurrency group for hdfeos5 workflow so that it is separate from the concurrency group for the CI workflow --- .github/workflows/hdfeos5.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hdfeos5.yml b/.github/workflows/hdfeos5.yml index 2140fb6e000..142bf7cf692 100644 --- a/.github/workflows/hdfeos5.yml +++ b/.github/workflows/hdfeos5.yml @@ -16,7 +16,7 @@ on: # Using concurrency to cancel any in-progress job or run concurrency: - group: ${{ github.ref }} + group: hdfeos5-${{ github.ref }} cancel-in-progress: true jobs: From a1c66aa399df8a47e74ad1fc78ce95d98d1c6e0b Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sun, 16 Apr 2023 18:58:47 -0700 Subject: [PATCH 140/231] Sanitize B-tree k ohdr message decode call (#2754) * Check buffer bounds * Clean up memory on errors --- src/H5Obtreek.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/src/H5Obtreek.c b/src/H5Obtreek.c index ff157dabd98..c21b3b91fae 100644 --- a/src/H5Obtreek.c +++ b/src/H5Obtreek.c @@ -59,33 +59,32 @@ const H5O_msg_class_t H5O_MSG_BTREEK[1] = {{ #define H5O_BTREEK_VERSION 0 /*------------------------------------------------------------------------- - * Function: H5O__btreek_decode + * Function: H5O__btreek_decode * - * Purpose: Decode a shared message table message and return a pointer + * Purpose: Decode a shared message table message and return a pointer * to a newly allocated H5O_btreek_t struct. * - * Return: Success: Ptr to new message in native struct. 
- * Failure: NULL - * - * Programmer: Quincey Koziol - * Mar 1, 2007 - * + * Return: Success: Pointer to new message in native struct + * Failure: NULL *------------------------------------------------------------------------- */ static void * -H5O__btreek_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) +H5O__btreek_decode(H5F_t H5_ATTR_NDEBUG_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, + unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, + const uint8_t *p) { - H5O_btreek_t *mesg; /* Native message */ - void *ret_value = NULL; /* Return value */ + const uint8_t *p_end = p + p_size - 1; /* End of input buffer */ + H5O_btreek_t *mesg = NULL; /* Native message */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE - /* Sanity check */ HDassert(f); HDassert(p); /* Version of message */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (*p++ != H5O_BTREEK_VERSION) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message") @@ -94,14 +93,22 @@ H5O__btreek_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsig HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for v1 B-tree 'K' message") /* Retrieve non-default B-tree 'K' values */ + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT16DECODE(p, mesg->btree_k[H5B_CHUNK_ID]); + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT16DECODE(p, mesg->btree_k[H5B_SNODE_ID]); + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT16DECODE(p, mesg->sym_leaf_k); /* Set return value */ ret_value = (void *)mesg; done: + if (NULL == ret_value) + H5MM_free(mesg); FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__btreek_decode() */ From cb6610625f2424d77474ea45deb5c24640fd6456 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 17 Apr 2023 09:14:09 -0700 Subject: [PATCH 141/231] Remove more author/date lines from RELEASE.txt (#2767) --- release_docs/RELEASE.txt | 6 ------ 1 file changed, 6 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 99ba1d1ab2a..1036d48d980 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -55,8 +55,6 @@ New Features default for parallel debug builds and off by default for other build types. CMake has been updated to match this behavior. - (JTH - 2023/03/29) - - Added new option to build libaec and zlib inline with CMake. Using the CMake FetchContent module, the external filters can populate @@ -80,8 +78,6 @@ New Features See the CMakeFilters.cmake and config/cmake/cacheinit.cmake files for usage. - (ADB - 2023/02/21) - Library: -------- @@ -95,8 +91,6 @@ New Features machine's node-local storage while placing the subfiling configuration file on a file system readable by all machine nodes. 
- (JTH - 2023/02/22) - Parallel Library: ----------------- From 16e9f8afa5d55c463fc388afaa3bae3ba6da9791 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 17 Apr 2023 15:22:12 -0500 Subject: [PATCH 142/231] Check for invalid AAPL in H5Aopen (#2712) --- src/H5VLnative_attr.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/H5VLnative_attr.c b/src/H5VLnative_attr.c index b7e9b131a96..d83b6bdff1f 100644 --- a/src/H5VLnative_attr.c +++ b/src/H5VLnative_attr.c @@ -135,12 +135,13 @@ H5VL__native_attr_create(void *obj, const H5VL_loc_params_t *loc_params, const c *------------------------------------------------------------------------- */ void * -H5VL__native_attr_open(void *obj, const H5VL_loc_params_t *loc_params, const char *attr_name, - hid_t H5_ATTR_UNUSED aapl_id, hid_t H5_ATTR_UNUSED dxpl_id, void H5_ATTR_UNUSED **req) +H5VL__native_attr_open(void *obj, const H5VL_loc_params_t *loc_params, const char *attr_name, hid_t aapl_id, + hid_t H5_ATTR_UNUSED dxpl_id, void H5_ATTR_UNUSED **req) { - H5G_loc_t loc; /* Object location */ - H5A_t *attr = NULL; /* Attribute opened */ - void *ret_value; + H5P_genplist_t *plist; + H5G_loc_t loc; /* Object location */ + H5A_t *attr = NULL; /* Attribute opened */ + void *ret_value; FUNC_ENTER_PACKAGE @@ -148,6 +149,9 @@ H5VL__native_attr_open(void *obj, const H5VL_loc_params_t *loc_params, const cha if (H5G_loc_real(obj, loc_params->obj_type, &loc) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a file or file object") + if (NULL == (plist = H5P_object_verify(aapl_id, H5P_ATTRIBUTE_ACCESS))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "AAPL is not an attribute access property list") + if (loc_params->type == H5VL_OBJECT_BY_SELF) { /* H5Aopen */ /* Open the attribute */ From dcff3c9a5d6a6c0abb7b65b9d918e25b2fba2d81 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 17 Apr 2023 16:02:39 -0500 Subject: [PATCH 143/231] Update concurrency group for main CI workflow (#2768) Updates main CI workflow concurrency group so that the group should be a unique string formed as "workflow file-commit" or "workflow file-PR number". This should only cancel actions for a PR if the same PR is committed to while the actions are running. 
--- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ea63fd8f594..d08227eb2c8 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -17,7 +17,7 @@ on: # Using concurrency to cancel any in-progress job or run concurrency: - group: ${{ github.ref }} + group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} cancel-in-progress: true # A workflow run is made up of one or more jobs that can run sequentially or From 225c3919b7e0d7f6f83dcb20ee2f6937fce76e0b Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 17 Apr 2023 19:10:44 -0700 Subject: [PATCH 144/231] Removes programmer/date lines from src headers (#2747) --- src/H5ACmodule.h | 9 ++-- src/H5ACpkg.h | 17 +++--- src/H5Amodule.h | 9 ++-- src/H5Apkg.h | 3 -- src/H5B2module.h | 9 ++-- src/H5B2pkg.h | 9 ++-- src/H5Bmodule.h | 9 ++-- src/H5Bpkg.h | 9 ++-- src/H5CXmodule.h | 9 ++-- src/H5Cmodule.h | 9 ++-- src/H5Dmodule.h | 9 ++-- src/H5Dpkg.h | 9 ++-- src/H5EAmodule.h | 9 ++-- src/H5EApkg.h | 3 -- src/H5ESmodule.h | 3 -- src/H5ESpkg.h | 9 ++-- src/H5Emodule.h | 9 ++-- src/H5Epkg.h | 9 ++-- src/H5FAmodule.h | 9 ++-- src/H5FApkg.h | 8 ++- src/H5FDcore.h | 3 -- src/H5FDdirect.h | 3 -- src/H5FDdrvr_module.h | 9 ++-- src/H5FDfamily.h | 3 -- src/H5FDhdfs.h | 8 --- src/H5FDlog.h | 3 -- src/H5FDmodule.h | 9 ++-- src/H5FDmpi.h | 3 -- src/H5FDmpio.h | 3 -- src/H5FDmulti.h | 3 -- src/H5FDpkg.h | 9 ++-- src/H5FDprivate.h | 4 -- src/H5FDpublic.h | 4 -- src/H5FDros3.h | 3 -- src/H5FDs3comms.h | 5 -- src/H5FDsec2.h | 5 +- src/H5FDstdio.h | 5 +- src/H5FDwindows.h | 6 +-- src/H5FLmodule.h | 9 ++-- src/H5FSmodule.h | 9 ++-- src/H5FSpkg.h | 3 -- src/H5Fmodule.h | 9 ++-- src/H5Fpkg.h | 9 ++-- src/H5Gmodule.h | 9 ++-- src/H5Gpkg.h | 3 -- src/H5HFmodule.h | 9 ++-- src/H5HFpkg.h | 9 ++-- src/H5HGmodule.h | 9 ++-- src/H5HGpkg.h | 3 -- src/H5HGprivate.h | 4 -- src/H5HLmodule.h | 9 ++-- src/H5HLpkg.h | 3 -- src/H5Imodule.h | 3 -- src/H5Ipkg.h | 9 ++-- src/H5Lmodule.h | 9 ++-- src/H5Lpkg.h | 3 -- src/H5MFmodule.h | 9 ++-- src/H5MFpkg.h | 3 -- src/H5Omodule.h | 9 ++-- src/H5Oshared.h | 117 ++++++++++++++---------------------------- src/H5PBmodule.h | 9 ++-- src/H5Pmodule.h | 9 ++-- src/H5Ppkg.h | 9 ++-- src/H5RSmodule.h | 7 +-- src/H5SLmodule.h | 9 ++-- src/H5SMmodule.h | 9 ++-- src/H5SMpkg.h | 3 -- src/H5SMprivate.h | 7 +-- src/H5Smodule.h | 9 ++-- src/H5Spkg.h | 9 ++-- src/H5Tmodule.h | 9 ++-- src/H5Tpkg.h | 9 ++-- src/H5VMprivate.h | 91 +++++++------------------------- src/H5Zmodule.h | 9 ++-- src/H5Zprivate.h | 4 -- src/H5Zpublic.h | 4 -- src/H5private.h | 23 +++------ 77 files changed, 203 insertions(+), 542 deletions(-) diff --git a/src/H5ACmodule.h b/src/H5ACmodule.h index a7f9a27d539..1ce26f6b15f 100644 --- a/src/H5ACmodule.h +++ b/src/H5ACmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5AC package. Including this header means that the source file - * is part of the H5AC package. + * Purpose: This file contains declarations which define macros for the + * H5AC package. Including this header means that the source file + * is part of the H5AC package. 
*/ #ifndef H5ACmodule_H #define H5ACmodule_H diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h index 8ccab4e68ff..beb7ba7347b 100644 --- a/src/H5ACpkg.h +++ b/src/H5ACpkg.h @@ -11,18 +11,15 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: John Mainzer -- 4/19/06 + * Purpose: This file contains declarations which are normally visible + * only within the H5AC package (just H5AC.c at present). * - * Purpose: This file contains declarations which are normally visible - * only within the H5AC package (just H5AC.c at present). - * - * Source files outside the H5AC package should include - * H5ACprivate.h instead. - * - * The one exception to this rule is testpar/t_cache.c. The - * test code is easier to write if it can look at H5AC_aux_t. - * Indeed, this is the main reason why this file was created. + * Source files outside the H5AC package should include + * H5ACprivate.h instead. * + * The one exception to this rule is testpar/t_cache.c. The + * test code is easier to write if it can look at H5AC_aux_t. + * Indeed, this is the main reason why this file was created. */ #if !(defined H5AC_FRIEND || defined H5AC_MODULE) diff --git a/src/H5Amodule.h b/src/H5Amodule.h index 4823d0d6e7a..75a4c8c2402 100644 --- a/src/H5Amodule.h +++ b/src/H5Amodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5A package. Including this header means that the source file - * is part of the H5A package. + * Purpose: This file contains declarations which define macros for the + * H5A package. Including this header means that the source file + * is part of the H5A package. */ #ifndef H5Amodule_H #define H5Amodule_H diff --git a/src/H5Apkg.h b/src/H5Apkg.h index 49dfda2aeb0..3a5c411c5ff 100644 --- a/src/H5Apkg.h +++ b/src/H5Apkg.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Monday, Apr 20 - * * Purpose: This file contains declarations which are visible only within * the H5A package. Source files outside the H5A package should * include H5Aprivate.h instead. diff --git a/src/H5B2module.h b/src/H5B2module.h index 6a3131b5cc1..8eaea2f266d 100644 --- a/src/H5B2module.h +++ b/src/H5B2module.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5B2 package. Including this header means that the source file - * is part of the H5B2 package. + * Purpose: This file contains declarations which define macros for the + * H5B2 package. Including this header means that the source file + * is part of the H5B2 package. */ #ifndef H5B2module_H #define H5B2module_H diff --git a/src/H5B2pkg.h b/src/H5B2pkg.h index 626ae2f9350..668ea5d4834 100644 --- a/src/H5B2pkg.h +++ b/src/H5B2pkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Monday, January 31, 2005 - * - * Purpose: This file contains declarations which are visible only within - * the H5B2 package. Source files outside the H5B2 package should - * include H5B2private.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5B2 package. 
Source files outside the H5B2 package should + * include H5B2private.h instead. */ #if !(defined H5B2_FRIEND || defined H5B2_MODULE) #error "Do not include this file outside the H5B2 package!" diff --git a/src/H5Bmodule.h b/src/H5Bmodule.h index 9c0f73bc627..0ded7562a7f 100644 --- a/src/H5Bmodule.h +++ b/src/H5Bmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5B package. Including this header means that the source file - * is part of the H5B package. + * Purpose: This file contains declarations which define macros for the + * H5B package. Including this header means that the source file + * is part of the H5B package. */ #ifndef H5Bmodule_H #define H5Bmodule_H diff --git a/src/H5Bpkg.h b/src/H5Bpkg.h index 3147315e244..ef9f56e50d4 100644 --- a/src/H5Bpkg.h +++ b/src/H5Bpkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Thursday, May 15, 2003 - * - * Purpose: This file contains declarations which are visible only within - * the H5B package. Source files outside the H5B package should - * include H5Bprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5B package. Source files outside the H5B package should + * include H5Bprivate.h instead. */ #if !(defined H5B_FRIEND || defined H5B_MODULE) #error "Do not include this file outside the H5B package!" diff --git a/src/H5CXmodule.h b/src/H5CXmodule.h index f9844a1f9a0..ffb4804ab57 100644 --- a/src/H5CXmodule.h +++ b/src/H5CXmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Sunday, February 25, 2018 - * - * Purpose: This file contains declarations which define macros for the - * H5CX package. Including this header means that the source file - * is part of the H5CX package. + * Purpose: This file contains declarations which define macros for the + * H5CX package. Including this header means that the source file + * is part of the H5CX package. */ #ifndef H5CXmodule_H #define H5CXmodule_H diff --git a/src/H5Cmodule.h b/src/H5Cmodule.h index a3ef4d65ea4..64c5279d92f 100644 --- a/src/H5Cmodule.h +++ b/src/H5Cmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5C package. Including this header means that the source file - * is part of the H5C package. + * Purpose: This file contains declarations which define macros for the + * H5C package. Including this header means that the source file + * is part of the H5C package. */ #ifndef H5Cmodule_H #define H5Cmodule_H diff --git a/src/H5Dmodule.h b/src/H5Dmodule.h index 4bdf936e9b4..eb58f6870dc 100644 --- a/src/H5Dmodule.h +++ b/src/H5Dmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5D package. Including this header means that the source file - * is part of the H5D package. + * Purpose: This file contains declarations which define macros for the + * H5D package. 
Including this header means that the source file + * is part of the H5D package. */ #ifndef H5Dmodule_H #define H5Dmodule_H diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 6a09408b719..0906028d33c 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Monday, April 14, 2003 - * - * Purpose: This file contains declarations which are visible only within - * the H5D package. Source files outside the H5D package should - * include H5Dprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5D package. Source files outside the H5D package should + * include H5Dprivate.h instead. */ #if !(defined H5D_FRIEND || defined H5D_MODULE) #error "Do not include this file outside the H5D package!" diff --git a/src/H5EAmodule.h b/src/H5EAmodule.h index bba2a99b793..f992393ed56 100644 --- a/src/H5EAmodule.h +++ b/src/H5EAmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5EA package. Including this header means that the source file - * is part of the H5EA package. + * Purpose: This file contains declarations which define macros for the + * H5EA package. Including this header means that the source file + * is part of the H5EA package. */ #ifndef H5EAmodule_H #define H5EAmodule_H diff --git a/src/H5EApkg.h b/src/H5EApkg.h index 272f1b827f7..5014cc50f70 100644 --- a/src/H5EApkg.h +++ b/src/H5EApkg.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Tuesday, June 17, 2008 - * * Purpose: This file contains declarations which are visible only * within the H5EA package. Source files outside the H5EA * package should include H5EAprivate.h instead. diff --git a/src/H5ESmodule.h b/src/H5ESmodule.h index d945b702bcf..f128feeab0e 100644 --- a/src/H5ESmodule.h +++ b/src/H5ESmodule.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Monday, April 6, 2020 - * * Purpose: This file contains declarations which define macros for the * H5ES package. Including this header means that the source file * is part of the H5ES package. diff --git a/src/H5ESpkg.h b/src/H5ESpkg.h index 68e16c127c7..655efbbe8e3 100644 --- a/src/H5ESpkg.h +++ b/src/H5ESpkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Wednesday, April 8, 2020 - * - * Purpose: This file contains declarations which are visible only within - * the H5ES package. Source files outside the H5ES package should - * include H5ESprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5ES package. Source files outside the H5ES package should + * include H5ESprivate.h instead. */ #if !(defined H5ES_FRIEND || defined H5ES_MODULE) #error "Do not include this file outside the H5ES package!" 
diff --git a/src/H5Emodule.h b/src/H5Emodule.h index 0e4655cac5a..7c4a83685ab 100644 --- a/src/H5Emodule.h +++ b/src/H5Emodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5E package. Including this header means that the source file - * is part of the H5E package. + * Purpose: This file contains declarations which define macros for the + * H5E package. Including this header means that the source file + * is part of the H5E package. */ #ifndef H5Emodule_H #define H5Emodule_H diff --git a/src/H5Epkg.h b/src/H5Epkg.h index c6097e9fff9..c761179defa 100644 --- a/src/H5Epkg.h +++ b/src/H5Epkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Wednesday, April 11, 2007 - * - * Purpose: This file contains declarations which are visible only within - * the H5E package. Source files outside the H5E package should - * include H5Eprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5E package. Source files outside the H5E package should + * include H5Eprivate.h instead. */ #if !(defined H5E_FRIEND || defined H5E_MODULE) #error "Do not include this file outside the H5E package!" diff --git a/src/H5FAmodule.h b/src/H5FAmodule.h index 3fbdb901d86..8ef75820d57 100644 --- a/src/H5FAmodule.h +++ b/src/H5FAmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5FA package. Including this header means that the source file - * is part of the H5FA package. + * Purpose: This file contains declarations which define macros for the + * H5FA package. Including this header means that the source file + * is part of the H5FA package. */ #ifndef H5FAmodule_H #define H5FAmodule_H diff --git a/src/H5FApkg.h b/src/H5FApkg.h index 48aa8df6d75..c08e575370d 100644 --- a/src/H5FApkg.h +++ b/src/H5FApkg.h @@ -11,11 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: - * - * Purpose: This file contains declarations which are visible only within - * the H5FA package. Source files outside the H5FA package should - * include H5FAprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5FA package. Source files outside the H5FA package should + * include H5FAprivate.h instead. */ #if !(defined(H5FA_FRIEND) | defined(H5FA_MODULE)) #error "Do not include this file outside the H5FA package!" diff --git a/src/H5FDcore.h b/src/H5FDcore.h index fbff138b7bc..e4d792b5f98 100644 --- a/src/H5FDcore.h +++ b/src/H5FDcore.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Robb Matzke - * Monday, August 2, 1999 - * * Purpose: The public header file for the core driver. */ #ifndef H5FDcore_H diff --git a/src/H5FDdirect.h b/src/H5FDdirect.h index faa85db955a..7858dfdea43 100644 --- a/src/H5FDdirect.h +++ b/src/H5FDdirect.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Raymond Lu - * Wednesday, 20 September 2006 - * * Purpose: The public header file for the direct driver. 
*/ #ifndef H5FDdirect_H diff --git a/src/H5FDdrvr_module.h b/src/H5FDdrvr_module.h index 4a7a4d10f1e..c984cf51004 100644 --- a/src/H5FDdrvr_module.h +++ b/src/H5FDdrvr_module.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5FD driver package. Including this header means that the source file - * is part of the H5FD driver package. + * Purpose: This file contains declarations which define macros for the + * H5FD driver package. Including this header means that the source file + * is part of the H5FD driver package. */ #ifndef H5FDdrvr_module_H #define H5FDdrvr_module_H diff --git a/src/H5FDfamily.h b/src/H5FDfamily.h index b0c560fadac..76020f0a268 100644 --- a/src/H5FDfamily.h +++ b/src/H5FDfamily.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Robb Matzke - * Monday, August 4, 1999 - * * Purpose: The public header file for the family driver. */ #ifndef H5FDfamily_H diff --git a/src/H5FDhdfs.h b/src/H5FDhdfs.h index e3888d256e7..c8c2c37f1b5 100644 --- a/src/H5FDhdfs.h +++ b/src/H5FDhdfs.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Jacob Smith - * 2018-04-23 - * * Purpose: The public header file for the hdfs driver. */ @@ -90,11 +87,6 @@ extern "C" { * * TBD: If -1, relies on a default value. * - * - * - * Programmer: Jacob Smith - * 2018-04-23 - * ****************************************************************************/ #define H5FD__CURR_HDFS_FAPL_T_VERSION 1 diff --git a/src/H5FDlog.h b/src/H5FDlog.h index 96ded839692..ae4e2d05e72 100644 --- a/src/H5FDlog.h +++ b/src/H5FDlog.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Monday, April 17, 2000 - * * Purpose: The public header file for the log driver. */ #ifndef H5FDlog_H diff --git a/src/H5FDmodule.h b/src/H5FDmodule.h index 728b4b83040..1e29ca912a6 100644 --- a/src/H5FDmodule.h +++ b/src/H5FDmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5FD package. Including this header means that the source file - * is part of the H5FD package. + * Purpose: This file contains declarations which define macros for the + * H5FD package. Including this header means that the source file + * is part of the H5FD package. */ #ifndef H5FDmodule_H #define H5FDmodule_H diff --git a/src/H5FDmpi.h b/src/H5FDmpi.h index fa3862be405..9cee0e69b74 100644 --- a/src/H5FDmpi.h +++ b/src/H5FDmpi.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Friday, January 30, 2004 - * * Purpose: The public header file for common items for all MPI VFL drivers */ #ifndef H5FDmpi_H diff --git a/src/H5FDmpio.h b/src/H5FDmpio.h index ee7fa2e8d13..36786016dfe 100644 --- a/src/H5FDmpio.h +++ b/src/H5FDmpio.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Robb Matzke - * Monday, August 2, 1999 - * * Purpose: The public header file for the mpio driver. 
*/ #ifndef H5FDmpio_H diff --git a/src/H5FDmulti.h b/src/H5FDmulti.h index 1765c6ad15b..23c37039c3e 100644 --- a/src/H5FDmulti.h +++ b/src/H5FDmulti.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Robb Matzke - * Monday, August 2, 1999 - * * Purpose: The public header file for the "multi" driver. */ #ifndef H5FDmulti_H diff --git a/src/H5FDpkg.h b/src/H5FDpkg.h index 893486db870..b1d929c3e64 100644 --- a/src/H5FDpkg.h +++ b/src/H5FDpkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Thursday, January 3, 2008 - * - * Purpose: This file contains declarations which are visible only within - * the H5FD package. Source files outside the H5FD package should - * include H5FDprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5FD package. Source files outside the H5FD package should + * include H5FDprivate.h instead. */ #if !(defined H5FD_FRIEND || defined H5FD_MODULE) #error "Do not include this file outside the H5FD package!" diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h index 0d5c323fb1e..c4ccfdd6f9d 100644 --- a/src/H5FDprivate.h +++ b/src/H5FDprivate.h @@ -10,10 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * Programmer: Robb Matzke - * Monday, July 26, 1999 - */ #ifndef H5FDprivate_H #define H5FDprivate_H diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h index 422cd182ab9..891b3485bfd 100644 --- a/src/H5FDpublic.h +++ b/src/H5FDpublic.h @@ -10,10 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * Programmer: Robb Matzke - * Monday, July 26, 1999 - */ #ifndef H5FDpublic_H #define H5FDpublic_H diff --git a/src/H5FDros3.h b/src/H5FDros3.h index 4c9e77fdc9d..f84b1a44a6d 100644 --- a/src/H5FDros3.h +++ b/src/H5FDros3.h @@ -13,9 +13,6 @@ /* * Read-Only S3 Virtual File Driver (VFD) * - * Programmer: John Mainzer - * 2017-10-10 - * * Purpose: The public header file for the ros3 driver. */ #ifndef H5FDros3_H diff --git a/src/H5FDs3comms.h b/src/H5FDs3comms.h index aa354d97366..1b21ad7510d 100644 --- a/src/H5FDs3comms.h +++ b/src/H5FDs3comms.h @@ -45,11 +45,6 @@ * ``` * ...in destination buffer. * - * TODO: put documentation in a consistent place and point to it from here. - * - * Programmer: Jacob Smith - * 2017-11-30 - * *****************************************************************************/ #include "H5private.h" /* Generic Functions */ diff --git a/src/H5FDsec2.h b/src/H5FDsec2.h index 5fa75b6f2f2..56f45751d32 100644 --- a/src/H5FDsec2.h +++ b/src/H5FDsec2.h @@ -11,10 +11,7 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Robb Matzke - * Monday, August 2, 1999 - * - * Purpose: The public header file for the sec2 driver. + * Purpose: The public header file for the sec2 driver */ #ifndef H5FDsec2_H #define H5FDsec2_H diff --git a/src/H5FDstdio.h b/src/H5FDstdio.h index 6ff5162a670..e2e05a77d64 100644 --- a/src/H5FDstdio.h +++ b/src/H5FDstdio.h @@ -11,10 +11,7 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Robb Matzke - * Monday, August 2, 1999 - * - * Purpose: The public header file for the sec2 driver. 
+ * Purpose: The public header file for the C stdio driver */ #ifndef H5FDstdio_H #define H5FDstdio_H diff --git a/src/H5FDwindows.h b/src/H5FDwindows.h index a43e16fc78d..a491e54d41c 100644 --- a/src/H5FDwindows.h +++ b/src/H5FDwindows.h @@ -11,11 +11,7 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Scott Wegner - * Based on code by Robb Matzke - * Thursday, May 24 2007 - * - * Purpose: The public header file for the windows driver. + * Purpose: The public header file for the Windows driver */ #ifndef H5FDwindows_H #define H5FDwindows_H diff --git a/src/H5FLmodule.h b/src/H5FLmodule.h index 04856038c83..5e9f15733f5 100644 --- a/src/H5FLmodule.h +++ b/src/H5FLmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5FL package. Including this header means that the source file - * is part of the H5FL package. + * Purpose: This file contains declarations which define macros for the + * H5FL package. Including this header means that the source file + * is part of the H5FL package. */ #ifndef H5FLmodule_H #define H5FLmodule_H diff --git a/src/H5FSmodule.h b/src/H5FSmodule.h index 841b8fde70e..46bf8bf0c27 100644 --- a/src/H5FSmodule.h +++ b/src/H5FSmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5FS package. Including this header means that the source file - * is part of the H5FS package. + * Purpose: This file contains declarations which define macros for the + * H5FS package. Including this header means that the source file + * is part of the H5FS package. */ #ifndef H5FSmodule_H #define H5FSmodule_H diff --git a/src/H5FSpkg.h b/src/H5FSpkg.h index 987e695e4ed..4ec7aafe6bc 100644 --- a/src/H5FSpkg.h +++ b/src/H5FSpkg.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Tuesday, May 2, 2006 - * * Purpose: This file contains declarations which are visible only within * the H5FS package. Source files outside the H5FS package should * include H5FSprivate.h instead. diff --git a/src/H5Fmodule.h b/src/H5Fmodule.h index 2d02e2fd7c7..6812ab306fa 100644 --- a/src/H5Fmodule.h +++ b/src/H5Fmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5F package. Including this header means that the source file - * is part of the H5F package. + * Purpose: This file contains declarations which define macros for the + * H5F package. Including this header means that the source file + * is part of the H5F package. */ #ifndef H5Fmodule_H #define H5Fmodule_H diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index 8e003273932..9f7002f9c58 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Thursday, September 28, 2000 - * - * Purpose: This file contains declarations which are visible only within - * the H5F package. 
Source files outside the H5F package should - * include H5Fprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5F package. Source files outside the H5F package should + * include H5Fprivate.h instead. */ #if !(defined H5F_FRIEND || defined H5F_MODULE) #error "Do not include this file outside the H5F package!" diff --git a/src/H5Gmodule.h b/src/H5Gmodule.h index f09486d650f..bebca878ab4 100644 --- a/src/H5Gmodule.h +++ b/src/H5Gmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5G package. Including this header means that the source file - * is part of the H5G package. + * Purpose: This file contains declarations which define macros for the + * H5G package. Including this header means that the source file + * is part of the H5G package. */ #ifndef H5Gmodule_H #define H5Gmodule_H diff --git a/src/H5Gpkg.h b/src/H5Gpkg.h index ce85f2782e8..fdc05ec6ccb 100644 --- a/src/H5Gpkg.h +++ b/src/H5Gpkg.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Robb Matzke - * Thursday, September 18, 1997 - * * Purpose: This file contains declarations which are visible * only within the H5G package. Source files outside the * H5G package should include H5Gprivate.h instead. diff --git a/src/H5HFmodule.h b/src/H5HFmodule.h index 818c3d4078c..c4fb437d4f1 100644 --- a/src/H5HFmodule.h +++ b/src/H5HFmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5HF package. Including this header means that the source file - * is part of the H5HF package. + * Purpose: This file contains declarations which define macros for the + * H5HF package. Including this header means that the source file + * is part of the H5HF package. */ #ifndef H5HFmodule_H #define H5HFmodule_H diff --git a/src/H5HFpkg.h b/src/H5HFpkg.h index f1ac41d8d2d..83fda1f1f75 100644 --- a/src/H5HFpkg.h +++ b/src/H5HFpkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Friday, February 24, 2006 - * - * Purpose: This file contains declarations which are visible only within - * the H5HF package. Source files outside the H5HF package should - * include H5HFprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5HF package. Source files outside the H5HF package should + * include H5HFprivate.h instead. */ #if !(defined H5HF_FRIEND || defined H5HF_MODULE) #error "Do not include this file outside the H5HF package!" diff --git a/src/H5HGmodule.h b/src/H5HGmodule.h index 412223e02b0..772ebe6163d 100644 --- a/src/H5HGmodule.h +++ b/src/H5HGmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5HG package. Including this header means that the source file - * is part of the H5HG package. + * Purpose: This file contains declarations which define macros for the + * H5HG package. 
Including this header means that the source file + * is part of the H5HG package. */ #ifndef H5HGmodule_H #define H5HGmodule_H diff --git a/src/H5HGpkg.h b/src/H5HGpkg.h index 5623e8bc0f4..ab7cd093c0e 100644 --- a/src/H5HGpkg.h +++ b/src/H5HGpkg.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Wednesday, July 9, 2003 - * * Purpose: This file contains declarations which are visible * only within the H5HG package. Source files outside the * H5HG package should include H5HGprivate.h instead. diff --git a/src/H5HGprivate.h b/src/H5HGprivate.h index fae3713fafb..892a0c7326a 100644 --- a/src/H5HGprivate.h +++ b/src/H5HGprivate.h @@ -10,10 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * Programmer: Robb Matzke - * Friday, March 27, 1998 - */ #ifndef H5HGprivate_H #define H5HGprivate_H diff --git a/src/H5HLmodule.h b/src/H5HLmodule.h index 5432f1a68b7..1a871abf745 100644 --- a/src/H5HLmodule.h +++ b/src/H5HLmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5HL package. Including this header means that the source file - * is part of the H5HL package. + * Purpose: This file contains declarations which define macros for the + * H5HL package. Including this header means that the source file + * is part of the H5HL package. */ #ifndef H5HLmodule_H #define H5HLmodule_H diff --git a/src/H5HLpkg.h b/src/H5HLpkg.h index 9471bd9f43e..47c20603f59 100644 --- a/src/H5HLpkg.h +++ b/src/H5HLpkg.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Wednesday, July 9, 2003 - * * Purpose: This file contains declarations which are visible * only within the H5HL package. Source files outside the * H5HL package should include H5HLprivate.h instead. diff --git a/src/H5Imodule.h b/src/H5Imodule.h index 9470cc92ca9..e6828506d1e 100644 --- a/src/H5Imodule.h +++ b/src/H5Imodule.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * * Purpose: This file contains declarations which define macros for the * H5I package. Including this header means that the source file * is part of the H5I package. diff --git a/src/H5Ipkg.h b/src/H5Ipkg.h index b921ea06255..1009ecb6254 100644 --- a/src/H5Ipkg.h +++ b/src/H5Ipkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Thursday, May 15, 2003 - * - * Purpose: This file contains declarations which are visible only within - * the H5I package. Source files outside the H5I package should - * include H5Iprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5I package. Source files outside the H5I package should + * include H5Iprivate.h instead. */ #if !(defined H5I_FRIEND || defined H5I_MODULE) #error "Do not include this file outside the H5I package!" 
diff --git a/src/H5Lmodule.h b/src/H5Lmodule.h index cbb50608f14..26f9045334e 100644 --- a/src/H5Lmodule.h +++ b/src/H5Lmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5L package. Including this header means that the source file - * is part of the H5L package. + * Purpose: This file contains declarations which define macros for the + * H5L package. Including this header means that the source file + * is part of the H5L package. */ #ifndef H5Lmodule_H #define H5Lmodule_H diff --git a/src/H5Lpkg.h b/src/H5Lpkg.h index 25d9978df46..53737d1a3aa 100644 --- a/src/H5Lpkg.h +++ b/src/H5Lpkg.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: James Laird - * Friday, December 1, 2005 - * * Purpose: This file contains declarations which are visible * only within the H5L package. Source files outside the * H5L package should include H5Lprivate.h instead. diff --git a/src/H5MFmodule.h b/src/H5MFmodule.h index 3e806c68c72..dd32cdc210d 100644 --- a/src/H5MFmodule.h +++ b/src/H5MFmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5MF package. Including this header means that the source file - * is part of the H5MF package. + * Purpose: This file contains declarations which define macros for the + * H5MF package. Including this header means that the source file + * is part of the H5MF package. */ #ifndef H5MFmodule_H #define H5MFmodule_H diff --git a/src/H5MFpkg.h b/src/H5MFpkg.h index 2711fcf841a..b6796742517 100644 --- a/src/H5MFpkg.h +++ b/src/H5MFpkg.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Tuesday, January 8, 2008 - * * Purpose: This file contains declarations which are visible only within * the H5MF package. Source files outside the H5MF package should * include H5MFprivate.h instead. diff --git a/src/H5Omodule.h b/src/H5Omodule.h index deb00bda57a..c3c3496bf32 100644 --- a/src/H5Omodule.h +++ b/src/H5Omodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5O package. Including this header means that the source file - * is part of the H5O package. + * Purpose: This file contains declarations which define macros for the + * H5O package. Including this header means that the source file + * is part of the H5O package. */ #ifndef H5Omodule_H #define H5Omodule_H diff --git a/src/H5Oshared.h b/src/H5Oshared.h index 3280c4befdd..b7d2353aa1c 100644 --- a/src/H5Oshared.h +++ b/src/H5Oshared.h @@ -11,16 +11,13 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Friday, January 19, 2007 - * * Purpose: This file contains inline definitions for "generic" routines - * supporting a "shared message interface" (ala Java) for object - * header messages that can be shared. 
This interface is - * dependent on a bunch of macros being defined which define - * the name of the interface and "real" methods which need to - * be implemented for each message class that supports the - * shared message interface. + * supporting a "shared message interface" (ala Java) for object + * header messages that can be shared. This interface is + * dependent on a bunch of macros being defined which define + * the name of the interface and "real" methods which need to + * be implemented for each message class that supports the + * shared message interface. */ #ifndef H5Oshared_H @@ -31,16 +28,12 @@ * * Purpose: Decode an object header message that may be shared. * - * Note: The actual name of this routine can be different in each source - * file that this header file is included in, and must be defined - * prior to including this header file. - * - * Return: Success: Pointer to the new message in native form - * Failure: NULL - * - * Programmer: Quincey Koziol - * Friday, January 19, 2007 + * Note: The actual name of this routine can be different in each source + * file that this header file is included in, and must be defined + * prior to including this header file. * + * Return: Success: Pointer to the new message in native form + * Failure: NULL *------------------------------------------------------------------------- */ static inline void * @@ -90,16 +83,11 @@ H5O_SHARED_DECODE(H5F_t *f, H5O_t *open_oh, unsigned mesg_flags, unsigned *iofla * * Purpose: Encode an object header message that may be shared. * - * Note: The actual name of this routine can be different in each source - * file that this header file is included in, and must be defined - * prior to including this header file. - * - * Return: Success: Non-negative - * Failure: Negative - * - * Programmer: Quincey Koziol - * Friday, January 19, 2007 + * Note: The actual name of this routine can be different in each source + * file that this header file is included in, and must be defined + * prior to including this header file. * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static inline herr_t @@ -143,18 +131,14 @@ H5O_SHARED_ENCODE(H5F_t *f, hbool_t disable_shared, uint8_t *p, const void *_mes /*------------------------------------------------------------------------- * Function: H5O_SHARED_SIZE * - * Purpose: Returns the length of an encoded message. + * Purpose: Returns the length of an encoded message. * - * Note: The actual name of this routine can be different in each source - * file that this header file is included in, and must be defined - * prior to including this header file. - * - * Return: Success: Length - * Failure: 0 - * - * Programmer: Quincey Koziol - * Friday, January 19, 2007 + * Note: The actual name of this routine can be different in each source + * file that this header file is included in, and must be defined + * prior to including this header file. * + * Return: Success: Length + * Failure: 0 *------------------------------------------------------------------------- */ static inline size_t @@ -198,16 +182,11 @@ H5O_SHARED_SIZE(const H5F_t *f, hbool_t disable_shared, const void *_mesg) * Purpose: Decrement reference count on any objects referenced by * message * - * Note: The actual name of this routine can be different in each source - * file that this header file is included in, and must be defined - * prior to including this header file. 
- * - * Return: Success: Non-negative - * Failure: Negative - * - * Programmer: Quincey Koziol - * Friday, January 19, 2007 + * Note: The actual name of this routine can be different in each source + * file that this header file is included in, and must be defined + * prior to including this header file. * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static inline herr_t @@ -249,16 +228,11 @@ H5O_SHARED_DELETE(H5F_t *f, H5O_t *open_oh, void *_mesg) * Purpose: Increment reference count on any objects referenced by * message * - * Note: The actual name of this routine can be different in each source - * file that this header file is included in, and must be defined - * prior to including this header file. - * - * Return: Success: Non-negative - * Failure: Negative - * - * Programmer: Quincey Koziol - * Friday, January 19, 2007 + * Note: The actual name of this routine can be different in each source + * file that this header file is included in, and must be defined + * prior to including this header file. * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static inline herr_t @@ -299,16 +273,11 @@ H5O_SHARED_LINK(H5F_t *f, H5O_t *open_oh, void *_mesg) * * Purpose: Copies a message from _SRC to _DEST in file * - * Note: The actual name of this routine can be different in each source - * file that this header file is included in, and must be defined - * prior to including this header file. - * - * Return: Success: Non-negative - * Failure: Negative - * - * Programmer: Quincey Koziol - * Friday, January 19, 2007 + * Note: The actual name of this routine can be different in each source + * file that this header file is included in, and must be defined + * prior to including this header file. * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static inline void * @@ -366,12 +335,7 @@ H5O_SHARED_COPY_FILE(H5F_t *file_src, void *_native_src, H5F_t *file_dst, hbool_ * file that this header file is included in, and must be defined * prior to including this header file. * - * Return: Success: Non-negative - * Failure: Negative - * - * Programmer: Peter Cao - * May 25, 2007 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static inline herr_t @@ -432,16 +396,11 @@ H5O_SHARED_POST_COPY_FILE(const H5O_loc_t H5_ATTR_NDEBUG_UNUSED *oloc_src, const * * Purpose: Prints debugging info for a potentially shared message. * - * Note: The actual name of this routine can be different in each source - * file that this header file is included in, and must be defined - * prior to including this header file. - * - * Return: Success: Non-negative - * Failure: Negative - * - * Programmer: Quincey Koziol - * Saturday, February 3, 2007 + * Note: The actual name of this routine can be different in each source + * file that this header file is included in, and must be defined + * prior to including this header file. 
* + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static inline herr_t diff --git a/src/H5PBmodule.h b/src/H5PBmodule.h index 39db5d971f5..3a353dbd0b2 100644 --- a/src/H5PBmodule.h +++ b/src/H5PBmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5PB package. Including this header means that the source file - * is part of the H5PB package. + * Purpose: This file contains declarations which define macros for the + * H5PB package. Including this header means that the source file + * is part of the H5PB package. */ #ifndef H5PBmodule_H #define H5PBmodule_H diff --git a/src/H5Pmodule.h b/src/H5Pmodule.h index f2a1e5aa7fe..ea6a5e49c0e 100644 --- a/src/H5Pmodule.h +++ b/src/H5Pmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5P package. Including this header means that the source file - * is part of the H5P package. + * Purpose: This file contains declarations which define macros for the + * H5P package. Including this header means that the source file + * is part of the H5P package. */ #ifndef H5Pmodule_H #define H5Pmodule_H diff --git a/src/H5Ppkg.h b/src/H5Ppkg.h index 05cc6a8d8a9..9a58d0065ff 100644 --- a/src/H5Ppkg.h +++ b/src/H5Ppkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Friday, November 16, 2001 - * - * Purpose: This file contains declarations which are visible only within - * the H5P package. Source files outside the H5P package should - * include H5Pprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5P package. Source files outside the H5P package should + * include H5Pprivate.h instead. */ #if !(defined H5P_FRIEND || defined H5P_MODULE) #error "Do not include this file outside the H5P package!" diff --git a/src/H5RSmodule.h b/src/H5RSmodule.h index 8cd033311a0..ee6b7e80e51 100644 --- a/src/H5RSmodule.h +++ b/src/H5RSmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, October 10, 2020 - * * Purpose: This file contains declarations which define macros for the - * H5RS package. Including this header means that the source file - * is part of the H5RS package. + * H5RS package. Including this header means that the source file + * is part of the H5RS package. */ #ifndef H5RSmodule_H #define H5RSmodule_H diff --git a/src/H5SLmodule.h b/src/H5SLmodule.h index 606fa906fea..b0b3064f15b 100644 --- a/src/H5SLmodule.h +++ b/src/H5SLmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5SL package. Including this header means that the source file - * is part of the H5SL package. + * Purpose: This file contains declarations which define macros for the + * H5SL package. Including this header means that the source file + * is part of the H5SL package. 
*/ #ifndef H5SLmodule_H #define H5SLmodule_H diff --git a/src/H5SMmodule.h b/src/H5SMmodule.h index 6308e145cb2..1eaeea29e99 100644 --- a/src/H5SMmodule.h +++ b/src/H5SMmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5SM package. Including this header means that the source file - * is part of the H5SM package. + * Purpose: This file contains declarations which define macros for the + * H5SM package. Including this header means that the source file + * is part of the H5SM package. */ #ifndef H5SMmodule_H #define H5SMmodule_H diff --git a/src/H5SMpkg.h b/src/H5SMpkg.h index 6e446015b7c..64f5c76937f 100644 --- a/src/H5SMpkg.h +++ b/src/H5SMpkg.h @@ -11,9 +11,6 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: James Laird - * Thursday, March 30, 2006 - * * Purpose: This file contains declarations which are visible only within * the H5SM shared object header messages package. Source files * outside the H5SM package should include H5SMprivate.h instead. diff --git a/src/H5SMprivate.h b/src/H5SMprivate.h index 95f831edff2..f6496f3988b 100644 --- a/src/H5SMprivate.h +++ b/src/H5SMprivate.h @@ -11,11 +11,8 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: James Laird - * Thursday, March 2, 2006 - * - * Purpose: This file contains private declarations for the H5SM - * shared object header messages module. + * Purpose: This file contains private declarations for the H5SM + * shared object header messages module. */ #ifndef H5SMprivate_H #define H5SMprivate_H diff --git a/src/H5Smodule.h b/src/H5Smodule.h index 73f5953e4d9..c5081c781d2 100644 --- a/src/H5Smodule.h +++ b/src/H5Smodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5S package. Including this header means that the source file - * is part of the H5S package. + * Purpose: This file contains declarations which define macros for the + * H5S package. Including this header means that the source file + * is part of the H5S package. */ #ifndef H5Smodule_H #define H5Smodule_H diff --git a/src/H5Spkg.h b/src/H5Spkg.h index 8cc00be0b78..1163484a73d 100644 --- a/src/H5Spkg.h +++ b/src/H5Spkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Thursday, September 28, 2000 - * - * Purpose: This file contains declarations which are visible only within - * the H5S package. Source files outside the H5S package should - * include H5Sprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5S package. Source files outside the H5S package should + * include H5Sprivate.h instead. */ #if !(defined H5S_FRIEND || defined H5S_MODULE) #error "Do not include this file outside the H5S package!" 
diff --git a/src/H5Tmodule.h b/src/H5Tmodule.h index bc521f1cf21..c059d6e5c4c 100644 --- a/src/H5Tmodule.h +++ b/src/H5Tmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5T package. Including this header means that the source file - * is part of the H5T package. + * Purpose: This file contains declarations which define macros for the + * H5T package. Including this header means that the source file + * is part of the H5T package. */ #ifndef H5Tmodule_H #define H5Tmodule_H diff --git a/src/H5Tpkg.h b/src/H5Tpkg.h index 98288f3e153..504f756adde 100644 --- a/src/H5Tpkg.h +++ b/src/H5Tpkg.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Robb Matzke - * Monday, December 8, 1997 - * - * Purpose: This file contains declarations which are visible only within - * the H5T package. Source files outside the H5T package should - * include H5Tprivate.h instead. + * Purpose: This file contains declarations which are visible only within + * the H5T package. Source files outside the H5T package should + * include H5Tprivate.h instead. */ #if !(defined H5T_FRIEND || defined H5T_MODULE) #error "Do not include this file outside the H5T package!" diff --git a/src/H5VMprivate.h b/src/H5VMprivate.h index 1f94f0e70ad..49bb53d18d9 100644 --- a/src/H5VMprivate.h +++ b/src/H5VMprivate.h @@ -10,10 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * Programmer: Robb Matzke - * Friday, October 10, 1997 - */ #ifndef H5VMprivate_H #define H5VMprivate_H @@ -133,13 +129,8 @@ H5_DLL ssize_t H5VM_memcpyvv(void *_dst, size_t dst_max_nseq, size_t *dst_curr_s * only as an optimization and the naming (with a single underscore) * reflects its inclusion in a "private" header file. * - * Return: Success: Product of elements - * - * Failure: 1 if N is zero - * - * Programmer: Robb Matzke - * Friday, October 10, 1997 - * + * Return: Success: Product of elements + * Failure: 1 if N is zero *------------------------------------------------------------------------- */ static inline hsize_t H5_ATTR_UNUSED @@ -168,14 +159,8 @@ H5VM_vector_reduce_product(unsigned n, const hsize_t *v) * only as an optimization and the naming (with a single underscore) * reflects its inclusion in a "private" header file. * - * Return: Success: TRUE if all elements are zero, - * FALSE otherwise - * - * Failure: TRUE if N is zero - * - * Programmer: Robb Matzke - * Friday, October 10, 1997 - * + * Return: Success: TRUE if all elements are zero, + * Failure: TRUE if N is zero *------------------------------------------------------------------------- */ static inline htri_t H5_ATTR_UNUSED @@ -205,14 +190,9 @@ H5VM_vector_zerop_u(int n, const hsize_t *v) * only as an optimization and the naming (with a single underscore) * reflects its inclusion in a "private" header file. 
* - * Return: Success: TRUE if all elements are zero, - * FALSE otherwise - * - * Failure: TRUE if N is zero - * - * Programmer: Robb Matzke - * Friday, October 10, 1997 - * + * Return: Success: TRUE if all elements are zero, + * FALSE otherwise + * Failure: TRUE if N is zero *------------------------------------------------------------------------- */ static inline htri_t H5_ATTR_UNUSED @@ -243,15 +223,11 @@ H5VM_vector_zerop_s(int n, const hssize_t *v) * only as an optimization and the naming (with a single underscore) * reflects its inclusion in a "private" header file. * - * Return: Success: -1 if V1 is less than V2 - * 0 if they are equal - * 1 if V1 is greater than V2 - * - * Failure: 0 if N is zero - * - * Programmer: Robb Matzke - * Friday, October 10, 1997 + * Return: Success: -1 if V1 is less than V2 + * 0 if they are equal + * 1 if V1 is greater than V2 * + * Failure: 0 if N is zero *------------------------------------------------------------------------- */ static inline int H5_ATTR_UNUSED @@ -291,15 +267,11 @@ H5VM_vector_cmp_u(unsigned n, const hsize_t *v1, const hsize_t *v2) * only as an optimization and the naming (with a single underscore) * reflects its inclusion in a "private" header file. * - * Return: Success: -1 if V1 is less than V2 - * 0 if they are equal - * 1 if V1 is greater than V2 - * - * Failure: 0 if N is zero - * - * Programmer: Robb Matzke - * Wednesday, April 8, 1998 + * Return: Success: -1 if V1 is less than V2 + * 0 if they are equal + * 1 if V1 is greater than V2 * + * Failure: 0 if N is zero *------------------------------------------------------------------------- */ static inline int H5_ATTR_UNUSED @@ -339,10 +311,6 @@ H5VM_vector_cmp_s(unsigned n, const hssize_t *v1, const hssize_t *v2) * reflects its inclusion in a "private" header file. * * Return: void - * - * Programmer: Robb Matzke - * Monday, October 13, 1997 - * *------------------------------------------------------------------------- */ static inline void H5_ATTR_UNUSED @@ -383,10 +351,6 @@ static const unsigned char LogTable256[] = { * reflects its inclusion in a "private" header file. * * Return: log2(n) (always - no failure condition) - * - * Programmer: Quincey Koziol - * Monday, March 6, 2006 - * *------------------------------------------------------------------------- */ static inline unsigned H5_ATTR_UNUSED @@ -432,10 +396,6 @@ static const unsigned MultiplyDeBruijnBitPosition[32] = {0, 1, 28, 2, 29, 14, * reflects its inclusion in a "private" header file. * * Return: log2(n) (always - no failure condition) - * - * Programmer: Quincey Koziol - * Monday, February 27, 2006 - * *------------------------------------------------------------------------- */ static inline H5_ATTR_PURE unsigned @@ -450,16 +410,13 @@ H5VM_log2_of2(uint32_t n) /*------------------------------------------------------------------------- * Function: H5VM_power2up * - * Purpose: Round up a number to the next power of 2 + * Purpose: Round up a number to the next power of 2 * * Note: Although this routine is 'static' in this file, that's intended * only as an optimization and the naming (with a single underscore) * reflects its inclusion in a "private" header file. 
* - * Return: Return the number which is a power of 2 - * - * Programmer: Vailin Choi; Nov 2014 - * + * Return: Return the number which is a power of 2 *------------------------------------------------------------------------- */ static inline H5_ATTR_CONST hsize_t @@ -488,10 +445,6 @@ H5VM_power2up(hsize_t n) * reflects its inclusion in a "private" header file. * * Return: Number of bytes needed - * - * Programmer: Quincey Koziol - * Thursday, March 13, 2008 - * *------------------------------------------------------------------------- */ static inline unsigned H5_ATTR_UNUSED @@ -520,10 +473,6 @@ static const unsigned char H5VM_bit_clear_g[8] = {0x7F, 0xBF, 0xDF, 0xEF, 0xF7, * reflects its inclusion in a "private" header file. * * Return: TRUE/FALSE - * - * Programmer: Quincey Koziol - * Tuesday, November 25, 2008 - * *------------------------------------------------------------------------- */ static inline hbool_t H5_ATTR_UNUSED @@ -549,11 +498,7 @@ H5VM_bit_get(const unsigned char *buf, size_t offset) * only as an optimization and the naming (with a single underscore) * reflects its inclusion in a "private" header file. * - * Return: None - * - * Programmer: Quincey Koziol - * Tuesday, November 25, 2008 - * + * Return: void *------------------------------------------------------------------------- */ static inline void H5_ATTR_UNUSED diff --git a/src/H5Zmodule.h b/src/H5Zmodule.h index 57e08c1dd50..8b1a0dedc4e 100644 --- a/src/H5Zmodule.h +++ b/src/H5Zmodule.h @@ -11,12 +11,9 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Quincey Koziol - * Saturday, September 12, 2015 - * - * Purpose: This file contains declarations which define macros for the - * H5Z package. Including this header means that the source file - * is part of the H5Z package. + * Purpose: This file contains declarations which define macros for the + * H5Z package. Including this header means that the source file + * is part of the H5Z package. */ #ifndef H5Zmodule_H #define H5Zmodule_H diff --git a/src/H5Zprivate.h b/src/H5Zprivate.h index 7b70022e1b8..166273805b6 100644 --- a/src/H5Zprivate.h +++ b/src/H5Zprivate.h @@ -10,10 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* Programmer: Robb Matzke - * Thursday, April 16, 1998 - */ - #ifndef H5Zprivate_H #define H5Zprivate_H diff --git a/src/H5Zpublic.h b/src/H5Zpublic.h index a63729e5745..8a059870c99 100644 --- a/src/H5Zpublic.h +++ b/src/H5Zpublic.h @@ -10,10 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* Programmer: Robb Matzke - * Thursday, April 16, 1998 - */ - #ifndef H5Zpublic_H #define H5Zpublic_H diff --git a/src/H5private.h b/src/H5private.h index eb7d8e0a810..70aed8f76e1 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -10,14 +10,11 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* Programmer: Robb Matzke - * Friday, October 30, 1998 - * - * Purpose: This file is included by all HDF5 library source files to - * define common things which are not defined in the HDF5 API. - * The configuration constants like H5_HAVE_UNISTD_H etc. are - * defined in H5config.h which is included by H5public.h. - * +/* + * Purpose: This file is included by all HDF5 library source files to + * define common things which are not defined in the HDF5 API. + * The configuration constants like H5_HAVE_UNISTD_H etc. 
are + * defined in H5config.h which is included by H5public.h. */ #ifndef H5private_H @@ -1866,9 +1863,6 @@ H5_DLL herr_t H5_trace_args(struct H5RS_str_t *rs, const char *type, va_list ap) * use initializers that require special cleanup code to * execute if FUNC_ENTER() fails since a failing FUNC_ENTER() * returns immediately without branching to the `done' label. - * - * Programmer: Quincey Koziol - * *------------------------------------------------------------------------- */ @@ -2294,11 +2288,8 @@ H5_DLL herr_t H5CX_pop(hbool_t update_dxpl_props); FUNC_ENTER_COMMON_NOERR(H5_IS_PKG(__func__)); /*------------------------------------------------------------------------- - * Purpose: Register function exit for code profiling. This should be - * the last statement executed by a function. - * - * Programmer: Quincey Koziol - * + * Purpose: Register function exit for code profiling. This should be + * the last statement executed by a function. *------------------------------------------------------------------------- */ /* Threadsafety termination code for API routines */ From 2b6c1bc41293000cece28dc6b18a73b54cce2d0c Mon Sep 17 00:00:00 2001 From: raylu-hdf <60487644+raylu-hdf@users.noreply.github.com> Date: Tue, 18 Apr 2023 10:19:45 -0500 Subject: [PATCH 145/231] Jira issue OESS-337: Create test for H5VLconnector_info_to_str (#2334) * Jira issue OESS-337: Adding a test case for H5VLconnector_info_to_str, H5VLconnector_str_to_info, and H5VLconnector_free_info. The test may need to change after the possible changes of the parameters of the API functions. --- test/vol.c | 158 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 152 insertions(+), 6 deletions(-) diff --git a/test/vol.c b/test/vol.c index 776248b96a9..27ffdcde1c6 100644 --- a/test/vol.c +++ b/test/vol.c @@ -174,6 +174,9 @@ static const H5VL_class_t reg_opt_vol_g = { }; static herr_t fake_get_cap_flags(const void *info, uint64_t *cap_flags); +static herr_t fake_vol_info_to_str(const void *info, char **str); +static herr_t fake_vol_str_to_info(const char *str, void **info); +static herr_t fake_vol_free_info(void *info); #define FAKE_VOL_NAME "fake" #define FAKE_VOL_VALUE ((H5VL_class_value_t)501) @@ -193,12 +196,12 @@ static const H5VL_class_t fake_vol_g = { NULL, /* terminate */ { /* info_cls */ - (size_t)0, /* size */ - NULL, /* copy */ - NULL, /* compare */ - NULL, /* free */ - NULL, /* to_str */ - NULL, /* from_str */ + (size_t)0, /* size */ + NULL, /* copy */ + NULL, /* compare */ + fake_vol_free_info, /* free */ + fake_vol_info_to_str, /* to_str */ + fake_vol_str_to_info, /* from_str */ }, { /* wrap_cls */ @@ -558,6 +561,77 @@ reg_opt_datatype_get(void H5_ATTR_UNUSED *obj, H5VL_datatype_get_args_t *args, h return ret_value; } /* end reg_opt_datatype_get() */ +/*------------------------------------------------------------------------- + * Function: fake_vol_info_to_str + * + * Purpose: Convert the fake VOL info to a string + * + * Return: SUCCEED/FAIL + * + *------------------------------------------------------------------------- + */ +static herr_t +fake_vol_info_to_str(const void *info, char **str) +{ + herr_t ret_value = SUCCEED; /* Return value */ + const int val = *(const int *)info; + const int str_size = 16; /* The size of the string */ + + /* Verify the info is correct before continuing */ + if (val != INT_MAX) { + HDprintf("The value of info (%d) is incorrect\n", val); + return FAIL; + } + + /* Allocate the string long enough for the info */ + *str = (char *)malloc(str_size); + + HDsnprintf(*str, 
str_size, "%d", *((const int *)info)); + + return ret_value; +} /* end fake_vol_info_to_str() */ + +/*------------------------------------------------------------------------- + * Function: fake_vol_str_to_info + * + * Purpose: Convert a string to a VOL info + * + * Return: SUCCEED/FAIL + * + *------------------------------------------------------------------------- + */ +static herr_t +fake_vol_str_to_info(const char *str, void **info /*out*/) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + *((int **)info) = (int *)malloc(sizeof(int)); + + **((int **)info) = atoi(str); + + return ret_value; +} /* end fake_vol_str_to_info() */ + +/*------------------------------------------------------------------------- + * Function: fake_vol_free_info + * + * Purpose: Free the memory of a VOL info + * + * Return: SUCCEED/FAIL + * + *------------------------------------------------------------------------- + */ +static herr_t +fake_vol_free_info(void *info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + if (info) + HDfree(info); + + return ret_value; +} /* end fake_vol_free_info() */ + /*------------------------------------------------------------------------- * Function: fake_get_cap_flags * @@ -2362,6 +2436,77 @@ test_wrap_register(void) return FAIL; } /* end test_wrap_register() */ +/*------------------------------------------------------------------------- + * Function: test_info_to_str() + * + * Purpose: Tests the conversion between a VOL info and a string + * + * Return: SUCCEED/FAIL + * + *------------------------------------------------------------------------- + */ +static herr_t +test_info_to_str(void) +{ + hid_t fapl_id = H5I_INVALID_HID; + hid_t vol_id = H5I_INVALID_HID; + int info = INT_MAX; + char *ret_str = NULL; + int *ret_info = NULL; + + TESTING("conversion between a VOL info and a string"); + + /* Register a fake VOL */ + if ((vol_id = H5VLregister_connector(&fake_vol_g, H5P_DEFAULT)) < 0) + TEST_ERROR; + + if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) + TEST_ERROR; + + if (H5Pset_vol(fapl_id, vol_id, NULL) < 0) + TEST_ERROR; + + /* Serialize the VOL info into a string */ + if (H5VLconnector_info_to_str(&info, vol_id, &ret_str) < 0) + TEST_ERROR; + + /* Parse the string and construct it into a VOL info */ + if (H5VLconnector_str_to_info(ret_str, vol_id, (void **)(&ret_info)) < 0) + TEST_ERROR; + + if (*ret_info != info) + FAIL_PUTS_ERROR("the returned VOL info doesn't match the original info"); + + /* Free the VOL info being returned */ + if (H5VLfree_connector_info(vol_id, ret_info) < 0) + TEST_ERROR; + + /* Free the string being returned */ + if (ret_str) + HDfree(ret_str); + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + + /* Unregister the fake VOL ID */ + if (H5VLunregister_connector(vol_id) < 0) + TEST_ERROR; + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5VLunregister_connector(vol_id); + H5Pclose(fapl_id); + } + H5E_END_TRY; + + return FAIL; +} /* end test_info_to_str() */ + /*------------------------------------------------------------------------- * Function: test_query_optional * @@ -2479,6 +2624,7 @@ main(void) nerrors += test_vol_cap_flags() < 0 ? 1 : 0; nerrors += test_get_vol_name() < 0 ? 1 : 0; nerrors += test_wrap_register() < 0 ? 1 : 0; + nerrors += test_info_to_str() < 0 ? 1 : 0; nerrors += test_query_optional() < 0 ? 
1 : 0; if (nerrors) { From 31451b96c0aa5d5eda6fb4285772aa689894b329 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:21:18 -0500 Subject: [PATCH 146/231] Add no subsets option to h5diff like h5dump #2688 (#2756) --- release_docs/RELEASE.txt | 7 + tools/lib/h5diff.h | 45 +++--- tools/lib/h5tools_utils.c | 122 ++++++++++++++++ tools/lib/h5tools_utils.h | 3 + tools/src/h5diff/h5diff_common.c | 157 +++------------------ tools/src/h5dump/h5dump.c | 117 +-------------- tools/test/h5diff/testfiles/h5diff_10.txt | 2 + tools/test/h5diff/testfiles/h5diff_600.txt | 2 + tools/test/h5diff/testfiles/h5diff_603.txt | 2 + tools/test/h5diff/testfiles/h5diff_606.txt | 2 + tools/test/h5diff/testfiles/h5diff_612.txt | 2 + tools/test/h5diff/testfiles/h5diff_615.txt | 2 + tools/test/h5diff/testfiles/h5diff_621.txt | 2 + tools/test/h5diff/testfiles/h5diff_622.txt | 2 + tools/test/h5diff/testfiles/h5diff_623.txt | 2 + tools/test/h5diff/testfiles/h5diff_624.txt | 2 + 16 files changed, 203 insertions(+), 268 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 1036d48d980..62ac8f2e784 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -346,6 +346,13 @@ Bug Fixes since HDF5-1.14.0 release Tools ----- + - Names of objects with square brackets will have trouble without the + special argument, --no-compact-subset, on the h5dump command line. + + h5diff did not have this option and now it has been added. + + Fixes GitHub issue #2682 + - In the tools traverse function - an error in either visit call will bypass the cleanup of the local data variables. diff --git a/tools/lib/h5diff.h b/tools/lib/h5diff.h index 917ea6af17e..d16c8551733 100644 --- a/tools/lib/h5diff.h +++ b/tools/lib/h5diff.h @@ -51,28 +51,29 @@ typedef struct { int mode_quiet; /* quiet mode: no output at all */ int mode_report; /* report mode: print the data */ int mode_verbose; /* verbose mode: print the data, list of objcets, warnings */ - int mode_verbose_level; /* control verbose details */ - int mode_list_not_cmp; /* list not comparable messages */ - int print_header; /* print header */ - int print_percentage; /* print percentage */ - int print_dims; /* print dimension index */ - int delta_bool; /* delta, absolute value to compare */ - double delta; /* delta value */ - int use_system_epsilon; /* flag to use system epsilon (1 or 0) */ - int percent_bool; /* relative error to compare*/ - double percent; /* relative error value */ - hbool_t follow_links; /* follow symbolic links */ - int no_dangle_links; /* return error when find dangling link */ - int cmn_objs; /* do we have common objects */ - int not_cmp; /* are the objects comparable */ - int contents; /* equal contents */ - int do_nans; /* consider Nans while diffing floats */ - int exclude_path; /* exclude path to an object */ - int exclude_attr_path; /* exclude path to an object */ - struct exclude_path_list *exclude; /* keep exclude path list */ - struct exclude_path_list *exclude_attr; /* keep exclude attribute list */ - int count_bool; /* count, compare up to count */ - hsize_t count; /* count value */ + int mode_verbose_level; /* control verbose details */ + int mode_list_not_cmp; /* list not comparable messages */ + int print_header; /* print header */ + int print_percentage; /* print percentage */ + int print_dims; /* print dimension index */ + int delta_bool; /* delta, absolute value to compare */ + double delta; /* delta value */ + int use_system_epsilon; /* flag to use 
system epsilon (1 or 0) */ + int percent_bool; /* relative error to compare*/ + double percent; /* relative error value */ + hbool_t follow_links; /* follow symbolic links */ + int no_dangle_links; /* return error when find dangling link */ + int cmn_objs; /* do we have common objects */ + int not_cmp; /* are the objects comparable */ + int contents; /* equal contents */ + int do_nans; /* consider Nans while diffing floats */ + int disable_compact_subset; /* disable compact form of subset notation */ + int exclude_path; /* exclude path to an object */ + int exclude_attr_path; /* exclude path to an object */ + struct exclude_path_list *exclude; /* keep exclude path list */ + struct exclude_path_list *exclude_attr; /* keep exclude attribute list */ + int count_bool; /* count, compare up to count */ + hsize_t count; /* count value */ diff_err_t err_stat; /* an error occurred (2, error, 1, differences, 0, no error) */ hsize_t nelmts; /* total number of elements */ hsize_t hs_nelmts; /* number of elements to read at a time*/ diff --git a/tools/lib/h5tools_utils.c b/tools/lib/h5tools_utils.c index c8b0644b7f8..8a9d8060441 100644 --- a/tools/lib/h5tools_utils.c +++ b/tools/lib/h5tools_utils.c @@ -157,6 +157,128 @@ help_ref_msg(FILE *output) HDfprintf(output, "see the <%s> entry in the 'HDF5 Reference Manual'.\n", h5tools_getprogname()); } +/*------------------------------------------------------------------------- + * Function: parse_hsize_list + * + * Purpose: Parse a list of comma or space separated integers and return + * them in a list. The string being passed into this function + * should be at the start of the list you want to parse. You are + * responsible for freeing the array returned from here. + * + * Lists in the so-called "terse" syntax are separated by + * semicolons (;). The lists themselves can be separated by + * either commas (,) or white spaces. + * + * Return: + *------------------------------------------------------------------------- + */ +void +parse_hsize_list(const char *h_list, subset_d *d) +{ + hsize_t *p_list; + const char *ptr; + unsigned int size_count = 0; + unsigned int i = 0; + unsigned int last_digit = 0; + + if (!h_list || !*h_list || *h_list == ';') + return; + + H5TOOLS_START_DEBUG(" - h_list:%s", h_list); + /* count how many integers do we have */ + for (ptr = h_list; ptr && *ptr && *ptr != ';' && *ptr != ']'; ptr++) + if (HDisdigit(*ptr)) { + if (!last_digit) + /* the last read character wasn't a digit */ + size_count++; + + last_digit = 1; + } + else + last_digit = 0; + + if (size_count == 0) { + /* there aren't any integers to read */ + H5TOOLS_ENDDEBUG("No integers to read"); + return; + } + H5TOOLS_DEBUG("Number integers to read=%ld", size_count); + + /* allocate an array for the integers in the list */ + if ((p_list = (hsize_t *)HDcalloc(size_count, sizeof(hsize_t))) == NULL) + H5TOOLS_INFO("Unable to allocate space for subset data"); + + for (ptr = h_list; i < size_count && ptr && *ptr && *ptr != ';' && *ptr != ']'; ptr++) + if (HDisdigit(*ptr)) { + /* we should have an integer now */ + p_list[i++] = (hsize_t)HDstrtoull(ptr, NULL, 0); + + while (HDisdigit(*ptr)) + /* scroll to end of integer */ + ptr++; + } + d->data = p_list; + d->len = size_count; + H5TOOLS_ENDDEBUG(" "); +} + +/*------------------------------------------------------------------------- + * Function: parse_subset_params + * + * Purpose: Parse the so-called "terse" syntax for specifying subsetting parameters. 
+ * + * Return: Success: struct subset_t object + * Failure: NULL + *------------------------------------------------------------------------- + */ +struct subset_t * +parse_subset_params(const char *dset) +{ + struct subset_t *s = NULL; + char *brace; + const char *q_dset; + + H5TOOLS_START_DEBUG(" - dset:%s", dset); + /* if dset name is quoted wait till after second quote to look for subset brackets */ + if (*dset == '"') + q_dset = HDstrchr(dset, '"'); + else + q_dset = dset; + if ((brace = HDstrrchr(q_dset, '[')) != NULL) { + *brace++ = '\0'; + + s = (struct subset_t *)HDcalloc(1, sizeof(struct subset_t)); + parse_hsize_list(brace, &s->start); + + while (*brace && *brace != ';') + brace++; + + if (*brace) + brace++; + + parse_hsize_list(brace, &s->stride); + + while (*brace && *brace != ';') + brace++; + + if (*brace) + brace++; + + parse_hsize_list(brace, &s->count); + + while (*brace && *brace != ';') + brace++; + + if (*brace) + brace++; + + parse_hsize_list(brace, &s->block); + } + H5TOOLS_ENDDEBUG(" "); + + return s; +} + /***************************************************************************** * * Function: parse_tuple() diff --git a/tools/lib/h5tools_utils.h b/tools/lib/h5tools_utils.h index f28456e374e..a4fde5c8e2d 100644 --- a/tools/lib/h5tools_utils.h +++ b/tools/lib/h5tools_utils.h @@ -67,9 +67,12 @@ typedef struct find_objs_t { H5TOOLS_DLLVAR unsigned h5tools_nCols; /*max number of columns for outputting */ /* Definitions of useful routines */ +H5TOOLS_DLL struct subset_t *parse_subset_params(const char *dset); + H5TOOLS_DLL void indentation(unsigned); H5TOOLS_DLL void print_version(const char *progname); H5TOOLS_DLL void parallel_print(const char *format, ...) H5_ATTR_FORMAT(printf, 1, 2); +H5TOOLS_DLL void parse_hsize_list(const char *h_list, subset_d *d); H5TOOLS_DLL herr_t parse_tuple(const char *start, int sep, char **cpy_out, unsigned *nelems, char ***ptrs_out); H5TOOLS_DLL void error_msg(const char *fmt, ...) H5_ATTR_FORMAT(printf, 1, 2); diff --git a/tools/src/h5diff/h5diff_common.c b/tools/src/h5diff/h5diff_common.c index 96ab70087a3..a2fe5d9b80e 100644 --- a/tools/src/h5diff/h5diff_common.c +++ b/tools/src/h5diff/h5diff_common.c @@ -24,23 +24,24 @@ static int check_d_input(const char *); * Command-line options: The user can specify short or long-named * parameters. 
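The relocated helpers keep h5dump's "terse" notation: an optional bracketed suffix of up to four semicolon-separated lists (START;STRIDE;COUNT;BLOCK), each list holding comma- or space-separated integers. A minimal usage sketch follows; the driver function is hypothetical and only assumes the subset_t/subset_d layout visible above (an hsize_t *data array plus a len count) from the tools headers.

    #include <stdio.h>
    #include "h5tools.h"       /* struct subset_t / subset_d         */
    #include "h5tools_utils.h" /* parse_subset_params() (now shared) */

    static void
    show_subset(void)
    {
        /* parse_subset_params() truncates the name at the '[' in place, so it
         * must be handed a writable buffer, never a string literal. */
        char obj[] = "/foo/mydataset[0,0;1,1;10,10;1,1]";

        struct subset_t *s = parse_subset_params(obj);

        if (s != NULL) {
            /* obj now reads "/foo/mydataset"; each bracketed list became an
             * hsize_t array. The caller owns s and the four data arrays (see
             * the comment on parse_hsize_list() above). */
            printf("object: %s\n", obj);
            printf("start holds %u values, first = %llu\n", (unsigned)s->start.len,
                   (unsigned long long)s->start.data[0]);
        }
    }

When --no-compact-subset is given, h5diff now skips this parsing step entirely (see the parse_command_line() change further down), so an object whose name itself contains "[" can still be addressed, matching the escape hatch h5dump already provides.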
*/ -static const char *s_opts = "hVrv*qn:d:p:NcelxE:A:S*"; -static struct h5_long_options l_opts[] = {{"help", no_arg, 'h'}, - {"version", no_arg, 'V'}, - {"report", no_arg, 'r'}, - {"verbose", optional_arg, 'v'}, - {"quiet", no_arg, 'q'}, - {"count", require_arg, 'n'}, +static const char *s_opts = "cd:ehln:p:qrv*xA:CE:NS*V"; +static struct h5_long_options l_opts[] = {{"compare", no_arg, 'c'}, {"delta", require_arg, 'd'}, - {"relative", require_arg, 'p'}, - {"nan", no_arg, 'N'}, - {"compare", no_arg, 'c'}, {"use-system-epsilon", no_arg, 'e'}, + {"help", no_arg, 'h'}, {"follow-symlinks", no_arg, 'l'}, + {"count", require_arg, 'n'}, + {"relative", require_arg, 'p'}, + {"quiet", no_arg, 'q'}, + {"report", no_arg, 'r'}, + {"verbose", optional_arg, 'v'}, {"no-dangling-links", no_arg, 'x'}, - {"exclude-path", require_arg, 'E'}, {"exclude-attribute", require_arg, 'A'}, + {"no-compact-subset", no_arg, 'C'}, + {"exclude-path", require_arg, 'E'}, + {"nan", no_arg, 'N'}, {"enable-error-stack", optional_arg, 'S'}, + {"version", no_arg, 'V'}, {"vol-value-1", require_arg, '1'}, {"vol-name-1", require_arg, '2'}, {"vol-info-1", require_arg, '3'}, @@ -103,122 +104,6 @@ check_options(diff_opt_t *opts) } } -/*------------------------------------------------------------------------- - * Function: parse_hsize_list - * - * Purpose: Parse a list of comma or space separated integers and return - * them in a list. The string being passed into this function - * should be at the start of the list you want to parse. You are - * responsible for freeing the array returned from here. - * - * Lists in the so-called "terse" syntax are separated by - * semicolons (;). The lists themselves can be separated by - * either commas (,) or white spaces. - * - * Return: - *------------------------------------------------------------------------- - */ -static void -parse_hsize_list(const char *h_list, subset_d *d) -{ - hsize_t *p_list; - const char *ptr; - unsigned int size_count = 0; - unsigned int i = 0; - unsigned int last_digit = 0; - - if (!h_list || !*h_list || *h_list == ';') - return; - - H5TOOLS_START_DEBUG(" - h_list:%s", h_list); - /* count how many integers do we have */ - for (ptr = h_list; ptr && *ptr && *ptr != ';' && *ptr != ']'; ptr++) - if (HDisdigit(*ptr)) { - if (!last_digit) - /* the last read character wasn't a digit */ - size_count++; - - last_digit = 1; - } - else - last_digit = 0; - - if (size_count == 0) { - /* there aren't any integers to read */ - H5TOOLS_ENDDEBUG("No integers to read"); - return; - } - H5TOOLS_DEBUG("Number integers to read=%ld", size_count); - - /* allocate an array for the integers in the list */ - if ((p_list = (hsize_t *)HDcalloc(size_count, sizeof(hsize_t))) == NULL) - H5TOOLS_INFO("Unable to allocate space for subset data"); - - for (ptr = h_list; i < size_count && ptr && *ptr && *ptr != ';' && *ptr != ']'; ptr++) - if (HDisdigit(*ptr)) { - /* we should have an integer now */ - p_list[i++] = (hsize_t)HDstrtoull(ptr, NULL, 0); - - while (HDisdigit(*ptr)) - /* scroll to end of integer */ - ptr++; - } - d->data = p_list; - d->len = size_count; - H5TOOLS_ENDDEBUG(" "); -} - -/*------------------------------------------------------------------------- - * Function: parse_subset_params - * - * Purpose: Parse the so-called "terse" syntax for specifying subsetting parameters. 
- * - * Return: Success: struct subset_t object - * Failure: NULL - *------------------------------------------------------------------------- - */ -static struct subset_t * -parse_subset_params(const char *dset) -{ - struct subset_t *s = NULL; - char *brace; - - H5TOOLS_START_DEBUG(" - dset:%s", dset); - if ((brace = HDstrrchr(dset, '[')) != NULL) { - *brace++ = '\0'; - - s = (struct subset_t *)HDcalloc(1, sizeof(struct subset_t)); - parse_hsize_list(brace, &s->start); - - while (*brace && *brace != ';') - brace++; - - if (*brace) - brace++; - - parse_hsize_list(brace, &s->stride); - - while (*brace && *brace != ';') - brace++; - - if (*brace) - brace++; - - parse_hsize_list(brace, &s->count); - - while (*brace && *brace != ';') - brace++; - - if (*brace) - brace++; - - parse_hsize_list(brace, &s->block); - } - H5TOOLS_ENDDEBUG(" "); - - return s; -} - /*------------------------------------------------------------------------- * Function: parse_command_line * @@ -355,6 +240,10 @@ parse_command_line(int argc, const char *const *argv, const char **fname1, const } break; + case 'C': + opts->disable_compact_subset = TRUE; + break; + case 'A': opts->exclude_attr_path = 1; @@ -570,13 +459,10 @@ parse_command_line(int argc, const char *const *argv, const char **fname1, const } H5TOOLS_DEBUG("objname2 = %s", *objname2); - /* - * TRILABS_227 is complete except for an issue with printing indices - * the following calls will enable subsetting - */ - opts->sset[0] = parse_subset_params(*objname1); - - opts->sset[1] = parse_subset_params(*objname2); + if (!opts->disable_compact_subset) { + opts->sset[0] = parse_subset_params(*objname1); + opts->sset[1] = parse_subset_params(*objname2); + } H5TOOLS_ENDDEBUG(" "); } @@ -935,6 +821,9 @@ usage(void) * the following will be needed for subsetting */ PRINTVALSTREAM(rawoutstream, " Subsetting options:\n"); + PRINTVALSTREAM(rawoutstream, + " --no-compact-subset Disable compact form of subsetting and allow the use\n"); + PRINTVALSTREAM(rawoutstream, " of \"[\" in dataset names.\n"); PRINTVALSTREAM(rawoutstream, " Subsetting is available by using the fcompact form of subsetting, as follows:\n"); PRINTVALSTREAM(rawoutstream, " obj1 /foo/mydataset[START;STRIDE;COUNT;BLOCK]\n"); diff --git a/tools/src/h5dump/h5dump.c b/tools/src/h5dump/h5dump.c index 1c221243f69..6c1556ab9a6 100644 --- a/tools/src/h5dump/h5dump.c +++ b/tools/src/h5dump/h5dump.c @@ -564,114 +564,6 @@ set_sort_order(const char *form) return iter_order; } -/*------------------------------------------------------------------------- - * Function: parse_hsize_list - * - * Purpose: Parse a list of comma or space separated integers and return - * them in a list. The string being passed into this function - * should be at the start of the list you want to parse. You are - * responsible for freeing the array returned from here. - * - * Lists in the so-called "terse" syntax are separated by - * semicolons (;). The lists themselves can be separated by - * either commas (,) or white spaces. 
- * - * Return: - *------------------------------------------------------------------------- - */ -static void -parse_hsize_list(const char *h_list, subset_d *d) -{ - hsize_t *p_list; - const char *ptr; - unsigned int size_count = 0; - unsigned int i = 0; - unsigned int last_digit = 0; - - if (!h_list || !*h_list || *h_list == ';') - return; - - /* count how many integers do we have */ - for (ptr = h_list; ptr && *ptr && *ptr != ';' && *ptr != ']'; ptr++) - if (HDisdigit(*ptr)) { - if (!last_digit) - /* the last read character wasn't a digit */ - size_count++; - - last_digit = 1; - } - else - last_digit = 0; - - if (size_count == 0) - /* there aren't any integers to read */ - return; - - /* allocate an array for the integers in the list */ - p_list = (hsize_t *)HDcalloc(size_count, sizeof(hsize_t)); - - for (ptr = h_list; i < size_count && ptr && *ptr && *ptr != ';' && *ptr != ']'; ptr++) - if (HDisdigit(*ptr)) { - /* we should have an integer now */ - p_list[i++] = (hsize_t)HDstrtoull(ptr, NULL, 0); - - while (HDisdigit(*ptr)) - /* scroll to end of integer */ - ptr++; - } - d->data = p_list; - d->len = size_count; -} - -/*------------------------------------------------------------------------- - * Function: parse_subset_params - * - * Purpose: Parse the so-called "terse" syntax for specifying subsetting parameters. - * - * Return: Success: struct subset_t object - * Failure: NULL - *------------------------------------------------------------------------- - */ -static struct subset_t * -parse_subset_params(const char *dset) -{ - struct subset_t *s = NULL; - char *brace; - - if (!dump_opts.disable_compact_subset && ((brace = HDstrrchr(dset, '[')) != NULL)) { - *brace++ = '\0'; - - s = (struct subset_t *)HDcalloc(1, sizeof(struct subset_t)); - parse_hsize_list(brace, &s->start); - - while (*brace && *brace != ';') - brace++; - - if (*brace) - brace++; - - parse_hsize_list(brace, &s->stride); - - while (*brace && *brace != ';') - brace++; - - if (*brace) - brace++; - - parse_hsize_list(brace, &s->count); - - while (*brace && *brace != ';') - brace++; - - if (*brace) - brace++; - - parse_hsize_list(brace, &s->block); - } - - return s; -} - /*------------------------------------------------------------------------- * Function: parse_mask_list * @@ -957,10 +849,11 @@ parse_command_line(int argc, const char *const *argv) for (i = 0; i < argc; i++) if (!hand[i].func) { - hand[i].func = handle_datasets; - hand[i].obj = HDstrdup(H5_optarg); - hand[i].subset_info = parse_subset_params(hand[i].obj); - last_dset = &hand[i]; + hand[i].func = handle_datasets; + hand[i].obj = HDstrdup(H5_optarg); + if (!dump_opts.disable_compact_subset) + hand[i].subset_info = parse_subset_params(hand[i].obj); + last_dset = &hand[i]; break; } diff --git a/tools/test/h5diff/testfiles/h5diff_10.txt b/tools/test/h5diff/testfiles/h5diff_10.txt index b1d607e7359..fe3474c076c 100644 --- a/tools/test/h5diff/testfiles/h5diff_10.txt +++ b/tools/test/h5diff/testfiles/h5diff_10.txt @@ -157,6 +157,8 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]] symbolic links are compared.). Subsetting options: + --no-compact-subset Disable compact form of subsetting and allow the use + of "[" in dataset names. 
Subsetting is available by using the fcompact form of subsetting, as follows: obj1 /foo/mydataset[START;STRIDE;COUNT;BLOCK] It is not required to use all parameters, but until the last parameter value used, diff --git a/tools/test/h5diff/testfiles/h5diff_600.txt b/tools/test/h5diff/testfiles/h5diff_600.txt index 5236964cbbf..eaf9c15ac19 100644 --- a/tools/test/h5diff/testfiles/h5diff_600.txt +++ b/tools/test/h5diff/testfiles/h5diff_600.txt @@ -157,6 +157,8 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]] symbolic links are compared.). Subsetting options: + --no-compact-subset Disable compact form of subsetting and allow the use + of "[" in dataset names. Subsetting is available by using the fcompact form of subsetting, as follows: obj1 /foo/mydataset[START;STRIDE;COUNT;BLOCK] It is not required to use all parameters, but until the last parameter value used, diff --git a/tools/test/h5diff/testfiles/h5diff_603.txt b/tools/test/h5diff/testfiles/h5diff_603.txt index e7bad17a086..aa0697aa6fd 100644 --- a/tools/test/h5diff/testfiles/h5diff_603.txt +++ b/tools/test/h5diff/testfiles/h5diff_603.txt @@ -158,6 +158,8 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]] symbolic links are compared.). Subsetting options: + --no-compact-subset Disable compact form of subsetting and allow the use + of "[" in dataset names. Subsetting is available by using the fcompact form of subsetting, as follows: obj1 /foo/mydataset[START;STRIDE;COUNT;BLOCK] It is not required to use all parameters, but until the last parameter value used, diff --git a/tools/test/h5diff/testfiles/h5diff_606.txt b/tools/test/h5diff/testfiles/h5diff_606.txt index 410528db1aa..87d6b012926 100644 --- a/tools/test/h5diff/testfiles/h5diff_606.txt +++ b/tools/test/h5diff/testfiles/h5diff_606.txt @@ -158,6 +158,8 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]] symbolic links are compared.). Subsetting options: + --no-compact-subset Disable compact form of subsetting and allow the use + of "[" in dataset names. Subsetting is available by using the fcompact form of subsetting, as follows: obj1 /foo/mydataset[START;STRIDE;COUNT;BLOCK] It is not required to use all parameters, but until the last parameter value used, diff --git a/tools/test/h5diff/testfiles/h5diff_612.txt b/tools/test/h5diff/testfiles/h5diff_612.txt index ffc191e4c1a..a44d94abc83 100644 --- a/tools/test/h5diff/testfiles/h5diff_612.txt +++ b/tools/test/h5diff/testfiles/h5diff_612.txt @@ -158,6 +158,8 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]] symbolic links are compared.). Subsetting options: + --no-compact-subset Disable compact form of subsetting and allow the use + of "[" in dataset names. Subsetting is available by using the fcompact form of subsetting, as follows: obj1 /foo/mydataset[START;STRIDE;COUNT;BLOCK] It is not required to use all parameters, but until the last parameter value used, diff --git a/tools/test/h5diff/testfiles/h5diff_615.txt b/tools/test/h5diff/testfiles/h5diff_615.txt index 2ff50e9a99b..5dc9cd3716c 100644 --- a/tools/test/h5diff/testfiles/h5diff_615.txt +++ b/tools/test/h5diff/testfiles/h5diff_615.txt @@ -158,6 +158,8 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]] symbolic links are compared.). Subsetting options: + --no-compact-subset Disable compact form of subsetting and allow the use + of "[" in dataset names. 
Subsetting is available by using the fcompact form of subsetting, as follows: obj1 /foo/mydataset[START;STRIDE;COUNT;BLOCK] It is not required to use all parameters, but until the last parameter value used, diff --git a/tools/test/h5diff/testfiles/h5diff_621.txt b/tools/test/h5diff/testfiles/h5diff_621.txt index 7db49590677..d7998b58ba6 100644 --- a/tools/test/h5diff/testfiles/h5diff_621.txt +++ b/tools/test/h5diff/testfiles/h5diff_621.txt @@ -158,6 +158,8 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]] symbolic links are compared.). Subsetting options: + --no-compact-subset Disable compact form of subsetting and allow the use + of "[" in dataset names. Subsetting is available by using the fcompact form of subsetting, as follows: obj1 /foo/mydataset[START;STRIDE;COUNT;BLOCK] It is not required to use all parameters, but until the last parameter value used, diff --git a/tools/test/h5diff/testfiles/h5diff_622.txt b/tools/test/h5diff/testfiles/h5diff_622.txt index db77f88e3c4..badfddd6f37 100644 --- a/tools/test/h5diff/testfiles/h5diff_622.txt +++ b/tools/test/h5diff/testfiles/h5diff_622.txt @@ -158,6 +158,8 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]] symbolic links are compared.). Subsetting options: + --no-compact-subset Disable compact form of subsetting and allow the use + of "[" in dataset names. Subsetting is available by using the fcompact form of subsetting, as follows: obj1 /foo/mydataset[START;STRIDE;COUNT;BLOCK] It is not required to use all parameters, but until the last parameter value used, diff --git a/tools/test/h5diff/testfiles/h5diff_623.txt b/tools/test/h5diff/testfiles/h5diff_623.txt index 11739be23e2..6ddd3da1bff 100644 --- a/tools/test/h5diff/testfiles/h5diff_623.txt +++ b/tools/test/h5diff/testfiles/h5diff_623.txt @@ -158,6 +158,8 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]] symbolic links are compared.). Subsetting options: + --no-compact-subset Disable compact form of subsetting and allow the use + of "[" in dataset names. Subsetting is available by using the fcompact form of subsetting, as follows: obj1 /foo/mydataset[START;STRIDE;COUNT;BLOCK] It is not required to use all parameters, but until the last parameter value used, diff --git a/tools/test/h5diff/testfiles/h5diff_624.txt b/tools/test/h5diff/testfiles/h5diff_624.txt index 6af9fe3a488..083632a9d84 100644 --- a/tools/test/h5diff/testfiles/h5diff_624.txt +++ b/tools/test/h5diff/testfiles/h5diff_624.txt @@ -158,6 +158,8 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]] symbolic links are compared.). Subsetting options: + --no-compact-subset Disable compact form of subsetting and allow the use + of "[" in dataset names. Subsetting is available by using the fcompact form of subsetting, as follows: obj1 /foo/mydataset[START;STRIDE;COUNT;BLOCK] It is not required to use all parameters, but until the last parameter value used, From a83febf340cfcad3a7cb3937c1a8d51bf04882b0 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 18 Apr 2023 11:35:56 -0700 Subject: [PATCH 147/231] Clean up H5Oefl decode function (#2755) * Use the H5_IS_BUFFER_OVERFLOW macro * Attempt to close local heap on errors if left open --- src/H5Oefl.c | 55 ++++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/src/H5Oefl.c b/src/H5Oefl.c index 35e2d9f145c..16d69e02dd4 100644 --- a/src/H5Oefl.c +++ b/src/H5Oefl.c @@ -10,11 +10,6 @@ * help@hdfgroup.org. 
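This patch and the decode clean-ups that follow replace hand-written pointer checks of the form (p + n - 1) > p_end with the H5_IS_BUFFER_OVERFLOW() macro before every read. The key invariant is that p_end points at the last valid byte of the message (it is computed as p + p_size - 1), so a request for n more bytes overflows once p + n - 1 moves past it. The self-contained stand-in below illustrates the idiom only; it is not the library's definition of the macro, and the decode helper is hypothetical.

    #include <stdint.h>

    /* Stand-in for H5_IS_BUFFER_OVERFLOW(): assumed to express the same
     * end-of-buffer comparison as the hand-written checks it replaces here. */
    #define BUF_OVERFLOW(p, n, p_end) (((p) + (n)-1) > (p_end))

    /* Decode one little-endian 16-bit field the way the cleaned-up routines do. */
    static int
    decode_u16(const uint8_t **pp, const uint8_t *p_end, uint16_t *out)
    {
        const uint8_t *p = *pp;

        if (BUF_OVERFLOW(p, 2, p_end)) /* would a 2-byte read run off the end? */
            return -1;                 /* the library raises H5E_OVERFLOW here */

        *out = (uint16_t)(((unsigned)p[1] << 8) | p[0]); /* like UINT16DECODE */
        *pp  = p + 2;                  /* advance the cursor past the field   */
        return 0;
    }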
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * Programmer: Robb Matzke - * Tuesday, November 25, 1997 - */ - #include "H5Omodule.h" /* This source code file is part of the H5O module */ #include "H5private.h" /* Generic Functions */ @@ -64,14 +59,14 @@ const H5O_msg_class_t H5O_MSG_EFL[1] = {{ /*------------------------------------------------------------------------- * Function: H5O__efl_decode * - * Purpose: Decode an external file list message and return a pointer to - * the message (and some other data). + * Purpose: Decode an external file list message and return a pointer to + * the message (and some other data). * - * We allow zero dimension size starting from the 1.8.7 release. - * The dataset size of external storage can be zero. + * We allow zero dimension size starting from the 1.8.7 release. + * The dataset size of external storage can be zero. * - * Return: Success: Pointer to a new message struct - * Failure: NULL + * Return: Success: Pointer to a new message struct + * Failure: NULL *------------------------------------------------------------------------- */ static void * @@ -90,36 +85,38 @@ H5O__efl_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED /* Check args */ HDassert(f); HDassert(p); - HDassert(p_size > 0); if (NULL == (mesg = (H5O_efl_t *)H5MM_calloc(sizeof(H5O_efl_t)))) HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "memory allocation failed") /* Version (1 byte) */ - if ((p + 1 - 1) > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); version = *p++; if (version != H5O_EFL_VERSION) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for external file list message") /* Reserved (3 bytes) */ - if ((p + 3 - 1) > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, 3, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); p += 3; /* Number of slots (2x 2 bytes) */ - if ((p + 4 - 1) > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT16DECODE(p, mesg->nalloc); if (mesg->nalloc <= 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad number of allocated slots when parsing efl msg") + + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT16DECODE(p, mesg->nused); if (mesg->nused > mesg->nalloc) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad number of in-use slots when parsing efl msg") /* Heap address */ - if ((p + H5F_SIZEOF_ADDR(f) - 1) > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(mesg->heap_addr)); if (H5F_addr_defined(mesg->heap_addr) == FALSE) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad local heap address when parsing efl msg") @@ -143,8 +140,8 @@ H5O__efl_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED for (size_t u = 0; u < mesg->nused; u++) { /* Name */ - if ((p + H5F_SIZEOF_SIZE(f) - 1) > p_end) - 
HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_DECODE_LENGTH(f, p, mesg->slot[u].name_offset); if ((s = (const char *)H5HL_offset_into(heap, mesg->slot[u].name_offset)) == NULL) @@ -156,13 +153,13 @@ H5O__efl_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "string duplication failed") /* File offset */ - if ((p + H5F_SIZEOF_SIZE(f) - 1) > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_DECODE_LENGTH(f, p, mesg->slot[u].offset); /* Size */ - if ((p + H5F_SIZEOF_SIZE(f) - 1) > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_DECODE_LENGTH(f, p, mesg->slot[u].size); } @@ -173,7 +170,7 @@ H5O__efl_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED ret_value = mesg; done: - if (ret_value == NULL) + if (ret_value == NULL) { if (mesg != NULL) { if (mesg->slot != NULL) { for (size_t u = 0; u < mesg->nused; u++) @@ -182,6 +179,10 @@ H5O__efl_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED } H5MM_xfree(mesg); } + if (heap != NULL) + if (H5HL_unprotect(heap) < 0) + HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, NULL, "unable to unprotect local heap") + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__efl_decode() */ From 6fe6110475c2a4e98239094432c1f7415a8f8c4f Mon Sep 17 00:00:00 2001 From: Larry Knox Date: Tue, 18 Apr 2023 15:06:24 -0500 Subject: [PATCH 148/231] Add new version of COPYING_LBNL_HDF5. (#2773) --- COPYING_LBNL_HDF5 | 108 +++++++++++++++++++++++++--------------------- 1 file changed, 60 insertions(+), 48 deletions(-) diff --git a/COPYING_LBNL_HDF5 b/COPYING_LBNL_HDF5 index 16fba5d1768..ebc00df1b0c 100644 --- a/COPYING_LBNL_HDF5 +++ b/COPYING_LBNL_HDF5 @@ -1,49 +1,61 @@ -Copyright Notice and License Terms for -HDF5 (Hierarchical Data Format 5) Software Library and Utilities ------------------------------------------------------------------------------ - -HDF5 (Hierarchical Data Format 5) -Copyright (c) 2016, The Regents of the University of California, through -Lawrence Berkeley National Laboratory (subject to receipt of any required -approvals from the U.S. Dept. of Energy). - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions, and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions, and the following disclaimer in the documentation - and/or materials provided with the distribution. - -3. Neither the name of the University of California, Lawrence Berkeley -National Laboratory, U.S. Dept. of Energy nor the names of its contributors -may be used to endorse or promote products derived from this software without -specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. - -You are under no obligation whatsoever to provide any bug fixes, patches, -or upgrades to the features, functionality or performance of the source -code ("Enhancements") to anyone; however, if you choose to make your -Enhancements available either publicly, or directly to Lawrence Berkeley -National Laboratory, without imposing a separate written license agreement -for such Enhancements, then you hereby grant the following license: -a non-exclusive, royalty-free perpetual license to install, use, modify, -prepare derivative works, incorporate into other computer software, -distribute, and sublicense such enhancements or derivative works thereof, -in binary and source code form. +**************************** +*** Copyright Notice *** +Hierarchical Data Format 5 (HDF5) v1.12.0 Copyright (c) 2020, HDF Group and The +Regents of the University of California, through Lawrence Berkeley National +Laboratory (subject to receipt of any required approvals from the U.S. Dept. of +Energy). All rights reserved. + +If you have questions about your rights to use or distribute this software, +please contact Berkeley Lab's Intellectual Property Office at IPO@lbl.gov. + +NOTICE. This Software was partially developed under funding from the U.S. +Department of Energy and the U.S. Government consequently retains certain +rights. As such, the U.S. Government has been granted for itself and others +acting on its behalf a paid-up, nonexclusive, irrevocable, worldwide license in +the Software to reproduce, distribute copies to the public, prepare derivative +works, and perform publicly and display publicly, and to permit others to do so. + +**************************** +*** License Agreement *** + +Hierarchical Data Format 5 (HDF5) v1.12.0 Copyright (c) 2020, HDF Group and The +Regents of the University of California, through Lawrence Berkeley National +Laboratory (subject to receipt of any required approvals from the U.S. Dept. of +Energy). All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +(1) Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +(2) Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +(3) Neither the name of the HDF Group, University of California, Lawrence +Berkeley National Laboratory, U.S. Dept. of Energy, nor the names of its +contributors may be used to endorse or promote products derived from this +software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +You are under no obligation whatsoever to provide any bug fixes, patches, or +upgrades to the features, functionality or performance of the source code +("Enhancements") to anyone; however, if you choose to make your Enhancements +available either publicly, or directly to Lawrence Berkeley National Laboratory, +without imposing a separate written license agreement for such Enhancements, +then you hereby grant the following license: a non-exclusive, royalty-free +perpetual license to install, use, modify, prepare derivative works, incorporate +into other computer software, distribute, and sublicense such enhancements or +derivative works thereof, in binary and source code form. From 3fccfce18393ef8b3a61445c457ca2b8fc06ccf1 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 18 Apr 2023 14:23:34 -0700 Subject: [PATCH 149/231] Sanitize the ohdr modification time decode fxns (#2762) --- src/H5Omtime.c | 74 ++++++++++++++++++++++++++------------------------ 1 file changed, 38 insertions(+), 36 deletions(-) diff --git a/src/H5Omtime.c b/src/H5Omtime.c index 5a491ddb1cf..8205a01cfe8 100644 --- a/src/H5Omtime.c +++ b/src/H5Omtime.c @@ -10,10 +10,8 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* Programmer: Robb Matzke - * Friday, July 24, 1998 - * - * Purpose: The object modification time message. +/* + * Purpose: The object modification time message */ #include "H5Omodule.h" /* This source code file is part of the H5O module */ @@ -93,46 +91,49 @@ const H5O_msg_class_t H5O_MSG_MTIME_NEW[1] = {{ H5FL_DEFINE(time_t); /*------------------------------------------------------------------------- - * Function: H5O__mtime_new_decode + * Function: H5O__mtime_new_decode * * Purpose: Decode a new modification time message and return a pointer to * a new time_t value. * + * This version of the modification time was used in HDF5 + * 1.6.1 and later. + * * The new modification time message format was added due to the * performance overhead of the old format. * - * Return: Success: Ptr to new message in native struct. 
- * - * Failure: NULL - * - * Programmer: Quincey Koziol - * Jan 3 2002 - * + * Return: Success: Pointer to new message in native struct + * Failure: NULL *------------------------------------------------------------------------- */ static void * -H5O__mtime_new_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, - unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, - size_t H5_ATTR_UNUSED p_size, const uint8_t *p) +H5O__mtime_new_decode(H5F_t H5_ATTR_NDEBUG_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, + unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, + const uint8_t *p) { - time_t *mesg; - uint32_t tmp_time; /* Temporary copy of the time */ - void *ret_value = NULL; /* Return value */ + const uint8_t *p_end = p + p_size - 1; /* End of input buffer */ + time_t *mesg = NULL; + uint32_t tmp_time; /* Temporary copy of the time */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); - /* decode */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (*p++ != H5O_MTIME_VERSION) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for mtime message"); /* Skip reserved bytes */ + if (H5_IS_BUFFER_OVERFLOW(p, 3, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); p += 3; /* Get the time_t from the file */ + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT32DECODE(p, tmp_time); /* The return value */ @@ -153,35 +154,36 @@ H5O__mtime_new_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, * Purpose: Decode a modification time message and return a pointer to a * new time_t value. * + * This version of the modification time was used in HDF5 + * 1.6.0 and earlier. + * * The new modification time message format was added due to the * performance overhead of the old format. * - * Return: Success: Ptr to new message in native struct. 
- * - * Failure: NULL - * - * Programmer: Robb Matzke - * Jul 24 1998 - * + * Return: Success: Pointer to new message in native struct + * Failure: NULL *------------------------------------------------------------------------- */ static void * -H5O__mtime_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) +H5O__mtime_decode(H5F_t H5_ATTR_NDEBUG_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, + unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, + const uint8_t *p) { - time_t *mesg, the_time; - struct tm tm; - int i; /* Local index variable */ - void *ret_value = NULL; /* Return value */ + const uint8_t *p_end = p + p_size - 1; /* End of input buffer */ + time_t *mesg = NULL; + time_t the_time; + struct tm tm; + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); - /* decode */ - for (i = 0; i < 14; i++) + /* Buffer should have 14 message bytes and 2 reserved bytes */ + if (H5_IS_BUFFER_OVERFLOW(p, 16, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + for (int i = 0; i < 14; i++) if (!HDisdigit(p[i])) HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "badly formatted modification time message") From 580ea1f946e6085758518ebec86c0c88d58de3b1 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 19 Apr 2023 08:20:46 -0700 Subject: [PATCH 150/231] Sanitize the H5Oname decode function (#2757) * Add bounds checking * Add memory cleanup --- src/H5Oname.c | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/src/H5Oname.c b/src/H5Oname.c index a1e239b65b6..10ea365e2b1 100644 --- a/src/H5Oname.c +++ b/src/H5Oname.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5Oname.c - * Aug 12 1997 - * Robb Matzke * - * Purpose: Object name message. + * Purpose: Object name (comment) message * *------------------------------------------------------------------------- */ @@ -67,41 +65,37 @@ const H5O_msg_class_t H5O_MSG_NAME[1] = {{ * Purpose: Decode a name message and return a pointer to a new * native message struct. * - * Return: Success: Ptr to new message in native struct. - * - * Failure: NULL - * - * Programmer: Robb Matzke - * Aug 12 1997 - * + * Return: Success: Ptr to new message in native struct. 
+ * Failure: NULL *------------------------------------------------------------------------- */ static void * -H5O__name_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) +H5O__name_decode(H5F_t H5_ATTR_NDEBUG_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, + unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, + const uint8_t *p) { - H5O_name_t *mesg; - void *ret_value = NULL; /* Return value */ + H5O_name_t *mesg = NULL; + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); - /* decode */ if (NULL == (mesg = (H5O_name_t *)H5MM_calloc(sizeof(H5O_name_t)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") - if (NULL == (mesg->s = (char *)H5MM_strdup((const char *)p))) + + if (NULL == (mesg->s = (char *)H5MM_strndup((const char *)p, p_size - 1))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") - /* Set return value */ ret_value = mesg; done: if (NULL == ret_value) - if (mesg) - mesg = (H5O_name_t *)H5MM_xfree(mesg); + if (mesg) { + H5MM_xfree(mesg->s); + H5MM_xfree(mesg); + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__name_decode() */ From 78f152b58ee1d8e91b0c03f14d7e6bb62972d0b9 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 19 Apr 2023 08:21:25 -0700 Subject: [PATCH 151/231] Sanitize the attribute object header msg code (#2749) Adds: * Bounds checks on buffer access * Better memory cleanup --- src/H5Oattr.c | 114 +++++++++++++++++++++++++++++++----------------- test/titerate.c | 7 ++- 2 files changed, 80 insertions(+), 41 deletions(-) diff --git a/src/H5Oattr.c b/src/H5Oattr.c index e431cd2e620..1d48a7859e8 100644 --- a/src/H5Oattr.c +++ b/src/H5Oattr.c @@ -112,95 +112,125 @@ H5FL_EXTERN(H5S_extent_t); Pointer to the new message in native order on success, NULL on failure DESCRIPTION This function decodes the "raw" disk form of a attribute message - into a struct in memory native format. The struct is allocated within this - function using malloc() and is returned to the caller. + into a struct in memory native format. The struct is allocated within this + function using malloc() and is returned to the caller. 
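Another recurring change in these decode clean-ups is that strings read from the file are no longer trusted to be NUL-terminated: the old H5MM_strdup((const char *)p) calls scan for a terminator wherever it happens to be, while the H5MM_strndup(p, size - 1) calls added here bound the scan by the decoded message or name length. A self-contained stand-in, assuming the usual strndup()-style contract (copy at most n bytes, always terminate):

    #include <stdlib.h>
    #include <string.h>

    static char *
    bounded_dup(const char *s, size_t n)
    {
        size_t len = 0;
        char  *out;

        while (len < n && s[len] != '\0') /* never look beyond the first n bytes */
            len++;

        if (NULL == (out = malloc(len + 1)))
            return NULL;

        memcpy(out, s, len);
        out[len] = '\0'; /* the copy is always terminated, even if the source was not */
        return out;
    }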
--------------------------------------------------------------------------*/ static void * H5O__attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, unsigned *ioflags, size_t p_size, const uint8_t *p) { - H5A_t *attr = NULL; - H5S_extent_t *extent; /*extent dimensionality information */ - size_t name_len; /*attribute name length */ - size_t dt_size; /* Datatype size */ - hssize_t sds_size; /* Signed Dataspace size */ - hsize_t ds_size; /* Dataspace size */ - unsigned flags = 0; /* Attribute flags */ - H5A_t *ret_value = NULL; /* Return value */ + H5A_t *attr = NULL; + const uint8_t *p_end = p + p_size - 1; /* End of input buffer */ + size_t delta = 0; /* Amount to move p in next field */ + H5S_extent_t *extent = NULL; /* Extent dimensionality information */ + size_t name_len; /* Attribute name length */ + size_t dt_size; /* Datatype size */ + hssize_t sds_size; /* Signed Dataspace size */ + hsize_t ds_size; /* Dataspace size */ + unsigned flags = 0; /* Attribute flags */ + H5A_t *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); if (NULL == (attr = H5FL_CALLOC(H5A_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") - if (NULL == (attr->shared = H5FL_CALLOC(H5A_shared_t))) HGOTO_ERROR(H5E_FILE, H5E_NOSPACE, NULL, "can't allocate shared attr structure") /* Version number */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); attr->shared->version = *p++; if (attr->shared->version < H5O_ATTR_VERSION_1 || attr->shared->version > H5O_ATTR_VERSION_LATEST) HGOTO_ERROR(H5E_ATTR, H5E_CANTLOAD, NULL, "bad version number for attribute message") /* Get the flags byte if we have a later version of the attribute */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (attr->shared->version >= H5O_ATTR_VERSION_2) { flags = *p++; /* Check for unknown flag */ if (flags & (unsigned)~H5O_ATTR_FLAG_ALL) HGOTO_ERROR(H5E_ATTR, H5E_CANTLOAD, NULL, "unknown flag for attribute message") - } /* end if */ + } else - p++; /* Byte is unused when version<2 */ + p++; /* Byte is unused when version < 2 */ - /* - * Decode the sizes of the parts of the attribute. The sizes stored in + /* Decode the sizes of the parts of the attribute. The sizes stored in * the file are exact but the parts are aligned on 8-byte boundaries. */ - UINT16DECODE(p, name_len); /*including null*/ + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + UINT16DECODE(p, name_len); /* Including null */ + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT16DECODE(p, attr->shared->dt_size); + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT16DECODE(p, attr->shared->ds_size); - /* - * Decode the character encoding for the name for versions 3 or later, + /* Decode the character encoding for the name for versions 3 or later, * as well as some reserved bytes. 
*/ - if (attr->shared->version >= H5O_ATTR_VERSION_3) + if (attr->shared->version >= H5O_ATTR_VERSION_3) { + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); attr->shared->encoding = (H5T_cset_t)*p++; + } - /* Decode and store the name */ - if (NULL == (attr->shared->name = H5MM_strdup((const char *)p))) + /* Decode and store the name + * + * NOTE: If the buffer overflow error message changes, test_corrupted_attnamelen() + * in titerate.c will fail since it looks for it explicitly. + */ + if (H5_IS_BUFFER_OVERFLOW(p, name_len, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + if (NULL == (attr->shared->name = H5MM_strndup((const char *)p, name_len - 1))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Make an attempt to detect corrupted name or name length - HDFFV-10588 */ - if (name_len != (HDstrlen(attr->shared->name) + 1)) + if (name_len != (HDstrnlen(attr->shared->name, name_len) + 1)) HGOTO_ERROR(H5E_ATTR, H5E_CANTDECODE, NULL, "attribute name has different length than stored length") + /* Determine pointer movement and check if it's valid */ if (attr->shared->version < H5O_ATTR_VERSION_2) - p += H5O_ALIGN_OLD(name_len); /* advance the memory pointer */ + delta = H5O_ALIGN_OLD(name_len); else - p += name_len; /* advance the memory pointer */ + delta = name_len; + if (H5_IS_BUFFER_OVERFLOW(p, delta, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + p += delta; /* Decode the attribute's datatype */ + if (H5_IS_BUFFER_OVERFLOW(p, attr->shared->dt_size, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (NULL == (attr->shared->dt = (H5T_t *)(H5O_MSG_DTYPE->decode)( f, open_oh, ((flags & H5O_ATTR_FLAG_TYPE_SHARED) ? H5O_MSG_FLAG_SHARED : 0), ioflags, attr->shared->dt_size, p))) HGOTO_ERROR(H5E_ATTR, H5E_CANTDECODE, NULL, "can't decode attribute datatype") + + /* Determine pointer movement and check if it's valid */ if (attr->shared->version < H5O_ATTR_VERSION_2) - p += H5O_ALIGN_OLD(attr->shared->dt_size); + delta = H5O_ALIGN_OLD(attr->shared->dt_size); else - p += attr->shared->dt_size; + delta = attr->shared->dt_size; + if (H5_IS_BUFFER_OVERFLOW(p, delta, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + p += delta; - /* decode the attribute dataspace. It can be shared in versions >= 3 + /* Decode the attribute dataspace. It can be shared in versions >= 3 * What's actually shared, though, is only the extent. */ if (NULL == (attr->shared->ds = H5FL_CALLOC(H5S_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Decode attribute's dataspace extent */ + if (H5_IS_BUFFER_OVERFLOW(p, attr->shared->ds_size, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if ((extent = (H5S_extent_t *)(H5O_MSG_SDSPACE->decode)( f, open_oh, ((flags & H5O_ATTR_FLAG_SPACE_SHARED) ? 
H5O_MSG_FLAG_SHARED : 0), ioflags, attr->shared->ds_size, p)) == NULL) @@ -216,10 +246,14 @@ H5O__attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, u if (H5S_select_all(attr->shared->ds, FALSE) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, NULL, "unable to set all selection") + /* Determine pointer movement and check if it's valid */ if (attr->shared->version < H5O_ATTR_VERSION_2) - p += H5O_ALIGN_OLD(attr->shared->ds_size); + delta = H5O_ALIGN_OLD(attr->shared->ds_size); else - p += attr->shared->ds_size; + delta = attr->shared->ds_size; + if (H5_IS_BUFFER_OVERFLOW(p, delta, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + p += delta; /* Get the datatype & dataspace sizes */ if (0 == (dt_size = H5T_get_size(attr->shared->dt))) @@ -234,17 +268,18 @@ H5O__attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, u if ((attr->shared->data_size / dt_size) != ds_size) HGOTO_ERROR(H5E_RESOURCE, H5E_OVERFLOW, NULL, "data size exceeds addressable range") - /* Go get the data */ + /* Get the data */ if (attr->shared->data_size) { /* Ensure that data size doesn't exceed buffer size, in case of - it's being corrupted in the file */ - if (attr->shared->data_size > p_size) - HGOTO_ERROR(H5E_RESOURCE, H5E_OVERFLOW, NULL, "data size exceeds buffer size") + * it's being corrupted in the file + */ + if (H5_IS_BUFFER_OVERFLOW(p, attr->shared->data_size, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (NULL == (attr->shared->data = H5FL_BLK_MALLOC(attr_buf, attr->shared->data_size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") H5MM_memcpy(attr->shared->data, p, attr->shared->data_size); - } /* end if */ + } /* Increment the reference count for this object header message in cache(compact storage) or for the object from dense storage. */ @@ -254,15 +289,16 @@ H5O__attr_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, u ret_value = attr; done: - if (NULL == ret_value) + if (NULL == ret_value) { if (attr) { - /* Free any dynamically allocated items */ if (attr->shared) if (H5A__shared_free(attr) < 0) HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, NULL, "can't release attribute info") - attr = H5FL_FREE(H5A_t, attr); - } /* end if */ + } + if (extent) + extent = H5FL_FREE(H5S_extent_t, extent); + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__attr_decode() */ diff --git a/test/titerate.c b/test/titerate.c index 1e23ade10bf..82d8e9ff983 100644 --- a/test/titerate.c +++ b/test/titerate.c @@ -1011,8 +1011,11 @@ test_corrupted_attnamelen(void) hbool_t driver_is_default_compatible; const char *testfile = H5_get_srcdir_filename(CORRUPTED_ATNAMELEN_FILE); /* Corrected test file name */ - const char *err_message = "attribute name has different length than stored length"; - /* the error message produced when the failure occurs */ + /* The error message produced when the failure occurs + * + * FIXME: This is incredibly fragile! + */ + const char *err_message = "ran off end of input buffer while decoding"; /* Output message about test being performed */ MESSAGE(5, ("Testing the Handling of Corrupted Attribute's Name Length\n")); From d8c6d7d7277a8a521ebe25d1f6278e57127a53a0 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Fri, 21 Apr 2023 11:07:48 -0500 Subject: [PATCH 152/231] Added Fortran Async APIs (#2715) H5A, H5D, H5ES, H5G, H5F, H5L and H5O async APIs were added. 
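The new Fortran routines wrap the corresponding C async calls and follow the same event-set model: operations are queued against an event set and completed later by a single wait. A minimal C sketch of that pattern is below (file and dataset names are placeholders and error checking is omitted); with the native VOL connector the queued operations simply run synchronously, so an async-capable connector is needed for real overlap.

    #include "hdf5.h"

    static int
    async_write_sketch(const char *filename, const int *buf)
    {
        size_t  num_in_progress = 0;
        hbool_t err_occurred    = 0;

        hid_t es_id   = H5EScreate();                 /* event set collecting the requests */
        hid_t file_id = H5Fopen_async(filename, H5F_ACC_RDWR, H5P_DEFAULT, es_id);
        hid_t dset_id = H5Dopen_async(file_id, "/dset", H5P_DEFAULT, es_id);

        /* Only scheduled here: buf must stay valid until the wait completes. */
        H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf, es_id);

        H5Dclose_async(dset_id, es_id);
        H5Fclose_async(file_id, es_id);

        /* Block until everything queued on es_id has completed or failed. */
        H5ESwait(es_id, H5ES_WAIT_FOREVER, &num_in_progress, &err_occurred);
        H5ESclose(es_id);

        return err_occurred ? -1 : 0;
    }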
--- doxygen/aliases | 4 + fortran/src/CMakeLists.txt | 4 + fortran/src/H5Af.c | 492 ---------- fortran/src/H5Aff.F90 | 1310 ++++++++++++++++++++----- fortran/src/H5Df.c | 224 ----- fortran/src/H5Dff.F90 | 789 +++++++++++++--- fortran/src/H5ESff.F90 | 296 ++++++ fortran/src/H5Eff.F90 | 4 +- fortran/src/H5Ff.c | 167 ---- fortran/src/H5Fff.F90 | 421 ++++++++- fortran/src/H5Gf.c | 348 ------- fortran/src/H5Gff.F90 | 936 +++++++++++++++--- fortran/src/H5Lf.c | 320 ------- fortran/src/H5Lff.F90 | 687 +++++++++++--- fortran/src/H5Of.c | 225 +---- fortran/src/H5Off.F90 | 513 ++++++++-- fortran/src/H5Pff.F90 | 62 +- fortran/src/H5Rff.F90 | 25 +- fortran/src/H5Sff.F90 | 14 +- fortran/src/H5Tff.F90 | 12 +- fortran/src/H5VLff.F90 | 27 +- fortran/src/H5_buildiface.F90 | 36 +- fortran/src/H5_f.c | 19 +- fortran/src/H5_ff.F90 | 29 +- fortran/src/H5config_f.inc.cmake | 6 + fortran/src/H5config_f.inc.in | 3 + fortran/src/H5f90global.F90 | 31 +- fortran/src/H5f90proto.h | 16 +- fortran/src/HDF5.F90 | 1 + fortran/src/Makefile.am | 9 +- fortran/src/hdf5_fortrandll.def.in | 52 +- fortran/test/Makefile.am | 2 +- fortran/test/tH5A_1_8.F90 | 5 + fortran/test/tH5G_1_8.F90 | 162 +++- fortran/test/tf.F90 | 86 +- fortran/testpar/CMakeLists.txt | 40 + fortran/testpar/Makefile.am | 3 +- fortran/testpar/async.F90 | 1417 ++++++++++++++++++++++++++++ fortran/testpar/ptest.F90 | 5 + fortran/testpar/subfiling.F90 | 14 +- release_docs/RELEASE.txt | 3 +- 41 files changed, 6152 insertions(+), 2667 deletions(-) create mode 100644 fortran/src/H5ESff.F90 create mode 100644 fortran/testpar/async.F90 diff --git a/doxygen/aliases b/doxygen/aliases index bb31325315e..27090e6f3f1 100644 --- a/doxygen/aliases +++ b/doxygen/aliases @@ -380,3 +380,7 @@ ALIASES += obj_info_fields="
    Direct#H5FD_DIRECTThis is the #H5FD_SEC2 driver except data is written to or + * This is the #H5FD_SEC2 driver, except data is written to or * read from the file synchronously without being cached by the * system.H5Pset_fapl_direct()Multi#H5FD_MULTIWith this driver, data can be stored in multiple files - * according to the type of the data. I/O might work better if + * according to the type of data. I/O might work better if * data is stored in separate files based on the type of data. * The Split driver is a special case of this driver.H5Pset_fapl_multi()
    + + + + + + + + + + + + @@ -952,4 +964,3 @@ encoding for object names. //! [acpl_table] * */ - \ No newline at end of file diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index ad51ff1e734..89a821c3108 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -105,6 +105,16 @@ New Features machine's node-local storage while placing the subfiling configuration file on a file system readable by all machine nodes. + - Added H5Pset_selection_io(), H5Pget_selection_io(), and + H5Pget_no_selection_io_cause() API functions to manage the selection I/O + feature. This can be used to enable collective I/O with type conversion, + or it can be used with custom VFDs that support vector or selection I/O. + + - Added H5Pset_modify_write_buf() and H5Pget_modify_write_buf() API + functions to allow the library to modify the contents of write buffers, in + order to avoid malloc/memcpy. Currently only used for type conversion + with selection I/O. + Parallel Library: ----------------- diff --git a/src/H5.c b/src/H5.c index 259e2409588..e0db6b9e6a5 100644 --- a/src/H5.c +++ b/src/H5.c @@ -82,8 +82,6 @@ hbool_t H5_libinit_g = FALSE; /* Library hasn't been initialized */ hbool_t H5_libterm_g = FALSE; /* Library isn't being shutdown */ #endif -hbool_t H5_use_selection_io_g = FALSE; - char H5_lib_vers_info_g[] = H5_VERS_INFO; static hbool_t H5_dont_atexit_g = FALSE; H5_debug_t H5_debug_g; /* debugging info */ @@ -141,8 +139,7 @@ herr_t H5_init_library(void) { size_t i; - char *env_use_select_io = NULL; - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; FUNC_ENTER_NOAPI(FAIL) @@ -274,14 +271,6 @@ H5_init_library(void) } /* clang-format on */ - /* Check for HDF5_USE_SELECTION_IO env variable */ - env_use_select_io = HDgetenv("HDF5_USE_SELECTION_IO"); - if (NULL != env_use_select_io && HDstrcmp(env_use_select_io, "") && HDstrcmp(env_use_select_io, "0") && - HDstrcmp(env_use_select_io, "no") && HDstrcmp(env_use_select_io, "No") && - HDstrcmp(env_use_select_io, "NO") && HDstrcmp(env_use_select_io, "false") && - HDstrcmp(env_use_select_io, "False") && HDstrcmp(env_use_select_io, "FALSE")) - H5_use_selection_io_g = TRUE; - /* Debugging? 
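The H5Pset_selection_io() and H5Pget_selection_io() functions noted in the RELEASE.txt entry earlier in this patch are ordinary dataset-transfer property list calls. A minimal sketch, assuming dset_id, the two dataspaces, and wbuf already exist:

    #include "hdf5.h"

    /* Write one dataset with selection I/O explicitly requested; the other
     * accepted modes are H5D_SELECTION_IO_MODE_DEFAULT (automatic) and
     * H5D_SELECTION_IO_MODE_OFF */
    static herr_t
    write_with_selection_io(hid_t dset_id, hid_t mem_space_id, hid_t file_space_id, const int *wbuf)
    {
        hid_t  dxpl_id = H5Pcreate(H5P_DATASET_XFER);
        herr_t status;

        H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON);
        status = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_space_id, file_space_id, dxpl_id, wbuf);
        H5Pclose(dxpl_id);

        return status;
    }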
*/ H5__debug_mask("-all"); H5__debug_mask(HDgetenv("HDF5_DEBUG")); diff --git a/src/H5CX.c b/src/H5CX.c index 95e824d1a12..e5595b7088b 100644 --- a/src/H5CX.c +++ b/src/H5CX.c @@ -94,14 +94,12 @@ H5CX_RETRIEVE_PROP_COMMON(PL, DEF_PL, PROP_NAME, PROP_FIELD) \ } /* end if */ -#ifdef H5_HAVE_PARALLEL /* Macro for the duplicated code to retrieve possibly set properties from a property list */ #define H5CX_RETRIEVE_PROP_VALID_SET(PL, DEF_PL, PROP_NAME, PROP_FIELD) \ /* Check if the value has been retrieved already */ \ if (!((*head)->ctx.H5_GLUE(PROP_FIELD, _valid) || (*head)->ctx.H5_GLUE(PROP_FIELD, _set))) { \ H5CX_RETRIEVE_PROP_COMMON(PL, DEF_PL, PROP_NAME, PROP_FIELD) \ - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ + } /* end if */ #if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_INSTRUMENTED_LIBRARY) /* Macro for the duplicated code to test and set properties for a property list */ @@ -127,7 +125,6 @@ } #endif /* defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_INSTRUMENTED_LIBRARY) */ -#ifdef H5_HAVE_PARALLEL /* Macro for the duplicated code to test and set properties for a property list */ #define H5CX_SET_PROP(PROP_NAME, PROP_FIELD) \ if ((*head)->ctx.H5_GLUE(PROP_FIELD, _set)) { \ @@ -137,8 +134,7 @@ /* Set the property */ \ if (H5P_set((*head)->ctx.dxpl, PROP_NAME, &(*head)->ctx.PROP_FIELD) < 0) \ HGOTO_ERROR(H5E_CONTEXT, H5E_CANTSET, NULL, "error setting data xfer property") \ - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ + } /* end if */ /******************/ /* Local Typedefs */ @@ -242,16 +238,20 @@ typedef struct H5CX_t { unsigned mpio_chunk_opt_ratio; /* Collective chunk ratio (H5D_XFER_MPIO_CHUNK_OPT_RATIO_NAME) */ hbool_t mpio_chunk_opt_ratio_valid; /* Whether collective chunk ratio is valid */ #endif /* H5_HAVE_PARALLEL */ - H5Z_EDC_t err_detect; /* Error detection info (H5D_XFER_EDC_NAME) */ - hbool_t err_detect_valid; /* Whether error detection info is valid */ - H5Z_cb_t filter_cb; /* Filter callback function (H5D_XFER_FILTER_CB_NAME) */ - hbool_t filter_cb_valid; /* Whether filter callback function is valid */ - H5Z_data_xform_t *data_transform; /* Data transform info (H5D_XFER_XFORM_NAME) */ - hbool_t data_transform_valid; /* Whether data transform info is valid */ - H5T_vlen_alloc_info_t vl_alloc_info; /* VL datatype alloc info (H5D_XFER_VLEN_*_NAME) */ - hbool_t vl_alloc_info_valid; /* Whether VL datatype alloc info is valid */ - H5T_conv_cb_t dt_conv_cb; /* Datatype conversion struct (H5D_XFER_CONV_CB_NAME) */ - hbool_t dt_conv_cb_valid; /* Whether datatype conversion struct is valid */ + H5Z_EDC_t err_detect; /* Error detection info (H5D_XFER_EDC_NAME) */ + hbool_t err_detect_valid; /* Whether error detection info is valid */ + H5Z_cb_t filter_cb; /* Filter callback function (H5D_XFER_FILTER_CB_NAME) */ + hbool_t filter_cb_valid; /* Whether filter callback function is valid */ + H5Z_data_xform_t *data_transform; /* Data transform info (H5D_XFER_XFORM_NAME) */ + hbool_t data_transform_valid; /* Whether data transform info is valid */ + H5T_vlen_alloc_info_t vl_alloc_info; /* VL datatype alloc info (H5D_XFER_VLEN_*_NAME) */ + hbool_t vl_alloc_info_valid; /* Whether VL datatype alloc info is valid */ + H5T_conv_cb_t dt_conv_cb; /* Datatype conversion struct (H5D_XFER_CONV_CB_NAME) */ + hbool_t dt_conv_cb_valid; /* Whether datatype conversion struct is valid */ + H5D_selection_io_mode_t selection_io_mode; /* Selection I/O mode (H5D_XFER_SELECTION_IO_MODE_NAME) */ + hbool_t selection_io_mode_valid; /* Whether selection I/O mode is valid */ + hbool_t modify_write_buf; /* 
Whether the library can modify write buffers */ + hbool_t modify_write_buf_valid; /* Whether the modify_write_buf field is valid */ /* Return-only DXPL properties to return to application */ #ifdef H5_HAVE_PARALLEL @@ -297,9 +297,13 @@ typedef struct H5CX_t { hbool_t mpio_coll_rank0_bcast; /* Instrumented "collective chunk multi ratio ind" value (H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME) */ hbool_t - mpio_coll_rank0_bcast_set; /* Whether instrumented "collective chunk multi ratio ind" value is set */ -#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ -#endif /* H5_HAVE_PARALLEL */ + mpio_coll_rank0_bcast_set; /* Whether instrumented "collective chunk multi ratio ind" value is set */ +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ +#endif /* H5_HAVE_PARALLEL */ + uint32_t no_selection_io_cause; /* Reason for not performing selection I/O + (H5D_XFER_NO_SELECTION_IO_CAUSE_NAME) */ + hbool_t no_selection_io_cause_set; /* Whether reason for not performing selection I/O is set */ + hbool_t no_selection_io_cause_valid; /* Whether reason for not performing selection I/O is valid */ /* Cached LCPL properties */ H5T_cset_t encoding; /* Link name character encoding */ @@ -370,15 +374,19 @@ typedef struct H5CX_dxpl_cache_t { uint32_t mpio_global_no_coll_cause; /* Global reason for breaking collective I/O (H5D_MPIO_GLOBAL_NO_COLLECTIVE_CAUSE_NAME) */ H5FD_mpio_chunk_opt_t - mpio_chunk_opt_mode; /* Collective chunk option (H5D_XFER_MPIO_CHUNK_OPT_HARD_NAME) */ - unsigned mpio_chunk_opt_num; /* Collective chunk threshold (H5D_XFER_MPIO_CHUNK_OPT_NUM_NAME) */ - unsigned mpio_chunk_opt_ratio; /* Collective chunk ratio (H5D_XFER_MPIO_CHUNK_OPT_RATIO_NAME) */ -#endif /* H5_HAVE_PARALLEL */ - H5Z_EDC_t err_detect; /* Error detection info (H5D_XFER_EDC_NAME) */ - H5Z_cb_t filter_cb; /* Filter callback function (H5D_XFER_FILTER_CB_NAME) */ - H5Z_data_xform_t *data_transform; /* Data transform info (H5D_XFER_XFORM_NAME) */ - H5T_vlen_alloc_info_t vl_alloc_info; /* VL datatype alloc info (H5D_XFER_VLEN_*_NAME) */ - H5T_conv_cb_t dt_conv_cb; /* Datatype conversion struct (H5D_XFER_CONV_CB_NAME) */ + mpio_chunk_opt_mode; /* Collective chunk option (H5D_XFER_MPIO_CHUNK_OPT_HARD_NAME) */ + unsigned mpio_chunk_opt_num; /* Collective chunk threshold (H5D_XFER_MPIO_CHUNK_OPT_NUM_NAME) */ + unsigned mpio_chunk_opt_ratio; /* Collective chunk ratio (H5D_XFER_MPIO_CHUNK_OPT_RATIO_NAME) */ +#endif /* H5_HAVE_PARALLEL */ + H5Z_EDC_t err_detect; /* Error detection info (H5D_XFER_EDC_NAME) */ + H5Z_cb_t filter_cb; /* Filter callback function (H5D_XFER_FILTER_CB_NAME) */ + H5Z_data_xform_t *data_transform; /* Data transform info (H5D_XFER_XFORM_NAME) */ + H5T_vlen_alloc_info_t vl_alloc_info; /* VL datatype alloc info (H5D_XFER_VLEN_*_NAME) */ + H5T_conv_cb_t dt_conv_cb; /* Datatype conversion struct (H5D_XFER_CONV_CB_NAME) */ + H5D_selection_io_mode_t selection_io_mode; /* Selection I/O mode (H5D_XFER_SELECTION_IO_MODE_NAME) */ + uint32_t no_selection_io_cause; /* Reasons for not performing selection I/O + (H5D_XFER_NO_SELECTION_IO_CAUSE_NAME) */ + hbool_t modify_write_buf; /* Whether the library can modify write buffers */ } H5CX_dxpl_cache_t; /* Typedef for cached default link creation property list information */ @@ -566,6 +574,19 @@ H5CX_init(void) if (H5P_get(dx_plist, H5D_XFER_CONV_CB_NAME, &H5CX_def_dxpl_cache.dt_conv_cb) < 0) HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve datatype conversion exception callback") + /* Get the selection I/O mode */ + if (H5P_get(dx_plist, H5D_XFER_SELECTION_IO_MODE_NAME, 
&H5CX_def_dxpl_cache.selection_io_mode) < 0) + HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve parallel transfer method") + + /* Get the local & global reasons for breaking selection I/O values */ + if (H5P_get(dx_plist, H5D_XFER_NO_SELECTION_IO_CAUSE_NAME, &H5CX_def_dxpl_cache.no_selection_io_cause) < + 0) + HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve cause for no selection I/O") + + /* Get the modify write buffer property */ + if (H5P_get(dx_plist, H5D_XFER_MODIFY_WRITE_BUF_NAME, &H5CX_def_dxpl_cache.modify_write_buf) < 0) + HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve modify write buffer property") + /* Reset the "default LCPL cache" information */ HDmemset(&H5CX_def_lcpl_cache, 0, sizeof(H5CX_lcpl_cache_t)); @@ -2563,6 +2584,111 @@ H5CX_get_dt_conv_cb(H5T_conv_cb_t *dt_conv_cb) FUNC_LEAVE_NOAPI(ret_value) } /* end H5CX_get_dt_conv_cb() */ +/*------------------------------------------------------------------------- + * Function: H5CX_get_selection_io_mode + * + * Purpose: Retrieves the selection I/O mode for the current API call context. + * + * Return: Non-negative on success / Negative on failure + * + * Programmer: Vailin Choi + * March 5, 2023 + * + *------------------------------------------------------------------------- + */ +herr_t +H5CX_get_selection_io_mode(H5D_selection_io_mode_t *selection_io_mode) +{ + H5CX_node_t **head = NULL; /* Pointer to head of API context list */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + HDassert(selection_io_mode); + head = H5CX_get_my_context(); /* Get the pointer to the head of the API context, for this thread */ + HDassert(head && *head); + HDassert(H5P_DEFAULT != (*head)->ctx.dxpl_id); + + H5CX_RETRIEVE_PROP_VALID(dxpl, H5P_DATASET_XFER_DEFAULT, H5D_XFER_SELECTION_IO_MODE_NAME, + selection_io_mode) + + /* Get the value */ + *selection_io_mode = (*head)->ctx.selection_io_mode; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5CX_get_selection_io_mode() */ + +/*------------------------------------------------------------------------- + * Function: H5CX_get_no_selection_io_cause + * + * Purpose: Retrieves the cause for not performing selection I/O + * for the current API call context. + * + * Return: Non-negative on success / Negative on failure + * + * Programmer: Vailin Choi + * April 15, 2023 + * + *------------------------------------------------------------------------- + */ +herr_t +H5CX_get_no_selection_io_cause(uint32_t *no_selection_io_cause) +{ + H5CX_node_t **head = NULL; /* Pointer to head of API context list */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + HDassert(no_selection_io_cause); + head = H5CX_get_my_context(); /* Get the pointer to the head of the API context, for this thread */ + HDassert(head && *head); + HDassert(H5P_DEFAULT != (*head)->ctx.dxpl_id); + + H5CX_RETRIEVE_PROP_VALID_SET(dxpl, H5P_DATASET_XFER_DEFAULT, H5D_XFER_NO_SELECTION_IO_CAUSE_NAME, + no_selection_io_cause) + + /* Get the value */ + *no_selection_io_cause = (*head)->ctx.no_selection_io_cause; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5CX_get_no_selection_io_cause() */ + +/*------------------------------------------------------------------------- + * Function: H5CX_get_modify_write_buf + * + * Purpose: Retrieves the modify write buffer property for the current API call context. 
+ * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5CX_get_modify_write_buf(hbool_t *modify_write_buf) +{ + H5CX_node_t **head = NULL; /* Pointer to head of API context list */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + HDassert(modify_write_buf); + head = H5CX_get_my_context(); /* Get the pointer to the head of the API context, for this thread */ + HDassert(head && *head); + HDassert(H5P_DEFAULT != (*head)->ctx.dxpl_id); + + H5CX_RETRIEVE_PROP_VALID(dxpl, H5P_DATASET_XFER_DEFAULT, H5D_XFER_MODIFY_WRITE_BUF_NAME, modify_write_buf) + + /* Get the value */ + *modify_write_buf = (*head)->ctx.modify_write_buf; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5CX_get_selection_io_mode() */ + /*------------------------------------------------------------------------- * Function: H5CX_get_encoding * @@ -3543,6 +3669,41 @@ H5CX_test_set_mpio_coll_rank0_bcast(hbool_t mpio_coll_rank0_bcast) #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ #endif /* H5_HAVE_PARALLEL */ +/*------------------------------------------------------------------------- + * Function: H5CX_set_no_selecction_io_cause + * + * Purpose: Sets the reason for not performing selection I/O for + * the current API call context. + * + * Return: + * + * Programmer: Vailin Choi + * April 15, 2023 + * + *------------------------------------------------------------------------- + */ +void +H5CX_set_no_selection_io_cause(uint32_t no_selection_io_cause) +{ + H5CX_node_t **head = NULL; /* Pointer to head of API context list */ + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + /* Sanity checks */ + head = H5CX_get_my_context(); /* Get the pointer to the head of the API context, for this thread */ + HDassert(head && *head); + HDassert((*head)->ctx.dxpl_id != H5P_DEFAULT); + + /* If we're using the default DXPL, don't modify it */ + if ((*head)->ctx.dxpl_id != H5P_DATASET_XFER_DEFAULT) { + /* Cache the value for later, marking it to set in DXPL when context popped */ + (*head)->ctx.no_selection_io_cause = no_selection_io_cause; + (*head)->ctx.no_selection_io_cause_set = TRUE; + } /* end if */ + + FUNC_LEAVE_NOAPI_VOID +} /* end H5CX_set_no_selectiion_io_cause() */ + /*------------------------------------------------------------------------- * Function: H5CX_get_ohdr_flags * @@ -3596,11 +3757,7 @@ H5CX__pop_common(hbool_t update_dxpl_props) H5CX_node_t **head = NULL; /* Pointer to head of API context list */ H5CX_node_t *ret_value = NULL; /* Return value */ -#ifdef H5_HAVE_PARALLEL FUNC_ENTER_PACKAGE -#else - FUNC_ENTER_PACKAGE_NOERR -#endif /* Sanity check */ head = H5CX_get_my_context(); /* Get the pointer to the head of the API context, for this thread */ @@ -3608,6 +3765,7 @@ H5CX__pop_common(hbool_t update_dxpl_props) /* Check for cached DXPL properties to return to application */ if (update_dxpl_props) { + H5CX_SET_PROP(H5D_XFER_NO_SELECTION_IO_CAUSE_NAME, no_selection_io_cause) #ifdef H5_HAVE_PARALLEL H5CX_SET_PROP(H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, mpio_actual_chunk_opt) H5CX_SET_PROP(H5D_MPIO_ACTUAL_IO_MODE_NAME, mpio_actual_io_mode) @@ -3629,9 +3787,7 @@ H5CX__pop_common(hbool_t update_dxpl_props) ret_value = (*head); (*head) = (*head)->next; -#ifdef H5_HAVE_PARALLEL done: -#endif /* H5_HAVE_PARALLEL */ FUNC_LEAVE_NOAPI(ret_value) } /* end H5CX__pop_common() */ diff --git a/src/H5CXprivate.h b/src/H5CXprivate.h index 4c034c1a4d4..f0bec205da7 100644 --- a/src/H5CXprivate.h +++ 
b/src/H5CXprivate.h @@ -115,6 +115,9 @@ H5_DLL herr_t H5CX_get_filter_cb(H5Z_cb_t *filter_cb); H5_DLL herr_t H5CX_get_data_transform(H5Z_data_xform_t **data_transform); H5_DLL herr_t H5CX_get_vlen_alloc_info(H5T_vlen_alloc_info_t *vl_alloc_info); H5_DLL herr_t H5CX_get_dt_conv_cb(H5T_conv_cb_t *cb_struct); +H5_DLL herr_t H5CX_get_selection_io_mode(H5D_selection_io_mode_t *selection_io_mode); +H5_DLL herr_t H5CX_get_no_selection_io_cause(uint32_t *no_selection_io_cause); +H5_DLL herr_t H5CX_get_modify_write_buf(hbool_t *modify_write_buf); /* "Getter" routines for LCPL properties cached in API context */ H5_DLL herr_t H5CX_get_encoding(H5T_cset_t *encoding); @@ -158,6 +161,9 @@ H5_DLL herr_t H5CX_set_nlinks(size_t nlinks); H5_DLL herr_t H5CX_init(void); /* "Setter" routines for cached DXPL properties that must be returned to application */ + +H5_DLL void H5CX_set_no_selection_io_cause(uint32_t no_selection_io_cause); + #ifdef H5_HAVE_PARALLEL H5_DLL void H5CX_set_mpio_actual_chunk_opt(H5D_mpio_actual_chunk_opt_mode_t chunk_opt); H5_DLL void H5CX_set_mpio_actual_io_mode(H5D_mpio_actual_io_mode_t actual_io_mode); diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 830560d454a..59577c3171b 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -289,21 +289,21 @@ static ssize_t H5D__nonexistent_readvv(const H5D_io_info_t *io_info, const H5D_d static int H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata); /* Helper routines */ -static herr_t H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, const hsize_t *curr_dims, - const hsize_t *max_dims); -static herr_t H5D__chunk_cinfo_cache_reset(H5D_chunk_cached_t *last); -static herr_t H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *udata); -static hbool_t H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *udata); -static herr_t H5D__create_piece_map_single(H5D_dset_io_info_t *di, H5D_io_info_t *io_info); -static herr_t H5D__create_piece_file_map_all(H5D_dset_io_info_t *di, H5D_io_info_t *io_info); -static herr_t H5D__create_piece_file_map_hyper(H5D_dset_io_info_t *di, H5D_io_info_t *io_info); -static herr_t H5D__create_piece_mem_map_1d(const H5D_dset_io_info_t *di); -static herr_t H5D__create_piece_mem_map_hyper(const H5D_dset_io_info_t *di); -static herr_t H5D__piece_file_cb(void *elem, const H5T_t *type, unsigned ndims, const hsize_t *coords, +static herr_t H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, const hsize_t *curr_dims, + const hsize_t *max_dims); +static herr_t H5D__chunk_cinfo_cache_reset(H5D_chunk_cached_t *last); +static herr_t H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *udata); +static hbool_t H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *udata); +static herr_t H5D__create_piece_map_single(H5D_dset_io_info_t *di, H5D_io_info_t *io_info); +static herr_t H5D__create_piece_file_map_all(H5D_dset_io_info_t *di, H5D_io_info_t *io_info); +static herr_t H5D__create_piece_file_map_hyper(H5D_dset_io_info_t *di, H5D_io_info_t *io_info); +static herr_t H5D__create_piece_mem_map_1d(const H5D_dset_io_info_t *di); +static herr_t H5D__create_piece_mem_map_hyper(const H5D_dset_io_info_t *di); +static herr_t H5D__piece_file_cb(void *elem, const H5T_t *type, unsigned ndims, const hsize_t *coords, + void *_opdata); +static herr_t H5D__piece_mem_cb(void *elem, const H5T_t *type, unsigned ndims, const hsize_t *coords, void *_opdata); -static herr_t H5D__piece_mem_cb(void *elem, 
const H5T_t *type, unsigned ndims, const hsize_t *coords, - void *_opdata); -static htri_t H5D__chunk_may_use_select_io(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info); +static herr_t H5D__chunk_may_use_select_io(H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info); static unsigned H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled); static herr_t H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset); static herr_t H5D__chunk_cache_evict(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t flush); @@ -1062,9 +1062,8 @@ H5D__chunk_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) htri_t file_space_normalized = FALSE; /* File dataspace was normalized */ unsigned f_ndims; /* The number of dimensions of the file's dataspace */ int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */ - htri_t use_selection_io = FALSE; /* Whether to use selection I/O */ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -1114,10 +1113,29 @@ H5D__chunk_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) /* Check if we're performing selection I/O and save the result if it hasn't * been disabled already */ - if (io_info->use_select_io) { - if ((use_selection_io = H5D__chunk_may_use_select_io(io_info, dinfo)) < 0) + if (io_info->use_select_io != H5D_SELECTION_IO_MODE_OFF) + if (H5D__chunk_may_use_select_io(io_info, dinfo) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if selection I/O is possible") - io_info->use_select_io = (hbool_t)use_selection_io; + + /* Calculate type conversion buffer size if necessary. Currently only implemented for selection I/O. */ + if (io_info->use_select_io != H5D_SELECTION_IO_MODE_OFF && + !(dinfo->type_info.is_xform_noop && dinfo->type_info.is_conv_noop)) { + H5SL_node_t *chunk_node; /* Current node in chunk skip list */ + + /* Iterate through nodes in chunk skip list */ + chunk_node = H5D_CHUNK_GET_FIRST_NODE(dinfo); + while (chunk_node) { + H5D_piece_info_t *piece_info; /* Chunk information */ + + /* Get the actual chunk information from the skip list node */ + piece_info = H5D_CHUNK_GET_NODE_INFO(dinfo, chunk_node); + + /* Handle type conversion buffer */ + H5D_INIT_PIECE_TCONV(io_info, dinfo, piece_info) + + /* Advance to next chunk in list */ + chunk_node = H5D_CHUNK_GET_NEXT_NODE(dinfo, chunk_node); + } } done: @@ -1571,6 +1589,10 @@ H5D__create_piece_map_single(H5D_dset_io_info_t *di, H5D_io_info_t *io_info) /* Indicate that the chunk's memory dataspace is shared */ piece_info->mspace_shared = TRUE; + /* Initialize in-place type conversion info. Start with it disabled. */ + piece_info->in_place_tconv = FALSE; + piece_info->buf_off = 0; + /* make connection to related dset info from this piece_info */ piece_info->dset_info = di; @@ -1700,6 +1722,10 @@ H5D__create_piece_file_map_all(H5D_dset_io_info_t *di, H5D_io_info_t *io_info) /* make connection to related dset info from this piece_info */ new_piece_info->dset_info = di; + /* Initialize in-place type conversion info. Start with it disabled. 
*/ + new_piece_info->in_place_tconv = FALSE; + new_piece_info->buf_off = 0; + /* Insert the new chunk into the skip list */ if (H5SL_insert(fm->dset_sel_pieces, new_piece_info, &new_piece_info->index) < 0) { H5D__free_piece_info(new_piece_info, NULL, NULL); @@ -1896,6 +1922,10 @@ H5D__create_piece_file_map_hyper(H5D_dset_io_info_t *dinfo, H5D_io_info_t *io_in /* make connection to related dset info from this piece_info */ new_piece_info->dset_info = dinfo; + /* Initialize in-place type conversion info. Start with it disabled. */ + new_piece_info->in_place_tconv = FALSE; + new_piece_info->buf_off = 0; + /* Add piece to global piece_count */ io_info->piece_count++; @@ -2271,6 +2301,10 @@ H5D__piece_file_cb(void H5_ATTR_UNUSED *elem, const H5T_t H5_ATTR_UNUSED *type, H5MM_memcpy(piece_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims); piece_info->scaled[fm->f_ndims] = 0; + /* Initialize in-place type conversion info. Start with it disabled. */ + piece_info->in_place_tconv = FALSE; + piece_info->buf_off = 0; + /* Make connection to related dset info from this piece_info */ piece_info->dset_info = dinfo; @@ -2547,11 +2581,11 @@ H5D__chunk_cacheable(const H5D_io_info_t H5_ATTR_PARALLEL_USED *io_info, H5D_dse * *------------------------------------------------------------------------- */ -static htri_t -H5D__chunk_may_use_select_io(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info) +static herr_t +H5D__chunk_may_use_select_io(H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info) { - const H5D_t *dataset = NULL; /* Local pointer to dataset info */ - htri_t ret_value = FAIL; /* Return value */ + const H5D_t *dataset = NULL; /* Local pointer to dataset info */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -2562,20 +2596,22 @@ H5D__chunk_may_use_select_io(const H5D_io_info_t *io_info, const H5D_dset_io_inf dataset = dset_info->dset; HDassert(dataset); - /* Don't use selection I/O if it's globally disabled, there is a type - * conversion, or if there are filters on the dataset (for now) */ - if (dset_info->io_ops.single_read != H5D__select_read || dataset->shared->dcpl_cache.pline.nused > 0) - ret_value = FALSE; + /* Don't use selection I/O if there are filters on the dataset (for now) */ + if (dataset->shared->dcpl_cache.pline.nused > 0) { + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_DATASET_FILTER; + } else { hbool_t page_buf_enabled; - HDassert(dset_info->io_ops.single_write == H5D__select_write); - /* Check if the page buffer is enabled */ if (H5PB_enabled(io_info->f_sh, H5FD_MEM_DRAW, &page_buf_enabled) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if page buffer is enabled") - if (page_buf_enabled) - ret_value = FALSE; + if (page_buf_enabled) { + /* Note that page buffer is disabled in parallel */ + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_PAGE_BUFFER; + } else { /* Check if chunks in this dataset may be cached, if so don't use * selection I/O (for now). Note that chunks temporarily cached for @@ -2586,16 +2622,14 @@ H5D__chunk_may_use_select_io(const H5D_io_info_t *io_info, const H5D_dset_io_inf * must bypass the chunk-cache scheme because other MPI processes * could be writing to other elements in the same chunk. 
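When the library falls back from selection I/O, the reason is recorded on the transfer property list and can be read back with the new H5Pget_no_selection_io_cause() call. A short sketch using a few of the cause flags set in this code path (dxpl_id is assumed to be the property list passed to the preceding H5Dread()/H5Dwrite(); only some of the defined cause bits are checked here):

    #include <stdio.h>
    #include "hdf5.h"

    /* Report why the last transfer on this DXPL did not use selection I/O */
    static void
    report_no_selection_io_cause(hid_t dxpl_id)
    {
        uint32_t cause = 0;

        H5Pget_no_selection_io_cause(dxpl_id, &cause);

        if (cause & H5D_SEL_IO_DATASET_FILTER)
            printf("skipped: dataset has filters\n");
        if (cause & H5D_SEL_IO_PAGE_BUFFER)
            printf("skipped: page buffer is enabled\n");
        if (cause & H5D_SEL_IO_CHUNK_CACHE)
            printf("skipped: chunks are small enough for the chunk cache\n");
    }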
*/ - if (io_info->using_mpi_vfd && (H5F_ACC_RDWR & H5F_INTENT(dataset->oloc.file))) - ret_value = TRUE; - else { + if (!(io_info->using_mpi_vfd && (H5F_ACC_RDWR & H5F_INTENT(dataset->oloc.file)))) { #endif /* H5_HAVE_PARALLEL */ /* Check if the chunk is too large to keep in the cache */ H5_CHECK_OVERFLOW(dataset->shared->layout.u.chunk.size, uint32_t, size_t); - if ((size_t)dataset->shared->layout.u.chunk.size > dataset->shared->cache.chunk.nbytes_max) - ret_value = TRUE; - else - ret_value = FALSE; + if ((size_t)dataset->shared->layout.u.chunk.size <= dataset->shared->cache.chunk.nbytes_max) { + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_CHUNK_CACHE; + } #ifdef H5_HAVE_PARALLEL } /* end else */ #endif /* H5_HAVE_PARALLEL */ @@ -2668,7 +2702,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) } /* Different blocks depending on whether we're using selection I/O */ - if (io_info->use_select_io) { + if (io_info->use_select_io == H5D_SELECTION_IO_MODE_ON) { size_t num_chunks; size_t element_sizes[2] = {dset_info->type_info.src_type_size, 0}; void *bufs[2] = {dset_info->buf.vp, NULL}; @@ -2749,6 +2783,8 @@ H5D__chunk_read(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) io_info->addrs[io_info->pieces_added] = udata.chunk_block.offset; io_info->element_sizes[io_info->pieces_added] = element_sizes[0]; io_info->rbufs[io_info->pieces_added] = bufs[0]; + if (io_info->sel_pieces) + io_info->sel_pieces[io_info->pieces_added] = chunk_info; io_info->pieces_added++; } } /* end if */ @@ -2766,7 +2802,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) chunk_node = H5D_CHUNK_GET_NEXT_NODE(dset_info, chunk_node); } /* end while */ - /* Only perform I/O if not performing multi dataset I/O, otherwise the + /* Only perform I/O if not performing multi dataset I/O or type conversion, otherwise the * higher level will handle it after all datasets have been processed */ if (H5D_LAYOUT_CB_PERFORM_IO(io_info)) { /* Issue selection I/O call (we can skip the page buffer because we've @@ -2787,7 +2823,13 @@ H5D__chunk_read(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) chunk_addrs = H5MM_xfree(chunk_addrs); } /* end if */ } /* end if */ - } /* end if */ + +#ifdef H5_HAVE_PARALLEL + /* Report that collective chunk I/O was used (will only be set on the DXPL if collective I/O was + * requested) */ + io_info->actual_io_mode |= H5D_MPIO_CHUNK_COLLECTIVE; +#endif /* H5_HAVE_PARALLEL */ + } /* end if */ else { H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */ H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */ @@ -2981,7 +3023,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) cpt_store.compact.dirty = &cpt_dirty; /* Different blocks depending on whether we're using selection I/O */ - if (io_info->use_select_io) { + if (io_info->use_select_io == H5D_SELECTION_IO_MODE_ON) { size_t num_chunks; size_t element_sizes[2] = {dset_info->type_info.dst_type_size, 0}; const void *bufs[2] = {dset_info->buf.cvp, NULL}; @@ -3139,6 +3181,8 @@ H5D__chunk_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) io_info->addrs[io_info->pieces_added] = udata.chunk_block.offset; io_info->element_sizes[io_info->pieces_added] = element_sizes[0]; io_info->wbufs[io_info->pieces_added] = bufs[0]; + if (io_info->sel_pieces) + io_info->sel_pieces[io_info->pieces_added] = chunk_info; io_info->pieces_added++; } } /* end else */ @@ -3147,7 +3191,7 @@ 
H5D__chunk_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) chunk_node = H5D_CHUNK_GET_NEXT_NODE(dset_info, chunk_node); } /* end while */ - /* Only perform I/O if not performing multi dataset I/O, otherwise the + /* Only perform I/O if not performing multi dataset I/O or type conversion, otherwise the * higher level will handle it after all datasets have been processed */ if (H5D_LAYOUT_CB_PERFORM_IO(io_info)) { /* Issue selection I/O call (we can skip the page buffer because we've @@ -3168,7 +3212,13 @@ H5D__chunk_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) chunk_addrs = H5MM_xfree(chunk_addrs); } /* end if */ } /* end if */ - } /* end if */ + +#ifdef H5_HAVE_PARALLEL + /* Report that collective chunk I/O was used (will only be set on the DXPL if collective I/O was + * requested) */ + io_info->actual_io_mode |= H5D_MPIO_CHUNK_COLLECTIVE; +#endif /* H5_HAVE_PARALLEL */ + } /* end if */ else { /* Iterate through nodes in chunk skip list */ chunk_node = H5D_CHUNK_GET_FIRST_NODE(dset_info); diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c index 9567c60cdb1..5f45da45ee7 100644 --- a/src/H5Dcompact.c +++ b/src/H5Dcompact.c @@ -252,7 +252,8 @@ H5D__compact_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) dinfo->store->compact.dirty = &dinfo->dset->shared->layout.storage.u.compact.dirty; /* Disable selection I/O */ - io_info->use_select_io = FALSE; + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__compact_io_init() */ diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c index 3eddfff3b96..c8c7ee74414 100644 --- a/src/H5Dcontig.c +++ b/src/H5Dcontig.c @@ -106,7 +106,7 @@ static herr_t H5D__contig_io_term(H5D_io_info_t *io_info, H5D_dset_io_info_t *d /* Helper routines */ static herr_t H5D__contig_write_one(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, hsize_t offset, size_t size); -static htri_t H5D__contig_may_use_select_io(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, +static herr_t H5D__contig_may_use_select_io(H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, H5D_io_op_type_t op_type); /*********************/ @@ -586,8 +586,7 @@ H5D__contig_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) int sf_ndims; /* The number of dimensions of the file dataspace (signed) */ - htri_t use_selection_io = FALSE; /* Whether to use selection I/O */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -666,6 +665,16 @@ H5D__contig_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) /* get dset file address for piece */ new_piece_info->faddr = dinfo->dset->shared->layout.storage.u.contig.addr; + /* Initialize in-place type conversion info. Start with it disabled. */ + new_piece_info->in_place_tconv = FALSE; + new_piece_info->buf_off = 0; + + /* Calculate type conversion buffer size and check for in-place conversion if necessary. Currently + * only implemented for selection I/O. 
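The in-place conversion path referred to in these hunks is only taken when the application has given the library permission to modify its write buffer via the new H5Pset_modify_write_buf() property (per the release note, currently used only for type conversion with selection I/O). A minimal sketch, assuming dxpl_id is the transfer property list used for the write:

    #include "hdf5.h"

    /* Let HDF5 convert datatypes directly inside the application's write
     * buffer, avoiding a separate conversion buffer and memcpy */
    static void
    allow_in_place_write_tconv(hid_t dxpl_id)
    {
        H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON);
        H5Pset_modify_write_buf(dxpl_id, 1 /* i.e. TRUE */);
    }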
*/ + if (io_info->use_select_io != H5D_SELECTION_IO_MODE_OFF && + !(dinfo->type_info.is_xform_noop && dinfo->type_info.is_conv_noop)) + H5D_INIT_PIECE_TCONV(io_info, dinfo, new_piece_info) + /* Save piece to dataset info struct so it is freed at the end of the * operation */ dinfo->layout_io_info.contig_piece_info = new_piece_info; @@ -676,11 +685,9 @@ H5D__contig_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) /* Check if we're performing selection I/O if it hasn't been disabled * already */ - if (io_info->use_select_io) { - if ((use_selection_io = H5D__contig_may_use_select_io(io_info, dinfo, H5D_IO_OP_READ)) < 0) + if (io_info->use_select_io != H5D_SELECTION_IO_MODE_OFF) + if (H5D__contig_may_use_select_io(io_info, dinfo, io_info->op_type) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if selection I/O is possible") - io_info->use_select_io = (hbool_t)use_selection_io; - } done: if (ret_value < 0) { @@ -740,12 +747,12 @@ H5D__contig_mdio_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) * *------------------------------------------------------------------------- */ -static htri_t -H5D__contig_may_use_select_io(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, +static herr_t +H5D__contig_may_use_select_io(H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, H5D_io_op_type_t op_type) { - const H5D_t *dataset = NULL; /* Local pointer to dataset info */ - htri_t ret_value = FAIL; /* Return value */ + const H5D_t *dataset = NULL; /* Local pointer to dataset info */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -757,27 +764,33 @@ H5D__contig_may_use_select_io(const H5D_io_info_t *io_info, const H5D_dset_io_in dataset = dset_info->dset; - /* Don't use selection I/O if it's globally disabled, if there is a type - * conversion, or if it's not a contiguous dataset, or if the sieve buffer - * exists (write) or is dirty (read) */ - if (dset_info->io_ops.single_read != H5D__select_read || - dset_info->layout_ops.readvv != H5D__contig_readvv || - (op_type == H5D_IO_OP_READ && dataset->shared->cache.contig.sieve_dirty) || - (op_type == H5D_IO_OP_WRITE && dataset->shared->cache.contig.sieve_buf)) - ret_value = FALSE; + /* None of the reasons this function might disable selection I/O are relevant to parallel, so no need to + * update no_selection_io_cause since we're only keeping track of the reason for no selection I/O in + * parallel (for now) */ + + /* Don't use selection I/O if it's globally disabled, if it's not a contiguous dataset, or if the sieve + * buffer exists (write) or is dirty (read) */ + if (dset_info->layout_ops.readvv != H5D__contig_readvv) { + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + } + else if ((op_type == H5D_IO_OP_READ && dataset->shared->cache.contig.sieve_dirty) || + (op_type == H5D_IO_OP_WRITE && dataset->shared->cache.contig.sieve_buf)) { + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_CONTIGUOUS_SIEVE_BUFFER; + } else { hbool_t page_buf_enabled; - HDassert(dset_info->io_ops.single_write == H5D__select_write); HDassert(dset_info->layout_ops.writevv == H5D__contig_writevv); /* Check if the page buffer is enabled */ if (H5PB_enabled(io_info->f_sh, H5FD_MEM_DRAW, &page_buf_enabled) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if page buffer is enabled") - if (page_buf_enabled) - ret_value = FALSE; - else - ret_value = TRUE; + if 
(page_buf_enabled) { + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_PAGE_BUFFER; + } } /* end else */ done: @@ -810,9 +823,9 @@ H5D__contig_read(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) HDassert(dinfo->mem_space); HDassert(dinfo->file_space); - if (io_info->use_select_io) { - /* Only perform I/O if not performing multi dataset I/O with selection - * I/O, otherwise the higher level will handle it after all datasets + if (io_info->use_select_io == H5D_SELECTION_IO_MODE_ON) { + /* Only perform I/O if not performing multi dataset I/O or type conversion, + * otherwise the higher level will handle it after all datasets * have been processed */ if (H5D_LAYOUT_CB_PERFORM_IO(io_info)) { size_t dst_type_size = dinfo->type_info.dst_type_size; @@ -841,10 +854,17 @@ H5D__contig_read(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) io_info->addrs[io_info->pieces_added] = dinfo->store->contig.dset_addr; io_info->element_sizes[io_info->pieces_added] = dinfo->type_info.src_type_size; io_info->rbufs[io_info->pieces_added] = dinfo->buf.vp; + if (io_info->sel_pieces) + io_info->sel_pieces[io_info->pieces_added] = dinfo->layout_io_info.contig_piece_info; io_info->pieces_added++; } } - } /* end if */ + +#ifdef H5_HAVE_PARALLEL + /* Report that collective contiguous I/O was used */ + io_info->actual_io_mode |= H5D_MPIO_CONTIGUOUS_COLLECTIVE; +#endif /* H5_HAVE_PARALLEL */ + } /* end if */ else /* Read data through legacy (non-selection I/O) pathway */ if ((dinfo->io_ops.single_read)(io_info, dinfo) < 0) @@ -880,9 +900,9 @@ H5D__contig_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) HDassert(dinfo->mem_space); HDassert(dinfo->file_space); - if (io_info->use_select_io) { - /* Only perform I/O if not performing multi dataset I/O with selection - * I/O, otherwise the higher level will handle it after all datasets + if (io_info->use_select_io == H5D_SELECTION_IO_MODE_ON) { + /* Only perform I/O if not performing multi dataset I/O or type conversion, + * otherwise the higher level will handle it after all datasets * have been processed */ if (H5D_LAYOUT_CB_PERFORM_IO(io_info)) { size_t dst_type_size = dinfo->type_info.dst_type_size; @@ -911,10 +931,17 @@ H5D__contig_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) io_info->addrs[io_info->pieces_added] = dinfo->store->contig.dset_addr; io_info->element_sizes[io_info->pieces_added] = dinfo->type_info.dst_type_size; io_info->wbufs[io_info->pieces_added] = dinfo->buf.cvp; + if (io_info->sel_pieces) + io_info->sel_pieces[io_info->pieces_added] = dinfo->layout_io_info.contig_piece_info; io_info->pieces_added++; } } - } /* end if */ + +#ifdef H5_HAVE_PARALLEL + /* Report that collective contiguous I/O was used */ + io_info->actual_io_mode |= H5D_MPIO_CONTIGUOUS_COLLECTIVE; +#endif /* H5_HAVE_PARALLEL */ + } /* end if */ else /* Write data through legacy (non-selection I/O) pathway */ if ((dinfo->io_ops.single_write)(io_info, dinfo) < 0) diff --git a/src/H5Defl.c b/src/H5Defl.c index faa5e4def18..7509df74a56 100644 --- a/src/H5Defl.c +++ b/src/H5Defl.c @@ -213,7 +213,8 @@ H5D__efl_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) H5MM_memcpy(&dinfo->store->efl, &(dinfo->dset->shared->dcpl_cache.efl), sizeof(H5O_efl_t)); /* Disable selection I/O */ - io_info->use_select_io = FALSE; + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__efl_io_init() */ diff --git 
a/src/H5Dio.c b/src/H5Dio.c index 5ec0efe01e4..f6f743c62f9 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -43,14 +43,16 @@ /********************/ /* Setup/teardown routines */ -static herr_t H5D__ioinfo_init(size_t count, H5D_dset_io_info_t *dset_info, H5D_io_info_t *io_info); +static herr_t H5D__ioinfo_init(size_t count, H5D_io_op_type_t op_type, H5D_dset_io_info_t *dset_info, + H5D_io_info_t *io_info); static herr_t H5D__dset_ioinfo_init(H5D_t *dset, H5D_dset_io_info_t *dset_info, H5D_storage_t *store); static herr_t H5D__typeinfo_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, hid_t mem_type_id); static herr_t H5D__typeinfo_init_phase2(H5D_io_info_t *io_info); +static herr_t H5D__typeinfo_init_phase3(H5D_io_info_t *io_info); #ifdef H5_HAVE_PARALLEL static herr_t H5D__ioinfo_adjust(H5D_io_info_t *io_info); #endif /* H5_HAVE_PARALLEL */ -static herr_t H5D__typeinfo_term(H5D_io_info_t *io_info, size_t type_info_init); +static herr_t H5D__typeinfo_term(H5D_io_info_t *io_info); /*********************/ /* Package Variables */ @@ -77,7 +79,6 @@ herr_t H5D__read(size_t count, H5D_dset_io_info_t *dset_info) { H5D_io_info_t io_info; /* Dataset I/O info for multi dsets */ - size_t type_info_init = 0; /* Number of datatype info structs that have been initialized */ H5S_t *orig_mem_space_local; /* Local buffer for orig_mem_space */ H5S_t **orig_mem_space = NULL; /* If not NULL, ptr to an array of dataspaces */ /* containing the original memory spaces contained */ @@ -107,9 +108,8 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) FUNC_ENTER_NOAPI(FAIL) /* Init io_info */ - if (H5D__ioinfo_init(count, dset_info, &io_info) < 0) + if (H5D__ioinfo_init(count, H5D_IO_OP_READ, dset_info, &io_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info") - io_info.op_type = H5D_IO_OP_READ; /* Allocate store buffer if necessary */ if (count > 1) @@ -148,7 +148,6 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) /* Set up datatype info for operation */ if (H5D__typeinfo_init(&io_info, &(dset_info[i]), dset_info[i].mem_type_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up type info") - type_info_init++; /* Make certain that the number of elements in each selection is the same, and cache nelmts in * dset_info */ @@ -285,7 +284,6 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) } } /* end of for loop */ - HDassert(type_info_init == count); HDassert(io_op_init + io_skipped == count); /* If no datasets have I/O, we're done */ @@ -297,10 +295,17 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up type info (second phase)") #ifdef H5_HAVE_PARALLEL - /* Adjust I/O info for any parallel I/O */ + /* Adjust I/O info for any parallel or selection I/O */ if (H5D__ioinfo_adjust(&io_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to adjust I/O info for parallel I/O") -#endif /*H5_HAVE_PARALLEL*/ + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, + "unable to adjust I/O info for parallel or selection I/O") +#endif /* H5_HAVE_PARALLEL */ + + /* Perform third phase of type info initialization */ + if (H5D__typeinfo_init_phase3(&io_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up type info (third phase)") + + H5CX_set_no_selection_io_cause(io_info.no_selection_io_cause); /* If multi dataset I/O callback is not provided, perform read IO via * single-dset path with looping */ @@ -339,6 +344,7 @@ H5D__read(size_t count, H5D_dset_io_info_t 
*dset_info) else { haddr_t prev_tag = HADDR_UNDEF; + /* Allocate selection I/O parameter arrays if necessary */ if (!H5D_LAYOUT_CB_PERFORM_IO(&io_info) && io_info.piece_count > 0) { if (NULL == (io_info.mem_spaces = H5MM_malloc(io_info.piece_count * sizeof(H5S_t *)))) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, @@ -354,6 +360,11 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) if (NULL == (io_info.rbufs = H5MM_malloc(io_info.piece_count * sizeof(void *)))) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for read buffer list") + if (io_info.max_tconv_type_size > 0) + if (NULL == + (io_info.sel_pieces = H5MM_malloc(io_info.piece_count * sizeof(io_info.sel_pieces[0])))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "unable to allocate array of selected pieces") } /* Loop with serial & single-dset read IO path */ @@ -373,15 +384,39 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) H5AC_tag(prev_tag, NULL); } - /* Make final multi dataset selection I/O call if we are using both - * features - in this case the multi_read callbacks did not perform the - * actual I/O */ - H5_CHECK_OVERFLOW(io_info.pieces_added, size_t, uint32_t) - if (!H5D_LAYOUT_CB_PERFORM_IO(&io_info)) - if (H5F_shared_select_read(io_info.f_sh, H5FD_MEM_DRAW, (uint32_t)io_info.pieces_added, - io_info.mem_spaces, io_info.file_spaces, io_info.addrs, - io_info.element_sizes, io_info.rbufs) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "selection read failed") + /* Make final selection I/O call if the multi_read callbacks did not perform the actual I/O + * (if using selection I/O and either multi dataset or type conversion) */ + if (!H5D_LAYOUT_CB_PERFORM_IO(&io_info)) { + /* Check for type conversion */ + if (io_info.max_tconv_type_size > 0) { + /* Type conversion pathway */ + if (H5D__scatgath_read_select(&io_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "type conversion selection read failed") + } + else { + /* Call selection I/O directly */ + H5_CHECK_OVERFLOW(io_info.pieces_added, size_t, uint32_t) + if (H5F_shared_select_read(io_info.f_sh, H5FD_MEM_DRAW, (uint32_t)io_info.pieces_added, + io_info.mem_spaces, io_info.file_spaces, io_info.addrs, + io_info.element_sizes, io_info.rbufs) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "selection read failed") + } + } + +#ifdef H5_HAVE_PARALLEL + /* Report the actual I/O mode to the application if appropriate */ + if (io_info.using_mpi_vfd) { + H5FD_mpio_xfer_t xfer_mode; /* Parallel transfer for this request */ + + /* Get the parallel I/O transfer mode */ + if (H5CX_get_io_xfer_mode(&xfer_mode) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get MPI-I/O transfer mode") + + /* Only report the collective I/O mode if we're actually performing collective I/O */ + if (xfer_mode == H5FD_MPIO_COLLECTIVE) + H5CX_set_mpio_actual_io_mode(io_info.actual_io_mode); + } +#endif /* H5_HAVE_PARALLEL */ } done: @@ -392,7 +427,7 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down I/O op info") /* Shut down datatype info for operation */ - if (H5D__typeinfo_term(&io_info, type_info_init) < 0) + if (H5D__typeinfo_term(&io_info) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down type info") /* Discard projected mem spaces and restore originals */ @@ -443,7 +478,6 @@ herr_t H5D__write(size_t count, H5D_dset_io_info_t *dset_info) { H5D_io_info_t io_info; /* Dataset I/O info for multi dsets */ - size_t type_info_init = 0; /* 
Number of datatype info structs that have been initialized */ H5S_t *orig_mem_space_local; /* Local buffer for orig_mem_space */ H5S_t **orig_mem_space = NULL; /* If not NULL, ptr to an array of dataspaces */ /* containing the original memory spaces contained */ @@ -471,9 +505,8 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) FUNC_ENTER_NOAPI(FAIL) /* Init io_info */ - if (H5D__ioinfo_init(count, dset_info, &io_info) < 0) + if (H5D__ioinfo_init(count, H5D_IO_OP_WRITE, dset_info, &io_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info") - io_info.op_type = H5D_IO_OP_WRITE; /* Allocate store buffer if necessary */ if (count > 1) @@ -509,9 +542,8 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) /* Set up datatype info for operation */ if (H5D__typeinfo_init(&io_info, &(dset_info[i]), dset_info[i].mem_type_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up type info") - type_info_init++; - /* Various MPI based checks */ + /* Various MPI based checks */ #ifdef H5_HAVE_PARALLEL if (H5F_HAS_FEATURE(dset_info[i].dset->oloc.file, H5FD_FEAT_HAS_MPI)) { /* If MPI based VFD is used, no VL or region reference datatype support yet. */ @@ -660,7 +692,6 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) H5AC_tag(prev_tag, NULL); } /* end of for loop */ - HDassert(type_info_init == count); HDassert(io_op_init == count); /* Perform second phase of type info initialization */ @@ -668,10 +699,17 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up type info (second phase)") #ifdef H5_HAVE_PARALLEL - /* Adjust I/O info for any parallel I/O */ + /* Adjust I/O info for any parallel or selection I/O */ if (H5D__ioinfo_adjust(&io_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to adjust I/O info for parallel I/O") -#endif /*H5_HAVE_PARALLEL*/ + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, + "unable to adjust I/O info for parallel or selection I/O") +#endif /* H5_HAVE_PARALLEL */ + + /* Perform third phase of type info initialization */ + if (H5D__typeinfo_init_phase3(&io_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up type info (third phase)") + + H5CX_set_no_selection_io_cause(io_info.no_selection_io_cause); /* If multi dataset I/O callback is not provided, perform write IO via * single-dset path with looping */ @@ -710,6 +748,7 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) else { haddr_t prev_tag = HADDR_UNDEF; + /* Allocate selection I/O parameter arrays if necessary */ if (!H5D_LAYOUT_CB_PERFORM_IO(&io_info) && io_info.piece_count > 0) { if (NULL == (io_info.mem_spaces = H5MM_malloc(io_info.piece_count * sizeof(H5S_t *)))) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, @@ -724,7 +763,12 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) "memory allocation failed for element size list") if (NULL == (io_info.wbufs = H5MM_malloc(io_info.piece_count * sizeof(const void *)))) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, - "memory allocation failed for read buffer list") + "memory allocation failed for write buffer list") + if (io_info.max_tconv_type_size > 0) + if (NULL == + (io_info.sel_pieces = H5MM_malloc(io_info.piece_count * sizeof(io_info.sel_pieces[0])))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "unable to allocate array of selected pieces") } /* loop with serial & single-dset write IO path */ @@ -742,15 +786,45 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) 
H5AC_tag(prev_tag, NULL); } - /* Make final multi dataset selection I/O call if we are using both - * features - in this case the multi_write callbacks did not perform the - * actual I/O */ - H5_CHECK_OVERFLOW(io_info.pieces_added, size_t, uint32_t) - if (!H5D_LAYOUT_CB_PERFORM_IO(&io_info)) - if (H5F_shared_select_write(io_info.f_sh, H5FD_MEM_DRAW, (uint32_t)io_info.pieces_added, - io_info.mem_spaces, io_info.file_spaces, io_info.addrs, - io_info.element_sizes, io_info.wbufs) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "selection write failed") + /* Make final selection I/O call if the multi_write callbacks did not perform the actual I/O + * (if using selection I/O and either multi dataset or type conversion) */ + if (!H5D_LAYOUT_CB_PERFORM_IO(&io_info)) { + /* Check for type conversion */ + if (io_info.max_tconv_type_size > 0) { + /* Type conversion pathway */ + if (H5D__scatgath_write_select(&io_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "type conversion selection write failed") + } + else { + /* Call selection I/O directly */ + H5_CHECK_OVERFLOW(io_info.pieces_added, size_t, uint32_t) + if (H5F_shared_select_write(io_info.f_sh, H5FD_MEM_DRAW, (uint32_t)io_info.pieces_added, + io_info.mem_spaces, io_info.file_spaces, io_info.addrs, + io_info.element_sizes, io_info.wbufs) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "selection write failed") + } + } + +#ifdef H5_HAVE_PARALLEL + /* Report the actual I/O mode to the application if appropriate */ + if (io_info.using_mpi_vfd) { + H5FD_mpio_xfer_t xfer_mode; /* Parallel transfer for this request */ + + /* Get the parallel I/O transfer mode */ + if (H5CX_get_io_xfer_mode(&xfer_mode) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get MPI-I/O transfer mode") + + /* Only report the collective I/O mode if we're actually performing collective I/O */ + if (xfer_mode == H5FD_MPIO_COLLECTIVE) { + H5CX_set_mpio_actual_io_mode(io_info.actual_io_mode); + + /* If we did selection I/O, report that we used "link chunk" mode, since that's the most + * analogous to what selection I/O does */ + if (io_info.use_select_io == H5D_SELECTION_IO_MODE_ON) + H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_LINK_CHUNK); + } + } +#endif /* H5_HAVE_PARALLEL */ } #ifdef OLD_WAY @@ -781,7 +855,7 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) } /* Shut down datatype info for operation */ - if (H5D__typeinfo_term(&io_info, type_info_init) < 0) + if (H5D__typeinfo_term(&io_info) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down type info") /* Discard projected mem spaces and restore originals */ @@ -819,8 +893,11 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) *------------------------------------------------------------------------- */ static herr_t -H5D__ioinfo_init(size_t count, H5D_dset_io_info_t *dset_info, H5D_io_info_t *io_info) +H5D__ioinfo_init(size_t count, H5D_io_op_type_t op_type, H5D_dset_io_info_t *dset_info, + H5D_io_info_t *io_info) { + H5D_selection_io_mode_t selection_io_mode; + FUNC_ENTER_PACKAGE_NOERR /* check args */ @@ -833,8 +910,9 @@ H5D__ioinfo_init(size_t count, H5D_dset_io_info_t *dset_info, H5D_io_info_t *io_ HDmemset(io_info, 0, sizeof(*io_info)); /* Set up simple fields */ - io_info->f_sh = count > 0 ? H5F_SHARED(dset_info[0].dset->oloc.file) : NULL; - io_info->count = count; + io_info->op_type = op_type; + io_info->f_sh = count > 0 ? H5F_SHARED(dset_info[0].dset->oloc.file) : NULL; + io_info->count = count; /* Start without multi-dataset I/O ops. 
If we're not using the collective * I/O path then we will call the single dataset callbacks in a loop. */ @@ -842,16 +920,31 @@ H5D__ioinfo_init(size_t count, H5D_dset_io_info_t *dset_info, H5D_io_info_t *io_ /* Use provided dset_info */ io_info->dsets_info = dset_info; - /* Start with selection I/O on if the global is on, layout callback will - * turn it off if appropriate */ - io_info->use_select_io = H5_use_selection_io_g; + /* Start with selection I/O mode from property list. If enabled, layout callback will turn it off if it + * is not supported by the layout. Handling of H5D_SELECTION_IO_MODE_AUTO occurs in H5D__ioinfo_adjust. + */ + H5CX_get_selection_io_mode(&selection_io_mode); + io_info->use_select_io = selection_io_mode; + + /* Record no selection I/O cause if it was disabled by the API */ + if (selection_io_mode == H5D_SELECTION_IO_MODE_OFF) + io_info->no_selection_io_cause = H5D_SEL_IO_DISABLE_BY_API; #ifdef H5_HAVE_PARALLEL + /* Determine if the file was opened with an MPI VFD */ if (count > 0) io_info->using_mpi_vfd = H5F_HAS_FEATURE(dset_info[0].dset->oloc.file, H5FD_FEAT_HAS_MPI); #endif /* H5_HAVE_PARALLEL */ + /* Check if we could potentially use in-place type conversion */ + if (op_type == H5D_IO_OP_READ) + /* Always on for read (modulo other restrictions that are handled in layout callbacks) */ + io_info->may_use_in_place_tconv = TRUE; + else + /* Only enable in-place type conversion if we're allowed to modify the write buffer */ + H5CX_get_modify_write_buf(&io_info->may_use_in_place_tconv); + FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__ioinfo_init() */ @@ -998,9 +1091,9 @@ H5D__typeinfo_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, hid_t /* Check if the datatypes are compound subsets of one another */ type_info->cmpd_subset = H5T_path_compound_subset(type_info->tpath); - /* Update io_info->max_type_size */ - io_info->max_type_size = - MAX3(io_info->max_type_size, type_info->src_type_size, type_info->dst_type_size); + /* Update io_info->max_tconv_type_size */ + io_info->max_tconv_type_size = + MAX3(io_info->max_tconv_type_size, type_info->src_type_size, type_info->dst_type_size); /* Check if we need a background buffer */ if ((io_info->op_type == H5D_IO_OP_WRITE) && H5T_detect_class(dset->shared->type, H5T_VLEN, FALSE)) @@ -1025,8 +1118,12 @@ H5D__typeinfo_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, hid_t /*------------------------------------------------------------------------- * Function: H5D__typeinfo_init_phase2 * - * Purpose: Finish initializing type info for all datasets after - * calculating the max type size across all datasets. + * Purpose: Continue initializing type info for all datasets after + * calculating the max type size across all datasets, and + * before final determination of collective/independent in + * H5D__ioinfo_adjust(). Currently just checks to see if + * selection I/O can be used with type conversion, and sets + * no_collective_cause flags related to selection I/O. * * Return: Non-negative on success/Negative on failure * @@ -1042,89 +1139,65 @@ H5D__typeinfo_init_phase2(H5D_io_info_t *io_info) /* check args */ HDassert(io_info); - /* Check if we need to allocate a shared type conversion buffer */ - if (io_info->max_type_size) { - void *tconv_buf; /* Temporary conversion buffer pointer */ - void *bkgr_buf; /* Background conversion buffer pointer */ + /* If selection I/O mode is default (auto), enable it here if the VFD supports it (it will be turned off + * later if something else conflicts), otherwise disable it. 
If we're using the MPIO VFD, the automatic + * selection will happen in H5D__mpio_opt_possible() inside H5D__ioinfo_adjust(). */ +#ifdef H5_HAVE_PARALLEL + if (!io_info->using_mpi_vfd) +#endif /* H5_HAVE_PARALLEL */ + if (io_info->use_select_io == H5D_SELECTION_IO_MODE_DEFAULT) { + if (H5F_has_vector_select_io(io_info->dsets_info[0].dset->oloc.file, + io_info->op_type == H5D_IO_OP_WRITE)) + io_info->use_select_io = H5D_SELECTION_IO_MODE_ON; + else { + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_DEFAULT_OFF; + } + } + + /* If we're doing type conversion and we might be doing selection I/O, check if the buffers are large + * enough to handle the whole I/O */ + if (io_info->max_tconv_type_size && io_info->use_select_io != H5D_SELECTION_IO_MODE_OFF) { size_t max_temp_buf; /* Maximum temporary buffer size */ - size_t target_size; /* Desired buffer size */ size_t i; /* Local index variable */ - /* Get info from API context */ - if (H5CX_get_max_temp_buf(&max_temp_buf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve max. temp. buf size") - if (H5CX_get_tconv_buf(&tconv_buf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve temp. conversion buffer pointer") - if (H5CX_get_bkgr_buf(&bkgr_buf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve background conversion buffer pointer") - - /* Set up datatype conversion/background buffers */ - target_size = max_temp_buf; - - /* If the buffer is too small to hold even one element (in the dataset with the largest , try to make - * it bigger */ - if (target_size < io_info->max_type_size) { - hbool_t default_buffer_info; /* Whether the buffer information are the defaults */ - - /* Detect if we have all default settings for buffers */ - default_buffer_info = - (hbool_t)((H5D_TEMP_BUF_SIZE == max_temp_buf) && (NULL == tconv_buf) && (NULL == bkgr_buf)); + /* Collective I/O, conversion buffer must be large enough for entire I/O (for now) */ - /* Check if we are using the default buffer info */ - if (default_buffer_info) - /* OK to get bigger for library default settings */ - target_size = io_info->max_type_size; - else - /* Don't get bigger than the application has requested */ - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "temporary buffer max size is too small") - } /* end if */ - - /* Get a temporary buffer for type conversion unless the app has already - * supplied one through the xfer properties. Instead of allocating a - * buffer which is the exact size, we allocate the target size. This - * buffer is shared among all datasets in the operation, unlike for the - * background buffer, where each dataset gets its own. 
*/ - if (NULL == (io_info->tconv_buf = (uint8_t *)tconv_buf)) { - /* Allocate temporary buffer */ - if (NULL == (io_info->tconv_buf = H5FL_BLK_MALLOC(type_conv, target_size))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for type conversion") - io_info->tconv_buf_allocated = TRUE; - } /* end if */ - - /* Don't use API provided background buffer if there's more than one dataset, since each - * dataset needs its own */ - if (io_info->count > 1) - bkgr_buf = NULL; - - /* Iterate over datasets */ + /* Calculate size of background buffer (tconv buf size was calculated in layout io_init callbacks) + */ for (i = 0; i < io_info->count; i++) { H5D_type_info_t *type_info = &io_info->dsets_info[i].type_info; - /* Compute the number of elements that will fit into buffer */ - type_info->request_nelmts = target_size / MAX(type_info->src_type_size, type_info->dst_type_size); - ; + /* Check for background buffer */ + if (type_info->need_bkg) { + /* Add size of this dataset's background buffer to the global background buffer size + */ + io_info->bkg_buf_size += io_info->dsets_info[i].nelmts * type_info->dst_type_size; - /* Sanity check elements in temporary buffer */ - if (type_info->request_nelmts == 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "temporary buffer max size is too small") - - /* Allocate background buffer if necessary */ - if (type_info->need_bkg && NULL == (type_info->bkg_buf = (uint8_t *)bkgr_buf)) { - size_t bkg_size; /* Desired background buffer size */ + /* Check if we need to fill the background buffer with the destination contents */ + if (type_info->need_bkg == H5T_BKG_YES) + io_info->must_fill_bkg = TRUE; + } + } - /* Compute the background buffer size */ - /* (don't try to use buffers smaller than the default size) */ - bkg_size = type_info->request_nelmts * type_info->dst_type_size; - if (bkg_size < max_temp_buf) - bkg_size = max_temp_buf; + /* Get max temp buffer size from API context */ + if (H5CX_get_max_temp_buf(&max_temp_buf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve max. temp. buf size") - /* Allocate background buffer */ - /* (Need calloc()-like call since memory needs to be initialized) */ - if (NULL == (type_info->bkg_buf = H5FL_BLK_CALLOC(type_conv, bkg_size))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, - "memory allocation failed for background conversion") - type_info->bkg_buf_allocated = TRUE; - } /* end if */ + /* Check if the needed type conversion or background buffer size is too big */ + if (io_info->tconv_buf_size > max_temp_buf) { + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_TCONV_BUF_TOO_SMALL; + io_info->tconv_buf_size = 0; + io_info->bkg_buf_size = 0; + io_info->must_fill_bkg = FALSE; + } + if (io_info->bkg_buf_size > max_temp_buf) { + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_BKG_BUF_TOO_SMALL; + io_info->tconv_buf_size = 0; + io_info->bkg_buf_size = 0; + io_info->must_fill_bkg = FALSE; } } @@ -1133,15 +1206,13 @@ H5D__typeinfo_init_phase2(H5D_io_info_t *io_info) } /* end H5D__typeinfo_init_phase2() */ #ifdef H5_HAVE_PARALLEL - /*------------------------------------------------------------------------- - * Function: H5D__ioinfo_adjust - * - * Purpose: Adjust operation's I/O info for any parallel I/O + * Function: H5D__ioinfo_adjust * - * This was derived from H5D__ioinfo_adjust for multi-dset work. 
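Because the selection-I/O path now sizes the conversion and background buffers for the entire request, the per-transfer buffer limit checked above (max_temp_buf, i.e. the size argument of H5Pset_buffer(), default H5D_TEMP_BUF_SIZE) also decides whether selection I/O stays enabled. A hedged sketch of raising that limit so a type-converting transfer is not pushed back to the strip-mined path; handles and the chosen size are illustrative:

hid_t dxpl_id = H5Pcreate(H5P_DATASET_XFER);

/* Allow up to 64 MiB for the shared conversion/background buffers so the
 * checks above do not set H5D_SEL_IO_TCONV_BUF_TOO_SMALL or
 * H5D_SEL_IO_BKG_BUF_TOO_SMALL; NULL lets the library allocate the buffers */
H5Pset_buffer(dxpl_id, (size_t)64 * 1024 * 1024, NULL, NULL);

H5Dwrite(dset_id, mem_type_id, mem_space_id, file_space_id, dxpl_id, wbuf);
H5Pclose(dxpl_id);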
+ * Purpose: Adjust operation's I/O info for any parallel I/O, also + * handle decision on selection I/O even in serial case * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1193,7 +1264,7 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info) * selection I/O is to be used - in this case the file driver will * handle collective I/O */ /* Check for selection/vector support in file driver? -NAF */ - if (!io_info->use_select_io) { + if (io_info->use_select_io == H5D_SELECTION_IO_MODE_OFF) { io_info->md_io_ops.multi_read_md = H5D__collective_read; io_info->md_io_ops.multi_write_md = H5D__collective_write; io_info->md_io_ops.single_read_md = H5D__mpio_select_read; @@ -1269,6 +1340,137 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info) } /* end H5D__ioinfo_adjust() */ #endif /* H5_HAVE_PARALLEL */ +/*------------------------------------------------------------------------- + * Function: H5D__typeinfo_init_phase3 + * + * Purpose: Finish initializing type info for all datasets after + * calculating the max type size across all datasets. And + * after final collective/independent determination in + * H5D__ioinfo_adjust(). + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__typeinfo_init_phase3(H5D_io_info_t *io_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* check args */ + HDassert(io_info); + + /* Check if we need to allocate a shared type conversion buffer */ + if (io_info->max_tconv_type_size) { + void *tconv_buf; /* Temporary conversion buffer pointer */ + void *bkgr_buf; /* Background conversion buffer pointer */ + + /* Get provided buffers from API context */ + if (H5CX_get_tconv_buf(&tconv_buf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve temp. conversion buffer pointer") + if (H5CX_get_bkgr_buf(&bkgr_buf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve background conversion buffer pointer") + + /* Check if we're doing selection I/O */ + if (io_info->use_select_io == H5D_SELECTION_IO_MODE_ON) { + /* Selection I/O, conversion buffers must be large enough for entire I/O (for now) */ + + /* Allocate global type conversion buffer (if any, could be none if datasets in this + * I/O have 0 elements selected) */ + /* Allocating large buffers here will blow out all other type conversion buffers + * on the free list. Should we change this to a regular malloc? Would require + * keeping track of which version of free() to call. 
-NAF */ + if (io_info->tconv_buf_size > 0) { + if (NULL == (io_info->tconv_buf = H5FL_BLK_MALLOC(type_conv, io_info->tconv_buf_size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "memory allocation failed for type conversion") + io_info->tconv_buf_allocated = TRUE; + } + + /* Allocate global background buffer (if any) */ + if (io_info->bkg_buf_size > 0) { + if (NULL == (io_info->bkg_buf = H5FL_BLK_MALLOC(type_conv, io_info->bkg_buf_size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "memory allocation failed for type conversion") + io_info->bkg_buf_allocated = TRUE; + } + } + else { + /* No selection I/O, only need to make sure it's big enough for one element */ + size_t max_temp_buf; /* Maximum temporary buffer size */ + size_t target_size; /* Desired buffer size */ + size_t i; + + /* Make sure selection I/O is disabled (DEFAULT should have been handled by now) */ + HDassert(io_info->use_select_io == H5D_SELECTION_IO_MODE_OFF); + + /* Get max buffer size from API context */ + if (H5CX_get_max_temp_buf(&max_temp_buf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve max. temp. buf size") + + /* Set up datatype conversion/background buffers */ + target_size = max_temp_buf; + + /* If the buffer is too small to hold even one element (in the dataset with the largest , try to + * make it bigger */ + if (target_size < io_info->max_tconv_type_size) { + hbool_t default_buffer_info; /* Whether the buffer information are the defaults */ + + /* Detect if we have all default settings for buffers */ + default_buffer_info = (hbool_t)((H5D_TEMP_BUF_SIZE == max_temp_buf) && (NULL == tconv_buf) && + (NULL == bkgr_buf)); + + /* Check if we are using the default buffer info */ + if (default_buffer_info) + /* OK to get bigger for library default settings */ + target_size = io_info->max_tconv_type_size; + else + /* Don't get bigger than the application has requested */ + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "temporary buffer max size is too small") + } /* end if */ + + /* Get a temporary buffer for type conversion unless the app has already + * supplied one through the xfer properties. Instead of allocating a + * buffer which is the exact size, we allocate the target size. This + * buffer is shared among all datasets in the operation. */ + if (NULL == (io_info->tconv_buf = (uint8_t *)tconv_buf)) { + /* Allocate temporary buffer */ + if (NULL == (io_info->tconv_buf = H5FL_BLK_MALLOC(type_conv, target_size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "memory allocation failed for type conversion") + io_info->tconv_buf_allocated = TRUE; + } /* end if */ + + /* Iterate over datasets */ + for (i = 0; i < io_info->count; i++) { + H5D_type_info_t *type_info = &io_info->dsets_info[i].type_info; + + /* Compute the number of elements that will fit into buffer */ + type_info->request_nelmts = + target_size / MAX(type_info->src_type_size, type_info->dst_type_size); + + /* Check if we need a background buffer and one hasn't been allocated yet */ + if (type_info->need_bkg && (NULL == io_info->bkg_buf) && + (NULL == (io_info->bkg_buf = (uint8_t *)bkgr_buf))) { + /* Allocate background buffer with the same size as the type conversion buffer. 
We can do + * this since the number of elements that fit in the type conversion buffer will never be + * larger than the number that could fit in a background buffer of equal size, since the + * tconv element size is max(src, dst) and the bkg element size is dst */ + if (NULL == (io_info->bkg_buf = H5FL_BLK_MALLOC(type_conv, target_size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "memory allocation failed for background conversion") + io_info->bkg_buf_allocated = TRUE; + } + } + } + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__typeinfo_init_phase3() */ + /*------------------------------------------------------------------------- * Function: H5D__typeinfo_term * @@ -1282,10 +1484,8 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info) *------------------------------------------------------------------------- */ static herr_t -H5D__typeinfo_term(H5D_io_info_t *io_info, size_t type_info_init) +H5D__typeinfo_term(H5D_io_info_t *io_info) { - size_t i; - FUNC_ENTER_PACKAGE_NOERR /* Check for releasing datatype conversion & background buffers */ @@ -1293,11 +1493,10 @@ H5D__typeinfo_term(H5D_io_info_t *io_info, size_t type_info_init) HDassert(io_info->tconv_buf); (void)H5FL_BLK_FREE(type_conv, io_info->tconv_buf); } /* end if */ - for (i = 0; i < type_info_init; i++) - if (io_info->dsets_info[i].type_info.bkg_buf_allocated) { - HDassert(io_info->dsets_info[i].type_info.bkg_buf); - (void)H5FL_BLK_FREE(type_conv, io_info->dsets_info[i].type_info.bkg_buf); - } /* end if */ + if (io_info->bkg_buf_allocated) { + HDassert(io_info->bkg_buf); + (void)H5FL_BLK_FREE(type_conv, io_info->bkg_buf); + } /* end if */ FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__typeinfo_term() */ diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 8e413c14bad..77edfc4625d 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -626,13 +626,17 @@ H5D__mpio_opt_possible(H5D_io_info_t *io_info) if (!H5FD_mpi_opt_types_g) local_cause[0] |= H5D_MPIO_MPI_OPT_TYPES_ENV_VAR_DISABLED; - /* Don't allow collective operations if datatype conversions need to happen */ - if (!type_info->is_conv_noop) - local_cause[0] |= H5D_MPIO_DATATYPE_CONVERSION; - - /* Don't allow collective operations if data transform operations should occur */ - if (!type_info->is_xform_noop) - local_cause[0] |= H5D_MPIO_DATA_TRANSFORMS; + /* Datatype conversions and transformations are allowed with selection I/O. If the selection I/O mode + * is auto (default), disable collective for now and re-enable later if we can */ + if (io_info->use_select_io != H5D_SELECTION_IO_MODE_ON) { + /* Don't allow collective operations if datatype conversions need to happen */ + if (!type_info->is_conv_noop) + local_cause[0] |= H5D_MPIO_DATATYPE_CONVERSION; + + /* Don't allow collective operations if data transform operations should occur */ + if (!type_info->is_xform_noop) + local_cause[0] |= H5D_MPIO_DATA_TRANSFORMS; + } /* Check whether these are both simple or scalar dataspaces */ if (!((H5S_SIMPLE == H5S_GET_EXTENT_TYPE(mem_space) || @@ -662,6 +666,15 @@ H5D__mpio_opt_possible(H5D_io_info_t *io_info) local_cause[0] |= H5D_MPIO_PARALLEL_FILTERED_WRITES_DISABLED; #endif + /* Check if we would be able to perform collective if we could use selection I/O. 
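With the relaxed check above, a datatype conversion or data transform no longer breaks collective I/O on its own when selection I/O is (or can become) enabled. When collective access still falls back to independent, the reason can be inspected after the transfer with the existing query shown below; the H5D_MPIO_NO_SELECTION_IO bit it tests is the new cause added a few lines further down. This is a fragment: dxpl_id is assumed to be the transfer plist used for the collective write and stdio is omitted:

uint32_t local_cause = 0, global_cause = 0;

/* Ask why (if at all) the last transfer broke collective I/O */
H5Pget_mpio_no_collective_cause(dxpl_id, &local_cause, &global_cause);

if (global_cause & H5D_MPIO_DATATYPE_CONVERSION)
    printf("collective broken: datatype conversion with selection I/O off\n");
if (global_cause & H5D_MPIO_NO_SELECTION_IO)
    printf("collective broken: selection/vector I/O not possible; "
           "see H5Pget_no_selection_io_cause()\n");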
If so add reasons + * for not using selection I/O to local_cause[0] */ + if ((io_info->use_select_io == H5D_SELECTION_IO_MODE_OFF) && local_cause[0] && + !(local_cause[0] & + ~((unsigned)H5D_MPIO_DATATYPE_CONVERSION | (unsigned)H5D_MPIO_DATA_TRANSFORMS))) { + HDassert(io_info->no_selection_io_cause & H5D_MPIO_NO_SELECTION_IO_CAUSES); + local_cause[0] |= H5D_MPIO_NO_SELECTION_IO; + } + /* Check if we are able to do a MPI_Bcast of the data from one rank * instead of having all the processes involved in the collective I/O call. */ @@ -722,6 +735,25 @@ H5D__mpio_opt_possible(H5D_io_info_t *io_info) HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code) } /* end else */ + /* If the selection I/O mode is default (auto), decide here whether it should be on or off */ + if (io_info->use_select_io == H5D_SELECTION_IO_MODE_DEFAULT) { + /* If the only reason(s) we've disabled collective are type conversions and/or transforms, enable + * selection I/O and re-enable collective I/O since it's supported by selection I/O */ + if (global_cause[0] && !(global_cause[0] & ~((unsigned)H5D_MPIO_DATATYPE_CONVERSION | + (unsigned)H5D_MPIO_DATA_TRANSFORMS))) { + HDassert(!(local_cause[0] & + ~((unsigned)H5D_MPIO_DATATYPE_CONVERSION | (unsigned)H5D_MPIO_DATA_TRANSFORMS))); + local_cause[0] = 0; + global_cause[0] = 0; + io_info->use_select_io = H5D_SELECTION_IO_MODE_ON; + } + else { + /* Otherwise, there's currently no benefit to selection I/O, so leave it off */ + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_DEFAULT_OFF; + } + } + /* Set the local & global values of no-collective-cause in the API context */ H5CX_set_mpio_local_no_coll_cause(local_cause[0]); H5CX_set_mpio_global_no_coll_cause(global_cause[0]); @@ -774,7 +806,7 @@ H5D__mpio_get_no_coll_cause_strings(char *local_cause, size_t local_cause_len, c * Use compile-time assertion so this routine is updated * when any new "no collective cause" values are added */ - HDcompile_assert(H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE == (H5D_mpio_no_collective_cause_t)256); + HDcompile_assert(H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE == (H5D_mpio_no_collective_cause_t)0x200); /* Initialize output buffers */ if (local_cause) @@ -827,6 +859,11 @@ H5D__mpio_get_no_coll_cause_strings(char *local_cause, size_t local_cause_len, c case H5D_MPIO_ERROR_WHILE_CHECKING_COLLECTIVE_POSSIBLE: cause_str = "an error occurred while checking if collective I/O was possible"; break; + case H5D_MPIO_NO_SELECTION_IO: + cause_str = "collective I/O may be supported by selection or vector I/O but that feature was " + "not possible (see causes via H5Pget_no_selection_io_cause())"; + break; + case H5D_MPIO_COLLECTIVE: case H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE: default: @@ -1438,13 +1475,8 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran HDassert(io_info->dsets_info[i].dset->shared->dcpl_cache.pline.nused == 0); if (io_info->dsets_info[i].layout->type == H5D_CHUNKED) actual_io_mode |= H5D_MPIO_CHUNK_COLLECTIVE; - else if (io_info->dsets_info[i].layout->type == H5D_CONTIGUOUS) { + else if (io_info->dsets_info[i].layout->type == H5D_CONTIGUOUS) actual_io_mode |= H5D_MPIO_CONTIGUOUS_COLLECTIVE; - - /* if only single-dset */ - if (1 == io_info->count) - actual_chunk_opt_mode = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - } else HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout") } diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 0906028d33c..4a23fe7a643 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -67,7 +67,49 @@ #define 
H5D_BT2_MERGE_PERC 40 /* Macro to determine if the layout I/O callback should perform I/O */ -#define H5D_LAYOUT_CB_PERFORM_IO(IO_INFO) (!(IO_INFO)->use_select_io || (IO_INFO)->count == 1) +#define H5D_LAYOUT_CB_PERFORM_IO(IO_INFO) \ + (((IO_INFO)->use_select_io == H5D_SELECTION_IO_MODE_OFF) || \ + ((IO_INFO)->count == 1 && (IO_INFO)->max_tconv_type_size == 0)) + +/* Macro to check if in-place type conversion will be used for a piece and add it to the global type + * conversion size if it won't be used */ +#define H5D_INIT_PIECE_TCONV(IO_INFO, DINFO, PIECE_INFO) \ + { \ + /* Check for potential in-place conversion */ \ + if ((IO_INFO)->may_use_in_place_tconv) { \ + size_t mem_type_size = ((IO_INFO)->op_type == H5D_IO_OP_READ) \ + ? (DINFO)->type_info.dst_type_size \ + : (DINFO)->type_info.src_type_size; \ + size_t file_type_size = ((IO_INFO)->op_type == H5D_IO_OP_READ) \ + ? (DINFO)->type_info.src_type_size \ + : (DINFO)->type_info.dst_type_size; \ + \ + /* Make sure the memory type is not smaller than the file type, otherwise the memory buffer \ + * won't be big enough to serve as the type conversion buffer */ \ + if (mem_type_size >= file_type_size) { \ + hbool_t is_contig; \ + hsize_t sel_off; \ + \ + /* Check if the space is contiguous */ \ + if (H5S_select_contig_block((PIECE_INFO)->mspace, &is_contig, &sel_off, NULL) < 0) \ + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't check if dataspace is contiguous") \ + \ + /* If the first sequence includes all the elements selected in this piece, it it contiguous \ + */ \ + if (is_contig) { \ + H5_CHECK_OVERFLOW(sel_off, hsize_t, size_t); \ + (PIECE_INFO)->in_place_tconv = TRUE; \ + (PIECE_INFO)->buf_off = (size_t)sel_off * mem_type_size; \ + } \ + } \ + } \ + \ + /* If we're not using in-place type conversion, add this piece to global type conversion buffer \ + * size. This will only be used if we must allocate a type conversion buffer for the entire I/O. 
*/ \ + if (!(PIECE_INFO)->in_place_tconv) \ + (IO_INFO)->tconv_buf_size += (PIECE_INFO)->piece_points * MAX((DINFO)->type_info.src_type_size, \ + (DINFO)->type_info.dst_type_size); \ + } /****************************/ /* Package Private Typedefs */ @@ -83,15 +125,13 @@ typedef struct H5D_type_info_t { hid_t dst_type_id; /* Destination datatype ID */ /* Computed/derived values */ - size_t src_type_size; /* Size of source type */ - size_t dst_type_size; /* Size of destination type */ - hbool_t is_conv_noop; /* Whether the type conversion is a NOOP */ - hbool_t is_xform_noop; /* Whether the data transform is a NOOP */ - const H5T_subset_info_t *cmpd_subset; /* Info related to the compound subset conversion functions */ - H5T_bkg_t need_bkg; /* Type of background buf needed */ - size_t request_nelmts; /* Requested strip mine */ - uint8_t *bkg_buf; /* Background buffer */ - hbool_t bkg_buf_allocated; /* Whether the background buffer was allocated */ + size_t src_type_size; /* Size of source type */ + size_t dst_type_size; /* Size of destination type */ + hbool_t is_conv_noop; /* Whether the type conversion is a NOOP */ + hbool_t is_xform_noop; /* Whether the data transform is a NOOP */ + const H5T_subset_info_t *cmpd_subset; /* Info related to the compound subset conversion functions */ + H5T_bkg_t need_bkg; /* Type of background buf needed */ + size_t request_nelmts; /* Requested strip mine */ } H5D_type_info_t; /* Forward declaration of structs used below */ @@ -209,9 +249,11 @@ typedef struct H5D_piece_info_t { hsize_t piece_points; /* Number of elements selected in piece */ hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Scaled coordinates of chunk (in file dataset's dataspace) */ H5S_t *fspace; /* Dataspace describing chunk & selection in it */ - unsigned fspace_shared; /* Indicate that the file space for a chunk is shared and shouldn't be freed */ - H5S_t *mspace; /* Dataspace describing selection in memory corresponding to this chunk */ - unsigned mspace_shared; /* Indicate that the memory space for a chunk is shared and shouldn't be freed */ + unsigned fspace_shared; /* Indicate that the file space for a chunk is shared and shouldn't be freed */ + H5S_t *mspace; /* Dataspace describing selection in memory corresponding to this chunk */ + unsigned mspace_shared; /* Indicate that the memory space for a chunk is shared and shouldn't be freed */ + hbool_t in_place_tconv; /* Whether to perform type conversion in-place */ + size_t buf_off; /* Buffer offset for in-place type conversion */ struct H5D_dset_io_info_t *dset_info; /* Pointer to dset_info */ } H5D_piece_info_t; @@ -263,10 +305,23 @@ typedef struct H5D_io_info_t { const void **wbufs; /* Array of write buffers */ haddr_t store_faddr; /* lowest file addr for read/write */ H5_flexible_const_ptr_t base_maddr; /* starting mem address */ - hbool_t use_select_io; /* Whether to use selection I/O */ + H5D_selection_io_mode_t use_select_io; /* Whether to use selection I/O */ uint8_t *tconv_buf; /* Datatype conv buffer */ hbool_t tconv_buf_allocated; /* Whether the type conversion buffer was allocated */ - size_t max_type_size; /* Largest of all source and destination type sizes */ + size_t tconv_buf_size; /* Size of type conversion buffer */ + uint8_t *bkg_buf; /* Background buffer */ + hbool_t bkg_buf_allocated; /* Whether the background buffer was allocated */ + size_t bkg_buf_size; /* Size of background buffer */ + size_t max_tconv_type_size; /* Largest of all source and destination type sizes involved in type + conversion */ + hbool_t + 
must_fill_bkg; /* Whether any datasets need a background buffer filled with destination contents */ + hbool_t may_use_in_place_tconv; /* Whether datasets in this I/O could potentially use in-place type + conversion if the type sizes are compatible with it */ +#ifdef H5_HAVE_PARALLEL + H5D_mpio_actual_io_mode_t actual_io_mode; /* Actual type of collective or independent I/O */ +#endif /* H5_HAVE_PARALLEL */ + unsigned no_selection_io_cause; /* "No selection I/O cause" flags */ } H5D_io_info_t; /* Created to pass both at once for callback func */ @@ -620,12 +675,14 @@ H5_DLL herr_t H5D__select_write(const H5D_io_info_t *io_info, const H5D_dset_io_ H5_DLL herr_t H5D_select_io_mem(void *dst_buf, H5S_t *dst_space, const void *src_buf, H5S_t *src_space, size_t elmt_size, size_t nelmts); -/* Functions that perform scatter-gather serial I/O operations */ +/* Functions that perform scatter-gather I/O operations */ H5_DLL herr_t H5D__scatter_mem(const void *_tscat_buf, H5S_sel_iter_t *iter, size_t nelmts, void *_buf); H5_DLL size_t H5D__gather_mem(const void *_buf, H5S_sel_iter_t *iter, size_t nelmts, void *_tgath_buf /*out*/); H5_DLL herr_t H5D__scatgath_read(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info); H5_DLL herr_t H5D__scatgath_write(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info); +H5_DLL herr_t H5D__scatgath_read_select(H5D_io_info_t *io_info); +H5_DLL herr_t H5D__scatgath_write_select(H5D_io_info_t *io_info); /* Functions that operate on dataset's layout information */ H5_DLL herr_t H5D__layout_set_io_ops(const H5D_t *dataset); diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h index 357945b7c4c..8265ac25219 100644 --- a/src/H5Dprivate.h +++ b/src/H5Dprivate.h @@ -78,12 +78,15 @@ #define H5D_MPIO_LOCAL_NO_COLLECTIVE_CAUSE_NAME \ "local_no_collective_cause" /* cause of broken collective I/O in each process */ #define H5D_MPIO_GLOBAL_NO_COLLECTIVE_CAUSE_NAME \ - "global_no_collective_cause" /* cause of broken collective I/O in all processes */ -#define H5D_XFER_EDC_NAME "err_detect" /* EDC */ -#define H5D_XFER_FILTER_CB_NAME "filter_cb" /* Filter callback function */ -#define H5D_XFER_CONV_CB_NAME "type_conv_cb" /* Type conversion callback function */ -#define H5D_XFER_XFORM_NAME "data_transform" /* Data transform */ -#define H5D_XFER_DSET_IO_SEL_NAME "dset_io_selection" /* Dataset I/O selection */ + "global_no_collective_cause" /* cause of broken collective I/O in all processes */ +#define H5D_XFER_EDC_NAME "err_detect" /* EDC */ +#define H5D_XFER_FILTER_CB_NAME "filter_cb" /* Filter callback function */ +#define H5D_XFER_CONV_CB_NAME "type_conv_cb" /* Type conversion callback function */ +#define H5D_XFER_XFORM_NAME "data_transform" /* Data transform */ +#define H5D_XFER_DSET_IO_SEL_NAME "dset_io_selection" /* Dataset I/O selection */ +#define H5D_XFER_SELECTION_IO_MODE_NAME "selection_io_mode" /* Selection I/O mode */ +#define H5D_XFER_NO_SELECTION_IO_CAUSE_NAME "no_selection_io_cause" /* Cause for no selection I/O */ +#define H5D_XFER_MODIFY_WRITE_BUF_NAME "modify_write_buf" /* Modify write buffers */ #ifdef H5_HAVE_INSTRUMENTED_LIBRARY /* Collective chunk instrumentation properties */ #define H5D_XFER_COLL_CHUNK_LINK_HARD_NAME "coll_chunk_link_hard" diff --git a/src/H5Dscatgath.c b/src/H5Dscatgath.c index 2547bdcc8fb..802544cfc99 100644 --- a/src/H5Dscatgath.c +++ b/src/H5Dscatgath.c @@ -31,6 +31,11 @@ /* Local Macros */ /****************/ +/* Macro to determine if we're using H5D__compound_opt_read() */ +#define 
H5D__SCATGATH_USE_CMPD_OPT_READ(DSET_INFO, PIECE_INFO) \ + ((DSET_INFO)->type_info.cmpd_subset && H5T_SUBSET_FALSE != (DSET_INFO)->type_info.cmpd_subset->subset && \ + !(PIECE_INFO)->in_place_tconv) + /******************/ /* Local Typedefs */ /******************/ @@ -529,7 +534,7 @@ H5D__scatgath_read(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_ } /* end if */ else { if (H5T_BKG_YES == dset_info->type_info.need_bkg) { - n = H5D__gather_mem(buf, bkg_iter, smine_nelmts, dset_info->type_info.bkg_buf /*out*/); + n = H5D__gather_mem(buf, bkg_iter, smine_nelmts, io_info->bkg_buf /*out*/); if (n != smine_nelmts) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "mem gather failed") } /* end if */ @@ -539,7 +544,7 @@ H5D__scatgath_read(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_ */ if (H5T_convert(dset_info->type_info.tpath, dset_info->type_info.src_type_id, dset_info->type_info.dst_type_id, smine_nelmts, (size_t)0, (size_t)0, - io_info->tconv_buf, dset_info->type_info.bkg_buf) < 0) + io_info->tconv_buf, io_info->bkg_buf) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "datatype conversion failed") /* Do the data transform after the conversion (since we're using type mem_type) */ @@ -672,8 +677,7 @@ H5D__scatgath_write(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset } /* end if */ else { if (H5T_BKG_YES == dset_info->type_info.need_bkg) { - n = H5D__gather_file(io_info, dset_info, bkg_iter, smine_nelmts, - dset_info->type_info.bkg_buf /*out*/); + n = H5D__gather_file(io_info, dset_info, bkg_iter, smine_nelmts, io_info->bkg_buf /*out*/); if (n != smine_nelmts) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "file gather failed") } /* end if */ @@ -697,7 +701,7 @@ H5D__scatgath_write(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset */ if (H5T_convert(dset_info->type_info.tpath, dset_info->type_info.src_type_id, dset_info->type_info.dst_type_id, smine_nelmts, (size_t)0, (size_t)0, - io_info->tconv_buf, dset_info->type_info.bkg_buf) < 0) + io_info->tconv_buf, io_info->bkg_buf) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "datatype conversion failed") } /* end else */ @@ -726,6 +730,559 @@ H5D__scatgath_write(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__scatgath_write() */ +/*------------------------------------------------------------------------- + * Function: H5D__scatgath_read_select + * + * Purpose: Perform scatter/gather read from a list of dataset pieces + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5D__scatgath_read_select(H5D_io_info_t *io_info) +{ + H5S_t **tmp_mem_spaces = NULL; /* Memory spaces to use for read from disk */ + H5S_sel_iter_t *mem_iter = NULL; /* Memory selection iteration info */ + hbool_t mem_iter_init = FALSE; /* Memory selection iteration info has been initialized */ + void **tmp_bufs = NULL; /* Buffers to use for read from disk */ + void *tmp_bkg_buf = NULL; /* Temporary background buffer pointer */ + size_t tconv_bytes_used = 0; /* Number of bytes used so far in conversion buffer */ + size_t bkg_bytes_used = 0; /* Number of bytes used so far in background buffer */ + size_t i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity check */ + HDassert(io_info); + HDassert(io_info->count > 0); + HDassert(io_info->mem_spaces || io_info->pieces_added == 0); + HDassert(io_info->file_spaces || 
io_info->pieces_added == 0); + HDassert(io_info->addrs || io_info->pieces_added == 0); + HDassert(io_info->element_sizes || io_info->pieces_added == 0); + HDassert(io_info->rbufs || io_info->pieces_added == 0); + + /* Allocate list of buffers (within the tconv buf) */ + if (NULL == (tmp_bufs = H5MM_malloc(io_info->pieces_added * sizeof(void *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for temporary buffer list") + + /* Allocate the iterator */ + if (NULL == (mem_iter = H5FL_MALLOC(H5S_sel_iter_t))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "can't allocate memory iterator") + + /* Allocate list of block memory spaces */ + /*!FIXME delay doing this until we find the first mem space that is non-contiguous or doesn't start at 0 + */ + if (NULL == (tmp_mem_spaces = H5MM_malloc(io_info->pieces_added * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for temporary memory space list") + + /* Build read operation to tconv buffer */ + for (i = 0; i < io_info->pieces_added; i++) { + H5D_dset_io_info_t *dset_info = io_info->sel_pieces[i]->dset_info; + + HDassert(io_info->sel_pieces[i]->piece_points > 0); + + /* Check if this piece is involved in type conversion */ + if (dset_info->type_info.is_xform_noop && dset_info->type_info.is_conv_noop) { + /* No type conversion, just copy the mem space and buffer */ + tmp_mem_spaces[i] = io_info->mem_spaces[i]; + tmp_bufs[i] = io_info->rbufs[i]; + } + else { + /* Create block memory space */ + if (NULL == + (tmp_mem_spaces[i] = H5S_create_simple(1, &io_info->sel_pieces[i]->piece_points, NULL))) { + HDmemset(&tmp_mem_spaces[i], 0, (io_info->pieces_added - i) * sizeof(tmp_mem_spaces[0])); + HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "unable to create simple memory dataspace") + } + + /* Check for in-place type conversion */ + if (io_info->sel_pieces[i]->in_place_tconv) + /* Set buffer to point to read buffer + offset */ + tmp_bufs[i] = (uint8_t *)(io_info->rbufs[i]) + io_info->sel_pieces[i]->buf_off; + else { + /* Set buffer to point into type conversion buffer */ + tmp_bufs[i] = io_info->tconv_buf + tconv_bytes_used; + tconv_bytes_used += + io_info->sel_pieces[i]->piece_points * + MAX(dset_info->type_info.src_type_size, dset_info->type_info.dst_type_size); + HDassert(tconv_bytes_used <= io_info->tconv_buf_size); + } + + /* Fill background buffer here unless we will use H5D__compound_opt_read(). 
Must do this before + * the read so the read buffer doesn't get wiped out if we're using in-place type conversion */ + if (!H5D__SCATGATH_USE_CMPD_OPT_READ(dset_info, io_info->sel_pieces[i])) { + /* Check for background buffer */ + if (dset_info->type_info.need_bkg) { + HDassert(io_info->bkg_buf); + + /* Calculate background buffer position */ + tmp_bkg_buf = io_info->bkg_buf + bkg_bytes_used; + bkg_bytes_used += + io_info->sel_pieces[i]->piece_points * dset_info->type_info.dst_type_size; + HDassert(bkg_bytes_used <= io_info->bkg_buf_size); + + /* Gather data from read buffer to background buffer if necessary */ + if (H5T_BKG_YES == dset_info->type_info.need_bkg) { + /* Initialize memory iterator */ + HDassert(!mem_iter_init); + if (H5S_select_iter_init(mem_iter, io_info->mem_spaces[i], + dset_info->type_info.dst_type_size, 0) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, + "unable to initialize memory selection information") + mem_iter_init = TRUE; /* Memory selection iteration info has been initialized */ + + if ((size_t)io_info->sel_pieces[i]->piece_points != + H5D__gather_mem(io_info->rbufs[i], mem_iter, + (size_t)io_info->sel_pieces[i]->piece_points, + tmp_bkg_buf /*out*/)) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "mem gather failed") + + /* Reset selection iterator */ + HDassert(mem_iter_init); + if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") + mem_iter_init = FALSE; + } + } + } + } + } + + /* Read data from all pieces */ + H5_CHECK_OVERFLOW(io_info->pieces_added, size_t, uint32_t) + if (H5F_shared_select_read(io_info->f_sh, H5FD_MEM_DRAW, (uint32_t)io_info->pieces_added, tmp_mem_spaces, + io_info->file_spaces, io_info->addrs, io_info->element_sizes, tmp_bufs) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "selection read failed") + + /* Reset bkg_bytes_used */ + bkg_bytes_used = 0; + + /* Perform type conversion and scatter data to memory buffers for datasets that need this */ + for (i = 0; i < io_info->pieces_added; i++) { + H5D_dset_io_info_t *dset_info = io_info->sel_pieces[i]->dset_info; + + HDassert(tmp_mem_spaces[i]); + + /* Check if this piece is involved in type conversion */ + if (tmp_mem_spaces[i] != io_info->mem_spaces[i]) { + H5_CHECK_OVERFLOW(io_info->sel_pieces[i]->piece_points, hsize_t, size_t); + + /* Initialize memory iterator */ + HDassert(!mem_iter_init); + if (H5S_select_iter_init(mem_iter, io_info->mem_spaces[i], dset_info->type_info.dst_type_size, + 0) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, + "unable to initialize memory selection information") + mem_iter_init = TRUE; /* Memory selection iteration info has been initialized */ + + /* If the source and destination are compound types and subset of each other + * and no conversion is needed, copy the data directly into user's buffer and + * bypass the rest of steps. 
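The cmpd_subset shortcut discussed here is the usual partial-compound read: the memory datatype keeps only a subset of the file compound's members, so once the raw data is in hand the members can be copied straight into the application buffer without a separate conversion/scatter step. A small sketch of a type pair that takes this path; the struct and member names are illustrative and dset_id/recs are placeholders:

typedef struct {
    int    a;
    double c;
} mem_rec_t;

hid_t mtype = H5Tcreate(H5T_COMPOUND, sizeof(mem_rec_t));
H5Tinsert(mtype, "a", HOFFSET(mem_rec_t, a), H5T_NATIVE_INT);
H5Tinsert(mtype, "c", HOFFSET(mem_rec_t, c), H5T_NATIVE_DOUBLE);

/* Reading a file compound {a, b, c} with mtype makes the conversion a
 * compound-subset copy, which is what H5D__SCATGATH_USE_CMPD_OPT_READ()
 * detects (unless in-place conversion was chosen for the piece) */
H5Dread(dset_id, mtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, recs);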
+ */ + if (H5D__SCATGATH_USE_CMPD_OPT_READ(dset_info, io_info->sel_pieces[i])) { + if (H5D__compound_opt_read((size_t)io_info->sel_pieces[i]->piece_points, mem_iter, + &dset_info->type_info, tmp_bufs[i], io_info->rbufs[i] /*out*/) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "datatype conversion failed") + } + else { + /* Check for background buffer */ + if (dset_info->type_info.need_bkg) { + HDassert(io_info->bkg_buf); + + /* Calculate background buffer position */ + tmp_bkg_buf = io_info->bkg_buf + bkg_bytes_used; + bkg_bytes_used += + io_info->sel_pieces[i]->piece_points * dset_info->type_info.dst_type_size; + HDassert(bkg_bytes_used <= io_info->bkg_buf_size); + } + + /* + * Perform datatype conversion. + */ + if (H5T_convert(dset_info->type_info.tpath, dset_info->type_info.src_type_id, + dset_info->type_info.dst_type_id, + (size_t)io_info->sel_pieces[i]->piece_points, (size_t)0, (size_t)0, + tmp_bufs[i], tmp_bkg_buf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "datatype conversion failed") + + /* Do the data transform after the conversion (since we're using type mem_type) */ + if (!dset_info->type_info.is_xform_noop) { + H5Z_data_xform_t *data_transform; /* Data transform info */ + + /* Retrieve info from API context */ + if (H5CX_get_data_transform(&data_transform) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get data transform info") + + if (H5Z_xform_eval(data_transform, tmp_bufs[i], + (size_t)io_info->sel_pieces[i]->piece_points, + dset_info->type_info.mem_type) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "Error performing data transform") + } + + /* Scatter the data into memory if this was not an in-place conversion */ + if (!io_info->sel_pieces[i]->in_place_tconv) + if (H5D__scatter_mem(tmp_bufs[i], mem_iter, (size_t)io_info->sel_pieces[i]->piece_points, + io_info->rbufs[i] /*out*/) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "scatter failed") + } + + /* Release selection iterator */ + HDassert(mem_iter_init); + if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") + mem_iter_init = FALSE; + } + } + +done: + /* Release and free selection iterator */ + if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") + if (mem_iter) + mem_iter = H5FL_FREE(H5S_sel_iter_t, mem_iter); + + /* Free tmp_bufs */ + H5MM_free(tmp_bufs); + tmp_bufs = NULL; + + /* Clear and free tmp_mem_spaces */ + if (tmp_mem_spaces) { + for (i = 0; i < io_info->pieces_added; i++) + if (tmp_mem_spaces[i] != io_info->mem_spaces[i] && tmp_mem_spaces[i] && + H5S_close(tmp_mem_spaces[i]) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "Can't close dataspace") + H5MM_free(tmp_mem_spaces); + tmp_mem_spaces = NULL; + } + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__scatgath_read_select() */ + +/*------------------------------------------------------------------------- + * Function: H5D__scatgath_write_select + * + * Purpose: Perform scatter/gather write to a list of dataset pieces. 
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5D__scatgath_write_select(H5D_io_info_t *io_info) +{ + H5S_t **write_mem_spaces = NULL; /* Memory spaces to use for write to disk */ + size_t spaces_added = 0; /* Number of spaces added to write_mem_spaces */ + H5S_sel_iter_t *mem_iter = NULL; /* Memory selection iteration info */ + hbool_t mem_iter_init = FALSE; /* Memory selection iteration info has been initialized */ + const void **write_bufs = NULL; /* Buffers to use for write to disk */ + size_t tconv_bytes_used = 0; /* Number of bytes used so far in conversion buffer */ + size_t bkg_bytes_used = 0; /* Number of bytes used so far in background buffer */ + H5S_t **bkg_mem_spaces = NULL; /* Array of memory spaces for read to background buffer */ + H5S_t **bkg_file_spaces = NULL; /* Array of file spaces for read to background buffer */ + haddr_t *bkg_addrs = NULL; /* Array of file addresses for read to background buffer */ + size_t *bkg_element_sizes = NULL; /* Array of element sizes for read to background buffer */ + void **bkg_bufs = NULL; /* Array background buffers for read of existing file contents */ + size_t bkg_pieces = 0; /* Number of pieces that need to read the background data from disk */ + size_t i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity check */ + HDassert(io_info); + HDassert(io_info->count > 0); + HDassert(io_info->mem_spaces || io_info->pieces_added == 0); + HDassert(io_info->file_spaces || io_info->pieces_added == 0); + HDassert(io_info->addrs || io_info->pieces_added == 0); + HDassert(io_info->element_sizes || io_info->pieces_added == 0); + HDassert(io_info->wbufs || io_info->pieces_added == 0); + + /* Allocate list of buffers (within the tconv buf) */ + if (NULL == (write_bufs = (const void **)H5MM_malloc(io_info->pieces_added * sizeof(const void *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for temporary buffer list") + + /* Allocate the iterator */ + if (NULL == (mem_iter = H5FL_MALLOC(H5S_sel_iter_t))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "can't allocate memory iterator") + + /* Allocate list of block memory spaces */ + /*!FIXME delay doing this until we find the first mem space that is non-contiguous or doesn't start at 0 + */ + if (NULL == (write_mem_spaces = H5MM_malloc(io_info->pieces_added * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for temporary memory space list") + + /* Build operations to read data to background buffer and to write data */ + for (i = 0; i < io_info->pieces_added; i++) { + H5D_dset_io_info_t *dset_info = io_info->sel_pieces[i]->dset_info; + + HDassert(io_info->sel_pieces[i]->piece_points > 0); + + /* Check if this piece is involved in type conversion */ + if (dset_info->type_info.is_xform_noop && dset_info->type_info.is_conv_noop) { + /* No type conversion, just copy the mem space and buffer */ + write_mem_spaces[i] = io_info->mem_spaces[i]; + spaces_added++; + write_bufs[i] = io_info->wbufs[i]; + } + else { + void *tmp_write_buf; /* To sidestep const warnings */ + void *tmp_bkg_buf = NULL; + + H5_CHECK_OVERFLOW(io_info->sel_pieces[i]->piece_points, hsize_t, size_t); + + /* Initialize memory iterator */ + HDassert(!mem_iter_init); + if (H5S_select_iter_init(mem_iter, io_info->mem_spaces[i], dset_info->type_info.src_type_size, + 0) < 0) + HGOTO_ERROR(H5E_DATASET, 
H5E_CANTINIT, FAIL, + "unable to initialize memory selection information") + mem_iter_init = TRUE; /* Memory selection iteration info has been initialized */ + + /* Create block memory space */ + if (NULL == + (write_mem_spaces[i] = H5S_create_simple(1, &io_info->sel_pieces[i]->piece_points, NULL))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "unable to create simple memory dataspace") + spaces_added++; + + /* Check for in-place type conversion */ + if (io_info->sel_pieces[i]->in_place_tconv) { + H5_flexible_const_ptr_t flex_buf; + + /* Set buffer to point to write buffer + offset */ + /* Use cast to union to twiddle away const. OK because if we're doing this it means the user + * explicitly allowed us to modify this buffer via H5Pset_modify_write_buf(). */ + flex_buf.cvp = io_info->wbufs[i]; + tmp_write_buf = (uint8_t *)flex_buf.vp + io_info->sel_pieces[i]->buf_off; + } + else { + /* Set buffer to point into type conversion buffer */ + tmp_write_buf = io_info->tconv_buf + tconv_bytes_used; + tconv_bytes_used += + io_info->sel_pieces[i]->piece_points * + MAX(dset_info->type_info.src_type_size, dset_info->type_info.dst_type_size); + HDassert(tconv_bytes_used <= io_info->tconv_buf_size); + + /* Gather data from application buffer into the datatype conversion buffer */ + if ((size_t)io_info->sel_pieces[i]->piece_points != + H5D__gather_mem(io_info->wbufs[i], mem_iter, (size_t)io_info->sel_pieces[i]->piece_points, + tmp_write_buf /*out*/)) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "mem gather failed") + } + + /* Set buffer for writing to disk (from type conversion buffer) */ + write_bufs[i] = (const void *)tmp_write_buf; + + /* If the source and destination are compound types and the destination is a subset of + * the source and no conversion is needed, copy the data directly into the type + * conversion buffer and bypass the rest of steps. If the source is a subset of the + * destination, the optimization is done in conversion function H5T_conv_struct_opt to + * protect the background data. + */ + if (dset_info->type_info.cmpd_subset && + H5T_SUBSET_DST == dset_info->type_info.cmpd_subset->subset && + dset_info->type_info.dst_type_size == dset_info->type_info.cmpd_subset->copy_size && + !io_info->sel_pieces[i]->in_place_tconv) { + if (H5D__compound_opt_write((size_t)io_info->sel_pieces[i]->piece_points, + &dset_info->type_info, tmp_write_buf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "datatype conversion failed") + + /* No background buffer necessary, prevent this element from being considered in the second + * loop */ + /* Add this to H5Tconv.c? 
-NAF */ + dset_info->type_info.need_bkg = H5T_BKG_NO; + } /* end if */ + else { + /* Check for background buffer */ + if (dset_info->type_info.need_bkg) { + HDassert(io_info->bkg_buf); + + /* Calculate background buffer position */ + tmp_bkg_buf = io_info->bkg_buf + bkg_bytes_used; + bkg_bytes_used += + io_info->sel_pieces[i]->piece_points * dset_info->type_info.dst_type_size; + HDassert(bkg_bytes_used <= io_info->bkg_buf_size); + } + + /* Set up background buffer read operation if necessary */ + if (H5T_BKG_YES == dset_info->type_info.need_bkg) { + HDassert(io_info->must_fill_bkg); + + /* Allocate arrays of parameters for selection read to background buffer if necessary */ + if (!bkg_mem_spaces) { + HDassert(!bkg_file_spaces && !bkg_addrs && !bkg_element_sizes && !bkg_bufs); + if (NULL == (bkg_mem_spaces = H5MM_malloc(io_info->pieces_added * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for memory space list") + if (NULL == (bkg_file_spaces = H5MM_malloc(io_info->pieces_added * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for file space list") + if (NULL == (bkg_addrs = H5MM_malloc(io_info->pieces_added * sizeof(haddr_t)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for piece address list") + if (NULL == (bkg_element_sizes = H5MM_malloc(io_info->pieces_added * sizeof(size_t)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for element size list") + if (NULL == (bkg_bufs = H5MM_malloc(io_info->pieces_added * sizeof(const void *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for write buffer list") + } + + /* Use same (block) memory space, file space, address, and element size as write operation + */ + HDassert(bkg_mem_spaces && bkg_file_spaces && bkg_addrs && bkg_element_sizes && bkg_bufs); + bkg_mem_spaces[bkg_pieces] = write_mem_spaces[i]; + bkg_file_spaces[bkg_pieces] = io_info->file_spaces[i]; + bkg_addrs[bkg_pieces] = io_info->addrs[i]; + bkg_element_sizes[bkg_pieces] = io_info->element_sizes[i]; + + /* Use previously calculated background buffer position */ + bkg_bufs[bkg_pieces] = tmp_bkg_buf; + + /* Add piece */ + bkg_pieces++; + } + else { + /* Perform type conversion here to avoid second loop if no dsets use the background buffer + */ + /* Do the data transform before the type conversion (since + * transforms must be done in the memory type). */ + if (!dset_info->type_info.is_xform_noop) { + H5Z_data_xform_t *data_transform; /* Data transform info */ + + /* Retrieve info from API context */ + if (H5CX_get_data_transform(&data_transform) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get data transform info") + + if (H5Z_xform_eval(data_transform, tmp_write_buf, + (size_t)io_info->sel_pieces[i]->piece_points, + dset_info->type_info.mem_type) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "Error performing data transform") + } + + /* + * Perform datatype conversion. 
+ */ + if (H5T_convert(dset_info->type_info.tpath, dset_info->type_info.src_type_id, + dset_info->type_info.dst_type_id, + (size_t)io_info->sel_pieces[i]->piece_points, (size_t)0, (size_t)0, + tmp_write_buf, tmp_bkg_buf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "datatype conversion failed") + } + } + + /* Release selection iterator */ + HDassert(mem_iter_init); + if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") + mem_iter_init = FALSE; + } + } + + HDassert(spaces_added == io_info->pieces_added); + + /* Gather data to background buffer if necessary */ + if (io_info->must_fill_bkg) { + size_t j = 0; /* Index into array of background buffers */ + + /* Read data */ + H5_CHECK_OVERFLOW(bkg_pieces, size_t, uint32_t) + if (H5F_shared_select_read(io_info->f_sh, H5FD_MEM_DRAW, (uint32_t)bkg_pieces, bkg_mem_spaces, + bkg_file_spaces, bkg_addrs, bkg_element_sizes, bkg_bufs) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "selection read to background buffer failed") + + /* Perform type conversion on pieces with background buffers that were just read */ + for (i = 0; i < io_info->pieces_added; i++) { + H5D_dset_io_info_t *dset_info = io_info->sel_pieces[i]->dset_info; + + if (H5T_BKG_YES == dset_info->type_info.need_bkg) { + /* Non-const write_buf[i]. Use pointer math here to avoid const warnings. When + * there's a background buffer write_buf[i] always points inside the non-const tconv + * buf so this is OK. */ + void *tmp_write_buf = + (void *)((uint8_t *)io_info->tconv_buf + + ((const uint8_t *)write_bufs[i] - (const uint8_t *)io_info->tconv_buf)); + + /* Do the data transform before the type conversion (since + * transforms must be done in the memory type). */ + if (!dset_info->type_info.is_xform_noop) { + H5Z_data_xform_t *data_transform; /* Data transform info */ + + /* Retrieve info from API context */ + if (H5CX_get_data_transform(&data_transform) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get data transform info") + + if (H5Z_xform_eval(data_transform, tmp_write_buf, + (size_t)io_info->sel_pieces[i]->piece_points, + dset_info->type_info.mem_type) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "Error performing data transform") + } + + /* + * Perform datatype conversion. 
+ */ + HDassert(j < bkg_pieces); + if (H5T_convert(dset_info->type_info.tpath, dset_info->type_info.src_type_id, + dset_info->type_info.dst_type_id, + (size_t)io_info->sel_pieces[i]->piece_points, (size_t)0, (size_t)0, + tmp_write_buf, bkg_bufs[j]) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "datatype conversion failed") + + /* Advance to next background buffer */ + j++; + } + } + + HDassert(j == bkg_pieces); + } + + /* Write data to disk */ + H5_CHECK_OVERFLOW(io_info->pieces_added, size_t, uint32_t) + if (H5F_shared_select_write(io_info->f_sh, H5FD_MEM_DRAW, (uint32_t)io_info->pieces_added, + write_mem_spaces, io_info->file_spaces, io_info->addrs, + io_info->element_sizes, write_bufs) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "selection write failed") + +done: + /* Release and free selection iterator */ + if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") + if (mem_iter) + mem_iter = H5FL_FREE(H5S_sel_iter_t, mem_iter); + + /* Free write_bufs */ + H5MM_free(write_bufs); + write_bufs = NULL; + + /* Clear and free write_mem_spaces */ + if (write_mem_spaces) { + for (i = 0; i < spaces_added; i++) { + HDassert(write_mem_spaces[i]); + if (write_mem_spaces[i] != io_info->mem_spaces[i] && H5S_close(write_mem_spaces[i]) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "Can't close dataspace") + } + H5MM_free(write_mem_spaces); + write_mem_spaces = NULL; + } + + /* Free bakcground buffer parameter arrays */ + H5MM_free(bkg_mem_spaces); + bkg_mem_spaces = NULL; + H5MM_free(bkg_file_spaces); + bkg_file_spaces = NULL; + H5MM_free(bkg_addrs); + bkg_addrs = NULL; + H5MM_free(bkg_element_sizes); + bkg_element_sizes = NULL; + H5MM_free(bkg_bufs); + bkg_bufs = NULL; + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__scatgath_write_select() */ + /*------------------------------------------------------------------------- * Function: H5D__compound_opt_read * diff --git a/src/H5Dvirtual.c b/src/H5Dvirtual.c index c40642fad43..6695c02b669 100644 --- a/src/H5Dvirtual.c +++ b/src/H5Dvirtual.c @@ -2384,7 +2384,8 @@ H5D__virtual_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t H5_ATTR_UNUSED * FUNC_ENTER_PACKAGE_NOERR /* Disable selection I/O */ - io_info->use_select_io = FALSE; + io_info->use_select_io = H5D_SELECTION_IO_MODE_OFF; + io_info->no_selection_io_cause |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__virtual_io_init() */ diff --git a/src/H5FDint.c b/src/H5FDint.c index c5b87133c68..6d90aaecd91 100644 --- a/src/H5FDint.c +++ b/src/H5FDint.c @@ -469,6 +469,7 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs */ extend_sizes = FALSE; extend_types = FALSE; + uint32_t no_selection_io_cause; for (i = 0; i < count; i++) { @@ -505,6 +506,11 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs if ((file->cls->read)(file, type, dxpl_id, addrs[i], size, bufs[i]) < 0) HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read request failed") } + + /* Add H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB to no selection I/O cause */ + H5CX_get_no_selection_io_cause(&no_selection_io_cause); + no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + H5CX_set_no_selection_io_cause(no_selection_io_cause); } done: @@ -669,6 +675,7 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr */ extend_sizes = FALSE; extend_types = FALSE; + uint32_t no_selection_io_cause; for (i = 
0; i < count; i++) { @@ -705,6 +712,11 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr if ((file->cls->write)(file, type, dxpl_id, addrs[i], size, bufs[i]) < 0) HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver write request failed") } + + /* Add H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB to no selection I/O cause */ + H5CX_get_no_selection_io_cause(&no_selection_io_cause); + no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + H5CX_set_no_selection_io_cause(no_selection_io_cause); } done: @@ -991,6 +1003,14 @@ H5FD__read_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uin 0) HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read vector request failed") } + else { + uint32_t no_selection_io_cause; + + /* Add H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB to no selection I/O cause */ + H5CX_get_no_selection_io_cause(&no_selection_io_cause); + no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + H5CX_set_no_selection_io_cause(no_selection_io_cause); + } done: /* Terminate and free iterators */ @@ -1630,6 +1650,14 @@ H5FD__write_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, ui 0) HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write vector request failed") } + else { + uint32_t no_selection_io_cause; + + /* Add H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB to no selection I/O cause */ + H5CX_get_no_selection_io_cause(&no_selection_io_cause); + no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + H5CX_set_no_selection_io_cause(no_selection_io_cause); + } done: /* Terminate and free iterators */ diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index f3e463de30d..40c6cff2fbb 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -906,6 +906,7 @@ H5_DLL haddr_t H5F_shared_get_eoa(const H5F_shared_t *f_sh, H5FD_mem_t type); H5_DLL haddr_t H5F_get_eoa(const H5F_t *f, H5FD_mem_t type); H5_DLL herr_t H5F_shared_get_file_driver(const H5F_shared_t *f_sh, H5FD_t **file_handle); H5_DLL herr_t H5F_get_vfd_handle(const H5F_t *file, hid_t fapl, void **file_handle); +H5_DLL hbool_t H5F_has_vector_select_io(const H5F_t *f, hbool_t is_write); /* File mounting routines */ H5_DLL herr_t H5F_mount(const struct H5G_loc_t *loc, const char *name, H5F_t *child, hid_t plist_id); diff --git a/src/H5Fquery.c b/src/H5Fquery.c index 469df581c70..72b173f0ab9 100644 --- a/src/H5Fquery.c +++ b/src/H5Fquery.c @@ -1372,3 +1372,30 @@ H5F_get_file_locking(const H5F_t *f) FUNC_LEAVE_NOAPI(f->shared->use_file_locking) } /* end H5F_get_file_locking */ + +/*------------------------------------------------------------------------- + * Function: H5F_has_vector_select_io + * + * Purpose: Determine if vector or selection I/O is supported by this file + * + * Return: TRUE/FALSE + * + *------------------------------------------------------------------------- + */ +hbool_t +H5F_has_vector_select_io(const H5F_t *f, hbool_t is_write) +{ + hbool_t ret_value; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + HDassert(f); + HDassert(f->shared); + + if (is_write) + ret_value = (f->shared->lf->cls->write_vector != NULL || f->shared->lf->cls->write_selection != NULL); + else + ret_value = (f->shared->lf->cls->read_vector != NULL || f->shared->lf->cls->read_selection != NULL); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5F_has_vector_select_io */ diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c index ecf03729292..6eef558de65 100644 --- a/src/H5Pdxpl.c +++ b/src/H5Pdxpl.c @@ -168,6 +168,19 @@ #define H5D_XFER_DSET_IO_SEL_ENC H5P__dxfr_edc_enc 
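H5F_has_vector_select_io() above keys the automatic selection-I/O decision off the driver class: a VFD advertises support simply by providing the vector or selection callbacks, while a driver that provides neither sends the DEFAULT (auto) mode down the scalar path with H5D_SEL_IO_DEFAULT_OFF and, at the H5FD layer, records H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB as in the hunks above. A rough sketch from a driver author's point of view; my_vfd_read_vector/my_vfd_write_vector are hypothetical driver functions and only the relevant H5FD_class_t members are shown:

static const H5FD_class_t my_vfd_class = {
    /* ... name, scalar read/write callbacks, etc. ... */
    .read_vector     = my_vfd_read_vector,  /* either the vector pair ...      */
    .write_vector    = my_vfd_write_vector,
    .read_selection  = NULL,                /* ... or the selection pair is    */
    .write_selection = NULL,                /* enough to satisfy the check     */
    /* ... remaining members ... */
};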
#define H5D_XFER_DSET_IO_SEL_DEC H5P__dxfr_edc_dec #endif /* QAK */ +/* Definition for selection I/O mode property */ +#define H5D_XFER_SELECTION_IO_MODE_SIZE sizeof(H5D_selection_io_mode_t) +#define H5D_XFER_SELECTION_IO_MODE_DEF H5D_SELECTION_IO_MODE_DEFAULT +#define H5D_XFER_SELECTION_IO_MODE_ENC H5P__dxfr_selection_io_mode_enc +#define H5D_XFER_SELECTION_IO_MODE_DEC H5P__dxfr_selection_io_mode_dec +/* Definitions for cause of no selection I/O property */ +#define H5D_XFER_NO_SELECTION_IO_CAUSE_SIZE sizeof(uint32_t) +#define H5D_XFER_NO_SELECTION_IO_CAUSE_DEF 0 +/* Definitions for modify write buffer property */ +#define H5D_XFER_MODIFY_WRITE_BUF_SIZE sizeof(hbool_t) +#define H5D_XFER_MODIFY_WRITE_BUF_DEF FALSE +#define H5D_XFER_MODIFY_WRITE_BUF_ENC H5P__dxfr_modify_write_buf_enc +#define H5D_XFER_MODIFY_WRITE_BUF_DEC H5P__dxfr_modify_write_buf_dec /******************/ /* Local Typedefs */ @@ -208,6 +221,10 @@ static herr_t H5P__dxfr_xform_close(const char *name, size_t size, void *value); static herr_t H5P__dxfr_dset_io_hyp_sel_copy(const char *name, size_t size, void *value); static int H5P__dxfr_dset_io_hyp_sel_cmp(const void *value1, const void *value2, size_t size); static herr_t H5P__dxfr_dset_io_hyp_sel_close(const char *name, size_t size, void *value); +static herr_t H5P__dxfr_selection_io_mode_enc(const void *value, void **pp, size_t *size); +static herr_t H5P__dxfr_selection_io_mode_dec(const void **pp, void *value); +static herr_t H5P__dxfr_modify_write_buf_enc(const void *value, void **pp, size_t *size); +static herr_t H5P__dxfr_modify_write_buf_dec(const void **pp, void *value); /*********************/ /* Package Variables */ @@ -277,6 +294,9 @@ static const H5T_conv_cb_t H5D_def_conv_cb_g = static const void *H5D_def_xfer_xform_g = H5D_XFER_XFORM_DEF; /* Default value for data transform */ static const H5S_t *H5D_def_dset_io_sel_g = H5D_XFER_DSET_IO_SEL_DEF; /* Default value for dataset I/O selection */ +static const H5D_selection_io_mode_t H5D_def_selection_io_mode_g = H5D_XFER_SELECTION_IO_MODE_DEF; +static const uint32_t H5D_def_no_selection_io_cause_g = H5D_XFER_NO_SELECTION_IO_CAUSE_DEF; +static const hbool_t H5D_def_modify_write_buf_g = H5D_XFER_MODIFY_WRITE_BUF_DEF; /*------------------------------------------------------------------------- * Function: H5P__dxfr_reg_prop @@ -441,6 +461,24 @@ H5P__dxfr_reg_prop(H5P_genclass_t *pclass) H5D_XFER_DSET_IO_SEL_CLOSE) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + if (H5P__register_real(pclass, H5D_XFER_SELECTION_IO_MODE_NAME, H5D_XFER_SELECTION_IO_MODE_SIZE, + &H5D_def_selection_io_mode_g, NULL, NULL, NULL, H5D_XFER_SELECTION_IO_MODE_ENC, + H5D_XFER_SELECTION_IO_MODE_DEC, NULL, NULL, NULL, NULL) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + + /* Register the cause of no selection I/O property */ + /* (Note: this property should not have an encode/decode callback) */ + if (H5P__register_real(pclass, H5D_XFER_NO_SELECTION_IO_CAUSE_NAME, H5D_XFER_NO_SELECTION_IO_CAUSE_SIZE, + &H5D_def_no_selection_io_cause_g, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + + /* Register the modify write buffer property */ + if (H5P__register_real(pclass, H5D_XFER_MODIFY_WRITE_BUF_NAME, H5D_XFER_MODIFY_WRITE_BUF_SIZE, + &H5D_def_modify_write_buf_g, NULL, NULL, NULL, H5D_XFER_MODIFY_WRITE_BUF_ENC, + H5D_XFER_MODIFY_WRITE_BUF_DEC, NULL, NULL, NULL, NULL) < 0) + 
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5P__dxfr_reg_prop() */ @@ -2258,6 +2296,78 @@ H5P__dxfr_dset_io_hyp_sel_close(const char H5_ATTR_UNUSED *name, size_t H5_ATTR_ FUNC_LEAVE_NOAPI(ret_value) } /* end H5P__dxfr_dset_io_hyp_sel_close() */ +/*------------------------------------------------------------------------- + * Function: H5P__dxfr_selection_io_mode_enc + * + * Purpose: Callback routine which is called whenever the selection + * I/O mode property in the dataset transfer property list + * is encoded. + * + * Return: Success: Non-negative + * Failure: Negative + * + * Programmer: Vailin Choi + * Feb 2023 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5P__dxfr_selection_io_mode_enc(const void *value, void **_pp, size_t *size) +{ + const H5D_selection_io_mode_t *select_io_mode = + (const H5D_selection_io_mode_t *)value; /* Create local alias for values */ + uint8_t **pp = (uint8_t **)_pp; + + FUNC_ENTER_PACKAGE_NOERR + + /* Sanity check */ + HDassert(select_io_mode); + HDassert(size); + + if (NULL != *pp) + /* Encode selection I/O mode property */ + *(*pp)++ = (uint8_t)*select_io_mode; + + /* Size of selection I/O mode property */ + (*size)++; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5P__dxfr_selection_io_mode_enc() */ + +/*------------------------------------------------------------------------- + * Function: H5P__dxfr_selection_io_mode_dec + * + * Purpose: Callback routine which is called whenever the selection + * I/O mode property in the dataset transfer property list + * is decoded. + * + * Return: Success: Non-negative + * Failure: Negative + * + * Programmer: Vailin Choi + * Feb 2023 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5P__dxfr_selection_io_mode_dec(const void **_pp, void *_value) +{ + H5D_selection_io_mode_t *select_io_mode = (H5D_selection_io_mode_t *)_value; /* Selection I/O mode */ + const uint8_t **pp = (const uint8_t **)_pp; + + FUNC_ENTER_PACKAGE_NOERR + + /* Sanity checks */ + HDassert(pp); + HDassert(*pp); + HDassert(select_io_mode); + + /* Decode selection I/O mode property */ + *select_io_mode = (H5D_selection_io_mode_t) * (*pp)++; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5P__dxfr_selection_io_dec() */ + /*------------------------------------------------------------------------- * Function: H5Pset_dataset_io_hyperslab_selection * @@ -2386,3 +2496,248 @@ H5Pset_dataset_io_hyperslab_selection(hid_t plist_id, unsigned rank, H5S_seloper FUNC_LEAVE_API(ret_value) } /* end H5Pset_dataset_io_hyperslab_selection() */ + +/*------------------------------------------------------------------------- + * Function: H5Pset_selection_io + * + * Purpose: To set the selection I/O mode in the dataset + * transfer property list. + * + * Note: The library may not perform selection I/O as it asks for if + * the layout callback determines that it is not feasible to do so. 
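/*
 * [Editorial illustration -- not part of the original patch] The comment above
 * introduces H5Pset_selection_io(), the setter for the new selection I/O mode
 * property. A minimal sketch of how the set/get pair might be used on a
 * dataset transfer property list follows; identifiers are placeholders and the
 * error paths leak the DXPL for brevity:
 *
 *     static herr_t
 *     example_request_selection_io(void)
 *     {
 *         hid_t                   dxpl_id = H5I_INVALID_HID;
 *         H5D_selection_io_mode_t mode    = H5D_SELECTION_IO_MODE_DEFAULT;
 *
 *         if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
 *             return FAIL;
 *
 *         // Request selection I/O for H5Dread()/H5Dwrite() calls that use this DXPL
 *         if (H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON) < 0)
 *             return FAIL;
 *
 *         // Read the mode back; the library may still fall back to scalar I/O,
 *         // as reported by H5Pget_no_selection_io_cause()
 *         if (H5Pget_selection_io(dxpl_id, &mode) < 0)
 *             return FAIL;
 *
 *         return H5Pclose(dxpl_id);
 *     }
 */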
+ * + * Return: Success: Non-negative + * Failure: Negative + * + * Programmer: Vailin Choi + * March 5, 2023 + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pset_selection_io(hid_t plist_id, H5D_selection_io_mode_t selection_io_mode) +{ + H5P_genplist_t *plist; /* Property list pointer */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "iDC", plist_id, selection_io_mode); + + /* Check arguments */ + if (plist_id == H5P_DEFAULT) + HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't set values in default property list") + + if (NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not a dxpl") + + /* Set the selection I/O mode */ + if (H5P_set(plist, H5D_XFER_SELECTION_IO_MODE_NAME, &selection_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set value") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pset_selection_io() */ + +/*------------------------------------------------------------------------- + * Function: H5Pget_selection_io + * + * Purpose: To retrieve the selection I/O mode that is set in + * the dataset transfer property list. + * + * Note: The library may not perform selection I/O as it asks for if + * the layout callback determines that it is not feasible to do so. + * + * Return: Success: Non-negative + * Failure: Negative + * + * Programmer: Vailin Choi + * March 5, 2023 + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pget_selection_io(hid_t plist_id, H5D_selection_io_mode_t *selection_io_mode /*out*/) +{ + H5P_genplist_t *plist; /* Property list pointer */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "i*DC", plist_id, selection_io_mode); + + /* Check arguments */ + if (NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not a dxpl") + + /* Get the selection I/O mode */ + if (selection_io_mode) + if (H5P_get(plist, H5D_XFER_SELECTION_IO_MODE_NAME, selection_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get value") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pget_selection_io() */ + +/*------------------------------------------------------------------------- + * Function: H5Pget_no_selection_io_cause + * + * Purpose: Retrieves causes for not performing selection I/O + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * April 17, 2023 + *------------------------------------------------------------------------- + */ +herr_t +H5Pget_no_selection_io_cause(hid_t plist_id, uint32_t *no_selection_io_cause /*out*/) +{ + H5P_genplist_t *plist; + herr_t ret_value = SUCCEED; /* return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "ix", plist_id, no_selection_io_cause); + + /* Get the plist structure */ + if (NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_ID, H5E_BADID, FAIL, "can't find object for ID") + + /* Return values */ + if (no_selection_io_cause) + if (H5P_get(plist, H5D_XFER_NO_SELECTION_IO_CAUSE_NAME, no_selection_io_cause) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get no_selection_io_cause value") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pget_no_selection_io_cause() */ + +/*------------------------------------------------------------------------- + * Function: H5P__dxfr_modify_write_buf_enc + * + * Purpose: Callback routine which 
is called whenever the modify write + * buffer property in the dataset transfer property list is + * encoded. + * + * Return: Success: Non-negative + * Failure: Negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5P__dxfr_modify_write_buf_enc(const void *value, void **_pp /*out*/, size_t *size /*out*/) +{ + const hbool_t *modify_write_buf = (const hbool_t *)value; /* Create local alias for values */ + uint8_t **pp = (uint8_t **)_pp; + + FUNC_ENTER_PACKAGE_NOERR + + /* Sanity check */ + HDassert(modify_write_buf); + HDassert(size); + + if (NULL != *pp) + /* Encode modify write buf property. Use "!!" so we always get 0 or 1 */ + *(*pp)++ = (uint8_t)(!!(*modify_write_buf)); + + /* Size of modify write buf property */ + (*size)++; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5P__dxfr_modify_write_buf_enc() */ + +/*------------------------------------------------------------------------- + * Function: H5P__dxfr_modify_write_buf_dec + * + * Purpose: Callback routine which is called whenever the modify write + * buffer property in the dataset transfer property list is + * decoded. + * + * Return: Success: Non-negative + * Failure: Negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5P__dxfr_modify_write_buf_dec(const void **_pp, void *_value /*out*/) +{ + hbool_t *modify_write_buf = (hbool_t *)_value; /* Modify write buffer */ + const uint8_t **pp = (const uint8_t **)_pp; + + FUNC_ENTER_PACKAGE_NOERR + + /* Sanity checks */ + HDassert(pp); + HDassert(*pp); + HDassert(modify_write_buf); + + /* Decode selection I/O mode property */ + *modify_write_buf = (hbool_t) * (*pp)++; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5P__dxfr_modify_write_buf_dec() */ + +/*------------------------------------------------------------------------- + * Function: H5Pset_modify_write_buf + * + * Purpose: Allows the library to modify the contents of the write + * buffer + * + * Return: Success: Non-negative + * Failure: Negative + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pset_modify_write_buf(hid_t plist_id, hbool_t modify_write_buf) +{ + H5P_genplist_t *plist; /* Property list pointer */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "ib", plist_id, modify_write_buf); + + /* Check arguments */ + if (plist_id == H5P_DEFAULT) + HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't set values in default property list") + + if (NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not a dxpl") + + /* Set the selection I/O mode */ + if (H5P_set(plist, H5D_XFER_MODIFY_WRITE_BUF_NAME, &modify_write_buf) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set value") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pset_modify_write_buf() */ + +/*------------------------------------------------------------------------- + * Function: H5Pget_modify_write_buf + * + * Purpose: Retrieves the "modify write buffer" property + * + * Return: Success: Non-negative + * Failure: Negative + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pget_modify_write_buf(hid_t plist_id, hbool_t *modify_write_buf /*out*/) +{ + H5P_genplist_t *plist; /* Property list pointer */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "i*b", plist_id, modify_write_buf); + + /* Check arguments */ + if (NULL == 
(plist = H5P_object_verify(plist_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not a dxpl") + + /* Get the selection I/O mode */ + if (modify_write_buf) + if (H5P_get(plist, H5D_XFER_MODIFY_WRITE_BUF_NAME, modify_write_buf) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get value") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pget_modify_write_buf() */ diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index bb5d421a8e3..a08119d5d69 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -328,7 +328,6 @@ typedef enum H5D_mpio_actual_io_mode_t { H5D_MPIO_CHUNK_MIXED = 0x1 | 0x2, /**< HDF5 performed one the chunk collective optimization schemes and some chunks were accessed independently, some collectively. */ - /** \internal The contiguous case is separate from the bit field. */ H5D_MPIO_CONTIGUOUS_COLLECTIVE = 0x4 /**< Collective I/O was performed on a contiguous dataset */ } H5D_mpio_actual_io_mode_t; @@ -344,7 +343,8 @@ typedef enum H5D_mpio_no_collective_cause_t { H5D_MPIO_SET_INDEPENDENT = 0x01, /**< Collective I/O was not performed because independent I/O was requested */ H5D_MPIO_DATATYPE_CONVERSION = 0x02, - /**< Collective I/O was not performed because datatype conversions were required */ + /**< Collective I/O was not performed because datatype conversions were required and selection I/O was not + possible (see below) */ H5D_MPIO_DATA_TRANSFORMS = 0x04, /**< Collective I/O was not performed because data transforms needed to be applied */ H5D_MPIO_MPI_OPT_TYPES_ENV_VAR_DISABLED = 0x08, @@ -357,11 +357,72 @@ typedef enum H5D_mpio_no_collective_cause_t { /**< Collective I/O was not performed because parallel filtered writes are disabled */ H5D_MPIO_ERROR_WHILE_CHECKING_COLLECTIVE_POSSIBLE = 0x80, /**< Error */ - H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE = 0x100 + H5D_MPIO_NO_SELECTION_IO = 0x100, + /**< Collective I/O would be supported by selection or vector I/O but that feature was disabled + (see causes via H5Pget_no_selection_io_cause()) */ + H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE = 0x200 /**< Sentinel */ } H5D_mpio_no_collective_cause_t; //! 
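/*
 * [Editorial illustration -- not part of the original patch] The new
 * H5D_MPIO_NO_SELECTION_IO value above ties the collective-I/O diagnostics to
 * the selection-I/O diagnostics added by this change. The sketch below shows
 * how an application built with parallel HDF5 might chain the two queries
 * after a collective H5Dwrite(); "dxpl_id" stands for the transfer property
 * list used for that write, and error handling is minimal.
 */
static void
example_diagnose_broken_collective_io(hid_t dxpl_id)
{
    uint32_t local_cause     = 0; /* Reasons this rank broke collective I/O */
    uint32_t global_cause    = 0; /* Reasons any rank broke collective I/O */
    uint32_t no_sel_io_cause = 0; /* Reasons selection/vector I/O was not used */

    if (H5Pget_mpio_no_collective_cause(dxpl_id, &local_cause, &global_cause) < 0)
        return;

    /* If collective I/O was skipped because selection/vector I/O was not
     * performed, ask why; the answer is a bitwise OR of the H5D_SEL_IO_*
     * values defined below */
    if (local_cause & H5D_MPIO_NO_SELECTION_IO)
        (void)H5Pget_no_selection_io_cause(dxpl_id, &no_sel_io_cause);
}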
+/** + * Causes for H5Pget_no_selection_io_cause() property + */ +#define H5D_SEL_IO_DISABLE_BY_API \ + (0x0001u) /**< Selection I/O was not performed because \ + the feature was disabled by the API */ +#define H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET \ + (0x0002u) /**< Selection I/O was not performed because the \ + dataset was neither contiguous nor chunked */ +#define H5D_SEL_IO_CONTIGUOUS_SIEVE_BUFFER \ + (0x0004u) /**< Selection I/O was not performed because of \ + sieve buffer for contiguous dataset */ +#define H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB \ + (0x0008u) /**< Selection I/O was not performed because the VFD \ + does not have vector or selection I/O callback */ +#define H5D_SEL_IO_PAGE_BUFFER \ + (0x0010u) /**< Selection I/O was not performed because of \ + page buffer */ +#define H5D_SEL_IO_DATASET_FILTER \ + (0x0020u) /**< Selection I/O was not performed because of \ + dataset filters */ +#define H5D_SEL_IO_CHUNK_CACHE \ + (0x0040u) /**< Selection I/O was not performed because of \ + chunk cache */ +#define H5D_SEL_IO_TCONV_BUF_TOO_SMALL \ + (0x0080u) /**< Selection I/O was not performed because the \ + type conversion buffer is too small */ +#define H5D_SEL_IO_BKG_BUF_TOO_SMALL \ + (0x0100u) /**< Selection I/O was not performed because the \ + type conversion background buffer is too small */ +#define H5D_SEL_IO_DEFAULT_OFF \ + (0x0200u) /**< Selection I/O was not performed because the \ + selection I/O mode is DEFAULT and the library \ + chose it to be off for this case */ + +/* Causes for H5D_MPIO_NO_SELECTION_IO */ +#define H5D_MPIO_NO_SELECTION_IO_CAUSES \ + (H5D_SEL_IO_DISABLE_BY_API | H5D_SEL_IO_TCONV_BUF_TOO_SMALL | H5D_SEL_IO_BKG_BUF_TOO_SMALL | \ + H5D_SEL_IO_DATASET_FILTER | H5D_SEL_IO_CHUNK_CACHE) + +//! +/** + * Selection I/O mode property + * + * \details The default value, #H5D_SELECTION_IO_MODE_DEFAULT, + * indicates selection I/O can be ON or OFF as + * determined by library internal. + */ +typedef enum H5D_selection_io_mode_t { + H5D_SELECTION_IO_MODE_DEFAULT = 0, + /**< Default selection I/O mode. */ + H5D_SELECTION_IO_MODE_OFF, + /**< Selection I/O is off. */ + H5D_SELECTION_IO_MODE_ON + /**< Selection I/O is on. */ +} H5D_selection_io_mode_t; +//! + /********************/ /* Public Variables */ /********************/ @@ -8204,6 +8265,191 @@ H5_DLL herr_t H5Pset_dataset_io_hyperslab_selection(hid_t plist_id, unsigned ran const hsize_t start[], const hsize_t stride[], const hsize_t count[], const hsize_t block[]); +/** + * + * \ingroup DXPL + * + * \brief Sets the selection I/O mode + * + * \dxpl_id{plist_id} + * \param[in] selection_io_mode The selection I/O mode to be set + * + * \return \herr_t + * + * \details H5Pset_selection_io() sets the selection I/O mode + * \p selection_io_mode in the dataset transfer property + * list \p plist_id. + * + * This can be used to enable collective I/O with type conversion, or + * with custom VFDs that support vector or selection I/O. + * + * Values that can be set in \p selection_io_mode: + * \snippet this H5D_selection_io_mode_t_snip + * \click4more + * + * \note The library may not perform selection I/O as it asks for if the + * layout callback determines that it is not feasible to do so. Please + * refer to H5Pget_no_selection_io_cause() for details. + * + * When used with type conversion, selection I/O requires the type + * conversion buffer (and the background buffer if applicable) be large + * enough to hold the entirety of the data involved in the I/O. 
For + * read operations, the library will use the application's read buffer + * as the type conversion buffer if the memory type is not smaller than + * the file type, eliminating the need for a separate type conversion + * buffer (a background buffer may still be required). For write + * operations, the library will similarly use the write buffer as a + * type conversion buffer, but only if H5Pset_modify_write_buf() is + * used to allow the library to modify the contents of the write + * buffer. + * + * \since 1.14.1 + * + */ +H5_DLL herr_t H5Pset_selection_io(hid_t plist_id, H5D_selection_io_mode_t selection_io_mode); + +/** + * + * \ingroup DXPL + * + * \brief Retrieves the selection I/O mode + * + * \dxpl_id{plist_id} + * \param[out] selection_io_mode The selection I/O mode + * + * \return \herr_t + * + * \details H5Pget_selection_io() queries the selection I/O mode set in + * in the dataset transfer property list \p plist_id. + * + * Values returned in \p selection_io_mode: + * \snippet this H5D_selection_io_mode_t_snip + * \click4more + * + * \note The library may not perform selection I/O as it asks for if the + * layout callback determines that it is not feasible to do so. Please + * refer to H5Pget_no_selection_io_cause() for details. + * + * \since 1.14.1 + * + */ +H5_DLL herr_t H5Pget_selection_io(hid_t plist_id, H5D_selection_io_mode_t *selection_io_mode); + +/** + * \ingroup DXPL + * + * \brief Retrieves the cause for not performing selection or vector I/O on the + * last parallel I/O call + * + * \dxpl_id{plist_id} + * \param[out] no_selection_io_cause A bitwise set value indicating the relevant + * causes that prevented selection I/O from + * being performed + * \return \herr_t + * + * \par Motivation: + * A user can request selection I/O to be performed via a data transfer + * property list (DXPL). This can be used to enable collective I/O with + * type conversion, or with custom VFDs that support vector or selection + * I/O. However, there are conditions that can cause HDF5 to forgo + * selection or vector I/O and perform legacy (scalar) I/O instead. + * + * \details H5Pget_no_selection_io_cause() can be used to determine whether + * selection or vector I/O was applied for the last preceding I/O call. + * If selection or vector I/O was not used, this function retrieves the + * cause(s) that prevent selection or vector I/O to be performed on + * that I/O call. The properties retrieved by this function are set + * before I/O takes place and are retained even when I/O fails. + * + * If a selection I/O request falls back to vector I/O, that is not + * considered "breaking" selection I/O by this function, since vector + * I/O still passes all information to the file driver in a single + * callback. + * + * Valid values returned in \p no_selection_io_cause are listed + * as follows. If there are multiple causes, it is a bitwise OR of + * the relevant causes. 
+ * + * - #H5D_SEL_IO_DISABLE_BY_API + * Selection I/O was not performed because the feature was disabled by the API + * - #H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET + * Selection I/O was not performed because the dataset was neither contiguous nor chunked + * - #H5D_SEL_IO_CONTIGUOUS_SIEVE_BUFFER + * Selection I/O was not performed because of sieve buffer for contiguous dataset + * - #H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB + * Selection I/O was not performed because the VFD does not have vector or selection I/O callback + * - #H5D_SEL_IO_PAGE_BUFFER + * Selection I/O was not performed because of page buffer + * - #H5D_SEL_IO_DATASET_FILTER + * Selection I/O was not performed because of dataset filters + * - #H5D_SEL_IO_CHUNK_CACHE + * Selection I/O was not performed because of chunk cache + * - #H5D_SEL_IO_TCONV_BUF_TOO_SMALL + * Selection I/O was not performed because the type conversion buffer is too small + * - #H5D_SEL_IO_BKG_BUF_TOO_SMALL + * Selection I/O was not performed because the type conversion background buffer is too small + * - #H5D_SEL_IO_DEFAULT_OFF + * Selection I/O was not performed because the selection I/O mode is DEFAULT and the library chose it + * to be off for this case + * + * \since 1.14.1 + * + */ +H5_DLL herr_t H5Pget_no_selection_io_cause(hid_t plist_id, uint32_t *no_selection_io_cause); + +/** + * + * \ingroup DXPL + * + * \brief Allows the library to modify the contents of the write buffer + * + * \dxpl_id{plist_id} + * \param[in] modify_write_buf Whether the library can modify the contents of the write buffer + * + * \return \herr_t + * + * \details H5Pset_modify_write_buf() sets whether the library is allowed to + * modify the contents of write buffers passed to HDF5 API routines + * that are passed the dataset transfer property list \p plist_id. The + * default value for modify_write_buf is FALSE. + * + * This function can be used to allow the library to perform in-place + * type conversion on write operations to save memory space. This is + * currently only used for selection I/O operations, which are used for + * collective I/O with type conversion. After making an API call with + * this parameter set to TRUE, the contents of the write buffer are + * undefined. + * + * \note When modify_write_buf is set to TRUE the library may violate the + * const qualifier on the API parameter for the write buffer. + * + * \since 1.14.1 + * + */ +H5_DLL herr_t H5Pset_modify_write_buf(hid_t plist_id, hbool_t modify_write_buf); + +/** + * + * \ingroup DXPL + * + * \brief Retrieves the "modify write buffer" property + * + * \dxpl_id{plist_id} + * \param[out] modify_write_buf Whether the library can modify the contents of the write buffer + * + * \return \herr_t + * + * \details H5Pget_modify_write_buf() gets the "modify write buffer" property + * from the dataset transfer property list \p plist_id. This property + * determines whether the library is allowed to modify the contents of + * write buffers passed to HDF5 API routines that are passed + * \p plist_id. The default value for modify_write_buf is FALSE. 
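/*
 * [Editorial illustration -- not part of the original patch] The
 * H5Pset_modify_write_buf()/H5Pget_modify_write_buf() documentation above can
 * be summarized with a short write sequence; the dataset and DXPL identifiers
 * are placeholders and error checking is elided:
 *
 *     static void
 *     example_in_place_tconv_write(hid_t did, hid_t dxpl_id)
 *     {
 *         int buf[100];
 *         int i;
 *
 *         for (i = 0; i < 100; i++)
 *             buf[i] = i;
 *
 *         // Use selection I/O and allow HDF5 to convert datatypes in place,
 *         // scribbling on "buf" instead of allocating a conversion buffer
 *         (void)H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON);
 *         (void)H5Pset_modify_write_buf(dxpl_id, TRUE);
 *
 *         // Write with type conversion (e.g. a dataset stored as H5T_STD_I64BE)
 *         (void)H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, buf);
 *
 *         // After the call the contents of "buf" are undefined and must be
 *         // re-initialized before reuse
 *     }
 */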
+ * + * \since 1.14.1 + * + */ +H5_DLL herr_t H5Pget_modify_write_buf(hid_t plist_id, hbool_t *modify_write_buf); + /** * \ingroup LCPL * diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h index 19127b36f80..4303eee3995 100644 --- a/src/H5Sprivate.h +++ b/src/H5Sprivate.h @@ -260,6 +260,7 @@ H5_DLL herr_t H5S_select_project_simple(const H5S_t *space, H5S_t *new_space, H5_DLL herr_t H5S_select_project_intersection(H5S_t *src_space, H5S_t *dst_space, H5S_t *src_intersect_space, H5S_t **new_space_ptr, hbool_t share_space); H5_DLL herr_t H5S_select_subtract(H5S_t *space, H5S_t *subtract_space); +H5_DLL herr_t H5S_select_contig_block(H5S_t *space, hbool_t *is_contig, hsize_t *off, size_t *len); /* Operations on all selections */ H5_DLL herr_t H5S_select_all(H5S_t *space, hbool_t rel_prev); diff --git a/src/H5Spublic.h b/src/H5Spublic.h index 871a8e7e035..bd5a82cc627 100644 --- a/src/H5Spublic.h +++ b/src/H5Spublic.h @@ -864,9 +864,9 @@ H5_DLL hid_t H5Ssel_iter_create(hid_t spaceid, size_t elmt_size, unsigned flags) * * \space_id{sel_iter_id} * \param[in] maxseq Maximum number of sequences to retrieve - * \param[in] maxbytes Maximum number of bytes to retrieve in sequences + * \param[in] maxelmts Maximum number of elements to retrieve in sequences * \param[out] nseq Number of sequences retrieved - * \param[out] nbytes Number of bytes retrieved, in all sequences + * \param[out] nelmts Number of elements retrieved, in all sequences * \param[out] off Array of sequence offsets * \param[out] len Array of sequence lengths * @@ -883,9 +883,9 @@ H5_DLL hid_t H5Ssel_iter_create(hid_t spaceid, size_t elmt_size, unsigned flags) * #H5S_SEL_ITER_GET_SEQ_LIST_SORTED flag is passed to * H5Ssel_iter_create() for a point selection. * - * \p maxseq and \p maxbytes specify the most sequences or bytes + * \p maxseq and \p maxelmts specify the most sequences or elements * possible to place into the \p off and \p len arrays. \p nseq and - * \p nbytes return the actual number of sequences and bytes put + * \p nelmts return the actual number of sequences and elements put * into the arrays. * * Each call to H5Ssel_iter_get_seq_list() will retrieve the next @@ -897,13 +897,13 @@ H5_DLL hid_t H5Ssel_iter_create(hid_t spaceid, size_t elmt_size, unsigned flags) * the iterator was created from (which can be retrieved with * H5Sget_select_npoints(). When there are no further sequences of * elements to retrieve, calls to this routine will set \p nseq - * and \p nbytes to zero. + * and \p nelmts to zero. * * \since 1.12.0 * */ -H5_DLL herr_t H5Ssel_iter_get_seq_list(hid_t sel_iter_id, size_t maxseq, size_t maxbytes, size_t *nseq, - size_t *nbytes, hsize_t *off, size_t *len); +H5_DLL herr_t H5Ssel_iter_get_seq_list(hid_t sel_iter_id, size_t maxseq, size_t maxelmts, size_t *nseq, + size_t *nelmts, hsize_t *off, size_t *len); /** * \ingroup H5S * diff --git a/src/H5Sselect.c b/src/H5Sselect.c index 9d13cf2972c..02889f71707 100644 --- a/src/H5Sselect.c +++ b/src/H5Sselect.c @@ -2865,9 +2865,9 @@ H5Ssel_iter_create(hid_t space_id, size_t elmt_size, unsigned flags) herr_t H5Ssel_iter_get_seq_list(sel_iter_id, maxseq, maxbytes, nseq, nbytes, off, len) hid_t sel_iter_id; IN: ID of the dataspace selection iterator to retrieve sequence from size_t maxseq; IN: Max. # of sequences to retrieve - size_t maxbytes; IN: Max. # of bytes to retrieve in sequences + size_t maxelmts; IN: Max. 
# of elements to retrieve in sequences size_t *nseq; OUT: # of sequences retrieved - size_t *nbytes; OUT: # of bytes retrieved, in all sequences + size_t *nelmts; OUT: # of elements retrieved, in all sequences hsize_t *off; OUT: Array of sequence offsets size_t *len; OUT: Array of sequence lengths RETURNS @@ -2882,8 +2882,8 @@ H5Ssel_iter_create(hid_t space_id, size_t elmt_size, unsigned flags) selections is "in order selected", unless the H5S_SEL_ITER_GET_SEQ_LIST_SORTED flag is passed to H5Sset_iter_create for a point selection. - MAXSEQ and MAXBYTES specify the most sequences or bytes possible to - place into the OFF and LEN arrays. *NSEQ and *NBYTES return the actual + MAXSEQ and MAXELMTS specify the most sequences or bytes possible to + place into the OFF and LEN arrays. *NSEQ and *NELMTS return the actual number of sequences and bytes put into the arrays. Each call to H5Ssel_iter_get_seq_list() will retrieve the next set @@ -2894,7 +2894,7 @@ H5Ssel_iter_create(hid_t space_id, size_t elmt_size, unsigned flags) of elements selected in the dataspace the iterator was created from (which can be retrieved with H5Sget_select_npoints). When there are no further sequences of elements to retrieve, calls to this routine will - set *NSEQ and *NBYTES to zero. + set *NSEQ and *NELMTS to zero. PROGRAMMER Quincey Koziol - February 11, 2019 GLOBAL VARIABLES @@ -2903,21 +2903,21 @@ H5Ssel_iter_create(hid_t space_id, size_t elmt_size, unsigned flags) REVISION LOG --------------------------------------------------------------------------*/ herr_t -H5Ssel_iter_get_seq_list(hid_t sel_iter_id, size_t maxseq, size_t maxbytes, size_t *nseq /*out*/, - size_t *nbytes /*out*/, hsize_t *off /*out*/, size_t *len /*out*/) +H5Ssel_iter_get_seq_list(hid_t sel_iter_id, size_t maxseq, size_t maxelmts, size_t *nseq /*out*/, + size_t *nelmts /*out*/, hsize_t *off /*out*/, size_t *len /*out*/) { H5S_sel_iter_t *sel_iter; /* Dataspace selection iterator to operate on */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) - H5TRACE7("e", "izzxxxx", sel_iter_id, maxseq, maxbytes, nseq, nbytes, off, len); + H5TRACE7("e", "izzxxxx", sel_iter_id, maxseq, maxelmts, nseq, nelmts, off, len); /* Check args */ if (NULL == (sel_iter = (H5S_sel_iter_t *)H5I_object_verify(sel_iter_id, H5I_SPACE_SEL_ITER))) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "not a dataspace selection iterator") if (NULL == nseq) HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "'nseq' pointer is NULL") - if (NULL == nbytes) + if (NULL == nelmts) HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "'nbytes' pointer is NULL") if (NULL == off) HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "offset array pointer is NULL") @@ -2925,17 +2925,94 @@ H5Ssel_iter_get_seq_list(hid_t sel_iter_id, size_t maxseq, size_t maxbytes, size HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "length array pointer is NULL") /* Get the sequences of bytes */ - if (maxseq > 0 && maxbytes > 0 && sel_iter->elmt_left > 0) { - if (H5S_SELECT_ITER_GET_SEQ_LIST(sel_iter, maxseq, maxbytes, nseq, nbytes, off, len) < 0) + if (maxseq > 0 && maxelmts > 0 && sel_iter->elmt_left > 0) { + if (H5S_SELECT_ITER_GET_SEQ_LIST(sel_iter, maxseq, maxelmts, nseq, nelmts, off, len) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "sequence length generation failed") } /* end if */ else - *nseq = *nbytes = 0; + *nseq = *nelmts = 0; done: FUNC_LEAVE_API(ret_value) } /* end H5Ssel_iter_get_seq_list() */ +/*-------------------------------------------------------------------------- + NAME + 
H5S_select_contig_block + + PURPOSE + Determines if a selection is a single contiguous block, and returns the + offset and length (in elements) if it is + + USAGE + herr_t H5S_select_contig_block(space, is_contig, off, len) + H5S_t *space; IN: Selection to check + hbool_t *is_contig; OUT: Whether the selection is contiguous + hsize_t *off; OUT: Offset of selection + size_t *len; OUT: Length of selection + + RETURNS + Non-negative on success/Negative on failure. + + DESCRIPTION + Determines if a selection is a single contiguous block, and returns the + offset and length (in elements) if it is. + + GLOBAL VARIABLES + COMMENTS, BUGS, ASSUMPTIONS + EXAMPLES + REVISION LOG +--------------------------------------------------------------------------*/ +herr_t +H5S_select_contig_block(H5S_t *space, hbool_t *is_contig, hsize_t *off, size_t *len) +{ + H5S_sel_iter_t *iter = NULL; /* Selection iterator */ + hbool_t iter_init = FALSE; /* Selection iteration info has been initialized */ + size_t nseq_tmp; + size_t nelem_tmp; + hsize_t sel_off; + size_t sel_len; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(space); + + /* Allocate and initialize the iterator */ + if (NULL == (iter = H5FL_MALLOC(H5S_sel_iter_t))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate iterator") + if (H5S_select_iter_init(iter, space, 1, 0) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize memory selection information") + iter_init = TRUE; + + /* Get list of sequences for selection, to check if it is contiguous */ + if (H5S_SELECT_ITER_GET_SEQ_LIST(iter, (size_t)1, (size_t)-1, &nseq_tmp, &nelem_tmp, &sel_off, &sel_len) < + 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTNEXT, FAIL, "sequence length generation failed") + + /* If the first sequence includes all the elements selected in this piece, it it contiguous */ + H5_CHECK_OVERFLOW(space->select.num_elem, hsize_t, size_t); + if (sel_len == (size_t)space->select.num_elem) { + if (is_contig) + *is_contig = TRUE; + if (off) + *off = sel_off; + if (len) + *len = sel_len; + } + else if (is_contig) + *is_contig = FALSE; + +done: + if (iter_init && H5S_SELECT_ITER_RELEASE(iter) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") + if (iter) + iter = H5FL_FREE(H5S_sel_iter_t, iter); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5S_select_contig_block() */ + /*-------------------------------------------------------------------------- NAME H5Ssel_iter_reset diff --git a/src/H5private.h b/src/H5private.h index 70aed8f76e1..eac2cbaae41 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -1965,14 +1965,6 @@ extern hbool_t H5_libterm_g; /* Is the library being shutdown? */ #endif /* H5_HAVE_THREADSAFE */ -/* Extern global to determine if we should use selection I/O if available (this - * variable should be removed once selection I/O performs as well as the - * previous scalar I/O implementation - * - * NOTE: Must be exposed via H5_DLLVAR so parallel tests pass on Windows. 
- */ -H5_DLLVAR hbool_t H5_use_selection_io_g; - #ifdef H5_HAVE_CODESTACK /* Include required function stack header */ diff --git a/src/H5trace.c b/src/H5trace.c index 3be5b9181bf..48e94a631d7 100644 --- a/src/H5trace.c +++ b/src/H5trace.c @@ -673,6 +673,31 @@ H5_trace_args(H5RS_str_t *rs, const char *type, va_list ap) } /* end block */ break; + case 'C': /* H5D_selection_io_mode_t */ + { + H5D_selection_io_mode_t selection_io_mode = + (H5D_selection_io_mode_t)HDva_arg(ap, int); + + switch (selection_io_mode) { + case H5D_SELECTION_IO_MODE_DEFAULT: + H5RS_acat(rs, "H5D_SELECTION_IO_MODE_DEFAULT"); + break; + + case H5D_SELECTION_IO_MODE_OFF: + H5RS_acat(rs, "H5D_SELECTION_IO_MODE_OFF"); + break; + + case H5D_SELECTION_IO_MODE_ON: + H5RS_acat(rs, "H5D_SELECTION_IO_MODE_ON"); + break; + + default: + H5RS_asprintf_cat(rs, "%ld", (long)selection_io_mode); + break; + } /* end switch */ + } /* end block */ + break; + case 'f': /* H5D_fill_time_t */ { H5D_fill_time_t fill_time = (H5D_fill_time_t)HDva_arg(ap, int); diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index da98f15a020..c3365b7b081 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -337,6 +337,7 @@ set (H5_TESTS page_buffer dtypes dsets + select_io_dset chunk_info # compression lib link cmpd_dset mdset diff --git a/test/Makefile.am b/test/Makefile.am index bad52c8b24d..291907ca0f9 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -65,7 +65,7 @@ TEST_PROG= testhdf5 \ accum hyperslab istore bittests dt_arith page_buffer \ dtypes dsets chunk_info cmpd_dset mdset cmpd_dtransform filter_fail extend direct_chunk \ external efc objcopy objcopy_ref links unlink twriteorder big mtime \ - fillval mount \ + fillval mount select_io_dset\ flush1 flush2 app_ref enum set_extent ttsafe enc_dec_plist \ enc_dec_plist_cross_platform getname vfd ros3 s3comms hdfs ntypes \ dangle dtransform reserved cross_read freespace mf vds file_image \ diff --git a/test/enc_dec_plist.c b/test/enc_dec_plist.c index fd4ae5eedfc..5b75178c7e9 100644 --- a/test/enc_dec_plist.c +++ b/test/enc_dec_plist.c @@ -367,6 +367,12 @@ main(void) if ((H5Pset_data_transform(dxpl, c_to_f)) < 0) FAIL_STACK_ERROR; + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + FAIL_STACK_ERROR; + + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + FAIL_STACK_ERROR; + /* Test encoding & decoding property list */ if (test_encode_decode(dxpl, low, high) < 0) FAIL_PUTS_ERROR("DXPL encoding/decoding failed\n"); diff --git a/test/gen_plist.c b/test/gen_plist.c index afe8494bddc..5302dc7e3c8 100644 --- a/test/gen_plist.c +++ b/test/gen_plist.c @@ -183,6 +183,11 @@ main(void) assert(ret > 0); if ((ret = H5Pset_data_transform(dxpl1, c_to_f)) < 0) assert(ret > 0); + if ((ret = H5Pset_selection_io(dxpl1, H5D_SELECTION_IO_MODE_ON)) < 0) + assert(ret > 0); + + if ((ret = H5Pset_modify_write_buf(dxpl1, TRUE)) < 0) + assert(ret > 0); if ((ret = encode_plist(dxpl1, little_endian, word_length, "testfiles/plist_files/dxpl_")) < 0) assert(ret > 0); diff --git a/test/select_io_dset.c b/test/select_io_dset.c new file mode 100644 index 00000000000..9a1de06f2df --- /dev/null +++ b/test/select_io_dset.c @@ -0,0 +1,3269 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Programmer: + * + * Purpose: Tests selection IO for the dataset interface (H5D) + */ + +#include "testhdf5.h" +#include "H5srcdir.h" + +const char *FILENAME[] = {"select_io", /* 0 */ + NULL}; + +#define FILENAME_BUF_SIZE 1024 + +/* + * Test configurations + */ +typedef enum { + TEST_NO_TYPE_CONV, /* no type conversion (null case) */ + TEST_NO_SIZE_CHANGE_NO_BKG, /* no size change, no bkg buffer */ + TEST_LARGER_MEM_NO_BKG, /* larger memory type, no bkg buffer */ + TEST_SMALLER_MEM_NO_BKG, /* smaller memory type, no bkg buffer */ + TEST_CMPD_WITH_BKG, /* compound types with bkg buffer */ + TEST_MULTI_CONV_NO_BKG, /* multi dataset test 1 */ + TEST_MULTI_CONV_BKG, /* multi dataset test 2 */ + TEST_MULTI_CONV_SIZE_CHANGE, /* multi dataset test 3 */ + TEST_MULTI_ALL, /* multi dataset test 4 */ + TEST_SELECT_NTESTS +} test_select_config_t; + +#define DSET_SELECT_DIM 100 +#define DSET_SELECT_CHUNK_DIM 10 + +#define MULTI_NUM_DSETS 3 +#define MULTI_MIN_DSETS 3 +#define DSET_NAME_LEN 64 + +/* Compound type */ +typedef struct s1_t { + int a; + int b; + int c; + int d; +} s1_t; + +/* + * Variation of s1 with: + * --no conversion for 2 member types + * --1 larger mem type, + * --1 smaller mem type + */ +typedef struct s2_t { + int a; + long b; + int c; + short d; +} s2_t; + +/* Variation of s1: reverse of s1_t */ +typedef struct s3_t { + int d; + int c; + int b; + int a; +} s3_t; + +/* Variations of s1: only 2 members in s1_t */ +typedef struct s4_t { + unsigned int b; + unsigned int d; +} s4_t; + +/* Defines for test_multi_dsets_all() */ +typedef enum { + DSET_WITH_NO_CONV, /* Dataset with no type conversion */ + DSET_WITH_CONV_AND_NO_BKG, /* Dataset with type conversion but no background buffer */ + DSET_WITH_CONV_AND_BKG, /* Dataset with type conversion and background buffer */ + DSET_NTTYPES +} multi_dset_type_t; + +/* Test setting A and B */ +#define SETTING_A 1 +#define SETTING_B 2 + +/* Definitions of the test modes for test_get_no_selection_io_cause() */ +#define TEST_DISABLE_BY_API 0x001 +#define TEST_DATATYPE_CONVERSION 0x002 +#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET 0x004 +#define TEST_CONTIGUOUS_SIEVE_BUFFER 0x008 +#define TEST_NO_VECTOR_OR_SELECTION_IO_CB 0x010 +#define TEST_PAGE_BUFFER 0x020 +#define TEST_DATASET_FILTER 0x040 +#define TEST_CHUNK_CACHE 0x080 +#define TEST_TCONV_BUF_TOO_SMALL 0x100 +#define TEST_IN_PLACE_TCONV 0x200 + +/* + * Case 1: single dataset read/write, no type conversion (null case) + * --create dataset with H5T_NATIVE_INT + * --write/read dataset with H5T_NATIVE_INT + */ +static herr_t +test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +{ + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + int wbuf[DSET_SELECT_DIM]; + int wbuf_bak[DSET_SELECT_DIM]; + int trans_wbuf[DSET_SELECT_DIM]; + int rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "2*x"; + + /* Create 1d data space */ + dims[0] = 
DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + FAIL_STACK_ERROR; + } + + /* Generate dataset name */ + HDsnprintf(dset_name, sizeof(dset_name), "no_tconv_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + + /* Create dataset */ + if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Initialize data */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + wbuf[i] = i; + trans_wbuf[i] = 2 * wbuf[i]; + } + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + FAIL_STACK_ERROR; + + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + FAIL_STACK_ERROR; + + /* Set modify write buffer if requested */ + if (mwbuf) + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + FAIL_STACK_ERROR; + + if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) + FAIL_STACK_ERROR; + + /* Set data transform */ + if (dtrans) + if (H5Pset_data_transform(dxpl, expr) < 0) + FAIL_STACK_ERROR; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(wbuf_bak, wbuf, sizeof(wbuf)); + + /* Write data to the dataset with/without data transform */ + if (H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) + FAIL_STACK_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(wbuf, wbuf_bak, sizeof(wbuf)); + + /* Read data from the dataset without data transform set in dxpl */ + if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, ntrans_dxpl, rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify data or transformed data read */ + for (i = 0; i < DSET_SELECT_DIM; i++) + if (rbuf[i] != (dtrans ? 
trans_wbuf[i] : wbuf[i])) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + + if (dtrans) { + + /* Read the data from the dataset with data transform set in dxpl */ + if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify data read is transformed a second time */ + for (i = 0; i < DSET_SELECT_DIM; i++) + if (rbuf[i] != (2 * trans_wbuf[i])) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + } + + if (H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + if (H5Dclose(did) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(ntrans_dxpl) < 0) + FAIL_STACK_ERROR; + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Sclose(sid); + H5Dclose(did); + H5Pclose(dcpl); + H5Pclose(dxpl); + H5Pclose(ntrans_dxpl); + } + H5E_END_TRY; + + return FAIL; + +} /* test_no_type_conv() */ + +/* + * Case 2: single dataset read/write, no size change, no background buffer + * --create dataset with H5T_STD_I32BE + * --write/read dataset with H5T_STD_I32LE + * --read again with H5T_STD_I32BE + */ +static herr_t +test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +{ + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + char *wbuf = NULL; + char *wbuf_bak = NULL; + char *rbuf = NULL; + char dset_name[DSET_NAME_LEN]; + + if ((wbuf = (char *)HDmalloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) + FAIL_STACK_ERROR; + if (mwbuf && (wbuf_bak = (char *)HDmalloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) + FAIL_STACK_ERROR; + if ((rbuf = (char *)HDmalloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) + FAIL_STACK_ERROR; + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + FAIL_STACK_ERROR; + + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + FAIL_STACK_ERROR; + + /* Set modify write buffer if requested */ + if (mwbuf) + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + FAIL_STACK_ERROR; + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + FAIL_STACK_ERROR; + } + + /* Generate dataset name */ + HDsnprintf(dset_name, sizeof(dset_name), "no_size_change_%s_%s", chunked ? "chunked" : "contig", + mwbuf ? 
"mwbuf" : "nomwbuf"); + + /* Create 1d dataset */ + if ((did = H5Dcreate2(fid, dset_name, H5T_STD_I32BE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Initialize data */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + wbuf[i * 4 + 3] = 0x1; + wbuf[i * 4 + 2] = 0x2; + wbuf[i * 4 + 1] = 0x3; + wbuf[i * 4 + 0] = 0x4; + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(wbuf_bak, wbuf, (size_t)(4 * DSET_SELECT_DIM)); + + /* Write the data to the dataset with little endian */ + if (H5Dwrite(did, H5T_STD_I32LE, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) + FAIL_STACK_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(wbuf, wbuf_bak, (size_t)(4 * DSET_SELECT_DIM)); + + /* Read the data from the dataset with little endian */ + if (H5Dread(did, H5T_STD_I32LE, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify data read little endian */ + for (i = 0; i < DSET_SELECT_DIM; i++) + if (rbuf[4 * i + 0] != wbuf[4 * i + 0] || rbuf[4 * i + 1] != wbuf[4 * i + 1] || + rbuf[4 * i + 2] != wbuf[4 * i + 2] || rbuf[4 * i + 3] != wbuf[4 * i + 3]) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + + /* Read the data from the dataset with big endian */ + if (H5Dread(did, H5T_STD_I32BE, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify data read in big endian */ + for (i = 0; i < DSET_SELECT_DIM; i++) + if (rbuf[4 * i + 0] != wbuf[4 * i + 3] || rbuf[4 * i + 1] != wbuf[4 * i + 2] || + rbuf[4 * i + 2] != wbuf[4 * i + 1] || rbuf[4 * i + 3] != wbuf[4 * i + 0]) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + + if (H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + if (H5Dclose(did) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + + HDfree(wbuf); + HDfree(wbuf_bak); + HDfree(rbuf); + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Sclose(sid); + H5Dclose(did); + H5Pclose(dcpl); + H5Pclose(dxpl); + } + H5E_END_TRY; + + if (wbuf) + HDfree(wbuf); + if (wbuf_bak) + HDfree(wbuf_bak); + if (wbuf) + HDfree(rbuf); + + return FAIL; + +} /* test_no_size_change_no_bkg() */ + +/* + * Case 3: single dataset read/write, larger mem type, no background buffer + * --create dataset with H5T_NATIVE_INT + * --write dataset with H5T_NATIVE_LONG + * --read dataset with H5T_NATIVE_LLONG + * + */ +static herr_t +test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +{ + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + long wbuf[DSET_SELECT_DIM]; + long wbuf_bak[DSET_SELECT_DIM]; + long trans_wbuf[DSET_SELECT_DIM]; + long long rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "5 * (10 - x)"; + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + FAIL_STACK_ERROR; + } + + /* Generate dataset name */ + HDsnprintf(dset_name, sizeof(dset_name), "larger_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", mwbuf ? 
"mwbuf" : "nomwbuf"); + + /* Create dataset */ + if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Initialize data */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + wbuf[i] = i; + trans_wbuf[i] = 5 * (10 - wbuf[i]); + } + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + FAIL_STACK_ERROR; + + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + FAIL_STACK_ERROR; + + /* Set modify write buffer if requested */ + if (mwbuf) + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + FAIL_STACK_ERROR; + + if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) + FAIL_STACK_ERROR; + + /* Set data transform */ + if (dtrans) + if (H5Pset_data_transform(dxpl, expr) < 0) + FAIL_STACK_ERROR; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(wbuf_bak, wbuf, sizeof(wbuf)); + + /* Write data to the dataset with/without data transform set in dxpl */ + if (H5Dwrite(did, H5T_NATIVE_LONG, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) + FAIL_STACK_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(wbuf, wbuf_bak, sizeof(wbuf)); + + /* Read the data from the dataset without data transform in dxpl */ + if (H5Dread(did, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, ntrans_dxpl, rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify data or transformed data read */ + for (i = 0; i < DSET_SELECT_DIM; i++) + if (rbuf[i] != (long long)(dtrans ? trans_wbuf[i] : wbuf[i])) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + + if (dtrans) { + + /* Read data from the dataset with data transform set in dxpl */ + if (H5Dread(did, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify data read is transformed a second time */ + for (i = 0; i < DSET_SELECT_DIM; i++) + if (rbuf[i] != (long long)(5 * (10 - trans_wbuf[i]))) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + } + + if (H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + if (H5Dclose(did) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(ntrans_dxpl) < 0) + FAIL_STACK_ERROR; + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Sclose(sid); + H5Dclose(did); + H5Pclose(dcpl); + H5Pclose(dxpl); + H5Pclose(ntrans_dxpl); + } + H5E_END_TRY; + + return FAIL; + +} /* test_larger_mem_type_no_bkg() */ + +/* + * Case 4: single dataset reader/write, smaller mem type, no background buffer + * --create dataset with H5T_NATIVE_INT + * --write dataset with H5T_NATIVE_SHORT + * --read dataset with H5T_NATIVE_SHORT + */ +static herr_t +test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +{ + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + short wbuf[DSET_SELECT_DIM]; + int wbuf_bak[DSET_SELECT_DIM]; + short trans_wbuf[DSET_SELECT_DIM]; + short rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "2 * (10 + x)"; + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + + if (chunked) { + cdims[0] = 
DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + FAIL_STACK_ERROR; + } + + /* Generate dataset name */ + HDsnprintf(dset_name, sizeof(dset_name), "smaller_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + + /* Create 1d chunked dataset with/without data transform */ + if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Initialize data */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + wbuf[i] = (short)i; + trans_wbuf[i] = (short)(2 * (10 + wbuf[i])); + } + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + FAIL_STACK_ERROR; + + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + FAIL_STACK_ERROR; + + /* Set modify write buffer if requested */ + if (mwbuf) + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + FAIL_STACK_ERROR; + + if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) + FAIL_STACK_ERROR; + + /* Set data transform */ + if (dtrans) { + if (H5Pset_data_transform(dxpl, expr) < 0) + FAIL_STACK_ERROR; + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(wbuf_bak, wbuf, sizeof(wbuf)); + + /* Write data to the dataset with/without data transform in dxpl */ + if (H5Dwrite(did, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) + FAIL_STACK_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(wbuf, wbuf_bak, sizeof(wbuf)); + + /* Read data from the dataset without data transform in dxpl */ + if (H5Dread(did, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, ntrans_dxpl, rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify data or transformed data read */ + for (i = 0; i < DSET_SELECT_DIM; i++) + if (rbuf[i] != (dtrans ? 
trans_wbuf[i] : wbuf[i])) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + + if (dtrans) { + + /* Read data from the dataset with data transform set in dxpl */ + if (H5Dread(did, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify data read is transformed a second time */ + for (i = 0; i < DSET_SELECT_DIM; i++) + if (rbuf[i] != (2 * (10 + trans_wbuf[i]))) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + } + + if (H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + if (H5Dclose(did) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(ntrans_dxpl) < 0) + FAIL_STACK_ERROR; + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Sclose(sid); + H5Dclose(did); + H5Pclose(dcpl); + H5Pclose(dxpl); + H5Pclose(ntrans_dxpl); + } + H5E_END_TRY; + + return FAIL; + +} /* test_smaller_mem_type_no_bkg() */ + +/* + * Case 5: single dataset reade/write, compound types with background buffer + * + * (a) Initialize compound buffer in memory with unique values + * Write all compound fields to disk + * Verify values read + * (b) Update all fields of the compound type in memory write buffer with new unique values + * Write some but not all all compound fields to disk + * Read the entire compound type + * Verify the fields have the correct (old or new) values + * (c) Update all fields of the compound type in memory read buffer with new unique values + * Read some but not all the compound fields to memory + * Verify the fields have the correct (old, middle or new) values + * (d) Set up a different compound type which has: + * --no conversion for member types + * --a field with larger mem type + * --a field with smaller mem type + * Write this compound type to disk + * Read the entire compound type + * Verify the values read + * + */ +static herr_t +test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +{ + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t s1_tid = H5I_INVALID_HID; + hid_t s2_tid = H5I_INVALID_HID; + hid_t ss_ac_tid = H5I_INVALID_HID; + hid_t ss_bc_tid = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + s1_t *s1_wbuf = NULL; + s1_t *s1_wbuf_bak = NULL; + s1_t *s1_rbuf = NULL; + s2_t *s2_wbuf = NULL; + s2_t *s2_wbuf_bak = NULL; + s2_t *s2_rbuf = NULL; + char dset_name[DSET_NAME_LEN]; + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + FAIL_STACK_ERROR; + + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + FAIL_STACK_ERROR; + + /* Set modify write buffer if requested */ + if (mwbuf) + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + FAIL_STACK_ERROR; + + /* Allocate buffers for datasets */ + if (NULL == (s1_wbuf = (s1_t *)HDmalloc(sizeof(s1_t) * DSET_SELECT_DIM))) + FAIL_STACK_ERROR; + if (mwbuf && NULL == (s1_wbuf_bak = (s1_t *)HDmalloc(sizeof(s1_t) * DSET_SELECT_DIM))) + FAIL_STACK_ERROR; + if (NULL == (s1_rbuf = (s1_t *)HDmalloc(sizeof(s1_t) * DSET_SELECT_DIM))) + FAIL_STACK_ERROR; + if (NULL == (s2_wbuf = (s2_t *)HDmalloc(sizeof(s2_t) * DSET_SELECT_DIM))) + FAIL_STACK_ERROR; + if (mwbuf && NULL == (s2_wbuf_bak = (s2_t *)HDmalloc(sizeof(s2_t) * DSET_SELECT_DIM))) + FAIL_STACK_ERROR; + if (NULL == (s2_rbuf = (s2_t *)HDmalloc(sizeof(s2_t) * DSET_SELECT_DIM))) + 
FAIL_STACK_ERROR; + + /* Create the memory data type */ + if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + FAIL_STACK_ERROR; + + if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) + FAIL_STACK_ERROR; + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + FAIL_STACK_ERROR; + } + + /* Case 5(a) */ + + /* Generate dataset name */ + HDsnprintf(dset_name, sizeof(dset_name), "cmpd_with_bkg_%s_%s", chunked ? "chunked" : "contig", + mwbuf ? "mwbuf" : "nomwbuf"); + + /* Create 1d dataset */ + if ((did = H5Dcreate2(fid, dset_name, s1_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Initialize data */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + s1_wbuf[i].a = 4 * i; + s1_wbuf[i].b = (4 * i) + 1; + s1_wbuf[i].c = (4 * i) + 2; + s1_wbuf[i].d = (4 * i) + 3; + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(s1_wbuf_bak, s1_wbuf, sizeof(s1_t) * DSET_SELECT_DIM); + + /* Write all the data to the dataset */ + if (H5Dwrite(did, s1_tid, H5S_ALL, H5S_ALL, dxpl, s1_wbuf) < 0) + FAIL_STACK_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(s1_wbuf, s1_wbuf_bak, sizeof(s1_t) * DSET_SELECT_DIM); + + /* Read all the data from the dataset */ + if (H5Dread(did, s1_tid, H5S_ALL, H5S_ALL, dxpl, s1_rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify data read */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + if (s1_wbuf[i].a != s1_rbuf[i].a || s1_wbuf[i].b != s1_rbuf[i].b || s1_wbuf[i].c != s1_rbuf[i].c || + s1_wbuf[i].d != s1_rbuf[i].d) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + } + + /* Case 5(b) */ + + /* Update s1_wbuf with unique values */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + s1_wbuf[i].a = (4 * i) + DSET_SELECT_DIM; + s1_wbuf[i].b = (4 * i) + DSET_SELECT_DIM + 1; + s1_wbuf[i].c = (4 * i) + DSET_SELECT_DIM + 2; + s1_wbuf[i].d = (4 * i) + DSET_SELECT_DIM + 3; + } + + /* Create a compound type same size as s1_t */ + if ((ss_ac_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + FAIL_STACK_ERROR; + + /* but contains only subset members of s1_t */ + if (H5Tinsert(ss_ac_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(ss_ac_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) + FAIL_STACK_ERROR; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(s1_wbuf_bak, s1_wbuf, sizeof(s1_t) * DSET_SELECT_DIM); + + /* Write s1_wbuf to the dataset with only subset members in ss_tid */ + if (H5Dwrite(did, ss_ac_tid, H5S_ALL, H5S_ALL, dxpl, s1_wbuf) < 0) + FAIL_STACK_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(s1_wbuf, s1_wbuf_bak, sizeof(s1_t) * DSET_SELECT_DIM); + + /* Read the whole compound back */ + if (H5Dread(did, ss_ac_tid, H5S_ALL, H5S_ALL, dxpl, s1_rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify the compound fields have the correct (old or new) values */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + if (s1_rbuf[i].a != s1_wbuf[i].a || s1_rbuf[i].b != ((4 * i) + 1) || s1_rbuf[i].c != s1_wbuf[i].c || + 
s1_rbuf[i].d != ((4 * i) + 3)) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + } + + /* Case 5(c) */ + + /* Update s1_rbuf with new unique values */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + s1_rbuf[i].a = (4 * i) + (2 * DSET_SELECT_DIM); + s1_rbuf[i].b = (4 * i) + (2 * DSET_SELECT_DIM) + 1; + s1_rbuf[i].c = (4 * i) + (2 * DSET_SELECT_DIM) + 2; + s1_rbuf[i].d = (4 * i) + (2 * DSET_SELECT_DIM) + 3; + } + + /* Create a compound type same size as s1_t */ + if ((ss_bc_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + FAIL_STACK_ERROR; + + /* but contains only subset members of s1_t */ + if (H5Tinsert(ss_bc_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(ss_bc_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) + FAIL_STACK_ERROR; + + /* Read the dataset: will read only what is set in */ + if (H5Dread(did, ss_bc_tid, H5S_ALL, H5S_ALL, dxpl, s1_rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify data read */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + if (s1_rbuf[i].a != ((4 * i) + (2 * DSET_SELECT_DIM)) || s1_rbuf[i].b != ((4 * i) + 1) || + s1_rbuf[i].c != ((4 * i) + DSET_SELECT_DIM + 2) || + s1_rbuf[i].d != ((4 * i) + (2 * DSET_SELECT_DIM) + 3)) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + } + + /* Case 5(d) */ + + /* Create s2_t compound type with: + * --no conversion for 2 member types, + * --1 larger mem type + * --1 smaller mem type + */ + if ((s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t))) < 0) + FAIL_STACK_ERROR; + + if (H5Tinsert(s2_tid, "a", HOFFSET(s2_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s2_tid, "b", HOFFSET(s2_t, b), H5T_NATIVE_LONG) < 0 || + H5Tinsert(s2_tid, "c", HOFFSET(s2_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s2_tid, "d", HOFFSET(s2_t, d), H5T_NATIVE_SHORT) < 0) + FAIL_STACK_ERROR; + + /* Update s2_wbuf with unique values */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + s2_wbuf[i].a = 8 * i; + s2_wbuf[i].b = (long)((8 * i) + 1); + s2_wbuf[i].c = (8 * i) + 2; + s2_wbuf[i].d = (short)((8 * i) + 3); + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(s2_wbuf_bak, s2_wbuf, sizeof(s2_t) * DSET_SELECT_DIM); + + if (H5Dwrite(did, s2_tid, H5S_ALL, H5S_ALL, dxpl, s2_wbuf) < 0) + FAIL_STACK_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(s2_wbuf, s2_wbuf_bak, sizeof(s2_t) * DSET_SELECT_DIM); + + /* Read it back */ + if (H5Dread(did, s2_tid, H5S_ALL, H5S_ALL, dxpl, s2_rbuf) < 0) { + goto error; + } + + /* Verify data read */ + for (i = 0; i < DSET_SELECT_DIM; i++) { + if (s2_wbuf[i].a != s2_rbuf[i].a || s2_wbuf[i].b != s2_rbuf[i].b || s2_wbuf[i].c != s2_rbuf[i].c || + s2_wbuf[i].d != s2_rbuf[i].d) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" At index %d\n", i); + TEST_ERROR; + } + } + + if (H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + if (H5Tclose(s1_tid) < 0) + FAIL_STACK_ERROR; + if (H5Tclose(s2_tid) < 0) + FAIL_STACK_ERROR; + if (H5Tclose(ss_ac_tid) < 0) + FAIL_STACK_ERROR; + if (H5Tclose(ss_bc_tid) < 0) + FAIL_STACK_ERROR; + if (H5Dclose(did) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + + /* Release buffers */ + HDfree(s1_wbuf); + HDfree(s1_wbuf_bak); + HDfree(s1_rbuf); + HDfree(s2_wbuf); + HDfree(s2_wbuf_bak); + HDfree(s2_rbuf); + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Sclose(sid); + H5Tclose(s1_tid); + 
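As a reference for the partial-field pattern exercised in Case 5(b) above, here is a minimal standalone sketch (not part of this patch) of writing only the "a" and "c" members of a compound dataset by passing a memory datatype that names a subset of the on-disk members. The struct layout mirrors the s1_t used in the test; the helper name partial_write_ac, the dataset name "partial_cmpd", and PARTIAL_DIM are hypothetical.

#include "hdf5.h"

typedef struct { int a, b, c, d; } s1_ex_t;
#define PARTIAL_DIM 100

static herr_t
partial_write_ac(hid_t fid)
{
    s1_ex_t buf[PARTIAL_DIM];
    hsize_t dims[1]  = {PARTIAL_DIM};
    hid_t   full_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_ex_t));
    hid_t   sub_tid  = H5Tcreate(H5T_COMPOUND, sizeof(s1_ex_t));
    hid_t   sid      = H5Screate_simple(1, dims, NULL);
    hid_t   did      = H5I_INVALID_HID;
    herr_t  ret      = 0;
    int     i;

    /* On-disk compound type carries all four members */
    H5Tinsert(full_tid, "a", HOFFSET(s1_ex_t, a), H5T_NATIVE_INT);
    H5Tinsert(full_tid, "b", HOFFSET(s1_ex_t, b), H5T_NATIVE_INT);
    H5Tinsert(full_tid, "c", HOFFSET(s1_ex_t, c), H5T_NATIVE_INT);
    H5Tinsert(full_tid, "d", HOFFSET(s1_ex_t, d), H5T_NATIVE_INT);

    /* Memory type names only "a" and "c"; a write with it updates just those
     * fields on disk, while "b" and "d" keep their existing values (the
     * library fetches them into a background buffer before rewriting). */
    H5Tinsert(sub_tid, "a", HOFFSET(s1_ex_t, a), H5T_NATIVE_INT);
    H5Tinsert(sub_tid, "c", HOFFSET(s1_ex_t, c), H5T_NATIVE_INT);

    for (i = 0; i < PARTIAL_DIM; i++) {
        buf[i].a = i;
        buf[i].c = -i;
    }

    if ((did = H5Dcreate2(fid, "partial_cmpd", full_tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
        ret = -1;
    if (ret == 0 && H5Dwrite(did, sub_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
        ret = -1;

    if (did != H5I_INVALID_HID)
        H5Dclose(did);
    H5Sclose(sid);
    H5Tclose(sub_tid);
    H5Tclose(full_tid);
    return ret;
}

Reading back with the full compound type afterwards is what the verification loops in this test check: "a" and "c" show the newly written values while "b" and "d" retain their previous contents.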
H5Tclose(s2_tid); + H5Tclose(ss_ac_tid); + H5Tclose(ss_bc_tid); + H5Dclose(did); + H5Pclose(dcpl); + H5Pclose(dxpl); + } + H5E_END_TRY; + + if (s1_wbuf) + HDfree(s1_wbuf); + if (s1_wbuf_bak) + HDfree(s1_wbuf_bak); + if (s1_rbuf) + HDfree(s1_rbuf); + if (s2_wbuf) + HDfree(s2_wbuf); + if (s2_wbuf_bak) + HDfree(s2_wbuf_bak); + if (s2_rbuf) + HDfree(s2_rbuf); + return FAIL; + +} /* test_cmpd_with_bkg() */ + +/* + * Test 1 for multi-dataset: + * --Datasets with/without type conversion+smaller/larger mem type+no background buffer + * + * Create datasets: randomized H5T_NATIVE_INT or H5T_NATIVE_LONG + * + * Case a--setting for multi write/read to ndsets: + * Datatype for all datasets: H5T_NATIVE_INT + * + * Case b--setting for multi write/read to ndsets: + * Datatype for all datasets: H5T_NATIVE_LONG + */ +static herr_t +test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +{ + size_t ndsets; + int i, j; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + + hid_t file_sids[MULTI_NUM_DSETS]; + hid_t mem_sids[MULTI_NUM_DSETS]; + hid_t mem_tids[MULTI_NUM_DSETS]; + + char dset_names[MULTI_NUM_DSETS][DSET_NAME_LEN]; + hid_t dset_dids[MULTI_NUM_DSETS]; + + size_t buf_size; + int *total_wbuf = NULL; + int *total_wbuf_bak = NULL; + int *total_trans_wbuf = NULL; + int *total_rbuf = NULL; + long *total_lwbuf = NULL; + long *total_lwbuf_bak = NULL; + long *total_trans_lwbuf = NULL; + long *total_lrbuf = NULL; + + int *wbufi[MULTI_NUM_DSETS]; + int *trans_wbufi[MULTI_NUM_DSETS]; + int *rbufi[MULTI_NUM_DSETS]; + + long *lwbufi[MULTI_NUM_DSETS]; + long *trans_lwbufi[MULTI_NUM_DSETS]; + long *lrbufi[MULTI_NUM_DSETS]; + + const void *wbufs[MULTI_NUM_DSETS]; + void *rbufs[MULTI_NUM_DSETS]; + const char *expr = "2*x"; + + ndsets = MAX(MULTI_MIN_DSETS, MULTI_NUM_DSETS); + + dims[0] = DSET_SELECT_DIM; + + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + FAIL_STACK_ERROR; + } + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + FAIL_STACK_ERROR; + + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + FAIL_STACK_ERROR; + + /* Set modify write buffer if requested */ + if (mwbuf) + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + FAIL_STACK_ERROR; + + if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) + FAIL_STACK_ERROR; + + /* Set data transform */ + if (dtrans) + if (H5Pset_data_transform(dxpl, expr) < 0) + FAIL_STACK_ERROR; + + /* Set up file space ids, mem space ids, and dataset ids */ + for (i = 0; i < (int)ndsets; i++) { + if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + + if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + + /* Generate dataset name */ + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + + /* Create ith dataset */ + if ((dset_dids[i] = + H5Dcreate2(fid, dset_names[i], ((HDrandom() % 2) ? 
H5T_NATIVE_LONG : H5T_NATIVE_INT), + file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + } + + buf_size = ndsets * DSET_SELECT_DIM * sizeof(int); + + /* Allocate buffers for all datasets */ + if (NULL == (total_wbuf = (int *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (mwbuf && NULL == (total_wbuf_bak = (int *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (NULL == (total_trans_wbuf = (int *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (NULL == (total_rbuf = (int *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + + buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); + + if (NULL == (total_lwbuf = (long *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (mwbuf && NULL == (total_lwbuf_bak = (long *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (NULL == (total_trans_lwbuf = (long *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (NULL == (total_lrbuf = (long *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + wbufi[i] = total_wbuf + (i * DSET_SELECT_DIM); + trans_wbufi[i] = total_trans_wbuf + (i * DSET_SELECT_DIM); + rbufi[i] = total_rbuf + (i * DSET_SELECT_DIM); + + wbufs[i] = wbufi[i]; + rbufs[i] = rbufi[i]; + } + + /* Initialize the buffer data */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) { + wbufi[i][j] = (int)j; + trans_wbufi[i][j] = 2 * wbufi[i][j]; + } + + /* Case a */ + + /* Datatype setting for multi write/read */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = H5T_NATIVE_INT; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_wbuf_bak, total_wbuf, ndsets * DSET_SELECT_DIM * sizeof(int)); + + /* Write data to the dataset with/without data transform */ + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_wbuf, total_wbuf_bak, ndsets * DSET_SELECT_DIM * sizeof(int)); + + /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, ntrans_dxpl, rbufs) < 0) + TEST_ERROR; + + /* Verify */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) + if (rbufi[i][j] != (dtrans ? 
trans_wbufi[i][j] : wbufi[i][j])) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + + if (dtrans) { + + /* Read the data from the dataset with data transform set in dxpl */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + TEST_ERROR; + + /* Verify */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) + if (rbufi[i][j] != (2 * trans_wbufi[i][j])) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + } + + /* Case b */ + + /* Datatype setting for multi write/read */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = H5T_NATIVE_LONG; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + lwbufi[i] = total_lwbuf + (i * DSET_SELECT_DIM); + trans_lwbufi[i] = total_trans_lwbuf + (i * DSET_SELECT_DIM); + lrbufi[i] = total_lrbuf + (i * DSET_SELECT_DIM); + wbufs[i] = lwbufi[i]; + rbufs[i] = lrbufi[i]; + } + + /* Initialize the buffer data */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) { + lwbufi[i][j] = (long)j + DSET_SELECT_DIM; + trans_lwbufi[i][j] = 2 * lwbufi[i][j]; + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_lwbuf_bak, total_lwbuf, ndsets * DSET_SELECT_DIM * sizeof(long)); + + /* Write data to the dataset with/without data transform */ + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_lwbuf, total_lwbuf_bak, ndsets * DSET_SELECT_DIM * sizeof(long)); + + /* Read data from the dataset (if dtrans, with data transform again in dxpl) */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + TEST_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + for (j = 0; j < DSET_SELECT_DIM; j++) { + if (lrbufi[i][j] != (dtrans ? 
(2 * trans_lwbufi[i][j]) : lwbufi[i][j])) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + } + } + + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(ntrans_dxpl) < 0) + FAIL_STACK_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + if (H5Sclose(file_sids[i]) < 0) + FAIL_STACK_ERROR; + if (H5Sclose(mem_sids[i]) < 0) + FAIL_STACK_ERROR; + if (H5Dclose(dset_dids[i]) < 0) + FAIL_STACK_ERROR; + } + + HDfree(total_wbuf); + HDfree(total_wbuf_bak); + HDfree(total_rbuf); + HDfree(total_trans_wbuf); + HDfree(total_lwbuf); + HDfree(total_lwbuf_bak); + HDfree(total_lrbuf); + HDfree(total_trans_lwbuf); + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + H5Pclose(dcpl); + H5Pclose(dxpl); + H5Pclose(ntrans_dxpl); + for (i = 0; i < (int)ndsets; i++) { + H5Sclose(file_sids[i]); + H5Sclose(mem_sids[i]); + H5Dclose(dset_dids[i]); + } + H5E_END_TRY; + + if (total_wbuf) + HDfree(total_wbuf); + if (total_wbuf_bak) + HDfree(total_wbuf_bak); + if (total_trans_wbuf) + HDfree(total_trans_wbuf); + if (total_rbuf) + HDfree(total_rbuf); + if (total_lwbuf) + HDfree(total_lwbuf); + if (total_lwbuf_bak) + HDfree(total_lwbuf_bak); + if (total_lrbuf) + HDfree(total_lrbuf); + if (total_trans_lwbuf) + HDfree(total_trans_lwbuf); + + return FAIL; + +} /* test_multi_dsets_no_bkg() */ + +/* + * Test 2 for multi-dataset: + * + * Datasets with compound types+background buffer + * + * Create datasets with the same compound type + * (a) Initialize compound buffer in memory with unique values + * All datasets: + * --Write all compound fields to disk + * --Read the entire compound type for all datasets + * --Verify values read + * (b) Update all fields of the compound type in memory write buffer with new unique values + * dset0: + * --Write some but not all compound fields to disk + * --Read and verify the fields have the correct (old(a) or new) values + * Remaining datasets: + * --Untouched + * --Read and verify the fields have the correct old(a) values + * (c) Update all fields of the compound type in memory read buffer with new unique values + * Randomized dataset: + * --Read some but not all the compound fields to memory + * --Verify the fields have the correct (old(a) or new) values + * dset0: + * --Untouched + * --Read and verify the fields have the correct (old(a) or middle(b)) values + * Remaining datasets: + * --Untouched + * --Read and verify the fields have the correct old(a) values + * (d) Set up a different compound type which has: + * --no type conversion for 2 member types + * --a field with larger mem type + * --a field with smaller mem type + * All datasets: + * --Write the compound fields to disk + * --Read the entire compound type + * --Verify values read + */ +static herr_t +test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +{ + size_t ndsets; + int i, j, mm; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + + hid_t file_sids[MULTI_NUM_DSETS]; + hid_t mem_sids[MULTI_NUM_DSETS]; + hid_t mem_tids[MULTI_NUM_DSETS]; + + hid_t s1_tid = H5I_INVALID_HID; + hid_t ss_ac_tid = H5I_INVALID_HID; + hid_t ss_bc_tid = H5I_INVALID_HID; + hid_t s2_tid = H5I_INVALID_HID; + + char dset_names[MULTI_NUM_DSETS][DSET_NAME_LEN]; + hid_t dset_dids[MULTI_NUM_DSETS]; + + size_t buf_size; + size_t s2_buf_size; + + s1_t *total_wbuf = NULL; + s1_t *total_wbuf_bak = NULL; + s1_t *total_rbuf = NULL; + + s2_t
*s2_total_wbuf = NULL; + s2_t *s2_total_wbuf_bak = NULL; + s2_t *s2_total_rbuf = NULL; + + s1_t *wbufi[MULTI_NUM_DSETS]; + s1_t *rbufi[MULTI_NUM_DSETS]; + + s2_t *s2_wbufi[MULTI_NUM_DSETS]; + s2_t *s2_rbufi[MULTI_NUM_DSETS]; + + const void *wbufs[MULTI_NUM_DSETS]; + void *rbufs[MULTI_NUM_DSETS]; + + ndsets = MAX(MULTI_MIN_DSETS, MULTI_NUM_DSETS); + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + FAIL_STACK_ERROR; + + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + FAIL_STACK_ERROR; + + /* Set modify write buffer if requested */ + if (mwbuf) + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + FAIL_STACK_ERROR; + + dims[0] = DSET_SELECT_DIM; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + FAIL_STACK_ERROR; + } + + /* Create the memory data type */ + if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + FAIL_STACK_ERROR; + + if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) + FAIL_STACK_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + + /* Generate dataset name */ + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_cmpd_dset%d_%s_%s", i, + chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + + /* Create ith dataset */ + if ((dset_dids[i] = + H5Dcreate2(fid, dset_names[i], s1_tid, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + } + + buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); + s2_buf_size = ndsets * DSET_SELECT_DIM * sizeof(s2_t); + + /* Allocate buffers */ + if (NULL == (total_wbuf = (s1_t *)HDmalloc(buf_size))) + TEST_ERROR; + if (mwbuf && NULL == (total_wbuf_bak = (s1_t *)HDmalloc(buf_size))) + TEST_ERROR; + if (NULL == (total_rbuf = (s1_t *)HDmalloc(buf_size))) + TEST_ERROR; + + if (NULL == (s2_total_wbuf = (s2_t *)HDmalloc(s2_buf_size))) + TEST_ERROR; + if (mwbuf && NULL == (s2_total_wbuf_bak = (s2_t *)HDmalloc(s2_buf_size))) + TEST_ERROR; + if (NULL == (s2_total_rbuf = (s2_t *)HDmalloc(s2_buf_size))) + TEST_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + wbufi[i] = total_wbuf + (i * DSET_SELECT_DIM); + rbufi[i] = total_rbuf + (i * DSET_SELECT_DIM); + + wbufs[i] = wbufi[i]; + rbufs[i] = rbufi[i]; + } + + /* Case a */ + + /* Initialize the buffer data for all the datasets */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) { + wbufi[i][j].a = (4 * j); + wbufi[i][j].b = (4 * j) + 1; + wbufi[i][j].c = (4 * j) + 2; + wbufi[i][j].d = (4 * j) + 3; + } + + /* Datatype setting for multi write */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = s1_tid; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_wbuf_bak, total_wbuf, buf_size); + + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_wbuf, total_wbuf_bak, buf_size); + + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + TEST_ERROR; + + /* Verify data read */ + for (i = 
0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) { + if (wbufi[i][j].a != rbufi[i][j].a || wbufi[i][j].b != rbufi[i][j].b || + wbufi[i][j].c != rbufi[i][j].c || wbufi[i][j].d != rbufi[i][j].d) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + } + + /* Case b */ + + /* Update data in wbufi for dset0 with unique values */ + for (j = 0; j < DSET_SELECT_DIM; j++) { + wbufi[0][j].a = (4 * j) + DSET_SELECT_DIM; + wbufi[0][j].b = (4 * j) + DSET_SELECT_DIM + 1; + wbufi[0][j].c = (4 * j) + DSET_SELECT_DIM + 2; + wbufi[0][j].d = (4 * j) + DSET_SELECT_DIM + 3; + } + + /* Create a compound type same size as s1_t */ + if ((ss_ac_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + FAIL_STACK_ERROR; + + /* but contains only subset members of s1_t */ + if (H5Tinsert(ss_ac_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(ss_ac_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) + FAIL_STACK_ERROR; + + /* Untouched memory and file spaces for other datasets */ + for (i = 0; i < (int)ndsets; i++) { + if (i == 0) + continue; + + if (H5Sselect_none(mem_sids[i]) < 0) + TEST_ERROR; + if (H5Sselect_none(file_sids[i]) < 0) + TEST_ERROR; + } + + /* Datatype setting for write to dset0 */ + mem_tids[0] = ss_ac_tid; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_wbuf_bak, total_wbuf, buf_size); + + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_wbuf, total_wbuf_bak, buf_size); + + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + TEST_ERROR; + + /* Verify data read */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) + if (i == 0) { /* dset0 */ + if (wbufi[i][j].a != rbufi[i][j].a || ((4 * (int)j) + 1) != rbufi[i][j].b || + wbufi[i][j].c != rbufi[i][j].c || ((4 * (int)j) + 3) != rbufi[i][j].d) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + } + else { /* other datasets */ + for (j = 0; j < DSET_SELECT_DIM; j++) + if ((4 * (int)j) != rbufi[i][j].a || ((4 * (int)j) + 1) != rbufi[i][j].b || + ((4 * (int)j) + 2) != rbufi[i][j].c || ((4 * (int)j) + 3) != rbufi[i][j].d) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + } + + /* Case c */ + mm = HDrandom() % (int)ndsets; + if (!mm) + mm++; + + /* Update data in rbufi for dset1 with new unique values */ + for (j = 0; j < DSET_SELECT_DIM; j++) { + rbufi[mm][j].a = (4 * j) + (2 * DSET_SELECT_DIM); + rbufi[mm][j].b = (4 * j) + (2 * DSET_SELECT_DIM) + 1; + rbufi[mm][j].c = (4 * j) + (2 * DSET_SELECT_DIM) + 2; + rbufi[mm][j].d = (4 * j) + (2 * DSET_SELECT_DIM) + 3; + } + + /* Create a compound type same size as s1_t */ + if ((ss_bc_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + FAIL_STACK_ERROR; + + /* but contains only subset members of s1_t */ + if (H5Tinsert(ss_bc_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(ss_bc_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) + FAIL_STACK_ERROR; + + /* Reset memory and file space for dataset */ + if (H5Sselect_all(mem_sids[mm]) < 0) + FAIL_STACK_ERROR; + if (H5Sselect_all(file_sids[mm]) < 0) + FAIL_STACK_ERROR; + + /* Untouched memory and file space for other datasets */ + for (i = 0; 
i < (int)ndsets; i++) { + if (i == 0 || i == mm) + continue; + if (H5Sselect_none(mem_sids[i]) < 0) + TEST_ERROR; + if (H5Sselect_none(file_sids[i]) < 0) + TEST_ERROR; + } + + /* Datatype setting for read from dataset */ + mem_tids[mm] = ss_bc_tid; + + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + TEST_ERROR; + + /* Verify data read */ + /* dset0 */ + for (j = 0; j < DSET_SELECT_DIM; j++) + if (wbufi[0][j].a != rbufi[0][j].a || ((4 * (int)j) + 1) != rbufi[0][j].b || + wbufi[0][j].c != rbufi[0][j].c || ((4 * (int)j) + 3) != rbufi[0][j].d) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset0 at index %d\n", j); + TEST_ERROR; + } + + /* dset */ + for (j = 0; j < DSET_SELECT_DIM; j++) + if (rbufi[mm][j].a != ((4 * (int)j) + (2 * DSET_SELECT_DIM)) || + rbufi[mm][j].b != ((4 * (int)j) + 1) || rbufi[mm][j].c != ((4 * (int)j) + 2) || + rbufi[mm][j].d != ((4 * (int)j) + (2 * DSET_SELECT_DIM) + 3)) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset1 at index %d\n", j); + TEST_ERROR; + } + + /* other datasets */ + for (i = 0; i < (int)ndsets; i++) { + if (i == 0 || i == mm) + continue; + + for (j = 0; j < DSET_SELECT_DIM; j++) + if (rbufi[i][j].a != (4 * (int)j) || rbufi[i][j].b != ((4 * (int)j) + 1) || + rbufi[i][j].c != ((4 * (int)j) + 2) || rbufi[i][j].d != ((4 * (int)j) + 3)) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + } + + /* Case d */ + + /* Create s2_t compound type with: + * --no conversion for 2 member types, + * --1 larger mem type + * --1 smaller mem type + */ + if ((s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t))) < 0) + FAIL_STACK_ERROR; + + if (H5Tinsert(s2_tid, "a", HOFFSET(s2_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s2_tid, "b", HOFFSET(s2_t, b), H5T_NATIVE_LONG) < 0 || + H5Tinsert(s2_tid, "c", HOFFSET(s2_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s2_tid, "d", HOFFSET(s2_t, d), H5T_NATIVE_SHORT) < 0) + FAIL_STACK_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + s2_wbufi[i] = s2_total_wbuf + (i * DSET_SELECT_DIM); + s2_rbufi[i] = s2_total_rbuf + (i * DSET_SELECT_DIM); + + wbufs[i] = s2_wbufi[i]; + rbufs[i] = s2_rbufi[i]; + + mem_tids[i] = s2_tid; + + if (H5Sselect_all(mem_sids[i]) < 0) + TEST_ERROR; + if (H5Sselect_all(file_sids[i]) < 0) + TEST_ERROR; + } + + /* Initialize the buffer data for all the datasets */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) { + s2_wbufi[i][j].a = 8 * j; + s2_wbufi[i][j].b = (long)((8 * j) + 1); + s2_wbufi[i][j].c = (8 * j) + 2; + s2_wbufi[i][j].d = (short)((8 * j) + 3); + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(s2_total_wbuf_bak, s2_total_wbuf, s2_buf_size); + + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(s2_total_wbuf, s2_total_wbuf_bak, s2_buf_size); + + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + TEST_ERROR; + + /* Verify data read */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) + if (s2_wbufi[i][j].a != s2_rbufi[i][j].a || s2_wbufi[i][j].b != s2_rbufi[i][j].b || + s2_wbufi[i][j].c != s2_rbufi[i][j].c || s2_wbufi[i][j].d != s2_rbufi[i][j].d) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index 
%d\n", i, j); + TEST_ERROR; + } + + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + if (H5Sclose(file_sids[i]) < 0) + FAIL_STACK_ERROR; + if (H5Sclose(mem_sids[i]) < 0) + FAIL_STACK_ERROR; + if (H5Dclose(dset_dids[i]) < 0) + FAIL_STACK_ERROR; + } + + HDfree(total_wbuf); + HDfree(total_wbuf_bak); + HDfree(total_rbuf); + HDfree(s2_total_wbuf); + HDfree(s2_total_wbuf_bak); + HDfree(s2_total_rbuf); + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + H5Pclose(dcpl); + H5Pclose(dxpl); + for (i = 0; i < (int)ndsets; i++) { + H5Sclose(file_sids[i]); + H5Sclose(mem_sids[i]); + H5Dclose(dset_dids[i]); + } + H5E_END_TRY; + + if (total_wbuf) + HDfree(total_wbuf); + if (total_wbuf_bak) + HDfree(total_wbuf_bak); + if (total_rbuf) + HDfree(total_rbuf); + if (s2_total_wbuf) + HDfree(s2_total_wbuf); + if (s2_total_wbuf_bak) + HDfree(s2_total_wbuf_bak); + if (s2_total_rbuf) + HDfree(s2_total_rbuf); + + return FAIL; + +} /* test_multi_dsets_cmpd_with_bkg() */ + +/* + * Test 3 for multi-dataset: + * --Datasets with/without type conv+size change+no background buffer + * + * Create dset0: H5T_STD_I32BE + * Create other dateasets: randomized H5T_STD_I64LE or H5T_STD_I16LE + * + * Case a--setting for write/read to ndsets: + * Datatype for all datasets: H5T_STD_I32BE + * + * Case b--setting for write/read to ndsets + * Datatype for all datasets: H5T_STD_I64BE + * + * Case c--setting for write/read to ndsets + * Datatype for all datasets: H5T_STD_I16BE + */ +static herr_t +test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +{ + size_t ndsets; + int i, j; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + + hid_t file_sids[MULTI_NUM_DSETS]; + hid_t mem_sids[MULTI_NUM_DSETS]; + hid_t mem_tids[MULTI_NUM_DSETS]; + + char dset_names[MULTI_NUM_DSETS][DSET_NAME_LEN]; + hid_t dset_dids[MULTI_NUM_DSETS]; + + size_t buf_size, ss; + uint8_t *total_wbuf = NULL; + uint8_t *total_wbuf_bak = NULL; + uint8_t *total_rbuf = NULL; + uint8_t *total_lwbuf = NULL; + uint8_t *total_lwbuf_bak = NULL; + uint8_t *total_lrbuf = NULL; + uint8_t *total_swbuf = NULL; + uint8_t *total_swbuf_bak = NULL; + uint8_t *total_srbuf = NULL; + + uint8_t *wbufi[MULTI_NUM_DSETS]; + uint8_t *rbufi[MULTI_NUM_DSETS]; + uint8_t *lwbufi[MULTI_NUM_DSETS]; + uint8_t *lrbufi[MULTI_NUM_DSETS]; + uint8_t *swbufi[MULTI_NUM_DSETS]; + uint8_t *srbufi[MULTI_NUM_DSETS]; + + const void *wbufs[MULTI_NUM_DSETS]; + void *rbufs[MULTI_NUM_DSETS]; + + ndsets = MAX(MULTI_MIN_DSETS, MULTI_NUM_DSETS); + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + FAIL_STACK_ERROR; + + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + FAIL_STACK_ERROR; + + /* Set modify write buffer if requested */ + if (mwbuf) + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + FAIL_STACK_ERROR; + + dims[0] = DSET_SELECT_DIM; + + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + FAIL_STACK_ERROR; + } + + /* Set up file space ids, mem space ids, and dataset ids */ + for (i = 0; i < (int)ndsets; i++) { + if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + + if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + + /* Generate dataset name */ + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_size_dset%d_%s_%s", 
i, + chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + + /* Create ith dataset */ + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_STD_I32BE, file_sids[i], H5P_DEFAULT, dcpl, + H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + } + + /* Case a */ + + ss = H5Tget_size(H5T_STD_I32BE); + buf_size = ndsets * ss * DSET_SELECT_DIM; + + /* Allocate buffers for all datasets */ + if (NULL == (total_wbuf = (uint8_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (NULL == (total_wbuf_bak = (uint8_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (NULL == (total_rbuf = (uint8_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + wbufi[i] = total_wbuf + (i * (int)ss * DSET_SELECT_DIM); + rbufi[i] = total_rbuf + (i * (int)ss * DSET_SELECT_DIM); + + wbufs[i] = wbufi[i]; + rbufs[i] = rbufi[i]; + } + + /* Initialize the buffer data: big endian */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) { + wbufi[i][j * (int)ss + 0] = 0x1; + wbufi[i][j * (int)ss + 1] = 0x2; + wbufi[i][j * (int)ss + 2] = 0x3; + wbufi[i][j * (int)ss + 3] = (uint8_t)(0x4 + j); + } + + /* Datatype setting for multi write/read */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = H5T_STD_I32BE; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_wbuf_bak, total_wbuf, buf_size); + + /* Write data to the dataset */ + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_wbuf, total_wbuf_bak, buf_size); + + /* Read data from the dataset */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + TEST_ERROR; + + /* Verify */ + for (i = 0; i < (int)ndsets; i++) + /* Only compare when it's at least the size of H5T_STD_I32BE */ + if (H5Tget_size(H5Dget_type(dset_dids[i])) >= ss) { + for (j = 0; j < DSET_SELECT_DIM; j++) + if (rbufi[i][(int)ss * j + 0] != wbufi[i][(int)ss * j + 0] || + rbufi[i][(int)ss * j + 1] != wbufi[i][(int)ss * j + 1] || + rbufi[i][(int)ss * j + 2] != wbufi[i][(int)ss * j + 2] || + rbufi[i][(int)ss * j + 3] != wbufi[i][(int)ss * j + 3]) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + } + + /* Case b */ + + ss = H5Tget_size(H5T_STD_I64BE); + buf_size = ndsets * (ss * DSET_SELECT_DIM); + + /* Allocate buffers for all datasets */ + if (NULL == (total_lwbuf = (uint8_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (NULL == (total_lwbuf_bak = (uint8_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (NULL == (total_lrbuf = (uint8_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + lwbufi[i] = total_lwbuf + (i * (int)ss * DSET_SELECT_DIM); + lrbufi[i] = total_lrbuf + (i * (int)ss * DSET_SELECT_DIM); + + wbufs[i] = lwbufi[i]; + rbufs[i] = lrbufi[i]; + } + + /* Initialize the buffer data: big endian */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) { + lwbufi[i][j * (int)ss + 0] = 0x1; + lwbufi[i][j * (int)ss + 1] = 0x2; + lwbufi[i][j * (int)ss + 2] = 0x3; + lwbufi[i][j * (int)ss + 3] = 0x4; + lwbufi[i][j * (int)ss + 4] = 0x5; + lwbufi[i][j * (int)ss + 5] = 0x6; + lwbufi[i][j * (int)ss + 6] = 0x7; + lwbufi[i][j * (int)ss + 7] = (uint8_t)(0x8 + j); + } + + /* Datatype setting for multi write/read */ + for (i = 0; i < (int)ndsets; i++) + 
mem_tids[i] = H5T_STD_I64BE; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_lwbuf_bak, total_lwbuf, buf_size); + + /* Write data to the dataset */ + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_lwbuf, total_lwbuf_bak, buf_size); + + /* Read data from the dataset */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + TEST_ERROR; + + /* Verify */ + for (i = 0; i < (int)ndsets; i++) + /* Only compare when it's the size of H5T_STD_I64BE */ + if (H5Tget_size(H5Dget_type(dset_dids[i])) >= ss) { + for (j = 0; j < DSET_SELECT_DIM; j++) + if (lrbufi[i][(int)ss * j + 0] != lwbufi[i][(int)ss * j + 0] || + lrbufi[i][(int)ss * j + 1] != lwbufi[i][(int)ss * j + 1] || + lrbufi[i][(int)ss * j + 2] != lwbufi[i][(int)ss * j + 2] || + lrbufi[i][(int)ss * j + 3] != lwbufi[i][(int)ss * j + 3] || + lrbufi[i][(int)ss * j + 4] != lwbufi[i][(int)ss * j + 4] || + lrbufi[i][(int)ss * j + 5] != lwbufi[i][(int)ss * j + 5] || + lrbufi[i][(int)ss * j + 6] != lwbufi[i][(int)ss * j + 6] || + lrbufi[i][(int)ss * j + 7] != lwbufi[i][(int)ss * j + 7]) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + } + + /* Case c */ + + ss = H5Tget_size(H5T_STD_I16BE); + buf_size = ndsets * (ss * DSET_SELECT_DIM); + + /* Allocate buffers for all datasets */ + if (NULL == (total_swbuf = (uint8_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (NULL == (total_swbuf_bak = (uint8_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (NULL == (total_srbuf = (uint8_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + swbufi[i] = total_swbuf + (i * (int)ss * DSET_SELECT_DIM); + srbufi[i] = total_srbuf + (i * (int)ss * DSET_SELECT_DIM); + + wbufs[i] = swbufi[i]; + rbufs[i] = srbufi[i]; + } + + /* Initialize the buffer data: big endian */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < DSET_SELECT_DIM; j++) { + swbufi[i][j * (int)ss + 0] = 0x1; + swbufi[i][j * (int)ss + 1] = (uint8_t)(0x2 + j); + } + + /* Datatype setting for multi write/read */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = H5T_STD_I16BE; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_swbuf_bak, total_swbuf, buf_size); + + /* Write data to the dataset */ + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_swbuf, total_swbuf_bak, buf_size); + + /* Read data from the dataset */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + TEST_ERROR; + + /* Verify */ + for (i = 0; i < (int)ndsets; i++) + /* Can compare for all cases */ + for (j = 0; j < DSET_SELECT_DIM; j++) + if (srbufi[i][(int)ss * j + 0] != swbufi[i][(int)ss * j + 0] || + srbufi[i][(int)ss * j + 1] != swbufi[i][(int)ss * j + 1]) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + if (H5Sclose(file_sids[i]) < 0) + FAIL_STACK_ERROR; + if (H5Sclose(mem_sids[i]) < 0) + FAIL_STACK_ERROR; + if 
(H5Dclose(dset_dids[i]) < 0) + FAIL_STACK_ERROR; + } + + HDfree(total_wbuf); + HDfree(total_wbuf_bak); + HDfree(total_rbuf); + HDfree(total_lwbuf); + HDfree(total_lwbuf_bak); + HDfree(total_lrbuf); + HDfree(total_swbuf); + HDfree(total_swbuf_bak); + HDfree(total_srbuf); + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + H5Pclose(dcpl); + H5Pclose(dxpl); + for (i = 0; i < (int)ndsets; i++) { + H5Sclose(file_sids[i]); + H5Sclose(mem_sids[i]); + H5Dclose(dset_dids[i]); + } + H5E_END_TRY; + + if (total_wbuf) + HDfree(total_wbuf); + if (total_wbuf_bak) + HDfree(total_wbuf_bak); + if (total_rbuf) + HDfree(total_rbuf); + if (total_lwbuf) + HDfree(total_lwbuf); + if (total_lwbuf_bak) + HDfree(total_lwbuf_bak); + if (total_lrbuf) + HDfree(total_lrbuf); + if (total_swbuf) + HDfree(total_swbuf); + if (total_swbuf_bak) + HDfree(total_swbuf_bak); + if (total_srbuf) + HDfree(total_srbuf); + + return FAIL; + +} /* test_multi_dsets_size_change_no_bkg() */ + +/* + * Test 4 for multi-dataset: + * + * Repeat the following test for niter times to ensure the + * random combinations of all dataset types are hit. + * + * Create randomized contiguous or chunked datasets with: + * --DSET_WITH_NO_CONV: + * --with no type conversion + * --dataset with H5T_NATIVE_INT + * --DSET_WITH_CONV_AND_NO_BKG: + * --type conversion without background buffer + * --dataset with H5T_NATIVE_LONG + * --DSET_WITH_CONV_AND_BKG: + * --type conversion with background buffer + * --dataset with compound type s1_t + * + * Do H5Dwrite_multi() and H5Dread_multi() for the above datasets: + * Setting A: + * --DSET_WITH_NO_CONV: + * --write: mem_tids[] = H5T_NATIVE_INT + * --read: r_mem_tids[] = H5T_NATIVE_INT + * --DSET_WITH_CONV_AND_NO_BKG: + * --write: mem_tids[] = H5T_NATIVE_ULONG + * --read: r_mem_tids[] = H5T_NATIVE_LONG + * --DSET_WITH_CONV_AND_BKG: + * --write: mem_tids[] = s1_tid; + * --read: r_mem_tids[i] = s3_tid; + * + * Setting B: + * --DSET_WITH_NO_CONV: + * --write: mem_tids[] = H5T_NATIVE_INT + * --read: r_mem_tids[] = H5T_NATIVE_INT + * --DSET_WITH_CONV_AND_NO_BKG: + * --write: mem_tids[] = H5T_NATIVE_LONG; + * --read: r_mem_tids[] = H5T_NATIVE_SHORT; + * --DSET_WITH_CONV_AND_BKG: + * --write: mem_tids[] = s4_tid; + * --read: r_mem_tids[i] = s1_tid; + * + * Verify the result read as below: + * Setting A: + * --DSET_WITH_NO_CONV: + * --verify data read in rbufi1[i][j] is same as wbufi1[i][j] + * --DSET_WITH_CONV_AND_NO_BKG: + * --verify data read in l_rbufi2[i][j] is all LONG_MAX + * --DSET_WITH_CONV_AND_BKG: + * --verify all fields read in s3_rbufi3[i][j] is the + * reverse of s1_wbufi3[i][j] + * Setting B: + * --DSET_WITH_NO_CONV: + * --verify data read in rbufi1[i][j] is same as wbufi1[i][j] + * --DSET_WITH_CONV_AND_NO_BKG: + * --verify data read in s_rbufi2[i][j] is all SHRT_MAX + * --DSET_WITH_CONV_AND_BKG: + * --verify fields read in s1_rbufi3[i][j] is as follows: + * --fields 'a' and 'c' are as s1_wbufi3[i][j].a and s1_wbufi3[i][j].c + * --fields 'b' and 'd' are (DSET_SELECT_DIM + j + start[0]) + * + */ +static herr_t +test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) +{ + size_t ndsets; + int i, j, mm; + int s, n; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + + hid_t file_sids[MULTI_NUM_DSETS]; + hid_t mem_sids[MULTI_NUM_DSETS]; + hid_t mem_tids[MULTI_NUM_DSETS]; + hid_t r_mem_tids[MULTI_NUM_DSETS]; + + multi_dset_type_t dset_types[MULTI_NUM_DSETS]; + + hid_t s1_tid = H5I_INVALID_HID; + hid_t s3_tid = H5I_INVALID_HID; + hid_t 
s4_tid = H5I_INVALID_HID; + + char dset_names[MULTI_NUM_DSETS][DSET_NAME_LEN]; + hid_t dset_dids[MULTI_NUM_DSETS]; + + size_t buf_size; + + int *total_wbuf1 = NULL; + int *total_wbuf1_bak = NULL; + int *total_rbuf1 = NULL; + + int *wbufi1[MULTI_NUM_DSETS]; + int *rbufi1[MULTI_NUM_DSETS]; + + unsigned long *ul_total_wbuf2 = NULL; + unsigned long *ul_total_wbuf2_bak = NULL; + long *l_total_rbuf2 = NULL; + unsigned long *ul_wbufi2[MULTI_NUM_DSETS]; + long *l_rbufi2[MULTI_NUM_DSETS]; + + long *l_total_wbuf2 = NULL; + long *l_total_wbuf2_bak = NULL; + short *s_total_rbuf2 = NULL; + long *l_wbufi2[MULTI_NUM_DSETS]; + short *s_rbufi2[MULTI_NUM_DSETS]; + + s1_t *s1_total_wbuf3 = NULL; + s1_t *s1_total_wbuf3_bak = NULL; + s3_t *s3_total_rbuf3 = NULL; + s1_t *s1_wbufi3[MULTI_NUM_DSETS]; + s3_t *s3_rbufi3[MULTI_NUM_DSETS]; + + s4_t *s4_total_wbuf3 = NULL; + s4_t *s4_total_wbuf3_bak = NULL; + s1_t *s1_total_rbuf3 = NULL; + s4_t *s4_wbufi3[MULTI_NUM_DSETS]; + s1_t *s1_rbufi3[MULTI_NUM_DSETS]; + + const void *wbufs[MULTI_NUM_DSETS]; + void *rbufs[MULTI_NUM_DSETS]; + + /* for n niter to ensure that all randomized dset_types with multi_dset_type_t will be covered */ + for (n = 0; n < niter; n++) { + + /* Set up the number of datasets for testing */ + ndsets = MAX(MULTI_MIN_DSETS, MULTI_NUM_DSETS); + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + FAIL_STACK_ERROR; + + /* Enable selection I/O */ + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + FAIL_STACK_ERROR; + + /* Set modify write buffer if requested */ + if (mwbuf) + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + FAIL_STACK_ERROR; + + /* Set dataset layout: contiguous or chunked */ + dims[0] = DSET_SELECT_DIM; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + FAIL_STACK_ERROR; + } + + /* Create compound data type: s1_t */ + if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + FAIL_STACK_ERROR; + + if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) + FAIL_STACK_ERROR; + + /* Create compound data type: s3_t */ + if ((s3_tid = H5Tcreate(H5T_COMPOUND, sizeof(s3_t))) < 0) + FAIL_STACK_ERROR; + + if (H5Tinsert(s3_tid, "a", HOFFSET(s3_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s3_tid, "b", HOFFSET(s3_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(s3_tid, "c", HOFFSET(s3_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s3_tid, "d", HOFFSET(s3_t, d), H5T_NATIVE_INT) < 0) + FAIL_STACK_ERROR; + + /* Create compound data type: s4_t */ + if ((s4_tid = H5Tcreate(H5T_COMPOUND, sizeof(s4_t))) < 0) + FAIL_STACK_ERROR; + + if (H5Tinsert(s4_tid, "b", HOFFSET(s4_t, b), H5T_NATIVE_UINT) < 0 || + H5Tinsert(s4_tid, "d", HOFFSET(s4_t, d), H5T_NATIVE_UINT) < 0) + FAIL_STACK_ERROR; + + /* Create dataset for i ndsets */ + for (i = 0; i < (int)ndsets; i++) { + + /* File space ids */ + if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + + /* Memory space ids */ + if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + + mm = HDrandom() % (int)ndsets; + if (mm == 0) { + dset_types[i] = DSET_WITH_NO_CONV; + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_nconv_dset%d_%s_%s", i, + chunked ? "chunked" : "contig", mwbuf ? 
"mwbuf" : "nomwbuf"); + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_INT, file_sids[i], H5P_DEFAULT, + dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + } + else if (mm == 1) { + dset_types[i] = DSET_WITH_CONV_AND_NO_BKG; + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_nbkg_dset%d_%s_%s", i, + chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_LONG, file_sids[i], H5P_DEFAULT, + dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + } + else { + dset_types[i] = DSET_WITH_CONV_AND_BKG; + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_bkg_dset%d_%s_%s", i, + chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], s1_tid, file_sids[i], H5P_DEFAULT, dcpl, + H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + } + + } /* end for i ndsets */ + + /* Allocate buffers for all datasets */ + + /* DSET_WITH_NO_CONV */ + buf_size = ndsets * DSET_SELECT_DIM * sizeof(int); + if (NULL == (total_wbuf1 = (int *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (mwbuf && NULL == (total_wbuf1_bak = (int *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (NULL == (total_rbuf1 = (int *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + + /* DSET_WITH_CONV_AND_NO_BKG */ + buf_size = ndsets * DSET_SELECT_DIM * sizeof(unsigned long); + if (NULL == (ul_total_wbuf2 = (unsigned long *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (mwbuf && NULL == (ul_total_wbuf2_bak = (unsigned long *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); + if (NULL == (l_total_rbuf2 = (long *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + + buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); + if (NULL == (l_total_wbuf2 = (long *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (mwbuf && NULL == (l_total_wbuf2_bak = (long *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + buf_size = ndsets * DSET_SELECT_DIM * sizeof(short); + if (NULL == (s_total_rbuf2 = (short *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + + /* DSET_WITH_CONV_AND_BKG */ + buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); + if (NULL == (s1_total_wbuf3 = (s1_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (mwbuf && NULL == (s1_total_wbuf3_bak = (s1_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + buf_size = ndsets * DSET_SELECT_DIM * sizeof(s3_t); + if (NULL == (s3_total_rbuf3 = (s3_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + + buf_size = ndsets * DSET_SELECT_DIM * sizeof(s4_t); + if (NULL == (s4_total_wbuf3 = (s4_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + if (mwbuf && NULL == (s4_total_wbuf3_bak = (s4_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); + if (NULL == (s1_total_rbuf3 = (s1_t *)HDmalloc(buf_size))) + FAIL_STACK_ERROR; + + /* Test with s settings for ndsets */ + for (s = SETTING_A; s <= SETTING_B; s++) { + + /* for i ndsets */ + for (i = 0; i < (int)ndsets; i++) { + + switch (dset_types[i]) { + + case DSET_WITH_NO_CONV: + /* Initialize buffer indices */ + wbufi1[i] = total_wbuf1 + (i * DSET_SELECT_DIM); + rbufi1[i] = total_rbuf1 + (i * DSET_SELECT_DIM); + + wbufs[i] = wbufi1[i]; + rbufs[i] = rbufi1[i]; + + /* Initialize the buffer data */ + for (j = 0; j < DSET_SELECT_DIM; j++) + wbufi1[i][j] = (int)j; + + /* Same for all cases */ + mem_tids[i] = H5T_NATIVE_INT; + r_mem_tids[i] = H5T_NATIVE_INT; + + break; + + case DSET_WITH_CONV_AND_NO_BKG: + + if (s == SETTING_A) { + /* Initialize buffer indices */ + ul_wbufi2[i] = 
ul_total_wbuf2 + (i * DSET_SELECT_DIM); + l_rbufi2[i] = l_total_rbuf2 + (i * DSET_SELECT_DIM); + + wbufs[i] = ul_wbufi2[i]; + rbufs[i] = l_rbufi2[i]; + + for (j = 0; j < DSET_SELECT_DIM; j++) + ul_wbufi2[i][j] = ULONG_MAX - (unsigned long)j; + + mem_tids[i] = H5T_NATIVE_ULONG; + r_mem_tids[i] = H5T_NATIVE_LONG; + } + else if (s == SETTING_B) { + /* Initialize buffer indices */ + l_wbufi2[i] = l_total_wbuf2 + (i * DSET_SELECT_DIM); + s_rbufi2[i] = s_total_rbuf2 + (i * DSET_SELECT_DIM); + + wbufs[i] = l_wbufi2[i]; + rbufs[i] = s_rbufi2[i]; + + /* Initialize the buffer data */ + for (j = 0; j < DSET_SELECT_DIM; j++) + l_wbufi2[i][j] = LONG_MAX - (long)j; + + mem_tids[i] = H5T_NATIVE_LONG; + r_mem_tids[i] = H5T_NATIVE_SHORT; + } + + break; + + case DSET_WITH_CONV_AND_BKG: + + if (s == SETTING_A) { + /* Initialize buffer indices */ + s1_wbufi3[i] = s1_total_wbuf3 + (i * DSET_SELECT_DIM); + s3_rbufi3[i] = s3_total_rbuf3 + (i * DSET_SELECT_DIM); + + wbufs[i] = s1_wbufi3[i]; + rbufs[i] = s3_rbufi3[i]; + + /* Initialize buffer data for s1_t */ + for (j = 0; j < DSET_SELECT_DIM; j++) { + s1_wbufi3[i][j].a = (4 * j); + s1_wbufi3[i][j].b = (4 * j) + 1; + s1_wbufi3[i][j].c = (4 * j) + 2; + s1_wbufi3[i][j].d = (4 * j) + 3; + } + mem_tids[i] = s1_tid; + r_mem_tids[i] = s3_tid; + } + else if (s == SETTING_B) { + /* Initialize buffer indices */ + s4_wbufi3[i] = s4_total_wbuf3 + (i * DSET_SELECT_DIM); + s1_rbufi3[i] = s1_total_rbuf3 + (i * DSET_SELECT_DIM); + + wbufs[i] = s4_wbufi3[i]; + rbufs[i] = s1_rbufi3[i]; + + /* Initialize buffer data for s4_t */ + for (j = 0; j < DSET_SELECT_DIM; j++) { + s4_wbufi3[i][j].b = DSET_SELECT_DIM + (unsigned int)j; + s4_wbufi3[i][j].d = DSET_SELECT_DIM + (unsigned int)j; + } + mem_tids[i] = s4_tid; + r_mem_tids[i] = s1_tid; + } + + break; + + case DSET_NTTYPES: + default: + TEST_ERROR; + + } /* end switch dset_types */ + + } /* end for i ndsets */ + + /* Copy wbufs if the library will be modifying them */ + if (mwbuf) { + HDmemcpy(total_wbuf1_bak, total_wbuf1, ndsets * DSET_SELECT_DIM * sizeof(int)); + HDmemcpy(ul_total_wbuf2_bak, ul_total_wbuf2, + ndsets * DSET_SELECT_DIM * sizeof(unsigned long)); + HDmemcpy(l_total_wbuf2_bak, l_total_wbuf2, ndsets * DSET_SELECT_DIM * sizeof(long)); + HDmemcpy(s1_total_wbuf3_bak, s1_total_wbuf3, ndsets * DSET_SELECT_DIM * sizeof(s1_t)); + HDmemcpy(s4_total_wbuf3_bak, s4_total_wbuf3, ndsets * DSET_SELECT_DIM * sizeof(s4_t)); + } + + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + TEST_ERROR; + + /* Restore wbufs from backup if the library modified them */ + if (mwbuf) { + HDmemcpy(total_wbuf1, total_wbuf1_bak, ndsets * DSET_SELECT_DIM * sizeof(int)); + HDmemcpy(ul_total_wbuf2, ul_total_wbuf2_bak, + ndsets * DSET_SELECT_DIM * sizeof(unsigned long)); + HDmemcpy(l_total_wbuf2, l_total_wbuf2_bak, ndsets * DSET_SELECT_DIM * sizeof(long)); + HDmemcpy(s1_total_wbuf3, s1_total_wbuf3_bak, ndsets * DSET_SELECT_DIM * sizeof(s1_t)); + HDmemcpy(s4_total_wbuf3, s4_total_wbuf3_bak, ndsets * DSET_SELECT_DIM * sizeof(s4_t)); + } + + if (H5Dread_multi(ndsets, dset_dids, r_mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + TEST_ERROR; + + /* Verify result read */ + /* for i ndsets */ + for (i = 0; i < (int)ndsets; i++) { + switch (dset_types[i]) { + + case DSET_WITH_NO_CONV: + for (j = 0; j < DSET_SELECT_DIM; j++) + if (rbufi1[i][j] != wbufi1[i][j]) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + + break; + + case 
DSET_WITH_CONV_AND_NO_BKG: + if (s == SETTING_A) { + for (j = 0; j < DSET_SELECT_DIM; j++) + if (l_rbufi2[i][j] != LONG_MAX) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + TEST_ERROR; + } + } + else if (s == SETTING_B) { + for (j = 0; j < DSET_SELECT_DIM; j++) + if (s_rbufi2[i][j] != SHRT_MAX) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + } + } + + break; + + case DSET_WITH_CONV_AND_BKG: + if (s == SETTING_A) { + for (j = 0; j < DSET_SELECT_DIM; j++) + if (s3_rbufi3[i][j].a != s1_wbufi3[i][j].a || + s3_rbufi3[i][j].b != s1_wbufi3[i][j].b || + s3_rbufi3[i][j].c != s1_wbufi3[i][j].c || + s3_rbufi3[i][j].d != s1_wbufi3[i][j].d) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + } + } + else if (s == SETTING_B) { + for (j = 0; j < DSET_SELECT_DIM; j++) + if (s1_rbufi3[i][j].a != s1_wbufi3[i][j].a || + s1_rbufi3[i][j].b != (DSET_SELECT_DIM + j) || + s1_rbufi3[i][j].c != s1_wbufi3[i][j].c || + s1_rbufi3[i][j].d != (DSET_SELECT_DIM + j)) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j); + } + } + + break; + + case DSET_NTTYPES: + default: + TEST_ERROR; + + } /* end switch dset_types */ + + } /* end for i ndsets */ + + } /* end for s settings */ + + /* Closing */ + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + + if (H5Tclose(s1_tid) < 0) + FAIL_STACK_ERROR; + if (H5Tclose(s3_tid) < 0) + FAIL_STACK_ERROR; + if (H5Tclose(s4_tid) < 0) + FAIL_STACK_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + if (H5Sclose(file_sids[i]) < 0) + FAIL_STACK_ERROR; + if (H5Dclose(dset_dids[i]) < 0) + FAIL_STACK_ERROR; + /* Don't delete the last set of datasets */ + if ((n + 1) != niter) + if (H5Ldelete(fid, dset_names[i], H5P_DEFAULT) < 0) + FAIL_STACK_ERROR; + } + + /* Freeing */ + HDfree(total_wbuf1); + total_wbuf1 = NULL; + HDfree(total_wbuf1_bak); + total_wbuf1_bak = NULL; + HDfree(total_rbuf1); + total_rbuf1 = NULL; + + HDfree(ul_total_wbuf2); + ul_total_wbuf2 = NULL; + HDfree(ul_total_wbuf2_bak); + ul_total_wbuf2_bak = NULL; + HDfree(l_total_rbuf2); + l_total_rbuf2 = NULL; + HDfree(l_total_wbuf2); + l_total_wbuf2 = NULL; + HDfree(l_total_wbuf2_bak); + l_total_wbuf2_bak = NULL; + HDfree(s_total_rbuf2); + s_total_rbuf2 = NULL; + + HDfree(s1_total_wbuf3); + s1_total_wbuf3 = NULL; + HDfree(s1_total_wbuf3_bak); + s1_total_wbuf3_bak = NULL; + HDfree(s3_total_rbuf3); + s3_total_rbuf3 = NULL; + HDfree(s4_total_wbuf3); + s4_total_wbuf3 = NULL; + HDfree(s4_total_wbuf3_bak); + s4_total_wbuf3_bak = NULL; + HDfree(s1_total_rbuf3); + s1_total_rbuf3 = NULL; + + } /* end for n niter */ + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + H5Pclose(dcpl); + H5Pclose(dxpl); + H5Tclose(s1_tid); + H5Tclose(s3_tid); + H5Tclose(s4_tid); + for (i = 0; i < (int)ndsets; i++) { + H5Sclose(file_sids[i]); + H5Sclose(mem_sids[i]); + H5Dclose(dset_dids[i]); + } + H5E_END_TRY; + + if (total_wbuf1) + HDfree(total_wbuf1); + if (total_wbuf1_bak) + HDfree(total_wbuf1_bak); + if (total_rbuf1) + HDfree(total_rbuf1); + + if (ul_total_wbuf2) + HDfree(ul_total_wbuf2); + if (ul_total_wbuf2_bak) + HDfree(ul_total_wbuf2_bak); + if (l_total_rbuf2) + HDfree(l_total_rbuf2); + if (l_total_wbuf2) + HDfree(l_total_wbuf2); + if (l_total_wbuf2_bak) + HDfree(l_total_wbuf2_bak); + if (s_total_rbuf2) + HDfree(s_total_rbuf2); + + 
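For orientation, the multi-dataset calls driven throughout these tests reduce to the shape sketched below (not part of this patch): one H5Dwrite_multi() call carries per-dataset arrays of dataset IDs, memory datatypes, memory dataspaces, file dataspaces, and buffers, with selection I/O requested on the transfer property list. The helper name write_two_dsets, its two already-open integer datasets, and SKETCH_DIM are assumptions for illustration.

#include "hdf5.h"

#define SKETCH_DIM 10

static herr_t
write_two_dsets(hid_t did0, hid_t did1)
{
    int         buf0[SKETCH_DIM], buf1[SKETCH_DIM];
    hid_t       dsets[2]   = {did0, did1};
    hid_t       mtypes[2]  = {H5T_NATIVE_INT, H5T_NATIVE_INT};
    hid_t       mspaces[2] = {H5S_ALL, H5S_ALL};
    hid_t       fspaces[2] = {H5S_ALL, H5S_ALL};
    const void *wbufs[2]   = {buf0, buf1};
    hid_t       dxpl       = H5Pcreate(H5P_DATASET_XFER);
    herr_t      ret        = 0;
    int         i;

    for (i = 0; i < SKETCH_DIM; i++) {
        buf0[i] = i;     /* data for the first dataset  */
        buf1[i] = 2 * i; /* data for the second dataset */
    }

    /* Request selection I/O; the library may still fall back to other I/O
     * paths, which the later tests in this file inspect via the
     * no-selection-I/O cause reported on the transfer property list. */
    if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0)
        ret = -1;

    /* One call writes both datasets; H5S_ALL selects each full extent and
     * each dataset gets its own memory type, spaces, and buffer. */
    if (ret == 0 && H5Dwrite_multi(2, dsets, mtypes, mspaces, fspaces, dxpl, wbufs) < 0)
        ret = -1;

    H5Pclose(dxpl);
    return ret;
}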
if (s1_total_wbuf3) + HDfree(s1_total_wbuf3); + if (s1_total_wbuf3_bak) + HDfree(s1_total_wbuf3_bak); + if (s3_total_rbuf3) + HDfree(s3_total_rbuf3); + if (s4_total_wbuf3) + HDfree(s4_total_wbuf3); + if (s4_total_wbuf3_bak) + HDfree(s4_total_wbuf3_bak); + if (s1_total_rbuf3) + HDfree(s1_total_rbuf3); + + return FAIL; + +} /* test_multi_dsets_all() */ + +/* + * Verify H5Pset/get_selection_io API works as expected + */ +static herr_t +test_set_get_select_io_mode(hid_t fid) +{ + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + int i; + long wbuf[DSET_SELECT_DIM]; + H5D_selection_io_mode_t selection_io_mode; + + HDprintf("\n"); + TESTING("H5Pget/set_selection_io_mode()"); + + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + TEST_ERROR; + + /* default case */ + if (H5Pget_selection_io(dxpl, &selection_io_mode) < 0) + TEST_ERROR; + + if (selection_io_mode != H5D_SELECTION_IO_MODE_DEFAULT) + TEST_ERROR; + + /* Disable case */ + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_OFF) < 0) + TEST_ERROR; + + if (H5Pget_selection_io(dxpl, &selection_io_mode) < 0) + TEST_ERROR; + + if (selection_io_mode != H5D_SELECTION_IO_MODE_OFF) + TEST_ERROR; + + /* Enable case */ + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + TEST_ERROR; + + if (H5Pget_selection_io(dxpl, &selection_io_mode) < 0) + TEST_ERROR; + + if (selection_io_mode != H5D_SELECTION_IO_MODE_ON) + TEST_ERROR; + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + FAIL_STACK_ERROR; + + if ((did = H5Dcreate2(fid, "test_chk_dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Initialize data */ + for (i = 0; i < DSET_SELECT_DIM; i++) + wbuf[i] = i; + + /* May change the selection io actually performed */ + if (H5Dwrite(did, H5T_NATIVE_LONG, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) + FAIL_STACK_ERROR; + + if (H5Pget_selection_io(dxpl, &selection_io_mode) < 0) + TEST_ERROR; + + /* Should still be enabled */ + if (selection_io_mode != H5D_SELECTION_IO_MODE_ON) + TEST_ERROR; + + if (H5Dclose(did) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + if (H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + + PASSED(); + + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Sclose(sid); + H5Dclose(did); + H5Pclose(dcpl); + H5Pclose(dxpl); + } + H5E_END_TRY; + + return FAIL; +} /* test_set_get_select_io_mode() */ + +/* + * To test with various test_mode that no selelction I/O is performed + * + * Note: It's the responsibility of the tester to feed proper combination + * of test_mode as needed. 
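 * For example (illustrative only; this is the combination exercised by
 * test_get_no_selection_io_cause() later in this patch), a caller ORs the
 * modes together:
 *
 *     errs += test_no_selection_io_cause_mode(filename, fapl,
 *                 TEST_DATATYPE_CONVERSION | TEST_TCONV_BUF_TOO_SMALL);
 *
 * With that combination the write is expected to report
 * H5D_SEL_IO_TCONV_BUF_TOO_SMALL, while the read is expected to report
 * H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB (selection I/O succeeds and only
 * falls back to scalar I/O at the virtual file layer).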
+ */ +static herr_t +test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_mode) +{ + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t fid = H5I_INVALID_HID; + hid_t fcpl = H5I_INVALID_HID; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hbool_t is_chunked = FALSE; + hid_t tid = H5T_NATIVE_INT; + uint32_t no_selection_io_cause_write = 0; + uint32_t no_selection_io_cause_read = 0; + uint32_t no_selection_io_cause_write_expected = 0; + uint32_t no_selection_io_cause_read_expected = 0; + int wbuf[DSET_SELECT_DIM]; + int rbuf[DSET_SELECT_DIM]; + int i; + + if ((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0) + FAIL_STACK_ERROR; + + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + FAIL_STACK_ERROR; + + /* Enable page buffering to trigger H5D_PAGE_BUFFER */ + if (test_mode & TEST_PAGE_BUFFER) { + if (H5Pset_page_buffer_size(fapl, 4096, 0, 0) < 0) + FAIL_STACK_ERROR; + if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0) + FAIL_STACK_ERROR; + } + else { + /* Not page buffer test, reset to default */ + if (H5Pset_page_buffer_size(fapl, 0, 0, 0) < 0) + FAIL_STACK_ERROR; + if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, 0, (hsize_t)1) < 0) + FAIL_STACK_ERROR; + } + + if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0) + FAIL_STACK_ERROR; + + /* If default mode, 1st write will trigger cb, 2nd write will trigger sieve */ + /* If on mode, will trigger nothing because the on mode path is different */ + /* Need 2 writes */ + if (test_mode & TEST_CONTIGUOUS_SIEVE_BUFFER) { + no_selection_io_cause_write_expected |= H5D_SEL_IO_CONTIGUOUS_SIEVE_BUFFER; + no_selection_io_cause_read_expected |= H5D_SEL_IO_CONTIGUOUS_SIEVE_BUFFER; + } + + if (test_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET) { + if (H5Pset_layout(dcpl, H5D_COMPACT) < 0) + FAIL_STACK_ERROR; + no_selection_io_cause_write_expected |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + no_selection_io_cause_read_expected |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + } + + if (test_mode == TEST_DATASET_FILTER) { + if (H5Pset_deflate(dcpl, 9) < 0) + FAIL_STACK_ERROR; + is_chunked = TRUE; + no_selection_io_cause_write_expected |= H5D_SEL_IO_DATASET_FILTER; + no_selection_io_cause_read_expected |= H5D_SEL_IO_DATASET_FILTER; + } + + if (test_mode == TEST_CHUNK_CACHE) { + is_chunked = TRUE; + no_selection_io_cause_write_expected |= H5D_SEL_IO_CHUNK_CACHE; + no_selection_io_cause_read_expected |= H5D_SEL_IO_CHUNK_CACHE; + } + + if (test_mode == TEST_DISABLE_BY_API) { + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_OFF) < 0) + FAIL_STACK_ERROR; + no_selection_io_cause_write_expected |= H5D_SEL_IO_DISABLE_BY_API; + no_selection_io_cause_read_expected |= H5D_SEL_IO_DISABLE_BY_API; + } + + if (test_mode & TEST_NO_VECTOR_OR_SELECTION_IO_CB) { + no_selection_io_cause_write_expected |= H5D_SEL_IO_DEFAULT_OFF; + no_selection_io_cause_read_expected |= H5D_SEL_IO_DEFAULT_OFF; + } + + /* Datatype conversion */ + if (test_mode & TEST_DATATYPE_CONVERSION) { + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + FAIL_STACK_ERROR; + tid = H5T_NATIVE_UINT; + + /* If we're testing a too small tconv buffer, set the buffer to be too small */ + if (test_mode & TEST_TCONV_BUF_TOO_SMALL) { + if (H5Pset_buffer(dxpl, sizeof(int), NULL, NULL) < 0) + FAIL_STACK_ERROR; + + /* If we're using in-place type conversion sel io will succeed and only 
switch to scalar at the + * VFL */ + if (test_mode & TEST_IN_PLACE_TCONV) { + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + FAIL_STACK_ERROR; + no_selection_io_cause_write_expected |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + } + else + no_selection_io_cause_write_expected |= H5D_SEL_IO_TCONV_BUF_TOO_SMALL; + + /* In-place type conversion for read doesn't require modify_write_buf */ + no_selection_io_cause_read_expected |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + } + else { + /* sel io will succeed and only switch to scalar at the VFL */ + no_selection_io_cause_write_expected |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + no_selection_io_cause_read_expected |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + } + } + + if (test_mode & TEST_PAGE_BUFFER) { + no_selection_io_cause_write_expected |= H5D_SEL_IO_PAGE_BUFFER; + no_selection_io_cause_read_expected |= H5D_SEL_IO_PAGE_BUFFER; + } + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + FAIL_STACK_ERROR; + + if (is_chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + FAIL_STACK_ERROR; + } + + if ((did = H5Dcreate2(fid, "no_selection_io_cause", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, + H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Initialize data */ + for (i = 0; i < DSET_SELECT_DIM; i++) + wbuf[i] = i; + + if (H5Dwrite(did, tid, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) + FAIL_STACK_ERROR; + + if (test_mode & TEST_CONTIGUOUS_SIEVE_BUFFER) { + if (H5Dwrite(did, tid, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) + FAIL_STACK_ERROR; + } + + if (H5Pget_no_selection_io_cause(dxpl, &no_selection_io_cause_write) < 0) + TEST_ERROR; + + /* Verify causes of no selection I/O for write are as expected */ + if (no_selection_io_cause_write != no_selection_io_cause_write_expected) + TEST_ERROR; + + /* Flush to clear the sieve buf */ + if (test_mode & TEST_NO_VECTOR_OR_SELECTION_IO_CB || test_mode & TEST_DATATYPE_CONVERSION || + test_mode & TEST_PAGE_BUFFER) { + + if (H5Dflush(did) < 0) + FAIL_STACK_ERROR; + } + + if (H5Dread(did, tid, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) + FAIL_STACK_ERROR; + + /* Verify causes of no selection I/O for write is as expected */ + if (H5Pget_no_selection_io_cause(dxpl, &no_selection_io_cause_read) < 0) + TEST_ERROR; + + /* Verify causes of no selection I/O for read are as expected */ + if (no_selection_io_cause_read != no_selection_io_cause_read_expected) + TEST_ERROR; + + if (H5Dclose(did) < 0) + FAIL_STACK_ERROR; + if (H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + + if (H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + if (H5Pclose(fcpl) < 0) + FAIL_STACK_ERROR; + + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Pclose(fcpl); + H5Pclose(dcpl); + H5Pclose(dxpl); + H5Dclose(did); + H5Dclose(sid); + H5Fclose(fid); + } + H5E_END_TRY; + + return FAIL; +} /* test_no_selection_io_cause_mode() */ + +/* + * Test for causes of not performing selection I/O + */ +static herr_t +test_get_no_selection_io_cause(const char *filename, hid_t fapl) +{ + + int errs = 0; + + HDprintf("\n"); + TESTING("H5Pget_no_selection_io_cause()"); + + errs += test_no_selection_io_cause_mode(filename, fapl, TEST_DISABLE_BY_API); + errs += test_no_selection_io_cause_mode(filename, fapl, TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET); + errs += test_no_selection_io_cause_mode(filename, fapl, TEST_CONTIGUOUS_SIEVE_BUFFER); + errs += test_no_selection_io_cause_mode(filename, fapl, TEST_DATASET_FILTER); + errs += 
test_no_selection_io_cause_mode(filename, fapl, TEST_CHUNK_CACHE); + errs += test_no_selection_io_cause_mode(filename, fapl, TEST_NO_VECTOR_OR_SELECTION_IO_CB); + errs += test_no_selection_io_cause_mode(filename, fapl, TEST_DATATYPE_CONVERSION); + errs += + test_no_selection_io_cause_mode(filename, fapl, TEST_DATATYPE_CONVERSION | TEST_TCONV_BUF_TOO_SMALL); + errs += test_no_selection_io_cause_mode( + filename, fapl, TEST_DATATYPE_CONVERSION | TEST_TCONV_BUF_TOO_SMALL | TEST_IN_PLACE_TCONV); +#ifndef H5_HAVE_PARALLEL + errs += test_no_selection_io_cause_mode(filename, fapl, TEST_PAGE_BUFFER); +#endif + + if (errs) { + HDprintf(" FAILED\n"); + return FAIL; + } + else { + PASSED(); + return SUCCEED; + } +} + +/*------------------------------------------------------------------------- + * Function: main + * + * Purpose: Test cases for selection I/O + * + * Return: EXIT_SUCCESS/EXIT_FAILURE + * + * Programmer: + * + *------------------------------------------------------------------------- + */ +int +main(void) +{ + int nerrors = 0; + char filename[FILENAME_BUF_SIZE]; + hid_t fapl = H5I_INVALID_HID; + hid_t fid = H5I_INVALID_HID; + int test_select_config; + unsigned chunked; + unsigned dtrans; + unsigned mwbuf; + + /* Testing setup */ + h5_reset(); + fapl = h5_fileaccess(); + + h5_fixname(FILENAME[0], fapl, filename, sizeof filename); + + if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + TEST_ERROR; + + /* Test with contiguous or chunked dataset */ + for (chunked = FALSE; chunked <= TRUE; chunked++) { + + /* Data transforms only apply to integer or floating-point datasets */ + /* therefore, not all tests are run with data transform */ + for (dtrans = FALSE; dtrans <= TRUE; dtrans++) { + + /* Test with and without modify_write_buf turned on */ + for (mwbuf = FALSE; mwbuf <= TRUE; mwbuf++) { + /* Print configuration message */ + printf("Testing for selection I/O "); + if (chunked) + printf("with chunked dataset, "); + else + printf("with contiguous dataset, "); + if (dtrans) + printf("data transform, "); + else + printf("without data transform, "); + if (mwbuf) + printf("and with modifying write buffers\n"); + else + printf("and without modifying write buffers\n"); + + for (test_select_config = (int)TEST_NO_TYPE_CONV; + test_select_config < (int)TEST_SELECT_NTESTS; test_select_config++) { + + switch (test_select_config) { + case TEST_NO_TYPE_CONV: /* case 1 */ + TESTING_2("No type conversion (null case)"); + + nerrors += (test_no_type_conv(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + + break; + + case TEST_NO_SIZE_CHANGE_NO_BKG: /* case 2 */ + TESTING_2("No size change, no background buffer"); + + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) + SKIPPED(); + else + nerrors += (test_no_size_change_no_bkg(fid, chunked, mwbuf) < 0 ? 1 : 0); + + break; + + case TEST_LARGER_MEM_NO_BKG: /* case 3 */ + TESTING_2("Larger memory type, no background buffer"); + + nerrors += (test_larger_mem_type_no_bkg(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + + break; + + case TEST_SMALLER_MEM_NO_BKG: /* case 4 */ + TESTING_2("Smaller memory type, no background buffer"); + + nerrors += + (test_smaller_mem_type_no_bkg(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + + break; + + case TEST_CMPD_WITH_BKG: /* case 5 */ + TESTING_2("Compound types with background buffer"); + + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) + SKIPPED(); + else + nerrors += (test_cmpd_with_bkg(fid, chunked, mwbuf) < 0 ? 
1 : 0); + + break; + + case TEST_MULTI_CONV_NO_BKG: /* case 6 */ + TESTING_2("multi-datasets: type conv + no bkg buffer"); + + nerrors += test_multi_dsets_no_bkg(fid, chunked, dtrans, mwbuf); + break; + + case TEST_MULTI_CONV_BKG: /* case 7 */ + TESTING_2("multi-datasets: type conv + bkg buffer"); + + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) + SKIPPED(); + else + nerrors += test_multi_dsets_cmpd_with_bkg(fid, chunked, mwbuf); + + break; + + case TEST_MULTI_CONV_SIZE_CHANGE: /* case 8 */ + TESTING_2("multi-datasets: type conv + size change + no bkg buffer"); + + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) + SKIPPED(); + else + nerrors += test_multi_dsets_size_change_no_bkg(fid, chunked, mwbuf); + + break; + + case TEST_MULTI_ALL: /* case 9 */ + TESTING_2("multi-datasets: no conv + conv without bkg + conv with bkg"); + + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) + SKIPPED(); + else + nerrors += test_multi_dsets_all(10, fid, chunked, mwbuf); + + break; + + case TEST_SELECT_NTESTS: + default: + TEST_ERROR; + + } /* end switch */ + } /* end for test_select_config */ + + } /* end mwbuf */ + + } /* end dtrans */ + + } /* end chunked */ + + nerrors += test_set_get_select_io_mode(fid); + + if (H5Fclose(fid) < 0) + TEST_ERROR; + + /* Use own file */ + nerrors += test_get_no_selection_io_cause(filename, fapl); + + if (nerrors) + goto error; + + printf("\n===================================\n"); + HDprintf("All selection I/O dataset tests passed.\n"); + printf("===================================\n"); + + h5_cleanup(FILENAME, fapl); + + HDexit(EXIT_SUCCESS); + +error: + nerrors = MAX(1, nerrors); + HDprintf("***** %d SELECTION I/O DATASET TEST%s FAILED! *****\n", nerrors, 1 == nerrors ? 
"" : "S"); + HDexit(EXIT_FAILURE); + +} /* end main() */ diff --git a/test/testfiles/plist_files/def_dxpl_32be b/test/testfiles/plist_files/def_dxpl_32be index b13f45680324b314d57b21d66dc2f5ac4a7c3451..77ed5d1c637e43ca58c907a3e978346e2c573e05 100644 GIT binary patch delta 73 zcmaFJ*v2Hvz+Rl1lbT$TnV%P*nIE5CqVZqFqJMDoq5Ig|U2PTG@O`Mkj0Hq!YxBvhE diff --git a/test/testfiles/plist_files/def_dxpl_32le b/test/testfiles/plist_files/def_dxpl_32le index b13f45680324b314d57b21d66dc2f5ac4a7c3451..77ed5d1c637e43ca58c907a3e978346e2c573e05 100644 GIT binary patch delta 73 zcmaFJ*v2Hvz+Rl1lbT$TnV%P*nIE5CqVZqFqJMDoq5Ig|U2PTG@O`Mkj0Hq!YxBvhE diff --git a/test/testfiles/plist_files/def_dxpl_64be b/test/testfiles/plist_files/def_dxpl_64be index b13f45680324b314d57b21d66dc2f5ac4a7c3451..77ed5d1c637e43ca58c907a3e978346e2c573e05 100644 GIT binary patch delta 73 zcmaFJ*v2Hvz+Rl1lbT$TnV%P*nIE5CqVZqFqJMDoq5Ig|U2PTG@O`Mkj0Hq!YxBvhE diff --git a/test/testfiles/plist_files/def_dxpl_64le b/test/testfiles/plist_files/def_dxpl_64le index b13f45680324b314d57b21d66dc2f5ac4a7c3451..77ed5d1c637e43ca58c907a3e978346e2c573e05 100644 GIT binary patch delta 73 zcmaFJ*v2Hvz+Rl1lbT$TnV%P*nIE5CqVZqFqJMDoq5Ig|U2PTG@O`Mkj0Hq!YxBvhE diff --git a/test/testfiles/plist_files/dxpl_32be b/test/testfiles/plist_files/dxpl_32be index 5ff2ea01f96a5c0d613d061d2bb822ebc65e0ac2..b6ff37d972e4560a61ae4c27bfa849fa0da5cc40 100644 GIT binary patch delta 102 zcmaFL*u@mbz+Rl1lbT$TnV%P*nIE59l9|uI!N9<9l9|uI!N9<9l9|uI!N9<9l9|uI!N9<= 0), "H5Dcreate2() dataset succeeded"); + /* Set up the dxpl for the write */ + dxpl_write = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); + /* * Set expected causes and some tweaks based on the type of test */ if (selection_mode & TEST_DATATYPE_CONVERSION) { test_name = "Broken Collective I/O - Datatype Conversion"; - no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION; - no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION; + /* set different sign to trigger type conversion */ data_type = H5T_NATIVE_UINT; + + /* Disable selection I/O since datatype conversion is supported in collective with selection I/O */ + ret = H5Pset_selection_io(dxpl_write, H5D_SELECTION_IO_MODE_OFF); + VRFY((ret >= 0), "H5Pset_selection_io succeeded"); + + no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION | H5D_MPIO_NO_SELECTION_IO; + no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION | H5D_MPIO_NO_SELECTION_IO; + no_selection_io_cause_expected |= H5D_SEL_IO_DISABLE_BY_API; } if (selection_mode & TEST_DATA_TRANSFORMS) { test_name = "Broken Collective I/O - DATA Transforms"; - no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS; - no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS; + + /* Set transform */ + ret = H5Pset_data_transform(dxpl_write, "x+1"); + VRFY((ret >= 0), "H5Pset_data_transform succeeded"); + + /* Disable selection I/O since data transforms are supported in collective with selection I/O */ + ret = H5Pset_selection_io(dxpl_write, H5D_SELECTION_IO_MODE_OFF); + VRFY((ret >= 0), "H5Pset_selection_io succeeded"); + + no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS | H5D_MPIO_NO_SELECTION_IO; + no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS | H5D_MPIO_NO_SELECTION_IO; + no_selection_io_cause_expected |= H5D_SEL_IO_DISABLE_BY_API; } if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { test_name = "Broken Collective I/O - No Simple or Scalar DataSpace"; 
no_collective_cause_local_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; + no_collective_cause_local_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; + no_collective_cause_global_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; } if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT || @@ -3996,6 +4023,8 @@ test_no_collective_cause_mode(int selection_mode) test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset"; no_collective_cause_local_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + no_collective_cause_local_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; + no_collective_cause_global_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; } if (selection_mode & TEST_COLLECTIVE) { @@ -4008,6 +4037,8 @@ test_no_collective_cause_mode(int selection_mode) test_name = "Broken Collective I/O - Independent"; no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT; no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT; + no_collective_cause_local_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; + no_collective_cause_global_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; /* switch to independent io */ is_independent = 1; } @@ -4037,10 +4068,6 @@ test_no_collective_cause_mode(int selection_mode) for (i = 0; i < length; i++) buffer[i] = i; - /* Set up the dxpl for the write */ - dxpl_write = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - if (is_independent) { /* Set Independent I/O */ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); @@ -4052,11 +4079,6 @@ test_no_collective_cause_mode(int selection_mode) VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); } - if (selection_mode & TEST_DATA_TRANSFORMS) { - ret = H5Pset_data_transform(dxpl_write, "x+1"); - VRFY((ret >= 0), "H5Pset_data_transform succeeded"); - } - /*--------------------- * Test Write access *---------------------*/ @@ -4072,6 +4094,20 @@ test_no_collective_cause_mode(int selection_mode) &no_collective_cause_global_write); VRFY((ret >= 0), "retrieving no collective cause succeeded"); + ret = H5Pget_no_selection_io_cause(dxpl_write, &no_selection_io_cause_write); + VRFY((ret >= 0), "retrieving no selection io cause succeeded"); + + if (no_collective_cause_local_write & H5D_MPIO_NO_SELECTION_IO) { + VRFY((no_selection_io_cause_write == no_selection_io_cause_expected), + "H5D_MPIO_NO_SELECTION_IO for write is as expected"); + } + + if (no_collective_cause_global_write & H5D_MPIO_NO_SELECTION_IO) { + + VRFY((no_selection_io_cause_write == no_selection_io_cause_expected), + "H5D_MPIO_NO_SELECTION_IO for write is as expected"); + } + /*--------------------- * Test Read access *---------------------*/ @@ -4092,6 +4128,21 @@ test_no_collective_cause_mode(int selection_mode) &no_collective_cause_global_read); VRFY((ret >= 0), "retrieving no collective cause succeeded"); + ret = H5Pget_no_selection_io_cause(dxpl_read, &no_selection_io_cause_read); + VRFY((ret >= 0), "retrieving no selection io cause succeeded"); + + if (no_collective_cause_local_read & H5D_MPIO_NO_SELECTION_IO) { + + VRFY((no_selection_io_cause_read == no_selection_io_cause_expected), + "H5D_MPIO_NO_SELECTION_IO for read is as expected"); + } + + if (no_collective_cause_global_read & H5D_MPIO_NO_SELECTION_IO) { + + VRFY((no_selection_io_cause_read == no_selection_io_cause_expected), + "H5D_MPIO_NO_SELECTION_IO 
for read is as expected"); + } + /* Check write vs read */ VRFY((no_collective_cause_local_read == no_collective_cause_local_write), "reading and writing are the same for local cause of Broken Collective I/O"); diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c index 5f853e3694d..6636ffa9c52 100644 --- a/testpar/t_coll_chunk.c +++ b/testpar/t_coll_chunk.c @@ -566,7 +566,8 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap hsize_t start[RANK], count[RANK], stride[RANK], block[RANK]; #ifdef H5_HAVE_INSTRUMENTED_LIBRARY - unsigned prop_value; + unsigned prop_value; + H5D_selection_io_mode_t selection_io_mode; #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ int mpi_size, mpi_rank; @@ -804,7 +805,11 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap /* Only check chunk optimization mode if selection I/O is not being used - * selection I/O bypasses this IO mode decision - it's effectively always * multi chunk currently */ - if (facc_type == FACC_MPIO && !H5_use_selection_io_g) { + + status = H5Pget_selection_io(xfer_plist, &selection_io_mode); + VRFY((status >= 0), "testing property list get succeeded"); + + if (facc_type == FACC_MPIO && (selection_io_mode != H5D_SELECTION_IO_MODE_ON)) { switch (api_option) { case API_LINK_HARD: status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value); diff --git a/testpar/t_dset.c b/testpar/t_dset.c index 34c4d97b8b8..d144235ec0b 100644 --- a/testpar/t_dset.c +++ b/testpar/t_dset.c @@ -3348,13 +3348,27 @@ test_actual_io_mode(int selection_mode) void actual_io_mode_tests(void) { - int mpi_size = -1; + H5D_selection_io_mode_t selection_io_mode; + hid_t dxpl_id = H5I_INVALID_HID; + herr_t ret; + int mpi_size = -1; + int mpi_rank = -1; + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* Only run these tests if selection I/O is not being used - selection I/O * bypasses this IO mode decision - it's effectively always multi chunk * currently */ - if (!H5_use_selection_io_g) { + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); + ret = H5Pget_selection_io(dxpl_id, &selection_io_mode); + VRFY((ret >= 0), "retrieving selection io mode succeeded"); + ret = H5Pclose(dxpl_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + + if (selection_io_mode != H5D_SELECTION_IO_MODE_ON) { test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); /* @@ -3438,6 +3452,10 @@ test_no_collective_cause_mode(int selection_mode) uint32_t no_collective_cause_global_read = 0; uint32_t no_collective_cause_global_expected = 0; + uint32_t no_selection_io_cause_write = 0; + uint32_t no_selection_io_cause_read = 0; + uint32_t no_selection_io_cause_expected = 0; + const char *filename; const char *test_name; hbool_t is_chunked = 1; @@ -3538,27 +3556,50 @@ test_no_collective_cause_mode(int selection_mode) dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); + /* Set up the dxpl for the write */ + dxpl_write = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); + /* * Set expected causes and some tweaks based on the type of test */ if (selection_mode & TEST_DATATYPE_CONVERSION) { test_name = "Broken Collective I/O - Datatype Conversion"; - no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION; - no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION; + /* set different 
sign to trigger type conversion */ data_type = H5T_NATIVE_UINT; + + /* Disable selection I/O since datatype conversion is supported in collective with selection I/O */ + ret = H5Pset_selection_io(dxpl_write, H5D_SELECTION_IO_MODE_OFF); + VRFY((ret >= 0), "H5Pset_selection_io succeeded"); + + no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION | H5D_MPIO_NO_SELECTION_IO; + no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION | H5D_MPIO_NO_SELECTION_IO; + no_selection_io_cause_expected |= H5D_SEL_IO_DISABLE_BY_API; } if (selection_mode & TEST_DATA_TRANSFORMS) { test_name = "Broken Collective I/O - DATA Transforms"; - no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS; - no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS; + + /* Set transform */ + ret = H5Pset_data_transform(dxpl_write, "x+1"); + VRFY((ret >= 0), "H5Pset_data_transform succeeded"); + + /* Disable selection I/O since data transforms are supported in collective with selection I/O */ + ret = H5Pset_selection_io(dxpl_write, H5D_SELECTION_IO_MODE_OFF); + VRFY((ret >= 0), "H5Pset_selection_io succeeded"); + + no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS | H5D_MPIO_NO_SELECTION_IO; + no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS | H5D_MPIO_NO_SELECTION_IO; + no_selection_io_cause_expected |= H5D_SEL_IO_DISABLE_BY_API; } if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { test_name = "Broken Collective I/O - No Simple or Scalar DataSpace"; no_collective_cause_local_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; + no_collective_cause_local_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; + no_collective_cause_global_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; } if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT || @@ -3566,6 +3607,8 @@ test_no_collective_cause_mode(int selection_mode) test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset"; no_collective_cause_local_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + no_collective_cause_local_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; + no_collective_cause_global_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; } if (selection_mode & TEST_COLLECTIVE) { @@ -3578,6 +3621,8 @@ test_no_collective_cause_mode(int selection_mode) test_name = "Broken Collective I/O - Independent"; no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT; no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT; + no_collective_cause_local_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; + no_collective_cause_global_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; /* switch to independent io */ is_independent = 1; } @@ -3607,10 +3652,6 @@ test_no_collective_cause_mode(int selection_mode) for (i = 0; i < length; i++) buffer[i] = i; - /* Set up the dxpl for the write */ - dxpl_write = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - if (is_independent) { /* Set Independent I/O */ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); @@ -3642,6 +3683,20 @@ test_no_collective_cause_mode(int selection_mode) &no_collective_cause_global_write); VRFY((ret >= 0), "retrieving no collective cause succeeded"); + ret = H5Pget_no_selection_io_cause(dxpl_write, &no_selection_io_cause_write); + VRFY((ret >= 0), "retrieving no selection io cause 
succeeded"); + + if (no_collective_cause_local_write & H5D_MPIO_NO_SELECTION_IO) { + VRFY((no_selection_io_cause_write == no_selection_io_cause_expected), + "H5D_MPIO_NO_SELECTION_IO for write is as expected"); + } + + if (no_collective_cause_global_write & H5D_MPIO_NO_SELECTION_IO) { + + VRFY((no_selection_io_cause_write == no_selection_io_cause_expected), + "H5D_MPIO_NO_SELECTION_IO for write is as expected"); + } + /*--------------------- * Test Read access *---------------------*/ @@ -3662,6 +3717,21 @@ test_no_collective_cause_mode(int selection_mode) &no_collective_cause_global_read); VRFY((ret >= 0), "retrieving no collective cause succeeded"); + ret = H5Pget_no_selection_io_cause(dxpl_read, &no_selection_io_cause_read); + VRFY((ret >= 0), "retrieving no selection io cause succeeded"); + + if (no_collective_cause_local_read & H5D_MPIO_NO_SELECTION_IO) { + + VRFY((no_selection_io_cause_read == no_selection_io_cause_expected), + "H5D_MPIO_NO_SELECTION_IO for read is as expected"); + } + + if (no_collective_cause_global_read & H5D_MPIO_NO_SELECTION_IO) { + + VRFY((no_selection_io_cause_read == no_selection_io_cause_expected), + "H5D_MPIO_NO_SELECTION_IO for read is as expected"); + } + /* Check write vs read */ VRFY((no_collective_cause_local_read == no_collective_cause_local_write), "reading and writing are the same for local cause of Broken Collective I/O"); diff --git a/testpar/t_select_io_dset.c b/testpar/t_select_io_dset.c new file mode 100644 index 00000000000..10b29e403ea --- /dev/null +++ b/testpar/t_select_io_dset.c @@ -0,0 +1,3786 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * + * Purpose: Test selection I/O + */ + +#include "h5test.h" +#include "testpar.h" + +#define FILENAME "pselect_io.h5" + +/* MPI variables */ +int mpi_size; +int mpi_rank; + +/* Number of errors */ +int nerrors = 0; +int curr_nerrors = 0; + +#define P_TEST_ERROR \ + do { \ + nerrors++; \ + H5_FAILED(); \ + AT(); \ + } while (0) + +#define CHECK_PASSED() \ + do { \ + int err_result = (nerrors > curr_nerrors); \ + \ + MPI_Allreduce(MPI_IN_PLACE, &err_result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); \ + \ + if (MAINPROCESS) { \ + if (err_result == 0) \ + PASSED(); \ + else \ + HDputs(" ***TEST FAILED***"); \ + } \ + } while (0) + +/* + * Test configurations + */ +typedef enum { + TEST_NO_TYPE_CONV, /* no type conversion (null case) */ + TEST_NO_SIZE_CHANGE_NO_BKG, /* no size change, no bkg buffer */ + TEST_LARGER_MEM_NO_BKG, /* larger memory type, no bkg buffer */ + TEST_SMALLER_MEM_NO_BKG, /* smaller memory type, no bkg buffer */ + TEST_CMPD_WITH_BKG, /* compound types with bkg buffer */ + TEST_TYPE_CONV_SEL_EMPTY, /* some processes have null/empty selections and with type conversion */ + TEST_MULTI_CONV_NO_BKG, /* multi dataset test 1 */ + TEST_MULTI_CONV_BKG, /* multi dataset test 2 */ + TEST_MULTI_CONV_SIZE_CHANGE, /* multi dataset test 3 */ + TEST_MULTI_CONV_SEL_EMPTY, /* multi dataset test 4 */ + TEST_MULTI_ALL, /* multi dataset test 5 */ + TEST_SELECT_NTESTS +} test_select_config_t; + +#define DSET_SELECT_DIM 100 +#define DSET_SELECT_CHUNK_DIM 10 + +#define MULTI_NUM_DSETS 3 +#define MULTI_MIN_DSETS 3 +#define DSET_NAME_LEN 64 + +/* Compound type */ +typedef struct s1_t { + int a; + int b; + int c; + int d; +} s1_t; + +/* + * Variation of s1 with: + * --no conversion for 2 member types + * --1 larger mem type, + * --1 smaller mem type + */ +typedef struct s2_t { + int a; + long long b; + int c; + short d; +} s2_t; + +/* Variation of s1: reverse of s1_t */ +typedef struct s3_t { + int d; + int c; + int b; + int a; +} s3_t; + +/* Variations of s1: only 2 members in s1_t */ +typedef struct s4_t { + unsigned int b; + unsigned int d; +} s4_t; + +/* Defines for test_multi_dsets_all() */ +typedef enum { + DSET_WITH_NO_CONV, /* Dataset with no type conversion */ + DSET_WITH_CONV_AND_NO_BKG, /* Dataset with type conversion but no background buffer */ + DSET_WITH_CONV_AND_BKG, /* Dataset with type conversion and background buffer */ + DSET_NTTYPES +} multi_dset_type_t; + +/* Test setting A and B */ +#define SETTING_A 1 +#define SETTING_B 2 + +/* Definitions of the test modes for test_get_no_selection_io_cause() */ +#define TEST_DISABLE_BY_API 0x001 +#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET 0x002 +#define TEST_DATATYPE_CONVERSION 0x004 +#define TEST_TCONV_BUF_TOO_SMALL 0x008 +#define TEST_IN_PLACE_TCONV 0x010 + +/* + * Helper routine to set dxpl + * --selection I/O mode + * --type of I/O + * --type of collective I/O + */ +static void +set_dxpl(hid_t dxpl, H5D_selection_io_mode_t select_io_mode, H5FD_mpio_xfer_t mpio_type, + H5FD_mpio_collective_opt_t mpio_coll_opt, unsigned mwbuf) +{ + if (H5Pset_selection_io(dxpl, select_io_mode) < 0) + P_TEST_ERROR; + + if (H5Pset_dxpl_mpio(dxpl, mpio_type) < 0) + P_TEST_ERROR; + + if (H5Pset_dxpl_mpio_collective_opt(dxpl, mpio_coll_opt) < 0) + P_TEST_ERROR; + + if (mwbuf) + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + P_TEST_ERROR; + +} /* set_dxpl() */ + +/* + * Helper routine to check actual I/O mode on a dxpl + */ +static void +check_io_mode(hid_t dxpl, unsigned 
chunked) +{ + H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_NO_COLLECTIVE; + + if (H5Pget_mpio_actual_io_mode(dxpl, &actual_io_mode) < 0) + P_TEST_ERROR; + + if (chunked) { + if (actual_io_mode != H5D_MPIO_CHUNK_COLLECTIVE) { + nerrors++; + if (MAINPROCESS) + HDprintf("\n Failed: Incorrect I/O mode (expected chunked, returned %u)", + (unsigned)actual_io_mode); + } + } + else if (actual_io_mode != H5D_MPIO_CONTIGUOUS_COLLECTIVE) { + nerrors++; + if (MAINPROCESS) + HDprintf("\n Failed: Incorrect I/O mode (expected contiguous, returned %u)", + (unsigned)actual_io_mode); + } + +} /* check_io_mode() */ + +/* + * Case 1: single dataset read/write, no type conversion (null case) + */ +static void +test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +{ + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + int wbuf[DSET_SELECT_DIM]; + int wbuf_bak[DSET_SELECT_DIM]; + int trans_wbuf[DSET_SELECT_DIM]; + int rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "2*x"; + + curr_nerrors = nerrors; + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + /* Generate dataset name */ + HDsnprintf(dset_name, sizeof(dset_name), "no_tconv_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", mwbuf ? 
"mwbuf" : "nomwbuf"); + + /* Create dataset */ + if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + + /* Each process takes x number of elements */ + block[0] = dims[0] / (hsize_t)mpi_size; + stride[0] = block[0]; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + + /* Initialize data */ + for (i = 0; i < (int)block[0]; i++) { + wbuf[i] = i + (int)start[0]; + trans_wbuf[i] = 2 * wbuf[i]; + } + + /* Create a memory dataspace */ + if ((mspace_id = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + + /* Create a file dataspace */ + if ((fspace_id = H5Dget_space(did)) < 0) + P_TEST_ERROR; + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + P_TEST_ERROR; + + /* Set selection I/O mode, type of I/O and type of collective I/O */ + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + + if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) + P_TEST_ERROR; + + /* Set data transform */ + if (dtrans) + if (H5Pset_data_transform(dxpl, expr) < 0) + P_TEST_ERROR; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(wbuf_bak, wbuf, sizeof(wbuf)); + + /* Write data to the dataset with/without data transform */ + if (H5Dwrite(did, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl, wbuf) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(wbuf, wbuf_bak, sizeof(wbuf)); + + check_io_mode(dxpl, chunked); + + /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ + if (H5Dread(did, H5T_NATIVE_INT, mspace_id, fspace_id, ntrans_dxpl, rbuf) < 0) + P_TEST_ERROR; + + /* Verify data read (if dtrans, verify data is transformed) */ + for (i = 0; i < (int)block[0]; i++) + if (rbuf[i] != (dtrans ? trans_wbuf[i] : wbuf[i])) { + nerrors++; + HDprintf("\n Error in first data verification:\n"); + HDprintf(" At index %d: %d, %d\n", i + (int)start[0], dtrans ? 
trans_wbuf[i] : wbuf[i], + rbuf[i]); + break; + } + + if (dtrans) { + + /* Read the data from the dataset with data transform set in dxpl */ + if (H5Dread(did, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl, rbuf) < 0) + P_TEST_ERROR; + + /* Verify data read is transformed a second time */ + for (i = 0; i < (int)block[0]; i++) + if (rbuf[i] != (2 * trans_wbuf[i])) { + nerrors++; + HDprintf("\n Error in second data verification:.\n"); + HDprintf(" At index %d: %d, %d\n", i + (int)start[0], 2 * trans_wbuf[i], rbuf[i]); + break; + } + } + + if (H5Sclose(mspace_id) < 0) + P_TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + P_TEST_ERROR; + + if (H5Sclose(sid) < 0) + P_TEST_ERROR; + if (H5Dclose(did) < 0) + P_TEST_ERROR; + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + if (H5Pclose(ntrans_dxpl) < 0) + P_TEST_ERROR; + + CHECK_PASSED(); + + return; +} /* test_no_type_conv() */ + +/* + * Case 2: single dataset read/write, no size change, no background buffer + */ +static void +test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +{ + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + char *wbuf = NULL; + char *wbuf_bak = NULL; + char *rbuf = NULL; + char dset_name[DSET_NAME_LEN]; + + curr_nerrors = nerrors; + + if ((wbuf = (char *)HDmalloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) + P_TEST_ERROR; + if (mwbuf && (wbuf_bak = (char *)HDmalloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) + P_TEST_ERROR; + if ((rbuf = (char *)HDmalloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) + P_TEST_ERROR; + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + /* Generate dataset name */ + HDsnprintf(dset_name, sizeof(dset_name), "no_size_change_%s_%s", chunked ? "chunked" : "contig", + mwbuf ? 
"mwbuf" : "nomwbuf"); + + /* Create 1d dataset */ + if ((did = H5Dcreate2(fid, dset_name, H5T_STD_I32BE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + + /* Each process takes x number of elements */ + block[0] = dims[0] / (hsize_t)mpi_size; + stride[0] = block[0]; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + + /* Initialize data */ + for (i = 0; i < (int)block[0]; i++) { + wbuf[i * 4 + 3] = 0x1; + wbuf[i * 4 + 2] = 0x2; + wbuf[i * 4 + 1] = 0x3; + wbuf[i * 4 + 0] = 0x4; + } + + /* Create a memory dataspace independently */ + if ((mspace_id = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + + /* Create a file dataspace independently */ + if ((fspace_id = H5Dget_space(did)) < 0) + P_TEST_ERROR; + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + P_TEST_ERROR; + + /* Set selection I/O mode, type of I/O and type of collective I/O */ + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(wbuf_bak, wbuf, (size_t)(4 * DSET_SELECT_DIM)); + + /* Write the data to the dataset with little endian */ + if (H5Dwrite(did, H5T_STD_I32LE, mspace_id, fspace_id, dxpl, wbuf) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(wbuf, wbuf_bak, (size_t)(4 * DSET_SELECT_DIM)); + + check_io_mode(dxpl, chunked); + + /* Read the data from the dataset with little endian */ + if (H5Dread(did, H5T_STD_I32LE, mspace_id, fspace_id, dxpl, rbuf) < 0) + P_TEST_ERROR; + + /* Verify data read */ + for (i = 0; i < (int)block[0]; i++) { + if (rbuf[4 * i + 0] != wbuf[4 * i + 0] || rbuf[4 * i + 1] != wbuf[4 * i + 1] || + rbuf[4 * i + 2] != wbuf[4 * i + 2] || rbuf[4 * i + 3] != wbuf[4 * i + 3]) { + nerrors++; + HDprintf("\n Error in data verification:\n"); + HDprintf("\n Error in data verification at index %d\n", i + (int)start[0]); + break; + } + } + + /* Read the data from the dataset with big endian */ + if (H5Dread(did, H5T_STD_I32BE, mspace_id, fspace_id, dxpl, rbuf) < 0) + P_TEST_ERROR; + + /* Verify data read */ + for (i = 0; i < (int)block[0]; i++) { + if (rbuf[4 * i + 0] != wbuf[4 * i + 3] || rbuf[4 * i + 1] != wbuf[4 * i + 2] || + rbuf[4 * i + 2] != wbuf[4 * i + 1] || rbuf[4 * i + 3] != wbuf[4 * i + 0]) { + nerrors++; + HDprintf("\n Error in data verification at index %d\n", i + (int)start[0]); + break; + } + } + + if (H5Sclose(mspace_id) < 0) + P_TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + P_TEST_ERROR; + + if (H5Sclose(sid) < 0) + P_TEST_ERROR; + if (H5Dclose(did) < 0) + P_TEST_ERROR; + + if (wbuf) + HDfree(wbuf); + + if (wbuf_bak) + HDfree(wbuf_bak); + + if (rbuf) + HDfree(rbuf); + + CHECK_PASSED(); + + return; +} /* test_no_size_change_no_bkg() */ + +/* + * Case 3: single dataset read/write, larger mem type, no background buffer + */ +static void +test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +{ + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + long wbuf[DSET_SELECT_DIM]; + long wbuf_bak[DSET_SELECT_DIM]; + long 
trans_wbuf[DSET_SELECT_DIM]; + long long rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "100 - x"; + + curr_nerrors = nerrors; + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + /* Generate dataset name */ + HDsnprintf(dset_name, sizeof(dset_name), "larger_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + + /* Create 1d chunked dataset with/without data transform */ + if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + + /* Each process takes x number of elements */ + block[0] = dims[0] / (hsize_t)mpi_size; + stride[0] = block[0]; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + + /* Initialize data */ + for (i = 0; i < (int)block[0]; i++) { + wbuf[i] = i + (int)start[0]; + trans_wbuf[i] = 100 - wbuf[i]; + } + + /* Create a memory dataspace */ + if ((mspace_id = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + + /* Create a file dataspace */ + if ((fspace_id = H5Dget_space(did)) < 0) + P_TEST_ERROR; + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + P_TEST_ERROR; + + /* Set selection I/O mode, type of I/O and type of collective I/O */ + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + + if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) + P_TEST_ERROR; + + /* Set data transform */ + if (dtrans) + if (H5Pset_data_transform(dxpl, expr) < 0) + P_TEST_ERROR; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(wbuf_bak, wbuf, sizeof(wbuf)); + + /* Write data to the dataset with/without data transform set in dxpl */ + if (H5Dwrite(did, H5T_NATIVE_LONG, mspace_id, fspace_id, dxpl, wbuf) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(wbuf, wbuf_bak, sizeof(wbuf)); + + check_io_mode(dxpl, chunked); + + /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ + if (H5Dread(did, H5T_NATIVE_LLONG, mspace_id, fspace_id, ntrans_dxpl, rbuf) < 0) + P_TEST_ERROR; + + /* Verify data read (if dtrans, verify data is transformed) */ + for (i = 0; i < (int)block[0]; i++) + if (rbuf[i] != (long long)(dtrans ? trans_wbuf[i] : wbuf[i])) { + nerrors++; + HDprintf("\n Error in first data verification:\n"); + HDprintf(" At index %d: %lld, %lld\n", i + (int)start[0], + (long long)(dtrans ? 
trans_wbuf[i] : wbuf[i]), rbuf[i]); + break; + } + + if (dtrans) { + + /* Read data from the dataset with data transform set in dxpl */ + if (H5Dread(did, H5T_NATIVE_LLONG, mspace_id, fspace_id, dxpl, rbuf) < 0) + P_TEST_ERROR; + + /* Verify data read is transformed a second time */ + for (i = 0; i < (int)block[0]; i++) + if (rbuf[i] != (long long)(100 - trans_wbuf[i])) { + nerrors++; + HDprintf("\n Error in second data verification:.\n"); + HDprintf(" At index %d: %lld, %lld\n", i + (int)start[0], + (long long)(100 - trans_wbuf[i]), rbuf[i]); + break; + } + } + if (H5Sclose(mspace_id) < 0) + P_TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + P_TEST_ERROR; + if (H5Sclose(sid) < 0) + P_TEST_ERROR; + if (H5Dclose(did) < 0) + P_TEST_ERROR; + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + if (H5Pclose(ntrans_dxpl) < 0) + P_TEST_ERROR; + + CHECK_PASSED(); + + return; + +} /* test_larger_mem_type_no_bkg() */ + +/* + * Case 4: single dataset reader/write, smaller mem type, no background buffer + */ +static void +test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +{ + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + short wbuf[DSET_SELECT_DIM]; + int wbuf_bak[DSET_SELECT_DIM]; + short trans_wbuf[DSET_SELECT_DIM]; + short rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "2 * (10 + x)"; + + curr_nerrors = nerrors; + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + /* Generate dataset name */ + HDsnprintf(dset_name, sizeof(dset_name), "smaller_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", mwbuf ? 
"mwbuf" : "nomwbuf"); + + /* Create 1d chunked dataset with/without data transform */ + if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + + /* Each process takes x number of elements */ + block[0] = dims[0] / (hsize_t)mpi_size; + stride[0] = block[0]; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + + /* Initialize data */ + for (i = 0; i < (int)block[0]; i++) { + wbuf[i] = (short)(i + (int)start[0]); + trans_wbuf[i] = (short)(2 * (10 + wbuf[i])); + } + + /* Create a memory dataspace */ + if ((mspace_id = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + + /* Create a file dataspace */ + if ((fspace_id = H5Dget_space(did)) < 0) + P_TEST_ERROR; + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + P_TEST_ERROR; + + /* Set selection I/O mode, type of I/O and type of collective I/O */ + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + + if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) + P_TEST_ERROR; + + /* Set data transform */ + if (dtrans) { + if (H5Pset_data_transform(dxpl, expr) < 0) + P_TEST_ERROR; + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(wbuf_bak, wbuf, sizeof(wbuf)); + + /* Write data to the dataset with/without data transform in dxpl */ + if (H5Dwrite(did, H5T_NATIVE_SHORT, mspace_id, fspace_id, dxpl, wbuf) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(wbuf, wbuf_bak, sizeof(wbuf)); + + check_io_mode(dxpl, chunked); + + /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ + if (H5Dread(did, H5T_NATIVE_SHORT, mspace_id, fspace_id, ntrans_dxpl, rbuf) < 0) + P_TEST_ERROR; + + /* Verify data read (if dtrans, verify data is transformed) */ + for (i = 0; i < (int)block[0]; i++) + if (rbuf[i] != (dtrans ? 
trans_wbuf[i] : wbuf[i])) { + nerrors++; + HDprintf("\n Error in first data verification:\n"); + HDprintf(" At index %d: %d, %d\n", i + (int)start[0], wbuf[i], rbuf[i]); + break; + } + + if (dtrans) { + + /* Read data from the dataset with data transform set in dxpl */ + if (H5Dread(did, H5T_NATIVE_SHORT, mspace_id, fspace_id, dxpl, rbuf) < 0) + P_TEST_ERROR; + + /* Verify data read is transformed a second time */ + for (i = 0; i < (int)block[0]; i++) + if (rbuf[i] != (2 * (10 + trans_wbuf[i]))) { + nerrors++; + HDprintf("\n Error in second data verification:.\n"); + HDprintf(" At index %d: %d, %d\n", i + (int)start[0], (2 * (10 - trans_wbuf[i])), + rbuf[i]); + break; + } + } + + if (H5Sclose(mspace_id) < 0) + P_TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + P_TEST_ERROR; + + if (H5Sclose(sid) < 0) + P_TEST_ERROR; + if (H5Dclose(did) < 0) + P_TEST_ERROR; + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + if (H5Pclose(ntrans_dxpl) < 0) + P_TEST_ERROR; + + CHECK_PASSED(); + + return; + +} /* test_smaller_mem_type_no_bkg() */ + +/* + * Case 5: single dataset reade/write, compound types with background buffer + * + * (a) Initialize compound buffer in memory with unique values + * Write all compound fields to disk + * Verify values read + * (b) Update all fields of the compound type in memory write buffer with new unique values + * Write some but not all all compound fields to disk + * Read the entire compound type + * Verify the fields have the correct (old or new) values + * (c) Update all fields of the compound type in memory read buffer with new unique values + * Read some but not all the compound fields to memory + * Verify the fields have the correct (old, middle or new) values + * (d) Set up a different compound type which has: + * --no conversion for member types + * --a field with larger mem type + * --a field with smaller mem type + * Write this compound type to disk + * Read the entire compound type + * Verify the values read + */ +static void +test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +{ + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t s1_tid = H5I_INVALID_HID; + hid_t s2_tid = H5I_INVALID_HID; + hid_t ss_ac_tid = H5I_INVALID_HID; + hid_t ss_bc_tid = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + s1_t *s1_wbuf = NULL; + s1_t *s1_wbuf_bak = NULL; + s1_t *s1_rbuf = NULL; + s2_t *s2_wbuf = NULL; + s2_t *s2_wbuf_bak = NULL; + s2_t *s2_rbuf = NULL; + char dset_name[DSET_NAME_LEN]; + + curr_nerrors = nerrors; + + /* Allocate buffers for datasets */ + if (NULL == (s1_wbuf = (s1_t *)HDmalloc(sizeof(s1_t) * DSET_SELECT_DIM))) + P_TEST_ERROR; + if (mwbuf && NULL == (s1_wbuf_bak = (s1_t *)HDmalloc(sizeof(s1_t) * DSET_SELECT_DIM))) + P_TEST_ERROR; + if (NULL == (s1_rbuf = (s1_t *)HDmalloc(sizeof(s1_t) * DSET_SELECT_DIM))) + P_TEST_ERROR; + if (NULL == (s2_wbuf = (s2_t *)HDmalloc(sizeof(s2_t) * DSET_SELECT_DIM))) + P_TEST_ERROR; + if (mwbuf && NULL == (s2_wbuf_bak = (s2_t *)HDmalloc(sizeof(s2_t) * DSET_SELECT_DIM))) + P_TEST_ERROR; + if (NULL == (s2_rbuf = (s2_t *)HDmalloc(sizeof(s2_t) * DSET_SELECT_DIM))) + P_TEST_ERROR; + + /* Create the memory data type */ + if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + P_TEST_ERROR; + + if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), 
H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) + P_TEST_ERROR; + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + /* Case 5(a) */ + + /* Generate dataset name */ + HDsnprintf(dset_name, sizeof(dset_name), "cmpd_with_bkg_%s_%s", chunked ? "chunked" : "contig", + mwbuf ? "mwbuf" : "nomwbuf"); + + /* Create 1d dataset */ + if ((did = H5Dcreate2(fid, dset_name, s1_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + + /* Each process takes x number of elements */ + block[0] = dims[0] / (hsize_t)mpi_size; + stride[0] = block[0]; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + + /* Initialize data */ + for (i = 0; i < (int)block[0]; i++) { + s1_wbuf[i].a = 4 * (i + (int)start[0]); + s1_wbuf[i].b = 4 * (i + (int)start[0]) + 1; + s1_wbuf[i].c = 4 * (i + (int)start[0]) + 2; + s1_wbuf[i].d = 4 * (i + (int)start[0]) + 3; + } + + /* Create a memory dataspace */ + if ((mspace_id = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + + /* Create a file dataspace */ + if ((fspace_id = H5Dget_space(did)) < 0) + P_TEST_ERROR; + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + P_TEST_ERROR; + + /* Set selection I/O mode, type of I/O and type of collective I/O */ + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(s1_wbuf_bak, s1_wbuf, sizeof(s1_t) * DSET_SELECT_DIM); + + /* Write all the data to the dataset */ + if (H5Dwrite(did, s1_tid, mspace_id, fspace_id, dxpl, s1_wbuf) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(s1_wbuf, s1_wbuf_bak, sizeof(s1_t) * DSET_SELECT_DIM); + + check_io_mode(dxpl, chunked); + + /* Read all the data from the dataset */ + HDmemset(s1_rbuf, 0, sizeof(s1_t) * DSET_SELECT_DIM); + if (H5Dread(did, s1_tid, mspace_id, fspace_id, dxpl, s1_rbuf) < 0) + P_TEST_ERROR; + + /* Verify data read */ + for (i = 0; i < (int)block[0]; i++) + if (s1_wbuf[i].a != s1_rbuf[i].a || s1_wbuf[i].b != s1_rbuf[i].b || s1_wbuf[i].c != s1_rbuf[i].c || + s1_wbuf[i].d != s1_rbuf[i].d) { + nerrors++; + HDprintf("\n Error in 1st data verification:\n"); + HDprintf(" At index %d: %d/%d, %d/%d, %d/%d, %d/%d\n", i + (int)start[0], s1_wbuf[i].a, + s1_rbuf[i].a, s1_wbuf[i].b, s1_rbuf[i].b, s1_wbuf[i].c, s1_rbuf[i].c, s1_wbuf[i].d, + s1_rbuf[i].d); + break; + } + + /* Case 5(b) */ + + /* Update s1_wbuf with unique values */ + for (i = 0; i < (int)block[0]; i++) { + s1_wbuf[i].a = 4 * (i + (int)start[0]) + DSET_SELECT_DIM; + s1_wbuf[i].b = 4 * (i + (int)start[0]) + DSET_SELECT_DIM + 1; + s1_wbuf[i].c = 4 * (i + (int)start[0]) + DSET_SELECT_DIM + 2; + s1_wbuf[i].d = 4 * (i + (int)start[0]) + DSET_SELECT_DIM + 3; + } + + /* Create a compound type same size as s1_t */ + if ((ss_ac_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + P_TEST_ERROR; + + /* but contains only subset members of s1_t */ + if (H5Tinsert(ss_ac_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(ss_ac_tid, "c", 
HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) + P_TEST_ERROR; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(s1_wbuf_bak, s1_wbuf, sizeof(s1_t) * DSET_SELECT_DIM); + + /* Write s1_wbuf to the dataset but with only subset members in ss_tid */ + if (H5Dwrite(did, ss_ac_tid, mspace_id, fspace_id, dxpl, s1_wbuf) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(s1_wbuf, s1_wbuf_bak, sizeof(s1_t) * DSET_SELECT_DIM); + + /* Read the whole compound back */ + HDmemset(s1_rbuf, 0, sizeof(s1_t) * DSET_SELECT_DIM); + if (H5Dread(did, s1_tid, mspace_id, fspace_id, dxpl, s1_rbuf) < 0) + P_TEST_ERROR; + + /* Verify the compound fields have the correct (old or new) values */ + for (i = 0; i < (int)block[0]; i++) + if (s1_rbuf[i].a != s1_wbuf[i].a || s1_rbuf[i].b != (4 * (i + (int)start[0]) + 1) || + s1_rbuf[i].c != s1_wbuf[i].c || s1_rbuf[i].d != (4 * (i + (int)start[0]) + 3)) { + nerrors++; + HDprintf("\n Error in 2nd data verification:\n"); + HDprintf(" At index %d: %d/%d, %d/%d, %d/%d, %d/%d\n", i + (int)start[0], s1_wbuf[i].a, + s1_rbuf[i].a, (4 * (i + (int)start[0]) + 1), s1_rbuf[i].b, s1_wbuf[i].c, s1_rbuf[i].c, + (4 * (i + (int)start[0]) + 3), s1_rbuf[i].d); + break; + } + + /* Case 5(c) */ + + /* Update s1_rbuf with new unique values */ + for (i = 0; i < (int)block[0]; i++) { + s1_rbuf[i].a = (4 * (i + (int)start[0])) + (2 * DSET_SELECT_DIM); + s1_rbuf[i].b = (4 * (i + (int)start[0])) + (2 * DSET_SELECT_DIM) + 1; + s1_rbuf[i].c = (4 * (i + (int)start[0])) + (2 * DSET_SELECT_DIM) + 2; + s1_rbuf[i].d = (4 * (i + (int)start[0])) + (2 * DSET_SELECT_DIM) + 3; + } + + /* Create a compound type same size as s1_t */ + if ((ss_bc_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + P_TEST_ERROR; + + /* but contains only subset members of s1_t */ + if (H5Tinsert(ss_bc_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(ss_bc_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) + P_TEST_ERROR; + + /* Read the dataset: will read only what is set in ss_bc_tid */ + if (H5Dread(did, ss_bc_tid, mspace_id, fspace_id, dxpl, s1_rbuf) < 0) + P_TEST_ERROR; + + /* Verify data read */ + for (i = 0; i < (int)block[0]; i++) + if (s1_rbuf[i].a != ((4 * (i + (int)start[0])) + (2 * DSET_SELECT_DIM)) || + s1_rbuf[i].b != (4 * (i + (int)start[0]) + 1) || + s1_rbuf[i].c != (4 * (i + (int)start[0]) + DSET_SELECT_DIM + 2) || + s1_rbuf[i].d != ((4 * (i + (int)start[0])) + (2 * DSET_SELECT_DIM) + 3)) { + nerrors++; + HDprintf("\n Error in 3rd data verification:\n"); + HDprintf(" At index %d: %d/%d, %d/%d, %d/%d, %d/%d\n", i + (int)start[0], + ((4 * (i + (int)start[0])) + (2 * DSET_SELECT_DIM)), s1_rbuf[i].a, + (4 * (i + (int)start[0]) + 1), s1_rbuf[i].b, + (4 * (i + (int)start[0]) + DSET_SELECT_DIM + 2), s1_rbuf[i].c, + ((4 * (i + (int)start[0])) + (2 * DSET_SELECT_DIM) + 3), s1_rbuf[i].d); + break; + } + + /* Case 5(d) */ + + /* Create s2_t compound type with: + * --no conversion for 2 member types, + * --1 larger mem type + * --1 smaller mem type + */ + if ((s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t))) < 0) + P_TEST_ERROR; + + if (H5Tinsert(s2_tid, "a", HOFFSET(s2_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s2_tid, "b", HOFFSET(s2_t, b), H5T_NATIVE_LLONG) < 0 || + H5Tinsert(s2_tid, "c", HOFFSET(s2_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s2_tid, "d", HOFFSET(s2_t, d), H5T_NATIVE_SHORT) < 0) + P_TEST_ERROR; + + /* Update s2_wbuf with unique values */ + for (i = 0; i < (int)block[0]; i++) { + s2_wbuf[i].a = (8 * (i + (int)start[0])); + 
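/*
 * Why the tests keep *_wbuf_bak copies: when H5Pset_modify_write_buf() is
 * enabled on the DXPL (the mwbuf case), the library is allowed to perform
 * type conversion in place in the application's write buffer, so the buffer
 * contents are undefined after H5Dwrite(). A sketch of the backup/restore
 * pattern used throughout this file (names match the surrounding code):
 *
 *     if (mwbuf)
 *         HDmemcpy(s2_wbuf_bak, s2_wbuf, sizeof(s2_t) * DSET_SELECT_DIM);  // keep a pristine copy
 *     if (H5Dwrite(did, s2_tid, mspace_id, fspace_id, dxpl, s2_wbuf) < 0)
 *         P_TEST_ERROR;
 *     if (mwbuf)
 *         HDmemcpy(s2_wbuf, s2_wbuf_bak, sizeof(s2_t) * DSET_SELECT_DIM);  // restore for later verification
 */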
s2_wbuf[i].b = (long long)(8 * (i + (int)start[0]) + 1); + s2_wbuf[i].c = (8 * (i + (int)start[0]) + 2); + s2_wbuf[i].d = (short)(8 * (i + (int)start[0]) + 3); + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(s2_wbuf_bak, s2_wbuf, sizeof(s2_t) * DSET_SELECT_DIM); + + if (H5Dwrite(did, s2_tid, mspace_id, fspace_id, dxpl, s2_wbuf) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(s2_wbuf, s2_wbuf_bak, sizeof(s2_t) * DSET_SELECT_DIM); + + /* Read it back */ + HDmemset(s2_rbuf, 0, sizeof(s2_t) * DSET_SELECT_DIM); + if (H5Dread(did, s2_tid, mspace_id, fspace_id, dxpl, s2_rbuf) < 0) + P_TEST_ERROR; + + /* Verify data read */ + for (i = 0; i < (int)block[0]; i++) + if (s2_wbuf[i].a != s2_rbuf[i].a || s2_wbuf[i].b != s2_rbuf[i].b || s2_wbuf[i].c != s2_rbuf[i].c || + s2_wbuf[i].d != s2_rbuf[i].d) { + nerrors++; + HDprintf("\n Error in 4th data verification:\n"); + HDprintf(" At index %d: %d/%d, %lld/%lld, %d/%d, %d/%d\n", i + (int)start[0], s2_wbuf[i].a, + s2_rbuf[i].a, s2_wbuf[i].b, s2_rbuf[i].b, s2_wbuf[i].c, s2_rbuf[i].c, s2_wbuf[i].d, + s2_rbuf[i].d); + break; + } + + if (H5Sclose(mspace_id) < 0) + P_TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + P_TEST_ERROR; + + if (H5Sclose(sid) < 0) + P_TEST_ERROR; + if (H5Tclose(s1_tid) < 0) + P_TEST_ERROR; + if (H5Tclose(s2_tid) < 0) + P_TEST_ERROR; + if (H5Tclose(ss_ac_tid) < 0) + P_TEST_ERROR; + if (H5Tclose(ss_bc_tid) < 0) + P_TEST_ERROR; + if (H5Dclose(did) < 0) + P_TEST_ERROR; + + /* Release buffers */ + HDfree(s1_wbuf); + HDfree(s1_wbuf_bak); + HDfree(s1_rbuf); + HDfree(s2_wbuf); + HDfree(s2_wbuf_bak); + HDfree(s2_rbuf); + + CHECK_PASSED(); + + return; + +} /* test_cmpd_with_bkg() */ + +/* + * Case 6: Type conversions + some processes have null/empty selections in datasets + */ +static void +test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +{ + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + + long lwbuf[DSET_SELECT_DIM]; + long lwbuf_bak[DSET_SELECT_DIM]; + long trans_lwbuf[DSET_SELECT_DIM]; + long lrbuf[DSET_SELECT_DIM]; + short srbuf[DSET_SELECT_DIM]; + short swbuf[DSET_SELECT_DIM]; + short swbuf_bak[DSET_SELECT_DIM]; + short trans_swbuf[DSET_SELECT_DIM]; + long long llrbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + + const char *expr = "2*x"; + + curr_nerrors = nerrors; + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + /* Generate dataset name */ + HDsnprintf(dset_name, sizeof(dset_name), "tconv_sel_empty_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", mwbuf ? 
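/*
 * H5Pset_data_transform() attaches an algebraic expression that the library
 * applies element by element during I/O: with "2*x" on the write DXPL the
 * file ends up holding 2*wbuf[i], and reading back through a DXPL that also
 * carries "2*x" doubles the stored value once more. A minimal sketch
 * (hypothetical ids, error checks omitted):
 *
 *     const char *expr = "2*x";
 *     hid_t xf_dxpl = H5Pcopy(dxpl);            // dxpl already set up for collective I/O
 *     H5Pset_data_transform(xf_dxpl, expr);
 *     H5Dwrite(did, H5T_NATIVE_LONG, mspace_id, fspace_id, xf_dxpl, lwbuf);  // stores 2*x
 *     H5Dread(did, H5T_NATIVE_LONG, mspace_id, fspace_id, dxpl, lrbuf);      // reads the stored 2*x back
 *     H5Pclose(xf_dxpl);
 */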
"mwbuf" : "nomwbuf"); + + /* Create dataset */ + if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + P_TEST_ERROR; + + /* Set selection I/O mode, type of I/O and type of collective I/O */ + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + + if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) + P_TEST_ERROR; + + /* Set data transform */ + if (dtrans) { + if (H5Pset_data_transform(dxpl, expr) < 0) + P_TEST_ERROR; + } + + /* Each process takes x number of elements */ + block[0] = dims[0] / (hsize_t)mpi_size; + stride[0] = block[0]; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + + /* Initialize data */ + for (i = 0; i < (int)block[0]; i++) { + lwbuf[i] = i + (int)start[0]; + trans_lwbuf[i] = 2 * lwbuf[i]; + } + + /* Case 6(a) process 0: hyperslab; other processes: select none */ + + /* Create a file dataspace */ + if ((fspace_id = H5Dget_space(did)) < 0) + P_TEST_ERROR; + if (MAINPROCESS) { + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + } + else { + if (H5Sselect_none(fspace_id) < 0) + P_TEST_ERROR; + } + + /* Create a memory dataspace */ + if ((mspace_id = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + if (mpi_rank) { + if (H5Sselect_none(mspace_id) < 0) + P_TEST_ERROR; + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(lwbuf_bak, lwbuf, sizeof(lwbuf)); + + /* Write data to the dataset with/without data transform in dxpl */ + if (H5Dwrite(did, H5T_NATIVE_LONG, mspace_id, fspace_id, dxpl, lwbuf) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(lwbuf, lwbuf_bak, sizeof(lwbuf)); + + check_io_mode(dxpl, chunked); + + /* Read the data from the dataset: type conversion int-->long */ + /* If dtrans, without data transform set in dxpl */ + if (H5Dread(did, H5T_NATIVE_LONG, mspace_id, fspace_id, ntrans_dxpl, lrbuf) < 0) + P_TEST_ERROR; + + if (MAINPROCESS) { + for (i = 0; i < (int)block[0]; i++) + if (lrbuf[i] != (dtrans ? trans_lwbuf[i] : lwbuf[i])) { + nerrors++; + HDprintf("\n Error in first data verification:\n"); + HDprintf(" At index %d: %ld, %ld\n", i + (int)start[0], + dtrans ? trans_lwbuf[i] : lwbuf[i], lrbuf[i]); + break; + } + } + + if (H5Sclose(mspace_id) < 0) + P_TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + P_TEST_ERROR; + + /* Case 6(b) process 0: get 0 row; other processes: hyperslab */ + + block[0] = mpi_rank ? (dims[0] / (hsize_t)mpi_size) : 0; + stride[0] = mpi_rank ? block[0] : 1; + count[0] = 1; + start[0] = mpi_rank ? 
((hsize_t)mpi_rank * block[0]) : 0; + + /* Create a file dataspace */ + if ((fspace_id = H5Dget_space(did)) < 0) + P_TEST_ERROR; + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + + /* Create a memory dataspace */ + if ((mspace_id = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + + /* need to make memory space to match for process 0 */ + if (MAINPROCESS) { + if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(lwbuf_bak, lwbuf, sizeof(lwbuf)); + + /* Write data to the dataset with/without data transform */ + if (H5Dwrite(did, H5T_NATIVE_LONG, mspace_id, fspace_id, dxpl, lwbuf) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(lwbuf, lwbuf_bak, sizeof(lwbuf)); + + /* Read the data from the dataset: type conversion int-->short */ + /* If dtrans, without data transform set in dxpl */ + if (H5Dread(did, H5T_NATIVE_SHORT, mspace_id, fspace_id, ntrans_dxpl, srbuf) < 0) + P_TEST_ERROR; + + if (mpi_rank) { + for (i = 0; i < (int)block[0]; i++) + if (srbuf[i] != (short)(dtrans ? trans_lwbuf[i] : lwbuf[i])) { + HDprintf("\n Error in second data verification:\n"); + HDprintf(" At index %d: %d, %d\n", i + (int)start[0], + (short)(dtrans ? trans_lwbuf[i] : lwbuf[i]), srbuf[i]); + break; + } + } + + if (H5Sclose(mspace_id) < 0) + P_TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + P_TEST_ERROR; + + /* Case 6(c) process 0: select none; other processes: select all */ + + /* Initialize data */ + block[0] = DSET_SELECT_DIM; + for (i = 0; i < (int)block[0]; i++) { + swbuf[i] = (short)(i + DSET_SELECT_DIM); + trans_swbuf[i] = (short)(2 * swbuf[i]); + } + + /* Create a file dataspace */ + if ((fspace_id = H5Dget_space(did)) < 0) + P_TEST_ERROR; + if (MAINPROCESS) { + if (H5Sselect_none(fspace_id) < 0) + P_TEST_ERROR; + } + else { + if (H5Sselect_all(fspace_id) < 0) + P_TEST_ERROR; + } + + /* Create a memory dataspace */ + if ((mspace_id = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + if (MAINPROCESS) { + if (H5Sselect_none(mspace_id) < 0) + P_TEST_ERROR; + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(swbuf_bak, swbuf, sizeof(swbuf)); + + /* Write data to the dataset with/without data transform */ + if (H5Dwrite(did, H5T_NATIVE_SHORT, mspace_id, fspace_id, dxpl, swbuf) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(swbuf, swbuf_bak, sizeof(swbuf)); + + /* Read the data from the dataset: type conversion int-->llong */ + /* If dtrans, without data transform set in dxpl */ + if (H5Dread(did, H5T_NATIVE_LLONG, mspace_id, fspace_id, ntrans_dxpl, llrbuf) < 0) + P_TEST_ERROR; + + if (mpi_rank) { + for (i = 0; i < (int)block[0]; i++) + if (llrbuf[i] != (long long)(dtrans ? trans_swbuf[i] : swbuf[i])) { + HDprintf("\n Error in third data verification:\n"); + HDprintf(" At index %d: %lld, %lld\n", i + (int)start[0], + (long long)(dtrans ? 
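/*
 * Cases 6(a)-6(c) above exercise ranks that contribute nothing to a
 * collective transfer: a rank can call H5Sselect_none() on both its file and
 * memory dataspaces, or select a zero-sized hyperslab (block[0] == 0, which
 * is why the code keeps stride[0] at 1 on rank 0, since a zero stride is not
 * legal), and it must still issue the same collective H5Dwrite()/H5Dread()
 * call as every other rank. A minimal sketch with hypothetical ids:
 *
 *     hsize_t start[1] = {0}, stride[1] = {1}, count[1] = {1}, block[1] = {0};
 *     if (mpi_rank != 0) {                  // nothing to transfer on this rank
 *         H5Sselect_none(fspace);
 *         H5Sselect_none(mspace);
 *     }
 *     else {                                // zero-sized hyperslab: 0 elements selected
 *         H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, block);
 *         H5Sselect_hyperslab(mspace, H5S_SELECT_SET, start, stride, count, block);
 *     }
 *     H5Dwrite(did, H5T_NATIVE_LONG, mspace, fspace, dxpl, wbuf);   // every rank joins the collective call
 */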
trans_swbuf[i] : swbuf[i]), llrbuf[i]); + break; + } + } + + if (H5Sclose(mspace_id) < 0) + P_TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + P_TEST_ERROR; + + if (H5Sclose(sid) < 0) + P_TEST_ERROR; + if (H5Dclose(did) < 0) + P_TEST_ERROR; + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + if (H5Pclose(ntrans_dxpl) < 0) + P_TEST_ERROR; + + CHECK_PASSED(); + + return; + +} /* test_type_conv_sel_empty() */ + +/* + * Test 1 for multi-dataset: + * --Datasets with/without type conversion+smaller/larger mem type+no background buffer + * + * Create datasets: randomized H5T_NATIVE_INT or H5T_NATIVE_LONG + * + * Case a--setting for multi write/read to ndsets: + * Datatype for all datasets: H5T_NATIVE_INT + * + * Case b--setting for multi write/read to ndsets: + * Datatype for all datasets: H5T_NATIVE_LONG + */ +static void +test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +{ + size_t ndsets; + int i, j; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + + hid_t file_sids[MULTI_NUM_DSETS]; + hid_t mem_sids[MULTI_NUM_DSETS]; + hid_t mem_tids[MULTI_NUM_DSETS]; + + char dset_names[MULTI_NUM_DSETS][DSET_NAME_LEN]; + hid_t dset_dids[MULTI_NUM_DSETS]; + + size_t buf_size; + + int *total_wbuf = NULL; + int *total_wbuf_bak = NULL; + int *total_trans_wbuf = NULL; + int *total_rbuf = NULL; + + long *total_lwbuf = NULL; + long *total_lwbuf_bak = NULL; + long *total_trans_lwbuf = NULL; + long *total_lrbuf = NULL; + + int *wbufi[MULTI_NUM_DSETS]; + int *trans_wbufi[MULTI_NUM_DSETS]; + int *rbufi[MULTI_NUM_DSETS]; + + long *lwbufi[MULTI_NUM_DSETS]; + long *trans_lwbufi[MULTI_NUM_DSETS]; + long *lrbufi[MULTI_NUM_DSETS]; + + const void *wbufs[MULTI_NUM_DSETS]; + void *rbufs[MULTI_NUM_DSETS]; + const char *expr = "2*x"; + + curr_nerrors = nerrors; + + ndsets = MAX(MULTI_MIN_DSETS, MULTI_NUM_DSETS); + + dims[0] = DSET_SELECT_DIM; + + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + P_TEST_ERROR; + + /* Set selection I/O mode, type of I/O and type of collective I/O */ + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + + if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) + P_TEST_ERROR; + + /* Set data transform */ + if (dtrans) + if (H5Pset_data_transform(dxpl, expr) < 0) + P_TEST_ERROR; + + /* Set up file space ids and dataset ids */ + for (i = 0; i < (int)ndsets; i++) { + if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + + /* Generate dataset name */ + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + + /* Create ith dataset */ + if ((dset_dids[i] = + H5Dcreate2(fid, dset_names[i], ((HDrandom() % 2) ? 
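/*
 * The multi-dataset tests below drive H5Dwrite_multi()/H5Dread_multi(): one
 * call transfers `count` datasets, with parallel arrays of dataset,
 * memory-type, memory-space and file-space ids plus one buffer pointer per
 * dataset. A minimal two-dataset sketch (d0/d1, m0/m1, f0/f1 and b0/b1 are
 * hypothetical ids and buffers):
 *
 *     hid_t       dsets[2]  = {d0, d1};
 *     hid_t       mtypes[2] = {H5T_NATIVE_INT, H5T_NATIVE_INT};
 *     hid_t       mspaces[2] = {m0, m1};
 *     hid_t       fspaces[2] = {f0, f1};
 *     const void *bufs[2]    = {b0, b1};
 *     if (H5Dwrite_multi(2, dsets, mtypes, mspaces, fspaces, dxpl, bufs) < 0)
 *         P_TEST_ERROR;
 */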
H5T_NATIVE_LONG : H5T_NATIVE_INT), + file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + } + + /* Each process takes x number of elements */ + block[0] = dims[0] / (hsize_t)mpi_size; + stride[0] = block[0]; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + + for (i = 0; i < (int)ndsets; i++) { + if ((mem_sids[i] = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + + if (H5Sselect_hyperslab(file_sids[i], H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + } + + buf_size = ndsets * DSET_SELECT_DIM * sizeof(int); + + /* Allocate buffers for all datasets */ + if (NULL == (total_wbuf = (int *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (mwbuf && NULL == (total_wbuf_bak = (int *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_trans_wbuf = (int *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_rbuf = (int *)HDmalloc(buf_size))) + P_TEST_ERROR; + + buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); + + if (NULL == (total_lwbuf = (long *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (mwbuf && NULL == (total_lwbuf_bak = (long *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_trans_lwbuf = (long *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_lrbuf = (long *)HDmalloc(buf_size))) + P_TEST_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + wbufi[i] = total_wbuf + (i * DSET_SELECT_DIM); + rbufi[i] = total_rbuf + (i * DSET_SELECT_DIM); + trans_wbufi[i] = total_trans_wbuf + (i * DSET_SELECT_DIM); + + wbufs[i] = wbufi[i]; + rbufs[i] = rbufi[i]; + } + + /* Case a */ + + /* Initialize the buffer data */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < (int)block[0]; j++) { + wbufi[i][j] = j + (int)start[0]; + trans_wbufi[i][j] = 2 * wbufi[i][j]; + } + + /* Datatype setting for multi write/read */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = H5T_NATIVE_INT; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_wbuf_bak, total_wbuf, ndsets * DSET_SELECT_DIM * sizeof(int)); + + /* Write data to the dataset with/without data transform */ + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_wbuf, total_wbuf_bak, ndsets * DSET_SELECT_DIM * sizeof(int)); + + check_io_mode(dxpl, chunked); + + /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, ntrans_dxpl, rbufs) < 0) + P_TEST_ERROR; + + /* Verify */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < (int)block[0]; j++) + if (rbufi[i][j] != (dtrans ? trans_wbufi[i][j] : wbufi[i][j])) { + nerrors++; + HDprintf("\n Error in 1st data verification for dset %d:\n", i); + HDprintf(" At index %d: %d, %d\n", j + (int)start[0], + dtrans ? 
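/*
 * All per-dataset buffers live in one contiguous allocation; wbufi[i] and
 * rbufi[i] are just offsets into it, which keeps a single HDmalloc()/HDfree()
 * pair per element type. Sketch of the layout, DSET_SELECT_DIM elements per
 * dataset (the "total"/"bufi" names are hypothetical):
 *
 *     int *total = HDmalloc(ndsets * DSET_SELECT_DIM * sizeof(int));
 *     int *bufi[MULTI_NUM_DSETS];
 *     for (size_t k = 0; k < ndsets; k++)
 *         bufi[k] = total + k * DSET_SELECT_DIM;   // dataset k's slice of the allocation
 */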
trans_wbufi[i][j] : wbufi[i][j], rbufi[i][j]); + break; + } + + if (dtrans) { + + /* Read the data from the dataset with data transform set in dxpl */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + P_TEST_ERROR; + + /* Verify */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < (int)block[0]; j++) + if (rbufi[i][j] != (2 * trans_wbufi[i][j])) { + nerrors++; + HDprintf("\n Error in 1st (with dtrans) data verification for dset %d:\n", i); + HDprintf(" At index %d: %d, %d\n", j + (int)start[0], 2 * trans_wbufi[i][j], + rbufi[i][j]); + break; + } + } + + /* Case b */ + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + lwbufi[i] = total_lwbuf + (i * DSET_SELECT_DIM); + trans_lwbufi[i] = total_trans_lwbuf + (i * DSET_SELECT_DIM); + lrbufi[i] = total_lrbuf + (i * DSET_SELECT_DIM); + wbufs[i] = lwbufi[i]; + rbufs[i] = lrbufi[i]; + } + + /* Initialize the buffer data */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < (int)block[0]; j++) { + lwbufi[i][j] = j + (int)start[0] + DSET_SELECT_DIM; + trans_lwbufi[i][j] = 2 * lwbufi[i][j]; + } + + /* Datatype setting for multi write/read */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = H5T_NATIVE_LONG; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_lwbuf_bak, total_lwbuf, ndsets * DSET_SELECT_DIM * sizeof(long)); + + /* Write data to the dataset with/without data transform */ + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_lwbuf, total_lwbuf_bak, ndsets * DSET_SELECT_DIM * sizeof(long)); + + /* Read data from the dataset (if dtrans, with data transform again in dxpl */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + P_TEST_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + for (j = 0; j < (int)block[0]; j++) { + if (lrbufi[i][j] != (dtrans ? (2 * trans_lwbufi[i][j]) : lwbufi[i][j])) { + nerrors++; + HDprintf("\n Error in 2nd data verification for dset %d:\n", i); + HDprintf(" At index %d: %ld/%ld\n", j + (int)start[0], + (dtrans ? 
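/*
 * check_io_mode(), invoked after each test's first collective transfer above,
 * is a helper defined earlier in this file; conceptually it asks the DXPL
 * whether the expected I/O path was taken. A sketch of the kind of query
 * involved (the helper's exact checks may differ):
 *
 *     H5D_mpio_actual_io_mode_t mode;
 *     if (H5Pget_mpio_actual_io_mode(dxpl, &mode) < 0)
 *         P_TEST_ERROR;
 *     if (chunked ? (mode != H5D_MPIO_CHUNK_COLLECTIVE)
 *                 : (mode != H5D_MPIO_CONTIGUOUS_COLLECTIVE))
 *         P_TEST_ERROR;
 */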
(2 * trans_lwbufi[i][j]) : lwbufi[i][j]), lrbufi[i][j]); + break; + } + } + } + + if (H5Pclose(dcpl) < 0) + P_TEST_ERROR; + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + if (H5Pclose(ntrans_dxpl) < 0) + P_TEST_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + if (H5Sclose(file_sids[i]) < 0) + P_TEST_ERROR; + if (H5Sclose(mem_sids[i]) < 0) + P_TEST_ERROR; + if (H5Dclose(dset_dids[i]) < 0) + P_TEST_ERROR; + } + + HDfree(total_wbuf); + HDfree(total_wbuf_bak); + HDfree(total_rbuf); + HDfree(total_trans_wbuf); + HDfree(total_lwbuf); + HDfree(total_lwbuf_bak); + HDfree(total_trans_lwbuf); + HDfree(total_lrbuf); + + CHECK_PASSED(); + + return; + +} /* test_multi_dsets_no_bkg() */ + +/* + * Test 2 for multi-dataset: + * Datasets with compound types+background buffer + * + * Create datasets with the same compound type + * Case (a) Initialize compound buffer in memory with unique values + * All datasets: + * --Write all compound fields to disk + * --Read the entire compound type for all datasets + * --Verify values read + * Case (b) Update all fields of the compound type in memory write buffer with new unique values + * dset0: + * --Write some but not all all compound fields to disk + * --Read and verify the fields have the correct (old(a) or new) values + * Remaining datasets: + * --Untouched + * --Read and verify the fields have the correct old(a) values + * Case (c) Update all fields of the compound type in memory read buffer with new unique values + * Randomized dataset: + * --Read some but not all the compound fields to memory + * --Verify the fields have the correct (old(a) or new) values + * dset0: + * --Untouched + * --Read and verify the fields have the correct (old(a) or middle(b)) values + * Remaining datasets: + * --Untouched + * --Read and verify the fields have the correct old(a) values + * Case (d) Set up a different compound type which has: + * --no type conversion for 2 member types + * --a field with larger mem type + * --a field with smaller mem type + * All datasets: + * --Write the compound fields to disk + * --Read the entire compound type + * --Verify values read + */ +static void +test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +{ + size_t ndsets; + int i, j, mm; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + + hid_t file_sids[MULTI_NUM_DSETS]; + hid_t mem_sids[MULTI_NUM_DSETS]; + hid_t mem_tids[MULTI_NUM_DSETS]; + + hid_t s1_tid = H5I_INVALID_HID; + hid_t ss_ac_tid = H5I_INVALID_HID; + hid_t ss_bc_tid = H5I_INVALID_HID; + hid_t s2_tid = H5I_INVALID_HID; + + char dset_names[MULTI_NUM_DSETS][DSET_NAME_LEN]; + hid_t dset_dids[MULTI_NUM_DSETS]; + + size_t buf_size; + size_t s2_buf_size; + + s1_t *total_wbuf = NULL; + s1_t *total_wbuf_bak = NULL; + s1_t *total_rbuf = NULL; + + s2_t *s2_total_wbuf = NULL; + s2_t *s2_total_wbuf_bak = NULL; + s2_t *s2_total_rbuf = NULL; + + s1_t *wbufi[MULTI_NUM_DSETS]; + s1_t *rbufi[MULTI_NUM_DSETS]; + + s2_t *s2_wbufi[MULTI_NUM_DSETS]; + s2_t *s2_rbufi[MULTI_NUM_DSETS]; + + const void *wbufs[MULTI_NUM_DSETS]; + void *rbufs[MULTI_NUM_DSETS]; + + curr_nerrors = nerrors; + + ndsets = MAX(MULTI_MIN_DSETS, MULTI_NUM_DSETS); + + dims[0] = DSET_SELECT_DIM; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 
0) + P_TEST_ERROR; + + /* Set selection I/O mode, type of I/O and type of collective I/O */ + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + + /* Each process takes x number of elements */ + block[0] = dims[0] / (hsize_t)mpi_size; + stride[0] = block[0]; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + + /* Create the memory data type */ + if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + P_TEST_ERROR; + + if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) + P_TEST_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + if ((mem_sids[i] = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + + /* Generate dataset name */ + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_cmpd_dset%d_%s_%s", i, + chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + + /* Create ith dataset */ + if ((dset_dids[i] = + H5Dcreate2(fid, dset_names[i], s1_tid, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + + if (H5Sselect_hyperslab(file_sids[i], H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + } + + buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); + s2_buf_size = ndsets * DSET_SELECT_DIM * sizeof(s2_t); + + /* Allocate buffers for all datasets */ + if (NULL == (total_wbuf = (s1_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (mwbuf && NULL == (total_wbuf_bak = (s1_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_rbuf = (s1_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + + if (NULL == (s2_total_wbuf = (s2_t *)HDmalloc(s2_buf_size))) + P_TEST_ERROR; + if (mwbuf && NULL == (s2_total_wbuf_bak = (s2_t *)HDmalloc(s2_buf_size))) + P_TEST_ERROR; + if (NULL == (s2_total_rbuf = (s2_t *)HDmalloc(s2_buf_size))) + P_TEST_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + wbufi[i] = total_wbuf + (i * DSET_SELECT_DIM); + rbufi[i] = total_rbuf + (i * DSET_SELECT_DIM); + + wbufs[i] = wbufi[i]; + rbufs[i] = rbufi[i]; + } + + /* Case a */ + + /* Initialize the buffer data for all the datasets */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < (int)block[0]; j++) { + wbufi[i][j].a = 4 * (j + (int)start[0]); + wbufi[i][j].b = 4 * (j + (int)start[0]) + 1; + wbufi[i][j].c = 4 * (j + (int)start[0]) + 2; + wbufi[i][j].d = 4 * (j + (int)start[0]) + 3; + } + + /* Datatype setting for multi write */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = s1_tid; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_wbuf_bak, total_wbuf, buf_size); + + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_wbuf, total_wbuf_bak, buf_size); + + check_io_mode(dxpl, chunked); + + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + P_TEST_ERROR; + + /* Verify data read */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < (int)block[0]; j++) { + if (wbufi[i][j].a != rbufi[i][j].a || wbufi[i][j].b != rbufi[i][j].b || + wbufi[i][j].c != rbufi[i][j].c || wbufi[i][j].d != rbufi[i][j].d) { + nerrors++; + HDprintf("\n Error in 1st data verification for dset %d:\n", i); + HDprintf(" At index %d: 
%d/%d, %d/%d, %d/%d, %d/%d\n", j + (int)start[0], wbufi[i][j].a, + rbufi[i][j].a, wbufi[i][j].b, rbufi[i][j].b, wbufi[i][j].c, rbufi[i][j].c, + wbufi[i][j].d, rbufi[i][j].d); + + break; + } + } + + /* Case b */ + + /* Update data in wbufi for dset0 with unique values */ + for (j = 0; j < (int)block[0]; j++) { + wbufi[0][j].a = (4 * (j + (int)start[0])) + DSET_SELECT_DIM; + wbufi[0][j].b = (4 * (j + (int)start[0])) + DSET_SELECT_DIM + 1; + wbufi[0][j].c = (4 * (j + (int)start[0])) + DSET_SELECT_DIM + 2; + wbufi[0][j].d = (4 * (j + (int)start[0])) + DSET_SELECT_DIM + 3; + } + + /* Create a compound type same size as s1_t */ + if ((ss_ac_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + P_TEST_ERROR; + + /* but contains only subset members of s1_t */ + if (H5Tinsert(ss_ac_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(ss_ac_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) + P_TEST_ERROR; + + /* Datatype setting for write to dset0 */ + mem_tids[0] = ss_ac_tid; + + /* Untouched memory and file spaces for other datasets */ + for (i = 0; i < (int)ndsets; i++) { + if (i == 0) + continue; + + if (H5Sselect_none(mem_sids[i]) < 0) + P_TEST_ERROR; + if (H5Sselect_none(file_sids[i]) < 0) + P_TEST_ERROR; + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_wbuf_bak, total_wbuf, buf_size); + + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_wbuf, total_wbuf_bak, buf_size); + + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + P_TEST_ERROR; + + /* Verify data read */ + for (i = 0; i < (int)ndsets; i++) + if (i == 0) { /* dset0 */ + for (j = 0; j < (int)block[0]; j++) + if (wbufi[i][j].a != rbufi[i][j].a || (4 * (j + (int)start[0]) + 1) != rbufi[i][j].b || + wbufi[i][j].c != rbufi[i][j].c || (4 * (j + (int)start[0]) + 3) != rbufi[i][j].d) { + nerrors++; + HDprintf("\n Error in 2nd data verification for dset %d:\n", i); + HDprintf(" At index %d: %d/%d, %d/%d, %d/%d, %d/%d\n", j + (int)start[0], + wbufi[i][j].a, rbufi[i][j].a, (4 * (j + (int)start[0]) + 1), rbufi[i][j].b, + wbufi[i][j].c, rbufi[i][j].c, (4 * (j + (int)start[0]) + 3), rbufi[i][j].d); + break; + } + } + else { /* other datasets */ + for (j = 0; j < (int)block[0]; j++) + if ((4 * (j + (int)start[0])) != rbufi[i][j].a || + (4 * (j + (int)start[0]) + 1) != rbufi[i][j].b || + (4 * (j + (int)start[0]) + 2) != rbufi[i][j].c || + (4 * (j + (int)start[0]) + 3) != rbufi[i][j].d) { + nerrors++; + HDprintf("\n Error in 2nd data verification for dset %d:\n", i); + HDprintf(" At index %d: %d/%d, %d/%d, %d/%d, %d/%d\n", j + (int)start[0], + (4 * (j + (int)start[0])), rbufi[i][j].a, (4 * (j + (int)start[0]) + 1), + rbufi[i][j].b, (4 * (j + (int)start[0]) + 2), rbufi[i][j].c, + (4 * (j + (int)start[0]) + 3), rbufi[i][j].d); + break; + } + } + + /* Case c */ + mm = HDrandom() % (int)ndsets; + if (!mm) + mm++; + + /* Update data in rbufi for dset with new unique values */ + for (j = 0; j < (int)block[0]; j++) { + rbufi[mm][j].a = (4 * (j + (int)start[0])) + (2 * DSET_SELECT_DIM); + rbufi[mm][j].b = (4 * (j + (int)start[0])) + (2 * DSET_SELECT_DIM) + 1; + rbufi[mm][j].c = (4 * (j + (int)start[0])) + (2 * DSET_SELECT_DIM) + 2; + rbufi[mm][j].d = (4 * (j + (int)start[0])) + (2 * DSET_SELECT_DIM) + 3; + } + + /* Create a compound type same size as s1_t */ + if ((ss_bc_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + 
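/*
 * Case (b) updates only dset0: the other datasets stay in the dset_dids[]
 * array handed to H5Dwrite_multi(), but selecting none on both their memory
 * and file dataspaces makes them contribute zero elements, so the call stays
 * collective across all datasets and ranks. Sketch of leaving dataset k
 * untouched (k is hypothetical, the array names match the surrounding code):
 *
 *     H5Sselect_none(mem_sids[k]);
 *     H5Sselect_none(file_sids[k]);
 *     // dset k still appears in the arrays passed to H5Dwrite_multi(),
 *     // so every rank issues the same collective call.
 */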
P_TEST_ERROR; + + /* but contains only subset members of s1_t */ + if (H5Tinsert(ss_bc_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(ss_bc_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) + P_TEST_ERROR; + + /* Reset memory and file spaces for dset */ + if (H5Sselect_all(mem_sids[mm]) < 0) + P_TEST_ERROR; + if (H5Sselect_all(file_sids[mm]) < 0) + P_TEST_ERROR; + if (H5Sselect_hyperslab(file_sids[mm], H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + + /* Untouched memory and file space for other datasets */ + for (i = 0; i < (int)ndsets; i++) { + if (i == 0 || i == mm) + continue; + if (H5Sselect_none(mem_sids[i]) < 0) + P_TEST_ERROR; + if (H5Sselect_none(file_sids[i]) < 0) + P_TEST_ERROR; + } + + /* Datatype setting for read from dataset */ + mem_tids[mm] = ss_bc_tid; + + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + P_TEST_ERROR; + + /* Verify data read */ + /* dset0 */ + for (j = 0; j < (int)block[0]; j++) + if (wbufi[0][j].a != rbufi[0][j].a || ((4 * (j + (int)start[0])) + 1) != rbufi[0][j].b || + wbufi[0][j].c != rbufi[0][j].c || ((4 * (j + (int)start[0])) + 3) != rbufi[0][j].d) { + nerrors++; + HDprintf("\n Error in 3rd data verification for dset0:\n"); + HDprintf(" At index %d: %d/%d, %d/%d, %d/%d, %d/%d\n", j + (int)start[0], wbufi[0][j].a, + rbufi[0][j].a, (4 * (j + (int)start[0]) + 1), rbufi[0][j].b, wbufi[0][j].c, + rbufi[0][j].c, (4 * (j + (int)start[0]) + 3), rbufi[0][j].d); + break; + } + + /* dset */ + for (j = 0; j < (int)block[0]; j++) + if (rbufi[mm][j].a != ((4 * (j + (int)start[0])) + (2 * DSET_SELECT_DIM)) || + rbufi[mm][j].b != ((4 * (j + (int)start[0])) + 1) || + rbufi[mm][j].c != ((4 * (j + (int)start[0])) + 2) || + rbufi[mm][j].d != ((4 * (j + (int)start[0])) + (2 * DSET_SELECT_DIM) + 3)) { + nerrors++; + HDprintf("\n Error in 3rd data verification for dset %d:\n", mm); + HDprintf(" At index %d: %d/%d, %d/%d, %d/%d, %d/%d\n", j + (int)start[0], + ((4 * (j + (int)start[0])) + (2 * DSET_SELECT_DIM)), rbufi[mm][j].a, + ((4 * (j + (int)start[0])) + 1), rbufi[mm][j].b, ((4 * (j + (int)start[0])) + 2), + rbufi[mm][j].c, ((4 * (j + (int)start[0])) + (2 * DSET_SELECT_DIM) + 3), rbufi[mm][j].d); + break; + } + + /* other datasets */ + for (i = 0; i < (int)ndsets; i++) { + if (i == 0 || i == mm) + continue; + + for (j = 0; j < (int)block[0]; j++) + if (rbufi[i][j].a != ((4 * (j + (int)start[0]))) || + rbufi[i][j].b != ((4 * (j + (int)start[0])) + 1) || + rbufi[i][j].c != ((4 * (j + (int)start[0])) + 2) || + rbufi[i][j].d != ((4 * (j + (int)start[0])) + 3)) { + nerrors++; + HDprintf("\n Error in 3rd data verification for dset %d:\n", i); + HDprintf(" At index %d: %d/%d, %d/%d, %d/%d, %d/%d\n", j + (int)start[0], + ((4 * (j + (int)start[0]))), rbufi[i][j].a, ((4 * (j + (int)start[0])) + 1), + rbufi[i][j].b, ((4 * (j + (int)start[0])) + 2), rbufi[i][j].c, + ((4 * (j + (int)start[0])) + 3), rbufi[i][j].d); + break; + } + } + + /* Case d */ + + /* Create s2_t compound type with: + * --no conversion for 2 member types, + * --1 larger mem type + * --1 smaller mem type + */ + if ((s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t))) < 0) + P_TEST_ERROR; + + if (H5Tinsert(s2_tid, "a", HOFFSET(s2_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s2_tid, "b", HOFFSET(s2_t, b), H5T_NATIVE_LLONG) < 0 || + H5Tinsert(s2_tid, "c", HOFFSET(s2_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s2_tid, "d", HOFFSET(s2_t, d), H5T_NATIVE_SHORT) < 0) + P_TEST_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + s2_wbufi[i] = s2_total_wbuf + (i * 
DSET_SELECT_DIM); + s2_rbufi[i] = s2_total_rbuf + (i * DSET_SELECT_DIM); + + wbufs[i] = s2_wbufi[i]; + rbufs[i] = s2_rbufi[i]; + + mem_tids[i] = s2_tid; + + if (H5Sselect_all(mem_sids[i]) < 0) + P_TEST_ERROR; + if (H5Sselect_all(file_sids[i]) < 0) + P_TEST_ERROR; + if (H5Sselect_hyperslab(file_sids[i], H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + } + + /* Initialize the buffer data for all the datasets */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < (int)block[0]; j++) { + s2_wbufi[i][j].a = 8 * (j + (int)start[0]); + s2_wbufi[i][j].b = (long long)((8 * (j + (int)start[0])) + 1); + s2_wbufi[i][j].c = (8 * (j + (int)start[0])) + 2; + s2_wbufi[i][j].d = (short)((8 * (j + (int)start[0])) + 3); + } + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(s2_total_wbuf_bak, s2_total_wbuf, s2_buf_size); + + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(s2_total_wbuf, s2_total_wbuf_bak, s2_buf_size); + + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + P_TEST_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + for (j = 0; j < (int)block[0]; j++) + if (s2_rbufi[i][j].a != s2_wbufi[i][j].a || s2_rbufi[i][j].b != s2_wbufi[i][j].b || + s2_rbufi[i][j].c != s2_wbufi[i][j].c || s2_rbufi[i][j].d != s2_wbufi[i][j].d) { + nerrors++; + HDprintf("\n Error in 3rd data verification for dset %d:\n", i); + HDprintf(" At index %d: %d/%d, %lld/%lld, %d/%d, %d/%d\n", j + (int)start[0], + s2_wbufi[i][j].a, s2_rbufi[i][j].a, s2_wbufi[i][j].b, s2_rbufi[i][j].b, + s2_wbufi[i][j].c, s2_rbufi[i][j].c, s2_wbufi[i][j].d, s2_rbufi[i][j].d); + break; + } + } + + if (H5Pclose(dcpl) < 0) + P_TEST_ERROR; + + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + if (H5Sclose(file_sids[i]) < 0) + P_TEST_ERROR; + if (H5Sclose(mem_sids[i]) < 0) + P_TEST_ERROR; + if (H5Dclose(dset_dids[i]) < 0) + P_TEST_ERROR; + } + + HDfree(total_wbuf); + HDfree(total_wbuf_bak); + HDfree(total_rbuf); + HDfree(s2_total_wbuf); + HDfree(s2_total_wbuf_bak); + HDfree(s2_total_rbuf); + + CHECK_PASSED(); + + return; + +} /* test_multi_dsets_cmpd_with_bkg() */ + +/* + * Test 3 for multi-dataset: + * --Datasets with/without type conv+size change+no background buffer + * + * Create dset0: H5T_STD_I32BE + * Create other dateasets: randomized H5T_STD_I64LE or H5T_STD_I16LE + * + * Case a--setting for multi write/read to ndsets: + * Datatype for all datasets: H5T_STD_I32BE + * + * Case b--setting for multi write/read to ndsets + * Datatype for all datasets: H5T_STD_I64BE + * + * Case c--setting for multi write/read to ndsets + * Datatype for all datasets: H5T_STD_I16BE + */ +static void +test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +{ + size_t ndsets; + int i, j; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + + hid_t file_sids[MULTI_NUM_DSETS]; + hid_t mem_sids[MULTI_NUM_DSETS]; + hid_t mem_tids[MULTI_NUM_DSETS]; + + char dset_names[MULTI_NUM_DSETS][DSET_NAME_LEN]; + hid_t dset_dids[MULTI_NUM_DSETS]; + + size_t buf_size, ss; + uint8_t *total_wbuf = NULL; + uint8_t *total_wbuf_bak = NULL; + uint8_t *total_rbuf = NULL; + uint8_t *total_lwbuf = NULL; + uint8_t *total_lwbuf_bak = NULL; + uint8_t *total_lrbuf = NULL; + uint8_t *total_swbuf = NULL; + uint8_t 
*total_swbuf_bak = NULL; + uint8_t *total_srbuf = NULL; + + uint8_t *wbufi[MULTI_NUM_DSETS]; + uint8_t *rbufi[MULTI_NUM_DSETS]; + uint8_t *lwbufi[MULTI_NUM_DSETS]; + uint8_t *lrbufi[MULTI_NUM_DSETS]; + uint8_t *swbufi[MULTI_NUM_DSETS]; + uint8_t *srbufi[MULTI_NUM_DSETS]; + + const void *wbufs[MULTI_NUM_DSETS]; + void *rbufs[MULTI_NUM_DSETS]; + + curr_nerrors = nerrors; + + ndsets = MAX(MULTI_MIN_DSETS, MULTI_NUM_DSETS); + + dims[0] = DSET_SELECT_DIM; + + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + P_TEST_ERROR; + + /* Set selection I/O mode, type of I/O and type of collective I/O */ + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + + /* Set up file space ids, mem space ids, and dataset ids */ + for (i = 0; i < (int)ndsets; i++) { + if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + + /* Generate dataset name */ + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_size_dset%d_%s_%s", i, + chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + + /* Create ith dataset */ + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_STD_I32BE, file_sids[i], H5P_DEFAULT, dcpl, + H5P_DEFAULT)) < 0) + P_TEST_ERROR; + } + + /* Each process takes x number of elements */ + block[0] = dims[0] / (hsize_t)mpi_size; + stride[0] = block[0]; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + + for (i = 0; i < (int)ndsets; i++) { + if ((mem_sids[i] = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + + if (H5Sselect_hyperslab(file_sids[i], H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + } + + /* Case a */ + + ss = H5Tget_size(H5T_STD_I32BE); + buf_size = ndsets * ss * DSET_SELECT_DIM; + + /* Allocate buffers for all datasets */ + if (NULL == (total_wbuf = (uint8_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_wbuf_bak = (uint8_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_rbuf = (uint8_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + wbufi[i] = total_wbuf + (i * (int)ss * DSET_SELECT_DIM); + rbufi[i] = total_rbuf + (i * (int)ss * DSET_SELECT_DIM); + + wbufs[i] = wbufi[i]; + rbufs[i] = rbufi[i]; + } + + /* Initialize the buffer data: big endian */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < (int)block[0]; j++) { + wbufi[i][j * (int)ss + 0] = 0x1; + wbufi[i][j * (int)ss + 1] = 0x2; + wbufi[i][j * (int)ss + 2] = 0x3; + wbufi[i][j * (int)ss + 3] = (uint8_t)(0x4 + j + (int)start[0]); + } + + /* Datatype setting for multi write/read */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = H5T_STD_I32BE; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_wbuf_bak, total_wbuf, buf_size); + + /* Write data to the dataset */ + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_wbuf, total_wbuf_bak, buf_size); + + check_io_mode(dxpl, chunked); + + /* Read data from the dataset */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + P_TEST_ERROR; + + /* Verify */ + for (i = 0; i < (int)ndsets; i++) + /* Only compare when it's at least the size of 
H5T_STD_I32BE */ + if (H5Tget_size(H5Dget_type(dset_dids[i])) >= ss) { + for (j = 0; j < (int)block[0]; j++) + if (rbufi[i][(int)ss * j + 0] != wbufi[i][(int)ss * j + 0] || + rbufi[i][(int)ss * j + 1] != wbufi[i][(int)ss * j + 1] || + rbufi[i][(int)ss * j + 2] != wbufi[i][(int)ss * j + 2] || + rbufi[i][(int)ss * j + 3] != wbufi[i][(int)ss * j + 3]) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j + (int)start[0]); + P_TEST_ERROR; + } + } + + /* Case b */ + + ss = H5Tget_size(H5T_STD_I64BE); + buf_size = ndsets * (ss * DSET_SELECT_DIM); + + /* Allocate buffers for all datasets */ + if (NULL == (total_lwbuf = (uint8_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_lwbuf_bak = (uint8_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_lrbuf = (uint8_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + lwbufi[i] = total_lwbuf + (i * (int)ss * DSET_SELECT_DIM); + lrbufi[i] = total_lrbuf + (i * (int)ss * DSET_SELECT_DIM); + + wbufs[i] = lwbufi[i]; + rbufs[i] = lrbufi[i]; + } + + /* Initialize the buffer data: big endian */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < (int)block[0]; j++) { + lwbufi[i][j * (int)ss + 0] = 0x1; + lwbufi[i][j * (int)ss + 1] = 0x2; + lwbufi[i][j * (int)ss + 2] = 0x3; + lwbufi[i][j * (int)ss + 3] = 0x4; + lwbufi[i][j * (int)ss + 4] = 0x5; + lwbufi[i][j * (int)ss + 5] = 0x6; + lwbufi[i][j * (int)ss + 6] = 0x7; + lwbufi[i][j * (int)ss + 7] = (uint8_t)(0x8 + j + (int)start[0]); + } + + /* Datatype setting for multi write/read */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = H5T_STD_I64BE; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_lwbuf_bak, total_lwbuf, buf_size); + + /* Write data to the dataset */ + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_lwbuf, total_lwbuf_bak, buf_size); + + /* Read data from the dataset */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + P_TEST_ERROR; + + /* Verify */ + for (i = 0; i < (int)ndsets; i++) + /* Only compare when it's the size of H5T_STD_I64BE */ + if (H5Tget_size(H5Dget_type(dset_dids[i])) >= ss) { + for (j = 0; j < (int)block[0]; j++) + if (lrbufi[i][(int)ss * j + 0] != lwbufi[i][(int)ss * j + 0] || + lrbufi[i][(int)ss * j + 1] != lwbufi[i][(int)ss * j + 1] || + lrbufi[i][(int)ss * j + 2] != lwbufi[i][(int)ss * j + 2] || + lrbufi[i][(int)ss * j + 3] != lwbufi[i][(int)ss * j + 3] || + lrbufi[i][(int)ss * j + 4] != lwbufi[i][(int)ss * j + 4] || + lrbufi[i][(int)ss * j + 5] != lwbufi[i][(int)ss * j + 5] || + lrbufi[i][(int)ss * j + 6] != lwbufi[i][(int)ss * j + 6] || + lrbufi[i][(int)ss * j + 7] != lwbufi[i][(int)ss * j + 7]) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j + (int)start[0]); + P_TEST_ERROR; + } + } + + /* Case c */ + + ss = H5Tget_size(H5T_STD_I16BE); + buf_size = ndsets * (ss * DSET_SELECT_DIM); + + /* Allocate buffers for all datasets */ + if (NULL == (total_swbuf = (uint8_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_swbuf_bak = (uint8_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_srbuf = (uint8_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + 
swbufi[i] = total_swbuf + (i * (int)ss * DSET_SELECT_DIM); + srbufi[i] = total_srbuf + (i * (int)ss * DSET_SELECT_DIM); + + wbufs[i] = swbufi[i]; + rbufs[i] = srbufi[i]; + } + + /* Initialize the buffer data: big endian */ + for (i = 0; i < (int)ndsets; i++) + for (j = 0; j < (int)block[0]; j++) { + swbufi[i][j * (int)ss + 0] = 0x1; + swbufi[i][j * (int)ss + 1] = (uint8_t)(0x2 + j + (int)start[0]); + } + + /* Datatype setting for multi write/read */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = H5T_STD_I16BE; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_swbuf_bak, total_swbuf, buf_size); + + /* Write data to the dataset */ + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_swbuf, total_swbuf_bak, buf_size); + + /* Read data from the dataset */ + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + P_TEST_ERROR; + + /* Verify */ + for (i = 0; i < (int)ndsets; i++) + /* Can compare for all cases */ + for (j = 0; j < (int)block[0]; j++) + if (srbufi[i][(int)ss * j + 0] != swbufi[i][(int)ss * j + 0] || + srbufi[i][(int)ss * j + 1] != swbufi[i][(int)ss * j + 1]) { + H5_FAILED(); + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j + (int)start[0]); + P_TEST_ERROR; + } + + if (H5Pclose(dcpl) < 0) + P_TEST_ERROR; + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + if (H5Sclose(file_sids[i]) < 0) + P_TEST_ERROR; + if (H5Sclose(mem_sids[i]) < 0) + P_TEST_ERROR; + if (H5Dclose(dset_dids[i]) < 0) + P_TEST_ERROR; + } + + HDfree(total_wbuf); + HDfree(total_wbuf_bak); + HDfree(total_rbuf); + HDfree(total_lwbuf); + HDfree(total_lwbuf_bak); + HDfree(total_lrbuf); + HDfree(total_swbuf); + HDfree(total_swbuf_bak); + HDfree(total_srbuf); + + CHECK_PASSED(); + + return; + +} /* test_multi_dsets_size_change_no_bkg() */ + +/* + * Test 4 for multi-dataset: + * Datasets with type conversions+some processes have null/empty selections + * + * Create dset0: H5T_NATIVE_INT + * Create other datasets: randomized H5T_NATIVE_LLONG or H5T_NATIVE_SHORT + * Type conversions + some processes have null/empty selections in datasets + * + * Case (a): dset0 + * process 0: hyperslab; other processes: select none + * Case (b): randomized dset + * process 0: get 0 row; other processes: hyperslab + * Case (c): randomized dset + * process 0: select none; other processes: select all + * + * Memory datatype for multi write to all datasets: H5T_NATIVE_INT + * --this will not trigger type conversion for case (a) but + * type conversion for cases (b) & (c) + * Memory datatype for multi read to all datasets: H5T_NATIVE_LONG + * --this will trigger type conversion for (a), (b) & (c) + */ +static void +test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +{ + size_t ndsets; + int i, j; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + + hid_t file_sids[MULTI_NUM_DSETS]; + hid_t mem_sids[MULTI_NUM_DSETS]; + hid_t mem_tids[MULTI_NUM_DSETS]; + + char dset_names[MULTI_NUM_DSETS][DSET_NAME_LEN]; + hid_t dset_dids[MULTI_NUM_DSETS]; + + size_t buf_size; + int *total_wbuf = NULL; + int *total_wbuf_bak = NULL; + int *total_trans_wbuf = NULL; + long *total_lrbuf = 
NULL; + + int *wbufi[MULTI_NUM_DSETS]; + int *trans_wbufi[MULTI_NUM_DSETS]; + long *l_rbufi[MULTI_NUM_DSETS]; + + const void *wbufs[MULTI_NUM_DSETS]; + void *rbufs[MULTI_NUM_DSETS]; + + int save_block0; + int mm, ll; + + const char *expr = "2*x"; + + curr_nerrors = nerrors; + + ndsets = MAX(MULTI_MIN_DSETS, MULTI_NUM_DSETS); + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + P_TEST_ERROR; + + /* Set selection I/O mode, type of I/O and type of collective I/O */ + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + + if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) + P_TEST_ERROR; + + /* Set data transform */ + if (dtrans) + if (H5Pset_data_transform(dxpl, expr) < 0) + P_TEST_ERROR; + + /* Set up file space ids and dataset ids */ + for (i = 0; i < (int)ndsets; i++) { + if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + + /* Generate dataset name */ + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_sel_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + + if (i == 0) { + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_INT, file_sids[i], H5P_DEFAULT, + dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + } + else { + if ((dset_dids[i] = + H5Dcreate2(fid, dset_names[i], ((HDrandom() % 2) ? H5T_NATIVE_LLONG : H5T_NATIVE_SHORT), + file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + } + } + + buf_size = ndsets * DSET_SELECT_DIM * sizeof(int); + + /* Allocate buffers for all datasets */ + if (NULL == (total_wbuf = (int *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_wbuf_bak = (int *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_trans_wbuf = (int *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_lrbuf = (long *)HDmalloc(ndsets * DSET_SELECT_DIM * sizeof(long)))) + P_TEST_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + wbufi[i] = total_wbuf + (i * DSET_SELECT_DIM); + trans_wbufi[i] = total_trans_wbuf + (i * DSET_SELECT_DIM); + + wbufs[i] = wbufi[i]; + } + + /* + * Case (a): dset0 + * process 0: hyperslab; other processes: select none + */ + + /* Each process takes x number of elements */ + block[0] = dims[0] / (hsize_t)mpi_size; + save_block0 = (int)block[0]; + stride[0] = block[0]; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + + /* Get file dataspace */ + if ((file_sids[0] = H5Dget_space(dset_dids[0])) < 0) + P_TEST_ERROR; + + if (MAINPROCESS) { + if (H5Sselect_hyperslab(file_sids[0], H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + } + else { + if (H5Sselect_none(file_sids[0]) < 0) + P_TEST_ERROR; + } + + /* Create memory dataspace */ + if ((mem_sids[0] = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + if (mpi_rank) { + if (H5Sselect_none(mem_sids[0]) < 0) + P_TEST_ERROR; + } + + /* Initialize data for wbufi[0] */ + for (j = 0; j < (int)block[0]; j++) { + wbufi[0][j] = j + (int)start[0]; + trans_wbufi[0][j] = 2 * wbufi[0][j]; + } + + /* + * Case (b): choose a dataset -- dset + * process 0: get 0 row; other processes: hyperslab + */ + + mm = HDrandom() % (int)ndsets; + if (mm == 0) + mm++; + + block[0] = mpi_rank ? 
(dims[0] / (hsize_t)mpi_size) : 0; + stride[0] = mpi_rank ? block[0] : 1; + count[0] = 1; + start[0] = mpi_rank ? ((hsize_t)mpi_rank * block[0]) : 0; + + /* Get file dataspace */ + if ((file_sids[mm] = H5Dget_space(dset_dids[mm])) < 0) + P_TEST_ERROR; + + if (H5Sselect_hyperslab(file_sids[mm], H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + + /* Create a memory dataspace */ + if ((mem_sids[mm] = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + + /* need to make memory space to match for process 0 */ + if (MAINPROCESS) { + if (H5Sselect_hyperslab(mem_sids[mm], H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + } + else { + if (H5Sselect_all(mem_sids[mm]) < 0) + P_TEST_ERROR; + } + + /* Initialize data for wbufi[1] */ + for (j = 0; j < (int)block[0]; j++) { + wbufi[mm][j] = j + (int)start[0]; + trans_wbufi[mm][j] = 2 * wbufi[mm][j]; + } + + /* + * Case (c): choose a dataset -- dset + * process 0: select none; other processes: select all + */ + + ll = mm + 1; + if ((ll % (int)ndsets) == 0) + ll = 1; + + /* Get file dataspace */ + if ((file_sids[ll] = H5Dget_space(dset_dids[ll])) < 0) + P_TEST_ERROR; + if (MAINPROCESS) { + if (H5Sselect_none(file_sids[ll]) < 0) + P_TEST_ERROR; + } + else { + if (H5Sselect_all(file_sids[ll]) < 0) + P_TEST_ERROR; + } + + /* Create a memory dataspace */ + if ((mem_sids[ll] = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + if (MAINPROCESS) { + if (H5Sselect_none(mem_sids[ll]) < 0) + P_TEST_ERROR; + } + else if (H5Sselect_all(mem_sids[ll]) < 0) + P_TEST_ERROR; + + /* Initialize data for wbufi[ll] */ + for (j = 0; j < (int)dims[0]; j++) { + wbufi[ll][j] = (int)j + DSET_SELECT_DIM; + trans_wbufi[ll][j] = 2 * wbufi[ll][j]; + } + + /* Set up remaining dsets */ + for (i = 0; i < (int)ndsets; i++) { + if (i == 0 || i == mm || i == ll) + continue; + /* Get file dataspace */ + if ((file_sids[i] = H5Dget_space(dset_dids[i])) < 0) + P_TEST_ERROR; + if (H5Sselect_none(file_sids[i]) < 0) + P_TEST_ERROR; + + if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + if (H5Sselect_none(mem_sids[i]) < 0) + P_TEST_ERROR; + } + + /* Set up mem_tids[] for multi write */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = H5T_NATIVE_INT; + + /* Copy wbuf if the library will be modifying it */ + if (mwbuf) + HDmemcpy(total_wbuf_bak, total_wbuf, buf_size); + + /* Write data to the dataset with/without data transform */ + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + P_TEST_ERROR; + + /* Restore wbuf from backup if the library modified it */ + if (mwbuf) + HDmemcpy(total_wbuf, total_wbuf_bak, buf_size); + + check_io_mode(dxpl, chunked); + + /* Initialize buffer indices */ + for (i = 0; i < (int)ndsets; i++) { + l_rbufi[i] = total_lrbuf + (i * DSET_SELECT_DIM); + rbufs[i] = l_rbufi[i]; + } + + /* Set up mem_tids[] for multi read */ + for (i = 0; i < (int)ndsets; i++) + mem_tids[i] = H5T_NATIVE_LONG; + + if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, ntrans_dxpl, rbufs) < 0) + P_TEST_ERROR; + + if (MAINPROCESS) { + /* Case a: verify dset0 */ + for (j = 0; j < save_block0; j++) + if (l_rbufi[0][j] != (dtrans ? (long)trans_wbufi[0][j] : (long)wbufi[0][j])) { + nerrors++; + HDprintf(" Verify dset0 at index %d: %ld, %ld\n", j + (int)start[0], + dtrans ? (long)trans_wbufi[0][j] : (long)wbufi[0][j], l_rbufi[0][j]); + break; + } + } + + if (mpi_rank) { + /* Case b: verify dset */ + for (j = 0; j < (int)block[0]; j++) + if (l_rbufi[mm][j] != (long)(dtrans ? 
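/*
 * The read side of Test 4 uses a single H5T_NATIVE_LONG memory type for every
 * dataset even though the files hold H5T_NATIVE_INT, H5T_NATIVE_LLONG or
 * H5T_NATIVE_SHORT, so each dataset gets its own file-type-to-long conversion
 * in the same collective call. Sketch of the per-dataset type array (names
 * match the surrounding code):
 *
 *     for (i = 0; i < (int)ndsets; i++)
 *         mem_tids[i] = H5T_NATIVE_LONG;     // one conversion path per dataset
 *     H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, ntrans_dxpl, rbufs);
 */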
trans_wbufi[mm][j] : wbufi[mm][j])) { + nerrors++; + HDprintf(" Verify dset %d at index %d: %ld, %ld\n", mm, j + (int)start[0], + (long)(dtrans ? trans_wbufi[mm][j] : wbufi[mm][j]), l_rbufi[mm][j]); + break; + } + + /* Case c: verify dset */ + for (j = 0; j < (int)dims[0]; j++) + if (l_rbufi[ll][j] != (long)(dtrans ? trans_wbufi[ll][j] : wbufi[ll][j])) { + nerrors++; + HDprintf(" Verify dset %d at index %d: %ld, %ld\n", ll, j, + (long)(dtrans ? trans_wbufi[ll][j] : wbufi[ll][j]), l_rbufi[ll][j]); + break; + } + } + + if (H5Pclose(dcpl) < 0) + P_TEST_ERROR; + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + if (H5Pclose(ntrans_dxpl) < 0) + P_TEST_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + if (H5Sclose(file_sids[i]) < 0) + P_TEST_ERROR; + if (H5Sclose(mem_sids[i]) < 0) + P_TEST_ERROR; + if (H5Dclose(dset_dids[i]) < 0) + P_TEST_ERROR; + } + + HDfree(total_wbuf); + HDfree(total_wbuf_bak); + HDfree(total_trans_wbuf); + HDfree(total_lrbuf); + + CHECK_PASSED(); + + return; + +} /* test_multi_dsets_conv_sel_empty() */ + +/* + * Test 5 for multi-dataset: + * + * Repeat the following test for niter times to ensure the + * random combinations of all dataset types are hit. + * + * Create randomized contiguous or chunked datasets with: + * --DSET_WITH_NO_CONV: + * --with no type conversion + * --dataset with H5T_NATIVE_INT + * --DSET_WITH_CONV_AND_NO_BKG: + * --type conversion without background buffer + * --dataset with H5T_NATIVE_LONG + * --DSET_WITH_CONV_AND_BKG: + * --type conversion with background buffer + * --dataset with compound type s1_t + * + * Do H5Dwrite_multi() and H5Dread_multi() for the above randomized + * datasets with the settings below: + * Setting A: + * --DSET_WITH_NO_CONV: + * --write: mem_tids[] = H5T_NATIVE_INT + * --read: r_mem_tids[] = H5T_NATIVE_INT + * --DSET_WITH_CONV_AND_NO_BKG: + * --write: mem_tids[] = H5T_NATIVE_ULONG + * --read: r_mem_tids[] = H5T_NATIVE_LONG + * --DSET_WITH_CONV_AND_BKG: + * --write: mem_tids[] = s1_tid; + * --read: r_mem_tids[i] = s3_tid; + * + * Setting B: + * --DSET_WITH_NO_CONV: + * --write: mem_tids[] = H5T_NATIVE_INT + * --read: r_mem_tids[] = H5T_NATIVE_INT + * --DSET_WITH_CONV_AND_NO_BKG: + * --write: mem_tids[] = H5T_NATIVE_LONG; + * --read: r_mem_tids[] = H5T_NATIVE_SHORT; + * --DSET_WITH_CONV_AND_BKG: + * --write: mem_tids[] = s4_tid; + * --read: r_mem_tids[i] = s1_tid; + * + * Verify the result read as below: + * Setting A: + * --DSET_WITH_NO_CONV: + * --verify data read in rbufi1[i][j] is same as wbufi1[i][j] + * --DSET_WITH_CONV_AND_NO_BKG: + * --verify data read in l_rbufi2[i][j] is all LONG_MAX + * --DSET_WITH_CONV_AND_BKG: + * --verify all fields read in s3_rbufi3[i][j] is the + * reverse of s1_wbufi3[i][j] + * Setting B: + * --DSET_WITH_NO_CONV: + * --verify data read in rbufi1[i][j] is same as wbufi1[i][j] + * --DSET_WITH_CONV_AND_NO_BKG: + * --verify data read in s_rbufi2[i][j] is all SHRT_MAX + * --DSET_WITH_CONV_AND_BKG: + * --verify fields read in s1_rbufi3[i][j] is as follows: + * --fields 'a' and 'c' are as s1_wbufi3[i][j].a and s1_wbufi3[i][j].c + * --fields 'b' and 'd' are (DSET_SELECT_DIM + j + start[0]) + */ +static void +test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) +{ + size_t ndsets; + int i, j, mm; + int s, n; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + + hid_t file_sids[MULTI_NUM_DSETS]; + hid_t mem_sids[MULTI_NUM_DSETS]; + hid_t mem_tids[MULTI_NUM_DSETS]; + hid_t 
r_mem_tids[MULTI_NUM_DSETS]; + + multi_dset_type_t dset_types[MULTI_NUM_DSETS]; + + hid_t s1_tid = H5I_INVALID_HID; + hid_t s3_tid = H5I_INVALID_HID; + hid_t s4_tid = H5I_INVALID_HID; + + char dset_names[MULTI_NUM_DSETS][DSET_NAME_LEN]; + hid_t dset_dids[MULTI_NUM_DSETS]; + + size_t buf_size; + + int *total_wbuf1 = NULL; + int *total_wbuf1_bak = NULL; + int *total_rbuf1 = NULL; + + int *wbufi1[MULTI_NUM_DSETS]; + int *rbufi1[MULTI_NUM_DSETS]; + + unsigned long *ul_total_wbuf2 = NULL; + unsigned long *ul_total_wbuf2_bak = NULL; + long *l_total_rbuf2 = NULL; + unsigned long *ul_wbufi2[MULTI_NUM_DSETS]; + long *l_rbufi2[MULTI_NUM_DSETS]; + + long *l_total_wbuf2 = NULL; + long *l_total_wbuf2_bak = NULL; + short *s_total_rbuf2 = NULL; + long *l_wbufi2[MULTI_NUM_DSETS]; + short *s_rbufi2[MULTI_NUM_DSETS]; + + s1_t *s1_total_wbuf3 = NULL; + s1_t *s1_total_wbuf3_bak = NULL; + s3_t *s3_total_rbuf3 = NULL; + s1_t *s1_wbufi3[MULTI_NUM_DSETS]; + s3_t *s3_rbufi3[MULTI_NUM_DSETS]; + + s4_t *s4_total_wbuf3 = NULL; + s4_t *s4_total_wbuf3_bak = NULL; + s1_t *s1_total_rbuf3 = NULL; + s4_t *s4_wbufi3[MULTI_NUM_DSETS]; + s1_t *s1_rbufi3[MULTI_NUM_DSETS]; + + const void *wbufs[MULTI_NUM_DSETS]; + void *rbufs[MULTI_NUM_DSETS]; + + /* for n niter to ensure that all randomized dset_types with multi_dset_type_t will be covered */ + for (n = 0; n < niter; n++) { + + /* Set up the number of datasets for testing */ + ndsets = MAX(MULTI_MIN_DSETS, MULTI_NUM_DSETS); + + /* Create dataset transfer property list */ + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + P_TEST_ERROR; + + /* Set selection I/O mode, type of I/O and type of collective I/O */ + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + + /* Set dataset layout: contiguous or chunked */ + dims[0] = DSET_SELECT_DIM; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + + if (chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + /* Each process takes x number of elements */ + block[0] = dims[0] / (hsize_t)mpi_size; + stride[0] = block[0]; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + + /* Create compound data type: s1_t */ + if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + P_TEST_ERROR; + + if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) + P_TEST_ERROR; + + /* Create compound data type: s3_t */ + if ((s3_tid = H5Tcreate(H5T_COMPOUND, sizeof(s3_t))) < 0) + P_TEST_ERROR; + + if (H5Tinsert(s3_tid, "a", HOFFSET(s3_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s3_tid, "b", HOFFSET(s3_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(s3_tid, "c", HOFFSET(s3_t, c), H5T_NATIVE_INT) < 0 || + H5Tinsert(s3_tid, "d", HOFFSET(s3_t, d), H5T_NATIVE_INT) < 0) + P_TEST_ERROR; + + /* Create compound data type: s4_t */ + if ((s4_tid = H5Tcreate(H5T_COMPOUND, sizeof(s4_t))) < 0) + P_TEST_ERROR; + + if (H5Tinsert(s4_tid, "b", HOFFSET(s4_t, b), H5T_NATIVE_UINT) < 0 || + H5Tinsert(s4_tid, "d", HOFFSET(s4_t, d), H5T_NATIVE_UINT) < 0) + P_TEST_ERROR; + + /* Create dataset for i ndsets */ + for (i = 0; i < (int)ndsets; i++) { + + /* File space ids */ + if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + + /* Memory space ids */ + if ((mem_sids[i] = H5Screate_simple(1, block, NULL)) < 0) + P_TEST_ERROR; + + mm = 
HDrandom() % (int)ndsets; + if (mm == 0) { + dset_types[i] = DSET_WITH_NO_CONV; + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_nconv_dset%d_%s_%s", i, + chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_INT, file_sids[i], H5P_DEFAULT, + dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + } + else if (mm == 1) { + dset_types[i] = DSET_WITH_CONV_AND_NO_BKG; + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_nbkg_dset%d_%s_%s", i, + chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_LONG, file_sids[i], H5P_DEFAULT, + dcpl, H5P_DEFAULT)) < 0) + P_TEST_ERROR; + } + else { + dset_types[i] = DSET_WITH_CONV_AND_BKG; + HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_bkg_dset%d_%s_%s", i, + chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], s1_tid, file_sids[i], H5P_DEFAULT, dcpl, + H5P_DEFAULT)) < 0) + P_TEST_ERROR; + } + + if (H5Sselect_hyperslab(file_sids[i], H5S_SELECT_SET, start, stride, count, block) < 0) + P_TEST_ERROR; + + } /* end for i ndsets */ + + /* Allocate buffers for all datasets */ + + /* DSET_WITH_NO_CONV */ + buf_size = ndsets * DSET_SELECT_DIM * sizeof(int); + if (NULL == (total_wbuf1 = (int *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (mwbuf && NULL == (total_wbuf1_bak = (int *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (NULL == (total_rbuf1 = (int *)HDmalloc(buf_size))) + P_TEST_ERROR; + + /* DSET_WITH_CONV_AND_NO_BKG */ + buf_size = ndsets * DSET_SELECT_DIM * sizeof(unsigned long); + if (NULL == (ul_total_wbuf2 = (unsigned long *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (mwbuf && NULL == (ul_total_wbuf2_bak = (unsigned long *)HDmalloc(buf_size))) + P_TEST_ERROR; + buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); + if (NULL == (l_total_rbuf2 = (long *)HDmalloc(buf_size))) + P_TEST_ERROR; + + buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); + if (NULL == (l_total_wbuf2 = (long *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (mwbuf && NULL == (l_total_wbuf2_bak = (long *)HDmalloc(buf_size))) + P_TEST_ERROR; + buf_size = ndsets * DSET_SELECT_DIM * sizeof(short); + if (NULL == (s_total_rbuf2 = (short *)HDmalloc(buf_size))) + P_TEST_ERROR; + + /* DSET_WITH_CONV_AND_BKG */ + buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); + if (NULL == (s1_total_wbuf3 = (s1_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (mwbuf && NULL == (s1_total_wbuf3_bak = (s1_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + buf_size = ndsets * DSET_SELECT_DIM * sizeof(s3_t); + if (NULL == (s3_total_rbuf3 = (s3_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + + buf_size = ndsets * DSET_SELECT_DIM * sizeof(s4_t); + if (NULL == (s4_total_wbuf3 = (s4_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + if (mwbuf && NULL == (s4_total_wbuf3_bak = (s4_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); + if (NULL == (s1_total_rbuf3 = (s1_t *)HDmalloc(buf_size))) + P_TEST_ERROR; + + /* Test with s settings for ndsets */ + for (s = SETTING_A; s <= SETTING_B; s++) { + + /* for i ndsets */ + for (i = 0; i < (int)ndsets; i++) { + + switch (dset_types[i]) { + + case DSET_WITH_NO_CONV: + /* Initialize buffer indices */ + wbufi1[i] = total_wbuf1 + (i * DSET_SELECT_DIM); + rbufi1[i] = total_rbuf1 + (i * DSET_SELECT_DIM); + + wbufs[i] = wbufi1[i]; + rbufs[i] = rbufi1[i]; + + /* Initialize the buffer data */ + for (j = 0; j < (int)block[0]; j++) + 
wbufi1[i][j] = j + (int)start[0]; + + /* Same for all cases */ + mem_tids[i] = H5T_NATIVE_INT; + r_mem_tids[i] = H5T_NATIVE_INT; + + break; + + case DSET_WITH_CONV_AND_NO_BKG: + if (s == SETTING_A) { + /* Initialize buffer indices */ + ul_wbufi2[i] = ul_total_wbuf2 + (i * DSET_SELECT_DIM); + l_rbufi2[i] = l_total_rbuf2 + (i * DSET_SELECT_DIM); + + wbufs[i] = ul_wbufi2[i]; + rbufs[i] = l_rbufi2[i]; + + for (j = 0; j < (int)block[0]; j++) + ul_wbufi2[i][j] = ULONG_MAX - (unsigned long)(j + (int)start[0]); + + mem_tids[i] = H5T_NATIVE_ULONG; + r_mem_tids[i] = H5T_NATIVE_LONG; + } + else if (s == SETTING_B) { + /* Initialize buffer indices */ + l_wbufi2[i] = l_total_wbuf2 + (i * DSET_SELECT_DIM); + s_rbufi2[i] = s_total_rbuf2 + (i * DSET_SELECT_DIM); + + wbufs[i] = l_wbufi2[i]; + rbufs[i] = s_rbufi2[i]; + + /* Initialize the buffer data */ + for (j = 0; j < (int)block[0]; j++) + l_wbufi2[i][j] = LONG_MAX - (long)(j + (int)start[0]); + + mem_tids[i] = H5T_NATIVE_LONG; + r_mem_tids[i] = H5T_NATIVE_SHORT; + } + + break; + + case DSET_WITH_CONV_AND_BKG: + + if (s == SETTING_A) { + /* Initialize buffer indices */ + s1_wbufi3[i] = s1_total_wbuf3 + (i * DSET_SELECT_DIM); + s3_rbufi3[i] = s3_total_rbuf3 + (i * DSET_SELECT_DIM); + + wbufs[i] = s1_wbufi3[i]; + rbufs[i] = s3_rbufi3[i]; + + /* Initialize buffer data for s1_t */ + for (j = 0; j < (int)block[0]; j++) { + s1_wbufi3[i][j].a = (4 * j + (int)start[0]); + s1_wbufi3[i][j].b = (4 * j + (int)start[0]) + 1; + s1_wbufi3[i][j].c = (4 * j + (int)start[0]) + 2; + s1_wbufi3[i][j].d = (4 * j + (int)start[0]) + 3; + } + mem_tids[i] = s1_tid; + r_mem_tids[i] = s3_tid; + } + else if (s == SETTING_B) { + /* Initialize buffer indices */ + s4_wbufi3[i] = s4_total_wbuf3 + (i * DSET_SELECT_DIM); + s1_rbufi3[i] = s1_total_rbuf3 + (i * DSET_SELECT_DIM); + + wbufs[i] = s4_wbufi3[i]; + rbufs[i] = s1_rbufi3[i]; + + /* Initialize buffer data for s4_t */ + for (j = 0; j < (int)block[0]; j++) { + s4_wbufi3[i][j].b = DSET_SELECT_DIM + (unsigned int)(j + (int)start[0]); + s4_wbufi3[i][j].d = DSET_SELECT_DIM + (unsigned int)(j + (int)start[0]); + } + mem_tids[i] = s4_tid; + r_mem_tids[i] = s1_tid; + } + + break; + + case DSET_NTTYPES: + default: + P_TEST_ERROR; + + } /* end switch dset_types */ + + } /* end for i ndsets */ + + /* Copy wbufs if the library will be modifying them */ + if (mwbuf) { + HDmemcpy(total_wbuf1_bak, total_wbuf1, ndsets * DSET_SELECT_DIM * sizeof(int)); + HDmemcpy(ul_total_wbuf2_bak, ul_total_wbuf2, + ndsets * DSET_SELECT_DIM * sizeof(unsigned long)); + HDmemcpy(l_total_wbuf2_bak, l_total_wbuf2, ndsets * DSET_SELECT_DIM * sizeof(long)); + HDmemcpy(s1_total_wbuf3_bak, s1_total_wbuf3, ndsets * DSET_SELECT_DIM * sizeof(s1_t)); + HDmemcpy(s4_total_wbuf3_bak, s4_total_wbuf3, ndsets * DSET_SELECT_DIM * sizeof(s4_t)); + } + + if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) + P_TEST_ERROR; + + /* Restore wbufs from backup if the library modified them */ + if (mwbuf) { + HDmemcpy(total_wbuf1, total_wbuf1_bak, ndsets * DSET_SELECT_DIM * sizeof(int)); + HDmemcpy(ul_total_wbuf2, ul_total_wbuf2_bak, + ndsets * DSET_SELECT_DIM * sizeof(unsigned long)); + HDmemcpy(l_total_wbuf2, l_total_wbuf2_bak, ndsets * DSET_SELECT_DIM * sizeof(long)); + HDmemcpy(s1_total_wbuf3, s1_total_wbuf3_bak, ndsets * DSET_SELECT_DIM * sizeof(s1_t)); + HDmemcpy(s4_total_wbuf3, s4_total_wbuf3_bak, ndsets * DSET_SELECT_DIM * sizeof(s4_t)); + } + + if (H5Dread_multi(ndsets, dset_dids, r_mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) + P_TEST_ERROR; + + 
check_io_mode(dxpl, chunked); + + /* Verify result read */ + /* for i ndsets */ + for (i = 0; i < (int)ndsets; i++) { + switch (dset_types[i]) { + + case DSET_WITH_NO_CONV: + for (j = 0; j < (int)block[0]; j++) + if (rbufi1[i][j] != wbufi1[i][j]) { + nerrors++; + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j + (int)start[0]); + break; + } + + break; + + case DSET_WITH_CONV_AND_NO_BKG: + if (s == SETTING_A) { + for (j = 0; j < (int)block[0]; j++) + if (l_rbufi2[i][j] != LONG_MAX) { + nerrors++; + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j + (int)start[0]); + break; + } + } + else if (s == SETTING_B) { + for (j = 0; j < (int)block[0]; j++) + if (s_rbufi2[i][j] != SHRT_MAX) { + nerrors++; + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j + (int)start[0]); + break; + } + } + + break; + + case DSET_WITH_CONV_AND_BKG: + if (s == SETTING_A) { + for (j = 0; j < (int)block[0]; j++) + if (s3_rbufi3[i][j].a != s1_wbufi3[i][j].a || + s3_rbufi3[i][j].b != s1_wbufi3[i][j].b || + s3_rbufi3[i][j].c != s1_wbufi3[i][j].c || + s3_rbufi3[i][j].d != s1_wbufi3[i][j].d) { + nerrors++; + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j + (int)start[0]); + break; + } + } + else if (s == SETTING_B) { + for (j = 0; j < (int)block[0]; j++) + if (s1_rbufi3[i][j].a != s1_wbufi3[i][j].a || + s1_rbufi3[i][j].b != (DSET_SELECT_DIM + j + (int)start[0]) || + s1_rbufi3[i][j].c != s1_wbufi3[i][j].c || + s1_rbufi3[i][j].d != (DSET_SELECT_DIM + j + (int)start[0])) { + nerrors++; + HDprintf(" Read different values than written.\n"); + HDprintf(" For dset %d at index %d\n", i, j + (int)start[0]); + break; + } + } + + break; + + case DSET_NTTYPES: + default: + P_TEST_ERROR; + + } /* end switch dset_types */ + + } /* end for i ndsets */ + + } /* end for s settings */ + + /* Closing */ + if (H5Pclose(dcpl) < 0) + P_TEST_ERROR; + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + + if (H5Tclose(s1_tid) < 0) + P_TEST_ERROR; + if (H5Tclose(s3_tid) < 0) + P_TEST_ERROR; + if (H5Tclose(s4_tid) < 0) + P_TEST_ERROR; + + for (i = 0; i < (int)ndsets; i++) { + if (H5Sclose(file_sids[i]) < 0) + P_TEST_ERROR; + if (H5Dclose(dset_dids[i]) < 0) + P_TEST_ERROR; + /* Don't delete the last set of datasets */ + if ((n + 1) != niter) + if (H5Ldelete(fid, dset_names[i], H5P_DEFAULT) < 0) + P_TEST_ERROR; + } + + /* Freeing */ + HDfree(total_wbuf1); + total_wbuf1 = NULL; + HDfree(total_wbuf1_bak); + total_wbuf1_bak = NULL; + HDfree(total_rbuf1); + total_rbuf1 = NULL; + + HDfree(ul_total_wbuf2); + ul_total_wbuf2 = NULL; + HDfree(ul_total_wbuf2_bak); + ul_total_wbuf2_bak = NULL; + HDfree(l_total_rbuf2); + l_total_rbuf2 = NULL; + HDfree(l_total_wbuf2); + l_total_wbuf2 = NULL; + HDfree(l_total_wbuf2_bak); + l_total_wbuf2_bak = NULL; + HDfree(s_total_rbuf2); + s_total_rbuf2 = NULL; + + HDfree(s1_total_wbuf3); + s1_total_wbuf3 = NULL; + HDfree(s1_total_wbuf3_bak); + s1_total_wbuf3_bak = NULL; + HDfree(s3_total_rbuf3); + s3_total_rbuf3 = NULL; + HDfree(s4_total_wbuf3); + s4_total_wbuf3 = NULL; + HDfree(s4_total_wbuf3_bak); + s4_total_wbuf3_bak = NULL; + HDfree(s1_total_rbuf3); + s1_total_rbuf3 = NULL; + + } /* end for n niter */ + + CHECK_PASSED(); + + return; + +} /* test_multi_dsets_all() */ + +/* + * Test with various test_mode that no selection I/O is performed + * + * Note: It is the responsibility of the tester to + * understand and feed proper combination of 
test_mode + * as needed. + */ +static void +test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_mode) +{ + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t fid = H5I_INVALID_HID; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hbool_t is_chunked = FALSE; + hid_t tid = H5T_NATIVE_INT; + uint32_t no_selection_io_cause_write = 0; + uint32_t no_selection_io_cause_read = 0; + uint32_t no_selection_io_cause_write_expected = 0; + uint32_t no_selection_io_cause_read_expected = 0; + int wbuf[DSET_SELECT_DIM]; + int rbuf[DSET_SELECT_DIM]; + int i; + + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + P_TEST_ERROR; + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + P_TEST_ERROR; + + if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + P_TEST_ERROR; + + if (test_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET) { + if (H5Pset_layout(dcpl, H5D_COMPACT) < 0) + P_TEST_ERROR; + no_selection_io_cause_write_expected |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + no_selection_io_cause_read_expected |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + } + + if (test_mode == TEST_DISABLE_BY_API) { + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_OFF) < 0) + P_TEST_ERROR; + no_selection_io_cause_write_expected |= H5D_SEL_IO_DISABLE_BY_API; + no_selection_io_cause_read_expected |= H5D_SEL_IO_DISABLE_BY_API; + } + + /* Datatype conversion */ + if (test_mode & TEST_DATATYPE_CONVERSION) { + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + P_TEST_ERROR; + tid = H5T_NATIVE_UINT; + + /* If we're testing a too small tconv buffer, set the buffer to be too small */ + if (test_mode & TEST_TCONV_BUF_TOO_SMALL) { + if (H5Pset_buffer(dxpl, sizeof(int), NULL, NULL) < 0) + P_TEST_ERROR; + + /* If we're using in-place type conversion sel io will succeed */ + if (test_mode & TEST_IN_PLACE_TCONV) { + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + P_TEST_ERROR; + } + else + no_selection_io_cause_write_expected |= H5D_SEL_IO_TCONV_BUF_TOO_SMALL; + + /* In-place type conversion for read doesn't require modify_write_buf */ + } + + /* If the tconv buf is largge enough sel io will succeed */ + } + + /* Create 1d data space */ + dims[0] = DSET_SELECT_DIM; + if ((sid = H5Screate_simple(1, dims, NULL)) < 0) + P_TEST_ERROR; + + if (is_chunked) { + cdims[0] = DSET_SELECT_CHUNK_DIM; + if (H5Pset_chunk(dcpl, 1, cdims) < 0) + P_TEST_ERROR; + } + + if ((did = H5Dcreate2(fid, "no_selection_io_cause", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, + H5P_DEFAULT)) < 0) + P_TEST_ERROR; + + /* Initialize data */ + for (i = 0; i < DSET_SELECT_DIM; i++) + wbuf[i] = i; + + if (H5Dwrite(did, tid, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) + P_TEST_ERROR; + + if (H5Pget_no_selection_io_cause(dxpl, &no_selection_io_cause_write) < 0) + P_TEST_ERROR; + + /* Verify causes of no selection I/O for write are as expected */ + if (no_selection_io_cause_write != no_selection_io_cause_write_expected) + P_TEST_ERROR; + + if (H5Dread(did, tid, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) + P_TEST_ERROR; + + if (H5Pget_no_selection_io_cause(dxpl, &no_selection_io_cause_read) < 0) + P_TEST_ERROR; + + /* Verify causes of no selection I/O for read are as expected */ + if (no_selection_io_cause_read != no_selection_io_cause_read_expected) + P_TEST_ERROR; + + if (H5Dclose(did) < 0) + P_TEST_ERROR; + + if (H5Pclose(dcpl) < 0) + P_TEST_ERROR; + + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + + if (H5Sclose(sid) < 0) + P_TEST_ERROR; + + if (H5Fclose(fid) < 0) + 
P_TEST_ERROR; + + return; + +} /* test_no_selection_io_cause_mode() */ + +/* + * Test for causes of not performing selection I/O + */ +static void +test_get_no_selection_io_cause(const char *filename, hid_t fapl) +{ + test_no_selection_io_cause_mode(filename, fapl, TEST_DISABLE_BY_API); + test_no_selection_io_cause_mode(filename, fapl, TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET); + test_no_selection_io_cause_mode(filename, fapl, TEST_DATATYPE_CONVERSION); + test_no_selection_io_cause_mode(filename, fapl, TEST_DATATYPE_CONVERSION | TEST_TCONV_BUF_TOO_SMALL); + test_no_selection_io_cause_mode( + filename, fapl, TEST_DATATYPE_CONVERSION | TEST_TCONV_BUF_TOO_SMALL | TEST_IN_PLACE_TCONV); + + CHECK_PASSED(); + + return; +} /* test_get_no_selection_io_cause() */ + +/*------------------------------------------------------------------------- + * Function: main + * + * Purpose: Runs tests with all combinations of configuration + * flags. + * + * Return: Success: 0 + * Failure: 1 + * + *------------------------------------------------------------------------- + */ +int +main(int argc, char *argv[]) +{ + int ret; + hid_t fapl = H5I_INVALID_HID; + hid_t fid = H5I_INVALID_HID; + int test_select_config; + unsigned chunked; + unsigned dtrans; + unsigned mwbuf; + + h5_reset(); + + /* Initialize MPI */ + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) + P_TEST_ERROR; + + /* Set MPIO file driver */ + if (H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) + P_TEST_ERROR; + + if ((fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + P_TEST_ERROR; + + /* Test with contiguous or chunked dataset */ + for (chunked = FALSE; chunked <= TRUE; chunked++) { + + /* Data transforms only apply to integer or floating-point datasets */ + /* therefore, not all tests are run with data transform */ + for (dtrans = FALSE; dtrans <= TRUE; dtrans++) { + + /* Test with and without modify_write_buf turned on */ + for (mwbuf = FALSE; mwbuf <= TRUE; mwbuf++) { + + if (MAINPROCESS) { + /* Print configuration message */ + printf("Testing for selection I/O "); + if (chunked) + printf("with chunked dataset, "); + else + printf("with contiguous dataset, "); + if (dtrans) + printf("data transform, "); + else + printf("without data transform, "); + if (mwbuf) + printf("and with modifying write buffers\n"); + else + printf("and without modifying write buffers\n"); + } + + for (test_select_config = (int)TEST_NO_TYPE_CONV; + test_select_config < (int)TEST_SELECT_NTESTS; test_select_config++) { + + switch (test_select_config) { + + case TEST_NO_TYPE_CONV: /* case 1 */ + if (MAINPROCESS) + TESTING_2("No type conversion (null case)"); + + test_no_type_conv(fid, chunked, dtrans, mwbuf); + + break; + + case TEST_NO_SIZE_CHANGE_NO_BKG: /* case 2 */ + if (MAINPROCESS) + TESTING_2("No size change, no background buffer"); + + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + continue; + } + + test_no_size_change_no_bkg(fid, chunked, mwbuf); + + break; + + case TEST_LARGER_MEM_NO_BKG: /* case 3 */ + if (MAINPROCESS) + TESTING_2("Larger memory type, no background buffer"); + + test_larger_mem_type_no_bkg(fid, chunked, dtrans, mwbuf); + + break; + + case TEST_SMALLER_MEM_NO_BKG: /* case 4 */ + if (MAINPROCESS) + TESTING_2("Smaller memory type, no background buffer"); + + test_smaller_mem_type_no_bkg(fid, chunked, dtrans, mwbuf); + + break; + + case 
TEST_CMPD_WITH_BKG: /* case 5 */ + if (MAINPROCESS) + TESTING_2("Compound types with background buffer"); + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + continue; + } + + test_cmpd_with_bkg(fid, chunked, mwbuf); + + break; + + case TEST_TYPE_CONV_SEL_EMPTY: /* case 6 */ + if (MAINPROCESS) + TESTING_2("Empty selections + Type conversion"); + + test_type_conv_sel_empty(fid, chunked, dtrans, mwbuf); + + break; + + case TEST_MULTI_CONV_NO_BKG: /* case 7 */ + if (MAINPROCESS) + TESTING_2("multi-datasets: type conv + no bkg buffer"); + + test_multi_dsets_no_bkg(fid, chunked, dtrans, mwbuf); + + break; + + case TEST_MULTI_CONV_BKG: /* case 8 */ + if (MAINPROCESS) + TESTING_2("multi-datasets: type conv + bkg buffer"); + + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + } + else + test_multi_dsets_cmpd_with_bkg(fid, chunked, mwbuf); + + break; + + case TEST_MULTI_CONV_SIZE_CHANGE: /* case 9 */ + if (MAINPROCESS) + TESTING_2("multi-datasets: type conv + size change + no bkg buffer"); + + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + } + else + test_multi_dsets_size_change_no_bkg(fid, chunked, mwbuf); + + break; + + case TEST_MULTI_CONV_SEL_EMPTY: /* case 10 */ + if (MAINPROCESS) + TESTING_2("multi-datasets: type conv + empty selections"); + + test_multi_dsets_conv_sel_empty(fid, chunked, dtrans, mwbuf); + + break; + + case TEST_MULTI_ALL: /* case 11 */ + if (MAINPROCESS) + TESTING_2("multi-datasets: no conv + conv without bkg + conv with bkg"); + + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + } + else + test_multi_dsets_all(2, fid, chunked, mwbuf); + + break; + + case TEST_SELECT_NTESTS: + default: + P_TEST_ERROR; + break; + + } /* end switch */ + + } /* end for test_select_config */ + + } /* end mwbuf */ + + } /* end dtrans */ + } /* end chunked */ + + if (H5Fclose(fid) < 0) + P_TEST_ERROR; + + if (MAINPROCESS) { + printf("\n"); + TESTING("Testing for H5Pget_no_selection_io_cause()"); + } + test_get_no_selection_io_cause(FILENAME, fapl); + + /* Barrier to make sure all ranks are done before deleting the file, and + * also to clean up output (make sure PASSED is printed before any of the + * following messages) */ + if (MPI_Barrier(MPI_COMM_WORLD) != MPI_SUCCESS) + P_TEST_ERROR; + + /* Delete file */ + if (H5Fdelete(FILENAME, fapl) < 0) + P_TEST_ERROR; + + if (H5Pclose(fapl) < 0) + P_TEST_ERROR; + + /* Gather errors from all processes */ + MPI_Allreduce(&nerrors, &ret, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); + nerrors = ret; + + if (MAINPROCESS) { + printf("\n===================================\n"); + if (nerrors) + HDprintf("***Parallel selection I/O dataset tests detected %d errors***\n", nerrors); + else + HDprintf("Parallel selection I/O dataset tests finished with no errors\n"); + printf("===================================\n"); + } + + /* close HDF5 library */ + H5close(); + + /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ + MPI_Finalize(); + + /* cannot just return (nerrors) because exit code is limited to 1 byte */ + return (nerrors != 0); +} /* end main() */ diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c index 85df3bd4939..f827aa5a823 100644 --- a/testpar/t_subfiling_vfd.c +++ b/testpar/t_subfiling_vfd.c @@ -425,7 +425,10 @@ 
test_stripe_sizes(void) VRFY(tmp_filename, "HDmalloc succeeded"); dxpl_id = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_id >= 0), "DCPL creation succeeded"); + VRFY((dxpl_id >= 0), "DXPL creation succeeded"); + + /* Set selection I/O mode on DXPL */ + VRFY((H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON) >= 0), "H5Pset_selection_io succeeded"); for (size_t i = 0; i < SUBF_NITER; i++) { H5FD_subfiling_params_t cfg; @@ -1011,12 +1014,19 @@ test_read_different_stripe_size(void) hid_t file_id = H5I_INVALID_HID; hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; char *tmp_filename = NULL; void *buf = NULL; curr_nerrors = nerrors; + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "DXPL creation succeeded"); + + /* Set selection I/O mode on DXPL */ + VRFY((H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON) >= 0), "H5Pset_selection_io succeeded"); + if (MAINPROCESS) TESTING_2("file re-opening with different stripe size"); @@ -1066,7 +1076,7 @@ test_read_different_stripe_size(void) for (size_t i = 0; i < count[0]; i++) ((SUBF_C_TYPE *)buf)[i] = (SUBF_C_TYPE)((size_t)mpi_rank + i); - VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, dxpl_id, buf) >= 0), "Dataset write succeeded"); HDfree(buf); @@ -1133,7 +1143,7 @@ test_read_different_stripe_size(void) buf = HDcalloc(1, count[0] * sizeof(SUBF_C_TYPE)); VRFY(buf, "HDcalloc succeeded"); - VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, dxpl_id, buf) >= 0), "Dataset read succeeded"); for (size_t i = 0; i < count[0]; i++) { @@ -1185,6 +1195,7 @@ test_read_different_stripe_size(void) } H5E_END_TRY; + VRFY((H5Pclose(dxpl_id) >= 0), "DXPL close succeeded"); VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); HDfree(tmp_filename); @@ -1214,11 +1225,18 @@ test_subfiling_precreate_rank_0(void) hid_t file_id = H5I_INVALID_HID; hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; void *buf = NULL; curr_nerrors = nerrors; + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "DXPL creation succeeded"); + + /* Set selection I/O mode on DXPL */ + VRFY((H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON) >= 0), "H5Pset_selection_io succeeded"); + if (MAINPROCESS) TESTING_2("file pre-creation on rank 0"); @@ -1278,7 +1296,7 @@ test_subfiling_precreate_rank_0(void) for (size_t i = 0; i < dset_dims[0]; i++) ((SUBF_C_TYPE *)buf)[i] = (SUBF_C_TYPE)((i / n_elements_per_rank) + (i % n_elements_per_rank)); - VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, dxpl_id, buf) >= 0), "Dataset write succeeded"); HDfree(buf); @@ -1357,7 +1375,7 @@ test_subfiling_precreate_rank_0(void) buf = HDcalloc(1, count[0] * sizeof(SUBF_C_TYPE)); VRFY(buf, "HDcalloc succeeded"); - VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, dxpl_id, buf) >= 0), "Dataset read succeeded"); for (size_t i = 0; i < n_elements_per_rank; i++) { @@ -1380,6 +1398,7 @@ test_subfiling_precreate_rank_0(void) H5E_END_TRY; VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "DXPL 
close succeeded"); CHECK_PASSED(); } @@ -1405,11 +1424,18 @@ test_subfiling_write_many_read_one(void) hid_t file_id = H5I_INVALID_HID; hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; void *buf = NULL; curr_nerrors = nerrors; + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "DXPL creation succeeded"); + + /* Set selection I/O mode on DXPL */ + VRFY((H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON) >= 0), "H5Pset_selection_io succeeded"); + if (MAINPROCESS) TESTING_2("reading back file with single MPI rank"); @@ -1461,7 +1487,7 @@ test_subfiling_write_many_read_one(void) for (size_t i = 0; i < count[0]; i++) ((SUBF_C_TYPE *)buf)[i] = (SUBF_C_TYPE)((size_t)mpi_rank + i); - VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, dxpl_id, buf) >= 0), "Dataset write succeeded"); HDfree(buf); @@ -1486,7 +1512,7 @@ test_subfiling_write_many_read_one(void) buf = HDcalloc(1, target_size); VRFY(buf, "HDcalloc succeeded"); - VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, H5S_ALL, H5P_DEFAULT, buf) >= 0), + VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, H5S_ALL, dxpl_id, buf) >= 0), "Dataset read succeeded"); for (size_t i = 0; i < (size_t)mpi_size; i++) { @@ -1516,6 +1542,7 @@ test_subfiling_write_many_read_one(void) VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "DXPL close succeeded"); CHECK_PASSED(); } @@ -1543,11 +1570,18 @@ test_subfiling_write_many_read_few(void) hid_t file_id = H5I_INVALID_HID; hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; void *buf = NULL; curr_nerrors = nerrors; + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "DXPL creation succeeded"); + + /* Set selection I/O mode on DXPL */ + VRFY((H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON) >= 0), "H5Pset_selection_io succeeded"); + if (MAINPROCESS) TESTING_2("reading back file with fewer MPI ranks than written with"); @@ -1609,7 +1643,7 @@ test_subfiling_write_many_read_few(void) for (size_t i = 0; i < count[0]; i++) ((SUBF_C_TYPE *)buf)[i] = (SUBF_C_TYPE)((size_t)mpi_rank + i); - VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, dxpl_id, buf) >= 0), "Dataset write succeeded"); HDfree(buf); @@ -1664,7 +1698,7 @@ test_subfiling_write_many_read_few(void) buf = HDcalloc(1, target_size); VRFY(buf, "HDcalloc succeeded"); - VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, H5S_ALL, H5P_DEFAULT, buf) >= 0), + VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, H5S_ALL, dxpl_id, buf) >= 0), "Dataset read succeeded"); for (size_t i = 0; i < (size_t)mpi_size; i++) { @@ -1699,6 +1733,7 @@ test_subfiling_write_many_read_few(void) VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "DXPL close succeeded"); CHECK_PASSED(); } @@ -1727,6 +1762,7 @@ test_subfiling_h5fuse(void) hid_t file_id = H5I_INVALID_HID; hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; void *buf = NULL; int skip_test = 0; @@ -1734,6 +1770,12 @@ 
test_subfiling_h5fuse(void) curr_nerrors = nerrors; + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "DXPL creation succeeded"); + + /* Set selection I/O mode on DXPL */ + VRFY((H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON) >= 0), "H5Pset_selection_io succeeded"); + if (MAINPROCESS) TESTING_2("h5fuse utility"); @@ -1826,7 +1868,7 @@ test_subfiling_h5fuse(void) for (size_t i = 0; i < count[0]; i++) ((SUBF_C_TYPE *)buf)[i] = (SUBF_C_TYPE)((size_t)mpi_rank + i); - VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, dxpl_id, buf) >= 0), "Dataset write succeeded"); HDfree(buf); @@ -1899,7 +1941,7 @@ test_subfiling_h5fuse(void) buf = HDcalloc(1, target_size); VRFY(buf, "HDcalloc succeeded"); - VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, H5S_ALL, H5P_DEFAULT, buf) >= 0), + VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, H5S_ALL, dxpl_id, buf) >= 0), "Dataset read succeeded"); for (size_t i = 0; i < (size_t)mpi_size; i++) { @@ -1969,6 +2011,7 @@ test_subfiling_h5fuse(void) } VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "DXPL close succeeded"); mpi_code_g = MPI_Barrier(comm_g); VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); @@ -2132,9 +2175,6 @@ main(int argc, char **argv) H5open(); - /* Enable selection I/O using internal temporary workaround */ - H5_use_selection_io_g = TRUE; - if (MAINPROCESS) { HDprintf("Testing Subfiling VFD functionality\n"); } From 912a4fd283431f126d1cabc3e25c92b1566f1066 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 28 Apr 2023 17:12:51 -0700 Subject: [PATCH 176/231] Converted H5D asserts to normal error checking (#2842) These cases can trip when processing malformed files and it's better to invoke normal HDF5 error handling than crash a process. --- src/H5Dbtree.c | 32 +++++++++-------------- src/H5Dchunk.c | 70 ++++++++++++++++++++++++-------------------------- 2 files changed, 45 insertions(+), 57 deletions(-) diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c index 2937b507cd0..a9dfad6ad6c 100644 --- a/src/H5Dbtree.c +++ b/src/H5Dbtree.c @@ -10,13 +10,9 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* Programmer: Robb Matzke - * Wednesday, October 8, 1997 - * - * Purpose: v1 B-tree indexed (chunked) I/O functions. The chunks are +/* Purpose: v1 B-tree indexed (chunked) I/O functions. The chunks are * given a multi-dimensional index which is used as a lookup key * in a B-tree that maps chunk index to disk address. 
- * */ /****************/ @@ -627,15 +623,11 @@ H5D__btree_remove(H5F_t *f, haddr_t addr, void *_lt_key /*in,out */, hbool_t *lt } /* end H5D__btree_remove() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_decode_key + * Function: H5D__btree_decode_key * - * Purpose: Decodes a raw key into a native key for the B-tree - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Robb Matzke - * Friday, October 10, 1997 + * Purpose: Decodes a raw key into a native key for the B-tree * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -644,33 +636,33 @@ H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key const H5O_layout_chunk_t *layout; /* Chunk layout description */ H5D_btree_key_t *key = (H5D_btree_key_t *)_key; /* Pointer to decoded key */ hsize_t tmp_offset; /* Temporary coordinate offset, from file */ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* check args */ HDassert(shared); HDassert(raw); HDassert(key); layout = (const H5O_layout_chunk_t *)shared->udata; HDassert(layout); - HDassert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS); - /* decode */ + if (layout->ndims > H5O_LAYOUT_NDIMS) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "bad number of dimensions") + UINT32DECODE(raw, key->nbytes); UINT32DECODE(raw, key->filter_mask); - for (u = 0; u < layout->ndims; u++) { + for (unsigned u = 0; u < layout->ndims; u++) { if (layout->dim[u] == 0) HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", u) /* Retrieve coordinate offset */ UINT64DECODE(raw, tmp_offset); - HDassert(0 == (tmp_offset % layout->dim[u])); + if (0 != (tmp_offset % layout->dim[u])) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "bad coordinate offset") /* Convert to a scaled offset */ key->scaled[u] = tmp_offset / layout->dim[u]; - } /* end for */ + } done: FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 59577c3171b..0ab4da15283 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -10,31 +10,28 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* Programmer: Quincey Koziol - * Thursday, April 24, 2008 - * - * Purpose: Abstract indexed (chunked) I/O functions. The logical - * multi-dimensional dataspace is regularly partitioned into - * same-sized "chunks", the first of which is aligned with the - * logical origin. The chunks are indexed by different methods, - * that map a chunk index to disk address. Each chunk can be - * compressed independently and the chunks may move around in the - * file as their storage requirements change. - * - * Cache: Disk I/O is performed in units of chunks and H5MF_alloc() - * contains code to optionally align chunks on disk block - * boundaries for performance. - * - * The chunk cache is an extendible hash indexed by a function - * of storage B-tree address and chunk N-dimensional offset - * within the dataset. Collisions are not resolved -- one of - * the two chunks competing for the hash slot must be preempted - * from the cache. All entries in the hash also participate in - * a doubly-linked list and entries are penalized by moving them - * toward the front of the list. 
When a new chunk is about to - * be added to the cache the heap is pruned by preempting - * entries near the front of the list to make room for the new - * entry which is added to the end of the list. +/* Purpose: Abstract indexed (chunked) I/O functions. The logical + * multi-dimensional dataspace is regularly partitioned into + * same-sized "chunks", the first of which is aligned with the + * logical origin. The chunks are indexed by different methods, + * that map a chunk index to disk address. Each chunk can be + * compressed independently and the chunks may move around in the + * file as their storage requirements change. + * + * Cache: Disk I/O is performed in units of chunks and H5MF_alloc() + * contains code to optionally align chunks on disk block + * boundaries for performance. + * + * The chunk cache is an extendible hash indexed by a function + * of storage B-tree address and chunk N-dimensional offset + * within the dataset. Collisions are not resolved -- one of + * the two chunks competing for the hash slot must be preempted + * from the cache. All entries in the hash also participate in + * a doubly-linked list and entries are penalized by moving them + * toward the front of the list. When a new chunk is about to + * be added to the cache the heap is pruned by preempting + * entries near the front of the list to make room for the new + * entry which is added to the end of the list. */ /****************/ @@ -670,31 +667,30 @@ H5D__get_chunk_storage_size(H5D_t *dset, const hsize_t *offset, hsize_t *storage /*------------------------------------------------------------------------- * Function: H5D__chunk_set_info_real * - * Purpose: Internal routine to set the information about chunks for a dataset - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * Tuesday, June 30, 2009 + * Purpose: Internal routine to set the information about chunks for a dataset * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, const hsize_t *curr_dims, const hsize_t *max_dims) { - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Sanity checks */ HDassert(layout); - HDassert(ndims > 0); HDassert(curr_dims); + /* Can happen when corrupt files are parsed */ + if (ndims == 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "number of dimensions cannot be zero") + /* Compute the # of chunks in dataset dimensions */ - for (u = 0, layout->nchunks = 1, layout->max_nchunks = 1; u < ndims; u++) { + layout->nchunks = 1; + layout->max_nchunks = 1; + for (unsigned u = 0; u < ndims; u++) { /* Round up to the next integer # of chunks, to accommodate partial chunks */ layout->chunks[u] = ((curr_dims[u] + layout->dim[u]) - 1) / layout->dim[u]; if (H5S_UNLIMITED == max_dims[u]) @@ -710,7 +706,7 @@ H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, const hsize /* Accumulate the # of chunks */ layout->nchunks *= layout->chunks[u]; layout->max_nchunks *= layout->max_chunks[u]; - } /* end for */ + } /* Get the "down" sizes for each dimension */ H5VM_array_down(ndims, layout->chunks, layout->down_chunks); From c58ee5885445ef54c7d82b37466ad1b171670e7d Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sat, 29 Apr 2023 10:35:13 -0700 Subject: [PATCH 177/231] Don't use strnlen when len is not known 
(#2855) The datatype object header message decode function was updated to do bounds checking on the decode buffer. This buffer may arrive with no buffer size via H5Tdecode(), in which case the buffer size will have been set to SIZE_MAX by the library. This fix changes the string length calls to strlen when we don't know the buffer size (and avoids a potential compiler bug with icc 17). --- src/H5Odtype.c | 56 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/src/H5Odtype.c b/src/H5Odtype.c index b6e1b907b97..977e4b189ff 100644 --- a/src/H5Odtype.c +++ b/src/H5Odtype.c @@ -336,18 +336,31 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t for (dt->shared->u.compnd.nmembs = 0; dt->shared->u.compnd.nmembs < nmembs; dt->shared->u.compnd.nmembs++) { - size_t actual_name_length; /* Actual length of name */ - size_t max = (size_t)(p_end - *pp + 1); /* Max possible name length */ - unsigned ndims = 0; /* Number of dimensions of the array field */ - htri_t can_upgrade; /* Whether we can upgrade this type's version */ - hsize_t dim[H5O_LAYOUT_NDIMS]; /* Dimensions of the array */ - H5T_t *array_dt; /* Temporary pointer to the array datatype */ - H5T_t *temp_type; /* Temporary pointer to the field's datatype */ + size_t actual_name_length = 0; /* Actual length of name */ + unsigned ndims = 0; /* Number of dimensions of the array field */ + htri_t can_upgrade; /* Whether we can upgrade this type's version */ + hsize_t dim[H5O_LAYOUT_NDIMS]; /* Dimensions of the array */ + H5T_t *array_dt; /* Temporary pointer to the array datatype */ + H5T_t *temp_type; /* Temporary pointer to the field's datatype */ /* Get the length of the field name */ - actual_name_length = HDstrnlen((const char *)*pp, max); - if (actual_name_length == max) - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, FAIL, "field name not null terminated") + if (!skip) { + /* There is a realistic buffer end, so check bounds */ + + size_t max = (size_t)(p_end - *pp + 1); /* Max possible name length */ + + actual_name_length = HDstrnlen((const char *)*pp, max); + if (actual_name_length == max) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, FAIL, "field name not null terminated") + } + else { + /* The buffer end can't be determined when it's an unbounded buffer + * passed via H5Tdecode(), so don't bounds check and hope for + * the best. 
+ */ + actual_name_length = HDstrlen((const char *)*pp); + } + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, actual_name_length, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); @@ -624,12 +637,25 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t for (dt->shared->u.enumer.nmembs = 0; dt->shared->u.enumer.nmembs < nmembs; dt->shared->u.enumer.nmembs++) { - size_t actual_name_length; /* Actual length of name */ - size_t max = (size_t)(p_end - *pp + 1); /* Max possible name length */ + size_t actual_name_length = 0; /* Actual length of name */ - actual_name_length = HDstrnlen((const char *)*pp, max); - if (actual_name_length == max) - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, FAIL, "enum name not null terminated") + /* Get the length of the enum name */ + if (!skip) { + /* There is a realistic buffer end, so check bounds */ + + size_t max = (size_t)(p_end - *pp + 1); /* Max possible name length */ + + actual_name_length = HDstrnlen((const char *)*pp, max); + if (actual_name_length == max) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, FAIL, "enum name not null terminated") + } + else { + /* The buffer end can't be determined when it's an unbounded buffer + * passed via H5Tdecode(), so don't bounds check and hope for + * the best. + */ + actual_name_length = HDstrlen((const char *)*pp); + } if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, actual_name_length, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); From 2f64975e6984f96c0a26d685fe41917e27be9057 Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Sat, 29 Apr 2023 12:35:33 -0500 Subject: [PATCH 178/231] chore(configure.ac): fix output message for enabling tools (#2684) (#2854) Close #2684. --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index d5f46da7577..1675bee56fb 100644 --- a/configure.ac +++ b/configure.ac @@ -1155,7 +1155,7 @@ AC_SUBST([HDF5_TOOLS]) ## Default is to build tests and tools HDF5_TOOLS=yes -AC_MSG_CHECKING([if building tools is disabled]) +AC_MSG_CHECKING([if building tools is enabled]) AC_ARG_ENABLE([tools], [AS_HELP_STRING([--enable-tools], From 023753404269315ca44603fb929a6db6d4ff8e96 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Sat, 29 Apr 2023 12:36:17 -0500 Subject: [PATCH 179/231] Add DT workflow for creating daily binaries (#2843) --- .github/workflows/cmake-ctest.yml | 217 ++++++++++++++++++++++++++++++ .github/workflows/daily-build.yml | 21 +++ .github/workflows/tarball.yml | 105 +++++++++++++++ CMakePresets.json | 18 ++- 4 files changed, 360 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/cmake-ctest.yml create mode 100644 .github/workflows/daily-build.yml create mode 100644 .github/workflows/tarball.yml diff --git a/.github/workflows/cmake-ctest.yml b/.github/workflows/cmake-ctest.yml new file mode 100644 index 00000000000..e3a52b8ff91 --- /dev/null +++ b/.github/workflows/cmake-ctest.yml @@ -0,0 +1,217 @@ +name: hdf5 dev ctest runs + +# Controls when the action will run. 
Triggers the workflow on a schedule +on: + workflow_call: + inputs: + file_base: + description: "The common base name of the source tarballs" + required: true + type: string + +# A workflow run is made up of one or more jobs that can run sequentially or +# in parallel +jobs: + build_and_test_win: + # Windows w/ MSVC + CMake + # + name: "Windows MSVC CTest" + runs-on: windows-latest + steps: + - name: Install Dependencies (Windows) + run: choco install ninja + + - name: Enable Developer Command Prompt + uses: ilammy/msvc-dev-cmd@v1.12.1 + + - name: Set file base name (Windows) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + shell: bash + + # Get files created by release script + - name: Get zip-tarball (Windows) + uses: actions/download-artifact@v3 + with: + name: zip-tarball + path: ${{ github.workspace }} + + - name: using powershell + shell: pwsh + run: Get-Location + + - name: List files for the space (Windows) + run: | + Get-ChildItem -Path ${{ github.workspace }} + Get-ChildItem -Path ${{ runner.workspace }} + shell: pwsh + + - name: Uncompress source (Windows) + working-directory: ${{ github.workspace }} + run: 7z x ${{ steps.set-file-base.outputs.FILE_BASE }}.zip + shell: bash + + - name: Run ctest (Windows) + run: | + cd "${{ runner.workspace }}/hdf5/hdfsrc" + cmake --workflow --preset=ci-StdShar-MSVC --fresh + shell: bash + + - name: Publish binary (Windows) + id: publish-ctest-binary + run: | + mkdir "${{ runner.workspace }}/build" + mkdir "${{ runner.workspace }}/build/hdf5" + cp ${{ runner.workspace }}/hdf5/hdfsrc/COPYING ${{ runner.workspace }}/build/hdf5 + cp ${{ runner.workspace }}/hdf5/hdfsrc/COPYING_LBNL_HDF5 ${{ runner.workspace }}/build/hdf5 + cp ${{ runner.workspace }}/hdf5/hdfsrc/README.md ${{ runner.workspace }}/build/hdf5 + cp ${{ runner.workspace }}/hdf5/build/ci-StdShar-MSVC/*.zip ${{ runner.workspace }}/build/hdf5 + cd "${{ runner.workspace }}/build" + 7z a -tzip ${{ steps.set-file-base.outputs.FILE_BASE }}-win_vs2022.zip hdf5 + shell: bash + + - name: List files in the space (Windows) + run: | + Get-ChildItem -Path ${{ github.workspace }} + Get-ChildItem -Path ${{ runner.workspace }} + shell: pwsh + + # Save files created by ctest script + - name: Save published binary (Windows) + uses: actions/upload-artifact@v3 + with: + name: zip-vs2022-binary + path: ${{ runner.workspace }}/build/${{ steps.set-file-base.outputs.FILE_BASE }}-win_vs2022.zip + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + + build_and_test_linux: + # Linux (Ubuntu) w/ gcc + CMake + # + name: "Ubuntu gcc CMake" + runs-on: ubuntu-latest + steps: + - name: Install CMake Dependencies (Linux) + run: sudo apt-get install ninja-build + + - name: Set file base name (Linux) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + + # Get files created by release script + - name: Get tgz-tarball (Linux) + uses: actions/download-artifact@v3 + with: + name: tgz-tarball + path: ${{ github.workspace }} + + - name: List files for the space (Linux) + run: | + ls ${{ github.workspace }} + ls ${{ runner.workspace }} + + - name: Uncompress source (Linux) + run: tar -zxvf ${{ github.workspace }}/${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz + + - name: Run ctest (Linux) + run: | + cd "${{ runner.workspace }}/hdf5/hdfsrc" + cmake --workflow --preset=ci-StdShar-GNUC --fresh + shell: bash + + - name: Publish 
binary (Linux) + id: publish-ctest-binary + run: | + mkdir "${{ runner.workspace }}/build" + mkdir "${{ runner.workspace }}/build/hdf5" + cp ${{ runner.workspace }}/hdf5/hdfsrc/COPYING ${{ runner.workspace }}/build/hdf5 + cp ${{ runner.workspace }}/hdf5/hdfsrc/COPYING_LBNL_HDF5 ${{ runner.workspace }}/build/hdf5 + cp ${{ runner.workspace }}/hdf5/hdfsrc/README.md ${{ runner.workspace }}/build/hdf5 + cp ${{ runner.workspace }}/hdf5/build/ci-StdShar-GNUC/*.tar.gz ${{ runner.workspace }}/build/hdf5 + cd "${{ runner.workspace }}/build" + tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}-ubuntu-2204.tar.gz hdf5 + shell: bash + + - name: List files in the space (Linux) + run: | + ls ${{ github.workspace }} + ls ${{ runner.workspace }} + + # Save files created by ctest script + - name: Save published binary (Linux) + uses: actions/upload-artifact@v3 + with: + name: tgz-ubuntu-2204-binary + path: ${{ runner.workspace }}/build/${{ steps.set-file-base.outputs.FILE_BASE }}-ubuntu-2204.tar.gz + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + + build_and_test_mac: + # MacOS w/ Clang + CMake + # + name: "MacOS Clang CMake" + runs-on: macos-11 + steps: + - name: Install Dependencies (MacOS) + run: brew install ninja + + - name: Set file base name (MacOS) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + + # Get files created by release script + - name: Get tgz-tarball (MacOS) + uses: actions/download-artifact@v3 + with: + name: tgz-tarball + path: ${{ github.workspace }} + + - name: List files for the space (MacOS) + run: | + ls ${{ github.workspace }} + ls ${{ runner.workspace }} + + - name: Uncompress source (MacOS) + run: tar -zxvf ${{ github.workspace }}/${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz + + # symlinks the compiler executables to a common location + - name: Setup GNU Fortran + uses: modflowpy/install-gfortran-action@v1 + + - name: Run ctest (MacOS) + id: run-ctest + run: | + cd "${{ runner.workspace }}/hdf5/hdfsrc" + cmake --workflow --preset=ci-StdShar-Clang --fresh + shell: bash + + - name: Publish binary (MacOS) + id: publish-ctest-binary + run: | + mkdir "${{ runner.workspace }}/build" + mkdir "${{ runner.workspace }}/build/hdf5" + cp ${{ runner.workspace }}/hdf5/hdfsrc/COPYING ${{ runner.workspace }}/build/hdf5 + cp ${{ runner.workspace }}/hdf5/hdfsrc/COPYING_LBNL_HDF5 ${{ runner.workspace }}/build/hdf5 + cp ${{ runner.workspace }}/hdf5/hdfsrc/README.md ${{ runner.workspace }}/build/hdf5 + cp ${{ runner.workspace }}/hdf5/build/ci-StdShar-Clang/*.tar.gz ${{ runner.workspace }}/build/hdf5 + cd "${{ runner.workspace }}/build" + tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}-osx12.tar.gz hdf5 + shell: bash + + - name: List files in the space (MacOS) + run: | + ls ${{ github.workspace }} + ls ${{ runner.workspace }} + + # Save files created by ctest script + - name: Save published binary (MacOS) + uses: actions/upload-artifact@v3 + with: + name: tgz-osx12-binary + path: ${{ runner.workspace }}/build/${{ steps.set-file-base.outputs.FILE_BASE }}-osx12.tar.gz + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + diff --git a/.github/workflows/daily-build.yml b/.github/workflows/daily-build.yml new file mode 100644 index 00000000000..0e040cae83a --- /dev/null +++ b/.github/workflows/daily-build.yml @@ -0,0 +1,21 @@ +name: hdf5 dev daily build + +# Controls when the action will run. 
Triggers the workflow on a schedule +on: + workflow_dispatch: + schedule: + - cron: "6 0 * * *" + +# A workflow run is made up of one or more jobs that can run sequentially or +# in parallel. +jobs: + call-workflow-tarball: + uses: ./.github/workflows/tarball.yml + + call-workflow-ctest: + needs: call-workflow-tarball + uses: ./.github/workflows/cmake-ctest.yml + with: + file_base: ${{ needs.call-workflow-tarball.outputs.file_base }} + if: ${{ needs.call-workflow-tarball.outputs.has_changes == 'true' }} + diff --git a/.github/workflows/tarball.yml b/.github/workflows/tarball.yml new file mode 100644 index 00000000000..12f3438d557 --- /dev/null +++ b/.github/workflows/tarball.yml @@ -0,0 +1,105 @@ +name: hdf5 dev tarball + +# Controls when the action will run. Triggers the workflow on a schedule +on: + workflow_call: + outputs: + has_changes: + description: "Whether there were changes the previous day" + value: ${{ jobs.check_commits.outputs.has_changes }} + file_base: + description: "The common base name of the source tarballs" + value: ${{ jobs.create_tarball.outputs.file_base }} + +# A workflow run is made up of one or more jobs that can run sequentially or +# in parallel +jobs: + check_commits: + name: Check for recent commits + runs-on: ubuntu-latest + outputs: + has_changes: ${{ steps.check-new-commits.outputs.has-new-commits }} + branch_ref: ${{ steps.get-branch-name.outputs.BRANCH_REF }} + branch_sha: ${{ steps.get-branch-sha.outputs.BRANCH_SHA }} + steps: + - name: Get branch name + id: get-branch-name + env: + GITHUB_REF: ${{ github.ref }} + GITHUB_REF_NAME: ${{ github.ref_name }} + GITHUB_HEAD_REF: ${{ github.head_ref }} + #run: echo "${{ env.GITHUB_REF_NAME }} | grep -P '[0-9]+/merge' &> /dev/null && BRANCH_REF=${{ env.GITHUB_HEAD_REF }} || BRANCH_REF=${{ env.GITHUB_REF_NAME }}" >> $GITHUB_OUTPUT + run: echo "BRANCH_REF=${{ env.GITHUB_HEAD_REF || env.GITHUB_REF_NAME }}" >> $GITHUB_OUTPUT + + - name: Get branch sha + id: get-branch-sha + env: + GITHUB_SHA: ${{ github.sha }} + GITHUB_WF_SHA: ${{ github.workflow_sha }} + run: | + SHORT_SHA=$(echo "${{ env.GITHUB_WF_SHA }}" | cut -c1-7) + echo "BRANCH_SHA=$SHORT_SHA" >> $GITHUB_OUTPUT + + - name: Check for changed source + id: check-new-commits + uses: adriangl/check-new-commits-action@v1 + with: + seconds: 86400 # One day in seconds + branch: '${{ steps.get-branch-name.outputs.branch_ref }}' + + - run: echo "You have ${{ steps.check-new-commits.outputs.new-commits-number }} new commit(s) in ${{ steps.get-branch-name.outputs.BRANCH_REF }} ✅!" + if: ${{ steps.check-new-commits.outputs.has-new-commits == 'true' }} + + - run: echo "Short commit sha is ${{ steps.get-branch-sha.outputs.BRANCH_SHA }}!" 
+ + create_tarball: + name: Create a source tarball + runs-on: ubuntu-latest + needs: check_commits + if: ${{ needs.check_commits.outputs.has_changes == 'true' }} + outputs: + file_base: ${{ steps.set-file-base.outputs.FILE_BASE }} + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - name: Get Sources + uses: actions/checkout@v3 + with: + path: hdfsrc + + - name: Install Autotools Dependencies (Linux, serial) + run: | + sudo apt update + sudo apt install automake autoconf libtool libtool-bin gzip dos2unix + + - name: Set file base name + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "hdf5-${{ needs.check_commits.outputs.branch_ref }}-${{ needs.check_commits.outputs.branch_sha }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + + - name: Run release script + id: run-release-script + run: | + cd "$GITHUB_WORKSPACE/hdfsrc" + bin/bbrelease -d $GITHUB_WORKSPACE --branch ${{ needs.check_commits.outputs.branch_ref }} --revision gzip zip + shell: bash + + - name: List files in the repository + run: | + ls ${{ github.workspace }} + ls $GITHUB_WORKSPACE + + # Save files created by release script + - name: Save tgz-tarball + uses: actions/upload-artifact@v3 + with: + name: tgz-tarball + path: ${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + + - name: Save zip-tarball + uses: actions/upload-artifact@v3 + with: + name: zip-tarball + path: ${{ steps.set-file-base.outputs.FILE_BASE }}.zip + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` diff --git a/CMakePresets.json b/CMakePresets.json index d861b445cf5..66f31a4745d 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -104,6 +104,17 @@ { "name": "ci-StdShar-MSVC", "description": "MSVC Standard Config for x64 (Release)", + "inherits": [ + "ci-x64-Release-MSVC", + "ci-CPP", + "ci-Java", + "ci-StdShar", + "ci-StdExamples" + ] + }, + { + "name": "ci-StdShar-MSVC-Fortran", + "description": "MSVC Standard Config for x64 (Release)", "inherits": [ "ci-x64-Release-MSVC", "ci-CPP", @@ -171,7 +182,12 @@ "configurePreset": "ci-StdShar-MSVC", "inherits": [ "ci-x64-Release-MSVC" - ] + ], + "filter": { + "exclude": { + "name": "H5DUMP-tfloatsattrs" + } + } }, { "name": "ci-StdShar-Clang", From 97964c1e8aa18ba00e8e60ad776b8459c955c7b2 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sun, 30 Apr 2023 10:26:26 -0700 Subject: [PATCH 180/231] Fix TRACE macros in selection I/O plist calls (#2857) --- src/H5Pdxpl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c index 6eef558de65..942d6f21dea 100644 --- a/src/H5Pdxpl.c +++ b/src/H5Pdxpl.c @@ -2562,7 +2562,7 @@ H5Pget_selection_io(hid_t plist_id, H5D_selection_io_mode_t *selection_io_mode / herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) - H5TRACE2("e", "i*DC", plist_id, selection_io_mode); + H5TRACE2("e", "ix", plist_id, selection_io_mode); /* Check arguments */ if (NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_XFER))) @@ -2727,7 +2727,7 @@ H5Pget_modify_write_buf(hid_t plist_id, hbool_t *modify_write_buf /*out*/) herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) - H5TRACE2("e", "i*b", plist_id, modify_write_buf); + H5TRACE2("e", "ix", plist_id, modify_write_buf); /* Check arguments */ if (NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_XFER))) From 
27736c3031b44404a41d4d71ae3b64697a401a89 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sun, 30 Apr 2023 12:10:45 -0700 Subject: [PATCH 181/231] Remove selection I/O test from testphdf5 in CMake (#2860) t_select_io_dset is a stand-alone program, not a part of testphdf5. --- testpar/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt index fb66e76103e..3a44fca7c1b 100644 --- a/testpar/CMakeLists.txt +++ b/testpar/CMakeLists.txt @@ -8,7 +8,6 @@ project (HDF5_TEST_PAR C) set (testphdf5_SOURCES ${HDF5_TEST_PAR_SOURCE_DIR}/testphdf5.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_dset.c - ${HDF5_TEST_PAR_SOURCE_DIR}/t_select_io_dset.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_file.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_file_image.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_mdset.c From 8241362b6877a8a763825dc7c948d87e64d93ad2 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sun, 30 Apr 2023 12:46:33 -0700 Subject: [PATCH 182/231] Fix memory leaks in H5Dwrite w/ selection I/O (#2859) --- src/H5Dio.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/H5Dio.c b/src/H5Dio.c index f6f743c62f9..1cd58353011 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -876,6 +876,13 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) /* Free global piece array */ H5MM_xfree(io_info.sel_pieces); + /* Free selection I/O arrays */ + H5MM_xfree(io_info.mem_spaces); + H5MM_xfree(io_info.file_spaces); + H5MM_xfree(io_info.addrs); + H5MM_xfree(io_info.element_sizes); + H5MM_xfree(io_info.wbufs); + /* Free store array if it was allocated */ if (store != &store_local) H5MM_free(store); From fda997f4c9fd2678e5d634e2e81b0370548ca06f Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Sun, 30 Apr 2023 14:56:11 -0500 Subject: [PATCH 183/231] Use powershell shell to copy files (#2858) --- .github/workflows/cmake-ctest.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cmake-ctest.yml b/.github/workflows/cmake-ctest.yml index e3a52b8ff91..c21872d2ee6 100644 --- a/.github/workflows/cmake-ctest.yml +++ b/.github/workflows/cmake-ctest.yml @@ -64,13 +64,13 @@ jobs: run: | mkdir "${{ runner.workspace }}/build" mkdir "${{ runner.workspace }}/build/hdf5" - cp ${{ runner.workspace }}/hdf5/hdfsrc/COPYING ${{ runner.workspace }}/build/hdf5 - cp ${{ runner.workspace }}/hdf5/hdfsrc/COPYING_LBNL_HDF5 ${{ runner.workspace }}/build/hdf5 - cp ${{ runner.workspace }}/hdf5/hdfsrc/README.md ${{ runner.workspace }}/build/hdf5 - cp ${{ runner.workspace }}/hdf5/build/ci-StdShar-MSVC/*.zip ${{ runner.workspace }}/build/hdf5 + Copy-Item -Path ${{ runner.workspace }}/hdf5/hdfsrc/COPYING -Destination ${{ runner.workspace }}/build/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/hdfsrc/COPYING_LBNL_HDF5 -Destination ${{ runner.workspace }}/build/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/hdfsrc/README.md -Destination ${{ runner.workspace }}/build/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/build/ci-StdShar-MSVC/* -Destination ${{ runner.workspace }}/build/hdf5/ -Include *.zip cd "${{ runner.workspace }}/build" 7z a -tzip ${{ steps.set-file-base.outputs.FILE_BASE }}-win_vs2022.zip hdf5 - shell: bash + shell: pwsh - name: List files in the space (Windows) run: | From 849c70048b84ee63916223b4d0784968bca333e6 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 1 May 2023 09:06:39 -0700 
Subject: [PATCH 184/231] Remove "the the" from comments and docs (#2865) --- doxygen/dox/LearnBasics2.dox | 2 +- doxygen/dox/LearnBasics3.dox | 2 +- doxygen/dox/ViewTools.dox | 2 +- doxygen/examples/H5.format.1.0.html | 2 +- doxygen/examples/H5.format.1.1.html | 2 +- doxygen/examples/ImageSpec.html | 12 ++++++------ doxygen/examples/tables/propertyLists.dox | 2 +- doxygen/hdf5_navtree_hacks.js | 2 +- fortran/src/H5Af.c | 6 +++--- fortran/src/H5Lf.c | 2 +- fortran/src/H5Sf.c | 4 ++-- fortran/src/H5Tf.c | 2 +- fortran/test/tH5A_1_8.F90 | 12 ++++++------ hl/test/test_ds.c | 2 +- hl/test/test_dset_append.c | 2 +- release_docs/HISTORY-1_0-1_8_0.txt | 2 +- release_docs/HISTORY-1_8_0-1_10_0.txt | 4 ++-- test/tselect.c | 6 +++--- tools/lib/h5tools_dump.c | 2 +- 19 files changed, 35 insertions(+), 35 deletions(-) diff --git a/doxygen/dox/LearnBasics2.dox b/doxygen/dox/LearnBasics2.dox index 6f94c7f7eb5..87bbe87fe47 100644 --- a/doxygen/dox/LearnBasics2.dox +++ b/doxygen/dox/LearnBasics2.dox @@ -788,7 +788,7 @@ The function #H5Tarray_create creates a new array datatype object. Parameters sp \li the dimension permutation of the array, i.e., whether the elements of the array are listed in C or FORTRAN order.

    Working with existing array datatypes

    -When working with existing arrays, one must first determine the the rank, or number of dimensions, of the array. +When working with existing arrays, one must first determine the rank, or number of dimensions, of the array. The function #H5Tget_array_dims returns the rank of a specified array datatype. diff --git a/doxygen/dox/LearnBasics3.dox b/doxygen/dox/LearnBasics3.dox index 2fe0f5249d9..06afacd7d27 100644 --- a/doxygen/dox/LearnBasics3.dox +++ b/doxygen/dox/LearnBasics3.dox @@ -210,7 +210,7 @@ For details on compiling an HDF5 application: an extendible array dataset, pass in #H5P_DATASET_CREATE for the property list class. \li The #H5Pset_chunk call modifies a Dataset Creation Property List instance to store a chunked layout dataset and sets the size of the chunks used. -\li To extend an unlimited dimension dataset use the the #H5Dset_extent call. Please be aware that +\li To extend an unlimited dimension dataset use the #H5Dset_extent call. Please be aware that after this call, the dataset's dataspace must be refreshed with #H5Dget_space before more data can be accessed. \li The #H5Pget_chunk call retrieves the size of chunks for the raw data of a chunked layout dataset. \li Once there is no longer a need for a Property List instance, it should be closed with the #H5Pclose call. diff --git a/doxygen/dox/ViewTools.dox b/doxygen/dox/ViewTools.dox index 2212d4ba3f2..66b2def0624 100644 --- a/doxygen/dox/ViewTools.dox +++ b/doxygen/dox/ViewTools.dox @@ -465,7 +465,7 @@ example h5_crtgrpar.c. To disp \endcode \subsubsection subsubsecViewToolsViewDset_h5dumpEx5 Example 5 -The -p option is used to examine the the dataset filters, storage layout, and fill value properties of a dataset. +The -p option is used to examine the dataset filters, storage layout, and fill value properties of a dataset. This option can be useful for checking how well compression works, or even for analyzing performance and dataset size issues related to chunking. (The smaller the chunk size, the more chunks that HDF5 diff --git a/doxygen/examples/H5.format.1.0.html b/doxygen/examples/H5.format.1.0.html index 4eb05480239..26d04213d84 100644 --- a/doxygen/examples/H5.format.1.0.html +++ b/doxygen/examples/H5.format.1.0.html @@ -1488,7 +1488,7 @@

    Disk Format: Level 1E - Global Heap

    - diff --git a/doxygen/examples/H5.format.1.1.html b/doxygen/examples/H5.format.1.1.html index 9d03a766a12..3af50d66194 100644 --- a/doxygen/examples/H5.format.1.1.html +++ b/doxygen/examples/H5.format.1.1.html @@ -6091,7 +6091,7 @@

    Name: Shared Object Message

    Flag    Purpose
    !! \ingroup FH5A !! -!! \brief Creates a dataset as an attribute of a group, dataset, or named datatype +!! \brief Creates a dataset as an attribute of a group, dataset, or named datatype. !! !! \param loc_id Identifier of an object (group, dataset, or named datatype) attribute is attached to !! \param name Attribute name @@ -147,8 +161,8 @@ SUBROUTINE h5acreate_f(loc_id, name, type_id, space_id, attr_id, & INTEGER(HID_T), INTENT(OUT) :: attr_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: acpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: aapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: acpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id INTEGER(HID_T) :: acpl_id_default INTEGER(HID_T) :: aapl_id_default @@ -158,6 +172,7 @@ INTEGER(HID_T) FUNCTION H5Acreate2(loc_id, name, type_id, & space_id, acpl_id_default, aapl_id_default) BIND(C,NAME='H5Acreate2') IMPORT :: C_CHAR IMPORT :: HID_T + IMPLICIT NONE INTEGER(HID_T), INTENT(IN), VALUE :: loc_id CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name INTEGER(HID_T), INTENT(IN), VALUE :: type_id @@ -167,12 +182,13 @@ INTEGER(HID_T) FUNCTION H5Acreate2(loc_id, name, type_id, & END FUNCTION H5Acreate2 END INTERFACE + c_name = TRIM(name)//C_NULL_CHAR + acpl_id_default = H5P_DEFAULT_F aapl_id_default = H5P_DEFAULT_F IF (PRESENT(acpl_id)) acpl_id_default = acpl_id IF (PRESENT(aapl_id)) aapl_id_default = aapl_id - c_name = TRIM(name)//C_NULL_CHAR attr_id = h5acreate2(loc_id, c_name, type_id, space_id, & acpl_id_default, aapl_id_default) @@ -181,13 +197,94 @@ END FUNCTION H5Acreate2 END SUBROUTINE h5acreate_f +!> +!! \ingroup FH5A +!! +!! \brief Asynchronously creates a dataset as an attribute of a group, dataset, or named datatype. +!! +!! \param loc_id Identifier of an object (group, dataset, or named datatype) attribute is attached to +!! \param name Attribute name +!! \param type_id Attribute datatype identifier +!! \param space_id Attribute dataspace identifier +!! \param attr_id Attribute identifier +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param acpl_id Attribute creation property list identifier +!! \param aapl_id Attribute access property list identifier +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Acreate_async() +!! 
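! A minimal illustrative sketch (not part of the patch) of driving the
! h5acreate_async_f / h5aclose_async_f wrappers documented above and defined
! just below.  The subroutine name, the attribute name "version", and the
! incoming file_id / es_id (an already-created event set) are placeholder
! assumptions.
  SUBROUTINE demo_acreate_async(file_id, es_id, total_error)
    USE HDF5
    IMPLICIT NONE
    INTEGER(HID_T), INTENT(IN)    :: file_id   ! open file; attribute lands on its root group
    INTEGER(HID_T), INTENT(IN)    :: es_id     ! event set collecting the async requests
    INTEGER,        INTENT(INOUT) :: total_error
    INTEGER(HID_T)   :: space_id, attr_id
    INTEGER(HSIZE_T) :: dims(1)
    INTEGER          :: error

    dims(1) = 1
    CALL h5screate_simple_f(1, dims, space_id, error)
    ! Create the attribute asynchronously; the write step is omitted here.
    CALL h5acreate_async_f(file_id, "version", H5T_NATIVE_INTEGER, space_id, &
                           attr_id, es_id, error)
    IF (error /= 0) total_error = total_error + 1
    CALL h5aclose_async_f(attr_id, es_id, error)
    CALL h5sclose_f(space_id, error)
    ! The caller is expected to wait on es_id before relying on the attribute.
  END SUBROUTINE demo_acreate_async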
+ SUBROUTINE h5acreate_async_f(loc_id, name, type_id, space_id, attr_id, es_id, & + hdferr, acpl_id, aapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: name + INTEGER(HID_T), INTENT(IN) :: type_id + INTEGER(HID_T), INTENT(IN) :: space_id + INTEGER(HID_T), INTENT(OUT) :: attr_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: acpl_id + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: aapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: acpl_id_default + INTEGER(HID_T) :: aapl_id_default + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + + INTERFACE + INTEGER(HID_T) FUNCTION H5Acreate_async(file, func, line, loc_id, name, type_id, & + space_id, acpl_id_default, aapl_id_default, es_id) BIND(C,NAME='H5Acreate_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: type_id + INTEGER(HID_T), VALUE :: space_id + INTEGER(HID_T), VALUE :: acpl_id_default + INTEGER(HID_T), VALUE :: aapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Acreate_async + END INTERFACE + + acpl_id_default = H5P_DEFAULT_F + aapl_id_default = H5P_DEFAULT_F + IF (PRESENT(acpl_id)) acpl_id_default = acpl_id + IF (PRESENT(aapl_id)) aapl_id_default = aapl_id + IF (PRESENT(file)) file_default = file + IF (PRESENT(func)) func_default = func + IF (PRESENT(line)) line_default = INT(line, C_INT) + + c_name = TRIM(name)//C_NULL_CHAR + + attr_id = h5acreate_async(file_default, func_default, line_default, & + loc_id, c_name, type_id, space_id, & + acpl_id_default, aapl_id_default, es_id) + + hdferr = 0 + IF(attr_id.LT.0) hdferr = -1 + + END SUBROUTINE h5acreate_async_f + !> !! \ingroup FH5A !! !! \brief Opens an attribute specified by name. !! -!! \param obj_id Identifier of a group, dataset, or named -!! datatype attribute to be attached to +!! \param obj_id Identifier of a group, dataset, or named datatype attribute to be attached to !! \param name Attribute name !! \param attr_id Attribute identifier !! \param hdferr \fortran_error @@ -203,17 +300,6 @@ SUBROUTINE H5Aopen_name_f(obj_id, name, attr_id, hdferr) CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name -! H5Aopen_name is deprecated - INTERFACE - INTEGER(HID_T) FUNCTION H5Aopen(obj_id, name, aapl_id) BIND(C,NAME='H5Aopen') - IMPORT :: C_CHAR - IMPORT :: HID_T - INTEGER(HID_T), INTENT(IN), VALUE :: obj_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER(HID_T), INTENT(IN), VALUE :: aapl_id - END FUNCTION H5Aopen - END INTERFACE - c_name = TRIM(name)//C_NULL_CHAR attr_id = H5Aopen(obj_id, c_name, H5P_DEFAULT_F) @@ -221,11 +307,15 @@ END FUNCTION H5Aopen IF(attr_id.LT.0) hdferr = -1 END SUBROUTINE H5Aopen_name_f + +#ifndef H5_NO_DEPRECATED_SYMBOLS !> !! \ingroup FH5A !! !! \brief Opens the attribute specified by its index. !! +!! \deprecation_note{H5Aopen_by_idx_f()} +!! !! \param obj_id Identifier of a group, dataset, or named datatype an attribute to be attached to !! \param index Index of the attribute to open (zero-based) !! 
\param attr_id Attribute identifier @@ -233,27 +323,30 @@ END SUBROUTINE H5Aopen_name_f !! !! See C API: @ref H5Aopen_idx() !! - SUBROUTINE H5Aopen_idx_f(obj_id, index, attr_id, hdferr) + SUBROUTINE h5aopen_idx_f(obj_id, index, attr_id, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: obj_id INTEGER, INTENT(IN) :: index INTEGER(HID_T), INTENT(OUT) :: attr_id INTEGER, INTENT(OUT) :: hdferr INTERFACE - INTEGER(HID_T) FUNCTION H5Aopen_by_idx(obj_id, index) BIND(C,NAME='H5Aopen_by_idx') + INTEGER(HID_T) FUNCTION H5Aopen_idx(obj_id, index) BIND(C,NAME='H5Aopen_idx') IMPORT :: HID_T IMPORT :: C_INT - INTEGER(HID_T), INTENT(IN) :: obj_id - INTEGER(C_INT), INTENT(IN) :: index - END FUNCTION H5Aopen_by_idx + IMPLICIT NONE + INTEGER(HID_T), VALUE :: obj_id + INTEGER(C_INT), VALUE :: index + END FUNCTION H5Aopen_idx END INTERFACE - attr_id = H5Aopen_by_idx(obj_id, INT(index, C_INT)) + attr_id = H5Aopen_idx(obj_id, INT(index, C_INT)) hdferr = 0 IF(attr_id.LT.0) hdferr = -1 - END SUBROUTINE H5Aopen_idx_f + END SUBROUTINE h5aopen_idx_f +#endif + !> !! \ingroup FH5A !! @@ -265,7 +358,7 @@ END SUBROUTINE H5Aopen_idx_f !! !! See C API: @ref H5Aget_space() !! - SUBROUTINE H5Aget_space_f(attr_id, space_id, hdferr) + SUBROUTINE h5aget_space_f(attr_id, space_id, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(HID_T), INTENT(OUT) :: space_id @@ -283,7 +376,7 @@ END FUNCTION H5Aget_space hdferr = 0 IF(space_id.LT.0) hdferr = -1 - END SUBROUTINE H5Aget_space_f + END SUBROUTINE h5aget_space_f !> !! \ingroup FH5A !! @@ -295,7 +388,7 @@ END SUBROUTINE H5Aget_space_f !! !! See C API: @ref H5Aget_type() !! - SUBROUTINE H5Aget_type_f(attr_id, type_id, hdferr) + SUBROUTINE h5aget_type_f(attr_id, type_id, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(HID_T), INTENT(OUT) :: type_id @@ -313,7 +406,7 @@ END FUNCTION H5Aget_type hdferr = 0 IF(type_id.LT.0) hdferr = -1 - END SUBROUTINE H5Aget_type_f + END SUBROUTINE h5aget_type_f !> !! \ingroup FH5A !! @@ -337,6 +430,7 @@ INTEGER FUNCTION h5aget_name_c(attr_id, size, buf) & BIND(C,NAME='h5aget_name_c') IMPORT :: C_CHAR IMPORT :: HID_T, SIZE_T + IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(SIZE_T), INTENT(IN) :: size CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(OUT) :: buf @@ -379,16 +473,12 @@ SUBROUTINE h5aget_name_by_idx_f(loc_id, obj_name, idx_type, order, & INTEGER(HID_T), INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: obj_name INTEGER, INTENT(IN) :: idx_type - ! H5_INDEX_N_F - Number of indices defined - INTEGER, INTENT(IN) :: order - ! H5_ITER_NATIVE_F - No particular order, whatever is fastest - ! H5_ITER_N_F - Number of iteration orders INTEGER(HSIZE_T), INTENT(IN) :: n CHARACTER(LEN=*), INTENT(OUT) :: name INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id - INTEGER(SIZE_T), OPTIONAL, INTENT(OUT) :: size + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + INTEGER(SIZE_T), INTENT(OUT), OPTIONAL :: size INTEGER(HID_T) :: lapl_id_default INTEGER(SIZE_T) :: obj_namelen INTEGER(SIZE_T) :: size_default @@ -444,6 +534,7 @@ SUBROUTINE h5aget_num_attrs_f(obj_id, attr_num, hdferr) INTERFACE INTEGER FUNCTION h5aget_num_attrs_c(obj_id, attr_num) BIND(C,name='h5aget_num_attrs_c') IMPORT :: HID_T + IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: obj_id INTEGER, INTENT(OUT) :: attr_num END FUNCTION h5aget_num_attrs_c @@ -464,7 +555,7 @@ END SUBROUTINE h5aget_num_attrs_f !! !! See C API: @ref H5Adelete() !! 
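! A minimal illustrative sketch (not part of the patch) of h5adelete_f as
! documented above; the attribute name "stale_attr" and the subroutine name
! are placeholder assumptions.
  SUBROUTINE demo_adelete(obj_id, total_error)
    USE HDF5
    IMPLICIT NONE
    INTEGER(HID_T), INTENT(IN)    :: obj_id      ! any open object carrying the attribute
    INTEGER,        INTENT(INOUT) :: total_error
    INTEGER :: error

    CALL h5adelete_f(obj_id, "stale_attr", error)
    IF (error /= 0) total_error = total_error + 1
  END SUBROUTINE demo_adelete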
- SUBROUTINE H5Adelete_f(obj_id, name, hdferr) + SUBROUTINE h5adelete_f(obj_id, name, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: obj_id CHARACTER(LEN=*), INTENT(IN) :: name @@ -475,6 +566,7 @@ SUBROUTINE H5Adelete_f(obj_id, name, hdferr) INTEGER FUNCTION H5Adelete_c(obj_id, name, namelen) BIND(C,NAME='h5adelete_c') IMPORT :: C_CHAR IMPORT :: HID_T, SIZE_T + IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: obj_id CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name INTEGER(SIZE_T) :: namelen @@ -483,7 +575,7 @@ END FUNCTION H5Adelete_c namelen = LEN(name) hdferr = H5Adelete_c(obj_id, name, namelen) - END SUBROUTINE H5Adelete_f + END SUBROUTINE h5adelete_f !> !! \ingroup FH5A @@ -495,7 +587,7 @@ END SUBROUTINE H5Adelete_f !! !! See C API: @ref H5Aclose() !! - SUBROUTINE H5Aclose_f(attr_id, hdferr) + SUBROUTINE h5aclose_f(attr_id, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER, INTENT(OUT) :: hdferr @@ -503,12 +595,61 @@ SUBROUTINE H5Aclose_f(attr_id, hdferr) INTERFACE INTEGER FUNCTION H5Aclose(attr_id) BIND(C, NAME='H5Aclose') IMPORT :: HID_T + IMPLICIT NONE INTEGER(HID_T), INTENT(IN), VALUE :: attr_id END FUNCTION H5Aclose END INTERFACE hdferr = INT(H5Aclose(attr_id)) - END SUBROUTINE H5Aclose_f + END SUBROUTINE h5aclose_f + +!> +!! \ingroup FH5A +!! +!! \brief Asynchronously closes the specified attribute. +!! +!! \param attr_id Attribute identifier +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Aclose_async() +!! + SUBROUTINE h5aclose_async_f(attr_id, es_id, hdferr, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: attr_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER FUNCTION H5Aclose_async(file, func, line, attr_id, es_id) BIND(C, NAME='H5Aclose_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: attr_id + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Aclose_async + END INTERFACE + + IF (PRESENT(file)) file_default = file + IF (PRESENT(func)) func_default = func + IF (PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = H5Aclose_async(file_default, func_default, line_default, attr_id, es_id) + + END SUBROUTINE h5aclose_async_f !> !! \ingroup FH5A @@ -521,7 +662,7 @@ END SUBROUTINE H5Aclose_f !! !! See C API: @ref H5Aget_storage_size() !! - SUBROUTINE H5Aget_storage_size_f(attr_id, size, hdferr) + SUBROUTINE h5aget_storage_size_f(attr_id, size, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(HSIZE_T), INTENT(OUT) :: size @@ -530,6 +671,7 @@ SUBROUTINE H5Aget_storage_size_f(attr_id, size, hdferr) INTERFACE INTEGER(HSIZE_T) FUNCTION H5Aget_storage_size(attr_id) BIND(C,NAME='H5Aget_storage_size') IMPORT :: HID_T, HSIZE_T + IMPLICIT NONE INTEGER(HID_T), INTENT(IN), VALUE :: attr_id END FUNCTION H5Aget_storage_size END INTERFACE @@ -539,7 +681,7 @@ END FUNCTION H5Aget_storage_size hdferr = 0 IF(size.LT.0) hdferr = -1 - END SUBROUTINE H5Aget_storage_size_f + END SUBROUTINE h5aget_storage_size_f !> !! \ingroup FH5A @@ -552,7 +694,7 @@ END SUBROUTINE H5Aget_storage_size_f !! !! 
See C API: @ref H5Aget_create_plist() !! - SUBROUTINE H5Aget_create_plist_f(attr_id, creation_prop_id, hdferr) + SUBROUTINE h5aget_create_plist_f(attr_id, creation_prop_id, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(HID_T), INTENT(OUT) :: creation_prop_id @@ -560,6 +702,7 @@ SUBROUTINE H5Aget_create_plist_f(attr_id, creation_prop_id, hdferr) INTERFACE INTEGER(HID_T) FUNCTION H5Aget_create_plist(attr_id) BIND(C,NAME='H5Aget_create_plist') IMPORT :: HID_T + IMPLICIT NONE INTEGER(HID_T), INTENT(IN), VALUE :: attr_id END FUNCTION H5Aget_create_plist END INTERFACE @@ -569,68 +712,139 @@ END FUNCTION H5Aget_create_plist hdferr = 0 IF(creation_prop_id.LT.0) hdferr = -1 - END SUBROUTINE H5Aget_create_plist_f + END SUBROUTINE h5aget_create_plist_f !> !! \ingroup FH5A !! !! \brief Renames an attribute !! -!! \param loc_id Location or object identifier; may be dataset or group +!! \param loc_id Location or object identifier; may be dataset or group or named datatype !! \param obj_name Name of object, relative to location, whose attribute is to be renamed !! \param old_attr_name Prior attribute name !! \param new_attr_name New attribute name -!! \param lapl_id Link access property list identifier !! \param hdferr \fortran_error +!! \param lapl_id Link access property list identifier !! !! See C API: @ref H5Arename_by_name() !! - SUBROUTINE H5Arename_by_name_f(loc_id, obj_name, old_attr_name, new_attr_name, & + SUBROUTINE h5arename_by_name_f(loc_id, obj_name, old_attr_name, new_attr_name, & hdferr, lapl_id) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: obj_name CHARACTER(LEN=*), INTENT(IN) :: old_attr_name CHARACTER(LEN=*), INTENT(IN) :: new_attr_name - INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: obj_namelen - INTEGER(SIZE_T) :: old_attr_namelen - INTEGER(SIZE_T) :: new_attr_namelen + CHARACTER(LEN=LEN_TRIM(obj_name) +1,KIND=C_CHAR) :: c_obj_name + CHARACTER(LEN=LEN_TRIM(old_attr_name)+1,KIND=C_CHAR) :: c_old_attr_name + CHARACTER(LEN=LEN_TRIM(new_attr_name)+1,KIND=C_CHAR) :: c_new_attr_name INTERFACE - INTEGER FUNCTION H5Arename_by_name_c(loc_id, obj_name, obj_namelen, & - old_attr_name, old_attr_namelen, new_attr_name, new_attr_namelen, & - lapl_id_default) BIND(C,NAME='h5arename_by_name_c') + INTEGER FUNCTION H5Arename_by_name(loc_id, obj_name, & + old_attr_name, new_attr_name, lapl_id_default) & + BIND(C,NAME='H5Arename_by_name') IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T + IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: obj_name - INTEGER(SIZE_T) :: obj_namelen - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: old_attr_name - INTEGER(SIZE_T) :: old_attr_namelen - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: new_attr_name - INTEGER(SIZE_T) :: new_attr_namelen - INTEGER(HID_T) :: lapl_id_default + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: old_attr_name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: new_attr_name + INTEGER(HID_T), VALUE :: lapl_id_default - END FUNCTION H5Arename_by_name_c + END FUNCTION H5Arename_by_name END INTERFACE - obj_namelen = LEN(obj_name) - old_attr_namelen = LEN(old_attr_name) - new_attr_namelen = LEN(new_attr_name) + c_obj_name = TRIM(obj_name)//C_NULL_CHAR + c_old_attr_name = TRIM(old_attr_name)//C_NULL_CHAR 
+ c_new_attr_name = TRIM(new_attr_name)//C_NULL_CHAR + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default=lapl_id + + hdferr = 0 + hdferr = H5Arename_by_name(loc_id, c_obj_name, c_old_attr_name, c_new_attr_name, lapl_id_default) + + END SUBROUTINE h5arename_by_name_f +!> +!! \ingroup FH5A +!! +!! \brief Asynchronously renames an attribute +!! +!! \param loc_id Location or object identifier; may be dataset or group or named datatype +!! \param obj_name Name of object, relative to location, whose attribute is to be renamed +!! \param old_attr_name Prior attribute name +!! \param new_attr_name New attribute name +!! \param hdferr \fortran_error +!! \param lapl_id Link access property list identifier +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Arename_by_name() +!! + SUBROUTINE h5arename_by_name_async_f(loc_id, obj_name, old_attr_name, new_attr_name, es_id, & + hdferr, lapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: obj_name + CHARACTER(LEN=*), INTENT(IN) :: old_attr_name + CHARACTER(LEN=*), INTENT(IN) :: new_attr_name + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: lapl_id_default + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + CHARACTER(LEN=LEN_TRIM(obj_name) +1,KIND=C_CHAR) :: c_obj_name + CHARACTER(LEN=LEN_TRIM(old_attr_name)+1,KIND=C_CHAR) :: c_old_attr_name + CHARACTER(LEN=LEN_TRIM(new_attr_name)+1,KIND=C_CHAR) :: c_new_attr_name + + INTERFACE + INTEGER FUNCTION H5Arename_by_name_async(file, func, line, loc_id, obj_name, & + old_attr_name, new_attr_name, lapl_id_default, es_id) & + BIND(C,NAME='H5Arename_by_name_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: old_attr_name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: new_attr_name + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Arename_by_name_async + END INTERFACE + + c_obj_name = TRIM(obj_name)//C_NULL_CHAR + c_old_attr_name = TRIM(old_attr_name)//C_NULL_CHAR + c_new_attr_name = TRIM(new_attr_name)//C_NULL_CHAR lapl_id_default = H5P_DEFAULT_F IF(PRESENT(lapl_id)) lapl_id_default=lapl_id - hdferr = H5Arename_by_name_c(loc_id, obj_name, obj_namelen, & - old_attr_name, old_attr_namelen, new_attr_name, new_attr_namelen, & - lapl_id_default) + IF (PRESENT(file)) file_default = file + IF (PRESENT(func)) func_default = func + IF (PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = 0 + hdferr = H5Arename_by_name_async(file_default, func_default, line_default, & + loc_id, c_obj_name, c_old_attr_name, c_new_attr_name, lapl_id_default, es_id) - END SUBROUTINE H5Arename_by_name_f + END SUBROUTINE h5arename_by_name_async_f !> !! \ingroup FH5A @@ -646,45 +860,102 @@ END SUBROUTINE H5Arename_by_name_f !! !! See C API: @ref H5Aopen() !! 
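! A minimal illustrative sketch (not part of the patch) of the reworked
! h5aopen_f documented above, followed by a read.  The attribute name
! "version" and the assumption that it holds a single default INTEGER are
! placeholders.
  SUBROUTINE demo_aopen(obj_id, total_error)
    USE HDF5
    IMPLICIT NONE
    INTEGER(HID_T), INTENT(IN)    :: obj_id
    INTEGER,        INTENT(INOUT) :: total_error
    INTEGER(HID_T)   :: attr_id
    INTEGER(HSIZE_T) :: dims(1)
    INTEGER          :: value, error

    dims(1) = 1
    CALL h5aopen_f(obj_id, "version", attr_id, error)   ! aapl_id is optional
    IF (error /= 0) total_error = total_error + 1
    CALL h5aread_f(attr_id, H5T_NATIVE_INTEGER, value, dims, error)
    CALL h5aclose_f(attr_id, error)
  END SUBROUTINE demo_aopen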
- SUBROUTINE H5Aopen_f(obj_id, attr_name, attr_id, hdferr, aapl_id) + SUBROUTINE h5aopen_f(obj_id, attr_name, attr_id, hdferr, aapl_id) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: obj_id CHARACTER(LEN=*), INTENT(IN) :: attr_name INTEGER(HID_T), INTENT(OUT) :: attr_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: aapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id + INTEGER(HID_T) :: aapl_id_default + CHARACTER(LEN=LEN_TRIM(attr_name)+1,KIND=C_CHAR) :: c_attr_name - INTEGER(SIZE_T) :: attr_namelen + c_attr_name = TRIM(attr_name)//C_NULL_CHAR + + aapl_id_default = H5P_DEFAULT_F + IF(PRESENT(aapl_id)) aapl_id_default = aapl_id + + attr_id = INT(H5Aopen(obj_id, c_attr_name, aapl_id_default), HID_T) + + hdferr = 0 + IF(attr_id.LT.0) hdferr = -1 + + END SUBROUTINE h5aopen_f + +!> +!! \ingroup FH5A +!! +!! \brief Asynchronously opens an attribute for an object specified by object identifier and attribute name. +!! +!! \param obj_id Identifier for object to which attribute is attached +!! \param attr_name Name of attribute to open +!! \param attr_id Attribute identifier +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param aapl_id Attribute access property list +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Aopen_async() +!! + SUBROUTINE h5aopen_async_f(obj_id, attr_name, attr_id, es_id, hdferr, aapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: obj_id + CHARACTER(LEN=*), INTENT(IN) :: attr_name + INTEGER(HID_T), INTENT(OUT) :: attr_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN) , OPTIONAL :: line + + INTEGER(HID_T) :: aapl_id_default + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + CHARACTER(LEN=LEN_TRIM(attr_name)+1,KIND=C_CHAR) :: c_attr_name INTERFACE - INTEGER FUNCTION H5Aopen_c(obj_id, attr_name, attr_namelen, aapl_id_default, attr_id) & - BIND(C,NAME='h5aopen_c') - IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T - INTEGER(HID_T), INTENT(IN) :: obj_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: attr_name - INTEGER(HID_T) :: aapl_id_default - INTEGER(SIZE_T) :: attr_namelen - INTEGER(HID_T), INTENT(OUT) :: attr_id - END FUNCTION H5Aopen_c + INTEGER(HID_T) FUNCTION H5Aopen_async(file, func, line, & + obj_id, attr_name, aapl_id_default, es_id) BIND(C,NAME='H5Aopen_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: obj_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: attr_name + INTEGER(HID_T), VALUE :: aapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Aopen_async END INTERFACE - attr_namelen = LEN(attr_name) + c_attr_name = TRIM(attr_name)//C_NULL_CHAR aapl_id_default = H5P_DEFAULT_F IF(PRESENT(aapl_id)) aapl_id_default = aapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) - hdferr = H5Aopen_c(obj_id, attr_name, attr_namelen, aapl_id_default, attr_id) + attr_id = INT(H5Aopen_async(file_default, func_default, line_default, & + obj_id, c_attr_name, aapl_id_default, es_id), HID_T) + + hdferr = 0 + IF(attr_id.LT.0) hdferr = -1 - END SUBROUTINE H5Aopen_f + END SUBROUTINE 
h5aopen_async_f !> !! \ingroup FH5A !! !! \brief Deletes an attribute from an object according to index order !! -!! \param loc_id Location or object identifier; may be dataset or group +!! \param loc_id Location or object identifier; may be dataset or group or named datatype !! \param obj_name Name of object, relative to location, from which attribute is to be removed !! \param idx_type Type of index; Possible values are: !! \li H5_INDEX_UNKNOWN_F = -1 - Unknown index type @@ -705,7 +976,7 @@ END SUBROUTINE H5Aopen_f !! !! See C API: @ref H5Adelete_by_idx() !! - SUBROUTINE H5Adelete_by_idx_f(loc_id, obj_name, idx_type, order, n, hdferr, lapl_id) + SUBROUTINE h5adelete_by_idx_f(loc_id, obj_name, idx_type, order, n, hdferr, lapl_id) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: obj_name @@ -713,7 +984,7 @@ SUBROUTINE H5Adelete_by_idx_f(loc_id, obj_name, idx_type, order, n, hdferr, lapl INTEGER, INTENT(IN) :: order INTEGER(HSIZE_T), INTENT(IN) :: n INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id INTEGER(SIZE_T) :: obj_namelen INTEGER(HID_T) :: lapl_id_default @@ -739,7 +1010,7 @@ END FUNCTION H5Adelete_by_idx_c obj_namelen = LEN(obj_name) hdferr = H5Adelete_by_idx_c(loc_id, obj_name, obj_namelen, idx_type, order, n, lapl_id_default) - END SUBROUTINE H5Adelete_by_idx_f + END SUBROUTINE h5adelete_by_idx_f !> !! \ingroup FH5A @@ -760,7 +1031,7 @@ SUBROUTINE H5Adelete_by_name_f(loc_id, obj_name, attr_name, hdferr, lapl_id) CHARACTER(LEN=*), INTENT(IN) :: obj_name CHARACTER(LEN=*), INTENT(IN) :: attr_name INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id INTEGER(SIZE_T) :: attr_namelen INTEGER(SIZE_T) :: obj_namelen @@ -771,6 +1042,7 @@ INTEGER FUNCTION H5Adelete_by_name_c(loc_id, obj_name, obj_namelen, attr_name, a BIND(C,NAME='h5adelete_by_name_c') IMPORT :: C_CHAR IMPORT :: HID_T, SIZE_T + IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: obj_name CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: attr_name @@ -788,7 +1060,7 @@ END FUNCTION H5Adelete_by_name_c hdferr = H5Adelete_by_name_c(loc_id, obj_name, obj_namelen, attr_name, attr_namelen, lapl_id_default) - END SUBROUTINE H5Adelete_by_name_f + END SUBROUTINE h5adelete_by_name_f !> !! \ingroup FH5A @@ -817,7 +1089,7 @@ END SUBROUTINE H5Adelete_by_name_f !! !! See C API: @ref H5Aopen_by_idx() !! 
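! A minimal illustrative sketch (not part of the patch) of the reworked
! h5aopen_by_idx_f documented above: open the first attribute of the object
! itself ("." as obj_name), iterating the name index in increasing order.
  SUBROUTINE demo_aopen_by_idx(loc_id, total_error)
    USE HDF5
    IMPLICIT NONE
    INTEGER(HID_T), INTENT(IN)    :: loc_id
    INTEGER,        INTENT(INOUT) :: total_error
    INTEGER(HID_T) :: attr_id
    INTEGER        :: error

    CALL h5aopen_by_idx_f(loc_id, ".", H5_INDEX_NAME_F, H5_ITER_INC_F, &
                          INT(0, HSIZE_T), attr_id, error)
    IF (error /= 0) total_error = total_error + 1
    CALL h5aclose_f(attr_id, error)
  END SUBROUTINE demo_aopen_by_idx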
- SUBROUTINE H5Aopen_by_idx_f(loc_id, obj_name, idx_type, order, n, attr_id, hdferr, aapl_id, lapl_id) + SUBROUTINE h5aopen_by_idx_f(loc_id, obj_name, idx_type, order, n, attr_id, hdferr, aapl_id, lapl_id) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: obj_name @@ -828,40 +1100,134 @@ SUBROUTINE H5Aopen_by_idx_f(loc_id, obj_name, idx_type, order, n, attr_id, hdfer INTEGER(HID_T), INTENT(OUT) :: attr_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: aapl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id - INTEGER(SIZE_T) :: obj_namelen + INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id INTEGER(HID_T) :: aapl_id_default INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(obj_name)+1,KIND=C_CHAR) :: c_obj_name INTERFACE - INTEGER FUNCTION H5Aopen_by_idx_c(loc_id, obj_name, obj_namelen, idx_type, order, n, & - aapl_id_default, lapl_id_default, attr_id) BIND(C,NAME='h5aopen_by_idx_c') - IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T, HSIZE_T - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: obj_name - INTEGER, INTENT(IN) :: idx_type - INTEGER, INTENT(IN) :: order - INTEGER(HSIZE_T), INTENT(IN) :: n - INTEGER(HID_T) :: aapl_id_default - INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: obj_namelen - INTEGER(HID_T), INTENT(OUT) :: attr_id - END FUNCTION H5Aopen_by_idx_c + INTEGER(HID_T) FUNCTION H5Aopen_by_idx(loc_id, obj_name, idx_type, order, n, & + aapl_id_default, lapl_id_default) BIND(C,NAME='H5Aopen_by_idx') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T, HSIZE_T + IMPLICIT NONE + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + INTEGER(C_INT), VALUE :: idx_type + INTEGER(C_INT), VALUE :: order + INTEGER(HSIZE_T), VALUE :: n + INTEGER(HID_T), VALUE :: aapl_id_default + INTEGER(HID_T), VALUE :: lapl_id_default + END FUNCTION H5Aopen_by_idx END INTERFACE - obj_namelen = LEN(obj_name) + c_obj_name = TRIM(obj_name)//C_NULL_CHAR + + aapl_id_default = H5P_DEFAULT_F + IF(PRESENT(aapl_id)) aapl_id_default = aapl_id + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + + attr_id = INT(H5Aopen_by_idx(loc_id, c_obj_name, INT(idx_type, C_INT), INT(order, C_INT), n, & + aapl_id_default, lapl_id_default), HID_T) + + END SUBROUTINE h5aopen_by_idx_f + +!> +!! \ingroup FH5A +!! +!! \brief Asynchronously opens an existing attribute that is attached to an object specified by location and name. +!! +!! \param loc_id Location of object to which attribute is attached. +!! \param obj_name Name of object to which attribute is attached, relative to location. +!! \param idx_type Type of index; Possible values are: +!! \li H5_INDEX_UNKNOWN_F = -1 - Unknown index type +!! \li H5_INDEX_NAME_F - Index on names +!! \li H5_INDEX_CRT_ORDER_F - Index on creation order +!! \li H5_INDEX_N_F - Number of indices defined +!! +!! \param order Order in which to iterate over index; Possible values are: +!! \li H5_ITER_UNKNOWN_F - Unknown order +!! \li H5_ITER_INC_F - Increasing order +!! \li H5_ITER_DEC_F - Decreasing order +!! \li H5_ITER_NATIVE_F - No particular order, whatever is fastest +!! \li H5_ITER_N_F - Number of iteration orders +!! \param n Attribute’s position in index. +!! \param attr_id Attribute identifier. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param aapl_id Attribute access property list. +!! \param lapl_id Link access property list. +!! 
\param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Aopen_by_idx_async() +!! + SUBROUTINE h5aopen_by_idx_async_f(loc_id, obj_name, idx_type, order, n, attr_id, es_id, hdferr, & + aapl_id, lapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: obj_name + INTEGER, INTENT(IN) :: idx_type + INTEGER, INTENT(IN) :: order + INTEGER(HSIZE_T), INTENT(IN) :: n + INTEGER(HID_T), INTENT(OUT) :: attr_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN) , OPTIONAL :: line + + INTEGER(HID_T) :: aapl_id_default + INTEGER(HID_T) :: lapl_id_default + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + CHARACTER(LEN=LEN_TRIM(obj_name)+1,KIND=C_CHAR) :: c_obj_name + + INTERFACE + INTEGER(HID_T) FUNCTION H5Aopen_by_idx_async(file, func, line, & + loc_id, obj_name, idx_type, order, n, & + aapl_id_default, lapl_id_default, es_id) BIND(C,NAME='H5Aopen_by_idx_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T, HSIZE_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + INTEGER(C_INT), VALUE :: idx_type + INTEGER(C_INT), VALUE :: order + INTEGER(HSIZE_T), VALUE :: n + INTEGER(HID_T), VALUE :: aapl_id_default + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Aopen_by_idx_async + END INTERFACE + + c_obj_name = TRIM(obj_name)//C_NULL_CHAR aapl_id_default = H5P_DEFAULT_F IF(PRESENT(aapl_id)) aapl_id_default = aapl_id lapl_id_default = H5P_DEFAULT_F IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) - hdferr = H5Aopen_by_idx_c(loc_id, obj_name, obj_namelen, idx_type, order, n, & - aapl_id_default, lapl_id_default, attr_id) + attr_id = INT(H5Aopen_by_idx_async(file_default, func_default, line_default, & + loc_id, c_obj_name, INT(idx_type, C_INT), INT(order, C_INT), n, & + aapl_id_default, lapl_id_default, es_id), HID_T) + + hdferr = 0 + IF(attr_id.LT.0) hdferr = -1 - END SUBROUTINE H5Aopen_by_idx_f + END SUBROUTINE h5aopen_by_idx_async_f !> !! \ingroup FH5A @@ -878,7 +1244,7 @@ END SUBROUTINE H5Aopen_by_idx_f !! !! See C API: @ref H5Aget_info() !! - SUBROUTINE H5Aget_info_f(attr_id, f_corder_valid, corder, cset, data_size, hdferr) + SUBROUTINE h5aget_info_f(attr_id, f_corder_valid, corder, cset, data_size, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id LOGICAL, INTENT(OUT) :: f_corder_valid @@ -907,12 +1273,12 @@ END FUNCTION H5Aget_info_c IF (corder_valid .EQ. 1) f_corder_valid =.TRUE. - END SUBROUTINE H5Aget_info_f + END SUBROUTINE h5aget_info_f !> !! \ingroup FH5A !! -!! \brief Retrieves attribute information, by attribute index position +!! \brief Retrieves attribute information by attribute index position !! !! \param loc_id Location of object to which attribute is attached !! \param obj_name Name of object to which attribute is attached, relative to location @@ -928,7 +1294,7 @@ END SUBROUTINE H5Aget_info_f !! !! See C API: @ref H5Aget_info_by_idx() !! 
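! A minimal illustrative sketch (not part of the patch) of
! h5aget_info_by_idx_f as documented above: query the creation order,
! character set and data size of the first attribute of the object itself.
  SUBROUTINE demo_aget_info_by_idx(loc_id, total_error)
    USE HDF5
    IMPLICIT NONE
    INTEGER(HID_T), INTENT(IN)    :: loc_id
    INTEGER,        INTENT(INOUT) :: total_error
    LOGICAL          :: corder_valid
    INTEGER          :: corder, cset, error
    INTEGER(HSIZE_T) :: data_size

    CALL h5aget_info_by_idx_f(loc_id, ".", H5_INDEX_NAME_F, H5_ITER_INC_F, &
                              INT(0, HSIZE_T), corder_valid, corder, cset, &
                              data_size, error)
    IF (error /= 0) total_error = total_error + 1
  END SUBROUTINE demo_aget_info_by_idx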
- SUBROUTINE H5Aget_info_by_idx_f(loc_id, obj_name, idx_type, order, n, & + SUBROUTINE h5aget_info_by_idx_f(loc_id, obj_name, idx_type, order, n, & f_corder_valid, corder, cset, data_size, hdferr, lapl_id) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id @@ -946,7 +1312,7 @@ SUBROUTINE H5Aget_info_by_idx_f(loc_id, obj_name, idx_type, order, n, & INTEGER, INTENT(OUT) :: cset INTEGER(HSIZE_T), INTENT(OUT) :: data_size INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id INTEGER :: corder_valid INTEGER(SIZE_T) :: obj_namelen INTEGER(HID_T) :: lapl_id_default @@ -982,7 +1348,7 @@ END FUNCTION H5Aget_info_by_idx_c f_corder_valid =.FALSE. IF (corder_valid .EQ. 1) f_corder_valid =.TRUE. - END SUBROUTINE H5Aget_info_by_idx_f + END SUBROUTINE h5aget_info_by_idx_f !> !! \ingroup FH5A @@ -1001,7 +1367,7 @@ END SUBROUTINE H5Aget_info_by_idx_f !! !! See C API: @ref H5Aget_info_by_name() !! - SUBROUTINE H5Aget_info_by_name_f(loc_id, obj_name, attr_name, & + SUBROUTINE h5aget_info_by_name_f(loc_id, obj_name, attr_name, & f_corder_valid, corder, cset, data_size, hdferr, lapl_id) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id @@ -1014,7 +1380,7 @@ SUBROUTINE H5Aget_info_by_name_f(loc_id, obj_name, attr_name, & INTEGER, INTENT(OUT) :: cset INTEGER(HSIZE_T), INTENT(OUT) :: data_size INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id INTEGER :: corder_valid INTEGER(SIZE_T) :: obj_namelen INTEGER(SIZE_T) :: attr_namelen @@ -1052,7 +1418,7 @@ END FUNCTION H5Aget_info_by_name_c f_corder_valid =.FALSE. IF (corder_valid .EQ. 1) f_corder_valid =.TRUE. - END SUBROUTINE H5Aget_info_by_name_f + END SUBROUTINE h5aget_info_by_name_f !> !! \ingroup FH5A @@ -1072,7 +1438,7 @@ END SUBROUTINE H5Aget_info_by_name_f !! !! See C API: @ref H5Acreate_by_name() !! 
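! A minimal illustrative sketch (not part of the patch) of the reworked
! h5acreate_by_name_f documented above: attach an attribute to a sub-object
! addressed by path.  The group path "/G1" and attribute name "units" are
! placeholder assumptions.
  SUBROUTINE demo_acreate_by_name(file_id, total_error)
    USE HDF5
    IMPLICIT NONE
    INTEGER(HID_T), INTENT(IN)    :: file_id
    INTEGER,        INTENT(INOUT) :: total_error
    INTEGER(HID_T)   :: space_id, attr_id
    INTEGER(HSIZE_T) :: dims(1)
    INTEGER          :: error

    dims(1) = 1
    CALL h5screate_simple_f(1, dims, space_id, error)
    CALL h5acreate_by_name_f(file_id, "/G1", "units", H5T_NATIVE_CHARACTER, &
                             space_id, attr_id, error)
    IF (error /= 0) total_error = total_error + 1
    CALL h5aclose_f(attr_id, error)
    CALL h5sclose_f(space_id, error)
  END SUBROUTINE demo_acreate_by_name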
- SUBROUTINE H5Acreate_by_name_f(loc_id, obj_name, attr_name, type_id, space_id, attr, hdferr, & + SUBROUTINE h5acreate_by_name_f(loc_id, obj_name, attr_name, type_id, space_id, attr, hdferr, & acpl_id, aapl_id, lapl_id) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id @@ -1086,37 +1452,126 @@ SUBROUTINE H5Acreate_by_name_f(loc_id, obj_name, attr_name, type_id, space_id, a INTEGER(HID_T), INTENT(IN), OPTIONAL :: acpl_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - INTEGER(SIZE_T) :: obj_namelen - INTEGER(SIZE_T) :: attr_namelen INTEGER(HID_T) :: acpl_id_default INTEGER(HID_T) :: aapl_id_default INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(obj_name)+1,KIND=C_CHAR) :: c_obj_name + CHARACTER(LEN=LEN_TRIM(attr_name)+1,KIND=C_CHAR) :: c_attr_name INTERFACE - INTEGER FUNCTION H5Acreate_by_name_c(loc_id, obj_name, obj_namelen, attr_name, attr_namelen, & - type_id, space_id, acpl_id_default, aapl_id_default, lapl_id_default, attr) & - BIND(C,NAME='h5acreate_by_name_c') + INTEGER(HID_T) FUNCTION H5Acreate_by_name(loc_id, obj_name, attr_name, & + type_id, space_id, acpl_id_default, aapl_id_default, lapl_id_default) & + BIND(C,NAME='H5Acreate_by_name') IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T + IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: obj_name - INTEGER(SIZE_T), INTENT(IN) :: obj_namelen - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: attr_name - INTEGER(SIZE_T), INTENT(IN) :: attr_namelen - INTEGER(HID_T), INTENT(IN) :: type_id - INTEGER(HID_T), INTENT(IN) :: space_id - INTEGER(HID_T) :: acpl_id_default - INTEGER(HID_T) :: aapl_id_default - INTEGER(HID_T) :: lapl_id_default - INTEGER(HID_T), INTENT(OUT) :: attr + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: attr_name + INTEGER(HID_T), VALUE :: type_id + INTEGER(HID_T), VALUE :: space_id + INTEGER(HID_T), VALUE :: acpl_id_default + INTEGER(HID_T), VALUE :: aapl_id_default + INTEGER(HID_T), VALUE :: lapl_id_default + END FUNCTION H5Acreate_by_name + END INTERFACE + + c_obj_name = TRIM(obj_name)//C_NULL_CHAR + c_attr_name = TRIM(attr_name)//C_NULL_CHAR + + acpl_id_default = H5P_DEFAULT_F + aapl_id_default = H5P_DEFAULT_F + lapl_id_default = H5P_DEFAULT_F + + IF(PRESENT(acpl_id)) acpl_id_default = acpl_id + IF(PRESENT(aapl_id)) aapl_id_default = aapl_id + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + + attr = INT(H5Acreate_by_name(loc_id, c_obj_name, c_attr_name, type_id, space_id, & + acpl_id_default, aapl_id_default, lapl_id_default), HID_T) + + hdferr = 0 + IF(attr.LT.0) hdferr = -1 + + END SUBROUTINE h5acreate_by_name_f + +!> +!! \ingroup FH5A +!! +!! \brief Asynchronously creates an attribute attached to a specified object +!! +!! \param loc_id Location or object identifier; may be dataset or group +!! \param obj_name Name, relative to loc_id, of object that attribute is to be attached to +!! \param attr_name Attribute name +!! \param type_id Attribute datatype identifier +!! \param space_id Attribute dataspace identifier +!! \param attr An attribute identifier +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param acpl_id Attribute creation property list identifier (Currently not used.) +!! \param aapl_id Attribute access property list identifier (Currently not used.) +!! \param lapl_id Link access property list +!! \param file \fortran_file +!! \param func \fortran_func +!! 
\param line \fortran_line +!! +!! See C API: @ref H5Acreate_by_name_async() +!! + SUBROUTINE h5acreate_by_name_async_f(loc_id, obj_name, attr_name, type_id, space_id, attr, es_id, hdferr, & + acpl_id, aapl_id, lapl_id, file, func, line) + + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: obj_name + CHARACTER(LEN=*), INTENT(IN) :: attr_name + INTEGER(HID_T), INTENT(IN) :: type_id + INTEGER(HID_T), INTENT(IN) :: space_id + INTEGER(HID_T), INTENT(OUT) :: attr + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + + INTEGER(HID_T), INTENT(IN), OPTIONAL :: acpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line - END FUNCTION H5Acreate_by_name_c + INTEGER(HID_T) :: acpl_id_default + INTEGER(HID_T) :: aapl_id_default + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(obj_name)+1,KIND=C_CHAR) :: c_obj_name + CHARACTER(LEN=LEN_TRIM(attr_name)+1,KIND=C_CHAR) :: c_attr_name + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(HID_T) FUNCTION H5Acreate_by_name_async(file, func, line, loc_id, obj_name, attr_name, & + type_id, space_id, acpl_id_default, aapl_id_default, lapl_id_default, es_id) & + BIND(C,NAME='H5Acreate_by_name_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: attr_name + INTEGER(HID_T), VALUE :: type_id + INTEGER(HID_T), VALUE :: space_id + INTEGER(HID_T), VALUE :: acpl_id_default + INTEGER(HID_T), VALUE :: aapl_id_default + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Acreate_by_name_async END INTERFACE - obj_namelen = LEN(obj_name) - attr_namelen = LEN(attr_name) + c_obj_name = TRIM(obj_name)//C_NULL_CHAR + c_attr_name = TRIM(attr_name)//C_NULL_CHAR acpl_id_default = H5P_DEFAULT_F aapl_id_default = H5P_DEFAULT_F @@ -1125,10 +1580,18 @@ END FUNCTION H5Acreate_by_name_c IF(PRESENT(acpl_id)) acpl_id_default = acpl_id IF(PRESENT(aapl_id)) aapl_id_default = aapl_id IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + attr = H5Acreate_by_name_async(file_default, func_default, line_default, & + loc_id, c_obj_name, c_attr_name, & + type_id, space_id, acpl_id_default, aapl_id_default, lapl_id_default, es_id) + + hdferr = 0 + IF(attr.LT.0) hdferr = -1 - hdferr = H5Acreate_by_name_c(loc_id, obj_name, obj_namelen, attr_name, attr_namelen, & - type_id, space_id, acpl_id_default, aapl_id_default, lapl_id_default, attr) - END SUBROUTINE H5Acreate_by_name_f + END SUBROUTINE h5acreate_by_name_async_f !> !! \ingroup FH5A @@ -1142,35 +1605,94 @@ END SUBROUTINE H5Acreate_by_name_f !! !! See C API: @ref H5Aexists() !! 
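! A minimal illustrative sketch (not part of the patch) of probing for an
! attribute with the reworked h5aexists_f documented above before opening it;
! the attribute name "version" is a placeholder assumption.
  SUBROUTINE demo_aexists(obj_id, total_error)
    USE HDF5
    IMPLICIT NONE
    INTEGER(HID_T), INTENT(IN)    :: obj_id
    INTEGER,        INTENT(INOUT) :: total_error
    INTEGER(HID_T) :: attr_id
    LOGICAL        :: attr_exists
    INTEGER        :: error

    CALL h5aexists_f(obj_id, "version", attr_exists, error)
    IF (error /= 0) total_error = total_error + 1
    IF (attr_exists) THEN
       CALL h5aopen_f(obj_id, "version", attr_id, error)
       CALL h5aclose_f(attr_id, error)
    END IF
  END SUBROUTINE demo_aexists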
- SUBROUTINE H5Aexists_f(obj_id, attr_name, attr_exists, hdferr) + SUBROUTINE h5aexists_f(obj_id, attr_name, attr_exists, hdferr) IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: obj_id + INTEGER(HID_T), INTENT(IN) :: obj_id CHARACTER(LEN=*), INTENT(IN) :: attr_name LOGICAL, INTENT(OUT) :: attr_exists INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T) :: attr_exists_c - INTEGER(SIZE_T) :: attr_namelen + + CHARACTER(LEN=LEN_TRIM(attr_name)+1,KIND=C_CHAR) :: c_attr_name + INTEGER(C_INT) :: attr_exists_c INTERFACE - INTEGER FUNCTION H5Aexists_c(obj_id, attr_name, attr_namelen, attr_exists_c) BIND(C,NAME='h5aexists_c') - IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T + INTEGER(C_INT) FUNCTION H5Aexists(obj_id, attr_name) BIND(C,NAME='H5Aexists') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: obj_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: attr_name - INTEGER(SIZE_T) :: attr_namelen - INTEGER(HID_T) :: attr_exists_c - END FUNCTION H5Aexists_c + INTEGER(HID_T), VALUE :: obj_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: attr_name + END FUNCTION H5Aexists END INTERFACE - attr_namelen = LEN(attr_name) + c_attr_name = TRIM(attr_name)//C_NULL_CHAR - hdferr = H5Aexists_c(obj_id, attr_name, attr_namelen, attr_exists_c) + attr_exists_c = H5Aexists(obj_id, c_attr_name) attr_exists = .FALSE. IF(attr_exists_c.GT.0) attr_exists = .TRUE. - END SUBROUTINE H5Aexists_f + hdferr = 0 + IF(attr_exists_c.LT.0) hdferr = -1 + + END SUBROUTINE h5aexists_f + +!> +!! \ingroup FH5A +!! +!! \brief Asynchronously determines whether an attribute with a given name exists on an object +!! +!! \param obj_id Object identifier +!! \param attr_name Attribute name +!! \param attr_exists Pointer to attribute exists status, must be of type LOGICAL(C_BOOL) and initialize to .FALSE. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Aexists_async() +!! + SUBROUTINE h5aexists_async_f(obj_id, attr_name, attr_exists, es_id, hdferr, file, func, line) + IMPLICIT NONE + INTEGER(HID_T) , INTENT(IN) :: obj_id + CHARACTER(LEN=*), INTENT(IN) :: attr_name + TYPE(C_PTR) , INTENT(INOUT) :: attr_exists + INTEGER(HID_T) , INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + CHARACTER(LEN=LEN_TRIM(attr_name)+1,KIND=C_CHAR) :: c_attr_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Aexists_async(file, func, line, & + obj_id, attr_name, exists, es_id) BIND(C,NAME='H5Aexists_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: obj_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: attr_name + TYPE(C_PTR) , VALUE :: exists + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Aexists_async + END INTERFACE + + c_attr_name = TRIM(attr_name)//C_NULL_CHAR + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(H5Aexists_async(file_default, func_default, line_default, obj_id, c_attr_name, attr_exists, es_id)) + + END SUBROUTINE h5aexists_async_f !> !! \ingroup FH5A @@ -1186,48 +1708,120 @@ END SUBROUTINE H5Aexists_f !! !! 
See C API: @ref H5Aexists_by_name() !! - SUBROUTINE H5Aexists_by_name_f(loc_id, obj_name, attr_name, attr_exists, hdferr, lapl_id) + SUBROUTINE h5aexists_by_name_f(loc_id, obj_name, attr_name, attr_exists, hdferr, lapl_id) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: obj_name CHARACTER(LEN=*), INTENT(IN) :: attr_name LOGICAL, INTENT(OUT) :: attr_exists INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id - INTEGER :: attr_exists_c - INTEGER(SIZE_T) :: obj_namelen - INTEGER(SIZE_T) :: attr_namelen + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + INTEGER(C_INT) :: attr_exists_c INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(obj_name)+1,KIND=C_CHAR) :: c_obj_name + CHARACTER(LEN=LEN_TRIM(attr_name)+1,KIND=C_CHAR) :: c_attr_name INTERFACE - INTEGER FUNCTION H5Aexists_by_name_c(loc_id, obj_name, obj_namelen, attr_name, attr_namelen, & - lapl_id_default, attr_exists_c) BIND(C,NAME='h5aexists_by_name_c') - IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T + INTEGER(C_INT) FUNCTION H5Aexists_by_name(loc_id, obj_name, attr_name, lapl_id_default) & + BIND(C,NAME='H5Aexists_by_name') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: obj_name - INTEGER(SIZE_T), INTENT(IN) :: obj_namelen - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: attr_name - INTEGER(SIZE_T), INTENT(IN) :: attr_namelen - INTEGER(HID_T), INTENT(IN) :: lapl_id_default - INTEGER, INTENT(OUT) :: attr_exists_c - END FUNCTION H5Aexists_by_name_c + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: attr_name + INTEGER(HID_T), VALUE :: lapl_id_default + END FUNCTION H5Aexists_by_name END INTERFACE - attr_namelen = LEN(attr_name) - obj_namelen = LEN(obj_name) + c_obj_name = TRIM(obj_name)//C_NULL_CHAR + c_attr_name = TRIM(attr_name)//C_NULL_CHAR lapl_id_default = H5P_DEFAULT_F IF(PRESENT(lapl_id)) lapl_id_default = lapl_id - hdferr = H5Aexists_by_name_c(loc_id, obj_name, obj_namelen, attr_name, attr_namelen, lapl_id_default, attr_exists_c) + attr_exists_c = H5Aexists_by_name(loc_id, c_obj_name, c_attr_name, lapl_id_default) attr_exists = .FALSE. IF(attr_exists_c.GT.0) attr_exists = .TRUE. - END SUBROUTINE H5Aexists_by_name_f + hdferr = 0 + IF(attr_exists_c.LT.0) hdferr = -1 + + END SUBROUTINE h5aexists_by_name_f + +!> +!! \ingroup FH5A +!! +!! \brief Asynchronously determines whether an attribute with a given name exists on an object +!! +!! \param loc_id Location identifier +!! \param obj_name Object name either relative to loc_id, absolute from the file’s root group, or '. '(a dot) +!! \param attr_name Attribute name +!! \param attr_exists Pointer to attribute exists status, must be of type LOGICAL(C_BOOL) and initialize to .FALSE. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param lapl_id Link access property list identifier +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Aexists_by_name_async() +!! 
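! A minimal illustrative sketch (not part of the patch) of the C_PTR /
! LOGICAL(C_BOOL) handshake required by h5aexists_by_name_async_f, defined
! just below.  The object name ".", attribute name "version", and the
! incoming es_id (an already-created event set) are placeholder assumptions.
  SUBROUTINE demo_aexists_by_name_async(loc_id, es_id, total_error)
    USE HDF5
    USE ISO_C_BINDING
    IMPLICIT NONE
    INTEGER(HID_T), INTENT(IN)    :: loc_id
    INTEGER(HID_T), INTENT(IN)    :: es_id
    INTEGER,        INTENT(INOUT) :: total_error
    LOGICAL(C_BOOL), TARGET :: attr_exists
    TYPE(C_PTR)             :: attr_exists_ptr
    INTEGER                 :: error

    attr_exists = .FALSE._C_BOOL           ! must be initialized before the call
    attr_exists_ptr = C_LOC(attr_exists)   ! the flag must outlive the request
    CALL h5aexists_by_name_async_f(loc_id, ".", "version", attr_exists_ptr, &
                                   es_id, error)
    IF (error /= 0) total_error = total_error + 1
    ! attr_exists is only meaningful after the event set has been waited on,
    ! which a real caller must do before this flag goes out of scope.
  END SUBROUTINE demo_aexists_by_name_async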
+ SUBROUTINE h5aexists_by_name_async_f(loc_id, obj_name, attr_name, attr_exists, es_id, hdferr, lapl_id, file, func, line) + IMPLICIT NONE + INTEGER (HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: obj_name + CHARACTER(LEN=*), INTENT(IN) :: attr_name + TYPE(C_PTR) , INTENT(INOUT) :: attr_exists + INTEGER (HID_T), INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr + INTEGER (HID_T), INTENT(IN) , OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN) , OPTIONAL :: line + + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(obj_name)+1,KIND=C_CHAR) :: c_obj_name + CHARACTER(LEN=LEN_TRIM(attr_name)+1,KIND=C_CHAR) :: c_attr_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Aexists_by_name_async(file, func, line, & + loc_id, obj_name, attr_name, exists, lapl_id_default, es_id) & + BIND(C,NAME='H5Aexists_by_name_async') + IMPORT :: C_CHAR, C_PTR, C_INT + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: attr_name + TYPE(C_PTR) , VALUE :: exists + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Aexists_by_name_async + END INTERFACE + + c_obj_name = TRIM(obj_name)//C_NULL_CHAR + c_attr_name = TRIM(attr_name)//C_NULL_CHAR + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(H5Aexists_by_name_async(file_default, func_default, line_default, & + loc_id, c_obj_name, c_attr_name, attr_exists, lapl_id_default, es_id)) + + END SUBROUTINE h5aexists_by_name_async_f + !> !! \ingroup FH5A !! @@ -1243,50 +1837,129 @@ END SUBROUTINE H5Aexists_by_name_f !! !! See C API: @ref H5Aopen_by_name() !! 
- SUBROUTINE H5Aopen_by_name_f(loc_id, obj_name, attr_name, attr_id, hdferr, aapl_id, lapl_id) + SUBROUTINE h5aopen_by_name_f(loc_id, obj_name, attr_name, attr_id, hdferr, aapl_id, lapl_id) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: obj_name CHARACTER(LEN=*), INTENT(IN) :: attr_name INTEGER(HID_T), INTENT(OUT) :: attr_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: aapl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + INTEGER(HID_T) :: aapl_id_default INTEGER(HID_T) :: lapl_id_default - - INTEGER(SIZE_T) :: obj_namelen - INTEGER(SIZE_T) :: attr_namelen + CHARACTER(LEN=LEN_TRIM(obj_name)+1,KIND=C_CHAR) :: c_obj_name + CHARACTER(LEN=LEN_TRIM(attr_name)+1,KIND=C_CHAR) :: c_attr_name INTERFACE - INTEGER FUNCTION H5Aopen_by_name_c(loc_id, obj_name, obj_namelen, attr_name, attr_namelen, & - aapl_id_default, lapl_id_default, attr_id) BIND(C,NAME='h5aopen_by_name_c') + INTEGER(HID_T) FUNCTION H5Aopen_by_name(loc_id, obj_name, attr_name, aapl_id_default, lapl_id_default) & + BIND(C,NAME='H5Aopen_by_name') IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T + IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: obj_name - INTEGER(SIZE_T), INTENT(IN) :: obj_namelen - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: attr_name - INTEGER(SIZE_T), INTENT(IN) :: attr_namelen - INTEGER(HID_T) :: aapl_id_default - INTEGER(HID_T) :: lapl_id_default - INTEGER(HID_T), INTENT(OUT) :: attr_id - END FUNCTION H5Aopen_by_name_c + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: attr_name + INTEGER(HID_T), VALUE :: aapl_id_default + INTEGER(HID_T), VALUE :: lapl_id_default + END FUNCTION H5Aopen_by_name END INTERFACE - attr_namelen = LEN(attr_name) - obj_namelen = LEN(obj_name) + c_obj_name = TRIM(obj_name)//C_NULL_CHAR + c_attr_name = TRIM(attr_name)//C_NULL_CHAR aapl_id_default = H5P_DEFAULT_F lapl_id_default = H5P_DEFAULT_F IF(PRESENT(aapl_id)) aapl_id_default = aapl_id IF(PRESENT(lapl_id)) lapl_id_default = lapl_id - hdferr = H5Aopen_by_name_c(loc_id, obj_name, obj_namelen, attr_name, attr_namelen, & - aapl_id_default, lapl_id_default, attr_id) + attr_id = INT(H5Aopen_by_name(loc_id, c_obj_name, c_attr_name, aapl_id_default, lapl_id_default), HID_T) - END SUBROUTINE H5Aopen_by_name_f + hdferr = 0 + IF(attr_id.LT.0) hdferr = -1 + + END SUBROUTINE h5aopen_by_name_f + +!> +!! \ingroup FH5A +!! +!! \brief Asynchronously opens an attribute for an object by object name and attribute name. +!! +!! \param loc_id Location from which to find object to which attribute is attached +!! \param obj_name Object name either relative to loc_id, absolute from the file’s root group, or '.' (a dot) +!! \param attr_name Attribute name +!! \param attr_id Attribute identifier +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param aapl_id Attribute access property list (Currently unused; should be passed in as H5P_DEFAULT.) +!! \param lapl_id Link access property list identifier +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Aopen_by_name_async() +!! 
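With the direct binding above, h5aopen_by_name_f now forwards the hid_t returned by H5Aopen_by_name and maps a negative identifier to hdferr = -1; the public interface is unchanged. A short usage sketch (loc_id is a placeholder for an open file or group; h5aclose_f is the existing close wrapper):

    INTEGER(HID_T) :: loc_id, attr_id
    INTEGER        :: hdferr

    CALL h5aopen_by_name_f(loc_id, "/G1/G2", "attribute", attr_id, hdferr)
    IF (hdferr .NE. 0) THEN
       ! H5Aopen_by_name returned a negative identifier
    END IF
    CALL h5aclose_f(attr_id, hdferr)
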
+ SUBROUTINE h5aopen_by_name_async_f(loc_id, obj_name, attr_name, attr_id, es_id, hdferr, & + aapl_id, lapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: obj_name + CHARACTER(LEN=*), INTENT(IN) :: attr_name + INTEGER(HID_T), INTENT(OUT) :: attr_id + INTEGER(HID_T), INTENT(IN) :: es_id + + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: aapl_id_default + INTEGER(HID_T) :: lapl_id_default + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + CHARACTER(LEN=LEN_TRIM(obj_name)+1,KIND=C_CHAR) :: c_obj_name + CHARACTER(LEN=LEN_TRIM(attr_name)+1,KIND=C_CHAR) :: c_attr_name + + INTERFACE + INTEGER(HID_T) FUNCTION H5Aopen_by_name_async(file, func, line, loc_id, obj_name, attr_name, & + aapl_id_default, lapl_id_default, es_id) BIND(C,NAME='H5Aopen_by_name_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: attr_name + INTEGER(HID_T), VALUE :: aapl_id_default + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Aopen_by_name_async + END INTERFACE + + c_obj_name = TRIM(obj_name)//C_NULL_CHAR + c_attr_name = TRIM(attr_name)//C_NULL_CHAR + + aapl_id_default = H5P_DEFAULT_F + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(aapl_id)) aapl_id_default = aapl_id + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + attr_id = INT(H5Aopen_by_name_async(file_default, func_default, line_default, & + loc_id, c_obj_name, c_attr_name, aapl_id_default, lapl_id_default, es_id), HID_T) + + hdferr = 0 + IF(attr_id.LT.0) hdferr = -1 + + END SUBROUTINE h5aopen_by_name_async_f !> !! \ingroup FH5A @@ -1300,36 +1973,209 @@ END SUBROUTINE H5Aopen_by_name_f !! !! See C API: @ref H5Arename() !! 
- SUBROUTINE H5Arename_f(loc_id, old_attr_name, new_attr_name, hdferr) + SUBROUTINE h5arename_f(loc_id, old_attr_name, new_attr_name, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: old_attr_name CHARACTER(LEN=*), INTENT(IN) :: new_attr_name INTEGER, INTENT(OUT) :: hdferr - INTEGER(SIZE_T) :: old_attr_namelen - INTEGER(SIZE_T) :: new_attr_namelen + + CHARACTER(LEN=LEN_TRIM(old_attr_name)+1,KIND=C_CHAR) :: c_old_attr_name + CHARACTER(LEN=LEN_TRIM(new_attr_name)+1,KIND=C_CHAR) :: c_new_attr_name INTERFACE - INTEGER FUNCTION H5Arename_c(loc_id, & - old_attr_name, old_attr_namelen, new_attr_name, new_attr_namelen) BIND(C,NAME='h5arename_c') + INTEGER FUNCTION H5Arename(loc_id, old_attr_name, new_attr_name) & + BIND(C,NAME='H5Arename') IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T + IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: loc_id + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: old_attr_name + CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: new_attr_name + END FUNCTION H5Arename + END INTERFACE + + c_old_attr_name = TRIM(old_attr_name)//C_NULL_CHAR + c_new_attr_name = TRIM(new_attr_name)//C_NULL_CHAR + + hdferr = H5Arename(loc_id, c_old_attr_name, c_new_attr_name) + + END SUBROUTINE h5arename_f + +!> +!! \ingroup FH5A +!! +!! \brief Asynchronously renames an attribute +!! +!! \param loc_id Location or object identifier; may be dataset or group +!! \param old_attr_name Prior attribute name +!! \param new_attr_name New attribute name +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Arename_async() +!! + SUBROUTINE h5arename_async_f(loc_id, old_attr_name, new_attr_name, es_id, hdferr, & + file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: old_attr_name + CHARACTER(LEN=*), INTENT(IN) :: new_attr_name + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + CHARACTER(LEN=LEN_TRIM(old_attr_name)+1,KIND=C_CHAR) :: c_old_attr_name + CHARACTER(LEN=LEN_TRIM(new_attr_name)+1,KIND=C_CHAR) :: c_new_attr_name + + INTERFACE + INTEGER FUNCTION H5Arename_async(file, func, line, loc_id, old_attr_name, new_attr_name, es_id) & + BIND(C,NAME='H5Arename_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: old_attr_name - INTEGER(SIZE_T) :: old_attr_namelen CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: new_attr_name - INTEGER(SIZE_T) :: new_attr_namelen - END FUNCTION H5Arename_c + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Arename_async + END INTERFACE + + c_old_attr_name = TRIM(old_attr_name)//C_NULL_CHAR + c_new_attr_name = TRIM(new_attr_name)//C_NULL_CHAR + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = H5Arename_async(file_default, func_default, line_default, & + loc_id, c_old_attr_name, c_new_attr_name, es_id) + + END SUBROUTINE h5arename_async_f + +!> +!! \ingroup FH5A +!! +!! 
\brief Asynchronously reads an attribute.
+!!
+!! \param attr_id      Identifier of an attribute to read.
+!! \param mem_type_id  Identifier of the attribute datatype (in memory).
+!! \param buf          Buffer for data to be read.
+!! \param es_id        \es_id
+!! \param hdferr       \fortran_error
+!! \param file         \fortran_file
+!! \param func         \fortran_func
+!! \param line         \fortran_line
+!!
+!! See C API: @ref H5Aread_async()
+!!
+
+  SUBROUTINE h5aread_async_f(attr_id, mem_type_id, buf, es_id, hdferr, file, func, line)
+    IMPLICIT NONE
+    INTEGER(HID_T), INTENT(IN) :: attr_id
+    INTEGER(HID_T), INTENT(IN) :: mem_type_id
+    TYPE(C_PTR) , INTENT(OUT) :: buf
+    INTEGER(HID_T), INTENT(IN) :: es_id
+    INTEGER , INTENT(OUT) :: hdferr
+    TYPE(C_PTR), OPTIONAL :: file
+    TYPE(C_PTR), OPTIONAL :: func
+    INTEGER , INTENT(IN), OPTIONAL :: line
+
+    TYPE(C_PTR) :: file_default = C_NULL_PTR
+    TYPE(C_PTR) :: func_default = C_NULL_PTR
+    INTEGER(KIND=C_INT) :: line_default = 0
+
+    INTERFACE
+       INTEGER FUNCTION H5Aread_async(file, func, line, attr_id, mem_type_id, buf, es_id) &
+            BIND(C,NAME='H5Aread_async')
+         IMPORT :: C_CHAR, C_INT, C_PTR
+         IMPORT :: HID_T
+         IMPLICIT NONE
+         TYPE(C_PTR), VALUE :: file
+         TYPE(C_PTR), VALUE :: func
+         INTEGER(C_INT), VALUE :: line
+         INTEGER(HID_T), VALUE :: attr_id
+         INTEGER(HID_T), VALUE :: mem_type_id
+         TYPE(C_PTR) , VALUE :: buf
+         INTEGER(HID_T), VALUE :: es_id
+       END FUNCTION H5Aread_async
+    END INTERFACE
+
+    IF (PRESENT(file)) file_default = file
+    IF (PRESENT(func)) func_default = func
+    IF (PRESENT(line)) line_default = INT(line, C_INT)
+
+    hdferr = H5Aread_async(file_default, func_default, line_default, attr_id, mem_type_id, buf, es_id)
+
+  END SUBROUTINE h5aread_async_f
+
+!>
+!! \ingroup FH5A
+!!
+!! \brief Asynchronously writes an attribute.
+!!
+!! \param attr_id      Identifier of an attribute to write.
+!! \param mem_type_id  Identifier of the attribute datatype (in memory).
+!! \param buf          Data to be written.
+!! \param es_id        \es_id
+!! \param hdferr       \fortran_error
+!! \param file         \fortran_file
+!! \param func         \fortran_func
+!! \param line         \fortran_line
+!!
+!! See C API: @ref H5Awrite_async()
+!!
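h5aread_async_f above and h5awrite_async_f below take the data buffer as a TYPE(C_PTR), so callers pass C_LOC of a contiguous TARGET buffer and must keep that buffer valid until the event set completes. A hedged sketch (attr_id and es_id are placeholders; H5T_NATIVE_INTEGER is the usual predefined memory type):

    USE ISO_C_BINDING, ONLY : C_PTR, C_LOC
    INTEGER(HID_T) :: attr_id, es_id        ! placeholders
    INTEGER, DIMENSION(10), TARGET :: adata = 0
    TYPE(C_PTR)    :: f_ptr
    INTEGER        :: hdferr

    f_ptr = C_LOC(adata(1))
    CALL h5awrite_async_f(attr_id, H5T_NATIVE_INTEGER, f_ptr, es_id, hdferr)
    ! adata must remain allocated and unmodified until the event set completes.
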
+ + SUBROUTINE h5awrite_async_f(attr_id, mem_type_id, buf, es_id, hdferr, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: attr_id + INTEGER(HID_T), INTENT(IN) :: mem_type_id + TYPE(C_PTR) , INTENT(IN) :: buf + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER FUNCTION H5Awrite_async(file, func, line, attr_id, mem_type_id, buf, es_id) & + BIND(C,NAME='H5Awrite_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: attr_id + INTEGER(HID_T), VALUE :: mem_type_id + TYPE(C_PTR) , VALUE :: buf + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Awrite_async END INTERFACE - old_attr_namelen = LEN(old_attr_name) - new_attr_namelen = LEN(new_attr_name) + IF (PRESENT(file)) file_default = file + IF (PRESENT(func)) func_default = func + IF (PRESENT(line)) line_default = INT(line, C_INT) - hdferr = H5Arename_c(loc_id, & - old_attr_name, old_attr_namelen, new_attr_name, new_attr_namelen) + hdferr = H5Awrite_async(file_default, func_default, line_default, attr_id, mem_type_id, buf, es_id) - END SUBROUTINE H5Arename_f + END SUBROUTINE h5awrite_async_f #ifdef H5_DOXYGEN @@ -1425,7 +2271,7 @@ END SUBROUTINE h5aread_f #else - SUBROUTINE H5Awrite_char_scalar(attr_id, memtype_id, buf, dims, hdferr) + SUBROUTINE h5awrite_char_scalar(attr_id, memtype_id, buf, dims, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(HID_T), INTENT(IN) :: memtype_id @@ -1435,9 +2281,9 @@ SUBROUTINE H5Awrite_char_scalar(attr_id, memtype_id, buf, dims, hdferr) CALL H5Awrite_char_scalar_fix(attr_id, memtype_id, buf, LEN(buf), dims, hdferr) - END SUBROUTINE H5Awrite_char_scalar + END SUBROUTINE h5awrite_char_scalar - SUBROUTINE H5Awrite_char_scalar_fix(attr_id, memtype_id, buf, buf_len, dims, hdferr) + SUBROUTINE h5awrite_char_scalar_fix(attr_id, memtype_id, buf, buf_len, dims, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(HID_T), INTENT(IN) :: memtype_id @@ -1451,9 +2297,9 @@ SUBROUTINE H5Awrite_char_scalar_fix(attr_id, memtype_id, buf, buf_len, dims, hdf hdferr = H5Awrite_f_c(attr_id, memtype_id, f_ptr) - END SUBROUTINE H5Awrite_char_scalar_fix + END SUBROUTINE h5awrite_char_scalar_fix - SUBROUTINE H5Awrite_ptr(attr_id, mem_type_id, buf, hdferr) + SUBROUTINE h5awrite_ptr(attr_id, mem_type_id, buf, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(HID_T), INTENT(IN) :: mem_type_id @@ -1462,9 +2308,9 @@ SUBROUTINE H5Awrite_ptr(attr_id, mem_type_id, buf, hdferr) hdferr = H5Awrite_f_c(attr_id, mem_type_id, buf) - END SUBROUTINE H5Awrite_ptr + END SUBROUTINE h5awrite_ptr - SUBROUTINE H5Aread_char_scalar(attr_id, memtype_id, buf, dims, hdferr) + SUBROUTINE h5aread_char_scalar(attr_id, memtype_id, buf, dims, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(HID_T), INTENT(IN) :: memtype_id @@ -1489,18 +2335,18 @@ SUBROUTINE H5Aread_char_scalar_fix(attr_id, memtype_id, buf, buf_len, hdferr) hdferr = H5Aread_f_c(attr_id, memtype_id, f_ptr) - END SUBROUTINE H5Aread_char_scalar_fix + END SUBROUTINE h5aread_char_scalar_fix - SUBROUTINE H5Aread_ptr(attr_id, mem_type_id, buf, hdferr) + SUBROUTINE h5aread_ptr(attr_id, mem_type_id, buf, hdferr) 
IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(HID_T), INTENT(IN) :: mem_type_id - TYPE(C_PTR), INTENT(INOUT), TARGET :: buf + TYPE(C_PTR), INTENT(INOUT) :: buf INTEGER, INTENT(OUT) :: hdferr hdferr = H5Aread_f_c(attr_id, mem_type_id, buf) - END SUBROUTINE H5Aread_ptr + END SUBROUTINE h5aread_ptr #endif diff --git a/fortran/src/H5Df.c b/fortran/src/H5Df.c index e92e6a80a32..3df2fbf5fc3 100644 --- a/fortran/src/H5Df.c +++ b/fortran/src/H5Df.c @@ -20,111 +20,6 @@ #include "H5f90.h" -/****if* H5Df/h5dcreate_c - * NAME - * h5dcreate_c - * PURPOSE - * Call H5Dcreate2 to create a dataset - * INPUTS - * loc_id - file or group identifier - * name - name of the dataset - * namelen - name length - * type_id - datatype identifier - * space_id - dataspace identifier - * crt_pr - identifier of creation property list - * OUTPUTS - * dset_id - dataset identifier - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Elena Pourmal - * Wednesday, August 4, 1999 - * HISTORY - * - Added optional parameters introduced in version 1.8 - * February, 2008 - * SOURCE - */ -int_f -h5dcreate_c(hid_t_f *loc_id, _fcd name, int_f *namelen, hid_t_f *type_id, hid_t_f *space_id, hid_t_f *lcpl_id, - hid_t_f *dcpl_id, hid_t_f *dapl_id, hid_t_f *dset_id) -/******/ -{ - char *c_name = NULL; - hid_t c_dset_id; - int ret_value = -1; - - /* - * Convert FORTRAN name to C name - */ - if (NULL == (c_name = (char *)HD5f2cstring(name, (size_t)*namelen))) - goto DONE; - - /* - * Call H5Dcreate2 function. - */ - if ((c_dset_id = H5Dcreate2((hid_t)*loc_id, c_name, (hid_t)*type_id, (hid_t)*space_id, (hid_t)*lcpl_id, - (hid_t)*dcpl_id, (hid_t)*dapl_id)) < 0) - goto DONE; - *dset_id = (hid_t_f)c_dset_id; - - ret_value = 0; - -DONE: - if (c_name) - HDfree(c_name); - return ret_value; -} - -/****if* H5Df/h5dopen_c - * NAME - * h5dopen_c - * PURPOSE - * Call H5Dopen2 to open a dataset - * INPUTS - * loc_id - file or group identifier - * name - name of the dataset - * namelen - name length - * dapl_id - Dataset access property list - * OUTPUTS - * dset_id - dataset identifier - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Elena Pourmal - * Wednesday, August 4, 1999 - * HISTORY - * Added 1.8 parameter: dapl_id - * SOURCE - */ -int_f -h5dopen_c(hid_t_f *loc_id, _fcd name, int_f *namelen, hid_t_f *dapl_id, hid_t_f *dset_id) -/******/ -{ - char *c_name = NULL; - hid_t c_dset_id; - int ret_value = -1; - - /* - * Convert FORTRAN name to C name - */ - if (NULL == (c_name = (char *)HD5f2cstring(name, (size_t)*namelen))) - goto DONE; - - /* - * Call H5Dopen2 function. 
- */ - if ((c_dset_id = H5Dopen2((hid_t)*loc_id, c_name, (hid_t)*dapl_id)) < 0) - goto DONE; - - *dset_id = (hid_t_f)c_dset_id; - ret_value = 0; - -DONE: - if (c_name) - HDfree(c_name); - return ret_value; -} - /****if* H5Df/h5dwrite_ref_reg_c * NAME * h5dwrite_ref_reg_c @@ -268,71 +163,6 @@ h5dread_ref_reg_c(hid_t_f *dset_id, hid_t_f *mem_type_id, hid_t_f *mem_space_id, return ret_value; } -/****if* H5Df/h5dclose_c - * NAME - * h5dclose_c - * PURPOSE - * Call H5Dclose to close a dataset - * INPUTS - * dset_id - identifier of the dataset to be closed - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Elena Pourmal - * Wednesday, August 4, 1999 - * HISTORY - * - * SOURCE - */ - -int_f -h5dclose_c(hid_t_f *dset_id) -/******/ -{ - int ret_value = 0; - hid_t c_dset_id; - c_dset_id = (hid_t)*dset_id; - if (H5Dclose(c_dset_id) < 0) - ret_value = -1; - return ret_value; -} - -/****if* H5Df/h5dget_space_c - * NAME - * h5dget_space_c - * PURPOSE - * Call H5Dget_space to obtain dataspace of a dataset - * INPUTS - * dset_id - identifier of the dataset - * OUTPUTS - * space_id - identifier of the dataset's dataspace - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Elena Pourmal - * Thursday, August 19, 1999 - * HISTORY - * - * SOURCE - */ - -int_f -h5dget_space_c(hid_t_f *dset_id, hid_t_f *space_id) -/******/ -{ - int ret_value = -1; - hid_t c_dset_id; - hid_t c_space_id; - - c_dset_id = (hid_t)*dset_id; - c_space_id = H5Dget_space(c_dset_id); - if (c_space_id < 0) - return ret_value; - ret_value = 0; - *space_id = (hid_t_f)c_space_id; - return ret_value; -} - /****if* H5Df/h5dget_type_c * NAME * h5dget_type_c @@ -410,60 +240,6 @@ h5dget_create_plist_c(hid_t_f *dset_id, hid_t_f *plist_id) return ret_value; } -/****if* H5Df/h5dset_extent_c - * NAME - * h5dset_extent_c - * PURPOSE - * Call H5Dset_extent to extend dataset with unlimited dimensions - * INPUTS - * dset_id - identifier of the dataset - * OUTPUTS - * dims - array with the dimension sizes - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Elena Pourmal - * Thursday, August 19, 1999 - * - * HISTORY - * Changed name from the now obsolete h5dextend - * to h5dset_extent in order to match new fortran interface. - * -MSB- March 14, 2008 - * SOURCE - */ - -int_f -h5dset_extent_c(hid_t_f *dset_id, hsize_t_f *dims) -/******/ -{ - hid_t c_space_id; - hsize_t c_dims[H5S_MAX_RANK]; - int rank; - int i; - int status; - int ret_value = -1; - - if ((c_space_id = H5Dget_space((hid_t)*dset_id)) < 0) - return ret_value; - - rank = H5Sget_simple_extent_ndims(c_space_id); - H5Sclose(c_space_id); - if (rank < 0) - return ret_value; - - /* - * Reverse dimensions due to C-FORTRAN storage order. 
- */ - for (i = 0; i < rank; i++) - c_dims[i] = (hsize_t)dims[rank - i - 1]; - - status = H5Dset_extent((hid_t)*dset_id, c_dims); - - if (status >= 0) - ret_value = 0; - return ret_value; -} - /****if* H5Df/h5dget_storage_size_c * NAME * h5dget_storage_size_c diff --git a/fortran/src/H5Dff.F90 b/fortran/src/H5Dff.F90 index bbfeb06c15c..06034ac4d19 100644 --- a/fortran/src/H5Dff.F90 +++ b/fortran/src/H5Dff.F90 @@ -88,6 +88,7 @@ MODULE H5D USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR, C_CHAR USE H5GLOBAL USE H5LIB, ONLY : h5kind_to_type + USE H5S, ONLY : H5Sget_simple_extent_ndims_f, H5Sclose_f PRIVATE h5dread_vl_integer, h5dread_vl_real, h5dread_vl_string PRIVATE h5dwrite_vl_integer, h5dwrite_vl_real, h5dwrite_vl_string @@ -232,37 +233,119 @@ SUBROUTINE h5dcreate_f(loc_id, name, type_id, space_id, dset_id, & INTEGER(HID_T), INTENT(IN) :: space_id INTEGER(HID_T), INTENT(OUT) :: dset_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: dcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: dapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: dcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: dapl_id INTEGER(HID_T) :: lcpl_id_default INTEGER(HID_T) :: dcpl_id_default INTEGER(HID_T) :: dapl_id_default - - INTEGER :: namelen ! Name length + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name INTERFACE - INTEGER FUNCTION h5dcreate_c(loc_id, name, namelen, type_id, & - space_id, lcpl_id_default, dcpl_id_default, dapl_id_default, dset_id) & - BIND(C,NAME='h5dcreate_c') + INTEGER(HID_T) FUNCTION H5Dcreate2(loc_id, name, type_id, & + space_id, lcpl_id_default, dcpl_id_default, dapl_id_default) & + BIND(C,NAME='H5Dcreate2') IMPORT :: C_CHAR IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER :: namelen - INTEGER(HID_T), INTENT(IN) :: type_id - INTEGER(HID_T), INTENT(IN) :: space_id + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: type_id + INTEGER(HID_T), VALUE :: space_id + INTEGER(HID_T), VALUE :: lcpl_id_default + INTEGER(HID_T), VALUE :: dcpl_id_default + INTEGER(HID_T), VALUE :: dapl_id_default + END FUNCTION H5Dcreate2 + END INTERFACE - INTEGER(HID_T) :: lcpl_id_default - INTEGER(HID_T) :: dcpl_id_default - INTEGER(HID_T) :: dapl_id_default + c_name = TRIM(name)//C_NULL_CHAR - INTEGER(HID_T), INTENT(OUT) :: dset_id - END FUNCTION h5dcreate_c + lcpl_id_default = H5P_DEFAULT_F + dcpl_id_default = H5P_DEFAULT_F + dapl_id_default = H5P_DEFAULT_F + + IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id + IF(PRESENT(dcpl_id)) dcpl_id_default = dcpl_id + IF(PRESENT(dapl_id)) dapl_id_default = dapl_id + + dset_id = INT(h5dcreate2(loc_id, c_name, type_id, space_id, & + lcpl_id_default, dcpl_id_default, dapl_id_default), HID_T) + + hdferr = 0 + IF(dset_id.LT.0) hdferr = -1 + + END SUBROUTINE h5dcreate_f + +!> +!! \ingroup FH5D +!! +!! \brief Asynchronously creates a dataset at the specified location. +!! +!! \param loc_id File or group identifier +!! \param name Dataset name +!! \param type_id Dataset datatype identifier +!! \param space_id Dataset dataspace identifier +!! \param dset_id Dataset identifier +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param dcpl_id Dataset creation property list +!! \param lcpl_id Link creation property list +!! \param dapl_id Dataset access property list +!! \param file \fortran_file +!! 
\param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Dcreate_async() +!! + SUBROUTINE h5dcreate_async_f(loc_id, name, type_id, space_id, dset_id, es_id, & + hdferr, dcpl_id, lcpl_id, dapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: name + INTEGER(HID_T), INTENT(IN) :: type_id + INTEGER(HID_T), INTENT(IN) :: space_id + INTEGER(HID_T), INTENT(OUT) :: dset_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: dcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: dapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: lcpl_id_default + INTEGER(HID_T) :: dcpl_id_default + INTEGER(HID_T) :: dapl_id_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(HID_T) FUNCTION H5Dcreate_async(file, func, line, loc_id, name, type_id, & + space_id, lcpl_id_default, dcpl_id_default, dapl_id_default, es_id) & + BIND(C,NAME='H5Dcreate_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: type_id + INTEGER(HID_T), VALUE :: space_id + INTEGER(HID_T), VALUE :: lcpl_id_default + INTEGER(HID_T), VALUE :: dcpl_id_default + INTEGER(HID_T), VALUE :: dapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Dcreate_async END INTERFACE + c_name = TRIM(name)//C_NULL_CHAR + lcpl_id_default = H5P_DEFAULT_F dcpl_id_default = H5P_DEFAULT_F dapl_id_default = H5P_DEFAULT_F @@ -270,12 +353,18 @@ END FUNCTION h5dcreate_c IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id IF(PRESENT(dcpl_id)) dcpl_id_default = dcpl_id IF(PRESENT(dapl_id)) dapl_id_default = dapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) - namelen = LEN(name) - hdferr = h5dcreate_c(loc_id, name, namelen, type_id, space_id, & - lcpl_id_default, dcpl_id_default, dapl_id_default, dset_id) + dset_id = h5dcreate_async(file_default, func_default, line_default, & + loc_id, c_name, type_id, space_id, & + lcpl_id_default, dcpl_id_default, dapl_id_default, es_id) - END SUBROUTINE h5dcreate_f + hdferr = 0 + IF(dset_id.LT.0) hdferr = -1 + + END SUBROUTINE h5dcreate_async_f !> !! \ingroup FH5D @@ -296,33 +385,102 @@ SUBROUTINE h5dopen_f(loc_id, name, dset_id, hdferr, dapl_id) CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(HID_T), INTENT(OUT) :: dset_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: dapl_id - INTEGER :: namelen ! 
Name length + INTEGER(HID_T), INTENT(IN), OPTIONAL :: dapl_id INTEGER(HID_T) :: dapl_id_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name INTERFACE - INTEGER FUNCTION h5dopen_c(loc_id, name, namelen, dapl_id_default, dset_id) & - BIND(C,NAME='h5dopen_c') + INTEGER(HID_T) FUNCTION H5Dopen2(loc_id, name, dapl_id_default) & + BIND(C,NAME='H5Dopen2') IMPORT :: C_CHAR IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER :: namelen - INTEGER(HID_T), INTENT(IN) :: dapl_id_default - INTEGER(HID_T), INTENT(OUT) :: dset_id - END FUNCTION h5dopen_c + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T),VALUE :: dapl_id_default + END FUNCTION H5Dopen2 END INTERFACE + c_name = TRIM(name)//C_NULL_CHAR + dapl_id_default = H5P_DEFAULT_F IF(PRESENT(dapl_id)) dapl_id_default = dapl_id - namelen = LEN(name) - hdferr = h5dopen_c(loc_id, name, namelen, dapl_id_default, dset_id) + dset_id = INT(H5Dopen2(loc_id, c_name, dapl_id_default), HID_T) + + hdferr = 0 + IF(dset_id.LT.0) hdferr = -1 END SUBROUTINE h5dopen_f +!> +!! \ingroup FH5D +!! +!! \brief Asynchronously opens an existing dataset. +!! +!! \param loc_id File or group identifier +!! \param name Dataset name +!! \param dset_id Dataset identifier +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param dapl_id Dataset access property list +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Dopen_async() +!! + SUBROUTINE h5dopen_async_f(loc_id, name, dset_id, es_id, hdferr, dapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: name + INTEGER(HID_T), INTENT(OUT) :: dset_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: dapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: dapl_id_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(HID_T) FUNCTION H5Dopen_async(file, func, line, loc_id, name, dapl_id_default, es_id) & + BIND(C,NAME='H5Dopen_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: dapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Dopen_async + END INTERFACE + + c_name = TRIM(name)//C_NULL_CHAR + + dapl_id_default = H5P_DEFAULT_F + IF(PRESENT(dapl_id)) dapl_id_default = dapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + dset_id = H5Dopen_async(file_default, func_default, line_default, & + loc_id, c_name, dapl_id_default, es_id) + + hdferr = 0 + IF(dset_id.LT.0) hdferr = -1 + + END SUBROUTINE h5dopen_async_f + !> !! \ingroup FH5D !! 
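A minimal sketch of the open/close pattern these new wrappers enable (file_id and es_id are placeholders; the matching h5dclose_async_f wrapper is added in the next hunk, and hdferr reports only whether the request was queued, with completion status delivered through the event set):

    INTEGER(HID_T) :: file_id, dset_id, es_id
    INTEGER        :: hdferr

    CALL h5dopen_async_f(file_id, "dset1", dset_id, es_id, hdferr)
    ! ... queue reads/writes on dset_id against the same event set ...
    CALL h5dclose_async_f(dset_id, es_id, hdferr)
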
@@ -337,19 +495,68 @@ SUBROUTINE h5dclose_f(dset_id, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER, INTENT(OUT) :: hdferr + INTERFACE - INTEGER FUNCTION h5dclose_c(dset_id) & - BIND(C,NAME='h5dclose_c') + INTEGER(C_INT) FUNCTION H5Dclose(dset_id) BIND(C,NAME='H5Dclose') + IMPORT :: C_INT IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: dset_id - END FUNCTION h5dclose_c + INTEGER(HID_T), VALUE :: dset_id + END FUNCTION h5dclose END INTERFACE - hdferr = h5dclose_c(dset_id) + hdferr = INT(H5Dclose(dset_id)) END SUBROUTINE h5dclose_f +!> +!! \ingroup FH5D +!! +!! \brief Asynchronously closes a dataset. +!! +!! \param dset_id Dataset identifier +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Dclose_async() +!! + SUBROUTINE h5dclose_async_f(dset_id, es_id, hdferr, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: dset_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Dclose_async(file, func, line, dset_id, es_id) BIND(C,NAME='H5Dclose_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: dset_id + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Dclose_async + END INTERFACE + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(H5Dclose_async(file_default, func_default, line_default, dset_id, es_id)) + + END SUBROUTINE h5dclose_async_f + !> !! \ingroup FH5D !! @@ -386,29 +593,149 @@ END SUBROUTINE h5dget_type_f !! \brief Extends a dataset with unlimited dimension. !! !! \param dataset_id Dataset identifier -!! \param size Array containing the new magnitude of each dimension +!! \param fsize Array containing the new magnitude of each dimension !! \param hdferr \fortran_error !! !! See C API: @ref H5Dset_extent() !! - SUBROUTINE h5dset_extent_f(dataset_id, size, hdferr) + SUBROUTINE h5dset_extent_f(dataset_id, fsize, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dataset_id - INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: size + INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: fsize INTEGER, INTENT(OUT) :: hdferr + + INTEGER(HSIZE_T), DIMENSION(:), ALLOCATABLE :: csize + INTEGER(HID_T) :: space_id + INTEGER :: rank + INTEGER :: i + INTERFACE - INTEGER FUNCTION h5dset_extent_c(dataset_id, size) & - BIND(C,NAME='h5dset_extent_c') + INTEGER(C_INT) FUNCTION H5Dset_extent(dataset_id, size) & + BIND(C,NAME='H5Dset_extent') + IMPORT :: C_INT IMPORT :: HID_T, HSIZE_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: dataset_id - INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: size - END FUNCTION h5dset_extent_c + INTEGER(HID_T), VALUE :: dataset_id + INTEGER(HSIZE_T), DIMENSION(*) :: size + END FUNCTION H5Dset_extent END INTERFACE - hdferr = H5Dset_extent_c(dataset_id, size) + CALL H5Dget_space_f(dataset_id, space_id, hdferr) + IF(hdferr.LT.0) RETURN + + CALL H5Sget_simple_extent_ndims_f(space_id, rank, hdferr) + IF( hdferr.LT.0 .OR. 
rank.LT.0 )THEN + CALL H5Sclose_f(space_id, hdferr) + hdferr = -1 + RETURN + ENDIF + CALL H5Sclose_f(space_id, hdferr) + IF(hdferr.LT.0) RETURN + + ALLOCATE(csize(rank), STAT=hdferr) + IF (hdferr .NE. 0 ) THEN + hdferr = -1 + RETURN + ENDIF + + ! + ! Reverse dimensions due to C-FORTRAN storage order. + ! + DO i = 1, rank + csize(i) = fsize(rank - i + 1) + ENDDO + + hdferr = INT(H5Dset_extent(dataset_id, csize)) + + DEALLOCATE(csize) + END SUBROUTINE h5dset_extent_f + +!> +!! \ingroup FH5D +!! +!! \brief Asynchronously extends a dataset with unlimited dimension. +!! +!! \param dataset_id Dataset identifier +!! \param fsize Array containing the new magnitude of each dimension +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Dset_extent_async() +!! + SUBROUTINE h5dset_extent_async_f(dataset_id, fsize, es_id, hdferr, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: dataset_id + INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: fsize + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + INTEGER(HSIZE_T), DIMENSION(:), ALLOCATABLE :: csize + INTEGER(HID_T) :: space_id + INTEGER :: rank + INTEGER :: i + + INTERFACE + INTEGER(C_INT) FUNCTION H5Dset_extent_async(file, func, line, dataset_id, size, es_id) & + BIND(C,NAME='H5Dset_extent_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T, HSIZE_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: dataset_id + INTEGER(HSIZE_T), DIMENSION(*) :: size + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Dset_extent_async + END INTERFACE + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + CALL H5Dget_space_f(dataset_id, space_id, hdferr) + IF(hdferr.LT.0) RETURN + + CALL H5Sget_simple_extent_ndims_f(space_id, rank, hdferr) + IF( hdferr.LT.0 .OR. rank.LT.0 )THEN + CALL H5Sclose_f(space_id, hdferr) + hdferr = -1 + RETURN + ENDIF + CALL H5Sclose_f(space_id, hdferr) + IF(hdferr.LT.0) RETURN + + ALLOCATE(csize(rank), STAT=hdferr) + IF (hdferr .NE. 0 ) THEN + hdferr = -1 + RETURN + ENDIF + + ! + ! Reverse dimensions due to C-FORTRAN storage order. + ! + DO i = 1, rank + csize(i) = fsize(rank - i + 1) + ENDDO + + hdferr = INT(H5Dset_extent_async(file_default, func_default, line_default, & + dataset_id, csize, es_id)) + + DEALLOCATE(csize) + + END SUBROUTINE h5dset_extent_async_f + !> !! \ingroup FH5D !! 
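The dimension reversal that previously lived in the removed h5dset_extent_c (H5Df.c) is now performed inside these wrappers by querying the dataspace rank, so callers keep passing the new extents in Fortran axis order, exactly as before. A small sketch for a rank-2 extendible dataset (dset_id is a placeholder):

    INTEGER(HSIZE_T), DIMENSION(2) :: new_size
    INTEGER :: hdferr

    new_size(1) = 20                 ! extents given in Fortran axis order, as before
    new_size(2) = 100
    CALL h5dset_extent_f(dset_id, new_size, hdferr)
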
@@ -551,8 +878,8 @@ SUBROUTINE h5dcreate_anon_f(loc_id, type_id, space_id, dset_id, hdferr, dcpl_id, INTEGER(HID_T), INTENT(IN) :: space_id INTEGER(HID_T), INTENT(OUT) :: dset_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: dcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: dapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: dcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: dapl_id INTEGER(HID_T) :: dcpl_id_default INTEGER(HID_T) :: dapl_id_default @@ -611,14 +938,14 @@ SUBROUTINE h5dread_vl_f(dset_id, mem_type_id, buf, dims, len, hdferr, mem_space_ INTEGER(HSIZE_T), INTENT(IN), DIMENSION(2) :: dims INTEGER(SIZE_T), INTENT(INOUT), DIMENSION(*) :: len INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp END SUBROUTINE h5dread_vl_f !> !! \ingroup FH5D !! - !! \brief Writes variable-length data. F2003 API h5dwritef should be used instead. + !! \brief Writes variable-length data. F2003 API h5dwrite_f should be used instead. !! !! \param dset_id Dataset identifier. !! \param mem_type_id Memory datatype identifier. @@ -641,9 +968,9 @@ SUBROUTINE h5dwrite_vl_f(dset_id, mem_type_id, buf, dims, len, hdferr, mem_space INTEGER(HSIZE_T), INTENT(IN), DIMENSION(2) :: dims INTEGER(SIZE_T), INTENT(IN), DIMENSION(*) :: len INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp END SUBROUTINE h5dwrite_vl_f #else @@ -657,9 +984,9 @@ SUBROUTINE h5dwrite_vl_integer(dset_id, mem_type_id, buf, dims, len, & INTEGER(SIZE_T), INTENT(IN), DIMENSION(*) :: len INTEGER, INTENT(IN), DIMENSION(dims(1),dims(2)), TARGET :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default INTEGER(HID_T) :: file_space_id_default @@ -707,9 +1034,9 @@ SUBROUTINE h5dread_vl_integer(dset_id, mem_type_id, buf, dims, len, & INTEGER(SIZE_T), INTENT(INOUT), DIMENSION(*) :: len INTEGER, INTENT(INOUT), DIMENSION(dims(1),dims(2)), TARGET :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default INTEGER(HID_T) :: file_space_id_default @@ -761,9 +1088,9 @@ SUBROUTINE h5dwrite_vl_real(dset_id, mem_type_id, buf, dims, len, & REAL, INTENT(IN), & DIMENSION(dims(1),dims(2)) :: buf ! 
Data buffer INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default @@ -815,9 +1142,9 @@ SUBROUTINE h5dread_vl_real(dset_id, mem_type_id, buf, dims, len, & REAL, INTENT(INOUT), & DIMENSION(dims(1),dims(2)) :: buf ! Data buffer INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default @@ -871,9 +1198,9 @@ SUBROUTINE h5dwrite_vl_string(dset_id, mem_type_id, buf, dims, str_len, & INTEGER(SIZE_T), INTENT(IN), DIMENSION(*) :: str_len CHARACTER(LEN=*), INTENT(IN), DIMENSION(dims(2)) :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default @@ -925,9 +1252,9 @@ SUBROUTINE h5dread_vl_string(dset_id, mem_type_id, buf, dims, str_len, & CHARACTER(LEN=*), INTENT(OUT), & DIMENSION(dims(2)) :: buf ! Data buffer INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default @@ -1001,8 +1328,7 @@ END SUBROUTINE h5dget_offset_f !> !! \ingroup FH5D !! -!! \brief Returns an identifier for a copy of the dataspace for a -!! dataset. +!! \brief Returns an identifier for a copy of the dataspace for a dataset. !! !! \param dataset_id Dataset identifier. !! \param dataspace_id Dataspace identifier. @@ -1016,17 +1342,75 @@ SUBROUTINE h5dget_space_f(dataset_id, dataspace_id, hdferr) INTEGER(HID_T), INTENT(OUT) :: dataspace_id INTEGER, INTENT(OUT) :: hdferr INTERFACE - INTEGER FUNCTION h5dget_space_c(dataset_id, dataspace_id) BIND(C,NAME='h5dget_space_c') + INTEGER(HID_T) FUNCTION H5Dget_space(dataset_id) BIND(C,NAME='H5Dget_space') IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: dataset_id - INTEGER(HID_T), INTENT(OUT) :: dataspace_id - END FUNCTION h5dget_space_c + INTEGER(HID_T), VALUE :: dataset_id + END FUNCTION H5Dget_space END INTERFACE - hdferr = h5dget_space_c(dataset_id, dataspace_id) + dataspace_id = h5dget_space(dataset_id) + + hdferr = 0 + IF(dataspace_id.LT.0) hdferr = -1 + END SUBROUTINE h5dget_space_f +!> +!! \ingroup FH5D +!! +!! \brief Asynchronously returns an identifier for a copy of the dataspace for a dataset. +!! +!! \param dataset_id Dataset identifier. +!! \param dataspace_id Dataspace identifier. +!! 
\param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Dget_space_async() +!! + SUBROUTINE h5dget_space_async_f(dataset_id, dataspace_id, es_id, hdferr, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: dataset_id + INTEGER(HID_T), INTENT(OUT) :: dataspace_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(HID_T) FUNCTION H5Dget_space_async(file, func, line, dataset_id, es_id) & + BIND(C,NAME='H5Dget_space_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: dataset_id + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Dget_space_async + END INTERFACE + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + dataspace_id = H5Dget_space_async(file_default, func_default, line_default, & + dataset_id, es_id) + + hdferr = 0 + IF(dataspace_id.LT.0) hdferr = -1 + + END SUBROUTINE h5dget_space_async_f + !> !! \ingroup FH5D !! @@ -1091,7 +1475,141 @@ END FUNCTION h5dvlen_reclaim_c hdferr = H5Dvlen_reclaim_c(type_id, space_id, plist_id, buf) - END SUBROUTINE H5Dvlen_reclaim_f + END SUBROUTINE h5dvlen_reclaim_f + +!> +!! \ingroup FH5D +!! +!! \brief Asynchronously reads raw data from a dataset into a buffer. +!! +!! \param dset_id Identifier of the dataset read from. +!! \param mem_type_id Identifier of the memory datatype. +!! \param buf Buffer to receive data read from file. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param mem_space_id Identifier of the memory dataspace. +!! \param file_space_id Identifier of dataset's dataspace in the file. (Default: H5S_ALL_F) +!! \param xfer_prp Identifier of a transfer property list for this I/O operation. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Dread_async() +!! 
+ SUBROUTINE h5dread_async_f(dset_id, mem_type_id, buf, es_id, hdferr, & + mem_space_id, file_space_id, xfer_prp, file, func, line) + USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: dset_id + INTEGER(HID_T), INTENT(IN) :: mem_type_id + TYPE(C_PTR), INTENT(INOUT) :: buf + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Dread_async(file, func, line, dset_id, mem_type_id, & + mem_space_id, file_space_id, xfer_prp, buf, es_id) BIND(C,NAME='H5Dread_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: dset_id + INTEGER(HID_T), VALUE :: mem_type_id + INTEGER(HID_T), VALUE :: mem_space_id + INTEGER(HID_T), VALUE :: file_space_id + INTEGER(HID_T), VALUE :: xfer_prp + TYPE(C_PTR) , VALUE :: buf + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Dread_async + END INTERFACE + + IF (PRESENT(file)) file_default = file + IF (PRESENT(func)) func_default = func + IF (PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = H5Dread_async(file_default, func_default, line_default, & + dset_id, mem_type_id, mem_space_id, file_space_id, xfer_prp, buf, es_id ) + + END SUBROUTINE h5dread_async_f + +!> +!! \ingroup FH5D +!! +!! \brief Asynchronously writes raw data from a buffer to a dataset. +!! +!! \param dset_id Identifier of the dataset to write to. +!! \param mem_type_id Identifier of the memory datatype. +!! \param buf Buffer with data to be written to the file. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param mem_space_id Identifier of the memory dataspace. +!! \param file_space_id Identifier of the dataset's dataspace in the file. +!! \param xfer_prp Identifier of a transfer property list for this I/O operation. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Dwrite_async() +!! 
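Like the attribute variants, h5dread_async_f above and h5dwrite_async_f below take a TYPE(C_PTR) buffer. A hedged sketch of a whole-dataset read (dset_id and es_id are placeholders; the dataspace and transfer-property arguments are supplied explicitly here via keywords):

    USE ISO_C_BINDING, ONLY : C_PTR, C_LOC
    INTEGER(HID_T) :: dset_id, es_id        ! placeholders
    INTEGER, DIMENSION(100), TARGET :: rbuf
    TYPE(C_PTR)    :: f_ptr
    INTEGER        :: hdferr

    f_ptr = C_LOC(rbuf(1))
    CALL h5dread_async_f(dset_id, H5T_NATIVE_INTEGER, f_ptr, es_id, hdferr, &
                         mem_space_id=H5S_ALL_F, file_space_id=H5S_ALL_F, xfer_prp=H5P_DEFAULT_F)
    ! rbuf must stay valid until the event set completes.
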
+ SUBROUTINE h5dwrite_async_f(dset_id, mem_type_id, buf, es_id, hdferr, & + mem_space_id, file_space_id, xfer_prp, file, func, line) + USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: dset_id + INTEGER(HID_T), INTENT(IN) :: mem_type_id + TYPE(C_PTR), INTENT(IN) :: buf + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Dwrite_async(file, func, line, dset_id, mem_type_id, & + mem_space_id, file_space_id, xfer_prp, buf, es_id) BIND(C,NAME='H5Dwrite_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: dset_id + INTEGER(HID_T), VALUE :: mem_type_id + INTEGER(HID_T), VALUE :: mem_space_id + INTEGER(HID_T), VALUE :: file_space_id + INTEGER(HID_T), VALUE :: xfer_prp + TYPE(C_PTR) , VALUE :: buf + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Dwrite_async + END INTERFACE + + IF (PRESENT(file)) file_default = file + IF (PRESENT(func)) func_default = func + IF (PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = H5Dwrite_async(file_default, func_default, line_default, & + dset_id, mem_type_id, mem_space_id, file_space_id, xfer_prp, buf, es_id) + + END SUBROUTINE h5dwrite_async_f #ifdef H5_DOXYGEN !> @@ -1118,9 +1636,9 @@ SUBROUTINE h5dwrite_f(dset_id, mem_type_id, buf, hdferr, mem_space_id, file_spac INTEGER(HID_T), INTENT(IN) :: mem_type_id TYPE(C_PTR), INTENT(IN) :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp END SUBROUTINE h5dwrite !> !! 
\ingroup FH5D @@ -1146,9 +1664,9 @@ SUBROUTINE h5dread_f(dset_id, mem_type_id, buf, hdferr, mem_space_id, file_space INTEGER(HID_T), INTENT(IN) :: mem_type_id TYPE(C_PTR), INTENT(INOUT) :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp END SUBROUTINE h5dread_f !> @@ -1177,9 +1695,9 @@ SUBROUTINE h5dwrite_f___F90_VERSION(dset_id, mem_type_id, buf, dims, hdferr, mem INTEGER(HID_T), INTENT(IN) :: mem_type_id TYPE(TYPE), INTENT(IN) :: buf DIMENSION(*), INTEGER(HSIZE_T), INTENT(IN) :: dims - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp END SUBROUTINE h5dwrite_f___F90_VERSION !> @@ -1209,9 +1727,9 @@ SUBROUTINE h5dread_f___F90_VERSION(dset_id, mem_type_id, buf, dims, hdferr, mem_ INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims TYPE(TYPE), INTENT(INOUT) :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp END SUBROUTINE h5dread_f___F90_VERSION !> @@ -1273,9 +1791,9 @@ SUBROUTINE h5dwrite_reference_obj(dset_id, mem_type_id, buf, dims, hdferr, & INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: dims TYPE(hobj_ref_t_f), DIMENSION(dims(1)), INTENT(IN), TARGET :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default @@ -1305,9 +1823,9 @@ SUBROUTINE h5dwrite_reference_dsetreg(dset_id, mem_type_id, buf, dims, hdferr, & INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: dims TYPE(hdset_reg_ref_t_f), DIMENSION(dims(1)), INTENT(IN), TARGET :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default @@ -1315,7 +1833,6 @@ SUBROUTINE h5dwrite_reference_dsetreg(dset_id, mem_type_id, buf, dims, hdferr, & INTEGER, ALLOCATABLE, DIMENSION(:) :: ref_buf INTEGER :: i INTEGER(HSIZE_T) :: j - TYPE(C_PTR) :: f_ptr INTERFACE INTEGER FUNCTION h5dwrite_ref_reg_c(dset_id, mem_type_id,& mem_space_id_default, & @@ -1339,7 +1856,6 @@ END FUNCTION h5dwrite_ref_reg_c IF(PRESENT(xfer_prp)) xfer_prp_default = xfer_prp IF(PRESENT(mem_space_id)) mem_space_id_default = mem_space_id IF(PRESENT(file_space_id)) 
file_space_id_default = file_space_id - f_ptr = C_LOC(buf(1)) ALLOCATE(ref_buf(REF_REG_BUF_LEN*dims(1)), stat=hdferr) IF (hdferr .NE. 0 ) THEN @@ -1360,16 +1876,15 @@ END SUBROUTINE h5dwrite_reference_dsetreg SUBROUTINE h5dwrite_char_scalar(dset_id, mem_type_id, buf, dims, hdferr, & mem_space_id, file_space_id, xfer_prp) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: mem_type_id INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims CHARACTER(*), INTENT(IN), TARGET :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp CALL h5dwrite_char_scalar_fix(dset_id, mem_type_id, buf, LEN(buf), dims, hdferr, & mem_space_id, file_space_id, xfer_prp) @@ -1378,7 +1893,6 @@ END SUBROUTINE h5dwrite_char_scalar SUBROUTINE h5dwrite_char_scalar_fix(dset_id, mem_type_id, buf, buf_len, dims, hdferr, & mem_space_id, file_space_id, xfer_prp) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: mem_type_id @@ -1386,9 +1900,9 @@ SUBROUTINE h5dwrite_char_scalar_fix(dset_id, mem_type_id, buf, buf_len, dims, hd INTEGER, INTENT(IN) :: buf_len CHARACTER(LEN=buf_len), INTENT(IN), TARGET :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default INTEGER(HID_T) :: file_space_id_default @@ -1411,7 +1925,6 @@ END SUBROUTINE h5dwrite_char_scalar_fix SUBROUTINE h5dread_reference_obj(dset_id, mem_type_id, buf, dims, hdferr, & mem_space_id, file_space_id, xfer_prp) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: mem_type_id @@ -1419,9 +1932,9 @@ SUBROUTINE h5dread_reference_obj(dset_id, mem_type_id, buf, dims, hdferr, & TYPE(hobj_ref_t_f), INTENT(INOUT) , & DIMENSION(dims(1)), TARGET :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default @@ -1451,9 +1964,9 @@ SUBROUTINE h5dread_reference_dsetreg(dset_id, mem_type_id, buf, dims, hdferr, & TYPE(hdset_reg_ref_t_f), INTENT(INOUT), & DIMENSION(dims(1)), TARGET :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default INTEGER(HID_T) 
:: file_space_id_default @@ -1506,7 +2019,6 @@ END SUBROUTINE h5dread_reference_dsetreg SUBROUTINE h5dread_char_scalar(dset_id, mem_type_id, buf, dims, hdferr, & mem_space_id, file_space_id, xfer_prp) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: mem_type_id @@ -1514,9 +2026,9 @@ SUBROUTINE h5dread_char_scalar(dset_id, mem_type_id, buf, dims, hdferr, & CHARACTER(LEN=*), INTENT(INOUT) :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default @@ -1537,16 +2049,15 @@ END SUBROUTINE h5dread_char_scalar SUBROUTINE h5dread_char_scalar_fix(dset_id, mem_type_id, buf, buf_len, hdferr, & mem_space_id, file_space_id, xfer_prp) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: mem_type_id INTEGER, INTENT(IN) :: buf_len CHARACTER(LEN=buf_len), INTENT(INOUT), TARGET :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp TYPE(C_PTR) :: f_ptr @@ -1559,15 +2070,14 @@ END SUBROUTINE h5dread_char_scalar_fix SUBROUTINE h5dwrite_ptr(dset_id, mem_type_id, buf, hdferr, & mem_space_id, file_space_id, xfer_prp) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: mem_type_id TYPE(C_PTR), INTENT(IN) :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default @@ -1588,15 +2098,14 @@ END SUBROUTINE h5dwrite_ptr SUBROUTINE h5dread_ptr(dset_id, mem_type_id, buf, hdferr, & mem_space_id, file_space_id, xfer_prp) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: mem_type_id TYPE(C_PTR), INTENT(INOUT) :: buf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp INTEGER(HID_T) :: xfer_prp_default INTEGER(HID_T) :: mem_space_id_default @@ -1616,7 +2125,6 @@ SUBROUTINE h5dread_ptr(dset_id, mem_type_id, buf, hdferr, & END SUBROUTINE h5dread_ptr SUBROUTINE h5dfill_ptr(fill_value, fill_type_id, buf, buf_type_id, space_id, hdferr) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE TYPE(C_PTR) :: fill_value INTEGER(HID_T), INTENT(IN) :: fill_type_id @@ -1643,7 +2151,6 
@@ END FUNCTION h5dfill END SUBROUTINE h5dfill_ptr SUBROUTINE h5dfill_integer(fill_value, space_id, buf, hdferr) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE INTEGER, INTENT(IN), TARGET :: fill_value ! Fill value INTEGER(HID_T), INTENT(IN) :: space_id ! Memory dataspace selection identifier @@ -1667,7 +2174,6 @@ SUBROUTINE h5dfill_integer(fill_value, space_id, buf, hdferr) END SUBROUTINE h5dfill_integer SUBROUTINE h5dfill_c_float(fill_value, space_id, buf, hdferr) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE REAL(KIND=C_FLOAT), INTENT(IN), TARGET :: fill_value INTEGER(HID_T), INTENT(IN) :: space_id @@ -1739,7 +2245,6 @@ END SUBROUTINE h5dfill_c_long_double #endif SUBROUTINE h5dfill_char(fill_value, space_id, buf, hdferr) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE CHARACTER, INTENT(IN), TARGET :: fill_value INTEGER(HID_T), INTENT(IN) :: space_id @@ -1775,7 +2280,7 @@ END SUBROUTINE h5dfill_char !! \param hdferr \fortran_error !! \param xfer_prp Identifier of a transfer property list for this I/O operation. !! - SUBROUTINE H5Dread_multi_f(count, dset_id, mem_type_id, mem_space_id, file_space_id, buf, hdferr, xfer_prp) + SUBROUTINE h5dread_multi_f(count, dset_id, mem_type_id, mem_space_id, file_space_id, buf, hdferr, xfer_prp) IMPLICIT NONE INTEGER(SIZE_T), INTENT(IN) :: count @@ -1811,7 +2316,7 @@ END FUNCTION H5Dread_multi hdferr = H5Dread_multi(count, dset_id, mem_type_id, mem_space_id, file_space_id, xfer_prp_default, buf) - END SUBROUTINE H5Dread_multi_f + END SUBROUTINE h5dread_multi_f !> !! \ingroup FH5D !! @@ -1826,7 +2331,7 @@ END SUBROUTINE H5Dread_multi_f !! \param hdferr \fortran_error !! \param xfer_prp Identifier of a transfer property list for this I/O operation. !! - SUBROUTINE H5Dwrite_multi_f(count, dset_id, mem_type_id, mem_space_id, file_space_id, buf, hdferr, xfer_prp) + SUBROUTINE h5dwrite_multi_f(count, dset_id, mem_type_id, mem_space_id, file_space_id, buf, hdferr, xfer_prp) IMPLICIT NONE INTEGER(SIZE_T), INTENT(IN) :: count @@ -1862,7 +2367,7 @@ END FUNCTION H5Dwrite_multi hdferr = H5Dwrite_multi(count, dset_id, mem_type_id, mem_space_id, file_space_id, xfer_prp_default, buf) - END SUBROUTINE H5Dwrite_multi_f + END SUBROUTINE h5dwrite_multi_f #endif diff --git a/fortran/src/H5ESff.F90 b/fortran/src/H5ESff.F90 new file mode 100644 index 00000000000..5b19a514955 --- /dev/null +++ b/fortran/src/H5ESff.F90 @@ -0,0 +1,296 @@ +!> @defgroup FH5ES Fortran Event Set (H5ES) Interface +!! +!! @see H5ES, C-API +!! +!! @see @ref H5ES_UG, User Guide +!! +! +! COPYRIGHT +! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * +! Copyright by The HDF Group. * +! All rights reserved. * +! * +! This file is part of HDF5. The full HDF5 copyright notice, including * +! terms governing use, modification, and redistribution, is contained in * +! the COPYING file, which can be found at the root of the source code * +! distribution tree, or in https://www.hdfgroup.org/licenses. * +! If you do not have access to either file, you may request a copy from * +! help@hdfgroup.org. * +! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * +! + +MODULE H5ES + + USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR, C_CHAR, C_INT64_T, C_BOOL + USE H5GLOBAL + IMPLICIT NONE + +CONTAINS + +!> +!! \ingroup FH5ES +!! +!! \brief Creates an event set. +!! +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5EScreate() +!! 
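Before the individual wrappers, a minimal sketch of how this new event-set module is meant to be used end to end. The program name, the assumption that the H5ES wrappers are reachable through the usual HDF5 user module, and the timeout value are illustrative only, not part of this patch:

    PROGRAM es_lifecycle_sketch
      USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_INT64_T
      USE HDF5                      ! assumes the new H5ES module is re-exported by the HDF5 user module
      IMPLICIT NONE
      INTEGER(HID_T)  :: es_id
      INTEGER(SIZE_T) :: num_in_progress
      LOGICAL         :: err_occurred
      INTEGER         :: hdferr

      CALL h5open_f(hdferr)
      CALL h5escreate_f(es_id, hdferr)          ! create the event set

      ! ... queue asynchronous operations here, passing es_id to the *_async_f wrappers ...

      ! Wait for everything queued against es_id (HUGE(...) stands in for an effectively unbounded timeout)
      CALL h5eswait_f(es_id, HUGE(0_C_INT64_T), num_in_progress, err_occurred, hdferr)

      CALL h5esclose_f(es_id, hdferr)
      CALL h5close_f(hdferr)
    END PROGRAM es_lifecycle_sketch

Whether the queued operations actually overlap with application work depends on the VOL connector in use; the wrappers themselves only forward to the C event-set API.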
+ SUBROUTINE h5escreate_f(es_id, hdferr) + IMPLICIT NONE + + INTEGER(HID_T), INTENT(OUT) :: es_id + INTEGER , INTENT(OUT) :: hdferr + + INTERFACE + INTEGER(HID_T) FUNCTION H5EScreate() BIND(C,NAME='H5EScreate') + IMPORT :: HID_T + END FUNCTION H5EScreate + END INTERFACE + + es_id = H5EScreate() + + hdferr = 0 + IF(es_id.LT.0) hdferr = -1 + + END SUBROUTINE h5escreate_f +!> +!! \ingroup FH5ES +!! +!! \brief Retrieves the number of events in an event set. +!! +!! \param es_id \es_id +!! \param count The number of events in the event set +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5ESget_count() +!! + SUBROUTINE h5esget_count_f(es_id, count, hdferr) + IMPLICIT NONE + + INTEGER(hid_t), INTENT(IN) :: es_id + INTEGER(size_t), INTENT(OUT) :: count + INTEGER, INTENT(OUT) :: hdferr + + INTERFACE + INTEGER(C_INT) FUNCTION H5ESget_count(es_id, count) BIND(C,NAME='H5ESget_count') + IMPORT :: C_INT + IMPORT :: HID_T, SIZE_T + INTEGER(HID_T), VALUE :: es_id + INTEGER(SIZE_T) :: count + END FUNCTION H5ESget_count + END INTERFACE + + hdferr = INT(H5ESget_count(es_id, count)) + + END SUBROUTINE h5esget_count_f +!> +!! \ingroup FH5ES +!! +!! \brief Retrieves the next operation counter to be assigned in an event set. +!! +!! \param es_id \es_id +!! \param counter The next operation counter to be assigned in the event set +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5ESget_op_counter() +!! + SUBROUTINE h5esget_op_counter_f(es_id, counter, hdferr) + + IMPLICIT NONE + + INTEGER(HID_T) , INTENT(IN) :: es_id + INTEGER(C_INT64_T), INTENT(OUT) :: counter + INTEGER , INTENT(OUT) :: hdferr + + INTERFACE + INTEGER(C_INT) FUNCTION H5ESget_op_counter(es_id, counter) BIND(C,NAME='H5ESget_op_counter') + IMPORT :: C_INT + IMPORT :: HID_T, C_INT64_T + INTEGER(HID_T) , VALUE :: es_id + INTEGER(C_INT64_T) :: counter + END FUNCTION H5ESget_op_counter + END INTERFACE + + hdferr = INT(H5ESget_op_counter(es_id, counter)) + + END SUBROUTINE h5esget_op_counter_f +!> +!! \ingroup FH5ES +!! +!! \brief Waits for operations in an event set to complete. +!! +!! \param es_id \es_id +!! \param timeout Total time in nanoseconds to wait for operations in the event set to complete +!! \param num_in_progress The number of operations still in progress +!! \param err_occurred Flag indicating whether an operation in the event set failed +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5ESwait() +!! + SUBROUTINE h5eswait_f(es_id, timeout, num_in_progress, err_occurred, hdferr) + + IMPLICIT NONE + + INTEGER(HID_T) , INTENT(IN) :: es_id + INTEGER(C_INT64_T), INTENT(IN) :: timeout + INTEGER(SIZE_T) , INTENT(OUT) :: num_in_progress + LOGICAL , INTENT(OUT) :: err_occurred + INTEGER , INTENT(OUT) :: hdferr + + LOGICAL(C_BOOL) :: err_occurred_c = .FALSE. + + INTERFACE + INTEGER(C_INT) FUNCTION H5ESwait(es_id, timeout, num_in_progress, err_occurred) BIND(C,NAME='H5ESwait') + IMPORT :: C_INT + IMPORT :: HID_T, C_INT64_T, SIZE_T, C_BOOL + INTEGER(HID_T) , VALUE :: es_id + INTEGER(C_INT64_T), VALUE :: timeout + INTEGER(SIZE_T) :: num_in_progress + LOGICAL(C_BOOL) :: err_occurred + END FUNCTION H5ESwait + END INTERFACE + + hdferr = INT(H5ESwait(es_id, timeout, num_in_progress, err_occurred_c)) + + ! Transfer value of C c_bool type to Fortran LOGICAL + err_occurred = err_occurred_c + + END SUBROUTINE h5eswait_f +!> +!! \ingroup FH5ES +!! +!! \brief Attempts to cancel operations in an event set. +!! +!! \param es_id \es_id +!! \param num_not_canceled The number of events not canceled +!! \param err_occurred Status indicating if error is present in the event set +!! \param hdferr \fortran_error +!! +!!
See C API: @ref H5EScancel() +!! + SUBROUTINE h5escancel_f(es_id, num_not_canceled, err_occurred, hdferr) + + IMPLICIT NONE + + INTEGER(hid_t) , INTENT(IN) :: es_id + INTEGER(size_t), INTENT(OUT) :: num_not_canceled + LOGICAL , INTENT(OUT) :: err_occurred + INTEGER , INTENT(OUT) :: hdferr + + LOGICAL(C_BOOL) :: err_occurred_c = .FALSE. + + INTERFACE + INTEGER(C_INT) FUNCTION H5EScancel(es_id, num_not_canceled, err_occurred) BIND(C,NAME='H5EScancel') + IMPORT :: C_INT + IMPORT :: HID_T, SIZE_T, C_BOOL + INTEGER(HID_T) , VALUE :: es_id + INTEGER(SIZE_T) :: num_not_canceled + LOGICAL(C_BOOL) :: err_occurred + END FUNCTION H5EScancel + END INTERFACE + + hdferr = INT(H5EScancel(es_id, num_not_canceled, err_occurred_c)) + + ! Transfer value of C c_bool type to Fortran LOGICAL + err_occurred = err_occurred_c + + END SUBROUTINE h5escancel_f +!> +!! \ingroup FH5ES +!! +!! \brief Checks for failed operations. +!! +!! \param es_id \es_id +!! \param err_occurred Status indicating if error is present in the event set +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5ESget_err_status() +!! + SUBROUTINE h5esget_err_status_f(es_id, err_occurred, hdferr) + + IMPLICIT NONE + + INTEGER(hid_t), INTENT(IN) :: es_id + LOGICAL , INTENT(OUT) :: err_occurred + INTEGER , INTENT(OUT) :: hdferr + + LOGICAL(C_BOOL) :: err_occurred_c = .FALSE. + + INTERFACE + INTEGER(C_INT) FUNCTION H5ESget_err_status(es_id, err_occurred) BIND(C,NAME='H5ESget_err_status') + IMPORT :: C_INT + IMPORT :: HID_T, C_BOOL + INTEGER(HID_T) , VALUE :: es_id + LOGICAL(C_BOOL) :: err_occurred + END FUNCTION H5ESget_err_status + END INTERFACE + + hdferr = INT(H5ESget_err_status(es_id, err_occurred_c)) + + ! Transfer value of C c_bool type to Fortran LOGICAL + err_occurred = err_occurred_c + + END SUBROUTINE h5esget_err_status_f +!> +!! \ingroup FH5ES +!! +!! \brief Retrieves the number of failed operations. +!! +!! \param es_id \es_id +!! \param num_errs Number of errors +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5ESget_err_count() +!! + SUBROUTINE h5esget_err_count_f(es_id, num_errs, hdferr) + + IMPLICIT NONE + + INTEGER(HID_T) , INTENT(IN) :: es_id + INTEGER(SIZE_T), INTENT(OUT) :: num_errs + INTEGER , INTENT(OUT) :: hdferr + + INTERFACE + INTEGER(C_INT) FUNCTION H5ESget_err_count(es_id, num_errs) BIND(C,NAME='H5ESget_err_count') + IMPORT :: C_INT + IMPORT :: HID_T, SIZE_T + INTEGER(HID_T) , VALUE :: es_id + INTEGER(SIZE_T) :: num_errs + END FUNCTION H5ESget_err_count + END INTERFACE + + hdferr = INT(H5ESget_err_count(es_id, num_errs)) + + END SUBROUTINE h5esget_err_count_f + +!> +!! \ingroup FH5ES +!! +!! \brief Terminates access to an event set. +!! +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5ESclose() +!! 
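As a small companion sketch for the error-inspection wrappers above (variable names are illustrative; es_id is assumed to come from h5escreate_f and to have been waited on with h5eswait_f):

    LOGICAL         :: failed
    INTEGER(SIZE_T) :: num_errs
    INTEGER         :: hdferr

    CALL h5esget_err_status_f(es_id, failed, hdferr)   ! did anything in the set fail?
    IF (failed) THEN
       CALL h5esget_err_count_f(es_id, num_errs, hdferr)
       WRITE(*,*) 'failed operations in event set: ', num_errs
    END IF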
+ SUBROUTINE h5esclose_f(es_id, hdferr) + + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr + + INTERFACE + INTEGER(C_INT) FUNCTION H5ESclose(es_id) BIND(C,NAME='H5ESclose') + IMPORT :: C_INT + IMPORT :: HID_T + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5ESclose + END INTERFACE + + hdferr = INT(H5ESclose(es_id)) + + END SUBROUTINE h5esclose_f + +END MODULE H5ES diff --git a/fortran/src/H5Eff.F90 b/fortran/src/H5Eff.F90 index 26b2c77baf4..a4c9e8cd16d 100644 --- a/fortran/src/H5Eff.F90 +++ b/fortran/src/H5Eff.F90 @@ -58,7 +58,7 @@ MODULE H5E SUBROUTINE h5eclear_f(hdferr, estack_id) IMPLICIT NONE INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: estack_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: estack_id INTEGER(HID_T) :: estack_id_default INTERFACE @@ -86,7 +86,7 @@ END SUBROUTINE h5eclear_f !! See C API: @ref H5Eprint2() !! SUBROUTINE h5eprint_f(hdferr, name) - CHARACTER(LEN=*), OPTIONAL, INTENT(IN) :: name + CHARACTER(LEN=*), INTENT(IN), OPTIONAL :: name INTEGER, INTENT(OUT) :: hdferr INTEGER :: namelen diff --git a/fortran/src/H5Ff.c b/fortran/src/H5Ff.c index 7402e18dcb0..b2f09ef516a 100644 --- a/fortran/src/H5Ff.c +++ b/fortran/src/H5Ff.c @@ -91,47 +91,6 @@ h5fcreate_c(_fcd name, int_f *namelen, int_f *access_flags, hid_t_f *crt_prp, hi return ret_value; } -/****if* H5Ff/h5fflush_c - * NAME - * h5fflush_c - * PURPOSE - * Call H5Fflush to flush the object - * INPUTS - * object_id - identifier of either a file, a dataset, - * a group, an attribute or a named data type - * scope - integer to specify the flushing action, either - * H5F_SCOPE_GLOBAL or H5F_SCOPE_LOCAL - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Xiangyang Su - * Friday, November 5, 1999 - * SOURCE - */ -int_f -h5fflush_c(hid_t_f *object_id, int_f *scope) -/******/ -{ - int ret_value = -1; - hid_t c_file_id; - H5F_scope_t c_scope; - htri_t status; - c_scope = (H5F_scope_t)*scope; - - /* - * Call H5Fflush function. - */ - - c_file_id = *object_id; - - status = H5Fflush(c_file_id, c_scope); - - if (status >= 0) - ret_value = 0; - - return ret_value; -} - /****if* H5Ff/h5fmount_c * NAME * h5fmount_c @@ -240,103 +199,6 @@ h5funmount_c(hid_t_f *loc_id, _fcd dsetname, int_f *namelen) return ret_value; } -/****if* H5Ff/h5fopen_c - * NAME - * h5fopen_c - * PURPOSE - * Call H5Fopen to open the file - * INPUTS - * name - name of the file - * namelen - name length - * access_flags - file access flags - * acc_prp - identifier of access property list - * OUTPUTS - * file_id - file identifier - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Elena Pourmal - * Tuesday, August 3, 1999 - * SOURCE - */ -int_f -h5fopen_c(_fcd name, int_f *namelen, int_f *access_flags, hid_t_f *acc_prp, hid_t_f *file_id) -/******/ -{ - int ret_value = -1; - char *c_name; - int_f c_namelen; - hid_t c_file_id; - unsigned c_access_flags; - hid_t c_acc_prp; - c_acc_prp = (hid_t)*acc_prp; - - /* - * Define access flags - */ - c_access_flags = (unsigned)*access_flags; - - /* - * Define access property - */ - c_acc_prp = *acc_prp; - - /* - * Convert FORTRAN name to C name - */ - c_namelen = *namelen; - c_name = (char *)HD5f2cstring(name, (size_t)c_namelen); - if (c_name == NULL) - return ret_value; - - /* - * Call H5Fopen function. 
- */ - c_file_id = H5Fopen(c_name, c_access_flags, c_acc_prp); - - if (c_file_id >= 0) { - ret_value = 0; - *file_id = (hid_t_f)c_file_id; - } /* end if */ - - HDfree(c_name); - return ret_value; -} - -/****if* H5Ff/h5freopen_c - * NAME - * h5freopen_c - * PURPOSE - * Call H5Freopen to open the file - * INPUTS - * file_id1 - file identifier - * OUTPUTS - * file_id2 - file identifier - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Xiangyang Su - * Wednesday, November 3, 1999 - * SOURCE - */ -int_f -h5freopen_c(hid_t_f *file_id1, hid_t_f *file_id2) -/******/ -{ - int ret_value = -1; - hid_t c_file_id1, c_file_id2; - - c_file_id1 = *file_id1; - c_file_id2 = H5Freopen(c_file_id1); - - if (c_file_id2 < 0) - return ret_value; - *file_id2 = (hid_t_f)c_file_id2; - - ret_value = 0; - return ret_value; -} - /****if* H5Ff/h5fget_create_plist_c * NAME * h5fget_create_plist_c @@ -407,35 +269,6 @@ h5fget_access_plist_c(hid_t_f *file_id, hid_t_f *access_id) return ret_value; } -/****if* H5Ff/h5fclose_c - * NAME - * h5fclose_c - * PURPOSE - * Call H5Fclose to close the file - * INPUTS - * file_id - identifier of the file to be closed - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Elena Pourmal - * Monday, July 26, 1999 - * HISTORY - * - * SOURCE - */ - -int_f -h5fclose_c(hid_t_f *file_id) -/******/ -{ - int ret_value = 0; - hid_t c_file_id; - - c_file_id = (hid_t)*file_id; - if (H5Fclose(c_file_id) < 0) - ret_value = -1; - return ret_value; -} /****if* H5Ff/h5fget_obj_count_c * NAME * h5fget_obj_count_c diff --git a/fortran/src/H5Fff.F90 b/fortran/src/H5Fff.F90 index 551db7dc028..f1a0d2acb11 100644 --- a/fortran/src/H5Fff.F90 +++ b/fortran/src/H5Fff.F90 @@ -43,6 +43,7 @@ MODULE H5F ! Number of objects opened in H5open_f INTEGER(SIZE_T) :: H5OPEN_NUM_OBJ + #ifndef H5_DOXYGEN INTERFACE INTEGER(C_INT) FUNCTION h5fis_accessible(name, & @@ -81,46 +82,127 @@ SUBROUTINE h5fcreate_f(name, access_flags, file_id, hdferr, & INTEGER, INTENT(IN) :: access_flags INTEGER(HID_T), INTENT(OUT) :: file_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: creation_prp - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: access_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: creation_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: access_prp + INTEGER(HID_T) :: creation_prp_default INTEGER(HID_T) :: access_prp_default - INTEGER :: namelen ! 
Length of the name character string + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name INTERFACE - INTEGER FUNCTION h5fcreate_c(name, namelen, access_flags, & - creation_prp_default, access_prp_default, file_id) BIND(C,NAME='h5fcreate_c') + INTEGER(HID_T) FUNCTION H5Fcreate(name, access_flags, & + creation_prp_default, access_prp_default) BIND(C,NAME='H5Fcreate') IMPORT :: C_CHAR IMPORT :: HID_T - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER, INTENT(IN) :: access_flags - INTEGER(HID_T), INTENT(OUT) :: file_id - INTEGER(HID_T), INTENT(IN) :: creation_prp_default - INTEGER(HID_T), INTENT(IN) :: access_prp_default - INTEGER :: namelen - END FUNCTION h5fcreate_c + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER, VALUE :: access_flags + INTEGER(HID_T), VALUE :: creation_prp_default + INTEGER(HID_T), VALUE :: access_prp_default + END FUNCTION H5Fcreate END INTERFACE + c_name = TRIM(name)//C_NULL_CHAR + creation_prp_default = H5P_DEFAULT_F access_prp_default = H5P_DEFAULT_F IF (PRESENT(creation_prp)) creation_prp_default = creation_prp IF (PRESENT(access_prp)) access_prp_default = access_prp - namelen = LEN_TRIM(name) - hdferr = h5fcreate_c(name, namelen, access_flags, & - creation_prp_default, access_prp_default, file_id) + + file_id = h5fcreate(c_name, access_flags, & + creation_prp_default, access_prp_default) + + hdferr = 0 + IF(file_id.LT.0) hdferr = -1 END SUBROUTINE h5fcreate_f + !> !! \ingroup FH5F !! -!! \brief Flushes all buffers associated with a file to disk +!! \brief Asynchronously creates HDF5 files. !! -!! \param object_id Identifier of object used to identify the file. -!! \param scope Specifies the scope of the flushing action. Possible values are: -!! \li H5F_SCOPE_GLOBAL_F -!! \li H5F_SCOPE_LOCAL_F +!! \param name Name of the file to create +!! \param access_flags File access flags. Allowable values are: +!! \li H5F_ACC_TRUNC_F +!! \li H5F_ACC_EXCL_F +!! \param file_id File identifier +!! \param es_id \es_id !! \param hdferr \fortran_error +!! \param creation_prp File creation property list identifier +!! \param access_prp File access property list identifier +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Fcreate_async() +!! 
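A usage sketch for the wrapper implemented below; the file name is illustrative, and the optional property-list arguments are simply omitted so that the defaults apply:

    INTEGER(HID_T) :: es_id, file_id
    INTEGER        :: hdferr

    CALL h5escreate_f(es_id, hdferr)
    CALL h5fcreate_async_f("example_async.h5", H5F_ACC_TRUNC_F, file_id, es_id, hdferr)
    ! file_id can now be passed to further *_async_f calls queued on the same event set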
+ SUBROUTINE h5fcreate_async_f(name, access_flags, file_id, es_id, hdferr, & + creation_prp, access_prp, file, func, line) + IMPLICIT NONE + CHARACTER(LEN=*), INTENT(IN) :: name + INTEGER, INTENT(IN) :: access_flags + INTEGER(HID_T), INTENT(OUT) :: file_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: creation_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: access_prp + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: creation_prp_default + INTEGER(HID_T) :: access_prp_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(HID_T) FUNCTION H5Fcreate_async(file, func, line, name, access_flags, & + creation_prp_default, access_prp_default, es_id) BIND(C,NAME='H5Fcreate_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER, VALUE :: access_flags + INTEGER(HID_T), VALUE :: creation_prp_default + INTEGER(HID_T), VALUE :: access_prp_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Fcreate_async + END INTERFACE + + c_name = TRIM(name)//C_NULL_CHAR + + creation_prp_default = H5P_DEFAULT_F + access_prp_default = H5P_DEFAULT_F + + IF(PRESENT(creation_prp)) creation_prp_default = creation_prp + IF(PRESENT(access_prp)) access_prp_default = access_prp + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + file_id = H5Fcreate_async(file_default, func_default, line_default, & + c_name, access_flags, creation_prp_default, access_prp_default, es_id) + + hdferr = 0 + IF(file_id.LT.0) hdferr = -1 + + END SUBROUTINE h5fcreate_async_f +!> +!! \ingroup FH5F +!! +!! \brief Flushes all buffers associated with a file to disk. +!! +!! \param object_id Identifier of object used to identify the file. +!! \param scope Specifies the scope of the flushing action. Possible values are: +!! \li H5F_SCOPE_GLOBAL_F +!! \li H5F_SCOPE_LOCAL_F +!! \param hdferr \fortran_error !! !! See C API: @ref H5Fflush() !! @@ -131,20 +213,75 @@ SUBROUTINE h5fflush_f(object_id, scope, hdferr) INTEGER, INTENT(OUT) :: hdferr INTERFACE - INTEGER FUNCTION h5fflush_c(object_id, scope) BIND(C,NAME='h5fflush_c') + INTEGER FUNCTION H5Fflush(object_id, scope) BIND(C,NAME='H5Fflush') + IMPORT :: C_INT IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: object_id - INTEGER, INTENT(IN) :: scope - END FUNCTION h5fflush_c + INTEGER(HID_T), VALUE :: object_id + INTEGER(C_INT), VALUE :: scope + END FUNCTION H5Fflush END INTERFACE - hdferr = h5fflush_c(object_id, scope) + hdferr = H5Fflush(object_id, INT(scope, C_INT)) END SUBROUTINE h5fflush_f !> !! \ingroup FH5F !! +!! \brief Asynchronously flushes all buffers associated with a file to disk. +!! +!! \param object_id Identifier of object used to identify the file. +!! \param scope Specifies the scope of the flushing action. Possible values are: +!! \li H5F_SCOPE_GLOBAL_F +!! \li H5F_SCOPE_LOCAL_F +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Fflush_async() +!! 
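A sketch for the asynchronous flush below; file_id and es_id are assumed to come from the earlier create sketch:

    INTEGER :: hdferr   ! file_id and es_id as in the earlier sketches

    ! Queue a global flush of the file that file_id belongs to
    CALL h5fflush_async_f(file_id, H5F_SCOPE_GLOBAL_F, es_id, hdferr)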
+ SUBROUTINE h5fflush_async_f(object_id, scope, es_id, hdferr, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: object_id + INTEGER, INTENT(IN) :: scope + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER FUNCTION H5Fflush_async(file, func, line, object_id, scope, es_id) & + BIND(C,NAME='H5Fflush_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: object_id + INTEGER(C_INT), VALUE :: scope + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Fflush_async + END INTERFACE + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = H5Fflush_async(file_default, func_default, line_default, & + object_id, INT(scope, C_INT), es_id) + + END SUBROUTINE h5fflush_async_f +!> +!! \ingroup FH5F +!! !! \brief Mounts a file. !! !! \param loc_id The identifier for of file or group in which name is defined. @@ -161,7 +298,7 @@ SUBROUTINE h5fmount_f(loc_id, name, child_id, hdferr, access_prp) CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(HID_T), INTENT(IN) :: child_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: access_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: access_prp INTEGER(HID_T) :: access_prp_default INTEGER :: namelen ! Length of the name character string @@ -219,6 +356,7 @@ END FUNCTION h5funmount_c hdferr = h5funmount_c(loc_id, name, namelen) END SUBROUTINE h5funmount_f + !> !! \ingroup FH5F !! @@ -240,30 +378,106 @@ SUBROUTINE h5fopen_f(name, access_flags, file_id, hdferr, access_prp) INTEGER, INTENT(IN) :: access_flags INTEGER(HID_T), INTENT(OUT) :: file_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: access_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: access_prp + INTEGER(HID_T) :: access_prp_default - INTEGER :: namelen ! Length of the name character string + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name INTERFACE - INTEGER FUNCTION h5fopen_c(name, namelen, access_flags, & - access_prp_default, file_id) BIND(C,NAME='h5fopen_c') - IMPORT :: C_CHAR + INTEGER(HID_T) FUNCTION H5Fopen(name, access_flags, access_prp_default) & + BIND(C,NAME='H5Fopen') + IMPORT :: C_CHAR, C_INT, C_PTR IMPORT :: HID_T IMPLICIT NONE - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER :: namelen - INTEGER, INTENT(IN) :: access_flags - INTEGER(HID_T), INTENT(IN) :: access_prp_default - INTEGER(HID_T), INTENT(OUT) :: file_id - END FUNCTION h5fopen_c + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(C_INT), VALUE :: access_flags + INTEGER(HID_T), VALUE :: access_prp_default + END FUNCTION H5Fopen END INTERFACE + c_name = TRIM(name)//C_NULL_CHAR + access_prp_default = H5P_DEFAULT_F - IF (PRESENT(access_prp)) access_prp_default = access_prp - namelen = LEN_TRIM(name) - hdferr = h5fopen_c(name, namelen, access_flags, & - access_prp_default, file_id) + + IF(PRESENT(access_prp)) access_prp_default = access_prp + + file_id = H5Fopen(c_name, INT(access_flags, C_INT), access_prp_default) + + hdferr = 0 + IF(file_id.LT.0) hdferr = -1 + END SUBROUTINE h5fopen_f + +!> +!! \ingroup FH5F +!! +!! \brief Asynchronously opens HDF5 file. 
+!! +!! \param name Name of the file to access. +!! \param access_flags File access flags. Allowable values are: +!! \li H5F_ACC_RDWR_F +!! \li H5F_ACC_RDONLY_F +!! \param file_id File identifier +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param access_prp File access property list identifier +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Fopen_async() +!! + SUBROUTINE h5fopen_async_f(name, access_flags, file_id, es_id, hdferr, & + access_prp, file, func, line) + IMPLICIT NONE + CHARACTER(LEN=*), INTENT(IN) :: name + INTEGER, INTENT(IN) :: access_flags + INTEGER(HID_T), INTENT(OUT) :: file_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: access_prp + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: access_prp_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(HID_T) FUNCTION H5Fopen_async(file, func, line, name, access_flags, access_prp_default, es_id) & + BIND(C,NAME='H5Fopen_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(C_INT), VALUE :: access_flags + INTEGER(HID_T), VALUE :: access_prp_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Fopen_async + END INTERFACE + + c_name = TRIM(name)//C_NULL_CHAR + + access_prp_default = H5P_DEFAULT_F + + IF(PRESENT(access_prp)) access_prp_default = access_prp + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + file_id = H5Fopen_async(file_default, func_default, line_default, & + c_name, INT(access_flags, C_INT), access_prp_default, es_id) + + hdferr = 0 + IF(file_id.LT.0) hdferr = -1 + + END SUBROUTINE h5fopen_async_f !> !! \ingroup FH5F !! @@ -281,20 +495,75 @@ SUBROUTINE h5freopen_f(file_id, ret_file_id, hdferr) INTEGER(HID_T), INTENT(OUT) :: ret_file_id INTEGER, INTENT(OUT) :: hdferr INTERFACE - INTEGER FUNCTION h5freopen_c(file_id, ret_file_id) BIND(C,NAME='h5freopen_c') + INTEGER(HID_T) FUNCTION H5Freopen(file_id) BIND(C,NAME='H5Freopen') IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: file_id - INTEGER(HID_T), INTENT(OUT) :: ret_file_id - END FUNCTION h5freopen_c + INTEGER(HID_T), VALUE :: file_id + END FUNCTION H5Freopen END INTERFACE - hdferr = h5freopen_c(file_id, ret_file_id) + ret_file_id = h5freopen(file_id) + + hdferr = 0 + IF(ret_file_id.LT.0) hdferr = -1 END SUBROUTINE h5freopen_f !> !! \ingroup FH5F !! +!! \brief Asynchronously reopens HDF5 file. +!! +!! \param file_id Identifier of a file for which an additional identifier is required. +!! \param ret_file_id New file identifier. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Freopen_async() +!!
+ SUBROUTINE h5freopen_async_f(file_id, ret_file_id, es_id, hdferr, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: file_id + INTEGER(HID_T), INTENT(OUT) :: ret_file_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(HID_T) FUNCTION H5Freopen_async(file, func, line, file_id, es_id) & + BIND(C,NAME='H5Freopen_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: file_id + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Freopen_async + END INTERFACE + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + ret_file_id = h5freopen_async(file_default, func_default, line_default, file_id, es_id) + + hdferr = 0 + IF(ret_file_id.LT.0) hdferr = -1 + + END SUBROUTINE h5freopen_async_f +!> +!! \ingroup FH5F +!! !! \brief Returns a file creation property list identifier. !! !! \param file_id Identifier of a file to creation property list of. @@ -366,7 +635,7 @@ SUBROUTINE h5fis_accessible_f(name, status, hdferr, access_prp) CHARACTER(LEN=*), INTENT(IN) :: name LOGICAL, INTENT(OUT) :: status INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: access_prp + INTEGER(HID_T), INTENT(IN), OPTIONAL :: access_prp INTEGER(HID_T) :: access_prp_default CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name @@ -437,17 +706,67 @@ SUBROUTINE h5fclose_f(file_id, hdferr) INTEGER(HID_T), INTENT(IN) :: file_id INTEGER, INTENT(OUT) :: hdferr INTERFACE - INTEGER FUNCTION h5fclose_c(file_id) BIND(C,NAME='h5fclose_c') + INTEGER(C_INT) FUNCTION H5Fclose(file_id) BIND(C,NAME='H5Fclose') + IMPORT :: C_INT IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: file_id - END FUNCTION h5fclose_c + INTEGER(HID_T), VALUE :: file_id + END FUNCTION H5Fclose END INTERFACE - hdferr = h5fclose_c(file_id) + hdferr = INT(H5Fclose(file_id)) END SUBROUTINE h5fclose_f +!> +!! \ingroup FH5F +!! +!! \brief Asynchronously closes HDF5 file. +!! +!! \param file_id File identifier +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Fclose_async() +!! 
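How the asynchronous close below is typically paired with the event-set calls, as a sketch (C_INT64_T comes from ISO_C_BINDING; file_id and es_id are assumed to come from the earlier create/open sketches):

    INTEGER(SIZE_T) :: num_in_progress
    LOGICAL         :: err_occurred
    INTEGER         :: hdferr

    CALL h5fclose_async_f(file_id, es_id, hdferr)      ! queue the close
    ! Then wait for the whole set to drain before releasing it
    CALL h5eswait_f(es_id, HUGE(0_C_INT64_T), num_in_progress, err_occurred, hdferr)
    CALL h5esclose_f(es_id, hdferr)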
+ SUBROUTINE h5fclose_async_f(file_id, es_id, hdferr, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: file_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Fclose_async(file, func, line, file_id, es_id) & + BIND(C,NAME='H5Fclose_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: file_id + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Fclose_async + END INTERFACE + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(H5Fclose_async(file_default, func_default, line_default, file_id, es_id)) + + END SUBROUTINE h5fclose_async_f + !> !! \ingroup FH5F !! diff --git a/fortran/src/H5Gf.c b/fortran/src/H5Gf.c index 9513a58a8f5..445fcea1abd 100644 --- a/fortran/src/H5Gf.c +++ b/fortran/src/H5Gf.c @@ -21,128 +21,6 @@ #include "H5f90.h" #include "H5Eprivate.h" -/****if* H5Gf/h5gcreate_c - * NAME - * h5gcreate_c - * PURPOSE - * Call H5Gcreate to create a group - * INPUTS - * loc_id - file or group identifier - * name - name of the group - * namelen - name length - * size_hint - length of names in the group - * OUTPUTS - * grp_id - group identifier - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Elena Pourmal - * Wednesday, August 5, 1999 - * HISTORY - * Changed to call H5Gcreate2 because H5Gcreate flip-flops and - * H5Gcreate1 can be compiled out of the library - * QAK - 2007/08/23 - * SOURCE - */ -int_f -h5gcreate_c(hid_t_f *loc_id, _fcd name, int_f *namelen, size_t_f *size_hint, hid_t_f *grp_id, - hid_t_f *lcpl_id, hid_t_f *gcpl_id, hid_t_f *gapl_id) -/******/ -{ - hid_t c_gcpl_id = -1; /* Group creation property list */ - char *c_name = NULL; - hid_t c_grp_id; - int_f ret_value = -1; - - /* - * Convert FORTRAN name to C name - */ - if (NULL == (c_name = (char *)HD5f2cstring(name, (size_t)*namelen))) - goto DONE; - - /* - * Call H5Gcreate function. 
- */ - if (*size_hint == (size_t_f)OBJECT_NAMELEN_DEFAULT_F) { - c_grp_id = H5Gcreate2((hid_t)*loc_id, c_name, (hid_t)*lcpl_id, (hid_t)*gcpl_id, (hid_t)*gapl_id); - } - else { - /* Create the group creation property list */ - if ((c_gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) - goto DONE; - - /* Set the local heap size hint */ - if (H5Pset_local_heap_size_hint(c_gcpl_id, (size_t)*size_hint) < 0) - goto DONE; - - /* Create the group */ - c_grp_id = H5Gcreate2((hid_t)*loc_id, c_name, H5P_DEFAULT, c_gcpl_id, H5P_DEFAULT); - } - if (c_grp_id < 0) - goto DONE; - - /* Everything OK, set values to return */ - *grp_id = (hid_t_f)c_grp_id; - ret_value = 0; - -DONE: - if (c_gcpl_id > 0) - H5Pclose(c_gcpl_id); - if (c_name) - HDfree(c_name); - return ret_value; -} - -/****if* H5Gf/h5gopen_c - * NAME - * h5gopen_c - * PURPOSE - * Call H5Gopen to open a dataset - * INPUTS - * loc_id - file or group identifier - * name - name of the group - * namelen - name length - * gapl_id - Group access property list identifier - * OUTPUTS - * grp_id - group identifier - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Elena Pourmal - * Wednesday, August 5, 1999 - * - * SOURCE - */ -int_f -h5gopen_c(hid_t_f *loc_id, _fcd name, int_f *namelen, hid_t_f *gapl_id, hid_t_f *grp_id) -/******/ -{ - char *c_name = NULL; - hid_t c_grp_id; - int ret_value = -1; - - /* - * Convert FORTRAN name to C name - */ - if (NULL == (c_name = (char *)HD5f2cstring(name, (size_t)*namelen))) - goto DONE; - - /* - * Call H5Gopen function. - */ - if ((c_grp_id = H5Gopen2((hid_t)*loc_id, c_name, (hid_t)*gapl_id)) < 0) - goto DONE; - - /* Everything OK, set values to return */ - *grp_id = (hid_t_f)c_grp_id; - ret_value = 0; - -DONE: - if (c_name) - HDfree(c_name); - return ret_value; -} - /****if* H5Gf/h5gget_obj_info_idx_c * NAME * h5gget_obj_info_idx_c @@ -273,32 +151,6 @@ h5gn_members_c(hid_t_f *loc_id, _fcd name, int_f *namelen, int_f *nmembers) return ret_value; } -/****if* H5Gf/h5gclose_c - * NAME - * h5gclose_c - * PURPOSE - * Call H5Gclose to close the group - * INPUTS - * grp_id - identifier of the group to be closed - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * Elena Pourmal - * Wednesday, August 5, 1999 - * SOURCE - */ - -int_f -h5gclose_c(hid_t_f *grp_id) -/******/ -{ - int ret_value = 0; - - if (H5Gclose((hid_t)*grp_id) < 0) - ret_value = -1; - return ret_value; -} - /****if* H5Gf/h5glink_c * NAME * h5glink_c @@ -852,203 +704,3 @@ h5gget_create_plist_c(hid_t_f *grp_id, hid_t_f *gcpl_id) done: return ret_value; } - -/****if* H5Gf/h5gget_info_c - * NAME - * h5gget_info_c - * PURPOSE - * Call H5Gget_info - * INPUTS - * group_id - Group identifier - * OUTPUTS - * - * storage_type - Type of storage for links in group: - * H5G_STORAGE_TYPE_COMPACT: Compact storage - * H5G_STORAGE_TYPE_DENSE: Indexed storage - * H5G_STORAGE_TYPE_SYMBOL_TABLE: Symbol tables, the original HDF5 structure - * - * nlinks - Number of links in group - * max_corder - Current maximum creation order value for group - * mounted - Whether group has a file mounted on it (0 = false, 1 = true) - * - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. Scot Breitenfeld - * February 15, 2008 - * HISTORY - * - * - Added 'mounted' parameter - * M. Scot Breitenfeld - * July 16, 2008 - * SOURCE - */ -int_f -h5gget_info_c(hid_t_f *group_id, int_f *storage_type, int_f *nlinks, int_f *max_corder, int_f *mounted) -/******/ -{ - - int_f ret_value = 0; /* Return value */ - H5G_info_t ginfo; - - /* - * Call H5Gget_info function. 
- */ - if (H5Gget_info((hid_t)*group_id, &ginfo) < 0) - HGOTO_DONE(FAIL); - - /* Unpack the structure */ - - *storage_type = (int_f)ginfo.storage_type; - *nlinks = (int_f)ginfo.nlinks; - *max_corder = (int_f)ginfo.max_corder; - *mounted = 0; - if (ginfo.mounted) - *mounted = 1; - -done: - return ret_value; -} - -/****if* H5Gf/h5gget_info_by_idx_c - * NAME - * h5gget_info_by_idx_c - * PURPOSE - * Call H5Gget_info_by_idx - * INPUTS - * - * loc_id - File or group identifier - * group_name - Name of group containing group for which information is to be retrieved - * group_namelen - name length - * index_type - Index type - * order - Order of the count in the index - * n - Position in the index of the group for which information is retrieved - * lapl_id - Link access property list - * OUTPUTS - * - * storage_type - Type of storage for links in group: - * H5G_STORAGE_TYPE_COMPACT: Compact storage - * H5G_STORAGE_TYPE_DENSE: Indexed storage - * H5G_STORAGE_TYPE_SYMBOL_TABLE: Symbol tables, the original HDF5 structure - * - * nlinks - Number of links in group - * max_corder - Current maximum creation order value for group - * mounted - Whether group has a file mounted on it (0 = false, 1 = true) - * - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. Scot Breitenfeld - * February 18, 2008 - * HISTORY - * - * - Added 'mounted' parameter - * M. Scot Breitenfeld - * July 16, 2008 - * SOURCE - */ -int_f -h5gget_info_by_idx_c(hid_t_f *loc_id, _fcd group_name, size_t_f *group_namelen, int_f *index_type, - int_f *order, hsize_t_f *n, hid_t_f *lapl_id, int_f *storage_type, int_f *nlinks, - int_f *max_corder, int_f *mounted) -/******/ -{ - char *c_group_name = NULL; /* Buffer to hold group name C string */ - int_f ret_value = 0; /* Return value */ - H5G_info_t ginfo; - /* - * Convert FORTRAN name to C name - */ - if ((c_group_name = HD5f2cstring(group_name, (size_t)*group_namelen)) == NULL) - HGOTO_DONE(FAIL); - - /* - * Call H5Gget_info_by_idx function. - */ - if (H5Gget_info_by_idx((hid_t)*loc_id, c_group_name, (H5_index_t)*index_type, (H5_iter_order_t)*order, - (hsize_t)*n, &ginfo, (hid_t)*lapl_id) < 0) - HGOTO_DONE(FAIL); - - /* Unpack the structure */ - - *storage_type = (int_f)ginfo.storage_type; - *nlinks = (int_f)ginfo.nlinks; - *max_corder = (int_f)ginfo.max_corder; - *mounted = 0; - if (ginfo.mounted) - *mounted = 1; - -done: - if (c_group_name) - HDfree(c_group_name); - return ret_value; -} - -/****if* H5Gf/h5gget_info_by_name_c - * NAME - * h5gget_info_by_name_c - * PURPOSE - * Call H5Gget_info_by_name - * INPUTS - * - * loc_id - File or group identifier - * group_name - Name of group containing group for which information is to be retrieved - * group_namelen - name length - * lapl_id - Link access property list - * OUTPUTS - * - * storage_type - Type of storage for links in group: - * H5G_STORAGE_TYPE_COMPACT: Compact storage - * H5G_STORAGE_TYPE_DENSE: Indexed storage - * H5G_STORAGE_TYPE_SYMBOL_TABLE: Symbol tables, the original HDF5 structure - * - * nlinks - Number of links in group - * max_corder - Current maximum creation order value for group - * mounted - Whether group has a file mounted on it (0 = false, 1 = true) - * - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. Scot Breitenfeld - * February 18, 2008 - * HISTORY - * - * - Added 'mounted' parameter - * M. 
Scot Breitenfeld - * July 16, 2008 - * SOURCE - */ -int_f -h5gget_info_by_name_c(hid_t_f *loc_id, _fcd group_name, size_t_f *group_namelen, hid_t_f *lapl_id, - int_f *storage_type, int_f *nlinks, int_f *max_corder, int_f *mounted) -/******/ -{ - char *c_group_name = NULL; /* Buffer to hold group name C string */ - int_f ret_value = 0; /* Return value */ - H5G_info_t ginfo; - /* - * Convert FORTRAN name to C name - */ - if ((c_group_name = HD5f2cstring(group_name, (size_t)*group_namelen)) == NULL) - HGOTO_DONE(FAIL); - - /* - * Call H5Gget_info_by_name function. - */ - if (H5Gget_info_by_name((hid_t)*loc_id, c_group_name, &ginfo, (hid_t)*lapl_id) < 0) - HGOTO_DONE(FAIL); - - /* Unpack the structure */ - - *storage_type = (int_f)ginfo.storage_type; - *nlinks = (int_f)ginfo.nlinks; - *max_corder = (int_f)ginfo.max_corder; - *mounted = 0; - if (ginfo.mounted) - *mounted = 1; - -done: - if (c_group_name) - HDfree(c_group_name); - return ret_value; -} diff --git a/fortran/src/H5Gff.F90 b/fortran/src/H5Gff.F90 index 436eecefb72..655c226a464 100644 --- a/fortran/src/H5Gff.F90 +++ b/fortran/src/H5Gff.F90 @@ -36,8 +36,127 @@ ! MODULE H5G - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_CHAR + USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_CHAR, C_INT USE H5GLOBAL + USE H5P, ONLY : H5Pcreate_f, H5Pset_local_heap_size_hint_f, H5Pclose_f + + +! +! @brief Fortran2003 Derived Type for @ref H5G_info_t +! + TYPE, BIND(C) :: H5G_info_t + INTEGER(C_INT ) :: storage_type !< Type of storage for links in group: + !< \li H5G_STORAGE_TYPE_COMPACT_F: Compact storage + !< \li H5G_STORAGE_TYPE_DENSE_F: Indexed storage + !< \li H5G_STORAGE_TYPE_SYMBOL_TABLE_F: Symbol tables, the original HDF5 structure + INTEGER(HSIZE_T) :: nlinks !< Number of links in group + INTEGER(C_INT64_T) :: max_corder !< Current maximum creation order value for group + LOGICAL(C_BOOL) :: mounted !< Whether group has a file mounted on it + END TYPE H5G_info_t + +#ifndef H5_DOXYGEN + INTERFACE H5Gget_info_f + MODULE PROCEDURE h5Gget_info_f90 + MODULE PROCEDURE h5Gget_info_f03 + END INTERFACE + + INTERFACE H5Gget_info_by_idx_f + MODULE PROCEDURE H5Gget_info_by_idx_f90 + MODULE PROCEDURE H5Gget_info_by_idx_f03 + END INTERFACE + + INTERFACE H5Gget_info_by_name_f + MODULE PROCEDURE H5Gget_info_by_name_f90 + MODULE PROCEDURE H5Gget_info_by_name_f03 + END INTERFACE + + INTERFACE + INTEGER(C_INT) FUNCTION H5Gget_info(loc_id, ginfo) BIND(C,NAME='H5Gget_info') + IMPORT :: C_INT, C_PTR + IMPORT :: HID_T + INTEGER(HID_T), VALUE :: loc_id + TYPE(C_PTR), VALUE :: ginfo + END FUNCTION H5Gget_info + END INTERFACE + + INTERFACE + INTEGER(C_INT) FUNCTION H5Gget_info_async(file, func, line, loc_id, ginfo, es_id) & + BIND(C,NAME='H5Gget_info_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + TYPE(C_PTR) , VALUE :: ginfo + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Gget_info_async + END INTERFACE + + INTERFACE + INTEGER(C_INT) FUNCTION H5Gget_info_by_idx(loc_id, group_name, idx_type, order, n, ginfo, lapl_id) & + BIND(C,NAME='H5Gget_info_by_idx') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T, HSIZE_T + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: group_name + INTEGER(C_INT) , VALUE :: idx_type + INTEGER(C_INT) , VALUE :: order + INTEGER(HSIZE_T), VALUE :: n + TYPE(C_PTR) , VALUE :: ginfo + INTEGER(HID_T) , VALUE :: lapl_id + END FUNCTION H5Gget_info_by_idx + END INTERFACE + + INTERFACE + INTEGER(C_INT) 
FUNCTION H5Gget_info_by_idx_async(file, func, line, loc_id, & + group_name, idx_type, order, n, ginfo, lapl_id, es_id) & + BIND(C,NAME='H5Gget_info_by_idx_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T, HSIZE_T + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: group_name + INTEGER(C_INT) , VALUE :: idx_type + INTEGER(C_INT) , VALUE :: order + INTEGER(HSIZE_T), VALUE :: n + TYPE(C_PTR) , VALUE :: ginfo + INTEGER(HID_T) , VALUE :: lapl_id + INTEGER(HID_T) , VALUE :: es_id + END FUNCTION H5Gget_info_by_idx_async + END INTERFACE + + INTERFACE + INTEGER(C_INT) FUNCTION H5Gget_info_by_name(loc_id, name, ginfo, lapl_id) & + BIND(C,NAME='H5Gget_info_by_name') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + TYPE(C_PTR), VALUE :: ginfo + INTEGER(HID_T), VALUE :: lapl_id + END FUNCTION H5Gget_info_by_name + END INTERFACE + + INTERFACE + INTEGER(C_INT) FUNCTION H5Gget_info_by_name_async(file, func, line,loc_id, name, ginfo, lapl_id, es_id) & + BIND(C,NAME='H5Gget_info_by_name_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + TYPE(C_PTR), VALUE :: ginfo + INTEGER(HID_T), VALUE :: lapl_id + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Gget_info_by_name_async + END INTERFACE + +#endif CONTAINS @@ -52,7 +171,7 @@ MODULE H5G !! \param hdferr \fortran_error !! \param size_hint A parameter indicating the number of bytes to reserve for the names that will appear in the group. !! Set to OBJECT_NAMELEN_DEFAULT_F if using any of the optional parameters lcpl_id, gcpl_id, -!! and/or gapl_id when not using keywords in specifying the optional parameters. +!! and/or gapl_id when not using keywords in specifying the optional parameters. See @ref H5Gcreate1(). !! \param lcpl_id Property list for link creation. !! \param gcpl_id Property list for group creation. !! \param gapl_id Property list for group access. @@ -65,50 +184,187 @@ SUBROUTINE h5gcreate_f(loc_id, name, grp_id, hdferr, size_hint, lcpl_id, gcpl_id CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(HID_T), INTENT(OUT) :: grp_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(SIZE_T), OPTIONAL, INTENT(IN) :: size_hint - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: gcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: gapl_id + INTEGER(SIZE_T), INTENT(IN), OPTIONAL :: size_hint + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: gcpl_id + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: gapl_id INTEGER(HID_T) :: lcpl_id_default INTEGER(HID_T) :: gcpl_id_default INTEGER(HID_T) :: gapl_id_default - INTEGER :: namelen ! 
Length of the name character string INTEGER(SIZE_T) :: size_hint_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name INTERFACE - INTEGER FUNCTION h5gcreate_c(loc_id, name, namelen, & - size_hint_default, grp_id, lcpl_id_default, gcpl_id_default, gapl_id_default) & - BIND(C,NAME='h5gcreate_c') + INTEGER(HID_T) FUNCTION H5Gcreate2(loc_id, name, & + lcpl_id_default, gcpl_id_default, gapl_id_default) & + BIND(C,NAME='H5Gcreate2') IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER :: namelen - INTEGER(SIZE_T) :: size_hint_default - INTEGER(HID_T), INTENT(OUT) :: grp_id - INTEGER(HID_T) :: lcpl_id_default - INTEGER(HID_T) :: gcpl_id_default - INTEGER(HID_T) :: gapl_id_default - END FUNCTION h5gcreate_c + IMPORT :: HID_T + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: lcpl_id_default + INTEGER(HID_T), VALUE :: gcpl_id_default + INTEGER(HID_T), VALUE :: gapl_id_default + END FUNCTION H5Gcreate2 END INTERFACE - size_hint_default = OBJECT_NAMELEN_DEFAULT_F - IF (PRESENT(size_hint)) size_hint_default = size_hint + hdferr = 0 + c_name = TRIM(name)//C_NULL_CHAR + lcpl_id_default = H5P_DEFAULT_F - IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id gcpl_id_default = H5P_DEFAULT_F - IF(PRESENT(gcpl_id)) gcpl_id_default = gcpl_id gapl_id_default = H5P_DEFAULT_F - IF(PRESENT(gapl_id)) gapl_id_default = gapl_id + size_hint_default = OBJECT_NAMELEN_DEFAULT_F - namelen = LEN(name) + IF(PRESENT(size_hint)) size_hint_default = size_hint + IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id + IF(PRESENT(gcpl_id)) gcpl_id_default = gcpl_id + IF(PRESENT(gapl_id)) gapl_id_default = gapl_id + ! + ! size_hint was introduced as an overload option for H5Gcreate1, + ! it was removed in H5Gcreate2. + ! + IF(size_hint_default .EQ. OBJECT_NAMELEN_DEFAULT_F)THEN + grp_id = H5Gcreate2(loc_id, c_name, & + lcpl_id_default, gcpl_id_default, gapl_id_default) + ELSE + ! Create the group creation property list + CALL H5Pcreate_f(H5P_GROUP_CREATE_F, gcpl_id_default, hdferr) + IF(hdferr.LT.0) RETURN + + ! Set the local heap size hint + CALL H5Pset_local_heap_size_hint_f(gcpl_id_default, size_hint, hdferr) + IF(hdferr.LT.0)THEN + CALL H5Pclose_f(gcpl_id_default, hdferr) + hdferr = -1 + RETURN + END IF + + grp_id = H5Gcreate2(loc_id, c_name, & + H5P_DEFAULT_F, gcpl_id_default, H5P_DEFAULT_F) + + CALL H5Pclose_f(gcpl_id_default, hdferr) + IF(hdferr.LT.0) RETURN + ENDIF - hdferr = h5gcreate_c(loc_id, name, namelen, size_hint_default, grp_id, & - lcpl_id_default, gcpl_id_default, gapl_id_default) + IF(grp_id.LT.0) hdferr = -1 END SUBROUTINE h5gcreate_f + +!> +!! \ingroup FH5G +!! +!! \brief Asynchronously creates a new group. +!! +!! \param loc_id Location identifier. +!! \param name Group name at the specified location. +!! \param grp_id Group identifier. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param size_hint A parameter indicating the number of bytes to reserve for the names that will appear in the group. +!! Set to OBJECT_NAMELEN_DEFAULT_F if using any of the optional parameters lcpl_id, gcpl_id, +!! and/or gapl_id when not using keywords in specifying the optional parameters. See @ref H5Gcreate1(). +!! \param lcpl_id Property list for link creation. +!! \param gcpl_id Property list for group creation. +!! \param gapl_id Property list for group access. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! 
See C API: @ref H5Gcreate_async() +!! + SUBROUTINE h5gcreate_async_f(loc_id, name, grp_id, es_id, hdferr, & + size_hint, lcpl_id, gcpl_id, gapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: name + INTEGER(HID_T), INTENT(OUT) :: grp_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(SIZE_T), INTENT(IN), OPTIONAL :: size_hint + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: gcpl_id + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: gapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: lcpl_id_default + INTEGER(HID_T) :: gcpl_id_default + INTEGER(HID_T) :: gapl_id_default + INTEGER(SIZE_T) :: size_hint_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(HID_T) FUNCTION H5Gcreate_async(file, func, line, loc_id, name, & + lcpl_id_default, gcpl_id_default, gapl_id_default, es_id) & + BIND(C,NAME='H5Gcreate_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: lcpl_id_default + INTEGER(HID_T), VALUE :: gcpl_id_default + INTEGER(HID_T), VALUE :: gapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Gcreate_async + END INTERFACE + + hdferr = 0 + c_name = TRIM(name)//C_NULL_CHAR + + lcpl_id_default = H5P_DEFAULT_F + gcpl_id_default = H5P_DEFAULT_F + gapl_id_default = H5P_DEFAULT_F + size_hint_default = OBJECT_NAMELEN_DEFAULT_F + + IF(PRESENT(size_hint)) size_hint_default = size_hint + IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id + IF(PRESENT(gcpl_id)) gcpl_id_default = gcpl_id + IF(PRESENT(gapl_id)) gapl_id_default = gapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + ! + ! size_hint was introduced as an overload option for H5Gcreate1, + ! it was removed in H5Gcreate2. + ! + IF(size_hint_default .EQ. OBJECT_NAMELEN_DEFAULT_F)THEN + grp_id = H5Gcreate_async(file_default, func_default, line_default, loc_id, c_name, & + lcpl_id_default, gcpl_id_default, gapl_id_default, es_id) + ELSE + ! Create the group creation property list + CALL H5Pcreate_f(H5P_GROUP_CREATE_F, gcpl_id_default, hdferr) + IF(hdferr.LT.0) RETURN + + ! Set the local heap size hint + CALL H5Pset_local_heap_size_hint_f(gcpl_id_default, size_hint, hdferr) + IF(hdferr.LT.0)THEN + CALL H5Pclose_f(gcpl_id_default, hdferr) + hdferr = -1 + RETURN + END IF + + grp_id = H5Gcreate_async(file_default, func_default, line_default, loc_id, c_name, & + H5P_DEFAULT_F, gcpl_id_default, H5P_DEFAULT_F, es_id) + + CALL H5Pclose_f(gcpl_id_default, hdferr) + IF(hdferr.LT.0) RETURN + ENDIF + + IF(grp_id.LT.0) hdferr = -1 + + END SUBROUTINE h5gcreate_async_f + !> !! \ingroup FH5G !! 
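A sketch of creating a group through the h5gcreate_async_f wrapper added above; the group name is illustrative, and loc_id and es_id are assumed to come from, e.g., h5fcreate_async_f and h5escreate_f:

    INTEGER(HID_T) :: grp_id
    INTEGER        :: hdferr

    CALL h5gcreate_async_f(loc_id, "g_async", grp_id, es_id, hdferr)
    ! Further operations on grp_id can be queued on the same event set;
    ! wait with h5eswait_f before using grp_id outside of it.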
@@ -128,30 +384,100 @@ SUBROUTINE h5gopen_f(loc_id, name, grp_id, hdferr, gapl_id) CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(HID_T), INTENT(OUT) :: grp_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: gapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: gapl_id + + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name INTEGER(HID_T) :: gapl_id_default - INTEGER :: namelen ! Length of the name character string INTERFACE - INTEGER FUNCTION h5gopen_c(loc_id, name, namelen, gapl_id_default, grp_id) & - BIND(C,NAME='h5gopen_c') + INTEGER(HID_T) FUNCTION H5Gopen2(loc_id, name, gapl_id_default) & + BIND(C,NAME='H5Gopen2') IMPORT :: C_CHAR IMPORT :: HID_T - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER :: namelen - INTEGER(HID_T), INTENT(IN) :: gapl_id_default - INTEGER(HID_T), INTENT(OUT) :: grp_id - END FUNCTION h5gopen_c + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: gapl_id_default + END FUNCTION H5Gopen2 END INTERFACE + c_name = TRIM(name)//C_NULL_CHAR + gapl_id_default = H5P_DEFAULT_F IF(PRESENT(gapl_id)) gapl_id_default = gapl_id - namelen = LEN(name) - hdferr = h5gopen_c(loc_id, name, namelen, gapl_id_default, grp_id) + grp_id = H5Gopen2(loc_id, c_name, gapl_id_default) + + hdferr = 0 + IF(grp_id.LT.0) hdferr = -1 END SUBROUTINE h5gopen_f + +!> +!! \ingroup FH5G +!! +!! \brief Asynchronously opens an existing group. +!! +!! \param loc_id Location identifier. +!! \param name Name of the group to open. +!! \param grp_id Group identifier. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param gapl_id Group access property list identifier. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Gopen_async() +!! + SUBROUTINE h5gopen_async_f(loc_id, name, grp_id, es_id, hdferr, & + gapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: name + INTEGER(HID_T), INTENT(OUT) :: grp_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: gapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: gapl_id_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(HID_T) FUNCTION H5Gopen_async(file, func, line, loc_id, name, gapl_id_default, es_id) & + BIND(C,NAME='H5Gopen_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: gapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Gopen_async + END INTERFACE + + c_name = TRIM(name)//C_NULL_CHAR + + gapl_id_default = H5P_DEFAULT_F + IF(PRESENT(gapl_id)) gapl_id_default = gapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + grp_id = H5Gopen_async(file_default, func_default, line_default, & + loc_id, c_name, gapl_id_default, es_id) + + hdferr = 0 + IF(grp_id.LT.0) hdferr = -1 + + END SUBROUTINE h5gopen_async_f !> !! \ingroup FH5G !! 
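And the corresponding open, sketched together with the asynchronous close added in the next hunk (the group name is illustrative; loc_id and es_id as in the sketches above):

    INTEGER(HID_T) :: grp_id
    INTEGER        :: hdferr

    CALL h5gopen_async_f(loc_id, "some_group", grp_id, es_id, hdferr)
    ! ... queue work that uses grp_id on the same event set ...
    CALL h5gclose_async_f(grp_id, es_id, hdferr)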
@@ -167,18 +493,66 @@ SUBROUTINE h5gclose_f(grp_id, hdferr) INTEGER(HID_T), INTENT(IN) :: grp_id INTEGER, INTENT(OUT) :: hdferr INTERFACE - INTEGER FUNCTION h5gclose_c(grp_id) BIND(C,NAME='h5gclose_c') + INTEGER(C_INT) FUNCTION H5Gclose(grp_id) BIND(C,NAME='H5Gclose') + IMPORT :: C_INT IMPORT :: HID_T - INTEGER(HID_T), INTENT(IN) :: grp_id - END FUNCTION h5gclose_c + INTEGER(HID_T), VALUE :: grp_id + END FUNCTION H5Gclose END INTERFACE - hdferr = h5gclose_c(grp_id) + hdferr = INT(H5Gclose(grp_id)) END SUBROUTINE h5gclose_f !> !! \ingroup FH5G !! +!! \brief Asynchronously closes the specified group. +!! +!! \param grp_id Group identifier. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Gclose_async() +!! + SUBROUTINE h5gclose_async_f(grp_id, es_id, hdferr, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: grp_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Gclose_async(file, func, line, grp_id, es_id) & + BIND(C,NAME='H5Gclose_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: grp_id + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Gclose_async + END INTERFACE + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(H5Gclose_async(file_default, func_default, line_default, grp_id, es_id)) + + END SUBROUTINE h5gclose_async_f +!> +!! \ingroup FH5G +!! !! \brief Returns name and type of the group member identified by its index. !! !! \param loc_id Location identifier. @@ -604,8 +978,8 @@ SUBROUTINE h5Gcreate_anon_f(loc_id, grp_id, hdferr, gcpl_id, gapl_id) INTEGER(HID_T), INTENT(IN) :: loc_id INTEGER(HID_T), INTENT(OUT) :: grp_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: gcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: gapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: gcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: gapl_id INTEGER(HID_T) :: gcpl_id_default INTEGER(HID_T) :: gapl_id_default @@ -657,15 +1031,96 @@ END FUNCTION h5gget_create_plist_c END SUBROUTINE h5gget_create_plist_f +#ifdef H5_DOXYGEN !> !! \ingroup FH5G !! !! \brief Retrieves information about a group !! -!! \param group_id Group identifier. +!! \attention \fortran_approved +!! +!! \param loc_id Location identifier. The identifier may be that of a file, group, dataset, named datatype, or attribute. +!! \param ginfo Derived type in which group information is returned. +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5Gget_info() +!! + SUBROUTINE h5gget_info_f(& +#else + SUBROUTINE h5gget_info_f03(& +#endif + loc_id, ginfo, hdferr) + + IMPLICIT NONE + + INTEGER(HID_T) , INTENT(IN) :: loc_id + TYPE(H5G_info_t), INTENT(OUT), TARGET :: ginfo + INTEGER , INTENT(OUT) :: hdferr + + TYPE(C_PTR) :: ptr + + ptr = C_LOC(ginfo) + + hdferr = INT(H5Gget_info(loc_id, ptr)) + +#ifdef H5_DOXYGEN + END SUBROUTINE h5gget_info_f +#else + END SUBROUTINE h5gget_info_f03 +#endif + +!> +!! \ingroup FH5G +!! +!! 
\brief Asynchronously retrieves information about a group +!! +!! \param loc_id Location identifier. The identifier may be that of a file, group, dataset, named datatype, or attribute. +!! \param ginfo Derived type in which group information is returned. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Gget_info_async() +!! + SUBROUTINE h5gget_info_async_f(loc_id, ginfo, es_id, hdferr, file, func, line) + + IMPLICIT NONE + + INTEGER(HID_T) , INTENT(IN) :: loc_id + TYPE(H5G_info_t), INTENT(OUT), TARGET :: ginfo + INTEGER(HID_T) , INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: ptr + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + ptr = C_LOC(ginfo) + + hdferr = INT(H5Gget_info_async(file_default, func_default, line_default, loc_id, ptr, es_id)) + + END SUBROUTINE h5gget_info_async_f + +!> +!! \ingroup FH5G +!! +!! \brief Retrieves information about a group. +!! +!! \attention \fortran_obsolete. Both nlinks and max_corder can overflow. +!! +!! \param loc_id Location identifier. The identifier may be that of a file, group, dataset, named datatype, or attribute. !! \param storage_type Type of storage for links in group: !! \li H5G_STORAGE_TYPE_COMPACT_F: Compact storage -!! \li H5G_STORAGE_TYPE_DENS_FE: Indexed storage +!! \li H5G_STORAGE_TYPE_DENSE_F: Indexed storage !! \li H5G_STORAGE_TYPE_SYMBOL_TABLE_F: Symbol tables, the original HDF5 structure !! \param nlinks Number of links in group. !! \param max_corder Current maximum creation order value for group. @@ -674,48 +1129,169 @@ END SUBROUTINE h5gget_create_plist_f !! !! See C API: @ref H5Gget_info() !! - SUBROUTINE h5gget_info_f(group_id, storage_type, nlinks, max_corder, hdferr, mounted) +#ifdef H5_DOXYGEN + SUBROUTINE h5gget_info_f(& +#else + SUBROUTINE h5gget_info_f90(& +#endif + loc_id, storage_type, nlinks, max_corder, hdferr, mounted) + IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: group_id + INTEGER(HID_T), INTENT(IN) :: loc_id INTEGER, INTENT(OUT) :: storage_type INTEGER, INTENT(OUT) :: nlinks INTEGER, INTENT(OUT) :: max_corder INTEGER, INTENT(OUT) :: hdferr LOGICAL, INTENT(OUT), OPTIONAL :: mounted - INTEGER :: mounted_c - INTERFACE - INTEGER FUNCTION h5gget_info_c(group_id, storage_type, nlinks, max_corder, mounted_c) & - BIND(C,NAME='h5gget_info_c') - IMPORT :: HID_T - INTEGER(HID_T), INTENT(IN) :: group_id - INTEGER, INTENT(OUT) :: storage_type - INTEGER, INTENT(OUT) :: nlinks - INTEGER, INTENT(OUT) :: max_corder - INTEGER :: mounted_c - END FUNCTION h5gget_info_c - END INTERFACE + TYPE(H5G_info_t), TARGET :: ginfo + TYPE(C_PTR) :: ptr + + ptr = C_LOC(ginfo) + hdferr = INT(H5Gget_info(loc_id, ptr)) - hdferr = h5gget_info_c(group_id, storage_type, nlinks, max_corder, mounted_c) + storage_type = INT(ginfo%storage_type) + nlinks = INT(ginfo%nlinks) + max_corder = INT(ginfo%max_corder) IF(PRESENT(mounted))THEN - IF(mounted_c.EQ.0) THEN - mounted = .FALSE. - ELSE + IF(ginfo%mounted) THEN mounted = .TRUE. + ELSE + mounted = .FALSE. ENDIF ENDIF - +#ifdef H5_DOXYGEN END SUBROUTINE h5gget_info_f +#else + END SUBROUTINE h5gget_info_f90 +#endif + +!> +!! \ingroup FH5G +!! +!! 
\brief Retrieves information about a group, according to the group’s position within an index. +!! +!! \attention \fortran_approved +!! +!! \param loc_id Location identifier. The identifier may be that of a file, group, dataset, named datatype, or attribute. +!! \param group_name Name of group containing group for which information is to be retrieved. +!! \param idx_type Index type. +!! \param order Order of the count in the index. +!! \param n Position in the index of the group for which information is retrieved. +!! \param ginfo Derived type in which group information is returned. +!! \param hdferr \fortran_error +!! \param lapl_id Link access property list. +!! +!! See C API: @ref H5Gget_info_by_idx() +!! +#ifdef H5_DOXYGEN + SUBROUTINE h5gget_info_by_idx_f(& +#else + SUBROUTINE h5gget_info_by_idx_f03(& +#endif + loc_id, group_name, idx_type, order, n, ginfo, hdferr, lapl_id) + + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: group_name + INTEGER, INTENT(IN) :: idx_type + INTEGER, INTENT(IN) :: order + INTEGER(HSIZE_T), INTENT(IN) :: n + TYPE(H5G_info_t), INTENT(OUT), TARGET :: ginfo + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(group_name)+1,KIND=C_CHAR) :: c_group_name + TYPE(C_PTR) :: ptr + + c_group_name = TRIM(group_name)//C_NULL_CHAR + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + + ptr = C_LOC(ginfo) + + hdferr = H5Gget_info_by_idx(loc_id, c_group_name, & + INT(idx_type,C_INT), INT(order, C_INT), n, ptr, lapl_id_default ) + +#ifdef H5_DOXYGEN + END SUBROUTINE h5gget_info_by_idx_f +#else + END SUBROUTINE h5gget_info_by_idx_f03 +#endif + +!> +!! \ingroup FH5G +!! +!! \brief Asynchronously retrieves information about a group, according to the group’s position within an index. +!! +!! \param loc_id Location identifier. The identifier may be that of a file, group, dataset, named datatype, or attribute. +!! \param group_name Name of group containing group for which information is to be retrieved. +!! \param idx_type Index type. +!! \param order Order of the count in the index. +!! \param n Position in the index of the group for which information is retrieved. +!! \param ginfo Derived type in which group information is returned. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param lapl_id Link access property list. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Gget_info_by_idx_async() +!! 
+ SUBROUTINE h5gget_info_by_idx_async_f(loc_id, group_name, idx_type, order, n, ginfo, es_id, hdferr, & + lapl_id, file, func, line) + + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: group_name + INTEGER, INTENT(IN) :: idx_type + INTEGER, INTENT(IN) :: order + INTEGER(HSIZE_T), INTENT(IN) :: n + TYPE(H5G_info_t), INTENT(OUT), TARGET :: ginfo + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(group_name)+1,KIND=C_CHAR) :: c_group_name + TYPE(C_PTR) :: ptr + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + c_group_name = TRIM(group_name)//C_NULL_CHAR + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + ptr = C_LOC(ginfo) + + hdferr = H5Gget_info_by_idx_async(file_default, func_default, line_default, loc_id, c_group_name, & + INT(idx_type,C_INT), INT(order, C_INT), n, ptr, lapl_id_default, es_id ) + + END SUBROUTINE h5gget_info_by_idx_async_f + !> !! \ingroup FH5G !! !! \brief Retrieves information about a group, according to the group’s position within an index. !! +!! \attention \fortran_obsolete. Both nlinks and max_corder can overflow. +!! !! \param loc_id File or group identifier. !! \param group_name Name of group containing group for which information is to be retrieved. -!! \param index_type Index type. +!! \param idx_type Index type. !! \param order Order of the count in the index. !! \param n Position in the index of the group for which information is retrieved. !! \param storage_type Type of storage for links in group: @@ -730,71 +1306,164 @@ END SUBROUTINE h5gget_info_f !! !! See C API: @ref H5Gget_info_by_idx() !! - SUBROUTINE h5gget_info_by_idx_f(loc_id, group_name, index_type, order, n, & +#ifdef H5_DOXYGEN + SUBROUTINE h5gget_info_by_idx_f(& +#else + SUBROUTINE h5gget_info_by_idx_f90(& +#endif + loc_id, group_name, idx_type, order, n, & storage_type, nlinks, max_corder, hdferr, lapl_id, mounted) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: group_name - INTEGER, INTENT(IN) :: index_type + INTEGER, INTENT(IN) :: idx_type INTEGER, INTENT(IN) :: order INTEGER(HSIZE_T), INTENT(IN) :: n - INTEGER, INTENT(OUT) :: storage_type INTEGER, INTENT(OUT) :: nlinks INTEGER, INTENT(OUT) :: max_corder INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id LOGICAL, INTENT(OUT), OPTIONAL :: mounted - INTEGER :: mounted_c - INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: group_name_len ! 
length of group name - INTERFACE - INTEGER FUNCTION h5gget_info_by_idx_c(loc_id, group_name, group_name_len, index_type, order, n, lapl_id_default, & - storage_type, nlinks, max_corder, mounted_c) BIND(C,NAME='h5gget_info_by_idx_c') - IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T, HSIZE_T - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: group_name - INTEGER, INTENT(IN) :: index_type - INTEGER, INTENT(IN) :: order - INTEGER(HSIZE_T), INTENT(IN) :: n - INTEGER(HID_T) :: lapl_id_default - INTEGER, INTENT(OUT) :: storage_type - INTEGER, INTENT(OUT) :: nlinks - INTEGER, INTENT(OUT) :: max_corder - - INTEGER(SIZE_T) :: group_name_len - INTEGER :: mounted_c - - END FUNCTION h5gget_info_by_idx_c - END INTERFACE + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(group_name)+1,KIND=C_CHAR) :: c_group_name + TYPE(H5G_info_t), TARGET :: ginfo + TYPE(C_PTR) :: ptr - group_name_len = LEN(group_name) + c_group_name = TRIM(group_name)//C_NULL_CHAR + ptr = C_LOC(ginfo) lapl_id_default = H5P_DEFAULT_F IF(PRESENT(lapl_id)) lapl_id_default = lapl_id - hdferr = h5gget_info_by_idx_c(loc_id, group_name, group_name_len, & - index_type, order, n, lapl_id_default, & - storage_type, nlinks, max_corder, mounted_c) + + hdferr = H5Gget_info_by_idx(loc_id, c_group_name, & + INT(idx_type,C_INT), INT(order, C_INT), n, ptr, lapl_id_default ) + + storage_type = INT(ginfo%storage_type) + nlinks = INT(ginfo%nlinks) + max_corder = INT(ginfo%max_corder) IF(PRESENT(mounted))THEN - IF(mounted_c.EQ.0) THEN - mounted = .FALSE. - ELSE + IF(ginfo%mounted) THEN mounted = .TRUE. + ELSE + mounted = .FALSE. ENDIF ENDIF - +#ifdef H5_DOXYGEN END SUBROUTINE h5gget_info_by_idx_f +#else + END SUBROUTINE h5gget_info_by_idx_f90 +#endif !> !! \ingroup FH5G !! -!! \brief Retrieves information about a group. +!! \brief Retrieves information about a group by its name. +!! +!! \attention \fortran_approved +!! +!! \param loc_id File or group identifier. +!! \param name Name of group containing group for which information is to be retrieved. +!! \param ginfo Derived type in which group information is returned. +!! \param hdferr \fortran_error +!! \param lapl_id Link access property list. +!! +!! See C API: @ref H5Gget_info_by_name() +!! +#ifdef H5_DOXYGEN + SUBROUTINE h5gget_info_by_name_f( & +#else + SUBROUTINE h5gget_info_by_name_f03( & +#endif + loc_id, name, ginfo, hdferr, lapl_id) + + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: name + TYPE(H5G_info_t), INTENT(OUT), TARGET :: ginfo + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(C_PTR) :: ptr + + c_name = TRIM(name)//C_NULL_CHAR + ptr = C_LOC(ginfo) + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + + hdferr = INT(h5gget_info_by_name(loc_id, c_name, ptr, lapl_id_default)) + +#ifdef H5_DOXYGEN + END SUBROUTINE h5gget_info_by_name_f +#else + END SUBROUTINE h5gget_info_by_name_f03 +#endif + +!> +!! \ingroup FH5G +!! +!! \brief Asynchronously retrieves information about a group by its name. +!! +!! \param loc_id File or group identifier. +!! \param name Name of group containing group for which information is to be retrieved. +!! \param ginfo Derived type in which group information is returned. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param lapl_id Link access property list. +!! +!! 
See C API: @ref H5Gget_info_by_name_async() +!! + SUBROUTINE h5gget_info_by_name_async_f(loc_id, name, ginfo, es_id, hdferr, & + lapl_id, file, func, line) + + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: name + TYPE(H5G_info_t), INTENT(OUT), TARGET :: ginfo + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(C_PTR) :: ptr + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + c_name = TRIM(name)//C_NULL_CHAR + ptr = C_LOC(ginfo) + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(h5gget_info_by_name_async(file_default, func_default, line_default, & + loc_id, c_name, ptr, lapl_id_default, es_id)) + + END SUBROUTINE h5gget_info_by_name_async_f + +!> +!! \ingroup FH5G +!! +!! \brief Retrieves information about a group by its name. +!! +!! \attention \fortran_obsolete. Both nlinks and max_corder can overflow. !! !! \param loc_id File or group identifier. -!! \param group_name Name of group containing group for which information is to be retrieved. +!! \param name Name of group containing group for which information is to be retrieved. !! \param storage_type Type of storage for links in group: !! \li H5G_STORAGE_TYPE_COMPACT_F: Compact storage !! \li H5G_STORAGE_TYPE_DENSE_F: Indexed storage @@ -807,56 +1476,53 @@ END SUBROUTINE h5gget_info_by_idx_f !! !! See C API: @ref H5Gget_info_by_name() !! - SUBROUTINE h5gget_info_by_name_f(loc_id, group_name, & - storage_type, nlinks, max_corder, hdferr, lapl_id, mounted) +#ifdef H5_DOXYGEN + SUBROUTINE h5gget_info_by_name_f( & +#else + SUBROUTINE h5gget_info_by_name_f90( & +#endif + loc_id, name, storage_type, nlinks, max_corder, hdferr, lapl_id, mounted) + IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(LEN=*), INTENT(IN) :: group_name + CHARACTER(LEN=*), INTENT(IN) :: name INTEGER, INTENT(OUT) :: storage_type INTEGER, INTENT(OUT) :: nlinks INTEGER, INTENT(OUT) :: max_corder INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id LOGICAL, INTENT(OUT), OPTIONAL :: mounted - INTEGER :: mounted_c - INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: group_name_len ! 
length of group name - - INTERFACE - INTEGER FUNCTION h5gget_info_by_name_c(loc_id, group_name, group_name_len, lapl_id_default, & - storage_type, nlinks, max_corder, mounted_c) BIND(C,NAME='h5gget_info_by_name_c') - IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: group_name - INTEGER(HID_T), INTENT(IN) :: lapl_id_default - INTEGER, INTENT(OUT) :: storage_type - INTEGER, INTENT(OUT) :: nlinks - INTEGER, INTENT(OUT) :: max_corder - - INTEGER(SIZE_T) :: group_name_len - INTEGER :: mounted_c - END FUNCTION h5gget_info_by_name_c - END INTERFACE + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(H5G_info_t), TARGET :: ginfo + TYPE(C_PTR) :: ptr - group_name_len = LEN(group_name) + c_name = TRIM(name)//C_NULL_CHAR + ptr = C_LOC(ginfo) lapl_id_default = H5P_DEFAULT_F IF(PRESENT(lapl_id)) lapl_id_default = lapl_id - hdferr = h5gget_info_by_name_c(loc_id, group_name, group_name_len, lapl_id_default, & - storage_type, nlinks, max_corder, mounted_c) + hdferr = INT(H5Gget_info_by_name(loc_id, c_name, ptr, lapl_id_default)) + + storage_type = INT(ginfo%storage_type) + nlinks = INT(ginfo%nlinks) + max_corder = INT(ginfo%max_corder) IF(PRESENT(mounted))THEN - IF(mounted_c.EQ.0) THEN - mounted = .FALSE. - ELSE + IF(ginfo%mounted) THEN mounted = .TRUE. + ELSE + mounted = .FALSE. ENDIF ENDIF +#ifdef H5_DOXYGEN END SUBROUTINE h5gget_info_by_name_f +#else + END SUBROUTINE h5gget_info_by_name_f90 +#endif END MODULE H5G diff --git a/fortran/src/H5Lf.c b/fortran/src/H5Lf.c index 6951feff962..63bed998fd4 100644 --- a/fortran/src/H5Lf.c +++ b/fortran/src/H5Lf.c @@ -144,277 +144,6 @@ h5lcreate_external_c(_fcd file_name, size_t_f *file_namelen, _fcd obj_name, size return ret_value; } -/****if* H5Lf/h5ldelete_c - * NAME - * h5ldelete_c - * PURPOSE - * Call H5Ldelete - * INPUTS - * - * - * loc_id - Identifier of the file or group containing the object - * name - Name of the link to delete - * lapl_id - Link access property list identifier - * namelen - length of name - * - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. Scot Breitenfeld - * January, 2008 - * SOURCE - */ - -int_f -h5ldelete_c(hid_t_f *loc_id, _fcd name, size_t_f *namelen, hid_t_f *lapl_id) -/******/ -{ - char *c_name = NULL; - int_f ret_value = 0; - - /* - * Convert FORTRAN name to C name - */ - if ((c_name = HD5f2cstring(name, (size_t)*namelen)) == NULL) - HGOTO_DONE(FAIL); - - /* - * Call H5Ldelete function. - */ - if (H5Ldelete((hid_t)*loc_id, c_name, (hid_t)*lapl_id) < 0) - HGOTO_DONE(FAIL); - -done: - if (c_name) - HDfree(c_name); - - return ret_value; -} - -/****if* H5Lf/h5lcreate_soft_c - * NAME - * h5lcreate_soft_c - * PURPOSE - * Call H5Lcreate_soft - * INPUTS - * - * - * target_path - Path to the target object, which is not required to exist. - * link_loc_id - The file or group identifier for the new link. - * link_name - The name of the new link. - * lcpl_id - Link creation property list identifier. - * lapl_id - Link access property list identifier. - * - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. 
Scot Breitenfeld - * February 20, 2008 - * SOURCE - */ - -int_f -h5lcreate_soft_c(_fcd target_path, size_t_f *target_path_len, hid_t_f *link_loc_id, _fcd link_name, - size_t_f *link_name_len, hid_t_f *lcpl_id, hid_t_f *lapl_id) -/******/ -{ - char *c_target_path = NULL; - char *c_link_name = NULL; - int_f ret_value = 0; - - /* - * Convert FORTRAN name to C name - */ - if ((c_target_path = HD5f2cstring(target_path, (size_t)*target_path_len)) == NULL) - HGOTO_DONE(FAIL); - if ((c_link_name = HD5f2cstring(link_name, (size_t)*link_name_len)) == NULL) - HGOTO_DONE(FAIL); - - /* - * Call H5Adelete function. - */ - if (H5Lcreate_soft(c_target_path, (hid_t)*link_loc_id, c_link_name, (hid_t)*lcpl_id, (hid_t)*lapl_id) < 0) - HGOTO_DONE(FAIL); - -done: - if (c_target_path) - HDfree(c_target_path); - if (c_link_name) - HDfree(c_link_name); - - return ret_value; -} - -/****if* H5Lf/h5lcreate_hard_c - * NAME - * h5lcreate_hard_c - * PURPOSE - * Call H5Lcreate_hard - * INPUTS - * - * obj_loc_id - The file or group identifier for the target object. - * obj_name - Name of the target object, which must already exist. - * obj_namelen - Name length - * link_loc_id - The file or group identifier for the new link. - * link_name - The name of the new link. - * link_namelen- Name length - * lcpl_id - Link creation property list identifier. - * lapl_id - Link access property list identifier. - * - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. Scot Breitenfeld - * February 27, 2008 - * SOURCE - */ -int_f -h5lcreate_hard_c(hid_t_f *obj_loc_id, _fcd obj_name, size_t_f *obj_namelen, hid_t_f *link_loc_id, - _fcd link_name, size_t_f *link_namelen, hid_t_f *lcpl_id, hid_t_f *lapl_id) -/******/ -{ - char *c_obj_name = NULL; - char *c_link_name = NULL; - int_f ret_value = 0; - - /* - * Convert FORTRAN name to C name - */ - if ((c_obj_name = HD5f2cstring(obj_name, (size_t)*obj_namelen)) == NULL) - HGOTO_DONE(FAIL); - if ((c_link_name = HD5f2cstring(link_name, (size_t)*link_namelen)) == NULL) - HGOTO_DONE(FAIL); - - /* - * Call H5Lcreate_hard function. - */ - if (H5Lcreate_hard((hid_t)*obj_loc_id, c_obj_name, (hid_t)*link_loc_id, c_link_name, (hid_t)*lcpl_id, - (hid_t)*lapl_id) < 0) - HGOTO_DONE(FAIL); - -done: - if (c_obj_name) - HDfree(c_obj_name); - if (c_link_name) - HDfree(c_link_name); - - return ret_value; -} - -/****if* H5Lf/h5ldelete_by_idx_c - * NAME - * h5ldelete_by_idx_c - * PURPOSE - * Calls h5ldelete_by_idx - * INPUTS - * - * loc_id - File or group identifier specifying location of subject group - * group_name - Name of subject group - * group_namelen - Name length - * index_field - Type of index; Possible values are: - * H5_INDEX_UNKNOWN_F = -1 - Unknown index type - * H5_INDEX_NAME_F - Index on names - * H5_INDEX_CRT_ORDER_F - Index on creation order - * H5_INDEX_N_F - Number of indices defined - * order - Order within field or index; Possible values are: - * H5_ITER_UNKNOWN_F - Unknown order - * H5_ITER_INC_F - Increasing order - * H5_ITER_DEC_F - Decreasing order - * H5_ITER_NATIVE_F - No particular order, whatever is fastest - * H5_ITER_N_F - Number of iteration orders - * n - Link for which to retrieve information - * lapl_id - Link access property list - * - * OUTPUTS - * N/A - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. 
Scot Breitenfeld - * February 29, 2008 - * HISTORY - * N/A - * SOURCE - */ -int_f -h5ldelete_by_idx_c(hid_t_f *loc_id, _fcd group_name, size_t_f *group_namelen, int_f *index_field, - int_f *order, hsize_t_f *n, hid_t_f *lapl_id) -/******/ -{ - char *c_group_name = NULL; /* Buffer to hold C string */ - H5_index_t c_index_field; - H5_iter_order_t c_order; - int_f ret_value = 0; /* Return value */ - - /* - * Convert FORTRAN name to C name - */ - if ((c_group_name = HD5f2cstring(group_name, (size_t)*group_namelen)) == NULL) - HGOTO_DONE(FAIL); - - c_index_field = (H5_index_t)*index_field; - c_order = (H5_iter_order_t)*order; - - /* - * Call H5Ldelete_by_name function. - */ - if (H5Ldelete_by_idx((hid_t)*loc_id, c_group_name, c_index_field, c_order, (hsize_t)*n, (hid_t)*lapl_id) < - 0) - HGOTO_DONE(FAIL); - -done: - if (c_group_name) - HDfree(c_group_name); - return ret_value; -} - -/****if* H5Lf/h5lexists_c - * NAME - * h5lexists_c - * PURPOSE - * Calls H5Lexists - * INPUTS - * - * loc_id - Identifier of the file or group to query. - * name - Link name to check - * lapl_id - Link access property list identifier. - * OUTPUTS - * - * link_exists_c - returns a positive value, for TRUE, or 0 (zero), for FALSE. - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. Scot Breitenfeld - * February 29, 2008 - * HISTORY - * - * SOURCE - */ -int_f -h5lexists_c(hid_t_f *loc_id, _fcd name, size_t_f *namelen, hid_t_f *lapl_id, int_f *link_exists) -/******/ -{ - char *c_name = NULL; /* Buffer to hold C string */ - int_f ret_value = 0; /* Return value */ - - /* - * Convert FORTRAN name to C name - */ - if ((c_name = HD5f2cstring(name, (size_t)*namelen)) == NULL) - HGOTO_DONE(FAIL); - - /* - * Call H5Lexists function. - */ - if ((*link_exists = (int_f)H5Lexists((hid_t)*loc_id, c_name, (hid_t)*lapl_id)) < 0) - HGOTO_DONE(FAIL); - -done: - if (c_name) - HDfree(c_name); - return ret_value; -} - /****if* H5Lf/h5lget_info_c * NAME * h5lget_info_c @@ -953,55 +682,6 @@ h5lget_val_c(hid_t_f *link_loc_id, _fcd link_name, size_t_f *link_namelen, size_ return ret_value; } -/****if* H5Lf/h5literate_c - * NAME - * h5literate_c - * PURPOSE - * Calls H5Literate - * INPUTS - * - * group_id - Identifier specifying subject group - * index_type - Type of index which determines the order - * order - Order within index - * idx - Iteration position at which to start - * op - Callback function passing data regarding the link to the calling application - * op_data - User-defined pointer to data required by the application for its processing of the link - * - * OUTPUTS - * - * idx - Position at which an interrupted iteration may be restarted - * - * RETURNS - * >0 on success, 0< on failure - * AUTHOR - * M. 
Scot Breitenfeld - * July 8, 2008 - * SOURCE - */ -int_f -h5literate_c(hid_t_f *group_id, int_f *index_type, int_f *order, hsize_t_f *idx, H5L_iterate2_t op, - void *op_data) -/******/ -{ - int_f ret_value = -1; /* Return value */ - herr_t func_ret_value; /* H5Linterate return value */ - hsize_t idx_c = 0; - - idx_c = (hsize_t)*idx; - - /* - * Call H5Linterate - */ - - func_ret_value = - H5Literate2((hid_t)*group_id, (H5_index_t)*index_type, (H5_iter_order_t)*order, &idx_c, op, op_data); - - ret_value = (int_f)func_ret_value; - *idx = (hsize_t_f)idx_c; - - return ret_value; -} - /****if* H5Lf/h5literate_by_name_c * NAME * h5literate_by_name_c diff --git a/fortran/src/H5Lff.F90 b/fortran/src/H5Lff.F90 index 8171c1b4dd6..9111144657b 100644 --- a/fortran/src/H5Lff.F90 +++ b/fortran/src/H5Lff.F90 @@ -42,13 +42,14 @@ MODULE H5L IMPLICIT NONE + TYPE, bind(c) :: union_t TYPE(H5O_TOKEN_T_F) :: token !< Type for object tokens INTEGER(size_t) :: val_size !< Size of a soft link or user-defined link value END TYPE union_t ! -! @brief Fortran2003 Derived Type for h5l_info_t +! @brief Fortran2003 Derived Type for @ref H5L_info_t ! TYPE, bind(c) :: h5l_info_t INTEGER(c_int) :: type !< Specifies the link class. Valid values include the following: @@ -90,8 +91,8 @@ SUBROUTINE h5lcopy_f(src_loc_id, src_name, dest_loc_id, dest_name, hdferr, & CHARACTER(LEN=*), INTENT(IN) :: dest_name INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id INTEGER(HID_T) :: lcpl_id_default INTEGER(HID_T) :: lapl_id_default @@ -147,31 +148,93 @@ SUBROUTINE h5ldelete_f(loc_id, name, hdferr, lapl_id) INTEGER(HID_T), INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: name INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: namelen + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name INTERFACE - INTEGER FUNCTION h5ldelete_c(loc_id, name, namelen, lapl_id_default) BIND(C,name='h5ldelete_c') - IMPORT :: c_char + INTEGER(C_INT) FUNCTION H5Ldelete(loc_id, name, lapl_id_default) BIND(C,name='H5Ldelete') + IMPORT :: C_CHAR, C_INT, C_PTR IMPORT :: HID_T, SIZE_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: namelen - END FUNCTION h5ldelete_c + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: lapl_id_default + END FUNCTION h5ldelete END INTERFACE - namelen = LEN(name) + c_name = TRIM(name)//C_NULL_CHAR lapl_id_default = H5P_DEFAULT_F IF(PRESENT(lapl_id)) lapl_id_default = lapl_id - hdferr = h5ldelete_c(loc_id, name, namelen, lapl_id_default) + hdferr = INT(H5Ldelete(loc_id, c_name, lapl_id_default)) END SUBROUTINE h5ldelete_f +!> +!! \ingroup FH5L +!! +!! \brief Asynchronously removes a link from a group. +!! +!! \param loc_id Identifier of the file or group containing the object. +!! \param name Name of the link to delete. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param lapl_id Link access property list identifier. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Ldelete_async() +!! 
+ SUBROUTINE h5ldelete_async_f(loc_id, name, es_id, hdferr, lapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: name + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Ldelete_async(file, func, line, loc_id, name, lapl_id_default, es_id) & + BIND(C,name='H5Ldelete_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T, SIZE_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Ldelete_async + END INTERFACE + + c_name = TRIM(name)//C_NULL_CHAR + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(H5Ldelete_async(file_default, func_default, line_default, & + loc_id, c_name, lapl_id_default, es_id)) + + END SUBROUTINE h5ldelete_async_f + !> !! \ingroup FH5L !! @@ -192,49 +255,118 @@ SUBROUTINE h5lcreate_soft_f(target_path, link_loc_id, link_name, hdferr, lcpl_id INTEGER(HID_T), INTENT(IN) :: link_loc_id CHARACTER(LEN=*), INTENT(IN) :: link_name INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + INTEGER(HID_T) :: lcpl_id_default INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: target_path_len - INTEGER(SIZE_T) :: link_name_len + CHARACTER(LEN=LEN_TRIM(target_path) +1,KIND=C_CHAR) :: c_target_path + CHARACTER(LEN=LEN_TRIM(link_name)+1,KIND=C_CHAR) :: c_link_name INTERFACE - INTEGER FUNCTION h5lcreate_soft_c(target_path, target_path_len, & - link_loc_id, & - link_name,link_name_len, & - lcpl_id_default, lapl_id_default ) BIND(C,NAME='h5lcreate_soft_c') - IMPORT :: c_char - IMPORT :: HID_T, SIZE_T + INTEGER(C_INT) FUNCTION H5Lcreate_soft(target_path, link_loc_id, link_name, & + lcpl_id_default, lapl_id_default ) BIND(C,NAME='H5Lcreate_soft') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T IMPLICIT NONE - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: target_path - INTEGER(SIZE_T) :: target_path_len - INTEGER(HID_T), INTENT(IN) :: link_loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: link_name - INTEGER(SIZE_T) :: link_name_len - INTEGER(HID_T) :: lcpl_id_default - INTEGER(HID_T) :: lapl_id_default - END FUNCTION h5lcreate_soft_c + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: target_path + INTEGER(HID_T), VALUE :: link_loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: link_name + INTEGER(HID_T), VALUE :: lcpl_id_default + INTEGER(HID_T), VALUE :: lapl_id_default + END FUNCTION H5Lcreate_soft END INTERFACE - target_path_len = LEN(target_path) - link_name_len = LEN(link_name) + c_target_path = TRIM(target_path)//C_NULL_CHAR + c_link_name = TRIM(link_name)//C_NULL_CHAR lcpl_id_default = H5P_DEFAULT_F - 
IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id IF(PRESENT(lapl_id)) lapl_id_default = lapl_id - hdferr = h5lcreate_soft_c(target_path, target_path_len,& - link_loc_id, & - link_name, link_name_len, & - lcpl_id_default, lapl_id_default ) + hdferr = INT(H5Lcreate_soft(c_target_path, link_loc_id, c_link_name, & + lcpl_id_default, lapl_id_default)) END SUBROUTINE h5lcreate_soft_f !> !! \ingroup FH5L !! +!! \brief Asynchronously creates a soft link to an object. +!! +!! \param target_path Path to the target object, which is not required to exist. +!! \param link_loc_id The file or group identifier for the new link. +!! \param link_name The name of the new link. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param lcpl_id Link creation property list identifier. +!! \param lapl_id Link access property list identifier. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Lcreate_soft_async() +!! + SUBROUTINE h5lcreate_soft_async_f(target_path, link_loc_id, link_name, es_id, hdferr,& + lcpl_id, lapl_id, file, func, line) + IMPLICIT NONE + CHARACTER(LEN=*), INTENT(IN) :: target_path + INTEGER(HID_T), INTENT(IN) :: link_loc_id + CHARACTER(LEN=*), INTENT(IN) :: link_name + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: lcpl_id_default + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(target_path) +1,KIND=C_CHAR) :: c_target_path + CHARACTER(LEN=LEN_TRIM(link_name)+1,KIND=C_CHAR) :: c_link_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Lcreate_soft_async(file, func, line, target_path, link_loc_id, link_name, & + lcpl_id_default, lapl_id_default, es_id) BIND(C,NAME='H5Lcreate_soft_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: target_path + INTEGER(HID_T), VALUE :: link_loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: link_name + INTEGER(HID_T), VALUE :: lcpl_id_default + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Lcreate_soft_async + END INTERFACE + + c_target_path = TRIM(target_path)//C_NULL_CHAR + c_link_name = TRIM(link_name)//C_NULL_CHAR + + lcpl_id_default = H5P_DEFAULT_F + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(H5Lcreate_soft_async(file_default, func_default, line_default,& + c_target_path, link_loc_id, c_link_name, lcpl_id_default, lapl_id_default, es_id)) + + END SUBROUTINE h5lcreate_soft_async_f +!> +!! \ingroup FH5L +!! !! \brief Creates a hard link to an object. !! !! \param obj_loc_id The file or group identifier for the target object. @@ -247,51 +379,124 @@ END SUBROUTINE h5lcreate_soft_f !! !! See C API: @ref H5Lcreate_hard() !! 
- SUBROUTINE h5lcreate_hard_f(obj_loc_id, obj_name, link_loc_id, link_name, hdferr, lcpl_id, lapl_id) + SUBROUTINE h5lcreate_hard_f(obj_loc_id, obj_name, link_loc_id, link_name, hdferr, & + lcpl_id, lapl_id) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: obj_loc_id CHARACTER(LEN=*), INTENT(IN) :: obj_name INTEGER(HID_T), INTENT(IN) :: link_loc_id CHARACTER(LEN=*), INTENT(IN) :: link_name - INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id INTEGER(HID_T) :: lcpl_id_default INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(obj_name) +1,KIND=C_CHAR) :: c_obj_name + CHARACTER(LEN=LEN_TRIM(link_name)+1,KIND=C_CHAR) :: c_link_name + INTERFACE + INTEGER(C_INT) FUNCTION H5Lcreate_hard(obj_loc_id, obj_name, & + link_loc_id, link_name, lcpl_id_default, lapl_id_default) BIND(C,NAME='H5Lcreate_hard') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + INTEGER(HID_T), VALUE :: obj_loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + INTEGER(HID_T), VALUE :: link_loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: link_name + INTEGER(HID_T), VALUE :: lcpl_id_default + INTEGER(HID_T), VALUE :: lapl_id_default + END FUNCTION H5Lcreate_hard + END INTERFACE - INTEGER(SIZE_T) :: obj_namelen - INTEGER(SIZE_T) :: link_namelen + c_obj_name = TRIM(obj_name)//C_NULL_CHAR + c_link_name = TRIM(link_name)//C_NULL_CHAR + + lcpl_id_default = H5P_DEFAULT_F + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + + hdferr = INT(H5Lcreate_hard(obj_loc_id, c_obj_name, & + link_loc_id, c_link_name, lcpl_id_default, lapl_id_default)) + + END SUBROUTINE h5lcreate_hard_f +!> +!! \ingroup FH5L +!! +!! \brief Asynchronously creates a hard link to an object. +!! +!! \param obj_loc_id The file or group identifier for the target object. +!! \param obj_name Name of the target object, which must already exist. +!! \param link_loc_id The file or group identifier for the new link. +!! \param link_name The name of the new link. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param lcpl_id Link creation property list identifier. +!! \param lapl_id Link access property list identifier. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Lcreate_hard_async() +!! 
+ SUBROUTINE h5lcreate_hard_async_f(obj_loc_id, obj_name, link_loc_id, link_name, es_id, hdferr, & + lcpl_id, lapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: obj_loc_id + CHARACTER(LEN=*), INTENT(IN) :: obj_name + INTEGER(HID_T), INTENT(IN) :: link_loc_id + CHARACTER(LEN=*), INTENT(IN) :: link_name + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: lcpl_id_default + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(obj_name) +1,KIND=C_CHAR) :: c_obj_name + CHARACTER(LEN=LEN_TRIM(link_name)+1,KIND=C_CHAR) :: c_link_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 INTERFACE - INTEGER FUNCTION h5lcreate_hard_c(obj_loc_id, obj_name, obj_namelen, & - link_loc_id, link_name, link_namelen, lcpl_id_default, lapl_id_default) BIND(C,NAME='h5lcreate_hard_c') - IMPORT :: c_char - IMPORT :: HID_T, SIZE_T + INTEGER(C_INT) FUNCTION H5Lcreate_hard_async(file, func, line, obj_loc_id, obj_name, & + link_loc_id, link_name, lcpl_id_default, lapl_id_default, es_id) BIND(C,NAME='H5Lcreate_hard_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: obj_loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: obj_name - INTEGER(HID_T), INTENT(IN) :: link_loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: link_name - INTEGER(SIZE_T) :: obj_namelen - INTEGER(SIZE_T) :: link_namelen - INTEGER(HID_T) :: lcpl_id_default - INTEGER(HID_T) :: lapl_id_default - END FUNCTION h5lcreate_hard_c + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: obj_loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: obj_name + INTEGER(HID_T), VALUE :: link_loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: link_name + INTEGER(HID_T), VALUE :: lcpl_id_default + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Lcreate_hard_async END INTERFACE - obj_namelen = LEN(obj_name) - link_namelen = LEN(link_name) + + c_obj_name = TRIM(obj_name)//C_NULL_CHAR + c_link_name = TRIM(link_name)//C_NULL_CHAR lcpl_id_default = H5P_DEFAULT_F - IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF (PRESENT(file)) file_default = file + IF (PRESENT(func)) func_default = func + IF (PRESENT(line)) line_default = INT(line, C_INT) - hdferr = h5lcreate_hard_c(obj_loc_id, obj_name, obj_namelen, & - link_loc_id, link_name, link_namelen, lcpl_id_default, lapl_id_default) + hdferr = INT(H5Lcreate_hard_async(file_default, func_default, line_default, obj_loc_id, c_obj_name, & + link_loc_id, c_link_name, lcpl_id_default, lapl_id_default, es_id)) - END SUBROUTINE h5lcreate_hard_f + END SUBROUTINE h5lcreate_hard_async_f !> !! 
\ingroup FH5L @@ -318,8 +523,8 @@ SUBROUTINE h5lcreate_external_f(file_name, obj_name, link_loc_id, link_name, hdf INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id INTEGER(HID_T) :: lcpl_id_default INTEGER(HID_T) :: lapl_id_default @@ -391,34 +596,116 @@ SUBROUTINE h5ldelete_by_idx_f(loc_id, group_name, index_field, order, n, hdferr, INTEGER, INTENT(IN) :: order INTEGER(HSIZE_T), INTENT(IN) :: n INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: group_namelen + CHARACTER(LEN=LEN_TRIM(group_name)+1,KIND=C_CHAR) :: c_group_name INTERFACE - INTEGER FUNCTION h5ldelete_by_idx_c(loc_id, group_name, group_namelen, index_field, order, n, lapl_id_default) & - BIND(C,NAME='h5ldelete_by_idx_c') - IMPORT :: c_char - IMPORT :: HID_T, SIZE_T, HSIZE_T + INTEGER(C_INT) FUNCTION H5Ldelete_by_idx(loc_id, group_name, index_field, order, n, lapl_id_default) & + BIND(C,NAME='H5Ldelete_by_idx') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T, HSIZE_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: group_name - INTEGER, INTENT(IN) :: index_field - INTEGER, INTENT(IN) :: order - INTEGER(HSIZE_T), INTENT(IN) :: n - INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: group_namelen - END FUNCTION h5ldelete_by_idx_c + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: group_name + INTEGER(C_INT), VALUE :: index_field + INTEGER(C_INT), VALUE :: order + INTEGER(HSIZE_T), VALUE :: n + INTEGER(HID_T), VALUE :: lapl_id_default + END FUNCTION H5Ldelete_by_idx END INTERFACE + c_group_name = TRIM(group_name)//C_NULL_CHAR + lapl_id_default = H5P_DEFAULT_F IF(PRESENT(lapl_id)) lapl_id_default = lapl_id - group_namelen = LEN(group_name) - hdferr = h5ldelete_by_idx_c(loc_id, group_name, group_namelen, index_field, order, n, lapl_id_default) + hdferr = INT(H5Ldelete_by_idx(loc_id, c_group_name, INT(index_field,C_INT), INT(order, C_INT), n, lapl_id_default)) END SUBROUTINE h5ldelete_by_idx_f +!> +!! \ingroup FH5L +!! +!! \brief Asynchronously removes the nth link in a group. +!! +!! \param loc_id File or group identifier specifying location of subject group. +!! \param group_name Name of subject group. +!! \param index_field Type of index; Possible values are: +!! \li H5_INDEX_UNKNOWN_F = -1 - Unknown index type +!! \li H5_INDEX_NAME_F - Index on names +!! \li H5_INDEX_CRT_ORDER_F - Index on creation order +!! \li H5_INDEX_N_F - Number of indices defined +!! \param order Order within field or index; Possible values are: +!! \li H5_ITER_UNKNOWN_F - Unknown order +!! \li H5_ITER_INC_F - Increasing order +!! \li H5_ITER_DEC_F - Decreasing order +!! \li H5_ITER_NATIVE_F - No particular order, whatever is fastest +!! \li H5_ITER_N_F - Number of iteration orders +!! \param n Link for which to retrieve information. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param lapl_id Link access property list. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Ldelete_by_idx_async() +!! 
+ SUBROUTINE h5ldelete_by_idx_async_f(loc_id, group_name, index_field, order, n, es_id, hdferr, & + lapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: group_name + INTEGER, INTENT(IN) :: index_field + INTEGER, INTENT(IN) :: order + INTEGER(HSIZE_T), INTENT(IN) :: n + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(group_name)+1,KIND=C_CHAR) :: c_group_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Ldelete_by_idx_async(file, func, line, loc_id, group_name, index_field, order, n, & + lapl_id_default, es_id) BIND(C,NAME='H5Ldelete_by_idx_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T, HSIZE_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: group_name + INTEGER(C_INT), VALUE :: index_field + INTEGER(C_INT), VALUE :: order + INTEGER(HSIZE_T), VALUE :: n + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Ldelete_by_idx_async + END INTERFACE + + c_group_name = TRIM(group_name)//C_NULL_CHAR + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(H5Ldelete_by_idx_async(file_default, func_default, line_default, & + loc_id, c_group_name, INT(index_field,C_INT), INT(order, C_INT), n, lapl_id_default, es_id)) + + END SUBROUTINE h5ldelete_by_idx_async_f + !> !! \ingroup FH5L !! @@ -438,38 +725,104 @@ SUBROUTINE h5lexists_f(loc_id, name, link_exists, hdferr, lapl_id) CHARACTER(LEN=*), INTENT(IN) :: name LOGICAL, INTENT(OUT) :: link_exists INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id - INTEGER :: link_exists_c + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + + INTEGER(C_INT) :: link_exists_c INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: namelen + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name INTERFACE - INTEGER FUNCTION h5lexists_c(loc_id, name, namelen, lapl_id_default, link_exists_c) & - BIND(C,NAME='h5lexists_c') - IMPORT :: c_char - IMPORT :: HID_T, SIZE_T + INTEGER(C_INT) FUNCTION H5Lexists(loc_id, name, lapl_id_default) BIND(C,NAME='H5Lexists') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER(SIZE_T), INTENT(IN) :: namelen - INTEGER, INTENT(OUT) :: link_exists_c - INTEGER(HID_T) :: lapl_id_default + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: lapl_id_default - END FUNCTION h5lexists_c + END FUNCTION H5Lexists END INTERFACE - namelen = LEN(name) + c_name = TRIM(name)//C_NULL_CHAR lapl_id_default = H5P_DEFAULT_F IF(PRESENT(lapl_id)) lapl_id_default = lapl_id - hdferr = h5lexists_c(loc_id, name, namelen, lapl_id_default, link_exists_c) + link_exists_c = H5Lexists(loc_id, c_name, lapl_id_default) link_exists = .FALSE. IF(link_exists_c.GT.0) link_exists = .TRUE. 
+ hdferr = 0 + IF(link_exists_c.LT.0) hdferr = -1 + END SUBROUTINE h5lexists_f +!> +!! \ingroup FH5L +!! +!! \brief Asynchronously checks if a link with a particular name exists in a group. +!! +!! \param loc_id Identifier of the file or group to query. +!! \param name Link name to check. +!! \param link_exists Pointer to Link exists status, must be of type LOGICAL(C_BOOL) and initialize to .FALSE. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param lapl_id Link access property list identifier. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Lexists_async() +!! + SUBROUTINE h5lexists_async_f(loc_id, name, link_exists, es_id, hdferr, lapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: name + TYPE(C_PTR) , INTENT(INOUT) :: link_exists + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER, INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Lexists_async(file, func, line, & + loc_id, name, exists, lapl_id_default, es_id) BIND(C,NAME='H5Lexists_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + TYPE(C_PTR) , VALUE :: exists + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Lexists_async + END INTERFACE + + c_name = TRIM(name)//C_NULL_CHAR + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(H5Lexists_async(file_default, func_default, line_default, loc_id, c_name, & + link_exists, lapl_id_default, es_id)) + + END SUBROUTINE h5lexists_async_f + !> !! \ingroup FH5L !! @@ -509,7 +862,7 @@ SUBROUTINE h5lget_info_f(link_loc_id, link_name, & TYPE(H5O_TOKEN_T_F), INTENT(OUT), TARGET :: token INTEGER(SIZE_T), INTENT(OUT) :: val_size INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id INTEGER(SIZE_T) :: link_namelen INTEGER(HID_T) :: lapl_id_default INTEGER :: corder_valid @@ -599,13 +952,11 @@ SUBROUTINE h5lget_info_by_idx_f(loc_id, group_name, index_field, order, n, & TYPE(H5O_TOKEN_T_F), INTENT(OUT), TARGET :: token INTEGER(SIZE_T), INTENT(OUT) :: val_size INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id INTEGER :: corder_valid INTEGER(SIZE_T) :: group_namelen INTEGER(HID_T) :: lapl_id_default -! MS FORTRAN needs explicit interface for C functions called here. -! 
INTERFACE INTEGER FUNCTION h5lget_info_by_idx_c(loc_id, group_name, group_namelen, index_field, order, n, & link_type, corder_valid, corder, cset, token, val_size, lapl_id_default) & @@ -697,8 +1048,8 @@ SUBROUTINE h5lmove_f(src_loc_id, src_name, dest_loc_id, dest_name, hdferr, lcpl_ INTEGER(HID_T), INTENT(IN) :: dest_loc_id CHARACTER(LEN=*), INTENT(IN) :: dest_name INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id INTEGER(SIZE_T) :: src_namelen INTEGER(SIZE_T) :: dest_namelen @@ -733,7 +1084,7 @@ END FUNCTION h5lmove_c src_namelen = LEN(src_name) dest_namelen = LEN(dest_name) - hdferr = H5Lmove_c(src_loc_id, src_name, src_namelen, dest_loc_id, & + hdferr = h5lmove_c(src_loc_id, src_name, src_namelen, dest_loc_id, & dest_name, dest_namelen, lcpl_id_default, lapl_id_default) END SUBROUTINE h5lmove_f @@ -775,9 +1126,9 @@ SUBROUTINE h5lget_name_by_idx_f(loc_id, group_name, index_field, order, n, & CHARACTER(LEN=*), INTENT(OUT) :: name INTEGER, INTENT(OUT) :: hdferr INTEGER(SIZE_T) :: group_namelen - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T), OPTIONAL, INTENT(OUT) :: size + INTEGER(SIZE_T), INTENT(OUT), OPTIONAL :: size INTEGER(SIZE_T) :: size_default INTERFACE @@ -940,54 +1291,54 @@ END SUBROUTINE h5lget_name_by_idx_f !! !! \brief Iterates through links in a group. !! -!! \param group_id Identifier specifying subject group. -!! \param index_type Type of index which determines the order: +!! \param group_id Identifier specifying subject group. +!! \param idx_type Type of index which determines the order: !! \li H5_INDEX_NAME_F - Alphanumeric index on name !! \li H5_INDEX_CRT_ORDER_F - Index on creation order -!! \param order Order within index: +!! \param order Order within index: !! \li H5_ITER_INC_F - Increasing order !! \li H5_ITER_DEC_F - Decreasing order !! \li H5_ITER_NATIVE_F - Fastest available order !! \param idx Iteration position at which to start, or
    !! Position at which an interrupted iteration may be restarted -!! \param op Callback function passing data regarding the link to the calling application. -!! \param op_data User-defined pointer to data required by the application for its processing of the link. -!! \param return_value Return context: +!! \param op Callback function passing data regarding the link to the calling application. +!! \param op_data User-defined pointer to data required by the application for its processing of the link. +!! \param return_value Return context: !! \li Success: The return value of the first operator that !! returns non-zero, or zero if all members were processed with no operator returning non-zero. !! \li Failure: Negative if something goes wrong within the !! library, or the negative value returned by one of the operators. -!! \param hdferr \fortran_error +!! \param hdferr \fortran_error !! !! See C API: @ref H5Literate2() !! - SUBROUTINE h5literate_f(group_id, index_type, order, idx, op, op_data, return_value, hdferr) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR, C_FUNPTR + SUBROUTINE h5literate_f(group_id, idx_type, order, idx, op, op_data, return_value, hdferr) IMPLICIT NONE INTEGER(HID_T) , INTENT(IN) :: group_id - INTEGER , INTENT(IN) :: index_type + INTEGER , INTENT(IN) :: idx_type INTEGER , INTENT(IN) :: order INTEGER(HSIZE_T), INTENT(INOUT) :: idx TYPE(C_FUNPTR) , INTENT(IN) :: op TYPE(C_PTR) , INTENT(IN) :: op_data INTEGER , INTENT(OUT) :: return_value INTEGER , INTENT(OUT) :: hdferr + INTERFACE - INTEGER FUNCTION h5literate_c(group_id, index_type, order, idx, op, op_data) & - BIND(C, NAME='h5literate_c') - IMPORT :: c_ptr, c_funptr + INTEGER(C_INT) FUNCTION H5Literate2(group_id, idx_type, order, idx, op, op_data) & + BIND(C, NAME='H5Literate2') + IMPORT :: C_INT, C_PTR, C_FUNPTR IMPORT :: HID_T, HSIZE_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: group_id - INTEGER, INTENT(IN) :: index_type - INTEGER, INTENT(IN) :: order - INTEGER(HSIZE_T), INTENT(INOUT) :: idx - TYPE(C_FUNPTR), VALUE :: op - TYPE(C_PTR), VALUE :: op_data - END FUNCTION h5literate_c + INTEGER(HID_T) , VALUE :: group_id + INTEGER(C_INT) , VALUE :: idx_type + INTEGER(C_INT) , VALUE :: order + INTEGER(HSIZE_T) :: idx + TYPE(C_FUNPTR) , VALUE :: op + TYPE(C_PTR) , VALUE :: op_data + END FUNCTION H5Literate2 END INTERFACE - return_value = h5literate_c(group_id, index_type, order, idx, op, op_data) + return_value = INT(H5Literate2(group_id, INT(idx_type, C_INT), INT(order, C_INT), idx, op, op_data)) IF(return_value.GE.0)THEN hdferr = 0 @@ -997,6 +1348,88 @@ END FUNCTION h5literate_c END SUBROUTINE h5literate_f +!> +!! \ingroup FH5L +!! +!! \brief Asynchronously iterates through links in a group. +!! +!! \param group_id Identifier specifying subject group. +!! \param idx_type Type of index which determines the order: +!! \li H5_INDEX_NAME_F - Alphanumeric index on name +!! \li H5_INDEX_CRT_ORDER_F - Index on creation order +!! \param order Order within index: +!! \li H5_ITER_INC_F - Increasing order +!! \li H5_ITER_DEC_F - Decreasing order +!! \li H5_ITER_NATIVE_F - Fastest available order +!! \param idx Iteration position at which to start, or
    +!! Position at which an interrupted iteration may be restarted +!! \param op Callback function passing data regarding the link to the calling application. +!! \param op_data User-defined pointer to data required by the application for its processing of the link. +!! \param return_value N/A +!! +!! \warning The returned value of the callback routine op will not be set +!! in \p return_value for H5Literate_async_f(), so \p return_value should +!! not be used for determining the return state of the callback routine. +!! +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5Literate_async() +!! + SUBROUTINE h5literate_async_f(group_id, idx_type, order, idx, op, op_data, return_value, es_id, hdferr, & + file, func, line) + IMPLICIT NONE + INTEGER(HID_T) , INTENT(IN) :: group_id + INTEGER , INTENT(IN) :: idx_type + INTEGER , INTENT(IN) :: order + INTEGER(HSIZE_T), INTENT(INOUT) :: idx + TYPE(C_FUNPTR) , INTENT(IN) :: op + TYPE(C_PTR) , INTENT(IN) :: op_data + INTEGER , INTENT(OUT) :: return_value + INTEGER(HID_T) , INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Literate_async(file, func, line, & + group_id, idx_type, order, idx, op, op_data, es_id) BIND(C, NAME='H5Literate_async') + IMPORT :: C_CHAR, C_INT, C_PTR, C_FUNPTR + IMPORT :: HID_T, HSIZE_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T) , VALUE :: group_id + INTEGER(C_INT) , VALUE :: idx_type + INTEGER(C_INT) , VALUE :: order + INTEGER(HSIZE_T) :: idx + TYPE(C_FUNPTR) , VALUE :: op + TYPE(C_PTR) , VALUE :: op_data + INTEGER(HID_T) , VALUE :: es_id + END FUNCTION H5Literate_async + END INTERFACE + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + return_value = INT(H5Literate_async(file_default, func_default, line_default, & + group_id, INT(idx_type, C_INT), INT(order, C_INT), idx, op, op_data, es_id)) + + IF(return_value.GE.0)THEN + hdferr = 0 + ELSE + hdferr = -1 + END IF + + END SUBROUTINE h5literate_async_f + !> !! \ingroup FH5L !! diff --git a/fortran/src/H5Of.c b/fortran/src/H5Of.c index 019699e691f..186a9a2a1c9 100644 --- a/fortran/src/H5Of.c +++ b/fortran/src/H5Of.c @@ -132,76 +132,6 @@ h5olink_c(hid_t_f *object_id, hid_t_f *new_loc_id, _fcd name, size_t_f *namelen, return ret_value; } -/****if* H5Of/h5oopen_c - * NAME - * h5oopen_c - * PURPOSE - * Calls H5Oopen - * INPUTS - * loc_id - File or group identifier - * name - Attribute access property list - * namelen - Size of name - * lapl_id - Link access property list - * OUTPUTS - * obj_id - Dataset identifier - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. Scot Breitenfeld - * April 18, 2008 - * SOURCE - */ -int_f -h5oopen_c(hid_t_f *loc_id, _fcd name, size_t_f *namelen, hid_t_f *lapl_id, hid_t_f *obj_id) -/******/ -{ - char *c_name = NULL; /* Buffer to hold C string */ - int_f ret_value = 0; /* Return value */ - - /* - * Convert FORTRAN name to C name - */ - if ((c_name = HD5f2cstring(name, (size_t)*namelen)) == NULL) - HGOTO_DONE(FAIL); - - /* - * Call H5Oopen function. 
- */ - if ((*obj_id = (hid_t_f)H5Oopen((hid_t)*loc_id, c_name, (hid_t)*lapl_id)) < 0) - HGOTO_DONE(FAIL); - -done: - if (c_name) - HDfree(c_name); - return ret_value; -} -/****if* H5Of/h5oclose_c - * NAME - * h5oclose_c - * PURPOSE - * Call H5Oclose - * INPUTS - * object_id - Object identifier - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. Scot Breitenfeld - * December 17, 2008 - * SOURCE - */ -int_f -h5oclose_c(hid_t_f *object_id) -/******/ -{ - int_f ret_value = 0; /* Return value */ - - if (H5Oclose((hid_t)*object_id) < 0) - HGOTO_DONE(FAIL); - -done: - return ret_value; -} - /****if* H5Of/h5ovisit_c * NAME * h5ovisit_c @@ -292,6 +222,11 @@ h5oopen_by_token_c(hid_t_f *loc_id, H5O_token_t *token, hid_t_f *obj_id) * namelen - Name length. * lapl_id - Link access property list. * fields - Flags specifying the fields to include in object_info. + * file - Filename the async subroutine is being called from + * func - Function name the async subroutine is being called in + * line - Line number the async subroutine is being called at + * es_id - Event set identifier + * * OUTPUTS * object_info - Buffer in which to return object information. * @@ -303,31 +238,30 @@ h5oopen_by_token_c(hid_t_f *loc_id, H5O_token_t *token, hid_t_f *obj_id) * SOURCE */ int_f -h5oget_info_by_name_c(hid_t_f *loc_id, _fcd name, size_t_f *namelen, hid_t_f *lapl_id, - H5O_info_t_f *object_info, int_f *fields) +h5oget_info_by_name_c(hid_t_f *loc_id, char *name, hid_t_f *lapl_id, H5O_info_t_f *object_info, int_f *fields, + hid_t_f *es_id, char *file, char *func, int_f *line) /******/ { - char *c_name = NULL; /* Buffer to hold C string */ - int_f ret_value = 0; /* Return value */ + int_f ret_value = 0; /* Return value */ H5O_info2_t Oinfo; - /* - * Convert FORTRAN name to C name - */ - if ((c_name = HD5f2cstring(name, (size_t)*namelen)) == NULL) - HGOTO_DONE(FAIL); - /* * Call H5Oinfo_by_name function. */ - if (H5Oget_info_by_name3((hid_t)*loc_id, c_name, &Oinfo, (unsigned)*fields, (hid_t)*lapl_id) < 0) - HGOTO_DONE(FAIL); + + if ((hid_t)*es_id != -1) { + if (H5Oget_info_by_name3((hid_t)*loc_id, name, &Oinfo, (unsigned)*fields, (hid_t)*lapl_id) < 0) + HGOTO_DONE(FAIL); + } + else { + if (H5Oget_info_by_name_async_wrap(file, func, (unsigned)*line, (hid_t)*loc_id, name, &Oinfo, + (unsigned)*fields, (hid_t)*lapl_id, (hid_t)*es_id) < 0) + HGOTO_DONE(FAIL); + } ret_value = fill_h5o_info_t_f(Oinfo, object_info); done: - if (c_name) - HDfree(c_name); return ret_value; } @@ -424,78 +358,22 @@ h5oget_info_c(hid_t_f *object_id, H5O_info_t_f *object_info, int_f *fields) return ret_value; } -/* ***if* H5Of/H5Ocopy_c - * NAME - * H5Ocopy_c - * PURPOSE - * Calls H5Ocopy - * INPUTS - * src_loc_id - Object identifier indicating the location of the source object to be copied - * src_name - Name of the source object to be copied - * src_name_len - Length of src_name - * dst_loc_id - Location identifier specifying the destination - * dst_name - Name to be assigned to the new copy - * dst_name_len - Length of dst_name - * ocpypl_id - Object copy property list - * lcpl_id - Link creation property list for the new hard link - * - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. 
Scot Breitenfeld - * March 14, 2012 - * SOURCE - */ -int_f -h5ocopy_c(hid_t_f *src_loc_id, _fcd src_name, size_t_f *src_name_len, hid_t_f *dst_loc_id, _fcd dst_name, - size_t_f *dst_name_len, hid_t_f *ocpypl_id, hid_t_f *lcpl_id) -/******/ -{ - char *c_src_name = NULL; /* Buffer to hold C string */ - char *c_dst_name = NULL; /* Buffer to hold C string */ - - int_f ret_value = 0; /* Return value */ - - /* - * Convert FORTRAN name to C name - */ - if ((c_src_name = HD5f2cstring(src_name, (size_t)*src_name_len)) == NULL) - HGOTO_DONE(FAIL); - if ((c_dst_name = HD5f2cstring(dst_name, (size_t)*dst_name_len)) == NULL) - HGOTO_DONE(FAIL); - - /* - * Call H5Ocopy function. - */ - if (H5Ocopy((hid_t)*src_loc_id, c_src_name, (hid_t)*dst_loc_id, c_dst_name, (hid_t)*ocpypl_id, - (hid_t)*lcpl_id) < 0) - HGOTO_DONE(FAIL); - -done: - if (c_src_name) - HDfree(c_src_name); - if (c_dst_name) - HDfree(c_dst_name); - - return ret_value; -} - /****if* H5Of/h5ovisit_by_name_c * NAME * h5ovisit_by_name_c * PURPOSE * Calls H5Ovisit_by_name * INPUTS - * object_id - Identifier specifying subject group - * index_type - Type of index which determines the order - * order - Order within index - * idx - Iteration position at which to start - * op - Callback function passing data regarding the link to the calling application - * op_data - User-defined pointer to data required by the application for its processing of the link + * object_id - Identifier specifying subject group. + * index_type - Type of index which determines the order. + * order - Order within index. + * idx - Iteration position at which to start. + * op - Callback function passing data regarding the link to the calling application. + * op_data - User-defined pointer to data required by the application for its processing of the link. * fields - Flags specifying the fields to include in object_info. * * OUTPUTS - * idx - Position at which an interrupted iteration may be restarted + * idx - Position at which an interrupted iteration may be restarted. * * RETURNS * >0 on success, 0< on failure @@ -730,59 +608,6 @@ h5oset_comment_by_name_c(hid_t_f *object_id, _fcd name, size_t_f *namelen, _fcd HDfree(c_comment); return ret_value; } -/****if* H5Of/h5oopen_by_idx_c - * NAME - * h5oopen_by_idx_c - * PURPOSE - * Calls H5Oopen_by_idx_c - * INPUTS - * loc_id - A file or group identifier. - * group_name - Name of group, relative to loc_id, in which object is located. - * group_namelen - Length of group_name - * index_type - Type of index by which objects are ordered. - * order - Order of iteration within index. - * n - Object to open. - * lapl_id - Link access property list. - * OUTPUTS - * obj_id - An object identifier for the opened object. - * RETURNS - * 0 on success, -1 on failure - * AUTHOR - * M. Scot Breitenfeld - * May 17, 2012 - * SOURCE - */ -int_f -h5oopen_by_idx_c(hid_t_f *loc_id, _fcd group_name, size_t_f *group_namelen, int_f *index_type, int_f *order, - hsize_t_f *n, hid_t_f *obj_id, hid_t_f *lapl_id) -/******/ -{ - char *c_group_name = NULL; /* Buffer to hold C string */ - int_f ret_value = 0; - H5_index_t c_index_type; - H5_iter_order_t c_order; - - /* - * Convert FORTRAN string to C string - */ - if ((c_group_name = HD5f2cstring(group_name, (size_t)*group_namelen)) == NULL) - HGOTO_DONE(FAIL); - - c_index_type = (H5_index_t)*index_type; - c_order = (H5_iter_order_t)*order; - - /* - * Call H5Oopen_by_idx function. 
- */ - if ((*obj_id = (hid_t_f)H5Oopen_by_idx((hid_t)*loc_id, c_group_name, c_index_type, c_order, (hsize_t)*n, - (hid_t)*lapl_id)) < 0) - HGOTO_DONE(FAIL); - -done: - if (c_group_name) - HDfree(c_group_name); - return ret_value; -} /****if* H5Of/h5oget_comment_c * NAME diff --git a/fortran/src/H5Off.F90 b/fortran/src/H5Off.F90 index 7bb3a0c2cd6..215f6e86cfa 100644 --- a/fortran/src/H5Off.F90 +++ b/fortran/src/H5Off.F90 @@ -41,6 +41,7 @@ MODULE H5O USE H5GLOBAL IMPLICIT NONE + !> @brief h5o_info_t derived type. The time values are an integer array as specified in the Fortran intrinsic DATE_AND_TIME(VALUES). TYPE, BIND(C) :: h5o_info_t INTEGER(C_LONG) :: fileno !< File number that object is located in @@ -135,6 +136,24 @@ MODULE H5O TYPE(meta_size_t) :: meta_size END TYPE c_h5o_native_info_t + INTERFACE + INTEGER FUNCTION h5oget_info_by_name_c(loc_id, name, lapl_id_default, object_info, fields, & + es_id, file, func, line ) & + BIND(C, NAME='h5oget_info_by_name_c') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T, SIZE_T + IMPLICIT NONE + INTEGER(HID_T) :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T) :: lapl_id_default + TYPE(C_PTR), VALUE :: object_info + INTEGER :: fields + INTEGER(HID_T) :: es_id + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT) :: line + END FUNCTION h5oget_info_by_name_c + END INTERFACE CONTAINS @@ -212,30 +231,100 @@ SUBROUTINE h5oopen_f(loc_id, name, obj_id, hdferr, lapl_id) INTEGER(HID_T) , INTENT(OUT) :: obj_id INTEGER , INTENT(OUT) :: hdferr INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lapl_id + INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: namelen + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name INTERFACE - INTEGER FUNCTION h5oopen_c(loc_id, name, namelen, lapl_id_default, obj_id) BIND(C,NAME='h5oopen_c') + INTEGER(HID_T) FUNCTION H5Oopen(loc_id, name, lapl_id_default) BIND(C,NAME='H5Oopen') IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T + IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER(HID_T) :: lapl_id_default - INTEGER(SIZE_T) :: namelen - INTEGER(HID_T), INTENT(OUT) :: obj_id - END FUNCTION h5oopen_c + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: lapl_id_default + END FUNCTION H5Oopen END INTERFACE - namelen = LEN(name) + c_name = TRIM(name)//C_NULL_CHAR lapl_id_default = H5P_DEFAULT_F IF(PRESENT(lapl_id)) lapl_id_default = lapl_id - hdferr = h5oopen_c(loc_id, name, namelen, lapl_id_default, obj_id) + obj_id = H5Oopen(loc_id, c_name, lapl_id_default) + + hdferr = 0 + IF(obj_id.LT.0) hdferr = -1 END SUBROUTINE h5oopen_f + +!> +!! \ingroup FH5O +!! +!! \brief Asynchronously opens an object in an HDF5 file by location identifier and path name. +!! +!! \param loc_id File or group identifier. +!! \param name Path to the object, relative to loc_id. +!! \param obj_id Object identifier for the opened object. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param lapl_id Access property list identifier for the link pointing to the object. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Oopen_async() +!! 
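+!! A minimal usage sketch, assuming loc_id is an already opened file or group, es_id is an event
+!! set created beforehand with the H5ES interface, and "dset1" is only an example object name:
+!! \code
+!! INTEGER(HID_T) :: obj_id
+!! INTEGER        :: hdferr
+!! CALL h5oopen_async_f(loc_id, "dset1", obj_id, es_id, hdferr)
+!! ! obj_id must not be used until the operations queued in es_id have completed
+!! \endcode
+!!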
+ SUBROUTINE h5oopen_async_f(loc_id, name, obj_id, es_id, hdferr, lapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T) , INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: name + INTEGER(HID_T) , INTENT(OUT) :: obj_id + INTEGER(HID_T) , INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(HID_T) FUNCTION H5Oopen_async(file, func, line, & + loc_id, name, lapl_id_default, es_id) BIND(C,NAME='H5Oopen_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Oopen_async + END INTERFACE + + c_name = TRIM(name)//C_NULL_CHAR + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + obj_id = H5Oopen_async(file_default, func_default, line_default, & + loc_id, c_name, lapl_id_default, es_id) + + hdferr = 0 + IF(obj_id.LT.0) hdferr = -1 + + END SUBROUTINE h5oopen_async_f + !> !! \ingroup FH5O !! @@ -248,19 +337,66 @@ END SUBROUTINE h5oopen_f !! SUBROUTINE h5oclose_f(object_id, hdferr) IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: object_id + INTEGER(HID_T), INTENT(IN) :: object_id INTEGER , INTENT(OUT) :: hdferr INTERFACE - INTEGER FUNCTION h5oclose_c(object_id) BIND(C,NAME='h5oclose_c') + INTEGER(C_INT) FUNCTION H5Oclose(object_id) BIND(C,NAME='H5Oclose') + IMPORT :: C_INT IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: object_id - END FUNCTION h5oclose_c + INTEGER(HID_T), VALUE :: object_id + END FUNCTION H5Oclose END INTERFACE - hdferr = h5oclose_c(object_id) + hdferr = INT(H5Oclose(object_id)) + END SUBROUTINE h5oclose_f +!> +!! \ingroup FH5O +!! +!! \brief Asynchronously closes an object in an HDF5 file. +!! +!! \param object_id Object identifier. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5Oclose_async_f() +!! 
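+!! A minimal usage sketch, assuming object_id was obtained from a preceding (possibly asynchronous)
+!! open call and es_id is the same event set used for that call:
+!! \code
+!! CALL h5oclose_async_f(object_id, es_id, hdferr)
+!! ! the close is only guaranteed to have taken effect once es_id has been waited on
+!! \endcode
+!!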
+ SUBROUTINE h5oclose_async_f(object_id, es_id, hdferr, file, func, line) + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: object_id + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Oclose_async(file, func, line, object_id, es_id) BIND(C,NAME='H5Oclose_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: object_id + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Oclose_async + END INTERFACE + + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(H5Oclose_async(file_default, func_default, line_default, object_id, es_id)) + + END SUBROUTINE h5oclose_async_f + !> !! \ingroup FH5O !! @@ -318,40 +454,115 @@ SUBROUTINE h5ocopy_f(src_loc_id, src_name, dst_loc_id, dst_name, hdferr, ocpypl_ INTEGER(HID_T) , INTENT(IN), OPTIONAL :: ocpypl_id INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lcpl_id - INTEGER(SIZE_T) :: src_name_len, dst_name_len INTEGER(HID_T) :: ocpypl_id_default, lcpl_id_default + CHARACTER(LEN=LEN_TRIM(src_name)+1,KIND=C_CHAR) :: c_src_name + CHARACTER(LEN=LEN_TRIM(dst_name)+1,KIND=C_CHAR) :: c_dst_name INTERFACE - INTEGER FUNCTION h5ocopy_c(src_loc_id, src_name, src_name_len, & - dst_loc_id, dst_name, dst_name_len, ocpypl_id_default, lcpl_id_default) & - BIND(C,NAME='h5ocopy_c') - IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T + INTEGER(C_INT) FUNCTION H5Ocopy(src_loc_id, src_name, dst_loc_id, dst_name, & + ocpypl_id_default, lcpl_id_default) BIND(C,NAME='H5Ocopy') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T) , INTENT(IN) :: src_loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: src_name - INTEGER(HID_T) , INTENT(IN) :: dst_loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: dst_name - INTEGER(HID_T) , INTENT(IN) :: ocpypl_id_default - INTEGER(HID_T) , INTENT(IN) :: lcpl_id_default - INTEGER(SIZE_T) :: src_name_len, dst_name_len - - END FUNCTION h5ocopy_c + INTEGER(HID_T) , VALUE :: src_loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: src_name + INTEGER(HID_T) , VALUE :: dst_loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: dst_name + INTEGER(HID_T) , VALUE :: ocpypl_id_default + INTEGER(HID_T) , VALUE :: lcpl_id_default + END FUNCTION H5Ocopy END INTERFACE - src_name_len = LEN(src_name) - dst_name_len = LEN(dst_name) + c_src_name = TRIM(src_name)//C_NULL_CHAR + c_dst_name = TRIM(dst_name)//C_NULL_CHAR ocpypl_id_default = H5P_DEFAULT_F + lcpl_id_default = H5P_DEFAULT_F IF(PRESENT(ocpypl_id)) ocpypl_id_default = ocpypl_id - lcpl_id_default = H5P_DEFAULT_F IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id - hdferr = h5ocopy_c(src_loc_id, src_name, src_name_len, & - dst_loc_id, dst_name, dst_name_len, ocpypl_id_default, lcpl_id_default) + hdferr = INT(H5Ocopy(src_loc_id, c_src_name, & + dst_loc_id, c_dst_name, ocpypl_id_default, lcpl_id_default)) END SUBROUTINE h5ocopy_f +!> +!! \ingroup FH5O +!! +!! \brief Asynchronously copies an object in an HDF5 file. +!! +!! \param src_loc_id Object identifier indicating the location of the source object to be copied. +!! \param src_name Name of the source object to be copied. +!! 
\param dst_loc_id Location identifier specifying the destination. +!! \param dst_name Name to be assigned to the new copy. +!! \param ocpypl_id Object copy property list. +!! \param lcpl_id Link creation property list for the new hard link. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Ocopy_async() +!! + SUBROUTINE h5ocopy_async_f(src_loc_id, src_name, dst_loc_id, dst_name, es_id, hdferr, & + ocpypl_id, lcpl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T) , INTENT(IN) :: src_loc_id + CHARACTER(LEN=*), INTENT(IN) :: src_name + INTEGER(HID_T) , INTENT(IN) :: dst_loc_id + CHARACTER(LEN=*), INTENT(IN) :: dst_name + INTEGER(HID_T) , INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: ocpypl_id + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lcpl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line + + INTEGER(HID_T) :: ocpypl_id_default, lcpl_id_default + CHARACTER(LEN=LEN_TRIM(src_name)+1,KIND=C_CHAR) :: c_src_name + CHARACTER(LEN=LEN_TRIM(dst_name)+1,KIND=C_CHAR) :: c_dst_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(C_INT) FUNCTION H5Ocopy_async(file, func, line, src_loc_id, src_name, dst_loc_id, dst_name, & + ocpypl_id_default, lcpl_id_default, es_id) BIND(C,NAME='H5Ocopy_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT) , VALUE :: line + INTEGER(HID_T) , VALUE :: src_loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: src_name + INTEGER(HID_T) , VALUE :: dst_loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: dst_name + INTEGER(HID_T) , VALUE :: ocpypl_id_default + INTEGER(HID_T) , VALUE :: lcpl_id_default + INTEGER(HID_T) , VALUE :: es_id + END FUNCTION H5Ocopy_async + END INTERFACE + + c_src_name = TRIM(src_name)//C_NULL_CHAR + c_dst_name = TRIM(dst_name)//C_NULL_CHAR + + ocpypl_id_default = H5P_DEFAULT_F + lcpl_id_default = H5P_DEFAULT_F + IF(PRESENT(ocpypl_id)) ocpypl_id_default = ocpypl_id + IF(PRESENT(lcpl_id)) lcpl_id_default = lcpl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + hdferr = INT(H5Ocopy_async(file_default, func_default, line_default, & + src_loc_id, c_src_name, & + dst_loc_id, c_dst_name, ocpypl_id_default, lcpl_id_default, es_id)) + + END SUBROUTINE h5ocopy_async_f + !> !! \ingroup FH5O !! @@ -577,7 +788,6 @@ END SUBROUTINE h5oincr_refcount_f !! \param n Object to open. !! \param obj_id An object identifier for the opened object. !! \param hdferr \fortran_error -!! !! \param lapl_id Link access property list. !! !! 
See C API: @ref H5Oopen_by_idx() @@ -585,43 +795,123 @@ END SUBROUTINE h5oincr_refcount_f SUBROUTINE h5oopen_by_idx_f(loc_id, group_name, index_type, order, n, obj_id, & hdferr, lapl_id) IMPLICIT NONE - INTEGER(HID_T) , INTENT(IN) :: loc_id - CHARACTER(LEN=*), INTENT(IN) :: group_name - INTEGER , INTENT(IN) :: index_type - INTEGER , INTENT(IN) :: order - INTEGER(HSIZE_T), INTENT(IN) :: n - INTEGER(HID_T) , INTENT(OUT) :: obj_id - INTEGER , INTENT(OUT) :: hdferr - INTEGER(HID_T) , INTENT(IN) , OPTIONAL :: lapl_id - INTEGER(SIZE_T) :: group_namelen + INTEGER(HID_T) , INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: group_name + INTEGER , INTENT(IN) :: index_type + INTEGER , INTENT(IN) :: order + INTEGER(HSIZE_T), INTENT(IN) :: n + INTEGER(HID_T) , INTENT(OUT) :: obj_id + INTEGER , INTENT(OUT) :: hdferr + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lapl_id + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(group_name)+1,KIND=C_CHAR) :: c_group_name INTERFACE - INTEGER FUNCTION h5oopen_by_idx_c(loc_id, group_name, group_namelen, index_type, order, n, obj_id, lapl_id_default) & - BIND(C,NAME='h5oopen_by_idx_c') - IMPORT :: C_CHAR - IMPORT :: HID_T, SIZE_T, HSIZE_T + INTEGER(HID_T) FUNCTION H5Oopen_by_idx(loc_id, group_name, index_type, order, n, lapl_id_default) & + BIND(C,NAME='H5Oopen_by_idx') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T, HSIZE_T IMPLICIT NONE - INTEGER(HID_T) , INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: group_name - INTEGER(SIZE_T) , INTENT(IN) :: group_namelen - INTEGER , INTENT(IN) :: index_type - INTEGER , INTENT(IN) :: order - INTEGER(HSIZE_T), INTENT(IN) :: n - INTEGER(HID_T) , INTENT(OUT) :: obj_id - INTEGER(HID_T) , INTENT(IN) :: lapl_id_default + INTEGER(HID_T) , VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: group_name + INTEGER(C_INT) , VALUE :: index_type + INTEGER(C_INT) , VALUE :: order + INTEGER(HSIZE_T), VALUE :: n + INTEGER(HID_T) , VALUE :: lapl_id_default + END FUNCTION H5Oopen_by_idx + END INTERFACE + + c_group_name = TRIM(group_name)//C_NULL_CHAR + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + + obj_id = H5Oopen_by_idx(loc_id, c_group_name, INT(index_type, C_INT), INT(order, C_INT), n, lapl_id_default) + + hdferr = 0 + IF(obj_id.LT.0) hdferr = -1 + + END SUBROUTINE H5oopen_by_idx_f + +!> +!! \ingroup FH5O +!! +!! \brief Asynchronously open the nth object in a group. +!! +!! \param loc_id A file or group identifier. +!! \param group_name Name of group, relative to loc_id, in which object is located. +!! \param index_type Type of index by which objects are ordered. +!! \param order Order of iteration within index, NOTE: zero-based. +!! \param n Object to open. +!! \param obj_id An object identifier for the opened object. +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! +!! \param lapl_id Link access property list. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Oopen_by_idx_async() +!! 
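+!! A minimal usage sketch, assuming loc_id and es_id are valid identifiers and that a group "/G1"
+!! exists; it requests the first object in "/G1" when the links are ordered by name:
+!! \code
+!! INTEGER(HID_T) :: obj_id
+!! INTEGER        :: hdferr
+!! CALL h5oopen_by_idx_async_f(loc_id, "/G1", H5_INDEX_NAME_F, H5_ITER_INC_F, &
+!!                             0_HSIZE_T, obj_id, es_id, hdferr)
+!! \endcode
+!!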
+ SUBROUTINE h5oopen_by_idx_async_f(loc_id, group_name, index_type, order, n, obj_id, es_id, & + hdferr, lapl_id, file, func, line) + IMPLICIT NONE + INTEGER(HID_T) , INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: group_name + INTEGER , INTENT(IN) :: index_type + INTEGER , INTENT(IN) :: order + INTEGER(HSIZE_T), INTENT(IN) :: n + INTEGER(HID_T) , INTENT(OUT) :: obj_id + INTEGER(HID_T) , INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lapl_id + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN), OPTIONAL :: line - END FUNCTION h5oopen_by_idx_c + INTEGER(HID_T) :: lapl_id_default + CHARACTER(LEN=LEN_TRIM(group_name)+1,KIND=C_CHAR) :: c_group_name + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + + INTERFACE + INTEGER(HID_T) FUNCTION H5Oopen_by_idx_async(file, func, line, & + loc_id, group_name, index_type, order, n, lapl_id_default, es_id) & + BIND(C,NAME='H5Oopen_by_idx_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T, HSIZE_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT) , VALUE :: line + INTEGER(HID_T) , VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: group_name + INTEGER(C_INT) , VALUE :: index_type + INTEGER(C_INT) , VALUE :: order + INTEGER(HSIZE_T), VALUE :: n + INTEGER(HID_T) , VALUE :: lapl_id_default + INTEGER(HID_T) , VALUE :: es_id + END FUNCTION H5Oopen_by_idx_async END INTERFACE - group_namelen = LEN(group_name) + c_group_name = TRIM(group_name)//C_NULL_CHAR lapl_id_default = H5P_DEFAULT_F IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) - hdferr = h5oopen_by_idx_c(loc_id, group_name, group_namelen, index_type, order, n, obj_id, lapl_id_default) + obj_id = H5Oopen_by_idx_async(file_default, func_default, line_default, & + loc_id, c_group_name, INT(index_type, C_INT), INT(order, C_INT), n, lapl_id_default, es_id) - END SUBROUTINE H5Oopen_by_idx_f + hdferr = 0 + IF(obj_id.LT.0) hdferr = -1 + + END SUBROUTINE H5oopen_by_idx_async_f !> !! \ingroup FH5O @@ -794,40 +1084,105 @@ SUBROUTINE h5oget_info_by_name_f(loc_id, name, object_info, hdferr, lapl_id, fie INTEGER(HID_T) , INTENT(IN) , OPTIONAL :: lapl_id INTEGER , INTENT(IN) , OPTIONAL :: fields - INTEGER(SIZE_T) :: namelen INTEGER(HID_T) :: lapl_id_default TYPE(C_PTR) :: ptr INTEGER :: fields_c + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name - INTERFACE - INTEGER FUNCTION h5oget_info_by_name_c(loc_id, name, namelen, lapl_id_default, object_info, fields) & - BIND(C, NAME='h5oget_info_by_name_c') - IMPORT :: c_char, c_ptr - IMPORT :: HID_T, SIZE_T - IMPLICIT NONE - INTEGER(HID_T) , INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER(SIZE_T) , INTENT(IN) :: namelen - INTEGER(HID_T) , INTENT(IN) :: lapl_id_default - TYPE(C_PTR), VALUE :: object_info - INTEGER , INTENT(IN) :: fields - END FUNCTION h5oget_info_by_name_c - END INTERFACE + ! 
Async -- Not Used -- + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + INTEGER(HID_T) :: es_id = -1 fields_c = H5O_INFO_ALL_F IF(PRESENT(fields)) fields_c = fields - namelen = LEN(name) + c_name = TRIM(name)//C_NULL_CHAR lapl_id_default = H5P_DEFAULT_F IF(PRESENT(lapl_id)) lapl_id_default = lapl_id ptr = C_LOC(object_info) - hdferr = H5Oget_info_by_name_c(loc_id, name, namelen, lapl_id_default, ptr, fields_c) + hdferr = H5Oget_info_by_name_c(loc_id, c_name, lapl_id_default, ptr, fields_c, & + es_id, file_default, func_default, line_default) END SUBROUTINE H5Oget_info_by_name_f +!> +!! \ingroup FH5O +!! +!! \brief Asynchronously retrieves the metadata for an object, identifying the object by location and relative name. +!! +!! \param loc_id File or group identifier specifying location of group in which object is located. +!! \param name Name of group, relative to loc_id. +!! \param object_info Pointer to buffer returning object information, points to variable of datatype TYPE(C_H5O_INFO_T). +!! \param es_id \es_id +!! \param hdferr \fortran_error +!! \param lapl_id Link access property list. +!! \param fields Flags specifying the fields to include in object_info. +!! \param file \fortran_file +!! \param func \fortran_func +!! \param line \fortran_line +!! +!! See C API: @ref H5Oget_info_by_name_async() +!! + SUBROUTINE h5oget_info_by_name_async_f(loc_id, name, object_info, es_id, hdferr, & + lapl_id, fields, file, func, line) + IMPLICIT NONE + INTEGER(HID_T) , INTENT(IN) :: loc_id + CHARACTER(LEN=*), INTENT(IN) :: name + TYPE(C_PTR) , INTENT(INOUT) :: object_info + INTEGER(HID_T) , INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr + INTEGER(HID_T) , INTENT(IN) , OPTIONAL :: lapl_id + INTEGER , INTENT(IN) , OPTIONAL :: fields + TYPE(C_PTR), OPTIONAL :: file + TYPE(C_PTR), OPTIONAL :: func + INTEGER , INTENT(IN) , OPTIONAL :: line + + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + INTEGER(HID_T) :: lapl_id_default + TYPE(C_PTR) :: file_default = C_NULL_PTR + TYPE(C_PTR) :: func_default = C_NULL_PTR + INTEGER(KIND=C_INT) :: line_default = 0 + INTEGER(C_INT) :: fields_c + + INTERFACE + INTEGER(C_INT) FUNCTION H5Oget_info_by_name_async(file, func, line, & + loc_id, name, object_info, fields, lapl_id_default, es_id) BIND(C,NAME='H5Oget_info_by_name_async') + IMPORT :: C_CHAR, C_INT, C_PTR + IMPORT :: HID_T + IMPLICIT NONE + TYPE(C_PTR), VALUE :: file + TYPE(C_PTR), VALUE :: func + INTEGER(C_INT), VALUE :: line + INTEGER(HID_T), VALUE :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + TYPE(C_PTR), VALUE :: object_info + INTEGER(C_INT), VALUE :: fields + INTEGER(HID_T), VALUE :: lapl_id_default + INTEGER(HID_T), VALUE :: es_id + END FUNCTION H5Oget_info_by_name_async + END INTERFACE + + fields_c = INT(H5O_INFO_ALL_F, C_INT) + IF(PRESENT(fields)) fields_c = INT(fields, C_INT) + IF(PRESENT(file)) file_default = file + IF(PRESENT(func)) func_default = func + IF(PRESENT(line)) line_default = INT(line, C_INT) + + c_name = TRIM(name)//C_NULL_CHAR + + lapl_id_default = H5P_DEFAULT_F + IF(PRESENT(lapl_id)) lapl_id_default = lapl_id + + hdferr = H5Oget_info_by_name_async(file_default, func_default, line_default, & + loc_id, c_name, object_info, fields_c, lapl_id_default, es_id) + + END SUBROUTINE H5oget_info_by_name_async_f + !> !! \ingroup FH5O !! 
diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90 index 75d73659abc..75cd3230657 100644 --- a/fortran/src/H5Pff.F90 +++ b/fortran/src/H5Pff.F90 @@ -188,7 +188,7 @@ END FUNCTION h5pinsert_c !> @brief H5FD_subfiling_params_t derived type used in the subfiling VFD. TYPE, BIND(C) :: H5FD_subfiling_params_t - INTEGER(ENUM_T) :: ioc_selection !< Method to select I/O concentrators + INTEGER(C_INT) :: ioc_selection !< Method to select I/O concentrators INTEGER(C_INT64_T) :: stripe_size !< Size (in bytes) of data stripes in subfiles INTEGER(C_INT32_T) :: stripe_count !< Target number of subfiles to use END TYPE H5FD_subfiling_params_t @@ -524,15 +524,13 @@ END SUBROUTINE h5pset_deflate_f !! !! See C API: @ref H5Pget_version() !! - SUBROUTINE h5pget_version_f(prp_id, boot, freelist, & - stab, shhdr, hdferr) + SUBROUTINE h5pget_version_f(prp_id, boot, freelist, stab, shhdr, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: prp_id - INTEGER, DIMENSION(:), INTENT(OUT) :: boot - INTEGER, DIMENSION(:), INTENT(OUT) :: freelist - - INTEGER, DIMENSION(:), INTENT(OUT) :: stab - INTEGER, DIMENSION(:), INTENT(OUT) :: shhdr + INTEGER, DIMENSION(*), INTENT(OUT) :: boot + INTEGER, DIMENSION(*), INTENT(OUT) :: freelist + INTEGER, DIMENSION(*), INTENT(OUT) :: stab + INTEGER, DIMENSION(*), INTENT(OUT) :: shhdr INTEGER, INTENT(OUT) :: hdferr INTERFACE @@ -2698,7 +2696,8 @@ END SUBROUTINE h5pset_fapl_multi_l !! \param memb_map Mapping array. !! \param memb_fapl Property list for each memory usage type. !! \param memb_name Names of member file. -!! \param memb_addr Offsets within the virtual address space, from 0 (zero) to HADDR_MAX_F, at which each type of data storage begins. +!! \param memb_addr Offsets within the virtual address space, from 0 (zero) to HADDR_MAX_F, +!! at which each type of data storage begins. !! \param relax Flag. !! \param hdferr \fortran_error !! @@ -2781,7 +2780,8 @@ END SUBROUTINE h5pset_fapl_multi_s !! \param memb_map Mapping array. !! \param memb_fapl Property list for each memory usage type. !! \param memb_name Names of member file. -!! \param memb_addr Offsets within the virtual address space, from 0 (zero) to HADDR_MAX_F, at which each type of data storage begins. +!! \param memb_addr Offsets within the virtual address space, from 0 (zero) to HADDR_MAX_F, at which +!! each type of data storage begins. !! \param relax Flag. !! \param hdferr \fortran_error !! \param maxlen_out Maximum length for memb_name array element. @@ -2793,7 +2793,7 @@ SUBROUTINE h5pget_fapl_multi_f(prp_id, memb_map, memb_fapl, memb_name, memb_addr INTEGER(HID_T), DIMENSION(*), INTENT(OUT) :: memb_fapl CHARACTER(LEN=*), DIMENSION(*), INTENT(OUT) :: memb_name REAL, DIMENSION(*), INTENT(OUT) :: memb_addr - INTEGER, OPTIONAL, INTENT(OUT) :: maxlen_out + INTEGER, INTENT(OUT), OPTIONAL :: maxlen_out LOGICAL, INTENT(OUT) :: relax INTEGER, INTENT(OUT) :: hdferr @@ -3201,7 +3201,7 @@ SUBROUTINE h5pget_libver_bounds_f(fapl_id, low, high, hdferr) INTEGER, INTENT(OUT) :: low INTEGER, INTENT(OUT) :: high INTEGER, INTENT(OUT) :: hdferr - INTEGER(ENUM_T) :: low_c, high_c + INTEGER(C_INT) :: low_c, high_c INTEGER(C_INT) :: hdferr_c ! ! MS FORTRAN needs explicit interface for C functions called here. 
@@ -3209,11 +3209,11 @@ SUBROUTINE h5pget_libver_bounds_f(fapl_id, low, high, hdferr) INTERFACE INTEGER(C_INT) FUNCTION h5pget_libver_bounds(fapl_id, low, high) & BIND(C,NAME='H5Pget_libver_bounds') - IMPORT :: C_INT, HID_T, ENUM_T + IMPORT :: C_INT, HID_T IMPLICIT NONE INTEGER(HID_T) , INTENT(IN) , VALUE :: fapl_id - INTEGER(ENUM_T), INTENT(OUT) :: low - INTEGER(ENUM_T), INTENT(OUT) :: high + INTEGER(C_INT), INTENT(OUT) :: low + INTEGER(C_INT), INTENT(OUT) :: high END FUNCTION h5pget_libver_bounds END INTERFACE @@ -3252,15 +3252,15 @@ SUBROUTINE h5pset_libver_bounds_f(fapl_id, low, high, hdferr) INTERFACE INTEGER(C_INT) FUNCTION h5pset_libver_bounds(fapl_id, low, high) & BIND(C,NAME='H5Pset_libver_bounds') - IMPORT :: C_INT, HID_T, ENUM_T + IMPORT :: C_INT, HID_T IMPLICIT NONE INTEGER(HID_T), INTENT(IN), VALUE :: fapl_id - INTEGER(ENUM_T), INTENT(IN), VALUE :: low - INTEGER(ENUM_T), INTENT(IN), VALUE :: high + INTEGER(C_INT), INTENT(IN), VALUE :: low + INTEGER(C_INT), INTENT(IN), VALUE :: high END FUNCTION h5pset_libver_bounds END INTERFACE - hdferr_c = h5pset_libver_bounds(fapl_id, INT(low, ENUM_T), INT(high, ENUM_T)) + hdferr_c = h5pset_libver_bounds(fapl_id, INT(low, C_INT), INT(high, C_INT)) hdferr = 0 IF(hdferr_c.LT.0) hdferr = -1 @@ -5523,14 +5523,14 @@ SUBROUTINE h5pset_virtual_view_f(dapl_id, view, hdferr) INTERFACE INTEGER FUNCTION h5pset_virtual_view(dapl_id, view) BIND(C,NAME='H5Pset_virtual_view') - IMPORT :: HID_T, ENUM_T + IMPORT :: C_INT, HID_T IMPLICIT NONE INTEGER(HID_T), INTENT(IN), VALUE :: dapl_id - INTEGER(ENUM_T), INTENT(IN), VALUE :: view + INTEGER(C_INT), INTENT(IN), VALUE :: view END FUNCTION h5pset_virtual_view END INTERFACE - hdferr = INT( h5pset_virtual_view(dapl_id, INT(view,ENUM_T)) ) + hdferr = INT( h5pset_virtual_view(dapl_id, INT(view,C_INT)) ) END SUBROUTINE h5pset_virtual_view_f @@ -5553,13 +5553,13 @@ SUBROUTINE h5pget_virtual_view_f(dapl_id, view, hdferr) INTEGER(HID_T), INTENT(IN) :: dapl_id INTEGER , INTENT(INOUT) :: view INTEGER , INTENT(OUT) :: hdferr - INTEGER(ENUM_T) :: view_enum + INTEGER(C_INT) :: view_enum INTERFACE INTEGER FUNCTION h5pget_virtual_view(dapl_id, view) BIND(C,NAME='H5Pget_virtual_view') - IMPORT :: HID_T, ENUM_T + IMPORT :: C_INT, HID_T IMPLICIT NONE INTEGER(HID_T), INTENT(IN), VALUE :: dapl_id - INTEGER(ENUM_T), INTENT(OUT) :: view + INTEGER(C_INT), INTENT(OUT) :: view END FUNCTION h5pget_virtual_view END INTERFACE @@ -5636,8 +5636,10 @@ END SUBROUTINE h5pget_virtual_printf_gap_f !! !! \brief Sets the mapping between virtual and source datasets. !! -!! \param dcpl_id The identifier of the dataset creation property list that will be used when creating the virtual dataset. -!! \param vspace_id The dataspace identifier with the selection within the virtual dataset applied, possibly an unlimited selection. +!! \param dcpl_id The identifier of the dataset creation property list that will be used when creating the +!! virtual dataset. +!! \param vspace_id The dataspace identifier with the selection within the virtual dataset applied, possibly an +!! unlimited selection. !! \param src_file_name The name of the HDF5 file where the source dataset is located. !! \param src_dset_name The path to the HDF5 dataset in the file specified by src_file_name. !! \param src_space_id The source dataset’s dataspace identifier with a selection applied, possibly an unlimited selection. 
@@ -6051,7 +6053,8 @@ SUBROUTINE h5pget_file_locking_f(fapl_id, use_file_locking, ignore_disabled_lock LOGICAL(C_BOOL) :: c_ignore_flag INTERFACE - INTEGER FUNCTION h5pget_file_locking(fapl_id, use_file_locking, ignore_disabled_locks) BIND(C, NAME='H5Pget_file_locking') + INTEGER FUNCTION h5pget_file_locking(fapl_id, use_file_locking, ignore_disabled_locks) & + BIND(C, NAME='H5Pget_file_locking') IMPORT :: HID_T, C_BOOL IMPLICIT NONE INTEGER(HID_T), INTENT(IN), VALUE :: fapl_id @@ -6090,7 +6093,8 @@ SUBROUTINE h5pset_file_locking_f(fapl_id, use_file_locking, ignore_disabled_lock LOGICAL(C_BOOL) :: c_ignore_flag INTERFACE - INTEGER FUNCTION h5pset_file_locking(fapl_id, use_file_locking, ignore_disabled_locks) BIND(C, NAME='H5Pset_file_locking') + INTEGER FUNCTION h5pset_file_locking(fapl_id, use_file_locking, ignore_disabled_locks) & + BIND(C, NAME='H5Pset_file_locking') IMPORT :: HID_T, C_BOOL IMPLICIT NONE INTEGER(HID_T), INTENT(IN), VALUE :: fapl_id diff --git a/fortran/src/H5Rff.F90 b/fortran/src/H5Rff.F90 index 3aed34a4551..1b2cd8aea09 100644 --- a/fortran/src/H5Rff.F90 +++ b/fortran/src/H5Rff.F90 @@ -497,7 +497,7 @@ SUBROUTINE h5rget_name_object_f(loc_id, ref, name, hdferr, size) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id TYPE(hobj_ref_t_f), INTENT(IN), TARGET :: ref - INTEGER(SIZE_T), OPTIONAL, INTENT(OUT) :: size + INTEGER(SIZE_T), INTENT(OUT), OPTIONAL :: size CHARACTER(LEN=*), INTENT(INOUT) :: name INTEGER, INTENT(OUT) :: hdferr @@ -533,7 +533,7 @@ SUBROUTINE h5rget_name_region_f(loc_id, ref, name, hdferr, size) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id TYPE(hdset_reg_ref_t_f), INTENT(IN), TARGET :: ref - INTEGER(SIZE_T), OPTIONAL, INTENT(OUT) :: size + INTEGER(SIZE_T), INTENT(OUT), OPTIONAL :: size CHARACTER(LEN=*), INTENT(INOUT) :: name INTEGER, INTENT(OUT) :: hdferr INTEGER(SIZE_T) :: size_default @@ -580,7 +580,7 @@ SUBROUTINE h5rget_name_ptr_f(& TYPE(C_PTR), INTENT(IN) :: ref CHARACTER(LEN=*), INTENT(INOUT) :: name INTEGER, INTENT(OUT) :: hdferr - INTEGER(SIZE_T), OPTIONAL, INTENT(OUT) :: size + INTEGER(SIZE_T), INTENT(OUT), OPTIONAL :: size INTEGER(SIZE_T) :: size_default INTEGER(SIZE_T) :: name_len @@ -601,21 +601,20 @@ END SUBROUTINE h5rget_name_ptr_f !! !! \brief Retrieves the type of object that an object reference points to. !! -!! loc_id - Identifier for the dataset containing the reference or for the group that dataset is in. -!! ref_type - Type of reference to query. -!! ref - Reference to query. -!! obj_type - Type of referenced object: -!! \li H5G_UNKNOWN_F -!! \li H H5G_GROUP_F -!! \li H H5G_DATASET_F -!! \li H H5G_TYPE_F -!! hdferr - \fortran_error +!! \param loc_id Identifier for the dataset containing the reference or for the group that dataset is in. +!! \param ref_type Type of reference to query. +!! \param ref Reference to query. +!! \param obj_type Type of referenced object: +!! \li H5G_UNKNOWN_F +!! \li H5G_GROUP_F +!! \li H5G_DATASET_F +!! \li H5G_TYPE_F +!! \param hdferr \fortran_error !! !! See C API: @ref H5Rget_obj_type3() !! 
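+!! A minimal usage sketch, assuming ref is an object reference previously read from the dataset
+!! identified by dset_id:
+!! \code
+!! INTEGER :: obj_type, hdferr
+!! CALL h5rget_obj_type_f(dset_id, H5R_OBJECT_F, ref, obj_type, hdferr)
+!! IF(obj_type .EQ. H5G_DATASET_F) THEN
+!!    ! the referenced object is a dataset
+!! END IF
+!! \endcode
+!!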
SUBROUTINE h5rget_obj_type_f(loc_id, ref_type, ref, obj_type, hdferr) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id INTEGER, INTENT(IN) :: ref_type diff --git a/fortran/src/H5Sff.F90 b/fortran/src/H5Sff.F90 index 9a2f89a636c..72627d96458 100644 --- a/fortran/src/H5Sff.F90 +++ b/fortran/src/H5Sff.F90 @@ -58,10 +58,10 @@ SUBROUTINE h5screate_simple_f(rank, dims, space_id, hdferr, maxdims) IMPLICIT NONE INTEGER, INTENT(IN) :: rank - INTEGER(HSIZE_T), INTENT(IN) :: dims(rank) + INTEGER(HSIZE_T), INTENT(IN), DIMENSION(1:rank) :: dims INTEGER(HID_T), INTENT(OUT) :: space_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HSIZE_T), OPTIONAL, INTENT(IN) :: maxdims(rank) + INTEGER(HSIZE_T), INTENT(IN), OPTIONAL, DIMENSION(1:rank) :: maxdims INTEGER(HSIZE_T), ALLOCATABLE, DIMENSION(:) :: f_maxdims INTERFACE @@ -81,9 +81,9 @@ END FUNCTION h5screate_simple_c RETURN ENDIF IF (PRESENT(maxdims)) THEN - f_maxdims = maxdims + f_maxdims(1:rank) = maxdims(1:rank) ELSE - f_maxdims = dims + f_maxdims(1:rank) = dims(1:rank) ENDIF hdferr = h5screate_simple_c(rank, dims, f_maxdims, space_id) DEALLOCATE(f_maxdims) @@ -823,8 +823,8 @@ SUBROUTINE h5sselect_hyperslab_f(space_id, OPERATOR, start, count, & INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: start INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: count INTEGER, INTENT(OUT) :: hdferr - INTEGER(HSIZE_T), DIMENSION(:), OPTIONAL, INTENT(IN) :: stride - INTEGER(HSIZE_T), DIMENSION(:), OPTIONAL, INTENT(IN) :: BLOCK + INTEGER(HSIZE_T), DIMENSION(:), INTENT(IN), OPTIONAL :: stride + INTEGER(HSIZE_T), DIMENSION(:), INTENT(IN), OPTIONAL :: BLOCK INTEGER(HSIZE_T), DIMENSION(:), ALLOCATABLE :: def_block INTEGER(HSIZE_T), DIMENSION(:), ALLOCATABLE :: def_stride INTEGER :: rank @@ -1225,7 +1225,7 @@ SUBROUTINE h5sencode_f(obj_id, buf, nalloc, hdferr, fapl_id) CHARACTER(LEN=*), INTENT(OUT) :: buf INTEGER(SIZE_T), INTENT(INOUT) :: nalloc INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: fapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: fapl_id INTEGER(HID_T) :: fapl_id_default INTERFACE diff --git a/fortran/src/H5Tff.F90 b/fortran/src/H5Tff.F90 index 006aa799886..84b96541947 100644 --- a/fortran/src/H5Tff.F90 +++ b/fortran/src/H5Tff.F90 @@ -80,7 +80,7 @@ SUBROUTINE h5topen_f(loc_id, name, type_id, hdferr, tapl_id) CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(HID_T), INTENT(OUT) :: type_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: tapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: tapl_id INTEGER :: namelen ! Name length INTEGER(HID_T) :: tapl_id_default @@ -126,9 +126,9 @@ SUBROUTINE h5tcommit_f(loc_id, name, type_id, hdferr, & CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(HID_T), INTENT(IN) :: type_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: lcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: tcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: tapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: tcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: tapl_id INTEGER :: namelen ! 
Name length @@ -1793,8 +1793,8 @@ SUBROUTINE h5tcommit_anon_f(loc_id, dtype_id, hdferr, tcpl_id, tapl_id) INTEGER(HID_T), INTENT(IN) :: loc_id INTEGER(HID_T), INTENT(IN) :: dtype_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: tcpl_id - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: tapl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: tcpl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: tapl_id INTEGER(HID_T) :: tcpl_id_default INTEGER(HID_T) :: tapl_id_default diff --git a/fortran/src/H5VLff.F90 b/fortran/src/H5VLff.F90 index 11ac349e3bd..3b451d006ed 100644 --- a/fortran/src/H5VLff.F90 +++ b/fortran/src/H5VLff.F90 @@ -64,7 +64,8 @@ SUBROUTINE H5VLregister_connector_by_name_f(name, vol_id, hdferr, vipl_id) CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(HID_T), INTENT(OUT) :: vol_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: vipl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: vipl_id + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name INTEGER(HID_T) :: vipl_id_default @@ -73,8 +74,8 @@ INTEGER(HID_T) FUNCTION H5VLregister_connector_by_name(name, vipl_id) & BIND(C,NAME='H5VLregister_connector_by_name') IMPORT :: C_CHAR IMPORT :: HID_T - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER(HID_T), INTENT(IN), VALUE :: vipl_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(HID_T), VALUE :: vipl_id END FUNCTION H5VLregister_connector_by_name END INTERFACE @@ -105,7 +106,7 @@ SUBROUTINE H5VLregister_connector_by_value_f(connector_value, vol_id, hdferr, vi INTEGER, INTENT(IN) :: connector_value INTEGER(HID_T), INTENT(OUT) :: vol_id INTEGER, INTENT(OUT) :: hdferr - INTEGER(HID_T), OPTIONAL, INTENT(IN) :: vipl_id + INTEGER(HID_T), INTENT(IN), OPTIONAL :: vipl_id INTEGER(HID_T) :: vipl_id_default INTERFACE @@ -114,7 +115,7 @@ INTEGER(HID_T) FUNCTION H5VLregister_connector_by_value(connector_value, vipl_id IMPORT :: HID_T IMPORT :: C_INT INTEGER(C_INT), VALUE :: connector_value - INTEGER(HID_T), INTENT(IN), VALUE :: vipl_id + INTEGER(HID_T), VALUE :: vipl_id END FUNCTION H5VLregister_connector_by_value END INTERFACE @@ -151,7 +152,7 @@ SUBROUTINE H5VLis_connector_registered_by_name_f(name, registered, hdferr) INTEGER(C_INT) FUNCTION H5VLis_connector_registered_by_name(name) BIND(C,NAME='H5VLis_connector_registered_by_name') IMPORT :: C_CHAR IMPORT :: C_INT - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name END FUNCTION H5VLis_connector_registered_by_name END INTERFACE @@ -219,7 +220,7 @@ SUBROUTINE H5VLget_connector_id_f(obj_id, vol_id, hdferr) INTERFACE INTEGER(HID_T) FUNCTION H5VLget_connector_id(obj_id) BIND(C,NAME='H5VLget_connector_id') IMPORT :: HID_T - INTEGER(HID_T), INTENT(IN) :: obj_id + INTEGER(HID_T), VALUE :: obj_id END FUNCTION H5VLget_connector_id END INTERFACE @@ -254,7 +255,7 @@ SUBROUTINE H5VLget_connector_id_by_name_f(name, vol_id, hdferr) INTEGER(HID_T) FUNCTION H5VLget_connector_id_by_name(name) BIND(C,NAME='H5VLget_connector_id_by_name') IMPORT :: C_CHAR IMPORT :: HID_T - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name END FUNCTION H5VLget_connector_id_by_name END INTERFACE @@ -327,9 +328,9 @@ SUBROUTINE H5VLget_connector_name_f(obj_id, name, hdferr, name_len) INTEGER(SIZE_T) FUNCTION H5VLget_connector_name(obj_id, name, size) BIND(C,NAME='H5VLget_connector_name') IMPORT :: HID_T, SIZE_T, C_PTR, C_CHAR IMPLICIT NONE - INTEGER(HID_T) , INTENT(IN), VALUE :: obj_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), 
INTENT(OUT) :: name - INTEGER(SIZE_T), INTENT(IN), VALUE :: size + INTEGER(HID_T) , VALUE :: obj_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + INTEGER(SIZE_T), VALUE :: size END FUNCTION H5VLget_connector_name END INTERFACE @@ -367,7 +368,7 @@ SUBROUTINE H5VLclose_f(vol_id, hdferr) INTERFACE INTEGER FUNCTION H5VLclose(vol_id) BIND(C, NAME='H5VLclose') IMPORT :: HID_T - INTEGER(HID_T), INTENT(IN), VALUE :: vol_id + INTEGER(HID_T), VALUE :: vol_id END FUNCTION H5VLclose END INTERFACE @@ -393,7 +394,7 @@ SUBROUTINE H5VLunregister_connector_f(plugin_id, hdferr) INTERFACE INTEGER FUNCTION H5VLunregister_connector(plugin_id) BIND(C, NAME='H5VLunregister_connector') IMPORT :: HID_T - INTEGER(HID_T), INTENT(IN), VALUE :: plugin_id + INTEGER(HID_T), VALUE :: plugin_id END FUNCTION H5VLunregister_connector END INTERFACE diff --git a/fortran/src/H5_buildiface.F90 b/fortran/src/H5_buildiface.F90 index c473e515790..4572b4c5ae7 100644 --- a/fortran/src/H5_buildiface.F90 +++ b/fortran/src/H5_buildiface.F90 @@ -717,9 +717,9 @@ PROGRAM H5_buildiface WRITE(11,'(A)') ' INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims' WRITE(11,'(A)') ' REAL(KIND='//TRIM(ADJUSTL(chr2))//'),INTENT(INOUT)'//TRIM(rank_dim_line(j))//', TARGET :: buf' WRITE(11,'(A)') ' INTEGER, INTENT(OUT) :: hdferr' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp' WRITE(11,'(A)') ' INTEGER(HID_T) :: xfer_prp_default' WRITE(11,'(A)') ' INTEGER(HID_T) :: mem_space_id_default' WRITE(11,'(A)') ' INTEGER(HID_T) :: file_space_id_default' @@ -756,9 +756,9 @@ PROGRAM H5_buildiface WRITE(11,'(A)') ' INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims' WRITE(11,'(A)') ' INTEGER(KIND='//TRIM(ADJUSTL(chr2))//'),INTENT(INOUT)'//TRIM(rank_dim_line(j))//', TARGET :: buf' WRITE(11,'(A)') ' INTEGER, INTENT(OUT) :: hdferr' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp' WRITE(11,'(A)') ' INTEGER(HID_T) :: xfer_prp_default' WRITE(11,'(A)') ' INTEGER(HID_T) :: mem_space_id_default' WRITE(11,'(A)') ' INTEGER(HID_T) :: file_space_id_default' @@ -790,9 +790,9 @@ PROGRAM H5_buildiface WRITE(11,'(A)') ' INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims' WRITE(11,'(A)') ' CHARACTER(LEN=*), INTENT(INOUT)'//TRIM(rank_dim_line(j))//', TARGET :: buf' WRITE(11,'(A)') ' INTEGER, INTENT(OUT) :: hdferr' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp' WRITE(11,'(A)') ' INTEGER(HID_T) :: xfer_prp_default' WRITE(11,'(A)') ' 
INTEGER(HID_T) :: mem_space_id_default' WRITE(11,'(A)') ' INTEGER(HID_T) :: file_space_id_default' @@ -829,9 +829,9 @@ PROGRAM H5_buildiface WRITE(11,'(A)') ' INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims' WRITE(11,'(A)') ' REAL(KIND='//TRIM(ADJUSTL(chr2))//'),INTENT(IN)'//TRIM(rank_dim_line(j))//', TARGET :: buf' WRITE(11,'(A)') ' INTEGER, INTENT(OUT) :: hdferr' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp' WRITE(11,'(A)') ' INTEGER(HID_T) :: xfer_prp_default' WRITE(11,'(A)') ' INTEGER(HID_T) :: mem_space_id_default' WRITE(11,'(A)') ' INTEGER(HID_T) :: file_space_id_default' @@ -867,9 +867,9 @@ PROGRAM H5_buildiface WRITE(11,'(A)') ' INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims' WRITE(11,'(A)') ' INTEGER(KIND='//TRIM(ADJUSTL(chr2))//'),INTENT(IN)'//TRIM(rank_dim_line(j))//', TARGET :: buf' WRITE(11,'(A)') ' INTEGER, INTENT(OUT) :: hdferr' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp' WRITE(11,'(A)') ' INTEGER(HID_T) :: xfer_prp_default' WRITE(11,'(A)') ' INTEGER(HID_T) :: mem_space_id_default' WRITE(11,'(A)') ' INTEGER(HID_T) :: file_space_id_default' @@ -900,9 +900,9 @@ PROGRAM H5_buildiface WRITE(11,'(A)') ' INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims' WRITE(11,'(A)') ' CHARACTER(LEN=*),INTENT(IN)'//TRIM(rank_dim_line(j))//', TARGET :: buf' WRITE(11,'(A)') ' INTEGER, INTENT(OUT) :: hdferr' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id' - WRITE(11,'(A)') ' INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id' + WRITE(11,'(A)') ' INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp' WRITE(11,'(A)') ' INTEGER(HID_T) :: xfer_prp_default' WRITE(11,'(A)') ' INTEGER(HID_T) :: mem_space_id_default' WRITE(11,'(A)') ' INTEGER(HID_T) :: file_space_id_default' diff --git a/fortran/src/H5_f.c b/fortran/src/H5_f.c index 90ca7d6f8b7..3e1b65da3c7 100644 --- a/fortran/src/H5_f.c +++ b/fortran/src/H5_f.c @@ -386,6 +386,7 @@ h5close_types_c(hid_t_f *types, int_f *lentypes, hid_t_f *floatingtypes, int_f * * h5d_size_flags - H5D interface flags of type size_t * h5e_flags - H5E interface flags * h5e_hid_flags - H5E interface flags of type hid_t + * h5es_flags - H5ES interface flags * h5f_flags - H5F interface flags * h5fd_flags - H5FD interface flags * h5fd_hid_flags - H5FD interface flags of type hid_t @@ -424,10 +425,11 @@ h5close_types_c(hid_t_f *types, int_f *lentypes, hid_t_f *floatingtypes, int_f * */ int_f h5init_flags_c(int_f *h5d_flags, size_t_f *h5d_size_flags, int_f *h5e_flags, hid_t_f *h5e_hid_flags, - int_f *h5f_flags, int_f *h5fd_flags, hid_t_f *h5fd_hid_flags, int_f 
*h5g_flags, - int_f *h5i_flags, int_f *h5l_flags, int_f *h5o_flags, hid_t_f *h5p_flags, int_f *h5p_flags_int, - int_f *h5r_flags, int_f *h5s_flags, hid_t_f *h5s_hid_flags, hsize_t_f *h5s_hsize_flags, - int_f *h5t_flags, int_f *h5z_flags, int_f *h5_generic_flags, haddr_t_f *h5_haddr_generic_flags) + H5ES_status_t *h5es_flags, hid_t_f *h5es_hid_flags, int_f *h5f_flags, int_f *h5fd_flags, + hid_t_f *h5fd_hid_flags, int_f *h5g_flags, int_f *h5i_flags, int_f *h5l_flags, + int_f *h5o_flags, hid_t_f *h5p_flags, int_f *h5p_flags_int, int_f *h5r_flags, int_f *h5s_flags, + hid_t_f *h5s_hid_flags, hsize_t_f *h5s_hsize_flags, int_f *h5t_flags, int_f *h5z_flags, + int_f *h5_generic_flags, haddr_t_f *h5_haddr_generic_flags) /******/ { /* @@ -475,6 +477,15 @@ h5init_flags_c(int_f *h5d_flags, size_t_f *h5d_size_flags, int_f *h5e_flags, hid h5e_flags[1] = (int_f)H5E_MINOR; h5e_flags[2] = (int_f)H5E_WALK_UPWARD; h5e_flags[3] = (int_f)H5E_WALK_DOWNWARD; + /* + * H5ES flags + */ + h5es_hid_flags[0] = (hid_t_f)H5ES_NONE; + + h5es_flags[0] = H5ES_STATUS_IN_PROGRESS; + h5es_flags[1] = H5ES_STATUS_SUCCEED; + h5es_flags[2] = H5ES_STATUS_CANCELED; + h5es_flags[3] = H5ES_STATUS_FAIL; /* * H5F flags diff --git a/fortran/src/H5_ff.F90 b/fortran/src/H5_ff.F90 index 31e0d28ab26..f952cac6b70 100644 --- a/fortran/src/H5_ff.F90 +++ b/fortran/src/H5_ff.F90 @@ -89,6 +89,13 @@ MODULE H5LIB INTEGER, PARAMETER :: H5E_HID_FLAGS_LEN = 1 INTEGER(HID_T), DIMENSION(1:H5E_HID_FLAGS_LEN) :: H5E_hid_flags ! + ! H5ES flags declaration + ! + INTEGER, PARAMETER :: H5ES_FLAGS_LEN = 4 + INTEGER, DIMENSION(1:H5ES_FLAGS_LEN) :: H5ES_flags + INTEGER, PARAMETER :: H5ES_HID_FLAGS_LEN = 1 + INTEGER(HID_T), DIMENSION(1:H5ES_HID_FLAGS_LEN) :: H5ES_hid_flags + ! ! H5FD flags declaration ! INTEGER, PARAMETER :: H5FD_FLAGS_LEN = 22 @@ -186,6 +193,8 @@ INTEGER FUNCTION h5init_flags_c(i_H5D_flags, & i_H5D_size_flags,& i_H5E_flags, & i_H5E_hid_flags, & + i_H5ES_flags, & + i_H5ES_hid_flags, & i_H5F_flags, & i_H5FD_flags, & i_H5FD_hid_flags, & @@ -207,6 +216,7 @@ INTEGER FUNCTION h5init_flags_c(i_H5D_flags, & IMPORT :: HID_T, SIZE_T, HSIZE_T, HADDR_T IMPORT :: H5D_FLAGS_LEN, H5D_SIZE_FLAGS_LEN, & H5E_FLAGS_LEN, H5E_HID_FLAGS_LEN, & + H5ES_FLAGS_LEN, H5ES_HID_FLAGS_LEN, & H5F_FLAGS_LEN, H5G_FLAGS_LEN, H5FD_FLAGS_LEN, & H5FD_HID_FLAGS_LEN, H5I_FLAGS_LEN, H5L_FLAGS_LEN, & H5O_FLAGS_LEN, H5P_FLAGS_LEN, H5P_FLAGS_INT_LEN, & @@ -217,6 +227,8 @@ INTEGER FUNCTION h5init_flags_c(i_H5D_flags, & INTEGER(SIZE_T) , DIMENSION(1:H5D_SIZE_FLAGS_LEN) :: i_H5D_size_flags INTEGER , DIMENSION(1:H5E_FLAGS_LEN) :: i_H5E_flags INTEGER(HID_T) , DIMENSION(1:H5E_HID_FLAGS_LEN) :: i_H5E_hid_flags + INTEGER , DIMENSION(1:H5ES_FLAGS_LEN) :: i_H5ES_flags + INTEGER(HID_T) , DIMENSION(1:H5ES_HID_FLAGS_LEN) :: i_H5ES_hid_flags INTEGER , DIMENSION(1:H5F_FLAGS_LEN) :: i_H5F_flags INTEGER , DIMENSION(1:H5G_FLAGS_LEN) :: i_H5G_flags INTEGER , DIMENSION(1:H5FD_FLAGS_LEN) :: i_H5FD_flags @@ -244,7 +256,7 @@ INTEGER FUNCTION h5init1_flags_c( i_H5LIB_flags ) & END FUNCTION h5init1_flags_c END INTERFACE - + error = 0 ! Check if H5open_f has already been called. If so, skip doing it again. IF(H5OPEN_NUM_OBJ .NE. 0) RETURN @@ -303,6 +315,8 @@ END FUNCTION h5init1_flags_c H5D_size_flags, & H5E_flags, & H5E_hid_flags, & + H5ES_flags, & + H5ES_hid_flags, & H5F_flags, & H5FD_flags, & H5FD_hid_flags, & @@ -421,6 +435,19 @@ END FUNCTION h5init1_flags_c H5E_MINOR_F = H5E_flags(2) H5E_WALK_UPWARD_F = H5E_flags(3) H5E_WALK_DOWNWARD_F = H5E_flags(4) + ! + ! H5ES flags + ! 
+ H5ES_NONE_F = H5ES_hid_flags(1) + + H5ES_STATUS_IN_PROGRESS_F = INT(H5ES_flags(1)) + H5ES_STATUS_SUCCEED_F = INT(H5ES_flags(2)) + H5ES_STATUS_CANCELED_F = INT(H5ES_flags(3)) + H5ES_STATUS_FAIL_F = INT(H5ES_flags(4)) + + H5ES_WAIT_FOREVER_F = HUGE(0_C_INT64_T) + H5ES_WAIT_NONE_F = 0_C_INT64_T + ! ! H5FD flags ! diff --git a/fortran/src/H5config_f.inc.cmake b/fortran/src/H5config_f.inc.cmake index 46dfb690aa6..34fb091c787 100644 --- a/fortran/src/H5config_f.inc.cmake +++ b/fortran/src/H5config_f.inc.cmake @@ -76,3 +76,9 @@ ! Fortran compiler id #define H5_Fortran_COMPILER_ID @CMAKE_Fortran_COMPILER_ID@ + +! Define if deprecated public API symbols are disabled +#cmakedefine01 H5_NO_DEPRECATED_SYMBOLS +#if H5_NO_DEPRECATED_SYMBOLS == 0 +#undef H5_NO_DEPRECATED_SYMBOLS +#endif \ No newline at end of file diff --git a/fortran/src/H5config_f.inc.in b/fortran/src/H5config_f.inc.in index 0ce33eccd0e..7fb76e12449 100644 --- a/fortran/src/H5config_f.inc.in +++ b/fortran/src/H5config_f.inc.in @@ -74,3 +74,6 @@ ! Fortran compiler name #undef Fortran_COMPILER_ID +! Define if deprecated public API symbols are disabled +#undef NO_DEPRECATED_SYMBOLS + diff --git a/fortran/src/H5f90global.F90 b/fortran/src/H5f90global.F90 index 30bab33cd9b..984cae91467 100644 --- a/fortran/src/H5f90global.F90 +++ b/fortran/src/H5f90global.F90 @@ -28,15 +28,8 @@ MODULE H5GLOBAL IMPLICIT NONE - ! Enumerate data type that is interoperable with C. - ENUM, BIND(C) - ENUMERATOR :: enum_dtype - END ENUM - !> \addtogroup FH5 !> @{ - INTEGER, PARAMETER :: ENUM_T = KIND(enum_dtype) !< Enumerate data type that is interoperable with C. - ! Parameters used in the function 'h5kind_to_type' located in H5_ff.F90. ! The flag is used to tell the function whether the kind input variable ! is for a REAL or INTEGER data type. @@ -382,10 +375,10 @@ MODULE H5GLOBAL INTEGER :: H5D_VDS_FIRST_MISSING_F !< H5D_VDS_FIRST_MISSING INTEGER :: H5D_VDS_LAST_AVAILABLE_F !< H5D_VDS_LAST_AVAILABLE INTEGER :: H5D_VIRTUAL_F !< H5D_VIRTUAL -!> @} ! ! H5E flags declaration ! +!> @} !DEC$if defined(BUILD_HDF5_DLL) !DEC$ATTRIBUTES DLLEXPORT :: H5E_DEFAULT_F !DEC$ATTRIBUTES DLLEXPORT :: H5E_MAJOR_F @@ -400,6 +393,28 @@ MODULE H5GLOBAL INTEGER :: H5E_MINOR_F !< H5E_MINOR INTEGER :: H5E_WALK_UPWARD_F !< H5E_WALK_UPWARD INTEGER :: H5E_WALK_DOWNWARD_F !< H5E_WALK_DOWNWARD +!> @} + ! + ! H5ES flags declaration + ! + !DEC$if defined(BUILD_HDF5_DLL) + !DEC$ATTRIBUTES DLLEXPORT :: H5ES_NONE_F + !DEC$ATTRIBUTES DLLEXPORT :: H5ES_STATUS_IN_PROGRESS_F + !DEC$ATTRIBUTES DLLEXPORT :: H5ES_STATUS_SUCCEED_F + !DEC$ATTRIBUTES DLLEXPORT :: H5ES_STATUS_CANCELED_F + !DEC$ATTRIBUTES DLLEXPORT :: H5ES_STATUS_FAIL_F + !DEC$ATTRIBUTES DLLEXPORT :: H5ES_WAIT_FOREVER_F + !DEC$ATTRIBUTES DLLEXPORT :: H5ES_WAIT_NONE_F + !DEC$endif +!> \addtogroup FH5ES +!> @{ + INTEGER(HID_T) :: H5ES_NONE_F !< H5ES_NONE + INTEGER :: H5ES_STATUS_IN_PROGRESS_F !< H5ES_STATUS_IN_PROGRESS + INTEGER :: H5ES_STATUS_SUCCEED_F !< H5ES_STATUS_SUCCEED + INTEGER :: H5ES_STATUS_CANCELED_F !< H5ES_STATUS_CANCELED + INTEGER :: H5ES_STATUS_FAIL_F !< H5ES_STATUS_FAIL + INTEGER(C_INT64_T) :: H5ES_WAIT_FOREVER_F !< H5ES_WAIT_FOREVER + INTEGER(C_INT64_T) :: H5ES_WAIT_NONE_F !< H5ES_WAIT_NONE !> @} ! ! 
H5FD file drivers flags declaration diff --git a/fortran/src/H5f90proto.h b/fortran/src/H5f90proto.h index f2a84193b17..11addfa8ce5 100644 --- a/fortran/src/H5f90proto.h +++ b/fortran/src/H5f90proto.h @@ -353,8 +353,8 @@ H5_FCDLL int_f h5oget_info_c(hid_t_f *object_id, H5O_info_t_f *object_info, int_ H5_FCDLL int_f h5oget_info_by_idx_c(hid_t_f *loc_id, _fcd group_name, size_t_f *namelen, int_f *index_field, int_f *order, hsize_t_f *n, hid_t_f *lapl_id, H5O_info_t_f *object_info, int_f *fields); -H5_FCDLL int_f h5oget_info_by_name_c(hid_t_f *loc_id, _fcd name, size_t_f *namelen, hid_t_f *lapl_id, - H5O_info_t_f *object_info, int_f *fields); +H5_FCDLL int_f h5oget_info_by_name_c(hid_t_f *loc_id, char *name, hid_t_f *lapl_id, H5O_info_t_f *object_info, + int_f *fields, hid_t_f *es_id, char *file, char *func, int_f *line); H5_FCDLL int_f h5ocopy_c(hid_t_f *src_loc_id, _fcd src_name, size_t_f *src_name_len, hid_t_f *dst_loc_id, _fcd dst_name, size_t_f *dst_name_len, hid_t_f *ocpypl_id, hid_t_f *lcpl_id); H5_FCDLL int_f h5odecr_refcount_c(hid_t_f *object_id); @@ -580,12 +580,12 @@ H5_FCDLL int_f h5init_types_c(hid_t_f *types, hid_t_f *floatingtypes, hid_t_f *i H5_FCDLL int_f h5close_types_c(hid_t_f *types, int_f *lentypes, hid_t_f *floatingtypes, int_f *floatinglen, hid_t_f *integertypes, int_f *integerlen); H5_FCDLL int_f h5init_flags_c(int_f *h5d_flags, size_t_f *h5d_size_flags, int_f *h5e_flags, - hid_t_f *h5e_hid_flags, int_f *h5f_flags, int_f *h5fd_flags, - hid_t_f *h5fd_hid_flags, int_f *h5g_flags, int_f *h5i_flags, int_f *h5l_flags, - int_f *h5o_flags, hid_t_f *h5p_flags, int_f *h5p_flags_int, int_f *h5r_flags, - int_f *h5s_flags, hid_t_f *h5s_hid_flags, hsize_t_f *h5s_hsize_flags, - int_f *h5t_flags, int_f *h5z_flags, int_f *h5_generic_flags, - haddr_t_f *h5_haddr_generic_flags); + hid_t_f *h5e_hid_flags, H5ES_status_t *h5es_flags, hid_t_f *h5es_hid_flags, + int_f *h5f_flags, int_f *h5fd_flags, hid_t_f *h5fd_hid_flags, int_f *h5g_flags, + int_f *h5i_flags, int_f *h5l_flags, int_f *h5o_flags, hid_t_f *h5p_flags, + int_f *h5p_flags_int, int_f *h5r_flags, int_f *h5s_flags, + hid_t_f *h5s_hid_flags, hsize_t_f *h5s_hsize_flags, int_f *h5t_flags, + int_f *h5z_flags, int_f *h5_generic_flags, haddr_t_f *h5_haddr_generic_flags); H5_FCDLL int_f h5init1_flags_c(int_f *h5lib_flags); H5_FCDLL int_f h5get_libversion_c(int_f *majnum, int_f *minnum, int_f *relnum); H5_FCDLL int_f h5check_version_c(int_f *majnum, int_f *minnum, int_f *relnum); diff --git a/fortran/src/HDF5.F90 b/fortran/src/HDF5.F90 index 9fe6e192317..faedc40a05b 100644 --- a/fortran/src/HDF5.F90 +++ b/fortran/src/HDF5.F90 @@ -28,6 +28,7 @@ MODULE HDF5 USE H5F USE H5G USE H5E + USE H5ES USE H5I USE H5L USE H5S diff --git a/fortran/src/Makefile.am b/fortran/src/Makefile.am index 7d4154e722f..d42a41d2c9f 100644 --- a/fortran/src/Makefile.am +++ b/fortran/src/Makefile.am @@ -41,7 +41,7 @@ endif # Source files for the library. 
libhdf5_fortran_la_SOURCES=H5fortran_types.F90 H5f90global.F90 \ - H5_ff.F90 H5Aff.F90 H5Dff.F90 H5Eff.F90 \ + H5_ff.F90 H5Aff.F90 H5Dff.F90 H5Eff.F90 H5ESff.F90 \ H5Fff.F90 H5Gff.F90 H5Iff.F90 H5Lff.F90 H5Off.F90 H5Pff.F90 H5Rff.F90 H5Sff.F90 \ H5Tff.F90 H5VLff.F90 H5Zff.F90 H5_gen.F90 H5fortkit.F90 \ H5f90kit.c H5_f.c H5Af.c H5Df.c H5Ef.c H5Ff.c H5Gf.c \ @@ -143,10 +143,11 @@ H5f90global.lo: $(srcdir)/H5f90global.F90 H5fortran_types.lo H5_buildiface.lo: $(srcdir)/H5_buildiface.F90 H5_ff.lo: $(srcdir)/H5_ff.F90 H5Fff.lo H5f90global.lo H5Aff.lo: $(srcdir)/H5Aff.F90 H5f90global.lo -H5Dff.lo: $(srcdir)/H5Dff.F90 H5f90global.lo H5_ff.lo +H5Dff.lo: $(srcdir)/H5Dff.F90 H5f90global.lo H5_ff.lo H5Sff.lo H5Eff.lo: $(srcdir)/H5Eff.F90 H5f90global.lo +H5ESff.lo: $(srcdir)/H5ESff.F90 H5f90global.lo H5Fff.lo: $(srcdir)/H5Fff.F90 H5f90global.lo -H5Gff.lo: $(srcdir)/H5Gff.F90 H5f90global.lo +H5Gff.lo: $(srcdir)/H5Gff.F90 H5f90global.lo H5Pff.lo H5Iff.lo: $(srcdir)/H5Iff.F90 H5f90global.lo H5Lff.lo: $(srcdir)/H5Lff.F90 H5f90global.lo H5Off.lo: $(srcdir)/H5Off.F90 H5f90global.lo @@ -159,7 +160,7 @@ H5Zff.lo: $(srcdir)/H5Zff.F90 H5f90global.lo H5_gen.lo: H5_gen.F90 H5f90global.lo H5Aff.lo H5Dff.lo H5Pff.lo HDF5.lo: $(srcdir)/HDF5.F90 H5f90global.lo H5_ff.lo H5Aff.lo \ H5Dff.lo \ - H5Eff.lo \ + H5Eff.lo H5ESff.lo \ H5Fff.lo H5Gff.lo H5Iff.lo H5Lff.lo \ H5Off.lo H5Pff.lo H5Rff.lo \ H5Sff.lo H5Tff.lo H5Zff.lo H5_gen.lo diff --git a/fortran/src/hdf5_fortrandll.def.in b/fortran/src/hdf5_fortrandll.def.in index 4fa6f6aadf2..e55be46370b 100644 --- a/fortran/src/hdf5_fortrandll.def.in +++ b/fortran/src/hdf5_fortrandll.def.in @@ -13,8 +13,9 @@ H5LIB_mp_H5GMTIME H5A_mp_H5AWRITE_CHAR_SCALAR H5A_mp_H5AREAD_CHAR_SCALAR H5A_mp_H5ACREATE_F +H5A_mp_H5ACREATE_ASYNC_F H5A_mp_H5AOPEN_NAME_F -H5A_mp_H5AOPEN_IDX_F +@H5_NO_DEPRECATED_SYMBOLS@H5A_mp_H5AOPEN_IDX_F H5A_mp_H5AGET_SPACE_F H5A_mp_H5AGET_TYPE_F H5A_mp_H5AGET_NAME_F @@ -22,27 +23,41 @@ H5A_mp_H5AGET_NAME_BY_IDX_F H5A_mp_H5AGET_NUM_ATTRS_F H5A_mp_H5ADELETE_F H5A_mp_H5ACLOSE_F +H5A_mp_H5ACLOSE_ASYNC_F H5A_mp_H5AGET_STORAGE_SIZE_F H5A_mp_H5AGET_CREATE_PLIST_F H5A_mp_H5ARENAME_BY_NAME_F +H5A_mp_H5ARENAME_BY_NAME_ASYNC_F H5A_mp_H5AOPEN_F +H5A_mp_H5AOPEN_ASYNC_F H5A_mp_H5ADELETE_BY_IDX_F H5A_mp_H5ADELETE_BY_NAME_F H5A_mp_H5AOPEN_BY_IDX_F +H5A_mp_H5AOPEN_BY_IDX_ASYNC_F H5A_mp_H5AGET_INFO_F H5A_mp_H5AGET_INFO_BY_IDX_F H5A_mp_H5AGET_INFO_BY_NAME_F H5A_mp_H5ACREATE_BY_NAME_F +H5A_mp_H5ACREATE_BY_NAME_ASYNC_F H5A_mp_H5AEXISTS_F +H5A_mp_H5AEXISTS_ASYNC_F H5A_mp_H5AEXISTS_BY_NAME_F +H5A_mp_H5AEXISTS_BY_NAME_ASYNC_F H5A_mp_H5AOPEN_BY_NAME_F +H5A_mp_H5AOPEN_BY_NAME_ASYNC_F H5A_mp_H5AWRITE_PTR +H5A_mp_H5AWRITE_ASYNC_F H5A_mp_H5AREAD_PTR +H5A_mp_H5AREAD_ASYNC_F H5A_mp_H5ARENAME_F +H5A_mp_H5ARENAME_ASYNC_F ; H5D H5D_mp_H5DCREATE_F +H5D_mp_H5DCREATE_ASYNC_F H5D_mp_H5DOPEN_F +H5D_mp_H5DOPEN_ASYNC_F H5D_mp_H5DCLOSE_F +H5D_mp_H5DCLOSE_ASYNC_F H5D_mp_H5DWRITE_REFERENCE_OBJ H5D_mp_H5DWRITE_REFERENCE_DSETREG H5D_mp_H5DWRITE_CHAR_SCALAR @@ -50,8 +65,10 @@ H5D_mp_H5DREAD_REFERENCE_OBJ H5D_mp_H5DREAD_REFERENCE_DSETREG H5D_mp_H5DREAD_CHAR_SCALAR H5D_mp_H5DGET_SPACE_F +H5D_mp_H5DGET_SPACE_ASYNC_F H5D_mp_H5DGET_TYPE_F H5D_mp_H5DSET_EXTENT_F +H5D_mp_H5DSET_EXTENT_ASYNC_F H5D_mp_H5DGET_CREATE_PLIST_F H5D_mp_H5DGET_STORAGE_SIZE_F H5D_mp_H5DVLEN_GET_MAX_LEN_F @@ -82,6 +99,8 @@ H5D_mp_H5DREAD_PTR H5D_mp_H5DVLEN_RECLAIM_F H5D_mp_H5DREAD_MULTI_F H5D_mp_H5DWRITE_MULTI_F +H5D_mp_H5DWRITE_ASYNC_F +H5D_mp_H5DREAD_ASYNC_F ; H5E H5E_mp_H5ECLEAR_F H5E_mp_H5EPRINT_F @@ -90,15 +109,20 @@ H5E_mp_H5EGET_MINOR_F 
H5E_mp_H5ESET_AUTO_F ; H5F H5F_mp_H5FCREATE_F +H5F_mp_H5FCREATE_ASYNC_F H5F_mp_H5FFLUSH_F +H5F_mp_H5FFLUSH_ASYNC_F H5F_mp_H5FCLOSE_F +H5F_mp_H5FCLOSE_ASYNC_F H5F_mp_H5FGET_OBJ_COUNT_F H5F_mp_H5FGET_OBJ_IDS_F H5F_mp_H5FGET_FREESPACE_F H5F_mp_H5FMOUNT_F H5F_mp_H5FUNMOUNT_F H5F_mp_H5FOPEN_F +H5F_mp_H5FOPEN_ASYNC_F H5F_mp_H5FREOPEN_F +H5F_mp_H5FREOPEN_ASYNC_F H5F_mp_H5FGET_CREATE_PLIST_F H5F_mp_H5FGET_ACCESS_PLIST_F H5F_mp_H5FIS_ACCESSIBLE_F @@ -111,8 +135,11 @@ H5F_mp_H5FGET_DSET_NO_ATTRS_HINT_F H5F_mp_H5FSET_DSET_NO_ATTRS_HINT_F ; H5G H5G_mp_H5GOPEN_F +H5G_mp_H5GOPEN_ASYNC_F H5G_mp_H5GCREATE_F +H5G_mp_H5GCREATE_ASYNC_F H5G_mp_H5GCLOSE_F +H5G_mp_H5GCLOSE_ASYNC_F H5G_mp_H5GGET_OBJ_INFO_IDX_F H5G_mp_H5GN_MEMBERS_F H5G_mp_H5GLINK_F @@ -125,9 +152,15 @@ H5G_mp_H5GSET_COMMENT_F H5G_mp_H5GGET_COMMENT_F H5G_mp_H5GCREATE_ANON_F H5G_mp_H5GGET_CREATE_PLIST_F -H5G_mp_H5GGET_INFO_F -H5G_mp_H5GGET_INFO_BY_IDX_F -H5G_mp_H5GGET_INFO_BY_NAME_F +H5G_mp_H5GGET_INFO_F90 +H5G_mp_H5GGET_INFO_BY_IDX_F90 +H5G_mp_H5GGET_INFO_BY_NAME_F90 +H5G_mp_H5GGET_INFO_F03 +H5G_mp_H5GGET_INFO_BY_IDX_F03 +H5G_mp_H5GGET_INFO_BY_NAME_F03 +H5G_mp_H5GGET_INFO_ASYNC_F +H5G_mp_H5GGET_INFO_BY_IDX_ASYNC_F +H5G_mp_H5GGET_INFO_BY_NAME_ASYNC_F H5G_mp_H5GGET_OBJ_INFO_IDX_F ; H5GLOBAL ; PREDEFINED_TYPES DATA @@ -160,21 +193,29 @@ H5I_mp_H5IIS_VALID_F ; H5L H5L_mp_H5LCOPY_F H5L_mp_H5LDELETE_F +H5L_mp_H5LDELETE_ASYNC_F H5L_mp_H5LCREATE_SOFT_F +H5L_mp_H5LCREATE_SOFT_ASYNC_F H5L_mp_H5LCREATE_HARD_F +H5L_mp_H5LCREATE_HARD_ASYNC_F H5L_mp_H5LCREATE_EXTERNAL_F H5L_mp_H5LDELETE_BY_IDX_F +H5L_mp_H5LDELETE_BY_IDX_ASYNC_F H5L_mp_H5LEXISTS_F +H5L_mp_H5LEXISTS_ASYNC_F H5L_mp_H5LGET_INFO_F H5L_mp_H5LGET_INFO_BY_IDX_F H5L_mp_H5LIS_REGISTERED_F H5L_mp_H5LMOVE_F H5L_mp_H5LGET_NAME_BY_IDX_F H5L_mp_H5LITERATE_F +H5L_mp_H5LITERATE_ASYNC_F H5L_mp_H5LITERATE_BY_NAME_F ; H5O H5O_mp_H5OCLOSE_F +H5O_mp_H5OCLOSE_ASYNC_F H5O_mp_H5OCOPY_F +H5O_mp_H5OCOPY_ASYNC_F H5O_mp_H5ODECR_REFCOUNT_F H5O_mp_H5OEXISTS_BY_NAME_F H5O_mp_H5OGET_COMMENT_F @@ -183,11 +224,14 @@ H5O_mp_H5OINCR_REFCOUNT_F H5O_mp_H5OLINK_F H5O_mp_H5OOPEN_BY_TOKEN_F H5O_mp_H5OOPEN_BY_IDX_F +H5O_mp_H5OOPEN_BY_IDX_ASYNC_F H5O_mp_H5OOPEN_F +H5O_mp_H5OOPEN_ASYNC_F H5O_mp_H5OSET_COMMENT_F H5O_mp_H5OSET_COMMENT_BY_NAME_F H5O_mp_H5OGET_INFO_BY_IDX_F H5O_mp_H5OGET_INFO_BY_NAME_F +H5O_mp_H5OGET_INFO_BY_NAME_ASYNC_F H5O_mp_H5OGET_INFO_F H5O_mp_H5OVISIT_BY_NAME_F H5O_mp_H5OVISIT_F diff --git a/fortran/test/Makefile.am b/fortran/test/Makefile.am index 7d85a27d8f5..6ceddd68139 100644 --- a/fortran/test/Makefile.am +++ b/fortran/test/Makefile.am @@ -47,7 +47,7 @@ fortranlib_test_1_8_SOURCES = tH5O.F90 tH5A_1_8.F90 tH5G_1_8.F90 tH5MISC_1_8.F90 fortranlib_test_1_8.F90 fortranlib_test_F03_SOURCES = tH5E_F03.F90 tH5F_F03.F90 tH5L_F03.F90 \ - tH5O_F03.F90 tH5P_F03.F90 tH5T_F03.F90 tHDF5_F03.F90 fortranlib_test_F03.F90 + tH5O_F03.F90 tH5P_F03.F90 tH5T_F03.F90 tHDF5_F03.F90 fortranlib_test_F03.F90 vol_connector_SOURCES=vol_connector.F90 diff --git a/fortran/test/tH5A_1_8.F90 b/fortran/test/tH5A_1_8.F90 index 5344f4bfaa0..d43279e7545 100644 --- a/fortran/test/tH5A_1_8.F90 +++ b/fortran/test/tH5A_1_8.F90 @@ -2614,6 +2614,7 @@ SUBROUTINE test_attr_many(new_format, fcpl, fapl, total_error) WRITE(chr5,'(I5.5)') u attrname = 'attr '//chr5 CALL H5Aexists_f( gid, attrname, exists, error) + CALL check("H5Aexists_f", error, total_error) CALL verify("H5Aexists",exists,.FALSE.,total_error ) CALL H5Aexists_by_name_f(fid, GROUP1_NAME, attrname, exists, error, lapl_id = H5P_DEFAULT_F) @@ -2623,9 +2624,11 @@ SUBROUTINE test_attr_many(new_format, 
fcpl, fapl, total_error) CALL check("h5acreate_f",error,total_error) CALL H5Aexists_f(gid, attrname, exists, error) + CALL check("H5Aexists_f", error, total_error) CALL verify("H5Aexists",exists,.TRUE.,total_error ) CALL H5Aexists_by_name_f(fid, GROUP1_NAME, attrname, exists, error) + CALL check("H5Aexists_by_name_f", error, total_error) CALL verify("H5Aexists_by_name_f",exists,.TRUE.,total_error ) attr_data1(1) = u @@ -2638,9 +2641,11 @@ SUBROUTINE test_attr_many(new_format, fcpl, fapl, total_error) CALL check("h5aclose_f",error,total_error) CALL H5Aexists_f(gid, attrname, exists, error) + CALL check("H5Aexists_f", error, total_error) CALL verify("H5Aexists",exists,.TRUE.,total_error ) CALL H5Aexists_by_name_f(fid, GROUP1_NAME, attrname, exists, error) + CALL check("H5Aexists_by_name_f", error, total_error) CALL verify("H5Aexists_by_name_f",exists,.TRUE.,total_error ) ENDDO diff --git a/fortran/test/tH5G_1_8.F90 b/fortran/test/tH5G_1_8.F90 index a4b25f28ac1..c820d78c15f 100644 --- a/fortran/test/tH5G_1_8.F90 +++ b/fortran/test/tH5G_1_8.F90 @@ -163,6 +163,7 @@ SUBROUTINE group_info(cleanup, fapl, total_error) ! H5G_STORAGE_TYPE_SYMBOL_TABLE: Symbol tables, the original HDF5 structure INTEGER :: nlinks ! Number of links in group INTEGER :: max_corder ! Current maximum creation order value for group + TYPE(H5G_info_t) :: ginfo INTEGER :: u,v ! Local index variables CHARACTER(LEN=2) :: chr2 @@ -283,29 +284,61 @@ SUBROUTINE group_info(cleanup, fapl, total_error) CALL check("H5Gget_info_f", error, total_error) ! Check (new/empty) group's information - CALL verify("H5Gget_info_f", storage_type, H5G_STORAGE_TYPE_COMPACT_F, total_error) - CALL verify("H5Gget_info_f", max_corder, 0, total_error) - CALL verify("H5Gget_info_f", nlinks, 0, total_error) + CALL verify("H5Gget_info_f.storage_type", storage_type, H5G_STORAGE_TYPE_COMPACT_F, total_error) + CALL verify("H5Gget_info_f.max_corder", max_corder, 0, total_error) + CALL VERIFY("H5Gget_info_f.nlinks", nlinks, 0, total_error) CALL verify("H5Gget_info_f.mounted", mounted,.FALSE.,total_error) + ! Retrieve group's information (F03) + CALL H5Gget_info_f(group_id2, ginfo, error) + CALL check("H5Gget_info_f", error, total_error) + + CALL VERIFY("H5Gget_info_f.storage_type", & + ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F,C_INT), total_error) + CALL verify("H5Gget_info_f.max_corder", ginfo%max_corder, 0_C_INT64_T, total_error) + CALL verify("H5Gget_info_f.nlinks", ginfo%nlinks, 0_HSIZE_T, total_error) + CALL verify("H5Gget_info_f.mounted", LOGICAL(ginfo%mounted), .FALSE.,total_error) + ! Retrieve group's information CALL H5Gget_info_by_name_f(group_id, objname, storage_type, nlinks, max_corder, error, mounted=mounted) CALL check("H5Gget_info_by_name_f", error, total_error) ! Check (new/empty) group's information - CALL verify("H5Gget_info_by_name_f", storage_type, H5G_STORAGE_TYPE_COMPACT_F, total_error) - CALL verify("H5Gget_info_by_name_f", max_corder, 0, total_error) - CALL verify("H5Gget_info_by_name_f", nlinks, 0, total_error) - CALL verify("H5Gget_info_by_name_f.mounted", mounted,.FALSE.,total_error) + CALL verify("H5Gget_info_by_name_f.storage_type", storage_type, H5G_STORAGE_TYPE_COMPACT_F, total_error) + CALL verify("H5Gget_info_by_name_f.max_corder", max_corder, 0, total_error) + CALL verify("H5Gget_info_by_name_f.nlinks", nlinks, 0, total_error) + CALL verify("H5Gget_info_by_name_f.mounted", mounted, .FALSE., total_error) + + ! 
Retrieve group's information (F03) + CALL H5Gget_info_by_name_f(group_id, objname, ginfo, error) + CALL check("H5Gget_info_by_name_f", error, total_error) + + ! Check (new/empty) group's information + CALL VERIFY("H5Gget_info_by_name_f.storage_type", & + ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL verify("H5Gget_info_by_name_f.max_corder", ginfo%max_corder, 0_C_INT64_T, total_error) + CALL verify("H5Gget_info_by_name_f.nlinks", ginfo%nlinks, 0_HSIZE_T, total_error) + CALL VERIFY("H5Gget_info_by_name_f.mounted", LOGICAL(ginfo%mounted), .FALSE.,total_error) ! Retrieve group's information CALL H5Gget_info_by_name_f(group_id2, ".", storage_type, nlinks, max_corder, error) CALL check("H5Gget_info_by_name", error, total_error) ! Check (new/empty) group's information - CALL verify("H5Gget_info_by_name_f", storage_type, H5G_STORAGE_TYPE_COMPACT_F, total_error) - CALL verify("H5Gget_info_by_name_f", max_corder, 0, total_error) - CALL verify("H5Gget_info_by_name_f", nlinks, 0, total_error) + CALL VERIFY("H5Gget_info_by_name_f.storage_type", & + ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL verify("H5Gget_info_by_name_f.max_corder", ginfo%max_corder, 0_C_INT64_T, total_error) + CALL verify("H5Gget_info_by_name_f.nlinks", ginfo%nlinks, 0_HSIZE_T, total_error) + + ! Retrieve group's information (F03) + CALL H5Gget_info_by_name_f(group_id2, ".", ginfo, error) + CALL check("H5Gget_info_by_name", error, total_error) + + ! Check (new/empty) group's information + CALL VERIFY("H5Gget_info_by_name_f.storage_type", & + ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL verify("H5Gget_info_by_name_f.max_corder", ginfo%max_corder, 0_C_INT64_T, total_error) + CALL verify("H5Gget_info_by_name_f.nlinks", ginfo%nlinks, 0_HSIZE_T, total_error) ! Create objects in new group created DO v = 0, u @@ -331,6 +364,15 @@ SUBROUTINE group_info(cleanup, fapl, total_error) CALL verify("H5Gget_info_f", max_corder, u+1, total_error) CALL verify("H5Gget_info_f", nlinks, u+1, total_error) + ! Retrieve group's information (F03) + CALL H5Gget_info_f(group_id2, ginfo, error) + CALL check("H5Gget_info_f", error, total_error) + + ! Check (new) group's information + CALL VERIFY("H5Gget_info_f", ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL VERIFY("H5Gget_info_f", ginfo%max_corder, INT(u+1,C_INT64_T), total_error) + CALL VERIFY("H5Gget_info_f", ginfo%nlinks, INT(u+1, HSIZE_T), total_error) + ! Retrieve group's information CALL H5Gget_info_by_name_f(group_id, objname, storage_type, nlinks, max_corder, error) CALL check("H5Gget_info_by_name_f", error, total_error) @@ -340,6 +382,15 @@ SUBROUTINE group_info(cleanup, fapl, total_error) CALL verify("H5Gget_info_by_name_f",max_corder, u+1, total_error) CALL verify("H5Gget_info_by_name_f", nlinks, u+1, total_error) + ! Retrieve group's information (F03) + CALL H5Gget_info_by_name_f(group_id, objname, ginfo, error) + CALL check("H5Gget_info_by_name_f", error, total_error) + + ! Check (new) group's information + CALL VERIFY("H5Gget_info_by_name_f", ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL VERIFY("H5Gget_info_by_name_f", ginfo%max_corder, INT(u+1,C_INT64_T), total_error) + CALL VERIFY("H5Gget_info_by_name_f", ginfo%nlinks, INT(u+1, HSIZE_T), total_error) + ! 
Retrieve group's information CALL H5Gget_info_by_name_f(group_id2, ".", storage_type, nlinks, max_corder, error) CALL check("H5Gget_info_by_name_f", error, total_error) @@ -349,6 +400,15 @@ SUBROUTINE group_info(cleanup, fapl, total_error) CALL verify("H5Gget_info_by_name_f", max_corder, u+1, total_error) CALL verify("H5Gget_info_by_name_f", nlinks, u+1, total_error) + ! Retrieve group's information (F03) + CALL H5Gget_info_by_name_f(group_id2, ".", ginfo, error) + CALL check("H5Gget_info_by_name_f", error, total_error) + + ! Check (new) group's information + CALL VERIFY("H5Gget_info_by_name_f", ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL VERIFY("H5Gget_info_by_name_f", ginfo%max_corder, INT(u+1,C_INT64_T), total_error) + CALL VERIFY("H5Gget_info_by_name_f", ginfo%nlinks, INT(u+1, HSIZE_T), total_error) + ! Retrieve group's information IF(order.NE.H5_ITER_NATIVE_F)THEN IF(order.EQ.H5_ITER_INC_F) THEN @@ -356,16 +416,31 @@ SUBROUTINE group_info(cleanup, fapl, total_error) storage_type, nlinks, max_corder, error,lapl_id=H5P_DEFAULT_F, mounted=mounted) CALL check("H5Gget_info_by_idx_f", error, total_error) CALL verify("H5Gget_info_by_idx_f", mounted,.FALSE.,total_error) + + CALL H5Gget_info_by_idx_f(group_id, ".", idx_type, order, INT(u,HSIZE_T), & + ginfo, error,lapl_id=H5P_DEFAULT_F) + CALL check("H5Gget_info_by_idx_f", error, total_error) + CALL VERIFY("H5Gget_info_by_idx_f", LOGICAL(ginfo%mounted), .FALSE., total_error) + ELSE CALL H5Gget_info_by_idx_f(group_id, ".", idx_type, order, INT(0,HSIZE_T), & storage_type, nlinks, max_corder, error, mounted=mounted) + CALL check("H5Gget_info_by_idx_f", error, total_error) CALL verify("H5Gget_info_by_idx_f", mounted,.FALSE.,total_error) + + CALL H5Gget_info_by_idx_f(group_id, ".", idx_type, order, INT(0,HSIZE_T), & + ginfo, error) CALL check("H5Gget_info_by_idx_f", error, total_error) + CALL verify("H5Gget_info_by_idx_f", LOGICAL(ginfo%mounted),.FALSE.,total_error) ENDIF ! Check (new) group's information CALL verify("H5Gget_info_by_idx_f", storage_type, H5G_STORAGE_TYPE_COMPACT_F, total_error) CALL verify("H5Gget_info_by_idx_f", max_corder, u+1, total_error) CALL verify("H5Gget_info_by_idx_f", nlinks, u+1, total_error) + + CALL VERIFY("H5Gget_info_by_idx_f", ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL verify("H5Gget_info_by_idx_f", ginfo%max_corder, INT(u+1,C_INT64_T), total_error) + CALL verify("H5Gget_info_by_idx_f", ginfo%nlinks, INT(u+1, HSIZE_T), total_error) ENDIF ! Close group created CALL H5Gclose_f(group_id2, error) @@ -380,6 +455,15 @@ SUBROUTINE group_info(cleanup, fapl, total_error) CALL verify("H5Gget_info_f", max_corder, u+1, total_error) CALL verify("H5Gget_info_f", nlinks, u+1, total_error) + ! Retrieve main group's information (F03) + CALL H5Gget_info_f(group_id, ginfo, error) + CALL check("H5Gget_info_f", error, total_error) + + ! Check main group's information + CALL VERIFY("H5Gget_info_f", ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL verify("H5Gget_info_f", ginfo%max_corder, INT(u+1,C_INT64_T), total_error) + CALL verify("H5Gget_info_f", ginfo%nlinks, INT(u+1, HSIZE_T), total_error) + ! 
Retrieve main group's information, by name CALL H5Gget_info_by_name_f(file_id, CORDER_GROUP_NAME, storage_type, nlinks, max_corder, error) CALL check("H5Gget_info_by_name_f", error, total_error) @@ -389,6 +473,15 @@ SUBROUTINE group_info(cleanup, fapl, total_error) CALL verify("H5Gget_info_by_name_f", max_corder, u+1, total_error) CALL verify("H5Gget_info_by_name_f", nlinks, u+1, total_error) + ! Retrieve main group's information, by name (F03) + CALL H5Gget_info_by_name_f(file_id, CORDER_GROUP_NAME, ginfo, error) + CALL check("H5Gget_info_by_name_f", error, total_error) + + ! Check main group's information + CALL VERIFY("H5Gget_info_by_name_f", ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F,C_INT), total_error) + CALL verify("H5Gget_info_by_name_f", ginfo%max_corder, INT(u+1,C_INT64_T), total_error) + CALL verify("H5Gget_info_by_name_f", ginfo%nlinks, INT(u+1, HSIZE_T), total_error) + ! Retrieve main group's information, by name CALL H5Gget_info_by_name_f(group_id, ".", storage_type, nlinks, max_corder, error, H5P_DEFAULT_F) CALL check("H5Gget_info_by_name_f", error, total_error) @@ -398,6 +491,15 @@ SUBROUTINE group_info(cleanup, fapl, total_error) CALL verify("H5Gget_info_by_name_f", max_corder, u+1, total_error) CALL verify("H5Gget_info_by_name_f", nlinks, u+1, total_error) + ! Retrieve main group's information, by name + CALL H5Gget_info_by_name_f(group_id, ".", ginfo, error, H5P_DEFAULT_F) + CALL check("H5Gget_info_by_name_f", error, total_error) + + ! Check main group's information + CALL VERIFY("H5Gget_info_by_name_f", ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL verify("H5Gget_info_by_name_f", ginfo%max_corder, INT(u+1,C_INT64_T), total_error) + CALL verify("H5Gget_info_by_name_f", ginfo%nlinks, INT(u+1, HSIZE_T), total_error) + ! Create soft link in another group, to objects in main group valname = CORDER_GROUP_NAME//objname @@ -411,31 +513,39 @@ SUBROUTINE group_info(cleanup, fapl, total_error) CALL verify("H5Gget_info_f", storage_type, H5G_STORAGE_TYPE_COMPACT_F, total_error) CALL verify("H5Gget_info_f", max_corder, u+1, total_error) CALL verify("H5Gget_info_f", nlinks, u+1, total_error) + + ! Retrieve soft link group's information, by name (F03) + CALL H5Gget_info_f(soft_group_id, ginfo, error) + CALL check("H5Gget_info_f", error, total_error) + + ! Check soft link group's information + CALL VERIFY("H5Gget_info_f", ginfo%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL verify("H5Gget_info_f", ginfo%max_corder, INT(u+1,C_INT64_T), total_error) + CALL verify("H5Gget_info_f", ginfo%nlinks, INT(u+1, HSIZE_T), total_error) ENDDO ! Close the groups - CALL H5Gclose_f(group_id, error) - CALL check("H5Gclose_f", error, total_error) - CALL H5Gclose_f(soft_group_id, error) - CALL check("H5Gclose_f", error, total_error) + CALL H5Gclose_f(group_id, error) + CALL check("H5Gclose_f", error, total_error) + CALL H5Gclose_f(soft_group_id, error) + CALL check("H5Gclose_f", error, total_error) - ! Close the file - CALL H5Fclose_f(file_id, error) - CALL check("H5Fclose_f", error, total_error) - ENDDO + ! Close the file + CALL H5Fclose_f(file_id, error) + CALL check("H5Fclose_f", error, total_error) ENDDO ENDDO + ENDDO - ! Free resources - CALL H5Pclose_f(gcpl_id, error) - CALL check("H5Pclose_f", error, total_error) - - IF(cleanup) CALL h5_cleanup_f(prefix, H5P_DEFAULT_F, error) - CALL check("h5_cleanup_f", error, total_error) + ! 
Free resources + CALL H5Pclose_f(gcpl_id, error) + CALL check("H5Pclose_f", error, total_error) + IF(cleanup) CALL h5_cleanup_f(prefix, H5P_DEFAULT_F, error) + CALL check("h5_cleanup_f", error, total_error) - END SUBROUTINE group_info +END SUBROUTINE group_info !------------------------------------------------------------------------- ! * Function: timestamps @@ -1119,9 +1229,11 @@ SUBROUTINE cklinks(fapl, total_error) CALL H5Lexists_f(file,"d1",Lexists, error) + CALL check("H5Lexists_f", error, total_error) CALL verify("H5Lexists", Lexists,.TRUE.,total_error) CALL H5Lexists_f(file,"grp1/hard",Lexists, error) + CALL check("H5Lexists_f", error, total_error) CALL verify("H5Lexists", Lexists,.TRUE.,total_error) ! Cleanup diff --git a/fortran/test/tf.F90 b/fortran/test/tf.F90 index 0c518f5c64c..73f43bc3016 100644 --- a/fortran/test/tf.F90 +++ b/fortran/test/tf.F90 @@ -36,6 +36,8 @@ MODULE TH5_MISC INTEGER, PARAMETER :: sp = SELECTED_REAL_KIND(5) ! This should map to REAL*4 on most modern processors INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(10) ! This should map to REAL*8 on most modern processors + INTEGER, PARAMETER :: TAB_SPACE = 88 ! Tab spacing for printing results + ! generic compound datatype TYPE :: comp_datatype SEQUENCE @@ -55,6 +57,84 @@ MODULE TH5_MISC CONTAINS +!This definition is needed for Windows DLLs +!DEC$if defined(BUILD_HDF5_TEST_DLL) +!DEC$attributes dllexport :: write_test_header +!DEC$endif + SUBROUTINE write_test_header(title_header) + + ! Writes the test header + + IMPLICIT NONE + + CHARACTER(LEN=*), INTENT(IN) :: title_header ! test name + INTEGER, PARAMETER :: width = TAB_SPACE+10 + CHARACTER(LEN=2*width) ::title_centered =" " + INTEGER :: len, i + + len=LEN_TRIM(title_header) + title_centered(1:3) ="| |" + title_centered((width-len)/2:(width-len)/2+len) = TRIM(title_header) + title_centered(width-1:width+2) ="| |" + + WRITE(*,'(1X)', ADVANCE="NO") + DO i = 1, width-1 + WRITE(*,'("_")', ADVANCE="NO") + ENDDO + WRITE(*,'()') + WRITE(*,'("| ")', ADVANCE="NO") + DO i = 1, width-5 + WRITE(*,'("_")', ADVANCE="NO") + ENDDO + WRITE(*,'(" |")') + + WRITE(*,'("| |")', ADVANCE="NO") + DO i = 1, width-5 + WRITE(*,'(1X)', ADVANCE="NO") + ENDDO + WRITE(*,'("| |")') + + WRITE(*,'(A)') title_centered + + WRITE(*,'("| |")', ADVANCE="NO") + DO i = 1, width-5 + WRITE(*,'(1X)', ADVANCE="NO") + ENDDO + WRITE(*,'("| |")') + + WRITE(*,'("| |")', ADVANCE="NO") + DO i = 1, width-5 + WRITE(*,'("_")', ADVANCE="NO") + ENDDO + WRITE(*,'("| |")') + + WRITE(*,'("|")', ADVANCE="NO") + DO i = 1, width-1 + WRITE(*,'("_")', ADVANCE="NO") + ENDDO + WRITE(*,'("|",/)') + + END SUBROUTINE write_test_header + +!This definition is needed for Windows DLLs +!DEC$if defined(BUILD_HDF5_TEST_DLL) +!DEC$attributes dllexport :: write_test_footer +!DEC$endif + SUBROUTINE write_test_footer() + + ! 
Writes the test footer + + IMPLICIT NONE + INTEGER, PARAMETER :: width = TAB_SPACE+10 + INTEGER :: i + + DO i = 1, width + WRITE(*,'("_")', ADVANCE="NO") + ENDDO + WRITE(*,'(/)') + + END SUBROUTINE write_test_footer + !This definition is needed for Windows DLLs !DEC$if defined(BUILD_HDF5_TEST_DLL) !DEC$attributes dllexport :: write_test_status @@ -78,7 +158,7 @@ SUBROUTINE write_test_status( test_result, test_title, total_error) CHARACTER(LEN=8), PARAMETER :: success = ' PASSED ' CHARACTER(LEN=8), PARAMETER :: failure = '*FAILED*' CHARACTER(LEN=8), PARAMETER :: skip = '--SKIP--' - + CHARACTER(LEN=10) :: FMT error_string = failure IF (test_result == 0) THEN @@ -86,8 +166,8 @@ SUBROUTINE write_test_status( test_result, test_title, total_error) ELSE IF (test_result == -1) THEN error_string = skip ENDIF - - WRITE(*, fmt = '(A, T88, A)') test_title, error_string + WRITE(FMT,'("(A,T",I0,",A)")') TAB_SPACE + WRITE(*, fmt = FMT) test_title, error_string IF(test_result.GT.0) total_error = total_error + test_result diff --git a/fortran/testpar/CMakeLists.txt b/fortran/testpar/CMakeLists.txt index 58ef95d470e..ca241f6d87f 100644 --- a/fortran/testpar/CMakeLists.txt +++ b/fortran/testpar/CMakeLists.txt @@ -98,6 +98,46 @@ if(MSVC) set_property(TARGET subfiling_test PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS}") endif() +#-- Adding test for async_test +add_executable (async_test + async.F90 +) +target_include_directories (async_test + PRIVATE ${TESTPAR_INCLUDES} +) +target_compile_options(async_test + PRIVATE + "${HDF5_CMAKE_Fortran_FLAGS}" + $<$:${WIN_COMPILE_FLAGS}> +) +if (NOT BUILD_SHARED_LIBS) + target_link_libraries (async_test + PRIVATE + ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} ${LINK_Fortran_LIBS} + $<$:"ws2_32.lib"> + ) + set_target_properties (async_test PROPERTIES + FOLDER test/fortran + LINKER_LANGUAGE Fortran + Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/static + ) +else () + target_link_libraries (async_test + PRIVATE + ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} ${LINK_Fortran_LIBS} + $<$:"ws2_32.lib"> + ) + set_target_properties (async_test PROPERTIES + FOLDER test/fortran + LINKER_LANGUAGE Fortran + Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared + ) +endif () + +if(MSVC) + set_property(TARGET async_test PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS}") +endif() + if (HDF5_TEST_FORTRAN AND HDF5_TEST_PARALLEL) include (CMakeTests.cmake) endif () diff --git a/fortran/testpar/Makefile.am b/fortran/testpar/Makefile.am index b1cefbc44a4..7f9f2846928 100644 --- a/fortran/testpar/Makefile.am +++ b/fortran/testpar/Makefile.am @@ -32,7 +32,7 @@ else endif # These are our main targets -TEST_PROG_PARA=parallel_test subfiling_test +TEST_PROG_PARA=parallel_test subfiling_test async_test check_PROGRAMS=$(TEST_PROG_PARA) # Temporary files @@ -41,6 +41,7 @@ CHECK_CLEANFILES+=parf[12].h5 subf.h5* # Test source files parallel_test_SOURCES=ptest.F90 hyper.F90 mdset.F90 multidsetrw.F90 subfiling_test_SOURCES=subfiling.F90 +async_test_SOURCES=async.F90 # The tests depend on several libraries. LDADD=$(LIBH5FTEST) $(LIBH5TEST) $(LIBH5F) $(LIBHDF5) diff --git a/fortran/testpar/async.F90 b/fortran/testpar/async.F90 new file mode 100644 index 00000000000..e3a80ad8aa6 --- /dev/null +++ b/fortran/testpar/async.F90 @@ -0,0 +1,1417 @@ +! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * +! Copyright by The HDF Group. * +! All rights reserved. * +! * +! 
This file is part of HDF5. The full HDF5 copyright notice, including * +! terms governing use, modification, and redistribution, is contained in * +! the COPYING file, which can be found at the root of the source code * +! distribution tree, or in https://www.hdfgroup.org/licenses. * +! If you do not have access to either file, you may request a copy from * +! help@hdfgroup.org. * +! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * +! +! Tests async Fortran wrappers. It needs an async VOL. It will skip the tests if +! HDF5_VOL_CONNECTOR is not set or is set to a non-supporting async VOL. +! +MODULE test_async_APIs + + USE MPI + USE HDF5 + USE TH5_MISC + USE TH5_MISC_GEN + + INTEGER(C_INT), PARAMETER :: op_data_type = 200 + INTEGER(C_INT), PARAMETER :: op_data_command = 99 + + LOGICAL :: async_enabled = .TRUE. + LOGICAL :: mpi_thread_mult = .TRUE. + + ! Custom group iteration callback data + TYPE, bind(c) :: iter_info + CHARACTER(KIND=C_CHAR), DIMENSION(1:12) :: name ! The name of the object + INTEGER(c_int) :: TYPE ! The TYPE of the object + INTEGER(c_int) :: command ! The TYPE of RETURN value + END TYPE iter_info + + CHARACTER(LEN=10), TARGET :: app_file = "async.F90"//C_NULL_CHAR + CHARACTER(LEN=10), TARGET :: app_func = "func_name"//C_NULL_CHAR + INTEGER :: app_line = 42 + +CONTAINS + + INTEGER(KIND=C_INT) FUNCTION liter_cb(group, name, link_info, op_data) bind(C) + + IMPLICIT NONE + + INTEGER(HID_T), VALUE :: group + CHARACTER(LEN=1), DIMENSION(1:12) :: name + TYPE (H5L_info_t) :: link_info + TYPE(iter_info) :: op_data + + liter_cb = 0 + + op_data%name(1:12) = name(1:12) + + SELECT CASE (op_data%command) + + CASE(0) + liter_cb = 0 + CASE(2) + liter_cb = op_data%command*10 + END SELECT + op_data%command = op_data_command + op_data%type = op_data_type + + END FUNCTION liter_cb + + SUBROUTINE H5ES_tests(cleanup, total_error) + ! + ! Test H5ES routines + ! 
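+    ! The test follows the basic event-set pattern and checks
+    ! H5ESget_count_f/H5ESget_op_counter_f along the way; the counts are only
+    ! nonzero when the VOL connector really queues the requests (async_enabled).
+    ! A minimal illustrative call sequence (placeholder variable names, not
+    ! part of this test):
+    !   CALL H5EScreate_f(es_id, status)
+    !   CALL H5Fcreate_async_f(name, H5F_ACC_TRUNC_F, fid, es_id, status)
+    !   CALL H5Fclose_async_f(fid, es_id, status)
+    !   CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, n_left, failed, status)
+    !   CALL H5ESclose_f(es_id, status)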
+ IMPLICIT NONE + LOGICAL, INTENT(IN) :: cleanup + INTEGER, INTENT(INOUT) :: total_error + + INTEGER :: nerrors = 0 + INTEGER(HID_T) :: fapl_id + INTEGER(HID_T) :: file_id + CHARACTER(len=80) :: filename = "h5es_tests.h5" + INTEGER :: hdferror + + INTEGER(HID_T) :: es_id + INTEGER(SIZE_T) :: count + INTEGER(C_INT64_T) :: counter + INTEGER(SIZE_T) :: num_not_canceled + INTEGER(SIZE_T) :: num_in_progress + LOGICAL :: err_occurred + + CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, nerrors) + + CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) + CALL check("h5pset_fapl_mpio_f", hdferror, nerrors) + + CALL H5EScreate_f(es_id, hdferror) + CALL check("H5EScreate_f", hdferror, nerrors) + + CALL H5ESget_count_f(es_id, count, hdferror) + CALL check("H5ESget_count_f", hdferror, nerrors) + CALL VERIFY("H5ESget_count_f", count, 0_SIZE_T,total_error) + + CALL H5EScancel_f(es_id, num_not_canceled, err_occurred, hdferror) + CALL check("H5EScancel_f", hdferror, nerrors) + CALL VERIFY("H5EScancel_f", num_not_canceled, 0_size_t, total_error) + CALL VERIFY("H5EScancel_f", err_occurred, .FALSE., total_error) + + CALL H5Fcreate_async_f(filename, H5F_ACC_TRUNC_F, file_id, es_id, hdferror, access_prp = fapl_id) + CALL check("h5fcreate_f", hdferror, nerrors) + + CALL H5ESget_count_f(es_id, count, hdferror) + CALL check("H5ESget_count_f", hdferror, nerrors) + IF(async_enabled)THEN + CALL VERIFY("H5ESget_count_f", count, 2_SIZE_T,total_error) + ELSE + CALL VERIFY("H5ESget_count_f", count, 0_SIZE_T,total_error) + ENDIF + + CALL H5ESget_op_counter_f(es_id, counter, hdferror) + CALL check("H5ESget_op_counter_f", hdferror, nerrors) + IF(async_enabled)THEN + CALL VERIFY("H5ESget_op_counter_f", counter, 2_C_INT64_T, total_error) + ELSE + CALL VERIFY("H5ESget_op_counter_f", counter, 0_C_INT64_T, total_error) + ENDIF + + CALL H5Pclose_f(fapl_id, hdferror) + CALL check("h5pclose_f", hdferror, nerrors) + + CALL H5Fclose_async_f(file_id, es_id, hdferror) + CALL check("h5fclose_f", hdferror, nerrors) + CALL H5ESget_count_f(es_id, count, hdferror) + CALL check("H5ESget_count_f", hdferror, nerrors) + IF(async_enabled)THEN + CALL VERIFY("H5ESget_count_f", count, 3_SIZE_T,total_error) + ELSE + CALL VERIFY("H5ESget_count_f", count, 0_SIZE_T,total_error) + ENDIF + + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror); + CALL check("H5ESwait_f", hdferror, nerrors) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + + CALL H5ESget_count_f(es_id, count, hdferror) + CALL check("H5ESget_count_f", hdferror, nerrors) + CALL VERIFY("H5ESget_count_f", count, 0_SIZE_T,total_error) + + CALL H5ESclose_f(es_id, hdferror) + CALL check("H5ESclose_f", hdferror, nerrors) + + END SUBROUTINE H5ES_tests + + SUBROUTINE H5A_async_tests(cleanup, total_error) + ! + ! Test H5A async routines + ! 
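+    ! Attributes are created, written, and closed through the event set, the
+    ! file is reopened, and the data and names are verified with the
+    ! H5Aexists*_async_f, H5Aopen*_async_f, H5Aread_async_f and
+    ! H5Arename*_async_f wrappers. Buffers are passed as TYPE(C_PTR) via
+    ! C_LOC() of TARGET variables, which must stay valid until H5ESwait_f
+    ! shows the requests have completed.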
+ IMPLICIT NONE + LOGICAL, INTENT(IN) :: cleanup + INTEGER, INTENT(INOUT) :: total_error + + INTEGER(HID_T) :: fapl_id + INTEGER(HID_T) :: file_id + CHARACTER(len=80) :: filename = "h5a_tests.h5" + INTEGER :: hdferror + INTEGER(HID_T) :: es_id + INTEGER(SIZE_T) :: num_in_progress + LOGICAL :: err_occurred + + CHARACTER(LEN=4), PARAMETER :: attr_name = "ATTR" + INTEGER, TARGET :: attr_data0 = 100 + INTEGER, TARGET :: attr_data1 = 101 + INTEGER, TARGET :: attr_data2 = 101 + INTEGER, TARGET :: attr_rdata0 + INTEGER, TARGET :: attr_rdata1 + INTEGER, TARGET :: attr_rdata2 + INTEGER(HID_T) :: space_id + INTEGER(HID_T) :: attr_id0, attr_id1, attr_id2 + LOGICAL :: exists + LOGICAL(C_BOOL), TARGET :: exists0 = .FALSE., exists1 = .FALSE., exists2 = .FALSE., exists3 = .FALSE. + TYPE(C_PTR) :: f_ptr, f_ptr1, f_ptr2 + + CALL H5EScreate_f(es_id, hdferror) + CALL check("H5EScreate_f", hdferror, total_error) + ! + ! Create the file. + ! + CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, total_error) + + CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) + CALL check("h5pset_fapl_mpio_f", hdferror, total_error) + + CALL h5fcreate_async_f(filename, H5F_ACC_TRUNC_F, file_id, es_id, hdferror, access_prp = fapl_id ) + CALL check("h5fcreate_f",hdferror, total_error) + + CALL H5Screate_f(H5S_SCALAR_F, space_id, hdferror) + CALL check("H5Screate_f", hdferror, total_error) + + f_ptr1 = C_LOC(app_file) + f_ptr2 = C_LOC(app_func) + CALL h5acreate_async_f(file_id, attr_name, H5T_NATIVE_INTEGER, space_id, attr_id0, es_id, hdferror, & + file=f_ptr1, func=f_ptr2, line=app_line) + CALL check("h5acreate_f",hdferror,total_error) + + f_ptr = C_LOC(attr_data0) + CALL H5Awrite_async_f(attr_id0, H5T_NATIVE_INTEGER, f_ptr, es_id, hdferror) + CALL check("H5Awrite_async_f",hdferror,total_error) + + CALL H5Aclose_async_f(attr_id0, es_id, hdferror) + CALL check("H5Aclose_async_f",hdferror,total_error) + + CALL h5acreate_by_name_async_f(file_id, "/", TRIM(attr_name)//"00", & + H5T_NATIVE_INTEGER, space_id, attr_id1, es_id, hdferror) + CALL check("h5acreate_by_name_async_f",hdferror,total_error) + + CALL h5acreate_by_name_async_f(file_id, "/", TRIM(attr_name)//"01", & + H5T_NATIVE_INTEGER, space_id, attr_id2, es_id, hdferror) + CALL check("h5acreate_by_name_async_f",hdferror,total_error) + + f_ptr = C_LOC(attr_data1) + CALL H5Awrite_async_f(attr_id1, H5T_NATIVE_INTEGER, f_ptr, es_id, hdferror) + CALL check("H5Awrite_async_f",hdferror,total_error) + + CALL H5Aclose_async_f(attr_id1, es_id, hdferror) + CALL check("H5Aclose_async_f",hdferror,total_error) + + f_ptr = C_LOC(attr_data2) + CALL H5Awrite_async_f(attr_id2, H5T_NATIVE_INTEGER, f_ptr, es_id, hdferror) + CALL check("H5Awrite_async_f",hdferror,total_error) + + CALL H5Aclose_async_f(attr_id2, es_id, hdferror) + CALL check("H5Aclose_async_f",hdferror,total_error) + + CALL H5Sclose_f(space_id, hdferror) + CALL check("H5Sclose_f",hdferror,total_error) + CALL H5Fclose_async_f(file_id, es_id, hdferror) + CALL check("H5Fclose_async_f",hdferror, total_error) + + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror) + CALL check("H5ESwait_f", hdferror, total_error) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + + CALL h5fopen_async_f(filename, H5F_ACC_RDWR_F, file_id, es_id, hdferror, access_prp = fapl_id ) + CALL check("h5fopen_async_f",hdferror, total_error) + + f_ptr = C_LOC(exists0) + CALL H5Aexists_async_f(file_id, attr_name, f_ptr, es_id, hdferror) + CALL 
check("H5Aexists_async_f",hdferror, total_error) + + f_ptr = C_LOC(exists1) + CALL H5Aexists_async_f(file_id, TRIM(attr_name)//"00", f_ptr, es_id, hdferror) + CALL check("H5Aexists_async_f",hdferror, total_error) + + f_ptr = C_LOC(exists2) + CALL H5Aexists_by_name_async_f(file_id, "/", attr_name, f_ptr, es_id, hdferror) + CALL check("H5Aexists_by_name_async_f",hdferror, total_error) + + f_ptr = C_LOC(exists3) + CALL H5Aexists_by_name_async_f(file_id, "/", TRIM(attr_name)//"00", f_ptr, es_id, hdferror) + CALL check("H5Aexists_by_name_async_f",hdferror, total_error) + + CALL H5Aopen_async_f(file_id, attr_name, attr_id0, es_id, hdferror) + CALL check("H5Aopen_async_f", hdferror, total_error) + + f_ptr = C_LOC(attr_rdata0) + CALL H5Aread_async_f(attr_id0, H5T_NATIVE_INTEGER, f_ptr, es_id, hdferror) + CALL check("H5Aread_async_f", hdferror, total_error) + + CALL H5Aclose_async_f(attr_id0, es_id, hdferror) + CALL check("H5Aclose_async_f",hdferror,total_error) + + CALL H5Aopen_by_name_async_f(file_id, "/", TRIM(attr_name)//"00", attr_id1, es_id, hdferror) + CALL check("H5Aopen_by_name_async_f", hdferror, total_error) + + f_ptr = C_LOC(attr_rdata1) + CALL H5Aread_async_f(attr_id1, H5T_NATIVE_INTEGER, f_ptr, es_id, hdferror) + CALL check("H5Aread_async_f", hdferror, total_error) + + CALL H5Aclose_async_f(attr_id1, es_id, hdferror) + CALL check("H5Aclose_async_f",hdferror,total_error) + + CALL H5Aopen_by_idx_async_f(file_id, ".", H5_INDEX_CRT_ORDER_F, H5_ITER_INC_F, INT(2,HSIZE_T), attr_id2, es_id, hdferror) + CALL check("H5Aopen_by_idx_async_f", hdferror, total_error) + + f_ptr = C_LOC(attr_rdata2) + CALL H5Aread_async_f(attr_id2, H5T_NATIVE_INTEGER, f_ptr, es_id, hdferror) + CALL check("H5Aread_async_f", hdferror, total_error) + + CALL H5Aclose_async_f(attr_id2, es_id, hdferror) + CALL check("H5Aclose_async_f",hdferror,total_error) + + CALL H5Arename_async_f(file_id, TRIM(attr_name)//"00", TRIM(attr_name)//"05", es_id, hdferror) + CALL check("H5Arename_async_f",hdferror,total_error) + + CALL H5Arename_by_name_async_f(file_id, ".", TRIM(attr_name)//"01", TRIM(attr_name)//"06", es_id, hdferror) + CALL check("H5Arename_by_name_async_f",hdferror,total_error) + + CALL H5Fclose_async_f(file_id, es_id, hdferror) + CALL check("H5Fclose_async_f",hdferror,total_error) + + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror) + CALL check("H5ESwait_f", hdferror, total_error) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + + CALL VERIFY("H5Aexists_async_f", LOGICAL(exists0), .TRUE., total_error) + CALL VERIFY("H5Aexists_async_f", LOGICAL(exists1), .TRUE., total_error) + CALL VERIFY("H5Aexists_by_name_async_f", LOGICAL(exists2), .TRUE., total_error) + CALL VERIFY("H5Aexists_by_name_async_f", LOGICAL(exists3), .TRUE., total_error) + + CALL VERIFY("H5Aread_async_f", attr_rdata0, attr_data0, total_error) + CALL VERIFY("H5Aread_async_f", attr_rdata1, attr_data1, total_error) + CALL VERIFY("H5Aread_async_f", attr_rdata2, attr_data2, total_error) + + CALL h5fopen_f(filename, H5F_ACC_RDWR_F, file_id, hdferror, access_prp = fapl_id ) + CALL check("h5fopen_f",hdferror, total_error) + + CALL H5Aexists_f(file_id, TRIM(attr_name)//"05", exists, hdferror) + CALL check("H5Aexist_f",hdferror, total_error) + CALL VERIFY("H5Arename_async_f", exists, .TRUE., total_error) + + CALL H5Aexists_f(file_id, TRIM(attr_name)//"06", exists, hdferror) + CALL check("H5Aexist_f",hdferror, total_error) + CALL VERIFY("H5Arename_by_name_async_f", exists, .TRUE., total_error) + + CALL 
H5Aexists_f(file_id, TRIM(attr_name)//"01", exists, hdferror) + CALL check("H5Aexist_f",hdferror, total_error) + CALL VERIFY("H5Arename_async_f", exists, .FALSE., total_error) + + CALL H5Aexists_f(file_id, TRIM(attr_name)//"02", exists, hdferror) + CALL check("H5Aexist_f",hdferror, total_error) + CALL VERIFY("H5Arename_by_name_async_f", exists, .FALSE., total_error) + + CALL H5Fclose_f(file_id, hdferror) + CALL check("H5Fclose_f",hdferror,total_error) + + CALL H5Pclose_f(fapl_id, hdferror) + CALL check(" H5Pclose_f",hdferror, total_error) + + CALL H5ESclose_f(es_id, hdferror) + CALL check("H5ESclose_f", hdferror, total_error) + + END SUBROUTINE H5A_async_tests + + SUBROUTINE H5D_async_tests(cleanup, total_error) + ! + ! Test H5D async routines + ! + IMPLICIT NONE + LOGICAL, INTENT(IN) :: cleanup + INTEGER, INTENT(INOUT) :: total_error + + INTEGER(HID_T) :: fapl_id + INTEGER(HID_T) :: file_id + CHARACTER(len=80) :: filename = "h5d_tests.h5" + INTEGER :: hdferror + INTEGER(HID_T) :: es_id + INTEGER(SIZE_T) :: num_in_progress + LOGICAL :: err_occurred + + CHARACTER(LEN=8), PARAMETER :: dsetname = "IntArray" + INTEGER(HID_T) :: crp_list ! File identifier + INTEGER(HID_T) :: dset_id ! Dataset identifier + + INTEGER(HID_T) :: filespace ! Dataspace identifier in file + INTEGER(HID_T) :: memspace ! Dataspace identifier in memory + INTEGER(HID_T) :: xfer_prp ! Property list identifier + TYPE(C_PTR) :: f_ptr + + INTEGER(HSIZE_T), DIMENSION(1) :: dims = (/4/) + INTEGER(HSIZE_T), DIMENSION(1) :: dimsf + + INTEGER(HSIZE_T), DIMENSION(1) :: count + INTEGER(HSSIZE_T), DIMENSION(1) :: offset + INTEGER, ALLOCATABLE, DIMENSION(:), TARGET :: idata + INTEGER, ALLOCATABLE, DIMENSION(:), TARGET :: rdata + INTEGER(HSIZE_T), DIMENSION(1) :: idims, imaxdims + INTEGER(HSIZE_T), DIMENSION(1) :: maxdims + INTEGER(HSIZE_T) :: i + INTEGER(HSIZE_T), DIMENSION(1) :: extend_dim + INTEGER, TARGET :: fillvalue = 99 + + INTEGER :: error ! Error flags + INTEGER :: mpierror ! MPI error flag + INTEGER :: comm, info + INTEGER :: mpi_size, mpi_rank + + comm = MPI_COMM_WORLD + info = MPI_INFO_NULL + + CALL MPI_COMM_SIZE(comm, mpi_size, mpierror) + CALL MPI_COMM_RANK(comm, mpi_rank, mpierror) + + CALL H5EScreate_f(es_id, hdferror) + CALL check("H5EScreate_f", hdferror, total_error) + ! + ! Create the file. + ! + CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, total_error) + + CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) + CALL check("h5pset_fapl_mpio_f", hdferror, total_error) + + CALL h5fcreate_async_f(filename, H5F_ACC_TRUNC_F, file_id, es_id, error, access_prp = fapl_id ) + CALL check("h5fcreate_f",hdferror, total_error) + + dimsf(1) = dims(1)*mpi_size + ALLOCATE(idata(1:dims(1))) + + idata(:) = mpi_rank + + CALL h5pcreate_f(H5P_DATASET_CREATE_F, crp_list, hdferror) + CALL h5pset_chunk_f(crp_list, 1, dims, error) + f_ptr = C_LOC(fillvalue) + CALL h5pset_fill_value_f(crp_list, H5T_NATIVE_INTEGER, f_ptr, hdferror) + + ! + ! Create data space for the dataset. + ! + maxdims(1) = H5S_UNLIMITED_F + CALL h5screate_simple_f(1, dimsf, filespace, hdferror, maxdims) + CALL check("h5screate_simple_f", hdferror, total_error) + + ! + ! create contiguous dataset in the file. 
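+    ! (crp_list created above requests chunked storage with a fill value, and
+    !  the file dataspace has an unlimited maximum dimension, so the dataset
+    !  can later be grown with H5Dset_extent_async_f; each MPI rank writes its
+    !  own hyperslab collectively)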
+ CALL h5dcreate_async_f(file_id, dsetname, H5T_NATIVE_INTEGER, filespace, & + dset_id, es_id, hdferror, crp_list) + CALL check("h5dcreate_async_f", hdferror, total_error) + + COUNT(1) = dims(1) + offset(1) = mpi_rank * COUNT(1) + CALL h5screate_simple_f(1, dims(1), memspace, hdferror) + CALL check("h5screate_simple_f", hdferror, total_error) + + CALL h5sselect_hyperslab_f (filespace, H5S_SELECT_SET_F, offset, count, hdferror) + CALL check("h5sselect_hyperslab_f", hdferror, total_error) + + CALL h5pcreate_f(H5P_DATASET_XFER_F, xfer_prp, error) + CALL h5pset_dxpl_mpio_f(xfer_prp, H5FD_MPIO_COLLECTIVE_F, error) + + ! + ! Write data to the dataset + ! + f_ptr = C_LOC(idata) + CALL h5dwrite_async_f(dset_id, H5T_NATIVE_INTEGER, f_ptr, es_id, hdferror, & + mem_space_id = memspace, file_space_id = filespace, xfer_prp = xfer_prp) + CALL check("h5dwrite_async_f", hdferror, total_error) + ! + ! Terminate access to the dataset. + ! + CALL h5dclose_async_f(dset_id, es_id, error) + CALL check("h5dclose_f",error,total_error) + + ! + ! Close dataspaces. + ! + CALL h5sclose_f(filespace, hdferror) + CALL check("h5sclose_f",hdferror,total_error) + CALL h5sclose_f(memspace, error) + CALL check("h5sclose_f",hdferror,total_error) + CALL h5pclose_f(crp_list, hdferror) + CALL check("h5pclose_f",hdferror,total_error) + + ! + ! Close the file. + ! + CALL h5fclose_async_f(file_id, es_id, hdferror) + CALL check("h5fclose_async_f",hdferror,total_error) + + ! Complete the operations + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror); + CALL check("H5ESwait_f", hdferror, total_error) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + CALL VERIFY("H5ESwait_f", num_in_progress, 0_size_t , total_error) + + CALL h5fopen_async_f(filename, H5F_ACC_RDWR_F, file_id, es_id, hdferror, access_prp = fapl_id) + CALL check("h5fopen_async_f",hdferror,total_error) + + CALL h5dopen_async_f(file_id, dsetname, dset_id, es_id, hdferror) + CALL check("h5dopen_async_f",hdferror,total_error) + + CALL H5Dget_space_async_f(dset_id, filespace, es_id, hdferror) + CALL check("h5dopen_async_f",hdferror,total_error) + + CALL h5sget_simple_extent_dims_f(filespace, idims, imaxdims, hdferror) + CALL check("h5sget_simple_extent_dims_f", hdferror, total_error) + CALL VERIFY("h5sget_simple_extent_dims_f", idims(1), dimsf(1), total_error) + CALL VERIFY("h5sget_simple_extent_dims_f", imaxdims(1), H5S_UNLIMITED_F, total_error) + + ! Check reading the data back + ALLOCATE(rdata(1:dims(1))) + + CALL h5screate_simple_f(1, dims(1), memspace, hdferror) + CALL check("h5screate_simple_f", hdferror, total_error) + + CALL h5sselect_hyperslab_f (filespace, H5S_SELECT_SET_F, offset, count, hdferror) + CALL check("h5sselect_hyperslab_f", hdferror, total_error) + + f_ptr = C_LOC(rdata) + CALL h5dread_async_f(dset_id, H5T_NATIVE_INTEGER, f_ptr, es_id, hdferror, & + mem_space_id = memspace, file_space_id = filespace, xfer_prp = xfer_prp) + CALL check("h5dread_async_f", hdferror, total_error) + + CALL h5sclose_f(filespace, hdferror) + CALL check("h5sclose_f",hdferror,total_error) + + CALL h5sclose_f(memspace, hdferror) + CALL check("h5sclose_f",hdferror,total_error) + + ! 
Extend the dataset + extend_dim(1) = dimsf(1)*2 + CALL H5Dset_extent_async_f(dset_id, extend_dim, es_id, hdferror) + CALL check("H5Dset_extent_async_f", error, total_error) + + CALL h5dclose_async_f(dset_id, es_id, error) + CALL check("h5dclose_async_f",error,total_error) + + CALL h5fclose_async_f(file_id, es_id, hdferror) + CALL check("h5fclose_async_f",hdferror,total_error) + + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror) + CALL check("H5ESwait_f", hdferror, total_error) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + + ! Verify the data read + DO i = 1, dims(1) + CALL VERIFY("h5dread_f", idata(i), rdata(i), total_error) + ENDDO + + CALL h5fopen_f(filename, H5F_ACC_RDWR_F, file_id, hdferror, access_prp = fapl_id ) + CALL check("h5fopen_f",error,total_error) + + CALL h5dopen_f(file_id, dsetname, dset_id, hdferror) + CALL check("h5dopen_async_f",hdferror,total_error) + + CALL H5Dget_space_f(dset_id, filespace, hdferror) + CALL check("h5dopen_async_f",hdferror,total_error) + + CALL H5Sget_simple_extent_dims_f(filespace, idims, imaxdims, hdferror) + CALL check("h5sget_simple_extent_dims_f", hdferror, total_error) + CALL VERIFY("h5sget_simple_extent_dims_f", idims(1), extend_dim(1), total_error) + CALL VERIFY("h5sget_simple_extent_dims_f", imaxdims(1), H5S_UNLIMITED_F, total_error) + + CALL h5sclose_f(filespace, hdferror) + CALL check("h5sclose_f",hdferror,total_error) + + CALL h5dclose_f(dset_id, error) + CALL check("h5dclose_f",error,total_error) + + CALL h5fclose_f(file_id, hdferror) + CALL check("h5fclose_f",hdferror,total_error) + + END SUBROUTINE H5D_async_tests + + + SUBROUTINE H5G_async_tests(cleanup, total_error) + ! + ! Test H5G async routines + ! + IMPLICIT NONE + LOGICAL, INTENT(IN) :: cleanup + INTEGER, INTENT(INOUT) :: total_error + + INTEGER(HID_T) :: fapl_id + INTEGER(HID_T) :: file_id + CHARACTER(len=80) :: filename = "h5g_tests.h5" + INTEGER :: hdferror + INTEGER(HID_T) :: es_id + INTEGER(SIZE_T) :: num_in_progress + LOGICAL :: err_occurred + + CHARACTER(LEN=6), PARAMETER:: grpname="group1" + + INTEGER(HID_T) :: group_id, group_id1 + INTEGER(HID_T) :: gcpl_id + CHARACTER(LEN=2) :: chr2 + CHARACTER(LEN=7) :: objname ! Object name + INTEGER :: v, i + + TYPE(H5G_info_t), DIMENSION(1:3) :: ginfo + + INTEGER :: error + INTEGER :: mpierror ! MPI error flag + INTEGER :: comm, info + INTEGER :: mpi_size, mpi_rank + + comm = MPI_COMM_WORLD + info = MPI_INFO_NULL + + CALL MPI_COMM_SIZE(comm, mpi_size, mpierror) + CALL MPI_COMM_RANK(comm, mpi_rank, mpierror) + + CALL H5EScreate_f(es_id, hdferror) + CALL check("H5EScreate_f", hdferror, total_error) + ! + ! Create the file. + ! + CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, total_error) + + CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) + CALL check("h5pset_fapl_mpio_f", hdferror, total_error) + + CALL h5fcreate_async_f(filename, H5F_ACC_TRUNC_F, file_id, es_id, error, access_prp = fapl_id ) + CALL check("h5fcreate_f",hdferror, total_error) + + ! Test group API + CALL H5Pcreate_f(H5P_GROUP_CREATE_F, gcpl_id, hdferror ) + CALL check("H5Pcreate_f", hdferror, total_error) + + CALL H5Pset_link_creation_order_f(gcpl_id, IOR(H5P_CRT_ORDER_TRACKED_F, H5P_CRT_ORDER_INDEXED_F), hdferror) + CALL check("H5Pset_link_creation_order_f", hdferror, total_error) + + CALL H5Gcreate_async_f (file_id, grpname, group_id, es_id, hdferror, gcpl_id=gcpl_id) + CALL check("H5Gcreate_async_f", hdferror, total_error) + + ! 
Create objects in new group created + DO v = 0, 2 + ! Make name for link + WRITE(chr2,'(I2.2)') v + objname = 'fill '//chr2 + + ! Create hard link, with group object + CALL H5Gcreate_async_f(group_id, objname, group_id1, es_id, hdferror, gcpl_id=gcpl_id) + CALL check("H5Gcreate_async_f", hdferror, total_error) + + ! Close group created + CALL H5Gclose_async_f(group_id1, es_id, hdferror) + CALL check("H5Gclose_async_f", hdferror, total_error) + ENDDO + + CALL H5Pclose_f(gcpl_id, hdferror) + CALL check("H5Pclose_f", hdferror, total_error) + + CALL H5Gclose_async_f(group_id, es_id, hdferror) + CALL check("H5Gclose_async_f", hdferror, total_error) + + ! + ! Close the file. + ! + CALL h5fclose_async_f(file_id, es_id, hdferror) + CALL check("h5fclose_async_f",hdferror,total_error) + + ! Complete the operations + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror); + CALL check("H5ESwait_f", hdferror, total_error) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + CALL VERIFY("H5ESwait_f", num_in_progress, 0_size_t , total_error) + + CALL h5fopen_async_f(filename, H5F_ACC_RDWR_F, file_id, es_id, hdferror, access_prp = fapl_id ) + CALL check("h5fopen_async_f",hdferror,total_error) + + CALL h5gopen_async_f(file_id, grpname, group_id, es_id, hdferror) + CALL check("h5gopen_async_f",hdferror,total_error) + + CALL h5gget_info_async_f(group_id, ginfo(1), es_id, hdferror) + CALL check("H5Gget_info_async_f", hdferror, total_error) + + CALL H5Gget_info_by_name_async_f(group_id, ".", ginfo(2), es_id, hdferror) + CALL check("H5Gget_info_by_name_async_f", hdferror, total_error) + + CALL H5Gget_info_by_idx_async_f(group_id, ".", H5_INDEX_CRT_ORDER_F, H5_ITER_INC_F, & + INT(0,HSIZE_T), ginfo(3), es_id, error) + CALL check("H5Gget_info_by_idx_async_f", hdferror, total_error) + + CALL H5Gclose_async_f(group_id, es_id, hdferror) + CALL check("H5Gclose_async_f", hdferror, total_error) + + CALL h5fclose_async_f(file_id, es_id, hdferror) + CALL check("h5fclose_async_f",hdferror,total_error) + + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror) + CALL check("H5ESwait_f", hdferror, total_error) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + + ! Verify the group APIs + DO i = 1, 2 + CALL VERIFY("H5Gget_info_by_name_f.storage_type", & + ginfo(i)%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL VERIFY("H5Gget_info_by_name_f.max_corder", ginfo(i)%max_corder, 3_C_INT64_T, total_error) + CALL VERIFY("H5Gget_info_by_name_f.nlinks", ginfo(i)%nlinks, 3_HSIZE_T, total_error) + CALL VERIFY("H5Gget_info_f.mounted", LOGICAL(ginfo(i)%mounted),.FALSE.,total_error) + ENDDO + CALL VERIFY("H5Gget_info_by_name_f.storage_type", & + ginfo(3)%storage_type, INT(H5G_STORAGE_TYPE_COMPACT_F, C_INT), total_error) + CALL VERIFY("H5Gget_info_by_name_f.max_corder", ginfo(3)%max_corder, 0_C_INT64_T, total_error) + CALL VERIFY("H5Gget_info_by_name_f.nlinks", ginfo(3)%nlinks, 0_HSIZE_T, total_error) + CALL VERIFY("H5Gget_info_f.mounted", LOGICAL(ginfo(3)%mounted),.FALSE.,total_error) + + END SUBROUTINE H5G_async_tests + + SUBROUTINE H5F_async_tests(cleanup, total_error) + ! + ! Test H5F async routines + ! 
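+    ! Exercises H5Fcreate_async_f, H5Fopen_async_f, H5Freopen_async_f,
+    ! H5Fflush_async_f and H5Fclose_async_f; every file identifier is closed
+    ! through the event set and H5ESwait_f is used to confirm that none of
+    ! the queued requests failed.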
+ IMPLICIT NONE + LOGICAL, INTENT(IN) :: cleanup + INTEGER, INTENT(INOUT) :: total_error + + INTEGER(HID_T) :: fapl_id + INTEGER(HID_T) :: file_id + CHARACTER(len=80) :: filename = "h5f_tests.h5" + INTEGER :: hdferror + INTEGER(HID_T) :: es_id + INTEGER(SIZE_T) :: num_in_progress + LOGICAL :: err_occurred + + INTEGER(HID_T) :: ret_file_id + + INTEGER :: error ! Error flags + INTEGER :: mpierror ! MPI error flag + INTEGER :: comm, info + INTEGER :: mpi_size, mpi_rank + + comm = MPI_COMM_WORLD + info = MPI_INFO_NULL + + CALL MPI_COMM_SIZE(comm, mpi_size, mpierror) + CALL MPI_COMM_RANK(comm, mpi_rank, mpierror) + + CALL H5EScreate_f(es_id, hdferror) + CALL check("H5EScreate_f", hdferror, total_error) + ! + ! Create the file. + ! + CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, total_error) + + CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) + CALL check("h5pset_fapl_mpio_f", hdferror, total_error) + + CALL h5fcreate_async_f(filename, H5F_ACC_TRUNC_F, file_id, es_id, error, access_prp = fapl_id ) + CALL check("h5fcreate_f",hdferror, total_error) + + ! + ! Close the file. + ! + CALL h5fclose_async_f(file_id, es_id, hdferror) + CALL check("h5fclose_async_f",hdferror,total_error) + + ! Complete the operations + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror); + CALL check("H5ESwait_f", hdferror, total_error) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + CALL VERIFY("H5ESwait_f", num_in_progress, 0_size_t , total_error) + + CALL H5Fopen_async_f(filename, H5F_ACC_RDWR_F, file_id, es_id, hdferror, access_prp = fapl_id ) + CALL check("h5fopen_async_f",hdferror,total_error) + + CALL H5Freopen_async_f(file_id, ret_file_id, es_id, hdferror) + CALL check("H5Freopen_async_f", hdferror, total_error) + + CALL H5Fclose_async_f(ret_file_id, es_id, hdferror) + CALL check("h5fclose_async_f",hdferror,total_error) + + CALL H5Fflush_async_f(file_id, H5F_SCOPE_GLOBAL_F, es_id, hdferror) + CALL check("h5fflush_async_f",hdferror, total_error) + + CALL H5Fclose_async_f(file_id, es_id, hdferror) + CALL check("h5fclose_async_f",hdferror,total_error) + + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror) + CALL check("H5ESwait_f", hdferror, total_error) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + + END SUBROUTINE H5F_async_tests + + SUBROUTINE H5L_async_tests(cleanup, total_error) + ! + ! Test H5L async routines + ! + IMPLICIT NONE + LOGICAL, INTENT(IN) :: cleanup + INTEGER, INTENT(INOUT) :: total_error + + INTEGER(HID_T) :: fapl_id + INTEGER(HID_T) :: file_id + CHARACTER(len=80) :: filename = "h5l_tests.h5" + INTEGER :: hdferror + INTEGER(HID_T) :: es_id + INTEGER(SIZE_T) :: num_in_progress + LOGICAL :: err_occurred + + INTEGER(hid_t) :: gid = -1, gid2 = -1, gid3 = -1 ! Group IDs + INTEGER(hid_t) :: aid = -1, aid2 = -1, aid3 = -1 ! Attribute ID + INTEGER(hid_t) :: sid = -1 ! Dataspace ID + CHARACTER(LEN=12), PARAMETER :: CORDER_GROUP_NAME = "corder_group" + CHARACTER(LEN=12), PARAMETER :: CORDER_GROUP_NAME2 = "corder_grp00" + LOGICAL(C_BOOL), TARGET :: exists1, exists2 + LOGICAL :: exists + TYPE(C_PTR) :: f_ptr + + INTEGER(HID_T) :: group_id ! Group ID + INTEGER(HID_T) :: gcpl_id ! Group creation property list ID + + INTEGER :: idx_type ! Type of index to operate on + LOGICAL, DIMENSION(1:2) :: use_index = (/.FALSE.,.TRUE./) + ! Use index on creation order values + INTEGER :: max_compact ! 
Maximum # of links to store in group compactly + INTEGER :: min_dense ! Minimum # of links to store in group "densely" + + CHARACTER(LEN=7) :: objname ! Object name + + INTEGER :: u ! Local index variable + INTEGER :: Input1, i + INTEGER(HID_T) :: group_id2 + INTEGER :: iorder ! Order within in the index + CHARACTER(LEN=2) :: chr2 + ! + INTEGER(hsize_t) idx ! Index in the group + TYPE(iter_info), TARGET :: info + TYPE(C_FUNPTR) :: f1 + TYPE(C_PTR) :: f2 + INTEGER(C_INT) :: ret_value + + INTEGER :: error ! Error flags + INTEGER :: mpierror ! MPI error flag + INTEGER :: comm + INTEGER :: mpi_size, mpi_rank + + INTEGER(SIZE_T) :: count + + comm = MPI_COMM_WORLD + + CALL MPI_COMM_SIZE(comm, mpi_size, mpierror) + CALL MPI_COMM_RANK(comm, mpi_rank, mpierror) + + CALL H5EScreate_f(es_id, hdferror) + CALL check("H5EScreate_f", hdferror, total_error) + ! + ! Create the file. + ! + CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror) + CALL check("h5pcreate_f", hdferror, total_error) + + CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror) + CALL check("h5pset_fapl_mpio_f", hdferror, total_error) + + CALL h5fcreate_async_f(filename, H5F_ACC_TRUNC_F, file_id, es_id, error, access_prp = fapl_id ) + CALL check("h5fcreate_f",hdferror, total_error) + + CALL H5Pcreate_f(H5P_GROUP_CREATE_F, gcpl_id, hdferror ) + CALL check("H5Pcreate_f", hdferror, total_error) + + CALL H5Pset_link_creation_order_f(gcpl_id, IOR(H5P_CRT_ORDER_TRACKED_F, H5P_CRT_ORDER_INDEXED_F), hdferror) + CALL check("H5Pset_link_creation_order_f", hdferror, total_error) + + ! Create group with creation order tracking on + CALL H5Gcreate_async_f(file_id, CORDER_GROUP_NAME, gid3, es_id, hdferror, gcpl_id=gcpl_id) + CALL check("H5Gcreate_f", hdferror, total_error) + + ! Create group + CALL H5Gcreate_async_f(file_id, "/Group1", gid, es_id, hdferror) + CALL check("H5Gcreate_async_f",hdferror, total_error) + + ! Create nested group + CALL H5Gcreate_async_f(gid, "Group2", gid2, es_id, hdferror) + CALL check("H5Gcreate_async_f",hdferror, total_error) + + CALL H5Screate_f(H5S_SCALAR_F, sid, hdferror) + CALL check("H5Screate_f",hdferror, total_error) + CALL H5Acreate_async_f(gid2, "Attr1", H5T_NATIVE_INTEGER, sid, aid, es_id, hdferror) + CALL check("H5Acreate_async_f",hdferror, total_error) + CALL H5Acreate_async_f(gid2, "Attr2", H5T_NATIVE_INTEGER, sid, aid2, es_id, hdferror) + CALL check("H5Acreate_async_f",hdferror, total_error) + CALL H5Acreate_async_f(gid2, "Attr3", H5T_NATIVE_INTEGER, sid, aid3, es_id, hdferror) + CALL check("H5Acreate_async_f",hdferror, total_error) + CALL H5Aclose_async_f(aid, es_id, hdferror) + CALL check("H5Aclose_async_f",hdferror, total_error) + CALL H5Aclose_async_f(aid2, es_id, hdferror) + CALL check("H5Aclose_async_f",hdferror, total_error) + CALL H5Aclose_async_f(aid3, es_id, hdferror) + CALL check("H5Aclose_async_f",hdferror, total_error) + CALL H5Sclose_f(sid,hdferror) + CALL check("H5Sclose_f",hdferror, total_error) + + ! Close groups + CALL h5gclose_async_f(gid2, es_id, hdferror) + CALL check("h5gclose_async_f",hdferror, total_error) + CALL h5gclose_async_f(gid, es_id, hdferror) + CALL check("h5gclose_async_f",hdferror, total_error) + CALL h5gclose_async_f(gid3, es_id, hdferror) + CALL check("h5gclose_async_f",hdferror, total_error) + + ! Close the group creation property list + CALL H5Pclose_f(gcpl_id, hdferror) + CALL check("H5Pclose_f", hdferror, total_error) + + ! 
Create soft links to groups created + CALL H5Lcreate_soft_async_f("/Group1", file_id, "/soft_one", es_id, hdferror) + CALL H5Lcreate_soft_async_f("/Group1/Group2", file_id, "/soft_two", es_id, hdferror) + + ! Create hard links to all groups + CALL H5Lcreate_hard_async_f(file_id, "/", file_id, "hard_zero", es_id, hdferror) + CALL check("H5Lcreate_hard_async_f",hdferror, total_error) + CALL H5Lcreate_hard_async_f(file_id, "/Group1", file_id, "hard_one", es_id, hdferror) + CALL check("H5Lcreate_hard_async_f",hdferror, total_error) + CALL H5Lcreate_hard_async_f(file_id, "/Group1/Group2", file_id, "hard_two", es_id, hdferror) + CALL check("H5Lcreate_hard_async_f",hdferror, total_error) + + ! + ! Close the file. + ! + CALL h5fclose_async_f(file_id, es_id, hdferror) + CALL check("h5fclose_async_f",hdferror,total_error) + + ! Complete the operations + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror); + CALL check("H5ESwait_f", hdferror, total_error) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + CALL VERIFY("H5ESwait_f", num_in_progress, 0_size_t , total_error) + + CALL H5Fopen_async_f(filename, H5F_ACC_RDWR_F, file_id, es_id, hdferror, access_prp = fapl_id ) + CALL check("h5fopen_async_f",hdferror,total_error) + + exists1 = .FALSE. + f_ptr = C_LOC(exists1) + CALL H5Lexists_async_f(file_id, "hard_zero", f_ptr, es_id, hdferror) + CALL check("H5Lexists_async_f",hdferror,total_error) + + exists2 = .FALSE. + f_ptr = C_LOC(exists2) + CALL H5Lexists_async_f(file_id, "hard_two", f_ptr, es_id, hdferror) + CALL check("H5Lexists_async_f",hdferror,total_error) + + CALL H5Ldelete_async_f(file_id, "hard_two", es_id, hdferror) + CALL check("H5Ldelete_async_f",hdferror,total_error) + + CALL H5Fclose_async_f(file_id, es_id, hdferror) + CALL check("h5fclose_async_f",hdferror,total_error) + + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror) + CALL check("H5ESwait_f", hdferror, total_error) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + + CALL VERIFY("H5Lexists_async_f", LOGICAL(exists1), .TRUE., total_error) + CALL VERIFY("H5Lexists_async_f", LOGICAL(exists2), .TRUE., total_error) + + CALL h5fopen_f(filename, H5F_ACC_RDWR_F, file_id, hdferror, access_prp = fapl_id ) + CALL check("h5fopen_f",hdferror, total_error) + + ! Verify the link was deleted + CALL H5Lexists_f(file_id, "hard_two", exists, hdferror) + CALL check("H5Lexist_f",hdferror, total_error) + CALL VERIFY("H5Ldelete_async_f", exists, .FALSE., total_error) + + CALL H5Fclose_f(file_id, hdferror) + CALL check("H5Fclose_f", hdferror,total_error) + + ! Loop over operating on different indices on link fields + DO idx_type = H5_INDEX_NAME_F, H5_INDEX_CRT_ORDER_F + ! Loop over operating in different orders + DO iorder = H5_ITER_INC_F, H5_ITER_DEC_F + ! Loop over using index for creation order value + DO i = 1, 2 + ! Create file + CALL H5Fcreate_async_f(filename, H5F_ACC_TRUNC_F, file_id, es_id, hdferror, access_prp=fapl_id) + CALL check("H5Fcreate_async_f", hdferror, total_error) + + ! Create group creation property list + CALL H5Pcreate_f(H5P_GROUP_CREATE_F, gcpl_id, hdferror ) + CALL check("H5Pcreate_f", hdferror, total_error) + + ! Set creation order tracking & indexing on group + IF(use_index(i))THEN + Input1 = H5P_CRT_ORDER_INDEXED_F + ELSE + Input1 = 0 + ENDIF + + CALL H5Pset_link_creation_order_f(gcpl_id, IOR(H5P_CRT_ORDER_TRACKED_F, Input1), hdferror) + CALL check("H5Pset_link_creation_order_f", hdferror, total_error) + + ! 
Create group with creation order tracking on
+ CALL H5Gcreate_async_f(file_id, CORDER_GROUP_NAME2, group_id, es_id, hdferror, gcpl_id=gcpl_id)
+ CALL check("H5Gcreate_async_f", hdferror, total_error)
+
+ ! Query the group creation properties
+ CALL H5Pget_link_phase_change_f(gcpl_id, max_compact, min_dense, hdferror)
+ CALL check("H5Pget_link_phase_change_f", hdferror, total_error)
+
+ ! Create several links, up to limit of compact form
+ DO u = 0, max_compact-1
+ ! Make name for link
+ WRITE(chr2,'(I2.2)') u
+ objname = 'fill '//chr2
+
+ ! Create hard link, with group object
+ CALL H5Gcreate_async_f(group_id, objname, group_id2, es_id, hdferror)
+ CALL check("H5Gcreate_async_f", hdferror, total_error)
+ CALL H5Gclose_async_f(group_id2, es_id, hdferror)
+ CALL check("H5Gclose_async_f", hdferror, total_error)
+ ENDDO
+
+ ! Delete links from compact group
+ DO u = 0, (max_compact - 1) -1
+ ! Delete first link in appropriate order
+ CALL H5Ldelete_by_idx_async_f(group_id, ".", idx_type, iorder, INT(0,HSIZE_T), es_id, hdferror)
+ CALL check("H5Ldelete_by_idx_async_f", hdferror, total_error)
+ ENDDO
+
+ idx = 0
+ info%command = 2
+ f1 = C_FUNLOC(liter_cb)
+ f2 = C_LOC(info)
+
+ CALL H5Literate_async_f(file_id, H5_INDEX_NAME_F, H5_ITER_INC_F, idx, f1, f2, ret_value, es_id, hdferror)
+ CALL check("H5Literate_async_f", hdferror, total_error)
+
+ ! Close the group
+ CALL H5Gclose_async_f(group_id, es_id, hdferror)
+ CALL check("H5Gclose_async_f", hdferror, total_error)
+ ! Close the group creation property list
+ CALL H5Pclose_f(gcpl_id, hdferror)
+ CALL check("H5Pclose_f", hdferror, total_error)
+ ! Close the file
+ CALL H5Fclose_async_f(file_id, es_id, hdferror)
+ CALL check("H5Fclose_async_f", hdferror, total_error)
+
+ CALL H5ESget_count_f(es_id, count, hdferror)
+
+ ! Complete the operations
+ CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror);
+ CALL check("H5ESwait_f", hdferror, total_error)
+ CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error)
+ CALL VERIFY("H5ESwait_f", num_in_progress, 0_size_t , total_error)
+
+ ! NOTE: ret_value will not be correct since H5Literate_async_f cannot pass back the callback's herr_t return value.
+ CALL VERIFY("H5Literate_async_f", info%type, op_data_type, total_error)
+ CALL VERIFY("H5Literate_async_f", info%command, op_data_command, total_error)
+ CALL VERIFY("H5Literate_async_f", info%name(1)(1:1), CORDER_GROUP_NAME2(1:1), total_error)
+
+ ENDDO
+ ENDDO
+
+ ENDDO
+
+ CALL H5Pclose_f(fapl_id, hdferror)
+ CALL check("H5Pclose_f",hdferror, total_error)
+
+ CALL H5ESclose_f(es_id, hdferror)
+ CALL check("H5ESclose_f", hdferror, total_error)
+
+
+ END SUBROUTINE H5L_async_tests
+
+ SUBROUTINE H5O_async_tests(cleanup, total_error)
+ !
+ ! Test H5O async routines
+ !
+ IMPLICIT NONE
+ LOGICAL, INTENT(IN) :: cleanup
+ INTEGER, INTENT(INOUT) :: total_error
+ INTEGER(HID_T) :: file_id
+ INTEGER(HID_T) :: group_id, group_id1, group_id2, group_id3
+ INTEGER(HID_T) :: space_id
+ INTEGER(HID_T) :: attr_id
+ INTEGER(HID_T) :: dset_id
+ INTEGER(HID_T) :: fapl_id
+ INTEGER(HID_T) :: lcpl_id
+ INTEGER(HID_T) :: ocpypl_id
+ TYPE(C_H5O_INFO_T), TARGET :: oinfo_f
+ TYPE(C_PTR) :: f_ptr
+ CHARACTER(len=80) :: filename = "h5o_tests.h5"
+
+ INTEGER :: hdferror ! Value returned from API calls
+
+ ! Data for tested h5ocopy_async_f
+ CHARACTER(LEN=3) , PARAMETER :: dataset = "DS1"
+ INTEGER , PARAMETER :: dim0 = 4
+
+ INTEGER(HSIZE_T), DIMENSION(1:1) :: dims2 = (/dim0/) ! 
size read/write buffer
+ INTEGER(C_INT), DIMENSION(1:8) :: atime, btime, ctime, mtime
+
+ INTEGER(HID_T) :: es_id
+ INTEGER(SIZE_T) :: num_in_progress
+ LOGICAL :: err_occurred
+
+ ! Make a FAPL that uses the "use the latest version of the format" bounds
+ CALL H5Pcreate_f(H5P_FILE_ACCESS_F,fapl_id,hdferror)
+ CALL check("h5Pcreate_f",hdferror,total_error)
+
+ ! Set the "use the latest version of the format" bounds for creating objects in the file
+ CALL H5Pset_libver_bounds_f(fapl_id, H5F_LIBVER_LATEST_F, H5F_LIBVER_LATEST_F, hdferror)
+ CALL check("H5Pset_libver_bounds_f",hdferror, total_error)
+
+ CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror)
+ CALL check("h5pset_fapl_mpio_f", hdferror, total_error)
+
+ CALL H5EScreate_f(es_id, hdferror)
+ CALL check("H5EScreate_f", hdferror, total_error)
+
+ ! Create a new HDF5 file
+ CALL H5Fcreate_async_f(filename, H5F_ACC_TRUNC_F, file_id, es_id, hdferror, H5P_DEFAULT_F, fapl_id)
+ CALL check("H5Fcreate_async_f", hdferror, total_error)
+
+ ! Close the FAPL
+ CALL h5pclose_f(fapl_id, hdferror)
+ CALL check("h5pclose_f",hdferror,total_error)
+
+ !
+ ! Create dataspace. Setting size to be the current size.
+ !
+ CALL h5screate_simple_f(1, dims2, space_id, hdferror)
+ CALL check("h5screate_simple_f", hdferror, total_error)
+ !
+ ! Create intermediate groups
+ !
+ CALL h5gcreate_async_f(file_id,"/G1",group_id1,es_id,hdferror)
+ CALL check("h5gcreate_async_f", hdferror, total_error)
+ CALL h5gcreate_async_f(file_id,"/G1/G2",group_id2,es_id,hdferror)
+ CALL check("h5gcreate_async_f", hdferror, total_error)
+ CALL h5gcreate_async_f(file_id,"/G1/G2/G3",group_id3,es_id,hdferror)
+ CALL check("h5gcreate_async_f", hdferror, total_error)
+
+ !
+ ! Create the dataset
+ !
+ CALL h5dcreate_async_f(group_id3, dataset, H5T_STD_I32LE, space_id, dset_id, es_id, hdferror)
+ CALL check("h5dcreate_async_f", hdferror, total_error)
+
+ ! Create a soft link to /G1
+ CALL h5lcreate_soft_async_f("/G1", file_id, "/G1_LINK", es_id, hdferror)
+ CALL check("h5lcreate_soft_async_f", hdferror, total_error)
+
+ ! Create a soft link to /G1000, which does not exist
+ CALL h5lcreate_soft_async_f("/G1000", file_id, "/G1_FALSE", es_id, hdferror)
+ CALL check("h5lcreate_soft_async_f", hdferror, total_error)
+
+ ! Create a soft link to /G1_FALSE, which is itself a dangling soft link
+ CALL h5lcreate_soft_async_f("/G1_FALSE", file_id, "/G2_FALSE", es_id, hdferror)
+ CALL check("h5lcreate_soft_async_f", hdferror, total_error)
+ !
+ ! Close and release resources.
+ !
+ CALL h5dclose_async_f(dset_id, es_id, hdferror)
+ CALL check("h5dclose_async_f", hdferror, total_error)
+ CALL h5sclose_f(space_id, hdferror)
+ CALL check("h5sclose_f", hdferror, total_error)
+ CALL h5gclose_async_f(group_id1, es_id, hdferror)
+ CALL check("h5gclose_async_f", hdferror, total_error)
+ CALL h5gclose_async_f(group_id2, es_id, hdferror)
+ CALL check("h5gclose_async_f", hdferror, total_error)
+ CALL h5gclose_async_f(group_id3, es_id, hdferror)
+ CALL check("h5gclose_async_f", hdferror, total_error)
+
+ ! Test opening an object by index
+ CALL h5oopen_by_idx_async_f(file_id, "/G1/G2/G3", H5_INDEX_NAME_F, H5_ITER_INC_F, 0_hsize_t, group_id, es_id, hdferror)
+ CALL check("h5oopen_by_idx_async_f", hdferror, total_error)
+
+ CALL h5oclose_async_f(group_id, es_id, hdferror)
+ CALL check("h5oclose_async_f", hdferror, total_error)
+
+ ! 
Test opening an object + CALL h5oopen_async_f(file_id, "/G1/G2/G3", group_id, es_id, hdferror) + CALL check("h5oopen_by_idx_f", hdferror, total_error) + + CALL H5Screate_f(H5S_SCALAR_F, space_id, hdferror) + CALL check("H5Screate_f", hdferror, total_error) + + CALL h5acreate_async_f(group_id, "ATTR", H5T_NATIVE_INTEGER, space_id, attr_id, es_id, hdferror) + CALL check("h5acreate_f",hdferror,total_error) + + CALL H5Aclose_async_f(attr_id, es_id, hdferror) + CALL check("H5Aclose_async_f",hdferror,total_error) + + CALL h5oclose_async_f(group_id, es_id, hdferror) + CALL check("h5gclose_f", hdferror, total_error) + + f_ptr = C_LOC(oinfo_f) + CALL H5Oget_info_by_name_async_f(file_id, "/G1/G2/G3", f_ptr, es_id, hdferror, fields=H5O_INFO_ALL_F) + CALL check("H5Oget_info_by_name_async_f", hdferror, total_error) + ! + ! create property to pass copy options + ! + CALL h5pcreate_f(H5P_LINK_CREATE_F, lcpl_id, hdferror) + CALL check("h5Pcreate_f", hdferror, total_error) + + CALL h5pset_create_inter_group_f(lcpl_id, 1, hdferror) + CALL check("H5Pset_create_inter_group_f", hdferror, total_error) + ! + ! Check optional parameter lcpl_id, this would fail if lcpl_id was not specified + ! + CALL h5ocopy_async_f(file_id, "/G1/G2/G3/DS1", file_id, "/G1/G_cp1/DS2", es_id, hdferror, lcpl_id=lcpl_id) + CALL check("h5ocopy_f -- W/ OPTION: lcpl_id", hdferror ,total_error) + + CALL h5pclose_f(lcpl_id, hdferror) + CALL check("h5pclose_f",hdferror,total_error) + + CALL h5pcreate_f(H5P_OBJECT_COPY_F, ocpypl_id, hdferror) + CALL check("h5Pcreate_f",hdferror,total_error) + + CALL h5pset_copy_object_f(ocpypl_id, H5O_COPY_SHALLOW_HIERARCHY_F, hdferror) + CALL check("H5Pset_copy_object_f",hdferror,total_error) + + CALL h5ocopy_async_f(file_id, "/G1/G2", file_id, "/G1/G_cp2", es_id, hdferror, ocpypl_id=ocpypl_id) + CALL check("h5ocopy_f",hdferror,total_error) + + CALL h5pclose_f(ocpypl_id, hdferror) + CALL check("h5pclose_f",hdferror,total_error) + + CALL h5fclose_async_f(file_id, es_id, hdferror) + CALL check("h5fclose_f",hdferror,total_error) + + CALL H5ESwait_f(es_id, H5ES_WAIT_FOREVER_F, num_in_progress, err_occurred, hdferror) + CALL check("H5ESwait_f", hdferror, total_error) + CALL VERIFY("H5ESwait_f", err_occurred, .FALSE., total_error) + + IF( oinfo_f%fileno.LE.0 )THEN + hdferror = -1 + CALL check("H5Oget_info_by_name_async_f", hdferror, total_error) + ENDIF + + atime(1:8) = h5gmtime(oinfo_f%atime) + btime(1:8) = h5gmtime(oinfo_f%btime) + ctime(1:8) = h5gmtime(oinfo_f%ctime) + mtime(1:8) = h5gmtime(oinfo_f%mtime) + + IF( atime(1) .LT. 2021 .OR. & + btime(1).LT. 2021 .OR. & + ctime(1) .LT. 2021 .OR. & + mtime(1) .LT. 2021 )THEN + hdferror = -1 + ENDIF + CALL check("H5Oget_info_by_name_async_f", hdferror, total_error) + + CALL VERIFY("H5Oget_info_by_name_async_f", oinfo_f%num_attrs, 1_HSIZE_T, total_error) + CALL VERIFY("H5Oget_info_by_name_async_f", oinfo_f%type, INT(H5G_GROUP_F, C_INT), total_error) + + CALL H5ESclose_f(es_id, hdferror) + CALL check("H5ESclose_f", hdferror, total_error) + + END SUBROUTINE H5O_async_tests + +END MODULE test_async_APIs + +! +! The main program for async HDF5 Fortran tests +! +PROGRAM async_test + USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_INT64_T + USE HDF5 + USE MPI + USE TH5_MISC + USE TH5_MISC_GEN + USE test_async_APIs + + IMPLICIT NONE + + INTEGER :: total_error = 0 ! sum of the number of errors + INTEGER :: mpierror ! MPI hdferror flag + INTEGER :: mpi_size ! number of processes in the group of communicator + INTEGER :: mpi_rank ! 
rank of the calling process in the communicator + INTEGER :: required, provided + + INTEGER(HID_T) :: vol_id + INTEGER :: hdferror + LOGICAL :: registered + INTEGER :: sum + INTEGER :: nerrors = 0 + + LOGICAL :: cleanup + INTEGER :: ret_total_error = 0 + + ! + ! initialize MPI + ! + required = MPI_THREAD_MULTIPLE + CALL mpi_init_thread(required, provided, mpierror) + IF (mpierror .NE. MPI_SUCCESS) THEN + WRITE(*,*) "MPI_INIT_THREAD *FAILED*" + nerrors = nerrors + 1 + ENDIF + IF (provided .NE. required) THEN + mpi_thread_mult = .FALSE. + ENDIF + + CALL mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror ) + IF (mpierror .NE. MPI_SUCCESS) THEN + WRITE(*,*) "MPI_COMM_RANK *FAILED* Process = ", mpi_rank + nerrors = nerrors + 1 + ENDIF + CALL mpi_comm_size( MPI_COMM_WORLD, mpi_size, mpierror ) + IF (mpierror .NE. MPI_SUCCESS) THEN + WRITE(*,*) "MPI_COMM_SIZE *FAILED* Process = ", mpi_rank + nerrors = nerrors + 1 + ENDIF + + IF(nerrors.NE.0)THEN + IF(mpi_rank==0) CALL write_test_status(sum, & + 'Testing Initializing mpi_init_thread', total_error) + CALL MPI_Barrier(MPI_COMM_WORLD, mpierror) + CALL mpi_abort(MPI_COMM_WORLD, 1, mpierror) + ENDIF + + IF(mpi_rank==0) CALL write_test_header("ASYNC FORTRAN TESTING") + + ! + ! Initialize the HDF5 fortran interface + ! + CALL h5open_f(hdferror) + + + ! CHECK ASYNC VOLS AVAILABILITY + ! + ! (1) Check if ASYNC VOL is available + CALL H5VLis_connector_registered_by_name_f("async", registered, hdferror) + CALL check("H5VLis_connector_registered_by_name_f", hdferror, total_error) + + IF(.NOT.registered)THEN + + ! (2) check if the DAOS VOL is available + CALL H5VLis_connector_registered_by_name_f("daos", registered, hdferror) + CALL check("H5VLis_connector_registered_by_name_f", hdferror, total_error) + + IF(.NOT.registered)THEN + ! No async compatible VOL found + async_enabled = .FALSE. + ELSE + CALL H5Vlregister_connector_by_name_f("daos", vol_id, hdferror) + CALL check("H5Vlregister_connector_by_name_f", hdferror, total_error) + ENDIF + + ELSE + CALL H5Vlregister_connector_by_name_f("async", vol_id, hdferror) + CALL check("H5Vlregister_connector_by_name_f", hdferror, total_error) + ENDIF + + IF ( (async_enabled .EQV. .TRUE.) .AND. (mpi_thread_mult .EQV. .FALSE.) ) THEN + total_error = -1 ! Skip test + IF(mpi_rank==0) CALL write_test_status(total_error, & + "No MPI_Init_thread support for MPI_THREAD_MULTIPLE", total_error) + CALL MPI_Barrier(MPI_COMM_WORLD, mpierror) + CALL MPI_Finalize(mpierror) + STOP + ENDIF + +! IF(total_error.LT.0)THEN +! IF(mpi_rank==0) CALL write_test_status(total_error, & +! 'Testing async APIs', total_error) +! STOP +! ENDIF + + ! H5ES API TESTING + ret_total_error = 0 + CALL H5ES_tests(cleanup, ret_total_error) + IF(mpi_rank==0) CALL write_test_status(ret_total_error, & + 'H5ES API tests', total_error) + + ! H5A ASYNC API TESTING + ret_total_error = 0 + CALL H5A_async_tests(cleanup, ret_total_error) + IF(mpi_rank==0) CALL write_test_status(ret_total_error, & + 'H5A async API tests', total_error) + + ! H5D ASYNC API TESTING + ret_total_error = 0 + CALL H5D_async_tests(cleanup, ret_total_error) + IF(mpi_rank==0) CALL write_test_status(ret_total_error, & + 'H5D async API tests', total_error) + + ! H5G ASYNC API TESTING + ret_total_error = 0 + CALL H5G_async_tests(cleanup, ret_total_error) + IF(mpi_rank==0) CALL write_test_status(ret_total_error, & + 'H5G async API tests', total_error) + + ! 
H5F ASYNC API TESTING + ret_total_error = 0 + CALL H5F_async_tests(cleanup, ret_total_error) + IF(mpi_rank==0) CALL write_test_status(ret_total_error, & + 'H5F async API tests', total_error) + + ! H5L ASYNC API TESTING + ret_total_error = 0 + CALL H5L_async_tests(cleanup, ret_total_error) + IF(mpi_rank==0) CALL write_test_status(ret_total_error, & + 'H5L async API tests', total_error) + + ! H5O ASYNC API TESTING + ret_total_error = 0 + CALL H5O_async_tests(cleanup, ret_total_error) + IF(mpi_rank==0) CALL write_test_status(ret_total_error, & + 'H5O async API tests', total_error) + + IF(async_enabled)THEN + CALL H5VLclose_f(vol_id, hdferror) + CALL check("H5VLclose_f", hdferror, total_error) + ENDIF + + ! + ! close HDF5 interface + ! + CALL h5close_f(hdferror) + + CALL MPI_ALLREDUCE(total_error, sum, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, mpierror) + + IF(mpi_rank==0) CALL write_test_footer() + + ! + ! close MPI + ! + IF (sum == 0) THEN + CALL mpi_finalize(mpierror) + IF (mpierror .NE. MPI_SUCCESS) THEN + WRITE(*,*) "MPI_FINALIZE *FAILED* Process = ", mpi_rank + ENDIF + ELSE + WRITE(*,*) 'Errors detected in process ', mpi_rank + CALL mpi_abort(MPI_COMM_WORLD, 1, mpierror) + IF (mpierror .NE. MPI_SUCCESS) THEN + WRITE(*,*) "MPI_ABORT *FAILED* Process = ", mpi_rank + ENDIF + ENDIF + + ! + ! end main program + ! + +END PROGRAM async_test diff --git a/fortran/testpar/ptest.F90 b/fortran/testpar/ptest.F90 index 29749332f10..b754e297cc0 100644 --- a/fortran/testpar/ptest.F90 +++ b/fortran/testpar/ptest.F90 @@ -55,6 +55,9 @@ PROGRAM parallel_test ! initialize the HDF5 fortran interface ! CALL h5open_f(hdferror) + + IF(mpi_rank==0) CALL write_test_header("COMPREHENSIVE PARALLEL FORTRAN TESTS") + ! ! test write/read dataset by hyperslabs (contiguous/chunk) with independent/collective MPI I/O ! @@ -94,6 +97,8 @@ PROGRAM parallel_test CALL MPI_ALLREDUCE(total_error, sum, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, mpierror) + IF(mpi_rank==0) CALL write_test_footer() + ! ! close MPI ! diff --git a/fortran/testpar/subfiling.F90 b/fortran/testpar/subfiling.F90 index 18614b655f2..043ac6cb771 100644 --- a/fortran/testpar/subfiling.F90 +++ b/fortran/testpar/subfiling.F90 @@ -91,6 +91,8 @@ PROGRAM subfiling_test ! CALL h5open_f(hdferror) + IF(mpi_rank==0) CALL write_test_header("SUBFILING FORTRAN TESTING") + ! *********************************** ! Test H5Pset/get_mpi_params_f APIs ! *********************************** @@ -384,6 +386,9 @@ PROGRAM subfiling_test WRITE(*,*) "MPI_ABORT *FAILED* Process = ", mpi_rank ENDIF ENDIF + + IF(mpi_rank==0) CALL write_test_footer() + ! ! end main program ! @@ -392,8 +397,13 @@ PROGRAM subfiling_test CALL mpi_init(mpierror) CALL mpi_comm_rank(MPI_COMM_WORLD, mpi_rank, mpierror) - IF(mpi_rank==0) CALL write_test_status( -1, & - 'Subfiling not enabled', total_error) + + IF(mpi_rank==0) THEN + CALL write_test_header("SUBFILING FORTRAN TESTING") + CALL write_test_status( -1, 'Subfiling not enabled', total_error) + CALL write_test_footer() + ENDIF + CALL mpi_finalize(mpierror) #endif diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 62ac8f2e784..4de0b9487fe 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -99,8 +99,9 @@ New Features Fortran Library: ---------------- - - + - Added Fortran async APIs + H5A, H5D, H5ES, H5G, H5F, H5L and H5O async APIs were added. C++ Library: ------------ From ca41b3c1304e39994d4620222bfb878519f909e6 Mon Sep 17 00:00:00 2001 From: "H. 
Joe Lee" Date: Sun, 23 Apr 2023 15:07:50 -0500 Subject: [PATCH 153/231] Change Powershell to PowerShell in docs (#2794) --- doc/getting-started-with-hdf5-development.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/getting-started-with-hdf5-development.md b/doc/getting-started-with-hdf5-development.md index 732d81709b8..04c42dc0c65 100644 --- a/doc/getting-started-with-hdf5-development.md +++ b/doc/getting-started-with-hdf5-development.md @@ -746,7 +746,7 @@ to do this via a script. These are normally named `test_.sh.in`. The step. In the past, we have tried to stick to POSIX Bourne shell scripts, but many scripts now require bash. -If you write a new test script, it is important to also add a Powershell +If you write a new test script, it is important to also add a PowerShell equivalent for testing on Windows. It's helpful to run any new shell scripts through `shellcheck` From ea6a4e1011d955b7b2aaf858d738e99bfe0bf422 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Sun, 23 Apr 2023 15:08:44 -0500 Subject: [PATCH 154/231] Add bug note to H5Dget_space_status documentation (#2788) --- src/H5Dpublic.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index 9ec6f708abb..a1d1b062a3f 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -461,6 +461,20 @@ H5_DLL hid_t H5Dget_space_async(hid_t dset_id, hid_t es_id); * \details H5Dget_space_status() determines whether space has been allocated * for the dataset \p dset_id. * + * \note \Bold{BUG:} Prior to the HDF5 1.13.1, 1.12.2 and 1.10.9 releases, + * H5Dget_space_status() may return incorrect space allocation status + * values for datasets with filters applied to them. + * H5Dget_space_status() calculated the space allocation status by + * comparing the sum of the sizes of all the allocated chunks in the + * dataset against the total data size of the dataset, as calculated by + * the number of elements in the dataset's dataspace multiplied by the + * dataset's datatype size. If the dataset had any compression filters + * applied to it and the dataset chunks were successfully compressed, + * the sum of the sizes of the allocated dataset chunks would generally + * always be less than the total data size of the dataset, and + * H5Dget_space_status() wouldn't ever return + * `H5D_SPACE_STATUS_ALLOCATED`. + * * \since 1.6.0 * */ From e69f9f770852e77f58b5b8df2a26aacd86e1b33c Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Sun, 23 Apr 2023 15:09:40 -0500 Subject: [PATCH 155/231] docs: remove signature requirement from CONTRIBUTING.md (#2784) Per 2023-04-14 engineering team meeting decision --- CONTRIBUTING.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 62b00eabd80..687e9819008 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -94,7 +94,6 @@ The release note entry syntax is shown below. Problem/Solution - Signature ``` * **Entry Elements** - The elements of the entry - title, problem, solution, and signature - are described in more detail in the table @@ -112,10 +111,6 @@ You might also consider the following as you describe the solution: * What is the functional impact? * Is there a workaround – a way for users design their software so as not to encounter the issue? If so, what is the workaround? * For a performance fix, how has the performance improved? Links to published documentation would be good. 
- * **Signature** - Each entry must be signed with the initials of the author, the date in YYYY/MM/DD format, and the JIRA ticket number. The -following is an example entry written by developer Xavier Zolo on April 16, 2014 about JIRA ticket HDFFV-5555: (XYZ - 2014/04/16, HDFFV-5555). The -signature is enclosed in parentheses. JIRA or Github numbers should not be used in the description of the problem or the solution. They are like -abbreviations that customers and external users will not be able to interpret. # Checklist From 12a78a53ec3b3b5c1eb916204289a00a57f3961f Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Sun, 23 Apr 2023 15:11:17 -0500 Subject: [PATCH 156/231] Correct CMake option defaults - deprecate szip for libaec (#2778) --- CMakeFilters.cmake | 34 +--- CMakeInstallation.cmake | 6 +- config/cmake/HDFLibMacros.cmake | 42 ++--- config/cmake/cacheinit.cmake | 8 +- config/cmake/{ => examples}/CTestScript.cmake | 27 ++-- .../{ => examples}/HDF5_Examples.cmake.in | 0 .../HDF5_Examples_options.cmake | 0 config/cmake/hdf5-config.cmake.in | 2 +- config/cmake/mccacheinit.cmake | 20 ++- release_docs/INSTALL_CMake.txt | 149 ++++++++++-------- release_docs/RELEASE.txt | 5 + 11 files changed, 143 insertions(+), 150 deletions(-) rename config/cmake/{ => examples}/CTestScript.cmake (88%) rename config/cmake/{ => examples}/HDF5_Examples.cmake.in (100%) rename config/cmake/{ => examples}/HDF5_Examples_options.cmake (100%) diff --git a/CMakeFilters.cmake b/CMakeFilters.cmake index eb62071e073..a55d7ae0295 100644 --- a/CMakeFilters.cmake +++ b/CMakeFilters.cmake @@ -9,7 +9,6 @@ # If you do not have access to either file, you may request a copy from # help@hdfgroup.org. # -option (USE_LIBAEC "Use AEC library as SZip Filter" OFF) option (USE_LIBAEC_STATIC "Use static AEC library " OFF) option (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 0) option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0) @@ -29,8 +28,6 @@ endif () option (BUILD_SZIP_WITH_FETCHCONTENT "Use FetchContent to use original source files" OFF) if (BUILD_SZIP_WITH_FETCHCONTENT) - # Only libaec library is usable - set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE) set (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 1) if (NOT LIBAEC_USE_LOCALCONTENT) set (SZIP_URL ${LIBAEC_TGZ_ORIGPATH}/${LIBAEC_TGZ_ORIGNAME}) @@ -66,10 +63,7 @@ if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MAT message (VERBOSE "Filter ZLIB file ${ZLIB_URL} not found") endif () if (NOT BUILD_SZIP_WITH_FETCHCONTENT) - set (SZIP_URL ${TGZPATH}/${SZIP_TGZ_NAME}) - if (USE_LIBAEC) - set (SZIP_URL ${TGZPATH}/${SZAEC_TGZ_NAME}) - endif () + set (SZIP_URL ${TGZPATH}/${SZAEC_TGZ_NAME}) endif () if (NOT EXISTS "${SZIP_URL}") set (HDF5_ENABLE_SZIP_SUPPORT OFF CACHE BOOL "" FORCE) @@ -142,15 +136,13 @@ if (HDF5_ENABLE_SZIP_SUPPORT) option (HDF5_ENABLE_SZIP_ENCODING "Use SZip Encoding" OFF) if (NOT SZIP_USE_EXTERNAL) set(SZIP_FOUND FALSE) - if (USE_LIBAEC) - set(libaec_USE_STATIC_LIBS ${USE_LIBAEC_STATIC}) - find_package (libaec 1.0.5 CONFIG) - if (SZIP_FOUND) - set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_LIBRARIES}) - endif () + set(libaec_USE_STATIC_LIBS ${USE_LIBAEC_STATIC}) + find_package (libaec 1.0.5 CONFIG) + if (SZIP_FOUND) + set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_LIBRARIES}) endif () if (NOT SZIP_FOUND) - find_package (SZIP NAMES ${SZIP_PACKAGE_NAME}${HDF_PACKAGE_EXT} COMPONENTS static shared) + find_package (SZIP NAMES 
${LIBAEC_PACKAGE_NAME}${HDF_PACKAGE_EXT} COMPONENTS static shared) if (NOT SZIP_FOUND) find_package (SZIP) # Legacy find endif () @@ -171,12 +163,7 @@ if (HDF5_ENABLE_SZIP_SUPPORT) set (H5_HAVE_SZLIB_H 1) set (H5_HAVE_LIBSZ 1) message (VERBOSE "SZIP is built from fetch content") - if (USE_LIBAEC) - message (VERBOSE "... with library AEC") - set (SZIP_PACKAGE_NAME ${LIBAEC_PACKAGE_NAME}) - else () - set (SZIP_PACKAGE_NAME ${SZIP_PACKAGE_NAME}) - endif () + message (VERBOSE "... with library AEC") set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_STATIC_LIBRARY}) elseif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") EXTERNAL_SZIP_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT} ${HDF5_ENABLE_SZIP_ENCODING}) @@ -184,12 +171,7 @@ if (HDF5_ENABLE_SZIP_SUPPORT) set (H5_HAVE_SZLIB_H 1) set (H5_HAVE_LIBSZ 1) message (VERBOSE "Filter SZIP is built") - if (USE_LIBAEC) - message (VERBOSE "... with library AEC") - set (SZIP_PACKAGE_NAME ${LIBAEC_PACKAGE_NAME}) - else () - set (SZIP_PACKAGE_NAME ${SZIP_PACKAGE_NAME}) - endif () + message (VERBOSE "... with library AEC") set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_STATIC_LIBRARY}) else () message (FATAL_ERROR "SZIP is Required for SZIP support in HDF5") diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake index d13f8bb781a..00ed5cd6795 100644 --- a/CMakeInstallation.cmake +++ b/CMakeInstallation.cmake @@ -142,7 +142,7 @@ install ( option (HDF5_PACK_EXAMPLES "Package the HDF5 Library Examples Compressed File" OFF) if (HDF5_PACK_EXAMPLES) configure_file ( - ${HDF_RESOURCES_DIR}/HDF5_Examples.cmake.in + ${HDF_RESOURCES_DIR}/examples/HDF5_Examples.cmake.in ${HDF5_BINARY_DIR}/HDF5_Examples.cmake @ONLY ) install ( @@ -169,13 +169,13 @@ if (HDF5_PACK_EXAMPLES) ) install ( FILES - ${HDF_RESOURCES_DIR}/CTestScript.cmake + ${HDF_RESOURCES_DIR}/examples/CTestScript.cmake DESTINATION ${HDF5_INSTALL_DATA_DIR} COMPONENT hdfdocuments ) install ( FILES - ${HDF_RESOURCES_DIR}/HDF5_Examples_options.cmake + ${HDF_RESOURCES_DIR}/examples/HDF5_Examples_options.cmake DESTINATION ${HDF5_INSTALL_DATA_DIR} COMPONENT hdfdocuments ) diff --git a/config/cmake/HDFLibMacros.cmake b/config/cmake/HDFLibMacros.cmake index d2f2660d0a2..b3985d5db6a 100644 --- a/config/cmake/HDFLibMacros.cmake +++ b/config/cmake/HDFLibMacros.cmake @@ -66,7 +66,6 @@ macro (ORIGINAL_SZIP_LIBRARY compress_type encoding) add_subdirectory(${szip_SOURCE_DIR} ${szip_BINARY_DIR}) endif() - set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE) add_library (${HDF_PACKAGE_NAMESPACE}szaec-static ALIAS szaec-static) add_library (${HDF_PACKAGE_NAMESPACE}aec-static ALIAS aec-static) set (SZIP_STATIC_LIBRARY "${HDF_PACKAGE_NAMESPACE}szaec-static;${HDF_PACKAGE_NAMESPACE}aec-static") @@ -127,47 +126,30 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding) endif () externalproject_get_property (SZIP BINARY_DIR SOURCE_DIR) # -##include (${BINARY_DIR}/${SZIP_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake) +##include (${BINARY_DIR}/${LIBAEC_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake) # Create imported target szip-static - if (USE_LIBAEC) - add_library(${HDF_PACKAGE_NAMESPACE}szaec-static STATIC IMPORTED) - HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}szaec-static "szaec" STATIC "") - add_dependencies (${HDF_PACKAGE_NAMESPACE}szaec-static SZIP) - add_library(${HDF_PACKAGE_NAMESPACE}aec-static STATIC IMPORTED) - HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}aec-static "aec" STATIC "") - add_dependencies (${HDF_PACKAGE_NAMESPACE}aec-static SZIP) - set 
(SZIP_STATIC_LIBRARY "${HDF_PACKAGE_NAMESPACE}szaec-static;${HDF_PACKAGE_NAMESPACE}aec-static") - else () - add_library(${HDF_PACKAGE_NAMESPACE}szip-static STATIC IMPORTED) - HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}szip-static "szip" STATIC "") - add_dependencies (${HDF_PACKAGE_NAMESPACE}szip-static SZIP) - set (SZIP_STATIC_LIBRARY "${HDF_PACKAGE_NAMESPACE}szip-static") - endif () + add_library(${HDF_PACKAGE_NAMESPACE}szaec-static STATIC IMPORTED) + HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}szaec-static "szaec" STATIC "") + add_dependencies (${HDF_PACKAGE_NAMESPACE}szaec-static SZIP) + add_library(${HDF_PACKAGE_NAMESPACE}aec-static STATIC IMPORTED) + HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}aec-static "aec" STATIC "") + add_dependencies (${HDF_PACKAGE_NAMESPACE}aec-static SZIP) + set (SZIP_STATIC_LIBRARY "${HDF_PACKAGE_NAMESPACE}szaec-static;${HDF_PACKAGE_NAMESPACE}aec-static") set (SZIP_LIBRARIES ${SZIP_STATIC_LIBRARY}) set (SZIP_INCLUDE_DIR_GEN "${BINARY_DIR}") - if (USE_LIBAEC) - set (SZIP_INCLUDE_DIR "${SOURCE_DIR}/include") - else () - set (SZIP_INCLUDE_DIR "${SOURCE_DIR}/src") - endif () + set (SZIP_INCLUDE_DIR "${SOURCE_DIR}/include") set (SZIP_FOUND 1) set (SZIP_INCLUDE_DIRS ${SZIP_INCLUDE_DIR_GEN} ${SZIP_INCLUDE_DIR}) endmacro () #------------------------------------------------------------------------------- macro (PACKAGE_SZIP_LIBRARY compress_type) - set (SZIP_HDR "SZconfig") - if (USE_LIBAEC) - set (SZIP_HDR "aec_config") - else () - set (SZIP_HDR "libaec_Export") - endif () add_custom_target (SZIP-GenHeader-Copy ALL - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${SZIP_INCLUDE_DIR_GEN}/${SZIP_HDR}.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ - COMMENT "Copying ${SZIP_INCLUDE_DIR_GEN}/${SZIP_HDR}.h to ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/" + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${SZIP_INCLUDE_DIR_GEN}/aec_config.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ + COMMENT "Copying ${SZIP_INCLUDE_DIR_GEN}/aec_config.h to ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/" ) - set (EXTERNAL_HEADER_LIST ${EXTERNAL_HEADER_LIST} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${SZIP_HDR}.h) + set (EXTERNAL_HEADER_LIST ${EXTERNAL_HEADER_LIST} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/aec_config.h) if (${compress_type} MATCHES "GIT" OR ${compress_type} MATCHES "TGZ") add_dependencies (SZIP-GenHeader-Copy SZIP) endif () diff --git a/config/cmake/cacheinit.cmake b/config/cmake/cacheinit.cmake index 7c5cc1e2bb5..44608915241 100644 --- a/config/cmake/cacheinit.cmake +++ b/config/cmake/cacheinit.cmake @@ -25,6 +25,8 @@ set (HDF5_BUILD_CPP_LIB ON CACHE BOOL "Build C++ support" FORCE) set (HDF5_BUILD_FORTRAN ON CACHE BOOL "Build FORTRAN support" FORCE) +set (HDF5_BUILD_JAVA ON CACHE BOOL "Build JAVA support" FORCE) + set (HDF5_INSTALL_MOD_FORTRAN "NO" CACHE STRING "Copy FORTRAN mod files to include directory (NO SHARED STATIC)" FORCE) set_property (CACHE HDF5_INSTALL_MOD_FORTRAN PROPERTY STRINGS NO SHARED STATIC) @@ -44,21 +46,17 @@ set (HDF_TEST_EXPRESS "2" CACHE STRING "Control testing framework (0-3)" FORCE) set (HDF5_MINGW_STATIC_GCC_LIBS ON CACHE BOOL "Statically link libgcc/libstdc++" FORCE) -set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE) +set (HDF5_ALLOW_EXTERNAL_SUPPORT "TGZ" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE) set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) - set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use 
HDF5_ZLib from compressed file" FORCE) set (ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.2.13" CACHE STRING "Use ZLIB from original location" FORCE) set (ZLIB_TGZ_ORIGNAME "zlib-1.2.13.tar.gz" CACHE STRING "Use ZLIB from original compressed file" FORCE) set (ZLIB_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for ZLIB FetchContent" FORCE) -set (SZIP_PACKAGE_NAME "szip" CACHE STRING "Name of SZIP package" FORCE) set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) -set (SZIP_TGZ_NAME "SZip.tar.gz" CACHE STRING "Use SZip from compressed file" FORCE) set (SZAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) -set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE) set (LIBAEC_TGZ_ORIGPATH "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6/libaec-1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original location" FORCE) set (LIBAEC_TGZ_ORIGNAME "libaec-v1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original compressed file" FORCE) set (LIBAEC_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for LIBAEC FetchContent" FORCE) diff --git a/config/cmake/CTestScript.cmake b/config/cmake/examples/CTestScript.cmake similarity index 88% rename from config/cmake/CTestScript.cmake rename to config/cmake/examples/CTestScript.cmake index a2b122c35d9..657806ce3c2 100644 --- a/config/cmake/CTestScript.cmake +++ b/config/cmake/examples/CTestScript.cmake @@ -35,23 +35,30 @@ if (NOT SITE_OS_NAME) message (STATUS "Dashboard script uname output: ${osname}-${osrel}-${cpu}\n") set (CTEST_BUILD_NAME "${osname}-${osrel}-${cpu}") - if (SITE_BUILDNAME_SUFFIX) - set (CTEST_BUILD_NAME "${SITE_BUILDNAME_SUFFIX}-${CTEST_BUILD_NAME}") - endif () - set (BUILD_OPTIONS "${ADD_BUILD_OPTIONS}") else () ## machine name provided ## -------------------------- if (CMAKE_HOST_UNIX) - set(CTEST_BUILD_NAME "${SITE_OS_NAME}-${SITE_OS_VERSION}-${SITE_OS_BITS}-${SITE_COMPILER_NAME}-${SITE_COMPILER_VERSION}") + set (CTEST_BUILD_NAME "${SITE_OS_NAME}-${SITE_OS_VERSION}-${SITE_OS_BITS}-${SITE_COMPILER_NAME}-${SITE_COMPILER_VERSION}") else () - set(CTEST_BUILD_NAME "${SITE_OS_NAME}-${SITE_OS_VERSION}-${SITE_COMPILER_NAME}") - endif () - if (SITE_BUILDNAME_SUFFIX) - set(CTEST_BUILD_NAME "${CTEST_BUILD_NAME}-${SITE_BUILDNAME_SUFFIX}") + set (CTEST_BUILD_NAME "${SITE_OS_NAME}-${SITE_OS_VERSION}-${SITE_COMPILER_NAME}") endif () - set (BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}") endif () +if (SITE_BUILDNAME_SUFFIX) + set (CTEST_BUILD_NAME "${SITE_BUILDNAME_SUFFIX}-${CTEST_BUILD_NAME}") +endif () +set (BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}") + +# Launchers work only with Makefile and Ninja generators. 
+if(NOT "${CTEST_CMAKE_GENERATOR}" MATCHES "Make|Ninja" OR LOCAL_SKIP_TEST) + set(CTEST_USE_LAUNCHERS 0) + set(ENV{CTEST_USE_LAUNCHERS_DEFAULT} 0) + set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=OFF") +else() + set(CTEST_USE_LAUNCHERS 1) + set(ENV{CTEST_USE_LAUNCHERS_DEFAULT} 1) + set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON") +endif() #----------------------------------------------------------------------------- # MacOS machines need special options diff --git a/config/cmake/HDF5_Examples.cmake.in b/config/cmake/examples/HDF5_Examples.cmake.in similarity index 100% rename from config/cmake/HDF5_Examples.cmake.in rename to config/cmake/examples/HDF5_Examples.cmake.in diff --git a/config/cmake/HDF5_Examples_options.cmake b/config/cmake/examples/HDF5_Examples_options.cmake similarity index 100% rename from config/cmake/HDF5_Examples_options.cmake rename to config/cmake/examples/HDF5_Examples_options.cmake diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in index b778c426e26..2f8f673ca1b 100644 --- a/config/cmake/hdf5-config.cmake.in +++ b/config/cmake/hdf5-config.cmake.in @@ -118,7 +118,7 @@ if (NOT TARGET "@HDF5_PACKAGE@") include (@PACKAGE_SHARE_INSTALL_DIR@/@ZLIB_PACKAGE_NAME@@HDF_PACKAGE_EXT@-targets.cmake) endif () if (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_SUPPORT AND ${HDF5_PACKAGE_NAME}_PACKAGE_EXTLIBS) - include (@PACKAGE_SHARE_INSTALL_DIR@/@SZIP_PACKAGE_NAME@@HDF_PACKAGE_EXT@-targets.cmake) + include (@PACKAGE_SHARE_INSTALL_DIR@/@LIBAEC_PACKAGE_NAME@@HDF_PACKAGE_EXT@-targets.cmake) endif () include (@PACKAGE_SHARE_INSTALL_DIR@/@HDF5_PACKAGE@@HDF_PACKAGE_EXT@-targets.cmake) endif () diff --git a/config/cmake/mccacheinit.cmake b/config/cmake/mccacheinit.cmake index dcd7ca65256..068adff6269 100644 --- a/config/cmake/mccacheinit.cmake +++ b/config/cmake/mccacheinit.cmake @@ -17,22 +17,20 @@ set (CMAKE_INSTALL_FRAMEWORK_PREFIX "Library/Frameworks" CACHE STRING "Frameworks installation directory" FORCE) -set (BUILD_TESTING ON CACHE BOOL "Build HDF5 Unit Testing" FORCE) - set (HDF_PACKAGE_EXT "" CACHE STRING "Name of HDF package extension" FORCE) set (HDF_PACKAGE_NAMESPACE "hdf5::" CACHE STRING "Name for HDF package namespace" FORCE) set (HDF5_BUILD_CPP_LIB ON CACHE BOOL "Build HDF5 C++ Library" FORCE) -set (HDF5_BUILD_EXAMPLES ON CACHE BOOL "Build HDF5 Library Examples" FORCE) - set (HDF5_BUILD_FORTRAN ON CACHE BOOL "Build FORTRAN support" FORCE) set (HDF5_BUILD_HL_LIB ON CACHE BOOL "Build HIGH Level HDF5 Library" FORCE) set (HDF5_BUILD_TOOLS ON CACHE BOOL "Build HDF5 Tools" FORCE) +set (HDF5_BUILD_EXAMPLES ON CACHE BOOL "Build HDF5 Library Examples" FORCE) + set (HDF5_ENABLE_Z_LIB_SUPPORT ON CACHE BOOL "Enable Zlib Filters" FORCE) set (HDF5_ENABLE_SZIP_SUPPORT ON CACHE BOOL "Use SZip Filter" FORCE) @@ -45,19 +43,19 @@ set (HDF5_ENABLE_USING_MEMCHECKER ON CACHE BOOL "Indicate that a memory checker set (HDF5_NO_PACKAGES ON CACHE BOOL "CPACK - Disable packaging" FORCE) -set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE) +set (HDF_TEST_EXPRESS "2" CACHE STRING "Control testing framework (0-3)" FORCE) + +set (HDF5_MINGW_STATIC_GCC_LIBS ON CACHE BOOL "Statically link libgcc/libstdc++" FORCE) + +set (HDF5_ALLOW_EXTERNAL_SUPPORT "TGZ" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE) set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) +set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) set (ZLIB_TGZ_NAME 
"ZLib.tar.gz" CACHE STRING "Use ZLib from compressed file" FORCE) -set (SZIP_TGZ_NAME "SZip.tar.gz" CACHE STRING "Use SZip from compressed file" FORCE) +set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) set (SZAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) -set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE) set (CMAKE_BUILD_TYPE "Debug" CACHE STRING "Build Debug" FORCE) set (CTEST_CONFIGURATION_TYPE "Debug" CACHE STRING "Build Debug" FORCE) - -set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) -set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) -set (SZIP_PACKAGE_NAME "szip" CACHE STRING "Name of SZIP package" FORCE) diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index 59edaf265f8..b9d8338a56d 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -306,7 +306,7 @@ IV. Further considerations CMake options: HDF5_ALLOW_EXTERNAL_SUPPORT:STRING="TGZ" ZLIB_TGZ_NAME:STRING="zlib_src.ext" - SZIP_TGZ_NAME:STRING="szip_src.ext" + SZAEC_TGZ_NAME:STRING="szaec_src.ext" TGZPATH:STRING="some_location" where "some_location" is the URL or full path to the compressed file and ext is the type of compression file. Also set CMAKE_BUILD_TYPE @@ -512,13 +512,20 @@ These five steps are described in detail below. ######################## set (CMAKE_INSTALL_FRAMEWORK_PREFIX "Library/Frameworks" CACHE STRING "Frameworks installation directory" FORCE) set (HDF_PACKAGE_EXT "" CACHE STRING "Name of HDF package extension" FORCE) + set (HDF_PACKAGE_NAMESPACE "hdf5::" CACHE STRING "Name for HDF package namespace (can be empty)" FORCE) + set (HDF5_BUILD_CPP_LIB ON CACHE BOOL "Build C++ support" FORCE) set (HDF5_BUILD_FORTRAN ON CACHE BOOL "Build FORTRAN support" FORCE) + set (HDF5_BUILD_JAVA ON CACHE BOOL "Build JAVA support" FORCE) + set (HDF5_INSTALL_MOD_FORTRAN "NO" CACHE STRING "Copy FORTRAN mod files to include directory (NO SHARED STATIC)" FORCE) + set_property (CACHE HDF5_INSTALL_MOD_FORTRAN PROPERTY STRINGS NO SHARED STATIC) + set (HDF5_BUILD_GENERATORS ON CACHE BOOL "Build Test Generators" FORCE) set (HDF5_ENABLE_Z_LIB_SUPPORT ON CACHE BOOL "Enable Zlib Filters" FORCE) set (HDF5_ENABLE_SZIP_SUPPORT ON CACHE BOOL "Use SZip Filter" FORCE) set (HDF5_ENABLE_SZIP_ENCODING ON CACHE BOOL "Use SZip Encoding" FORCE) - set (MPIEXEC_MAX_NUMPROCS "3" CACHE STRING "Minimum number of processes for HDF parallel tests" FORCE) + set (MPIEXEC_MAX_NUMPROCS "4" CACHE STRING "Minimum number of processes for HDF parallel tests" FORCE) set (HDF5_ENABLE_ALL_WARNINGS ON CACHE BOOL "Enable all warnings" FORCE) set (HDF_TEST_EXPRESS "2" CACHE STRING "Control testing framework (0-3)" FORCE) + set (HDF5_MINGW_STATIC_GCC_LIBS ON CACHE BOOL "Statically link libgcc/libstdc++" FORCE) set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE) set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) @@ -526,11 +533,8 @@ These five steps are described in detail below. 
set (ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.2.13" CACHE STRING "Use ZLIB from original location" FORCE) set (ZLIB_TGZ_ORIGNAME "zlib-1.2.13.tar.gz" CACHE STRING "Use ZLIB from original compressed file" FORCE) set (ZLIB_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for ZLIB FetchContent" FORCE) - set (SZIP_PACKAGE_NAME "szip" CACHE STRING "Name of SZIP package" FORCE) set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) - set (SZIP_TGZ_NAME "SZip.tar.gz" CACHE STRING "Use SZip from compressed file" FORCE) set (SZAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) - set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE) set (LIBAEC_TGZ_ORIGPATH "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6/libaec-1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original location" FORCE) set (LIBAEC_TGZ_ORIGNAME "libaec-v1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original compressed file" FORCE) set (LIBAEC_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for LIBAEC FetchContent" FORCE) @@ -614,7 +618,13 @@ These five steps are described in detail below. set (ZFP_GIT_BRANCH "master" CACHE STRING "" FORCE) set (ZFP_TGZ_NAME "zfp.tar.gz" CACHE STRING "Use ZFP from compressed file" FORCE) set (ZFP_PACKAGE_NAME "zfp" CACHE STRING "Name of ZFP package" FORCE) - + ###### + # zstd + ###### + set (ZSTD_GIT_URL "https://github.com/facebook/zstd" CACHE STRING "Use ZSTD from repository" FORCE) + set (ZSTD_GIT_BRANCH "dev" CACHE STRING "" FORCE) + set (ZSTD_TGZ_NAME "zstd.tar.gz" CACHE STRING "Use ZSTD from compressed file" FORCE) + set (ZSTD_PACKAGE_NAME "zstd" CACHE STRING "Name of ZSTD package" FORCE) 2. Configure the cache settings @@ -638,7 +648,7 @@ These five steps are described in detail below. 2.2 Preferred command line example on Windows in c:\MyHDFstuff\hdf5\build directory: - cmake -C ../config/cmake/cacheinit.cmake -G "Visual Studio 12 2013" \ + cmake -C ../config/cmake/cacheinit.cmake -G "Visual Studio 16 2019" "-Ax64"\ -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=OFF \ -DCMAKE_BUILD_TYPE:STRING=Release .. @@ -688,7 +698,6 @@ These five steps are described in detail below. The options to control the TGZ URL (config/cmake/cacheinit.cmake file) are: ZLIB_TGZ_NAME:STRING="zlib_src.ext" - SZIP_TGZ_NAME:STRING="szip_src.ext" LIBAEC_TGZ_NAME:STRING="libaec_src.ext" PLUGIN_TGZ_NAME:STRING="plugin_src.ext" TGZPATH:STRING="some_location" @@ -697,9 +706,8 @@ These five steps are described in detail below. file such as .bz2, .tar, .tar.gz, .tgz, or .zip. Also define CMAKE_BUILD_TYPE to be the configuration type. - NOTE: the USE_LIBAEC option will use the file named by LIBAEC_TGZ_NAME - to build SZIP instead of the file named by SZIP_TGZ_NAME. This option - is also used to account for the different headers and library names. + NOTE: HDF5 uses the file named by LIBAEC_TGZ_NAME to build SZIP. This + is used to account for the different headers and library names. 4. 
Test HDF5 @@ -767,14 +775,14 @@ if (WINDOWS) DISABLE_PDB_FILES "Do not install PDB files" OFF ---------------- HDF5 Build Options --------------------- -HDF5_BUILD_CPP_LIB "Build HDF5 C++ Library" OFF -HDF5_BUILD_EXAMPLES "Build HDF5 Library Examples" ON -HDF5_BUILD_FORTRAN "Build FORTRAN support" OFF -HDF5_BUILD_JAVA "Build JAVA support" OFF -HDF5_BUILD_HL_LIB "Build HIGH Level HDF5 Library" ON -HDF5_BUILD_TOOLS "Build HDF5 Tools" ON -HDF5_BUILD_HL_TOOLS "Build HIGH Level HDF5 Tools" ON -HDF5_BUILD_HL_GIF_TOOLS "Build HIGH Level HDF5 GIF Tools" OFF +HDF5_BUILD_CPP_LIB "Build HDF5 C++ Library" OFF +HDF5_BUILD_EXAMPLES "Build HDF5 Library Examples" ON +HDF5_BUILD_FORTRAN "Build FORTRAN support" OFF +HDF5_BUILD_JAVA "Build JAVA support" OFF +HDF5_BUILD_HL_LIB "Build HIGH Level HDF5 Library" ON +HDF5_BUILD_TOOLS "Build HDF5 Tools" ON +HDF5_BUILD_HL_TOOLS "Build HIGH Level HDF5 Tools" ON +HDF5_BUILD_HL_GIF_TOOLS "Build HIGH Level HDF5 GIF Tools" OFF ---------------- HDF5 Folder Build Options --------------------- Defaults relative to $ @@ -800,59 +808,73 @@ HDF5_INSTALL_DOC_DIR "HDF5_INSTALL_DATA_DIR/doc/hdf5" ---------------- HDF5 Advanced Options --------------------- HDF5_USE_GNU_DIRS "TRUE to use GNU Coding Standard install directory variables, - FALSE to use historical settings" FALSE -ONLY_SHARED_LIBS "Only Build Shared Libraries" OFF -ALLOW_UNSUPPORTED "Allow unsupported combinations of configure options" OFF -HDF5_EXTERNAL_LIB_PREFIX "Use prefix for custom library naming." "" -HDF5_DISABLE_COMPILER_WARNINGS "Disable compiler warnings" OFF -HDF5_ENABLE_ALL_WARNINGS "Enable all warnings" OFF -HDF5_ENABLE_CODESTACK "Enable the function stack tracing (for developer debugging)." OFF -HDF5_ENABLE_COVERAGE "Enable code coverage for Libraries and Programs" OFF -HDF5_ENABLE_DEBUG_APIS "Turn on extra debug output in all packages" OFF -HDF5_ENABLE_DEPRECATED_SYMBOLS "Enable deprecated public API symbols" ON -HDF5_ENABLE_DIRECT_VFD "Build the Direct I/O Virtual File Driver" OFF -HDF5_ENABLE_EMBEDDED_LIBINFO "embed library info into executables" ON -HDF5_ENABLE_PARALLEL "Enable parallel build (requires MPI)" OFF + FALSE to use historical settings" FALSE +ONLY_SHARED_LIBS "Only Build Shared Libraries" OFF +ALLOW_UNSUPPORTED "Allow unsupported combinations of configure options" OFF +HDF5_EXTERNAL_LIB_PREFIX "Use prefix for custom library naming." "" +HDF5_DISABLE_COMPILER_WARNINGS "Disable compiler warnings" OFF +HDF5_ENABLE_ALL_WARNINGS "Enable all warnings" OFF +HDF5_ENABLE_CODESTACK "Enable the function stack tracing (for developer debugging)." 
OFF +HDF5_ENABLE_COVERAGE "Enable code coverage for Libraries and Programs" OFF +HDF5_ENABLE_DEBUG_APIS "Turn on extra debug output in all packages" OFF +HDF5_ENABLE_DEPRECATED_SYMBOLS "Enable deprecated public API symbols" ON +HDF5_ENABLE_DIRECT_VFD "Build the Direct I/O Virtual File Driver" OFF +HDF5_ENABLE_EMBEDDED_LIBINFO "embed library info into executables" ON +HDF5_ENABLE_PARALLEL "Enable parallel build (requires MPI)" OFF HDF5_ENABLE_PREADWRITE "Use pread/pwrite in sec2/log/core VFDs in place of read/write (when available)" ON -HDF5_ENABLE_TRACE "Enable API tracing capability" OFF -HDF5_ENABLE_USING_MEMCHECKER "Indicate that a memory checker is used" OFF -HDF5_GENERATE_HEADERS "Rebuild Generated Files" ON -HDF5_BUILD_GENERATORS "Build Test Generators" OFF -HDF5_JAVA_PACK_JRE "Package a JRE installer directory" OFF -HDF5_NO_PACKAGES "Do not include CPack Packaging" OFF -HDF5_PACK_EXAMPLES "Package the HDF5 Library Examples Compressed File" OFF -HDF5_PACK_MACOSX_FRAMEWORK "Package the HDF5 Library in a Frameworks" OFF +HDF5_ENABLE_TRACE "Enable API tracing capability" OFF +HDF5_ENABLE_USING_MEMCHECKER "Indicate that a memory checker is used" OFF +HDF5_GENERATE_HEADERS "Rebuild Generated Files" ON +HDF5_BUILD_GENERATORS "Build Test Generators" OFF +HDF5_JAVA_PACK_JRE "Package a JRE installer directory" OFF +HDF5_NO_PACKAGES "Do not include CPack Packaging" OFF +HDF5_PACK_EXAMPLES "Package the HDF5 Library Examples Compressed File" OFF +HDF5_PACK_MACOSX_FRAMEWORK "Package the HDF5 Library in a Frameworks" OFF HDF5_BUILD_FRAMEWORKS "TRUE to build as frameworks libraries, - FALSE to build according to BUILD_SHARED_LIBS" FALSE -HDF5_PACKAGE_EXTLIBS "CPACK - include external libraries" OFF -HDF5_STRICT_FORMAT_CHECKS "Whether to perform strict file format checks" OFF -HDF_TEST_EXPRESS "Control testing framework (0-3)" "3" -HDF5_TEST_VFD "Execute tests with different VFDs" OFF -HDF5_TEST_PASSTHROUGH_VOL "Execute tests with different passthrough VOL connectors" OFF -DEFAULT_API_VERSION "Enable default API (v16, v18, v110, v112, v114)" "v114" -HDF5_USE_FOLDERS "Enable folder grouping of projects in IDEs." ON -HDF5_WANT_DATA_ACCURACY "IF data accuracy is guaranteed during data conversions" ON + FALSE to build according to BUILD_SHARED_LIBS" FALSE +HDF5_PACKAGE_EXTLIBS "CPACK - include external libraries" OFF +HDF5_STRICT_FORMAT_CHECKS "Whether to perform strict file format checks" OFF +DEFAULT_API_VERSION "Enable default API (v16, v18, v110, v112, v114)" "v114" +HDF5_USE_FOLDERS "Enable folder grouping of projects in IDEs." 
ON +HDF5_WANT_DATA_ACCURACY "IF data accuracy is guaranteed during data conversions" ON HDF5_WANT_DCONV_EXCEPTION "exception handling functions is checked during data conversions" ON -HDF5_ENABLE_THREADSAFE "Enable Threadsafety" OFF -HDF5_MSVC_NAMING_CONVENTION "Use MSVC Naming conventions for Shared Libraries" OFF -HDF5_MINGW_STATIC_GCC_LIBS "Statically link libgcc/libstdc++" OFF +HDF5_ENABLE_THREADSAFE "Enable Threadsafety" OFF +HDF5_MSVC_NAMING_CONVENTION "Use MSVC Naming conventions for Shared Libraries" OFF +HDF5_MINGW_STATIC_GCC_LIBS "Statically link libgcc/libstdc++" OFF if (APPLE) HDF5_BUILD_WITH_INSTALL_NAME "Build with library install_name set to the installation path" OFF if (CMAKE_BUILD_TYPE MATCHES Debug) HDF5_ENABLE_INSTRUMENT "Instrument The library" OFF -if (HDF5_TEST_VFD) - HDF5_TEST_FHEAP_VFD "Execute fheap test with different VFDs" ON if (HDF5_BUILD_FORTRAN) - HDF5_INSTALL_MOD_FORTRAN "Copy FORTRAN mod files to include directory (NO SHARED STATIC)" "XX" - if (BUILD_SHARED_LIBS AND BUILD_STATIC_LIBS) default HDF5_INSTALL_MOD_FORTRAN is SHARED - if (BUILD_SHARED_LIBS AND NOT BUILD_STATIC_LIBS) default HDF5_INSTALL_MOD_FORTRAN is SHARED - if (NOT BUILD_SHARED_LIBS AND BUILD_STATIC_LIBS) default HDF5_INSTALL_MOD_FORTRAN is STATIC + HDF5_INSTALL_MOD_FORTRAN "Copy FORTRAN mod files to include directory (NO SHARED STATIC)" SHARED + if (BUILD_SHARED_LIBS AND BUILD_STATIC_LIBS) default HDF5_INSTALL_MOD_FORTRAN is SHARED + if (BUILD_SHARED_LIBS AND NOT BUILD_STATIC_LIBS) default HDF5_INSTALL_MOD_FORTRAN is SHARED + if (NOT BUILD_SHARED_LIBS AND BUILD_STATIC_LIBS) default HDF5_INSTALL_MOD_FORTRAN is STATIC if (NOT BUILD_SHARED_LIBS AND NOT BUILD_STATIC_LIBS) default HDF5_INSTALL_MOD_FORTRAN is SHARED -HDF5_BUILD_DOC "Build documentation" OFF -HDF5_ENABLE_ANALYZER_TOOLS "enable the use of Clang tools" OFF -HDF5_ENABLE_SANITIZERS "execute the Clang sanitizer" OFF -HDF5_ENABLE_FORMATTERS "format source files" OFF -TEST_SHELL_SCRIPTS "Enable shell script tests" ON +HDF5_BUILD_DOC "Build documentation" OFF +HDF5_ENABLE_ANALYZER_TOOLS "enable the use of Clang tools" OFF +HDF5_ENABLE_SANITIZERS "execute the Clang sanitizer" OFF +HDF5_ENABLE_FORMATTERS "format source files" OFF +HDF5_DIMENSION_SCALES_NEW_REF "Use new-style references with dimension scale APIs" OFF + +---------------- HDF5 Advanced Test Options --------------------- +if (BUILD_TESTING) + HDF5_TEST_SERIAL "Execute non-parallel tests" ON + HDF5_TEST_TOOLS "Execute tools tests" ON + HDF5_TEST_EXAMPLES "Execute tests on examples" ON + HDF5_TEST_SWMR "Execute SWMR tests" ON + HDF5_TEST_PARALLEL "Execute parallel tests" ON + HDF5_TEST_FORTRAN "Execute fortran tests" ON + HDF5_TEST_CPP "Execute cpp tests" ON + HDF5_TEST_JAVA "Execute java tests" ON + HDF_TEST_EXPRESS "Control testing framework (0-3)" "3" + HDF5_TEST_PASSTHROUGH_VOL "Execute tests with different passthrough VOL connectors" OFF + if (HDF5_TEST_PASSTHROUGH_VOL) + HDF5_TEST_FHEAP_PASSTHROUGH_VOL "Execute fheap test with different passthrough VOL connectors" ON + HDF5_TEST_VFD "Execute tests with different VFDs" OFF + if (HDF5_TEST_VFD) + HDF5_TEST_FHEAP_VFD "Execute fheap test with different VFDs" ON + TEST_SHELL_SCRIPTS "Enable shell script tests" ON ---------------- External Library Options --------------------- HDF5_ALLOW_EXTERNAL_SUPPORT "Allow External Library Building (NO GIT TGZ)" "NO" @@ -864,7 +886,6 @@ ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0 if (HDF5_ENABLE_SZIP_SUPPORT) 
HDF5_ENABLE_SZIP_ENCODING "Use SZip Encoding" OFF - USE_LIBAEC "Use libaec szip replacement" OFF if (WINDOWS) H5_DEFAULT_PLUGINDIR "%ALLUSERSPROFILE%/hdf5/lib/plugin" else () diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 4de0b9487fe..2698b43436b 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -47,6 +47,11 @@ New Features Configuration: ------------- + - Deprecated and removed old SZIP library in favor of LIBAEC library + + LIBAEC library has been used in HDF5 binaries as the szip library of choice + for a few years. We are removing the options for using the old SZIP library. + - Enabled instrumentation of the library by default in CMake for parallel debug builds From c20ff8cdad6ca541baa80c237839af5cc9df038d Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 24 Apr 2023 10:21:12 -0500 Subject: [PATCH 157/231] Fix ROS3 VFD anonymous credential usage with h5dump and h5ls (#2798) --- tools/src/h5dump/h5dump.c | 13 ++++++++++++- tools/src/h5ls/h5ls.c | 18 +++++++++++++++--- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/tools/src/h5dump/h5dump.c b/tools/src/h5dump/h5dump.c index 6c1556ab9a6..2a5eeb70b2d 100644 --- a/tools/src/h5dump/h5dump.c +++ b/tools/src/h5dump/h5dump.c @@ -862,8 +862,19 @@ parse_command_line(int argc, const char *const *argv) case 'f': vfd_info_g.type = VFD_BY_NAME; vfd_info_g.u.name = H5_optarg; - vfd_info_g.info = NULL; use_custom_vfd_g = TRUE; + +#ifdef H5_HAVE_ROS3_VFD + if (0 == HDstrcmp(vfd_info_g.u.name, drivernames[ROS3_VFD_IDX])) + if (!vfd_info_g.info) + vfd_info_g.info = &ros3_fa_g; +#endif +#ifdef H5_HAVE_LIBHDFS + if (0 == HDstrcmp(vfd_info_g.u.name, drivernames[HDFS_VFD_IDX])) + if (!vfd_info_g.info) + vfd_info_g.info = &hdfs_fa_g; +#endif + break; case 'g': dump_opts.display_all = 0; diff --git a/tools/src/h5ls/h5ls.c b/tools/src/h5ls/h5ls.c index 4e7298c4cb6..5d9e184571d 100644 --- a/tools/src/h5ls/h5ls.c +++ b/tools/src/h5ls/h5ls.c @@ -2651,8 +2651,8 @@ main(int argc, char *argv[]) hid_t fapl_id = H5P_DEFAULT; hbool_t custom_vol_fapl = FALSE; hbool_t custom_vfd_fapl = FALSE; - h5tools_vol_info_t vol_info; - h5tools_vfd_info_t vfd_info; + h5tools_vol_info_t vol_info = {0}; + h5tools_vfd_info_t vfd_info = {0}; #ifdef H5_HAVE_ROS3_VFD /* Default "anonymous" S3 configuration */ @@ -2763,7 +2763,6 @@ main(int argc, char *argv[]) else if (!HDstrncmp(argv[argno], "--vfd=", (size_t)6)) { vfd_info.type = VFD_BY_NAME; vfd_info.u.name = argv[argno] + 6; - vfd_info.info = NULL; custom_vfd_fapl = TRUE; } else if (!HDstrncmp(argv[argno], "--vfd-value=", (size_t)12)) { @@ -2980,6 +2979,19 @@ main(int argc, char *argv[]) /* Setup a custom fapl for file accesses */ if (custom_vol_fapl || custom_vfd_fapl) { +#ifdef H5_HAVE_ROS3_VFD + if (custom_vfd_fapl && (0 == HDstrcmp(vfd_info.u.name, drivernames[ROS3_VFD_IDX]))) { + if (!vfd_info.info) + vfd_info.info = &ros3_fa; + } +#endif +#ifdef H5_HAVE_LIBHDFS + if (custom_vfd_fapl && (0 == HDstrcmp(vfd_info.u.name, drivernames[HDFS_VFD_IDX]))) { + if (!vfd_info.info) + vfd_info.info = &hdfs_fa; + } +#endif + if ((fapl_id = h5tools_get_fapl(H5P_DEFAULT, custom_vol_fapl ? &vol_info : NULL, custom_vfd_fapl ? 
&vfd_info : NULL)) < 0) { error_msg("failed to setup file access property list (fapl) for file\n"); From c4f3f74b4966690e1f054afa42dc3dbc5f75a318 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 24 Apr 2023 11:12:30 -0500 Subject: [PATCH 158/231] Add release note for ROS3 VFD anonymous credential fix (#2801) --- release_docs/RELEASE.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 2698b43436b..8bc90b8cc7f 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -151,6 +151,19 @@ Bug Fixes since HDF5-1.14.0 release =================================== Library ------- + - Fixed ROS3 VFD anonymous credential usage with h5dump and h5ls + + ROS3 VFD anonymous credential functionality became broken in h5dump + and h5ls in the HDF5 1.14.0 release with the added support for VFD + plugins, which changed the way that the tools handled setting of + credential information that the VFD uses. The tools could be + provided the command-line option of "--s3-cred=(,,)" as a workaround + for anonymous credential usage, but the documentation for this + option stated that anonymous credentials could be used by simply + omitting the option. The latter functionality has been restored. + + Fixes GitHub issue #2406 + - Fixed memory leaks when processing malformed object header continuation messages Malformed object header continuation messages can result in a too-small From 32353e022e87a1a030eb2160de58456c110066c7 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 24 Apr 2023 14:42:03 -0500 Subject: [PATCH 159/231] Fix hdfeos5 workflow concurrency conflicts (#2804) --- .github/workflows/hdfeos5.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hdfeos5.yml b/.github/workflows/hdfeos5.yml index 142bf7cf692..03c97462963 100644 --- a/.github/workflows/hdfeos5.yml +++ b/.github/workflows/hdfeos5.yml @@ -16,7 +16,7 @@ on: # Using concurrency to cancel any in-progress job or run concurrency: - group: hdfeos5-${{ github.ref }} + group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} cancel-in-progress: true jobs: From f4addad5a50afd6d6ad28a45f24c46682de56c2c Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 25 Apr 2023 10:02:30 -0700 Subject: [PATCH 160/231] Sanitize object header message decode functions (#2795) * Add buffer bounds checks * Convert asserts to real error handling to better detect broken files * General cleanup --- src/H5Obogus.c | 22 +++---- src/H5Ocache_image.c | 37 ++++++------ src/H5Odrvinfo.c | 42 ++++++++------ src/H5Ofill.c | 104 ++++++++++++++++++--------------- src/H5Ofsinfo.c | 70 ++++++++++++----------- src/H5Oginfo.c | 41 +++++-------- src/H5Olayout.c | 133 +++++++++++++++++++++---------------------- src/H5Olinfo.c | 15 ++--- src/H5Olink.c | 102 +++++++++++++++++---------------- src/H5Opline.c | 96 +++++++++++++++---------------- src/H5Orefcount.c | 35 ++++++------ src/H5Osdspace.c | 54 ++++++++---------- src/H5Oshmesg.c | 31 ++++++---- src/H5Ostab.c | 34 +++++------ 14 files changed, 407 insertions(+), 409 deletions(-) diff --git a/src/H5Obogus.c b/src/H5Obogus.c index 549c3e94a46..1b83ed1d576 100644 --- a/src/H5Obogus.c +++ b/src/H5Obogus.c @@ -13,8 +13,6 @@ /*------------------------------------------------------------------------- * * Created: H5Obogus.c - * Jan 21 2003 - * Quincey Koziol * * Purpose: "bogus" message. 
This message is guaranteed to never * be found in a valid HDF5 file and is only used to @@ -95,25 +93,20 @@ const H5O_msg_class_t H5O_MSG_BOGUS_INVALID[1] = {{ * Purpose: Decode a "bogus" message and return a pointer to a new * native message struct. * - * Return: Success: Ptr to new message in native struct. - * + * Return: Success: Pointer to new message in native struct * Failure: NULL - * - * Programmer: Quincey Koziol - * Jan 21 2003 - * *------------------------------------------------------------------------- */ static void * -H5O__bogus_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) +H5O__bogus_decode(H5F_t *f, H5O_t H5_ATTR_NDEBUG_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, + unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5O_bogus_t *mesg = NULL; - void *ret_value; /* Return value */ + const uint8_t *p_end = p + p_size - 1; + H5O_bogus_t *mesg = NULL; + void *ret_value; FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); @@ -121,7 +114,8 @@ H5O__bogus_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUS if (NULL == (mesg = (H5O_bogus_t *)H5MM_calloc(sizeof(H5O_bogus_t)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") - /* decode */ + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT32DECODE(p, mesg->u); /* Validate the bogus info */ diff --git a/src/H5Ocache_image.c b/src/H5Ocache_image.c index bd273ec6d72..a06bebc32ac 100644 --- a/src/H5Ocache_image.c +++ b/src/H5Ocache_image.c @@ -13,14 +13,12 @@ /*------------------------------------------------------------------------- * * Created: H5Ocache_image.c - * June 21, 2015 - * John Mainzer * * Purpose: A message indicating that a metadata cache image block - * of the indicated length exists at the specified offset - * in the HDF5 file. + * of the indicated length exists at the specified offset + * in the HDF5 file. * - * The mdci_msg only appears in the superblock extension. + * The mdci_msg only appears in the superblock extension * *------------------------------------------------------------------------- */ @@ -79,30 +77,28 @@ H5FL_DEFINE(H5O_mdci_t); * Function: H5O__mdci_decode * * Purpose: Decode a metadata cache image message and return a - * pointer to a newly allocated H5O_mdci_t struct. - * - * Return: Success: Ptr to new message in native struct. - * Failure: NULL - * - * Programmer: John Mainzer - * 6/22/15 + * pointer to a newly allocated H5O_mdci_t struct. 
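Every decode callback touched by this patch follows the same shape: compute a pointer to the last valid byte of the raw message, then verify the size of each field against it before decoding. The skeleton below is an illustration only, not code from the patch; the message type, version constant, and field are hypothetical, while the macros are the ones used throughout the diffs here.

/* Illustrative decode-callback skeleton. H5O_example_t, H5O_EXAMPLE_VERSION
 * and the "value" field are hypothetical; the macros mirror the pattern
 * applied in this patch.
 */
static void *
H5O__example_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags,
                    unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p)
{
    H5O_example_t *mesg      = NULL;
    const uint8_t *p_end     = p + p_size - 1; /* Last valid byte of the input buffer */
    void          *ret_value = NULL;

    FUNC_ENTER_PACKAGE

    /* Check that the version byte is present before reading it */
    if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end))
        HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding");
    if (*p++ != H5O_EXAMPLE_VERSION)
        HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message")

    if (NULL == (mesg = (H5O_example_t *)H5MM_calloc(sizeof(H5O_example_t))))
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")

    /* Check the encoded size of each field before the decode macro advances p */
    if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end))
        HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding");
    UINT32DECODE(p, mesg->value);

    ret_value = (void *)mesg;

done:
    /* On any decode error, release the partially built message */
    if (!ret_value && mesg)
        H5MM_xfree(mesg);

    FUNC_LEAVE_NOAPI(ret_value)
}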
* + * Return: Success: Pointer to new message in native struct + * Failure: NULL *------------------------------------------------------------------------- */ static void * H5O__mdci_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5O_mdci_t *mesg; /* Native message */ - void *ret_value = NULL; /* Return value */ + H5O_mdci_t *mesg = NULL; /* New cache image message */ + const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* Sanity check */ HDassert(f); HDassert(p); /* Version of message */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (*p++ != H5O_MDCI_VERSION_0) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message") @@ -111,14 +107,21 @@ H5O__mdci_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for metadata cache image message") - /* Decode */ + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(mesg->addr)); + + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_DECODE_LENGTH(f, p, mesg->size); /* Set return value */ ret_value = (void *)mesg; done: + if (!ret_value && mesg) + H5FL_FREE(H5O_mdci_t, mesg); + FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__mdci_decode() */ diff --git a/src/H5Odrvinfo.c b/src/H5Odrvinfo.c index 923856fe978..53de66d2e0a 100644 --- a/src/H5Odrvinfo.c +++ b/src/H5Odrvinfo.c @@ -60,34 +60,32 @@ const H5O_msg_class_t H5O_MSG_DRVINFO[1] = {{ #define H5O_DRVINFO_VERSION 0 /*------------------------------------------------------------------------- - * Function: H5O__drvinfo_decode + * Function: H5O__drvinfo_decode * - * Purpose: Decode a shared message table message and return a pointer + * Purpose: Decode a shared message table message and return a pointer * to a newly allocated H5O_drvinfo_t struct. * - * Return: Success: Ptr to new message in native struct. 
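Address and length fields do not have a fixed on-disk width; the number of bytes to reserve comes from the file handle. A short sketch of that check follows, with hypothetical locals:

/* Sketch only: bounds-checking file-dependent fields. "addr" and "len" are
 * hypothetical locals; the decode helpers are the ones used in this patch.
 */
haddr_t addr;
hsize_t len;

/* Addresses occupy H5F_sizeof_addr(f) bytes in this particular file */
if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end))
    HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding");
H5F_addr_decode(f, &p, &addr);

/* Lengths occupy H5F_sizeof_size(f) bytes */
if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end))
    HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding");
H5F_DECODE_LENGTH(f, p, len);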
- * Failure: NULL - * - * Programmer: Quincey Koziol - * Mar 1, 2007 - * + * Return: Success: Pointer to new message in native struct + * Failure: NULL *------------------------------------------------------------------------- */ static void * H5O__drvinfo_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, - unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, - size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, + const uint8_t *p) { - H5O_drvinfo_t *mesg; /* Native message */ - void *ret_value = NULL; /* Return value */ + H5O_drvinfo_t *mesg = NULL; /* Native message */ + const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* Sanity check */ HDassert(f); HDassert(p); /* Version of message */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (*p++ != H5O_DRVINFO_VERSION) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message") @@ -96,27 +94,37 @@ H5O__drvinfo_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for driver info message") /* Retrieve driver name */ + if (H5_IS_BUFFER_OVERFLOW(p, 8, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5MM_memcpy(mesg->name, p, 8); mesg->name[8] = '\0'; p += 8; /* Decode buffer size */ + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT16DECODE(p, mesg->len); - HDassert(mesg->len); + if (0 == mesg->len) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "message length can't be zero"); /* Allocate space for buffer */ - if (NULL == (mesg->buf = (uint8_t *)H5MM_malloc(mesg->len))) { - mesg = (H5O_drvinfo_t *)H5MM_xfree(mesg); + if (NULL == (mesg->buf = (uint8_t *)H5MM_malloc(mesg->len))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for driver info buffer") - } /* end if */ /* Copy encoded driver info into buffer */ + if (H5_IS_BUFFER_OVERFLOW(p, mesg->len, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5MM_memcpy(mesg->buf, p, mesg->len); /* Set return value */ ret_value = (void *)mesg; done: + if (!ret_value && mesg) { + H5MM_xfree(mesg->buf); + H5MM_xfree(mesg); + } + FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__drvinfo_decode() */ diff --git a/src/H5Ofill.c b/src/H5Ofill.c index 45877d25bd3..7b789bcafb5 100644 --- a/src/H5Ofill.c +++ b/src/H5Ofill.c @@ -10,11 +10,9 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* Programmer: Robb Matzke - * Wednesday, September 30, 1998 - * +/* * Purpose: The fill message indicates a bit pattern to use for - * uninitialized data points of a dataset. + * uninitialized data points of a dataset. */ #include "H5Omodule.h" /* This source code file is part of the H5O module */ @@ -179,16 +177,12 @@ H5FL_BLK_EXTERN(type_conv); /*------------------------------------------------------------------------- * Function: H5O__fill_new_decode * - * Purpose: Decode a new fill value message. The new fill value - * message is fill value plus space allocation time and - * fill value writing time and whether fill value is defined. - * - * Return: Success: Ptr to new message in native struct. 
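For variable-length payloads the encoded length must be validated before it is trusted for an allocation or copy. A condensed sketch of that order of operations (the locals are hypothetical):

/* Sketch only: decode a 2-byte length, validate it, then allocate and copy.
 * "buf" and "buf_len" are hypothetical locals.
 */
uint16_t buf_len;
uint8_t *buf = NULL;

if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end))
    HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding");
UINT16DECODE(p, buf_len);
if (0 == buf_len)
    HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "message length can't be zero");

/* Only after the length is known to fit in the remaining buffer ... */
if (H5_IS_BUFFER_OVERFLOW(p, buf_len, p_end))
    HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding");

/* ... is it safe to allocate and copy */
if (NULL == (buf = (uint8_t *)H5MM_malloc(buf_len)))
    HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
H5MM_memcpy(buf, p, buf_len);
p += buf_len;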
- * Failure: NULL - * - * Programmer: Raymond Lu - * Feb 26, 2002 + * Purpose: Decode a new fill value message. The new fill value + * message is fill value plus space allocation time and + * fill value writing time and whether fill value is defined. * + * Return: Success: Pointer to new message in native struct + * Failure: NULL *------------------------------------------------------------------------- */ static void * @@ -209,12 +203,21 @@ H5O__fill_new_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for fill value message") /* Version */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); fill->version = *p++; if (fill->version < H5O_FILL_VERSION_1 || fill->version > H5O_FILL_VERSION_LATEST) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for fill value message") /* Decode each version */ if (fill->version < H5O_FILL_VERSION_3) { + + /* Versions 1 & 2 */ + + /* Buffer size check for the next three bytes */ + if (H5_IS_BUFFER_OVERFLOW(p, 3, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + /* Space allocation time */ fill->alloc_time = (H5D_alloc_time_t)*p++; @@ -226,26 +229,34 @@ H5O__fill_new_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, /* Only decode fill value information if one is defined */ if (fill->fill_defined) { + + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); INT32DECODE(p, fill->size); + if (fill->size > 0) { H5_CHECK_OVERFLOW(fill->size, ssize_t, size_t); - /* Ensure that fill size doesn't exceed buffer size, due to possible data corruption */ - if (p + fill->size - 1 > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "fill size exceeds buffer size") + if (H5_IS_BUFFER_OVERFLOW(p, fill->size, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (NULL == (fill->buf = H5MM_malloc((size_t)fill->size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for fill value") H5MM_memcpy(fill->buf, p, (size_t)fill->size); - } /* end if */ - } /* end if */ + } + } else - fill->size = (-1); - } /* end if */ + fill->size = -1; + } else { + + /* Version 3 */ + unsigned flags; /* Status flags */ /* Flags */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); flags = *p++; /* Check for unknown flags */ @@ -261,39 +272,45 @@ H5O__fill_new_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, /* Check for undefined fill value */ if (flags & H5O_FILL_FLAG_UNDEFINED_VALUE) { - /* Sanity check */ - HDassert(!(flags & H5O_FILL_FLAG_HAVE_VALUE)); + + if (flags & (unsigned)~H5O_FILL_FLAG_HAVE_VALUE) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "have value and undefined value flags both set") /* Set value for "undefined" fill value */ - fill->size = (-1); - } /* end if */ + fill->size = -1; + } else if (flags & H5O_FILL_FLAG_HAVE_VALUE) { /* Fill value size */ + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT32DECODE(p, fill->size); /* Fill value */ H5_CHECK_OVERFLOW(fill->size, ssize_t, size_t); + + if (H5_IS_BUFFER_OVERFLOW(p, fill->size, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while 
decoding"); + if (NULL == (fill->buf = H5MM_malloc((size_t)fill->size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for fill value") H5MM_memcpy(fill->buf, p, (size_t)fill->size); /* Set the "defined" flag */ fill->fill_defined = TRUE; - } /* end else */ + } else /* Set the "defined" flag */ fill->fill_defined = TRUE; - } /* end else */ + } /* Set return value */ ret_value = (void *)fill; done: if (!ret_value && fill) { - if (fill->buf) - H5MM_xfree(fill->buf); + H5MM_xfree(fill->buf); fill = H5FL_FREE(H5O_fill_t, fill); - } /* end if */ + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__fill_new_decode() */ @@ -301,14 +318,10 @@ H5O__fill_new_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, /*------------------------------------------------------------------------- * Function: H5O__fill_old_decode * - * Purpose: Decode an old fill value message. - * - * Return: Success: Ptr to new message in native struct. - * Failure: NULL - * - * Programmer: Robb Matzke - * Wednesday, September 30, 1998 + * Purpose: Decode an old fill value message * + * Return: Success: Pointer to new message in native struct + * Failure: NULL *------------------------------------------------------------------------- */ static void * @@ -335,6 +348,8 @@ H5O__fill_old_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flag fill->fill_time = H5D_FILL_TIME_IFSET; /* Fill value size */ + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT32DECODE(p, fill->size); /* Only decode the fill value itself if there is one */ @@ -342,8 +357,8 @@ H5O__fill_old_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flag H5_CHECK_OVERFLOW(fill->size, ssize_t, size_t); /* Ensure that fill size doesn't exceed buffer size, due to possible data corruption */ - if (p + fill->size - 1 > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "fill size exceeds buffer size") + if (H5_IS_BUFFER_OVERFLOW(p, fill->size, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); /* Get the datatype message */ if ((exists = H5O_msg_exists_oh(open_oh, H5O_DTYPE_ID)) < 0) @@ -354,15 +369,15 @@ H5O__fill_old_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flag /* Verify size */ if (fill->size != (ssize_t)H5T_GET_SIZE(dt)) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "inconsistent fill value size") - } /* end if */ + } if (NULL == (fill->buf = H5MM_malloc((size_t)fill->size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for fill value") H5MM_memcpy(fill->buf, p, (size_t)fill->size); fill->fill_defined = TRUE; - } /* end if */ + } else - fill->size = (-1); + fill->size = -1; /* Set return value */ ret_value = (void *)fill; @@ -372,10 +387,9 @@ H5O__fill_old_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flag H5O_msg_free(H5O_DTYPE_ID, dt); if (!ret_value && fill) { - if (fill->buf) - H5MM_xfree(fill->buf); - fill = H5FL_FREE(H5O_fill_t, fill); - } /* end if */ + H5MM_xfree(fill->buf); + H5FL_FREE(H5O_fill_t, fill); + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__fill_old_decode() */ diff --git a/src/H5Ofsinfo.c b/src/H5Ofsinfo.c index b3766060e54..eb4ed5e683e 100644 --- a/src/H5Ofsinfo.c +++ b/src/H5Ofsinfo.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5Ofsinfo.c - * Feb 2009 - * Vailin Choi * - * Purpose: File space info message. 
+ * Purpose: File space info message * *------------------------------------------------------------------------- */ @@ -82,27 +80,22 @@ H5FL_DEFINE_STATIC(H5O_fsinfo_t); * * Purpose: Decode a message and return a pointer to a newly allocated one. * - * Return: Success: Ptr to new message in native form. - * Failure: NULL - * - * Programmer: Vailin Choi; Feb 2009 - * + * Return: Success: Pointer to new message in native form + * Failure: NULL *------------------------------------------------------------------------- */ - static void * H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5O_fsinfo_t *fsinfo = NULL; /* File space info message */ - H5F_mem_page_t ptype; /* Memory type for iteration */ - unsigned vers; /* message version */ - const uint8_t *p_end = p + p_size; - void *ret_value = NULL; /* Return value */ + H5O_fsinfo_t *fsinfo = NULL; /* File space info message */ + H5F_mem_page_t ptype; /* Memory type for iteration */ + unsigned vers; /* Message version */ + const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); @@ -114,8 +107,8 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU fsinfo->fs_addr[ptype - 1] = HADDR_UNDEF; /* Version of message */ - if (p + 1 - 1 > p_end) /* one byte for version */ - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); vers = *p++; if (vers == H5O_FSINFO_VERSION_0) { @@ -129,8 +122,8 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU fsinfo->pgend_meta_thres = H5F_FILE_SPACE_PGEND_META_THRES; fsinfo->eoa_pre_fsm_fsalloc = HADDR_UNDEF; - if (p + 1 + H5F_SIZEOF_SIZE(f) - 1 > p_end) /* one byte for strategy + sizeof(f) */ - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, 1 + H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); strategy = (H5F_file_space_type_t)*p++; /* File space strategy */ H5F_DECODE_LENGTH(f, p, threshold); /* Free-space section threshold */ @@ -143,9 +136,9 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU if (HADDR_UNDEF == (fsinfo->eoa_pre_fsm_fsalloc = H5F_get_eoa(f, H5FD_MEM_DEFAULT))) HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "unable to get file size") for (type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; type++) { - if (p + H5_SIZEOF_HADDR_T > p_end) - HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, - "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(fsinfo->fs_addr[type - 1])); } break; @@ -167,32 +160,43 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU case H5F_FILE_SPACE_DEFAULT: default: HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid file space strategy") - } /* end switch */ + } fsinfo->version = H5O_FSINFO_VERSION_1; fsinfo->mapped = TRUE; } else { - HDassert(vers >= H5O_FSINFO_VERSION_1); + if (vers < H5O_FSINFO_VERSION_1) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "bad version number") fsinfo->version = 
vers; - /* strategy (1) + persist (1) + sizeof(f) + sizeof(f) + pgend_meta_thres (2) + sizeofaddr(f) */ - if (p + 1 + 1 + 2 * H5F_SIZEOF_SIZE(f) + 2 + H5F_SIZEOF_ADDR(f) - 1 > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, 1 + 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); fsinfo->strategy = (H5F_fspace_strategy_t)*p++; /* File space strategy */ fsinfo->persist = *p++; /* Free-space persist or not */ - H5F_DECODE_LENGTH(f, p, fsinfo->threshold); /* Free-space section threshold */ + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5F_DECODE_LENGTH(f, p, fsinfo->threshold); /* Free-space section threshold */ + + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_DECODE_LENGTH(f, p, fsinfo->page_size); /* File space page size */ - UINT16DECODE(p, fsinfo->pgend_meta_thres); /* Page end metadata threshold */ + + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + UINT16DECODE(p, fsinfo->pgend_meta_thres); /* Page end metadata threshold */ + + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(fsinfo->eoa_pre_fsm_fsalloc)); /* EOA before free-space header and section info */ /* Decode addresses of free space managers, if persisting */ if (fsinfo->persist) for (ptype = H5F_MEM_PAGE_SUPER; ptype < H5F_MEM_PAGE_NTYPES; ptype++) { - if (p + H5F_SIZEOF_SIZE(f) - 1 > p_end) /* one byte for sizeof(f) */ - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off end of input buffer while decoding") + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(fsinfo->fs_addr[ptype - 1])); } fsinfo->mapped = FALSE; @@ -202,8 +206,8 @@ H5O__fsinfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU ret_value = fsinfo; done: - if (ret_value == NULL && fsinfo != NULL) - fsinfo = H5FL_FREE(H5O_fsinfo_t, fsinfo); + if (!ret_value && fsinfo) + H5FL_FREE(H5O_fsinfo_t, fsinfo); FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__fsinfo_decode() */ diff --git a/src/H5Oginfo.c b/src/H5Oginfo.c index 54d8b8bdcf1..df45e5399db 100644 --- a/src/H5Oginfo.c +++ b/src/H5Oginfo.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5Oginfo.c - * Aug 23 2005 - * Quincey Koziol * - * Purpose: Group Information messages. + * Purpose: Group Information messages * *------------------------------------------------------------------------- */ @@ -78,34 +76,24 @@ H5FL_DEFINE_STATIC(H5O_ginfo_t); * Purpose: Decode a message and return a pointer to * a newly allocated one. * - * Return: Success: Ptr to new message in native order. 
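One convention worth spelling out: p_end is set to p + p_size - 1, so it points at the last valid byte rather than one past the end, and the overflow macro is used with that convention. A rough, hypothetical equivalent of the check is sketched below; the real macro lives in the library's private headers and may differ in detail.

/* Rough equivalent of H5_IS_BUFFER_OVERFLOW under the convention that p_end
 * is the LAST valid byte (p + p_size - 1). Illustration only; the actual
 * macro definition may differ.
 */
static int
example_would_overflow(const uint8_t *p, size_t decode_size, const uint8_t *p_end)
{
    /* Reading decode_size bytes starting at p is unsafe if the last byte
     * that would be read, p + decode_size - 1, lies beyond p_end.
     */
    return (p + decode_size - 1) > p_end;
}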
- * - * Failure: NULL - * - * Programmer: Quincey Koziol - * Aug 30 2005 - * + * Return: Success: Pointer to new message in native order + * Failure: NULL *------------------------------------------------------------------------- */ static void * H5O__ginfo_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5O_ginfo_t *ginfo = NULL; /* Pointer to group information message */ - unsigned char flags; /* Flags for encoding group info */ - void *ret_value = NULL; /* Return value */ + H5O_ginfo_t *ginfo = NULL; /* Pointer to group information message */ + unsigned char flags; /* Flags for encoding group info */ + const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* check args */ + HDassert(f); HDassert(p); - if (p_size == 0) - HGOTO_ERROR(H5E_OHDR, H5E_ARGS, NULL, "size of given ginfo was zero") - - /* Points at last valid byte in buffer */ - const uint8_t *p_end = p + p_size - 1; - /* Version of message */ if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") @@ -132,11 +120,11 @@ H5O__ginfo_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, ginfo->max_compact) UINT16DECODE(p, ginfo->min_dense) - } /* end if */ + } else { ginfo->max_compact = H5G_CRT_GINFO_MAX_COMPACT; ginfo->min_dense = H5G_CRT_GINFO_MIN_DENSE; - } /* end else */ + } /* Get the estimated # of entries & name lengths */ if (ginfo->store_est_entry_info) { @@ -144,19 +132,18 @@ H5O__ginfo_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, ginfo->est_num_entries) UINT16DECODE(p, ginfo->est_name_len) - } /* end if */ + } else { ginfo->est_num_entries = H5G_CRT_GINFO_EST_NUM_ENTRIES; ginfo->est_name_len = H5G_CRT_GINFO_EST_NAME_LEN; - } /* end if */ + } /* Set return value */ ret_value = ginfo; done: - if (ret_value == NULL) - if (ginfo != NULL) - ginfo = H5FL_FREE(H5O_ginfo_t, ginfo); + if (!ret_value && ginfo) + H5FL_FREE(H5O_ginfo_t, ginfo); FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__ginfo_decode() */ diff --git a/src/H5Olayout.c b/src/H5Olayout.c index a58fc0c66ba..f784f246ca7 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -10,10 +10,8 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* Programmer: Robb Matzke - * Wednesday, October 8, 1997 - * - * Purpose: Messages related to data layout. +/* + * Purpose: Messages related to data layout */ #define H5D_FRIEND /*suppress error about including H5Dpkg */ @@ -78,13 +76,8 @@ H5FL_DEFINE(H5O_layout_t); * Purpose: Decode an data layout message and return a pointer to a * new one created with malloc(). * - * Return: Success: Ptr to new message in native order. 
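Optional fields guarded by flag bits get the same treatment: the bounds check covers the whole group of fields the flag implies, and defaults are used when the flag is clear. A compressed sketch (the flag name is hypothetical; the default constants are the ones used by the group-info message):

/* Sketch only: flag-guarded optional fields. EXAMPLE_STORE_PHASE_CHANGE is a
 * hypothetical flag bit; the default constants appear in the real code.
 */
unsigned char flags;
uint16_t      max_compact, min_dense;

if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end))
    HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding")
flags = *p++;

if (flags & EXAMPLE_STORE_PHASE_CHANGE) {
    /* Two 2-byte fields are present only when the flag is set */
    if (H5_IS_BUFFER_OVERFLOW(p, 2 + 2, p_end))
        HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding")
    UINT16DECODE(p, max_compact)
    UINT16DECODE(p, min_dense)
}
else {
    /* Fields absent: fall back to the creation-time defaults */
    max_compact = H5G_CRT_GINFO_MAX_COMPACT;
    min_dense   = H5G_CRT_GINFO_MIN_DENSE;
}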
- * + * Return: Success: Pointer to new message in native order * Failure: NULL - * - * Programmer: Robb Matzke - * Wednesday, October 8, 1997 - * *------------------------------------------------------------------------- */ static void * @@ -94,16 +87,13 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ H5O_layout_t *mesg = NULL; uint8_t *heap_block = NULL; - unsigned u; - void *ret_value = NULL; /* Return value */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); - /* decode */ if (NULL == (mesg = H5FL_CALLOC(H5O_layout_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "memory allocation failed") mesg->storage.type = H5D_LAYOUT_ERROR; @@ -144,33 +134,33 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU /* Address */ if (mesg->type == H5D_CONTIGUOUS) { - if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.contig.addr)); /* Set the layout operations */ mesg->ops = H5D_LOPS_CONTIG; - } /* end if */ + } else if (mesg->type == H5D_CHUNKED) { - if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr)); /* Set the layout operations */ mesg->ops = H5D_LOPS_CHUNK; - /* Set the chunk operations */ - /* (Only "btree" indexing type currently supported in this version) */ + /* Set the chunk operations + * (Only "btree" indexing type currently supported in this version) + */ mesg->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE; mesg->storage.u.chunk.ops = H5D_COPS_BTREE; - } /* end if */ - else { - /* Sanity check */ - HDassert(mesg->type == H5D_COMPACT); - + } + else if (mesg->type == H5D_COMPACT) { /* Set the layout operations */ mesg->ops = H5D_LOPS_COMPACT; - } /* end else */ + } + else + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid layout type") /* Read the size */ if (mesg->type != H5D_CHUNKED) { @@ -178,24 +168,24 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU * truncation of the dimension sizes when they were stored in this * version of the layout message. Compute the contiguous storage * size in the dataset code, where we've got the dataspace - * information available also. - QAK 5/26/04 + * information available also. */ - if (H5_IS_BUFFER_OVERFLOW(p, (ndims * sizeof(uint32_t)), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, (ndims * 4), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") p += ndims * sizeof(uint32_t); /* Skip over dimension sizes */ - } /* end if */ + } else { if (ndims < 2) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad dimensions for chunked storage") mesg->u.chunk.ndims = ndims; - if (H5_IS_BUFFER_OVERFLOW(p, (ndims * sizeof(uint32_t)), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, (ndims * 4), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") + for (unsigned u = 0; u < ndims; u++) { - for (u = 0; u < ndims; u++) { UINT32DECODE(p, mesg->u.chunk.dim[u]); - /* Just in case that something goes very wrong, such as file corruption. 
*/ + /* Just in case that something goes very wrong, such as file corruption */ if (mesg->u.chunk.dim[u] == 0) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad chunk dimension value when parsing layout message - chunk dimension " @@ -204,12 +194,13 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU } /* Compute chunk size */ - for (u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < ndims; u++) + mesg->u.chunk.size = mesg->u.chunk.dim[0]; + for (unsigned u = 1; u < ndims; u++) mesg->u.chunk.size *= mesg->u.chunk.dim[u]; - } /* end if */ + } if (mesg->type == H5D_COMPACT) { - if (H5_IS_BUFFER_OVERFLOW(p, sizeof(uint32_t), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT32DECODE(p, mesg->storage.u.compact.size); @@ -223,9 +214,9 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU "memory allocation failed for compact data buffer") H5MM_memcpy(mesg->storage.u.compact.buf, p, mesg->storage.u.compact.size); p += mesg->storage.u.compact.size; - } /* end if */ - } /* end if */ - } /* end if */ + } + } + } else { /* Layout & storage class */ if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) @@ -236,7 +227,7 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU switch (mesg->type) { case H5D_COMPACT: /* Compact data size */ - if (H5_IS_BUFFER_OVERFLOW(p, sizeof(uint16_t), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, mesg->storage.u.compact.size); @@ -254,7 +245,7 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU /* Compact data */ H5MM_memcpy(mesg->storage.u.compact.buf, p, mesg->storage.u.compact.size); p += mesg->storage.u.compact.size; - } /* end if */ + } /* Set the layout operations */ mesg->ops = H5D_LOPS_COMPACT; @@ -262,12 +253,12 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU case H5D_CONTIGUOUS: /* Contiguous storage address */ - if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.contig.addr)); /* Contiguous storage size */ - if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_SIZE(f), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_DECODE_LENGTH(f, p, mesg->storage.u.contig.size); @@ -292,17 +283,18 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad dimensions for chunked storage") /* B-tree address */ - if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr)); - if (H5_IS_BUFFER_OVERFLOW(p, (mesg->u.chunk.ndims * sizeof(uint32_t)), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, (mesg->u.chunk.ndims * 4), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") /* Chunk dimensions */ - for (u = 0; u < mesg->u.chunk.ndims; u++) { + for (unsigned u = 0; u < mesg->u.chunk.ndims; u++) { + UINT32DECODE(p, mesg->u.chunk.dim[u]); /* Just in case 
that something goes very wrong, such as file corruption. */ @@ -311,17 +303,19 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU "bad chunk dimension value when parsing layout message - chunk " "dimension must be positive: mesg->u.chunk.dim[%u] = %u", u, mesg->u.chunk.dim[u]) - } /* end for */ + } /* Compute chunk size */ - for (u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++) + mesg->u.chunk.size = mesg->u.chunk.dim[0]; + for (unsigned u = 1; u < mesg->u.chunk.ndims; u++) mesg->u.chunk.size *= mesg->u.chunk.dim[u]; - /* Set the chunk operations */ - /* (Only "btree" indexing type supported with v3 of message format) */ + /* Set the chunk operations + * (Only "btree" indexing type supported with v3 of message format) + */ mesg->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE; mesg->storage.u.chunk.ops = H5D_COPS_BTREE; - } /* end if */ + } else { /* Get the chunked layout flags */ if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) @@ -360,7 +354,7 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU "ran off end of input buffer while decoding") /* Chunk dimensions */ - for (u = 0; u < mesg->u.chunk.ndims; u++) { + for (unsigned u = 0; u < mesg->u.chunk.ndims; u++) { UINT64DECODE_VAR(p, mesg->u.chunk.dim[u], mesg->u.chunk.enc_bytes_per_dim); /* Just in case that something goes very wrong, such as file corruption. */ @@ -372,7 +366,8 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU } /* Compute chunk size */ - for (u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++) + mesg->u.chunk.size = mesg->u.chunk.dim[0]; + for (unsigned u = 1; u < mesg->u.chunk.ndims; u++) mesg->u.chunk.size *= mesg->u.chunk.dim[u]; /* Chunk index type */ @@ -397,12 +392,12 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU case H5D_CHUNK_IDX_SINGLE: /* Single Chunk Index */ if (mesg->u.chunk.flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) { - if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_SIZE(f) + sizeof(uint32_t), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f) + 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_DECODE_LENGTH(f, p, mesg->storage.u.chunk.u.single.nbytes); UINT32DECODE(p, mesg->storage.u.chunk.u.single.filter_mask); - } /* end if */ + } /* Set the chunk operations */ mesg->storage.u.chunk.ops = H5D_COPS_SINGLE; @@ -475,7 +470,7 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU break; case H5D_CHUNK_IDX_BT2: /* v2 B-tree index */ - if (H5_IS_BUFFER_OVERFLOW(p, sizeof(uint32_t), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT32DECODE(p, mesg->u.chunk.u.btree2.cparam.node_size); @@ -511,14 +506,14 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU case H5D_CHUNK_IDX_NTYPES: default: HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "Invalid chunk index type") - } /* end switch */ + } /* Chunk index address */ - if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr)); - } /* end else */ + } /* Set the layout operations */ mesg->ops = H5D_LOPS_CHUNK; @@ -530,12 +525,12 @@ H5O__layout_decode(H5F_t *f, H5O_t 
H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU HGOTO_ERROR(H5E_OHDR, H5E_VERSION, NULL, "invalid layout version with virtual layout") /* Heap information */ - if (H5_IS_BUFFER_OVERFLOW(p, H5F_SIZEOF_ADDR(f), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(mesg->storage.u.virt.serial_list_hobjid.addr)); /* NOTE: virtual mapping global heap entry address could be undefined */ - if (H5_IS_BUFFER_OVERFLOW(p, sizeof(uint32_t), p_end)) + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT32DECODE(p, mesg->storage.u.virt.serial_list_hobjid.idx); @@ -580,7 +575,7 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU (unsigned)H5O_LAYOUT_VDS_GH_ENC_VERS, (unsigned)heap_vers) /* Number of entries */ - if (H5_IS_BUFFER_OVERFLOW(heap_block_p, H5F_SIZEOF_SIZE(f), heap_block_p_end)) + if (H5_IS_BUFFER_OVERFLOW(heap_block_p, H5F_sizeof_size(f), heap_block_p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_DECODE_LENGTH(f, heap_block_p, tmp_hsize) @@ -679,9 +674,9 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU else mesg->storage.u.virt.list[i].source_dset.dset_name = mesg->storage.u.virt.list[i].source_dset_name; - } /* end if */ + } - /* unlim_dim fields */ + /* Unlim_dim fields */ mesg->storage.u.virt.list[i].unlim_dim_source = H5S_get_select_unlim_dim(mesg->storage.u.virt.list[i].source_select); mesg->storage.u.virt.list[i].unlim_dim_virtual = @@ -697,7 +692,7 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU mesg->storage.u.virt.list[i].source_select; mesg->storage.u.virt.list[i].source_dset.clipped_virtual_select = mesg->storage.u.virt.list[i].source_dset.virtual_select; - } /* end if */ + } /* Check mapping for validity (do both pre and post * checks here, since we had to allocate the entry list @@ -713,10 +708,10 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU if (H5D_virtual_update_min_dims(mesg, i) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "unable to update virtual dataset minimum dimensions") - } /* end for */ + } /* Read stored checksum */ - if (H5_IS_BUFFER_OVERFLOW(heap_block_p, sizeof(uint32_t), heap_block_p_end)) + if (H5_IS_BUFFER_OVERFLOW(heap_block_p, 4, heap_block_p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT32DECODE(heap_block_p, stored_chksum) @@ -743,8 +738,8 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU case H5D_NLAYOUTS: default: HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "Invalid layout class") - } /* end switch */ - } /* end else */ + } + } /* Set return value */ ret_value = mesg; @@ -755,8 +750,8 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU if (mesg->type == H5D_VIRTUAL) if (H5D__virtual_reset_layout(mesg) < 0) HDONE_ERROR(H5E_OHDR, H5E_CANTFREE, NULL, "unable to reset virtual layout") - mesg = H5FL_FREE(H5O_layout_t, mesg); - } /* end if */ + H5FL_FREE(H5O_layout_t, mesg); + } heap_block = (uint8_t *)H5MM_xfree(heap_block); diff --git a/src/H5Olinfo.c b/src/H5Olinfo.c index 11138df2228..a82be727459 100644 --- a/src/H5Olinfo.c +++ b/src/H5Olinfo.c @@ -13,16 +13,14 @@ /*------------------------------------------------------------------------- * * Created: 
H5Olinfo.c - * Aug 23 2005 - * Quincey Koziol * - * Purpose: Link Information messages. + * Purpose: Link information messages * *------------------------------------------------------------------------- */ -#define H5G_FRIEND /*suppress error about including H5Gpkg */ -#define H5L_FRIEND /*suppress error about including H5Lpkg */ +#define H5G_FRIEND /* Suppress error about including H5Gpkg */ +#define H5L_FRIEND /* Suppress error about including H5Lpkg */ #include "H5Omodule.h" /* This source code file is part of the H5O module */ #include "H5private.h" /* Generic Functions */ @@ -95,12 +93,8 @@ H5FL_DEFINE_STATIC(H5O_linfo_t); * * Purpose: Decode a message and return a pointer to a newly allocated one. * - * Return: Success: Ptr to new message in native form. + * Return: Success: Pointer to new message in native form * Failure: NULL - * - * Programmer: Quincey Koziol - * Aug 23 2005 - * *------------------------------------------------------------------------- */ static void * @@ -115,7 +109,6 @@ H5O__linfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUS FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); diff --git a/src/H5Olink.c b/src/H5Olink.c index dabf87e6e95..160b1d0946a 100644 --- a/src/H5Olink.c +++ b/src/H5Olink.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5Olink.c - * Aug 29 2005 - * Quincey Koziol * - * Purpose: Link messages. + * Purpose: Link messages * *------------------------------------------------------------------------- */ @@ -100,32 +98,27 @@ H5FL_DEFINE_STATIC(H5O_link_t); * Purpose: Decode a message and return a pointer to * a newly allocated one. * - * Return: Success: Ptr to new message in native order. - * - * Failure: NULL - * - * Programmer: Quincey Koziol - * Aug 29 2005 - * + * Return: Success: Pointer to new message in native order + * Failure: NULL *------------------------------------------------------------------------- */ static void * H5O__link_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5O_link_t *lnk = NULL; /* Pointer to link message */ - size_t len = 0; /* Length of a string in the message */ - unsigned char link_flags; /* Flags for encoding link info */ - const uint8_t *p_end = p + p_size; /* End of the p buffer */ - void *ret_value = NULL; /* Return value */ + H5O_link_t *lnk = NULL; /* Pointer to link message */ + size_t len = 0; /* Length of a string in the message */ + unsigned char link_flags; /* Flags for encoding link info */ + const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); - /* decode */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") if (*p++ != H5O_LINK_VERSION) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message") @@ -134,6 +127,8 @@ H5O__link_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Get the encoding flags for the link */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") link_flags = *p++; if (link_flags & ~H5O_LINK_ALL_FLAGS) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad flag value for message") @@ -141,63 +136,74 @@ 
H5O__link_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE /* Check for non-default link type */ if (link_flags & H5O_LINK_STORE_LINK_TYPE) { /* Get the type of the link */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") lnk->type = (H5L_type_t)*p++; if (lnk->type < H5L_TYPE_HARD || lnk->type > H5L_TYPE_MAX) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad link type") - } /* end if */ + } else lnk->type = H5L_TYPE_HARD; /* Get the link creation time from the file */ if (link_flags & H5O_LINK_STORE_CORDER) { + if (H5_IS_BUFFER_OVERFLOW(p, 8, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") INT64DECODE(p, lnk->corder) lnk->corder_valid = TRUE; - } /* end if */ + } else { lnk->corder = 0; lnk->corder_valid = FALSE; - } /* end else */ + } /* Check for non-default name character set */ if (link_flags & H5O_LINK_STORE_NAME_CSET) { /* Get the link name's character set */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") lnk->cset = (H5T_cset_t)*p++; if (lnk->cset < H5T_CSET_ASCII || lnk->cset > H5T_CSET_UTF8) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad cset type") - } /* end if */ + } else lnk->cset = H5T_CSET_ASCII; /* Get the length of the link's name */ switch (link_flags & H5O_LINK_NAME_SIZE) { case 0: /* 1 byte size */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") len = *p++; break; case 1: /* 2 byte size */ + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, len); break; case 2: /* 4 byte size */ + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT32DECODE(p, len); break; case 3: /* 8 byte size */ + if (H5_IS_BUFFER_OVERFLOW(p, 8, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT64DECODE(p, len); break; default: - HDassert(0 && "bad size for name"); - } /* end switch */ + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "no appropriate size for name length") + } if (len == 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid name length") - /* Make sure that length doesn't exceed buffer size, which could occur - when the file is corrupted */ - if (p + len > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "name length causes read past end of buffer") - /* Get the link's name */ + if (H5_IS_BUFFER_OVERFLOW(p, len, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") if (NULL == (lnk->name = (char *)H5MM_malloc(len + 1))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") H5MM_memcpy(lnk->name, p, len); @@ -208,20 +214,21 @@ H5O__link_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE switch (lnk->type) { case H5L_TYPE_HARD: /* Get the address of the object the link points to */ + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") H5F_addr_decode(f, &p, &(lnk->u.hard.addr)); break; case H5L_TYPE_SOFT: /* Get the link value */ + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, 
len) if (len == 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid link length") - /* Make sure that length doesn't exceed buffer size, which could occur - when the file is corrupted */ - if (p + len > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "name length causes read past end of buffer") - + if (H5_IS_BUFFER_OVERFLOW(p, len, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") if (NULL == (lnk->u.soft.name = (char *)H5MM_malloc((size_t)len + 1))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") H5MM_memcpy(lnk->u.soft.name, p, len); @@ -238,16 +245,15 @@ H5O__link_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "unknown link type") /* A UD link. Get the user-supplied data */ + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, len) if (lnk->type == H5L_TYPE_EXTERNAL && len < 3) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "external link information length < 3") lnk->u.ud.size = len; if (len > 0) { - /* Make sure that length doesn't exceed buffer size, which could - occur when the file is corrupted */ - if (p + len > p_end) - HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "name length causes read past end of buffer") - + if (H5_IS_BUFFER_OVERFLOW(p, len, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") if (NULL == (lnk->u.ud.udata = H5MM_malloc((size_t)len))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") H5MM_memcpy(lnk->u.ud.udata, p, len); @@ -255,22 +261,20 @@ H5O__link_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSE } else lnk->u.ud.udata = NULL; - } /* end switch */ + } /* Set return value */ ret_value = lnk; done: - if (ret_value == NULL) - if (lnk != NULL) { - if (lnk->name != NULL) - H5MM_xfree(lnk->name); - if (lnk->type == H5L_TYPE_SOFT && lnk->u.soft.name != NULL) - H5MM_xfree(lnk->u.soft.name); - if (lnk->type >= H5L_TYPE_UD_MIN && lnk->u.ud.size > 0 && lnk->u.ud.udata != NULL) - H5MM_xfree(lnk->u.ud.udata); - lnk = H5FL_FREE(H5O_link_t, lnk); - } /* end if */ + if (!ret_value && lnk) { + H5MM_xfree(lnk->name); + if (lnk->type == H5L_TYPE_SOFT && lnk->u.soft.name != NULL) + H5MM_xfree(lnk->u.soft.name); + if (lnk->type >= H5L_TYPE_UD_MIN && lnk->u.ud.size > 0 && lnk->u.ud.udata != NULL) + H5MM_xfree(lnk->u.ud.udata); + H5FL_FREE(H5O_link_t, lnk); + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__link_decode() */ diff --git a/src/H5Opline.c b/src/H5Opline.c index 4ccd96cf45d..711a67b67cd 100644 --- a/src/H5Opline.c +++ b/src/H5Opline.c @@ -11,10 +11,7 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Programmer: Robb Matzke - * Wednesday, April 15, 1998 - * - * Purpose: Data filter pipeline message. + * Purpose: Data filter pipeline message */ #include "H5Omodule.h" /* This source code file is part of the H5O module */ @@ -103,12 +100,8 @@ H5FL_DEFINE(H5O_pline_t); * * Purpose: Decodes a filter pipeline message. * - * Return: Success: Ptr to the native message. 
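
The guards added above all lean on the same invariant: p_end points at the last valid byte of the encoded message, so a read of size bytes starting at ptr is legal exactly when its final byte does not pass p_end. The sketch below spells out that comparison; it is an assumption inferred from the H5_DTYPE_IS_BUFFER_OVERFLOW variant defined later in this series, not a copy of the real H5private.h definition, and SKETCH_IS_BUFFER_OVERFLOW is a made-up name.

    /* Assumed shape of the bounds test: overflow when the last byte of the
     * requested read would land beyond the last valid byte of the buffer. */
    #define SKETCH_IS_BUFFER_OVERFLOW(ptr, size, buffer_end) \
        (((ptr) + (size) - 1) > (buffer_end))

    /* Example: for an 8-byte message starting at p, p_end = p + 7.  Reading
     * 4 bytes at p + 4 touches p + 7 and is allowed; reading 4 bytes at
     * p + 5 would touch p + 8 and is rejected before any decode macro
     * advances the pointer. */
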
+ * Return: Success: Pointer to a new pipeline message * Failure: NULL - * - * Programmer: Robb Matzke - * Wednesday, April 15, 1998 - * *------------------------------------------------------------------------- */ @@ -121,11 +114,11 @@ H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign size_t name_length; /* Length of filter name */ size_t i; /* Local index variable */ const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ - void *ret_value = NULL; /* Return value */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* check args */ + HDassert(f); HDassert(p); /* Allocate space for I/O pipeline message */ @@ -133,14 +126,15 @@ H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Version */ - if (p + 4 - 1 > p_end) /* 4 byte is minimum for all versions */ - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "ran off the end of the buffer: current p = %p, p_end = %p", - (const void *)(p + 4), (const void *)p_end) + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") pline->version = *p++; if (pline->version < H5O_PLINE_VERSION_1 || pline->version > H5O_PLINE_VERSION_LATEST) HGOTO_ERROR(H5E_PLINE, H5E_CANTLOAD, NULL, "bad version number for filter pipeline message") /* Number of filters */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") pline->nused = *p++; if (pline->nused > H5Z_MAX_NFILTERS) { @@ -153,8 +147,11 @@ H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign } /* Reserved */ - if (pline->version == H5O_PLINE_VERSION_1) + if (pline->version == H5O_PLINE_VERSION_1) { + if (H5_IS_BUFFER_OVERFLOW(p, 6, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") p += 6; + } /* Allocate array for filters */ pline->nalloc = pline->nused; @@ -164,94 +161,95 @@ H5O__pline_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign /* Decode filters */ for (i = 0, filter = &pline->filter[0]; i < pline->nused; i++, filter++) { /* Filter ID */ - if (p + 6 - 1 > p_end) /* 6 bytes minimum */ - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, - "ran off the end of the buffer: current p = %p, p_end = %p", (const void *)(p + 6), - (const void *)p_end) + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, filter->id); /* Length of filter name */ if (pline->version > H5O_PLINE_VERSION_1 && filter->id < H5Z_FILTER_RESERVED) name_length = 0; else { + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, name_length); if (pline->version == H5O_PLINE_VERSION_1 && name_length % 8) HGOTO_ERROR(H5E_PLINE, H5E_CANTLOAD, NULL, "filter name length is not a multiple of eight") - if (p + 4 - 1 > p_end) /* with name_length 4 bytes to go */ - HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, - "ran off the end of the buffer: current p = %p, p_end = %p", - (const void *)(p + 4), (const void *)p_end) - } /* end if */ + } /* Filter flags */ + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, filter->flags); /* Number of filter parameters ("client data elements") */ + if (H5_IS_BUFFER_OVERFLOW(p, 
2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT16DECODE(p, filter->cd_nelmts); /* Filter name, if there is one */ if (name_length) { - size_t actual_name_length; /* Actual length of name */ - size_t len = (size_t)(p_end - p + 1); + size_t actual_name_length; /* Actual length of name */ + size_t max = (size_t)(p_end - p + 1); /* Max possible name length */ + /* Determine actual name length (without padding, but with null terminator) */ - actual_name_length = HDstrnlen((const char *)p, len); - if (actual_name_length == len) + actual_name_length = HDstrnlen((const char *)p, max); + if (actual_name_length == max) HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "filter name not null terminated") actual_name_length += 1; /* include \0 byte */ - HDassert(actual_name_length <= name_length); /* Allocate space for the filter name, or use the internal buffer */ if (actual_name_length > H5Z_COMMON_NAME_LEN) { filter->name = (char *)H5MM_malloc(actual_name_length); if (NULL == filter->name) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for filter name") - } /* end if */ + } else filter->name = filter->_name; HDstrncpy(filter->name, (const char *)p, actual_name_length); + + if (H5_IS_BUFFER_OVERFLOW(p, name_length, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") p += name_length; - } /* end if */ + } /* Filter parameters */ if (filter->cd_nelmts) { - size_t j; /* Local index variable */ /* Allocate space for the client data elements, or use the internal buffer */ if (filter->cd_nelmts > H5Z_COMMON_CD_VALUES) { filter->cd_values = (unsigned *)H5MM_malloc(filter->cd_nelmts * sizeof(unsigned)); if (NULL == filter->cd_values) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for client data") - } /* end if */ + } else filter->cd_values = filter->_cd_values; - /* - * Read the client data values and the padding - */ - for (j = 0; j < filter->cd_nelmts; j++) { - if (p + 4 - 1 <= p_end) - UINT32DECODE(p, filter->cd_values[j]) - else - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, - "ran off the end of the buffer: current p = %p, p_size = %zu, p_end = %p", - (const void *)p, p_size, (const void *)p_end) + /* Read the client data values and the padding */ + for (size_t j = 0; j < filter->cd_nelmts; j++) { + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") + UINT32DECODE(p, filter->cd_values[j]) } if (pline->version == H5O_PLINE_VERSION_1) - if (filter->cd_nelmts % 2) - p += 4; /*padding*/ - } /* end if */ - } /* end for */ + if (filter->cd_nelmts % 2) { + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, + "ran off end of input buffer while decoding") + p += 4; /* padding */ + } + } + } /* Set return value */ ret_value = pline; done: - if (NULL == ret_value && pline) { + if (!ret_value && pline) { H5O__pline_reset(pline); H5O__pline_free(pline); - } /* end if */ + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__pline_decode() */ diff --git a/src/H5Orefcount.c b/src/H5Orefcount.c index 51da22c2683..f4d3b5c9abc 100644 --- a/src/H5Orefcount.c +++ b/src/H5Orefcount.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5Orefcount.c - * Mar 10 2007 - * Quincey Koziol * - * Purpose: Object ref. count messages. 
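
The H5Orefcount.c hunks that follow are the smallest complete instance of this hardening: a p_size parameter that used to be H5_ATTR_UNUSED now feeds a p_end pointer, and both reads (a version byte and a 32-bit count) are checked before they happen. A condensed fragment of that shape, assuming the usual ret_value/done: scaffolding of an HDF5 decode callback around it:

    const uint8_t *p_end = p + p_size - 1;  /* last valid byte of the raw message */

    /* One byte of version */
    if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end))
        HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding")
    if (*p++ != H5O_REFCOUNT_VERSION)
        HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message")

    /* Four bytes of reference count */
    if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end))
        HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding")
    UINT32DECODE(p, *refcount)
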
+ * Purpose: Object reference count messages * *------------------------------------------------------------------------- */ @@ -72,31 +70,30 @@ H5FL_DEFINE_STATIC(H5O_refcount_t); /*------------------------------------------------------------------------- * Function: H5O__refcount_decode * - * Purpose: Decode a message and return a pointer to a newly allocated one. - * - * Return: Success: Ptr to new message in native form. - * Failure: NULL - * - * Programmer: Quincey Koziol - * Mar 10 2007 + * Purpose: Decode a message and return a pointer to a newly allocated + * one. * + * Return: Success: Pointer to new message in native form + * Failure: NULL *------------------------------------------------------------------------- */ static void * H5O__refcount_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, - unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, - size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, + const uint8_t *p) { - H5O_refcount_t *refcount = NULL; /* Reference count */ - void *ret_value = NULL; /* Return value */ + H5O_refcount_t *refcount = NULL; /* Reference count */ + const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); /* Version of message */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") if (*p++ != H5O_REFCOUNT_VERSION) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message") @@ -104,15 +101,17 @@ H5O__refcount_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, if (NULL == (refcount = H5FL_MALLOC(H5O_refcount_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") - /* Get ref. 
count for object */ + /* Get reference count for object */ + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") UINT32DECODE(p, *refcount) /* Set return value */ ret_value = refcount; done: - if (ret_value == NULL && refcount != NULL) - refcount = H5FL_FREE(H5O_refcount_t, refcount); + if (!ret_value && refcount) + H5FL_FREE(H5O_refcount_t, refcount); FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__refcount_decode() */ diff --git a/src/H5Osdspace.c b/src/H5Osdspace.c index e9a0dc6e322..9bf5d6f4582 100644 --- a/src/H5Osdspace.c +++ b/src/H5Osdspace.c @@ -115,11 +115,9 @@ H5O__sdspace_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UN FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); - /* decode */ if (NULL == (sdim = H5FL_CALLOC(H5S_extent_t))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, NULL, "dataspace structure allocation failed") sdim->type = H5S_NO_CLASS; @@ -154,9 +152,11 @@ H5O__sdspace_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UN if (sdim->type != H5S_SIMPLE && sdim->rank > 0) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "invalid rank for scalar or NULL dataspace") - } /* end if */ + } else { - /* Set the dataspace type to be simple or scalar as appropriate */ + /* Set the dataspace type to be simple or scalar as appropriate + * (version 1 does not allow H5S_NULL) + */ if (sdim->rank > 0) sdim->type = H5S_SIMPLE; else @@ -166,50 +166,46 @@ H5O__sdspace_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UN if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") p++; - } /* end else */ - HDassert(sdim->type != H5S_NULL || sdim->version >= H5O_SDSPACE_VERSION_2); + } - /* Only Version 1 has these reserved bytes */ + /* Version 1 has 4 reserved bytes */ if (version == H5O_SDSPACE_VERSION_1) { if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") - p += 4; /*reserved*/ + p += 4; } /* Decode dimension sizes */ if (sdim->rank > 0) { - uint8_t sizeof_size = H5F_SIZEOF_SIZE(f); - /* - * Ensure that decoding doesn't cause reading past buffer's end, - * due to possible data corruption - check that we have space to - * decode a "sdim->rank" number of hsize_t values - */ - if (H5_IS_BUFFER_OVERFLOW(p, (sizeof_size * sdim->rank), p_end)) + /* Sizes */ + + /* Check that we have space to decode sdim->rank values */ + if (H5_IS_BUFFER_OVERFLOW(p, (H5F_sizeof_size(f) * sdim->rank), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") if (NULL == (sdim->size = (hsize_t *)H5FL_ARR_MALLOC(hsize_t, (size_t)sdim->rank))) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "memory allocation failed") - for (i = 0; i < sdim->rank; i++) H5F_DECODE_LENGTH(f, p, sdim->size[i]); + /* Max sizes */ + if (flags & H5S_VALID_MAX) { if (NULL == (sdim->max = (hsize_t *)H5FL_ARR_MALLOC(hsize_t, (size_t)sdim->rank))) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "memory allocation failed") - /* - * Ensure that decoding doesn't cause reading past buffer's end, - * due to possible data corruption - check that we have space to - * decode a "sdim->rank" number of hsize_t values - */ - if (H5_IS_BUFFER_OVERFLOW(p, (sizeof_size * sdim->rank), p_end)) + /* Check that we have space to decode sdim->rank values */ + if (H5_IS_BUFFER_OVERFLOW(p, (H5F_sizeof_size(f) * sdim->rank), p_end)) HGOTO_ERROR(H5E_OHDR, 
H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding") - for (i = 0; i < sdim->rank; i++) H5F_DECODE_LENGTH(f, p, sdim->max[i]); - } /* end if */ - } /* end if */ + } + + /* NOTE: The version 1 permutation indexes were never implemented so + * there is nothing to decode. + */ + } /* Compute the number of elements in the extent */ if (sdim->type == H5S_NULL) @@ -217,16 +213,16 @@ H5O__sdspace_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UN else { for (i = 0, sdim->nelem = 1; i < sdim->rank; i++) sdim->nelem *= sdim->size[i]; - } /* end else */ + } /* Set return value */ - ret_value = (void *)sdim; /*success*/ + ret_value = (void *)sdim; done: if (!ret_value && sdim) { H5S__extent_release(sdim); - sdim = H5FL_FREE(H5S_extent_t, sdim); - } /* end if */ + H5FL_FREE(H5S_extent_t, sdim); + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__sdspace_decode() */ diff --git a/src/H5Oshmesg.c b/src/H5Oshmesg.c index 586e2ced983..8510c6e3e81 100644 --- a/src/H5Oshmesg.c +++ b/src/H5Oshmesg.c @@ -56,29 +56,25 @@ const H5O_msg_class_t H5O_MSG_SHMESG[1] = {{ }}; /*------------------------------------------------------------------------- - * Function: H5O__shmesg_decode + * Function: H5O__shmesg_decode * - * Purpose: Decode a shared message table message and return a pointer + * Purpose: Decode a shared message table message and return a pointer * to a newly allocated H5O_shmesg_table_t struct. * - * Return: Success: Ptr to new message in native struct. - * Failure: NULL - * - * Programmer: James Laird - * Jan 29, 2007 - * + * Return: Success: Ptr to new message in native struct. + * Failure: NULL *------------------------------------------------------------------------- */ static void * H5O__shmesg_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5O_shmesg_table_t *mesg; /* Native message */ - void *ret_value = NULL; /* Return value */ + H5O_shmesg_table_t *mesg; /* New shared message table */ + const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* Sanity check */ HDassert(f); HDassert(p); @@ -87,14 +83,25 @@ H5O__shmesg_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU "memory allocation failed for shared message table message") /* Retrieve version, table address, and number of indexes */ + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); mesg->version = *p++; + + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(mesg->addr)); + + if (H5_IS_BUFFER_OVERFLOW(p, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); mesg->nindexes = *p++; /* Set return value */ ret_value = (void *)mesg; done: + if (!ret_value && mesg) + H5MM_xfree(mesg); + FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__shmesg_decode() */ diff --git a/src/H5Ostab.c b/src/H5Ostab.c index ae4635e6a5e..2428f06dd87 100644 --- a/src/H5Ostab.c +++ b/src/H5Ostab.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5Ostab.c - * Aug 6 1997 - * Robb Matzke * - * Purpose: Symbol table messages. 
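
Addresses and lengths in these messages are not fixed-width: their on-disk size is a per-file setting, so the guard has to ask the file as well. The symbol-table decoder below checks H5F_sizeof_addr(f) bytes before each H5F_addr_decode() call, and the dataspace decoder above does the same with H5F_sizeof_size(f) multiplied by the rank. The fragment here simply restates the symbol-table case:

    /* Both the B-tree and heap addresses occupy H5F_sizeof_addr(f) bytes */
    if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end))
        HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding");
    H5F_addr_decode(f, &p, &(stab->btree_addr));   /* advances p by the address size */

    if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end))
        HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding");
    H5F_addr_decode(f, &p, &(stab->heap_addr));
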
+ * Purpose: Symbol table messages * *------------------------------------------------------------------------- */ @@ -78,41 +76,39 @@ H5FL_DEFINE_STATIC(H5O_stab_t); * Purpose: Decode a symbol table message and return a pointer to * a newly allocated one. * - * Return: Success: Ptr to new message in native order. - * - * Failure: NULL - * - * Programmer: Robb Matzke - * Aug 6 1997 - * + * Return: Success: Pointer to new message in native order + * Failure: NULL *------------------------------------------------------------------------- */ static void * H5O__stab_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned H5_ATTR_UNUSED *ioflags, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + unsigned H5_ATTR_UNUSED *ioflags, size_t p_size, const uint8_t *p) { - H5O_stab_t *stab = NULL; - void *ret_value = NULL; /* Return value */ + H5O_stab_t *stab = NULL; + const uint8_t *p_end = p + p_size - 1; /* End of the p buffer */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* check args */ HDassert(f); HDassert(p); - /* decode */ if (NULL == (stab = H5FL_CALLOC(H5O_stab_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") + + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(stab->btree_addr)); + + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_addr(f), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(f, &p, &(stab->heap_addr)); - /* Set return value */ ret_value = stab; done: - if (ret_value == NULL) - if (stab != NULL) - stab = H5FL_FREE(H5O_stab_t, stab); + if (!ret_value && stab) + H5FL_FREE(H5O_stab_t, stab); FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__stab_decode() */ From f5d5b98381d1af7223d992963fcafe72f12b1e14 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 25 Apr 2023 10:03:12 -0700 Subject: [PATCH 161/231] H5Odtype.c decode cleanup (#2797) * Adds bounds checking to H5Odtype.c * Minor tidy of dtypes test --- src/H5Odtype.c | 319 ++++++++++++++++++++++++++++++++++++------------- test/dtypes.c | 171 +++++++++++++------------- 2 files changed, 321 insertions(+), 169 deletions(-) diff --git a/src/H5Odtype.c b/src/H5Odtype.c index e5e8996cd81..b6e1b907b97 100644 --- a/src/H5Odtype.c +++ b/src/H5Odtype.c @@ -24,6 +24,15 @@ #include "H5Tpkg.h" /* Datatypes */ #include "H5VMprivate.h" /* Vectors and arrays */ +/* Variant boundary-checking macro, used here since H5Tdecode() doesn't take a + * size parameter so we need to ignore the bounds checks. + * + * This is a separate macro since we don't want to inflict that behavior on + * the rest of the library. + */ +#define H5_DTYPE_IS_BUFFER_OVERFLOW(skip, ptr, size, buffer_end) \ + (skip ? 
FALSE : ((ptr) + (size)-1) > (buffer_end)) + /* PRIVATE PROTOTYPES */ static herr_t H5O__dtype_encode(H5F_t *f, uint8_t *p, const void *mesg); static void *H5O__dtype_decode(H5F_t *f, H5O_t *open_oh, unsigned mesg_flags, unsigned *ioflags, @@ -108,35 +117,46 @@ const H5O_msg_class_t H5O_MSG_DTYPE[1] = {{ }}; /*------------------------------------------------------------------------- - * Function: H5O__dtype_decode_helper + * Function: H5O__dtype_decode_helper * - * Purpose: Decodes a datatype + * Purpose: Decodes a datatype * - * Return: TRUE if we can upgrade the parent type's version even + * Return: TRUE if we can upgrade the parent type's version even * with strict format checks * FALSE if we cannot - * Negative on failure - * - * Programmer: Robb Matzke - * Monday, December 8, 1997 - * + * NEGATIVE on failure *------------------------------------------------------------------------- */ static htri_t -H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t *dt) +H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t *dt, hbool_t skip, + const uint8_t *p_end) { - unsigned flags, version; - unsigned i; - size_t z; - htri_t ret_value = FALSE; /* Return value */ + unsigned flags; + unsigned version; + htri_t ret_value = FALSE; FUNC_ENTER_PACKAGE - /* check args */ HDassert(pp && *pp); HDassert(dt && dt->shared); + /* XXX NOTE! + * + * H5Tencode() does not take a buffer size, so normal bounds checking in + * that case is impossible. + * + * Instead of using our normal H5_IS_BUFFER_OVERFLOW macro, use + * H5_DTYPE_IS_BUFFER_OVERFLOW, which will skip the check when the + * we're decoding a buffer from H5Tconvert(). + * + * Even if this is fixed at some point in the future, as long as we + * support the old, size-less API call, we will need to use the modified + * macros. + */ + /* Version, class & flags */ + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(*pp, flags); version = (flags >> 4) & 0x0f; if (version < H5O_DTYPE_VERSION_1 || version > H5O_DTYPE_VERSION_LATEST) @@ -146,6 +166,8 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t flags >>= 8; /* Size */ + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(*pp, dt->shared->size); /* Check for invalid datatype size */ @@ -161,6 +183,8 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t dt->shared->u.atomic.lsb_pad = (flags & 0x2) ? H5T_PAD_ONE : H5T_PAD_ZERO; dt->shared->u.atomic.msb_pad = (flags & 0x4) ? H5T_PAD_ONE : H5T_PAD_ZERO; dt->shared->u.atomic.u.i.sign = (flags & 0x8) ? H5T_SGN_2 : H5T_SGN_NONE; + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 2 + 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(*pp, dt->shared->u.atomic.offset); UINT16DECODE(*pp, dt->shared->u.atomic.prec); break; @@ -178,7 +202,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* VAX order if both 1st and 6th bits are turned on*/ if (flags & 0x40) dt->shared->u.atomic.order = H5T_ORDER_VAX; - } /* end if */ + } dt->shared->u.atomic.lsb_pad = (flags & 0x2) ? H5T_PAD_ONE : H5T_PAD_ZERO; dt->shared->u.atomic.msb_pad = (flags & 0x4) ? H5T_PAD_ONE : H5T_PAD_ZERO; dt->shared->u.atomic.u.f.pad = (flags & 0x8) ? 
H5T_PAD_ONE : H5T_PAD_ZERO; @@ -197,21 +221,40 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t default: HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "unknown floating-point normalization") - } /* end switch */ + } dt->shared->u.atomic.u.f.sign = (flags >> 8) & 0xff; + + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 2 + 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(*pp, dt->shared->u.atomic.offset); UINT16DECODE(*pp, dt->shared->u.atomic.prec); + + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 1 + 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); dt->shared->u.atomic.u.f.epos = *(*pp)++; dt->shared->u.atomic.u.f.esize = *(*pp)++; - HDassert(dt->shared->u.atomic.u.f.esize > 0); + if (dt->shared->u.atomic.u.f.esize == 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "exponent size can't be zero") + + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 1 + 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); dt->shared->u.atomic.u.f.mpos = *(*pp)++; dt->shared->u.atomic.u.f.msize = *(*pp)++; - HDassert(dt->shared->u.atomic.u.f.msize > 0); + if (dt->shared->u.atomic.u.f.msize == 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "mantissa size can't be zero") + + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(*pp, dt->shared->u.atomic.u.f.ebias); break; - case H5T_TIME: /* Time datatypes */ + case H5T_TIME: + /* + * Time datatypes... + */ dt->shared->u.atomic.order = (flags & 0x1) ? H5T_ORDER_BE : H5T_ORDER_LE; + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(*pp, dt->shared->u.atomic.prec); break; @@ -236,22 +279,35 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t dt->shared->u.atomic.order = (flags & 0x1) ? H5T_ORDER_BE : H5T_ORDER_LE; dt->shared->u.atomic.lsb_pad = (flags & 0x2) ? H5T_PAD_ONE : H5T_PAD_ZERO; dt->shared->u.atomic.msb_pad = (flags & 0x4) ? H5T_PAD_ONE : H5T_PAD_ZERO; + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 2 + 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(*pp, dt->shared->u.atomic.offset); UINT16DECODE(*pp, dt->shared->u.atomic.prec); break; - case H5T_OPAQUE: + case H5T_OPAQUE: { + size_t z; + /* * Opaque types... 
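
The string-bearing fields in this message class (the opaque tag handled just below, compound member names, enum member names, and the filter names earlier in the series) get a stricter treatment than fixed-width fields: the decoder measures the string itself with HDstrnlen() capped at the bytes that remain, fails if no terminator is found, and only then advances by the stored, possibly padded, length. A short fragment of that idea, with pp and p_end as in the surrounding helper:

    size_t max_len            = (size_t)(p_end - *pp + 1);            /* bytes left in the buffer */
    size_t actual_name_length = HDstrnlen((const char *)*pp, max_len);

    if (actual_name_length == max_len)   /* no NUL byte before the end of the buffer */
        HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, FAIL, "field name not null terminated")
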
*/ + + /* The opaque tag flag field must be aligned */ z = flags & (H5T_OPAQUE_TAG_MAX - 1); - HDassert(0 == (z & 0x7)); /*must be aligned*/ + if (0 != (z & 0x7)) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "opaque flag field must be aligned") + if (NULL == (dt->shared->u.opaque.tag = (char *)H5MM_malloc(z + 1))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed") + + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, z, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); H5MM_memcpy(dt->shared->u.opaque.tag, *pp, z); dt->shared->u.opaque.tag[z] = '\0'; + *pp += z; break; + } case H5T_COMPOUND: { unsigned nmembs; /* Number of compound members */ @@ -274,15 +330,26 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, FAIL, "memory allocation failed") dt->shared->u.compnd.nalloc = nmembs; - HDassert(dt->shared->u.compnd.memb_size == 0); + if (dt->shared->u.compnd.memb_size != 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "member size not initialized to zero") for (dt->shared->u.compnd.nmembs = 0; dt->shared->u.compnd.nmembs < nmembs; dt->shared->u.compnd.nmembs++) { - unsigned ndims = 0; /* Number of dimensions of the array field */ - htri_t can_upgrade; /* Whether we can upgrade this type's version */ - hsize_t dim[H5O_LAYOUT_NDIMS]; /* Dimensions of the array */ - H5T_t *array_dt; /* Temporary pointer to the array datatype */ - H5T_t *temp_type; /* Temporary pointer to the field's datatype */ + + size_t actual_name_length; /* Actual length of name */ + size_t max = (size_t)(p_end - *pp + 1); /* Max possible name length */ + unsigned ndims = 0; /* Number of dimensions of the array field */ + htri_t can_upgrade; /* Whether we can upgrade this type's version */ + hsize_t dim[H5O_LAYOUT_NDIMS]; /* Dimensions of the array */ + H5T_t *array_dt; /* Temporary pointer to the array datatype */ + H5T_t *temp_type; /* Temporary pointer to the field's datatype */ + + /* Get the length of the field name */ + actual_name_length = HDstrnlen((const char *)*pp, max); + if (actual_name_length == max) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, FAIL, "field name not null terminated") + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, actual_name_length, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); /* Decode the field name */ if (NULL == (dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].name = @@ -291,26 +358,45 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t "can't duplicate compound member name string") /* Version 3 of the datatype message eliminated the padding to multiple of 8 bytes */ - if (version >= H5O_DTYPE_VERSION_3) + if (version >= H5O_DTYPE_VERSION_3) { /* Advance past name, including null terminator */ - *pp += HDstrlen((const char *)*pp) + 1; - else + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, actual_name_length + 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, + "ran off end of input buffer while decoding"); + *pp += actual_name_length + 1; + } + else { /* Advance multiple of 8 w/ null terminator */ - *pp += ((HDstrlen((const char *)*pp) + 8) / 8) * 8; + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, ((actual_name_length + 8) / 8) * 8, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, + "ran off end of input buffer while decoding"); + *pp += ((actual_name_length + 8) / 8) * 8; + } /* Decode the field offset */ /* (starting with version 3 of the datatype message, use the 
minimum # of bytes required) */ - if (version >= H5O_DTYPE_VERSION_3) + if (version >= H5O_DTYPE_VERSION_3) { + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, offset_nbytes, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, + "ran off end of input buffer while decoding"); UINT32DECODE_VAR(*pp, dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset, offset_nbytes) - else + } + else { + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, + "ran off end of input buffer while decoding"); UINT32DECODE(*pp, dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset) + } /* Older versions of the library allowed a field to have * intrinsic 'arrayness'. Newer versions of the library * use the separate array datatypes. */ if (version == H5O_DTYPE_VERSION_1) { /* Decode the number of dimensions */ + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, + "ran off end of input buffer while decoding"); ndims = *(*pp)++; /* Check that ndims is valid */ @@ -320,18 +406,31 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t HGOTO_ERROR(H5E_DATATYPE, H5E_BADTYPE, FAIL, "invalid number of dimensions for array") } - *pp += 3; /*reserved bytes */ + /* Skip reserved bytes */ + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 3, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, + "ran off end of input buffer while decoding"); + *pp += 3; /* Skip dimension permutation */ + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, + "ran off end of input buffer while decoding"); *pp += 4; /* Skip reserved bytes */ + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, + "ran off end of input buffer while decoding"); *pp += 4; /* Decode array dimension sizes */ - for (i = 0; i < 4; i++) + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, (4 * 4), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, + "ran off end of input buffer while decoding"); + for (int i = 0; i < 4; i++) UINT32DECODE(*pp, dim[i]); - } /* end if */ + } /* Allocate space for the field's datatype */ if (NULL == (temp_type = H5T__alloc())) { @@ -341,14 +440,15 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t } /* Decode the field's datatype information */ - if ((can_upgrade = H5O__dtype_decode_helper(ioflags, pp, temp_type)) < 0) { + if ((can_upgrade = H5O__dtype_decode_helper(ioflags, pp, temp_type, skip, p_end)) < 0) { dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].name = H5MM_xfree(dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].name); if (H5T_close_real(temp_type) < 0) HDONE_ERROR(H5E_DATATYPE, H5E_CANTRELEASE, FAIL, "can't release datatype info") HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "unable to decode member type") - } /* end if */ - HDassert(temp_type->shared->size > 0); + } + if (temp_type->shared->size == 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "type size can't be zero") /* Upgrade the version if we can and it is necessary */ if (can_upgrade && temp_type->shared->version > version) { @@ -356,7 +456,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* Pass "can_upgrade" flag down to parent type */ ret_value = TRUE; - } /* end if */ + } /* Go create the array datatype now, for older versions of the datatype message */ if (version == H5O_DTYPE_VERSION_1) { @@ -371,7 +471,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const 
uint8_t **pp, H5T_t "can't release datatype info") HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to create array datatype") - } /* end if */ + } /* Close the base type for the array */ if (H5T_close_real(temp_type) < 0) { @@ -394,16 +494,15 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* Set the return value to indicate that we should freely * upgrade parent types */ ret_value = TRUE; - } /* end else */ - } /* end if */ - } /* end if */ + } + } + } /* Keep track of the maximum member version found */ if (temp_type->shared->version > max_version) max_version = temp_type->shared->version; - /* - * Set the "force conversion" flag if VL datatype fields exist in this + /* Set the "force conversion" flag if VL datatype fields exist in this * type or any component types */ if (temp_type->shared->force_conv == TRUE) @@ -416,29 +515,30 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* Set the field datatype (finally :-) */ dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].type = temp_type; - /* Check if this field overlaps with a prior field */ - /* (probably indicates that the file is corrupt) */ + /* Check if this field overlaps with a prior field + * (probably indicates that the file is corrupt) + */ if (dt->shared->u.compnd.nmembs > 0 && dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset < max_memb_pos) { - for (i = 0; i < dt->shared->u.compnd.nmembs; i++) + for (unsigned u = 0; u < dt->shared->u.compnd.nmembs; u++) if ((dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset >= - dt->shared->u.compnd.memb[i].offset && + dt->shared->u.compnd.memb[u].offset && dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset < - (dt->shared->u.compnd.memb[i].offset + dt->shared->u.compnd.memb[i].size)) || + (dt->shared->u.compnd.memb[u].offset + dt->shared->u.compnd.memb[u].size)) || (dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset < - dt->shared->u.compnd.memb[i].offset && + dt->shared->u.compnd.memb[u].offset && (dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset + dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].size) > - dt->shared->u.compnd.memb[i].offset)) + dt->shared->u.compnd.memb[u].offset)) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "member overlaps with previous member") - } /* end if */ + } /* Update the maximum member position covered */ max_memb_pos = MAX(max_memb_pos, (dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset + dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].size)); - } /* end for */ + } /* Check if the compound type is packed */ H5T__update_packed(dt); @@ -451,14 +551,17 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* We won't mark the message dirty since there were no * errors in the file, simply type versions that we will no * longer encode. */ - } /* end if */ + } /* Check that no member of this compound has a version greater * than the compound itself. */ H5O_DTYPE_CHECK_VERSION(dt, version, max_version, ioflags, "compound", FAIL) } break; - case H5T_REFERENCE: /* Reference datatypes... */ + case H5T_REFERENCE: + /* + * Reference datatypes... 
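
The member-overlap test a few hunks above is easier to follow once it is read as a plain interval-intersection check: the new member and a previously decoded member collide exactly when each one starts before the other ends. The hypothetical helper below restates that condition; the patch keeps the expanded form inline rather than introducing such a function.

    /* Hypothetical restatement: fields [a_off, a_off + a_len) and
     * [b_off, b_off + b_len) overlap iff each starts before the other ends. */
    static hbool_t
    fields_overlap(size_t a_off, size_t a_len, size_t b_off, size_t b_len)
    {
        return (a_off < b_off + b_len) && (b_off < a_off + a_len);
    }
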
+ */ dt->shared->u.atomic.order = H5T_ORDER_NONE; dt->shared->u.atomic.prec = 8 * dt->shared->size; dt->shared->u.atomic.offset = 0; @@ -501,7 +604,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t nmembs = flags & 0xffff; if (NULL == (dt->shared->parent = H5T__alloc())) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't allocate parent datatype") - if (H5O__dtype_decode_helper(ioflags, pp, dt->shared->parent) < 0) + if (H5O__dtype_decode_helper(ioflags, pp, dt->shared->parent, skip, p_end) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "unable to decode parent datatype") if (dt->shared->parent->shared->size != dt->shared->size) HGOTO_ERROR(H5E_DATATYPE, H5E_BADSIZE, FAIL, "ENUM datatype size does not match parent") @@ -520,37 +623,61 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* Names */ for (dt->shared->u.enumer.nmembs = 0; dt->shared->u.enumer.nmembs < nmembs; dt->shared->u.enumer.nmembs++) { + + size_t actual_name_length; /* Actual length of name */ + size_t max = (size_t)(p_end - *pp + 1); /* Max possible name length */ + + actual_name_length = HDstrnlen((const char *)*pp, max); + if (actual_name_length == max) + HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, FAIL, "enum name not null terminated") + + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, actual_name_length, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); if (NULL == (dt->shared->u.enumer.name[dt->shared->u.enumer.nmembs] = H5MM_xstrdup((const char *)*pp))) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTCOPY, FAIL, "can't duplicate enum name string") /* Version 3 of the datatype message eliminated the padding to multiple of 8 bytes */ - if (version >= H5O_DTYPE_VERSION_3) + if (version >= H5O_DTYPE_VERSION_3) { /* Advance past name, including null terminator */ - *pp += HDstrlen((const char *)*pp) + 1; - else + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, actual_name_length + 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, + "ran off end of input buffer while decoding"); + *pp += actual_name_length + 1; + } + else { /* Advance multiple of 8 w/ null terminator */ - *pp += ((HDstrlen((const char *)*pp) + 8) / 8) * 8; - } /* end for */ - HDassert(dt->shared->u.enumer.nmembs == nmembs); + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, ((actual_name_length + 8) / 8) * 8, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, + "ran off end of input buffer while decoding"); + *pp += ((actual_name_length + 8) / 8) * 8; + } + } + if (dt->shared->u.enumer.nmembs != nmembs) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "incorrect number of enum members decoded"); /* Values */ + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, nmembs * dt->shared->parent->shared->size, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); H5MM_memcpy(dt->shared->u.enumer.value, *pp, nmembs * dt->shared->parent->shared->size); *pp += nmembs * dt->shared->parent->shared->size; } break; - case H5T_VLEN: /* Variable length datatypes... */ + case H5T_VLEN: + /* + * Variable length datatypes... 
+ */ /* Set the type of VL information, either sequence or string */ dt->shared->u.vlen.type = (H5T_vlen_type_t)(flags & 0x0f); if (dt->shared->u.vlen.type == H5T_VLEN_STRING) { dt->shared->u.vlen.pad = (H5T_str_t)((flags >> 4) & 0x0f); dt->shared->u.vlen.cset = (H5T_cset_t)((flags >> 8) & 0x0f); - } /* end if */ + } /* Decode base type of VL information */ if (NULL == (dt->shared->parent = H5T__alloc())) HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "memory allocation failed") - if (H5O__dtype_decode_helper(ioflags, pp, dt->shared->parent) < 0) + if (H5O__dtype_decode_helper(ioflags, pp, dt->shared->parent, skip, p_end) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "unable to decode VL parent type") /* Check if the parent of this vlen has a version greater than the @@ -565,8 +692,13 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "invalid datatype location") break; - case H5T_ARRAY: /* Array datatypes */ + case H5T_ARRAY: + /* + * Array datatypes... + */ /* Decode the number of dimensions */ + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); dt->shared->u.array.ndims = *(*pp)++; /* Double-check the number of dimensions */ @@ -574,23 +706,32 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t HGOTO_ERROR(H5E_DATATYPE, H5E_CANTLOAD, FAIL, "too many dimensions for array datatype") /* Skip reserved bytes, if version has them */ - if (version < H5O_DTYPE_VERSION_3) + if (version < H5O_DTYPE_VERSION_3) { + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 3, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); *pp += 3; + } /* Decode array dimension sizes & compute number of elements */ - for (i = 0, dt->shared->u.array.nelem = 1; i < (unsigned)dt->shared->u.array.ndims; i++) { - UINT32DECODE(*pp, dt->shared->u.array.dim[i]); - dt->shared->u.array.nelem *= dt->shared->u.array.dim[i]; - } /* end for */ + dt->shared->u.array.nelem = 1; + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, (dt->shared->u.array.ndims * 4), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); + for (unsigned u = 0; u < dt->shared->u.array.ndims; u++) { + UINT32DECODE(*pp, dt->shared->u.array.dim[u]); + dt->shared->u.array.nelem *= dt->shared->u.array.dim[u]; + } /* Skip array dimension permutations, if version has them */ - if (version < H5O_DTYPE_VERSION_3) + if (version < H5O_DTYPE_VERSION_3) { + if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, (dt->shared->u.array.ndims * 4), p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); *pp += dt->shared->u.array.ndims * 4; + } /* Decode base type of array */ if (NULL == (dt->shared->parent = H5T__alloc())) HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "memory allocation failed") - if (H5O__dtype_decode_helper(ioflags, pp, dt->shared->parent) < 0) + if (H5O__dtype_decode_helper(ioflags, pp, dt->shared->parent, skip, p_end) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "unable to decode array parent type") /* Check if the parent of this array has a version greater than the @@ -600,8 +741,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* There should be no array datatypes with version < 2. 
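
The datatype decoder is the one place where bounds checking has to be optional: H5Tdecode() hands the library a bare buffer with no length, so H5O__dtype_decode() (further below) treats p_size == SIZE_MAX as "size unknown" and threads a skip flag through every recursive H5O__dtype_decode_helper() call. A short sketch of that switch, condensed from the surrounding hunks:

    /* Inside the decode callback: decide whether bounds checks are possible.
     * H5Tdecode() has no size argument, so the library passes SIZE_MAX. */
    hbool_t        skip  = (p_size == SIZE_MAX) ? TRUE : FALSE;
    const uint8_t *p_end = p + p_size - 1;   /* never consulted when skip is TRUE */

    /* Every read in the helper then goes through the variant macro, which
     * short-circuits to FALSE when skip is set. */
    if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end))
        HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding");
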
*/ H5O_DTYPE_CHECK_VERSION(dt, version, H5O_DTYPE_VERSION_2, ioflags, "array", FAIL) - /* - * Set the "force conversion" flag if a VL base datatype is used or + /* Set the "force conversion" flag if a VL base datatype is used or * or if any components of the base datatype are VL types. */ if (dt->shared->parent->shared->force_conv == TRUE) @@ -612,7 +752,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t case H5T_NCLASSES: default: HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "unknown datatype class found") - } /* end switch */ + } done: /* Cleanup on error */ @@ -1150,27 +1290,36 @@ H5O__dtype_encode_helper(uint8_t **pp, const H5T_t *dt) Pointer to the new message in native order on success, NULL on failure DESCRIPTION This function decodes the "raw" disk form of a simple datatype message - into a struct in memory native format. The struct is allocated within this - function using malloc() and is returned to the caller. + into a struct in memory native format. The struct is allocated within this + function using malloc() and is returned to the caller. --------------------------------------------------------------------------*/ static void * H5O__dtype_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned *ioflags /*in,out*/, size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + unsigned *ioflags /*in,out*/, size_t p_size, const uint8_t *p) { - H5T_t *dt = NULL; - void *ret_value = NULL; /* Return value */ + hbool_t skip; + H5T_t *dt = NULL; + const uint8_t *p_end = p + p_size - 1; + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* check args */ + HDassert(f); HDassert(p); /* Allocate datatype message */ if (NULL == (dt = H5T__alloc())) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") + /* If we are decoding a buffer from H5Tdecode(), we won't have the size + * of the buffer and bounds checking will be impossible. In this case, + * the library will have set p_size to SIZE_MAX and we can use that + * as a signal to skip bounds checking. + */ + skip = (p_size == SIZE_MAX ? TRUE : FALSE); + /* Perform actual decode of message */ - if (H5O__dtype_decode_helper(ioflags, &p, dt) < 0) + if (H5O__dtype_decode_helper(ioflags, &p, dt, skip, p_end) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, NULL, "can't decode type") /* Set return value */ diff --git a/test/dtypes.c b/test/dtypes.c index 37fc8c7c243..bd55491b9a7 100644 --- a/test/dtypes.c +++ b/test/dtypes.c @@ -5965,34 +5965,37 @@ opaque_funcs(void) * * Purpose: Tests functions of encoding and decoding datatype. 
* - * Return: Success: 0 - * - * Failure: number of errors - * - * Programmer: Raymond Lu - * July 14, 2004 - * + * Return: Success: 0 + * Failure: number of errors *------------------------------------------------------------------------- */ static int test_encode(void) { - struct s1 { + struct cmpd { int a; float b; long c; double d; }; - hid_t file = -1, tid1 = -1, tid2 = -1, tid3 = -1; - hid_t decoded_tid1 = -1, decoded_tid2 = -1, decoded_tid3 = -1; + hid_t file = H5I_INVALID_HID; + hid_t tid1 = H5I_INVALID_HID; + hid_t tid2 = H5I_INVALID_HID; + hid_t tid3 = H5I_INVALID_HID; + hid_t decoded_tid1 = H5I_INVALID_HID; + hid_t decoded_tid2 = H5I_INVALID_HID; + hid_t decoded_tid3 = H5I_INVALID_HID; char filename[1024]; - char compnd_type[] = "Compound_type", enum_type[] = "Enum_type"; - char vlstr_type[] = "VLstring_type"; + char compnd_type[] = "Compound_type"; + char enum_type[] = "Enum_type"; + char vlstr_type[] = "VLstring_type"; short enum_val; size_t cmpd_buf_size = 0; size_t enum_buf_size = 0; size_t vlstr_buf_size = 0; - unsigned char *cmpd_buf = NULL, *enum_buf = NULL, *vlstr_buf = NULL; + unsigned char *cmpd_buf = NULL; + unsigned char *enum_buf = NULL; + unsigned char *vlstr_buf = NULL; hid_t ret_id; herr_t ret; @@ -6008,75 +6011,75 @@ test_encode(void) *----------------------------------------------------------------------- */ /* Create a compound datatype */ - if ((tid1 = H5Tcreate(H5T_COMPOUND, sizeof(struct s1))) < 0) { + if ((tid1 = H5Tcreate(H5T_COMPOUND, sizeof(struct cmpd))) < 0) { H5_FAILED(); HDprintf("Can't create datatype!\n"); goto error; - } /* end if */ - if (H5Tinsert(tid1, "a", HOFFSET(struct s1, a), H5T_NATIVE_INT) < 0) { + } + if (H5Tinsert(tid1, "a", HOFFSET(struct cmpd, a), H5T_NATIVE_INT) < 0) { H5_FAILED(); HDprintf("Can't insert field 'a'\n"); goto error; - } /* end if */ - if (H5Tinsert(tid1, "b", HOFFSET(struct s1, b), H5T_NATIVE_FLOAT) < 0) { + } + if (H5Tinsert(tid1, "b", HOFFSET(struct cmpd, b), H5T_NATIVE_FLOAT) < 0) { H5_FAILED(); HDprintf("Can't insert field 'b'\n"); goto error; - } /* end if */ - if (H5Tinsert(tid1, "c", HOFFSET(struct s1, c), H5T_NATIVE_LONG) < 0) { + } + if (H5Tinsert(tid1, "c", HOFFSET(struct cmpd, c), H5T_NATIVE_LONG) < 0) { H5_FAILED(); HDprintf("Can't insert field 'c'\n"); goto error; - } /* end if */ - if (H5Tinsert(tid1, "d", HOFFSET(struct s1, d), H5T_NATIVE_DOUBLE) < 0) { + } + if (H5Tinsert(tid1, "d", HOFFSET(struct cmpd, d), H5T_NATIVE_DOUBLE) < 0) { H5_FAILED(); HDprintf("Can't insert field 'd'\n"); goto error; - } /* end if */ + } /* Create a enumerate datatype */ if ((tid2 = H5Tcreate(H5T_ENUM, sizeof(short))) < 0) { H5_FAILED(); HDprintf("Can't create enumerate type\n"); goto error; - } /* end if */ + } if (H5Tenum_insert(tid2, "RED", (enum_val = 0, &enum_val)) < 0) { H5_FAILED(); HDprintf("Can't insert field into enumeration type\n"); goto error; - } /* end if */ + } if (H5Tenum_insert(tid2, "GREEN", (enum_val = 1, &enum_val)) < 0) { H5_FAILED(); HDprintf("Can't insert field into enumeration type\n"); goto error; - } /* end if */ + } if (H5Tenum_insert(tid2, "BLUE", (enum_val = 2, &enum_val)) < 0) { H5_FAILED(); HDprintf("Can't insert field into enumeration type\n"); goto error; - } /* end if */ + } if (H5Tenum_insert(tid2, "ORANGE", (enum_val = 3, &enum_val)) < 0) { H5_FAILED(); HDprintf("Can't insert field into enumeration type\n"); goto error; - } /* end if */ + } if (H5Tenum_insert(tid2, "YELLOW", (enum_val = 4, &enum_val)) < 0) { H5_FAILED(); HDprintf("Can't insert field into enumeration type\n"); goto error; 
- } /* end if */ + } /* Create a variable-length string type */ if ((tid3 = H5Tcopy(H5T_C_S1)) < 0) { H5_FAILED(); HDprintf("Can't copy a string type\n"); goto error; - } /* end if */ + } if (H5Tset_size(tid3, H5T_VARIABLE) < 0) { H5_FAILED(); HDprintf("Can't the string type to be variable-length\n"); goto error; - } /* end if */ + } /*----------------------------------------------------------------------- * Test encoding and decoding compound, enumerate, and VL string datatypes @@ -6087,12 +6090,12 @@ test_encode(void) H5_FAILED(); HDprintf("Can't encode compound type\n"); goto error; - } /* end if */ + } if (cmpd_buf_size > 0) cmpd_buf = (unsigned char *)HDcalloc((size_t)1, cmpd_buf_size); - /* Try decoding bogus buffer */ + /* Try decoding an incorrect (empty) buffer (should fail) */ H5E_BEGIN_TRY { ret_id = H5Tdecode(cmpd_buf); @@ -6100,7 +6103,7 @@ test_encode(void) H5E_END_TRY; if (ret_id != FAIL) { H5_FAILED(); - HDprintf("Decoded bogus buffer!\n"); + HDprintf("Decoded an empty buffer!\n"); goto error; } @@ -6108,7 +6111,7 @@ test_encode(void) H5_FAILED(); HDprintf("Can't encode compound type\n"); goto error; - } /* end if */ + } /* Decode from the compound buffer and return an object handle */ if ((decoded_tid1 = H5Tdecode(cmpd_buf)) < 0) @@ -6119,26 +6122,26 @@ test_encode(void) H5_FAILED(); HDprintf("Datatype wasn't encoded & decoded identically\n"); goto error; - } /* end if */ + } /* Query member number and member index by name, for compound type. */ if (H5Tget_nmembers(decoded_tid1) != 4) { H5_FAILED(); HDprintf("Can't get member number\n"); goto error; - } /* end if */ + } if (H5Tget_member_index(decoded_tid1, "c") != 2) { H5_FAILED(); HDprintf("Can't get correct index number\n"); goto error; - } /* end if */ + } /* Encode enumerate type in a buffer */ if (H5Tencode(tid2, NULL, &enum_buf_size) < 0) { H5_FAILED(); HDprintf("Can't encode enumerate type\n"); goto error; - } /* end if */ + } if (enum_buf_size > 0) enum_buf = (unsigned char *)HDcalloc((size_t)1, enum_buf_size); @@ -6147,40 +6150,40 @@ test_encode(void) H5_FAILED(); HDprintf("Can't encode enumerate type\n"); goto error; - } /* end if */ + } /* Decode from the enumerate buffer and return an object handle */ if ((decoded_tid2 = H5Tdecode(enum_buf)) < 0) { H5_FAILED(); HDprintf("Can't decode enumerate type\n"); goto error; - } /* end if */ + } /* Verify that the datatype was copied exactly */ if (H5Tequal(decoded_tid2, tid2) <= 0) { H5_FAILED(); HDprintf("Datatype wasn't encoded & decoded identically\n"); goto error; - } /* end if */ + } /* Query member number and member index by name, for enumeration type. 
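
The encode/decode checks around this point always follow the same two-pass shape: H5Tencode() is called first with a NULL buffer purely to learn the required size, then again to fill an allocation of that size, and the handle returned by H5Tdecode() is compared with H5Tequal(). A compact, self-contained version of that round trip (plain libc allocation instead of the harness's HD wrappers, and no error-count bookkeeping):

    #include <stdlib.h>
    #include "hdf5.h"

    /* Returns 1 if tid survives an encode/decode round trip, 0 otherwise */
    static int
    roundtrip_ok(hid_t tid)
    {
        size_t         nalloc = 0;
        unsigned char *buf    = NULL;
        hid_t          copy   = H5I_INVALID_HID;
        int            ok     = 0;

        if (H5Tencode(tid, NULL, &nalloc) < 0)              /* pass 1: size only */
            return 0;
        if (nalloc == 0 || NULL == (buf = calloc(1, nalloc)))
            return 0;
        if (H5Tencode(tid, buf, &nalloc) >= 0 &&            /* pass 2: fill the buffer */
            (copy = H5Tdecode(buf)) >= 0)
            ok = (H5Tequal(copy, tid) > 0);

        if (copy >= 0)
            H5Tclose(copy);
        free(buf);
        return ok;
    }
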
*/ if (H5Tget_nmembers(decoded_tid2) != 5) { H5_FAILED(); HDprintf("Can't get member number\n"); goto error; - } /* end if */ + } if (H5Tget_member_index(decoded_tid2, "ORANGE") != 3) { H5_FAILED(); HDprintf("Can't get correct index number\n"); goto error; - } /* end if */ + } /* Encode VL string type in a buffer */ if (H5Tencode(tid3, NULL, &vlstr_buf_size) < 0) { H5_FAILED(); HDprintf("Can't encode VL string type\n"); goto error; - } /* end if */ + } if (vlstr_buf_size > 0) vlstr_buf = (unsigned char *)HDcalloc((size_t)1, vlstr_buf_size); @@ -6189,26 +6192,26 @@ test_encode(void) H5_FAILED(); HDprintf("Can't encode VL string type\n"); goto error; - } /* end if */ + } /* Decode from the VL string buffer and return an object handle */ if ((decoded_tid3 = H5Tdecode(vlstr_buf)) < 0) { H5_FAILED(); HDprintf("Can't decode VL string type\n"); goto error; - } /* end if */ + } /* Verify that the datatype was copied exactly */ if (H5Tequal(decoded_tid3, tid3) <= 0) { H5_FAILED(); HDprintf("Datatype wasn't encoded & decoded identically\n"); goto error; - } /* end if */ + } if (!H5Tis_variable_str(decoded_tid3)) { H5_FAILED(); HDprintf("Datatype wasn't encoded & decoded identically\n"); goto error; - } /* end if */ + } /*----------------------------------------------------------------------- * Commit and reopen the compound, enumerate, VL string datatypes @@ -6219,17 +6222,17 @@ test_encode(void) H5_FAILED(); HDprintf("Can't commit compound datatype\n"); goto error; - } /* end if */ + } if (H5Tclose(tid1) < 0) { H5_FAILED(); HDprintf("Can't close datatype\n"); goto error; - } /* end if */ + } if (H5Tclose(decoded_tid1) < 0) { H5_FAILED(); HDprintf("Can't close datatype\n"); goto error; - } /* end if */ + } HDfree(cmpd_buf); cmpd_buf_size = 0; @@ -6238,17 +6241,17 @@ test_encode(void) H5_FAILED(); HDprintf("Can't commit compound datatype\n"); goto error; - } /* end if */ + } if (H5Tclose(tid2) < 0) { H5_FAILED(); HDprintf("Can't close datatype\n"); goto error; - } /* end if */ + } if (H5Tclose(decoded_tid2) < 0) { H5_FAILED(); HDprintf("Can't close datatype\n"); goto error; - } /* end if */ + } HDfree(enum_buf); enum_buf_size = 0; @@ -6257,17 +6260,17 @@ test_encode(void) H5_FAILED(); HDprintf("Can't commit vl string datatype\n"); goto error; - } /* end if */ + } if (H5Tclose(tid3) < 0) { H5_FAILED(); HDprintf("Can't close datatype\n"); goto error; - } /* end if */ + } if (H5Tclose(decoded_tid3) < 0) { H5_FAILED(); HDprintf("Can't close datatype\n"); goto error; - } /* end if */ + } HDfree(vlstr_buf); vlstr_buf_size = 0; @@ -6288,7 +6291,7 @@ test_encode(void) H5_FAILED(); HDprintf("Can't encode compound type\n"); goto error; - } /* end if */ + } if (cmpd_buf_size > 0) cmpd_buf = (unsigned char *)HDcalloc((size_t)1, cmpd_buf_size); @@ -6297,7 +6300,7 @@ test_encode(void) H5_FAILED(); HDprintf("Can't encode compound type\n"); goto error; - } /* end if */ + } /* Decode from the compound buffer and return an object handle */ if ((decoded_tid1 = H5Tdecode(cmpd_buf)) < 0) @@ -6308,26 +6311,26 @@ test_encode(void) H5_FAILED(); HDprintf("Datatype wasn't encoded & decoded identically\n"); goto error; - } /* end if */ + } /* Query member number and member index by name, for compound type. 
*/ if (H5Tget_nmembers(decoded_tid1) != 4) { H5_FAILED(); HDprintf("Can't get member number\n"); goto error; - } /* end if */ + } if (H5Tget_member_index(decoded_tid1, "c") != 2) { H5_FAILED(); HDprintf("Can't get correct index number\n"); goto error; - } /* end if */ + } /* Encode enumerate type in a buffer */ if (H5Tencode(tid2, NULL, &enum_buf_size) < 0) { H5_FAILED(); HDprintf("Can't encode enumerate type\n"); goto error; - } /* end if */ + } if (enum_buf_size > 0) enum_buf = (unsigned char *)HDcalloc((size_t)1, enum_buf_size); @@ -6336,40 +6339,40 @@ test_encode(void) H5_FAILED(); HDprintf("Can't encode enumerate type\n"); goto error; - } /* end if */ + } /* Decode from the enumerate buffer and return an object handle */ if ((decoded_tid2 = H5Tdecode(enum_buf)) < 0) { H5_FAILED(); HDprintf("Can't decode enumerate type\n"); goto error; - } /* end if */ + } /* Verify that the datatype was copied exactly */ if (H5Tequal(decoded_tid2, tid2) <= 0) { H5_FAILED(); HDprintf("Datatype wasn't encoded & decoded identically\n"); goto error; - } /* end if */ + } /* Query member number and member index by name, for enumeration type. */ if (H5Tget_nmembers(decoded_tid2) != 5) { H5_FAILED(); HDprintf("Can't get member number\n"); goto error; - } /* end if */ + } if (H5Tget_member_index(decoded_tid2, "ORANGE") != 3) { H5_FAILED(); HDprintf("Can't get correct index number\n"); goto error; - } /* end if */ + } /* Encode VL string type in a buffer */ if (H5Tencode(tid3, NULL, &vlstr_buf_size) < 0) { H5_FAILED(); HDprintf("Can't encode VL string type\n"); goto error; - } /* end if */ + } if (vlstr_buf_size > 0) vlstr_buf = (unsigned char *)HDcalloc((size_t)1, vlstr_buf_size); @@ -6378,14 +6381,14 @@ test_encode(void) H5_FAILED(); HDprintf("Can't encode VL string type\n"); goto error; - } /* end if */ + } /* Decode from the VL string buffer and return an object handle */ if ((decoded_tid3 = H5Tdecode(vlstr_buf)) < 0) { H5_FAILED(); HDprintf("Can't decode VL string type\n"); goto error; - } /* end if */ + } HDfree(vlstr_buf); /* Verify that the datatype was copied exactly */ @@ -6393,12 +6396,12 @@ test_encode(void) H5_FAILED(); HDprintf("Datatype wasn't encoded & decoded identically\n"); goto error; - } /* end if */ + } if (!H5Tis_variable_str(decoded_tid3)) { H5_FAILED(); HDprintf("Datatype wasn't encoded & decoded identically\n"); goto error; - } /* end if */ + } /*----------------------------------------------------------------------- * Test the reference count of the decoded datatypes @@ -6410,19 +6413,19 @@ test_encode(void) H5_FAILED(); HDprintf("Decoded datatype has incorrect reference count\n"); goto error; - } /* end if */ + } if (H5Iget_ref(decoded_tid2) != 1) { H5_FAILED(); HDprintf("Decoded datatype has incorrect reference count\n"); goto error; - } /* end if */ + } if (H5Iget_ref(decoded_tid3) != 1) { H5_FAILED(); HDprintf("Decoded datatype has incorrect reference count\n"); goto error; - } /* end if */ + } /* Make sure the reference counts for the decoded datatypes can be * decremented and the datatypes are closed. */ @@ -6430,19 +6433,19 @@ test_encode(void) H5_FAILED(); HDprintf("Decoded datatype can't close\n"); goto error; - } /* end if */ + } if (H5Idec_ref(decoded_tid2) != 0) { H5_FAILED(); HDprintf("Decoded datatype can't close\n"); goto error; - } /* end if */ + } if (H5Idec_ref(decoded_tid3) != 0) { H5_FAILED(); HDprintf("Decoded datatype can't close\n"); goto error; - } /* end if */ + } /* Make sure the decoded datatypes are already closed. 
*/ H5E_BEGIN_TRY @@ -6487,23 +6490,23 @@ test_encode(void) H5_FAILED(); HDprintf("Can't close datatype\n"); goto error; - } /* end if */ + } if (H5Tclose(tid2) < 0) { H5_FAILED(); HDprintf("Can't close datatype\n"); goto error; - } /* end if */ + } if (H5Tclose(tid3) < 0) { H5_FAILED(); HDprintf("Can't close datatype\n"); goto error; - } /* end if */ + } if (H5Fclose(file) < 0) { H5_FAILED(); HDprintf("Can't close file\n"); goto error; - } /* end if */ + } HDfree(cmpd_buf); HDfree(enum_buf); From 1e1abd74490b97a7bd52f6a327f92cdfbdfd713b Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 25 Apr 2023 10:03:41 -0700 Subject: [PATCH 162/231] Harden the v1 B-tree and local heap cache clients (#2803) * Hardens v1 B-tree deserialize function * Harden the H5HL deserialize functionality --- src/H5Bcache.c | 83 +++++++++++++++++++++--------------------------- src/H5Bpkg.h | 22 ++++++------- src/H5Bprivate.h | 17 +++++----- src/H5HLcache.c | 64 ++++++++++++++++++++----------------- 4 files changed, 90 insertions(+), 96 deletions(-) diff --git a/src/H5Bcache.c b/src/H5Bcache.c index cd0a0ba3454..437bc1b43b7 100644 --- a/src/H5Bcache.c +++ b/src/H5Bcache.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5Bcache.c - * Oct 31 2005 - * Quincey Koziol * - * Purpose: Implement B-tree metadata cache methods. + * Purpose: Implement B-tree metadata cache methods * *------------------------------------------------------------------------- */ @@ -83,13 +81,9 @@ const H5AC_class_t H5AC_BT[1] = {{ /*------------------------------------------------------------------------- * Function: H5B__cache_get_initial_load_size * - * Purpose: Compute the size of the data structure on disk. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * May 18, 2010 + * Purpose: Compute the size of the data structure on disk * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -117,24 +111,20 @@ H5B__cache_get_initial_load_size(void *_udata, size_t *image_len) /*------------------------------------------------------------------------- * Function: H5B__cache_deserialize * - * Purpose: Deserialize the data structure from disk. - * - * Return: Success: Pointer to a new B-tree node. 
- * Failure: NULL - * - * Programmer: Quincey Koziol - * Mar 24, 2008 + * Purpose: Deserialize the data structure from disk * + * Return: Success: Pointer to a new B-tree node + * Failure: NULL *------------------------------------------------------------------------- */ static void * -H5B__cache_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, void *_udata, - hbool_t H5_ATTR_UNUSED *dirty) +H5B__cache_deserialize(const void *_image, size_t len, void *_udata, hbool_t H5_ATTR_UNUSED *dirty) { H5B_t *bt = NULL; /* Pointer to the deserialized B-tree node */ H5B_cache_ud_t *udata = (H5B_cache_ud_t *)_udata; /* User data for callback */ H5B_shared_t *shared; /* Pointer to shared B-tree info */ const uint8_t *image = (const uint8_t *)_image; /* Pointer into image buffer */ + const uint8_t *p_end = image + len - 1; /* End of image buffer */ uint8_t *native; /* Pointer to native keys */ unsigned u; /* Local index variable */ H5B_t *ret_value = NULL; /* Return value */ @@ -156,7 +146,8 @@ H5B__cache_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, void *_uda /* Get a pointer to the shared info, for convenience */ shared = (H5B_shared_t *)H5UC_GET_OBJ(bt->rc_shared); - HDassert(shared); + if (NULL == shared) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, NULL, "can't get a pointer to shared data") /* Allocate space for the native keys and child addresses */ if (NULL == (bt->native = H5FL_BLK_MALLOC(native_block, shared->sizeof_keys))) @@ -164,49 +155,61 @@ H5B__cache_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, void *_uda if (NULL == (bt->child = H5FL_SEQ_MALLOC(haddr_t, (size_t)shared->two_k))) HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "can't allocate buffer for child addresses") - /* magic number */ + /* Magic number */ + if (H5_IS_BUFFER_OVERFLOW(image, H5_SIZEOF_MAGIC, p_end)) + HGOTO_ERROR(H5E_BTREE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (HDmemcmp(image, H5B_MAGIC, (size_t)H5_SIZEOF_MAGIC) != 0) HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "wrong B-tree signature") image += H5_SIZEOF_MAGIC; - /* node type and level */ + /* Node type and level */ + if (H5_IS_BUFFER_OVERFLOW(image, 2, p_end)) + HGOTO_ERROR(H5E_BTREE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (*image++ != (uint8_t)udata->type->id) HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, NULL, "incorrect B-tree node type") bt->level = *image++; - /* entries used */ + /* Entries used */ + if (H5_IS_BUFFER_OVERFLOW(image, 2, p_end)) + HGOTO_ERROR(H5E_BTREE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT16DECODE(image, bt->nchildren); /* Check if bt->nchildren is greater than two_k */ if (bt->nchildren > shared->two_k) HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "number of children is greater than maximum") - /* sibling pointers */ + /* Sibling pointers */ + if (H5_IS_BUFFER_OVERFLOW(image, H5F_sizeof_addr(udata->f), p_end)) + HGOTO_ERROR(H5E_BTREE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(udata->f, (const uint8_t **)&image, &(bt->left)); + + if (H5_IS_BUFFER_OVERFLOW(image, H5F_sizeof_addr(udata->f), p_end)) + HGOTO_ERROR(H5E_BTREE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(udata->f, (const uint8_t **)&image, &(bt->right)); - /* the child/key pairs */ + /* Child/key pairs */ native = bt->native; for (u = 0; u < bt->nchildren; u++) { /* Decode native key value */ + if (H5_IS_BUFFER_OVERFLOW(image, shared->sizeof_rkey, p_end)) + HGOTO_ERROR(H5E_BTREE, H5E_OVERFLOW, 
NULL, "ran off end of input buffer while decoding"); if ((udata->type->decode)(shared, image, native) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTDECODE, NULL, "unable to decode key") image += shared->sizeof_rkey; native += udata->type->sizeof_nkey; /* Decode address value */ + if (H5_IS_BUFFER_OVERFLOW(image, H5F_sizeof_addr(udata->f), p_end)) + HGOTO_ERROR(H5E_BTREE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5F_addr_decode(udata->f, (const uint8_t **)&image, bt->child + u); - } /* end for */ + } - /* Decode final key */ + /* Final key */ if (bt->nchildren > 0) { /* Decode native key value */ if ((udata->type->decode)(shared, image, native) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTDECODE, NULL, "unable to decode key") - } /* end if */ - - /* Sanity check */ - HDassert((size_t)((const uint8_t *)image - (const uint8_t *)_image) <= len); + } /* Set return value */ ret_value = bt; @@ -224,11 +227,7 @@ H5B__cache_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, void *_uda * * Purpose: Compute the size of the data structure on disk. * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * May 20, 2010 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -256,13 +255,9 @@ H5B__cache_image_len(const void *_thing, size_t *image_len) /*------------------------------------------------------------------------- * Function: H5B__cache_serialize * - * Purpose: Serialize the data structure for writing to disk. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * Mar 24, 2008 + * Purpose: Serialize the data structure for writing to disk * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -341,11 +336,7 @@ H5B__cache_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len, vo * * Purpose: Destroy/release an "in core representation" of a data structure * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * Mar 26, 2008 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t diff --git a/src/H5Bpkg.h b/src/H5Bpkg.h index ef9f56e50d4..f50bd48ac2e 100644 --- a/src/H5Bpkg.h +++ b/src/H5Bpkg.h @@ -44,21 +44,21 @@ /* The B-tree node as stored in memory... 
*/ typedef struct H5B_t { H5AC_info_t cache_info; /* Information for H5AC cache functions */ - /* _must_ be first field in structure */ - H5UC_t *rc_shared; /*ref-counted shared info */ - unsigned level; /*node level */ - unsigned nchildren; /*number of child pointers */ - haddr_t left; /*address of left sibling */ - haddr_t right; /*address of right sibling */ - uint8_t *native; /*array of keys in native format */ - haddr_t *child; /*2k child pointers */ + /* MUST be first field in structure */ + H5UC_t *rc_shared; /* Ref-counted shared info */ + unsigned level; /* Node level */ + unsigned nchildren; /* Number of child pointers */ + haddr_t left; /* Address of left sibling */ + haddr_t right; /* Address of right sibling */ + uint8_t *native; /* Array of keys in native format */ + haddr_t *child; /* 2k child pointers */ } H5B_t; /* Callback info for loading a B-tree node into the cache */ typedef struct H5B_cache_ud_t { - H5F_t *f; /* File that B-tree node is within */ - const struct H5B_class_t *type; /* Type of tree */ - H5UC_t *rc_shared; /* Ref-counted shared info */ + H5F_t *f; /* File that B-tree node is within */ + const struct H5B_class_t *type; /* Type of tree */ + H5UC_t *rc_shared; /* Ref-counted shared info */ } H5B_cache_ud_t; /*****************************/ diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h index 49e400c81c6..0017c437952 100644 --- a/src/H5Bprivate.h +++ b/src/H5Bprivate.h @@ -82,16 +82,16 @@ typedef int (*H5B_operator_t)(H5F_t *f, const void *_lt_key, haddr_t addr, const * the instances of nodes in that B-tree. */ typedef struct H5B_shared_t { - const struct H5B_class_t *type; /* Type of tree */ - unsigned two_k; /* 2*"K" value for tree's nodes */ - size_t sizeof_rkey; /* Size of raw (disk) key */ - size_t sizeof_rnode; /* Size of raw (disk) node */ - size_t sizeof_keys; /* Size of native (memory) key node */ - size_t sizeof_addr; /* Size of file address (in bytes) */ - size_t sizeof_len; /* Size of file lengths (in bytes) */ + const struct H5B_class_t *type; /* Type of tree */ + unsigned two_k; /* 2*"K" value for tree's nodes */ + size_t sizeof_rkey; /* Size of raw (disk) key */ + size_t sizeof_rnode; /* Size of raw (disk) node */ + size_t sizeof_keys; /* Size of native (memory) key node */ + size_t sizeof_addr; /* Size of file address (in bytes) */ + size_t sizeof_len; /* Size of file lengths (in bytes) */ uint8_t *page; /* Disk page */ size_t *nkey; /* Offsets of each native key in native key buffer */ - void *udata; /* 'Local' info for a B-tree */ + void *udata; /* 'Local' info for a B-tree */ } H5B_shared_t; /* @@ -101,7 +101,6 @@ typedef struct H5B_shared_t { * has an array of K values indexed by the `id' class field below. The * array is initialized with the HDF5_BTREE_K_DEFAULT macro. */ - typedef struct H5B_class_t { H5B_subid_t id; /*id as found in file*/ size_t sizeof_nkey; /*size of native (memory) key*/ diff --git a/src/H5HLcache.c b/src/H5HLcache.c index 72af9b45594..c04efb6b3ad 100644 --- a/src/H5HLcache.c +++ b/src/H5HLcache.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5HLcache.c - * Feb 5 2008 - * Quincey Koziol * - * Purpose: Implement local heap metadata cache methods. 
+ * Purpose: Implement local heap metadata cache methods * *------------------------------------------------------------------------- */ @@ -81,7 +79,8 @@ static herr_t H5HL__cache_datablock_notify(H5C_notify_action_t action, void *_th static herr_t H5HL__cache_datablock_free_icr(void *thing); /* Header deserialization */ -static herr_t H5HL__hdr_deserialize(H5HL_t *heap, const uint8_t *image, H5HL_cache_prfx_ud_t *udata); +static herr_t H5HL__hdr_deserialize(H5HL_t *heap, const uint8_t *image, size_t len, + H5HL_cache_prfx_ud_t *udata); /* Free list de/serialization */ static herr_t H5HL__fl_deserialize(H5HL_t *heap); @@ -137,38 +136,39 @@ const H5AC_class_t H5AC_LHEAP_DBLK[1] = {{ /*------------------------------------------------------------------------- * Function: H5HL__hdr_deserialize() * - * Purpose: Decode a local heap's header - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: Quincey Koziol - * December 15, 2016 + * Purpose: Decode a local heap's header * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t -H5HL__hdr_deserialize(H5HL_t *heap, const uint8_t *image, H5HL_cache_prfx_ud_t *udata) +H5HL__hdr_deserialize(H5HL_t *heap, const uint8_t *image, size_t len, H5HL_cache_prfx_ud_t *udata) { - herr_t ret_value = SUCCEED; /* Return value */ + const uint8_t *p_end = image + len - 1; /* End of image buffer */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Sanity checks */ HDassert(heap); HDassert(image); HDassert(udata); - /* Check magic number */ + /* Magic number */ + if (H5_IS_BUFFER_OVERFLOW(image, H5_SIZEOF_MAGIC, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); if (HDmemcmp(image, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC) != 0) HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad local heap signature") image += H5_SIZEOF_MAGIC; /* Version */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); if (H5HL_VERSION != *image++) HGOTO_ERROR(H5E_HEAP, H5E_VERSION, FAIL, "wrong version number in local heap") /* Reserved */ + if (H5_IS_BUFFER_OVERFLOW(image, 3, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); image += 3; /* Store the prefix's address & length */ @@ -176,14 +176,20 @@ H5HL__hdr_deserialize(H5HL_t *heap, const uint8_t *image, H5HL_cache_prfx_ud_t * heap->prfx_size = udata->sizeof_prfx; /* Heap data size */ + if (H5_IS_BUFFER_OVERFLOW(image, udata->sizeof_size, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); H5F_DECODE_LENGTH_LEN(image, heap->dblk_size, udata->sizeof_size); /* Free list head */ + if (H5_IS_BUFFER_OVERFLOW(image, udata->sizeof_size, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); H5F_DECODE_LENGTH_LEN(image, heap->free_block, udata->sizeof_size); if (heap->free_block != H5HL_FREE_NULL && heap->free_block >= heap->dblk_size) HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap free list") /* Heap data address */ + if (H5_IS_BUFFER_OVERFLOW(image, udata->sizeof_addr, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); H5F_addr_decode_len(udata->sizeof_addr, &image, &(heap->dblk_addr)); done: @@ -344,8 +350,7 @@ H5HL__cache_prefix_get_initial_load_size(void H5_ATTR_UNUSED *_udata, size_t *im 
*------------------------------------------------------------------------- */ static herr_t -H5HL__cache_prefix_get_final_load_size(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED image_len, - void *_udata, size_t *actual_len) +H5HL__cache_prefix_get_final_load_size(const void *_image, size_t image_len, void *_udata, size_t *actual_len) { const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */ H5HL_cache_prfx_ud_t *udata = (H5HL_cache_prfx_ud_t *)_udata; /* User data for callback */ @@ -363,7 +368,7 @@ H5HL__cache_prefix_get_final_load_size(const void *_image, size_t H5_ATTR_NDEBUG HDmemset(&heap, 0, sizeof(H5HL_t)); /* Deserialize the heap's header */ - if (H5HL__hdr_deserialize(&heap, (const uint8_t *)image, udata) < 0) + if (H5HL__hdr_deserialize(&heap, (const uint8_t *)image, image_len, udata) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, FAIL, "can't decode local heap header") /* Set the final size for the cache image */ @@ -383,25 +388,22 @@ H5HL__cache_prefix_get_final_load_size(const void *_image, size_t H5_ATTR_NDEBUG /*------------------------------------------------------------------------- * Function: H5HL__cache_prefix_deserialize * - * Purpose: Given a buffer containing the on disk image of the local - * heap prefix, deserialize it, load its contents into a newly allocated - * instance of H5HL_prfx_t, and return a pointer to the new instance. - * - * Return: Success: Pointer to in core representation - * Failure: NULL - * - * Programmer: John Mainzer - * 6/21/14 + * Purpose: Given a buffer containing the on disk image of the local + * heap prefix, deserialize it, load its contents into a newly + * allocated instance of H5HL_prfx_t, and return a pointer to + * the new instance. * + * Return: Success: Pointer to in core representation + * Failure: NULL *------------------------------------------------------------------------- */ static void * -H5HL__cache_prefix_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED len, void *_udata, - hbool_t H5_ATTR_UNUSED *dirty) +H5HL__cache_prefix_deserialize(const void *_image, size_t len, void *_udata, hbool_t H5_ATTR_UNUSED *dirty) { H5HL_t *heap = NULL; /* Local heap */ H5HL_prfx_t *prfx = NULL; /* Heap prefix deserialized */ const uint8_t *image = (const uint8_t *)_image; /* Pointer into decoding buffer */ + const uint8_t *p_end = image + len - 1; /* End of image buffer */ H5HL_cache_prfx_ud_t *udata = (H5HL_cache_prfx_ud_t *)_udata; /* User data for callback */ void *ret_value = NULL; /* Return value */ @@ -422,7 +424,7 @@ H5HL__cache_prefix_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "can't allocate local heap structure"); /* Deserialize the heap's header */ - if (H5HL__hdr_deserialize(heap, (const uint8_t *)image, udata) < 0) + if (H5HL__hdr_deserialize(heap, (const uint8_t *)image, len, udata) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, NULL, "can't decode local heap header") /* Allocate the heap prefix */ @@ -446,6 +448,8 @@ H5HL__cache_prefix_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED image = ((const uint8_t *)_image) + heap->prfx_size; /* Copy the heap data from the speculative read buffer */ + if (H5_IS_BUFFER_OVERFLOW(image, heap->dblk_size, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); H5MM_memcpy(heap->dblk_image, image, heap->dblk_size); /* Build free list */ From aeb02063fb4ff87b10267ca9ecd3debd6716b6cd Mon Sep 17 00:00:00 2001 From: Dana Robinson 
<43805+derobins@users.noreply.github.com> Date: Tue, 25 Apr 2023 10:03:57 -0700 Subject: [PATCH 163/231] Sanitize H5HG cache deserialization code (#2808) --- src/H5HGcache.c | 291 +++++++++++++++++++++++++----------------------- src/H5HGpkg.h | 35 +++--- 2 files changed, 173 insertions(+), 153 deletions(-) diff --git a/src/H5HGcache.c b/src/H5HGcache.c index 235a990eed2..bbfae7cdee9 100644 --- a/src/H5HGcache.c +++ b/src/H5HGcache.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5HGcache.c - * Feb 5 2008 - * Quincey Koziol * - * Purpose: Implement global heap metadata cache methods. + * Purpose: Implement global heap metadata cache methods * *------------------------------------------------------------------------- */ @@ -30,12 +28,12 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5Fprivate.h" /* File access */ -#include "H5HGpkg.h" /* Global heaps */ -#include "H5MFprivate.h" /* File memory management */ -#include "H5MMprivate.h" /* Memory management */ +#include "H5private.h" /* Generic Functions */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5Fprivate.h" /* File access */ +#include "H5HGpkg.h" /* Global heaps */ +#include "H5MFprivate.h" /* File memory management */ +#include "H5MMprivate.h" /* Memory management */ /****************/ /* Local Macros */ @@ -63,7 +61,7 @@ static herr_t H5HG__cache_heap_serialize(const H5F_t *f, void *image, size_t len static herr_t H5HG__cache_heap_free_icr(void *thing); /* Prefix deserialization */ -static herr_t H5HG__hdr_deserialize(H5HG_heap_t *heap, const uint8_t *image, const H5F_t *f); +static herr_t H5HG__hdr_deserialize(H5HG_heap_t *heap, const uint8_t *image, size_t len, const H5F_t *f); /*********************/ /* Package Variables */ @@ -96,65 +94,64 @@ const H5AC_class_t H5AC_GHEAP[1] = {{ /*******************/ /*------------------------------------------------------------------------- - * Function: H5HG__hdr_deserialize() + * Function: H5HG__hdr_deserialize * - * Purpose: Decode a global heap's header - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: Quincey Koziol - * December 15, 2016 + * Purpose: Decode a global heap's header * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t -H5HG__hdr_deserialize(H5HG_heap_t *heap, const uint8_t *image, const H5F_t *f) +H5HG__hdr_deserialize(H5HG_heap_t *heap, const uint8_t *image, size_t len, const H5F_t *f) { - herr_t ret_value = SUCCEED; /* Return value */ + const uint8_t *p_end = image + len - 1; /* End of image buffer */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Sanity check */ HDassert(heap); HDassert(image); HDassert(f); /* Magic number */ + if (H5_IS_BUFFER_OVERFLOW(image, H5_SIZEOF_MAGIC, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); if (HDmemcmp(image, H5HG_MAGIC, (size_t)H5_SIZEOF_MAGIC) != 0) HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad global heap collection signature") image += H5_SIZEOF_MAGIC; /* Version */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); if (H5HG_VERSION != *image++) HGOTO_ERROR(H5E_HEAP, H5E_VERSION, FAIL, "wrong version number in global heap") /* Reserved */ + if (H5_IS_BUFFER_OVERFLOW(image, 3, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, FAIL, "ran 
off end of input buffer while decoding"); image += 3; /* Size */ + if (H5_IS_BUFFER_OVERFLOW(image, H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); H5F_DECODE_LENGTH(f, image, heap->size); - HDassert(heap->size >= H5HG_MINSIZE); + if (heap->size < H5HG_MINSIZE) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "global heap size is too small"); done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5HG__hdr_deserialize() */ /*------------------------------------------------------------------------- - * Function: H5HG__cache_heap_get_initial_load_size() - * - * Purpose: Return the initial speculative read size to the metadata - * cache. This size will be used in the initial attempt to read - * the global heap. If this read is too small, the cache will - * try again with the correct value obtained from - * H5HG__cache_get_final_load_size(). + * Function: H5HG__cache_heap_get_initial_load_size * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/27/14 + * Purpose: Return the initial speculative read size to the metadata + * cache. This size will be used in the initial attempt to read + * the global heap. If this read is too small, the cache will + * try again with the correct value obtained from + * H5HG__cache_get_final_load_size(). * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -162,39 +159,30 @@ H5HG__cache_heap_get_initial_load_size(void H5_ATTR_UNUSED *_udata, size_t *imag { FUNC_ENTER_PACKAGE_NOERR - /* Sanity check */ HDassert(image_len); - /* Set the image length size */ - *image_len = (size_t)H5HG_MINSIZE; + *image_len = H5HG_MINSIZE; FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5HG__cache_heap_get_initial_load_size() */ /*------------------------------------------------------------------------- - * Function: H5HG__cache_heap_get_initial_load_size() - * - * Purpose: Return the final read size for a speculatively ready heap to - * the metadata cache. + * Function: H5HG__cache_heap_get_final_load_size * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: Quincey Koziol - * November 18, 2016 + * Purpose: Return the final read size for a speculatively ready heap to + * the metadata cache. 
* + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t -H5HG__cache_heap_get_final_load_size(const void *image, size_t H5_ATTR_NDEBUG_UNUSED image_len, void *udata, - size_t *actual_len) +H5HG__cache_heap_get_final_load_size(const void *image, size_t image_len, void *udata, size_t *actual_len) { - H5HG_heap_t heap; /* Global heap */ - herr_t ret_value = SUCCEED; /* Return value */ + H5HG_heap_t heap; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Sanity check */ HDassert(image); HDassert(udata); HDassert(actual_len); @@ -202,10 +190,10 @@ H5HG__cache_heap_get_final_load_size(const void *image, size_t H5_ATTR_NDEBUG_UN HDassert(image_len == H5HG_MINSIZE); /* Deserialize the heap's header */ - if (H5HG__hdr_deserialize(&heap, (const uint8_t *)image, (const H5F_t *)udata) < 0) + if (H5HG__hdr_deserialize(&heap, (const uint8_t *)image, image_len, (const H5F_t *)udata) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, FAIL, "can't decode global heap prefix") - /* Set the final size for the cache image */ + /* Set the actual global heap size */ *actual_len = heap.size; done: @@ -215,31 +203,28 @@ H5HG__cache_heap_get_final_load_size(const void *image, size_t H5_ATTR_NDEBUG_UN /*------------------------------------------------------------------------- * Function: H5HG__cache_heap_deserialize * - * Purpose: Given a buffer containing the on disk image of the global - * heap, deserialize it, load its contents into a newly allocated - * instance of H5HG_heap_t, and return a pointer to the new instance. - * - * Return: Success: Pointer to in core representation - * Failure: NULL - * - * Programmer: John Mainzer - * 7/27/14 + * Purpose: Given a buffer containing the on disk image of the global + * heap, deserialize it, load its contents into a newly allocated + * instance of H5HG_heap_t, and return a pointer to the new + * instance. 
* + * Return: Success: Pointer to a new global heap + * Failure: NULL *------------------------------------------------------------------------- */ static void * H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata, hbool_t H5_ATTR_UNUSED *dirty) { - H5F_t *f = (H5F_t *)_udata; /* File pointer -- obtained from user data */ - H5HG_heap_t *heap = NULL; /* New global heap */ - uint8_t *image; /* Pointer to image to decode */ - size_t max_idx = 0; /* Maximum heap object index seen */ - size_t nalloc; /* Number of objects allocated */ - void *ret_value = NULL; /* Return value */ + H5F_t *f = (H5F_t *)_udata; /* File pointer */ + H5HG_heap_t *heap = NULL; /* New global heap */ + uint8_t *p = NULL; /* Pointer to objects in (copied) image buffer */ + const uint8_t *p_end = NULL; /* End of (copied) image buffer */ + size_t max_idx = 0; /* Maximum heap object index seen */ + size_t nalloc = 0; /* Number of objects allocated */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* Sanity checks */ HDassert(_image); HDassert(len >= (size_t)H5HG_MINSIZE); HDassert(f); @@ -252,15 +237,28 @@ H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata, hbool if (NULL == (heap->chunk = H5FL_BLK_MALLOC(gheap_chunk, len))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") - /* Copy the image buffer into the newly allocate chunk */ + /* Copy the image buffer into the newly allocated chunk */ H5MM_memcpy(heap->chunk, _image, len); + /* Set p_end + * + * Note that parsing moves along p / heap->chunk, so p_end + * has to refer to the end of that buffer and NOT _image + */ + p_end = heap->chunk + len - 1; + /* Deserialize the heap's header */ - if (H5HG__hdr_deserialize(heap, (const uint8_t *)heap->chunk, f) < 0) + if (H5_IS_BUFFER_OVERFLOW(heap->chunk, H5HG_SIZEOF_HDR(f), p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + if (H5HG__hdr_deserialize(heap, (const uint8_t *)heap->chunk, len, f) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, NULL, "can't decode global heap header") /* Decode each object */ - image = heap->chunk + H5HG_SIZEOF_HDR(f); + + /* Set the p pointer to the objects in heap->chunk */ + p = heap->chunk + H5HG_SIZEOF_HDR(f); + + /* Set the number of allocated objects */ nalloc = H5HG_NOBJS(f, heap->size); /* Calloc the obj array because the file format spec makes no guarantee @@ -270,32 +268,43 @@ H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata, hbool HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") heap->nalloc = nalloc; - while (image < (heap->chunk + heap->size)) { - if ((image + H5HG_SIZEOF_OBJHDR(f)) > (heap->chunk + heap->size)) { - /* - * The last bit of space is too tiny for an object header, so + while (p < (heap->chunk + heap->size)) { + + if ((p + H5HG_SIZEOF_OBJHDR(f)) > (heap->chunk + heap->size)) { + + /* The last bit of space is too tiny for an object header, so * we assume that it's free space. 
*/ - HDassert(NULL == heap->obj[0].begin); - heap->obj[0].size = (size_t)(((const uint8_t *)heap->chunk + heap->size) - image); - heap->obj[0].begin = image; - image += heap->obj[0].size; - } /* end if */ + if (NULL != heap->obj[0].begin) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "object 0 should not be set"); + heap->obj[0].size = (size_t)(((const uint8_t *)heap->chunk + heap->size) - p); + heap->obj[0].begin = p; + + /* No buffer overflow check here since this just moves the pointer + * to the end of the buffer, which was calculated above + */ + p += heap->obj[0].size; + } else { - size_t need = 0; - unsigned idx; - uint8_t *begin = image; + size_t need = 0; /* # bytes needed to store the object */ + unsigned idx; /* Heap object index */ + uint8_t *begin = p; /* Pointer to start of object */ - UINT16DECODE(image, idx); + /* Parse a normal heap entry */ + + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + UINT16DECODE(p, idx); /* Check if we need more room to store heap objects */ if (idx >= heap->nalloc) { size_t new_alloc; /* New allocation number */ - H5HG_obj_t *new_obj; /* New array of object descriptions */ + H5HG_obj_t *new_obj; /* New array of object descriptions */ /* Determine the new number of objects to index */ new_alloc = MAX(heap->nalloc * 2, (idx + 1)); - HDassert(idx < new_alloc); + if (idx >= new_alloc) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "inappropriate heap index") /* Reallocate array of objects */ if (NULL == (new_obj = H5FL_SEQ_REALLOC(H5HG_obj_t, heap->obj, new_alloc))) @@ -307,16 +316,32 @@ H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata, hbool /* Update heap information */ heap->nalloc = new_alloc; heap->obj = new_obj; - HDassert(heap->nalloc > heap->nused); - } /* end if */ - - UINT16DECODE(image, heap->obj[idx].nrefs); - image += 4; /*reserved*/ - H5F_DECODE_LENGTH(f, image, heap->obj[idx].size); + if (heap->nalloc <= heap->nused) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "inappropriate # allocated slots") + } + + /* Number of references */ + if (H5_IS_BUFFER_OVERFLOW(p, 2, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + UINT16DECODE(p, heap->obj[idx].nrefs); + + /* Reserved bytes */ + if (H5_IS_BUFFER_OVERFLOW(p, 4, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + p += 4; + + /* Object length */ + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5F_DECODE_LENGTH(f, p, heap->obj[idx].size); + + /* Object + * + * Points to beginning of object, INCLUDING the header. + */ heap->obj[idx].begin = begin; - /* - * The total storage size includes the size of the object + /* The total storage size includes the size of the object * header and is zero padded so the next object header is * properly aligned. The entire obj array was calloc'ed, * so no need to zero the space here. 
The last bit of space @@ -327,26 +352,33 @@ H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata, hbool need = H5HG_SIZEOF_OBJHDR(f) + H5HG_ALIGN(heap->obj[idx].size); if (idx > max_idx) max_idx = idx; - } /* end if */ + } else need = heap->obj[idx].size; - image = begin + need; - } /* end else */ - } /* end while */ - - /* Sanity checks */ - HDassert(image == heap->chunk + heap->size); - HDassert(H5HG_ISALIGNED(heap->obj[0].size)); - - /* Set the next index value to use */ + /* Make sure the extra padding doesn't cause us to overrun + * the buffer + */ + if (H5_IS_BUFFER_OVERFLOW(begin, need, p_end)) + HGOTO_ERROR(H5E_HEAP, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + p = begin + need; + } + } + + /* Post-parse checks */ + if (p != heap->chunk + heap->size) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "partially decoded global heap"); + if (FALSE == H5HG_ISALIGNED(heap->obj[0].size)) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "decoded global heap is not aligned"); + + /* Set the next index value to use when creating a new object */ if (max_idx > 0) heap->nused = max_idx + 1; else heap->nused = 1; - /* Sanity check */ - HDassert(max_idx < heap->nused); + if (max_idx >= heap->nused) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "bad `next unused` heap index value"); /* Add the new heap to the CWFS list for the file */ if (H5F_cwfs_add(f, heap) < 0) @@ -365,15 +397,10 @@ H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata, hbool /*------------------------------------------------------------------------- * Function: H5HG__cache_heap_image_len * - * Purpose: Return the on disk image size of the global heap to the - * metadata cache via the image_len. - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/27/14 + * Purpose: Return the on disk image size of the global heap to the + * metadata cache via the image_len. * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -383,7 +410,6 @@ H5HG__cache_heap_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR - /* Sanity checks */ HDassert(heap); HDassert(heap->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(heap->cache_info.type == H5AC_GHEAP); @@ -398,17 +424,12 @@ H5HG__cache_heap_image_len(const void *_thing, size_t *image_len) /*------------------------------------------------------------------------- * Function: H5HG__cache_heap_serialize * - * Purpose: Given an appropriately sized buffer and an instance of - * H5HG_heap_t, serialize the global heap for writing to file, - * and copy the serialized version into the buffer. - * + * Purpose: Given an appropriately sized buffer and an instance of + * H5HG_heap_t, serialize the global heap for writing to file, + * and copy the serialized version into the buffer. 
* - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/27/14 * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -426,7 +447,7 @@ H5HG__cache_heap_serialize(const H5F_t H5_ATTR_NDEBUG_UNUSED *f, void *image, si HDassert(heap->size == len); HDassert(heap->chunk); - /* copy the image into the buffer */ + /* Copy the image into the buffer */ H5MM_memcpy(image, heap->chunk, len); FUNC_LEAVE_NOAPI(SUCCEED) @@ -435,29 +456,23 @@ H5HG__cache_heap_serialize(const H5F_t H5_ATTR_NDEBUG_UNUSED *f, void *image, si /*------------------------------------------------------------------------- * Function: H5HG__cache_heap_free_icr * - * Purpose: Free the in memory representation of the supplied global heap. - * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). - * - * Return: Success: SUCCEED - * Failure: FAIL + * Purpose: Free the in memory representation of the supplied global heap. * - * Programmer: John Mainzer - * 7/27/14 + * Note: The metadata cache sets the object's cache_info.magic to + * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr + * callback (checked in assert). * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t H5HG__cache_heap_free_icr(void *_thing) { H5HG_heap_t *heap = (H5HG_heap_t *)_thing; - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Sanity checks */ HDassert(heap); HDassert(heap->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(heap->cache_info.type == H5AC_GHEAP); diff --git a/src/H5HGpkg.h b/src/H5HGpkg.h index ab7cd093c0e..99725d86f94 100644 --- a/src/H5HGpkg.h +++ b/src/H5HGpkg.h @@ -103,27 +103,32 @@ H5FL_BLK_EXTERN(gheap_chunk); /****************************/ typedef struct H5HG_obj_t { - int nrefs; /* reference count */ - size_t size; /* total size of object */ - uint8_t *begin; /* ptr to object into heap->chunk */ + int nrefs; /* Reference count */ + size_t size; /* Total size of object */ + uint8_t *begin; /* Pointer to object into heap->chunk (INCLUDES header) */ } H5HG_obj_t; /* Forward declarations for fields */ struct H5F_shared_t; struct H5HG_heap_t { - H5AC_info_t cache_info; /* Information for H5AC cache functions, _must_ be */ - /* first field in structure */ - haddr_t addr; /*collection address */ - size_t size; /*total size of collection */ - uint8_t *chunk; /*the collection, incl. header */ - size_t nalloc; /*numb object slots allocated */ - size_t nused; /*number of slots used */ - /* If this value is >65535 then all indices */ - /* have been used at some time and the */ - /* correct new index should be searched for */ - struct H5F_shared_t *shared; /* shared file */ - H5HG_obj_t *obj; /*array of object descriptions */ + H5AC_info_t cache_info; /* Information for H5AC cache functions, MUST be + * the first field in structure + */ + haddr_t addr; /* Collection address */ + size_t size; /* Total size of collection */ + uint8_t *chunk; /* Collection of elements - note that this + * INCLUDES the header, so it's not just + * the objects! 
+ */ + size_t nalloc; /* # object slots allocated */ + size_t nused; /* # of slots used + * If this value is >65535 then all indices + * have been used at some time and the + * correct new index should be searched for + */ + struct H5F_shared_t *shared; /* Shared file */ + H5HG_obj_t *obj; /* Array of object descriptions */ }; /******************************/ From eda0b13878f07c418c7ead12a17128696f2b3f95 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 25 Apr 2023 12:04:17 -0500 Subject: [PATCH 164/231] Allow H5P_DEFAULT in H5Pget_vol_cap_flags and H5Pget_vol_id (#2807) --- release_docs/RELEASE.txt | 6 ++++++ src/H5Pfapl.c | 6 ++++++ test/vol.c | 26 +++++++++++++++++++++++++- 3 files changed, 37 insertions(+), 1 deletion(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 8bc90b8cc7f..34ca003a338 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -151,6 +151,12 @@ Bug Fixes since HDF5-1.14.0 release =================================== Library ------- + - Fixed H5Pget_vol_cap_flags and H5Pget_vol_id to accept H5P_DEFAULT + + H5Pget_vol_cap_flags and H5Pget_vol_id were updated to correctly + accept H5P_DEFAULT for the 'plist_id' FAPL parameter. Previously, + they would fail if provided with H5P_DEFAULT as the FAPL. + - Fixed ROS3 VFD anonymous credential usage with h5dump and h5ls ROS3 VFD anonymous credential functionality became broken in h5dump diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index 8e9b680f270..84d2522699c 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -6147,6 +6147,9 @@ H5Pget_vol_id(hid_t plist_id, hid_t *vol_id /*out*/) FUNC_ENTER_API(FAIL) H5TRACE2("e", "ix", plist_id, vol_id); + if (H5P_DEFAULT == plist_id) + plist_id = H5P_FILE_ACCESS_DEFAULT; + /* Get property list for ID */ if (NULL == (plist = (H5P_genplist_t *)H5I_object_verify(plist_id, H5I_GENPROP_LST))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list") @@ -6263,6 +6266,9 @@ H5Pget_vol_cap_flags(hid_t plist_id, uint64_t *cap_flags) /* Get the 'cap_flags' from the connector */ if (cap_flags) { + if (H5P_DEFAULT == plist_id) + plist_id = H5P_FILE_ACCESS_DEFAULT; + if (TRUE == H5P_isa_class(plist_id, H5P_FILE_ACCESS)) { H5P_genplist_t *plist; /* Property list pointer */ H5VL_connector_prop_t connector_prop; /* Property for VOL connector ID & info */ diff --git a/test/vol.c b/test/vol.c index 27ffdcde1c6..29bbb0654b8 100644 --- a/test/vol.c +++ b/test/vol.c @@ -2227,9 +2227,10 @@ test_vol_cap_flags(void) hid_t fapl_id = H5I_INVALID_HID; hid_t vol_id = H5I_INVALID_HID; uint64_t vol_cap_flags = H5VL_CAP_FLAG_NONE; + char *vol_env = NULL; H5VL_pass_through_info_t passthru_info; - TESTING("VOL capacity flags"); + TESTING("VOL capability flags"); /* Register a fake VOL */ if ((vol_id = H5VLregister_connector(&fake_vol_g, H5P_DEFAULT)) < 0) @@ -2251,6 +2252,29 @@ test_vol_cap_flags(void) if (vol_cap_flags & H5VL_CAP_FLAG_ATTR_BASIC) TEST_ERROR; + /* If using the native VOL by default, check flags again with H5P_DEFAULT */ + vol_env = HDgetenv(HDF5_VOL_CONNECTOR); + if (!vol_env || (0 == HDstrcmp(vol_env, "native"))) { + H5VL_class_t *cls; + hid_t connector_id; + + if (H5Pget_vol_id(H5P_DEFAULT, &connector_id) < 0) + TEST_ERROR; + if (NULL == (cls = H5I_object(connector_id))) + TEST_ERROR; + + vol_cap_flags = H5VL_CAP_FLAG_NONE; + + if (H5Pget_vol_cap_flags(H5P_DEFAULT, &vol_cap_flags) < 0) + TEST_ERROR; + + if (vol_cap_flags != cls->cap_flags) + TEST_ERROR; + + if (H5VLclose(connector_id) < 0) + TEST_ERROR; + } + /* Stack the [internal] passthrough VOL 
connector on top of the fake connector */ passthru_info.under_vol_id = vol_id; passthru_info.under_vol_info = NULL; From eec3350d319f593e7e373bca85f6d43e2f9d7e1b Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 26 Apr 2023 15:54:23 -0700 Subject: [PATCH 165/231] Harden H5G cache deserialization (#2810) --- src/H5Gcache.c | 113 ++++++++++++++++++++----------------------------- 1 file changed, 45 insertions(+), 68 deletions(-) diff --git a/src/H5Gcache.c b/src/H5Gcache.c index b6c6a85f91e..e088fd81b70 100644 --- a/src/H5Gcache.c +++ b/src/H5Gcache.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5Gcache.c - * Feb 5 2008 - * Quincey Koziol * - * Purpose: Implement group metadata cache methods. + * Purpose: Implement group metadata cache methods * *------------------------------------------------------------------------- */ @@ -101,15 +99,10 @@ H5FL_SEQ_EXTERN(H5G_entry_t); /*------------------------------------------------------------------------- * Function: H5G__cache_node_get_initial_load_size() * - * Purpose: Determine the size of the on-disk image of the node, and - * return this value in *image_len. - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/21/14 + * Purpose: Determine the size of the on-disk image of the node, and + * return this value in *image_len. * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -119,7 +112,6 @@ H5G__cache_node_get_initial_load_size(void *_udata, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR - /* Sanity checks */ HDassert(f); HDassert(image_len); @@ -132,22 +124,18 @@ H5G__cache_node_get_initial_load_size(void *_udata, size_t *image_len) /*------------------------------------------------------------------------- * Function: H5G__cache_node_deserialize * - * Purpose: Given a buffer containing the on disk image of a symbol table - * node, allocate an instance of H5G_node_t, load the contents of the - * image into it, and return a pointer to the instance. + * Purpose: Given a buffer containing the on disk image of a symbol table + * node, allocate an instance of H5G_node_t, load the contents of the + * image into it, and return a pointer to the instance. * - * Note that deserializing the image requires access to the file - * pointer, which is not included in the parameter list for this - * callback. Finesse this issue by passing in the file pointer - * twice to the H5AC_protect() call -- once as the file pointer - * proper, and again as the user data + * Note that deserializing the image requires access to the file + * pointer, which is not included in the parameter list for this + * callback. 
Finesse this issue by passing in the file pointer + * twice to the H5AC_protect() call -- once as the file pointer + * proper, and again as the user data * * Return: Success: Pointer to in core representation * Failure: NULL - * - * Programmer: John Mainzer - * 6/21/14 - * *------------------------------------------------------------------------- */ static void * @@ -157,11 +145,10 @@ H5G__cache_node_deserialize(const void *_image, size_t len, void *_udata, hbool_ H5G_node_t *sym = NULL; /* Symbol table node created */ const uint8_t *image = (const uint8_t *)_image; /* Pointer to image to deserialize */ const uint8_t *image_end = image + len - 1; /* Pointer to end of image buffer */ - void *ret_value = NULL; /* Return value */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* Sanity checks */ HDassert(image); HDassert(len > 0); HDassert(f); @@ -174,22 +161,30 @@ H5G__cache_node_deserialize(const void *_image, size_t len, void *_udata, hbool_ if (NULL == (sym->entry = H5FL_SEQ_CALLOC(H5G_entry_t, (size_t)(2 * H5F_SYM_LEAF_K(f))))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") - /* magic */ + /* Magic */ + if (H5_IS_BUFFER_OVERFLOW(image, H5_SIZEOF_MAGIC, image_end)) + HGOTO_ERROR(H5E_SYM, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (HDmemcmp(image, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC) != 0) HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, NULL, "bad symbol table node signature") image += H5_SIZEOF_MAGIC; - /* version */ + /* Version */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) + HGOTO_ERROR(H5E_SYM, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); if (H5G_NODE_VERS != *image++) HGOTO_ERROR(H5E_SYM, H5E_VERSION, NULL, "bad symbol table node version") - /* reserved */ + /* Reserved */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) + HGOTO_ERROR(H5E_SYM, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); image++; - /* number of symbols */ + /* Number of symbols */ + if (H5_IS_BUFFER_OVERFLOW(image, 2, image_end)) + HGOTO_ERROR(H5E_SYM, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); UINT16DECODE(image, sym->nsyms); - /* entries */ + /* Entries */ if (H5G__ent_decode_vec(f, &image, image_end, sym->entry, sym->nsyms) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "unable to decode symbol table entries") @@ -208,14 +203,9 @@ H5G__cache_node_deserialize(const void *_image, size_t len, void *_udata, hbool_ * Function: H5G__cache_node_image_len * * Purpose: Compute the size of the data structure on disk and return - * it in *image_len. - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 6/21/14 + * it in *image_len * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -225,7 +215,6 @@ H5G__cache_node_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR - /* Sanity checks */ HDassert(sym); HDassert(sym->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sym->cache_info.type == H5AC_SNODE); @@ -239,17 +228,12 @@ H5G__cache_node_image_len(const void *_thing, size_t *image_len) /*------------------------------------------------------------------------- * Function: H5G__cache_node_serialize * - * Purpose: Given a correctly sized buffer and an instance of H5G_node_t, - * serialize the contents of the instance of H5G_node_t, and write - * this data into the supplied buffer. This buffer will be written - * to disk. 
- * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/21/14 + * Purpose: Given a correctly sized buffer and an instance of H5G_node_t, + * serialize the contents of the instance of H5G_node_t, and write + * this data into the supplied buffer. This buffer will be written + * to disk. * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -257,11 +241,10 @@ H5G__cache_node_serialize(const H5F_t *f, void *_image, size_t len, void *_thing { H5G_node_t *sym = (H5G_node_t *)_thing; /* Pointer to object */ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Sanity checks */ HDassert(f); HDassert(image); HDassert(sym); @@ -269,20 +252,20 @@ H5G__cache_node_serialize(const H5F_t *f, void *_image, size_t len, void *_thing HDassert(sym->cache_info.type == H5AC_SNODE); HDassert(len == sym->node_size); - /* magic number */ + /* Magic number */ H5MM_memcpy(image, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC); image += H5_SIZEOF_MAGIC; - /* version number */ + /* Version number */ *image++ = H5G_NODE_VERS; - /* reserved */ + /* Reserved */ *image++ = 0; - /* number of symbols */ + /* Number of symbols */ UINT16ENCODE(image, sym->nsyms); - /* entries */ + /* Entries */ if (H5G__ent_encode_vec(f, &image, sym->entry, sym->nsyms) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTENCODE, FAIL, "can't serialize") @@ -296,29 +279,23 @@ H5G__cache_node_serialize(const H5F_t *f, void *_image, size_t len, void *_thing /*------------------------------------------------------------------------- * Function: H5G__cache_node_free_icr * - * Purpose: Destroys a symbol table node in memory. - * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). - * - * Return: Success: SUCCEED - * Failure: FAIL + * Purpose: Destroy a symbol table node in memory * - * Programmer: John Mainzer - * 6/21/14 + * Note: The metadata cache sets the object's cache_info.magic to + * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr + * callback (checked in assert). * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t H5G__cache_node_free_icr(void *_thing) { H5G_node_t *sym = (H5G_node_t *)_thing; /* Pointer to the object */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Sanity checks */ HDassert(sym); HDassert(sym->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(sym->cache_info.type == H5AC_SNODE); From 5dcdf49c5b0de41c0e670aa662878fe3d8923d80 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Wed, 26 Apr 2023 17:56:17 -0500 Subject: [PATCH 166/231] H5fuse.sh optimization updates (#2806) Changed to processing subfiles at the subfile level. Simplified parameter arguments. Enabled running it in parallel. Added option to specify subfiling configuration location. 
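To make the new parallel mode concrete: when run with more than one rank, the updated script block-distributes the subfile list, so each of mpi_size ranks fuses roughly nsubfiles / mpi_size consecutive subfiles, with the first nsubfiles % mpi_size ranks taking one extra. Below is a minimal standalone Bash sketch of that distribution logic (not part of the patch); it reuses the variable names from h5fuse.sh.in, but the subfile count and rank count are made-up example values — the real script derives them from the .config file and from MPI_Comm_size().

    #!/usr/bin/env bash
    nsubfiles=10   # example value only
    mpi_size=4     # example value only
    for (( mpi_rank = 0; mpi_rank < mpi_size; mpi_rank++ )); do
        iwork1=$(( nsubfiles / mpi_size ))               # base share of subfiles per rank
        iwork2=$(( nsubfiles % mpi_size ))               # leftover subfiles to spread around
        min=$(( mpi_rank < iwork2 ? mpi_rank : iwork2 ))
        nstart=$(( mpi_rank * iwork1 + 1 + min ))        # first subfile index for this rank (1-based)
        nend=$(( nstart + iwork1 - 1 ))                  # last subfile index for this rank
        if [ "$iwork2" -gt "$mpi_rank" ]; then
            nend=$(( nend + 1 ))                         # low-numbered ranks absorb the remainder
        fi
        echo "rank $mpi_rank fuses subfiles $nstart..$nend of $nsubfiles"
    done

Each rank then runs dd only for the stripes of the subfiles in its own [nstart, nend] range, writing them at the proper seek offsets in the fused HDF5 file, which is what lets the fuse step scale with the number of ranks instead of appending stripes serially.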
--- utils/subfiling_vfd/h5fuse.sh.in | 222 +++++++++++++++++++++++-------- 1 file changed, 165 insertions(+), 57 deletions(-) diff --git a/utils/subfiling_vfd/h5fuse.sh.in b/utils/subfiling_vfd/h5fuse.sh.in index 48e3e61fa98..09a3a056811 100755 --- a/utils/subfiling_vfd/h5fuse.sh.in +++ b/utils/subfiling_vfd/h5fuse.sh.in @@ -14,6 +14,7 @@ BLD='\033[1m' GRN='\033[0;32m' RED='\033[0;31m' PUR='\033[0;35m' +CYN='\033[0;36m' NC='\033[0m' # No Color ############################################################ @@ -23,15 +24,40 @@ function usage() { echo "" # Display usage echo "Purpose: Combine subfiles into a single HDF5 file. Requires the subfiling - configuration file either as a command-line argument, or the script will + configuration file either as a command-line argument or the script will search for the *.config file in the current directory." echo "" - echo "usage: h5fuse.sh [-h] [-f filename]" - echo "-h Print this help." - echo "-f filename Subfile configuration file." + echo "usage: h5fuse.sh [-f filename] [-h] [-p] [-q] [-r] [-v] " + echo "-f filename Subfile configuration file." + echo "-h Print this help." + echo "-q Quiet all output. [no]" + echo "-p h5fuse.sh is being run in parallel, with more than one rank. [no]" + echo "-r Remove subfiles after being processed. [no]" + echo "-v Verbose output. [no]" echo "" } +function gen_mpi() { + +# Program to determine MPI rank and size if being run in parallel (-p). + +cat > "${c_src}" << EOL +#include +#include +int main() { + MPI_Init(NULL, NULL); + int world_size; + MPI_Comm_size(MPI_COMM_WORLD, &world_size); + int world_rank; + MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); + printf("%d %d", world_rank, world_size); + MPI_Barrier(MPI_COMM_WORLD); + MPI_Finalize(); +} +EOL + +} + ############################################################ ############################################################ # Main program # @@ -43,14 +69,25 @@ function usage() { ############################################################ # Get the options file_config="" - -while getopts ":h:f:" option; do +verbose="false" +quiet="false" +rm_subf="false" +parallel="false" +while getopts "hpqrvf:" option; do case $option in + f) # subfiling configuration file + file_config=$OPTARG;; h) # display Help usage exit;; - f) # subfiling configuration file - file_config=$OPTARG;; + p) # HDF5 fused file + parallel="true";; + q) # quiet all output + quiet="true";; + r) # remove completed subfiles + rm_subf="true";; + v) # verbose output + verbose="true";; \?) # Invalid option echo -e "$RED ERROR: Invalid option ${BLD}-${OPTARG}${RED} $NC" usage @@ -61,12 +98,20 @@ while getopts ":h:f:" option; do done FAILED=1 -nfiles=1 ############################################################ # Configure file checks # ############################################################ +# +SUBF_CONFDIR="${H5FD_SUBFILING_CONFIG_FILE_PREFIX:-.}" + +# cd to the subfile configuration location +if [ "$SUBF_CONFDIR" != "." ] || [ "$SUBF_CONFDIR" != "$PWD" ]; then + cd "$SUBF_CONFDIR" || exit +fi + +# Try to find the config file if [ -z "$file_config" ]; then - nfiles=$(find . -maxdepth 1 -type f -iname "*.config" -printf '.' | wc -m) + nfiles=$(find "$SUBF_CONFDIR" -maxdepth 1 -type f -iname "*.config" -printf '.' | wc -m) if [[ "$nfiles" != "1" ]]; then if [[ "$nfiles" == "0" ]]; then echo -e "$RED Failed to find .config file in current directory. $NC" @@ -78,7 +123,7 @@ if [ -z "$file_config" ]; then exit $FAILED fi fi - file_config=$(find . 
-maxdepth 1 -type f -iname "*.config") + file_config=$(find "$SUBF_CONFDIR" -maxdepth 1 -type f -iname '*.config') fi if [ ! -f "$file_config" ]; then @@ -104,62 +149,125 @@ if test -z "$subfile_dir"; then exit $FAILED fi -subfiles=( $( sed -e '1,/subfile_dir=/d' "$file_config" ) ) -#for i in "${subfiles[@]}"; do -# echo "$i" -#done +# For bash 4.4+ +mapfile -t subfiles < <( sed -e '1,/subfile_dir=/d' "$file_config" ) if [ ${#subfiles[@]} -eq 0 ]; then echo -e "$RED failed to find subfiles list in $file_config $NC" exit $FAILED fi +nsubfiles=${#subfiles[@]} + +# Get the number of local subfiles +subfiles_loc=() +subfiles_size=() +for i in "${subfiles[@]}"; do + subfile="${subfile_dir}/${i}" + if [ -f "${subfile}" ]; then + subfiles_loc+=("$subfile") + subfiles_size+=($(wc -c "${subfile}" | awk '{print $1}')) + else + subfiles_size+=(0) + fi +done + +START="$(date +%s%N)" + +mpi_rank=0 +mpi_size=1 +nstart=1 +nend=$nsubfiles + +if [ "$parallel" == "true" ]; then + + hex=$(hexdump -n 16 -v -e '/1 "%02X"' /dev/urandom) + c_exec="h5fuse_"${hex} + c_src=${c_exec}.c + + # Generate and compile an MPI program to get MPI rank and size + if [ ! -f "${c_src}" ]; then + gen_mpi + CC=@CC@ + ${CC} "${c_src}" -o "${c_exec}" + fi + wait + rank_size=$(./"${c_exec}") + read -r mpi_rank mpi_size <<<"$rank_size" -rm -f "$hdf5_file" + rm -f "${c_src}" "${c_exec}" -## COMBINE SUBFILES INTO AN HDF5 FILE ## + # Divide the subfiles among the ranks + iwork1=$(( nsubfiles / mpi_size )) + iwork2=$(( nsubfiles % mpi_size )) + min=$(( mpi_rank < iwork2 ? mpi_rank : iwork2 )) + nstart=$(( mpi_rank * iwork1 + 1 + min )) + nend=$(( nstart + iwork1 - 1 )) + if [ $iwork2 -gt "$mpi_rank" ]; then + nend=$(( nend + 1 )) + fi +fi +############################################################ +# COMBINE SUBFILES INTO AN HDF5 FILE # +############################################################ +icnt=1 skip=0 -status=$nfiles -START="$(date +%s%N)" -while [ "$status" -gt 0 ]; do - icnt=0 - for i in "${subfiles[@]}"; do - subfile="${subfile_dir}/${i}" - # Verify the file exists - if [ ! -f "${subfile}" ]; then - echo -e "$RED ERROR: file \"${subfile}\" does not exist. $NC" - exit $FAILED - fi +seek=0 +seek_cnt=0 +for i in "${subfiles[@]}"; do - # Verify the file is not being accessed by a process - t_max=60 - t_sleep=1 - t_elapsed=0 + subfile="${subfile_dir}/${i}" - while fuser -s "${subfile}"; do - if [[ $((t_elapsed % 5)) -eq 0 ]]; then - echo -e "$GRN waiting for process to finish accessing file \"${subfile}\" ... 
[${t_elapsed}s/${t_max}s] $NC" - fi - sleep $t_sleep - t_elapsed=$((t_elapsed+t_sleep)) - if [[ $t_elapsed -ge $t_max ]]; then - echo -e "$RED ERROR: file \"${subfile}\" still has process accessing it after ${t_elapsed}s $NC" - exit $FAILED + # bs=BYTES read and write up to BYTES bytes at a time; overrides ibs and obs + # ibs=BYTES read up to BYTES bytes at a time + # obs=BYTES write BYTES bytes at a time + # seek=N skip N obs-sized blocks at start of output + # skip=N skip N ibs-sized blocks at start of input + + status=1 + fsize=${subfiles_size[icnt-1]} + if [ "$fsize" -eq "0" ]; then + seek_cnt=$((seek_cnt+1)) + seek=$seek_cnt + if [ "$rm_subf" == "true" ]; then + if [ -f "${subfile}" ]; then + \rm -f "$subfile" + fi + fi + else + if [ $icnt -ge "$nstart" ] && [ $icnt -le "$nend" ]; then + records_left=$fsize + while [ "$status" -gt 0 ]; do + if [ $((skip*stripe_size)) -le "$fsize" ] && [ "$records_left" -gt 0 ]; then + EXEC="dd count=1 bs=$stripe_size if=$subfile of=$hdf5_file skip=$skip seek=$seek conv=notrunc" + if [ "$verbose" == "true" ]; then + echo -e "$GRN $EXEC $NC" + fi + err=$( $EXEC 2>&1 1>/dev/null ) + if [ $? -ne 0 ]; then + echo -e "$CYN ERR: dd Utility Failed $NC" + echo -e "$CYN MSG: $err $NC" + exit $FAILED + fi + records_left=$((records_left-stripe_size)) + skip=$((skip+1)) + seek=$((seek_cnt+skip*nsubfiles)) + else + status=0 + skip=0 + fi + done; wait + if [ "$rm_subf" == "true" ]; then + \rm -f "$subfile" fi - done - - fsize=$(wc -c "${subfile}" | awk '{print $1}') - if [ $((skip*stripe_size)) -le "$fsize" ]; then - EXEC="dd count=1 bs=$stripe_size if=$subfile of=$hdf5_file skip=$skip oflag=append conv=notrunc" - echo -e "$GRN $EXEC $NC" - err="$( $EXEC 2>&1 > /dev/null &)" - icnt=$((icnt+1)) - else - subfiles=("${subfiles[@]:0:icnt}" "${subfiles[@]:$((icnt+1))}") - status=${#subfiles[@]} - fi - done; wait - skip=$((skip+1)) -done + fi + seek_cnt=$((seek_cnt+1)) + seek=$seek_cnt + fi + icnt=$(( icnt +1 )) +done; wait + END=$(( $(date +%s%N) - START )) DURATION_SEC=$(awk -vp="$END" -vq=0.000000001 'BEGIN{printf "%.4f" ,p * q}') -echo -e "$PUR COMPLETION TIME = $DURATION_SEC s $NC" +if [ "$quiet" == "false" ]; then + echo -e "$PUR COMPLETION TIME = $DURATION_SEC s $NC" +fi \ No newline at end of file From 6ddd5b7deb9da59880b031374926062905c57c86 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 26 Apr 2023 17:56:57 -0500 Subject: [PATCH 167/231] Add support for CMakePresets and fix example download (#2817) --- .gitignore | 1 + CMakeInstallation.cmake | 53 ++- CMakePresets.json | 237 +++++++++++ config/cmake-presets/hidden-presets.json | 491 +++++++++++++++++++++++ config/cmake/ConfigureChecks.cmake | 3 +- release_docs/INSTALL_CMake.txt | 98 ++++- release_docs/RELEASE.txt | 9 + 7 files changed, 880 insertions(+), 12 deletions(-) create mode 100644 CMakePresets.json create mode 100644 config/cmake-presets/hidden-presets.json diff --git a/.gitignore b/.gitignore index 3caf16a1c67..cbaccb29d32 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,4 @@ src/H5overflow.h src/H5version.h /.classpath +/CMakeUserPresets.json diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake index 00ed5cd6795..cc7d219fa8c 100644 --- a/CMakeInstallation.cmake +++ b/CMakeInstallation.cmake @@ -150,17 +150,54 @@ if (HDF5_PACK_EXAMPLES) DESTINATION ${HDF5_INSTALL_DATA_DIR} COMPONENT hdfdocuments ) - if (EXISTS "${HDF5_EXAMPLES_COMPRESSED_DIR}/${HDF5_EXAMPLES_COMPRESSED}") + + option (EXAMPLES_USE_RELEASE_NAME "Use the released examples 
artifact name" OFF) + option (EXAMPLES_DOWNLOAD "Download to use released examples files" OFF) + if (EXAMPLES_DOWNLOAD) + if (NOT EXAMPLES_USE_LOCALCONTENT) + set (EXAMPLES_URL ${EXAMPLES_TGZ_ORIGPATH}/${EXAMPLES_TGZ_ORIGNAME}) + else () + set (EXAMPLES_URL ${TGZPATH}/${EXAMPLES_TGZ_ORIGNAME}) + endif () + message (VERBOSE "Examples file is ${EXAMPLES_URL}") + file (DOWNLOAD ${EXAMPLES_URL} ${HDF5_BINARY_DIR}/${HDF5_EXAMPLES_COMPRESSED}) + if (EXISTS "${HDF5_BINARY_DIR}/${HDF5_EXAMPLES_COMPRESSED}") + execute_process( + COMMAND ${CMAKE_COMMAND} -E tar xzf ${HDF5_EXAMPLES_COMPRESSED} + WORKING_DIRECTORY ${HDF5_BINARY_DIR} + COMMAND_ECHO STDOUT + ) + endif () + set (EXAMPLES_USE_RELEASE_NAME ON CACHE BOOL "" FORCE) + else () + if (EXISTS "${HDF5_EXAMPLES_COMPRESSED_DIR}/${HDF5_EXAMPLES_COMPRESSED}") + execute_process( + COMMAND ${CMAKE_COMMAND} -E tar xzf ${HDF5_EXAMPLES_COMPRESSED_DIR}/${HDF5_EXAMPLES_COMPRESSED} + WORKING_DIRECTORY ${HDF5_BINARY_DIR} + COMMAND_ECHO STDOUT + ) + endif () + endif () + if (EXAMPLES_USE_RELEASE_NAME) + get_filename_component (EX_LAST_EXT ${HDF5_EXAMPLES_COMPRESSED} LAST_EXT) + if (${EX_LAST_EXT} STREQUAL ".zip") + get_filename_component (EX_DIR_NAME ${HDF5_EXAMPLES_COMPRESSED} NAME_WLE) + else () + get_filename_component (EX_DIR_NAME ${HDF5_EXAMPLES_COMPRESSED} NAME_WLE) + get_filename_component (EX_DIR_NAME ${EX_DIR_NAME} NAME_WLE) + endif () execute_process( - COMMAND ${CMAKE_COMMAND} -E tar xzf ${HDF5_EXAMPLES_COMPRESSED_DIR}/${HDF5_EXAMPLES_COMPRESSED} - ) - install ( - DIRECTORY ${HDF5_BINARY_DIR}/HDF5Examples - DESTINATION ${HDF5_INSTALL_DATA_DIR} - USE_SOURCE_PERMISSIONS - COMPONENT hdfdocuments + COMMAND ${CMAKE_COMMAND} -E rename ${EX_DIR_NAME} HDF5Examples + WORKING_DIRECTORY ${HDF5_BINARY_DIR} + COMMAND_ECHO STDOUT ) endif () + install ( + DIRECTORY ${HDF5_BINARY_DIR}/HDF5Examples + DESTINATION ${HDF5_INSTALL_DATA_DIR} + USE_SOURCE_PERMISSIONS + COMPONENT hdfdocuments + ) install ( FILES ${HDF5_SOURCE_DIR}/release_docs/USING_CMake_Examples.txt diff --git a/CMakePresets.json b/CMakePresets.json new file mode 100644 index 00000000000..d861b445cf5 --- /dev/null +++ b/CMakePresets.json @@ -0,0 +1,237 @@ +{ + "version": 6, + "include": [ + "config/cmake-presets/hidden-presets.json" + ], + "configurePresets": [ + { + "name": "ci-base-tgz", + "hidden": true, + "inherits": "ci-base", + "cacheVariables": { + "HDF5_ALLOW_EXTERNAL_SUPPORT": "NO", + "TGZPATH": {"type": "STRING", "value": "${sourceParentDir}/temp"} + } + }, + { + "name": "ci-StdCompression", + "hidden": true, + "inherits": "ci-base-tgz", + "cacheVariables": { + "HDF5_ENABLE_Z_LIB_SUPPORT": "ON", + "HDF5_ENABLE_SZIP_SUPPORT": "ON", + "HDF5_ENABLE_SZIP_ENCODING": "ON", + "BUILD_ZLIB_WITH_FETCHCONTENT": "ON", + "ZLIB_PACKAGE_NAME": {"type": "STRING", "value": "zlib"}, + "ZLIB_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/madler/zlib/releases/download/v1.2.13"}, + "ZLIB_TGZ_ORIGNAME": {"type": "STRING", "value": "zlib-1.2.13.tar.gz"}, + "ZLIB_USE_LOCALCONTENT": "OFF", + "BUILD_SZIP_WITH_FETCHCONTENT": "ON", + "LIBAEC_PACKAGE_NAME": {"type": "STRING", "value": "libaec"}, + "LIBAEC_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6"}, + "LIBAEC_TGZ_ORIGNAME": {"type": "STRING", "value": "libaec-1.0.6.tar.gz"}, + "LIBAEC_USE_LOCALCONTENT": "OFF" + } + }, + { + "name": "ci-base-plugins", + "hidden": true, + "inherits": "ci-base-tgz", + "cacheVariables": { + "PLUGIN_TGZ_NAME": {"type": "STRING", "value": 
"hdf5_plugins-1.14.0.tar.gz"}, + "PLUGIN_PACKAGE_NAME": {"type": "STRING", "value": "pl"}, + "BSHUF_TGZ_NAME": {"type": "STRING", "value": "bitshuffle.tar.gz"}, + "BSHUF_PACKAGE_NAME": {"type": "STRING", "value": "bshuf"}, + "BLOSC_TGZ_NAME": {"type": "STRING", "value": "c-blosc.tar.gz"}, + "BLOSC_PACKAGE_NAME": {"type": "STRING", "value": "blosc"}, + "BLOSC_ZLIB_TGZ_NAME": {"type": "STRING", "value": "ZLib.tar.gz"}, + "BLOSC_ZLIB_PACKAGE_NAME": {"type": "STRING", "value": "zlib"}, + "BZ2_TGZ_NAME": {"type": "STRING", "value": "BZ2.tar.gz"}, + "BZ2_PACKAGE_NAME": {"type": "STRING", "value": "bz2"}, + "FPZIP_TGZ_NAME": {"type": "STRING", "value": "fpzip.tar.gz"}, + "FPZIP_PACKAGE_NAME": {"type": "STRING", "value": "fpzip"}, + "JPEG_TGZ_NAME": {"type": "STRING", "value": "JPEG.tar.gz"}, + "JPEG_PACKAGE_NAME": {"type": "STRING", "value": "jpeg"}, + "BUILD_LZ4_LIBRARY_SOURCE": "ON", + "LZ4_TGZ_NAME": {"type": "STRING", "value": "lz4.tar.gz"}, + "LZ4_PACKAGE_NAME": {"type": "STRING", "value": "lz4"}, + "LZF_TGZ_NAME": {"type": "STRING", "value": "lzf.tar.gz"}, + "LZF_PACKAGE_NAME": {"type": "STRING", "value": "lzf"}, + "SZ_TGZ_NAME": {"type": "STRING", "value": "szf.tar.gz"}, + "SZ_PACKAGE_NAME": {"type": "STRING", "value": "SZ"}, + "ZFP_TGZ_NAME": {"type": "STRING", "value": "zfp.tar.gz"}, + "ZFP_PACKAGE_NAME": {"type": "STRING", "value": "zfp"}, + "ZSTD_TGZ_NAME": {"type": "STRING", "value": "zstd.tar.gz"}, + "ZSTD_PACKAGE_NAME": {"type": "STRING", "value": "zstd"} + } + }, + { + "name": "ci-StdPlugins", + "hidden": true, + "inherits": ["ci-base-plugins", "ci-base-tgz"], + "cacheVariables": { + "HDF5_ENABLE_PLUGIN_SUPPORT": "ON", + "PLUGIN_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/HDFGroup/hdf5_plugins/archive/refs/tags"}, + "PLUGIN_TGZ_ORIGNAME": {"type": "STRING", "value": "hdf5_plugins-1.14.0.tar.gz"} + } + }, + { + "name": "ci-StdExamples", + "hidden": true, + "inherits": "ci-base", + "cacheVariables": { + "HDF5_PACK_EXAMPLES": "ON", + "HDF5_EXAMPLES_COMPRESSED": {"type": "STRING", "value": "hdf5-examples-2.0.3.tar.gz"}, + "HDF5_EXAMPLES_COMPRESSED_DIR": {"type": "STRING", "value": "${sourceParentDir}/temp"}, + "EXAMPLES_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/HDFGroup/hdf5-examples/archive/refs/tags/"}, + "EXAMPLES_TGZ_ORIGNAME": {"type": "STRING", "value": "2.0.3.tar.gz"}, + "EXAMPLES_DOWNLOAD": "ON" + } + }, + { + "name": "ci-StdShar", + "hidden": true, + "inherits": "ci-StdCompression", + "cacheVariables": { + "HDF_PACKAGE_NAMESPACE": {"type": "STRING", "value": "hdf5::"}, + "HDF5_INSTALL_MOD_FORTRAN": "NO", + "HDF5_BUILD_GENERATORS": "ON", + "HDF5_ENABLE_ALL_WARNINGS": "ON", + "HDF5_MINGW_STATIC_GCC_LIBS": "ON", + "HDF_TEST_EXPRESS": "2" + } + }, + { + "name": "ci-StdShar-MSVC", + "description": "MSVC Standard Config for x64 (Release)", + "inherits": [ + "ci-x64-Release-MSVC", + "ci-CPP", + "ci-Fortran", + "ci-Java", + "ci-StdShar", + "ci-StdExamples" + ] + }, + { + "name": "ci-StdShar-Clang", + "description": "Clang Standard Config for x64 (Release)", + "inherits": [ + "ci-x64-Release-Clang", + "ci-CPP", + "ci-Fortran", + "ci-Java", + "ci-StdShar", + "ci-StdExamples" + ] + }, + { + "name": "ci-StdShar-GNUC", + "description": "GNUC Standard Config for x64 (Release)", + "inherits": [ + "ci-x64-Release-GNUC", + "ci-CPP", + "ci-Fortran", + "ci-Java", + "ci-StdShar", + "ci-StdExamples" + ] + } + ], + "buildPresets": [ + { + "name": "ci-StdShar-MSVC", + "description": "MSVC Standard Build for x64 (Release)", + "configurePreset": 
"ci-StdShar-MSVC", + "inherits": [ + "ci-x64-Release-MSVC" + ] + }, + { + "name": "ci-StdShar-Clang", + "description": "Clang Standard Build for x64 (Release)", + "configurePreset": "ci-StdShar-Clang", + "inherits": [ + "ci-x64-Release-Clang" + ] + }, + { + "name": "ci-StdShar-GNUC", + "description": "GNUC Standard Build for x64 (Release)", + "configurePreset": "ci-StdShar-GNUC", + "verbose": false, + "inherits": [ + "ci-x64-Release-GNUC" + ] + } + ], + "testPresets": [ + { + "name": "ci-StdShar-MSVC", + "configurePreset": "ci-StdShar-MSVC", + "inherits": [ + "ci-x64-Release-MSVC" + ] + }, + { + "name": "ci-StdShar-Clang", + "configurePreset": "ci-StdShar-Clang", + "inherits": [ + "ci-x64-Release-Clang" + ] + }, + { + "name": "ci-StdShar-GNUC", + "configurePreset": "ci-StdShar-GNUC", + "inherits": [ + "ci-x64-Release-GNUC" + ] + } + ], + "packagePresets": [ + { + "name": "ci-StdShar-MSVC", + "configurePreset": "ci-StdShar-MSVC", + "inherits": "ci-x64-Release-MSVC" + }, + { + "name": "ci-StdShar-Clang", + "configurePreset": "ci-StdShar-Clang", + "inherits": "ci-x64-Release-Clang" + }, + { + "name": "ci-StdShar-GNUC", + "configurePreset": "ci-StdShar-GNUC", + "inherits": "ci-x64-Release-GNUC" + } + ], + "workflowPresets": [ + { + "name": "ci-StdShar-MSVC", + "steps": [ + {"type": "configure", "name": "ci-StdShar-MSVC"}, + {"type": "build", "name": "ci-StdShar-MSVC"}, + {"type": "test", "name": "ci-StdShar-MSVC"}, + {"type": "package", "name": "ci-StdShar-MSVC"} + ] + }, + { + "name": "ci-StdShar-Clang", + "steps": [ + {"type": "configure", "name": "ci-StdShar-Clang"}, + {"type": "build", "name": "ci-StdShar-Clang"}, + {"type": "test", "name": "ci-StdShar-Clang"}, + {"type": "package", "name": "ci-StdShar-Clang"} + ] + }, + { + "name": "ci-StdShar-GNUC", + "steps": [ + {"type": "configure", "name": "ci-StdShar-GNUC"}, + {"type": "build", "name": "ci-StdShar-GNUC"}, + {"type": "test", "name": "ci-StdShar-GNUC"}, + {"type": "package", "name": "ci-StdShar-GNUC"} + ] + } + ] +} \ No newline at end of file diff --git a/config/cmake-presets/hidden-presets.json b/config/cmake-presets/hidden-presets.json new file mode 100644 index 00000000000..c616e7d1f4b --- /dev/null +++ b/config/cmake-presets/hidden-presets.json @@ -0,0 +1,491 @@ +{ + "version": 6, + "configurePresets": [ + { + "name": "ci-base", + "displayName": "Basic Config", + "description": "Basic build using Ninja generator", + "generator": "Ninja", + "hidden": true, + "binaryDir": "${sourceParentDir}/build/${presetName}", + "installDir": "${sourceParentDir}/install/${presetName}" + }, + { + "name": "ci-x64", + "architecture": { + "value": "x64", + "strategy": "external" + }, + "hidden": true + }, + { + "name": "ci-x86", + "architecture": { + "value": "x86", + "strategy": "external" + }, + "hidden": true + }, + { + "name": "ci-Debug", + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Debug" + }, + "hidden": true + }, + { + "name": "ci-Release", + "cacheVariables": { + "CMAKE_BUILD_TYPE": "RelWithDebInfo", + "HDF5_BUILD_DOC": "ON" + }, + "hidden": true + }, + { + "name": "ci-MSVC", + "hidden": true, + "cacheVariables": { + "CMAKE_C_COMPILER": "cl", + "CMAKE_CXX_COMPILER": "cl" + }, + "toolset": { + "value": "host=x64", + "strategy": "external" + }, + "condition": { + "type": "equals", + "lhs": "${hostSystemName}", + "rhs": "Windows" + } + }, + { + "name": "ci-Clang", + "hidden": true, + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "config/toolchain/clang.cmake" + }, + "toolset": { + "value": "host=x64", + "strategy": "external" + } + }, + { + 
"name": "ci-GNUC", + "hidden": true, + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "config/toolchain/gcc.cmake" + }, + "condition": { + "type": "equals", + "lhs": "${hostSystemName}", + "rhs": "Linux" + }, + "toolset": { + "value": "host=x64", + "strategy": "external" + } + }, + { + "name": "ci-Intel", + "hidden": true, + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "config/toolchain/intel.cmake" + }, + "toolset": { + "value": "host=x64", + "strategy": "external" + } + }, + { + "name": "ci-Fortran-Clang", + "hidden": true, + "cacheVariables": { + "CMAKE_Fortran_COMPILER": "gfortran" + }, + "condition": { + "type": "matches", + "string": "${presetName}", + "regex": ".*-Clang" + } + }, + { + "name": "ci-Fortran", + "hidden": true, + "inherits": "ci-Fortran-Clang", + "cacheVariables": { + "HDF5_BUILD_FORTRAN": "ON" + }, + "toolset": { + "value": "host=x64", + "strategy": "external" + } + }, + { + "name": "ci-CPP", + "hidden": true, + "cacheVariables": { + "HDF5_BUILD_CPP_LIB": "ON" + } + }, + { + "name": "ci-Java", + "hidden": true, + "cacheVariables": { + "HDF5_BUILD_JAVA": "ON" + }, + "toolset": { + "value": "host=x64", + "strategy": "external" + } + }, + { + "name": "ci-x64-Debug-MSVC", + "description": "MSVC for x64 (Debug)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-x64", + "ci-Debug", + "ci-MSVC" + ] + }, + { + "name": "ci-x64-Release-MSVC", + "description": "MSVC for x64 (Release)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-x64", + "ci-Release", + "ci-MSVC" + ] + }, + { + "name": "ci-x64-Debug-Clang", + "description": "Clang/LLVM for x64 (Debug)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-x64", + "ci-Debug", + "ci-Clang" + ] + }, + { + "name": "ci-x64-Release-Clang", + "description": "Clang/LLVM for x64 (Release)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-x64", + "ci-Release", + "ci-Clang" + ] + }, + { + "name": "ci-x64-Debug-GNUC", + "description": "GNUC for x64 (Debug)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-x64", + "ci-Debug", + "ci-GNUC" + ] + }, + { + "name": "ci-x64-Release-GNUC", + "description": "GNUC for x64 (Release)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-x64", + "ci-Release", + "ci-GNUC" + ] + }, + { + "name": "ci-x64-Debug-MSVC-asan", + "description": "x64-Debug-MSVC with /fsanitize=address", + "hidden": true, + "inherits": "ci-x64-Debug-MSVC", + "cacheVariables": { + "USE_SANITIZER": "Address", + "HDF5_ENABLE_SANITIZERS": "ON" + } + }, + { + "name": "ci-x64-Debug-GNUC-asan", + "hidden": true, + "inherits": "ci-x64-Debug-GNUC", + "cacheVariables": { + "USE_SANITIZER": "Address", + "HDF5_ENABLE_SANITIZERS": "ON" + } + }, + { + "name": "ci-x64-Debug-GNUC-tsan", + "hidden": true, + "inherits": "ci-x64-Debug-GNUC", + "cacheVariables": { + "USE_SANITIZER": "Thread", + "HDF5_ENABLE_SANITIZERS": "ON" + } + }, + { + "name": "ci-x64-Debug-GNUC-lsan", + "hidden": true, + "inherits": "ci-x64-Debug-GNUC", + "cacheVariables": { + "USE_SANITIZER": "Leak", + "HDF5_ENABLE_SANITIZERS": "ON" + } + }, + { + "name": "ci-x64-Debug-GNUC-ubsan", + "hidden": true, + "inherits": "ci-x64-Debug-GNUC", + "cacheVariables": { + "USE_SANITIZER": "Undefined", + "HDF5_ENABLE_SANITIZERS": "ON" + } + } + ], + "buildPresets": [ + { + "name": "ci-base", + "configurePreset": "ci-base", + "hidden": true, + "verbose": true, + "jobs": 8 + }, + { + "name": "ci-x64-Debug-MSVC", + "configurePreset": "ci-x64-Debug-MSVC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Release-MSVC", + "configurePreset": 
"ci-x64-Release-MSVC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-Clang", + "configurePreset": "ci-x64-Debug-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Release-Clang", + "configurePreset": "ci-x64-Release-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-GNUC", + "configurePreset": "ci-x64-Debug-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Release-GNUC", + "configurePreset": "ci-x64-Release-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-MSVC-asan", + "configurePreset": "ci-x64-Debug-MSVC-asan", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-GNUC-asan", + "configurePreset": "ci-x64-Debug-GNUC-asan", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-GNUC-tsan", + "configurePreset": "ci-x64-Debug-GNUC-tsan", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-GNUC-lsan", + "configurePreset": "ci-x64-Debug-GNUC-lsan", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-GNUC-ubsan", + "configurePreset": "ci-x64-Debug-GNUC-ubsan", + "hidden": true, + "inherits": [ + "ci-base" + ] + } + ], + "testPresets": [ + { + "name": "ci-base", + "configurePreset": "ci-base", + "output": { + "outputOnFailure": false, + "shortProgress": true, + "verbosity": "verbose" + }, + "hidden": true, + "execution": { + "noTestsAction": "error", + "timeout": 180, + "jobs": 8 + } + }, + { + "name": "ci-x64-Debug-MSVC", + "configurePreset": "ci-x64-Debug-MSVC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Release-MSVC", + "configurePreset": "ci-x64-Release-MSVC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-Clang", + "configurePreset": "ci-x64-Debug-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Release-Clang", + "configurePreset": "ci-x64-Release-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-GNUC", + "configurePreset": "ci-x64-Debug-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Release-GNUC", + "configurePreset": "ci-x64-Release-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-MSVC-asan", + "configurePreset": "ci-x64-Debug-MSVC-asan", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-GNUC-asan", + "configurePreset": "ci-x64-Debug-GNUC-asan", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-GNUC-tsan", + "configurePreset": "ci-x64-Debug-GNUC-tsan", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-GNUC-lsan", + "configurePreset": "ci-x64-Debug-GNUC-lsan", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Debug-GNUC-ubsan", + "configurePreset": "ci-x64-Debug-GNUC-ubsan", + "inherits": [ + "ci-base" + ] + } + ], + "packagePresets": [ + { + "name": "ci-base", + "hidden": true, + "output": { + "verbose": true + } + }, + { + "name": "ci-x64-Release-MSVC", + "configurePreset": "ci-x64-Release-MSVC", + "hidden": true, + "inherits": "ci-base", + "generators": [ + "ZIP" + ] + }, + { + "name": "ci-x64-Release-Clang", + "configurePreset": "ci-x64-Release-Clang", + "hidden": true, + "inherits": "ci-base", + "generators": [ + "TGZ" + ] + }, + { + "name": 
"ci-x64-Release-GNUC", + "configurePreset": "ci-x64-Release-GNUC", + "hidden": true, + "inherits": "ci-base", + "generators": [ + "TGZ" + ] + } + ] +} \ No newline at end of file diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake index 869f5ea3c2c..b768928498d 100644 --- a/config/cmake/ConfigureChecks.cmake +++ b/config/cmake/ConfigureChecks.cmake @@ -131,7 +131,7 @@ CHECK_INCLUDE_FILE_CONCAT ("netdb.h" ${HDF_PREFIX}_HAVE_NETDB_H) CHECK_INCLUDE_FILE_CONCAT ("arpa/inet.h" ${HDF_PREFIX}_HAVE_ARPA_INET_H) if (WINDOWS) CHECK_INCLUDE_FILE_CONCAT ("shlwapi.h" ${HDF_PREFIX}_HAVE_SHLWAPI_H) - # Checking for StrStrIA in the library is not relaible for mingw32 to stdcall + # Checking for StrStrIA in the library is not reliable for mingw32 to stdcall set (LINK_LIBS ${LINK_LIBS} "shlwapi") endif () @@ -826,7 +826,6 @@ if (HDF5_BUILD_FORTRAN) message (FATAL_ERROR "Compilation of C ${FUNCTION_NAME} - Failed") endif () endmacro () - set (PROG_SRC " #include \n\ diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index b9d8338a56d..ac048550094 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -14,6 +14,7 @@ Section VI: CMake option defaults for HDF5 Section VII: User Defined Options for HDF5 Libraries with CMake Section VIII: User Defined Compile Flags for HDF5 Libraries with CMake Section IX: Considerations for cross-compiling +Section X: Using CMakePresets.json for compiling ************************************************************************ @@ -209,10 +210,10 @@ Notes: This short set of instructions is written for users who want to 5. Configure the C library, tools and tests with one of the following commands: On Windows 32 bit - cmake -G "Visual Studio 12 2013" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.15."X" + cmake -G "Visual Studio 16 2019" -A Win32 -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.15."X" On Windows 64 bit - cmake -G "Visual Studio 12 2013 Win64" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.15."X" + cmake -G "Visual Studio 16 2019 Win64" -A x64 -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.15."X" On Linux and Mac cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ../hdf5-1.15."X" @@ -1042,6 +1043,99 @@ The HDF5 CMake variables; HDF5_USE_PREGEN: set this to true HDF5_USE_PREGEN_DIR: set this path to the preset H5Tinit.c file + +======================================================================== +X: Using CMakePresets.json for compiling +======================================================================== + +One problem that CMake users often face is sharing settings with other people for common +ways to configure a project. This may be done to support CI builds, or for users who +frequently use the same build. CMake supports two main files, CMakePresets.json and CMakeUserPresets.json, +that allow users to specify common configure options and share them with others. CMake also supports +files included with the include field. + +CMakePresets.json and CMakeUserPresets.json live in the project's root directory. 
They
+both have exactly the same format, and both are optional (though at least one must be
+present if --preset is specified). CMakePresets.json is meant to specify project-wide build
+details, while CMakeUserPresets.json is meant for developers to specify their own local build details.
+
+See CMake documentation for details: https://cmake.org/cmake/help/latest/manual/cmake-presets.7.html
+
+HDF-provided CMakePresets.json
+-------------------------------
+The CMakePresets.json provided by HDF requires CMake version 3.25, which supports package
+and workflow presets, and the Ninja build system. The top-level configuration group is intended to be
+a standard set of options to produce a package of shared and static libraries and tools. Other configurations
+used for inheriting settings are in the included json file in "config/cmake-presets/hidden-presets.json".
+
+Available configuration presets can be displayed by executing:
+      cmake -S <hdf5 source folder> --list-presets
+
+Using individual command presets (where <compiler-type> is GNUC or MSVC or Clang):
+      change directory to the hdf5 source folder
+      cmake --preset ci-StdShar-<compiler-type>
+      cmake --build --preset ci-StdShar-<compiler-type>
+      ctest --preset ci-StdShar-<compiler-type>
+      cpack --preset ci-StdShar-<compiler-type>
+
+
+Using the workflow preset to configure, build, test and package the standard configuration:
+      change directory to the hdf5 source folder
+      execute "cmake --workflow --preset ci-StdShar-<compiler-type> --fresh"
+            where <compiler-type> is GNUC or MSVC or Clang
+
+Creating your own configurations
+--------------------------------
+The quickest way is to copy CMakePresets.json to CMakeUserPresets.json and
+edit CMakeUserPresets.json configuration names from ci-* to my-*. Change the
+"configurePresets" section "inherits" field only for those presets where you have alternate
+options. Then change the "configurePreset" field entries in the "buildPresets",
+"testPresets", "packagePresets" sections to match your my-StdShar-<compiler-type>.
+Finally, the name settings in the "workflowPresets" steps will also need the ci-* to my-* change.
+
+For instance, to change the support files to use a local directory, edit CMakeUserPresets.json:
+......
+      {
+        "name": "my-base-tgz",
+        "hidden": true,
+        "inherits": "ci-base",
+        "cacheVariables": {
+          "HDF5_ALLOW_EXTERNAL_SUPPORT": {"type": "STRING", "value": "TGZ"},
+          "TGZPATH": {"type": "STRING", "value": "${sourceParentDir}/temp"}
+        }
+      },
+      {
+        "name": "my-StdCompression",
+        "hidden": true,
+        "inherits": "my-base-tgz",
+        "cacheVariables": {
+......
+      {
+        "name": "my-StdShar",
+        "hidden": true,
+        "inherits": "my-StdCompression",
+        "cacheVariables": {
+......
+      {
+        "name": "my-StdShar-GNUC",
+        "description": "GNUC Standard Config for x64 (Release)",
+        "inherits": [
+          "ci-x64-Release-GNUC",
+          "ci-CPP",
+          "ci-Fortran",
+          "ci-Java",
+          "my-StdShar",
+          "my-StdExamples"
+        ]
+      }
+......
+
+
+Then you can change or add options for your specific case.
+
+
+
+
 ========================================================================
 For further assistance, send email to help@hdfgroup.org
 ========================================================================
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index 34ca003a338..773dbde3a13 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -47,6 +47,15 @@ New Features
 
     Configuration:
     -------------
+    - Added support for a CMake presets file.
+
+      CMake supports two main files, CMakePresets.json and CMakeUserPresets.json,
+      that allow users to specify common configure options and share them with others.
+ HDF added a CMakePresets.json file of a typical configuration and support + file, config/cmake-presets/hidden-presets.json. + Also added a section to INSTALL_CMake.txt with very basic explanation of the + process to use CMakePresets. + - Deprecated and removed old SZIP library in favor of LIBAEC library LIBAEC library has been used in HDF5 binaries as the szip library of choice From fe25568b4dcc1ccb53a640a98bdc71a6e134bcbf Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Wed, 26 Apr 2023 17:57:22 -0500 Subject: [PATCH 168/231] Fix v1 object header gap bug in H5Ocopy (#2785) --- release_docs/RELEASE.txt | 12 ++++++++ src/H5Ocopy.c | 9 ++++-- test/objcopy.c | 64 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 83 insertions(+), 2 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 773dbde3a13..ad51ff1e734 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -160,6 +160,18 @@ Bug Fixes since HDF5-1.14.0 release =================================== Library ------- + - Fixed a bug in H5Ocopy that could generate invalid HDF5 files + + H5Ocopy was missing a check to determine whether the new object's + object header version is greater than version 1. Without this check, + copying of objects with object headers that are smaller than a + certain size would cause H5Ocopy to create an object header for the + new object that has a gap in the header data. According to the + HDF5 File Format Specification, this is not allowed for version + 1 of the object header format. + + Fixes GitHub issue #2653 + - Fixed H5Pget_vol_cap_flags and H5Pget_vol_id to accept H5P_DEFAULT H5Pget_vol_cap_flags and H5Pget_vol_id were updated to correctly diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c index 926d0da642a..9852d1f62fa 100644 --- a/src/H5Ocopy.c +++ b/src/H5Ocopy.c @@ -532,10 +532,15 @@ H5O__copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/, H5 HDassert((oh_dst->flags & H5O_HDR_CHUNK0_SIZE) == H5O_HDR_CHUNK0_1); /* Determine whether to create gap or NULL message */ - if (delta < H5O_SIZEOF_MSGHDR_OH(oh_dst)) + if ((oh_dst->version > H5O_VERSION_1) && (delta < H5O_SIZEOF_MSGHDR_OH(oh_dst))) dst_oh_gap = delta; - else + else { + /* NULL message must be at least size of message header */ + if (delta < H5O_SIZEOF_MSGHDR_OH(oh_dst)) + delta = H5O_SIZEOF_MSGHDR_OH(oh_dst); + dst_oh_null = delta; + } /* Increase destination object header size */ dst_oh_size += delta; diff --git a/test/objcopy.c b/test/objcopy.c index 012c81d808f..eac99e0e811 100644 --- a/test/objcopy.c +++ b/test/objcopy.c @@ -16397,6 +16397,68 @@ test_copy_iterate_cb(hid_t loc_id, const char *name, const H5L_info2_t H5_ATTR_U return (H5_ITER_ERROR); } /* end test_copy_iterate_cb */ +/* + * Test for a bug with copying of v1 object headers where the + * new object header would end up with a gap in the header data, + * which v1 object header shouldn't have. 
+ */ +static int +test_copy_cdt_v1_header_bug(hid_t fcpl_src, hid_t src_fapl) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + hid_t ocpypl_id = H5I_INVALID_HID; + char src_filename[NAME_BUF_SIZE]; + + TESTING("H5Ocopy(): bug with copying v1 object headers"); + + /* Initialize the filenames */ + h5_fixname(FILENAME[0], src_fapl, src_filename, sizeof src_filename); + + if ((file_id = H5Fcreate(src_filename, H5F_ACC_TRUNC, fcpl_src, src_fapl)) < 0) + TEST_ERROR; + + if ((type_id = H5Tcreate(H5T_STRING, 385)) < 0) + TEST_ERROR; + if (H5Tset_strpad(type_id, H5T_STR_NULLPAD) < 0) + TEST_ERROR; + if (H5Tset_cset(type_id, H5T_CSET_ASCII) < 0) + TEST_ERROR; + + if (H5Tcommit2(file_id, "committed_str_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0) + TEST_ERROR; + if (H5Pset_copy_object(ocpypl_id, H5O_COPY_WITHOUT_ATTR_FLAG) < 0) + TEST_ERROR; + + if (H5Ocopy(file_id, "committed_str_type", file_id, "committed_str_type2", ocpypl_id, H5P_DEFAULT) < 0) + TEST_ERROR; + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Pclose(ocpypl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Pclose(ocpypl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + static int test_copy_iterate(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst_fapl) { @@ -17577,6 +17639,8 @@ main(void) nerrors += test_copy_null_ref(fcpl_src, fcpl_dst, src_fapl, dst_fapl); nerrors += test_copy_null_ref_open(fcpl_src, fcpl_dst, src_fapl, dst_fapl); + nerrors += test_copy_cdt_v1_header_bug(fcpl_src, src_fapl); + nerrors += test_copy_iterate(fcpl_src, fcpl_dst, src_fapl, dst_fapl); } /* end if */ From 2a2a96f4afd8bde782d3fb442566861d7605eb5b Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 26 Apr 2023 17:39:42 -0700 Subject: [PATCH 169/231] Harden superblock cache deserialization (#2809) --- src/H5Fsuper_cache.c | 360 ++++++++++++++++++------------------------- 1 file changed, 146 insertions(+), 214 deletions(-) diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c index 467e2875ac2..7dbaf22fae1 100644 --- a/src/H5Fsuper_cache.c +++ b/src/H5Fsuper_cache.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5Fsuper_cache.c - * Aug 15 2009 - * Quincey Koziol * - * Purpose: Implement file superblock & driver info metadata cache methods. 
+ * Purpose: Implement file superblock & driver info metadata cache methods * *------------------------------------------------------------------------- */ @@ -76,10 +74,10 @@ static herr_t H5F__cache_drvrinfo_serialize(const H5F_t *f, void *image, size_t static herr_t H5F__cache_drvrinfo_free_icr(void *thing); /* Local encode/decode routines */ -static herr_t H5F__superblock_prefix_decode(H5F_super_t *sblock, const uint8_t **image_ref, +static herr_t H5F__superblock_prefix_decode(H5F_super_t *sblock, const uint8_t **image_ref, size_t len, const H5F_superblock_cache_ud_t *udata, hbool_t extend_eoa); static herr_t H5F__drvrinfo_prefix_decode(H5O_drvinfo_t *drvinfo, char *drv_name, const uint8_t **image_ref, - H5F_drvrinfo_cache_ud_t *udata, hbool_t extend_eoa); + size_t len, H5F_drvrinfo_cache_ud_t *udata, hbool_t extend_eoa); /*********************/ /* Package Variables */ @@ -135,25 +133,21 @@ H5FL_EXTERN(H5F_super_t); /*------------------------------------------------------------------------- * Function: H5F__superblock_prefix_decode * - * Purpose: Decode a superblock prefix - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * December 15, 2016 + * Purpose: Decode a superblock prefix * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t -H5F__superblock_prefix_decode(H5F_super_t *sblock, const uint8_t **image_ref, +H5F__superblock_prefix_decode(H5F_super_t *sblock, const uint8_t **image_ref, size_t len, const H5F_superblock_cache_ud_t *udata, hbool_t extend_eoa) { const uint8_t *image = (const uint8_t *)*image_ref; /* Pointer into raw data buffer */ - htri_t ret_value = SUCCEED; /* Return value */ + const uint8_t *end = image + len - 1; /* Pointer to end of buffer */ + htri_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(sblock); HDassert(image_ref); HDassert(image); @@ -161,27 +155,37 @@ H5F__superblock_prefix_decode(H5F_super_t *sblock, const uint8_t **image_ref, HDassert(udata->f); /* Skip over signature (already checked when locating the superblock) */ + if (H5_IS_BUFFER_OVERFLOW(image, H5F_SIGNATURE_LEN, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); image += H5F_SIGNATURE_LEN; /* Superblock version */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); sblock->super_vers = *image++; if (sblock->super_vers > HDF5_SUPERBLOCK_VERSION_LATEST) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad superblock version number") - /* Sanity check */ - HDassert(((size_t)(image - (const uint8_t *)*image_ref)) == H5F_SUPERBLOCK_FIXED_SIZE); + /* Size check */ + if (((size_t)(image - (const uint8_t *)*image_ref)) != H5F_SUPERBLOCK_FIXED_SIZE) + HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad superblock (fixed) size") /* Determine the size of addresses & size of offsets, for computing the * variable-sized portion of the superblock. 
*/ if (sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2) { + if (H5_IS_BUFFER_OVERFLOW(image, 6, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); sblock->sizeof_addr = image[4]; sblock->sizeof_size = image[5]; - } /* end if */ + } else { + if (H5_IS_BUFFER_OVERFLOW(image, 2, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); sblock->sizeof_addr = image[0]; sblock->sizeof_size = image[1]; - } /* end else */ + } + if (sblock->sizeof_addr != 2 && sblock->sizeof_addr != 4 && sblock->sizeof_addr != 8 && sblock->sizeof_addr != 16 && sblock->sizeof_addr != 32) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad byte number in an address") @@ -196,12 +200,13 @@ H5F__superblock_prefix_decode(H5F_super_t *sblock, const uint8_t **image_ref, /* Determine the size of the variable-length part of the superblock */ variable_size = (size_t)H5F_SUPERBLOCK_VARLEN_SIZE(sblock->super_vers, sblock->sizeof_addr, sblock->sizeof_size); - HDassert(variable_size > 0); + if (variable_size == 0) + HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "variable size can't be zero") /* Make certain we can read the variable-sized portion of the superblock */ if (H5F__set_eoa(udata->f, H5FD_MEM_SUPER, (haddr_t)(H5F_SUPERBLOCK_FIXED_SIZE + variable_size)) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "set end of space allocation request failed") - } /* end if */ + } /* Update the image buffer pointer */ *image_ref = image; @@ -211,28 +216,24 @@ H5F__superblock_prefix_decode(H5F_super_t *sblock, const uint8_t **image_ref, } /* end H5F__superblock_prefix_decode() */ /*------------------------------------------------------------------------- - * Function: H5F__drvrinfo_prefix_decode - * - * Purpose: Decode a driver info prefix + * Function: H5F__drvrinfo_prefix_decode * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * December 15, 2016 + * Purpose: Decode a driver info prefix * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t -H5F__drvrinfo_prefix_decode(H5O_drvinfo_t *drvrinfo, char *drv_name, const uint8_t **image_ref, +H5F__drvrinfo_prefix_decode(H5O_drvinfo_t *drvrinfo, char *drv_name, const uint8_t **image_ref, size_t len, H5F_drvrinfo_cache_ud_t *udata, hbool_t extend_eoa) { const uint8_t *image = (const uint8_t *)*image_ref; /* Pointer into raw data buffer */ + const uint8_t *end = image + len - 1; /* Pointer to end of buffer */ unsigned drv_vers; /* Version of driver info block */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Sanity check */ HDassert(drvrinfo); HDassert(image_ref); HDassert(image); @@ -240,21 +241,30 @@ H5F__drvrinfo_prefix_decode(H5O_drvinfo_t *drvrinfo, char *drv_name, const uint8 HDassert(udata->f); /* Version number */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); drv_vers = *image++; if (drv_vers != HDF5_DRIVERINFO_VERSION_0) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad driver information block version number") - image += 3; /* reserved bytes */ + /* Reserved bytes */ + if (H5_IS_BUFFER_OVERFLOW(image, 3, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); + image += 3; /* Driver info size */ + if (H5_IS_BUFFER_OVERFLOW(image, 4, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while 
decoding"); UINT32DECODE(image, drvrinfo->len); /* Driver name and/or version */ if (drv_name) { + if (H5_IS_BUFFER_OVERFLOW(image, 8, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); H5MM_memcpy(drv_name, (const char *)image, (size_t)8); drv_name[8] = '\0'; image += 8; /* advance past name/version */ - } /* end if */ + } /* Extend the EOA if required so that we can read the complete driver info block */ if (extend_eoa) { @@ -273,7 +283,7 @@ H5F__drvrinfo_prefix_decode(H5O_drvinfo_t *drvrinfo, char *drv_name, const uint8 if (H5F_addr_gt(min_eoa, eoa)) if (H5FD_set_eoa(udata->f->shared->lf, H5FD_MEM_SUPER, min_eoa) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "set end of space allocation request failed") - } /* end if */ + } /* Update the image buffer pointer */ *image_ref = image; @@ -285,13 +295,9 @@ H5F__drvrinfo_prefix_decode(H5O_drvinfo_t *drvrinfo, char *drv_name, const uint8 /*------------------------------------------------------------------------- * Function: H5F__cache_superblock_get_initial_load_size * - * Purpose: Compute the size of the data structure on disk. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * July 17, 2013 + * Purpose: Compute the size of the data structure on disk * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -299,7 +305,6 @@ H5F__cache_superblock_get_initial_load_size(void H5_ATTR_UNUSED *_udata, size_t { FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(image_len); /* Set the initial image length size */ @@ -312,27 +317,22 @@ H5F__cache_superblock_get_initial_load_size(void H5_ATTR_UNUSED *_udata, size_t /*------------------------------------------------------------------------- * Function: H5F__cache_superblock_get_final_load_size * - * Purpose: Compute the final size of the data structure on disk. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * November 17, 2016 + * Purpose: Compute the final size of the data structure on disk * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t -H5F__cache_superblock_get_final_load_size(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED image_len, - void *_udata, size_t *actual_len) +H5F__cache_superblock_get_final_load_size(const void *_image, size_t image_len, void *_udata, + size_t *actual_len) { const uint8_t *image = _image; /* Pointer into raw data buffer */ H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */ H5F_super_t sblock; /* Temporary file superblock */ - htri_t ret_value = SUCCEED; /* Return value */ + htri_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(image); HDassert(udata); HDassert(actual_len); @@ -340,7 +340,7 @@ H5F__cache_superblock_get_final_load_size(const void *_image, size_t H5_ATTR_NDE HDassert(image_len >= H5F_SUPERBLOCK_FIXED_SIZE + 6); /* Deserialize the file superblock's prefix */ - if (H5F__superblock_prefix_decode(&sblock, &image, udata, TRUE) < 0) + if (H5F__superblock_prefix_decode(&sblock, &image, image_len, udata, TRUE) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, FAIL, "can't decode file superblock prefix") /* Save the version to be used in verify_chksum callback */ @@ -357,14 +357,11 @@ H5F__cache_superblock_get_final_load_size(const void *_image, size_t H5_ATTR_NDE /*------------------------------------------------------------------------- * Function: H5F__cache_superblock_verify_chksum * - * Purpose: Verify the computed checksum of the data structure is the - * same as the stored chksum. - * - * Return: Success: TRUE/FALSE - * Failure: Negative - * - * Programmer: Vailin Choi; Aug 2015 + * Purpose: Verify the computed checksum of the data structure is the + * same as the stored chksum. * + * Return: Success: TRUE/FALSE + * Failure: Negative *------------------------------------------------------------------------- */ static htri_t @@ -372,13 +369,12 @@ H5F__cache_superblock_verify_chksum(const void *_image, size_t len, void *_udata { const uint8_t *image = _image; /* Pointer into raw data buffer */ H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */ - uint32_t stored_chksum; /* Stored metadata checksum value */ - uint32_t computed_chksum; /* Computed metadata checksum value */ - htri_t ret_value = TRUE; /* Return value */ + uint32_t stored_chksum; /* Stored metadata checksum value */ + uint32_t computed_chksum; /* Computed metadata checksum value */ + htri_t ret_value = TRUE; FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(image); HDassert(udata); @@ -390,36 +386,31 @@ H5F__cache_superblock_verify_chksum(const void *_image, size_t len, void *_udata if (stored_chksum != computed_chksum) ret_value = FALSE; - } /* end if */ + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5F__cache_superblock_verify_chksum() */ /*------------------------------------------------------------------------- - * Function: H5F__cache_superblock_deserialize - * - * Purpose: Loads an object from the disk. 
+ * Function: H5F__cache_superblock_deserialize * - * Return: Success: Pointer to new object - * Failure: NULL - * - * Programmer: Quincey Koziol - * July 18 2013 + * Purpose: Load an object from the disk * + * Return: Success: Pointer to new object + * Failure: NULL *------------------------------------------------------------------------- */ static void * -H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED len, void *_udata, - hbool_t H5_ATTR_UNUSED *dirty) +H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata, hbool_t H5_ATTR_UNUSED *dirty) { H5F_super_t *sblock = NULL; /* File's superblock */ H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */ - const uint8_t *image = _image; /* Pointer into raw data buffer */ - H5F_super_t *ret_value = NULL; /* Return value */ + const uint8_t *image = _image; /* Pointer into raw data buffer */ + const uint8_t *end = image + len - 1; /* Pointer to end of buffer */ + H5F_super_t *ret_value = NULL; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(image); HDassert(udata); HDassert(udata->f); @@ -430,11 +421,9 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Deserialize the file superblock's prefix */ - if (H5F__superblock_prefix_decode(sblock, &image, udata, FALSE) < 0) + if (H5F__superblock_prefix_decode(sblock, &image, len, udata, FALSE) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "can't decode file superblock prefix") - const uint8_t *image_end = image + len - 1; - /* Check for older version of superblock format */ if (sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2) { uint32_t status_flags; /* File status flags */ @@ -442,122 +431,113 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS unsigned snode_btree_k; /* B-tree symbol table internal node 'K' value */ unsigned chunk_btree_k; /* B-tree chunk internal node 'K' value */ - /* Check whether the image pointer is out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) - HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") - /* Freespace version (hard-wired) */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") if (HDF5_FREESPACE_VERSION != *image++) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad free space version number") - /* Check whether the image pointer is out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) - HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") - /* Root group version number (hard-wired) */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") if (HDF5_OBJECTDIR_VERSION != *image++) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad object directory version number") /* Skip over reserved byte */ - image++; - - /* Check whether the image pointer is out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) + if (H5_IS_BUFFER_OVERFLOW(image, 1, end)) HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + image++; /* Shared header version number (hard-wired) */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") if (HDF5_SHAREDHEADER_VERSION != *image++) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad shared-header format version number") /* 
Skip over size of file addresses (already decoded) */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") image++; udata->f->shared->sizeof_addr = sblock->sizeof_addr; /* Keep a local copy also */ /* Skip over size of file sizes (already decoded) */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") image++; udata->f->shared->sizeof_size = sblock->sizeof_size; /* Keep a local copy also */ /* Skip over reserved byte */ - image++; - - /* Check whether the image pointer is out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, sizeof(uint16_t), image_end)) + if (H5_IS_BUFFER_OVERFLOW(image, 1, end)) HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + image++; /* Various B-tree sizes */ + if (H5_IS_BUFFER_OVERFLOW(image, 2, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") UINT16DECODE(image, sym_leaf_k); if (sym_leaf_k == 0) HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, NULL, "bad symbol table leaf node 1/2 rank") udata->sym_leaf_k = sym_leaf_k; /* Keep a local copy also */ - /* Check whether the image pointer is out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, sizeof(uint16_t), image_end)) - HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") - /* Need 'get' call to set other array values */ + if (H5_IS_BUFFER_OVERFLOW(image, 2, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") UINT16DECODE(image, snode_btree_k); if (snode_btree_k == 0) HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, NULL, "bad 1/2 rank for btree internal nodes") udata->btree_k[H5B_SNODE_ID] = snode_btree_k; - /* - * Delay setting the value in the property list until we've checked + /* Delay setting the value in the property list until we've checked * for the indexed storage B-tree internal 'K' value later. 
*/ - /* Check whether the image pointer is out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, sizeof(uint32_t), image_end)) - HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") - /* File status flags (not really used yet) */ + if (H5_IS_BUFFER_OVERFLOW(image, 4, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") UINT32DECODE(image, status_flags); - HDassert(status_flags <= 255); + if (status_flags > 255) + HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad superblock status flags") sblock->status_flags = (uint8_t)status_flags; if (sblock->status_flags & ~H5F_SUPER_ALL_FLAGS) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad flag value for superblock") - /* - * If the superblock version # is greater than 0, read in the indexed + /* If the superblock version # is greater than 0, read in the indexed * storage B-tree internal 'K' value */ if (sblock->super_vers > HDF5_SUPERBLOCK_VERSION_DEF) { - /* Check whether the image pointer is out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, sizeof(uint16_t), image_end)) + if (H5_IS_BUFFER_OVERFLOW(image, 2, end)) HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") - UINT16DECODE(image, chunk_btree_k); /* Reserved bytes are present only in version 1 */ if (sblock->super_vers == HDF5_SUPERBLOCK_VERSION_1) { - image += 2; /* reserved */ - - /* Check whether the image pointer is out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) + /* Reserved */ + if (H5_IS_BUFFER_OVERFLOW(image, 2, end)) HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") + image += 2; } - } /* end if */ + } else chunk_btree_k = HDF5_BTREE_CHUNK_IK_DEF; udata->btree_k[H5B_CHUNK_ID] = chunk_btree_k; - /* Check whether the image pointer will be out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, H5F_SIZEOF_ADDR(udata->f) * 4, image_end)) - HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") - /* Remainder of "variable-sized" portion of superblock */ + if (H5_IS_BUFFER_OVERFLOW(image, H5F_sizeof_addr(udata->f) * 4, end)) + HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->base_addr /*out*/); H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->ext_addr /*out*/); H5F_addr_decode(udata->f, (const uint8_t **)&image, &udata->stored_eof /*out*/); H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->driver_addr /*out*/); /* Allocate space for the root group symbol table entry */ - HDassert(!sblock->root_ent); + if (sblock->root_ent) + HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "root entry should not exist yet") if (NULL == (sblock->root_ent = (H5G_entry_t *)H5MM_calloc(sizeof(H5G_entry_t)))) HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, NULL, "can't allocate space for root group symbol table entry") - /* decode the root group symbol table entry */ - if (H5G_ent_decode(udata->f, (const uint8_t **)&image, sblock->root_ent, image_end) < 0) + /* Decode the root group symbol table entry */ + if (H5G_ent_decode(udata->f, (const uint8_t **)&image, sblock->root_ent, end) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "can't decode root group symbol table entry") /* Set the root group address to the correct value */ @@ -572,11 +552,10 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS /* Eliminate the driver info */ sblock->driver_addr = HADDR_UNDEF; udata->drvrinfo_removed = TRUE; - } /* end if */ + } /* NOTE: Driver info block is decoded 
separately, later */ - - } /* end if */ + } else { uint32_t read_chksum; /* Checksum read from file */ @@ -588,7 +567,7 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS udata->f->shared->sizeof_size = sblock->sizeof_size; /* Keep a local copy also */ /* Check whether the image pointer is out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, 1, image_end)) + if (H5_IS_BUFFER_OVERFLOW(image, 1, end)) HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") /* File status flags (not really used yet) */ @@ -597,7 +576,7 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad flag value for superblock") /* Check whether the image pointer will be out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, H5F_SIZEOF_ADDR(udata->f) * 4, image_end)) + if (H5_IS_BUFFER_OVERFLOW(image, H5F_SIZEOF_ADDR(udata->f) * 4, end)) HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") /* Base, superblock extension, end of file & root group object header addresses */ @@ -609,7 +588,7 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS /* checksum verification already done in verify_chksum cb */ /* Check whether the image pointer will be out of bounds */ - if (H5_IS_BUFFER_OVERFLOW(image, sizeof(uint32_t), image_end)) + if (H5_IS_BUFFER_OVERFLOW(image, sizeof(uint32_t), end)) HGOTO_ERROR(H5E_FILE, H5E_OVERFLOW, NULL, "image pointer is out of bounds") /* Decode checksum */ @@ -621,12 +600,12 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS * any attempt to load the Driver Information Block. */ sblock->driver_addr = HADDR_UNDEF; - } /* end else */ + } - /* Sanity check */ - HDassert((size_t)(image - (const uint8_t *)_image) <= len); + /* Size check */ + if ((size_t)(image - (const uint8_t *)_image) > len) + HDONE_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad decoded superblock size") - /* Set return value */ ret_value = sblock; done: @@ -641,13 +620,9 @@ H5F__cache_superblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUS /*------------------------------------------------------------------------- * Function: H5F__cache_superblock_image_len * - * Purpose: Compute the size of the data structure on disk. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * July 19, 2013 + * Purpose: Compute the size of the data structure on disk * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -657,7 +632,6 @@ H5F__cache_superblock_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(sblock); HDassert(sblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sblock->cache_info.type == H5AC_SUPERBLOCK); @@ -672,13 +646,9 @@ H5F__cache_superblock_image_len(const void *_thing, size_t *image_len) /*------------------------------------------------------------------------- * Function: H5F__cache_superblock_serialize * - * Purpose: Flushes a dirty object to disk. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * July 19 2013 + * Purpose: Flush a dirty object to disk * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -687,11 +657,10 @@ H5F__cache_superblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNU H5F_super_t *sblock = (H5F_super_t *)_thing; /* Pointer to the object */ uint8_t *image = _image; /* Pointer into raw data buffer */ haddr_t rel_eof; /* Relative EOF for file */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Sanity check */ HDassert(f); HDassert(image); HDassert(sblock); @@ -796,7 +765,7 @@ H5F__cache_superblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNU /* Sanity check */ HDassert((size_t)(image - (uint8_t *)_image) == (size_t)H5F_SUPERBLOCK_SIZE(sblock)); - } /* end else */ + } /* Sanity check */ HDassert((size_t)(image - (uint8_t *)_image) == len); @@ -808,29 +777,24 @@ H5F__cache_superblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNU /*------------------------------------------------------------------------- * Function: H5F__cache_superblock_free_icr * - * Purpose: Destroy/release an "in core representation" of a data + * Purpose: Destroy/release an "in core representation" of a data * structure * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * July 20, 2013 + * Note: The metadata cache sets the object's cache_info.magic to + * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr + * callback (checked in assert). * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t H5F__cache_superblock_free_icr(void *_thing) { H5F_super_t *sblock = (H5F_super_t *)_thing; /* Pointer to the object */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Sanity check */ HDassert(sblock); HDassert(sblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(sblock->cache_info.type == H5AC_SUPERBLOCK); @@ -848,11 +812,7 @@ H5F__cache_superblock_free_icr(void *_thing) * * Purpose: Compute the initial size of the data structure on disk. * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * July 20, 2013 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -860,7 +820,6 @@ H5F__cache_drvrinfo_get_initial_load_size(void H5_ATTR_UNUSED *_udata, size_t *i { FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(image_len); /* Set the initial image length size */ @@ -874,25 +833,20 @@ H5F__cache_drvrinfo_get_initial_load_size(void H5_ATTR_UNUSED *_udata, size_t *i * * Purpose: Compute the final size of the data structure on disk. 
* - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * November 17, 2016 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t -H5F__cache_drvrinfo_get_final_load_size(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED image_len, - void *_udata, size_t *actual_len) +H5F__cache_drvrinfo_get_final_load_size(const void *_image, size_t image_len, void *_udata, + size_t *actual_len) { const uint8_t *image = _image; /* Pointer into raw data buffer */ H5F_drvrinfo_cache_ud_t *udata = (H5F_drvrinfo_cache_ud_t *)_udata; /* User data */ H5O_drvinfo_t drvrinfo; /* Driver info */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(image); HDassert(udata); HDassert(actual_len); @@ -900,7 +854,7 @@ H5F__cache_drvrinfo_get_final_load_size(const void *_image, size_t H5_ATTR_NDEBU HDassert(image_len == H5F_DRVINFOBLOCK_HDR_SIZE); /* Deserialize the file driver info's prefix */ - if (H5F__drvrinfo_prefix_decode(&drvrinfo, NULL, &image, udata, TRUE) < 0) + if (H5F__drvrinfo_prefix_decode(&drvrinfo, NULL, &image, image_len, udata, TRUE) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, FAIL, "can't decode file driver info prefix") /* Set the final size for the cache image */ @@ -913,29 +867,23 @@ H5F__cache_drvrinfo_get_final_load_size(const void *_image, size_t H5_ATTR_NDEBU /*------------------------------------------------------------------------- * Function: H5F__cache_drvrinfo_deserialize * - * Purpose: Loads an object from the disk. - * - * Return: Success: Pointer to a new driver info struct - * Failure: NULL - * - * Programmer: Quincey Koziol - * July 20 2013 + * Purpose: Loads an object from the disk * + * Return: Success: Pointer to a new driver info struct + * Failure: NULL *------------------------------------------------------------------------- */ static void * -H5F__cache_drvrinfo_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED len, void *_udata, - hbool_t H5_ATTR_UNUSED *dirty) +H5F__cache_drvrinfo_deserialize(const void *_image, size_t len, void *_udata, hbool_t H5_ATTR_UNUSED *dirty) { H5O_drvinfo_t *drvinfo = NULL; /* Driver info */ H5F_drvrinfo_cache_ud_t *udata = (H5F_drvrinfo_cache_ud_t *)_udata; /* User data */ const uint8_t *image = _image; /* Pointer into raw data buffer */ char drv_name[9]; /* Name of driver */ - H5O_drvinfo_t *ret_value = NULL; /* Return value */ + H5O_drvinfo_t *ret_value = NULL; FUNC_ENTER_PACKAGE - /* Sanity check */ HDassert(image); HDassert(len >= H5F_DRVINFOBLOCK_HDR_SIZE); HDassert(udata); @@ -946,7 +894,7 @@ H5F__cache_drvrinfo_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, NULL, "memory allocation failed for driver info message") /* Deserialize the file driver info's prefix */ - if (H5F__drvrinfo_prefix_decode(drvinfo, drv_name, &image, udata, FALSE) < 0) + if (H5F__drvrinfo_prefix_decode(drvinfo, drv_name, &image, len, udata, FALSE) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "can't decode file driver info prefix") /* Sanity check */ @@ -959,7 +907,6 @@ H5F__cache_drvrinfo_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED /* Sanity check */ HDassert((size_t)(image - (const uint8_t *)_image) <= len); - /* Set return value */ ret_value = drvinfo; done: @@ -973,13 +920,9 @@ H5F__cache_drvrinfo_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED 
/*------------------------------------------------------------------------- * Function: H5F__cache_drvrinfo_image_len * - * Purpose: Compute the size of the data structure on disk. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * July 20, 2013 + * Purpose: Compute the size of the data structure on disk * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -989,7 +932,6 @@ H5F__cache_drvrinfo_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(drvinfo); HDassert(drvinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(drvinfo->cache_info.type == H5AC_DRVRINFO); @@ -1005,13 +947,9 @@ H5F__cache_drvrinfo_image_len(const void *_thing, size_t *image_len) /*------------------------------------------------------------------------- * Function: H5F__cache_drvrinfo_serialize * - * Purpose: Flushes a dirty object to disk. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * July 20 2013 + * Purpose: Flush a dirty object to disk * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -1020,11 +958,10 @@ H5F__cache_drvrinfo_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBU H5O_drvinfo_t *drvinfo = (H5O_drvinfo_t *)_thing; /* Pointer to the object */ uint8_t *image = _image; /* Pointer into raw data buffer */ uint8_t *dbuf; /* Pointer to beginning of driver info */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* check arguments */ HDassert(f); HDassert(image); HDassert(drvinfo); @@ -1061,18 +998,14 @@ H5F__cache_drvrinfo_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBU /*------------------------------------------------------------------------- * Function: H5F__cache_drvrinfo_free_icr * - * Purpose: Destroy/release an "in core representation" of a data + * Purpose: Destroy/release an "in core representation" of a data * structure * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * July 20, 2013 + * Note: The metadata cache sets the object's cache_info.magic to + * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr + * callback (checked in assert). 
* + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -1082,7 +1015,6 @@ H5F__cache_drvrinfo_free_icr(void *_thing) FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(drvinfo); HDassert(drvinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(drvinfo->cache_info.type == H5AC_DRVRINFO); From 2e1b8571ce3015b630fee836f8c7f56cefd4e287 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Thu, 27 Apr 2023 11:52:11 -0500 Subject: [PATCH 170/231] Subfiling VFD - check if MPI is finalized during VFD termination (#2683) --- src/H5FDsubfiling/H5FDioc.c | 18 ++++++--- src/H5FDsubfiling/H5FDsubfiling.c | 42 +++++++++++++++----- src/H5FDsubfiling/H5subfiling_common.c | 55 ++++++++++++++++++++------ 3 files changed, 87 insertions(+), 28 deletions(-) diff --git a/src/H5FDsubfiling/H5FDioc.c b/src/H5FDsubfiling/H5FDioc.c index 2fd8b64104c..7d20021389b 100644 --- a/src/H5FDsubfiling/H5FDioc.c +++ b/src/H5FDsubfiling/H5FDioc.c @@ -887,16 +887,20 @@ H5FD__ioc_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr) static herr_t H5FD__ioc_close_int(H5FD_ioc_t *file_ptr) { + int mpi_finalized; + int mpi_code; herr_t ret_value = SUCCEED; HDassert(file_ptr); + if (MPI_SUCCESS != (mpi_code = MPI_Finalized(&mpi_finalized))) + H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Finalized failed", mpi_code); + if (file_ptr->context_id >= 0) { subfiling_context_t *sf_context = H5_get_subfiling_object(file_ptr->context_id); - int mpi_code; /* Don't allow IOC threads to be finalized until everyone gets here */ - if (file_ptr->mpi_size > 1) + if (!mpi_finalized && (file_ptr->mpi_size > 1)) if (MPI_SUCCESS != (mpi_code = MPI_Barrier(file_ptr->comm))) H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code); @@ -911,10 +915,12 @@ H5FD__ioc_close_int(H5FD_ioc_t *file_ptr) file_ptr->context_id = -1; } - if (H5_mpi_comm_free(&file_ptr->comm) < 0) - H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "unable to free MPI Communicator"); - if (H5_mpi_info_free(&file_ptr->info) < 0) - H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "unable to free MPI Info object"); + if (!mpi_finalized) { + if (H5_mpi_comm_free(&file_ptr->comm) < 0) + H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "unable to free MPI Communicator"); + if (H5_mpi_info_free(&file_ptr->info) < 0) + H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "unable to free MPI Info object"); + } done: HDfree(file_ptr->file_path); diff --git a/src/H5FDsubfiling/H5FDsubfiling.c b/src/H5FDsubfiling/H5FDsubfiling.c index e0861908d10..64c92edd773 100644 --- a/src/H5FDsubfiling/H5FDsubfiling.c +++ b/src/H5FDsubfiling/H5FDsubfiling.c @@ -374,12 +374,29 @@ H5FD__subfiling_term(void) herr_t ret_value = SUCCEED; if (H5FD_SUBFILING_g >= 0) { + int mpi_finalized; int mpi_code; + /* + * Retrieve status of whether MPI has already been terminated. + * This can happen if an HDF5 ID is left unclosed and HDF5 + * shuts down after MPI_Finalize() is called in an application. 
+ */ + if (MPI_SUCCESS != (mpi_code = MPI_Finalized(&mpi_finalized))) + H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Finalized failed", mpi_code); + /* Free RPC message MPI Datatype */ - if (H5_subfiling_rpc_msg_type != MPI_DATATYPE_NULL) - if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&H5_subfiling_rpc_msg_type))) - H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code); + if (H5_subfiling_rpc_msg_type != MPI_DATATYPE_NULL) { + if (!mpi_finalized) { + if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&H5_subfiling_rpc_msg_type))) + H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code); + } +#ifdef H5FD_SUBFILING_DEBUG + else + HDprintf("** WARNING **: HDF5 is terminating the Subfiling VFD after MPI_Finalize() was " + "called - an HDF5 ID was probably left unclosed\n"); +#endif + } /* Clean up resources */ if (H5_subfiling_terminate() < 0) @@ -1297,10 +1314,15 @@ H5FD__subfiling_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t ma static herr_t H5FD__subfiling_close_int(H5FD_subfiling_t *file_ptr) { + int mpi_finalized; + int mpi_code; herr_t ret_value = SUCCEED; HDassert(file_ptr); + if (MPI_SUCCESS != (mpi_code = MPI_Finalized(&mpi_finalized))) + H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Finalized failed", mpi_code); + if (file_ptr->sf_file && H5FD_close(file_ptr->sf_file) < 0) H5_SUBFILING_GOTO_ERROR(H5E_IO, H5E_CANTCLOSEFILE, FAIL, "unable to close subfile"); if (file_ptr->stub_file && H5FD_close(file_ptr->stub_file) < 0) @@ -1311,13 +1333,15 @@ H5FD__subfiling_close_int(H5FD_subfiling_t *file_ptr) H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_ARGS, FAIL, "can't close IOC FAPL"); file_ptr->fa.ioc_fapl_id = H5I_INVALID_HID; - if (H5_mpi_comm_free(&file_ptr->comm) < 0) - H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "unable to free MPI Communicator"); - if (H5_mpi_info_free(&file_ptr->info) < 0) - H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "unable to free MPI Info object"); + if (!mpi_finalized) { + if (H5_mpi_comm_free(&file_ptr->comm) < 0) + H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "unable to free MPI Communicator"); + if (H5_mpi_info_free(&file_ptr->info) < 0) + H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "unable to free MPI Info object"); - if (H5_mpi_comm_free(&file_ptr->ext_comm) < 0) - H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "can't free MPI communicator"); + if (H5_mpi_comm_free(&file_ptr->ext_comm) < 0) + H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "can't free MPI communicator"); + } file_ptr->fail_to_encode = FALSE; diff --git a/src/H5FDsubfiling/H5subfiling_common.c b/src/H5FDsubfiling/H5subfiling_common.c index 58f36430c31..8fea7948f35 100644 --- a/src/H5FDsubfiling/H5subfiling_common.c +++ b/src/H5FDsubfiling/H5subfiling_common.c @@ -338,8 +338,18 @@ H5_free_subfiling_object(int64_t object_id) static herr_t H5_free_subfiling_object_int(subfiling_context_t *sf_context) { + int mpi_finalized; + int mpi_code; + herr_t ret_value = SUCCEED; + HDassert(sf_context); + if (MPI_SUCCESS != (mpi_code = MPI_Finalized(&mpi_finalized))) { + /* Assume MPI is finalized or worse, and try to clean up what we can */ + H5_SUBFILING_MPI_DONE_ERROR(FAIL, "MPI_Finalized failed", mpi_code); + mpi_finalized = 1; + } + sf_context->sf_context_id = -1; sf_context->h5_file_id = UINT64_MAX; sf_context->sf_num_fids = 0; @@ -352,28 +362,38 @@ H5_free_subfiling_object_int(subfiling_context_t *sf_context) sf_context->sf_base_addr = -1; if (sf_context->sf_msg_comm != MPI_COMM_NULL) { - if (H5_mpi_comm_free(&sf_context->sf_msg_comm) < 0) 
- return FAIL; + if (!mpi_finalized) { + if (H5_mpi_comm_free(&sf_context->sf_msg_comm) < 0) + return FAIL; + } sf_context->sf_msg_comm = MPI_COMM_NULL; } if (sf_context->sf_data_comm != MPI_COMM_NULL) { - if (H5_mpi_comm_free(&sf_context->sf_data_comm) < 0) - return FAIL; + if (!mpi_finalized) { + if (H5_mpi_comm_free(&sf_context->sf_data_comm) < 0) + return FAIL; + } sf_context->sf_data_comm = MPI_COMM_NULL; } if (sf_context->sf_eof_comm != MPI_COMM_NULL) { - if (H5_mpi_comm_free(&sf_context->sf_eof_comm) < 0) - return FAIL; + if (!mpi_finalized) { + if (H5_mpi_comm_free(&sf_context->sf_eof_comm) < 0) + return FAIL; + } sf_context->sf_eof_comm = MPI_COMM_NULL; } if (sf_context->sf_node_comm != MPI_COMM_NULL) { - if (H5_mpi_comm_free(&sf_context->sf_node_comm) < 0) - return FAIL; + if (!mpi_finalized) { + if (H5_mpi_comm_free(&sf_context->sf_node_comm) < 0) + return FAIL; + } sf_context->sf_node_comm = MPI_COMM_NULL; } if (sf_context->sf_group_comm != MPI_COMM_NULL) { - if (H5_mpi_comm_free(&sf_context->sf_group_comm) < 0) - return FAIL; + if (!mpi_finalized) { + if (H5_mpi_comm_free(&sf_context->sf_group_comm) < 0) + return FAIL; + } sf_context->sf_group_comm = MPI_COMM_NULL; } @@ -402,16 +422,24 @@ H5_free_subfiling_object_int(subfiling_context_t *sf_context) HDfree(sf_context); - return SUCCEED; + H5_SUBFILING_FUNC_LEAVE; } static herr_t H5_free_subfiling_topology(sf_topology_t *topology) { + int mpi_finalized; + int mpi_code; herr_t ret_value = SUCCEED; HDassert(topology); + if (MPI_SUCCESS != (mpi_code = MPI_Finalized(&mpi_finalized))) { + /* Assume MPI is finalized or worse, but clean up what we can */ + H5_SUBFILING_MPI_DONE_ERROR(FAIL, "MPI_Finalized failed", mpi_code); + mpi_finalized = 1; + } + #ifndef NDEBUG { hbool_t topology_cached = FALSE; @@ -442,8 +470,9 @@ H5_free_subfiling_topology(sf_topology_t *topology) HDfree(topology->io_concentrators); topology->io_concentrators = NULL; - if (H5_mpi_comm_free(&topology->app_comm) < 0) - H5_SUBFILING_DONE_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "can't free MPI communicator"); + if (!mpi_finalized) + if (H5_mpi_comm_free(&topology->app_comm) < 0) + H5_SUBFILING_DONE_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "can't free MPI communicator"); HDfree(topology); From 9b2d57446217bb080fc59d613fe72a4f3ce275b5 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Fri, 28 Apr 2023 07:45:29 -0500 Subject: [PATCH 171/231] h5fuse.sh fix for summit. (#2841) * updated directory locations --- testpar/t_subfiling_vfd.c | 2 +- utils/subfiling_vfd/h5fuse.sh.in | 16 ++++++---------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c index 0c2bca7a893..85df3bd4939 100644 --- a/testpar/t_subfiling_vfd.c +++ b/testpar/t_subfiling_vfd.c @@ -1858,7 +1858,7 @@ test_subfiling_h5fuse(void) args[0] = HDstrdup("env"); args[1] = HDstrdup("sh"); args[2] = HDstrdup("h5fuse.sh"); - args[3] = HDstrdup("-f"); + args[3] = HDstrdup("-q -f"); args[4] = tmp_filename; args[5] = NULL; diff --git a/utils/subfiling_vfd/h5fuse.sh.in b/utils/subfiling_vfd/h5fuse.sh.in index 09a3a056811..2085033e281 100755 --- a/utils/subfiling_vfd/h5fuse.sh.in +++ b/utils/subfiling_vfd/h5fuse.sh.in @@ -102,28 +102,23 @@ FAILED=1 # Configure file checks # ############################################################ # -SUBF_CONFDIR="${H5FD_SUBFILING_CONFIG_FILE_PREFIX:-.}" - -# cd to the subfile configuration location -if [ "$SUBF_CONFDIR" != "." 
] || [ "$SUBF_CONFDIR" != "$PWD" ]; then - cd "$SUBF_CONFDIR" || exit -fi +SUBF_CONFDIR="${H5FD_SUBFILING_CONFIG_FILE_PREFIX:-$PWD}" # Try to find the config file if [ -z "$file_config" ]; then nfiles=$(find "$SUBF_CONFDIR" -maxdepth 1 -type f -iname "*.config" -printf '.' | wc -m) if [[ "$nfiles" != "1" ]]; then if [[ "$nfiles" == "0" ]]; then - echo -e "$RED Failed to find .config file in current directory. $NC" + echo -e "$RED Failed to find .config file in ${SUBF_CONFDIR} $NC" usage exit $FAILED else - echo -e "$RED More than one .config file found in current directory. $NC" + echo -e "$RED More than one .config file found in ${SUBF_CONFDIR} $NC" usage exit $FAILED fi fi - file_config=$(find "$SUBF_CONFDIR" -maxdepth 1 -type f -iname '*.config') + file_config=$(find "${SUBF_CONFDIR}" -maxdepth 1 -type f -iname '*.config') fi if [ ! -f "$file_config" ]; then @@ -150,7 +145,8 @@ if test -z "$subfile_dir"; then fi # For bash 4.4+ -mapfile -t subfiles < <( sed -e '1,/subfile_dir=/d' "$file_config" ) +subfs=$(sed -e '1,/subfile_dir=/d' "$file_config") +mapfile -t subfiles <<< "$subfs" if [ ${#subfiles[@]} -eq 0 ]; then echo -e "$RED failed to find subfiles list in $file_config $NC" exit $FAILED From abe51936a1c111cece4ee1465df2fc9dfc5c865f Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Fri, 28 Apr 2023 11:02:38 -0500 Subject: [PATCH 172/231] Correct compression install files (#2838) --- config/cmake/LIBAEC/CMakeLists.txt | 12 ++++++++---- config/cmake/ZLIB/CMakeLists.txt | 18 ++++++++++++++---- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/config/cmake/LIBAEC/CMakeLists.txt b/config/cmake/LIBAEC/CMakeLists.txt index 212c9bfe886..fb650ec04b5 100644 --- a/config/cmake/LIBAEC/CMakeLists.txt +++ b/config/cmake/LIBAEC/CMakeLists.txt @@ -369,6 +369,10 @@ if (WIN32) find_program (WIX_EXECUTABLE candle PATHS "${CPACK_WIX_ROOT}/bin") endif () +configure_file (${LIBAEC_SOURCE_DIR}/LICENSE.txt ${LIBAEC_BINARY_DIR}/LIBAEC_LICENSE.txt @ONLY) +configure_file (${LIBAEC_SOURCE_DIR}/README.SZIP ${LIBAEC_BINARY_DIR}/LIBAEC_README.SZIP @ONLY) +configure_file (${LIBAEC_SOURCE_DIR}/README.md ${LIBAEC_BINARY_DIR}/LIBAEC_README.md @ONLY) + #----------------------------------------------------------------------------- # Set the cpack variables #----------------------------------------------------------------------------- @@ -383,9 +387,9 @@ if (NOT LIBAEC_EXTERNALLY_CONFIGURED) set (CPACK_PACKAGE_VERSION_MAJOR "${LIBAEC_PACKAGE_VERSION_MAJOR}") set (CPACK_PACKAGE_VERSION_MINOR "${LIBAEC_PACKAGE_VERSION_MINOR}") set (CPACK_PACKAGE_VERSION_PATCH "") - set (CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE.txt") - set (CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.SZIP") - set (CPACK_RESOURCE_FILE_README "${CMAKE_CURRENT_SOURCE_DIR}/README.md") + set (CPACK_RESOURCE_FILE_LICENSE "${LIBAEC_BINARY_DIR}/LIBAEC_LICENSE.txt") + set (CPACK_PACKAGE_DESCRIPTION_FILE "${LIBAEC_BINARY_DIR}/LIBAEC_README.SZIP") + set (CPACK_RESOURCE_FILE_README "${LIBAEC_BINARY_DIR}/LIBAEC_README.md") set (CPACK_PACKAGE_RELOCATABLE TRUE) set (CPACK_PACKAGE_DESCRIPTION_SUMMARY "libaec - Adaptive Entropy Coding library by Deutsches Klimarechenzentrum GmbH") set (CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_VENDOR}/${CPACK_PACKAGE_NAME}/${CPACK_PACKAGE_VERSION}") @@ -419,7 +423,7 @@ if (NOT LIBAEC_EXTERNALLY_CONFIGURED) endif () #WiX variables set (CPACK_WIX_UNINSTALL "1") - set (CPACK_RESOURCE_FILE_LICENSE 
"${CMAKE_CURRENT_SOURCE_DIR}/LICENSE.txt") + set (CPACK_RESOURCE_FILE_LICENSE "${LIBAEC_BINARY_DIR}/LIBAEC_LICENSE.txt") elseif (APPLE) list (APPEND CPACK_GENERATOR "STGZ") list (APPEND CPACK_GENERATOR "DragNDrop") diff --git a/config/cmake/ZLIB/CMakeLists.txt b/config/cmake/ZLIB/CMakeLists.txt index c74ecea9dd9..5e42fb2576c 100644 --- a/config/cmake/ZLIB/CMakeLists.txt +++ b/config/cmake/ZLIB/CMakeLists.txt @@ -422,6 +422,16 @@ if (WIN32) find_program (WIX_EXECUTABLE candle PATHS "${CPACK_WIX_ROOT}/bin") endif () +#----------------------------------------------------------------------------- +# Configure the LICENSE.txt file for the windows binary package +#----------------------------------------------------------------------------- +if (WIN32) + configure_file (${ZLIB_SOURCE_DIR}/LICENSE ${ZLIB_BINARY_DIR}/ZLIB_LICENSE.txt @ONLY) +else () + configure_file (${ZLIB_SOURCE_DIR}/LICENSE ${ZLIB_BINARY_DIR}/ZLIB_LICENSE @ONLY) +endif () +configure_file (${ZLIB_SOURCE_DIR}/README ${ZLIB_BINARY_DIR}/ZLIB_README @ONLY) + #----------------------------------------------------------------------------- # Set the cpack variables #----------------------------------------------------------------------------- @@ -436,9 +446,9 @@ if (NOT ZLIB_EXTERNALLY_CONFIGURED) set (CPACK_PACKAGE_VERSION_MAJOR "${ZLIB_PACKAGE_VERSION_MAJOR}") set (CPACK_PACKAGE_VERSION_MINOR "${ZLIB_PACKAGE_VERSION_MINOR}") set (CPACK_PACKAGE_VERSION_PATCH "") - set (CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/README") - set (CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README") - set (CPACK_RESOURCE_FILE_README "${CMAKE_CURRENT_SOURCE_DIR}/README") + set (CPACK_RESOURCE_FILE_LICENSE "${ZLIB_BINARY_DIR}/ZLIB_LICENSE") + set (CPACK_PACKAGE_DESCRIPTION_FILE "${ZLIB_BINARY_DIR}/ZLIB_README") + set (CPACK_RESOURCE_FILE_README "${ZLIB_BINARY_DIR}/ZLIB_README") set (CPACK_PACKAGE_RELOCATABLE TRUE) set (CPACK_PACKAGE_DESCRIPTION_SUMMARY "zlib Installation") set (CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_VENDOR}/${CPACK_PACKAGE_NAME}/${CPACK_PACKAGE_VERSION}") @@ -472,7 +482,7 @@ if (NOT ZLIB_EXTERNALLY_CONFIGURED) endif () #WiX variables set (CPACK_WIX_UNINSTALL "1") - set (CPACK_RESOURCE_FILE_LICENSE "${JPEG_BINARY_DIR}/README") + set (CPACK_RESOURCE_FILE_LICENSE "${ZLIB_BINARY_DIR}/ZLIB_LICENSE.txt") elseif (APPLE) list (APPEND CPACK_GENERATOR "STGZ") list (APPEND CPACK_GENERATOR "DragNDrop") From 7fc68487b3ef3deb50b2abf7cd4a7dff96462138 Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Fri, 28 Apr 2023 11:08:59 -0500 Subject: [PATCH 173/231] Remove unused variable warning (#2828) Polaris FORTRAN compiler reported the unused variable warning. --- fortran/test/tH5T_F03.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fortran/test/tH5T_F03.F90 b/fortran/test/tH5T_F03.F90 index 2256b50f210..86e49b60de4 100644 --- a/fortran/test/tH5T_F03.F90 +++ b/fortran/test/tH5T_F03.F90 @@ -1374,7 +1374,7 @@ SUBROUTINE t_enum(total_error) INTEGER(SIZE_T) , PARAMETER :: NAME_BUF_SIZE = 16 ! Enumerated type - INTEGER, PARAMETER :: SOLID=0, LIQUID=1, GAS=2, PLASMA=3 + INTEGER, PARAMETER :: SOLID=0, PLASMA=3 INTEGER(HID_T) :: file, filetype, memtype, space, dset ! 
Handles From 64974d5ee8a9ab44c30c5a7cec5fc9d6f6f26d2e Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Fri, 28 Apr 2023 11:10:48 -0500 Subject: [PATCH 174/231] Update H5Dget_space_status bug note to reference 1.14.0 (#2839) --- src/H5Dpublic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index a1d1b062a3f..4315c7b9a5a 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -461,7 +461,7 @@ H5_DLL hid_t H5Dget_space_async(hid_t dset_id, hid_t es_id); * \details H5Dget_space_status() determines whether space has been allocated * for the dataset \p dset_id. * - * \note \Bold{BUG:} Prior to the HDF5 1.13.1, 1.12.2 and 1.10.9 releases, + * \note \Bold{BUG:} Prior to the HDF5 1.14.0, 1.12.2 and 1.10.9 releases, * H5Dget_space_status() may return incorrect space allocation status * values for datasets with filters applied to them. * H5Dget_space_status() calculated the space allocation status by From 4a6112c273fbc1a05f155cf97350d13c60c8663f Mon Sep 17 00:00:00 2001 From: Neil Fortner Date: Fri, 28 Apr 2023 18:58:25 -0500 Subject: [PATCH 175/231] Implement selection I/O with type conversion (#2823) Initial implementation of selection I/O with type conversion. Allows Parallel collective I/O with type conversion, as long as selection I/O is enabled. --- bin/trace | 1 + doxygen/examples/tables/propertyLists.dox | 13 +- release_docs/RELEASE.txt | 10 + src/H5.c | 13 +- src/H5CX.c | 224 +- src/H5CXprivate.h | 6 + src/H5Dchunk.c | 140 +- src/H5Dcompact.c | 3 +- src/H5Dcontig.c | 91 +- src/H5Defl.c | 3 +- src/H5Dio.c | 477 ++- src/H5Dmpio.c | 60 +- src/H5Dpkg.h | 89 +- src/H5Dprivate.h | 15 +- src/H5Dscatgath.c | 567 ++- src/H5Dvirtual.c | 3 +- src/H5FDint.c | 28 + src/H5Fprivate.h | 1 + src/H5Fquery.c | 27 + src/H5Pdxpl.c | 355 ++ src/H5Ppublic.h | 252 +- src/H5Sprivate.h | 1 + src/H5Spublic.h | 14 +- src/H5Sselect.c | 101 +- src/H5private.h | 8 - src/H5trace.c | 25 + test/CMakeLists.txt | 1 + test/Makefile.am | 2 +- test/enc_dec_plist.c | 6 + test/gen_plist.c | 5 + test/select_io_dset.c | 3269 ++++++++++++++++++ test/testfiles/plist_files/def_dxpl_32be | Bin 225 -> 262 bytes test/testfiles/plist_files/def_dxpl_32le | Bin 225 -> 262 bytes test/testfiles/plist_files/def_dxpl_64be | Bin 225 -> 262 bytes test/testfiles/plist_files/def_dxpl_64le | Bin 225 -> 262 bytes test/testfiles/plist_files/dxpl_32be | Bin 229 -> 266 bytes test/testfiles/plist_files/dxpl_32le | Bin 229 -> 266 bytes test/testfiles/plist_files/dxpl_64be | Bin 229 -> 266 bytes test/testfiles/plist_files/dxpl_64le | Bin 229 -> 266 bytes testpar/CMakeLists.txt | 2 + testpar/Makefile.am | 2 +- testpar/t_2Gio.c | 77 +- testpar/t_coll_chunk.c | 9 +- testpar/t_dset.c | 90 +- testpar/t_select_io_dset.c | 3786 +++++++++++++++++++++ testpar/t_subfiling_vfd.c | 68 +- 46 files changed, 9466 insertions(+), 378 deletions(-) create mode 100644 test/select_io_dset.c create mode 100644 testpar/t_select_io_dset.c diff --git a/bin/trace b/bin/trace index 2b563e5699b..c620b805368 100755 --- a/bin/trace +++ b/bin/trace @@ -37,6 +37,7 @@ $Source = ""; "H5D_alloc_time_t" => "Da", "H5D_append_cb_t" => "DA", "H5FD_mpio_collective_opt_t" => "Dc", + "H5D_selection_io_mode_t" => "DC", "H5D_fill_time_t" => "Df", "H5D_fill_value_t" => "DF", "H5D_gather_func_t" => "Dg", diff --git a/doxygen/examples/tables/propertyLists.dox b/doxygen/examples/tables/propertyLists.dox index 375fd509702..4e20197ad7f 100644 --- a/doxygen/examples/tables/propertyLists.dox +++ b/doxygen/examples/tables/propertyLists.dox @@ -694,6 +694,18 
@@ of the library for reading or writing the actual data.
    Gets local and global causes that broke collective I/O on the last parallel I/O call.
    #H5Pset_selection_io/#H5Pget_selection_ioSets/gets the selection I/O mode.
    #H5Pget_no_selection_io_causeGets the cause for not performing selection or vector I/O on the last parallel I/O call.
    #H5Pset_modify_write_buf/#H5Pget_modify_write_bufSets/gets a flag allowing the library to modify the contents of the write buffer.
    H5Pset_preserve/H5Pget_preserve No longer available, deprecated as it no longer has any effect.
    Object Size This is the size of the the fields + Object Size This is the size of the fields above plus the object data stored for the object. The actual storage size is rounded up to a multiple of eight.Used by the library before version 1.6.1. In this version, the Flags field is used to indicate whether the actual message is stored in the global heap (never implemented). The Pointer field - either contains the the header message address in the global heap + either contains the header message address in the global heap (never implemented) or the address of the shared object header.
    diff --git a/doxygen/examples/ImageSpec.html b/doxygen/examples/ImageSpec.html index 1b700ff7a93..130d86ecf6a 100644 --- a/doxygen/examples/ImageSpec.html +++ b/doxygen/examples/ImageSpec.html @@ -851,7 +851,7 @@

    "RGB"
    -Each color index contains a triplet where the the first value defines the +Each color index contains a triplet where the first value defines the red component, second defines the green component, and the third the blue component.
    @@ -859,7 +859,7 @@

    "CMY"
    -Each color index contains a triplet where the the first value defines the +Each color index contains a triplet where the first value defines the cyan component, second defines the magenta component, and the third the yellow component.
    @@ -867,7 +867,7 @@

    "CMYK"
    -Each color index contains a quadruplet where the the first value defines +Each color index contains a quadruplet where the first value defines the cyan component, second defines the magenta component, the third the yellow component, and the forth the black component.
    @@ -875,7 +875,7 @@

    "YCbCr"
    -Class Y encoding model. Each color index contains a triplet where the the +Class Y encoding model. Each color index contains a triplet where the first value defines the luminance, second defines the Cb Chromonance, and the third the Cr Chromonance.
    @@ -884,14 +884,14 @@

    Composite encoding color model. Each color index contains a triplet where -the the first value defines the luminance component, second defines the +the first value defines the luminance component, second defines the chromonance component, and the third the value component.
    "HSV"
    -Each color index contains a triplet where the the first value defines the +Each color index contains a triplet where the first value defines the hue component, second defines the saturation component, and the third the value component. The hue component defines the hue spectrum with a low value representing magenta/red progressing to a high value which would diff --git a/doxygen/examples/tables/propertyLists.dox b/doxygen/examples/tables/propertyLists.dox index 4e20197ad7f..a260c9de683 100644 --- a/doxygen/examples/tables/propertyLists.dox +++ b/doxygen/examples/tables/propertyLists.dox @@ -245,7 +245,7 @@ file access property list. #H5Pset_page_buffer_size/#H5Pget_page_buffer_size -Set/get the the maximum size for the page buffer. +Set/get the maximum size for the page buffer. #H5Pset_sieve_buf_size/#H5Pget_sieve_buf_size diff --git a/doxygen/hdf5_navtree_hacks.js b/doxygen/hdf5_navtree_hacks.js index dda89846701..804701f7f6f 100644 --- a/doxygen/hdf5_navtree_hacks.js +++ b/doxygen/hdf5_navtree_hacks.js @@ -141,7 +141,7 @@ function initNavTree(toroot,relpath) $(window).on("load", showRoot); } -// return false if the the node has no children at all, or has only section/subsection children +// return false if the node has no children at all, or has only section/subsection children function checkChildrenData(node) { if (!(typeof(node.childrenData)==='string')) { for (var i in node.childrenData) { diff --git a/fortran/src/H5Af.c b/fortran/src/H5Af.c index ba3b62d3d2e..0fdd8fdf99e 100644 --- a/fortran/src/H5Af.c +++ b/fortran/src/H5Af.c @@ -359,7 +359,7 @@ h5aget_name_by_idx_c(hid_t_f *loc_id, _fcd obj_name, size_t_f *obj_namelen, int_ * loc_id - Object identifier * OUTPUTS * - * corder_valid - Indicates whether the the creation order data is valid for this attribute + * corder_valid - Indicates whether the creation order data is valid for this attribute * corder - Is a positive integer containing the creation order of the attribute * cset - Indicates the character set used for the attribute’s name * data_size - indicates the size, in the number of characters, of the attribute @@ -427,7 +427,7 @@ h5aget_info_c(hid_t_f *loc_id, int_f *corder_valid, int_f *corder, int_f *cset, * lapl_id - Link access property list * OUTPUTS * - * corder_valid - Indicates whether the the creation order data is valid for this attribute + * corder_valid - Indicates whether the creation order data is valid for this attribute * corder - Is a positive integer containing the creation order of the attribute * cset - Indicates the character set used for the attribute’s name * data_size - indicates the size, in the number of characters, of the attribute @@ -493,7 +493,7 @@ h5aget_info_by_idx_c(hid_t_f *loc_id, _fcd obj_name, size_t_f *obj_namelen, int_ * lapl_id - Link access property list * OUTPUTS * - * corder_valid - Indicates whether the the creation order data is valid for this attribute + * corder_valid - Indicates whether the creation order data is valid for this attribute * corder - Is a positive integer containing the creation order of the attribute * cset - Indicates the character set used for the attribute’s name * data_size - indicates the size, in the number of characters, of the attribute diff --git a/fortran/src/H5Lf.c b/fortran/src/H5Lf.c index 63bed998fd4..c7ce51196fc 100644 --- a/fortran/src/H5Lf.c +++ b/fortran/src/H5Lf.c @@ -232,7 +232,7 @@ h5lget_info_c(hid_t_f *link_loc_id, _fcd link_name, size_t_f *link_namelen, int_ * lapl_id - Link access property list * OUTPUTS * - * corder_valid - Indicates 
whether the the creation order data is valid for this attribute + * corder_valid - Indicates whether the creation order data is valid for this attribute * corder - Is a positive integer containing the creation order of the attribute * cset - Indicates the character set used for the attribute’s name * data_size - indicates the size, in the number of characters, of the attribute diff --git a/fortran/src/H5Sf.c b/fortran/src/H5Sf.c index d04b63913e0..00b97f810c5 100644 --- a/fortran/src/H5Sf.c +++ b/fortran/src/H5Sf.c @@ -176,7 +176,7 @@ h5scopy_c(hid_t_f *space_id, hid_t_f *new_space_id) * h5sget_select_hyper_nblocks_c * PURPOSE * Call H5SH5Sget_select_hyper_nblocks to - * get the the number of hyperslab blocks in + * get the number of hyperslab blocks in * the current dataspace selection if successful * INPUTS * space_id - identifier of the dataspace @@ -215,7 +215,7 @@ h5sget_select_hyper_nblocks_c(hid_t_f *space_id, hssize_t_f *num_blocks) * h5sget_select_elem_npoints_c * PURPOSE * Call H5Sget_select_elem_npoints to - * get the the number of element points in + * get the number of element points in * the current dataspace selection if successful * INPUTS * space_id - identifier of the dataspace diff --git a/fortran/src/H5Tf.c b/fortran/src/H5Tf.c index 11f62b76168..a74f5d193d6 100644 --- a/fortran/src/H5Tf.c +++ b/fortran/src/H5Tf.c @@ -1373,7 +1373,7 @@ h5tget_member_index_c(hid_t_f *type_id, _fcd name, int_f *namelen, int_f *idx) * type_id - identifier of the dataspace * member_no - Number of the field whose offset is requested * OUTPUTS - * offset - byte offset of the the beginning of the field of + * offset - byte offset of the beginning of the field of * a compound datatype * RETURNS * always 0 diff --git a/fortran/test/tH5A_1_8.F90 b/fortran/test/tH5A_1_8.F90 index d43279e7545..03e26ec1d9f 100644 --- a/fortran/test/tH5A_1_8.F90 +++ b/fortran/test/tH5A_1_8.F90 @@ -234,7 +234,7 @@ SUBROUTINE test_attr_corder_create_compact(fcpl,fapl, total_error) INTEGER(HID_T) :: attr !String Attribute identifier INTEGER(HSIZE_T), DIMENSION(7) :: data_dims - LOGICAL :: f_corder_valid ! Indicates whether the the creation order data is valid for this attribute + LOGICAL :: f_corder_valid ! Indicates whether the creation order data is valid for this attribute INTEGER :: corder ! Is a positive integer containing the creation order of the attribute INTEGER :: cset ! Indicates the character set used for the attribute’s name INTEGER(HSIZE_T) :: data_size ! indicates the size, in the number of characters @@ -411,7 +411,7 @@ SUBROUTINE test_attr_null_space(fcpl, fapl, total_error) INTEGER(HSIZE_T) :: storage_size ! attributes storage requirements - LOGICAL :: f_corder_valid ! Indicates whether the the creation order data is valid for this attribute + LOGICAL :: f_corder_valid ! Indicates whether the creation order data is valid for this attribute INTEGER :: corder ! Is a positive integer containing the creation order of the attribute INTEGER :: cset ! Indicates the character set used for the attribute’s name INTEGER(HSIZE_T) :: data_size ! indicates the size, in the number of characters @@ -753,7 +753,7 @@ SUBROUTINE test_attr_info_by_idx(new_format, fcpl, fapl, total_error) INTEGER(HID_T) :: attr !String Attribute identifier INTEGER(HSIZE_T), DIMENSION(7) :: data_dims - LOGICAL :: f_corder_valid ! Indicates whether the the creation order data is valid for this attribute + LOGICAL :: f_corder_valid ! Indicates whether the creation order data is valid for this attribute INTEGER :: corder ! 
Is a positive integer containing the creation order of the attribute INTEGER :: cset ! Indicates the character set used for the attribute’s name INTEGER(HSIZE_T) :: data_size ! indicates the size, in the number of characters @@ -934,7 +934,7 @@ SUBROUTINE attr_info_by_idx_check(obj_id, attrname, n, use_index, total_error ) CHARACTER(LEN=*) :: attrname INTEGER(HSIZE_T) :: n LOGICAL :: use_index - LOGICAL :: f_corder_valid ! Indicates whether the the creation order data is valid for this attribute + LOGICAL :: f_corder_valid ! Indicates whether the creation order data is valid for this attribute INTEGER :: corder ! Is a positive integer containing the creation order of the attribute INTEGER :: cset ! Indicates the character set used for the attribute’s name INTEGER(HSIZE_T) :: data_size ! indicates the size, in the number of characters @@ -1397,7 +1397,7 @@ SUBROUTINE test_attr_delete_by_idx(new_format, fcpl, fapl, total_error) INTEGER(HID_T) :: attr !String Attribute identifier INTEGER(HSIZE_T), DIMENSION(7) :: data_dims - LOGICAL :: f_corder_valid ! Indicates whether the the creation order data is valid for this attribute + LOGICAL :: f_corder_valid ! Indicates whether the creation order data is valid for this attribute INTEGER :: corder ! Is a positive integer containing the creation order of the attribute INTEGER :: cset ! Indicates the character set used for the attribute’s name INTEGER(HSIZE_T) :: data_size ! indicates the size, in the number of characters @@ -2690,7 +2690,7 @@ SUBROUTINE attr_open_check(fid, dsetname, obj_id, max_attrs, total_error ) INTEGER :: u CHARACTER (LEN=8) :: attrname INTEGER :: error - LOGICAL :: f_corder_valid ! Indicates whether the the creation order data is valid for this attribute + LOGICAL :: f_corder_valid ! Indicates whether the creation order data is valid for this attribute INTEGER :: corder ! Is a positive integer containing the creation order of the attribute INTEGER :: cset ! Indicates the character set used for the attribute’s name INTEGER(HSIZE_T) :: data_size ! indicates the size, in the number of characters diff --git a/hl/test/test_ds.c b/hl/test/test_ds.c index b74b62a428d..70af31a3686 100644 --- a/hl/test/test_ds.c +++ b/hl/test/test_ds.c @@ -3673,7 +3673,7 @@ read_scale(hid_t dset, unsigned dim, hid_t scale_id, void *visitor_data) * Function: match_dim_scale * * Purpose: example operator function used by H5DSiterate_scales, used - * to verify the the DSID scale size matches the dataset DIM size + * to verify the DSID scale size matches the dataset DIM size * * Return: * The return values from an operator are: diff --git a/hl/test/test_dset_append.c b/hl/test/test_dset_append.c index f8d38c224b2..c4afe8917c4 100644 --- a/hl/test/test_dset_append.c +++ b/hl/test/test_dset_append.c @@ -1265,7 +1265,7 @@ main(void) /* * The following tests illustrate the scenarios when H5DOappend does not work with extensible array * indexing: - * - when the the dataset has 1 unlimited dimension and the other dimension is fixed but extendible + * - when the dataset has 1 unlimited dimension and the other dimension is fixed but extendible * - the dataset expands along 1 dimension and then expands along the other dimension */ flush_ct = 0; /* Reset flush counter */ diff --git a/release_docs/HISTORY-1_0-1_8_0.txt b/release_docs/HISTORY-1_0-1_8_0.txt index 3669f4d9561..333d0dc58cb 100644 --- a/release_docs/HISTORY-1_0-1_8_0.txt +++ b/release_docs/HISTORY-1_0-1_8_0.txt @@ -1407,7 +1407,7 @@ Known Problems for your installation. 
Observe that the basic idea is to insert the script as the first item - on the command line which executes the the test. The script then + on the command line which executes the test. The script then executes the test and filters out the offending text before passing it on. diff --git a/release_docs/HISTORY-1_8_0-1_10_0.txt b/release_docs/HISTORY-1_8_0-1_10_0.txt index 7b84fbcf428..cc42d3bc7a5 100644 --- a/release_docs/HISTORY-1_8_0-1_10_0.txt +++ b/release_docs/HISTORY-1_8_0-1_10_0.txt @@ -791,7 +791,7 @@ Bug Fixes since HDF5-1.8.0 release and the external filters are being built, the CMAKE_BUILD_TYPE define must be set to the same value as the configuration (-DCMAKE_BUILD_TYPE:STRING=Release if using -C Release). This is needed - by the the szip and zlib filter build commands. (ADB - HDFFV-8695) + by the szip and zlib filter build commands. (ADB - HDFFV-8695) - CMake: Remove use of XLATE_UTILITY program. (ADB - 2014/03/28 HDFFV-8640) - CMake: Added missing quotes in setting the CMAKE_EXE_LINKER_FLAGS for the MPI option. (ADB - 2014/02/27 HDFFV-8674) @@ -1698,7 +1698,7 @@ Known Problems for your installation. Observe that the basic idea is to insert the script as the first item - on the command line which executes the the test. The script then + on the command line which executes the test. The script then executes the test and filters out the offending text before passing it on. diff --git a/test/tselect.c b/test/tselect.c index 3dd739c1deb..d67e5bdea9e 100644 --- a/test/tselect.c +++ b/test/tselect.c @@ -2280,7 +2280,7 @@ test_select_hyper_contig_dr__run_test(int test_num, const uint16_t *cube_buf, co * H5Sselect_shape_same() views as being of the same shape. * * Start by writing small_rank D slices from the in memory large cube, to - * the the on disk small cube dataset. After each write, read the small + * the on disk small cube dataset. After each write, read the small * cube dataset back from disk, and verify that it contains the expected * data. Verify that H5Sselect_shape_same() returns true on the * memory and file selections. @@ -3309,7 +3309,7 @@ test_select_hyper_checker_board_dr__run_test(int test_num, const uint16_t *cube_ * H5Sselect_shape_same() views as being of the same shape. * * Start by writing small_rank D slices from the in memory large cube, to - * the the on disk small cube dataset. After each write, read the small + * the on disk small cube dataset. After each write, read the small * cube dataset back from disk, and verify that it contains the expected * data. Verify that H5Sselect_shape_same() returns true on the * memory and file selections. 
@@ -15840,7 +15840,7 @@ test_hyper_io_1d(void) hsize_t chunk_dims[1]; /* Chunk dimension size */ hsize_t offset[1]; /* Starting offset for hyperslab */ hsize_t stride[1]; /* Distance between blocks in the hyperslab selection */ - hsize_t count[1]; /* # of blocks in the the hyperslab selection */ + hsize_t count[1]; /* # of blocks in the hyperslab selection */ hsize_t block[1]; /* Size of block in the hyperslab selection */ unsigned int wdata[CHUNKSZ]; /* Data to be written */ unsigned int rdata[NUM_ELEMENTS / 10]; /* Data to be read */ diff --git a/tools/lib/h5tools_dump.c b/tools/lib/h5tools_dump.c index 435ca87fb5f..57a171215b0 100644 --- a/tools/lib/h5tools_dump.c +++ b/tools/lib/h5tools_dump.c @@ -3747,7 +3747,7 @@ h5tools_dump_dcpl(FILE *stream, const h5tool_format_t *info, h5tools_context_t * /*------------------------------------------------------------------------- * Function: dump_comment * - * Purpose: prints the comment for the the object name + * Purpose: prints the comment for the object name * * Return: void *------------------------------------------------------------------------- From c66d028c6ceb94eac3c4f9f632a90034e9369fa7 Mon Sep 17 00:00:00 2001 From: Neil Fortner Date: Mon, 1 May 2023 14:31:09 -0500 Subject: [PATCH 185/231] Skip page buffer test for "no selection I/O cause" when using split or multi driver (#2866) --- test/select_io_dset.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/select_io_dset.c b/test/select_io_dset.c index 9a1de06f2df..0724365b767 100644 --- a/test/select_io_dset.c +++ b/test/select_io_dset.c @@ -2860,6 +2860,17 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ int rbuf[DSET_SELECT_DIM]; int i; + /* Check for (currently) incompatible combinations */ + if (test_mode & TEST_PAGE_BUFFER) { + char *env_h5_drvr = NULL; + + /* The split and multi driver are not compatible with page buffering. No message since the other + * cases aren't skipped. 
*/ + env_h5_drvr = HDgetenv(HDF5_DRIVER); + if (env_h5_drvr && (!HDstrcmp(env_h5_drvr, "split") || !HDstrcmp(env_h5_drvr, "multi"))) + return 0; + } + if ((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0) FAIL_STACK_ERROR; From 709605d19cdbc2a6fe3999350dfe9d22844901c6 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Mon, 1 May 2023 14:46:04 -0500 Subject: [PATCH 186/231] Correct usage of CMAKE_BUILD_TYPE (#2863) Fixes a problem when using multi-config builds with the CMake GUI --- config/cmake/HDFLibMacros.cmake | 8 ++++---- config/cmake/HDFMacros.cmake | 20 ++++++++++---------- config/cmake/libhdf5.settings.cmake.in | 2 +- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/config/cmake/HDFLibMacros.cmake b/config/cmake/HDFLibMacros.cmake index b3985d5db6a..282fe0cae7e 100644 --- a/config/cmake/HDFLibMacros.cmake +++ b/config/cmake/HDFLibMacros.cmake @@ -88,7 +88,7 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding) -DBUILD_SHARED_LIBS:BOOL=OFF -DSZIP_PACKAGE_EXT:STRING=${HDF_PACKAGE_EXT} -DSZIP_EXTERNALLY_CONFIGURED:BOOL=OFF - -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} + -DCMAKE_BUILD_TYPE:STRING=${HDF_CFG_NAME} -DCMAKE_DEBUG_POSTFIX:STRING=${CMAKE_DEBUG_POSTFIX} -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_INSTALL_PREFIX} -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} @@ -110,7 +110,7 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding) -DBUILD_SHARED_LIBS:BOOL=OFF -DSZIP_PACKAGE_EXT:STRING=${HDF_PACKAGE_EXT} -DSZIP_EXTERNALLY_CONFIGURED:BOOL=OFF - -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} + -DCMAKE_BUILD_TYPE:STRING=${HDF_CFG_NAME} -DCMAKE_DEBUG_POSTFIX:STRING=${CMAKE_DEBUG_POSTFIX} -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_INSTALL_PREFIX} -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} @@ -166,7 +166,7 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type) -DBUILD_SHARED_LIBS:BOOL=OFF -DZLIB_PACKAGE_EXT:STRING=${HDF_PACKAGE_EXT} -DZLIB_EXTERNALLY_CONFIGURED:BOOL=OFF - -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} + -DCMAKE_BUILD_TYPE:STRING=${HDF_CFG_NAME} -DCMAKE_DEBUG_POSTFIX:STRING=${CMAKE_DEBUG_POSTFIX} -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_INSTALL_PREFIX} -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} @@ -187,7 +187,7 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type) -DBUILD_SHARED_LIBS:BOOL=OFF -DZLIB_PACKAGE_EXT:STRING=${HDF_PACKAGE_EXT} -DZLIB_EXTERNALLY_CONFIGURED:BOOL=OFF - -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} + -DCMAKE_BUILD_TYPE:STRING=${HDF_CFG_NAME} -DCMAKE_DEBUG_POSTFIX:STRING=${CMAKE_DEBUG_POSTFIX} -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_INSTALL_PREFIX} -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} diff --git a/config/cmake/HDFMacros.cmake b/config/cmake/HDFMacros.cmake index 5ac7316883c..938b876f070 100644 --- a/config/cmake/HDFMacros.cmake +++ b/config/cmake/HDFMacros.cmake @@ -14,8 +14,15 @@ macro (SET_HDF_BUILD_TYPE) get_property (_isMultiConfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) if (_isMultiConfig) - set (HDF_CFG_NAME ${CMAKE_BUILD_TYPE}) - set (HDF_BUILD_TYPE ${CMAKE_CFG_INTDIR}) + set_property (CACHE CMAKE_CONFIGURATION_TYPES PROPERTY STRINGS "Debug" "Release" + "MinSizeRel" "RelWithDebInfo" "Developer" FORCE) + if (CMAKE_BUILD_TYPE) + set (HDF_CFG_NAME ${CMAKE_BUILD_TYPE}) + set (HDF_BUILD_TYPE ${CMAKE_BUILD_TYPE}) + else () + set (HDF_CFG_NAME "RelWithDebInfo") + set (HDF_BUILD_TYPE ${CMAKE_CFG_INTDIR}) + endif () set (HDF_CFG_BUILD_TYPE \${CMAKE_INSTALL_CONFIG_NAME}) else () set (HDF_CFG_BUILD_TYPE ".") @@ -27,13 
+34,6 @@ macro (SET_HDF_BUILD_TYPE) set (HDF_BUILD_TYPE "Release") endif () endif () - if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) - message (VERBOSE "Setting build type to 'RelWithDebInfo' as none was specified.") - set (CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "Choose the type of build." FORCE) - # Set the possible values of build type for cmake-gui - set_property (CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" - "MinSizeRel" "RelWithDebInfo" "Developer") - endif () endmacro () #------------------------------------------------------------------------------- @@ -455,7 +455,7 @@ macro (HDF_DIR_PATHS package_prefix) ) get_property(_isMultiConfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) if(_isMultiConfig) - set (CMAKE_TEST_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${CMAKE_BUILD_TYPE}) + set (CMAKE_TEST_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${HDF_CFG_NAME}) set (CMAKE_PDB_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin CACHE PATH "Single Directory for all pdb files." ) diff --git a/config/cmake/libhdf5.settings.cmake.in b/config/cmake/libhdf5.settings.cmake.in index f2cf6c073f7..00ae980c2b9 100644 --- a/config/cmake/libhdf5.settings.cmake.in +++ b/config/cmake/libhdf5.settings.cmake.in @@ -13,7 +13,7 @@ General Information: Compiling Options: ------------------ - Build Mode: @CMAKE_BUILD_TYPE@ + Build Mode: @HDF_CFG_NAME@ Debugging Symbols: @HDF5_ENABLE_SYMBOLS@ Asserts: @HDF5_ENABLE_ASSERTS@ Profiling: @HDF5_ENABLE_PROFILING@ From 048e31749ca6131a8f561469a1fbf6f284a14eed Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Mon, 1 May 2023 17:20:17 -0500 Subject: [PATCH 187/231] Remove duplicate variable for configuration use (#2870) --- config/cmake/HDFMacros.cmake | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/config/cmake/HDFMacros.cmake b/config/cmake/HDFMacros.cmake index 938b876f070..e0e122031b6 100644 --- a/config/cmake/HDFMacros.cmake +++ b/config/cmake/HDFMacros.cmake @@ -14,24 +14,21 @@ macro (SET_HDF_BUILD_TYPE) get_property (_isMultiConfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) if (_isMultiConfig) - set_property (CACHE CMAKE_CONFIGURATION_TYPES PROPERTY STRINGS "Debug" "Release" - "MinSizeRel" "RelWithDebInfo" "Developer" FORCE) + # HDF_CFG_BUILD_TYPE is used in the Fortran install commands for the build location of the .mod files + set (HDF_CFG_BUILD_TYPE \${CMAKE_INSTALL_CONFIG_NAME}) if (CMAKE_BUILD_TYPE) + # set the default to the specified command line define set (HDF_CFG_NAME ${CMAKE_BUILD_TYPE}) - set (HDF_BUILD_TYPE ${CMAKE_BUILD_TYPE}) else () - set (HDF_CFG_NAME "RelWithDebInfo") - set (HDF_BUILD_TYPE ${CMAKE_CFG_INTDIR}) + # set the default to the MultiConfig variable + set (HDF_CFG_NAME ${CMAKE_CFG_INTDIR}) endif () - set (HDF_CFG_BUILD_TYPE \${CMAKE_INSTALL_CONFIG_NAME}) else () set (HDF_CFG_BUILD_TYPE ".") if (CMAKE_BUILD_TYPE) set (HDF_CFG_NAME ${CMAKE_BUILD_TYPE}) - set (HDF_BUILD_TYPE ${CMAKE_BUILD_TYPE}) else () set (HDF_CFG_NAME "Release") - set (HDF_BUILD_TYPE "Release") endif () endif () endmacro () @@ -175,8 +172,8 @@ macro (HDF_IMPORT_SET_LIB_OPTIONS libtarget libname libtype libversion) ) else () set_target_properties (${libtarget} PROPERTIES - IMPORTED_IMPLIB "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${HDF_BUILD_TYPE}/${CMAKE_IMPORT_LIBRARY_PREFIX}${IMPORT_LIB_NAME}${CMAKE_IMPORT_LIBRARY_SUFFIX}" - IMPORTED_LOCATION 
"${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${HDF_BUILD_TYPE}/${CMAKE_IMPORT_LIBRARY_PREFIX}${IMPORT_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}" + IMPORTED_IMPLIB "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${HDF_CFG_NAME}/${CMAKE_IMPORT_LIBRARY_PREFIX}${IMPORT_LIB_NAME}${CMAKE_IMPORT_LIBRARY_SUFFIX}" + IMPORTED_LOCATION "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${HDF_CFG_NAME}/${CMAKE_IMPORT_LIBRARY_PREFIX}${IMPORT_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}" ) endif () else () @@ -201,7 +198,7 @@ macro (HDF_IMPORT_SET_LIB_OPTIONS libtarget libname libtype libversion) else () if (WIN32 AND NOT MINGW) set_target_properties (${libtarget} PROPERTIES - IMPORTED_LOCATION "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${HDF_BUILD_TYPE}/${IMPORT_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}" + IMPORTED_LOCATION "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${HDF_CFG_NAME}/${IMPORT_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}" IMPORTED_LINK_INTERFACE_LANGUAGES "C" ) else () From 31c5ff6e4d34c7decba54ea01c0675b935e5800e Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Mon, 1 May 2023 17:21:47 -0500 Subject: [PATCH 188/231] Add Fortran Selection IO APIs (#2864) new selection IO fortran APIs with tests --- doxygen/aliases | 1 + fortran/src/H5Pff.F90 | 134 +++++++++++++++++++++++ fortran/src/H5_f.c | 3 + fortran/src/H5_ff.F90 | 63 ++++++----- fortran/src/H5f90global.F90 | 6 + fortran/src/hdf5_fortrandll.def.in | 4 + fortran/test/H5_test_buildiface.F90 | 77 ++++++++++--- fortran/test/fortranlib_test.F90 | 6 +- fortran/test/tH5P.F90 | 164 +++++++++++++++++++++++++++- release_docs/RELEASE.txt | 6 +- testpar/CMakeLists.txt | 1 + 11 files changed, 413 insertions(+), 52 deletions(-) diff --git a/doxygen/aliases b/doxygen/aliases index 27090e6f3f1..96977f398e2 100644 --- a/doxygen/aliases +++ b/doxygen/aliases @@ -383,4 +383,5 @@ ALIASES += fortran_obsolete="Obsolete API, use the Fortran 2003 version instead. ALIASES += fortran_file="Pointer to filename the async subroutine is being called from, filename must be null character terminated" ALIASES += fortran_func="Pointer to function name the async subroutine is being called in, func must be null character terminated" ALIASES += fortran_line="Line number the async subroutine is being called at" +ALIASES += fortran_plist_id="Property list identifier" diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90 index 75cd3230657..49917dd6411 100644 --- a/fortran/src/H5Pff.F90 +++ b/fortran/src/H5Pff.F90 @@ -606,6 +606,140 @@ END FUNCTION h5pget_userblock_c hdferr = h5pget_userblock_c(prp_id, block_size) END SUBROUTINE h5pget_userblock_f +!> +!! \ingroup FH5P +!! +!! \brief Sets the selection I/O mode +!! +!! \param plist_id \fortran_plist_id +!! \param selection_io_mode The selection I/O mode +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5Pset_selection_io() +!! + SUBROUTINE h5pset_selection_io_f(plist_id, selection_io_mode, hdferr) + + IMPLICIT NONE + + INTEGER(HID_T), INTENT(IN) :: plist_id + INTEGER, INTENT(IN) :: selection_io_mode + INTEGER, INTENT(OUT) :: hdferr + + INTERFACE + INTEGER(C_INT) FUNCTION H5Pset_selection_io(plist_id, selection_io_mode) BIND(C, NAME='H5Pset_selection_io') + IMPORT :: HID_T, C_INT + IMPLICIT NONE + INTEGER(HID_T), VALUE :: plist_id + INTEGER(C_INT), VALUE :: selection_io_mode + END FUNCTION H5Pset_selection_io + END INTERFACE + + hdferr = INT(H5Pset_selection_io(plist_id, INT(selection_io_mode, C_INT))) + + END SUBROUTINE h5pset_selection_io_f + +!> +!! \ingroup FH5P +!! +!! \brief Retrieves the selection I/O mode +!! +!! \param plist_id \fortran_plist_id +!! 
\param selection_io_mode The selection I/O mode +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5Pget_selection_io() +!! + SUBROUTINE h5pget_selection_io_f(plist_id, selection_io_mode, hdferr) + + IMPLICIT NONE + + INTEGER(HID_T), INTENT(IN) :: plist_id + INTEGER, INTENT(OUT) :: selection_io_mode + INTEGER, INTENT(OUT) :: hdferr + + INTEGER(C_INT) :: c_selection_io_mode + + INTERFACE + INTEGER(C_INT) FUNCTION H5Pget_selection_io(plist_id, selection_io_mode) BIND(C, NAME='H5Pget_selection_io') + IMPORT :: HID_T, C_INT + IMPLICIT NONE + INTEGER(HID_T), VALUE :: plist_id + INTEGER(C_INT) :: selection_io_mode + END FUNCTION H5Pget_selection_io + END INTERFACE + + hdferr = INT(H5Pget_selection_io(plist_id, c_selection_io_mode)) + selection_io_mode = INT(c_selection_io_mode) + + END SUBROUTINE h5pget_selection_io_f + +!> +!! \ingroup FH5P +!! +!! \brief Allows the library to modify the contents of the write buffer +!! +!! \param plist_id \fortran_plist_id +!! \param modify_write_buf Whether the library can modify the contents of the write buffer +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5Pset_modify_write_buf() +!! + SUBROUTINE h5pset_modify_write_buf_f(plist_id, modify_write_buf, hdferr) + + IMPLICIT NONE + + INTEGER(HID_T), INTENT(IN) :: plist_id + LOGICAL, INTENT(IN) :: modify_write_buf + INTEGER, INTENT(OUT) :: hdferr + + INTERFACE + INTEGER(C_INT) FUNCTION H5Pset_modify_write_buf(plist_id, modify_write_buf) BIND(C, NAME='H5Pset_modify_write_buf') + IMPORT :: HID_T, C_INT, C_BOOL + IMPLICIT NONE + INTEGER(HID_T), VALUE :: plist_id + LOGICAL(C_BOOL), VALUE :: modify_write_buf + END FUNCTION H5Pset_modify_write_buf + END INTERFACE + + hdferr = INT(H5Pset_modify_write_buf(plist_id, LOGICAL(modify_write_buf, C_BOOL))) + + END SUBROUTINE h5pset_modify_write_buf_f + +!> +!! \ingroup FH5P +!! +!! \brief Retrieves the "modify write buffer" property +!! +!! \param plist_id \fortran_plist_id +!! \param modify_write_buf Whether the library can modify the contents of the write buffer +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5Pget_modify_write_buf() +!! + SUBROUTINE h5pget_modify_write_buf_f(plist_id, modify_write_buf, hdferr) + + IMPLICIT NONE + + INTEGER(HID_T), INTENT(IN) :: plist_id + LOGICAL, INTENT(OUT) :: modify_write_buf + INTEGER, INTENT(OUT) :: hdferr + + LOGICAL(C_BOOL) :: c_modify_write_buf + + INTERFACE + INTEGER(C_INT) FUNCTION H5Pget_modify_write_buf(plist_id, modify_write_buf) BIND(C, NAME='H5Pget_modify_write_buf') + IMPORT :: HID_T, C_INT, C_BOOL + IMPLICIT NONE + INTEGER(HID_T), VALUE :: plist_id + LOGICAL(C_BOOL) :: modify_write_buf + END FUNCTION H5Pget_modify_write_buf + END INTERFACE + + hdferr = INT(H5Pget_modify_write_buf(plist_id, c_modify_write_buf)) + modify_write_buf = LOGICAL(c_modify_write_buf) + + END SUBROUTINE h5pget_modify_write_buf_f + !> !! \ingroup FH5P !! 
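
As a rough illustration (not taken from the patch itself), the four dataset-transfer properties added above could be driven from an application along the following lines. The names dxpl_id, mode and modify_buf are placeholders, and the calls assume an HDF5 build that already contains this patch:

    PROGRAM selection_io_sketch
      USE HDF5
      IMPLICIT NONE
      INTEGER(HID_T) :: dxpl_id    ! dataset transfer property list (placeholder name)
      INTEGER        :: mode       ! selection I/O mode read back from the list
      LOGICAL        :: modify_buf ! "modify write buffer" setting read back
      INTEGER        :: hdferr

      CALL h5open_f(hdferr)
      CALL h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferr)

      ! Request selection I/O and allow the library to modify the write buffer
      CALL h5pset_selection_io_f(dxpl_id, H5D_SELECTION_IO_MODE_ON_F, hdferr)
      CALL h5pset_modify_write_buf_f(dxpl_id, .TRUE., hdferr)

      ! Read both settings back
      CALL h5pget_selection_io_f(dxpl_id, mode, hdferr)
      CALL h5pget_modify_write_buf_f(dxpl_id, modify_buf, hdferr)

      ! dxpl_id would then be passed as xfer_prp to h5dwrite_f/h5dread_f

      CALL h5pclose_f(dxpl_id, hdferr)
      CALL h5close_f(hdferr)
    END PROGRAM selection_io_sketch
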
diff --git a/fortran/src/H5_f.c b/fortran/src/H5_f.c index 3e1b65da3c7..a9f2d96523c 100644 --- a/fortran/src/H5_f.c +++ b/fortran/src/H5_f.c @@ -467,6 +467,9 @@ h5init_flags_c(int_f *h5d_flags, size_t_f *h5d_size_flags, int_f *h5e_flags, hid h5d_flags[26] = (int_f)H5D_VDS_FIRST_MISSING; h5d_flags[27] = (int_f)H5D_VDS_LAST_AVAILABLE; h5d_flags[28] = (int_f)H5D_VIRTUAL; + h5d_flags[29] = (int_f)H5D_SELECTION_IO_MODE_DEFAULT; + h5d_flags[30] = (int_f)H5D_SELECTION_IO_MODE_OFF; + h5d_flags[31] = (int_f)H5D_SELECTION_IO_MODE_ON; /* * H5E flags diff --git a/fortran/src/H5_ff.F90 b/fortran/src/H5_ff.F90 index f952cac6b70..651c96dd867 100644 --- a/fortran/src/H5_ff.F90 +++ b/fortran/src/H5_ff.F90 @@ -77,7 +77,7 @@ MODULE H5LIB ! ! H5D flags declaration ! - INTEGER, PARAMETER :: H5D_FLAGS_LEN = 29 + INTEGER, PARAMETER :: H5D_FLAGS_LEN = 32 INTEGER, DIMENSION(1:H5D_FLAGS_LEN) :: H5D_flags INTEGER, PARAMETER :: H5D_SIZE_FLAGS_LEN = 2 INTEGER(SIZE_T), DIMENSION(1:H5D_SIZE_FLAGS_LEN) :: H5D_size_flags @@ -394,35 +394,38 @@ END FUNCTION h5init1_flags_c ! ! H5D flags ! - H5D_COMPACT_F = H5D_flags(1) - H5D_CONTIGUOUS_F = H5D_flags(2) - H5D_CHUNKED_F = H5D_flags(3) - H5D_ALLOC_TIME_ERROR_F = H5D_flags(4) - H5D_ALLOC_TIME_DEFAULT_F = H5D_flags(5) - H5D_ALLOC_TIME_EARLY_F = H5D_flags(6) - H5D_ALLOC_TIME_LATE_F = H5D_flags(7) - H5D_ALLOC_TIME_INCR_F = H5D_flags(8) - H5D_SPACE_STS_ERROR_F = H5D_flags(9) - H5D_SPACE_STS_NOT_ALLOCATED_F = H5D_flags(10) - H5D_SPACE_STS_PART_ALLOCATED_F = H5D_flags(11) - H5D_SPACE_STS_ALLOCATED_F = H5D_flags(12) - H5D_FILL_TIME_ERROR_F = H5D_flags(13) - H5D_FILL_TIME_ALLOC_F = H5D_flags(14) - H5D_FILL_TIME_NEVER_F = H5D_flags(15) - H5D_FILL_VALUE_ERROR_F = H5D_flags(16) - H5D_FILL_VALUE_UNDEFINED_F = H5D_flags(17) - H5D_FILL_VALUE_DEFAULT_F = H5D_flags(18) - H5D_FILL_VALUE_USER_DEFINED_F = H5D_flags(19) - H5D_CHUNK_CACHE_W0_DFLT_F = H5D_flags(20) - H5D_MPIO_NO_COLLECTIVE_F = H5D_flags(21) - H5D_MPIO_CHUNK_INDEPENDENT_F = H5D_flags(22) - H5D_MPIO_CHUNK_COLLECTIVE_F = H5D_flags(23) - H5D_MPIO_CHUNK_MIXED_F = H5D_flags(24) - H5D_MPIO_CONTIG_COLLECTIVE_F = H5D_flags(25) - H5D_VDS_ERROR_F = H5D_flags(26) - H5D_VDS_FIRST_MISSING_F = H5D_flags(27) - H5D_VDS_LAST_AVAILABLE_F = H5D_flags(28) - H5D_VIRTUAL_F = H5D_flags(29) + H5D_COMPACT_F = H5D_flags(1) + H5D_CONTIGUOUS_F = H5D_flags(2) + H5D_CHUNKED_F = H5D_flags(3) + H5D_ALLOC_TIME_ERROR_F = H5D_flags(4) + H5D_ALLOC_TIME_DEFAULT_F = H5D_flags(5) + H5D_ALLOC_TIME_EARLY_F = H5D_flags(6) + H5D_ALLOC_TIME_LATE_F = H5D_flags(7) + H5D_ALLOC_TIME_INCR_F = H5D_flags(8) + H5D_SPACE_STS_ERROR_F = H5D_flags(9) + H5D_SPACE_STS_NOT_ALLOCATED_F = H5D_flags(10) + H5D_SPACE_STS_PART_ALLOCATED_F = H5D_flags(11) + H5D_SPACE_STS_ALLOCATED_F = H5D_flags(12) + H5D_FILL_TIME_ERROR_F = H5D_flags(13) + H5D_FILL_TIME_ALLOC_F = H5D_flags(14) + H5D_FILL_TIME_NEVER_F = H5D_flags(15) + H5D_FILL_VALUE_ERROR_F = H5D_flags(16) + H5D_FILL_VALUE_UNDEFINED_F = H5D_flags(17) + H5D_FILL_VALUE_DEFAULT_F = H5D_flags(18) + H5D_FILL_VALUE_USER_DEFINED_F = H5D_flags(19) + H5D_CHUNK_CACHE_W0_DFLT_F = H5D_flags(20) + H5D_MPIO_NO_COLLECTIVE_F = H5D_flags(21) + H5D_MPIO_CHUNK_INDEPENDENT_F = H5D_flags(22) + H5D_MPIO_CHUNK_COLLECTIVE_F = H5D_flags(23) + H5D_MPIO_CHUNK_MIXED_F = H5D_flags(24) + H5D_MPIO_CONTIG_COLLECTIVE_F = H5D_flags(25) + H5D_VDS_ERROR_F = H5D_flags(26) + H5D_VDS_FIRST_MISSING_F = H5D_flags(27) + H5D_VDS_LAST_AVAILABLE_F = H5D_flags(28) + H5D_VIRTUAL_F = H5D_flags(29) + H5D_SELECTION_IO_MODE_DEFAULT_F = H5D_flags(30) + H5D_SELECTION_IO_MODE_OFF_F = H5D_flags(31) + 
H5D_SELECTION_IO_MODE_ON_F = H5D_flags(32) H5D_CHUNK_CACHE_NSLOTS_DFLT_F = H5D_size_flags(1) H5D_CHUNK_CACHE_NBYTES_DFLT_F = H5D_size_flags(2) diff --git a/fortran/src/H5f90global.F90 b/fortran/src/H5f90global.F90 index 984cae91467..c37e22d0082 100644 --- a/fortran/src/H5f90global.F90 +++ b/fortran/src/H5f90global.F90 @@ -330,6 +330,9 @@ MODULE H5GLOBAL !DEC$ATTRIBUTES DLLEXPORT :: H5D_VDS_FIRST_MISSING_F !DEC$ATTRIBUTES DLLEXPORT :: H5D_VDS_LAST_AVAILABLE_F !DEC$ATTRIBUTES DLLEXPORT :: H5D_VIRTUAL_F + !DEC$ATTRIBUTES DLLEXPORT :: H5D_SELECTION_IO_MODE_DEFAULT_F + !DEC$ATTRIBUTES DLLEXPORT :: H5D_SELECTION_IO_MODE_OFF_F + !DEC$ATTRIBUTES DLLEXPORT :: H5D_SELECTION_IO_MODE_ON_F !DEC$endif !> \addtogroup FH5D !> @{ @@ -375,6 +378,9 @@ MODULE H5GLOBAL INTEGER :: H5D_VDS_FIRST_MISSING_F !< H5D_VDS_FIRST_MISSING INTEGER :: H5D_VDS_LAST_AVAILABLE_F !< H5D_VDS_LAST_AVAILABLE INTEGER :: H5D_VIRTUAL_F !< H5D_VIRTUAL + INTEGER :: H5D_SELECTION_IO_MODE_DEFAULT_F !< H5D_SELECTION_IO_MODE_DEFAULT_F + INTEGER :: H5D_SELECTION_IO_MODE_OFF_F !< H5D_SELECTION_IO_MODE_OFF_F + INTEGER :: H5D_SELECTION_IO_MODE_ON_F !< H5D_SELECTION_IO_MODE_ON_F ! ! H5E flags declaration ! diff --git a/fortran/src/hdf5_fortrandll.def.in b/fortran/src/hdf5_fortrandll.def.in index e55be46370b..47196336a36 100644 --- a/fortran/src/hdf5_fortrandll.def.in +++ b/fortran/src/hdf5_fortrandll.def.in @@ -391,6 +391,10 @@ H5P_mp_H5PSET_VOL_F H5P_mp_H5PGET_VOL_ID_F H5P_mp_H5PSET_FILE_LOCKING_F H5P_mp_H5PGET_FILE_LOCKING_F +H5P_mp_H5PSET_SELECTION_IO_F +H5P_mp_H5PGET_SELECTION_IO_F +H5P_mp_H5PSET_MODIFY_WRITE_BUF_F +H5P_mp_H5PGET_MODIFY_WRITE_BUF_F ; Parallel @H5_NOPAREXP@H5P_mp_H5PSET_FAPL_MPIO_F @H5_NOPAREXP@H5P_mp_H5PGET_FAPL_MPIO_F diff --git a/fortran/test/H5_test_buildiface.F90 b/fortran/test/H5_test_buildiface.F90 index 0ea38526997..ca945db621a 100644 --- a/fortran/test/H5_test_buildiface.F90 +++ b/fortran/test/H5_test_buildiface.F90 @@ -133,14 +133,25 @@ PROGRAM H5_test_buildiface WRITE(11,'(A)') '!DEC$endif' ! Subroutine API - WRITE(11,'(A)') ' SUBROUTINE verify_integer_kind_'//TRIM(ADJUSTL(chr2))//'(string,value,correct_value,total_error)' + WRITE(11,'(A)') ' SUBROUTINE verify_integer_kind_'//TRIM(ADJUSTL(chr2))//'(string,value,correct_value,total_error,chck_eq)' WRITE(11,'(A)') ' IMPLICIT NONE' WRITE(11,'(A)') ' CHARACTER(LEN=*) :: string' WRITE(11,'(A)') ' INTEGER(KIND='//TRIM(ADJUSTL(chr2))//') :: value, correct_value' WRITE(11,'(A)') ' INTEGER :: total_error' - WRITE(11,'(A)') ' IF (value .NE. correct_value) THEN' - WRITE(11,'(A)') ' total_error=total_error+1' - WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT INTEGER VALIDATION ", string' + WRITE(11,'(A)') ' LOGICAL, OPTIONAL :: chck_eq' + WRITE(11,'(A)') ' LOGICAL :: chck_eq_opt' + WRITE(11,'(A)') ' chck_eq_opt = .TRUE.' + WRITE(11,'(A)') ' IF(PRESENT(chck_eq)) chck_eq_opt = chck_eq' + WRITE(11,'(A)') ' IF(chck_eq_opt .EQV. .TRUE.)THEN' + WRITE(11,'(A)') ' IF (value .NE. correct_value) THEN' + WRITE(11,'(A)') ' total_error=total_error+1' + WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT INTEGER VALIDATION ", string' + WRITE(11,'(A)') ' ENDIF' + WRITE(11,'(A)') ' ELSE' + WRITE(11,'(A)') ' IF (value .EQ. correct_value) THEN' + WRITE(11,'(A)') ' total_error=total_error+1' + WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT INTEGER VALIDATION ", string' + WRITE(11,'(A)') ' ENDIF' WRITE(11,'(A)') ' ENDIF' WRITE(11,'(A)') ' END SUBROUTINE verify_integer_kind_'//TRIM(ADJUSTL(chr2)) ENDDO @@ -157,14 +168,25 @@ PROGRAM H5_test_buildiface WRITE(11,'(A)') '!DEC$endif' ! 
Subroutine API - WRITE(11,'(A)') ' SUBROUTINE verify_real_kind_'//TRIM(ADJUSTL(chr2))//'(string,value,correct_value,total_error)' + WRITE(11,'(A)') ' SUBROUTINE verify_real_kind_'//TRIM(ADJUSTL(chr2))//'(string,value,correct_value,total_error,chck_eq)' WRITE(11,'(A)') ' IMPLICIT NONE' WRITE(11,'(A)') ' CHARACTER(LEN=*) :: string' WRITE(11,'(A)') ' REAL(KIND='//TRIM(ADJUSTL(chr2))//') :: value, correct_value' WRITE(11,'(A)') ' INTEGER :: total_error' - WRITE(11,'(A)') ' IF (.NOT.real_eq_kind_'//TRIM(ADJUSTL(chr2))//'( value, correct_value) ) THEN' - WRITE(11,'(A)') ' total_error=total_error+1' - WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT REAL VALIDATION ", string' + WRITE(11,'(A)') ' LOGICAL, OPTIONAL :: chck_eq' + WRITE(11,'(A)') ' LOGICAL :: chck_eq_opt' + WRITE(11,'(A)') ' chck_eq_opt = .TRUE.' + WRITE(11,'(A)') ' IF(PRESENT(chck_eq)) chck_eq_opt = chck_eq' + WRITE(11,'(A)') ' IF(chck_eq_opt .EQV. .TRUE.)THEN' + WRITE(11,'(A)') ' IF (.NOT.real_eq_kind_'//TRIM(ADJUSTL(chr2))//'( value, correct_value) ) THEN' + WRITE(11,'(A)') ' total_error=total_error+1' + WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT REAL VALIDATION ", string' + WRITE(11,'(A)') ' ENDIF' + WRITE(11,'(A)') ' ELSE' + WRITE(11,'(A)') ' IF (real_eq_kind_'//TRIM(ADJUSTL(chr2))//'( value, correct_value) ) THEN' + WRITE(11,'(A)') ' total_error=total_error+1' + WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT REAL VALIDATION ", string' + WRITE(11,'(A)') ' ENDIF' WRITE(11,'(A)') ' ENDIF' WRITE(11,'(A)') ' END SUBROUTINE verify_real_kind_'//TRIM(ADJUSTL(chr2)) @@ -228,14 +250,25 @@ PROGRAM H5_test_buildiface WRITE(11,'(A)') '!DEC$endif' ! Subroutine API - WRITE(11,'(A)') ' SUBROUTINE verify_character(string,value,correct_value,total_error)' + WRITE(11,'(A)') ' SUBROUTINE verify_character(string,value,correct_value,total_error,chck_eq)' WRITE(11,'(A)') ' IMPLICIT NONE' WRITE(11,'(A)') ' CHARACTER*(*) :: string' WRITE(11,'(A)') ' CHARACTER*(*) :: value, correct_value' WRITE(11,'(A)') ' INTEGER :: total_error' - WRITE(11,'(A)') ' IF (TRIM(value) .NE. TRIM(correct_value)) THEN' - WRITE(11,'(A)') ' total_error = total_error + 1' - WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT VALIDATION ", string' + WRITE(11,'(A)') ' LOGICAL, OPTIONAL :: chck_eq' + WRITE(11,'(A)') ' LOGICAL :: chck_eq_opt' + WRITE(11,'(A)') ' chck_eq_opt = .TRUE.' + WRITE(11,'(A)') ' IF(PRESENT(chck_eq)) chck_eq_opt = chck_eq' + WRITE(11,'(A)') ' IF(chck_eq_opt .EQV. .TRUE.)THEN' + WRITE(11,'(A)') ' IF (TRIM(value) .NE. TRIM(correct_value)) THEN' + WRITE(11,'(A)') ' total_error = total_error + 1' + WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT VALIDATION ", string' + WRITE(11,'(A)') ' ENDIF' + WRITE(11,'(A)') ' ELSE' + WRITE(11,'(A)') ' IF (TRIM(value) .EQ. TRIM(correct_value)) THEN' + WRITE(11,'(A)') ' total_error = total_error + 1' + WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT VALIDATION ", string' + WRITE(11,'(A)') ' ENDIF' WRITE(11,'(A)') ' ENDIF' WRITE(11,'(A)') ' END SUBROUTINE verify_character' @@ -248,16 +281,26 @@ PROGRAM H5_test_buildiface WRITE(11,'(A)') '!DEC$attributes dllexport :: verify_logical' WRITE(11,'(A)') '!DEC$endif' ! Subroutine API - WRITE(11,'(A)') ' SUBROUTINE verify_logical(string,value,correct_value,total_error)' + WRITE(11,'(A)') ' SUBROUTINE verify_logical(string,value,correct_value,total_error,chck_eq)' WRITE(11,'(A)') ' IMPLICIT NONE' WRITE(11,'(A)') ' CHARACTER(LEN=*) :: string' WRITE(11,'(A)') ' LOGICAL :: value, correct_value' WRITE(11,'(A)') ' INTEGER :: total_error' - WRITE(11,'(A)') ' IF (value .NEQV. 
correct_value) THEN' - WRITE(11,'(A)') ' total_error = total_error + 1' - WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT VALIDATION ", string' + WRITE(11,'(A)') ' LOGICAL, OPTIONAL :: chck_eq' + WRITE(11,'(A)') ' LOGICAL :: chck_eq_opt' + WRITE(11,'(A)') ' chck_eq_opt = .TRUE.' + WRITE(11,'(A)') ' IF(PRESENT(chck_eq)) chck_eq_opt = chck_eq' + WRITE(11,'(A)') ' IF(chck_eq_opt .EQV. .TRUE.)THEN' + WRITE(11,'(A)') ' IF (value .NEQV. correct_value) THEN' + WRITE(11,'(A)') ' total_error = total_error + 1' + WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT VALIDATION ", string' + WRITE(11,'(A)') ' ENDIF' + WRITE(11,'(A)') ' ELSE' + WRITE(11,'(A)') ' IF (value .EQV. correct_value) THEN' + WRITE(11,'(A)') ' total_error = total_error + 1' + WRITE(11,'(A)') ' WRITE(*,*) "ERROR: INCORRECT VALIDATION ", string' + WRITE(11,'(A)') ' ENDIF' WRITE(11,'(A)') ' ENDIF' - WRITE(11,'(A)') ' END SUBROUTINE verify_logical' WRITE(11,'(A)') "END MODULE TH5_MISC_gen" diff --git a/fortran/test/fortranlib_test.F90 b/fortran/test/fortranlib_test.F90 index d7fca7dbb7c..ec0dcece578 100644 --- a/fortran/test/fortranlib_test.F90 +++ b/fortran/test/fortranlib_test.F90 @@ -193,7 +193,7 @@ PROGRAM fortranlibtest ret_total_error = 0 CALL external_test(cleanup, ret_total_error) - CALL write_test_status(ret_total_error, ' External dataset test', total_error) + CALL write_test_status(ret_total_error, ' External dataset and Selection IO test', total_error) ret_total_error = 0 CALL multi_file_test(cleanup, ret_total_error) @@ -207,6 +207,10 @@ PROGRAM fortranlibtest CALL test_misc_properties(cleanup, ret_total_error) CALL write_test_status(ret_total_error, ' Miscellaneous properties', total_error) + ret_total_error = 0 + CALL test_in_place_conversion(cleanup, ret_total_error) + CALL write_test_status(ret_total_error, ' Test in-place conversion', total_error) + ! ! '=========================================' ! 'Testing ATTRIBUTE interface ' diff --git a/fortran/test/tH5P.F90 b/fortran/test/tH5P.F90 index 3db5b288661..37ecdac8deb 100644 --- a/fortran/test/tH5P.F90 +++ b/fortran/test/tH5P.F90 @@ -34,8 +34,8 @@ SUBROUTINE external_test(cleanup, total_error) ! This subroutine tests following functionalities: ! h5pset_external_f, h5pget_external_count_f, -! h5pget_external_f - +! h5pget_external_f, h5pget_selection_io_f +! h5pSet_selection_io_f IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup @@ -62,6 +62,7 @@ SUBROUTINE external_test(cleanup, total_error) INTEGER(SIZE_T) :: namesize INTEGER(HSIZE_T) :: size, buf_size INTEGER :: idx + INTEGER :: selection_io_mode buf_size = 4*1024*1024 @@ -77,6 +78,44 @@ SUBROUTINE external_test(cleanup, total_error) CALL h5pcreate_f(H5P_DATASET_XFER_F, plist_id, error) CALL check("h5pcreate_f", error, total_error) + + ! Check default Selection IO state + CALL h5pget_selection_io_f(plist_id, selection_io_mode, error) + CALL check("h5pget_selection_io_f", error, total_error) + CALL VERIFY("h5pget_selection_io_f", selection_io_mode, H5D_SELECTION_IO_MODE_DEFAULT_F, total_error) + + ! Turn off Section IO + CALL h5pset_selection_io_f(plist_id, H5D_SELECTION_IO_MODE_OFF_F, error) + CALL check("h5pset_selection_io_f", error, total_error) + + CALL h5pget_selection_io_f(plist_id, selection_io_mode, error) + CALL check("h5pget_selection_io_f", error, total_error) + CALL VERIFY("h5pget_selection_io_f", selection_io_mode, H5D_SELECTION_IO_MODE_OFF_F, total_error) + + ! 
Turn on Section IO + CALL h5pset_selection_io_f(plist_id, H5D_SELECTION_IO_MODE_ON_F, error) + CALL check("h5pset_selection_io_f", error, total_error) + + CALL h5pget_selection_io_f(plist_id, selection_io_mode, error) + CALL check("h5pget_selection_io_f", error, total_error) + CALL VERIFY("h5pget_selection_io_f", selection_io_mode, H5D_SELECTION_IO_MODE_ON_F, total_error) + + ! Turn off Section IO + CALL h5pset_selection_io_f(plist_id, H5D_SELECTION_IO_MODE_OFF_F, error) + CALL check("h5pset_selection_io_f", error, total_error) + + CALL h5pget_selection_io_f(plist_id, selection_io_mode, error) + CALL check("h5pget_selection_io_f", error, total_error) + CALL VERIFY("h5pget_selection_io_f", selection_io_mode, H5D_SELECTION_IO_MODE_OFF_F, total_error) + + ! Change back to the default + CALL h5pset_selection_io_f(plist_id, H5D_SELECTION_IO_MODE_DEFAULT_F, error) + CALL check("h5pset_selection_io_f", error, total_error) + + CALL h5pget_selection_io_f(plist_id, selection_io_mode, error) + CALL check("h5pget_selection_io_f", error, total_error) + CALL VERIFY("h5pget_selection_io_f", selection_io_mode, H5D_SELECTION_IO_MODE_DEFAULT_F, total_error) + CALL h5pset_buffer_f(plist_id, buf_size, error) CALL check("h5pset_buffer_f", error, total_error) CALL h5pget_buffer_f(plist_id, size, error) @@ -796,4 +835,125 @@ SUBROUTINE test_misc_properties(cleanup, total_error) END SUBROUTINE test_misc_properties +!------------------------------------------------------------------------- +! Function: test_in_place_conversion +! +! Purpose: single dataset reader/write, smaller mem type, no background buffer +! -- create dataset with H5T_NATIVE_DOUBLE +! -- write dataset with H5T_NATIVE_REAL +! -- read dataset with H5T_NATIVE_REAL +! +! Tests APIs: +! h5pset_modify_write_buf_f, h5pget_modify_write_buf_f +! +! Return: Success: 0 +! Failure: >0 +! +!------------------------------------------------------------------------- +! +SUBROUTINE test_in_place_conversion(cleanup, total_error) + + IMPLICIT NONE + LOGICAL, INTENT(IN) :: cleanup + INTEGER, INTENT(INOUT) :: total_error + + CHARACTER(LEN=12), PARAMETER :: filename = "inplace_conv" + CHARACTER(LEN=80) :: fix_filename + CHARACTER(LEN=4), PARAMETER :: dsetname = "dset" + + INTEGER(HID_T) :: file_id ! File identifier + INTEGER(HID_T) :: dset_id ! Dataset identifier + INTEGER(HID_T) :: dspace_id ! Dataspace identifier + INTEGER(HID_T) :: plist_id + LOGICAL :: modify_write_buf + INTEGER :: error !error code + + INTEGER, PARAMETER :: array_len = 10 + INTEGER(HSIZE_T), DIMENSION(1) :: dims = (/array_len/) ! Dataset dimensions + INTEGER :: rank = 1 ! Dataset rank + + REAL(KIND=Fortran_DOUBLE), DIMENSION(1:array_len), TARGET :: wbuf_d + REAL(KIND=Fortran_DOUBLE), DIMENSION(1:array_len) :: wbuf_d_org + REAL(KIND=Fortran_REAL) , DIMENSION(1:array_len), TARGET :: rbuf + INTEGER :: i + TYPE(C_PTR) :: f_ptr + + ! create the data + DO i = 1, array_len + wbuf_d(i) = 1_Fortran_DOUBLE + 0.123456789123456_Fortran_DOUBLE + wbuf_d_org(i) = wbuf_d(i) + ENDDO + + ! + !Create file "inplace_conv.h5" using default properties. + ! + CALL h5_fixname_f(filename, fix_filename, H5P_DEFAULT_F, error) + IF (error .NE. 0) STOP "Cannot modify filename" + + CALL h5fcreate_f(fix_filename, H5F_ACC_TRUNC_F, file_id, error) + CALL check("h5fcreate_f",error,total_error) + ! + ! Create the dataspace. + ! + CALL h5screate_simple_f(rank, dims, dspace_id, error) + CALL check("h5screate_simple_f", error, total_error) + + ! 
Create dataset transfer property list + CALL h5pcreate_f(H5P_DATASET_XFER_F, plist_id, error) + CALL check("h5pcreate_f", error, total_error) + + CALL h5pset_selection_io_f(plist_id, H5D_SELECTION_IO_MODE_ON_F, error) + CALL check("h5pset_selection_io_f", error, total_error) + + CALL h5pget_modify_write_buf_f(plist_id, modify_write_buf, error) + CALL check("h5pget_modify_write_buf_f", error, total_error) + CALL VERIFY("h5pget_modify_write_buf_f", modify_write_buf, .FALSE., total_error) + + ! Set to modify the write buffer + CALL h5pset_modify_write_buf_f(plist_id, .TRUE., error) + CALL check("h5pset_modify_write_buf_f", error, total_error) + + CALL h5pget_modify_write_buf_f(plist_id, modify_write_buf, error) + CALL check("h5pget_modify_write_buf_f", error, total_error) + CALL VERIFY("h5pget_modify_write_buf_f", modify_write_buf, .TRUE., total_error) + + CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_REAL, dspace_id, dset_id, error) + CALL check("h5dcreate_f", error, total_error) + + f_ptr = C_LOC(wbuf_d) + CALL h5dwrite_f(dset_id, H5T_NATIVE_DOUBLE, f_ptr, error, H5S_ALL_F, H5S_ALL_F, xfer_prp=plist_id) + CALL check("h5dwrite_f", error, total_error) + + ! Should not be equal for in-place buffer use + CALL VERIFY("h5dwrite_f -- in-place", wbuf_d(1), wbuf_d_org(1), total_error, .FALSE.) + + f_ptr = C_LOC(rbuf) + CALL h5dread_f(dset_id, H5T_NATIVE_REAL, f_ptr, error) + CALL check("h5dread_f", error, total_error) + + DO i = 1, array_len + CALL VERIFY("h5dwrite_f -- in-place", rbuf(i), REAL(wbuf_d_org(i), Fortran_REAL), total_error) + ENDDO + + ! + ! End access to the dataset and release resources used by it. + ! + CALL h5dclose_f(dset_id, error) + CALL check("h5dclose_f", error, total_error) + ! + ! Terminate access to the data space. + ! + CALL h5sclose_f(dspace_id, error) + CALL check("h5sclose_f", error, total_error) + + ! + ! Close the file. + ! + CALL h5fclose_f(file_id, error) + CALL check("h5fclose_f", error, total_error) + CALL h5pclose_f(plist_id, error) + CALL check("h5pclose_f", error, total_error) + +END SUBROUTINE test_in_place_conversion + END MODULE TH5P diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 89a821c3108..b3aa5b64471 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -123,9 +123,11 @@ New Features Fortran Library: ---------------- - - Added Fortran async APIs + - Fortran async APIs H5A, H5D, H5ES, H5G, H5F, H5L and H5O were added. - H5A, H5D, H5ES, H5G, H5F, H5L and H5O async APIs were added. 
+ - Added Fortran APIs: + h5pset_selection_io_f, h5pget_selection_io_f + h5pset_modify_write_buf_f, h5pget_modify_write_buf_f C++ Library: ------------ diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt index 3a44fca7c1b..fb66e76103e 100644 --- a/testpar/CMakeLists.txt +++ b/testpar/CMakeLists.txt @@ -8,6 +8,7 @@ project (HDF5_TEST_PAR C) set (testphdf5_SOURCES ${HDF5_TEST_PAR_SOURCE_DIR}/testphdf5.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_dset.c + ${HDF5_TEST_PAR_SOURCE_DIR}/t_select_io_dset.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_file.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_file_image.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_mdset.c From b4adcf83763fd3848594cd8fedb9e6a95310d10d Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 1 May 2023 16:24:14 -0700 Subject: [PATCH 189/231] Harden H5O cache deserialize calls (#2856) --- src/H5Ocache.c | 532 ++++++++++++++++++++++--------------------------- 1 file changed, 237 insertions(+), 295 deletions(-) diff --git a/src/H5Ocache.c b/src/H5Ocache.c index 42d8f3590e0..72261faa805 100644 --- a/src/H5Ocache.c +++ b/src/H5Ocache.c @@ -13,10 +13,8 @@ /*------------------------------------------------------------------------- * * Created: H5Ocache.c - * Sep 28 2005 - * Quincey Koziol * - * Purpose: Object header metadata cache virtual functions. + * Purpose: Object header metadata cache virtual functions * *------------------------------------------------------------------------- */ @@ -30,13 +28,13 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5FLprivate.h" /* Free lists */ -#include "H5MFprivate.h" /* File memory management */ -#include "H5MMprivate.h" /* Memory management */ -#include "H5Opkg.h" /* Object headers */ -#include "H5WBprivate.h" /* Wrapped Buffers */ +#include "H5private.h" /* Generic Functions */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5FLprivate.h" /* Free lists */ +#include "H5MFprivate.h" /* File memory management */ +#include "H5MMprivate.h" /* Memory management */ +#include "H5Opkg.h" /* Object headers */ +#include "H5WBprivate.h" /* Wrapped Buffers */ /****************/ /* Local Macros */ @@ -74,7 +72,7 @@ static herr_t H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing); static herr_t H5O__cache_chk_free_icr(void *thing); /* Prefix routines */ -static herr_t H5O__prefix_deserialize(const uint8_t *image, H5O_cache_ud_t *udata); +static herr_t H5O__prefix_deserialize(const uint8_t *image, size_t len, H5O_cache_ud_t *udata); /* Chunk routines */ static herr_t H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t *image, @@ -144,15 +142,10 @@ H5FL_SEQ_DEFINE(H5O_cont_t); /*------------------------------------------------------------------------- * Function: H5O__cache_get_initial_load_size() * - * Purpose: Tell the metadata cache how much data to read from file in - * the first speculative read for the object header. - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/28/14 + * Purpose: Tell the metadata cache how much data to read from file in + * the first speculative read for the object header. 
* + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -160,7 +153,6 @@ H5O__cache_get_initial_load_size(void H5_ATTR_UNUSED *_udata, size_t *image_len) { FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(image_len); /* Set the image length size */ @@ -172,33 +164,26 @@ H5O__cache_get_initial_load_size(void H5_ATTR_UNUSED *_udata, size_t *image_len) /*------------------------------------------------------------------------- * Function: H5O__cache_get_final_load_size() * - * Purpose: Tell the metadata cache the final size of an object header. - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: Quincey Koziol - * November 18, 2016 + * Purpose: Tell the metadata cache the final size of an object header. * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t -H5O__cache_get_final_load_size(const void *image, size_t H5_ATTR_NDEBUG_UNUSED image_len, void *_udata, - size_t *actual_len) +H5O__cache_get_final_load_size(const void *image, size_t image_len, void *_udata, size_t *actual_len) { H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(image); HDassert(udata); HDassert(actual_len); HDassert(*actual_len == image_len); /* Deserialize the object header prefix */ - if (H5O__prefix_deserialize((const uint8_t *)image, udata) < 0) + if (H5O__prefix_deserialize((const uint8_t *)image, image_len, udata) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, FAIL, "can't deserialize object header prefix") /* Sanity check */ @@ -219,10 +204,6 @@ H5O__cache_get_final_load_size(const void *image, size_t H5_ATTR_NDEBUG_UNUSED i * * Return: Success: TRUE/FALSE * Failure: Negative - * - * Programmer: Vailin Choi - * Aug 2015 - * *------------------------------------------------------------------------- */ static htri_t @@ -230,11 +211,10 @@ H5O__cache_verify_chksum(const void *_image, size_t len, void *_udata) { const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */ H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */ - htri_t ret_value = TRUE; /* Return value */ + htri_t ret_value = TRUE; FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(image); HDassert(udata); HDassert(udata->oh); @@ -257,8 +237,8 @@ H5O__cache_verify_chksum(const void *_image, size_t len, void *_udata) in H5O__prefix_deserialize() */ udata->free_oh = TRUE; ret_value = FALSE; - } /* end if */ - } /* end if */ + } + } else HDassert(!(udata->common.file_intent & H5F_ACC_SWMR_WRITE)); @@ -268,21 +248,18 @@ H5O__cache_verify_chksum(const void *_image, size_t len, void *_udata) /*------------------------------------------------------------------------- * Function: H5O__cache_deserialize * - * Purpose: Attempt to deserialize the object header contained in the - * supplied buffer, load the data into an instance of H5O_t, and - * return a pointer to the new instance. + * Purpose: Attempt to deserialize the object header contained in the + * supplied buffer, load the data into an instance of H5O_t, and + * return a pointer to the new instance. * - * Note that the object header is read with with a speculative read. - * If the initial read is too small, make note of this fact and return - * without error. 
H5C__load_entry() will note the size discrepancy - * and retry the deserialize operation with the correct size read. + * Note that the object header is read with with a speculative + * read. If the initial read is too small, make note of this fact + * and return without error. H5C__load_entry() will note the + * size discrepancy and retry the deserialize operation with + * the correct size read. * * Return: Success: Pointer to in core representation * Failure: NULL - * - * Programmer: John Mainzer - * 7/28/14 - * *------------------------------------------------------------------------- */ static void * @@ -290,11 +267,10 @@ H5O__cache_deserialize(const void *image, size_t len, void *_udata, hbool_t *dir { H5O_t *oh = NULL; /* Object header read in */ H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */ - void *ret_value = NULL; /* Return value */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(image); HDassert(len > 0); HDassert(udata); @@ -302,19 +278,18 @@ H5O__cache_deserialize(const void *image, size_t len, void *_udata, hbool_t *dir HDassert(udata->common.cont_msg_info); HDassert(dirty); - /* Check for partially deserialized object header */ - /* (Object header prefix will be deserialized if the object header came - * through the 'get_final_load_size' callback and not deserialized if - * the object header is coming from a cache image - QAK, 2016/12/14) + /* Check for partially deserialized object header + * + * The Object header prefix will be deserialized if the object header came + * through the 'get_final_load_size' callback and not deserialized if + * the object header is coming from a cache image. */ if (NULL == udata->oh) { /* Deserialize the object header prefix */ - if (H5O__prefix_deserialize((const uint8_t *)image, udata) < 0) + if (H5O__prefix_deserialize((const uint8_t *)image, len, udata) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, NULL, "can't deserialize object header prefix") - - /* Sanity check */ HDassert(udata->oh); - } /* end if */ + } /* Retrieve partially deserialized object header from user data */ oh = udata->oh; @@ -327,7 +302,7 @@ H5O__cache_deserialize(const void *image, size_t len, void *_udata, hbool_t *dir /* Create virtual entry, for use as proxy */ if (NULL == (oh->proxy = H5AC_proxy_entry_create())) HGOTO_ERROR(H5E_OHDR, H5E_CANTCREATE, NULL, "can't create object header proxy") - } /* end if */ + } else oh->proxy = NULL; @@ -354,16 +329,11 @@ H5O__cache_deserialize(const void *image, size_t len, void *_udata, hbool_t *dir /*------------------------------------------------------------------------- * Function: H5O__cache_image_len * - * Purpose: Compute the size in bytes of the specified instance of - * H5O_t on disk, and return it in *image_len. On failure, - * the value of *image_len is undefined. - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/28/14 + * Purpose: Compute the size in bytes of the specified instance of + * H5O_t on disk, and return it in *image_len. On failure, + * the value of *image_len is undefined. 
* + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -373,7 +343,6 @@ H5O__cache_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(oh); HDassert(oh->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(oh->cache_info.type == H5AC_OHDR); @@ -388,15 +357,10 @@ H5O__cache_image_len(const void *_thing, size_t *image_len) /*------------------------------------------------------------------------- * Function: H5O__cache_serialize * - * Purpose: Serialize the contents of the supplied object header, and - * load this data into the supplied buffer. - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/28/14 + * Purpose: Serialize the contents of the supplied object header, and + * load this data into the supplied buffer. * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -404,11 +368,10 @@ H5O__cache_serialize(const H5F_t *f, void *image, size_t len, void *_thing) { H5O_t *oh = (H5O_t *)_thing; /* Object header to encode */ uint8_t *chunk_image; /* Pointer to object header prefix buffer */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(f); HDassert(image); HDassert(oh); @@ -451,13 +414,13 @@ H5O__cache_serialize(const H5F_t *f, void *image, size_t len, void *_thing) UINT32ENCODE(chunk_image, oh->mtime); UINT32ENCODE(chunk_image, oh->ctime); UINT32ENCODE(chunk_image, oh->btime); - } /* end if */ + } /* Attribute fields */ if (oh->flags & H5O_HDR_ATTR_STORE_PHASE_CHANGE) { UINT16ENCODE(chunk_image, oh->max_compact); UINT16ENCODE(chunk_image, oh->min_dense); - } /* end if */ + } /* First chunk size */ switch (oh->flags & H5O_HDR_CHUNK0_SIZE) { @@ -483,8 +446,8 @@ H5O__cache_serialize(const H5F_t *f, void *image, size_t len, void *_thing) default: HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad size for chunk 0") - } /* end switch */ - } /* end if */ + } + } else { /* Version */ *chunk_image++ = oh->version; @@ -509,7 +472,7 @@ H5O__cache_serialize(const H5F_t *f, void *image, size_t len, void *_thing) /* Zero to alignment */ HDmemset(chunk_image, 0, (size_t)(H5O_SIZEOF_HDR(oh) - 12)); chunk_image += (size_t)(H5O_SIZEOF_HDR(oh) - 12); - } /* end else */ + } HDassert((size_t)(chunk_image - oh->chunk[0].image) == (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh))); @@ -533,24 +496,17 @@ H5O__cache_serialize(const H5F_t *f, void *image, size_t len, void *_thing) * * Purpose: Handle cache action notifications * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * Jul 23 2016 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t H5O__cache_notify(H5AC_notify_action_t action, void *_thing) { H5O_t *oh = (H5O_t *)_thing; - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* - * Check arguments. 
- */ HDassert(oh); switch (action) { @@ -563,12 +519,12 @@ H5O__cache_notify(H5AC_notify_action_t action, void *_thing) /* Register the object header as a parent of the virtual entry */ if (H5AC_proxy_entry_add_parent(oh->proxy, oh) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't add object header as parent of proxy") - } /* end if */ + } break; case H5AC_NOTIFY_ACTION_AFTER_FLUSH: case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: - /* do nothing */ + /* Do nothing */ break; case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: { @@ -578,17 +534,17 @@ H5O__cache_notify(H5AC_notify_action_t action, void *_thing) for (u = 0; u < oh->nmesgs; u++) if (oh->mesg[u].chunkno == 0) oh->mesg[u].dirty = FALSE; -#ifndef NDEBUG +#ifdef H5O_DEBUG /* Reset the number of messages dirtied by decoding */ oh->ndecode_dirtied = 0; -#endif /* NDEBUG */ +#endif } break; case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: case H5AC_NOTIFY_ACTION_CHILD_CLEANED: case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED: case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED: - /* do nothing */ + /* Do nothing */ break; case H5AC_NOTIFY_ACTION_BEFORE_EVICT: @@ -596,12 +552,12 @@ H5O__cache_notify(H5AC_notify_action_t action, void *_thing) /* Unregister the object header as a parent of the virtual entry */ if (H5AC_proxy_entry_remove_parent(oh->proxy, oh) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't remove object header as parent of proxy") - } /* end if */ + } break; default: HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unknown action from metadata cache") - } /* end switch */ + } done: FUNC_LEAVE_NOAPI(ret_value) @@ -610,29 +566,23 @@ H5O__cache_notify(H5AC_notify_action_t action, void *_thing) /*------------------------------------------------------------------------- * Function: H5O__cache_free_icr * - * Purpose: Free the in core representation of the supplied object header. - * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). + * Purpose: Free the in core representation of the supplied object header. * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/28/14 + * Note: The metadata cache sets the object's cache_info.magic to + * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr + * callback (checked in assert). * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t H5O__cache_free_icr(void *_thing) { H5O_t *oh = (H5O_t *)_thing; /* Object header to destroy */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(oh); HDassert(oh->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(oh->cache_info.type == H5AC_OHDR); @@ -648,16 +598,11 @@ H5O__cache_free_icr(void *_thing) /*------------------------------------------------------------------------- * Function: H5O__cache_chk_get_initial_load_size() * - * Purpose: Tell the metadata cache how large the on disk image of the - * chunk proxy is, so it can load the image into a buffer for the - * deserialize call. - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/28/14 + * Purpose: Tell the metadata cache how large the on disk image of the + * chunk proxy is, so it can load the image into a buffer for the + * deserialize call. 
* + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -667,7 +612,6 @@ H5O__cache_chk_get_initial_load_size(void *_udata, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(udata); HDassert(udata->oh); HDassert(image_len); @@ -686,10 +630,6 @@ H5O__cache_chk_get_initial_load_size(void *_udata, size_t *image_len) * * Return: Success: TRUE/FALSE * Failure: Negative - * - * Programmer: Vailin Choi - * Aug 2015 - * *------------------------------------------------------------------------- */ static htri_t @@ -697,11 +637,10 @@ H5O__cache_chk_verify_chksum(const void *_image, size_t len, void *_udata) { const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */ H5O_chk_cache_ud_t *udata = (H5O_chk_cache_ud_t *)_udata; /* User data for callback */ - htri_t ret_value = TRUE; /* Return value */ + htri_t ret_value = TRUE; FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(image); /* There is no checksum for version 1 */ @@ -714,7 +653,7 @@ H5O__cache_chk_verify_chksum(const void *_image, size_t len, void *_udata) if (stored_chksum != computed_chksum) ret_value = FALSE; - } /* end if */ + } FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__cache_chk_verify_chksum() */ @@ -722,16 +661,12 @@ H5O__cache_chk_verify_chksum(const void *_image, size_t len, void *_udata) /*------------------------------------------------------------------------- * Function: H5O__cache_chk_deserialize * - * Purpose: Attempt to deserialize the object header continuation chunk - * contained in the supplied buffer, load the data into an instance - * of H5O_chunk_proxy_t, and return a pointer to the new instance. + * Purpose: Attempt to deserialize the object header continuation chunk + * contained in the supplied buffer, load the data into an instance + * of H5O_chunk_proxy_t, and return a pointer to the new instance. 
* * Return: Success: Pointer to in core representation * Failure: NULL - * - * Programmer: John Mainzer - * 7/28/14 - * *------------------------------------------------------------------------- */ static void * @@ -739,11 +674,10 @@ H5O__cache_chk_deserialize(const void *image, size_t len, void *_udata, hbool_t { H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk proxy object */ H5O_chk_cache_ud_t *udata = (H5O_chk_cache_ud_t *)_udata; /* User data for callback */ - void *ret_value = NULL; /* Return value */ + void *ret_value = NULL; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(image); HDassert(len > 0); HDassert(udata); @@ -757,7 +691,6 @@ H5O__cache_chk_deserialize(const void *image, size_t len, void *_udata, hbool_t /* Check if we are still decoding the object header */ /* (as opposed to bringing a piece of it back from the file) */ if (udata->decoding) { - /* Sanity check */ HDassert(udata->common.f); HDassert(udata->common.cont_msg_info); @@ -768,7 +701,7 @@ H5O__cache_chk_deserialize(const void *image, size_t len, void *_udata, hbool_t /* Set the chunk number for the chunk proxy */ H5_CHECKED_ASSIGN(chk_proxy->chunkno, unsigned, udata->oh->nchunks - 1, size_t); - } /* end if */ + } else { /* Sanity check */ HDassert(udata->chunkno < udata->oh->nchunks); @@ -781,7 +714,7 @@ H5O__cache_chk_deserialize(const void *image, size_t len, void *_udata, hbool_t */ HDassert(0 == HDmemcmp(image, udata->oh->chunk[chk_proxy->chunkno].image, udata->oh->chunk[chk_proxy->chunkno].size)); - } /* end else */ + } /* Increment reference count of object header */ if (H5O__inc_rc(udata->oh) < 0) @@ -802,15 +735,10 @@ H5O__cache_chk_deserialize(const void *image, size_t len, void *_udata, hbool_t /*------------------------------------------------------------------------- * Function: H5O__cache_chk_image_len * - * Purpose: Return the on disk image size of a object header chunk to the - * metadata cache via the image_len. - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/28/14 + * Purpose: Return the on disk image size of a object header chunk to the + * metadata cache via the image_len. * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t @@ -820,7 +748,6 @@ H5O__cache_chk_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR - /* Check arguments */ HDassert(chk_proxy); HDassert(chk_proxy->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(chk_proxy->cache_info.type == H5AC_OHDR_CHK); @@ -835,28 +762,22 @@ H5O__cache_chk_image_len(const void *_thing, size_t *image_len) /*------------------------------------------------------------------------- * Function: H5O__cache_chk_serialize * - * Purpose: Given a pointer to an instance of an object header chunk and an - * appropriately sized buffer, serialize the contents of the - * instance for writing to disk, and copy the serialized data - * into the buffer. - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: John Mainzer - * 7/28/14 + * Purpose: Given a pointer to an instance of an object header chunk and an + * appropriately sized buffer, serialize the contents of the + * instance for writing to disk, and copy the serialized data + * into the buffer. 
* + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t H5O__cache_chk_serialize(const H5F_t *f, void *image, size_t len, void *_thing) { H5O_chunk_proxy_t *chk_proxy = (H5O_chunk_proxy_t *)_thing; /* Object header chunk to serialize */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(f); HDassert(image); HDassert(chk_proxy); @@ -883,24 +804,17 @@ H5O__cache_chk_serialize(const H5F_t *f, void *image, size_t len, void *_thing) * * Purpose: Handle cache action notifications * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Neil Fortner - * Mar 20 2012 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) { H5O_chunk_proxy_t *chk_proxy = (H5O_chunk_proxy_t *)_thing; - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* - * Check arguments. - */ HDassert(chk_proxy); HDassert(chk_proxy->oh); @@ -922,13 +836,13 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) */ if (H5AC_create_flush_dependency(chk_proxy->fd_parent, chk_proxy) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTDEPEND, FAIL, "unable to create flush dependency") - } /* end if */ + } /* Add flush dependency on object header */ { if (H5AC_create_flush_dependency(chk_proxy->oh, chk_proxy) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTDEPEND, FAIL, "unable to create flush dependency") - } /* end if */ + } /* Add flush dependency on object header proxy, if proxy exists */ { @@ -940,12 +854,12 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't add object header chunk as parent of proxy") } - } /* end if */ + } break; case H5AC_NOTIFY_ACTION_AFTER_FLUSH: case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: - /* do nothing */ + /* Do nothing */ break; case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: { @@ -961,7 +875,7 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) case H5AC_NOTIFY_ACTION_CHILD_CLEANED: case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED: case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED: - /* do nothing */ + /* Do nothing */ break; case H5AC_NOTIFY_ACTION_BEFORE_EVICT: @@ -978,7 +892,7 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) if (H5AC_destroy_flush_dependency(chk_proxy->fd_parent, chk_proxy) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency") chk_proxy->fd_parent = NULL; - } /* end if */ + } /* Unregister the object header as a parent of the virtual entry */ if (H5AC_destroy_flush_dependency(chk_proxy->oh, chk_proxy) < 0) @@ -988,16 +902,12 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) if (H5AC_proxy_entry_remove_parent(chk_proxy->oh->proxy, chk_proxy) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't remove object header chunk as parent of proxy") - } /* end if */ + } break; default: -#ifdef NDEBUG HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unknown action from metadata cache") -#else /* NDEBUG */ - HDassert(0 && "Unknown action?!?"); -#endif /* NDEBUG */ - } /* end switch */ + } done: FUNC_LEAVE_NOAPI(ret_value) @@ -1006,30 +916,24 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) /*------------------------------------------------------------------------- * Function: H5O__cache_chk_free_icr * - * Purpose: Free the in core memory 
associated with the supplied object - * header continuation chunk. - * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). - * - * Return: Success: SUCCEED - * Failure: FAIL + * Purpose: Free the in core memory associated with the supplied object + * header continuation chunk. * - * Programmer: John Mainzer - * 7/28/14 + * Note: The metadata cache sets the object's cache_info.magic to + * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr + * callback (checked in assert). * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t H5O__cache_chk_free_icr(void *_thing) { H5O_chunk_proxy_t *chk_proxy = (H5O_chunk_proxy_t *)_thing; /* Object header chunk proxy to release */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(chk_proxy); HDassert(chk_proxy->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(chk_proxy->cache_info.type == H5AC_OHDR_CHK); @@ -1045,26 +949,20 @@ H5O__cache_chk_free_icr(void *_thing) /*------------------------------------------------------------------------- * Function: H5O__add_cont_msg * - * Purpose: Add information from a continuation message to the list of + * Purpose: Add information from a continuation message to the list of * continuation messages in the object header * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: Quincey Koziol - * July 12, 2008 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t H5O__add_cont_msg(H5O_cont_msgs_t *cont_msg_info, const H5O_cont_t *cont) { - size_t contno; /* Continuation message index */ - herr_t ret_value = SUCCEED; /* Return value */ + size_t contno; /* Continuation message index */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(cont_msg_info); HDassert(cont); @@ -1077,7 +975,7 @@ H5O__add_cont_msg(H5O_cont_msgs_t *cont_msg_info, const H5O_cont_t *cont) HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, FAIL, "memory allocation failed") cont_msg_info->alloc_nmsgs = na; cont_msg_info->msgs = x; - } /* end if */ + } /* Init the continuation message info */ contno = cont_msg_info->nmsgs++; @@ -1092,26 +990,21 @@ H5O__add_cont_msg(H5O_cont_msgs_t *cont_msg_info, const H5O_cont_t *cont) /*------------------------------------------------------------------------- * Function: H5O__prefix_deserialize() * - * Purpose: Deserialize an object header prefix - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: Quincey Koziol - * December 14, 2016 + * Purpose: Deserialize an object header prefix * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t -H5O__prefix_deserialize(const uint8_t *_image, H5O_cache_ud_t *udata) +H5O__prefix_deserialize(const uint8_t *_image, size_t len, H5O_cache_ud_t *udata) { const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */ + const uint8_t *p_end = image + len - 1; /* End of image buffer */ H5O_t *oh = NULL; /* Object header read in */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(image); HDassert(udata); @@ -1125,16 +1018,23 @@ H5O__prefix_deserialize(const uint8_t *_image, H5O_cache_ud_t *udata) /* Check for presence of magic number */ /* 
(indicates version 2 or later) */ + if (H5_IS_BUFFER_OVERFLOW(image, H5_SIZEOF_MAGIC, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); if (!HDmemcmp(image, H5O_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { - /* Magic number */ + + /* Magic number (bounds checked above) */ image += H5_SIZEOF_MAGIC; /* Version */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); oh->version = *image++; if (H5O_VERSION_2 != oh->version) HGOTO_ERROR(H5E_OHDR, H5E_VERSION, FAIL, "bad object header version number") /* Flags */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); oh->flags = *image++; if (oh->flags & ~H5O_HDR_ALL_FLAGS) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unknown object header status flag(s)") @@ -1144,7 +1044,10 @@ H5O__prefix_deserialize(const uint8_t *_image, H5O_cache_ud_t *udata) /* Time fields */ if (oh->flags & H5O_HDR_STORE_TIMES) { - uint32_t tmp; /* Temporary value */ + uint32_t tmp; + + if (H5_IS_BUFFER_OVERFLOW(image, 4 + 4 + 4 + 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(image, tmp); oh->atime = (time_t)tmp; @@ -1154,48 +1057,61 @@ H5O__prefix_deserialize(const uint8_t *_image, H5O_cache_ud_t *udata) oh->ctime = (time_t)tmp; UINT32DECODE(image, tmp); oh->btime = (time_t)tmp; - } /* end if */ + } else oh->atime = oh->mtime = oh->ctime = oh->btime = 0; /* Attribute fields */ if (oh->flags & H5O_HDR_ATTR_STORE_PHASE_CHANGE) { + if (H5_IS_BUFFER_OVERFLOW(image, 2 + 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); + UINT16DECODE(image, oh->max_compact); UINT16DECODE(image, oh->min_dense); if (oh->max_compact < oh->min_dense) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header attribute phase change values") - } /* end if */ + } else { oh->max_compact = H5O_CRT_ATTR_MAX_COMPACT_DEF; oh->min_dense = H5O_CRT_ATTR_MIN_DENSE_DEF; - } /* end else */ + } /* First chunk size */ switch (oh->flags & H5O_HDR_CHUNK0_SIZE) { case 0: /* 1 byte size */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); udata->chunk0_size = *image++; break; case 1: /* 2 byte size */ + if (H5_IS_BUFFER_OVERFLOW(image, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(image, udata->chunk0_size); break; case 2: /* 4 byte size */ + if (H5_IS_BUFFER_OVERFLOW(image, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(image, udata->chunk0_size); break; case 3: /* 8 byte size */ + if (H5_IS_BUFFER_OVERFLOW(image, 8, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT64DECODE(image, udata->chunk0_size); break; default: HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad size for chunk 0") - } /* end switch */ + } if (udata->chunk0_size > 0 && udata->chunk0_size < H5O_SIZEOF_MSGHDR_OH(oh)) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header chunk size") - } /* end if */ + } else { /* Version */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); oh->version = *image++; if (H5O_VERSION_1 != oh->version) HGOTO_ERROR(H5E_OHDR, 
H5E_VERSION, FAIL, "bad object header version number") @@ -1204,12 +1120,18 @@ H5O__prefix_deserialize(const uint8_t *_image, H5O_cache_ud_t *udata) oh->flags = H5O_CRT_OHDR_FLAGS_DEF; /* Reserved */ + if (H5_IS_BUFFER_OVERFLOW(image, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); image++; /* Number of messages */ + if (H5_IS_BUFFER_OVERFLOW(image, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(image, udata->v1_pfx_nmesgs); /* Link count */ + if (H5_IS_BUFFER_OVERFLOW(image, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(image, oh->nlink); /* Reset unused time fields */ @@ -1220,21 +1142,27 @@ H5O__prefix_deserialize(const uint8_t *_image, H5O_cache_ud_t *udata) oh->min_dense = 0; /* First chunk size */ + if (H5_IS_BUFFER_OVERFLOW(image, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(image, udata->chunk0_size); if ((udata->v1_pfx_nmesgs > 0 && udata->chunk0_size < H5O_SIZEOF_MSGHDR_OH(oh)) || (udata->v1_pfx_nmesgs == 0 && udata->chunk0_size > 0)) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header chunk size") /* Reserved, in version 1 (for 8-byte alignment padding) */ + if (H5_IS_BUFFER_OVERFLOW(image, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); image += 4; - } /* end else */ + } /* Verify object header prefix length */ - HDassert((size_t)(image - _image) == (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh))); + if ((size_t)(image - _image) != (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh))) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header prefix length") /* If udata->oh is to be freed (see H5O__cache_verify_chksum), - save the pointer to udata->oh and free it later after setting - udata->oh with the new object header */ + * save the pointer to udata->oh and free it later after setting + * udata->oh with the new object header + */ if (udata->free_oh) { H5O_t *saved_oh = udata->oh; HDassert(udata->oh); @@ -1263,34 +1191,27 @@ H5O__prefix_deserialize(const uint8_t *_image, H5O_cache_ud_t *udata) /*------------------------------------------------------------------------- * Function: H5O__chunk_deserialize * - * Purpose: Deserialize a chunk for an object header - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: Quincey Koziol - * July 12, 2008 + * Purpose: Deserialize a chunk for an object header * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t *image, size_t len, H5O_common_cache_ud_t *udata, hbool_t *dirty) { - const uint8_t *chunk_image; /* Pointer into buffer to decode */ + const uint8_t *chunk_image = NULL; /* Pointer into buffer to decode */ + const uint8_t *p_end = NULL; /* End of image buffer */ uint8_t *eom_ptr; /* Pointer to end of messages for a chunk */ unsigned merged_null_msgs = 0; /* Number of null messages merged together */ unsigned chunkno; /* Current chunk's index */ -#ifndef NDEBUG - unsigned nullcnt; /* Count of null messages (for sanity checking gaps in chunks) */ -#endif /* NDEBUG */ - hbool_t mesgs_modified = + unsigned nullcnt; /* Count of null messages (for sanity checking gaps in chunks) */ + hbool_t mesgs_modified = FALSE; /* Whether any messages were 
modified when the object header was deserialized */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(oh); HDassert(H5F_addr_defined(addr)); HDassert(image); @@ -1307,7 +1228,7 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, FAIL, "memory allocation failed") oh->alloc_nchunks = na; oh->chunk = x; - } /* end if */ + } /* Init the chunk data info */ chunkno = (unsigned)oh->nchunks++; @@ -1329,24 +1250,31 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t /* Point into chunk image to decode */ chunk_image = oh->chunk[chunkno].image; + p_end = chunk_image + oh->chunk[chunkno].size - 1; + + /* Skip over [already decoded] prefix in special case of chunk 0 */ + if (chunkno == 0) { + size_t skip = (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh)); + + if (H5_IS_BUFFER_OVERFLOW(chunk_image, skip, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); + chunk_image += skip; + } - /* Handle chunk 0 as special case */ - if (chunkno == 0) - /* Skip over [already decoded] prefix */ - chunk_image += (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh)); /* Check for magic # on chunks > 0 in later versions of the format */ else if (chunkno > 0 && oh->version > H5O_VERSION_1) { /* Magic number */ - if (HDmemcmp(chunk_image, H5O_CHK_MAGIC, (size_t)H5_SIZEOF_MAGIC) != 0) + if (H5_IS_BUFFER_OVERFLOW(chunk_image, H5_SIZEOF_MAGIC, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); + if (HDmemcmp(chunk_image, H5O_CHK_MAGIC, H5_SIZEOF_MAGIC) != 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "wrong object header chunk signature") chunk_image += H5_SIZEOF_MAGIC; - } /* end if */ + } /* Decode messages from this chunk */ eom_ptr = oh->chunk[chunkno].image + (oh->chunk[chunkno].size - H5O_SIZEOF_CHKSUM_OH(oh)); -#ifndef NDEBUG nullcnt = 0; -#endif /* NDEBUG */ + while (chunk_image < eom_ptr) { size_t mesg_size; /* Size of message read in */ unsigned id; /* ID (type) of current message */ @@ -1356,17 +1284,27 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t /* Decode message prefix info */ /* Version # */ - if (oh->version == H5O_VERSION_1) + if (oh->version == H5O_VERSION_1) { + if (H5_IS_BUFFER_OVERFLOW(chunk_image, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(chunk_image, id) - else + } + else { + if (H5_IS_BUFFER_OVERFLOW(chunk_image, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); id = *chunk_image++; + } /* Message size */ + if (H5_IS_BUFFER_OVERFLOW(chunk_image, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(chunk_image, mesg_size); if (mesg_size != H5O_ALIGN_OH(oh, mesg_size)) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "message not aligned") /* Message flags */ + if (H5_IS_BUFFER_OVERFLOW(chunk_image, 1, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); flags = *chunk_image++; if (flags & ~H5O_MSG_FLAG_BITS) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unknown flag for message") @@ -1381,13 +1319,20 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t * knows about */ /* Reserved bytes/creation index */ - if (oh->version == 
H5O_VERSION_1) - chunk_image += 3; /*reserved*/ + if (oh->version == H5O_VERSION_1) { + /* Reserved bytes */ + if (H5_IS_BUFFER_OVERFLOW(chunk_image, 3, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); + chunk_image += 3; + } else { /* Only decode creation index if they are being tracked */ - if (oh->flags & H5O_HDR_ATTR_CRT_ORDER_TRACKED) + if (oh->flags & H5O_HDR_ATTR_CRT_ORDER_TRACKED) { + if (H5_IS_BUFFER_OVERFLOW(chunk_image, 2, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(chunk_image, crt_idx); - } /* end else */ + } + } /* Try to detect invalidly formatted object header message that * extends past end of chunk. @@ -1395,11 +1340,9 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t if (chunk_image + mesg_size > eom_ptr) HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, FAIL, "corrupt object header") -#ifndef NDEBUG /* Increment count of null messages */ if (H5O_NULL_ID == id) nullcnt++; -#endif /* NDEBUG */ /* Check for combining two adjacent 'null' messages */ if ((udata->file_intent & H5F_ACC_RDWR) && H5O_NULL_ID == id && oh->nmesgs > 0 && @@ -1412,7 +1355,7 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t oh->mesg[mesgno].raw_size += (size_t)H5O_SIZEOF_MSGHDR_OH(oh) + mesg_size; oh->mesg[mesgno].dirty = TRUE; merged_null_msgs++; - } /* end if */ + } else { H5O_mesg_t *mesg; /* Pointer to new message */ unsigned ioflags = 0; /* Flags for decode routine */ @@ -1476,20 +1419,20 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t /* This is a bit aggressive, since the application may * never change anything about the object (metadata or * raw data), but we can sort out the finer details - * when/if we start using the flag - QAK + * when/if we start using the flag. */ /* Also, it's possible that this functionality may not * get invoked if the object header is brought into * the metadata cache in some other "weird" way, like - * using H5Ocopy() - QAK + * using H5Ocopy(). */ mesg->flags |= H5O_MSG_FLAG_WAS_UNKNOWN; /* Mark the message and chunk as dirty */ mesg->dirty = TRUE; mesgs_modified = TRUE; - } /* end if */ - } /* end if */ + } + } else { /* Check for message of unshareable class marked as "shareable" */ @@ -1500,7 +1443,7 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t /* Set message class for "known" messages */ mesg->type = H5O_msg_class_g[id]; - } /* end else */ + } /* Do some inspection/interpretation of new messages from this chunk */ /* (detect continuation messages, ref. count messages, etc.) */ @@ -1522,7 +1465,7 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t /* Add to continuation messages left to interpret */ if (H5O__add_cont_msg(udata->cont_msg_info, cont) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't add continuation message") - } /* end if */ + } /* Check if message is a ref. 
count message */ else if (H5O_REFCOUNT_ID == id) { H5O_refcount_t *refcount; @@ -1542,24 +1485,24 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t if (!refcount) HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't decode refcount") oh->nlink = *refcount; - } /* end if */ + } /* Check if message is a link message */ else if (H5O_LINK_ID == id) { /* Increment the count of link messages */ oh->link_msgs_seen++; - } /* end if */ + } /* Check if message is an attribute message */ else if (H5O_ATTR_ID == id) { /* Increment the count of attribute messages */ oh->attr_msgs_seen++; - } /* end if */ + } /* Mark the message & chunk as dirty if the message was changed by decoding */ if ((ioflags & H5O_DECODEIO_DIRTY) && (udata->file_intent & H5F_ACC_RDWR)) { mesg->dirty = TRUE; mesgs_modified = TRUE; - } /* end if */ - } /* end else */ + } + } /* Advance decode pointer past message */ chunk_image += mesg_size; @@ -1567,18 +1510,20 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t /* Check for 'gap' at end of chunk */ if ((eom_ptr - chunk_image) > 0 && (eom_ptr - chunk_image) < H5O_SIZEOF_MSGHDR_OH(oh)) { /* Gaps can only occur in later versions of the format */ - HDassert(oh->version > H5O_VERSION_1); + if (oh->version == H5O_VERSION_1) + HGOTO_ERROR(H5E_OHDR, H5E_BADMESG, FAIL, "gap found in early version of file format"); /* Gaps should only occur in chunks with no null messages */ - HDassert(nullcnt == 0); + if (nullcnt != 0) + HGOTO_ERROR(H5E_OHDR, H5E_BADMESG, FAIL, "gap in chunk with no null messages"); /* Set gap information for chunk */ oh->chunk[chunkno].gap = (size_t)(eom_ptr - chunk_image); /* Increment location in chunk */ chunk_image += oh->chunk[chunkno].gap; - } /* end if */ - } /* end while */ + } + } /* Check for correct checksum on chunks, in later versions of the format */ if (oh->version > H5O_VERSION_1) { @@ -1587,11 +1532,14 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t /* checksum verification already done in verify_chksum cb */ /* Metadata checksum */ + if (H5_IS_BUFFER_OVERFLOW(chunk_image, 4, p_end)) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(chunk_image, stored_chksum); - } /* end if */ + } - /* Sanity check */ - HDassert(chunk_image == oh->chunk[chunkno].image + oh->chunk[chunkno].size); + /* Size check */ + if (chunk_image != oh->chunk[chunkno].image + oh->chunk[chunkno].size) + HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "object header image size mismatch"); /* Mark the chunk dirty if we've modified messages */ if (mesgs_modified) @@ -1601,7 +1549,7 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t if (merged_null_msgs > 0) { udata->merged_null_msgs += merged_null_msgs; *dirty = TRUE; - } /* end if */ + } done: if (ret_value < 0 && udata->cont_msg_info->msgs) { @@ -1612,28 +1560,22 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t chunk_size, const uint8_t } /* H5O__chunk_deserialize() */ /*------------------------------------------------------------------------- - * Function: H5O__chunk_serialize + * Function: H5O__chunk_serialize * - * Purpose: Serialize a chunk for an object header - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Programmer: Quincey Koziol - * July 12, 2008 + * Purpose: Serialize a chunk for an object header * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ static herr_t 
H5O__chunk_serialize(const H5F_t *f, H5O_t *oh, unsigned chunkno) { - H5O_mesg_t *curr_msg; /* Pointer to current message being operated on */ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + H5O_mesg_t *curr_msg; /* Pointer to current message being operated on */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE - /* Check arguments */ HDassert(f); HDassert(oh); @@ -1673,7 +1615,7 @@ H5O__chunk_serialize(const H5F_t *f, H5O_t *oh, unsigned chunkno) /* Metadata checksum */ chunk_image = oh->chunk[chunkno].image + (oh->chunk[chunkno].size - H5O_SIZEOF_CHKSUM); UINT32ENCODE(chunk_image, metadata_chksum); - } /* end if */ + } done: FUNC_LEAVE_NOAPI(ret_value) From f003c23da170da659fa0342f6bbd8b6bb2dfb19a Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 1 May 2023 22:25:25 -0700 Subject: [PATCH 190/231] Add bin directory to make distclean (#2872) This allows h5cc to be cleaned up --- Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.am b/Makefile.am index 72d49597b4e..0f2a2472c8d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -89,7 +89,7 @@ endif SUBDIRS = src $(TESTSERIAL_DIR) $(TESTPARALLEL_DIR) bin $(TOOLS_DIR) utils . \ $(CXX_DIR) $(FORTRAN_DIR) $(JAVA_DIR) $(HDF5_HL_DIR) -DIST_SUBDIRS = src test testpar tools utils . c++ fortran hl examples java +DIST_SUBDIRS = src test testpar bin tools utils . c++ fortran hl examples java # Some files generated during configure that should be cleaned DISTCLEANFILES=config/stamp1 config/stamp2 From 04ce63606223dc37f39b68a2f38a8d79d9607343 Mon Sep 17 00:00:00 2001 From: Larry Knox Date: Tue, 2 May 2023 11:02:26 -0500 Subject: [PATCH 191/231] Redo remove selection I/O test from testphdf5 in CMake #2860. (#2874) --- testpar/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt index fb66e76103e..3a44fca7c1b 100644 --- a/testpar/CMakeLists.txt +++ b/testpar/CMakeLists.txt @@ -8,7 +8,6 @@ project (HDF5_TEST_PAR C) set (testphdf5_SOURCES ${HDF5_TEST_PAR_SOURCE_DIR}/testphdf5.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_dset.c - ${HDF5_TEST_PAR_SOURCE_DIR}/t_select_io_dset.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_file.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_file_image.c ${HDF5_TEST_PAR_SOURCE_DIR}/t_mdset.c From ace1456d7f718ee451fee0033a834651955945be Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Tue, 2 May 2023 11:56:52 -0500 Subject: [PATCH 192/231] Fixed test failure for when REAL is promoted via a compiler flag (#2873) --- fortran/test/fortranlib_test.F90 | 2 +- fortran/test/tH5P.F90 | 25 +++++++++++++------------ 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/fortran/test/fortranlib_test.F90 b/fortran/test/fortranlib_test.F90 index ec0dcece578..eb587a9eda8 100644 --- a/fortran/test/fortranlib_test.F90 +++ b/fortran/test/fortranlib_test.F90 @@ -204,7 +204,7 @@ PROGRAM fortranlibtest CALL write_test_status(ret_total_error, ' Dataset chunk cache configuration', total_error) ret_total_error = 0 - CALL test_misc_properties(cleanup, ret_total_error) + CALL test_misc_properties(ret_total_error) CALL write_test_status(ret_total_error, ' Miscellaneous properties', total_error) ret_total_error = 0 diff --git a/fortran/test/tH5P.F90 b/fortran/test/tH5P.F90 index 37ecdac8deb..57320dfacb7 100644 --- a/fortran/test/tH5P.F90 +++ b/fortran/test/tH5P.F90 @@ -777,10 +777,9 @@ END SUBROUTINE test_chunk_cache ! 
!------------------------------------------------------------------------- ! -SUBROUTINE test_misc_properties(cleanup, total_error) +SUBROUTINE test_misc_properties(total_error) IMPLICIT NONE - LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error INTEGER(hid_t) :: fapl_id = -1 ! Local fapl @@ -872,18 +871,17 @@ SUBROUTINE test_in_place_conversion(cleanup, total_error) INTEGER(HSIZE_T), DIMENSION(1) :: dims = (/array_len/) ! Dataset dimensions INTEGER :: rank = 1 ! Dataset rank - REAL(KIND=Fortran_DOUBLE), DIMENSION(1:array_len), TARGET :: wbuf_d - REAL(KIND=Fortran_DOUBLE), DIMENSION(1:array_len) :: wbuf_d_org - REAL(KIND=Fortran_REAL) , DIMENSION(1:array_len), TARGET :: rbuf + REAL(KIND=C_DOUBLE), DIMENSION(1:array_len), TARGET :: wbuf_d + REAL(KIND=C_DOUBLE), DIMENSION(1:array_len) :: wbuf_d_org + REAL(KIND=C_FLOAT), DIMENSION(1:array_len), TARGET :: rbuf INTEGER :: i TYPE(C_PTR) :: f_ptr ! create the data DO i = 1, array_len - wbuf_d(i) = 1_Fortran_DOUBLE + 0.123456789123456_Fortran_DOUBLE + wbuf_d(i) = 1.0_C_DOUBLE + 0.123456789123456_C_DOUBLE wbuf_d_org(i) = wbuf_d(i) ENDDO - ! !Create file "inplace_conv.h5" using default properties. ! @@ -917,22 +915,22 @@ SUBROUTINE test_in_place_conversion(cleanup, total_error) CALL check("h5pget_modify_write_buf_f", error, total_error) CALL VERIFY("h5pget_modify_write_buf_f", modify_write_buf, .TRUE., total_error) - CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_REAL, dspace_id, dset_id, error) + CALL h5dcreate_f(file_id, dsetname, h5kind_to_type(KIND(rbuf(1)), H5_REAL_KIND), dspace_id, dset_id, error) CALL check("h5dcreate_f", error, total_error) - f_ptr = C_LOC(wbuf_d) - CALL h5dwrite_f(dset_id, H5T_NATIVE_DOUBLE, f_ptr, error, H5S_ALL_F, H5S_ALL_F, xfer_prp=plist_id) + f_ptr = C_LOC(wbuf_d(1)) + CALL h5dwrite_f(dset_id, h5kind_to_type(KIND(wbuf_d(1)), H5_REAL_KIND), f_ptr, error, H5S_ALL_F, H5S_ALL_F, xfer_prp=plist_id) CALL check("h5dwrite_f", error, total_error) ! Should not be equal for in-place buffer use CALL VERIFY("h5dwrite_f -- in-place", wbuf_d(1), wbuf_d_org(1), total_error, .FALSE.) f_ptr = C_LOC(rbuf) - CALL h5dread_f(dset_id, H5T_NATIVE_REAL, f_ptr, error) + CALL h5dread_f(dset_id, h5kind_to_type(KIND(rbuf(1)), H5_REAL_KIND), f_ptr, error) CALL check("h5dread_f", error, total_error) DO i = 1, array_len - CALL VERIFY("h5dwrite_f -- in-place", rbuf(i), REAL(wbuf_d_org(i), Fortran_REAL), total_error) + CALL VERIFY("h5dwrite_f -- in-place", rbuf(i), REAL(wbuf_d_org(i), C_FLOAT), total_error) ENDDO ! 
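The change above replaces the compiler-default real kinds (Fortran_REAL, Fortran_DOUBLE) with the ISO_C_BINDING kinds C_FLOAT and C_DOUBLE and derives the HDF5 datatype from the buffer's actual kind via h5kind_to_type(), so a flag such as gfortran's -fdefault-real-8 can no longer desynchronize the memory datatype from the buffer being written. A minimal sketch of the same pattern follows; it is illustrative only, with hypothetical names (write_kind_safe, loc_id, rbuf), a space_id assumed to describe a 10-element dataspace, and a plain error counter standing in for the test suite's check() helper:

    ! Illustrative sketch only; not part of the patch above.
    SUBROUTINE write_kind_safe(loc_id, space_id, total_error)
      USE HDF5
      USE ISO_C_BINDING
      IMPLICIT NONE
      INTEGER(HID_T), INTENT(IN)    :: loc_id, space_id   ! caller-supplied location and dataspace
      INTEGER,        INTENT(INOUT) :: total_error
      REAL(KIND=C_FLOAT), DIMENSION(1:10), TARGET :: rbuf = 0.0_C_FLOAT
      INTEGER(HID_T) :: dset_id, mem_type
      INTEGER        :: error
      TYPE(C_PTR)    :: f_ptr

      ! Ask the library for the type that matches the buffer's declared kind
      ! instead of assuming default REAL still corresponds to H5T_NATIVE_REAL.
      mem_type = h5kind_to_type(KIND(rbuf(1)), H5_REAL_KIND)

      CALL h5dcreate_f(loc_id, "kind_safe_dset", mem_type, space_id, dset_id, error)
      IF (error /= 0) total_error = total_error + 1

      ! Write through the F2003 C_PTR interface with the same derived type.
      f_ptr = C_LOC(rbuf(1))
      CALL h5dwrite_f(dset_id, mem_type, f_ptr, error)
      IF (error /= 0) total_error = total_error + 1

      CALL h5dclose_f(dset_id, error)
      IF (error /= 0) total_error = total_error + 1
    END SUBROUTINE write_kind_safe

Because both the dataset's datatype and the memory datatype come from KIND(rbuf(1)), the test remains self-consistent whether or not the default REAL width is promoted at compile time.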
@@ -954,6 +952,9 @@ SUBROUTINE test_in_place_conversion(cleanup, total_error) CALL h5pclose_f(plist_id, error) CALL check("h5pclose_f", error, total_error) + IF(cleanup) CALL h5_cleanup_f(fix_filename, H5P_DEFAULT_F, error) + CALL check("h5_cleanup_f", error, total_error) + END SUBROUTINE test_in_place_conversion END MODULE TH5P From 74787e91704ef943756748a54e67bb20d010f190 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 2 May 2023 14:52:39 -0500 Subject: [PATCH 193/231] Add initial version of HDF5 API tests (#2877) --- .github/workflows/codespell.yml | 2 +- CMakeLists.txt | 54 +- hl/c++/test/Makefile.am | 2 +- test/API/CMake/CheckAsan.cmake | 37 + test/API/CMake/CheckUbsan.cmake | 37 + test/API/CMakeLists.txt | 314 + test/API/H5_api_async_test.c | 2730 ++ test/API/H5_api_async_test.h | 29 + test/API/H5_api_attribute_test.c | 11027 +++++++ test/API/H5_api_attribute_test.h | 203 + test/API/H5_api_dataset_test.c | 11683 +++++++ test/API/H5_api_dataset_test.h | 331 + test/API/H5_api_datatype_test.c | 2693 ++ test/API/H5_api_datatype_test.h | 79 + test/API/H5_api_file_test.c | 2564 ++ test/API/H5_api_file_test.h | 85 + test/API/H5_api_group_test.c | 2394 ++ test/API/H5_api_group_test.h | 65 + test/API/H5_api_link_test.c | 27072 ++++++++++++++++ test/API/H5_api_link_test.h | 437 + test/API/H5_api_misc_test.c | 1060 + test/API/H5_api_misc_test.h | 52 + test/API/H5_api_object_test.c | 7172 ++++ test/API/H5_api_object_test.h | 191 + test/API/H5_api_test.c | 227 + test/API/H5_api_test.h | 73 + test/API/H5_api_test_config.h.in | 66 + test/API/H5_api_test_util.c | 819 + test/API/H5_api_test_util.h | 24 + test/API/H5_api_tests_disabled.h | 46 + test/API/README.md | 86 + test/API/driver/CMakeLists.txt | 17 + test/API/driver/h5_api_test_driver.cxx | 910 + test/API/driver/h5_api_test_driver.hxx | 93 + test/API/driver/kwsys/.clang-format | 22 + test/API/driver/kwsys/.hooks-config | 2 + test/API/driver/kwsys/Base64.c | 225 + test/API/driver/kwsys/Base64.h.in | 110 + test/API/driver/kwsys/CMakeLists.txt | 1260 + test/API/driver/kwsys/CONTRIBUTING.rst | 49 + test/API/driver/kwsys/CTestConfig.cmake | 9 + test/API/driver/kwsys/CTestCustom.cmake.in | 14 + .../API/driver/kwsys/CommandLineArguments.cxx | 768 + .../driver/kwsys/CommandLineArguments.hxx.in | 270 + test/API/driver/kwsys/Configure.h.in | 89 + test/API/driver/kwsys/Configure.hxx.in | 65 + test/API/driver/kwsys/ConsoleBuf.hxx.in | 398 + test/API/driver/kwsys/Copyright.txt | 38 + test/API/driver/kwsys/Directory.cxx | 236 + test/API/driver/kwsys/Directory.hxx.in | 72 + test/API/driver/kwsys/DynamicLoader.cxx | 495 + test/API/driver/kwsys/DynamicLoader.hxx.in | 106 + test/API/driver/kwsys/Encoding.h.in | 69 + test/API/driver/kwsys/Encoding.hxx.in | 80 + test/API/driver/kwsys/EncodingC.c | 72 + test/API/driver/kwsys/EncodingCXX.cxx | 288 + test/API/driver/kwsys/ExtraTest.cmake.in | 1 + test/API/driver/kwsys/FStream.cxx | 55 + test/API/driver/kwsys/FStream.hxx.in | 278 + test/API/driver/kwsys/GitSetup/.gitattributes | 6 + test/API/driver/kwsys/GitSetup/LICENSE | 202 + test/API/driver/kwsys/GitSetup/NOTICE | 5 + test/API/driver/kwsys/GitSetup/README | 87 + test/API/driver/kwsys/GitSetup/config | 4 + test/API/driver/kwsys/GitSetup/config.sample | 32 + .../API/driver/kwsys/GitSetup/git-gerrit-push | 74 + .../API/driver/kwsys/GitSetup/git-gitlab-push | 177 + test/API/driver/kwsys/GitSetup/pre-commit | 26 + test/API/driver/kwsys/GitSetup/setup-aliases | 6 + test/API/driver/kwsys/GitSetup/setup-gerrit | 147 + test/API/driver/kwsys/GitSetup/setup-gitlab | 
140 + test/API/driver/kwsys/GitSetup/setup-hooks | 64 + test/API/driver/kwsys/GitSetup/setup-ssh | 111 + test/API/driver/kwsys/GitSetup/setup-stage | 82 + test/API/driver/kwsys/GitSetup/setup-upstream | 104 + test/API/driver/kwsys/GitSetup/setup-user | 39 + test/API/driver/kwsys/GitSetup/tips | 55 + test/API/driver/kwsys/Glob.cxx | 448 + test/API/driver/kwsys/Glob.hxx.in | 134 + test/API/driver/kwsys/IOStream.cxx | 255 + test/API/driver/kwsys/IOStream.hxx.in | 126 + test/API/driver/kwsys/MD5.c | 494 + test/API/driver/kwsys/MD5.h.in | 97 + test/API/driver/kwsys/Process.h.in | 544 + test/API/driver/kwsys/ProcessUNIX.c | 2920 ++ test/API/driver/kwsys/ProcessWin32.c | 2786 ++ test/API/driver/kwsys/README.rst | 37 + test/API/driver/kwsys/RegularExpression.cxx | 1218 + .../API/driver/kwsys/RegularExpression.hxx.in | 562 + test/API/driver/kwsys/SetupForDevelopment.sh | 20 + test/API/driver/kwsys/SharedForward.h.in | 879 + test/API/driver/kwsys/String.c | 100 + test/API/driver/kwsys/String.h.in | 57 + test/API/driver/kwsys/String.hxx.in | 65 + test/API/driver/kwsys/System.c | 236 + test/API/driver/kwsys/System.h.in | 60 + test/API/driver/kwsys/SystemInformation.cxx | 5466 ++++ .../API/driver/kwsys/SystemInformation.hxx.in | 170 + test/API/driver/kwsys/SystemTools.cxx | 4703 +++ test/API/driver/kwsys/SystemTools.hxx.in | 981 + test/API/driver/kwsys/Terminal.c | 414 + test/API/driver/kwsys/Terminal.h.in | 170 + test/API/driver/kwsys/clang-format.bash | 128 + test/API/driver/kwsys/hash_fun.hxx.in | 166 + test/API/driver/kwsys/hash_map.hxx.in | 423 + test/API/driver/kwsys/hash_set.hxx.in | 392 + test/API/driver/kwsys/hashtable.hxx.in | 995 + test/API/driver/kwsys/kwsysHeaderDump.pl | 41 + .../API/driver/kwsys/kwsysPlatformTests.cmake | 216 + test/API/driver/kwsys/kwsysPlatformTestsC.c | 108 + .../driver/kwsys/kwsysPlatformTestsCXX.cxx | 335 + test/API/driver/kwsys/kwsysPrivate.h | 34 + .../driver/kwsys/testCommandLineArguments.cxx | 209 + .../kwsys/testCommandLineArguments1.cxx | 93 + test/API/driver/kwsys/testConfigure.cxx | 30 + test/API/driver/kwsys/testConsoleBuf.cxx | 782 + test/API/driver/kwsys/testConsoleBuf.hxx | 17 + test/API/driver/kwsys/testConsoleBufChild.cxx | 55 + test/API/driver/kwsys/testDirectory.cxx | 110 + test/API/driver/kwsys/testDynamicLoader.cxx | 133 + test/API/driver/kwsys/testDynload.c | 13 + test/API/driver/kwsys/testDynloadImpl.c | 10 + test/API/driver/kwsys/testDynloadImpl.h | 15 + test/API/driver/kwsys/testDynloadUse.c | 15 + test/API/driver/kwsys/testEncode.c | 67 + test/API/driver/kwsys/testEncoding.cxx | 286 + test/API/driver/kwsys/testFStream.cxx | 113 + test/API/driver/kwsys/testFail.c | 24 + test/API/driver/kwsys/testHashSTL.cxx | 64 + test/API/driver/kwsys/testProcess.c | 728 + test/API/driver/kwsys/testSharedForward.c.in | 27 + .../driver/kwsys/testSystemInformation.cxx | 106 + test/API/driver/kwsys/testSystemTools.bin | Bin 0 -> 766 bytes test/API/driver/kwsys/testSystemTools.cxx | 1128 + test/API/driver/kwsys/testSystemTools.h.in | 12 + test/API/driver/kwsys/testTerminal.c | 22 + test/API/driver/kwsys/update-gitsetup.bash | 20 + test/API/driver/kwsys/update-third-party.bash | 169 + test/API/tarray.c | 2250 ++ test/API/tattr.c | 11929 +++++++ test/API/tchecksum.c | 251 + test/API/tconfig.c | 199 + test/API/tcoords.c | 724 + test/API/testhdf5.c | 729 + test/API/testhdf5.h | 349 + test/API/tfile.c | 8381 +++++ test/API/tgenprop.c | 2201 ++ test/API/th5o.c | 1889 ++ test/API/th5s.c | 3538 ++ test/API/tid.c | 1413 + test/API/titerate.c | 1263 + test/API/tmisc.c | 
6349 ++++ test/API/trefer.c | 3641 +++ test/API/tselect.c | 16314 ++++++++++ test/API/ttime.c | 231 + test/API/tunicode.c | 867 + test/API/tvlstr.c | 1013 + test/API/tvltypes.c | 3268 ++ test/CMakeLists.txt | 20 + test/h5test.c | 7 + test/h5test.h | 72 +- test/vol.c | 62 +- testpar/API/CMakeLists.txt | 279 + testpar/API/H5_api_async_test_parallel.c | 3668 +++ testpar/API/H5_api_async_test_parallel.h | 29 + testpar/API/H5_api_attribute_test_parallel.c | 47 + testpar/API/H5_api_attribute_test_parallel.h | 20 + testpar/API/H5_api_dataset_test_parallel.c | 8149 +++++ testpar/API/H5_api_dataset_test_parallel.h | 20 + testpar/API/H5_api_datatype_test_parallel.c | 47 + testpar/API/H5_api_datatype_test_parallel.h | 20 + testpar/API/H5_api_file_test_parallel.c | 367 + testpar/API/H5_api_file_test_parallel.h | 20 + testpar/API/H5_api_group_test_parallel.c | 47 + testpar/API/H5_api_group_test_parallel.h | 20 + testpar/API/H5_api_link_test_parallel.c | 47 + testpar/API/H5_api_link_test_parallel.h | 20 + testpar/API/H5_api_misc_test_parallel.c | 47 + testpar/API/H5_api_misc_test_parallel.h | 20 + testpar/API/H5_api_object_test_parallel.c | 47 + testpar/API/H5_api_object_test_parallel.h | 20 + testpar/API/H5_api_test_parallel.c | 338 + testpar/API/H5_api_test_parallel.h | 188 + testpar/API/t_bigio.c | 1942 ++ testpar/API/t_chunk_alloc.c | 512 + testpar/API/t_coll_chunk.c | 1417 + testpar/API/t_coll_md_read.c | 654 + testpar/API/t_dset.c | 4335 +++ testpar/API/t_file.c | 1032 + testpar/API/t_file_image.c | 371 + testpar/API/t_filter_read.c | 564 + testpar/API/t_mdset.c | 2814 ++ testpar/API/t_ph5basic.c | 192 + testpar/API/t_prop.c | 646 + testpar/API/t_pshutdown.c | 150 + testpar/API/t_shapesame.c | 4516 +++ testpar/API/t_span_tree.c | 2622 ++ testpar/API/testphdf5.c | 1007 + testpar/API/testphdf5.h | 343 + testpar/CMakeLists.txt | 20 + 200 files changed, 212642 insertions(+), 49 deletions(-) create mode 100644 test/API/CMake/CheckAsan.cmake create mode 100644 test/API/CMake/CheckUbsan.cmake create mode 100644 test/API/CMakeLists.txt create mode 100644 test/API/H5_api_async_test.c create mode 100644 test/API/H5_api_async_test.h create mode 100644 test/API/H5_api_attribute_test.c create mode 100644 test/API/H5_api_attribute_test.h create mode 100644 test/API/H5_api_dataset_test.c create mode 100644 test/API/H5_api_dataset_test.h create mode 100644 test/API/H5_api_datatype_test.c create mode 100644 test/API/H5_api_datatype_test.h create mode 100644 test/API/H5_api_file_test.c create mode 100644 test/API/H5_api_file_test.h create mode 100644 test/API/H5_api_group_test.c create mode 100644 test/API/H5_api_group_test.h create mode 100644 test/API/H5_api_link_test.c create mode 100644 test/API/H5_api_link_test.h create mode 100644 test/API/H5_api_misc_test.c create mode 100644 test/API/H5_api_misc_test.h create mode 100644 test/API/H5_api_object_test.c create mode 100644 test/API/H5_api_object_test.h create mode 100644 test/API/H5_api_test.c create mode 100644 test/API/H5_api_test.h create mode 100644 test/API/H5_api_test_config.h.in create mode 100644 test/API/H5_api_test_util.c create mode 100644 test/API/H5_api_test_util.h create mode 100644 test/API/H5_api_tests_disabled.h create mode 100644 test/API/README.md create mode 100644 test/API/driver/CMakeLists.txt create mode 100644 test/API/driver/h5_api_test_driver.cxx create mode 100644 test/API/driver/h5_api_test_driver.hxx create mode 100644 test/API/driver/kwsys/.clang-format create mode 100644 test/API/driver/kwsys/.hooks-config create mode 100644 
test/API/driver/kwsys/Base64.c create mode 100644 test/API/driver/kwsys/Base64.h.in create mode 100644 test/API/driver/kwsys/CMakeLists.txt create mode 100644 test/API/driver/kwsys/CONTRIBUTING.rst create mode 100644 test/API/driver/kwsys/CTestConfig.cmake create mode 100644 test/API/driver/kwsys/CTestCustom.cmake.in create mode 100644 test/API/driver/kwsys/CommandLineArguments.cxx create mode 100644 test/API/driver/kwsys/CommandLineArguments.hxx.in create mode 100644 test/API/driver/kwsys/Configure.h.in create mode 100644 test/API/driver/kwsys/Configure.hxx.in create mode 100644 test/API/driver/kwsys/ConsoleBuf.hxx.in create mode 100644 test/API/driver/kwsys/Copyright.txt create mode 100644 test/API/driver/kwsys/Directory.cxx create mode 100644 test/API/driver/kwsys/Directory.hxx.in create mode 100644 test/API/driver/kwsys/DynamicLoader.cxx create mode 100644 test/API/driver/kwsys/DynamicLoader.hxx.in create mode 100644 test/API/driver/kwsys/Encoding.h.in create mode 100644 test/API/driver/kwsys/Encoding.hxx.in create mode 100644 test/API/driver/kwsys/EncodingC.c create mode 100644 test/API/driver/kwsys/EncodingCXX.cxx create mode 100644 test/API/driver/kwsys/ExtraTest.cmake.in create mode 100644 test/API/driver/kwsys/FStream.cxx create mode 100644 test/API/driver/kwsys/FStream.hxx.in create mode 100644 test/API/driver/kwsys/GitSetup/.gitattributes create mode 100644 test/API/driver/kwsys/GitSetup/LICENSE create mode 100644 test/API/driver/kwsys/GitSetup/NOTICE create mode 100644 test/API/driver/kwsys/GitSetup/README create mode 100644 test/API/driver/kwsys/GitSetup/config create mode 100644 test/API/driver/kwsys/GitSetup/config.sample create mode 100644 test/API/driver/kwsys/GitSetup/git-gerrit-push create mode 100644 test/API/driver/kwsys/GitSetup/git-gitlab-push create mode 100644 test/API/driver/kwsys/GitSetup/pre-commit create mode 100644 test/API/driver/kwsys/GitSetup/setup-aliases create mode 100644 test/API/driver/kwsys/GitSetup/setup-gerrit create mode 100644 test/API/driver/kwsys/GitSetup/setup-gitlab create mode 100644 test/API/driver/kwsys/GitSetup/setup-hooks create mode 100644 test/API/driver/kwsys/GitSetup/setup-ssh create mode 100644 test/API/driver/kwsys/GitSetup/setup-stage create mode 100644 test/API/driver/kwsys/GitSetup/setup-upstream create mode 100644 test/API/driver/kwsys/GitSetup/setup-user create mode 100644 test/API/driver/kwsys/GitSetup/tips create mode 100644 test/API/driver/kwsys/Glob.cxx create mode 100644 test/API/driver/kwsys/Glob.hxx.in create mode 100644 test/API/driver/kwsys/IOStream.cxx create mode 100644 test/API/driver/kwsys/IOStream.hxx.in create mode 100644 test/API/driver/kwsys/MD5.c create mode 100644 test/API/driver/kwsys/MD5.h.in create mode 100644 test/API/driver/kwsys/Process.h.in create mode 100644 test/API/driver/kwsys/ProcessUNIX.c create mode 100644 test/API/driver/kwsys/ProcessWin32.c create mode 100644 test/API/driver/kwsys/README.rst create mode 100644 test/API/driver/kwsys/RegularExpression.cxx create mode 100644 test/API/driver/kwsys/RegularExpression.hxx.in create mode 100644 test/API/driver/kwsys/SetupForDevelopment.sh create mode 100644 test/API/driver/kwsys/SharedForward.h.in create mode 100644 test/API/driver/kwsys/String.c create mode 100644 test/API/driver/kwsys/String.h.in create mode 100644 test/API/driver/kwsys/String.hxx.in create mode 100644 test/API/driver/kwsys/System.c create mode 100644 test/API/driver/kwsys/System.h.in create mode 100644 test/API/driver/kwsys/SystemInformation.cxx create mode 100644 
test/API/driver/kwsys/SystemInformation.hxx.in create mode 100644 test/API/driver/kwsys/SystemTools.cxx create mode 100644 test/API/driver/kwsys/SystemTools.hxx.in create mode 100644 test/API/driver/kwsys/Terminal.c create mode 100644 test/API/driver/kwsys/Terminal.h.in create mode 100644 test/API/driver/kwsys/clang-format.bash create mode 100644 test/API/driver/kwsys/hash_fun.hxx.in create mode 100644 test/API/driver/kwsys/hash_map.hxx.in create mode 100644 test/API/driver/kwsys/hash_set.hxx.in create mode 100644 test/API/driver/kwsys/hashtable.hxx.in create mode 100644 test/API/driver/kwsys/kwsysHeaderDump.pl create mode 100644 test/API/driver/kwsys/kwsysPlatformTests.cmake create mode 100644 test/API/driver/kwsys/kwsysPlatformTestsC.c create mode 100644 test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx create mode 100644 test/API/driver/kwsys/kwsysPrivate.h create mode 100644 test/API/driver/kwsys/testCommandLineArguments.cxx create mode 100644 test/API/driver/kwsys/testCommandLineArguments1.cxx create mode 100644 test/API/driver/kwsys/testConfigure.cxx create mode 100644 test/API/driver/kwsys/testConsoleBuf.cxx create mode 100644 test/API/driver/kwsys/testConsoleBuf.hxx create mode 100644 test/API/driver/kwsys/testConsoleBufChild.cxx create mode 100644 test/API/driver/kwsys/testDirectory.cxx create mode 100644 test/API/driver/kwsys/testDynamicLoader.cxx create mode 100644 test/API/driver/kwsys/testDynload.c create mode 100644 test/API/driver/kwsys/testDynloadImpl.c create mode 100644 test/API/driver/kwsys/testDynloadImpl.h create mode 100644 test/API/driver/kwsys/testDynloadUse.c create mode 100644 test/API/driver/kwsys/testEncode.c create mode 100644 test/API/driver/kwsys/testEncoding.cxx create mode 100644 test/API/driver/kwsys/testFStream.cxx create mode 100644 test/API/driver/kwsys/testFail.c create mode 100644 test/API/driver/kwsys/testHashSTL.cxx create mode 100644 test/API/driver/kwsys/testProcess.c create mode 100644 test/API/driver/kwsys/testSharedForward.c.in create mode 100644 test/API/driver/kwsys/testSystemInformation.cxx create mode 100644 test/API/driver/kwsys/testSystemTools.bin create mode 100644 test/API/driver/kwsys/testSystemTools.cxx create mode 100644 test/API/driver/kwsys/testSystemTools.h.in create mode 100644 test/API/driver/kwsys/testTerminal.c create mode 100644 test/API/driver/kwsys/update-gitsetup.bash create mode 100644 test/API/driver/kwsys/update-third-party.bash create mode 100644 test/API/tarray.c create mode 100644 test/API/tattr.c create mode 100644 test/API/tchecksum.c create mode 100644 test/API/tconfig.c create mode 100644 test/API/tcoords.c create mode 100644 test/API/testhdf5.c create mode 100644 test/API/testhdf5.h create mode 100644 test/API/tfile.c create mode 100644 test/API/tgenprop.c create mode 100644 test/API/th5o.c create mode 100644 test/API/th5s.c create mode 100644 test/API/tid.c create mode 100644 test/API/titerate.c create mode 100644 test/API/tmisc.c create mode 100644 test/API/trefer.c create mode 100644 test/API/tselect.c create mode 100644 test/API/ttime.c create mode 100644 test/API/tunicode.c create mode 100644 test/API/tvlstr.c create mode 100644 test/API/tvltypes.c create mode 100644 testpar/API/CMakeLists.txt create mode 100644 testpar/API/H5_api_async_test_parallel.c create mode 100644 testpar/API/H5_api_async_test_parallel.h create mode 100644 testpar/API/H5_api_attribute_test_parallel.c create mode 100644 testpar/API/H5_api_attribute_test_parallel.h create mode 100644 testpar/API/H5_api_dataset_test_parallel.c create mode 
100644 testpar/API/H5_api_dataset_test_parallel.h create mode 100644 testpar/API/H5_api_datatype_test_parallel.c create mode 100644 testpar/API/H5_api_datatype_test_parallel.h create mode 100644 testpar/API/H5_api_file_test_parallel.c create mode 100644 testpar/API/H5_api_file_test_parallel.h create mode 100644 testpar/API/H5_api_group_test_parallel.c create mode 100644 testpar/API/H5_api_group_test_parallel.h create mode 100644 testpar/API/H5_api_link_test_parallel.c create mode 100644 testpar/API/H5_api_link_test_parallel.h create mode 100644 testpar/API/H5_api_misc_test_parallel.c create mode 100644 testpar/API/H5_api_misc_test_parallel.h create mode 100644 testpar/API/H5_api_object_test_parallel.c create mode 100644 testpar/API/H5_api_object_test_parallel.h create mode 100644 testpar/API/H5_api_test_parallel.c create mode 100644 testpar/API/H5_api_test_parallel.h create mode 100644 testpar/API/t_bigio.c create mode 100644 testpar/API/t_chunk_alloc.c create mode 100644 testpar/API/t_coll_chunk.c create mode 100644 testpar/API/t_coll_md_read.c create mode 100644 testpar/API/t_dset.c create mode 100644 testpar/API/t_file.c create mode 100644 testpar/API/t_file_image.c create mode 100644 testpar/API/t_filter_read.c create mode 100644 testpar/API/t_mdset.c create mode 100644 testpar/API/t_ph5basic.c create mode 100644 testpar/API/t_prop.c create mode 100644 testpar/API/t_pshutdown.c create mode 100644 testpar/API/t_shapesame.c create mode 100644 testpar/API/t_span_tree.c create mode 100644 testpar/API/testphdf5.c create mode 100644 testpar/API/testphdf5.h diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index a4edb0f3e7b..ddf10389c1e 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -11,5 +11,5 @@ jobs: - uses: actions/checkout@v3 - uses: codespell-project/actions-codespell@master with: - skip: ./bin/trace,./hl/tools/h5watch/h5watch.c,./tools/test/h5jam/tellub.c,./config/sanitizer/LICENSE,./config/sanitizer/sanitizers.cmake,./tools/test/h5repack/testfiles/*.dat + skip: ./bin/trace,./hl/tools/h5watch/h5watch.c,./tools/test/h5jam/tellub.c,./config/sanitizer/LICENSE,./config/sanitizer/sanitizers.cmake,./tools/test/h5repack/testfiles/*.dat,./test/API/driver ignore_words_list: isnt,inout,nd,parms,parm,ba,offsetP,ser,ois,had,fiter,fo,clude,refere,minnum,offsetp,creat,ans:,eiter,lastr,ans,isn't,ifset,sur,trun,dne,tthe,hda,filname,te,htmp,minnum,ake,gord,numer,ro,oce,msdos diff --git a/CMakeLists.txt b/CMakeLists.txt index 8cfa71dcd64..530b1538a4c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -208,20 +208,23 @@ set (HDF5_HL_F90_C_LIBSH_TARGET "${HDF5_HL_F90_C_LIB_CORENAME}-shared") #----------------------------------------------------------------------------- # Define some CMake variables for use later in the project #----------------------------------------------------------------------------- -set (HDF_CONFIG_DIR ${HDF5_SOURCE_DIR}/config) -set (HDF_RESOURCES_DIR ${HDF5_SOURCE_DIR}/config/cmake) -set (HDF5_SRC_DIR ${HDF5_SOURCE_DIR}/src) -set (HDF5_TEST_SRC_DIR ${HDF5_SOURCE_DIR}/test) -set (HDF5_CPP_SRC_DIR ${HDF5_SOURCE_DIR}/c++) -set (HDF5_CPP_TST_DIR ${HDF5_SOURCE_DIR}/c++/test) -set (HDF5_HL_SRC_DIR ${HDF5_SOURCE_DIR}/hl) -set (HDF5_HL_CPP_SRC_DIR ${HDF5_SOURCE_DIR}/hl/c++) -set (HDF5_HL_TOOLS_DIR ${HDF5_SOURCE_DIR}/hl/tools) -set (HDF5_TOOLS_DIR ${HDF5_SOURCE_DIR}/tools) -set (HDF5_TOOLS_SRC_DIR ${HDF5_SOURCE_DIR}/tools/src) -set (HDF5_PERFORM_SRC_DIR ${HDF5_SOURCE_DIR}/tools/src/perform) -set (HDF5_UTILS_DIR 
${HDF5_SOURCE_DIR}/utils) -set (HDF5_F90_SRC_DIR ${HDF5_SOURCE_DIR}/fortran) +set (HDF_CONFIG_DIR ${HDF5_SOURCE_DIR}/config) +set (HDF_RESOURCES_DIR ${HDF5_SOURCE_DIR}/config/cmake) +set (HDF5_SRC_DIR ${HDF5_SOURCE_DIR}/src) +set (HDF5_TEST_SRC_DIR ${HDF5_SOURCE_DIR}/test) +set (HDF5_TEST_PAR_DIR ${HDF5_SOURCE_DIR}/testpar) +set (HDF5_TEST_API_SRC_DIR ${HDF5_SOURCE_DIR}/test/API) +set (HDF5_TEST_API_PAR_SRC_DIR ${HDF5_SOURCE_DIR}/testpar/API) +set (HDF5_CPP_SRC_DIR ${HDF5_SOURCE_DIR}/c++) +set (HDF5_CPP_TST_DIR ${HDF5_SOURCE_DIR}/c++/test) +set (HDF5_HL_SRC_DIR ${HDF5_SOURCE_DIR}/hl) +set (HDF5_HL_CPP_SRC_DIR ${HDF5_SOURCE_DIR}/hl/c++) +set (HDF5_HL_TOOLS_DIR ${HDF5_SOURCE_DIR}/hl/tools) +set (HDF5_TOOLS_DIR ${HDF5_SOURCE_DIR}/tools) +set (HDF5_TOOLS_SRC_DIR ${HDF5_SOURCE_DIR}/tools/src) +set (HDF5_PERFORM_SRC_DIR ${HDF5_SOURCE_DIR}/tools/src/perform) +set (HDF5_UTILS_DIR ${HDF5_SOURCE_DIR}/utils) +set (HDF5_F90_SRC_DIR ${HDF5_SOURCE_DIR}/fortran) set (HDF5_JAVA_JNI_SRC_DIR ${HDF5_SOURCE_DIR}/java/src/jni) set (HDF5_JAVA_HDF5_SRC_DIR ${HDF5_SOURCE_DIR}/java/src/hdf) set (HDF5_JAVA_TEST_SRC_DIR ${HDF5_SOURCE_DIR}/java/test) @@ -958,6 +961,25 @@ if (BUILD_TESTING) math (EXPR CTEST_LONG_TIMEOUT "${DART_TESTING_TIMEOUT} * 2") math (EXPR CTEST_VERY_LONG_TIMEOUT "${DART_TESTING_TIMEOUT} * 3") + option (HDF5_TEST_API "Execute HDF5 API tests" OFF) + mark_as_advanced (HDF5_TEST_API) + if (HDF5_TEST_API) + option (HDF5_TEST_API_INSTALL "Install HDF5 API tests" OFF) + mark_as_advanced (HDF5_TEST_API_INSTALL) + + # Enable HDF5 Async API tests + option (HDF5_TEST_API_ENABLE_ASYNC "Enable HDF5 Async API tests" OFF) + mark_as_advanced (HDF5_TEST_API_ENABLE_ASYNC) + + # Build and use HDF5 test driver program for API tests + option (HDF5_TEST_API_ENABLE_DRIVER "Enable HDF5 API test driver program" OFF) + mark_as_advanced (HDF5_TEST_API_ENABLE_DRIVER) + if (HDF5_TEST_API_ENABLE_DRIVER) + set (HDF5_TEST_API_SERVER "" CACHE STRING "Server executable for running API tests") + mark_as_advanced (HDF5_TEST_API_SERVER) + endif () + endif () + option (HDF5_TEST_VFD "Execute tests with different VFDs" OFF) mark_as_advanced (HDF5_TEST_VFD) if (HDF5_TEST_VFD) @@ -1014,11 +1036,11 @@ if (BUILD_TESTING) mark_as_advanced (HDF5_TEST_JAVA) if (NOT HDF5_EXTERNALLY_CONFIGURED) - if (EXISTS "${HDF5_SOURCE_DIR}/test" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/test") + if (EXISTS "${HDF5_TEST_SRC_DIR}" AND IS_DIRECTORY "${HDF5_TEST_SRC_DIR}") add_subdirectory (test) endif () if (H5_HAVE_PARALLEL) - if (EXISTS "${HDF5_SOURCE_DIR}/testpar" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/testpar") + if (EXISTS "${HDF5_TEST_PAR_DIR}" AND IS_DIRECTORY "${HDF5_TEST_PAR_DIR}") add_subdirectory (testpar) endif () endif () diff --git a/hl/c++/test/Makefile.am b/hl/c++/test/Makefile.am index 251d56a306a..73f14632ba9 100644 --- a/hl/c++/test/Makefile.am +++ b/hl/c++/test/Makefile.am @@ -26,7 +26,7 @@ TEST_PROG=ptableTest check_PROGRAMS=$(TEST_PROG) # The tests depend on the hdf5, hdf5 C++, and hdf5_hl libraries -LDADD=$(LIBH5CPP_HL) $(LIBH5_HL) $(LIBH5CPP) $(LIBHDF5) +LDADD=$(LIBH5CPP_HL) $(LIBH5_HL) $(LIBH5TEST) $(LIBH5CPP) $(LIBHDF5) ptableTest_SOURCES=ptableTest.cpp diff --git a/test/API/CMake/CheckAsan.cmake b/test/API/CMake/CheckAsan.cmake new file mode 100644 index 00000000000..32f4b4535cb --- /dev/null +++ b/test/API/CMake/CheckAsan.cmake @@ -0,0 +1,37 @@ +set(ASAN_FLAG "-fsanitize=address") +set(ASAN_C_FLAGS "-O1 -g ${ASAN_FLAG} -fsanitize-address-use-after-scope -fno-omit-frame-pointer -fno-optimize-sibling-calls") +set(ASAN_CXX_FLAGS 
${ASAN_C_FLAGS}) + +get_property(ASAN_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES) +foreach(lang ${ASAN_LANGUAGES}) + set(ASAN_${lang}_LANG_ENABLED 1) +endforeach() + +if(ASAN_C_LANG_ENABLED) + include(CheckCCompilerFlag) + set(CMAKE_REQUIRED_LINK_OPTIONS ${ASAN_FLAG}) + check_c_compiler_flag(${ASAN_FLAG} ASAN_C_FLAG_SUPPORTED) + if(NOT ASAN_C_FLAG_SUPPORTED) + message(STATUS "Asan flags are not supported by the C compiler.") + else() + if(NOT CMAKE_C_FLAGS_ASAN) + set(CMAKE_C_FLAGS_ASAN ${ASAN_C_FLAGS} CACHE STRING "Flags used by the C compiler during ASAN builds." FORCE) + endif() + endif() + unset(CMAKE_REQUIRED_LINK_OPTIONS) +endif() + +if(ASAN_CXX_LANG_ENABLED) + include(CheckCXXCompilerFlag) + set(CMAKE_REQUIRED_LINK_OPTIONS ${ASAN_FLAG}) + check_cxx_compiler_flag(${ASAN_FLAG} ASAN_CXX_FLAG_SUPPORTED) + if(NOT ASAN_CXX_FLAG_SUPPORTED) + message(STATUS "Asan flags are not supported by the CXX compiler.") + else() + if(NOT CMAKE_CXX_FLAGS_ASAN) + set(CMAKE_CXX_FLAGS_ASAN ${ASAN_CXX_FLAGS} CACHE STRING "Flags used by the CXX compiler during ASAN builds." FORCE) + endif() + endif() + unset(CMAKE_REQUIRED_LINK_OPTIONS) +endif() + diff --git a/test/API/CMake/CheckUbsan.cmake b/test/API/CMake/CheckUbsan.cmake new file mode 100644 index 00000000000..f2b9c2cf0c5 --- /dev/null +++ b/test/API/CMake/CheckUbsan.cmake @@ -0,0 +1,37 @@ +set(UBSAN_FLAG "-fsanitize=undefined") +set(UBSAN_C_FLAGS "-O1 -g ${UBSAN_FLAG} -fno-omit-frame-pointer") +set(UBSAN_CXX_FLAGS ${UBSAN_C_FLAGS}) + +get_property(UBSAN_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES) +foreach(lang ${UBSAN_LANGUAGES}) + set(UBSAN_${lang}_LANG_ENABLED 1) +endforeach() + +if(UBSAN_C_LANG_ENABLED) + include(CheckCCompilerFlag) + set(CMAKE_REQUIRED_LINK_OPTIONS ${UBSAN_FLAG}) + check_c_compiler_flag(${UBSAN_FLAG} UBSAN_C_FLAG_SUPPORTED) + if(NOT UBSAN_C_FLAG_SUPPORTED) + message(STATUS "Ubsan flags are not supported by the C compiler.") + else() + if(NOT CMAKE_C_FLAGS_UBSAN) + set(CMAKE_C_FLAGS_UBSAN ${UBSAN_C_FLAGS} CACHE STRING "Flags used by the C compiler during UBSAN builds." FORCE) + endif() + endif() + unset(CMAKE_REQUIRED_LINK_OPTIONS) +endif() + +if(UBSAN_CXX_LANG_ENABLED) + include(CheckCXXCompilerFlag) + set(CMAKE_REQUIRED_LINK_OPTIONS ${UBSAN_FLAG}) + check_cxx_compiler_flag(${UBSAN_FLAG} UBSAN_CXX_FLAG_SUPPORTED) + if(NOT UBSAN_CXX_FLAG_SUPPORTED) + message(STATUS "Ubsan flags are not supported by the CXX compiler.") + else() + if(NOT CMAKE_CXX_FLAGS_UBSAN) + set(CMAKE_CXX_FLAGS_UBSAN ${UBSAN_CXX_FLAGS} CACHE STRING "Flags used by the CXX compiler during UBSAN builds." FORCE) + endif() + endif() + unset(CMAKE_REQUIRED_LINK_OPTIONS) +endif() + diff --git a/test/API/CMakeLists.txt b/test/API/CMakeLists.txt new file mode 100644 index 00000000000..d189d67d5ac --- /dev/null +++ b/test/API/CMakeLists.txt @@ -0,0 +1,314 @@ +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. 
+# + +#------------------------------------------------------------------------------ +# Set module path +#------------------------------------------------------------------------------ +set(HDF5_TEST_API_CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake") +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${HDF5_TEST_API_CMAKE_MODULE_PATH}) + +# TODO: probably not necessary +#------------------------------------------------------------------------------ +# Setup CMake Environment +#------------------------------------------------------------------------------ +if (WIN32) + message("The HDF5 API test suite is currently not supported on this platform." FATAL_ERROR) +endif () + +#------------------------------------------------------------------------------ +# Setup testing configuration file +#------------------------------------------------------------------------------ +if (HDF5_TEST_PARALLEL) + set (HDF5_TEST_API_HAVE_PARALLEL 1) +endif () +if (HDF5_TEST_API_ENABLE_ASYNC) + set (H5_API_TEST_HAVE_ASYNC 1) +endif () + +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_test_config.h.in + ${HDF5_TEST_BINARY_DIR}/H5_api_test_config.h +) + +#------------------------------------------------------------------------------ +# Compile kwsys library and setup TestDriver +#------------------------------------------------------------------------------ +if (HDF5_TEST_API_ENABLE_DRIVER) + add_subdirectory (driver) +endif () + +#------------------------------------------------------------------------------ +# Setup for API tests +#------------------------------------------------------------------------------ + +# Ported HDF5 tests +set (HDF5_API_TESTS_EXTRA + testhdf5 +) + +# List of files generated by the HDF5 API tests which +# should be cleaned up in case the test failed to remove +# them +set (HDF5_API_TESTS_FILES + H5_api_test.h5 + H5_api_async_test.h5 + H5_api_async_test_0.h5 + H5_api_async_test_1.h5 + H5_api_async_test_2.h5 + H5_api_async_test_3.h5 + H5_api_async_test_4.h5 + test_file.h5 + invalid_params_file.h5 + excl_flag_file.h5 + overlapping_file.h5 + file_permission.h5 + flush_file.h5 + property_list_test_file1.h5 + property_list_test_file2.h5 + intent_test_file.h5 + file_obj_count1.h5 + file_obj_count2.h5 + file_mount.h5 + file_name_retrieval.h5 + filespace_info.h5 + test_file_id.h5 + test_close_degree.h5 + test_free_sections.h5 + file_size.h5 + file_info.h5 + double_group_open.h5 + ext_link_file.h5 + ext_link_file_2.h5 + ext_link_file_3.h5 + ext_link_file_4.h5 + ext_link_file_ping_pong_1.h5 + ext_link_file_ping_pong_2.h5 + ext_link_invalid_params_file.h5 + object_copy_test_file.h5 +) + +#----------------------------------------------------------------------------- +# Build the main API test executable +#----------------------------------------------------------------------------- +foreach (api_test ${HDF5_API_TESTS}) + set (HDF5_API_TEST_SRCS + ${HDF5_API_TEST_SRCS} + ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_${api_test}_test.c + ) +endforeach () + +set (HDF5_API_TEST_SRCS + ${HDF5_API_TEST_SRCS} + ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_test.c + ${HDF5_TEST_API_SRC_DIR}/H5_api_test_util.c +) + +add_executable (h5_api_test ${HDF5_API_TEST_SRCS}) +target_include_directories ( + h5_api_test + PRIVATE + "${HDF5_SRC_INCLUDE_DIRS}" + "${HDF5_TEST_SRC_DIR}" + "${HDF5_TEST_API_SRC_DIR}" + "${HDF5_SRC_BINARY_DIR}" + "${HDF5_TEST_BINARY_DIR}" +) +target_compile_options ( + h5_api_test + PRIVATE + "${HDF5_CMAKE_C_FLAGS}" +) +target_compile_definitions ( + h5_api_test + PRIVATE + $<$:${HDF5_DEVELOPER_DEFS}> +) +if 
(NOT BUILD_SHARED_LIBS) + TARGET_C_PROPERTIES (h5_api_test STATIC) + target_link_libraries ( + h5_api_test + PRIVATE + ${HDF5_TEST_LIB_TARGET} + ) +else () + TARGET_C_PROPERTIES (h5_api_test SHARED) + target_link_libraries ( + h5_api_test + PRIVATE + ${HDF5_TEST_LIBSH_TARGET} + ) +endif () +set_target_properties ( + h5_api_test + PROPERTIES + FOLDER test/API +) +# Add Target to clang-format +if (HDF5_ENABLE_FORMATTERS) + clang_format (HDF5_TEST_h5_api_test_FORMAT h5_api_test) +endif () + +#----------------------------------------------------------------------------- +# Build the ported HDF5 test executables +#----------------------------------------------------------------------------- +foreach (api_test_extra ${HDF5_API_TESTS_EXTRA}) + unset (HDF5_API_TEST_EXTRA_SRCS) + + set (HDF5_API_TEST_EXTRA_SRCS + ${HDF5_API_TEST_EXTRA_SRCS} + ${CMAKE_CURRENT_SOURCE_DIR}/${api_test_extra}.c + ) + + if (${api_test_extra} STREQUAL "testhdf5") + set (HDF5_API_TEST_EXTRA_SRCS + ${HDF5_API_TEST_EXTRA_SRCS} + ${CMAKE_CURRENT_SOURCE_DIR}/tarray.c + ${CMAKE_CURRENT_SOURCE_DIR}/tattr.c + ${CMAKE_CURRENT_SOURCE_DIR}/tchecksum.c + ${CMAKE_CURRENT_SOURCE_DIR}/tconfig.c + ${CMAKE_CURRENT_SOURCE_DIR}/tcoords.c + ${CMAKE_CURRENT_SOURCE_DIR}/tfile.c + ${CMAKE_CURRENT_SOURCE_DIR}/tgenprop.c + ${CMAKE_CURRENT_SOURCE_DIR}/th5o.c + ${CMAKE_CURRENT_SOURCE_DIR}/th5s.c + ${CMAKE_CURRENT_SOURCE_DIR}/tid.c + ${CMAKE_CURRENT_SOURCE_DIR}/titerate.c + ${CMAKE_CURRENT_SOURCE_DIR}/tmisc.c + ${CMAKE_CURRENT_SOURCE_DIR}/trefer.c + ${CMAKE_CURRENT_SOURCE_DIR}/tselect.c + ${CMAKE_CURRENT_SOURCE_DIR}/ttime.c + ${CMAKE_CURRENT_SOURCE_DIR}/tunicode.c + ${CMAKE_CURRENT_SOURCE_DIR}/tvlstr.c + ${CMAKE_CURRENT_SOURCE_DIR}/tvltypes.c + ) + endif () + + add_executable (h5_api_test_${api_test_extra} ${HDF5_API_TEST_EXTRA_SRCS}) + target_include_directories ( + h5_api_test_${api_test_extra} + PRIVATE + "${HDF5_SRC_INCLUDE_DIRS}" + "${HDF5_TEST_SRC_DIR}" + "${HDF5_TEST_API_SRC_DIR}" + "${HDF5_SRC_BINARY_DIR}" + "${HDF5_TEST_BINARY_DIR}" + ) + target_compile_options ( + h5_api_test_${api_test_extra} + PRIVATE + "${HDF5_CMAKE_C_FLAGS}" + ) + target_compile_definitions ( + h5_api_test_${api_test_extra} + PRIVATE + $<$:${HDF5_DEVELOPER_DEFS}> + ) + if (NOT BUILD_SHARED_LIBS) + TARGET_C_PROPERTIES (h5_api_test_${api_test_extra} STATIC) + target_link_libraries (h5_api_test_${api_test_extra} PRIVATE ${HDF5_TEST_LIB_TARGET}) + else () + TARGET_C_PROPERTIES (h5_api_test_${api_test_extra} SHARED) + target_link_libraries (h5_api_test_${api_test_extra} PRIVATE ${HDF5_TEST_LIBSH_TARGET}) + endif () + set_target_properties ( + h5_api_test_${api_test_extra} + PROPERTIES + FOLDER test/API + ) + # Add Target to clang-format + if (HDF5_ENABLE_FORMATTERS) + clang_format (HDF5_TEST_h5_api_test_${api_test_extra}_FORMAT h5_api_test_${api_test_extra}) + endif () +endforeach () + +#----------------------------------------------------------------------------- +# Add tests if HDF5 serial testing is enabled +#----------------------------------------------------------------------------- +if (HDF5_TEST_SERIAL) + if (HDF5_TEST_API_ENABLE_DRIVER) + if ("${HDF5_TEST_API_SERVER}" STREQUAL "") + message (FATAL_ERROR "Please set HDF5_TEST_API_SERVER to point to a server executable for the test driver program.") + endif () + + # Driver options + if (HDF5_TEST_API_SERVER_ALLOW_ERRORS) + set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS --allow-server-errors) + endif () + if (HDF5_TEST_API_CLIENT_HELPER) + set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + 
--client-helper ${HDF5_TEST_API_CLIENT_HELPER} + ) + endif () + if (HDF5_TEST_API_CLIENT_INIT) + set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + --client-init ${HDF5_TEST_API_CLIENT_INIT} + ) + endif () + + set(last_api_test "") + foreach (api_test ${HDF5_API_TESTS}) + add_test ( + NAME "h5_api_test_${api_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ "${api_test}" + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + + set_tests_properties("h5_api_test_${api_test}" PROPERTIES DEPENDS "${last_api_test}") + + set(last_api_test "h5_api_test_${api_test}") + endforeach () + + foreach (hdf5_test ${HDF5_API_TESTS_EXTRA}) + add_test ( + NAME "h5_api_test_${hdf5_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + endforeach () + + # Hook external tests to same test suite + foreach (ext_api_test ${HDF5_API_EXT_SERIAL_TESTS}) + add_test ( + NAME "h5_api_ext_test_${ext_api_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + endforeach () + else () + set(last_api_test "") + foreach (api_test ${HDF5_API_TESTS}) + add_test ( + NAME "h5_api_test_${api_test}" + COMMAND $ "${api_test}" + ) + + set_tests_properties("h5_api_test_${api_test}" PROPERTIES DEPENDS "${last_api_test}") + + set(last_api_test "h5_api_test_${api_test}") + endforeach () + + foreach (hdf5_test ${HDF5_API_TESTS_EXTRA}) + add_test ( + NAME "h5_api_test_${hdf5_test}" + COMMAND $ + ) + endforeach () + endif () +endif () diff --git a/test/API/H5_api_async_test.c b/test/API/H5_api_async_test.c new file mode 100644 index 00000000000..b5208bafb86 --- /dev/null +++ b/test/API/H5_api_async_test.c @@ -0,0 +1,2730 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_async_test.h" + +#ifdef H5ESpublic_H + +static int test_one_dataset_io(void); +static int test_multi_dataset_io(void); +static int test_multi_file_dataset_io(void); +static int test_multi_file_grp_dset_io(void); +static int test_set_extent(void); +static int test_attribute_exists(void); +static int test_attribute_io(void); +static int test_attribute_io_tconv(void); +static int test_attribute_io_compound(void); +static int test_group(void); +static int test_link(void); +static int test_ocopy_orefresh(void); +static int test_file_reopen(void); + +/* + * The array of async tests to be performed. 
+ */ +static int (*async_tests[])(void) = { + test_one_dataset_io, + test_multi_dataset_io, + test_multi_file_dataset_io, + test_multi_file_grp_dset_io, + test_set_extent, + test_attribute_exists, + test_attribute_io, + test_attribute_io_tconv, + test_attribute_io_compound, + test_group, + test_link, + test_ocopy_orefresh, + test_file_reopen, +}; + +/* Highest "printf" file created (starting at 0) */ +int max_printf_file = -1; + +/* + * Create file and dataset, write to dataset + */ +static int +test_one_dataset_io(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + hsize_t dims[2] = {6, 10}; + size_t num_in_progress; + hbool_t op_failed; + int wbuf[6][10]; + int rbuf[6][10]; + int i, j; + + TESTING_MULTIPART("single dataset I/O"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + SKIPPED(); + HDprintf( + " API functions for basic file, dataset, or flush aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + /* Create dataspace */ + if ((space_id = H5Screate_simple(2, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Create file asynchronously */ + if ((file_id = H5Fcreate_async(ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(single_dset_eswait) + { + TESTING_2("synchronization using H5ESwait()"); + + /* Initialize wbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + wbuf[i][j] = 10 * i + j; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf, es_id) < 0) + PART_TEST_ERROR(single_dset_eswait); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(single_dset_eswait); + if (op_failed) + PART_TEST_ERROR(single_dset_eswait); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf, es_id) < 0) + PART_TEST_ERROR(single_dset_eswait); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(single_dset_eswait); + if (op_failed) + PART_TEST_ERROR(single_dset_eswait); + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + if (wbuf[i][j] != rbuf[i][j]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(single_dset_eswait); + } /* end if */ + + PASSED(); + } + PART_END(single_dset_eswait); + + PART_BEGIN(single_dset_dclose) + { + TESTING_2("synchronization using H5Dclose()"); + + /* Update wbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + wbuf[i][j] += 6 * 10; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf, es_id) < 0) + PART_TEST_ERROR(single_dset_dclose); + + /* Close the dataset synchronously */ + if (H5Dclose(dset_id) < 0) + PART_TEST_ERROR(single_dset_dclose); + + /* 
Re-open the dataset asynchronously */ + if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(single_dset_dclose); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf, es_id) < 0) + PART_TEST_ERROR(single_dset_dclose); + + /* Close the dataset synchronously */ + if (H5Dclose(dset_id) < 0) + PART_TEST_ERROR(single_dset_dclose); + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + if (wbuf[i][j] != rbuf[i][j]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(single_dset_dclose); + } /* end if */ + + /* Re-open the dataset asynchronously */ + if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(single_dset_dclose); + + PASSED(); + } + PART_END(single_dset_dclose); + + PART_BEGIN(single_dset_dflush) + { + TESTING_2("synchronization using H5Oflush_async()"); + + /* Update wbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + wbuf[i][j] += 6 * 10; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf, es_id) < 0) + PART_TEST_ERROR(single_dset_dflush); + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. */ + if (H5Oflush_async(dset_id, es_id) < 0) + PART_TEST_ERROR(single_dset_dflush); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf, es_id) < 0) + PART_TEST_ERROR(single_dset_dflush); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(single_dset_dflush); + if (op_failed) + PART_TEST_ERROR(single_dset_dflush); + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + if (wbuf[i][j] != rbuf[i][j]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(single_dset_dflush); + } /* end if */ + + PASSED(); + } + PART_END(single_dset_dflush); + + PART_BEGIN(single_dset_fclose) + { + TESTING_2("synchronization using H5Fclose()"); + + /* Update wbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + wbuf[i][j] += 6 * 10; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf, es_id) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Close the file synchronously */ + if (H5Fclose(file_id) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Reopen the file asynchronously. 
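The single_dset_dflush part above uses H5Oflush_async() as an ordering point between the queued write and the queued read, instead of draining the whole event set. A minimal sketch of that idiom, assuming dset_id and es_id are a valid dataset and event set as in the test; the caller still needs a final H5ESwait() before inspecting rbuf.

#include "hdf5.h"

/* Order an async read after an async write on the same dataset without a full
 * H5ESwait(): queue a flush of the object between the two operations. */
static herr_t
write_flush_read(hid_t dset_id, hid_t es_id, const int *wbuf, int *rbuf)
{
    if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf, es_id) < 0)
        return -1;
    /* The flush acts as the barrier between the write and the read */
    if (H5Oflush_async(dset_id, es_id) < 0)
        return -1;
    if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf, es_id) < 0)
        return -1;
    return 0;
}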
*/ + if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDONLY, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Re-open the dataset asynchronously */ + if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf, es_id) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Close the file synchronously */ + if (H5Fclose(file_id) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + if (wbuf[i][j] != rbuf[i][j]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(single_dset_fclose); + } /* end if */ + + PASSED(); + } + PART_END(single_dset_fclose); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Dclose(dset_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} /* end test_one_dataset_io() */ + +/* + * Create file and multiple datasets, write to them and read from them + */ +static int +test_multi_dataset_io(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t dset_id[5] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; + hid_t space_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + hsize_t dims[2] = {6, 10}; + size_t num_in_progress; + hbool_t op_failed; + char dset_name[32]; + int wbuf[5][6][10]; + int rbuf[5][6][10]; + int i, j, k; + + TESTING_MULTIPART("multi dataset I/O"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + SKIPPED(); + HDprintf( + " API functions for basic file, dataset, or flush aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + /* Create dataspace */ + if ((space_id = H5Screate_simple(2, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Create file asynchronously */ + if ((file_id = H5Fcreate_async(ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(multi_dset_open) + { + TESTING_2("keeping datasets open"); + + /* Loop over datasets */ + for (i = 0; i < 5; i++) { + /* Set dataset name */ + sprintf(dset_name, "dset%d", i); + + /* Create the dataset asynchronously */ + if ((dset_id[i] = H5Dcreate_async(file_id, dset_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_dset_open); + + /* Initialize wbuf. Must use a new slice of wbuf for each dset + * since we can't overwrite the buffers until I/O is done. 
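That comment states the key buffer rule for asynchronous I/O: memory handed to H5Dwrite_async() must not be modified until the operation has completed, which is why each in-flight write below gets its own slice of wbuf. A small sketch of the safe way to reuse a single buffer instead, assuming dset_id and es_id are valid as in these tests.

#include "hdf5.h"

/* Reusing one write buffer safely: drain the event set so the queued write is
 * done before the buffer is overwritten for the next write. */
static herr_t
reuse_buffer_safely(hid_t dset_id, hid_t es_id, int *buf, size_t nelem)
{
    size_t  num_in_progress;
    hbool_t op_failed;
    size_t  u;

    if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf, es_id) < 0)
        return -1;

    /* The queued write may still be reading buf; wait before touching it */
    if (H5ESwait(es_id, H5ES_WAIT_FOREVER, &num_in_progress, &op_failed) < 0 || op_failed)
        return -1;

    for (u = 0; u < nelem; u++)
        buf[u] += 1; /* now safe to update */

    if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf, es_id) < 0)
        return -1;
    return 0;
}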
*/ + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + wbuf[i][j][k] = 6 * 10 * i + 10 * j + k; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id[i], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i], + es_id) < 0) + PART_TEST_ERROR(multi_dset_open); + } /* end for */ + + /* Flush the file asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. */ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + PART_TEST_ERROR(multi_dset_open); + + /* Loop over datasets */ + for (i = 0; i < 5; i++) { + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id[i], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) < + 0) + PART_TEST_ERROR(multi_dset_open); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_dset_open); + if (op_failed) + PART_TEST_ERROR(multi_dset_open); + /*printf("\nwbuf:\n"); + for(i = 0; i < 5; i++) { + for(j = 0; j < 6; j++) { + for(k = 0; k < 10; k++) + printf("%d ", wbuf[i][j][k]); + printf("\n"); + } + printf("\n"); + } + printf("\nrbuf:\n"); + for(i = 0; i < 5; i++) { + for(j = 0; j < 6; j++) { + for(k = 0; k < 10; k++) + printf("%d ", rbuf[i][j][k]); + printf("\n"); + } + printf("\n"); + }*/ + /* Verify the read data */ + for (i = 0; i < 5; i++) + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + if (wbuf[i][j][k] != rbuf[i][j][k]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_dset_open); + } /* end if */ + + /* Close the datasets */ + for (i = 0; i < 5; i++) + if (H5Dclose(dset_id[i]) < 0) + PART_TEST_ERROR(multi_dset_open); + + PASSED(); + } + PART_END(multi_dset_open); + + PART_BEGIN(multi_dset_close) + { + TESTING_2("closing datasets between I/O"); + + /* Loop over datasets */ + for (i = 0; i < 5; i++) { + /* Set dataset name */ + sprintf(dset_name, "dset%d", i); + + /* Open the dataset asynchronously */ + if ((dset_id[0] = H5Dopen_async(file_id, dset_name, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_dset_close); + + /* Update wbuf */ + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + wbuf[i][j][k] += 5 * 6 * 10; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i], + es_id) < 0) + PART_TEST_ERROR(multi_dset_close); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id[0], es_id) < 0) + PART_TEST_ERROR(multi_dset_close); + } /* end for */ + + /* Flush the file asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. 
*/ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + PART_TEST_ERROR(multi_dset_close); + + /* Loop over datasets */ + for (i = 0; i < 5; i++) { + /* Set dataset name */ + sprintf(dset_name, "dset%d", i); + + /* Open the dataset asynchronously */ + if ((dset_id[0] = H5Dopen_async(file_id, dset_name, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_dset_close); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) < + 0) + PART_TEST_ERROR(multi_dset_close); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id[0], es_id) < 0) + PART_TEST_ERROR(multi_dset_close); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_dset_close); + if (op_failed) + PART_TEST_ERROR(multi_dset_close); + /*printf("\nwbuf:\n"); + for(i = 0; i < 5; i++) { + for(j = 0; j < 6; j++) { + for(k = 0; k < 10; k++) + printf("%d ", wbuf[i][j][k]); + printf("\n"); + } + printf("\n"); + } + printf("\nrbuf:\n"); + for(i = 0; i < 5; i++) { + for(j = 0; j < 6; j++) { + for(k = 0; k < 10; k++) + printf("%d ", rbuf[i][j][k]); + printf("\n"); + } + printf("\n"); + }*/ + /* Verify the read data */ + for (i = 0; i < 5; i++) + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + if (wbuf[i][j][k] != rbuf[i][j][k]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_dset_close); + } /* end if */ + + PASSED(); + } + PART_END(multi_dset_close); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + for (i = 0; i < 5; i++) + H5Dclose(dset_id[i]); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} /* end test_multi_dataset_io() */ + +/* + * Create multiple files, each with a single dataset, write to them and read + * from them + */ +static int +test_multi_file_dataset_io(void) +{ + hid_t file_id[5] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; + hid_t dset_id[5] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; + hid_t space_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + hsize_t dims[2] = {6, 10}; + size_t num_in_progress; + hbool_t op_failed; + char file_name[32]; + int wbuf[5][6][10]; + int rbuf[5][6][10]; + int i, j, k; + + TESTING_MULTIPART("multi file dataset I/O"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + SKIPPED(); + HDprintf( + " API functions for basic file, dataset, or flush aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + /* Create dataspace */ + if ((space_id = H5Screate_simple(2, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(multi_file_dset_open) 
+ { + TESTING_2("keeping files and datasets open"); + + /* Loop over files */ + for (i = 0; i < 5; i++) { + /* Set file name */ + sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i); + + /* Create file asynchronously */ + if ((file_id[i] = + H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_open); + if (i > max_printf_file) + max_printf_file = i; + + /* Create the dataset asynchronously */ + if ((dset_id[i] = H5Dcreate_async(file_id[i], "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_open); + + /* Initialize wbuf. Must use a new slice of wbuf for each dset + * since we can't overwrite the buffers until I/O is done. */ + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + wbuf[i][j][k] = 6 * 10 * i + 10 * j + k; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id[i], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i], + es_id) < 0) + PART_TEST_ERROR(multi_file_dset_open); + } /* end for */ + + /* Loop over files */ + for (i = 0; i < 5; i++) { + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. */ + if (H5Oflush_async(dset_id[i], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_open); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id[i], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) < + 0) + PART_TEST_ERROR(multi_file_dset_open); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_dset_open); + if (op_failed) + PART_TEST_ERROR(multi_file_dset_open); + + /* Verify the read data */ + for (i = 0; i < 5; i++) + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + if (wbuf[i][j][k] != rbuf[i][j][k]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_file_dset_open); + } /* end if */ + + /* Close the datasets */ + for (i = 0; i < 5; i++) + if (H5Dclose(dset_id[i]) < 0) + PART_TEST_ERROR(multi_file_dset_open); + + PASSED(); + } + PART_END(multi_file_dset_open); + + PART_BEGIN(multi_file_dset_dclose) + { + TESTING_2("closing datasets between I/O"); + + /* Loop over files */ + for (i = 0; i < 5; i++) { + /* Open the dataset asynchronously */ + if ((dset_id[0] = H5Dopen_async(file_id[i], "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_dclose); + + /* Update wbuf */ + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + wbuf[i][j][k] += 5 * 6 * 10; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i], + es_id) < 0) + PART_TEST_ERROR(multi_file_dset_dclose); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id[0], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_dclose); + } /* end for */ + + /* Loop over files */ + for (i = 0; i < 5; i++) { + /* Flush the file asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. 
*/ + if (H5Fflush_async(file_id[i], H5F_SCOPE_LOCAL, es_id) < 0) + PART_TEST_ERROR(multi_file_dset_open); + + /* Open the dataset asynchronously */ + if ((dset_id[0] = H5Dopen_async(file_id[i], "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_dclose); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) < + 0) + PART_TEST_ERROR(multi_file_dset_dclose); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id[0], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_dclose); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_dset_dclose); + if (op_failed) + PART_TEST_ERROR(multi_file_dset_dclose); + + /* Verify the read data */ + for (i = 0; i < 5; i++) + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + if (wbuf[i][j][k] != rbuf[i][j][k]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_file_dset_dclose); + } /* end if */ + + /* Close the files */ + for (i = 0; i < 5; i++) + if (H5Fclose(file_id[i]) < 0) + PART_TEST_ERROR(multi_file_dset_dclose); + + PASSED(); + } + PART_END(multi_file_dset_dclose); + + PART_BEGIN(multi_file_dset_fclose) + { + TESTING_2("closing files between I/O"); + + /* Loop over files */ + for (i = 0; i < 5; i++) { + /* Set file name */ + sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i); + + /* Open the file asynchronously */ + if ((file_id[0] = H5Fopen_async(file_name, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Open the dataset asynchronously */ + if ((dset_id[0] = H5Dopen_async(file_id[0], "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Update wbuf */ + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + wbuf[i][j][k] += 5 * 6 * 10; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i], + es_id) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id[0], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id[0], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + if (op_failed) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Loop over files */ + for (i = 0; i < 5; i++) { + /* Set file name */ + sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i); + + /* Open the file asynchronously */ + if ((file_id[0] = H5Fopen_async(file_name, H5F_ACC_RDONLY, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Open the dataset asynchronously */ + if ((dset_id[0] = H5Dopen_async(file_id[0], "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) < + 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id[0], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id[0], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + } /* end 
for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + if (op_failed) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Verify the read data */ + for (i = 0; i < 5; i++) + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + if (wbuf[i][j][k] != rbuf[i][j][k]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_file_dset_fclose); + } /* end if */ + + PASSED(); + } + PART_END(multi_file_dset_fclose); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + for (i = 0; i < 5; i++) { + H5Dclose(dset_id[i]); + H5Fclose(file_id[i]); + } /* end for */ + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} /* end test_multi_file_dataset_io() */ + +/* + * Create multiple files, each with a single group and dataset, write to them + * and read from them + */ +static int +test_multi_file_grp_dset_io(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t grp_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + hsize_t dims[2] = {6, 10}; + size_t num_in_progress; + hbool_t op_failed; + char file_name[32]; + int wbuf[5][6][10]; + int rbuf[5][6][10]; + int i, j, k; + + TESTING_MULTIPART("multi file dataset I/O with groups"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + /* Create dataspace */ + if ((space_id = H5Screate_simple(2, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(multi_file_grp_dset_no_kick) + { + TESTING_2("without intermediate calls to H5ESwait()"); + + /* Loop over files */ + for (i = 0; i < 5; i++) { + /* Set file name */ + sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i); + + /* Create file asynchronously */ + if ((file_id = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + if (i > max_printf_file) + max_printf_file = i; + + /* Create the group asynchronously */ + if ((grp_id = H5Gcreate_async(file_id, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(grp_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Initialize wbuf. Must use a new slice of wbuf for each dset + * since we can't overwrite the buffers until I/O is done. 
*/ + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + wbuf[i][j][k] = 6 * 10 * i + 10 * j + k; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i], es_id) < + 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the group asynchronously */ + if (H5Gclose_async(grp_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Loop over files */ + for (i = 0; i < 5; i++) { + /* Set file name */ + sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i); + + /* Open the file asynchronously */ + if ((file_id = H5Fopen_async(file_name, H5F_ACC_RDONLY, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Open the group asynchronously */ + if ((grp_id = H5Gopen_async(file_id, "grp", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Open the dataset asynchronously */ + if ((dset_id = H5Dopen_async(grp_id, "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the group asynchronously */ + if (H5Gclose_async(grp_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Verify the read data */ + for (i = 0; i < 5; i++) + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + if (wbuf[i][j][k] != rbuf[i][j][k]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_file_grp_dset_no_kick); + } /* end if */ + + PASSED(); + } + PART_END(multi_file_grp_dset_no_kick); + + PART_BEGIN(multi_file_grp_dset_kick) + { + TESTING_2("with intermediate calls to H5ESwait() (0 timeout)"); + + /* Loop over files */ + for (i = 0; i < 5; i++) { + /* Set file name */ + sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i); + + /* Create file asynchronously */ + if ((file_id = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + if (i > max_printf_file) + max_printf_file = i; + + /* Create the group asynchronously */ + if ((grp_id = H5Gcreate_async(file_id, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(grp_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, + H5P_DEFAULT, 
H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Update wbuf */ + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + wbuf[i][j][k] += 5 * 6 * 10; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i], es_id) < + 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the group asynchronously */ + if (H5Gclose_async(grp_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Kick the event stack to make progress */ + if (H5ESwait(es_id, 0, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_kick); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Loop over files */ + for (i = 0; i < 5; i++) { + /* Set file name */ + sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i); + + /* Open the file asynchronously */ + if ((file_id = H5Fopen_async(file_name, H5F_ACC_RDONLY, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Open the group asynchronously */ + if ((grp_id = H5Gopen_async(file_id, "grp", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Open the dataset asynchronously */ + if ((dset_id = H5Dopen_async(grp_id, "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the group asynchronously */ + if (H5Gclose_async(grp_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Kick the event stack to make progress */ + if (H5ESwait(es_id, 0, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_kick); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Verify the read data */ + for (i = 0; i < 5; i++) + for (j = 0; j < 6; j++) + for (k = 0; k < 10; k++) + if (wbuf[i][j][k] != rbuf[i][j][k]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_file_grp_dset_kick); + } /* end if */ + + PASSED(); + } + PART_END(multi_file_grp_dset_kick); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + 
H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Gclose(grp_id); + H5Dclose(dset_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} /* end test_multi_file_grp_dset_io() */ + +/* + * Create file and dataset, write to dataset + */ +static int +test_set_extent(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id[6] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, + H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; + hid_t fspace_out[6] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, + H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; + hid_t mspace_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + hsize_t dims[2] = {1, 10}; + hsize_t mdims[2] = {7, 10}; + hsize_t cdims[2] = {2, 3}; + hsize_t start[2] = {0, 0}; + hsize_t count[2] = {1, 10}; + size_t num_in_progress; + hbool_t op_failed; + htri_t tri_ret; + int wbuf[6][10]; + int rbuf[6][10]; + int i, j; + + TESTING("H5Dset_extent() and H5Dget_space()"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, dataset, dataset more, or flush aren't supported with " + "this connector\n"); + return 0; + } + + /* Create file dataspace */ + if ((fspace_id[0] = H5Screate_simple(2, dims, mdims)) < 0) + TEST_ERROR; + + /* Create memory dataspace */ + if ((mspace_id = H5Screate_simple(1, &dims[1], NULL)) < 0) + TEST_ERROR; + + /* Create DCPL */ + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + /* Set chunking */ + if (H5Pset_chunk(dcpl_id, 2, cdims) < 0) + TEST_ERROR; + + /* Initialize wbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + wbuf[i][j] = 10 * i + j; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Create file asynchronously */ + if ((file_id = H5Fcreate_async(ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(file_id, "dset", H5T_NATIVE_INT, fspace_id[0], H5P_DEFAULT, dcpl_id, + H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Extend the first dataset from 1 to 6, 1 at a time */ + for (i = 0; i < 6; i++) { + /* No need to extend on the first iteration */ + if (i) { + /* Copy dataspace */ + if ((fspace_id[i] = H5Scopy(fspace_id[i - 1])) < 0) + TEST_ERROR; + + /* Extend datapace */ + dims[0] = (hsize_t)(i + 1); + if (H5Sset_extent_simple(fspace_id[i], 2, dims, mdims) < 0) + TEST_ERROR; + + /* Extend dataset asynchronously */ + if (H5Dset_extent_async(dset_id, dims, es_id) < 0) + TEST_ERROR; + + /* Select hyperslab in file space to match new region */ + start[0] = (hsize_t)i; + if (H5Sselect_hyperslab(fspace_id[i], H5S_SELECT_SET, start, NULL, count, NULL) < 0) + TEST_ERROR; + } /* end if */ + + /* Get dataset dataspace */ + if ((fspace_out[i] = H5Dget_space_async(dset_id, es_id)) < 0) + TEST_ERROR; + + /* Write the dataset slice asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id[i], H5P_DEFAULT, wbuf[i], es_id) < 0) + TEST_ERROR; + } /* end for */ + + /* Flush the dataset asynchronously. 
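The loop above grows the dataset one row at a time with H5Dset_extent_async() and writes only the new row through a hyperslab selection. For reference, here is a compact synchronous sketch of the same append pattern; it assumes a 2-D chunked dataset whose first dimension can still grow, with row_len columns and cur_rows rows currently allocated.

#include "hdf5.h"

/* Grow a chunked 2-D dataset by one row and write just that row. */
static herr_t
append_row(hid_t dset_id, hsize_t cur_rows, hsize_t row_len, const int *row)
{
    hsize_t new_dims[2] = {cur_rows + 1, row_len};
    hsize_t start[2]    = {cur_rows, 0};
    hsize_t count[2]    = {1, row_len};
    hid_t   fspace_id   = H5I_INVALID_HID;
    hid_t   mspace_id   = H5I_INVALID_HID;
    herr_t  ret         = -1;

    if (H5Dset_extent(dset_id, new_dims) < 0)
        goto done;
    if ((fspace_id = H5Dget_space(dset_id)) < 0)
        goto done;
    /* Select only the newly added row in the file */
    if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
        goto done;
    if ((mspace_id = H5Screate_simple(1, &row_len, NULL)) < 0)
        goto done;
    if (H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, row) < 0)
        goto done;
    ret = 0;

done:
    if (mspace_id >= 0)
        H5Sclose(mspace_id);
    if (fspace_id >= 0)
        H5Sclose(fspace_id);
    return ret;
}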
This will effectively work as a + * barrier, guaranteeing the read takes place after the write. */ + if (H5Oflush_async(dset_id, es_id) < 0) + TEST_ERROR; + + /* Read the entire dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf, es_id) < 0) + TEST_ERROR; + + /* Verify extents are correct. We do not need to wait because of the + * "future id" capability. */ + for (i = 0; i < 6; i++) { + if ((tri_ret = H5Sextent_equal(fspace_id[i], fspace_out[i])) < 0) + TEST_ERROR; + if (!tri_ret) + FAIL_PUTS_ERROR(" dataspaces are not equal\n"); + if (i && H5Sclose(fspace_id[i]) < 0) + TEST_ERROR; + if (H5Sclose(fspace_out[i]) < 0) + TEST_ERROR; + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + if (wbuf[i][j] != rbuf[i][j]) + FAIL_PUTS_ERROR(" data verification failed\n"); + + /* + * Now try extending the dataset, closing it, reopening it, and getting the + * space. + */ + /* Extend datapace */ + dims[0] = (hsize_t)7; + if (H5Sset_extent_simple(fspace_id[0], 2, dims, mdims) < 0) + TEST_ERROR; + + /* Extend dataset asynchronously */ + if (H5Dset_extent_async(dset_id, dims, es_id) < 0) + TEST_ERROR; + + /* Close dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + TEST_ERROR; + + /* Open dataset asynchronously */ + if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Get dataset dataspace asynchronously */ + if ((fspace_out[0] = H5Dget_space_async(dset_id, es_id)) < 0) + TEST_ERROR; + + /* Verify the extents match */ + if ((tri_ret = H5Sextent_equal(fspace_id[0], fspace_out[0])) < 0) + TEST_ERROR; + if (!tri_ret) + FAIL_PUTS_ERROR(" dataspaces are not equal\n"); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Close */ + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id[0]) < 0) + TEST_ERROR; + if (H5Sclose(fspace_out[0]) < 0) + TEST_ERROR; + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + for (i = 0; i < 6; i++) { + H5Sclose(fspace_id[i]); + H5Sclose(fspace_out[i]); + } /* end for */ + H5Pclose(dcpl_id); + H5Dclose(dset_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} /* end test_set_extent() */ + +/* + * Test H5Aexists() + */ +static int +test_attribute_exists(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + hsize_t dims[2] = {6, 10}; + hbool_t exists1; + hbool_t exists2; + size_t num_in_progress; + hbool_t op_failed; + + TESTING("H5Aexists()"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, 
dataset, dataset more, attribute, or flush aren't " + "supported with this connector\n"); + return 0; + } + + /* Create dataspace */ + if ((space_id = H5Screate_simple(2, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(file_id, "attr_exists_dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Check if the attribute exists asynchronously */ + if (H5Aexists_async(dset_id, "attr", &exists1, es_id) < 0) + TEST_ERROR; + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the create takes place after the existence check + */ + if (H5Oflush_async(dset_id, es_id) < 0) + TEST_ERROR; + + /* Create the attribute asynchronously */ + if ((attr_id = + H5Acreate_async(dset_id, "attr", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the existence check takes place after the create. + */ + if (H5Oflush_async(dset_id, es_id) < 0) + TEST_ERROR; + + /* Check if the attribute exists asynchronously */ + if (H5Aexists_async(dset_id, "attr", &exists2, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Check if H5Aexists returned the correct values */ + if (exists1) + FAIL_PUTS_ERROR(" H5Aexists returned TRUE for an attribute that should not exist"); + if (!exists2) + FAIL_PUTS_ERROR(" H5Aexists returned FALSE for an attribute that should exist"); + + /* Close */ + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + if (H5Dclose_async(dset_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Aclose(attr_id); + H5Dclose(dset_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} /* end test_attribute_io() */ + +/* + * Create file, dataset, and attribute, write to attribute + */ +static int +test_attribute_io(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + hsize_t dims[2] = {6, 10}; + size_t num_in_progress; + hbool_t op_failed; + int wbuf[6][10]; + int rbuf[6][10]; + int i, j; + + TESTING("attribute I/O"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, dataset, dataset more, attribute, or flush aren't " + "supported with this connector\n"); + return 0; + } + + /* 
Create dataspace */ + if ((space_id = H5Screate_simple(2, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(file_id, "attr_dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create the attribute asynchronously */ + if ((attr_id = + H5Acreate_async(dset_id, "attr", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Initialize wbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + wbuf[i][j] = 10 * i + j; + + /* Write the attribute asynchronously */ + if (H5Awrite_async(attr_id, H5T_NATIVE_INT, wbuf, es_id) < 0) + TEST_ERROR; + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. */ + if (H5Oflush_async(dset_id, es_id) < 0) + TEST_ERROR; + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, H5T_NATIVE_INT, rbuf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + if (wbuf[i][j] != rbuf[i][j]) + FAIL_PUTS_ERROR(" data verification failed\n"); + + /* Close the attribute asynchronously */ + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + + /* Open the attribute asynchronously */ + if ((attr_id = H5Aopen_async(dset_id, "attr", H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, H5T_NATIVE_INT, rbuf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + if (wbuf[i][j] != rbuf[i][j]) + FAIL_PUTS_ERROR(" data verification failed\n"); + + /* Close out of order to see if it trips things up */ + if (H5Dclose_async(dset_id, es_id) < 0) + TEST_ERROR; + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Aclose(attr_id); + H5Dclose(dset_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} /* end test_attribute_io() */ + +/* + * Create file, dataset, and attribute, write to attribute with type conversion + */ +static int +test_attribute_io_tconv(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + hsize_t dims[2] = {6, 10}; + size_t num_in_progress; + hbool_t op_failed; + int wbuf[6][10]; + int rbuf[6][10]; + int i, j; + + TESTING("attribute I/O with type conversion"); + + /* Make sure the connector supports the API 
functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, attribute, or flush aren't supported with this connector\n"); + return 0; + } + + /* Create dataspace */ + if ((space_id = H5Screate_simple(2, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create the attribute asynchronously by name */ + if ((attr_id = H5Acreate_by_name_async(file_id, "attr_dset", "attr_tconv", H5T_STD_U16BE, space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Initialize wbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + wbuf[i][j] = 10 * i + j; + + /* Write the attribute asynchronously */ + if (H5Awrite_async(attr_id, H5T_NATIVE_INT, wbuf, es_id) < 0) + TEST_ERROR; + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. */ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + TEST_ERROR; + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, H5T_NATIVE_INT, rbuf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + if (wbuf[i][j] != rbuf[i][j]) + FAIL_PUTS_ERROR(" data verification failed\n"); + + /* Close the attribute asynchronously */ + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + + /* Open the attribute asynchronously */ + if ((attr_id = + H5Aopen_by_name_async(file_id, "attr_dset", "attr_tconv", H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, H5T_NATIVE_INT, rbuf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + if (wbuf[i][j] != rbuf[i][j]) + FAIL_PUTS_ERROR(" data verification failed\n"); + + /* Close */ + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Aclose(attr_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} /* end test_attribute_io_tconv() */ + +/* + * Create file, dataset, and attribute, write to attribute with compound type + * conversion + */ +typedef struct tattr_cmpd_t { + int a; + int b; +} tattr_cmpd_t; + +static int +test_attribute_io_compound(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t mtype_id = H5I_INVALID_HID; + hid_t 
ftype_id = H5I_INVALID_HID; + hid_t mtypea_id = H5I_INVALID_HID; + hid_t mtypeb_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + hsize_t dims[2] = {6, 10}; + size_t num_in_progress; + hbool_t op_failed; + tattr_cmpd_t wbuf[6][10]; + tattr_cmpd_t rbuf[6][10]; + tattr_cmpd_t fbuf[6][10]; + int i, j; + + TESTING("attribute I/O with compound type conversion"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, attribute, or flush aren't supported with this connector\n"); + return 0; + } + + /* Create datatype */ + if ((mtype_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0) + TEST_ERROR; + if (H5Tinsert(mtype_id, "a_name", HOFFSET(tattr_cmpd_t, a), H5T_NATIVE_INT) < 0) + TEST_ERROR; + if (H5Tinsert(mtype_id, "b_name", HOFFSET(tattr_cmpd_t, b), H5T_NATIVE_INT) < 0) + TEST_ERROR; + + if ((mtypea_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0) + TEST_ERROR; + if (H5Tinsert(mtypea_id, "a_name", HOFFSET(tattr_cmpd_t, a), H5T_NATIVE_INT) < 0) + TEST_ERROR; + + if ((mtypeb_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0) + TEST_ERROR; + if (H5Tinsert(mtypeb_id, "b_name", HOFFSET(tattr_cmpd_t, b), H5T_NATIVE_INT) < 0) + TEST_ERROR; + + if ((ftype_id = H5Tcreate(H5T_COMPOUND, 2 + 8)) < 0) + TEST_ERROR; + if (H5Tinsert(ftype_id, "a_name", 0, H5T_STD_U16BE) < 0) + TEST_ERROR; + if (H5Tinsert(ftype_id, "b_name", 2, H5T_STD_I64LE) < 0) + TEST_ERROR; + + /* Create dataspace */ + if ((space_id = H5Screate_simple(2, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create the attribute asynchronously by name */ + if ((attr_id = H5Acreate_by_name_async(file_id, "attr_dset", "attr_cmpd", ftype_id, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Initialize wbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + wbuf[i][j].a = 2 * (10 * i + j); + wbuf[i][j].b = 2 * (10 * i + j) + 1; + } /* end for */ + + /* Write the attribute asynchronously */ + if (H5Awrite_async(attr_id, mtype_id, wbuf, es_id) < 0) + TEST_ERROR; + + /* Update fbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + fbuf[i][j].a = wbuf[i][j].a; + fbuf[i][j].b = wbuf[i][j].b; + } /* end for */ + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. 
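The mtypea_id and mtypeb_id types built above each describe only one member of the compound; passing such a subset type to H5Aread()/H5Awrite() touches just that member and leaves the other untouched in memory or in the file, which is exactly what the verification further down checks. A minimal synchronous sketch of reading a single member this way, assuming attr_id refers to a compound attribute whose file type has members "a_name" and "b_name" as in this test.

#include "hdf5.h"
#include <stddef.h>

typedef struct pair_t {
    int a;
    int b;
} pair_t;

/* Read only member "a_name": the memory type lists just that member, so the
 * library leaves pair_t.b in rbuf untouched. */
static herr_t
read_member_a(hid_t attr_id, pair_t *rbuf)
{
    hid_t  mtype_a = H5I_INVALID_HID;
    herr_t ret     = -1;

    if ((mtype_a = H5Tcreate(H5T_COMPOUND, sizeof(pair_t))) < 0)
        goto done;
    if (H5Tinsert(mtype_a, "a_name", HOFFSET(pair_t, a), H5T_NATIVE_INT) < 0)
        goto done;
    if (H5Aread(attr_id, mtype_a, rbuf) < 0)
        goto done;
    ret = 0;

done:
    if (mtype_a >= 0)
        H5Tclose(mtype_a);
    return ret;
}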
*/ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + TEST_ERROR; + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, mtype_id, rbuf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + if (rbuf[i][j].a != fbuf[i][j].a) + FAIL_PUTS_ERROR(" data verification failed\n"); + if (rbuf[i][j].b != fbuf[i][j].b) + FAIL_PUTS_ERROR(" data verification failed\n"); + } /* end for */ + + /* Clear the read buffer */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + rbuf[i][j].a = -2; + rbuf[i][j].b = -2; + } /* end for */ + + /* Read the attribute asynchronously (element a only) */ + if (H5Aread_async(attr_id, mtypea_id, rbuf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + if (rbuf[i][j].a != fbuf[i][j].a) + FAIL_PUTS_ERROR(" data verification failed\n"); + if (rbuf[i][j].b != -2) + FAIL_PUTS_ERROR(" data verification failed\n"); + } /* end for */ + + /* Clear the read buffer */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + rbuf[i][j].a = -2; + rbuf[i][j].b = -2; + } /* end for */ + + /* Read the attribute asynchronously (element b only) */ + if (H5Aread_async(attr_id, mtypeb_id, rbuf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + if (rbuf[i][j].a != -2) + FAIL_PUTS_ERROR(" data verification failed\n"); + if (rbuf[i][j].b != fbuf[i][j].b) + FAIL_PUTS_ERROR(" data verification failed\n"); + } /* end for */ + + /* Update wbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + wbuf[i][j].a += 2 * 6 * 10; + wbuf[i][j].b += 2 * 6 * 10; + } /* end for */ + + /* Write the attribute asynchronously (element a only) */ + if (H5Awrite_async(attr_id, mtypea_id, wbuf, es_id) < 0) + TEST_ERROR; + + /* Update fbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + fbuf[i][j].a = wbuf[i][j].a; + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. 
*/ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + TEST_ERROR; + + /* Clear the read buffer */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + rbuf[i][j].a = -2; + rbuf[i][j].b = -2; + } /* end for */ + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, mtype_id, rbuf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + if (rbuf[i][j].a != fbuf[i][j].a) + FAIL_PUTS_ERROR(" data verification failed\n"); + if (rbuf[i][j].b != fbuf[i][j].b) + FAIL_PUTS_ERROR(" data verification failed\n"); + } /* end for */ + + /* Update wbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + wbuf[i][j].a += 2 * 6 * 10; + wbuf[i][j].b += 2 * 6 * 10; + } /* end for */ + + /* Write the attribute asynchronously (element b only) */ + if (H5Awrite_async(attr_id, mtypeb_id, wbuf, es_id) < 0) + TEST_ERROR; + + /* Update fbuf */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) + fbuf[i][j].b = wbuf[i][j].b; + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. */ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + TEST_ERROR; + + /* Clear the read buffer */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + rbuf[i][j].a = -2; + rbuf[i][j].b = -2; + } /* end for */ + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, mtype_id, rbuf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < 6; i++) + for (j = 0; j < 10; j++) { + if (rbuf[i][j].a != fbuf[i][j].a) + FAIL_PUTS_ERROR(" data verification failed\n"); + if (rbuf[i][j].b != fbuf[i][j].b) + FAIL_PUTS_ERROR(" data verification failed\n"); + } /* end for */ + + /* Close */ + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(mtype_id) < 0) + TEST_ERROR; + if (H5Tclose(ftype_id) < 0) + TEST_ERROR; + if (H5Tclose(mtypea_id) < 0) + TEST_ERROR; + if (H5Tclose(mtypeb_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(mtype_id); + H5Tclose(ftype_id); + H5Tclose(mtypea_id); + H5Tclose(mtypeb_id); + H5Aclose(attr_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} /* end test_attribute_io_compound() */ + +/* + * Test group interfaces + */ +static int +test_group(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t parent_group_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + H5G_info_t info1; + H5G_info_t info2; + H5G_info_t info3; + size_t num_in_progress; + hbool_t op_failed; + + TESTING("group operations"); + + /* Make sure the connector supports the API functions 
being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, group more, or creation order aren't supported " + "with this connector\n"); + return 0; + } + + /* Create GCPL */ + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) + TEST_ERROR; + + /* Track creation order */ + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create the parent group asynchronously */ + if ((parent_group_id = + H5Gcreate_async(file_id, "group_parent", H5P_DEFAULT, gcpl_id, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create 3 subgroups asynchronously, the first with no sub-subgroups, the + * second with 1, and the third with 2 */ + if ((group_id = + H5Gcreate_async(parent_group_id, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + if (H5Gclose_async(group_id, es_id) < 0) + TEST_ERROR; + + if ((group_id = + H5Gcreate_async(parent_group_id, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + if ((subgroup_id = H5Gcreate_async(group_id, "subgroup1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + TEST_ERROR; + if (H5Gclose_async(subgroup_id, es_id) < 0) + TEST_ERROR; + if (H5Gclose_async(group_id, es_id) < 0) + TEST_ERROR; + + if ((group_id = + H5Gcreate_async(parent_group_id, "group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + if ((subgroup_id = H5Gcreate_async(group_id, "subgroup1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + TEST_ERROR; + if (H5Gclose_async(subgroup_id, es_id) < 0) + TEST_ERROR; + if ((subgroup_id = H5Gcreate_async(group_id, "subgroup2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + TEST_ERROR; + if (H5Gclose_async(subgroup_id, es_id) < 0) + TEST_ERROR; + if (H5Gclose_async(group_id, es_id) < 0) + TEST_ERROR; + + /* Flush the file asynchronously. This will effectively work as a barrier, + * guaranteeing the read takes place after the write. 
*/ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + TEST_ERROR; + + /* Test H5Gget_info_async */ + /* Open group1 asynchronously */ + if ((group_id = H5Gopen_async(parent_group_id, "group1", H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Get info */ + if (H5Gget_info_async(group_id, &info1, es_id) < 0) + TEST_ERROR; + + /* Test H5Gget_info_by_idx_async */ + if (H5Gget_info_by_idx_async(parent_group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &info2, + H5P_DEFAULT, es_id) < 0) + TEST_ERROR; + + /* Test H5Gget_info_by_name_async */ + if (H5Gget_info_by_name_async(parent_group_id, "group3", &info3, H5P_DEFAULT, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify group infos */ + if (info1.nlinks != 0) + FAIL_PUTS_ERROR(" incorrect number of links"); + if (info2.nlinks != 1) + FAIL_PUTS_ERROR(" incorrect number of links"); + if (info3.nlinks != 2) + FAIL_PUTS_ERROR(" incorrect number of links"); + + /* Close */ + if (H5Gclose_async(group_id, es_id) < 0) + TEST_ERROR; + if (H5Gclose_async(parent_group_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(parent_group_id); + H5Fclose(file_id); + H5Pclose(gcpl_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} /* end test_group() */ + +/* + * Test link interfaces + */ +static int +test_link(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t parent_group_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + hbool_t existsh1; + hbool_t existsh2; + hbool_t existsh3; + hbool_t existss1; + hbool_t existss2; + hbool_t existss3; + size_t num_in_progress; + hbool_t op_failed; + + TESTING("link operations"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, link, hard link, soft link, flush, or creation order " + "aren't supported with this connector\n"); + return 0; + } + + /* Create GCPL */ + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) + TEST_ERROR; + + /* Track creation order */ + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create the parent group asynchronously */ + if ((parent_group_id = + H5Gcreate_async(file_id, "link_parent", H5P_DEFAULT, gcpl_id, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create subgroup asynchronously. 
*/ + if ((group_id = H5Gcreate_async(parent_group_id, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + TEST_ERROR; + if (H5Gclose_async(group_id, es_id) < 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the link to the subgroup is visible to later tasks. + */ + if (H5Oflush_async(parent_group_id, es_id) < 0) + TEST_ERROR; + + /* Create hard link asynchronously */ + if (H5Lcreate_hard_async(parent_group_id, "group", parent_group_id, "hard_link", H5P_DEFAULT, H5P_DEFAULT, + es_id) < 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the soft link create takes place after the hard + * link create. */ + if (H5Oflush_async(parent_group_id, es_id) < 0) + TEST_ERROR; + + /* Create soft link asynchronously */ + if (H5Lcreate_soft_async("/link_parent/group", parent_group_id, "soft_link", H5P_DEFAULT, H5P_DEFAULT, + es_id) < 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the writes. */ + if (H5Oflush_async(parent_group_id, es_id) < 0) + TEST_ERROR; + + /* Check if hard link exists */ + if (H5Lexists_async(parent_group_id, "hard_link", &existsh1, H5P_DEFAULT, es_id) < 0) + TEST_ERROR; + + /* Check if soft link exists */ + if (H5Lexists_async(parent_group_id, "soft_link", &existss1, H5P_DEFAULT, es_id) < 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the delete takes place after the reads. */ + if (H5Oflush_async(parent_group_id, es_id) < 0) + TEST_ERROR; + + /* Delete soft link by index */ + if (H5Ldelete_by_idx_async(parent_group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, H5P_DEFAULT, es_id) < + 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the delete. */ + if (H5Oflush_async(parent_group_id, es_id) < 0) + TEST_ERROR; + + /* Check if hard link exists */ + if (H5Lexists_async(parent_group_id, "hard_link", &existsh2, H5P_DEFAULT, es_id) < 0) + TEST_ERROR; + + /* Check if soft link exists */ + if (H5Lexists_async(parent_group_id, "soft_link", &existss2, H5P_DEFAULT, es_id) < 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the delete takes place after the reads. */ + if (H5Oflush_async(parent_group_id, es_id) < 0) + TEST_ERROR; + + /* Delete hard link */ + if (H5Ldelete_async(parent_group_id, "hard_link", H5P_DEFAULT, es_id) < 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the delete. 
 */
+    if (H5Oflush_async(parent_group_id, es_id) < 0)
+        TEST_ERROR;
+
+    /* Check if hard link exists */
+    if (H5Lexists_async(parent_group_id, "hard_link", &existsh3, H5P_DEFAULT, es_id) < 0)
+        TEST_ERROR;
+
+    /* Check if soft link exists */
+    if (H5Lexists_async(parent_group_id, "soft_link", &existss3, H5P_DEFAULT, es_id) < 0)
+        TEST_ERROR;
+
+    /* Wait for the event stack to complete */
+    if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+        TEST_ERROR;
+    if (op_failed)
+        TEST_ERROR;
+
+    /* Check if existence returns were correct */
+    if (!existsh1)
+        FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist");
+    if (!existss1)
+        FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist");
+    if (!existsh2)
+        FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist");
+    if (existss2)
+        FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist");
+    if (existsh3)
+        FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist");
+    if (existss3)
+        FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist");
+
+    /* Close */
+    if (H5Gclose_async(parent_group_id, es_id) < 0)
+        TEST_ERROR;
+    if (H5Fclose_async(file_id, es_id) < 0)
+        TEST_ERROR;
+    if (H5Pclose(gcpl_id) < 0)
+        TEST_ERROR;
+
+    /* Wait for the event stack to complete */
+    if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+        TEST_ERROR;
+    if (op_failed)
+        TEST_ERROR;
+
+    if (H5ESclose(es_id) < 0)
+        TEST_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY
+    {
+        H5Gclose(group_id);
+        H5Gclose(parent_group_id);
+        H5Fclose(file_id);
+        H5Pclose(gcpl_id);
+        H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+        H5ESclose(es_id);
+    }
+    H5E_END_TRY;
+
+    return 1;
+} /* end test_link() */
+
+/*
+ * Test H5Ocopy() and H5Orefresh()
+ */
+static int
+test_ocopy_orefresh(void)
+{
+    hid_t file_id = H5I_INVALID_HID;
+    hid_t parent_group_id = H5I_INVALID_HID;
+    hid_t dset_id = H5I_INVALID_HID;
+    hid_t space_id = H5I_INVALID_HID;
+    hid_t es_id = H5I_INVALID_HID;
+    hsize_t dims[2] = {6, 10};
+    size_t num_in_progress;
+    hbool_t op_failed;
+
+    TESTING("H5Ocopy() and H5Orefresh()");
+
+    /* Make sure the connector supports the API functions being tested */
+    if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+        !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) ||
+        !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+        SKIPPED();
+        HDprintf(" API functions for basic file, group, dataset, object more, flush, or refresh aren't "
+                 "supported with this connector\n");
+        return 0;
+    }
+
+    /* Create dataspace */
+    if ((space_id = H5Screate_simple(2, dims, NULL)) < 0)
+        TEST_ERROR;
+
+    /* Create event stack */
+    if ((es_id = H5EScreate()) < 0)
+        TEST_ERROR;
+
+    /* Open file asynchronously */
+    if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0)
+        TEST_ERROR;
+
+    /* Create the parent group asynchronously */
+    if ((parent_group_id =
+             H5Gcreate_async(file_id, "ocopy_parent", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+        TEST_ERROR;
+
+    /* Create dataset asynchronously. */
+    if ((dset_id = H5Dcreate_async(parent_group_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+                                   H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+        TEST_ERROR;
+    if (H5Dclose_async(dset_id, es_id) < 0)
+        TEST_ERROR;
+
+    /* Flush the parent group asynchronously. This will effectively work as a
+     * barrier, guaranteeing the copy takes place after dataset create. */
+    if (H5Oflush_async(parent_group_id, es_id) < 0)
+        TEST_ERROR;
+
+    /* Copy dataset */
+    if (H5Ocopy_async(parent_group_id, "dset", parent_group_id, "copied_dset", H5P_DEFAULT, H5P_DEFAULT,
+                      es_id) < 0)
+        TEST_ERROR;
+
+    /* Flush the parent group asynchronously. This will effectively work as a
+     * barrier, guaranteeing the dataset open takes place after the copy. */
+    if (H5Oflush_async(parent_group_id, es_id) < 0)
+        TEST_ERROR;
+
+    /* Open the copied dataset asynchronously */
+    if ((dset_id = H5Dopen_async(parent_group_id, "copied_dset", H5P_DEFAULT, es_id)) < 0)
+        TEST_ERROR;
+
+    /* Refresh the copied dataset asynchronously */
+    if (H5Orefresh(dset_id) < 0)
+        TEST_ERROR;
+
+    /* Wait for the event stack to complete */
+    if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+        TEST_ERROR;
+    if (op_failed)
+        TEST_ERROR;
+
+    /* Close */
+    if (H5Dclose_async(dset_id, es_id) < 0)
+        TEST_ERROR;
+    if (H5Gclose_async(parent_group_id, es_id) < 0)
+        TEST_ERROR;
+    if (H5Fclose_async(file_id, es_id) < 0)
+        TEST_ERROR;
+
+    /* Wait for the event stack to complete */
+    if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+        TEST_ERROR;
+    if (op_failed)
+        TEST_ERROR;
+
+    if (H5ESclose(es_id) < 0)
+        TEST_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY
+    {
+        H5Dclose(dset_id);
+        H5Gclose(parent_group_id);
+        H5Fclose(file_id);
+        H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+        H5ESclose(es_id);
+    }
+    H5E_END_TRY;
+
+    return 1;
+} /* end test_ocopy_orefresh() */
+
+/*
+ * Test H5Freopen()
+ */
+static int
+test_file_reopen(void)
+{
+    hid_t file_id = H5I_INVALID_HID;
+    hid_t reopened_file_id = H5I_INVALID_HID;
+    hid_t es_id = H5I_INVALID_HID;
+    size_t num_in_progress;
+    hbool_t op_failed;
+
+    TESTING("H5Freopen()");
+
+    /* Make sure the connector supports the API functions being tested */
+    if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) {
+        SKIPPED();
+        HDprintf(" API functions for basic file or file more aren't supported with this connector\n");
+        return 0;
+    }
+
+    /* Create event stack */
+    if ((es_id = H5EScreate()) < 0)
+        TEST_ERROR;
+
+    /* Open file asynchronously */
+    if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0)
+        TEST_ERROR;
+
+    /* Reopen file asynchronously */
+    if ((reopened_file_id = H5Freopen_async(file_id, es_id)) < 0)
+        TEST_ERROR;
+
+    /* Wait for the event stack to complete */
+    if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+        TEST_ERROR;
+    if (op_failed)
+        TEST_ERROR;
+
+    /* Close */
+    if (H5Fclose_async(reopened_file_id, es_id) < 0)
+        TEST_ERROR;
+    if (H5Fclose_async(file_id, es_id) < 0)
+        TEST_ERROR;
+
+    /* Wait for the event stack to complete */
+    if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+        TEST_ERROR;
+    if (op_failed)
+        TEST_ERROR;
+
+    if (H5ESclose(es_id) < 0)
+        TEST_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY
+    {
+        H5Fclose(reopened_file_id);
+        H5Fclose(file_id);
+        H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+        H5ESclose(es_id);
+    }
+    H5E_END_TRY;
+
+    return 1;
+} /* end test_file_reopen() */
+
+/*
+ * Cleanup temporary test files
+ */
+static void
+cleanup_files(void)
+{
+    char file_name[64];
+    int i;
+
+    H5Fdelete(ASYNC_API_TEST_FILE, H5P_DEFAULT);
+    for (i = 0; i <= max_printf_file; i++) {
+
HDsnprintf(file_name, 64, ASYNC_API_TEST_FILE_PRINTF, i); + H5Fdelete(file_name, H5P_DEFAULT); + } /* end for */ +} + +int +H5_api_async_test(void) +{ + size_t i; + int nerrors; + + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Async Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC)) { + SKIPPED(); + HDprintf(" Async APIs aren't supported with this connector\n"); + return 0; + } + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(async_tests); i++) { + nerrors += (*async_tests[i])() ? 1 : 0; + } + + HDprintf("\n"); + + HDprintf("Cleaning up testing files\n"); + cleanup_files(); + + return nerrors; +} + +#else /* H5ESpublic_H */ + +int +H5_api_async_test(void) +{ + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Async Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + + HDprintf("SKIPPED due to no async support in HDF5 library\n"); + + return 0; +} + +#endif /* H5ESpublic_H */ diff --git a/test/API/H5_api_async_test.h b/test/API/H5_api_async_test.h new file mode 100644 index 00000000000..f6df48a097c --- /dev/null +++ b/test/API/H5_api_async_test.h @@ -0,0 +1,29 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_ASYNC_TEST_H +#define H5_API_ASYNC_TEST_H + +#include "H5_api_test.h" + +int H5_api_async_test(void); + +/************************************************ + * * + * API async test defines * + * * + ************************************************/ + +#define ASYNC_API_TEST_FILE "H5_api_async_test.h5" +#define ASYNC_API_TEST_FILE_PRINTF "H5_api_async_test_%d.h5" + +#endif diff --git a/test/API/H5_api_attribute_test.c b/test/API/H5_api_attribute_test.c new file mode 100644 index 00000000000..7f767a7f0d8 --- /dev/null +++ b/test/API/H5_api_attribute_test.c @@ -0,0 +1,11027 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_attribute_test.h" + +/* + * TODO: Additional tests to be written: + * + * - Test for creating a large attribute. + * - Test for checking that object's max. attr. creation + * order value gets reset when all attributes are removed. 
+ */ + +static int test_create_attribute_on_root(void); +static int test_create_attribute_on_dataset(void); +static int test_create_attribute_on_datatype(void); +static int test_create_attribute_with_null_space(void); +static int test_create_attribute_with_scalar_space(void); +static int test_create_attribute_with_space_in_name(void); +static int test_create_attribute_invalid_params(void); +static int test_open_attribute(void); +static int test_open_attribute_invalid_params(void); +static int test_write_attribute(void); +static int test_write_attribute_invalid_params(void); +static int test_read_attribute(void); +static int test_read_attribute_invalid_params(void); +static int test_read_empty_attribute(void); +static int test_close_attribute_invalid_id(void); +static int test_get_attribute_space_and_type(void); +static int test_get_attribute_space_and_type_invalid_params(void); +static int test_attribute_property_lists(void); +static int test_get_attribute_name(void); +static int test_get_attribute_name_invalid_params(void); +static int test_get_attribute_storage_size(void); +static int test_get_attribute_info(void); +static int test_get_attribute_info_invalid_params(void); +static int test_rename_attribute(void); +static int test_rename_attribute_invalid_params(void); +static int test_attribute_iterate_group(void); +static int test_attribute_iterate_dataset(void); +static int test_attribute_iterate_datatype(void); +static int test_attribute_iterate_index_saving(void); +static int test_attribute_iterate_invalid_params(void); +static int test_attribute_iterate_0_attributes(void); +static int test_delete_attribute(void); +static int test_delete_attribute_invalid_params(void); +static int test_attribute_exists(void); +static int test_attribute_exists_invalid_params(void); +static int test_attribute_many(void); +static int test_attribute_duplicate_id(void); +static int test_get_number_attributes(void); +static int test_attr_shared_dtype(void); + +static herr_t attr_iter_callback1(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo, + void *op_data); +static herr_t attr_iter_callback2(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo, + void *op_data); + +/* + * The array of attribute tests to be performed. 
+ */ +static int (*attribute_tests[])(void) = {test_create_attribute_on_root, + test_create_attribute_on_dataset, + test_create_attribute_on_datatype, + test_create_attribute_with_null_space, + test_create_attribute_with_scalar_space, + test_create_attribute_with_space_in_name, + test_create_attribute_invalid_params, + test_open_attribute, + test_open_attribute_invalid_params, + test_write_attribute, + test_write_attribute_invalid_params, + test_read_attribute, + test_read_attribute_invalid_params, + test_read_empty_attribute, + test_close_attribute_invalid_id, + test_get_attribute_space_and_type, + test_get_attribute_space_and_type_invalid_params, + test_attribute_property_lists, + test_get_attribute_name, + test_get_attribute_name_invalid_params, + test_get_attribute_storage_size, + test_get_attribute_info, + test_get_attribute_info_invalid_params, + test_rename_attribute, + test_rename_attribute_invalid_params, + test_attribute_iterate_group, + test_attribute_iterate_dataset, + test_attribute_iterate_datatype, + test_attribute_iterate_index_saving, + test_attribute_iterate_invalid_params, + test_attribute_iterate_0_attributes, + test_delete_attribute, + test_delete_attribute_invalid_params, + test_attribute_exists, + test_attribute_exists_invalid_params, + test_attribute_duplicate_id, + test_attribute_many, + test_get_number_attributes, + test_attr_shared_dtype}; + +/* + * A test to check that an attribute can be created on + * the root group. + */ +static int +test_create_attribute_on_root(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID; + hid_t attr_dtype1 = H5I_INVALID_HID, attr_dtype2 = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute creation on the root group"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_CREATE_ON_ROOT_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype1 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + if ((attr_dtype2 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Acreate2) + { + TESTING_2("H5Acreate on the root group"); + + if ((attr_id = H5Acreate2(file_id, ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME, attr_dtype1, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s' using H5Acreate\n", + ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME); + PART_ERROR(H5Acreate2); + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(file_id, ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME); + PART_ERROR(H5Acreate2); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME); + PART_ERROR(H5Acreate2); + } + + PASSED(); + } + PART_END(H5Acreate2); + + PART_BEGIN(H5Acreate_by_name) + { + 
TESTING_2("H5Acreate_by_name on the root group"); + + if ((attr_id2 = H5Acreate_by_name(file_id, "/", ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME2, attr_dtype2, + space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute on root group using H5Acreate_by_name\n"); + PART_ERROR(H5Acreate_by_name); + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(file_id, ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME2); + PART_ERROR(H5Acreate_by_name); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME2); + PART_ERROR(H5Acreate_by_name); + } + + PASSED(); + } + PART_END(H5Acreate_by_name); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype1) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype2) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id2) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype1); + H5Tclose(attr_dtype2); + H5Aclose(attr_id); + H5Aclose(attr_id2); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an attribute can be created on + * a dataset. + */ +static int +test_create_attribute_on_dataset(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID; + hid_t attr_dtype1 = H5I_INVALID_HID, attr_dtype2 = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dset_space_id = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute creation on a dataset"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or attribute aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_ON_DATASET_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_CREATE_ON_DATASET_GROUP_NAME); + goto error; + } + + if ((dset_space_id = + generate_random_dataspace(ATTRIBUTE_CREATE_ON_DATASET_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + if ((attr_space_id = + generate_random_dataspace(ATTRIBUTE_CREATE_ON_DATASET_ATTR_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if ((attr_dtype1 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + 
TEST_ERROR; + if ((attr_dtype2 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, ATTRIBUTE_CREATE_ON_DATASET_DSET_NAME, dset_dtype, dset_space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Acreate_on_dataset) + { + TESTING_2("H5Acreate on a dataset"); + + if ((attr_id = H5Acreate2(dset_id, ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME, attr_dtype1, + attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + PART_ERROR(H5Acreate_on_dataset); + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(dset_id, ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME); + PART_ERROR(H5Acreate_on_dataset); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME); + PART_ERROR(H5Acreate_on_dataset); + } + + PASSED(); + } + PART_END(H5Acreate_on_dataset); + + PART_BEGIN(H5Acreate_by_name_on_dataset) + { + TESTING_2("H5Acreate_by_name on a dataset"); + + if ((attr_id2 = H5Acreate_by_name(group_id, ATTRIBUTE_CREATE_ON_DATASET_DSET_NAME, + ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME2, attr_dtype2, + attr_space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute on dataset by name\n"); + PART_ERROR(H5Acreate_by_name_on_dataset); + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(dset_id, ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME2); + PART_ERROR(H5Acreate_by_name_on_dataset); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME2); + PART_ERROR(H5Acreate_by_name_on_dataset); + } + + PASSED(); + } + PART_END(H5Acreate_by_name_on_dataset); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(dset_space_id) < 0) + TEST_ERROR; + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype1) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype2) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(dset_space_id); + H5Sclose(attr_space_id); + H5Tclose(dset_dtype); + H5Tclose(attr_dtype1); + H5Tclose(attr_dtype2); + H5Dclose(dset_id); + H5Aclose(attr_id); + H5Aclose(attr_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an attribute can be created on + * a committed datatype. 
+ */ +static int +test_create_attribute_on_datatype(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID; + hid_t attr_dtype1 = H5I_INVALID_HID, attr_dtype2 = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute creation on a committed datatype"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, stored datatype, or attribute aren't supported " + "with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_ON_DATATYPE_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_CREATE_ON_DATATYPE_GROUP_NAME); + goto error; + } + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + if (H5Tcommit2(group_id, ATTRIBUTE_CREATE_ON_DATATYPE_DTYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype\n"); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_CREATE_ON_DATATYPE_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype1 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + if ((attr_dtype2 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Acreate_on_datatype) + { + TESTING_2("H5Acreate on a committed datatype"); + + if ((attr_id = H5Acreate2(type_id, ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME, attr_dtype1, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute on datatype using H5Acreate\n"); + PART_ERROR(H5Acreate_on_datatype); + } + + if ((attr_exists = H5Aexists(type_id, ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + PART_ERROR(H5Acreate_on_datatype); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + PART_ERROR(H5Acreate_on_datatype); + } + + PASSED(); + } + PART_END(H5Acreate_on_datatype); + + PART_BEGIN(H5Acreate_by_name_on_datatype) + { + TESTING_2("H5Acreate_by_name on a committed datatype"); + + if ((attr_id2 = H5Acreate_by_name(group_id, ATTRIBUTE_CREATE_ON_DATATYPE_DTYPE_NAME, + ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME2, attr_dtype2, space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute on datatype using H5Acreate_by_name\n"); + PART_ERROR(H5Acreate_by_name_on_datatype); + } + + /* Verify the attribute has been created */ + if ((attr_exists = 
H5Aexists(type_id, ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + PART_ERROR(H5Acreate_by_name_on_datatype); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + PART_ERROR(H5Acreate_by_name_on_datatype); + } + + PASSED(); + } + PART_END(H5Acreate_by_name_on_datatype); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype1) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype2) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id2) < 0) + TEST_ERROR; + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype1); + H5Tclose(attr_dtype2); + H5Aclose(attr_id); + H5Aclose(attr_id2); + H5Tclose(type_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that creating an attribute with a + * NULL dataspace is not problematic. + */ +static int +test_create_attribute_with_null_space(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING("attribute creation with a NULL dataspace"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_SUBGROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup\n"); + goto error; + } + + if ((space_id = H5Screate(H5S_NULL)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Aopen(group_id, ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute\n"); + goto error; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + 
if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that creating an attribute with a + * scalar dataspace is not problematic. + */ +static int +test_create_attribute_with_scalar_space(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING("attribute creation with a SCALAR dataspace"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_SUBGROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup\n"); + goto error; + } + + if ((space_id = H5Screate(H5S_SCALAR)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_ATTR_NAME, attr_dtype, + space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Aopen(group_id, ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute\n"); + goto error; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a space in an attribute's name + * is not problematic. 
+ */ +static int +test_create_attribute_with_space_in_name(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_id2 = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING("attribute creation with a space in attribute's name"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", + ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_GROUP_NAME); + goto error; + } + + if ((space_id = + generate_random_dataspace(ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Aclose(attr_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an attribute can't be created when + * H5Acreate is passed invalid parameters. 
+ */ +static int +test_create_attribute_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute creation with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group\n"); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_CREATE_INVALID_PARAMS_SPACE_RANK, NULL, NULL, TRUE)) < + 0) + TEST_ERROR; + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Acreate_invalid_loc_id) + { + TESTING_2("H5Acreate with invalid loc_id"); + + H5E_BEGIN_TRY + { + attr_id = H5Acreate2(H5I_INVALID_HID, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, + space_id, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate with an invalid loc_id!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Acreate_invalid_loc_id); + + PART_BEGIN(H5Acreate_invalid_attr_name) + { + TESTING_2("H5Acreate with invalid attribute name"); + + H5E_BEGIN_TRY + { + attr_id = H5Acreate2(group_id, NULL, attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate with a NULL name!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_invalid_attr_name); + } + + H5E_BEGIN_TRY + { + attr_id = H5Acreate2(group_id, "", attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate with an invalid name of ''!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_invalid_attr_name); + } + + PASSED(); + } + PART_END(H5Acreate_invalid_attr_name); + + PART_BEGIN(H5Acreate_invalid_datatype) + { + TESTING_2("H5Acreate with an invalid datatype"); + + H5E_BEGIN_TRY + { + attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, H5I_INVALID_HID, + space_id, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate with an invalid datatype!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_invalid_datatype); + } + + PASSED(); + } + PART_END(H5Acreate_invalid_datatype); + + PART_BEGIN(H5Acreate_invalid_dataspace) + { + TESTING_2("H5Acreate with an invalid dataspace"); + + H5E_BEGIN_TRY + { + attr_id = 
H5Acreate2(group_id, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, + H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate with an invalid dataspace!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_invalid_dataspace); + } + + PASSED(); + } + PART_END(H5Acreate_invalid_dataspace); + + PART_BEGIN(H5Acreate_invalid_acpl) + { + TESTING_2("H5Acreate with an invalid ACPL"); + + H5E_BEGIN_TRY + { + attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, + space_id, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate with an invalid ACPL!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_invalid_acpl); + } + + PASSED(); + } + PART_END(H5Acreate_invalid_acpl); + + PART_BEGIN(H5Acreate_invalid_aapl) + { + TESTING_2("H5Acreate with an invalid AAPL"); +#ifndef NO_INVALID_PROPERTY_LIST_TESTS + H5E_BEGIN_TRY + { + attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, + space_id, H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate with an invalid AAPL!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_invalid_aapl); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Acreate_invalid_aapl); +#endif + } + PART_END(H5Acreate_invalid_aapl); + + PART_BEGIN(H5Acreate_by_name_invalid_loc_id) + { + TESTING_2("H5Acreate_by_name with an invalid loc_id"); + + H5E_BEGIN_TRY + { + attr_id = H5Acreate_by_name(H5I_INVALID_HID, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, + ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate_by_name with an invalid loc_id!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Acreate_by_name_invalid_loc_id); + + PART_BEGIN(H5Acreate_by_name_invalid_obj_name) + { + TESTING_2("H5Acreate_by_name with invalid object name"); + + H5E_BEGIN_TRY + { + attr_id = H5Acreate_by_name(group_id, NULL, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, + attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate_by_name with a NULL object name!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_by_name_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + attr_id = H5Acreate_by_name(group_id, "", ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, + attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf( + " created attribute using H5Acreate_by_name with an invalid object name of ''!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_by_name_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Acreate_by_name_invalid_obj_name); + + PART_BEGIN(H5Acreate_by_name_invalid_attr_name) + { + TESTING_2("H5Acreate_by_name with invalid attribute name"); + + H5E_BEGIN_TRY + { + attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, NULL, + attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate_by_name with a NULL attribute name!\n"); + H5Aclose(attr_id); + 
PART_ERROR(H5Acreate_by_name_invalid_attr_name); + } + + H5E_BEGIN_TRY + { + attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, "", + attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf( + " created attribute using H5Acreate_by_name with an invalid attribute name of ''!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_by_name_invalid_attr_name); + } + + PASSED(); + } + PART_END(H5Acreate_by_name_invalid_attr_name); + + PART_BEGIN(H5Acreate_by_name_invalid_datatype) + { + TESTING_2("H5Acreate_by_name with invalid datatype"); + + H5E_BEGIN_TRY + { + attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, + ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, H5I_INVALID_HID, + space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate_by_name with an invalid datatype!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_by_name_invalid_datatype); + } + + PASSED(); + } + PART_END(H5Acreate_by_name_invalid_datatype); + + PART_BEGIN(H5Acreate_by_name_invalid_dataspace) + { + TESTING_2("H5Acreate_by_name with invalid dataspace"); + + H5E_BEGIN_TRY + { + attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, + ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, + H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate_by_name with an invalid dataspace!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_by_name_invalid_dataspace); + } + + PASSED(); + } + PART_END(H5Acreate_by_name_invalid_dataspace); + + PART_BEGIN(H5Acreate_by_name_invalid_acpl) + { + TESTING_2("H5Acreate_by_name with invalid ACPL"); + + H5E_BEGIN_TRY + { + attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, + ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, space_id, + H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate_by_name with an invalid ACPL!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_by_name_invalid_acpl); + } + + PASSED(); + } + PART_END(H5Acreate_by_name_invalid_acpl); + + PART_BEGIN(H5Acreate_by_name_invalid_aapl) + { + TESTING_2("H5Acreate_by_name with invalid AAPL"); +#ifndef NO_INVALID_PROPERTY_LIST_TESTS + H5E_BEGIN_TRY + { + attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, + ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate_by_name with an invalid AAPL!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_by_name_invalid_aapl); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Acreate_by_name_invalid_aapl); +#endif + } + PART_END(H5Acreate_by_name_invalid_aapl); + + PART_BEGIN(H5Acreate_by_name_invalid_lapl) + { + TESTING_2("H5Acreate_by_name with invalid LAPL"); + + H5E_BEGIN_TRY + { + attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, + ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" created attribute using H5Acreate_by_name with an 
invalid LAPL!\n"); + H5Aclose(attr_id); + PART_ERROR(H5Acreate_by_name_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Acreate_by_name_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test for H5Aopen(_by_idx). + */ +static int +test_open_attribute(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t attr_type = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute opening"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for attribute creation order tracking\n"); + goto error; + } + + if (H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set attribute creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_OPEN_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_OPEN_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_type = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + /* Create several attributes */ + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_OPEN_TEST_ATTR_NAME, attr_type, space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_OPEN_TEST_ATTR_NAME2, attr_type, space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME2); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_OPEN_TEST_ATTR_NAME3, attr_type, space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME3); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + 
PART_BEGIN(H5Aopen) + { + TESTING_2("H5Aopen"); + + if ((attr_id = H5Aopen(group_id, ATTRIBUTE_OPEN_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s' using H5Aopen\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME); + PART_ERROR(H5Aopen); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME); + PART_ERROR(H5Aopen); + } + + PASSED(); + } + PART_END(H5Aopen); + + PART_BEGIN(H5Aopen_by_name) + { + TESTING_2("H5Aopen_by_name"); + + if ((attr_id = H5Aopen_by_name(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, + ATTRIBUTE_OPEN_TEST_ATTR_NAME, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s' using H5Aopen_by_name\n", + ATTRIBUTE_OPEN_TEST_ATTR_NAME); + PART_ERROR(H5Aopen_by_name); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME); + PART_ERROR(H5Aopen_by_name); + } + + PASSED(); + } + PART_END(H5Aopen_by_name); + + PART_BEGIN(H5Aopen_by_idx_crt_order_increasing) + { + TESTING_2("H5Aopen_by_idx by creation order in increasing order"); + + if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_INC, 0, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation " + "order in increasing order\n", + ATTRIBUTE_OPEN_TEST_ATTR_NAME, 0); + PART_ERROR(H5Aopen_by_idx_crt_order_increasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME); + PART_ERROR(H5Aopen_by_idx_crt_order_increasing); + } + + if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_INC, 1, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation " + "order in increasing order\n", + ATTRIBUTE_OPEN_TEST_ATTR_NAME2, 1); + PART_ERROR(H5Aopen_by_idx_crt_order_increasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME2); + PART_ERROR(H5Aopen_by_idx_crt_order_increasing); + } + + if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_INC, 2, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation " + "order in increasing order\n", + ATTRIBUTE_OPEN_TEST_ATTR_NAME3, 2); + PART_ERROR(H5Aopen_by_idx_crt_order_increasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME3); + PART_ERROR(H5Aopen_by_idx_crt_order_increasing); + } + + PASSED(); + } + PART_END(H5Aopen_by_idx_crt_order_increasing); + + PART_BEGIN(H5Aopen_by_idx_crt_order_decreasing) + { + TESTING_2("H5Aopen_by_idx by creation order in decreasing order"); + + if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, 2, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation " + "order in decreasing order\n", + ATTRIBUTE_OPEN_TEST_ATTR_NAME, 2); + PART_ERROR(H5Aopen_by_idx_crt_order_decreasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", 
ATTRIBUTE_OPEN_TEST_ATTR_NAME); + PART_ERROR(H5Aopen_by_idx_crt_order_decreasing); + } + + if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, 1, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation " + "order in decreasing order\n", + ATTRIBUTE_OPEN_TEST_ATTR_NAME2, 1); + PART_ERROR(H5Aopen_by_idx_crt_order_decreasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME2); + PART_ERROR(H5Aopen_by_idx_crt_order_decreasing); + } + + if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, 0, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation " + "order in decreasing order\n", + ATTRIBUTE_OPEN_TEST_ATTR_NAME3, 0); + PART_ERROR(H5Aopen_by_idx_crt_order_decreasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME3); + PART_ERROR(H5Aopen_by_idx_crt_order_decreasing); + } + + PASSED(); + } + PART_END(H5Aopen_by_idx_crt_order_decreasing); + + PART_BEGIN(H5Aopen_by_idx_name_order_increasing) + { + TESTING_2("H5Aopen_by_idx by alphabetical order in increasing order"); + + if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 0, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by alphabetical " + "order in increasing order\n", + ATTRIBUTE_OPEN_TEST_ATTR_NAME, 0); + PART_ERROR(H5Aopen_by_idx_name_order_increasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME); + PART_ERROR(H5Aopen_by_idx_name_order_increasing); + } + + if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 1, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by alphabetical " + "order in increasing order\n", + ATTRIBUTE_OPEN_TEST_ATTR_NAME2, 1); + PART_ERROR(H5Aopen_by_idx_name_order_increasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME2); + PART_ERROR(H5Aopen_by_idx_name_order_increasing); + } + + if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 2, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by alphabetical " + "order in increasing order\n", + ATTRIBUTE_OPEN_TEST_ATTR_NAME3, 2); + PART_ERROR(H5Aopen_by_idx_name_order_increasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME3); + PART_ERROR(H5Aopen_by_idx_name_order_increasing); + } + + PASSED(); + } + PART_END(H5Aopen_by_idx_name_order_increasing); + + PART_BEGIN(H5Aopen_by_idx_name_order_decreasing) + { + TESTING_2("H5Aopen_by_idx by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_DEC, 2, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open 
attribute '%s' at index %d using H5Aopen_by_idx by "
+                         "alphabetical order in decreasing order\n",
+                         ATTRIBUTE_OPEN_TEST_ATTR_NAME, 2);
+                PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+            }
+
+            if (H5Aclose(attr_id) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME);
+                PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+            }
+
+            if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME,
+                                          H5_ITER_DEC, 1, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't open attribute '%s' at index %d using H5Aopen_by_idx by "
+                         "alphabetical order in decreasing order\n",
+                         ATTRIBUTE_OPEN_TEST_ATTR_NAME2, 1);
+                PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+            }
+
+            if (H5Aclose(attr_id) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME2);
+                PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+            }
+
+            if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME,
+                                          H5_ITER_DEC, 0, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't open attribute '%s' at index %d using H5Aopen_by_idx by "
+                         "alphabetical order in decreasing order\n",
+                         ATTRIBUTE_OPEN_TEST_ATTR_NAME3, 0);
+                PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+            }
+
+            if (H5Aclose(attr_id) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME3);
+                PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+            }
+
+            PASSED();
+#else
+            SKIPPED();
+            PART_EMPTY(H5Aopen_by_idx_name_order_decreasing);
+#endif
+        }
+        PART_END(H5Aopen_by_idx_name_order_decreasing);
+    }
+    END_MULTIPART;
+
+    TESTING_2("test cleanup");
+
+    if (H5Sclose(space_id) < 0)
+        TEST_ERROR;
+    if (H5Tclose(attr_type) < 0)
+        TEST_ERROR;
+    if (H5Pclose(gcpl_id) < 0)
+        TEST_ERROR;
+    if (H5Gclose(group_id) < 0)
+        TEST_ERROR;
+    if (H5Gclose(container_group) < 0)
+        TEST_ERROR;
+    if (H5Fclose(file_id) < 0)
+        TEST_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY
+    {
+        H5Sclose(space_id);
+        H5Tclose(attr_type);
+        H5Aclose(attr_id);
+        H5Pclose(gcpl_id);
+        H5Gclose(group_id);
+        H5Gclose(container_group);
+        H5Fclose(file_id);
+    }
+    H5E_END_TRY;
+
+    return 1;
+}
+
+/*
+ * A test to check that an attribute can't be opened when
+ * H5Aopen(_by_name/_by_idx) is passed invalid parameters.
+ */ +static int +test_open_attribute_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t attr_type = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute opening with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = + generate_random_dataspace(ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_type = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, attr_type, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Aopen_invalid_loc_id) + { + TESTING_2("H5Aopen with an invalid loc_id"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen(H5I_INVALID_HID, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen with an invalid loc_id!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Aopen_invalid_loc_id); + + PART_BEGIN(H5Aopen_invalid_attr_name) + { + TESTING_2("H5Aopen with an invalid attribute name"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen(group_id, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen with a NULL attribute name!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_invalid_attr_name); + } + + H5E_BEGIN_TRY + { + attr_id = H5Aopen(group_id, "", H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen with an invalid attribute name of ''!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_invalid_attr_name); + } + + PASSED(); + } + PART_END(H5Aopen_invalid_attr_name); + + PART_BEGIN(H5Aopen_invalid_aapl) + { + TESTING_2("H5Aopen with an invalid AAPL"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen(group_id, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (attr_id >= 0) { + 
H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen with an invalid AAPL!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_invalid_aapl); + } + + PASSED(); + } + PART_END(H5Aopen_invalid_aapl); + + PART_BEGIN(H5Aopen_by_name_invalid_loc_id) + { + TESTING_2("H5Aopen_by_name with an invalid loc_id"); + + H5E_BEGIN_TRY + { + attr_id = + H5Aopen_by_name(H5I_INVALID_HID, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_name with an invalid loc_id!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Aopen_by_name_invalid_loc_id); + + PART_BEGIN(H5Aopen_by_name_invalid_obj_name) + { + TESTING_2("H5Aopen_by_name with an invalid object name"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_name(container_group, NULL, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_name with a NULL object name!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_name_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_name(container_group, "", ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf( + " opened attribute '%s' using H5Aopen_by_name with an invalid object name of ''!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_name_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Aopen_by_name_invalid_obj_name); + + PART_BEGIN(H5Aopen_by_name_invalid_attr_name) + { + TESTING_2("H5Aopen_by_name with an invalid attribute name"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_name(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + NULL, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_name with a NULL attribute name!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_name_invalid_attr_name); + } + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_name(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, "", + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf( + " opened attribute '%s' using H5Aopen_by_name with an invalid attribute name of ''!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_name_invalid_attr_name); + } + + PASSED(); + } + PART_END(H5Aopen_by_name_invalid_attr_name); + + PART_BEGIN(H5Aopen_by_name_invalid_aapl) + { + TESTING_2("H5Aopen_by_name with an invalid AAPL"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_name(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, H5I_INVALID_HID, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_name with an invalid AAPL!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_name_invalid_aapl); + } + + PASSED(); + } + PART_END(H5Aopen_by_name_invalid_aapl); + + 
PART_BEGIN(H5Aopen_by_name_invalid_lapl) + { + TESTING_2("H5Aopen_by_name with an invalid LAPL"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_name(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT, + H5I_INVALID_HID); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_name with an invalid LAPL!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_name_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Aopen_by_name_invalid_lapl); + + PART_BEGIN(H5Aopen_by_idx_invalid_loc_id) + { + TESTING_2("H5Aopen_by_idx with an invalid loc_id"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_idx(H5I_INVALID_HID, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_idx with an invalid loc_id!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_idx_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Aopen_by_idx_invalid_loc_id); + + PART_BEGIN(H5Aopen_by_idx_invalid_obj_name) + { + TESTING_2("H5Aopen_by_idx with an invalid object name"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_idx(container_group, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_idx with a NULL object name!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_idx_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_idx(container_group, "", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf( + " opened attribute '%s' using H5Aopen_by_idx with an invalid object name of ''!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_idx_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Aopen_by_idx_invalid_obj_name); + + PART_BEGIN(H5Aopen_by_idx_invalid_index_type) + { + TESTING_2("H5Aopen_by_idx with an invalid index type"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_UNKNOWN, H5_ITER_INC, 0, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_idx with invalid index type " + "H5_INDEX_UNKNOWN!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_idx_invalid_index_type); + } + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_N, H5_ITER_INC, 0, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf( + " opened attribute '%s' using H5Aopen_by_idx with invalid index type H5_INDEX_N!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_idx_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Aopen_by_idx_invalid_index_type); + + PART_BEGIN(H5Aopen_by_idx_invalid_iter_order) + { + TESTING_2("H5Aopen_by_idx with an invalid iteration order"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, 
H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_idx with invalid iteration order " + "H5_ITER_UNKNOWN!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_idx_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_NAME, H5_ITER_N, 0, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_idx with invalid iteration order " + "H5_ITER_N!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_idx_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Aopen_by_idx_invalid_iter_order); + + PART_BEGIN(H5Aopen_by_idx_invalid_aapl) + { + TESTING_2("H5Aopen_by_idx with an invalid AAPL"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, 0, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_idx with an invalid AAPL!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_idx_invalid_aapl); + } + + PASSED(); + } + PART_END(H5Aopen_by_idx_invalid_aapl); + + PART_BEGIN(H5Aopen_by_idx_invalid_lapl) + { + TESTING_2("H5Aopen_by_idx with an invalid LAPL"); + + H5E_BEGIN_TRY + { + attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" opened attribute '%s' using H5Aopen_by_idx with an invalid LAPL!\n", + ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME); + H5Aclose(attr_id); + PART_ERROR(H5Aopen_by_idx_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Aopen_by_idx_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_type) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_type); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a simple write to an attribute + * can be made. 
+ */ +static int +test_write_attribute(void) +{ + hsize_t dims[ATTRIBUTE_WRITE_TEST_SPACE_RANK]; + size_t i, data_size; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + void *data = NULL; + + TESTING("H5Awrite"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, attribute, or file flush aren't supported with " + "this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_WRITE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_WRITE_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_WRITE_TEST_SPACE_RANK, NULL, dims, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_WRITE_TEST_ATTR_NAME, ATTRIBUTE_WRITE_TEST_ATTR_DTYPE, + space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_WRITE_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + for (i = 0, data_size = 1; i < ATTRIBUTE_WRITE_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= ATTRIBUTE_WRITE_TEST_ATTR_DTYPE_SIZE; + + if (NULL == (data = HDmalloc(data_size))) + TEST_ERROR; + + for (i = 0; i < data_size / ATTRIBUTE_WRITE_TEST_ATTR_DTYPE_SIZE; i++) + ((int *)data)[i] = (int)i; + + if (H5Awrite(attr_id, ATTRIBUTE_WRITE_TEST_ATTR_DTYPE, data) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to attribute\n"); + goto error; + } + + /* Make sure that the attribute can be flushed to the file */ + if (H5Fflush(file_id, H5F_SCOPE_GLOBAL) < 0) { + H5_FAILED(); + HDprintf(" couldn't flush the attribute\n"); + goto error; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + H5Sclose(space_id); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that writing an attribute fails when + * H5Awrite is passed invalid parameters. 
+ */ +static int +test_write_attribute_invalid_params(void) +{ + hsize_t dims[ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_SPACE_RANK]; + size_t i, data_size; + htri_t attr_exists; + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + void *data = NULL; + + TESTING_MULTIPART("H5Awrite with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", + ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = + generate_random_dataspace(ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_SPACE_RANK, NULL, dims, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_NAME, + ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE, space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + for (i = 0, data_size = 1; i < ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE; + + if (NULL == (data = HDmalloc(data_size))) + TEST_ERROR; + + for (i = 0; i < data_size / ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE; i++) + ((int *)data)[i] = (int)i; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Awrite_invalid_attr_id) + { + TESTING_2("H5Awrite with an invalid attr_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Awrite(H5I_INVALID_HID, ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE, data); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" wrote to attribute using an invalid attr_id!\n"); + PART_ERROR(H5Awrite_invalid_attr_id); + } + + PASSED(); + } + PART_END(H5Awrite_invalid_attr_id); + + PART_BEGIN(H5Awrite_invalid_datatype) + { + TESTING_2("H5Awrite with an invalid datatype"); + + H5E_BEGIN_TRY + { + err_ret = H5Awrite(attr_id, H5I_INVALID_HID, data); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" wrote to attribute using an invalid datatype!\n"); + PART_ERROR(H5Awrite_invalid_datatype); + } + + PASSED(); + } + PART_END(H5Awrite_invalid_datatype); + + PART_BEGIN(H5Awrite_invalid_data_buf) + { + TESTING_2("H5Awrite with an 
invalid data buffer"); + + H5E_BEGIN_TRY + { + err_ret = H5Awrite(attr_id, ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" wrote to attribute using an invalid data buffer!\n"); + PART_ERROR(H5Awrite_invalid_data_buf); + } + + PASSED(); + } + PART_END(H5Awrite_invalid_data_buf); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (data) { + HDfree(data); + data = NULL; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + H5Sclose(space_id); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that simple data can be read back + * and verified after it has been written to an + * attribute. + */ +static int +test_read_attribute(void) +{ + hsize_t dims[ATTRIBUTE_READ_TEST_SPACE_RANK]; + size_t i, data_size; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + void *data = NULL; + void *read_buf = NULL; + + TESTING("H5Aread"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_READ_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_READ_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_READ_TEST_SPACE_RANK, NULL, dims, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_READ_TEST_ATTR_NAME, ATTRIBUTE_READ_TEST_ATTR_DTYPE, + space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_READ_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + for (i = 0, data_size = 1; i < ATTRIBUTE_READ_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= ATTRIBUTE_READ_TEST_ATTR_DTYPE_SIZE; + + if (NULL == (data = HDmalloc(data_size))) + TEST_ERROR; + if (NULL == (read_buf = HDcalloc(1, data_size))) + TEST_ERROR; + + for (i = 0; i < data_size / ATTRIBUTE_READ_TEST_ATTR_DTYPE_SIZE; i++) + ((int *)data)[i] = (int)i; + + if (H5Awrite(attr_id, ATTRIBUTE_READ_TEST_ATTR_DTYPE, data) < 0) { + 
H5_FAILED(); + HDprintf(" couldn't write to attribute\n"); + goto error; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Aopen(group_id, ATTRIBUTE_READ_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute\n"); + goto error; + } + + if (H5Aread(attr_id, ATTRIBUTE_READ_TEST_ATTR_DTYPE, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from attribute\n"); + goto error; + } + + for (i = 0; i < data_size / ATTRIBUTE_READ_TEST_ATTR_DTYPE_SIZE; i++) { + if (((int *)read_buf)[i] != (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + if (read_buf) + HDfree(read_buf); + H5Sclose(space_id); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that reading an attribute fails when + * H5Aread is passed invalid parameters. + */ +static int +test_read_attribute_invalid_params(void) +{ + hsize_t dims[ATTRIBUTE_READ_INVALID_PARAMS_TEST_SPACE_RANK]; + size_t i, data_size; + htri_t attr_exists; + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + void *data = NULL; + void *read_buf = NULL; + + TESTING_MULTIPART("H5Aread with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_READ_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_READ_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = + generate_random_dataspace(ATTRIBUTE_READ_INVALID_PARAMS_TEST_SPACE_RANK, NULL, dims, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_NAME, + ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE, space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + 
H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + for (i = 0, data_size = 1; i < ATTRIBUTE_READ_INVALID_PARAMS_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE; + + if (NULL == (data = HDmalloc(data_size))) + TEST_ERROR; + if (NULL == (read_buf = HDcalloc(1, data_size))) + TEST_ERROR; + + for (i = 0; i < data_size / ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE; i++) + ((int *)data)[i] = (int)i; + + if (H5Awrite(attr_id, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE, data) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to attribute\n"); + goto error; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Aopen(group_id, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Aread_invalid_attr_id) + { + TESTING_2("H5Aread with an invalid attr_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Aread(H5I_INVALID_HID, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE, read_buf); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" read attribute with an invalid attr_id!\n"); + PART_ERROR(H5Aread_invalid_attr_id); + } + + PASSED(); + } + PART_END(H5Aread_invalid_attr_id); + + PART_BEGIN(H5Aread_invalid_datatype) + { + TESTING_2("H5Aread with an invalid datatype"); + + H5E_BEGIN_TRY + { + err_ret = H5Aread(attr_id, H5I_INVALID_HID, read_buf); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" read attribute with an invalid datatype!\n"); + PART_ERROR(H5Aread_invalid_datatype); + } + + PASSED(); + } + PART_END(H5Aread_invalid_datatype); + + PART_BEGIN(H5Aread_invalid_read_buf) + { + TESTING_2("H5Aread with an invalid read buffer"); + + H5E_BEGIN_TRY + { + err_ret = H5Aread(attr_id, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" read attribute with an invalid read buffer!\n"); + PART_ERROR(H5Aread_invalid_read_buf); + } + + PASSED(); + } + PART_END(H5Aread_invalid_read_buf); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + if (read_buf) + HDfree(read_buf); + H5Sclose(space_id); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Test reading an empty attribute is ok + */ +static int +test_read_empty_attribute(void) +{ + hsize_t dims[ATTRIBUTE_READ_EMPTY_SPACE_RANK]; + size_t i, data_size; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + void *read_buf = NULL; + + TESTING("reading an empty attribute"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic 
file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_READ_EMPTY_ATTR_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_READ_EMPTY_ATTR_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_READ_EMPTY_SPACE_RANK, NULL, dims, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_READ_EMPTY_ATTR_NAME, ATTRIBUTE_READ_EMPTY_DTYPE, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_READ_EMPTY_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Aopen(group_id, ATTRIBUTE_READ_EMPTY_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute\n"); + goto error; + } + + for (i = 0, data_size = 1; i < ATTRIBUTE_READ_EMPTY_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= ATTRIBUTE_READ_EMPTY_DTYPE_SIZE; + + if (NULL == (read_buf = HDcalloc(1, data_size))) + TEST_ERROR; + + if (H5Aread(attr_id, ATTRIBUTE_READ_EMPTY_DTYPE, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from attribute\n"); + goto error; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + H5Sclose(space_id); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} +/* + * A test to check that H5Aclose fails when it is passed + * an invalid attribute ID. 
+ */ +static int +test_close_attribute_invalid_id(void) +{ + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + + TESTING("H5Aclose with an invalid attribute ID"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or attribute aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + H5E_BEGIN_TRY + { + err_ret = H5Aclose(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Aclose succeeded with an invalid attribute ID!\n"); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that valid copies of an attribute's + * dataspace and datatype can be retrieved with + * H5Aget_space and H5Aget_type, respectively. + */ +static int +test_get_attribute_space_and_type(void) +{ + hsize_t attr_dims[ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK]; + size_t i; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + hid_t tmp_type_id = H5I_INVALID_HID; + hid_t tmp_space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("retrieval of an attribute's dataspace and datatype"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_SPACE_TYPE_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_GET_SPACE_TYPE_TEST_GROUP_NAME); + goto error; + } + + if ((attr_space_id = + generate_random_dataspace(ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK, NULL, attr_dims, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME, attr_dtype, attr_space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" 
attribute did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + /* Retrieve the attribute's datatype and dataspace and verify them */ + PART_BEGIN(H5Aget_type) + { + TESTING_2("H5Aget_type"); + + if ((tmp_type_id = H5Aget_type(attr_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve attribute's datatype\n"); + PART_ERROR(H5Aget_type); + } + + { + htri_t types_equal = H5Tequal(tmp_type_id, attr_dtype); + + if (types_equal < 0) { + H5_FAILED(); + HDprintf(" datatype was invalid\n"); + PART_ERROR(H5Aget_type); + } + + if (!types_equal) { + H5_FAILED(); + HDprintf(" attribute's datatype did not match\n"); + PART_ERROR(H5Aget_type); + } + } + + PASSED(); + } + PART_END(H5Aget_type); + + PART_BEGIN(H5Aget_space) + { + TESTING_2("H5Aget_space"); + + if ((tmp_space_id = H5Aget_space(attr_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve attribute's dataspace\n"); + PART_ERROR(H5Aget_space); + } + + { + hsize_t space_dims[ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK]; + + if (H5Sget_simple_extent_dims(tmp_space_id, space_dims, NULL) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve dimensions of dataspace\n"); + PART_ERROR(H5Aget_space); + } + + for (i = 0; i < ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK; i++) + if (space_dims[i] != attr_dims[i]) { + H5_FAILED(); + HDprintf(" attribute's dataspace dims didn't match\n"); + PART_ERROR(H5Aget_space); + } + } + + PASSED(); + } + PART_END(H5Aget_space); + + /* Now close the attribute and verify that this still works after opening an + * attribute instead of creating it + */ + if (attr_id >= 0) { + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + } + H5E_END_TRY; + attr_id = H5I_INVALID_HID; + } + if (tmp_type_id >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(tmp_type_id); + } + H5E_END_TRY; + tmp_type_id = H5I_INVALID_HID; + } + if (tmp_space_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(tmp_space_id); + } + H5E_END_TRY; + tmp_space_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Aget_type_reopened) + { + TESTING_2("H5Aget_type after re-opening an attribute"); + + if ((attr_id = H5Aopen(group_id, ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME); + PART_ERROR(H5Aget_type_reopened); + } + + if ((tmp_type_id = H5Aget_type(attr_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve attribute's datatype\n"); + PART_ERROR(H5Aget_type_reopened); + } + + { + htri_t types_equal = H5Tequal(tmp_type_id, attr_dtype); + + if (types_equal < 0) { + H5_FAILED(); + HDprintf(" datatype was invalid\n"); + PART_ERROR(H5Aget_type_reopened); + } + + if (!types_equal) { + H5_FAILED(); + HDprintf(" attribute's datatype did not match\n"); + PART_ERROR(H5Aget_type_reopened); + } + } + + if (attr_id >= 0) { + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + } + H5E_END_TRY; + attr_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(H5Aget_type_reopened); + + PART_BEGIN(H5Aget_space_reopened) + { + TESTING_2("H5Aget_space after re-opening an attribute"); + + if ((attr_id = H5Aopen(group_id, ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME); + PART_ERROR(H5Aget_space_reopened); + } + + if ((tmp_space_id = H5Aget_space(attr_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve attribute's dataspace\n"); + PART_ERROR(H5Aget_space_reopened); + } + + { + hsize_t space_dims[ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK]; + + if 
(H5Sget_simple_extent_dims(tmp_space_id, space_dims, NULL) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve dimensions of dataspace\n"); + PART_ERROR(H5Aget_space_reopened); + } + + for (i = 0; i < ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK; i++) { + if (space_dims[i] != attr_dims[i]) { + H5_FAILED(); + HDprintf(" dataspace dims didn't match!\n"); + PART_ERROR(H5Aget_space_reopened); + } + } + } + + if (attr_id >= 0) { + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + } + H5E_END_TRY; + attr_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(H5Aget_space_reopened); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(tmp_space_id) < 0) + TEST_ERROR; + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Tclose(tmp_type_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(tmp_space_id); + H5Sclose(attr_space_id); + H5Tclose(tmp_type_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an attribute's dataspace and datatype + * can't be retrieved when H5Aget_space and H5Aget_type are passed + * invalid parameters, respectively. + */ +static int +test_get_attribute_space_and_type_invalid_params(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + hid_t tmp_type_id = H5I_INVALID_HID; + hid_t tmp_space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Aget_type/H5Aget_space with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", + ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((attr_space_id = generate_random_dataspace(ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_SPACE_RANK, + NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype, + attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if 
((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + /* Retrieve the attribute's datatype and dataspace and verify them */ + PART_BEGIN(H5Aget_type_invalid_attr_id) + { + TESTING_2("H5Aget_type with an invalid attr_id"); + + H5E_BEGIN_TRY + { + tmp_type_id = H5Aget_type(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (tmp_type_id >= 0) { + H5_FAILED(); + HDprintf(" retrieved copy of attribute's datatype using an invalid attr_id!\n"); + PART_ERROR(H5Aget_type_invalid_attr_id); + } + + PASSED(); + } + PART_END(H5Aget_type_invalid_attr_id); + + PART_BEGIN(H5Aget_space_invalid_attr_id) + { + TESTING_2("H5Aget_space with an invalid attr_id"); + + H5E_BEGIN_TRY + { + tmp_space_id = H5Aget_space(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (tmp_space_id >= 0) { + H5_FAILED(); + HDprintf(" retrieved copy of attribute's dataspace using an invalid attr_id!\n"); + PART_ERROR(H5Aget_space_invalid_attr_id); + } + + PASSED(); + } + PART_END(H5Aget_space_invalid_attr_id); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(tmp_space_id); + H5Sclose(attr_space_id); + H5Tclose(tmp_type_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an ACPL used for attribute creation + * can be persisted and that a valid copy of that ACPL can + * be retrieved later with a call to H5Aget_create_plist. 
+ */ +static int +test_attribute_property_lists(void) +{ + H5T_cset_t encoding = H5T_CSET_UTF8; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t attr_id1 = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID; + hid_t attr_dtype1 = H5I_INVALID_HID, attr_dtype2 = H5I_INVALID_HID; + hid_t acpl_id1 = H5I_INVALID_HID, acpl_id2 = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute property list operations"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, attribute, or getting property list aren't " + "supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_PROPERTY_LIST_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group\n"); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_PROPERTY_LIST_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype1 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + if ((attr_dtype2 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((acpl_id1 = H5Pcreate(H5P_ATTRIBUTE_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create ACPL\n"); + goto error; + } + + if (H5Pset_char_encoding(acpl_id1, encoding) < 0) { + H5_FAILED(); + HDprintf(" couldn't set ACPL property value\n"); + goto error; + } + + if ((attr_id1 = H5Acreate2(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME1, attr_dtype1, space_id, + acpl_id1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + if ((attr_id2 = H5Acreate2(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME2, attr_dtype2, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + if (H5Pclose(acpl_id1) < 0) + TEST_ERROR; + + /* Verify the attributes have been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Aget_create_plist) + { + TESTING_2("H5Aget_create_plist"); + + /* Try to retrieve copies of the two property lists, one which has the property set and one which + * does not */ + if ((acpl_id1 = H5Aget_create_plist(attr_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get 
property list\n"); + PART_ERROR(H5Aget_create_plist); + } + + if ((acpl_id2 = H5Aget_create_plist(attr_id2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Aget_create_plist); + } + + /* Ensure that property list 1 has the property list set and property list 2 does not */ + encoding = H5T_CSET_ERROR; + + if (H5Pget_char_encoding(acpl_id1, &encoding) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve ACPL property value\n"); + PART_ERROR(H5Aget_create_plist); + } + + if (H5T_CSET_UTF8 != encoding) { + H5_FAILED(); + HDprintf(" ACPL property value was incorrect\n"); + PART_ERROR(H5Aget_create_plist); + } + + encoding = H5T_CSET_ERROR; + + if (H5Pget_char_encoding(acpl_id2, &encoding) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve ACPL property value\n"); + PART_ERROR(H5Aget_create_plist); + } + + if (H5T_CSET_UTF8 == encoding) { + H5_FAILED(); + HDprintf(" ACPL property value was set!\n"); + PART_ERROR(H5Aget_create_plist); + } + + PASSED(); + } + PART_END(H5Aget_create_plist); + + /* Now close the property lists and attribute and see if we can still retrieve copies of + * the property lists upon opening (instead of creating) an attribute + */ + if (acpl_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(acpl_id1); + } + H5E_END_TRY; + acpl_id1 = H5I_INVALID_HID; + } + if (acpl_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(acpl_id2); + } + H5E_END_TRY; + acpl_id2 = H5I_INVALID_HID; + } + if (attr_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Aclose(attr_id1); + } + H5E_END_TRY; + attr_id1 = H5I_INVALID_HID; + } + if (attr_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Aclose(attr_id2); + } + H5E_END_TRY; + attr_id2 = H5I_INVALID_HID; + } + + PART_BEGIN(H5Aget_create_plist_reopened) + { + TESTING_2("H5Aget_create_plist after re-opening an attribute"); + + if ((attr_id1 = H5Aopen(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME1, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME1); + PART_ERROR(H5Aget_create_plist_reopened); + } + + if ((attr_id2 = H5Aopen(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME2); + PART_ERROR(H5Aget_create_plist_reopened); + } + + if ((acpl_id1 = H5Aget_create_plist(attr_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Aget_create_plist_reopened); + } + + if ((acpl_id2 = H5Aget_create_plist(attr_id2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Aget_create_plist_reopened); + } + + /* XXX: Check the value to be tested as above */ + PASSED(); + } + PART_END(H5Aget_create_plist_reopened); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(acpl_id1) < 0) + TEST_ERROR; + if (H5Pclose(acpl_id2) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype1) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype2) < 0) + TEST_ERROR; + if (H5Aclose(attr_id1) < 0) + TEST_ERROR; + if (H5Aclose(attr_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(acpl_id1); + H5Pclose(acpl_id2); + H5Sclose(space_id); + H5Tclose(attr_dtype1); + H5Tclose(attr_dtype2); + H5Aclose(attr_id1); + H5Aclose(attr_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); 
+ } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an attribute's name can be + * correctly retrieved with H5Aget_name and + * H5Aget_name_by_idx. + */ +static int +test_get_attribute_name(void) +{ + ssize_t name_buf_size; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + char *name_buf = NULL; + + TESTING_MULTIPART("retrieval of an attribute's name"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, attribute, or creation order aren't supported " + "with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for attribute creation order tracking\n"); + goto error; + } + + if (H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set attribute creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_GET_NAME_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_GET_NAME_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + /* Create several attributes */ + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + /* Verify the attributes have been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME); + goto error; + } + + if (!attr_exists) { 
+ H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME); + goto error; + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2); + goto error; + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3); + goto error; + } + + /* Allocate the name buffer */ + name_buf_size = strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 2; + if (NULL == (name_buf = (char *)HDmalloc((size_t)name_buf_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for storing attribute's name\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Aget_name) + { + TESTING_2("H5Aget_name"); + + if ((attr_id = H5Aopen(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME); + PART_ERROR(H5Aget_name); + } + + *name_buf = '\0'; + if (H5Aget_name(attr_id, (size_t)name_buf_size, name_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve attribute name\n"); + PART_ERROR(H5Aget_name); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME); + PART_ERROR(H5Aget_name); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME); + PART_ERROR(H5Aget_name); + } + + PASSED(); + } + PART_END(H5Aget_name); + + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + attr_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Aget_name_by_idx_crt_order_increasing) + { + TESTING_2("H5Aget_name_by_idx by creation order in increasing order"); + + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_INC, 0, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by " + "creation order in increasing order\n", + 0); + PART_ERROR(H5Aget_name_by_idx_crt_order_increasing); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME); + PART_ERROR(H5Aget_name_by_idx_crt_order_increasing); + } + + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_INC, 1, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by " + "creation order in increasing order\n", + 1); + PART_ERROR(H5Aget_name_by_idx_crt_order_increasing); + } + + if (HDstrncmp(name_buf, 
ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2); + PART_ERROR(H5Aget_name_by_idx_crt_order_increasing); + } + + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_INC, 2, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by " + "creation order in increasing order\n", + 2); + PART_ERROR(H5Aget_name_by_idx_crt_order_increasing); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3); + PART_ERROR(H5Aget_name_by_idx_crt_order_increasing); + } + + PASSED(); + } + PART_END(H5Aget_name_by_idx_crt_order_increasing); + + PART_BEGIN(H5Aget_name_by_idx_crt_order_decreasing) + { + TESTING_2("H5Aget_name_by_idx by creation order in decreasing order"); + + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, 2, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by " + "creation order in decreasing order\n", + 2); + PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME); + PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing); + } + + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, 1, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by " + "creation order in decreasing order\n", + 1); + PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2); + PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing); + } + + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, 0, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by " + "creation order in decreasing order\n", + 0); + PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3); + PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing); + } + + PASSED(); + } + PART_END(H5Aget_name_by_idx_crt_order_decreasing); + + PART_BEGIN(H5Aget_name_by_idx_name_order_increasing) + { + 
TESTING_2("H5Aget_name_by_idx by alphabetical order in increasing order"); + + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 0, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by " + "alphabetical order in increasing order\n", + 0); + PART_ERROR(H5Aget_name_by_idx_name_order_increasing); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME); + PART_ERROR(H5Aget_name_by_idx_name_order_increasing); + } + + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 1, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by " + "alphabetical order in increasing order\n", + 1); + PART_ERROR(H5Aget_name_by_idx_name_order_increasing); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2); + PART_ERROR(H5Aget_name_by_idx_name_order_increasing); + } + + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 2, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by " + "alphabetical order in increasing order\n", + 2); + PART_ERROR(H5Aget_name_by_idx_name_order_increasing); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3); + PART_ERROR(H5Aget_name_by_idx_name_order_increasing); + } + + PASSED(); + } + PART_END(H5Aget_name_by_idx_name_order_increasing); + + PART_BEGIN(H5Aget_name_by_idx_name_order_decreasing) + { + TESTING_2("H5Aget_name_by_idx by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_DEC, 2, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %lld using H5Aget_name_by_index " + "by alphabetical order in decreasing order\n", + 2); + PART_ERROR(H5Aget_name_by_idx_name_order_decreasing); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME); + PART_ERROR(H5Aget_name_by_idx_name_order_decreasing); + } + + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_DEC, 1, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %lld using H5Aget_name_by_index " + "by alphabetical order in 
decreasing order\n", + 1); + PART_ERROR(H5Aget_name_by_idx_name_order_decreasing); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2); + PART_ERROR(H5Aget_name_by_idx_name_order_decreasing); + } + + *name_buf = '\0'; + if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_DEC, 0, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name of attribute at index %lld using H5Aget_name_by_index " + "by alphabetical order in decreasing order\n", + 0); + PART_ERROR(H5Aget_name_by_idx_name_order_decreasing); + } + + if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3, + strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf, + ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3); + PART_ERROR(H5Aget_name_by_idx_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Aget_name_by_idx_name_order_decreasing); +#endif + } + PART_END(H5Aget_name_by_idx_name_order_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (name_buf) { + HDfree(name_buf); + name_buf = NULL; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (name_buf) + HDfree(name_buf); + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an attribute's name can't be + * retrieved when H5Aget_name(_by_idx) is passed invalid + * parameters. 
+ */ +static int +test_get_attribute_name_invalid_params(void) +{ + ssize_t name_buf_size; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + char *name_buf = NULL; + + TESTING_MULTIPART("retrieval of an attribute's name with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", + ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_SPACE_RANK, NULL, NULL, + TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_ATTRIBUTE_NAME, attr_dtype, + space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_ATTRIBUTE_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + /* + * Allocate an actual buffer for the tests. 
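+ * H5Aget_name called with a zero size and a NULL buffer, as is done just below,
+ * only reports the length of the attribute's name (not counting the terminating
+ * NUL character), which is why the buffer is then allocated with one extra byte.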
+ */ + + if ((name_buf_size = H5Aget_name(attr_id, 0, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve name buf size\n"); + goto error; + } + + if (NULL == (name_buf = (char *)HDmalloc((size_t)name_buf_size + 1))) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Aget_name_invalid_attr_id) + { + TESTING_2("H5Aget_name with an invalid attr_id"); + + H5E_BEGIN_TRY + { + name_buf_size = H5Aget_name(H5I_INVALID_HID, (size_t)name_buf_size + 1, name_buf); + } + H5E_END_TRY; + + if (name_buf_size >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute name using H5Aget_name with an invalid attr_id!\n"); + PART_ERROR(H5Aget_name_invalid_attr_id); + } + + PASSED(); + } + PART_END(H5Aget_name_invalid_attr_id); + + PART_BEGIN(H5Aget_name_invalid_name_buf) + { + TESTING_2("H5Aget_name with an invalid name buffer"); + + H5E_BEGIN_TRY + { + name_buf_size = 1; + name_buf_size = H5Aget_name(attr_id, (size_t)name_buf_size, NULL); + } + H5E_END_TRY; + + if (name_buf_size >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute name using H5Aget_name with an invalid name buffer!\n"); + PART_ERROR(H5Aget_name_invalid_name_buf); + } + + PASSED(); + } + PART_END(H5Aget_name_invalid_name_buf); + + PART_BEGIN(H5Aget_name_by_idx_invalid_loc_id) + { + TESTING_2("H5Aget_name_by_idx with an invalid loc_id"); + + H5E_BEGIN_TRY + { + name_buf_size = H5Aget_name_by_idx( + H5I_INVALID_HID, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 0, name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (name_buf_size >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute name using H5Aget_name_by_idx with an invalid loc_id!\n"); + PART_ERROR(H5Aget_name_by_idx_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Aget_name_by_idx_invalid_loc_id); + + PART_BEGIN(H5Aget_name_by_idx_invalid_obj_name) + { + TESTING_2("H5Aget_name_by_idx with an invalid object name"); + + H5E_BEGIN_TRY + { + name_buf_size = H5Aget_name_by_idx(container_group, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, + name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (name_buf_size >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute name using H5Aget_name_by_idx with a NULL object name!\n"); + PART_ERROR(H5Aget_name_by_idx_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + name_buf_size = H5Aget_name_by_idx(container_group, "", H5_INDEX_NAME, H5_ITER_INC, 0, + name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (name_buf_size >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute name using H5Aget_name_by_idx with an invalid object name " + "of ''!\n"); + PART_ERROR(H5Aget_name_by_idx_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Aget_name_by_idx_invalid_obj_name); + + PART_BEGIN(H5Aget_name_by_idx_invalid_index_type) + { + TESTING_2("H5Aget_name_by_idx with an invalid index type"); + + H5E_BEGIN_TRY + { + name_buf_size = H5Aget_name_by_idx( + container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_UNKNOWN, + H5_ITER_INC, 0, name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (name_buf_size >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute name using H5Aget_name_by_idx with invalid index type " + "H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Aget_name_by_idx_invalid_index_type); + } + + H5E_BEGIN_TRY + { + name_buf_size = H5Aget_name_by_idx( + container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_N, + H5_ITER_INC, 0, name_buf, (size_t)name_buf_size + 1, 
H5P_DEFAULT); + } + H5E_END_TRY; + + if (name_buf_size >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute name using H5Aget_name_by_idx with invalid index type " + "H5_INDEX_N!\n"); + PART_ERROR(H5Aget_name_by_idx_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Aget_name_by_idx_invalid_index_type); + + PART_BEGIN(H5Aget_name_by_idx_invalid_iter_order) + { + TESTING_2("H5Aget_name_by_idx with an invalid iteration order"); + + H5E_BEGIN_TRY + { + name_buf_size = H5Aget_name_by_idx( + container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_UNKNOWN, 0, name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (name_buf_size >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute name using H5Aget_name_by_idx with invalid iteration order " + "H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Aget_name_by_idx_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + name_buf_size = H5Aget_name_by_idx( + container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_N, 0, name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (name_buf_size >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute name using H5Aget_name_by_idx with invalid iteration order " + "H5_ITER_N!\n"); + PART_ERROR(H5Aget_name_by_idx_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Aget_name_by_idx_invalid_iter_order); + + PART_BEGIN(H5Aget_name_by_idx_invalid_name_buf) + { + TESTING_2("H5Aget_name_by_idx with an invalid name buffer"); + + H5E_BEGIN_TRY + { + name_buf_size = 1; + name_buf_size = H5Aget_name_by_idx( + container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 0, NULL, (size_t)name_buf_size, H5P_DEFAULT); + } + H5E_END_TRY; + + if (name_buf_size >= 0) { + H5_FAILED(); + HDprintf( + " retrieved attribute name using H5Aget_name_by_idx with an invalid name buffer!\n"); + PART_ERROR(H5Aget_name_by_idx_invalid_name_buf); + } + + PASSED(); + } + PART_END(H5Aget_name_by_idx_invalid_name_buf); + + PART_BEGIN(H5Aget_name_by_idx_invalid_lapl) + { + TESTING_2("H5Aget_name_by_idx with an invalid LAPL"); + + H5E_BEGIN_TRY + { + name_buf_size = H5Aget_name_by_idx( + container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 0, name_buf, (size_t)name_buf_size + 1, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (name_buf_size >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute name using H5Aget_name_by_idx with an invalid LAPL!\n"); + PART_ERROR(H5Aget_name_by_idx_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Aget_name_by_idx_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (name_buf) { + HDfree(name_buf); + name_buf = NULL; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (name_buf) + HDfree(name_buf); + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test for H5Aget_storage_size. + */ +static int +test_get_attribute_storage_size(void) +{ + TESTING("H5Aget_storage_size"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check the functionality of H5Aget_info(_by_idx). 
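+ * Besides H5Aget_info and H5Aget_info_by_idx, H5Aget_info_by_name is exercised
+ * too; each call fills in an H5A_info_t, and the checks below look at the
+ * creation-order fields (corder_valid / corder) and require the character set
+ * (cset) to be one of H5T_CSET_ASCII, H5T_CSET_UTF8 or H5T_CSET_ERROR.
+ * A minimal sketch, assuming an already-open attribute ID attr_id:
+ *
+ *   H5A_info_t info;
+ *   if (H5Aget_info(attr_id, &info) >= 0 && info.corder_valid)
+ *       HDprintf("created as attribute #%lld\n", (long long)info.corder);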
+ */ +static int +test_get_attribute_info(void) +{ + H5A_info_t attr_info; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("retrieval of attribute info"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, attribute, or creation order aren't supported " + "with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for attribute creation order tracking\n"); + goto error; + } + + if (H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set attribute creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_INFO_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_GET_INFO_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_GET_INFO_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + /* Create several attributes */ + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME, attr_dtype, space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2, attr_dtype, space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3, attr_dtype, space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + /* Verify the attributes have been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME); + goto error; + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", 
ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2); + goto error; + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Aget_info) + { + TESTING_2("H5Aget_info"); + + if ((attr_id = H5Aopen(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME); + PART_ERROR(H5Aget_info); + } + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info(attr_id, &attr_info) < 0) { + H5_FAILED(); + HDprintf(" couldn't get attribute info\n"); + PART_ERROR(H5Aget_info); + } + + if (attr_info.corder_valid && (attr_info.corder != 0)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)0); + PART_ERROR(H5Aget_info); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME); + PART_ERROR(H5Aget_info); + } + + PASSED(); + } + PART_END(H5Aget_info); + + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + attr_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Aget_info_by_name) + { + TESTING_2("H5Aget_info_by_name"); + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_name(group_id, ".", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME, &attr_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get attribute info by name '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME); + PART_ERROR(H5Aget_info_by_name); + } + + if (attr_info.corder_valid && (attr_info.corder != 0)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)0); + PART_ERROR(H5Aget_info_by_name); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_name); + } + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_name(group_id, ".", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2, &attr_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get attribute info by name '%s'\n", + ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2); + PART_ERROR(H5Aget_info_by_name); + } + + if (attr_info.corder_valid && (attr_info.corder != 1)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)1); + PART_ERROR(H5Aget_info_by_name); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII 
&& attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_name); + } + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_name(group_id, ".", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3, &attr_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get attribute info by name '%s'\n", + ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3); + PART_ERROR(H5Aget_info_by_name); + } + + if (attr_info.corder_valid && (attr_info.corder != 2)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)2); + PART_ERROR(H5Aget_info_by_name); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_name); + } + + PASSED(); + } + PART_END(H5Aget_info_by_name); + + PART_BEGIN(H5Aget_info_by_idx_crt_order_increasing) + { + TESTING_2("H5Aget_info_by_idx by creation order in increasing order"); + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &attr_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by " + "creation order in increasing order\n", + 0); + PART_ERROR(H5Aget_info_by_idx_crt_order_increasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 0)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)0); + PART_ERROR(H5Aget_info_by_idx_crt_order_increasing); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_crt_order_increasing); + } + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &attr_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by " + "creation order in increasing order\n", + 1); + PART_ERROR(H5Aget_info_by_idx_crt_order_increasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 1)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)1); + PART_ERROR(H5Aget_info_by_idx_crt_order_increasing); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_crt_order_increasing); + } + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, &attr_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by " + "creation order in increasing 
order\n", + 2); + PART_ERROR(H5Aget_info_by_idx_crt_order_increasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 2)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)2); + PART_ERROR(H5Aget_info_by_idx_crt_order_increasing); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_crt_order_increasing); + } + + PASSED(); + } + PART_END(H5Aget_info_by_idx_crt_order_increasing); + + PART_BEGIN(H5Aget_info_by_idx_crt_order_decreasing) + { + TESTING_2("H5Aget_info_by_idx by creation order in decreasing order"); + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &attr_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by " + "creation order in decreasing order\n", + 2); + PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 0)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)0); + PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing); + } + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &attr_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by " + "creation order in decreasing order\n", + 1); + PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 1)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)1); + PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing); + } + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &attr_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by " + "creation order in decreasing order\n", + 0); + PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 2)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)2); + PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing); + } + + /* Ensure that the cset field is at 
least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing); + } + + PASSED(); + } + PART_END(H5Aget_info_by_idx_crt_order_decreasing); + + PART_BEGIN(H5Aget_info_by_idx_name_order_increasing) + { + TESTING_2("H5Aget_info_by_idx by alphabetical order in increasing order"); + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &attr_info, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by " + "alphabetical order in increasing order\n", + 0); + PART_ERROR(H5Aget_info_by_idx_name_order_increasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 0)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)0); + PART_ERROR(H5Aget_info_by_idx_name_order_increasing); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_name_order_increasing); + } + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &attr_info, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by " + "alphabetical order in increasing order\n", + 1); + PART_ERROR(H5Aget_info_by_idx_name_order_increasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 1)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)1); + PART_ERROR(H5Aget_info_by_idx_name_order_increasing); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_name_order_increasing); + } + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &attr_info, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by " + "alphabetical order in increasing order\n", + 2); + PART_ERROR(H5Aget_info_by_idx_name_order_increasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 2)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)2); + PART_ERROR(H5Aget_info_by_idx_name_order_increasing); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_name_order_increasing); + } + + PASSED(); + } + PART_END(H5Aget_info_by_idx_name_order_increasing); + + 
PART_BEGIN(H5Aget_info_by_idx_name_order_decreasing) + { + TESTING_2("H5Aget_info_by_idx by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, &attr_info, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %lld using H5Aget_info_by_idx by " + "alphabetical order in decreasing order\n", + 2); + PART_ERROR(H5Aget_info_by_idx_name_order_decreasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 0)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)0); + PART_ERROR(H5Aget_info_by_idx_name_order_decreasing); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_name_order_decreasing); + } + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &attr_info, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %lld using H5Aget_info_by_idx by " + "alphabetical order in decreasing order\n", + 1); + PART_ERROR(H5Aget_info_by_idx_name_order_decreasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 1)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)1); + PART_ERROR(H5Aget_info_by_idx_name_order_decreasing); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_name_order_decreasing); + } + + HDmemset(&attr_info, 0, sizeof(attr_info)); + if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &attr_info, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't get info for attribute at index %lld using H5Aget_info_by_idx by " + "alphabetical order in decreasing order\n", + 0); + PART_ERROR(H5Aget_info_by_idx_name_order_decreasing); + } + + if (attr_info.corder_valid && (attr_info.corder != 2)) { + H5_FAILED(); + HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n", + (long long)attr_info.corder, (long long)2); + PART_ERROR(H5Aget_info_by_idx_name_order_decreasing); + } + + /* Ensure that the cset field is at least set to a meaningful value */ + if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 && + attr_info.cset != H5T_CSET_ERROR) { + H5_FAILED(); + HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Aget_info_by_idx_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Aget_info_by_idx_name_order_decreasing); +#endif + } + PART_END(H5Aget_info_by_idx_name_order_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if 
(H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that H5Aget_info(_by_name/_by_idx) + * doesn't succeed when passed invalid parameters. + */ +static int +test_get_attribute_info_invalid_params(void) +{ + H5A_info_t attr_info; + htri_t attr_exists; + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("retrieval of attribute info with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", + ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_SPACE_RANK, NULL, NULL, + TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype, + space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Aget_info_invalid_attr_id) + { + TESTING_2("H5Aget_info with an invalid attr_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info(H5I_INVALID_HID, &attr_info); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info with an invalid attr_id!\n"); + PART_ERROR(H5Aget_info_invalid_attr_id); + } + + PASSED(); + } + PART_END(H5Aget_info_invalid_attr_id); + + PART_BEGIN(H5Aget_info_invalid_attr_info_pointer) + { + TESTING_2("H5Aget_info with an invalid attribute info pointer"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info(attr_id, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using 
H5Aget_info with an invalid attr_id!\n"); + PART_ERROR(H5Aget_info_invalid_attr_info_pointer); + } + + PASSED(); + } + PART_END(H5Aget_info_invalid_attr_info_pointer); + + PART_BEGIN(H5Aget_info_by_name_invalid_loc_id) + { + TESTING_2("H5Aget_info_by_name with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_name(H5I_INVALID_HID, ".", + ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME, &attr_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_name with an invalid loc_id!\n"); + PART_ERROR(H5Aget_info_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Aget_info_by_name_invalid_loc_id); + + PART_BEGIN(H5Aget_info_by_name_invalid_obj_name) + { + TESTING_2("H5Aget_info_by_name with an invalid object name"); + + H5E_BEGIN_TRY + { + err_ret = + H5Aget_info_by_name(group_id, NULL, ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME, + &attr_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_name with a NULL object name!\n"); + PART_ERROR(H5Aget_info_by_name_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_name(group_id, "", ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME, + &attr_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_name with an invalid object name " + "of ''!\n"); + PART_ERROR(H5Aget_info_by_name_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Aget_info_by_name_invalid_obj_name); + + PART_BEGIN(H5Aget_info_by_name_invalid_attr_name) + { + TESTING_2("H5Aget_info_by_name with an invalid attribute name"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_name(group_id, ".", NULL, &attr_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " retrieved attribute info using H5Aget_info_by_name with a NULL attribute name!\n"); + PART_ERROR(H5Aget_info_by_name_invalid_attr_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_name(group_id, ".", "", &attr_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_name with an invalid attribute " + "name of ''!\n"); + PART_ERROR(H5Aget_info_by_name_invalid_attr_name); + } + + PASSED(); + } + PART_END(H5Aget_info_by_name_invalid_attr_name); + + PART_BEGIN(H5Aget_info_by_name_invalid_attr_info_pointer) + { + TESTING_2("H5Aget_info_by_name with an invalid attribute info pointer"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_name(group_id, ".", ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME, + NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_name with an invalid attribute " + "info pointer!\n"); + PART_ERROR(H5Aget_info_by_name_invalid_attr_info_pointer); + } + + PASSED(); + } + PART_END(H5Aget_info_by_name_invalid_attr_info_pointer); + + PART_BEGIN(H5Aget_info_by_name_invalid_lapl) + { + TESTING_2("H5Aget_info_by_name with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_name(group_id, ".", ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME, + &attr_info, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_name with an invalid LAPL!\n"); + PART_ERROR(H5Aget_info_by_name_invalid_lapl); + } + + PASSED(); + } + 
PART_END(H5Aget_info_by_name_invalid_lapl); + + PART_BEGIN(H5Aget_info_by_idx_invalid_loc_id) + { + TESTING_2("H5Aget_info_by_idx with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_idx(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &attr_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_idx with an invalid loc_id!\n"); + PART_ERROR(H5Aget_info_by_idx_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Aget_info_by_idx_invalid_loc_id); + + PART_BEGIN(H5Aget_info_by_idx_invalid_obj_name) + { + TESTING_2("H5Aget_info_by_idx with an invalid object name"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_idx(group_id, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, &attr_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_idx with a NULL object name!\n"); + PART_ERROR(H5Aget_info_by_idx_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Aget_info_by_idx(group_id, "", H5_INDEX_NAME, H5_ITER_INC, 0, &attr_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_idx with an invalid object name " + "of ''!\n"); + PART_ERROR(H5Aget_info_by_idx_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Aget_info_by_idx_invalid_obj_name); + + PART_BEGIN(H5Aget_info_by_idx_invalid_index_type) + { + TESTING_2("H5Aget_info_by_idx with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_idx(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, &attr_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_idx with invalid index type " + "H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Aget_info_by_idx_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Aget_info_by_idx(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, &attr_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_idx with invalid index type " + "H5_INDEX_N!\n"); + PART_ERROR(H5Aget_info_by_idx_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Aget_info_by_idx_invalid_index_type); + + PART_BEGIN(H5Aget_info_by_idx_invalid_iter_order) + { + TESTING_2("H5Aget_info_by_idx with an invalid iteration order"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, &attr_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_idx with invalid iteration order " + "H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Aget_info_by_idx_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_N, 0, &attr_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_idx with invalid iteration order " + "H5_ITER_N!\n"); + PART_ERROR(H5Aget_info_by_idx_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Aget_info_by_idx_invalid_iter_order); + + PART_BEGIN(H5Aget_info_by_idx_invalid_attr_info_pointer) + { + TESTING_2("H5Aget_info_by_idx with an invalid attribute info pointer"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + 
HDprintf(" retrieved attribute info using H5Aget_info_by_idx with an invalid attribute " + "info pointer!\n"); + PART_ERROR(H5Aget_info_by_idx_invalid_attr_info_pointer); + } + + PASSED(); + } + PART_END(H5Aget_info_by_idx_invalid_attr_info_pointer); + + PART_BEGIN(H5Aget_info_by_idx_invalid_lapl) + { + TESTING_2("H5Aget_info_by_idx with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &attr_info, + H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved attribute info using H5Aget_info_by_idx with an invalid LAPL!\n"); + PART_ERROR(H5Aget_info_by_idx_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Aget_info_by_idx_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an attribute can be renamed + * with H5Arename and H5Arename_by_name. + */ +static int +test_rename_attribute(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute renaming"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_RENAME_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_RENAME_TEST_GROUP_NAME); + goto error; + } + + if ((attr_space_id = generate_random_dataspace(ATTRIBUTE_RENAME_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME, attr_dtype, attr_space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + if ((attr_id2 = H5Acreate2(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME2, attr_dtype, attr_space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attributes have been created */ 
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Arename) + { + TESTING_2("H5Arename"); + + if (H5Arename(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME, ATTRIBUTE_RENAME_TEST_NEW_NAME) < 0) { + H5_FAILED(); + HDprintf(" couldn't rename attribute '%s' to '%s' using H5Arename\n", + ATTRIBUTE_RENAME_TEST_ATTR_NAME, ATTRIBUTE_RENAME_TEST_NEW_NAME); + PART_ERROR(H5Arename); + } + + /* Verify the attribute has been renamed */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + PART_ERROR(H5Arename); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not get renamed!\n"); + PART_ERROR(H5Arename); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_NEW_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + PART_ERROR(H5Arename); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not get renamed!\n"); + PART_ERROR(H5Arename); + } + + PASSED(); + } + PART_END(H5Arename); + + PART_BEGIN(H5Arename_by_name) + { + TESTING_2("H5Arename_by_name"); + + if (H5Arename_by_name(container_group, ATTRIBUTE_RENAME_TEST_GROUP_NAME, + ATTRIBUTE_RENAME_TEST_ATTR_NAME2, ATTRIBUTE_RENAME_TEST_NEW_NAME2, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't rename attribute '%s' to '%s' using H5Arename_by_name\n", + ATTRIBUTE_RENAME_TEST_ATTR_NAME2, ATTRIBUTE_RENAME_TEST_NEW_NAME2); + PART_ERROR(H5Arename_by_name); + } + + /* Verify the attribute has been renamed */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + PART_ERROR(H5Arename_by_name); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not get renamed!\n"); + PART_ERROR(H5Arename_by_name); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_NEW_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + PART_ERROR(H5Arename_by_name); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not get renamed!\n"); + PART_ERROR(H5Arename_by_name); + } + + PASSED(); + } + PART_END(H5Arename_by_name); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(attr_space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Aclose(attr_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an attribute can't be renamed + * when H5Arename(_by_name) is passed invalid parameters. 
+ */ +static int +test_rename_attribute_invalid_params(void) +{ + htri_t attr_exists; + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute renaming with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", + ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((attr_space_id = generate_random_dataspace(ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_SPACE_RANK, NULL, + NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype, + attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + if ((attr_id2 = H5Acreate2(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME2, attr_dtype, + attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attributes have been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Arename_invalid_loc_id) + { + TESTING_2("H5Arename with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename(H5I_INVALID_HID, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, + ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename with an invalid loc_id!\n"); + PART_ERROR(H5Arename_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Arename_invalid_loc_id); + + PART_BEGIN(H5Arename_invalid_old_attr_name) + { + TESTING_2("H5Arename with an invalid old attribute name"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename(group_id, 
NULL, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename with a NULL old attribute name!\n"); + PART_ERROR(H5Arename_invalid_old_attr_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Arename(group_id, "", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename with an invalid old attribute name of ''!\n"); + PART_ERROR(H5Arename_invalid_old_attr_name); + } + + PASSED(); + } + PART_END(H5Arename_invalid_old_attr_name); + + PART_BEGIN(H5Arename_invalid_new_attr_name) + { + TESTING_2("H5Arename with an invalid new attribute name"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename with a NULL new attribute name!\n"); + PART_ERROR(H5Arename_invalid_new_attr_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Arename(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, ""); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename with an invalid new attribute name of ''!\n"); + PART_ERROR(H5Arename_invalid_new_attr_name); + } + + PASSED(); + } + PART_END(H5Arename_invalid_new_attr_name); + + PART_BEGIN(H5Arename_by_name_invalid_loc_id) + { + TESTING_2("H5Arename_by_name with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = + H5Arename_by_name(H5I_INVALID_HID, ".", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, + ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename_by_name with an invalid loc_id!\n"); + PART_ERROR(H5Arename_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Arename_by_name_invalid_loc_id); + + PART_BEGIN(H5Arename_by_name_invalid_obj_name) + { + TESTING_2("H5Arename_by_name with an invalid object name"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename_by_name(group_id, NULL, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, + ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename_by_name with a NULL object name!\n"); + PART_ERROR(H5Arename_by_name_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Arename_by_name(group_id, "", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, + ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " renamed attribute using H5Arename_by_name with an invalid object name of ''!\n"); + PART_ERROR(H5Arename_by_name_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Arename_by_name_invalid_obj_name); + + PART_BEGIN(H5Arename_by_name_invalid_old_attr_name) + { + TESTING_2("H5Arename_by_name with an invalid old attribute name"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename_by_name(group_id, ".", NULL, + ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename_by_name with a NULL old attribute name!\n"); + PART_ERROR(H5Arename_by_name_invalid_old_attr_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Arename_by_name(group_id, ".", "", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 
0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename_by_name with an invalid old attribute name " + "of ''!\n"); + PART_ERROR(H5Arename_by_name_invalid_old_attr_name); + } + + PASSED(); + } + PART_END(H5Arename_by_name_invalid_old_attr_name); + + PART_BEGIN(H5Arename_by_name_invalid_new_attr_name) + { + TESTING_2("H5Arename_by_name with an invalid new attribute name"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename_by_name(group_id, ".", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, + NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename_by_name with a NULL new attribute name!\n"); + PART_ERROR(H5Arename_by_name_invalid_new_attr_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Arename_by_name(group_id, ".", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, "", + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename_by_name with an invalid new attribute name " + "of ''!\n"); + PART_ERROR(H5Arename_by_name_invalid_new_attr_name); + } + + PASSED(); + } + PART_END(H5Arename_by_name_invalid_new_attr_name); + + PART_BEGIN(H5Arename_by_name_invalid_lapl) + { + TESTING_2("H5Arename_by_name with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename_by_name(group_id, ".", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, + ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" renamed attribute using H5Arename_by_name with an invalid LAPL!\n"); + PART_ERROR(H5Arename_by_name_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Arename_by_name_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(attr_space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Aclose(attr_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of attribute + * iteration using H5Aiterate(_by_name) on a group. + * Iteration is done in increasing and decreasing + * order of both attribute name and attribute + * creation order. 
+ */ +static int +test_attribute_iterate_group(void) +{ + size_t link_counter; + size_t i; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute iteration on a group"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, attribute, iterate, or creation order aren't " + "supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for attribute creation order tracking\n"); + goto error; + } + + if (H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set attribute creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME); + goto error; + } + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + if ((attr_space_id = + generate_random_dataspace(ATTRIBUTE_ITERATE_TEST_ATTR_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + /* Create some attributes with a reverse-ordering naming scheme to test creation order */ + for (i = 0; i < ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; i++) { + char attr_name[ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE]; + + HDsnprintf(attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE, + ATTRIBUTE_ITERATE_TEST_ATTR_NAME "%d", (int)(ATTRIBUTE_ITERATE_TEST_NUM_ATTRS - i - 1)); + + if ((attr_id = H5Acreate2(group_id, attr_name, attr_dtype, attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", attr_name); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, attr_name)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", attr_name); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", attr_name); + goto error; + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", attr_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected attributes with a given step throughout all of the following + * iterations. 
Since the only information we can count on in the attribute + * iteration callback is the attribute's name, we need some other way of + * ensuring that the attributes are coming back in the correct order. + */ + + PART_BEGIN(H5Aiterate2_name_increasing) + { + TESTING_2("H5Aiterate by attribute name in increasing order"); + + link_counter = 0; + + /* Test basic attribute iteration capability using both index types and both index orders */ + if (H5Aiterate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter) < + 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 by index type name in increasing order failed\n"); + PART_ERROR(H5Aiterate2_name_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 0) { + H5_FAILED(); + HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_name_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate2_name_increasing); + + PART_BEGIN(H5Aiterate2_name_decreasing) + { + TESTING_2("H5Aiterate by attribute name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter) < + 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Aiterate2_name_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Aiterate2_name_decreasing); +#endif + } + PART_END(H5Aiterate2_name_decreasing); + + PART_BEGIN(H5Aiterate2_creation_increasing) + { + TESTING_2("H5Aiterate by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1, + &link_counter) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Aiterate2_creation_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_creation_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate2_creation_increasing); + + PART_BEGIN(H5Aiterate2_creation_decreasing) + { + TESTING_2("H5Aiterate by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1, + &link_counter) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Aiterate2_creation_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); 
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Aiterate2_creation_decreasing); + + PART_BEGIN(H5Aiterate_by_name_name_increasing) + { + TESTING_2("H5Aiterate_by_name by attribute name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 0; + + if (H5Aiterate_by_name( + file_id, "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type name in increasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_name_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_name_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_name_increasing); + + PART_BEGIN(H5Aiterate_by_name_name_decreasing) + { + TESTING_2("H5Aiterate_by_name by attribute name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate_by_name( + file_id, "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type name in decreasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_name_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Aiterate_by_name_name_decreasing); +#endif + } + PART_END(H5Aiterate_by_name_name_decreasing); + + PART_BEGIN(H5Aiterate_by_name_creation_increasing) + { + TESTING_2("H5Aiterate_by_name by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate_by_name(file_id, + "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type creation order in increasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_creation_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_creation_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_creation_increasing); + + PART_BEGIN(H5Aiterate_by_name_creation_decreasing) + { + TESTING_2("H5Aiterate_by_name by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + 
link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate_by_name(file_id, + "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_creation_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(attr_space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of attribute + * iteration using H5Aiterate(_by_name) on a dataset. + * Iteration is done in increasing and decreasing + * order of both attribute name and attribute + * creation order. + */ +static int +test_attribute_iterate_dataset(void) +{ + size_t link_counter; + size_t i; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t dset_space_id = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute iteration on a dataset"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, attribute, iterate, or creation order " + "aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME); + goto error; + } + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create DCPL for attribute creation order 
tracking\n"); + goto error; + } + + if (H5Pset_attr_creation_order(dcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set attribute creation order tracking\n"); + goto error; + } + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((dset_space_id = + generate_random_dataspace(ATTRIBUTE_ITERATE_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + if ((attr_space_id = + generate_random_dataspace(ATTRIBUTE_ITERATE_TEST_ATTR_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, ATTRIBUTE_ITERATE_TEST_DSET_NAME, dset_dtype, dset_space_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", ATTRIBUTE_ITERATE_TEST_DSET_NAME); + goto error; + } + + /* Create some attributes with a reverse-ordering naming scheme to test creation order */ + for (i = 0; i < ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; i++) { + char attr_name[ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE]; + + HDsnprintf(attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE, + ATTRIBUTE_ITERATE_TEST_ATTR_NAME "%d", (int)(ATTRIBUTE_ITERATE_TEST_NUM_ATTRS - i - 1)); + + if ((attr_id = H5Acreate2(dset_id, attr_name, attr_dtype, attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", attr_name); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(dset_id, attr_name)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", attr_name); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", attr_name); + goto error; + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", attr_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected attributes with a given step throughout all of the following + * iterations. Since the only information we can count on in the attribute + * iteration callback is the attribute's name, we need some other way of + * ensuring that the attributes are coming back in the correct order. 
+ */ + + PART_BEGIN(H5Aiterate2_name_increasing) + { + TESTING_2("H5Aiterate by attribute name in increasing order"); + + link_counter = 0; + + /* Test basic attribute iteration capability using both index types and both index orders */ + if (H5Aiterate2(dset_id, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter) < + 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 by index type name in increasing order failed\n"); + PART_ERROR(H5Aiterate2_name_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 0) { + H5_FAILED(); + HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_name_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate2_name_increasing); + + PART_BEGIN(H5Aiterate2_name_decreasing) + { + TESTING_2("H5Aiterate by attribute name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate2(dset_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter) < + 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Aiterate2_name_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Aiterate2_name_decreasing); +#endif + } + PART_END(H5Aiterate2_name_decreasing); + + PART_BEGIN(H5Aiterate2_creation_increasing) + { + TESTING_2("H5Aiterate by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate2(dset_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1, + &link_counter) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Aiterate2_creation_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_creation_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate2_creation_increasing); + + PART_BEGIN(H5Aiterate2_creation_decreasing) + { + TESTING_2("H5Aiterate by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate2(dset_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1, + &link_counter) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Aiterate2_creation_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_creation_decreasing); + } + + PASSED(); + } + 
PART_END(H5Aiterate2_creation_decreasing); + + PART_BEGIN(H5Aiterate_by_name_name_increasing) + { + TESTING_2("H5Aiterate_by_name by attribute name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 0; + + if (H5Aiterate_by_name(file_id, + "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME + "/" ATTRIBUTE_ITERATE_TEST_DSET_NAME, + H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type name in increasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_name_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_name_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_name_increasing); + + PART_BEGIN(H5Aiterate_by_name_name_decreasing) + { + TESTING_2("H5Aiterate_by_name by attribute name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate_by_name(file_id, + "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME + "/" ATTRIBUTE_ITERATE_TEST_DSET_NAME, + H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type name in decreasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_name_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Aiterate_by_name_name_decreasing); +#endif + } + PART_END(H5Aiterate_by_name_name_decreasing); + + PART_BEGIN(H5Aiterate_by_name_creation_increasing) + { + TESTING_2("H5Aiterate_by_name by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate_by_name(file_id, + "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME + "/" ATTRIBUTE_ITERATE_TEST_DSET_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type creation order in increasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_creation_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_creation_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_creation_increasing); + + PART_BEGIN(H5Aiterate_by_name_creation_decreasing) + { + TESTING_2("H5Aiterate_by_name by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if 
(H5Aiterate_by_name(file_id, + "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME + "/" ATTRIBUTE_ITERATE_TEST_DSET_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_creation_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Sclose(dset_space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + H5Sclose(attr_space_id); + H5Sclose(dset_space_id); + H5Tclose(attr_dtype); + H5Tclose(dset_dtype); + H5Aclose(attr_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of attribute + * iteration using H5Aiterate(_by_name) on a committed + * datatype. Iteration is done in increasing and + * decreasing order of both attribute name and attribute + * creation order. 
+ */ +static int +test_attribute_iterate_datatype(void) +{ + size_t link_counter; + size_t i; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + hid_t tcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute iteration on a committed datatype"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, stored datatype, attribute, iterate, or creation " + "order aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME); + goto error; + } + + if ((tcpl_id = H5Pcreate(H5P_DATATYPE_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create TCPL for attribute creation order tracking\n"); + goto error; + } + + if (H5Pset_attr_creation_order(tcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set attribute creation order tracking\n"); + goto error; + } + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_space_id = + generate_random_dataspace(ATTRIBUTE_ITERATE_TEST_ATTR_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if (H5Tcommit2(group_id, ATTRIBUTE_ITERATE_TEST_DTYPE_NAME, type_id, H5P_DEFAULT, tcpl_id, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", ATTRIBUTE_ITERATE_TEST_DTYPE_NAME); + goto error; + } + + /* Create some attributes with a reverse-ordering naming scheme to test creation order */ + for (i = 0; i < ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; i++) { + char attr_name[ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE]; + + HDsnprintf(attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE, + ATTRIBUTE_ITERATE_TEST_ATTR_NAME "%d", (int)(ATTRIBUTE_ITERATE_TEST_NUM_ATTRS - i - 1)); + + if ((attr_id = H5Acreate2(type_id, attr_name, attr_dtype, attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", attr_name); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(type_id, attr_name)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", attr_name); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", attr_name); + goto error; + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed 
to close attribute '%s'\n", attr_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected attributes with a given step throughout all of the following + * iterations. Since the only information we can count on in the attribute + * iteration callback is the attribute's name, we need some other way of + * ensuring that the attributes are coming back in the correct order. + */ + + PART_BEGIN(H5Aiterate2_name_increasing) + { + TESTING_2("H5Aiterate by attribute name in increasing order"); + + link_counter = 0; + + /* Test basic attribute iteration capability using both index types and both index orders */ + if (H5Aiterate2(type_id, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter) < + 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 by index type name in increasing order failed\n"); + PART_ERROR(H5Aiterate2_name_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 0) { + H5_FAILED(); + HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_name_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate2_name_increasing); + + PART_BEGIN(H5Aiterate2_name_decreasing) + { + TESTING_2("H5Aiterate by attribute name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate2(type_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter) < + 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Aiterate2_name_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Aiterate2_name_decreasing); +#endif + } + PART_END(H5Aiterate2_name_decreasing); + + PART_BEGIN(H5Aiterate2_creation_increasing) + { + TESTING_2("H5Aiterate by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate2(type_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1, + &link_counter) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Aiterate2_creation_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_creation_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate2_creation_increasing); + + PART_BEGIN(H5Aiterate2_creation_decreasing) + { + TESTING_2("H5Aiterate by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate2(type_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1, + &link_counter) < 0) { + H5_FAILED(); + HDprintf(" 
H5Aiterate2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Aiterate2_creation_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not " + "have been called!\n"); + PART_ERROR(H5Aiterate2_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Aiterate2_creation_decreasing); + + PART_BEGIN(H5Aiterate_by_name_name_increasing) + { + TESTING_2("H5Aiterate_by_name by attribute name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 0; + + if (H5Aiterate_by_name( + file_id, + "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME + "/" ATTRIBUTE_ITERATE_TEST_DTYPE_NAME, + H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type name in increasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_name_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_name_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_name_increasing); + + PART_BEGIN(H5Aiterate_by_name_name_decreasing) + { + TESTING_2("H5Aiterate_by_name by attribute name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate_by_name( + file_id, + "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME + "/" ATTRIBUTE_ITERATE_TEST_DTYPE_NAME, + H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type name in decreasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_name_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Aiterate_by_name_name_decreasing); +#endif + } + PART_END(H5Aiterate_by_name_name_decreasing); + + PART_BEGIN(H5Aiterate_by_name_creation_increasing) + { + TESTING_2("H5Aiterate_by_name by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate_by_name(file_id, + "/" ATTRIBUTE_TEST_GROUP_NAME + "/" ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME + "/" ATTRIBUTE_ITERATE_TEST_DTYPE_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type creation order in increasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_creation_increasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is 
unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_creation_increasing); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_creation_increasing); + + PART_BEGIN(H5Aiterate_by_name_creation_decreasing) + { + TESTING_2("H5Aiterate_by_name by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; + + if (H5Aiterate_by_name(file_id, + "/" ATTRIBUTE_TEST_GROUP_NAME + "/" ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME + "/" ATTRIBUTE_ITERATE_TEST_DTYPE_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Aiterate_by_name_creation_decreasing); + } + + /* Make sure that the attribute iteration callback was actually called */ + if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function " + "must not have been called!\n"); + PART_ERROR(H5Aiterate_by_name_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(tcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(tcpl_id); + H5Sclose(attr_space_id); + H5Tclose(attr_dtype); + H5Tclose(type_id); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of attribute + * iteration index saving using H5Aiterate(_by_name). + * Iteration is done in increasing and decreasing + * order of both attribute name and attribute + * creation order. + */ +static int +test_attribute_iterate_index_saving(void) +{ + TESTING("attribute iteration index saving capability"); + + SKIPPED(); + + return 1; +} + +/* + * A test to check that an object's attributes can't + * be iterated over when H5Aiterate(_by_name) is + * passed invalid parameters. 
+ */ +static int +test_attribute_iterate_invalid_params(void) +{ + herr_t err_ret = -1; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID, attr_id3 = H5I_INVALID_HID, + attr_id4 = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute iteration with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, attribute, or iterate aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup\n"); + goto error; + } + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_space_id = generate_random_dataspace(ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_SPACE_RANK, + NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype, + attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + if ((attr_id2 = H5Acreate2(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME2, attr_dtype, + attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + if ((attr_id3 = H5Acreate2(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME3, attr_dtype, + attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + if ((attr_id4 = H5Acreate2(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME4, attr_dtype, + attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attributes have been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not 
exist\n"); + goto error; + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME4)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Aiterate_invalid_loc_id) + { + TESTING_2("H5Aiterate with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = + H5Aiterate2(H5I_INVALID_HID, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback2, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" iterated over attributes using H5Aiterate with an invalid loc_id!\n"); + PART_ERROR(H5Aiterate_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Aiterate_invalid_loc_id); + + PART_BEGIN(H5Aiterate_invalid_index_type) + { + TESTING_2("H5Aiterate with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = + H5Aiterate2(group_id, H5_INDEX_UNKNOWN, H5_ITER_INC, NULL, attr_iter_callback2, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" iterated over attributes using H5Aiterate with invalid index type " + "H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Aiterate_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Aiterate2(group_id, H5_INDEX_N, H5_ITER_INC, NULL, attr_iter_callback2, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " iterated over attributes using H5Aiterate with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Aiterate_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Aiterate_invalid_index_type); + + PART_BEGIN(H5Aiterate_invalid_index_order) + { + TESTING_2("H5Aiterate with an invalid index ordering"); + + H5E_BEGIN_TRY + { + err_ret = + H5Aiterate2(group_id, H5_INDEX_NAME, H5_ITER_UNKNOWN, NULL, attr_iter_callback2, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" iterated over attributes using H5Aiterate with invalid index ordering " + "H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Aiterate_invalid_index_order); + } + + H5E_BEGIN_TRY + { + err_ret = H5Aiterate2(group_id, H5_INDEX_NAME, H5_ITER_N, NULL, attr_iter_callback2, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " iterated over attributes using H5Aiterate with invalid index ordering H5_ITER_N!\n"); + PART_ERROR(H5Aiterate_invalid_index_order); + } + + PASSED(); + } + PART_END(H5Aiterate_invalid_index_order); + + PART_BEGIN(H5Aiterate_by_name_invalid_loc_id) + { + TESTING_2("H5Aiterate_by_name with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Aiterate_by_name(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, NULL, + attr_iter_callback2, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" iterated over attributes using H5Aiterate_by_name with an invalid loc_id!\n"); + PART_ERROR(H5Aiterate_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_invalid_loc_id); + + PART_BEGIN(H5Aiterate_by_name_invalid_obj_name) + { + TESTING_2("H5Aiterate_by_name with an invalid object name"); + + H5E_BEGIN_TRY + { + err_ret = H5Aiterate_by_name(group_id, NULL, H5_INDEX_NAME, H5_ITER_INC, NULL, + attr_iter_callback2, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" iterated over attributes using H5Aiterate_by_name with a NULL object name!\n"); + PART_ERROR(H5Aiterate_by_name_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Aiterate_by_name(group_id, "", 
H5_INDEX_NAME, H5_ITER_INC, NULL, + attr_iter_callback2, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" iterated over attributes using H5Aiterate_by_name with an invalid object name " + "of ''!\n"); + PART_ERROR(H5Aiterate_by_name_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_invalid_obj_name); + + PART_BEGIN(H5Aiterate_by_name_invalid_index_type) + { + TESTING_2("H5Aiterate_by_name with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = H5Aiterate_by_name(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, NULL, + attr_iter_callback2, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" iterated over attributes using H5Aiterate_by_name with invalid index type " + "H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Aiterate_by_name_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Aiterate_by_name(group_id, ".", H5_INDEX_N, H5_ITER_INC, NULL, + attr_iter_callback2, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" iterated over attributes using H5Aiterate_by_name with invalid index type " + "H5_INDEX_N!\n"); + PART_ERROR(H5Aiterate_by_name_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_invalid_index_type); + + PART_BEGIN(H5Aiterate_by_name_invalid_index_order) + { + TESTING_2("H5Aiterate_by_name with an invalid index ordering"); + + H5E_BEGIN_TRY + { + err_ret = H5Aiterate_by_name(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, NULL, + attr_iter_callback2, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" iterated over attributes using H5Aiterate_by_name with invalid index ordering " + "H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Aiterate_by_name_invalid_index_order); + } + + H5E_BEGIN_TRY + { + err_ret = H5Aiterate_by_name(group_id, ".", H5_INDEX_NAME, H5_ITER_N, NULL, + attr_iter_callback2, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" iterated over attributes using H5Aiterate_by_name with invalid index ordering " + "H5_ITER_N!\n"); + PART_ERROR(H5Aiterate_by_name_invalid_index_order); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_invalid_index_order); + + PART_BEGIN(H5Aiterate_by_name_invalid_lapl) + { + TESTING_2("H5Aiterate_by_name with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Aiterate_by_name(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, NULL, + attr_iter_callback2, NULL, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" iterated over attributes using H5Aiterate_by_name with an invalid LAPL!\n"); + PART_ERROR(H5Aiterate_by_name_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id2) < 0) + TEST_ERROR; + if (H5Aclose(attr_id3) < 0) + TEST_ERROR; + if (H5Aclose(attr_id4) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(attr_space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Aclose(attr_id2); + H5Aclose(attr_id3); + H5Aclose(attr_id4); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to 
check that attribute iteration performed + * on an object with no attributes attached to it is + * not problematic. + */ +static int +test_attribute_iterate_0_attributes(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dset_space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute iteration on object with 0 attributes"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, attribute, or iterate aren't supported " + "with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_SUBGROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup\n"); + goto error; + } + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_space_id = generate_random_dataspace(ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_SPACE_RANK, NULL, + NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_NAME, dset_dtype, + dset_space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Aiterate_0_attributes_native) + { + TESTING_2("H5Aiterate (native order)"); + + if (H5Aiterate2(dset_id, H5_INDEX_NAME, H5_ITER_NATIVE, NULL, attr_iter_callback2, NULL) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 on object with 0 attributes failed\n"); + PART_ERROR(H5Aiterate_0_attributes_native); + } + + PASSED(); + } + PART_END(H5Aiterate_0_attributes_native); + + PART_BEGIN(H5Aiterate_0_attributes_inc) + { + TESTING_2("H5Aiterate (increasing order)"); + + if (H5Aiterate2(dset_id, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback2, NULL) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 on object with 0 attributes failed\n"); + PART_ERROR(H5Aiterate_0_attributes_inc); + } + + PASSED(); + } + PART_END(H5Aiterate_0_attributes_inc); + + PART_BEGIN(H5Aiterate_0_attributes_dec) + { + TESTING_2("H5Aiterate (decreasing order)"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if (H5Aiterate2(dset_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback2, NULL) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate2 on object with 0 attributes failed\n"); + PART_ERROR(H5Aiterate_0_attributes_dec); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Aiterate_0_attributes_dec); +#endif + } + PART_END(H5Aiterate_0_attributes_dec); + + PART_BEGIN(H5Aiterate_by_name_0_attributes_native) + { + TESTING_2("H5Aiterate_by_name (native order)"); + + if (H5Aiterate_by_name(group_id, ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_NAME, H5_INDEX_NAME, + H5_ITER_NATIVE, NULL, 
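+ /* the preceding NULL is the idx parameter: iteration starts at the beginning and no stopping position is handed back */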
attr_iter_callback2, NULL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name on object with 0 attributes failed\n"); + PART_ERROR(H5Aiterate_by_name_0_attributes_native); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_0_attributes_native); + + PART_BEGIN(H5Aiterate_by_name_0_attributes_inc) + { + TESTING_2("H5Aiterate_by_name (increasing order)"); + + if (H5Aiterate_by_name(group_id, ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_NAME, H5_INDEX_NAME, + H5_ITER_INC, NULL, attr_iter_callback2, NULL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name on object with 0 attributes failed\n"); + PART_ERROR(H5Aiterate_by_name_0_attributes_inc); + } + + PASSED(); + } + PART_END(H5Aiterate_by_name_0_attributes_inc); + + PART_BEGIN(H5Aiterate_by_name_0_attributes_dec) + { + TESTING_2("H5Aiterate_by_name (decreasing order)"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if (H5Aiterate_by_name(group_id, ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_NAME, H5_INDEX_NAME, + H5_ITER_DEC, NULL, attr_iter_callback2, NULL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Aiterate_by_name on object with 0 attributes failed\n"); + PART_ERROR(H5Aiterate_by_name_0_attributes_dec); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Aiterate_by_name_0_attributes_dec); +#endif + } + PART_END(H5Aiterate_by_name_0_attributes_dec); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(dset_space_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(dset_space_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an attribute can be deleted + * using H5Adelete(_by_idx). 
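+ * The sub-tests cover H5Adelete, H5Adelete_by_name, and H5Adelete_by_idx,
+ * the latter over both the creation-order and name indexes in increasing
+ * and decreasing order.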
+ */ +static int +test_delete_attribute(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute deletion"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, attribute, or creation order aren't supported " + "with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for attribute creation order tracking\n"); + goto error; + } + + if (H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set attribute creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_DELETION_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_DELETION_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Adelete) + { + TESTING_2("H5Adelete"); + + /* Test H5Adelete */ + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete); + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete); + } + + /* Delete the attribute */ + if (H5Adelete(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete); + } + + /* Verify the attribute has been deleted */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + 
HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete); + } + + PASSED(); + } + PART_END(H5Adelete); + + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + attr_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Adelete_by_name) + { + TESTING_2("H5Adelete_by_name"); + + /* Test H5Adelete_by_name */ + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_name); + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_name); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_name); + } + + /* Delete the attribute */ + if (H5Adelete_by_name(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, + ATTRIBUTE_DELETION_TEST_ATTR_NAME, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_name); + } + + /* Verify the attribute has been deleted */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_name); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_name); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_name); + } + + PASSED(); + } + PART_END(H5Adelete_by_name); + + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + attr_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Adelete_by_idx_crt_order_increasing) + { + TESTING_2("H5Adelete_by_idx by creation order in increasing order"); + + /* Create several attributes */ + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if 
(H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + /* Verify the attributes have been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + /* Delete an attribute */ + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in " + "increasing order\n"); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + /* Ensure that the attribute is gone and others remain */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + /* Repeat until all attributes have been deleted */ + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, 
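+ /* position 0 is reused on every pass: the remaining attributes shift down
+ after each deletion, so slot 0 always refers to the oldest one left */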
H5_INDEX_CRT_ORDER, + H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in " + "increasing order\n"); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in " + "increasing order\n"); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_increasing); + } + + PASSED(); + } + PART_END(H5Adelete_by_idx_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + attr_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Adelete_by_idx_crt_order_decreasing) + { + TESTING_2("H5Adelete_by_idx by creation order in decreasing order"); 
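+ /* With H5_ITER_DEC over the creation-order index, position 0 is the newest
+ attribute and the oldest of the three sits at position 2, so the deletions
+ below target positions 2, 1, 0; the attributes are therefore removed in
+ creation order, mirroring the increasing-order sub-test above. */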
+ + /* Create several attributes */ + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + /* Verify the attributes have been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + /* Delete an attribute */ + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, 2, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in " + "decreasing order\n"); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + /* Ensure that the attribute is gone and others remain */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if 
(attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + /* Repeat until all attributes have been deleted */ + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in " + "decreasing order\n"); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in " + "decreasing order\n"); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", 
ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_crt_order_decreasing); + } + + PASSED(); + } + PART_END(H5Adelete_by_idx_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + attr_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Adelete_by_idx_name_order_increasing) + { + TESTING_2("H5Adelete_by_idx by alphabetical order in increasing order"); + + /* Create several attributes */ + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + /* Verify the attributes have been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (!attr_exists) { + 
H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + /* Delete an attribute */ + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in " + "increasing order\n"); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + /* Ensure that the attribute is gone and others remain */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + /* Repeat until all attributes have been deleted */ + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in " + "increasing order\n"); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (attr_exists) { + 
H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in " + "increasing order\n"); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_increasing); + } + + PASSED(); + } + PART_END(H5Adelete_by_idx_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + attr_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Adelete_by_idx_name_order_decreasing) + { + TESTING_2("H5Adelete_by_idx by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Create several attributes */ + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", 
ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + /* Verify the attributes have been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' didn't exist before deletion\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + /* Delete an attribute */ + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_DEC, 2, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in " + "decreasing order\n"); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + /* Ensure that the attribute is gone and others remain */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + 
PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + /* Repeat until all attributes have been deleted */ + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_DEC, 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in " + "decreasing order\n"); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_DEC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in " + "decreasing order\n"); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + if (attr_exists) { + H5_FAILED(); + HDprintf(" 
attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3); + PART_ERROR(H5Adelete_by_idx_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Adelete_by_idx_name_order_decreasing); +#endif + } + PART_END(H5Adelete_by_idx_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + attr_id = H5I_INVALID_HID; + } + H5E_END_TRY; + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an attribute can't be deleted + * when H5Adelete(_by_name/_by_idx) is passed invalid + * parameters. + */ +static int +test_delete_attribute_invalid_params(void) +{ + herr_t err_ret = -1; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute deletion with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", + ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_SPACE_RANK, NULL, NULL, + TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype, + space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute didn't exists\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Adelete_invalid_loc_id) + { + TESTING_2("H5Adelete with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Adelete(H5I_INVALID_HID, 
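+ /* H5I_INVALID_HID stands in for an invalid location identifier */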
ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete with an invalid loc_id!\n"); + PART_ERROR(H5Adelete_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Adelete_invalid_loc_id); + + PART_BEGIN(H5Adelete_invalid_attr_name) + { + TESTING_2("H5Adelete with an invalid attribute name"); + + H5E_BEGIN_TRY + { + err_ret = H5Adelete(group_id, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete with a NULL attribute name!\n"); + PART_ERROR(H5Adelete_invalid_attr_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Adelete(group_id, ""); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete with an invalid attribute name of ''!\n"); + PART_ERROR(H5Adelete_invalid_attr_name); + } + + PASSED(); + } + PART_END(H5Adelete_invalid_attr_name); + + PART_BEGIN(H5Adelete_by_name_invalid_loc_id) + { + TESTING_2("H5Adelete_by_name with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_name(H5I_INVALID_HID, ".", + ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete_by_name with an invalid loc_id!\n"); + PART_ERROR(H5Adelete_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Adelete_by_name_invalid_loc_id); + + PART_BEGIN(H5Adelete_by_name_invalid_obj_name) + { + TESTING_2("H5Adelete_by_name with an invalid object name"); + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_name(group_id, NULL, ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete_by_name with a NULL object name!\n"); + PART_ERROR(H5Adelete_by_name_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_name(group_id, "", ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " deleted an attribute using H5Adelete_by_name with an invalid object name of ''!\n"); + PART_ERROR(H5Adelete_by_name_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Adelete_by_name_invalid_obj_name); + + PART_BEGIN(H5Adelete_by_name_invalid_attr_name) + { + TESTING_2("H5Adelete_by_name with an invalid attribute name"); + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_name(group_id, ".", NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete_by_name with a NULL attribute name!\n"); + PART_ERROR(H5Adelete_by_name_invalid_attr_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_name(group_id, ".", "", H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete_by_name with an invalid attribute name of " + "''!\n"); + PART_ERROR(H5Adelete_by_name_invalid_attr_name); + } + + PASSED(); + } + PART_END(H5Adelete_by_name_invalid_attr_name); + + PART_BEGIN(H5Adelete_by_name_invalid_lapl) + { + TESTING_2("H5Adelete_by_name with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_name(group_id, ".", ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME, + H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete_by_name with an invalid LAPL!\n"); + PART_ERROR(H5Adelete_by_name_invalid_lapl); + } + + 
PASSED(); + } + PART_END(H5Adelete_by_name_invalid_lapl); + + PART_BEGIN(H5Adelete_by_idx_invalid_loc_id) + { + TESTING_2("H5Adelete_by_idx with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_idx(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete_by_idx with an invalid loc_id!\n"); + PART_ERROR(H5Adelete_by_idx_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Adelete_by_idx_invalid_loc_id); + + PART_BEGIN(H5Adelete_by_idx_invalid_obj_name) + { + TESTING_2("H5Adelete_by_idx with an invalid object name"); + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_idx(group_id, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete_by_idx with a NULL object name!\n"); + PART_ERROR(H5Adelete_by_idx_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_idx(group_id, "", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " deleted an attribute using H5Adelete_by_idx with an invalid object name of ''!\n"); + PART_ERROR(H5Adelete_by_idx_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Adelete_by_idx_invalid_obj_name); + + PART_BEGIN(H5Adelete_by_idx_invalid_index_type) + { + TESTING_2("H5Adelete_by_idx with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_idx(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete_by_idx with invalid index type " + "H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Adelete_by_idx_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_idx(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " deleted an attribute using H5Adelete_by_idx with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Adelete_by_idx_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Adelete_by_idx_invalid_index_type); + + PART_BEGIN(H5Adelete_by_idx_invalid_index_order) + { + TESTING_2("H5Adelete_by_idx with an invalid index ordering"); + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete_by_idx with invalid index ordering " + "H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Adelete_by_idx_invalid_index_order); + } + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_N, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete_by_idx with invalid index ordering " + "H5_ITER_N!\n"); + PART_ERROR(H5Adelete_by_idx_invalid_index_order); + } + + PASSED(); + } + PART_END(H5Adelete_by_idx_invalid_index_order); + + PART_BEGIN(H5Adelete_by_idx_invalid_lapl) + { + TESTING_2("H5Adelete_by_idx with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Adelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" deleted an attribute using H5Adelete_by_idx with an invalid LAPL!\n"); + PART_ERROR(H5Adelete_by_idx_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Adelete_by_idx_invalid_lapl); + } + END_MULTIPART; + + 
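/*
+     * For reference, the valid forms of the calls exercised above (taken from
+     * the positive deletion tests earlier in this file) are:
+     *
+     *   H5Adelete(loc_id, attr_name)
+     *   H5Adelete_by_name(loc_id, obj_name, attr_name, lapl_id)
+     *   H5Adelete_by_idx(loc_id, obj_name, idx_type, order, n, lapl_id)
+     *
+     * Each part above invalidates one of these arguments at a time and checks
+     * that the call fails.
+     */
+
+    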
TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test for H5Aexists and H5Aexists_by_name. + */ +static int +test_attribute_exists(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute existence"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_EXISTS_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_EXISTS_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_EXISTS_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_EXISTS_TEST_ATTR_NAME, attr_dtype, space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Aexists) + { + TESTING_2("H5Aexists"); + + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_EXISTS_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + PART_ERROR(H5Aexists); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_EXISTS_TEST_ATTR_NAME); + PART_ERROR(H5Aexists); + } + + PASSED(); + } + PART_END(H5Aexists); + + PART_BEGIN(H5Aexists_by_name) + { + TESTING_2("H5Aexists_by_name"); + + if ((attr_exists = H5Aexists_by_name(container_group, ATTRIBUTE_EXISTS_TEST_GROUP_NAME, + ATTRIBUTE_EXISTS_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists by name\n"); + PART_ERROR(H5Aexists_by_name); + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute '%s' did not exist by name\n", ATTRIBUTE_EXISTS_TEST_ATTR_NAME); + PART_ERROR(H5Aexists_by_name); + } + + PASSED(); + } + PART_END(H5Aexists_by_name); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if 
(H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that H5Aexists(_by_name) will fail when + * given invalid parameters. + */ +static int +test_attribute_exists_invalid_params(void) +{ + herr_t err_ret = -1; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("attribute existence with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", + ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_SPACE_RANK, NULL, NULL, + TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute didn't exists\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Aexists_invalid_loc_id) + { + TESTING_2("H5Aexists with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Aexists(H5I_INVALID_HID, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Aexists with an invalid loc_id succeeded!\n"); + PART_ERROR(H5Aexists_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Aexists_invalid_loc_id); + + PART_BEGIN(H5Aexists_invalid_attr_name) + { + TESTING_2("H5Aexists with invalid attribute name"); + + H5E_BEGIN_TRY + { + err_ret = H5Aexists(group_id, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Aexists with a NULL attribute name succeeded!\n"); + PART_ERROR(H5Aexists_invalid_attr_name); + } + + H5E_BEGIN_TRY + { + err_ret = 
H5Aexists(group_id, ""); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Aexists with an invalid attribute name of '' succeeded!\n"); + PART_ERROR(H5Aexists_invalid_attr_name); + } + + PASSED(); + } + PART_END(H5Aexists_invalid_attr_name); + + PART_BEGIN(H5Aexists_by_name_invalid_loc_id) + { + TESTING_2("H5Aexists_by_name with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Aexists_by_name(H5I_INVALID_HID, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME, + ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Aexists_by_name with an invalid loc_id succeeded!\n"); + PART_ERROR(H5Aexists_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Aexists_by_name_invalid_loc_id); + + PART_BEGIN(H5Aexists_by_name_invalid_obj_name) + { + TESTING_2("H5Aexists_by_name with invalid object name"); + + H5E_BEGIN_TRY + { + err_ret = H5Aexists_by_name(file_id, NULL, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Aexists_by_name with a NULL object name succeeded!\n"); + PART_ERROR(H5Aexists_by_name_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Aexists_by_name(file_id, "", ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Aexists_by_name with an invalid object name of '' succeeded!\n"); + PART_ERROR(H5Aexists_by_name_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Aexists_by_name_invalid_obj_name); + + PART_BEGIN(H5Aexists_by_name_invalid_attr_name) + { + TESTING_2("H5Aexists_by_name with invalid attribute name"); + + H5E_BEGIN_TRY + { + err_ret = H5Aexists_by_name(file_id, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME, NULL, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Aexists_by_name with a NULL attribute name succeeded!\n"); + PART_ERROR(H5Aexists_by_name_invalid_attr_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Aexists_by_name(file_id, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME, "", + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Aexists_by_name with an invalid attribute name of '' succeeded!\n"); + PART_ERROR(H5Aexists_by_name_invalid_attr_name); + } + + PASSED(); + } + PART_END(H5Aexists_by_name_invalid_attr_name); + + PART_BEGIN(H5Aexists_by_name_invalid_lapl) + { + TESTING_2("H5Aexists_by_name with an invalid link access property list"); + + H5E_BEGIN_TRY + { + err_ret = H5Aexists_by_name(file_id, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME, + ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Aexists_by_name with an invalid link access property list succeeded!\n"); + PART_ERROR(H5Aexists_by_name_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Aexists_by_name_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + 
+ return 1; +} + +/* + * A test to make sure many attributes can be written + * to the file + */ +static int +test_attribute_many(void) +{ + unsigned u; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + char attrname[ATTRIBUTE_MANY_NAME_BUF_SIZE]; /* Name of attribute */ + + TESTING("creating many attributes"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_MANY_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create the group '%s'\n", ATTRIBUTE_MANY_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_MANY_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + /* Create many attributes */ + for (u = 0; u < ATTRIBUTE_MANY_NUMB; u++) { + sprintf(attrname, "many-%06u", u); + + if ((attr_id = H5Acreate2(group_id, attrname, attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, attrname)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to make sure an attribute can be opened for + * a second time + */ +static int +test_attribute_duplicate_id(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING("duplicated IDs for an attribute"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or attribute aren't supported with this connector\n"); + 
return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_DUPLICATE_ID_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create the group '%s'\n", ATTRIBUTE_DUPLICATE_ID_GRP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_DUPLICATE_ID_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DUPLICATE_ID_ATTR_NAME, attr_dtype, space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DUPLICATE_ID_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + /* Open the attribute just created and get a second ID */ + if ((attr_id2 = H5Aopen(group_id, ATTRIBUTE_DUPLICATE_ID_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" attribute can't be opened for a second time\n"); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id2) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Aclose(attr_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that the number of attributes attached + * to an object (group, dataset, datatype) can be retrieved. + * + * XXX: Cover all of the cases and move to H5O tests. 
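+ * The attribute count is read back through H5Oget_info3, H5Oget_info_by_name3
+ * and H5Oget_info_by_idx3 in turn and checked against the single attribute
+ * created here.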
+ */ +static int +test_get_number_attributes(void) +{ + H5O_info2_t obj_info; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("retrieval of the number of attributes on an object"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, attribute, or object aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_NUM_ATTRS_TEST_GRP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create the group '%s'\n", ATTRIBUTE_GET_NUM_ATTRS_TEST_GRP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_GET_NUM_ATTRS_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_NUM_ATTRS_TEST_ATTR_NAME, attr_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_NUM_ATTRS_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Oget_info) + { + TESTING_2("H5Oget_info"); + + /* Now get the number of attributes from the group */ + if (H5Oget_info3(group_id, &obj_info, H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve group info using H5Oget_info3\n"); + PART_ERROR(H5Oget_info); + } + + if (obj_info.num_attrs != 1) { + H5_FAILED(); + HDprintf(" invalid number of attributes received\n"); + PART_ERROR(H5Oget_info); + } + + PASSED(); + } + PART_END(H5Oget_info); + + PART_BEGIN(H5Oget_info_by_name) + { + TESTING_2("H5Oget_info_by_name"); + + if (H5Oget_info_by_name3(container_group, ATTRIBUTE_GET_NUM_ATTRS_TEST_GRP_NAME, &obj_info, + H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve group info using H5Oget_info_by_name3\n"); + PART_ERROR(H5Oget_info_by_name); + } + + if (obj_info.num_attrs != 1) { + H5_FAILED(); + HDprintf(" invalid number of attributes received\n"); + PART_ERROR(H5Oget_info_by_name); + } + + PASSED(); + } + PART_END(H5Oget_info_by_name); + + PART_BEGIN(H5Oget_info_by_idx) + { + TESTING_2("H5Oget_info_by_idx"); + + if (H5Oget_info_by_idx3(container_group, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &obj_info, + H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve group info using H5Oget_info_by_idx3\n"); + 
PART_ERROR(H5Oget_info_by_idx); + } + + if (obj_info.num_attrs != 1) { + H5_FAILED(); + HDprintf(" invalid number of attributes received\n"); + PART_ERROR(H5Oget_info_by_idx); + } + + PASSED(); + } + PART_END(H5Oget_info_by_idx); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(attr_dtype); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that the reference count of a named datatype used by + * attribute and a dataset is correct. + * + * XXX: May move to H5O tests. + */ +static int +test_attr_shared_dtype(void) +{ +#ifndef NO_SHARED_DATATYPES + H5O_info2_t obj_info; + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; +#endif + + TESTING("shared datatype for attributes"); + +#ifndef NO_SHARED_DATATYPES + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, attribute, stored datatype, or object aren't " + "supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_SHARED_DTYPE_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create the group '%s'\n", ATTRIBUTE_SHARED_DTYPE_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(ATTRIBUTE_SHARED_DTYPE_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + /* Commit datatype to file */ + if (H5Tcommit2(group_id, ATTRIBUTE_SHARED_DTYPE_NAME, attr_dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype\n"); + goto error; + } + + if (H5Oget_info_by_name3(group_id, ATTRIBUTE_SHARED_DTYPE_NAME, &obj_info, H5O_INFO_ALL, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve root group info using H5Oget_info_by_name3\n"); + goto error; + } + + if (obj_info.rc != 1) { + H5_FAILED(); + HDprintf(" reference count of the named datatype is wrong: %u\n", obj_info.rc); + goto error; + } + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_SHARED_DTYPE_ATTR_NAME, attr_dtype, space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute\n"); + goto error; + } + + /* Verify the attribute has been created */ + if 
((attr_exists = H5Aexists(group_id, ATTRIBUTE_SHARED_DTYPE_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute exists\n"); + goto error; + } + + if (!attr_exists) { + H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if (H5Oget_info_by_name3(group_id, ATTRIBUTE_SHARED_DTYPE_NAME, &obj_info, H5O_INFO_ALL, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve root group info using H5Oget_info_by_name3\n"); + goto error; + } + + if (obj_info.rc != 2) { + H5_FAILED(); + HDprintf(" reference count of the named datatype is wrong: %u\n", obj_info.rc); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, ATTRIBUTE_SHARED_DTYPE_DSET_NAME, attr_dtype, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset\n"); + goto error; + } + + if (H5Oget_info_by_name3(group_id, ATTRIBUTE_SHARED_DTYPE_NAME, &obj_info, H5O_INFO_ALL, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve root group info using H5Oget_info_by_name3\n"); + goto error; + } + + if (obj_info.rc != 3) { + H5_FAILED(); + HDprintf(" reference count of the named datatype is wrong: %u\n", obj_info.rc); + goto error; + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(attr_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +static herr_t +attr_iter_callback1(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo, void *op_data) +{ + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + size_t test_iteration; + char expected_attr_name[ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE]; + herr_t ret_val = H5_ITER_CONT; + + UNUSED(location_id); + UNUSED(ainfo); + + /* + * Four tests are run in the following order per attribute iteration API call: + * + * - iteration by attribute name in increasing order + * - iteration by attribute name in decreasing order + * - iteration by attribute creation order in increasing order + * - iteration by attribute creation order in decreasing order + * + * Based on how the test is written, this will mean that the attribute names + * will run in increasing order on the first and fourth tests and decreasing + * order on the second and third tests. 
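+ * For example, with ATTRIBUTE_ITERATE_TEST_NUM_ATTRS set to 4 and
+ * ATTRIBUTE_ITERATE_TEST_ATTR_NAME set to "iter_attr", passes one and four
+ * expect to visit iter_attr0 .. iter_attr3 while passes two and three expect
+ * to visit iter_attr3 .. iter_attr0.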
+ */ + test_iteration = (counter_val / ATTRIBUTE_ITERATE_TEST_NUM_ATTRS); + if (test_iteration == 0 || test_iteration == 3) { + HDsnprintf(expected_attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE, + ATTRIBUTE_ITERATE_TEST_ATTR_NAME "%d", + (int)(counter_val % ATTRIBUTE_ITERATE_TEST_NUM_ATTRS)); + } + else { + HDsnprintf( + expected_attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE, + ATTRIBUTE_ITERATE_TEST_ATTR_NAME "%d", + (int)(ATTRIBUTE_ITERATE_TEST_NUM_ATTRS - (counter_val % ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) - 1)); + } + + if (HDstrncmp(attr_name, expected_attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE)) { + HDprintf(" attribute name '%s' didn't match expected name '%s'\n", attr_name, expected_attr_name); + ret_val = H5_ITER_ERROR; + goto done; + } + + /* + * If the attribute's creation order is marked as valid, make sure + * that it corresponds to what is expected based on the order that + * the attributes were created in. + */ + if (ainfo->corder_valid) { + H5O_msg_crt_idx_t expected_crt_order; + + /* + * As the attributes are created with a reverse-ordering naming + * scheme to test creation order, their creation order values will + * be listed in reverse ordering on the first and fourth tests and + * in normal ordering on the second and third tests. + */ + if (test_iteration == 0 || test_iteration == 3) + expected_crt_order = (H5O_msg_crt_idx_t)(ATTRIBUTE_ITERATE_TEST_NUM_ATTRS - + (counter_val % ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) - 1); + else + expected_crt_order = (H5O_msg_crt_idx_t)(counter_val % ATTRIBUTE_ITERATE_TEST_NUM_ATTRS); + + if (ainfo->corder != expected_crt_order) { + H5_FAILED(); + HDprintf(" attribute's creation order value of %lld didn't match expected value of %lld\n", + (long long)ainfo->corder, (long long)expected_crt_order); + ret_val = H5_ITER_ERROR; + goto done; + } + } + +done: + (*i)++; + + return ret_val; +} + +static herr_t +attr_iter_callback2(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo, void *op_data) +{ + UNUSED(location_id); + UNUSED(attr_name); + UNUSED(ainfo); + UNUSED(op_data); + + return 0; +} + +int +H5_api_attribute_test(void) +{ + size_t i; + int nerrors; + + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Attribute Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(attribute_tests); i++) { + nerrors += (*attribute_tests[i])() ? 1 : 0; + } + + HDprintf("\n"); + + return nerrors; +} diff --git a/test/API/H5_api_attribute_test.h b/test/API/H5_api_attribute_test.h new file mode 100644 index 00000000000..76562635714 --- /dev/null +++ b/test/API/H5_api_attribute_test.h @@ -0,0 +1,203 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_ATTRIBUTE_TEST_H +#define H5_API_ATTRIBUTE_TEST_H + +#include "H5_api_test.h" + +int H5_api_attribute_test(void); + +/************************************************** + * * + * API Attribute test defines * + * * + **************************************************/ + +#define ATTRIBUTE_CREATE_ON_ROOT_SPACE_RANK 1 +#define ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME "attr_on_root" +#define ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME2 "attr_on_root2" + +#define ATTRIBUTE_CREATE_ON_DATASET_DSET_SPACE_RANK 2 +#define ATTRIBUTE_CREATE_ON_DATASET_ATTR_SPACE_RANK 1 +#define ATTRIBUTE_CREATE_ON_DATASET_GROUP_NAME "attr_on_dataset_test" +#define ATTRIBUTE_CREATE_ON_DATASET_DSET_NAME "dataset_with_attr" +#define ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME "attr_on_dataset" +#define ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME2 "attr_on_dataset2" + +#define ATTRIBUTE_CREATE_ON_DATATYPE_SPACE_RANK 1 +#define ATTRIBUTE_CREATE_ON_DATATYPE_DTYPE_NAME "datatype_with_attr" +#define ATTRIBUTE_CREATE_ON_DATATYPE_GROUP_NAME "attr_on_datatype_test" +#define ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME "attr_on_datatype" +#define ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME2 "attr_on_datatype2" + +#define ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_SUBGROUP_NAME "attr_with_null_space_test" +#define ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_ATTR_NAME "attr_with_null_space" + +#define ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_SUBGROUP_NAME "attr_with_scalar_space_test" +#define ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_ATTR_NAME "attr_with_scalar_space" + +#define ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_SPACE_RANK 1 +#define ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_GROUP_NAME "attr_with_space_in_name_test" +#define ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_ATTR_NAME "attr with space in name" + +#define ATTRIBUTE_CREATE_INVALID_PARAMS_SPACE_RANK 1 +#define ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME "attribute_create_invalid_params_test" +#define ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME "invalid_params_attr" + +#define ATTRIBUTE_OPEN_TEST_SPACE_RANK 1 +#define ATTRIBUTE_OPEN_TEST_GROUP_NAME "attribute_open_test" +#define ATTRIBUTE_OPEN_TEST_ATTR_NAME "attribute_open_test_attr" +#define ATTRIBUTE_OPEN_TEST_ATTR_NAME2 ATTRIBUTE_OPEN_TEST_ATTR_NAME "2" +#define ATTRIBUTE_OPEN_TEST_ATTR_NAME3 ATTRIBUTE_OPEN_TEST_ATTR_NAME "3" + +#define ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME "attribute_open_invalid_params_test" +#define ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_SPACE_RANK 1 +#define ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME "attribute_open_invalid_params_attr" + +#define ATTRIBUTE_WRITE_TEST_ATTR_DTYPE_SIZE sizeof(int) +#define ATTRIBUTE_WRITE_TEST_ATTR_DTYPE H5T_NATIVE_INT +#define ATTRIBUTE_WRITE_TEST_SPACE_RANK 1 +#define ATTRIBUTE_WRITE_TEST_GROUP_NAME "attr_write_test" +#define ATTRIBUTE_WRITE_TEST_ATTR_NAME "write_test_attr" + +#define ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE sizeof(int) +#define ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE H5T_NATIVE_INT +#define ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_SPACE_RANK 1 +#define ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_GROUP_NAME "attr_write_invalid_params_test" +#define ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_write_test_attr" + +#define ATTRIBUTE_READ_TEST_ATTR_DTYPE_SIZE sizeof(int) +#define ATTRIBUTE_READ_TEST_ATTR_DTYPE H5T_NATIVE_INT +#define ATTRIBUTE_READ_TEST_SPACE_RANK 1 +#define ATTRIBUTE_READ_TEST_GROUP_NAME "attr_read_test" +#define ATTRIBUTE_READ_TEST_ATTR_NAME "read_test_attr" + +#define 
ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE sizeof(int) +#define ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE H5T_NATIVE_INT +#define ATTRIBUTE_READ_INVALID_PARAMS_TEST_SPACE_RANK 1 +#define ATTRIBUTE_READ_INVALID_PARAMS_TEST_GROUP_NAME "attr_read_invalid_params_test" +#define ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_read_test_attr" + +#define ATTRIBUTE_READ_EMPTY_SPACE_RANK 1 +#define ATTRIBUTE_READ_EMPTY_ATTR_GROUP_NAME "read_empty_attr_test" +#define ATTRIBUTE_READ_EMPTY_ATTR_NAME "read_empty_attr" +#define ATTRIBUTE_READ_EMPTY_DTYPE H5T_NATIVE_INT +#define ATTRIBUTE_READ_EMPTY_DTYPE_SIZE sizeof(int) + +#define ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK 1 +#define ATTRIBUTE_GET_SPACE_TYPE_TEST_GROUP_NAME "get_attr_space_type_test" +#define ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME "get_space_type_test_attr" + +#define ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_SPACE_RANK 1 +#define ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME "get_attr_space_type_invalid_params_test" +#define ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_ATTR_NAME "get_space_type_invalid_params_test_attr" + +#define ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME1 "property_list_test_attribute1" +#define ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME2 "property_list_test_attribute2" +#define ATTRIBUTE_PROPERTY_LIST_TEST_SUBGROUP_NAME "attribute_property_list_test_group" +#define ATTRIBUTE_PROPERTY_LIST_TEST_SPACE_RANK 1 + +#define ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME "attr_name_retrieval_attr" +#define ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2 ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME "2" +#define ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3 ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME "3" +#define ATTRIBUTE_GET_NAME_TEST_SPACE_RANK 1 +#define ATTRIBUTE_GET_NAME_TEST_GROUP_NAME "retrieve_attr_name_test" + +#define ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_ATTRIBUTE_NAME "invalid_params_attr_name_retrieval_attr" +#define ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_SPACE_RANK 1 +#define ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME "retrieve_attr_name_invalid_params_test" + +#define ATTRIBUTE_GET_INFO_TEST_SPACE_RANK 1 +#define ATTRIBUTE_GET_INFO_TEST_GROUP_NAME "attr_get_info_test" +#define ATTRIBUTE_GET_INFO_TEST_ATTR_NAME "get_info_test_attr" +#define ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2 ATTRIBUTE_GET_INFO_TEST_ATTR_NAME "2" +#define ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3 ATTRIBUTE_GET_INFO_TEST_ATTR_NAME "3" + +#define ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_SPACE_RANK 1 +#define ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_GROUP_NAME "attr_get_info_invalid_params_test" +#define ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_get_info_test_attr" + +#define ATTRIBUTE_RENAME_TEST_SPACE_RANK 1 +#define ATTRIBUTE_RENAME_TEST_GROUP_NAME "attr_rename_test" +#define ATTRIBUTE_RENAME_TEST_ATTR_NAME "rename_test_attr" +#define ATTRIBUTE_RENAME_TEST_ATTR_NAME2 "rename_test_attr2" +#define ATTRIBUTE_RENAME_TEST_NEW_NAME "renamed_attr" +#define ATTRIBUTE_RENAME_TEST_NEW_NAME2 "renamed_attr2" + +#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_SPACE_RANK 1 +#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_GROUP_NAME "attr_rename_invalid_params_test" +#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_rename_test_attr" +#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME2 "invalid_params_rename_test_attr2" +#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME "invalid_params_renamed_attr" +#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME2 "invalid_params_renamed_attr2" + +#define 
ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE 256 +#define ATTRIBUTE_ITERATE_TEST_DSET_SPACE_RANK 2 +#define ATTRIBUTE_ITERATE_TEST_ATTR_SPACE_RANK 1 +#define ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME "attribute_iterate_group_test" +#define ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME "attribute_iterate_dset_test" +#define ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME "attribute_iterate_datatype_test" +#define ATTRIBUTE_ITERATE_TEST_DSET_NAME "attribute_iterate_dset" +#define ATTRIBUTE_ITERATE_TEST_DTYPE_NAME "attribute_iterate_dtype" +#define ATTRIBUTE_ITERATE_TEST_ATTR_NAME "iter_attr" +#define ATTRIBUTE_ITERATE_TEST_NUM_ATTRS 4 + +#define ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_SPACE_RANK 2 +#define ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_SUBGROUP_NAME "attribute_iterate_test_0_attributes" +#define ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_NAME "attribute_iterate_dset" + +#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_SPACE_RANK 1 +#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_SUBGROUP_NAME "attribute_iterate_invalid_params_test" +#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_iter_attr1" +#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME2 "invalid_params_iter_attr2" +#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME3 "invalid_params_iter_attr3" +#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME4 "invalid_params_iter_attr4" + +#define ATTRIBUTE_DELETION_TEST_SPACE_RANK 1 +#define ATTRIBUTE_DELETION_TEST_GROUP_NAME "attr_deletion_test" +#define ATTRIBUTE_DELETION_TEST_ATTR_NAME "attr_to_be_deleted" +#define ATTRIBUTE_DELETION_TEST_ATTR_NAME2 ATTRIBUTE_DELETION_TEST_ATTR_NAME "2" +#define ATTRIBUTE_DELETION_TEST_ATTR_NAME3 ATTRIBUTE_DELETION_TEST_ATTR_NAME "3" + +#define ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_SPACE_RANK 1 +#define ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_GROUP_NAME "attr_deletion_invalid_params_test" +#define ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_attr_to_be_deleted" + +#define ATTRIBUTE_EXISTS_TEST_GROUP_NAME "attr_exists_test" +#define ATTRIBUTE_EXISTS_TEST_SPACE_RANK 1 +#define ATTRIBUTE_EXISTS_TEST_ATTR_NAME "attr_exists" + +#define ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_SPACE_RANK 1 +#define ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME "attr_exists_invalid_params_test" +#define ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_attr_exists" + +#define ATTRIBUTE_MANY_GROUP_NAME "group_for_many_attributes" +#define ATTRIBUTE_MANY_NAME_BUF_SIZE 32U +#define ATTRIBUTE_MANY_NUMB 64U +#define ATTRIBUTE_MANY_SPACE_RANK 1 + +#define ATTRIBUTE_DUPLICATE_ID_GRP_NAME "attr_duplicate_open_test" +#define ATTRIBUTE_DUPLICATE_ID_ATTR_NAME "attr_duplicated_id" +#define ATTRIBUTE_DUPLICATE_ID_SPACE_RANK 1 + +#define ATTRIBUTE_GET_NUM_ATTRS_TEST_GRP_NAME "get_num_attrs_test" +#define ATTRIBUTE_GET_NUM_ATTRS_TEST_ATTR_NAME "get_num_attrs_test_attribute" +#define ATTRIBUTE_GET_NUM_ATTRS_TEST_SPACE_RANK 1 + +#define ATTRIBUTE_SHARED_DTYPE_NAME "Datatype" +#define ATTRIBUTE_SHARED_DTYPE_GROUP_NAME "shared_dtype_group" +#define ATTRIBUTE_SHARED_DTYPE_ATTR_NAME "shared_dtype_attr" +#define ATTRIBUTE_SHARED_DTYPE_DSET_NAME "shared_dtype_dset" +#define ATTRIBUTE_SHARED_DTYPE_SPACE_RANK 1 + +#endif diff --git a/test/API/H5_api_dataset_test.c b/test/API/H5_api_dataset_test.c new file mode 100644 index 00000000000..35a19f3842e --- /dev/null +++ b/test/API/H5_api_dataset_test.c @@ -0,0 +1,11683 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. 
* + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_dataset_test.h" + +/* + * XXX: H5Dread_chunk/H5Dwrite_chunk, H5Dfill/scatter/gather + */ + +static int test_create_dataset_under_root(void); +static int test_create_dataset_under_existing_group(void); +static int test_create_dataset_invalid_params(void); +static int test_create_anonymous_dataset(void); +static int test_create_anonymous_dataset_invalid_params(void); +static int test_create_dataset_null_space(void); +static int test_create_dataset_scalar_space(void); +static int test_create_zero_dim_dset(void); +static int test_create_dataset_random_shapes(void); +static int test_create_dataset_predefined_types(void); +static int test_create_dataset_string_types(void); +static int test_create_dataset_compound_types(void); +static int test_create_dataset_enum_types(void); +static int test_create_dataset_array_types(void); +static int test_create_dataset_creation_properties(void); +static int test_create_many_dataset(void); +static int test_open_dataset(void); +static int test_open_dataset_invalid_params(void); +static int test_close_dataset_invalid_params(void); +static int test_get_dataset_space_and_type(void); +static int test_get_dataset_space_and_type_invalid_params(void); +static int test_get_dataset_space_status(void); +static int test_get_dataset_space_status_invalid_params(void); +static int test_dataset_property_lists(void); +static int test_get_dataset_storage_size(void); +static int test_get_dataset_storage_size_invalid_params(void); +static int test_get_dataset_chunk_storage_size(void); +static int test_get_dataset_chunk_storage_size_invalid_params(void); +static int test_get_dataset_offset(void); +static int test_get_dataset_offset_invalid_params(void); +static int test_read_dataset_small_all(void); +static int test_read_dataset_small_hyperslab(void); +static int test_read_dataset_small_point_selection(void); +static int test_dataset_io_point_selections(void); +#ifndef NO_LARGE_TESTS +static int test_read_dataset_large_all(void); +static int test_read_dataset_large_hyperslab(void); +static int test_read_dataset_large_point_selection(void); +#endif +static int test_read_dataset_invalid_params(void); +static int test_write_dataset_small_all(void); +static int test_write_dataset_small_hyperslab(void); +static int test_write_dataset_small_point_selection(void); +#ifndef NO_LARGE_TESTS +static int test_write_dataset_large_all(void); +static int test_write_dataset_large_hyperslab(void); +static int test_write_dataset_large_point_selection(void); +#endif +static int test_write_dataset_data_verification(void); +static int test_write_dataset_invalid_params(void); +static int test_dataset_builtin_type_conversion(void); +static int test_dataset_compound_partial_io(void); +static int test_dataset_set_extent_chunked_unlimited(void); +static int test_dataset_set_extent_chunked_fixed(void); +static int test_dataset_set_extent_data(void); +static int test_dataset_set_extent_double_handles(void); +static int test_dataset_set_extent_invalid_params(void); +static int 
test_flush_dataset(void); +static int test_flush_dataset_invalid_params(void); +static int test_refresh_dataset(void); +static int test_refresh_dataset_invalid_params(void); + +/* + * Chunking tests + */ +static int test_create_single_chunk_dataset(void); +static int test_write_single_chunk_dataset(void); +static int test_create_multi_chunk_dataset(void); +static int test_write_multi_chunk_dataset_same_shape_read(void); +static int test_write_multi_chunk_dataset_diff_shape_read(void); +static int test_overwrite_multi_chunk_dataset_same_shape_read(void); +static int test_overwrite_multi_chunk_dataset_diff_shape_read(void); +static int test_read_partial_chunk_all_selection(void); +static int test_read_partial_chunk_hyperslab_selection(void); +static int test_read_partial_chunk_point_selection(void); + +static int test_get_vlen_buf_size(void); + +/* + * The array of dataset tests to be performed. + */ +static int (*dataset_tests[])(void) = { + test_create_dataset_under_root, + test_create_dataset_under_existing_group, + test_create_dataset_invalid_params, + test_create_anonymous_dataset, + test_create_anonymous_dataset_invalid_params, + test_create_dataset_null_space, + test_create_dataset_scalar_space, + test_create_zero_dim_dset, + test_create_dataset_random_shapes, + test_create_dataset_predefined_types, + test_create_dataset_string_types, + test_create_dataset_compound_types, + test_create_dataset_enum_types, + test_create_dataset_array_types, + test_create_dataset_creation_properties, + test_create_many_dataset, + test_open_dataset, + test_open_dataset_invalid_params, + test_close_dataset_invalid_params, + test_get_dataset_space_and_type, + test_get_dataset_space_and_type_invalid_params, + test_get_dataset_space_status, + test_get_dataset_space_status_invalid_params, + test_dataset_property_lists, + test_get_dataset_storage_size, + test_get_dataset_storage_size_invalid_params, + test_get_dataset_chunk_storage_size, + test_get_dataset_chunk_storage_size_invalid_params, + test_get_dataset_offset, + test_get_dataset_offset_invalid_params, + test_read_dataset_small_all, + test_read_dataset_small_hyperslab, + test_read_dataset_small_point_selection, + test_dataset_io_point_selections, +#ifndef NO_LARGE_TESTS + test_read_dataset_large_all, + test_read_dataset_large_hyperslab, + test_read_dataset_large_point_selection, +#endif + test_read_dataset_invalid_params, + test_write_dataset_small_all, + test_write_dataset_small_hyperslab, + test_write_dataset_small_point_selection, +#ifndef NO_LARGE_TESTS + test_write_dataset_large_all, + test_write_dataset_large_hyperslab, + test_write_dataset_large_point_selection, +#endif + test_write_dataset_data_verification, + test_write_dataset_invalid_params, + test_dataset_builtin_type_conversion, + test_dataset_compound_partial_io, + test_dataset_set_extent_chunked_unlimited, + test_dataset_set_extent_chunked_fixed, + test_dataset_set_extent_data, + test_dataset_set_extent_double_handles, + test_dataset_set_extent_invalid_params, + test_flush_dataset, + test_flush_dataset_invalid_params, + test_refresh_dataset, + test_refresh_dataset_invalid_params, + test_create_single_chunk_dataset, + test_write_single_chunk_dataset, + test_create_multi_chunk_dataset, + test_write_multi_chunk_dataset_same_shape_read, + test_write_multi_chunk_dataset_diff_shape_read, + test_overwrite_multi_chunk_dataset_same_shape_read, + test_overwrite_multi_chunk_dataset_diff_shape_read, + test_read_partial_chunk_all_selection, + test_read_partial_chunk_hyperslab_selection, + 
test_read_partial_chunk_point_selection, + test_get_vlen_buf_size, +}; + +/* + * A test to check that a dataset can be + * created under the root group. + */ +static int +test_create_dataset_under_root(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING("dataset creation under root group"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_CREATE_UNDER_ROOT_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + /* Create the Dataset under the root group of the file */ + if ((dset_id = H5Dcreate2(file_id, DATASET_CREATE_UNDER_ROOT_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_CREATE_UNDER_ROOT_DSET_NAME); + goto error; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset can be created + * under a group that is not the root group. 
+ */ +static int +test_create_dataset_under_existing_group(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING("dataset creation under an existing group"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_UNDER_EXISTING_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", DATASET_CREATE_UNDER_EXISTING_GROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_CREATE_UNDER_EXISTING_SPACE_RANK, NULL, NULL, FALSE)) < + 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_CREATE_UNDER_EXISTING_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_CREATE_UNDER_EXISTING_DSET_NAME); + goto error; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset can't be created + * when H5Dcreate is passed invalid parameters. 
+ */ +static int +test_create_dataset_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Dcreate with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", DATASET_CREATE_INVALID_PARAMS_GROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_CREATE_INVALID_PARAMS_SPACE_RANK, NULL, NULL, FALSE)) < + 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dcreate_invalid_loc_id) + { + TESTING_2("H5Dcreate with an invalid loc_id"); + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate2(H5I_INVALID_HID, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, dset_dtype, + fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created dataset using H5Dcreate with an invalid loc_id!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Dcreate_invalid_loc_id); + + PART_BEGIN(H5Dcreate_invalid_dataset_name) + { + TESTING_2("H5Dcreate with an invalid dataset name"); + + H5E_BEGIN_TRY + { + dset_id = + H5Dcreate2(group_id, NULL, dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created dataset using H5Dcreate with a NULL dataset name!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_invalid_dataset_name); + } + + H5E_BEGIN_TRY + { + dset_id = + H5Dcreate2(group_id, "", dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created dataset using H5Dcreate with an invalid dataset name of ''!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_invalid_dataset_name); + } + + PASSED(); + } + PART_END(H5Dcreate_invalid_dataset_name); + + PART_BEGIN(H5Dcreate_invalid_datatype) + { + TESTING_2("H5Dcreate with an invalid datatype"); + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate2(group_id, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, H5I_INVALID_HID, + fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created dataset using H5Dcreate with an invalid datatype!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_invalid_datatype); + } + + PASSED(); + } + PART_END(H5Dcreate_invalid_datatype); + + PART_BEGIN(H5Dcreate_invalid_dataspace) + { 
+ TESTING_2("H5Dcreate with an invalid dataspace"); + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate2(group_id, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, dset_dtype, + H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created dataset using H5Dcreate with an invalid dataspace!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_invalid_dataspace); + } + + PASSED(); + } + PART_END(H5Dcreate_invalid_dataspace); + + PART_BEGIN(H5Dcreate_invalid_lcpl) + { + TESTING_2("H5Dcreate with an invalid LCPL"); + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate2(group_id, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, dset_dtype, fspace_id, + H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created dataset using H5Dcreate with an invalid LCPL!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_invalid_lcpl); + } + + PASSED(); + } + PART_END(H5Dcreate_invalid_lcpl); + + PART_BEGIN(H5Dcreate_invalid_dcpl) + { + TESTING_2("H5Dcreate with an invalid DCPL"); + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate2(group_id, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created dataset using H5Dcreate with an invalid DCPL!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_invalid_dcpl); + } + + PASSED(); + } + PART_END(H5Dcreate_invalid_dcpl); + + PART_BEGIN(H5Dcreate_invalid_dapl) + { + TESTING_2("H5Dcreate with an invalid DAPL"); + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate2(group_id, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created dataset using H5Dcreate with an invalid DAPL!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_invalid_dapl); + } + + PASSED(); + } + PART_END(H5Dcreate_invalid_dapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an anonymous dataset can be created. 
+ */ +static int +test_create_anonymous_dataset(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING("anonymous dataset creation"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_ANONYMOUS_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", DATASET_CREATE_ANONYMOUS_GROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_CREATE_ANONYMOUS_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate_anon(group_id, dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create anonymous dataset\n"); + goto error; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an anonymous dataset can't + * be created when H5Dcreate_anon is passed invalid + * parameters. 
+ */ +static int +test_create_anonymous_dataset_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING_MULTIPART("anonymous dataset creation with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", + DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_SPACE_RANK, NULL, NULL, + FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dcreate_anon_invalid_loc_id) + { + TESTING_2("H5Dcreate_anon with an invalid loc_id"); + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate_anon(H5I_INVALID_HID, dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created anonymous dataset using an invalid loc_id!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_anon_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Dcreate_anon_invalid_loc_id); + + PART_BEGIN(H5Dcreate_anon_invalid_datatype) + { + TESTING_2("H5Dcreate_anon with an invalid dataset datatype"); + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate_anon(group_id, H5I_INVALID_HID, fspace_id, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created anonymous dataset using an invalid dataset datatype!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_anon_invalid_datatype); + } + + PASSED(); + } + PART_END(H5Dcreate_anon_invalid_datatype); + + PART_BEGIN(H5Dcreate_anon_invalid_dataspace) + { + TESTING_2("H5Dcreate_anon with an invalid dataset dataspace"); + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate_anon(group_id, dset_dtype, H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created anonymous dataset using an invalid dataset dataspace!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_anon_invalid_dataspace); + } + + PASSED(); + } + PART_END(H5Dcreate_anon_invalid_dataspace); + + PART_BEGIN(H5Dcreate_anon_invalid_dcpl) + { + TESTING_2("H5Dcreate_anon with an invalid DCPL"); + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate_anon(group_id, dset_dtype, fspace_id, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created anonymous dataset using an invalid DCPL!\n"); + H5Dclose(dset_id); + 
PART_ERROR(H5Dcreate_anon_invalid_dcpl); + } + + PASSED(); + } + PART_END(H5Dcreate_anon_invalid_dcpl); + + PART_BEGIN(H5Dcreate_anon_invalid_dapl) + { + TESTING_2("H5Dcreate_anon with an invalid DAPL"); + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate_anon(group_id, dset_dtype, fspace_id, H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" created anonymous dataset using an invalid DAPL!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dcreate_anon_invalid_dapl); + } + + PASSED(); + } + PART_END(H5Dcreate_anon_invalid_dapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that creating a dataset with a NULL + * dataspace is not problematic. + */ +static int +test_create_dataset_null_space(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING("dataset creation with a NULL dataspace"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_NULL_DATASPACE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + DATASET_CREATE_NULL_DATASPACE_TEST_SUBGROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate(H5S_NULL)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_CREATE_NULL_DATASPACE_TEST_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_CREATE_NULL_DATASPACE_TEST_DSET_NAME); + goto error; + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + if ((dset_id = H5Dopen2(group_id, DATASET_CREATE_NULL_DATASPACE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_CREATE_NULL_DATASPACE_TEST_DSET_NAME); + goto error; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + 
return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that creating a dataset with a scalar + * dataspace is not problematic. + */ +static int +test_create_dataset_scalar_space(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING("dataset creation with a SCALAR dataspace"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_SCALAR_DATASPACE_TEST_SUBGROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + DATASET_CREATE_SCALAR_DATASPACE_TEST_SUBGROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate(H5S_SCALAR)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_CREATE_SCALAR_DATASPACE_TEST_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_CREATE_SCALAR_DATASPACE_TEST_DSET_NAME); + goto error; + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + if ((dset_id = H5Dopen2(group_id, DATASET_CREATE_SCALAR_DATASPACE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_CREATE_SCALAR_DATASPACE_TEST_DSET_NAME); + goto error; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that creating a dataset with a dataspace + * which contains a 0-sized dimension is not problematic. 
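+ * Such a dataspace describes zero elements, so after H5Sselect_none() is
+ * called both H5Dwrite() and H5Dread() are expected to succeed while
+ * transferring no data.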
+ */ +static int +test_create_zero_dim_dset(void) +{ + hsize_t dims[ZERO_DIM_DSET_TEST_SPACE_RANK] = {0}; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + int data[1]; + + TESTING("creation of 0-sized dataset"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ZERO_DIM_DSET_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", ZERO_DIM_DSET_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(1, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, ZERO_DIM_DSET_TEST_DSET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create 0-sized dataset\n"); + goto error; + } + + if (H5Sselect_none(fspace_id) < 0) { + H5_FAILED(); + HDprintf(" failed to set none selection in dataset's file dataspace\n"); + goto error; + } + + /* Attempt to write 0 elements to dataset */ + if (H5Dwrite(dset_id, H5T_NATIVE_INT, fspace_id, fspace_id, H5P_DEFAULT, data) < 0) { + H5_FAILED(); + HDprintf(" failed to write 0 elements to 0-sized dataset\n"); + goto error; + } + + /* Attempt to read 0 elements from dataset */ + if (H5Dread(dset_id, H5T_NATIVE_INT, fspace_id, fspace_id, H5P_DEFAULT, data) < 0) { + H5_FAILED(); + HDprintf(" failed to read 0 elements from 0-sized dataset\n"); + goto error; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset can be created with + * a variety of different dataspace shapes. 
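+ * Each iteration generates a dataspace with a random rank between 1 and
+ * DATASET_SHAPE_TEST_MAX_DIMS and randomly chosen dimension sizes.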
+ */ +static int +test_create_dataset_random_shapes(void) +{ + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID, space_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + + TESTING("dataset creation with random dimension sizes"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SHAPE_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group\n"); + goto error; + } + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + for (i = 0; i < DATASET_SHAPE_TEST_NUM_ITERATIONS; i++) { + char name[100]; + int ndims = rand() % DATASET_SHAPE_TEST_MAX_DIMS + 1; + + if ((space_id = generate_random_dataspace(ndims, NULL, NULL, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataspace\n"); + goto error; + } + + HDsprintf(name, "%s%zu", DATASET_SHAPE_TEST_DSET_BASE_NAME, i + 1); + + if ((dset_id = H5Dcreate2(group_id, name, dset_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset\n"); + goto error; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + } + + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset can be created using + * each of the predefined integer and floating-point + * datatypes. 
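+ * Both the little-endian and big-endian variants of the standard 8- to
+ * 64-bit integer types and of the IEEE 32-/64-bit floating-point types
+ * are covered.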
+ */ +static int +test_create_dataset_predefined_types(void) +{ + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t predefined_type_test_table[] = {H5T_STD_U8LE, H5T_STD_U8BE, H5T_STD_I8LE, H5T_STD_I8BE, + H5T_STD_U16LE, H5T_STD_U16BE, H5T_STD_I16LE, H5T_STD_I16BE, + H5T_STD_U32LE, H5T_STD_U32BE, H5T_STD_I32LE, H5T_STD_I32BE, + H5T_STD_U64LE, H5T_STD_U64BE, H5T_STD_I64LE, H5T_STD_I64BE, + H5T_IEEE_F32LE, H5T_IEEE_F32BE, H5T_IEEE_F64LE, H5T_IEEE_F64BE}; + + TESTING("dataset creation with predefined datatypes"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_PREDEFINED_TYPE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create sub-container group '%s'\n", + DATASET_PREDEFINED_TYPE_TEST_SUBGROUP_NAME); + goto error; + } + + for (i = 0; i < ARRAY_LENGTH(predefined_type_test_table); i++) { + char name[100]; + + if ((fspace_id = + generate_random_dataspace(DATASET_PREDEFINED_TYPE_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + HDsprintf(name, "%s%zu", DATASET_PREDEFINED_TYPE_TEST_BASE_NAME, i); + + if ((dset_id = H5Dcreate2(group_id, name, predefined_type_test_table[i], fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", name); + goto error; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + if ((dset_id = H5Dopen2(group_id, name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", name); + goto error; + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset can be created using + * string datatypes. 
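+ * Covers a fixed-length string type created with
+ * H5Tcreate(H5T_STRING, DATASET_STRING_TYPE_TEST_STRING_LENGTH) as well
+ * as a variable-length string type created with H5T_VARIABLE.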
+ */ +static int +test_create_dataset_string_types(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id_fixed = H5I_INVALID_HID, dset_id_variable = H5I_INVALID_HID; + hid_t type_id_fixed = H5I_INVALID_HID, type_id_variable = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING_MULTIPART("dataset creation with string types"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_STRING_TYPE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_STRING_TYPE_TEST_SUBGROUP_NAME); + goto error; + } + + if ((type_id_fixed = H5Tcreate(H5T_STRING, DATASET_STRING_TYPE_TEST_STRING_LENGTH)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create fixed-length string type\n"); + goto error; + } + + if ((type_id_variable = H5Tcreate(H5T_STRING, H5T_VARIABLE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create variable-length string type\n"); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_STRING_TYPE_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dcreate_fixed_string_type) + { + TESTING_2("creation of fixed-size string dataset"); + + if ((dset_id_fixed = H5Dcreate2(group_id, DATASET_STRING_TYPE_TEST_DSET_NAME1, type_id_fixed, + fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create fixed-length string dataset '%s'\n", + DATASET_STRING_TYPE_TEST_DSET_NAME1); + PART_ERROR(H5Dcreate_fixed_string_type); + } + + if (dset_id_fixed >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id_fixed); + } + H5E_END_TRY; + dset_id_fixed = H5I_INVALID_HID; + } + + if ((dset_id_fixed = H5Dopen2(group_id, DATASET_STRING_TYPE_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", DATASET_STRING_TYPE_TEST_DSET_NAME1); + PART_ERROR(H5Dcreate_fixed_string_type); + } + + PASSED(); + } + PART_END(H5Dcreate_fixed_string_type); + + PART_BEGIN(H5Dcreate_variable_string_type) + { + TESTING_2("creation of variable-length string dataset"); + + if ((dset_id_variable = + H5Dcreate2(group_id, DATASET_STRING_TYPE_TEST_DSET_NAME2, type_id_variable, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create variable-length string dataset '%s'\n", + DATASET_STRING_TYPE_TEST_DSET_NAME2); + PART_ERROR(H5Dcreate_variable_string_type); + } + + if (dset_id_variable >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id_variable); + } + H5E_END_TRY; + dset_id_variable = H5I_INVALID_HID; + } + + if ((dset_id_variable = H5Dopen2(group_id, DATASET_STRING_TYPE_TEST_DSET_NAME2, H5P_DEFAULT)) < + 0) { + 
H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", DATASET_STRING_TYPE_TEST_DSET_NAME2); + PART_ERROR(H5Dcreate_variable_string_type); + } + + PASSED(); + } + PART_END(H5Dcreate_variable_string_type); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Tclose(type_id_fixed) < 0) + TEST_ERROR; + if (H5Tclose(type_id_variable) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id_fixed) < 0) + TEST_ERROR; + if (H5Dclose(dset_id_variable) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id_fixed); + H5Tclose(type_id_variable); + H5Sclose(fspace_id); + H5Dclose(dset_id_fixed); + H5Dclose(dset_id_variable); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset can be created using + * a variety of compound datatypes. + */ +static int +test_create_dataset_compound_types(void) +{ + size_t i, j; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t compound_type = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t type_pool[DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES]; + int num_passes; + + TESTING("dataset creation with compound datatypes"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + /* + * Make sure to pre-initialize all the compound field IDs + * so we don't try to close an uninitialized ID value; + * memory checkers will likely complain. + */ + for (j = 0; j < DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES; j++) + type_pool[j] = H5I_INVALID_HID; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_COMPOUND_TYPE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_COMPOUND_TYPE_TEST_SUBGROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_COMPOUND_TYPE_TEST_DSET_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + num_passes = (rand() % DATASET_COMPOUND_TYPE_TEST_MAX_PASSES) + 1; + + for (i = 0; i < (size_t)num_passes; i++) { + size_t num_subtypes; + size_t compound_size = 0; + size_t next_offset = 0; + char dset_name[256]; + + /* + * Also pre-initialize all of the compound field IDs at the + * beginning of each loop so that we don't try to close an + * invalid ID. 
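+         * Entries that were never assigned remain H5I_INVALID_HID, so the
+         * cleanup code can skip them (or fail harmlessly inside
+         * H5E_BEGIN_TRY) without touching a stale ID.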
+ */ + for (j = 0; j < DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES; j++) + type_pool[j] = H5I_INVALID_HID; + + num_subtypes = (size_t)(rand() % DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES) + 1; + + if ((compound_type = H5Tcreate(H5T_COMPOUND, 1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create compound datatype\n"); + goto error; + } + + /* Start adding subtypes to the compound type */ + for (j = 0; j < num_subtypes; j++) { + size_t member_size; + char member_name[256]; + + HDsnprintf(member_name, 256, "member%zu", j); + + if ((type_pool[j] = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create compound datatype member %zu\n", j); + goto error; + } + + if (!(member_size = H5Tget_size(type_pool[j]))) { + H5_FAILED(); + HDprintf(" couldn't get compound member %zu size\n", j); + goto error; + } + + compound_size += member_size; + + if (H5Tset_size(compound_type, compound_size) < 0) + TEST_ERROR; + + if (H5Tinsert(compound_type, member_name, next_offset, type_pool[j]) < 0) + TEST_ERROR; + + next_offset += member_size; + } + + if (H5Tpack(compound_type) < 0) + TEST_ERROR; + + HDsnprintf(dset_name, sizeof(dset_name), "%s%zu", DATASET_COMPOUND_TYPE_TEST_DSET_NAME, i); + + if ((dset_id = H5Dcreate2(group_id, dset_name, compound_type, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", dset_name); + goto error; + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + if ((dset_id = H5Dopen2(group_id, dset_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", dset_name); + goto error; + } + + for (j = 0; j < num_subtypes; j++) + if (type_pool[j] >= 0 && H5Tclose(type_pool[j]) < 0) + TEST_ERROR; + if (H5Tclose(compound_type) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + for (i = 0; i < DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES; i++) + H5Tclose(type_pool[i]); + H5Tclose(compound_type); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset can be created with + * enum datatypes. 
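+ * One enum type is built on the native int type with
+ * H5Tcreate(H5T_ENUM, sizeof(int)) and a second one on the non-native
+ * H5T_STD_U32LE type with H5Tenum_create().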
+ */ +static int +test_create_dataset_enum_types(void) +{ + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id_native = H5I_INVALID_HID, dset_id_non_native = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t enum_native = H5I_INVALID_HID, enum_non_native = H5I_INVALID_HID; + const char *enum_type_test_table[] = {"RED", "GREEN", "BLUE", "BLACK", "WHITE", + "PURPLE", "ORANGE", "YELLOW", "BROWN"}; + + TESTING("dataset creation with enum types"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_ENUM_TYPE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_ENUM_TYPE_TEST_SUBGROUP_NAME); + goto error; + } + + if ((enum_native = H5Tcreate(H5T_ENUM, sizeof(int))) < 0) { + H5_FAILED(); + HDprintf(" couldn't create native enum type\n"); + goto error; + } + + for (i = 0; i < ARRAY_LENGTH(enum_type_test_table); i++) + if (H5Tenum_insert(enum_native, enum_type_test_table[i], &i) < 0) + TEST_ERROR; + + if ((enum_non_native = H5Tenum_create(H5T_STD_U32LE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create non-native enum type\n"); + goto error; + } + + for (i = 0; i < DATASET_ENUM_TYPE_TEST_NUM_MEMBERS; i++) { + char val_name[15]; + + HDsprintf(val_name, "%s%zu", DATASET_ENUM_TYPE_TEST_VAL_BASE_NAME, i); + + if (H5Tenum_insert(enum_non_native, val_name, &i) < 0) + TEST_ERROR; + } + + if ((fspace_id = generate_random_dataspace(DATASET_ENUM_TYPE_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id_native = H5Dcreate2(group_id, DATASET_ENUM_TYPE_TEST_DSET_NAME1, enum_native, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create native enum dataset '%s'\n", DATASET_ENUM_TYPE_TEST_DSET_NAME1); + goto error; + } + + if ((dset_id_non_native = H5Dcreate2(group_id, DATASET_ENUM_TYPE_TEST_DSET_NAME2, enum_non_native, + fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create non-native enum dataset '%s'\n", DATASET_ENUM_TYPE_TEST_DSET_NAME2); + goto error; + } + + if (H5Dclose(dset_id_native) < 0) + TEST_ERROR; + if (H5Dclose(dset_id_non_native) < 0) + TEST_ERROR; + + if ((dset_id_native = H5Dopen2(group_id, DATASET_ENUM_TYPE_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", DATASET_ENUM_TYPE_TEST_DSET_NAME1); + goto error; + } + + if ((dset_id_non_native = H5Dopen2(group_id, DATASET_ENUM_TYPE_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", DATASET_ENUM_TYPE_TEST_DSET_NAME2); + goto error; + } + + if (H5Tclose(enum_native) < 0) + TEST_ERROR; + if (H5Tclose(enum_non_native) 
< 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id_native) < 0) + TEST_ERROR; + if (H5Dclose(dset_id_non_native) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(enum_native); + H5Tclose(enum_non_native); + H5Sclose(fspace_id); + H5Dclose(dset_id_native); + H5Dclose(dset_id_non_native); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset can be created using + * array datatypes. + */ +static int +test_create_dataset_array_types(void) +{ + hsize_t array_dims1[DATASET_ARRAY_TYPE_TEST_RANK1]; + hsize_t array_dims2[DATASET_ARRAY_TYPE_TEST_RANK2]; + hsize_t array_dims3[DATASET_ARRAY_TYPE_TEST_RANK3]; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id1 = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID, dset_id3 = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t array_type_id1 = H5I_INVALID_HID, array_type_id2 = H5I_INVALID_HID, + array_type_id3 = H5I_INVALID_HID; + hid_t array_base_type_id1 = H5I_INVALID_HID, array_base_type_id2 = H5I_INVALID_HID, + array_base_type_id3 = H5I_INVALID_HID; + hid_t nested_type_id = H5I_INVALID_HID; + + TESTING("dataset creation with array types"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_ARRAY_TYPE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_ARRAY_TYPE_TEST_SUBGROUP_NAME); + goto error; + } + + /* Test creation of array with some different types */ + for (i = 0; i < DATASET_ARRAY_TYPE_TEST_RANK1; i++) + array_dims1[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + + if ((array_base_type_id1 = generate_random_datatype(H5T_ARRAY, FALSE)) < 0) + TEST_ERROR; + + if ((array_type_id1 = H5Tarray_create2(array_base_type_id1, DATASET_ARRAY_TYPE_TEST_RANK1, array_dims1)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create first array type\n"); + goto error; + } + + for (i = 0; i < DATASET_ARRAY_TYPE_TEST_RANK2; i++) + array_dims2[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + + if ((array_base_type_id2 = generate_random_datatype(H5T_ARRAY, FALSE)) < 0) + TEST_ERROR; + + if ((array_type_id2 = H5Tarray_create2(array_base_type_id2, DATASET_ARRAY_TYPE_TEST_RANK2, array_dims2)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create second array type\n"); + goto error; + } + + /* Test nested arrays */ + for (i = 0; i < DATASET_ARRAY_TYPE_TEST_RANK3; i++) + array_dims3[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + + if ((array_base_type_id3 
= generate_random_datatype(H5T_ARRAY, FALSE)) < 0) + TEST_ERROR; + + if ((nested_type_id = H5Tarray_create2(array_base_type_id3, DATASET_ARRAY_TYPE_TEST_RANK3, array_dims3)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create nested array base type\n"); + goto error; + } + + if ((array_type_id3 = H5Tarray_create2(nested_type_id, DATASET_ARRAY_TYPE_TEST_RANK3, array_dims3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create nested array type\n"); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_ARRAY_TYPE_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id1 = H5Dcreate2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME1, array_type_id1, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create array type dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME1); + goto error; + } + + if ((dset_id2 = H5Dcreate2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME2, array_type_id2, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create array type dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME2); + goto error; + } + + if ((dset_id3 = H5Dcreate2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME3, array_type_id3, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create nested array type dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME3); + goto error; + } + + if (H5Dclose(dset_id1) < 0) + TEST_ERROR; + if (H5Dclose(dset_id2) < 0) + TEST_ERROR; + if (H5Dclose(dset_id3) < 0) + TEST_ERROR; + + if ((dset_id1 = H5Dopen2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME1); + goto error; + } + + if ((dset_id2 = H5Dopen2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME2); + goto error; + } + + if ((dset_id3 = H5Dopen2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME3); + goto error; + } + + if (H5Tclose(array_base_type_id1) < 0) + TEST_ERROR; + if (H5Tclose(array_base_type_id2) < 0) + TEST_ERROR; + if (H5Tclose(array_base_type_id3) < 0) + TEST_ERROR; + if (H5Tclose(nested_type_id) < 0) + TEST_ERROR; + if (H5Tclose(array_type_id1) < 0) + TEST_ERROR; + if (H5Tclose(array_type_id2) < 0) + TEST_ERROR; + if (H5Tclose(array_type_id3) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id1) < 0) + TEST_ERROR; + if (H5Dclose(dset_id2) < 0) + TEST_ERROR; + if (H5Dclose(dset_id3) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(array_base_type_id1); + H5Tclose(array_base_type_id2); + H5Tclose(array_base_type_id3); + H5Tclose(nested_type_id); + H5Tclose(array_type_id1); + H5Tclose(array_type_id2); + H5Tclose(array_type_id3); + H5Sclose(fspace_id); + H5Dclose(dset_id1); + H5Dclose(dset_id2); + H5Dclose(dset_id3); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of the different + * dataset creation properties. 
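+ * The test parts below exercise the storage allocation time, attribute
+ * creation order, attribute phase change, fill time, filter pipeline,
+ * storage layout, and object time tracking properties of the DCPL.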
+ */ +static int +test_create_dataset_creation_properties(void) +{ + hsize_t dims[DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK]; + hsize_t chunk_dims[DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK]; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID, dcpl_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID, compact_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID, compact_fspace_id = H5I_INVALID_HID; + + TESTING_MULTIPART("dataset creation properties"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILTERS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER) || !(vol_cap_flags_g & H5VL_CAP_FLAG_TRACK_TIMES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FILTERS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, creation order, track time, or filter " + "pipeline aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_CREATION_PROPERTIES_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", DATASET_CREATION_PROPERTIES_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = + generate_random_dataspace(DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK, NULL, dims, FALSE)) < 0) + TEST_ERROR; + if ((compact_fspace_id = + generate_random_dataspace(DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + /* Set chunk dims to be size of dataset - for filters test */ + for (i = 0; i < DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK; i++) + chunk_dims[i] = dims[i]; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if ((compact_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + /* Test the alloc time property */ + PART_BEGIN(DCPL_alloc_time_test) + { + H5D_alloc_time_t alloc_times[] = {H5D_ALLOC_TIME_DEFAULT, H5D_ALLOC_TIME_EARLY, + H5D_ALLOC_TIME_INCR, H5D_ALLOC_TIME_LATE}; + + TESTING_2("dataset storage space allocation time property"); + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create DCPL\n"); + PART_ERROR(DCPL_alloc_time_test); + } + + for (i = 0; i < ARRAY_LENGTH(alloc_times); i++) { + char name[100]; + + if (H5Pset_alloc_time(dcpl_id, alloc_times[i]) < 0) { + H5_FAILED(); + HDprintf(" couldn't set alloc time property value\n"); + PART_ERROR(DCPL_alloc_time_test); + } + + HDsprintf(name, "%s%zu", DATASET_CREATION_PROPERTIES_TEST_ALLOC_TIMES_BASE_NAME, i); + + if ((dset_id = H5Dcreate2(group_id, name, dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", name); + PART_ERROR(DCPL_alloc_time_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + 
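+                /* Re-open the dataset to verify that it is accessible after creation */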
if ((dset_id = H5Dopen2(group_id, name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", name); + PART_ERROR(DCPL_alloc_time_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + } + + if (dcpl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + } + H5E_END_TRY; + dcpl_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(DCPL_alloc_time_test); + + /* Test the attribute creation order property */ + PART_BEGIN(DCPL_attr_crt_order_test) + { + unsigned creation_orders[] = {H5P_CRT_ORDER_TRACKED, + H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED}; + + TESTING_2("attribute creation order property for DCPL"); + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create DCPL\n"); + PART_ERROR(DCPL_attr_crt_order_test); + } + + for (i = 0; i < ARRAY_LENGTH(creation_orders); i++) { + char name[100]; + + if (H5Pset_attr_creation_order(dcpl_id, creation_orders[i]) < 0) { + H5_FAILED(); + HDprintf(" couldn't set attribute creation order property\n"); + PART_ERROR(DCPL_attr_crt_order_test); + } + + HDsprintf(name, "%s%zu", DATASET_CREATION_PROPERTIES_TEST_CRT_ORDER_BASE_NAME, i); + + if ((dset_id = H5Dcreate2(group_id, name, dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", name); + PART_ERROR(DCPL_attr_crt_order_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", name); + PART_ERROR(DCPL_attr_crt_order_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + } + + if (dcpl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + } + H5E_END_TRY; + dcpl_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(DCPL_attr_crt_order_test); + + /* Test the attribute phase change property */ + PART_BEGIN(DCPL_attr_phase_change_test) + { + TESTING_2("attribute phase change property for DCPL"); + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create DCPL\n"); + PART_ERROR(DCPL_attr_phase_change_test); + } + + if (H5Pset_attr_phase_change(dcpl_id, DATASET_CREATION_PROPERTIES_TEST_MAX_COMPACT, + DATASET_CREATION_PROPERTIES_TEST_MIN_DENSE) < 0) { + H5_FAILED(); + HDprintf(" couldn't set attribute phase change property\n"); + PART_ERROR(DCPL_attr_phase_change_test); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_CREATION_PROPERTIES_TEST_PHASE_CHANGE_DSET_NAME, + dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_CREATION_PROPERTIES_TEST_PHASE_CHANGE_DSET_NAME); + PART_ERROR(DCPL_attr_phase_change_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_CREATION_PROPERTIES_TEST_PHASE_CHANGE_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_CREATION_PROPERTIES_TEST_PHASE_CHANGE_DSET_NAME); + PART_ERROR(DCPL_attr_phase_change_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + if (dcpl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + } + H5E_END_TRY; 
+ dcpl_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(DCPL_attr_phase_change_test); + + /* Test the fill time property */ + PART_BEGIN(DCPL_fill_time_property_test) + { + H5D_fill_time_t fill_times[] = {H5D_FILL_TIME_IFSET, H5D_FILL_TIME_ALLOC, H5D_FILL_TIME_NEVER}; + + TESTING_2("dataset fill time property"); + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create DCPL\n"); + PART_ERROR(DCPL_fill_time_property_test); + } + + for (i = 0; i < ARRAY_LENGTH(fill_times); i++) { + char name[100]; + + if (H5Pset_fill_time(dcpl_id, fill_times[i]) < 0) { + H5_FAILED(); + HDprintf(" couldn't set dataset fill time property\n"); + PART_ERROR(DCPL_fill_time_property_test); + } + + HDsprintf(name, "%s%zu", DATASET_CREATION_PROPERTIES_TEST_FILL_TIMES_BASE_NAME, i); + + if ((dset_id = H5Dcreate2(group_id, name, dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", name); + PART_ERROR(DCPL_fill_time_property_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", name); + PART_ERROR(DCPL_fill_time_property_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + } + + if (dcpl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + } + H5E_END_TRY; + dcpl_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(DCPL_fill_time_property_test); + + /* TODO: Test the fill value property */ + + /* Test filters */ + PART_BEGIN(DCPL_filters_test) + { + TESTING_2("dataset filters"); + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create DCPL\n"); + PART_ERROR(DCPL_filters_test); + } + + if (H5Pset_chunk(dcpl_id, DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK, chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" couldn't set chunking on DCPL\n"); + PART_ERROR(DCPL_filters_test); + } + + /* Set all of the available filters on the DCPL */ + if (H5Pset_deflate(dcpl_id, 7) < 0) { + H5_FAILED(); + HDprintf(" couldn't set deflate filter on DCPL\n"); + PART_ERROR(DCPL_filters_test); + } + if (H5Pset_shuffle(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't set shuffle filter on DCPL\n"); + PART_ERROR(DCPL_filters_test); + } + if (H5Pset_fletcher32(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't set fletcher32 filter on DCPL\n"); + PART_ERROR(DCPL_filters_test); + } + if (H5Pset_nbit(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't set nbit filter on DCPL\n"); + PART_ERROR(DCPL_filters_test); + } + if (H5Pset_scaleoffset(dcpl_id, H5Z_SO_FLOAT_ESCALE, 2) < 0) { + H5_FAILED(); + HDprintf(" couldn't set scaleoffset filter on DCPL\n"); + PART_ERROR(DCPL_filters_test); + } + + /* + * Use a simple datatype, as not all filters support all datatypes. 
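+             * All of the filters set above (deflate, shuffle, fletcher32,
+             * nbit, and scaleoffset) also require the chunked layout that was
+             * configured on the DCPL with H5Pset_chunk().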
+ */ + if ((dset_id = H5Dcreate2(group_id, DATASET_CREATION_PROPERTIES_TEST_FILTERS_DSET_NAME, + H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_CREATION_PROPERTIES_TEST_FILTERS_DSET_NAME); + PART_ERROR(DCPL_filters_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_CREATION_PROPERTIES_TEST_FILTERS_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_CREATION_PROPERTIES_TEST_FILTERS_DSET_NAME); + PART_ERROR(DCPL_filters_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if (dcpl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + } + H5E_END_TRY; + dcpl_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(DCPL_filters_test); + + /* Test the dataset storage layout property */ + PART_BEGIN(DCPL_storage_layout_test) + { + H5D_layout_t layouts[] = {H5D_COMPACT, H5D_CONTIGUOUS, H5D_CHUNKED}; + + TESTING_2("dataset storage layouts"); + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create DCPL\n"); + PART_ERROR(DCPL_storage_layout_test); + } + + for (i = 0; i < ARRAY_LENGTH(layouts); i++) { + char name[100]; + + if (H5Pset_layout(dcpl_id, layouts[i]) < 0) { + H5_FAILED(); + HDprintf(" couldn't set storage layout property\n"); + PART_ERROR(DCPL_storage_layout_test); + } + + if (H5D_CHUNKED == layouts[i]) { + hsize_t local_chunk_dims[DATASET_CREATION_PROPERTIES_TEST_CHUNK_DIM_RANK]; + size_t j; + + for (j = 0; j < DATASET_CREATION_PROPERTIES_TEST_CHUNK_DIM_RANK; j++) + local_chunk_dims[j] = (hsize_t)(rand() % (int)dims[j] + 1); + + if (H5Pset_chunk(dcpl_id, DATASET_CREATION_PROPERTIES_TEST_CHUNK_DIM_RANK, + local_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" couldn't set chunk dimensionality\n"); + PART_ERROR(DCPL_storage_layout_test); + } + } + + HDsprintf(name, "%s%zu", DATASET_CREATION_PROPERTIES_TEST_LAYOUTS_BASE_NAME, i); + + if ((dset_id = + H5Dcreate2(group_id, name, (H5D_COMPACT == layouts[i]) ? compact_dtype : dset_dtype, + (H5D_COMPACT == layouts[i]) ? 
compact_fspace_id : fspace_id, H5P_DEFAULT, + dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", name); + PART_ERROR(DCPL_storage_layout_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", name); + PART_ERROR(DCPL_storage_layout_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + } + + if (dcpl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + } + H5E_END_TRY; + dcpl_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(DCPL_storage_layout_test); + + /* Test the "track object times" property */ + PART_BEGIN(DCPL_track_obj_times_test) + { + TESTING_2("object time tracking property for DCPL"); + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create DCPL\n"); + PART_ERROR(DCPL_track_obj_times_test); + } + + if (H5Pset_obj_track_times(dcpl_id, true) < 0) { + H5_FAILED(); + HDprintf(" couldn't set object time tracking property\n"); + PART_ERROR(DCPL_track_obj_times_test); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_YES_DSET_NAME, + dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_YES_DSET_NAME); + PART_ERROR(DCPL_track_obj_times_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_YES_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_YES_DSET_NAME); + PART_ERROR(DCPL_track_obj_times_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if (H5Pset_obj_track_times(dcpl_id, false) < 0) { + H5_FAILED(); + HDprintf(" couldn't set object time tracking property\n"); + PART_ERROR(DCPL_track_obj_times_test); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_NO_DSET_NAME, + dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_NO_DSET_NAME); + PART_ERROR(DCPL_track_obj_times_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_NO_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_NO_DSET_NAME); + PART_ERROR(DCPL_track_obj_times_test); + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + if (dcpl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + } + H5E_END_TRY; + dcpl_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(DCPL_track_obj_times_test); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(compact_fspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(compact_dtype) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + 
TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(compact_fspace_id); + H5Sclose(fspace_id); + H5Tclose(compact_dtype); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Pclose(dcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to create many small datasets (100,000) + */ +static int +test_create_many_dataset(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dataspace_id = H5I_INVALID_HID; + char dset_name[DSET_NAME_BUF_SIZE]; + unsigned char data; + unsigned int i; + + TESTING("creating many datasets"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_MANY_CREATE_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", DATASET_MANY_CREATE_GROUP_NAME); + goto error; + } + + if ((dataspace_id = H5Screate(H5S_SCALAR)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create scalar data space\n"); + goto error; + } + + HDprintf("\n"); + for (i = 0; i < DATASET_NUMB; i++) { + HDprintf("\r %u/%u", i + 1, DATASET_NUMB); + sprintf(dset_name, "dset_%02u", i); + data = i % 256; + + if ((dset_id = H5Dcreate2(group_id, dset_name, H5T_NATIVE_UCHAR, dataspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", dset_name); + goto error; + } + + if (H5Dwrite(dset_id, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", dset_name); + goto error; + } + + if (H5Dclose(dset_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close dataset '%s'\n", dset_name); + goto error; + } + } + + if (H5Sclose(dataspace_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + H5Sclose(dataspace_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that re-opening a dataset with + * H5Dopen succeeds. + */ +static int +test_open_dataset(void) +{ + TESTING("H5Dopen"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check that H5Dopen fails when it is + * passed invalid parameters. 
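+ *
+ * The parts below cover an invalid loc_id, a NULL or empty dataset name,
+ * and an invalid DAPL; each H5Dopen2() call is wrapped in
+ * H5E_BEGIN_TRY/H5E_END_TRY and is expected to return a negative dataset ID.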
+ */ +static int +test_open_dataset_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Dopen with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_OPEN_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", DATASET_OPEN_INVALID_PARAMS_GROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_OPEN_INVALID_PARAMS_SPACE_RANK, NULL, NULL, FALSE)) < + 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_OPEN_INVALID_PARAMS_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_OPEN_INVALID_PARAMS_DSET_NAME); + goto error; + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dopen_invalid_loc_id) + { + TESTING_2("H5Dopen with an invalid loc_id"); + + H5E_BEGIN_TRY + { + dset_id = H5Dopen2(H5I_INVALID_HID, DATASET_OPEN_INVALID_PARAMS_DSET_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" opened dataset using H5Dopen2 with an invalid loc_id!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dopen_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Dopen_invalid_loc_id); + + PART_BEGIN(H5Dopen_invalid_dataset_name) + { + TESTING_2("H5Dopen with an invalid dataset name"); + + H5E_BEGIN_TRY + { + dset_id = H5Dopen2(group_id, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" opened dataset using H5Dopen2 with a NULL dataset name!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dopen_invalid_dataset_name); + } + + H5E_BEGIN_TRY + { + dset_id = H5Dopen2(group_id, "", H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" opened dataset using H5Dopen2 with an invalid dataset name of ''!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dopen_invalid_dataset_name); + } + + PASSED(); + } + PART_END(H5Dopen_invalid_dataset_name); + + PART_BEGIN(H5Dopen_invalid_dapl) + { + TESTING_2("H5Dopen with an invalid DAPL"); + + H5E_BEGIN_TRY + { + dset_id = H5Dopen2(group_id, DATASET_OPEN_INVALID_PARAMS_DSET_NAME, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" opened dataset using H5Dopen2 with an invalid DAPL!\n"); + H5Dclose(dset_id); + PART_ERROR(H5Dopen_invalid_dapl); + } + + PASSED(); + } + PART_END(H5Dopen_invalid_dapl); + } + 
END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that H5Dclose fails when it is + * passed an invalid dataset ID. + */ +static int +test_close_dataset_invalid_params(void) +{ + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + + TESTING("H5Dclose with an invalid dataset ID"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + H5E_BEGIN_TRY + { + err_ret = H5Dclose(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Dclose succeeded with an invalid dataset ID!\n"); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that valid copies of a dataset's dataspace + * and datatype can be retrieved with H5Dget_space and + * H5Dget_type, respectively. + */ +static int +test_get_dataset_space_and_type(void) +{ + hsize_t dset_dims[DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK]; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dset_space_id = H5I_INVALID_HID; + hid_t tmp_type_id = H5I_INVALID_HID; + hid_t tmp_space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("retrieval of a dataset's dataspace and datatype"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_GET_SPACE_TYPE_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", DATASET_GET_SPACE_TYPE_TEST_GROUP_NAME); + goto error; + } + + if ((dset_space_id = + generate_random_dataspace(DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK, NULL, dset_dims, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = 
generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_GET_SPACE_TYPE_TEST_DSET_NAME, dset_dtype, dset_space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_GET_SPACE_TYPE_TEST_DSET_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + /* Retrieve the dataset's datatype and dataspace and verify them */ + PART_BEGIN(H5Dget_type) + { + TESTING_2("H5Dget_type"); + + if ((tmp_type_id = H5Dget_type(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve dataset's datatype\n"); + PART_ERROR(H5Dget_type); + } + + { + htri_t types_equal = H5Tequal(tmp_type_id, dset_dtype); + + if (types_equal < 0) { + H5_FAILED(); + HDprintf(" datatype was invalid\n"); + PART_ERROR(H5Dget_type); + } + + if (!types_equal) { + H5_FAILED(); + HDprintf(" dataset's datatype did not match\n"); + PART_ERROR(H5Dget_type); + } + } + + PASSED(); + } + PART_END(H5Dget_type); + + PART_BEGIN(H5Dget_space) + { + TESTING_2("H5Dget_space"); + + if ((tmp_space_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve dataset's dataspace\n"); + PART_ERROR(H5Dget_space); + } + + { + hsize_t space_dims[DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK]; + + if (H5Sget_simple_extent_dims(tmp_space_id, space_dims, NULL) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve dataspace dimensions\n"); + PART_ERROR(H5Dget_space); + } + + for (i = 0; i < DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK; i++) + if (space_dims[i] != dset_dims[i]) { + H5_FAILED(); + HDprintf(" dataset's dataspace dims didn't match\n"); + PART_ERROR(H5Dget_space); + } + } + + PASSED(); + } + PART_END(H5Dget_space); + + /* Now close the dataset and verify that this still works after + * opening an attribute instead of creating it. 
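+ * (That is, the dataset is re-opened with H5Dopen2() and the
+ * H5Dget_type()/H5Dget_space() checks are repeated on the re-opened handle.)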
+ */ + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + if (tmp_type_id >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(tmp_type_id); + } + H5E_END_TRY; + tmp_type_id = H5I_INVALID_HID; + } + if (tmp_space_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(tmp_space_id); + } + H5E_END_TRY; + tmp_space_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Dget_type_reopened) + { + TESTING_2("H5Dget_type after re-opening a dataset"); + + if ((dset_id = H5Dopen2(group_id, DATASET_GET_SPACE_TYPE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_GET_SPACE_TYPE_TEST_DSET_NAME); + PART_ERROR(H5Dget_type_reopened); + } + + if ((tmp_type_id = H5Dget_type(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve dataset's datatype\n"); + PART_ERROR(H5Dget_type_reopened); + } + + { + htri_t types_equal = H5Tequal(tmp_type_id, dset_dtype); + + if (types_equal < 0) { + H5_FAILED(); + HDprintf(" datatype was invalid\n"); + PART_ERROR(H5Dget_type_reopened); + } + + if (!types_equal) { + H5_FAILED(); + HDprintf(" dataset's datatype did not match\n"); + PART_ERROR(H5Dget_type_reopened); + } + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(H5Dget_type_reopened); + + PART_BEGIN(H5Dget_space_reopened) + { + TESTING_2("H5Dget_space after re-opening a dataset"); + + if ((dset_id = H5Dopen2(group_id, DATASET_GET_SPACE_TYPE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_GET_SPACE_TYPE_TEST_DSET_NAME); + PART_ERROR(H5Dget_space_reopened); + } + + if ((tmp_space_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve dataset's dataspace\n"); + PART_ERROR(H5Dget_space_reopened); + } + + { + hsize_t space_dims[DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK]; + + if (H5Sget_simple_extent_dims(tmp_space_id, space_dims, NULL) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve dataspace dimensions\n"); + PART_ERROR(H5Dget_space_reopened); + } + + for (i = 0; i < DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK; i++) { + if (space_dims[i] != dset_dims[i]) { + H5_FAILED(); + HDprintf(" dataset's dataspace dims didn't match!\n"); + PART_ERROR(H5Dget_space_reopened); + } + } + } + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(H5Dget_space_reopened); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(tmp_space_id) < 0) + TEST_ERROR; + if (H5Sclose(dset_space_id) < 0) + TEST_ERROR; + if (H5Tclose(tmp_type_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(tmp_space_id); + H5Sclose(dset_space_id); + H5Tclose(tmp_type_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset's dataspace and datatype + * can't be retrieved when H5Dget_space and H5Dget_type are passed + * invalid parameters, respectively. 
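+ *
+ * Both calls are made with H5I_INVALID_HID inside H5E_BEGIN_TRY/H5E_END_TRY
+ * and are expected to return a negative identifier.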
+ */ +static int +test_get_dataset_space_and_type_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dset_space_id = H5I_INVALID_HID; + hid_t tmp_type_id = H5I_INVALID_HID; + hid_t tmp_space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Dget_type/H5Dget_space with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", + DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((dset_space_id = generate_random_dataspace(DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_SPACE_RANK, + NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_DSET_NAME, dset_dtype, + dset_space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_DSET_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dget_type_invalid_dset_id) + { + TESTING_2("H5Dget_type with an invalid dset_id"); + + H5E_BEGIN_TRY + { + tmp_type_id = H5Dget_type(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (tmp_type_id >= 0) { + H5_FAILED(); + HDprintf(" retrieved copy of dataset's datatype using an invalid dataset ID!\n"); + PART_ERROR(H5Dget_type_invalid_dset_id); + } + + PASSED(); + } + PART_END(H5Dget_type_invalid_dset_id); + + PART_BEGIN(H5Dget_space_invalid_dset_id) + { + TESTING_2("H5Dget_space with an invalid dset_id"); + + H5E_BEGIN_TRY + { + tmp_space_id = H5Dget_space(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (tmp_space_id >= 0) { + H5_FAILED(); + HDprintf(" retrieved copy of dataset's dataspace using an invalid dataset ID!\n"); + PART_ERROR(H5Dget_space_invalid_dset_id); + } + + PASSED(); + } + PART_END(H5Dget_space_invalid_dset_id); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(dset_space_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(tmp_space_id); + H5Sclose(dset_space_id); + H5Tclose(tmp_type_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + 
H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test for H5Dget_space_status. + */ +static int +test_get_dataset_space_status(void) +{ + TESTING("H5Dget_space_status"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check that a dataset's dataspace allocation + * status can't be retrieved with H5Dget_space_status when + * it is passed invalid parameters. + */ +static int +test_get_dataset_space_status_invalid_params(void) +{ + TESTING("H5Dget_space_status with invalid parameters"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check that a DCPL used for dataset creation + * can be persisted and that a valid copy of that DCPL can + * be retrieved later with a call to H5Dget_create_plist. + * Also tests that a valid copy of a DAPL used for dataset + * access can be retrieved with a call to H5Dget_access_plist. + */ +static int +test_dataset_property_lists(void) +{ + const char *path_prefix = "/test_prefix"; + hsize_t dims[DATASET_PROPERTY_LIST_TEST_SPACE_RANK]; + hsize_t chunk_dims[DATASET_PROPERTY_LIST_TEST_SPACE_RANK]; + size_t i; + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id1 = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID, dset_id3 = H5I_INVALID_HID, + dset_id4 = H5I_INVALID_HID; + hid_t dcpl_id1 = H5I_INVALID_HID, dcpl_id2 = H5I_INVALID_HID; + hid_t dapl_id1 = H5I_INVALID_HID, dapl_id2 = H5I_INVALID_HID; + hid_t dset_dtype1 = H5I_INVALID_HID, dset_dtype2 = H5I_INVALID_HID, dset_dtype3 = H5I_INVALID_HID, + dset_dtype4 = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + char *tmp_prefix = NULL; + char vol_name[5]; + + TESTING_MULTIPART("dataset property list operations"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported " + "with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + /** for DAOS VOL, this test is problematic since auto chunking can be selected, so skip for now */ + if (H5VLget_connector_name(file_id, vol_name, 5) < 0) { + H5_FAILED(); + HDprintf(" couldn't get VOL connector name\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_PROPERTY_LIST_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_PROPERTY_LIST_TEST_SUBGROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(DATASET_PROPERTY_LIST_TEST_SPACE_RANK, NULL, dims, FALSE)) < 0) + TEST_ERROR; + + for (i = 0; i < DATASET_PROPERTY_LIST_TEST_SPACE_RANK; i++) + chunk_dims[i] = (hsize_t)(rand() % (int)dims[i] + 1); + + if ((dset_dtype1 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if ((dset_dtype2 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if 
((dset_dtype3 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if ((dset_dtype4 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dcpl_id1 = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create DCPL\n"); + goto error; + } + + if (H5Pset_chunk(dcpl_id1, DATASET_PROPERTY_LIST_TEST_SPACE_RANK, chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" couldn't set DCPL property\n"); + goto error; + } + + if ((dset_id1 = H5Dcreate2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME1, dset_dtype1, space_id, + H5P_DEFAULT, dcpl_id1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_PROPERTY_LIST_TEST_DSET_NAME1); + goto error; + } + + if ((dset_id2 = H5Dcreate2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME2, dset_dtype2, space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_PROPERTY_LIST_TEST_DSET_NAME2); + goto error; + } + + if (H5Pclose(dcpl_id1) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dget_create_plist) + { + TESTING_2("H5Dget_create_plist"); + + /* Try to receive copies of the two property lists, one which has the property set and one which + * does not */ + if ((dcpl_id1 = H5Dget_create_plist(dset_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Dget_create_plist); + } + + if ((dcpl_id2 = H5Dget_create_plist(dset_id2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Dget_create_plist); + } + + /* Ensure that property list 1 has the property set and property list 2 does not */ + { + hsize_t tmp_chunk_dims[DATASET_PROPERTY_LIST_TEST_SPACE_RANK]; + + HDmemset(tmp_chunk_dims, 0, sizeof(tmp_chunk_dims)); + + if (H5Pget_chunk(dcpl_id1, DATASET_PROPERTY_LIST_TEST_SPACE_RANK, tmp_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" couldn't get DCPL property value\n"); + PART_ERROR(H5Dget_create_plist); + } + + for (i = 0; i < DATASET_PROPERTY_LIST_TEST_SPACE_RANK; i++) + if (tmp_chunk_dims[i] != chunk_dims[i]) { + H5_FAILED(); + HDprintf(" DCPL property values were incorrect\n"); + PART_ERROR(H5Dget_create_plist); + } + + H5E_BEGIN_TRY + { + err_ret = H5Pget_chunk(dcpl_id2, DATASET_PROPERTY_LIST_TEST_SPACE_RANK, tmp_chunk_dims); + } + H5E_END_TRY; + + /* DAOS VOL can auto chunk, so don't fail */ + if (err_ret >= 0 && strcmp(vol_name, "daos") != 0) { + H5_FAILED(); + HDprintf(" property list 2 shouldn't have had chunk dimensionality set (not a chunked " + "layout)\n"); + PART_ERROR(H5Dget_create_plist); + } + } + + PASSED(); + } + PART_END(H5Dget_create_plist); + + PART_BEGIN(H5Dget_access_plist) + { + TESTING_2("H5Dget_access_plist"); + + if ((dapl_id1 = H5Pcreate(H5P_DATASET_ACCESS)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create DAPL\n"); + PART_ERROR(H5Dget_access_plist); + } + + if (H5Pset_efile_prefix(dapl_id1, path_prefix) < 0) { + H5_FAILED(); + HDprintf(" couldn't set DAPL property\n"); + PART_ERROR(H5Dget_access_plist); + } + + if ((dset_id3 = H5Dcreate2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME3, dset_dtype3, space_id, + H5P_DEFAULT, H5P_DEFAULT, dapl_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset\n"); + PART_ERROR(H5Dget_access_plist); + } + + if ((dset_id4 = H5Dcreate2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME4, dset_dtype4, space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset\n"); + PART_ERROR(H5Dget_access_plist); + } + + if 
(dapl_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dapl_id1); + } + H5E_END_TRY; + dapl_id1 = H5I_INVALID_HID; + } + + /* Try to receive copies of the two property lists, one which has the property set and one which + * does not */ + if ((dapl_id1 = H5Dget_access_plist(dset_id3)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Dget_access_plist); + } + + if ((dapl_id2 = H5Dget_access_plist(dset_id4)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Dget_access_plist); + } + + /* Ensure that property list 1 has the property set and property list 2 does not */ + { + ssize_t buf_size = 0; + + if ((buf_size = H5Pget_efile_prefix(dapl_id1, NULL, 0)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve size for property value buffer\n"); + PART_ERROR(H5Dget_access_plist); + } + + if (NULL == (tmp_prefix = (char *)HDcalloc(1, (size_t)buf_size + 1))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for property value\n"); + PART_ERROR(H5Dget_access_plist); + } + + if (H5Pget_efile_prefix(dapl_id1, tmp_prefix, (size_t)buf_size + 1) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve property list value\n"); + PART_ERROR(H5Dget_access_plist); + } + + if (HDstrncmp(tmp_prefix, path_prefix, (size_t)buf_size + 1)) { + H5_FAILED(); + HDprintf(" DAPL values were incorrect!\n"); + PART_ERROR(H5Dget_access_plist); + } + + HDmemset(tmp_prefix, 0, (size_t)buf_size + 1); + + if (H5Pget_efile_prefix(dapl_id2, tmp_prefix, (size_t)buf_size) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve property list value\n"); + PART_ERROR(H5Dget_access_plist); + } + + if (!HDstrncmp(tmp_prefix, path_prefix, (size_t)buf_size + 1)) { + H5_FAILED(); + HDprintf(" DAPL property value was set!\n"); + PART_ERROR(H5Dget_access_plist); + } + } + + PASSED(); + } + PART_END(H5Dget_access_plist); + + /* Now close the property lists and datasets and see if we can still retrieve copies of + * the property lists upon opening (instead of creating) a dataset + */ + if (dcpl_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id1); + } + H5E_END_TRY; + dcpl_id1 = H5I_INVALID_HID; + } + if (dcpl_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id2); + } + H5E_END_TRY; + dcpl_id2 = H5I_INVALID_HID; + } + if (dset_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id1); + } + H5E_END_TRY; + dset_id1 = H5I_INVALID_HID; + } + if (dset_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id2); + } + H5E_END_TRY; + dset_id2 = H5I_INVALID_HID; + } + + PART_BEGIN(H5Dget_create_plist_reopened) + { + TESTING_2("H5Dget_create_plist after re-opening a dataset"); + + if ((dset_id1 = H5Dopen2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_PROPERTY_LIST_TEST_DSET_NAME1); + PART_ERROR(H5Dget_create_plist_reopened); + } + + if ((dset_id2 = H5Dopen2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_PROPERTY_LIST_TEST_DSET_NAME2); + PART_ERROR(H5Dget_create_plist_reopened); + } + + if ((dcpl_id1 = H5Dget_create_plist(dset_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Dget_create_plist_reopened); + } + + if ((dcpl_id2 = H5Dget_create_plist(dset_id2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Dget_create_plist_reopened); + } + + /* Ensure that property list 1 has the property set and property list 2 does not */ + { + hsize_t 
tmp_chunk_dims[DATASET_PROPERTY_LIST_TEST_SPACE_RANK]; + + HDmemset(tmp_chunk_dims, 0, sizeof(tmp_chunk_dims)); + + if (H5Pget_chunk(dcpl_id1, DATASET_PROPERTY_LIST_TEST_SPACE_RANK, tmp_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" couldn't get DCPL property value\n"); + PART_ERROR(H5Dget_create_plist_reopened); + } + + for (i = 0; i < DATASET_PROPERTY_LIST_TEST_SPACE_RANK; i++) + if (tmp_chunk_dims[i] != chunk_dims[i]) { + H5_FAILED(); + HDprintf(" DCPL property values were incorrect\n"); + PART_ERROR(H5Dget_create_plist_reopened); + } + + H5E_BEGIN_TRY + { + err_ret = H5Pget_chunk(dcpl_id2, DATASET_PROPERTY_LIST_TEST_SPACE_RANK, tmp_chunk_dims); + } + H5E_END_TRY; + + /* DAOS VOL can auto chunk, so don't fail */ + if (err_ret >= 0 && strcmp(vol_name, "daos") != 0) { + H5_FAILED(); + HDprintf(" property list 2 shouldn't have had chunk dimensionality set (not a chunked " + "layout)\n"); + PART_ERROR(H5Dget_create_plist_reopened); + } + } + + PASSED(); + } + PART_END(H5Dget_create_plist_reopened); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (tmp_prefix) { + HDfree(tmp_prefix); + tmp_prefix = NULL; + } + + if (H5Pclose(dcpl_id1) < 0) + TEST_ERROR; + if (H5Pclose(dcpl_id2) < 0) + TEST_ERROR; + if (H5Pclose(dapl_id1) < 0) + TEST_ERROR; + if (H5Pclose(dapl_id2) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype1) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype2) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype3) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype4) < 0) + TEST_ERROR; + if (H5Dclose(dset_id1) < 0) + TEST_ERROR; + if (H5Dclose(dset_id2) < 0) + TEST_ERROR; + if (H5Dclose(dset_id3) < 0) + TEST_ERROR; + if (H5Dclose(dset_id4) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (tmp_prefix) + HDfree(tmp_prefix); + H5Pclose(dcpl_id1); + H5Pclose(dcpl_id2); + H5Pclose(dapl_id1); + H5Pclose(dapl_id2); + H5Sclose(space_id); + H5Tclose(dset_dtype1); + H5Tclose(dset_dtype2); + H5Tclose(dset_dtype3); + H5Tclose(dset_dtype4); + H5Dclose(dset_id1); + H5Dclose(dset_id2); + H5Dclose(dset_id3); + H5Dclose(dset_id4); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test for H5Dget_storage_size. + */ +static int +test_get_dataset_storage_size(void) +{ + TESTING("H5Dget_storage_size"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check that a dataset's storage size can't + * be retrieved when H5Dget_storage_size is passed + * invalid parameters. + */ +static int +test_get_dataset_storage_size_invalid_params(void) +{ + TESTING("H5Dget_storage_size with invalid parameters"); + + SKIPPED(); + + return 0; +} + +/* + * A test for H5Dget_chunk_storage_size. + */ +static int +test_get_dataset_chunk_storage_size(void) +{ + TESTING("H5Dget_chunk_storage_size"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check that the size of an allocated chunk in + * a dataset can't be retrieved when H5Dget_chunk_storage_size + * is passed invalid parameters. + */ +static int +test_get_dataset_chunk_storage_size_invalid_params(void) +{ + TESTING("H5Dget_chunk_storage_size with invalid parameters"); + + SKIPPED(); + + return 0; +} + +/* + * A test for H5Dget_offset. 
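+ *
+ * (Currently reported as SKIPPED. A rough sketch of what such a check could
+ * look like, offered only as an illustration: create and write a contiguous
+ * dataset so its storage is allocated, then verify that
+ *
+ *     haddr_t offset = H5Dget_offset(dset_id);
+ *
+ * returns a value other than HADDR_UNDEF, while the same call on an invalid
+ * dataset ID returns HADDR_UNDEF.)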
+ */ +static int +test_get_dataset_offset(void) +{ + TESTING("H5Dget_offset"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check that a dataset's offset can't be + * retrieved when H5Dget_offset is passed invalid + * parameters. + */ +static int +test_get_dataset_offset_invalid_params(void) +{ + TESTING("H5Dget_offset with invalid parameters"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check that a small amount of data can be + * read back from a dataset using an H5S_ALL selection. + */ +static int +test_read_dataset_small_all(void) +{ + hsize_t dims[DATASET_SMALL_READ_TEST_ALL_DSET_SPACE_RANK] = {10, 5, 3}; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *read_buf = NULL; + + TESTING("small read from dataset with H5S_ALL"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_READ_TEST_ALL_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_SMALL_READ_TEST_ALL_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_SMALL_READ_TEST_ALL_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_READ_TEST_ALL_DSET_NAME, + DATASET_SMALL_READ_TEST_ALL_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_READ_TEST_ALL_DSET_NAME); + goto error; + } + + for (i = 0, data_size = 1; i < DATASET_SMALL_READ_TEST_ALL_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_SMALL_READ_TEST_ALL_DSET_DTYPESIZE; + + if (NULL == (read_buf = HDmalloc(data_size))) + TEST_ERROR; + + if (H5Dread(dset_id, DATASET_SMALL_READ_TEST_ALL_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) < + 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_SMALL_READ_TEST_ALL_DSET_NAME); + goto error; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a small amount of data can be + * read back from a dataset using a hyperslab selection. 
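+ *
+ * The file dataspace is 10 x 5 x 3; the selection keeps the full extent of
+ * the first two dimensions and a single element of the third (count[2] = 1),
+ * and the data is read through a matching rank-2 memory dataspace.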
+ */ +static int +test_read_dataset_small_hyperslab(void) +{ + hsize_t start[DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t stride[DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t count[DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t block[DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t dims[DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK] = {10, 5, 3}; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID, fspace_id = H5I_INVALID_HID; + void *read_buf = NULL; + + TESTING("small read from dataset with a hyperslab selection"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_READ_TEST_HYPERSLAB_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_SMALL_READ_TEST_HYPERSLAB_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + if ((mspace_id = H5Screate_simple(DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK - 1, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_NAME, + DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_NAME); + goto error; + } + + for (i = 0; i < DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK; i++) { + start[i] = 0; + stride[i] = 1; + count[i] = dims[i]; + block[i] = 1; + } + + count[2] = 1; + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR; + + for (i = 0, data_size = 1; i < DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK - 1; i++) + data_size *= dims[i]; + data_size *= DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_DTYPESIZE; + + if (NULL == (read_buf = HDmalloc(data_size))) + TEST_ERROR; + + if (H5Dread(dset_id, DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_NAME); + goto error; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + 
H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a small amount of data can be + * read back from a dataset using a point selection. + */ +static int +test_read_dataset_small_point_selection(void) +{ + hsize_t points[DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS * + DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK]; + hsize_t dims[DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK] = {10, 10, 10}; + hsize_t mspace_dims[] = {DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS}; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *data = NULL; + + TESTING("small read from dataset with a point selection"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_READ_TEST_POINT_SELECTION_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_SMALL_READ_TEST_POINT_SELECTION_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK, dims, NULL)) < + 0) + TEST_ERROR; + if ((mspace_id = H5Screate_simple(1, mspace_dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_NAME, + DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_NAME); + goto error; + } + + data_size = DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS * + DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE; + + if (NULL == (data = HDmalloc(data_size))) + TEST_ERROR; + + for (i = 0; i < DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS; i++) { + size_t j; + + for (j = 0; j < DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK; j++) + points[(i * DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK) + j] = i; + } + + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS, + points) < 0) { + H5_FAILED(); + HDprintf(" couldn't select points\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, data) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_NAME); + goto error; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (H5Sclose(mspace_id) < 0) + 
TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Tests point selection I/O with different patterns + */ +#define DATASET_IO_POINT_DIM_0 6 +#define DATASET_IO_POINT_DIM_1 9 +#define DATASET_IO_POINT_CDIM_0 4 +#define DATASET_IO_POINT_CDIM_1 3 +#define DATASET_IO_POINT_NPOINTS 10 +#define DATASET_IO_POINT_GEN_POINTS(POINTS, I, J) \ + { \ + for ((I) = 0; (I) < DATASET_IO_POINT_NPOINTS; (I)++) \ + do { \ + (POINTS)[2 * (I)] = (hsize_t)(rand() % DATASET_IO_POINT_DIM_0); \ + (POINTS)[2 * (I) + 1] = (hsize_t)(rand() % DATASET_IO_POINT_DIM_1); \ + for ((J) = 0; ((J) < (I)) && (((POINTS)[2 * (I)] != (POINTS)[2 * (J)]) || \ + ((POINTS)[2 * (I) + 1] != (POINTS)[2 * (J) + 1])); \ + (J)++) \ + ; \ + } while ((J) < (I)); \ + } +static int +test_dataset_io_point_selections(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t mspace_id_full = H5I_INVALID_HID, mspace_id_all = H5I_INVALID_HID, fspace_id = H5I_INVALID_HID; + hid_t dcpl_id_chunk = H5I_INVALID_HID; + hsize_t dims[2] = {DATASET_IO_POINT_DIM_0, DATASET_IO_POINT_DIM_1}; + hsize_t cdims[2] = {DATASET_IO_POINT_CDIM_0, DATASET_IO_POINT_CDIM_1}; + hsize_t points[DATASET_IO_POINT_NPOINTS * 2]; + hsize_t points2[DATASET_IO_POINT_NPOINTS * 2]; + hsize_t npoints = DATASET_IO_POINT_NPOINTS; + hsize_t start[2] = {1, 2}; + hsize_t stride[2] = {2, 5}; + hsize_t count[2] = {2, 1}; + hsize_t block[2] = {1, 5}; + int buf_all[DATASET_IO_POINT_DIM_0][DATASET_IO_POINT_DIM_1]; + int file_state[DATASET_IO_POINT_DIM_0][DATASET_IO_POINT_DIM_1]; + int erbuf[DATASET_IO_POINT_DIM_0][DATASET_IO_POINT_DIM_1]; + int buf_point[DATASET_IO_POINT_NPOINTS]; + hbool_t do_chunk; + int i, j; + + TESTING("point selection I/O with all selection in memory and points in file"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + /* Create dataspaces and DCPL */ + if ((mspace_id_full = H5Screate_simple(2, dims, NULL)) < 0) + TEST_ERROR; + if ((mspace_id_all = H5Screate_simple(1, &npoints, NULL)) < 0) + TEST_ERROR; + if ((fspace_id = H5Screate_simple(2, dims, NULL)) < 0) + TEST_ERROR; + if ((dcpl_id_chunk = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + /* Enable chunking on chunk DCPL */ + if (H5Pset_chunk(dcpl_id_chunk, 2, cdims) < 0) + TEST_ERROR; + + /* Open file */ + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Open container group */ + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Create group */ + if ((group_id = H5Gcreate2(container_group, DATASET_IO_POINT_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Perform with and without chunking */ + for (do_chunk = 
FALSE;; do_chunk = TRUE) { + if (do_chunk) { + TESTING("point selection I/O with all selection in memory and points in file with chunking"); + + /* Create chunked dataset */ + if ((dset_id = H5Dcreate2(group_id, DATASET_IO_POINT_DSET_NAME_CHUNK, H5T_NATIVE_INT, fspace_id, + H5P_DEFAULT, dcpl_id_chunk, H5P_DEFAULT)) < 0) + TEST_ERROR; + } /* end if */ + else + /* Create non-chunked dataset */ + if ((dset_id = H5Dcreate2(group_id, DATASET_IO_POINT_DSET_NAME_NOCHUNK, H5T_NATIVE_INT, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Fill write buffer */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + buf_all[i][j] = rand(); + + /* Write data */ + if (H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to write entire dataset"); + + /* Update file_state */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + file_state[i][j] = buf_all[i][j]; + + /* Generate points to read */ + DATASET_IO_POINT_GEN_POINTS(points, i, j); + + /* Select points */ + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0) + TEST_ERROR; + + /* Wipe read buffer */ + memset(buf_point, 0, sizeof(buf_point)); + + /* Read points to "all" memory buffer */ + if (H5Dread(dset_id, H5T_NATIVE_INT, mspace_id_all, fspace_id, H5P_DEFAULT, buf_point) < 0) + FAIL_PUTS_ERROR("Failed to read points from dataset to all memory buffer"); + + /* Verify data */ + for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++) + if (buf_point[i] != file_state[points[2 * i]][points[2 * i + 1]]) + FAIL_PUTS_ERROR("Incorrect data read from points to all memory buffer"); + + /* Generate points to write */ + DATASET_IO_POINT_GEN_POINTS(points, i, j); + + /* Select points */ + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0) + TEST_ERROR; + + /* Fill write buffer */ + for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++) + buf_point[i] = rand(); + + /* Write points from "all" memory buffer */ + if (H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id_all, fspace_id, H5P_DEFAULT, buf_point) < 0) + FAIL_PUTS_ERROR("Failed to write points to dataset from all memory buffer"); + + /* Update file state */ + for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++) + file_state[points[2 * i]][points[2 * i + 1]] = buf_point[i]; + + /* Wipe read buffer */ + memset(buf_all, 0, sizeof(buf_all)); + + /* Read entire dataset */ + if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to read entire dataset"); + + /* Verify data */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + if (buf_all[i][j] != file_state[i][j]) + FAIL_PUTS_ERROR("Incorrect data found after writing from all memory buffer to points"); + + PASSED(); + + if (do_chunk) + TESTING("point selection I/O with points in memory and file (same shape) with chunking"); + else + TESTING("point selection I/O with points in memory and file (same shape)"); + + /* Generate points to read */ + DATASET_IO_POINT_GEN_POINTS(points, i, j); + + /* Select points */ + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0) + TEST_ERROR; + + /* Wipe read buffer */ + memset(buf_all, 0, sizeof(buf_all)); + + /* Generate expected read buffer */ + memset(erbuf, 0, sizeof(erbuf)); + for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++) + erbuf[points[2 * i]][points[2 * i + 1]] = file_state[points[2 
* i]][points[2 * i + 1]]; + + /* Read data points->points */ + if (H5Dread(dset_id, H5T_NATIVE_INT, fspace_id, fspace_id, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to read points from dataset to points in memory buffer"); + + /* Verify data */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + if (buf_all[i][j] != erbuf[i][j]) + FAIL_PUTS_ERROR("Incorrect data found read from points in file to points in memory"); + + /* Generate points to write */ + DATASET_IO_POINT_GEN_POINTS(points, i, j); + + /* Select points */ + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0) + TEST_ERROR; + + /* Fill write buffer */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + buf_all[i][j] = rand(); + + /* Write data points->points */ + if (H5Dwrite(dset_id, H5T_NATIVE_INT, fspace_id, fspace_id, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to write from in memory to points in dataset"); + + /* Update file_state */ + for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++) + file_state[points[2 * i]][points[2 * i + 1]] = buf_all[points[2 * i]][points[2 * i + 1]]; + + /* Wipe read buffer */ + memset(buf_all, 0, sizeof(buf_all)); + + /* Read entire dataset */ + if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to read entire dataset"); + + /* Verify data */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + if (buf_all[i][j] != file_state[i][j]) + FAIL_PUTS_ERROR( + "Incorrect data found after writing from points in memory to points in dataset"); + + PASSED(); + + if (do_chunk) + TESTING("point selection I/O with points in memory and file (different shape) with chunking"); + else + TESTING("point selection I/O with points in memory and file (different shape)"); + + /* Generate points to read */ + DATASET_IO_POINT_GEN_POINTS(points, i, j); + DATASET_IO_POINT_GEN_POINTS(points2, i, j); + + /* Select points */ + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0) + TEST_ERROR; + if (H5Sselect_elements(mspace_id_full, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points2) < 0) + TEST_ERROR; + + /* Wipe read buffer */ + memset(buf_all, 0, sizeof(buf_all)); + + /* Generate expected read buffer */ + memset(erbuf, 0, sizeof(erbuf)); + for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++) + erbuf[points2[2 * i]][points2[2 * i + 1]] = file_state[points[2 * i]][points[2 * i + 1]]; + + /* Read data points->points */ + if (H5Dread(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to read points from dataset to points in memory buffer"); + + /* Verify data */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + if (buf_all[i][j] != erbuf[i][j]) + FAIL_PUTS_ERROR( + "Incorrect data found after reading from points in file to points in memory"); + + /* Generate points to write */ + DATASET_IO_POINT_GEN_POINTS(points, i, j); + DATASET_IO_POINT_GEN_POINTS(points2, i, j); + + /* Select points */ + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0) + TEST_ERROR; + if (H5Sselect_elements(mspace_id_full, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points2) < 0) + TEST_ERROR; + + /* Fill write buffer */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + buf_all[i][j] = rand(); + + /* Write data 
points->points */ + if (H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to write from points in memory to points in dataset"); + + /* Update file_state */ + for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++) + file_state[points[2 * i]][points[2 * i + 1]] = buf_all[points2[2 * i]][points2[2 * i + 1]]; + + /* Wipe read buffer */ + memset(buf_all, 0, sizeof(buf_all)); + + /* Read entire dataset */ + if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to read entire dataset"); + + /* Verify data */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + if (buf_all[i][j] != file_state[i][j]) + FAIL_PUTS_ERROR( + "Incorrect data found after writing from points in memory to points in dataset"); + + PASSED(); + + if (do_chunk) + TESTING("point selection I/O with hyperslab in memory and points in file with chunking"); + else + TESTING("point selection I/O with hyperslab in memory and points in file"); + + /* Generate points to read */ + DATASET_IO_POINT_GEN_POINTS(points, i, j); + + /* Select points */ + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0) + TEST_ERROR; + + /* Select hyperslab */ + if (H5Sselect_hyperslab(mspace_id_full, H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR; + + /* Wipe read buffer */ + memset(buf_all, 0, sizeof(buf_all)); + + /* Generate expected read buffer */ + memset(erbuf, 0, sizeof(erbuf)); + for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++) + erbuf[start[0] + (stride[0] * ((hsize_t)i / block[1]))][start[1] + ((hsize_t)i % block[1])] = + file_state[points[2 * i]][points[2 * i + 1]]; + + /* Read data points->hslab */ + if (H5Dread(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to read points from dataset to hyperslab in memory buffer"); + + /* Verify data */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + if (buf_all[i][j] != erbuf[i][j]) + FAIL_PUTS_ERROR( + "Incorrect data found after reading from points in file to hyperslab in memory"); + + /* Generate points to write */ + DATASET_IO_POINT_GEN_POINTS(points, i, j); + + /* Select points */ + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0) + TEST_ERROR; + + /* Fill write buffer */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + buf_all[i][j] = rand(); + + /* Write data hlsab->points */ + if (H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to write from hyperslab in memory to points in dataset"); + + /* Update file_state */ + for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++) + file_state[points[2 * i]][points[2 * i + 1]] = + buf_all[start[0] + (stride[0] * ((hsize_t)i / block[1]))][start[1] + ((hsize_t)i % block[1])]; + + /* Wipe read buffer */ + memset(buf_all, 0, sizeof(buf_all)); + + /* Read entire dataset */ + if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to read entire dataset"); + + /* Verify data */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + if (buf_all[i][j] != file_state[i][j]) + FAIL_PUTS_ERROR( + "Incorrect data found after writing from hyperslab in memory to points in dataset"); + + PASSED(); + + if (do_chunk) + TESTING("point 
selection I/O with points in memory and hyperslab in file with chunking"); + else + TESTING("point selection I/O with points in memory and hyperslab in file"); + + /* Generate points to read */ + DATASET_IO_POINT_GEN_POINTS(points, i, j); + + /* Select points */ + if (H5Sselect_elements(mspace_id_full, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0) + TEST_ERROR; + + /* Select hyperslab */ + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR; + + /* Wipe read buffer */ + memset(buf_all, 0, sizeof(buf_all)); + + /* Generate expected read buffer */ + memset(erbuf, 0, sizeof(erbuf)); + for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++) + erbuf[points[2 * i]][points[2 * i + 1]] = + file_state[start[0] + (stride[0] * ((hsize_t)i / block[1]))] + [start[1] + ((hsize_t)i % block[1])]; + + /* Read data hslab->points */ + if (H5Dread(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to read hyperslab from dataset to points in memory buffer"); + + /* Verify data */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + if (buf_all[i][j] != erbuf[i][j]) + FAIL_PUTS_ERROR( + "Incorrect data found after reading from hyperslab in file to points in memory"); + + /* Generate points to write */ + DATASET_IO_POINT_GEN_POINTS(points, i, j); + + /* Select points */ + if (H5Sselect_elements(mspace_id_full, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0) + TEST_ERROR; + + /* Fill write buffer */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + buf_all[i][j] = rand(); + + /* Write data points->hslab */ + if (H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to write from points in memory to hyperslab in dataset"); + + /* Update file_state */ + for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++) + file_state[start[0] + (stride[0] * ((hsize_t)i / block[1]))][start[1] + ((hsize_t)i % block[1])] = + buf_all[points[2 * i]][points[2 * i + 1]]; + + /* Wipe read buffer */ + memset(buf_all, 0, sizeof(buf_all)); + + /* Read entire dataset */ + if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0) + FAIL_PUTS_ERROR("Failed to read entire dataset"); + + /* Verify data */ + for (i = 0; i < DATASET_IO_POINT_DIM_0; i++) + for (j = 0; j < DATASET_IO_POINT_DIM_1; j++) + if (buf_all[i][j] != file_state[i][j]) + FAIL_PUTS_ERROR( + "Incorrect data found after writing from points in memory to hyperslab in dataset"); + + if (!do_chunk) + PASSED(); + + /* Close dataset */ + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + /* Exit after chunked run */ + if (do_chunk) + break; + } /* end for */ + + /* Close */ + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + if (H5Pclose(dcpl_id_chunk) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id_full) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id_all) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Sclose(mspace_id_full); + H5Sclose(mspace_id_all); + H5Pclose(dcpl_id_chunk); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} /* end test_dataset_io_point_selections() */ + +#ifndef NO_LARGE_TESTS +/* + * A test to check that a large amount of data can be + * read 
back from a dataset using an H5S_ALL selection. + */ +static int +test_read_dataset_large_all(void) +{ + hsize_t dims[DATASET_LARGE_READ_TEST_ALL_DSET_SPACE_RANK] = {600, 600, 600}; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *read_buf = NULL; + + TESTING("large read from dataset with H5S_ALL"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_LARGE_READ_TEST_ALL_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_LARGE_READ_TEST_ALL_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_LARGE_READ_TEST_ALL_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_LARGE_READ_TEST_ALL_DSET_NAME, + DATASET_LARGE_READ_TEST_ALL_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_LARGE_READ_TEST_ALL_DSET_NAME); + goto error; + } + + for (i = 0, data_size = 1; i < DATASET_LARGE_READ_TEST_ALL_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_LARGE_READ_TEST_ALL_DSET_DTYPESIZE; + + if (NULL == (read_buf = HDmalloc(data_size))) + TEST_ERROR; + + if (H5Dread(dset_id, DATASET_LARGE_READ_TEST_ALL_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) < + 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_LARGE_READ_TEST_ALL_DSET_NAME); + goto error; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a large amount of data can be + * read back from a dataset using a hyperslab selection. 
+ */ +static int +test_read_dataset_large_hyperslab(void) +{ + hsize_t start[DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t stride[DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t count[DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t block[DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t dims[DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK] = {600, 600, 600}; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID, fspace_id = H5I_INVALID_HID; + void *read_buf = NULL; + + TESTING("large read from dataset with a hyperslab selection"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_LARGE_READ_TEST_HYPERSLAB_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_LARGE_READ_TEST_HYPERSLAB_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + if ((mspace_id = H5Screate_simple(DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_NAME, + DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_NAME); + goto error; + } + + for (i = 0; i < DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK; i++) { + start[i] = 0; + stride[i] = 1; + count[i] = dims[i]; + block[i] = 1; + } + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR; + + for (i = 0, data_size = 1; i < DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_DTYPESIZE; + + if (NULL == (read_buf = HDmalloc(data_size))) + TEST_ERROR; + + if (H5Dread(dset_id, DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_NAME); + goto error; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + 
H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a large amount of data can be + * read back from a dataset using a large point selection. + * + * XXX: Test takes up significant amounts of memory. + */ +static int +test_read_dataset_large_point_selection(void) +{ + hsize_t *points = NULL; + hsize_t dims[DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK] = {225000000}; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *data = NULL; + + TESTING("large read from dataset with a point selection"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_LARGE_READ_TEST_POINT_SELECTION_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_LARGE_READ_TEST_POINT_SELECTION_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK, dims, NULL)) < + 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_NAME, + DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_NAME); + goto error; + } + + for (i = 0, data_size = 1; i < DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE; + + if (NULL == (data = HDmalloc(data_size))) + TEST_ERROR; + if (NULL == + (points = HDmalloc((data_size / DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE) * + ((DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK) * (sizeof(hsize_t)))))) + TEST_ERROR; + + /* Select the entire dataspace */ + for (i = 0; i < data_size / DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE; i++) { + points[i] = i; + } + + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, + data_size / DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE, points) < 0) { + H5_FAILED(); + HDprintf(" couldn't select points\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPE, H5S_ALL, fspace_id, H5P_DEFAULT, + data) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_NAME); + goto error; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (points) { + HDfree(points); + points = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if 
(H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + if (points) + HDfree(points); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} +#endif + +/* + * A test to check that data can't be read from a + * dataset when H5Dread is passed invalid parameters. + */ +static int +test_read_dataset_invalid_params(void) +{ + hsize_t dims[DATASET_READ_INVALID_PARAMS_TEST_DSET_SPACE_RANK] = {10, 5, 3}; + herr_t err_ret = -1; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *read_buf = NULL; + + TESTING_MULTIPART("H5Dread with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_READ_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_READ_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_READ_INVALID_PARAMS_TEST_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_READ_INVALID_PARAMS_TEST_DSET_NAME, + DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_READ_INVALID_PARAMS_TEST_DSET_NAME); + goto error; + } + + for (i = 0, data_size = 1; i < DATASET_READ_INVALID_PARAMS_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPESIZE; + + if (NULL == (read_buf = HDmalloc(data_size))) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dread_invalid_dset_id) + { + TESTING_2("H5Dread with an invalid dataset ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Dread(H5I_INVALID_HID, DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, + H5S_ALL, H5P_DEFAULT, read_buf); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" read from dataset using H5Dread with an invalid dataset ID!\n"); + PART_ERROR(H5Dread_invalid_dset_id); + } + + PASSED(); + } + PART_END(H5Dread_invalid_dset_id); + + PART_BEGIN(H5Dread_invalid_datatype) + { + TESTING_2("H5Dread with an invalid memory datatype"); + + H5E_BEGIN_TRY + { + err_ret = H5Dread(dset_id, H5I_INVALID_HID, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" read from dataset using H5Dread with an invalid 
memory datatype!\n"); + PART_ERROR(H5Dread_invalid_datatype); + } + + PASSED(); + } + PART_END(H5Dread_invalid_datatype); + + PART_BEGIN(H5Dread_invalid_mem_dataspace) + { + TESTING_2("H5Dread with an invalid memory dataspace"); + + H5E_BEGIN_TRY + { + err_ret = H5Dread(dset_id, DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, H5I_INVALID_HID, + H5S_ALL, H5P_DEFAULT, read_buf); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" read from dataset using H5Dread with an invalid memory dataspace!\n"); + PART_ERROR(H5Dread_invalid_mem_dataspace); + } + + PASSED(); + } + PART_END(H5Dread_invalid_mem_dataspace); + + PART_BEGIN(H5Dread_invalid_file_dataspace) + { + TESTING_2("H5Dread with an invalid file dataspace"); + + H5E_BEGIN_TRY + { + err_ret = H5Dread(dset_id, DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, + H5I_INVALID_HID, H5P_DEFAULT, read_buf); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" read from dataset using H5Dread with an invalid file dataspace!\n"); + PART_ERROR(H5Dread_invalid_file_dataspace); + } + + PASSED(); + } + PART_END(H5Dread_invalid_file_dataspace); + + PART_BEGIN(H5Dread_invalid_dxpl) + { + TESTING_2("H5Dread with an invalid DXPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Dread(dset_id, DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, + H5I_INVALID_HID, read_buf); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" read from dataset using H5Dread with an invalid DXPL!\n"); + PART_ERROR(H5Dread_invalid_dxpl); + } + + PASSED(); + } + PART_END(H5Dread_invalid_dxpl); + + PART_BEGIN(H5Dread_invalid_data_buf) + { + TESTING_2("H5Dread with an invalid data buffer"); + + H5E_BEGIN_TRY + { + err_ret = H5Dread(dset_id, DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" read from dataset using H5Dread with an invalid data buffer!\n"); + PART_ERROR(H5Dread_invalid_data_buf); + } + + PASSED(); + } + PART_END(H5Dread_invalid_data_buf); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a small write can be + * made to a dataset using an H5S_ALL selection. 
+ */ +static int +test_write_dataset_small_all(void) +{ + hssize_t space_npoints; + hsize_t dims[DATASET_SMALL_WRITE_TEST_ALL_DSET_SPACE_RANK] = {10, 5, 3}; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *data = NULL; + + TESTING("small write to dataset with H5S_ALL"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_WRITE_TEST_ALL_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_SMALL_WRITE_TEST_ALL_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_SMALL_WRITE_TEST_ALL_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME, + DATASET_SMALL_WRITE_TEST_ALL_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME); + goto error; + } + + /* Close the dataset and dataspace to ensure that writing works correctly in this manner */ + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + + if ((dset_id = H5Dopen2(group_id, DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == (data = HDmalloc((hsize_t)space_npoints * DATASET_SMALL_WRITE_TEST_ALL_DSET_DTYPESIZE))) + TEST_ERROR; + + for (i = 0; i < (hsize_t)space_npoints; i++) + ((int *)data)[i] = (int)i; + + if (H5Dwrite(dset_id, DATASET_SMALL_WRITE_TEST_ALL_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME); + goto error; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + 
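The selection-based write and read tests above and below all drive the same two dataspace calls: H5Sselect_elements(), which expects one flattened, row-major coordinate array of num_elements * rank values, and H5Sselect_hyperslab(), which takes per-dimension start/stride/count/block arrays. A minimal standalone sketch of that pattern follows, using hypothetical sizes and literal values rather than the DATASET_* constants these tests define:

#include "hdf5.h"
#include <stdlib.h>

/* Hypothetical, self-contained sketch of the two selection calls used by the
 * tests in this file: a point selection from a flattened coordinate array,
 * then a hyperslab selection described by start/stride/count/block. */
int
main(void)
{
    hsize_t dims[2]   = {10, 10};
    /* Three diagonal points (0,0), (1,1), (2,2), flattened as coordinate pairs */
    hsize_t points[6] = {0, 0, 1, 1, 2, 2};
    /* Rows 0-1, all 10 columns */
    hsize_t start[2]  = {0, 0};
    hsize_t stride[2] = {1, 1};
    hsize_t count[2]  = {2, 10};
    hsize_t block[2]  = {1, 1};
    hid_t   fspace_id;

    if ((fspace_id = H5Screate_simple(2, dims, NULL)) < 0)
        return EXIT_FAILURE;

    /* Point selection: 3 elements selected */
    if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, 3, points) < 0)
        return EXIT_FAILURE;

    /* Replace it with a hyperslab selection: 2 x 10 = 20 elements selected */
    if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0)
        return EXIT_FAILURE;

    H5Sclose(fspace_id);

    return EXIT_SUCCESS;
}

With block held at 1 in every dimension, count alone determines how many elements are selected along each axis, which is how the small and large hyperslab tests here size their selections.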
+/* + * A test to check that a small write can be made + * to a dataset using a hyperslab selection. + */ +static int +test_write_dataset_small_hyperslab(void) +{ + hsize_t start[DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t stride[DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t count[DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t block[DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t dims[DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK] = {10, 5, 3}; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID, fspace_id = H5I_INVALID_HID; + void *data = NULL; + + TESTING("small write to dataset with a hyperslab selection"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_WRITE_TEST_HYPERSLAB_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_SMALL_WRITE_TEST_HYPERSLAB_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + if ((mspace_id = H5Screate_simple(DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK - 1, dims, NULL)) < + 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_NAME, + DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_NAME); + goto error; + } + + for (i = 0, data_size = 1; i < DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK - 1; i++) + data_size *= dims[i]; + data_size *= DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE; + + if (NULL == (data = HDmalloc(data_size))) + TEST_ERROR; + + for (i = 0; i < data_size / DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE; i++) + ((int *)data)[i] = (int)i; + + for (i = 0; i < DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK; i++) { + start[i] = 0; + stride[i] = 1; + count[i] = dims[i]; + block[i] = 1; + } + + count[2] = 1; + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR; + + if (H5Dwrite(dset_id, DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT, + data) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_NAME); + goto error; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + 
TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a small write can be made + * to a dataset using a point selection. + */ +static int +test_write_dataset_small_point_selection(void) +{ + hsize_t points[DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS * + DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK]; + hsize_t dims[DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK] = {10, 10, 10}; + hsize_t mdims[] = {DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS}; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *data = NULL; + + TESTING("small write to dataset with a point selection"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_WRITE_TEST_POINT_SELECTION_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_SMALL_WRITE_TEST_POINT_SELECTION_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK, dims, NULL)) < + 0) + TEST_ERROR; + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_NAME, + DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_NAME); + goto error; + } + + data_size = DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS * + DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPESIZE; + + if (NULL == (data = HDmalloc(data_size))) + TEST_ERROR; + + for (i = 0; i < data_size / DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPESIZE; i++) + ((int *)data)[i] = (int)i; + + for (i = 0; i < DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS; i++) { + size_t j; + + for (j = 0; j < DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK; j++) + points[(i * DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK) + j] = i; + } + + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS, + points) < 0) { + H5_FAILED(); + HDprintf(" couldn't select points\n"); + goto error; + } + + if (H5Dwrite(dset_id, 
DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, data) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_NAME); + goto error; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +#ifndef NO_LARGE_TESTS +/* + * A test to check that a large write can be made + * to a dataset using an H5S_ALL selection. + */ +static int +test_write_dataset_large_all(void) +{ + hssize_t space_npoints; + hsize_t dims[DATASET_LARGE_WRITE_TEST_ALL_DSET_SPACE_RANK] = {600, 600, 600}; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *data = NULL; + + TESTING("large write to dataset with H5S_ALL"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_LARGE_WRITE_TEST_ALL_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_LARGE_WRITE_TEST_ALL_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_LARGE_WRITE_TEST_ALL_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME, + DATASET_LARGE_WRITE_TEST_ALL_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME); + goto error; + } + + /* Close the dataset and dataspace to ensure that retrieval of file space ID is working */ + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + + if ((dset_id = H5Dopen2(group_id, DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num 
points\n"); + goto error; + } + + if (NULL == (data = HDmalloc((hsize_t)space_npoints * DATASET_LARGE_WRITE_TEST_ALL_DSET_DTYPESIZE))) + TEST_ERROR; + + for (i = 0; i < (hsize_t)space_npoints; i++) + ((int *)data)[i] = (int)i; + + if (H5Dwrite(dset_id, DATASET_LARGE_WRITE_TEST_ALL_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME); + goto error; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a large write can be made + * to a dataset using a hyperslab selection. + */ +static int +test_write_dataset_large_hyperslab(void) +{ + hsize_t start[DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t stride[DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t count[DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t block[DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK]; + hsize_t dims[DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK] = {600, 600, 600}; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID, fspace_id = H5I_INVALID_HID; + void *data = NULL; + + TESTING("large write to dataset with a hyperslab selection"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_LARGE_WRITE_TEST_HYPERSLAB_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_LARGE_WRITE_TEST_HYPERSLAB_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + if ((mspace_id = H5Screate_simple(DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_NAME, + DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_NAME); + goto error; + } + + for (i = 0, data_size = 1; i < DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= 
DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE; + + if (NULL == (data = HDmalloc(data_size))) + TEST_ERROR; + + for (i = 0; i < data_size / DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE; i++) + ((int *)data)[i] = (int)i; + + for (i = 0; i < DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK; i++) { + start[i] = 0; + stride[i] = 1; + count[i] = dims[i]; + block[i] = 1; + } + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR; + + if (H5Dwrite(dset_id, DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT, + data) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_NAME); + goto error; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a large write can be made + * to a dataset using a point selection. + */ +static int +test_write_dataset_large_point_selection(void) +{ + TESTING("large write to dataset with a point selection"); + + SKIPPED(); + + return 0; + +error: + return 1; +} +#endif + +/* + * A test to ensure that data is read back correctly from + * a dataset after it has been written. + */ +static int +test_write_dataset_data_verification(void) +{ + hssize_t space_npoints; + hsize_t dims[DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK] = {10, 10, 10}; + hsize_t start[DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK]; + hsize_t stride[DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK]; + hsize_t count[DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK]; + hsize_t block[DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK]; + hsize_t + points[DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS * DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *data = NULL; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING_MULTIPART("verification of dataset data using H5Dwrite then H5Dread"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, 
DATASET_DATA_VERIFY_WRITE_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME, + DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + goto error; + } + + for (i = 0, data_size = 1; i < DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE; + + if (NULL == (data = HDmalloc(data_size))) + TEST_ERROR; + + for (i = 0; i < data_size / DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE; i++) + ((int *)data)[i] = (int)i; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dwrite_all_read) + { + TESTING_2("H5Dwrite using H5S_ALL then H5Dread"); + + if (H5Dwrite(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + data) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_all_read); + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_all_read); + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if (NULL == + (data = HDmalloc((hsize_t)space_npoints * DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if (H5Dread(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + data) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_all_read); + } + + for (i = 0; i < (hsize_t)space_npoints; i++) + if (((int *)data)[i] != (int)i) { + H5_FAILED(); + HDprintf(" H5S_ALL selection data verification failed\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if (data) { + HDfree(data); + data = NULL; + } + + PASSED(); + } + PART_END(H5Dwrite_all_read); + + PART_BEGIN(H5Dwrite_hyperslab_read) + { + TESTING_2("H5Dwrite using hyperslab selection then H5Dread"); + + data_size = dims[1] * 2 * DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + for (i = 0; i < data_size / DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE; i++) + ((int *)write_buf)[i] = 56; + + for (i = 0, data_size = 1; i < 
DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE; + + if (NULL == (data = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset data verification\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (H5Dread(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + data) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + for (i = 0; i < 2; i++) { + size_t j; + + for (j = 0; j < dims[1]; j++) + ((int *)data)[(i * dims[1] * dims[2]) + (j * dims[2])] = 56; + } + + /* Write to first two rows of dataset */ + start[0] = start[1] = start[2] = 0; + stride[0] = stride[1] = stride[2] = 1; + count[0] = 2; + count[1] = dims[1]; + count[2] = 1; + block[0] = block[1] = block[2] = 1; + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + { + hsize_t mdims[] = {(hsize_t)2 * dims[1]}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + } + + if (H5Dwrite(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (NULL == (read_buf = HDmalloc((hsize_t)space_npoints * + DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (H5Dread(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (memcmp(data, read_buf, data_size)) { + H5_FAILED(); + HDprintf(" hyperslab selection data verification failed\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + PASSED(); + } + PART_END(H5Dwrite_hyperslab_read); + + PART_BEGIN(H5Dwrite_point_sel_read) + { + 
TESTING_2("H5Dwrite using point selection then H5Dread"); + + data_size = + DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS * DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + for (i = 0; i < data_size / DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE; i++) + ((int *)write_buf)[i] = 13; + + for (i = 0, data_size = 1; i < DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE; + + if (NULL == (data = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset data verification\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (H5Dread(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + data) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_point_sel_read); + } + + for (i = 0; i < dims[0]; i++) { + size_t j; + + for (j = 0; j < dims[1]; j++) { + size_t k; + + for (k = 0; k < dims[2]; k++) { + if (i == j && j == k) + ((int *)data)[(i * dims[1] * dims[2]) + (j * dims[2]) + k] = 13; + } + } + } + + /* Select a series of 10 points in the dataset */ + for (i = 0; i < DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS; i++) { + size_t j; + + for (j = 0; j < DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK; j++) + points[(i * DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK) + j] = i; + } + + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS, + points) < 0) { + H5_FAILED(); + HDprintf(" couldn't select elements in dataspace\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + { + hsize_t mdims[] = {(hsize_t)DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + } + + if (H5Dwrite(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (NULL == (read_buf = HDmalloc((hsize_t)space_npoints * + DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (H5Dread(dset_id, 
DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (memcmp(data, read_buf, data_size)) { + H5_FAILED(); + HDprintf(" point selection data verification failed\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + PASSED(); + } + PART_END(H5Dwrite_point_sel_read); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (data) { + HDfree(data); + data = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + if (write_buf) + HDfree(write_buf); + if (read_buf) + HDfree(read_buf); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset can't be written to + * when H5Dwrite is passed invalid parameters. + */ +static int +test_write_dataset_invalid_params(void) +{ + hssize_t space_npoints; + hsize_t dims[DATASET_WRITE_INVALID_PARAMS_TEST_DSET_SPACE_RANK] = {10, 5, 3}; + herr_t err_ret = -1; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *data = NULL; + + TESTING_MULTIPART("H5Dwrite with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_WRITE_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_WRITE_INVALID_PARAMS_TEST_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_NAME, + DATASET_SMALL_WRITE_TEST_ALL_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_INVALID_PARAMS_TEST_DSET_NAME); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == (data = HDmalloc((hsize_t)space_npoints * 
DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPESIZE))) + TEST_ERROR; + + for (i = 0; i < (hsize_t)space_npoints; i++) + ((int *)data)[i] = (int)i; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dwrite_invalid_dset_id) + { + TESTING_2("H5Dwrite with an invalid dataset ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Dwrite(H5I_INVALID_HID, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, + H5S_ALL, H5P_DEFAULT, data); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" wrote to dataset using H5Dwrite with an invalid dataset ID!\n"); + PART_ERROR(H5Dwrite_invalid_dset_id); + } + + PASSED(); + } + PART_END(H5Dwrite_invalid_dset_id); + + PART_BEGIN(H5Dwrite_invalid_datatype) + { + TESTING_2("H5Dwrite with an invalid memory datatype"); + + H5E_BEGIN_TRY + { + err_ret = H5Dwrite(dset_id, H5I_INVALID_HID, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" wrote to dataset using H5Dwrite with an invalid memory datatype!\n"); + PART_ERROR(H5Dwrite_invalid_datatype); + } + + PASSED(); + } + PART_END(H5Dwrite_invalid_datatype); + + PART_BEGIN(H5Dwrite_invalid_mem_dataspace) + { + TESTING_2("H5Dwrite with an invalid memory dataspace"); + + H5E_BEGIN_TRY + { + err_ret = H5Dwrite(dset_id, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE, H5I_INVALID_HID, + H5S_ALL, H5P_DEFAULT, data); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" wrote to dataset using H5Dwrite with an invalid memory dataspace!\n"); + PART_ERROR(H5Dwrite_invalid_mem_dataspace); + } + + PASSED(); + } + PART_END(H5Dwrite_invalid_mem_dataspace); + + PART_BEGIN(H5Dwrite_invalid_file_dataspace) + { + TESTING_2("H5Dwrite with an invalid file dataspace"); + + H5E_BEGIN_TRY + { + err_ret = H5Dwrite(dset_id, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, + H5I_INVALID_HID, H5P_DEFAULT, data); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" wrote to dataset using H5Dwrite with an invalid file dataspace!\n"); + PART_ERROR(H5Dwrite_invalid_file_dataspace); + } + + PASSED(); + } + PART_END(H5Dwrite_invalid_file_dataspace); + + PART_BEGIN(H5Dwrite_invalid_dxpl) + { + TESTING_2("H5Dwrite with an invalid DXPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Dwrite(dset_id, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, + H5I_INVALID_HID, data); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" wrote to dataset using H5Dwrite with an invalid DXPL!\n"); + PART_ERROR(H5Dwrite_invalid_dxpl); + } + + PASSED(); + } + PART_END(H5Dwrite_invalid_dxpl); + + PART_BEGIN(H5Dwrite_invalid_data_buf) + { + TESTING_2("H5Dwrite with an invalid data buffer"); + + H5E_BEGIN_TRY + { + err_ret = H5Dwrite(dset_id, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" wrote to dataset using H5Dwrite with an invalid data buffer!\n"); + PART_ERROR(H5Dwrite_invalid_data_buf); + } + + PASSED(); + } + PART_END(H5Dwrite_invalid_data_buf); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (data) { + HDfree(data); + data = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + 
H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that data is read back correctly from a dataset after it has + * been written, using type conversion with builtin types. + */ +static int +test_dataset_builtin_type_conversion(void) +{ + hssize_t space_npoints; + hsize_t dims[DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK] = {10, 10, 10}; + hsize_t start[DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK]; + hsize_t stride[DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK]; + hsize_t count[DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK]; + hsize_t block[DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK]; + hsize_t points[DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS * + DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hid_t file_type_id = H5I_INVALID_HID; + H5T_order_t native_order; + void *data = NULL; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING_MULTIPART( + "verification of dataset data using H5Dwrite then H5Dread with type conversion of builtin types"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((native_order = H5Tget_order(DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get native byte order\n"); + goto error; + } + if (native_order == H5T_ORDER_LE) + file_type_id = H5T_STD_I32BE; + else + file_type_id = H5T_STD_I32LE; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_DATA_BUILTIN_CONVERSION_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME, file_type_id, + fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + goto error; + } + + for (i = 0, data_size = 1; i < DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE; + + if (NULL == (data = HDmalloc(data_size))) + TEST_ERROR; + + for (i = 0; i < data_size / DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE; i++) + ((int *)data)[i] = (int)i; + + PASSED(); + + BEGIN_MULTIPART + { + 
PART_BEGIN(H5Dwrite_all_read) + { + TESTING_2("H5Dwrite then H5Dread with H5S_ALL selection"); + + if (H5Dwrite(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, data) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_all_read); + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_all_read); + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if (NULL == (data = HDmalloc((hsize_t)space_npoints * + DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if (H5Dread(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, data) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_all_read); + } + + for (i = 0; i < (hsize_t)space_npoints; i++) + if (((int *)data)[i] != (int)i) { + H5_FAILED(); + HDprintf(" H5S_ALL selection data verification failed\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if (data) { + HDfree(data); + data = NULL; + } + + PASSED(); + } + PART_END(H5Dwrite_all_read); + + PART_BEGIN(H5Dwrite_hyperslab_read) + { + TESTING_2("H5Dwrite using hyperslab selection then H5Dread"); + + data_size = dims[1] * 2 * DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + for (i = 0; i < data_size / DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE; i++) + ((int *)write_buf)[i] = 56; + + for (i = 0, data_size = 1; i < DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE; + + if (NULL == (data = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset data verification\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (H5Dread(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, data) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + for (i = 0; i < 2; i++) { + size_t j; + + for (j = 0; j < dims[1]; j++) + ((int *)data)[(i * dims[1] * dims[2]) + (j * dims[2])] = 56; + } + + /* Write to first two rows of dataset */ + start[0] = start[1] = start[2] = 0; + stride[0] = stride[1] = stride[2] = 1; + count[0] = 2; + count[1] = dims[1]; + count[2] = 1; + block[0] = block[1] = block[2] = 1; + + if (H5Sselect_hyperslab(fspace_id, 
H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + { + hsize_t mdims[] = {(hsize_t)2 * dims[1]}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + } + + if (H5Dwrite(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (NULL == (read_buf = HDmalloc((hsize_t)space_npoints * + DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (H5Dread(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (memcmp(data, read_buf, data_size)) { + H5_FAILED(); + HDprintf(" hyperslab selection data verification failed\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + PASSED(); + } + PART_END(H5Dwrite_hyperslab_read); + + PART_BEGIN(H5Dwrite_point_sel_read) + { + TESTING_2("H5Dwrite using point selection then H5Dread"); + + data_size = DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS * + DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + for (i = 0; i < data_size / DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE; i++) + ((int *)write_buf)[i] = 13; + + for (i = 0, data_size = 1; i < DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE; + + if (NULL == (data = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset data verification\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (H5Dread(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, data) 
< 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_point_sel_read); + } + + for (i = 0; i < dims[0]; i++) { + size_t j; + + for (j = 0; j < dims[1]; j++) { + size_t k; + + for (k = 0; k < dims[2]; k++) { + if (i == j && j == k) + ((int *)data)[(i * dims[1] * dims[2]) + (j * dims[2]) + k] = 13; + } + } + } + + /* Select a series of 10 points in the dataset */ + for (i = 0; i < DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS; i++) { + size_t j; + + for (j = 0; j < DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK; j++) + points[(i * DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK) + j] = i; + } + + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS, + points) < 0) { + H5_FAILED(); + HDprintf(" couldn't select elements in dataspace\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + { + hsize_t mdims[] = {(hsize_t)DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + } + + if (H5Dwrite(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (NULL == (read_buf = HDmalloc((hsize_t)space_npoints * + DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (H5Dread(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (memcmp(data, read_buf, data_size)) { + H5_FAILED(); + HDprintf(" point selection data verification failed\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + PASSED(); + } + PART_END(H5Dwrite_point_sel_read); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (data) { + HDfree(data); + data = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if 
(H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (data) + HDfree(data); + if (write_buf) + HDfree(write_buf); + if (read_buf) + HDfree(read_buf); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that data is read back correctly from a dataset after it has + * been written, using partial element I/O with compound types + */ +typedef struct dataset_compount_partial_io_t { + int a; + int b; +} dataset_compount_partial_io_t; + +static int +test_dataset_compound_partial_io(void) +{ + hsize_t dims[1] = {DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS}; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t full_type_id = H5I_INVALID_HID; + hid_t a_type_id = H5I_INVALID_HID; + hid_t b_type_id = H5I_INVALID_HID; + dataset_compount_partial_io_t wbuf[DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS]; + dataset_compount_partial_io_t rbuf[DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS]; + dataset_compount_partial_io_t fbuf[DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS]; + dataset_compount_partial_io_t erbuf[DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS]; + + TESTING_MULTIPART( + "verification of dataset data using H5Dwrite then H5Dread with partial element compound type I/O"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = H5Screate_simple(1, dims, NULL)) < 0) + TEST_ERROR; + + if ((full_type_id = H5Tcreate(H5T_COMPOUND, sizeof(dataset_compount_partial_io_t))) < 0) + TEST_ERROR; + if (H5Tinsert(full_type_id, "a", HOFFSET(dataset_compount_partial_io_t, a), H5T_NATIVE_INT) < 0) + TEST_ERROR; + if (H5Tinsert(full_type_id, "b", HOFFSET(dataset_compount_partial_io_t, b), H5T_NATIVE_INT) < 0) + TEST_ERROR; + + if ((a_type_id = H5Tcreate(H5T_COMPOUND, sizeof(dataset_compount_partial_io_t))) < 0) + TEST_ERROR; + if (H5Tinsert(a_type_id, "a", HOFFSET(dataset_compount_partial_io_t, a), H5T_NATIVE_INT) < 0) + TEST_ERROR; + + if ((b_type_id = H5Tcreate(H5T_COMPOUND, sizeof(dataset_compount_partial_io_t))) < 0) + TEST_ERROR; + if (H5Tinsert(b_type_id, "b", HOFFSET(dataset_compount_partial_io_t, b), H5T_NATIVE_INT) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_DSET_NAME, full_type_id, + space_id, 
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        HDprintf(" couldn't create dataset '%s'\n", DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_DSET_NAME);
+        goto error;
+    }
+
+    PASSED();
+
+    BEGIN_MULTIPART
+    {
+        PART_BEGIN(write_full_read_full)
+        {
+            TESTING_2("H5Dwrite then H5Dread with all compound members");
+
+            /* Initialize wbuf */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                wbuf[i].a = (int)(2 * i);
+                wbuf[i].b = (int)(2 * i + 1);
+            }
+
+            /* Write data */
+            if (H5Dwrite(dset_id, full_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+                PART_TEST_ERROR(write_full_read_full);
+
+            /* Update fbuf to match file state */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                fbuf[i].a = wbuf[i].a;
+                fbuf[i].b = wbuf[i].b;
+            }
+
+            /* Initialize rbuf to -1 */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                rbuf[i].a = -1;
+                rbuf[i].b = -1;
+            }
+
+            /* Set erbuf (simply match file state since we're reading the whole
+             * thing) */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                erbuf[i].a = fbuf[i].a;
+                erbuf[i].b = fbuf[i].b;
+            }
+
+            /* Read data */
+            if (H5Dread(dset_id, full_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+                PART_TEST_ERROR(write_full_read_full);
+
+            /* Verify data */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                if (rbuf[i].a != erbuf[i].a)
+                    PART_TEST_ERROR(write_full_read_full);
+                if (rbuf[i].b != erbuf[i].b)
+                    PART_TEST_ERROR(write_full_read_full);
+            }
+
+            PASSED();
+        }
+        PART_END(write_full_read_full);
+
+        PART_BEGIN(read_a)
+        {
+            TESTING_2("H5Dread with compound member a");
+
+            /* Initialize rbuf to -1 */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                rbuf[i].a = -1;
+                rbuf[i].b = -1;
+            }
+
+            /* Set erbuf (element a comes from the file, element b is untouched)
+             */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                erbuf[i].a = fbuf[i].a;
+                erbuf[i].b = rbuf[i].b;
+            }
+
+            /* Read data */
+            if (H5Dread(dset_id, a_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+                PART_TEST_ERROR(read_a);
+
+            /* Verify data */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                if (rbuf[i].a != erbuf[i].a)
+                    PART_TEST_ERROR(read_a);
+                if (rbuf[i].b != erbuf[i].b)
+                    PART_TEST_ERROR(read_a);
+            }
+
+            PASSED();
+        }
+        PART_END(read_a);
+
+        PART_BEGIN(write_b_read_full)
+        {
+            TESTING_2("H5Dwrite with compound member b then H5Dread with all compound members");
+
+            /* Initialize wbuf */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                wbuf[i].a = (int)(2 * DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS + 2 * i);
+                wbuf[i].b = (int)(2 * DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS + 2 * i + 1);
+            }
+
+            /* Write data */
+            if (H5Dwrite(dset_id, b_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+                PART_TEST_ERROR(write_b_read_full);
+
+            /* Update fbuf to match file state - only element b was updated */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                fbuf[i].b = wbuf[i].b;
+            }
+
+            /* Initialize rbuf to -1 */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                rbuf[i].a = -1;
+                rbuf[i].b = -1;
+            }
+
+            /* Set erbuf (simply match file state since we're reading the whole
+             * thing) */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+                erbuf[i].a = fbuf[i].a;
+                erbuf[i].b = fbuf[i].b;
+            }
+
+            /* Read data */
+            if (H5Dread(dset_id, full_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+                PART_TEST_ERROR(write_b_read_full);
+
+            /* Verify data */
+            for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+
if (rbuf[i].a != erbuf[i].a) + PART_TEST_ERROR(write_b_read_full); + if (rbuf[i].b != erbuf[i].b) + PART_TEST_ERROR(write_b_read_full); + } + + PASSED(); + } + PART_END(write_b_read_full); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + if (H5Tclose(full_type_id) < 0) + TEST_ERROR; + if (H5Tclose(a_type_id) < 0) + TEST_ERROR; + if (H5Tclose(b_type_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + H5Tclose(full_type_id); + H5Tclose(a_type_id); + H5Tclose(b_type_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a chunked dataset's extent can be + * changed by using H5Dset_extent. This test uses unlimited + * dimensions for the dataset, so the dimensionality of the + * dataset may both shrink and grow. + */ +static int +test_dataset_set_extent_chunked_unlimited(void) +{ + hsize_t dims[DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK]; + hsize_t max_dims[DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK]; + hsize_t chunk_dims[DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK]; + hsize_t new_dims[DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK]; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING("H5Dset_extent on chunked dataset with unlimited dimensions"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_GROUP_NAME); + goto error; + } + + for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK; i++) { + max_dims[i] = H5S_UNLIMITED; + chunk_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + } + + if ((fspace_id = generate_random_dataspace(DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK, max_dims, + dims, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK, chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" unable to set dataset 
chunk dimensionality\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_DSET_NAME, dset_dtype, + fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_NUM_PASSES; i++) { + size_t j; + + for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK; j++) { + /* Ensure that the new dimensionality doesn't match the old dimensionality. */ + do { + new_dims[j] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + } while (new_dims[j] == dims[j]); + } + + if (H5Dset_extent(dset_id, new_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set dataset extent\n"); + goto error; + } + + /* Retrieve the new dimensions of the dataset and ensure they + * are different from the original. + */ + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve dataset's dataspace\n"); + goto error; + } + + if (H5Sget_simple_extent_dims(fspace_id, new_dims, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve dataset dimensionality\n"); + goto error; + } + + /* + * Make sure the dimensions have been changed. + */ + for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK; j++) { + if (dims[j] == new_dims[j]) { + H5_FAILED(); + HDprintf(" dataset dimension %llu wasn't changed!\n", (unsigned long long)j); + goto error; + } + } + + /* + * Remember the current dimensionality of the dataset before + * changing them again. + */ + HDmemcpy(dims, new_dims, sizeof(new_dims)); + } + + /* + * Now close and re-open the dataset each pass to check the persistence + * of the changes to the dataset's dimensionality. + */ + for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_NUM_PASSES; i++) { + size_t j; + + for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK; j++) { + /* Ensure that the new dimensionality doesn't match the old dimensionality. */ + do { + new_dims[j] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + } while (new_dims[j] == dims[j]); + } + + if (H5Dset_extent(dset_id, new_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set dataset extent\n"); + goto error; + } + + /* Retrieve the new dimensions of the dataset and ensure they + * are different from the original. + */ + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + if ((dset_id = H5Dopen2(group_id, DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_DSET_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", + DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve dataset's dataspace\n"); + goto error; + } + + if (H5Sget_simple_extent_dims(fspace_id, new_dims, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve dataset dimensionality\n"); + goto error; + } + + /* + * Make sure the dimensions have been changed. + */ + for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK; j++) { + if (dims[j] == new_dims[j]) { + H5_FAILED(); + HDprintf(" dataset dimension %llu wasn't changed!\n", (unsigned long long)j); + goto error; + } + } + + /* + * Remember the current dimensionality of the dataset before + * changing them again. 
+ */ + HDmemcpy(dims, new_dims, sizeof(new_dims)); + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a chunked dataset's extent can be + * changed by using H5Dset_extent. This test uses fixed-size + * dimensions for the dataset, so the dimensionality of the + * dataset may only shrink. + */ +static int +test_dataset_set_extent_chunked_fixed(void) +{ + hsize_t dims[DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK]; + hsize_t dims2[DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK]; + hsize_t chunk_dims[DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK]; + hsize_t new_dims[DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK]; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID, fspace_id2 = H5I_INVALID_HID; + + TESTING("H5Dset_extent on chunked dataset with fixed dimensions"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_GROUP_NAME); + goto error; + } + + for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK; i++) { + dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + dims2[i] = dims[i]; + do { + chunk_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + } while (chunk_dims[i] > dims[i]); + } + + if ((fspace_id = H5Screate_simple(DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + if ((fspace_id2 = H5Screate_simple(DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK, dims2, NULL)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK, chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" unable to set dataset chunk dimensionality\n"); + goto error; + } + + /* + * NOTE: Since 
shrinking the dimension size can quickly end in a situation + * where the dimensions are of size 1 and we can't shrink them further, we + * use two datasets here to ensure the second test can run at least once. + */ + if ((dset_id = H5Dcreate2(group_id, DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME, dset_dtype, + fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME); + goto error; + } + + if ((dset_id2 = H5Dcreate2(group_id, DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME2, dset_dtype, + fspace_id2, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME2); + goto error; + } + + for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_NUM_PASSES; i++) { + hbool_t skip_iterations = FALSE; + size_t j; + + for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK; j++) { + /* Ensure that the new dimensionality is less than the old dimensionality. */ + do { + if (dims[j] == 1) { + skip_iterations = TRUE; + break; + } + else + new_dims[j] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + } while (new_dims[j] >= dims[j]); + } + + /* + * If we've shrunk one of the dimensions to size 1, skip the rest of + * the iterations. + */ + if (skip_iterations) + break; + + if (H5Dset_extent(dset_id, new_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set dataset extent\n"); + goto error; + } + + /* Retrieve the new dimensions of the dataset and ensure they + * are different from the original. + */ + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve dataset's dataspace\n"); + goto error; + } + + if (H5Sget_simple_extent_dims(fspace_id, new_dims, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve dataset dimensionality\n"); + goto error; + } + + /* + * Make sure the dimensions have been changed. + */ + for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK; j++) { + if (dims[j] == new_dims[j]) { + H5_FAILED(); + HDprintf(" dataset dimension %llu wasn't changed!\n", (unsigned long long)j); + goto error; + } + } + + /* + * Remember the current dimensionality of the dataset before + * changing them again. + */ + HDmemcpy(dims, new_dims, sizeof(new_dims)); + } + + /* + * Now close and re-open the dataset each pass to check the persistence + * of the changes to the dataset's dimensionality. + */ + for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_NUM_PASSES; i++) { + hbool_t skip_iterations = FALSE; + size_t j; + + for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK; j++) { + /* Ensure that the new dimensionality is less than the old dimensionality. */ + do { + if (dims2[j] == 1) { + skip_iterations = TRUE; + break; + } + else + new_dims[j] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + } while (new_dims[j] >= dims2[j]); + } + + /* + * If we've shrunk one of the dimensions to size 1, skip the rest of + * the iterations. + */ + if (skip_iterations) + break; + + if (H5Dset_extent(dset_id2, new_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set dataset extent2\n"); + goto error; + } + + /* Retrieve the new dimensions of the dataset and ensure they + * are different from the original. 
+ */ + if (H5Sclose(fspace_id2) < 0) + TEST_ERROR; + if (H5Dclose(dset_id2) < 0) + TEST_ERROR; + + if ((dset_id2 = H5Dopen2(group_id, DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME2); + goto error; + } + + if ((fspace_id2 = H5Dget_space(dset_id2)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve dataset's dataspace\n"); + goto error; + } + + if (H5Sget_simple_extent_dims(fspace_id2, new_dims, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve dataset dimensionality\n"); + goto error; + } + + /* + * Make sure the dimensions have been changed. + */ + for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK; j++) { + if (dims2[j] == new_dims[j]) { + H5_FAILED(); + HDprintf(" dataset dimension %llu wasn't changed!\n", (unsigned long long)j); + goto error; + } + } + + /* + * Remember the current dimensionality of the dataset before + * changing them again. + */ + HDmemcpy(dims2, new_dims, sizeof(new_dims)); + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id2) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + H5Sclose(fspace_id); + H5Sclose(fspace_id2); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Dclose(dset_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the data is correct after expanding + * and shrinking the dataset with H5Dset_extent + */ +static int +test_dataset_set_extent_data(void) +{ + hsize_t dims_origin[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK] = {DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM, + DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM}; + hsize_t dims_expand[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK] = { + DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM * 2 - 1, DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM * 2 - 1}; + hsize_t dims_shrink[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK] = { + DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM / 2 + 1, DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM / 2 + 1}; + hsize_t dims_chunk[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK] = {DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM, + DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM}; + hsize_t dims_max[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; + hsize_t dims_out[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK]; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID, dset_space_id = H5I_INVALID_HID; + int buf_origin[DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM][DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM]; +#ifndef NO_CLEAR_ON_SHRINK + int buf_expand2[DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM][DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM]; +#endif + int buf_expand[DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM * 2 - 1] + [DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM * 2 - 1]; + int buf_shrink[DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM / 2 + 1] + [DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM / 2 + 1]; + int i, j; + + TESTING_MULTIPART("H5Dset_extent on data correctness"); + + /* Make sure the connector 
supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SET_EXTENT_DATA_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_SET_EXTENT_DATA_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK, dims_origin, dims_max)) < 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK, dims_chunk) < 0) { + H5_FAILED(); + HDprintf(" unable to set dataset chunk dimensionality\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_SET_EXTENT_DATA_TEST_DSET_NAME, H5T_NATIVE_INT, fspace_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_DATA_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM; i++) + for (j = 0; j < DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM; j++) + buf_origin[i][j] = i + j; + + /* Write the original data + * X X X X X X X X + * X X X X X X X X + * X X X X X X X X + * X X X X X X X X + * X X X X X X X X + * X X X X X X X X + * X X X X X X X X + * X X X X X X X X + */ + if (H5Dwrite(dset_id, H5T_NATIVE_INT, fspace_id, H5S_ALL, H5P_DEFAULT, buf_origin) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dset_extent_data_expand) + { + TESTING_2("H5Dset_extent for data expansion"); + + /* Expand the dataset. The extended space should be initialized with the + * the default value (0) + * X X X X X X X X 0 0 0 0 0 0 0 + * X X X X X X X X 0 0 0 0 0 0 0 + * X X X X X X X X 0 0 0 0 0 0 0 + * X X X X X X X X 0 0 0 0 0 0 0 + * X X X X X X X X 0 0 0 0 0 0 0 + * X X X X X X X X 0 0 0 0 0 0 0 + * X X X X X X X X 0 0 0 0 0 0 0 + * X X X X X X X X 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + */ + if (H5Dset_extent(dset_id, dims_expand) < 0) + PART_ERROR(H5Dset_extent_data_expand); + + if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_expand) < 0) + PART_ERROR(H5Dset_extent_data_expand); + + /* compare the expanded data */ + for (i = 0; i < (int)dims_expand[0]; i++) { + for (j = 0; j < (int)dims_expand[1]; j++) { + if (i >= (int)dims_origin[0] || j >= (int)dims_origin[1]) { + if (buf_expand[i][j] != 0) { + H5_FAILED(); + HDprintf(" buf_expand[%d][%d] = %d. 
It should be 0\n", i, j, buf_expand[i][j]); + PART_ERROR(H5Dset_extent_data_expand); + } + } + else { + if (buf_expand[i][j] != buf_origin[i][j]) { + H5_FAILED(); + HDprintf(" buf_expand[%d][%d] = %d. It should be %d\n", i, j, buf_expand[i][j], + buf_origin[i][j]); + PART_ERROR(H5Dset_extent_data_expand); + } + } + } + } + + PASSED(); + } + PART_END(H5Dset_extent_data_expand); + + PART_BEGIN(H5Dset_extent_data_shrink) + { + TESTING_2("H5Dset_extent for data shrinking"); + + /* Shrink the dataset. + * X X X X X + * X X X X X + * X X X X X + * X X X X X + * X X X X X + */ + if (H5Dset_extent(dset_id, dims_shrink) < 0) + PART_ERROR(H5Dset_extent_data_shrink); + + if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_shrink) < 0) + PART_ERROR(H5Dset_extent_data_shrink); + + /* compare the shrunk data */ + for (i = 0; i < (int)dims_shrink[0]; i++) { + for (j = 0; j < (int)dims_shrink[1]; j++) { + if (buf_shrink[i][j] != buf_origin[i][j]) { + H5_FAILED(); + HDprintf(" buf_shrink[%d][%d] = %d. It should be %d\n", i, j, buf_shrink[i][j], + buf_origin[i][j]); + PART_ERROR(H5Dset_extent_data_shrink); + } + } + } + + PASSED(); + } + PART_END(H5Dset_extent_data_shrink); + + PART_BEGIN(H5Dset_extent_data_expand_to_origin) + { + TESTING_2("H5Dset_extent for data back to the original size"); +#ifndef NO_CLEAR_ON_SHRINK + /* Expand the dataset back to the original size. The data should look like this: + * X X X X X 0 0 0 + * X X X X X 0 0 0 + * X X X X X 0 0 0 + * X X X X X 0 0 0 + * X X X X X 0 0 0 + * 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 + */ + if (H5Dset_extent(dset_id, dims_origin) < 0) + PART_ERROR(H5Dset_extent_data_expand_to_origin); + + if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_expand2) < 0) + PART_ERROR(H5Dset_extent_data_expand_to_origin); + + /* compare the expanded data */ + for (i = 0; i < (int)dims_origin[0]; i++) { + for (j = 0; j < (int)dims_origin[1]; j++) { + if (i >= (int)dims_shrink[0] || j >= (int)dims_shrink[1]) { + if (buf_expand2[i][j] != 0) { + H5_FAILED(); + HDprintf(" buf_expand2[%d][%d] = %d. It should be 0\n", i, j, + buf_expand2[i][j]); + PART_ERROR(H5Dset_extent_data_expand_to_origin); + } + } + else { + if (buf_expand2[i][j] != buf_origin[i][j]) { + H5_FAILED(); + HDprintf(" buf_expand2[%d][%d] = %d. It should be %d.\n", i, j, + buf_expand2[i][j], buf_origin[i][j]); + PART_ERROR(H5Dset_extent_data_expand_to_origin); + } + } + } + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Dset_extent_data_expand_to_origin); +#endif + } + PART_END(H5Dset_extent_data_expand_to_origin); + + PART_BEGIN(H5Dset_extent_data_shrink_to_zero) + { + TESTING_2("H5Dset_extent for data shrink to zero size"); + + /* Shrink the dimensions to 0 and verify it */ + dims_shrink[0] = dims_shrink[1] = 0; + + if (H5Dset_extent(dset_id, dims_shrink) < 0) + PART_ERROR(H5Dset_extent_data_shrink_to_zero); + + /* get the space */ + if ((dset_space_id = H5Dget_space(dset_id)) < 0) + PART_ERROR(H5Dset_extent_data_shrink_to_zero); + + /* get dimensions */ + if (H5Sget_simple_extent_dims(dset_space_id, dims_out, NULL) < 0) + PART_ERROR(H5Dset_extent_data_shrink_to_zero); + + if (H5Sclose(dset_space_id) < 0) + PART_ERROR(H5Dset_extent_data_shrink_to_zero); + + /* Verify the dimensions are 0 */ + for (i = 0; i < DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK; i++) + if (dims_out[i] != 0) { + H5_FAILED(); + HDprintf(" dims_out[%d] = %llu. 
It should be 0.\n", i, + (long long unsigned int)dims_out[i]); + PART_ERROR(H5Dset_extent_data_shrink_to_zero); + } + + PASSED(); + } + PART_END(H5Dset_extent_data_shrink_to_zero); + + PART_BEGIN(H5Dset_extent_data_expand_to_origin_again) + { + TESTING_2("H5Dset_extent for data expansion back to the original again"); +#ifndef NO_CLEAR_ON_SHRINK + /* Expand the dataset back to the original size. The data should look like this: + * 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 + * 0 0 0 0 0 0 0 0 + */ + if (H5Dset_extent(dset_id, dims_origin) < 0) + PART_ERROR(H5Dset_extent_data_expand_to_origin_again); + + if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_expand2) < 0) + PART_ERROR(H5Dset_extent_data_expand_to_origin_again); + + /* The data should be all zeros */ + for (i = 0; i < (int)dims_origin[0]; i++) { + for (j = 0; j < (int)dims_origin[1]; j++) { + if (buf_expand2[i][j] != 0) { + H5_FAILED(); + HDprintf(" buf_expand2[%d][%d] = %d. It should be 0.\n", i, j, buf_expand2[i][j]); + PART_ERROR(H5Dset_extent_data_expand_to_origin_again); + } + } + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Dset_extent_data_expand_to_origin_again); +#endif + } + PART_END(H5Dset_extent_data_expand_to_origin_again); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + H5Sclose(fspace_id); + H5Sclose(dset_space_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} /* test_dataset_set_extent_data */ + +/* + * If a dataset is opened twice and one of the handles is + * used to extend the dataset, then the other handle should + * return the new size when queried. 
+ */ +static int +test_dataset_set_extent_double_handles(void) +{ +#ifndef NO_DOUBLE_OBJECT_OPENS + hsize_t dims_origin[DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK] = { + DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM, DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM}; + hsize_t dims_expand[DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK] = { + DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM * 2, + DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM * 2}; + hsize_t dims_chunk[DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK] = { + DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM / 2, + DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM / 2}; + hsize_t dims_max[DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; + hsize_t dims_out[DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK]; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID, dset_space_id = H5I_INVALID_HID; + int i; +#endif + + TESTING("H5Dset_extent on double dataset handles"); + +#ifndef NO_DOUBLE_OBJECT_OPENS + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_SET_EXTENT_DATA_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = + H5Screate_simple(DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK, dims_origin, dims_max)) < 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK, dims_chunk) < 0) { + H5_FAILED(); + HDprintf(" unable to set dataset chunk dimensionality\n"); + goto error; + } + + /* Create the dataset */ + if ((dset_id = H5Dcreate2(group_id, DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_DSET_NAME, H5T_NATIVE_INT, + fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_DSET_NAME); + goto error; + } + + /* Open the same dataset again */ + if ((dset_id2 = H5Dopen2(group_id, DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_DSET_NAME); + goto error; + } + + /* Expand the dataset's dimensions with the first dataset handle */ + if (H5Dset_extent(dset_id, dims_expand) < 0) + TEST_ERROR; + + /* Get the data space with the second dataset handle */ + if ((dset_space_id = H5Dget_space(dset_id2)) < 0) + 
TEST_ERROR; + + /* Get the dimensions with the second dataset handle */ + if (H5Sget_simple_extent_dims(dset_space_id, dims_out, NULL) < 0) + TEST_ERROR; + + if (H5Sclose(dset_space_id) < 0) + TEST_ERROR; + + for (i = 0; i < DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK; i++) + if (dims_out[i] != dims_expand[i]) { + H5_FAILED(); + HDprintf(" dims_out[%d] = %d. It should be %d.\n", i, dims_out[i], dims_expand[i]); + goto error; + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + H5Sclose(fspace_id); + H5Sclose(dset_space_id); + H5Dclose(dset_id); + H5Dclose(dset_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} /* test_dataset_set_extent_double_handles */ + +/* + * A test to check that a dataset's extent can't be + * changed when H5Dset_extent is passed invalid parameters. + */ +static int +test_dataset_set_extent_invalid_params(void) +{ + hsize_t dims[DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK]; + hsize_t chunk_dims[DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK]; + hsize_t new_dims[DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK]; + hsize_t compact_dims[DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK] = {3, 3}; + size_t i; + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t chunked_dset_id = H5I_INVALID_HID, compact_dset_id = H5I_INVALID_HID, + contiguous_dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t chunked_dcpl_id = H5I_INVALID_HID, compact_dcpl_id = H5I_INVALID_HID, + contiguous_dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID, compact_fspace_id = H5I_INVALID_HID; + char vol_name[5]; + + TESTING_MULTIPART("H5Dset_extent with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + /** for DAOS VOL, this test is problematic since auto chunking can be selected, so skip for now */ + if (H5VLget_connector_name(file_id, vol_name, 5) < 0) { + H5_FAILED(); + HDprintf(" couldn't get VOL connector name\n"); + goto error; + } + if (strcmp(vol_name, "daos") == 0) { + if (H5Fclose(file_id) < 0) + TEST_ERROR; + SKIPPED(); + return 0; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SET_EXTENT_INVALID_PARAMS_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" 
couldn't create container sub-group '%s'\n", + DATASET_SET_EXTENT_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK, NULL, dims, + FALSE)) < 0) + TEST_ERROR; + + for (i = 0; i < DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK; i++) { + do { + new_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + } while (new_dims[i] > dims[i]); + do { + chunk_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + } while (chunk_dims[i] > dims[i]); + } + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + /* Create a compact dataset */ + if ((compact_dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_layout(compact_dcpl_id, H5D_COMPACT) < 0) + TEST_ERROR; + + /* Keep the data space small because the storage size of compact dataset is limited to 64K */ + if ((compact_fspace_id = + H5Screate_simple(DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK, compact_dims, NULL)) < 0) + TEST_ERROR; + + if ((compact_dset_id = + H5Dcreate2(group_id, DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_COMPACT_DSET_NAME, H5T_NATIVE_INT, + compact_fspace_id, H5P_DEFAULT, compact_dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_COMPACT_DSET_NAME); + goto error; + } + + /* Create a contiguous dataset */ + if ((contiguous_dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_layout(contiguous_dcpl_id, H5D_CONTIGUOUS) < 0) + TEST_ERROR; + + if ((contiguous_dset_id = + H5Dcreate2(group_id, DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_CONTIGUOUS_DSET_NAME, dset_dtype, + fspace_id, H5P_DEFAULT, contiguous_dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_CONTIGUOUS_DSET_NAME); + goto error; + } + + /* Create a chunked dataset */ + if ((chunked_dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(chunked_dcpl_id, DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK, chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" unable to set dataset chunk dimensionality\n"); + goto error; + } + + if ((chunked_dset_id = H5Dcreate2(group_id, DATASET_SET_EXTENT_INVALID_PARAMS_TEST_DSET_NAME, dset_dtype, + fspace_id, H5P_DEFAULT, chunked_dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_INVALID_PARAMS_TEST_DSET_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dset_extent_invalid_layout_compact) + { + TESTING_2("H5Dset_extent with an invalid dataset layout (compact)"); + + H5E_BEGIN_TRY + { + err_ret = H5Dset_extent(compact_dset_id, new_dims); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" setting dataset extent succeeded with an invalid layout (compact)\n"); + PART_ERROR(H5Dset_extent_invalid_layout_compact); + } + + PASSED(); + } + PART_END(H5Dset_extent_invalid_layout_compact); + + PART_BEGIN(H5Dset_extent_invalid_layout_contiguous) + { + TESTING_2("H5Dset_extent with an invalid dataset layout (contiguous)"); + + H5E_BEGIN_TRY + { + err_ret = H5Dset_extent(contiguous_dset_id, new_dims); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" setting dataset extent succeeded with an invalid layout (contiguous)\n"); + PART_ERROR(H5Dset_extent_invalid_layout_contiguous); + } + + PASSED(); + } + PART_END(H5Dset_extent_invalid_layout_contiguous); + + 
PART_BEGIN(H5Dset_extent_invalid_dset_id)
+        {
+            TESTING_2("H5Dset_extent with an invalid dataset ID");
+
+            H5E_BEGIN_TRY
+            {
+                err_ret = H5Dset_extent(H5I_INVALID_HID, new_dims);
+            }
+            H5E_END_TRY;
+
+            if (err_ret >= 0) {
+                H5_FAILED();
+                HDprintf("    setting dataset extent succeeded with an invalid dataset ID\n");
+                PART_ERROR(H5Dset_extent_invalid_dset_id);
+            }
+
+            PASSED();
+        }
+        PART_END(H5Dset_extent_invalid_dset_id);
+
+        PART_BEGIN(H5Dset_extent_null_dim_pointer)
+        {
+            TESTING_2("H5Dset_extent with NULL dimension pointer");
+
+            H5E_BEGIN_TRY
+            {
+                err_ret = H5Dset_extent(chunked_dset_id, NULL);
+            }
+            H5E_END_TRY;
+
+            if (err_ret >= 0) {
+                H5_FAILED();
+                HDprintf("    setting dataset extent succeeded with a NULL dimension pointer\n");
+                PART_ERROR(H5Dset_extent_null_dim_pointer);
+            }
+
+            PASSED();
+        }
+        PART_END(H5Dset_extent_null_dim_pointer);
+    }
+    END_MULTIPART;
+
+    TESTING_2("test cleanup");
+
+    if (H5Pclose(chunked_dcpl_id) < 0)
+        TEST_ERROR;
+    if (H5Pclose(compact_dcpl_id) < 0)
+        TEST_ERROR;
+    if (H5Pclose(contiguous_dcpl_id) < 0)
+        TEST_ERROR;
+    if (H5Sclose(fspace_id) < 0)
+        TEST_ERROR;
+    if (H5Sclose(compact_fspace_id) < 0)
+        TEST_ERROR;
+    if (H5Tclose(dset_dtype) < 0)
+        TEST_ERROR;
+    if (H5Dclose(chunked_dset_id) < 0)
+        TEST_ERROR;
+    if (H5Dclose(compact_dset_id) < 0)
+        TEST_ERROR;
+    if (H5Dclose(contiguous_dset_id) < 0)
+        TEST_ERROR;
+    if (H5Gclose(group_id) < 0)
+        TEST_ERROR;
+    if (H5Gclose(container_group) < 0)
+        TEST_ERROR;
+    if (H5Fclose(file_id) < 0)
+        TEST_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY
+    {
+        H5Pclose(chunked_dcpl_id);
+        H5Pclose(compact_dcpl_id);
+        H5Pclose(contiguous_dcpl_id);
+        H5Sclose(fspace_id);
+        H5Sclose(compact_fspace_id);
+        H5Tclose(dset_dtype);
+        H5Dclose(chunked_dset_id);
+        H5Dclose(compact_dset_id);
+        H5Dclose(contiguous_dset_id);
+        H5Gclose(group_id);
+        H5Gclose(container_group);
+        H5Fclose(file_id);
+    }
+    H5E_END_TRY;
+
+    return 1;
+} /* test_dataset_set_extent_invalid_params */
+
+/*
+ * A test for H5Dflush.
+ */
+static int
+test_flush_dataset(void)
+{
+    TESTING("H5Dflush");
+
+    SKIPPED();
+
+    return 0;
+}
+
+/*
+ * A test to check that H5Dflush fails when it is
+ * passed invalid parameters.
+ */
+static int
+test_flush_dataset_invalid_params(void)
+{
+    TESTING("H5Dflush with invalid parameters");
+
+    SKIPPED();
+
+    return 0;
+}
+
+/*
+ * A test for H5Drefresh.
+ */
+static int
+test_refresh_dataset(void)
+{
+    TESTING("H5Drefresh");
+
+    SKIPPED();
+
+    return 0;
+}
+
+/*
+ * A test to check that H5Drefresh fails when it is
+ * passed invalid parameters.
+ */
+static int
+test_refresh_dataset_invalid_params(void)
+{
+    TESTING("H5Drefresh with invalid parameters");
+
+    SKIPPED();
+
+    return 0;
+}
+
+/*
+ * A test to create a dataset composed of a single chunk.
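+ * Here "single chunk" means the chunk dimensions are set equal to the dataset
+ * dimensions, so the entire dataset is stored in a single chunk.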
+ */ +static int +test_create_single_chunk_dataset(void) +{ + hsize_t dims[DATASET_SINGLE_CHUNK_TEST_SPACE_RANK]; + hsize_t retrieved_chunk_dims[DATASET_SINGLE_CHUNK_TEST_SPACE_RANK]; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING("creation of dataset with single chunk"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported " + "with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SINGLE_CHUNK_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_SINGLE_CHUNK_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_SINGLE_CHUNK_TEST_SPACE_RANK, NULL, dims, FALSE)) < 0) + TEST_ERROR; + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_SINGLE_CHUNK_TEST_SPACE_RANK, dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_SINGLE_CHUNK_TEST_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SINGLE_CHUNK_TEST_DSET_NAME); + goto error; + } + + /* + * See if a copy of the DCPL reports the correct chunking. + */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + goto error; + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + goto error; + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_SINGLE_CHUNK_TEST_SPACE_RANK, retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + goto error; + } + + for (i = 0; i < DATASET_SINGLE_CHUNK_TEST_SPACE_RANK; i++) { + if (dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + goto error; + } + } + + /* + * Now close the dataset and retrieve a copy + * of the DCPL after re-opening it. 
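+     * Re-checking the chunking after a fresh H5Dopen2 confirms that the chunk
+     * dimensions were stored in the file and not merely cached on the handle
+     * that created the dataset.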
+ */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + goto error; + } + + if (H5Dclose(dset_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close dataset\n"); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_SINGLE_CHUNK_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to re-open dataset\n"); + goto error; + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + goto error; + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_SINGLE_CHUNK_TEST_SPACE_RANK, retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + goto error; + } + + for (i = 0; i < DATASET_SINGLE_CHUNK_TEST_SPACE_RANK; i++) { + if (dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + goto error; + } + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a single-chunk dataset can be written + * and read correctly. + */ +static int +test_write_single_chunk_dataset(void) +{ + hssize_t space_npoints; + hsize_t dims[DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK]; + hsize_t retrieved_chunk_dims[DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("write to dataset with single chunk"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported " + "with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_SINGLE_CHUNK_WRITE_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_SINGLE_CHUNK_WRITE_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK, NULL, dims, + FALSE)) < 0) + 
TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK, dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME, + DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, dcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME); + goto error; + } + + /* + * See if a copy of the DCPL reports the correct chunking. + */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + goto error; + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + goto error; + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK, retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + goto error; + } + + for (i = 0; i < DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK; i++) { + if (dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + goto error; + } + } + + for (i = 0, data_size = 1; i < DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPESIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) + TEST_ERROR; + + for (i = 0; i < data_size / DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPESIZE; i++) + ((int *)write_buf)[i] = (int)i; + + if (H5Dwrite(dset_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME); + goto error; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPESIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < (hsize_t)space_npoints; i++) + if (((int *)read_buf)[i] != (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; 
+ if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (write_buf) + HDfree(write_buf); + if (read_buf) + HDfree(read_buf); + H5Pclose(dcpl_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to create a dataset composed of multiple chunks. + */ +static int +test_create_multi_chunk_dataset(void) +{ + hsize_t dims[DATASET_MULTI_CHUNK_TEST_SPACE_RANK] = {100, 100}; + hsize_t chunk_dims[DATASET_MULTI_CHUNK_TEST_SPACE_RANK] = {10, 10}; + hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_TEST_SPACE_RANK]; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING("creation of dataset with multiple chunks"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported " + "with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_MULTI_CHUNK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_MULTI_CHUNK_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_TEST_SPACE_RANK, chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_TEST_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_MULTI_CHUNK_TEST_DSET_NAME); + goto error; + } + + /* + * See if a copy of the DCPL reports the correct chunking. 
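+     * Note that H5Dget_create_plist returns a copy of the dataset's creation
+     * property list, so the copy must be closed with H5Pclose once checked.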
+ */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + goto error; + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + goto error; + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_TEST_SPACE_RANK, retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + goto error; + } + + for (i = 0; i < DATASET_MULTI_CHUNK_TEST_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + goto error; + } + } + + /* + * Now close the dataset and retrieve a copy + * of the DCPL after re-opening it. + */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + goto error; + } + + if (H5Dclose(dset_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close dataset\n"); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to re-open dataset\n"); + goto error; + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + goto error; + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_TEST_SPACE_RANK, retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + goto error; + } + + for (i = 0; i < DATASET_MULTI_CHUNK_TEST_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + goto error; + } + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset composed of multiple chunks + * can be written and read correctly. When reading back the + * chunks of the dataset, the file dataspace and memory dataspace + * used are the same shape. 
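+ * Here "same shape" means each chunk is read back through a 2-dimensional,
+ * chunk-sized memory dataspace that matches the 2-dimensional hyperslab
+ * selected in the file.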
+ */ +static int +test_write_multi_chunk_dataset_same_shape_read(void) +{ + hsize_t dims[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK] = {100, 100}; + hsize_t chunk_dims[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK] = {10, 10}; + hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t start[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t count[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + size_t i, data_size, chunk_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + int read_buf[10][10]; + + TESTING("write to dataset with multiple chunks using same shaped dataspaces"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported " + "with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, dims, + NULL)) < 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, chunk_dims) < + 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME, + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, fspace_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + /* + * See if a copy of the DCPL reports the correct chunking. 
+ */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + goto error; + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + goto error; + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, + retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + goto error; + } + + for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + goto error; + } + } + + for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + chunk_size *= chunk_dims[i]; + chunk_size *= DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; + + for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) + TEST_ERROR; + + /* + * Ensure that each underlying chunk contains the values + * + * chunk_index .. (chunk_nelemts - 1) + chunk_index. + * + * That is to say, for a chunk size of 10 x 10, chunk 0 + * contains the values + * + * 0 .. 99 + * + * while the next chunk contains the values + * + * 1 .. 100 + * + * and so on. + */ + for (i = 0; i < data_size / DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; i++) { + size_t j; + size_t base; + size_t tot_adjust; + + /* + * Calculate a starting base value by taking the index value mod + * the size of a chunk in each dimension. + */ + for (j = 0, base = i; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) + if (chunk_dims[j] > 1 && base >= chunk_dims[j]) + base %= chunk_dims[j]; + + /* + * Calculate the adjustment in each dimension. + */ + for (j = 0, tot_adjust = 0; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == (DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + tot_adjust += (i % dims[j]) / chunk_dims[j]; + else { + size_t k; + size_t n_faster_elemts; + + /* + * Calculate the number of elements in faster dimensions. + */ + for (k = j + 1, n_faster_elemts = 1; + k < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; k++) + n_faster_elemts *= dims[k]; + + tot_adjust += (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) + + (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]); + } + } + + ((int *)write_buf)[i] = (int)(base + tot_adjust); + } + + /* + * Write every chunk in the dataset. 
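+     * A single H5Dwrite with H5S_ALL for both the file and memory selections
+     * covers the full extent of the dataset and therefore touches every chunk.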
+ */ + if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = + H5Dopen2(group_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + /* + * Create 2-dimensional memory dataspace for read buffer. + */ + { + hsize_t mdims[] = {chunk_dims[0], chunk_dims[1]}; + + if ((mspace_id = H5Screate_simple(2, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create memory dataspace\n"); + goto error; + } + } + + for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + count[i] = chunk_dims[i]; + } + + /* + * Read every chunk in the dataset, checking the data for each one. + */ + HDprintf("\n"); + for (i = 0; i < data_size / chunk_size; i++) { + size_t j, k; + + HDprintf("\r Reading chunk %zu", i); + + for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (dims[j] == chunk_dims[j]) + start[j] = 0; + else if (j == (DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + /* Fastest changing dimension */ + start[j] = (i * chunk_dims[j]) % dims[j]; + else + start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]); + } + + /* + * Adjust file dataspace selection for next chunk. 
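+         * The start[] offset computed above advances by one chunk in the
+         * fastest-changing dimension, wrapping at the dataset extent, and
+         * steps the slower dimension by one chunk after each full row of
+         * chunks has been read.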
+ */ + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to set hyperslab selection\n"); + goto error; + } + + for (j = 0; j < chunk_dims[0]; j++) + for (k = 0; k < chunk_dims[1]; k++) + read_buf[j][k] = 0; + + if (H5Dread(dset_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + for (j = 0; j < chunk_dims[0]; j++) { + for (k = 0; k < chunk_dims[1]; k++) { + if (read_buf[j][k] != (int)((j * chunk_dims[0]) + k + i)) { + H5_FAILED(); + HDprintf(" data verification failed for chunk %lld\n", (long long)i); + goto error; + } + } + } + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (write_buf) + HDfree(write_buf); + H5Pclose(dcpl_id); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset composed of multiple chunks + * can be written and read correctly. When reading back the + * chunks of the dataset, the file dataspace and memory dataspace + * used are differently shaped. + */ +static int +test_write_multi_chunk_dataset_diff_shape_read(void) +{ + hsize_t dims[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK] = {100, 100}; + hsize_t chunk_dims[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK] = {10, 10}; + hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t start[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t count[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK]; + size_t i, data_size, chunk_size; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("write to dataset with multiple chunks using differently shaped dataspaces"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported " + "with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT, 
H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, dims, + NULL)) < 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, chunk_dims) < + 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME, + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, fspace_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + /* + * See if a copy of the DCPL reports the correct chunking. + */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + goto error; + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + goto error; + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, + retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + goto error; + } + + for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + goto error; + } + } + + for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + chunk_size *= chunk_dims[i]; + chunk_size *= DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; + + for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) + TEST_ERROR; + + /* + * Ensure that each underlying chunk contains the values + * + * chunk_index .. (chunk_nelemts - 1) + chunk_index. + * + * That is to say, for a chunk size of 10 x 10, chunk 0 + * contains the values + * + * 0 .. 99 + * + * while the next chunk contains the values + * + * 1 .. 100 + * + * and so on. + */ + for (i = 0; i < data_size / DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; i++) { + size_t j; + size_t base; + size_t tot_adjust; + + /* + * Calculate a starting base value by taking the index value mod + * the size of a chunk in each dimension. + */ + for (j = 0, base = i; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) + if (chunk_dims[j] > 1 && base >= chunk_dims[j]) + base %= chunk_dims[j]; + + /* + * Calculate the adjustment in each dimension. + */ + for (j = 0, tot_adjust = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + tot_adjust += (i % dims[j]) / chunk_dims[j]; + else { + size_t k; + size_t n_faster_elemts; + + /* + * Calculate the number of elements in faster dimensions. 
+ */ + for (k = j + 1, n_faster_elemts = 1; + k < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; k++) + n_faster_elemts *= dims[k]; + + tot_adjust += (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) + + (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]); + } + } + + ((int *)write_buf)[i] = (int)(base + tot_adjust); + } + + /* + * Write every chunk in the dataset. + */ + if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = + H5Dopen2(group_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + /* + * Allocate single chunk-sized read buffer. + */ + if (NULL == (read_buf = HDmalloc(chunk_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + /* + * Create 1-dimensional memory dataspace for read buffer. + */ + { + hsize_t mdims[] = {chunk_size / DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create memory dataspace\n"); + goto error; + } + } + + for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + count[i] = chunk_dims[i]; + } + + /* + * Read every chunk in the dataset, checking the data for each one. + */ + HDprintf("\n"); + for (i = 0; i < data_size / chunk_size; i++) { + size_t j; + + HDprintf("\r Reading chunk %zu", i); + + for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (dims[j] == chunk_dims[j]) + start[j] = 0; + else if (j == (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + /* Fastest changing dimension */ + start[j] = (i * chunk_dims[j]) % dims[j]; + else + start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]); + } + + /* + * Adjust file dataspace selection for next chunk. 
+ */ + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to set hyperslab selection\n"); + goto error; + } + + memset(read_buf, 0, chunk_size); + if (H5Dread(dset_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + for (j = 0; j < (hsize_t)chunk_size / DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; + j++) + if (((int *)read_buf)[j] != (int)(j + i)) { + H5_FAILED(); + HDprintf(" data verification failed for chunk %lld\n", (long long)i); + goto error; + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (write_buf) + HDfree(write_buf); + if (read_buf) + HDfree(read_buf); + H5Pclose(dcpl_id); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset composed of multiple chunks + * can be written and read correctly several times in a row. + * When reading back the chunks of the dataset, the file + * dataspace and memory dataspace used are the same shape. + */ +static int +test_overwrite_multi_chunk_dataset_same_shape_read(void) +{ + hsize_t dims[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK] = {100, 100}; + hsize_t chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK] = {10, 10}; + hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t start[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t count[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + size_t i, data_size, chunk_size; + size_t niter; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + int read_buf[10][10]; + + TESTING("several overwrites to dataset with multiple chunks using same shaped dataspaces"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported " + "with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = 
H5Gcreate2(container_group, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, + dims, NULL)) < 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, + chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME, + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, fspace_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + /* + * See if a copy of the DCPL reports the correct chunking. + */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + goto error; + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + goto error; + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, + retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + goto error; + } + + for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + goto error; + } + } + + for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + chunk_size *= chunk_dims[i]; + chunk_size *= DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; + + for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) + TEST_ERROR; + + /* + * Create 2-dimensional memory dataspace for read buffer. + */ + { + hsize_t mdims[] = {chunk_dims[0], chunk_dims[1]}; + + if ((mspace_id = H5Screate_simple(2, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create memory dataspace\n"); + goto error; + } + } + + for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + count[i] = chunk_dims[i]; + } + + HDprintf("\n"); + for (niter = 0; niter < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_NITERS; niter++) { + memset(write_buf, 0, data_size); + + /* + * Ensure that each underlying chunk contains the values + * + * chunk_index .. (chunk_nelemts - 1) + chunk_index. + * + * That is to say, for a chunk size of 10 x 10, chunk 0 + * contains the values + * + * 0 .. 99 + * + * while the next chunk contains the values + * + * 1 .. 100 + * + * and so on. On each iteration, we add 1 to the previous + * values. 
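+         * The verification below therefore expects each chunk to hold the
+         * values chunk_index + niter .. (chunk_nelemts - 1) + chunk_index + niter.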
+ */ + for (i = 0; i < data_size / DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; i++) { + size_t j; + size_t base; + size_t tot_adjust; + + /* + * Calculate a starting base value by taking the index value mod + * the size of a chunk in each dimension. + */ + for (j = 0, base = i; j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) + if (chunk_dims[j] > 1 && base >= chunk_dims[j]) + base %= chunk_dims[j]; + + /* + * Calculate the adjustment in each dimension. + */ + for (j = 0, tot_adjust = 0; + j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == (DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + tot_adjust += (i % dims[j]) / chunk_dims[j]; + else { + size_t k; + size_t n_faster_elemts; + + /* + * Calculate the number of elements in faster dimensions. + */ + for (k = j + 1, n_faster_elemts = 1; + k < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; k++) + n_faster_elemts *= dims[k]; + + tot_adjust += + (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) + + (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]); + } + } + + ((int *)write_buf)[i] = (int)(base + tot_adjust + niter); + } + + /* + * Write every chunk in the dataset. + */ + if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + /* + * Read every chunk in the dataset, checking the data for each one. + */ + for (i = 0; i < data_size / chunk_size; i++) { + size_t j, k; + + HDprintf("\r Reading chunk %zu", i); + + for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (dims[j] == chunk_dims[j]) + start[j] = 0; + else if (j == (DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + /* Fastest changing dimension */ + start[j] = (i * chunk_dims[j]) % dims[j]; + else + start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]); + } + + /* + * Adjust file dataspace selection for next chunk. 
+ */ + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to set hyperslab selection\n"); + goto error; + } + + for (j = 0; j < chunk_dims[0]; j++) + for (k = 0; k < chunk_dims[1]; k++) + read_buf[j][k] = 0; + + if (H5Dread(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, mspace_id, + fspace_id, H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + for (j = 0; j < chunk_dims[0]; j++) { + for (k = 0; k < chunk_dims[1]; k++) { + if (read_buf[j][k] != (int)((j * chunk_dims[0]) + k + i + niter)) { + H5_FAILED(); + HDprintf(" data verification failed for chunk %lld\n", (long long)i); + goto error; + } + } + } + } + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (write_buf) + HDfree(write_buf); + H5Pclose(dcpl_id); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset composed of multiple chunks + * can be written and read correctly several times in a row. + * When reading back the chunks of the dataset, the file + * dataspace and memory dataspace used are differently shaped. + */ +static int +test_overwrite_multi_chunk_dataset_diff_shape_read(void) +{ + hsize_t dims[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK] = {100, 100}; + hsize_t chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK] = {10, 10}; + hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t start[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t count[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK]; + size_t i, data_size, chunk_size; + size_t niter; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("several overwrites to dataset with multiple chunks using differently shaped dataspaces"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported " + "with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", 
DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, + dims, NULL)) < 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, + chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME, + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, fspace_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + /* + * See if a copy of the DCPL reports the correct chunking. + */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + goto error; + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + goto error; + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, + retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + goto error; + } + + for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + goto error; + } + } + + for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + chunk_size *= chunk_dims[i]; + chunk_size *= DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; + + for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) + TEST_ERROR; + + /* + * Allocate single chunk-sized read buffer. + */ + if (NULL == (read_buf = HDmalloc(chunk_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + /* + * Create 1-dimensional memory dataspace for read buffer. + */ + { + hsize_t mdims[] = {chunk_size / DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create memory dataspace\n"); + goto error; + } + } + + for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + count[i] = chunk_dims[i]; + } + + HDprintf("\n"); + for (niter = 0; niter < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_NITERS; niter++) { + memset(write_buf, 0, data_size); + + /* + * Ensure that each underlying chunk contains the values + * + * chunk_index .. (chunk_nelemts - 1) + chunk_index. + * + * That is to say, for a chunk size of 10 x 10, chunk 0 + * contains the values + * + * 0 .. 
99 + * + * while the next chunk contains the values + * + * 1 .. 100 + * + * and so on. On each iteration, we add 1 to the previous + * values. + */ + for (i = 0; i < data_size / DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; i++) { + size_t j; + size_t base; + size_t tot_adjust; + + /* + * Calculate a starting base value by taking the index value mod + * the size of a chunk in each dimension. + */ + for (j = 0, base = i; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) + if (chunk_dims[j] > 1 && base >= chunk_dims[j]) + base %= chunk_dims[j]; + + /* + * Calculate the adjustment in each dimension. + */ + for (j = 0, tot_adjust = 0; + j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + tot_adjust += (i % dims[j]) / chunk_dims[j]; + else { + size_t k; + size_t n_faster_elemts; + + /* + * Calculate the number of elements in faster dimensions. + */ + for (k = j + 1, n_faster_elemts = 1; + k < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; k++) + n_faster_elemts *= dims[k]; + + tot_adjust += + (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) + + (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]); + } + } + + ((int *)write_buf)[i] = (int)(base + tot_adjust + niter); + } + + /* + * Write every chunk in the dataset. + */ + if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + /* + * Read every chunk in the dataset, checking the data for each one. + */ + for (i = 0; i < data_size / chunk_size; i++) { + size_t j; + + HDprintf("\r Reading chunk %zu", i); + + for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (dims[j] == chunk_dims[j]) + start[j] = 0; + else if (j == (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + /* Fastest changing dimension */ + start[j] = (i * chunk_dims[j]) % dims[j]; + else + start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]); + } + + /* + * Adjust file dataspace selection for next chunk. 
+ */ + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to set hyperslab selection\n"); + goto error; + } + + memset(read_buf, 0, chunk_size); + if (H5Dread(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, mspace_id, + fspace_id, H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + for (j = 0; + j < (hsize_t)chunk_size / DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; + j++) + if (((int *)read_buf)[j] != (int)(j + i + niter)) { + H5_FAILED(); + HDprintf(" data verification failed for chunk %lld\n", (long long)i); + goto error; + } + } + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (write_buf) + HDfree(write_buf); + if (read_buf) + HDfree(read_buf); + H5Pclose(dcpl_id); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a partial chunk can be written and + * then read correctly when the selection used in a chunked + * dataset's file dataspace is H5S_ALL. + */ +#define FIXED_DIMSIZE 25 +#define FIXED_CHUNK_DIMSIZE 10 +static int +test_read_partial_chunk_all_selection(void) +{ + DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_CTYPE write_buf[FIXED_DIMSIZE][FIXED_DIMSIZE]; + DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_CTYPE read_buf[FIXED_DIMSIZE][FIXED_DIMSIZE]; + hsize_t dims[DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK] = {FIXED_DIMSIZE, FIXED_DIMSIZE}; + hsize_t chunk_dims[DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK] = {FIXED_CHUNK_DIMSIZE, + FIXED_CHUNK_DIMSIZE}; + hsize_t retrieved_chunk_dims[DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK]; + size_t i, j; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING("reading a partial chunk using H5S_ALL for file dataspace"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported " + "with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, 
DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = H5Screate_simple(DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK, dims, NULL)) < + 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK, chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_NAME, + DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_NAME); + goto error; + } + + /* + * See if a copy of the DCPL reports the correct chunking. + */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + goto error; + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + goto error; + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK, retrieved_chunk_dims) < + 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + goto error; + } + + for (i = 0; i < DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + goto error; + } + } + + for (i = 0; i < FIXED_DIMSIZE; i++) + for (j = 0; j < FIXED_DIMSIZE; j++) + write_buf[i][j] = (DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_CTYPE)((i * FIXED_DIMSIZE) + j); + + for (i = 0; i < FIXED_DIMSIZE; i++) + for (j = 0; j < FIXED_DIMSIZE; j++) + read_buf[i][j] = -1; + + if (H5Dwrite(dset_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + write_buf) < 0) { + H5_FAILED(); + HDprintf(" failed to write to dataset\n"); + goto error; + } + + /* + * Close and re-open the dataset to ensure that the data is written. 
+ */ + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if ((dset_id = H5Dopen2(group_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to re-open dataset\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" failed to read from dataset\n"); + goto error; + } + + for (i = 0; i < FIXED_DIMSIZE; i++) + for (j = 0; j < FIXED_DIMSIZE; j++) + if (read_buf[i][j] != (int)((i * FIXED_DIMSIZE) + j)) { + H5_FAILED(); + HDprintf( + " data verification failed for read buffer element %lld: expected %lld but was %lld\n", + (long long)((i * FIXED_DIMSIZE) + j), (long long)((i * FIXED_DIMSIZE) + j), + (long long)read_buf[i][j]); + goto error; + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} +#undef FIXED_DIMSIZE +#undef FIXED_CHUNK_DIMSIZE + +/* + * A test to ensure that a partial chunk can be written and + * then read correctly when the selection used in a chunked + * dataset's file dataspace is a hyperslab selection. + */ +#define FIXED_DIMSIZE 25 +#define FIXED_CHUNK_DIMSIZE 10 +#define FIXED_NCHUNKS 9 /* For convenience - make sure to adjust this as necessary */ +static int +test_read_partial_chunk_hyperslab_selection(void) +{ + DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_CTYPE write_buf[FIXED_CHUNK_DIMSIZE][FIXED_CHUNK_DIMSIZE]; + DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_CTYPE read_buf[FIXED_CHUNK_DIMSIZE][FIXED_CHUNK_DIMSIZE]; + hsize_t dims[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK] = {FIXED_DIMSIZE, FIXED_DIMSIZE}; + hsize_t chunk_dims[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK] = {FIXED_CHUNK_DIMSIZE, + FIXED_CHUNK_DIMSIZE}; + hsize_t retrieved_chunk_dims[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK]; + size_t i, j, k; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING("reading a partial chunk using a hyperslab selection in file dataspace"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported " + "with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, 
DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = + H5Screate_simple(DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (H5Pset_chunk(dcpl_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK, chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_NAME, + DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_NAME); + goto error; + } + + /* + * See if a copy of the DCPL reports the correct chunking. + */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + goto error; + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + goto error; + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK, + retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + goto error; + } + + for (i = 0; i < DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + goto error; + } + } + + for (i = 0; i < FIXED_CHUNK_DIMSIZE; i++) + for (j = 0; j < FIXED_CHUNK_DIMSIZE; j++) + write_buf[i][j] = + (DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_CTYPE)((i * FIXED_CHUNK_DIMSIZE) + j); + + for (i = 0; i < FIXED_CHUNK_DIMSIZE; i++) + for (j = 0; j < FIXED_CHUNK_DIMSIZE; j++) + read_buf[i][j] = -1; + + /* + * Create chunk-sized memory dataspace for read buffer. + */ + { + hsize_t mdims[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK] = {FIXED_CHUNK_DIMSIZE, + FIXED_CHUNK_DIMSIZE}; + + if ((mspace_id = H5Screate_simple(DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK, mdims, + NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create memory dataspace\n"); + goto error; + } + } + + /* + * Write and read each chunk in the dataset. + */ + for (i = 0; i < FIXED_NCHUNKS; i++) { + hsize_t start[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK]; + hsize_t count[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK]; + hbool_t on_partial_edge_chunk = FALSE; + size_t n_chunks_per_dim = (dims[1] / chunk_dims[1]) + (((dims[1] % chunk_dims[1]) > 0) ? 
1 : 0); + + on_partial_edge_chunk = + (i > 0) && (((i + 1) % n_chunks_per_dim == 0) || (i / n_chunks_per_dim == n_chunks_per_dim - 1)); + + for (j = 0; j < DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK; j++) { + if (j == 0) + start[j] = (i / n_chunks_per_dim) * chunk_dims[j]; + else + start[j] = (i % n_chunks_per_dim) * chunk_dims[j]; + + if (on_partial_edge_chunk) { + /* + * Partial edge chunk + */ + if (j == 0) { + if (i / n_chunks_per_dim == n_chunks_per_dim - 1) + /* This partial edge chunk spans the remainder of the first dimension */ + count[j] = dims[j] - ((i / n_chunks_per_dim) * chunk_dims[j]); + else + /* This partial edge chunk spans the whole first dimension */ + count[j] = chunk_dims[j]; + } + else { + if (i % n_chunks_per_dim == n_chunks_per_dim - 1) + /* This partial edge chunk spans the remainder of the second dimension */ + count[j] = dims[j] - ((i % n_chunks_per_dim) * chunk_dims[j]); + else + /* This partial edge chunk spans the whole second dimension */ + count[j] = chunk_dims[j]; + } + } + else + count[j] = chunk_dims[j]; + } + + if (on_partial_edge_chunk) { + hsize_t m_start[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK] = {0, 0}; + + if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, m_start, NULL, count, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to select hyperslab in memory dataspace\n"); + goto error; + } + } + else { + if (H5Sselect_all(mspace_id) < 0) { + H5_FAILED(); + HDprintf(" failed to select entire memory dataspace\n"); + goto error; + } + } + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to select hyperslab\n"); + goto error; + } + + HDprintf("\r Writing chunk %zu", i); + + if (H5Dwrite(dset_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" failed to write to dataset\n"); + goto error; + } + + /* + * Close and re-open the dataset to ensure the data gets written. 
+ */ + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if ((dset_id = H5Dopen2(group_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" failed to re-open dataset\n"); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve dataspace from dataset\n"); + goto error; + } + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to select hyperslab\n"); + goto error; + } + + HDprintf("\r Reading chunk %zu", i); + + if (H5Dread(dset_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" failed to read from dataset\n"); + goto error; + } + + for (j = 0; j < FIXED_CHUNK_DIMSIZE; j++) + for (k = 0; k < FIXED_CHUNK_DIMSIZE; k++) + if (read_buf[j][k] != (int)((j * FIXED_CHUNK_DIMSIZE) + k)) { + H5_FAILED(); + HDprintf(" data verification failed for read buffer element %lld: expected %lld but " + "was %lld\n", + (long long)((j * FIXED_CHUNK_DIMSIZE) + k), + (long long)((j * FIXED_CHUNK_DIMSIZE) + k), (long long)read_buf[j][k]); + goto error; + } + } + + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} +#undef FIXED_DIMSIZE +#undef FIXED_CHUNK_DIMSIZE +#undef FIXED_NCHUNKS + +/* + * A test to ensure that a partial chunk can be written and + * then read correctly when the selection used in a chunked + * dataset's file dataspace is a point selection. 
+ */ +#define FIXED_DIMSIZE 25 +#define FIXED_CHUNK_DIMSIZE 10 +static int +test_read_partial_chunk_point_selection(void) +{ + TESTING("reading a partial chunk using a point selection in file dataspace"); + SKIPPED(); + + return 1; +} +#undef FIXED_DIMSIZE +#undef FIXED_CHUNK_DIMSIZE + +/* + * A test to verify that H5Dvlen_get_buf_size returns + * correct size + */ +static int +test_get_vlen_buf_size(void) +{ + hvl_t wdata[DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM]; /* Information to write */ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t dataset = H5I_INVALID_HID; + hid_t dspace_id = H5I_INVALID_HID; + hid_t dtype_id = H5I_INVALID_HID; + hbool_t freed_wdata = FALSE; + hsize_t dims1[] = {DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM}; + hsize_t size; /* Number of bytes which will be used */ + unsigned i, j; + + TESTING("H5Dvlen_get_buf_size"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or more aren't supported with this " + "connector\n"); + return 0; + } + + /* Allocate and initialize VL data to write */ + for (i = 0; i < DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM; i++) { + wdata[i].p = HDmalloc((i + 1) * sizeof(unsigned int)); + wdata[i].len = i + 1; + for (j = 0; j < (i + 1); j++) + ((unsigned int *)wdata[i].p)[j] = i * 10 + j; + } /* end for */ + + /* Open the file */ + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_GET_VLEN_BUF_SIZE_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_GET_VLEN_BUF_SIZE_GROUP_NAME); + goto error; + } + + /* Create dataspace for dataset */ + if ((dspace_id = H5Screate_simple(DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_RANK, dims1, NULL)) < 0) + TEST_ERROR; + + /* Create a datatype to refer to */ + if ((dtype_id = H5Tvlen_create(H5T_NATIVE_UINT)) < 0) + TEST_ERROR; + + /* Create a dataset */ + if ((dataset = H5Dcreate2(group_id, DATASET_GET_VLEN_BUF_SIZE_DSET_NAME, dtype_id, dspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Write dataset to disk */ + if (H5Dwrite(dataset, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata) < 0) + TEST_ERROR; + + /* Make certain the correct amount of memory will be used */ + if (H5Dvlen_get_buf_size(dataset, dtype_id, dspace_id, &size) < 0) + TEST_ERROR; + + /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + if (size != + ((DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM * (DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM + 1)) / 2) * + sizeof(unsigned int)) { + H5_FAILED(); + HDprintf( + " H5Dvlen_get_buf_size returned wrong size (%lu), compared to the correct size (%lu)\n", size, + ((DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM * (DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM + 1)) / + 2) * + sizeof(unsigned int)); + goto error; + } + + if 
(H5Treclaim(dtype_id, dspace_id, H5P_DEFAULT, wdata) < 0) + TEST_ERROR; + freed_wdata = TRUE; + + if (H5Dclose(dataset) < 0) + TEST_ERROR; + + if (H5Tclose(dtype_id) < 0) + TEST_ERROR; + + if (H5Sclose(dspace_id) < 0) + TEST_ERROR; + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + + if (H5Gclose(container_group) < 0) + TEST_ERROR; + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (!freed_wdata) + H5Treclaim(dtype_id, dspace_id, H5P_DEFAULT, wdata); + H5Sclose(dspace_id); + H5Tclose(dtype_id); + H5Dclose(dataset); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} /* end test_get_vlen_buf_size() */ + +int +H5_api_dataset_test(void) +{ + size_t i; + int nerrors; + + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Dataset Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(dataset_tests); i++) { + nerrors += (*dataset_tests[i])() ? 1 : 0; + } + + HDprintf("\n"); + + return nerrors; +} diff --git a/test/API/H5_api_dataset_test.h b/test/API/H5_api_dataset_test.h new file mode 100644 index 00000000000..5a50a063443 --- /dev/null +++ b/test/API/H5_api_dataset_test.h @@ -0,0 +1,331 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_DATASET_TEST_H +#define H5_API_DATASET_TEST_H + +#include "H5_api_test.h" + +int H5_api_dataset_test(void); + +/************************************************ + * * + * API Dataset test defines * + * * + ************************************************/ + +#define DATASET_CREATE_UNDER_ROOT_DSET_NAME "/dset_under_root" +#define DATASET_CREATE_UNDER_ROOT_SPACE_RANK 2 + +#define DATASET_CREATE_UNDER_EXISTING_SPACE_RANK 2 +#define DATASET_CREATE_UNDER_EXISTING_GROUP_NAME "dset_under_group_test" +#define DATASET_CREATE_UNDER_EXISTING_DSET_NAME "nested_dset" + +#define DATASET_CREATE_INVALID_PARAMS_SPACE_RANK 2 +#define DATASET_CREATE_INVALID_PARAMS_GROUP_NAME "dset_create_invalid_params_test" +#define DATASET_CREATE_INVALID_PARAMS_DSET_NAME "invalid_params_dset" + +#define DATASET_CREATE_ANONYMOUS_DATASET_NAME "anon_dset" +#define DATASET_CREATE_ANONYMOUS_GROUP_NAME "anon_dset_creation_test" +#define DATASET_CREATE_ANONYMOUS_SPACE_RANK 2 + +#define DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_DATASET_NAME "invalid_params_anon_dset" +#define DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME "anon_dset_creation_invalid_params_test" +#define DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_SPACE_RANK 2 + +#define DATASET_CREATE_NULL_DATASPACE_TEST_SUBGROUP_NAME "dataset_with_null_space_test" +#define DATASET_CREATE_NULL_DATASPACE_TEST_DSET_NAME "dataset_with_null_space" + +#define DATASET_CREATE_SCALAR_DATASPACE_TEST_SUBGROUP_NAME "dataset_with_scalar_space_test" +#define DATASET_CREATE_SCALAR_DATASPACE_TEST_DSET_NAME "dataset_with_scalar_space" + +#define ZERO_DIM_DSET_TEST_GROUP_NAME "zero_dim_dset_test" +#define ZERO_DIM_DSET_TEST_SPACE_RANK 1 +#define ZERO_DIM_DSET_TEST_DSET_NAME "zero_dim_dset" + +#define DATASET_MANY_CREATE_GROUP_NAME "group_for_many_datasets" +#define DSET_NAME_BUF_SIZE 64u +#define DATASET_NUMB 100u + +#define DATASET_SHAPE_TEST_DSET_BASE_NAME "dataset_shape_test" +#define DATASET_SHAPE_TEST_SUBGROUP_NAME "dataset_shape_test" +#define DATASET_SHAPE_TEST_NUM_ITERATIONS 5 +#define DATASET_SHAPE_TEST_MAX_DIMS 5 + +#define DATASET_PREDEFINED_TYPE_TEST_SPACE_RANK 2 +#define DATASET_PREDEFINED_TYPE_TEST_BASE_NAME "predefined_type_dset" +#define DATASET_PREDEFINED_TYPE_TEST_SUBGROUP_NAME "predefined_type_dataset_test" + +#define DATASET_STRING_TYPE_TEST_STRING_LENGTH 40 +#define DATASET_STRING_TYPE_TEST_SPACE_RANK 2 +#define DATASET_STRING_TYPE_TEST_DSET_NAME1 "fixed_length_string_dset" +#define DATASET_STRING_TYPE_TEST_DSET_NAME2 "variable_length_string_dset" +#define DATASET_STRING_TYPE_TEST_SUBGROUP_NAME "string_type_dataset_test" + +#define DATASET_COMPOUND_TYPE_TEST_SUBGROUP_NAME "compound_type_dataset_test" +#define DATASET_COMPOUND_TYPE_TEST_DSET_NAME "compound_type_test" +#define DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES 5 +#define DATASET_COMPOUND_TYPE_TEST_MAX_PASSES 5 +#define DATASET_COMPOUND_TYPE_TEST_DSET_RANK 2 + +#define DATASET_ENUM_TYPE_TEST_VAL_BASE_NAME "INDEX" +#define DATASET_ENUM_TYPE_TEST_SUBGROUP_NAME "enum_type_dataset_test" +#define DATASET_ENUM_TYPE_TEST_NUM_MEMBERS 16 +#define DATASET_ENUM_TYPE_TEST_SPACE_RANK 2 +#define DATASET_ENUM_TYPE_TEST_DSET_NAME1 "enum_native_dset" +#define DATASET_ENUM_TYPE_TEST_DSET_NAME2 "enum_non_native_dset" + +#define DATASET_ARRAY_TYPE_TEST_SUBGROUP_NAME "array_type_dataset_test" +#define DATASET_ARRAY_TYPE_TEST_DSET_NAME1 "array_type_test1" +#define DATASET_ARRAY_TYPE_TEST_DSET_NAME2 "array_type_test2" +#define 
DATASET_ARRAY_TYPE_TEST_DSET_NAME3 "array_type_test3" +#define DATASET_ARRAY_TYPE_TEST_SPACE_RANK 2 +#define DATASET_ARRAY_TYPE_TEST_RANK1 2 +#define DATASET_ARRAY_TYPE_TEST_RANK2 2 +#define DATASET_ARRAY_TYPE_TEST_RANK3 2 + +#define DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_YES_DSET_NAME "track_times_true_test" +#define DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_NO_DSET_NAME "track_times_false_test" +#define DATASET_CREATION_PROPERTIES_TEST_PHASE_CHANGE_DSET_NAME "attr_phase_change_test" +#define DATASET_CREATION_PROPERTIES_TEST_ALLOC_TIMES_BASE_NAME "alloc_time_test" +#define DATASET_CREATION_PROPERTIES_TEST_FILL_TIMES_BASE_NAME "fill_times_test" +#define DATASET_CREATION_PROPERTIES_TEST_CRT_ORDER_BASE_NAME "creation_order_test" +#define DATASET_CREATION_PROPERTIES_TEST_LAYOUTS_BASE_NAME "layout_test" +#define DATASET_CREATION_PROPERTIES_TEST_FILTERS_DSET_NAME "filters_test" +#define DATASET_CREATION_PROPERTIES_TEST_GROUP_NAME "creation_properties_test" +#define DATASET_CREATION_PROPERTIES_TEST_CHUNK_DIM_RANK DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK +#define DATASET_CREATION_PROPERTIES_TEST_MAX_COMPACT 12 +#define DATASET_CREATION_PROPERTIES_TEST_MIN_DENSE 8 +#define DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK 3 + +#define DATASET_OPEN_INVALID_PARAMS_SPACE_RANK 2 +#define DATASET_OPEN_INVALID_PARAMS_GROUP_NAME "dataset_open_test" +#define DATASET_OPEN_INVALID_PARAMS_DSET_NAME "open_test_dataset" + +#define DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK 2 +#define DATASET_GET_SPACE_TYPE_TEST_GROUP_NAME "get_dset_space_type_test" +#define DATASET_GET_SPACE_TYPE_TEST_DSET_NAME "get_space_type_test_dset" + +#define DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_SPACE_RANK 2 +#define DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME "get_dset_space_type_invalid_params_test" +#define DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_DSET_NAME "get_space_type_test_invalid_params_dset" + +#define DATASET_PROPERTY_LIST_TEST_SUBGROUP_NAME "dataset_property_list_test_group" +#define DATASET_PROPERTY_LIST_TEST_SPACE_RANK 2 +#define DATASET_PROPERTY_LIST_TEST_DSET_NAME1 "property_list_test_dataset1" +#define DATASET_PROPERTY_LIST_TEST_DSET_NAME2 "property_list_test_dataset2" +#define DATASET_PROPERTY_LIST_TEST_DSET_NAME3 "property_list_test_dataset3" +#define DATASET_PROPERTY_LIST_TEST_DSET_NAME4 "property_list_test_dataset4" + +#define DATASET_SMALL_READ_TEST_ALL_DSET_SPACE_RANK 3 +#define DATASET_SMALL_READ_TEST_ALL_DSET_DTYPESIZE sizeof(int) +#define DATASET_SMALL_READ_TEST_ALL_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_SMALL_READ_TEST_ALL_GROUP_NAME "dataset_small_read_all_test" +#define DATASET_SMALL_READ_TEST_ALL_DSET_NAME "dataset_small_read_all_dset" + +#define DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK 3 +#define DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_DTYPESIZE sizeof(int) +#define DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_SMALL_READ_TEST_HYPERSLAB_GROUP_NAME "dataset_small_read_hyperslab_test" +#define DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_NAME "dataset_small_read_hyperslab_dset" + +#define DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK 3 +#define DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE sizeof(int) +#define DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS 10 +#define DATASET_SMALL_READ_TEST_POINT_SELECTION_GROUP_NAME "dataset_small_read_point_selection_test" +#define DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_NAME 
"dataset_small_read_point_selection_dset" + +#define DATASET_IO_POINT_GROUP_NAME "dataset_io_point_selection_test" +#define DATASET_IO_POINT_DSET_NAME_NOCHUNK "dataset_io_point_selection_dset_nochunk" +#define DATASET_IO_POINT_DSET_NAME_CHUNK "dataset_io_point_selection_dset_chunk" + +#ifndef NO_LARGE_TESTS +#define DATASET_LARGE_READ_TEST_ALL_DSET_SPACE_RANK 3 +#define DATASET_LARGE_READ_TEST_ALL_DSET_DTYPESIZE sizeof(int) +#define DATASET_LARGE_READ_TEST_ALL_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_LARGE_READ_TEST_ALL_GROUP_NAME "dataset_large_read_all_test" +#define DATASET_LARGE_READ_TEST_ALL_DSET_NAME "dataset_large_read_all_dset" + +#define DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK 3 +#define DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_DTYPESIZE sizeof(int) +#define DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_LARGE_READ_TEST_HYPERSLAB_GROUP_NAME "dataset_large_read_hyperslab_test" +#define DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_NAME "dataset_large_read_hyperslab_dset" + +#define DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK 1 +#define DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE sizeof(int) +#define DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_LARGE_READ_TEST_POINT_SELECTION_GROUP_NAME "dataset_large_read_point_selection_test" +#define DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_NAME "dataset_large_read_point_selection_dset" +#endif + +#define DATASET_READ_INVALID_PARAMS_TEST_DSET_SPACE_RANK 3 +#define DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_READ_INVALID_PARAMS_TEST_GROUP_NAME "dataset_read_invalid_params_test" +#define DATASET_READ_INVALID_PARAMS_TEST_DSET_NAME "dataset_read_invalid_params_dset" + +#define DATASET_SMALL_WRITE_TEST_ALL_DSET_SPACE_RANK 3 +#define DATASET_SMALL_WRITE_TEST_ALL_DSET_DTYPESIZE sizeof(int) +#define DATASET_SMALL_WRITE_TEST_ALL_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_SMALL_WRITE_TEST_ALL_GROUP_NAME "dataset_small_write_all_test" +#define DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME "dataset_small_write_all_dset" + +#define DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK 3 +#define DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE sizeof(int) +#define DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_SMALL_WRITE_TEST_HYPERSLAB_GROUP_NAME "dataset_small_write_hyperslab_test" +#define DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_NAME "dataset_small_write_hyperslab_dset" + +#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK 3 +#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPESIZE sizeof(int) +#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS 10 +#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_GROUP_NAME "dataset_small_write_point_selection_test" +#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_NAME "dataset_small_write_point_selection_dset" + +#ifndef NO_LARGE_TESTS +#define DATASET_LARGE_WRITE_TEST_ALL_DSET_SPACE_RANK 3 +#define DATASET_LARGE_WRITE_TEST_ALL_DSET_DTYPESIZE sizeof(int) +#define DATASET_LARGE_WRITE_TEST_ALL_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_LARGE_WRITE_TEST_ALL_GROUP_NAME "dataset_large_write_all_test" +#define DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME "dataset_large_write_all_dset" + +#define DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK 3 +#define DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE 
sizeof(int) +#define DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_LARGE_WRITE_TEST_HYPERSLAB_GROUP_NAME "dataset_large_write_hyperslab_test" +#define DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_NAME "dataset_large_write_hyperslab_dset" + +#define DATASET_LARGE_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK 3 +#define DATASET_LARGE_WRITE_TEST_POINT_SELECTION_DSET_DTYPESIZE sizeof(int) +#define DATASET_LARGE_WRITE_TEST_POINT_SELECTION_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_LARGE_WRITE_TEST_POINT_SELECTION_GROUP_NAME "dataset_large_write_point_selection_test" +#define DATASET_LARGE_WRITE_TEST_POINT_SELECTION_DSET_NAME "dataset_large_write_point_selection_dset" +#endif + +#define DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK 3 +#define DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS 10 +#define DATASET_DATA_VERIFY_WRITE_TEST_GROUP_NAME "dataset_data_write_verification_test" +#define DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME "dataset_data_write_verification_dset" + +#define DATASET_WRITE_INVALID_PARAMS_TEST_DSET_SPACE_RANK 3 +#define DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_WRITE_INVALID_PARAMS_TEST_GROUP_NAME "dataset_write_invalid_params_test" +#define DATASET_WRITE_INVALID_PARAMS_TEST_DSET_NAME "dataset_write_invalid_params_dset" + +#define DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK 3 +#define DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE sizeof(int) +#define DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE H5T_NATIVE_INT +#define DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS 10 +#define DATASET_DATA_BUILTIN_CONVERSION_TEST_GROUP_NAME "dataset_builtin_conversion_verification_test" +#define DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME "dataset_builtin_conversion_verification_dset" + +#define DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS 10 +#define DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_GROUP_NAME "dataset_compound_partial_io_test" +#define DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_DSET_NAME "dataset_compound_partial_io_test" + +#define DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK 2 +#define DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_NUM_PASSES 3 +#define DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_GROUP_NAME "set_extent_chunked_unlimited_test" +#define DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_DSET_NAME "set_extent_chunked_unlimited_test_dset" + +#define DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK 2 +#define DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_NUM_PASSES 3 +#define DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_GROUP_NAME "set_extent_chunked_fixed_test" +#define DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME "set_extent_chunked_fixed_test_dset" +#define DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME2 "set_extent_chunked_fixed_test_dset2" + +#define DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK 2 +#define DATASET_SET_EXTENT_DATA_TEST_GROUP_NAME "set_extent_chunked_data_test" +#define DATASET_SET_EXTENT_DATA_TEST_DSET_NAME "set_extent_chunked_data_test_dset" +#define DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM 8 + +#define DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK 2 +#define DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_GROUP_NAME "set_extent_chunked_double_handles_test" +#define DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_DSET_NAME "set_extent_chunked_double_handles_test_dset" +#define DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM 8 + 
+#define DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK 2 +#define DATASET_SET_EXTENT_INVALID_PARAMS_TEST_GROUP_NAME "set_extent_invalid_params_test" +#define DATASET_SET_EXTENT_INVALID_PARAMS_TEST_DSET_NAME "set_extent_invalid_params_test_dset" +#define DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_COMPACT_DSET_NAME "set_extent_invalid_layout_test_compact_dset" +#define DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_CONTIGUOUS_DSET_NAME \ + "set_extent_invalid_layout_test_contiguous_dset" + +#define DATASET_SINGLE_CHUNK_TEST_SPACE_RANK 2 +#define DATASET_SINGLE_CHUNK_TEST_GROUP_NAME "single_chunk_dataset_test" +#define DATASET_SINGLE_CHUNK_TEST_DSET_NAME "single_chunk_dataset" + +#define DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK 2 +#define DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_SINGLE_CHUNK_WRITE_TEST_GROUP_NAME "single_chunk_dataset_write_test" +#define DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME "single_chunk_dataset" + +#define DATASET_MULTI_CHUNK_TEST_SPACE_RANK 2 +#define DATASET_MULTI_CHUNK_TEST_GROUP_NAME "multi_chunk_dataset_test" +#define DATASET_MULTI_CHUNK_TEST_DSET_NAME "multi_chunk_dataset" + +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK 2 +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME \ + "multi_chunk_dataset_write_same_space_read_test" +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset" + +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK 2 +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME \ + "multi_chunk_dataset_write_diff_space_read_test" +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset" + +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK 2 +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME \ + "multi_chunk_dataset_same_space_overwrite_test" +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset" +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_NITERS 10 + +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK 2 +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME \ + "multi_chunk_dataset_diff_space_overwrite_test" +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset" +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_NITERS 10 + +#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK 2 +#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_CTYPE int +#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_GROUP_NAME 
"read_partial_chunk_all_sel_test" +#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_NAME "read_partial_chunk_all_sel_dset" + +#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK 2 +#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_CTYPE int +#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_GROUP_NAME "read_partial_chunk_hyper_sel_test" +#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_NAME "read_partial_chunk_hyper_sel_dset" + +#define DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_RANK 1 +#define DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM 4 +#define DATASET_GET_VLEN_BUF_SIZE_GROUP_NAME "get_vlen_buffer_size_group" +#define DATASET_GET_VLEN_BUF_SIZE_DSET_NAME "get_vlen_buffer_size_dset" +#endif diff --git a/test/API/H5_api_datatype_test.c b/test/API/H5_api_datatype_test.c new file mode 100644 index 00000000000..9d5329244df --- /dev/null +++ b/test/API/H5_api_datatype_test.c @@ -0,0 +1,2693 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_datatype_test.h" + +/* + * Disable tests that currently compromise internal HDF5 integrity. + */ +#define PROBLEMATIC_TESTS + +static int test_create_committed_datatype(void); +static int test_create_committed_datatype_invalid_params(void); +static int test_create_anonymous_committed_datatype(void); +static int test_create_anonymous_committed_datatype_invalid_params(void); +#ifndef PROBLEMATIC_TESTS +static int test_create_committed_datatype_empty_types(void); +#endif +static int test_recommit_committed_type(void); +static int test_open_committed_datatype(void); +static int test_open_committed_datatype_invalid_params(void); +static int test_reopen_committed_datatype_indirect(void); +static int test_close_committed_datatype_invalid_id(void); +static int test_datatype_property_lists(void); +static int test_create_dataset_with_committed_type(void); +static int test_create_attribute_with_committed_type(void); +static int test_delete_committed_type(void); +static int test_resurrect_datatype(void); +static int test_flush_committed_datatype(void); +static int test_flush_committed_datatype_invalid_params(void); +static int test_refresh_committed_datatype(void); +static int test_refresh_committed_datatype_invalid_params(void); +#ifndef PROBLEMATIC_TESTS +static int test_cant_commit_predefined(void); +#endif +static int test_cant_modify_committed_type(void); + +/* + * The array of datatype tests to be performed. 
+ */ +static int (*datatype_tests[])(void) = { + test_create_committed_datatype, + test_create_committed_datatype_invalid_params, + test_create_anonymous_committed_datatype, + test_create_anonymous_committed_datatype_invalid_params, +#ifndef PROBLEMATIC_TESTS + test_create_committed_datatype_empty_types, +#endif + test_recommit_committed_type, + test_open_committed_datatype, + test_open_committed_datatype_invalid_params, + test_reopen_committed_datatype_indirect, + test_close_committed_datatype_invalid_id, + test_datatype_property_lists, + test_create_dataset_with_committed_type, + test_create_attribute_with_committed_type, + test_delete_committed_type, + test_resurrect_datatype, + test_flush_committed_datatype, + test_flush_committed_datatype_invalid_params, + test_refresh_committed_datatype, + test_refresh_committed_datatype_invalid_params, +#ifndef PROBLEMATIC_TESTS + test_cant_commit_predefined, +#endif + test_cant_modify_committed_type, +}; + +/* + * A test to check that a committed datatype can be created. + */ +static int +test_create_committed_datatype(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + + TESTING("creation of a committed datatype"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATATYPE_CREATE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_CREATE_TEST_GROUP_NAME); + goto error; + } + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype to commit\n"); + goto error; + } + + if (H5Tcommit2(group_id, DATATYPE_CREATE_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_CREATE_TEST_TYPE_NAME); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a committed datatype can't be + * created when H5Tcommit2 is passed invalid parameters. 
+ */ +static int +test_create_committed_datatype_invalid_params(void) +{ + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Tcommit2 with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATATYPE_CREATE_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATATYPE_CREATE_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype to commit\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Tcommit2_invalid_loc_id) + { + TESTING_2("H5Tcommit2 with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit2(H5I_INVALID_HID, DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME, type_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tcommit2 succeeded with an invalid loc_id!\n"); + PART_ERROR(H5Tcommit2_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Tcommit2_invalid_loc_id); + + PART_BEGIN(H5Tcommit2_invalid_type_name) + { + TESTING_2("H5Tcommit2 with an invalid datatype name"); + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit2(group_id, NULL, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tcommit2 succeeded with an invalid datatype name!\n"); + PART_ERROR(H5Tcommit2_invalid_type_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit2(group_id, "", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tcommit2 succeeded with an invalid datatype name!\n"); + PART_ERROR(H5Tcommit2_invalid_type_name); + } + + PASSED(); + } + PART_END(H5Tcommit2_invalid_type_name); + + PART_BEGIN(H5Tcommit2_invalid_type_id) + { + TESTING_2("H5Tcommit2 with an invalid datatype ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME, H5I_INVALID_HID, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tcommit2 succeeded with an invalid datatype ID!\n"); + PART_ERROR(H5Tcommit2_invalid_type_id); + } + + PASSED(); + } + PART_END(H5Tcommit2_invalid_type_id); + + PART_BEGIN(H5Tcommit2_invalid_lcpl) + { + TESTING_2("H5Tcommit2 with an invalid LCPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME, type_id, + H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + 
+ if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tcommit2 succeeded with an invalid LCPL!\n"); + PART_ERROR(H5Tcommit2_invalid_lcpl); + } + + PASSED(); + } + PART_END(H5Tcommit2_invalid_lcpl); + + PART_BEGIN(H5Tcommit2_invalid_tcpl) + { + TESTING_2("H5Tcommit2 with an invalid TCPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME, type_id, + H5P_DEFAULT, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tcommit2 succeeded with an invalid TCPL!\n"); + PART_ERROR(H5Tcommit2_invalid_tcpl); + } + + PASSED(); + } + PART_END(H5Tcommit2_invalid_tcpl); + + PART_BEGIN(H5Tcommit2_invalid_tapl) + { + TESTING_2("H5Tcommit2 with an invalid TAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME, type_id, + H5P_DEFAULT, H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tcommit2 succeeded with an invalid TAPL!\n"); + PART_ERROR(H5Tcommit2_invalid_tapl); + } + + PASSED(); + } + PART_END(H5Tcommit2_invalid_tapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an anonymous committed datatype + * can be created with H5Tcommit_anon. + */ +static int +test_create_anonymous_committed_datatype(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + + TESTING("creation of anonymous committed datatype"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATATYPE_CREATE_ANONYMOUS_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_CREATE_ANONYMOUS_GROUP_NAME); + goto error; + } + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + if (H5Tcommit_anon(group_id, type_id, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit anonymous datatype\n"); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Gclose(group_id); + 
H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a committed datatype can't be + * created when H5Tcommit_anon is passed invalid parameters. + */ +static int +test_create_anonymous_committed_datatype_invalid_params(void) +{ + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Tcommit_anon with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATATYPE_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATATYPE_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME); + goto error; + } + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Tcommit_anon_invalid_loc_id) + { + TESTING_2("H5Tcommit_anon with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit_anon(H5I_INVALID_HID, type_id, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tcommit_anon succeeded with an invalid loc_id!\n"); + PART_ERROR(H5Tcommit_anon_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Tcommit_anon_invalid_loc_id); + + PART_BEGIN(H5Tcommit_anon_invalid_type_id) + { + TESTING_2("H5Tcommit_anon with an invalid datatype ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit_anon(group_id, H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tcommit_anon succeeded with an invalid datatype ID!\n"); + PART_ERROR(H5Tcommit_anon_invalid_type_id); + } + + PASSED(); + } + PART_END(H5Tcommit_anon_invalid_type_id); + + PART_BEGIN(H5Tcommit_anon_invalid_tcpl) + { + TESTING_2("H5Tcommit_anon with an invalid TCPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit_anon(group_id, type_id, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tcommit_anon succeeded with an invalid TCPL!\n"); + PART_ERROR(H5Tcommit_anon_invalid_tcpl); + } + + PASSED(); + } + PART_END(H5Tcommit_anon_invalid_tcpl); + + PART_BEGIN(H5Tcommit_anon_invalid_tapl) + { + TESTING_2("H5Tcommit_anon with an invalid TAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit_anon(group_id, type_id, H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tcommit_anon succeeded with an invalid TAPL!\n"); + PART_ERROR(H5Tcommit_anon_invalid_tapl); + } + + PASSED(); + } + PART_END(H5Tcommit_anon_invalid_tapl); + } 
+ END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that committing a datatype fails with empty + * compound and enum datatypes. + */ +#ifndef PROBLEMATIC_TESTS +static int +test_create_committed_datatype_empty_types(void) +{ + herr_t err_ret = FAIL; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + + TESTING_MULTIPART("creation of committed datatype with empty types"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATATYPE_CREATE_EMPTY_TYPES_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATATYPE_CREATE_EMPTY_TYPES_TEST_GROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Tcommit_empty_compound_type) + { + TESTING_2("creation of committed datatype with empty compound type"); + + if ((type_id = H5Tcreate(H5T_COMPOUND, (size_t)32)) < 0) { + H5_FAILED(); + HDprintf(" failed to create compound type\n"); + PART_ERROR(H5Tcommit_empty_compound_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_EMPTY_TYPES_TEST_CMPD_TYPE_NAME, type_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" committed empty compound datatype!\n"); + PART_ERROR(H5Tcommit_empty_compound_type); + } + + /* Add a field to the compound datatype */ + if (H5Tinsert(type_id, "a", (size_t)0, H5T_NATIVE_INT) < 0) { + H5_FAILED(); + HDprintf(" failed to insert field into compound datatype\n"); + PART_ERROR(H5Tcommit_empty_compound_type); + } + + /* Attempt to commit the now non-empty compound datatype */ + if (H5Tcommit2(group_id, DATATYPE_CREATE_EMPTY_TYPES_TEST_CMPD_TYPE_NAME, type_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to commit non-empty compound datatype\n"); + PART_ERROR(H5Tcommit_empty_compound_type); + } + + PASSED(); + } + PART_END(H5Tcommit_empty_compound_type); + + if (type_id >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(type_id); + } + H5E_END_TRY; + type_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Tcommit_empty_enum_type) + { + int enum_val = 1; + + TESTING_2("creation of committed datatype with empty enum type"); + + if ((type_id = H5Tenum_create(H5T_NATIVE_INT)) < 0) { + H5_FAILED(); + HDprintf(" failed 
to create enum type\n"); + PART_ERROR(H5Tcommit_empty_enum_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_EMPTY_TYPES_TEST_ENUM_TYPE_NAME, type_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" committed empty enum datatype!\n"); + PART_ERROR(H5Tcommit_empty_enum_type); + } + + /* Add a field to the enum datatype */ + if (H5Tenum_insert(type_id, "a", &enum_val) < 0) { + H5_FAILED(); + HDprintf(" failed to insert field into enum datatype\n"); + PART_ERROR(H5Tcommit_empty_enum_type); + } + + /* Attempt to commit the now non-empty enum datatype */ + if (H5Tcommit2(group_id, DATATYPE_CREATE_EMPTY_TYPES_TEST_ENUM_TYPE_NAME, type_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to commit non-empty enum datatype\n"); + PART_ERROR(H5Tcommit_empty_enum_type); + } + + PASSED(); + } + PART_END(H5Tcommit_empty_enum_type); + + if (type_id >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(type_id); + } + H5E_END_TRY; + type_id = H5I_INVALID_HID; + } + } + END_MULTIPART; + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} +#endif + +/* + * A test to check that a committed datatype can't be re-committed. + */ +static int +test_recommit_committed_type(void) +{ + htri_t is_committed = FALSE; + herr_t err_ret; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + + TESTING("inability to re-commit a committed datatype"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, RECOMMIT_COMMITTED_TYPE_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", RECOMMIT_COMMITTED_TYPE_TEST_GROUP_NAME); + goto error; + } + + /* Copy a predefined datatype and commit the copy */ + if ((type_id = H5Tcopy(H5T_NATIVE_INT)) < 0) { + H5_FAILED(); + HDprintf(" failed to copy predefined integer datatype\n"); + goto error; + } + + if (H5Tcommit2(group_id, "native_int", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to commit datatype\n"); + goto error; + } + + if ((is_committed = H5Tcommitted(type_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to determine if datatype is committed\n"); + goto error; + } + + if (!is_committed) { + H5_FAILED(); + HDprintf(" H5Tcommitted() returned false!\n"); + goto error; + } + + /* We should not be able to re-commit a committed type */ + H5E_BEGIN_TRY + { + err_ret = 
H5Tcommit2(group_id, "native_int", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" re-committed an already committed datatype!\n"); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a committed datatype + * can be opened using H5Topen2. + */ +static int +test_open_committed_datatype(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + + TESTING("H5Topen2"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATATYPE_OPEN_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_OPEN_TEST_GROUP_NAME); + goto error; + } + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype to commit\n"); + goto error; + } + + if (H5Tcommit2(group_id, DATATYPE_OPEN_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_OPEN_TEST_TYPE_NAME); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + + if ((type_id = H5Topen2(group_id, DATATYPE_OPEN_TEST_TYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open committed datatype '%s'\n", DATATYPE_OPEN_TEST_TYPE_NAME); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a committed datatype can't + * be opened when H5Topen2 is passed invalid parameters. 
+ */ +static int +test_open_committed_datatype_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Topen2 with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATATYPE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATATYPE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype to commit\n"); + goto error; + } + + if (H5Tcommit2(group_id, DATATYPE_OPEN_INVALID_PARAMS_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_OPEN_INVALID_PARAMS_TEST_TYPE_NAME); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Topen2_invalid_loc_id) + { + TESTING_2("H5Topen2 with an invalid location ID"); + + H5E_BEGIN_TRY + { + type_id = H5Topen2(H5I_INVALID_HID, DATATYPE_OPEN_INVALID_PARAMS_TEST_TYPE_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (type_id >= 0) { + H5_FAILED(); + HDprintf(" opened committed datatype with an invalid location ID!\n"); + H5Tclose(type_id); + PART_ERROR(H5Topen2_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Topen2_invalid_loc_id); + + PART_BEGIN(H5Topen2_invalid_type_name) + { + TESTING_2("H5Topen2 with an invalid datatype name"); + + H5E_BEGIN_TRY + { + type_id = H5Topen2(group_id, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (type_id >= 0) { + H5_FAILED(); + HDprintf(" opened committed datatype with an invalid datatype name!\n"); + H5Tclose(type_id); + PART_ERROR(H5Topen2_invalid_type_name); + } + + H5E_BEGIN_TRY + { + type_id = H5Topen2(group_id, "", H5P_DEFAULT); + } + H5E_END_TRY; + + if (type_id >= 0) { + H5_FAILED(); + HDprintf(" opened committed datatype with an invalid datatype name!\n"); + H5Tclose(type_id); + PART_ERROR(H5Topen2_invalid_type_name); + } + + PASSED(); + } + PART_END(H5Topen2_invalid_type_name); + + PART_BEGIN(H5Topen2_invalid_tapl) + { + TESTING_2("H5Topen2 with an invalid TAPL"); + + H5E_BEGIN_TRY + { + type_id = H5Topen2(group_id, DATATYPE_OPEN_INVALID_PARAMS_TEST_TYPE_NAME, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (type_id >= 0) { + H5_FAILED(); + HDprintf(" opened committed datatype with an invalid TAPL!\n"); + H5Tclose(type_id); + PART_ERROR(H5Topen2_invalid_tapl); + } + + PASSED(); + } + PART_END(H5Topen2_invalid_tapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(group_id) < 0) + 
TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that open named datatypes can be reopened indirectly + * through H5Dget_type without causing problems. + */ +static int +test_reopen_committed_datatype_indirect(void) +{ + size_t dt_size = 0; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID, reopened_type_id = H5I_INVALID_HID; + hid_t strtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("reopening open committed datatypes using H5Dget_type"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or stored datatype aren't supported with " + "this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATATYPE_REOPEN_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_REOPEN_TEST_GROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(DATATYPE_REOPEN_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(reopen_compound_type) + { + TESTING_2("re-open of compound datatype"); + + if ((strtype = H5Tcopy(H5T_C_S1)) < 0) { + H5_FAILED(); + HDprintf(" failed to copy C-string datatype\n"); + PART_ERROR(reopen_compound_type); + } + + if (H5Tset_size(strtype, H5T_VARIABLE) < 0) { + H5_FAILED(); + HDprintf(" failed to set string datatype's size to variable\n"); + PART_ERROR(reopen_compound_type); + } + + if ((type_id = H5Tcreate(H5T_COMPOUND, sizeof(char *))) < 0) { + H5_FAILED(); + HDprintf(" failed to create compound datatype\n"); + PART_ERROR(reopen_compound_type); + } + + if (H5Tinsert(type_id, "vlstr", (size_t)0, strtype) < 0) { + H5_FAILED(); + HDprintf(" failed to insert field into compound datatype\n"); + PART_ERROR(reopen_compound_type); + } + + if (H5Tclose(strtype) < 0) { + H5_FAILED(); + HDprintf(" failed to close string datatype\n"); + PART_ERROR(reopen_compound_type); + } + + /* Get size of compound type */ + if ((dt_size = H5Tget_size(type_id)) == 0) { + H5_FAILED(); + HDprintf(" failed to retrieve size of compound datatype\n"); + PART_ERROR(reopen_compound_type); + } + + /* Commit compound type and verify the size doesn't change */ + if (H5Tcommit2(group_id, "cmpd_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to commit compound datatype\n"); + 
PART_ERROR(reopen_compound_type); + } + + if (dt_size != H5Tget_size(type_id)) { + H5_FAILED(); + HDprintf(" committing datatype caused its size to change!\n"); + PART_ERROR(reopen_compound_type); + } + + /* Create dataset with compound type */ + if ((dset_id = H5Dcreate2(group_id, "cmpd_dset", type_id, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create dataset using committed datatype\n"); + PART_ERROR(reopen_compound_type); + } + + /* Indirectly reopen type and verify that the size doesn't change */ + if ((reopened_type_id = H5Dget_type(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to re-open committed datatype using H5Dget_type\n"); + PART_ERROR(reopen_compound_type); + } + + if (dt_size != H5Tget_size(reopened_type_id)) { + H5_FAILED(); + HDprintf(" size of re-opened datatype didn't match size of original datatype\n"); + PART_ERROR(reopen_compound_type); + } + + PASSED(); + } + PART_END(reopen_compound_type); + + H5E_BEGIN_TRY + { + H5Tclose(strtype); + strtype = H5I_INVALID_HID; + H5Tclose(type_id); + type_id = H5I_INVALID_HID; + H5Tclose(reopened_type_id); + reopened_type_id = H5I_INVALID_HID; + H5Dclose(dset_id); + dset_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(reopen_enum_type) + { + int enum_value; + + TESTING_2("re-open of enum datatype"); + + if ((type_id = H5Tenum_create(H5T_NATIVE_INT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create enum datatype\n"); + PART_ERROR(reopen_enum_type); + } + + enum_value = 0; + if (H5Tenum_insert(type_id, "val1", &enum_value) < 0) { + H5_FAILED(); + HDprintf(" failed to insert value into enum datatype\n"); + PART_ERROR(reopen_enum_type); + } + + enum_value = 1; + if (H5Tenum_insert(type_id, "val2", &enum_value) < 0) { + H5_FAILED(); + HDprintf(" failed to insert value into enum datatype\n"); + PART_ERROR(reopen_enum_type); + } + + /* Get size of enum type */ + if ((dt_size = H5Tget_size(type_id)) == 0) { + H5_FAILED(); + HDprintf(" failed to retrieve size of enum datatype\n"); + PART_ERROR(reopen_enum_type); + } + + /* Commit enum type and verify the size doesn't change */ + if (H5Tcommit2(group_id, "enum_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to commit enum datatype\n"); + PART_ERROR(reopen_enum_type); + } + + if (dt_size != H5Tget_size(type_id)) { + H5_FAILED(); + HDprintf(" committing datatype caused its size to change!\n"); + PART_ERROR(reopen_enum_type); + } + + /* Create dataset with enum type */ + if ((dset_id = H5Dcreate2(group_id, "enum_dset", type_id, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create dataset using committed datatype\n"); + PART_ERROR(reopen_enum_type); + } + + /* Indirectly reopen type and verify that the size doesn't change */ + if ((reopened_type_id = H5Dget_type(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to re-open committed datatype using H5Dget_type\n"); + PART_ERROR(reopen_enum_type); + } + + if (dt_size != H5Tget_size(reopened_type_id)) { + H5_FAILED(); + HDprintf(" size of re-opened datatype didn't match size of original datatype\n"); + PART_ERROR(reopen_enum_type); + } + + PASSED(); + } + PART_END(reopen_enum_type); + + H5E_BEGIN_TRY + { + H5Tclose(type_id); + type_id = H5I_INVALID_HID; + H5Tclose(reopened_type_id); + reopened_type_id = H5I_INVALID_HID; + H5Dclose(dset_id); + dset_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(reopen_vlen_type) + { + TESTING_2("reopen of a variable-length datatype"); + + if 
((type_id = H5Tvlen_create(H5T_NATIVE_INT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create variable-length datatype\n"); + PART_ERROR(reopen_vlen_type); + } + + /* Get size of variable-length type */ + if ((dt_size = H5Tget_size(type_id)) == 0) { + H5_FAILED(); + HDprintf(" failed to retrieve size of variable-length datatype\n"); + PART_ERROR(reopen_vlen_type); + } + + /* Commit variable-length type and verify the size doesn't change */ + if (H5Tcommit2(group_id, "vlen_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to commit variable-length datatype\n"); + PART_ERROR(reopen_vlen_type); + } + + if (dt_size != H5Tget_size(type_id)) { + H5_FAILED(); + HDprintf(" committing datatype caused its size to change!\n"); + PART_ERROR(reopen_vlen_type); + } + + /* Create dataset with variable-length type */ + if ((dset_id = H5Dcreate2(group_id, "vlen_dset", type_id, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create dataset using committed datatype\n"); + PART_ERROR(reopen_vlen_type); + } + + /* Indirectly reopen type and verify that the size doesn't change */ + if ((reopened_type_id = H5Dget_type(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to re-open committed datatype using H5Dget_type\n"); + PART_ERROR(reopen_vlen_type); + } + + if (dt_size != H5Tget_size(reopened_type_id)) { + H5_FAILED(); + HDprintf(" size of re-opened datatype didn't match size of original datatype\n"); + PART_ERROR(reopen_vlen_type); + } + + PASSED(); + } + PART_END(reopen_vlen_type); + + H5E_BEGIN_TRY + { + H5Tclose(type_id); + type_id = H5I_INVALID_HID; + H5Tclose(reopened_type_id); + reopened_type_id = H5I_INVALID_HID; + H5Dclose(dset_id); + dset_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(reopen_opaque_type) + { + const char *tag = "opaque_tag"; + + TESTING_2("reopen of an opaque datatype"); + + if ((type_id = H5Tcreate(H5T_OPAQUE, (size_t)13)) < 0) { + H5_FAILED(); + HDprintf(" failed to create opaque datatype\n"); + PART_ERROR(reopen_opaque_type); + } + + if (H5Tset_tag(type_id, tag) < 0) { + H5_FAILED(); + HDprintf(" failed to set tag on opaque datatype\n"); + PART_ERROR(reopen_opaque_type); + } + + /* Get size of opaque type */ + if ((dt_size = H5Tget_size(type_id)) == 0) { + H5_FAILED(); + HDprintf(" failed to retrieve size of opaque datatype\n"); + PART_ERROR(reopen_opaque_type); + } + + /* Commit opaque type and verify the size doesn't change */ + if (H5Tcommit2(group_id, "opaque_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to commit opaque datatype\n"); + PART_ERROR(reopen_opaque_type); + } + + if (dt_size != H5Tget_size(type_id)) { + H5_FAILED(); + HDprintf(" committing datatype caused its size to change!\n"); + PART_ERROR(reopen_opaque_type); + } + + /* Create dataset with opaque type */ + if ((dset_id = H5Dcreate2(group_id, "opaque_dset", type_id, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create dataset using committed datatype\n"); + PART_ERROR(reopen_opaque_type); + } + + /* Indirectly reopen type and verify that the size doesn't change */ + if ((reopened_type_id = H5Dget_type(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to re-open committed datatype using H5Dget_type\n"); + PART_ERROR(reopen_opaque_type); + } + + if (dt_size != H5Tget_size(reopened_type_id)) { + H5_FAILED(); + HDprintf(" size of re-opened datatype didn't match size of original datatype\n"); + 
PART_ERROR(reopen_opaque_type); + } + + PASSED(); + } + PART_END(reopen_opaque_type); + + H5E_BEGIN_TRY + { + H5Tclose(type_id); + type_id = H5I_INVALID_HID; + H5Tclose(reopened_type_id); + reopened_type_id = H5I_INVALID_HID; + H5Dclose(dset_id); + dset_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(reopen_array_type) + { + hsize_t array_dims[] = {2, 3}; + + TESTING_2("reopen of an array datatype"); + + if ((type_id = H5Tarray_create2(H5T_NATIVE_INT, 1, array_dims)) < 0) { + H5_FAILED(); + HDprintf(" failed to create array datatype\n"); + PART_ERROR(reopen_array_type); + } + + /* Get size of array type */ + if ((dt_size = H5Tget_size(type_id)) == 0) { + H5_FAILED(); + HDprintf(" failed to retrieve size of array datatype\n"); + PART_ERROR(reopen_array_type); + } + + /* Commit array type and verify the size doesn't change */ + if (H5Tcommit2(group_id, "array_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to commit array datatype\n"); + PART_ERROR(reopen_array_type); + } + + if (dt_size != H5Tget_size(type_id)) { + H5_FAILED(); + HDprintf(" committing datatype caused its size to change!\n"); + PART_ERROR(reopen_array_type); + } + + /* Create dataset with array type */ + if ((dset_id = H5Dcreate2(group_id, "array_dset", type_id, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create dataset using committed datatype\n"); + PART_ERROR(reopen_array_type); + } + + /* Indirectly reopen type and verify that the size doesn't change */ + if ((reopened_type_id = H5Dget_type(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to re-open committed datatype using H5Dget_type\n"); + PART_ERROR(reopen_array_type); + } + + if (dt_size != H5Tget_size(reopened_type_id)) { + H5_FAILED(); + HDprintf(" size of re-opened datatype didn't match size of original datatype\n"); + PART_ERROR(reopen_array_type); + } + + PASSED(); + } + PART_END(reopen_array_type); + + H5E_BEGIN_TRY + { + H5Tclose(type_id); + type_id = H5I_INVALID_HID; + H5Tclose(reopened_type_id); + reopened_type_id = H5I_INVALID_HID; + H5Dclose(dset_id); + dset_id = H5I_INVALID_HID; + } + H5E_END_TRY; + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(strtype); + H5Tclose(type_id); + H5Tclose(reopened_type_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that H5Tclose fails when + * it is passed an invalid datatype ID. 
+ */ +static int +test_close_committed_datatype_invalid_id(void) +{ + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + + TESTING("H5Tclose with an invalid committed datatype ID"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf( + " API functions for basic file or stored datatype aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + H5E_BEGIN_TRY + { + err_ret = H5Tclose(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Tclose succeeded with an invalid committed datatype ID!\n"); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a TCPL used for datatype creation + * can be persisted and that a valid copy of that TCPL can + * be retrieved later with a call to H5Tget_create_plist. + */ +static int +test_datatype_property_lists(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t type_id1 = H5I_INVALID_HID, type_id2 = H5I_INVALID_HID; + hid_t tcpl_id1 = H5I_INVALID_HID, tcpl_id2 = H5I_INVALID_HID; + + TESTING_MULTIPART("datatype property list operations"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, stored datatype, or getting property list aren't " + "supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATATYPE_PROPERTY_LIST_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_PROPERTY_LIST_TEST_SUBGROUP_NAME); + goto error; + } + + if ((type_id1 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + if ((type_id2 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + if ((tcpl_id1 = H5Pcreate(H5P_DATATYPE_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create TCPL\n"); + goto error; + } + + /* Currently no TCPL routines are defined */ + + if (H5Tcommit2(group_id, DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME1, type_id1, H5P_DEFAULT, tcpl_id1, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME1); + goto error; + } + + if (H5Tcommit2(group_id, DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME2, 
type_id2, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME2); + goto error; + } + + if (H5Pclose(tcpl_id1) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Tget_create_plist) + { + TESTING_2("H5Tget_create_plist"); + + /* Try to receive copies for the two property lists */ + if ((tcpl_id1 = H5Tget_create_plist(type_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Tget_create_plist); + } + + if ((tcpl_id2 = H5Tget_create_plist(type_id2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Tget_create_plist); + } + + PASSED(); + } + PART_END(H5Tget_create_plist); + + /* Now close the property lists and datatypes and see if we can still retrieve copies of + * the property lists upon opening (instead of creating) a datatype + */ + if (tcpl_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(tcpl_id1); + } + H5E_END_TRY; + tcpl_id1 = H5I_INVALID_HID; + } + if (tcpl_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(tcpl_id2); + } + H5E_END_TRY; + tcpl_id2 = H5I_INVALID_HID; + } + if (type_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(type_id1); + } + H5E_END_TRY; + type_id1 = H5I_INVALID_HID; + } + if (type_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(type_id2); + } + H5E_END_TRY; + type_id2 = H5I_INVALID_HID; + } + + PART_BEGIN(H5Tget_create_plist_reopened) + { + TESTING_2("H5Tget_create_plist after re-opening committed datatype"); + + if ((type_id1 = H5Topen2(group_id, DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME1, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open datatype '%s'\n", DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME1); + PART_ERROR(H5Tget_create_plist_reopened); + } + + if ((type_id2 = H5Topen2(group_id, DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open datatype '%s'\n", DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME2); + PART_ERROR(H5Tget_create_plist_reopened); + } + + if ((tcpl_id1 = H5Tget_create_plist(type_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Tget_create_plist_reopened); + } + + if ((tcpl_id2 = H5Tget_create_plist(type_id2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Tget_create_plist_reopened); + } + + PASSED(); + } + PART_END(H5Tget_create_plist_reopened); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(tcpl_id1) < 0) + TEST_ERROR; + if (H5Pclose(tcpl_id2) < 0) + TEST_ERROR; + if (H5Tclose(type_id1) < 0) + TEST_ERROR; + if (H5Tclose(type_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(tcpl_id1); + H5Pclose(tcpl_id2); + H5Tclose(type_id1); + H5Tclose(type_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset can be created using + * a committed datatype. 
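+ *
+ * For reference, an illustrative sketch only (the names here are placeholders,
+ * not the test's actual identifiers): once a datatype has been committed, its
+ * ID can be passed directly to H5Dcreate2, e.g.
+ *
+ *     H5Tcommit2(group_id, "my_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ *     dset_id = H5Dcreate2(group_id, "my_dset", type_id, space_id,
+ *                          H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);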
+ */ +static int +test_create_dataset_with_committed_type(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING("dataset creation with a committed datatype"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or stored datatype aren't supported with " + "this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_WITH_DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", DATASET_CREATE_WITH_DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + if (H5Tcommit2(group_id, DATASET_CREATE_WITH_DATATYPE_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", DATASET_CREATE_WITH_DATATYPE_TEST_TYPE_NAME); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gopen2(container_group, DATASET_CREATE_WITH_DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_CREATE_WITH_DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((type_id = H5Topen2(group_id, DATASET_CREATE_WITH_DATATYPE_TEST_TYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open committed datatype '%s'\n", DATASET_CREATE_WITH_DATATYPE_TEST_TYPE_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(DATATYPE_CREATE_TEST_DATASET_DIMS, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_CREATE_WITH_DATATYPE_TEST_DSET_NAME, type_id, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s' using committed datatype\n", + DATASET_CREATE_WITH_DATATYPE_TEST_DSET_NAME); + goto error; + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + if ((dset_id = H5Dopen2(group_id, DATASET_CREATE_WITH_DATATYPE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open dataset '%s'\n", 
DATASET_CREATE_WITH_DATATYPE_TEST_DSET_NAME); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an attribute can be created + * using a committed datatype. + */ +static int +test_create_attribute_with_committed_type(void) +{ + htri_t attr_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING("attribute creation with a committed datatype"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, attribute, or stored datatype aren't supported " + "with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", + ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + if (H5Tcommit2(group_id, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_DTYPE_NAME, type_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_DTYPE_NAME); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + + if ((type_id = H5Topen2(group_id, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_DTYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open committed datatype '%s'\n", + ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_DTYPE_NAME); + goto error; + } + + if ((space_id = + generate_random_dataspace(ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME, type_id, space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME); + goto error; + } + + /* Verify the attribute has been created */ + if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if attribute '%s' exists\n", + ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME); + goto error; + } + + if (!attr_exists) { + 
H5_FAILED(); + HDprintf(" attribute did not exist\n"); + goto error; + } + + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + + if ((attr_id = H5Aopen(group_id, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Sclose(space_id); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a committed datatype can + * be deleted. + */ +static int +test_delete_committed_type(void) +{ + htri_t type_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + + TESTING("committed datatype deletion"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, attribute, or stored datatype aren't supported " + "with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATATYPE_DELETE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group '%s'\n", DATATYPE_DELETE_TEST_GROUP_NAME); + goto error; + } + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + if (H5Tcommit2(group_id, DATATYPE_DELETE_TEST_DTYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_DELETE_TEST_DTYPE_NAME); + goto error; + } + + if ((type_exists = H5Lexists(group_id, DATATYPE_DELETE_TEST_DTYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if datatype '%s' exists\n", DATATYPE_DELETE_TEST_DTYPE_NAME); + goto error; + } + + if (!type_exists) { + H5_FAILED(); + HDprintf(" datatype didn't exist\n"); + goto error; + } + + if (H5Ldelete(group_id, DATATYPE_DELETE_TEST_DTYPE_NAME, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete datatype '%s'\n", DATATYPE_DELETE_TEST_DTYPE_NAME); + goto error; + } + + if ((type_exists = H5Lexists(group_id, DATATYPE_DELETE_TEST_DTYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if datatype '%s' exists\n", DATATYPE_DELETE_TEST_DTYPE_NAME); + goto error; + } + + if (type_exists) { + H5_FAILED(); + HDprintf(" datatype exists\n"); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + 
TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a committed datatype can still be opened when + * the link to the datatype is deleted and then a new one is created. + */ +static int +test_resurrect_datatype(void) +{ +#ifndef NO_ID_PREVENTS_OBJ_DELETE + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; +#endif /* NO_ID_PREVENTS_OBJ_DELETE */ + + TESTING("resurrecting datatype after deletion"); + +#ifndef NO_ID_PREVENTS_OBJ_DELETE + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link, hard link, or stored datatype aren't " + "supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATATYPE_RESURRECT_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_RESURRECT_TEST_GROUP_NAME); + goto error; + } + + /* Create a named datatype in the file */ + if ((type_id = H5Tcopy(H5T_NATIVE_INT)) < 0) { + H5_FAILED(); + HDprintf(" failed to copy predefined integer type\n"); + goto error; + } + + if (H5Tcommit2(group_id, DATATYPE_RESURRECT_TEST_DTYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to commit datatype\n"); + goto error; + } + + /* Unlink the datatype while it's open (will mark it for deletion when closed) */ + if (H5Ldelete(group_id, DATATYPE_RESURRECT_TEST_DTYPE_NAME, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete datatype\n"); + goto error; + } +#ifndef NO_OBJECT_GET_NAME + /* Check that datatype name is NULL */ + if (H5Iget_name(type_id, NULL, (size_t)0) != 0) { + H5_FAILED(); + HDprintf(" deleted datatype name was not NULL!\n"); + goto error; + } +#endif + + /* Re-link the datatype to the group hierarchy (shouldn't get deleted now) */ + if (H5Lcreate_hard(type_id, ".", group_id, DATATYPE_RESURRECT_TEST_DTYPE_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create new link for deleted datatype\n"); + goto error; + } + + /* Close things */ + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + /* Re-open the file */ + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, 
DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gopen2(container_group, DATATYPE_RESURRECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", DATATYPE_RESURRECT_TEST_GROUP_NAME); + goto error; + } + + /* Attempt to open the datatype under the new name */ + if ((type_id = H5Topen2(group_id, DATATYPE_RESURRECT_TEST_DTYPE_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open resurrected datatype\n"); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); +#else /* NO_ID_PREVENTS_OBJ_DELETE */ + SKIPPED(); +#endif /* NO_ID_PREVENTS_OBJ_DELETE */ + + return 0; + +#ifndef NO_ID_PREVENTS_OBJ_DELETE +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#endif /* NO_ID_PREVENTS_OBJ_DELETE */ +} + +static int +test_flush_committed_datatype(void) +{ + TESTING("H5Tflush"); + + SKIPPED(); + + return 0; +} + +static int +test_flush_committed_datatype_invalid_params(void) +{ + TESTING("H5Tflush with invalid parameters"); + + SKIPPED(); + + return 0; +} + +static int +test_refresh_committed_datatype(void) +{ + TESTING("H5Trefresh"); + + SKIPPED(); + + return 0; +} + +static int +test_refresh_committed_datatype_invalid_params(void) +{ + TESTING("H5Trefresh with invalid parameters"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check that predefined HDF5 datatypes can't be directly committed. + * An application should first copy the type with H5Tcopy and then commit the + * copied datatype. 
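+ *
+ * For example, as an illustrative sketch only (placeholder names, not part of
+ * the test below), the supported pattern is:
+ *
+ *     hid_t int_copy = H5Tcopy(H5T_NATIVE_INT);
+ *     H5Tcommit2(group_id, "committed_int", int_copy, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ *
+ * whereas passing H5T_NATIVE_INT itself to H5Tcommit2 is expected to fail.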
+ */ +#ifndef PROBLEMATIC_TESTS +static int +test_cant_commit_predefined(void) +{ + herr_t err_ret; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + + TESTING("inability to commit predefined types directly"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, PREDEFINED_TYPE_COMMIT_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", PREDEFINED_TYPE_COMMIT_TEST_GROUP_NAME); + goto error; + } + + H5E_BEGIN_TRY + { + err_ret = H5Tcommit2(group_id, "committed_predefined_type", H5T_NATIVE_INT, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" committed a predefined datatype directly (without copying it)!\n"); + goto error; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} +#endif + +/* + * A test to check that a datatype cannot be modified once it has been committed. 
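+ * Property-changing calls such as H5Tset_precision or H5Tset_size are only
+ * valid on a transient (not yet committed) datatype, so any such changes must
+ * be made before H5Tcommit2; after the commit they are expected to fail.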
+ */ +static int +test_cant_modify_committed_type(void) +{ + htri_t is_committed = FALSE; + herr_t err_ret; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + + TESTING("inability to modify a committed datatype"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, MODIFY_COMMITTED_TYPE_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", MODIFY_COMMITTED_TYPE_TEST_GROUP_NAME); + goto error; + } + + /* Copy a predefined datatype and commit the copy */ + if ((type_id = H5Tcopy(H5T_NATIVE_INT)) < 0) { + H5_FAILED(); + HDprintf(" failed to copy predefined integer datatype\n"); + goto error; + } + + if (H5Tcommit2(group_id, "native_int", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to commit datatype\n"); + goto error; + } + + if ((is_committed = H5Tcommitted(type_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to determine if datatype is committed\n"); + goto error; + } + + if (!is_committed) { + H5_FAILED(); + HDprintf(" H5Tcommitted() returned false!\n"); + goto error; + } + + /* We should not be able to modify a type after it has been committed. */ + H5E_BEGIN_TRY + { + err_ret = H5Tset_precision(type_id, (size_t)256); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" modified committed datatype!\n"); + goto error; + } + + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(type_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +int +H5_api_datatype_test(void) +{ + size_t i; + int nerrors; + + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Datatype Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(datatype_tests); i++) { + nerrors += (*datatype_tests[i])() ? 1 : 0; + } + + HDprintf("\n"); + + return nerrors; +} diff --git a/test/API/H5_api_datatype_test.h b/test/API/H5_api_datatype_test.h new file mode 100644 index 00000000000..753f9b26eed --- /dev/null +++ b/test/API/H5_api_datatype_test.h @@ -0,0 +1,79 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_DATATYPE_TEST_H +#define H5_API_DATATYPE_TEST_H + +#include "H5_api_test.h" + +int H5_api_datatype_test(void); + +/************************************************* + * * + * API Datatype test defines * + * * + *************************************************/ + +#define DATATYPE_CREATE_TEST_DATASET_DIMS 2 +#define DATATYPE_CREATE_TEST_GROUP_NAME "committed_datatype_creation_test" +#define DATATYPE_CREATE_TEST_TYPE_NAME "test_type" + +#define DATATYPE_CREATE_INVALID_PARAMS_TEST_SPACE_RANK 2 +#define DATATYPE_CREATE_INVALID_PARAMS_TEST_GROUP_NAME "committed_datatype_creation_invalid_params_test" +#define DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME "committed_datatype_creation_invalid_params_datatype" + +#define DATATYPE_CREATE_ANONYMOUS_GROUP_NAME "anonymous_type_creation_test" +#define DATATYPE_CREATE_ANONYMOUS_TYPE_NAME "anon_type" + +#define DATATYPE_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME "anonymous_type_creation_invalid_params_test" + +#define DATATYPE_CREATE_EMPTY_TYPES_TEST_CMPD_TYPE_NAME "compound_type" +#define DATATYPE_CREATE_EMPTY_TYPES_TEST_ENUM_TYPE_NAME "enum_type" +#define DATATYPE_CREATE_EMPTY_TYPES_TEST_GROUP_NAME "committed_datatype_empty_types_test" + +#define RECOMMIT_COMMITTED_TYPE_TEST_GROUP_NAME "recommit_committed_type_test" + +#define DATATYPE_OPEN_TEST_GROUP_NAME "datatype_open_test" +#define DATATYPE_OPEN_TEST_TYPE_NAME "open_test_datatype" + +#define DATATYPE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME "datatype_open_invalid_params_test" +#define DATATYPE_OPEN_INVALID_PARAMS_TEST_TYPE_NAME "open_invalid_params_test_datatype" + +#define DATATYPE_REOPEN_TEST_SPACE_RANK 2 +#define DATATYPE_REOPEN_TEST_GROUP_NAME "datatype_reopen_test" + +#define DATASET_CREATE_WITH_DATATYPE_TEST_DATASET_DIMS 2 +#define DATASET_CREATE_WITH_DATATYPE_TEST_GROUP_NAME "dataset_create_with_committed_type_test" +#define DATASET_CREATE_WITH_DATATYPE_TEST_TYPE_NAME "committed_type_test_dtype1" +#define DATASET_CREATE_WITH_DATATYPE_TEST_DSET_NAME "committed_type_test_dset" + +#define ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_SPACE_RANK 2 +#define ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_GROUP_NAME "attribute_create_with_committed_type_test" +#define ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_DTYPE_NAME "committed_type_test_dtype2" +#define ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME "committed_type_test_attr" + +#define DATATYPE_DELETE_TEST_GROUP_NAME "datatype_deletion_test" +#define DATATYPE_DELETE_TEST_DTYPE_NAME "delete_test_dtype" + +#define DATATYPE_RESURRECT_TEST_GROUP_NAME "datatype_resurrection_test" +#define DATATYPE_RESURRECT_TEST_DTYPE_NAME "delete_test_dtype" +#define DATATYPE_RESURRECT_TEST_DTYPE_NAME2 "resurrected_dtype" + +#define DATATYPE_PROPERTY_LIST_TEST_SUBGROUP_NAME "datatype_property_list_test_group" +#define DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME1 "property_list_test_datatype1" +#define DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME2 "property_list_test_datatype2" + +#define PREDEFINED_TYPE_COMMIT_TEST_GROUP_NAME "predefined_type_commit_test" + +#define MODIFY_COMMITTED_TYPE_TEST_GROUP_NAME "modify_committed_type_test" + 
+#endif diff --git a/test/API/H5_api_file_test.c b/test/API/H5_api_file_test.c new file mode 100644 index 00000000000..279e9e79ecb --- /dev/null +++ b/test/API/H5_api_file_test.c @@ -0,0 +1,2564 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_file_test.h" + +static int test_create_file(void); +static int test_create_file_invalid_params(void); +static int test_create_file_excl(void); +static int test_open_file(void); +static int test_open_file_invalid_params(void); +static int test_open_nonexistent_file(void); +static int test_file_open_overlap(void); +static int test_file_permission(void); +static int test_reopen_file(void); +static int test_close_file_invalid_id(void); +static int test_flush_file(void); +static int test_file_is_accessible(void); +static int test_file_property_lists(void); +static int test_get_file_intent(void); +static int test_get_file_obj_count(void); +static int test_file_mounts(void); +static int test_get_file_name(void); + +/* + * The array of file tests to be performed. + */ +static int (*file_tests[])(void) = { + test_create_file, + test_create_file_invalid_params, + test_create_file_excl, + test_open_file, + test_open_file_invalid_params, + test_open_nonexistent_file, + test_file_open_overlap, + test_file_permission, + test_reopen_file, + test_close_file_invalid_id, + test_flush_file, + test_file_is_accessible, + test_file_property_lists, + test_get_file_intent, + test_get_file_obj_count, + test_file_mounts, + test_get_file_name, +}; + +/* + * Tests that a file can be created. + */ +static int +test_create_file(void) +{ + hid_t file_id = H5I_INVALID_HID; + char *prefixed_filename = NULL; + + TESTING("H5Fcreate"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + if (prefix_filename(test_path_prefix, FILE_CREATE_TEST_FILENAME, &prefixed_filename) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", prefixed_filename); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + HDfree(prefixed_filename); + prefixed_filename = NULL; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + + HDfree(prefixed_filename); + + return 1; +} + +/* + * Tests that a file can't be created when H5Fcreate is passed + * invalid parameters. 
+ */ +static int +test_create_file_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + char *prefixed_filename = NULL; + + TESTING_MULTIPART("H5Fcreate with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + if (prefix_filename(test_path_prefix, FILE_CREATE_INVALID_PARAMS_FILE_NAME, &prefixed_filename) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + BEGIN_MULTIPART + { + PART_BEGIN(H5Fcreate_invalid_name) + { + TESTING_2("H5Fcreate with invalid file name"); + + H5E_BEGIN_TRY + { + file_id = H5Fcreate(NULL, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" file was created with a NULL name!\n"); + H5Fclose(file_id); + PART_ERROR(H5Fcreate_invalid_name); + } + + H5E_BEGIN_TRY + { + file_id = H5Fcreate("", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" file was created with an invalid name of ''!\n"); + H5Fclose(file_id); + PART_ERROR(H5Fcreate_invalid_name); + } + + PASSED(); + } + PART_END(H5Fcreate_invalid_name); + + PART_BEGIN(H5Fcreate_invalid_flags) + { + TESTING_2("H5Fcreate with invalid flags"); + + H5E_BEGIN_TRY + { + file_id = H5Fcreate(prefixed_filename, H5F_ACC_RDWR, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" file was created with invalid flag H5F_ACC_RDWR!\n"); + H5Fclose(file_id); + PART_ERROR(H5Fcreate_invalid_flags); + } + + H5E_BEGIN_TRY + { + file_id = H5Fcreate(prefixed_filename, H5F_ACC_CREAT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" file was created with invalid flag H5F_ACC_CREAT!\n"); + H5Fclose(file_id); + PART_ERROR(H5Fcreate_invalid_flags); + } + + H5E_BEGIN_TRY + { + file_id = H5Fcreate(prefixed_filename, H5F_ACC_SWMR_READ, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" file was created with invalid flag H5F_ACC_SWMR_READ!\n"); + H5Fclose(file_id); + PART_ERROR(H5Fcreate_invalid_flags); + } + + PASSED(); + } + PART_END(H5Fcreate_invalid_flags); + + PART_BEGIN(H5Fcreate_invalid_fcpl) + { + TESTING_2("H5Fcreate with invalid FCPL"); + + H5E_BEGIN_TRY + { + file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" file was created with invalid FCPL!\n"); + H5Fclose(file_id); + PART_ERROR(H5Fcreate_invalid_fcpl); + } + + PASSED(); + } + PART_END(H5Fcreate_invalid_fcpl); + } + END_MULTIPART; + + HDfree(prefixed_filename); + prefixed_filename = NULL; + + return 0; + +error: + H5E_BEGIN_TRY + { + /* Attempt to remove the file if it ended up being created. */ + H5Fdelete(prefixed_filename, H5P_DEFAULT); + + H5Fclose(file_id); + } + H5E_END_TRY; + + HDfree(prefixed_filename); + + return 1; +} + +/* + * Tests that file creation will fail when a file is created + * using the H5F_ACC_EXCL flag while the file already exists. 
+ */ +static int +test_create_file_excl(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t file_id2 = H5I_INVALID_HID; + char *prefixed_filename = NULL; + + TESTING("H5Fcreate with H5F_ACC_EXCL/H5F_ACC_TRUNC flag"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + if (prefix_filename(test_path_prefix, FILE_CREATE_EXCL_FILE_NAME, &prefixed_filename) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first file\n"); + goto error; + } + + /* Close the file */ + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + /* Try again with H5F_ACC_EXCL. This should fail because the file already + * exists on disk from the previous steps. + */ + H5E_BEGIN_TRY + { + file_id = H5Fcreate(prefixed_filename, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" created already existing file using H5F_ACC_EXCL flag!\n"); + goto error; + } + + /* Test creating with H5F_ACC_TRUNC. This will truncate the existing file on disk. */ + if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't truncate the existing file\n"); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + HDfree(prefixed_filename); + prefixed_filename = NULL; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(file_id); + H5Fclose(file_id2); + } + H5E_END_TRY; + + HDfree(prefixed_filename); + + return 1; +} + +/* + * Tests that a file can be opened. + */ +static int +test_open_file(void) +{ + hid_t file_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Fopen"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + BEGIN_MULTIPART + { + PART_BEGIN(H5Fopen_rdonly) + { + TESTING_2("H5Fopen in read-only mode"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" unable to open file '%s' in read-only mode\n", H5_api_test_filename); + PART_ERROR(H5Fopen_rdonly); + } + + PASSED(); + } + PART_END(H5Fopen_rdonly); + + if (file_id >= 0) { + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + file_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Fopen_rdwrite) + { + TESTING_2("H5Fopen in read-write mode"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" unable to open file '%s' in read-write mode\n", H5_api_test_filename); + PART_ERROR(H5Fopen_rdwrite); + } + + PASSED(); + } + PART_END(H5Fopen_rdwrite); + + if (file_id >= 0) { + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + file_id = H5I_INVALID_HID; + } + + /* + * XXX: SWMR open flags + */ + } + END_MULTIPART; + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Tests that a file can't be opened when H5Fopen is given + * invalid parameters. 
+ */ +static int +test_open_file_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Fopen with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + BEGIN_MULTIPART + { + PART_BEGIN(H5Fopen_invalid_name) + { + TESTING_2("H5Fopen with invalid file name"); + + H5E_BEGIN_TRY + { + file_id = H5Fopen(NULL, H5F_ACC_RDWR, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" file was opened with a NULL name!\n"); + H5Fclose(file_id); + PART_ERROR(H5Fopen_invalid_name); + } + + H5E_BEGIN_TRY + { + file_id = H5Fopen("", H5F_ACC_RDWR, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" file was opened with an invalid name of ''!\n"); + H5Fclose(file_id); + PART_ERROR(H5Fopen_invalid_name); + } + + PASSED(); + } + PART_END(H5Fopen_invalid_name); + + PART_BEGIN(H5Fopen_invalid_flags) + { + TESTING_2("H5Fopen with invalid flags"); + + H5E_BEGIN_TRY + { + file_id = H5Fopen(H5_api_test_filename, H5F_ACC_TRUNC, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" file was opened with invalid flag H5F_ACC_TRUNC!\n"); + H5Fclose(file_id); + PART_ERROR(H5Fopen_invalid_flags); + } + + H5E_BEGIN_TRY + { + file_id = H5Fopen(H5_api_test_filename, H5F_ACC_EXCL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" file was opened with invalid flag H5F_ACC_EXCL!\n"); + H5Fclose(file_id); + PART_ERROR(H5Fopen_invalid_flags); + } + + PASSED(); + } + PART_END(H5Fopen_invalid_flags); + } + END_MULTIPART; + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that opening a file which doesn't exist will fail. + */ +static int +test_open_nonexistent_file(void) +{ + hid_t file_id = H5I_INVALID_HID; + char *prefixed_filename = NULL; + + TESTING("for invalid opening of a non-existent file"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + if (prefix_filename(test_path_prefix, NONEXISTENT_FILENAME, &prefixed_filename) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + /* XXX: Make sure to first delete the file so we know for sure it doesn't exist */ + + H5E_BEGIN_TRY + { + file_id = H5Fopen(prefixed_filename, H5F_ACC_RDWR, H5P_DEFAULT); + } + H5E_END_TRY; + + if (file_id >= 0) { + H5_FAILED(); + HDprintf(" non-existent file was opened!\n"); + goto error; + } + + HDfree(prefixed_filename); + prefixed_filename = NULL; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + + HDfree(prefixed_filename); + + return 1; +} + +/* + * Tests that a file can be opened read-only or read-write + * and things are handled appropriately. 
+ */ +static int +test_file_permission(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dspace_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t dtype_id = H5I_INVALID_HID; + char *prefixed_filename = NULL; + herr_t h5_ret = FAIL; + + TESTING_MULTIPART("file permissions (invalid creation of objects in read-only file)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, attribute, stored datatype aren't " + "supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if (prefix_filename(test_path_prefix, FILE_PERMISSION_TEST_FILENAME, &prefixed_filename) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", prefixed_filename); + goto error; + } + + if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create data space\n"); + goto error; + } + + if ((dset_id = H5Dcreate2(file_id, FILE_PERMISSION_TEST_DSET_NAME, H5T_STD_U32LE, dspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create data set: %s\n", FILE_PERMISSION_TEST_DSET_NAME); + goto error; + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + /* Open the file (with read-only permission) */ + if ((file_id = H5Fopen(prefixed_filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Gcreate_rdonly_file) + { + TESTING_2("invalid creation of group in read-only file"); + + /* Create a group with the read-only file handle (should fail) */ + H5E_BEGIN_TRY + { + group_id = + H5Gcreate2(file_id, FILE_PERMISSION_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" a group was created in a read-only file!\n"); + PART_ERROR(H5Gcreate_rdonly_file); + } + + H5E_BEGIN_TRY + { + group_id = H5Gcreate_anon(file_id, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" a group was created in a read-only file!\n"); + PART_ERROR(H5Gcreate_rdonly_file); + } + + PASSED(); + } + PART_END(H5Gcreate_rdonly_file); + + PART_BEGIN(H5Dcreate_rdonly_file) + { + TESTING_2("invalid creation of dataset in read-only file"); + + /* Create a dataset with the read-only file handle (should fail) */ + H5E_BEGIN_TRY + { + dset_id = H5Dcreate2(file_id, FILE_PERMISSION_TEST_DSET2_NAME, H5T_STD_U32LE, dspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" a dataset was created in a read-only file!\n"); + PART_ERROR(H5Dcreate_rdonly_file); + } + + H5E_BEGIN_TRY + { + dset_id = H5Dcreate_anon(file_id, H5T_STD_U32LE, dspace_id, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" a dataset was created in a read-only file!\n"); + PART_ERROR(H5Dcreate_rdonly_file); + } + + PASSED(); 
+ } + PART_END(H5Dcreate_rdonly_file); + + PART_BEGIN(H5Acreate_rdonly_file) + { + TESTING_2("invalid creation of attribute in read-only file"); + + /* Create an attribute with the read-only file handle (should fail) */ + H5E_BEGIN_TRY + { + attr_id = H5Acreate2(file_id, FILE_PERMISSION_TEST_ATTR_NAME, H5T_NATIVE_INT, dspace_id, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (attr_id >= 0) { + H5_FAILED(); + HDprintf(" an attribute was created in a read-only file!\n"); + PART_ERROR(H5Acreate_rdonly_file); + } + + PASSED(); + } + PART_END(H5Acreate_rdonly_file); + + PART_BEGIN(H5Tcommit_rdonly_file) + { + TESTING_2("invalid creation of committed datatype in read-only file"); + + if ((dtype_id = H5Tcopy(H5T_NATIVE_INT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy a native datatype\n"); + PART_ERROR(H5Tcommit_rdonly_file); + } + + /* Commit a datatype with the read-only file handle (should fail) */ + H5E_BEGIN_TRY + { + h5_ret = H5Tcommit2(file_id, FILE_PERMISSION_TEST_NAMED_DTYPE, dtype_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (h5_ret >= 0) { + H5_FAILED(); + HDprintf(" a named datatype was committed in a read-only file!\n"); + PART_ERROR(H5Tcommit_rdonly_file); + } + + H5E_BEGIN_TRY + { + h5_ret = H5Tcommit_anon(file_id, dtype_id, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (h5_ret >= 0) { + H5_FAILED(); + HDprintf(" a named datatype was committed in a read-only file!\n"); + PART_ERROR(H5Tcommit_rdonly_file); + } + + PASSED(); + } + PART_END(H5Tcommit_rdonly_file); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Tclose(dtype_id) < 0) + TEST_ERROR; + if (H5Sclose(dspace_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + HDfree(prefixed_filename); + prefixed_filename = NULL; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(dspace_id); + H5Dclose(dset_id); + H5Aclose(attr_id); + H5Tclose(dtype_id); + H5Gclose(group_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + HDfree(prefixed_filename); + + return 1; +} + +/* + * A test to check that a file can be re-opened with H5Freopen. 
+ */ +static int +test_reopen_file(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t file_id2 = H5I_INVALID_HID; + + TESTING("re-open of a file with H5Freopen"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((file_id2 = H5Freopen(file_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file\n"); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id2) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(file_id); + H5Fclose(file_id2); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that H5Fclose doesn't succeed for an + * invalid file ID */ +static int +test_close_file_invalid_id(void) +{ + herr_t err_ret = -1; + + TESTING("H5Fclose with an invalid ID"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + H5E_BEGIN_TRY + { + err_ret = H5Fclose(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" closed an invalid file ID!\n"); + goto error; + } + + PASSED(); + + return 0; + +error: + return 1; +} + +/* + * A test to check that a file can be flushed using H5Fflush. + */ +static int +test_flush_file(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t dspace_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + char *prefixed_filename = NULL; + char dset_name[32]; + unsigned u; + + TESTING_MULTIPART("H5Fflush"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + SKIPPED(); + HDprintf(" API functions for basic file, dataset, or file flush aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if (prefix_filename(test_path_prefix, FILE_FLUSH_TEST_FILENAME, &prefixed_filename) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", prefixed_filename); + goto error; + } + + /* Create multiple small datasets in file */ + if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create data space\n"); + goto error; + } + + for (u = 0; u < 10; u++) { + HDsprintf(dset_name, "Dataset %u", u); + + if ((dset_id = H5Dcreate2(file_id, dset_name, H5T_STD_U32LE, dspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create data set: %s\n", dset_name); + goto error; + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Fflush_local) + { + TESTING_2("file flushing at local scope"); + + if (H5Fflush(file_id, H5F_SCOPE_LOCAL) < 0) { + H5_FAILED(); + HDprintf(" unable to flush file with scope H5F_SCOPE_LOCAL\n"); + PART_ERROR(H5Fflush_local); + } + + PASSED(); + } + PART_END(H5Fflush_local); + + PART_BEGIN(H5Fflush_global) + 
{ + TESTING_2("file flushing at global scope"); + + if (H5Fflush(file_id, H5F_SCOPE_GLOBAL) < 0) { + H5_FAILED(); + HDprintf(" unable to flush file with scope H5F_SCOPE_GLOBAL\n"); + PART_ERROR(H5Fflush_global); + } + + PASSED(); + } + PART_END(H5Fflush_global); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(dspace_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + HDfree(prefixed_filename); + prefixed_filename = NULL; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(dspace_id); + H5Dclose(dset_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + HDfree(prefixed_filename); + + return 1; +} + +/* + * A test for H5Fis_accessible. + */ +static int +test_file_is_accessible(void) +{ + const char *const fake_filename = "nonexistent_file.h5"; + char *prefixed_filename = NULL; + htri_t is_accessible = FAIL; + + TESTING_MULTIPART("H5Fis_accessible"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + if (prefix_filename(test_path_prefix, fake_filename, &prefixed_filename) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + BEGIN_MULTIPART + { + PART_BEGIN(H5Fis_accessible_valid_file) + { + TESTING_2("H5Fis_accessible on existing file"); + + if ((is_accessible = H5Fis_accessible(H5_api_test_filename, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if file '%s' is accessible with default FAPL\n", + H5_api_test_filename); + PART_ERROR(H5Fis_accessible_valid_file); + } + + if (!is_accessible) { + H5_FAILED(); + HDprintf(" file '%s' is not accessible with default FAPL\n", H5_api_test_filename); + PART_ERROR(H5Fis_accessible_valid_file); + } + + PASSED(); + } + PART_END(H5Fis_accessible_valid_file); + + is_accessible = -1; + + PART_BEGIN(H5Fis_accessible_invalid_file) + { + TESTING_2("H5Fis_accessible on non-existing file"); + + H5E_BEGIN_TRY + { + is_accessible = H5Fis_accessible(prefixed_filename, H5P_DEFAULT); + } + H5E_END_TRY; + + if (is_accessible > 0) { + H5_FAILED(); + HDprintf(" non-existent file '%s' was accessible with default FAPL: is_accessible=%d!\n", + prefixed_filename, is_accessible); + PART_ERROR(H5Fis_accessible_invalid_file); + } + + PASSED(); + } + PART_END(H5Fis_accessible_invalid_file); + } + END_MULTIPART; + + HDfree(prefixed_filename); + prefixed_filename = NULL; + + return 0; + +error: + HDfree(prefixed_filename); + + return 1; +} + +/* + * A test to check that a FCPL used for file creation can + * be persisted and that a valid copy of that FCPL can be + * retrieved later with a call to H5Fget_create_plist. Also + * tests that a valid copy of a FAPL used for file access + * can be retrieved with a call to H5Fget_access_plist. 
+ */ +static int +test_file_property_lists(void) +{ + hsize_t prop_val = 0; + hid_t file_id1 = H5I_INVALID_HID; + hid_t file_id2 = H5I_INVALID_HID; + hid_t fcpl_id1 = H5I_INVALID_HID; + hid_t fcpl_id2 = H5I_INVALID_HID; + hid_t fapl_id1 = H5I_INVALID_HID; + hid_t fapl_id2 = H5I_INVALID_HID; + char *prefixed_filename1 = NULL; + char *prefixed_filename2 = NULL; + + TESTING_MULTIPART("file property list operations"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic or more file or get property list aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if (prefix_filename(test_path_prefix, FILE_PROPERTY_LIST_TEST_FNAME1, &prefixed_filename1) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + if (prefix_filename(test_path_prefix, FILE_PROPERTY_LIST_TEST_FNAME2, &prefixed_filename2) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + if ((fcpl_id1 = H5Pcreate(H5P_FILE_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create FCPL\n"); + goto error; + } + + if (H5Pset_userblock(fcpl_id1, FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL) < 0) { + H5_FAILED(); + HDprintf(" failed to set test property on FCPL\n"); + goto error; + } + + if ((file_id1 = H5Fcreate(prefixed_filename1, H5F_ACC_TRUNC, fcpl_id1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file\n"); + goto error; + } + + if ((file_id2 = H5Fcreate(prefixed_filename2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file\n"); + goto error; + } + + if (H5Pclose(fcpl_id1) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Fget_create_plist) + { + TESTING_2("H5Fget_create_plist"); + + /* Try to receive copies of the two property lists, one which has the property set and one which + * does not */ + if ((fcpl_id1 = H5Fget_create_plist(file_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get FCPL\n"); + PART_ERROR(H5Fget_create_plist); + } + + if ((fcpl_id2 = H5Fget_create_plist(file_id2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get FCPL\n"); + PART_ERROR(H5Fget_create_plist); + } + + /* Ensure that property list 1 has the property set and property list 2 does not */ + if (H5Pget_userblock(fcpl_id1, &prop_val) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve test property from FCPL\n"); + PART_ERROR(H5Fget_create_plist); + } + + if (prop_val != FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL) { + H5_FAILED(); + HDprintf(" retrieved test property value '%llu' did not match expected value '%llu'\n", + (long long unsigned)prop_val, + (long long unsigned)FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL); + PART_ERROR(H5Fget_create_plist); + } + + if (H5Pget_userblock(fcpl_id2, &prop_val) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve test property from FCPL\n"); + PART_ERROR(H5Fget_create_plist); + } + + if (prop_val == FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL) { + HDprintf(" retrieved test property value '%llu' matched control value '%llu' when it " + "shouldn't have\n", + (long long unsigned)prop_val, + (long long unsigned)FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL); + PART_ERROR(H5Fget_create_plist); + } + + PASSED(); + } + PART_END(H5Fget_create_plist); + + PART_BEGIN(H5Fget_access_plist) + { + TESTING_2("H5Fget_access_plist"); + + /* Due to the 
nature of needing to supply a FAPL with the VOL connector having been set on it to + * the H5Fcreate() call, we cannot exactly test using H5P_DEFAULT as the FAPL for one of the + * create calls in this test. However, the use of H5Fget_access_plist() will still be used to + * check that the FAPL is correct after both creating and opening a file. + */ + if ((fapl_id1 = H5Fget_access_plist(file_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get FAPL\n"); + PART_ERROR(H5Fget_access_plist); + } + + if ((fapl_id2 = H5Fget_access_plist(file_id2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get FAPL\n"); + PART_ERROR(H5Fget_access_plist); + } + + PASSED(); + } + PART_END(H5Fget_access_plist); + + /* Now see if we can still retrieve copies of the property lists upon opening + * (instead of creating) a file. If they were reconstructed properly upon file + * open, the creation property lists should also have the same test values + * as set before. + */ + if (fcpl_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(fcpl_id1); + } + H5E_END_TRY; + fcpl_id1 = H5I_INVALID_HID; + } + if (fcpl_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(fcpl_id2); + } + H5E_END_TRY; + fcpl_id2 = H5I_INVALID_HID; + } + if (fapl_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(fapl_id1); + } + H5E_END_TRY; + fapl_id1 = H5I_INVALID_HID; + } + if (fapl_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(fapl_id2); + } + H5E_END_TRY; + fapl_id2 = H5I_INVALID_HID; + } + if (file_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Fclose(file_id1); + } + H5E_END_TRY; + file_id1 = H5I_INVALID_HID; + } + if (file_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Fclose(file_id2); + } + H5E_END_TRY; + file_id2 = H5I_INVALID_HID; + } + + PART_BEGIN(H5Fget_create_plist_reopened) + { + TESTING_2("H5Fget_create_plist after re-opening file"); + + if ((file_id1 = H5Fopen(prefixed_filename1, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + PART_ERROR(H5Fget_create_plist_reopened); + } + + if ((file_id2 = H5Fopen(prefixed_filename2, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + PART_ERROR(H5Fget_create_plist_reopened); + } + + if ((fcpl_id1 = H5Fget_create_plist(file_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get FCPL\n"); + PART_ERROR(H5Fget_create_plist_reopened); + } + + if ((fcpl_id2 = H5Fget_create_plist(file_id2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get FCPL\n"); + PART_ERROR(H5Fget_create_plist_reopened); + } + + /* Check the values of the test property */ + if (H5Pget_userblock(fcpl_id1, &prop_val) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve test property from FCPL\n"); + PART_ERROR(H5Fget_create_plist_reopened); + } + + if (prop_val != FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL) { + H5_FAILED(); + HDprintf(" retrieved test property value '%llu' did not match expected value '%llu'\n", + (long long unsigned)prop_val, + (long long unsigned)FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL); + PART_ERROR(H5Fget_create_plist_reopened); + } + + if (H5Pget_userblock(fcpl_id2, &prop_val) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve test property from FCPL\n"); + PART_ERROR(H5Fget_create_plist_reopened); + } + + if (prop_val == FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL) { + HDprintf(" retrieved test property value '%llu' matched control value '%llu' when it " + "shouldn't have\n", + (long long unsigned)prop_val, + (long long unsigned)FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL); + PART_ERROR(H5Fget_create_plist_reopened); + } + + PASSED(); + } + PART_END(H5Fget_create_plist_reopened); + } + 
END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(fcpl_id1) < 0) + TEST_ERROR; + if (H5Pclose(fcpl_id2) < 0) + TEST_ERROR; + if (H5Fclose(file_id1) < 0) + TEST_ERROR; + if (H5Fclose(file_id2) < 0) + TEST_ERROR; + + HDfree(prefixed_filename1); + prefixed_filename1 = NULL; + HDfree(prefixed_filename2); + prefixed_filename2 = NULL; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(fcpl_id1); + H5Pclose(fcpl_id2); + H5Pclose(fapl_id1); + H5Pclose(fapl_id2); + H5Fclose(file_id1); + H5Fclose(file_id2); + } + H5E_END_TRY; + + HDfree(prefixed_filename1); + HDfree(prefixed_filename2); + + return 1; +} + +/* + * A test to check that the file intent flags can be retrieved. + */ +static int +test_get_file_intent(void) +{ + unsigned file_intent; + hid_t file_id = H5I_INVALID_HID; + char *prefixed_filename = NULL; + + TESTING_MULTIPART("retrieval of file intent with H5Fget_intent"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic or more file aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if (prefix_filename(test_path_prefix, FILE_INTENT_TEST_FILENAME, &prefixed_filename) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + /* Test that file intent retrieval works correctly for file create */ + if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", prefixed_filename); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Fget_intent_file_creation) + { + TESTING_2("H5Fget_intent on newly-created file"); + + if (H5Fget_intent(file_id, &file_intent) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve file intent\n"); + PART_ERROR(H5Fget_intent_file_creation); + } + + if (H5F_ACC_RDWR != file_intent) { + H5_FAILED(); + HDprintf(" received incorrect file intent for file creation\n"); + PART_ERROR(H5Fget_intent_file_creation); + } + + PASSED(); + } + PART_END(H5Fget_intent_file_creation); + + if (file_id >= 0) { + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + file_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Fget_intent_rdonly_file_open) + { + TESTING_2("H5Fget_intent for file opened read-only"); + + /* Test that file intent retrieval works correctly for file open */ + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + PART_ERROR(H5Fget_intent_rdonly_file_open); + } + + if (H5Fget_intent(file_id, &file_intent) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve file intent\n"); + PART_ERROR(H5Fget_intent_rdonly_file_open); + } + + if (H5F_ACC_RDONLY != file_intent) { + H5_FAILED(); + HDprintf(" received incorrect file intent for read-only file open\n"); + PART_ERROR(H5Fget_intent_rdonly_file_open); + } + + PASSED(); + } + PART_END(H5Fget_intent_rdonly_file_open); + + if (file_id >= 0) { + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + file_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Fget_intent_rdwrite_file_open) + { + TESTING_2("H5Fget_intent for file opened read-write"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + 
PART_ERROR(H5Fget_intent_rdwrite_file_open); + } + + if (H5Fget_intent(file_id, &file_intent) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve file intent\n"); + PART_ERROR(H5Fget_intent_rdwrite_file_open); + } + + if (H5F_ACC_RDWR != file_intent) { + H5_FAILED(); + HDprintf(" received incorrect file intent\n"); + PART_ERROR(H5Fget_intent_rdwrite_file_open); + } + + PASSED(); + } + PART_END(H5Fget_intent_rdwrite_file_open); + + if (file_id >= 0) { + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + file_id = H5I_INVALID_HID; + } + } + END_MULTIPART; + + HDfree(prefixed_filename); + prefixed_filename = NULL; + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + + HDfree(prefixed_filename); + + return 1; +} + +/* + * A test to check that the number of open objects and IDs of objects in a file + * can be retrieved. + */ +static int +test_get_file_obj_count(void) +{ + ssize_t obj_count; + hid_t file_id = H5I_INVALID_HID; + hid_t file_id2 = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t object_id = H5I_INVALID_HID; + hid_t named_dtype_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t dspace_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + char *prefixed_filename1 = NULL; + char *prefixed_filename2 = NULL; + + TESTING_MULTIPART("retrieval of open object number and IDs"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic or more file, basic dataset, group, datatype, or attribute " + "aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if (prefix_filename(test_path_prefix, GET_OBJ_COUNT_TEST_FILENAME1, &prefixed_filename1) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + if (prefix_filename(test_path_prefix, GET_OBJ_COUNT_TEST_FILENAME2, &prefixed_filename2) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + if ((file_id = H5Fcreate(prefixed_filename1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", prefixed_filename1); + goto error; + } + + if ((group_id = H5Gcreate2(file_id, GET_OBJ_COUNT_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", GET_OBJ_COUNT_TEST_GRP_NAME); + goto error; + } + + /* Create a second file while keeping the first file open */ + if ((file_id2 = H5Fcreate(prefixed_filename2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", prefixed_filename2); + goto error; + } + + /* Create a named datatype */ + if ((named_dtype_id = H5Tcopy(H5T_NATIVE_INT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy a native datatype\n"); + goto error; + } + + if (H5Tcommit2(file_id2, GET_OBJ_COUNT_TEST_NAMED_DTYPE, named_dtype_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit a named datatype\n"); + goto error; + } + + /* Create a dataspace for the attribute and dataset */ + if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create data space for attribute\n"); 
+ goto error; + } + + /* Create an attribute for the second file */ + if ((attr_id = H5Acreate2(file_id2, GET_OBJ_COUNT_TEST_ATTR_NAME, H5T_NATIVE_INT, dspace_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create the attribute '%s'\n", GET_OBJ_COUNT_TEST_ATTR_NAME); + goto error; + } + + /* Create a dataset for the second file */ + if ((dset_id = H5Dcreate2(file_id2, GET_OBJ_COUNT_TEST_DSET_NAME, H5T_NATIVE_INT, dspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create the dataset '%s'\n", GET_OBJ_COUNT_TEST_DSET_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Fget_obj_count_files) + { + TESTING_2("H5Fget_obj_count for files"); + + /* Get the number of files currently opened */ + if ((obj_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_FILE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get the number of open files\n"); + PART_ERROR(H5Fget_obj_count_files); + } + + if (obj_count != 2) { + H5_FAILED(); + HDprintf(" number of open files (%ld) did not match expected number (2)\n", obj_count); + PART_ERROR(H5Fget_obj_count_files); + } + + PASSED(); + } + PART_END(H5Fget_obj_count_files); + + PART_BEGIN(H5Fget_obj_count_grps_single_file) + { + TESTING_2("H5Fget_obj_count for groups in single file"); + + /* Get the number of groups */ + if ((obj_count = H5Fget_obj_count(file_id, H5F_OBJ_GROUP)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve number of open groups\n"); + PART_ERROR(H5Fget_obj_count_grps_single_file); + } + + if (obj_count != 1) { + H5_FAILED(); + HDprintf(" number of open groups (%ld) did not match expected number (1)\n", obj_count); + PART_ERROR(H5Fget_obj_count_grps_single_file); + } + + PASSED(); + } + PART_END(H5Fget_obj_count_grps_single_file); + + PART_BEGIN(H5Fget_obj_count_grps) + { + TESTING_2("H5Fget_obj_count for groups"); + + /* Get the number of groups in two opened files */ + if ((obj_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_GROUP)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get the number of open groups\n"); + PART_ERROR(H5Fget_obj_count_grps); + } + + if (obj_count != 1) { + H5_FAILED(); + HDprintf(" number of open groups (%ld) did not match expected number (1)\n", obj_count); + PART_ERROR(H5Fget_obj_count_grps); + } + + PASSED(); + } + PART_END(H5Fget_obj_count_grps); + + PART_BEGIN(H5Fget_obj_count_types) + { + TESTING_2("H5Fget_obj_count for datatypes"); +#ifndef WRONG_DATATYPE_OBJ_COUNT + /* Get the number of named datatype in two opened files */ + if ((obj_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATATYPE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get the number of open named datatypes\n"); + PART_ERROR(H5Fget_obj_count_types); + } + + if (obj_count != 1) { + H5_FAILED(); + HDprintf(" number of open named datatypes (%ld) did not match expected number (1)\n", + obj_count); + PART_ERROR(H5Fget_obj_count_types); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Fget_obj_count_types); +#endif + } + PART_END(H5Fget_obj_count_types); + + PART_BEGIN(H5Fget_obj_count_attrs) + { + TESTING_2("H5Fget_obj_count for attributes"); + + /* Get the number of attribute in two opened files */ + if ((obj_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ATTR)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get the number of open attributes\n"); + PART_ERROR(H5Fget_obj_count_attrs); + } + + if (obj_count != 1) { + H5_FAILED(); + HDprintf(" number of open attributes (%ld) did not match expected number (1)\n", + obj_count); + 
PART_ERROR(H5Fget_obj_count_attrs); + } + + PASSED(); + } + PART_END(H5Fget_obj_count_attrs); + + PART_BEGIN(H5Fget_obj_count_dsets) + { + TESTING_2("H5Fget_obj_count for datasets"); + + /* Get the number of dataset in two opened files */ + if ((obj_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATASET)) < 0 || obj_count != 1) { + H5_FAILED(); + HDprintf(" couldn't get the number of open datasets\n"); + PART_ERROR(H5Fget_obj_count_dsets); + } + + if (obj_count != 1) { + H5_FAILED(); + HDprintf(" number of open datasets (%ld) did not match expected number (1)\n", obj_count); + PART_ERROR(H5Fget_obj_count_dsets); + } + + PASSED(); + } + PART_END(H5Fget_obj_count_dsets); + + PART_BEGIN(H5Fget_obj_count_all_single_file) + { + TESTING_2("H5Fget_obj_count for all object types in single file"); + + /* Get the number of all open objects */ + if ((obj_count = H5Fget_obj_count(file_id, H5F_OBJ_ALL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve number of open objects\n"); + PART_ERROR(H5Fget_obj_count_all_single_file); + } + + /* One for the file and another for the group */ + if (obj_count != 2) { + H5_FAILED(); + HDprintf(" number of open objects (%ld) did not match expected number (2)\n", obj_count); + PART_ERROR(H5Fget_obj_count_all_single_file); + } + + PASSED(); + } + PART_END(H5Fget_obj_count_all_single_file); + + PART_BEGIN(H5Fget_obj_count_all) + { + TESTING_2("H5Fget_obj_count for all object types"); +#ifndef WRONG_DATATYPE_OBJ_COUNT + /* Get the number of all open objects */ + if ((obj_count = H5Fget_obj_count(H5F_OBJ_ALL, H5F_OBJ_ALL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve number of open objects\n"); + PART_ERROR(H5Fget_obj_count_all); + } + + if (obj_count != 6) { + H5_FAILED(); + HDprintf(" number of open objects (%ld) did not match expected number (6)\n", obj_count); + PART_ERROR(H5Fget_obj_count_all); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Fget_obj_count_all); +#endif + } + PART_END(H5Fget_obj_count_all); + + PART_BEGIN(H5Fget_obj_ids_singular_grp) + { + TESTING_2("H5Fget_obj_ids for a singular group"); + + if (H5Fget_obj_ids(file_id, H5F_OBJ_GROUP, (size_t)obj_count, &object_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't get opened group IDs\n"); + PART_ERROR(H5Fget_obj_ids_singular_grp); + } + + if (object_id != group_id) { + H5_FAILED(); + HDprintf(" opened object ID (%ld) did not match only currently open group ID (%ld)\n", + object_id, group_id); + PART_ERROR(H5Fget_obj_ids_singular_grp); + } + + PASSED(); + } + PART_END(H5Fget_obj_ids_singular_grp); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Sclose(dspace_id) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Tclose(named_dtype_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id2) < 0) + TEST_ERROR; + + HDfree(prefixed_filename1); + prefixed_filename1 = NULL; + HDfree(prefixed_filename2); + prefixed_filename2 = NULL; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Tclose(named_dtype_id); + H5Sclose(dspace_id); + H5Aclose(attr_id); + H5Dclose(dset_id); + H5Fclose(file_id); + H5Fclose(file_id2); + } + H5E_END_TRY; + + HDfree(prefixed_filename1); + HDfree(prefixed_filename2); + + return 1; +} + +/* + * A test to check that opening files in an overlapping way + * works correctly. 
+ */ +static int +test_file_open_overlap(void) +{ +#ifndef NO_DOUBLE_OBJECT_OPENS + ssize_t obj_count; + hid_t file_id = H5I_INVALID_HID; + hid_t file_id2 = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t dspace_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + char *prefixed_filename = NULL; +#endif + + TESTING("overlapping file opens"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic or more file, dataset, or group aren't supported with this " + "connector\n"); + return 0; + } + +#ifndef NO_DOUBLE_OBJECT_OPENS + if (prefix_filename(test_path_prefix, OVERLAPPING_FILENAME, &prefixed_filename) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", prefixed_filename); + goto error; + } + + if ((file_id2 = H5Fopen(prefixed_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", prefixed_filename); + goto error; + } + + if ((group_id = H5Gcreate2(file_id, OVERLAPPING_OPEN_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OVERLAPPING_OPEN_TEST_GRP_NAME); + goto error; + } + + /* Create a dataspace for the dataset */ + if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create data space for dataset\n"); + goto error; + } + + /* Create a dataset in the group of the first file */ + if ((dset_id = H5Dcreate2(group_id, OVERLAPPING_OPEN_TEST_DSET_NAME, H5T_NATIVE_INT, dspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create the dataset '%s'\n", OVERLAPPING_OPEN_TEST_DSET_NAME); + goto error; + } + + /* Get the number of objects opened in the first file: 3 == file + dataset + group */ + if ((obj_count = H5Fget_obj_count(file_id, H5F_OBJ_LOCAL | H5F_OBJ_ALL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve the number of objects opened in the file\n"); + goto error; + } + + if (obj_count != 3) { + H5_FAILED(); + HDprintf(" number of objects opened in file (%ld) did not match expected number (3)\n", obj_count); + goto error; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + /* Create a dataset in the second file */ + if ((dset_id = H5Dcreate2(file_id2, OVERLAPPING_OPEN_TEST_DSET_NAME, H5T_NATIVE_INT, dspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create the dataset '%s'\n", OVERLAPPING_OPEN_TEST_DSET_NAME); + goto error; + } + + /* Get the number of objects opened via the second file ID: 2 == file + dataset */ + if ((obj_count = H5Fget_obj_count(file_id2, H5F_OBJ_ALL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve the number of objects opened in the file\n"); + goto error; + } + + if (obj_count != 2) { + H5_FAILED(); + HDprintf(" number of objects opened in the file (%ld) did not match expected number (2)\n", + obj_count); + goto error; + } + + if (H5Sclose(dspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id2) < 0) + TEST_ERROR; + + 
HDfree(prefixed_filename); + prefixed_filename = NULL; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Sclose(dspace_id); + H5Dclose(dset_id); + H5Fclose(file_id); + H5Fclose(file_id2); + } + H5E_END_TRY; + + HDfree(prefixed_filename); + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check that file mounting and unmounting works + * correctly. + */ +static int +test_file_mounts(void) +{ +#ifndef NO_FILE_MOUNTS + hid_t file_id = H5I_INVALID_HID; + hid_t child_fid = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + char *prefixed_filename = NULL; +#endif + + TESTING("file mounting/unmounting"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_MOUNT) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, file mount, or basic group aren't supported with this " + "connector\n"); + return 0; + } + +#ifndef NO_FILE_MOUNTS + if (prefix_filename(test_path_prefix, FILE_MOUNT_TEST_FILENAME, &prefixed_filename) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", prefixed_filename); + goto error; + } + + if ((group_id = H5Gcreate2(file_id, FILE_MOUNT_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", FILE_MOUNT_TEST_GRP_NAME); + goto error; + } + + if ((child_fid = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + /* Mount one file (child_fid) to the group of another file (file_id) */ + if (H5Fmount(file_id, FILE_MOUNT_TEST_GRP_NAME, child_fid, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't mount file\n"); + goto error; + } + + if (H5Funmount(file_id, FILE_MOUNT_TEST_GRP_NAME) < 0) { + H5_FAILED(); + HDprintf(" couldn't unmount file\n"); + goto error; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + if (H5Fclose(child_fid) < 0) + TEST_ERROR; + + HDfree(prefixed_filename); + prefixed_filename = NULL; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Fclose(file_id); + H5Fclose(child_fid); + } + H5E_END_TRY; + + HDfree(prefixed_filename); + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to ensure that a file's name can be retrieved. 
+ */ +static int +test_get_file_name(void) +{ + ssize_t file_name_buf_len = 0; + hid_t file_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dspace_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t named_dtype_id = H5I_INVALID_HID; + char *prefixed_filename = NULL; + char *file_name_buf = NULL; + + TESTING_MULTIPART("retrieval of file name"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic or more file, basic dataset, group, datatype, or attribute " + "aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if (prefix_filename(test_path_prefix, GET_FILE_NAME_TEST_FNAME, &prefixed_filename) < 0) { + H5_FAILED(); + HDprintf(" couldn't prefix filename\n"); + goto error; + } + + if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", prefixed_filename); + goto error; + } + + /* Retrieve the size of the file name */ + if ((file_name_buf_len = H5Fget_name(file_id, NULL, 0)) < 0) + TEST_ERROR; + + /* Allocate buffer for file name */ + if (NULL == (file_name_buf = (char *)HDmalloc((size_t)file_name_buf_len + 1))) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Fget_name_file_id) + { + TESTING_2("H5Fget_name using file ID"); + + memset(file_name_buf, 0, (size_t)file_name_buf_len); + + /* Retrieve the actual file name */ + if (H5Fget_name(file_id, file_name_buf, (size_t)file_name_buf_len + 1) < 0) { + H5_FAILED(); + HDprintf(" couldn't get file name %s\n", prefixed_filename); + PART_ERROR(H5Fget_name_file_id); + } + + if (HDstrncmp(file_name_buf, prefixed_filename, (size_t)file_name_buf_len)) { + H5_FAILED(); + HDprintf(" file name '%s' didn't match expected name '%s'\n", file_name_buf, + prefixed_filename); + PART_ERROR(H5Fget_name_file_id); + } + + PASSED(); + } + PART_END(H5Fget_name_file_id); + + PART_BEGIN(H5Fget_name_grp_id) + { + TESTING_2("H5Fget_name using non-root group ID"); + + /* Attempt to retrieve the name of the file from an object that isn't the root group */ + memset(file_name_buf, 0, (size_t)file_name_buf_len); + + if ((group_id = H5Gcreate2(file_id, GET_FILE_NAME_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create group '%s'\n", GET_FILE_NAME_TEST_GRP_NAME); + PART_ERROR(H5Fget_name_grp_id); + } + + if (H5Fget_name(group_id, file_name_buf, (size_t)file_name_buf_len + 1) < 0) { + H5_FAILED(); + HDprintf(" couldn't get file name %s\n", prefixed_filename); + PART_ERROR(H5Fget_name_grp_id); + } + + if (HDstrncmp(file_name_buf, prefixed_filename, (size_t)file_name_buf_len)) { + H5_FAILED(); + HDprintf(" file name '%s' didn't match expected name '%s'\n", file_name_buf, + prefixed_filename); + PART_ERROR(H5Fget_name_grp_id); + } + + if (group_id >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(group_id); + } + H5E_END_TRY; + group_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(H5Fget_name_grp_id); + + PART_BEGIN(H5Fget_name_dset_id) + { + TESTING_2("H5Fget_name using dataset ID"); + + if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) { + 
H5_FAILED(); + HDprintf(" couldn't create dataspace\n"); + PART_ERROR(H5Fget_name_dset_id); + } + + /* Create a dataset in the file */ + if ((dset_id = H5Dcreate2(file_id, GET_FILE_NAME_TEST_DSET_NAME, H5T_NATIVE_INT, dspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create the dataset '%s'\n", GET_FILE_NAME_TEST_DSET_NAME); + PART_ERROR(H5Fget_name_dset_id); + } + + /* Get and verify file name from the dataset */ + if (H5Fget_name(dset_id, file_name_buf, (size_t)file_name_buf_len + 1) < 0) { + H5_FAILED(); + HDprintf(" couldn't get file name %s\n", prefixed_filename); + PART_ERROR(H5Fget_name_dset_id); + } + + if (HDstrncmp(file_name_buf, prefixed_filename, (size_t)file_name_buf_len)) { + H5_FAILED(); + HDprintf(" file name '%s' didn't match expected name '%s'\n", file_name_buf, + prefixed_filename); + PART_ERROR(H5Fget_name_dset_id); + } + + if (dspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(dspace_id); + } + H5E_END_TRY; + dspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(H5Fget_name_dset_id); + + PART_BEGIN(H5Fget_name_attr_id) + { + TESTING_2("H5Fget_name using attribute ID"); + + if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataspace\n"); + PART_ERROR(H5Fget_name_attr_id); + } + + /* Create an attribute for the dataset */ + if ((attr_id = H5Acreate2(file_id, GET_FILE_NAME_TEST_ATTR_NAME, H5T_NATIVE_INT, dspace_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create the attribute '%s'\n", GET_FILE_NAME_TEST_ATTR_NAME); + PART_ERROR(H5Fget_name_attr_id); + } + + /* Get and verify file name from the attribute */ + if (H5Fget_name(attr_id, file_name_buf, (size_t)file_name_buf_len + 1) < 0) { + H5_FAILED(); + HDprintf(" couldn't get file name %s\n", prefixed_filename); + PART_ERROR(H5Fget_name_attr_id); + } + + if (HDstrncmp(file_name_buf, prefixed_filename, (size_t)file_name_buf_len)) { + H5_FAILED(); + HDprintf(" file name '%s' didn't match expected name '%s'\n", file_name_buf, + prefixed_filename); + PART_ERROR(H5Fget_name_attr_id); + } + + if (dspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(dspace_id); + } + H5E_END_TRY; + dspace_id = H5I_INVALID_HID; + } + if (attr_id >= 0) { + H5E_BEGIN_TRY + { + H5Aclose(attr_id); + } + H5E_END_TRY; + attr_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(H5Fget_name_attr_id); + + PART_BEGIN(H5Fget_name_dtype_id) + { + TESTING_2("H5Fget_name using committed datatype ID"); + + /* Create a named datatype */ + if ((named_dtype_id = H5Tcopy(H5T_NATIVE_INT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy a native datatype\n"); + PART_ERROR(H5Fget_name_dtype_id); + } + + if (H5Tcommit2(file_id, GET_FILE_NAME_TEST_NAMED_DTYPE, named_dtype_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit a named datatype\n"); + PART_ERROR(H5Fget_name_dtype_id); + } + + /* Get and verify file name from the committed datatype */ + if (H5Fget_name(named_dtype_id, file_name_buf, (size_t)file_name_buf_len + 1) < 0) { + H5_FAILED(); + HDprintf(" couldn't get file name %s\n", prefixed_filename); + PART_ERROR(H5Fget_name_dtype_id); + } + + if (HDstrncmp(file_name_buf, prefixed_filename, (size_t)file_name_buf_len)) { + H5_FAILED(); + HDprintf(" file name '%s' didn't match expected name '%s'\n", file_name_buf, + prefixed_filename); + PART_ERROR(H5Fget_name_dtype_id); + } + + if 
(named_dtype_id >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(named_dtype_id); + } + H5E_END_TRY; + named_dtype_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(H5Fget_name_dtype_id); + + PART_BEGIN(H5Fget_name_dspace_id) + { + ssize_t name_len = 0; + + TESTING_2("invalid H5Fget_name using dataspace ID"); + + if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataspace\n"); + PART_ERROR(H5Fget_name_dspace_id); + } + + /* Try get file name from data space. Supposed to fail because + * it's illegal operation. */ + H5E_BEGIN_TRY + { + name_len = H5Fget_name(dspace_id, file_name_buf, (size_t)file_name_buf_len + 1); + } + H5E_END_TRY; + + if (name_len >= 0) { + H5_FAILED(); + HDprintf(" retrieved file name using H5Fget_name on a dataspace ID!\n"); + PART_ERROR(H5Fget_name_dspace_id); + } + + if (dspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(dspace_id); + } + H5E_END_TRY; + dspace_id = H5I_INVALID_HID; + } + + PASSED(); + } + PART_END(H5Fget_name_dspace_id); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (file_name_buf) { + HDfree(file_name_buf); + file_name_buf = NULL; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + HDfree(prefixed_filename); + prefixed_filename = NULL; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (file_name_buf) + HDfree(file_name_buf); + H5Tclose(named_dtype_id); + H5Sclose(dspace_id); + H5Dclose(dset_id); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + HDfree(prefixed_filename); + + return 1; +} + +/* + * Cleanup temporary test files + */ +static void +cleanup_files(void) +{ + remove_test_file(test_path_prefix, FILE_CREATE_TEST_FILENAME); + remove_test_file(test_path_prefix, FILE_CREATE_EXCL_FILE_NAME); + + /* The below file should not get created */ + /* remove_test_file(test_path_prefix, FILE_CREATE_INVALID_PARAMS_FILE_NAME); */ + +#ifndef NO_DOUBLE_OBJECT_OPENS + remove_test_file(test_path_prefix, OVERLAPPING_FILENAME); +#endif + remove_test_file(test_path_prefix, FILE_PERMISSION_TEST_FILENAME); + remove_test_file(test_path_prefix, FILE_FLUSH_TEST_FILENAME); + remove_test_file(test_path_prefix, FILE_PROPERTY_LIST_TEST_FNAME1); + remove_test_file(test_path_prefix, FILE_PROPERTY_LIST_TEST_FNAME2); + remove_test_file(test_path_prefix, FILE_INTENT_TEST_FILENAME); + remove_test_file(test_path_prefix, GET_OBJ_COUNT_TEST_FILENAME1); + remove_test_file(test_path_prefix, GET_OBJ_COUNT_TEST_FILENAME2); +#ifndef NO_FILE_MOUNTS + remove_test_file(test_path_prefix, FILE_MOUNT_TEST_FILENAME); +#endif + remove_test_file(test_path_prefix, GET_FILE_NAME_TEST_FNAME); +} + +int +H5_api_file_test(void) +{ + size_t i; + int nerrors; + + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API File Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(file_tests); i++) { + nerrors += (*file_tests[i])() ? 1 : 0; + } + + HDprintf("\n"); + + HDprintf("Cleaning up testing files\n"); + cleanup_files(); + + return nerrors; +} diff --git a/test/API/H5_api_file_test.h b/test/API/H5_api_file_test.h new file mode 100644 index 00000000000..948cb6ae839 --- /dev/null +++ b/test/API/H5_api_file_test.h @@ -0,0 +1,85 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_FILE_TEST_H +#define H5_API_FILE_TEST_H + +#include "H5_api_test.h" + +int H5_api_file_test(void); + +/********************************************* + * * + * API File test defines * + * * + *********************************************/ + +#define FILE_CREATE_TEST_FILENAME "test_file.h5" + +#define FILE_CREATE_INVALID_PARAMS_FILE_NAME "invalid_params_file.h5" + +#define FILE_CREATE_EXCL_FILE_NAME "excl_flag_file.h5" + +#define NONEXISTENT_FILENAME "nonexistent_file.h5" + +#define OVERLAPPING_FILENAME "overlapping_file.h5" +#define OVERLAPPING_OPEN_TEST_GRP_NAME "group" +#define OVERLAPPING_OPEN_TEST_DSET_NAME "dataset" + +#define FILE_PERMISSION_TEST_FILENAME "file_permission.h5" +#define FILE_PERMISSION_TEST_GRP_NAME "group" +#define FILE_PERMISSION_TEST_DSET_NAME "Dataset" +#define FILE_PERMISSION_TEST_DSET2_NAME "Dataset2" +#define FILE_PERMISSION_TEST_ATTR_NAME "attribute" +#define FILE_PERMISSION_TEST_NAMED_DTYPE "named_dtype" + +#define FILE_FLUSH_TEST_FILENAME "flush_file.h5" + +#define FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL 65536 +#define FILE_PROPERTY_LIST_TEST_FNAME1 "property_list_test_file1.h5" +#define FILE_PROPERTY_LIST_TEST_FNAME2 "property_list_test_file2.h5" + +#define FILE_INTENT_TEST_FILENAME "intent_test_file.h5" + +#define GET_OBJ_COUNT_TEST_FILENAME1 "file_obj_count1.h5" +#define GET_OBJ_COUNT_TEST_FILENAME2 "file_obj_count2.h5" +#define GET_OBJ_COUNT_TEST_GRP_NAME "/group" +#define GET_OBJ_COUNT_TEST_DSET_NAME "Dataset" +#define GET_OBJ_COUNT_TEST_ATTR_NAME "Attribute" +#define GET_OBJ_COUNT_TEST_NAMED_DTYPE "named_dtype" + +#define FILE_MOUNT_TEST_FILENAME "file_mount.h5" +#define FILE_MOUNT_TEST_GRP_NAME "group" + +#define GET_FILE_NAME_TEST_FNAME "file_name_retrieval.h5" +#define GET_FILE_NAME_TEST_GRP_NAME "group" +#define GET_FILE_NAME_TEST_DSET_NAME "dataset" +#define GET_FILE_NAME_TEST_ATTR_NAME "attribute" +#define GET_FILE_NAME_TEST_NAMED_DTYPE "datatype" + +#define FILESPACE_INFO_FILENAME "filespace_info.h5" +#define FSP_SIZE512 (hsize_t)512 + +#define FILE_GET_ID_TEST_FILENAME "test_file_id.h5" + +#define FILE_CLOSE_DEGREE_FILENAME "test_close_degree.h5" + +#define GET_FREE_SECTIONS_FILENAME "test_free_sections.h5" + +#define FILE_SIZE_FILENAME "file_size.h5" +#define KB 1024U + +#define FILE_INFO_FILENAME "file_info.h5" + +#define DOUBLE_GROUP_OPEN_FILENAME "double_group_open.h5" + +#endif diff --git a/test/API/H5_api_group_test.c b/test/API/H5_api_group_test.c new file mode 100644 index 00000000000..f65220291df --- /dev/null +++ b/test/API/H5_api_group_test.c @@ -0,0 +1,2394 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. 
* + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_group_test.h" + +static int test_create_group_under_root(void); +static int test_create_group_under_existing_group(void); +static int test_create_many_groups(void); +static int test_create_deep_groups(void); +static int test_create_intermediate_group(void); +static int test_create_group_invalid_params(void); +static int test_create_anonymous_group(void); +static int test_create_anonymous_group_invalid_params(void); +static int test_open_nonexistent_group(void); +static int test_open_group_invalid_params(void); +static int test_close_group_invalid_id(void); +static int test_group_property_lists(void); +static int test_get_group_info(void); +static int test_get_group_info_invalid_params(void); +static int test_flush_group(void); +static int test_flush_group_invalid_params(void); +static int test_refresh_group(void); +static int test_refresh_group_invalid_params(void); +static int create_group_recursive(hid_t parent_gid, unsigned counter); + +/* + * The array of group tests to be performed. + */ +static int (*group_tests[])(void) = { + test_create_group_under_root, + test_create_group_under_existing_group, + test_create_many_groups, + test_create_deep_groups, + test_create_intermediate_group, + test_create_group_invalid_params, + test_create_anonymous_group, + test_create_anonymous_group_invalid_params, + test_open_nonexistent_group, + test_open_group_invalid_params, + test_close_group_invalid_id, + test_group_property_lists, + test_get_group_info, + test_get_group_info_invalid_params, + test_flush_group, + test_flush_group_invalid_params, + test_refresh_group, + test_refresh_group_invalid_params, +}; + +/* + * A test to check that a group can be created under the root group. + */ +static int +test_create_group_under_root(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t parent_gid = H5I_INVALID_HID; + + TESTING("creation of group under the root group"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or group aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + /* Create the group under the root group of the file */ + if ((parent_gid = + H5Gcreate2(file_id, GROUP_CREATE_UNDER_ROOT_GNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", GROUP_CREATE_UNDER_ROOT_GNAME); + goto error; + } + + if (H5Gclose(parent_gid) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(parent_gid); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a group can be created under an existing + * group which is not the root group. 
+ */ +static int +test_create_group_under_existing_group(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t parent_group_id = H5I_INVALID_HID, child_group_id = H5I_INVALID_HID, + grandchild_group_id = H5I_INVALID_HID; + + TESTING("creation of group under existing group using a relative path"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or group aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + /* Open the already-existing group (/group_tests) in the file as the parent */ + if ((parent_group_id = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open group\n"); + goto error; + } + + /* Create a new group (/group_tests/child_group) under the already-existing parent Group using a relative + * path */ + if ((child_group_id = H5Gcreate2(parent_group_id, GROUP_CREATE_UNDER_GROUP_REL_GNAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group using relative path: %s\n", GROUP_CREATE_UNDER_GROUP_REL_GNAME); + goto error; + } + + /* Create a new group (child_group/grandchild_group) under the already-existing parent Group using an + * absolute path */ + if ((grandchild_group_id = H5Gcreate2(parent_group_id, GROUP_CREATE_UNDER_GROUP_ABS_GNAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group using absolute path: %s\n", GROUP_CREATE_UNDER_GROUP_ABS_GNAME); + goto error; + } + + if (H5Gclose(grandchild_group_id) < 0) + TEST_ERROR; + if (H5Gclose(child_group_id) < 0) + TEST_ERROR; + if (H5Gclose(parent_group_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(grandchild_group_id); + H5Gclose(child_group_id); + H5Gclose(parent_group_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to create many (one million) groups + */ +static int +test_create_many_groups(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t parent_group_id = H5I_INVALID_HID, child_group_id = H5I_INVALID_HID; + char group_name[NAME_BUF_SIZE]; + unsigned i; + + TESTING("H5Gcreate many groups"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or group aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((parent_group_id = H5Gcreate2(container_group, MANY_GROUP_CREATIONS_GNAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", MANY_GROUP_CREATIONS_GNAME); + goto error; + } + + /* Create multiple groups under the parent group */ + HDprintf("\n"); + for (i = 0; i < GROUP_NUMB_MANY; i++) { + 
HDprintf("\r %u/%u", i + 1, GROUP_NUMB_MANY); + sprintf(group_name, "group %02u", i); + if ((child_group_id = + H5Gcreate2(parent_group_id, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", group_name); + goto error; + } + + if (H5Gclose(child_group_id) < 0) + TEST_ERROR; + } + + if (H5Gclose(parent_group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(child_group_id); + H5Gclose(parent_group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to create groups of the depth GROUP_DEPTH. + */ +static int +test_create_deep_groups(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + + TESTING("H5Gcreate groups of great depths"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or group aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + /* Create the group under the root group of the file */ + if ((group_id = H5Gcreate2(container_group, DEEP_GROUP_CREATIONS_GNAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", DEEP_GROUP_CREATIONS_GNAME); + goto error; + } + + HDprintf("\n"); + if (create_group_recursive(group_id, 1) < 0) + TEST_ERROR; + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Recursive function to create groups of the depth GROUP_DEPTH. 
+ */ +static int +create_group_recursive(hid_t parent_gid, unsigned counter) +{ + hid_t child_gid = H5I_INVALID_HID; + char gname[NAME_BUF_SIZE]; + + HDprintf("\r %u/%u", counter, GROUP_DEPTH); + if (counter == 1) + sprintf(gname, "2nd_child_group"); + else if (counter == 2) + sprintf(gname, "3rd_child_group"); + else + sprintf(gname, "%dth_child_group", counter + 1); + if ((child_gid = H5Gcreate2(parent_gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", gname); + goto error; + } + + if (counter < GROUP_DEPTH) { + if (create_group_recursive(child_gid, counter + 1) < 0) + TEST_ERROR; + } + + if (H5Gclose(child_gid) < 0) + TEST_ERROR; + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(child_gid); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to create groups automatically using H5Pset_create_intermediate_group + */ +static int +test_create_intermediate_group(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t crt_intmd_lcpl_id = H5I_INVALID_HID; + + TESTING("H5Gcreate group with intermediate group creation"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or group aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + /* Set up plist for creating intermediate groups */ + if ((crt_intmd_lcpl_id = H5Pcreate(H5P_LINK_CREATE)) < 0) + TEST_ERROR; + if (H5Pset_create_intermediate_group(crt_intmd_lcpl_id, TRUE) < 0) + TEST_ERROR; + + /* Create an intermediate group using a relative path */ + if ((group_id = H5Gcreate2(container_group, + GROUP_CREATE_INTMD_REL_INTMD_NAME "/" GROUP_CREATE_INTMD_REL_END_NAME, + crt_intmd_lcpl_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + group_id = H5I_INVALID_HID; + + /* Verify both groups were created */ + if ((group_id = + H5Gopen2(file_id, GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_REL_INTMD_NAME, H5P_DEFAULT)) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + group_id = H5I_INVALID_HID; + if ((group_id = H5Gopen2(file_id, + GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_REL_INTMD_NAME + "/" GROUP_CREATE_INTMD_REL_END_NAME, + H5P_DEFAULT)) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + group_id = H5I_INVALID_HID; + + /* Create an intermediate group using an absolute path */ + if ((group_id = H5Gcreate2(container_group, + "/" GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_ABS_INTMD_NAME + "/" GROUP_CREATE_INTMD_ABS_END_NAME, + crt_intmd_lcpl_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + group_id = H5I_INVALID_HID; + + /* Verify both groups were created */ + if ((group_id = + H5Gopen2(file_id, GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_ABS_INTMD_NAME, H5P_DEFAULT)) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + group_id = H5I_INVALID_HID; + if ((group_id = H5Gopen2(file_id, + GROUP_TEST_GROUP_NAME "/" 
GROUP_CREATE_INTMD_ABS_INTMD_NAME + "/" GROUP_CREATE_INTMD_ABS_END_NAME, + H5P_DEFAULT)) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + group_id = H5I_INVALID_HID; + + /* Create two intermediate groups using an absolute path */ + if ((group_id = H5Gcreate2(container_group, + "/" GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_MULT_INTMD1_NAME + "/" GROUP_CREATE_INTMD_MULT_INTMD2_NAME "/" GROUP_CREATE_INTMD_MULT_END_NAME, + crt_intmd_lcpl_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + group_id = H5I_INVALID_HID; + + /* Verify all three groups were created */ + if ((group_id = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_MULT_INTMD1_NAME, + H5P_DEFAULT)) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + group_id = H5I_INVALID_HID; + if ((group_id = H5Gopen2(file_id, + GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_MULT_INTMD1_NAME + "/" GROUP_CREATE_INTMD_MULT_INTMD2_NAME, + H5P_DEFAULT)) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + group_id = H5I_INVALID_HID; + if ((group_id = H5Gopen2(file_id, + GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_MULT_INTMD1_NAME + "/" GROUP_CREATE_INTMD_MULT_INTMD2_NAME + "/" GROUP_CREATE_INTMD_MULT_END_NAME, + H5P_DEFAULT)) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + group_id = H5I_INVALID_HID; + + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + if (H5Pclose(crt_intmd_lcpl_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + H5Pclose(crt_intmd_lcpl_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a group can't be created when H5Gcreate + * is passed invalid parameters. 
+ */ +static int +test_create_group_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Gcreate with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or group aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Gcreate_invalid_loc_id) + { + TESTING_2("H5Gcreate with an invalid loc_id"); + + H5E_BEGIN_TRY + { + group_id = H5Gcreate2(H5I_INVALID_HID, GROUP_CREATE_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" created group with invalid loc_id!\n"); + H5Gclose(group_id); + PART_ERROR(H5Gcreate_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Gcreate_invalid_loc_id); + + PART_BEGIN(H5Gcreate_invalid_grp_name) + { + TESTING_2("H5Gcreate with an invalid group name"); + + H5E_BEGIN_TRY + { + group_id = H5Gcreate2(file_id, NULL, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" created group with a NULL name!\n"); + H5Gclose(group_id); + PART_ERROR(H5Gcreate_invalid_grp_name); + } + + H5E_BEGIN_TRY + { + group_id = H5Gcreate2(file_id, "", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" created group with an invalid group name of ''!\n"); + H5Gclose(group_id); + PART_ERROR(H5Gcreate_invalid_grp_name); + } + + PASSED(); + } + PART_END(H5Gcreate_invalid_grp_name); + + PART_BEGIN(H5Gcreate_invalid_lcpl) + { + TESTING_2("H5Gcreate with an invalid LCPL"); + + H5E_BEGIN_TRY + { + group_id = H5Gcreate2(file_id, GROUP_CREATE_INVALID_PARAMS_GROUP_NAME, H5I_INVALID_HID, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" created group with invalid LCPL!\n"); + H5Gclose(group_id); + PART_ERROR(H5Gcreate_invalid_lcpl); + } + + PASSED(); + } + PART_END(H5Gcreate_invalid_lcpl); + + PART_BEGIN(H5Gcreate_invalid_gcpl) + { + TESTING_2("H5Gcreate with an invalid GCPL"); + + H5E_BEGIN_TRY + { + group_id = H5Gcreate2(file_id, GROUP_CREATE_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT, + H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" created group with invalid GCPL!\n"); + H5Gclose(group_id); + PART_ERROR(H5Gcreate_invalid_gcpl); + } + + PASSED(); + } + PART_END(H5Gcreate_invalid_gcpl); + + PART_BEGIN(H5Gcreate_invalid_gapl) + { + TESTING_2("H5Gcreate with an invalid GAPL"); + + H5E_BEGIN_TRY + { + group_id = H5Gcreate2(file_id, GROUP_CREATE_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" created group with invalid GAPL!\n"); + H5Gclose(group_id); + PART_ERROR(H5Gcreate_invalid_gapl); + } + + PASSED(); + } + PART_END(H5Gcreate_invalid_gapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + 
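+/*
+ * Note on the invalid-parameter tests in this file: each call that is expected
+ * to fail is wrapped in H5E_BEGIN_TRY / H5E_END_TRY, which temporarily disables
+ * the automatic HDF5 error stack printing, and the test then only checks that
+ * the call reported failure (a negative hid_t or herr_t return value).
+ */
+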
+/* + * A test to check that an anonymous group can be created with + * H5Gcreate_anon. + */ +static int +test_create_anonymous_group(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, new_group_id = H5I_INVALID_HID; + + TESTING("creation of anonymous group"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or group aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open group\n"); + goto error; + } + + if ((new_group_id = H5Gcreate_anon(file_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create anonymous group\n"); + goto error; + } + + if (H5Gclose(new_group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(new_group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an anonymous group can't be created + * when H5Gcreate_anon is passed invalid parameters. + */ +static int +test_create_anonymous_group_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, new_group_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Gcreate_anon with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or group aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open group\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Gcreate_anon_invalid_loc_id) + { + TESTING_2("H5Gcreate_anon with an invalid loc_id"); + + H5E_BEGIN_TRY + { + new_group_id = H5Gcreate_anon(H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (new_group_id >= 0) { + H5_FAILED(); + HDprintf(" created anonymous group with invalid loc_id!\n"); + H5Gclose(new_group_id); + PART_ERROR(H5Gcreate_anon_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Gcreate_anon_invalid_loc_id); + + PART_BEGIN(H5Gcreate_anon_invalid_gcpl) + { + TESTING_2("H5Gcreate_anon with an invalid GCPL"); + + H5E_BEGIN_TRY + { + new_group_id = H5Gcreate_anon(container_group, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (new_group_id >= 0) { + H5_FAILED(); + HDprintf(" created anonymous group with invalid GCPL!\n"); + H5Gclose(new_group_id); + PART_ERROR(H5Gcreate_anon_invalid_gcpl); + } + + PASSED(); + } + PART_END(H5Gcreate_anon_invalid_gcpl); + + PART_BEGIN(H5Gcreate_anon_invalid_gapl) + { + TESTING_2("H5Gcreate_anon with an invalid GAPL"); + + H5E_BEGIN_TRY + { + new_group_id = H5Gcreate_anon(container_group, H5P_DEFAULT, 
H5I_INVALID_HID); + } + H5E_END_TRY; + + if (new_group_id >= 0) { + H5_FAILED(); + HDprintf(" created anonymous group with invalid GAPL!\n"); + H5Gclose(new_group_id); + PART_ERROR(H5Gcreate_anon_invalid_gapl); + } + + PASSED(); + } + PART_END(H5Gcreate_anon_invalid_gapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(new_group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a group which doesn't exist cannot + * be opened. + */ +static int +test_open_nonexistent_group(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + + TESTING("for invalid opening of a nonexistent group"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or group aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + H5E_BEGIN_TRY + { + group_id = H5Gopen2(file_id, OPEN_NONEXISTENT_GROUP_TEST_GNAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" opened non-existent group!\n"); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a group can't be opened when H5Gopen + * is passed invalid parameters. 
+ */ +static int +test_open_group_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Gopen with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or group aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Gopen_invalid_loc_id) + { + TESTING_2("H5Gopen with an invalid loc_id"); + + H5E_BEGIN_TRY + { + group_id = H5Gopen2(H5I_INVALID_HID, GROUP_TEST_GROUP_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" opened group using an invalid loc_id!\n"); + H5Gclose(group_id); + PART_ERROR(H5Gopen_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Gopen_invalid_loc_id); + + PART_BEGIN(H5Gopen_invalid_grp_name) + { + TESTING_2("H5Gopen with an invalid group name"); + + H5E_BEGIN_TRY + { + group_id = H5Gopen2(file_id, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" opened group using a NULL name!\n"); + H5Gclose(group_id); + PART_ERROR(H5Gopen_invalid_grp_name); + } + + H5E_BEGIN_TRY + { + group_id = H5Gopen2(file_id, "", H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" opened group using an invalid name of ''!\n"); + H5Gclose(group_id); + PART_ERROR(H5Gopen_invalid_grp_name); + } + + PASSED(); + } + PART_END(H5Gopen_invalid_grp_name); + + PART_BEGIN(H5Gopen_invalid_gapl) + { + TESTING_2("H5Gopen with an invalid GAPL"); + + H5E_BEGIN_TRY + { + group_id = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" opened group using an invalid GAPL!\n"); + H5Gclose(group_id); + PART_ERROR(H5Gopen_invalid_gapl); + } + + PASSED(); + } + PART_END(H5Gopen_invalid_gapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that H5Gclose doesn't succeed for an + * invalid group ID. + */ +static int +test_close_group_invalid_id(void) +{ + herr_t err_ret = -1; + + TESTING("H5Gclose with an invalid group ID"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic group aren't supported with this connector\n"); + return 0; + } + + H5E_BEGIN_TRY + { + err_ret = H5Gclose(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" close a group with an invalid ID!\n"); + goto error; + } + + PASSED(); + + return 0; + +error: + return 1; +} + +/* + * A test to check that a GCPL used for group creation can + * be persisted and that a valid copy of that GCPL can be + * retrieved later with a call to H5Gget_create_plist. 
+ */ +static int +test_group_property_lists(void) +{ + unsigned dummy_prop_val = GROUP_PROPERTY_LIST_TEST_DUMMY_VAL; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id1 = H5I_INVALID_HID, group_id2 = H5I_INVALID_HID; + hid_t gcpl_id1 = H5I_INVALID_HID, gcpl_id2 = H5I_INVALID_HID; + + TESTING_MULTIPART("group property list operations"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, property list, creation order aren't supported " + "with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((gcpl_id1 = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id1, dummy_prop_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't set property on GCPL\n"); + goto error; + } + + /* Create the group in the file */ + if ((group_id1 = H5Gcreate2(container_group, GROUP_PROPERTY_LIST_TEST_GROUP_NAME1, H5P_DEFAULT, gcpl_id1, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group\n"); + goto error; + } + + /* Create the second group using H5P_DEFAULT for the GCPL */ + if ((group_id2 = H5Gcreate2(container_group, GROUP_PROPERTY_LIST_TEST_GROUP_NAME2, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group\n"); + goto error; + } + + if (H5Pclose(gcpl_id1) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Gget_create_plist) + { + TESTING_2("H5Gget_create_plist"); + + /* Try to retrieve copies of the two property lists, one which has the property set and one which + * does not */ + if ((gcpl_id1 = H5Gget_create_plist(group_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get GCPL\n"); + PART_ERROR(H5Gget_create_plist); + } + + if ((gcpl_id2 = H5Gget_create_plist(group_id2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get GCPL\n"); + PART_ERROR(H5Gget_create_plist); + } + + /* Ensure that property list 1 has the property set and property list 2 does not */ + dummy_prop_val = 0; + + if (H5Pget_link_creation_order(gcpl_id1, &dummy_prop_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve GCPL property value\n"); + PART_ERROR(H5Gget_create_plist); + } + + if (dummy_prop_val != GROUP_PROPERTY_LIST_TEST_DUMMY_VAL) { + H5_FAILED(); + HDprintf(" retrieved GCPL property value '%llu' did not match expected value '%llu'\n", + (unsigned long long)dummy_prop_val, + (unsigned long long)GROUP_PROPERTY_LIST_TEST_DUMMY_VAL); + PART_ERROR(H5Gget_create_plist); + } + + dummy_prop_val = 0; + + if (H5Pget_link_creation_order(gcpl_id2, &dummy_prop_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve GCPL property value\n"); + PART_ERROR(H5Gget_create_plist); + } + + if (dummy_prop_val == GROUP_PROPERTY_LIST_TEST_DUMMY_VAL) { + H5_FAILED(); + HDprintf(" retrieved GCPL property value '%llu' matched control value '%llu' when it " + "shouldn't have\n", + (unsigned long 
long)dummy_prop_val, + (unsigned long long)GROUP_PROPERTY_LIST_TEST_DUMMY_VAL); + PART_ERROR(H5Gget_create_plist); + } + + PASSED(); + } + PART_END(H5Gget_create_plist); + + /* Now see if we can still retrieve copies of the property lists upon opening + * (instead of creating) a group. If they were reconstructed properly upon file + * open, the creation property lists should also have the same test values + * as set before. + */ + if (gcpl_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id1); + } + H5E_END_TRY; + gcpl_id1 = H5I_INVALID_HID; + } + if (gcpl_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id2); + } + H5E_END_TRY; + gcpl_id2 = H5I_INVALID_HID; + } + if (group_id1 >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(group_id1); + } + H5E_END_TRY; + group_id1 = H5I_INVALID_HID; + } + if (group_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(group_id2); + } + H5E_END_TRY; + group_id2 = H5I_INVALID_HID; + } + + PART_BEGIN(H5Gget_create_plist_reopened) + { + TESTING_2("H5Gget_create_plist after re-opening a group"); + + if ((group_id1 = H5Gopen2(container_group, GROUP_PROPERTY_LIST_TEST_GROUP_NAME1, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open group\n"); + PART_ERROR(H5Gget_create_plist_reopened); + } + + if ((group_id2 = H5Gopen2(container_group, GROUP_PROPERTY_LIST_TEST_GROUP_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open group\n"); + PART_ERROR(H5Gget_create_plist_reopened); + } + + if ((gcpl_id1 = H5Gget_create_plist(group_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Gget_create_plist_reopened); + } + + if ((gcpl_id2 = H5Gget_create_plist(group_id2)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get property list\n"); + PART_ERROR(H5Gget_create_plist_reopened); + } + + /* Re-check the property values */ + dummy_prop_val = 0; + + if (H5Pget_link_creation_order(gcpl_id1, &dummy_prop_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve GCPL property value\n"); + PART_ERROR(H5Gget_create_plist_reopened); + } + + if (dummy_prop_val != GROUP_PROPERTY_LIST_TEST_DUMMY_VAL) { + H5_FAILED(); + HDprintf(" retrieved GCPL property value '%llu' did not match expected value '%llu'\n", + (unsigned long long)dummy_prop_val, + (unsigned long long)GROUP_PROPERTY_LIST_TEST_DUMMY_VAL); + PART_ERROR(H5Gget_create_plist_reopened); + } + + dummy_prop_val = 0; + + if (H5Pget_link_creation_order(gcpl_id2, &dummy_prop_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve GCPL property value\n"); + PART_ERROR(H5Gget_create_plist_reopened); + } + + if (dummy_prop_val == GROUP_PROPERTY_LIST_TEST_DUMMY_VAL) { + H5_FAILED(); + HDprintf(" retrieved GCPL property value '%llu' matched control value '%llu' when it " + "shouldn't have\n", + (unsigned long long)dummy_prop_val, + (unsigned long long)GROUP_PROPERTY_LIST_TEST_DUMMY_VAL); + PART_ERROR(H5Gget_create_plist_reopened); + } + + PASSED(); + } + PART_END(H5Gget_create_plist_reopened); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id1) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id1) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id1); + H5Pclose(gcpl_id2); + H5Gclose(group_id1); + H5Gclose(group_id2); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test for the functionality of 
H5Gget_info(_by_idx). + */ +static int +test_get_group_info(void) +{ + H5G_info_t group_info; + unsigned i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t parent_group_id = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + char group_name[NAME_BUF_SIZE]; + + TESTING_MULTIPART("retrieval of group info"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file or group, creation order aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((parent_group_id = H5Gcreate2(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", GROUP_GET_INFO_TEST_GROUP_NAME); + goto error; + } + + /* Create multiple groups under the parent group */ + for (i = 0; i < GROUP_GET_INFO_TEST_GROUP_NUMB; i++) { + /* Create the groups with a reverse-ordering naming scheme to test creation order */ + HDsnprintf(group_name, NAME_BUF_SIZE, "group %02u", + (unsigned)(GROUP_GET_INFO_TEST_GROUP_NUMB - i - 1)); + + if ((group_id = H5Gcreate2(parent_group_id, group_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", group_name); + goto error; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Gget_info) + { + TESTING_2("retrieval of group info with H5Gget_info"); + + memset(&group_info, 0, sizeof(group_info)); + + /* Retrieve information about the parent group */ + if (H5Gget_info(parent_group_id, &group_info) < 0) { + H5_FAILED(); + HDprintf(" couldn't get group info\n"); + PART_ERROR(H5Gget_info); + } + + if (group_info.nlinks != GROUP_GET_INFO_TEST_GROUP_NUMB) { + H5_FAILED(); + HDprintf(" group's number of links '%lu' doesn't match expected value '%u'\n", + group_info.nlinks, (unsigned int)GROUP_GET_INFO_TEST_GROUP_NUMB); + PART_ERROR(H5Gget_info); + } + + /* + * For the purpose of this test, the max creation order should match + * the number of links in the group. 
+ */ + if (group_info.max_corder != GROUP_GET_INFO_TEST_GROUP_NUMB) { + H5_FAILED(); + HDprintf(" group's max creation order '%lld' doesn't match expected value '%lld'\n", + (long long)group_info.max_corder, (long long)GROUP_GET_INFO_TEST_GROUP_NUMB); + PART_ERROR(H5Gget_info); + } + + /* Ensure that the storage_type field is at least set to a meaningful value */ + if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE && + group_info.storage_type != H5G_STORAGE_TYPE_COMPACT && + group_info.storage_type != H5G_STORAGE_TYPE_DENSE && + group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) { + H5_FAILED(); + HDprintf(" group info's 'storage_type' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Gget_info); + } + + /* Assume that mounted should be FALSE in this case */ + if (group_info.mounted != FALSE) { + H5_FAILED(); + HDprintf(" group info's 'mounted' field was TRUE when it should have been FALSE\n"); + PART_ERROR(H5Gget_info); + } + + PASSED(); + } + PART_END(H5Gget_info); + + PART_BEGIN(H5Gget_info_by_name) + { + TESTING_2("retrieval of group info with H5Gget_info_by_name"); + + memset(&group_info, 0, sizeof(group_info)); + + /* Retrieve information about the parent group */ + if (H5Gget_info_by_name(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, &group_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get group info by name\n"); + PART_ERROR(H5Gget_info_by_name); + } + + if (group_info.nlinks != GROUP_GET_INFO_TEST_GROUP_NUMB) { + H5_FAILED(); + HDprintf(" group's number of links '%lu' doesn't match expected value '%u'\n", + group_info.nlinks, (unsigned int)GROUP_GET_INFO_TEST_GROUP_NUMB); + PART_ERROR(H5Gget_info_by_name); + } + + /* + * For the purpose of this test, the max creation order should match + * the number of links in the group. 
+ */ + if (group_info.max_corder != GROUP_GET_INFO_TEST_GROUP_NUMB) { + H5_FAILED(); + HDprintf(" group's max creation order '%lld' doesn't match expected value '%lld'\n", + (long long)group_info.max_corder, (long long)GROUP_GET_INFO_TEST_GROUP_NUMB); + PART_ERROR(H5Gget_info_by_name); + } + + /* Ensure that the storage_type field is at least set to a meaningful value */ + if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE && + group_info.storage_type != H5G_STORAGE_TYPE_COMPACT && + group_info.storage_type != H5G_STORAGE_TYPE_DENSE && + group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) { + H5_FAILED(); + HDprintf(" group info's 'storage_type' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Gget_info_by_name); + } + + /* Assume that mounted should be FALSE in this case */ + if (group_info.mounted != FALSE) { + H5_FAILED(); + HDprintf(" group info's 'mounted' field was TRUE when it should have been FALSE\n"); + PART_ERROR(H5Gget_info_by_name); + } + + PASSED(); + } + PART_END(H5Gget_info_by_name); + + PART_BEGIN(H5Gget_info_by_idx_crt_order_increasing) + { + TESTING_2("H5Gget_info_by_idx by creation order in increasing order"); + + for (i = 0; i < GROUP_GET_INFO_TEST_GROUP_NUMB; i++) { + memset(&group_info, 0, sizeof(group_info)); + + /* Retrieve information about each group under the parent group */ + if (H5Gget_info_by_idx(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_INC, (hsize_t)i, &group_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get group info for group at index %u\n", i); + PART_ERROR(H5Gget_info_by_idx_crt_order_increasing); + } + + if (group_info.nlinks != 0) { + H5_FAILED(); + HDprintf(" group's number of links '%lu' doesn't match expected value '%d'\n", + group_info.nlinks, 0); + PART_ERROR(H5Gget_info_by_idx_crt_order_increasing); + } + + if (group_info.max_corder != 0) { + H5_FAILED(); + HDprintf(" group's max creation order '%lld' doesn't match expected value '%d'\n", + (long long)group_info.max_corder, 0); + PART_ERROR(H5Gget_info_by_idx_crt_order_increasing); + } + + /* Ensure that the storage_type field is at least set to a meaningful value */ + if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE && + group_info.storage_type != H5G_STORAGE_TYPE_COMPACT && + group_info.storage_type != H5G_STORAGE_TYPE_DENSE && + group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) { + H5_FAILED(); + HDprintf(" group info's 'storage_type' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Gget_info_by_idx_crt_order_increasing); + } + + /* Assume that mounted should be FALSE in this case */ + if (group_info.mounted != FALSE) { + H5_FAILED(); + HDprintf(" group info's 'mounted' field was TRUE when it should have been FALSE\n"); + PART_ERROR(H5Gget_info_by_idx_crt_order_increasing); + } + } + + PASSED(); + } + PART_END(H5Gget_info_by_idx_crt_order_increasing); + + PART_BEGIN(H5Gget_info_by_idx_crt_order_decreasing) + { + TESTING_2("H5Gget_info_by_idx by creation order in decreasing order"); + + for (i = 0; i < GROUP_GET_INFO_TEST_GROUP_NUMB; i++) { + memset(&group_info, 0, sizeof(group_info)); + + /* Retrieve information about each group under the parent group */ + if (H5Gget_info_by_idx(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, (hsize_t)i, &group_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get group info for group at index %u\n", i); + PART_ERROR(H5Gget_info_by_idx_crt_order_decreasing); + } + + if (group_info.nlinks != 0) { + H5_FAILED(); + 
HDprintf(" group's number of links '%lu' doesn't match expected value '%d'\n", + group_info.nlinks, 0); + PART_ERROR(H5Gget_info_by_idx_crt_order_decreasing); + } + + if (group_info.max_corder != 0) { + H5_FAILED(); + HDprintf(" group's max creation order '%lld' doesn't match expected value '%d'\n", + (long long)group_info.max_corder, 0); + PART_ERROR(H5Gget_info_by_idx_crt_order_decreasing); + } + + /* Ensure that the storage_type field is at least set to a meaningful value */ + if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE && + group_info.storage_type != H5G_STORAGE_TYPE_COMPACT && + group_info.storage_type != H5G_STORAGE_TYPE_DENSE && + group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) { + H5_FAILED(); + HDprintf(" group info's 'storage_type' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Gget_info_by_idx_crt_order_decreasing); + } + + /* Assume that mounted should be FALSE in this case */ + if (group_info.mounted != FALSE) { + H5_FAILED(); + HDprintf(" group info's 'mounted' field was TRUE when it should have been FALSE\n"); + PART_ERROR(H5Gget_info_by_idx_crt_order_decreasing); + } + } + + PASSED(); + } + PART_END(H5Gget_info_by_idx_crt_order_decreasing); + + PART_BEGIN(H5Gget_info_by_idx_name_order_increasing) + { + TESTING_2("H5Gget_info_by_idx by alphabetical order in increasing order"); + + for (i = 0; i < GROUP_GET_INFO_TEST_GROUP_NUMB; i++) { + memset(&group_info, 0, sizeof(group_info)); + + /* Retrieve information about each group under the parent group */ + if (H5Gget_info_by_idx(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, (hsize_t)i, &group_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get group info for group at index %u\n", i); + PART_ERROR(H5Gget_info_by_idx_name_order_increasing); + } + + if (group_info.nlinks != 0) { + H5_FAILED(); + HDprintf(" group's number of links '%lu' doesn't match expected value '%d'\n", + group_info.nlinks, 0); + PART_ERROR(H5Gget_info_by_idx_name_order_increasing); + } + + if (group_info.max_corder != 0) { + H5_FAILED(); + HDprintf(" group's max creation order '%lld' doesn't match expected value '%d'\n", + (long long)group_info.max_corder, 0); + PART_ERROR(H5Gget_info_by_idx_name_order_increasing); + } + + /* Ensure that the storage_type field is at least set to a meaningful value */ + if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE && + group_info.storage_type != H5G_STORAGE_TYPE_COMPACT && + group_info.storage_type != H5G_STORAGE_TYPE_DENSE && + group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) { + H5_FAILED(); + HDprintf(" group info's 'storage_type' field wasn't set to a meaningful value\n"); + PART_ERROR(H5Gget_info_by_idx_name_order_increasing); + } + + /* Assume that mounted should be FALSE in this case */ + if (group_info.mounted != FALSE) { + H5_FAILED(); + HDprintf(" group info's 'mounted' field was TRUE when it should have been FALSE\n"); + PART_ERROR(H5Gget_info_by_idx_name_order_increasing); + } + } + + PASSED(); + } + PART_END(H5Gget_info_by_idx_name_order_increasing); + + PART_BEGIN(H5Gget_info_by_idx_name_order_decreasing) + { + TESTING_2("H5Gget_info_by_idx by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + for (i = 0; i < GROUP_GET_INFO_TEST_GROUP_NUMB; i++) { + memset(&group_info, 0, sizeof(group_info)); + + /* Retrieve information about each group under the parent group */ + if (H5Gget_info_by_idx(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_DEC, (hsize_t)i, 
&group_info, H5P_DEFAULT) < 0) {
+                    H5_FAILED();
+                    HDprintf("    couldn't get group info for group at index %u\n", i);
+                    PART_ERROR(H5Gget_info_by_idx_name_order_decreasing);
+                }
+
+                if (group_info.nlinks != 0) {
+                    H5_FAILED();
+                    HDprintf("    group's number of links '%lu' doesn't match expected value '%d'\n",
+                             group_info.nlinks, 0);
+                    PART_ERROR(H5Gget_info_by_idx_name_order_decreasing);
+                }
+
+                if (group_info.max_corder != 0) {
+                    H5_FAILED();
+                    HDprintf("    group's max creation order '%lld' doesn't match expected value '%d'\n",
+                             (long long)group_info.max_corder, 0);
+                    PART_ERROR(H5Gget_info_by_idx_name_order_decreasing);
+                }
+
+                /* Ensure that the storage_type field is at least set to a meaningful value */
+                if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE &&
+                    group_info.storage_type != H5G_STORAGE_TYPE_COMPACT &&
+                    group_info.storage_type != H5G_STORAGE_TYPE_DENSE &&
+                    group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) {
+                    H5_FAILED();
+                    HDprintf("    group info's 'storage_type' field wasn't set to a meaningful value\n");
+                    PART_ERROR(H5Gget_info_by_idx_name_order_decreasing);
+                }
+
+                /* Assume that mounted should be FALSE in this case */
+                if (group_info.mounted != FALSE) {
+                    H5_FAILED();
+                    HDprintf("    group info's 'mounted' field was TRUE when it should have been FALSE\n");
+                    PART_ERROR(H5Gget_info_by_idx_name_order_decreasing);
+                }
+            }
+
+            PASSED();
+#else
+            SKIPPED();
+            PART_EMPTY(H5Gget_info_by_idx_name_order_decreasing);
+#endif
+        }
+        PART_END(H5Gget_info_by_idx_name_order_decreasing);
+    }
+    END_MULTIPART;
+
+    TESTING_2("test cleanup");
+
+    if (H5Pclose(gcpl_id) < 0)
+        TEST_ERROR;
+    if (H5Gclose(parent_group_id) < 0)
+        TEST_ERROR;
+    if (H5Gclose(container_group) < 0)
+        TEST_ERROR;
+    if (H5Fclose(file_id) < 0)
+        TEST_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY
+    {
+        H5Pclose(gcpl_id);
+        H5Gclose(parent_group_id);
+        H5Gclose(group_id);
+        H5Gclose(container_group);
+        H5Fclose(file_id);
+    }
+    H5E_END_TRY;
+
+    return 1;
+}
+
+/*
+ * A test to check that a group's info can't be retrieved when
+ * H5Gget_info(_by_name/_by_idx) is passed invalid parameters.
+ */ +static int +test_get_group_info_invalid_params(void) +{ + H5G_info_t group_info; + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + + TESTING_MULTIPART("retrieval of group info with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, more group, creation order aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Gget_info_invalid_loc_id) + { + TESTING_2("H5Gget_info with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Gget_info(H5I_INVALID_HID, &group_info); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info with an invalid loc_id!\n"); + PART_ERROR(H5Gget_info_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Gget_info_invalid_loc_id); + + PART_BEGIN(H5Gget_info_invalid_grp_info_pointer) + { + TESTING_2("H5Gget_info with an invalid group info pointer"); + + H5E_BEGIN_TRY + { + err_ret = H5Gget_info(file_id, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info with invalid group info pointer!\n"); + PART_ERROR(H5Gget_info_invalid_grp_info_pointer); + } + + PASSED(); + } + PART_END(H5Gget_info_invalid_grp_info_pointer); + + PART_BEGIN(H5Gget_info_by_name_invalid_loc_id) + { + TESTING_2("H5Gget_info_by_name with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Gget_info_by_name(H5I_INVALID_HID, ".", &group_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info_by_name with an invalid loc_id!\n"); + PART_ERROR(H5Gget_info_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Gget_info_by_name_invalid_loc_id); + + PART_BEGIN(H5Gget_info_by_name_invalid_grp_name) + { + TESTING_2("H5Gget_info_by_name with an invalid group name"); + + H5E_BEGIN_TRY + { + err_ret = H5Gget_info_by_name(file_id, NULL, &group_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info_by_name with a NULL name!\n"); + PART_ERROR(H5Gget_info_by_name_invalid_grp_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Gget_info_by_name(file_id, "", &group_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " retrieved info of group using H5Gget_info_by_name with an invalid name of ''!\n"); + PART_ERROR(H5Gget_info_by_name_invalid_grp_name); + } + + PASSED(); + } + PART_END(H5Gget_info_by_name_invalid_grp_name); + + PART_BEGIN(H5Gget_info_by_name_invalid_grp_info_pointer) + { + TESTING_2("H5Gget_info_by_name with an invalid group info pointer"); + + H5E_BEGIN_TRY + { + err_ret = H5Gget_info_by_name(file_id, ".", NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info_by_name with an invalid group info " + "pointer!\n"); + PART_ERROR(H5Gget_info_by_name_invalid_grp_info_pointer); + } + + PASSED(); + } + PART_END(H5Gget_info_by_name_invalid_grp_info_pointer); + + 
PART_BEGIN(H5Gget_info_by_name_invalid_lapl) + { + TESTING_2("H5Gget_info_by_name with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Gget_info_by_name(file_id, ".", &group_info, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info_by_name with an invalid LAPL!\n"); + PART_ERROR(H5Gget_info_by_name_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Gget_info_by_name_invalid_lapl); + + PART_BEGIN(H5Gget_info_by_idx_invalid_loc_id) + { + TESTING_2("H5Gget_info_by_idx with an invalid loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Gget_info_by_idx(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &group_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info_by_idx with an invalid loc_id!\n"); + PART_ERROR(H5Gget_info_by_idx_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Gget_info_by_idx_invalid_loc_id); + + PART_BEGIN(H5Gget_info_by_idx_invalid_grp_name) + { + TESTING_2("H5Gget_info_by_idx with an invalid group name"); + + H5E_BEGIN_TRY + { + err_ret = H5Gget_info_by_idx(file_id, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, &group_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info_by_idx with a NULL group name!\n"); + PART_ERROR(H5Gget_info_by_idx_invalid_grp_name); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Gget_info_by_idx(file_id, "", H5_INDEX_NAME, H5_ITER_INC, 0, &group_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info_by_idx with an invalid group name of " + "''!\n"); + PART_ERROR(H5Gget_info_by_idx_invalid_grp_name); + } + + PASSED(); + } + PART_END(H5Gget_info_by_idx_invalid_grp_name); + + PART_BEGIN(H5Gget_info_by_idx_invalid_index_type) + { + TESTING_2("H5Gget_info_by_idx with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = H5Gget_info_by_idx(file_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, &group_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info_by_idx with invalid index type " + "H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Gget_info_by_idx_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Gget_info_by_idx(file_id, ".", H5_INDEX_N, H5_ITER_INC, 0, &group_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info_by_idx with invalid index type " + "H5_INDEX_N!\n"); + PART_ERROR(H5Gget_info_by_idx_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Gget_info_by_idx_invalid_index_type); + + PART_BEGIN(H5Gget_info_by_idx_invalid_iter_order) + { + TESTING_2("H5Gget_info_by_idx with an invalid iteration order"); + + H5E_BEGIN_TRY + { + err_ret = H5Gget_info_by_idx(file_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, &group_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info_by_idx with invalid iteration order " + "H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Gget_info_by_idx_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Gget_info_by_idx(file_id, ".", H5_INDEX_NAME, H5_ITER_N, 0, &group_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" retrieved info of group using H5Gget_info_by_idx with invalid iteration order " + "H5_ITER_N!\n"); + 
PART_ERROR(H5Gget_info_by_idx_invalid_iter_order);
+            }
+
+            PASSED();
+        }
+        PART_END(H5Gget_info_by_idx_invalid_iter_order);
+
+        PART_BEGIN(H5Gget_info_by_idx_invalid_grp_info_pointer)
+        {
+            TESTING_2("H5Gget_info_by_idx with an invalid group info pointer");
+
+            H5E_BEGIN_TRY
+            {
+                err_ret = H5Gget_info_by_idx(file_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, H5P_DEFAULT);
+            }
+            H5E_END_TRY;
+
+            if (err_ret >= 0) {
+                H5_FAILED();
+                HDprintf("    retrieved info of group using H5Gget_info_by_idx with an invalid group info "
+                         "pointer!\n");
+                PART_ERROR(H5Gget_info_by_idx_invalid_grp_info_pointer);
+            }
+
+            PASSED();
+        }
+        PART_END(H5Gget_info_by_idx_invalid_grp_info_pointer);
+
+        PART_BEGIN(H5Gget_info_by_idx_invalid_lapl)
+        {
+            TESTING_2("H5Gget_info_by_idx with an invalid LAPL");
+
+            H5E_BEGIN_TRY
+            {
+                err_ret = H5Gget_info_by_idx(file_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &group_info,
+                                             H5I_INVALID_HID);
+            }
+            H5E_END_TRY;
+
+            if (err_ret >= 0) {
+                H5_FAILED();
+                HDprintf("    retrieved info of group using H5Gget_info_by_idx with an invalid LAPL!\n");
+                PART_ERROR(H5Gget_info_by_idx_invalid_lapl);
+            }
+
+            PASSED();
+        }
+        PART_END(H5Gget_info_by_idx_invalid_lapl);
+    }
+    END_MULTIPART;
+
+    TESTING_2("test cleanup");
+
+    if (H5Fclose(file_id) < 0)
+        TEST_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY
+    {
+        H5Fclose(file_id);
+    }
+    H5E_END_TRY;
+
+    return 1;
+}
+
+/*
+ * A test for H5Gflush.
+ */
+static int
+test_flush_group(void)
+{
+    hid_t file_id         = H5I_INVALID_HID;
+    hid_t container_group = H5I_INVALID_HID;
+    hid_t group_id        = H5I_INVALID_HID;
+
+    TESTING("H5Gflush");
+
+    /* Make sure the connector supports the API functions being tested */
+    if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+        !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+        SKIPPED();
+        HDprintf("    API functions for basic file, group, or flush aren't supported with this "
+                 "connector\n");
+        return 0;
+    }
+
+    if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        HDprintf("    couldn't open file '%s'\n", H5_api_test_filename);
+        goto error;
+    }
+
+    if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        HDprintf("    couldn't open container group\n");
+        goto error;
+    }
+
+    if ((group_id = H5Gcreate2(container_group, GROUP_FLUSH_GNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) <
+        0) {
+        H5_FAILED();
+        HDprintf("    couldn't create group '%s'\n", GROUP_FLUSH_GNAME);
+        goto error;
+    }
+
+    /* Flush the group */
+    if (H5Gflush(group_id) < 0) {
+        H5_FAILED();
+        HDprintf("    couldn't flush the group '%s'\n", GROUP_FLUSH_GNAME);
+        goto error;
+    }
+
+    if (H5Gclose(group_id) < 0)
+        TEST_ERROR;
+    if (H5Gclose(container_group) < 0)
+        TEST_ERROR;
+    if (H5Fclose(file_id) < 0)
+        TEST_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY
+    {
+        H5Gclose(group_id);
+        H5Gclose(container_group);
+        H5Fclose(file_id);
+    }
+    H5E_END_TRY;
+
+    return 1;
+}
+
+/*
+ * A test to check that H5Gflush fails when it
+ * is passed invalid parameters.
+ */ +static int +test_flush_group_invalid_params(void) +{ + herr_t status; + + TESTING("H5Gflush with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + SKIPPED(); + HDprintf(" API functions for group flush aren't supported with this connector\n"); + return 0; + } + + H5E_BEGIN_TRY + { + status = H5Gflush(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" flushed group with invalid ID!\n"); + goto error; + } + + PASSED(); + + return 0; + +error: + return 1; +} + +/* + * A test for H5Grefresh. + */ +static int +test_refresh_group(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + + TESTING("H5Grefresh"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or refresh aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, GROUP_REFRESH_GNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", GROUP_REFRESH_GNAME); + goto error; + } + + /* Refresh the group */ + if (H5Grefresh(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't refresh the group '%s'\n", GROUP_REFRESH_GNAME); + goto error; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that H5Grefresh fails when it + * is passed invalid parameters. + */ +static int +test_refresh_group_invalid_params(void) +{ + herr_t status; + + TESTING("H5Grefresh with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + SKIPPED(); + HDprintf(" API functions for group refresh aren't supported with this connector\n"); + return 0; + } + + H5E_BEGIN_TRY + { + status = H5Grefresh(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" refreshed group with invalid ID!\n"); + goto error; + } + + PASSED(); + + return 0; + +error: + return 1; +} + +int +H5_api_group_test(void) +{ + size_t i; + int nerrors; + + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Group Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(group_tests); i++) { + nerrors += (*group_tests[i])() ? 
1 : 0; + } + + HDprintf("\n"); + + return nerrors; +} diff --git a/test/API/H5_api_group_test.h b/test/API/H5_api_group_test.h new file mode 100644 index 00000000000..baf14c81f45 --- /dev/null +++ b/test/API/H5_api_group_test.h @@ -0,0 +1,65 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_GROUP_TEST_H +#define H5_API_GROUP_TEST_H + +#include "H5_api_test.h" + +int H5_api_group_test(void); + +/********************************************** + * * + * API Group test defines * + * * + **********************************************/ + +#define GROUP_CREATE_UNDER_ROOT_GNAME "/group_under_root" + +#define GROUP_CREATE_UNDER_GROUP_REL_GNAME "child_group" +#define GROUP_CREATE_UNDER_GROUP_ABS_GNAME "child_group/grandchild_group" + +#define GROUP_CREATE_INVALID_PARAMS_GROUP_NAME "/invalid_params_group" + +#define GROUP_CREATE_ANONYMOUS_GROUP_NAME "anon_group" + +#define GROUP_CREATE_INTMD_REL_INTMD_NAME "rel_intmd" +#define GROUP_CREATE_INTMD_REL_END_NAME "rel_end" +#define GROUP_CREATE_INTMD_ABS_INTMD_NAME "abs_intmd" +#define GROUP_CREATE_INTMD_ABS_END_NAME "abs_end" +#define GROUP_CREATE_INTMD_MULT_INTMD1_NAME "mult_intmd1" +#define GROUP_CREATE_INTMD_MULT_INTMD2_NAME "mult_intmd2" +#define GROUP_CREATE_INTMD_MULT_END_NAME "mult_end" + +#define OPEN_NONEXISTENT_GROUP_TEST_GNAME "/nonexistent_group" + +#define GROUP_PROPERTY_LIST_TEST_GROUP_NAME1 "property_list_test_group1" +#define GROUP_PROPERTY_LIST_TEST_GROUP_NAME2 "property_list_test_group2" +#define GROUP_PROPERTY_LIST_TEST_DUMMY_VAL H5P_CRT_ORDER_TRACKED + +#define GROUP_GET_INFO_TEST_GROUP_NAME "group_info_test" +#define GROUP_GET_INFO_TEST_GROUP_NUMB 16 + +#define GROUP_FLUSH_GNAME "group_flush_test" + +#define GROUP_REFRESH_GNAME "group_refresh_test" + +#define NAME_BUF_SIZE 64 +#define GROUP_NUMB 16 + +#define MANY_GROUP_CREATIONS_GNAME "home_for_many_groups" +#define GROUP_NUMB_MANY 100u + +#define DEEP_GROUP_CREATIONS_GNAME "home_for_deep_groups" +#define GROUP_DEPTH 100u + +#endif diff --git a/test/API/H5_api_link_test.c b/test/API/H5_api_link_test.c new file mode 100644 index 00000000000..9a8c65a4c73 --- /dev/null +++ b/test/API/H5_api_link_test.c @@ -0,0 +1,27072 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_link_test.h" + +/* + * TODO: add link tests for short-circuit success in operator callback + */ + +static int test_create_hard_link(void); +static int test_create_hard_link_long_name(void); +static int test_create_hard_link_many(void); +static int test_create_hard_link_same_loc(void); +static int test_create_hard_link_invalid_params(void); +static int test_create_soft_link_existing_relative(void); +static int test_create_soft_link_existing_absolute(void); +static int test_create_soft_link_dangling_relative(void); +static int test_create_soft_link_dangling_absolute(void); +static int test_create_soft_link_long_name(void); +static int test_create_soft_link_many(void); +static int test_create_soft_link_invalid_params(void); +static int test_create_external_link(void); +static int test_create_external_link_dangling(void); +static int test_create_external_link_multi(void); +static int test_create_external_link_ping_pong(void); +static int test_create_external_link_invalid_params(void); +static int test_create_user_defined_link(void); +static int test_create_user_defined_link_invalid_params(void); +static int test_delete_link(void); +static int test_delete_link_reset_grp_max_crt_order(void); +static int test_delete_link_invalid_params(void); +static int test_copy_link(void); +static int test_copy_links_into_group_with_links(void); +static int test_copy_link_across_files(void); +static int test_copy_link_invalid_params(void); +static int test_move_link(void); +static int test_move_links_into_group_with_links(void); +static int test_move_link_across_files(void); +static int test_move_link_reset_grp_max_crt_order(void); +static int test_move_link_invalid_params(void); +static int test_get_link_val(void); +static int test_get_link_val_invalid_params(void); +static int test_get_link_info(void); +static int test_get_link_info_invalid_params(void); +static int test_get_link_name(void); +static int test_get_link_name_invalid_params(void); +static int test_link_iterate_hard_links(void); +static int test_link_iterate_soft_links(void); +static int test_link_iterate_external_links(void); +static int test_link_iterate_ud_links(void); +static int test_link_iterate_mixed_links(void); +static int test_link_iterate_invalid_params(void); +static int test_link_iterate_0_links(void); +static int test_link_visit_hard_links_no_cycles(void); +static int test_link_visit_soft_links_no_cycles(void); +static int test_link_visit_external_links_no_cycles(void); +static int test_link_visit_ud_links_no_cycles(void); +static int test_link_visit_mixed_links_no_cycles(void); +static int test_link_visit_hard_links_cycles(void); +static int test_link_visit_soft_links_cycles(void); +static int test_link_visit_external_links_cycles(void); +static int test_link_visit_ud_links_cycles(void); +static int test_link_visit_mixed_links_cycles(void); +static int test_link_visit_invalid_params(void); +static int test_link_visit_0_links(void); + +static herr_t link_iter_hard_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +static herr_t link_iter_soft_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#ifndef NO_EXTERNAL_LINKS +static herr_t link_iter_external_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#endif +#ifndef NO_USER_DEFINED_LINKS +static herr_t link_iter_ud_links_cb(hid_t group_id, const char *name, const H5L_info2_t 
*info, void *op_data); +#endif +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) +static herr_t link_iter_mixed_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#endif +static herr_t link_iter_invalid_params_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +static herr_t link_iter_0_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data); +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) +static herr_t link_iter_idx_saving_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#endif + +static herr_t link_visit_hard_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +static herr_t link_visit_soft_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#ifndef NO_EXTERNAL_LINKS +static herr_t link_visit_external_links_no_cycles_cb(hid_t group_id, const char *name, + const H5L_info2_t *info, void *op_data); +#endif +#ifndef NO_USER_DEFINED_LINKS +static herr_t link_visit_ud_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#endif +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) +static herr_t link_visit_mixed_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#endif +static herr_t link_visit_hard_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +static herr_t link_visit_soft_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#ifndef NO_EXTERNAL_LINKS +static herr_t link_visit_external_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#endif +#ifndef NO_USER_DEFINED_LINKS +static herr_t link_visit_ud_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#endif +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) +static herr_t link_visit_mixed_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#endif +static herr_t link_visit_invalid_params_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +static herr_t link_visit_0_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data); + +/* + * The array of link tests to be performed. 
+ */ +static int (*link_tests[])(void) = { + test_create_hard_link, + test_create_hard_link_long_name, + test_create_hard_link_many, + test_create_hard_link_same_loc, + test_create_hard_link_invalid_params, + test_create_soft_link_existing_relative, + test_create_soft_link_existing_absolute, + test_create_soft_link_dangling_relative, + test_create_soft_link_dangling_absolute, + test_create_soft_link_long_name, + test_create_soft_link_many, + test_create_soft_link_invalid_params, + test_create_external_link, + test_create_external_link_dangling, + test_create_external_link_multi, + test_create_external_link_ping_pong, + test_create_external_link_invalid_params, + test_create_user_defined_link, + test_create_user_defined_link_invalid_params, + test_delete_link, + test_delete_link_reset_grp_max_crt_order, + test_delete_link_invalid_params, + test_copy_link, + test_copy_links_into_group_with_links, + test_copy_link_across_files, + test_copy_link_invalid_params, + test_move_link, + test_move_links_into_group_with_links, + test_move_link_across_files, + test_move_link_reset_grp_max_crt_order, + test_move_link_invalid_params, + test_get_link_val, + test_get_link_val_invalid_params, + test_get_link_info, + test_get_link_info_invalid_params, + test_get_link_name, + test_get_link_name_invalid_params, + test_link_iterate_hard_links, + test_link_iterate_soft_links, + test_link_iterate_external_links, + test_link_iterate_ud_links, + test_link_iterate_mixed_links, + test_link_iterate_invalid_params, + test_link_iterate_0_links, + test_link_visit_hard_links_no_cycles, + test_link_visit_soft_links_no_cycles, + test_link_visit_external_links_no_cycles, + test_link_visit_ud_links_no_cycles, + test_link_visit_mixed_links_no_cycles, + test_link_visit_hard_links_cycles, + test_link_visit_soft_links_cycles, + test_link_visit_external_links_cycles, + test_link_visit_ud_links_cycles, + test_link_visit_mixed_links_cycles, + test_link_visit_invalid_params, + test_link_visit_0_links, +}; + +/* + * A test to check that a hard link can be created + * using H5Lcreate_hard. 
+ */ +static int +test_create_hard_link(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + + TESTING("hard link creation"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file or group, basic or hard link aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, HARD_LINK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", HARD_LINK_TEST_GROUP_NAME); + goto error; + } + + if (H5Lcreate_hard(file_id, "/", group_id, HARD_LINK_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", HARD_LINK_TEST_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, HARD_LINK_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", HARD_LINK_TEST_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + goto error; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a hard link with a long name can be created + * using H5Lcreate_hard. 
+ */ +static int +test_create_hard_link_long_name(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + char vol_name[5]; + size_t name_len = MAX_NAME_LEN; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + char *objname = NULL; /* Name of object [Long] */ + size_t u; /* Local index variable */ + + TESTING("hard link creation with a long name"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file or group, basic or hard link aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, HARD_LINK_TEST_GROUP_LONG_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", HARD_LINK_TEST_GROUP_NAME); + goto error; + } + + if (H5VLget_connector_name(file_id, vol_name, 5) < 0) { + H5_FAILED(); + HDprintf(" couldn't get VOL connector name\n"); + goto error; + } + + /** for DAOS VOL, max link name supported is 99 (Lexical key) */ + if (strcmp(vol_name, "daos") == 0) + name_len = 99; + + /* Construct very long file name */ + if ((objname = (char *)HDmalloc((size_t)(name_len + 1))) == NULL) + TEST_ERROR; + + for (u = 0; u < name_len; u++) + objname[u] = 'a'; + objname[name_len] = '\0'; + + if (H5Lcreate_hard(file_id, "/", group_id, objname, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link with a long name\n"); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, objname, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if the link with a long name exists\n"); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + goto error; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + /* Release memory */ + if (objname) + HDfree(objname); + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + if (objname) + HDfree(objname); + + return 1; +} + +/* + * A test to check that many hard links can be created + * using H5Lcreate_hard. 
+ */ +static int +test_create_hard_link_many(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID, group_id2 = H5I_INVALID_HID; +#ifndef NO_OBJECT_GET_NAME + char objname[HARD_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE]; /* Object name */ +#endif + + TESTING("hard link creation of many links"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) { + SKIPPED(); + HDprintf( + " API functions for basic file or group, or hard link aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, HARD_LINK_TEST_GROUP_MANY_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", HARD_LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id2 = H5Gcreate2(group_id, HARD_LINK_TEST_GROUP_MANY_FINAL_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", HARD_LINK_TEST_GROUP_MANY_FINAL_NAME); + goto error; + } + + if (H5Lcreate_hard(group_id, HARD_LINK_TEST_GROUP_MANY_FINAL_NAME, group_id, "hard1", H5P_DEFAULT, + H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard1", group_id, "hard2", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard2", group_id, "hard3", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard3", group_id, "hard4", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard4", group_id, "hard5", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard5", group_id, "hard6", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard6", group_id, "hard7", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard7", group_id, "hard8", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard8", group_id, "hard9", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard9", group_id, "hard10", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard10", group_id, "hard11", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard11", group_id, "hard12", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard12", group_id, "hard13", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard13", group_id, "hard14", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard14", group_id, "hard15", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard15", group_id, "hard16", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard16", group_id, "hard17", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard17", group_id, "hard18", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; 
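+    /* Continue the chain so that each of "hard19" through "hard21" is created
+     * through the hard link added in the previous call */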
+ if (H5Lcreate_hard(group_id, "hard18", group_id, "hard19", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard19", group_id, "hard20", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_hard(group_id, "hard20", group_id, "hard21", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, "hard21", H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link 'hard21' exists\n"); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link 'hard21' did not exist\n"); + goto error; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + /* Reopen the file and group and verify the hard link */ + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gopen2(container_group, HARD_LINK_TEST_GROUP_MANY_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", HARD_LINK_TEST_GROUP_NAME); + goto error; + } + + /* Open the object through last hard link */ + if ((group_id2 = H5Gopen2(group_id, "hard21", H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open the group '%s' with the last hard link 'hard21'\n", + HARD_LINK_TEST_GROUP_MANY_FINAL_NAME); + goto error; + } +#ifndef NO_OBJECT_GET_NAME + /* Check name */ + if (H5Iget_name(group_id2, objname, (size_t)HARD_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE) < 0) { + H5_FAILED(); + HDprintf(" couldn't get the name of the object '%s'\n", HARD_LINK_TEST_GROUP_MANY_FINAL_NAME); + goto error; + } + + if (HDstrcmp(objname, "/" LINK_TEST_GROUP_NAME "/" HARD_LINK_TEST_GROUP_MANY_NAME "/hard21")) { + H5_FAILED(); + HDprintf(" wrong name of the object '%s'\n", objname); + goto error; + } +#endif + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(group_id2); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that behavior is correct when using + * the H5L_SAME_LOC macro for H5Lcreate_hard(). 
+ */ +static int +test_create_hard_link_same_loc(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + + TESTING_MULTIPART("hard link creation with H5L_SAME_LOC"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file or group, basic or hard link aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, H5L_SAME_LOC_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", H5L_SAME_LOC_TEST_GROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5L_SAME_LOC_first_param) + { + TESTING_2("usage of H5L_SAME_LOC for first parameter of H5Lcreate_hard"); + + /* Library functionality for this part of the test is broken */ + if (H5Lcreate_hard(H5L_SAME_LOC, ".", group_id, H5L_SAME_LOC_TEST_LINK_NAME1, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first link '%s'\n", H5L_SAME_LOC_TEST_LINK_NAME1); + PART_ERROR(H5L_SAME_LOC_first_param); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, H5L_SAME_LOC_TEST_LINK_NAME1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + PART_ERROR(H5L_SAME_LOC_first_param); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + PART_ERROR(H5L_SAME_LOC_first_param); + } + + PASSED(); + } + PART_END(H5L_SAME_LOC_first_param); + + PART_BEGIN(H5L_SAME_LOC_third_param) + { + TESTING_2("usage of H5L_SAME_LOC for third parameter of H5Lcreate_hard"); + + if (H5Lcreate_hard(group_id, ".", H5L_SAME_LOC, H5L_SAME_LOC_TEST_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create second link '%s'\n", H5L_SAME_LOC_TEST_LINK_NAME2); + PART_ERROR(H5L_SAME_LOC_third_param); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, H5L_SAME_LOC_TEST_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", H5L_SAME_LOC_TEST_LINK_NAME2); + PART_ERROR(H5L_SAME_LOC_third_param); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + PART_ERROR(H5L_SAME_LOC_third_param); + } + + PASSED(); + } + PART_END(H5L_SAME_LOC_third_param); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a hard link can't be created when + * H5Lcreate_hard is passed invalid parameters. 
+ */ +static int +test_create_hard_link_invalid_params(void) +{ + herr_t err_ret = -1; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; +#ifndef NO_PREVENT_HARD_LINKS_ACROSS_FILES + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + hid_t ext_file_id = H5I_INVALID_HID; + + TESTING_MULTIPART("hard link creation with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file or group, basic or hard link aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, HARD_LINK_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", HARD_LINK_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lcreate_hard_invalid_cur_loc_id) + { + TESTING_2("H5Lcreate_hard with an invalid cur_loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_hard(H5I_INVALID_HID, "/", group_id, + HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created hard link with an invalid cur_loc_id!\n"); + PART_ERROR(H5Lcreate_hard_invalid_cur_loc_id); + } + + PASSED(); + } + PART_END(H5Lcreate_hard_invalid_cur_loc_id); + + PART_BEGIN(H5Lcreate_hard_invalid_cur_name) + { + TESTING_2("H5Lcreate_hard with an invalid cur_name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_hard(file_id, NULL, group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created hard link with a NULL cur_name!\n"); + PART_ERROR(H5Lcreate_hard_invalid_cur_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_hard(file_id, "", group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created hard link with an invalid cur_name of ''!\n"); + PART_ERROR(H5Lcreate_hard_invalid_cur_name); + } + + PASSED(); + } + PART_END(H5Lcreate_hard_invalid_cur_name); + + PART_BEGIN(H5Lcreate_hard_invalid_new_loc_id) + { + TESTING_2("H5Lcreate_hard with an invalid new_loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_hard(file_id, "/", H5I_INVALID_HID, + HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created hard link with an invalid new_loc_id!\n"); + PART_ERROR(H5Lcreate_hard_invalid_new_loc_id); + } + + PASSED(); + } + PART_END(H5Lcreate_hard_invalid_new_loc_id); + + PART_BEGIN(H5Lcreate_hard_invalid_new_name) + { + TESTING_2("H5Lcreate_hard with an invalid new_name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_hard(file_id, "/", 
group_id, NULL, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created hard link with a NULL new_name!\n"); + PART_ERROR(H5Lcreate_hard_invalid_new_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_hard(file_id, "/", group_id, "", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created hard link with an invalid new_name of ''!\n"); + PART_ERROR(H5Lcreate_hard_invalid_new_name); + } + + PASSED(); + } + PART_END(H5Lcreate_hard_invalid_new_name); + + PART_BEGIN(H5Lcreate_hard_invalid_lcpl) + { + TESTING_2("H5Lcreate_hard with an invalid LCPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_hard(file_id, "/", group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, + H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created hard link with an invalid LCPL!\n"); + PART_ERROR(H5Lcreate_hard_invalid_lcpl); + } + + PASSED(); + } + PART_END(H5Lcreate_hard_invalid_lcpl); + + PART_BEGIN(H5Lcreate_hard_invalid_lapl) + { + TESTING_2("H5Lcreate_hard with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_hard(file_id, "/", group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, + H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created hard link with an invalid LAPL!\n"); + PART_ERROR(H5Lcreate_hard_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Lcreate_hard_invalid_lapl); + + PART_BEGIN(H5Lcreate_hard_invalid_same_loc) + { + TESTING_2("H5Lcreate_hard with the invalid same location"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_hard(H5L_SAME_LOC, "/", H5L_SAME_LOC, + HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created hard link with the invalid same location!\n"); + PART_ERROR(H5Lcreate_hard_invalid_same_loc); + } + + PASSED(); + } + PART_END(H5Lcreate_hard_invalid_same_loc); + + PART_BEGIN(H5Lcreate_hard_across_files) + { + TESTING_2("H5Lcreate_hard across files"); +#ifndef NO_PREVENT_HARD_LINKS_ACROSS_FILES + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lcreate_hard_across_files); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_hard(file_id, "/", ext_file_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created hard link across files!\n"); + PART_ERROR(H5Lcreate_hard_across_files); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_hard(ext_file_id, "/", group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created hard link across files!\n"); + PART_ERROR(H5Lcreate_hard_across_files); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lcreate_hard_across_files); +#endif + } + PART_END(H5Lcreate_hard_across_files); + + PART_BEGIN(H5Lcreate_hard_invalid_existence) + { + TESTING_2("invalid link existence after previous invalid H5Lcreate_hard calls"); + + /* Verify the link hasn't been created */ + if ((link_exists = H5Lexists(group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't 
determine if link '%s' exists\n", + HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_hard_invalid_existence); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" link existed!\n"); + PART_ERROR(H5Lcreate_hard_invalid_existence); + } + + PASSED(); + } + PART_END(H5Lcreate_hard_invalid_existence); + } + END_MULTIPART; + + TESTING_2("test cleanup"); +#ifndef NO_PREVENT_HARD_LINKS_ACROSS_FILES + if (H5Fclose(ext_file_id) < 0) + TEST_ERROR; +#endif + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(ext_file_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} /* test_create_hard_link_invalid_params */ + +/* + * A test to check that a soft link, which points to an + * existing object with a relative path, can be created. + */ +static int +test_create_soft_link_existing_relative(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t object_id = H5I_INVALID_HID; + + TESTING("soft link creation to existing object by relative path"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, SOFT_LINK_EXISTING_RELATIVE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + SOFT_LINK_EXISTING_RELATIVE_TEST_SUBGROUP_NAME); + goto error; + } + + if ((object_id = H5Gcreate2(group_id, SOFT_LINK_EXISTING_RELATIVE_TEST_OBJECT_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create object '%s' for soft link's target\n", + SOFT_LINK_EXISTING_RELATIVE_TEST_OBJECT_NAME); + goto error; + } + + if (H5Gclose(object_id) < 0) + TEST_ERROR; + + if (H5Lcreate_soft(SOFT_LINK_EXISTING_RELATIVE_TEST_OBJECT_NAME, group_id, + SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + goto error; + } + + if ((object_id = H5Gopen2(group_id, SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open object '%s' through the soft link\n", + 
SOFT_LINK_EXISTING_RELATIVE_TEST_OBJECT_NAME); + goto error; + } + + if (H5Gclose(object_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(object_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a soft link, which points to an + * existing object using an absolute path, can be created. + */ +static int +test_create_soft_link_existing_absolute(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID, root_id = H5I_INVALID_HID; + + TESTING("soft link creation to existing object by absolute path"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, SOFT_LINK_EXISTING_ABSOLUTE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + SOFT_LINK_EXISTING_ABSOLUTE_TEST_SUBGROUP_NAME); + goto error; + } + + if (H5Lcreate_soft("/", group_id, SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + goto error; + } + + if ((root_id = H5Gopen2(group_id, SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open object pointed to by soft link '%s'\n", + SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME); + goto error; + } + + if (H5Gclose(root_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(root_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a soft link, which points to + * an object that doesn't exist by using a relative + * path, can be created. 
+ */ +static int +test_create_soft_link_dangling_relative(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t object_id = H5I_INVALID_HID; + + TESTING("dangling soft link creation to object by relative path"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, SOFT_LINK_DANGLING_RELATIVE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + SOFT_LINK_DANGLING_RELATIVE_TEST_SUBGROUP_NAME); + goto error; + } + + if (H5Lcreate_soft(SOFT_LINK_DANGLING_RELATIVE_TEST_OBJECT_NAME, group_id, + SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + goto error; + } + + H5E_BEGIN_TRY + { + object_id = H5Gopen2(group_id, SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (object_id >= 0) { + H5_FAILED(); + HDprintf(" opened target of dangling link '%s'!\n", SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME); + H5Gclose(object_id); + goto error; + } + + if ((object_id = H5Gcreate2(group_id, SOFT_LINK_DANGLING_RELATIVE_TEST_OBJECT_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create object '%s' for soft link's target\n", + SOFT_LINK_DANGLING_RELATIVE_TEST_OBJECT_NAME); + goto error; + } + + if (H5Gclose(object_id) < 0) + TEST_ERROR; + + if ((object_id = H5Gopen2(group_id, SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open object pointed to by soft link '%s'\n", + SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME); + goto error; + } + + if (H5Gclose(object_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(object_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a soft link, which points to an + * object that doesn't exist by using an absolute path, + * can be created. 
+ */ +static int +test_create_soft_link_dangling_absolute(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t object_id = H5I_INVALID_HID; + + TESTING("dangling soft link creation to object by absolute path"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, SOFT_LINK_DANGLING_ABSOLUTE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + SOFT_LINK_DANGLING_ABSOLUTE_TEST_SUBGROUP_NAME); + goto error; + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" SOFT_LINK_DANGLING_ABSOLUTE_TEST_SUBGROUP_NAME + "/" SOFT_LINK_DANGLING_ABSOLUTE_TEST_OBJECT_NAME, + group_id, SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + goto error; + } + + H5E_BEGIN_TRY + { + object_id = H5Gopen2(group_id, SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (object_id >= 0) { + H5_FAILED(); + HDprintf(" opened target of dangling link '%s'!\n", SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME); + H5Gclose(object_id); + goto error; + } + + if ((object_id = H5Gcreate2(group_id, SOFT_LINK_DANGLING_ABSOLUTE_TEST_OBJECT_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create object '%s' for soft link's target\n", + SOFT_LINK_DANGLING_ABSOLUTE_TEST_OBJECT_NAME); + goto error; + } + + if (H5Gclose(object_id) < 0) + TEST_ERROR; + + if ((object_id = H5Gopen2(group_id, SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open object pointed to by soft link '%s'\n", + SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME); + goto error; + } + + if (H5Gclose(object_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(object_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a soft link with a long name can be created + * using H5Lcreate_soft. 
+ */ +static int +test_create_soft_link_long_name(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + char vol_name[5]; + size_t name_len = MAX_NAME_LEN; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + char *objname = NULL; /* Name of object [Long] */ + size_t u; /* Local index variable */ + + TESTING("soft link creation with a long name"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, SOFT_LINK_TEST_GROUP_LONG_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", SOFT_LINK_TEST_GROUP_LONG_NAME); + goto error; + } + + if (H5VLget_connector_name(file_id, vol_name, 5) < 0) { + H5_FAILED(); + HDprintf(" couldn't get VOL connector name\n"); + goto error; + } + + /** for DAOS VOL, max link name supported is 99 (Lexical key) */ + if (strcmp(vol_name, "daos") == 0) + name_len = 99; + + /* Construct very long file name */ + if ((objname = (char *)HDmalloc((size_t)(name_len + 1))) == NULL) + TEST_ERROR; + + for (u = 0; u < name_len; u++) + objname[u] = 'b'; + objname[name_len] = '\0'; + + if (H5Lcreate_soft(SOFT_LINK_TEST_LONG_OBJECT_NAME, group_id, objname, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link with a long name\n"); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, objname, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if the link with a long name exists\n"); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + goto error; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + /* Release memory */ + if (objname) + HDfree(objname); + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + if (objname) + HDfree(objname); + + return 1; +} + +/* + * A test to check that many soft links can be created + * using H5Lcreate_soft. 
+ */ +static int +test_create_soft_link_many(void) +{ +#ifndef NO_SOFT_LINK_MANY_DANGLING + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t object_id = H5I_INVALID_HID; +#ifndef NO_OBJECT_GET_NAME + char objname[SOFT_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE]; /* Object name */ +#endif +#endif + + TESTING("soft link creation of many links"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this " + "connector\n"); + return 0; + } + +#ifndef NO_SOFT_LINK_MANY_DANGLING + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, SOFT_LINK_TEST_GROUP_MANY_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", SOFT_LINK_TEST_GROUP_MANY_NAME); + goto error; + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" SOFT_LINK_TEST_GROUP_MANY_NAME + "/" SOFT_LINK_TEST_GROUP_MANY_FINAL_NAME, + group_id, "soft1", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft1", group_id, "soft2", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft2", group_id, "soft3", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft3", group_id, "soft4", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft4", group_id, "soft5", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft5", group_id, "soft6", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft6", group_id, "soft7", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft7", group_id, "soft8", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft8", group_id, "soft9", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft9", group_id, "soft10", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft10", group_id, "soft11", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft11", group_id, "soft12", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft12", group_id, "soft13", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft13", group_id, "soft14", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft14", group_id, "soft15", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + if (H5Lcreate_soft("soft15", group_id, "soft16", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, "soft16", H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link 'soft16' exists\n"); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link 'soft16' did not exist\n"); + goto error; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if 
(H5Fclose(file_id) < 0) + TEST_ERROR; + + /* Reopen the file and group and verify the soft links */ + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gopen2(container_group, SOFT_LINK_TEST_GROUP_MANY_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", SOFT_LINK_TEST_GROUP_MANY_NAME); + goto error; + } + + /* + * XXX: Try to open the object through the last soft link. It should fail because it doesn't exist. If + * H5Oopen is available, use that. + */ + H5E_BEGIN_TRY + { + object_id = H5Gopen2(group_id, "soft16", H5P_DEFAULT); + } + H5E_END_TRY; + + if (object_id >= 0) { + H5_FAILED(); + HDprintf(" opened target of dangling soft link '%s'!\n", SOFT_LINK_TEST_GROUP_MANY_NAME); + H5Gclose(object_id); + goto error; + } + + if ((object_id = H5Gcreate2(group_id, SOFT_LINK_TEST_GROUP_MANY_FINAL_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create object '%s' for soft link's target\n", SOFT_LINK_TEST_GROUP_MANY_NAME); + goto error; + } + + if (H5Gclose(object_id) < 0) + TEST_ERROR; + + /* + * XXX: Open the object through last soft link. It should work this time. If H5Oopen is available, use + * that. + */ + if ((object_id = H5Gopen2(group_id, "soft16", H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open object pointed to by soft link '%s'\n", SOFT_LINK_TEST_GROUP_MANY_NAME); + goto error; + } +#ifndef NO_OBJECT_GET_NAME + /* Check name */ + if (H5Iget_name(object_id, objname, (size_t)SOFT_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE) < 0) { + H5_FAILED(); + HDprintf(" couldn't get the name of the object 'soft16'\n"); + goto error; + } + + if (HDstrcmp(objname, "/" LINK_TEST_GROUP_NAME "/" SOFT_LINK_TEST_GROUP_MANY_NAME "/soft16")) { + H5_FAILED(); + HDprintf(" wrong name of the object '%s'\n", objname); + goto error; + } +#endif + + if (H5Gclose(object_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(object_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check that a soft link can't be created + * when H5Lcreate_soft is passed invalid parameters. 
+ */ +static int +test_create_soft_link_invalid_params(void) +{ + herr_t err_ret = -1; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + + TESTING_MULTIPART("soft link creation with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or link aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, SOFT_LINK_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", SOFT_LINK_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lcreate_soft_invalid_link_target) + { + TESTING_2("H5Lcreate_soft with an invalid link target"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_soft(NULL, group_id, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created soft link '%s' with an invalid link target!\n", + SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_soft_invalid_link_target); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_soft("", group_id, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created soft link '%s' with an invalid link target!\n", + SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_soft_invalid_link_target); + } + + PASSED(); + } + PART_END(H5Lcreate_soft_invalid_link_target); + + PART_BEGIN(H5Lcreate_soft_invalid_link_loc_id) + { + TESTING_2("H5Lcreate_soft with an invalid link_loc_id"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_soft("/", H5I_INVALID_HID, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created soft link '%s' with an invalid link_loc_id!\n", + SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_soft_invalid_link_loc_id); + } + + PASSED(); + } + PART_END(H5Lcreate_soft_invalid_link_loc_id); + + PART_BEGIN(H5Lcreate_soft_invalid_link_name) + { + TESTING_2("H5Lcreate_soft with an invalid link name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_soft("/", group_id, NULL, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created soft link '%s' with a NULL link name!\n", + SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_soft_invalid_link_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_soft("/", group_id, "", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created soft link '%s' with an invalid link name of ''!\n", + SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_soft_invalid_link_name); + } + + 
PASSED(); + } + PART_END(H5Lcreate_soft_invalid_link_name); + + PART_BEGIN(H5Lcreate_soft_invalid_lcpl) + { + TESTING_2("H5Lcreate_soft with an invalid LCPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_soft("/", group_id, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME, + H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created soft link '%s' with an invalid LCPL!\n", + SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_soft_invalid_lcpl); + } + + PASSED(); + } + PART_END(H5Lcreate_soft_invalid_lcpl); + + PART_BEGIN(H5Lcreate_soft_invalid_lapl) + { + TESTING_2("H5Lcreate_soft with an invalid LAPL"); +#ifndef NO_INVALID_PROPERTY_LIST_TESTS + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_soft("/", group_id, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, + H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created soft link '%s' with an invalid LAPL!\n", + SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_soft_invalid_lapl); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lcreate_soft_invalid_lapl); +#endif + } + PART_END(H5Lcreate_soft_invalid_lapl); + + PART_BEGIN(H5Lcreate_soft_invalid_existence) + { + TESTING_2("invalid link existence after previous invalid H5Lcreate_soft calls"); + + /* Verify the link hasn't been created */ + if ((link_exists = H5Lexists(group_id, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", + SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_soft_invalid_existence); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" link '%s' existed!\n", SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_soft_invalid_existence); + } + + PASSED(); + } + PART_END(H5Lcreate_soft_invalid_existence); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an external link can be created + * using H5Lcreate_external. 
+ */ +static int +test_create_external_link(void) +{ +#ifndef NO_EXTERNAL_LINKS + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t root_id = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + + TESTING("external link creation to existing object"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic link, or external link aren't supported " + "with this connector\n"); + return 0; + } + +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME); + + if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, EXTERNAL_LINK_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", EXTERNAL_LINK_TEST_SUBGROUP_NAME); + goto error; + } + + if (H5Lcreate_external(ext_link_filename, "/", group_id, EXTERNAL_LINK_TEST_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", EXTERNAL_LINK_TEST_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, EXTERNAL_LINK_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", EXTERNAL_LINK_TEST_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + goto error; + } + + if ((root_id = H5Gopen2(group_id, EXTERNAL_LINK_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open root group of other file using external link '%s'\n", + EXTERNAL_LINK_TEST_LINK_NAME); + goto error; + } + + if (H5Gclose(root_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(root_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check that an external link, which points to an + * object that doesn't exist by using an absolute path, can + * be created. 
+ */ +static int +test_create_external_link_dangling(void) +{ +#ifndef NO_EXTERNAL_LINKS + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t object_id = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + + TESTING("dangling external link creation"); + +#ifndef NO_EXTERNAL_LINKS + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic link, or external link aren't supported " + "with this connector\n"); + return 0; + } + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + goto error; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, EXTERNAL_LINK_TEST_DANGLING_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", EXTERNAL_LINK_TEST_DANGLING_SUBGROUP_NAME); + goto error; + } + + if (H5Lcreate_external(ext_link_filename, "/" EXTERNAL_LINK_TEST_DANGLING_OBJECT_NAME, group_id, + EXTERNAL_LINK_TEST_DANGLING_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dangling external link '%s'\n", EXTERNAL_LINK_TEST_DANGLING_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, EXTERNAL_LINK_TEST_DANGLING_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", EXTERNAL_LINK_TEST_DANGLING_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + goto error; + } + + H5E_BEGIN_TRY + { + object_id = H5Gopen2(group_id, EXTERNAL_LINK_TEST_DANGLING_LINK_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (object_id >= 0) { + H5_FAILED(); + HDprintf(" opened non-existent object in other file using dangling external link '%s'!\n", + EXTERNAL_LINK_TEST_DANGLING_LINK_NAME); + H5Gclose(object_id); + goto error; + } + + if ((object_id = H5Gcreate2(ext_file_id, EXTERNAL_LINK_TEST_DANGLING_OBJECT_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create object '%s' for external link's target\n", + EXTERNAL_LINK_TEST_DANGLING_OBJECT_NAME); + goto error; + } + + if (H5Gclose(object_id) < 0) + TEST_ERROR; + + if ((object_id = H5Gopen2(group_id, EXTERNAL_LINK_TEST_DANGLING_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open object pointed to by external link '%s'\n", + EXTERNAL_LINK_TEST_DANGLING_LINK_NAME); + goto error; + } + + if (H5Gclose(object_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if 
(H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + if (H5Fclose(ext_file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(object_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + H5Fclose(ext_file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check that an external link can be created to an object + * that crosses several files using H5Lcreate_external. + */ +static int +test_create_external_link_multi(void) +{ +#ifndef NO_EXTERNAL_LINKS + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID, group_id3 = H5I_INVALID_HID; + hid_t root_id = H5I_INVALID_HID; + char ext_link_filename1[H5_API_TEST_FILENAME_MAX_LENGTH]; + char ext_link_filename2[H5_API_TEST_FILENAME_MAX_LENGTH]; + char ext_link_filename3[H5_API_TEST_FILENAME_MAX_LENGTH]; + char objname[EXTERNAL_LINK_TEST_MULTI_NAME_BUF_SIZE]; +#endif + + TESTING_MULTIPART("external link creation to an object across several files"); + +#ifndef NO_EXTERNAL_LINKS + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or external link aren't supported with this " + "connector\n"); + return 0; + } + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lcreate_external_first_file) + { + TESTING_2("Create the first external file to be pointed to"); + + HDsnprintf(ext_link_filename1, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((file_id = H5Fcreate(ext_link_filename1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", + ext_link_filename1); + PART_ERROR(H5Lcreate_external_first_file); + } + + /* Create object down a path */ + if ((group_id = H5Gcreate2(file_id, "A", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a group\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + if ((group_id = H5Gcreate2(file_id, "A/B", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a group\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + if ((group_id = H5Gcreate2(file_id, "A/B/C", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a group\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + /* Close file */ + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close the file\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + PASSED(); + } + PART_END(H5Lcreate_external_first_file); + + PART_BEGIN(H5Lcreate_external_second_file) + { + TESTING_2("Create the second external file to be pointed to"); + + HDsnprintf(ext_link_filename2, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME2); + + if ((file_id = 
H5Fcreate(ext_link_filename2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", + ext_link_filename2); + PART_ERROR(H5Lcreate_external_second_file); + } + + /* Create object down a path */ + if ((group_id = H5Gcreate2(file_id, "D", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a group\n"); + PART_ERROR(H5Lcreate_external_second_file); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_second_file); + } + + if ((group_id = H5Gcreate2(file_id, "D/E", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a group\n"); + PART_ERROR(H5Lcreate_external_second_file); + } + + /* Create external link to object in first file */ + if (H5Lcreate_external(ext_link_filename1, "/A/B/C", group_id, "F", H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create external link 'F'\n"); + PART_ERROR(H5Lcreate_external_second_file); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_second_file); + } + + /* Close file */ + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a file\n"); + PART_ERROR(H5Lcreate_external_second_file); + } + + PASSED(); + } + PART_END(H5Lcreate_external_second_file); + + PART_BEGIN(H5Lcreate_external_third_file) + { + TESTING_2("Create the third external file to be pointed to"); + + HDsnprintf(ext_link_filename3, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME3); + + if ((file_id = H5Fcreate(ext_link_filename3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", + ext_link_filename3); + PART_ERROR(H5Lcreate_external_third_file); + } + + /* Create object down a path */ + if ((group_id = H5Gcreate2(file_id, "G", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a group\n"); + PART_ERROR(H5Lcreate_external_third_file); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_third_file); + } + + if ((group_id = H5Gcreate2(file_id, "G/H", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a group\n"); + PART_ERROR(H5Lcreate_external_third_file); + } + + /* Create external link to object in second file */ + if (H5Lcreate_external(ext_link_filename2, "/D/E/F", group_id, "I", H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create external link 'I'\n"); + PART_ERROR(H5Lcreate_external_third_file); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_third_file); + } + + /* Close file */ + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a file\n"); + PART_ERROR(H5Lcreate_external_third_file); + } + + PASSED(); + } + PART_END(H5Lcreate_external_third_file); + + PART_BEGIN(H5Lcreate_external_final_file) + { + TESTING_2("Open the file and create the final external link"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + PART_ERROR(H5Lcreate_external_final_file); + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + 
H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + PART_ERROR(H5Lcreate_external_final_file); + } + + if ((group_id = H5Gcreate2(container_group, EXTERNAL_LINK_TEST_MULTI_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", EXTERNAL_LINK_TEST_MULTI_NAME); + PART_ERROR(H5Lcreate_external_final_file); + } + + if (H5Lcreate_external(ext_link_filename3, "/G/H/I", group_id, "ext_link", H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link 'ext_link'\n"); + PART_ERROR(H5Lcreate_external_final_file); + } + + if ((group_id2 = H5Gopen2(group_id, "ext_link", H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open the group that is the external link\n"); + PART_ERROR(H5Lcreate_external_final_file); + } + + /* Check name */ + if (H5Iget_name(group_id2, objname, (size_t)EXTERNAL_LINK_TEST_MULTI_NAME_BUF_SIZE) < 0) { + H5_FAILED(); + HDprintf(" couldn't get the name of the object '%s'\n", + HARD_LINK_TEST_GROUP_MANY_FINAL_NAME); + PART_ERROR(H5Lcreate_external_final_file); + } + + if (HDstrcmp(objname, "/A/B/C")) { + H5_FAILED(); + HDprintf(" wrong name of the object '%s'\n", objname); + PART_ERROR(H5Lcreate_external_final_file); + } + + /* Create an object in the external file */ + if ((group_id3 = H5Gcreate2(group_id2, "new_group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a group 'new_group' in the external file\n"); + PART_ERROR(H5Lcreate_external_final_file); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_final_file); + } + + if (H5Gclose(group_id2) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_final_file); + } + + if (H5Gclose(group_id3) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_final_file); + } + + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_final_file); + } + + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a file\n"); + PART_ERROR(H5Lcreate_external_final_file); + } + + PASSED(); + } + PART_END(H5Lcreate_external_final_file); + + PART_BEGIN(H5Lcreate_external_object_created) + { + TESTING_2("Check the group being created through the external link"); + + if ((file_id = H5Fopen(ext_link_filename1, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", ext_link_filename1); + PART_ERROR(H5Lcreate_external_object_created); + } + + if ((group_id = H5Gopen2(file_id, "/A/B/C/new_group", H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open a group 'new_group' in the external file\n"); + PART_ERROR(H5Lcreate_external_object_created); + } + + /* Check name */ + if (H5Iget_name(group_id, objname, (size_t)EXTERNAL_LINK_TEST_MULTI_NAME_BUF_SIZE) < 0) { + H5_FAILED(); + HDprintf(" couldn't get the name of the object '/A/B/C/new_group'\n"); + PART_ERROR(H5Lcreate_external_object_created); + } + + if (HDstrcmp(objname, "/A/B/C/new_group")) { + H5_FAILED(); + HDprintf(" wrong name of the object '%s'\n", objname); + PART_ERROR(H5Lcreate_external_object_created); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close the group\n"); + PART_ERROR(H5Lcreate_external_object_created); + } + + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close 
the file\n"); + PART_ERROR(H5Lcreate_external_object_created); + } + + PASSED(); + } + PART_END(H5Lcreate_external_object_created); + } + END_MULTIPART; + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(root_id); + H5Gclose(group_id); + H5Gclose(group_id2); + H5Gclose(group_id3); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to build a file with external link to object that + * goes back and forth between two files a couple of times: + * + * file1:/link1 -> file2: /link2 + * file2:/link2 -> file1: /link3 + * file1:/link3 -> file2: /link4 + * file2:/link4 -> file1: /link5 + * file1:/link5 -> file2: /link6 + * file2:/link6 -> file1: /final + */ +static int +test_create_external_link_ping_pong(void) +{ +#ifndef NO_EXTERNAL_LINKS + hid_t file_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + char ext_link_filename1[H5_API_TEST_FILENAME_MAX_LENGTH]; + char ext_link_filename2[H5_API_TEST_FILENAME_MAX_LENGTH]; + char objname[EXTERNAL_LINK_TEST_MULTI_NAME_BUF_SIZE]; +#endif + + TESTING_MULTIPART("external link creation to an object in ping pong style"); + +#ifndef NO_EXTERNAL_LINKS + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or external link aren't supported with this " + "connector\n"); + return 0; + } + + HDsnprintf(ext_link_filename1, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_PING_PONG_NAME1); + HDsnprintf(ext_link_filename2, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_PING_PONG_NAME2); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lcreate_external_first_file) + { + TESTING_2("Create the first external file"); + + /* Create the first file */ + if ((file_id = H5Fcreate(ext_link_filename1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", + ext_link_filename1); + PART_ERROR(H5Lcreate_external_first_file); + } + + /* Create external links for chain */ + if (H5Lcreate_external(ext_link_filename2, "/link2", file_id, "link1", H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create external link\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + if (H5Lcreate_external(ext_link_filename2, "/link4", file_id, "link3", H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create external link\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + if (H5Lcreate_external(ext_link_filename2, "/link6", file_id, "link5", H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create external link\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + /* Create final object */ + if ((group_id = H5Gcreate2(file_id, "final", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a group\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + /* Close file */ + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close the file\n"); + PART_ERROR(H5Lcreate_external_first_file); + } + + PASSED(); + } + PART_END(H5Lcreate_external_first_file); + + 
PART_BEGIN(H5Lcreate_external_second_file) + { + TESTING_2("Create the second external file"); + + /* Create the second file */ + if ((file_id = H5Fcreate(ext_link_filename2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link\n", ext_link_filename2); + PART_ERROR(H5Lcreate_external_second_file); + } + + /* Create external links for chain */ + if (H5Lcreate_external(ext_link_filename1, "/link3", file_id, "link2", H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create external link\n"); + PART_ERROR(H5Lcreate_external_second_file); + } + + if (H5Lcreate_external(ext_link_filename1, "/link5", file_id, "link4", H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create external link\n"); + PART_ERROR(H5Lcreate_external_second_file); + } + + if (H5Lcreate_external(ext_link_filename1, "/final", file_id, "link6", H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create external link\n"); + PART_ERROR(H5Lcreate_external_second_file); + } + + /* Close file */ + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close the file\n"); + PART_ERROR(H5Lcreate_external_second_file); + } + + PASSED(); + } + PART_END(H5Lcreate_external_second_file); + + PART_BEGIN(H5Lcreate_external_verify) + { + TESTING_2("Open the first file to verify the object being pointed to"); + + if ((file_id = H5Fopen(ext_link_filename1, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", ext_link_filename1); + PART_ERROR(H5Lcreate_external_verify); + } + + /* Open object through external link */ + if ((group_id = H5Gopen2(file_id, "link1", H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open the group that is the external link 'link1'\n"); + PART_ERROR(H5Lcreate_external_verify); + } + + /* Check the name of the object being pointed to */ + if (H5Iget_name(group_id, objname, (size_t)EXTERNAL_LINK_TEST_PING_PONG_NAME_BUF_SIZE) < 0) { + H5_FAILED(); + HDprintf(" couldn't get the name of the object\n"); + PART_ERROR(H5Lcreate_external_verify); + } + + if (HDstrcmp(objname, "/final")) { + H5_FAILED(); + HDprintf(" wrong name of the object '%s'\n", objname); + PART_ERROR(H5Lcreate_external_verify); + } + + /* Create an object in the external file */ + if ((group_id2 = H5Gcreate2(group_id, "new_group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a new group 'new_group'\n"); + PART_ERROR(H5Lcreate_external_verify); + } + + if (H5Gclose(group_id2) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_verify); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_verify); + } + + /* Close file */ + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close the file\n"); + PART_ERROR(H5Lcreate_external_verify); + } + + PASSED(); + } + PART_END(H5Lcreate_external_verify); + + PART_BEGIN(H5Lcreate_external_verify_again) + { + TESTING_2("Open the first file to verify the object being created"); + + if ((file_id = H5Fopen(ext_link_filename1, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", ext_link_filename1); + PART_ERROR(H5Lcreate_external_verify_again); + } + + /* Open object through external link */ + if ((group_id = H5Gopen2(file_id, "/final/new_group", H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open the group that 
is the external link\n"); + PART_ERROR(H5Lcreate_external_verify_again); + } + + /* Check the name of the object being pointed to */ + if (H5Iget_name(group_id, objname, (size_t)EXTERNAL_LINK_TEST_PING_PONG_NAME_BUF_SIZE) < 0) { + H5_FAILED(); + HDprintf(" couldn't get the name of the object\n"); + PART_ERROR(H5Lcreate_external_verify_again); + } + + if (HDstrcmp(objname, "/final/new_group")) { + H5_FAILED(); + HDprintf(" wrong name of the object '%s'\n", objname); + PART_ERROR(H5Lcreate_external_verify_again); + } + + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close a group\n"); + PART_ERROR(H5Lcreate_external_verify_again); + } + + /* Close file */ + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close the file\n"); + PART_ERROR(H5Lcreate_external_verify_again); + } + + PASSED(); + } + PART_END(H5Lcreate_external_verify_again); + } + END_MULTIPART; + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(group_id2); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check that an external link can't be created + * when H5Lcreate_external is passed invalid parameters. + */ +static int +test_create_external_link_invalid_params(void) +{ + herr_t err_ret = -1; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; + + TESTING_MULTIPART("H5Lcreate_external with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or basic link or external link aren't supported " + "with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_INVALID_PARAMS_TEST_FILE_NAME); + + if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, EXTERNAL_LINK_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + EXTERNAL_LINK_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lcreate_external_invalid_file_name) + { + TESTING_2("H5Lcreate_external with an invalid file name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_external(NULL, "/", group_id, EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created external link '%s' using a NULL file name!\n", + 
EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_external_invalid_file_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_external("", "/", group_id, EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created external link '%s' using an invalid file name of ''!\n", + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_external_invalid_file_name); + } + + PASSED(); + } + PART_END(H5Lcreate_external_invalid_file_name); + + PART_BEGIN(H5Lcreate_external_invalid_ext_obj_name) + { + TESTING_2("H5Lcreate_external with an invalid external object name"); + + H5E_BEGIN_TRY + { + err_ret = + H5Lcreate_external(ext_link_filename, NULL, group_id, + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created external link '%s' using a NULL external object name!\n", + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_external_invalid_ext_obj_name); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Lcreate_external(ext_link_filename, "", group_id, + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created external link '%s' using an invalid external object name of ''!\n", + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_external_invalid_ext_obj_name); + } + + PASSED(); + } + PART_END(H5Lcreate_external_invalid_ext_obj_name); + + PART_BEGIN(H5Lcreate_external_invalid_link_loc_id) + { + TESTING_2("H5Lcreate_external with an invalid link_loc_id"); + + H5E_BEGIN_TRY + { + err_ret = + H5Lcreate_external(ext_link_filename, "/", H5I_INVALID_HID, + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created external link '%s' using an invalid link_loc_id!\n", + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_external_invalid_link_loc_id); + } + + PASSED(); + } + PART_END(H5Lcreate_external_invalid_link_loc_id); + + PART_BEGIN(H5Lcreate_external_invalid_link_name) + { + TESTING_2("H5Lcreate_external with an invalid link name"); + + H5E_BEGIN_TRY + { + err_ret = + H5Lcreate_external(ext_link_filename, "/", group_id, NULL, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created external link '%s' using a NULL link_loc_id!\n", + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_external_invalid_link_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_external(ext_link_filename, "/", group_id, "", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created external link '%s' using an invalid link name of ''!\n", + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_external_invalid_link_name); + } + + PASSED(); + } + PART_END(H5Lcreate_external_invalid_link_name); + + PART_BEGIN(H5Lcreate_external_invalid_lcpl) + { + TESTING_2("H5Lcreate_external with an invalid LCPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_external(ext_link_filename, "/", group_id, + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5I_INVALID_HID, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created external link '%s' using an invalid LCPL!\n", + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME); + 
PART_ERROR(H5Lcreate_external_invalid_lcpl); + } + + PASSED(); + } + PART_END(H5Lcreate_external_invalid_lcpl); + + PART_BEGIN(H5Lcreate_external_invalid_lapl) + { + TESTING_2("H5Lcreate_external with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_external(ext_link_filename, "/", group_id, + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, + H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created external link '%s' using an invalid LAPL!\n", + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_external_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Lcreate_external_invalid_lapl); + + PART_BEGIN(H5Lcreate_external_invalid_existence) + { + TESTING_2("invalid link existence after previous invalid H5Lcreate_external calls"); + + /* Verify the link hasn't been created */ + if ((link_exists = + H5Lexists(group_id, EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", + EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_external_invalid_existence); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" link '%s' existed!\n", EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_external_invalid_existence); + } + + PASSED(); + } + PART_END(H5Lcreate_external_invalid_existence); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a user-defined link can be created. 
+ */ +static int +test_create_user_defined_link(void) +{ +#ifndef NO_USER_DEFINED_LINKS + ssize_t udata_size; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + char udata[UD_LINK_TEST_UDATA_MAX_SIZE]; +#endif + + TESTING("user-defined link creation"); + +#ifndef NO_USER_DEFINED_LINKS + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_UD_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or user-defined link aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, UD_LINK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", UD_LINK_TEST_GROUP_NAME); + goto error; + } + + if ((udata_size = HDsnprintf(udata, UD_LINK_TEST_UDATA_MAX_SIZE, "udata")) < 0) + TEST_ERROR; + + if (H5Lcreate_ud(group_id, UD_LINK_TEST_LINK_NAME, H5L_TYPE_EXTERNAL, udata, (size_t)udata_size, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create user-defined link '%s'\n", UD_LINK_TEST_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, UD_LINK_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", UD_LINK_TEST_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' didn't exist!\n", UD_LINK_TEST_LINK_NAME); + goto error; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check that H5Lcreate_ud fails when + * it is given invalid parameters. 
+ */ +static int +test_create_user_defined_link_invalid_params(void) +{ + ssize_t udata_size; + htri_t link_exists; + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + char udata[UD_LINK_INVALID_PARAMS_TEST_UDATA_MAX_SIZE]; + + TESTING_MULTIPART("H5Lcreate_ud with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_UD_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or link aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, UD_LINK_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", UD_LINK_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((udata_size = HDsnprintf(udata, UD_LINK_INVALID_PARAMS_TEST_UDATA_MAX_SIZE, "udata")) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lcreate_ud_invalid_link_loc_id) + { + TESTING_2("H5Lcreate_ud with an invalid link location ID"); + + H5E_BEGIN_TRY + { + err_ret = + H5Lcreate_ud(H5I_INVALID_HID, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5L_TYPE_EXTERNAL, + udata, (size_t)udata_size, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created user-defined link '%s' with an invalid link location ID!\n", + UD_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_ud_invalid_link_loc_id); + } + + PASSED(); + } + PART_END(H5Lcreate_ud_invalid_link_loc_id); + + PART_BEGIN(H5Lcreate_ud_invalid_link_name) + { + TESTING_2("H5Lcreate_ud with an invalid link name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_ud(group_id, NULL, H5L_TYPE_EXTERNAL, udata, (size_t)udata_size, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created user-defined link '%s' with a NULL link name!\n", + UD_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_ud_invalid_link_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_ud(group_id, "", H5L_TYPE_EXTERNAL, udata, (size_t)udata_size, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created user-defined link '%s' with an invalid link name of ''!\n", + UD_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_ud_invalid_link_name); + } + + PASSED(); + } + PART_END(H5Lcreate_ud_invalid_link_name); + + PART_BEGIN(H5Lcreate_ud_invalid_link_type) + { + TESTING_2("H5Lcreate_ud with an invalid link type"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_ud(group_id, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5L_TYPE_HARD, udata, + (size_t)udata_size, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created user-defined link '%s' with an invalid link type!\n", + UD_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_ud_invalid_link_type); + } + + PASSED(); + } + 
PART_END(H5Lcreate_ud_invalid_link_type); + + PART_BEGIN(H5Lcreate_ud_invalid_udata_pointer) + { + TESTING_2("H5Lcreate_ud with an invalid udata pointer"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_ud(group_id, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5L_TYPE_EXTERNAL, + NULL, (size_t)udata_size, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created user-defined link '%s' with an invalid udata pointer!\n", + UD_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_ud_invalid_udata_pointer); + } + + PASSED(); + } + PART_END(H5Lcreate_ud_invalid_udata_pointer); + + PART_BEGIN(H5Lcreate_ud_invalid_lcpl) + { + TESTING_2("H5Lcreate_ud with an invalid LCPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_ud(group_id, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5L_TYPE_EXTERNAL, + udata, (size_t)udata_size, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created user-defined link '%s' with an invalid LCPL!\n", + UD_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_ud_invalid_lcpl); + } + + PASSED(); + } + PART_END(H5Lcreate_ud_invalid_lcpl); + + PART_BEGIN(H5Lcreate_ud_invalid_lapl) + { + TESTING_2("H5Lcreate_ud with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcreate_ud(group_id, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5L_TYPE_EXTERNAL, + udata, (size_t)udata_size, H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" created user-defined link '%s' with an invalid LAPL!\n", + UD_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_ud_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Lcreate_ud_invalid_lapl); + + PART_BEGIN(H5Lcreate_ud_invalid_existence) + { + TESTING_2("invalid link existence after previous invalid H5Lcreate_ud calls"); + + /* Verify the link hasn't been created */ + if ((link_exists = H5Lexists(group_id, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", + UD_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_ud_invalid_existence); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" link '%s' existed!\n", UD_LINK_INVALID_PARAMS_TEST_LINK_NAME); + PART_ERROR(H5Lcreate_ud_invalid_existence); + } + + PASSED(); + } + PART_END(H5Lcreate_ud_invalid_existence); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a link can be deleted + * using H5Ldelete and H5Ldelete_by_idx. 
+ */ +static int +test_delete_link(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t nested_grp_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; +#ifndef NO_EXTERNAL_LINKS + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + + TESTING_MULTIPART("link deletion"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or link, hard, soft, or external link aren't " + "supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_DELETE_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Ldelete_hard) + { + TESTING_2("H5Ldelete on hard link"); + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP1_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP1_NAME); + PART_ERROR(H5Ldelete_hard); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_hard); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_hard); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" first hard link did not exist\n"); + PART_ERROR(H5Ldelete_hard); + } + + if (H5Ldelete(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_hard); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + 
PART_ERROR(H5Ldelete_hard); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" first hard link exists!\n"); + PART_ERROR(H5Ldelete_hard); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP1_NAME); + PART_ERROR(H5Ldelete_hard); + } + + PASSED(); + } + PART_END(H5Ldelete_hard); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_hard_indirect) + { + TESTING_2("H5Ldelete on nested hard link"); + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_NESTED_SUBGROUP_NAME1, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_NESTED_SUBGROUP_NAME1); + PART_ERROR(H5Ldelete_hard_indirect); + } + + if ((nested_grp_id = H5Gcreate2(subgroup_id, LINK_DELETE_TEST_NESTED_GRP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_NESTED_GRP_NAME); + PART_ERROR(H5Ldelete_hard_indirect); + } + + if (H5Lcreate_hard(nested_grp_id, ".", nested_grp_id, LINK_DELETE_TEST_HARD_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_hard_indirect); + } + + if ((link_exists = H5Lexists(nested_grp_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_hard_indirect); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" first hard link did not exist\n"); + PART_ERROR(H5Ldelete_hard_indirect); + } + + if (H5Ldelete(subgroup_id, LINK_DELETE_TEST_NESTED_HARD_LINK_NAME, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete\n", + LINK_DELETE_TEST_NESTED_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_hard_indirect); + } + + if ((link_exists = H5Lexists(nested_grp_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_hard_indirect); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" first hard link exists!\n"); + PART_ERROR(H5Ldelete_hard_indirect); + } + + if (H5Gclose(nested_grp_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_NESTED_GRP_NAME); + PART_ERROR(H5Ldelete_hard_indirect); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP1_NAME); + PART_ERROR(H5Ldelete_hard_indirect); + } + + PASSED(); + } + PART_END(H5Ldelete_hard_indirect); + + H5E_BEGIN_TRY + { + H5Gclose(nested_grp_id); + nested_grp_id = H5I_INVALID_HID; + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_soft) + { + TESTING_2("H5Ldelete on soft link"); + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP2_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP2_NAME); + PART_ERROR(H5Ldelete_soft); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP2_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first soft link 
'%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_soft); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_soft); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" first soft link did not exist\n"); + PART_ERROR(H5Ldelete_soft); + } + + if (H5Ldelete(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_soft); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_soft); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" first soft link exists!\n"); + PART_ERROR(H5Ldelete_soft); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP2_NAME); + PART_ERROR(H5Ldelete_soft); + } + + PASSED(); + } + PART_END(H5Ldelete_soft); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_external) + { + TESTING_2("H5Ldelete on external link"); +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Ldelete_external); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Ldelete_external); + } + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP3_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP3_NAME); + PART_ERROR(H5Ldelete_external); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first external link '%s'\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_external); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_external); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" first external link did not exist\n"); + PART_ERROR(H5Ldelete_external); + } + + if (H5Ldelete(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_external); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_external); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" first external link exists!\n"); + PART_ERROR(H5Ldelete_external); + } 
+ + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP3_NAME); + PART_ERROR(H5Ldelete_external); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ldelete_external); +#endif + } + PART_END(H5Ldelete_external); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_ud) + { + TESTING_2("H5Ldelete on user-defined link"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Ldelete_ud); + } + PART_END(H5Ldelete_ud); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_hard_crt_order_increasing) + { + TESTING_2("H5Ldelete_by_idx on hard link by creation order in increasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP5_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP5_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + /* Create several hard links */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", 
H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in " + "increasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in " + "increasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + 
PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in " + "increasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP5_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing); + } + + PASSED(); + } + PART_END(H5Ldelete_by_idx_hard_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_hard_crt_order_decreasing) + { + TESTING_2("H5Ldelete_by_idx on hard link by creation order in decreasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP6_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP6_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + /* Create several hard links */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, 
LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in " + "decreasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in " + "decreasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + 
PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in " + "decreasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP6_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing); + } + + PASSED(); + } + PART_END(H5Ldelete_by_idx_hard_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + 
} + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_hard_name_order_increasing) + { + TESTING_2("H5Ldelete_by_idx on hard link by alphabetical order in increasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP7_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP7_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + /* Create several hard links */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in " + "increasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' 
exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in " + "increasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in " + "increasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (link_exists) { + 
H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP7_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing); + } + + PASSED(); + } + PART_END(H5Ldelete_by_idx_hard_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_hard_name_order_decreasing) + { + TESTING_2("H5Ldelete_by_idx on hard link by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP8_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP8_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + /* Create several hard links */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); 
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in " + "decreasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in " + "decreasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + 
H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in " + "decreasing order\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP8_NAME); + PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ldelete_by_idx_hard_name_order_decreasing); +#endif + } + PART_END(H5Ldelete_by_idx_hard_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_soft_crt_order_increasing) + { + TESTING_2("H5Ldelete_by_idx on soft link by creation order in increasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP9_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP9_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME 
"/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP9_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP9_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP9_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in " + "increasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + 
LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in " + "increasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in " + "increasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if 
soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP9_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing); + } + + PASSED(); + } + PART_END(H5Ldelete_by_idx_soft_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_soft_crt_order_decreasing) + { + TESTING_2("H5Ldelete_by_idx on soft link by creation order in decreasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP10_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP10_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP10_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP10_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP10_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft 
link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in " + "decreasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in " + "decreasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + 
PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in " + "decreasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP10_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing); + } + + PASSED(); + } + PART_END(H5Ldelete_by_idx_soft_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_soft_name_order_increasing) + { + TESTING_2("H5Ldelete_by_idx on soft link by alphabetical order in increasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP11_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP11_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP11_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create 
soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP11_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP11_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in " + "increasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after 
deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in " + "increasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in " + "increasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + 
HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP11_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing); + } + + PASSED(); + } + PART_END(H5Ldelete_by_idx_soft_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_soft_name_order_decreasing) + { + TESTING_2("H5Ldelete_by_idx on soft link by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP12_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP12_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP12_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP12_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME + "/" LINK_DELETE_TEST_SUBGROUP12_NAME, + subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + 
PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in " + "decreasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in " + "decreasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (link_exists) { + 
H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in " + "decreasing order\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP12_NAME); + PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ldelete_by_idx_soft_name_order_decreasing); +#endif + } + PART_END(H5Ldelete_by_idx_soft_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_external_crt_order_increasing) + { + TESTING_2("H5Ldelete_by_idx on external link by creation order in increasing order"); +#ifndef NO_EXTERNAL_LINKS + /* Create file for external link to reference */ + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", 
ext_link_filename); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP13_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP13_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + /* Create several external links */ + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in " + "increasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + 
PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in " + "increasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in " + "increasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + 
PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP13_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ldelete_by_idx_external_crt_order_increasing); +#endif + } + PART_END(H5Ldelete_by_idx_external_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_external_crt_order_decreasing) + { + TESTING_2("H5Ldelete_by_idx on external link by creation order in decreasing order"); +#ifndef NO_EXTERNAL_LINKS + /* Create file for external link to reference */ + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP14_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP14_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + /* Create several external links */ + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", 
subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in " + "decreasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + 
PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in " + "decreasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in " + "decreasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + 
LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP14_NAME); + PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ldelete_by_idx_external_crt_order_decreasing); +#endif + } + PART_END(H5Ldelete_by_idx_external_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_external_name_order_increasing) + { + TESTING_2("H5Ldelete_by_idx on external link by alphabetical order in increasing order"); +#ifndef NO_EXTERNAL_LINKS + /* Create file for external link to reference */ + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP15_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP15_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + /* Create several external links */ + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + 
HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by alphabetical " + "order in increasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete 
external link '%s' using H5Ldelete_by_idx by alphabetical " + "order in increasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by alphabetical " + "order in increasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + 
PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP15_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ldelete_by_idx_external_name_order_increasing); +#endif + } + PART_END(H5Ldelete_by_idx_external_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_external_name_order_decreasing) + { + TESTING_2("H5Ldelete_by_idx on external link by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Create file for external link to reference */ + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP16_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP16_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + /* Create several external links */ + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" 
external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + /* Delete a link */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by alphabetical " + "order in decreasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + /* Ensure that the link is gone and others remain */ + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + /* Repeat until all links have been deleted */ + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by alphabetical " + "order in decreasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if ((link_exists = 
H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist after deletion of a different link!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by alphabetical " + "order in decreasing order\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' exists after deletion!\n", + LINK_DELETE_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP16_NAME); + PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ldelete_by_idx_external_name_order_decreasing); +#endif + } + PART_END(H5Ldelete_by_idx_external_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_ud_crt_order_increasing) + { + TESTING_2("H5Ldelete_by_idx on user-defined link by creation order in increasing order"); + + /* TODO */ + + SKIPPED(); + 
PART_EMPTY(H5Ldelete_by_idx_ud_crt_order_increasing); + } + PART_END(H5Ldelete_by_idx_ud_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_ud_crt_order_decreasing) + { + TESTING_2("H5Ldelete_by_idx on user-defined link by creation order in decreasing order"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Ldelete_by_idx_ud_crt_order_decreasing); + } + PART_END(H5Ldelete_by_idx_ud_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_ud_name_order_increasing) + { + TESTING_2("H5Ldelete_by_idx on user-defined link by alphabetical order in increasing order"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Ldelete_by_idx_ud_name_order_increasing); + } + PART_END(H5Ldelete_by_idx_ud_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_by_idx_ud_name_order_decreasing) + { + TESTING_2("H5Ldelete_by_idx on user-defined link by alphabetical order in decreasing order"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Ldelete_by_idx_ud_name_order_decreasing); + } + PART_END(H5Ldelete_by_idx_ud_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(ext_file_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a group's always-increasing + * maximum link creation order value gets reset once + * all the links have been deleted from the group. 
+ */ +static int +test_delete_link_reset_grp_max_crt_order(void) +{ +#ifndef NO_MAX_LINK_CRT_ORDER_RESET + H5G_info_t grp_info; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + char link_name[LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE]; +#endif + + TESTING_MULTIPART("H5Ldelete of all links in group resets group's maximum link creation order value"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, basic and more group, or basic link aren't supported " + "with this connector\n"); + return 0; + } + +#ifndef NO_MAX_LINK_CRT_ORDER_RESET + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME, + H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Ldelete_links_bottom_up) + { + TESTING_2("H5Ldelete from least-recently created link to most-recently created link"); + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP1_NAME, + H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", + LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP1_NAME); + PART_ERROR(H5Ldelete_links_bottom_up); + } + + /* Create several links inside the group */ + for (i = 0; i < LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) { + snprintf(link_name, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d", (int)i); + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", link_name); + PART_ERROR(H5Ldelete_links_bottom_up); + } + } + + /* Delete the links, checking the group's maximum creation order value each time */ + for (i = 0; i < LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) { + memset(&grp_info, 0, sizeof(grp_info)); + + if (H5Gget_info(subgroup_id, &grp_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group's info\n"); + PART_ERROR(H5Ldelete_links_bottom_up); + } + + if (grp_info.max_corder != LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" group's maximum creation order value got adjusted to %lld during link " + "deletion; value should have remained at %lld\n", + (long long)grp_info.max_corder, 
LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS); + PART_ERROR(H5Ldelete_links_bottom_up); + } + + snprintf(link_name, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d", (int)i); + + if (H5Ldelete(subgroup_id, link_name, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete link '%s'\n", link_name); + PART_ERROR(H5Ldelete_links_bottom_up); + } + } + + /* Ensure the group's maximum creation order value has now reset to 0 after all the links are gone + */ + memset(&grp_info, 0, sizeof(grp_info)); + + if (H5Gget_info(subgroup_id, &grp_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group's info\n"); + PART_ERROR(H5Ldelete_links_bottom_up); + } + + if (grp_info.max_corder != 0) { + H5_FAILED(); + HDprintf(" group's maximum creation order value didn't reset to 0 after deleting all " + "links from group; value is still %lld\n", + (long long)grp_info.max_corder); + PART_ERROR(H5Ldelete_links_bottom_up); + } + + PASSED(); + } + PART_END(H5Ldelete_links_bottom_up); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Ldelete_links_top_down) + { + TESTING_2("H5Ldelete from most-recently created link to least-recently created link"); + + if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP2_NAME, + H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", + LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP2_NAME); + PART_ERROR(H5Ldelete_links_top_down); + } + + /* Create several links inside the group */ + for (i = 0; i < LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) { + snprintf(link_name, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d", (int)i); + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", link_name); + PART_ERROR(H5Ldelete_links_top_down); + } + } + + /* Delete the links, checking the group's maximum creation order value each time */ + for (i = 0; i < LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) { + memset(&grp_info, 0, sizeof(grp_info)); + + if (H5Gget_info(subgroup_id, &grp_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group's info\n"); + PART_ERROR(H5Ldelete_links_top_down); + } + + if (grp_info.max_corder != LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" group's maximum creation order value got adjusted to %lld during link " + "deletion; value should have remained at %lld\n", + (long long)grp_info.max_corder, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS); + PART_ERROR(H5Ldelete_links_top_down); + } + + snprintf(link_name, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d", + (int)(LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS - i - 1)); + + if (H5Ldelete(subgroup_id, link_name, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to delete link '%s'\n", link_name); + PART_ERROR(H5Ldelete_links_top_down); + } + } + + /* Ensure the group's maximum creation order value has now reset to 0 after all the links are gone + */ + memset(&grp_info, 0, sizeof(grp_info)); + + if (H5Gget_info(subgroup_id, &grp_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group's info\n"); + PART_ERROR(H5Ldelete_links_top_down); + } + + if (grp_info.max_corder != 0) { + H5_FAILED(); + HDprintf(" group's maximum creation order value didn't reset to 0 after deleting all " + "links from group; value is still %lld\n", + (long long)grp_info.max_corder); + 
PART_ERROR(H5Ldelete_links_top_down); + } + + PASSED(); + } + PART_END(H5Ldelete_links_top_down); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +static int +test_delete_link_invalid_params(void) +{ + htri_t link_exists; + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Ldelete with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_BY_IDX) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or link aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_DELETE_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", LINK_DELETE_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if (H5Lcreate_hard(group_id, ".", group_id, LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(group_id, LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first hard link '%s' exists\n", + LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" first hard link did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Ldelete_invalid_loc_id) + { + TESTING_2("H5Ldelete with an invalid location ID"); + + H5E_BEGIN_TRY + { + err_ret = + H5Ldelete(H5I_INVALID_HID, LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ldelete succeeded with an invalid location ID!\n"); + PART_ERROR(H5Ldelete_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Ldelete_invalid_loc_id); + + PART_BEGIN(H5Ldelete_invalid_link_name) + { + TESTING_2("H5Ldelete with an invalid link name"); + + H5E_BEGIN_TRY + { + err_ret = H5Ldelete(group_id, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ldelete succeeded with a NULL link name!\n"); + 
PART_ERROR(H5Ldelete_invalid_link_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Ldelete(group_id, "", H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ldelete succeeded with an invalid link name of ''!\n"); + PART_ERROR(H5Ldelete_invalid_link_name); + } + + PASSED(); + } + PART_END(H5Ldelete_invalid_link_name); + + PART_BEGIN(H5Ldelete_invalid_lapl) + { + TESTING_2("H5Ldelete with an invalid LAPL"); +#ifndef NO_INVALID_PROPERTY_LIST_TESTS + H5E_BEGIN_TRY + { + err_ret = + H5Ldelete(group_id, LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ldelete succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Ldelete_invalid_lapl); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ldelete_invalid_lapl); +#endif + } + PART_END(H5Ldelete_invalid_lapl); + + PART_BEGIN(H5Ldelete_by_idx_invalid_loc_id) + { + TESTING_2("H5Ldelete_by_idx with an invalid location ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Ldelete_by_idx(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ldelete_by_idx succeeded with an invalid location ID!\n"); + PART_ERROR(H5Ldelete_by_idx_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Ldelete_by_idx_invalid_loc_id); + + PART_BEGIN(H5Ldelete_by_idx_invalid_grp_name) + { + TESTING_2("H5Ldelete_by_idx with an invalid group name"); + + H5E_BEGIN_TRY + { + err_ret = H5Ldelete_by_idx(group_id, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ldelete_by_idx succeeded with a NULL group name!\n"); + PART_ERROR(H5Ldelete_by_idx_invalid_grp_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Ldelete_by_idx(group_id, "", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ldelete_by_idx succeeded with an invalid group name of ''!\n"); + PART_ERROR(H5Ldelete_by_idx_invalid_grp_name); + } + + PASSED(); + } + PART_END(H5Ldelete_by_idx_invalid_grp_name); + + PART_BEGIN(H5Ldelete_by_idx_invalid_index_type) + { + TESTING_2("H5Ldelete_by_idx with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = H5Ldelete_by_idx(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ldelete_by_idx succeeded with invalid index type H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Ldelete_by_idx_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Ldelete_by_idx(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ldelete_by_idx succeeded with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Ldelete_by_idx_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Ldelete_by_idx_invalid_index_type); + + PART_BEGIN(H5Ldelete_by_idx_invalid_index_order) + { + TESTING_2("H5Ldelete_by_idx with an invalid iteration ordering"); + + H5E_BEGIN_TRY + { + err_ret = H5Ldelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ldelete_by_idx succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Ldelete_by_idx_invalid_index_order); + } + + H5E_BEGIN_TRY + { + err_ret = H5Ldelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_N, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + 
HDprintf(" H5Ldelete_by_idx succeeded with invalid iteration ordering H5_ITER_N!\n"); + PART_ERROR(H5Ldelete_by_idx_invalid_index_order); + } + + PASSED(); + } + PART_END(H5Ldelete_by_idx_invalid_index_order); + + PART_BEGIN(H5Ldelete_by_idx_invalid_lapl) + { + TESTING_2("H5Ldelete_by_idx with an invalid LAPL"); +#ifndef NO_INVALID_PROPERTY_LIST_TESTS + H5E_BEGIN_TRY + { + err_ret = H5Ldelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ldelete_by_idx succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Ldelete_by_idx_invalid_lapl); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ldelete_by_idx_invalid_lapl); +#endif + } + PART_END(H5Ldelete_by_idx_invalid_lapl); + + PART_BEGIN(H5Ldelete_by_idx_link_existence) + { + TESTING_2("valid link existence after previous invalid H5Ldelete(_by_idx) calls"); + + /* Verify that the link hasn't been deleted */ + if ((link_exists = + H5Lexists(group_id, LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME); + PART_ERROR(H5Ldelete_by_idx_link_existence); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link didn't exist!\n"); + PART_ERROR(H5Ldelete_by_idx_link_existence); + } + + PASSED(); + } + PART_END(H5Ldelete_by_idx_link_existence); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a link can be copied using H5Lcopy. 
+ */
+static int
+test_copy_link(void)
+{
+    htri_t link_exists;
+    hid_t  file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID;
+    hid_t  container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+    hid_t  src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID;
+#ifndef NO_EXTERNAL_LINKS
+    char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+    TESTING_MULTIPART("link copying");
+
+    /* Make sure the connector supports the API functions being tested */
+    if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+        !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+        !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+        !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+        SKIPPED();
+        HDprintf("    API functions for basic file, group, or link, hard, soft, or external link aren't "
+                 "supported with this connector\n");
+        return 0;
+    }
+
+    TESTING_2("test setup");
+
+    if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        HDprintf("    couldn't open file '%s'\n", H5_api_test_filename);
+        goto error;
+    }
+
+    if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        HDprintf("    couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+        goto error;
+    }
+
+    if ((group_id = H5Gcreate2(container_group, COPY_LINK_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+                               H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        HDprintf("    couldn't create group '%s'\n", COPY_LINK_TEST_SUBGROUP_NAME);
+        goto error;
+    }
+
+    if ((src_grp_id = H5Gcreate2(group_id, COPY_LINK_TEST_SRC_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+                                 H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        HDprintf("    couldn't create group '%s'\n", COPY_LINK_TEST_SRC_GROUP_NAME);
+        goto error;
+    }
+
+    if ((dst_grp_id = H5Gcreate2(group_id, COPY_LINK_TEST_DST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+                                 H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        HDprintf("    couldn't create group '%s'\n", COPY_LINK_TEST_DST_GROUP_NAME);
+        goto error;
+    }
+
+    PASSED();
+
+    BEGIN_MULTIPART
+    {
+        PART_BEGIN(H5Lcopy_hard_no_check)
+        {
+            TESTING_2("H5Lcopy on hard link (copied link's properties not checked)");
+
+            /* Try to copy a hard link */
+            if (H5Lcreate_hard(group_id, ".", src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+                               H5P_DEFAULT) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't create hard link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME);
+                PART_ERROR(H5Lcopy_hard_no_check);
+            }
+
+            /* Verify the link has been created */
+            if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't determine if hard link '%s' exists\n", COPY_LINK_TEST_HARD_LINK_NAME);
+                PART_ERROR(H5Lcopy_hard_no_check);
+            }
+
+            if (!link_exists) {
+                H5_FAILED();
+                HDprintf("    hard link did not exist\n");
+                PART_ERROR(H5Lcopy_hard_no_check);
+            }
+
+            /* Verify the link doesn't currently exist in the target group */
+            if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't determine if hard link '%s' exists\n", COPY_LINK_TEST_HARD_LINK_NAME);
+                PART_ERROR(H5Lcopy_hard_no_check);
+            }
+
+            if (link_exists) {
+                H5_FAILED();
+                HDprintf("    hard link existed in target group before copy!\n");
+                PART_ERROR(H5Lcopy_hard_no_check);
+            }
+
+            /* Copy the link */
+            if (H5Lcopy(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME, dst_grp_id,
+                        COPY_LINK_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT,
H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy hard link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lcopy_hard_no_check); + } + + /* Verify the link has been copied and still exists in the source group */ + if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link copy '%s' exists\n", + COPY_LINK_TEST_HARD_LINK_COPY_NAME); + PART_ERROR(H5Lcopy_hard_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link copy did not exist\n"); + PART_ERROR(H5Lcopy_hard_no_check); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if original hard link '%s' exists\n", + COPY_LINK_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lcopy_hard_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original hard link did not exist\n"); + PART_ERROR(H5Lcopy_hard_no_check); + } + + PASSED(); + } + PART_END(H5Lcopy_hard_no_check); + + PART_BEGIN(H5Lcopy_hard_check) + { + H5L_info2_t orig_info, new_info; + int cmp_value; + + TESTING_2("H5Lcopy on hard link (copied link's properties checked)"); + + /* Try to copy a hard link */ + if (H5Lcreate_hard(group_id, ".", src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lcopy_hard_check); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", COPY_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lcopy_hard_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist\n"); + PART_ERROR(H5Lcopy_hard_check); + } + + /* Retrieve the link's info */ + if (H5Lget_info2(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve info for link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lcopy_hard_check); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", COPY_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lcopy_hard_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link existed in target group before copy!\n"); + PART_ERROR(H5Lcopy_hard_check); + } + + /* Copy the link */ + if (H5Lcopy(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, dst_grp_id, + COPY_LINK_TEST_HARD_LINK_COPY_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy hard link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lcopy_hard_check); + } + + /* Verify the link has been copied and still exists in the source group */ + if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_HARD_LINK_COPY_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link copy '%s' exists\n", + COPY_LINK_TEST_HARD_LINK_COPY_NAME2); + PART_ERROR(H5Lcopy_hard_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link copy did not exist\n"); + PART_ERROR(H5Lcopy_hard_check); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if 
original hard link '%s' exists\n", + COPY_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lcopy_hard_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original hard link did not exist\n"); + PART_ERROR(H5Lcopy_hard_check); + } + + /* Retrieve the new link's info */ + if (H5Lget_info2(dst_grp_id, COPY_LINK_TEST_HARD_LINK_COPY_NAME2, &new_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve info for link '%s'\n", COPY_LINK_TEST_HARD_LINK_COPY_NAME2); + PART_ERROR(H5Lcopy_hard_check); + } + + if (new_info.type != orig_info.type) { + H5_FAILED(); + HDprintf(" copied link's link type doesn't match original link's type\n"); + PART_ERROR(H5Lcopy_hard_check); + } + + if (H5Otoken_cmp(dst_grp_id, &new_info.u.token, &orig_info.u.token, &cmp_value) < 0) { + H5_FAILED(); + HDprintf(" failed to compare link target tokens\n"); + PART_ERROR(H5Lcopy_hard_check); + } + + if (cmp_value != 0) { + H5_FAILED(); + HDprintf(" copied hard link's object token doesn't match original link's object token\n"); + PART_ERROR(H5Lcopy_hard_check); + } + + if (new_info.corder_valid != orig_info.corder_valid) { + H5_FAILED(); + HDprintf(" copied link's 'corder_valid' field doesn't match original link's " + "'corder_valid' field\n"); + PART_ERROR(H5Lcopy_hard_check); + } + + if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) { + H5_FAILED(); + HDprintf(" copied link's creation order value %" PRId64 + " doesn't match original link's creation order value %" PRId64 "\n", + new_info.corder, orig_info.corder); + PART_ERROR(H5Lcopy_hard_check); + } + + if (new_info.cset != orig_info.cset) { + H5_FAILED(); + HDprintf(" copied link's character set doesn't match original link's character set\n"); + PART_ERROR(H5Lcopy_hard_check); + } + + PASSED(); + } + PART_END(H5Lcopy_hard_check); + + PART_BEGIN(H5Lcopy_hard_same_loc) + { + TESTING_2("H5Lcopy on hard link using H5L_SAME_LOC"); + + /* Try to copy a hard link */ + if (H5Lcreate_hard(group_id, ".", src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", COPY_LINK_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist\n"); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + /* Verify the links don't currently exist in the target group */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link existed in target group before copy!\n"); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link existed in target group before copy!\n"); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + /* Copy the link using 
H5L_SAME_LOC as the first parameter to H5Lcopy */ + if (H5Lcopy(H5L_SAME_LOC, COPY_LINK_TEST_HARD_LINK_NAME3, src_grp_id, + COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf( + " failed to copy hard link '%s' using H5L_SAME_LOC as first parameter to H5Lcopy\n", + COPY_LINK_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + /* Copy the link using H5L_SAME_LOC as the third parameter to H5Lcopy */ + if (H5Lcopy(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME3, H5L_SAME_LOC, + COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf( + " failed to copy hard link '%s' using H5L_SAME_LOC as third parameter to H5Lcopy\n", + COPY_LINK_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + /* Verify the links have been copied and the original still exist in the source group */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link copy '%s' exists\n", + COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link copy did not exist\n"); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link copy '%s' exists\n", + COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link copy did not exist\n"); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if original hard link '%s' exists\n", + COPY_LINK_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original hard link did not exist\n"); + PART_ERROR(H5Lcopy_hard_same_loc); + } + + PASSED(); + } + PART_END(H5Lcopy_hard_same_loc); + + PART_BEGIN(H5Lcopy_soft_no_check) + { + TESTING_2("H5Lcopy on soft link (copied link's properties not checked)"); + + /* Try to copy a soft link */ + if (H5Lcreate_soft(COPY_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id, + COPY_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lcopy_soft_no_check); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", COPY_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lcopy_soft_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link did not exist\n"); + PART_ERROR(H5Lcopy_soft_no_check); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", COPY_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lcopy_soft_no_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link existed in target group before copy!\n"); + PART_ERROR(H5Lcopy_soft_no_check); + } + + /* Copy the link */ + if (H5Lcopy(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME, dst_grp_id, + 
COPY_LINK_TEST_SOFT_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lcopy_soft_no_check); + } + + /* Verify the link has been copied and still exists in the source group */ + if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_SOFT_LINK_COPY_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' copy exists\n", + COPY_LINK_TEST_SOFT_LINK_COPY_NAME); + PART_ERROR(H5Lcopy_soft_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link copy did not exist\n"); + PART_ERROR(H5Lcopy_soft_no_check); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if original soft link '%s' exists\n", + COPY_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lcopy_soft_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original soft link did not exist\n"); + PART_ERROR(H5Lcopy_soft_no_check); + } + + PASSED(); + } + PART_END(H5Lcopy_soft_no_check); + + PART_BEGIN(H5Lcopy_soft_check) + { + H5L_info2_t orig_info, new_info; + char orig_link_val[COPY_LINK_TEST_LINK_VAL_BUF_SIZE]; + char new_link_val[COPY_LINK_TEST_LINK_VAL_BUF_SIZE]; + + TESTING_2("H5Lcopy on soft link (copied link's properties checked)"); + + /* Try to copy a soft link */ + if (H5Lcreate_soft(COPY_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id, + COPY_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lcopy_soft_check); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", COPY_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lcopy_soft_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link did not exist\n"); + PART_ERROR(H5Lcopy_soft_check); + } + + /* Retrieve the link's info */ + if (H5Lget_info2(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve info for link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lcopy_soft_check); + } + + /* Retrieve the link's value */ + if (H5Lget_val(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, orig_link_val, + COPY_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve value for soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lcopy_soft_check); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", COPY_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lcopy_soft_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link existed in target group before copy!\n"); + PART_ERROR(H5Lcopy_soft_check); + } + + /* Copy the link */ + if (H5Lcopy(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, dst_grp_id, + COPY_LINK_TEST_SOFT_LINK_COPY_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lcopy_soft_check); + } + + /* Verify the link has been copied and still exists in the source group */ + if ((link_exists = H5Lexists(dst_grp_id, 
COPY_LINK_TEST_SOFT_LINK_COPY_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' copy exists\n", + COPY_LINK_TEST_SOFT_LINK_COPY_NAME2); + PART_ERROR(H5Lcopy_soft_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link copy did not exist\n"); + PART_ERROR(H5Lcopy_soft_check); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if original soft link '%s' exists\n", + COPY_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lcopy_soft_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original soft link did not exist\n"); + PART_ERROR(H5Lcopy_soft_check); + } + + /* Retrieve the new link's info */ + if (H5Lget_info2(dst_grp_id, COPY_LINK_TEST_SOFT_LINK_COPY_NAME2, &new_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve info for link '%s'\n", COPY_LINK_TEST_SOFT_LINK_COPY_NAME2); + PART_ERROR(H5Lcopy_soft_check); + } + + if (new_info.type != orig_info.type) { + H5_FAILED(); + HDprintf(" copied link's link type doesn't match original link's type\n"); + PART_ERROR(H5Lcopy_soft_check); + } + + if (new_info.u.val_size != orig_info.u.val_size) { + H5_FAILED(); + HDprintf(" copied soft link's value size of %llu doesn't match original link's value size " + "of %llu\n", + (unsigned long long)new_info.u.val_size, (unsigned long long)orig_info.u.val_size); + PART_ERROR(H5Lcopy_soft_check); + } + + if (new_info.corder_valid != orig_info.corder_valid) { + H5_FAILED(); + HDprintf(" copied link's 'corder_valid' field doesn't match original link's " + "'corder_valid' field\n"); + PART_ERROR(H5Lcopy_soft_check); + } + + if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) { + H5_FAILED(); + HDprintf(" copied link's creation order value %" PRId64 + " doesn't match original link's creation order value %" PRId64 "\n", + new_info.corder, orig_info.corder); + PART_ERROR(H5Lcopy_soft_check); + } + + if (new_info.cset != orig_info.cset) { + H5_FAILED(); + HDprintf(" copied link's character set doesn't match original link's character set\n"); + PART_ERROR(H5Lcopy_soft_check); + } + + /* Check the soft link's value */ + if (H5Lget_val(dst_grp_id, COPY_LINK_TEST_SOFT_LINK_COPY_NAME2, new_link_val, + COPY_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve value for soft link '%s'\n", + COPY_LINK_TEST_SOFT_LINK_COPY_NAME2); + PART_ERROR(H5Lcopy_soft_check); + } + + if (HDstrncmp(orig_link_val, new_link_val, COPY_LINK_TEST_LINK_VAL_BUF_SIZE)) { + H5_FAILED(); + HDprintf(" copied soft link's value '%s' doesn't match original link's value '%s'\n", + new_link_val, orig_link_val); + PART_ERROR(H5Lcopy_soft_check); + } + + PASSED(); + } + PART_END(H5Lcopy_soft_check); + + PART_BEGIN(H5Lcopy_soft_same_loc) + { + TESTING_2("H5Lcopy on soft link using H5L_SAME_LOC"); + + /* Try to copy a soft link */ + if (H5Lcreate_soft(COPY_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id, + COPY_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", COPY_LINK_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + if 
(!link_exists) { + H5_FAILED(); + HDprintf(" soft link did not exist\n"); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + /* Verify the links don't currently exist in the target group */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link existed in target group before copy!\n"); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link existed in target group before copy!\n"); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + /* Copy the link using H5L_SAME_LOC as the first parameter to H5Lcopy */ + if (H5Lcopy(H5L_SAME_LOC, COPY_LINK_TEST_SOFT_LINK_NAME3, src_grp_id, + COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf( + " failed to copy soft link '%s' using H5L_SAME_LOC as first parameter to H5Lcopy\n", + COPY_LINK_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + /* Copy the link using H5L_SAME_LOC as the third parameter to H5Lcopy */ + if (H5Lcopy(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME3, H5L_SAME_LOC, + COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf( + " failed to copy soft link '%s' using H5L_SAME_LOC as third parameter to H5Lcopy\n", + COPY_LINK_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + /* Verify the links have been copied and the original still exists in the source group */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' copy exists\n", + COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link copy did not exist\n"); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' copy exists\n", + COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link copy did not exist\n"); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if original soft link '%s' exists\n", + COPY_LINK_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original soft link did not exist\n"); + PART_ERROR(H5Lcopy_soft_same_loc); + } + + PASSED(); + } + PART_END(H5Lcopy_soft_same_loc); + + PART_BEGIN(H5Lcopy_external_no_check) + { + TESTING_2("H5Lcopy on external link (copied link's properties not checked)"); +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + 
HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lcopy_external_no_check); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lcopy_external_no_check); + } + + /* Try to copy an external link */ + if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lcopy_external_no_check); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lcopy_external_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link did not exist\n"); + PART_ERROR(H5Lcopy_external_no_check); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lcopy_external_no_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link existed in target group before copy!\n"); + PART_ERROR(H5Lcopy_external_no_check); + } + + /* Copy the link */ + if (H5Lcopy(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME, dst_grp_id, + COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy external link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lcopy_external_no_check); + } + + /* Verify the link has been copied and still exists in the source group */ + if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link copy '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME); + PART_ERROR(H5Lcopy_external_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link copy did not exist\n"); + PART_ERROR(H5Lcopy_external_no_check); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if original external link '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lcopy_external_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original external link did not exist\n"); + PART_ERROR(H5Lcopy_external_no_check); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lcopy_external_no_check); +#endif + } + PART_END(H5Lcopy_external_no_check); + + H5E_BEGIN_TRY + { + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lcopy_external_check) + { +#ifndef NO_EXTERNAL_LINKS + H5L_info2_t orig_info, new_info; + const char *orig_filename, *new_filename; + const char *orig_objname, *new_objname; + unsigned unpack_flags = 0; + char orig_link_val[COPY_LINK_TEST_LINK_VAL_BUF_SIZE]; + char new_link_val[COPY_LINK_TEST_LINK_VAL_BUF_SIZE]; +#endif + + TESTING_2("H5Lcopy on external link (copied link's properties checked)"); +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = 
H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lcopy_external_check); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lcopy_external_check); + } + + /* Try to copy an external link */ + if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lcopy_external_check); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lcopy_external_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link did not exist\n"); + PART_ERROR(H5Lcopy_external_check); + } + + /* Retrieve the link's info */ + if (H5Lget_info2(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve info for link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lcopy_external_check); + } + + /* Retrieve the link's value */ + if (H5Lget_val(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, orig_link_val, + COPY_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve value for external link '%s'\n", + COPY_LINK_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lcopy_external_check); + } + + if (H5Lunpack_elink_val(orig_link_val, orig_info.u.val_size, &unpack_flags, &orig_filename, + &orig_objname) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack original external link's value buffer\n"); + PART_ERROR(H5Lcopy_external_check); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lcopy_external_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link existed in target group before copy!\n"); + PART_ERROR(H5Lcopy_external_check); + } + + /* Copy the link */ + if (H5Lcopy(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, dst_grp_id, + COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy external link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lcopy_external_check); + } + + /* Verify the link has been copied and still exists in the source group */ + if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link copy '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2); + PART_ERROR(H5Lcopy_external_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link copy did not exist\n"); + PART_ERROR(H5Lcopy_external_check); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if original external link '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lcopy_external_check); + 
}
+
+            if (!link_exists) {
+                H5_FAILED();
+                HDprintf("    original external link did not exist\n");
+                PART_ERROR(H5Lcopy_external_check);
+            }
+
+            /* Retrieve the new link's info */
+            if (H5Lget_info2(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2, &new_info, H5P_DEFAULT) <
+                0) {
+                H5_FAILED();
+                HDprintf("    couldn't retrieve info for link '%s'\n",
+                         COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2);
+                PART_ERROR(H5Lcopy_external_check);
+            }
+
+            if (new_info.type != orig_info.type) {
+                H5_FAILED();
+                HDprintf("    copied link's link type doesn't match original link's type\n");
+                PART_ERROR(H5Lcopy_external_check);
+            }
+
+            if (new_info.u.val_size != orig_info.u.val_size) {
+                H5_FAILED();
+                HDprintf("    copied external link's value size of %llu doesn't match original link's value "
+                         "size of %llu\n",
+                         (unsigned long long)new_info.u.val_size, (unsigned long long)orig_info.u.val_size);
+                PART_ERROR(H5Lcopy_external_check);
+            }
+
+            if (new_info.corder_valid != orig_info.corder_valid) {
+                H5_FAILED();
+                HDprintf("    copied link's 'corder_valid' field doesn't match original link's "
+                         "'corder_valid' field\n");
+                PART_ERROR(H5Lcopy_external_check);
+            }
+
+            if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) {
+                H5_FAILED();
+                HDprintf("    copied link's creation order value %" PRId64
+                         " doesn't match original link's creation order value %" PRId64 "\n",
+                         new_info.corder, orig_info.corder);
+                PART_ERROR(H5Lcopy_external_check);
+            }
+
+            if (new_info.cset != orig_info.cset) {
+                H5_FAILED();
+                HDprintf("    copied link's character set doesn't match original link's character set\n");
+                PART_ERROR(H5Lcopy_external_check);
+            }
+
+            /* Check the external link's value */
+            if (H5Lget_val(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2, new_link_val,
+                           COPY_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't retrieve value for external link '%s'\n",
+                         COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2);
+                PART_ERROR(H5Lcopy_external_check);
+            }
+
+            if (H5Lunpack_elink_val(new_link_val, new_info.u.val_size, &unpack_flags, &new_filename,
+                                    &new_objname) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't unpack copied external link's value buffer\n");
+                PART_ERROR(H5Lcopy_external_check);
+            }
+
+            if (HDstrncmp(new_filename, orig_filename, strlen(orig_filename)) != 0) {
+                H5_FAILED();
+                HDprintf("    copied external link's filename '%s' doesn't match original external link's "
+                         "filename '%s'\n",
+                         new_filename, orig_filename);
+                PART_ERROR(H5Lcopy_external_check);
+            }
+
+            if (HDstrncmp(new_objname, orig_objname, strlen(orig_objname)) != 0) {
+                H5_FAILED();
+                HDprintf("    copied external link's object name '%s' doesn't match original external link's "
+                         "object name '%s'\n",
+                         new_objname, orig_objname);
+                PART_ERROR(H5Lcopy_external_check);
+            }
+
+            PASSED();
+#else
+            SKIPPED();
+            PART_EMPTY(H5Lcopy_external_check);
+#endif
+        }
+        PART_END(H5Lcopy_external_check);
+
+        H5E_BEGIN_TRY
+        {
+            H5Fclose(ext_file_id);
+            ext_file_id = H5I_INVALID_HID;
+        }
+        H5E_END_TRY;
+
+        PART_BEGIN(H5Lcopy_external_same_loc)
+        {
+            TESTING_2("H5Lcopy on external link using H5L_SAME_LOC");
+#ifndef NO_EXTERNAL_LINKS
+            HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+                       EXTERNAL_LINK_TEST_FILE_NAME);
+
+            if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't create file '%s' for external link to reference\n", ext_link_filename);
+                PART_ERROR(H5Lcopy_external_same_loc);
+            }
+
+            if (H5Fclose(ext_file_id) < 0) {
+
H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lcopy_external_same_loc); + } + + /* Try to copy an external link */ + if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME3, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lcopy_external_same_loc); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lcopy_external_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link did not exist\n"); + PART_ERROR(H5Lcopy_external_same_loc); + } + + /* Verify the links don't currently exist in the target group */ + if ((link_exists = + H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lcopy_external_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link existed in target group before copy!\n"); + PART_ERROR(H5Lcopy_external_same_loc); + } + + if ((link_exists = + H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2); + PART_ERROR(H5Lcopy_external_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link existed in target group before copy!\n"); + PART_ERROR(H5Lcopy_external_same_loc); + } + + /* Copy the link using H5L_SAME_LOC as the first parameter to H5Lcopy */ + if (H5Lcopy(H5L_SAME_LOC, COPY_LINK_TEST_EXTERNAL_LINK_NAME3, src_grp_id, + COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy external link '%s' using H5L_SAME_LOC as first parameter to " + "H5Lcopy\n", + COPY_LINK_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lcopy_external_same_loc); + } + + /* Copy the link using H5L_SAME_LOC as the third parameter to H5Lcopy */ + if (H5Lcopy(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME3, H5L_SAME_LOC, + COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy external link '%s' using H5L_SAME_LOC as third parameter to " + "H5Lcopy\n", + COPY_LINK_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lcopy_external_same_loc); + } + + /* Verify the links have been copied and the original still exists in the source group */ + if ((link_exists = + H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link copy '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lcopy_external_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link copy did not exist\n"); + PART_ERROR(H5Lcopy_external_same_loc); + } + + if ((link_exists = + H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link copy '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2); + PART_ERROR(H5Lcopy_external_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" 
external link copy did not exist\n"); + PART_ERROR(H5Lcopy_external_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if original external link '%s' exists\n", + COPY_LINK_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lcopy_external_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original external link did not exist\n"); + PART_ERROR(H5Lcopy_external_same_loc); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lcopy_external_same_loc); +#endif + } + PART_END(H5Lcopy_external_same_loc); + + H5E_BEGIN_TRY + { + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lcopy_ud_no_check) + { + TESTING_2("H5Lcopy on user-defined link (copied link's properties not checked)"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lcopy_ud_no_check); + } + PART_END(H5Lcopy_ud_no_check); + + PART_BEGIN(H5Lcopy_ud_check) + { + TESTING_2("H5Lcopy on user-defined link (copied link's properties checked)"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lcopy_ud_check); + } + PART_END(H5Lcopy_ud_check); + + PART_BEGIN(H5Lcopy_ud_same_loc) + { + TESTING_2("H5Lcopy on user-defined link using H5L_SAME_LOC"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lcopy_ud_same_loc); + } + PART_END(H5Lcopy_ud_same_loc); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(dst_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(src_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(dst_grp_id); + H5Gclose(src_grp_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(ext_file_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that using H5Lcopy to copy links into a + * group which already contains links will cause the new links + * to have creation order values ranging from the target group's + * maximum link creation order value and upwards. This is to + * check that it is not possible to run into the situation where + * H5Lcopy might cause a group to have two links with the same + * creation order values. + */ +static int +test_copy_links_into_group_with_links(void) +{ + TESTING("H5Lcopy adjusting creation order values for copied links"); + + SKIPPED(); + + return 1; +} + +/* + * A test to check the behavior of copying a link across files. + * This should fail for hard links but succeed for soft and + * external links (and user-defined links of those types). + * + * TODO: Ideally, tests should be written to verify that the + * copied links retain the properties of the original + * links. + */ +static int +test_copy_link_across_files(void) +{ + TESTING("link copying across files"); + + /* TODO */ + + SKIPPED(); + + return 0; +} + +/* + * A test to check that a link can't be copied + * when H5Lcopy is passed invalid parameters. 
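+ *
+ * Each part below passes one invalid argument (a bad location ID, a NULL or
+ * empty link name, an invalid LCPL or LAPL, H5L_SAME_LOC for both location
+ * arguments, or a destination in a different file for a hard link) and
+ * expects the H5Lcopy call to fail.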
+ */ +static int +test_copy_link_invalid_params(void) +{ + herr_t err_ret = -1; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; + hid_t ext_file_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Lcopy with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or basic and more link aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, COPY_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", COPY_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME); + goto error; + } + + if ((src_grp_id = H5Gcreate2(group_id, COPY_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", COPY_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME); + goto error; + } + + if ((dst_grp_id = H5Gcreate2(group_id, COPY_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", COPY_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME); + goto error; + } + + if (H5Lcreate_hard(group_id, ".", src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lcopy_invalid_src_loc_id) + { + TESTING_2("H5Lcopy with an invalid source location ID"); + + H5E_BEGIN_TRY + { + err_ret = + H5Lcopy(H5I_INVALID_HID, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, + COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lcopy succeeded with an invalid source location ID\n"); + PART_ERROR(H5Lcopy_invalid_src_loc_id); + } + + PASSED(); + } + PART_END(H5Lcopy_invalid_src_loc_id); + + PART_BEGIN(H5Lcopy_invalid_src_name) + { + TESTING_2("H5Lcopy with an invalid source name"); + + H5E_BEGIN_TRY + { + err_ret = + H5Lcopy(src_grp_id, NULL, dst_grp_id, 
COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lcopy succeeded with a NULL source name\n"); + PART_ERROR(H5Lcopy_invalid_src_name); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Lcopy(src_grp_id, "", dst_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lcopy succeeded with an invalid source name of ''\n"); + PART_ERROR(H5Lcopy_invalid_src_name); + } + + PASSED(); + } + PART_END(H5Lcopy_invalid_src_name); + + PART_BEGIN(H5Lcopy_invalid_dst_loc_id) + { + TESTING_2("H5Lcopy with an invalid destination location ID"); + + H5E_BEGIN_TRY + { + err_ret = + H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5I_INVALID_HID, + COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lcopy succeeded with an invalid destination location ID\n"); + PART_ERROR(H5Lcopy_invalid_dst_loc_id); + } + + PASSED(); + } + PART_END(H5Lcopy_invalid_dst_loc_id); + + PART_BEGIN(H5Lcopy_invalid_dst_name) + { + TESTING_2("H5Lcopy with an invalid destination name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, NULL, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lcopy succeeded with a NULL destination name\n"); + PART_ERROR(H5Lcopy_invalid_dst_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, "", + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lcopy succeeded with an invalid destination name of ''\n"); + PART_ERROR(H5Lcopy_invalid_dst_name); + } + + PASSED(); + } + PART_END(H5Lcopy_invalid_dst_name); + + PART_BEGIN(H5Lcopy_invalid_lcpl) + { + TESTING_2("H5Lcopy with an invalid LCPL"); + + H5E_BEGIN_TRY + { + err_ret = + H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, + COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lcopy succeeded with an invalid LCPL\n"); + PART_ERROR(H5Lcopy_invalid_lcpl); + } + + PASSED(); + } + PART_END(H5Lcopy_invalid_lcpl); + + PART_BEGIN(H5Lcopy_invalid_lapl) + { + TESTING_2("H5Lcopy with an invalid LAPL"); +#ifndef NO_INVALID_PROPERTY_LIST_TESTS + H5E_BEGIN_TRY + { + err_ret = + H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, + COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lcopy succeeded with an invalid LAPL\n"); + PART_ERROR(H5Lcopy_invalid_lapl); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lcopy_invalid_lapl); +#endif + } + PART_END(H5Lcopy_invalid_lapl); + + PART_BEGIN(H5Lcopy_invalid_same_location) + { + TESTING_2("H5Lcopy with an invalid same location"); + + H5E_BEGIN_TRY + { + err_ret = + H5Lcopy(H5L_SAME_LOC, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5L_SAME_LOC, + COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lcopy succeeded with an invalid same location\n"); + PART_ERROR(H5Lcopy_invalid_same_location); + } + + PASSED(); + } + 
PART_END(H5Lcopy_invalid_same_location); + + PART_BEGIN(H5Lcopy_invalid_across_files) + { + TESTING_2("H5Lcopy invalid across files"); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lcopy_invalid_across_files); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, ext_file_id, + COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lcopy succeeded in copying a hard link across files!\n"); + PART_ERROR(H5Lcopy_invalid_across_files); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lcopy_invalid_across_files); + } + + PASSED(); + } + PART_END(H5Lcopy_invalid_across_files); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(dst_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(src_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(dst_grp_id); + H5Gclose(src_grp_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(ext_file_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a link can be moved with H5Lmove. + */ +static int +test_move_link(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; + hid_t ext_file_id = H5I_INVALID_HID; + + TESTING_MULTIPART("link moving"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or link, hard, soft, or external link aren't " + "supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, MOVE_LINK_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", MOVE_LINK_TEST_SUBGROUP_NAME); + goto error; + } + + if ((src_grp_id = H5Gcreate2(group_id, MOVE_LINK_TEST_SRC_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" 
couldn't create group '%s'\n", MOVE_LINK_TEST_SRC_GROUP_NAME); + goto error; + } + + if ((dst_grp_id = H5Gcreate2(group_id, MOVE_LINK_TEST_DST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", MOVE_LINK_TEST_DST_GROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lmove_hard_no_check) + { + TESTING_2("H5Lmove on hard link (moved link's properties not checked)"); + + /* Try to move a hard link */ + if (H5Lcreate_hard(group_id, ".", src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lmove_hard_no_check); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lmove_hard_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist\n"); + PART_ERROR(H5Lmove_hard_no_check); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lmove_hard_no_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link existed in target group before move!\n"); + PART_ERROR(H5Lmove_hard_no_check); + } + + /* Move the link */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lmove_hard_no_check); + } + + /* Verify the link has been moved */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lmove_hard_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist\n"); + PART_ERROR(H5Lmove_hard_no_check); + } + + /* Verify the old link is gone */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old hard link '%s' exists\n", + MOVE_LINK_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lmove_hard_no_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" old hard link exists\n"); + PART_ERROR(H5Lmove_hard_no_check); + } + + PASSED(); + } + PART_END(H5Lmove_hard_no_check); + + PART_BEGIN(H5Lmove_hard_check) + { + H5L_info2_t orig_info, new_info; + int cmp_value; + + TESTING_2("H5Lmove on hard link (moved link's properties checked)"); + + /* Try to move a hard link */ + if (H5Lcreate_hard(group_id, ".", src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lmove_hard_check); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lmove_hard_check); + } + + if (!link_exists) { + H5_FAILED(); + 
HDprintf(" hard link did not exist\n"); + PART_ERROR(H5Lmove_hard_check); + } + + /* Retrieve the link's info */ + if (H5Lget_info2(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lmove_hard_check); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lmove_hard_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link existed in target group before move!\n"); + PART_ERROR(H5Lmove_hard_check); + } + + /* Move the link */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, dst_grp_id, + MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lmove_hard_check); + } + + /* Verify the link has been moved */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lmove_hard_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist\n"); + PART_ERROR(H5Lmove_hard_check); + } + + /* Verify the old link is gone */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old hard link '%s' exists\n", + MOVE_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lmove_hard_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" old hard link exists\n"); + PART_ERROR(H5Lmove_hard_check); + } + + /* Retrieve the moved link's info */ + if (H5Lget_info2(dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, &new_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lmove_hard_check); + } + + if (new_info.type != orig_info.type) { + H5_FAILED(); + HDprintf(" moved link's link type doesn't match original link's type\n"); + PART_ERROR(H5Lmove_hard_check); + } + + if (H5Otoken_cmp(dst_grp_id, &new_info.u.token, &orig_info.u.token, &cmp_value) < 0) { + H5_FAILED(); + HDprintf(" failed to compare link target tokens\n"); + PART_ERROR(H5Lmove_hard_check); + } + + if (cmp_value != 0) { + H5_FAILED(); + HDprintf(" moved hard link's object token doesn't match original link's object token\n"); + PART_ERROR(H5Lmove_hard_check); + } + + if (new_info.corder_valid != orig_info.corder_valid) { + H5_FAILED(); + HDprintf(" moved link's 'corder_valid' field doesn't match original link's 'corder_valid' " + "field\n"); + PART_ERROR(H5Lmove_hard_check); + } + + if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) { + H5_FAILED(); + HDprintf(" moved link's creation order value %" PRId64 + " doesn't match original link's creation order value %" PRId64 "\n", + new_info.corder, orig_info.corder); + PART_ERROR(H5Lmove_hard_check); + } + + if (new_info.cset != orig_info.cset) { + H5_FAILED(); + HDprintf(" moved link's character set doesn't match original link's character set\n"); + PART_ERROR(H5Lmove_hard_check); + } + + PASSED(); + } + PART_END(H5Lmove_hard_check); + + PART_BEGIN(H5Lmove_hard_same_loc) + { + TESTING_2("H5Lmove on 
hard link using H5L_SAME_LOC"); + + /* Try to move a hard link */ + if (H5Lcreate_hard(group_id, ".", src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lmove_hard_same_loc); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lmove_hard_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist\n"); + PART_ERROR(H5Lmove_hard_same_loc); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_hard_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link existed in target group before move!\n"); + PART_ERROR(H5Lmove_hard_same_loc); + } + + /* Rename the link using H5L_SAME_LOC as the first parameter to H5Lmove */ + if (H5Lmove(H5L_SAME_LOC, MOVE_LINK_TEST_HARD_LINK_NAME3, src_grp_id, + MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s' using H5L_SAME_LOC as first parameter to H5Lmove\n", + MOVE_LINK_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lmove_hard_same_loc); + } + + /* Ensure the link has been renamed */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lmove_hard_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" original hard link existed in target group after move!\n"); + PART_ERROR(H5Lmove_hard_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_hard_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist after move!\n"); + PART_ERROR(H5Lmove_hard_same_loc); + } + + /* Rename the link back using H5L_SAME_LOC as the third parameter to H5Lmove */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5L_SAME_LOC, + MOVE_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s' using H5L_SAME_LOC as third parameter to H5Lmove\n", + MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_hard_same_loc); + } + + /* Verify the link has been renamed back */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lmove_hard_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original hard link did not exist after moving the link back!\n"); + PART_ERROR(H5Lmove_hard_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old hard link '%s' exists\n", + 
MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_hard_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" renamed hard link exists after moving the link back!\n"); + PART_ERROR(H5Lmove_hard_same_loc); + } + + PASSED(); + } + PART_END(H5Lmove_hard_same_loc); + + PART_BEGIN(H5Lmove_hard_rename) + { + TESTING_2("H5Lmove to rename hard link without moving it"); + + /* Try to rename a hard link */ + if (H5Lcreate_hard(group_id, ".", src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME4, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME4); + PART_ERROR(H5Lmove_hard_rename); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME4, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME4); + PART_ERROR(H5Lmove_hard_rename); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist\n"); + PART_ERROR(H5Lmove_hard_rename); + } + + /* Verify the renamed link doesn't currently exist in the source group */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NEW_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if renamed hard link '%s' exists\n", + MOVE_LINK_TEST_HARD_LINK_NEW_NAME); + PART_ERROR(H5Lmove_hard_rename); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" renamed hard link existed in source group before move!\n"); + PART_ERROR(H5Lmove_hard_rename); + } + + /* Rename the link */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME4, src_grp_id, + MOVE_LINK_TEST_HARD_LINK_NEW_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to rename link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME4); + PART_ERROR(H5Lmove_hard_rename); + } + + /* Verify the link has been renamed */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NEW_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if renamed hard link '%s' exists\n", + MOVE_LINK_TEST_HARD_LINK_NEW_NAME); + PART_ERROR(H5Lmove_hard_rename); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" renamed hard link did not exist\n"); + PART_ERROR(H5Lmove_hard_rename); + } + + /* Verify the old link is gone */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME4, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old hard link '%s' exists\n", + MOVE_LINK_TEST_HARD_LINK_NAME4); + PART_ERROR(H5Lmove_hard_rename); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" old hard link exists\n"); + PART_ERROR(H5Lmove_hard_rename); + } + + PASSED(); + } + PART_END(H5Lmove_hard_rename); + + PART_BEGIN(H5Lmove_soft_no_check) + { + TESTING_2("H5Lmove on soft link (moved link's properties not checked)"); + + /* Try to move a soft link */ + if (H5Lcreate_soft(MOVE_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id, + MOVE_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lmove_soft_no_check); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lmove_soft_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link did not exist\n"); + 
PART_ERROR(H5Lmove_soft_no_check); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lmove_soft_no_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link existed in target group before move!\n"); + PART_ERROR(H5Lmove_soft_no_check); + } + + /* Move the link */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME, dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lmove_soft_no_check); + } + + /* Verify the link has been moved */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lmove_soft_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link did not exist\n"); + PART_ERROR(H5Lmove_soft_no_check); + } + + /* Verify the old link is gone */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old soft link '%s' exists\n", + MOVE_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lmove_soft_no_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" old soft link exists\n"); + PART_ERROR(H5Lmove_soft_no_check); + } + + PASSED(); + } + PART_END(H5Lmove_soft_no_check); + + PART_BEGIN(H5Lmove_soft_check) + { + H5L_info2_t orig_info, new_info; + char orig_link_val[MOVE_LINK_TEST_LINK_VAL_BUF_SIZE]; + char new_link_val[MOVE_LINK_TEST_LINK_VAL_BUF_SIZE]; + + TESTING_2("H5Lmove on soft link (moved link's properties checked)"); + + /* Try to move a soft link */ + if (H5Lcreate_soft(MOVE_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id, + MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lmove_soft_check); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lmove_soft_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link did not exist\n"); + PART_ERROR(H5Lmove_soft_check); + } + + /* Retrieve the link's info */ + if (H5Lget_info2(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lmove_soft_check); + } + + /* Retrieve the link's value */ + if (H5Lget_val(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, orig_link_val, + MOVE_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve value for soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lmove_soft_check); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lmove_soft_check); + } + + if (link_exists) { + 
H5_FAILED(); + HDprintf(" soft link existed in target group before move!\n"); + PART_ERROR(H5Lmove_soft_check); + } + + /* Move the link */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, dst_grp_id, + MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lmove_soft_check); + } + + /* Verify the link has been moved */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lmove_soft_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link did not exist\n"); + PART_ERROR(H5Lmove_soft_check); + } + + /* Verify the old link is gone */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old soft link '%s' exists\n", + MOVE_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lmove_soft_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" old soft link exists\n"); + PART_ERROR(H5Lmove_soft_check); + } + + /* Retrieve the moved link's info */ + if (H5Lget_info2(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, &new_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lmove_soft_check); + } + + if (new_info.type != orig_info.type) { + H5_FAILED(); + HDprintf(" moved link's link type doesn't match original link's type\n"); + PART_ERROR(H5Lmove_soft_check); + } + + if (new_info.u.val_size != orig_info.u.val_size) { + H5_FAILED(); + HDprintf(" moved soft link's value size of %llu doesn't match original link's value size " + "of %llu\n", + (unsigned long long)new_info.u.val_size, (unsigned long long)orig_info.u.val_size); + PART_ERROR(H5Lmove_soft_check); + } + + if (new_info.corder_valid != orig_info.corder_valid) { + H5_FAILED(); + HDprintf(" moved link's 'corder_valid' field doesn't match original link's 'corder_valid' " + "field\n"); + PART_ERROR(H5Lmove_soft_check); + } + + if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) { + H5_FAILED(); + HDprintf(" moved link's creation order value %" PRId64 + " doesn't match original link's creation order value %" PRId64 "\n", + new_info.corder, orig_info.corder); + PART_ERROR(H5Lmove_soft_check); + } + + if (new_info.cset != orig_info.cset) { + H5_FAILED(); + HDprintf(" moved link's character set doesn't match original link's character set\n"); + PART_ERROR(H5Lmove_soft_check); + } + + /* Check the soft link's value */ + if (H5Lget_val(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, new_link_val, + MOVE_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve value for soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lmove_soft_check); + } + + if (HDstrncmp(orig_link_val, new_link_val, MOVE_LINK_TEST_LINK_VAL_BUF_SIZE)) { + H5_FAILED(); + HDprintf(" moved soft link's value '%s' doesn't match original link's value '%s'\n", + new_link_val, orig_link_val); + PART_ERROR(H5Lmove_soft_check); + } + + PASSED(); + } + PART_END(H5Lmove_soft_check); + + PART_BEGIN(H5Lmove_soft_same_loc) + { + TESTING_2("H5Lmove on soft link using H5L_SAME_LOC"); + + /* Try to move a soft link */ + if (H5Lcreate_soft(MOVE_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id, + MOVE_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, 
H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lmove_soft_same_loc); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lmove_soft_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link did not exist\n"); + PART_ERROR(H5Lmove_soft_same_loc); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_soft_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" soft link existed in target group before move!\n"); + PART_ERROR(H5Lmove_soft_same_loc); + } + + /* Rename the link using H5L_SAME_LOC as the first parameter to H5Lmove */ + if (H5Lmove(H5L_SAME_LOC, MOVE_LINK_TEST_SOFT_LINK_NAME3, src_grp_id, + MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s' using H5L_SAME_LOC as first parameter to H5Lmove\n", + MOVE_LINK_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lmove_soft_same_loc); + } + + /* Ensure the link has been renamed */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lmove_soft_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" original soft link existed in target group after move!\n"); + PART_ERROR(H5Lmove_soft_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_soft_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link did not exist after move!\n"); + PART_ERROR(H5Lmove_soft_same_loc); + } + + /* Rename the link back using H5L_SAME_LOC as the third parameter to H5Lmove */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5L_SAME_LOC, + MOVE_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s' using H5L_SAME_LOC as third parameter to H5Lmove\n", + MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_soft_same_loc); + } + + /* Verify the link has been renamed back */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lmove_soft_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original soft link did not exist after moving the link back!\n"); + PART_ERROR(H5Lmove_soft_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old soft link '%s' exists\n", + MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_soft_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" renamed soft link exists after moving the link back!\n"); 
+ PART_ERROR(H5Lmove_soft_same_loc); + } + + PASSED(); + } + PART_END(H5Lmove_soft_same_loc); + + PART_BEGIN(H5Lmove_soft_rename) + { + TESTING_2("H5Lmove to rename soft link without moving it"); + + /* Try to rename a soft link */ + if (H5Lcreate_soft(MOVE_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id, + MOVE_LINK_TEST_SOFT_LINK_NAME4, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME4); + PART_ERROR(H5Lmove_soft_rename); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME4, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME4); + PART_ERROR(H5Lmove_soft_rename); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link did not exist\n"); + PART_ERROR(H5Lmove_soft_rename); + } + + /* Verify the renamed link doesn't currently exist in the source group */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NEW_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if renamed soft link '%s' exists\n", + MOVE_LINK_TEST_SOFT_LINK_NEW_NAME); + PART_ERROR(H5Lmove_soft_rename); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" renamed soft link existed in source group before move!\n"); + PART_ERROR(H5Lmove_soft_rename); + } + + /* Rename the link */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME4, src_grp_id, + MOVE_LINK_TEST_SOFT_LINK_NEW_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to rename link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME4); + PART_ERROR(H5Lmove_soft_rename); + } + + /* Verify the link has been renamed */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NEW_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if renamed soft link '%s' exists\n", + MOVE_LINK_TEST_SOFT_LINK_NEW_NAME); + PART_ERROR(H5Lmove_soft_rename); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" renamed soft link did not exist\n"); + PART_ERROR(H5Lmove_soft_rename); + } + + /* Verify the old link is gone */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME4, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old soft link '%s' exists\n", + MOVE_LINK_TEST_SOFT_LINK_NAME4); + PART_ERROR(H5Lmove_soft_rename); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" old soft link exists\n"); + PART_ERROR(H5Lmove_soft_rename); + } + + PASSED(); + } + PART_END(H5Lmove_soft_rename); + + PART_BEGIN(H5Lmove_external_no_check) + { + TESTING_2("H5Lmove on external link (moved link's properties not checked)"); +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lmove_external_no_check); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lmove_external_no_check); + } + + /* Try to move an external link */ + if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME); + 
PART_ERROR(H5Lmove_external_no_check); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME); + PART_ERROR(H5Lmove_external_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link did not exist\n"); + PART_ERROR(H5Lmove_external_no_check); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME); + PART_ERROR(H5Lmove_external_no_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link existed in target group before move!\n"); + PART_ERROR(H5Lmove_external_no_check); + } + + /* Move the link */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME, dst_grp_id, + MOVE_LINK_TEST_EXTERN_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME); + PART_ERROR(H5Lmove_external_no_check); + } + + /* Verify the link has been moved */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME); + PART_ERROR(H5Lmove_external_no_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link did not exist\n"); + PART_ERROR(H5Lmove_external_no_check); + } + + /* Verify the old link is gone */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME); + PART_ERROR(H5Lmove_external_no_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" old external link exists\n"); + PART_ERROR(H5Lmove_external_no_check); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lmove_external_no_check); +#endif + } + PART_END(H5Lmove_external_no_check); + + H5E_BEGIN_TRY + { + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lmove_external_check) + { +#ifndef NO_EXTERNAL_LINKS + H5L_info2_t orig_info, new_info; + const char *orig_filename, *new_filename; + const char *orig_objname, *new_objname; + unsigned unpack_flags = 0; + char orig_link_val[MOVE_LINK_TEST_LINK_VAL_BUF_SIZE]; + char new_link_val[MOVE_LINK_TEST_LINK_VAL_BUF_SIZE]; +#endif + + TESTING_2("H5Lmove on external link (moved link's properties checked)"); +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lmove_external_check); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lmove_external_check); + } + + /* Try to move an external link */ + if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", 
MOVE_LINK_TEST_EXTERN_LINK_NAME2); + PART_ERROR(H5Lmove_external_check); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME2); + PART_ERROR(H5Lmove_external_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link did not exist\n"); + PART_ERROR(H5Lmove_external_check); + } + + /* Retrieve the link's info */ + if (H5Lget_info2(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME2); + PART_ERROR(H5Lmove_external_check); + } + + /* Retrieve the link's value */ + if (H5Lget_val(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, orig_link_val, + MOVE_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve value for external link '%s'\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME2); + PART_ERROR(H5Lmove_external_check); + } + + if (H5Lunpack_elink_val(orig_link_val, orig_info.u.val_size, &unpack_flags, &orig_filename, + &orig_objname) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack original external link's value buffer\n"); + PART_ERROR(H5Lmove_external_check); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME2); + PART_ERROR(H5Lmove_external_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link existed in target group before move!\n"); + PART_ERROR(H5Lmove_external_check); + } + + /* Move the link */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, dst_grp_id, + MOVE_LINK_TEST_EXTERN_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME2); + PART_ERROR(H5Lmove_external_check); + } + + /* Verify the link has been moved */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME2); + PART_ERROR(H5Lmove_external_check); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link did not exist\n"); + PART_ERROR(H5Lmove_external_check); + } + + /* Verify the old link is gone */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME2); + PART_ERROR(H5Lmove_external_check); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" old external link exists\n"); + PART_ERROR(H5Lmove_external_check); + } + + /* Retrieve the moved link's info */ + if (H5Lget_info2(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, &new_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME2); + PART_ERROR(H5Lmove_external_check); + } + + if (new_info.type != orig_info.type) { + H5_FAILED(); + HDprintf(" moved link's link type doesn't match original link's type\n"); + PART_ERROR(H5Lmove_external_check); + } + + if (new_info.u.val_size != orig_info.u.val_size) { + H5_FAILED(); + HDprintf(" moved external link's value 
size of %llu doesn't match original link's value " + "size of %llu\n", + (unsigned long long)new_info.u.val_size, (unsigned long long)orig_info.u.val_size); + PART_ERROR(H5Lmove_external_check); + } + + if (new_info.corder_valid != orig_info.corder_valid) { + H5_FAILED(); + HDprintf(" moved link's 'corder_valid' field doesn't match original link's 'corder_valid' " + "field\n"); + PART_ERROR(H5Lmove_external_check); + } + + if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) { + H5_FAILED(); + HDprintf(" moved link's creation order value %lld doesn't match original link's creation " + "order value %lld\n", + new_info.corder, orig_info.corder); + PART_ERROR(H5Lmove_external_check); + } + + if (new_info.cset != orig_info.cset) { + H5_FAILED(); + HDprintf(" moved link's character set doesn't match original link's character set\n"); + PART_ERROR(H5Lmove_external_check); + } + + /* Check the external link's value */ + if (H5Lget_val(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, new_link_val, + MOVE_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't retrieve value for external link '%s'\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME2); + PART_ERROR(H5Lmove_external_check); + } + + if (H5Lunpack_elink_val(new_link_val, new_info.u.val_size, &unpack_flags, &new_filename, + &new_objname) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack moved external link's value buffer\n"); + PART_ERROR(H5Lmove_external_check); + } + + if (HDstrncmp(new_filename, orig_filename, strlen(orig_filename)) != 0) { + H5_FAILED(); + HDprintf(" moved external link's filename '%s' doesn't match original external link's " + "filename '%s'\n", + new_filename, orig_filename); + PART_ERROR(H5Lmove_external_check); + } + + if (HDstrncmp(new_objname, orig_objname, strlen(orig_objname)) != 0) { + H5_FAILED(); + HDprintf(" moved external link's object name '%s' doesn't match original external link's " + "object name '%s'\n", + new_objname, orig_objname); + PART_ERROR(H5Lmove_external_check); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lmove_external_check); +#endif + } + PART_END(H5Lmove_external_check); + + H5E_BEGIN_TRY + { + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lmove_external_same_loc) + { + TESTING_2("H5Lmove on external link using H5L_SAME_LOC"); +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lmove_external_same_loc); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lmove_external_same_loc); + } + + /* Try to move an external link */ + if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME3, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME3); + PART_ERROR(H5Lmove_external_same_loc); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME3); + PART_ERROR(H5Lmove_external_same_loc); + } + + if 
(!link_exists) { + H5_FAILED(); + HDprintf(" external link did not exist\n"); + PART_ERROR(H5Lmove_external_same_loc); + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_external_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" external link existed in target group before move!\n"); + PART_ERROR(H5Lmove_external_same_loc); + } + + /* Rename the link using H5L_SAME_LOC as the first parameter to H5Lmove */ + if (H5Lmove(H5L_SAME_LOC, MOVE_LINK_TEST_EXTERN_LINK_NAME3, src_grp_id, + MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME3); + PART_ERROR(H5Lmove_external_same_loc); + } + + /* Ensure the link has been renamed */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME3); + PART_ERROR(H5Lmove_external_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" original external link existed in target group after move!\n"); + PART_ERROR(H5Lmove_external_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_external_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link did not exist after move!\n"); + PART_ERROR(H5Lmove_external_same_loc); + } + + /* Rename the link back using H5L_SAME_LOC as the third parameter to H5Lmove */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME, H5L_SAME_LOC, + MOVE_LINK_TEST_EXTERN_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_external_same_loc); + } + + /* Verify the link has been renamed back */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME3); + PART_ERROR(H5Lmove_external_same_loc); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original external link did not exist after moving the link back!\n"); + PART_ERROR(H5Lmove_external_same_loc); + } + + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME); + PART_ERROR(H5Lmove_external_same_loc); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" renamed external link exists after moving the link back!\n"); + PART_ERROR(H5Lmove_external_same_loc); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lmove_external_same_loc); +#endif + } + PART_END(H5Lmove_external_same_loc); + + H5E_BEGIN_TRY + { + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lmove_external_rename) + { + TESTING_2("H5Lmove to rename external link without moving it"); +#ifndef NO_EXTERNAL_LINKS + 
HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lmove_external_rename); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lmove_external_rename); + } + + /* Try to move an external link */ + if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME4, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME4); + PART_ERROR(H5Lmove_external_rename); + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME4, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME4); + PART_ERROR(H5Lmove_external_rename); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link did not exist\n"); + PART_ERROR(H5Lmove_external_rename); + } + + /* Verify the renamed link doesn't currently exist in the source group */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if renamed external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME); + PART_ERROR(H5Lmove_external_rename); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" renamed external link existed in source group before move!\n"); + PART_ERROR(H5Lmove_external_rename); + } + + /* Rename the link */ + if (H5Lmove(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME4, src_grp_id, + MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to rename link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME4); + PART_ERROR(H5Lmove_external_rename); + } + + /* Verify the link has been renamed */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if renamed external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME); + PART_ERROR(H5Lmove_external_rename); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" renamed external link did not exist\n"); + PART_ERROR(H5Lmove_external_rename); + } + + /* Verify the old link is gone */ + if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME4, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if old external link '%s' exists\n", + MOVE_LINK_TEST_EXTERN_LINK_NAME4); + PART_ERROR(H5Lmove_external_rename); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" old external link exists\n"); + PART_ERROR(H5Lmove_external_rename); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lmove_external_rename); +#endif + } + PART_END(H5Lmove_external_rename); + + H5E_BEGIN_TRY + { + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lmove_ud_no_check) + { + TESTING_2("H5Lmove on user-defined link (moved link's properties not checked)"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lmove_ud_no_check); + } + PART_END(H5Lmove_ud_no_check); + + PART_BEGIN(H5Lmove_ud_check) + { + TESTING_2("H5Lmove on user-defined link (moved link's properties checked)"); + + /* TODO */ + + SKIPPED(); + 
PART_EMPTY(H5Lmove_ud_check); + } + PART_END(H5Lmove_ud_check); + + PART_BEGIN(H5Lmove_ud_same_loc) + { + TESTING_2("H5Lmove on user-defined link using H5L_SAME_LOC"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lmove_ud_same_loc); + } + PART_END(H5Lmove_ud_same_loc); + + PART_BEGIN(H5Lmove_ud_rename) + { + TESTING_2("H5Lmove to rename user-defined link without moving it"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lmove_ud_rename); + } + PART_END(H5Lmove_ud_rename); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(dst_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(src_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(dst_grp_id); + H5Gclose(src_grp_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + H5Fclose(ext_file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that using H5Lmove to move links into a + * group which already contains links will cause the new links + * to have creation order values ranging from the target group's + * maximum link creation order value and upwards. This is to + * check that it is not possible to run into the situation where + * H5Lmove might cause a group to have two links with the same + * creation order values. + */ +static int +test_move_links_into_group_with_links(void) +{ + H5L_info2_t link_info; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + char link_name[MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_BUF_SIZE]; + + TESTING("H5Lmove adjusting creation order values for moved links"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or basic or hard link, or creation order aren't " + "supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SUBGROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((src_grp_id = H5Gcreate2(group_id, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SRC_GRP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", 
MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SRC_GRP_NAME); + goto error; + } + + if ((dst_grp_id = H5Gcreate2(group_id, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_DST_GRP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_DST_GRP_NAME); + goto error; + } + + /* Create several links in the source group */ + for (i = 0; i < MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS; i++) { + snprintf(link_name, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_BUF_SIZE, "link_to_move%d", (int)i); + + if (H5Lcreate_hard(src_grp_id, ".", src_grp_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create link '%s' in source group\n", link_name); + goto error; + } + + /* Check the current creation order value for each link */ + memset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info2(src_grp_id, link_name, &link_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve info for link '%s'\n", link_name); + goto error; + } + + if (!link_info.corder_valid) { + H5_FAILED(); + HDprintf(" creation order value for newly-created link '%s' was marked as not valid!\n", + link_name); + goto error; + } + + if (link_info.corder != (int64_t)i) { + H5_FAILED(); + HDprintf(" creation order value %lld for link '%s' did not match expected value %lld\n", + (long long)link_info.corder, link_name, (long long)i); + goto error; + } + } + + /* Create several links in the destination group */ + for (i = 0; i < MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS; i++) { + snprintf(link_name, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_BUF_SIZE, "link%d", (int)i); + + if (H5Lcreate_hard(dst_grp_id, ".", dst_grp_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create link '%s' in destination group\n", link_name); + goto error; + } + } + + /* Move all the links from the source group into the destination group */ + for (i = 0; i < MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS; i++) { + snprintf(link_name, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_BUF_SIZE, "link_to_move%d", (int)i); + + if (H5Lmove(src_grp_id, link_name, dst_grp_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s' from source group to destination group\n", link_name); + goto error; + } + + /* Check that the creation order value for each moved link has been adjusted */ + memset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info2(dst_grp_id, link_name, &link_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve info for link '%s'\n", link_name); + goto error; + } + + if (!link_info.corder_valid) { + H5_FAILED(); + HDprintf(" creation order value for moved link '%s' was marked as not valid!\n", link_name); + goto error; + } + + if (link_info.corder != (int64_t)(i + MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS)) { + H5_FAILED(); + HDprintf(" creation order value for moved link '%s' was not adjusted after move! 
It should " + "have been %lld but was %lld\n", + link_name, (long long)(i + MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS), + (long long)link_info.corder); + goto error; + } + } + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(dst_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(src_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(dst_grp_id); + H5Gclose(src_grp_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the behavior of moving a link across files. + * This should fail for hard links but succeed for soft and + * external links (and user-defined links of those types). + * + * TODO: Ideally, tests should be written to verify that the + * moved links retain their original properties. + */ +static int +test_move_link_across_files(void) +{ + TESTING("link moving across files"); + + /* TODO */ + + SKIPPED(); + + return 0; +} + +/* + * A test to check that a group's always-increasing + * maximum link creation order value gets reset once + * all the links have been moved out of the group. + */ +static int +test_move_link_reset_grp_max_crt_order(void) +{ +#ifndef NO_MAX_LINK_CRT_ORDER_RESET + H5G_info_t grp_info; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + char link_name[MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE]; +#endif + + TESTING("H5Lmove of all links out of group resets group's maximum link creation order value"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, more or hard link, or creation order aren't " + "supported with this connector\n"); + return 0; + } + +#ifndef NO_MAX_LINK_CRT_ORDER_RESET + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME); + goto error; + } + + if ((src_grp_id = H5Gcreate2(group_id, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SRC_GRP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" 
couldn't create group '%s'\n", MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SRC_GRP_NAME); + goto error; + } + + if ((dst_grp_id = H5Gcreate2(group_id, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_DST_GRP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_DST_GRP_NAME); + goto error; + } + + /* Create several links inside the source group */ + for (i = 0; i < MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) { + snprintf(link_name, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d", (int)i); + + if (H5Lcreate_hard(src_grp_id, ".", src_grp_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s' in source group\n", link_name); + goto error; + } + } + + /* + * Move links out of the source group and into the destination group, checking the + * source group's maximum creation order value each time. + */ + for (i = 0; i < MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) { + memset(&grp_info, 0, sizeof(grp_info)); + + if (H5Gget_info(src_grp_id, &grp_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve source group's info\n"); + goto error; + } + + if (grp_info.max_corder != MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" source group's maximum creation order value got adjusted to %lld during link " + "moving; value should have remained at %lld\n", + (long long)grp_info.max_corder, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS); + goto error; + } + + snprintf(link_name, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d", (int)i); + + if (H5Lmove(src_grp_id, link_name, dst_grp_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to move link '%s' to destination group\n", link_name); + goto error; + } + } + + /* + * Ensure the source group's maximum creation order value has now + * reset to 0 after all the links have been moved out of it. + */ + memset(&grp_info, 0, sizeof(grp_info)); + + if (H5Gget_info(src_grp_id, &grp_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve source group's info\n"); + goto error; + } + + if (grp_info.max_corder != 0) { + H5_FAILED(); + HDprintf(" source group's maximum creation order value didn't reset to 0 after moving all links " + "out of it; value is still %lld\n", + (long long)grp_info.max_corder); + goto error; + } + + /* For good measure, check that destination group's max. 
creation order value is as expected */ + memset(&grp_info, 0, sizeof(grp_info)); + + if (H5Gget_info(dst_grp_id, &grp_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve destination group's info\n"); + goto error; + } + + if (grp_info.max_corder != MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" destination group's maximum creation order value of %lld didn't match expected value " + "of %lld after moving all links into it\n", + (long long)grp_info.max_corder, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS); + goto error; + } + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(dst_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(src_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(dst_grp_id); + H5Gclose(src_grp_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check that H5Lmove fails when it is given + * invalid parameters. + */ +static int +test_move_link_invalid_params(void) +{ + htri_t link_exists; + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; + hid_t ext_file_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Lmove with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, more or hard link aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, MOVE_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", MOVE_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME); + goto error; + } + + if ((src_grp_id = H5Gcreate2(group_id, MOVE_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", MOVE_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME); + goto error; + } + + if ((dst_grp_id = H5Gcreate2(group_id, MOVE_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", MOVE_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME); + goto error; + } + + if (H5Lcreate_hard(group_id, ".", src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + 
if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist\n"); + goto error; + } + + /* Verify the link doesn't currently exist in the target group */ + if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link existed in target group before move!\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lmove_invalid_src_loc_id) + { + TESTING_2("H5Lmove with an invalid source location ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Lmove(H5I_INVALID_HID, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lmove succeeded with an invalid source location ID!\n"); + PART_ERROR(H5Lmove_invalid_src_loc_id); + } + + PASSED(); + } + PART_END(H5Lmove_invalid_src_loc_id); + + PART_BEGIN(H5Lmove_invalid_src_name) + { + TESTING_2("H5Lmove with an invalid source name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lmove(src_grp_id, NULL, dst_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lmove succeeded with a NULL source name!\n"); + PART_ERROR(H5Lmove_invalid_src_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lmove(src_grp_id, "", dst_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lmove succeeded with an invalid source name of ''!\n"); + PART_ERROR(H5Lmove_invalid_src_name); + } + + PASSED(); + } + PART_END(H5Lmove_invalid_src_name); + + PART_BEGIN(H5Lmove_invalid_dst_loc_id) + { + TESTING_2("H5Lmove with an invalid destination location ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5I_INVALID_HID, + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lmove succeeded with an invalid destination location ID!\n"); + PART_ERROR(H5Lmove_invalid_dst_loc_id); + } + + PASSED(); + } + PART_END(H5Lmove_invalid_dst_loc_id); + + PART_BEGIN(H5Lmove_invalid_dst_name) + { + TESTING_2("H5Lmove with an invalid destination name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, NULL, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lmove succeeded with a NULL destination name!\n"); + PART_ERROR(H5Lmove_invalid_dst_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, "", + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lmove succeeded with an invalid destination name of ''!\n"); + PART_ERROR(H5Lmove_invalid_dst_name); + } + + PASSED(); + } + PART_END(H5Lmove_invalid_dst_name); + + PART_BEGIN(H5Lmove_invalid_lcpl) + { + TESTING_2("H5Lmove with an 
invalid LCPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lmove succeeded with an invalid LCPL!\n"); + PART_ERROR(H5Lmove_invalid_lcpl); + } + + PASSED(); + } + PART_END(H5Lmove_invalid_lcpl); + + PART_BEGIN(H5Lmove_invalid_lapl) + { + TESTING_2("H5Lmove with an invalid LAPL"); +#ifndef NO_INVALID_PROPERTY_LIST_TESTS + H5E_BEGIN_TRY + { + err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lmove succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Lmove_invalid_lapl); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lmove_invalid_lapl); +#endif + } + PART_END(H5Lmove_invalid_lapl); + + PART_BEGIN(H5Lmove_existence) + { + TESTING_2("valid link existence in original group after previous invalid H5Lmove calls"); + + /* Verify the link hasn't been moved */ + if ((link_exists = + H5Lexists(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lmove_existence); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link didn't exist in source group after invalid move!\n"); + PART_ERROR(H5Lmove_existence); + } + + if ((link_exists = + H5Lexists(dst_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lmove_existence); + } + + if (link_exists) { + H5_FAILED(); + HDprintf(" hard link existed in target group after invalid move!\n"); + PART_ERROR(H5Lmove_existence); + } + + PASSED(); + } + PART_END(H5Lmove_existence); + + PART_BEGIN(H5Lmove_same_location) + { + TESTING_2("H5Lmove with an invalid same location"); + + /* Move a group within the file. Both of source and destination use + * H5L_SAME_LOC. Should fail. */ + H5E_BEGIN_TRY + { + err_ret = H5Lmove(H5L_SAME_LOC, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5L_SAME_LOC, + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lmove succeeded with an invalid same location!\n"); + PART_ERROR(H5Lmove_same_location); + } + + PASSED(); + } + PART_END(H5Lmove_same_location); + + PART_BEGIN(H5Lmove_across_files) + { + TESTING_2("H5Lmove into another file"); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lmove_across_files); + } + + /* Move a group across files. 
*/ + H5E_BEGIN_TRY + { + err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, ext_file_id, + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lmove succeeded in moving a hard link across files!\n"); + PART_ERROR(H5Lmove_across_files); + } + + /* Ensure that original link still exists */ + if ((link_exists = + H5Lexists(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if original link '%s' exists after invalid link move\n", + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lmove_across_files); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" original link '%s' didn't exist after failed move!\n", + MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lmove_across_files); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close a file!\n"); + PART_ERROR(H5Lmove_across_files); + } + + PASSED(); + } + PART_END(H5Lmove_across_files); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(dst_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(src_grp_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(dst_grp_id); + H5Gclose(src_grp_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(ext_file_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a soft or external link's value can + * be retrieved by using H5Lget_val and H5Lget_val_by_idx. + */ +static int +test_get_link_val(void) +{ + H5L_info2_t link_info; +#ifndef NO_EXTERNAL_LINKS + const char *ext_link_filepath; + const char *ext_link_val; + unsigned ext_link_flags; +#endif + htri_t link_exists; + size_t link_val_size; + char link_val_buf[GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE]; + hid_t file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; +#ifndef NO_EXTERNAL_LINKS + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + + TESTING_MULTIPART("link value retrieval"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic, more, soft, external link, or creation " + "order aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order 
tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, GET_LINK_VAL_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lget_val_soft) + { + const char *link_target = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME; + + TESTING_2("H5Lget_val on soft link"); + + HDmemset(&link_info, 0, sizeof(link_info)); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP1_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP1_NAME); + PART_ERROR(H5Lget_val_soft); + } + + if (H5Lcreate_soft(link_target, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_soft); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_soft); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_soft); + } + + if (H5Lget_info2(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, &link_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info\n"); + PART_ERROR(H5Lget_val_soft); + } + + link_val_size = strlen(link_target) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link value size %zu did not match expected size of %zu\n", link_info.u.val_size, + link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if (H5Lget_val(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value\n"); + PART_ERROR(H5Lget_val_soft); + } + + if (HDstrncmp(link_val_buf, link_target, link_val_size)) { + H5_FAILED(); + HDprintf(" soft link value did not match\n"); + PART_ERROR(H5Lget_val_soft); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP1_NAME); + PART_ERROR(H5Lget_val_soft); + } + + PASSED(); + } + PART_END(H5Lget_val_soft); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_external) + { +#ifndef NO_EXTERNAL_LINKS + const char *ext_obj_name = "/"; +#endif + + TESTING_2("H5Lget_val on external link"); +#ifndef NO_EXTERNAL_LINKS + HDmemset(&link_info, 0, sizeof(link_info)); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_val_external); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_val_external); + } + + if ((subgroup_id = 
H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP2_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP2_NAME); + PART_ERROR(H5Lget_val_external); + } + + if (H5Lcreate_external(ext_link_filename, ext_obj_name, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_external); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_external); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_external); + } + + if (H5Lget_info2(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, &link_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info\n"); + PART_ERROR(H5Lget_val_external); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link value size %lld did not match expected size of %lld\n", + link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_val_external); + } + + if (H5Lget_val(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value\n"); + PART_ERROR(H5Lget_val_external); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_external); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_external); + } + + if (HDstrncmp(ext_link_val, ext_obj_name, strlen(ext_obj_name) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name); + PART_ERROR(H5Lget_val_external); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP2_NAME); + PART_ERROR(H5Lget_val_external); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_val_external); +#endif + } + PART_END(H5Lget_val_external); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_ud) + { + TESTING_2("H5Lget_val on user-defined link"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lget_val_ud); + } + PART_END(H5Lget_val_ud); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_by_idx_soft_crt_order_increasing) + { + const char *link_target_a = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + "/" GET_LINK_VAL_TEST_SUBGROUP4_NAME "A"; + const char *link_target_b = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + "/" GET_LINK_VAL_TEST_SUBGROUP4_NAME "B"; + const char *link_target_c = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + 
"/" GET_LINK_VAL_TEST_SUBGROUP4_NAME "C"; + + TESTING_2("H5Lget_val_by_idx on soft link by creation order in increasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP4_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP4_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft(link_target_a, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if (H5Lcreate_soft(link_target_b, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if (H5Lcreate_soft(link_target_c, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + /* Verify the links exist */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + /* Retrieve the info and value of each link in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %d\n", 0); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + link_val_size = strlen(link_target_a) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n", + link_info.u.val_size, 0, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, 
H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %d\n", 0); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if (HDstrncmp(link_val_buf, link_target_a, strlen(link_target_a) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n", + link_val_buf, 0, link_target_a); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %d\n", 1); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + link_val_size = strlen(link_target_b) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n", + link_info.u.val_size, 1, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %d\n", 1); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if (HDstrncmp(link_val_buf, link_target_b, strlen(link_target_b) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n", + link_val_buf, 1, link_target_b); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %d\n", 2); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + link_val_size = strlen(link_target_c) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n", + link_info.u.val_size, 2, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %d\n", 2); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if (HDstrncmp(link_val_buf, link_target_c, strlen(link_target_c) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n", + link_val_buf, 2, link_target_c); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP4_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing); + } + + PASSED(); + } + PART_END(H5Lget_val_by_idx_soft_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_by_idx_soft_crt_order_decreasing) + { + const char *link_target_a = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + "/" GET_LINK_VAL_TEST_SUBGROUP5_NAME "A"; + const char *link_target_b = "/" 
LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + "/" GET_LINK_VAL_TEST_SUBGROUP5_NAME "B"; + const char *link_target_c = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + "/" GET_LINK_VAL_TEST_SUBGROUP5_NAME "C"; + + TESTING_2("H5Lget_val_by_idx on soft link by creation order in decreasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP5_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP5_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft(link_target_a, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + if (H5Lcreate_soft(link_target_b, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + if (H5Lcreate_soft(link_target_c, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + /* Verify the links exist */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + /* Retrieve the info and value of each link in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %d\n", 2); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + link_val_size = strlen(link_target_a) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n", + link_info.u.val_size, 2, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + 
HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %d\n", 2); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + if (HDstrncmp(link_val_buf, link_target_a, strlen(link_target_a) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n", + link_val_buf, 2, link_target_a); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %d\n", 1); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + link_val_size = strlen(link_target_b) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n", + link_info.u.val_size, 1, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %d\n", 1); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + if (HDstrncmp(link_val_buf, link_target_b, strlen(link_target_b) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n", + link_val_buf, 1, link_target_b); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %d\n", 0); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + link_val_size = strlen(link_target_c) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n", + link_info.u.val_size, 0, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %d\n", 0); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + if (HDstrncmp(link_val_buf, link_target_c, strlen(link_target_c) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n", + link_val_buf, 0, link_target_c); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP5_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing); + } + + PASSED(); + } + PART_END(H5Lget_val_by_idx_soft_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + 
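+        /*
+         * The name-order parts below repeat the pattern of the creation-order
+         * parts above, switching the index type from H5_INDEX_CRT_ORDER to
+         * H5_INDEX_NAME; the external-link parts then do the same and decode
+         * the retrieved value with H5Lunpack_elink_val(). As an illustrative
+         * sketch only (hypothetical buffer/index names, not used by this test):
+         *
+         *     H5L_info2_t info;
+         *     char        buf[256];
+         *
+         *     // info for the n-th link when links are sorted by name, ascending
+         *     H5Lget_info_by_idx2(group, ".", H5_INDEX_NAME, H5_ITER_INC, n,
+         *                         &info, H5P_DEFAULT);
+         *     // info.u.val_size reports the value size for soft/external links
+         *     H5Lget_val_by_idx(group, ".", H5_INDEX_NAME, H5_ITER_INC, n,
+         *                       buf, sizeof(buf), H5P_DEFAULT);
+         */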
PART_BEGIN(H5Lget_val_by_idx_soft_name_order_increasing) + { + const char *link_target_a = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + "/" GET_LINK_VAL_TEST_SUBGROUP6_NAME "A"; + const char *link_target_b = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + "/" GET_LINK_VAL_TEST_SUBGROUP6_NAME "B"; + const char *link_target_c = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + "/" GET_LINK_VAL_TEST_SUBGROUP6_NAME "C"; + + TESTING_2("H5Lget_val_by_idx on soft link by alphabetical order in increasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP6_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP6_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft(link_target_a, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + if (H5Lcreate_soft(link_target_b, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + if (H5Lcreate_soft(link_target_c, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + /* Verify the links exist */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + /* Retrieve the info and value of each link in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %d\n", 0); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + link_val_size = strlen(link_target_a) + 1; + if (link_info.u.val_size != 
link_val_size) { + H5_FAILED(); + HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n", + link_info.u.val_size, 0, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %d\n", 0); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + if (HDstrncmp(link_val_buf, link_target_a, strlen(link_target_a) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n", + link_val_buf, 0, link_target_a); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %d\n", 1); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + link_val_size = strlen(link_target_b) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n", + link_info.u.val_size, 1, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %d\n", 1); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + if (HDstrncmp(link_val_buf, link_target_b, strlen(link_target_b) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n", + link_val_buf, 1, link_target_b); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %d\n", 2); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + link_val_size = strlen(link_target_c) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n", + link_info.u.val_size, 2, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %d\n", 2); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + if (HDstrncmp(link_val_buf, link_target_c, strlen(link_target_c) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n", + link_val_buf, 2, link_target_c); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP6_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing); + 
} + + PASSED(); + } + PART_END(H5Lget_val_by_idx_soft_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_by_idx_soft_name_order_decreasing) + { +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + const char *link_target_a = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + "/" GET_LINK_VAL_TEST_SUBGROUP7_NAME "A"; + const char *link_target_b = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + "/" GET_LINK_VAL_TEST_SUBGROUP7_NAME "B"; + const char *link_target_c = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME + "/" GET_LINK_VAL_TEST_SUBGROUP7_NAME "C"; +#endif + + TESTING_2("H5Lget_val_by_idx on soft link by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP7_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP7_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft(link_target_a, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + if (H5Lcreate_soft(link_target_b, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + if (H5Lcreate_soft(link_target_c, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + /* Verify the links exist */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + /* Retrieve the info and value of each link in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, 
H5_ITER_DEC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %lld\n", 2); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + link_val_size = strlen(link_target_a) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 2, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %lld\n", 2); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + if (HDstrncmp(link_val_buf, link_target_a, strlen(link_target_a) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %lld did not match expected value '%s'\n", + link_val_buf, 2, link_target_a); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %lld\n", 1); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + link_val_size = strlen(link_target_b) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 1, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %lld\n", 1); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + if (HDstrncmp(link_val_buf, link_target_b, strlen(link_target_b) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %lld did not match expected value '%s'\n", + link_val_buf, 1, link_target_b); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve soft link's info at index %lld\n", 0); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + link_val_size = strlen(link_target_c) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 0, link_val_size); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link value at index %lld\n", 0); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + if (HDstrncmp(link_val_buf, link_target_c, strlen(link_target_c) + 1)) { + H5_FAILED(); + HDprintf(" link value '%s' for link at index %lld did not match 
expected value '%s'\n", + link_val_buf, 0, link_target_c); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP7_NAME); + PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_val_by_idx_soft_name_order_decreasing); +#endif + } + PART_END(H5Lget_val_by_idx_soft_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_by_idx_external_crt_order_increasing) + { +#ifndef NO_EXTERNAL_LINKS + const char *ext_obj_name_a = "/A"; + const char *ext_obj_name_b = "/B"; + const char *ext_obj_name_c = "/C"; +#endif + + TESTING_2("H5Lget_val_by_idx on external link by creation order in increasing order"); +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP8_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP8_NAME); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + /* Create several external links */ + if (H5Lcreate_external(ext_link_filename, ext_obj_name_a, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_obj_name_b, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_obj_name_c, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + /* Verify the links exist */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link 
'%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + /* Retrieve the info and value of each link in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 0); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_a) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 0, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 0); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_a, strlen(ext_obj_name_a) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_a); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 1); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_b) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 1, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 1); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if 
(H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_b, strlen(ext_obj_name_b) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_b); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 2); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_c) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 2, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 2); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_c, strlen(ext_obj_name_c) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_c); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP8_NAME); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_val_by_idx_external_crt_order_increasing); +#endif + } + PART_END(H5Lget_val_by_idx_external_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_by_idx_external_crt_order_decreasing) + { +#ifndef NO_EXTERNAL_LINKS + const char *ext_obj_name_a = "/A"; + const char *ext_obj_name_b = "/B"; + const char *ext_obj_name_c = "/C"; +#endif + + TESTING_2("H5Lget_val_by_idx on external link by creation order in decreasing order"); +#ifndef NO_EXTERNAL_LINKS + 
HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP9_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP9_NAME); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + /* Create several external links */ + if (H5Lcreate_external(ext_link_filename, ext_obj_name_a, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_obj_name_b, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_obj_name_c, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + /* Verify the links exist */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + /* Retrieve the info and value of each link in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 2); + 
PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_a) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 2, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 2); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_a, strlen(ext_obj_name_a) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_a); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 1); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_b) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 1, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 1); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_b, strlen(ext_obj_name_b) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_b); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if 
(H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 0); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_c) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 0, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 0); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_c, strlen(ext_obj_name_c) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_c); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP9_NAME); + PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_val_by_idx_external_crt_order_decreasing); +#endif + } + PART_END(H5Lget_val_by_idx_external_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_by_idx_external_name_order_increasing) + { +#ifndef NO_EXTERNAL_LINKS + const char *ext_obj_name_a = "/A"; + const char *ext_obj_name_b = "/B"; + const char *ext_obj_name_c = "/C"; +#endif + + TESTING_2("H5Lget_val_by_idx on external link by alphabetical order in increasing order"); +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP10_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP10_NAME); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + /* Create several 
external links */ + if (H5Lcreate_external(ext_link_filename, ext_obj_name_a, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_obj_name_b, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_obj_name_c, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + /* Verify the links exist */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + /* Retrieve the info and value of each link in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 0); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_a) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 0, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 0); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + 
&ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_a, strlen(ext_obj_name_a) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_a); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 1); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_b) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 1, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 1); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_b, strlen(ext_obj_name_b) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_b); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 2); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_c) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 2, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 2); + 
PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_c, strlen(ext_obj_name_c) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_c); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP10_NAME); + PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_val_by_idx_external_name_order_increasing); +#endif + } + PART_END(H5Lget_val_by_idx_external_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_by_idx_external_name_order_decreasing) + { +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + const char *ext_obj_name_a = "/A"; + const char *ext_obj_name_b = "/B"; + const char *ext_obj_name_c = "/C"; +#endif + + TESTING_2("H5Lget_val_by_idx on external link by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP11_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP11_NAME); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + /* Create several external links */ + if (H5Lcreate_external(ext_link_filename, ext_obj_name_a, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_obj_name_b, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_obj_name_c, subgroup_id, + GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + 
PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + /* Verify the links exist */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + /* Retrieve the info and value of each link in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 2); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_a) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 2, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 2); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_a, strlen(ext_obj_name_a) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_a); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &link_info, + H5P_DEFAULT) < 0) { + 
H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 1); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_b) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 1, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 1); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_b, strlen(ext_obj_name_b) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_b); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve external link's info at index %lld\n", 0); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_c) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf( + " link value size %lld for link at index %lld did not match expected size of %lld\n", + link_info.u.val_size, 0, link_val_size); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE); + if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, link_val_buf, + GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link value at index %lld\n", 0); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath, + &ext_link_val) < 0) { + H5_FAILED(); + HDprintf(" couldn't unpack external link value buffer\n"); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) { + H5_FAILED(); + HDprintf(" external link target file '%s' did not match expected '%s'\n", + ext_link_filepath, ext_link_filename); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (HDstrncmp(ext_link_val, ext_obj_name_c, strlen(ext_obj_name_c) + 1)) { + H5_FAILED(); + HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val, + ext_obj_name_c); + 
PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP11_NAME); + PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_val_by_idx_external_name_order_decreasing); +#endif + } + PART_END(H5Lget_val_by_idx_external_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_by_idx_ud_crt_order_increasing) + { + TESTING_2("H5Lget_val_by_idx on user-defined link by creation order in increasing order"); + + SKIPPED(); + PART_EMPTY(H5Lget_val_by_idx_ud_crt_order_increasing); + } + PART_END(H5Lget_val_by_idx_ud_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_by_idx_ud_crt_order_decreasing) + { + TESTING_2("H5Lget_val_by_idx on user-defined link by creation order in decreasing order"); + + SKIPPED(); + PART_EMPTY(H5Lget_val_by_idx_ud_crt_order_decreasing); + } + PART_END(H5Lget_val_by_idx_ud_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_by_idx_ud_name_order_increasing) + { + TESTING_2("H5Lget_val_by_idx on user-defined link by alphabetical order in increasing order"); + + SKIPPED(); + PART_EMPTY(H5Lget_val_by_idx_ud_name_order_increasing); + } + PART_END(H5Lget_val_by_idx_ud_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_val_by_idx_ud_name_order_decreasing) + { + TESTING_2("H5Lget_val_by_idx on user-defined link by alphabetical order in decreasing order"); + + SKIPPED(); + PART_EMPTY(H5Lget_val_by_idx_ud_name_order_decreasing); + } + PART_END(H5Lget_val_by_idx_ud_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(ext_file_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a soft or external link's value can't be + * retrieved when H5Lget_val(_by_idx) is passed invalid parameters. 
+ */ +static int +test_get_link_val_invalid_params(void) +{ + H5L_info2_t link_info; + htri_t link_exists; + herr_t err_ret = -1; + size_t link_val_buf_size = 0; + char *link_val_buf = NULL; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("link value retrieval with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic, more, soft, external link, or creation " + "order aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, GET_LINK_VAL_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_VAL_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_INVALID_PARAMS_TEST_GROUP_NAME, group_id, + GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME); + goto error; + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(group_id, GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", + GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + goto error; + } + + link_val_buf_size = 100; + if (NULL == (link_val_buf = (char *)HDmalloc(link_val_buf_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for storing link value\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lget_val_invalid_loc_id) + { + TESTING_2("H5Lget_val with an invalid location ID"); + + HDmemset(&link_info, 0, sizeof(link_info)); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_val(H5I_INVALID_HID, GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME, + link_val_buf, link_val_buf_size, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_val succeeded with an invalid location ID\n"); + PART_ERROR(H5Lget_val_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Lget_val_invalid_loc_id); + + PART_BEGIN(H5Lget_val_invalid_link_name) + { + TESTING_2("H5Lget_val with an 
invalid link name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_val(group_id, NULL, link_val_buf, link_val_buf_size, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_val succeeded with a NULL link name\n"); + PART_ERROR(H5Lget_val_invalid_link_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lget_val(group_id, "", link_val_buf, link_val_buf_size, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_val succeeded with an invalid link name of ''\n"); + PART_ERROR(H5Lget_val_invalid_link_name); + } + + PASSED(); + } + PART_END(H5Lget_val_invalid_link_name); + + PART_BEGIN(H5Lget_val_invalid_lapl) + { + TESTING_2("H5Lget_val with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_val(group_id, GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME, link_val_buf, + link_val_buf_size, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_val succeeded with an invalid LAPL\n"); + PART_ERROR(H5Lget_val_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Lget_val_invalid_lapl); + + PART_BEGIN(H5Lget_val_by_idx_invalid_loc_id) + { + TESTING_2("H5Lget_val_by_idx with an invalid location ID"); + + HDmemset(&link_info, 0, sizeof(link_info)); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_val_by_idx(H5I_INVALID_HID, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, + link_val_buf, link_val_buf_size, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_val_by_idx succeeded with an invalid location ID!\n"); + PART_ERROR(H5Lget_val_by_idx_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Lget_val_by_idx_invalid_loc_id); + + PART_BEGIN(H5Lget_val_by_idx_invalid_grp_name) + { + TESTING_2("H5Lget_val_by_idx with an invalid group name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_val_by_idx(group_id, NULL, H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_val_buf, + link_val_buf_size, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_val_by_idx succeeded with a NULL group name!\n"); + PART_ERROR(H5Lget_val_by_idx_invalid_grp_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lget_val_by_idx(group_id, "", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_val_buf, + link_val_buf_size, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_val_by_idx succeeded with an invalid group name of ''!\n"); + PART_ERROR(H5Lget_val_by_idx_invalid_grp_name); + } + + PASSED(); + } + PART_END(H5Lget_val_by_idx_invalid_grp_name); + + PART_BEGIN(H5Lget_val_by_idx_invalid_index_type) + { + TESTING_2("H5Lget_val_by_idx with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_val_by_idx(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, link_val_buf, + link_val_buf_size, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_val_by_idx succeeded with invalid index type H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Lget_val_by_idx_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lget_val_by_idx(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, link_val_buf, + link_val_buf_size, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_val_by_idx succeeded with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Lget_val_by_idx_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Lget_val_by_idx_invalid_index_type); + + PART_BEGIN(H5Lget_val_by_idx_invalid_iter_order) + { + TESTING_2("H5Lget_val_by_idx with an invalid iteration ordering"); + + 
H5E_BEGIN_TRY + { + err_ret = H5Lget_val_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_UNKNOWN, 0, + link_val_buf, link_val_buf_size, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " H5Lget_val_by_idx succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Lget_val_by_idx_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lget_val_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_N, 0, link_val_buf, + link_val_buf_size, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_val_by_idx succeeded with invalid iteration ordering H5_ITER_N!\n"); + PART_ERROR(H5Lget_val_by_idx_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Lget_val_by_idx_invalid_iter_order); + + PART_BEGIN(H5Lget_val_by_idx_invalid_lapl) + { + TESTING_2("H5Lget_val_by_idx with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_val_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_val_buf, + link_val_buf_size, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_val_by_idx succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Lget_val_by_idx_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Lget_val_by_idx_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (link_val_buf) { + HDfree(link_val_buf); + link_val_buf = NULL; + } + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (link_val_buf) + HDfree(link_val_buf); + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of H5Lget_info2 and + * H5Lget_info_by_idx2. 
+ */ +static int +test_get_link_info(void) +{ + H5L_info2_t link_info; + htri_t link_exists; + size_t link_val_size; + hid_t file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; +#ifndef NO_EXTERNAL_LINKS + char *ext_objname; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + + TESTING_MULTIPART("link info retrieval"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic, more, soft, hard, external link, or " + "creation order aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, GET_LINK_INFO_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", GET_LINK_INFO_TEST_GROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lget_info_hard) + { + TESTING_2("H5Lget_info2 on hard link"); + + HDmemset(&link_info, 0, sizeof(link_info)); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP1_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP1_NAME); + PART_ERROR(H5Lget_info_hard); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_hard); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_hard); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist\n"); + PART_ERROR(H5Lget_info_hard); + } + + if (H5Lget_info2(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, &link_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get hard link info\n"); + PART_ERROR(H5Lget_info_hard); + } + + if (link_info.type != H5L_TYPE_HARD) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_hard); + } + + if 
(link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_hard); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP1_NAME); + PART_ERROR(H5Lget_info_hard); + } + + PASSED(); + } + PART_END(H5Lget_info_hard); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_soft) + { + TESTING_2("H5Lget_info2 on soft link"); + + HDmemset(&link_info, 0, sizeof(link_info)); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP2_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP2_NAME); + PART_ERROR(H5Lget_info_soft); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP2_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_soft); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_soft); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link did not exist\n"); + PART_ERROR(H5Lget_info_soft); + } + + if (H5Lget_info2(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, &link_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info\n"); + PART_ERROR(H5Lget_info_soft); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_soft); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP2_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%zu' did not match expected value '%zu'\n", + link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_soft); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_soft); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP2_NAME); + PART_ERROR(H5Lget_info_soft); + } + + PASSED(); + } + PART_END(H5Lget_info_soft); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_external) + { + TESTING_2("H5Lget_info2 on external link"); +#ifndef NO_EXTERNAL_LINKS + HDmemset(&link_info, 0, sizeof(link_info)); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_info_external); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + 
PART_ERROR(H5Lget_info_external); + } + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP3_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP3_NAME); + PART_ERROR(H5Lget_info_external); + } + + ext_objname = "/"; + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_external); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_external); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link did not exist\n"); + PART_ERROR(H5Lget_info_external); + } + + if (H5Lget_info2(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, &link_info, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info\n"); + PART_ERROR(H5Lget_info_external); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_external); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_external); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_external); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP3_NAME); + PART_ERROR(H5Lget_info_external); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_info_external); +#endif + } + PART_END(H5Lget_info_external); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_ud) + { + TESTING_2("H5Lget_info2 on user-defined link"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lget_info_ud); + } + PART_END(H5Lget_info_ud); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_hard_crt_order_increasing) + { + TESTING_2("H5Lget_info_by_idx2 on hard link by creation order in increasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP5_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP5_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + /* Create several hard links */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create 
hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + /* Retrieve info of links in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get hard link info for index %d\n", 0); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (link_info.type != H5L_TYPE_HARD) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get hard link info for index %d\n", 1); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (link_info.type != H5L_TYPE_HARD) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 1)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)1); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, 
H5_ITER_INC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get hard link info for index %d\n", 2); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (link_info.type != H5L_TYPE_HARD) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 2)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)2); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP5_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing); + } + + PASSED(); + } + PART_END(H5Lget_info_by_idx_hard_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_hard_crt_order_decreasing) + { + TESTING_2("H5Lget_info_by_idx2 on hard link by creation order in decreasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP6_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP6_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + /* Create several hard links */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + 
GET_LINK_INFO_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + /* Retrieve info of links in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get hard link info for index %d\n", 2); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (link_info.type != H5L_TYPE_HARD) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get hard link info for index %d\n", 1); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (link_info.type != H5L_TYPE_HARD) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 1)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)1); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get hard link info for index %d\n", 0); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (link_info.type != H5L_TYPE_HARD) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 2)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)2); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP6_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing); + } + + PASSED(); + } + PART_END(H5Lget_info_by_idx_hard_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_hard_name_order_increasing) + { + TESTING_2("H5Lget_info_by_idx2 on hard link by alphabetical order in increasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP7_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP7_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + /* Create several hard links */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { 
+ H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + /* Retrieve info of links in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get hard link info for index %d\n", 0); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (link_info.type != H5L_TYPE_HARD) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get hard link info for index %d\n", 1); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (link_info.type != H5L_TYPE_HARD) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 
1)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)1); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get hard link info for index %d\n", 2); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (link_info.type != H5L_TYPE_HARD) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 2)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)2); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP7_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing); + } + + PASSED(); + } + PART_END(H5Lget_info_by_idx_hard_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_hard_name_order_decreasing) + { + TESTING_2("H5Lget_info_by_idx2 on hard link by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP8_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP8_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing); + } + + /* Create several hard links */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing); + } + + if (!link_exists) 
{
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP8_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_by_idx_hard_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_info_by_idx_hard_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_soft_crt_order_increasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on soft link by creation order in increasing 
order"); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP9_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP9_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP9_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP9_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP9_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + /* Retrieve info of links in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %d\n", 0); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" 
GET_LINK_INFO_TEST_SUBGROUP9_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%zu' did not match expected value '%zu'\n", + link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %d\n", 1); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP9_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%zu' did not match expected value '%zu'\n", + link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 1)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)1); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %d\n", 2); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP9_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%zu' did not match expected value '%zu'\n", + link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 2)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)2); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP9_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing); + } + + PASSED(); + } + PART_END(H5Lget_info_by_idx_soft_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_soft_crt_order_decreasing) + { + TESTING_2("H5Lget_info_by_idx2 on soft link by creation order in decreasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP10_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP10_NAME); + 
PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + /* Retrieve info of links in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %d\n", 2); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%zu' did not match expected value '%zu'\n", + link_info.u.val_size, link_val_size); + 
PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %d\n", 1); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%zu' did not match expected value '%zu'\n", + link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 1)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)1); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %d\n", 0); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%zu' did not match expected value '%zu'\n", + link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 2)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)2); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP10_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing); + } + + PASSED(); + } + PART_END(H5Lget_info_by_idx_soft_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_soft_name_order_increasing) + { + TESTING_2("H5Lget_info_by_idx2 on soft link by alphabetical order in increasing order"); + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP11_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP11_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" 
GET_LINK_INFO_TEST_SUBGROUP11_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP11_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP11_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + /* Retrieve info of links in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %d\n", 0); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP11_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%zu' did not match expected value '%zu'\n", + link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did 
not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %d\n", 1); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP11_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%zu' did not match expected value '%zu'\n", + link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 1)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)1); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %d\n", 2); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP11_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%zu' did not match expected value '%zu'\n", + link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 2)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)2); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP11_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing); + } + + PASSED(); + } + PART_END(H5Lget_info_by_idx_soft_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_soft_name_order_decreasing) + { + TESTING_2("H5Lget_info_by_idx2 on soft link by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP12_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP12_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", 
GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME, + subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + /* Retrieve info of links in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %lld\n", 2); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + 
HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %lld\n", 1); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 1)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)1); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get soft link info for index %lld\n", 0); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (link_info.type != H5L_TYPE_SOFT) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME + "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME) + + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 2)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)2); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP12_NAME); + PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_info_by_idx_soft_name_order_decreasing); +#endif + } + PART_END(H5Lget_info_by_idx_soft_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_external_crt_order_increasing) + { + TESTING_2("H5Lget_info_by_idx2 on external link by creation order in increasing order"); +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, 
GET_LINK_INFO_TEST_SUBGROUP13_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP13_NAME); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + /* Create several external links */ + ext_objname = "/"; + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + /* Retrieve info of links in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 0); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long 
long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 1); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 1)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)1); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 2); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 2)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)2); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP13_NAME); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_info_by_idx_external_crt_order_increasing); +#endif + } + PART_END(H5Lget_info_by_idx_external_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_external_crt_order_decreasing) + { + TESTING_2("H5Lget_info_by_idx2 on external link by creation order in decreasing order"); +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" 
couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP14_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP14_NAME); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + /* Create several external links */ + ext_objname = "/"; + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + /* Retrieve info of links in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 2); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { 
+ H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 1); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 1)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)1); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 0); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 2)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)2); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP14_NAME); + PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_info_by_idx_external_crt_order_decreasing); +#endif + } + PART_END(H5Lget_info_by_idx_external_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + 
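+ /* The three external links above were created in the order NAME, NAME2, NAME3
+ * (creation order values 0, 1 and 2), and their names are chosen so that
+ * alphabetical order matches creation order. The name-ordered parts below
+ * therefore expect index n to correspond to creation order value n when
+ * iterating in increasing order, and the reverse when iterating in
+ * decreasing order. */
+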
PART_BEGIN(H5Lget_info_by_idx_external_name_order_increasing) + { + TESTING_2("H5Lget_info_by_idx2 on external link by alphabetical order in increasing order"); +#ifndef NO_EXTERNAL_LINKS + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP15_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP15_NAME); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + /* Create several external links */ + ext_objname = "/"; + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME3); + 
PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + /* Retrieve info of links in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 0); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 1); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 1)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)1); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 2); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (link_info.corder_valid && (link_info.corder != 2)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)2); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", 
GET_LINK_INFO_TEST_SUBGROUP15_NAME); + PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_info_by_idx_external_name_order_increasing); +#endif + } + PART_END(H5Lget_info_by_idx_external_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_external_name_order_decreasing) + { + TESTING_2("H5Lget_info_by_idx2 on external link by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP16_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP16_NAME); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + /* Create several external links */ + ext_objname = "/"; + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id, + GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME2); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if 
((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before deletion\n", + GET_LINK_INFO_TEST_EXT_LINK_NAME3); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + /* Retrieve info of links in turn */ + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 2); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 0)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)0); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 1); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 1)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)1); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + HDmemset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &link_info, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get external link info for index %lld\n", 0); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (link_info.type != H5L_TYPE_EXTERNAL) { + H5_FAILED(); + HDprintf(" incorrect link type returned\n"); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1; + if (link_info.u.val_size != link_val_size) { + H5_FAILED(); + HDprintf(" link's value size '%lld' did not match expected value '%lld'\n", + (long long)link_info.u.val_size, link_val_size); + 
PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (link_info.corder_valid && (link_info.corder != 2)) { + H5_FAILED(); + HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n", + (long long)link_info.corder, (long long)2); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP16_NAME); + PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_info_by_idx_external_name_order_decreasing); +#endif + } + PART_END(H5Lget_info_by_idx_external_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_ud_crt_order_increasing) + { + TESTING_2("H5Lget_info_by_idx2 on user-defined link by creation order in increasing order"); + + SKIPPED(); + PART_EMPTY(H5Lget_info_by_idx_ud_crt_order_increasing); + } + PART_END(H5Lget_info_by_idx_ud_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_ud_crt_order_decreasing) + { + TESTING_2("H5Lget_info_by_idx2 on user-defined link by creation order in decreasing order"); + + SKIPPED(); + PART_EMPTY(H5Lget_info_by_idx_ud_crt_order_decreasing); + } + PART_END(H5Lget_info_by_idx_ud_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_ud_name_order_increasing) + { + TESTING_2("H5Lget_info_by_idx2 on user-defined link by alphabetical order in increasing order"); + + SKIPPED(); + PART_EMPTY(H5Lget_info_by_idx_ud_name_order_increasing); + } + PART_END(H5Lget_info_by_idx_ud_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_info_by_idx_ud_name_order_decreasing) + { + TESTING_2("H5Lget_info_by_idx2 on user-defined link by alphabetical order in decreasing order"); + + SKIPPED(); + PART_EMPTY(H5Lget_info_by_idx_ud_name_order_decreasing); + } + PART_END(H5Lget_info_by_idx_ud_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(ext_file_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a link's info can't be retrieved + * when H5Lget_info(_by_idx)2 is passed invalid parameters. 
+ */ +static int +test_get_link_info_invalid_params(void) +{ + H5L_info2_t link_info; + herr_t err_ret = -1; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("link info retrieval with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic, more, soft, hard, external link, or " + "creation order aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, GET_LINK_INFO_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_INFO_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if (H5Lcreate_hard(group_id, ".", group_id, GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lget_info_invalid_loc_id) + { + TESTING_2("H5Lget_info2 with an invalid location ID"); + + HDmemset(&link_info, 0, sizeof(link_info)); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_info2(H5I_INVALID_HID, GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME, + &link_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_info2 succeeded with an invalid location ID!\n"); + PART_ERROR(H5Lget_info_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Lget_info_invalid_loc_id); + + PART_BEGIN(H5Lget_info_invalid_link_name) + { + TESTING_2("H5Lget_info2 with an invalid link name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_info2(group_id, NULL, &link_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_info2 succeeded with a NULL link name!\n"); + PART_ERROR(H5Lget_info_invalid_link_name); + } + + H5E_BEGIN_TRY + { + 
err_ret = H5Lget_info2(group_id, "", &link_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_info2 succeeded with an invalid link name of ''!\n"); + PART_ERROR(H5Lget_info_invalid_link_name); + } + + PASSED(); + } + PART_END(H5Lget_info_invalid_link_name); + + PART_BEGIN(H5Lget_info_invalid_lapl) + { + TESTING_2("H5Lget_info2 with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_info2(group_id, GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME, &link_info, + H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_info2 succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Lget_info_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Lget_info_invalid_lapl); + + HDmemset(&link_info, 0, sizeof(link_info)); + + PART_BEGIN(H5Lget_info_by_idx_invalid_loc_id) + { + TESTING_2("H5Lget_info_by_idx2 with an invalid location ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_info_by_idx2(H5I_INVALID_HID, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, + &link_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_info_by_idx2 succeeded with an invalid location ID!\n"); + PART_ERROR(H5Lget_info_by_idx_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Lget_info_by_idx_invalid_loc_id); + + PART_BEGIN(H5Lget_info_by_idx_invalid_grp_name) + { + TESTING_2("H5Lget_info_by_idx2 with an invalid group name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_info_by_idx2(group_id, NULL, H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_info_by_idx2 succeeded with a NULL group name!\n"); + PART_ERROR(H5Lget_info_by_idx_invalid_grp_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lget_info_by_idx2(group_id, "", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_info_by_idx2 succeeded with an invalid group name of ''!\n"); + PART_ERROR(H5Lget_info_by_idx_invalid_grp_name); + } + + PASSED(); + } + PART_END(H5Lget_info_by_idx_invalid_grp_name); + + PART_BEGIN(H5Lget_info_by_idx_invalid_index_type) + { + TESTING_2("H5Lget_info_by_idx2 with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_info_by_idx2(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, &link_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_info_by_idx2 succeeded with invalid index type H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Lget_info_by_idx_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Lget_info_by_idx2(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, &link_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_info_by_idx2 succeeded with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Lget_info_by_idx_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Lget_info_by_idx_invalid_index_type); + + PART_BEGIN(H5Lget_info_by_idx_invalid_iter_order) + { + TESTING_2("H5Lget_info_by_idx2 with an invalid iteration ordering"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_info_by_idx2(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_UNKNOWN, 0, + &link_info, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " H5Lget_info_by_idx2 succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Lget_info_by_idx_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lget_info_by_idx2(group_id, ".", 
H5_INDEX_CRT_ORDER, H5_ITER_N, 0, &link_info, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_info_by_idx2 succeeded with invalid iteration ordering H5_ITER_N!\n"); + PART_ERROR(H5Lget_info_by_idx_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Lget_info_by_idx_invalid_iter_order); + + PART_BEGIN(H5Lget_info_by_idx_invalid_lapl) + { + TESTING_2("H5Lget_info_by_idx2 with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lget_info_by_idx2(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info, + H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_info_by_idx2 succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Lget_info_by_idx_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Lget_info_by_idx_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a link's name can be correctly + * retrieved by using H5Lget_name_by_idx. + */ +static int +test_get_link_name(void) +{ + ssize_t link_name_buf_size = 0; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + char link_name_buf[GET_LINK_NAME_TEST_BUF_SIZE]; +#ifndef NO_EXTERNAL_LINKS + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + + TESTING_MULTIPART("link name retrieval"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic, more, soft, hard, external link, or " + "creation order aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, GET_LINK_NAME_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", GET_LINK_NAME_TEST_GROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + 
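+ /* Each part below creates its own subgroup, populates it with three links and
+ * verifies that H5Lget_name_by_idx returns the expected link name for each
+ * index under the requested index type (creation order or link name) and
+ * iteration order. Every retrieval is done twice: first with a NULL buffer to
+ * query the name length, then again with that length plus one for the
+ * terminator. */
+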
PART_BEGIN(H5Lget_name_by_idx_hard_crt_order_increasing) + { + TESTING_2("H5Lget_name_by_idx on hard link by creation order in increasing order"); + + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + /* Create several hard links in reverse order to test creation order */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (HDstrncmp(link_name_buf, 
GET_LINK_NAME_TEST_HARD_LINK_NAME3, + strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME2, + strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME, + strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing); + } + + PASSED(); + } + PART_END(H5Lget_name_by_idx_hard_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_hard_crt_order_decreasing) + { + TESTING_2("H5Lget_name_by_idx on hard link by creation order in decreasing order"); + + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME2, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + /* Create several hard links in reverse order to test creation order */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, 
GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME, + strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME2, + strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if ((link_name_buf_size = 
H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME3, + strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing); + } + + PASSED(); + } + PART_END(H5Lget_name_by_idx_hard_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_hard_name_order_increasing) + { + TESTING_2("H5Lget_name_by_idx on hard link by alphabetical order in increasing order"); + + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME3, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + /* Create several hard links */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + 
PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME, + strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME2, + strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME3, + strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing); + } + + PASSED(); + } + PART_END(H5Lget_name_by_idx_hard_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_hard_name_order_decreasing) + { + 
TESTING_2("H5Lget_name_by_idx on hard link by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME4, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME4); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + /* Create several hard links */ + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if hard link '%s' exists\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" hard link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME3, + 
strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME2, + strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME, + strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_HARD_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_name_by_idx_hard_name_order_decreasing); +#endif + } + PART_END(H5Lget_name_by_idx_hard_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_soft_crt_order_increasing) + { + TESTING_2("H5Lget_name_by_idx on soft link by creation order in increasing order"); + + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + /* Create several soft links in reverse order to test creation order */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + 
PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", 
link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing); + } + + PASSED(); + } + PART_END(H5Lget_name_by_idx_soft_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_soft_crt_order_decreasing) + { + TESTING_2("H5Lget_name_by_idx on soft link by creation order in decreasing order"); + + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME2, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + /* Create several soft links in reverse order to test creation order */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't 
determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + 
PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing); + } + + PASSED(); + } + PART_END(H5Lget_name_by_idx_soft_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_soft_name_order_increasing) + { + TESTING_2("H5Lget_name_by_idx on soft link by alphabetical order in increasing order"); + + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME3, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + 
PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing); + } + + PASSED(); + } + PART_END(H5Lget_name_by_idx_soft_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_soft_name_order_decreasing) + { + TESTING_2("H5Lget_name_by_idx on soft link by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME4, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME4); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + /* Create several soft links */ + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME); + 
PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id, + GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if soft link '%s' exists\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" soft link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + 
H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME, + strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_name_by_idx_soft_name_order_decreasing); +#endif + } + PART_END(H5Lget_name_by_idx_soft_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_external_crt_order_increasing) + { + TESTING_2("H5Lget_name_by_idx on external link by creation order in increasing order"); +#ifndef NO_EXTERNAL_LINKS + /* Create file for external link to reference */ + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + /* Create several external links in reverse order to test creation order */ + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + 
PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, + strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, + 
strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, + strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_name_by_idx_external_crt_order_increasing); +#endif + } + PART_END(H5Lget_name_by_idx_external_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_external_crt_order_decreasing) + { + TESTING_2("H5Lget_name_by_idx on external link by creation order in decreasing order"); +#ifndef NO_EXTERNAL_LINKS + /* Create file for external link to reference */ + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME2, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + /* Create several external links in reverse order to test creation order */ + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", 
subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, + strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, + strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, 
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, + strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_name_by_idx_external_crt_order_decreasing); +#endif + } + PART_END(H5Lget_name_by_idx_external_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_external_name_order_increasing) + { + TESTING_2("H5Lget_name_by_idx on external link by alphabetical order in increasing order"); +#ifndef NO_EXTERNAL_LINKS + /* Create file for external link to reference */ + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME3, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + /* Create several external links */ + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", 
GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, + strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, + strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if ((link_name_buf_size = 
H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, + strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_name_by_idx_external_name_order_increasing); +#endif + } + PART_END(H5Lget_name_by_idx_external_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_external_name_order_decreasing) + { + TESTING_2("H5Lget_name_by_idx on external link by alphabetical order in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Create file for external link to reference */ + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", + EXTERNAL_LINK_TEST_FILE_NAME); + + if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (H5Fclose(ext_file_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close file '%s'\n", ext_link_filename); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + /* Create group to hold some links */ + if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME4, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME4); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + /* Create several external links */ + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + /* Verify the links have been created */ 
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if external link '%s' exists\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" external link '%s' did not exist before name retrieval\n", + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + /* Retrieve link names */ + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, + strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, + strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, + NULL, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + 
PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, link_name_buf, + (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, + strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME) + 1)) { + H5_FAILED(); + HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf, + GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group\n"); + PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lget_name_by_idx_external_name_order_decreasing); +#endif + } + PART_END(H5Lget_name_by_idx_external_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + H5Fclose(ext_file_id); + ext_file_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_ud_crt_order_increasing) + { + TESTING_2("H5Lget_name_by_idx on user-defined link by creation order in increasing order"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lget_name_by_idx_ud_crt_order_increasing); + } + PART_END(H5Lget_name_by_idx_ud_crt_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_ud_crt_order_decreasing) + { + TESTING_2("H5Lget_name_by_idx on user-defined link by creation order in decreasing order"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lget_name_by_idx_ud_crt_order_decreasing); + } + PART_END(H5Lget_name_by_idx_ud_crt_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_ud_name_order_increasing) + { + TESTING_2("H5Lget_name_by_idx on user-defined link by alphabetical order in increasing order"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lget_name_by_idx_ud_name_order_increasing); + } + PART_END(H5Lget_name_by_idx_ud_name_order_increasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + + PART_BEGIN(H5Lget_name_by_idx_ud_name_order_decreasing) + { + TESTING_2("H5Lget_name_by_idx on user-defined link by alphabetical order in decreasing order"); + + /* TODO */ + + SKIPPED(); + PART_EMPTY(H5Lget_name_by_idx_ud_name_order_decreasing); + } + PART_END(H5Lget_name_by_idx_ud_name_order_decreasing); + + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + subgroup_id = H5I_INVALID_HID; + } + H5E_END_TRY; + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(ext_file_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a link's name can't be retrieved + * when H5Lget_name_by_idx is passed invalid parameters. 
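+ *
+ * For context, the valid calling pattern that these negative checks are built
+ * around is the usual two-step size query followed by retrieval. A minimal
+ * hypothetical sketch (not part of this patch; "grp", "idx", and "buf" are
+ * illustrative names only):
+ *
+ *     ssize_t len = H5Lget_name_by_idx(grp, ".", H5_INDEX_NAME, H5_ITER_INC,
+ *                                      idx, NULL, 0, H5P_DEFAULT);
+ *     char *buf = HDmalloc((size_t)len + 1);
+ *     H5Lget_name_by_idx(grp, ".", H5_INDEX_NAME, H5_ITER_INC, idx, buf,
+ *                        (size_t)len + 1, H5P_DEFAULT);
+ *
+ * Each part below invalidates exactly one of these arguments (location ID,
+ * group name, index type, iteration order, or LAPL) and expects the call to
+ * fail.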
+ */ +static int +test_get_link_name_invalid_params(void) +{ + ssize_t ret; + htri_t link_exists; + size_t link_name_buf_size = 0; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + char *link_name_buf = NULL; + + TESTING_MULTIPART("link name retrieval with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, basic, more, soft, hard, external link, or " + "creation order aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, GET_LINK_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + GET_LINK_NAME_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if (H5Lcreate_hard(group_id, ".", group_id, GET_LINK_NAME_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, GET_LINK_NAME_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link did not exist\n"); + goto error; + } + + if ((ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, link_name_buf_size, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve link name size\n"); + goto error; + } + + link_name_buf_size = (size_t)ret; + if (NULL == (link_name_buf = (char *)HDmalloc(link_name_buf_size + 1))) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lget_name_by_idx_invalid_loc_id) + { + TESTING_2("H5Lget_name_by_idx with an invalid location ID"); + + H5E_BEGIN_TRY + { + ret = H5Lget_name_by_idx(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf, + link_name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_name_by_idx succeeded with an invalid location ID!\n"); + PART_ERROR(H5Lget_name_by_idx_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Lget_name_by_idx_invalid_loc_id); + + PART_BEGIN(H5Lget_name_by_idx_invalid_grp_name) + { + TESTING_2("H5Lget_name_by_idx with an invalid group name"); + + H5E_BEGIN_TRY + { + ret = H5Lget_name_by_idx(group_id, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf, + link_name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_name_by_idx 
succeeded with a NULL group name!\n"); + PART_ERROR(H5Lget_name_by_idx_invalid_grp_name); + } + + H5E_BEGIN_TRY + { + ret = H5Lget_name_by_idx(group_id, "", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf, + link_name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_name_by_idx succeeded with an invalid group name of ''!\n"); + PART_ERROR(H5Lget_name_by_idx_invalid_grp_name); + } + + PASSED(); + } + PART_END(H5Lget_name_by_idx_invalid_grp_name); + + PART_BEGIN(H5Lget_name_by_idx_invalid_index_type) + { + TESTING_2("H5Lget_name_by_idx with an invalid index type"); + + H5E_BEGIN_TRY + { + ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, link_name_buf, + link_name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_name_by_idx succeeded with invalid index type H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Lget_name_by_idx_invalid_index_type); + } + + H5E_BEGIN_TRY + { + ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, link_name_buf, + link_name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_name_by_idx succeeded with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Lget_name_by_idx_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Lget_name_by_idx_invalid_index_type); + + PART_BEGIN(H5Lget_name_by_idx_invalid_iter_order) + { + TESTING_2("H5Lget_name_by_idx with an invalid iteration ordering"); + + H5E_BEGIN_TRY + { + ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, link_name_buf, + link_name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (ret >= 0) { + H5_FAILED(); + HDprintf( + " H5Lget_name_by_idx succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Lget_name_by_idx_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_N, 0, link_name_buf, + link_name_buf_size + 1, H5P_DEFAULT); + } + H5E_END_TRY; + + if (ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_name_by_idx succeeded with invalid iteration ordering H5_ITER_N!\n"); + PART_ERROR(H5Lget_name_by_idx_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Lget_name_by_idx_invalid_iter_order); + + PART_BEGIN(H5Lget_name_by_idx_invalid_lapl) + { + TESTING_2("H5Lget_name_by_idx with an invalid LAPL"); + + H5E_BEGIN_TRY + { + ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf, + link_name_buf_size + 1, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lget_name_by_idx succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Lget_name_by_idx_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Lget_name_by_idx_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (link_name_buf) { + HDfree(link_name_buf); + link_name_buf = NULL; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (link_name_buf) + HDfree(link_name_buf); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of link + * iteration using H5Literate(_by_name)2 with + * only hard links. Iteration is done in + * increasing and decreasing order of both link + * name and link creation order. 
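+ *
+ * For reference, the call shape exercised throughout this test is roughly
+ * (a hypothetical sketch; "grp" and "counter" are illustrative names only):
+ *
+ *     size_t counter = 0;
+ *     H5Literate2(grp, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ *                 link_iter_hard_links_cb, &counter);
+ *
+ * The same callback is reused for every index type and iteration order, and
+ * the counter is checked afterwards to confirm that every link was visited.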
+ */ +static int +test_link_iterate_hard_links(void) +{ + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dset_dspace = H5I_INVALID_HID; + + TESTING_MULTIPART("link iteration (only hard links)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, link, or iterate aren't supported with " + "this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME); + goto error; + } + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dspace = + generate_random_dataspace(LINK_ITER_HARD_LINKS_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + for (i = 0; i < LINK_ITER_HARD_LINKS_TEST_NUM_LINKS; i++) { + char dset_name[LINK_ITER_HARD_LINKS_TEST_BUF_SIZE]; + + /* Create the datasets with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(dset_name, LINK_ITER_HARD_LINKS_TEST_BUF_SIZE, LINK_ITER_HARD_LINKS_TEST_LINK_NAME "%d", + (int)(LINK_ITER_HARD_LINKS_TEST_NUM_LINKS - i - 1)); + + if ((dset_id = H5Dcreate2(group_id, dset_name, dset_dtype, dset_dspace, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", dset_name); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, dset_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", dset_name); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", dset_name); + goto error; + } + + if (H5Dclose(dset_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close dataset '%s'\n", dset_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. This is to try and check that the links are indeed being + * returned in the correct order. 
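+ *
+ * A hypothetical sketch of the shape such a callback takes (the real
+ * link_iter_hard_links_cb is defined elsewhere in this file; the
+ * expected-name check is elided here):
+ *
+ *     static herr_t
+ *     example_iter_cb(hid_t group, const char *name, const H5L_info2_t *info, void *op_data)
+ *     {
+ *         size_t *counter = (size_t *)op_data;
+ *         ... verify "name" matches the link expected at step (*counter) ...
+ *         (*counter)++;
+ *         return H5_ITER_CONT;
+ *     }
+ *
+ * H5Literate2() hands each link's name and H5L_info2_t to the callback and
+ * stops early if the callback returns a nonzero value.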
+ */ + + PART_BEGIN(H5Literate_link_name_increasing) + { + TESTING_2("H5Literate2 by link name in increasing order"); + + i = 0; + + /* Test basic link iteration capability using both index types and both index orders */ + if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_hard_links_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type name in increasing order failed\n"); + PART_ERROR(H5Literate_link_name_increasing); + } + + if (i != LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Literate_link_name_increasing); + + PART_BEGIN(H5Literate_link_name_decreasing) + { + TESTING_2("H5Literate2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_ITER_HARD_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_hard_links_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Literate_link_name_decreasing); + } + + if (i != 2 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Literate_link_name_decreasing); +#endif + } + PART_END(H5Literate_link_name_decreasing); + + PART_BEGIN(H5Literate_link_creation_increasing) + { + TESTING_2("H5Literate2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_hard_links_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Literate_link_creation_increasing); + } + + if (i != 3 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Literate_link_creation_increasing); + + PART_BEGIN(H5Literate_link_creation_decreasing) + { + TESTING_2("H5Literate2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_hard_links_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Literate_link_creation_decreasing); + } + + if (i != 4 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Literate_link_creation_decreasing); + + PART_BEGIN(H5Literate_by_name_link_name_increasing) + { + TESTING_2("H5Literate_by_name2 by link name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 0; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_hard_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type name in increasing order failed\n"); + 
PART_ERROR(H5Literate_by_name_link_name_increasing); + } + + if (i != LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_link_name_increasing); + + PART_BEGIN(H5Literate_by_name_link_name_decreasing) + { + TESTING_2("H5Literate_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_ITER_HARD_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_hard_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Literate_by_name_link_name_decreasing); + } + + if (i != 2 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Literate_by_name_link_name_decreasing); +#endif + } + PART_END(H5Literate_by_name_link_name_decreasing); + + PART_BEGIN(H5Literate_by_name_creation_increasing) + { + TESTING_2("H5Literate_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_hard_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Literate_by_name_creation_increasing); + } + + if (i != 3 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_creation_increasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_creation_increasing); + + PART_BEGIN(H5Literate_by_name_creation_decreasing) + { + TESTING_2("H5Literate_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_hard_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Literate_by_name_creation_decreasing); + } + + if (i != 4 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(dset_dspace) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(dset_dspace); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + 
H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of link + * iteration using H5Literate(_by_name)2 with + * only soft links. Iteration is done in + * increasing and decreasing order of both link + * name and link creation order. + */ +static int +test_link_iterate_soft_links(void) +{ + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("link iteration (only soft links)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link, or iterate aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME); + goto error; + } + + for (i = 0; i < LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS; i++) { + char link_name[LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE]; + char link_target[LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE]; + + /* Create the links with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(link_name, LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE, LINK_ITER_SOFT_LINKS_TEST_LINK_NAME "%d", + (int)(LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS - i - 1)); + + HDsnprintf(link_target, LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE, "target%d", + (int)(LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS - i - 1)); + + if (H5Lcreate_soft(link_target, group_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", link_name); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, link_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", link_name); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", link_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. This is to try and check that the links are indeed being + * returned in the correct order. 
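+ *
+ * The counter follows the same convention as in the hard-link test: it is
+ * reset to a multiple of LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS before each
+ * pass and must have advanced by exactly that many links afterwards.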
+ */ + + PART_BEGIN(H5Literate_link_name_increasing) + { + TESTING_2("H5Literate2 by link name in increasing order"); + + i = 0; + + /* Test basic link iteration capability using both index types and both index orders */ + if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_soft_links_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type name in increasing order failed\n"); + PART_ERROR(H5Literate_link_name_increasing); + } + + if (i != LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Literate_link_name_increasing); + + PART_BEGIN(H5Literate_link_name_decreasing) + { + TESTING_2("H5Literate2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_soft_links_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Literate_link_name_decreasing); + } + + if (i != 2 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Literate_link_name_decreasing); +#endif + } + PART_END(H5Literate_link_name_decreasing); + + PART_BEGIN(H5Literate_link_creation_increasing) + { + TESTING_2("H5Literate2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_soft_links_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Literate_link_creation_increasing); + } + + if (i != 3 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Literate_link_creation_increasing); + + PART_BEGIN(H5Literate_link_creation_decreasing) + { + TESTING_2("H5Literate2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_soft_links_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Literate_link_creation_decreasing); + } + + if (i != 4 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Literate_link_creation_decreasing); + + PART_BEGIN(H5Literate_by_name_link_name_increasing) + { + TESTING_2("H5Literate_by_name2 by link name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 0; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_soft_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type name in increasing order failed\n"); + 
PART_ERROR(H5Literate_by_name_link_name_increasing); + } + + if (i != LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_link_name_increasing); + + PART_BEGIN(H5Literate_by_name_link_name_decreasing) + { + TESTING_2("H5Literate_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_soft_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Literate_by_name_link_name_decreasing); + } + + if (i != 2 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Literate_by_name_link_name_decreasing); +#endif + } + PART_END(H5Literate_by_name_link_name_decreasing); + + PART_BEGIN(H5Literate_by_name_creation_increasing) + { + TESTING_2("H5Literate_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_soft_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Literate_by_name_creation_increasing); + } + + if (i != 3 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_creation_increasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_creation_increasing); + + PART_BEGIN(H5Literate_by_name_creation_decreasing) + { + TESTING_2("H5Literate_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_soft_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Literate_by_name_creation_decreasing); + } + + if (i != 4 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of link + * iteration using H5Literate(_by_name)2 with + * only 
external links. Iteration is done in + * increasing and decreasing order of both link + * name and link creation order. + */ +static int +test_link_iterate_external_links(void) +{ +#ifndef NO_EXTERNAL_LINKS + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + + TESTING_MULTIPART("link iteration (only external links)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link, or iterate aren't supported with this " + "connector\n"); + return 0; + } + +#ifndef NO_EXTERNAL_LINKS + TESTING_2("test setup"); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME); + + if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME); + goto error; + } + + for (i = 0; i < LINK_ITER_EXT_LINKS_TEST_NUM_LINKS; i++) { + char link_name[LINK_ITER_EXT_LINKS_TEST_BUF_SIZE]; + + /* Create the links with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(link_name, LINK_ITER_EXT_LINKS_TEST_BUF_SIZE, LINK_ITER_EXT_LINKS_TEST_LINK_NAME "%d", + (int)(LINK_ITER_EXT_LINKS_TEST_NUM_LINKS - i - 1)); + + if (H5Lcreate_external(ext_link_filename, "/", group_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", link_name); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(group_id, link_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", link_name); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", link_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. 
This is to try and check that the links are indeed being + * returned in the correct order. + */ + + PART_BEGIN(H5Literate_link_name_increasing) + { + TESTING_2("H5Literate2 by link name in increasing order"); + + i = 0; + + /* Test basic link iteration capability using both index types and both index orders */ + if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_external_links_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type name in increasing order failed\n"); + PART_ERROR(H5Literate_link_name_increasing); + } + + if (i != LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Literate_link_name_increasing); + + PART_BEGIN(H5Literate_link_name_decreasing) + { + TESTING_2("H5Literate2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_ITER_EXT_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_external_links_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Literate_link_name_decreasing); + } + + if (i != 2 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Literate_link_name_decreasing); +#endif + } + PART_END(H5Literate_link_name_decreasing); + + PART_BEGIN(H5Literate_link_creation_increasing) + { + TESTING_2("H5Literate2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_external_links_cb, + &i) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Literate_link_creation_increasing); + } + + if (i != 3 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Literate_link_creation_increasing); + + PART_BEGIN(H5Literate_link_creation_decreasing) + { + TESTING_2("H5Literate2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_external_links_cb, + &i) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Literate_link_creation_decreasing); + } + + if (i != 4 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Literate_link_creation_decreasing); + + PART_BEGIN(H5Literate_by_name_link_name_increasing) + { + TESTING_2("H5Literate_by_name2 by link name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 0; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_external_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" 
H5Literate_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Literate_by_name_link_name_increasing); + } + + if (i != LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_link_name_increasing); + + PART_BEGIN(H5Literate_by_name_link_name_decreasing) + { + TESTING_2("H5Literate_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_ITER_EXT_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_external_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Literate_by_name_link_name_decreasing); + } + + if (i != 2 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Literate_by_name_link_name_decreasing); +#endif + } + PART_END(H5Literate_by_name_link_name_decreasing); + + PART_BEGIN(H5Literate_by_name_creation_increasing) + { + TESTING_2("H5Literate_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2(file_id, + "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_external_links_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Literate_by_name_creation_increasing); + } + + if (i != 3 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_creation_increasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_creation_increasing); + + PART_BEGIN(H5Literate_by_name_creation_decreasing) + { + TESTING_2("H5Literate_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2(file_id, + "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_external_links_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Literate_by_name_creation_decreasing); + } + + if (i != 4 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif 
+} + +/* + * A test to check the functionality of link + * iteration using H5Literate(_by_name)2 with + * only user-defined links. Iteration is done + * in increasing and decreasing order of both + * link name and link creation order. + * + * TODO refactor test so that creation order tests + * actually test the order that objects were created in. + */ +static int +test_link_iterate_ud_links(void) +{ + TESTING("link iteration (only user-defined links)"); + + SKIPPED(); + + return 1; +} + +/* + * A test to check the functionality of link + * iteration using H5Literate(_by_name)2 with + * mixed link types. Iteration is done in + * increasing and decreasing order of both link + * name and link creation order. + * + * TODO refactor test so that creation order tests + * actually test the order that objects were created in. + * + * TODO add UD links + * + * TODO refactor link saving portion into its own test + */ +static int +test_link_iterate_mixed_links(void) +{ +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) + hsize_t saved_idx; + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dset_dspace = H5I_INVALID_HID; + int halted; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + + TESTING_MULTIPART("link iteration (mixed link types)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link, soft or external link, iterate, or creation " + "order aren't supported with this connector\n"); + return 0; + } + +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) + TESTING_2("test setup"); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME); + + if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME); + goto error; + } + + if 
((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dspace = + generate_random_dataspace(LINK_ITER_MIXED_LINKS_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME, dset_dtype, dset_dspace, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME); + goto error; + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME + "/" LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME, + group_id, LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME); + goto error; + } + + if (H5Lcreate_external(ext_link_filename, "/", group_id, LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME); + goto error; + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(group_id, LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" first link did not exist\n"); + goto error; + } + + if ((link_exists = H5Lexists(group_id, LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" second link did not exist\n"); + goto error; + } + + if ((link_exists = H5Lexists(group_id, LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" third link did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. This is to try and check that the links are indeed being + * returned in the correct order. 
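+ *
+ * Beyond the usual passes by name and by creation order, this test also
+ * exercises the idx argument of H5Literate2: the index-saving callback
+ * stops the iteration part-way through, and a second H5Literate2 call
+ * resumes from the saved index rather than starting over.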
+ */ + + PART_BEGIN(H5Literate_link_name_increasing) + { + TESTING_2("H5Literate2 by link name in increasing order"); + + i = 0; + + /* Test basic link iteration capability using both index types and both index orders */ + if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_mixed_links_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type name in increasing order failed\n"); + PART_ERROR(H5Literate_link_name_increasing); + } + + if (i != LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Literate_link_name_increasing); + + PART_BEGIN(H5Literate_link_name_decreasing) + { + TESTING_2("H5Literate2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_mixed_links_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Literate_link_name_decreasing); + } + + if (i != 2 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Literate_link_name_decreasing); +#endif + } + PART_END(H5Literate_link_name_decreasing); + + PART_BEGIN(H5Literate_link_creation_increasing) + { + TESTING_2("H5Literate2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_mixed_links_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Literate_link_creation_increasing); + } + + if (i != 3 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Literate_link_creation_increasing); + + PART_BEGIN(H5Literate_link_creation_decreasing) + { + TESTING_2("H5Literate2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS; + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_mixed_links_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Literate_link_creation_decreasing); + } + + if (i != 4 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Literate_link_creation_decreasing); + + PART_BEGIN(H5Literate_by_name_link_name_increasing) + { + TESTING_2("H5Literate_by_name2 by link name in increasing order"); + + i = 0; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_mixed_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Literate_by_name_link_name_increasing); + } + + if (i != 
LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_link_name_increasing); + + PART_BEGIN(H5Literate_by_name_link_name_decreasing) + { + TESTING_2("H5Literate_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_mixed_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Literate_by_name_link_name_decreasing); + } + + if (i != 2 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Literate_by_name_link_name_decreasing); +#endif + } + PART_END(H5Literate_by_name_link_name_decreasing); + + PART_BEGIN(H5Literate_by_name_creation_increasing) + { + TESTING_2("H5Literate_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_mixed_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Literate_by_name_creation_increasing); + } + + if (i != 3 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_creation_increasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_creation_increasing); + + PART_BEGIN(H5Literate_by_name_creation_decreasing) + { + TESTING_2("H5Literate_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS; + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_mixed_links_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Literate_by_name_creation_decreasing); + } + + if (i != 4 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not iterated over!\n"); + PART_ERROR(H5Literate_by_name_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_creation_decreasing); + + PART_BEGIN(H5Literate_index_saving_increasing) + { + TESTING_2("H5Literate2 index-saving capabilities in increasing order"); + + /* Test the H5Literate2 index-saving capabilities */ + saved_idx = 0; + halted = 0; + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, &saved_idx, link_iter_idx_saving_cb, + &halted) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 index-saving capability test failed\n"); + PART_ERROR(H5Literate_index_saving_increasing); + } + + if (saved_idx != 2) { + H5_FAILED(); + HDprintf(" saved index after iteration was wrong\n"); + 
PART_ERROR(H5Literate_index_saving_increasing); + } + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, &saved_idx, link_iter_idx_saving_cb, + &halted) < 0) { + H5_FAILED(); + HDprintf(" couldn't finish iterating when beginning from saved index\n"); + PART_ERROR(H5Literate_index_saving_increasing); + } + + PASSED(); + } + PART_END(H5Literate_index_saving_increasing); + + PART_BEGIN(H5Literate_index_saving_decreasing) + { + TESTING_2("H5Literate2 index-saving capabilities in decreasing order"); + + saved_idx = LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS - 1; + halted = 0; + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, &saved_idx, link_iter_idx_saving_cb, + &halted) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 index-saving capability test failed\n"); + PART_ERROR(H5Literate_index_saving_decreasing); + } + + if (saved_idx != 2) { + H5_FAILED(); + HDprintf(" saved index after iteration was wrong\n"); + PART_ERROR(H5Literate_index_saving_decreasing); + } + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, &saved_idx, link_iter_idx_saving_cb, + &halted) < 0) { + H5_FAILED(); + HDprintf(" couldn't finish iterating when beginning from saved index\n"); + PART_ERROR(H5Literate_index_saving_decreasing); + } + + PASSED(); + } + PART_END(H5Literate_index_saving_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(dset_dspace) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(dset_dspace); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check that H5Literate(_by_name)2 fails + * when given invalid parameters. 
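+ *
+ * Every call is wrapped in H5E_BEGIN_TRY / H5E_END_TRY to suppress the
+ * automatic error printing for the expected failures; a non-negative
+ * return value from any of the calls is treated as a test failure.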
+ */ +static int +test_link_iterate_invalid_params(void) +{ + herr_t err_ret = -1; + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dset_dspace = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; + + TESTING_MULTIPART("link iteration with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, link, soft or external link, iterate, or " + "creation order aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME); + + if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME); + goto error; + } + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dspace = + generate_random_dataspace(LINK_ITER_INVALID_PARAMS_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME, dset_dtype, dset_dspace, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME + "/" LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME, + group_id, LINK_ITER_INVALID_PARAMS_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_ITER_INVALID_PARAMS_TEST_SOFT_LINK_NAME); + goto error; + } +#ifndef NO_EXTERNAL_LINKS + if (H5Lcreate_external(ext_link_filename, "/", group_id, 
LINK_ITER_INVALID_PARAMS_TEST_EXT_LINK_NAME, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_ITER_INVALID_PARAMS_TEST_EXT_LINK_NAME); + goto error; + } +#endif + /* Verify the links have been created */ + if ((link_exists = H5Lexists(group_id, LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", + LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" first link did not exist\n"); + goto error; + } + + if ((link_exists = H5Lexists(group_id, LINK_ITER_INVALID_PARAMS_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", + LINK_ITER_INVALID_PARAMS_TEST_SOFT_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" second link did not exist\n"); + goto error; + } +#ifndef NO_EXTERNAL_LINKS + if ((link_exists = H5Lexists(group_id, LINK_ITER_INVALID_PARAMS_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", LINK_ITER_INVALID_PARAMS_TEST_EXT_LINK_NAME); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" third link did not exist\n"); + goto error; + } +#endif + + PASSED(); + + BEGIN_MULTIPART + { + i = 0; + + PART_BEGIN(H5Literate_invalid_grp_id) + { + TESTING_2("H5Literate2 with an invalid group ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Literate2(H5I_INVALID_HID, H5_INDEX_NAME, H5_ITER_INC, NULL, + link_iter_invalid_params_cb, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate2 succeeded with an invalid group ID!\n"); + PART_ERROR(H5Literate_invalid_grp_id); + } + + PASSED(); + } + PART_END(H5Literate_invalid_grp_id); + + PART_BEGIN(H5Literate_invalid_index_type) + { + TESTING_2("H5Literate2 with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = H5Literate2(group_id, H5_INDEX_UNKNOWN, H5_ITER_INC, NULL, + link_iter_invalid_params_cb, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate2 succeeded with invalid index type H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Literate_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Literate2(group_id, H5_INDEX_N, H5_ITER_INC, NULL, link_iter_invalid_params_cb, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate2 succeeded with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Literate_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Literate_invalid_index_type); + + PART_BEGIN(H5Literate_invalid_iter_order) + { + TESTING_2("H5Literate2 with an invalid iteration ordering"); + + H5E_BEGIN_TRY + { + err_ret = H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_UNKNOWN, NULL, + link_iter_invalid_params_cb, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate2 succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Literate_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_N, NULL, link_iter_invalid_params_cb, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate2 succeeded with invalid iteration ordering H5_ITER_N!\n"); + PART_ERROR(H5Literate_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Literate_invalid_iter_order); + + PART_BEGIN(H5Literate_by_name_invalid_loc_id) + { + TESTING_2("H5Literate_by_name2 with an 
invalid location ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Literate_by_name2( + H5I_INVALID_HID, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 succeeded with an invalid location ID!\n"); + PART_ERROR(H5Literate_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Literate_by_name_invalid_loc_id); + + PART_BEGIN(H5Literate_by_name_invalid_grp_name) + { + TESTING_2("H5Literate_by_name2 with an invalid group name"); + + H5E_BEGIN_TRY + { + err_ret = H5Literate_by_name2(file_id, NULL, H5_INDEX_NAME, H5_ITER_INC, NULL, + link_iter_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 succeeded with a NULL group name!\n"); + PART_ERROR(H5Literate_by_name_invalid_grp_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Literate_by_name2(file_id, "", H5_INDEX_NAME, H5_ITER_INC, NULL, + link_iter_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 succeeded with an invalid group name of ''!\n"); + PART_ERROR(H5Literate_by_name_invalid_grp_name); + } + + PASSED(); + } + PART_END(H5Literate_by_name_invalid_grp_name); + + PART_BEGIN(H5Literate_by_name_invalid_index_type) + { + TESTING_2("H5Literate_by_name2 with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5_INDEX_UNKNOWN, H5_ITER_INC, NULL, link_iter_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 succeeded with invalid index type H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Literate_by_name_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5_INDEX_N, H5_ITER_INC, NULL, link_iter_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 succeeded with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Literate_by_name_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Literate_by_name_invalid_index_type); + + PART_BEGIN(H5Literate_by_name_invalid_iter_order) + { + TESTING_2("H5Literate_by_name2 with an invalid iteration ordering"); + + H5E_BEGIN_TRY + { + err_ret = H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_UNKNOWN, NULL, link_iter_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " H5Literate_by_name2 succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Literate_by_name_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + err_ret = H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_N, NULL, link_iter_invalid_params_cb, &i, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 succeeded with invalid iteration ordering H5_ITER_N!\n"); + PART_ERROR(H5Literate_by_name_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Literate_by_name_invalid_iter_order); + + PART_BEGIN(H5Literate_by_name_invalid_lapl) + { + 
TESTING_2("H5Literate_by_name2 with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_invalid_params_cb, NULL, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Literate_by_name_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Literate_by_name_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(dset_dspace) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(dset_dspace); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that link iteration performed on a + * group with no links in it is not problematic. + */ +static int +test_link_iterate_0_links(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("link iteration on group with 0 links"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link iterate, or creation order aren't supported " + "with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create GCPL for link creation order tracking\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) { + H5_FAILED(); + HDprintf(" couldn't set link creation order tracking\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Literate_0_links_name_increasing) + { + TESTING_2("H5Literate2 by link name in increasing order"); + + /* Test basic link iteration capability using both index types and both index orders */ + if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_0_links_cb, NULL) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type name in increasing order failed\n"); + PART_ERROR(H5Literate_0_links_name_increasing); + } + + PASSED(); + } + PART_END(H5Literate_0_links_name_increasing); + + 
PART_BEGIN(H5Literate_0_links_name_decreasing) + { + TESTING_2("H5Literate2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_0_links_cb, NULL) < 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Literate_0_links_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Literate_0_links_name_decreasing); +#endif + } + PART_END(H5Literate_0_links_name_decreasing); + + PART_BEGIN(H5Literate_0_links_creation_increasing) + { + TESTING_2("H5Literate2 by creation order in increasing order"); + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_0_links_cb, NULL) < + 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Literate_0_links_creation_increasing); + } + + PASSED(); + } + PART_END(H5Literate_0_links_creation_increasing); + + PART_BEGIN(H5Literate_0_links_creation_decreasing) + { + TESTING_2("H5Literate2 by creation order in decreasing order"); + + if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_0_links_cb, NULL) < + 0) { + H5_FAILED(); + HDprintf(" H5Literate2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Literate_0_links_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Literate_0_links_creation_decreasing); + + PART_BEGIN(H5Literate_by_name_0_links_name_increasing) + { + TESTING_2("H5Literate_by_name2 by link name in increasing order"); + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, NULL, link_iter_0_links_cb, NULL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Literate_by_name_0_links_name_increasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_0_links_name_increasing); + + PART_BEGIN(H5Literate_by_name_0_links_name_decreasing) + { + TESTING_2("H5Literate_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME, H5_INDEX_NAME, + H5_ITER_DEC, NULL, link_iter_0_links_cb, NULL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Literate_by_name_0_links_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Literate_by_name_0_links_name_decreasing); +#endif + } + PART_END(H5Literate_by_name_0_links_name_decreasing); + + PART_BEGIN(H5Literate_by_name_0_links_creation_increasing) + { + TESTING_2("H5Literate_by_name2 by creation order in increasing order"); + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_0_links_cb, NULL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Literate_by_name_0_links_creation_increasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_0_links_creation_increasing); + + PART_BEGIN(H5Literate_by_name_0_links_creation_decreasing) + { + TESTING_2("H5Literate_by_name2 by creation order in decreasing order"); + + if (H5Literate_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME, + 
H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_0_links_cb, NULL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Literate_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Literate_by_name_0_links_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Literate_by_name_0_links_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of recursive + * link iteration using H5Lvisit(_by_name)2 with + * only hard links and where there are no cyclic + * links. Iteration is done in increasing and + * decreasing order of both link name and link + * creation order. + */ +static int +test_link_visit_hard_links_no_cycles(void) +{ + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dset_dspace = H5I_INVALID_HID; + + TESTING_MULTIPART("link visiting without cycles (only hard links)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link iterate, or creation order aren't supported " + "with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME); + goto error; + } + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dspace = generate_random_dataspace(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_DSET_SPACE_RANK, NULL, + NULL, FALSE)) < 0) + TEST_ERROR; + + for (i = 0; i < LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS; i++) { + size_t j; + char grp_name[LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE]; + + /* Create the groups with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(grp_name, 
LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - i - 1)); + + if ((subgroup_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", grp_name); + goto error; + } + + for (j = 0; j < LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) { + char dset_name[LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE]; + + /* Create the datasets with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(dset_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1)); + + if ((dset_id = H5Dcreate2(subgroup_id, dset_name, dset_dtype, dset_dspace, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", dset_name); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(subgroup_id, dset_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", dset_name); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", dset_name); + goto error; + } + + if (H5Dclose(dset_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close dataset '%s'\n", dset_name); + goto error; + } + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close subgroup '%s'\n", grp_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. This is to try and check that the links are indeed being + * returned in the correct order. 
+ */ + + PART_BEGIN(H5Lvisit_no_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_hard_links_no_cycles_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_hard_links_no_cycles_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_no_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_no_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_hard_links_no_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_hard_links_no_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_creation_decreasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 0; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, 
link_visit_hard_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_no_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, link_visit_hard_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_by_name_no_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_by_name_no_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_hard_links_no_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_no_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_hard_links_no_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if 
(H5Sclose(dset_dspace) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(dset_dspace); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Pclose(gcpl_id); + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of recursive + * link iteration using H5Lvisit(_by_name)2 with + * only soft links and where there are no cyclic + * links. Iteration is done in increasing and + * decreasing order of both link name and link + * creation order. + */ +static int +test_link_visit_soft_links_no_cycles(void) +{ + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("link visiting without cycles (only soft links)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link, soft link, iterate, or creation order " + "aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME); + goto error; + } + + for (i = 0; i < LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS; i++) { + size_t j; + char grp_name[LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE]; + + /* Create the groups with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(grp_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - i - 1)); + + if ((subgroup_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", grp_name); + goto error; + } + + for (j = 0; j < LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) { + char link_name[LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE]; + char 
link_target[LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE]; + + /* Create the links with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1)); + + HDsnprintf(link_target, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE, "target%d", + (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1)); + + if (H5Lcreate_soft(link_target, subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", link_name); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(subgroup_id, link_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", link_name); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", link_name); + goto error; + } + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close subgroup '%s'\n", grp_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. This is to try and check that the links are indeed being + * returned in the correct order. + */ + + PART_BEGIN(H5Lvisit_no_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_soft_links_no_cycles_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_soft_links_no_cycles_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_no_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_no_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_soft_links_no_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing); + } + + if (i != 3 
* LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_soft_links_no_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_creation_decreasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, link_visit_soft_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_no_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, link_visit_soft_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_by_name_no_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_by_name_no_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_soft_links_no_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n"); + 
PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_no_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_soft_links_no_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of recursive + * link iteration using H5Lvisit(_by_name)2 with + * only external links and where there are no cyclic + * links. Iteration is done in increasing and + * decreasing order of both link name and link + * creation order. 
+ */ +static int +test_link_visit_external_links_no_cycles(void) +{ +#ifndef NO_EXTERNAL_LINKS + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + + TESTING_MULTIPART("link visiting without cycles (only external links)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link, external link, iterate, or creation order " + "aren't supported with this connector\n"); + return 0; + } + +#ifndef NO_EXTERNAL_LINKS + TESTING_2("test setup"); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME); + + if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME); + goto error; + } + + for (i = 0; i < LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS; i++) { + size_t j; + char grp_name[LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE]; + + /* Create the groups with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(grp_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - i - 1)); + + if ((subgroup_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", grp_name); + goto error; + } + + for (j = 0; j < LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) { + char link_name[LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE]; + + /* Create the links with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1)); + + if 
(H5Lcreate_external(ext_link_filename, "/", subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", link_name); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(subgroup_id, link_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", link_name); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", link_name); + goto error; + } + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close subgroup '%s'\n", grp_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. This is to try and check that the links are indeed being + * returned in the correct order. + */ + + PART_BEGIN(H5Lvisit_no_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_external_links_no_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_external_links_no_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_no_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_no_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_external_links_no_cycles_cb, + &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * 
LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_external_links_no_cycles_cb, + &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_creation_decreasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 0; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, link_visit_external_links_no_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_no_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, link_visit_external_links_no_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_by_name_no_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_by_name_no_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_external_links_no_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_no_cycles_link_creation_increasing); + + 
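[Editor's note] The counter pattern described in the NOTE at the top of this multipart block generally works as sketched below. This is not the link_visit_external_links_no_cycles_cb defined in the file, and the expected-name comparison is elided; it only illustrates how the op_data counter selects the current sub-test range and advances once per visited link so the caller can verify the total afterwards:

/* Illustrative sketch, not the callback defined in this file.  The counter in
 * op_data encodes which of the four orderings is being tested (0..n-1 for the
 * first pass, n..2n-1 for the second, and so on).  H5Lvisit2 presents every
 * link it recurses through -- the nested subgroup links as well as the
 * external links -- so the callback derives the link name (and type) it
 * expects from the current counter value, verifies it, then advances. */
static herr_t
visit_counter_sketch_cb(hid_t group, const char *name, const H5L_info2_t *info, void *op_data)
{
    size_t *counter = (size_t *)op_data;

    (void)group;
    (void)info;

    /* ... compare 'name' (and info->type) against what *counter says to expect ... */

    (*counter)++;

    return H5_ITER_CONT; /* returning H5_ITER_ERROR here would abort the visit and fail the test */
}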
PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_external_links_no_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check the functionality of recursive + * link iteration using H5Lvisit(_by_name)2 with + * only user-defined links and where there are no + * cyclic links. Iteration is done in increasing + * and decreasing order of both link name and link + * creation order. + * + * TODO refactor test so that creation order tests + * actually test the order that objects were created in. + */ +static int +test_link_visit_ud_links_no_cycles(void) +{ + TESTING("link visiting without cycles (only user-defined links)"); + + SKIPPED(); + + return 1; +} + +/* + * A test to check the functionality of recursive + * link iteration using H5Lvisit(_by_name)2 with + * mixed link types and where there are no cyclic + * links. Iteration is done in increasing and + * decreasing order of both link name and link + * creation order. + * + * TODO refactor test so that creation order tests + * actually test the order that objects were created in. 
+ * + * TODO add UD links + * + * TODO refactor test to create a macroed number of subgroups + */ +static int +test_link_visit_mixed_links_no_cycles(void) +{ +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup1 = H5I_INVALID_HID, subgroup2 = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + + TESTING_MULTIPART("link visiting without cycles (mixed link types)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link, hard, soft, external link, iterate, or " + "creation order aren't supported with this connector\n"); + return 0; + } + +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) + TESTING_2("test setup"); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME); + + if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME); + goto error; + } + + if ((subgroup1 = H5Gcreate2(group_id, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first subgroup '%s'\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2); + goto error; + } + + if ((subgroup2 = H5Gcreate2(group_id, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create second subgroup '%s'\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3); + goto error; + } + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, 
FALSE)) < 0) + TEST_ERROR; + + if ((fspace_id = generate_random_dataspace(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_SPACE_RANK, NULL, + NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(subgroup1, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME, dset_dtype, + fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first dataset '%s'\n", LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME); + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(subgroup2, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2, dset_dtype, + fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create second dataset '%s'\n", LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME); + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + if (H5Lcreate_hard(subgroup1, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME, subgroup1, + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first hard link '%s'\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1); + goto error; + } + + if (H5Lcreate_soft(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME, subgroup1, + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2); + goto error; + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup2, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3); + goto error; + } + + if (H5Lcreate_hard(subgroup2, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2, subgroup2, + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create second hard link '%s'\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4); + goto error; + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first link '%s' exists\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link 1 did not exist\n"); + goto error; + } + + if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if second link '%s' exists\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link 2 did not exist\n"); + goto error; + } + + if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if third link '%s' exists\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link 3 did not exist\n"); + goto error; + } + + if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if fourth link '%s' exists\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link 4 did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { 
+ /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. This is to try and check that the links are indeed being + * returned in the correct order. + */ + + PART_BEGIN(H5Lvisit_no_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_mixed_links_no_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_mixed_links_no_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_no_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_no_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_mixed_links_no_cycles_cb, + &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_no_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_mixed_links_no_cycles_cb, + &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_no_cycles_link_creation_decreasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit_by_name2( + 
file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, link_visit_mixed_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_no_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, link_visit_mixed_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_by_name_no_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_by_name_no_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_mixed_links_no_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_no_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_mixed_links_no_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + + PASSED(); + } + 
PART_END(H5Lvisit_by_name_no_cycles_link_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(subgroup1) < 0) + TEST_ERROR; + if (H5Gclose(subgroup2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Pclose(gcpl_id); + H5Gclose(subgroup1); + H5Gclose(subgroup2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check the functionality of recursive + * link iteration using H5Lvisit(_by_name)2 with + * only hard links and where there are cyclic links. + * Iteration is done in increasing and decreasing + * order of both link name and link creation order. + */ +static int +test_link_visit_hard_links_cycles(void) +{ + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("link visiting with cycles (only hard links)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link, hard link, iterate, or creation order " + "aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME); + goto error; + } + + for (i = 0; i < LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS; i++) { + size_t j; + char grp_name[LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE]; + + /* Create the groups with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(grp_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS - i - 1)); + + if ((subgroup_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); 
+ HDprintf(" couldn't create subgroup '%s'\n", grp_name); + goto error; + } + + for (j = 0; j < LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) { + char link_name[LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE]; + + /* Create the links with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1)); + + if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create hard link '%s'\n", link_name); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(subgroup_id, link_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", link_name); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", link_name); + goto error; + } + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close subgroup '%s'\n", grp_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. This is to try and check that the links are indeed being + * returned in the correct order. + */ + + PART_BEGIN(H5Lvisit_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_hard_links_cycles_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_hard_links_cycles_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_hard_links_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + 
H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_hard_links_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_creation_decreasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, link_visit_hard_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, link_visit_hard_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_by_name_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_by_name_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_hard_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links 
were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_hard_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of recursive + * link iteration using H5Lvisit(_by_name)2 with + * only soft links and where there are cyclic links. + * Iteration is done in increasing and decreasing + * order of both link name and link creation order. 
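The cycles here come from each soft link naming the absolute path of the group that contains it, so resolving the link leads straight back to the group being visited; recursive visiting reports the soft link itself rather than descending into its target, so the cycle cannot cause unbounded recursion. A minimal sketch of the self-referencing pattern, assuming an already-open file_id and with "/cycle_grp" and "loop" as illustrative names not taken from the test:

    hid_t grp_id = H5Gcreate2(file_id, "/cycle_grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /* The soft link's target is the group's own absolute path. */
    H5Lcreate_soft("/cycle_grp", grp_id, "loop", H5P_DEFAULT, H5P_DEFAULT);

    H5Gclose(grp_id);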
+ */ +static int +test_link_visit_soft_links_cycles(void) +{ + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("link visiting with cycles (only soft links)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link, soft link, iterate, or creation order " + "aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME); + goto error; + } + + for (i = 0; i < LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS; i++) { + size_t j; + char grp_name[LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE]; + + /* Create the groups with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(grp_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - i - 1)); + + if ((subgroup_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", grp_name); + goto error; + } + + for (j = 0; j < LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) { + char link_name[LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE]; + char link_target[2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE]; + + /* Create the links with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1)); + + HDsnprintf(link_target, 2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE, + "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME "/%s", + grp_name); + + if (H5Lcreate_soft(link_target, subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", link_name); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(subgroup_id, link_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't 
determine if link '%s' exists\n", link_name); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", link_name); + goto error; + } + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close subgroup '%s'\n", grp_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. This is to try and check that the links are indeed being + * returned in the correct order. + */ + + PART_BEGIN(H5Lvisit_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_soft_links_cycles_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_soft_links_cycles_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_soft_links_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_soft_links_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not 
visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_creation_decreasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 0; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, link_visit_soft_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, link_visit_soft_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_by_name_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_by_name_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_soft_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_soft_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n"); + 
PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check the functionality of recursive + * link iteration using H5Lvisit(_by_name)2 with + * only external links and where there are cyclic + * links. Iteration is done in increasing and + * decreasing order of both link name and link + * creation order. + */ +static int +test_link_visit_external_links_cycles(void) +{ +#ifndef NO_EXTERNAL_LINKS + size_t i; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; +#endif + + TESTING_MULTIPART("link visiting with cycles (only external links)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link, external link, iterate, or creation order " + "aren't supported with this connector\n"); + return 0; + } + +#ifndef NO_EXTERNAL_LINKS + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME); + goto error; + } + + for (i = 0; i < LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS; i++) { + size_t j; + char grp_name[LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE]; + + /* Create the groups with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(grp_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - i - 1)); + + if ((subgroup_id = H5Gcreate2(group_id, grp_name, 
H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup '%s'\n", grp_name); + goto error; + } + + for (j = 0; j < LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) { + char link_name[LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE]; + char link_target_obj[2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE]; + + /* Create the links with a reverse-ordering naming scheme to test creation order later */ + HDsnprintf(link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1)); + + HDsnprintf(link_target_obj, 2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE, + "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME "/%s", + grp_name); + + if (H5Lcreate_external(H5_api_test_filename, link_target_obj, subgroup_id, link_name, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", link_name); + goto error; + } + + /* Verify the link has been created */ + if ((link_exists = H5Lexists(subgroup_id, link_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' exists\n", link_name); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link '%s' did not exist\n", link_name); + goto error; + } + } + + if (H5Gclose(subgroup_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close subgroup '%s'\n", grp_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. This is to try and check that the links are indeed being + * returned in the correct order. 
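To make the bookkeeping concrete: the counter is never cleared between passes. Pass k (counting from zero) is seeded with k * N and must finish at (k + 1) * N, where N is the number of links one pass is expected to visit, and the callback increments the counter once per visited link. A small illustrative helper, not part of the test, capturing that arithmetic:

    static size_t
    expected_count_after_pass(size_t pass, size_t n_links_per_pass)
    {
        /* pass 0 ends at N, pass 1 at 2 * N, pass 2 at 3 * N, and so on */
        return (pass + 1) * n_links_per_pass;
    }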
+ */ + + PART_BEGIN(H5Lvisit_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_external_links_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_external_links_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_external_links_cycles_cb, + &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_external_links_cycles_cb, + &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_creation_decreasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 0; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, link_visit_external_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" 
H5Lvisit_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, link_visit_external_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_by_name_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_by_name_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2(file_id, + "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_external_links_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + if (H5Lvisit_by_name2(file_id, + "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_external_links_cycles_cb, &i, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + 
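For orientation, the cycles in this test were built by making each external link name the test file itself as its target file, so the link resolves back into the file being traversed; the visit routines report each external link rather than following it into its target. A minimal sketch of that pattern, assuming an open group grp_id inside a file named self.h5 (both names are placeholders):

    /* External link whose target lives in the same file that contains the link. */
    H5Lcreate_external("self.h5", "/some_group", grp_id, "ext_loop", H5P_DEFAULT, H5P_DEFAULT);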
PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check the functionality of recursive + * link iteration using H5Lvisit(_by_name)2 with + * only user-defined links and where there are + * cyclic links. Iteration is done in increasing + * and decreasing order of both link name and link + * creation order. + * + * TODO refactor test so that creation order tests + * actually test the order that objects were created in. + */ +static int +test_link_visit_ud_links_cycles(void) +{ + TESTING("link visiting with cycles (only user-defined links)"); + + SKIPPED(); + + return 1; +} + +/* + * A test to check the functionality of recursive + * link iteration using H5Lvisit(_by_name)2 with + * mixed link types and where there are cyclic links. + * Iteration is done in increasing and decreasing + * order of both link name and link creation order. + * + * TODO refactor test so that creation order tests + * actually test the order that objects were created in. + */ +static int +test_link_visit_mixed_links_cycles(void) +{ +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) + htri_t link_exists; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup1 = H5I_INVALID_HID, subgroup2 = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; +#endif + + TESTING_MULTIPART("link visiting with cycles (mixed link types)"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link, hard, soft, external link, iterate, or " + "creation order aren't supported with this connector\n"); + return 0; + } + +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) + TESTING_2("test setup"); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME); + + if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if 
((group_id = H5Gcreate2(container_group, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME); + goto error; + } + + if ((subgroup1 = H5Gcreate2(group_id, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first subgroup '%s'\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2); + goto error; + } + + if ((subgroup2 = H5Gcreate2(group_id, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create second subgroup '%s'\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3); + goto error; + } + + if (H5Lcreate_hard(group_id, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2, subgroup1, + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first hard link '%s'\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1); + goto error; + } + + if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2, + subgroup1, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2); + goto error; + } + + if (H5Lcreate_external(ext_link_filename, "/", subgroup2, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3); + goto error; + } + + if (H5Lcreate_hard(group_id, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3, subgroup2, + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create second hard link '%s'\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4); + goto error; + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first link '%s' exists\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" first link '%s' did not exist\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1); + goto error; + } + + if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if second link '%s' exists\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" second link '%s' did not exist\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2); + goto error; + } + + if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if third link '%s' exists\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" third link '%s' did not exist\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3); + goto error; + } + + if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if fourth link '%s' exists\n", 
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" fourth link '%s' did not exist\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up the + * expected links with a given step throughout all of the following + * iterations. This is to try and check that the links are indeed being + * returned in the correct order. + */ + + PART_BEGIN(H5Lvisit_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_mixed_links_cycles_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_mixed_links_cycles_cb, &i) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_mixed_links_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_mixed_links_cycles_cb, &i) < + 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_cycles_link_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_cycles_link_creation_decreasing); + + 
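The remaining parts exercise the by-name variant. H5Lvisit_by_name2 differs from H5Lvisit2 only in how the starting group is identified: a location ID plus a path plus a link access property list instead of an already-open group ID. A hedged equivalence sketch, where my_cb, counter, grp_id and file_id are assumed to exist and "/path/to/grp" is a placeholder path that resolves to grp_id:

    /* Both calls visit the same links in the same order. */
    H5Lvisit2(grp_id, H5_INDEX_NAME, H5_ITER_INC, my_cb, &counter);
    H5Lvisit_by_name2(file_id, "/path/to/grp", H5_INDEX_NAME, H5_ITER_INC, my_cb, &counter, H5P_DEFAULT);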
PART_BEGIN(H5Lvisit_by_name_cycles_link_name_increasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in increasing order"); + + i = 0; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, link_visit_mixed_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing); + } + + if (i != LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_name_increasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_name_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, link_visit_mixed_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing); + } + + if (i != 2 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_by_name_cycles_link_name_decreasing); +#endif + } + PART_END(H5Lvisit_by_name_cycles_link_name_decreasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_increasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_mixed_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing); + } + + if (i != 3 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_creation_increasing); + + PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS; + + if (H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_mixed_links_cycles_cb, &i, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + + if (i != 4 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) { + H5_FAILED(); + HDprintf(" some links were not visited!\n"); + PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + + 
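Worth noting for the hard-link half of this test: LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1 is a hard link stored inside the very group it resolves to, so naive recursion would never terminate; the traversal keeps track of groups it has already entered so it does not recurse into them again. A minimal sketch of such a self-cycle, assuming an open parent_id and using illustrative names:

    hid_t child = H5Gcreate2(parent_id, "child", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /* "parent_loop" lives in "child" and resolves to "child" itself. */
    H5Lcreate_hard(parent_id, "child", child, "parent_loop", H5P_DEFAULT, H5P_DEFAULT);

    H5Gclose(child);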
PASSED(); + } + PART_END(H5Lvisit_by_name_cycles_link_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(subgroup1) < 0) + TEST_ERROR; + if (H5Gclose(subgroup2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(subgroup1); + H5Gclose(subgroup2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#else + SKIPPED(); + return 0; +#endif +} + +/* + * A test to check that H5Lvisit(_by_name)2 fails when + * it is given invalid parameters. + */ +static int +test_link_visit_invalid_params(void) +{ + herr_t err_ret = -1; + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup1 = H5I_INVALID_HID, subgroup2 = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; + + TESTING_MULTIPART("link visiting with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, link, external link, iterate, or " + "creation order aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME); + + if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME); + goto error; + } + + if ((subgroup1 = H5Gcreate2(group_id, LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME2, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first subgroup '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME2); + goto error; + } + + if ((subgroup2 = H5Gcreate2(group_id, LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME3, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create second subgroup '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME3); + goto error; + } + + if ((dset_dtype = 
generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((fspace_id = generate_random_dataspace(LINK_VISIT_INVALID_PARAMS_TEST_DSET_SPACE_RANK, NULL, NULL, + FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(subgroup1, LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first dataset '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME); + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(subgroup2, LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create second dataset '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME); + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + if (H5Lcreate_hard(subgroup1, LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME, subgroup1, + LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME1, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create first hard link '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME1); + goto error; + } + + if (H5Lcreate_soft(LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME, subgroup1, + LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME2); + goto error; + } +#ifndef NO_EXTERNAL_LINKS + if (H5Lcreate_external(ext_link_filename, "/", subgroup2, LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME3, + H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create external link '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME3); + goto error; + } +#endif + if (H5Lcreate_hard(subgroup2, LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME, subgroup2, + LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME4, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create second hard link '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME4); + goto error; + } + + /* Verify the links have been created */ + if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if first link '%s' exists\n", + LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME1); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link 1 did not exist\n"); + goto error; + } + + if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if second link '%s' exists\n", + LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME2); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link 2 did not exist\n"); + goto error; + } +#ifndef NO_EXTERNAL_LINKS + if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if third link '%s' exists\n", + LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME3); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link 3 did not exist\n"); + goto error; + } +#endif + if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME4, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if fourth link '%s' exists\n", + LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME4); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" link 4 did not exist\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lvisit_invalid_grp_id) + { + 
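A brief aside on the error-handling pattern used in the negative checks below: H5E_BEGIN_TRY / H5E_END_TRY temporarily silence HDF5's automatic error reporting so the failures these checks deliberately provoke do not clutter the output, and the test then treats any non-negative return value as a bug. Conceptually the macro pair behaves roughly like this simplified sketch (not the actual macro expansion):

    H5E_auto2_t saved_func;
    void       *saved_data;

    /* Save the current automatic error handler and disable it... */
    H5Eget_auto2(H5E_DEFAULT, &saved_func, &saved_data);
    H5Eset_auto2(H5E_DEFAULT, NULL, NULL);

    /* ...perform the call that is expected to fail... */

    /* ...then restore the handler afterwards. */
    H5Eset_auto2(H5E_DEFAULT, saved_func, saved_data);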
TESTING_2("H5Lvisit2 with an invalid group ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Lvisit2(H5I_INVALID_HID, H5_INDEX_NAME, H5_ITER_INC, link_visit_invalid_params_cb, + NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 succeeded with an invalid group ID!\n"); + PART_ERROR(H5Lvisit_invalid_grp_id); + } + + PASSED(); + } + PART_END(H5Lvisit_invalid_grp_id); + + PART_BEGIN(H5Lvisit_invalid_index_type) + { + TESTING_2("H5Lvisit2 with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = + H5Lvisit2(group_id, H5_INDEX_UNKNOWN, H5_ITER_INC, link_visit_invalid_params_cb, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 succeeded with invalid index type H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Lvisit_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lvisit2(group_id, H5_INDEX_N, H5_ITER_INC, link_visit_invalid_params_cb, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 succeeded with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Lvisit_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Lvisit_invalid_index_type); + + PART_BEGIN(H5Lvisit_invalid_iter_order) + { + TESTING_2("H5Lvisit2 with an invalid iteration ordering"); + + H5E_BEGIN_TRY + { + err_ret = + H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_UNKNOWN, link_visit_invalid_params_cb, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Lvisit_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_N, link_visit_invalid_params_cb, NULL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 succeeded with invalid iteration ordering H5_ITER_N!\n"); + PART_ERROR(H5Lvisit_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Lvisit_invalid_iter_order); + + PART_BEGIN(H5Lvisit_by_name_invalid_loc_id) + { + TESTING_2("H5Lvisit_by_name2 with an invalid location ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Lvisit_by_name2( + H5I_INVALID_HID, + "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, link_visit_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 succeeded with an invalid location ID!\n"); + PART_ERROR(H5Lvisit_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_invalid_loc_id); + + PART_BEGIN(H5Lvisit_by_name_invalid_grp_name) + { + TESTING_2("H5Lvisit_by_name2 with an invalid group name"); + + H5E_BEGIN_TRY + { + err_ret = H5Lvisit_by_name2(file_id, NULL, H5_INDEX_NAME, H5_ITER_INC, + link_visit_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 succeeded with a NULL group name!\n"); + PART_ERROR(H5Lvisit_by_name_invalid_grp_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lvisit_by_name2(file_id, "", H5_INDEX_NAME, H5_ITER_INC, + link_visit_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 succeeded with an invalid group name of ''!\n"); + PART_ERROR(H5Lvisit_by_name_invalid_grp_name); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_invalid_grp_name); + + PART_BEGIN(H5Lvisit_by_name_invalid_index_type) + { + TESTING_2("H5Lvisit_by_name2 with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = H5Lvisit_by_name2( + 
file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5_INDEX_UNKNOWN, H5_ITER_INC, link_visit_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 succeeded with invalid index type H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Lvisit_by_name_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5_INDEX_N, H5_ITER_INC, link_visit_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 succeeded with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Lvisit_by_name_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_invalid_index_type); + + PART_BEGIN(H5Lvisit_by_name_invalid_iter_order) + { + TESTING_2("H5Lvisit_by_name2 with an invalid iteration ordering"); + + H5E_BEGIN_TRY + { + err_ret = H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_UNKNOWN, link_visit_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf( + " H5Lvisit_by_name2 succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Lvisit_by_name_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + err_ret = H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_N, link_visit_invalid_params_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 succeeded with invalid iteration ordering H5_ITER_N!\n"); + PART_ERROR(H5Lvisit_by_name_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_invalid_iter_order); + + PART_BEGIN(H5Lvisit_by_name_invalid_lapl) + { + TESTING_2("H5Lvisit_by_name2 with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Lvisit_by_name2( + file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, link_visit_invalid_params_cb, NULL, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Lvisit_by_name_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Gclose(subgroup1) < 0) + TEST_ERROR; + if (H5Gclose(subgroup2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(subgroup1); + H5Gclose(subgroup2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that recursive link iteration + * performed on a group with no links in it is + * not problematic. 
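In the zero-links case the visit callback should never fire at all and the call should still succeed. A hedged sketch of a callback written to catch any unexpected invocation; the name is illustrative and is not the test's actual callback:

    static herr_t
    expect_no_links_cb(hid_t group, const char *name, const H5L_info2_t *info, void *op_data)
    {
        (void)group;
        (void)name;
        (void)info;
        (void)op_data;

        /* Visiting an empty group should never reach this point. */
        return H5_ITER_ERROR;
    }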
+ */ +static int +test_link_visit_0_links(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("link visiting on group with subgroups containing 0 links"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, link iterate, or creation order aren't supported " + "with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Lvisit_0_links_name_increasing) + { + TESTING_2("H5Lvisit2 by link name in increasing order"); + + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_0_links_cb, NULL) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_0_links_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_0_links_name_increasing); + + PART_BEGIN(H5Lvisit_0_links_name_decreasing) + { + TESTING_2("H5Lvisit2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_0_links_cb, NULL) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_0_links_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_0_links_name_decreasing); +#endif + } + PART_END(H5Lvisit_0_links_name_decreasing); + + PART_BEGIN(H5Lvisit_0_links_creation_increasing) + { + TESTING_2("H5Lvisit2 by creation order in increasing order"); + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_0_links_cb, NULL) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_0_links_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_0_links_creation_increasing); + + PART_BEGIN(H5Lvisit_0_links_creation_decreasing) + { + TESTING_2("H5Lvisit2 by creation order in decreasing order"); + + if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_0_links_cb, NULL) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_0_links_creation_decreasing); + } + + PASSED(); + } + 
PART_END(H5Lvisit_0_links_creation_decreasing); + + PART_BEGIN(H5Lvisit_by_name_0_links_name_increasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in increasing order"); + + if (H5Lvisit_by_name2(file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, link_visit_0_links_cb, NULL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_0_links_name_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_0_links_name_increasing); + + PART_BEGIN(H5Lvisit_by_name_0_links_name_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by link name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if (H5Lvisit_by_name2(file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_NAME, H5_ITER_DEC, link_visit_0_links_cb, NULL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_0_links_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Lvisit_by_name_0_links_name_decreasing); +#endif + } + PART_END(H5Lvisit_by_name_0_links_name_decreasing); + + PART_BEGIN(H5Lvisit_by_name_0_links_creation_increasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in increasing order"); + + if (H5Lvisit_by_name2(file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_0_links_cb, NULL, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_0_links_creation_increasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_0_links_creation_increasing); + + PART_BEGIN(H5Lvisit_by_name_0_links_creation_decreasing) + { + TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order"); + + if (H5Lvisit_by_name2(file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME, + H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_0_links_cb, NULL, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n"); + PART_ERROR(H5Lvisit_by_name_0_links_creation_decreasing); + } + + PASSED(); + } + PART_END(H5Lvisit_by_name_0_links_creation_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Link iteration callback for the hard links test which iterates + * through all of the links in the test group and checks to make sure + * their names and link classes match what is expected. 
+ */ +static herr_t +link_iter_hard_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + size_t test_iteration; + char expected_link_name[LINK_ITER_HARD_LINKS_TEST_BUF_SIZE]; + herr_t ret_val = H5_ITER_CONT; + + UNUSED(group_id); + UNUSED(op_data); + + if (H5L_TYPE_HARD != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name); + goto done; + } + + /* + * Four tests are run in the following order per link iteration API call: + * + * - iteration by link name in increasing order + * - iteration by link name in decreasing order + * - iteration by link creation order in increasing order + * - iteration by link creation order in decreasing order + * + * Based on how the test is written, this will mean that the dataset names + * will run in increasing order on the first and fourth tests and decreasing + * order on the second and third tests. + */ + test_iteration = (counter_val / LINK_ITER_HARD_LINKS_TEST_NUM_LINKS); + if (test_iteration == 0 || test_iteration == 3) { + HDsnprintf(expected_link_name, LINK_ITER_HARD_LINKS_TEST_BUF_SIZE, + LINK_ITER_HARD_LINKS_TEST_LINK_NAME "%d", + (int)(counter_val % LINK_ITER_HARD_LINKS_TEST_NUM_LINKS)); + } + else { + HDsnprintf(expected_link_name, LINK_ITER_HARD_LINKS_TEST_BUF_SIZE, + LINK_ITER_HARD_LINKS_TEST_LINK_NAME "%d", + (int)(LINK_ITER_HARD_LINKS_TEST_NUM_LINKS - + (counter_val % LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) - 1)); + } + + if (HDstrncmp(name, expected_link_name, LINK_ITER_HARD_LINKS_TEST_BUF_SIZE)) { + HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name); + ret_val = H5_ITER_ERROR; + goto done; + } + +done: + (*i)++; + + return ret_val; +} + +/* + * Link iteration callback for the soft links test which iterates + * through all of the links in the test group and checks to make sure + * their names and link classes match what is expected. + */ +static herr_t +link_iter_soft_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + size_t test_iteration; + char expected_link_name[LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE]; + herr_t ret_val = H5_ITER_CONT; + + UNUSED(group_id); + UNUSED(op_data); + + if (H5L_TYPE_SOFT != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n", name); + goto done; + } + + /* + * Four tests are run in the following order per link iteration API call: + * + * - iteration by link name in increasing order + * - iteration by link name in decreasing order + * - iteration by link creation order in increasing order + * - iteration by link creation order in decreasing order + * + * Based on how the test is written, this will mean that the link names + * will run in increasing order on the first and fourth tests and decreasing + * order on the second and third tests. 
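The pass/position arithmetic described in the comment above is easiest to follow with concrete numbers. The sketch below uses a hypothetical count of 10 links per pass in place of the LINK_ITER_*_TEST_NUM_LINKS macros (whose real values live in the test header) and computes the link index the callback expects for a given counter value:

    #include <stdio.h>

    /* Hypothetical stand-in for LINK_ITER_HARD_LINKS_TEST_NUM_LINKS */
    #define NUM_LINKS 10

    /* Passes 0 and 3 iterate by increasing index, passes 1 and 2 by decreasing index */
    static int
    expected_index(int counter_val)
    {
        int pass = counter_val / NUM_LINKS;
        int pos  = counter_val % NUM_LINKS;

        return (pass == 0 || pass == 3) ? pos : (NUM_LINKS - pos - 1);
    }

    int
    main(void)
    {
        printf("%d\n", expected_index(25)); /* pass 2, position 5 -> expects index 4 */
        printf("%d\n", expected_index(37)); /* pass 3, position 7 -> expects index 7 */
        return 0;
    }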
+ */ + test_iteration = (counter_val / LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS); + if (test_iteration == 0 || test_iteration == 3) { + HDsnprintf(expected_link_name, LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE, + LINK_ITER_SOFT_LINKS_TEST_LINK_NAME "%d", + (int)(counter_val % LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS)); + } + else { + HDsnprintf(expected_link_name, LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE, + LINK_ITER_SOFT_LINKS_TEST_LINK_NAME "%d", + (int)(LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS - + (counter_val % LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) - 1)); + } + + if (HDstrncmp(name, expected_link_name, LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE)) { + HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name); + ret_val = H5_ITER_ERROR; + goto done; + } + +done: + (*i)++; + + return ret_val; +} + +/* + * Link iteration callback for the external links test which iterates + * through all of the links in the test group and checks to make sure + * their names and link classes match what is expected. + */ +#ifndef NO_EXTERNAL_LINKS +static herr_t +link_iter_external_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + size_t test_iteration; + char expected_link_name[LINK_ITER_EXT_LINKS_TEST_BUF_SIZE]; + herr_t ret_val = H5_ITER_CONT; + + UNUSED(group_id); + UNUSED(op_data); + + if (H5L_TYPE_EXTERNAL != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n", name); + goto done; + } + + /* + * Four tests are run in the following order per link iteration API call: + * + * - iteration by link name in increasing order + * - iteration by link name in decreasing order + * - iteration by link creation order in increasing order + * - iteration by link creation order in decreasing order + * + * Based on how the test is written, this will mean that the link names + * will run in increasing order on the first and fourth tests and decreasing + * order on the second and third tests. + */ + test_iteration = (counter_val / LINK_ITER_EXT_LINKS_TEST_NUM_LINKS); + if (test_iteration == 0 || test_iteration == 3) { + HDsnprintf(expected_link_name, LINK_ITER_EXT_LINKS_TEST_BUF_SIZE, + LINK_ITER_EXT_LINKS_TEST_LINK_NAME "%d", + (int)(counter_val % LINK_ITER_EXT_LINKS_TEST_NUM_LINKS)); + } + else { + HDsnprintf(expected_link_name, LINK_ITER_EXT_LINKS_TEST_BUF_SIZE, + LINK_ITER_EXT_LINKS_TEST_LINK_NAME "%d", + (int)(LINK_ITER_EXT_LINKS_TEST_NUM_LINKS - + (counter_val % LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) - 1)); + } + + if (HDstrncmp(name, expected_link_name, LINK_ITER_EXT_LINKS_TEST_BUF_SIZE)) { + HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name); + ret_val = H5_ITER_ERROR; + goto done; + } + +done: + (*i)++; + + return ret_val; +} +#endif +#ifndef NO_USER_DEFINED_LINKS +static herr_t link_iter_ud_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data); +#endif +/* + * Link iteration callback for the mixed link types test which iterates + * through all of the links in the test group and checks to make sure + * their names and link classes match what is expected. 
+ */ +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) +static herr_t +link_iter_mixed_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + herr_t ret_val = 0; + + UNUSED(group_id); + + if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME, + strlen(LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME) + 1) && + (counter_val == 1 || counter_val == 4 || counter_val == 6 || counter_val == 11)) { + if (H5L_TYPE_HARD != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name); + } + + goto done; + } + else if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME, + strlen(LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME) + 1) && + (counter_val == 2 || counter_val == 3 || counter_val == 7 || counter_val == 10)) { + if (H5L_TYPE_SOFT != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n", name); + } + + goto done; + } + else if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME, + strlen(LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME) + 1) && + (counter_val == 0 || counter_val == 5 || counter_val == 8 || counter_val == 9)) { + if (H5L_TYPE_EXTERNAL != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n", name); + } + + goto done; + } + + HDprintf(" link name '%s' didn't match known names or came in an incorrect order\n", name); + + ret_val = -1; + +done: + (*i)++; + + return ret_val; +} +#endif + +/* + * Link iteration callback for the H5Literate(_by_name)2 invalid + * parameters test which simply does nothing. + */ +static herr_t +link_iter_invalid_params_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + UNUSED(group_id); + UNUSED(name); + UNUSED(info); + UNUSED(op_data); + + return 0; +} + +/* + * Link iteration callback for the 0 links iteration test which + * simply does nothing. + */ +static herr_t +link_iter_0_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + UNUSED(group_id); + UNUSED(name); + UNUSED(info); + UNUSED(op_data); + + return 0; +} + +/* + * Link iteration callback to test that the index-saving behavior of H5Literate2 + * works correctly. 
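The "index-saving" behavior referred to here is the in/out idx argument of H5Literate2: when the callback ends the iteration early by returning a positive value, the caller can hand the same idx variable back to a second call and the iteration resumes where it stopped instead of starting over. A minimal sketch of that calling pattern (the callback and function names are placeholders, not names from this patch):

    /* Stops after the first link it sees; a positive return value halts H5Literate2 */
    static herr_t
    stop_after_first_cb(hid_t group, const char *name, const H5L_info2_t *info, void *op_data)
    {
        (void)group; (void)name; (void)info; (void)op_data;
        return 1;
    }

    static herr_t
    iterate_in_two_steps(hid_t group_id)
    {
        hsize_t idx = 0;

        /* Visits one link, then stops; the library updates idx to the next position */
        if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, &idx, stop_after_first_cb, NULL) < 0)
            return -1;

        /* Resumes from the saved idx rather than from the beginning of the group */
        if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, &idx, stop_after_first_cb, NULL) < 0)
            return -1;

        return 0;
    }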
+ */ +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) +static herr_t +link_iter_idx_saving_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + int *broken = (int *)op_data; + + UNUSED(group_id); + + if (broken && !*broken && + !HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME, + strlen(LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME) + 1)) { + return (*broken = 1); + } + + if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME, + strlen(LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME) + 1)) { + if (H5L_TYPE_HARD != info->type) { + H5_FAILED(); + HDprintf(" link type did not match\n"); + goto error; + } + } + else if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME, + strlen(LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME) + 1)) { + if (H5L_TYPE_SOFT != info->type) { + H5_FAILED(); + HDprintf(" link type did not match\n"); + goto error; + } + } + else if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME, + strlen(LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME) + 1)) { + if (H5L_TYPE_EXTERNAL != info->type) { + H5_FAILED(); + HDprintf(" link type did not match\n"); + goto error; + } + } + else { + H5_FAILED(); + HDprintf(" link name didn't match known names\n"); + goto error; + } + + return 0; + +error: + return -1; +} +#endif + +/* + * Link visiting callback for the hard links + no cycles test which + * iterates recursively through all of the links in the test group and + * checks to make sure their names and link classes match what is expected. + */ +static herr_t +link_visit_hard_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + hbool_t is_subgroup_link; + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + size_t test_iteration; + size_t subgroup_number; + size_t link_idx_val; + char expected_link_name[LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE]; + herr_t ret_val = H5_ITER_CONT; + + UNUSED(group_id); + UNUSED(op_data); + + if (H5L_TYPE_HARD != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name); + goto done; + } + + /* + * Four tests are run in the following order per link visiting API call: + * + * - visitation by link name in increasing order + * - visitation by link name in decreasing order + * - visitation by link creation order in increasing order + * - visitation by link creation order in decreasing order + * + * Based on how the test is written, this will mean that the dataset and group + * names will run in increasing order on the first and fourth tests and decreasing + * order on the second and third tests. + */ + test_iteration = counter_val / LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + /* Determine which subgroup is currently being processed */ + subgroup_number = + /* Take the current counter value modulo the total number of links per test iteration (datasets + + subgroups) */ + (counter_val % LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. 
*/ + / (LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1); + + /* Determine whether the current link points to the current subgroup itself */ + is_subgroup_link = (counter_val % (LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0); + if (!is_subgroup_link) { + /* Determine the index number of this link within its containing subgroup */ + link_idx_val = + /* Take the current counter value modulo the total number of links per test iteration (datasets + + subgroups) */ + (counter_val % LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the + link's index number. */ + % (LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) - + 1; + } + + if (test_iteration == 0 || test_iteration == 3) { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_LINK_NAME "%d", + (int)subgroup_number, (int)link_idx_val); + } + } + else { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1)); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1), + (int)(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1)); + } + } + + if (HDstrncmp(name, expected_link_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE)) { + HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name); + ret_val = H5_ITER_ERROR; + goto done; + } + +done: + (*i)++; + + return ret_val; +} + +/* + * Link visiting callback for the soft links + no cycles test which + * iterates recursively through all of the links in the test group and + * checks to make sure their names and link classes match what is expected. 
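All of these visiting callbacks decompose counter_val the same way: within one pass, every subgroup accounts for one link to the subgroup itself plus a fixed number of member links, so integer division and modulo recover which subgroup and which member link the current visit refers to. A short worked sketch with hypothetical sizes (3 member links per subgroup and 2 subgroups, so 8 visits per pass; the real values come from the LINK_VISIT_*_TEST_* macros in the test header):

    #include <stdio.h>

    /* Hypothetical stand-ins for the *_NUM_LINKS_PER_GROUP / *_NUM_LINKS_PER_TEST macros */
    #define LINKS_PER_GROUP 3
    #define LINKS_PER_TEST  8 /* 2 subgroups x (1 subgroup link + 3 member links) */

    int
    main(void)
    {
        size_t counter_val;

        for (counter_val = 0; counter_val < LINKS_PER_TEST; counter_val++) {
            size_t pos         = counter_val % LINKS_PER_TEST;
            size_t subgroup    = pos / (LINKS_PER_GROUP + 1);
            int    is_subgroup = (pos % (LINKS_PER_GROUP + 1) == 0);

            if (is_subgroup)
                printf("visit %zu: link to subgroup %zu itself\n", counter_val, subgroup);
            else
                printf("visit %zu: member link %zu of subgroup %zu\n", counter_val,
                       pos % (LINKS_PER_GROUP + 1) - 1, subgroup);
        }

        return 0;
    }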
+ */ +static herr_t +link_visit_soft_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + hbool_t is_subgroup_link; + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + size_t test_iteration; + size_t subgroup_number; + size_t link_idx_val; + char expected_link_name[LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE]; + herr_t ret_val = H5_ITER_CONT; + + UNUSED(group_id); + UNUSED(op_data); + + /* Determine whether the current link points to the current subgroup itself */ + is_subgroup_link = (counter_val % (LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0); + + if (is_subgroup_link) { + if (H5L_TYPE_HARD != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name); + goto done; + } + } + else { + if (H5L_TYPE_SOFT != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n", name); + goto done; + } + } + + /* + * Four tests are run in the following order per link visiting API call: + * + * - visitation by link name in increasing order + * - visitation by link name in decreasing order + * - visitation by link creation order in increasing order + * - visitation by link creation order in decreasing order + * + * Based on how the test is written, this will mean that the link names will + * run in increasing order on the first and fourth tests and decreasing + * order on the second and third tests. + */ + test_iteration = counter_val / LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + /* Determine which subgroup is currently being processed */ + subgroup_number = + /* Take the current counter value modulo the total number of links per test iteration (links + + subgroups) */ + (counter_val % LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. */ + / (LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1); + + if (!is_subgroup_link) { + /* Determine the index number of this link within its containing subgroup */ + link_idx_val = + /* Take the current counter value modulo the total number of links per test iteration (links + + subgroups) */ + (counter_val % LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the + link's index number. 
*/ + % (LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) - + 1; + } + + if (test_iteration == 0 || test_iteration == 3) { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d", + (int)subgroup_number, (int)link_idx_val); + } + } + else { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1)); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1), + (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1)); + } + } + + if (HDstrncmp(name, expected_link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE)) { + HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name); + ret_val = H5_ITER_ERROR; + goto done; + } + +done: + (*i)++; + + return ret_val; +} + +/* + * Link visiting callback for the external links + no cycles test which + * iterates recursively through all of the links in the test group and + * checks to make sure their names and link classes match what is expected. + */ +#ifndef NO_EXTERNAL_LINKS +static herr_t +link_visit_external_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data) +{ + hbool_t is_subgroup_link; + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + size_t test_iteration; + size_t subgroup_number; + size_t link_idx_val; + char expected_link_name[LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE]; + herr_t ret_val = H5_ITER_CONT; + + UNUSED(group_id); + UNUSED(op_data); + + /* Determine whether the current link points to the current subgroup itself */ + is_subgroup_link = (counter_val % (LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0); + + if (is_subgroup_link) { + if (H5L_TYPE_HARD != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name); + goto done; + } + } + else { + if (H5L_TYPE_EXTERNAL != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n", name); + goto done; + } + } + + /* + * Four tests are run in the following order per link visiting API call: + * + * - visitation by link name in increasing order + * - visitation by link name in decreasing order + * - visitation by link creation order in increasing order + * - visitation by link creation order in decreasing order + * + * Based on how the test is written, this will mean that the link names will + * run in increasing order on the first and fourth tests and decreasing + * order on the second and third tests. 
+ */ + test_iteration = counter_val / LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST; + + /* Determine which subgroup is currently being processed */ + subgroup_number = + /* Take the current counter value modulo the total number of links per test iteration (links + + subgroups) */ + (counter_val % LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. */ + / (LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1); + + if (!is_subgroup_link) { + /* Determine the index number of this link within its containing subgroup */ + link_idx_val = + /* Take the current counter value modulo the total number of links per test iteration (links + + subgroups) */ + (counter_val % LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the + link's index number. */ + % (LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) - + 1; + } + + if (test_iteration == 0 || test_iteration == 3) { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d", + (int)subgroup_number, (int)link_idx_val); + } + } + else { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1)); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1), + (int)(LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1)); + } + } + + if (HDstrncmp(name, expected_link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE)) { + HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name); + ret_val = H5_ITER_ERROR; + goto done; + } + +done: + (*i)++; + + return ret_val; +} +#endif +#ifndef NO_USER_DEFINED_LINKS +static herr_t link_visit_ud_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#endif +/* + * Link visiting callback for the mixed link types + no cycles test which + * iterates recursively through all of the links in the test group and + * checks to make sure their names and link classes match what is expected. 
+ */ +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) +static herr_t +link_visit_mixed_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + herr_t ret_val = 0; + + UNUSED(group_id); + UNUSED(op_data); + + if (!HDstrncmp(name, + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1, + strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1) + + 1) && + (counter_val == 2 || counter_val == 14 || counter_val == 18 || counter_val == 30)) { + if (H5L_TYPE_HARD != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1); + } + + goto done; + } + else if (!HDstrncmp(name, + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2, + strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2) + + 1) && + (counter_val == 3 || counter_val == 13 || counter_val == 19 || counter_val == 29)) { + if (H5L_TYPE_SOFT != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2); + } + + goto done; + } + else if (!HDstrncmp(name, + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3, + strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3) + + 1) && + (counter_val == 6 || counter_val == 10 || counter_val == 22 || counter_val == 26)) { + if (H5L_TYPE_EXTERNAL != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3); + } + + goto done; + } + else if (!HDstrncmp(name, + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4, + strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4) + + 1) && + (counter_val == 7 || counter_val == 9 || counter_val == 23 || counter_val == 25)) { + if (H5L_TYPE_HARD != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4); + } + + goto done; + } + else if (!HDstrncmp(name, + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME, + strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME) + + 1) && + (counter_val == 1 || counter_val == 15 || counter_val == 17 || counter_val == 31)) { + if (H5L_TYPE_HARD != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME); + } + + goto done; + } + else if (!HDstrncmp(name, + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2, + strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3 + "/" 
LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2) + + 1) && + (counter_val == 5 || counter_val == 11 || counter_val == 21 || counter_val == 27)) { + if (H5L_TYPE_HARD != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2); + } + + goto done; + } + else if (!HDstrncmp(name, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2, + strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2) + 1) && + (counter_val == 0 || counter_val == 12 || counter_val == 16 || counter_val == 28)) { + if (H5L_TYPE_HARD != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2); + } + + goto done; + } + else if (!HDstrncmp(name, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3, + strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3) + 1) && + (counter_val == 4 || counter_val == 8 || counter_val == 20 || counter_val == 24)) { + if (H5L_TYPE_HARD != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", + LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3); + } + + goto done; + } + + HDprintf(" link name '%s' didn't match known names or came in an incorrect order\n", name); + + ret_val = -1; + +done: + (*i)++; + + return ret_val; +} +#endif + +/* + * Link visiting callback for the hard links + cycles test which + * iterates recursively through all of the links in the test group and + * checks to make sure their names and link classes match what is expected. + */ +static herr_t +link_visit_hard_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + hbool_t is_subgroup_link; + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + size_t test_iteration; + size_t subgroup_number; + size_t link_idx_val; + char expected_link_name[LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE]; + herr_t ret_val = H5_ITER_CONT; + + UNUSED(group_id); + UNUSED(op_data); + + if (H5L_TYPE_HARD != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name); + goto done; + } + + /* + * Four tests are run in the following order per link visiting API call: + * + * - visitation by link name in increasing order + * - visitation by link name in decreasing order + * - visitation by link creation order in increasing order + * - visitation by link creation order in decreasing order + * + * Based on how the test is written, this will mean that the link and group + * names will run in increasing order on the first and fourth tests and decreasing + * order on the second and third tests. + */ + test_iteration = counter_val / LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + /* Determine which subgroup is currently being processed */ + subgroup_number = + /* Take the current counter value modulo the total number of links per test iteration (links + + subgroups) */ + (counter_val % LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. 
*/ + / (LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1); + + /* Determine whether the current link points to the current subgroup itself */ + is_subgroup_link = (counter_val % (LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0); + if (!is_subgroup_link) { + /* Determine the index number of this link within its containing subgroup */ + link_idx_val = + /* Take the current counter value modulo the total number of links per test iteration (links + + subgroups) */ + (counter_val % LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the + link's index number. */ + % (LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) - + 1; + } + + if (test_iteration == 0 || test_iteration == 3) { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_LINK_NAME "%d", + (int)subgroup_number, (int)link_idx_val); + } + } + else { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1)); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1), + (int)(LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1)); + } + } + + if (HDstrncmp(name, expected_link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE)) { + HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name); + ret_val = H5_ITER_ERROR; + goto done; + } + +done: + (*i)++; + + return ret_val; +} + +/* + * Link visiting callback for the soft links + cycles test which + * iterates recursively through all of the links in the test group and + * checks to make sure their names and link classes match what is expected. 
+ */ +static herr_t +link_visit_soft_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + hbool_t is_subgroup_link; + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + size_t test_iteration; + size_t subgroup_number; + size_t link_idx_val; + char expected_link_name[LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE]; + herr_t ret_val = H5_ITER_CONT; + + UNUSED(group_id); + UNUSED(op_data); + + /* Determine whether the current link points to the current subgroup itself */ + is_subgroup_link = (counter_val % (LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0); + + if (is_subgroup_link) { + if (H5L_TYPE_HARD != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name); + goto done; + } + } + else { + if (H5L_TYPE_SOFT != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n", name); + goto done; + } + } + + /* + * Four tests are run in the following order per link visiting API call: + * + * - visitation by link name in increasing order + * - visitation by link name in decreasing order + * - visitation by link creation order in increasing order + * - visitation by link creation order in decreasing order + * + * Based on how the test is written, this will mean that the link and group + * names will run in increasing order on the first and fourth tests and decreasing + * order on the second and third tests. + */ + test_iteration = counter_val / LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + /* Determine which subgroup is currently being processed */ + subgroup_number = + /* Take the current counter value modulo the total number of links per test iteration (links + + subgroups) */ + (counter_val % LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. */ + / (LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1); + + if (!is_subgroup_link) { + /* Determine the index number of this link within its containing subgroup */ + link_idx_val = + /* Take the current counter value modulo the total number of links per test iteration (links + + subgroups) */ + (counter_val % LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the + link's index number. 
*/ + % (LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) - + 1; + } + + if (test_iteration == 0 || test_iteration == 3) { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_LINK_NAME "%d", + (int)subgroup_number, (int)link_idx_val); + } + } + else { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1)); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1), + (int)(LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1)); + } + } + + if (HDstrncmp(name, expected_link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE)) { + HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name); + ret_val = H5_ITER_ERROR; + goto done; + } + +done: + (*i)++; + + return ret_val; +} + +/* + * Link visiting callback for the external links + cycles test which + * iterates recursively through all of the links in the test group and + * checks to make sure their names and link classes match what is expected. + */ +#ifndef NO_EXTERNAL_LINKS +static herr_t +link_visit_external_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + hbool_t is_subgroup_link; + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + size_t test_iteration; + size_t subgroup_number; + size_t link_idx_val; + char expected_link_name[LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE]; + herr_t ret_val = H5_ITER_CONT; + + UNUSED(group_id); + UNUSED(op_data); + + /* Determine whether the current link points to the current subgroup itself */ + is_subgroup_link = (counter_val % (LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0); + + if (is_subgroup_link) { + if (H5L_TYPE_HARD != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name); + goto done; + } + } + else { + if (H5L_TYPE_EXTERNAL != info->type) { + ret_val = H5_ITER_ERROR; + HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n", name); + goto done; + } + } + + /* + * Four tests are run in the following order per link visiting API call: + * + * - visitation by link name in increasing order + * - visitation by link name in decreasing order + * - visitation by link creation order in increasing order + * - visitation by link creation order in decreasing order + * + * Based on how the test is written, this will mean that the link and group + * names will run in increasing order on the first and fourth tests and decreasing + * order on the second and third tests. 
+ */ + test_iteration = counter_val / LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST; + + /* Determine which subgroup is currently being processed */ + subgroup_number = + /* Take the current counter value modulo the total number of links per test iteration (links + + subgroups) */ + (counter_val % LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. */ + / (LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1); + + if (!is_subgroup_link) { + /* Determine the index number of this link within its containing subgroup */ + link_idx_val = + /* Take the current counter value modulo the total number of links per test iteration (links + + subgroups) */ + (counter_val % LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) + /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the + link's index number. */ + % (LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) - + 1; + } + + if (test_iteration == 0 || test_iteration == 3) { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_LINK_NAME "%d", + (int)subgroup_number, (int)link_idx_val); + } + } + else { + if (is_subgroup_link) { + HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", + (int)(LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1)); + } + else { + HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE, + LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME + "%d" + "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_LINK_NAME "%d", + (int)(LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1), + (int)(LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1)); + } + } + + if (HDstrncmp(name, expected_link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE)) { + HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name); + ret_val = H5_ITER_ERROR; + goto done; + } + +done: + (*i)++; + + return ret_val; +} +#endif +#ifndef NO_USER_DEFINED_LINKS +static herr_t link_visit_ud_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, + void *op_data); +#endif +/* + * Link visiting callback for the mixed link types + cycles test which + * iterates recursively through all of the links in the test group and + * checks to make sure their names and link classes match what is expected. 
+ */ +#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS) +static herr_t +link_visit_mixed_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + herr_t ret_val = 0; + + UNUSED(group_id); + UNUSED(op_data); + + if (!HDstrncmp(name, + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1, + strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1) + + 1) && + (counter_val == 1 || counter_val == 11 || counter_val == 13 || counter_val == 23)) { + if (H5L_TYPE_HARD != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1); + } + + goto done; + } + else if (!HDstrncmp(name, + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2, + strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2) + + 1) && + (counter_val == 2 || counter_val == 10 || counter_val == 14 || counter_val == 22)) { + if (H5L_TYPE_SOFT != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2); + } + + goto done; + } + else if (!HDstrncmp(name, + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3, + strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3) + + 1) && + (counter_val == 4 || counter_val == 8 || counter_val == 16 || counter_val == 20)) { + if (H5L_TYPE_EXTERNAL != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3); + } + + goto done; + } + else if (!HDstrncmp(name, + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4, + strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4) + + 1) && + (counter_val == 5 || counter_val == 7 || counter_val == 17 || counter_val == 19)) { + if (H5L_TYPE_HARD != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3 + "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4); + } + + goto done; + } + else if (!HDstrncmp(name, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2, + strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2) + 1) && + (counter_val == 0 || counter_val == 9 || counter_val == 12 || counter_val == 21)) { + if (H5L_TYPE_HARD != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2); + } + + goto done; + } + else if (!HDstrncmp(name, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3, + strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3) + 1) && + (counter_val == 3 || counter_val == 6 || counter_val == 15 || counter_val == 18)) { + if (H5L_TYPE_HARD != info->type) { + ret_val = -1; + HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", + LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3); + } + + goto done; + } + + HDprintf(" link name '%s' 
didn't match known names or came in an incorrect order\n", name); + + ret_val = -1; + +done: + (*i)++; + + return ret_val; +} +#endif + +/* + * Link visiting callback for the H5Lvisit(_by_name)2 invalid + * parameters test which simply does nothing. + */ +static herr_t +link_visit_invalid_params_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + UNUSED(group_id); + UNUSED(name); + UNUSED(info); + UNUSED(op_data); + + return 0; +} + +/* + * Link visiting callback for the 0 links visiting test which + * simply does nothing. + */ +static herr_t +link_visit_0_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data) +{ + UNUSED(group_id); + UNUSED(name); + UNUSED(info); + UNUSED(op_data); + + return 0; +} + +/* + * Cleanup temporary test files + */ +static void +cleanup_files(void) +{ + H5Fdelete(EXTERNAL_LINK_TEST_FILE_NAME, H5P_DEFAULT); + H5Fdelete(EXTERNAL_LINK_INVALID_PARAMS_TEST_FILE_NAME, H5P_DEFAULT); +} + +int +H5_api_link_test(void) +{ + size_t i; + int nerrors; + + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Link Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(link_tests); i++) { + nerrors += (*link_tests[i])() ? 1 : 0; + } + + HDprintf("\n"); + + HDprintf("Cleaning up testing files\n"); + cleanup_files(); + + return nerrors; +} diff --git a/test/API/H5_api_link_test.h b/test/API/H5_api_link_test.h new file mode 100644 index 00000000000..e1615175112 --- /dev/null +++ b/test/API/H5_api_link_test.h @@ -0,0 +1,437 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_LINK_TEST_H +#define H5_API_LINK_TEST_H + +#include "H5_api_test.h" + +int H5_api_link_test(void); + +/********************************************* + * * + * API Link test defines * + * * + *********************************************/ + +#define HARD_LINK_TEST_GROUP_NAME "hard_link_creation_test" +#define HARD_LINK_TEST_LINK_NAME "hard_link" + +#define HARD_LINK_TEST_GROUP_LONG_NAME "hard_link_long_name" +#define MAX_NAME_LEN ((64 * 1024) + 1024) + +#define HARD_LINK_TEST_GROUP_MANY_NAME "hard_link_many_name" +#define HARD_LINK_TEST_GROUP_MANY_FINAL_NAME "hard_link_final" +#define HARD_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE 1024 + +#define H5L_SAME_LOC_TEST_GROUP_NAME "h5l_same_loc_test_group" +#define H5L_SAME_LOC_TEST_LINK_NAME1 "h5l_same_loc_test_link1" +#define H5L_SAME_LOC_TEST_LINK_NAME2 "h5l_same_loc_test_link2" + +#define HARD_LINK_INVALID_PARAMS_TEST_GROUP_NAME "hard_link_creation_invalid_params_test" +#define HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME "hard_link" + +#define SOFT_LINK_EXISTING_RELATIVE_TEST_SUBGROUP_NAME "soft_link_to_existing_relative_path_test" +#define SOFT_LINK_EXISTING_RELATIVE_TEST_OBJECT_NAME "group" +#define SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME "soft_link_to_existing_relative_path" + +#define SOFT_LINK_EXISTING_ABSOLUTE_TEST_SUBGROUP_NAME "soft_link_to_existing_absolute_path_test" +#define SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME "soft_link_to_existing_absolute_path" + +#define SOFT_LINK_DANGLING_RELATIVE_TEST_SUBGROUP_NAME "soft_link_dangling_relative_path_test" +#define SOFT_LINK_DANGLING_RELATIVE_TEST_OBJECT_NAME "group" +#define SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME "soft_link_dangling_relative_path" + +#define SOFT_LINK_DANGLING_ABSOLUTE_TEST_SUBGROUP_NAME "soft_link_dangling_absolute_path_test" +#define SOFT_LINK_DANGLING_ABSOLUTE_TEST_OBJECT_NAME "group" +#define SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME "soft_link_dangling_absolute_path" + +#define SOFT_LINK_TEST_GROUP_LONG_NAME "soft_link_long_name" +#define SOFT_LINK_TEST_LONG_OBJECT_NAME "soft_link_object_name" + +#define SOFT_LINK_TEST_GROUP_MANY_NAME "soft_link_many_name" +#define SOFT_LINK_TEST_GROUP_MANY_FINAL_NAME "soft_link_final" +#define SOFT_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE 1024 + +#define SOFT_LINK_INVALID_PARAMS_TEST_GROUP_NAME "soft_link_creation_invalid_params_test" +#define SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME "soft_link_to_root" + +#define EXTERNAL_LINK_TEST_SUBGROUP_NAME "external_link_test" +#define EXTERNAL_LINK_TEST_FILE_NAME "ext_link_file.h5" +#define EXTERNAL_LINK_TEST_LINK_NAME "ext_link" + +#define EXTERNAL_LINK_TEST_DANGLING_SUBGROUP_NAME "external_link_dangling_test" +#define EXTERNAL_LINK_TEST_DANGLING_LINK_NAME "dangling_ext_link" +#define EXTERNAL_LINK_TEST_DANGLING_OBJECT_NAME "external_group" + +#define EXTERNAL_LINK_TEST_MULTI_NAME "external_link_multi_test" +#define EXTERNAL_LINK_TEST_MULTI_NAME_BUF_SIZE 1024 +#define EXTERNAL_LINK_TEST_FILE_NAME2 "ext_link_file_2.h5" +#define EXTERNAL_LINK_TEST_FILE_NAME3 "ext_link_file_3.h5" +#define EXTERNAL_LINK_TEST_FILE_NAME4 "ext_link_file_4.h5" + +#define EXTERNAL_LINK_TEST_PING_PONG_NAME1 "ext_link_file_ping_pong_1.h5" +#define EXTERNAL_LINK_TEST_PING_PONG_NAME2 "ext_link_file_ping_pong_2.h5" +#define EXTERNAL_LINK_TEST_PING_PONG_NAME_BUF_SIZE 1024 + +#define EXTERNAL_LINK_INVALID_PARAMS_TEST_GROUP_NAME "external_link_creation_invalid_params_test" +#define EXTERNAL_LINK_INVALID_PARAMS_TEST_FILE_NAME 
"ext_link_invalid_params_file.h5" +#define EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME "external_link" + +#define UD_LINK_TEST_UDATA_MAX_SIZE 256 +#define UD_LINK_TEST_GROUP_NAME "ud_link_creation_test" +#define UD_LINK_TEST_LINK_NAME "ud_link" + +#define UD_LINK_INVALID_PARAMS_TEST_UDATA_MAX_SIZE 256 +#define UD_LINK_INVALID_PARAMS_TEST_GROUP_NAME "ud_link_creation_invalid_params_test" +#define UD_LINK_INVALID_PARAMS_TEST_LINK_NAME "ud_link" + +#define LINK_DELETE_TEST_NESTED_GRP_NAME "nested_grp" +#define LINK_DELETE_TEST_HARD_LINK_NAME "hard_link" +#define LINK_DELETE_TEST_NESTED_HARD_LINK_NAME \ + LINK_DELETE_TEST_NESTED_GRP_NAME "/" LINK_DELETE_TEST_HARD_LINK_NAME +#define LINK_DELETE_TEST_HARD_LINK_NAME2 LINK_DELETE_TEST_HARD_LINK_NAME "2" +#define LINK_DELETE_TEST_HARD_LINK_NAME3 LINK_DELETE_TEST_HARD_LINK_NAME "3" +#define LINK_DELETE_TEST_SOFT_LINK_NAME "soft_link" +#define LINK_DELETE_TEST_SOFT_LINK_NAME2 LINK_DELETE_TEST_SOFT_LINK_NAME "2" +#define LINK_DELETE_TEST_SOFT_LINK_NAME3 LINK_DELETE_TEST_SOFT_LINK_NAME "3" +#define LINK_DELETE_TEST_EXTERNAL_LINK_NAME "external_link" +#define LINK_DELETE_TEST_EXTERNAL_LINK_NAME2 LINK_DELETE_TEST_EXTERNAL_LINK_NAME "2" +#define LINK_DELETE_TEST_EXTERNAL_LINK_NAME3 LINK_DELETE_TEST_EXTERNAL_LINK_NAME "3" +#define LINK_DELETE_TEST_SUBGROUP_NAME "link_delete_test" +#define LINK_DELETE_TEST_SUBGROUP1_NAME "H5Ldelete_hard_link" +#define LINK_DELETE_TEST_NESTED_SUBGROUP_NAME1 "H5Ldelete_nested_hard_link" +#define LINK_DELETE_TEST_SUBGROUP2_NAME "H5Ldelete_soft_link" +#define LINK_DELETE_TEST_SUBGROUP3_NAME "H5Ldelete_external_link" +#define LINK_DELETE_TEST_SUBGROUP4_NAME "H5Ldelete_ud_link" +#define LINK_DELETE_TEST_SUBGROUP5_NAME "H5Ldelete_by_idx_hard_link_crt_order_increasing" +#define LINK_DELETE_TEST_SUBGROUP6_NAME "H5Ldelete_by_idx_hard_link_crt_order_decreasing" +#define LINK_DELETE_TEST_SUBGROUP7_NAME "H5Ldelete_by_idx_hard_link_name_order_increasing" +#define LINK_DELETE_TEST_SUBGROUP8_NAME "H5Ldelete_by_idx_hard_link_name_order_decreasing" +#define LINK_DELETE_TEST_SUBGROUP9_NAME "H5Ldelete_by_idx_soft_link_crt_order_increasing" +#define LINK_DELETE_TEST_SUBGROUP10_NAME "H5Ldelete_by_idx_soft_link_crt_order_decreasing" +#define LINK_DELETE_TEST_SUBGROUP11_NAME "H5Ldelete_by_idx_soft_link_name_order_increasing" +#define LINK_DELETE_TEST_SUBGROUP12_NAME "H5Ldelete_by_idx_soft_link_name_order_decreasing" +#define LINK_DELETE_TEST_SUBGROUP13_NAME "H5Ldelete_by_idx_external_link_crt_order_increasing" +#define LINK_DELETE_TEST_SUBGROUP14_NAME "H5Ldelete_by_idx_external_link_crt_order_decreasing" +#define LINK_DELETE_TEST_SUBGROUP15_NAME "H5Ldelete_by_idx_external_link_name_order_increasing" +#define LINK_DELETE_TEST_SUBGROUP16_NAME "H5Ldelete_by_idx_external_link_name_order_decreasing" +#define LINK_DELETE_TEST_SUBGROUP17_NAME "H5Ldelete_by_idx_ud_link_crt_order_increasing" +#define LINK_DELETE_TEST_SUBGROUP18_NAME "H5Ldelete_by_idx_ud_link_crt_order_decreasing" +#define LINK_DELETE_TEST_SUBGROUP19_NAME "H5Ldelete_by_idx_ud_link_name_order_increasing" +#define LINK_DELETE_TEST_SUBGROUP20_NAME "H5Ldelete_by_idx_ud_link_name_order_decreasing" + +#define LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME "H5Ldelete_reset_grp_max_crt_order_test" +#define LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP1_NAME "H5Ldelete_bottom_up" +#define LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP2_NAME "H5Ldelete_top_down" +#define LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS 5 +#define LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE 1024 + +#define 
LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME "hard_link" +#define LINK_DELETE_INVALID_PARAMS_TEST_GROUP_NAME "link_deletion_invalid_params_test" + +#define COPY_LINK_TEST_LINK_VAL_BUF_SIZE 1024 +#define COPY_LINK_TEST_EXTERNAL_LINK_NAME "external_link" +#define COPY_LINK_TEST_EXTERNAL_LINK_NAME2 COPY_LINK_TEST_EXTERNAL_LINK_NAME "2" +#define COPY_LINK_TEST_EXTERNAL_LINK_NAME3 COPY_LINK_TEST_EXTERNAL_LINK_NAME "3" +#define COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME "external_link_copy" +#define COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2 COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME "2" +#define COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME "external_link_same_loc" +#define COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2 COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME "2" +#define COPY_LINK_TEST_HARD_LINK_NAME "hard_link" +#define COPY_LINK_TEST_HARD_LINK_NAME2 COPY_LINK_TEST_HARD_LINK_NAME "2" +#define COPY_LINK_TEST_HARD_LINK_NAME3 COPY_LINK_TEST_HARD_LINK_NAME "3" +#define COPY_LINK_TEST_HARD_LINK_COPY_NAME "hard_link_copy" +#define COPY_LINK_TEST_HARD_LINK_COPY_NAME2 COPY_LINK_TEST_HARD_LINK_COPY_NAME "2" +#define COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME "hard_link_same_loc" +#define COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2 COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME "2" +#define COPY_LINK_TEST_SOFT_LINK_TARGET_PATH "/" LINK_TEST_GROUP_NAME "/" COPY_LINK_TEST_SUBGROUP_NAME +#define COPY_LINK_TEST_SOFT_LINK_NAME "soft_link" +#define COPY_LINK_TEST_SOFT_LINK_NAME2 COPY_LINK_TEST_SOFT_LINK_NAME "2" +#define COPY_LINK_TEST_SOFT_LINK_NAME3 COPY_LINK_TEST_SOFT_LINK_NAME "3" +#define COPY_LINK_TEST_SOFT_LINK_COPY_NAME "soft_link_copy" +#define COPY_LINK_TEST_SOFT_LINK_COPY_NAME2 COPY_LINK_TEST_SOFT_LINK_COPY_NAME "2" +#define COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME "soft_link_same_loc" +#define COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2 COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME "2" +#define COPY_LINK_TEST_SRC_GROUP_NAME "src_group" +#define COPY_LINK_TEST_DST_GROUP_NAME "dst_group" +#define COPY_LINK_TEST_SUBGROUP_NAME "link_copy_test" + +#define COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME "hard_link_copy" +#define COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME "hard_link" +#define COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NEW_NAME "hard_link_new" +#define COPY_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME "src_group" +#define COPY_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME "dst_group" +#define COPY_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME "link_copy_invalid_params_test" + +#define MOVE_LINK_TEST_LINK_VAL_BUF_SIZE 1024 +#define MOVE_LINK_TEST_EXTERN_LINK_NAME "extern_link" +#define MOVE_LINK_TEST_EXTERN_LINK_NAME2 MOVE_LINK_TEST_EXTERN_LINK_NAME "2" +#define MOVE_LINK_TEST_EXTERN_LINK_NAME3 MOVE_LINK_TEST_EXTERN_LINK_NAME "3" +#define MOVE_LINK_TEST_EXTERN_LINK_NAME4 MOVE_LINK_TEST_EXTERN_LINK_NAME "4" +#define MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME "extern_link_renamed" +#define MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME "extern_link_same_loc" +#define MOVE_LINK_TEST_HARD_LINK_NAME "hard_link" +#define MOVE_LINK_TEST_HARD_LINK_NAME2 MOVE_LINK_TEST_HARD_LINK_NAME "2" +#define MOVE_LINK_TEST_HARD_LINK_NAME3 MOVE_LINK_TEST_HARD_LINK_NAME "3" +#define MOVE_LINK_TEST_HARD_LINK_NAME4 MOVE_LINK_TEST_HARD_LINK_NAME "4" +#define MOVE_LINK_TEST_HARD_LINK_NEW_NAME "hard_link_renamed" +#define MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME "hard_link_same_loc" +#define MOVE_LINK_TEST_SOFT_LINK_TARGET_PATH "/" LINK_TEST_GROUP_NAME "/" MOVE_LINK_TEST_SUBGROUP_NAME +#define MOVE_LINK_TEST_SOFT_LINK_NAME "soft_link" +#define MOVE_LINK_TEST_SOFT_LINK_NAME2 
MOVE_LINK_TEST_SOFT_LINK_NAME "2" +#define MOVE_LINK_TEST_SOFT_LINK_NAME3 MOVE_LINK_TEST_SOFT_LINK_NAME "3" +#define MOVE_LINK_TEST_SOFT_LINK_NAME4 MOVE_LINK_TEST_SOFT_LINK_NAME "4" +#define MOVE_LINK_TEST_SOFT_LINK_NEW_NAME "soft_link_renamed" +#define MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME "soft_link_same_loc" +#define MOVE_LINK_TEST_SRC_GROUP_NAME "src_group" +#define MOVE_LINK_TEST_DST_GROUP_NAME "dst_group" +#define MOVE_LINK_TEST_SUBGROUP_NAME "link_move_test" + +#define MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SUBGROUP_NAME "link_move_into_group_with_links_test" +#define MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SRC_GRP_NAME "source_group" +#define MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_DST_GRP_NAME "dest_group" +#define MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS 5 +#define MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_BUF_SIZE 1024 + +#define MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME "H5Lmove_reset_grp_max_crt_order_test" +#define MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SRC_GRP_NAME "source_group" +#define MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_DST_GRP_NAME "dest_group" +#define MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS 5 +#define MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE 1024 + +#define MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME "hard_link" +#define MOVE_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME "src_grp" +#define MOVE_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME "dst_grp" +#define MOVE_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME "link_move_invalid_params_test" + +#define GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE 1024 +#define GET_LINK_VAL_TEST_SUBGROUP_NAME "get_link_val_test" +#define GET_LINK_VAL_TEST_SOFT_LINK_NAME "soft_link" +#define GET_LINK_VAL_TEST_SOFT_LINK_NAME2 GET_LINK_VAL_TEST_SOFT_LINK_NAME "2" +#define GET_LINK_VAL_TEST_SOFT_LINK_NAME3 GET_LINK_VAL_TEST_SOFT_LINK_NAME "3" +#define GET_LINK_VAL_TEST_EXT_LINK_NAME "ext_link" +#define GET_LINK_VAL_TEST_EXT_LINK_NAME2 GET_LINK_VAL_TEST_EXT_LINK_NAME "2" +#define GET_LINK_VAL_TEST_EXT_LINK_NAME3 GET_LINK_VAL_TEST_EXT_LINK_NAME "3" +#define GET_LINK_VAL_TEST_SUBGROUP1_NAME "H5Lget_val_soft_link" +#define GET_LINK_VAL_TEST_SUBGROUP2_NAME "H5Lget_val_external_link" +#define GET_LINK_VAL_TEST_SUBGROUP3_NAME "H5Lget_val_ud_link" +#define GET_LINK_VAL_TEST_SUBGROUP4_NAME "H5Lget_val_by_idx_soft_link_crt_order_increasing" +#define GET_LINK_VAL_TEST_SUBGROUP5_NAME "H5Lget_val_by_idx_soft_link_crt_order_decreasing" +#define GET_LINK_VAL_TEST_SUBGROUP6_NAME "H5Lget_val_by_idx_soft_link_name_order_increasing" +#define GET_LINK_VAL_TEST_SUBGROUP7_NAME "H5Lget_val_by_idx_soft_link_name_order_decreasing" +#define GET_LINK_VAL_TEST_SUBGROUP8_NAME "H5Lget_val_by_idx_external_link_crt_order_increasing" +#define GET_LINK_VAL_TEST_SUBGROUP9_NAME "H5Lget_val_by_idx_external_link_crt_order_decreasing" +#define GET_LINK_VAL_TEST_SUBGROUP10_NAME "H5Lget_val_by_idx_external_link_name_order_increasing" +#define GET_LINK_VAL_TEST_SUBGROUP11_NAME "H5Lget_val_by_idx_external_link_name_order_decreasing" +#define GET_LINK_VAL_TEST_SUBGROUP12_NAME "H5Lget_val_by_idx_ud_link_crt_order_increasing" +#define GET_LINK_VAL_TEST_SUBGROUP13_NAME "H5Lget_val_by_idx_ud_link_crt_order_decreasing" +#define GET_LINK_VAL_TEST_SUBGROUP14_NAME "H5Lget_val_by_idx_ud_link_name_order_increasing" +#define GET_LINK_VAL_TEST_SUBGROUP15_NAME "H5Lget_val_by_idx_ud_link_name_order_decreasing" + +#define GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME "soft_link" +#define GET_LINK_VAL_INVALID_PARAMS_TEST_GROUP_NAME "get_link_val_invalid_params_test" + +#define GET_LINK_INFO_TEST_HARD_LINK_NAME "hard_link" +#define 
GET_LINK_INFO_TEST_HARD_LINK_NAME2 GET_LINK_INFO_TEST_HARD_LINK_NAME "2" +#define GET_LINK_INFO_TEST_HARD_LINK_NAME3 GET_LINK_INFO_TEST_HARD_LINK_NAME "3" +#define GET_LINK_INFO_TEST_SOFT_LINK_NAME "soft_link" +#define GET_LINK_INFO_TEST_SOFT_LINK_NAME2 GET_LINK_INFO_TEST_SOFT_LINK_NAME "2" +#define GET_LINK_INFO_TEST_SOFT_LINK_NAME3 GET_LINK_INFO_TEST_SOFT_LINK_NAME "3" +#define GET_LINK_INFO_TEST_EXT_LINK_NAME "ext_link" +#define GET_LINK_INFO_TEST_EXT_LINK_NAME2 GET_LINK_INFO_TEST_EXT_LINK_NAME "2" +#define GET_LINK_INFO_TEST_EXT_LINK_NAME3 GET_LINK_INFO_TEST_EXT_LINK_NAME "3" +#define GET_LINK_INFO_TEST_GROUP_NAME "get_link_info_test" +#define GET_LINK_INFO_TEST_SUBGROUP1_NAME "H5Lget_info_hard_link" +#define GET_LINK_INFO_TEST_SUBGROUP2_NAME "H5Lget_info_soft_link" +#define GET_LINK_INFO_TEST_SUBGROUP3_NAME "H5Lget_info_external_link" +#define GET_LINK_INFO_TEST_SUBGROUP4_NAME "H5Lget_info_ud_link" +#define GET_LINK_INFO_TEST_SUBGROUP5_NAME "H5Lget_info_by_idx_hard_link_crt_order_increasing" +#define GET_LINK_INFO_TEST_SUBGROUP6_NAME "H5Lget_info_by_idx_hard_link_crt_order_decreasing" +#define GET_LINK_INFO_TEST_SUBGROUP7_NAME "H5Lget_info_by_idx_hard_link_name_order_increasing" +#define GET_LINK_INFO_TEST_SUBGROUP8_NAME "H5Lget_info_by_idx_hard_link_name_order_decreasing" +#define GET_LINK_INFO_TEST_SUBGROUP9_NAME "H5Lget_info_by_idx_soft_link_crt_order_increasing" +#define GET_LINK_INFO_TEST_SUBGROUP10_NAME "H5Lget_info_by_idx_soft_link_crt_order_decreasing" +#define GET_LINK_INFO_TEST_SUBGROUP11_NAME "H5Lget_info_by_idx_soft_link_name_order_increasing" +#define GET_LINK_INFO_TEST_SUBGROUP12_NAME "H5Lget_info_by_idx_soft_link_name_order_decreasing" +#define GET_LINK_INFO_TEST_SUBGROUP13_NAME "H5Lget_info_by_idx_external_link_crt_order_increasing" +#define GET_LINK_INFO_TEST_SUBGROUP14_NAME "H5Lget_info_by_idx_external_link_crt_order_decreasing" +#define GET_LINK_INFO_TEST_SUBGROUP15_NAME "H5Lget_info_by_idx_external_link_name_order_increasing" +#define GET_LINK_INFO_TEST_SUBGROUP16_NAME "H5Lget_info_by_idx_external_link_name_order_decreasing" +#define GET_LINK_INFO_TEST_SUBGROUP17_NAME "H5Lget_info_by_idx_ud_link_crt_order_increasing" +#define GET_LINK_INFO_TEST_SUBGROUP18_NAME "H5Lget_info_by_idx_ud_link_crt_order_decreasing" +#define GET_LINK_INFO_TEST_SUBGROUP19_NAME "H5Lget_info_by_idx_ud_link_name_order_increasing" +#define GET_LINK_INFO_TEST_SUBGROUP20_NAME "H5Lget_info_by_idx_ud_link_name_order_decreasing" + +#define GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME "hard_link" +#define GET_LINK_INFO_INVALID_PARAMS_TEST_GROUP_NAME "get_link_info_invalid_params_test" + +#define GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME "get_external_link_name_crt_order_increasing" +#define GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME2 "get_external_link_name_crt_order_decreasing" +#define GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME3 "get_external_link_name_alpha_order_increasing" +#define GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME4 "get_external_link_name_alpha_order_decreasing" +#define GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME "external_link" +#define GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2 GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME "2" +#define GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3 GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME "3" +#define GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME "get_hard_link_name_crt_order_increasing" +#define GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME2 "get_hard_link_name_crt_order_decreasing" +#define GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME3 "get_hard_link_name_alpha_order_increasing" +#define 
GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME4 "get_hard_link_name_alpha_order_decreasing" +#define GET_LINK_NAME_TEST_HARD_LINK_NAME "hard_link" +#define GET_LINK_NAME_TEST_HARD_LINK_NAME2 GET_LINK_NAME_TEST_HARD_LINK_NAME "2" +#define GET_LINK_NAME_TEST_HARD_LINK_NAME3 GET_LINK_NAME_TEST_HARD_LINK_NAME "3" +#define GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME "get_soft_link_name_crt_order_increasing" +#define GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME2 "get_soft_link_name_crt_order_decreasing" +#define GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME3 "get_soft_link_name_alpha_order_increasing" +#define GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME4 "get_soft_link_name_alpha_order_decreasing" +#define GET_LINK_NAME_TEST_SOFT_LINK_NAME "soft_link" +#define GET_LINK_NAME_TEST_SOFT_LINK_NAME2 GET_LINK_NAME_TEST_SOFT_LINK_NAME "2" +#define GET_LINK_NAME_TEST_SOFT_LINK_NAME3 GET_LINK_NAME_TEST_SOFT_LINK_NAME "3" +#define GET_LINK_NAME_TEST_GROUP_NAME "get_link_name_test" +#define GET_LINK_NAME_TEST_BUF_SIZE 256 + +#define GET_LINK_NAME_INVALID_PARAMS_TEST_HARD_LINK_NAME "test_link1" +#define GET_LINK_NAME_INVALID_PARAMS_TEST_GROUP_NAME "get_link_name_invalid_params_test" + +#define LINK_ITER_HARD_LINKS_TEST_DSET_SPACE_RANK 2 +#define LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME "link_iter_hard_links_test" +#define LINK_ITER_HARD_LINKS_TEST_LINK_NAME "hard_link" +#define LINK_ITER_HARD_LINKS_TEST_NUM_LINKS 10 +#define LINK_ITER_HARD_LINKS_TEST_BUF_SIZE 64 + +#define LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME "link_iter_soft_links_test" +#define LINK_ITER_SOFT_LINKS_TEST_LINK_NAME "soft_link" +#define LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS 10 +#define LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE 64 + +#define LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME "link_iter_ext_links_test" +#define LINK_ITER_EXT_LINKS_TEST_LINK_NAME "external_link" +#define LINK_ITER_EXT_LINKS_TEST_NUM_LINKS 10 +#define LINK_ITER_EXT_LINKS_TEST_BUF_SIZE 64 + +#define LINK_ITER_MIXED_LINKS_TEST_DSET_SPACE_RANK 2 +#define LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME "hard_link1" +#define LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME "soft_link1" +#define LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME "ext_link1" +#define LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME "link_iter_mixed_links_test" +#define LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS 3 + +#define LINK_ITER_INVALID_PARAMS_TEST_DSET_SPACE_RANK 2 +#define LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME "hard_link1" +#define LINK_ITER_INVALID_PARAMS_TEST_SOFT_LINK_NAME "soft_link1" +#define LINK_ITER_INVALID_PARAMS_TEST_EXT_LINK_NAME "ext_link1" +#define LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME "link_iter_invalid_params_test" + +#define LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME "link_iter_0_links_test" + +#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST \ + ((LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP * \ + LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS) + \ + LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS) +#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP 10 +#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_DSET_SPACE_RANK 2 +#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS 5 +#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "subgroup" +#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME "link_visit_hard_links_no_cycle_test" +#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_LINK_NAME "hard_link" +#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE 256 + +#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST \ + ((LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP * \ + 
LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS) + \ + LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS) +#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP 10 +#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS 5 +#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "subgroup" +#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME "link_visit_soft_links_no_cycle_test" +#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_LINK_NAME "soft_link" +#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE 256 + +#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST \ + ((LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP * \ + LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS) + \ + LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS) +#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP 10 +#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS 5 +#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "subgroup" +#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME "link_visit_ext_links_no_cycle_test" +#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_LINK_NAME "external_link" +#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE 256 + +#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_SPACE_RANK 2 +#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME "dset" +#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2 "dset2" +#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME "link_visit_mixed_links_no_cycle_test" +#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2 "link_visit_subgroup1" +#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3 "link_visit_subgroup2" +#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1 "hard_link1" +#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2 "soft_link1" +#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3 "ext_link1" +#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4 "hard_link2" +#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS 8 + +#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST \ + ((LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP * \ + LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS) + \ + LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS) +#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP 10 +#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS 5 +#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME "subgroup" +#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME "link_visit_hard_links_cycle_test" +#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_LINK_NAME "hard_link" +#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE 256 + +#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST \ + ((LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP * \ + LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS) + \ + LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS) +#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP 10 +#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS 5 +#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "subgroup" +#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME "link_visit_soft_links_cycle_test" +#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_LINK_NAME "soft_link" +#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE 256 + +#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST \ + ((LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS) + \ + LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS) +#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP 10 +#define 
LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS 5 +#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "subgroup" +#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME "link_visit_ext_links_cycle_test" +#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_LINK_NAME "external_link" +#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE 256 + +#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME "link_visit_mixed_links_cycle_test" +#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2 "link_visit_subgroup1" +#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3 "link_visit_subgroup2" +#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1 "hard_link1" +#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2 "soft_link1" +#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3 "ext_link1" +#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4 "hard_link2" +#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS 6 + +#define LINK_VISIT_INVALID_PARAMS_TEST_DSET_SPACE_RANK 2 +#define LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME "dset" +#define LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME "link_visit_invalid_params_test" +#define LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME2 "link_visit_subgroup1" +#define LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME3 "link_visit_subgroup2" +#define LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME1 "hard_link1" +#define LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME2 "soft_link1" +#define LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME3 "ext_link1" +#define LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME4 "hard_link2" + +#define LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME "link_visit_0_links_test" +#define LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME2 "link_visit_0_links_test_subgroup1" +#define LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME3 "link_visit_0_links_test_subgroup2" + +#endif diff --git a/test/API/H5_api_misc_test.c b/test/API/H5_api_misc_test.c new file mode 100644 index 00000000000..256550b784e --- /dev/null +++ b/test/API/H5_api_misc_test.c @@ -0,0 +1,1060 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_misc_test.h" + +static int test_open_link_without_leading_slash(void); +static int test_object_creation_by_absolute_path(void); +static int test_absolute_vs_relative_path(void); +static int test_dot_for_object_name(void); +static int test_symbols_in_compound_field_name(void); +static int test_double_init_term(void); + +/* + * The array of miscellaneous tests to be performed. 
+ */ +static int (*misc_tests[])(void) = { + test_open_link_without_leading_slash, test_object_creation_by_absolute_path, + test_absolute_vs_relative_path, test_dot_for_object_name, + test_symbols_in_compound_field_name, test_double_init_term, +}; + +static int +test_open_link_without_leading_slash(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING("opening a link without a leading slash"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((space_id = generate_random_dataspace(OPEN_LINK_WITHOUT_SLASH_DSET_SPACE_RANK, NULL, NULL, FALSE)) < + 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(container_group, OPEN_LINK_WITHOUT_SLASH_DSET_NAME, dset_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset\n"); + goto error; + } + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((group_id = H5Gopen2(file_id, "/", H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open root group\n"); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, MISCELLANEOUS_TEST_GROUP_NAME "/" OPEN_LINK_WITHOUT_SLASH_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset\n"); + goto error; + } + + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +static int +test_object_creation_by_absolute_path(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID, sub_group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t dtype_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + + TESTING_MULTIPART("object creation by absolute path"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || + !(vol_cap_flags_g & 
H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, link, or stored datatype aren't " + "supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + /* Start by creating a group to hold all the objects for this test */ + if ((group_id = H5Gcreate2(container_group, OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group\n"); + goto error; + } + + if ((link_exists = H5Lexists(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + goto error; + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" container group didn't exist at the correct location\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Gcreate_using_absolute_path) + { + TESTING_2("creation of group using absolute pathname"); + + /* Try to create a group under the container group by using an absolute pathname */ + if ((sub_group_id = H5Gcreate2(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create subgroup by absolute pathname\n"); + PART_ERROR(H5Gcreate_using_absolute_path); + } + + if ((link_exists = H5Lexists(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + PART_ERROR(H5Gcreate_using_absolute_path); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" subgroup didn't exist at the correct location\n"); + PART_ERROR(H5Gcreate_using_absolute_path); + } + + PASSED(); + } + PART_END(H5Gcreate_using_absolute_path); + + PART_BEGIN(H5Dcreate_using_absolute_path) + { + TESTING_2("creation of dataset using absolute pathname"); + + /* Try to create a dataset nested at the end of this group chain by using an absolute pathname */ + if ((fspace_id = generate_random_dataspace(OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DSET_SPACE_RANK, + NULL, NULL, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" failed to generate dataspace\n"); + PART_ERROR(H5Dcreate_using_absolute_path); + } + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" failed to generate datatype\n"); + PART_ERROR(H5Dcreate_using_absolute_path); + } + + if ((dset_id = H5Dcreate2(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DSET_NAME, + dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset\n"); + PART_ERROR(H5Dcreate_using_absolute_path); + } + + if ((link_exists = H5Lexists(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" 
OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + PART_ERROR(H5Dcreate_using_absolute_path); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" dataset didn't exist at the correct location\n"); + PART_ERROR(H5Dcreate_using_absolute_path); + } + + PASSED(); + } + PART_END(H5Dcreate_using_absolute_path); + + PART_BEGIN(H5Tcommit_using_absolute_path) + { + TESTING_2("creation of committed datatype using absolute pathname"); + + /* Try to create a committed datatype in the same fashion as the preceding dataset */ + if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + PART_ERROR(H5Tcommit_using_absolute_path); + } + + if (H5Tcommit2(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DTYPE_NAME, + dtype_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype\n"); + PART_ERROR(H5Tcommit_using_absolute_path); + } + + if ((link_exists = H5Lexists(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME + "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DTYPE_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + PART_ERROR(H5Tcommit_using_absolute_path); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" datatype didn't exist at the correct location\n"); + PART_ERROR(H5Tcommit_using_absolute_path); + } + + PASSED(); + } + PART_END(H5Tcommit_using_absolute_path); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Tclose(dtype_id) < 0) + TEST_ERROR; + if (H5Gclose(sub_group_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Tclose(dtype_id); + H5Gclose(sub_group_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* XXX: Add testing for groups */ +static int +test_absolute_vs_relative_path(void) +{ + htri_t link_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id1 = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID, dset_id3 = H5I_INVALID_HID, + dset_id4 = H5I_INVALID_HID, dset_id5 = H5I_INVALID_HID, dset_id6 = H5I_INVALID_HID; + hid_t dset_dtype1 = H5I_INVALID_HID, dset_dtype2 = H5I_INVALID_HID, dset_dtype3 = H5I_INVALID_HID, + dset_dtype4 = H5I_INVALID_HID, dset_dtype5 = H5I_INVALID_HID, dset_dtype6 = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING_MULTIPART("absolute vs. 
relative pathnames"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or link aren't supported with this " + "connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + /* Start by creating a group to be used during some of the dataset creation operations */ + if ((group_id = H5Gcreate2(container_group, ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container group\n"); + goto error; + } + + if ((fspace_id = generate_random_dataspace(ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET_SPACE_RANK, NULL, NULL, + FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype1 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if ((dset_dtype2 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if ((dset_dtype3 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if ((dset_dtype4 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if ((dset_dtype5 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + if ((dset_dtype6 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dcreate_absolute_from_root) + { + TESTING_2("dataset creation by absolute path from root group"); + + /* Create a dataset by absolute path in the form "/group/dataset" starting from the root group */ + if ((dset_id1 = H5Dcreate2(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET1_NAME, + dset_dtype1, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset by absolute path from root\n"); + PART_ERROR(H5Dcreate_absolute_from_root); + } + + if ((link_exists = H5Lexists(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET1_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + PART_ERROR(H5Dcreate_absolute_from_root); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" didn't exist at the correct location\n"); + PART_ERROR(H5Dcreate_absolute_from_root); + } + + PASSED(); + } + PART_END(H5Dcreate_absolute_from_root); + + PART_BEGIN(H5Dcreate_absolute_from_nonroot) + { + TESTING_2("dataset creation by absolute path from non-root group"); + + /* Create a dataset by absolute path in the form "/group/dataset" starting from the container + * group */ + if ((dset_id4 = H5Dcreate2(container_group, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET4_NAME, + dset_dtype4, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset by absolute path from container group\n"); + 
PART_ERROR(H5Dcreate_absolute_from_nonroot); + } + + if ((link_exists = H5Lexists(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET4_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + PART_ERROR(H5Dcreate_absolute_from_nonroot); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" didn't exist at the correct location\n"); + PART_ERROR(H5Dcreate_absolute_from_nonroot); + } + + PASSED(); + } + PART_END(H5Dcreate_absolute_from_nonroot); + + PART_BEGIN(H5Dcreate_relative_from_root) + { + TESTING_2("dataset creation by relative path from root group"); + + /* TODO: */ + + SKIPPED(); + PART_EMPTY(H5Dcreate_relative_from_root); + } + PART_END(H5Dcreate_relative_from_root); + + PART_BEGIN(H5Dcreate_relative_from_nonroot) + { + TESTING_2("dataset creation by relative path from non-root group"); + + /* Create a dataset by relative path in the form "dataset" starting from the test container group + */ + if ((dset_id5 = H5Dcreate2(group_id, ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET5_NAME, dset_dtype5, + fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset by relative path from container group\n"); + PART_ERROR(H5Dcreate_relative_from_nonroot); + } + + /* Create a dataset by relative path in the form "group/dataset" starting from the top-level + * container group */ + if ((dset_id2 = H5Dcreate2(container_group, + ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET2_NAME, + dset_dtype2, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset by relative path from container group\n"); + PART_ERROR(H5Dcreate_relative_from_nonroot); + } + + if ((link_exists = H5Lexists(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET2_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + PART_ERROR(H5Dcreate_relative_from_nonroot); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" didn't exist at the correct location\n"); + PART_ERROR(H5Dcreate_relative_from_nonroot); + } + + if ((link_exists = H5Lexists(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET5_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + PART_ERROR(H5Dcreate_relative_from_nonroot); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" didn't exist at the correct location\n"); + PART_ERROR(H5Dcreate_relative_from_nonroot); + } + + PASSED(); + } + PART_END(H5Dcreate_relative_from_nonroot); + + PART_BEGIN(H5Dcreate_relative_leading_dot_root) + { + TESTING_2("dataset creation by path with leading '.' 
from root group"); + + /* Create a dataset by relative path in the form "./group/dataset" starting from the root group */ + if ((dset_id3 = H5Dcreate2(file_id, + "./" MISCELLANEOUS_TEST_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET3_NAME, + dset_dtype3, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset by relative path from root with leading '.'\n"); + PART_ERROR(H5Dcreate_relative_leading_dot_root); + } + + if ((link_exists = H5Lexists(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET3_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + PART_ERROR(H5Dcreate_relative_leading_dot_root); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" didn't exist at the correct location\n"); + PART_ERROR(H5Dcreate_relative_leading_dot_root); + } + + PASSED(); + } + PART_END(H5Dcreate_relative_leading_dot_root); + + PART_BEGIN(H5Dcreate_relative_leading_dot_nonroot) + { + TESTING_2("dataset creation by path with leading '.' from non-root group"); + + /* Create a dataset by relative path in the form "./dataset" starting from the container group */ + if ((dset_id6 = H5Dcreate2(group_id, "./" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET6_NAME, dset_dtype6, + fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf( + " couldn't create dataset by relative path from container group with leading '.'\n"); + PART_ERROR(H5Dcreate_relative_leading_dot_nonroot); + } + + if ((link_exists = H5Lexists(file_id, + "/" MISCELLANEOUS_TEST_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME + "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET6_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link exists\n"); + PART_ERROR(H5Dcreate_relative_leading_dot_nonroot); + } + + if (!link_exists) { + H5_FAILED(); + HDprintf(" didn't exist at the correct location\n"); + PART_ERROR(H5Dcreate_relative_leading_dot_nonroot); + } + + PASSED(); + } + PART_END(H5Dcreate_relative_leading_dot_nonroot); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype1) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype2) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype3) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype4) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype5) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype6) < 0) + TEST_ERROR; + if (H5Dclose(dset_id1) < 0) + TEST_ERROR; + if (H5Dclose(dset_id2) < 0) + TEST_ERROR; + if (H5Dclose(dset_id3) < 0) + TEST_ERROR; + if (H5Dclose(dset_id4) < 0) + TEST_ERROR; + if (H5Dclose(dset_id5) < 0) + TEST_ERROR; + if (H5Dclose(dset_id6) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype1); + H5Tclose(dset_dtype2); + H5Tclose(dset_dtype3); + H5Tclose(dset_dtype4); + H5Tclose(dset_dtype5); + H5Tclose(dset_dtype6); + H5Dclose(dset_id1); + H5Dclose(dset_id2); + H5Dclose(dset_id3); + H5Dclose(dset_id4); + H5Dclose(dset_id5); + H5Dclose(dset_id6); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check creating/opening objects with the "." 
as the name + */ +static int +test_dot_for_object_name(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, subgroup_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID, dspace_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t dtype_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + herr_t ret = -1; + + TESTING_MULTIPART("creating objects with \".\" as the name"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or stored datatype aren't supported with " + "this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", MISCELLANEOUS_TEST_GROUP_NAME); + goto error; + } + + if ((subgroup_id = H5Gcreate2(container_group, DOT_AS_OBJECT_NAME_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", DOT_AS_OBJECT_NAME_TEST_SUBGROUP_NAME); + goto error; + } + + if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create data space\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Gcreate_dot_as_name) + { + TESTING_2("invalid creation of group with '.' as name"); + + /* Create a group with the "." as the name. It should fail. */ + H5E_BEGIN_TRY + { + group_id = H5Gcreate2(subgroup_id, ".", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id >= 0) { + H5_FAILED(); + HDprintf(" a group was created with '.' as the name!\n"); + PART_ERROR(H5Gcreate_dot_as_name); + } + + PASSED(); + } + PART_END(H5Gcreate_dot_as_name); + + PART_BEGIN(H5Dcreate_dot_as_name) + { + TESTING_2("invalid creation of dataset with '.' as name"); + + /* Create a dataset with the "." as the name. It should fail. */ + H5E_BEGIN_TRY + { + dset_id = H5Dcreate2(subgroup_id, ".", H5T_NATIVE_INT, dspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset_id >= 0) { + H5_FAILED(); + HDprintf(" a dataset was created with '.' as the name!\n"); + PART_ERROR(H5Dcreate_dot_as_name); + } + + PASSED(); + } + PART_END(H5Dcreate_dot_as_name); + + PART_BEGIN(H5Tcommit_dot_as_name) + { + TESTING_2("invalid creation of committed datatype with '.' as name"); + + if ((dtype_id = H5Tcopy(H5T_NATIVE_INT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy a native datatype\n"); + PART_ERROR(H5Tcommit_dot_as_name); + } + + /* Commit a datatype with "." as the name. It should fail. */ + H5E_BEGIN_TRY + { + ret = H5Tcommit2(subgroup_id, ".", dtype_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (ret >= 0) { + H5_FAILED(); + HDprintf(" a named datatype was committed with '.' 
as the name!\n"); + PART_ERROR(H5Tcommit_dot_as_name); + } + + if (H5Tclose(dtype_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close datatype\n"); + PART_ERROR(H5Tcommit_dot_as_name); + } + + PASSED(); + } + PART_END(H5Tcommit_dot_as_name); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(dspace_id) < 0) + TEST_ERROR; + if (H5Gclose(subgroup_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(dspace_id); + H5Aclose(attr_id); + H5Dclose(dset_id); + H5Tclose(dtype_id); + H5Gclose(group_id); + H5Gclose(subgroup_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that the initialization and termination + * functions of a VOL connector can be called multiple times + * in a row. + * + * TODO: Not sure if this test can be done from public APIs + * at the moment. + */ +static int +test_double_init_term(void) +{ + TESTING("double init/term correctness"); + + SKIPPED(); + + return 0; + +#if 0 +error: + return 1; +#endif +} + +static int +test_symbols_in_compound_field_name(void) +{ + size_t i; + size_t total_type_size; + size_t next_offset; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t compound_type = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t type_pool[COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES]; + char member_names[COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES][256]; + + TESTING("usage of '{', '}' and '\\\"' symbols in compound field name"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + for (i = 0; i < COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES; i++) + type_pool[i] = H5I_INVALID_HID; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file\n"); + goto error; + } + + if ((container_group = H5Gopen2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_SUBGROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group\n"); + goto error; + } + + for (i = 0, total_type_size = 0; i < COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES; i++) { + type_pool[i] = generate_random_datatype(H5T_NO_CLASS, FALSE); + total_type_size += H5Tget_size(type_pool[i]); + } + + HDsnprintf(member_names[0], 256, "{{{ member0"); + HDsnprintf(member_names[1], 256, "member1 }}}"); + HDsnprintf(member_names[2], 256, "{{{ member2 }}"); + HDsnprintf(member_names[3], 256, "{{ member3 }}}"); + HDsnprintf(member_names[4], 256, "\\\"member4"); + HDsnprintf(member_names[5], 256, "member5\\\""); + HDsnprintf(member_names[6], 256, "mem\\\"ber6"); + HDsnprintf(member_names[7], 256, "{{ member7\\\" }"); + HDsnprintf(member_names[8], 256, "{{ member8\\\\"); + + if ((compound_type = H5Tcreate(H5T_COMPOUND, total_type_size)) < 0) { + 
H5_FAILED(); + HDprintf(" couldn't create compound datatype\n"); + goto error; + } + + for (i = 0, next_offset = 0; i < COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES; i++) { + if (H5Tinsert(compound_type, member_names[i], next_offset, type_pool[i]) < 0) { + H5_FAILED(); + HDprintf(" couldn't insert compound member %zu\n", i); + goto error; + } + + next_offset += H5Tget_size(type_pool[i]); + } + + if (H5Tpack(compound_type) < 0) + TEST_ERROR; + + if ((fspace_id = generate_random_dataspace(COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_DSET_RANK, NULL, + NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_DSET_NAME, compound_type, + fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset\n"); + goto error; + } + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + + if ((dset_id = H5Dopen2(group_id, COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_DSET_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" failed to open dataset\n"); + goto error; + } + + for (i = 0; i < COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES; i++) + if (type_pool[i] >= 0 && H5Tclose(type_pool[i]) < 0) + TEST_ERROR; + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(compound_type) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + for (i = 0; i < COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES; i++) + H5Tclose(type_pool[i]); + H5Sclose(fspace_id); + H5Tclose(compound_type); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +int +H5_api_misc_test(void) +{ + size_t i; + int nerrors; + + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Miscellaneous Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(misc_tests); i++) { + nerrors += (*misc_tests[i])() ? 1 : 0; + } + + HDprintf("\n"); + + return nerrors; +} diff --git a/test/API/H5_api_misc_test.h b/test/API/H5_api_misc_test.h new file mode 100644 index 00000000000..8729db728be --- /dev/null +++ b/test/API/H5_api_misc_test.h @@ -0,0 +1,52 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_MISC_TEST_H +#define H5_API_MISC_TEST_H + +#include "H5_api_test.h" + +int H5_api_misc_test(void); + +/****************************************************** + * * + * API Miscellaneous test defines * + * * + ******************************************************/ + +#define OPEN_LINK_WITHOUT_SLASH_DSET_SPACE_RANK 2 +#define OPEN_LINK_WITHOUT_SLASH_DSET_NAME "link_without_slash_test_dset" + +#define OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME "absolute_path_test_container_group" +#define OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME "absolute_path_test_subgroup" +#define OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DTYPE_NAME "absolute_path_test_dtype" +#define OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DSET_NAME "absolute_path_test_dset" +#define OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DSET_SPACE_RANK 3 + +#define ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME "absolute_vs_relative_test_container_group" +#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET1_NAME "absolute_vs_relative_test_dset1" +#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET2_NAME "absolute_vs_relative_test_dset2" +#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET3_NAME "absolute_vs_relative_test_dset3" +#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET4_NAME "absolute_vs_relative_test_dset4" +#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET5_NAME "absolute_vs_relative_test_dset5" +#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET6_NAME "absolute_vs_relative_test_dset6" +#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET_SPACE_RANK 3 + +#define DOT_AS_OBJECT_NAME_TEST_SUBGROUP_NAME "dot_as_object_name_test" + +#define COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_SUBGROUP_NAME \ + "compound_type_with_symbols_in_member_names_test" +#define COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES 9 +#define COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_DSET_RANK 2 +#define COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_DSET_NAME "dset" + +#endif diff --git a/test/API/H5_api_object_test.c b/test/API/H5_api_object_test.c new file mode 100644 index 00000000000..e054356cf7e --- /dev/null +++ b/test/API/H5_api_object_test.c @@ -0,0 +1,7172 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_object_test.h" + +static int test_open_object(void); +static int test_open_object_invalid_params(void); +static int test_object_exists(void); +static int test_object_exists_invalid_params(void); +static int test_get_object_info(void); +static int test_get_object_info_invalid_params(void); +static int test_link_object(void); +static int test_link_object_invalid_params(void); +static int test_incr_decr_object_refcount(void); +static int test_incr_decr_object_refcount_invalid_params(void); +static int test_object_copy_basic(void); +static int test_object_copy_already_existing(void); +static int test_object_copy_shallow_group_copy(void); +static int test_object_copy_no_attributes(void); +static int test_object_copy_by_soft_link(void); +static int test_object_copy_group_with_soft_links(void); +static int test_object_copy_between_files(void); +static int test_object_copy_invalid_params(void); +static int test_object_comments(void); +static int test_object_comments_invalid_params(void); +static int test_object_visit(void); +static int test_object_visit_soft_link(void); +static int test_object_visit_invalid_params(void); +static int test_close_object(void); +static int test_close_object_invalid_params(void); +static int test_close_invalid_objects(void); +static int test_flush_object(void); +static int test_flush_object_invalid_params(void); +static int test_refresh_object(void); +static int test_refresh_object_invalid_params(void); + +static herr_t object_copy_attribute_iter_callback(hid_t location_id, const char *attr_name, + const H5A_info_t *ainfo, void *op_data); +static herr_t object_copy_soft_link_non_expand_callback(hid_t group, const char *name, + const H5L_info2_t *info, void *op_data); +static herr_t object_copy_soft_link_expand_callback(hid_t group, const char *name, const H5L_info2_t *info, + void *op_data); +static herr_t object_visit_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, + void *op_data); +static herr_t object_visit_dset_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, + void *op_data); +static herr_t object_visit_dtype_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, + void *op_data); +static herr_t object_visit_soft_link_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, + void *op_data); +static herr_t object_visit_noop_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, + void *op_data); + +/* + * The array of object tests to be performed. 
+ */ +static int (*object_tests[])(void) = { + test_open_object, + test_open_object_invalid_params, + test_object_exists, + test_object_exists_invalid_params, + test_get_object_info, + test_get_object_info_invalid_params, + test_link_object, + test_link_object_invalid_params, + test_incr_decr_object_refcount, + test_incr_decr_object_refcount_invalid_params, + test_object_copy_basic, + test_object_copy_already_existing, + test_object_copy_shallow_group_copy, + test_object_copy_no_attributes, + test_object_copy_by_soft_link, + test_object_copy_group_with_soft_links, + test_object_copy_between_files, + test_object_copy_invalid_params, + test_object_comments, + test_object_comments_invalid_params, + test_object_visit, + test_object_visit_soft_link, + test_object_visit_invalid_params, + test_close_object, + test_close_object_invalid_params, + test_close_invalid_objects, + test_flush_object, + test_flush_object_invalid_params, + test_refresh_object, + test_refresh_object_invalid_params, +}; + +/* + * A test to check that various objects (group, dataset, datatype) + * can be opened by using H5Oopen, H5Oopen_by_idx and H5Oopen_by_addr. + * + * XXX: create separate objects for each test part. + * + * XXX: Add more open by idx tests + * + * XXX: test opening through dangling and resolving soft links. + */ +static int +test_open_object(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING_MULTIPART("object opening"); + + TESTING_2("test setup"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, dataset, or stored datatype aren't " + "supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_OPEN_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_OPEN_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(OBJECT_OPEN_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Oopen_group) + { + TESTING_2("H5Oopen on a group"); + + if ((group_id2 = H5Gcreate2(group_id, OBJECT_OPEN_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_OPEN_TEST_GRP_NAME); + PART_ERROR(H5Oopen_group); + } + + H5E_BEGIN_TRY + { + H5Gclose(group_id2); + } + H5E_END_TRY; + + if ((group_id2 = H5Oopen(group_id, OBJECT_OPEN_TEST_GRP_NAME, H5P_DEFAULT)) < 0) { + 
H5_FAILED(); + HDprintf(" couldn't open group '%s' with H5Oopen\n", OBJECT_OPEN_TEST_GRP_NAME); + PART_ERROR(H5Oopen_group); + } + + if (H5Iget_type(group_id2) != H5I_GROUP) { + H5_FAILED(); + HDprintf(" ID is not a group\n"); + PART_ERROR(H5Oopen_group); + } + + if (H5Gclose(group_id2) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group opened with H5Oopen\n"); + PART_ERROR(H5Oopen_group); + } + + PASSED(); + } + PART_END(H5Oopen_group); + + PART_BEGIN(H5Oopen_dset) + { + TESTING_2("H5Oopen on a dataset"); + + if ((dset_id = H5Dcreate2(group_id, OBJECT_OPEN_TEST_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", OBJECT_OPEN_TEST_DSET_NAME); + PART_ERROR(H5Oopen_dset); + } + + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + + if ((dset_id = H5Oopen(group_id, OBJECT_OPEN_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s' with H5Oopen\n", OBJECT_OPEN_TEST_DSET_NAME); + PART_ERROR(H5Oopen_dset); + } + + if (H5Iget_type(dset_id) != H5I_DATASET) { + H5_FAILED(); + HDprintf(" ID is not a dataset\n"); + PART_ERROR(H5Oopen_dset); + } + + if (H5Dclose(dset_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close dataset opened with H5Oopen\n"); + PART_ERROR(H5Oopen_dset); + } + + PASSED(); + } + PART_END(H5Oopen_dset); + + PART_BEGIN(H5Oopen_dtype) + { + TESTING_2("H5Oopen on a committed datatype"); + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype '%s'\n", OBJECT_OPEN_TEST_TYPE_NAME); + PART_ERROR(H5Oopen_dtype); + } + + if (H5Tcommit2(group_id, OBJECT_OPEN_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", OBJECT_OPEN_TEST_TYPE_NAME); + PART_ERROR(H5Oopen_dtype); + } + + H5E_BEGIN_TRY + { + H5Tclose(type_id); + } + H5E_END_TRY; + + if ((type_id = H5Oopen(group_id, OBJECT_OPEN_TEST_TYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open datatype '%s' with H5Oopen\n", OBJECT_OPEN_TEST_TYPE_NAME); + PART_ERROR(H5Oopen_dtype); + } + + if (H5Iget_type(type_id) != H5I_DATATYPE) { + H5_FAILED(); + HDprintf(" ID is not a dataset\n"); + PART_ERROR(H5Oopen_dtype); + } + + if (H5Tclose(type_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close committed datatype opened with H5Oopen\n"); + PART_ERROR(H5Oopen_dtype); + } + + PASSED(); + } + PART_END(H5Oopen_dtype); + + if (group_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(group_id2); + } + H5E_END_TRY; + group_id2 = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + if (type_id >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(type_id); + } + H5E_END_TRY; + type_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Oopen_by_idx_group) + { + TESTING_2("H5Oopen_by_idx on a group"); + + if ((group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open group '%s' with H5Oopen_by_idx\n", OBJECT_OPEN_TEST_GRP_NAME); + PART_ERROR(H5Oopen_by_idx_group); + } + + PASSED(); + } + PART_END(H5Oopen_by_idx_group); + + PART_BEGIN(H5Oopen_by_idx_dset) + { + TESTING_2("H5Oopen_by_idx on a dataset"); + + if ((dset_id = H5Oopen_by_idx(container_group, OBJECT_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 0, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s' 
with H5Oopen_by_idx\n", OBJECT_OPEN_TEST_DSET_NAME); + PART_ERROR(H5Oopen_by_idx_dset); + } + + PASSED(); + } + PART_END(H5Oopen_by_idx_dset); + + PART_BEGIN(H5Oopen_by_idx_dtype) + { + TESTING_2("H5Oopen_by_idx on a committed datatype"); + + if ((type_id = H5Oopen_by_idx(container_group, OBJECT_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, 2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open committed datatype '%s' with H5Oopen_by_idx\n", + OBJECT_OPEN_TEST_TYPE_NAME); + PART_ERROR(H5Oopen_by_idx_dtype); + } + + PASSED(); + } + PART_END(H5Oopen_by_idx_dtype); + + if (group_id2 >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(group_id2); + } + H5E_END_TRY; + group_id2 = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + if (type_id >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(type_id); + } + H5E_END_TRY; + type_id = H5I_INVALID_HID; + } + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Tclose(type_id); + H5Dclose(dset_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that various objects (group, dataset, datatype) + * can't be opened when H5Oopen, H5Oopen_by_idx and H5Oopen_by_addr + * are passed invalid parameters. + */ +static int +test_open_object_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("object opening with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, or creation order aren't supported with " + "this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + if ((group_id2 = H5Gcreate2(group_id, OBJECT_OPEN_INVALID_PARAMS_TEST_GRP_NAME, H5P_DEFAULT, 
H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_OPEN_INVALID_PARAMS_TEST_GRP_NAME); + goto error; + } + + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Oopen_invalid_loc_id) + { + TESTING_2("H5Oopen with an invalid location ID"); + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen(H5I_INVALID_HID, OBJECT_OPEN_INVALID_PARAMS_TEST_GRP_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen succeeded with an invalid location ID!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Oopen_invalid_loc_id); + + PART_BEGIN(H5Oopen_invalid_obj_name) + { + TESTING_2("H5Oopen with an invalid object name"); + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen(group_id, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen succeeded with a NULL object name!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen(group_id, "", H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen succeeded with an invalid object name of ''!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Oopen_invalid_obj_name); + + PART_BEGIN(H5Oopen_invalid_lapl) + { + TESTING_2("H5Oopen with an invalid LAPL"); + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen(group_id, OBJECT_OPEN_INVALID_PARAMS_TEST_GRP_NAME, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen succeeded with an invalid LAPL!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Oopen_invalid_lapl); + + PART_BEGIN(H5Oopen_by_idx_invalid_loc_id) + { + TESTING_2("H5Oopen_by_idx with an invalid location ID"); + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen_by_idx(H5I_INVALID_HID, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen_by_idx succeeded with an invalid location ID!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_by_idx_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Oopen_by_idx_invalid_loc_id); + + PART_BEGIN(H5Oopen_by_idx_invalid_grp_name) + { + TESTING_2("H5Oopen_by_idx with an invalid group name"); + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen_by_idx(container_group, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen_by_idx succeeded with a NULL group name!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_by_idx_invalid_grp_name); + } + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen_by_idx(container_group, "", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen_by_idx succeeded with an invalid group name of ''!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_by_idx_invalid_grp_name); + } + + PASSED(); + } + PART_END(H5Oopen_by_idx_invalid_grp_name); + + PART_BEGIN(H5Oopen_by_idx_invalid_index_type) + { + TESTING_2("H5Oopen_by_idx with an invalid index type"); + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_UNKNOWN, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen_by_idx succeeded with invalid 
index type H5_INDEX_UNKNOWN!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_by_idx_invalid_index_type); + } + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_N, H5_ITER_INC, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen_by_idx succeeded with invalid index type H5_INDEX_N!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_by_idx_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Oopen_by_idx_invalid_index_type); + + PART_BEGIN(H5Oopen_by_idx_invalid_iter_order) + { + TESTING_2("H5Oopen_by_idx with an invalid iteration order"); + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf( + " H5Oopen_by_idx succeeded with an invalid iteration ordering H5_ITER_UNKNOWN!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_by_idx_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_NAME, H5_ITER_N, 0, H5P_DEFAULT); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen_by_idx succeeded with an invalid iteration ordering H5_ITER_N!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_by_idx_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Oopen_by_idx_invalid_iter_order); + + PART_BEGIN(H5Oopen_by_idx_invalid_lapl) + { + TESTING_2("H5Oopen_by_idx with an invalid LAPL"); + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, + H5_INDEX_NAME, H5_ITER_INC, 0, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen_by_idx succeeded with an invalid LAPL!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_by_idx_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Oopen_by_idx_invalid_lapl); + + PART_BEGIN(H5Oopen_by_token_invalid_loc_id) + { + TESTING_2("H5Oopen_by_token with an invalid location ID"); + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen_by_token(H5I_INVALID_HID, H5O_TOKEN_UNDEF); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen_by_token succeeded with an invalid location ID!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_by_token_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Oopen_by_token_invalid_loc_id); + + PART_BEGIN(H5Oopen_by_token_invalid_token) + { + TESTING_2("H5Oopen_by_token with an invalid token"); + + H5E_BEGIN_TRY + { + group_id2 = H5Oopen_by_token(file_id, H5O_TOKEN_UNDEF); + } + H5E_END_TRY; + + if (group_id2 >= 0) { + H5_FAILED(); + HDprintf(" H5Oopen_by_token succeeded with an invalid token!\n"); + H5Gclose(group_id2); + PART_ERROR(H5Oopen_by_token_invalid_token); + } + + PASSED(); + } + PART_END(H5Oopen_by_token_invalid_token); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test for H5Oexists_by_name. 
+ */ +static int +test_object_exists(void) +{ + htri_t object_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dtype_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + + TESTING_MULTIPART("object existence"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, dataset, stored datatype or soft link " + "aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_EXISTS_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_EXISTS_TEST_SUBGROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(OBJECT_EXISTS_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + PASSED(); + + /* + * NOTE: H5Oexists_by_name for hard links should always succeed. + * H5Oexists_by_name for a soft link may fail if the link doesn't resolve. 
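+ *
+ *       The parts below rely on that distinction between a link and the object
+ *       it resolves to. A minimal sketch of the pattern, with illustrative
+ *       names rather than the ones used in this test:
+ *
+ *           if (H5Lexists(loc_id, "link_name", H5P_DEFAULT) > 0) {
+ *               htri_t exists = H5Oexists_by_name(loc_id, "link_name", H5P_DEFAULT);
+ *
+ *               ... exists > 0 means the link resolves to an object, while
+ *                   exists == 0 means the link is dangling ...
+ *           }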
+ */ + BEGIN_MULTIPART + { + PART_BEGIN(H5Oexists_by_name_group) + { + TESTING_2("H5Oexists_by_name on a group"); + + if ((group_id2 = H5Gcreate2(group_id, OBJECT_EXISTS_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_EXISTS_TEST_GRP_NAME); + PART_ERROR(H5Oexists_by_name_group); + } + + if ((object_exists = H5Oexists_by_name(group_id, OBJECT_EXISTS_TEST_GRP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if object '%s' exists\n", OBJECT_EXISTS_TEST_GRP_NAME); + PART_ERROR(H5Oexists_by_name_group); + } + + if (!object_exists) { + H5_FAILED(); + HDprintf(" object '%s' didn't exist!\n", OBJECT_EXISTS_TEST_GRP_NAME); + PART_ERROR(H5Oexists_by_name_group); + } + + if (H5Gclose(group_id2) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group\n"); + PART_ERROR(H5Oexists_by_name_group); + } + + PASSED(); + } + PART_END(H5Oexists_by_name_group); + + PART_BEGIN(H5Oexists_by_name_dset) + { + TESTING_2("H5Oexists_by_name on a dataset"); + + if ((dset_id = H5Dcreate2(group_id, OBJECT_EXISTS_TEST_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", OBJECT_EXISTS_TEST_DSET_NAME); + PART_ERROR(H5Oexists_by_name_dset); + } + + if ((object_exists = H5Oexists_by_name(group_id, OBJECT_EXISTS_TEST_DSET_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if object '%s' exists\n", OBJECT_EXISTS_TEST_DSET_NAME); + PART_ERROR(H5Oexists_by_name_dset); + } + + if (!object_exists) { + H5_FAILED(); + HDprintf(" object '%s' didn't exist!\n", OBJECT_EXISTS_TEST_DSET_NAME); + PART_ERROR(H5Oexists_by_name_dset); + } + + if (H5Dclose(dset_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close dataset\n"); + PART_ERROR(H5Oexists_by_name_dset); + } + + PASSED(); + } + PART_END(H5Oexists_by_name_dset); + + PART_BEGIN(H5Oexists_by_name_dtype) + { + TESTING_2("H5Oexists_by_name on a committed datatype"); + + if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype '%s'\n", OBJECT_EXISTS_TEST_TYPE_NAME); + PART_ERROR(H5Oexists_by_name_dtype); + } + + if (H5Tcommit2(group_id, OBJECT_EXISTS_TEST_TYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", OBJECT_EXISTS_TEST_TYPE_NAME); + PART_ERROR(H5Oexists_by_name_dtype); + } + + if ((object_exists = H5Oexists_by_name(group_id, OBJECT_EXISTS_TEST_TYPE_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if object '%s' exists\n", OBJECT_EXISTS_TEST_TYPE_NAME); + PART_ERROR(H5Oexists_by_name_dtype); + } + + if (!object_exists) { + H5_FAILED(); + HDprintf(" object '%s' didn't exist!\n", OBJECT_EXISTS_TEST_TYPE_NAME); + PART_ERROR(H5Oexists_by_name_dtype); + } + + if (H5Tclose(dtype_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close datatype\n"); + PART_ERROR(H5Oexists_by_name_dtype); + } + + PASSED(); + } + PART_END(H5Oexists_by_name_dtype); + + PART_BEGIN(H5Oexists_by_name_soft_link) + { + TESTING_2("H5Oexists_by_name for a soft link"); + + if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_EXISTS_TEST_SUBGROUP_NAME, group_id, + OBJECT_EXISTS_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", OBJECT_EXISTS_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Oexists_by_name_soft_link); + } + + if ((object_exists = + H5Oexists_by_name(group_id, 
OBJECT_EXISTS_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if object '%s' exists\n", OBJECT_EXISTS_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Oexists_by_name_soft_link); + } + + if (!object_exists) { + H5_FAILED(); + HDprintf(" object '%s' didn't exist!\n", OBJECT_EXISTS_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Oexists_by_name_soft_link); + } + + PASSED(); + } + PART_END(H5Oexists_by_name_soft_link); + + PART_BEGIN(H5Oexists_by_name_dangling_soft_link) + { + TESTING_2("H5Oexists_by_name for a dangling soft link"); + + if (H5Lcreate_soft( + "/" OBJECT_TEST_GROUP_NAME "/" OBJECT_EXISTS_TEST_SUBGROUP_NAME "/non_existent_object", + group_id, OBJECT_EXISTS_TEST_DANGLING_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", OBJECT_EXISTS_TEST_DANGLING_LINK_NAME); + PART_ERROR(H5Oexists_by_name_dangling_soft_link); + } + + if ((object_exists = + H5Oexists_by_name(group_id, OBJECT_EXISTS_TEST_DANGLING_LINK_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if object '%s' exists\n", + "/" OBJECT_TEST_GROUP_NAME "/" OBJECT_EXISTS_TEST_SUBGROUP_NAME + "/non_existent_object"); + PART_ERROR(H5Oexists_by_name_dangling_soft_link); + } + + if (object_exists) { + H5_FAILED(); + HDprintf(" object pointed to by dangling soft link should not have existed!\n"); + PART_ERROR(H5Oexists_by_name_dangling_soft_link); + } + + PASSED(); + } + PART_END(H5Oexists_by_name_dangling_soft_link); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Tclose(dtype_id); + H5Dclose(dset_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that H5Oexists_by_name fails + * when it is passed invalid parameters. 
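+ *
+ * Each call that is expected to fail is wrapped in H5E_BEGIN_TRY / H5E_END_TRY
+ * so that the provoked error stack is suppressed and only the return value is
+ * checked.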
+ */ +static int +test_object_exists_invalid_params(void) +{ + htri_t object_exists; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + + TESTING_MULTIPART("object existence with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or object aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_EXISTS_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + OBJECT_EXISTS_INVALID_PARAMS_TEST_SUBGROUP_NAME); + goto error; + } + + if ((group_id2 = H5Gcreate2(group_id, OBJECT_EXISTS_INVALID_PARAMS_TEST_GRP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_EXISTS_INVALID_PARAMS_TEST_GRP_NAME); + goto error; + } + + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Oexists_by_name_invalid_loc_id) + { + TESTING_2("H5Oexists_by_name with an invalid location ID"); + + H5E_BEGIN_TRY + { + object_exists = H5Oexists_by_name(H5I_INVALID_HID, OBJECT_EXISTS_INVALID_PARAMS_TEST_GRP_NAME, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (object_exists >= 0) { + H5_FAILED(); + HDprintf(" H5Oexists_by_name succeeded with an invalid location ID!\n"); + PART_ERROR(H5Oexists_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Oexists_by_name_invalid_loc_id); + + PART_BEGIN(H5Oexists_by_name_invalid_obj_name) + { + TESTING_2("H5Oexists_by_name with an invalid object name"); + + H5E_BEGIN_TRY + { + object_exists = H5Oexists_by_name(group_id, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (object_exists >= 0) { + H5_FAILED(); + HDprintf(" H5Oexists_by_name succeeded with a NULL object name!\n"); + PART_ERROR(H5Oexists_by_name_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + object_exists = H5Oexists_by_name(group_id, "", H5P_DEFAULT); + } + H5E_END_TRY; + + if (object_exists >= 0) { + H5_FAILED(); + HDprintf(" H5Oexists_by_name succeeded with an invalid object name of ''!\n"); + PART_ERROR(H5Oexists_by_name_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Oexists_by_name_invalid_obj_name); + + PART_BEGIN(H5Oexists_by_name_invalid_lapl) + { + TESTING_2("H5Oexists_by_name with an invalid LAPL"); + + H5E_BEGIN_TRY + { + object_exists = + H5Oexists_by_name(group_id, OBJECT_EXISTS_INVALID_PARAMS_TEST_GRP_NAME, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (object_exists >= 0) { + H5_FAILED(); + HDprintf(" H5Oexists_by_name succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Oexists_by_name_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Oexists_by_name_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if 
(H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test for H5Oget_info(_by_name/_by_idx). + */ +static int +test_get_object_info(void) +{ + TESTING("object info retrieval"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check that an object's info can't be retrieved + * when H5Oget_info(_by_name/_by_idx) are passed invalid + * parameters. + */ +static int +test_get_object_info_invalid_params(void) +{ + TESTING("object info retrieval with invalid parameters"); + + SKIPPED(); + + return 0; +} + +/* + * A test for H5Olink. + */ +static int +test_link_object(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING_MULTIPART("object linking"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, dataset, or stored datatype aren't " + "supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_LINK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_LINK_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(OBJECT_LINK_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Olink_group) + { + TESTING_2("H5Olink an anonymous group"); + + if ((group_id2 = H5Gcreate_anon(group_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create an anonymous group\n"); + PART_ERROR(H5Olink_group); + } + + if (H5Olink(group_id2, group_id, OBJECT_LINK_TEST_GROUP_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't link the anonymous group\n"); + PART_ERROR(H5Olink_group); + } + + PASSED(); + } + PART_END(H5Olink_group); + + PART_BEGIN(H5Olink_dataset) + { + TESTING_2("H5Olink an anonymous dataset"); + + if ((dset_id = H5Dcreate_anon(group_id, dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create an anonymous dataset\n"); + PART_ERROR(H5Olink_dataset); + } + + if (H5Olink(dset_id, group_id, OBJECT_LINK_TEST_DSET_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't link the anonymous dataset\n"); + PART_ERROR(H5Olink_dataset); + } + + PASSED(); + } 
+ PART_END(H5Olink_dataset); + + PART_BEGIN(H5Olink_datatype) + { + TESTING_2("H5Olink an anonymous datatype"); + + if (H5Tcommit_anon(group_id, dset_dtype, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create an anonymous datatype\n"); + PART_ERROR(H5Olink_datatype); + } + + if (H5Olink(dset_dtype, group_id, OBJECT_LINK_TEST_DTYPE_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't link the anonymous datatype\n"); + PART_ERROR(H5Olink_datatype); + } + + PASSED(); + } + PART_END(H5Olink_datatype); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Tclose(type_id); + H5Dclose(dset_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that an object can't be linked into + * the file structure when H5Olink is passed invalid + * parameters. + */ +static int +test_link_object_invalid_params(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + herr_t status; + + TESTING_MULTIPART("object linking with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or object aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_LINK_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_LINK_TEST_GROUP_NAME); + goto error; + } + + if ((group_id2 = H5Gcreate_anon(group_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create an anonymous group\n"); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Olink_invalid_object_id) + { + TESTING_2("H5Olink with an invalid object ID"); + + H5E_BEGIN_TRY + { + status = H5Olink(H5I_INVALID_HID, group_id, OBJECT_LINK_TEST_GROUP_NAME2, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" H5Olink succeeded with an invalid object ID!\n"); + PART_ERROR(H5Olink_invalid_object_id); + } + + PASSED(); + } + PART_END(H5Olink_invalid_object_id); + + PART_BEGIN(H5Olink_invalid_location) + { + TESTING_2("H5Olink with an invalid location ID"); + + H5E_BEGIN_TRY + { + status = H5Olink(group_id2, H5I_INVALID_HID, OBJECT_LINK_TEST_GROUP_NAME2, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; + + if 
(status >= 0) { + H5_FAILED(); + HDprintf(" H5Olink succeeded with an invalid location ID!\n"); + PART_ERROR(H5Olink_invalid_location); + } + + PASSED(); + } + PART_END(H5Olink_invalid_location); + + PART_BEGIN(H5Olink_invalid_name) + { + TESTING_2("H5Olink with an invalid name"); + + H5E_BEGIN_TRY + { + status = H5Olink(group_id2, group_id, NULL, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" H5Olink succeeded with NULL as the object name!\n"); + PART_ERROR(H5Olink_invalid_name); + } + + H5E_BEGIN_TRY + { + status = H5Olink(group_id2, group_id, "", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" H5Olink succeeded with an invalid object name of ''!\n"); + PART_ERROR(H5Olink_invalid_name); + } + + PASSED(); + } + PART_END(H5Olink_invalid_name); + + PART_BEGIN(H5Olink_invalid_lcpl) + { + TESTING_2("H5Olink with an invalid LCPL"); + + H5E_BEGIN_TRY + { + status = + H5Olink(group_id2, group_id, OBJECT_LINK_TEST_GROUP_NAME2, H5I_INVALID_HID, H5P_DEFAULT); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" H5Olink succeeded with an invalid LCPL!\n"); + PART_ERROR(H5Olink_invalid_lcpl); + } + + PASSED(); + } + PART_END(H5Olink_invalid_lcpl); + + PART_BEGIN(H5Olink_invalid_lapl) + { + TESTING_2("H5Olink with an invalid LAPL"); +#ifndef NO_INVALID_PROPERTY_LIST_TESTS + H5E_BEGIN_TRY + { + status = + H5Olink(group_id2, group_id, OBJECT_LINK_TEST_GROUP_NAME2, H5P_DEFAULT, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" H5Olink succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Olink_invalid_lapl); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Olink_invalid_lapl); +#endif + } + PART_END(H5Olink_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test for H5Oincr_refcount/H5Odecr_refcount. 
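+ *
+ * The same pattern is applied to a group, a dataset and a committed datatype:
+ * bump the object's hard-link reference count from 1 to 2, verify it through
+ * H5Oget_info_by_name3(), then drop it back to 1 and verify again. Roughly
+ * (a sketch of the calls used below, with placeholder names):
+ *
+ *     H5Oincr_refcount(obj_id);
+ *     H5Oget_info_by_name3(parent_id, "obj_name", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ *     ... oinfo.rc is expected to be 2 here ...
+ *     H5Odecr_refcount(obj_id);
+ *     ... after the same query again, oinfo.rc is expected to be 1 ...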
+ */ +static int +test_incr_decr_object_refcount(void) +{ + H5O_info2_t oinfo; /* Object info struct */ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + + TESTING_MULTIPART("increment/decrement the reference count of object"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, stored datatype, basic or more object " + "aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_REF_COUNT_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_REF_COUNT_TEST_SUBGROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(OBJECT_REF_COUNT_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Oincr_decr_refcount_group) + { + TESTING_2("H5Oincr_refcount/H5Odecr_refcount on a group"); + + if ((group_id2 = H5Gcreate2(group_id, OBJECT_REF_COUNT_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_REF_COUNT_TEST_GRP_NAME); + PART_ERROR(H5Oincr_decr_refcount_group); + } + + /* Increment the reference count */ + if (H5Oincr_refcount(group_id2) < 0) { + H5_FAILED(); + HDprintf(" couldn't increment reference count for the group '%s' \n", + OBJECT_REF_COUNT_TEST_GRP_NAME); + PART_ERROR(H5Oincr_decr_refcount_group); + } + + /* Verify that reference count is 2 now */ + if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_GRP_NAME, &oinfo, H5O_INFO_BASIC, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get reference count for the group '%s' \n", + OBJECT_REF_COUNT_TEST_GRP_NAME); + PART_ERROR(H5Oincr_decr_refcount_group); + } + + if (oinfo.rc != 2) { + H5_FAILED(); + HDprintf(" the reference count for the group '%s' isn't 2: %d\n", + OBJECT_REF_COUNT_TEST_GRP_NAME, oinfo.rc); + PART_ERROR(H5Oincr_decr_refcount_group); + } + + /* Decrement the reference count */ + if (H5Odecr_refcount(group_id2) < 0) { + H5_FAILED(); + HDprintf(" couldn't decrement reference count for the group '%s' \n", + OBJECT_REF_COUNT_TEST_GRP_NAME); + PART_ERROR(H5Oincr_decr_refcount_group); + } + + /* Verify that reference count is 1 now */ + if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_GRP_NAME, &oinfo, H5O_INFO_BASIC, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get reference count for the group 
'%s' \n", + OBJECT_REF_COUNT_TEST_GRP_NAME); + PART_ERROR(H5Oincr_decr_refcount_group); + } + + if (oinfo.rc != 1) { + H5_FAILED(); + HDprintf(" the reference count for the group '%s' isn't 1: %d\n", + OBJECT_REF_COUNT_TEST_GRP_NAME, oinfo.rc); + PART_ERROR(H5Oincr_decr_refcount_group); + } + + if (H5Gclose(group_id2) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group\n"); + PART_ERROR(H5Oincr_decr_refcount_group); + } + + PASSED(); + } + PART_END(H5Oincr_decr_refcount_group); + + PART_BEGIN(H5Oincr_decr_refcount_dset) + { + TESTING_2("H5Oincr_refcount/H5Odecr_refcount on a dataset"); + + if ((dset_id = H5Dcreate2(group_id, OBJECT_REF_COUNT_TEST_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", OBJECT_REF_COUNT_TEST_DSET_NAME); + PART_ERROR(H5Oincr_decr_refcount_dset); + } + + /* Increment the reference count */ + if (H5Oincr_refcount(dset_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't increment reference count for the dataset '%s' \n", + OBJECT_REF_COUNT_TEST_DSET_NAME); + PART_ERROR(H5Oincr_decr_refcount_dset); + } + + /* Verify that reference count is 2 now */ + if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_DSET_NAME, &oinfo, H5O_INFO_BASIC, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get reference count for the dataset '%s' \n", + OBJECT_REF_COUNT_TEST_DSET_NAME); + PART_ERROR(H5Oincr_decr_refcount_dset); + } + + if (oinfo.rc != 2) { + H5_FAILED(); + HDprintf(" the reference count for the dataset '%s' isn't 2: %d\n", + OBJECT_REF_COUNT_TEST_DSET_NAME, oinfo.rc); + PART_ERROR(H5Oincr_decr_refcount_dset); + } + + /* Decrement the reference count */ + if (H5Odecr_refcount(dset_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't decrement reference count for the dataset '%s' \n", + OBJECT_REF_COUNT_TEST_DSET_NAME); + PART_ERROR(H5Oincr_decr_refcount_dset); + } + + /* Verify that reference count is 1 now */ + if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_DSET_NAME, &oinfo, H5O_INFO_BASIC, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get reference count for the dataset '%s' \n", + OBJECT_REF_COUNT_TEST_DSET_NAME); + PART_ERROR(H5Oincr_decr_refcount_dset); + } + + if (oinfo.rc != 1) { + H5_FAILED(); + HDprintf(" the reference count for the dataset '%s' isn't 1: %d\n", + OBJECT_REF_COUNT_TEST_DSET_NAME, oinfo.rc); + PART_ERROR(H5Oincr_decr_refcount_dset); + } + + if (H5Dclose(dset_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close dataset\n"); + PART_ERROR(H5Oincr_decr_refcount_dset); + } + + PASSED(); + } + PART_END(H5Oincr_decr_refcount_dset); + + PART_BEGIN(H5Oincr / decr_refcount_dtype) + { + TESTING_2("H5Oincr_refcount/H5Odecr_refcount on a committed datatype"); + + if (H5Tcommit2(group_id, OBJECT_REF_COUNT_TEST_TYPE_NAME, dset_dtype, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", OBJECT_REF_COUNT_TEST_TYPE_NAME); + PART_ERROR(H5Oincr_decr_refcount_dtype); + } + + /* Increment the reference count */ + if (H5Oincr_refcount(dset_dtype) < 0) { + H5_FAILED(); + HDprintf(" couldn't increment reference count for the datatype '%s' \n", + OBJECT_REF_COUNT_TEST_TYPE_NAME); + PART_ERROR(H5Oincr_decr_refcount_dtype); + } + + /* Verify that reference count is 2 now */ + if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_TYPE_NAME, &oinfo, H5O_INFO_BASIC, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get reference count for the datatype '%s' \n", + 
OBJECT_REF_COUNT_TEST_TYPE_NAME); + PART_ERROR(H5Oincr_decr_refcount_dtype); + } + + if (oinfo.rc != 2) { + H5_FAILED(); + HDprintf(" the reference count for the datatype '%s' isn't 2: %d\n", + OBJECT_REF_COUNT_TEST_TYPE_NAME, oinfo.rc); + PART_ERROR(H5Oincr_decr_refcount_dtype); + } + + /* Decrement the reference count */ + if (H5Odecr_refcount(dset_dtype) < 0) { + H5_FAILED(); + HDprintf(" couldn't decrement reference count for the datatype '%s' \n", + OBJECT_REF_COUNT_TEST_TYPE_NAME); + PART_ERROR(H5Oincr_decr_refcount_dtype); + } + + /* Verify that reference count is 1 now */ + if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_TYPE_NAME, &oinfo, H5O_INFO_BASIC, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't get reference count for the datatype '%s' \n", + OBJECT_REF_COUNT_TEST_TYPE_NAME); + PART_ERROR(H5Oincr_decr_refcount_dtype); + } + + if (oinfo.rc != 1) { + H5_FAILED(); + HDprintf(" the reference count for the datatype '%s' isn't 1: %d\n", + OBJECT_REF_COUNT_TEST_TYPE_NAME, oinfo.rc); + PART_ERROR(H5Oincr_decr_refcount_dtype); + } + + if (H5Tclose(dset_dtype) < 0) { + H5_FAILED(); + HDprintf(" couldn't close datatype\n"); + PART_ERROR(H5Oincr_decr_refcount_dtype); + } + + PASSED(); + } + PART_END(H5Oincr_decr_refcount_dtype); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Dclose(dset_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} /* test_incr_decr_object_refcount */ + +/* + * A test to check that H5Oincr_refcount/H5Odecr_refcount + * fail when passed invalid parameters. + */ +static int +test_incr_decr_object_refcount_invalid_params(void) +{ + herr_t status; + + TESTING_MULTIPART("object reference count incr./decr. with an invalid parameter"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE)) { + SKIPPED(); + HDprintf(" API functions for more object aren't supported with this connector\n"); + return 0; + } + + BEGIN_MULTIPART + { + PART_BEGIN(H5Oincr_refcount_invalid_param) + { + TESTING_2("H5Oincr_refcount with invalid object ID"); + + H5E_BEGIN_TRY + { + status = H5Oincr_refcount(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" incremented the reference count for an invalid object ID\n"); + PART_ERROR(H5Oincr_refcount_invalid_param); + } + + PASSED(); + } + PART_END(H5Oincr_refcount_invalid_param); + + PART_BEGIN(H5Odecr_refcount_invalid_param) + { + TESTING_2("H5Odecr_refcount with invalid object ID"); + + H5E_BEGIN_TRY + { + status = H5Odecr_refcount(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" decremented the reference count for an invalid object ID\n"); + PART_ERROR(H5Odecr_refcount_invalid_param); + } + + PASSED(); + } + PART_END(H5Odecr_refcount_invalid_param); + } + END_MULTIPART; + + return 0; + +error: + return 1; +} + +/* + * Basic tests for H5Ocopy. 
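+ *
+ * The test creates a group (with nested sub-groups and attributes), a dataset
+ * and a committed datatype, copies each of them with default copy options, and
+ * then checks the copies via H5Lexists, H5Gget_info, H5Oget_info3 and
+ * H5Aiterate2. The call under test takes a source location and link name, a
+ * destination location and new link name, an object copy property list and a
+ * link creation property list (the names below are placeholders):
+ *
+ *     H5Ocopy(src_loc_id, "src_name", dst_loc_id, "dst_name", H5P_DEFAULT, H5P_DEFAULT);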
+ */ +static int +test_object_copy_basic(void) +{ + H5O_info2_t object_info; + H5G_info_t group_info; + htri_t object_link_exists; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t tmp_group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t tmp_dset_id = H5I_INVALID_HID; + hid_t dtype_id = H5I_INVALID_HID; + hid_t tmp_dtype_id = H5I_INVALID_HID; + hid_t tmp_attr_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("basic object copying"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, link, dataset, attribute, iterate, or " + "stored datatype aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_BASIC_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_COPY_BASIC_TEST_SUBGROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(OBJECT_COPY_BASIC_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + if ((attr_space_id = generate_random_dataspace(OBJECT_COPY_BASIC_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + /* Create the test group object, along with its nested members and the attributes attached to it. 
*/ + if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_BASIC_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_BASIC_TEST_GROUP_NAME); + goto error; + } + + for (i = 0; i < (size_t)OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS; i++) { + char grp_name[OBJECT_COPY_BASIC_TEST_BUF_SIZE]; + + snprintf(grp_name, OBJECT_COPY_BASIC_TEST_BUF_SIZE, "grp%d", (int)i); + + if ((tmp_group_id = H5Gcreate2(group_id2, grp_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s' under group '%s'\n", grp_name, + OBJECT_COPY_BASIC_TEST_GROUP_NAME); + goto error; + } + + /* Create a further nested group under the last group added */ + if (i == (OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS - 1)) { + if (H5Gclose(H5Gcreate2(tmp_group_id, OBJECT_COPY_BASIC_TEST_DEEP_NESTED_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create nested group '%s' under group '%s'\n", + OBJECT_COPY_BASIC_TEST_DEEP_NESTED_GROUP_NAME, grp_name); + goto error; + } + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group '%s'\n", grp_name); + goto error; + } + } + + for (i = 0; i < (size_t)OBJECT_COPY_BASIC_TEST_NUM_ATTRS; i++) { + char attr_name[OBJECT_COPY_BASIC_TEST_BUF_SIZE]; + + snprintf(attr_name, OBJECT_COPY_BASIC_TEST_BUF_SIZE, "attr%d", (int)i); + + if ((tmp_attr_id = H5Acreate2(group_id2, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s' on group '%s'\n", attr_name, + OBJECT_COPY_BASIC_TEST_GROUP_NAME); + goto error; + } + + if (H5Aclose(tmp_attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", attr_name); + goto error; + } + } + + /* Create the test dataset object, along with the attributes attached to it. */ + if ((dset_id = H5Dcreate2(group_id, OBJECT_COPY_BASIC_TEST_DSET_NAME, dset_dtype, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", OBJECT_COPY_BASIC_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < (size_t)OBJECT_COPY_BASIC_TEST_NUM_ATTRS; i++) { + char attr_name[OBJECT_COPY_BASIC_TEST_BUF_SIZE]; + + snprintf(attr_name, OBJECT_COPY_BASIC_TEST_BUF_SIZE, "attr%d", (int)i); + + if ((tmp_attr_id = H5Acreate2(dset_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s' on dataset '%s'\n", attr_name, + OBJECT_COPY_BASIC_TEST_DSET_NAME); + goto error; + } + + if (H5Aclose(tmp_attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", attr_name); + goto error; + } + } + + /* Create the test committed datatype object, along with the attributes attached to it. 
*/ + if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + if (H5Tcommit2(group_id, OBJECT_COPY_BASIC_TEST_DTYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", OBJECT_COPY_BASIC_TEST_DTYPE_NAME); + goto error; + } + + for (i = 0; i < (size_t)OBJECT_COPY_BASIC_TEST_NUM_ATTRS; i++) { + char attr_name[OBJECT_COPY_BASIC_TEST_BUF_SIZE]; + + snprintf(attr_name, OBJECT_COPY_BASIC_TEST_BUF_SIZE, "attr%d", (int)i); + + if ((tmp_attr_id = H5Acreate2(dtype_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s' on committed datatype '%s'\n", attr_name, + OBJECT_COPY_BASIC_TEST_DTYPE_NAME); + goto error; + } + + if (H5Aclose(tmp_attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", attr_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Ocopy_group) + { + TESTING_2("H5Ocopy on a group (default copy options)"); + + if (H5Ocopy(group_id, OBJECT_COPY_BASIC_TEST_GROUP_NAME, group_id, + OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy group '%s' to '%s'\n", OBJECT_COPY_BASIC_TEST_GROUP_NAME, + OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_group); + } + + if ((object_link_exists = + H5Lexists(group_id, OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied group exists\n", + OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_group); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied group didn't exist!\n", + OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_group); + } + + /* Ensure that the new group has all the members of the copied group, and all its attributes */ + if ((tmp_group_id = H5Gopen2(group_id, OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open group copy '%s'\n", OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_group); + } + + memset(&group_info, 0, sizeof(group_info)); + + /* + * Set link count to zero in case the connector doesn't support + * retrieval of group info. + */ + group_info.nlinks = 0; + + if (H5Gget_info(tmp_group_id, &group_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group info\n"); + PART_ERROR(H5Ocopy_group); + } + + if (group_info.nlinks != OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS) { + H5_FAILED(); + HDprintf(" copied group contained %d members instead of %d members after a deep copy!\n", + (int)group_info.nlinks, OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS); + PART_ERROR(H5Ocopy_group); + } + + memset(&object_info, 0, sizeof(object_info)); + + /* + * Set attribute count to zero in case the connector doesn't + * support retrieval of object info. + */ + object_info.num_attrs = 0; + + if (H5Oget_info3(tmp_group_id, &object_info, H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve object info\n"); + PART_ERROR(H5Ocopy_group); + } + + if (object_info.num_attrs == 0) { + H5_FAILED(); + HDprintf(" copied group didn't contain any attributes after copy operation!\n"); + PART_ERROR(H5Ocopy_group); + } + + /* Check the attribute names, types, etc. 
*/ + i = 0; + if (H5Aiterate2(tmp_group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, + object_copy_attribute_iter_callback, &i) < 0) { + H5_FAILED(); + HDprintf(" failed to iterate over copied group's attributes\n"); + PART_ERROR(H5Ocopy_group); + } + + if (i != OBJECT_COPY_BASIC_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf( + " number of attributes on copied group (%llu) didn't match expected number (%llu)!\n", + (unsigned long long)i, (unsigned long long)OBJECT_COPY_BASIC_TEST_NUM_ATTRS); + PART_ERROR(H5Ocopy_group); + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group copy\n"); + PART_ERROR(H5Ocopy_group); + } + + /* + * Ensure that the last immediate member of the copied group + * contains its single member after the deep copy. + */ + { + char grp_name[OBJECT_COPY_BASIC_TEST_BUF_SIZE]; + + snprintf(grp_name, OBJECT_COPY_BASIC_TEST_BUF_SIZE, + OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME "/grp%d", + OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS - 1); + + if ((tmp_group_id = H5Gopen2(group_id, grp_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open group '%s'\n", + OBJECT_COPY_BASIC_TEST_DEEP_NESTED_GROUP_NAME); + PART_ERROR(H5Ocopy_group); + } + + memset(&group_info, 0, sizeof(group_info)); + + /* + * Set link count to zero in case the connector doesn't support + * retrieval of group info. + */ + group_info.nlinks = 0; + + if (H5Gget_info(tmp_group_id, &group_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group info\n"); + PART_ERROR(H5Ocopy_group); + } + + if (group_info.nlinks != 1) { + H5_FAILED(); + HDprintf(" copied group's immediate members didn't contain nested members after a " + "deep copy!\n"); + PART_ERROR(H5Ocopy_group); + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", + OBJECT_COPY_BASIC_TEST_DEEP_NESTED_GROUP_NAME); + PART_ERROR(H5Ocopy_group); + } + } + + PASSED(); + } + PART_END(H5Ocopy_group); + + if (tmp_group_id >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(tmp_group_id); + } + H5E_END_TRY; + tmp_group_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Ocopy_dset) + { + TESTING_2("H5Ocopy on a dataset (default copy options)"); + + if (H5Ocopy(group_id, OBJECT_COPY_BASIC_TEST_DSET_NAME, group_id, + OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy dataset '%s' to '%s'\n", OBJECT_COPY_BASIC_TEST_DSET_NAME, + OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME); + PART_ERROR(H5Ocopy_dset); + } + + if ((object_link_exists = + H5Lexists(group_id, OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied dataset exists\n", + OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME); + PART_ERROR(H5Ocopy_dset); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied dataset didn't exist!\n", + OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME); + PART_ERROR(H5Ocopy_dset); + } + + /* Ensure that the new dataset has all of the attributes of the copied dataset */ + if ((tmp_dset_id = H5Dopen2(group_id, OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open dataset copy '%s'\n", OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME); + PART_ERROR(H5Ocopy_dset); + } + + memset(&object_info, 0, sizeof(object_info)); + + /* + * Set attribute count to zero in case the connector doesn't + * support retrieval of object info. 
+ */ + object_info.num_attrs = 0; + + if (H5Oget_info3(tmp_dset_id, &object_info, H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve object info\n"); + PART_ERROR(H5Ocopy_dset); + } + + if (object_info.num_attrs == 0) { + H5_FAILED(); + HDprintf(" copied dataset didn't contain any attributes after copy operation!\n"); + PART_ERROR(H5Ocopy_dset); + } + + /* Check the attribute names, types, etc. */ + i = 0; + if (H5Aiterate2(tmp_dset_id, H5_INDEX_NAME, H5_ITER_INC, NULL, + object_copy_attribute_iter_callback, &i) < 0) { + H5_FAILED(); + HDprintf(" failed to iterate over copied dataset's attributes\n"); + PART_ERROR(H5Ocopy_dset); + } + + if (i != OBJECT_COPY_BASIC_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" number of attributes on copied dataset (%llu) didn't match expected number " + "(%llu)!\n", + (unsigned long long)i, (unsigned long long)OBJECT_COPY_BASIC_TEST_NUM_ATTRS); + PART_ERROR(H5Ocopy_dset); + } + + if (H5Dclose(tmp_dset_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close dataset copy\n"); + PART_ERROR(H5Ocopy_dset); + } + + PASSED(); + } + PART_END(H5Ocopy_dset); + + if (tmp_dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(tmp_dset_id); + } + H5E_END_TRY; + tmp_dset_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Ocopy_dtype) + { + TESTING_2("H5Ocopy on a committed datatype (default copy options)"); + + if (H5Ocopy(group_id, OBJECT_COPY_BASIC_TEST_DTYPE_NAME, group_id, + OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy datatype '%s' to '%s'\n", OBJECT_COPY_BASIC_TEST_DTYPE_NAME, + OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME); + PART_ERROR(H5Ocopy_dtype); + } + + if ((object_link_exists = + H5Lexists(group_id, OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied datatype exists\n", + OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME); + PART_ERROR(H5Ocopy_dtype); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied datatype didn't exist!\n", + OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME); + PART_ERROR(H5Ocopy_dtype); + } + + /* Ensure that the new committed datatype has all the attributes of the copied datatype */ + if ((tmp_dtype_id = H5Topen2(group_id, OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open datatype copy '%s'\n", OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME); + PART_ERROR(H5Ocopy_dtype); + } + + memset(&object_info, 0, sizeof(object_info)); + + /* + * Set attribute count to zero in case the connector doesn't + * support retrieval of object info. + */ + object_info.num_attrs = 0; + + if (H5Oget_info3(tmp_dtype_id, &object_info, H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve object info\n"); + PART_ERROR(H5Ocopy_dtype); + } + + if (object_info.num_attrs == 0) { + H5_FAILED(); + HDprintf( + " copied committed datatype didn't contain any attributes after copy operation!\n"); + PART_ERROR(H5Ocopy_dtype); + } + + /* Check the attribute names, types, etc. 
*/ + i = 0; + if (H5Aiterate2(tmp_dtype_id, H5_INDEX_NAME, H5_ITER_INC, NULL, + object_copy_attribute_iter_callback, &i) < 0) { + H5_FAILED(); + HDprintf(" failed to iterate over copied datatype's attributes\n"); + PART_ERROR(H5Ocopy_dtype); + } + + if (i != OBJECT_COPY_BASIC_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" number of attributes on copied datatype (%llu) didn't match expected number " + "(%llu)!\n", + (unsigned long long)i, (unsigned long long)OBJECT_COPY_BASIC_TEST_NUM_ATTRS); + PART_ERROR(H5Ocopy_dtype); + } + + if (H5Tclose(tmp_dtype_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close datatype copy\n"); + PART_ERROR(H5Ocopy_dtype); + } + + PASSED(); + } + PART_END(H5Ocopy_dtype); + + if (tmp_dtype_id >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(tmp_dtype_id); + } + H5E_END_TRY; + tmp_dtype_id = H5I_INVALID_HID; + } + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Tclose(dtype_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(attr_space_id); + H5Sclose(space_id); + H5Aclose(tmp_attr_id); + H5Tclose(dset_dtype); + H5Tclose(tmp_dtype_id); + H5Tclose(dtype_id); + H5Dclose(tmp_dset_id); + H5Dclose(dset_id); + H5Gclose(tmp_group_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Tests to ensure that H5Ocopy fails when attempting to copy + * an object to a destination where the object already exists. 
+ */ +static int +test_object_copy_already_existing(void) +{ + herr_t err_ret; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dtype_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("object copying to location where objects already exist"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, dataset, or stored datatype aren't " + "supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_ALREADY_EXISTING_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + OBJECT_COPY_ALREADY_EXISTING_TEST_SUBGROUP_NAME); + goto error; + } + + if ((space_id = + generate_random_dataspace(OBJECT_COPY_ALREADY_EXISTING_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + /* Create the test group object */ + if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_ALREADY_EXISTING_TEST_GROUP_NAME); + goto error; + } + + /* Create the test dataset object */ + if ((dset_id = H5Dcreate2(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_DSET_NAME, dset_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", OBJECT_COPY_ALREADY_EXISTING_TEST_DSET_NAME); + goto error; + } + + /* Create the test committed datatype object */ + if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + if (H5Tcommit2(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_DTYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", OBJECT_COPY_ALREADY_EXISTING_TEST_DTYPE_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Ocopy_already_existing_group) + { + TESTING_2("H5Ocopy group to location where group already exists"); + + H5E_BEGIN_TRY + { + err_ret = H5Ocopy(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_GROUP_NAME, group_id, + OBJECT_COPY_ALREADY_EXISTING_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" group copy succeeded in location where group already exists!\n"); + PART_ERROR(H5Ocopy_already_existing_group); + } + + PASSED(); + } + 
PART_END(H5Ocopy_already_existing_group); + + PART_BEGIN(H5Ocopy_already_existing_dset) + { + TESTING_2("H5Ocopy dataset to location where dataset already exists"); + + H5E_BEGIN_TRY + { + err_ret = H5Ocopy(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_DSET_NAME, group_id, + OBJECT_COPY_ALREADY_EXISTING_TEST_DSET_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" dataset copy succeeded in location where dataset already exists!\n"); + PART_ERROR(H5Ocopy_already_existing_dset); + } + + PASSED(); + } + PART_END(H5Ocopy_already_existing_dset); + + PART_BEGIN(H5Ocopy_already_existing_dtype) + { + TESTING_2("H5Ocopy committed datatype to location where committed datatype already exists"); + + H5E_BEGIN_TRY + { + err_ret = H5Ocopy(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_DTYPE_NAME, group_id, + OBJECT_COPY_ALREADY_EXISTING_TEST_DTYPE_NAME, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" committed datatype copy succeeded in location where committed datatype already " + "exists!\n"); + PART_ERROR(H5Ocopy_already_existing_dtype); + } + + PASSED(); + } + PART_END(H5Ocopy_already_existing_dtype); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Tclose(dtype_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(space_id); + H5Tclose(dset_dtype); + H5Tclose(dtype_id); + H5Dclose(dset_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to exercise the H5O_COPY_SHALLOW_HIERARCHY_FLAG flag + * for H5Ocopy. 
+ */ +static int +test_object_copy_shallow_group_copy(void) +{ + H5G_info_t group_info; + htri_t object_link_exists; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t tmp_group_id = H5I_INVALID_HID; + hid_t ocpypl_id = H5I_INVALID_HID; + + TESTING("object copying with H5O_COPY_SHALLOW_HIERARCHY_FLAG flag"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, or link aren't supported with this " + "connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_SHALLOW_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_COPY_SHALLOW_TEST_SUBGROUP_NAME); + goto error; + } + + /* Create the test group object, along with its nested members. */ + if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_SHALLOW_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_SHALLOW_TEST_GROUP_NAME); + goto error; + } + + for (i = 0; i < (size_t)OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS; i++) { + char grp_name[OBJECT_COPY_SHALLOW_TEST_BUF_SIZE]; + + snprintf(grp_name, OBJECT_COPY_SHALLOW_TEST_BUF_SIZE, "grp%d", (int)i); + + if ((tmp_group_id = H5Gcreate2(group_id2, grp_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s' under group '%s'\n", grp_name, + OBJECT_COPY_SHALLOW_TEST_GROUP_NAME); + goto error; + } + + /* Create a further nested group under the last group added */ + if (i == (OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS - 1)) { + if (H5Gclose(H5Gcreate2(tmp_group_id, OBJECT_COPY_SHALLOW_TEST_DEEP_NESTED_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create nested group '%s' under group '%s'\n", + OBJECT_COPY_SHALLOW_TEST_DEEP_NESTED_GROUP_NAME, grp_name); + goto error; + } + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group '%s'\n", grp_name); + goto error; + } + } + + if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create OCopyPL\n"); + goto error; + } + + if (H5Pset_copy_object(ocpypl_id, H5O_COPY_SHALLOW_HIERARCHY_FLAG) < 0) { + H5_FAILED(); + HDprintf(" couldn't set object copying options\n"); + goto error; + } + + if (H5Ocopy(group_id, OBJECT_COPY_SHALLOW_TEST_GROUP_NAME, group_id, + OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME, ocpypl_id, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy group '%s' to '%s'\n", OBJECT_COPY_SHALLOW_TEST_GROUP_NAME, + OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME); + goto error; + } + + if ((object_link_exists = H5Lexists(group_id, 
OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied group exists\n", + OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME); + goto error; + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied group didn't exist!\n", OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME); + goto error; + } + + /* + * Ensure that the new group has only the immediate members of the copied group. + */ + if ((tmp_group_id = H5Gopen2(group_id, OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open group copy '%s'\n", OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME); + goto error; + } + + memset(&group_info, 0, sizeof(group_info)); + + /* + * Set link count to zero in case the connector doesn't support + * retrieval of group info. + */ + group_info.nlinks = 0; + + if (H5Gget_info(tmp_group_id, &group_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group info\n"); + goto error; + } + + if (group_info.nlinks != OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS) { + H5_FAILED(); + HDprintf(" copied group contained %d members instead of %d members after a shallow copy!\n", + (int)group_info.nlinks, OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS); + goto error; + } + + if (H5Gclose(tmp_group_id) < 0) + TEST_ERROR; + + /* + * Ensure that the last immediate member of the copied group doesn't + * contain any members after the shallow copy. + */ + { + char grp_name[OBJECT_COPY_SHALLOW_TEST_BUF_SIZE]; + + snprintf(grp_name, OBJECT_COPY_SHALLOW_TEST_BUF_SIZE, + OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME "/grp%d", + OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS - 1); + + if ((tmp_group_id = H5Gopen2(group_id, grp_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open group '%s'\n", grp_name); + goto error; + } + + memset(&group_info, 0, sizeof(group_info)); + + /* + * Set link count to non-zero in case the connector doesn't support + * retrieval of group info. + */ + group_info.nlinks = 1; + + if (H5Gget_info(tmp_group_id, &group_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group info\n"); + goto error; + } + + if (group_info.nlinks != 0) { + H5_FAILED(); + HDprintf(" copied group's immediate members contained nested members after a shallow copy!\n"); + goto error; + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", grp_name); + goto error; + } + } + + if (H5Pclose(ocpypl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(ocpypl_id); + H5Gclose(tmp_group_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Tests to exercise the H5O_COPY_WITHOUT_ATTR_FLAG flag + * of H5Ocopy. 
+ */ +static int +test_object_copy_no_attributes(void) +{ + H5O_info2_t object_info; + htri_t object_link_exists; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t tmp_group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t tmp_dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dtype_id = H5I_INVALID_HID; + hid_t tmp_dtype_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t ocpypl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("object copying with H5O_COPY_WITHOUT_ATTR_FLAG flag"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, link, dataset, attribute, or stored " + "datatype aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_NO_ATTRS_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_SUBGROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(OBJECT_COPY_NO_ATTRS_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + if ((attr_space_id = generate_random_dataspace(OBJECT_COPY_NO_ATTRS_TEST_SPACE_RANK, NULL, NULL, TRUE)) < + 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + /* Create the test group object, along with the attributes attached to it. */ + if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME); + goto error; + } + + for (i = 0; i < (size_t)OBJECT_COPY_NO_ATTRS_TEST_NUM_ATTRS; i++) { + char attr_name[OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE]; + + snprintf(attr_name, OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE, "attr%d", (int)i); + + if ((attr_id = H5Acreate2(group_id2, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s' on group '%s'\n", attr_name, + OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME); + goto error; + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", attr_name); + goto error; + } + } + + /* Create the test dataset object, along with the attributes attached to it. 
*/ + if ((dset_id = H5Dcreate2(group_id, OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME, dset_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < (size_t)OBJECT_COPY_NO_ATTRS_TEST_NUM_ATTRS; i++) { + char attr_name[OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE]; + + snprintf(attr_name, OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE, "attr%d", (int)i); + + if ((attr_id = H5Acreate2(dset_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s' on dataset '%s'\n", attr_name, + OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME); + goto error; + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", attr_name); + goto error; + } + } + + /* Create the test committed datatype object, along with the attributes attached to it. */ + if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + if (H5Tcommit2(group_id, OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME); + goto error; + } + + for (i = 0; i < (size_t)OBJECT_COPY_NO_ATTRS_TEST_NUM_ATTRS; i++) { + char attr_name[OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE]; + + snprintf(attr_name, OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE, "attr%d", (int)i); + + if ((attr_id = H5Acreate2(dtype_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s' on committed datatype '%s'\n", attr_name, + OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME); + goto error; + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", attr_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Ocopy_group_no_attributes) + { + TESTING_2("H5Ocopy on a group (without attributes)"); + + if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create OCopyPL\n"); + PART_ERROR(H5Ocopy_group_no_attributes); + } + + if (H5Pset_copy_object(ocpypl_id, H5O_COPY_WITHOUT_ATTR_FLAG) < 0) { + H5_FAILED(); + HDprintf(" couldn't set object copying options\n"); + PART_ERROR(H5Ocopy_group_no_attributes); + } + + if (H5Ocopy(group_id, OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME, group_id, + OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME, ocpypl_id, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy group '%s' to '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME, + OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_group_no_attributes); + } + + if ((object_link_exists = + H5Lexists(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied group exists\n", + OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_group_no_attributes); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied group didn't exist!\n", + OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_group_no_attributes); + } + + /* Ensure that the new group has no attributes */ + if ((tmp_group_id = H5Gopen2(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" failed to open group copy '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME); + 
PART_ERROR(H5Ocopy_group_no_attributes); + } + + memset(&object_info, 0, sizeof(object_info)); + + /* + * Set attribute count to non-zero in case the connector doesn't + * support retrieval of object info. + */ + object_info.num_attrs = 1; + + if (H5Oget_info3(tmp_group_id, &object_info, H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve object info\n"); + PART_ERROR(H5Ocopy_group_no_attributes); + } + + if (object_info.num_attrs != 0) { + H5_FAILED(); + HDprintf(" copied group contained attributes after a non-attribute copy!\n"); + PART_ERROR(H5Ocopy_group_no_attributes); + } + + if (H5Pclose(ocpypl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close OCopyPL\n"); + PART_ERROR(H5Ocopy_group_no_attributes); + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group copy\n"); + PART_ERROR(H5Ocopy_group_no_attributes); + } + + PASSED(); + } + PART_END(H5Ocopy_group_no_attributes); + + if (ocpypl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(ocpypl_id); + } + H5E_END_TRY; + ocpypl_id = H5I_INVALID_HID; + } + if (tmp_group_id >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(tmp_group_id); + } + H5E_END_TRY; + tmp_group_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Ocopy_dset_no_attributes) + { + TESTING_2("H5Ocopy on a dataset (without attributes)"); + + if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create OCopyPL\n"); + PART_ERROR(H5Ocopy_dset_no_attributes); + } + + if (H5Pset_copy_object(ocpypl_id, H5O_COPY_WITHOUT_ATTR_FLAG) < 0) { + H5_FAILED(); + HDprintf(" couldn't set object copying options\n"); + PART_ERROR(H5Ocopy_dset_no_attributes); + } + + if (H5Ocopy(group_id, OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME, group_id, + OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME, ocpypl_id, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy dataset '%s' to '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME, + OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME); + PART_ERROR(H5Ocopy_dset_no_attributes); + } + + if ((object_link_exists = + H5Lexists(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied dataset exists\n", + OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME); + PART_ERROR(H5Ocopy_dset_no_attributes); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied dataset didn't exist!\n", + OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME); + PART_ERROR(H5Ocopy_dset_no_attributes); + } + + /* Ensure that the new dataset doesn't have any attributes */ + if ((tmp_dset_id = H5Dopen2(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" failed to open dataset copy '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME); + PART_ERROR(H5Ocopy_dset_no_attributes); + } + + memset(&object_info, 0, sizeof(object_info)); + + /* + * Set attribute count to non-zero in case the connector doesn't + * support retrieval of object info. 
+ */ + object_info.num_attrs = 1; + + if (H5Oget_info3(tmp_dset_id, &object_info, H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve object info\n"); + PART_ERROR(H5Ocopy_dset_no_attributes); + } + + if (object_info.num_attrs != 0) { + H5_FAILED(); + HDprintf(" copied dataset contained attributes after a non-attribute copy!\n"); + PART_ERROR(H5Ocopy_dset_no_attributes); + } + + if (H5Pclose(ocpypl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close OCopyPL\n"); + PART_ERROR(H5Ocopy_dset_no_attributes); + } + + if (H5Dclose(tmp_dset_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close dataset copy\n"); + PART_ERROR(H5Ocopy_dset_no_attributes); + } + + PASSED(); + } + PART_END(H5Ocopy_dset_no_attributes); + + if (ocpypl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(ocpypl_id); + } + H5E_END_TRY; + ocpypl_id = H5I_INVALID_HID; + } + if (tmp_dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(tmp_dset_id); + } + H5E_END_TRY; + tmp_dset_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Ocopy_dtype_no_attributes) + { + TESTING_2("H5Ocopy on a committed datatype (without attributes)"); + + if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create OCopyPL\n"); + PART_ERROR(H5Ocopy_dtype_no_attributes); + } + + if (H5Pset_copy_object(ocpypl_id, H5O_COPY_WITHOUT_ATTR_FLAG) < 0) { + H5_FAILED(); + HDprintf(" couldn't set object copying options\n"); + PART_ERROR(H5Ocopy_dtype_no_attributes); + } + + if (H5Ocopy(group_id, OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME, group_id, + OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME, ocpypl_id, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy datatype '%s' to '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME, + OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME); + PART_ERROR(H5Ocopy_dtype_no_attributes); + } + + if ((object_link_exists = + H5Lexists(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied datatype exists\n", + OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME); + PART_ERROR(H5Ocopy_dtype_no_attributes); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied datatype didn't exist!\n", + OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME); + PART_ERROR(H5Ocopy_dtype_no_attributes); + } + + /* Ensure that the new committed datatype doesn't have any attributes */ + if ((tmp_dtype_id = H5Topen2(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" failed to open dataset copy '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME); + PART_ERROR(H5Ocopy_dtype_no_attributes); + } + + memset(&object_info, 0, sizeof(object_info)); + + /* + * Set attribute count to non-zero in case the connector doesn't + * support retrieval of object info. 
+ */ + object_info.num_attrs = 1; + + if (H5Oget_info3(tmp_dtype_id, &object_info, H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve object info\n"); + PART_ERROR(H5Ocopy_dtype_no_attributes); + } + + if (object_info.num_attrs != 0) { + H5_FAILED(); + HDprintf(" copied committed datatype contained attributes after a non-attribute copy!\n"); + PART_ERROR(H5Ocopy_dtype_no_attributes); + } + + if (H5Pclose(ocpypl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close OCopyPL\n"); + PART_ERROR(H5Ocopy_dtype_no_attributes); + } + + if (H5Tclose(tmp_dtype_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close datatype copy\n"); + PART_ERROR(H5Ocopy_dtype_no_attributes); + } + + PASSED(); + } + PART_END(H5Ocopy_dtype_no_attributes); + + if (ocpypl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(ocpypl_id); + } + H5E_END_TRY; + ocpypl_id = H5I_INVALID_HID; + } + if (tmp_dtype_id >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(tmp_dtype_id); + } + H5E_END_TRY; + tmp_dtype_id = H5I_INVALID_HID; + } + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Tclose(dtype_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(ocpypl_id); + H5Sclose(attr_space_id); + H5Sclose(space_id); + H5Aclose(attr_id); + H5Tclose(dset_dtype); + H5Tclose(tmp_dtype_id); + H5Tclose(dtype_id); + H5Dclose(tmp_dset_id); + H5Dclose(dset_id); + H5Gclose(tmp_group_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Tests to exercise the behavior of H5Ocopy when the source + * object specified is a soft link or dangling soft link. 
+ */ +static int +test_object_copy_by_soft_link(void) +{ + H5O_info2_t object_info; + H5G_info_t group_info; + H5L_info2_t link_info; + htri_t object_link_exists; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t tmp_group_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + + TESTING_MULTIPART("object copying through use of soft links"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, link, dataset, attribute, iterate, or " + "soft link aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_SOFT_LINK_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_COPY_SOFT_LINK_TEST_SUBGROUP_NAME); + goto error; + } + + if ((attr_space_id = generate_random_dataspace(OBJECT_COPY_SOFT_LINK_TEST_SPACE_RANK, NULL, NULL, TRUE)) < + 0) + TEST_ERROR; + + /* Create the test group object, along with its nested members and the attributes attached to it. 
*/ + if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME); + goto error; + } + + for (i = 0; i < (size_t)OBJECT_COPY_SOFT_LINK_TEST_NUM_NESTED_OBJS; i++) { + char grp_name[OBJECT_COPY_SOFT_LINK_TEST_BUF_SIZE]; + + snprintf(grp_name, OBJECT_COPY_SOFT_LINK_TEST_BUF_SIZE, "grp%d", (int)i); + + if ((tmp_group_id = H5Gcreate2(group_id2, grp_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s' under group '%s'\n", grp_name, + OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME); + goto error; + } + + /* Create a further nested group under the last group added */ + if (i == (OBJECT_COPY_SOFT_LINK_TEST_NUM_NESTED_OBJS - 1)) { + if (H5Gclose(H5Gcreate2(tmp_group_id, OBJECT_COPY_SOFT_LINK_TEST_DEEP_NESTED_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create nested group '%s' under group '%s'\n", + OBJECT_COPY_SOFT_LINK_TEST_DEEP_NESTED_GROUP_NAME, grp_name); + goto error; + } + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group '%s'\n", grp_name); + goto error; + } + } + + for (i = 0; i < (size_t)OBJECT_COPY_SOFT_LINK_TEST_NUM_ATTRS; i++) { + char attr_name[OBJECT_COPY_SOFT_LINK_TEST_BUF_SIZE]; + + snprintf(attr_name, OBJECT_COPY_SOFT_LINK_TEST_BUF_SIZE, "attr%d", (int)i); + + if ((attr_id = H5Acreate2(group_id2, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s' on group '%s'\n", attr_name, + OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME); + goto error; + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", attr_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Ocopy_through_soft_link) + { + TESTING_2("H5Ocopy through use of a soft link"); + + if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_COPY_SOFT_LINK_TEST_SUBGROUP_NAME + "/" OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME, + group_id, OBJECT_COPY_SOFT_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s' to group for copying\n", + OBJECT_COPY_SOFT_LINK_TEST_SOFT_LINK_NAME); + PART_ERROR(H5Ocopy_through_soft_link); + } + + if (H5Ocopy(group_id, OBJECT_COPY_SOFT_LINK_TEST_SOFT_LINK_NAME, group_id, + OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy group '%s' to '%s'\n", OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME, + OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_through_soft_link); + } + + if ((object_link_exists = + H5Lexists(group_id, OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied group exists\n", + OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_through_soft_link); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied group didn't exist!\n", + OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_through_soft_link); + } + + /* Make sure the new object is an actual group and not another soft link */ + memset(&link_info, 0, sizeof(link_info)); + if (H5Lget_info2(group_id, OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME, &link_info, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" failed to retrieve info for link 
'%s'\n", + OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_through_soft_link); + } + + if (link_info.type != H5L_TYPE_HARD) { + H5_FAILED(); + HDprintf( + " after group copy through soft link, group's new link type wasn't H5L_TYPE_HARD!\n"); + PART_ERROR(H5Ocopy_through_soft_link); + } + + /* + * Ensure that the new group doesn't have any attributes and only the + * immediate members of the copied group. + */ + if ((tmp_group_id = H5Gopen2(group_id, OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" failed to open group copy '%s'\n", OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_through_soft_link); + } + + memset(&group_info, 0, sizeof(group_info)); + + /* + * Set link count to zero in case the connector doesn't support + * retrieval of group info. + */ + group_info.nlinks = 0; + + if (H5Gget_info(tmp_group_id, &group_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group info\n"); + PART_ERROR(H5Ocopy_through_soft_link); + } + + if (group_info.nlinks != OBJECT_COPY_SOFT_LINK_TEST_NUM_NESTED_OBJS) { + H5_FAILED(); + HDprintf( + " copied group contained %d members instead of %d members after a shallow copy!\n", + (int)group_info.nlinks, OBJECT_COPY_SOFT_LINK_TEST_NUM_NESTED_OBJS); + PART_ERROR(H5Ocopy_through_soft_link); + } + + memset(&object_info, 0, sizeof(object_info)); + + /* + * Set attribute count to zero in case the connector doesn't + * support retrieval of object info. + */ + object_info.num_attrs = 0; + + if (H5Oget_info3(tmp_group_id, &object_info, H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve object info\n"); + PART_ERROR(H5Ocopy_through_soft_link); + } + + if (object_info.num_attrs == 0) { + H5_FAILED(); + HDprintf(" copied group didn't contain any attributes after copy operation!\n"); + PART_ERROR(H5Ocopy_through_soft_link); + } + + /* Check the attribute names, types, etc. 
*/ + i = 0; + if (H5Aiterate2(tmp_group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, + object_copy_attribute_iter_callback, &i) < 0) { + H5_FAILED(); + HDprintf(" failed to iterate over copied group's attributes\n"); + PART_ERROR(H5Ocopy_through_soft_link); + } + + if (i != OBJECT_COPY_SOFT_LINK_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf( + " number of attributes on copied group (%llu) didn't match expected number (%llu)!\n", + (unsigned long long)i, (unsigned long long)OBJECT_COPY_SOFT_LINK_TEST_NUM_ATTRS); + PART_ERROR(H5Ocopy_through_soft_link); + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group copy\n"); + PART_ERROR(H5Ocopy_through_soft_link); + } + + PASSED(); + } + PART_END(H5Ocopy_through_soft_link); + + if (tmp_group_id >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(tmp_group_id); + } + H5E_END_TRY; + tmp_group_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Ocopy_through_dangling_soft_link) + { + herr_t err_ret; + + TESTING_2("H5Ocopy through use of a dangling soft link"); + + if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_COPY_SOFT_LINK_TEST_SUBGROUP_NAME + "/nonexistent_object", + group_id, OBJECT_COPY_SOFT_LINK_TEST_DANGLING_LINK_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create dangling soft link '%s'\n", + OBJECT_COPY_SOFT_LINK_TEST_DANGLING_LINK_NAME); + PART_ERROR(H5Ocopy_through_dangling_soft_link); + } + + H5E_BEGIN_TRY + { + err_ret = + H5Ocopy(group_id, OBJECT_COPY_SOFT_LINK_TEST_DANGLING_LINK_NAME, group_id, + OBJECT_COPY_SOFT_LINK_TEST_DANGLING_LINK_NAME "2", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" copied non-existent object through use of a dangling soft link!\n"); + PART_ERROR(H5Ocopy_through_dangling_soft_link); + } + + PASSED(); + } + PART_END(H5Ocopy_through_dangling_soft_link); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(attr_space_id); + H5Aclose(attr_id); + H5Gclose(tmp_group_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Tests for copying groups that contain soft links with + * H5Ocopy. Also tested is the H5O_COPY_EXPAND_SOFT_LINK_FLAG + * flag. 
+ */ +static int +test_object_copy_group_with_soft_links(void) +{ + H5G_info_t group_info; + htri_t object_link_exists; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t tmp_group_id = H5I_INVALID_HID; + hid_t ocpypl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("group copying when group contains soft links"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, link, or soft link aren't supported with " + "this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME); + goto error; + } + + /* Create the test group object. */ + if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME); + goto error; + } + + /* Create several groups at the root level and add soft links pointing to them inside + * the test group object. 
+ */ + for (i = 0; i < (size_t)OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS; i++) { + char grp_name[OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE]; + char lnk_name[OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE]; + char lnk_target[2 * OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE]; + + snprintf(grp_name, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE, "grp%d", (int)i); + snprintf(lnk_name, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE, "link%d", (int)i); + snprintf(lnk_target, 2 * OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE, + "/" OBJECT_TEST_GROUP_NAME "/" OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME "/%s", + grp_name); + + if ((tmp_group_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s' under group '%s'\n", grp_name, + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME); + goto error; + } + + if (H5Lcreate_soft(lnk_target, group_id2, lnk_name, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to create soft link '%s'\n", lnk_name); + goto error; + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group '%s'\n", grp_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Ocopy_dont_expand_soft_links) + { + TESTING_2("H5Ocopy on group with soft links (soft links not expanded)"); + + if (H5Ocopy(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME, group_id, + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy group '%s' to '%s'\n", + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME, + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME); + PART_ERROR(H5Ocopy_dont_expand_soft_links); + } + + if ((object_link_exists = + H5Lexists(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied group exists\n", + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME); + PART_ERROR(H5Ocopy_dont_expand_soft_links); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied group didn't exist!\n", + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME); + PART_ERROR(H5Ocopy_dont_expand_soft_links); + } + + /* Ensure that the number of links is the same */ + if ((tmp_group_id = + H5Gopen2(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open group copy '%s'\n", + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME); + PART_ERROR(H5Ocopy_dont_expand_soft_links); + } + + memset(&group_info, 0, sizeof(group_info)); + + /* + * Set link count to zero in case the connector doesn't support + * retrieval of group info. + */ + group_info.nlinks = 0; + + if (H5Gget_info(tmp_group_id, &group_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group info\n"); + PART_ERROR(H5Ocopy_dont_expand_soft_links); + } + + if (group_info.nlinks != OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS) { + H5_FAILED(); + HDprintf(" copied group contained %d members instead of %d members after copy!\n", + (int)group_info.nlinks, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS); + PART_ERROR(H5Ocopy_dont_expand_soft_links); + } + + /* + * Iterate over the links in the copied group and ensure that they're all + * still soft links with their original values. 
+ */ + i = 0; + if (H5Literate2(tmp_group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, + object_copy_soft_link_non_expand_callback, &i) < 0) { + H5_FAILED(); + HDprintf(" failed to iterate over links in group '%s'\n", + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME); + PART_ERROR(H5Ocopy_dont_expand_soft_links); + } + + if (i != OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS) { + H5_FAILED(); + HDprintf(" number of links in copied group (%llu) didn't match expected number (%llu)!\n", + (unsigned long long)i, + (unsigned long long)OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS); + PART_ERROR(H5Ocopy_dont_expand_soft_links); + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group copy\n"); + PART_ERROR(H5Ocopy_dont_expand_soft_links); + } + + PASSED(); + } + PART_END(H5Ocopy_dont_expand_soft_links); + + if (tmp_group_id >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(tmp_group_id); + } + H5E_END_TRY; + tmp_group_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Ocopy_expand_soft_links) + { + TESTING_2("H5Ocopy on group with soft links (soft links expanded)"); + + if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create OCopyPL\n"); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + if (H5Pset_copy_object(ocpypl_id, H5O_COPY_EXPAND_SOFT_LINK_FLAG) < 0) { + H5_FAILED(); + HDprintf(" couldn't set object copying options\n"); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + if (H5Ocopy(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME, group_id, + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME, ocpypl_id, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy group '%s' to '%s'\n", + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME, + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + if ((object_link_exists = H5Lexists( + group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied group exists\n", + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied group didn't exist!\n", + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + /* Ensure that the number of links is the same */ + if ((tmp_group_id = H5Gopen2(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open group copy '%s'\n", + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + memset(&group_info, 0, sizeof(group_info)); + + /* + * Set link count to zero in case the connector doesn't support + * retrieval of group info. 
+ */ + group_info.nlinks = 0; + + if (H5Gget_info(tmp_group_id, &group_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group info\n"); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + if (group_info.nlinks != OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS) { + H5_FAILED(); + HDprintf(" copied group contained %d members instead of %d members after copy!\n", + (int)group_info.nlinks, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + /* + * Iterate over the links in the copied group and ensure that they've all + * been expanded into hard links corresponding to the top-level groups + * created. + */ + i = 0; + if (H5Literate2(tmp_group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, + object_copy_soft_link_expand_callback, &i) < 0) { + H5_FAILED(); + HDprintf(" failed to iterate over links in group '%s'\n", + OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + if (i != OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS) { + H5_FAILED(); + HDprintf(" number of links in copied group (%llu) didn't match expected number (%llu)!\n", + (unsigned long long)i, + (unsigned long long)OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + if (H5Pclose(ocpypl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close OCopyPL\n"); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group copy\n"); + PART_ERROR(H5Ocopy_expand_soft_links); + } + + PASSED(); + } + PART_END(H5Ocopy_expand_soft_links); + + if (ocpypl_id >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(ocpypl_id); + } + H5E_END_TRY; + ocpypl_id = H5I_INVALID_HID; + } + if (tmp_group_id >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(tmp_group_id); + } + H5E_END_TRY; + tmp_group_id = H5I_INVALID_HID; + } + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(ocpypl_id); + H5Gclose(tmp_group_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Tests for copying objects between two different files using + * H5Ocopy. 
+ */ +static int +test_object_copy_between_files(void) +{ + H5O_info2_t object_info; + H5G_info_t group_info; + htri_t object_link_exists; + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t file_id2 = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t tmp_group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t tmp_dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t dtype_id = H5I_INVALID_HID; + hid_t tmp_dtype_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t ocpypl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("object copying between files"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, link, dataset, attribute, stored " + "datatype, or iterate aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + /* + * Create the second file for the between file copying tests. + */ + if ((file_id2 = H5Fcreate(OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_BETWEEN_FILES_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + OBJECT_COPY_BETWEEN_FILES_TEST_SUBGROUP_NAME); + goto error; + } + + if ((space_id = generate_random_dataspace(OBJECT_COPY_BETWEEN_FILES_TEST_SPACE_RANK, NULL, NULL, FALSE)) < + 0) + TEST_ERROR; + if ((attr_space_id = + generate_random_dataspace(OBJECT_COPY_BETWEEN_FILES_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + /* Create the test group object, along with its nested members and the attributes attached to it. 
*/ + if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME); + goto error; + } + + for (i = 0; i < (size_t)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS; i++) { + char grp_name[OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE]; + + snprintf(grp_name, OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE, "grp%d", (int)i); + + if ((tmp_group_id = H5Gcreate2(group_id2, grp_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s' under group '%s'\n", grp_name, + OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME); + goto error; + } + + /* Create a further nested group under the last group added */ + if (i == (OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS - 1)) { + if (H5Gclose(H5Gcreate2(tmp_group_id, OBJECT_COPY_BETWEEN_FILES_TEST_DEEP_NESTED_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create nested group '%s' under group '%s'\n", + OBJECT_COPY_BETWEEN_FILES_TEST_DEEP_NESTED_GROUP_NAME, grp_name); + goto error; + } + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group '%s'\n", grp_name); + goto error; + } + } + + for (i = 0; i < (size_t)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS; i++) { + char attr_name[OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE]; + + snprintf(attr_name, OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE, "attr%d", (int)i); + + if ((attr_id = H5Acreate2(group_id2, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s' on group '%s'\n", attr_name, + OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME); + goto error; + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", attr_name); + goto error; + } + } + + /* Create the test dataset object, along with the attributes attached to it. */ + if ((dset_id = H5Dcreate2(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME, dset_dtype, space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < (size_t)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS; i++) { + char attr_name[OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE]; + + snprintf(attr_name, OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE, "attr%d", (int)i); + + if ((attr_id = H5Acreate2(dset_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s' on dataset '%s'\n", attr_name, + OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME); + goto error; + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", attr_name); + goto error; + } + } + + /* Create the test committed datatype object, along with the attributes attached to it. 
*/ + if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype\n"); + goto error; + } + + if (H5Tcommit2(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME); + goto error; + } + + for (i = 0; i < (size_t)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS; i++) { + char attr_name[OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE]; + + snprintf(attr_name, OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE, "attr%d", (int)i); + + if ((attr_id = H5Acreate2(dtype_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create attribute '%s' on committed datatype '%s'\n", attr_name, + OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME); + goto error; + } + + if (H5Aclose(attr_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close attribute '%s'\n", attr_name); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Ocopy_group_between_files) + { + TESTING_2("H5Ocopy on group between different files"); + + if (H5Ocopy(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME, file_id2, + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy group '%s' to second file '%s'\n", + OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME, + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_group_between_files); + } + + if ((object_link_exists = + H5Lexists(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied group exists\n", + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_group_between_files); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied group in second file '%s' didn't exist!\n", + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME, + OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME); + PART_ERROR(H5Ocopy_group_between_files); + } + + /* Ensure that the new group has all the members of the copied group, and all its attributes */ + if ((tmp_group_id = + H5Gopen2(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open group copy '%s'\n", + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME); + PART_ERROR(H5Ocopy_group_between_files); + } + + memset(&group_info, 0, sizeof(group_info)); + + /* + * Set link count to zero in case the connector doesn't support + * retrieval of group info. + */ + group_info.nlinks = 0; + + if (H5Gget_info(tmp_group_id, &group_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group info\n"); + PART_ERROR(H5Ocopy_group_between_files); + } + + if (group_info.nlinks != OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS) { + H5_FAILED(); + HDprintf(" copied group contained %d members instead of %d members after a deep copy!\n", + (int)group_info.nlinks, OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS); + PART_ERROR(H5Ocopy_group_between_files); + } + + memset(&object_info, 0, sizeof(object_info)); + + /* + * Set attribute count to zero in case the connector doesn't + * support retrieval of object info. 
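+             * The explicit zero (on top of the memset above) keeps the num_attrs
+             * check below deterministic when the connector does not fill in
+             * H5O_info2_t.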
+ */ + object_info.num_attrs = 0; + + if (H5Oget_info3(tmp_group_id, &object_info, H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve object info\n"); + PART_ERROR(H5Ocopy_group_between_files); + } + + if (object_info.num_attrs == 0) { + H5_FAILED(); + HDprintf(" copied group didn't contain any attributes after copy operation!\n"); + PART_ERROR(H5Ocopy_group_between_files); + } + + /* Check the attribute names, types, etc. */ + i = 0; + if (H5Aiterate2(tmp_group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, + object_copy_attribute_iter_callback, &i) < 0) { + H5_FAILED(); + HDprintf(" failed to iterate over copied group's attributes\n"); + PART_ERROR(H5Ocopy_group_between_files); + } + + if (i != OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf( + " number of attributes on copied group (%llu) didn't match expected number (%llu)!\n", + (unsigned long long)i, (unsigned long long)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS); + PART_ERROR(H5Ocopy_group_between_files); + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group copy\n"); + PART_ERROR(H5Ocopy_group_between_files); + } + + /* + * Ensure that the last immediate member of the copied group + * contains its single member after the deep copy. + */ + { + char grp_name[OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE]; + + snprintf(grp_name, OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE, + "/" OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME "/grp%d", + OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS - 1); + + if ((tmp_group_id = H5Gopen2(file_id2, grp_name, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open group '%s'\n", grp_name); + PART_ERROR(H5Ocopy_group_between_files); + } + + memset(&group_info, 0, sizeof(group_info)); + + /* + * Set link count to zero in case the connector doesn't support + * retrieval of group info. 
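+                 * As above, the explicit zero keeps the nlinks comparison below
+                 * deterministic when the connector does not fill in H5G_info_t.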
+ */ + group_info.nlinks = 0; + + if (H5Gget_info(tmp_group_id, &group_info) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve group info\n"); + PART_ERROR(H5Ocopy_group_between_files); + } + + if (group_info.nlinks != 1) { + H5_FAILED(); + HDprintf(" copied group's immediate members didn't contain nested members after a " + "deep copy!\n"); + PART_ERROR(H5Ocopy_group_between_files); + } + + if (H5Gclose(tmp_group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close group '%s'\n", grp_name); + PART_ERROR(H5Ocopy_group_between_files); + } + } + + PASSED(); + } + PART_END(H5Ocopy_group_between_files); + + if (tmp_group_id >= 0) { + H5E_BEGIN_TRY + { + H5Gclose(tmp_group_id); + } + H5E_END_TRY; + tmp_group_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Ocopy_dset_between_files) + { + TESTING_2("H5Ocopy on dataset between different files"); + + if (H5Ocopy(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME, file_id2, + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy dataset '%s' to second file '%s'\n", + OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME, + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME); + PART_ERROR(H5Ocopy_dset_between_files); + } + + if ((object_link_exists = + H5Lexists(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied dataset exists\n", + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME); + PART_ERROR(H5Ocopy_dset_between_files); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied dataset in second file '%s' didn't exist!\n", + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME, + OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME); + PART_ERROR(H5Ocopy_dset_between_files); + } + + /* Ensure that the new dataset has all the attributes of the copied dataset */ + if ((tmp_dset_id = + H5Dopen2(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open dataset copy '%s'\n", + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME); + PART_ERROR(H5Ocopy_dset_between_files); + } + + memset(&object_info, 0, sizeof(object_info)); + + /* + * Set attribute count to zero in case the connector doesn't + * support retrieval of object info. + */ + object_info.num_attrs = 0; + + if (H5Oget_info3(tmp_dset_id, &object_info, H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve object info\n"); + PART_ERROR(H5Ocopy_dset_between_files); + } + + if (object_info.num_attrs == 0) { + H5_FAILED(); + HDprintf(" copied dataset didn't contain any attributes after copy operation!\n"); + PART_ERROR(H5Ocopy_dset_between_files); + } + + /* Check the attribute names, types, etc. 
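+             * object_copy_attribute_iter_callback is expected to check each attribute
+             * it visits and to advance the counter passed through op_data; the total
+             * is compared against the expected attribute count below.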
*/ + i = 0; + if (H5Aiterate2(tmp_dset_id, H5_INDEX_NAME, H5_ITER_INC, NULL, + object_copy_attribute_iter_callback, &i) < 0) { + H5_FAILED(); + HDprintf(" failed to iterate over copied dataset's attributes\n"); + PART_ERROR(H5Ocopy_dset_between_files); + } + + if (i != OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" number of attributes on copied dataset (%llu) didn't match expected number " + "(%llu)!\n", + (unsigned long long)i, (unsigned long long)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS); + PART_ERROR(H5Ocopy_dset_between_files); + } + + if (H5Dclose(tmp_dset_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close dataset copy\n"); + PART_ERROR(H5Ocopy_dset_between_files); + } + + PASSED(); + } + PART_END(H5Ocopy_dset_between_files); + + if (tmp_dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(tmp_dset_id); + } + H5E_END_TRY; + tmp_dset_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Ocopy_dtype_between_files) + { + TESTING_2("H5Ocopy on committed datatype between different files"); + + if (H5Ocopy(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME, file_id2, + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" failed to copy committed datatype '%s' to second file '%s'\n", + OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME, + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME); + PART_ERROR(H5Ocopy_dtype_between_files); + } + + if ((object_link_exists = + H5Lexists(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't determine if link '%s' to copied committed datatype exists\n", + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME); + PART_ERROR(H5Ocopy_dtype_between_files); + } + + if (!object_link_exists) { + H5_FAILED(); + HDprintf(" link '%s' to copied committed datatype in second file '%s' didn't exist!\n", + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME, + OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME); + PART_ERROR(H5Ocopy_dtype_between_files); + } + + /* Ensure that the new committed datatype has all the attributes of the copied committed datatype + */ + if ((tmp_dtype_id = + H5Topen2(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to open committed datatype copy '%s'\n", + OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME); + PART_ERROR(H5Ocopy_dtype_between_files); + } + + memset(&object_info, 0, sizeof(object_info)); + + /* + * Set attribute count to zero in case the connector doesn't + * support retrieval of object info. + */ + object_info.num_attrs = 0; + + if (H5Oget_info3(tmp_dtype_id, &object_info, H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve object info\n"); + PART_ERROR(H5Ocopy_dtype_between_files); + } + + if (object_info.num_attrs == 0) { + H5_FAILED(); + HDprintf( + " copied committed datatype didn't contain any attributes after copy operation!\n"); + PART_ERROR(H5Ocopy_dtype_between_files); + } + + /* Check the attribute names, types, etc. 
*/ + i = 0; + if (H5Aiterate2(tmp_dtype_id, H5_INDEX_NAME, H5_ITER_INC, NULL, + object_copy_attribute_iter_callback, &i) < 0) { + H5_FAILED(); + HDprintf(" failed to iterate over copied datatype's attributes\n"); + PART_ERROR(H5Ocopy_dtype_between_files); + } + + if (i != OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS) { + H5_FAILED(); + HDprintf(" number of attributes on copied datatype (%llu) didn't match expected number " + "(%llu)!\n", + (unsigned long long)i, (unsigned long long)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS); + PART_ERROR(H5Ocopy_dtype_between_files); + } + + if (H5Tclose(tmp_dtype_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close committed datatype copy\n"); + PART_ERROR(H5Ocopy_dtype_between_files); + } + + PASSED(); + } + PART_END(H5Ocopy_dtype_between_files); + + if (tmp_dtype_id >= 0) { + H5E_BEGIN_TRY + { + H5Tclose(tmp_dtype_id); + } + H5E_END_TRY; + tmp_dtype_id = H5I_INVALID_HID; + } + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(attr_space_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Tclose(dtype_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id2) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(ocpypl_id); + H5Sclose(attr_space_id); + H5Sclose(space_id); + H5Aclose(attr_id); + H5Tclose(dset_dtype); + H5Tclose(tmp_dtype_id); + H5Tclose(dtype_id); + H5Dclose(tmp_dset_id); + H5Dclose(dset_id); + H5Gclose(tmp_group_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id2); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that H5Ocopy fails when it + * is passed invalid parameters. 
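+ * Each sub-test wraps the H5Ocopy call in H5E_BEGIN_TRY/H5E_END_TRY to suppress the
+ * expected error stack and treats a non-negative return value as a test failure.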
+ */ +static int +test_object_copy_invalid_params(void) +{ + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + + TESTING_MULTIPART("object copying with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, or object aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", + OBJECT_COPY_INVALID_PARAMS_TEST_SUBGROUP_NAME); + goto error; + } + + if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Ocopy_invalid_src_loc_id) + { + TESTING_2("H5Ocopy with an invalid source location ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Ocopy(H5I_INVALID_HID, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, group_id, + OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ocopy succeeded with an invalid source location ID!\n"); + PART_ERROR(H5Ocopy_invalid_src_loc_id); + } + + PASSED(); + } + PART_END(H5Ocopy_invalid_src_loc_id); + + PART_BEGIN(H5Ocopy_invalid_src_obj_name) + { + TESTING_2("H5Ocopy with an invalid source object name"); + + H5E_BEGIN_TRY + { + err_ret = H5Ocopy(group_id, NULL, group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ocopy succeeded with a NULL source object name!\n"); + PART_ERROR(H5Ocopy_invalid_src_obj_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Ocopy(group_id, "", group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2, + H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ocopy succeeded with an invalid source object name of ''!\n"); + PART_ERROR(H5Ocopy_invalid_src_obj_name); + } + + PASSED(); + } + PART_END(H5Ocopy_invalid_src_obj_name); + + PART_BEGIN(H5Ocopy_invalid_dst_loc_id) + { + TESTING_2("H5Ocopy with an invalid destination location ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Ocopy(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, H5I_INVALID_HID, + OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ocopy succeeded with an invalid destination location ID!\n"); + PART_ERROR(H5Ocopy_invalid_dst_loc_id); + } + + PASSED(); + } + PART_END(H5Ocopy_invalid_dst_loc_id); + + 
+        PART_BEGIN(H5Ocopy_invalid_dst_obj_name)
+        {
+            TESTING_2("H5Ocopy with an invalid destination object name");
+
+            H5E_BEGIN_TRY
+            {
+                err_ret = H5Ocopy(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, group_id, NULL,
+                                  H5P_DEFAULT, H5P_DEFAULT);
+            }
+            H5E_END_TRY;
+
+            if (err_ret >= 0) {
+                H5_FAILED();
+                HDprintf("    H5Ocopy succeeded with a NULL destination object name!\n");
+                PART_ERROR(H5Ocopy_invalid_dst_obj_name);
+            }
+
+            H5E_BEGIN_TRY
+            {
+                err_ret = H5Ocopy(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, group_id, "",
+                                  H5P_DEFAULT, H5P_DEFAULT);
+            }
+            H5E_END_TRY;
+
+            if (err_ret >= 0) {
+                H5_FAILED();
+                HDprintf("    H5Ocopy succeeded with an invalid destination object name of ''!\n");
+                PART_ERROR(H5Ocopy_invalid_dst_obj_name);
+            }
+
+            PASSED();
+        }
+        PART_END(H5Ocopy_invalid_dst_obj_name);
+
+        PART_BEGIN(H5Ocopy_invalid_ocpypl)
+        {
+            TESTING_2("H5Ocopy with an invalid OcpyPL");
+
+            H5E_BEGIN_TRY
+            {
+                err_ret = H5Ocopy(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, group_id,
+                                  OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2, H5I_INVALID_HID, H5P_DEFAULT);
+            }
+            H5E_END_TRY;
+
+            if (err_ret >= 0) {
+                H5_FAILED();
+                HDprintf("    H5Ocopy succeeded with an invalid OcpyPL!\n");
+                PART_ERROR(H5Ocopy_invalid_ocpypl);
+            }
+
+            PASSED();
+        }
+        PART_END(H5Ocopy_invalid_ocpypl);
+
+        PART_BEGIN(H5Ocopy_invalid_lcpl)
+        {
+            TESTING_2("H5Ocopy with an invalid LCPL");
+
+            H5E_BEGIN_TRY
+            {
+                err_ret = H5Ocopy(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, group_id,
+                                  OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2, H5P_DEFAULT, H5I_INVALID_HID);
+            }
+            H5E_END_TRY;
+
+            if (err_ret >= 0) {
+                H5_FAILED();
+                HDprintf("    H5Ocopy succeeded with an invalid LCPL!\n");
+                PART_ERROR(H5Ocopy_invalid_lcpl);
+            }
+
+            PASSED();
+        }
+        PART_END(H5Ocopy_invalid_lcpl);
+    }
+    END_MULTIPART;
+
+    TESTING_2("test cleanup");
+
+    if (H5Gclose(group_id2) < 0)
+        TEST_ERROR;
+    if (H5Gclose(group_id) < 0)
+        TEST_ERROR;
+    if (H5Gclose(container_group) < 0)
+        TEST_ERROR;
+    if (H5Fclose(file_id) < 0)
+        TEST_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY
+    {
+        H5Gclose(group_id2);
+        H5Gclose(group_id);
+        H5Gclose(container_group);
+        H5Fclose(file_id);
+    }
+    H5E_END_TRY;
+
+    return 1;
+}
+
+/*
+ * A test for H5Oset_comment(_by_name)/H5Oget_comment(_by_name).
+ */
+static int
+test_object_comments(void)
+{
+    TESTING("object comments");
+
+    SKIPPED();
+
+    return 0;
+}
+
+/*
+ * A test to check that H5Oset_comment(_by_name)/H5Oget_comment(_by_name)
+ * fail when passed invalid parameters.
+ */
+static int
+test_object_comments_invalid_params(void)
+{
+    TESTING("object comments with invalid parameters");
+
+    SKIPPED();
+
+    return 0;
+}
+
+/*
+ * A test for H5Ovisit(_by_name).
+ *
+ * XXX: Should have test for checking nested objects' names/paths.
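+ *
+ * The visit callbacks used below (object_visit_callback and friends) are assumed to
+ * have the standard H5O_iterate2_t signature:
+ *
+ *     herr_t callback(hid_t obj, const char *name, const H5O_info2_t *info, void *op_data);
+ *
+ * and are expected to verify/advance the size_t counter handed in through op_data,
+ * returning 0 (H5_ITER_CONT) so that the traversal continues.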
+ */ +static int +test_object_visit(void) +{ + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + hid_t type_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING_MULTIPART("object visiting"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, dataset, attribute, stored datatype, " + "iterate, or creation order aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_VISIT_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_VISIT_TEST_SUBGROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(OBJECT_VISIT_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype '%s'\n", OBJECT_VISIT_TEST_TYPE_NAME); + goto error; + } + + if ((group_id2 = H5Gcreate2(group_id, OBJECT_VISIT_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dcreate2(group_id, OBJECT_VISIT_TEST_DSET_NAME, dset_dtype, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", OBJECT_VISIT_TEST_DSET_NAME); + goto error; + } + + if (H5Tcommit2(group_id, OBJECT_VISIT_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < + 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", OBJECT_VISIT_TEST_TYPE_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up + * the expected objects with a given step throughout all of the following + * iterations. 
This is to try and check that the objects are indeed being + * returned in the correct order. + */ + + PART_BEGIN(H5Ovisit_obj_name_increasing) + { + TESTING_2("H5Ovisit by object name in increasing order"); + + i = 0; + + if (H5Ovisit3(group_id, H5_INDEX_NAME, H5_ITER_INC, object_visit_callback, &i, H5O_INFO_ALL) < + 0) { + H5_FAILED(); + HDprintf(" H5Ovisit by object name in increasing order failed\n"); + PART_ERROR(H5Ovisit_obj_name_increasing); + } + + if (i != OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_obj_name_increasing); + } + + PASSED(); + } + PART_END(H5Ovisit_obj_name_increasing); + + PART_BEGIN(H5Ovisit_obj_name_decreasing) + { + TESTING_2("H5Ovisit by object name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = OBJECT_VISIT_TEST_NUM_OBJS_VISITED; + + if (H5Ovisit3(group_id, H5_INDEX_NAME, H5_ITER_DEC, object_visit_callback, &i, H5O_INFO_ALL) < + 0) { + H5_FAILED(); + HDprintf(" H5Ovisit by object name in decreasing order failed\n"); + PART_ERROR(H5Ovisit_obj_name_decreasing); + } + + if (i != 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_obj_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ovisit_obj_name_decreasing); +#endif + } + PART_END(H5Ovisit_obj_name_decreasing); + + PART_BEGIN(H5Ovisit_create_order_increasing) + { + TESTING_2("H5Ovisit by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED; + + if (H5Ovisit3(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, object_visit_callback, &i, + H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit by creation order in increasing order failed\n"); + PART_ERROR(H5Ovisit_create_order_increasing); + } + + if (i != 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_create_order_increasing); + } + + PASSED(); + } + PART_END(H5Ovisit_create_order_increasing); + + PART_BEGIN(H5Ovisit_create_order_decreasing) + { + TESTING_2("H5Ovisit by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED; + + if (H5Ovisit3(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, object_visit_callback, &i, + H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit by creation order in decreasing order failed\n"); + PART_ERROR(H5Ovisit_create_order_decreasing); + } + + if (i != 4 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_create_order_decreasing); + } + + PASSED(); + } + PART_END(H5Ovisit_create_order_decreasing); + + PART_BEGIN(H5Ovisit_file) + { + TESTING_2("H5Ovisit on a file ID"); + + /* + * XXX: + */ + + SKIPPED(); + PART_EMPTY(H5Ovisit_file); + } + PART_END(H5Ovisit_file); + + PART_BEGIN(H5Ovisit_dset) + { + TESTING_2("H5Ovisit on a dataset ID"); + + if (H5Ovisit3(dset_id, H5_INDEX_NAME, H5_ITER_INC, object_visit_dset_callback, NULL, + H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit failed\n"); + PART_ERROR(H5Ovisit_dset); + } + + PASSED(); + } + PART_END(H5Ovisit_dset); + + PART_BEGIN(H5Ovisit_dtype) + { + TESTING_2("H5Ovisit on a committed datatype ID"); + + if (H5Ovisit3(type_id, H5_INDEX_NAME, H5_ITER_INC, 
object_visit_dtype_callback, NULL, + H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit failed\n"); + PART_ERROR(H5Ovisit_dtype); + } + + PASSED(); + } + PART_END(H5Ovisit_dtype); + + PART_BEGIN(H5Ovisit_by_name_obj_name_increasing) + { + TESTING_2("H5Ovisit_by_name by object name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 0; + + /* First, test visiting using "." for the object name */ + if (H5Ovisit_by_name3(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, object_visit_callback, &i, + H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by object name in increasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_increasing); + } + + if (i != OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_increasing); + } + + /* Reset the special counter and repeat the test using an indirect object name. */ + i = 0; + + if (H5Ovisit_by_name3(container_group, OBJECT_VISIT_TEST_SUBGROUP_NAME, H5_INDEX_NAME, + H5_ITER_INC, object_visit_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by object name in increasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_increasing); + } + + if (i != OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_increasing); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_obj_name_increasing); + + PART_BEGIN(H5Ovisit_by_name_obj_name_decreasing) + { + TESTING_2("H5Ovisit_by_name by object name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = OBJECT_VISIT_TEST_NUM_OBJS_VISITED; + + /* First, test visiting using "." for the object name */ + if (H5Ovisit_by_name3(group_id, ".", H5_INDEX_NAME, H5_ITER_DEC, object_visit_callback, &i, + H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by object name in decreasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_decreasing); + } + + if (i != 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_decreasing); + } + + /* Reset the special counter and repeat the test using an indirect object name. */ + i = OBJECT_VISIT_TEST_NUM_OBJS_VISITED; + + if (H5Ovisit_by_name3(container_group, OBJECT_VISIT_TEST_SUBGROUP_NAME, H5_INDEX_NAME, + H5_ITER_DEC, object_visit_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by object name in decreasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_decreasing); + } + + if (i != 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ovisit_by_name_obj_name_decreasing); +#endif + } + PART_END(H5Ovisit_by_name_obj_name_decreasing); + + PART_BEGIN(H5Ovisit_by_name_create_order_increasing) + { + TESTING_2("H5Ovisit_by_name by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED; + + /* First, test visiting using "." 
for the object name */ + if (H5Ovisit_by_name3(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, object_visit_callback, &i, + H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by creation order in increasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_create_order_increasing); + } + + if (i != 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_create_order_increasing); + } + + /* Reset the special counter and repeat the test using an indirect object name. */ + i = 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED; + + if (H5Ovisit_by_name3(container_group, OBJECT_VISIT_TEST_SUBGROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_INC, object_visit_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by creation order in increasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_create_order_increasing); + } + + if (i != 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_create_order_increasing); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_create_order_increasing); + + PART_BEGIN(H5Ovisit_by_name_create_order_decreasing) + { + TESTING_2("H5Ovisit_by_name by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED; + + /* First, test visiting using "." for the object name */ + if (H5Ovisit_by_name3(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, object_visit_callback, &i, + H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by creation order in decreasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_create_order_decreasing); + } + + if (i != 4 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_create_order_decreasing); + } + + /* Reset the special counter and repeat the test using an indirect object name. 
*/ + i = 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED; + + if (H5Ovisit_by_name3(container_group, OBJECT_VISIT_TEST_SUBGROUP_NAME, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, object_visit_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by creation order in decreasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_create_order_decreasing); + } + + if (i != 4 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_create_order_decreasing); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_create_order_decreasing); + + PART_BEGIN(H5Ovisit_by_name_file) + { + TESTING_2("H5Ovisit_by_name on a file ID"); + + /* + * XXX: + */ + + SKIPPED(); + PART_EMPTY(H5Ovisit_by_name_file); + } + PART_END(H5Ovisit_by_name_file); + + PART_BEGIN(H5Ovisit_by_name_dset) + { + TESTING_2("H5Ovisit_by_name on a dataset ID"); + + if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_TEST_DSET_NAME, H5_INDEX_NAME, H5_ITER_INC, + object_visit_dset_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name failed\n"); + PART_ERROR(H5Ovisit_by_name_dset); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_dset); + + PART_BEGIN(H5Ovisit_by_name_dtype) + { + TESTING_2("H5Ovisit_by_name on a committed datatype ID"); + + if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_TEST_TYPE_NAME, H5_INDEX_NAME, H5_ITER_INC, + object_visit_dtype_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name failed\n"); + PART_ERROR(H5Ovisit_by_name_dtype); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_dtype); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Tclose(type_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Tclose(type_id); + H5Dclose(dset_id); + H5Pclose(gcpl_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test for H5Ovisit(_by_name) on soft links. Since + * H5Ovisit(_by_name) ignores soft links, this test is + * meant to verify that behavior by placing objects and + * the soft links pointing to those objects in separate + * groups. Visiting is done only on the group containing + * the links to ensure that the objects in the other group + * do not get visited. 
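+ * Concretely, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1 ends up holding only the three
+ * soft links and OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2 holds the three target
+ * groups; every visit below starts from the first group, so the targets must never
+ * be reached through the links.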
+ */ +static int +test_object_visit_soft_link(void) +{ + size_t i; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID, subgroup_id2 = H5I_INVALID_HID; + hid_t linked_group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("object visiting with soft links"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, soft link, iterate, or creation order " + "aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create a GCPL\n"); + goto error; + } + + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) { + H5_FAILED(); + HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n"); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME); + goto error; + } + + /* Create group to hold soft links */ + if ((subgroup_id = H5Gcreate2(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1); + goto error; + } + + /* Create group to hold objects pointed to by soft links */ + if ((subgroup_id2 = H5Gcreate2(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2, H5P_DEFAULT, gcpl_id, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2); + goto error; + } + + /* Create objects under subgroup 2 */ + if ((linked_group_id = H5Gcreate2(subgroup_id2, OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME1, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME1); + goto error; + } + + if (H5Gclose(linked_group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME1); + goto error; + } + + if ((linked_group_id = H5Gcreate2(subgroup_id2, OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME2, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME2); + goto error; + } + + if (H5Gclose(linked_group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME2); + goto error; + } + + if ((linked_group_id = H5Gcreate2(subgroup_id2, OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME3, H5P_DEFAULT, + gcpl_id, H5P_DEFAULT)) < 
0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME3); + goto error; + } + + if (H5Gclose(linked_group_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME3); + goto error; + } + + if (H5Gclose(subgroup_id2) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2); + goto error; + } + + /* Create soft links under subgroup 1 to point to the previously-created objects */ + if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME + "/" OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2 "/" OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME1, + subgroup_id, OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME1, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME1); + goto error; + } + + if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME + "/" OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2 "/" OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME2, + subgroup_id, OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME2); + goto error; + } + + if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME + "/" OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2 "/" OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME3, + subgroup_id, OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't create soft link '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME3); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + /* + * NOTE: A counter is passed to the iteration callback to try to match up + * the expected objects with a given step throughout all of the following + * iterations. This is to try and check that the objects are indeed being + * returned in the correct order. 
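+         * Each pass is expected to visit OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED
+         * objects, so the counter is pre-set to the running total before every pass and
+         * the final value is checked against the corresponding multiple afterwards.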
+ */ + + PART_BEGIN(H5Ovisit_obj_name_increasing) + { + TESTING_2("H5Ovisit by object name in increasing order"); + + i = 0; + + if (H5Ovisit3(subgroup_id, H5_INDEX_NAME, H5_ITER_INC, object_visit_soft_link_callback, &i, + H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit by object name in increasing order failed\n"); + PART_ERROR(H5Ovisit_obj_name_increasing); + } + + if (i != OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_obj_name_increasing); + } + + PASSED(); + } + PART_END(H5Ovisit_obj_name_increasing); + + PART_BEGIN(H5Ovisit_obj_name_decreasing) + { + TESTING_2("H5Ovisit by object name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED; + + if (H5Ovisit3(subgroup_id, H5_INDEX_NAME, H5_ITER_DEC, object_visit_soft_link_callback, &i, + H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit by object name in decreasing order failed\n"); + PART_ERROR(H5Ovisit_obj_name_decreasing); + } + + if (i != 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_obj_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ovisit_obj_name_decreasing); +#endif + } + PART_END(H5Ovisit_obj_name_decreasing); + + PART_BEGIN(H5Ovisit_create_order_increasing) + { + TESTING_2("H5Ovisit by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED; + + if (H5Ovisit3(subgroup_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, object_visit_soft_link_callback, &i, + H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit by creation order in increasing order failed\n"); + PART_ERROR(H5Ovisit_create_order_increasing); + } + + if (i != 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_create_order_increasing); + } + + PASSED(); + } + PART_END(H5Ovisit_create_order_increasing); + + PART_BEGIN(H5Ovisit_create_order_decreasing) + { + TESTING_2("H5Ovisit by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED; + + if (H5Ovisit3(subgroup_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, object_visit_soft_link_callback, &i, + H5O_INFO_ALL) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit by creation order in decreasing order failed\n"); + PART_ERROR(H5Ovisit_create_order_decreasing); + } + + if (i != 4 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_create_order_decreasing); + } + + PASSED(); + } + PART_END(H5Ovisit_create_order_decreasing); + + PART_BEGIN(H5Ovisit_by_name_obj_name_increasing) + { + TESTING_2("H5Ovisit_by_name by object name in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 0; + + /* First, test visiting using "." 
for the object name */ + if (H5Ovisit_by_name3(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, + object_visit_soft_link_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by object name in increasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_increasing); + } + + if (i != OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_increasing); + } + + /* Reset the special counter and repeat the test using an indirect object name. */ + i = 0; + + /* Repeat the test using an indirect object name */ + if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1, H5_INDEX_NAME, + H5_ITER_INC, object_visit_soft_link_callback, &i, H5O_INFO_ALL, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by object name in increasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_increasing); + } + + if (i != OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_increasing); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_obj_name_increasing); + + PART_BEGIN(H5Ovisit_by_name_obj_name_decreasing) + { + TESTING_2("H5Ovisit_by_name by object name in decreasing order"); +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Reset the counter to the appropriate value for the next test */ + i = OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED; + + /* First, test visiting using "." for the object name */ + if (H5Ovisit_by_name3(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, + object_visit_soft_link_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by object name in decreasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_decreasing); + } + + if (i != 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_decreasing); + } + + /* Reset the special counter and repeat the test using an indirect object name. */ + i = OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED; + + /* Repeat the test using an indirect object name */ + if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1, H5_INDEX_NAME, + H5_ITER_DEC, object_visit_soft_link_callback, &i, H5O_INFO_ALL, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by object name in decreasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_decreasing); + } + + if (i != 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_obj_name_decreasing); + } + + PASSED(); +#else + SKIPPED(); + PART_EMPTY(H5Ovisit_by_name_obj_name_decreasing); +#endif + } + PART_END(H5Ovisit_by_name_obj_name_decreasing); + + PART_BEGIN(H5Ovisit_by_name_create_order_increasing) + { + TESTING_2("H5Ovisit_by_name by creation order in increasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED; + + /* First, test visiting using "." 
for the object name */ + if (H5Ovisit_by_name3(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, + object_visit_soft_link_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by creation order in increasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_create_order_increasing); + } + + if (i != 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_create_order_increasing); + } + + /* Reset the special counter and repeat the test using an indirect object name. */ + i = 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED; + + /* Repeat the test using an indirect object name */ + if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1, H5_INDEX_CRT_ORDER, + H5_ITER_INC, object_visit_soft_link_callback, &i, H5O_INFO_ALL, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by creation order in increasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_create_order_increasing); + } + + if (i != 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_create_order_increasing); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_create_order_increasing); + + PART_BEGIN(H5Ovisit_by_name_create_order_decreasing) + { + TESTING_2("H5Ovisit_by_name by creation order in decreasing order"); + + /* Reset the counter to the appropriate value for the next test */ + i = 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED; + + /* First, test visiting using "." for the object name */ + if (H5Ovisit_by_name3(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, + object_visit_soft_link_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by creation order in decreasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_create_order_decreasing); + } + + if (i != 4 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_create_order_decreasing); + } + + /* Reset the special counter and repeat the test using an indirect object name. 
*/ + i = 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED; + + /* Repeat the test using an indirect object name */ + if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1, H5_INDEX_CRT_ORDER, + H5_ITER_DEC, object_visit_soft_link_callback, &i, H5O_INFO_ALL, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name by creation order in decreasing order failed\n"); + PART_ERROR(H5Ovisit_by_name_create_order_decreasing); + } + + if (i != 4 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) { + H5_FAILED(); + HDprintf(" some objects were not visited!\n"); + PART_ERROR(H5Ovisit_by_name_create_order_decreasing); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_create_order_decreasing); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + if (H5Gclose(subgroup_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(gcpl_id); + H5Gclose(linked_group_id); + H5Gclose(subgroup_id); + H5Gclose(subgroup_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that H5Ovisit(_by_name) fails when + * it is passed invalid parameters. + */ +static int +test_object_visit_invalid_params(void) +{ + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + + TESTING_MULTIPART("object visiting with invalid parameters"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or iterate aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + OBJECT_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME); + goto error; + } + + if ((group_id2 = H5Gcreate2(group_id, OBJECT_VISIT_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_INVALID_PARAMS_TEST_GROUP_NAME); + goto error; + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Ovisit_invalid_obj_id) + { + TESTING_2("H5Ovisit with an invalid object ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit3(H5I_INVALID_HID, H5_INDEX_NAME, H5_ITER_INC, object_visit_noop_callback, + NULL, H5O_INFO_ALL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit succeeded with an invalid object ID!\n"); + PART_ERROR(H5Ovisit_invalid_obj_id); + } + + PASSED(); + } + PART_END(H5Ovisit_invalid_obj_id); + + PART_BEGIN(H5Ovisit_invalid_index_type) + { + TESTING_2("H5Ovisit with an invalid index 
type"); + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit3(group_id, H5_INDEX_UNKNOWN, H5_ITER_INC, object_visit_noop_callback, NULL, + H5O_INFO_ALL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit succeeded with invalid index type H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Ovisit_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit3(group_id, H5_INDEX_N, H5_ITER_INC, object_visit_noop_callback, NULL, + H5O_INFO_ALL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit succeeded with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Ovisit_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Ovisit_invalid_index_type); + + PART_BEGIN(H5Ovisit_invalid_iter_order) + { + TESTING_2("H5Ovisit with an invalid iteration ordering"); + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit3(group_id, H5_INDEX_NAME, H5_ITER_UNKNOWN, object_visit_noop_callback, + NULL, H5O_INFO_ALL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Ovisit_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit3(group_id, H5_INDEX_NAME, H5_ITER_N, object_visit_noop_callback, NULL, + H5O_INFO_ALL); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit succeeded with invalid iteration ordering H5_ITER_N!\n"); + PART_ERROR(H5Ovisit_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Ovisit_invalid_iter_order); + + PART_BEGIN(H5Ovisit_by_name_invalid_loc_id) + { + TESTING_2("H5Ovisit_by_name with an invalid location ID"); + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit_by_name3(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_N, + object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name succeeded with an invalid location ID!\n"); + PART_ERROR(H5Ovisit_by_name_invalid_loc_id); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_invalid_loc_id); + + PART_BEGIN(H5Ovisit_by_name_invalid_obj_name) + { + TESTING_2("H5Ovisit_by_name with an invalid object name"); + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit_by_name3(group_id, NULL, H5_INDEX_NAME, H5_ITER_N, + object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name succeeded with a NULL object name!\n"); + PART_ERROR(H5Ovisit_by_name_invalid_obj_name); + } + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit_by_name3(group_id, "", H5_INDEX_NAME, H5_ITER_N, + object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name succeeded with an invalid object name of ''!\n"); + PART_ERROR(H5Ovisit_by_name_invalid_obj_name); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_invalid_obj_name); + + PART_BEGIN(H5Ovisit_by_name_invalid_index_type) + { + TESTING_2("H5Ovisit_by_name with an invalid index type"); + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit_by_name3(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_N, + object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name succeeded with invalid index type H5_INDEX_UNKNOWN!\n"); + PART_ERROR(H5Ovisit_by_name_invalid_index_type); + } + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit_by_name3(group_id, ".", H5_INDEX_N, H5_ITER_N, object_visit_noop_callback, + NULL, H5O_INFO_ALL, H5P_DEFAULT); + } + H5E_END_TRY; + + if 
(err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name succeeded with invalid index type H5_INDEX_N!\n"); + PART_ERROR(H5Ovisit_by_name_invalid_index_type); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_invalid_index_type); + + PART_BEGIN(H5Ovisit_by_name_invalid_iter_order) + { + TESTING_2("H5Ovisit_by_name with an invalid iteration ordering"); + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit_by_name3(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, + object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n"); + PART_ERROR(H5Ovisit_by_name_invalid_iter_order); + } + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit_by_name3(group_id, ".", H5_INDEX_NAME, H5_ITER_N, + object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name succeeded with invalid iteration ordering H5_ITER_N!\n"); + PART_ERROR(H5Ovisit_by_name_invalid_iter_order); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_invalid_iter_order); + + PART_BEGIN(H5Ovisit_by_name_invalid_lapl) + { + TESTING_2("H5Ovisit_by_name with an invalid LAPL"); + + H5E_BEGIN_TRY + { + err_ret = H5Ovisit_by_name3(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, + object_visit_noop_callback, NULL, H5O_INFO_ALL, H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Ovisit_by_name succeeded with an invalid LAPL!\n"); + PART_ERROR(H5Ovisit_by_name_invalid_lapl); + } + + PASSED(); + } + PART_END(H5Ovisit_by_name_invalid_lapl); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Gclose(group_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test for H5Oclose. 
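+ * Each part creates an object, closes it with its type-specific close call, reopens
+ * the same object generically with H5Oopen, and then verifies that H5Oclose can close
+ * the reopened handle.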
+ */ +static int +test_close_object(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t group_id2 = H5I_INVALID_HID; + hid_t dtype_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_dtype = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Oclose"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, dataset, attribute, or stored datatype " + "aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_CLOSE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_CLOSE_TEST_GROUP_NAME); + goto error; + } + + if ((fspace_id = generate_random_dataspace(OBJECT_CLOSE_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0) + TEST_ERROR; + + if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Oclose_group) + { + TESTING_2("H5Oclose on a group"); + + if ((group_id2 = H5Gcreate2(group_id, OBJECT_CLOSE_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create group '%s'\n", OBJECT_CLOSE_TEST_GRP_NAME); + PART_ERROR(H5Oclose_group); + } + + H5E_BEGIN_TRY + { + H5Gclose(group_id2); + } + H5E_END_TRY; + + if ((group_id2 = H5Oopen(group_id, OBJECT_CLOSE_TEST_GRP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open group '%s' with H5Oopen\n", OBJECT_CLOSE_TEST_GRP_NAME); + PART_ERROR(H5Oclose_group); + } + + if (H5Oclose(group_id2) < 0) { + H5_FAILED(); + HDprintf(" couldn't close group '%s' with H5Oclose\n", OBJECT_CLOSE_TEST_GRP_NAME); + PART_ERROR(H5Oclose_group); + } + + PASSED(); + } + PART_END(H5Oclose_group); + + PART_BEGIN(H5Oclose_dset) + { + TESTING_2("H5Oclose on a dataset"); + + if ((dset_id = H5Dcreate2(group_id, OBJECT_CLOSE_TEST_DSET_NAME, dset_dtype, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", OBJECT_CLOSE_TEST_DSET_NAME); + PART_ERROR(H5Oclose_dset); + } + + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + + if ((dset_id = H5Oopen(group_id, OBJECT_CLOSE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s' with H5Oopen\n", OBJECT_CLOSE_TEST_DSET_NAME); + PART_ERROR(H5Oclose_dset); + } + + if (H5Oclose(dset_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close dataset '%s' with H5Oclose\n", OBJECT_CLOSE_TEST_DSET_NAME); + PART_ERROR(H5Oclose_dset); + } + + PASSED(); + } + PART_END(H5Oclose_dset); + + PART_BEGIN(H5Oclose_dtype) + { + TESTING_2("H5Oclose on a committed 
datatype"); + + if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create datatype '%s'\n", OBJECT_CLOSE_TEST_TYPE_NAME); + PART_ERROR(H5Oclose_dtype); + } + + if (H5Tcommit2(group_id, OBJECT_CLOSE_TEST_TYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT) < 0) { + H5_FAILED(); + HDprintf(" couldn't commit datatype '%s'\n", OBJECT_CLOSE_TEST_TYPE_NAME); + PART_ERROR(H5Oclose_dtype); + } + + H5E_BEGIN_TRY + { + H5Tclose(dtype_id); + } + H5E_END_TRY; + + if ((dtype_id = H5Oopen(group_id, OBJECT_CLOSE_TEST_TYPE_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open datatype '%s' with H5Oopen\n", OBJECT_CLOSE_TEST_TYPE_NAME); + PART_ERROR(H5Oclose_dtype); + } + + if (H5Oclose(dtype_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't close datatype '%s' with H5Oclose\n", OBJECT_CLOSE_TEST_TYPE_NAME); + PART_ERROR(H5Oclose_dtype); + } + + PASSED(); + } + PART_END(H5Oclose_dtype); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Tclose(dset_dtype) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + H5Tclose(dset_dtype); + H5Tclose(dtype_id); + H5Dclose(dset_id); + H5Gclose(group_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that H5Oclose fails when it + * is passed invalid parameters. + */ +static int +test_close_object_invalid_params(void) +{ + herr_t err_ret = -1; + hid_t file_id = H5I_INVALID_HID; + + TESTING("H5Oclose with an invalid object ID"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file or object aren't supported with this connector\n"); + return 0; + } + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + H5E_BEGIN_TRY + { + err_ret = H5Oclose(H5I_INVALID_HID); + } + H5E_END_TRY; + + if (err_ret >= 0) { + H5_FAILED(); + HDprintf(" H5Oclose succeeded with an invalid object ID!\n"); + goto error; + } + + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that various objects (file, dataspace, property list, + * and attribute) can't be closed with H5Oclose. 
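+ *
+ * H5Oclose is only valid for IDs that refer to named objects (groups,
+ * datasets and committed datatypes); files, property lists, dataspaces and
+ * attributes have their own close routines. Each part below therefore uses
+ * the expected-failure pattern, roughly (id_of_some_other_kind stands for
+ * whichever ID is being tested):
+ *
+ *     H5E_BEGIN_TRY
+ *     {
+ *         status = H5Oclose(id_of_some_other_kind);
+ *     }
+ *     H5E_END_TRY;
+ *
+ *     if (status >= 0)
+ *         PART_ERROR(...);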
+ */ +static int +test_close_invalid_objects(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t attr_dtype = H5I_INVALID_HID; + hid_t attr_space_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + herr_t status; + + TESTING_MULTIPART("H5Oclose invalid objects"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, object, dataset, attribute, or stored datatype " + "aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, OBJECT_CLOSE_INVALID_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_OPEN_TEST_GROUP_NAME); + goto error; + } + + if ((attr_space_id = generate_random_dataspace(OBJECT_CLOSE_INVALID_TEST_SPACE_RANK, NULL, NULL, TRUE)) < + 0) + TEST_ERROR; + + if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) + TEST_ERROR; + + if ((attr_id = H5Acreate2(group_id, OBJECT_CLOSE_INVALID_TEST_ATTRIBUTE_NAME, attr_dtype, attr_space_id, + H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Oclose_file) + { + TESTING_2("H5Oclose with an invalid object - file"); + + H5E_BEGIN_TRY + { + status = H5Oclose(file_id); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" H5Oclose succeeded with an invalid object (file)!\n"); + PART_ERROR(H5Oclose_file); + } + + PASSED(); + } + PART_END(H5Oclose_file); + + PART_BEGIN(H5Oclose_plist) + { + TESTING_2("H5Oclose with an invalid object - property list"); + + H5E_BEGIN_TRY + { + status = H5Oclose(fapl_id); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" H5Oclose succeeded with an invalid object (property list)!\n"); + PART_ERROR(H5Oclose_plist); + } + + PASSED(); + } + PART_END(H5Oclose_plist); + + PART_BEGIN(H5Oclose_dspace) + { + TESTING_2("H5Oclose with an invalid object - data space"); + + H5E_BEGIN_TRY + { + status = H5Oclose(attr_space_id); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" H5Oclose succeeded with an invalid object (data space)!\n"); + PART_ERROR(H5Oclose_dspace); + } + + PASSED(); + } + PART_END(H5Oclose_dspace); + + PART_BEGIN(H5Oclose_attribute) + { + TESTING_2("H5Oclose with an invalid object - attribute"); + + H5E_BEGIN_TRY + { + status = H5Oclose(attr_id); + } + H5E_END_TRY; + + if (status >= 0) { + H5_FAILED(); + HDprintf(" H5Oclose succeeded with an invalid object (attribute)!\n"); + PART_ERROR(H5Oclose_attribute); + } + + PASSED(); + } + PART_END(H5Oclose_attribute); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Tclose(attr_dtype) < 0) + TEST_ERROR; + if (H5Aclose(attr_id) < 0) + TEST_ERROR; + if (H5Sclose(attr_space_id) < 0) + 
TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Tclose(attr_dtype); + H5Sclose(attr_space_id); + H5Aclose(attr_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Fclose(file_id); + H5Pclose(fapl_id); + } + H5E_END_TRY; + + return 1; +} /* test_close_invalid_objects */ + +/* + * A test for H5Oflush. + */ +static int +test_flush_object(void) +{ + TESTING("H5Oflush"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check that H5Oflush fails when + * it is passed invalid parameters. + */ +static int +test_flush_object_invalid_params(void) +{ + TESTING("H5Oflush with invalid parameters"); + + SKIPPED(); + + return 0; +} + +/* + * A test for H5Orefresh. + */ +static int +test_refresh_object(void) +{ + TESTING("H5Orefresh"); + + SKIPPED(); + + return 0; +} + +/* + * A test to check that H5Orefresh fails when + * it is passed invalid parameters. + */ +static int +test_refresh_object_invalid_params(void) +{ + TESTING("H5Orefresh with invalid parameters"); + + SKIPPED(); + + return 0; +} + +/* + * H5Ocopy test callback to check that an object's attributes got copied + * over successfully to the new object. + */ +static herr_t +object_copy_attribute_iter_callback(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo, + void *op_data) +{ + size_t *counter = (size_t *)op_data; + htri_t types_equal; + char expected_name[256]; + hid_t attr_id = H5I_INVALID_HID; + hid_t attr_type = H5I_INVALID_HID; + herr_t ret_value = H5_ITER_CONT; + + UNUSED(ainfo); + UNUSED(op_data); + + snprintf(expected_name, 256, "attr%d", (int)(*counter)); + + if (HDstrncmp(attr_name, expected_name, 256)) { + HDprintf(" attribute name '%s' did not match expected name '%s'\n", attr_name, expected_name); + ret_value = H5_ITER_ERROR; + goto done; + } + + if ((attr_id = H5Aopen(location_id, attr_name, H5P_DEFAULT)) < 0) { + HDprintf(" failed to open attribute '%s'\n", attr_name); + ret_value = H5_ITER_ERROR; + goto done; + } + + if ((attr_type = H5Aget_type(attr_id)) < 0) { + HDprintf(" failed to retrieve attribute's datatype\n"); + ret_value = H5_ITER_ERROR; + goto done; + } + + if ((types_equal = H5Tequal(attr_type, H5T_NATIVE_INT)) < 0) { + HDprintf(" failed to determine if attribute's datatype matched what is expected\n"); + ret_value = H5_ITER_ERROR; + goto done; + } + + if (!types_equal) { + HDprintf(" attribute datatype did not match expected H5T_NATIVE_INT\n"); + ret_value = H5_ITER_ERROR; + goto done; + } + +done: + if (attr_type >= 0) + H5Tclose(attr_type); + if (attr_id >= 0) + H5Aclose(attr_id); + + (*counter)++; + + return ret_value; +} + +/* + * H5Ocopy callback to check that a copied group's soft links + * have not been expanded when the default copy options are + * used. 
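+ *
+ * For context, link expansion is requested on the H5Ocopy side through an
+ * object copy property list; a sketch (the location IDs are placeholders):
+ *
+ *     hid_t ocpypl_id = H5Pcreate(H5P_OBJECT_COPY);
+ *     H5Pset_copy_object(ocpypl_id, H5O_COPY_EXPAND_SOFT_LINK_FLAG);
+ *     H5Ocopy(src_loc_id, "group_to_copy", dst_loc_id, "copied_group",
+ *             ocpypl_id, H5P_DEFAULT);
+ *
+ * When H5P_DEFAULT is passed instead, soft links are copied as links, which
+ * is what this callback verifies through H5Lget_val.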
+ */ +static herr_t +object_copy_soft_link_non_expand_callback(hid_t group, const char *name, const H5L_info2_t *info, + void *op_data) +{ + size_t *counter = (size_t *)op_data; + void *link_val_buf = NULL; + char expected_link_val[OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE]; + herr_t ret_value = H5_ITER_CONT; + + /* Make sure the link type is soft */ + if (H5L_TYPE_SOFT != info->type) { + HDprintf(" link type was not H5L_TYPE_SOFT; link must have been expanded!\n"); + ret_value = H5_ITER_ERROR; + goto done; + } + + if (NULL == (link_val_buf = calloc(1, info->u.val_size))) { + HDprintf(" failed to allocate buffer for link value\n"); + ret_value = H5_ITER_ERROR; + goto done; + } + + /* Retrieve the link's value */ + if (H5Lget_val(group, name, link_val_buf, info->u.val_size, H5P_DEFAULT) < 0) { + HDprintf(" failed to retrieve value of link '%s'\n", name); + ret_value = H5_ITER_ERROR; + goto done; + } + + /* Make sure link's value matches what is expected */ + snprintf(expected_link_val, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE, + "/" OBJECT_TEST_GROUP_NAME "/" OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME "/grp%d", + (int)(*counter)); + + if (strncmp(link_val_buf, expected_link_val, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE)) { + HDprintf(" value '%s' for link '%s' did not match expected value '%s'\n", (char *)link_val_buf, + name, expected_link_val); + ret_value = H5_ITER_ERROR; + goto done; + } + +done: + if (link_val_buf) + free(link_val_buf); + + (*counter)++; + + return ret_value; +} + +/* + * H5Ocopy callback to check that a copied group's soft links + * have been expanded when the H5O_COPY_EXPAND_SOFT_LINK_FLAG + * flag is specified. + */ +static herr_t +object_copy_soft_link_expand_callback(hid_t group, const char *name, const H5L_info2_t *info, void *op_data) +{ + size_t *counter = (size_t *)op_data; + char expected_link_name[OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE]; + herr_t ret_value = H5_ITER_CONT; + + UNUSED(group); + + /* Make sure the link type is hard */ + if (H5L_TYPE_HARD != info->type) { + HDprintf(" link type was not H5L_TYPE_HARD; link must not have been expanded!\n"); + ret_value = H5_ITER_ERROR; + goto done; + } + + /* Ensure that the link's name still follows the 'link1', 'link2', etc. pattern */ + snprintf(expected_link_name, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE, "link%d", (int)(*counter)); + + if (strncmp(name, expected_link_name, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE)) { + HDprintf(" link name '%s' did not match expected name '%s'\n", name, expected_link_name); + ret_value = H5_ITER_ERROR; + goto done; + } + +done: + (*counter)++; + + return ret_value; +} + +/* + * H5Ovisit callback to simply iterate recursively through all of the objects in a + * group and check to make sure their names match what is expected. 
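+ *
+ * This callback expects a size_t counter as op_data and is driven along the
+ * lines of (sketch only):
+ *
+ *     size_t i = 0;
+ *     H5Ovisit3(group_id, H5_INDEX_NAME, H5_ITER_INC,
+ *               object_visit_callback, &i, H5O_INFO_ALL);
+ *
+ * so that each visited object can be checked against its expected position.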
+ */ +static herr_t +object_visit_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, void *op_data) +{ + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + herr_t ret_val = 0; + + UNUSED(o_id); + + if (!HDstrncmp(name, ".", strlen(".") + 1) && + (counter_val == 0 || counter_val == 4 || counter_val == 8 || counter_val == 12)) { + if (H5O_TYPE_GROUP == object_info->type) + goto done; + else + HDprintf(" type for object '%s' was not H5O_TYPE_GROUP\n", name); + } + else if (!HDstrncmp(name, OBJECT_VISIT_TEST_GROUP_NAME, strlen(OBJECT_VISIT_TEST_GROUP_NAME) + 1) && + (counter_val == 2 || counter_val == 6 || counter_val == 9 || counter_val == 15)) { + if (H5O_TYPE_GROUP == object_info->type) + goto done; + else + HDprintf(" type for object '%s' was not H5O_TYPE_GROUP\n", name); + } + else if (!HDstrncmp(name, OBJECT_VISIT_TEST_DSET_NAME, strlen(OBJECT_VISIT_TEST_DSET_NAME) + 1) && + (counter_val == 1 || counter_val == 7 || counter_val == 10 || counter_val == 14)) { + if (H5O_TYPE_DATASET == object_info->type) + goto done; + else + HDprintf(" type for object '%s' was not H5O_TYPE_DATASET\n", name); + } + else if (!HDstrncmp(name, OBJECT_VISIT_TEST_TYPE_NAME, strlen(OBJECT_VISIT_TEST_TYPE_NAME) + 1) && + (counter_val == 3 || counter_val == 5 || counter_val == 11 || counter_val == 13)) { + if (H5O_TYPE_NAMED_DATATYPE == object_info->type) + goto done; + else + HDprintf(" type for object '%s' was not H5O_TYPE_NAMED_DATATYPE\n", name); + } + else + HDprintf(" object '%s' didn't match known names or came in an incorrect order\n", name); + + ret_val = -1; + +done: + (*i)++; + + return ret_val; +} + +/* + * H5Ovisit callback for visiting a singular dataset. + */ +static herr_t +object_visit_dset_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, void *op_data) +{ + herr_t ret_val = 0; + + UNUSED(o_id); + UNUSED(op_data); + + if (HDstrncmp(name, ".", strlen(".") + 1)) { + HDprintf(" object '%s' didn't match known names\n", name); + return -1; + } + + if (H5O_TYPE_DATASET != object_info->type) { + HDprintf(" object type was not H5O_TYPE_DATASET\n"); + return -1; + } + + return ret_val; +} + +/* + * H5Ovisit callback for visiting a singular committed datatype. + */ +static herr_t +object_visit_dtype_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, void *op_data) +{ + herr_t ret_val = 0; + + UNUSED(o_id); + UNUSED(op_data); + + if (HDstrncmp(name, ".", strlen(".") + 1)) { + HDprintf(" object '%s' didn't match known names\n", name); + return -1; + } + + if (H5O_TYPE_NAMED_DATATYPE != object_info->type) { + HDprintf(" object type was not H5O_TYPE_NAMED_DATATYPE\n"); + return -1; + } + + return ret_val; +} + +/* + * H5Ovisit callback for testing ignoring of + * soft links during object visiting. + */ +static herr_t +object_visit_soft_link_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, void *op_data) +{ + size_t *i = (size_t *)op_data; + size_t counter_val = *((size_t *)op_data); + herr_t ret_val = 0; + + UNUSED(o_id); + + if (!HDstrncmp(name, ".", strlen(".") + 1) && (counter_val <= 5)) { + if (H5O_TYPE_GROUP == object_info->type) + goto done; + else + HDprintf(" type for object '%s' was not H5O_TYPE_GROUP\n", name); + } + else + HDprintf(" object '%s' didn't match known names or came in an incorrect order\n", name); + + ret_val = -1; + +done: + (*i)++; + + return ret_val; +} + +/* + * H5Ovisit callback to simply iterate through all of the objects in a given + * group. 
+ */ +static herr_t +object_visit_noop_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, void *op_data) +{ + UNUSED(o_id); + UNUSED(name); + UNUSED(object_info); + UNUSED(op_data); + + return 0; +} + +/* + * Cleanup temporary test files + */ +static void +cleanup_files(void) +{ + H5Fdelete(OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME, H5P_DEFAULT); +} + +int +H5_api_object_test(void) +{ + size_t i; + int nerrors; + + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Object Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(object_tests); i++) { + nerrors += (*object_tests[i])() ? 1 : 0; + } + + HDprintf("\n"); + + HDprintf("Cleaning up testing files\n"); + cleanup_files(); + + return nerrors; +} diff --git a/test/API/H5_api_object_test.h b/test/API/H5_api_object_test.h new file mode 100644 index 00000000000..54708434a18 --- /dev/null +++ b/test/API/H5_api_object_test.h @@ -0,0 +1,191 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_OBJECT_TEST_H +#define H5_API_OBJECT_TEST_H + +#include "H5_api_test.h" + +int H5_api_object_test(void); + +/*********************************************** + * * + * API Object test defines * + * * + ***********************************************/ + +#define OBJECT_OPEN_TEST_SPACE_RANK 2 +#define OBJECT_OPEN_TEST_GROUP_NAME "object_open_test" +#define OBJECT_OPEN_TEST_GRP_NAME "object_open_test_group" +#define OBJECT_OPEN_TEST_DSET_NAME "object_open_test_dset" +#define OBJECT_OPEN_TEST_TYPE_NAME "object_open_test_type" + +#define OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME "object_open_invalid_params_test" +#define OBJECT_OPEN_INVALID_PARAMS_TEST_GRP_NAME "object_open_invalid_params_test_group" + +#define OBJECT_CLOSE_INVALID_TEST_GROUP_NAME "object_close_invalid_params_test" +#define OBJECT_CLOSE_INVALID_TEST_ATTRIBUTE_NAME "object_close_invalid_test_attribute" +#define OBJECT_CLOSE_INVALID_TEST_SPACE_RANK 2 + +#define OBJECT_EXISTS_TEST_DSET_SPACE_RANK 2 +#define OBJECT_EXISTS_TEST_SUBGROUP_NAME "object_exists_test" +#define OBJECT_EXISTS_TEST_DANGLING_LINK_NAME "object_exists_test_dangling_soft_link" +#define OBJECT_EXISTS_TEST_SOFT_LINK_NAME "object_exists_test_soft_link" +#define OBJECT_EXISTS_TEST_GRP_NAME "object_exists_test_group" +#define OBJECT_EXISTS_TEST_TYPE_NAME "object_exists_test_type" +#define OBJECT_EXISTS_TEST_DSET_NAME "object_exists_test_dset" + +#define OBJECT_EXISTS_INVALID_PARAMS_TEST_SUBGROUP_NAME "object_exists_invalid_params_test" +#define OBJECT_EXISTS_INVALID_PARAMS_TEST_GRP_NAME "object_exists_invalid_params_test_group" + +#define OBJECT_COPY_BASIC_TEST_DEEP_NESTED_GROUP_NAME "deep_nested_group" +#define OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS 3 +#define OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME "copied_group" +#define OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME "copied_dset" +#define 
OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME "copied_dtype" +#define OBJECT_COPY_BASIC_TEST_SUBGROUP_NAME "object_copy_basic_test" +#define OBJECT_COPY_BASIC_TEST_GROUP_NAME "group_to_copy" +#define OBJECT_COPY_BASIC_TEST_DSET_NAME "dset_to_copy" +#define OBJECT_COPY_BASIC_TEST_DTYPE_NAME "dtype_to_copy" +#define OBJECT_COPY_BASIC_TEST_SPACE_RANK 2 +#define OBJECT_COPY_BASIC_TEST_NUM_ATTRS 3 +#define OBJECT_COPY_BASIC_TEST_BUF_SIZE 256 + +#define OBJECT_COPY_ALREADY_EXISTING_TEST_SUBGROUP_NAME "object_copy_existing_objects_test" +#define OBJECT_COPY_ALREADY_EXISTING_TEST_GROUP_NAME "group_to_copy" +#define OBJECT_COPY_ALREADY_EXISTING_TEST_DSET_NAME "dset_to_copy" +#define OBJECT_COPY_ALREADY_EXISTING_TEST_DTYPE_NAME "dtype_to_copy" +#define OBJECT_COPY_ALREADY_EXISTING_TEST_SPACE_RANK 2 + +#define OBJECT_COPY_SHALLOW_TEST_DEEP_NESTED_GROUP_NAME "deep_nested_group" +#define OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS 3 +#define OBJECT_COPY_SHALLOW_TEST_SUBGROUP_NAME "object_copy_shallow_group_copy_test" +#define OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME "copied_group" +#define OBJECT_COPY_SHALLOW_TEST_GROUP_NAME "group_to_copy" +#define OBJECT_COPY_SHALLOW_TEST_BUF_SIZE 256 + +#define OBJECT_COPY_NO_ATTRS_TEST_SUBGROUP_NAME "object_copy_no_attributes_test" +#define OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME "copied_group" +#define OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME "copied_dset" +#define OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME "copied_dtype" +#define OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME "group_to_copy" +#define OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME "dset_to_copy" +#define OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME "dtype_to_copy" +#define OBJECT_COPY_NO_ATTRS_TEST_SPACE_RANK 2 +#define OBJECT_COPY_NO_ATTRS_TEST_NUM_ATTRS 3 +#define OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE 256 + +#define OBJECT_COPY_SOFT_LINK_TEST_DEEP_NESTED_GROUP_NAME "deep_nested_group" +#define OBJECT_COPY_SOFT_LINK_TEST_DANGLING_LINK_NAME "dangling_link" +#define OBJECT_COPY_SOFT_LINK_TEST_NUM_NESTED_OBJS 3 +#define OBJECT_COPY_SOFT_LINK_TEST_SUBGROUP_NAME "object_copy_soft_link_test" +#define OBJECT_COPY_SOFT_LINK_TEST_SOFT_LINK_NAME "soft_link_to_group_to_copy" +#define OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME "copied_group" +#define OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME "group_to_copy" +#define OBJECT_COPY_SOFT_LINK_TEST_SPACE_RANK 2 +#define OBJECT_COPY_SOFT_LINK_TEST_NUM_ATTRS 3 +#define OBJECT_COPY_SOFT_LINK_TEST_BUF_SIZE 256 + +#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_DANGLING_GROUP_NAME "expanded_dangling_soft_links_group" +#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME "non_expanded_soft_links_group" +#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME "expanded_soft_links_group" +#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS 3 +#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME "object_copy_group_with_soft_links_test" +#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME "group_to_copy" +#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE 256 + +#define OBJECT_COPY_BETWEEN_FILES_TEST_DEEP_NESTED_GROUP_NAME "deep_nested_group" +#define OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS 3 +#define OBJECT_COPY_BETWEEN_FILES_TEST_SUBGROUP_NAME "object_copy_between_files_test" +#define OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME "copied_group" +#define OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME "copied_dset" +#define OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME "copied_dtype" +#define OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME "object_copy_test_file.h5" 
+#define OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME "group_to_copy" +#define OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME "dset_to_copy" +#define OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME "dtype_to_copy" +#define OBJECT_COPY_BETWEEN_FILES_TEST_SPACE_RANK 2 +#define OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS 3 +#define OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE 256 + +#define OBJECT_COPY_INVALID_PARAMS_TEST_SUBGROUP_NAME "object_copy_invalid_params_test" +#define OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME "object_copy_invalid_params_group" +#define OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2 "object_copy_invalid_params_group_copy" + +#define OBJECT_VISIT_TEST_NUM_OBJS_VISITED 4 +#define OBJECT_VISIT_TEST_SUBGROUP_NAME "object_visit_test" +#define OBJECT_VISIT_TEST_SPACE_RANK 2 +#define OBJECT_VISIT_TEST_GROUP_NAME "object_visit_test_group" +#define OBJECT_VISIT_TEST_DSET_NAME "object_visit_test_dset" +#define OBJECT_VISIT_TEST_TYPE_NAME "object_visit_test_type" + +#define OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED 1 +#define OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME "object_visit_soft_link" +#define OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1 "links_group" +#define OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2 "objects_group" +#define OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME1 "soft_link1" +#define OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME2 "soft_link2" +#define OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME3 "soft_link3" +#define OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME1 "group1" +#define OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME2 "group2" +#define OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME3 "group3" + +#define OBJECT_VISIT_DANGLING_LINK_TEST_SUBGROUP_NAME "object_visit_dangling_link_test" +#define OBJECT_VISIT_DANGLING_LINK_TEST_LINK_NAME1 "dangling_link1" +#define OBJECT_VISIT_DANGLING_LINK_TEST_LINK_NAME2 "dangling_link2" +#define OBJECT_VISIT_DANGLING_LINK_TEST_LINK_NAME3 "dangling_link3" + +#define OBJECT_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME "object_visit_invalid_params_test" +#define OBJECT_VISIT_INVALID_PARAMS_TEST_GROUP_NAME "object_visit_invalid_params_group" + +#define OBJECT_CLOSE_TEST_SPACE_RANK 2 +#define OBJECT_CLOSE_TEST_GROUP_NAME "object_close_test" +#define OBJECT_CLOSE_TEST_GRP_NAME "object_close_test_group" +#define OBJECT_CLOSE_TEST_DSET_NAME "object_close_test_dset" +#define OBJECT_CLOSE_TEST_TYPE_NAME "object_close_test_type" + +#define OBJECT_LINK_TEST_GROUP_NAME "object_link_test_group" +#define OBJECT_LINK_TEST_GROUP_NAME2 "object_link_test_group_link" +#define OBJECT_LINK_TEST_DSET_NAME "object_link_test_dataset" +#define OBJECT_LINK_TEST_DTYPE_NAME "object_link_test_datatype" +#define OBJECT_LINK_TEST_SPACE_RANK 2 + +#define OBJECT_LINK_INVALID_PARAMS_TEST_GROUP_NAME "object_link_invalid_params_test_group" + +#define OBJ_REF_GET_TYPE_TEST_SUBGROUP_NAME "obj_ref_get_obj_type_test" +#define OBJ_REF_GET_TYPE_TEST_DSET_NAME "ref_dset" +#define OBJ_REF_GET_TYPE_TEST_TYPE_NAME "ref_dtype" +#define OBJ_REF_GET_TYPE_TEST_SPACE_RANK 2 + +#define OBJ_REF_DATASET_WRITE_TEST_SUBGROUP_NAME "obj_ref_write_test" +#define OBJ_REF_DATASET_WRITE_TEST_REF_DSET_NAME "ref_dset" +#define OBJ_REF_DATASET_WRITE_TEST_REF_TYPE_NAME "ref_dtype" +#define OBJ_REF_DATASET_WRITE_TEST_SPACE_RANK 1 +#define OBJ_REF_DATASET_WRITE_TEST_DSET_NAME "obj_ref_dset" + +#define OBJ_REF_DATASET_READ_TEST_SUBGROUP_NAME "obj_ref_read_test" +#define OBJ_REF_DATASET_READ_TEST_REF_DSET_NAME "ref_dset" +#define OBJ_REF_DATASET_READ_TEST_REF_TYPE_NAME "ref_dtype" +#define OBJ_REF_DATASET_READ_TEST_SPACE_RANK 1 +#define OBJ_REF_DATASET_READ_TEST_DSET_NAME 
"obj_ref_dset" + +#define OBJ_REF_DATASET_EMPTY_WRITE_TEST_SUBGROUP_NAME "obj_ref_empty_write_test" +#define OBJ_REF_DATASET_EMPTY_WRITE_TEST_SPACE_RANK 1 +#define OBJ_REF_DATASET_EMPTY_WRITE_TEST_DSET_NAME "obj_ref_dset" + +#define OBJECT_REF_COUNT_TEST_SUBGROUP_NAME "ref_count_test" +#define OBJECT_REF_COUNT_TEST_GRP_NAME "ref_count_test_group" +#define OBJECT_REF_COUNT_TEST_DSET_NAME "ref_count_dset" +#define OBJECT_REF_COUNT_TEST_TYPE_NAME "ref_count_dtype" +#define OBJECT_REF_COUNT_TEST_DSET_SPACE_RANK 2 + +#endif diff --git a/test/API/H5_api_test.c b/test/API/H5_api_test.c new file mode 100644 index 00000000000..6d61b754656 --- /dev/null +++ b/test/API/H5_api_test.c @@ -0,0 +1,227 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * A test suite which only makes public HDF5 API calls and which is meant + * to test the native VOL connector or a specified HDF5 VOL connector (or + * set of connectors stacked with each other). This test suite must assume + * that a VOL connector could only implement the File interface. Therefore, + * the suite should check that a particular piece of functionality is supported + * by the VOL connector before actually testing it. If the functionality is + * not supported, the test should simply be skipped, perhaps with a note as + * to why the test was skipped, if possible. + * + * If the VOL connector being used supports the creation of groups, this + * test suite will attempt to organize the output of these various tests + * into groups based on their respective HDF5 interface. 
+ */ + +#include "H5_api_test.h" + +#include "H5_api_attribute_test.h" +#include "H5_api_dataset_test.h" +#include "H5_api_datatype_test.h" +#include "H5_api_file_test.h" +#include "H5_api_group_test.h" +#include "H5_api_link_test.h" +#include "H5_api_misc_test.h" +#include "H5_api_object_test.h" +#include "H5_api_test_util.h" +#ifdef H5_API_TEST_HAVE_ASYNC +#include "H5_api_async_test.h" +#endif + +char H5_api_test_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; + +const char *test_path_prefix; + +/* X-macro to define the following for each test: + * - enum type + * - name + * - test function + * - enabled by default + */ +#ifdef H5_API_TEST_HAVE_ASYNC +#define H5_API_TESTS \ + X(H5_API_TEST_NULL, "", NULL, 0) \ + X(H5_API_TEST_FILE, "file", H5_api_file_test, 1) \ + X(H5_API_TEST_GROUP, "group", H5_api_group_test, 1) \ + X(H5_API_TEST_DATASET, "dataset", H5_api_dataset_test, 1) \ + X(H5_API_TEST_DATATYPE, "datatype", H5_api_datatype_test, 1) \ + X(H5_API_TEST_ATTRIBUTE, "attribute", H5_api_attribute_test, 1) \ + X(H5_API_TEST_LINK, "link", H5_api_link_test, 1) \ + X(H5_API_TEST_OBJECT, "object", H5_api_object_test, 1) \ + X(H5_API_TEST_MISC, "misc", H5_api_misc_test, 1) \ + X(H5_API_TEST_ASYNC, "async", H5_api_async_test, 1) \ + X(H5_API_TEST_MAX, "", NULL, 0) +#else +#define H5_API_TESTS \ + X(H5_API_TEST_NULL, "", NULL, 0) \ + X(H5_API_TEST_FILE, "file", H5_api_file_test, 1) \ + X(H5_API_TEST_GROUP, "group", H5_api_group_test, 1) \ + X(H5_API_TEST_DATASET, "dataset", H5_api_dataset_test, 1) \ + X(H5_API_TEST_DATATYPE, "datatype", H5_api_datatype_test, 1) \ + X(H5_API_TEST_ATTRIBUTE, "attribute", H5_api_attribute_test, 1) \ + X(H5_API_TEST_LINK, "link", H5_api_link_test, 1) \ + X(H5_API_TEST_OBJECT, "object", H5_api_object_test, 1) \ + X(H5_API_TEST_MISC, "misc", H5_api_misc_test, 1) \ + X(H5_API_TEST_MAX, "", NULL, 0) +#endif + +#define X(a, b, c, d) a, +enum H5_api_test_type { H5_API_TESTS }; +#undef X +#define X(a, b, c, d) b, +static const char *const H5_api_test_name[] = {H5_API_TESTS}; +#undef X +#define X(a, b, c, d) c, +static int (*H5_api_test_func[])(void) = {H5_API_TESTS}; +#undef X +#define X(a, b, c, d) d, +static int H5_api_test_enabled[] = {H5_API_TESTS}; +#undef X + +static enum H5_api_test_type +H5_api_test_name_to_type(const char *test_name) +{ + enum H5_api_test_type i = 0; + + while (strcmp(H5_api_test_name[i], test_name) && i != H5_API_TEST_MAX) + i++; + + return ((i == H5_API_TEST_MAX) ? H5_API_TEST_NULL : i); +} + +static void +H5_api_test_run(void) +{ + enum H5_api_test_type i; + + for (i = H5_API_TEST_FILE; i < H5_API_TEST_MAX; i++) + if (H5_api_test_enabled[i]) + (void)H5_api_test_func[i](); +} + +/******************************************************************************/ + +int +main(int argc, char **argv) +{ + const char *vol_connector_name; + unsigned seed; + hid_t fapl_id = H5I_INVALID_HID; + hbool_t err_occurred = FALSE; + + /* Simple argument checking, TODO can improve that later */ + if (argc > 1) { + enum H5_api_test_type i = H5_api_test_name_to_type(argv[1]); + if (i != H5_API_TEST_NULL) { + /* Run only specific API test */ + memset(H5_api_test_enabled, 0, sizeof(H5_api_test_enabled)); + H5_api_test_enabled[i] = 1; + } + } + +#ifdef H5_HAVE_PARALLEL + /* If HDF5 was built with parallel enabled, go ahead and call MPI_Init before + * running these tests. Even though these are meant to be serial tests, they will + * likely be run using mpirun (or similar) and we cannot necessarily expect HDF5 or + * an HDF5 VOL connector to call MPI_Init. 
+ */ + MPI_Init(&argc, &argv); +#endif + + /* h5_reset(); */ + + n_tests_run_g = 0; + n_tests_passed_g = 0; + n_tests_failed_g = 0; + n_tests_skipped_g = 0; + + seed = (unsigned)HDtime(NULL); + srand(seed); + + if (NULL == (test_path_prefix = HDgetenv(HDF5_API_TEST_PATH_PREFIX))) + test_path_prefix = ""; + + HDsnprintf(H5_api_test_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s%s", test_path_prefix, + TEST_FILE_NAME); + + if (NULL == (vol_connector_name = HDgetenv(HDF5_VOL_CONNECTOR))) { + HDprintf("No VOL connector selected; using native VOL connector\n"); + vol_connector_name = "native"; + } + + HDprintf("Running API tests with VOL connector '%s'\n\n", vol_connector_name); + HDprintf("Test parameters:\n"); + HDprintf(" - Test file name: '%s'\n", H5_api_test_filename); + HDprintf(" - Test seed: %u\n", seed); + HDprintf("\n\n"); + + /* Retrieve the VOL cap flags - work around an HDF5 + * library issue by creating a FAPL + */ + if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) { + HDfprintf(stderr, "Unable to create FAPL\n"); + err_occurred = TRUE; + goto done; + } + + vol_cap_flags_g = H5VL_CAP_FLAG_NONE; + if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0) { + HDfprintf(stderr, "Unable to retrieve VOL connector capability flags\n"); + err_occurred = TRUE; + goto done; + } + + /* + * Create the file that will be used for all of the tests, + * except for those which test file creation. + */ + if (create_test_container(H5_api_test_filename, vol_cap_flags_g) < 0) { + HDfprintf(stderr, "Unable to create testing container file '%s'\n", H5_api_test_filename); + err_occurred = TRUE; + goto done; + } + + /* Run all the tests that are enabled */ + H5_api_test_run(); + + HDprintf("Cleaning up testing files\n"); + H5Fdelete(H5_api_test_filename, fapl_id); + + if (n_tests_run_g > 0) { + HDprintf("%zu/%zu (%.2f%%) API tests passed with VOL connector '%s'\n", n_tests_passed_g, + n_tests_run_g, ((double)n_tests_passed_g / (double)n_tests_run_g * 100.0), + vol_connector_name); + HDprintf("%zu/%zu (%.2f%%) API tests did not pass with VOL connector '%s'\n", n_tests_failed_g, + n_tests_run_g, ((double)n_tests_failed_g / (double)n_tests_run_g * 100.0), + vol_connector_name); + HDprintf("%zu/%zu (%.2f%%) API tests were skipped with VOL connector '%s'\n", n_tests_skipped_g, + n_tests_run_g, ((double)n_tests_skipped_g / (double)n_tests_run_g * 100.0), + vol_connector_name); + } + +done: + if (fapl_id >= 0 && H5Pclose(fapl_id) < 0) { + HDfprintf(stderr, "Unable to close FAPL\n"); + err_occurred = TRUE; + } + + H5close(); + +#ifdef H5_HAVE_PARALLEL + MPI_Finalize(); +#endif + + HDexit(((err_occurred || n_tests_failed_g > 0) ? EXIT_FAILURE : EXIT_SUCCESS)); +} diff --git a/test/API/H5_api_test.h b/test/API/H5_api_test.h new file mode 100644 index 00000000000..296d2960af0 --- /dev/null +++ b/test/API/H5_api_test.h @@ -0,0 +1,73 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_TEST_H +#define H5_API_TEST_H + +#include <stdio.h> +#include <stdlib.h> + +#include "h5test.h" + +#include "H5_api_test_config.h" +#include "H5_api_test_util.h" +#include "H5_api_tests_disabled.h" + +/* Define H5VL_VERSION if not already defined */ +#ifndef H5VL_VERSION +#define H5VL_VERSION 0 +#endif + +/* Define macro to wait forever depending on version */ +#if H5VL_VERSION >= 2 +#define H5_API_TEST_WAIT_FOREVER H5ES_WAIT_FOREVER +#else +#define H5_API_TEST_WAIT_FOREVER UINT64_MAX +#endif + +/******************************************************************************/ + +/* The name of the file that all of the tests will operate on */ +#define TEST_FILE_NAME "H5_api_test.h5" +extern char H5_api_test_filename[]; + +extern const char *test_path_prefix; + +/* + * Environment variable specifying a prefix string to add to + * filenames generated by the API tests + */ +#define HDF5_API_TEST_PATH_PREFIX "HDF5_API_TEST_PATH_PREFIX" + +/* The names of a set of container groups which hold objects + * created by each of the different types of tests. + */ +#define GROUP_TEST_GROUP_NAME "group_tests" +#define ATTRIBUTE_TEST_GROUP_NAME "attribute_tests" +#define DATASET_TEST_GROUP_NAME "dataset_tests" +#define DATATYPE_TEST_GROUP_NAME "datatype_tests" +#define LINK_TEST_GROUP_NAME "link_tests" +#define OBJECT_TEST_GROUP_NAME "object_tests" +#define MISCELLANEOUS_TEST_GROUP_NAME "miscellaneous_tests" + +#define ARRAY_LENGTH(array) sizeof(array) / sizeof(array[0]) + +#define UNUSED(o) (void)(o); + +#define H5_API_TEST_FILENAME_MAX_LENGTH 1024 + +/* The maximum size of a dimension in an HDF5 dataspace as allowed + * for this testing suite so as not to try to create too large + * of a dataspace/datatype. */ +#define MAX_DIM_SIZE 16 + +#endif diff --git a/test/API/H5_api_test_config.h.in b/test/API/H5_api_test_config.h.in new file mode 100644 index 00000000000..c1833fafae6 --- /dev/null +++ b/test/API/H5_api_test_config.h.in @@ -0,0 +1,66 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_TEST_CONFIG_H +#define H5_API_TEST_CONFIG_H + +#include "hdf5.h" + +#cmakedefine H5_API_TEST_HAVE_ASYNC + +#ifdef H5_HAVE_PARALLEL +#cmakedefine MPIEXEC_EXECUTABLE "@MPIEXEC_EXECUTABLE@" +#cmakedefine MPIEXEC "@MPIEXEC@" /* For compatibility */ +#ifndef MPIEXEC_EXECUTABLE +# define MPIEXEC_EXECUTABLE MPIEXEC +#endif +#cmakedefine MPIEXEC_NUMPROC_FLAG "@MPIEXEC_NUMPROC_FLAG@" +#cmakedefine MPIEXEC_PREFLAGS "@MPIEXEC_PREFLAGS@" +#cmakedefine MPIEXEC_POSTFLAGS "@MPIEXEC_POSTFLAGS@" +/* Server-specific flags if different */ +#cmakedefine MPIEXEC_SERVER_PREFLAGS "@MPIEXEC_SERVER_PREFLAGS@" +#cmakedefine MPIEXEC_SERVER_POSTFLAGS "@MPIEXEC_SERVER_POSTFLAGS@" +#cmakedefine MPIEXEC_MAX_NUMPROCS @MPIEXEC_MAX_NUMPROCS@ +#endif /* H5_HAVE_PARALLEL */ + +#cmakedefine DART_TESTING_TIMEOUT @DART_TESTING_TIMEOUT@ +#ifndef DART_TESTING_TIMEOUT +# define DART_TESTING_TIMEOUT 1500 +#endif + +#cmakedefine H5_API_TEST_ENV_VARS "@H5_API_TEST_ENV_VARS@" + +#cmakedefine H5_API_TEST_INIT_COMMAND "@H5_API_TEST_INIT_COMMAND@" + +#cmakedefine H5_API_TEST_SERVER_START_MSG "@H5_API_TEST_SERVER_START_MSG@" +#ifndef H5_API_TEST_SERVER_START_MSG +# define H5_API_TEST_SERVER_START_MSG "Waiting" +#endif +#cmakedefine H5_API_TEST_SERVER_EXIT_COMMAND "@H5_API_TEST_SERVER_EXIT_COMMAND@" + +#cmakedefine H5_API_TEST_CLIENT_HELPER_START_MSG "@H5_API_TEST_CLIENT_HELPER_START_MSG@" +#ifndef H5_API_TEST_CLIENT_HELPER_START_MSG +# define H5_API_TEST_CLIENT_HELPER_START_MSG "Waiting" +#endif +#cmakedefine H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND "@H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND@" + +#cmakedefine H5_API_TEST_CLIENT_INIT_TOKEN_REGEX "@H5_API_TEST_CLIENT_INIT_TOKEN_REGEX@" +#ifndef H5_API_TEST_CLIENT_INIT_TOKEN_REGEX +# define H5_API_TEST_CLIENT_INIT_TOKEN_REGEX "^token" +#endif +#cmakedefine H5_API_TEST_CLIENT_INIT_TOKEN_VAR "@H5_API_TEST_CLIENT_INIT_TOKEN_VAR@" +#ifndef H5_API_TEST_CLIENT_INIT_TOKEN_VAR +# define H5_API_TEST_CLIENT_INIT_TOKEN_VAR "TOKEN" +#endif + + +#endif /* H5_API_TEST_CONFIG_H */ diff --git a/test/API/H5_api_test_util.c b/test/API/H5_api_test_util.c new file mode 100644 index 00000000000..7fec2b65a25 --- /dev/null +++ b/test/API/H5_api_test_util.c @@ -0,0 +1,819 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_test.h" +#include "H5_api_test_util.h" + +/* + * The maximum allowable size of a generated datatype. + * + * NOTE: HDF5 currently has limits on the maximum size of + * a datatype of an object, as this information is stored + * in the object header. In order to provide maximum + * compatibility between the native VOL connector and others + * for this test suite, we limit the size of a datatype here. + * This value should be adjusted as future HDF5 development + * allows. 
+ */ +#define GENERATED_DATATYPE_MAX_SIZE 65536 + +/* + * The maximum size of a datatype for compact objects that + * must fit within the size of a native HDF5 object header message. + * This is typically used for attributes and compact datasets. + */ +#define COMPACT_DATATYPE_MAX_SIZE 1024 + +/* The maximum level of recursion that the generate_random_datatype() + * function should go down to, before being forced to choose a base type + * in order to not cause a stack overflow. + */ +#define TYPE_GEN_RECURSION_MAX_DEPTH 3 + +/* The maximum number of members allowed in an HDF5 compound type, as + * generated by the generate_random_datatype() function, for ease of + * development. + */ +#define COMPOUND_TYPE_MAX_MEMBERS 4 + +/* The maximum number and size of the dimensions of an HDF5 array + * datatype, as generated by the generate_random_datatype() function. + */ +#define ARRAY_TYPE_MAX_DIMS 4 + +/* The maximum number of members and the maximum size of those + * members' names for an HDF5 enum type, as generated by the + * generate_random_datatype() function. + */ +#define ENUM_TYPE_MAX_MEMBER_NAME_LENGTH 256 +#define ENUM_TYPE_MAX_MEMBERS 16 + +/* The maximum size of an HDF5 string datatype, as created by the + * generate_random_datatype() function. + */ +#define STRING_TYPE_MAX_SIZE 1024 + +/* + * The maximum dimensionality and dimension size of a dataspace + * generated for an attribute or compact dataset. + */ +#define COMPACT_SPACE_MAX_DIM_SIZE 4 +#define COMPACT_SPACE_MAX_DIMS 3 + +/* + * Helper function to generate a random HDF5 datatype in order to thoroughly + * test support for datatypes. The parent_class parameter is to support + * recursive generation of datatypes. In most cases, this function should be + * called with H5T_NO_CLASS for the parent_class parameter. 
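+ *
+ * Typical usage in this suite (sketch):
+ *
+ *     hid_t dtype = generate_random_datatype(H5T_NO_CLASS, FALSE);
+ *     ...
+ *     H5Tclose(dtype);
+ *
+ * Passing TRUE for is_compact caps the generated type at a size suitable for
+ * attributes and compact datasets (see COMPACT_DATATYPE_MAX_SIZE above).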
+ */ +/* + * XXX: limit size of datatype generated + */ +hid_t +generate_random_datatype(H5T_class_t parent_class, hbool_t is_compact) +{ + static int depth = 0; + hsize_t *array_dims = NULL; + size_t i; + hid_t compound_members[COMPOUND_TYPE_MAX_MEMBERS]; + hid_t datatype = H5I_INVALID_HID; + + depth++; + + for (i = 0; i < COMPOUND_TYPE_MAX_MEMBERS; i++) + compound_members[i] = H5I_INVALID_HID; + + switch (rand() % H5T_NCLASSES) { +case_integer: + case H5T_INTEGER: { + switch (rand() % 16) { + case 0: + if ((datatype = H5Tcopy(H5T_STD_I8BE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 1: + if ((datatype = H5Tcopy(H5T_STD_I8LE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 2: + if ((datatype = H5Tcopy(H5T_STD_I16BE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 3: + if ((datatype = H5Tcopy(H5T_STD_I16LE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 4: + if ((datatype = H5Tcopy(H5T_STD_I32BE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 5: + if ((datatype = H5Tcopy(H5T_STD_I32LE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 6: + if ((datatype = H5Tcopy(H5T_STD_I64BE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 7: + if ((datatype = H5Tcopy(H5T_STD_I64LE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 8: + if ((datatype = H5Tcopy(H5T_STD_U8BE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 9: + if ((datatype = H5Tcopy(H5T_STD_U8LE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 10: + if ((datatype = H5Tcopy(H5T_STD_U16BE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 11: + if ((datatype = H5Tcopy(H5T_STD_U16LE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 12: + if ((datatype = H5Tcopy(H5T_STD_U32BE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 13: + if ((datatype = H5Tcopy(H5T_STD_U32LE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 14: + if ((datatype = H5Tcopy(H5T_STD_U64BE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + case 15: + if ((datatype = H5Tcopy(H5T_STD_U64LE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined integer type\n"); + goto done; + } + + break; + + default: + H5_FAILED(); + HDprintf(" invalid value for predefined integer type; should not happen\n"); + goto done; + } + + break; + } + +case_float: + case H5T_FLOAT: { + switch (rand() % 4) { + case 0: + if ((datatype = H5Tcopy(H5T_IEEE_F32BE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined floating-point type\n"); + goto done; + } + + break; + + case 1: + if ((datatype = H5Tcopy(H5T_IEEE_F32LE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined floating-point 
type\n"); + goto done; + } + + break; + + case 2: + if ((datatype = H5Tcopy(H5T_IEEE_F64BE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined floating-point type\n"); + goto done; + } + + break; + + case 3: + if ((datatype = H5Tcopy(H5T_IEEE_F64LE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy predefined floating-point type\n"); + goto done; + } + + break; + + default: + H5_FAILED(); + HDprintf(" invalid value for floating point type; should not happen\n"); + goto done; + } + + break; + } + +case_time: + case H5T_TIME: { + /* Time datatype is unsupported, try again */ + goto reroll; + break; + } + +case_string: + case H5T_STRING: { + /* Note: currently only H5T_CSET_ASCII is supported for the character set and + * only H5T_STR_NULLTERM is supported for string padding for variable-length + * strings and only H5T_STR_NULLPAD is supported for string padding for + * fixed-length strings, but these may change in the future. + */ + if (0 == (rand() % 2)) { + if ((datatype = H5Tcreate(H5T_STRING, (size_t)(rand() % STRING_TYPE_MAX_SIZE) + 1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create fixed-length string datatype\n"); + goto done; + } + + if (H5Tset_strpad(datatype, H5T_STR_NULLPAD) < 0) { + H5_FAILED(); + HDprintf(" couldn't set H5T_STR_NULLPAD for fixed-length string type\n"); + goto done; + } + } + else { + /* + * Currently, all VL datatypes are disabled. + */ + goto reroll; + +#if 0 + if ((datatype = H5Tcreate(H5T_STRING, H5T_VARIABLE)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create variable-length string datatype\n"); + goto done; + } + + if (H5Tset_strpad(datatype, H5T_STR_NULLTERM) < 0) { + H5_FAILED(); + HDprintf(" couldn't set H5T_STR_NULLTERM for variable-length string type\n"); + goto done; + } +#endif + } + + if (H5Tset_cset(datatype, H5T_CSET_ASCII) < 0) { + H5_FAILED(); + HDprintf(" couldn't set string datatype character set\n"); + goto done; + } + + break; + } + +case_bitfield: + case H5T_BITFIELD: { + /* Bitfield datatype is unsupported, try again */ + goto reroll; + break; + } + +case_opaque: + case H5T_OPAQUE: { + /* Opaque datatype is unsupported, try again */ + goto reroll; + break; + } + +case_compound: + case H5T_COMPOUND: { + size_t num_members; + size_t next_offset = 0; + size_t compound_size = 0; + + /* Currently only allows arrays of integer, float or string. Pick another type if we + * are creating an array of something other than these. Also don't allow recursion + * to go too deep. Pick another type that doesn't recursively call this function. 
*/ + if (H5T_ARRAY == parent_class || depth > TYPE_GEN_RECURSION_MAX_DEPTH) + goto reroll; + + if ((datatype = H5Tcreate(H5T_COMPOUND, 1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create compound datatype\n"); + goto done; + } + + num_members = (size_t)(rand() % COMPOUND_TYPE_MAX_MEMBERS + 1); + + for (i = 0; i < num_members; i++) { + size_t member_size; + char member_name[256]; + + HDsnprintf(member_name, 256, "compound_member%zu", i); + + if ((compound_members[i] = generate_random_datatype(H5T_NO_CLASS, is_compact)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create compound datatype member %zu\n", i); + goto done; + } + + if (!(member_size = H5Tget_size(compound_members[i]))) { + H5_FAILED(); + HDprintf(" couldn't get compound member %zu size\n", i); + goto done; + } + + compound_size += member_size; + + if (H5Tset_size(datatype, compound_size) < 0) { + H5_FAILED(); + HDprintf(" couldn't set size for compound datatype\n"); + goto done; + } + + if (H5Tinsert(datatype, member_name, next_offset, compound_members[i]) < 0) { + H5_FAILED(); + HDprintf(" couldn't insert compound datatype member %zu\n", i); + goto done; + } + + next_offset += member_size; + } + + break; + } + +case_reference: + case H5T_REFERENCE: { + /* Temporarily disable generation of reference datatypes */ + goto reroll; + + /* Currently only allows arrays of integer, float or string. Pick another type if we + * are creating an array of something other than these. */ + if (H5T_ARRAY == parent_class) + goto reroll; + + if (0 == (rand() % 2)) { + if ((datatype = H5Tcopy(H5T_STD_REF_OBJ)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy object reference datatype\n"); + goto done; + } + } + else { + /* Region references are currently unsupported */ + goto reroll; + + if ((datatype = H5Tcopy(H5T_STD_REF_DSETREG)) < 0) { + H5_FAILED(); + HDprintf(" couldn't copy region reference datatype\n"); + goto done; + } + } + + break; + } + +case_enum: + case H5T_ENUM: { + /* Currently doesn't currently support ARRAY of ENUM, so try another type + * if this happens. */ + if (H5T_ARRAY == parent_class) + goto reroll; + + if ((datatype = H5Tenum_create(H5T_NATIVE_INT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create enum datatype\n"); + goto done; + } + + for (i = 0; i < (size_t)(rand() % ENUM_TYPE_MAX_MEMBERS + 1); i++) { + char name[ENUM_TYPE_MAX_MEMBER_NAME_LENGTH]; + int value = rand(); + + HDsnprintf(name, ENUM_TYPE_MAX_MEMBER_NAME_LENGTH, "enum_val%zu", i); + + if (H5Tenum_insert(datatype, name, &value) < 0) { + H5_FAILED(); + HDprintf(" couldn't insert member into enum datatype\n"); + goto done; + } + } + + break; + } + +case_vlen: + case H5T_VLEN: { + /* Variable-length datatypes are unsupported, try again */ + goto reroll; + break; + } + +case_array: + case H5T_ARRAY: { + unsigned ndims; + hid_t base_datatype = H5I_INVALID_HID; + + /* Currently doesn't currently support ARRAY of ARRAY, so try another type + * if this happens. Also check for too much recursion. 
*/ + if (H5T_ARRAY == parent_class || depth > TYPE_GEN_RECURSION_MAX_DEPTH) + goto reroll; + + ndims = (unsigned)(rand() % ARRAY_TYPE_MAX_DIMS + 1); + + if (NULL == (array_dims = (hsize_t *)HDmalloc(ndims * sizeof(*array_dims)))) + goto done; + + for (i = 0; i < ndims; i++) + array_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + + if ((base_datatype = generate_random_datatype(H5T_ARRAY, is_compact)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create array base datatype\n"); + goto done; + } + + if ((datatype = H5Tarray_create2(base_datatype, ndims, array_dims)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create array datatype\n"); + goto done; + } + + break; + } + + default: + H5_FAILED(); + HDprintf(" invalid datatype class\n"); + break; + } /* end if */ + +done: + if (depth > 0) + depth--; + + if (datatype < 0) { + for (i = 0; i < COMPOUND_TYPE_MAX_MEMBERS; i++) { + if (compound_members[i] > 0 && H5Tclose(compound_members[i]) < 0) { + H5_FAILED(); + HDprintf(" couldn't close compound member %zu\n", i); + } + } + } + + if (array_dims) { + HDfree(array_dims); + array_dims = NULL; + } + + if (is_compact && (depth == 0)) { + size_t type_size; + + /* + * Check to make sure that the generated datatype does + * not exceed the maximum compact datatype size if a + * compact datatype was requested. + */ + if (0 == (type_size = H5Tget_size(datatype))) { + H5_FAILED(); + HDprintf(" failed to retrieve datatype's size\n"); + H5Tclose(datatype); + datatype = H5I_INVALID_HID; + } + else { + if (type_size > COMPACT_DATATYPE_MAX_SIZE) { + /* + * Generate a new datatype. + */ + H5Tclose(datatype); + datatype = H5I_INVALID_HID; + goto reroll; + } + } + } + + return datatype; + +reroll: + if (depth > 0) + depth--; + + /* + * The datatype generation resulted in a datatype that is currently invalid + * for these tests, try again. + */ + switch (rand() % H5T_NCLASSES) { + case H5T_INTEGER: + goto case_integer; + case H5T_FLOAT: + goto case_float; + case H5T_TIME: + goto case_time; + case H5T_STRING: + goto case_string; + case H5T_BITFIELD: + goto case_bitfield; + case H5T_OPAQUE: + goto case_opaque; + case H5T_COMPOUND: + goto case_compound; + case H5T_REFERENCE: + goto case_reference; + case H5T_ENUM: + goto case_enum; + case H5T_VLEN: + goto case_vlen; + case H5T_ARRAY: + goto case_array; + default: + H5_FAILED(); + HDprintf(" invalid value for goto\n"); + break; + } + + return H5I_INVALID_HID; +} + +/* + * Helper function to generate a random HDF5 dataspace in order to thoroughly + * test support for dataspaces. + */ +hid_t +generate_random_dataspace(int rank, const hsize_t *max_dims, hsize_t *dims_out, hbool_t is_compact) +{ + hsize_t dataspace_dims[H5S_MAX_RANK]; + size_t i; + hid_t dataspace_id = H5I_INVALID_HID; + + if (rank < 0) + TEST_ERROR; + if (is_compact && (rank > COMPACT_SPACE_MAX_DIMS)) { + HDprintf(" current rank of compact dataspace (%lld) exceeds maximum dimensionality (%lld)\n", + (long long)rank, (long long)COMPACT_SPACE_MAX_DIMS); + TEST_ERROR; + } + + /* + * XXX: if max_dims is specified, make sure that the dimensions generated + * are not larger than this. 
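+     * One way to do that (not implemented here) would be to clamp each
+     * generated extent against the caller's limit, e.g.:
+     *
+     *     if (max_dims && max_dims[i] != H5S_UNLIMITED && dataspace_dims[i] > max_dims[i])
+     *         dataspace_dims[i] = max_dims[i];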
+ */ + for (i = 0; i < (size_t)rank; i++) { + if (is_compact) + dataspace_dims[i] = (hsize_t)(rand() % COMPACT_SPACE_MAX_DIM_SIZE + 1); + else + dataspace_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1); + + if (dims_out) + dims_out[i] = dataspace_dims[i]; + } + + if ((dataspace_id = H5Screate_simple(rank, dataspace_dims, max_dims)) < 0) + TEST_ERROR; + + return dataspace_id; + +error: + return H5I_INVALID_HID; +} + +int +create_test_container(char *filename, uint64_t vol_cap_flags) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + + if (!(vol_cap_flags & H5VL_CAP_FLAG_FILE_BASIC)) { + HDprintf(" VOL connector doesn't support file creation\n"); + goto error; + } + + if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + HDprintf(" couldn't create testing container file '%s'\n", filename); + goto error; + } + + if (vol_cap_flags & H5VL_CAP_FLAG_GROUP_BASIC) { + /* Create container groups for each of the test interfaces + * (group, attribute, dataset, etc.). + */ + if ((group_id = H5Gcreate2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= + 0) { + HDprintf(" created container group for Group tests\n"); + H5Gclose(group_id); + } + + if ((group_id = H5Gcreate2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) >= 0) { + HDprintf(" created container group for Attribute tests\n"); + H5Gclose(group_id); + } + + if ((group_id = + H5Gcreate2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= 0) { + HDprintf(" created container group for Dataset tests\n"); + H5Gclose(group_id); + } + + if ((group_id = + H5Gcreate2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= 0) { + HDprintf(" created container group for Datatype tests\n"); + H5Gclose(group_id); + } + + if ((group_id = H5Gcreate2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= + 0) { + HDprintf(" created container group for Link tests\n"); + H5Gclose(group_id); + } + + if ((group_id = H5Gcreate2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= + 0) { + HDprintf(" created container group for Object tests\n"); + H5Gclose(group_id); + } + + if ((group_id = H5Gcreate2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) >= 0) { + HDprintf(" created container group for Miscellaneous tests\n"); + H5Gclose(group_id); + } + } + + if (H5Fclose(file_id) < 0) { + HDprintf(" failed to close testing container\n"); + goto error; + } + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(group_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return -1; +} + +/* + * Add a prefix to the given filename. The caller + * is responsible for freeing the returned filename + * pointer with HDfree(). 
+ */ +herr_t +prefix_filename(const char *prefix, const char *filename, char **filename_out) +{ + char *out_buf = NULL; + herr_t ret_value = SUCCEED; + + if (!prefix) { + HDprintf(" invalid file prefix\n"); + ret_value = FAIL; + goto done; + } + if (!filename || (*filename == '\0')) { + HDprintf(" invalid filename\n"); + ret_value = FAIL; + goto done; + } + if (!filename_out) { + HDprintf(" invalid filename_out buffer\n"); + ret_value = FAIL; + goto done; + } + + if (NULL == (out_buf = HDmalloc(H5_API_TEST_FILENAME_MAX_LENGTH))) { + HDprintf(" couldn't allocated filename buffer\n"); + ret_value = FAIL; + goto done; + } + + HDsnprintf(out_buf, H5_API_TEST_FILENAME_MAX_LENGTH, "%s%s", prefix, filename); + + *filename_out = out_buf; + +done: + return ret_value; +} + +/* + * Calls H5Fdelete on the given filename. If a prefix string + * is given, adds that prefix string to the filename before + * calling H5Fdelete + */ +herr_t +remove_test_file(const char *prefix, const char *filename) +{ + const char *test_file; + char *prefixed_filename = NULL; + herr_t ret_value = SUCCEED; + + if (prefix) { + if (prefix_filename(prefix, filename, &prefixed_filename) < 0) { + HDprintf(" couldn't prefix filename\n"); + ret_value = FAIL; + goto done; + } + + test_file = prefixed_filename; + } + else + test_file = filename; + + if (H5Fdelete(test_file, H5P_DEFAULT) < 0) { + HDprintf(" couldn't remove file '%s'\n", test_file); + ret_value = FAIL; + goto done; + } + +done: + HDfree(prefixed_filename); + + return ret_value; +} diff --git a/test/API/H5_api_test_util.h b/test/API/H5_api_test_util.h new file mode 100644 index 00000000000..86b0e3ebf96 --- /dev/null +++ b/test/API/H5_api_test_util.h @@ -0,0 +1,24 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_TEST_UTIL_H_ +#define H5_API_TEST_UTIL_H_ + +#include "hdf5.h" + +hid_t generate_random_datatype(H5T_class_t parent_class, hbool_t is_compact); +hid_t generate_random_dataspace(int rank, const hsize_t *max_dims, hsize_t *dims_out, hbool_t is_compact); +int create_test_container(char *filename, uint64_t vol_cap_flags); +herr_t prefix_filename(const char *prefix, const char *filename, char **filename_out); +herr_t remove_test_file(const char *prefix, const char *filename); + +#endif /* H5_API_TEST_UTIL_H_ */ diff --git a/test/API/H5_api_tests_disabled.h b/test/API/H5_api_tests_disabled.h new file mode 100644 index 00000000000..672d2d93b18 --- /dev/null +++ b/test/API/H5_api_tests_disabled.h @@ -0,0 +1,46 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. 
*
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_TESTS_DISABLED_H
+#define H5_API_TESTS_DISABLED_H
+
+#include "H5_api_test_config.h"
+
+/* Contains #defines to temporarily disable API tests based
+ * on problematic or unsupported functionality */
+
+#define NO_LARGE_TESTS
+#define NO_ATTR_FILL_VALUE_SUPPORT
+#define NO_DECREASING_ALPHA_ITER_ORDER
+#define NO_USER_DEFINED_LINKS
+#define NO_EXTERNAL_LINKS
+#define NO_ITERATION_RESTART
+#define NO_FILE_MOUNTS
+#define NO_CLEAR_ON_SHRINK
+#define NO_DOUBLE_OBJECT_OPENS
+#define NO_OBJECT_GET_NAME
+#define WRONG_DATATYPE_OBJ_COUNT
+#define NO_SHARED_DATATYPES
+#define NO_INVALID_PROPERTY_LIST_TESTS
+#define NO_MAX_LINK_CRT_ORDER_RESET
+#define NO_PREVENT_HARD_LINKS_ACROSS_FILES
+#define NO_SOFT_LINK_MANY_DANGLING
+#define NO_ID_PREVENTS_OBJ_DELETE
+#define NO_WRITE_SAME_ELEMENT_TWICE
+#define NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+#define NO_DELETE_NONEXISTENT_ATTRIBUTE
+#define NO_TRUNCATE_OPEN_FILE
+#define NO_CHECK_SELECTION_BOUNDS
+#define NO_VALIDATE_DATASPACE
+#define NO_REFERENCE_TO_DELETED
+
+#endif /* H5_API_TESTS_DISABLED_H */
diff --git a/test/API/README.md b/test/API/README.md
new file mode 100644
index 00000000000..d57472da04a
--- /dev/null
+++ b/test/API/README.md
@@ -0,0 +1,86 @@
+# HDF5 API Tests
+
+This directory contains several test applications that exercise [HDF5](https://github.com/HDFGroup/hdf5)'s
+public API and serve as regression tests for HDF5 [VOL Connectors](https://portal.hdfgroup.org/display/HDF5/Virtual+Object+Layer).
+
+## Build Process and options
+
+These HDF5 API tests are enabled and built by default, but can be disabled if desired.
+The following build options are available to influence how the API tests get built:
+
+### CMake
+
+To set an option, prepend it with `-D` when passing it to the `cmake` command.
+For example,
+
+    cmake -DHDF5_TEST_API=OFF ..
+
+`HDF5_TEST_API` (Default: `ON`) - Determines whether the API tests will be built.
+
+`HDF5_TEST_API_INSTALL` (Default: `ON`) - Determines whether the API tests should be installed
+on the system.
+
+`HDF5_TEST_API_ENABLE_ASYNC` (Default: `OFF`) - Determines whether tests for HDF5's asynchronous
+I/O capabilities should be enabled. Note that the "native" HDF5 VOL connector doesn't support
+this functionality, so these tests are directed towards VOL connectors that do.
+
+`HDF5_TEST_ENABLE_DRIVER` (Default: `OFF`) - Determines whether the API test driver program should
+be built. This driver program is useful when a VOL connector relies upon a server executable
+(as well as possibly additional executables) in order to function. The driver program can be
+supplied with a server executable, which it will attempt to launch before running the API tests
+(see `HDF5_TEST_API_SERVER` below).
+
+`HDF5_TEST_API_SERVER` (Default: empty string) - If `HDF5_TEST_ENABLE_DRIVER` is set to `ON`, this
+option should be edited to point to the server executable that the driver program should attempt
+to launch before running the API tests.
+
+### Autotools
+
+Currently unsupported
+
+### Usage
+
+These API tests currently only support usage with HDF5 VOL connectors that can be loaded dynamically
+as a plugin. For information on how to build a VOL connector in this manner, refer to section 2.3 of
+the [HDF5 VOL Connector Author Guide](https://portal.hdfgroup.org/display/HDF5/HDF5+VOL+Connector+Authors+Guide?preview=/53610813/59903039/vol_connector_author_guide.pdf).
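+
+Building the tests themselves follows the CMake options described above. As a minimal sketch, a
+configuration that also enables the test driver for a connector that requires a separate server
+executable might look like the following (the server path `/path/to/vol_server` is illustrative):
+
+    cmake -DHDF5_TEST_ENABLE_DRIVER=ON \
+          -DHDF5_TEST_API_SERVER=/path/to/vol_server ..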
+ +TODO: section on building VOL connectors alongside HDF5 for use with tests + +These API tests can also be used to test an HDF5 VOL connector that is external to the library. +For convenience, the `HDF5_TEST_API_INSTALL` option can be used to install these tests on the +system where other HDF5 executables (such as `h5dump`) are installed. + +To run these tests with your VOL connector, set the following two environment variables: + +`HDF5_VOL_CONNECTOR` - This environment variable should be set to the name chosen for the VOL connector +to be used. For example, HDF5's DAOS VOL connector uses the name "[daos](https://github.com/HDFGroup/vol-daos/blob/v1.2.0/src/daos_vol.h#L30)" and would therefore set: + + HDF5_VOL_CONNECTOR=daos + +`HDF5_PLUGIN_PATH` - This environment variable should be set to the directory that contains the built +library for the VOL connector to be used. + +Once these are set, the HDF5 API tests will attempt to automatically load the specified VOL connector +and use it when running tests. If HDF5 is unable to locate or load the VOL connector specified, it +will fall back to running the tests with the native HDF5 VOL connector and an error similar to the +following will appear in the test output: + + HDF5-DIAG: Error detected in HDF5 (1.13.0) MPI-process 0: + #000: /home/user/git/hdf5/src/H5.c line 1010 in H5open(): library initialization failed + major: Function entry/exit + minor: Unable to initialize object + #001: /home/user/git/hdf5/src/H5.c line 277 in H5_init_library(): unable to initialize vol interface + major: Function entry/exit + minor: Unable to initialize object + #002: /home/user/git/hdf5/src/H5VLint.c line 199 in H5VL_init_phase2(): unable to set default VOL connector + major: Virtual Object Layer + minor: Can't set value + #003: /home/user/git/hdf5/src/H5VLint.c line 429 in H5VL__set_def_conn(): can't register connector + major: Virtual Object Layer + minor: Unable to register new ID + #004: /home/user/git/hdf5/src/H5VLint.c line 1321 in H5VL__register_connector_by_name(): unable to load VOL connector + major: Virtual Object Layer + minor: Unable to initialize object + +### Help and Support + +For help with building or using the HDF5 API tests, please contact the [HDF Help Desk](https://portal.hdfgroup.org/display/support/The+HDF+Help+Desk). 
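+
+When asking for help, it can also be useful to include the exact invocation used. For example, a
+typical run against an externally built connector might look like the following sketch, where the
+connector name `myvol`, the plugin directory, and the test executable name `h5_api_test` are
+illustrative placeholders:
+
+    export HDF5_VOL_CONNECTOR=myvol
+    export HDF5_PLUGIN_PATH=/path/to/myvol/plugins
+    ./h5_api_test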
diff --git a/test/API/driver/CMakeLists.txt b/test/API/driver/CMakeLists.txt new file mode 100644 index 00000000000..2210068dc41 --- /dev/null +++ b/test/API/driver/CMakeLists.txt @@ -0,0 +1,17 @@ +cmake_minimum_required(VERSION 2.8.12.2 FATAL_ERROR) +project(H5_API_TEST_DRIVER CXX) + +include(CheckAsan) +include(CheckUbsan) + +set(CMAKE_CXX_STANDARD 11) + +set(KWSYS_NAMESPACE h5_api_test_sys) +set(KWSYS_USE_SystemTools 1) +set(KWSYS_USE_Process 1) +set(KWSYS_USE_RegularExpression 1) +add_subdirectory(kwsys) +include_directories(${CMAKE_CURRENT_BINARY_DIR}/kwsys) + +add_executable(h5_api_test_driver h5_api_test_driver.cxx) +target_link_libraries(h5_api_test_driver h5_api_test_sys) diff --git a/test/API/driver/h5_api_test_driver.cxx b/test/API/driver/h5_api_test_driver.cxx new file mode 100644 index 00000000000..b5d982188c8 --- /dev/null +++ b/test/API/driver/h5_api_test_driver.cxx @@ -0,0 +1,910 @@ +#include "h5_api_test_driver.hxx" + +#include "H5_api_test_config.h" + +#include +#include +#include +#include +#include + +#if !defined(_WIN32) || defined(__CYGWIN__) +# include +# include +#endif + +#include +#include + +using std::vector; +using std::string; +using std::cerr; + +// The main function as this class should only be used by this program +int +main(int argc, char *argv[]) +{ + H5APITestDriver d; + return d.Main(argc, argv); +} + +//---------------------------------------------------------------------------- +H5APITestDriver::H5APITestDriver() +{ + this->ClientArgStart = 0; + this->ClientArgCount = 0; + this->ClientHelperArgStart = 0; + this->ClientHelperArgCount = 0; + this->ClientInitArgStart = 0; + this->ClientInitArgCount = 0; + this->ServerArgStart = 0; + this->ServerArgCount = 0; + this->AllowErrorInOutput = false; + // try to make sure that this times out before dart so it can kill all the processes + this->TimeOut = DART_TESTING_TIMEOUT - 10.0; + this->ServerExitTimeOut = 2; /* 2 seconds timeout for server to exit */ + this->ClientHelper = false; + this->ClientInit = false; + this->TestServer = false; + this->TestSerial = false; + this->IgnoreServerResult = false; +} + +//---------------------------------------------------------------------------- +H5APITestDriver::~H5APITestDriver() +{ +} + +//---------------------------------------------------------------------------- +void +H5APITestDriver::SeparateArguments(const char *str, vector &flags) +{ + string arg = str; + string::size_type pos1 = 0; + string::size_type pos2 = arg.find_first_of(" ;"); + if (pos2 == arg.npos) { + flags.push_back(str); + return; + } + while (pos2 != arg.npos) { + flags.push_back(arg.substr(pos1, pos2 - pos1)); + pos1 = pos2 + 1; + pos2 = arg.find_first_of(" ;", pos1 + 1); + } + flags.push_back(arg.substr(pos1, pos2 - pos1)); +} + +//---------------------------------------------------------------------------- +void +H5APITestDriver::CollectConfiguredOptions() +{ + if (this->TimeOut < 0) + this->TimeOut = 1500; + +#ifdef H5_API_TEST_ENV_VARS + this->SeparateArguments(H5_API_TEST_ENV_VARS, this->ClientEnvVars); +#endif + + // now find all the mpi information if mpi run is set +#ifdef MPIEXEC_EXECUTABLE + this->MPIRun = MPIEXEC_EXECUTABLE; +#else + return; +#endif + int maxNumProc = 1; + +# ifdef MPIEXEC_MAX_NUMPROCS + if (!this->TestSerial) + maxNumProc = MPIEXEC_MAX_NUMPROCS; +# endif +# ifdef MPIEXEC_NUMPROC_FLAG + this->MPINumProcessFlag = MPIEXEC_NUMPROC_FLAG; +# endif +# ifdef MPIEXEC_PREFLAGS + this->SeparateArguments(MPIEXEC_PREFLAGS, this->MPIClientPreFlags); +# endif +# ifdef 
MPIEXEC_POSTFLAGS + this->SeparateArguments(MPIEXEC_POSTFLAGS, this->MPIClientPostFlags); +# endif +# ifdef MPIEXEC_SERVER_PREFLAGS + this->SeparateArguments(MPIEXEC_SERVER_PREFLAGS, this->MPIServerPreFlags); +#else + this->MPIServerPreFlags = this->MPIClientPreFlags; +# endif +# ifdef MPIEXEC_SERVER_POSTFLAGS + this->SeparateArguments(MPIEXEC_SERVER_POSTFLAGS, this->MPIServerPostFlags); +#else + this->MPIServerPostFlags = this->MPIClientPostFlags; +# endif + std::stringstream ss; + ss << maxNumProc; + this->MPIServerNumProcessFlag = "1"; + this->MPIClientNumProcessFlag = ss.str(); +} + +//---------------------------------------------------------------------------- +/// This adds the debug/build configuration crap for the executable on windows. +static string +FixExecutablePath(const string &path) +{ +#ifdef CMAKE_INTDIR + string parent_dir = + h5_api_test_sys::SystemTools::GetFilenamePath(path.c_str()); + + string filename = + h5_api_test_sys::SystemTools::GetFilenameName(path); + + if (!h5_api_test_sys::SystemTools::StringEndsWith(parent_dir.c_str(), CMAKE_INTDIR)) { + parent_dir += "/" CMAKE_INTDIR; + } + return parent_dir + "/" + filename; +#endif + + return path; +} + +//---------------------------------------------------------------------------- +int +H5APITestDriver::ProcessCommandLine(int argc, char *argv[]) +{ + int *ArgCountP = NULL; + int i; + for (i = 1; i < argc; ++i) { + if (strcmp(argv[i], "--client") == 0) { + this->ClientExecutable = ::FixExecutablePath(argv[i + 1]); + ++i; /* Skip executable */ + this->ClientArgStart = i + 1; + this->ClientArgCount = this->ClientArgStart; + ArgCountP = &this->ClientArgCount; + continue; + } + if (strcmp(argv[i], "--client-helper") == 0) { + std::cerr << "Client Helper" << std::endl; + this->ClientHelper = true; + this->ClientHelperExecutable = ::FixExecutablePath(argv[i + 1]); + ++i; /* Skip executable */ + this->ClientHelperArgStart = i + 1; + this->ClientHelperArgCount = this->ClientHelperArgStart; + ArgCountP = &this->ClientHelperArgCount; + continue; + } + if (strcmp(argv[i], "--client-init") == 0) { + std::cerr << "Client Init" << std::endl; + this->ClientInit = true; + this->ClientInitExecutable = ::FixExecutablePath(argv[i + 1]); + ++i; /* Skip executable */ + this->ClientInitArgStart = i + 1; + this->ClientInitArgCount = this->ClientInitArgStart; + ArgCountP = &this->ClientInitArgCount; + continue; + } + if (strcmp(argv[i], "--server") == 0) { + std::cerr << "Test Server" << std::endl; + this->TestServer = true; + this->ServerExecutable = ::FixExecutablePath(argv[i + 1]); + ++i; /* Skip executable */ + this->ServerArgStart = i + 1; + this->ServerArgCount = this->ServerArgStart; + ArgCountP = &this->ServerArgCount; + continue; + } + if (strcmp(argv[i], "--timeout") == 0) { + this->TimeOut = atoi(argv[i + 1]); + std::cerr << "The timeout was set to " << this->TimeOut << std::endl; + ArgCountP = NULL; + continue; + } + if (strncmp(argv[i], "--allow-errors", strlen("--allow-errors")) == 0) { + this->AllowErrorInOutput = true; + std::cerr << "The allow errors in output flag was set to " << + this->AllowErrorInOutput << std::endl; + ArgCountP = NULL; + continue; + } + if (strncmp(argv[i], "--allow-server-errors", strlen("--allow-server-errors")) == 0) { + this->IgnoreServerResult = true; + std::cerr << "The allow server errors in output flag was set to " << + this->IgnoreServerResult << std::endl; + ArgCountP = NULL; + continue; + } + if (strcmp(argv[i], "--serial") == 0) { + this->TestSerial = true; + std::cerr << "This is a serial 
test" << std::endl; + ArgCountP = NULL; + continue; + } + if (ArgCountP) + (*ArgCountP)++; + } + + return 1; +} + +//---------------------------------------------------------------------------- +void +H5APITestDriver::CreateCommandLine(vector &commandLine, + const char *cmd, int isServer, int isHelper, const char *numProc, int argStart, + int argCount, char *argv[]) +{ + if (!isServer && this->ClientEnvVars.size()) { + for (unsigned int i = 0; i < this->ClientEnvVars.size(); ++i) + commandLine.push_back(this->ClientEnvVars[i].c_str()); +#ifdef H5_API_TEST_CLIENT_INIT_TOKEN_VAR + if (this->ClientTokenVar.size()) + commandLine.push_back(this->ClientTokenVar.c_str()); +#endif + } + + if (!isHelper && this->MPIRun.size()) { + commandLine.push_back(this->MPIRun.c_str()); + commandLine.push_back(this->MPINumProcessFlag.c_str()); + commandLine.push_back(numProc); + + if (isServer) + for (unsigned int i = 0; i < this->MPIServerPreFlags.size(); ++i) + commandLine.push_back(this->MPIServerPreFlags[i].c_str()); + else + for (unsigned int i = 0; i < this->MPIClientPreFlags.size(); ++i) + commandLine.push_back(this->MPIClientPreFlags[i].c_str()); + } + + commandLine.push_back(cmd); + + if (isServer) + for (unsigned int i = 0; i < this->MPIServerPostFlags.size(); ++i) + commandLine.push_back(MPIServerPostFlags[i].c_str()); + else + for (unsigned int i = 0; i < this->MPIClientPostFlags.size(); ++i) + commandLine.push_back(MPIClientPostFlags[i].c_str()); + + // remaining flags for the test + for (int ii = argStart; ii < argCount; ++ii) { + commandLine.push_back(argv[ii]); + } + + commandLine.push_back(0); +} + +//---------------------------------------------------------------------------- +int +H5APITestDriver::StartServer(h5_api_test_sysProcess *server, const char *name, + vector &out, vector &err) +{ + if (!server) + return 1; + + cerr << "H5APITestDriver: starting process " << name << "\n"; + h5_api_test_sysProcess_SetTimeout(server, this->TimeOut); + h5_api_test_sysProcess_Execute(server); + int foundWaiting = 0; + string output; + while (!foundWaiting) { + int pipe = this->WaitForAndPrintLine(name, server, output, 100.0, out, + err, H5_API_TEST_SERVER_START_MSG, &foundWaiting); + if (pipe == h5_api_test_sysProcess_Pipe_None + || pipe == h5_api_test_sysProcess_Pipe_Timeout) { + break; + } + } + if (foundWaiting) { + cerr << "H5APITestDriver: " << name << " successfully started.\n"; + return 1; + } else { + cerr << "H5APITestDriver: " << name << " never started.\n"; + h5_api_test_sysProcess_Kill(server); + return 0; + } +} + +//---------------------------------------------------------------------------- +int +H5APITestDriver::StartClientHelper(h5_api_test_sysProcess *client, + const char *name, vector &out, vector &err) +{ + if (!client) + return 1; + + cerr << "H5APITestDriver: starting process " << name << "\n"; + h5_api_test_sysProcess_SetTimeout(client, this->TimeOut); + h5_api_test_sysProcess_Execute(client); + int foundWaiting = 0; + string output; + while (!foundWaiting) { + int pipe = this->WaitForAndPrintLine(name, client, output, 100.0, out, + err, H5_API_TEST_CLIENT_HELPER_START_MSG, &foundWaiting); + if (pipe == h5_api_test_sysProcess_Pipe_None + || pipe == h5_api_test_sysProcess_Pipe_Timeout) { + break; + } + } + if (foundWaiting) { + cerr << "H5APITestDriver: " << name << " successfully started.\n"; + return 1; + } else { + cerr << "H5APITestDriver: " << name << " never started.\n"; + h5_api_test_sysProcess_Kill(client); + return 0; + } +} + 
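+// NOTE: The client-init step handled below is optional. When "--client-init"
+// is given, the driver launches that executable first and scans its output
+// for a token matching H5_API_TEST_CLIENT_INIT_TOKEN_REGEX. The captured
+// token is stored in ClientTokenVar as a "NAME=value" string built from
+// H5_API_TEST_CLIENT_INIT_TOKEN_VAR, so that CreateCommandLine can place it,
+// along with any configured client environment variables, at the front of
+// the client command line.
+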
+//---------------------------------------------------------------------------- +int +H5APITestDriver::StartClientInit(h5_api_test_sysProcess *client, + const char *name, vector &out, vector &err) +{ + if (!client) + return 1; + + cerr << "H5APITestDriver: starting process " << name << "\n"; + h5_api_test_sysProcess_SetTimeout(client, this->TimeOut); + h5_api_test_sysProcess_Execute(client); + int foundToken = 0; + string output, token; + while (!foundToken) { + int pipe = this->WaitForAndPrintLine(name, client, output, 100.0, out, + err, NULL, NULL); + if (pipe == h5_api_test_sysProcess_Pipe_None + || pipe == h5_api_test_sysProcess_Pipe_Timeout) { + break; + } + if (this->OutputStringHasToken(name, H5_API_TEST_CLIENT_INIT_TOKEN_REGEX, output, token)) { + foundToken = 1; + this->ClientTokenVar = std::string(H5_API_TEST_CLIENT_INIT_TOKEN_VAR) + + std::string("=") + std::string(token); + break; + } + } + + if (foundToken) { + cerr << "H5APITestDriver: " << name << " token: " << token << " was found.\n"; + return 1; + } else { + cerr << "H5APITestDriver: " << name << " token was not found.\n"; + return 0; + } +} + +//---------------------------------------------------------------------------- +int +H5APITestDriver::StartClient(h5_api_test_sysProcess *client, const char *name) +{ + if (!client) + return 1; + + cerr << "H5APITestDriver: starting process " << name << "\n"; + h5_api_test_sysProcess_SetTimeout(client, this->TimeOut); + h5_api_test_sysProcess_Execute(client); + if (h5_api_test_sysProcess_GetState(client) + == h5_api_test_sysProcess_State_Executing) { + cerr << "H5APITestDriver: " << name << " successfully started.\n"; + return 1; + } else { + this->ReportStatus(client, name); + h5_api_test_sysProcess_Kill(client); + return 0; + } +} + +//---------------------------------------------------------------------------- +void +H5APITestDriver::Stop(h5_api_test_sysProcess *p, const char *name) +{ + if (p) { + cerr << "H5APITestDriver: killing process " << name << "\n"; + h5_api_test_sysProcess_Kill(p); + h5_api_test_sysProcess_WaitForExit(p, 0); + } +} + +//---------------------------------------------------------------------------- +int +H5APITestDriver::OutputStringHasError(const char *pname, string &output) +{ + const char* possibleMPIErrors[] = {"error", "Error", "Missing:", + "core dumped", "process in local group is dead", "Segmentation fault", + "erroneous", "ERROR:", "Error:", + "mpirun can *only* be used with MPI programs", "due to signal", + "failure", "abnormal termination", "failed", "FAILED", "Failed", 0}; + + const char* nonErrors[] = { + "Memcheck, a memory error detector", //valgrind + 0}; + + if (this->AllowErrorInOutput) + return 0; + + vector lines; + vector::iterator it; + h5_api_test_sys::SystemTools::Split(output.c_str(), lines); + + int i, j; + + for (it = lines.begin(); it != lines.end(); ++it) { + for (i = 0; possibleMPIErrors[i]; ++i) { + if (it->find(possibleMPIErrors[i]) != it->npos) { + int found = 1; + for (j = 0; nonErrors[j]; ++j) { + if (it->find(nonErrors[j]) != it->npos) { + found = 0; + cerr << "Non error \"" << it->c_str() + << "\" suppressed " << std::endl; + } + } + if (found) { + cerr + << "H5APITestDriver: ***** Test will fail, because the string: \"" + << possibleMPIErrors[i] + << "\"\nH5APITestDriver: ***** was found in the following output from the " + << pname << ":\n\"" << it->c_str() << "\"\n"; + return 1; + } + } + } + } + return 0; +} + +//---------------------------------------------------------------------------- +int 
+H5APITestDriver::OutputStringHasToken(const char *pname, const char *regex, + string &output, string &token) +{ + vector lines; + vector::iterator it; + h5_api_test_sys::SystemTools::Split(output.c_str(), lines); + h5_api_test_sys::RegularExpression re(regex); + + for (it = lines.begin(); it != lines.end(); ++it) { + if (re.find(*it)) { + token = re.match(1); + return 1; + } + } + + return 0; +} + +//---------------------------------------------------------------------------- +#define H5_API_CLEAN_PROCESSES do { \ + h5_api_test_sysProcess_Delete(client); \ + h5_api_test_sysProcess_Delete(client_helper); \ + h5_api_test_sysProcess_Delete(client_init); \ + h5_api_test_sysProcess_Delete(server); \ +} while (0) + +#define H5_API_EXECUTE_CMD(cmd) do { \ + if (strlen(cmd) > 0) { \ + std::vector commands = \ + h5_api_test_sys::SystemTools::SplitString(cmd, ';'); \ + for (unsigned int cc = 0; cc < commands.size(); cc++) { \ + std::string command = commands[cc]; \ + if (command.size() > 0) { \ + std::cout << command.c_str() << std::endl; \ + system(command.c_str()); \ + } \ + } \ + } \ +} while (0) + +//---------------------------------------------------------------------------- +int +H5APITestDriver::Main(int argc, char* argv[]) +{ +#ifdef H5_API_TEST_INIT_COMMAND + // run user-specified commands before initialization. + // For example: "killall -9 rsh test;" + H5_API_EXECUTE_CMD(H5_API_TEST_INIT_COMMAND); +#endif + + if (!this->ProcessCommandLine(argc, argv)) + return 1; + this->CollectConfiguredOptions(); + + // mpi code + // Allocate process managers. + h5_api_test_sysProcess *server = 0; + h5_api_test_sysProcess *client = 0; + h5_api_test_sysProcess *client_helper = 0; + h5_api_test_sysProcess *client_init = 0; + + if (this->TestServer) { + server = h5_api_test_sysProcess_New(); + if (!server) { + H5_API_CLEAN_PROCESSES; + cerr << "H5APITestDriver: Cannot allocate h5_api_test_sysProcess to " + "run the server.\n"; + return 1; + } + } + if (this->ClientHelper) { + client_helper = h5_api_test_sysProcess_New(); + if (!client_helper) { + H5API_CLEAN_PROCESSES; + cerr << "H5APITestDriver: Cannot allocate h5_api_test_sysProcess to " + "run the client helper.\n"; + return 1; + } + } + if (this->ClientInit) { + client_init = h5_api_test_sysProcess_New(); + if (!client_init) { + H5_API_CLEAN_PROCESSES; + cerr << "H5APITestDriver: Cannot allocate h5_api_test_sysProcess to " + "run the client init.\n"; + return 1; + } + } + client = h5_api_test_sysProcess_New(); + if (!client) { + H5_API_CLEAN_PROCESSES; + cerr << "H5APITestDriver: Cannot allocate h5_api_test_sysProcess to " + "run the client.\n"; + return 1; + } + + vector ClientStdOut; + vector ClientStdErr; + vector ClientHelperStdOut; + vector ClientHelperStdErr; + vector ClientInitStdOut; + vector ClientInitStdErr; + vector ServerStdOut; + vector ServerStdErr; + + vector serverCommand; + if (server) { + const char* serverExe = this->ServerExecutable.c_str(); + + this->CreateCommandLine(serverCommand, serverExe, 1, 0, + this->MPIServerNumProcessFlag.c_str(), this->ServerArgStart, + this->ServerArgCount, argv); + this->ReportCommand(&serverCommand[0], "server"); + h5_api_test_sysProcess_SetCommand(server, &serverCommand[0]); + h5_api_test_sysProcess_SetWorkingDirectory(server, + this->GetDirectory(serverExe).c_str()); + } + + vector clientHelperCommand; + if (client_helper) { + // Construct the client helper process command line. 
+ const char *clientHelperExe = this->ClientHelperExecutable.c_str(); + this->CreateCommandLine(clientHelperCommand, clientHelperExe, 0, 1, + "1", this->ClientHelperArgStart, + this->ClientHelperArgCount, argv); + this->ReportCommand(&clientHelperCommand[0], "client_helper"); + h5_api_test_sysProcess_SetCommand(client_helper, &clientHelperCommand[0]); + h5_api_test_sysProcess_SetWorkingDirectory(client_helper, + this->GetDirectory(clientHelperExe).c_str()); + } + + vector clientInitCommand; + if (client_init) { + // Construct the client helper process command line. + const char *clientInitExe = this->ClientInitExecutable.c_str(); + this->CreateCommandLine(clientInitCommand, clientInitExe, 0, 1, + "1", this->ClientInitArgStart, this->ClientInitArgCount, argv); + this->ReportCommand(&clientInitCommand[0], "client_init"); + h5_api_test_sysProcess_SetCommand(client_init, &clientInitCommand[0]); + h5_api_test_sysProcess_SetWorkingDirectory(client_init, + this->GetDirectory(clientInitExe).c_str()); + } + + // Start the server if there is one + if (!this->StartServer(server, "server", ServerStdOut, ServerStdErr)) { + cerr << "H5APITestDriver: Server never started.\n"; + H5_API_CLEAN_PROCESSES; + return -1; + } + + // Start the client helper here if there is one + if (!this->StartClientHelper(client_helper, "client_helper", + ClientHelperStdOut, ClientHelperStdErr)) { + cerr << "H5APITestDriver: Client Helper never started.\n"; + this->Stop(server, "server"); +#ifdef H5_API_TEST_SERVER_EXIT_COMMAND + H5_API_EXECUTE_CMD(H5_API_TEST_SERVER_EXIT_COMMAND); +#endif + H5_API_CLEAN_PROCESSES; + return -1; + } + + // Start the client init here if there is one + if (!this->StartClientInit(client_init, "client_init", + ClientInitStdOut, ClientInitStdErr)) { + cerr << "H5APITestDriver: Client Init never started.\n"; + this->Stop(server, "server"); +#ifdef H5_API_TEST_SERVER_EXIT_COMMAND + H5_API_EXECUTE_CMD(H5_API_TEST_SERVER_EXIT_COMMAND); +#endif + this->Stop(client_helper, "client_helper"); +#ifdef H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND + H5_API_EXECUTE_CMD(H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND); +#endif + H5_API_CLEAN_PROCESSES; + return -1; + } + + // Construct the client process command line. + vector clientCommand; + const char *clientExe = this->ClientExecutable.c_str(); + this->CreateCommandLine(clientCommand, clientExe, 0, 0, + this->MPIClientNumProcessFlag.c_str(), this->ClientArgStart, + this->ClientArgCount, argv); + this->ReportCommand(&clientCommand[0], "client"); + h5_api_test_sysProcess_SetCommand(client, &clientCommand[0]); + h5_api_test_sysProcess_SetWorkingDirectory(client, + this->GetDirectory(clientExe).c_str()); + + // Now run the client + if (!this->StartClient(client, "client")) { + this->Stop(server, "server"); + this->Stop(client_helper, "client_helper"); + this->Stop(client_init, "client_init"); + H5_API_CLEAN_PROCESSES; + return -1; + } + + // Report the output of the processes. + int clientPipe = 1; + + string output; + int mpiError = 0; + while (clientPipe) { + clientPipe = this->WaitForAndPrintLine("client", client, output, 0.1, + ClientStdOut, ClientStdErr, NULL, NULL); + if (!mpiError && this->OutputStringHasError("client", output)) { + mpiError = 1; + } + // If client has died, we wait for output from the server processes + // for this->ServerExitTimeOut, then we'll kill the servers, if needed. + double timeout = (clientPipe) ? 
0 : this->ServerExitTimeOut; + output = ""; + this->WaitForAndPrintLine("server", server, output, timeout, + ServerStdOut, ServerStdErr, NULL, NULL); + if (!mpiError && this->OutputStringHasError("server", output)) { + mpiError = 1; + } + output = ""; + } + + // Wait for the client and server to exit. + h5_api_test_sysProcess_WaitForExit(client, 0); + + // Once client is finished, the servers + // must finish quickly. If not, it usually is a sign that + // the client crashed/exited before it attempted to connect to + // the server. + if (server) { +#ifdef H5_API_TEST_SERVER_EXIT_COMMAND + H5_API_EXECUTE_CMD(H5_API_TEST_SERVER_EXIT_COMMAND); +#endif + h5_api_test_sysProcess_WaitForExit(server, &this->ServerExitTimeOut); + } + + if (client_helper) { +#ifdef H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND + H5_API_EXECUTE_CMD(H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND); +#endif + h5_api_test_sysProcess_WaitForExit(client_helper, 0); + } + + // Get the results. + int clientResult = this->ReportStatus(client, "client"); + int serverResult = 0; + if (server) { + serverResult = this->ReportStatus(server, "server"); + h5_api_test_sysProcess_Kill(server); + } + + // Free process managers. + H5_API_CLEAN_PROCESSES; + + // Report the server return code if it is nonzero. Otherwise report + // the client return code. + if (serverResult && !this->IgnoreServerResult) + return serverResult; + + if (mpiError) { + cerr + << "H5VLTestDriver: Error string found in output, H5APITestDriver returning " + << mpiError << "\n"; + return mpiError; + } + + // if server is fine return the client result + return clientResult; +} + +//---------------------------------------------------------------------------- +void +H5APITestDriver::ReportCommand(const char * const *command, const char *name) +{ + cerr << "H5APITestDriver: " << name << " command is:\n"; + for (const char * const *c = command; *c; ++c) + cerr << " \"" << *c << "\""; + cerr << "\n"; +} + +//---------------------------------------------------------------------------- +int +H5APITestDriver::ReportStatus(h5_api_test_sysProcess *process, const char *name) +{ + int result = 1; + switch (h5_api_test_sysProcess_GetState(process)) { + case h5_api_test_sysProcess_State_Starting: { + cerr << "H5APITestDriver: Never started " << name << " process.\n"; + } + break; + case h5_api_test_sysProcess_State_Error: { + cerr << "H5APITestDriver: Error executing " << name << " process: " + << h5_api_test_sysProcess_GetErrorString(process) << "\n"; + } + break; + case h5_api_test_sysProcess_State_Exception: { + cerr << "H5APITestDriver: " << name + << " process exited with an exception: "; + switch (h5_api_test_sysProcess_GetExitException(process)) { + case h5_api_test_sysProcess_Exception_None: { + cerr << "None"; + } + break; + case h5_api_test_sysProcess_Exception_Fault: { + cerr << "Segmentation fault"; + } + break; + case h5_api_test_sysProcess_Exception_Illegal: { + cerr << "Illegal instruction"; + } + break; + case h5_api_test_sysProcess_Exception_Interrupt: { + cerr << "Interrupted by user"; + } + break; + case h5_api_test_sysProcess_Exception_Numerical: { + cerr << "Numerical exception"; + } + break; + case h5_api_test_sysProcess_Exception_Other: { + cerr << "Unknown"; + } + break; + } + cerr << "\n"; + } + break; + case h5_api_test_sysProcess_State_Executing: { + cerr << "H5APITestDriver: Never terminated " << name + << " process.\n"; + } + break; + case h5_api_test_sysProcess_State_Exited: { + result = h5_api_test_sysProcess_GetExitValue(process); + cerr << "H5APITestDriver: " 
<< name << " process exited with code " + << result << "\n"; + } + break; + case h5_api_test_sysProcess_State_Expired: { + cerr << "H5APITestDriver: killed " << name + << " process due to timeout.\n"; + } + break; + case h5_api_test_sysProcess_State_Killed: { + cerr << "H5APITestDriver: killed " << name << " process.\n"; + } + break; + } + return result; +} + +//---------------------------------------------------------------------------- +int +H5APITestDriver::WaitForLine(h5_api_test_sysProcess *process, string &line, + double timeout, vector &out, vector &err) +{ + line = ""; + vector::iterator outiter = out.begin(); + vector::iterator erriter = err.begin(); + while (1) { + // Check for a newline in stdout. + for (; outiter != out.end(); ++outiter) { + if ((*outiter == '\r') && ((outiter + 1) == out.end())) { + break; + } else if (*outiter == '\n' || *outiter == '\0') { + int length = outiter - out.begin(); + if (length > 1 && *(outiter - 1) == '\r') + --length; + if (length > 0) + line.append(&out[0], length); + out.erase(out.begin(), outiter + 1); + return h5_api_test_sysProcess_Pipe_STDOUT; + } + } + + // Check for a newline in stderr. + for (; erriter != err.end(); ++erriter) { + if ((*erriter == '\r') && ((erriter + 1) == err.end())) { + break; + } else if (*erriter == '\n' || *erriter == '\0') { + int length = erriter - err.begin(); + if (length > 1 && *(erriter - 1) == '\r') + --length; + if (length > 0) + line.append(&err[0], length); + err.erase(err.begin(), erriter + 1); + return h5_api_test_sysProcess_Pipe_STDERR; + } + } + + // No newlines found. Wait for more data from the process. + int length; + char *data; + int pipe = h5_api_test_sysProcess_WaitForData(process, &data, &length, + &timeout); + if (pipe == h5_api_test_sysProcess_Pipe_Timeout) { + // Timeout has been exceeded. + return pipe; + } else if (pipe == h5_api_test_sysProcess_Pipe_STDOUT) { + // Append to the stdout buffer. + vector::size_type size = out.size(); + out.insert(out.end(), data, data + length); + outiter = out.begin() + size; + } else if (pipe == h5_api_test_sysProcess_Pipe_STDERR) { + // Append to the stderr buffer. + vector::size_type size = err.size(); + err.insert(err.end(), data, data + length); + erriter = err.begin() + size; + } else if (pipe == h5_api_test_sysProcess_Pipe_None) { + // Both stdout and stderr pipes have broken. Return leftover data. 
+ if (!out.empty()) { + line.append(&out[0], outiter - out.begin()); + out.erase(out.begin(), out.end()); + return h5_api_test_sysProcess_Pipe_STDOUT; + } else if (!err.empty()) { + line.append(&err[0], erriter - err.begin()); + err.erase(err.begin(), err.end()); + return h5_api_test_sysProcess_Pipe_STDERR; + } else { + return h5_api_test_sysProcess_Pipe_None; + } + } + } +} + +//---------------------------------------------------------------------------- +void +H5APITestDriver::PrintLine(const char *pname, const char *line) +{ + // if the name changed then the line is output from a different process + if (this->CurrentPrintLineName != pname) { + cerr << "-------------- " << pname << " output --------------\n"; + // save the current pname + this->CurrentPrintLineName = pname; + } + cerr << line << "\n"; + cerr.flush(); +} + +//---------------------------------------------------------------------------- +int +H5APITestDriver::WaitForAndPrintLine(const char *pname, + h5_api_test_sysProcess *process, string &line, double timeout, + vector &out, vector &err, const char *waitMsg, + int *foundWaiting) +{ + int pipe = this->WaitForLine(process, line, timeout, out, err); + if (pipe == h5_api_test_sysProcess_Pipe_STDOUT + || pipe == h5_api_test_sysProcess_Pipe_STDERR) { + this->PrintLine(pname, line.c_str()); + if (foundWaiting && (line.find(waitMsg) != line.npos)) + *foundWaiting = 1; + } + return pipe; +} + +//---------------------------------------------------------------------------- +string +H5APITestDriver::GetDirectory(string location) +{ + return h5_api_test_sys::SystemTools::GetParentDirectory(location.c_str()); +} diff --git a/test/API/driver/h5_api_test_driver.hxx b/test/API/driver/h5_api_test_driver.hxx new file mode 100644 index 00000000000..b8e05e7f3fa --- /dev/null +++ b/test/API/driver/h5_api_test_driver.hxx @@ -0,0 +1,93 @@ +#ifndef H5_API_TEST_DRIVER_H +#define H5_API_TEST_DRIVER_H + +#include +#include + +#include + +class H5APITestDriver { +public: + int Main(int argc, char *argv[]); + H5APITestDriver(); + ~H5APITestDriver(); + +protected: + void SeparateArguments(const char* str, std::vector &flags); + + void ReportCommand(const char * const *command, const char *name); + int ReportStatus(h5_api_test_sysProcess *process, const char *name); + int ProcessCommandLine(int argc, char *argv[]); + void CollectConfiguredOptions(); + void CreateCommandLine(std::vector &commandLine, + const char *cmd, int isServer, int isHelper, const char *numProc, + int argStart = 0, int argCount = 0, char *argv[] = 0); + + int StartServer(h5_api_test_sysProcess *server, const char *name, + std::vector &out, std::vector &err); + int StartClientHelper(h5_api_test_sysProcess *client, const char *name, + std::vector &out, std::vector &err); + int StartClientInit(h5_api_test_sysProcess *client, const char *name, + std::vector &out, std::vector &err); + int StartClient(h5_api_test_sysProcess *client, const char *name); + void Stop(h5_api_test_sysProcess *p, const char *name); + int OutputStringHasError(const char *pname, std::string &output); + int OutputStringHasToken(const char *pname, const char *regex, + std::string &output, std::string &token); + + int WaitForLine(h5_api_test_sysProcess *process, std::string &line, + double timeout, std::vector &out, std::vector &err); + void PrintLine(const char *pname, const char *line); + int WaitForAndPrintLine(const char *pname, h5_api_test_sysProcess *process, + std::string &line, double timeout, std::vector &out, + std::vector &err, const char *waitMsg, int 
*foundWaiting); + + std::string GetDirectory(std::string location); + +private: + std::string ClientExecutable; // fullpath to client executable + std::string ClientHelperExecutable; // fullpath to client helper executable + std::string ClientInitExecutable; // fullpath to client init executable + std::string ServerExecutable; // fullpath to server executable + std::string MPIRun; // fullpath to mpirun executable + + // This specify the preflags and post flags that can be set using: + // VTK_MPI_PRENUMPROC_FLAGS VTK_MPI_PREFLAGS / VTK_MPI_POSTFLAGS at config time + // std::vector MPIPreNumProcFlags; + std::vector ClientEnvVars; + std::vector MPIClientPreFlags; + std::vector MPIClientPostFlags; + std::vector MPIServerPreFlags; + std::vector MPIServerPostFlags; + + // Specify the number of process flag, this can be set using: VTK_MPI_NUMPROC_FLAG. + // This is then split into : + // MPIServerNumProcessFlag & MPIRenderServerNumProcessFlag + std::string MPINumProcessFlag; + std::string MPIServerNumProcessFlag; + std::string MPIClientNumProcessFlag; + + std::string ClientTokenVar; // use token to launch client if requested + + std::string CurrentPrintLineName; + + double TimeOut; + double ServerExitTimeOut; // time to wait for servers to finish. + bool ClientHelper; + bool ClientInit; + bool TestServer; + + int ClientArgStart; + int ClientArgCount; + int ClientHelperArgStart; + int ClientHelperArgCount; + int ClientInitArgStart; + int ClientInitArgCount; + int ServerArgStart; + int ServerArgCount; + bool AllowErrorInOutput; + bool TestSerial; + bool IgnoreServerResult; +}; + +#endif //H5_API_TEST_DRIVER_H diff --git a/test/API/driver/kwsys/.clang-format b/test/API/driver/kwsys/.clang-format new file mode 100644 index 00000000000..588b79016b4 --- /dev/null +++ b/test/API/driver/kwsys/.clang-format @@ -0,0 +1,22 @@ +--- +# This configuration requires clang-format version 6.0 exactly. +BasedOnStyle: Mozilla +AlignOperands: false +AllowShortFunctionsOnASingleLine: InlineOnly +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterClass: true + AfterEnum: true + AfterFunction: true + AfterStruct: true + AfterUnion: true +BreakBeforeBraces: Custom +ColumnLimit: 79 +IndentPPDirectives: AfterHash +SortUsingDeclarations: false +SpaceAfterTemplateKeyword: true +Standard: Cpp03 +... diff --git a/test/API/driver/kwsys/.hooks-config b/test/API/driver/kwsys/.hooks-config new file mode 100644 index 00000000000..739cdd268bb --- /dev/null +++ b/test/API/driver/kwsys/.hooks-config @@ -0,0 +1,2 @@ +[hooks "chain"] + pre-commit = GitSetup/pre-commit diff --git a/test/API/driver/kwsys/Base64.c b/test/API/driver/kwsys/Base64.c new file mode 100644 index 00000000000..bf876f2d5e1 --- /dev/null +++ b/test/API/driver/kwsys/Base64.c @@ -0,0 +1,225 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(Base64.h) + +/* Work-around CMake dependency scanning limitation. This must + duplicate the above list of headers. 
*/ +#if 0 +# include "Base64.h.in" +#endif + +static const unsigned char kwsysBase64EncodeTable[65] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz" + "0123456789+/"; + +static const unsigned char kwsysBase64DecodeTable[256] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x3E, 0xFF, 0xFF, 0xFF, 0x3F, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, + 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, + 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + /*------------------------------------*/ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF +}; + +static unsigned char kwsysBase64EncodeChar(int c) +{ + return kwsysBase64EncodeTable[(unsigned char)c]; +} + +static unsigned char kwsysBase64DecodeChar(unsigned char c) +{ + return kwsysBase64DecodeTable[c]; +} + +/* Encode 3 bytes into a 4 byte string. */ +void kwsysBase64_Encode3(const unsigned char* src, unsigned char* dest) +{ + dest[0] = kwsysBase64EncodeChar((src[0] >> 2) & 0x3F); + dest[1] = + kwsysBase64EncodeChar(((src[0] << 4) & 0x30) | ((src[1] >> 4) & 0x0F)); + dest[2] = + kwsysBase64EncodeChar(((src[1] << 2) & 0x3C) | ((src[2] >> 6) & 0x03)); + dest[3] = kwsysBase64EncodeChar(src[2] & 0x3F); +} + +/* Encode 2 bytes into a 4 byte string. */ +void kwsysBase64_Encode2(const unsigned char* src, unsigned char* dest) +{ + dest[0] = kwsysBase64EncodeChar((src[0] >> 2) & 0x3F); + dest[1] = + kwsysBase64EncodeChar(((src[0] << 4) & 0x30) | ((src[1] >> 4) & 0x0F)); + dest[2] = kwsysBase64EncodeChar(((src[1] << 2) & 0x3C)); + dest[3] = '='; +} + +/* Encode 1 bytes into a 4 byte string. */ +void kwsysBase64_Encode1(const unsigned char* src, unsigned char* dest) +{ + dest[0] = kwsysBase64EncodeChar((src[0] >> 2) & 0x3F); + dest[1] = kwsysBase64EncodeChar(((src[0] << 4) & 0x30)); + dest[2] = '='; + dest[3] = '='; +} + +/* Encode 'length' bytes from the input buffer and store the + encoded stream into the output buffer. Return the length of the encoded + buffer (output). Note that the output buffer must be allocated by the caller + (length * 1.5 should be a safe estimate). If 'mark_end' is true than an + extra set of 4 bytes is added to the end of the stream if the input is a + multiple of 3 bytes. 
These bytes are invalid chars and therefore they will + stop the decoder thus enabling the caller to decode a stream without + actually knowing how much data to expect (if the input is not a multiple of + 3 bytes then the extra padding needed to complete the encode 4 bytes will + stop the decoding anyway). */ +size_t kwsysBase64_Encode(const unsigned char* input, size_t length, + unsigned char* output, int mark_end) +{ + const unsigned char* ptr = input; + const unsigned char* end = input + length; + unsigned char* optr = output; + + /* Encode complete triplet */ + + while ((end - ptr) >= 3) { + kwsysBase64_Encode3(ptr, optr); + ptr += 3; + optr += 4; + } + + /* Encodes a 2-byte ending into 3 bytes and 1 pad byte and writes. */ + + if (end - ptr == 2) { + kwsysBase64_Encode2(ptr, optr); + optr += 4; + } + + /* Encodes a 1-byte ending into 2 bytes and 2 pad bytes */ + + else if (end - ptr == 1) { + kwsysBase64_Encode1(ptr, optr); + optr += 4; + } + + /* Do we need to mark the end */ + + else if (mark_end) { + optr[0] = optr[1] = optr[2] = optr[3] = '='; + optr += 4; + } + + return (size_t)(optr - output); +} + +/* Decode 4 bytes into a 3 byte string. */ +int kwsysBase64_Decode3(const unsigned char* src, unsigned char* dest) +{ + unsigned char d0, d1, d2, d3; + + d0 = kwsysBase64DecodeChar(src[0]); + d1 = kwsysBase64DecodeChar(src[1]); + d2 = kwsysBase64DecodeChar(src[2]); + d3 = kwsysBase64DecodeChar(src[3]); + + /* Make sure all characters were valid */ + + if (d0 == 0xFF || d1 == 0xFF || d2 == 0xFF || d3 == 0xFF) { + return 0; + } + + /* Decode the 3 bytes */ + + dest[0] = (unsigned char)(((d0 << 2) & 0xFC) | ((d1 >> 4) & 0x03)); + dest[1] = (unsigned char)(((d1 << 4) & 0xF0) | ((d2 >> 2) & 0x0F)); + dest[2] = (unsigned char)(((d2 << 6) & 0xC0) | ((d3 >> 0) & 0x3F)); + + /* Return the number of bytes actually decoded */ + + if (src[2] == '=') { + return 1; + } + if (src[3] == '=') { + return 2; + } + return 3; +} + +/* Decode bytes from the input buffer and store the decoded stream + into the output buffer until 'length' bytes have been decoded. Return the + real length of the decoded stream (which should be equal to 'length'). Note + that the output buffer must be allocated by the caller. If + 'max_input_length' is not null, then it specifies the number of encoded + bytes that should be at most read from the input buffer. In that case the + 'length' parameter is ignored. This enables the caller to decode a stream + without actually knowing how much decoded data to expect (of course, the + buffer must be large enough). 
*/ +size_t kwsysBase64_Decode(const unsigned char* input, size_t length, + unsigned char* output, size_t max_input_length) +{ + const unsigned char* ptr = input; + unsigned char* optr = output; + + /* Decode complete triplet */ + + if (max_input_length) { + const unsigned char* end = input + max_input_length; + while (ptr < end) { + int len = kwsysBase64_Decode3(ptr, optr); + optr += len; + if (len < 3) { + return (size_t)(optr - output); + } + ptr += 4; + } + } else { + unsigned char* oend = output + length; + while ((oend - optr) >= 3) { + int len = kwsysBase64_Decode3(ptr, optr); + optr += len; + if (len < 3) { + return (size_t)(optr - output); + } + ptr += 4; + } + + /* Decode the last triplet */ + + if (oend - optr == 2) { + unsigned char temp[3]; + int len = kwsysBase64_Decode3(ptr, temp); + if (len >= 2) { + optr[0] = temp[0]; + optr[1] = temp[1]; + optr += 2; + } else if (len > 0) { + optr[0] = temp[0]; + optr += 1; + } + } else if (oend - optr == 1) { + unsigned char temp[3]; + int len = kwsysBase64_Decode3(ptr, temp); + if (len > 0) { + optr[0] = temp[0]; + optr += 1; + } + } + } + + return (size_t)(optr - output); +} diff --git a/test/API/driver/kwsys/Base64.h.in b/test/API/driver/kwsys/Base64.h.in new file mode 100644 index 00000000000..729f9729782 --- /dev/null +++ b/test/API/driver/kwsys/Base64.h.in @@ -0,0 +1,110 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_Base64_h +#define @KWSYS_NAMESPACE@_Base64_h + +#include <@KWSYS_NAMESPACE@/Configure.h> + +#include /* size_t */ + +/* Redefine all public interface symbol names to be in the proper + namespace. These macros are used internally to kwsys only, and are + not visible to user code. Use kwsysHeaderDump.pl to reproduce + these macros after making changes to the interface. */ +#if !defined(KWSYS_NAMESPACE) +# define kwsys_ns(x) @KWSYS_NAMESPACE@##x +# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT +#endif +#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# define kwsysBase64 kwsys_ns(Base64) +# define kwsysBase64_Decode kwsys_ns(Base64_Decode) +# define kwsysBase64_Decode3 kwsys_ns(Base64_Decode3) +# define kwsysBase64_Encode kwsys_ns(Base64_Encode) +# define kwsysBase64_Encode1 kwsys_ns(Base64_Encode1) +# define kwsysBase64_Encode2 kwsys_ns(Base64_Encode2) +# define kwsysBase64_Encode3 kwsys_ns(Base64_Encode3) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * Encode 3 bytes into a 4 byte string. + */ +kwsysEXPORT void kwsysBase64_Encode3(const unsigned char* src, + unsigned char* dest); + +/** + * Encode 2 bytes into a 4 byte string. + */ +kwsysEXPORT void kwsysBase64_Encode2(const unsigned char* src, + unsigned char* dest); + +/** + * Encode 1 bytes into a 4 byte string. + */ +kwsysEXPORT void kwsysBase64_Encode1(const unsigned char* src, + unsigned char* dest); + +/** + * Encode 'length' bytes from the input buffer and store the encoded + * stream into the output buffer. Return the length of the encoded + * buffer (output). Note that the output buffer must be allocated by + * the caller (length * 1.5 should be a safe estimate). If 'mark_end' + * is true than an extra set of 4 bytes is added to the end of the + * stream if the input is a multiple of 3 bytes. 
These bytes are + * invalid chars and therefore they will stop the decoder thus + * enabling the caller to decode a stream without actually knowing how + * much data to expect (if the input is not a multiple of 3 bytes then + * the extra padding needed to complete the encode 4 bytes will stop + * the decoding anyway). + */ +kwsysEXPORT size_t kwsysBase64_Encode(const unsigned char* input, + size_t length, unsigned char* output, + int mark_end); + +/** + * Decode 4 bytes into a 3 byte string. Returns the number of bytes + * actually decoded. + */ +kwsysEXPORT int kwsysBase64_Decode3(const unsigned char* src, + unsigned char* dest); + +/** + * Decode bytes from the input buffer and store the decoded stream + * into the output buffer until 'length' bytes have been decoded. + * Return the real length of the decoded stream (which should be equal + * to 'length'). Note that the output buffer must be allocated by the + * caller. If 'max_input_length' is not null, then it specifies the + * number of encoded bytes that should be at most read from the input + * buffer. In that case the 'length' parameter is ignored. This + * enables the caller to decode a stream without actually knowing how + * much decoded data to expect (of course, the buffer must be large + * enough). + */ +kwsysEXPORT size_t kwsysBase64_Decode(const unsigned char* input, + size_t length, unsigned char* output, + size_t max_input_length); + +#if defined(__cplusplus) +} /* extern "C" */ +#endif + +/* If we are building a kwsys .c or .cxx file, let it use these macros. + Otherwise, undefine them to keep the namespace clean. */ +#if !defined(KWSYS_NAMESPACE) +# undef kwsys_ns +# undef kwsysEXPORT +# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# undef kwsysBase64 +# undef kwsysBase64_Decode +# undef kwsysBase64_Decode3 +# undef kwsysBase64_Encode +# undef kwsysBase64_Encode1 +# undef kwsysBase64_Encode2 +# undef kwsysBase64_Encode3 +# endif +#endif + +#endif diff --git a/test/API/driver/kwsys/CMakeLists.txt b/test/API/driver/kwsys/CMakeLists.txt new file mode 100644 index 00000000000..09bcdb9430f --- /dev/null +++ b/test/API/driver/kwsys/CMakeLists.txt @@ -0,0 +1,1260 @@ +# Distributed under the OSI-approved BSD 3-Clause License. See accompanying +# file Copyright.txt or https://cmake.org/licensing#kwsys for details. + +# The Kitware System Library is intended to be included in other +# projects. It is completely configurable in that the library's +# namespace can be configured and the components that are included can +# be selected invididually. + +# Typical usage is to import the kwsys directory tree into a +# subdirectory under a parent project and enable the classes that will +# be used. All classes are disabled by default. The CMake listfile +# above this one configures the library as follows: +# +# SET(KWSYS_NAMESPACE foosys) +# SET(KWSYS_USE_Directory 1) # Enable Directory class. +# SUBDIRS(kwsys) +# +# Optional settings are as follows: +# +# KWSYS_HEADER_ROOT = The directory into which to generate the kwsys headers. +# A directory called "${KWSYS_NAMESPACE}" will be +# created under this root directory to hold the files. +# KWSYS_SPLIT_OBJECTS_FROM_INTERFACE +# = Instead of creating a single ${KWSYS_NAMESPACE} library +# target, create three separate targets: +# ${KWSYS_NAMESPACE} +# - An INTERFACE library only containing usage +# requirements. +# ${KWSYS_NAMESPACE}_objects +# - An OBJECT library for the built kwsys objects. 
+# Note: This is omitted from the install rules +# ${KWSYS_NAMESPACE}_private +# - An INTERFACE library combining both that is +# appropriate for use with PRIVATE linking in +# target_link_libraries. Because of how interface +# properties propagate, this target is not suitable +# for use with PUBLIC or INTERFACE linking. +# KWSYS_ALIAS_TARGET = The name of an alias target to create to the actual target. +# +# Example: +# +# SET(KWSYS_HEADER_ROOT ${PROJECT_BINARY_DIR}) +# INCLUDE_DIRECTORIES(${PROJECT_BINARY_DIR}) +# +# KWSYS_CXX_STANDARD = A value for CMAKE_CXX_STANDARD within KWSys. +# Set to empty string to use no default value. +# KWSYS_CXX_COMPILE_FEATURES = target_compile_features arguments for KWSys. +# +# Optional settings to setup install rules are as follows: +# +# KWSYS_INSTALL_BIN_DIR = The installation target directories into +# KWSYS_INSTALL_LIB_DIR which the libraries and headers from +# KWSYS_INSTALL_INCLUDE_DIR kwsys should be installed by a "make install". +# The values should be specified relative to +# the installation prefix and NOT start with '/'. +# KWSYS_INSTALL_DOC_DIR = The installation target directory for documentation +# such as copyright information. +# +# KWSYS_INSTALL_COMPONENT_NAME_RUNTIME = Name of runtime and development +# KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT installation components. +# If not given the install rules +# will not be in any component. +# +# KWSYS_INSTALL_EXPORT_NAME = The EXPORT option value for install(TARGETS) calls. +# +# Example: +# +# SET(KWSYS_INSTALL_BIN_DIR bin) +# SET(KWSYS_INSTALL_LIB_DIR lib) +# SET(KWSYS_INSTALL_INCLUDE_DIR include) +# SET(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME Runtime) +# SET(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT Development) + +# Once configured, kwsys should be used as follows from C or C++ code: +# +# #include +# ... +# foosys::Directory directory; +# + +# NOTE: This library is intended for internal use by Kitware-driven +# projects. In order to keep it simple no attempt will be made to +# maintain backward compatibility when changes are made to KWSys. +# When an incompatible change is made Kitware's projects that use +# KWSys will be fixed, but no notification will necessarily be sent to +# any outside mailing list and no documentation of the change will be +# written. + +CMAKE_MINIMUM_REQUIRED(VERSION 3.1 FATAL_ERROR) +FOREACH(p + CMP0056 # CMake 3.2, Honor link flags in try_compile() source-file signature. + CMP0063 # CMake 3.3, Honor visibility properties for all target types. + CMP0067 # CMake 3.8, Honor language standard in try_compile source-file signature. + CMP0069 # CMake 3.9, INTERPROCEDURAL_OPTIMIZATION is enforced when enabled. + ) + IF(POLICY ${p}) + CMAKE_POLICY(SET ${p} NEW) + ENDIF() +ENDFOREACH() + +#----------------------------------------------------------------------------- +# If a namespace is not specified, use "kwsys" and enable testing. +# This should be the case only when kwsys is not included inside +# another project and is being tested. +IF(NOT KWSYS_NAMESPACE) + SET(KWSYS_NAMESPACE "kwsys") + SET(KWSYS_STANDALONE 1) +ENDIF() + +#----------------------------------------------------------------------------- +# The project name is that of the specified namespace. +PROJECT(${KWSYS_NAMESPACE}) + +# Tell CMake how to follow dependencies of sources in this directory. 
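
The options documented in the long comment above boil down to a few set() calls in the host project. The following is a minimal sketch under stated assumptions: a hypothetical host that needs only SystemTools and Process, with h5kwsys, mydriver and main.cxx as made-up names.

# Hypothetical host project embedding this kwsys tree (illustrative only).
set(KWSYS_NAMESPACE h5kwsys)               # namespace used for targets and headers
set(KWSYS_USE_SystemTools 1)               # enable just the components needed
set(KWSYS_USE_Process 1)
set(KWSYS_HEADER_ROOT ${PROJECT_BINARY_DIR})
include_directories(${PROJECT_BINARY_DIR})
add_subdirectory(kwsys)

add_executable(mydriver main.cxx)          # main.cxx can #include <h5kwsys/SystemTools.hxx>
target_link_libraries(mydriver h5kwsys)

With that in place the host code includes the generated headers under the chosen namespace and links the h5kwsys target, as the usage notes above describe.
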
+SET_PROPERTY(DIRECTORY + PROPERTY IMPLICIT_DEPENDS_INCLUDE_TRANSFORM + "KWSYS_HEADER(%)=<${KWSYS_NAMESPACE}/%>" + ) + +if(KWSYS_CXX_STANDARD) + set(CMAKE_CXX_STANDARD "${KWSYS_CXX_STANDARD}") +elseif(NOT DEFINED CMAKE_CXX_STANDARD AND NOT DEFINED KWSYS_CXX_STANDARD) + if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" + AND CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC" + AND CMAKE_CXX_COMPILER_FRONTEND_VARIANT STREQUAL "GNU" + ) + set(CMAKE_CXX_STANDARD 14) + else() + set(CMAKE_CXX_STANDARD 11) + endif() +endif() + +# Select library components. +IF(KWSYS_STANDALONE OR CMake_SOURCE_DIR) + SET(KWSYS_ENABLE_C 1) + # Enable all components. + SET(KWSYS_USE_Base64 1) + SET(KWSYS_USE_Directory 1) + SET(KWSYS_USE_DynamicLoader 1) + SET(KWSYS_USE_Encoding 1) + SET(KWSYS_USE_Glob 1) + SET(KWSYS_USE_MD5 1) + SET(KWSYS_USE_Process 1) + SET(KWSYS_USE_RegularExpression 1) + SET(KWSYS_USE_System 1) + SET(KWSYS_USE_SystemTools 1) + SET(KWSYS_USE_CommandLineArguments 1) + SET(KWSYS_USE_Terminal 1) + SET(KWSYS_USE_IOStream 1) + SET(KWSYS_USE_FStream 1) + SET(KWSYS_USE_String 1) + SET(KWSYS_USE_SystemInformation 1) + SET(KWSYS_USE_ConsoleBuf 1) +ENDIF() + +# Enforce component dependencies. +IF(KWSYS_USE_SystemTools) + SET(KWSYS_USE_Directory 1) + SET(KWSYS_USE_FStream 1) + SET(KWSYS_USE_Encoding 1) +ENDIF() +IF(KWSYS_USE_Glob) + SET(KWSYS_USE_Directory 1) + SET(KWSYS_USE_SystemTools 1) + SET(KWSYS_USE_RegularExpression 1) + SET(KWSYS_USE_FStream 1) + SET(KWSYS_USE_Encoding 1) +ENDIF() +IF(KWSYS_USE_Process) + SET(KWSYS_USE_System 1) + SET(KWSYS_USE_Encoding 1) +ENDIF() +IF(KWSYS_USE_SystemInformation) + SET(KWSYS_USE_Process 1) +ENDIF() +IF(KWSYS_USE_System) + SET(KWSYS_USE_Encoding 1) +ENDIF() +IF(KWSYS_USE_Directory) + SET(KWSYS_USE_Encoding 1) +ENDIF() +IF(KWSYS_USE_DynamicLoader) + SET(KWSYS_USE_Encoding 1) +ENDIF() +IF(KWSYS_USE_FStream) + SET(KWSYS_USE_Encoding 1) +ENDIF() +IF(KWSYS_USE_ConsoleBuf) + SET(KWSYS_USE_Encoding 1) +ENDIF() + +# Specify default 8 bit encoding for Windows +IF(NOT KWSYS_ENCODING_DEFAULT_CODEPAGE) + SET(KWSYS_ENCODING_DEFAULT_CODEPAGE CP_ACP) +ENDIF() + +# Enable testing if building standalone. +IF(KWSYS_STANDALONE) + INCLUDE(Dart) + MARK_AS_ADVANCED(BUILD_TESTING DART_ROOT TCL_TCLSH) + IF(BUILD_TESTING) + ENABLE_TESTING() + ENDIF() +ENDIF() + +# Choose default shared/static build if not specified. +IF(NOT DEFINED KWSYS_BUILD_SHARED) + SET(KWSYS_BUILD_SHARED ${BUILD_SHARED_LIBS}) +ENDIF() + +# Include helper macros. +INCLUDE(${CMAKE_CURRENT_SOURCE_DIR}/kwsysPlatformTests.cmake) +INCLUDE(CheckTypeSize) + +# Do full dependency headers. +INCLUDE_REGULAR_EXPRESSION("^.*$") + +# Use new KWSYS_INSTALL_*_DIR variable names to control installation. +# Take defaults from the old names. Note that there was no old name +# for the bin dir, so we take the old lib dir name so DLLs will be +# installed in a compatible way for old code. +IF(NOT KWSYS_INSTALL_INCLUDE_DIR) + STRING(REGEX REPLACE "^/" "" KWSYS_INSTALL_INCLUDE_DIR + "${KWSYS_HEADER_INSTALL_DIR}") +ENDIF() +IF(NOT KWSYS_INSTALL_LIB_DIR) + STRING(REGEX REPLACE "^/" "" KWSYS_INSTALL_LIB_DIR + "${KWSYS_LIBRARY_INSTALL_DIR}") +ENDIF() +IF(NOT KWSYS_INSTALL_BIN_DIR) + STRING(REGEX REPLACE "^/" "" KWSYS_INSTALL_BIN_DIR + "${KWSYS_LIBRARY_INSTALL_DIR}") +ENDIF() + +# Setup header install rules. +SET(KWSYS_INSTALL_INCLUDE_OPTIONS) +IF(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT) + SET(KWSYS_INSTALL_INCLUDE_OPTIONS ${KWSYS_INSTALL_INCLUDE_OPTIONS} + COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT} + ) +ENDIF() + +# Setup library install rules. 
+SET(KWSYS_INSTALL_LIBRARY_RULE) +SET(KWSYS_INSTALL_NAMELINK_RULE) +IF(KWSYS_INSTALL_LIB_DIR) + IF(KWSYS_INSTALL_EXPORT_NAME) + LIST(APPEND KWSYS_INSTALL_LIBRARY_RULE EXPORT ${KWSYS_INSTALL_EXPORT_NAME}) + ENDIF() + # Install the shared library to the lib directory. + SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} + LIBRARY DESTINATION ${KWSYS_INSTALL_LIB_DIR} NAMELINK_SKIP + ) + # Assign the shared library to the runtime component. + IF(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME) + SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} + COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_RUNTIME} + ) + ENDIF() + IF(KWSYS_BUILD_SHARED) + SET(KWSYS_INSTALL_NAMELINK_RULE ${KWSYS_INSTALL_NAMELINK_RULE} + LIBRARY DESTINATION ${KWSYS_INSTALL_LIB_DIR} NAMELINK_ONLY + ) + # Assign the namelink to the development component. + IF(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT) + SET(KWSYS_INSTALL_NAMELINK_RULE ${KWSYS_INSTALL_NAMELINK_RULE} + COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT} + ) + ENDIF() + ENDIF() + + # Install the archive to the lib directory. + SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} + ARCHIVE DESTINATION ${KWSYS_INSTALL_LIB_DIR} + ) + # Assign the archive to the development component. + IF(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT) + SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} + COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT} + ) + ENDIF() +ENDIF() +IF(KWSYS_INSTALL_BIN_DIR) + # Install the runtime library to the bin directory. + SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} + RUNTIME DESTINATION ${KWSYS_INSTALL_BIN_DIR} + ) + # Assign the runtime library to the runtime component. + IF(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME) + SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} + COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_RUNTIME} + ) + ENDIF() +ENDIF() + +# Do not support old KWSYS_*a_INSTALL_DIR variable names. +SET(KWSYS_HEADER_INSTALL_DIR) +SET(KWSYS_LIBRARY_INSTALL_DIR) + +# Generated source files will need this header. +STRING(COMPARE EQUAL "${PROJECT_SOURCE_DIR}" "${PROJECT_BINARY_DIR}" + KWSYS_IN_SOURCE_BUILD) +IF(NOT KWSYS_IN_SOURCE_BUILD) + CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/kwsysPrivate.h + ${PROJECT_BINARY_DIR}/kwsysPrivate.h COPYONLY IMMEDIATE) +ENDIF() + +# Select plugin module file name convention. +IF(NOT KWSYS_DynamicLoader_PREFIX) + SET(KWSYS_DynamicLoader_PREFIX ${CMAKE_SHARED_MODULE_PREFIX}) +ENDIF() +IF(NOT KWSYS_DynamicLoader_SUFFIX) + SET(KWSYS_DynamicLoader_SUFFIX ${CMAKE_SHARED_MODULE_SUFFIX}) +ENDIF() + +#----------------------------------------------------------------------------- +# We require ANSI support from the C compiler. Add any needed flags. +IF(CMAKE_ANSI_CFLAGS) + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_ANSI_CFLAGS}") +ENDIF() + +#----------------------------------------------------------------------------- +# Adjust compiler flags for some platforms. 
+IF(NOT CMAKE_COMPILER_IS_GNUCXX) + IF(CMAKE_SYSTEM MATCHES "OSF1-V.*") + STRING(REGEX MATCH "-timplicit_local" + KWSYS_CXX_FLAGS_HAVE_IMPLICIT_LOCAL "${CMAKE_CXX_FLAGS}") + STRING(REGEX MATCH "-no_implicit_include" + KWSYS_CXX_FLAGS_HAVE_NO_IMPLICIT_INCLUDE "${CMAKE_CXX_FLAGS}") + IF(NOT KWSYS_CXX_FLAGS_HAVE_IMPLICIT_LOCAL) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -timplicit_local") + ENDIF() + IF(NOT KWSYS_CXX_FLAGS_HAVE_NO_IMPLICIT_INCLUDE) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -no_implicit_include") + ENDIF() + ENDIF() + IF(CMAKE_SYSTEM MATCHES "HP-UX") + SET(KWSYS_PLATFORM_CXX_TEST_EXTRA_FLAGS "+p") + IF(CMAKE_CXX_COMPILER_ID MATCHES "HP") + # it is known that version 3.85 fails and 6.25 works without these flags + IF(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4) + # use new C++ library and improved template support + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -AA +hpxstd98") + ENDIF() + ENDIF() + ENDIF() +ENDIF() +IF(KWSYS_STANDALONE) + IF(CMAKE_CXX_COMPILER_ID STREQUAL SunPro) + IF(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.13) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++03") + ELSE() + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -library=stlport4") + ENDIF() + ENDIF() +ENDIF() + +#----------------------------------------------------------------------------- +# Configure the standard library header wrappers based on compiler's +# capabilities and parent project's request. Enforce 0/1 as only +# possible values for configuration into Configure.hxx. + +# Check existence and uniqueness of long long and __int64. +KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_LONG_LONG + "Checking whether C++ compiler has 'long long'" DIRECT) +KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS___INT64 + "Checking whether C++ compiler has '__int64'" DIRECT) +IF(KWSYS_CXX_HAS___INT64) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_SAME_LONG_AND___INT64 + "Checking whether long and __int64 are the same type" DIRECT) + IF(KWSYS_CXX_HAS_LONG_LONG) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_SAME_LONG_LONG_AND___INT64 + "Checking whether long long and __int64 are the same type" DIRECT) + ENDIF() +ENDIF() + +# Enable the "long long" type if it is available. It is standard in +# C99 and C++03 but not in earlier standards. +IF(KWSYS_CXX_HAS_LONG_LONG) + SET(KWSYS_USE_LONG_LONG 1) +ELSE() + SET(KWSYS_USE_LONG_LONG 0) +ENDIF() + +# Enable the "__int64" type if it is available and unique. It is not +# standard. +SET(KWSYS_USE___INT64 0) +IF(KWSYS_CXX_HAS___INT64) + IF(NOT KWSYS_CXX_SAME_LONG_AND___INT64) + IF(NOT KWSYS_CXX_SAME_LONG_LONG_AND___INT64) + SET(KWSYS_USE___INT64 1) + ENDIF() + ENDIF() +ENDIF() + +IF(KWSYS_USE_Encoding) + # Look for type size helper macros. + KWSYS_PLATFORM_CXX_TEST(KWSYS_STL_HAS_WSTRING + "Checking whether wstring is available" DIRECT) +ENDIF() + +IF(KWSYS_USE_IOStream) + # Determine whether iostreams support long long. 
+ IF(KWSYS_CXX_HAS_LONG_LONG) + KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_ISTREAM_LONG_LONG + "Checking if istream supports long long" DIRECT) + KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_OSTREAM_LONG_LONG + "Checking if ostream supports long long" DIRECT) + ELSE() + SET(KWSYS_IOS_HAS_ISTREAM_LONG_LONG 0) + SET(KWSYS_IOS_HAS_OSTREAM_LONG_LONG 0) + ENDIF() + IF(KWSYS_CXX_HAS___INT64) + KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_ISTREAM___INT64 + "Checking if istream supports __int64" DIRECT) + KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_OSTREAM___INT64 + "Checking if ostream supports __int64" DIRECT) + ELSE() + SET(KWSYS_IOS_HAS_ISTREAM___INT64 0) + SET(KWSYS_IOS_HAS_OSTREAM___INT64 0) + ENDIF() +ENDIF() + +IF(KWSYS_NAMESPACE MATCHES "^kwsys$") + SET(KWSYS_NAME_IS_KWSYS 1) +ELSE() + SET(KWSYS_NAME_IS_KWSYS 0) +ENDIF() + +IF(KWSYS_BUILD_SHARED) + SET(KWSYS_BUILD_SHARED 1) + SET(KWSYS_LIBRARY_TYPE SHARED) +ELSE() + SET(KWSYS_BUILD_SHARED 0) + SET(KWSYS_LIBRARY_TYPE STATIC) +ENDIF() + +if(NOT DEFINED KWSYS_BUILD_PIC) + set(KWSYS_BUILD_PIC 0) +endif() + +#----------------------------------------------------------------------------- +# Configure some implementation details. + +KWSYS_PLATFORM_C_TEST(KWSYS_C_HAS_PTRDIFF_T + "Checking whether C compiler has ptrdiff_t in stddef.h" DIRECT) +KWSYS_PLATFORM_C_TEST(KWSYS_C_HAS_SSIZE_T + "Checking whether C compiler has ssize_t in unistd.h" DIRECT) +IF(KWSYS_USE_Process) + KWSYS_PLATFORM_C_TEST(KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC + "Checking whether C compiler has clock_gettime" DIRECT) +ENDIF() + +SET_SOURCE_FILES_PROPERTIES(ProcessUNIX.c System.c PROPERTIES + COMPILE_FLAGS "-DKWSYS_C_HAS_PTRDIFF_T=${KWSYS_C_HAS_PTRDIFF_T} -DKWSYS_C_HAS_SSIZE_T=${KWSYS_C_HAS_SSIZE_T} -DKWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC=${KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC}" + ) + +IF(DEFINED KWSYS_PROCESS_USE_SELECT) + GET_PROPERTY(ProcessUNIX_FLAGS SOURCE ProcessUNIX.c PROPERTY COMPILE_FLAGS) + SET_PROPERTY(SOURCE ProcessUNIX.c PROPERTY COMPILE_FLAGS "${ProcessUNIX_FLAGS} -DKWSYSPE_USE_SELECT=${KWSYSPE_USE_SELECT}") +ENDIF() + +IF(KWSYS_USE_DynamicLoader) + GET_PROPERTY(KWSYS_SUPPORTS_SHARED_LIBS GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS) + IF(KWSYS_SUPPORTS_SHARED_LIBS) + SET(KWSYS_SUPPORTS_SHARED_LIBS 1) + ELSE() + SET(KWSYS_SUPPORTS_SHARED_LIBS 0) + ENDIF() + SET_PROPERTY(SOURCE DynamicLoader.cxx APPEND PROPERTY COMPILE_DEFINITIONS + KWSYS_SUPPORTS_SHARED_LIBS=${KWSYS_SUPPORTS_SHARED_LIBS}) +ENDIF() + +IF(KWSYS_USE_SystemTools) + if (NOT DEFINED KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP) + set(KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP 1) + endif () + if (KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP) + set(KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP 1) + else () + set(KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP 0) + endif () + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_SETENV + "Checking whether CXX compiler has setenv" DIRECT) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_UNSETENV + "Checking whether CXX compiler has unsetenv" DIRECT) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H + "Checking whether CXX compiler has environ in stdlib.h" DIRECT) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_UTIMES + "Checking whether CXX compiler has utimes" DIRECT) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_UTIMENSAT + "Checking whether CXX compiler has utimensat" DIRECT) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_STAT_HAS_ST_MTIM + "Checking whether CXX compiler struct stat has st_mtim member" DIRECT) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_STAT_HAS_ST_MTIMESPEC + "Checking whether CXX compiler struct stat has st_mtimespec member" DIRECT) + 
SET_PROPERTY(SOURCE SystemTools.cxx APPEND PROPERTY COMPILE_DEFINITIONS + KWSYS_CXX_HAS_SETENV=${KWSYS_CXX_HAS_SETENV} + KWSYS_CXX_HAS_UNSETENV=${KWSYS_CXX_HAS_UNSETENV} + KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H=${KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H} + KWSYS_CXX_HAS_UTIMES=${KWSYS_CXX_HAS_UTIMES} + KWSYS_CXX_HAS_UTIMENSAT=${KWSYS_CXX_HAS_UTIMENSAT} + KWSYS_CXX_STAT_HAS_ST_MTIM=${KWSYS_CXX_STAT_HAS_ST_MTIM} + KWSYS_CXX_STAT_HAS_ST_MTIMESPEC=${KWSYS_CXX_STAT_HAS_ST_MTIMESPEC} + ) + IF(NOT WIN32) + IF(KWSYS_STANDALONE) + OPTION(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES "If true, Windows paths will be supported on Unix as well" ON) + ENDIF() + IF(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES) + SET_PROPERTY(SOURCE SystemTools.cxx testSystemTools.cxx APPEND PROPERTY COMPILE_DEFINITIONS + KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES + ) + ENDIF() + ENDIF() + + # Disable getpwnam for static linux builds since it depends on shared glibc + GET_PROPERTY(SHARED_LIBS_SUPPORTED GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS) + IF(CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT SHARED_LIBS_SUPPORTED) + SET_PROPERTY(SOURCE SystemTools.cxx APPEND PROPERTY COMPILE_DEFINITIONS + HAVE_GETPWNAM=0 + ) + ENDIF() +ENDIF() + +IF(KWSYS_USE_SystemInformation) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS SIZEOF_VOID_P=${CMAKE_SIZEOF_VOID_P}) + IF(NOT CYGWIN) + INCLUDE(CheckIncludeFiles) + CHECK_INCLUDE_FILES("sys/types.h;ifaddrs.h" KWSYS_SYS_HAS_IFADDRS_H) + IF(KWSYS_SYS_HAS_IFADDRS_H) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_SYS_HAS_IFADDRS_H=1) + ENDIF() + ENDIF() + IF(WIN32) + INCLUDE(CheckSymbolExists) + SET(CMAKE_REQUIRED_LIBRARIES Psapi) + CHECK_SYMBOL_EXISTS(GetProcessMemoryInfo "windows.h;psapi.h" KWSYS_SYS_HAS_PSAPI) + UNSET(CMAKE_REQUIRED_LIBRARIES) + IF(KWSYS_SYS_HAS_PSAPI) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_SYS_HAS_PSAPI=1) + IF(MSVC70 OR MSVC71) + # Suppress LNK4089: all references to 'PSAPI.DLL' discarded by /OPT:REF + SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /IGNORE:4089") + ENDIF() + ENDIF() + ENDIF() + IF(CMAKE_SYSTEM MATCHES "HP-UX") + CHECK_INCLUDE_FILES("sys/mpctl.h" KWSYS_SYS_HAS_MPCTL_H) + IF(KWSYS_SYS_HAS_MPCTL_H) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_SYS_HAS_MPCTL_H=1) + ENDIF() + ENDIF() + IF(CMAKE_SYSTEM MATCHES "BSD") + CHECK_INCLUDE_FILES("machine/cpu.h" KWSYS_SYS_HAS_MACHINE_CPU_H) + IF(KWSYS_SYS_HAS_MACHINE_CPU_H) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_SYS_HAS_MACHINE_CPU_H=1) + ENDIF() + ENDIF() + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_RLIMIT64 + "Checking whether CXX compiler has rlimit64" DIRECT) + SET(KWSYS_PLATFORM_CXX_TEST_DEFINES) + IF(KWSYS_CXX_HAS_RLIMIT64) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_CXX_HAS_RLIMIT64=1) + ENDIF() + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_ATOL + "Checking whether CXX compiler has atol" DIRECT) + IF(KWSYS_CXX_HAS_ATOL) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_CXX_HAS_ATOL=1) + ENDIF() + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_ATOLL + "Checking whether CXX compiler has atoll" DIRECT) + IF(KWSYS_CXX_HAS_ATOLL) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_CXX_HAS_ATOLL=1) + ENDIF() + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS__ATOI64 + "Checking whether CXX compiler has _atoi64" DIRECT) + 
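
Each KWSYS_PLATFORM_CXX_TEST result above is forwarded to the corresponding source file as a 0/1 compile definition and consumed with an ordinary preprocessor guard. A minimal sketch of that pattern, using the setenv check attached to SystemTools.cxx above; the PutEnvVar helper is a made-up name, not the actual SystemTools code:

// Illustrative consumption of a 0/1 platform-test definition (hypothetical helper).
#include <cstdlib>
#include <iostream>
#include <string>

static bool PutEnvVar(const std::string& name, const std::string& value)
{
#if KWSYS_CXX_HAS_SETENV
  // setenv() was detected by the try-compile, so use it directly.
  return setenv(name.c_str(), value.c_str(), 1) == 0;
#else
  // Fallback: putenv() keeps the pointer, so the storage must outlive the call.
  static std::string keepalive;
  keepalive = name + "=" + value;
  return putenv(const_cast<char*>(keepalive.c_str())) == 0;
#endif
}

int main()
{
  std::cout << (PutEnvVar("KWSYS_DEMO", "1") ? "ok" : "failed") << std::endl;
  return 0;
}
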
IF(KWSYS_CXX_HAS__ATOI64) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_CXX_HAS__ATOI64=1) + ENDIF() + IF(UNIX) + INCLUDE(CheckIncludeFileCXX) + # check for simple stack trace + # usually it's in libc but on FreeBSD + # it's in libexecinfo + FIND_LIBRARY(EXECINFO_LIB "execinfo") + MARK_AS_ADVANCED(EXECINFO_LIB) + IF (NOT EXECINFO_LIB) + SET(EXECINFO_LIB "") + ENDIF() + CHECK_INCLUDE_FILE_CXX("execinfo.h" KWSYS_CXX_HAS_EXECINFOH) + IF (KWSYS_CXX_HAS_EXECINFOH) + # we have the backtrace header check if it + # can be used with this compiler + SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES ${EXECINFO_LIB}) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_BACKTRACE + "Checking whether backtrace works with this C++ compiler" DIRECT) + SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES) + IF (KWSYS_CXX_HAS_BACKTRACE) + # backtrace is supported by this system and compiler. + # now check for the more advanced capabilities. + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE=1) + # check for symbol lookup using dladdr + CHECK_INCLUDE_FILE_CXX("dlfcn.h" KWSYS_CXX_HAS_DLFCNH) + IF (KWSYS_CXX_HAS_DLFCNH) + # we have symbol lookup libraries and headers + # check if they can be used with this compiler + SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES ${CMAKE_DL_LIBS}) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_DLADDR + "Checking whether dladdr works with this C++ compiler" DIRECT) + SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES) + IF (KWSYS_CXX_HAS_DLADDR) + # symbol lookup is supported by this system + # and compiler. + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP=1) + ENDIF() + ENDIF() + # c++ demangling support + # check for cxxabi headers + CHECK_INCLUDE_FILE_CXX("cxxabi.h" KWSYS_CXX_HAS_CXXABIH) + IF (KWSYS_CXX_HAS_CXXABIH) + # check if cxxabi can be used with this + # system and compiler. 
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_CXXABI + "Checking whether cxxabi works with this C++ compiler" DIRECT) + IF (KWSYS_CXX_HAS_CXXABI) + # c++ demangle using cxxabi is supported with + # this system and compiler + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE=1) + ENDIF() + ENDIF() + # basic backtrace works better with release build + # don't bother with advanced features for release + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS_DEBUG KWSYS_SYSTEMINFORMATION_HAS_DEBUG_BUILD=1) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS_RELWITHDEBINFO KWSYS_SYSTEMINFORMATION_HAS_DEBUG_BUILD=1) + ENDIF() + ENDIF() + ENDIF() + IF(BORLAND) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_BORLAND_ASM + "Checking whether Borland CXX compiler supports assembler instructions" DIRECT) + IF(KWSYS_CXX_HAS_BORLAND_ASM) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_CXX_HAS_BORLAND_ASM=1) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_BORLAND_ASM_CPUID + "Checking whether Borland CXX compiler supports CPUID assembler instruction" DIRECT) + IF(KWSYS_CXX_HAS_BORLAND_ASM_CPUID) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_CXX_HAS_BORLAND_ASM_CPUID=1) + ENDIF() + ENDIF() + ENDIF() + IF(KWSYS_USE___INT64) + SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_USE___INT64=1) + ENDIF() + IF(KWSYS_USE_LONG_LONG) + SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_USE_LONG_LONG=1) + ENDIF() + IF(KWSYS_IOS_HAS_OSTREAM_LONG_LONG) + SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_IOS_HAS_OSTREAM_LONG_LONG=1) + ENDIF() + IF(KWSYS_IOS_HAS_OSTREAM___INT64) + SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_IOS_HAS_OSTREAM___INT64=1) + ENDIF() + IF(KWSYS_BUILD_SHARED) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_BUILD_SHARED=1) + ENDIF() + + IF(UNIX AND NOT CYGWIN) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_GETLOADAVG + "Checking whether CXX compiler has getloadavg" DIRECT) + IF(KWSYS_CXX_HAS_GETLOADAVG) + SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY + COMPILE_DEFINITIONS KWSYS_CXX_HAS_GETLOADAVG=1) + ENDIF() + ENDIF() +ENDIF() + +IF(KWSYS_USE_FStream) + KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H + "Checking whether is available" DIRECT) +ENDIF() + +#----------------------------------------------------------------------------- +# Choose a directory for the generated headers. +IF(NOT KWSYS_HEADER_ROOT) + SET(KWSYS_HEADER_ROOT "${PROJECT_BINARY_DIR}") +ENDIF() +SET(KWSYS_HEADER_DIR "${KWSYS_HEADER_ROOT}/${KWSYS_NAMESPACE}") +INCLUDE_DIRECTORIES(${KWSYS_HEADER_ROOT}) + +#----------------------------------------------------------------------------- +IF(KWSYS_INSTALL_DOC_DIR) + # Assign the license to the runtime component since it must be + # distributed with binary forms of this software. + IF(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME) + SET(KWSYS_INSTALL_LICENSE_OPTIONS ${KWSYS_INSTALL_LICENSE_OPTIONS} + COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_RUNTIME} + ) + ENDIF() + + # Install the license under the documentation directory. 
+ INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/Copyright.txt + DESTINATION ${KWSYS_INSTALL_DOC_DIR}/${KWSYS_NAMESPACE} + ${KWSYS_INSTALL_LICENSE_OPTIONS}) +ENDIF() + +#----------------------------------------------------------------------------- +# Build a list of classes and headers we need to implement the +# selected components. Initialize with required components. +SET(KWSYS_CLASSES) +SET(KWSYS_H_FILES Configure SharedForward) +SET(KWSYS_HXX_FILES Configure String) + +IF(NOT CMake_SOURCE_DIR) + SET(KWSYS_HXX_FILES ${KWSYS_HXX_FILES} + hashtable hash_fun hash_map hash_set + ) +ENDIF() + +# Add selected C++ classes. +SET(cppclasses + Directory DynamicLoader Encoding Glob RegularExpression SystemTools + CommandLineArguments IOStream FStream SystemInformation ConsoleBuf + ) +FOREACH(cpp ${cppclasses}) + IF(KWSYS_USE_${cpp}) + # Use the corresponding class. + SET(KWSYS_CLASSES ${KWSYS_CLASSES} ${cpp}) + + # Load component-specific CMake code. + IF(EXISTS ${PROJECT_SOURCE_DIR}/kwsys${cpp}.cmake) + INCLUDE(${PROJECT_SOURCE_DIR}/kwsys${cpp}.cmake) + ENDIF() + ENDIF() +ENDFOREACH() + +# Add selected C components. +FOREACH(c + Process Base64 Encoding MD5 Terminal System String + ) + IF(KWSYS_USE_${c}) + # Use the corresponding header file. + SET(KWSYS_H_FILES ${KWSYS_H_FILES} ${c}) + + # Load component-specific CMake code. + IF(EXISTS ${PROJECT_SOURCE_DIR}/kwsys${c}.cmake) + INCLUDE(${PROJECT_SOURCE_DIR}/kwsys${c}.cmake) + ENDIF() + ENDIF() +ENDFOREACH() + +#----------------------------------------------------------------------------- +# Build a list of sources for the library based on components that are +# included. +SET(KWSYS_C_SRCS) +SET(KWSYS_CXX_SRCS) + +# Add the proper sources for this platform's Process implementation. +IF(KWSYS_USE_Process) + IF(NOT UNIX) + # Use the Windows implementation. + SET(KWSYS_C_SRCS ${KWSYS_C_SRCS} ProcessWin32.c) + ELSE() + # Use the UNIX implementation. + SET(KWSYS_C_SRCS ${KWSYS_C_SRCS} ProcessUNIX.c) + ENDIF() +ENDIF() + +# Add selected C sources. +FOREACH(c Base64 Encoding MD5 Terminal System String) + IF(KWSYS_USE_${c}) + IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${c}C.c) + LIST(APPEND KWSYS_C_SRCS ${c}C.c) + ELSE() + LIST(APPEND KWSYS_C_SRCS ${c}.c) + ENDIF() + ENDIF() +ENDFOREACH() + +# Configure headers of C++ classes and construct the list of sources. +FOREACH(c ${KWSYS_CLASSES}) + # Add this source to the list of source files for the library. + IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${c}CXX.cxx) + LIST(APPEND KWSYS_CXX_SRCS ${c}CXX.cxx) + ELSEIF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${c}.cxx) + LIST(APPEND KWSYS_CXX_SRCS ${c}.cxx) + ENDIF() + + # Configure the header for this class. + CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/${c}.hxx.in ${KWSYS_HEADER_DIR}/${c}.hxx + @ONLY IMMEDIATE) + SET(KWSYS_CXX_SRCS ${KWSYS_CXX_SRCS} ${KWSYS_HEADER_DIR}/${c}.hxx) + + # Create an install target for the header. + IF(KWSYS_INSTALL_INCLUDE_DIR) + INSTALL(FILES ${KWSYS_HEADER_DIR}/${c}.hxx + DESTINATION ${KWSYS_INSTALL_INCLUDE_DIR}/${KWSYS_NAMESPACE} + ${KWSYS_INSTALL_INCLUDE_OPTIONS}) + ENDIF() +ENDFOREACH() + +# Configure C headers. +FOREACH(h ${KWSYS_H_FILES}) + # Configure the header into the given directory. + CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/${h}.h.in ${KWSYS_HEADER_DIR}/${h}.h + @ONLY IMMEDIATE) + SET(KWSYS_C_SRCS ${KWSYS_C_SRCS} ${KWSYS_HEADER_DIR}/${h}.h) + + # Create an install target for the header. 
+ IF(KWSYS_INSTALL_INCLUDE_DIR) + INSTALL(FILES ${KWSYS_HEADER_DIR}/${h}.h + DESTINATION ${KWSYS_INSTALL_INCLUDE_DIR}/${KWSYS_NAMESPACE} + ${KWSYS_INSTALL_INCLUDE_OPTIONS}) + ENDIF() +ENDFOREACH() + +# Configure other C++ headers. +FOREACH(h ${KWSYS_HXX_FILES}) + # Configure the header into the given directory. + CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/${h}.hxx.in ${KWSYS_HEADER_DIR}/${h}.hxx + @ONLY IMMEDIATE) + SET(KWSYS_CXX_SRCS ${KWSYS_CXX_SRCS} ${KWSYS_HEADER_DIR}/${h}.hxx) + + # Create an install target for the header. + IF(KWSYS_INSTALL_INCLUDE_DIR) + INSTALL(FILES ${KWSYS_HEADER_DIR}/${h}.hxx + DESTINATION ${KWSYS_INSTALL_INCLUDE_DIR}/${KWSYS_NAMESPACE} + ${KWSYS_INSTALL_INCLUDE_OPTIONS}) + ENDIF() +ENDFOREACH() + +#----------------------------------------------------------------------------- +# Add the library with the configured name and list of sources. +IF(KWSYS_C_SRCS OR KWSYS_CXX_SRCS) + IF(KWSYS_SPLIT_OBJECTS_FROM_INTERFACE) + SET(KWSYS_TARGET_INTERFACE ${KWSYS_NAMESPACE}) + SET(KWSYS_TARGET_OBJECT ${KWSYS_NAMESPACE}_objects) + SET(KWSYS_TARGET_LINK ${KWSYS_NAMESPACE}_private) + SET(KWSYS_TARGET_INSTALL ${KWSYS_TARGET_INTERFACE} ${KWSYS_TARGET_LINK}) + SET(KWSYS_LINK_DEPENDENCY INTERFACE) + ADD_LIBRARY(${KWSYS_TARGET_OBJECT} OBJECT + ${KWSYS_C_SRCS} ${KWSYS_CXX_SRCS}) + IF(KWSYS_BUILD_SHARED OR KWSYS_BUILD_PIC) + SET_PROPERTY(TARGET ${KWSYS_TARGET_OBJECT} PROPERTY + POSITION_INDEPENDENT_CODE TRUE) + ENDIF() + ADD_LIBRARY(${KWSYS_TARGET_INTERFACE} INTERFACE) + ADD_LIBRARY(${KWSYS_TARGET_LINK} INTERFACE) + TARGET_LINK_LIBRARIES(${KWSYS_TARGET_LINK} INTERFACE + ${KWSYS_TARGET_INTERFACE}) + TARGET_SOURCES(${KWSYS_TARGET_LINK} INTERFACE + $) + target_compile_features(${KWSYS_TARGET_OBJECT} PRIVATE ${KWSYS_CXX_COMPILE_FEATURES}) + target_compile_features(${KWSYS_TARGET_INTERFACE} INTERFACE ${KWSYS_CXX_COMPILE_FEATURES}) + ELSE() + SET(KWSYS_TARGET_INTERFACE ${KWSYS_NAMESPACE}) + SET(KWSYS_TARGET_OBJECT ${KWSYS_NAMESPACE}) + SET(KWSYS_TARGET_LINK ${KWSYS_NAMESPACE}) + set(KWSYS_TARGET_INSTALL ${KWSYS_TARGET_LINK}) + SET(KWSYS_LINK_DEPENDENCY PUBLIC) + ADD_LIBRARY(${KWSYS_TARGET_INTERFACE} ${KWSYS_LIBRARY_TYPE} + ${KWSYS_C_SRCS} ${KWSYS_CXX_SRCS}) + target_compile_features(${KWSYS_TARGET_INTERFACE} PUBLIC ${KWSYS_CXX_COMPILE_FEATURES}) + ENDIF() + if (KWSYS_ALIAS_TARGET) + add_library(${KWSYS_ALIAS_TARGET} ALIAS ${KWSYS_TARGET_INTERFACE}) + endif () + SET_TARGET_PROPERTIES(${KWSYS_TARGET_OBJECT} PROPERTIES + C_CLANG_TIDY "" + CXX_CLANG_TIDY "" + C_INCLUDE_WHAT_YOU_USE "" + CXX_INCLUDE_WHAT_YOU_USE "" + LABELS "${KWSYS_LABELS_LIB}") + IF(KWSYS_USE_DynamicLoader) + IF(UNIX) + TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} + ${CMAKE_DL_LIBS}) + ENDIF() + ENDIF() + + IF(KWSYS_USE_SystemInformation) + IF(WIN32) + TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} ws2_32) + # link in dbghelp.dll for symbol lookup if MSVC 1800 or later + # Note that the dbghelp runtime is part of MS Windows OS + IF(MSVC_VERSION AND NOT MSVC_VERSION VERSION_LESS 1800) + TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} dbghelp) + ENDIF() + IF(KWSYS_SYS_HAS_PSAPI) + TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} + Psapi) + ENDIF() + ELSEIF(UNIX) + IF (EXECINFO_LIB AND KWSYS_CXX_HAS_BACKTRACE) + # backtrace on FreeBSD is not in libc + TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} + ${EXECINFO_LIB}) + ENDIF() + IF (KWSYS_CXX_HAS_DLADDR) + # for symbol lookup using dladdr + 
TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} + ${CMAKE_DL_LIBS}) + ENDIF() + IF (CMAKE_SYSTEM_NAME STREQUAL "SunOS") + TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} + socket) + ENDIF() + ENDIF() + ENDIF() + + # Apply user-defined target properties to the library. + IF(KWSYS_PROPERTIES_CXX) + SET_TARGET_PROPERTIES(${KWSYS_TARGET_INTERFACE} PROPERTIES + ${KWSYS_PROPERTIES_CXX}) + ENDIF() + + # Set up include usage requirement + IF(COMMAND TARGET_INCLUDE_DIRECTORIES) + TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_INTERFACE} INTERFACE + $) + IF(KWSYS_INSTALL_INCLUDE_DIR) + TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_INTERFACE} INTERFACE + $) + ENDIF() + ENDIF() + + # Create an install target for the library. + IF(KWSYS_INSTALL_LIBRARY_RULE) + INSTALL(TARGETS ${KWSYS_TARGET_INSTALL} ${KWSYS_INSTALL_LIBRARY_RULE}) + ENDIF() + IF(KWSYS_INSTALL_NAMELINK_RULE) + INSTALL(TARGETS ${KWSYS_TARGET_INSTALL} ${KWSYS_INSTALL_NAMELINK_RULE}) + ENDIF() +ENDIF() + +# Add a C-only library if requested. +IF(KWSYS_ENABLE_C AND KWSYS_C_SRCS) + IF(KWSYS_SPLIT_OBJECTS_FROM_INTERFACE) + SET(KWSYS_TARGET_C_INTERFACE ${KWSYS_NAMESPACE}_c) + SET(KWSYS_TARGET_C_OBJECT ${KWSYS_NAMESPACE}_c_objects) + SET(KWSYS_TARGET_C_LINK ${KWSYS_NAMESPACE}_c_private) + SET(KWSYS_TARGET_C_INSTALL + ${KWSYS_TARGET_C_INTERFACE} ${KWSYS_TARGET_C_LINK}) + SET(KWSYS_LINK_DEPENDENCY INTERFACE) + ADD_LIBRARY(${KWSYS_TARGET_C_OBJECT} OBJECT ${KWSYS_C_SRCS}) + IF(KWSYS_BUILD_SHARED OR KWSYS_BUILD_PIC) + SET_PROPERTY(TARGET ${KWSYS_TARGET_C_OBJECT} PROPERTY + POSITION_INDEPENDENT_CODE TRUE) + ENDIF() + ADD_LIBRARY(${KWSYS_TARGET_C_INTERFACE} INTERFACE) + ADD_LIBRARY(${KWSYS_TARGET_C_LINK} INTERFACE) + TARGET_LINK_LIBRARIES(${KWSYS_TARGET_C_LINK} INTERFACE + ${KWSYS_TARGET_C_INTERFACE}) + TARGET_SOURCES(${KWSYS_TARGET_C_LINK} INTERFACE + $) + ELSE() + SET(KWSYS_TARGET_C_INTERFACE ${KWSYS_NAMESPACE}_c) + SET(KWSYS_TARGET_C_OBJECT ${KWSYS_NAMESPACE}_c) + SET(KWSYS_TARGET_C_LINK ${KWSYS_NAMESPACE}_c) + SET(KWSYS_TARGET_C_INSTALL ${KWSYS_TARGET_C_LINK}) + SET(KWSYS_LINK_DEPENDENCY PUBLIC) + ADD_LIBRARY(${KWSYS_TARGET_C_INTERFACE} ${KWSYS_LIBRARY_TYPE} + ${KWSYS_C_SRCS}) + ENDIF() + SET_TARGET_PROPERTIES(${KWSYS_TARGET_C_OBJECT} PROPERTIES + LABELS "${KWSYS_LABELS_LIB}") + + # Apply user-defined target properties to the library. + IF(KWSYS_PROPERTIES_C) + SET_TARGET_PROPERTIES(${KWSYS_TARGET_C_INTERFACE} PROPERTIES + ${KWSYS_PROPERTIES_C}) + ENDIF() + + # Set up include usage requirement + IF(COMMAND TARGET_INCLUDE_DIRECTORIES) + TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_C_INTERFACE} INTERFACE + $) + IF(KWSYS_INSTALL_INCLUDE_DIR) + TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_C_INTERFACE} INTERFACE + $) + ENDIF() + ENDIF() + + # Create an install target for the library. + IF(KWSYS_INSTALL_LIBRARY_RULE) + INSTALL(TARGETS ${KWSYS_TARGET_C_INSTALL}) + ENDIF() +ENDIF() + +# For building kwsys itself, we use a macro defined on the command +# line to configure the namespace in the C and C++ source files. +ADD_DEFINITIONS("-DKWSYS_NAMESPACE=${KWSYS_NAMESPACE}") + +# Disable deprecation warnings for standard C functions. +IF(MSVC OR (WIN32 AND (CMAKE_C_COMPILER_ID STREQUAL "Intel" OR + (CMAKE_C_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC")))) + ADD_DEFINITIONS( + -D_CRT_NONSTDC_NO_DEPRECATE + -D_CRT_SECURE_NO_DEPRECATE + -D_CRT_SECURE_NO_WARNINGS + -D_SCL_SECURE_NO_DEPRECATE + ) +ENDIF() + +IF(WIN32) + # Help enforce the use of wide Windows apis. 
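
As a brief aside on the split-objects layout configured above: per the KWSYS_SPLIT_OBJECTS_FROM_INTERFACE description near the top of this file, the ${KWSYS_NAMESPACE}_private helper combines the usage requirements with the object files and is therefore only suitable for PRIVATE linking. A hypothetical consumer might look like the sketch below, where h5kwsys and mytool_core are illustrative names only.

# Hypothetical consumer of the split-objects layout (illustrative only).
set(KWSYS_SPLIT_OBJECTS_FROM_INTERFACE 1)
set(KWSYS_NAMESPACE h5kwsys)
add_subdirectory(kwsys)

add_library(mytool_core STATIC core.cxx)
# h5kwsys_private carries both the interface and the built objects, so it
# must be linked with the PRIVATE keyword (see the note at the top of file).
target_link_libraries(mytool_core PRIVATE h5kwsys_private)
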
+ ADD_DEFINITIONS(-DUNICODE -D_UNICODE) +ENDIF() + +IF(KWSYS_USE_String) + # Activate code in "String.c". See the comment in the source. + SET_SOURCE_FILES_PROPERTIES(String.c PROPERTIES + COMPILE_FLAGS "-DKWSYS_STRING_C") +ENDIF() + +IF(KWSYS_USE_Encoding) + # Set default 8 bit encoding in "EndcodingC.c". + SET_PROPERTY(SOURCE EncodingC.c EncodingCXX.cxx APPEND PROPERTY COMPILE_DEFINITIONS + KWSYS_ENCODING_DEFAULT_CODEPAGE=${KWSYS_ENCODING_DEFAULT_CODEPAGE}) +ENDIF() + +#----------------------------------------------------------------------------- +# Setup testing if not being built as part of another project. +IF(KWSYS_STANDALONE OR CMake_SOURCE_DIR) + IF(BUILD_TESTING) + # Compute the location of executables. + SET(EXEC_DIR "${CMAKE_CURRENT_BINARY_DIR}") + IF(EXECUTABLE_OUTPUT_PATH) + SET(EXEC_DIR "${EXECUTABLE_OUTPUT_PATH}") + ENDIF() + + # C tests + SET(KWSYS_C_TESTS + testEncode.c + testTerminal.c + ) + IF(KWSYS_STANDALONE) + SET(KWSYS_C_TESTS ${KWSYS_C_TESTS} testFail.c) + ENDIF() + CREATE_TEST_SOURCELIST( + KWSYS_C_TEST_SRCS ${KWSYS_NAMESPACE}TestsC.c + ${KWSYS_C_TESTS} + ) + ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestsC ${KWSYS_C_TEST_SRCS}) + SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsC PROPERTY LABELS ${KWSYS_LABELS_EXE}) + TARGET_LINK_LIBRARIES(${KWSYS_NAMESPACE}TestsC ${KWSYS_TARGET_C_LINK}) + FOREACH(testfile ${KWSYS_C_TESTS}) + get_filename_component(test "${testfile}" NAME_WE) + ADD_TEST(kwsys.${test} ${EXEC_DIR}/${KWSYS_NAMESPACE}TestsC ${test} ${KWSYS_TEST_ARGS_${test}}) + SET_PROPERTY(TEST kwsys.${test} PROPERTY LABELS ${KWSYS_LABELS_TEST}) + ENDFOREACH() + + # C++ tests + IF(NOT WATCOM AND NOT CMake_SOURCE_DIR) + SET(KWSYS_CXX_TESTS + testHashSTL.cxx + ) + ENDIF() + SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} + testConfigure.cxx + testSystemTools.cxx + testCommandLineArguments.cxx + testCommandLineArguments1.cxx + testDirectory.cxx + ) + IF(KWSYS_STL_HAS_WSTRING) + SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} + testEncoding.cxx + ) + ENDIF() + IF(KWSYS_USE_FStream) + SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} + testFStream.cxx + ) + ENDIF() + IF(KWSYS_USE_ConsoleBuf) + ADD_EXECUTABLE(testConsoleBufChild testConsoleBufChild.cxx) + SET_PROPERTY(TARGET testConsoleBufChild PROPERTY C_CLANG_TIDY "") + SET_PROPERTY(TARGET testConsoleBufChild PROPERTY CXX_CLANG_TIDY "") + SET_PROPERTY(TARGET testConsoleBufChild PROPERTY C_INCLUDE_WHAT_YOU_USE "") + SET_PROPERTY(TARGET testConsoleBufChild PROPERTY CXX_INCLUDE_WHAT_YOU_USE "") + SET_PROPERTY(TARGET testConsoleBufChild PROPERTY LABELS ${KWSYS_LABELS_EXE}) + TARGET_LINK_LIBRARIES(testConsoleBufChild ${KWSYS_TARGET_LINK}) + SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} + testConsoleBuf.cxx + ) + IF(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC" AND + CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "19.0.23506") + set_property(SOURCE testConsoleBuf.cxx testConsoleBufChild.cxx PROPERTY COMPILE_FLAGS /utf-8) + ENDIF() + SET_PROPERTY(SOURCE testConsoleBuf.cxx APPEND PROPERTY COMPILE_DEFINITIONS + KWSYS_ENCODING_DEFAULT_CODEPAGE=${KWSYS_ENCODING_DEFAULT_CODEPAGE}) + ENDIF() + IF(KWSYS_USE_SystemInformation) + SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} testSystemInformation.cxx) + ENDIF() + IF(KWSYS_USE_DynamicLoader) + SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} testDynamicLoader.cxx) + # If kwsys contains the DynamicLoader, need extra library + ADD_LIBRARY(${KWSYS_NAMESPACE}TestDynload MODULE testDynload.c) + SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestDynload PROPERTY LABELS ${KWSYS_LABELS_LIB}) + ADD_DEPENDENCIES(${KWSYS_NAMESPACE}TestDynload ${KWSYS_TARGET_INTERFACE}) + + if (WIN32) + 
# Windows tests supported flags. + add_library(${KWSYS_NAMESPACE}TestDynloadImpl SHARED testDynloadImpl.c) + set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadImpl PROPERTY LABELS ${KWSYS_LABELS_LIB}) + set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadImpl PROPERTY DEFINE_SYMBOL BUILDING_TestDynloadImpl) + set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadImpl PROPERTY RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/dynloaddir") + add_dependencies(${KWSYS_NAMESPACE}TestDynloadImpl ${KWSYS_TARGET_INTERFACE}) + add_library(${KWSYS_NAMESPACE}TestDynloadUse MODULE testDynloadUse.c) + set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadUse PROPERTY LABELS ${KWSYS_LABELS_LIB}) + set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadUse PROPERTY LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/dynloaddir") + add_dependencies(${KWSYS_NAMESPACE}TestDynloadUse ${KWSYS_TARGET_INTERFACE}) + target_link_libraries(${KWSYS_NAMESPACE}TestDynloadUse PRIVATE ${KWSYS_NAMESPACE}TestDynloadImpl) + endif () + ENDIF() + CREATE_TEST_SOURCELIST( + KWSYS_CXX_TEST_SRCS ${KWSYS_NAMESPACE}TestsCxx.cxx + ${KWSYS_CXX_TESTS} + ) + ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestsCxx ${KWSYS_CXX_TEST_SRCS}) + SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY C_CLANG_TIDY "") + SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY CXX_CLANG_TIDY "") + SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY C_INCLUDE_WHAT_YOU_USE "") + SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY CXX_INCLUDE_WHAT_YOU_USE "") + SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY LABELS ${KWSYS_LABELS_EXE}) + TARGET_LINK_LIBRARIES(${KWSYS_NAMESPACE}TestsCxx ${KWSYS_TARGET_LINK}) + + SET(TEST_SYSTEMTOOLS_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}") + SET(TEST_SYSTEMTOOLS_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}") + CONFIGURE_FILE( + ${PROJECT_SOURCE_DIR}/testSystemTools.h.in + ${PROJECT_BINARY_DIR}/testSystemTools.h) + INCLUDE_DIRECTORIES(${PROJECT_BINARY_DIR}) + + IF(CTEST_TEST_KWSYS) + CONFIGURE_FILE("${CMAKE_CURRENT_SOURCE_DIR}/ExtraTest.cmake.in" + "${CMAKE_CURRENT_BINARY_DIR}/ExtraTest.cmake") + SET_DIRECTORY_PROPERTIES(PROPERTIES TEST_INCLUDE_FILE "${CMAKE_CURRENT_BINARY_DIR}/ExtraTest.cmake") + ENDIF() + + SET(KWSYS_TEST_ARGS_testCommandLineArguments + --another-bool-variable + --long3=opt + --set-bool-arg1 + -SSS ken brad bill andy + --some-bool-variable=true + --some-double-variable12.5 + --some-int-variable 14 + "--some-string-variable=test string with space" + --some-multi-argument 5 1 8 3 7 1 3 9 7 1 + -N 12.5 -SS=andy -N 1.31 -N 22 + -SS=bill -BBtrue -SS=brad + -BBtrue + -BBfalse + -SS=ken + -A + -C=test + --long2 hello + ) + SET(KWSYS_TEST_ARGS_testCommandLineArguments1 + --ignored + -n 24 + --second-ignored + "-m=test value" + third-ignored + -p + some junk at the end + ) + FOREACH(testfile ${KWSYS_CXX_TESTS}) + get_filename_component(test "${testfile}" NAME_WE) + ADD_TEST(kwsys.${test} ${EXEC_DIR}/${KWSYS_NAMESPACE}TestsCxx ${test} ${KWSYS_TEST_ARGS_${test}}) + SET_PROPERTY(TEST kwsys.${test} PROPERTY LABELS ${KWSYS_LABELS_TEST}) + ENDFOREACH() + + # Process tests. 
+ ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestProcess testProcess.c) + SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestProcess PROPERTY LABELS ${KWSYS_LABELS_EXE}) + TARGET_LINK_LIBRARIES(${KWSYS_NAMESPACE}TestProcess ${KWSYS_TARGET_C_LINK}) + IF(NOT CYGWIN) + SET(KWSYS_TEST_PROCESS_7 7) + ENDIF() + FOREACH(n 1 2 3 4 5 6 ${KWSYS_TEST_PROCESS_7} 9 10) + ADD_TEST(kwsys.testProcess-${n} ${EXEC_DIR}/${KWSYS_NAMESPACE}TestProcess ${n}) + SET_PROPERTY(TEST kwsys.testProcess-${n} PROPERTY LABELS ${KWSYS_LABELS_TEST}) + SET_TESTS_PROPERTIES(kwsys.testProcess-${n} PROPERTIES TIMEOUT 120) + ENDFOREACH() + + SET(testProcess_COMPILE_FLAGS "") + # Some Apple compilers produce bad optimizations in this source. + IF(APPLE AND CMAKE_C_COMPILER_ID MATCHES "^(GNU|LLVM)$") + SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -O0") + ELSEIF(CMAKE_C_COMPILER_ID STREQUAL "XL") + # Tell IBM XL not to warn about our test infinite loop + IF(CMAKE_SYSTEM MATCHES "Linux.*ppc64le" + AND CMAKE_C_COMPILER_VERSION VERSION_LESS "16.1.0" + AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS "13.1.1") + # v13.1.[1-6] on Linux ppc64le is clang based and does not accept + # the -qsuppress option, so just suppress all warnings. + SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -w") + ELSE() + SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -qsuppress=1500-010") + ENDIF() + ENDIF() + IF(CMAKE_C_FLAGS MATCHES "-fsanitize=") + SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -DCRASH_USING_ABORT") + ENDIF() + SET_PROPERTY(SOURCE testProcess.c PROPERTY COMPILE_FLAGS "${testProcess_COMPILE_FLAGS}") + + # Test SharedForward + CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/testSharedForward.c.in + ${PROJECT_BINARY_DIR}/testSharedForward.c @ONLY IMMEDIATE) + ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestSharedForward + ${PROJECT_BINARY_DIR}/testSharedForward.c) + SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestSharedForward PROPERTY LABELS ${KWSYS_LABELS_EXE}) + ADD_DEPENDENCIES(${KWSYS_NAMESPACE}TestSharedForward ${KWSYS_TARGET_C_LINK}) + ADD_TEST(kwsys.testSharedForward ${EXEC_DIR}/${KWSYS_NAMESPACE}TestSharedForward 1) + SET_PROPERTY(TEST kwsys.testSharedForward PROPERTY LABELS ${KWSYS_LABELS_TEST}) + + # Configure some test properties. + IF(KWSYS_STANDALONE) + # We expect test to fail + SET_TESTS_PROPERTIES(kwsys.testFail PROPERTIES WILL_FAIL ON) + GET_TEST_PROPERTY(kwsys.testFail WILL_FAIL wfv) + SET_TESTS_PROPERTIES(kwsys.testFail PROPERTIES MEASUREMENT "Some Key=Some Value") + MESSAGE(STATUS "GET_TEST_PROPERTY returned: ${wfv}") + ENDIF() + + # Set up ctest custom configuration file. + CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/CTestCustom.cmake.in + ${PROJECT_BINARY_DIR}/CTestCustom.cmake @ONLY) + + # Suppress known consistent failures on buggy systems. + IF(KWSYS_TEST_BOGUS_FAILURES) + SET_TESTS_PROPERTIES(${KWSYS_TEST_BOGUS_FAILURES} PROPERTIES WILL_FAIL ON) + ENDIF() + + ENDIF() +ENDIF() diff --git a/test/API/driver/kwsys/CONTRIBUTING.rst b/test/API/driver/kwsys/CONTRIBUTING.rst new file mode 100644 index 00000000000..32e7b83c5b8 --- /dev/null +++ b/test/API/driver/kwsys/CONTRIBUTING.rst @@ -0,0 +1,49 @@ +Contributing to KWSys +********************* + +Patches +======= + +KWSys is kept in its own Git repository and shared by several projects +via copies in their source trees. Changes to KWSys should not be made +directly in a host project, except perhaps in maintenance branches. + +KWSys uses `Kitware's GitLab Instance`_ to manage development and code review. +To contribute patches: + +#. 
Fork the upstream `KWSys Repository`_ into a personal account. +#. Base all new work on the upstream ``master`` branch. +#. Run ``./SetupForDevelopment.sh`` in new local work trees. +#. Create commits making incremental, distinct, logically complete changes. +#. Push a topic branch to a personal repository fork on GitLab. +#. Create a GitLab Merge Request targeting the upstream ``master`` branch. + +Once changes are reviewed, tested, and integrated to KWSys upstream then +copies of KWSys within dependent projects can be updated to get the changes. + +.. _`Kitware's GitLab Instance`: https://gitlab.kitware.com +.. _`KWSys Repository`: https://gitlab.kitware.com/utils/kwsys + +Code Style +========== + +We use `clang-format`_ version **6.0** to define our style for C++ code in +the KWSys source tree. See the `.clang-format`_ configuration file for +our style settings. Use the `clang-format.bash`_ script to format source +code. It automatically runs ``clang-format`` on the set of source files +for which we enforce style. The script also has options to format only +a subset of files, such as those that are locally modified. + +.. _`clang-format`: http://clang.llvm.org/docs/ClangFormat.html +.. _`.clang-format`: .clang-format +.. _`clang-format.bash`: clang-format.bash + +License +======= + +We do not require any formal copyright assignment or contributor license +agreement. Any contributions intentionally sent upstream are presumed +to be offered under terms of the OSI-approved BSD 3-clause License. +See `Copyright.txt`_ for details. + +.. _`Copyright.txt`: Copyright.txt diff --git a/test/API/driver/kwsys/CTestConfig.cmake b/test/API/driver/kwsys/CTestConfig.cmake new file mode 100644 index 00000000000..1339ffc2ddc --- /dev/null +++ b/test/API/driver/kwsys/CTestConfig.cmake @@ -0,0 +1,9 @@ +# Distributed under the OSI-approved BSD 3-Clause License. See accompanying +# file Copyright.txt or https://cmake.org/licensing#kwsys for details. + +set(CTEST_PROJECT_NAME "KWSys") +set(CTEST_NIGHTLY_START_TIME "21:00:00 EDT") +set(CTEST_DROP_METHOD "http") +set(CTEST_DROP_SITE "open.cdash.org") +set(CTEST_DROP_LOCATION "/submit.php?project=KWSys") +set(CTEST_DROP_SITE_CDASH TRUE) diff --git a/test/API/driver/kwsys/CTestCustom.cmake.in b/test/API/driver/kwsys/CTestCustom.cmake.in new file mode 100644 index 00000000000..760221b1244 --- /dev/null +++ b/test/API/driver/kwsys/CTestCustom.cmake.in @@ -0,0 +1,14 @@ +# kwsys.testProcess-10 involves sending SIGINT to a child process, which then +# exits abnormally via a call to _exit(). (On Windows, a call to ExitProcess). +# Naturally, this results in plenty of memory being "leaked" by this child +# process - the memory check results are not meaningful in this case. +# +# kwsys.testProcess-9 also tests sending SIGINT to a child process. However, +# normal operation of that test involves the child process timing out, and the +# host process kills (SIGKILL) it as a result. Since it was SIGKILL'ed, the +# resulting memory leaks are not logged by valgrind anyway. Therefore, we +# don't have to exclude it. + +list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE + kwsys.testProcess-10 + ) diff --git a/test/API/driver/kwsys/CommandLineArguments.cxx b/test/API/driver/kwsys/CommandLineArguments.cxx new file mode 100644 index 00000000000..3fd19556179 --- /dev/null +++ b/test/API/driver/kwsys/CommandLineArguments.cxx @@ -0,0 +1,768 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(CommandLineArguments.hxx) + +#include KWSYS_HEADER(Configure.hxx) +#include KWSYS_HEADER(String.hxx) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "CommandLineArguments.hxx.in" +# include "Configure.hxx.in" +# include "String.hxx.in" +#endif + +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef _MSC_VER +# pragma warning(disable : 4786) +#endif + +#if defined(__sgi) && !defined(__GNUC__) +# pragma set woff 1375 /* base class destructor not virtual */ +#endif + +#if 0 +# define CommandLineArguments_DEBUG(x) \ + std::cout << __LINE__ << " CLA: " << x << std::endl +#else +# define CommandLineArguments_DEBUG(x) +#endif + +namespace KWSYS_NAMESPACE { + +struct CommandLineArgumentsCallbackStructure +{ + const char* Argument; + int ArgumentType; + CommandLineArguments::CallbackType Callback; + void* CallData; + void* Variable; + int VariableType; + const char* Help; +}; + +class CommandLineArgumentsVectorOfStrings : public std::vector +{ +}; +class CommandLineArgumentsSetOfStrings : public std::set +{ +}; +class CommandLineArgumentsMapOfStrucs + : public std::map +{ +}; + +class CommandLineArgumentsInternal +{ +public: + CommandLineArgumentsInternal() + : UnknownArgumentCallback{ nullptr } + , ClientData{ nullptr } + , LastArgument{ 0 } + { + } + + typedef CommandLineArgumentsVectorOfStrings VectorOfStrings; + typedef CommandLineArgumentsMapOfStrucs CallbacksMap; + typedef kwsys::String String; + typedef CommandLineArgumentsSetOfStrings SetOfStrings; + + VectorOfStrings Argv; + String Argv0; + CallbacksMap Callbacks; + + CommandLineArguments::ErrorCallbackType UnknownArgumentCallback; + void* ClientData; + + VectorOfStrings::size_type LastArgument; + + VectorOfStrings UnusedArguments; +}; + +CommandLineArguments::CommandLineArguments() +{ + this->Internals = new CommandLineArguments::Internal; + this->Help = ""; + this->LineLength = 80; + this->StoreUnusedArgumentsFlag = false; +} + +CommandLineArguments::~CommandLineArguments() +{ + delete this->Internals; +} + +void CommandLineArguments::Initialize(int argc, const char* const argv[]) +{ + int cc; + + this->Initialize(); + this->Internals->Argv0 = argv[0]; + for (cc = 1; cc < argc; cc++) { + this->ProcessArgument(argv[cc]); + } +} + +void CommandLineArguments::Initialize(int argc, char* argv[]) +{ + this->Initialize(argc, static_cast(argv)); +} + +void CommandLineArguments::Initialize() +{ + this->Internals->Argv.clear(); + this->Internals->LastArgument = 0; +} + +void CommandLineArguments::ProcessArgument(const char* arg) +{ + this->Internals->Argv.push_back(arg); +} + +bool CommandLineArguments::GetMatchedArguments( + std::vector* matches, const std::string& arg) +{ + matches->clear(); + CommandLineArguments::Internal::CallbacksMap::iterator it; + + // Does the argument match to any we know about? 
+ for (it = this->Internals->Callbacks.begin(); + it != this->Internals->Callbacks.end(); it++) { + const CommandLineArguments::Internal::String& parg = it->first; + CommandLineArgumentsCallbackStructure* cs = &it->second; + if (cs->ArgumentType == CommandLineArguments::NO_ARGUMENT || + cs->ArgumentType == CommandLineArguments::SPACE_ARGUMENT) { + if (arg == parg) { + matches->push_back(parg); + } + } else if (arg.find(parg) == 0) { + matches->push_back(parg); + } + } + return !matches->empty(); +} + +int CommandLineArguments::Parse() +{ + std::vector::size_type cc; + std::vector matches; + if (this->StoreUnusedArgumentsFlag) { + this->Internals->UnusedArguments.clear(); + } + for (cc = 0; cc < this->Internals->Argv.size(); cc++) { + const std::string& arg = this->Internals->Argv[cc]; + CommandLineArguments_DEBUG("Process argument: " << arg); + this->Internals->LastArgument = cc; + if (this->GetMatchedArguments(&matches, arg)) { + // Ok, we found one or more arguments that match what user specified. + // Let's find the longest one. + CommandLineArguments::Internal::VectorOfStrings::size_type kk; + CommandLineArguments::Internal::VectorOfStrings::size_type maxidx = 0; + CommandLineArguments::Internal::String::size_type maxlen = 0; + for (kk = 0; kk < matches.size(); kk++) { + if (matches[kk].size() > maxlen) { + maxlen = matches[kk].size(); + maxidx = kk; + } + } + // So, the longest one is probably the right one. Now see if it has any + // additional value + CommandLineArgumentsCallbackStructure* cs = + &this->Internals->Callbacks[matches[maxidx]]; + const std::string& sarg = matches[maxidx]; + if (cs->Argument != sarg) { + abort(); + } + switch (cs->ArgumentType) { + case NO_ARGUMENT: + // No value + if (!this->PopulateVariable(cs, nullptr)) { + return 0; + } + break; + case SPACE_ARGUMENT: + if (cc == this->Internals->Argv.size() - 1) { + this->Internals->LastArgument--; + return 0; + } + CommandLineArguments_DEBUG("This is a space argument: " + << arg << " value: " + << this->Internals->Argv[cc + 1]); + // Value is the next argument + if (!this->PopulateVariable(cs, + this->Internals->Argv[cc + 1].c_str())) { + return 0; + } + cc++; + break; + case EQUAL_ARGUMENT: + if (arg.size() == sarg.size() || arg.at(sarg.size()) != '=') { + this->Internals->LastArgument--; + return 0; + } + // Value is everythng followed the '=' sign + if (!this->PopulateVariable(cs, arg.c_str() + sarg.size() + 1)) { + return 0; + } + break; + case CONCAT_ARGUMENT: + // Value is whatever follows the argument + if (!this->PopulateVariable(cs, arg.c_str() + sarg.size())) { + return 0; + } + break; + case MULTI_ARGUMENT: + // Suck in all the rest of the arguments + CommandLineArguments_DEBUG("This is a multi argument: " << arg); + for (cc++; cc < this->Internals->Argv.size(); ++cc) { + const std::string& marg = this->Internals->Argv[cc]; + CommandLineArguments_DEBUG( + " check multi argument value: " << marg); + if (this->GetMatchedArguments(&matches, marg)) { + CommandLineArguments_DEBUG("End of multi argument " + << arg << " with value: " << marg); + break; + } + CommandLineArguments_DEBUG( + " populate multi argument value: " << marg); + if (!this->PopulateVariable(cs, marg.c_str())) { + return 0; + } + } + if (cc != this->Internals->Argv.size()) { + CommandLineArguments_DEBUG("Again End of multi argument " << arg); + cc--; + continue; + } + break; + default: + std::cerr << "Got unknown argument type: \"" << cs->ArgumentType + << "\"" << std::endl; + this->Internals->LastArgument--; + return 0; + } + } else { + // 
Handle unknown arguments + if (this->Internals->UnknownArgumentCallback) { + if (!this->Internals->UnknownArgumentCallback( + arg.c_str(), this->Internals->ClientData)) { + this->Internals->LastArgument--; + return 0; + } + return 1; + } else if (this->StoreUnusedArgumentsFlag) { + CommandLineArguments_DEBUG("Store unused argument " << arg); + this->Internals->UnusedArguments.push_back(arg); + } else { + std::cerr << "Got unknown argument: \"" << arg << "\"" << std::endl; + this->Internals->LastArgument--; + return 0; + } + } + } + return 1; +} + +void CommandLineArguments::GetRemainingArguments(int* argc, char*** argv) +{ + CommandLineArguments::Internal::VectorOfStrings::size_type size = + this->Internals->Argv.size() - this->Internals->LastArgument + 1; + CommandLineArguments::Internal::VectorOfStrings::size_type cc; + + // Copy Argv0 as the first argument + char** args = new char*[size]; + args[0] = new char[this->Internals->Argv0.size() + 1]; + strcpy(args[0], this->Internals->Argv0.c_str()); + int cnt = 1; + + // Copy everything after the LastArgument, since that was not parsed. + for (cc = this->Internals->LastArgument + 1; + cc < this->Internals->Argv.size(); cc++) { + args[cnt] = new char[this->Internals->Argv[cc].size() + 1]; + strcpy(args[cnt], this->Internals->Argv[cc].c_str()); + cnt++; + } + *argc = cnt; + *argv = args; +} + +void CommandLineArguments::GetUnusedArguments(int* argc, char*** argv) +{ + CommandLineArguments::Internal::VectorOfStrings::size_type size = + this->Internals->UnusedArguments.size() + 1; + CommandLineArguments::Internal::VectorOfStrings::size_type cc; + + // Copy Argv0 as the first argument + char** args = new char*[size]; + args[0] = new char[this->Internals->Argv0.size() + 1]; + strcpy(args[0], this->Internals->Argv0.c_str()); + int cnt = 1; + + // Copy everything after the LastArgument, since that was not parsed. 
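
Parse() and the remaining-argument accessors are easier to read with a caller in mind. The following is a minimal, hypothetical usage sketch, assuming the default kwsys namespace and restricted to overloads defined in this file; option names such as --input are illustrative only.

// Minimal usage sketch for kwsys::CommandLineArguments (illustrative only).
#include <kwsys/CommandLineArguments.hxx> // assumes the default "kwsys" namespace

#include <iostream>
#include <string>

int main(int argc, char* argv[])
{
  kwsys::CommandLineArguments args;
  args.Initialize(argc, argv);

  std::string input;  // filled by "--input <value>" (SPACE_ARGUMENT)
  int verbosity = 0;  // filled by "--verbose=<n>"   (EQUAL_ARGUMENT)
  args.AddArgument("--input", kwsys::CommandLineArguments::SPACE_ARGUMENT,
                   &input, "Input file name");
  args.AddArgument("--verbose", kwsys::CommandLineArguments::EQUAL_ARGUMENT,
                   &verbosity, "Verbosity level, e.g. --verbose=2");

  if (!args.Parse()) {
    std::cerr << "Error parsing arguments" << std::endl;
    return 1;
  }

  // Arguments after the last parsed option are copied out and must be
  // released with DeleteRemainingArguments(), as implemented in this file.
  int rargc;
  char** rargv;
  args.GetRemainingArguments(&rargc, &rargv);
  std::cout << rargc - 1 << " remaining argument(s)" << std::endl;
  args.DeleteRemainingArguments(rargc, &rargv);
  return 0;
}
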
+ for (cc = 0; cc < this->Internals->UnusedArguments.size(); cc++) { + kwsys::String& str = this->Internals->UnusedArguments[cc]; + args[cnt] = new char[str.size() + 1]; + strcpy(args[cnt], str.c_str()); + cnt++; + } + *argc = cnt; + *argv = args; +} + +void CommandLineArguments::DeleteRemainingArguments(int argc, char*** argv) +{ + int cc; + for (cc = 0; cc < argc; ++cc) { + delete[](*argv)[cc]; + } + delete[] * argv; +} + +void CommandLineArguments::AddCallback(const char* argument, + ArgumentTypeEnum type, + CallbackType callback, void* call_data, + const char* help) +{ + CommandLineArgumentsCallbackStructure s; + s.Argument = argument; + s.ArgumentType = type; + s.Callback = callback; + s.CallData = call_data; + s.VariableType = CommandLineArguments::NO_VARIABLE_TYPE; + s.Variable = nullptr; + s.Help = help; + + this->Internals->Callbacks[argument] = s; + this->GenerateHelp(); +} + +void CommandLineArguments::AddArgument(const char* argument, + ArgumentTypeEnum type, + VariableTypeEnum vtype, void* variable, + const char* help) +{ + CommandLineArgumentsCallbackStructure s; + s.Argument = argument; + s.ArgumentType = type; + s.Callback = nullptr; + s.CallData = nullptr; + s.VariableType = vtype; + s.Variable = variable; + s.Help = help; + + this->Internals->Callbacks[argument] = s; + this->GenerateHelp(); +} + +#define CommandLineArgumentsAddArgumentMacro(type, ctype) \ + void CommandLineArguments::AddArgument(const char* argument, \ + ArgumentTypeEnum type, \ + ctype* variable, const char* help) \ + { \ + this->AddArgument(argument, type, CommandLineArguments::type##_TYPE, \ + variable, help); \ + } + +/* clang-format off */ +CommandLineArgumentsAddArgumentMacro(BOOL, bool) +CommandLineArgumentsAddArgumentMacro(INT, int) +CommandLineArgumentsAddArgumentMacro(DOUBLE, double) +CommandLineArgumentsAddArgumentMacro(STRING, char*) +CommandLineArgumentsAddArgumentMacro(STL_STRING, std::string) + +CommandLineArgumentsAddArgumentMacro(VECTOR_BOOL, std::vector) +CommandLineArgumentsAddArgumentMacro(VECTOR_INT, std::vector) +CommandLineArgumentsAddArgumentMacro(VECTOR_DOUBLE, std::vector) +CommandLineArgumentsAddArgumentMacro(VECTOR_STRING, std::vector) +CommandLineArgumentsAddArgumentMacro(VECTOR_STL_STRING, + std::vector) +#ifdef HELP_CLANG_FORMAT +; +#endif +/* clang-format on */ + +#define CommandLineArgumentsAddBooleanArgumentMacro(type, ctype) \ + void CommandLineArguments::AddBooleanArgument( \ + const char* argument, ctype* variable, const char* help) \ + { \ + this->AddArgument(argument, CommandLineArguments::NO_ARGUMENT, \ + CommandLineArguments::type##_TYPE, variable, help); \ + } + +/* clang-format off */ +CommandLineArgumentsAddBooleanArgumentMacro(BOOL, bool) +CommandLineArgumentsAddBooleanArgumentMacro(INT, int) +CommandLineArgumentsAddBooleanArgumentMacro(DOUBLE, double) +CommandLineArgumentsAddBooleanArgumentMacro(STRING, char*) +CommandLineArgumentsAddBooleanArgumentMacro(STL_STRING, std::string) +#ifdef HELP_CLANG_FORMAT +; +#endif +/* clang-format on */ + +void CommandLineArguments::SetClientData(void* client_data) +{ + this->Internals->ClientData = client_data; +} + +void CommandLineArguments::SetUnknownArgumentCallback( + CommandLineArguments::ErrorCallbackType callback) +{ + this->Internals->UnknownArgumentCallback = callback; +} + +const char* CommandLineArguments::GetHelp(const char* arg) +{ + CommandLineArguments::Internal::CallbacksMap::iterator it = + this->Internals->Callbacks.find(arg); + if (it == this->Internals->Callbacks.end()) { + return nullptr; + } + + // 
Since several arguments may point to the same argument, find the one this + // one point to if this one is pointing to another argument. + CommandLineArgumentsCallbackStructure* cs = &(it->second); + for (;;) { + CommandLineArguments::Internal::CallbacksMap::iterator hit = + this->Internals->Callbacks.find(cs->Help); + if (hit == this->Internals->Callbacks.end()) { + break; + } + cs = &(hit->second); + } + return cs->Help; +} + +void CommandLineArguments::SetLineLength(unsigned int ll) +{ + if (ll < 9 || ll > 1000) { + return; + } + this->LineLength = ll; + this->GenerateHelp(); +} + +const char* CommandLineArguments::GetArgv0() +{ + return this->Internals->Argv0.c_str(); +} + +unsigned int CommandLineArguments::GetLastArgument() +{ + return static_cast(this->Internals->LastArgument + 1); +} + +void CommandLineArguments::GenerateHelp() +{ + std::ostringstream str; + + // Collapse all arguments into the map of vectors of all arguments that do + // the same thing. + CommandLineArguments::Internal::CallbacksMap::iterator it; + typedef std::map + MapArgs; + MapArgs mp; + MapArgs::iterator mpit, smpit; + for (it = this->Internals->Callbacks.begin(); + it != this->Internals->Callbacks.end(); it++) { + CommandLineArgumentsCallbackStructure* cs = &(it->second); + mpit = mp.find(cs->Help); + if (mpit != mp.end()) { + mpit->second.insert(it->first); + mp[it->first].insert(it->first); + } else { + mp[it->first].insert(it->first); + } + } + for (it = this->Internals->Callbacks.begin(); + it != this->Internals->Callbacks.end(); it++) { + CommandLineArgumentsCallbackStructure* cs = &(it->second); + mpit = mp.find(cs->Help); + if (mpit != mp.end()) { + mpit->second.insert(it->first); + smpit = mp.find(it->first); + CommandLineArguments::Internal::SetOfStrings::iterator sit; + for (sit = smpit->second.begin(); sit != smpit->second.end(); sit++) { + mpit->second.insert(*sit); + } + mp.erase(smpit); + } else { + mp[it->first].insert(it->first); + } + } + + // Find the length of the longest string + CommandLineArguments::Internal::String::size_type maxlen = 0; + for (mpit = mp.begin(); mpit != mp.end(); mpit++) { + CommandLineArguments::Internal::SetOfStrings::iterator sit; + for (sit = mpit->second.begin(); sit != mpit->second.end(); sit++) { + CommandLineArguments::Internal::String::size_type clen = sit->size(); + switch (this->Internals->Callbacks[*sit].ArgumentType) { + case CommandLineArguments::NO_ARGUMENT: + clen += 0; + break; + case CommandLineArguments::CONCAT_ARGUMENT: + clen += 3; + break; + case CommandLineArguments::SPACE_ARGUMENT: + clen += 4; + break; + case CommandLineArguments::EQUAL_ARGUMENT: + clen += 4; + break; + } + if (clen > maxlen) { + maxlen = clen; + } + } + } + + CommandLineArguments::Internal::String::size_type maxstrlen = maxlen; + maxlen += 4; // For the space before and after the option + + // Print help for each option + for (mpit = mp.begin(); mpit != mp.end(); mpit++) { + CommandLineArguments::Internal::SetOfStrings::iterator sit; + for (sit = mpit->second.begin(); sit != mpit->second.end(); sit++) { + str << std::endl; + std::string argument = *sit; + switch (this->Internals->Callbacks[*sit].ArgumentType) { + case CommandLineArguments::NO_ARGUMENT: + break; + case CommandLineArguments::CONCAT_ARGUMENT: + argument += "opt"; + break; + case CommandLineArguments::SPACE_ARGUMENT: + argument += " opt"; + break; + case CommandLineArguments::EQUAL_ARGUMENT: + argument += "=opt"; + break; + case CommandLineArguments::MULTI_ARGUMENT: + argument += " opt opt ..."; + break; + } + 
str << " " << argument.substr(0, maxstrlen) << " "; + } + const char* ptr = this->Internals->Callbacks[mpit->first].Help; + size_t len = strlen(ptr); + int cnt = 0; + while (len > 0) { + // If argument with help is longer than line length, split it on previous + // space (or tab) and continue on the next line + CommandLineArguments::Internal::String::size_type cc; + for (cc = 0; ptr[cc]; cc++) { + if (*ptr == ' ' || *ptr == '\t') { + ptr++; + len--; + } + } + if (cnt > 0) { + for (cc = 0; cc < maxlen; cc++) { + str << " "; + } + } + CommandLineArguments::Internal::String::size_type skip = len; + if (skip > this->LineLength - maxlen) { + skip = this->LineLength - maxlen; + for (cc = skip - 1; cc > 0; cc--) { + if (ptr[cc] == ' ' || ptr[cc] == '\t') { + break; + } + } + if (cc != 0) { + skip = cc; + } + } + str.write(ptr, static_cast(skip)); + str << std::endl; + ptr += skip; + len -= skip; + cnt++; + } + } + /* + // This can help debugging help string + str << endl; + unsigned int cc; + for ( cc = 0; cc < this->LineLength; cc ++ ) + { + str << cc % 10; + } + str << endl; + */ + this->Help = str.str(); +} + +void CommandLineArguments::PopulateVariable(bool* variable, + const std::string& value) +{ + if (value == "1" || value == "ON" || value == "on" || value == "On" || + value == "TRUE" || value == "true" || value == "True" || + value == "yes" || value == "Yes" || value == "YES") { + *variable = true; + } else { + *variable = false; + } +} + +void CommandLineArguments::PopulateVariable(int* variable, + const std::string& value) +{ + char* res = nullptr; + *variable = static_cast(strtol(value.c_str(), &res, 10)); + // if ( res && *res ) + // { + // Can handle non-int + // } +} + +void CommandLineArguments::PopulateVariable(double* variable, + const std::string& value) +{ + char* res = nullptr; + *variable = strtod(value.c_str(), &res); + // if ( res && *res ) + // { + // Can handle non-double + // } +} + +void CommandLineArguments::PopulateVariable(char** variable, + const std::string& value) +{ + delete[] * variable; + *variable = new char[value.size() + 1]; + strcpy(*variable, value.c_str()); +} + +void CommandLineArguments::PopulateVariable(std::string* variable, + const std::string& value) +{ + *variable = value; +} + +void CommandLineArguments::PopulateVariable(std::vector* variable, + const std::string& value) +{ + bool val = false; + if (value == "1" || value == "ON" || value == "on" || value == "On" || + value == "TRUE" || value == "true" || value == "True" || + value == "yes" || value == "Yes" || value == "YES") { + val = true; + } + variable->push_back(val); +} + +void CommandLineArguments::PopulateVariable(std::vector* variable, + const std::string& value) +{ + char* res = nullptr; + variable->push_back(static_cast(strtol(value.c_str(), &res, 10))); + // if ( res && *res ) + // { + // Can handle non-int + // } +} + +void CommandLineArguments::PopulateVariable(std::vector* variable, + const std::string& value) +{ + char* res = nullptr; + variable->push_back(strtod(value.c_str(), &res)); + // if ( res && *res ) + // { + // Can handle non-int + // } +} + +void CommandLineArguments::PopulateVariable(std::vector* variable, + const std::string& value) +{ + char* var = new char[value.size() + 1]; + strcpy(var, value.c_str()); + variable->push_back(var); +} + +void CommandLineArguments::PopulateVariable(std::vector* variable, + const std::string& value) +{ + variable->push_back(value); +} + +bool CommandLineArguments::PopulateVariable( + CommandLineArgumentsCallbackStructure* cs, const 
char* value) +{ + // Call the callback + if (cs->Callback) { + if (!cs->Callback(cs->Argument, value, cs->CallData)) { + this->Internals->LastArgument--; + return 0; + } + } + CommandLineArguments_DEBUG("Set argument: " << cs->Argument << " to " + << value); + if (cs->Variable) { + std::string var = "1"; + if (value) { + var = value; + } + switch (cs->VariableType) { + case CommandLineArguments::INT_TYPE: + this->PopulateVariable(static_cast(cs->Variable), var); + break; + case CommandLineArguments::DOUBLE_TYPE: + this->PopulateVariable(static_cast(cs->Variable), var); + break; + case CommandLineArguments::STRING_TYPE: + this->PopulateVariable(static_cast(cs->Variable), var); + break; + case CommandLineArguments::STL_STRING_TYPE: + this->PopulateVariable(static_cast(cs->Variable), var); + break; + case CommandLineArguments::BOOL_TYPE: + this->PopulateVariable(static_cast(cs->Variable), var); + break; + case CommandLineArguments::VECTOR_BOOL_TYPE: + this->PopulateVariable(static_cast*>(cs->Variable), + var); + break; + case CommandLineArguments::VECTOR_INT_TYPE: + this->PopulateVariable(static_cast*>(cs->Variable), + var); + break; + case CommandLineArguments::VECTOR_DOUBLE_TYPE: + this->PopulateVariable(static_cast*>(cs->Variable), + var); + break; + case CommandLineArguments::VECTOR_STRING_TYPE: + this->PopulateVariable(static_cast*>(cs->Variable), + var); + break; + case CommandLineArguments::VECTOR_STL_STRING_TYPE: + this->PopulateVariable( + static_cast*>(cs->Variable), var); + break; + default: + std::cerr << "Got unknown variable type: \"" << cs->VariableType + << "\"" << std::endl; + this->Internals->LastArgument--; + return 0; + } + } + return 1; +} + +} // namespace KWSYS_NAMESPACE diff --git a/test/API/driver/kwsys/CommandLineArguments.hxx.in b/test/API/driver/kwsys/CommandLineArguments.hxx.in new file mode 100644 index 00000000000..7db90155640 --- /dev/null +++ b/test/API/driver/kwsys/CommandLineArguments.hxx.in @@ -0,0 +1,270 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_CommandLineArguments_hxx +#define @KWSYS_NAMESPACE@_CommandLineArguments_hxx + +#include <@KWSYS_NAMESPACE@/Configure.h> +#include <@KWSYS_NAMESPACE@/Configure.hxx> + +#include +#include + +namespace @KWSYS_NAMESPACE@ { + +class CommandLineArgumentsInternal; +struct CommandLineArgumentsCallbackStructure; + +/** \class CommandLineArguments + * \brief Command line arguments processing code. + * + * Find specified arguments with optional options and execute specified methods + * or set given variables. + * + * The two interfaces it knows are callback based and variable based. For + * callback based, you have to register callback for particular argument using + * AddCallback method. When that argument is passed, the callback will be + * called with argument, value, and call data. For boolean (NO_ARGUMENT) + * arguments, the value is "1". If the callback returns 0 the argument parsing + * will stop with an error. + * + * For the variable interface you associate variable with each argument. When + * the argument is specified, the variable is set to the specified value casted + * to the appropriate type. For boolean (NO_ARGUMENT), the value is "1". + * + * Both interfaces can be used at the same time. 
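+ *
+ * As an editor-added, illustrative sketch of the callback interface (the
+ * variable interface is shown in the "Example use" block below; the names
+ * onVerbose and --verbose are placeholders):
+ *
+ *   static int onVerbose(const char* argument, const char* value,
+ *                        void* call_data)
+ *   {
+ *     (void)argument; (void)value; (void)call_data;
+ *     return 1; // returning 0 makes Parse() stop with an error
+ *   }
+ *
+ *   // after arg.Initialize(argc, argv):
+ *   arg.AddCallback("--verbose", kwsys::CommandLineArguments::NO_ARGUMENT,
+ *                   onVerbose, nullptr, "Enable verbose output");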
+ * + * Possible argument types are: + * NO_ARGUMENT - The argument takes no value : --A + * CONCAT_ARGUMENT - The argument takes value after no space : --Aval + * SPACE_ARGUMENT - The argument takes value after space : --A val + * EQUAL_ARGUMENT - The argument takes value after equal : --A=val + * MULTI_ARGUMENT - The argument takes values after space : --A val1 val2 + * val3 ... + * + * Example use: + * + * kwsys::CommandLineArguments arg; + * arg.Initialize(argc, argv); + * typedef kwsys::CommandLineArguments argT; + * arg.AddArgument("--something", argT::EQUAL_ARGUMENT, &some_variable, + * "This is help string for --something"); + * if ( !arg.Parse() ) + * { + * std::cerr << "Problem parsing arguments" << std::endl; + * res = 1; + * } + * + */ + +class @KWSYS_NAMESPACE@_EXPORT CommandLineArguments +{ +public: + CommandLineArguments(); + ~CommandLineArguments(); + + CommandLineArguments(const CommandLineArguments&) = delete; + CommandLineArguments& operator=(const CommandLineArguments&) = delete; + + /** + * Various argument types. + */ + enum ArgumentTypeEnum + { + NO_ARGUMENT, + CONCAT_ARGUMENT, + SPACE_ARGUMENT, + EQUAL_ARGUMENT, + MULTI_ARGUMENT + }; + + /** + * Various variable types. When using the variable interface, this specifies + * what type the variable is. + */ + enum VariableTypeEnum + { + NO_VARIABLE_TYPE = 0, // The variable is not specified + INT_TYPE, // The variable is integer (int) + BOOL_TYPE, // The variable is boolean (bool) + DOUBLE_TYPE, // The variable is float (double) + STRING_TYPE, // The variable is string (char*) + STL_STRING_TYPE, // The variable is string (char*) + VECTOR_INT_TYPE, // The variable is integer (int) + VECTOR_BOOL_TYPE, // The variable is boolean (bool) + VECTOR_DOUBLE_TYPE, // The variable is float (double) + VECTOR_STRING_TYPE, // The variable is string (char*) + VECTOR_STL_STRING_TYPE, // The variable is string (char*) + LAST_VARIABLE_TYPE + }; + + /** + * Prototypes for callbacks for callback interface. + */ + typedef int (*CallbackType)(const char* argument, const char* value, + void* call_data); + typedef int (*ErrorCallbackType)(const char* argument, void* client_data); + + /** + * Initialize internal data structures. This should be called before parsing. + */ + void Initialize(int argc, const char* const argv[]); + void Initialize(int argc, char* argv[]); + + /** + * Initialize internal data structure and pass arguments one by one. This is + * convenience method for use from scripting languages where argc and argv + * are not available. + */ + void Initialize(); + void ProcessArgument(const char* arg); + + /** + * This method will parse arguments and call appropriate methods. + */ + int Parse(); + + /** + * This method will add a callback for a specific argument. The arguments to + * it are argument, argument type, callback method, and call data. The + * argument help specifies the help string used with this option. The + * callback and call_data can be skipped. + */ + void AddCallback(const char* argument, ArgumentTypeEnum type, + CallbackType callback, void* call_data, const char* help); + + /** + * Add handler for argument which is going to set the variable to the + * specified value. If the argument is specified, the option is casted to the + * appropriate type. 
+ */ + void AddArgument(const char* argument, ArgumentTypeEnum type, bool* variable, + const char* help); + void AddArgument(const char* argument, ArgumentTypeEnum type, int* variable, + const char* help); + void AddArgument(const char* argument, ArgumentTypeEnum type, + double* variable, const char* help); + void AddArgument(const char* argument, ArgumentTypeEnum type, + char** variable, const char* help); + void AddArgument(const char* argument, ArgumentTypeEnum type, + std::string* variable, const char* help); + + /** + * Add handler for argument which is going to set the variable to the + * specified value. If the argument is specified, the option is casted to the + * appropriate type. This will handle the multi argument values. + */ + void AddArgument(const char* argument, ArgumentTypeEnum type, + std::vector* variable, const char* help); + void AddArgument(const char* argument, ArgumentTypeEnum type, + std::vector* variable, const char* help); + void AddArgument(const char* argument, ArgumentTypeEnum type, + std::vector* variable, const char* help); + void AddArgument(const char* argument, ArgumentTypeEnum type, + std::vector* variable, const char* help); + void AddArgument(const char* argument, ArgumentTypeEnum type, + std::vector* variable, const char* help); + + /** + * Add handler for boolean argument. The argument does not take any option + * and if it is specified, the value of the variable is true/1, otherwise it + * is false/0. + */ + void AddBooleanArgument(const char* argument, bool* variable, + const char* help); + void AddBooleanArgument(const char* argument, int* variable, + const char* help); + void AddBooleanArgument(const char* argument, double* variable, + const char* help); + void AddBooleanArgument(const char* argument, char** variable, + const char* help); + void AddBooleanArgument(const char* argument, std::string* variable, + const char* help); + + /** + * Set the callbacks for error handling. + */ + void SetClientData(void* client_data); + void SetUnknownArgumentCallback(ErrorCallbackType callback); + + /** + * Get remaining arguments. It allocates space for argv, so you have to call + * delete[] on it. + */ + void GetRemainingArguments(int* argc, char*** argv); + void DeleteRemainingArguments(int argc, char*** argv); + + /** + * If StoreUnusedArguments is set to true, then all unknown arguments will be + * stored and the user can access the modified argc, argv without known + * arguments. + */ + void StoreUnusedArguments(bool val) { this->StoreUnusedArgumentsFlag = val; } + void GetUnusedArguments(int* argc, char*** argv); + + /** + * Return string containing help. If the argument is specified, only return + * help for that argument. + */ + const char* GetHelp() { return this->Help.c_str(); } + const char* GetHelp(const char* arg); + + /** + * Get / Set the help line length. This length is used when generating the + * help page. Default length is 80. + */ + void SetLineLength(unsigned int); + unsigned int GetLineLength(); + + /** + * Get the executable name (argv0). This is only available when using + * Initialize with argc/argv. + */ + const char* GetArgv0(); + + /** + * Get index of the last argument parsed. This is the last argument that was + * parsed ok in the original argc/argv list. + */ + unsigned int GetLastArgument(); + +protected: + void GenerateHelp(); + + //! 
This is internal method that registers variable with argument + void AddArgument(const char* argument, ArgumentTypeEnum type, + VariableTypeEnum vtype, void* variable, const char* help); + + bool GetMatchedArguments(std::vector* matches, + const std::string& arg); + + //! Populate individual variables + bool PopulateVariable(CommandLineArgumentsCallbackStructure* cs, + const char* value); + + //! Populate individual variables of type ... + void PopulateVariable(bool* variable, const std::string& value); + void PopulateVariable(int* variable, const std::string& value); + void PopulateVariable(double* variable, const std::string& value); + void PopulateVariable(char** variable, const std::string& value); + void PopulateVariable(std::string* variable, const std::string& value); + void PopulateVariable(std::vector* variable, const std::string& value); + void PopulateVariable(std::vector* variable, const std::string& value); + void PopulateVariable(std::vector* variable, + const std::string& value); + void PopulateVariable(std::vector* variable, + const std::string& value); + void PopulateVariable(std::vector* variable, + const std::string& value); + + typedef CommandLineArgumentsInternal Internal; + Internal* Internals; + std::string Help; + + unsigned int LineLength; + + bool StoreUnusedArgumentsFlag; +}; + +} // namespace @KWSYS_NAMESPACE@ + +#endif diff --git a/test/API/driver/kwsys/Configure.h.in b/test/API/driver/kwsys/Configure.h.in new file mode 100644 index 00000000000..5323c57bebe --- /dev/null +++ b/test/API/driver/kwsys/Configure.h.in @@ -0,0 +1,89 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_Configure_h +#define @KWSYS_NAMESPACE@_Configure_h + +/* If we are building a kwsys .c or .cxx file, let it use the kwsys + namespace. When not building a kwsys source file these macros are + temporarily defined inside the headers that use them. */ +#if defined(KWSYS_NAMESPACE) +# define kwsys_ns(x) @KWSYS_NAMESPACE@##x +# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT +#endif + +/* Disable some warnings inside kwsys source files. */ +#if defined(KWSYS_NAMESPACE) +# if defined(__BORLANDC__) +# pragma warn - 8027 /* function not inlined. */ +# endif +# if defined(__INTEL_COMPILER) +# pragma warning(disable : 1572) /* floating-point equality test */ +# endif +# if defined(__sgi) && !defined(__GNUC__) +# pragma set woff 3970 /* pointer to int conversion */ +# pragma set woff 3968 /* 64 bit conversion */ +# endif +#endif + +/* Whether kwsys namespace is "kwsys". */ +#define @KWSYS_NAMESPACE@_NAME_IS_KWSYS @KWSYS_NAME_IS_KWSYS@ + +/* Setup the export macro. */ +#if @KWSYS_BUILD_SHARED@ +# if defined(_WIN32) || defined(__CYGWIN__) +# if defined(@KWSYS_NAMESPACE@_EXPORTS) +# define @KWSYS_NAMESPACE@_EXPORT __declspec(dllexport) +# else +# define @KWSYS_NAMESPACE@_EXPORT __declspec(dllimport) +# endif +# elif __GNUC__ >= 4 +# define @KWSYS_NAMESPACE@_EXPORT __attribute__((visibility("default"))) +# else +# define @KWSYS_NAMESPACE@_EXPORT +# endif +#else +# define @KWSYS_NAMESPACE@_EXPORT +#endif + +/* Enable warnings that are off by default but are useful. */ +#if !defined(@KWSYS_NAMESPACE@_NO_WARNING_ENABLE) +# if defined(_MSC_VER) +# pragma warning(default : 4263) /* no override, call convention differs \ + */ +# endif +#endif + +/* Disable warnings that are on by default but occur in valid code. 
*/ +#if !defined(@KWSYS_NAMESPACE@_NO_WARNING_DISABLE) +# if defined(_MSC_VER) +# pragma warning(disable : 4097) /* typedef is synonym for class */ +# pragma warning(disable : 4127) /* conditional expression is constant */ +# pragma warning(disable : 4244) /* possible loss in conversion */ +# pragma warning(disable : 4251) /* missing DLL-interface */ +# pragma warning(disable : 4305) /* truncation from type1 to type2 */ +# pragma warning(disable : 4309) /* truncation of constant value */ +# pragma warning(disable : 4514) /* unreferenced inline function */ +# pragma warning(disable : 4706) /* assignment in conditional expression \ + */ +# pragma warning(disable : 4710) /* function not inlined */ +# pragma warning(disable : 4786) /* identifier truncated in debug info */ +# endif +# if defined(__BORLANDC__) && !defined(__cplusplus) +/* Code has no effect; raised by winnt.h in C (not C++) when ignoring an + unused parameter using "(param)" syntax (i.e. no cast to void). */ +# pragma warn - 8019 +# endif +#endif + +/* MSVC 6.0 in release mode will warn about code it produces with its + optimizer. Disable the warnings specifically for this + configuration. Real warnings will be revealed by a debug build or + by other compilers. */ +#if !defined(@KWSYS_NAMESPACE@_NO_WARNING_DISABLE_BOGUS) +# if defined(_MSC_VER) && (_MSC_VER < 1300) && defined(NDEBUG) +# pragma warning(disable : 4701) /* Variable may be used uninitialized. */ +# pragma warning(disable : 4702) /* Unreachable code. */ +# endif +#endif + +#endif diff --git a/test/API/driver/kwsys/Configure.hxx.in b/test/API/driver/kwsys/Configure.hxx.in new file mode 100644 index 00000000000..29a2dd11e39 --- /dev/null +++ b/test/API/driver/kwsys/Configure.hxx.in @@ -0,0 +1,65 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_Configure_hxx +#define @KWSYS_NAMESPACE@_Configure_hxx + +/* Include C configuration. */ +#include <@KWSYS_NAMESPACE@/Configure.h> + +/* Whether wstring is available. */ +#define @KWSYS_NAMESPACE@_STL_HAS_WSTRING @KWSYS_STL_HAS_WSTRING@ +/* Whether is available. */ +#define @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H \ + @KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H@ +/* Whether the translation map is available or not. 
*/ +#define @KWSYS_NAMESPACE@_SYSTEMTOOLS_USE_TRANSLATION_MAP \ + @KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP@ + +#if defined(__SUNPRO_CC) && __SUNPRO_CC > 0x5130 && defined(__has_attribute) +# define @KWSYS_NAMESPACE@__has_cpp_attribute(x) __has_attribute(x) +#elif defined(__has_cpp_attribute) +# define @KWSYS_NAMESPACE@__has_cpp_attribute(x) __has_cpp_attribute(x) +#else +# define @KWSYS_NAMESPACE@__has_cpp_attribute(x) 0 +#endif + +#if __cplusplus >= 201103L +# define @KWSYS_NAMESPACE@_NULLPTR nullptr +#else +# define @KWSYS_NAMESPACE@_NULLPTR 0 +#endif + +#ifndef @KWSYS_NAMESPACE@_FALLTHROUGH +# if __cplusplus >= 201703L && \ + @KWSYS_NAMESPACE@__has_cpp_attribute(fallthrough) +# define @KWSYS_NAMESPACE@_FALLTHROUGH [[fallthrough]] +# elif __cplusplus >= 201103L && \ + @KWSYS_NAMESPACE@__has_cpp_attribute(gnu::fallthrough) +# define @KWSYS_NAMESPACE@_FALLTHROUGH [[gnu::fallthrough]] +# elif __cplusplus >= 201103L && \ + @KWSYS_NAMESPACE@__has_cpp_attribute(clang::fallthrough) +# define @KWSYS_NAMESPACE@_FALLTHROUGH [[clang::fallthrough]] +# endif +#endif +#ifndef @KWSYS_NAMESPACE@_FALLTHROUGH +# define @KWSYS_NAMESPACE@_FALLTHROUGH static_cast(0) +#endif + +#undef @KWSYS_NAMESPACE@__has_cpp_attribute + +/* If building a C++ file in kwsys itself, give the source file + access to the macros without a configured namespace. */ +#if defined(KWSYS_NAMESPACE) +# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# define kwsys @KWSYS_NAMESPACE@ +# endif +# define KWSYS_NAME_IS_KWSYS @KWSYS_NAMESPACE@_NAME_IS_KWSYS +# define KWSYS_STL_HAS_WSTRING @KWSYS_NAMESPACE@_STL_HAS_WSTRING +# define KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H \ + @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H +# define KWSYS_FALLTHROUGH @KWSYS_NAMESPACE@_FALLTHROUGH +# define KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP \ + @KWSYS_NAMESPACE@_SYSTEMTOOLS_USE_TRANSLATION_MAP +#endif + +#endif diff --git a/test/API/driver/kwsys/ConsoleBuf.hxx.in b/test/API/driver/kwsys/ConsoleBuf.hxx.in new file mode 100644 index 00000000000..49dbdf7ea5f --- /dev/null +++ b/test/API/driver/kwsys/ConsoleBuf.hxx.in @@ -0,0 +1,398 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_ConsoleBuf_hxx +#define @KWSYS_NAMESPACE@_ConsoleBuf_hxx + +#include <@KWSYS_NAMESPACE@/Configure.hxx> + +#include <@KWSYS_NAMESPACE@/Encoding.hxx> + +#include +#include +#include +#include +#include +#include + +#if defined(_WIN32) +# include +# if __cplusplus >= 201103L +# include +# endif +#endif + +namespace @KWSYS_NAMESPACE@ { +#if defined(_WIN32) + +template > +class BasicConsoleBuf : public std::basic_streambuf +{ +public: + typedef typename Traits::int_type int_type; + typedef typename Traits::char_type char_type; + + class Manager + { + public: + Manager(std::basic_ios& ios, const bool err = false) + : m_consolebuf(0) + { + m_ios = &ios; + try { + m_consolebuf = new BasicConsoleBuf(err); + m_streambuf = m_ios->rdbuf(m_consolebuf); + } catch (const std::runtime_error& ex) { + std::cerr << "Failed to create ConsoleBuf!" 
<< std::endl + << ex.what() << std::endl; + }; + } + + BasicConsoleBuf* GetConsoleBuf() { return m_consolebuf; } + + void SetUTF8Pipes() + { + if (m_consolebuf) { + m_consolebuf->input_pipe_codepage = CP_UTF8; + m_consolebuf->output_pipe_codepage = CP_UTF8; + m_consolebuf->activateCodepageChange(); + } + } + + ~Manager() + { + if (m_consolebuf) { + delete m_consolebuf; + m_ios->rdbuf(m_streambuf); + } + } + + private: + std::basic_ios* m_ios; + std::basic_streambuf* m_streambuf; + BasicConsoleBuf* m_consolebuf; + }; + + BasicConsoleBuf(const bool err = false) + : flush_on_newline(true) + , input_pipe_codepage(0) + , output_pipe_codepage(0) + , input_file_codepage(CP_UTF8) + , output_file_codepage(CP_UTF8) + , m_consolesCodepage(0) + { + m_hInput = ::GetStdHandle(STD_INPUT_HANDLE); + checkHandle(true, "STD_INPUT_HANDLE"); + if (!setActiveInputCodepage()) { + throw std::runtime_error("setActiveInputCodepage failed!"); + } + m_hOutput = err ? ::GetStdHandle(STD_ERROR_HANDLE) + : ::GetStdHandle(STD_OUTPUT_HANDLE); + checkHandle(false, err ? "STD_ERROR_HANDLE" : "STD_OUTPUT_HANDLE"); + if (!setActiveOutputCodepage()) { + throw std::runtime_error("setActiveOutputCodepage failed!"); + } + _setg(); + _setp(); + } + + ~BasicConsoleBuf() throw() { sync(); } + + bool activateCodepageChange() + { + return setActiveInputCodepage() && setActiveOutputCodepage(); + } + +protected: + virtual int sync() + { + bool success = true; + if (m_hInput && m_isConsoleInput && + ::FlushConsoleInputBuffer(m_hInput) == 0) { + success = false; + } + if (m_hOutput && !m_obuffer.empty()) { + const std::wstring wbuffer = getBuffer(m_obuffer); + if (m_isConsoleOutput) { + DWORD charsWritten; + success = + ::WriteConsoleW(m_hOutput, wbuffer.c_str(), (DWORD)wbuffer.size(), + &charsWritten, nullptr) == 0 + ? false + : true; + } else { + DWORD bytesWritten; + std::string buffer; + success = encodeOutputBuffer(wbuffer, buffer); + if (success) { + success = + ::WriteFile(m_hOutput, buffer.c_str(), (DWORD)buffer.size(), + &bytesWritten, nullptr) == 0 + ? false + : true; + } + } + } + m_ibuffer.clear(); + m_obuffer.clear(); + _setg(); + _setp(); + return success ? 
0 : -1; + } + + virtual int_type underflow() + { + if (this->gptr() >= this->egptr()) { + if (!m_hInput) { + _setg(true); + return Traits::eof(); + } + if (m_isConsoleInput) { + // ReadConsole doesn't tell if there's more input available + // don't support reading more characters than this + wchar_t wbuffer[8192]; + DWORD charsRead; + if (ReadConsoleW(m_hInput, wbuffer, + (sizeof(wbuffer) / sizeof(wbuffer[0])), &charsRead, + nullptr) == 0 || + charsRead == 0) { + _setg(true); + return Traits::eof(); + } + setBuffer(std::wstring(wbuffer, charsRead), m_ibuffer); + } else { + std::wstring wbuffer; + std::string strbuffer; + DWORD bytesRead; + LARGE_INTEGER size; + if (GetFileSizeEx(m_hInput, &size) == 0) { + _setg(true); + return Traits::eof(); + } + char* buffer = new char[size.LowPart]; + while (ReadFile(m_hInput, buffer, size.LowPart, &bytesRead, nullptr) == + 0) { + if (GetLastError() == ERROR_MORE_DATA) { + strbuffer += std::string(buffer, bytesRead); + continue; + } + _setg(true); + delete[] buffer; + return Traits::eof(); + } + if (bytesRead > 0) { + strbuffer += std::string(buffer, bytesRead); + } + delete[] buffer; + if (!decodeInputBuffer(strbuffer, wbuffer)) { + _setg(true); + return Traits::eof(); + } + setBuffer(wbuffer, m_ibuffer); + } + _setg(); + } + return Traits::to_int_type(*this->gptr()); + } + + virtual int_type overflow(int_type ch = Traits::eof()) + { + if (!Traits::eq_int_type(ch, Traits::eof())) { + char_type chr = Traits::to_char_type(ch); + m_obuffer += chr; + if ((flush_on_newline && Traits::eq(chr, '\n')) || + Traits::eq_int_type(ch, 0x00)) { + sync(); + } + return ch; + } + sync(); + return Traits::eof(); + } + +public: + bool flush_on_newline; + UINT input_pipe_codepage; + UINT output_pipe_codepage; + UINT input_file_codepage; + UINT output_file_codepage; + +private: + HANDLE m_hInput; + HANDLE m_hOutput; + std::basic_string m_ibuffer; + std::basic_string m_obuffer; + bool m_isConsoleInput; + bool m_isConsoleOutput; + UINT m_activeInputCodepage; + UINT m_activeOutputCodepage; + UINT m_consolesCodepage; + void checkHandle(bool input, std::string handleName) + { + if ((input && m_hInput == INVALID_HANDLE_VALUE) || + (!input && m_hOutput == INVALID_HANDLE_VALUE)) { + std::string errmsg = + "GetStdHandle(" + handleName + ") returned INVALID_HANDLE_VALUE"; +# if __cplusplus >= 201103L + throw std::system_error(::GetLastError(), std::system_category(), + errmsg); +# else + throw std::runtime_error(errmsg); +# endif + } + } + UINT getConsolesCodepage() + { + if (!m_consolesCodepage) { + m_consolesCodepage = GetConsoleCP(); + if (!m_consolesCodepage) { + m_consolesCodepage = GetACP(); + } + } + return m_consolesCodepage; + } + bool setActiveInputCodepage() + { + m_isConsoleInput = false; + switch (GetFileType(m_hInput)) { + case FILE_TYPE_DISK: + m_activeInputCodepage = input_file_codepage; + break; + case FILE_TYPE_CHAR: + // Check for actual console. + DWORD consoleMode; + m_isConsoleInput = + GetConsoleMode(m_hInput, &consoleMode) == 0 ? 
false : true; + if (m_isConsoleInput) { + break; + } + @KWSYS_NAMESPACE@_FALLTHROUGH; + case FILE_TYPE_PIPE: + m_activeInputCodepage = input_pipe_codepage; + break; + default: + return false; + } + if (!m_isConsoleInput && m_activeInputCodepage == 0) { + m_activeInputCodepage = getConsolesCodepage(); + } + return true; + } + bool setActiveOutputCodepage() + { + m_isConsoleOutput = false; + switch (GetFileType(m_hOutput)) { + case FILE_TYPE_DISK: + m_activeOutputCodepage = output_file_codepage; + break; + case FILE_TYPE_CHAR: + // Check for actual console. + DWORD consoleMode; + m_isConsoleOutput = + GetConsoleMode(m_hOutput, &consoleMode) == 0 ? false : true; + if (m_isConsoleOutput) { + break; + } + @KWSYS_NAMESPACE@_FALLTHROUGH; + case FILE_TYPE_PIPE: + m_activeOutputCodepage = output_pipe_codepage; + break; + default: + return false; + } + if (!m_isConsoleOutput && m_activeOutputCodepage == 0) { + m_activeOutputCodepage = getConsolesCodepage(); + } + return true; + } + void _setg(bool empty = false) + { + if (!empty) { + this->setg((char_type*)m_ibuffer.data(), (char_type*)m_ibuffer.data(), + (char_type*)m_ibuffer.data() + m_ibuffer.size()); + } else { + this->setg((char_type*)m_ibuffer.data(), + (char_type*)m_ibuffer.data() + m_ibuffer.size(), + (char_type*)m_ibuffer.data() + m_ibuffer.size()); + } + } + void _setp() + { + this->setp((char_type*)m_obuffer.data(), + (char_type*)m_obuffer.data() + m_obuffer.size()); + } + bool encodeOutputBuffer(const std::wstring wbuffer, std::string& buffer) + { + if (wbuffer.size() == 0) { + buffer = std::string(); + return true; + } + const int length = + WideCharToMultiByte(m_activeOutputCodepage, 0, wbuffer.c_str(), + (int)wbuffer.size(), nullptr, 0, nullptr, nullptr); + char* buf = new char[length]; + const bool success = + WideCharToMultiByte(m_activeOutputCodepage, 0, wbuffer.c_str(), + (int)wbuffer.size(), buf, length, nullptr, + nullptr) > 0 + ? true + : false; + buffer = std::string(buf, length); + delete[] buf; + return success; + } + bool decodeInputBuffer(const std::string buffer, std::wstring& wbuffer) + { + size_t length = buffer.length(); + if (length == 0) { + wbuffer = std::wstring(); + return true; + } + int actualCodepage = m_activeInputCodepage; + const char BOM_UTF8[] = { char(0xEF), char(0xBB), char(0xBF) }; + const char* data = buffer.data(); + const size_t BOMsize = sizeof(BOM_UTF8); + if (length >= BOMsize && std::memcmp(data, BOM_UTF8, BOMsize) == 0) { + // PowerShell uses UTF-8 with BOM for pipes + actualCodepage = CP_UTF8; + data += BOMsize; + length -= BOMsize; + } + const size_t wlength = static_cast(MultiByteToWideChar( + actualCodepage, 0, data, static_cast(length), nullptr, 0)); + wchar_t* wbuf = new wchar_t[wlength]; + const bool success = + MultiByteToWideChar(actualCodepage, 0, data, static_cast(length), + wbuf, static_cast(wlength)) > 0 + ? 
true + : false; + wbuffer = std::wstring(wbuf, wlength); + delete[] wbuf; + return success; + } + std::wstring getBuffer(const std::basic_string buffer) + { + return Encoding::ToWide(buffer); + } + std::wstring getBuffer(const std::basic_string buffer) + { + return buffer; + } + void setBuffer(const std::wstring wbuffer, std::basic_string& target) + { + target = Encoding::ToNarrow(wbuffer); + } + void setBuffer(const std::wstring wbuffer, + std::basic_string& target) + { + target = wbuffer; + } + +}; // BasicConsoleBuf class + +typedef BasicConsoleBuf ConsoleBuf; +typedef BasicConsoleBuf WConsoleBuf; + +#endif +} // KWSYS_NAMESPACE + +#endif diff --git a/test/API/driver/kwsys/Copyright.txt b/test/API/driver/kwsys/Copyright.txt new file mode 100644 index 00000000000..33d7fb47266 --- /dev/null +++ b/test/API/driver/kwsys/Copyright.txt @@ -0,0 +1,38 @@ +KWSys - Kitware System Library +Copyright 2000-2016 Kitware, Inc. and Contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +* Neither the name of Kitware, Inc. nor the names of Contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ + +The following individuals and institutions are among the Contributors: + +* Insight Software Consortium + +See version control history for details of individual contributions. diff --git a/test/API/driver/kwsys/Directory.cxx b/test/API/driver/kwsys/Directory.cxx new file mode 100644 index 00000000000..e3791826be7 --- /dev/null +++ b/test/API/driver/kwsys/Directory.cxx @@ -0,0 +1,236 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(Directory.hxx) + +#include KWSYS_HEADER(Configure.hxx) + +#include KWSYS_HEADER(Encoding.hxx) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. 
+#if 0 +# include "Configure.hxx.in" +# include "Directory.hxx.in" +# include "Encoding.hxx.in" +#endif + +#include +#include + +namespace KWSYS_NAMESPACE { + +class DirectoryInternals +{ +public: + // Array of Files + std::vector Files; + + // Path to Open'ed directory + std::string Path; +}; + +Directory::Directory() +{ + this->Internal = new DirectoryInternals; +} + +Directory::~Directory() +{ + delete this->Internal; +} + +unsigned long Directory::GetNumberOfFiles() const +{ + return static_cast(this->Internal->Files.size()); +} + +const char* Directory::GetFile(unsigned long dindex) const +{ + if (dindex >= this->Internal->Files.size()) { + return nullptr; + } + return this->Internal->Files[dindex].c_str(); +} + +const char* Directory::GetPath() const +{ + return this->Internal->Path.c_str(); +} + +void Directory::Clear() +{ + this->Internal->Path.resize(0); + this->Internal->Files.clear(); +} + +} // namespace KWSYS_NAMESPACE + +// First Windows platforms + +#if defined(_WIN32) && !defined(__CYGWIN__) +# include + +# include +# include +# include +# include +# include +# include +# include +# include + +// Wide function names can vary depending on compiler: +# ifdef __BORLANDC__ +# define _wfindfirst_func __wfindfirst +# define _wfindnext_func __wfindnext +# else +# define _wfindfirst_func _wfindfirst +# define _wfindnext_func _wfindnext +# endif + +namespace KWSYS_NAMESPACE { + +bool Directory::Load(const std::string& name) +{ + this->Clear(); +# if (defined(_MSC_VER) && _MSC_VER < 1300) || defined(__BORLANDC__) + // Older Visual C++ and Embarcadero compilers. + long srchHandle; +# else // Newer Visual C++ + intptr_t srchHandle; +# endif + char* buf; + size_t n = name.size(); + if (name.back() == '/' || name.back() == '\\') { + buf = new char[n + 1 + 1]; + sprintf(buf, "%s*", name.c_str()); + } else { + // Make sure the slashes in the wildcard suffix are consistent with the + // rest of the path + buf = new char[n + 2 + 1]; + if (name.find('\\') != std::string::npos) { + sprintf(buf, "%s\\*", name.c_str()); + } else { + sprintf(buf, "%s/*", name.c_str()); + } + } + struct _wfinddata_t data; // data of current file + + // Now put them into the file array + srchHandle = _wfindfirst_func( + (wchar_t*)Encoding::ToWindowsExtendedPath(buf).c_str(), &data); + delete[] buf; + + if (srchHandle == -1) { + return 0; + } + + // Loop through names + do { + this->Internal->Files.push_back(Encoding::ToNarrow(data.name)); + } while (_wfindnext_func(srchHandle, &data) != -1); + this->Internal->Path = name; + return _findclose(srchHandle) != -1; +} + +unsigned long Directory::GetNumberOfFilesInDirectory(const std::string& name) +{ +# if (defined(_MSC_VER) && _MSC_VER < 1300) || defined(__BORLANDC__) + // Older Visual C++ and Embarcadero compilers. 
+ long srchHandle; +# else // Newer Visual C++ + intptr_t srchHandle; +# endif + char* buf; + size_t n = name.size(); + if (name.back() == '/') { + buf = new char[n + 1 + 1]; + sprintf(buf, "%s*", name.c_str()); + } else { + buf = new char[n + 2 + 1]; + sprintf(buf, "%s/*", name.c_str()); + } + struct _wfinddata_t data; // data of current file + + // Now put them into the file array + srchHandle = + _wfindfirst_func((wchar_t*)Encoding::ToWide(buf).c_str(), &data); + delete[] buf; + + if (srchHandle == -1) { + return 0; + } + + // Loop through names + unsigned long count = 0; + do { + count++; + } while (_wfindnext_func(srchHandle, &data) != -1); + _findclose(srchHandle); + return count; +} + +} // namespace KWSYS_NAMESPACE + +#else + +// Now the POSIX style directory access + +# include + +# include + +// PGI with glibc has trouble with dirent and large file support: +// http://www.pgroup.com/userforum/viewtopic.php? +// p=1992&sid=f16167f51964f1a68fe5041b8eb213b6 +// Work around the problem by mapping dirent the same way as readdir. +# if defined(__PGI) && defined(__GLIBC__) +# define kwsys_dirent_readdir dirent +# define kwsys_dirent_readdir64 dirent64 +# define kwsys_dirent kwsys_dirent_lookup(readdir) +# define kwsys_dirent_lookup(x) kwsys_dirent_lookup_delay(x) +# define kwsys_dirent_lookup_delay(x) kwsys_dirent_##x +# else +# define kwsys_dirent dirent +# endif + +namespace KWSYS_NAMESPACE { + +bool Directory::Load(const std::string& name) +{ + this->Clear(); + + DIR* dir = opendir(name.c_str()); + + if (!dir) { + return 0; + } + + for (kwsys_dirent* d = readdir(dir); d; d = readdir(dir)) { + this->Internal->Files.push_back(d->d_name); + } + this->Internal->Path = name; + closedir(dir); + return 1; +} + +unsigned long Directory::GetNumberOfFilesInDirectory(const std::string& name) +{ + DIR* dir = opendir(name.c_str()); + + if (!dir) { + return 0; + } + + unsigned long count = 0; + for (kwsys_dirent* d = readdir(dir); d; d = readdir(dir)) { + count++; + } + closedir(dir); + return count; +} + +} // namespace KWSYS_NAMESPACE + +#endif diff --git a/test/API/driver/kwsys/Directory.hxx.in b/test/API/driver/kwsys/Directory.hxx.in new file mode 100644 index 00000000000..ad8c51b86e2 --- /dev/null +++ b/test/API/driver/kwsys/Directory.hxx.in @@ -0,0 +1,72 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_Directory_hxx +#define @KWSYS_NAMESPACE@_Directory_hxx + +#include <@KWSYS_NAMESPACE@/Configure.h> + +#include + +namespace @KWSYS_NAMESPACE@ { + +class DirectoryInternals; + +/** \class Directory + * \brief Portable directory/filename traversal. + * + * Directory provides a portable way of finding the names of the files + * in a system directory. + * + * Directory currently works with Windows and Unix operating systems. + */ +class @KWSYS_NAMESPACE@_EXPORT Directory +{ +public: + Directory(); + ~Directory(); + + /** + * Load the specified directory and load the names of the files + * in that directory. 0 is returned if the directory can not be + * opened, 1 if it is opened. + */ + bool Load(const std::string&); + + /** + * Return the number of files in the current directory. + */ + unsigned long GetNumberOfFiles() const; + + /** + * Return the number of files in the specified directory. + * A higher performance static method. 
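+ * For example (illustrative path), GetNumberOfFilesInDirectory("/tmp")
+ * returns the number of directory entries without building a file-name
+ * list; the count includes whatever the platform listing returns,
+ * typically "." and "..".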
+ */ + static unsigned long GetNumberOfFilesInDirectory(const std::string&); + + /** + * Return the file at the given index, the indexing is 0 based + */ + const char* GetFile(unsigned long) const; + + /** + * Return the path to Open'ed directory + */ + const char* GetPath() const; + + /** + * Clear the internal structure. Used internally at beginning of Load(...) + * to clear the cache. + */ + void Clear(); + +private: + // Private implementation details. + DirectoryInternals* Internal; + + Directory(const Directory&); // Not implemented. + void operator=(const Directory&); // Not implemented. +}; // End Class: Directory + +} // namespace @KWSYS_NAMESPACE@ + +#endif diff --git a/test/API/driver/kwsys/DynamicLoader.cxx b/test/API/driver/kwsys/DynamicLoader.cxx new file mode 100644 index 00000000000..a4b864118ce --- /dev/null +++ b/test/API/driver/kwsys/DynamicLoader.cxx @@ -0,0 +1,495 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#if defined(_WIN32) +# define NOMINMAX // hide min,max to not conflict with +#endif + +#include "kwsysPrivate.h" +#include KWSYS_HEADER(DynamicLoader.hxx) + +#include KWSYS_HEADER(Configure.hxx) +#include KWSYS_HEADER(Encoding.hxx) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "Configure.hxx.in" +# include "DynamicLoader.hxx.in" +#endif + +// This file actually contains several different implementations: +// * NOOP for environments without dynamic libs +// * HP machines which uses shl_load +// * Mac OS X 10.2.x and earlier which uses NSLinkModule +// * Windows which uses LoadLibrary +// * BeOS / Haiku +// * FreeMiNT for Atari +// * Default implementation for *NIX systems (including Mac OS X 10.3 and +// later) which use dlopen +// +// Each part of the ifdef contains a complete implementation for +// the static methods of DynamicLoader. + +#define CHECK_OPEN_FLAGS(var, supported, ret) \ + do { \ + /* Check for unknown flags. */ \ + if ((var & AllOpenFlags) != var) { \ + return ret; \ + } \ + \ + /* Check for unsupported flags. 
*/ \ + if ((var & (supported)) != var) { \ + return ret; \ + } \ + } while (0) + +namespace KWSYS_NAMESPACE { + +DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( + const std::string& libname) +{ + return DynamicLoader::OpenLibrary(libname, 0); +} +} + +#if !KWSYS_SUPPORTS_SHARED_LIBS +// Implementation for environments without dynamic libs +# include // for strerror() + +namespace KWSYS_NAMESPACE { + +DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( + const std::string& libname, int flags) +{ + return 0; +} + +int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) +{ + if (!lib) { + return 0; + } + + return 1; +} + +DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( + DynamicLoader::LibraryHandle lib, const std::string& sym) +{ + return 0; +} + +const char* DynamicLoader::LastError() +{ + return "General error"; +} + +} // namespace KWSYS_NAMESPACE + +#elif defined(__hpux) +// Implementation for HPUX machines +# include +# include + +namespace KWSYS_NAMESPACE { + +DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( + const std::string& libname, int flags) +{ + CHECK_OPEN_FLAGS(flags, 0, 0); + + return shl_load(libname.c_str(), BIND_DEFERRED | DYNAMIC_PATH, 0L); +} + +int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) +{ + if (!lib) { + return 0; + } + return !shl_unload(lib); +} + +DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( + DynamicLoader::LibraryHandle lib, const std::string& sym) +{ + void* addr; + int status; + + /* TYPE_PROCEDURE Look for a function or procedure. (This used to be default) + * TYPE_DATA Look for a symbol in the data segment (for example, + * variables). + * TYPE_UNDEFINED Look for any symbol. + */ + status = shl_findsym(&lib, sym.c_str(), TYPE_UNDEFINED, &addr); + void* result = (status < 0) ? (void*)0 : addr; + + // Hack to cast pointer-to-data to pointer-to-function. + return *reinterpret_cast(&result); +} + +const char* DynamicLoader::LastError() +{ + // TODO: Need implementation with errno/strerror + /* If successful, shl_findsym returns an integer (int) value zero. If + * shl_findsym cannot find sym, it returns -1 and sets errno to zero. + * If any other errors occur, shl_findsym returns -1 and sets errno to one + * of these values (defined in ): + * ENOEXEC + * A format error was detected in the specified library. + * ENOSYM + * A symbol on which sym depends could not be found. + * EINVAL + * The specified handle is invalid. 
+ */ + + if (errno == ENOEXEC || errno == ENOSYM || errno == EINVAL) { + return strerror(errno); + } + // else + return 0; +} + +} // namespace KWSYS_NAMESPACE + +#elif defined(__APPLE__) && (MAC_OS_X_VERSION_MAX_ALLOWED < 1030) +// Implementation for Mac OS X 10.2.x and earlier +# include +# include // for strlen + +namespace KWSYS_NAMESPACE { + +DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( + const std::string& libname, int flags) +{ + CHECK_OPEN_FLAGS(flags, 0, 0); + + NSObjectFileImageReturnCode rc; + NSObjectFileImage image = 0; + + rc = NSCreateObjectFileImageFromFile(libname.c_str(), &image); + // rc == NSObjectFileImageInappropriateFile when trying to load a dylib file + if (rc != NSObjectFileImageSuccess) { + return 0; + } + NSModule handle = NSLinkModule(image, libname.c_str(), + NSLINKMODULE_OPTION_BINDNOW | + NSLINKMODULE_OPTION_RETURN_ON_ERROR); + NSDestroyObjectFileImage(image); + return handle; +} + +int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) +{ + // NSUNLINKMODULE_OPTION_KEEP_MEMORY_MAPPED + // With this option the memory for the module is not deallocated + // allowing pointers into the module to still be valid. + // You should use this option instead if your code experience some problems + // reported against Panther 10.3.9 (fixed in Tiger 10.4.2 and up) + bool success = NSUnLinkModule(lib, NSUNLINKMODULE_OPTION_NONE); + return success; +} + +DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( + DynamicLoader::LibraryHandle lib, const std::string& sym) +{ + void* result = 0; + // Need to prepend symbols with '_' on Apple-gcc compilers + std::string rsym = '_' + sym; + + NSSymbol symbol = NSLookupSymbolInModule(lib, rsym.c_str()); + if (symbol) { + result = NSAddressOfSymbol(symbol); + } + + // Hack to cast pointer-to-data to pointer-to-function. + return *reinterpret_cast(&result); +} + +const char* DynamicLoader::LastError() +{ + return 0; +} + +} // namespace KWSYS_NAMESPACE + +#elif defined(_WIN32) && !defined(__CYGWIN__) +// Implementation for Windows win32 code but not cygwin +# include + +# include + +namespace KWSYS_NAMESPACE { + +DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( + const std::string& libname, int flags) +{ + CHECK_OPEN_FLAGS(flags, SearchBesideLibrary, nullptr); + + DWORD llFlags = 0; + if (flags & SearchBesideLibrary) { + llFlags |= LOAD_WITH_ALTERED_SEARCH_PATH; + } + + return LoadLibraryExW(Encoding::ToWindowsExtendedPath(libname).c_str(), + nullptr, llFlags); +} + +int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) +{ + return (int)FreeLibrary(lib); +} + +DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( + DynamicLoader::LibraryHandle lib, const std::string& sym) +{ + // TODO: The calling convention affects the name of the symbol. We + // should have a tool to help get the symbol with the desired + // calling convention. Currently we assume cdecl. + // + // Borland: + // __cdecl = "_func" (default) + // __fastcall = "@_func" + // __stdcall = "func" + // + // Watcom: + // __cdecl = "_func" + // __fastcall = "@_func@X" + // __stdcall = "_func@X" + // __watcall = "func_" (default) + // + // MSVC: + // __cdecl = "func" (default) + // __fastcall = "@_func@X" + // __stdcall = "_func@X" + // + // Note that the "@X" part of the name above is the total size (in + // bytes) of the arguments on the stack. 
+ void* result; +# if defined(__BORLANDC__) || defined(__WATCOMC__) + // Need to prepend symbols with '_' + std::string ssym = '_' + sym; + const char* rsym = ssym.c_str(); +# else + const char* rsym = sym.c_str(); +# endif + result = (void*)GetProcAddress(lib, rsym); +// Hack to cast pointer-to-data to pointer-to-function. +# ifdef __WATCOMC__ + return *(DynamicLoader::SymbolPointer*)(&result); +# else + return *reinterpret_cast(&result); +# endif +} + +# define DYNLOAD_ERROR_BUFFER_SIZE 1024 + +const char* DynamicLoader::LastError() +{ + wchar_t lpMsgBuf[DYNLOAD_ERROR_BUFFER_SIZE + 1]; + + DWORD error = GetLastError(); + DWORD length = FormatMessageW( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, nullptr, error, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language + lpMsgBuf, DYNLOAD_ERROR_BUFFER_SIZE, nullptr); + + static char str[DYNLOAD_ERROR_BUFFER_SIZE + 1]; + + if (length < 1) { + /* FormatMessage failed. Use a default message. */ + _snprintf(str, DYNLOAD_ERROR_BUFFER_SIZE, + "DynamicLoader encountered error 0x%X. " + "FormatMessage failed with error 0x%X", + error, GetLastError()); + return str; + } + + if (!WideCharToMultiByte(CP_UTF8, 0, lpMsgBuf, -1, str, + DYNLOAD_ERROR_BUFFER_SIZE, nullptr, nullptr)) { + /* WideCharToMultiByte failed. Use a default message. */ + _snprintf(str, DYNLOAD_ERROR_BUFFER_SIZE, + "DynamicLoader encountered error 0x%X. " + "WideCharToMultiByte failed with error 0x%X", + error, GetLastError()); + } + + return str; +} + +} // namespace KWSYS_NAMESPACE + +#elif defined(__BEOS__) +// Implementation for BeOS / Haiku +# include // for strerror() + +# include +# include + +namespace KWSYS_NAMESPACE { + +static image_id last_dynamic_err = B_OK; + +DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( + const std::string& libname, int flags) +{ + CHECK_OPEN_FLAGS(flags, 0, 0); + + // image_id's are integers, errors are negative. Add one just in case we + // get a valid image_id of zero (is that even possible?). + image_id rc = load_add_on(libname.c_str()); + if (rc < 0) { + last_dynamic_err = rc; + return 0; + } + + return rc + 1; +} + +int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) +{ + if (!lib) { + last_dynamic_err = B_BAD_VALUE; + return 0; + } else { + // The function dlclose() returns 0 on success, and non-zero on error. + status_t rc = unload_add_on(lib - 1); + if (rc != B_OK) { + last_dynamic_err = rc; + return 0; + } + } + + return 1; +} + +DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( + DynamicLoader::LibraryHandle lib, const std::string& sym) +{ + // Hack to cast pointer-to-data to pointer-to-function. + union + { + void* pvoid; + DynamicLoader::SymbolPointer psym; + } result; + + result.psym = nullptr; + + if (!lib) { + last_dynamic_err = B_BAD_VALUE; + } else { + // !!! FIXME: BeOS can do function-only lookups...does this ever + // !!! FIXME: actually _want_ a data symbol lookup, or was this union + // !!! FIXME: a leftover of dlsym()? (s/ANY/TEXT for functions only). 
+ status_t rc = + get_image_symbol(lib - 1, sym.c_str(), B_SYMBOL_TYPE_ANY, &result.pvoid); + if (rc != B_OK) { + last_dynamic_err = rc; + result.psym = nullptr; + } + } + return result.psym; +} + +const char* DynamicLoader::LastError() +{ + const char* retval = strerror(last_dynamic_err); + last_dynamic_err = B_OK; + return retval; +} + +} // namespace KWSYS_NAMESPACE + +#elif defined(__MINT__) +// Implementation for FreeMiNT on Atari +# define _GNU_SOURCE /* for program_invocation_name */ +# include +# include +# include +# include + +namespace KWSYS_NAMESPACE { + +DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( + const std::string& libname, int flags) +{ + CHECK_OPEN_FLAGS(flags, 0, nullptr); + + char* name = (char*)calloc(1, libname.size() + 1); + dld_init(program_invocation_name); + strncpy(name, libname.c_str(), libname.size()); + dld_link(libname.c_str()); + return (void*)name; +} + +int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) +{ + dld_unlink_by_file((char*)lib, 0); + free(lib); + return 0; +} + +DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( + DynamicLoader::LibraryHandle lib, const std::string& sym) +{ + // Hack to cast pointer-to-data to pointer-to-function. + union + { + void* pvoid; + DynamicLoader::SymbolPointer psym; + } result; + result.pvoid = dld_get_symbol(sym.c_str()); + return result.psym; +} + +const char* DynamicLoader::LastError() +{ + return dld_strerror(dld_errno); +} + +} // namespace KWSYS_NAMESPACE + +#else +// Default implementation for *NIX systems (including Mac OS X 10.3 and +// later) which use dlopen +# include + +namespace KWSYS_NAMESPACE { + +DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( + const std::string& libname, int flags) +{ + CHECK_OPEN_FLAGS(flags, 0, nullptr); + + return dlopen(libname.c_str(), RTLD_LAZY); +} + +int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) +{ + if (lib) { + // The function dlclose() returns 0 on success, and non-zero on error. + return !dlclose(lib); + } + // else + return 0; +} + +DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( + DynamicLoader::LibraryHandle lib, const std::string& sym) +{ + // Hack to cast pointer-to-data to pointer-to-function. + union + { + void* pvoid; + DynamicLoader::SymbolPointer psym; + } result; + result.pvoid = dlsym(lib, sym.c_str()); + return result.psym; +} + +const char* DynamicLoader::LastError() +{ + return dlerror(); +} + +} // namespace KWSYS_NAMESPACE +#endif diff --git a/test/API/driver/kwsys/DynamicLoader.hxx.in b/test/API/driver/kwsys/DynamicLoader.hxx.in new file mode 100644 index 00000000000..539c7425980 --- /dev/null +++ b/test/API/driver/kwsys/DynamicLoader.hxx.in @@ -0,0 +1,106 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_DynamicLoader_hxx +#define @KWSYS_NAMESPACE@_DynamicLoader_hxx + +#include <@KWSYS_NAMESPACE@/Configure.hxx> + +#include + +#if defined(__hpux) +# include +#elif defined(_WIN32) && !defined(__CYGWIN__) +# include +#elif defined(__APPLE__) +# include +# if MAC_OS_X_VERSION_MAX_ALLOWED < 1030 +# include +# endif +#elif defined(__BEOS__) +# include +#endif + +namespace @KWSYS_NAMESPACE@ { +/** \class DynamicLoader + * \brief Portable loading of dynamic libraries or dll's. + * + * DynamicLoader provides a portable interface to loading dynamic + * libraries or dll's into a process. 
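+ *
+ * A minimal usage sketch (illustrative only; it assumes a platform where
+ * "libm.so" and the symbol "cos" are available):
+ *
+ * \code
+ *   @KWSYS_NAMESPACE@::DynamicLoader::LibraryHandle lib =
+ *     @KWSYS_NAMESPACE@::DynamicLoader::OpenLibrary("libm.so");
+ *   if (lib) {
+ *     @KWSYS_NAMESPACE@::DynamicLoader::SymbolPointer sym =
+ *       @KWSYS_NAMESPACE@::DynamicLoader::GetSymbolAddress(lib, "cos");
+ *     if (sym) {
+ *       double (*cosine)(double) = reinterpret_cast<double (*)(double)>(sym);
+ *       double y = cosine(0.0); // y == 1.0
+ *       (void)y;
+ *     }
+ *     @KWSYS_NAMESPACE@::DynamicLoader::CloseLibrary(lib);
+ *   }
+ * \endcode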
+ * + * Directory currently works with Windows, Apple, HP-UX and Unix (POSIX) + * operating systems + * + * \warning dlopen on *nix system works the following way: + * If filename contains a slash ("/"), then it is interpreted as a (relative + * or absolute) pathname. Otherwise, the dynamic linker searches for the + * library as follows : see ld.so(8) for further details): + * Whereas this distinction does not exist on Win32. Therefore ideally you + * should be doing full path to guarantee to have a consistent way of dealing + * with dynamic loading of shared library. + * + * \warning the Cygwin implementation do not use the Win32 HMODULE. Put extra + * condition so that we can include the correct declaration (POSIX) + */ + +class @KWSYS_NAMESPACE@_EXPORT DynamicLoader +{ +public: +// Ugly stuff for library handles +// They are different on several different OS's +#if defined(__hpux) + typedef shl_t LibraryHandle; +#elif defined(_WIN32) && !defined(__CYGWIN__) + typedef HMODULE LibraryHandle; +#elif defined(__APPLE__) +# if MAC_OS_X_VERSION_MAX_ALLOWED < 1030 + typedef NSModule LibraryHandle; +# else + typedef void* LibraryHandle; +# endif +#elif defined(__BEOS__) + typedef image_id LibraryHandle; +#else // POSIX + typedef void* LibraryHandle; +#endif + + // Return type from DynamicLoader::GetSymbolAddress. + typedef void (*SymbolPointer)(); + + enum OpenFlags + { + // Search for dependent libraries beside the library being loaded. + // + // This is currently only supported on Windows. + SearchBesideLibrary = 0x00000001, + + AllOpenFlags = SearchBesideLibrary + }; + + /** Load a dynamic library into the current process. + * The returned LibraryHandle can be used to access the symbols in the + * library. The optional second argument is a set of flags to use when + * opening the library. If unrecognized or unsupported flags are specified, + * the library is not opened. */ + static LibraryHandle OpenLibrary(const std::string&); + static LibraryHandle OpenLibrary(const std::string&, int); + + /** Attempt to detach a dynamic library from the + * process. A value of true is returned if it is successful. */ + static int CloseLibrary(LibraryHandle); + + /** Find the address of the symbol in the given library. */ + static SymbolPointer GetSymbolAddress(LibraryHandle, const std::string&); + + /** Return the default module prefix for the current platform. */ + static const char* LibPrefix() { return "@KWSYS_DynamicLoader_PREFIX@"; } + + /** Return the default module suffix for the current platform. */ + static const char* LibExtension() { return "@KWSYS_DynamicLoader_SUFFIX@"; } + + /** Return the last error produced from a calls made on this class. */ + static const char* LastError(); +}; // End Class: DynamicLoader + +} // namespace @KWSYS_NAMESPACE@ + +#endif diff --git a/test/API/driver/kwsys/Encoding.h.in b/test/API/driver/kwsys/Encoding.h.in new file mode 100644 index 00000000000..86a26692abc --- /dev/null +++ b/test/API/driver/kwsys/Encoding.h.in @@ -0,0 +1,69 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_Encoding_h +#define @KWSYS_NAMESPACE@_Encoding_h + +#include <@KWSYS_NAMESPACE@/Configure.h> + +#include + +/* Redefine all public interface symbol names to be in the proper + namespace. These macros are used internally to kwsys only, and are + not visible to user code. Use kwsysHeaderDump.pl to reproduce + these macros after making changes to the interface. 
*/ +#if !defined(KWSYS_NAMESPACE) +# define kwsys_ns(x) @KWSYS_NAMESPACE@##x +# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT +#endif +#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# define kwsysEncoding kwsys_ns(Encoding) +# define kwsysEncoding_mbstowcs kwsys_ns(Encoding_mbstowcs) +# define kwsysEncoding_DupToWide kwsys_ns(Encoding_DupToWide) +# define kwsysEncoding_wcstombs kwsys_ns(Encoding_wcstombs) +# define kwsysEncoding_DupToNarrow kwsys_ns(Encoding_DupToNarrow) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* Convert a narrow string to a wide string. + On Windows, UTF-8 is assumed, and on other platforms, + the current locale is assumed. + */ +kwsysEXPORT size_t kwsysEncoding_mbstowcs(wchar_t* dest, const char* src, + size_t n); + +/* Convert a narrow string to a wide string. + This can return NULL if the conversion fails. */ +kwsysEXPORT wchar_t* kwsysEncoding_DupToWide(const char* src); + +/* Convert a wide string to a narrow string. + On Windows, UTF-8 is assumed, and on other platforms, + the current locale is assumed. */ +kwsysEXPORT size_t kwsysEncoding_wcstombs(char* dest, const wchar_t* src, + size_t n); + +/* Convert a wide string to a narrow string. + This can return NULL if the conversion fails. */ +kwsysEXPORT char* kwsysEncoding_DupToNarrow(const wchar_t* str); + +#if defined(__cplusplus) +} /* extern "C" */ +#endif + +/* If we are building a kwsys .c or .cxx file, let it use these macros. + Otherwise, undefine them to keep the namespace clean. */ +#if !defined(KWSYS_NAMESPACE) +# undef kwsys_ns +# undef kwsysEXPORT +# if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# undef kwsysEncoding +# undef kwsysEncoding_mbstowcs +# undef kwsysEncoding_DupToWide +# undef kwsysEncoding_wcstombs +# undef kwsysEncoding_DupToNarrow +# endif +#endif + +#endif diff --git a/test/API/driver/kwsys/Encoding.hxx.in b/test/API/driver/kwsys/Encoding.hxx.in new file mode 100644 index 00000000000..75a2d4d0f99 --- /dev/null +++ b/test/API/driver/kwsys/Encoding.hxx.in @@ -0,0 +1,80 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_Encoding_hxx +#define @KWSYS_NAMESPACE@_Encoding_hxx + +#include <@KWSYS_NAMESPACE@/Configure.hxx> + +#include +#include + +namespace @KWSYS_NAMESPACE@ { +class @KWSYS_NAMESPACE@_EXPORT Encoding +{ +public: + // Container class for argc/argv. + class @KWSYS_NAMESPACE@_EXPORT CommandLineArguments + { + public: + // On Windows, get the program command line arguments + // in this Encoding module's 8 bit encoding. + // On other platforms the given argc/argv is used, and + // to be consistent, should be the argc/argv from main(). + static CommandLineArguments Main(int argc, char const* const* argv); + + // Construct CommandLineArguments with the given + // argc/argv. It is assumed that the string is already + // in the encoding used by this module. + CommandLineArguments(int argc, char const* const* argv); + + // Construct CommandLineArguments with the given + // argc and wide argv. This is useful if wmain() is used. 
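+    // For example (illustrative sketch; run() stands in for the
+    // application's own narrow entry point):
+    //   int wmain(int argc, wchar_t* argv[])
+    //   {
+    //     @KWSYS_NAMESPACE@::Encoding::CommandLineArguments args(argc, argv);
+    //     return run(args.argc(), args.argv());
+    //   }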
+ CommandLineArguments(int argc, wchar_t const* const* argv); + ~CommandLineArguments(); + CommandLineArguments(const CommandLineArguments&); + CommandLineArguments& operator=(const CommandLineArguments&); + + int argc() const; + char const* const* argv() const; + + protected: + std::vector argv_; + }; + + /** + * Convert between char and wchar_t + */ + +#if @KWSYS_NAMESPACE@_STL_HAS_WSTRING + + // Convert a narrow string to a wide string. + // On Windows, UTF-8 is assumed, and on other platforms, + // the current locale is assumed. + static std::wstring ToWide(const std::string& str); + static std::wstring ToWide(const char* str); + + // Convert a wide string to a narrow string. + // On Windows, UTF-8 is assumed, and on other platforms, + // the current locale is assumed. + static std::string ToNarrow(const std::wstring& str); + static std::string ToNarrow(const wchar_t* str); + +# if defined(_WIN32) + /** + * Convert the path to an extended length path to avoid MAX_PATH length + * limitations on Windows. If the input is a local path the result will be + * prefixed with \\?\; if the input is instead a network path, the result + * will be prefixed with \\?\UNC\. All output will also be converted to + * absolute paths with Windows-style backslashes. + **/ + static std::wstring ToWindowsExtendedPath(std::string const&); + static std::wstring ToWindowsExtendedPath(const char* source); + static std::wstring ToWindowsExtendedPath(std::wstring const& wsource); +# endif + +#endif // @KWSYS_NAMESPACE@_STL_HAS_WSTRING + +}; // class Encoding +} // namespace @KWSYS_NAMESPACE@ + +#endif diff --git a/test/API/driver/kwsys/EncodingC.c b/test/API/driver/kwsys/EncodingC.c new file mode 100644 index 00000000000..e12236afe5a --- /dev/null +++ b/test/API/driver/kwsys/EncodingC.c @@ -0,0 +1,72 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(Encoding.h) + +/* Work-around CMake dependency scanning limitation. This must + duplicate the above list of headers. 
*/ +#if 0 +# include "Encoding.h.in" +#endif + +#include + +#ifdef _WIN32 +# include +#endif + +size_t kwsysEncoding_mbstowcs(wchar_t* dest, const char* str, size_t n) +{ + if (str == 0) { + return (size_t)-1; + } +#ifdef _WIN32 + return MultiByteToWideChar(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str, -1, dest, + (int)n) - + 1; +#else + return mbstowcs(dest, str, n); +#endif +} + +wchar_t* kwsysEncoding_DupToWide(const char* str) +{ + wchar_t* ret = NULL; + size_t length = kwsysEncoding_mbstowcs(NULL, str, 0) + 1; + if (length > 0) { + ret = (wchar_t*)malloc((length) * sizeof(wchar_t)); + if (ret) { + ret[0] = 0; + kwsysEncoding_mbstowcs(ret, str, length); + } + } + return ret; +} + +size_t kwsysEncoding_wcstombs(char* dest, const wchar_t* str, size_t n) +{ + if (str == 0) { + return (size_t)-1; + } +#ifdef _WIN32 + return WideCharToMultiByte(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str, -1, dest, + (int)n, NULL, NULL) - + 1; +#else + return wcstombs(dest, str, n); +#endif +} + +char* kwsysEncoding_DupToNarrow(const wchar_t* str) +{ + char* ret = NULL; + size_t length = kwsysEncoding_wcstombs(0, str, 0) + 1; + if (length > 0) { + ret = (char*)malloc(length); + if (ret) { + ret[0] = 0; + kwsysEncoding_wcstombs(ret, str, length); + } + } + return ret; +} diff --git a/test/API/driver/kwsys/EncodingCXX.cxx b/test/API/driver/kwsys/EncodingCXX.cxx new file mode 100644 index 00000000000..5cad934ec37 --- /dev/null +++ b/test/API/driver/kwsys/EncodingCXX.cxx @@ -0,0 +1,288 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifdef __osf__ +# define _OSF_SOURCE +# define _POSIX_C_SOURCE 199506L +# define _XOPEN_SOURCE_EXTENDED +#endif + +#include "kwsysPrivate.h" +#include KWSYS_HEADER(Encoding.hxx) +#include KWSYS_HEADER(Encoding.h) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "Encoding.h.in" +# include "Encoding.hxx.in" +#endif + +#include +#include +#include + +#ifdef _MSC_VER +# pragma warning(disable : 4786) +#endif + +// Windows API. 
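+// CommandLineToArgvW and LocalFree below are used by
+// CommandLineArguments::Main to re-read the process command line in wide
+// form before converting it to this module's 8-bit encoding.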
+#if defined(_WIN32) +# include + +# include +# include +#endif + +namespace KWSYS_NAMESPACE { + +Encoding::CommandLineArguments Encoding::CommandLineArguments::Main( + int argc, char const* const* argv) +{ +#ifdef _WIN32 + (void)argc; + (void)argv; + + int ac; + LPWSTR* w_av = CommandLineToArgvW(GetCommandLineW(), &ac); + + std::vector av1(ac); + std::vector av2(ac); + for (int i = 0; i < ac; i++) { + av1[i] = ToNarrow(w_av[i]); + av2[i] = av1[i].c_str(); + } + LocalFree(w_av); + return CommandLineArguments(ac, &av2[0]); +#else + return CommandLineArguments(argc, argv); +#endif +} + +Encoding::CommandLineArguments::CommandLineArguments(int ac, + char const* const* av) +{ + this->argv_.resize(ac + 1); + for (int i = 0; i < ac; i++) { + this->argv_[i] = strdup(av[i]); + } + this->argv_[ac] = nullptr; +} + +Encoding::CommandLineArguments::CommandLineArguments(int ac, + wchar_t const* const* av) +{ + this->argv_.resize(ac + 1); + for (int i = 0; i < ac; i++) { + this->argv_[i] = kwsysEncoding_DupToNarrow(av[i]); + } + this->argv_[ac] = nullptr; +} + +Encoding::CommandLineArguments::~CommandLineArguments() +{ + for (size_t i = 0; i < this->argv_.size(); i++) { + free(argv_[i]); + } +} + +Encoding::CommandLineArguments::CommandLineArguments( + const CommandLineArguments& other) +{ + this->argv_.resize(other.argv_.size()); + for (size_t i = 0; i < this->argv_.size(); i++) { + this->argv_[i] = other.argv_[i] ? strdup(other.argv_[i]) : nullptr; + } +} + +Encoding::CommandLineArguments& Encoding::CommandLineArguments::operator=( + const CommandLineArguments& other) +{ + if (this != &other) { + size_t i; + for (i = 0; i < this->argv_.size(); i++) { + free(this->argv_[i]); + } + + this->argv_.resize(other.argv_.size()); + for (i = 0; i < this->argv_.size(); i++) { + this->argv_[i] = other.argv_[i] ? 
strdup(other.argv_[i]) : nullptr; + } + } + + return *this; +} + +int Encoding::CommandLineArguments::argc() const +{ + return static_cast(this->argv_.size() - 1); +} + +char const* const* Encoding::CommandLineArguments::argv() const +{ + return &this->argv_[0]; +} + +#if KWSYS_STL_HAS_WSTRING + +std::wstring Encoding::ToWide(const std::string& str) +{ + std::wstring wstr; +# if defined(_WIN32) + const int wlength = + MultiByteToWideChar(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.data(), + int(str.size()), nullptr, 0); + if (wlength > 0) { + wchar_t* wdata = new wchar_t[wlength]; + int r = MultiByteToWideChar(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.data(), + int(str.size()), wdata, wlength); + if (r > 0) { + wstr = std::wstring(wdata, wlength); + } + delete[] wdata; + } +# else + size_t pos = 0; + size_t nullPos = 0; + do { + if (pos < str.size() && str.at(pos) != '\0') { + wstr += ToWide(str.c_str() + pos); + } + nullPos = str.find('\0', pos); + if (nullPos != std::string::npos) { + pos = nullPos + 1; + wstr += wchar_t('\0'); + } + } while (nullPos != std::string::npos); +# endif + return wstr; +} + +std::string Encoding::ToNarrow(const std::wstring& str) +{ + std::string nstr; +# if defined(_WIN32) + int length = + WideCharToMultiByte(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.c_str(), + int(str.size()), nullptr, 0, nullptr, nullptr); + if (length > 0) { + char* data = new char[length]; + int r = + WideCharToMultiByte(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.c_str(), + int(str.size()), data, length, nullptr, nullptr); + if (r > 0) { + nstr = std::string(data, length); + } + delete[] data; + } +# else + size_t pos = 0; + size_t nullPos = 0; + do { + if (pos < str.size() && str.at(pos) != '\0') { + nstr += ToNarrow(str.c_str() + pos); + } + nullPos = str.find(wchar_t('\0'), pos); + if (nullPos != std::string::npos) { + pos = nullPos + 1; + nstr += '\0'; + } + } while (nullPos != std::string::npos); +# endif + return nstr; +} + +std::wstring Encoding::ToWide(const char* cstr) +{ + std::wstring wstr; + size_t length = kwsysEncoding_mbstowcs(nullptr, cstr, 0) + 1; + if (length > 0) { + std::vector wchars(length); + if (kwsysEncoding_mbstowcs(&wchars[0], cstr, length) > 0) { + wstr = &wchars[0]; + } + } + return wstr; +} + +std::string Encoding::ToNarrow(const wchar_t* wcstr) +{ + std::string str; + size_t length = kwsysEncoding_wcstombs(nullptr, wcstr, 0) + 1; + if (length > 0) { + std::vector chars(length); + if (kwsysEncoding_wcstombs(&chars[0], wcstr, length) > 0) { + str = &chars[0]; + } + } + return str; +} + +# if defined(_WIN32) +// Convert local paths to UNC style paths +std::wstring Encoding::ToWindowsExtendedPath(std::string const& source) +{ + return ToWindowsExtendedPath(ToWide(source)); +} + +// Convert local paths to UNC style paths +std::wstring Encoding::ToWindowsExtendedPath(const char* source) +{ + return ToWindowsExtendedPath(ToWide(source)); +} + +// Convert local paths to UNC style paths +std::wstring Encoding::ToWindowsExtendedPath(std::wstring const& wsource) +{ + // Resolve any relative paths + DWORD wfull_len; + + /* The +3 is a workaround for a bug in some versions of GetFullPathNameW that + * won't return a large enough buffer size if the input is too small */ + wfull_len = GetFullPathNameW(wsource.c_str(), 0, nullptr, nullptr) + 3; + std::vector wfull(wfull_len); + GetFullPathNameW(wsource.c_str(), wfull_len, &wfull[0], nullptr); + + /* This should get the correct size without any extra padding from the + * previous size workaround. 
*/ + wfull_len = static_cast(wcslen(&wfull[0])); + + if (wfull_len >= 2 && isalpha(wfull[0]) && + wfull[1] == L':') { /* C:\Foo\bar\FooBar.txt */ + return L"\\\\?\\" + std::wstring(&wfull[0]); + } else if (wfull_len >= 2 && wfull[0] == L'\\' && + wfull[1] == L'\\') { /* Starts with \\ */ + if (wfull_len >= 4 && wfull[2] == L'?' && + wfull[3] == L'\\') { /* Starts with \\?\ */ + if (wfull_len >= 8 && wfull[4] == L'U' && wfull[5] == L'N' && + wfull[6] == L'C' && + wfull[7] == L'\\') { /* \\?\UNC\Foo\bar\FooBar.txt */ + return std::wstring(&wfull[0]); + } else if (wfull_len >= 6 && isalpha(wfull[4]) && + wfull[5] == L':') { /* \\?\C:\Foo\bar\FooBar.txt */ + return std::wstring(&wfull[0]); + } else if (wfull_len >= 5) { /* \\?\Foo\bar\FooBar.txt */ + return L"\\\\?\\UNC\\" + std::wstring(&wfull[4]); + } + } else if (wfull_len >= 4 && wfull[2] == L'.' && + wfull[3] == L'\\') { /* Starts with \\.\ a device name */ + if (wfull_len >= 6 && isalpha(wfull[4]) && + wfull[5] == L':') { /* \\.\C:\Foo\bar\FooBar.txt */ + return L"\\\\?\\" + std::wstring(&wfull[4]); + } else if (wfull_len >= + 5) { /* \\.\Foo\bar\ Device name is left unchanged */ + return std::wstring(&wfull[0]); + } + } else if (wfull_len >= 3) { /* \\Foo\bar\FooBar.txt */ + return L"\\\\?\\UNC\\" + std::wstring(&wfull[2]); + } + } + + // If this case has been reached, then the path is invalid. Leave it + // unchanged + return wsource; +} +# endif + +#endif // KWSYS_STL_HAS_WSTRING + +} // namespace KWSYS_NAMESPACE diff --git a/test/API/driver/kwsys/ExtraTest.cmake.in b/test/API/driver/kwsys/ExtraTest.cmake.in new file mode 100644 index 00000000000..e8c0a1cdb19 --- /dev/null +++ b/test/API/driver/kwsys/ExtraTest.cmake.in @@ -0,0 +1 @@ +MESSAGE("*** This message is generated by message inside a file that is included in DartTestfile.txt ***") diff --git a/test/API/driver/kwsys/FStream.cxx b/test/API/driver/kwsys/FStream.cxx new file mode 100644 index 00000000000..5e4133ac564 --- /dev/null +++ b/test/API/driver/kwsys/FStream.cxx @@ -0,0 +1,55 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(FStream.hxx) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. 
+#if 0 +# include "FStream.hxx.in" +#endif + +namespace KWSYS_NAMESPACE { +namespace FStream { + +BOM ReadBOM(std::istream& in) +{ + if (!in.good()) { + return BOM_None; + } + unsigned long orig = in.tellg(); + unsigned char bom[4]; + in.read(reinterpret_cast(bom), 2); + if (!in.good()) { + in.clear(); + in.seekg(orig); + return BOM_None; + } + if (bom[0] == 0xEF && bom[1] == 0xBB) { + in.read(reinterpret_cast(bom + 2), 1); + if (in.good() && bom[2] == 0xBF) { + return BOM_UTF8; + } + } else if (bom[0] == 0xFE && bom[1] == 0xFF) { + return BOM_UTF16BE; + } else if (bom[0] == 0x00 && bom[1] == 0x00) { + in.read(reinterpret_cast(bom + 2), 2); + if (in.good() && bom[2] == 0xFE && bom[3] == 0xFF) { + return BOM_UTF32BE; + } + } else if (bom[0] == 0xFF && bom[1] == 0xFE) { + unsigned long p = in.tellg(); + in.read(reinterpret_cast(bom + 2), 2); + if (in.good() && bom[2] == 0x00 && bom[3] == 0x00) { + return BOM_UTF32LE; + } + in.seekg(p); + return BOM_UTF16LE; + } + in.clear(); + in.seekg(orig); + return BOM_None; +} + +} // FStream namespace +} // KWSYS_NAMESPACE diff --git a/test/API/driver/kwsys/FStream.hxx.in b/test/API/driver/kwsys/FStream.hxx.in new file mode 100644 index 00000000000..d79bbdf16b9 --- /dev/null +++ b/test/API/driver/kwsys/FStream.hxx.in @@ -0,0 +1,278 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_FStream_hxx +#define @KWSYS_NAMESPACE@_FStream_hxx + +#include <@KWSYS_NAMESPACE@/Configure.hxx> + +#include <@KWSYS_NAMESPACE@/Encoding.hxx> + +#include +#if defined(_WIN32) +# if !defined(_MSC_VER) && @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H +# include +# endif +#endif + +namespace @KWSYS_NAMESPACE@ { +#if defined(_WIN32) && \ + (defined(_MSC_VER) || @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H) +# if defined(_NOEXCEPT) +# define @KWSYS_NAMESPACE@_FStream_NOEXCEPT _NOEXCEPT +# else +# define @KWSYS_NAMESPACE@_FStream_NOEXCEPT +# endif + +# if defined(_MSC_VER) + +template +class basic_filebuf : public std::basic_filebuf +{ +# if _MSC_VER >= 1400 +public: + typedef std::basic_filebuf my_base_type; + basic_filebuf* open(char const* s, std::ios_base::openmode mode) + { + const std::wstring wstr = Encoding::ToWindowsExtendedPath(s); + return static_cast(my_base_type::open(wstr.c_str(), mode)); + } +# endif +}; + +# else + +inline std::wstring getcmode(const std::ios_base::openmode mode) +{ + std::wstring cmode; + bool plus = false; + if (mode & std::ios_base::app) { + cmode += L"a"; + plus = mode & std::ios_base::in ? true : false; + } else if (mode & std::ios_base::trunc || + (mode & std::ios_base::out && (mode & std::ios_base::in) == 0)) { + cmode += L"w"; + plus = mode & std::ios_base::in ? true : false; + } else { + cmode += L"r"; + plus = mode & std::ios_base::out ? 
true : false; + } + if (plus) { + cmode += L"+"; + } + if (mode & std::ios_base::binary) { + cmode += L"b"; + } else { + cmode += L"t"; + } + return cmode; +}; + +# endif + +template > +class basic_efilebuf +{ +public: +# if defined(_MSC_VER) + typedef basic_filebuf internal_buffer_type; +# else + typedef __gnu_cxx::stdio_filebuf internal_buffer_type; +# endif + + basic_efilebuf() + : file_(0) + { + buf_ = 0; + } + + bool _open(char const* file_name, std::ios_base::openmode mode) + { + if (is_open() || file_) { + return false; + } +# if defined(_MSC_VER) + const bool success = buf_->open(file_name, mode) != 0; +# else + const std::wstring wstr = Encoding::ToWindowsExtendedPath(file_name); + bool success = false; + std::wstring cmode = getcmode(mode); + file_ = _wfopen(wstr.c_str(), cmode.c_str()); + if (file_) { + if (buf_) { + delete buf_; + } + buf_ = new internal_buffer_type(file_, mode); + success = true; + } +# endif + return success; + } + + bool is_open() + { + if (!buf_) { + return false; + } + return buf_->is_open(); + } + + bool is_open() const + { + if (!buf_) { + return false; + } + return buf_->is_open(); + } + + bool _close() + { + bool success = false; + if (buf_) { + success = buf_->close() != 0; +# if !defined(_MSC_VER) + if (file_) { + success = fclose(file_) == 0 ? success : false; + file_ = 0; + } +# endif + } + return success; + } + + static void _set_state(bool success, std::basic_ios* ios, + basic_efilebuf* efilebuf) + { +# if !defined(_MSC_VER) + ios->rdbuf(efilebuf->buf_); +# else + static_cast(efilebuf); +# endif + if (!success) { + ios->setstate(std::ios_base::failbit); + } else { + ios->clear(); + } + } + + ~basic_efilebuf() + { + if (buf_) { + delete buf_; + } + } + +protected: + internal_buffer_type* buf_; + FILE* file_; +}; + +template > +class basic_ifstream + : public std::basic_istream + , public basic_efilebuf +{ +public: + typedef typename basic_efilebuf::internal_buffer_type + internal_buffer_type; + typedef std::basic_istream internal_stream_type; + + basic_ifstream() + : internal_stream_type(new internal_buffer_type()) + { + this->buf_ = + static_cast(internal_stream_type::rdbuf()); + } + explicit basic_ifstream(char const* file_name, + std::ios_base::openmode mode = std::ios_base::in) + : internal_stream_type(new internal_buffer_type()) + { + this->buf_ = + static_cast(internal_stream_type::rdbuf()); + open(file_name, mode); + } + + void open(char const* file_name, + std::ios_base::openmode mode = std::ios_base::in) + { + mode = mode | std::ios_base::in; + this->_set_state(this->_open(file_name, mode), this, this); + } + + void close() { this->_set_state(this->_close(), this, this); } + + using basic_efilebuf::is_open; + + internal_buffer_type* rdbuf() const { return this->buf_; } + + ~basic_ifstream() @KWSYS_NAMESPACE@_FStream_NOEXCEPT { close(); } +}; + +template > +class basic_ofstream + : public std::basic_ostream + , public basic_efilebuf +{ + using basic_efilebuf::is_open; + +public: + typedef typename basic_efilebuf::internal_buffer_type + internal_buffer_type; + typedef std::basic_ostream internal_stream_type; + + basic_ofstream() + : internal_stream_type(new internal_buffer_type()) + { + this->buf_ = + static_cast(internal_stream_type::rdbuf()); + } + explicit basic_ofstream(char const* file_name, + std::ios_base::openmode mode = std::ios_base::out) + : internal_stream_type(new internal_buffer_type()) + { + this->buf_ = + static_cast(internal_stream_type::rdbuf()); + open(file_name, mode); + } + void open(char const* file_name, + 
std::ios_base::openmode mode = std::ios_base::out) + { + mode = mode | std::ios_base::out; + this->_set_state(this->_open(file_name, mode), this, this); + } + + void close() { this->_set_state(this->_close(), this, this); } + + internal_buffer_type* rdbuf() const { return this->buf_; } + + ~basic_ofstream() @KWSYS_NAMESPACE@_FStream_NOEXCEPT { close(); } +}; + +typedef basic_ifstream ifstream; +typedef basic_ofstream ofstream; + +# undef @KWSYS_NAMESPACE@_FStream_NOEXCEPT +#else +using std::ofstream; +using std::ifstream; +#endif + +namespace FStream { +enum BOM +{ + BOM_None, + BOM_UTF8, + BOM_UTF16BE, + BOM_UTF16LE, + BOM_UTF32BE, + BOM_UTF32LE +}; + +// Read a BOM, if one exists. +// If a BOM exists, the stream is advanced to after the BOM. +// This function requires a seekable stream (but not a relative +// seekable stream). +@KWSYS_NAMESPACE@_EXPORT BOM ReadBOM(std::istream& in); +} +} + +#endif diff --git a/test/API/driver/kwsys/GitSetup/.gitattributes b/test/API/driver/kwsys/GitSetup/.gitattributes new file mode 100644 index 00000000000..e96d1f8c503 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/.gitattributes @@ -0,0 +1,6 @@ +.git* export-ignore + +config* eol=lf whitespace=indent-with-non-tab +git-* eol=lf whitespace=indent-with-non-tab +tips eol=lf whitespace=indent-with-non-tab +setup-* eol=lf whitespace=indent-with-non-tab diff --git a/test/API/driver/kwsys/GitSetup/LICENSE b/test/API/driver/kwsys/GitSetup/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/API/driver/kwsys/GitSetup/NOTICE b/test/API/driver/kwsys/GitSetup/NOTICE new file mode 100644 index 00000000000..0d32c02eb69 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/NOTICE @@ -0,0 +1,5 @@ +Kitware Local Git Setup Scripts +Copyright 2010-2012 Kitware, Inc. + +This product includes software developed at Kitware, Inc. +(http://www.kitware.com/). diff --git a/test/API/driver/kwsys/GitSetup/README b/test/API/driver/kwsys/GitSetup/README new file mode 100644 index 00000000000..2f9f1ec078d --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/README @@ -0,0 +1,87 @@ +Kitware Local Git Setup Scripts + + +Introduction +------------ + +This is a collection of local Git development setup scripts meant for +inclusion in project source trees to aid their development workflow. +Project-specific information needed by the scripts may be configured +in a "config" file added next to them in the project. 
+ + +Import +------ + +A project may import these scripts into their source tree by +initializing a subtree merge. Bring up a Git prompt and set the +current working directory inside a clone of the target project. +Fetch the "setup" branch from the GitSetup repository: + + $ git fetch ../GitSetup setup:setup + +Prepare to merge the branch but place the content in a subdirectory. +Any prefix (with trailing '/') may be chosen so long as it is used +consistently within a project through the rest of these instructions: + + $ git merge -s ours --no-commit setup + $ git read-tree -u --prefix=Utilities/GitSetup/ setup + +Commit the merge with an informative message: + + $ git commit + ------------------------------------------------------------------------ + Merge branch 'setup' + + Add Utilities/GitSetup/ directory using subtree merge from + the general GitSetup repository "setup" branch. + ------------------------------------------------------------------------ + +Optionally add to the project ".gitattributes" file the line + + /Utilities/GitSetup export-ignore + +to exclude the GitSetup directory from inclusion by "git archive" +since it does not make sense in source tarballs. + + +Configuration +------------- + +Read the "Project configuration instructions" comment in each script. +Add a "config" file next to the scripts with desired configuration +(optionally copy and modify "config.sample"). For example, to +configure the "setup-hooks" script: + + $ git config -f Utilities/GitSetup/config hooks.url "$url" + +where "$url" is the project repository publishing the "hooks" branch. +When finished, add and commit the configuration file: + + $ git add Utilities/GitSetup/config + $ git commit + + +Update +------ + +A project may update these scripts from the GitSetup repository. +Bring up a Git prompt and set the current working directory inside a +clone of the target project. Fetch the "setup" branch from the +GitSetup repository: + + $ git fetch ../GitSetup setup:setup + +Merge the "setup" branch into the subtree: + + $ git merge -X subtree=Utilities/GitSetup setup + +where "Utilities/GitSetup" is the same prefix used during the import +setup, but without a trailing '/'. + + +License +------- + +Distributed under the Apache License 2.0. +See LICENSE and NOTICE for details. diff --git a/test/API/driver/kwsys/GitSetup/config b/test/API/driver/kwsys/GitSetup/config new file mode 100644 index 00000000000..cba4c146031 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/config @@ -0,0 +1,4 @@ +[hooks] + url = https://gitlab.kitware.com/utils/gitsetup.git +[upstream] + url = https://gitlab.kitware.com/utils/kwsys.git diff --git a/test/API/driver/kwsys/GitSetup/config.sample b/test/API/driver/kwsys/GitSetup/config.sample new file mode 100644 index 00000000000..eeb468ba17b --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/config.sample @@ -0,0 +1,32 @@ +# Kitware Local Git Setup Scripts - Sample Project Configuration +# +# Copy to "config" and edit as necessary. 
+ +[hooks] + url = http://public.kitware.com/GitSetup.git + #branch = hooks + +[ssh] + host = public.kitware.com + key = id_git_public + request-url = https://www.kitware.com/Admin/SendPassword.cgi + +[stage] + #url = git://public.kitware.com/stage/Project.git + #pushurl = git@public.kitware.com:stage/Project.git + +[gerrit] + #project = Project + site = http://review.source.kitware.com + # pushurl placeholder "$username" is literal + pushurl = $username@review.source.kitware.com:Project + +[upstream] + url = git://public.kitware.com/Project.git + +[gitlab] + host = gitlab.kitware.com + group-path = group + group-name = Group + project-path = project + project-name = Project diff --git a/test/API/driver/kwsys/GitSetup/git-gerrit-push b/test/API/driver/kwsys/GitSetup/git-gerrit-push new file mode 100644 index 00000000000..b46f753eb22 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/git-gerrit-push @@ -0,0 +1,74 @@ +#!/usr/bin/env bash +#============================================================================= +# Copyright 2010-2015 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +USAGE="[] [--no-topic] [--dry-run] [--]" +OPTIONS_SPEC= +SUBDIRECTORY_OK=Yes +. "$(git --exec-path)/git-sh-setup" + +#----------------------------------------------------------------------------- + +remote='' +refspecs='' +no_topic='' +dry_run='' + +# Parse the command line options. +while test $# != 0; do + case "$1" in + --no-topic) no_topic=1 ;; + --dry-run) dry_run=--dry-run ;; + --) shift; break ;; + -*) usage ;; + *) test -z "$remote" || usage ; remote="$1" ;; + esac + shift +done +test $# = 0 || usage + +# Default remote. +test -n "$remote" || remote="gerrit" + +if test -z "$no_topic"; then + # Identify and validate the topic branch name. + head="$(git symbolic-ref HEAD)" && topic="${head#refs/heads/}" || topic='' + if test -z "$topic" -o "$topic" = "master"; then + die 'Please name your topic: + git checkout -b descriptive-name' + fi + # The topic branch will be pushed by name. + refspecs="HEAD:refs/for/master/$topic $refspecs" +fi + +# Fetch the current upstream master branch head. +# This helps computation of a minimal pack to push. +echo "Fetching $remote master" +fetch_out=$(git fetch "$remote" master 2>&1) || die "$fetch_out" + +# Exit early if we have nothing to push. +if test -z "$refspecs"; then + echo 'Nothing to push!' + exit 0 +fi + +# Push. Save output and exit code. +echo "Pushing to $remote" +push_stdout=$(git push --porcelain $dry_run "$remote" $refspecs); push_exit=$? +echo "$push_stdout" + +# Reproduce the push exit code. 
+exit $push_exit diff --git a/test/API/driver/kwsys/GitSetup/git-gitlab-push b/test/API/driver/kwsys/GitSetup/git-gitlab-push new file mode 100644 index 00000000000..768f8534ed7 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/git-gitlab-push @@ -0,0 +1,177 @@ +#!/usr/bin/env bash +#============================================================================= +# Copyright 2010-2015 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +USAGE='[] [...] [--] + +OPTIONS + +--dry-run + Show what would be pushed without actually updating the destination + +-f,--force + Force-push the topic HEAD to rewrite the destination branch + +--no-default + Do not push the default branch (e.g. master) + +--no-topic + Do not push the topic HEAD. +' +OPTIONS_SPEC= +SUBDIRECTORY_OK=Yes +. "$(git --exec-path)/git-sh-setup" + +egrep-q() { + egrep "$@" >/dev/null 2>/dev/null +} + +# Load the project configuration. +gitlab_upstream='' && +gitlab_configured='' && +config="${BASH_SOURCE%/*}/config" && +protocol=$(git config -f "$config" --get gitlab.protocol || + echo "https") && +host=$(git config -f "$config" --get gitlab.host) && +site=$(git config -f "$config" --get gitlab.site || + echo "$protocol://$host") && +group_path=$(git config -f "$config" --get gitlab.group-path) && +project_path=$(git config -f "$config" --get gitlab.project-path) && +gitlab_upstream="$site/$group_path/$project_path.git" && +gitlab_pushurl=$(git config --get remote.gitlab.pushurl || + git config --get remote.gitlab.url) && +gitlab_configured=1 + +#----------------------------------------------------------------------------- + +remote='' +refspecs='' +force='' +lease=false +lease_flag='' +no_topic='' +no_default='' +dry_run='' + +# Parse the command line options. +while test $# != 0; do + case "$1" in + -f|--force) force='+'; lease=true ;; + --no-topic) no_topic=1 ;; + --dry-run) dry_run=--dry-run ;; + --no-default) no_default=1 ;; + --) shift; break ;; + -*) usage ;; + *) test -z "$remote" || usage ; remote="$1" ;; + esac + shift +done +test $# = 0 || usage + +# Default remote. +test -n "$remote" || remote="gitlab" + +if test -z "$no_topic"; then + # Identify and validate the topic branch name. + head="$(git symbolic-ref HEAD)" && topic="${head#refs/heads/}" || topic='' + if test -z "$topic" -o "$topic" = "master"; then + die 'Please name your topic: + git checkout -b descriptive-name' + fi + + if $lease; then + have_ref=false + remoteref="refs/remotes/$remote/$topic" + if git rev-parse --verify -q "$remoteref"; then + have_ref=true + else + die "It seems that a local ref for the branch is +missing; forcing a push is dangerous and may overwrite +previous work. Fetch from the $remote remote first or +push without '-f' or '--force'." + fi + + have_lease_flag=false + if git push -h | egrep-q -e '--force-with-lease'; then + have_lease_flag=true + fi + + if $have_lease_flag && $have_ref; then + # Set the lease flag. 
+ lease_flag="--force-with-lease=$topic:$remoteref" + # Clear the force string. + force='' + fi + fi + + # The topic branch will be pushed by name. + refspecs="${force}HEAD:refs/heads/$topic $refspecs" +fi + +# Fetch the current remote master branch head. +# This helps computation of a minimal pack to push. +echo "Fetching $remote master" +fetch_out=$(git fetch "$remote" master 2>&1) || die "$fetch_out" +gitlab_head=$(git rev-parse FETCH_HEAD) || exit + +# Fetch the current upstream master branch head. +if origin_fetchurl=$(git config --get remote.origin.url) && + test "$origin_fetchurl" = "$gitlab_upstream"; then + upstream_remote='origin' +else + upstream_remote="$gitlab_upstream" +fi +echo "Fetching $upstream_remote master" +fetch_out=$(git fetch "$upstream_remote" master 2>&1) || die "$fetch_out" +upstream_head=$(git rev-parse FETCH_HEAD) || exit + +# Add a refspec to keep the remote master up to date if possible. +if test -z "$no_default" && + base=$(git merge-base "$gitlab_head" "$upstream_head") && + test "$base" = "$gitlab_head"; then + refspecs="$upstream_head:refs/heads/master $refspecs" +fi + +# Exit early if we have nothing to push. +if test -z "$refspecs"; then + echo 'Nothing to push!' + exit 0 +fi + +# Push. Save output and exit code. +echo "Pushing to $remote" +push_config='-c advice.pushUpdateRejected=false' +push_stdout=$(git $push_config push $lease_flag --porcelain $dry_run "$remote" $refspecs); push_exit=$? +echo "$push_stdout" + +if test "$push_exit" -ne 0 && test -z "$force"; then + # Advise the user to fetch if needed. + if echo "$push_stdout" | egrep-q 'stale info'; then + echo " +You have pushed to your branch from another machine; you may be overwriting +commits unintentionally. Fetch from the $remote remote and check that you are +not pushing an outdated branch." + fi + + # Advise the user to force-push if needed. + if echo "$push_stdout" | egrep-q 'non-fast-forward'; then + echo ' +Add "-f" or "--force" to push a rewritten topic.' + fi +fi + +# Reproduce the push exit code. +exit $push_exit diff --git a/test/API/driver/kwsys/GitSetup/pre-commit b/test/API/driver/kwsys/GitSetup/pre-commit new file mode 100644 index 00000000000..1f1d3f52959 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/pre-commit @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +egrep-q() { + egrep "$@" >/dev/null 2>/dev/null +} + +die() { + echo 'pre-commit hook failure' 1>&2 + echo '-----------------------' 1>&2 + echo '' 1>&2 + echo "$@" 1>&2 + exit 1 +} + +#----------------------------------------------------------------------------- + +# Check that developmer setup is up-to-date. +lastSetupForDevelopment=$(git config --get hooks.SetupForDevelopment || echo 0) +eval $(grep '^SetupForDevelopment_VERSION=' "${BASH_SOURCE%/*}/../SetupForDevelopment.sh") +test -n "$SetupForDevelopment_VERSION" || SetupForDevelopment_VERSION=0 +if test $lastSetupForDevelopment -lt $SetupForDevelopment_VERSION; then + die 'Developer setup in this work tree is out of date. Please re-run + + ./SetupForDevelopment.sh +' +fi diff --git a/test/API/driver/kwsys/GitSetup/setup-aliases b/test/API/driver/kwsys/GitSetup/setup-aliases new file mode 100644 index 00000000000..98810adcfe3 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/setup-aliases @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +echo "Adding 'git prepush' alias" && +git config alias.prepush 'log --graph --stat origin/master..' && +gerrit_disabled="KWSys no longer uses Gerrit. Please use GitLab." 
&& +git config alias.gerrit-push '!sh -c "echo '"${gerrit_disabled}"'"' && +true diff --git a/test/API/driver/kwsys/GitSetup/setup-gerrit b/test/API/driver/kwsys/GitSetup/setup-gerrit new file mode 100644 index 00000000000..6d46e3ccf54 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/setup-gerrit @@ -0,0 +1,147 @@ +#!/usr/bin/env bash +#============================================================================= +# Copyright 2010-2012 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +# Run this script to set up the local Git repository to push to +# a Gerrit Code Review instance for this project. + +# Project configuration instructions: +# +# - Run a Gerrit Code Review server +# +# - Populate adjacent "config" file with: +# gerrit.site = Top Gerrit URL (not project-specific) +# gerrit.project = Name of project in Gerrit +# gerrit.pushurl = Review site push URL with "$username" placeholder +# gerrit.remote = Gerrit remote name, if not "gerrit" +# gerrit.url = Gerrit project URL, if not "$site/p/$project" +# optionally with "$username" placeholder + +die() { + echo 1>&2 "$@" ; exit 1 +} + +# Make sure we are inside the repository. +cd "${BASH_SOURCE%/*}" && + +# Load the project configuration. +site=$(git config -f config --get gerrit.site) && +project=$(git config -f config --get gerrit.project) && +remote=$(git config -f config --get gerrit.remote || + echo "gerrit") && +fetchurl_=$(git config -f config --get gerrit.url || + echo "$site/p/$project") && +pushurl_=$(git config -f config --get gerrit.pushurl || + git config -f config --get gerrit.url) || +die 'This project is not configured to use Gerrit.' + +# Get current gerrit push URL. +pushurl=$(git config --get remote."$remote".pushurl || + git config --get remote."$remote".url || echo '') && + +# Tell user about current configuration. +if test -n "$pushurl"; then + echo 'Remote "'"$remote"'" is currently configured to push to + + '"$pushurl"' +' && + read -ep 'Reconfigure Gerrit? [y/N]: ' ans && + if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then + setup=1 + else + setup='' + fi +else + echo 'Remote "'"$remote"'" is not yet configured. + +'"$project"' changes must be pushed to our Gerrit Code Review site: + + '"$site/p/$project"' + +Register a Gerrit account and select a username (used below). +You will need an OpenID: + + http://openid.net/get-an-openid/ +' && + read -ep 'Configure Gerrit? [Y/n]: ' ans && + if [ "$ans" == "n" ] || [ "$ans" == "N" ]; then + exit 0 + else + setup=1 + fi +fi && + +# Perform setup if necessary. +if test -n "$setup"; then + echo 'Sign-in to Gerrit to get/set your username at + + '"$site"'/#/settings + +Add your SSH public keys at + + '"$site"'/#/settings/ssh-keys +' && + read -ep "Gerrit username? 
[$USER]: " gu && + if test -z "$gu"; then + gu="$USER" + fi && + fetchurl="${fetchurl_/\$username/$gu}" && + if test -z "$pushurl"; then + git remote add "$remote" "$fetchurl" + else + git config remote."$remote".url "$fetchurl" + fi && + pushurl="${pushurl_/\$username/$gu}" && + if test "$pushurl" != "$fetchurl"; then + git config remote."$remote".pushurl "$pushurl" + fi && + echo 'Remote "'"$remote"'" is now configured to push to + + '"$pushurl"' +' +fi && + +# Optionally test Gerrit access. +if test -n "$pushurl"; then + read -ep 'Test access to Gerrit (SSH)? [y/N]: ' ans && + if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then + echo -n 'Testing Gerrit access by SSH...' + if git ls-remote --heads "$pushurl" >/dev/null; then + echo 'passed.' + else + echo 'failed.' && + die 'Could not access Gerrit. Add your SSH public keys at + + '"$site"'/#/settings/ssh-keys +' + fi + fi +fi && + +# Set up GerritId hook. +hook=$(git config --get hooks.GerritId || echo '') && +if test -z "$hook"; then + echo ' +Enabling GerritId hook to add a "Change-Id" footer to commit +messages for interaction with Gerrit. Run + + git config hooks.GerritId false + +to disable this feature (but you will be on your own).' && + git config hooks.GerritId true +else + echo 'GerritId hook already configured to "'"$hook"'".' +fi diff --git a/test/API/driver/kwsys/GitSetup/setup-gitlab b/test/API/driver/kwsys/GitSetup/setup-gitlab new file mode 100644 index 00000000000..9c7574d1ea9 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/setup-gitlab @@ -0,0 +1,140 @@ +#!/usr/bin/env bash +#============================================================================= +# Copyright 2010-2015 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +# Run this script to set up the local Git repository to push to +# a personal fork for this project in GitLab. + +# Project configuration instructions: +# +# - Run a GitLab server +# +# - Populate adjacent "config" file with: +# gitlab.protocol = Top GitLab protocol, if not 'https' +# gitlab.host = Top GitLab fully qualified host name +# gitlab.site = Top GitLab URL, if not "://" +# gitlab.group-name = Name of group containing project in GitLab +# gitlab.group-path = Path of group containing project in GitLab +# gitlab.project-name = Name of project within GitLab group +# gitlab.project-path = Path of project within GitLab group +# gitlab.url = GitLab push URL with "$username" placeholder, +# if not "/$username/.git" +# gitlab.pushurl = GitLab push URL with "$username" placeholder, +# if not "git@:$username/.git" +# gitlab.remote = GitLab remote name, if not "gitlab" + +die() { + echo 1>&2 "$@" ; exit 1 +} + +# Make sure we are inside the repository. +cd "${BASH_SOURCE%/*}" && + +# Load the project configuration. 
+protocol=$(git config -f config --get gitlab.protocol || + echo "https") && +host=$(git config -f config --get gitlab.host) && +site=$(git config -f config --get gitlab.site || + echo "$protocol://$host") && +group_path=$(git config -f config --get gitlab.group-path) && +group_name=$(git config -f config --get gitlab.group-name) && +project_name=$(git config -f config --get gitlab.project-name) && +project_path=$(git config -f config --get gitlab.project-path) && +pushurl_=$(git config -f config --get gitlab.pushurl || + echo "git@$host:\$username/$project_path.git") && +remote=$(git config -f config --get gitlab.remote || + echo "gitlab") && +fetchurl_=$(git config -f config --get gitlab.url || + echo "$site/\$username/$project_path.git") || +die 'This project is not configured to use GitLab.' + +# Get current gitlab push URL. +pushurl=$(git config --get remote."$remote".pushurl || + git config --get remote."$remote".url || echo '') && + +# Tell user about current configuration. +if test -n "$pushurl"; then + echo 'Remote "'"$remote"'" is currently configured to push to + + '"$pushurl"' +' && + read -ep 'Reconfigure GitLab? [y/N]: ' ans && + if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then + setup=1 + else + setup='' + fi +else + echo 'Remote "'"$remote"'" is not yet configured. +' && + read -ep 'Configure GitLab to contribute to '"$project_name"'? [Y/n]: ' ans && + if [ "$ans" == "n" ] || [ "$ans" == "N" ]; then + exit 0 + else + setup=1 + fi +fi && + +setup_instructions='Add your SSH public keys at + + '"$site"'/profile/keys + +Then visit the main repository at: + + '"$site/$group_path/$project_path"' + +and use the Fork button in the upper right. +' + +# Perform setup if necessary. +if test -n "$setup"; then + echo 'Sign-in to GitLab to get/set your username at + + '"$site/profile/account"' + +'"$setup_instructions" && + read -ep "GitLab username? [$USER]: " gu && + if test -z "$gu"; then + gu="$USER" + fi && + fetchurl="${fetchurl_/\$username/$gu}" && + if test -z "$pushurl"; then + git remote add "$remote" "$fetchurl" + else + git config remote."$remote".url "$fetchurl" + fi && + pushurl="${pushurl_/\$username/$gu}" && + git config remote."$remote".pushurl "$pushurl" && + echo 'Remote "'"$remote"'" is now configured to push to + + '"$pushurl"' +' +fi && + +# Optionally test GitLab access. +if test -n "$pushurl"; then + read -ep 'Test access to GitLab (SSH)? [y/N]: ' ans && + if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then + echo -n 'Testing GitLab access by SSH...' + if git ls-remote --heads "$pushurl" >/dev/null; then + echo 'passed.' + else + echo 'failed.' && + die 'Could not access your GitLab fork of this project. +'"$setup_instructions" + fi + fi +fi diff --git a/test/API/driver/kwsys/GitSetup/setup-hooks b/test/API/driver/kwsys/GitSetup/setup-hooks new file mode 100644 index 00000000000..ca07712d55a --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/setup-hooks @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +#============================================================================= +# Copyright 2010-2012 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +# Run this script to set up local Git hooks for this project. + +# Project configuration instructions: +# +# - Publish a "hooks" branch in the project repository such that +# clones will have "refs/remotes/origin/hooks". +# +# - Populate adjacent "config" file with: +# hooks.url = Repository URL publishing "hooks" branch +# hooks.branch = Repository branch instead of "hooks" + +egrep-q() { + egrep "$@" >/dev/null 2>/dev/null +} + +die() { + echo 1>&2 "$@" ; exit 1 +} + +# Make sure we are inside the repository. +cd "${BASH_SOURCE%/*}" && + +# Select a hooks branch. +if url=$(git config --get hooks.url); then + # Fetch hooks from locally configured repository. + branch=$(git config hooks.branch || echo hooks) +elif git for-each-ref refs/remotes/origin/hooks 2>/dev/null | + egrep-q 'refs/remotes/origin/hooks$'; then + # Use hooks cloned from origin. + url=.. && branch=remotes/origin/hooks +elif url=$(git config -f config --get hooks.url); then + # Fetch hooks from project-configured repository. + branch=$(git config -f config hooks.branch || echo hooks) +else + die 'This project is not configured to install local hooks.' +fi && + +# Populate ".git/hooks". +echo 'Setting up git hooks...' && +git_dir=$(git rev-parse --git-dir) && +mkdir -p "$git_dir/hooks" && +cd "$git_dir/hooks" && +if ! test -e .git; then + git init -q || die 'Could not run git init for hooks.' +fi && +git fetch -q "$url" "$branch" && +git reset -q --hard FETCH_HEAD || die 'Failed to install hooks' diff --git a/test/API/driver/kwsys/GitSetup/setup-ssh b/test/API/driver/kwsys/GitSetup/setup-ssh new file mode 100644 index 00000000000..8920a5bd338 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/setup-ssh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash +#============================================================================= +# Copyright 2010-2012 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +# Run this script to set up ssh push access to the repository host. + +# Project configuration instructions: +# +# - Populate adjacent "config" file with: +# ssh.host = Repository host name +# ssh.user = Username on host, if not "git" +# ssh.key = Local ssh key name +# ssh.request-url = Web page URL to request ssh access + +egrep-q() { + egrep "$@" >/dev/null 2>/dev/null +} + +die() { + echo 1>&2 "$@" ; exit 1 +} + +# Make sure we are inside the repository. +cd "${BASH_SOURCE%/*}" && + +# Load the project configuration. +host=$(git config -f config --get ssh.host) && +user=$(git config -f config --get ssh.user || echo git) && +key=$(git config -f config --get ssh.key) && +request_url=$(git config -f config --get ssh.request-url) || +die 'This project is not configured for ssh push access.' + +# Check for existing configuration. 
+if test -r ~/.ssh/config && + egrep-q 'Host[= ]'"${host//\./\\.}" ~/.ssh/config; then + echo 'Host "'"$host"'" is already in ~/.ssh/config' && + setup= && + question='Test' +else + echo 'Host "'"$host"'" not found in ~/.ssh/config' && + setup=1 && + question='Setup and test' +fi && + +# Ask the user whether to make changes. +echo '' && +read -ep "${question} push access by ssh to $user@$host? [y/N]: " access && +if test "$access" != "y" -a "$access" != "Y"; then + exit 0 +fi && + +# Setup host configuration if necessary. +if test -n "$setup"; then + if ! test -d ~/.ssh; then + mkdir -p ~/.ssh && + chmod 700 ~/.ssh + fi && + if ! test -f ~/.ssh/config; then + touch ~/.ssh/config && + chmod 600 ~/.ssh/config + fi && + ssh_config='Host='"$host"' + IdentityFile ~/.ssh/'"$key" && + echo "Adding to ~/.ssh/config: + +$ssh_config +" && + echo "$ssh_config" >> ~/.ssh/config && + if ! test -e ~/.ssh/"$key"; then + if test -f ~/.ssh/id_rsa; then + # Take care of the common case. + ln -s id_rsa ~/.ssh/"$key" + echo ' +Assuming ~/.ssh/id_rsa is the private key corresponding to the public key for + + '"$user@$host"' + +If this is incorrect place private key at "~/.ssh/'"$key"'".' + else + echo ' +Place the private key corresponding to the public key registered for + + '"$user@$host"' + +at "~/.ssh/'"$key"'".' + fi + read -e -n 1 -p 'Press any key to continue...' + fi +fi || exit 1 + +# Test access configuration. +echo 'Testing ssh push access to "'"$user@$host"'"...' && +if ! ssh "$user@$host" info; then + die 'No ssh push access to "'"$user@$host"'". You may need to request access at + + '"$request_url"' +' +fi diff --git a/test/API/driver/kwsys/GitSetup/setup-stage b/test/API/driver/kwsys/GitSetup/setup-stage new file mode 100644 index 00000000000..ce6ec457487 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/setup-stage @@ -0,0 +1,82 @@ +#!/usr/bin/env bash +#============================================================================= +# Copyright 2010-2012 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +# Run this script to set up the topic stage for pushing changes. + +# Project configuration instructions: +# +# - Run a Topic Stage repository next to the main project repository. +# +# - Populate adjacent "config" file with: +# stage.url = Topic Stage repository URL +# stage.pushurl = Topic Stage push URL if not "$url" + +egrep-q() { + egrep "$@" >/dev/null 2>/dev/null +} + +die() { + echo 1>&2 "$@" ; exit 1 +} + +# Make sure we are inside the repository. +cd "${BASH_SOURCE%/*}" && + +# Load the project configuration. +fetchurl_=$(git config -f config --get stage.url) && +pushurl_=$(git config -f config --get stage.pushurl || echo "$fetchurl_") && +remote=$(git config -f config --get stage.remote || echo 'stage') || +die 'This project is not configured to use a topic stage.' + +# Get current stage push URL. 
+pushurl=$(git config --get remote."$remote".pushurl || + git config --get remote."$remote".url || echo '') && + +# Tell user about current configuration. +if test -n "$pushurl"; then + echo 'Remote "'"$remote"'" is currently configured to push to + + '"$pushurl"' +' && + read -ep 'Reconfigure Topic Stage? [y/N]: ' ans && + if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then + setup=1 + else + setup='' + fi +else + setup=1 +fi + +# Perform setup if necessary. +if test -n "$setup"; then + echo 'Setting up the topic stage...' && + fetchurl="${fetchurl_}" && + if test -z "$pushurl"; then + git remote add "$remote" "$fetchurl" + else + git config remote."$remote".url "$fetchurl" + fi && + pushurl="${pushurl_}" && + if test "$pushurl" != "$fetchurl"; then + git config remote."$remote".pushurl "$pushurl" + fi && + echo 'Remote "'"$remote"'" is now configured to push to + + '"$pushurl"' +' +fi || die 'Could not configure the topic stage remote.' diff --git a/test/API/driver/kwsys/GitSetup/setup-upstream b/test/API/driver/kwsys/GitSetup/setup-upstream new file mode 100644 index 00000000000..92ce1dae5f3 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/setup-upstream @@ -0,0 +1,104 @@ +#!/usr/bin/env bash +#============================================================================= +# Copyright 2010-2015 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +# Run this script to set up the local Git repository to use the +# preferred upstream repository URLs. + +# Project configuration instructions: +# +# - Populate adjacent "config" file with: +# upstream.url = Preferred fetch url for upstream remote +# upstream.remote = Preferred name for upstream remote, if not "origin" + +die() { + echo 1>&2 "$@" ; exit 1 +} + +# Make sure we are inside the repository. +cd "${BASH_SOURCE%/*}" && + +# Load the project configuration. +url=$(git config -f config --get upstream.url) && +remote=$(git config -f config --get upstream.remote || + echo 'origin') || +die 'This project is not configured to use a preferred upstream repository.' + +# Get current upstream URLs. +fetchurl=$(git config --get remote."$remote".url || echo '') && +pushurl=$(git config --get remote."$remote".pushurl || echo '') && + +if test "$fetchurl" = "$url"; then + echo 'Remote "'"$remote"'" already uses recommended upstream repository.' + exit 0 +fi + +upstream_recommend=' +We recommended configuring the "'"$remote"'" remote to fetch from upstream at + + '"$url"' +' + +# Tell user about current configuration. +if test -n "$fetchurl"; then + echo 'Remote "'"$remote"'" is currently configured to fetch from + + '"$fetchurl"' +' && + if test -n "$pushurl"; then + echo 'and push to + + '"$pushurl" + fi && + echo "$upstream_recommend" && + if test -n "$pushurl"; then + echo 'and to never push to it directly. +' + fi && + + read -ep 'Reconfigure "'"$remote"'" remote as recommended? 
[y/N]: ' ans && + if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then + setup=1 + else + setup='' + fi +else + echo 'Remote "'"$remote"'" is not yet configured.' && + echo "$upstream_recommend" && + read -ep 'Configure "'"$remote"'" remote as recommended? [Y/n]: ' ans && + if [ "$ans" == "n" ] || [ "$ans" == "N" ]; then + exit 0 + else + setup=1 + fi +fi && + +# Perform setup if necessary. +if test -n "$setup"; then + if test -z "$fetchurl"; then + git remote add "$remote" "$url" + else + git config remote."$remote".url "$url" && + if old=$(git config --get remote."$remote".pushurl); then + git config --unset remote."$remote".pushurl || + echo 'Warning: failed to unset remote.'"$remote"'.pushurl' + fi + fi && + echo 'Remote "'"$remote"'" is now configured to fetch from + + '"$url"' +' +fi diff --git a/test/API/driver/kwsys/GitSetup/setup-user b/test/API/driver/kwsys/GitSetup/setup-user new file mode 100644 index 00000000000..1af439c45e4 --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/setup-user @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +#============================================================================= +# Copyright 2010-2012 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +# Run this script to configure Git user info in this repository. + +# Project configuration instructions: NONE + +for (( ; ; )); do + user_name=$(git config user.name || echo '') && + user_email=$(git config user.email || echo '') && + if test -n "$user_name" -a -n "$user_email"; then + echo 'Your commits will record as Author: + + '"$user_name <$user_email>"' +' && + read -ep 'Is the author name and email address above correct? [Y/n] ' correct && + if test "$correct" != "n" -a "$correct" != "N"; then + break + fi + fi && + read -ep 'Enter your full name e.g. "John Doe": ' name && + read -ep 'Enter your email address e.g. "john@gmail.com": ' email && + git config user.name "$name" && + git config user.email "$email" +done diff --git a/test/API/driver/kwsys/GitSetup/tips b/test/API/driver/kwsys/GitSetup/tips new file mode 100644 index 00000000000..784e1ed890d --- /dev/null +++ b/test/API/driver/kwsys/GitSetup/tips @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +#============================================================================= +# Copyright 2010-2012 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#============================================================================= + +# This script makes optional suggestions for working with Git. + +# Project configuration instructions: NONE + +egrep-q() { + egrep "$@" >/dev/null 2>/dev/null +} + +# Suggest color configuration. +if test -z "$(git config --get color.ui)"; then + echo ' +One may enable color output from Git commands with + + git config --global color.ui auto +' +fi + +# Suggest bash completion. +if ! bash -i -c 'echo $PS1' | egrep-q '__git_ps1'; then + echo ' +A dynamic, informative Git shell prompt can be obtained by sourcing +the git bash-completion script in your "~/.bashrc". Set the PS1 +environmental variable as suggested in the comments at the top of the +bash-completion script. You may need to install the bash-completion +package from your distribution to obtain it. +' +fi + +# Suggest merge tool. +if test -z "$(git config --get merge.tool)"; then + echo ' +One may configure Git to load a merge tool with + + git config merge.tool + +See "git help mergetool" for more information. +' +fi diff --git a/test/API/driver/kwsys/Glob.cxx b/test/API/driver/kwsys/Glob.cxx new file mode 100644 index 00000000000..34bb0d0fe00 --- /dev/null +++ b/test/API/driver/kwsys/Glob.cxx @@ -0,0 +1,448 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(Glob.hxx) + +#include KWSYS_HEADER(Configure.hxx) + +#include KWSYS_HEADER(RegularExpression.hxx) +#include KWSYS_HEADER(SystemTools.hxx) +#include KWSYS_HEADER(Directory.hxx) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "Configure.hxx.in" +# include "Directory.hxx.in" +# include "Glob.hxx.in" +# include "RegularExpression.hxx.in" +# include "SystemTools.hxx.in" +#endif + +#include +#include +#include + +#include +#include +#include +namespace KWSYS_NAMESPACE { +#if defined(_WIN32) || defined(__APPLE__) || defined(__CYGWIN__) +// On Windows and Apple, no difference between lower and upper case +# define KWSYS_GLOB_CASE_INDEPENDENT +#endif + +#if defined(_WIN32) || defined(__CYGWIN__) +// Handle network paths +# define KWSYS_GLOB_SUPPORT_NETWORK_PATHS +#endif + +class GlobInternals +{ +public: + std::vector Files; + std::vector Expressions; +}; + +Glob::Glob() +{ + this->Internals = new GlobInternals; + this->Recurse = false; + this->Relative = ""; + + this->RecurseThroughSymlinks = true; + // RecurseThroughSymlinks is true by default for backwards compatibility, + // not because it's a good idea... + this->FollowedSymlinkCount = 0; + + // Keep separate variables for directory listing for back compatibility + this->ListDirs = true; + this->RecurseListDirs = false; +} + +Glob::~Glob() +{ + delete this->Internals; +} + +std::vector& Glob::GetFiles() +{ + return this->Internals->Files; +} + +std::string Glob::PatternToRegex(const std::string& pattern, + bool require_whole_string, bool preserve_case) +{ + // Incrementally build the regular expression from the pattern. + std::string regex = require_whole_string ? "^" : ""; + std::string::const_iterator pattern_first = pattern.begin(); + std::string::const_iterator pattern_last = pattern.end(); + for (std::string::const_iterator i = pattern_first; i != pattern_last; ++i) { + int c = *i; + if (c == '*') { + // A '*' (not between brackets) matches any string. 
+ // We modify this to not match slashes since the original glob + // pattern documentation was meant for matching file name + // components separated by slashes. + regex += "[^/]*"; + } else if (c == '?') { + // A '?' (not between brackets) matches any single character. + // We modify this to not match slashes since the original glob + // pattern documentation was meant for matching file name + // components separated by slashes. + regex += "[^/]"; + } else if (c == '[') { + // Parse out the bracket expression. It begins just after the + // opening character. + std::string::const_iterator bracket_first = i + 1; + std::string::const_iterator bracket_last = bracket_first; + + // The first character may be complementation '!' or '^'. + if (bracket_last != pattern_last && + (*bracket_last == '!' || *bracket_last == '^')) { + ++bracket_last; + } + + // If the next character is a ']' it is included in the brackets + // because the bracket string may not be empty. + if (bracket_last != pattern_last && *bracket_last == ']') { + ++bracket_last; + } + + // Search for the closing ']'. + while (bracket_last != pattern_last && *bracket_last != ']') { + ++bracket_last; + } + + // Check whether we have a complete bracket string. + if (bracket_last == pattern_last) { + // The bracket string did not end, so it was opened simply by + // a '[' that is supposed to be matched literally. + regex += "\\["; + } else { + // Convert the bracket string to its regex equivalent. + std::string::const_iterator k = bracket_first; + + // Open the regex block. + regex += "["; + + // A regex range complement uses '^' instead of '!'. + if (k != bracket_last && *k == '!') { + regex += "^"; + ++k; + } + + // Convert the remaining characters. + for (; k != bracket_last; ++k) { + // Backslashes must be escaped. + if (*k == '\\') { + regex += "\\"; + } + + // Store this character. + regex += *k; + } + + // Close the regex block. + regex += "]"; + + // Jump to the end of the bracket string. + i = bracket_last; + } + } else { + // A single character matches itself. + int ch = c; + if (!(('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') || + ('0' <= ch && ch <= '9'))) { + // Escape the non-alphanumeric character. + regex += "\\"; + } +#if defined(KWSYS_GLOB_CASE_INDEPENDENT) + else { + // On case-insensitive systems file names are converted to lower + // case before matching. + if (!preserve_case) { + ch = tolower(ch); + } + } +#endif + (void)preserve_case; + // Store the character. + regex.append(1, static_cast(ch)); + } + } + + if (require_whole_string) { + regex += "$"; + } + return regex; +} + +bool Glob::RecurseDirectory(std::string::size_type start, + const std::string& dir, GlobMessages* messages) +{ + kwsys::Directory d; + if (!d.Load(dir)) { + return true; + } + unsigned long cc; + std::string realname; + std::string fname; + for (cc = 0; cc < d.GetNumberOfFiles(); cc++) { + fname = d.GetFile(cc); + if (fname == "." 
|| fname == "..") { + continue; + } + + if (start == 0) { + realname = dir + fname; + } else { + realname = dir + "/" + fname; + } + +#if defined(KWSYS_GLOB_CASE_INDEPENDENT) + // On Windows and Apple, no difference between lower and upper case + fname = kwsys::SystemTools::LowerCase(fname); +#endif + + bool isDir = kwsys::SystemTools::FileIsDirectory(realname); + bool isSymLink = kwsys::SystemTools::FileIsSymlink(realname); + + if (isDir && (!isSymLink || this->RecurseThroughSymlinks)) { + if (isSymLink) { + ++this->FollowedSymlinkCount; + std::string realPathErrorMessage; + std::string canonicalPath( + SystemTools::GetRealPath(dir, &realPathErrorMessage)); + + if (!realPathErrorMessage.empty()) { + if (messages) { + messages->push_back( + Message(Glob::error, + "Canonical path generation from path '" + dir + + "' failed! Reason: '" + realPathErrorMessage + "'")); + } + return false; + } + + if (std::find(this->VisitedSymlinks.begin(), + this->VisitedSymlinks.end(), + canonicalPath) == this->VisitedSymlinks.end()) { + if (this->RecurseListDirs) { + // symlinks are treated as directories + this->AddFile(this->Internals->Files, realname); + } + + this->VisitedSymlinks.push_back(canonicalPath); + if (!this->RecurseDirectory(start + 1, realname, messages)) { + this->VisitedSymlinks.pop_back(); + + return false; + } + this->VisitedSymlinks.pop_back(); + } + // else we have already visited this symlink - prevent cyclic recursion + else if (messages) { + std::string message; + for (std::vector::const_iterator pathIt = + std::find(this->VisitedSymlinks.begin(), + this->VisitedSymlinks.end(), canonicalPath); + pathIt != this->VisitedSymlinks.end(); ++pathIt) { + message += *pathIt + "\n"; + } + message += canonicalPath + "/" + fname; + messages->push_back(Message(Glob::cyclicRecursion, message)); + } + } else { + if (this->RecurseListDirs) { + this->AddFile(this->Internals->Files, realname); + } + if (!this->RecurseDirectory(start + 1, realname, messages)) { + return false; + } + } + } else { + if (!this->Internals->Expressions.empty() && + this->Internals->Expressions.back().find(fname)) { + this->AddFile(this->Internals->Files, realname); + } + } + } + + return true; +} + +void Glob::ProcessDirectory(std::string::size_type start, + const std::string& dir, GlobMessages* messages) +{ + // std::cout << "ProcessDirectory: " << dir << std::endl; + bool last = (start == this->Internals->Expressions.size() - 1); + if (last && this->Recurse) { + this->RecurseDirectory(start, dir, messages); + return; + } + + if (start >= this->Internals->Expressions.size()) { + return; + } + + kwsys::Directory d; + if (!d.Load(dir)) { + return; + } + unsigned long cc; + std::string realname; + std::string fname; + for (cc = 0; cc < d.GetNumberOfFiles(); cc++) { + fname = d.GetFile(cc); + if (fname == "." || fname == "..") { + continue; + } + + if (start == 0) { + realname = dir + fname; + } else { + realname = dir + "/" + fname; + } + +#if defined(KWSYS_GLOB_CASE_INDEPENDENT) + // On case-insensitive file systems convert to lower case for matching. 
+ fname = kwsys::SystemTools::LowerCase(fname); +#endif + + // std::cout << "Look at file: " << fname << std::endl; + // std::cout << "Match: " + // << this->Internals->TextExpressions[start].c_str() << std::endl; + // std::cout << "Real name: " << realname << std::endl; + + if ((!last && !kwsys::SystemTools::FileIsDirectory(realname)) || + (!this->ListDirs && last && + kwsys::SystemTools::FileIsDirectory(realname))) { + continue; + } + + if (this->Internals->Expressions[start].find(fname)) { + if (last) { + this->AddFile(this->Internals->Files, realname); + } else { + this->ProcessDirectory(start + 1, realname, messages); + } + } + } +} + +bool Glob::FindFiles(const std::string& inexpr, GlobMessages* messages) +{ + std::string cexpr; + std::string::size_type cc; + std::string expr = inexpr; + + this->Internals->Expressions.clear(); + this->Internals->Files.clear(); + + if (!kwsys::SystemTools::FileIsFullPath(expr)) { + expr = kwsys::SystemTools::GetCurrentWorkingDirectory(); + expr += "/" + inexpr; + } + std::string fexpr = expr; + + std::string::size_type skip = 0; + std::string::size_type last_slash = 0; + for (cc = 0; cc < expr.size(); cc++) { + if (cc > 0 && expr[cc] == '/' && expr[cc - 1] != '\\') { + last_slash = cc; + } + if (cc > 0 && (expr[cc] == '[' || expr[cc] == '?' || expr[cc] == '*') && + expr[cc - 1] != '\\') { + break; + } + } + if (last_slash > 0) { + // std::cout << "I can skip: " << fexpr.substr(0, last_slash) + // << std::endl; + skip = last_slash; + } + if (skip == 0) { +#if defined(KWSYS_GLOB_SUPPORT_NETWORK_PATHS) + // Handle network paths + if (expr[0] == '/' && expr[1] == '/') { + int cnt = 0; + for (cc = 2; cc < expr.size(); cc++) { + if (expr[cc] == '/') { + cnt++; + if (cnt == 2) { + break; + } + } + } + skip = int(cc + 1); + } else +#endif + // Handle drive letters on Windows + if (expr[1] == ':' && expr[0] != '/') { + skip = 2; + } + } + + if (skip > 0) { + expr = expr.substr(skip); + } + + cexpr = ""; + for (cc = 0; cc < expr.size(); cc++) { + int ch = expr[cc]; + if (ch == '/') { + if (!cexpr.empty()) { + this->AddExpression(cexpr); + } + cexpr = ""; + } else { + cexpr.append(1, static_cast(ch)); + } + } + if (!cexpr.empty()) { + this->AddExpression(cexpr); + } + + // Handle network paths + if (skip > 0) { + this->ProcessDirectory(0, fexpr.substr(0, skip) + "/", messages); + } else { + this->ProcessDirectory(0, "/", messages); + } + return true; +} + +void Glob::AddExpression(const std::string& expr) +{ + this->Internals->Expressions.push_back( + kwsys::RegularExpression(this->PatternToRegex(expr))); +} + +void Glob::SetRelative(const char* dir) +{ + if (!dir) { + this->Relative = ""; + return; + } + this->Relative = dir; +} + +const char* Glob::GetRelative() +{ + if (this->Relative.empty()) { + return nullptr; + } + return this->Relative.c_str(); +} + +void Glob::AddFile(std::vector& files, const std::string& file) +{ + if (!this->Relative.empty()) { + files.push_back(kwsys::SystemTools::RelativePath(this->Relative, file)); + } else { + files.push_back(file); + } +} + +} // namespace KWSYS_NAMESPACE diff --git a/test/API/driver/kwsys/Glob.hxx.in b/test/API/driver/kwsys/Glob.hxx.in new file mode 100644 index 00000000000..170766f4b1e --- /dev/null +++ b/test/API/driver/kwsys/Glob.hxx.in @@ -0,0 +1,134 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/
+#ifndef @KWSYS_NAMESPACE@_Glob_hxx
+#define @KWSYS_NAMESPACE@_Glob_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <string>
+#include <vector>
+
+namespace @KWSYS_NAMESPACE@ {
+
+class GlobInternals;
+
+/** \class Glob
+ * \brief Portable globbing searches.
+ *
+ * Globbing expressions are much simpler than regular
+ * expressions. This class will search for files using
+ * globbing expressions.
+ *
+ * Finds all files that match a given globbing expression.
+ */
+class @KWSYS_NAMESPACE@_EXPORT Glob
+{
+public:
+  enum MessageType
+  {
+    error,
+    cyclicRecursion
+  };
+
+  struct Message
+  {
+    MessageType type;
+    std::string content;
+
+    Message(MessageType t, const std::string& c)
+      : type(t)
+      , content(c)
+    {
+    }
+    ~Message() = default;
+    Message(const Message& msg) = default;
+    Message& operator=(Message const& msg) = default;
+  };
+
+  typedef std::vector<Message> GlobMessages;
+  typedef std::vector<Message>::iterator GlobMessagesIterator;
+
+public:
+  Glob();
+  ~Glob();
+
+  //! Find all files that match the pattern.
+  bool FindFiles(const std::string& inexpr, GlobMessages* messages = nullptr);
+
+  //! Return the list of files that matched.
+  std::vector<std::string>& GetFiles();
+
+  //! Set recurse to true to match subdirectories.
+  void RecurseOn() { this->SetRecurse(true); }
+  void RecurseOff() { this->SetRecurse(false); }
+  void SetRecurse(bool i) { this->Recurse = i; }
+  bool GetRecurse() { return this->Recurse; }
+
+  //! Set recurse through symlinks to true if recursion should traverse the
+  // linked-to directories
+  void RecurseThroughSymlinksOn() { this->SetRecurseThroughSymlinks(true); }
+  void RecurseThroughSymlinksOff() { this->SetRecurseThroughSymlinks(false); }
+  void SetRecurseThroughSymlinks(bool i) { this->RecurseThroughSymlinks = i; }
+  bool GetRecurseThroughSymlinks() { return this->RecurseThroughSymlinks; }
+
+  //! Get the number of symlinks followed through recursion
+  unsigned int GetFollowedSymlinkCount() { return this->FollowedSymlinkCount; }
+
+  //! Set relative to true to only show relative path to files.
+  void SetRelative(const char* dir);
+  const char* GetRelative();
+
+  /** Convert the given globbing pattern to a regular expression.
+      There is no way to quote meta-characters. The
+      require_whole_string argument specifies whether the regex is
+      automatically surrounded by "^" and "$" to match the whole
+      string. This is on by default because patterns always match
+      whole strings, but may be disabled to support concatenating
+      expressions more easily (regex1|regex2|etc). */
+  static std::string PatternToRegex(const std::string& pattern,
+                                    bool require_whole_string = true,
+                                    bool preserve_case = false);
+
+  /** Getters and setters for enabling and disabling directory
+      listing in recursive and non recursive globbing mode.
+      If listing is enabled in recursive mode it also lists
+      directory symbolic links even if follow symlinks is enabled. */
+  void SetListDirs(bool list) { this->ListDirs = list; }
+  bool GetListDirs() const { return this->ListDirs; }
+  void SetRecurseListDirs(bool list) { this->RecurseListDirs = list; }
+  bool GetRecurseListDirs() const { return this->RecurseListDirs; }
+
+protected:
+  //! Process directory
+  void ProcessDirectory(std::string::size_type start, const std::string& dir,
+                        GlobMessages* messages);
+
+  //! Process last directory, but only when recurse flags is on. That is
+  // effectively like saying: /path/to/file/**/file
+  bool RecurseDirectory(std::string::size_type start, const std::string& dir,
+                        GlobMessages* messages);
+
+  //! Add regular expression
+  void AddExpression(const std::string& expr);
+
+  //! Add a file to the list
+  void AddFile(std::vector<std::string>& files, const std::string& file);
+
+  GlobInternals* Internals;
+  bool Recurse;
+  std::string Relative;
+  bool RecurseThroughSymlinks;
+  unsigned int FollowedSymlinkCount;
+  std::vector<std::string> VisitedSymlinks;
+  bool ListDirs;
+  bool RecurseListDirs;
+
+private:
+  Glob(const Glob&); // Not implemented.
+  void operator=(const Glob&); // Not implemented.
+};
+
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
diff --git a/test/API/driver/kwsys/IOStream.cxx b/test/API/driver/kwsys/IOStream.cxx
new file mode 100644
index 00000000000..e21f87d4539
--- /dev/null
+++ b/test/API/driver/kwsys/IOStream.cxx
@@ -0,0 +1,255 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+   file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Configure.hxx)
+
+// Include the streams library.
+#include <iostream>
+#include KWSYS_HEADER(IOStream.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Configure.hxx.in"
+# include "IOStream.hxx.in"
+#endif
+
+// Implement the rest of this file only if it is needed.
+#if KWSYS_IOS_NEED_OPERATORS_LL
+
+# include <stdio.h>  // sscanf, sprintf
+# include <string.h> // memchr
+
+# if defined(_MAX_INT_DIG)
+# define KWSYS_IOS_INT64_MAX_DIG _MAX_INT_DIG
+# else
+# define KWSYS_IOS_INT64_MAX_DIG 32
+# endif
+
+namespace KWSYS_NAMESPACE {
+
+// Scan an input stream for an integer value.
+static int IOStreamScanStream(std::istream& is, char* buffer)
+{
+  // Prepare to write to buffer.
+  char* out = buffer;
+  char* end = buffer + KWSYS_IOS_INT64_MAX_DIG - 1;
+
+  // Look for leading sign.
+  if (is.peek() == '+') {
+    *out++ = '+';
+    is.ignore();
+  } else if (is.peek() == '-') {
+    *out++ = '-';
+    is.ignore();
+  }
+
+  // Determine the base. If not specified in the stream, try to
+  // detect it from the input. A leading 0x means hex, and a leading
+  // 0 alone means octal.
+  int base = 0;
+  int flags = is.flags() & std::ios_base::basefield;
+  if (flags == std::ios_base::oct) {
+    base = 8;
+  } else if (flags == std::ios_base::dec) {
+    base = 10;
+  } else if (flags == std::ios_base::hex) {
+    base = 16;
+  }
+  bool foundDigit = false;
+  bool foundNonZero = false;
+  if (is.peek() == '0') {
+    foundDigit = true;
+    is.ignore();
+    if ((is.peek() == 'x' || is.peek() == 'X') && (base == 0 || base == 16)) {
+      base = 16;
+      foundDigit = false;
+      is.ignore();
+    } else if (base == 0) {
+      base = 8;
+    }
+  }
+
+  // Determine the range of digits allowed for this number.
+  const char* digits = "0123456789abcdefABCDEF";
+  int maxDigitIndex = 10;
+  if (base == 8) {
+    maxDigitIndex = 8;
+  } else if (base == 16) {
+    maxDigitIndex = 10 + 6 + 6;
+  }
+
+  // Scan until an invalid digit is found.
+  for (; is.peek() != EOF; is.ignore()) {
+    if (memchr(digits, *out = (char)is.peek(), maxDigitIndex) != 0) {
+      if ((foundNonZero || *out != '0') && out < end) {
+        ++out;
+        foundNonZero = true;
+      }
+      foundDigit = true;
+    } else {
+      break;
+    }
+  }
+
+  // Correct the buffer contents for degenerate cases.
+  if (foundDigit && !foundNonZero) {
+    *out++ = '0';
+  } else if (!foundDigit) {
+    out = buffer;
+  }
+
+  // Terminate the string in the buffer.
+ *out = '\0'; + + return base; +} + +// Read an integer value from an input stream. +template +std::istream& IOStreamScanTemplate(std::istream& is, T& value, char type) +{ + int state = std::ios_base::goodbit; + + // Skip leading whitespace. + std::istream::sentry okay(is); + + if (okay) { + try { + // Copy the string to a buffer and construct the format string. + char buffer[KWSYS_IOS_INT64_MAX_DIG]; +# if defined(_MSC_VER) + char format[] = "%I64_"; + const int typeIndex = 4; +# else + char format[] = "%ll_"; + const int typeIndex = 3; +# endif + switch (IOStreamScanStream(is, buffer)) { + case 8: + format[typeIndex] = 'o'; + break; + case 0: // Default to decimal if not told otherwise. + case 10: + format[typeIndex] = type; + break; + case 16: + format[typeIndex] = 'x'; + break; + }; + + // Use sscanf to parse the number from the buffer. + T result; + int success = (sscanf(buffer, format, &result) == 1) ? 1 : 0; + + // Set flags for resulting state. + if (is.peek() == EOF) { + state |= std::ios_base::eofbit; + } + if (!success) { + state |= std::ios_base::failbit; + } else { + value = result; + } + } catch (...) { + state |= std::ios_base::badbit; + } + } + + is.setstate(std::ios_base::iostate(state)); + return is; +} + +// Print an integer value to an output stream. +template +std::ostream& IOStreamPrintTemplate(std::ostream& os, T value, char type) +{ + std::ostream::sentry okay(os); + if (okay) { + try { + // Construct the format string. + char format[8]; + char* f = format; + *f++ = '%'; + if (os.flags() & std::ios_base::showpos) { + *f++ = '+'; + } + if (os.flags() & std::ios_base::showbase) { + *f++ = '#'; + } +# if defined(_MSC_VER) + *f++ = 'I'; + *f++ = '6'; + *f++ = '4'; +# else + *f++ = 'l'; + *f++ = 'l'; +# endif + long bflags = os.flags() & std::ios_base::basefield; + if (bflags == std::ios_base::oct) { + *f++ = 'o'; + } else if (bflags != std::ios_base::hex) { + *f++ = type; + } else if (os.flags() & std::ios_base::uppercase) { + *f++ = 'X'; + } else { + *f++ = 'x'; + } + *f = '\0'; + + // Use sprintf to print to a buffer and then write the + // buffer to the stream. + char buffer[2 * KWSYS_IOS_INT64_MAX_DIG]; + sprintf(buffer, format, value); + os << buffer; + } catch (...) { + os.clear(os.rdstate() | std::ios_base::badbit); + } + } + return os; +} + +# if !KWSYS_IOS_HAS_ISTREAM_LONG_LONG +// Implement input stream operator for IOStreamSLL. +std::istream& IOStreamScan(std::istream& is, IOStreamSLL& value) +{ + return IOStreamScanTemplate(is, value, 'd'); +} + +// Implement input stream operator for IOStreamULL. +std::istream& IOStreamScan(std::istream& is, IOStreamULL& value) +{ + return IOStreamScanTemplate(is, value, 'u'); +} +# endif + +# if !KWSYS_IOS_HAS_OSTREAM_LONG_LONG +// Implement output stream operator for IOStreamSLL. +std::ostream& IOStreamPrint(std::ostream& os, IOStreamSLL value) +{ + return IOStreamPrintTemplate(os, value, 'd'); +} + +// Implement output stream operator for IOStreamULL. +std::ostream& IOStreamPrint(std::ostream& os, IOStreamULL value) +{ + return IOStreamPrintTemplate(os, value, 'u'); +} +# endif + +} // namespace KWSYS_NAMESPACE + +#else + +namespace KWSYS_NAMESPACE { + +// Create one public symbol in this object file to avoid warnings from +// archivers. 
+void IOStreamSymbolToAvoidWarning(); +void IOStreamSymbolToAvoidWarning() +{ +} + +} // namespace KWSYS_NAMESPACE + +#endif // KWSYS_IOS_NEED_OPERATORS_LL diff --git a/test/API/driver/kwsys/IOStream.hxx.in b/test/API/driver/kwsys/IOStream.hxx.in new file mode 100644 index 00000000000..db8a23ef56d --- /dev/null +++ b/test/API/driver/kwsys/IOStream.hxx.in @@ -0,0 +1,126 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_IOStream_hxx +#define @KWSYS_NAMESPACE@_IOStream_hxx + +#include + +/* Define these macros temporarily to keep the code readable. */ +#if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT +#endif + +/* Whether istream supports long long. */ +#define @KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG \ + @KWSYS_IOS_HAS_ISTREAM_LONG_LONG@ + +/* Whether ostream supports long long. */ +#define @KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG \ + @KWSYS_IOS_HAS_OSTREAM_LONG_LONG@ + +/* Determine whether we need to define the streaming operators for + long long or __int64. */ +#if @KWSYS_USE_LONG_LONG@ +# if !@KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG || \ + !@KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG +# define @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL 1 +namespace @KWSYS_NAMESPACE@ { +typedef long long IOStreamSLL; +typedef unsigned long long IOStreamULL; +} +# endif +#elif defined(_MSC_VER) && _MSC_VER < 1300 +# define @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL 1 +namespace @KWSYS_NAMESPACE@ { +typedef __int64 IOStreamSLL; +typedef unsigned __int64 IOStreamULL; +} +#endif +#if !defined(@KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL) +# define @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL 0 +#endif + +#if @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL +# if !@KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG + +/* Input stream operator implementation functions. */ +namespace @KWSYS_NAMESPACE@ { +kwsysEXPORT std::istream& IOStreamScan(std::istream&, IOStreamSLL&); +kwsysEXPORT std::istream& IOStreamScan(std::istream&, IOStreamULL&); +} + +/* Provide input stream operator for long long. */ +# if !defined(@KWSYS_NAMESPACE@_IOS_NO_ISTREAM_LONG_LONG) && \ + !defined(KWSYS_IOS_ISTREAM_LONG_LONG_DEFINED) +# define KWSYS_IOS_ISTREAM_LONG_LONG_DEFINED +# define @KWSYS_NAMESPACE@_IOS_ISTREAM_LONG_LONG_DEFINED +inline std::istream& operator>>(std::istream& is, + @KWSYS_NAMESPACE@::IOStreamSLL& value) +{ + return @KWSYS_NAMESPACE@::IOStreamScan(is, value); +} +# endif + +/* Provide input stream operator for unsigned long long. */ +# if !defined(@KWSYS_NAMESPACE@_IOS_NO_ISTREAM_UNSIGNED_LONG_LONG) && \ + !defined(KWSYS_IOS_ISTREAM_UNSIGNED_LONG_LONG_DEFINED) +# define KWSYS_IOS_ISTREAM_UNSIGNED_LONG_LONG_DEFINED +# define @KWSYS_NAMESPACE@_IOS_ISTREAM_UNSIGNED_LONG_LONG_DEFINED +inline std::istream& operator>>(std::istream& is, + @KWSYS_NAMESPACE@::IOStreamULL& value) +{ + return @KWSYS_NAMESPACE@::IOStreamScan(is, value); +} +# endif +# endif /* !@KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG */ + +# if !@KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG + +/* Output stream operator implementation functions. */ +namespace @KWSYS_NAMESPACE@ { +kwsysEXPORT std::ostream& IOStreamPrint(std::ostream&, IOStreamSLL); +kwsysEXPORT std::ostream& IOStreamPrint(std::ostream&, IOStreamULL); +} + +/* Provide output stream operator for long long. 
*/ +# if !defined(@KWSYS_NAMESPACE@_IOS_NO_OSTREAM_LONG_LONG) && \ + !defined(KWSYS_IOS_OSTREAM_LONG_LONG_DEFINED) +# define KWSYS_IOS_OSTREAM_LONG_LONG_DEFINED +# define @KWSYS_NAMESPACE@_IOS_OSTREAM_LONG_LONG_DEFINED +inline std::ostream& operator<<(std::ostream& os, + @KWSYS_NAMESPACE@::IOStreamSLL value) +{ + return @KWSYS_NAMESPACE@::IOStreamPrint(os, value); +} +# endif + +/* Provide output stream operator for unsigned long long. */ +# if !defined(@KWSYS_NAMESPACE@_IOS_NO_OSTREAM_UNSIGNED_LONG_LONG) && \ + !defined(KWSYS_IOS_OSTREAM_UNSIGNED_LONG_LONG_DEFINED) +# define KWSYS_IOS_OSTREAM_UNSIGNED_LONG_LONG_DEFINED +# define @KWSYS_NAMESPACE@_IOS_OSTREAM_UNSIGNED_LONG_LONG_DEFINED +inline std::ostream& operator<<(std::ostream& os, + @KWSYS_NAMESPACE@::IOStreamULL value) +{ + return @KWSYS_NAMESPACE@::IOStreamPrint(os, value); +} +# endif +# endif /* !@KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG */ +#endif /* @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL */ + +/* Undefine temporary macros. */ +#if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# undef kwsysEXPORT +#endif + +/* If building a C++ file in kwsys itself, give the source file + access to the macros without a configured namespace. */ +#if defined(KWSYS_NAMESPACE) +# define KWSYS_IOS_HAS_ISTREAM_LONG_LONG \ + @KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG +# define KWSYS_IOS_HAS_OSTREAM_LONG_LONG \ + @KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG +# define KWSYS_IOS_NEED_OPERATORS_LL @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL +#endif + +#endif diff --git a/test/API/driver/kwsys/MD5.c b/test/API/driver/kwsys/MD5.c new file mode 100644 index 00000000000..97cf9ba68b1 --- /dev/null +++ b/test/API/driver/kwsys/MD5.c @@ -0,0 +1,494 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(MD5.h) + +/* Work-around CMake dependency scanning limitation. This must + duplicate the above list of headers. */ +#if 0 +# include "MD5.h.in" +#endif + +#include /* size_t */ +#include /* malloc, free */ +#include /* memcpy, strlen */ + +/* This MD5 implementation has been taken from a third party. Slight + modifications to the arrangement of the code have been made to put + it in a single source file instead of a separate header and + implementation file. */ + +#if defined(__clang__) && !defined(__INTEL_COMPILER) +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wcast-align" +#endif + +/* + Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + L. Peter Deutsch + ghost@aladdin.com + + */ +/* + Independent implementation of MD5 (RFC 1321). 
+ + This code implements the MD5 Algorithm defined in RFC 1321, whose + text is available at + http://www.ietf.org/rfc/rfc1321.txt + The code is derived from the text of the RFC, including the test suite + (section A.5) but excluding the rest of Appendix A. It does not include + any code or documentation that is identified in the RFC as being + copyrighted. + + The original and principal author of md5.c is L. Peter Deutsch + . Other authors are noted in the change history + that follows (in reverse chronological order): + + 2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order + either statically or dynamically; added missing #include + in library. + 2002-03-11 lpd Corrected argument list for main(), and added int return + type, in test program and T value program. + 2002-02-21 lpd Added missing #include in test program. + 2000-07-03 lpd Patched to eliminate warnings about "constant is + unsigned in ANSI C, signed in traditional"; made test program + self-checking. + 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. + 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5). + 1999-05-03 lpd Original version. + */ + +/* + * This package supports both compile-time and run-time determination of CPU + * byte order. If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be + * compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is + * defined as non-zero, the code will be compiled to run only on big-endian + * CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to + * run on either big- or little-endian CPUs, but will run slightly less + * efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined. + */ + +typedef unsigned char md5_byte_t; /* 8-bit byte */ +typedef unsigned int md5_word_t; /* 32-bit word */ + +/* Define the state of the MD5 Algorithm. */ +typedef struct md5_state_s +{ + md5_word_t count[2]; /* message length in bits, lsw first */ + md5_word_t abcd[4]; /* digest buffer */ + md5_byte_t buf[64]; /* accumulate block */ +} md5_state_t; + +#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */ +#ifdef ARCH_IS_BIG_ENDIAN +# define BYTE_ORDER (ARCH_IS_BIG_ENDIAN ? 
1 : -1) +#else +# define BYTE_ORDER 0 +#endif + +#define T_MASK ((md5_word_t)~0) +#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87) +#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9) +#define T3 0x242070db +#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111) +#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050) +#define T6 0x4787c62a +#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec) +#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe) +#define T9 0x698098d8 +#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850) +#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e) +#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841) +#define T13 0x6b901122 +#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c) +#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71) +#define T16 0x49b40821 +#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d) +#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf) +#define T19 0x265e5a51 +#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855) +#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2) +#define T22 0x02441453 +#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e) +#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437) +#define T25 0x21e1cde6 +#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829) +#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278) +#define T28 0x455a14ed +#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa) +#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07) +#define T31 0x676f02d9 +#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375) +#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd) +#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e) +#define T35 0x6d9d6122 +#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3) +#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb) +#define T38 0x4bdecfa9 +#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f) +#define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f) +#define T41 0x289b7ec6 +#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805) +#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a) +#define T44 0x04881d05 +#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6) +#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a) +#define T47 0x1fa27cf8 +#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a) +#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb) +#define T50 0x432aff97 +#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58) +#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6) +#define T53 0x655b59c3 +#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d) +#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82) +#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e) +#define T57 0x6fa87e4f +#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f) +#define T59 /* 0xa3014314 */ (T_MASK ^ 0x5cfebceb) +#define T60 0x4e0811a1 +#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d) +#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca) +#define T63 0x2ad7d2bb +#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e) + +static void md5_process(md5_state_t* pms, const md5_byte_t* data /*[64]*/) +{ + md5_word_t a = pms->abcd[0], b = pms->abcd[1], c = pms->abcd[2], + d = pms->abcd[3]; + md5_word_t t; +#if BYTE_ORDER > 0 + /* Define storage only for big-endian CPUs. */ + md5_word_t X[16]; +#else + /* Define storage for little-endian or both types of CPUs. */ + md5_word_t xbuf[16]; + const md5_word_t* X; +#endif + + { +#if BYTE_ORDER == 0 + /* + * Determine dynamically whether this is a big-endian or + * little-endian machine, since we can use a more efficient + * algorithm on the latter. 
+ */ + static const int w = 1; + + if (*((const md5_byte_t*)&w)) /* dynamic little-endian */ +#endif +#if BYTE_ORDER <= 0 /* little-endian */ + { + /* + * On little-endian machines, we can process properly aligned + * data without copying it. + */ + if (!((data - (const md5_byte_t*)0) & 3)) { + /* data are properly aligned */ + X = (const md5_word_t*)data; + } else { + /* not aligned */ + memcpy(xbuf, data, 64); + X = xbuf; + } + } +#endif +#if BYTE_ORDER == 0 + else /* dynamic big-endian */ +#endif +#if BYTE_ORDER >= 0 /* big-endian */ + { + /* + * On big-endian machines, we must arrange the bytes in the + * right order. + */ + const md5_byte_t* xp = data; + int i; + +# if BYTE_ORDER == 0 + X = xbuf; /* (dynamic only) */ +# else +# define xbuf X /* (static only) */ +# endif + for (i = 0; i < 16; ++i, xp += 4) + xbuf[i] = + (md5_word_t)(xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24)); + } +#endif + } + +#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) + +/* Round 1. */ +/* Let [abcd k s i] denote the operation + a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */ +#define F(x, y, z) (((x) & (y)) | (~(x) & (z))) +#define SET(a, b, c, d, k, s, Ti) \ + t = a + F(b, c, d) + X[k] + (Ti); \ + a = ROTATE_LEFT(t, s) + b + /* Do the following 16 operations. */ + SET(a, b, c, d, 0, 7, T1); + SET(d, a, b, c, 1, 12, T2); + SET(c, d, a, b, 2, 17, T3); + SET(b, c, d, a, 3, 22, T4); + SET(a, b, c, d, 4, 7, T5); + SET(d, a, b, c, 5, 12, T6); + SET(c, d, a, b, 6, 17, T7); + SET(b, c, d, a, 7, 22, T8); + SET(a, b, c, d, 8, 7, T9); + SET(d, a, b, c, 9, 12, T10); + SET(c, d, a, b, 10, 17, T11); + SET(b, c, d, a, 11, 22, T12); + SET(a, b, c, d, 12, 7, T13); + SET(d, a, b, c, 13, 12, T14); + SET(c, d, a, b, 14, 17, T15); + SET(b, c, d, a, 15, 22, T16); +#undef SET + +/* Round 2. */ +/* Let [abcd k s i] denote the operation + a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */ +#define G(x, y, z) (((x) & (z)) | ((y) & ~(z))) +#define SET(a, b, c, d, k, s, Ti) \ + t = a + G(b, c, d) + X[k] + (Ti); \ + a = ROTATE_LEFT(t, s) + b + /* Do the following 16 operations. */ + SET(a, b, c, d, 1, 5, T17); + SET(d, a, b, c, 6, 9, T18); + SET(c, d, a, b, 11, 14, T19); + SET(b, c, d, a, 0, 20, T20); + SET(a, b, c, d, 5, 5, T21); + SET(d, a, b, c, 10, 9, T22); + SET(c, d, a, b, 15, 14, T23); + SET(b, c, d, a, 4, 20, T24); + SET(a, b, c, d, 9, 5, T25); + SET(d, a, b, c, 14, 9, T26); + SET(c, d, a, b, 3, 14, T27); + SET(b, c, d, a, 8, 20, T28); + SET(a, b, c, d, 13, 5, T29); + SET(d, a, b, c, 2, 9, T30); + SET(c, d, a, b, 7, 14, T31); + SET(b, c, d, a, 12, 20, T32); +#undef SET + +/* Round 3. */ +/* Let [abcd k s t] denote the operation + a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */ +#define H(x, y, z) ((x) ^ (y) ^ (z)) +#define SET(a, b, c, d, k, s, Ti) \ + t = a + H(b, c, d) + X[k] + (Ti); \ + a = ROTATE_LEFT(t, s) + b + /* Do the following 16 operations. */ + SET(a, b, c, d, 5, 4, T33); + SET(d, a, b, c, 8, 11, T34); + SET(c, d, a, b, 11, 16, T35); + SET(b, c, d, a, 14, 23, T36); + SET(a, b, c, d, 1, 4, T37); + SET(d, a, b, c, 4, 11, T38); + SET(c, d, a, b, 7, 16, T39); + SET(b, c, d, a, 10, 23, T40); + SET(a, b, c, d, 13, 4, T41); + SET(d, a, b, c, 0, 11, T42); + SET(c, d, a, b, 3, 16, T43); + SET(b, c, d, a, 6, 23, T44); + SET(a, b, c, d, 9, 4, T45); + SET(d, a, b, c, 12, 11, T46); + SET(c, d, a, b, 15, 16, T47); + SET(b, c, d, a, 2, 23, T48); +#undef SET + +/* Round 4. */ +/* Let [abcd k s t] denote the operation + a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). 
*/ +#define I(x, y, z) ((y) ^ ((x) | ~(z))) +#define SET(a, b, c, d, k, s, Ti) \ + t = a + I(b, c, d) + X[k] + (Ti); \ + a = ROTATE_LEFT(t, s) + b + /* Do the following 16 operations. */ + SET(a, b, c, d, 0, 6, T49); + SET(d, a, b, c, 7, 10, T50); + SET(c, d, a, b, 14, 15, T51); + SET(b, c, d, a, 5, 21, T52); + SET(a, b, c, d, 12, 6, T53); + SET(d, a, b, c, 3, 10, T54); + SET(c, d, a, b, 10, 15, T55); + SET(b, c, d, a, 1, 21, T56); + SET(a, b, c, d, 8, 6, T57); + SET(d, a, b, c, 15, 10, T58); + SET(c, d, a, b, 6, 15, T59); + SET(b, c, d, a, 13, 21, T60); + SET(a, b, c, d, 4, 6, T61); + SET(d, a, b, c, 11, 10, T62); + SET(c, d, a, b, 2, 15, T63); + SET(b, c, d, a, 9, 21, T64); +#undef SET + + /* Then perform the following additions. (That is increment each + of the four registers by the value it had before this block + was started.) */ + pms->abcd[0] += a; + pms->abcd[1] += b; + pms->abcd[2] += c; + pms->abcd[3] += d; +} + +/* Initialize the algorithm. */ +static void md5_init(md5_state_t* pms) +{ + pms->count[0] = pms->count[1] = 0; + pms->abcd[0] = 0x67452301; + pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476; + pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301; + pms->abcd[3] = 0x10325476; +} + +/* Append a string to the message. */ +static void md5_append(md5_state_t* pms, const md5_byte_t* data, size_t nbytes) +{ + const md5_byte_t* p = data; + size_t left = nbytes; + size_t offset = (pms->count[0] >> 3) & 63; + md5_word_t nbits = (md5_word_t)(nbytes << 3); + + if (nbytes <= 0) + return; + + /* Update the message length. */ + pms->count[1] += (md5_word_t)(nbytes >> 29); + pms->count[0] += nbits; + if (pms->count[0] < nbits) + pms->count[1]++; + + /* Process an initial partial block. */ + if (offset) { + size_t copy = (offset + nbytes > 64 ? 64 - offset : nbytes); + + memcpy(pms->buf + offset, p, copy); + if (offset + copy < 64) + return; + p += copy; + left -= copy; + md5_process(pms, pms->buf); + } + + /* Process full blocks. */ + for (; left >= 64; p += 64, left -= 64) + md5_process(pms, p); + + /* Process a final partial block. */ + if (left) + memcpy(pms->buf, p, left); +} + +/* Finish the message and return the digest. */ +static void md5_finish(md5_state_t* pms, md5_byte_t digest[16]) +{ + static const md5_byte_t pad[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + md5_byte_t data[8]; + int i; + + /* Save the length before padding. */ + for (i = 0; i < 8; ++i) + data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3)); + /* Pad to 56 bytes mod 64. */ + md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1); + /* Append the length. */ + md5_append(pms, data, 8); + for (i = 0; i < 16; ++i) + digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3)); +} + +#if defined(__clang__) && !defined(__INTEL_COMPILER) +# pragma clang diagnostic pop +#endif + +/* Wrap up the MD5 state in our opaque structure. */ +struct kwsysMD5_s +{ + md5_state_t md5_state; +}; + +kwsysMD5* kwsysMD5_New(void) +{ + /* Allocate a process control structure. */ + kwsysMD5* md5 = (kwsysMD5*)malloc(sizeof(kwsysMD5)); + if (!md5) { + return 0; + } + return md5; +} + +void kwsysMD5_Delete(kwsysMD5* md5) +{ + /* Make sure we have an instance. */ + if (!md5) { + return; + } + + /* Free memory. 
*/ + free(md5); +} + +void kwsysMD5_Initialize(kwsysMD5* md5) +{ + md5_init(&md5->md5_state); +} + +void kwsysMD5_Append(kwsysMD5* md5, unsigned char const* data, int length) +{ + size_t dlen; + if (length < 0) { + dlen = strlen((char const*)data); + } else { + dlen = (size_t)length; + } + md5_append(&md5->md5_state, (md5_byte_t const*)data, dlen); +} + +void kwsysMD5_Finalize(kwsysMD5* md5, unsigned char digest[16]) +{ + md5_finish(&md5->md5_state, (md5_byte_t*)digest); +} + +void kwsysMD5_FinalizeHex(kwsysMD5* md5, char buffer[32]) +{ + unsigned char digest[16]; + kwsysMD5_Finalize(md5, digest); + kwsysMD5_DigestToHex(digest, buffer); +} + +void kwsysMD5_DigestToHex(unsigned char const digest[16], char buffer[32]) +{ + /* Map from 4-bit index to hexadecimal representation. */ + static char const hex[16] = { '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; + + /* Map each 4-bit block separately. */ + char* out = buffer; + int i; + for (i = 0; i < 16; ++i) { + *out++ = hex[digest[i] >> 4]; + *out++ = hex[digest[i] & 0xF]; + } +} diff --git a/test/API/driver/kwsys/MD5.h.in b/test/API/driver/kwsys/MD5.h.in new file mode 100644 index 00000000000..7646f1297ab --- /dev/null +++ b/test/API/driver/kwsys/MD5.h.in @@ -0,0 +1,97 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_MD5_h +#define @KWSYS_NAMESPACE@_MD5_h + +#include <@KWSYS_NAMESPACE@/Configure.h> + +/* Redefine all public interface symbol names to be in the proper + namespace. These macros are used internally to kwsys only, and are + not visible to user code. Use kwsysHeaderDump.pl to reproduce + these macros after making changes to the interface. */ +#if !defined(KWSYS_NAMESPACE) +# define kwsys_ns(x) @KWSYS_NAMESPACE@##x +# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT +#endif +#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# define kwsysMD5 kwsys_ns(MD5) +# define kwsysMD5_s kwsys_ns(MD5_s) +# define kwsysMD5_New kwsys_ns(MD5_New) +# define kwsysMD5_Delete kwsys_ns(MD5_Delete) +# define kwsysMD5_Initialize kwsys_ns(MD5_Initialize) +# define kwsysMD5_Append kwsys_ns(MD5_Append) +# define kwsysMD5_Finalize kwsys_ns(MD5_Finalize) +# define kwsysMD5_FinalizeHex kwsys_ns(MD5_FinalizeHex) +# define kwsysMD5_DigestToHex kwsys_ns(MD5_DigestToHex) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * MD5 state data structure. + */ +typedef struct kwsysMD5_s kwsysMD5; + +/** + * Create a new MD5 instance. The returned instance is not initialized. + */ +kwsysEXPORT kwsysMD5* kwsysMD5_New(void); + +/** + * Delete an old MD5 instance. + */ +kwsysEXPORT void kwsysMD5_Delete(kwsysMD5* md5); + +/** + * Initialize a new MD5 digest. + */ +kwsysEXPORT void kwsysMD5_Initialize(kwsysMD5* md5); + +/** + * Append data to an MD5 digest. If the given length is negative, + * data will be read up to but not including a terminating null. + */ +kwsysEXPORT void kwsysMD5_Append(kwsysMD5* md5, unsigned char const* data, + int length); + +/** + * Finalize a MD5 digest and get the 16-byte hash value. + */ +kwsysEXPORT void kwsysMD5_Finalize(kwsysMD5* md5, unsigned char digest[16]); + +/** + * Finalize a MD5 digest and get the 32-bit hexadecimal representation. + */ +kwsysEXPORT void kwsysMD5_FinalizeHex(kwsysMD5* md5, char buffer[32]); + +/** + * Convert a MD5 digest 16-byte value to a 32-byte hexadecimal representation. 
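+ *
+ * A minimal usage sketch of this interface (illustrative only; the input
+ * string and buffer names are placeholders, not part of kwsys). The typical
+ * sequence is New, Initialize, Append, FinalizeHex, Delete:
+ *
+ *   kwsysMD5* md5 = kwsysMD5_New();
+ *   char hex[33];                                // 32 hex chars + NUL
+ *   kwsysMD5_Initialize(md5);
+ *   kwsysMD5_Append(md5, (unsigned char const*)"abc", -1);  // -1: use strlen
+ *   kwsysMD5_FinalizeHex(md5, hex);              // fills 32 hex characters
+ *   hex[32] = '\0';                              // buffer is not NUL-terminated
+ *   kwsysMD5_Delete(md5);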
+ */ +kwsysEXPORT void kwsysMD5_DigestToHex(unsigned char const digest[16], + char buffer[32]); + +#if defined(__cplusplus) +} /* extern "C" */ +#endif + +/* If we are building a kwsys .c or .cxx file, let it use these macros. + Otherwise, undefine them to keep the namespace clean. */ +#if !defined(KWSYS_NAMESPACE) +# undef kwsys_ns +# undef kwsysEXPORT +# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# undef kwsysMD5 +# undef kwsysMD5_s +# undef kwsysMD5_New +# undef kwsysMD5_Delete +# undef kwsysMD5_Initialize +# undef kwsysMD5_Append +# undef kwsysMD5_Finalize +# undef kwsysMD5_FinalizeHex +# undef kwsysMD5_DigestToHex +# endif +#endif + +#endif diff --git a/test/API/driver/kwsys/Process.h.in b/test/API/driver/kwsys/Process.h.in new file mode 100644 index 00000000000..73ea9dbfcd9 --- /dev/null +++ b/test/API/driver/kwsys/Process.h.in @@ -0,0 +1,544 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_Process_h +#define @KWSYS_NAMESPACE@_Process_h + +#include <@KWSYS_NAMESPACE@/Configure.h> + +/* Redefine all public interface symbol names to be in the proper + namespace. These macros are used internally to kwsys only, and are + not visible to user code. Use kwsysHeaderDump.pl to reproduce + these macros after making changes to the interface. */ +#if !defined(KWSYS_NAMESPACE) +# define kwsys_ns(x) @KWSYS_NAMESPACE@##x +# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT +#endif +#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# define kwsysProcess kwsys_ns(Process) +# define kwsysProcess_s kwsys_ns(Process_s) +# define kwsysProcess_New kwsys_ns(Process_New) +# define kwsysProcess_Delete kwsys_ns(Process_Delete) +# define kwsysProcess_SetCommand kwsys_ns(Process_SetCommand) +# define kwsysProcess_AddCommand kwsys_ns(Process_AddCommand) +# define kwsysProcess_SetTimeout kwsys_ns(Process_SetTimeout) +# define kwsysProcess_SetWorkingDirectory \ + kwsys_ns(Process_SetWorkingDirectory) +# define kwsysProcess_SetPipeFile kwsys_ns(Process_SetPipeFile) +# define kwsysProcess_SetPipeNative kwsys_ns(Process_SetPipeNative) +# define kwsysProcess_SetPipeShared kwsys_ns(Process_SetPipeShared) +# define kwsysProcess_Option_Detach kwsys_ns(Process_Option_Detach) +# define kwsysProcess_Option_HideWindow kwsys_ns(Process_Option_HideWindow) +# define kwsysProcess_Option_MergeOutput kwsys_ns(Process_Option_MergeOutput) +# define kwsysProcess_Option_Verbatim kwsys_ns(Process_Option_Verbatim) +# define kwsysProcess_Option_CreateProcessGroup \ + kwsys_ns(Process_Option_CreateProcessGroup) +# define kwsysProcess_GetOption kwsys_ns(Process_GetOption) +# define kwsysProcess_SetOption kwsys_ns(Process_SetOption) +# define kwsysProcess_Option_e kwsys_ns(Process_Option_e) +# define kwsysProcess_State_Starting kwsys_ns(Process_State_Starting) +# define kwsysProcess_State_Error kwsys_ns(Process_State_Error) +# define kwsysProcess_State_Exception kwsys_ns(Process_State_Exception) +# define kwsysProcess_State_Executing kwsys_ns(Process_State_Executing) +# define kwsysProcess_State_Exited kwsys_ns(Process_State_Exited) +# define kwsysProcess_State_Expired kwsys_ns(Process_State_Expired) +# define kwsysProcess_State_Killed kwsys_ns(Process_State_Killed) +# define kwsysProcess_State_Disowned kwsys_ns(Process_State_Disowned) +# define kwsysProcess_State_e kwsys_ns(Process_State_e) +# define kwsysProcess_Exception_None kwsys_ns(Process_Exception_None) +# define kwsysProcess_Exception_Fault kwsys_ns(Process_Exception_Fault) 
+# define kwsysProcess_Exception_Illegal kwsys_ns(Process_Exception_Illegal) +# define kwsysProcess_Exception_Interrupt \ + kwsys_ns(Process_Exception_Interrupt) +# define kwsysProcess_Exception_Numerical \ + kwsys_ns(Process_Exception_Numerical) +# define kwsysProcess_Exception_Other kwsys_ns(Process_Exception_Other) +# define kwsysProcess_Exception_e kwsys_ns(Process_Exception_e) +# define kwsysProcess_GetState kwsys_ns(Process_GetState) +# define kwsysProcess_GetExitException kwsys_ns(Process_GetExitException) +# define kwsysProcess_GetExitCode kwsys_ns(Process_GetExitCode) +# define kwsysProcess_GetExitValue kwsys_ns(Process_GetExitValue) +# define kwsysProcess_GetErrorString kwsys_ns(Process_GetErrorString) +# define kwsysProcess_GetExceptionString kwsys_ns(Process_GetExceptionString) +# define kwsysProcess_GetStateByIndex kwsys_ns(Process_GetStateByIndex) +# define kwsysProcess_GetExitExceptionByIndex \ + kwsys_ns(Process_GetExitExceptionByIndex) +# define kwsysProcess_GetExitCodeByIndex kwsys_ns(Process_GetExitCodeByIndex) +# define kwsysProcess_GetExitValueByIndex \ + kwsys_ns(Process_GetExitValueByIndex) +# define kwsysProcess_GetExceptionStringByIndex \ + kwsys_ns(Process_GetExceptionStringByIndex) +# define kwsysProcess_GetExitCodeByIndex kwsys_ns(Process_GetExitCodeByIndex) +# define kwsysProcess_Execute kwsys_ns(Process_Execute) +# define kwsysProcess_Disown kwsys_ns(Process_Disown) +# define kwsysProcess_WaitForData kwsys_ns(Process_WaitForData) +# define kwsysProcess_Pipes_e kwsys_ns(Process_Pipes_e) +# define kwsysProcess_Pipe_None kwsys_ns(Process_Pipe_None) +# define kwsysProcess_Pipe_STDIN kwsys_ns(Process_Pipe_STDIN) +# define kwsysProcess_Pipe_STDOUT kwsys_ns(Process_Pipe_STDOUT) +# define kwsysProcess_Pipe_STDERR kwsys_ns(Process_Pipe_STDERR) +# define kwsysProcess_Pipe_Timeout kwsys_ns(Process_Pipe_Timeout) +# define kwsysProcess_Pipe_Handle kwsys_ns(Process_Pipe_Handle) +# define kwsysProcess_WaitForExit kwsys_ns(Process_WaitForExit) +# define kwsysProcess_Interrupt kwsys_ns(Process_Interrupt) +# define kwsysProcess_Kill kwsys_ns(Process_Kill) +# define kwsysProcess_KillPID kwsys_ns(Process_KillPID) +# define kwsysProcess_ResetStartTime kwsys_ns(Process_ResetStartTime) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * Process control data structure. + */ +typedef struct kwsysProcess_s kwsysProcess; + +/* Platform-specific pipe handle type. */ +#if defined(_WIN32) && !defined(__CYGWIN__) +typedef void* kwsysProcess_Pipe_Handle; +#else +typedef int kwsysProcess_Pipe_Handle; +#endif + +/** + * Create a new Process instance. + */ +kwsysEXPORT kwsysProcess* kwsysProcess_New(void); + +/** + * Delete an existing Process instance. If the instance is currently + * executing a process, this blocks until the process terminates. + */ +kwsysEXPORT void kwsysProcess_Delete(kwsysProcess* cp); + +/** + * Set the command line to be executed. Argument is an array of + * pointers to the command and each argument. The array must end with + * a NULL pointer. Any previous command lines are removed. Returns + * 1 for success and 0 otherwise. + */ +kwsysEXPORT int kwsysProcess_SetCommand(kwsysProcess* cp, + char const* const* command); + +/** + * Add a command line to be executed. Argument is an array of + * pointers to the command and each argument. The array must end with + * a NULL pointer. If this is not the first command added, its + * standard input will be connected to the standard output of the + * previous command. Returns 1 for success and 0 otherwise. 
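+ *
+ * A minimal sketch of building a two-command pipeline with SetCommand and
+ * AddCommand (the program paths below are placeholders, not part of kwsys);
+ * each argument array ends with a NULL pointer:
+ *
+ *   char const* cmd1[] = { "/bin/cat", "input.txt", 0 };
+ *   char const* cmd2[] = { "/usr/bin/wc", "-l", 0 };
+ *   kwsysProcess* cp = kwsysProcess_New();
+ *   kwsysProcess_SetCommand(cp, cmd1);  // first command in the pipeline
+ *   kwsysProcess_AddCommand(cp, cmd2);  // reads cmd1's stdout as its stdin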
+ */ +kwsysEXPORT int kwsysProcess_AddCommand(kwsysProcess* cp, + char const* const* command); + +/** + * Set the timeout in seconds for the child process. The timeout + * period begins when the child is executed. If the child has not + * terminated when the timeout expires, it will be killed. A + * non-positive (<= 0) value will disable the timeout. + */ +kwsysEXPORT void kwsysProcess_SetTimeout(kwsysProcess* cp, double timeout); + +/** + * Set the working directory for the child process. The working + * directory can be absolute or relative to the current directory. + * Returns 1 for success and 0 for failure. + */ +kwsysEXPORT int kwsysProcess_SetWorkingDirectory(kwsysProcess* cp, + const char* dir); + +/** + * Set the name of a file to be attached to the given pipe. Returns 1 + * for success and 0 for failure. + */ +kwsysEXPORT int kwsysProcess_SetPipeFile(kwsysProcess* cp, int pipe, + const char* file); + +/** + * Set whether the given pipe in the child is shared with the parent + * process. The default is no for Pipe_STDOUT and Pipe_STDERR and yes + * for Pipe_STDIN. + */ +kwsysEXPORT void kwsysProcess_SetPipeShared(kwsysProcess* cp, int pipe, + int shared); + +/** + * Specify a platform-specific native pipe for use as one of the child + * interface pipes. The native pipe is specified by an array of two + * descriptors or handles. The first entry in the array (index 0) + * should be the read end of the pipe. The second entry in the array + * (index 1) should be the write end of the pipe. If a null pointer + * is given the option will be disabled. + * + * For Pipe_STDIN the native pipe is connected to the first child in + * the pipeline as its stdin. After the children are created the + * write end of the pipe will be closed in the child process and the + * read end will be closed in the parent process. + * + * For Pipe_STDOUT and Pipe_STDERR the pipe is connected to the last + * child as its stdout or stderr. After the children are created the + * write end of the pipe will be closed in the parent process and the + * read end will be closed in the child process. + */ +kwsysEXPORT void kwsysProcess_SetPipeNative(kwsysProcess* cp, int pipe, + kwsysProcess_Pipe_Handle p[2]); + +/** + * Get/Set a possibly platform-specific option. Possible options are: + * + * kwsysProcess_Option_Detach = Whether to detach the process. + * 0 = No (default) + * 1 = Yes + * + * kwsysProcess_Option_HideWindow = Whether to hide window on Windows. + * 0 = No (default) + * 1 = Yes + * + * kwsysProcess_Option_MergeOutput = Whether to merge stdout/stderr. + * No content will be returned as stderr. + * Any actual stderr will be on stdout. + * 0 = No (default) + * 1 = Yes + * + * kwsysProcess_Option_Verbatim = Whether SetCommand and AddCommand + * should treat the first argument + * as a verbatim command line + * and ignore the rest of the arguments. + * 0 = No (default) + * 1 = Yes + * + * kwsysProcess_Option_CreateProcessGroup = Whether to place the process in a + * new process group. This is + * useful if you want to send Ctrl+C + * to the process. On UNIX, also + * places the process in a new + * session. 
+ * 0 = No (default) + * 1 = Yes + */ +kwsysEXPORT int kwsysProcess_GetOption(kwsysProcess* cp, int optionId); +kwsysEXPORT void kwsysProcess_SetOption(kwsysProcess* cp, int optionId, + int value); +enum kwsysProcess_Option_e +{ + kwsysProcess_Option_HideWindow, + kwsysProcess_Option_Detach, + kwsysProcess_Option_MergeOutput, + kwsysProcess_Option_Verbatim, + kwsysProcess_Option_CreateProcessGroup +}; + +/** + * Get the current state of the Process instance. Possible states are: + * + * kwsysProcess_State_Starting = Execute has not yet been called. + * kwsysProcess_State_Error = Error administrating the child process. + * kwsysProcess_State_Exception = Child process exited abnormally. + * kwsysProcess_State_Executing = Child process is currently running. + * kwsysProcess_State_Exited = Child process exited normally. + * kwsysProcess_State_Expired = Child process's timeout expired. + * kwsysProcess_State_Killed = Child process terminated by Kill method. + * kwsysProcess_State_Disowned = Child is no longer managed by this object. + */ +kwsysEXPORT int kwsysProcess_GetState(kwsysProcess* cp); +enum kwsysProcess_State_e +{ + kwsysProcess_State_Starting, + kwsysProcess_State_Error, + kwsysProcess_State_Exception, + kwsysProcess_State_Executing, + kwsysProcess_State_Exited, + kwsysProcess_State_Expired, + kwsysProcess_State_Killed, + kwsysProcess_State_Disowned +}; + +/** + * When GetState returns "Exception", this method returns a + * platform-independent description of the exceptional behavior that + * caused the child to terminate abnormally. Possible exceptions are: + * + * kwsysProcess_Exception_None = No exceptional behavior occurred. + * kwsysProcess_Exception_Fault = Child crashed with a memory fault. + * kwsysProcess_Exception_Illegal = Child crashed with an illegal + * instruction. + * kwsysProcess_Exception_Interrupt = Child was interrupted by user + * (Cntl-C/Break). + * kwsysProcess_Exception_Numerical = Child crashed with a numerical + * exception. + * kwsysProcess_Exception_Other = Child terminated for another reason. + */ +kwsysEXPORT int kwsysProcess_GetExitException(kwsysProcess* cp); +enum kwsysProcess_Exception_e +{ + kwsysProcess_Exception_None, + kwsysProcess_Exception_Fault, + kwsysProcess_Exception_Illegal, + kwsysProcess_Exception_Interrupt, + kwsysProcess_Exception_Numerical, + kwsysProcess_Exception_Other +}; + +/** + * When GetState returns "Exited" or "Exception", this method returns + * the platform-specific raw exit code of the process. UNIX platforms + * should use WIFEXITED/WEXITSTATUS and WIFSIGNALED/WTERMSIG to access + * this value. Windows users should compare the value to the various + * EXCEPTION_* values. + * + * If GetState returns "Exited", use GetExitValue to get the + * platform-independent child return value. + */ +kwsysEXPORT int kwsysProcess_GetExitCode(kwsysProcess* cp); + +/** + * When GetState returns "Exited", this method returns the child's + * platform-independent exit code (such as the value returned by the + * child's main). + */ +kwsysEXPORT int kwsysProcess_GetExitValue(kwsysProcess* cp); + +/** + * When GetState returns "Error", this method returns a string + * describing the problem. Otherwise, it returns NULL. + */ +kwsysEXPORT const char* kwsysProcess_GetErrorString(kwsysProcess* cp); + +/** + * When GetState returns "Exception", this method returns a string + * describing the problem. Otherwise, it returns NULL. 
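+ *
+ * For orientation, a hedged sketch of the run-and-inspect sequence that
+ * leads to these query functions (Execute, WaitForData and WaitForExit are
+ * documented further below; error handling is abbreviated):
+ *
+ *   char* data;
+ *   int length;
+ *   kwsysProcess_Execute(cp);
+ *   while (kwsysProcess_WaitForData(cp, &data, &length, 0) !=
+ *          kwsysProcess_Pipe_None) {
+ *     fwrite(data, 1, (size_t)length, stdout);   // forward child output
+ *   }
+ *   kwsysProcess_WaitForExit(cp, 0);
+ *   switch (kwsysProcess_GetState(cp)) {
+ *     case kwsysProcess_State_Exited:
+ *       printf("exit value %d\n", kwsysProcess_GetExitValue(cp));
+ *       break;
+ *     case kwsysProcess_State_Exception:
+ *       printf("%s\n", kwsysProcess_GetExceptionString(cp));
+ *       break;
+ *     case kwsysProcess_State_Error:
+ *       printf("%s\n", kwsysProcess_GetErrorString(cp));
+ *       break;
+ *   }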
+ */ +kwsysEXPORT const char* kwsysProcess_GetExceptionString(kwsysProcess* cp); + +/** + * Get the current state of the Process instance. Possible states are: + * + * kwsysProcess_StateByIndex_Starting = Execute has not yet been called. + * kwsysProcess_StateByIndex_Exception = Child process exited abnormally. + * kwsysProcess_StateByIndex_Exited = Child process exited normally. + * kwsysProcess_StateByIndex_Error = Error getting the child return code. + */ +kwsysEXPORT int kwsysProcess_GetStateByIndex(kwsysProcess* cp, int idx); +enum kwsysProcess_StateByIndex_e +{ + kwsysProcess_StateByIndex_Starting = kwsysProcess_State_Starting, + kwsysProcess_StateByIndex_Exception = kwsysProcess_State_Exception, + kwsysProcess_StateByIndex_Exited = kwsysProcess_State_Exited, + kwsysProcess_StateByIndex_Error = kwsysProcess_State_Error +}; + +/** + * When GetState returns "Exception", this method returns a + * platform-independent description of the exceptional behavior that + * caused the child to terminate abnormally. Possible exceptions are: + * + * kwsysProcess_Exception_None = No exceptional behavior occurred. + * kwsysProcess_Exception_Fault = Child crashed with a memory fault. + * kwsysProcess_Exception_Illegal = Child crashed with an illegal + * instruction. + * kwsysProcess_Exception_Interrupt = Child was interrupted by user + * (Cntl-C/Break). + * kwsysProcess_Exception_Numerical = Child crashed with a numerical + * exception. + * kwsysProcess_Exception_Other = Child terminated for another reason. + */ +kwsysEXPORT int kwsysProcess_GetExitExceptionByIndex(kwsysProcess* cp, + int idx); + +/** + * When GetState returns "Exited" or "Exception", this method returns + * the platform-specific raw exit code of the process. UNIX platforms + * should use WIFEXITED/WEXITSTATUS and WIFSIGNALED/WTERMSIG to access + * this value. Windows users should compare the value to the various + * EXCEPTION_* values. + * + * If GetState returns "Exited", use GetExitValue to get the + * platform-independent child return value. + */ +kwsysEXPORT int kwsysProcess_GetExitCodeByIndex(kwsysProcess* cp, int idx); + +/** + * When GetState returns "Exited", this method returns the child's + * platform-independent exit code (such as the value returned by the + * child's main). + */ +kwsysEXPORT int kwsysProcess_GetExitValueByIndex(kwsysProcess* cp, int idx); + +/** + * When GetState returns "Exception", this method returns a string + * describing the problem. Otherwise, it returns NULL. + */ +kwsysEXPORT const char* kwsysProcess_GetExceptionStringByIndex( + kwsysProcess* cp, int idx); + +/** + * Start executing the child process. + */ +kwsysEXPORT void kwsysProcess_Execute(kwsysProcess* cp); + +/** + * Stop management of a detached child process. This closes any pipes + * being read. If the child was not created with the + * kwsysProcess_Option_Detach option, this method does nothing. This + * is because disowning a non-detached process will cause the child + * exit signal to be left unhandled until this process exits. + */ +kwsysEXPORT void kwsysProcess_Disown(kwsysProcess* cp); + +/** + * Block until data are available on a pipe, a timeout expires, or the + * child process terminates. Arguments are as follows: + * + * data = If data are read, the pointer to which this points is + * set to point to the data. + * length = If data are read, the integer to which this points is + * set to the length of the data read. + * timeout = Specifies the maximum time this call may block. 
Upon + * return after reading data, the time elapsed is subtracted + * from the timeout value. If this timeout expires, the + * value is set to 0. A NULL pointer passed for this argument + * indicates no timeout for the call. A negative or zero + * value passed for this argument may be used for polling + * and will always return immediately. + * + * Return value will be one of: + * + * Pipe_None = No more data will be available from the child process, + * ( == 0) or no process has been executed. WaitForExit should + * be called to wait for the process to terminate. + * Pipe_STDOUT = Data have been read from the child's stdout pipe. + * Pipe_STDERR = Data have been read from the child's stderr pipe. + * Pipe_Timeout = No data available within timeout specified for the + * call. Time elapsed has been subtracted from timeout + * argument. + */ +kwsysEXPORT int kwsysProcess_WaitForData(kwsysProcess* cp, char** data, + int* length, double* timeout); +enum kwsysProcess_Pipes_e +{ + kwsysProcess_Pipe_None, + kwsysProcess_Pipe_STDIN, + kwsysProcess_Pipe_STDOUT, + kwsysProcess_Pipe_STDERR, + kwsysProcess_Pipe_Timeout = 255 +}; + +/** + * Block until the child process terminates or the given timeout + * expires. If no process is running, returns immediately. The + * argument is: + * + * timeout = Specifies the maximum time this call may block. Upon + * returning due to child termination, the elapsed time + * is subtracted from the given value. A NULL pointer + * passed for this argument indicates no timeout for the + * call. + * + * Return value will be one of: + * + * 0 = Child did not terminate within timeout specified for + * the call. Time elapsed has been subtracted from timeout + * argument. + * 1 = Child has terminated or was not running. + */ +kwsysEXPORT int kwsysProcess_WaitForExit(kwsysProcess* cp, double* timeout); + +/** + * Interrupt the process group for the child process that is currently + * running by sending it the appropriate operating-system specific signal. + * The caller should call WaitForExit after this returns to wait for the + * child to terminate. + * + * WARNING: If you didn't specify kwsysProcess_Option_CreateProcessGroup, + * you will interrupt your own process group. + */ +kwsysEXPORT void kwsysProcess_Interrupt(kwsysProcess* cp); + +/** + * Forcefully terminate the child process that is currently running. + * The caller should call WaitForExit after this returns to wait for + * the child to terminate. + */ +kwsysEXPORT void kwsysProcess_Kill(kwsysProcess* cp); + +/** + * Same as kwsysProcess_Kill using process ID to locate process to + * terminate. + * @see kwsysProcess_Kill(kwsysProcess* cp) + */ +kwsysEXPORT void kwsysProcess_KillPID(unsigned long); + +/** + * Reset the start time of the child process to the current time. + */ +kwsysEXPORT void kwsysProcess_ResetStartTime(kwsysProcess* cp); + +#if defined(__cplusplus) +} /* extern "C" */ +#endif + +/* If we are building a kwsys .c or .cxx file, let it use these macros. + Otherwise, undefine them to keep the namespace clean. 
*/ +#if !defined(KWSYS_NAMESPACE) +# undef kwsys_ns +# undef kwsysEXPORT +# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# undef kwsysProcess +# undef kwsysProcess_s +# undef kwsysProcess_New +# undef kwsysProcess_Delete +# undef kwsysProcess_SetCommand +# undef kwsysProcess_AddCommand +# undef kwsysProcess_SetTimeout +# undef kwsysProcess_SetWorkingDirectory +# undef kwsysProcess_SetPipeFile +# undef kwsysProcess_SetPipeNative +# undef kwsysProcess_SetPipeShared +# undef kwsysProcess_Option_Detach +# undef kwsysProcess_Option_HideWindow +# undef kwsysProcess_Option_MergeOutput +# undef kwsysProcess_Option_Verbatim +# undef kwsysProcess_Option_CreateProcessGroup +# undef kwsysProcess_GetOption +# undef kwsysProcess_SetOption +# undef kwsysProcess_Option_e +# undef kwsysProcess_State_Starting +# undef kwsysProcess_State_Error +# undef kwsysProcess_State_Exception +# undef kwsysProcess_State_Executing +# undef kwsysProcess_State_Exited +# undef kwsysProcess_State_Expired +# undef kwsysProcess_State_Killed +# undef kwsysProcess_State_Disowned +# undef kwsysProcess_GetState +# undef kwsysProcess_State_e +# undef kwsysProcess_Exception_None +# undef kwsysProcess_Exception_Fault +# undef kwsysProcess_Exception_Illegal +# undef kwsysProcess_Exception_Interrupt +# undef kwsysProcess_Exception_Numerical +# undef kwsysProcess_Exception_Other +# undef kwsysProcess_GetExitException +# undef kwsysProcess_Exception_e +# undef kwsysProcess_GetExitCode +# undef kwsysProcess_GetExitValue +# undef kwsysProcess_GetErrorString +# undef kwsysProcess_GetExceptionString +# undef kwsysProcess_Execute +# undef kwsysProcess_Disown +# undef kwsysProcess_WaitForData +# undef kwsysProcess_Pipes_e +# undef kwsysProcess_Pipe_None +# undef kwsysProcess_Pipe_STDIN +# undef kwsysProcess_Pipe_STDOUT +# undef kwsysProcess_Pipe_STDERR +# undef kwsysProcess_Pipe_Timeout +# undef kwsysProcess_Pipe_Handle +# undef kwsysProcess_WaitForExit +# undef kwsysProcess_Interrupt +# undef kwsysProcess_Kill +# undef kwsysProcess_ResetStartTime +# endif +#endif + +#endif diff --git a/test/API/driver/kwsys/ProcessUNIX.c b/test/API/driver/kwsys/ProcessUNIX.c new file mode 100644 index 00000000000..100eddcde7e --- /dev/null +++ b/test/API/driver/kwsys/ProcessUNIX.c @@ -0,0 +1,2920 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(Process.h) +#include KWSYS_HEADER(System.h) + +/* Work-around CMake dependency scanning limitation. This must + duplicate the above list of headers. */ +#if 0 +# include "Process.h.in" +# include "System.h.in" +#endif + +/* + +Implementation for UNIX + +On UNIX, a child process is forked to exec the program. Three output +pipes are read by the parent process using a select call to block +until data are ready. Two of the pipes are stdout and stderr for the +child. The third is a special pipe populated by a signal handler to +indicate that a child has terminated. This is used in conjunction +with the timeout on the select call to implement a timeout for program +even when it closes stdout and stderr and at the same time avoiding +races. + +*/ + +/* + +TODO: + +We cannot create the pipeline of processes in suspended states. How +do we cleanup processes already started when one fails to load? Right +now we are just killing them, which is probably not the right thing to +do. 
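+
+As background, the "special pipe populated by a signal handler" described
+above is the classic self-pipe pattern.  A generic sketch (the names here
+are illustrative, not the actual kwsys symbols):
+
+  static int sigPipe[2];            // created with pipe() during setup
+
+  static void onChildExit(int sig)
+  {
+    char c = 1;
+    write(sigPipe[1], &c, 1);       // async-signal-safe wake-up
+  }
+
+The read end of sigPipe is added to the same select() set as the child's
+stdout and stderr pipes, so child termination wakes the parent even after
+both output pipes have been closed.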
+
+*/
+
+#if defined(__CYGWIN__)
+/* Increase the file descriptor limit for select() before including
+   related system headers. (Default: 64) */
+# define FD_SETSIZE 16384
+#endif
+
+#include <assert.h>    /* assert */
+#include <ctype.h>     /* isspace */
+#include <dirent.h>    /* DIR, dirent */
+#include <errno.h>     /* errno */
+#include <fcntl.h>     /* fcntl */
+#include <signal.h>    /* sigaction */
+#include <stddef.h>    /* ptrdiff_t */
+#include <stdio.h>     /* snprintf */
+#include <stdlib.h>    /* malloc, free */
+#include <string.h>    /* strdup, strerror, memset */
+#include <sys/stat.h>  /* open mode */
+#include <sys/time.h>  /* struct timeval */
+#include <sys/types.h> /* pid_t, fd_set */
+#include <sys/wait.h>  /* waitpid */
+#include <time.h>      /* gettimeofday */
+#include <unistd.h>    /* pipe, close, fork, execvp, select, _exit */
+
+#if defined(__VMS)
+# define KWSYSPE_VMS_NONBLOCK , O_NONBLOCK
+#else
+# define KWSYSPE_VMS_NONBLOCK
+#endif
+
+#if defined(KWSYS_C_HAS_PTRDIFF_T) && KWSYS_C_HAS_PTRDIFF_T
+typedef ptrdiff_t kwsysProcess_ptrdiff_t;
+#else
+typedef int kwsysProcess_ptrdiff_t;
+#endif
+
+#if defined(KWSYS_C_HAS_SSIZE_T) && KWSYS_C_HAS_SSIZE_T
+typedef ssize_t kwsysProcess_ssize_t;
+#else
+typedef int kwsysProcess_ssize_t;
+#endif
+
+#if defined(__BEOS__) && !defined(__ZETA__)
+/* BeOS 5 doesn't have usleep(), but it has snooze(), which is identical. */
+# include <be/kernel/OS.h>
+static inline void kwsysProcess_usleep(unsigned int msec)
+{
+  snooze(msec);
+}
+#else
+# define kwsysProcess_usleep usleep
+#endif
+
+/*
+ * BeOS's select() works like WinSock: it's for networking only, and
+ * doesn't work with Unix file handles...socket and file handles are
+ * different namespaces (the same descriptor means different things in
+ * each context!)
+ *
+ * So on Unix-like systems where select() is flakey, we'll set the
+ * pipes' file handles to be non-blocking and just poll them directly
+ * without select().
+ */
+#if !defined(__BEOS__) && !defined(__VMS) && !defined(__MINT__) && \
+  !defined(KWSYSPE_USE_SELECT)
+# define KWSYSPE_USE_SELECT 1
+#endif
+
+/* Some platforms do not have siginfo on their signal handlers. */
+#if defined(SA_SIGINFO) && !defined(__BEOS__)
+# define KWSYSPE_USE_SIGINFO 1
+#endif
+
+/* The number of pipes for the child's output. The standard stdout
+   and stderr pipes are the first two. One more pipe is used to
+   detect when the child process has terminated. The third pipe is
+   not given to the child process, so it cannot close it until it
+   terminates. */
+#define KWSYSPE_PIPE_COUNT 3
+#define KWSYSPE_PIPE_STDOUT 0
+#define KWSYSPE_PIPE_STDERR 1
+#define KWSYSPE_PIPE_SIGNAL 2
+
+/* The maximum amount to read from a pipe at a time. */
+#define KWSYSPE_PIPE_BUFFER_SIZE 1024
+
+/* Keep track of times using a signed representation. Switch to the
+   native (possibly unsigned) representation only when calling native
+   functions.
*/ +typedef struct timeval kwsysProcessTimeNative; +typedef struct kwsysProcessTime_s kwsysProcessTime; +struct kwsysProcessTime_s +{ + long tv_sec; + long tv_usec; +}; + +typedef struct kwsysProcessCreateInformation_s +{ + int StdIn; + int StdOut; + int StdErr; + int ErrorPipe[2]; +} kwsysProcessCreateInformation; + +static void kwsysProcessVolatileFree(volatile void* p); +static int kwsysProcessInitialize(kwsysProcess* cp); +static void kwsysProcessCleanup(kwsysProcess* cp, int error); +static void kwsysProcessCleanupDescriptor(int* pfd); +static void kwsysProcessClosePipes(kwsysProcess* cp); +static int kwsysProcessSetNonBlocking(int fd); +static int kwsysProcessCreate(kwsysProcess* cp, int prIndex, + kwsysProcessCreateInformation* si); +static void kwsysProcessDestroy(kwsysProcess* cp); +static int kwsysProcessSetupOutputPipeFile(int* p, const char* name); +static int kwsysProcessSetupOutputPipeNative(int* p, int des[2]); +static int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout, + kwsysProcessTime* timeoutTime); +static int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime, + double* userTimeout, + kwsysProcessTimeNative* timeoutLength, + int zeroIsExpired); +static kwsysProcessTime kwsysProcessTimeGetCurrent(void); +static double kwsysProcessTimeToDouble(kwsysProcessTime t); +static kwsysProcessTime kwsysProcessTimeFromDouble(double d); +static int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2); +static kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1, + kwsysProcessTime in2); +static kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1, + kwsysProcessTime in2); +static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int sig, + int idx); +static void kwsysProcessChildErrorExit(int errorPipe); +static void kwsysProcessRestoreDefaultSignalHandlers(void); +static pid_t kwsysProcessFork(kwsysProcess* cp, + kwsysProcessCreateInformation* si); +static void kwsysProcessKill(pid_t process_id); +#if defined(__VMS) +static int kwsysProcessSetVMSFeature(const char* name, int value); +#endif +static int kwsysProcessesAdd(kwsysProcess* cp); +static void kwsysProcessesRemove(kwsysProcess* cp); +#if KWSYSPE_USE_SIGINFO +static void kwsysProcessesSignalHandler(int signum, siginfo_t* info, + void* ucontext); +#else +static void kwsysProcessesSignalHandler(int signum); +#endif + +/* A structure containing results data for each process. */ +typedef struct kwsysProcessResults_s kwsysProcessResults; +struct kwsysProcessResults_s +{ + /* The status of the child process. */ + int State; + + /* The exceptional behavior that terminated the process, if any. */ + int ExitException; + + /* The process exit code. */ + int ExitCode; + + /* The process return code, if any. */ + int ExitValue; + + /* Description for the ExitException. */ + char ExitExceptionString[KWSYSPE_PIPE_BUFFER_SIZE + 1]; +}; + +/* Structure containing data used to implement the child's execution. */ +struct kwsysProcess_s +{ + /* The command lines to execute. */ + char*** Commands; + volatile int NumberOfCommands; + + /* Descriptors for the read ends of the child's output pipes and + the signal pipe. */ + int PipeReadEnds[KWSYSPE_PIPE_COUNT]; + + /* Descriptors for the child's ends of the pipes. + Used temporarily during process creation. */ + int PipeChildStd[3]; + + /* Write descriptor for child termination signal pipe. */ + int SignalPipe; + + /* Buffer for pipe data. */ + char PipeBuffer[KWSYSPE_PIPE_BUFFER_SIZE]; + + /* Process IDs returned by the calls to fork. 
Everything is volatile + because the signal handler accesses them. You must be very careful + when reaping PIDs or modifying this array to avoid race conditions. */ + volatile pid_t* volatile ForkPIDs; + + /* Flag for whether the children were terminated by a failed select. */ + int SelectError; + + /* The timeout length. */ + double Timeout; + + /* The working directory for the process. */ + char* WorkingDirectory; + + /* Whether to create the child as a detached process. */ + int OptionDetach; + + /* Whether the child was created as a detached process. */ + int Detached; + + /* Whether to treat command lines as verbatim. */ + int Verbatim; + + /* Whether to merge stdout/stderr of the child. */ + int MergeOutput; + + /* Whether to create the process in a new process group. */ + volatile sig_atomic_t CreateProcessGroup; + + /* Time at which the child started. Negative for no timeout. */ + kwsysProcessTime StartTime; + + /* Time at which the child will timeout. Negative for no timeout. */ + kwsysProcessTime TimeoutTime; + + /* Flag for whether the timeout expired. */ + int TimeoutExpired; + + /* The number of pipes left open during execution. */ + int PipesLeft; + +#if KWSYSPE_USE_SELECT + /* File descriptor set for call to select. */ + fd_set PipeSet; +#endif + + /* The number of children still executing. */ + int CommandsLeft; + + /* The status of the process structure. Must be atomic because + the signal handler checks this to avoid a race. */ + volatile sig_atomic_t State; + + /* Whether the process was killed. */ + volatile sig_atomic_t Killed; + + /* Buffer for error message in case of failure. */ + char ErrorMessage[KWSYSPE_PIPE_BUFFER_SIZE + 1]; + + /* process results. */ + kwsysProcessResults* ProcessResults; + + /* The exit codes of each child process in the pipeline. */ + int* CommandExitCodes; + + /* Name of files to which stdin and stdout pipes are attached. */ + char* PipeFileSTDIN; + char* PipeFileSTDOUT; + char* PipeFileSTDERR; + + /* Whether each pipe is shared with the parent process. */ + int PipeSharedSTDIN; + int PipeSharedSTDOUT; + int PipeSharedSTDERR; + + /* Native pipes provided by the user. */ + int PipeNativeSTDIN[2]; + int PipeNativeSTDOUT[2]; + int PipeNativeSTDERR[2]; + + /* The real working directory of this process. */ + int RealWorkingDirectoryLength; + char* RealWorkingDirectory; +}; + +kwsysProcess* kwsysProcess_New(void) +{ + /* Allocate a process control structure. */ + kwsysProcess* cp = (kwsysProcess*)malloc(sizeof(kwsysProcess)); + if (!cp) { + return 0; + } + memset(cp, 0, sizeof(kwsysProcess)); + + /* Share stdin with the parent process by default. */ + cp->PipeSharedSTDIN = 1; + + /* No native pipes by default. */ + cp->PipeNativeSTDIN[0] = -1; + cp->PipeNativeSTDIN[1] = -1; + cp->PipeNativeSTDOUT[0] = -1; + cp->PipeNativeSTDOUT[1] = -1; + cp->PipeNativeSTDERR[0] = -1; + cp->PipeNativeSTDERR[1] = -1; + + /* Set initial status. */ + cp->State = kwsysProcess_State_Starting; + + return cp; +} + +void kwsysProcess_Delete(kwsysProcess* cp) +{ + /* Make sure we have an instance. */ + if (!cp) { + return; + } + + /* If the process is executing, wait for it to finish. */ + if (cp->State == kwsysProcess_State_Executing) { + if (cp->Detached) { + kwsysProcess_Disown(cp); + } else { + kwsysProcess_WaitForExit(cp, 0); + } + } + + /* Free memory. 
*/ + kwsysProcess_SetCommand(cp, 0); + kwsysProcess_SetWorkingDirectory(cp, 0); + kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDIN, 0); + kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDOUT, 0); + kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDERR, 0); + free(cp->CommandExitCodes); + free(cp->ProcessResults); + free(cp); +} + +int kwsysProcess_SetCommand(kwsysProcess* cp, char const* const* command) +{ + int i; + if (!cp) { + return 0; + } + for (i = 0; i < cp->NumberOfCommands; ++i) { + char** c = cp->Commands[i]; + while (*c) { + free(*c++); + } + free(cp->Commands[i]); + } + cp->NumberOfCommands = 0; + if (cp->Commands) { + free(cp->Commands); + cp->Commands = 0; + } + if (command) { + return kwsysProcess_AddCommand(cp, command); + } + return 1; +} + +int kwsysProcess_AddCommand(kwsysProcess* cp, char const* const* command) +{ + int newNumberOfCommands; + char*** newCommands; + + /* Make sure we have a command to add. */ + if (!cp || !command || !*command) { + return 0; + } + + /* Allocate a new array for command pointers. */ + newNumberOfCommands = cp->NumberOfCommands + 1; + if (!(newCommands = + (char***)malloc(sizeof(char**) * (size_t)(newNumberOfCommands)))) { + /* Out of memory. */ + return 0; + } + + /* Copy any existing commands into the new array. */ + { + int i; + for (i = 0; i < cp->NumberOfCommands; ++i) { + newCommands[i] = cp->Commands[i]; + } + } + + /* Add the new command. */ + if (cp->Verbatim) { + /* In order to run the given command line verbatim we need to + parse it. */ + newCommands[cp->NumberOfCommands] = + kwsysSystem_Parse_CommandForUnix(*command, 0); + if (!newCommands[cp->NumberOfCommands] || + !newCommands[cp->NumberOfCommands][0]) { + /* Out of memory or no command parsed. */ + free(newCommands); + return 0; + } + } else { + /* Copy each argument string individually. */ + char const* const* c = command; + kwsysProcess_ptrdiff_t n = 0; + kwsysProcess_ptrdiff_t i = 0; + while (*c++) + ; + n = c - command - 1; + newCommands[cp->NumberOfCommands] = + (char**)malloc((size_t)(n + 1) * sizeof(char*)); + if (!newCommands[cp->NumberOfCommands]) { + /* Out of memory. */ + free(newCommands); + return 0; + } + for (i = 0; i < n; ++i) { + assert(command[i]); /* Quiet Clang scan-build. */ + newCommands[cp->NumberOfCommands][i] = strdup(command[i]); + if (!newCommands[cp->NumberOfCommands][i]) { + break; + } + } + if (i < n) { + /* Out of memory. */ + for (; i > 0; --i) { + free(newCommands[cp->NumberOfCommands][i - 1]); + } + free(newCommands); + return 0; + } + newCommands[cp->NumberOfCommands][n] = 0; + } + + /* Successfully allocated new command array. Free the old array. */ + free(cp->Commands); + cp->Commands = newCommands; + cp->NumberOfCommands = newNumberOfCommands; + + return 1; +} + +void kwsysProcess_SetTimeout(kwsysProcess* cp, double timeout) +{ + if (!cp) { + return; + } + cp->Timeout = timeout; + if (cp->Timeout < 0) { + cp->Timeout = 0; + } + // Force recomputation of TimeoutTime. 
+ cp->TimeoutTime.tv_sec = -1; +} + +int kwsysProcess_SetWorkingDirectory(kwsysProcess* cp, const char* dir) +{ + if (!cp) { + return 0; + } + if (cp->WorkingDirectory == dir) { + return 1; + } + if (cp->WorkingDirectory && dir && strcmp(cp->WorkingDirectory, dir) == 0) { + return 1; + } + if (cp->WorkingDirectory) { + free(cp->WorkingDirectory); + cp->WorkingDirectory = 0; + } + if (dir) { + cp->WorkingDirectory = strdup(dir); + if (!cp->WorkingDirectory) { + return 0; + } + } + return 1; +} + +int kwsysProcess_SetPipeFile(kwsysProcess* cp, int prPipe, const char* file) +{ + char** pfile; + if (!cp) { + return 0; + } + switch (prPipe) { + case kwsysProcess_Pipe_STDIN: + pfile = &cp->PipeFileSTDIN; + break; + case kwsysProcess_Pipe_STDOUT: + pfile = &cp->PipeFileSTDOUT; + break; + case kwsysProcess_Pipe_STDERR: + pfile = &cp->PipeFileSTDERR; + break; + default: + return 0; + } + if (*pfile) { + free(*pfile); + *pfile = 0; + } + if (file) { + *pfile = strdup(file); + if (!*pfile) { + return 0; + } + } + + /* If we are redirecting the pipe, do not share it or use a native + pipe. */ + if (*pfile) { + kwsysProcess_SetPipeNative(cp, prPipe, 0); + kwsysProcess_SetPipeShared(cp, prPipe, 0); + } + return 1; +} + +void kwsysProcess_SetPipeShared(kwsysProcess* cp, int prPipe, int shared) +{ + if (!cp) { + return; + } + + switch (prPipe) { + case kwsysProcess_Pipe_STDIN: + cp->PipeSharedSTDIN = shared ? 1 : 0; + break; + case kwsysProcess_Pipe_STDOUT: + cp->PipeSharedSTDOUT = shared ? 1 : 0; + break; + case kwsysProcess_Pipe_STDERR: + cp->PipeSharedSTDERR = shared ? 1 : 0; + break; + default: + return; + } + + /* If we are sharing the pipe, do not redirect it to a file or use a + native pipe. */ + if (shared) { + kwsysProcess_SetPipeFile(cp, prPipe, 0); + kwsysProcess_SetPipeNative(cp, prPipe, 0); + } +} + +void kwsysProcess_SetPipeNative(kwsysProcess* cp, int prPipe, int p[2]) +{ + int* pPipeNative = 0; + + if (!cp) { + return; + } + + switch (prPipe) { + case kwsysProcess_Pipe_STDIN: + pPipeNative = cp->PipeNativeSTDIN; + break; + case kwsysProcess_Pipe_STDOUT: + pPipeNative = cp->PipeNativeSTDOUT; + break; + case kwsysProcess_Pipe_STDERR: + pPipeNative = cp->PipeNativeSTDERR; + break; + default: + return; + } + + /* Copy the native pipe descriptors provided. */ + if (p) { + pPipeNative[0] = p[0]; + pPipeNative[1] = p[1]; + } else { + pPipeNative[0] = -1; + pPipeNative[1] = -1; + } + + /* If we are using a native pipe, do not share it or redirect it to + a file. */ + if (p) { + kwsysProcess_SetPipeFile(cp, prPipe, 0); + kwsysProcess_SetPipeShared(cp, prPipe, 0); + } +} + +int kwsysProcess_GetOption(kwsysProcess* cp, int optionId) +{ + if (!cp) { + return 0; + } + + switch (optionId) { + case kwsysProcess_Option_Detach: + return cp->OptionDetach; + case kwsysProcess_Option_MergeOutput: + return cp->MergeOutput; + case kwsysProcess_Option_Verbatim: + return cp->Verbatim; + case kwsysProcess_Option_CreateProcessGroup: + return cp->CreateProcessGroup; + default: + return 0; + } +} + +void kwsysProcess_SetOption(kwsysProcess* cp, int optionId, int value) +{ + if (!cp) { + return; + } + + switch (optionId) { + case kwsysProcess_Option_Detach: + cp->OptionDetach = value; + break; + case kwsysProcess_Option_MergeOutput: + cp->MergeOutput = value; + break; + case kwsysProcess_Option_Verbatim: + cp->Verbatim = value; + break; + case kwsysProcess_Option_CreateProcessGroup: + cp->CreateProcessGroup = value; + break; + default: + break; + } +} + +int kwsysProcess_GetState(kwsysProcess* cp) +{ + return cp ? 
cp->State : kwsysProcess_State_Error; +} + +int kwsysProcess_GetExitException(kwsysProcess* cp) +{ + return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) + ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitException + : kwsysProcess_Exception_Other; +} + +int kwsysProcess_GetExitCode(kwsysProcess* cp) +{ + return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) + ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitCode + : 0; +} + +int kwsysProcess_GetExitValue(kwsysProcess* cp) +{ + return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) + ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitValue + : -1; +} + +const char* kwsysProcess_GetErrorString(kwsysProcess* cp) +{ + if (!cp) { + return "Process management structure could not be allocated"; + } else if (cp->State == kwsysProcess_State_Error) { + return cp->ErrorMessage; + } + return "Success"; +} + +const char* kwsysProcess_GetExceptionString(kwsysProcess* cp) +{ + if (!(cp && cp->ProcessResults && (cp->NumberOfCommands > 0))) { + return "GetExceptionString called with NULL process management structure"; + } else if (cp->State == kwsysProcess_State_Exception) { + return cp->ProcessResults[cp->NumberOfCommands - 1].ExitExceptionString; + } + return "No exception"; +} + +/* the index should be in array bound. */ +#define KWSYSPE_IDX_CHK(RET) \ + if (!cp || idx >= cp->NumberOfCommands || idx < 0) { \ + return RET; \ + } + +int kwsysProcess_GetStateByIndex(kwsysProcess* cp, int idx) +{ + KWSYSPE_IDX_CHK(kwsysProcess_State_Error) + return cp->ProcessResults[idx].State; +} + +int kwsysProcess_GetExitExceptionByIndex(kwsysProcess* cp, int idx) +{ + KWSYSPE_IDX_CHK(kwsysProcess_Exception_Other) + return cp->ProcessResults[idx].ExitException; +} + +int kwsysProcess_GetExitValueByIndex(kwsysProcess* cp, int idx) +{ + KWSYSPE_IDX_CHK(-1) + return cp->ProcessResults[idx].ExitValue; +} + +int kwsysProcess_GetExitCodeByIndex(kwsysProcess* cp, int idx) +{ + KWSYSPE_IDX_CHK(-1) + return cp->CommandExitCodes[idx]; +} + +const char* kwsysProcess_GetExceptionStringByIndex(kwsysProcess* cp, int idx) +{ + KWSYSPE_IDX_CHK("GetExceptionString called with NULL process management " + "structure or index out of bound") + if (cp->ProcessResults[idx].State == kwsysProcess_StateByIndex_Exception) { + return cp->ProcessResults[idx].ExitExceptionString; + } + return "No exception"; +} + +#undef KWSYSPE_IDX_CHK + +void kwsysProcess_Execute(kwsysProcess* cp) +{ + int i; + + /* Do not execute a second copy simultaneously. */ + if (!cp || cp->State == kwsysProcess_State_Executing) { + return; + } + + /* Make sure we have something to run. */ + if (cp->NumberOfCommands < 1) { + strcpy(cp->ErrorMessage, "No command"); + cp->State = kwsysProcess_State_Error; + return; + } + + /* Initialize the control structure for a new process. */ + if (!kwsysProcessInitialize(cp)) { + strcpy(cp->ErrorMessage, "Out of memory"); + cp->State = kwsysProcess_State_Error; + return; + } + +#if defined(__VMS) + /* Make sure pipes behave like streams on VMS. */ + if (!kwsysProcessSetVMSFeature("DECC$STREAM_PIPE", 1)) { + kwsysProcessCleanup(cp, 1); + return; + } +#endif + + /* Save the real working directory of this process and change to + the working directory for the child processes. This is needed + to make pipe file paths evaluate correctly. 
*/ + if (cp->WorkingDirectory) { + int r; + if (!getcwd(cp->RealWorkingDirectory, + (size_t)(cp->RealWorkingDirectoryLength))) { + kwsysProcessCleanup(cp, 1); + return; + } + + /* Some platforms specify that the chdir call may be + interrupted. Repeat the call until it finishes. */ + while (((r = chdir(cp->WorkingDirectory)) < 0) && (errno == EINTR)) + ; + if (r < 0) { + kwsysProcessCleanup(cp, 1); + return; + } + } + + /* If not running a detached child, add this object to the global + set of process objects that wish to be notified when a child + exits. */ + if (!cp->OptionDetach) { + if (!kwsysProcessesAdd(cp)) { + kwsysProcessCleanup(cp, 1); + return; + } + } + + /* Setup the stdin pipe for the first process. */ + if (cp->PipeFileSTDIN) { + /* Open a file for the child's stdin to read. */ + cp->PipeChildStd[0] = open(cp->PipeFileSTDIN, O_RDONLY); + if (cp->PipeChildStd[0] < 0) { + kwsysProcessCleanup(cp, 1); + return; + } + + /* Set close-on-exec flag on the pipe's end. */ + if (fcntl(cp->PipeChildStd[0], F_SETFD, FD_CLOEXEC) < 0) { + kwsysProcessCleanup(cp, 1); + return; + } + } else if (cp->PipeSharedSTDIN) { + cp->PipeChildStd[0] = 0; + } else if (cp->PipeNativeSTDIN[0] >= 0) { + cp->PipeChildStd[0] = cp->PipeNativeSTDIN[0]; + + /* Set close-on-exec flag on the pipe's ends. The read end will + be dup2-ed into the stdin descriptor after the fork but before + the exec. */ + if ((fcntl(cp->PipeNativeSTDIN[0], F_SETFD, FD_CLOEXEC) < 0) || + (fcntl(cp->PipeNativeSTDIN[1], F_SETFD, FD_CLOEXEC) < 0)) { + kwsysProcessCleanup(cp, 1); + return; + } + } else { + cp->PipeChildStd[0] = -1; + } + + /* Create the output pipe for the last process. + We always create this so the pipe can be passed to select even if + it will report closed immediately. */ + { + /* Create the pipe. */ + int p[2]; + if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) { + kwsysProcessCleanup(cp, 1); + return; + } + + /* Store the pipe. */ + cp->PipeReadEnds[KWSYSPE_PIPE_STDOUT] = p[0]; + cp->PipeChildStd[1] = p[1]; + + /* Set close-on-exec flag on the pipe's ends. */ + if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) || + (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) { + kwsysProcessCleanup(cp, 1); + return; + } + + /* Set to non-blocking in case select lies, or for the polling + implementation. */ + if (!kwsysProcessSetNonBlocking(p[0])) { + kwsysProcessCleanup(cp, 1); + return; + } + } + + if (cp->PipeFileSTDOUT) { + /* Use a file for stdout. */ + if (!kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[1], + cp->PipeFileSTDOUT)) { + kwsysProcessCleanup(cp, 1); + return; + } + } else if (cp->PipeSharedSTDOUT) { + /* Use the parent stdout. */ + kwsysProcessCleanupDescriptor(&cp->PipeChildStd[1]); + cp->PipeChildStd[1] = 1; + } else if (cp->PipeNativeSTDOUT[1] >= 0) { + /* Use the given descriptor for stdout. */ + if (!kwsysProcessSetupOutputPipeNative(&cp->PipeChildStd[1], + cp->PipeNativeSTDOUT)) { + kwsysProcessCleanup(cp, 1); + return; + } + } + + /* Create stderr pipe to be shared by all processes in the pipeline. + We always create this so the pipe can be passed to select even if + it will report closed immediately. */ + { + /* Create the pipe. */ + int p[2]; + if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) { + kwsysProcessCleanup(cp, 1); + return; + } + + /* Store the pipe. */ + cp->PipeReadEnds[KWSYSPE_PIPE_STDERR] = p[0]; + cp->PipeChildStd[2] = p[1]; + + /* Set close-on-exec flag on the pipe's ends. 
*/ + if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) || + (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) { + kwsysProcessCleanup(cp, 1); + return; + } + + /* Set to non-blocking in case select lies, or for the polling + implementation. */ + if (!kwsysProcessSetNonBlocking(p[0])) { + kwsysProcessCleanup(cp, 1); + return; + } + } + + if (cp->PipeFileSTDERR) { + /* Use a file for stderr. */ + if (!kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[2], + cp->PipeFileSTDERR)) { + kwsysProcessCleanup(cp, 1); + return; + } + } else if (cp->PipeSharedSTDERR) { + /* Use the parent stderr. */ + kwsysProcessCleanupDescriptor(&cp->PipeChildStd[2]); + cp->PipeChildStd[2] = 2; + } else if (cp->PipeNativeSTDERR[1] >= 0) { + /* Use the given handle for stderr. */ + if (!kwsysProcessSetupOutputPipeNative(&cp->PipeChildStd[2], + cp->PipeNativeSTDERR)) { + kwsysProcessCleanup(cp, 1); + return; + } + } + + /* The timeout period starts now. */ + cp->StartTime = kwsysProcessTimeGetCurrent(); + cp->TimeoutTime.tv_sec = -1; + cp->TimeoutTime.tv_usec = -1; + + /* Create the pipeline of processes. */ + { + kwsysProcessCreateInformation si = { -1, -1, -1, { -1, -1 } }; + int nextStdIn = cp->PipeChildStd[0]; + for (i = 0; i < cp->NumberOfCommands; ++i) { + /* Setup the process's pipes. */ + si.StdIn = nextStdIn; + if (i == cp->NumberOfCommands - 1) { + nextStdIn = -1; + si.StdOut = cp->PipeChildStd[1]; + } else { + /* Create a pipe to sit between the children. */ + int p[2] = { -1, -1 }; + if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) { + if (nextStdIn != cp->PipeChildStd[0]) { + kwsysProcessCleanupDescriptor(&nextStdIn); + } + kwsysProcessCleanup(cp, 1); + return; + } + + /* Set close-on-exec flag on the pipe's ends. */ + if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) || + (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) { + close(p[0]); + close(p[1]); + if (nextStdIn != cp->PipeChildStd[0]) { + kwsysProcessCleanupDescriptor(&nextStdIn); + } + kwsysProcessCleanup(cp, 1); + return; + } + nextStdIn = p[0]; + si.StdOut = p[1]; + } + si.StdErr = cp->MergeOutput ? cp->PipeChildStd[1] : cp->PipeChildStd[2]; + + { + int res = kwsysProcessCreate(cp, i, &si); + + /* Close our copies of pipes used between children. */ + if (si.StdIn != cp->PipeChildStd[0]) { + kwsysProcessCleanupDescriptor(&si.StdIn); + } + if (si.StdOut != cp->PipeChildStd[1]) { + kwsysProcessCleanupDescriptor(&si.StdOut); + } + if (si.StdErr != cp->PipeChildStd[2] && !cp->MergeOutput) { + kwsysProcessCleanupDescriptor(&si.StdErr); + } + + if (!res) { + kwsysProcessCleanupDescriptor(&si.ErrorPipe[0]); + kwsysProcessCleanupDescriptor(&si.ErrorPipe[1]); + if (nextStdIn != cp->PipeChildStd[0]) { + kwsysProcessCleanupDescriptor(&nextStdIn); + } + kwsysProcessCleanup(cp, 1); + return; + } + } + } + } + + /* The parent process does not need the child's pipe ends. */ + for (i = 0; i < 3; ++i) { + kwsysProcessCleanupDescriptor(&cp->PipeChildStd[i]); + } + + /* Restore the working directory. */ + if (cp->RealWorkingDirectory) { + /* Some platforms specify that the chdir call may be + interrupted. Repeat the call until it finishes. */ + while ((chdir(cp->RealWorkingDirectory) < 0) && (errno == EINTR)) + ; + free(cp->RealWorkingDirectory); + cp->RealWorkingDirectory = 0; + } + + /* All the pipes are now open. */ + cp->PipesLeft = KWSYSPE_PIPE_COUNT; + + /* The process has now started. */ + cp->State = kwsysProcess_State_Executing; + cp->Detached = cp->OptionDetach; +} + +kwsysEXPORT void kwsysProcess_Disown(kwsysProcess* cp) +{ + /* Make sure a detached child process is running. 
*/ + if (!cp || !cp->Detached || cp->State != kwsysProcess_State_Executing || + cp->TimeoutExpired || cp->Killed) { + return; + } + + /* Close all the pipes safely. */ + kwsysProcessClosePipes(cp); + + /* We will not wait for exit, so cleanup now. */ + kwsysProcessCleanup(cp, 0); + + /* The process has been disowned. */ + cp->State = kwsysProcess_State_Disowned; +} + +typedef struct kwsysProcessWaitData_s +{ + int Expired; + int PipeId; + int User; + double* UserTimeout; + kwsysProcessTime TimeoutTime; +} kwsysProcessWaitData; +static int kwsysProcessWaitForPipe(kwsysProcess* cp, char** data, int* length, + kwsysProcessWaitData* wd); + +int kwsysProcess_WaitForData(kwsysProcess* cp, char** data, int* length, + double* userTimeout) +{ + kwsysProcessTime userStartTime = { 0, 0 }; + kwsysProcessWaitData wd = { 0, kwsysProcess_Pipe_None, 0, 0, { 0, 0 } }; + wd.UserTimeout = userTimeout; + /* Make sure we are executing a process. */ + if (!cp || cp->State != kwsysProcess_State_Executing || cp->Killed || + cp->TimeoutExpired) { + return kwsysProcess_Pipe_None; + } + + /* Record the time at which user timeout period starts. */ + if (userTimeout) { + userStartTime = kwsysProcessTimeGetCurrent(); + } + + /* Calculate the time at which a timeout will expire, and whether it + is the user or process timeout. */ + wd.User = kwsysProcessGetTimeoutTime(cp, userTimeout, &wd.TimeoutTime); + + /* Data can only be available when pipes are open. If the process + is not running, cp->PipesLeft will be 0. */ + while (cp->PipesLeft > 0 && + !kwsysProcessWaitForPipe(cp, data, length, &wd)) { + } + + /* Update the user timeout. */ + if (userTimeout) { + kwsysProcessTime userEndTime = kwsysProcessTimeGetCurrent(); + kwsysProcessTime difference = + kwsysProcessTimeSubtract(userEndTime, userStartTime); + double d = kwsysProcessTimeToDouble(difference); + *userTimeout -= d; + if (*userTimeout < 0) { + *userTimeout = 0; + } + } + + /* Check what happened. */ + if (wd.PipeId) { + /* Data are ready on a pipe. */ + return wd.PipeId; + } else if (wd.Expired) { + /* A timeout has expired. */ + if (wd.User) { + /* The user timeout has expired. It has no time left. */ + return kwsysProcess_Pipe_Timeout; + } else { + /* The process timeout has expired. Kill the children now. */ + kwsysProcess_Kill(cp); + cp->Killed = 0; + cp->TimeoutExpired = 1; + return kwsysProcess_Pipe_None; + } + } else { + /* No pipes are left open. */ + return kwsysProcess_Pipe_None; + } +} + +static int kwsysProcessWaitForPipe(kwsysProcess* cp, char** data, int* length, + kwsysProcessWaitData* wd) +{ + int i; + kwsysProcessTimeNative timeoutLength; + +#if KWSYSPE_USE_SELECT + int numReady = 0; + int max = -1; + kwsysProcessTimeNative* timeout = 0; + + /* Check for any open pipes with data reported ready by the last + call to select. According to "man select_tut" we must deal + with all descriptors reported by a call to select before + passing them to another select call. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + if (cp->PipeReadEnds[i] >= 0 && + FD_ISSET(cp->PipeReadEnds[i], &cp->PipeSet)) { + kwsysProcess_ssize_t n; + + /* We are handling this pipe now. Remove it from the set. */ + FD_CLR(cp->PipeReadEnds[i], &cp->PipeSet); + + /* The pipe is ready to read without blocking. Keep trying to + read until the operation is not interrupted. */ + while (((n = read(cp->PipeReadEnds[i], cp->PipeBuffer, + KWSYSPE_PIPE_BUFFER_SIZE)) < 0) && + (errno == EINTR)) + ; + if (n > 0) { + /* We have data on this pipe. 
*/ + if (i == KWSYSPE_PIPE_SIGNAL) { + /* A child process has terminated. */ + kwsysProcessDestroy(cp); + } else if (data && length) { + /* Report this data. */ + *data = cp->PipeBuffer; + *length = (int)(n); + switch (i) { + case KWSYSPE_PIPE_STDOUT: + wd->PipeId = kwsysProcess_Pipe_STDOUT; + break; + case KWSYSPE_PIPE_STDERR: + wd->PipeId = kwsysProcess_Pipe_STDERR; + break; + } + return 1; + } + } else if (n < 0 && errno == EAGAIN) { + /* No data are really ready. The select call lied. See the + "man select" page on Linux for cases when this occurs. */ + } else { + /* We are done reading from this pipe. */ + kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]); + --cp->PipesLeft; + } + } + } + + /* If we have data, break early. */ + if (wd->PipeId) { + return 1; + } + + /* Make sure the set is empty (it should always be empty here + anyway). */ + FD_ZERO(&cp->PipeSet); + + /* Setup a timeout if required. */ + if (wd->TimeoutTime.tv_sec < 0) { + timeout = 0; + } else { + timeout = &timeoutLength; + } + if (kwsysProcessGetTimeoutLeft( + &wd->TimeoutTime, wd->User ? wd->UserTimeout : 0, &timeoutLength, 0)) { + /* Timeout has already expired. */ + wd->Expired = 1; + return 1; + } + + /* Add the pipe reading ends that are still open. */ + max = -1; + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + if (cp->PipeReadEnds[i] >= 0) { + FD_SET(cp->PipeReadEnds[i], &cp->PipeSet); + if (cp->PipeReadEnds[i] > max) { + max = cp->PipeReadEnds[i]; + } + } + } + + /* Make sure we have a non-empty set. */ + if (max < 0) { + /* All pipes have closed. Child has terminated. */ + return 1; + } + + /* Run select to block until data are available. Repeat call + until it is not interrupted. */ + while (((numReady = select(max + 1, &cp->PipeSet, 0, 0, timeout)) < 0) && + (errno == EINTR)) + ; + + /* Check result of select. */ + if (numReady == 0) { + /* Select's timeout expired. */ + wd->Expired = 1; + return 1; + } else if (numReady < 0) { + /* Select returned an error. Leave the error description in the + pipe buffer. */ + strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE); + + /* Kill the children now. */ + kwsysProcess_Kill(cp); + cp->Killed = 0; + cp->SelectError = 1; + } + + return 0; +#else + /* Poll pipes for data since we do not have select. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + if (cp->PipeReadEnds[i] >= 0) { + const int fd = cp->PipeReadEnds[i]; + int n = read(fd, cp->PipeBuffer, KWSYSPE_PIPE_BUFFER_SIZE); + if (n > 0) { + /* We have data on this pipe. */ + if (i == KWSYSPE_PIPE_SIGNAL) { + /* A child process has terminated. */ + kwsysProcessDestroy(cp); + } else if (data && length) { + /* Report this data. */ + *data = cp->PipeBuffer; + *length = n; + switch (i) { + case KWSYSPE_PIPE_STDOUT: + wd->PipeId = kwsysProcess_Pipe_STDOUT; + break; + case KWSYSPE_PIPE_STDERR: + wd->PipeId = kwsysProcess_Pipe_STDERR; + break; + }; + } + return 1; + } else if (n == 0) /* EOF */ + { +/* We are done reading from this pipe. */ +# if defined(__VMS) + if (!cp->CommandsLeft) +# endif + { + kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]); + --cp->PipesLeft; + } + } else if (n < 0) /* error */ + { +# if defined(__VMS) + if (!cp->CommandsLeft) { + kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]); + --cp->PipesLeft; + } else +# endif + if ((errno != EINTR) && (errno != EAGAIN)) { + strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE); + /* Kill the children now. 
*/ + kwsysProcess_Kill(cp); + cp->Killed = 0; + cp->SelectError = 1; + return 1; + } + } + } + } + + /* If we have data, break early. */ + if (wd->PipeId) { + return 1; + } + + if (kwsysProcessGetTimeoutLeft( + &wd->TimeoutTime, wd->User ? wd->UserTimeout : 0, &timeoutLength, 1)) { + /* Timeout has already expired. */ + wd->Expired = 1; + return 1; + } + + /* Sleep a little, try again. */ + { + unsigned int msec = + ((timeoutLength.tv_sec * 1000) + (timeoutLength.tv_usec / 1000)); + if (msec > 100000) { + msec = 100000; /* do not sleep more than 100 milliseconds at a time */ + } + kwsysProcess_usleep(msec); + } + return 0; +#endif +} + +int kwsysProcess_WaitForExit(kwsysProcess* cp, double* userTimeout) +{ + int prPipe = 0; + + /* Make sure we are executing a process. */ + if (!cp || cp->State != kwsysProcess_State_Executing) { + return 1; + } + + /* Wait for all the pipes to close. Ignore all data. */ + while ((prPipe = kwsysProcess_WaitForData(cp, 0, 0, userTimeout)) > 0) { + if (prPipe == kwsysProcess_Pipe_Timeout) { + return 0; + } + } + + /* Check if there was an error in one of the waitpid calls. */ + if (cp->State == kwsysProcess_State_Error) { + /* The error message is already in its buffer. Tell + kwsysProcessCleanup to not create it. */ + kwsysProcessCleanup(cp, 0); + return 1; + } + + /* Check whether the child reported an error invoking the process. */ + if (cp->SelectError) { + /* The error message is already in its buffer. Tell + kwsysProcessCleanup to not create it. */ + kwsysProcessCleanup(cp, 0); + cp->State = kwsysProcess_State_Error; + return 1; + } + /* Determine the outcome. */ + if (cp->Killed) { + /* We killed the child. */ + cp->State = kwsysProcess_State_Killed; + } else if (cp->TimeoutExpired) { + /* The timeout expired. */ + cp->State = kwsysProcess_State_Expired; + } else { + /* The children exited. Report the outcome of the child processes. */ + for (prPipe = 0; prPipe < cp->NumberOfCommands; ++prPipe) { + cp->ProcessResults[prPipe].ExitCode = cp->CommandExitCodes[prPipe]; + if (WIFEXITED(cp->ProcessResults[prPipe].ExitCode)) { + /* The child exited normally. */ + cp->ProcessResults[prPipe].State = kwsysProcess_StateByIndex_Exited; + cp->ProcessResults[prPipe].ExitException = kwsysProcess_Exception_None; + cp->ProcessResults[prPipe].ExitValue = + (int)WEXITSTATUS(cp->ProcessResults[prPipe].ExitCode); + } else if (WIFSIGNALED(cp->ProcessResults[prPipe].ExitCode)) { + /* The child received an unhandled signal. */ + cp->ProcessResults[prPipe].State = kwsysProcess_State_Exception; + kwsysProcessSetExitExceptionByIndex( + cp, (int)WTERMSIG(cp->ProcessResults[prPipe].ExitCode), prPipe); + } else { + /* Error getting the child return code. */ + strcpy(cp->ProcessResults[prPipe].ExitExceptionString, + "Error getting child return code."); + cp->ProcessResults[prPipe].State = kwsysProcess_StateByIndex_Error; + } + } + /* support legacy state status value */ + cp->State = cp->ProcessResults[cp->NumberOfCommands - 1].State; + } + /* Normal cleanup. */ + kwsysProcessCleanup(cp, 0); + return 1; +} + +void kwsysProcess_Interrupt(kwsysProcess* cp) +{ + int i; + /* Make sure we are executing a process. */ + if (!cp || cp->State != kwsysProcess_State_Executing || cp->TimeoutExpired || + cp->Killed) { + return; + } + + /* Interrupt the children. */ + if (cp->CreateProcessGroup) { + if (cp->ForkPIDs) { + for (i = 0; i < cp->NumberOfCommands; ++i) { + /* Make sure the PID is still valid. */ + if (cp->ForkPIDs[i]) { + /* The user created a process group for this process. 
The group ID + is the process ID for the original process in the group. */ + kill(-cp->ForkPIDs[i], SIGINT); + } + } + } + } else { + /* No process group was created. Kill our own process group. + NOTE: While one could argue that we could call kill(cp->ForkPIDs[i], + SIGINT) as a way to still interrupt the process even though it's not in + a special group, this is not an option on Windows. Therefore, we kill + the current process group for consistency with Windows. */ + kill(0, SIGINT); + } +} + +void kwsysProcess_Kill(kwsysProcess* cp) +{ + int i; + + /* Make sure we are executing a process. */ + if (!cp || cp->State != kwsysProcess_State_Executing) { + return; + } + + /* First close the child exit report pipe write end to avoid causing a + SIGPIPE when the child terminates and our signal handler tries to + report it after we have already closed the read end. */ + kwsysProcessCleanupDescriptor(&cp->SignalPipe); + +#if !defined(__APPLE__) + /* Close all the pipe read ends. Do this before killing the + children because Cygwin has problems killing processes that are + blocking to wait for writing to their output pipes. */ + kwsysProcessClosePipes(cp); +#endif + + /* Kill the children. */ + cp->Killed = 1; + for (i = 0; i < cp->NumberOfCommands; ++i) { + int status; + if (cp->ForkPIDs[i]) { + /* Kill the child. */ + kwsysProcessKill(cp->ForkPIDs[i]); + + /* Reap the child. Keep trying until the call is not + interrupted. */ + while ((waitpid(cp->ForkPIDs[i], &status, 0) < 0) && (errno == EINTR)) + ; + } + } + +#if defined(__APPLE__) + /* Close all the pipe read ends. Do this after killing the + children because OS X has problems closing pipe read ends whose + pipes are full and still have an open write end. */ + kwsysProcessClosePipes(cp); +#endif + + cp->CommandsLeft = 0; +} + +/* Call the free() function with a pointer to volatile without causing + compiler warnings. */ +static void kwsysProcessVolatileFree(volatile void* p) +{ +/* clang has made it impossible to free memory that points to volatile + without first using special pragmas to disable a warning... */ +#if defined(__clang__) && !defined(__INTEL_COMPILER) +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wcast-qual" +#endif + free((void*)p); /* The cast will silence most compilers, but not clang. */ +#if defined(__clang__) && !defined(__INTEL_COMPILER) +# pragma clang diagnostic pop +#endif +} + +/* Initialize a process control structure for kwsysProcess_Execute. 
*/ +static int kwsysProcessInitialize(kwsysProcess* cp) +{ + int i; + volatile pid_t* oldForkPIDs; + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + cp->PipeReadEnds[i] = -1; + } + for (i = 0; i < 3; ++i) { + cp->PipeChildStd[i] = -1; + } + cp->SignalPipe = -1; + cp->SelectError = 0; + cp->StartTime.tv_sec = -1; + cp->StartTime.tv_usec = -1; + cp->TimeoutTime.tv_sec = -1; + cp->TimeoutTime.tv_usec = -1; + cp->TimeoutExpired = 0; + cp->PipesLeft = 0; + cp->CommandsLeft = 0; +#if KWSYSPE_USE_SELECT + FD_ZERO(&cp->PipeSet); +#endif + cp->State = kwsysProcess_State_Starting; + cp->Killed = 0; + cp->ErrorMessage[0] = 0; + + oldForkPIDs = cp->ForkPIDs; + cp->ForkPIDs = (volatile pid_t*)malloc(sizeof(volatile pid_t) * + (size_t)(cp->NumberOfCommands)); + kwsysProcessVolatileFree(oldForkPIDs); + if (!cp->ForkPIDs) { + return 0; + } + for (i = 0; i < cp->NumberOfCommands; ++i) { + cp->ForkPIDs[i] = 0; /* can't use memset due to volatile */ + } + + free(cp->CommandExitCodes); + cp->CommandExitCodes = + (int*)malloc(sizeof(int) * (size_t)(cp->NumberOfCommands)); + if (!cp->CommandExitCodes) { + return 0; + } + memset(cp->CommandExitCodes, 0, + sizeof(int) * (size_t)(cp->NumberOfCommands)); + + /* Allocate process result information for each process. */ + free(cp->ProcessResults); + cp->ProcessResults = (kwsysProcessResults*)malloc( + sizeof(kwsysProcessResults) * (size_t)(cp->NumberOfCommands)); + if (!cp->ProcessResults) { + return 0; + } + memset(cp->ProcessResults, 0, + sizeof(kwsysProcessResults) * (size_t)(cp->NumberOfCommands)); + for (i = 0; i < cp->NumberOfCommands; i++) { + cp->ProcessResults[i].ExitException = kwsysProcess_Exception_None; + cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Starting; + cp->ProcessResults[i].ExitCode = 1; + cp->ProcessResults[i].ExitValue = 1; + strcpy(cp->ProcessResults[i].ExitExceptionString, "No exception"); + } + + /* Allocate memory to save the real working directory. */ + if (cp->WorkingDirectory) { +#if defined(MAXPATHLEN) + cp->RealWorkingDirectoryLength = MAXPATHLEN; +#elif defined(PATH_MAX) + cp->RealWorkingDirectoryLength = PATH_MAX; +#else + cp->RealWorkingDirectoryLength = 4096; +#endif + cp->RealWorkingDirectory = + (char*)malloc((size_t)(cp->RealWorkingDirectoryLength)); + if (!cp->RealWorkingDirectory) { + return 0; + } + } + + return 1; +} + +/* Free all resources used by the given kwsysProcess instance that were + allocated by kwsysProcess_Execute. */ +static void kwsysProcessCleanup(kwsysProcess* cp, int error) +{ + int i; + + if (error) { + /* We are cleaning up due to an error. Report the error message + if one has not been provided already. */ + if (cp->ErrorMessage[0] == 0) { + strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE); + } + + /* Set the error state. */ + cp->State = kwsysProcess_State_Error; + + /* Kill any children already started. */ + if (cp->ForkPIDs) { + int status; + for (i = 0; i < cp->NumberOfCommands; ++i) { + if (cp->ForkPIDs[i]) { + /* Kill the child. */ + kwsysProcessKill(cp->ForkPIDs[i]); + + /* Reap the child. Keep trying until the call is not + interrupted. */ + while ((waitpid(cp->ForkPIDs[i], &status, 0) < 0) && + (errno == EINTR)) + ; + } + } + } + + /* Restore the working directory. */ + if (cp->RealWorkingDirectory) { + while ((chdir(cp->RealWorkingDirectory) < 0) && (errno == EINTR)) + ; + } + } + + /* If not creating a detached child, remove this object from the + global set of process objects that wish to be notified when a + child exits. 
*/ + if (!cp->OptionDetach) { + kwsysProcessesRemove(cp); + } + + /* Free memory. */ + if (cp->ForkPIDs) { + kwsysProcessVolatileFree(cp->ForkPIDs); + cp->ForkPIDs = 0; + } + if (cp->RealWorkingDirectory) { + free(cp->RealWorkingDirectory); + cp->RealWorkingDirectory = 0; + } + + /* Close pipe handles. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]); + } + for (i = 0; i < 3; ++i) { + kwsysProcessCleanupDescriptor(&cp->PipeChildStd[i]); + } +} + +/* Close the given file descriptor if it is open. Reset its value to -1. */ +static void kwsysProcessCleanupDescriptor(int* pfd) +{ + if (pfd && *pfd > 2) { + /* Keep trying to close until it is not interrupted by a + * signal. */ + while ((close(*pfd) < 0) && (errno == EINTR)) + ; + *pfd = -1; + } +} + +static void kwsysProcessClosePipes(kwsysProcess* cp) +{ + int i; + + /* Close any pipes that are still open. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + if (cp->PipeReadEnds[i] >= 0) { +#if KWSYSPE_USE_SELECT + /* If the pipe was reported by the last call to select, we must + read from it. This is needed to satisfy the suggestions from + "man select_tut" and is not needed for the polling + implementation. Ignore the data. */ + if (FD_ISSET(cp->PipeReadEnds[i], &cp->PipeSet)) { + /* We are handling this pipe now. Remove it from the set. */ + FD_CLR(cp->PipeReadEnds[i], &cp->PipeSet); + + /* The pipe is ready to read without blocking. Keep trying to + read until the operation is not interrupted. */ + while ((read(cp->PipeReadEnds[i], cp->PipeBuffer, + KWSYSPE_PIPE_BUFFER_SIZE) < 0) && + (errno == EINTR)) + ; + } +#endif + + /* We are done reading from this pipe. */ + kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]); + --cp->PipesLeft; + } + } +} + +static int kwsysProcessSetNonBlocking(int fd) +{ + int flags = fcntl(fd, F_GETFL); + if (flags >= 0) { + flags = fcntl(fd, F_SETFL, flags | O_NONBLOCK); + } + return flags >= 0; +} + +#if defined(__VMS) +int decc$set_child_standard_streams(int fd1, int fd2, int fd3); +#endif + +static int kwsysProcessCreate(kwsysProcess* cp, int prIndex, + kwsysProcessCreateInformation* si) +{ + sigset_t mask, old_mask; + int pgidPipe[2]; + char tmp; + ssize_t readRes; + + /* Create the error reporting pipe. */ + if (pipe(si->ErrorPipe) < 0) { + return 0; + } + + /* Create a pipe for detecting that the child process has created a process + group and session. */ + if (pipe(pgidPipe) < 0) { + kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); + kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]); + return 0; + } + + /* Set close-on-exec flag on the pipe's write end. */ + if (fcntl(si->ErrorPipe[1], F_SETFD, FD_CLOEXEC) < 0 || + fcntl(pgidPipe[1], F_SETFD, FD_CLOEXEC) < 0) { + kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); + kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]); + kwsysProcessCleanupDescriptor(&pgidPipe[0]); + kwsysProcessCleanupDescriptor(&pgidPipe[1]); + return 0; + } + + /* Block SIGINT / SIGTERM while we start. The purpose is so that our signal + handler doesn't get called from the child process after the fork and + before the exec, and subsequently start kill()'ing PIDs from ForkPIDs. 
*/ + sigemptyset(&mask); + sigaddset(&mask, SIGINT); + sigaddset(&mask, SIGTERM); + if (sigprocmask(SIG_BLOCK, &mask, &old_mask) < 0) { + kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); + kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]); + kwsysProcessCleanupDescriptor(&pgidPipe[0]); + kwsysProcessCleanupDescriptor(&pgidPipe[1]); + return 0; + } + +/* Fork off a child process. */ +#if defined(__VMS) + /* VMS needs vfork and execvp to be in the same function because + they use setjmp/longjmp to run the child startup code in the + parent! TODO: OptionDetach. Also + TODO: CreateProcessGroup. */ + cp->ForkPIDs[prIndex] = vfork(); +#else + cp->ForkPIDs[prIndex] = kwsysProcessFork(cp, si); +#endif + if (cp->ForkPIDs[prIndex] < 0) { + sigprocmask(SIG_SETMASK, &old_mask, 0); + kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); + kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]); + kwsysProcessCleanupDescriptor(&pgidPipe[0]); + kwsysProcessCleanupDescriptor(&pgidPipe[1]); + return 0; + } + + if (cp->ForkPIDs[prIndex] == 0) { +#if defined(__VMS) + /* Specify standard pipes for child process. */ + decc$set_child_standard_streams(si->StdIn, si->StdOut, si->StdErr); +#else + /* Close the read end of the error reporting / process group + setup pipe. */ + close(si->ErrorPipe[0]); + close(pgidPipe[0]); + + /* Setup the stdin, stdout, and stderr pipes. */ + if (si->StdIn > 0) { + dup2(si->StdIn, 0); + } else if (si->StdIn < 0) { + close(0); + } + if (si->StdOut != 1) { + dup2(si->StdOut, 1); + } + if (si->StdErr != 2) { + dup2(si->StdErr, 2); + } + + /* Clear the close-on-exec flag for stdin, stdout, and stderr. + All other pipe handles will be closed when exec succeeds. */ + fcntl(0, F_SETFD, 0); + fcntl(1, F_SETFD, 0); + fcntl(2, F_SETFD, 0); + + /* Restore all default signal handlers. */ + kwsysProcessRestoreDefaultSignalHandlers(); + + /* Now that we have restored default signal handling and created the + process group, restore mask. */ + sigprocmask(SIG_SETMASK, &old_mask, 0); + + /* Create new process group. We use setsid instead of setpgid to avoid + the child getting hung up on signals like SIGTTOU. (In the real world, + this has been observed where "git svn" ends up calling the "resize" + program which opens /dev/tty. */ + if (cp->CreateProcessGroup && setsid() < 0) { + kwsysProcessChildErrorExit(si->ErrorPipe[1]); + } +#endif + + /* Execute the real process. If successful, this does not return. */ + execvp(cp->Commands[prIndex][0], cp->Commands[prIndex]); + /* TODO: What does VMS do if the child fails to start? */ + /* TODO: On VMS, how do we put the process in a new group? */ + + /* Failure. Report error to parent and terminate. */ + kwsysProcessChildErrorExit(si->ErrorPipe[1]); + } + +#if defined(__VMS) + /* Restore the standard pipes of this process. */ + decc$set_child_standard_streams(0, 1, 2); +#endif + + /* We are done with the error reporting pipe and process group setup pipe + write end. */ + kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]); + kwsysProcessCleanupDescriptor(&pgidPipe[1]); + + /* Make sure the child is in the process group before we proceed. This + avoids race conditions with calls to the kill function that we make for + signalling process groups. */ + while ((readRes = read(pgidPipe[0], &tmp, 1)) > 0) + ; + if (readRes < 0) { + sigprocmask(SIG_SETMASK, &old_mask, 0); + kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); + kwsysProcessCleanupDescriptor(&pgidPipe[0]); + return 0; + } + kwsysProcessCleanupDescriptor(&pgidPipe[0]); + + /* Unmask signals. 
*/ + if (sigprocmask(SIG_SETMASK, &old_mask, 0) < 0) { + kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); + return 0; + } + + /* A child has been created. */ + ++cp->CommandsLeft; + + /* Block until the child's exec call succeeds and closes the error + pipe or writes data to the pipe to report an error. */ + { + kwsysProcess_ssize_t total = 0; + kwsysProcess_ssize_t n = 1; + /* Read the entire error message up to the length of our buffer. */ + while (total < KWSYSPE_PIPE_BUFFER_SIZE && n > 0) { + /* Keep trying to read until the operation is not interrupted. */ + while (((n = read(si->ErrorPipe[0], cp->ErrorMessage + total, + (size_t)(KWSYSPE_PIPE_BUFFER_SIZE - total))) < 0) && + (errno == EINTR)) + ; + if (n > 0) { + total += n; + } + } + + /* We are done with the error reporting pipe read end. */ + kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); + + if (total > 0) { + /* The child failed to execute the process. */ + return 0; + } + } + + return 1; +} + +static void kwsysProcessDestroy(kwsysProcess* cp) +{ + /* A child process has terminated. Reap it if it is one handled by + this object. */ + int i; + /* Temporarily disable signals that access ForkPIDs. We don't want them to + read a reaped PID, and writes to ForkPIDs are not atomic. */ + sigset_t mask, old_mask; + sigemptyset(&mask); + sigaddset(&mask, SIGINT); + sigaddset(&mask, SIGTERM); + if (sigprocmask(SIG_BLOCK, &mask, &old_mask) < 0) { + return; + } + + for (i = 0; i < cp->NumberOfCommands; ++i) { + if (cp->ForkPIDs[i]) { + int result; + while (((result = waitpid(cp->ForkPIDs[i], &cp->CommandExitCodes[i], + WNOHANG)) < 0) && + (errno == EINTR)) + ; + if (result > 0) { + /* This child has terminated. */ + cp->ForkPIDs[i] = 0; + if (--cp->CommandsLeft == 0) { + /* All children have terminated. Close the signal pipe + write end so that no more notifications are sent to this + object. */ + kwsysProcessCleanupDescriptor(&cp->SignalPipe); + + /* TODO: Once the children have terminated, switch + WaitForData to use a non-blocking read to get the + rest of the data from the pipe. This is needed when + grandchildren keep the output pipes open. */ + } + } else if (result < 0 && cp->State != kwsysProcess_State_Error) { + /* Unexpected error. Report the first time this happens. */ + strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE); + cp->State = kwsysProcess_State_Error; + } + } + } + + /* Re-enable signals. */ + sigprocmask(SIG_SETMASK, &old_mask, 0); +} + +static int kwsysProcessSetupOutputPipeFile(int* p, const char* name) +{ + int fout; + if (!name) { + return 1; + } + + /* Close the existing descriptor. */ + kwsysProcessCleanupDescriptor(p); + + /* Open a file for the pipe to write. */ + if ((fout = open(name, O_WRONLY | O_CREAT | O_TRUNC, 0666)) < 0) { + return 0; + } + + /* Set close-on-exec flag on the pipe's end. */ + if (fcntl(fout, F_SETFD, FD_CLOEXEC) < 0) { + close(fout); + return 0; + } + + /* Assign the replacement descriptor. */ + *p = fout; + return 1; +} + +static int kwsysProcessSetupOutputPipeNative(int* p, int des[2]) +{ + /* Close the existing descriptor. */ + kwsysProcessCleanupDescriptor(p); + + /* Set close-on-exec flag on the pipe's ends. The proper end will + be dup2-ed into the standard descriptor number after fork but + before exec. */ + if ((fcntl(des[0], F_SETFD, FD_CLOEXEC) < 0) || + (fcntl(des[1], F_SETFD, FD_CLOEXEC) < 0)) { + return 0; + } + + /* Assign the replacement descriptor. 
*/ + *p = des[1]; + return 1; +} + +/* Get the time at which either the process or user timeout will + expire. Returns 1 if the user timeout is first, and 0 otherwise. */ +static int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout, + kwsysProcessTime* timeoutTime) +{ + /* The first time this is called, we need to calculate the time at + which the child will timeout. */ + if (cp->Timeout > 0 && cp->TimeoutTime.tv_sec < 0) { + kwsysProcessTime length = kwsysProcessTimeFromDouble(cp->Timeout); + cp->TimeoutTime = kwsysProcessTimeAdd(cp->StartTime, length); + } + + /* Start with process timeout. */ + *timeoutTime = cp->TimeoutTime; + + /* Check if the user timeout is earlier. */ + if (userTimeout) { + kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent(); + kwsysProcessTime userTimeoutLength = + kwsysProcessTimeFromDouble(*userTimeout); + kwsysProcessTime userTimeoutTime = + kwsysProcessTimeAdd(currentTime, userTimeoutLength); + if (timeoutTime->tv_sec < 0 || + kwsysProcessTimeLess(userTimeoutTime, *timeoutTime)) { + *timeoutTime = userTimeoutTime; + return 1; + } + } + return 0; +} + +/* Get the length of time before the given timeout time arrives. + Returns 1 if the time has already arrived, and 0 otherwise. */ +static int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime, + double* userTimeout, + kwsysProcessTimeNative* timeoutLength, + int zeroIsExpired) +{ + if (timeoutTime->tv_sec < 0) { + /* No timeout time has been requested. */ + return 0; + } else { + /* Calculate the remaining time. */ + kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent(); + kwsysProcessTime timeLeft = + kwsysProcessTimeSubtract(*timeoutTime, currentTime); + if (timeLeft.tv_sec < 0 && userTimeout && *userTimeout <= 0) { + /* Caller has explicitly requested a zero timeout. */ + timeLeft.tv_sec = 0; + timeLeft.tv_usec = 0; + } + + if (timeLeft.tv_sec < 0 || + (timeLeft.tv_sec == 0 && timeLeft.tv_usec == 0 && zeroIsExpired)) { + /* Timeout has already expired. */ + return 1; + } else { + /* There is some time left. 
*/
+      timeoutLength->tv_sec = timeLeft.tv_sec;
+      timeoutLength->tv_usec = timeLeft.tv_usec;
+      return 0;
+    }
+  }
+}
+
+static kwsysProcessTime kwsysProcessTimeGetCurrent(void)
+{
+  kwsysProcessTime current;
+  kwsysProcessTimeNative current_native;
+#if KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC
+  struct timespec current_timespec;
+  clock_gettime(CLOCK_MONOTONIC, &current_timespec);
+
+  current_native.tv_sec = current_timespec.tv_sec;
+  current_native.tv_usec = current_timespec.tv_nsec / 1000;
+#else
+  gettimeofday(&current_native, 0);
+#endif
+  current.tv_sec = (long)current_native.tv_sec;
+  current.tv_usec = (long)current_native.tv_usec;
+  return current;
+}
+
+static double kwsysProcessTimeToDouble(kwsysProcessTime t)
+{
+  return (double)t.tv_sec + (double)(t.tv_usec) * 0.000001;
+}
+
+static kwsysProcessTime kwsysProcessTimeFromDouble(double d)
+{
+  kwsysProcessTime t;
+  t.tv_sec = (long)d;
+  t.tv_usec = (long)((d - (double)(t.tv_sec)) * 1000000);
+  return t;
+}
+
+static int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2)
+{
+  return ((in1.tv_sec < in2.tv_sec) ||
+          ((in1.tv_sec == in2.tv_sec) && (in1.tv_usec < in2.tv_usec)));
+}
+
+static kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1,
+                                            kwsysProcessTime in2)
+{
+  kwsysProcessTime out;
+  out.tv_sec = in1.tv_sec + in2.tv_sec;
+  out.tv_usec = in1.tv_usec + in2.tv_usec;
+  if (out.tv_usec >= 1000000) {
+    out.tv_usec -= 1000000;
+    out.tv_sec += 1;
+  }
+  return out;
+}
+
+static kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1,
+                                                 kwsysProcessTime in2)
+{
+  kwsysProcessTime out;
+  out.tv_sec = in1.tv_sec - in2.tv_sec;
+  out.tv_usec = in1.tv_usec - in2.tv_usec;
+  if (out.tv_usec < 0) {
+    out.tv_usec += 1000000;
+    out.tv_sec -= 1;
+  }
+  return out;
+}
+
+#define KWSYSPE_CASE(type, str)                                               \
+  cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_##type;      \
+  strcpy(cp->ProcessResults[idx].ExitExceptionString, str)
+static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int sig,
+                                                int idx)
+{
+  switch (sig) {
+#ifdef SIGSEGV
+    case SIGSEGV:
+      KWSYSPE_CASE(Fault, "Segmentation fault");
+      break;
+#endif
+#ifdef SIGBUS
+# if !defined(SIGSEGV) || SIGBUS != SIGSEGV
+    case SIGBUS:
+      KWSYSPE_CASE(Fault, "Bus error");
+      break;
+# endif
+#endif
+#ifdef SIGFPE
+    case SIGFPE:
+      KWSYSPE_CASE(Numerical, "Floating-point exception");
+      break;
+#endif
+#ifdef SIGILL
+    case SIGILL:
+      KWSYSPE_CASE(Illegal, "Illegal instruction");
+      break;
+#endif
+#ifdef SIGINT
+    case SIGINT:
+      KWSYSPE_CASE(Interrupt, "User interrupt");
+      break;
+#endif
+#ifdef SIGABRT
+    case SIGABRT:
+      KWSYSPE_CASE(Other, "Child aborted");
+      break;
+#endif
+#ifdef SIGKILL
+    case SIGKILL:
+      KWSYSPE_CASE(Other, "Child killed");
+      break;
+#endif
+#ifdef SIGTERM
+    case SIGTERM:
+      KWSYSPE_CASE(Other, "Child terminated");
+      break;
+#endif
+#ifdef SIGHUP
+    case SIGHUP:
+      KWSYSPE_CASE(Other, "SIGHUP");
+      break;
+#endif
+#ifdef SIGQUIT
+    case SIGQUIT:
+      KWSYSPE_CASE(Other, "SIGQUIT");
+      break;
+#endif
+#ifdef SIGTRAP
+    case SIGTRAP:
+      KWSYSPE_CASE(Other, "SIGTRAP");
+      break;
+#endif
+#ifdef SIGIOT
+# if !defined(SIGABRT) || SIGIOT != SIGABRT
+    case SIGIOT:
+      KWSYSPE_CASE(Other, "SIGIOT");
+      break;
+# endif
+#endif
+#ifdef SIGUSR1
+    case SIGUSR1:
+      KWSYSPE_CASE(Other, "SIGUSR1");
+      break;
+#endif
+#ifdef SIGUSR2
+    case SIGUSR2:
+      KWSYSPE_CASE(Other, "SIGUSR2");
+      break;
+#endif
+#ifdef SIGPIPE
+    case SIGPIPE:
+      KWSYSPE_CASE(Other, "SIGPIPE");
+      break;
+#endif
+#ifdef SIGALRM
+    case SIGALRM:
+      KWSYSPE_CASE(Other, "SIGALRM");
+      break;
+#endif
+#ifdef SIGSTKFLT
+    case
SIGSTKFLT: + KWSYSPE_CASE(Other, "SIGSTKFLT"); + break; +#endif +#ifdef SIGCHLD + case SIGCHLD: + KWSYSPE_CASE(Other, "SIGCHLD"); + break; +#elif defined(SIGCLD) + case SIGCLD: + KWSYSPE_CASE(Other, "SIGCLD"); + break; +#endif +#ifdef SIGCONT + case SIGCONT: + KWSYSPE_CASE(Other, "SIGCONT"); + break; +#endif +#ifdef SIGSTOP + case SIGSTOP: + KWSYSPE_CASE(Other, "SIGSTOP"); + break; +#endif +#ifdef SIGTSTP + case SIGTSTP: + KWSYSPE_CASE(Other, "SIGTSTP"); + break; +#endif +#ifdef SIGTTIN + case SIGTTIN: + KWSYSPE_CASE(Other, "SIGTTIN"); + break; +#endif +#ifdef SIGTTOU + case SIGTTOU: + KWSYSPE_CASE(Other, "SIGTTOU"); + break; +#endif +#ifdef SIGURG + case SIGURG: + KWSYSPE_CASE(Other, "SIGURG"); + break; +#endif +#ifdef SIGXCPU + case SIGXCPU: + KWSYSPE_CASE(Other, "SIGXCPU"); + break; +#endif +#ifdef SIGXFSZ + case SIGXFSZ: + KWSYSPE_CASE(Other, "SIGXFSZ"); + break; +#endif +#ifdef SIGVTALRM + case SIGVTALRM: + KWSYSPE_CASE(Other, "SIGVTALRM"); + break; +#endif +#ifdef SIGPROF + case SIGPROF: + KWSYSPE_CASE(Other, "SIGPROF"); + break; +#endif +#ifdef SIGWINCH + case SIGWINCH: + KWSYSPE_CASE(Other, "SIGWINCH"); + break; +#endif +#ifdef SIGPOLL + case SIGPOLL: + KWSYSPE_CASE(Other, "SIGPOLL"); + break; +#endif +#ifdef SIGIO +# if !defined(SIGPOLL) || SIGIO != SIGPOLL + case SIGIO: + KWSYSPE_CASE(Other, "SIGIO"); + break; +# endif +#endif +#ifdef SIGPWR + case SIGPWR: + KWSYSPE_CASE(Other, "SIGPWR"); + break; +#endif +#ifdef SIGSYS + case SIGSYS: + KWSYSPE_CASE(Other, "SIGSYS"); + break; +#endif +#ifdef SIGUNUSED +# if !defined(SIGSYS) || SIGUNUSED != SIGSYS + case SIGUNUSED: + KWSYSPE_CASE(Other, "SIGUNUSED"); + break; +# endif +#endif + default: + cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_Other; + sprintf(cp->ProcessResults[idx].ExitExceptionString, "Signal %d", sig); + break; + } +} +#undef KWSYSPE_CASE + +/* When the child process encounters an error before its program is + invoked, this is called to report the error to the parent and + exit. */ +static void kwsysProcessChildErrorExit(int errorPipe) +{ + /* Construct the error message. */ + char buffer[KWSYSPE_PIPE_BUFFER_SIZE]; + kwsysProcess_ssize_t result; + strncpy(buffer, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE); + buffer[KWSYSPE_PIPE_BUFFER_SIZE - 1] = '\0'; + + /* Report the error to the parent through the special pipe. */ + result = write(errorPipe, buffer, strlen(buffer)); + (void)result; + + /* Terminate without cleanup. */ + _exit(1); +} + +/* Restores all signal handlers to their default values. 
*/ +static void kwsysProcessRestoreDefaultSignalHandlers(void) +{ + struct sigaction act; + memset(&act, 0, sizeof(struct sigaction)); + act.sa_handler = SIG_DFL; +#ifdef SIGHUP + sigaction(SIGHUP, &act, 0); +#endif +#ifdef SIGINT + sigaction(SIGINT, &act, 0); +#endif +#ifdef SIGQUIT + sigaction(SIGQUIT, &act, 0); +#endif +#ifdef SIGILL + sigaction(SIGILL, &act, 0); +#endif +#ifdef SIGTRAP + sigaction(SIGTRAP, &act, 0); +#endif +#ifdef SIGABRT + sigaction(SIGABRT, &act, 0); +#endif +#ifdef SIGIOT + sigaction(SIGIOT, &act, 0); +#endif +#ifdef SIGBUS + sigaction(SIGBUS, &act, 0); +#endif +#ifdef SIGFPE + sigaction(SIGFPE, &act, 0); +#endif +#ifdef SIGUSR1 + sigaction(SIGUSR1, &act, 0); +#endif +#ifdef SIGSEGV + sigaction(SIGSEGV, &act, 0); +#endif +#ifdef SIGUSR2 + sigaction(SIGUSR2, &act, 0); +#endif +#ifdef SIGPIPE + sigaction(SIGPIPE, &act, 0); +#endif +#ifdef SIGALRM + sigaction(SIGALRM, &act, 0); +#endif +#ifdef SIGTERM + sigaction(SIGTERM, &act, 0); +#endif +#ifdef SIGSTKFLT + sigaction(SIGSTKFLT, &act, 0); +#endif +#ifdef SIGCLD + sigaction(SIGCLD, &act, 0); +#endif +#ifdef SIGCHLD + sigaction(SIGCHLD, &act, 0); +#endif +#ifdef SIGCONT + sigaction(SIGCONT, &act, 0); +#endif +#ifdef SIGTSTP + sigaction(SIGTSTP, &act, 0); +#endif +#ifdef SIGTTIN + sigaction(SIGTTIN, &act, 0); +#endif +#ifdef SIGTTOU + sigaction(SIGTTOU, &act, 0); +#endif +#ifdef SIGURG + sigaction(SIGURG, &act, 0); +#endif +#ifdef SIGXCPU + sigaction(SIGXCPU, &act, 0); +#endif +#ifdef SIGXFSZ + sigaction(SIGXFSZ, &act, 0); +#endif +#ifdef SIGVTALRM + sigaction(SIGVTALRM, &act, 0); +#endif +#ifdef SIGPROF + sigaction(SIGPROF, &act, 0); +#endif +#ifdef SIGWINCH + sigaction(SIGWINCH, &act, 0); +#endif +#ifdef SIGPOLL + sigaction(SIGPOLL, &act, 0); +#endif +#ifdef SIGIO + sigaction(SIGIO, &act, 0); +#endif +#ifdef SIGPWR + sigaction(SIGPWR, &act, 0); +#endif +#ifdef SIGSYS + sigaction(SIGSYS, &act, 0); +#endif +#ifdef SIGUNUSED + sigaction(SIGUNUSED, &act, 0); +#endif +} + +static void kwsysProcessExit(void) +{ + _exit(0); +} + +#if !defined(__VMS) +static pid_t kwsysProcessFork(kwsysProcess* cp, + kwsysProcessCreateInformation* si) +{ + /* Create a detached process if requested. */ + if (cp->OptionDetach) { + /* Create an intermediate process. */ + pid_t middle_pid = fork(); + if (middle_pid < 0) { + /* Fork failed. Return as if we were not detaching. */ + return middle_pid; + } else if (middle_pid == 0) { + /* This is the intermediate process. Create the real child. */ + pid_t child_pid = fork(); + if (child_pid == 0) { + /* This is the real child process. There is nothing to do here. */ + return 0; + } else { + /* Use the error pipe to report the pid to the real parent. */ + while ((write(si->ErrorPipe[1], &child_pid, sizeof(child_pid)) < 0) && + (errno == EINTR)) + ; + + /* Exit without cleanup. The parent holds all resources. */ + kwsysProcessExit(); + return 0; /* Never reached, but avoids SunCC warning. */ + } + } else { + /* This is the original parent process. The intermediate + process will use the error pipe to report the pid of the + detached child. */ + pid_t child_pid; + int status; + while ((read(si->ErrorPipe[0], &child_pid, sizeof(child_pid)) < 0) && + (errno == EINTR)) + ; + + /* Wait for the intermediate process to exit and clean it up. */ + while ((waitpid(middle_pid, &status, 0) < 0) && (errno == EINTR)) + ; + return child_pid; + } + } else { + /* Not creating a detached process. Use normal fork. */ + return fork(); + } +} +#endif + +/* We try to obtain process information by invoking the ps command. 
+ Here we define the command to call on each platform and the + corresponding parsing format string. The parsing format should + have two integers to store: the pid and then the ppid. */ +#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(__OpenBSD__) || defined(__GLIBC__) || defined(__GNU__) +# define KWSYSPE_PS_COMMAND "ps axo pid,ppid" +# define KWSYSPE_PS_FORMAT "%d %d\n" +#elif defined(__sun) && (defined(__SVR4) || defined(__svr4__)) /* Solaris */ +# define KWSYSPE_PS_COMMAND "ps -e -o pid,ppid" +# define KWSYSPE_PS_FORMAT "%d %d\n" +#elif defined(__hpux) || defined(__sun__) || defined(__sgi) || \ + defined(_AIX) || defined(__sparc) +# define KWSYSPE_PS_COMMAND "ps -ef" +# define KWSYSPE_PS_FORMAT "%*s %d %d %*[^\n]\n" +#elif defined(__QNX__) +# define KWSYSPE_PS_COMMAND "ps -Af" +# define KWSYSPE_PS_FORMAT "%*d %d %d %*[^\n]\n" +#elif defined(__CYGWIN__) +# define KWSYSPE_PS_COMMAND "ps aux" +# define KWSYSPE_PS_FORMAT "%d %d %*[^\n]\n" +#endif + +void kwsysProcess_KillPID(unsigned long process_id) +{ + kwsysProcessKill((pid_t)process_id); +} + +static void kwsysProcessKill(pid_t process_id) +{ +#if defined(__linux__) || defined(__CYGWIN__) + DIR* procdir; +#endif + + /* Suspend the process to be sure it will not create more children. */ + kill(process_id, SIGSTOP); + +#if defined(__CYGWIN__) + /* Some Cygwin versions seem to need help here. Give up our time slice + so that the child can process SIGSTOP before we send SIGKILL. */ + usleep(1); +#endif + +/* Kill all children if we can find them. */ +#if defined(__linux__) || defined(__CYGWIN__) + /* First try using the /proc filesystem. */ + if ((procdir = opendir("/proc")) != NULL) { +# if defined(MAXPATHLEN) + char fname[MAXPATHLEN]; +# elif defined(PATH_MAX) + char fname[PATH_MAX]; +# else + char fname[4096]; +# endif + char buffer[KWSYSPE_PIPE_BUFFER_SIZE + 1]; + struct dirent* d; + + /* Each process has a directory in /proc whose name is the pid. + Within this directory is a file called stat that has the + following format: + + pid (command line) status ppid ... + + We want to get the ppid for all processes. Those that have + process_id as their parent should be recursively killed. */ + for (d = readdir(procdir); d; d = readdir(procdir)) { + int pid; + if (sscanf(d->d_name, "%d", &pid) == 1 && pid != 0) { + struct stat finfo; + sprintf(fname, "/proc/%d/stat", pid); + if (stat(fname, &finfo) == 0) { + FILE* f = fopen(fname, "r"); + if (f) { + size_t nread = fread(buffer, 1, KWSYSPE_PIPE_BUFFER_SIZE, f); + fclose(f); + buffer[nread] = '\0'; + if (nread > 0) { + const char* rparen = strrchr(buffer, ')'); + int ppid; + if (rparen && (sscanf(rparen + 1, "%*s %d", &ppid) == 1)) { + if (ppid == process_id) { + /* Recursively kill this child and its children. */ + kwsysProcessKill(pid); + } + } + } + } + } + } + } + closedir(procdir); + } else +#endif + { +#if defined(KWSYSPE_PS_COMMAND) + /* Try running "ps" to get the process information. */ + FILE* ps = popen(KWSYSPE_PS_COMMAND, "r"); + + /* Make sure the process started and provided a valid header. */ + if (ps && fscanf(ps, "%*[^\n]\n") != EOF) { + /* Look for processes whose parent is the process being killed. */ + int pid, ppid; + while (fscanf(ps, KWSYSPE_PS_FORMAT, &pid, &ppid) == 2) { + if (ppid == process_id) { + /* Recursively kill this child and its children. */ + kwsysProcessKill(pid); + } + } + } + + /* We are done with the ps process. */ + if (ps) { + pclose(ps); + } +#endif + } + + /* Kill the process. 
*/ + kill(process_id, SIGKILL); + +#if defined(__APPLE__) + /* On OS X 10.3 the above SIGSTOP occasionally prevents the SIGKILL + from working. Just in case, we resume the child and kill it + again. There is a small race condition in this obscure case. If + the child manages to fork again between these two signals, we + will not catch its children. */ + kill(process_id, SIGCONT); + kill(process_id, SIGKILL); +#endif +} + +#if defined(__VMS) +int decc$feature_get_index(const char* name); +int decc$feature_set_value(int index, int mode, int value); +static int kwsysProcessSetVMSFeature(const char* name, int value) +{ + int i; + errno = 0; + i = decc$feature_get_index(name); + return i >= 0 && (decc$feature_set_value(i, 1, value) >= 0 || errno == 0); +} +#endif + +/* Global set of executing processes for use by the signal handler. + This global instance will be zero-initialized by the compiler. */ +typedef struct kwsysProcessInstances_s +{ + int Count; + int Size; + kwsysProcess** Processes; +} kwsysProcessInstances; +static kwsysProcessInstances kwsysProcesses; + +/* The old SIGCHLD / SIGINT / SIGTERM handlers. */ +static struct sigaction kwsysProcessesOldSigChldAction; +static struct sigaction kwsysProcessesOldSigIntAction; +static struct sigaction kwsysProcessesOldSigTermAction; + +static void kwsysProcessesUpdate(kwsysProcessInstances* newProcesses) +{ + /* Block signals while we update the set of pipes to check. + TODO: sigprocmask is undefined for threaded apps. See + pthread_sigmask. */ + sigset_t newset; + sigset_t oldset; + sigemptyset(&newset); + sigaddset(&newset, SIGCHLD); + sigaddset(&newset, SIGINT); + sigaddset(&newset, SIGTERM); + sigprocmask(SIG_BLOCK, &newset, &oldset); + + /* Store the new set in that seen by the signal handler. */ + kwsysProcesses = *newProcesses; + + /* Restore the signal mask to the previous setting. */ + sigprocmask(SIG_SETMASK, &oldset, 0); +} + +static int kwsysProcessesAdd(kwsysProcess* cp) +{ + /* Create a pipe through which the signal handler can notify the + given process object that a child has exited. */ + { + /* Create the pipe. */ + int p[2]; + if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) { + return 0; + } + + /* Store the pipes now to be sure they are cleaned up later. */ + cp->PipeReadEnds[KWSYSPE_PIPE_SIGNAL] = p[0]; + cp->SignalPipe = p[1]; + + /* Switch the pipe to non-blocking mode so that reading a byte can + be an atomic test-and-set. */ + if (!kwsysProcessSetNonBlocking(p[0]) || + !kwsysProcessSetNonBlocking(p[1])) { + return 0; + } + + /* The children do not need this pipe. Set close-on-exec flag on + the pipe's ends. */ + if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) || + (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) { + return 0; + } + } + + /* Attempt to add the given signal pipe to the signal handler set. */ + { + + /* Make sure there is enough space for the new signal pipe. */ + kwsysProcessInstances oldProcesses = kwsysProcesses; + kwsysProcessInstances newProcesses = oldProcesses; + if (oldProcesses.Count == oldProcesses.Size) { + /* Start with enough space for a small number of process instances + and double the size each time more is needed. */ + newProcesses.Size = oldProcesses.Size ? oldProcesses.Size * 2 : 4; + + /* Try allocating the new block of memory. */ + if ((newProcesses.Processes = ((kwsysProcess**)malloc( + (size_t)(newProcesses.Size) * sizeof(kwsysProcess*))))) { + /* Copy the old pipe set to the new memory. 
*/
+        if (oldProcesses.Count > 0) {
+          memcpy(newProcesses.Processes, oldProcesses.Processes,
+                 ((size_t)(oldProcesses.Count) * sizeof(kwsysProcess*)));
+        }
+      } else {
+        /* Failed to allocate memory for the new signal pipe set.  */
+        return 0;
+      }
+    }
+
+    /* Append the new signal pipe to the set.  */
+    newProcesses.Processes[newProcesses.Count++] = cp;
+
+    /* Store the new set in that seen by the signal handler.  */
+    kwsysProcessesUpdate(&newProcesses);
+
+    /* Free the original pipes if new ones were allocated.  */
+    if (newProcesses.Processes != oldProcesses.Processes) {
+      free(oldProcesses.Processes);
+    }
+
+    /* If this is the first process, enable the signal handler.  */
+    if (newProcesses.Count == 1) {
+      /* Install our handler for SIGCHLD.  Repeat call until it is not
+         interrupted.  */
+      struct sigaction newSigAction;
+      memset(&newSigAction, 0, sizeof(struct sigaction));
+#if KWSYSPE_USE_SIGINFO
+      newSigAction.sa_sigaction = kwsysProcessesSignalHandler;
+      newSigAction.sa_flags = SA_NOCLDSTOP | SA_SIGINFO;
+# ifdef SA_RESTART
+      newSigAction.sa_flags |= SA_RESTART;
+# endif
+#else
+      newSigAction.sa_handler = kwsysProcessesSignalHandler;
+      newSigAction.sa_flags = SA_NOCLDSTOP;
+#endif
+      sigemptyset(&newSigAction.sa_mask);
+      while ((sigaction(SIGCHLD, &newSigAction,
+                        &kwsysProcessesOldSigChldAction) < 0) &&
+             (errno == EINTR))
+        ;
+
+      /* Install our handler for SIGINT / SIGTERM.  Repeat call until
+         it is not interrupted.  */
+      sigemptyset(&newSigAction.sa_mask);
+      sigaddset(&newSigAction.sa_mask, SIGTERM);
+      while ((sigaction(SIGINT, &newSigAction,
+                        &kwsysProcessesOldSigIntAction) < 0) &&
+             (errno == EINTR))
+        ;
+
+      sigemptyset(&newSigAction.sa_mask);
+      sigaddset(&newSigAction.sa_mask, SIGINT);
+      while ((sigaction(SIGTERM, &newSigAction,
+                        &kwsysProcessesOldSigTermAction) < 0) &&
+             (errno == EINTR))
+        ;
+    }
+  }
+
+  return 1;
+}
+
+static void kwsysProcessesRemove(kwsysProcess* cp)
+{
+  /* Attempt to remove the given signal pipe from the signal handler set.  */
+  {
+    /* Find the given process in the set.  */
+    kwsysProcessInstances newProcesses = kwsysProcesses;
+    int i;
+    for (i = 0; i < newProcesses.Count; ++i) {
+      if (newProcesses.Processes[i] == cp) {
+        break;
+      }
+    }
+    if (i < newProcesses.Count) {
+      /* Remove the process from the set.  */
+      --newProcesses.Count;
+      for (; i < newProcesses.Count; ++i) {
+        newProcesses.Processes[i] = newProcesses.Processes[i + 1];
+      }
+
+      /* If this was the last process, disable the signal handler.  */
+      if (newProcesses.Count == 0) {
+        /* Restore the signal handlers.  Repeat call until it is not
+           interrupted.  */
+        while ((sigaction(SIGCHLD, &kwsysProcessesOldSigChldAction, 0) < 0) &&
+               (errno == EINTR))
+          ;
+        while ((sigaction(SIGINT, &kwsysProcessesOldSigIntAction, 0) < 0) &&
+               (errno == EINTR))
+          ;
+        while ((sigaction(SIGTERM, &kwsysProcessesOldSigTermAction, 0) < 0) &&
+               (errno == EINTR))
+          ;
+
+        /* Free the table of process pointers since it is now empty.
+           This is safe because the signal handler has been removed.  */
+        newProcesses.Size = 0;
+        free(newProcesses.Processes);
+        newProcesses.Processes = 0;
+      }
+
+      /* Store the new set in that seen by the signal handler.  */
+      kwsysProcessesUpdate(&newProcesses);
+    }
+  }
+
+  /* Close the pipe through which the signal handler may have notified
+     the given process object that a child has exited.
*/
+  kwsysProcessCleanupDescriptor(&cp->SignalPipe);
+}
+
+static void kwsysProcessesSignalHandler(int signum
+#if KWSYSPE_USE_SIGINFO
+                                        ,
+                                        siginfo_t* info, void* ucontext
+#endif
+)
+{
+  int i, j, procStatus, old_errno = errno;
+#if KWSYSPE_USE_SIGINFO
+  (void)info;
+  (void)ucontext;
+#endif
+
+  /* Signal all process objects that a child has terminated.  */
+  switch (signum) {
+    case SIGCHLD:
+      for (i = 0; i < kwsysProcesses.Count; ++i) {
+        /* Set the pipe in a signalled state.  */
+        char buf = 1;
+        kwsysProcess* cp = kwsysProcesses.Processes[i];
+        kwsysProcess_ssize_t pipeStatus =
+          read(cp->PipeReadEnds[KWSYSPE_PIPE_SIGNAL], &buf, 1);
+        (void)pipeStatus;
+        pipeStatus = write(cp->SignalPipe, &buf, 1);
+        (void)pipeStatus;
+      }
+      break;
+    case SIGINT:
+    case SIGTERM:
+      /* Signal child processes that are running in new process groups.  */
+      for (i = 0; i < kwsysProcesses.Count; ++i) {
+        kwsysProcess* cp = kwsysProcesses.Processes[i];
+        /* Check Killed to avoid data race condition when killing.
+           Check State to avoid data race condition in kwsysProcessCleanup
+           when there is an error (it leaves a reaped PID).  */
+        if (cp->CreateProcessGroup && !cp->Killed &&
+            cp->State != kwsysProcess_State_Error && cp->ForkPIDs) {
+          for (j = 0; j < cp->NumberOfCommands; ++j) {
+            /* Make sure the PID is still valid. */
+            if (cp->ForkPIDs[j]) {
+              /* The user created a process group for this process.  The group
+                 ID
+                 is the process ID for the original process in the group.  */
+              kill(-cp->ForkPIDs[j], SIGINT);
+            }
+          }
+        }
+      }
+
+      /* Wait for all processes to terminate.  */
+      while (wait(&procStatus) >= 0 || errno != ECHILD) {
+      }
+
+      /* Terminate the process, which is now in an inconsistent state
+         because we reaped all the PIDs that it may have been reaping
+         or may have reaped in the future.  Reraise the signal so that
+         the proper exit code is returned.  */
+      {
+        /* Install default signal handler.  */
+        struct sigaction defSigAction;
+        sigset_t unblockSet;
+        memset(&defSigAction, 0, sizeof(defSigAction));
+        defSigAction.sa_handler = SIG_DFL;
+        sigemptyset(&defSigAction.sa_mask);
+        while ((sigaction(signum, &defSigAction, 0) < 0) && (errno == EINTR))
+          ;
+        /* Unmask the signal.  */
+        sigemptyset(&unblockSet);
+        sigaddset(&unblockSet, signum);
+        sigprocmask(SIG_UNBLOCK, &unblockSet, 0);
+        /* Raise the signal again.  */
+        raise(signum);
+        /* We shouldn't get here... but if we do... */
+        _exit(1);
+      }
+      /* break omitted to silence unreachable code clang compiler warning.  */
+  }
+
+#if !KWSYSPE_USE_SIGINFO
+  /* Re-Install our handler.  Repeat call until it is not interrupted.  */
+  {
+    struct sigaction newSigAction;
+    struct sigaction* oldSigAction;
+    memset(&newSigAction, 0, sizeof(struct sigaction));
+    newSigAction.sa_handler = kwsysProcessesSignalHandler;
+    newSigAction.sa_flags = SA_NOCLDSTOP;
+    sigemptyset(&newSigAction.sa_mask);
+    switch (signum) {
+      case SIGCHLD:
+        oldSigAction = &kwsysProcessesOldSigChldAction;
+        break;
+      case SIGINT:
+        sigaddset(&newSigAction.sa_mask, SIGTERM);
+        oldSigAction = &kwsysProcessesOldSigIntAction;
+        break;
+      case SIGTERM:
+        sigaddset(&newSigAction.sa_mask, SIGINT);
+        oldSigAction = &kwsysProcessesOldSigTermAction;
+        break;
+      default:
+        return;
+    }
+    while ((sigaction(signum, &newSigAction, oldSigAction) < 0) &&
+           (errno == EINTR))
+      ;
+  }
+#endif
+
+  errno = old_errno;
+}
+
+void kwsysProcess_ResetStartTime(kwsysProcess* cp)
+{
+  if (!cp) {
+    return;
+  }
+  /* Reset start time.
*/
+  cp->StartTime = kwsysProcessTimeGetCurrent();
+}
diff --git a/test/API/driver/kwsys/ProcessWin32.c b/test/API/driver/kwsys/ProcessWin32.c
new file mode 100644
index 00000000000..a963862f060
--- /dev/null
+++ b/test/API/driver/kwsys/ProcessWin32.c
@@ -0,0 +1,2786 @@
+/* Distributed under the OSI-approved BSD 3-Clause License.  See accompanying
+   file Copyright.txt or https://cmake.org/licensing#kwsys for details.  */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Process.h)
+#include KWSYS_HEADER(Encoding.h)
+
+/* Work-around CMake dependency scanning limitation.  This must
+   duplicate the above list of headers.  */
+#if 0
+# include "Encoding.h.in"
+# include "Process.h.in"
+#endif
+
+/*
+
+Implementation for Windows
+
+On windows, a thread is created to wait for data on each pipe.  The
+threads are synchronized with the main thread to simulate the use of
+a UNIX-style select system call.
+
+*/
+
+#ifdef _MSC_VER
+# pragma warning(push, 1)
+#endif
+#include <windows.h> /* Windows API */
+#if defined(_MSC_VER) && _MSC_VER >= 1800
+# define KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+#endif
+#include <io.h>     /* _unlink */
+#include <stdio.h>  /* sprintf */
+#include <string.h> /* strlen, strdup */
+#ifdef __WATCOMC__
+# define _unlink unlink
+#endif
+
+#ifndef _MAX_FNAME
+# define _MAX_FNAME 4096
+#endif
+#ifndef _MAX_PATH
+# define _MAX_PATH 4096
+#endif
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+# pragma warning(disable : 4514)
+# pragma warning(disable : 4706)
+#endif
+
+#if defined(__BORLANDC__)
+# pragma warn - 8004 /* assigned a value that is never used */
+# pragma warn - 8060 /* Assignment inside if() condition. */
+#endif
+
+/* There are pipes for the process pipeline's stdout and stderr.  */
+#define KWSYSPE_PIPE_COUNT 2
+#define KWSYSPE_PIPE_STDOUT 0
+#define KWSYSPE_PIPE_STDERR 1
+
+/* The maximum amount to read from a pipe at a time.  */
+#define KWSYSPE_PIPE_BUFFER_SIZE 1024
+
+/* Debug output macro.  */
+#if 0
+# define KWSYSPE_DEBUG(x)                                                     \
+   ((void*)cp == (void*)0x00226DE0                                            \
+      ? (fprintf(stderr, "%d/%p/%d ", (int)GetCurrentProcessId(), cp,         \
+                 __LINE__),                                                   \
+         fprintf x, fflush(stderr), 1)                                        \
+      : (1))
+#else
+# define KWSYSPE_DEBUG(x) (void)1
+#endif
+
+typedef LARGE_INTEGER kwsysProcessTime;
+
+typedef struct kwsysProcessCreateInformation_s
+{
+  /* Windows child startup control data.  */
+  STARTUPINFOW StartupInfo;
+
+  /* Original handles before making inherited duplicates.
*/ + HANDLE hStdInput; + HANDLE hStdOutput; + HANDLE hStdError; +} kwsysProcessCreateInformation; + +typedef struct kwsysProcessPipeData_s kwsysProcessPipeData; +static DWORD WINAPI kwsysProcessPipeThreadRead(LPVOID ptd); +static void kwsysProcessPipeThreadReadPipe(kwsysProcess* cp, + kwsysProcessPipeData* td); +static DWORD WINAPI kwsysProcessPipeThreadWake(LPVOID ptd); +static void kwsysProcessPipeThreadWakePipe(kwsysProcess* cp, + kwsysProcessPipeData* td); +static int kwsysProcessInitialize(kwsysProcess* cp); +static DWORD kwsysProcessCreate(kwsysProcess* cp, int index, + kwsysProcessCreateInformation* si); +static void kwsysProcessDestroy(kwsysProcess* cp, int event); +static DWORD kwsysProcessSetupOutputPipeFile(PHANDLE handle, const char* name); +static void kwsysProcessSetupSharedPipe(DWORD nStdHandle, PHANDLE handle); +static void kwsysProcessSetupPipeNative(HANDLE native, PHANDLE handle); +static void kwsysProcessCleanupHandle(PHANDLE h); +static void kwsysProcessCleanup(kwsysProcess* cp, DWORD error); +static void kwsysProcessCleanErrorMessage(kwsysProcess* cp); +static int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout, + kwsysProcessTime* timeoutTime); +static int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime, + double* userTimeout, + kwsysProcessTime* timeoutLength); +static kwsysProcessTime kwsysProcessTimeGetCurrent(void); +static DWORD kwsysProcessTimeToDWORD(kwsysProcessTime t); +static double kwsysProcessTimeToDouble(kwsysProcessTime t); +static kwsysProcessTime kwsysProcessTimeFromDouble(double d); +static int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2); +static kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1, + kwsysProcessTime in2); +static kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1, + kwsysProcessTime in2); +static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int code, + int idx); +static void kwsysProcessKillTree(int pid); +static void kwsysProcessDisablePipeThreads(kwsysProcess* cp); +static int kwsysProcessesInitialize(void); +static int kwsysTryEnterCreateProcessSection(void); +static void kwsysLeaveCreateProcessSection(void); +static int kwsysProcessesAdd(HANDLE hProcess, DWORD dwProcessId, + int newProcessGroup); +static void kwsysProcessesRemove(HANDLE hProcess); +static BOOL WINAPI kwsysCtrlHandler(DWORD dwCtrlType); + +/* A structure containing synchronization data for each thread. */ +typedef struct kwsysProcessPipeSync_s kwsysProcessPipeSync; +struct kwsysProcessPipeSync_s +{ + /* Handle to the thread. */ + HANDLE Thread; + + /* Semaphore indicating to the thread that a process has started. */ + HANDLE Ready; + + /* Semaphore indicating to the thread that it should begin work. */ + HANDLE Go; + + /* Semaphore indicating thread has reset for another process. */ + HANDLE Reset; +}; + +/* A structure containing data for each pipe's threads. */ +struct kwsysProcessPipeData_s +{ + /* ------------- Data managed per instance of kwsysProcess ------------- */ + + /* Synchronization data for reading thread. */ + kwsysProcessPipeSync Reader; + + /* Synchronization data for waking thread. */ + kwsysProcessPipeSync Waker; + + /* Index of this pipe. */ + int Index; + + /* The kwsysProcess instance owning this pipe. */ + kwsysProcess* Process; + + /* ------------- Data managed per call to Execute ------------- */ + + /* Buffer for data read in this pipe's thread. */ + char DataBuffer[KWSYSPE_PIPE_BUFFER_SIZE]; + + /* The length of the data stored in the buffer. 
*/ + DWORD DataLength; + + /* Whether the pipe has been closed. */ + int Closed; + + /* Handle for the read end of this pipe. */ + HANDLE Read; + + /* Handle for the write end of this pipe. */ + HANDLE Write; +}; + +/* A structure containing results data for each process. */ +typedef struct kwsysProcessResults_s kwsysProcessResults; +struct kwsysProcessResults_s +{ + /* The status of the process. */ + int State; + + /* The exceptional behavior that terminated the process, if any. */ + int ExitException; + + /* The process exit code. */ + DWORD ExitCode; + + /* The process return code, if any. */ + int ExitValue; + + /* Description for the ExitException. */ + char ExitExceptionString[KWSYSPE_PIPE_BUFFER_SIZE + 1]; +}; + +/* Structure containing data used to implement the child's execution. */ +struct kwsysProcess_s +{ + /* ------------- Data managed per instance of kwsysProcess ------------- */ + + /* The status of the process structure. */ + int State; + + /* The command lines to execute. */ + wchar_t** Commands; + int NumberOfCommands; + + /* The exit code of each command. */ + DWORD* CommandExitCodes; + + /* The working directory for the child process. */ + wchar_t* WorkingDirectory; + + /* Whether to create the child as a detached process. */ + int OptionDetach; + + /* Whether the child was created as a detached process. */ + int Detached; + + /* Whether to hide the child process's window. */ + int HideWindow; + + /* Whether to treat command lines as verbatim. */ + int Verbatim; + + /* Whether to merge stdout/stderr of the child. */ + int MergeOutput; + + /* Whether to create the process in a new process group. */ + int CreateProcessGroup; + + /* Mutex to protect the shared index used by threads to report data. */ + HANDLE SharedIndexMutex; + + /* Semaphore used by threads to signal data ready. */ + HANDLE Full; + + /* Whether we are currently deleting this kwsysProcess instance. */ + int Deleting; + + /* Data specific to each pipe and its thread. */ + kwsysProcessPipeData Pipe[KWSYSPE_PIPE_COUNT]; + + /* Name of files to which stdin and stdout pipes are attached. */ + char* PipeFileSTDIN; + char* PipeFileSTDOUT; + char* PipeFileSTDERR; + + /* Whether each pipe is shared with the parent process. */ + int PipeSharedSTDIN; + int PipeSharedSTDOUT; + int PipeSharedSTDERR; + + /* Native pipes provided by the user. */ + HANDLE PipeNativeSTDIN[2]; + HANDLE PipeNativeSTDOUT[2]; + HANDLE PipeNativeSTDERR[2]; + + /* ------------- Data managed per call to Execute ------------- */ + + /* Index of last pipe to report data, if any. */ + int CurrentIndex; + + /* Index shared by threads to report data. */ + int SharedIndex; + + /* The timeout length. */ + double Timeout; + + /* Time at which the child started. */ + kwsysProcessTime StartTime; + + /* Time at which the child will timeout. Negative for no timeout. */ + kwsysProcessTime TimeoutTime; + + /* Flag for whether the process was killed. */ + int Killed; + + /* Flag for whether the timeout expired. */ + int TimeoutExpired; + + /* Flag for whether the process has terminated. */ + int Terminated; + + /* The number of pipes still open during execution and while waiting + for pipes to close after process termination. */ + int PipesLeft; + + /* Buffer for error messages. */ + char ErrorMessage[KWSYSPE_PIPE_BUFFER_SIZE + 1]; + + /* process results. */ + kwsysProcessResults* ProcessResults; + + /* Windows process information data. */ + PROCESS_INFORMATION* ProcessInformation; + + /* Data and process termination events for which to wait. 
*/ + PHANDLE ProcessEvents; + int ProcessEventsLength; + + /* Real working directory of our own process. */ + DWORD RealWorkingDirectoryLength; + wchar_t* RealWorkingDirectory; + + /* Own handles for the child's ends of the pipes in the parent process. + Used temporarily during process creation. */ + HANDLE PipeChildStd[3]; +}; + +kwsysProcess* kwsysProcess_New(void) +{ + int i; + + /* Process control structure. */ + kwsysProcess* cp; + + /* Windows version number data. */ + OSVERSIONINFO osv; + + /* Initialize list of processes before we get any farther. It's especially + important that the console Ctrl handler be added BEFORE starting the + first process. This prevents the risk of an orphaned process being + started by the main thread while the default Ctrl handler is in + progress. */ + if (!kwsysProcessesInitialize()) { + return 0; + } + + /* Allocate a process control structure. */ + cp = (kwsysProcess*)malloc(sizeof(kwsysProcess)); + if (!cp) { + /* Could not allocate memory for the control structure. */ + return 0; + } + ZeroMemory(cp, sizeof(*cp)); + + /* Share stdin with the parent process by default. */ + cp->PipeSharedSTDIN = 1; + + /* Set initial status. */ + cp->State = kwsysProcess_State_Starting; + + /* Choose a method of running the child based on version of + windows. */ + ZeroMemory(&osv, sizeof(osv)); + osv.dwOSVersionInfoSize = sizeof(osv); +#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx +# pragma warning(push) +# ifdef __INTEL_COMPILER +# pragma warning(disable : 1478) +# elif defined __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" +# else +# pragma warning(disable : 4996) +# endif +#endif + GetVersionEx(&osv); +#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx +# ifdef __clang__ +# pragma clang diagnostic pop +# else +# pragma warning(pop) +# endif +#endif + if (osv.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS) { + /* Win9x no longer supported. */ + kwsysProcess_Delete(cp); + return 0; + } + + /* Initially no thread owns the mutex. Initialize semaphore to 1. */ + if (!(cp->SharedIndexMutex = CreateSemaphore(0, 1, 1, 0))) { + kwsysProcess_Delete(cp); + return 0; + } + + /* Initially no data are available. Initialize semaphore to 0. */ + if (!(cp->Full = CreateSemaphore(0, 0, 1, 0))) { + kwsysProcess_Delete(cp); + return 0; + } + + /* Create the thread to read each pipe. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + DWORD dummy = 0; + + /* Assign the thread its index. */ + cp->Pipe[i].Index = i; + + /* Give the thread a pointer back to the kwsysProcess instance. */ + cp->Pipe[i].Process = cp; + + /* No process is yet running. Initialize semaphore to 0. */ + if (!(cp->Pipe[i].Reader.Ready = CreateSemaphore(0, 0, 1, 0))) { + kwsysProcess_Delete(cp); + return 0; + } + + /* The pipe is not yet reset. Initialize semaphore to 0. */ + if (!(cp->Pipe[i].Reader.Reset = CreateSemaphore(0, 0, 1, 0))) { + kwsysProcess_Delete(cp); + return 0; + } + + /* The thread's buffer is initially empty. Initialize semaphore to 1. */ + if (!(cp->Pipe[i].Reader.Go = CreateSemaphore(0, 1, 1, 0))) { + kwsysProcess_Delete(cp); + return 0; + } + + /* Create the reading thread. It will block immediately. The + thread will not make deeply nested calls, so we need only a + small stack. */ + if (!(cp->Pipe[i].Reader.Thread = CreateThread( + 0, 1024, kwsysProcessPipeThreadRead, &cp->Pipe[i], 0, &dummy))) { + kwsysProcess_Delete(cp); + return 0; + } + + /* No process is yet running. Initialize semaphore to 0. 
*/ + if (!(cp->Pipe[i].Waker.Ready = CreateSemaphore(0, 0, 1, 0))) { + kwsysProcess_Delete(cp); + return 0; + } + + /* The pipe is not yet reset. Initialize semaphore to 0. */ + if (!(cp->Pipe[i].Waker.Reset = CreateSemaphore(0, 0, 1, 0))) { + kwsysProcess_Delete(cp); + return 0; + } + + /* The waker should not wake immediately. Initialize semaphore to 0. */ + if (!(cp->Pipe[i].Waker.Go = CreateSemaphore(0, 0, 1, 0))) { + kwsysProcess_Delete(cp); + return 0; + } + + /* Create the waking thread. It will block immediately. The + thread will not make deeply nested calls, so we need only a + small stack. */ + if (!(cp->Pipe[i].Waker.Thread = CreateThread( + 0, 1024, kwsysProcessPipeThreadWake, &cp->Pipe[i], 0, &dummy))) { + kwsysProcess_Delete(cp); + return 0; + } + } + for (i = 0; i < 3; ++i) { + cp->PipeChildStd[i] = INVALID_HANDLE_VALUE; + } + + return cp; +} + +void kwsysProcess_Delete(kwsysProcess* cp) +{ + int i; + + /* Make sure we have an instance. */ + if (!cp) { + return; + } + + /* If the process is executing, wait for it to finish. */ + if (cp->State == kwsysProcess_State_Executing) { + if (cp->Detached) { + kwsysProcess_Disown(cp); + } else { + kwsysProcess_WaitForExit(cp, 0); + } + } + + /* We are deleting the kwsysProcess instance. */ + cp->Deleting = 1; + + /* Terminate each of the threads. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + /* Terminate this reading thread. */ + if (cp->Pipe[i].Reader.Thread) { + /* Signal the thread we are ready for it. It will terminate + immediately since Deleting is set. */ + ReleaseSemaphore(cp->Pipe[i].Reader.Ready, 1, 0); + + /* Wait for the thread to exit. */ + WaitForSingleObject(cp->Pipe[i].Reader.Thread, INFINITE); + + /* Close the handle to the thread. */ + kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Thread); + } + + /* Terminate this waking thread. */ + if (cp->Pipe[i].Waker.Thread) { + /* Signal the thread we are ready for it. It will terminate + immediately since Deleting is set. */ + ReleaseSemaphore(cp->Pipe[i].Waker.Ready, 1, 0); + + /* Wait for the thread to exit. */ + WaitForSingleObject(cp->Pipe[i].Waker.Thread, INFINITE); + + /* Close the handle to the thread. */ + kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Thread); + } + + /* Cleanup the pipe's semaphores. */ + kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Ready); + kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Go); + kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Reset); + kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Ready); + kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Go); + kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Reset); + } + + /* Close the shared semaphores. */ + kwsysProcessCleanupHandle(&cp->SharedIndexMutex); + kwsysProcessCleanupHandle(&cp->Full); + + /* Free memory. 
*/ + kwsysProcess_SetCommand(cp, 0); + kwsysProcess_SetWorkingDirectory(cp, 0); + kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDIN, 0); + kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDOUT, 0); + kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDERR, 0); + free(cp->CommandExitCodes); + free(cp->ProcessResults); + free(cp); +} + +int kwsysProcess_SetCommand(kwsysProcess* cp, char const* const* command) +{ + int i; + if (!cp) { + return 0; + } + for (i = 0; i < cp->NumberOfCommands; ++i) { + free(cp->Commands[i]); + } + cp->NumberOfCommands = 0; + if (cp->Commands) { + free(cp->Commands); + cp->Commands = 0; + } + if (command) { + return kwsysProcess_AddCommand(cp, command); + } + return 1; +} + +int kwsysProcess_AddCommand(kwsysProcess* cp, char const* const* command) +{ + int newNumberOfCommands; + wchar_t** newCommands; + + /* Make sure we have a command to add. */ + if (!cp || !command || !*command) { + return 0; + } + + /* Allocate a new array for command pointers. */ + newNumberOfCommands = cp->NumberOfCommands + 1; + if (!(newCommands = + (wchar_t**)malloc(sizeof(wchar_t*) * newNumberOfCommands))) { + /* Out of memory. */ + return 0; + } + + /* Copy any existing commands into the new array. */ + { + int i; + for (i = 0; i < cp->NumberOfCommands; ++i) { + newCommands[i] = cp->Commands[i]; + } + } + + if (cp->Verbatim) { + /* Copy the verbatim command line into the buffer. */ + newCommands[cp->NumberOfCommands] = kwsysEncoding_DupToWide(*command); + } else { + /* Encode the arguments so CommandLineToArgvW can decode + them from the command line string in the child. */ + char buffer[32768]; /* CreateProcess max command-line length. */ + char* end = buffer + sizeof(buffer); + char* out = buffer; + char const* const* a; + for (a = command; *a; ++a) { + int quote = !**a; /* Quote the empty string. */ + int slashes = 0; + char const* c; + if (a != command && out != end) { + *out++ = ' '; + } + for (c = *a; !quote && *c; ++c) { + quote = (*c == ' ' || *c == '\t'); + } + if (quote && out != end) { + *out++ = '"'; + } + for (c = *a; *c; ++c) { + if (*c == '\\') { + ++slashes; + } else { + if (*c == '"') { + // Add n+1 backslashes to total 2n+1 before internal '"'. + while (slashes-- >= 0 && out != end) { + *out++ = '\\'; + } + } + slashes = 0; + } + if (out != end) { + *out++ = *c; + } + } + if (quote) { + // Add n backslashes to total 2n before ending '"'. + while (slashes-- > 0 && out != end) { + *out++ = '\\'; + } + if (out != end) { + *out++ = '"'; + } + } + } + if (out != end) { + *out = '\0'; + newCommands[cp->NumberOfCommands] = kwsysEncoding_DupToWide(buffer); + } else { + newCommands[cp->NumberOfCommands] = 0; + } + } + if (!newCommands[cp->NumberOfCommands]) { + /* Out of memory or command line too long. */ + free(newCommands); + return 0; + } + + /* Save the new array of commands. */ + free(cp->Commands); + cp->Commands = newCommands; + cp->NumberOfCommands = newNumberOfCommands; + return 1; +} + +void kwsysProcess_SetTimeout(kwsysProcess* cp, double timeout) +{ + if (!cp) { + return; + } + cp->Timeout = timeout; + if (cp->Timeout < 0) { + cp->Timeout = 0; + } + // Force recomputation of TimeoutTime. + cp->TimeoutTime.QuadPart = -1; +} + +int kwsysProcess_SetWorkingDirectory(kwsysProcess* cp, const char* dir) +{ + if (!cp) { + return 0; + } + if (cp->WorkingDirectory) { + free(cp->WorkingDirectory); + cp->WorkingDirectory = 0; + } + if (dir && dir[0]) { + wchar_t* wdir = kwsysEncoding_DupToWide(dir); + /* We must convert the working directory to a full path. 
*/ + DWORD length = GetFullPathNameW(wdir, 0, 0, 0); + if (length > 0) { + wchar_t* work_dir = malloc(length * sizeof(wchar_t)); + if (!work_dir) { + free(wdir); + return 0; + } + if (!GetFullPathNameW(wdir, length, work_dir, 0)) { + free(work_dir); + free(wdir); + return 0; + } + cp->WorkingDirectory = work_dir; + } + free(wdir); + } + return 1; +} + +int kwsysProcess_SetPipeFile(kwsysProcess* cp, int pipe, const char* file) +{ + char** pfile; + if (!cp) { + return 0; + } + switch (pipe) { + case kwsysProcess_Pipe_STDIN: + pfile = &cp->PipeFileSTDIN; + break; + case kwsysProcess_Pipe_STDOUT: + pfile = &cp->PipeFileSTDOUT; + break; + case kwsysProcess_Pipe_STDERR: + pfile = &cp->PipeFileSTDERR; + break; + default: + return 0; + } + if (*pfile) { + free(*pfile); + *pfile = 0; + } + if (file) { + *pfile = strdup(file); + if (!*pfile) { + return 0; + } + } + + /* If we are redirecting the pipe, do not share it or use a native + pipe. */ + if (*pfile) { + kwsysProcess_SetPipeNative(cp, pipe, 0); + kwsysProcess_SetPipeShared(cp, pipe, 0); + } + + return 1; +} + +void kwsysProcess_SetPipeShared(kwsysProcess* cp, int pipe, int shared) +{ + if (!cp) { + return; + } + + switch (pipe) { + case kwsysProcess_Pipe_STDIN: + cp->PipeSharedSTDIN = shared ? 1 : 0; + break; + case kwsysProcess_Pipe_STDOUT: + cp->PipeSharedSTDOUT = shared ? 1 : 0; + break; + case kwsysProcess_Pipe_STDERR: + cp->PipeSharedSTDERR = shared ? 1 : 0; + break; + default: + return; + } + + /* If we are sharing the pipe, do not redirect it to a file or use a + native pipe. */ + if (shared) { + kwsysProcess_SetPipeFile(cp, pipe, 0); + kwsysProcess_SetPipeNative(cp, pipe, 0); + } +} + +void kwsysProcess_SetPipeNative(kwsysProcess* cp, int pipe, HANDLE p[2]) +{ + HANDLE* pPipeNative = 0; + + if (!cp) { + return; + } + + switch (pipe) { + case kwsysProcess_Pipe_STDIN: + pPipeNative = cp->PipeNativeSTDIN; + break; + case kwsysProcess_Pipe_STDOUT: + pPipeNative = cp->PipeNativeSTDOUT; + break; + case kwsysProcess_Pipe_STDERR: + pPipeNative = cp->PipeNativeSTDERR; + break; + default: + return; + } + + /* Copy the native pipe handles provided. */ + if (p) { + pPipeNative[0] = p[0]; + pPipeNative[1] = p[1]; + } else { + pPipeNative[0] = 0; + pPipeNative[1] = 0; + } + + /* If we are using a native pipe, do not share it or redirect it to + a file. */ + if (p) { + kwsysProcess_SetPipeFile(cp, pipe, 0); + kwsysProcess_SetPipeShared(cp, pipe, 0); + } +} + +int kwsysProcess_GetOption(kwsysProcess* cp, int optionId) +{ + if (!cp) { + return 0; + } + + switch (optionId) { + case kwsysProcess_Option_Detach: + return cp->OptionDetach; + case kwsysProcess_Option_HideWindow: + return cp->HideWindow; + case kwsysProcess_Option_MergeOutput: + return cp->MergeOutput; + case kwsysProcess_Option_Verbatim: + return cp->Verbatim; + case kwsysProcess_Option_CreateProcessGroup: + return cp->CreateProcessGroup; + default: + return 0; + } +} + +void kwsysProcess_SetOption(kwsysProcess* cp, int optionId, int value) +{ + if (!cp) { + return; + } + + switch (optionId) { + case kwsysProcess_Option_Detach: + cp->OptionDetach = value; + break; + case kwsysProcess_Option_HideWindow: + cp->HideWindow = value; + break; + case kwsysProcess_Option_MergeOutput: + cp->MergeOutput = value; + break; + case kwsysProcess_Option_Verbatim: + cp->Verbatim = value; + break; + case kwsysProcess_Option_CreateProcessGroup: + cp->CreateProcessGroup = value; + break; + default: + break; + } +} + +int kwsysProcess_GetState(kwsysProcess* cp) +{ + return cp ? 
cp->State : kwsysProcess_State_Error; +} + +int kwsysProcess_GetExitException(kwsysProcess* cp) +{ + return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) + ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitException + : kwsysProcess_Exception_Other; +} + +int kwsysProcess_GetExitValue(kwsysProcess* cp) +{ + return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) + ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitValue + : -1; +} + +int kwsysProcess_GetExitCode(kwsysProcess* cp) +{ + return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) + ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitCode + : 0; +} + +const char* kwsysProcess_GetErrorString(kwsysProcess* cp) +{ + if (!cp) { + return "Process management structure could not be allocated"; + } else if (cp->State == kwsysProcess_State_Error) { + return cp->ErrorMessage; + } + return "Success"; +} + +const char* kwsysProcess_GetExceptionString(kwsysProcess* cp) +{ + if (!(cp && cp->ProcessResults && (cp->NumberOfCommands > 0))) { + return "GetExceptionString called with NULL process management structure"; + } else if (cp->State == kwsysProcess_State_Exception) { + return cp->ProcessResults[cp->NumberOfCommands - 1].ExitExceptionString; + } + return "No exception"; +} + +/* the index should be in array bound. */ +#define KWSYSPE_IDX_CHK(RET) \ + if (!cp || idx >= cp->NumberOfCommands || idx < 0) { \ + KWSYSPE_DEBUG((stderr, "array index out of bound\n")); \ + return RET; \ + } + +int kwsysProcess_GetStateByIndex(kwsysProcess* cp, int idx) +{ + KWSYSPE_IDX_CHK(kwsysProcess_State_Error) + return cp->ProcessResults[idx].State; +} + +int kwsysProcess_GetExitExceptionByIndex(kwsysProcess* cp, int idx) +{ + KWSYSPE_IDX_CHK(kwsysProcess_Exception_Other) + return cp->ProcessResults[idx].ExitException; +} + +int kwsysProcess_GetExitValueByIndex(kwsysProcess* cp, int idx) +{ + KWSYSPE_IDX_CHK(-1) + return cp->ProcessResults[idx].ExitValue; +} + +int kwsysProcess_GetExitCodeByIndex(kwsysProcess* cp, int idx) +{ + KWSYSPE_IDX_CHK(-1) + return cp->CommandExitCodes[idx]; +} + +const char* kwsysProcess_GetExceptionStringByIndex(kwsysProcess* cp, int idx) +{ + KWSYSPE_IDX_CHK("GetExceptionString called with NULL process management " + "structure or index out of bound") + if (cp->ProcessResults[idx].State == kwsysProcess_StateByIndex_Exception) { + return cp->ProcessResults[idx].ExitExceptionString; + } + return "No exception"; +} + +#undef KWSYSPE_IDX_CHK + +void kwsysProcess_Execute(kwsysProcess* cp) +{ + int i; + + /* Do not execute a second time. */ + if (!cp || cp->State == kwsysProcess_State_Executing) { + return; + } + + /* Make sure we have something to run. */ + if (cp->NumberOfCommands < 1) { + strcpy(cp->ErrorMessage, "No command"); + cp->State = kwsysProcess_State_Error; + return; + } + + /* Initialize the control structure for a new process. */ + if (!kwsysProcessInitialize(cp)) { + strcpy(cp->ErrorMessage, "Out of memory"); + cp->State = kwsysProcess_State_Error; + return; + } + + /* Save the real working directory of this process and change to + the working directory for the child processes. This is needed + to make pipe file paths evaluate correctly. */ + if (cp->WorkingDirectory) { + if (!GetCurrentDirectoryW(cp->RealWorkingDirectoryLength, + cp->RealWorkingDirectory)) { + kwsysProcessCleanup(cp, GetLastError()); + return; + } + SetCurrentDirectoryW(cp->WorkingDirectory); + } + + /* Setup the stdin pipe for the first process. */ + if (cp->PipeFileSTDIN) { + /* Create a handle to read a file for stdin. 
*/ + wchar_t* wstdin = kwsysEncoding_DupToWide(cp->PipeFileSTDIN); + DWORD error; + cp->PipeChildStd[0] = + CreateFileW(wstdin, GENERIC_READ, FILE_SHARE_READ | FILE_SHARE_WRITE, 0, + OPEN_EXISTING, 0, 0); + error = GetLastError(); /* Check now in case free changes this. */ + free(wstdin); + if (cp->PipeChildStd[0] == INVALID_HANDLE_VALUE) { + kwsysProcessCleanup(cp, error); + return; + } + } else if (cp->PipeSharedSTDIN) { + /* Share this process's stdin with the child. */ + kwsysProcessSetupSharedPipe(STD_INPUT_HANDLE, &cp->PipeChildStd[0]); + } else if (cp->PipeNativeSTDIN[0]) { + /* Use the provided native pipe. */ + kwsysProcessSetupPipeNative(cp->PipeNativeSTDIN[0], &cp->PipeChildStd[0]); + } else { + /* Explicitly give the child no stdin. */ + cp->PipeChildStd[0] = INVALID_HANDLE_VALUE; + } + + /* Create the output pipe for the last process. + We always create this so the pipe thread can run even if we + do not end up giving the write end to the child below. */ + if (!CreatePipe(&cp->Pipe[KWSYSPE_PIPE_STDOUT].Read, + &cp->Pipe[KWSYSPE_PIPE_STDOUT].Write, 0, 0)) { + kwsysProcessCleanup(cp, GetLastError()); + return; + } + + if (cp->PipeFileSTDOUT) { + /* Use a file for stdout. */ + DWORD error = kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[1], + cp->PipeFileSTDOUT); + if (error) { + kwsysProcessCleanup(cp, error); + return; + } + } else if (cp->PipeSharedSTDOUT) { + /* Use the parent stdout. */ + kwsysProcessSetupSharedPipe(STD_OUTPUT_HANDLE, &cp->PipeChildStd[1]); + } else if (cp->PipeNativeSTDOUT[1]) { + /* Use the given handle for stdout. */ + kwsysProcessSetupPipeNative(cp->PipeNativeSTDOUT[1], &cp->PipeChildStd[1]); + } else { + /* Use our pipe for stdout. Duplicate the handle since our waker + thread will use the original. Do not make it inherited yet. */ + if (!DuplicateHandle(GetCurrentProcess(), + cp->Pipe[KWSYSPE_PIPE_STDOUT].Write, + GetCurrentProcess(), &cp->PipeChildStd[1], 0, FALSE, + DUPLICATE_SAME_ACCESS)) { + kwsysProcessCleanup(cp, GetLastError()); + return; + } + } + + /* Create stderr pipe to be shared by all processes in the pipeline. + We always create this so the pipe thread can run even if we do not + end up giving the write end to the child below. */ + if (!CreatePipe(&cp->Pipe[KWSYSPE_PIPE_STDERR].Read, + &cp->Pipe[KWSYSPE_PIPE_STDERR].Write, 0, 0)) { + kwsysProcessCleanup(cp, GetLastError()); + return; + } + + if (cp->PipeFileSTDERR) { + /* Use a file for stderr. */ + DWORD error = kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[2], + cp->PipeFileSTDERR); + if (error) { + kwsysProcessCleanup(cp, error); + return; + } + } else if (cp->PipeSharedSTDERR) { + /* Use the parent stderr. */ + kwsysProcessSetupSharedPipe(STD_ERROR_HANDLE, &cp->PipeChildStd[2]); + } else if (cp->PipeNativeSTDERR[1]) { + /* Use the given handle for stderr. */ + kwsysProcessSetupPipeNative(cp->PipeNativeSTDERR[1], &cp->PipeChildStd[2]); + } else { + /* Use our pipe for stderr. Duplicate the handle since our waker + thread will use the original. Do not make it inherited yet. */ + if (!DuplicateHandle(GetCurrentProcess(), + cp->Pipe[KWSYSPE_PIPE_STDERR].Write, + GetCurrentProcess(), &cp->PipeChildStd[2], 0, FALSE, + DUPLICATE_SAME_ACCESS)) { + kwsysProcessCleanup(cp, GetLastError()); + return; + } + } + + /* Create the pipeline of processes. */ + { + /* Child startup control data. */ + kwsysProcessCreateInformation si; + HANDLE nextStdInput = cp->PipeChildStd[0]; + + /* Initialize startup info data. 
*/ + ZeroMemory(&si, sizeof(si)); + si.StartupInfo.cb = sizeof(si.StartupInfo); + + /* Decide whether a child window should be shown. */ + si.StartupInfo.dwFlags |= STARTF_USESHOWWINDOW; + si.StartupInfo.wShowWindow = + (unsigned short)(cp->HideWindow ? SW_HIDE : SW_SHOWDEFAULT); + + /* Connect the child's output pipes to the threads. */ + si.StartupInfo.dwFlags |= STARTF_USESTDHANDLES; + + for (i = 0; i < cp->NumberOfCommands; ++i) { + /* Setup the process's pipes. */ + si.hStdInput = nextStdInput; + if (i == cp->NumberOfCommands - 1) { + /* The last child gets the overall stdout. */ + nextStdInput = INVALID_HANDLE_VALUE; + si.hStdOutput = cp->PipeChildStd[1]; + } else { + /* Create a pipe to sit between the children. */ + HANDLE p[2] = { INVALID_HANDLE_VALUE, INVALID_HANDLE_VALUE }; + if (!CreatePipe(&p[0], &p[1], 0, 0)) { + DWORD error = GetLastError(); + if (nextStdInput != cp->PipeChildStd[0]) { + kwsysProcessCleanupHandle(&nextStdInput); + } + kwsysProcessCleanup(cp, error); + return; + } + nextStdInput = p[0]; + si.hStdOutput = p[1]; + } + si.hStdError = + cp->MergeOutput ? cp->PipeChildStd[1] : cp->PipeChildStd[2]; + + { + DWORD error = kwsysProcessCreate(cp, i, &si); + + /* Close our copies of pipes used between children. */ + if (si.hStdInput != cp->PipeChildStd[0]) { + kwsysProcessCleanupHandle(&si.hStdInput); + } + if (si.hStdOutput != cp->PipeChildStd[1]) { + kwsysProcessCleanupHandle(&si.hStdOutput); + } + if (si.hStdError != cp->PipeChildStd[2] && !cp->MergeOutput) { + kwsysProcessCleanupHandle(&si.hStdError); + } + if (!error) { + cp->ProcessEvents[i + 1] = cp->ProcessInformation[i].hProcess; + } else { + if (nextStdInput != cp->PipeChildStd[0]) { + kwsysProcessCleanupHandle(&nextStdInput); + } + kwsysProcessCleanup(cp, error); + return; + } + } + } + } + + /* The parent process does not need the child's pipe ends. */ + for (i = 0; i < 3; ++i) { + kwsysProcessCleanupHandle(&cp->PipeChildStd[i]); + } + + /* Restore the working directory. */ + if (cp->RealWorkingDirectory) { + SetCurrentDirectoryW(cp->RealWorkingDirectory); + free(cp->RealWorkingDirectory); + cp->RealWorkingDirectory = 0; + } + + /* The timeout period starts now. */ + cp->StartTime = kwsysProcessTimeGetCurrent(); + cp->TimeoutTime = kwsysProcessTimeFromDouble(-1); + + /* All processes in the pipeline have been started in suspended + mode. Resume them all now. */ + for (i = 0; i < cp->NumberOfCommands; ++i) { + ResumeThread(cp->ProcessInformation[i].hThread); + } + + /* ---- It is no longer safe to call kwsysProcessCleanup. ----- */ + /* Tell the pipe threads that a process has started. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + ReleaseSemaphore(cp->Pipe[i].Reader.Ready, 1, 0); + ReleaseSemaphore(cp->Pipe[i].Waker.Ready, 1, 0); + } + + /* We don't care about the children's main threads. */ + for (i = 0; i < cp->NumberOfCommands; ++i) { + kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hThread); + } + + /* No pipe has reported data. */ + cp->CurrentIndex = KWSYSPE_PIPE_COUNT; + cp->PipesLeft = KWSYSPE_PIPE_COUNT; + + /* The process has now started. */ + cp->State = kwsysProcess_State_Executing; + cp->Detached = cp->OptionDetach; +} + +void kwsysProcess_Disown(kwsysProcess* cp) +{ + int i; + + /* Make sure we are executing a detached process. */ + if (!cp || !cp->Detached || cp->State != kwsysProcess_State_Executing || + cp->TimeoutExpired || cp->Killed || cp->Terminated) { + return; + } + + /* Disable the reading threads. 
*/ + kwsysProcessDisablePipeThreads(cp); + + /* Wait for all pipe threads to reset. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + WaitForSingleObject(cp->Pipe[i].Reader.Reset, INFINITE); + WaitForSingleObject(cp->Pipe[i].Waker.Reset, INFINITE); + } + + /* We will not wait for exit, so cleanup now. */ + kwsysProcessCleanup(cp, 0); + + /* The process has been disowned. */ + cp->State = kwsysProcess_State_Disowned; +} + +int kwsysProcess_WaitForData(kwsysProcess* cp, char** data, int* length, + double* userTimeout) +{ + kwsysProcessTime userStartTime; + kwsysProcessTime timeoutLength; + kwsysProcessTime timeoutTime; + DWORD timeout; + int user; + int done = 0; + int expired = 0; + int pipeId = kwsysProcess_Pipe_None; + DWORD w; + + /* Make sure we are executing a process. */ + if (!cp || cp->State != kwsysProcess_State_Executing || cp->Killed || + cp->TimeoutExpired) { + return kwsysProcess_Pipe_None; + } + + /* Record the time at which user timeout period starts. */ + userStartTime = kwsysProcessTimeGetCurrent(); + + /* Calculate the time at which a timeout will expire, and whether it + is the user or process timeout. */ + user = kwsysProcessGetTimeoutTime(cp, userTimeout, &timeoutTime); + + /* Loop until we have a reason to return. */ + while (!done && cp->PipesLeft > 0) { + /* If we previously got data from a thread, let it know we are + done with the data. */ + if (cp->CurrentIndex < KWSYSPE_PIPE_COUNT) { + KWSYSPE_DEBUG((stderr, "releasing reader %d\n", cp->CurrentIndex)); + ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0); + cp->CurrentIndex = KWSYSPE_PIPE_COUNT; + } + + /* Setup a timeout if required. */ + if (kwsysProcessGetTimeoutLeft(&timeoutTime, user ? userTimeout : 0, + &timeoutLength)) { + /* Timeout has already expired. */ + expired = 1; + break; + } + if (timeoutTime.QuadPart < 0) { + timeout = INFINITE; + } else { + timeout = kwsysProcessTimeToDWORD(timeoutLength); + } + + /* Wait for a pipe's thread to signal or a process to terminate. */ + w = WaitForMultipleObjects(cp->ProcessEventsLength, cp->ProcessEvents, 0, + timeout); + if (w == WAIT_TIMEOUT) { + /* Timeout has expired. */ + expired = 1; + done = 1; + } else if (w == WAIT_OBJECT_0) { + /* Save the index of the reporting thread and release the mutex. + The thread will block until we signal its Empty mutex. */ + cp->CurrentIndex = cp->SharedIndex; + ReleaseSemaphore(cp->SharedIndexMutex, 1, 0); + + /* Data are available or a pipe closed. */ + if (cp->Pipe[cp->CurrentIndex].Closed) { + /* The pipe closed at the write end. Close the read end and + inform the wakeup thread it is done with this process. */ + kwsysProcessCleanupHandle(&cp->Pipe[cp->CurrentIndex].Read); + ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Waker.Go, 1, 0); + KWSYSPE_DEBUG((stderr, "wakeup %d\n", cp->CurrentIndex)); + --cp->PipesLeft; + } else if (data && length) { + /* Report this data. */ + *data = cp->Pipe[cp->CurrentIndex].DataBuffer; + *length = cp->Pipe[cp->CurrentIndex].DataLength; + switch (cp->CurrentIndex) { + case KWSYSPE_PIPE_STDOUT: + pipeId = kwsysProcess_Pipe_STDOUT; + break; + case KWSYSPE_PIPE_STDERR: + pipeId = kwsysProcess_Pipe_STDERR; + break; + } + done = 1; + } + } else { + /* A process has terminated. */ + kwsysProcessDestroy(cp, w - WAIT_OBJECT_0); + } + } + + /* Update the user timeout. 
*/ + if (userTimeout) { + kwsysProcessTime userEndTime = kwsysProcessTimeGetCurrent(); + kwsysProcessTime difference = + kwsysProcessTimeSubtract(userEndTime, userStartTime); + double d = kwsysProcessTimeToDouble(difference); + *userTimeout -= d; + if (*userTimeout < 0) { + *userTimeout = 0; + } + } + + /* Check what happened. */ + if (pipeId) { + /* Data are ready on a pipe. */ + return pipeId; + } else if (expired) { + /* A timeout has expired. */ + if (user) { + /* The user timeout has expired. It has no time left. */ + return kwsysProcess_Pipe_Timeout; + } else { + /* The process timeout has expired. Kill the child now. */ + KWSYSPE_DEBUG((stderr, "killing child because timeout expired\n")); + kwsysProcess_Kill(cp); + cp->TimeoutExpired = 1; + cp->Killed = 0; + return kwsysProcess_Pipe_None; + } + } else { + /* The children have terminated and no more data are available. */ + return kwsysProcess_Pipe_None; + } +} + +int kwsysProcess_WaitForExit(kwsysProcess* cp, double* userTimeout) +{ + int i; + int pipe; + + /* Make sure we are executing a process. */ + if (!cp || cp->State != kwsysProcess_State_Executing) { + return 1; + } + + /* Wait for the process to terminate. Ignore all data. */ + while ((pipe = kwsysProcess_WaitForData(cp, 0, 0, userTimeout)) > 0) { + if (pipe == kwsysProcess_Pipe_Timeout) { + /* The user timeout has expired. */ + return 0; + } + } + + KWSYSPE_DEBUG((stderr, "no more data\n")); + + /* When the last pipe closes in WaitForData, the loop terminates + without releasing the pipe's thread. Release it now. */ + if (cp->CurrentIndex < KWSYSPE_PIPE_COUNT) { + KWSYSPE_DEBUG((stderr, "releasing reader %d\n", cp->CurrentIndex)); + ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0); + cp->CurrentIndex = KWSYSPE_PIPE_COUNT; + } + + /* Wait for all pipe threads to reset. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + KWSYSPE_DEBUG((stderr, "waiting reader reset %d\n", i)); + WaitForSingleObject(cp->Pipe[i].Reader.Reset, INFINITE); + KWSYSPE_DEBUG((stderr, "waiting waker reset %d\n", i)); + WaitForSingleObject(cp->Pipe[i].Waker.Reset, INFINITE); + } + + /* ---- It is now safe again to call kwsysProcessCleanup. ----- */ + /* Close all the pipes. */ + kwsysProcessCleanup(cp, 0); + + /* Determine the outcome. */ + if (cp->Killed) { + /* We killed the child. */ + cp->State = kwsysProcess_State_Killed; + } else if (cp->TimeoutExpired) { + /* The timeout expired. */ + cp->State = kwsysProcess_State_Expired; + } else { + /* The children exited. Report the outcome of the child processes. */ + for (i = 0; i < cp->NumberOfCommands; ++i) { + cp->ProcessResults[i].ExitCode = cp->CommandExitCodes[i]; + if ((cp->ProcessResults[i].ExitCode & 0xF0000000) == 0xC0000000) { + /* Child terminated due to exceptional behavior. */ + cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Exception; + cp->ProcessResults[i].ExitValue = 1; + kwsysProcessSetExitExceptionByIndex(cp, cp->ProcessResults[i].ExitCode, + i); + } else { + /* Child exited without exception. */ + cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Exited; + cp->ProcessResults[i].ExitException = kwsysProcess_Exception_None; + cp->ProcessResults[i].ExitValue = cp->ProcessResults[i].ExitCode; + } + } + /* support legacy state status value */ + cp->State = cp->ProcessResults[cp->NumberOfCommands - 1].State; + } + + return 1; +} + +void kwsysProcess_Interrupt(kwsysProcess* cp) +{ + int i; + /* Make sure we are executing a process. 
*/ + if (!cp || cp->State != kwsysProcess_State_Executing || cp->TimeoutExpired || + cp->Killed) { + KWSYSPE_DEBUG((stderr, "interrupt: child not executing\n")); + return; + } + + /* Skip actually interrupting the child if it has already terminated. */ + if (cp->Terminated) { + KWSYSPE_DEBUG((stderr, "interrupt: child already terminated\n")); + return; + } + + /* Interrupt the children. */ + if (cp->CreateProcessGroup) { + if (cp->ProcessInformation) { + for (i = 0; i < cp->NumberOfCommands; ++i) { + /* Make sure the process handle isn't closed (e.g. from disowning). */ + if (cp->ProcessInformation[i].hProcess) { + /* The user created a process group for this process. The group ID + is the process ID for the original process in the group. Note + that we have to use Ctrl+Break: Ctrl+C is not allowed for process + groups. */ + GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, + cp->ProcessInformation[i].dwProcessId); + } + } + } + } else { + /* No process group was created. Kill our own process group... */ + GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, 0); + } +} + +void kwsysProcess_Kill(kwsysProcess* cp) +{ + int i; + /* Make sure we are executing a process. */ + if (!cp || cp->State != kwsysProcess_State_Executing || cp->TimeoutExpired || + cp->Killed) { + KWSYSPE_DEBUG((stderr, "kill: child not executing\n")); + return; + } + + /* Disable the reading threads. */ + KWSYSPE_DEBUG((stderr, "kill: disabling pipe threads\n")); + kwsysProcessDisablePipeThreads(cp); + + /* Skip actually killing the child if it has already terminated. */ + if (cp->Terminated) { + KWSYSPE_DEBUG((stderr, "kill: child already terminated\n")); + return; + } + + /* Kill the children. */ + cp->Killed = 1; + for (i = 0; i < cp->NumberOfCommands; ++i) { + kwsysProcessKillTree(cp->ProcessInformation[i].dwProcessId); + /* Remove from global list of processes and close handles. */ + kwsysProcessesRemove(cp->ProcessInformation[i].hProcess); + kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hThread); + kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hProcess); + } + + /* We are killing the children and ignoring all data. Do not wait + for them to exit. */ +} + +void kwsysProcess_KillPID(unsigned long process_id) +{ + kwsysProcessKillTree((DWORD)process_id); +} + +/* + Function executed for each pipe's thread. Argument is a pointer to + the kwsysProcessPipeData instance for this thread. +*/ +DWORD WINAPI kwsysProcessPipeThreadRead(LPVOID ptd) +{ + kwsysProcessPipeData* td = (kwsysProcessPipeData*)ptd; + kwsysProcess* cp = td->Process; + + /* Wait for a process to be ready. */ + while ((WaitForSingleObject(td->Reader.Ready, INFINITE), !cp->Deleting)) { + /* Read output from the process for this thread's pipe. */ + kwsysProcessPipeThreadReadPipe(cp, td); + + /* Signal the main thread we have reset for a new process. */ + ReleaseSemaphore(td->Reader.Reset, 1, 0); + } + return 0; +} + +/* + Function called in each pipe's thread to handle data for one + execution of a subprocess. +*/ +void kwsysProcessPipeThreadReadPipe(kwsysProcess* cp, kwsysProcessPipeData* td) +{ + /* Wait for space in the thread's buffer. */ + while ((KWSYSPE_DEBUG((stderr, "wait for read %d\n", td->Index)), + WaitForSingleObject(td->Reader.Go, INFINITE), !td->Closed)) { + KWSYSPE_DEBUG((stderr, "reading %d\n", td->Index)); + + /* Read data from the pipe. This may block until data are available. 
*/ + if (!ReadFile(td->Read, td->DataBuffer, KWSYSPE_PIPE_BUFFER_SIZE, + &td->DataLength, 0)) { + if (GetLastError() != ERROR_BROKEN_PIPE) { + /* UNEXPECTED failure to read the pipe. */ + } + + /* The pipe closed. There are no more data to read. */ + td->Closed = 1; + KWSYSPE_DEBUG((stderr, "read closed %d\n", td->Index)); + } + + KWSYSPE_DEBUG((stderr, "read %d\n", td->Index)); + + /* Wait for our turn to be handled by the main thread. */ + WaitForSingleObject(cp->SharedIndexMutex, INFINITE); + + KWSYSPE_DEBUG((stderr, "reporting read %d\n", td->Index)); + + /* Tell the main thread we have something to report. */ + cp->SharedIndex = td->Index; + ReleaseSemaphore(cp->Full, 1, 0); + } + + /* We were signalled to exit with our buffer empty. Reset the + mutex for a new process. */ + KWSYSPE_DEBUG((stderr, "self releasing reader %d\n", td->Index)); + ReleaseSemaphore(td->Reader.Go, 1, 0); +} + +/* + Function executed for each pipe's thread. Argument is a pointer to + the kwsysProcessPipeData instance for this thread. +*/ +DWORD WINAPI kwsysProcessPipeThreadWake(LPVOID ptd) +{ + kwsysProcessPipeData* td = (kwsysProcessPipeData*)ptd; + kwsysProcess* cp = td->Process; + + /* Wait for a process to be ready. */ + while ((WaitForSingleObject(td->Waker.Ready, INFINITE), !cp->Deleting)) { + /* Wait for a possible wakeup. */ + kwsysProcessPipeThreadWakePipe(cp, td); + + /* Signal the main thread we have reset for a new process. */ + ReleaseSemaphore(td->Waker.Reset, 1, 0); + } + return 0; +} + +/* + Function called in each pipe's thread to handle reading thread + wakeup for one execution of a subprocess. +*/ +void kwsysProcessPipeThreadWakePipe(kwsysProcess* cp, kwsysProcessPipeData* td) +{ + (void)cp; + + /* Wait for a possible wake command. */ + KWSYSPE_DEBUG((stderr, "wait for wake %d\n", td->Index)); + WaitForSingleObject(td->Waker.Go, INFINITE); + KWSYSPE_DEBUG((stderr, "waking %d\n", td->Index)); + + /* If the pipe is not closed, we need to wake up the reading thread. */ + if (!td->Closed) { + DWORD dummy; + KWSYSPE_DEBUG((stderr, "waker %d writing byte\n", td->Index)); + WriteFile(td->Write, "", 1, &dummy, 0); + KWSYSPE_DEBUG((stderr, "waker %d wrote byte\n", td->Index)); + } +} + +/* Initialize a process control structure for kwsysProcess_Execute. */ +int kwsysProcessInitialize(kwsysProcess* cp) +{ + int i; + /* Reset internal status flags. */ + cp->TimeoutExpired = 0; + cp->Terminated = 0; + cp->Killed = 0; + + free(cp->ProcessResults); + /* Allocate process result information for each process. */ + cp->ProcessResults = (kwsysProcessResults*)malloc( + sizeof(kwsysProcessResults) * (cp->NumberOfCommands)); + if (!cp->ProcessResults) { + return 0; + } + ZeroMemory(cp->ProcessResults, + sizeof(kwsysProcessResults) * cp->NumberOfCommands); + for (i = 0; i < cp->NumberOfCommands; i++) { + cp->ProcessResults[i].ExitException = kwsysProcess_Exception_None; + cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Starting; + cp->ProcessResults[i].ExitCode = 1; + cp->ProcessResults[i].ExitValue = 1; + strcpy(cp->ProcessResults[i].ExitExceptionString, "No exception"); + } + + /* Allocate process information for each process. 
*/ + free(cp->ProcessInformation); + cp->ProcessInformation = (PROCESS_INFORMATION*)malloc( + sizeof(PROCESS_INFORMATION) * cp->NumberOfCommands); + if (!cp->ProcessInformation) { + return 0; + } + ZeroMemory(cp->ProcessInformation, + sizeof(PROCESS_INFORMATION) * cp->NumberOfCommands); + free(cp->CommandExitCodes); + cp->CommandExitCodes = (DWORD*)malloc(sizeof(DWORD) * cp->NumberOfCommands); + if (!cp->CommandExitCodes) { + return 0; + } + ZeroMemory(cp->CommandExitCodes, sizeof(DWORD) * cp->NumberOfCommands); + + /* Allocate event wait array. The first event is cp->Full, the rest + are the process termination events. */ + cp->ProcessEvents = + (PHANDLE)malloc(sizeof(HANDLE) * (cp->NumberOfCommands + 1)); + if (!cp->ProcessEvents) { + return 0; + } + ZeroMemory(cp->ProcessEvents, sizeof(HANDLE) * (cp->NumberOfCommands + 1)); + cp->ProcessEvents[0] = cp->Full; + cp->ProcessEventsLength = cp->NumberOfCommands + 1; + + /* Allocate space to save the real working directory of this process. */ + if (cp->WorkingDirectory) { + cp->RealWorkingDirectoryLength = GetCurrentDirectoryW(0, 0); + if (cp->RealWorkingDirectoryLength > 0) { + cp->RealWorkingDirectory = + malloc(cp->RealWorkingDirectoryLength * sizeof(wchar_t)); + if (!cp->RealWorkingDirectory) { + return 0; + } + } + } + { + for (i = 0; i < 3; ++i) { + cp->PipeChildStd[i] = INVALID_HANDLE_VALUE; + } + } + + return 1; +} + +static DWORD kwsysProcessCreateChildHandle(PHANDLE out, HANDLE in, int isStdIn) +{ + DWORD flags; + + /* Check whether the handle is valid for this process. */ + if (in != INVALID_HANDLE_VALUE && GetHandleInformation(in, &flags)) { + /* Use the handle as-is if it is already inherited. */ + if (flags & HANDLE_FLAG_INHERIT) { + *out = in; + return ERROR_SUCCESS; + } + + /* Create an inherited copy of this handle. */ + if (DuplicateHandle(GetCurrentProcess(), in, GetCurrentProcess(), out, 0, + TRUE, DUPLICATE_SAME_ACCESS)) { + return ERROR_SUCCESS; + } else { + return GetLastError(); + } + } else { + /* The given handle is not valid for this process. Some child + processes may break if they do not have a valid standard handle, + so open NUL to give to the child. */ + SECURITY_ATTRIBUTES sa; + ZeroMemory(&sa, sizeof(sa)); + sa.nLength = (DWORD)sizeof(sa); + sa.bInheritHandle = 1; + *out = CreateFileW( + L"NUL", + (isStdIn ? GENERIC_READ : (GENERIC_WRITE | FILE_READ_ATTRIBUTES)), + FILE_SHARE_READ | FILE_SHARE_WRITE, &sa, OPEN_EXISTING, 0, 0); + return (*out != INVALID_HANDLE_VALUE) ? ERROR_SUCCESS : GetLastError(); + } +} + +DWORD kwsysProcessCreate(kwsysProcess* cp, int index, + kwsysProcessCreateInformation* si) +{ + DWORD creationFlags; + DWORD error = ERROR_SUCCESS; + + /* Check if we are currently exiting. */ + if (!kwsysTryEnterCreateProcessSection()) { + /* The Ctrl handler is currently working on exiting our process. Rather + than return an error code, which could cause incorrect conclusions to be + reached by the caller, we simply hang. (For example, a CMake try_run + configure step might cause the project to configure wrong.) */ + Sleep(INFINITE); + } + + /* Create the child in a suspended state so we can wait until all + children have been created before running any one. */ + creationFlags = CREATE_SUSPENDED; + if (cp->CreateProcessGroup) { + creationFlags |= CREATE_NEW_PROCESS_GROUP; + } + + /* Create inherited copies of the handles. 
*/ + (error = kwsysProcessCreateChildHandle(&si->StartupInfo.hStdInput, + si->hStdInput, 1)) || + (error = kwsysProcessCreateChildHandle(&si->StartupInfo.hStdOutput, + si->hStdOutput, 0)) || + (error = kwsysProcessCreateChildHandle(&si->StartupInfo.hStdError, + si->hStdError, 0)) || + /* Create the process. */ + (!CreateProcessW(0, cp->Commands[index], 0, 0, TRUE, creationFlags, 0, 0, + &si->StartupInfo, &cp->ProcessInformation[index]) && + (error = GetLastError())); + + /* Close the inherited copies of the handles. */ + if (si->StartupInfo.hStdInput != si->hStdInput) { + kwsysProcessCleanupHandle(&si->StartupInfo.hStdInput); + } + if (si->StartupInfo.hStdOutput != si->hStdOutput) { + kwsysProcessCleanupHandle(&si->StartupInfo.hStdOutput); + } + if (si->StartupInfo.hStdError != si->hStdError) { + kwsysProcessCleanupHandle(&si->StartupInfo.hStdError); + } + + /* Add the process to the global list of processes. */ + if (!error && + !kwsysProcessesAdd(cp->ProcessInformation[index].hProcess, + cp->ProcessInformation[index].dwProcessId, + cp->CreateProcessGroup)) { + /* This failed for some reason. Kill the suspended process. */ + TerminateProcess(cp->ProcessInformation[index].hProcess, 1); + /* And clean up... */ + kwsysProcessCleanupHandle(&cp->ProcessInformation[index].hProcess); + kwsysProcessCleanupHandle(&cp->ProcessInformation[index].hThread); + strcpy(cp->ErrorMessage, "kwsysProcessesAdd function failed"); + error = ERROR_NOT_ENOUGH_MEMORY; /* Most likely reason. */ + } + + /* If the console Ctrl handler is waiting for us, this will release it... */ + kwsysLeaveCreateProcessSection(); + return error; +} + +void kwsysProcessDestroy(kwsysProcess* cp, int event) +{ + int i; + int index; + + /* Find the process index for the termination event. */ + for (index = 0; index < cp->NumberOfCommands; ++index) { + if (cp->ProcessInformation[index].hProcess == cp->ProcessEvents[event]) { + break; + } + } + + /* Check the exit code of the process. */ + GetExitCodeProcess(cp->ProcessInformation[index].hProcess, + &cp->CommandExitCodes[index]); + + /* Remove from global list of processes. */ + kwsysProcessesRemove(cp->ProcessInformation[index].hProcess); + + /* Close the process handle for the terminated process. */ + kwsysProcessCleanupHandle(&cp->ProcessInformation[index].hProcess); + + /* Remove the process from the available events. */ + cp->ProcessEventsLength -= 1; + for (i = event; i < cp->ProcessEventsLength; ++i) { + cp->ProcessEvents[i] = cp->ProcessEvents[i + 1]; + } + + /* Check if all processes have terminated. */ + if (cp->ProcessEventsLength == 1) { + cp->Terminated = 1; + + /* Close our copies of the pipe write handles so the pipe threads + can detect end-of-data. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + /* TODO: If the child created its own child (our grandchild) + which inherited a copy of the pipe write-end then the pipe + may not close and we will still need the waker write pipe. + However we still want to be able to detect end-of-data in the + normal case. The reader thread will have to switch to using + PeekNamedPipe to read the last bit of data from the pipe + without blocking. This is equivalent to using a non-blocking + read on posix. */ + KWSYSPE_DEBUG((stderr, "closing wakeup write %d\n", i)); + kwsysProcessCleanupHandle(&cp->Pipe[i].Write); + } + } +} + +DWORD kwsysProcessSetupOutputPipeFile(PHANDLE phandle, const char* name) +{ + HANDLE fout; + wchar_t* wname; + DWORD error; + if (!name) { + return ERROR_INVALID_PARAMETER; + } + + /* Close the existing handle. 
*/ + kwsysProcessCleanupHandle(phandle); + + /* Create a handle to write a file for the pipe. */ + wname = kwsysEncoding_DupToWide(name); + fout = + CreateFileW(wname, GENERIC_WRITE, FILE_SHARE_READ, 0, CREATE_ALWAYS, 0, 0); + error = GetLastError(); + free(wname); + if (fout == INVALID_HANDLE_VALUE) { + return error; + } + + /* Assign the replacement handle. */ + *phandle = fout; + return ERROR_SUCCESS; +} + +void kwsysProcessSetupSharedPipe(DWORD nStdHandle, PHANDLE handle) +{ + /* Close the existing handle. */ + kwsysProcessCleanupHandle(handle); + /* Store the new standard handle. */ + *handle = GetStdHandle(nStdHandle); +} + +void kwsysProcessSetupPipeNative(HANDLE native, PHANDLE handle) +{ + /* Close the existing handle. */ + kwsysProcessCleanupHandle(handle); + /* Store the new given handle. */ + *handle = native; +} + +/* Close the given handle if it is open. Reset its value to 0. */ +void kwsysProcessCleanupHandle(PHANDLE h) +{ + if (h && *h && *h != INVALID_HANDLE_VALUE && + *h != GetStdHandle(STD_INPUT_HANDLE) && + *h != GetStdHandle(STD_OUTPUT_HANDLE) && + *h != GetStdHandle(STD_ERROR_HANDLE)) { + CloseHandle(*h); + *h = INVALID_HANDLE_VALUE; + } +} + +/* Close all handles created by kwsysProcess_Execute. */ +void kwsysProcessCleanup(kwsysProcess* cp, DWORD error) +{ + int i; + /* If this is an error case, report the error. */ + if (error) { + /* Construct an error message if one has not been provided already. */ + if (cp->ErrorMessage[0] == 0) { + /* Format the error message. */ + wchar_t err_msg[KWSYSPE_PIPE_BUFFER_SIZE]; + DWORD length = FormatMessageW( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, 0, error, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), err_msg, + KWSYSPE_PIPE_BUFFER_SIZE, 0); + if (length < 1) { + /* FormatMessage failed. Use a default message. */ + _snprintf(cp->ErrorMessage, KWSYSPE_PIPE_BUFFER_SIZE, + "Process execution failed with error 0x%X. " + "FormatMessage failed with error 0x%X", + error, GetLastError()); + } + if (!WideCharToMultiByte(CP_UTF8, 0, err_msg, -1, cp->ErrorMessage, + KWSYSPE_PIPE_BUFFER_SIZE, NULL, NULL)) { + /* WideCharToMultiByte failed. Use a default message. */ + _snprintf(cp->ErrorMessage, KWSYSPE_PIPE_BUFFER_SIZE, + "Process execution failed with error 0x%X. " + "WideCharToMultiByte failed with error 0x%X", + error, GetLastError()); + } + } + + /* Remove trailing period and newline, if any. */ + kwsysProcessCleanErrorMessage(cp); + + /* Set the error state. */ + cp->State = kwsysProcess_State_Error; + + /* Cleanup any processes already started in a suspended state. */ + if (cp->ProcessInformation) { + for (i = 0; i < cp->NumberOfCommands; ++i) { + if (cp->ProcessInformation[i].hProcess) { + TerminateProcess(cp->ProcessInformation[i].hProcess, 255); + WaitForSingleObject(cp->ProcessInformation[i].hProcess, INFINITE); + } + } + for (i = 0; i < cp->NumberOfCommands; ++i) { + /* Remove from global list of processes and close handles. */ + kwsysProcessesRemove(cp->ProcessInformation[i].hProcess); + kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hThread); + kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hProcess); + } + } + + /* Restore the working directory. */ + if (cp->RealWorkingDirectory) { + SetCurrentDirectoryW(cp->RealWorkingDirectory); + } + } + + /* Free memory. 
*/ + if (cp->ProcessInformation) { + free(cp->ProcessInformation); + cp->ProcessInformation = 0; + } + if (cp->ProcessEvents) { + free(cp->ProcessEvents); + cp->ProcessEvents = 0; + } + if (cp->RealWorkingDirectory) { + free(cp->RealWorkingDirectory); + cp->RealWorkingDirectory = 0; + } + + /* Close each pipe. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + kwsysProcessCleanupHandle(&cp->Pipe[i].Write); + kwsysProcessCleanupHandle(&cp->Pipe[i].Read); + cp->Pipe[i].Closed = 0; + } + for (i = 0; i < 3; ++i) { + kwsysProcessCleanupHandle(&cp->PipeChildStd[i]); + } +} + +void kwsysProcessCleanErrorMessage(kwsysProcess* cp) +{ + /* Remove trailing period and newline, if any. */ + size_t length = strlen(cp->ErrorMessage); + if (cp->ErrorMessage[length - 1] == '\n') { + cp->ErrorMessage[length - 1] = 0; + --length; + if (length > 0 && cp->ErrorMessage[length - 1] == '\r') { + cp->ErrorMessage[length - 1] = 0; + --length; + } + } + if (length > 0 && cp->ErrorMessage[length - 1] == '.') { + cp->ErrorMessage[length - 1] = 0; + } +} + +/* Get the time at which either the process or user timeout will + expire. Returns 1 if the user timeout is first, and 0 otherwise. */ +int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout, + kwsysProcessTime* timeoutTime) +{ + /* The first time this is called, we need to calculate the time at + which the child will timeout. */ + if (cp->Timeout && cp->TimeoutTime.QuadPart < 0) { + kwsysProcessTime length = kwsysProcessTimeFromDouble(cp->Timeout); + cp->TimeoutTime = kwsysProcessTimeAdd(cp->StartTime, length); + } + + /* Start with process timeout. */ + *timeoutTime = cp->TimeoutTime; + + /* Check if the user timeout is earlier. */ + if (userTimeout) { + kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent(); + kwsysProcessTime userTimeoutLength = + kwsysProcessTimeFromDouble(*userTimeout); + kwsysProcessTime userTimeoutTime = + kwsysProcessTimeAdd(currentTime, userTimeoutLength); + if (timeoutTime->QuadPart < 0 || + kwsysProcessTimeLess(userTimeoutTime, *timeoutTime)) { + *timeoutTime = userTimeoutTime; + return 1; + } + } + return 0; +} + +/* Get the length of time before the given timeout time arrives. + Returns 1 if the time has already arrived, and 0 otherwise. */ +int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime, + double* userTimeout, + kwsysProcessTime* timeoutLength) +{ + if (timeoutTime->QuadPart < 0) { + /* No timeout time has been requested. */ + return 0; + } else { + /* Calculate the remaining time. */ + kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent(); + *timeoutLength = kwsysProcessTimeSubtract(*timeoutTime, currentTime); + + if (timeoutLength->QuadPart < 0 && userTimeout && *userTimeout <= 0) { + /* Caller has explicitly requested a zero timeout. */ + timeoutLength->QuadPart = 0; + } + + if (timeoutLength->QuadPart < 0) { + /* Timeout has already expired. */ + return 1; + } else { + /* There is some time left. 
*/ + return 0; + } + } +} + +kwsysProcessTime kwsysProcessTimeGetCurrent() +{ + kwsysProcessTime current; + FILETIME ft; + GetSystemTimeAsFileTime(&ft); + current.LowPart = ft.dwLowDateTime; + current.HighPart = ft.dwHighDateTime; + return current; +} + +DWORD kwsysProcessTimeToDWORD(kwsysProcessTime t) +{ + return (DWORD)(t.QuadPart * 0.0001); +} + +double kwsysProcessTimeToDouble(kwsysProcessTime t) +{ + return t.QuadPart * 0.0000001; +} + +kwsysProcessTime kwsysProcessTimeFromDouble(double d) +{ + kwsysProcessTime t; + t.QuadPart = (LONGLONG)(d * 10000000); + return t; +} + +int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2) +{ + return in1.QuadPart < in2.QuadPart; +} + +kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1, + kwsysProcessTime in2) +{ + kwsysProcessTime out; + out.QuadPart = in1.QuadPart + in2.QuadPart; + return out; +} + +kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1, + kwsysProcessTime in2) +{ + kwsysProcessTime out; + out.QuadPart = in1.QuadPart - in2.QuadPart; + return out; +} + +#define KWSYSPE_CASE(type, str) \ + cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_##type; \ + strcpy(cp->ProcessResults[idx].ExitExceptionString, str) +static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int code, + int idx) +{ + switch (code) { + case STATUS_CONTROL_C_EXIT: + KWSYSPE_CASE(Interrupt, "User interrupt"); + break; + + case STATUS_FLOAT_DENORMAL_OPERAND: + KWSYSPE_CASE(Numerical, "Floating-point exception (denormal operand)"); + break; + case STATUS_FLOAT_DIVIDE_BY_ZERO: + KWSYSPE_CASE(Numerical, "Divide-by-zero"); + break; + case STATUS_FLOAT_INEXACT_RESULT: + KWSYSPE_CASE(Numerical, "Floating-point exception (inexact result)"); + break; + case STATUS_FLOAT_INVALID_OPERATION: + KWSYSPE_CASE(Numerical, "Invalid floating-point operation"); + break; + case STATUS_FLOAT_OVERFLOW: + KWSYSPE_CASE(Numerical, "Floating-point overflow"); + break; + case STATUS_FLOAT_STACK_CHECK: + KWSYSPE_CASE(Numerical, "Floating-point stack check failed"); + break; + case STATUS_FLOAT_UNDERFLOW: + KWSYSPE_CASE(Numerical, "Floating-point underflow"); + break; +#ifdef STATUS_FLOAT_MULTIPLE_FAULTS + case STATUS_FLOAT_MULTIPLE_FAULTS: + KWSYSPE_CASE(Numerical, "Floating-point exception (multiple faults)"); + break; +#endif +#ifdef STATUS_FLOAT_MULTIPLE_TRAPS + case STATUS_FLOAT_MULTIPLE_TRAPS: + KWSYSPE_CASE(Numerical, "Floating-point exception (multiple traps)"); + break; +#endif + case STATUS_INTEGER_DIVIDE_BY_ZERO: + KWSYSPE_CASE(Numerical, "Integer divide-by-zero"); + break; + case STATUS_INTEGER_OVERFLOW: + KWSYSPE_CASE(Numerical, "Integer overflow"); + break; + + case STATUS_DATATYPE_MISALIGNMENT: + KWSYSPE_CASE(Fault, "Datatype misalignment"); + break; + case STATUS_ACCESS_VIOLATION: + KWSYSPE_CASE(Fault, "Access violation"); + break; + case STATUS_IN_PAGE_ERROR: + KWSYSPE_CASE(Fault, "In-page error"); + break; + case STATUS_INVALID_HANDLE: + KWSYSPE_CASE(Fault, "Invalid hanlde"); + break; + case STATUS_NONCONTINUABLE_EXCEPTION: + KWSYSPE_CASE(Fault, "Noncontinuable exception"); + break; + case STATUS_INVALID_DISPOSITION: + KWSYSPE_CASE(Fault, "Invalid disposition"); + break; + case STATUS_ARRAY_BOUNDS_EXCEEDED: + KWSYSPE_CASE(Fault, "Array bounds exceeded"); + break; + case STATUS_STACK_OVERFLOW: + KWSYSPE_CASE(Fault, "Stack overflow"); + break; + + case STATUS_ILLEGAL_INSTRUCTION: + KWSYSPE_CASE(Illegal, "Illegal instruction"); + break; + case STATUS_PRIVILEGED_INSTRUCTION: + KWSYSPE_CASE(Illegal, "Privileged instruction"); + 
break; + + case STATUS_NO_MEMORY: + default: + cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_Other; + _snprintf(cp->ProcessResults[idx].ExitExceptionString, + KWSYSPE_PIPE_BUFFER_SIZE, "Exit code 0x%x\n", code); + break; + } +} +#undef KWSYSPE_CASE + +typedef struct kwsysProcess_List_s kwsysProcess_List; +static kwsysProcess_List* kwsysProcess_List_New(void); +static void kwsysProcess_List_Delete(kwsysProcess_List* self); +static int kwsysProcess_List_Update(kwsysProcess_List* self); +static int kwsysProcess_List_NextProcess(kwsysProcess_List* self); +static int kwsysProcess_List_GetCurrentProcessId(kwsysProcess_List* self); +static int kwsysProcess_List_GetCurrentParentId(kwsysProcess_List* self); + +/* Windows NT 4 API definitions. */ +#define STATUS_INFO_LENGTH_MISMATCH ((NTSTATUS)0xC0000004L) +typedef LONG NTSTATUS; +typedef LONG KPRIORITY; +typedef struct _UNICODE_STRING UNICODE_STRING; +struct _UNICODE_STRING +{ + USHORT Length; + USHORT MaximumLength; + PWSTR Buffer; +}; + +/* The process information structure. Declare only enough to get + process identifiers. The rest may be ignored because we use the + NextEntryDelta to move through an array of instances. */ +typedef struct _SYSTEM_PROCESS_INFORMATION SYSTEM_PROCESS_INFORMATION; +typedef SYSTEM_PROCESS_INFORMATION* PSYSTEM_PROCESS_INFORMATION; +struct _SYSTEM_PROCESS_INFORMATION +{ + ULONG NextEntryDelta; + ULONG ThreadCount; + ULONG Reserved1[6]; + LARGE_INTEGER CreateTime; + LARGE_INTEGER UserTime; + LARGE_INTEGER KernelTime; + UNICODE_STRING ProcessName; + KPRIORITY BasePriority; + ULONG ProcessId; + ULONG InheritedFromProcessId; +}; + +/* Toolhelp32 API definitions. */ +#define TH32CS_SNAPPROCESS 0x00000002 +#if defined(_WIN64) +typedef unsigned __int64 ProcessULONG_PTR; +#else +typedef unsigned long ProcessULONG_PTR; +#endif +typedef struct tagPROCESSENTRY32 PROCESSENTRY32; +typedef PROCESSENTRY32* LPPROCESSENTRY32; +struct tagPROCESSENTRY32 +{ + DWORD dwSize; + DWORD cntUsage; + DWORD th32ProcessID; + ProcessULONG_PTR th32DefaultHeapID; + DWORD th32ModuleID; + DWORD cntThreads; + DWORD th32ParentProcessID; + LONG pcPriClassBase; + DWORD dwFlags; + char szExeFile[MAX_PATH]; +}; + +/* Windows API function types. */ +typedef HANDLE(WINAPI* CreateToolhelp32SnapshotType)(DWORD, DWORD); +typedef BOOL(WINAPI* Process32FirstType)(HANDLE, LPPROCESSENTRY32); +typedef BOOL(WINAPI* Process32NextType)(HANDLE, LPPROCESSENTRY32); +typedef NTSTATUS(WINAPI* ZwQuerySystemInformationType)(ULONG, PVOID, ULONG, + PULONG); + +static int kwsysProcess_List__New_NT4(kwsysProcess_List* self); +static int kwsysProcess_List__New_Snapshot(kwsysProcess_List* self); +static void kwsysProcess_List__Delete_NT4(kwsysProcess_List* self); +static void kwsysProcess_List__Delete_Snapshot(kwsysProcess_List* self); +static int kwsysProcess_List__Update_NT4(kwsysProcess_List* self); +static int kwsysProcess_List__Update_Snapshot(kwsysProcess_List* self); +static int kwsysProcess_List__Next_NT4(kwsysProcess_List* self); +static int kwsysProcess_List__Next_Snapshot(kwsysProcess_List* self); +static int kwsysProcess_List__GetProcessId_NT4(kwsysProcess_List* self); +static int kwsysProcess_List__GetProcessId_Snapshot(kwsysProcess_List* self); +static int kwsysProcess_List__GetParentId_NT4(kwsysProcess_List* self); +static int kwsysProcess_List__GetParentId_Snapshot(kwsysProcess_List* self); + +struct kwsysProcess_List_s +{ + /* Implementation switches at runtime based on version of Windows. 
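
The NT 4 code path above walks SYSTEM_PROCESS_INFORMATION records that are packed back to back in one buffer; each record stores the byte offset of the next one in NextEntryDelta, and 0 marks the last entry. A sketch of just that traversal, assuming the structure declared above is in scope and the buffer was already filled the way kwsysProcess_List__Update_NT4 fills it (the helper name is illustrative, not part of kwsys):

  /* Illustrative helper: visit every record in a buffer of
     SYSTEM_PROCESS_INFORMATION entries linked by NextEntryDelta. */
  static void walk_process_records(char* buffer)
  {
    SYSTEM_PROCESS_INFORMATION* info = (SYSTEM_PROCESS_INFORMATION*)buffer;
    for (;;) {
      /* info->ProcessId and info->InheritedFromProcessId are valid here. */
      if (info->NextEntryDelta == 0) {
        break; /* an offset of 0 terminates the list */
      }
      info = (SYSTEM_PROCESS_INFORMATION*)((char*)info + info->NextEntryDelta);
    }
  }
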
*/ + int NT4; + + /* Implementation functions and data for NT 4. */ + ZwQuerySystemInformationType P_ZwQuerySystemInformation; + char* Buffer; + int BufferSize; + PSYSTEM_PROCESS_INFORMATION CurrentInfo; + + /* Implementation functions and data for other Windows versions. */ + CreateToolhelp32SnapshotType P_CreateToolhelp32Snapshot; + Process32FirstType P_Process32First; + Process32NextType P_Process32Next; + HANDLE Snapshot; + PROCESSENTRY32 CurrentEntry; +}; + +static kwsysProcess_List* kwsysProcess_List_New(void) +{ + OSVERSIONINFO osv; + kwsysProcess_List* self; + + /* Allocate and initialize the list object. */ + if (!(self = (kwsysProcess_List*)malloc(sizeof(kwsysProcess_List)))) { + return 0; + } + memset(self, 0, sizeof(*self)); + + /* Select an implementation. */ + ZeroMemory(&osv, sizeof(osv)); + osv.dwOSVersionInfoSize = sizeof(osv); +#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx +# pragma warning(push) +# ifdef __INTEL_COMPILER +# pragma warning(disable : 1478) +# elif defined __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" +# else +# pragma warning(disable : 4996) +# endif +#endif + GetVersionEx(&osv); +#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx +# ifdef __clang__ +# pragma clang diagnostic pop +# else +# pragma warning(pop) +# endif +#endif + self->NT4 = + (osv.dwPlatformId == VER_PLATFORM_WIN32_NT && osv.dwMajorVersion < 5) ? 1 + : 0; + + /* Initialize the selected implementation. */ + if (!(self->NT4 ? kwsysProcess_List__New_NT4(self) + : kwsysProcess_List__New_Snapshot(self))) { + kwsysProcess_List_Delete(self); + return 0; + } + + /* Update to the current set of processes. */ + if (!kwsysProcess_List_Update(self)) { + kwsysProcess_List_Delete(self); + return 0; + } + return self; +} + +static void kwsysProcess_List_Delete(kwsysProcess_List* self) +{ + if (self) { + if (self->NT4) { + kwsysProcess_List__Delete_NT4(self); + } else { + kwsysProcess_List__Delete_Snapshot(self); + } + free(self); + } +} + +static int kwsysProcess_List_Update(kwsysProcess_List* self) +{ + return self ? (self->NT4 ? kwsysProcess_List__Update_NT4(self) + : kwsysProcess_List__Update_Snapshot(self)) + : 0; +} + +static int kwsysProcess_List_GetCurrentProcessId(kwsysProcess_List* self) +{ + return self ? (self->NT4 ? kwsysProcess_List__GetProcessId_NT4(self) + : kwsysProcess_List__GetProcessId_Snapshot(self)) + : -1; +} + +static int kwsysProcess_List_GetCurrentParentId(kwsysProcess_List* self) +{ + return self ? (self->NT4 ? kwsysProcess_List__GetParentId_NT4(self) + : kwsysProcess_List__GetParentId_Snapshot(self)) + : -1; +} + +static int kwsysProcess_List_NextProcess(kwsysProcess_List* self) +{ + return (self ? (self->NT4 ? kwsysProcess_List__Next_NT4(self) + : kwsysProcess_List__Next_Snapshot(self)) + : 0); +} + +static int kwsysProcess_List__New_NT4(kwsysProcess_List* self) +{ + /* Get a handle to the NT runtime module that should already be + loaded in this program. This does not actually increment the + reference count to the module so we do not need to close the + handle. */ + HMODULE hNT = GetModuleHandleW(L"ntdll.dll"); + if (hNT) { + /* Get pointers to the needed API functions. */ + self->P_ZwQuerySystemInformation = + ((ZwQuerySystemInformationType)GetProcAddress( + hNT, "ZwQuerySystemInformation")); + } + if (!self->P_ZwQuerySystemInformation) { + return 0; + } + + /* Allocate an initial process information buffer. */ + self->BufferSize = 32768; + self->Buffer = (char*)malloc(self->BufferSize); + return self->Buffer ? 
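
kwsysProcess_List_New selects its implementation at runtime and resolves the entry points it needs with GetModuleHandleW/GetProcAddress rather than linking against them directly, so the same binary still runs when an API is missing. A standalone sketch of that resolution pattern, probing for Process32First purely as an example:

  #include <windows.h>
  #include <tlhelp32.h>

  typedef BOOL(WINAPI* Process32FirstType)(HANDLE, LPPROCESSENTRY32);

  static Process32FirstType resolve_process32_first(void)
  {
    /* kernel32.dll is already mapped into every Win32 process, so this does
       not add a reference and the handle must not be passed to FreeLibrary. */
    HMODULE hKernel = GetModuleHandleW(L"kernel32.dll");
    if (!hKernel) {
      return NULL;
    }
    return (Process32FirstType)GetProcAddress(hKernel, "Process32First");
  }
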
1 : 0; +} + +static void kwsysProcess_List__Delete_NT4(kwsysProcess_List* self) +{ + /* Free the process information buffer. */ + free(self->Buffer); +} + +static int kwsysProcess_List__Update_NT4(kwsysProcess_List* self) +{ + self->CurrentInfo = 0; + for (;;) { + /* Query number 5 is for system process list. */ + NTSTATUS status = + self->P_ZwQuerySystemInformation(5, self->Buffer, self->BufferSize, 0); + if (status == STATUS_INFO_LENGTH_MISMATCH) { + /* The query requires a bigger buffer. */ + int newBufferSize = self->BufferSize * 2; + char* newBuffer = (char*)malloc(newBufferSize); + if (newBuffer) { + free(self->Buffer); + self->Buffer = newBuffer; + self->BufferSize = newBufferSize; + } else { + return 0; + } + } else if (status >= 0) { + /* The query succeeded. Initialize traversal of the process list. */ + self->CurrentInfo = (PSYSTEM_PROCESS_INFORMATION)self->Buffer; + return 1; + } else { + /* The query failed. */ + return 0; + } + } +} + +static int kwsysProcess_List__Next_NT4(kwsysProcess_List* self) +{ + if (self->CurrentInfo) { + if (self->CurrentInfo->NextEntryDelta > 0) { + self->CurrentInfo = + ((PSYSTEM_PROCESS_INFORMATION)((char*)self->CurrentInfo + + self->CurrentInfo->NextEntryDelta)); + return 1; + } + self->CurrentInfo = 0; + } + return 0; +} + +static int kwsysProcess_List__GetProcessId_NT4(kwsysProcess_List* self) +{ + return self->CurrentInfo ? self->CurrentInfo->ProcessId : -1; +} + +static int kwsysProcess_List__GetParentId_NT4(kwsysProcess_List* self) +{ + return self->CurrentInfo ? self->CurrentInfo->InheritedFromProcessId : -1; +} + +static int kwsysProcess_List__New_Snapshot(kwsysProcess_List* self) +{ + /* Get a handle to the Windows runtime module that should already be + loaded in this program. This does not actually increment the + reference count to the module so we do not need to close the + handle. */ + HMODULE hKernel = GetModuleHandleW(L"kernel32.dll"); + if (hKernel) { + self->P_CreateToolhelp32Snapshot = + ((CreateToolhelp32SnapshotType)GetProcAddress( + hKernel, "CreateToolhelp32Snapshot")); + self->P_Process32First = + ((Process32FirstType)GetProcAddress(hKernel, "Process32First")); + self->P_Process32Next = + ((Process32NextType)GetProcAddress(hKernel, "Process32Next")); + } + return (self->P_CreateToolhelp32Snapshot && self->P_Process32First && + self->P_Process32Next) + ? 1 + : 0; +} + +static void kwsysProcess_List__Delete_Snapshot(kwsysProcess_List* self) +{ + if (self->Snapshot) { + CloseHandle(self->Snapshot); + } +} + +static int kwsysProcess_List__Update_Snapshot(kwsysProcess_List* self) +{ + if (self->Snapshot) { + CloseHandle(self->Snapshot); + } + if (!(self->Snapshot = + self->P_CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0))) { + return 0; + } + ZeroMemory(&self->CurrentEntry, sizeof(self->CurrentEntry)); + self->CurrentEntry.dwSize = sizeof(self->CurrentEntry); + if (!self->P_Process32First(self->Snapshot, &self->CurrentEntry)) { + CloseHandle(self->Snapshot); + self->Snapshot = 0; + return 0; + } + return 1; +} + +static int kwsysProcess_List__Next_Snapshot(kwsysProcess_List* self) +{ + if (self->Snapshot) { + if (self->P_Process32Next(self->Snapshot, &self->CurrentEntry)) { + return 1; + } + CloseHandle(self->Snapshot); + self->Snapshot = 0; + } + return 0; +} + +static int kwsysProcess_List__GetProcessId_Snapshot(kwsysProcess_List* self) +{ + return self->Snapshot ? self->CurrentEntry.th32ProcessID : -1; +} + +static int kwsysProcess_List__GetParentId_Snapshot(kwsysProcess_List* self) +{ + return self->Snapshot ? 
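
kwsysProcess_List__Update_NT4 above retries with a doubled buffer whenever the query reports STATUS_INFO_LENGTH_MISMATCH, while the snapshot path simply re-takes a Toolhelp32 snapshot. The grow-and-retry idiom on its own, with a placeholder query function that is not part of kwsys:

  #include <stdlib.h>

  /* Returns 0 on success, 1 if 'size' was too small, -1 on hard failure. */
  typedef int (*query_fn)(void* buffer, size_t size);

  static char* query_with_growing_buffer(query_fn query, size_t size)
  {
    char* buffer = (char*)malloc(size);
    while (buffer) {
      int status = query(buffer, size);
      if (status == 0) {
        return buffer;             /* success: caller owns the buffer */
      }
      free(buffer);
      if (status < 0) {
        return NULL;               /* hard failure: give up */
      }
      size *= 2;                   /* too small: double and retry */
      buffer = (char*)malloc(size);
    }
    return NULL;                   /* allocation failed */
  }
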
self->CurrentEntry.th32ParentProcessID : -1; +} + +static void kwsysProcessKill(DWORD pid) +{ + HANDLE h = OpenProcess(PROCESS_TERMINATE, 0, pid); + if (h) { + TerminateProcess(h, 255); + WaitForSingleObject(h, INFINITE); + CloseHandle(h); + } +} + +static void kwsysProcessKillTree(int pid) +{ + kwsysProcess_List* plist = kwsysProcess_List_New(); + kwsysProcessKill(pid); + if (plist) { + do { + if (kwsysProcess_List_GetCurrentParentId(plist) == pid) { + int ppid = kwsysProcess_List_GetCurrentProcessId(plist); + kwsysProcessKillTree(ppid); + } + } while (kwsysProcess_List_NextProcess(plist)); + kwsysProcess_List_Delete(plist); + } +} + +static void kwsysProcessDisablePipeThreads(kwsysProcess* cp) +{ + int i; + + /* If data were just reported data, release the pipe's thread. */ + if (cp->CurrentIndex < KWSYSPE_PIPE_COUNT) { + KWSYSPE_DEBUG((stderr, "releasing reader %d\n", cp->CurrentIndex)); + ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0); + cp->CurrentIndex = KWSYSPE_PIPE_COUNT; + } + + /* Wakeup all reading threads that are not on closed pipes. */ + for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { + /* The wakeup threads will write one byte to the pipe write ends. + If there are no data in the pipe then this is enough to wakeup + the reading threads. If there are already data in the pipe + this may block. We cannot use PeekNamedPipe to check whether + there are data because an outside process might still be + writing data if we are disowning it. Also, PeekNamedPipe will + block if checking a pipe on which the reading thread is + currently calling ReadPipe. Therefore we need a separate + thread to call WriteFile. If it blocks, that is okay because + it will unblock when we close the read end and break the pipe + below. */ + if (cp->Pipe[i].Read) { + KWSYSPE_DEBUG((stderr, "releasing waker %d\n", i)); + ReleaseSemaphore(cp->Pipe[i].Waker.Go, 1, 0); + } + } + + /* Tell pipe threads to reset until we run another process. */ + while (cp->PipesLeft > 0) { + /* The waking threads will cause all reading threads to report. + Wait for the next one and save its index. */ + KWSYSPE_DEBUG((stderr, "waiting for reader\n")); + WaitForSingleObject(cp->Full, INFINITE); + cp->CurrentIndex = cp->SharedIndex; + ReleaseSemaphore(cp->SharedIndexMutex, 1, 0); + KWSYSPE_DEBUG((stderr, "got reader %d\n", cp->CurrentIndex)); + + /* We are done reading this pipe. Close its read handle. */ + cp->Pipe[cp->CurrentIndex].Closed = 1; + kwsysProcessCleanupHandle(&cp->Pipe[cp->CurrentIndex].Read); + --cp->PipesLeft; + + /* Tell the reading thread we are done with the data. It will + reset immediately because the pipe is closed. */ + ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0); + } +} + +/* Global set of executing processes for use by the Ctrl handler. + This global instance will be zero-initialized by the compiler. + + Note that the console Ctrl handler runs on a background thread and so + everything it does must be thread safe. Here, we track the hProcess + HANDLEs directly instead of kwsysProcess instances, so that we don't have + to make kwsysProcess thread safe. */ +typedef struct kwsysProcessInstance_s +{ + HANDLE hProcess; + DWORD dwProcessId; + int NewProcessGroup; /* Whether the process was created in a new group. */ +} kwsysProcessInstance; + +typedef struct kwsysProcessInstances_s +{ + /* Whether we have initialized key fields below, like critical sections. */ + int Initialized; + + /* Ctrl handler runs on a different thread, so we must sync access. 
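
kwsysProcessKillTree kills the requested pid first and then re-enumerates the process list, recursing into every entry whose parent id matches. The same idea expressed over a prebuilt parent-to-children table, independent of the kwsysProcess_List helpers (all names here are toy stand-ins):

  #include <cstddef>
  #include <map>
  #include <vector>

  static void kill_one(int pid)
  {
    (void)pid; /* a real version would call OpenProcess/TerminateProcess,
                  as kwsysProcessKill does above */
  }

  /* children: parent pid -> direct child pids, from one enumeration pass. */
  static void kill_tree(const std::map<int, std::vector<int> >& children, int pid)
  {
    kill_one(pid); /* terminate the parent first... */
    std::map<int, std::vector<int> >::const_iterator it = children.find(pid);
    if (it == children.end()) {
      return;
    }
    for (std::size_t i = 0; i < it->second.size(); ++i) {
      kill_tree(children, it->second[i]); /* ...then recurse into its children */
    }
  }
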
*/ + CRITICAL_SECTION Lock; + + int Exiting; + size_t Count; + size_t Size; + kwsysProcessInstance* Processes; +} kwsysProcessInstances; +static kwsysProcessInstances kwsysProcesses; + +/* Initialize critial section and set up console Ctrl handler. You MUST call + this before using any other kwsysProcesses* functions below. */ +static int kwsysProcessesInitialize(void) +{ + /* Initialize everything if not done already. */ + if (!kwsysProcesses.Initialized) { + InitializeCriticalSection(&kwsysProcesses.Lock); + + /* Set up console ctrl handler. */ + if (!SetConsoleCtrlHandler(kwsysCtrlHandler, TRUE)) { + return 0; + } + + kwsysProcesses.Initialized = 1; + } + return 1; +} + +/* The Ctrl handler waits on the global list of processes. To prevent an + orphaned process, do not create a new process if the Ctrl handler is + already running. Do so by using this function to check if it is ok to + create a process. */ +static int kwsysTryEnterCreateProcessSection(void) +{ + /* Enter main critical section; this means creating a process and the Ctrl + handler are mutually exclusive. */ + EnterCriticalSection(&kwsysProcesses.Lock); + /* Indicate to the caller if they can create a process. */ + if (kwsysProcesses.Exiting) { + LeaveCriticalSection(&kwsysProcesses.Lock); + return 0; + } else { + return 1; + } +} + +/* Matching function on successful kwsysTryEnterCreateProcessSection return. + Make sure you called kwsysProcessesAdd if applicable before calling this.*/ +static void kwsysLeaveCreateProcessSection(void) +{ + LeaveCriticalSection(&kwsysProcesses.Lock); +} + +/* Add new process to global process list. The Ctrl handler will wait for + the process to exit before it returns. Do not close the process handle + until after calling kwsysProcessesRemove. The newProcessGroup parameter + must be set if the process was created with CREATE_NEW_PROCESS_GROUP. */ +static int kwsysProcessesAdd(HANDLE hProcess, DWORD dwProcessid, + int newProcessGroup) +{ + if (!kwsysProcessesInitialize() || !hProcess || + hProcess == INVALID_HANDLE_VALUE) { + return 0; + } + + /* Enter the critical section. */ + EnterCriticalSection(&kwsysProcesses.Lock); + + /* Make sure there is enough space for the new process handle. */ + if (kwsysProcesses.Count == kwsysProcesses.Size) { + size_t newSize; + kwsysProcessInstance* newArray; + /* Start with enough space for a small number of process handles + and double the size each time more is needed. */ + newSize = kwsysProcesses.Size ? kwsysProcesses.Size * 2 : 4; + + /* Try allocating the new block of memory. */ + if ((newArray = (kwsysProcessInstance*)malloc( + newSize * sizeof(kwsysProcessInstance)))) { + /* Copy the old process handles to the new memory. */ + if (kwsysProcesses.Count > 0) { + memcpy(newArray, kwsysProcesses.Processes, + kwsysProcesses.Count * sizeof(kwsysProcessInstance)); + } + } else { + /* Failed to allocate memory for the new process handle set. */ + LeaveCriticalSection(&kwsysProcesses.Lock); + return 0; + } + + /* Free original array. */ + free(kwsysProcesses.Processes); + + /* Update original structure with new allocation. */ + kwsysProcesses.Size = newSize; + kwsysProcesses.Processes = newArray; + } + + /* Append the new process information to the set. */ + kwsysProcesses.Processes[kwsysProcesses.Count].hProcess = hProcess; + kwsysProcesses.Processes[kwsysProcesses.Count].dwProcessId = dwProcessid; + kwsysProcesses.Processes[kwsysProcesses.Count++].NewProcessGroup = + newProcessGroup; + + /* Leave critical section and return success. 
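
kwsysProcessesInitialize installs the console control handler exactly once and then guards every later access to the global list with a critical section, because the handler runs on its own thread. A minimal, standalone registration example (this handler only records the event; it is not the kwsys handler shown below):

  #include <windows.h>
  #include <stdio.h>

  static volatile LONG g_interrupted = 0;

  static BOOL WINAPI ctrl_handler(DWORD type)
  {
    if (type == CTRL_C_EVENT || type == CTRL_BREAK_EVENT) {
      InterlockedExchange(&g_interrupted, 1); /* runs on a separate thread */
      return TRUE;                            /* handled: do not exit yet */
    }
    return FALSE;                             /* let the default handler run */
  }

  int main(void)
  {
    if (!SetConsoleCtrlHandler(ctrl_handler, TRUE)) {
      fprintf(stderr, "SetConsoleCtrlHandler failed: %lu\n", GetLastError());
      return 1;
    }
    while (!g_interrupted) {
      Sleep(100); /* pretend to do work until Ctrl+C or Ctrl+Break arrives */
    }
    printf("interrupted\n");
    return 0;
  }
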
*/ + LeaveCriticalSection(&kwsysProcesses.Lock); + + return 1; +} + +/* Removes process to global process list. */ +static void kwsysProcessesRemove(HANDLE hProcess) +{ + size_t i; + + if (!hProcess || hProcess == INVALID_HANDLE_VALUE) { + return; + } + + EnterCriticalSection(&kwsysProcesses.Lock); + + /* Find the given process in the set. */ + for (i = 0; i < kwsysProcesses.Count; ++i) { + if (kwsysProcesses.Processes[i].hProcess == hProcess) { + break; + } + } + if (i < kwsysProcesses.Count) { + /* Found it! Remove the process from the set. */ + --kwsysProcesses.Count; + for (; i < kwsysProcesses.Count; ++i) { + kwsysProcesses.Processes[i] = kwsysProcesses.Processes[i + 1]; + } + + /* If this was the last process, free the array. */ + if (kwsysProcesses.Count == 0) { + kwsysProcesses.Size = 0; + free(kwsysProcesses.Processes); + kwsysProcesses.Processes = 0; + } + } + + LeaveCriticalSection(&kwsysProcesses.Lock); +} + +static BOOL WINAPI kwsysCtrlHandler(DWORD dwCtrlType) +{ + size_t i; + (void)dwCtrlType; + /* Enter critical section. */ + EnterCriticalSection(&kwsysProcesses.Lock); + + /* Set flag indicating that we are exiting. */ + kwsysProcesses.Exiting = 1; + + /* If some of our processes were created in a new process group, we must + manually interrupt them. They won't otherwise receive a Ctrl+C/Break. */ + for (i = 0; i < kwsysProcesses.Count; ++i) { + if (kwsysProcesses.Processes[i].NewProcessGroup) { + DWORD groupId = kwsysProcesses.Processes[i].dwProcessId; + if (groupId) { + GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, groupId); + } + } + } + + /* Wait for each child process to exit. This is the key step that prevents + us from leaving several orphaned children processes running in the + background when the user presses Ctrl+C. */ + for (i = 0; i < kwsysProcesses.Count; ++i) { + WaitForSingleObject(kwsysProcesses.Processes[i].hProcess, INFINITE); + } + + /* Leave critical section. */ + LeaveCriticalSection(&kwsysProcesses.Lock); + + /* Continue on to default Ctrl handler (which calls ExitProcess). */ + return FALSE; +} + +void kwsysProcess_ResetStartTime(kwsysProcess* cp) +{ + if (!cp) { + return; + } + /* Reset start time. */ + cp->StartTime = kwsysProcessTimeGetCurrent(); +} diff --git a/test/API/driver/kwsys/README.rst b/test/API/driver/kwsys/README.rst new file mode 100644 index 00000000000..fc6b5902edc --- /dev/null +++ b/test/API/driver/kwsys/README.rst @@ -0,0 +1,37 @@ +KWSys +***** + +Introduction +============ + +KWSys is the Kitware System Library. It provides platform-independent +APIs to many common system features that are implemented differently on +every platform. This library is intended to be shared among many +projects at the source level, so it has a configurable namespace. +Each project should configure KWSys to use a namespace unique to itself. +See comments in `CMakeLists.txt`_ for details. + +.. _`CMakeLists.txt`: CMakeLists.txt + +License +======= + +KWSys is distributed under the OSI-approved BSD 3-clause License. +See `Copyright.txt`_ for details. + +.. _`Copyright.txt`: Copyright.txt + +Reporting Bugs +============== + +KWSys has no independent issue tracker. After encountering an issue +(bug) please submit a patch using the instructions for `Contributing`_. +Otherwise please report the issue to the tracker for the project that +hosts the copy of KWSys in which the problem was found. + +Contributing +============ + +See `CONTRIBUTING.rst`_ for instructions to contribute. + +.. 
_`CONTRIBUTING.rst`: CONTRIBUTING.rst diff --git a/test/API/driver/kwsys/RegularExpression.cxx b/test/API/driver/kwsys/RegularExpression.cxx new file mode 100644 index 00000000000..5e6f8da5031 --- /dev/null +++ b/test/API/driver/kwsys/RegularExpression.cxx @@ -0,0 +1,1218 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +// +// Copyright (C) 1991 Texas Instruments Incorporated. +// +// Permission is granted to any individual or institution to use, copy, modify +// and distribute this software, provided that this complete copyright and +// permission notice is maintained, intact, in all copies and supporting +// documentation. +// +// Texas Instruments Incorporated provides this software "as is" without +// express or implied warranty. +// +// +// Created: MNF 06/13/89 Initial Design and Implementation +// Updated: LGO 08/09/89 Inherit from Generic +// Updated: MBN 09/07/89 Added conditional exception handling +// Updated: MBN 12/15/89 Sprinkled "const" qualifiers all over the place! +// Updated: DLS 03/22/91 New lite version +// + +#include "kwsysPrivate.h" +#include KWSYS_HEADER(RegularExpression.hxx) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "RegularExpression.hxx.in" +#endif + +#include +#include + +namespace KWSYS_NAMESPACE { + +// RegularExpression -- Copies the given regular expression. +RegularExpression::RegularExpression(const RegularExpression& rxp) +{ + if (!rxp.program) { + this->program = nullptr; + return; + } + int ind; + this->progsize = rxp.progsize; // Copy regular expression size + this->program = new char[this->progsize]; // Allocate storage + for (ind = this->progsize; ind-- != 0;) // Copy regular expression + this->program[ind] = rxp.program[ind]; + // Copy pointers into last successful "find" operation + this->regmatch = rxp.regmatch; + this->regmust = rxp.regmust; // Copy field + if (rxp.regmust != nullptr) { + char* dum = rxp.program; + ind = 0; + while (dum != rxp.regmust) { + ++dum; + ++ind; + } + this->regmust = this->program + ind; + } + this->regstart = rxp.regstart; // Copy starting index + this->reganch = rxp.reganch; // Copy remaining private data + this->regmlen = rxp.regmlen; // Copy remaining private data +} + +// operator= -- Copies the given regular expression. +RegularExpression& RegularExpression::operator=(const RegularExpression& rxp) +{ + if (this == &rxp) { + return *this; + } + if (!rxp.program) { + this->program = nullptr; + return *this; + } + int ind; + this->progsize = rxp.progsize; // Copy regular expression size + delete[] this->program; + this->program = new char[this->progsize]; // Allocate storage + for (ind = this->progsize; ind-- != 0;) // Copy regular expression + this->program[ind] = rxp.program[ind]; + // Copy pointers into last successful "find" operation + this->regmatch = rxp.regmatch; + this->regmust = rxp.regmust; // Copy field + if (rxp.regmust != nullptr) { + char* dum = rxp.program; + ind = 0; + while (dum != rxp.regmust) { + ++dum; + ++ind; + } + this->regmust = this->program + ind; + } + this->regstart = rxp.regstart; // Copy starting index + this->reganch = rxp.reganch; // Copy remaining private data + this->regmlen = rxp.regmlen; // Copy remaining private data + + return *this; +} + +// operator== -- Returns true if two regular expressions have the same +// compiled program for pattern matching. 
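
Both the copy constructor and operator= above rebase regmust so that it points into the freshly allocated program buffer at the same offset it occupied in the source object. That idiom in isolation, with illustrative names only:

  #include <cstring>

  struct Buf
  {
    char* data;       // owned storage
    const char* mark; // interior pointer into 'data', or nullptr
  };

  // Deep-copy 'src' into 'dst', keeping 'mark' at the same offset.
  static void copy_with_interior_pointer(Buf& dst, const Buf& src, std::size_t n)
  {
    dst.data = new char[n];
    std::memcpy(dst.data, src.data, n);
    dst.mark = src.mark ? dst.data + (src.mark - src.data) : nullptr;
  }
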
+bool RegularExpression::operator==(const RegularExpression& rxp) const +{ + if (this != &rxp) { // Same address? + int ind = this->progsize; // Get regular expression size + if (ind != rxp.progsize) // If different size regexp + return false; // Return failure + while (ind-- != 0) // Else while still characters + if (this->program[ind] != rxp.program[ind]) // If regexp are different + return false; // Return failure + } + return true; // Else same, return success +} + +// deep_equal -- Returns true if have the same compiled regular expressions +// and the same start and end pointers. + +bool RegularExpression::deep_equal(const RegularExpression& rxp) const +{ + int ind = this->progsize; // Get regular expression size + if (ind != rxp.progsize) // If different size regexp + return false; // Return failure + while (ind-- != 0) // Else while still characters + if (this->program[ind] != rxp.program[ind]) // If regexp are different + return false; // Return failure + // Else if same start/end ptrs, return true + return (this->regmatch.start() == rxp.regmatch.start() && + this->regmatch.end() == rxp.regmatch.end()); +} + +// The remaining code in this file is derived from the regular expression code +// whose copyright statement appears below. It has been changed to work +// with the class concepts of C++ and COOL. + +/* + * compile and find + * + * Copyright (c) 1986 by University of Toronto. + * Written by Henry Spencer. Not derived from licensed software. + * + * Permission is granted to anyone to use this software for any + * purpose on any computer system, and to redistribute it freely, + * subject to the following restrictions: + * + * 1. The author is not responsible for the consequences of use of + * this software, no matter how awful, even if they arise + * from defects in it. + * + * 2. The origin of this software must not be misrepresented, either + * by explicit claim or by omission. + * + * 3. Altered versions must be plainly marked as such, and must not + * be misrepresented as being the original software. + * + * Beware that some of this code is subtly aware of the way operator + * precedence is structured in regular expressions. Serious changes in + * regular-expression syntax might require a total rethink. + */ + +/* + * The "internal use only" fields in regexp.h are present to pass info from + * compile to execute that permits the execute phase to run lots faster on + * simple cases. They are: + * + * regstart char that must begin a match; '\0' if none obvious + * reganch is the match anchored (at beginning-of-line only)? + * regmust string (pointer into program) that match must include, or + * nullptr regmlen length of regmust string + * + * Regstart and reganch permit very fast decisions on suitable starting points + * for a match, cutting down the work a lot. Regmust permits fast rejection + * of lines that cannot possibly match. The regmust tests are costly enough + * that compile() supplies a regmust only if the r.e. contains something + * potentially expensive (at present, the only such thing detected is * or + + * at the start of the r.e., which can involve a lot of backup). Regmlen is + * supplied because the test in find() needs it and compile() is computing + * it anyway. + */ + +/* + * Structure for regexp "program". This is essentially a linear encoding + * of a nondeterministic finite-state machine (aka syntax charts or + * "railroad normal form" in parsing technology). Each node is an opcode + * plus a "next" pointer, possibly plus an operand. 
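
The regmust/regmlen fields described above feed a cheap prefilter in find(): before the matcher runs, the input is scanned for the one literal every match must contain, and inputs without it are rejected immediately. The prefilter on its own, using the same strchr/strncmp combination find() uses later in this file:

  #include <cstring>

  // Returns 1 if 'text' contains the required literal 'must' of length 'len'.
  static int contains_required_literal(const char* text, const char* must,
                                       std::size_t len)
  {
    for (const char* s = text; (s = std::strchr(s, must[0])) != nullptr; ++s) {
      if (std::strncmp(s, must, len) == 0) {
        return 1; // candidate input: worth running the full matcher
      }
    }
    return 0;     // the literal is absent, so the expression cannot match
  }
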
"Next" pointers of + * all nodes except BRANCH implement concatenation; a "next" pointer with + * a BRANCH on both ends of it is connecting two alternatives. (Here we + * have one of the subtle syntax dependencies: an individual BRANCH (as + * opposed to a collection of them) is never concatenated with anything + * because of operator precedence.) The operand of some types of node is + * a literal string; for others, it is a node leading into a sub-FSM. In + * particular, the operand of a BRANCH node is the first node of the branch. + * (NB this is *not* a tree structure: the tail of the branch connects + * to the thing following the set of BRANCHes.) The opcodes are: + */ + +// definition number opnd? meaning +#define END 0 // no End of program. +#define BOL 1 // no Match "" at beginning of line. +#define EOL 2 // no Match "" at end of line. +#define ANY 3 // no Match any one character. +#define ANYOF 4 // str Match any character in this string. +#define ANYBUT \ + 5 // str Match any character not in this + // string. +#define BRANCH \ + 6 // node Match this alternative, or the + // next... +#define BACK 7 // no Match "", "next" ptr points backward. +#define EXACTLY 8 // str Match this string. +#define NOTHING 9 // no Match empty string. +#define STAR \ + 10 // node Match this (simple) thing 0 or more + // times. +#define PLUS \ + 11 // node Match this (simple) thing 1 or more + // times. +#define OPEN \ + 20 // no Mark this point in input as start of + // #n. +// OPEN+1 is number 1, etc. +#define CLOSE 30 // no Analogous to OPEN. + +/* + * Opcode notes: + * + * BRANCH The set of branches constituting a single choice are hooked + * together with their "next" pointers, since precedence prevents + * anything being concatenated to any individual branch. The + * "next" pointer of the last BRANCH in a choice points to the + * thing following the whole choice. This is also where the + * final "next" pointer of each individual branch points; each + * branch starts with the operand node of a BRANCH node. + * + * BACK Normal "next" pointers all implicitly point forward; BACK + * exists to make loop structures possible. + * + * STAR,PLUS '?', and complex '*' and '+', are implemented as circular + * BRANCH structures using BACK. Simple cases (one character + * per match) are implemented with STAR and PLUS for speed + * and to minimize recursive plunges. + * + * OPEN,CLOSE ...are numbered at compile time. + */ + +/* + * A node is one char of opcode followed by two chars of "next" pointer. + * "Next" pointers are stored as two 8-bit pieces, high order first. The + * value is a positive offset from the opcode of the node containing it. + * An operand, if any, simply follows the node. (Note that much of the + * code generation knows about this implicit relationship.) + * + * Using two bytes for the "next" pointer is vast overkill for most things, + * but allows patterns to get big without disasters. + */ + +#define OP(p) (*(p)) +#define NEXT(p) (((*((p) + 1) & 0377) << 8) + (*((p) + 2) & 0377)) +#define OPERAND(p) ((p) + 3) + +const unsigned char MAGIC = 0234; +/* + * Utility definitions. + */ + +#define UCHARAT(p) (reinterpret_cast(p))[0] + +#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?') +#define META "^$.[()|?+*\\" + +/* + * Flags to be passed up and down. + */ +#define HASWIDTH 01 // Known never to match null string. +#define SIMPLE 02 // Simple enough to be STAR/PLUS operand. +#define SPSTART 04 // Starts with * or +. +#define WORST 0 // Worst case. 
+ +///////////////////////////////////////////////////////////////////////// +// +// COMPILE AND ASSOCIATED FUNCTIONS +// +///////////////////////////////////////////////////////////////////////// + +/* + * Read only utility variables. + */ +static char regdummy; +static char* const regdummyptr = ®dummy; + +/* + * Utility class for RegularExpression::compile(). + */ +class RegExpCompile +{ +public: + const char* regparse; // Input-scan pointer. + int regnpar; // () count. + char* regcode; // Code-emit pointer; regdummyptr = don't. + long regsize; // Code size. + + char* reg(int, int*); + char* regbranch(int*); + char* regpiece(int*); + char* regatom(int*); + char* regnode(char); + void regc(char); + void reginsert(char, char*); + static void regtail(char*, const char*); + static void regoptail(char*, const char*); +}; + +static const char* regnext(const char*); +static char* regnext(char*); + +#ifdef STRCSPN +static int strcspn(); +#endif + +/* + * We can't allocate space until we know how big the compiled form will be, + * but we can't compile it (and thus know how big it is) until we've got a + * place to put the code. So we cheat: we compile it twice, once with code + * generation turned off and size counting turned on, and once "for real". + * This also means that we don't allocate space until we are sure that the + * thing really will compile successfully, and we never have to move the + * code and thus invalidate pointers into it. (Note that it has to be in + * one piece because free() must be able to free it all.) + * + * Beware that the optimization-preparation code in here knows about some + * of the structure of the compiled regexp. + */ + +// compile -- compile a regular expression into internal code +// for later pattern matching. + +bool RegularExpression::compile(const char* exp) +{ + const char* scan; + const char* longest; + int flags; + + if (exp == nullptr) { + // RAISE Error, SYM(RegularExpression), SYM(No_Expr), + printf("RegularExpression::compile(): No expression supplied.\n"); + return false; + } + + // First pass: determine size, legality. + RegExpCompile comp; + comp.regparse = exp; + comp.regnpar = 1; + comp.regsize = 0L; + comp.regcode = regdummyptr; + comp.regc(static_cast(MAGIC)); + if (!comp.reg(0, &flags)) { + printf("RegularExpression::compile(): Error in compile.\n"); + return false; + } + this->regmatch.clear(); + + // Small enough for pointer-storage convention? + if (comp.regsize >= 32767L) { // Probably could be 65535L. + // RAISE Error, SYM(RegularExpression), SYM(Expr_Too_Big), + printf("RegularExpression::compile(): Expression too big.\n"); + return false; + } + + // Allocate space. + //#ifndef _WIN32 + if (this->program != nullptr) + delete[] this->program; + //#endif + this->program = new char[comp.regsize]; + this->progsize = static_cast(comp.regsize); + + if (this->program == nullptr) { + // RAISE Error, SYM(RegularExpression), SYM(Out_Of_Memory), + printf("RegularExpression::compile(): Out of memory.\n"); + return false; + } + + // Second pass: emit code. + comp.regparse = exp; + comp.regnpar = 1; + comp.regcode = this->program; + comp.regc(static_cast(MAGIC)); + comp.reg(0, &flags); + + // Dig out information for optimizations. + this->regstart = '\0'; // Worst-case defaults. + this->reganch = 0; + this->regmust = nullptr; + this->regmlen = 0; + scan = this->program + 1; // First BRANCH. + if (OP(regnext(scan)) == END) { // Only one top-level choice. + scan = OPERAND(scan); + + // Starting-point info. 
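
compile() runs the generator twice: the first pass emits into a dummy pointer and only counts bytes (regsize), the buffer is then allocated from that count, and the second pass emits for real, so pointers into the program never have to be relocated. A stripped-down sketch of that two-pass idiom (generate() stands in for the recursive reg()/regnode() machinery and is not part of kwsys):

  #include <cstddef>
  #include <cstdio>

  struct Emitter
  {
    char* out;        // nullptr during the sizing pass
    std::size_t size; // bytes emitted so far (or merely counted)

    void emit(char byte)
    {
      if (out) {
        out[size] = byte; // second pass: actually store the byte
      }
      ++size;             // both passes: advance the count
    }
  };

  static void generate(Emitter& e) // placeholder for the real generator
  {
    e.emit('h'); e.emit('i'); e.emit('\0');
  }

  int main()
  {
    Emitter sizing = { nullptr, 0 };
    generate(sizing);                     // pass 1: count only
    char* buffer = new char[sizing.size]; // allocate exactly what is needed
    Emitter real = { buffer, 0 };
    generate(real);                       // pass 2: emit for real
    std::printf("%zu bytes: %s\n", sizing.size, buffer);
    delete[] buffer;
    return 0;
  }
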
+ if (OP(scan) == EXACTLY) + this->regstart = *OPERAND(scan); + else if (OP(scan) == BOL) + this->reganch++; + + // + // If there's something expensive in the r.e., find the longest + // literal string that must appear and make it the regmust. Resolve + // ties in favor of later strings, since the regstart check works + // with the beginning of the r.e. and avoiding duplication + // strengthens checking. Not a strong reason, but sufficient in the + // absence of others. + // + if (flags & SPSTART) { + longest = nullptr; + size_t len = 0; + for (; scan != nullptr; scan = regnext(scan)) + if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) { + longest = OPERAND(scan); + len = strlen(OPERAND(scan)); + } + this->regmust = longest; + this->regmlen = len; + } + } + return true; +} + +/* + - reg - regular expression, i.e. main body or parenthesized thing + * + * Caller must absorb opening parenthesis. + * + * Combining parenthesis handling with the base level of regular expression + * is a trifle forced, but the need to tie the tails of the branches to what + * follows makes it hard to avoid. + */ +char* RegExpCompile::reg(int paren, int* flagp) +{ + char* ret; + char* br; + char* ender; + int parno = 0; + int flags; + + *flagp = HASWIDTH; // Tentatively. + + // Make an OPEN node, if parenthesized. + if (paren) { + if (regnpar >= RegularExpressionMatch::NSUBEXP) { + // RAISE Error, SYM(RegularExpression), SYM(Too_Many_Parens), + printf("RegularExpression::compile(): Too many parentheses.\n"); + return nullptr; + } + parno = regnpar; + regnpar++; + ret = regnode(static_cast(OPEN + parno)); + } else + ret = nullptr; + + // Pick up the branches, linking them together. + br = regbranch(&flags); + if (br == nullptr) + return (nullptr); + if (ret != nullptr) + regtail(ret, br); // OPEN -> first. + else + ret = br; + if (!(flags & HASWIDTH)) + *flagp &= ~HASWIDTH; + *flagp |= flags & SPSTART; + while (*regparse == '|') { + regparse++; + br = regbranch(&flags); + if (br == nullptr) + return (nullptr); + regtail(ret, br); // BRANCH -> BRANCH. + if (!(flags & HASWIDTH)) + *flagp &= ~HASWIDTH; + *flagp |= flags & SPSTART; + } + + // Make a closing node, and hook it on the end. + ender = regnode(static_cast((paren) ? CLOSE + parno : END)); + regtail(ret, ender); + + // Hook the tails of the branches to the closing node. + for (br = ret; br != nullptr; br = regnext(br)) + regoptail(br, ender); + + // Check for proper termination. + if (paren && *regparse++ != ')') { + // RAISE Error, SYM(RegularExpression), SYM(Unmatched_Parens), + printf("RegularExpression::compile(): Unmatched parentheses.\n"); + return nullptr; + } else if (!paren && *regparse != '\0') { + if (*regparse == ')') { + // RAISE Error, SYM(RegularExpression), SYM(Unmatched_Parens), + printf("RegularExpression::compile(): Unmatched parentheses.\n"); + return nullptr; + } else { + // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), + printf("RegularExpression::compile(): Internal error.\n"); + return nullptr; + } + // NOTREACHED + } + return (ret); +} + +/* + - regbranch - one alternative of an | operator + * + * Implements the concatenation operator. + */ +char* RegExpCompile::regbranch(int* flagp) +{ + char* ret; + char* chain; + char* latest; + int flags; + + *flagp = WORST; // Tentatively. 
+ + ret = regnode(BRANCH); + chain = nullptr; + while (*regparse != '\0' && *regparse != '|' && *regparse != ')') { + latest = regpiece(&flags); + if (latest == nullptr) + return (nullptr); + *flagp |= flags & HASWIDTH; + if (chain == nullptr) // First piece. + *flagp |= flags & SPSTART; + else + regtail(chain, latest); + chain = latest; + } + if (chain == nullptr) // Loop ran zero times. + regnode(NOTHING); + + return (ret); +} + +/* + - regpiece - something followed by possible [*+?] + * + * Note that the branching code sequences used for ? and the general cases + * of * and + are somewhat optimized: they use the same NOTHING node as + * both the endmarker for their branch list and the body of the last branch. + * It might seem that this node could be dispensed with entirely, but the + * endmarker role is not redundant. + */ +char* RegExpCompile::regpiece(int* flagp) +{ + char* ret; + char op; + char* next; + int flags; + + ret = regatom(&flags); + if (ret == nullptr) + return (nullptr); + + op = *regparse; + if (!ISMULT(op)) { + *flagp = flags; + return (ret); + } + + if (!(flags & HASWIDTH) && op != '?') { + // RAISE Error, SYM(RegularExpression), SYM(Empty_Operand), + printf("RegularExpression::compile() : *+ operand could be empty.\n"); + return nullptr; + } + *flagp = (op != '+') ? (WORST | SPSTART) : (WORST | HASWIDTH); + + if (op == '*' && (flags & SIMPLE)) + reginsert(STAR, ret); + else if (op == '*') { + // Emit x* as (x&|), where & means "self". + reginsert(BRANCH, ret); // Either x + regoptail(ret, regnode(BACK)); // and loop + regoptail(ret, ret); // back + regtail(ret, regnode(BRANCH)); // or + regtail(ret, regnode(NOTHING)); // null. + } else if (op == '+' && (flags & SIMPLE)) + reginsert(PLUS, ret); + else if (op == '+') { + // Emit x+ as x(&|), where & means "self". + next = regnode(BRANCH); // Either + regtail(ret, next); + regtail(regnode(BACK), ret); // loop back + regtail(next, regnode(BRANCH)); // or + regtail(ret, regnode(NOTHING)); // null. + } else if (op == '?') { + // Emit x? as (x|) + reginsert(BRANCH, ret); // Either x + regtail(ret, regnode(BRANCH)); // or + next = regnode(NOTHING); // null. + regtail(ret, next); + regoptail(ret, next); + } + regparse++; + if (ISMULT(*regparse)) { + // RAISE Error, SYM(RegularExpression), SYM(Nested_Operand), + printf("RegularExpression::compile(): Nested *?+.\n"); + return nullptr; + } + return (ret); +} + +/* + - regatom - the lowest level + * + * Optimization: gobbles an entire sequence of ordinary characters so that + * it can turn them into a single node, which is smaller to store and + * faster to run. Backslashed characters are exceptions, each becoming a + * separate node; the code is simpler that way and it's not worth fixing. + */ +char* RegExpCompile::regatom(int* flagp) +{ + char* ret; + int flags; + + *flagp = WORST; // Tentatively. + + switch (*regparse++) { + case '^': + ret = regnode(BOL); + break; + case '$': + ret = regnode(EOL); + break; + case '.': + ret = regnode(ANY); + *flagp |= HASWIDTH | SIMPLE; + break; + case '[': { + int rxpclass; + int rxpclassend; + + if (*regparse == '^') { // Complement of range. 
+ ret = regnode(ANYBUT); + regparse++; + } else + ret = regnode(ANYOF); + if (*regparse == ']' || *regparse == '-') + regc(*regparse++); + while (*regparse != '\0' && *regparse != ']') { + if (*regparse == '-') { + regparse++; + if (*regparse == ']' || *regparse == '\0') + regc('-'); + else { + rxpclass = UCHARAT(regparse - 2) + 1; + rxpclassend = UCHARAT(regparse); + if (rxpclass > rxpclassend + 1) { + // RAISE Error, SYM(RegularExpression), SYM(Invalid_Range), + printf("RegularExpression::compile(): Invalid range in [].\n"); + return nullptr; + } + for (; rxpclass <= rxpclassend; rxpclass++) + regc(static_cast(rxpclass)); + regparse++; + } + } else + regc(*regparse++); + } + regc('\0'); + if (*regparse != ']') { + // RAISE Error, SYM(RegularExpression), SYM(Unmatched_Bracket), + printf("RegularExpression::compile(): Unmatched [].\n"); + return nullptr; + } + regparse++; + *flagp |= HASWIDTH | SIMPLE; + } break; + case '(': + ret = reg(1, &flags); + if (ret == nullptr) + return (nullptr); + *flagp |= flags & (HASWIDTH | SPSTART); + break; + case '\0': + case '|': + case ')': + // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), + printf("RegularExpression::compile(): Internal error.\n"); // Never here + return nullptr; + case '?': + case '+': + case '*': + // RAISE Error, SYM(RegularExpression), SYM(No_Operand), + printf("RegularExpression::compile(): ?+* follows nothing.\n"); + return nullptr; + case '\\': + if (*regparse == '\0') { + // RAISE Error, SYM(RegularExpression), SYM(Trailing_Backslash), + printf("RegularExpression::compile(): Trailing backslash.\n"); + return nullptr; + } + ret = regnode(EXACTLY); + regc(*regparse++); + regc('\0'); + *flagp |= HASWIDTH | SIMPLE; + break; + default: { + int len; + char ender; + + regparse--; + len = int(strcspn(regparse, META)); + if (len <= 0) { + // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), + printf("RegularExpression::compile(): Internal error.\n"); + return nullptr; + } + ender = *(regparse + len); + if (len > 1 && ISMULT(ender)) + len--; // Back off clear of ?+* operand. + *flagp |= HASWIDTH; + if (len == 1) + *flagp |= SIMPLE; + ret = regnode(EXACTLY); + while (len > 0) { + regc(*regparse++); + len--; + } + regc('\0'); + } break; + } + return (ret); +} + +/* + - regnode - emit a node + Location. + */ +char* RegExpCompile::regnode(char op) +{ + char* ret; + char* ptr; + + ret = regcode; + if (ret == regdummyptr) { + regsize += 3; + return (ret); + } + + ptr = ret; + *ptr++ = op; + *ptr++ = '\0'; // Null "next" pointer. + *ptr++ = '\0'; + regcode = ptr; + + return (ret); +} + +/* + - regc - emit (if appropriate) a byte of code + */ +void RegExpCompile::regc(char b) +{ + if (regcode != regdummyptr) + *regcode++ = b; + else + regsize++; +} + +/* + - reginsert - insert an operator in front of already-emitted operand + * + * Means relocating the operand. + */ +void RegExpCompile::reginsert(char op, char* opnd) +{ + char* src; + char* dst; + char* place; + + if (regcode == regdummyptr) { + regsize += 3; + return; + } + + src = regcode; + regcode += 3; + dst = regcode; + while (src > opnd) + *--dst = *--src; + + place = opnd; // Op node, where operand used to be. + *place++ = op; + *place++ = '\0'; + *place = '\0'; +} + +/* + - regtail - set the next-pointer at the end of a node chain + */ +void RegExpCompile::regtail(char* p, const char* val) +{ + char* scan; + char* temp; + int offset; + + if (p == regdummyptr) + return; + + // Find last node. 
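
Inside a bracket expression the compiler expands every a-b range into its individual characters at compile time (the rxpclass/rxpclassend loop above), so the matcher only ever compares against a flat string of allowed characters. The expansion step by itself, with std::string used purely for brevity:

  #include <cstdio>
  #include <string>

  // Expand an "a-e" style range the way the [] parser above does.
  static std::string expand_range(unsigned char lo, unsigned char hi)
  {
    std::string out;
    for (int c = lo; c <= hi; ++c) {
      out.push_back(static_cast<char>(c));
    }
    return out;
  }

  int main()
  {
    std::printf("%s\n", expand_range('a', 'e').c_str()); // prints "abcde"
    return 0;
  }
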
+ scan = p; + for (;;) { + temp = regnext(scan); + if (temp == nullptr) + break; + scan = temp; + } + + if (OP(scan) == BACK) + offset = int(scan - val); + else + offset = int(val - scan); + *(scan + 1) = static_cast((offset >> 8) & 0377); + *(scan + 2) = static_cast(offset & 0377); +} + +/* + - regoptail - regtail on operand of first argument; nop if operandless + */ +void RegExpCompile::regoptail(char* p, const char* val) +{ + // "Operandless" and "op != BRANCH" are synonymous in practice. + if (p == nullptr || p == regdummyptr || OP(p) != BRANCH) + return; + regtail(OPERAND(p), val); +} + +//////////////////////////////////////////////////////////////////////// +// +// find and friends +// +//////////////////////////////////////////////////////////////////////// + +/* + * Utility class for RegularExpression::find(). + */ +class RegExpFind +{ +public: + const char* reginput; // String-input pointer. + const char* regbol; // Beginning of input, for ^ check. + const char** regstartp; // Pointer to startp array. + const char** regendp; // Ditto for endp. + + int regtry(const char*, const char**, const char**, const char*); + int regmatch(const char*); + int regrepeat(const char*); +}; + +// find -- Matches the regular expression to the given string. +// Returns true if found, and sets start and end indexes accordingly. +bool RegularExpression::find(char const* string, + RegularExpressionMatch& rmatch) const +{ + const char* s; + + rmatch.clear(); + rmatch.searchstring = string; + + if (!this->program) { + return false; + } + + // Check validity of program. + if (UCHARAT(this->program) != MAGIC) { + // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), + printf( + "RegularExpression::find(): Compiled regular expression corrupted.\n"); + return false; + } + + // If there is a "must appear" string, look for it. + if (this->regmust != nullptr) { + s = string; + while ((s = strchr(s, this->regmust[0])) != nullptr) { + if (strncmp(s, this->regmust, this->regmlen) == 0) + break; // Found it. + s++; + } + if (s == nullptr) // Not present. + return false; + } + + RegExpFind regFind; + + // Mark beginning of line for ^ . + regFind.regbol = string; + + // Simplest case: anchored match need be tried only once. + if (this->reganch) + return ( + regFind.regtry(string, rmatch.startp, rmatch.endp, this->program) != 0); + + // Messy cases: unanchored match. + s = string; + if (this->regstart != '\0') + // We know what char it must start with. + while ((s = strchr(s, this->regstart)) != nullptr) { + if (regFind.regtry(s, rmatch.startp, rmatch.endp, this->program)) + return true; + s++; + } + else + // We don't -- general case. + do { + if (regFind.regtry(s, rmatch.startp, rmatch.endp, this->program)) + return true; + } while (*s++ != '\0'); + + // Failure. 
+ return false; +} + +/* + - regtry - try match at specific point + 0 failure, 1 success + */ +int RegExpFind::regtry(const char* string, const char** start, + const char** end, const char* prog) +{ + int i; + const char** sp1; + const char** ep; + + reginput = string; + regstartp = start; + regendp = end; + + sp1 = start; + ep = end; + for (i = RegularExpressionMatch::NSUBEXP; i > 0; i--) { + *sp1++ = nullptr; + *ep++ = nullptr; + } + if (regmatch(prog + 1)) { + start[0] = string; + end[0] = reginput; + return (1); + } else + return (0); +} + +/* + - regmatch - main matching routine + * + * Conceptually the strategy is simple: check to see whether the current + * node matches, call self recursively to see whether the rest matches, + * and then act accordingly. In practice we make some effort to avoid + * recursion, in particular by going through "ordinary" nodes (that don't + * need to know whether the rest of the match failed) by a loop instead of + * by recursion. + * 0 failure, 1 success + */ +int RegExpFind::regmatch(const char* prog) +{ + const char* scan; // Current node. + const char* next; // Next node. + + scan = prog; + + while (scan != nullptr) { + + next = regnext(scan); + + switch (OP(scan)) { + case BOL: + if (reginput != regbol) + return (0); + break; + case EOL: + if (*reginput != '\0') + return (0); + break; + case ANY: + if (*reginput == '\0') + return (0); + reginput++; + break; + case EXACTLY: { + size_t len; + const char* opnd; + + opnd = OPERAND(scan); + // Inline the first character, for speed. + if (*opnd != *reginput) + return (0); + len = strlen(opnd); + if (len > 1 && strncmp(opnd, reginput, len) != 0) + return (0); + reginput += len; + } break; + case ANYOF: + if (*reginput == '\0' || strchr(OPERAND(scan), *reginput) == nullptr) + return (0); + reginput++; + break; + case ANYBUT: + if (*reginput == '\0' || strchr(OPERAND(scan), *reginput) != nullptr) + return (0); + reginput++; + break; + case NOTHING: + break; + case BACK: + break; + case OPEN + 1: + case OPEN + 2: + case OPEN + 3: + case OPEN + 4: + case OPEN + 5: + case OPEN + 6: + case OPEN + 7: + case OPEN + 8: + case OPEN + 9: { + int no; + const char* save; + + no = OP(scan) - OPEN; + save = reginput; + + if (regmatch(next)) { + + // + // Don't set startp if some later invocation of the + // same parentheses already has. + // + if (regstartp[no] == nullptr) + regstartp[no] = save; + return (1); + } else + return (0); + } + // break; + case CLOSE + 1: + case CLOSE + 2: + case CLOSE + 3: + case CLOSE + 4: + case CLOSE + 5: + case CLOSE + 6: + case CLOSE + 7: + case CLOSE + 8: + case CLOSE + 9: { + int no; + const char* save; + + no = OP(scan) - CLOSE; + save = reginput; + + if (regmatch(next)) { + + // + // Don't set endp if some later invocation of the + // same parentheses already has. + // + if (regendp[no] == nullptr) + regendp[no] = save; + return (1); + } else + return (0); + } + // break; + case BRANCH: { + + const char* save; + + if (OP(next) != BRANCH) // No choice. + next = OPERAND(scan); // Avoid recursion. + else { + do { + save = reginput; + if (regmatch(OPERAND(scan))) + return (1); + reginput = save; + scan = regnext(scan); + } while (scan != nullptr && OP(scan) == BRANCH); + return (0); + // NOTREACHED + } + } break; + case STAR: + case PLUS: { + char nextch; + int no; + const char* save; + int min_no; + + // + // Lookahead to avoid useless match attempts when we know + // what character comes next. 
+ // + nextch = '\0'; + if (OP(next) == EXACTLY) + nextch = *OPERAND(next); + min_no = (OP(scan) == STAR) ? 0 : 1; + save = reginput; + no = regrepeat(OPERAND(scan)); + while (no >= min_no) { + // If it could work, try it. + if (nextch == '\0' || *reginput == nextch) + if (regmatch(next)) + return (1); + // Couldn't or didn't -- back up. + no--; + reginput = save + no; + } + return (0); + } + // break; + case END: + return (1); // Success! + + default: + // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), + printf( + "RegularExpression::find(): Internal error -- memory corrupted.\n"); + return 0; + } + scan = next; + } + + // + // We get here only if there's trouble -- normally "case END" is the + // terminating point. + // + // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), + printf("RegularExpression::find(): Internal error -- corrupted pointers.\n"); + return (0); +} + +/* + - regrepeat - repeatedly match something simple, report how many + */ +int RegExpFind::regrepeat(const char* p) +{ + int count = 0; + const char* scan; + const char* opnd; + + scan = reginput; + opnd = OPERAND(p); + switch (OP(p)) { + case ANY: + count = int(strlen(scan)); + scan += count; + break; + case EXACTLY: + while (*opnd == *scan) { + count++; + scan++; + } + break; + case ANYOF: + while (*scan != '\0' && strchr(opnd, *scan) != nullptr) { + count++; + scan++; + } + break; + case ANYBUT: + while (*scan != '\0' && strchr(opnd, *scan) == nullptr) { + count++; + scan++; + } + break; + default: // Oh dear. Called inappropriately. + // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), + printf("cm RegularExpression::find(): Internal error.\n"); + return 0; + } + reginput = scan; + return (count); +} + +/* + - regnext - dig the "next" pointer out of a node + */ +static const char* regnext(const char* p) +{ + int offset; + + if (p == regdummyptr) + return (nullptr); + + offset = NEXT(p); + if (offset == 0) + return (nullptr); + + if (OP(p) == BACK) + return (p - offset); + else + return (p + offset); +} + +static char* regnext(char* p) +{ + int offset; + + if (p == regdummyptr) + return (nullptr); + + offset = NEXT(p); + if (offset == 0) + return (nullptr); + + if (OP(p) == BACK) + return (p - offset); + else + return (p + offset); +} + +} // namespace KWSYS_NAMESPACE diff --git a/test/API/driver/kwsys/RegularExpression.hxx.in b/test/API/driver/kwsys/RegularExpression.hxx.in new file mode 100644 index 00000000000..0c2366b8421 --- /dev/null +++ b/test/API/driver/kwsys/RegularExpression.hxx.in @@ -0,0 +1,562 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +// Original Copyright notice: +// Copyright (C) 1991 Texas Instruments Incorporated. +// +// Permission is granted to any individual or institution to use, copy, modify, +// and distribute this software, provided that this complete copyright and +// permission notice is maintained, intact, in all copies and supporting +// documentation. +// +// Texas Instruments Incorporated provides this software "as is" without +// express or implied warranty. +// +// Created: MNF 06/13/89 Initial Design and Implementation +// Updated: LGO 08/09/89 Inherit from Generic +// Updated: MBN 09/07/89 Added conditional exception handling +// Updated: MBN 12/15/89 Sprinkled "const" qualifiers all over the place! 
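
For STAR and PLUS the matcher first consumes greedily with regrepeat and then hands characters back one at a time until the rest of the pattern matches (the while (no >= min_no) loop above). The same greedy-then-back-off shape for x* followed by a literal, as a toy illustration unrelated to the kwsys API:

  #include <cstdio>
  #include <cstring>

  // Match x* followed by the literal 'rest' at the start of 's' (toy example).
  static int match_star_then(const char* s, char x, const char* rest)
  {
    int n = 0;
    while (s[n] == x) {
      ++n;                 // greedy: take as many 'x' as possible
    }
    for (; n >= 0; --n) {  // back off until the tail fits
      if (std::strncmp(s + n, rest, std::strlen(rest)) == 0) {
        return 1;
      }
    }
    return 0;
  }

  int main()
  {
    std::printf("%d %d\n", match_star_then("aaab", 'a', "ab"),  // 1: "aa" + "ab"
                match_star_then("aaab", 'a', "ac"));            // 0: no split works
    return 0;
  }
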
+// Updated: DLS 03/22/91 New lite version +// + +#ifndef @KWSYS_NAMESPACE@_RegularExpression_hxx +#define @KWSYS_NAMESPACE@_RegularExpression_hxx + +#include <@KWSYS_NAMESPACE@/Configure.h> +#include <@KWSYS_NAMESPACE@/Configure.hxx> + +#include + +/* Disable useless Borland warnings. KWSys tries not to force things + on its includers, but there is no choice here. */ +#if defined(__BORLANDC__) +# pragma warn - 8027 /* function not inlined. */ +#endif + +namespace @KWSYS_NAMESPACE@ { + +// Forward declaration +class RegularExpression; + +/** \class RegularExpressionMatch + * \brief Stores the pattern matches of a RegularExpression + */ +class @KWSYS_NAMESPACE@_EXPORT RegularExpressionMatch +{ +public: + RegularExpressionMatch(); + + bool isValid() const; + void clear(); + + std::string::size_type start() const; + std::string::size_type end() const; + std::string::size_type start(int n) const; + std::string::size_type end(int n) const; + std::string match(int n) const; + + enum + { + NSUBEXP = 10 + }; + +private: + friend class RegularExpression; + const char* startp[NSUBEXP]; + const char* endp[NSUBEXP]; + const char* searchstring; +}; + +/** + * \brief Creates an invalid match object + */ +inline RegularExpressionMatch::RegularExpressionMatch() + : startp{} + , endp{} + , searchstring{} +{ +} + +/** + * \brief Returns true if the match pointers are valid + */ +inline bool RegularExpressionMatch::isValid() const +{ + return (this->startp[0] != nullptr); +} + +/** + * \brief Resets to the (invalid) construction state. + */ +inline void RegularExpressionMatch::clear() +{ + startp[0] = nullptr; + endp[0] = nullptr; + searchstring = nullptr; +} + +/** + * \brief Returns the start index of the full match. + */ +inline std::string::size_type RegularExpressionMatch::start() const +{ + return static_cast(this->startp[0] - searchstring); +} + +/** + * \brief Returns the end index of the full match. + */ +inline std::string::size_type RegularExpressionMatch::end() const +{ + return static_cast(this->endp[0] - searchstring); +} + +/** + * \brief Returns the start index of nth submatch. + * start(0) is the start of the full match. + */ +inline std::string::size_type RegularExpressionMatch::start(int n) const +{ + return static_cast(this->startp[n] - + this->searchstring); +} + +/** + * \brief Returns the end index of nth submatch. + * end(0) is the end of the full match. + */ +inline std::string::size_type RegularExpressionMatch::end(int n) const +{ + return static_cast(this->endp[n] - + this->searchstring); +} + +/** + * \brief Returns the nth submatch as a string. + */ +inline std::string RegularExpressionMatch::match(int n) const +{ + if (this->startp[n] == nullptr) { + return std::string(); + } else { + return std::string( + this->startp[n], + static_cast(this->endp[n] - this->startp[n])); + } +} + +/** \class RegularExpression + * \brief Implements pattern matching with regular expressions. + * + * This is the header file for the regular expression class. An object of + * this class contains a regular expression, in a special "compiled" format. + * This compiled format consists of several slots all kept as the objects + * private data. The RegularExpression class provides a convenient way to + * represent regular expressions. It makes it easy to search for the same + * regular expression in many different strings without having to compile a + * string to regular expression format more than necessary. + * + * This class implements pattern matching via regular expressions. 
+ * A regular expression allows a programmer to specify complex + * patterns that can be searched for and matched against the + * character string of a string object. In its simplest form, a + * regular expression is a sequence of characters used to + * search for exact character matches. However, many times the + * exact sequence to be found is not known, or only a match at + * the beginning or end of a string is desired. The RegularExpression regu- + * lar expression class implements regular expression pattern + * matching as is found and implemented in many UNIX commands + * and utilities. + * + * Example: The perl code + * + * $filename =~ m"([a-z]+)\.cc"; + * print $1; + * + * Is written as follows in C++ + * + * RegularExpression re("([a-z]+)\\.cc"); + * re.find(filename); + * cerr << re.match(1); + * + * + * The regular expression class provides a convenient mechanism + * for specifying and manipulating regular expressions. The + * regular expression object allows specification of such pat- + * terns by using the following regular expression metacharac- + * ters: + * + * ^ Matches at beginning of a line + * + * $ Matches at end of a line + * + * . Matches any single character + * + * [ ] Matches any character(s) inside the brackets + * + * [^ ] Matches any character(s) not inside the brackets + * + * - Matches any character in range on either side of a dash + * + * * Matches preceding pattern zero or more times + * + * + Matches preceding pattern one or more times + * + * ? Matches preceding pattern zero or once only + * + * () Saves a matched expression and uses it in a later match + * + * Note that more than one of these metacharacters can be used + * in a single regular expression in order to create complex + * search patterns. For example, the pattern [^ab1-9] says to + * match any character sequence that does not begin with the + * characters "ab" followed by numbers in the series one + * through nine. + * + * There are three constructors for RegularExpression. One just creates an + * empty RegularExpression object. Another creates a RegularExpression + * object and initializes it with a regular expression that is given in the + * form of a char*. The third takes a reference to a RegularExpression + * object as an argument and creates an object initialized with the + * information from the given RegularExpression object. + * + * The find member function finds the first occurrence of the regular + * expression of that object in the string given to find as an argument. Find + * returns a boolean, and if true, mutates the private data appropriately. + * Find sets pointers to the beginning and end of the thing last found, they + * are pointers into the actual string that was searched. The start and end + * member functions return indices into the searched string that correspond + * to the beginning and end pointers respectively. The compile member + * function takes a char* and puts the compiled version of the char* argument + * into the object's private data fields. The == and != operators only check + * the to see if the compiled regular expression is the same, and the + * deep_equal functions also checks to see if the start and end pointers are + * the same. The is_valid function returns false if program is set to + * nullptr, (i.e. there is no valid compiled expression). The set_invalid + * function sets the program to nullptr (Warning: this deletes the compiled + * expression). 
The following examples may help clarify regular expression + * usage: + * + * * The regular expression "^hello" matches a "hello" only at the + * beginning of a line. It would match "hello there" but not "hi, + * hello there". + * + * * The regular expression "long$" matches a "long" only at the end + * of a line. It would match "so long\0", but not "long ago". + * + * * The regular expression "t..t..g" will match anything that has a + * "t" then any two characters, another "t", any two characters and + * then a "g". It will match "testing", or "test again" but would + * not match "toasting" + * + * * The regular expression "[1-9ab]" matches any number one through + * nine, and the characters "a" and "b". It would match "hello 1" + * or "begin", but would not match "no-match". + * + * * The regular expression "[^1-9ab]" matches any character that is + * not a number one through nine, or an "a" or "b". It would NOT + * match "hello 1" or "begin", but would match "no-match". + * + * * The regular expression "br* " matches something that begins with + * a "b", is followed by zero or more "r"s, and ends in a space. It + * would match "brrrrr ", and "b ", but would not match "brrh ". + * + * * The regular expression "br+ " matches something that begins with + * a "b", is followed by one or more "r"s, and ends in a space. It + * would match "brrrrr ", and "br ", but would not match "b " or + * "brrh ". + * + * * The regular expression "br? " matches something that begins with + * a "b", is followed by zero or one "r"s, and ends in a space. It + * would match "br ", and "b ", but would not match "brrrr " or + * "brrh ". + * + * * The regular expression "(..p)b" matches something ending with pb + * and beginning with whatever the two characters before the first p + * encountered in the line were. It would find "repb" in "rep drepa + * qrepb". The regular expression "(..p)a" would find "repa qrepb" + * in "rep drepa qrepb" + * + * * The regular expression "d(..p)" matches something ending with p, + * beginning with d, and having two characters in between that are + * the same as the two characters before the first p encountered in + * the line. It would match "drepa qrepb" in "rep drepa qrepb". + * + * All methods of RegularExpression can be called simultaneously from + * different threads but only if each invocation uses an own instance of + * RegularExpression. + */ +class @KWSYS_NAMESPACE@_EXPORT RegularExpression +{ +public: + /** + * Instantiate RegularExpression with program=nullptr. + */ + inline RegularExpression(); + + /** + * Instantiate RegularExpression with compiled char*. + */ + inline RegularExpression(char const*); + + /** + * Instantiate RegularExpression as a copy of another regular expression. + */ + RegularExpression(RegularExpression const&); + + /** + * Instantiate RegularExpression with compiled string. + */ + inline RegularExpression(std::string const&); + + /** + * Destructor. + */ + inline ~RegularExpression(); + + /** + * Compile a regular expression into internal code + * for later pattern matching. + */ + bool compile(char const*); + + /** + * Compile a regular expression into internal code + * for later pattern matching. + */ + inline bool compile(std::string const&); + + /** + * Matches the regular expression to the given string. + * Returns true if found, and sets start and end indexes + * in the RegularExpressionMatch instance accordingly. + * + * This method is thread safe when called with different + * RegularExpressionMatch instances. 
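+   *
+   * A minimal usage sketch (illustrative only; the names "re" and "m"
+   * below are hypothetical caller-side variables, not part of this
+   * interface): share one compiled expression and give each thread its
+   * own match object.
+   *
+   *   RegularExpression re("([a-z]+)\\.cc");  // compile once
+   *   RegularExpressionMatch m;               // one instance per thread
+   *   if (re.find("foo.cc", m)) {
+   *     std::string base = m.match(1);        // base == "foo"
+   *   }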
+ */ + bool find(char const*, RegularExpressionMatch&) const; + + /** + * Matches the regular expression to the given string. + * Returns true if found, and sets start and end indexes accordingly. + */ + inline bool find(char const*); + + /** + * Matches the regular expression to the given std string. + * Returns true if found, and sets start and end indexes accordingly. + */ + inline bool find(std::string const&); + + /** + * Match indices + */ + inline RegularExpressionMatch const& regMatch() const; + inline std::string::size_type start() const; + inline std::string::size_type end() const; + inline std::string::size_type start(int n) const; + inline std::string::size_type end(int n) const; + + /** + * Match strings + */ + inline std::string match(int n) const; + + /** + * Copy the given regular expression. + */ + RegularExpression& operator=(const RegularExpression& rxp); + + /** + * Returns true if two regular expressions have the same + * compiled program for pattern matching. + */ + bool operator==(RegularExpression const&) const; + + /** + * Returns true if two regular expressions have different + * compiled program for pattern matching. + */ + inline bool operator!=(RegularExpression const&) const; + + /** + * Returns true if have the same compiled regular expressions + * and the same start and end pointers. + */ + bool deep_equal(RegularExpression const&) const; + + /** + * True if the compiled regexp is valid. + */ + inline bool is_valid() const; + + /** + * Marks the regular expression as invalid. + */ + inline void set_invalid(); + +private: + RegularExpressionMatch regmatch; + char regstart; // Internal use only + char reganch; // Internal use only + const char* regmust; // Internal use only + std::string::size_type regmlen; // Internal use only + char* program; + int progsize; +}; + +/** + * Create an empty regular expression. + */ +inline RegularExpression::RegularExpression() + : regstart{} + , reganch{} + , regmust{} + , program{ nullptr } + , progsize{} +{ +} + +/** + * Creates a regular expression from string s, and + * compiles s. + */ +inline RegularExpression::RegularExpression(const char* s) + : regstart{} + , reganch{} + , regmust{} + , program{ nullptr } + , progsize{} +{ + if (s) { + this->compile(s); + } +} + +/** + * Creates a regular expression from string s, and + * compiles s. + */ +inline RegularExpression::RegularExpression(const std::string& s) + : regstart{} + , reganch{} + , regmust{} + , program{ nullptr } + , progsize{} +{ + this->compile(s); +} + +/** + * Destroys and frees space allocated for the regular expression. + */ +inline RegularExpression::~RegularExpression() +{ + //#ifndef _WIN32 + delete[] this->program; + //#endif +} + +/** + * Compile a regular expression into internal code + * for later pattern matching. + */ +inline bool RegularExpression::compile(std::string const& s) +{ + return this->compile(s.c_str()); +} + +/** + * Matches the regular expression to the given std string. + * Returns true if found, and sets start and end indexes accordingly. + */ +inline bool RegularExpression::find(const char* s) +{ + return this->find(s, this->regmatch); +} + +/** + * Matches the regular expression to the given std string. + * Returns true if found, and sets start and end indexes accordingly. 
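+ *
+ * The results are stored in this object's internal RegularExpressionMatch,
+ * so a subsequent regMatch(), start(), end() or match() call refers to
+ * this most recent find().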
+ */ +inline bool RegularExpression::find(std::string const& s) +{ + return this->find(s.c_str()); +} + +/** + * Returns the internal match object + */ +inline RegularExpressionMatch const& RegularExpression::regMatch() const +{ + return this->regmatch; +} + +/** + * Returns the start index of the full match. + */ +inline std::string::size_type RegularExpression::start() const +{ + return regmatch.start(); +} + +/** + * Returns the end index of the full match. + */ +inline std::string::size_type RegularExpression::end() const +{ + return regmatch.end(); +} + +/** + * Return start index of nth submatch. start(0) is the start of the full match. + */ +inline std::string::size_type RegularExpression::start(int n) const +{ + return regmatch.start(n); +} + +/** + * Return end index of nth submatch. end(0) is the end of the full match. + */ +inline std::string::size_type RegularExpression::end(int n) const +{ + return regmatch.end(n); +} + +/** + * Return nth submatch as a string. + */ +inline std::string RegularExpression::match(int n) const +{ + return regmatch.match(n); +} + +/** + * Returns true if two regular expressions have different + * compiled program for pattern matching. + */ +inline bool RegularExpression::operator!=(const RegularExpression& r) const +{ + return (!(*this == r)); +} + +/** + * Returns true if a valid regular expression is compiled + * and ready for pattern matching. + */ +inline bool RegularExpression::is_valid() const +{ + return (this->program != nullptr); +} + +inline void RegularExpression::set_invalid() +{ + //#ifndef _WIN32 + delete[] this->program; + //#endif + this->program = nullptr; +} + +} // namespace @KWSYS_NAMESPACE@ + +#endif diff --git a/test/API/driver/kwsys/SetupForDevelopment.sh b/test/API/driver/kwsys/SetupForDevelopment.sh new file mode 100644 index 00000000000..c3a2b1655bd --- /dev/null +++ b/test/API/driver/kwsys/SetupForDevelopment.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +cd "${BASH_SOURCE%/*}" && +GitSetup/setup-user && echo && +GitSetup/setup-hooks && echo && +GitSetup/setup-aliases && echo && +GitSetup/setup-upstream && echo && +GitSetup/tips + +# Rebase master by default +git config rebase.stat true +git config branch.master.rebase true + +# Disable Gerrit hook explicitly so the commit-msg hook will +# not complain even if some gerrit remotes are still configured. +git config hooks.GerritId false + +# Record the version of this setup so Scripts/pre-commit can check it. +SetupForDevelopment_VERSION=2 +git config hooks.SetupForDevelopment ${SetupForDevelopment_VERSION} diff --git a/test/API/driver/kwsys/SharedForward.h.in b/test/API/driver/kwsys/SharedForward.h.in new file mode 100644 index 00000000000..5716cd4f1e1 --- /dev/null +++ b/test/API/driver/kwsys/SharedForward.h.in @@ -0,0 +1,879 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_SharedForward_h +# define @KWSYS_NAMESPACE@_SharedForward_h + +/* + This header is used to create a forwarding executable sets up the + shared library search path and replaces itself with a real + executable. This is useful when creating installations on UNIX with + shared libraries that will run from any install directory. 
Typical + usage: + + #if defined(CMAKE_INTDIR) + # define CONFIG_DIR_PRE CMAKE_INTDIR "/" + # define CONFIG_DIR_POST "/" CMAKE_INTDIR + #else + # define CONFIG_DIR_PRE "" + # define CONFIG_DIR_POST "" + #endif + #define @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD "/path/to/foo-build/bin" + #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD "." CONFIG_DIR_POST + #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL "../lib/foo-1.2" + #define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD CONFIG_DIR_PRE "foo-real" + #define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL + "../lib/foo-1.2/foo-real" + #define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND "--command" + #define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT "--print" + #define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD "--ldd" + #if defined(CMAKE_INTDIR) + # define @KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME CMAKE_INTDIR + #endif + #include <@KWSYS_NAMESPACE@/SharedForward.h> + int main(int argc, char** argv) + { + return @KWSYS_NAMESPACE@_shared_forward_to_real(argc, argv); + } + + Specify search and executable paths relative to the forwarding + executable location or as full paths. Include no trailing slash. + In the case of a multi-configuration build, when CMAKE_INTDIR is + defined, the DIR_BUILD setting should point at the directory above + the executable (the one containing the per-configuration + subdirectory specified by CMAKE_INTDIR). Then PATH_BUILD entries + and EXE_BUILD should be specified relative to this location and use + CMAKE_INTDIR as necessary. In the above example imagine appending + the PATH_BUILD or EXE_BUILD setting to the DIR_BUILD setting. The + result should form a valid path with per-configuration subdirectory. + + Additional paths may be specified in the PATH_BUILD and PATH_INSTALL + variables by using comma-separated strings. For example: + + #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD \ + "." CONFIG_DIR_POST, "/path/to/bar-build" CONFIG_DIR_POST + #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL \ + "../lib/foo-1.2", "../lib/bar-4.5" + + See the comments below for specific explanations of each macro. +*/ + +/* Disable -Wcast-qual warnings since they are too hard to fix in a + cross-platform way. */ +# if defined(__clang__) && defined(__has_warning) +# if __has_warning("-Wcast-qual") +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wcast-qual" +# endif +# endif + +# if defined(__BORLANDC__) && !defined(__cplusplus) +/* Code has no effect; raised by winnt.h in C (not C++) when ignoring an + unused parameter using "(param)" syntax (i.e. no cast to void). */ +# pragma warn - 8019 +# endif + +/* Full path to the directory in which this executable is built. Do + not include a trailing slash. */ +# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD) +# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD" +# endif +# if !defined(KWSYS_SHARED_FORWARD_DIR_BUILD) +# define KWSYS_SHARED_FORWARD_DIR_BUILD \ + @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD +# endif + +/* Library search path for build tree. */ +# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD) +# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD" +# endif +# if !defined(KWSYS_SHARED_FORWARD_PATH_BUILD) +# define KWSYS_SHARED_FORWARD_PATH_BUILD \ + @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD +# endif + +/* Library search path for install tree. 
*/ +# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL) +# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL" +# endif +# if !defined(KWSYS_SHARED_FORWARD_PATH_INSTALL) +# define KWSYS_SHARED_FORWARD_PATH_INSTALL \ + @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL +# endif + +/* The real executable to which to forward in the build tree. */ +# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD) +# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD" +# endif +# if !defined(KWSYS_SHARED_FORWARD_EXE_BUILD) +# define KWSYS_SHARED_FORWARD_EXE_BUILD \ + @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD +# endif + +/* The real executable to which to forward in the install tree. */ +# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL) +# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL" +# endif +# if !defined(KWSYS_SHARED_FORWARD_EXE_INSTALL) +# define KWSYS_SHARED_FORWARD_EXE_INSTALL \ + @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL +# endif + +/* The configuration name with which this executable was built (Debug/Release). + */ +# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME) +# define KWSYS_SHARED_FORWARD_CONFIG_NAME \ + @KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME +# else +# undef KWSYS_SHARED_FORWARD_CONFIG_NAME +# endif + +/* Create command line option to replace executable. */ +# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND) +# if !defined(KWSYS_SHARED_FORWARD_OPTION_COMMAND) +# define KWSYS_SHARED_FORWARD_OPTION_COMMAND \ + @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND +# endif +# else +# undef KWSYS_SHARED_FORWARD_OPTION_COMMAND +# endif + +/* Create command line option to print environment setting and exit. */ +# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT) +# if !defined(KWSYS_SHARED_FORWARD_OPTION_PRINT) +# define KWSYS_SHARED_FORWARD_OPTION_PRINT \ + @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT +# endif +# else +# undef KWSYS_SHARED_FORWARD_OPTION_PRINT +# endif + +/* Create command line option to run ldd or equivalent. */ +# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD) +# if !defined(KWSYS_SHARED_FORWARD_OPTION_LDD) +# define KWSYS_SHARED_FORWARD_OPTION_LDD \ + @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD +# endif +# else +# undef KWSYS_SHARED_FORWARD_OPTION_LDD +# endif + +/* Include needed system headers. */ + +# include +# include +# include /* size_t */ +# include +# include +# include + +# if defined(_WIN32) && !defined(__CYGWIN__) +# include + +# include +# include +# define KWSYS_SHARED_FORWARD_ESCAPE_ARGV /* re-escape argv for execvp */ +# else +# include +# include +# endif + +/* Configuration for this platform. */ + +/* The path separator for this platform. */ +# if defined(_WIN32) && !defined(__CYGWIN__) +# define KWSYS_SHARED_FORWARD_PATH_SEP ';' +# define KWSYS_SHARED_FORWARD_PATH_SLASH '\\' +# else +# define KWSYS_SHARED_FORWARD_PATH_SEP ':' +# define KWSYS_SHARED_FORWARD_PATH_SLASH '/' +# endif +static const char kwsys_shared_forward_path_sep[2] = { + KWSYS_SHARED_FORWARD_PATH_SEP, 0 +}; +static const char kwsys_shared_forward_path_slash[2] = { + KWSYS_SHARED_FORWARD_PATH_SLASH, 0 +}; + +/* The maximum length of a file name. */ +# if defined(PATH_MAX) +# define KWSYS_SHARED_FORWARD_MAXPATH PATH_MAX +# elif defined(MAXPATHLEN) +# define KWSYS_SHARED_FORWARD_MAXPATH MAXPATHLEN +# else +# define KWSYS_SHARED_FORWARD_MAXPATH 16384 +# endif + +/* Select the environment variable holding the shared library runtime + search path for this platform and build configuration. 
Also select + ldd command equivalent. */ + +/* Linux */ +# if defined(__linux) +# define KWSYS_SHARED_FORWARD_LDD "ldd" +# define KWSYS_SHARED_FORWARD_LDD_N 1 +# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" + +/* FreeBSD */ +# elif defined(__FreeBSD__) +# define KWSYS_SHARED_FORWARD_LDD "ldd" +# define KWSYS_SHARED_FORWARD_LDD_N 1 +# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" + +/* OpenBSD */ +# elif defined(__OpenBSD__) +# define KWSYS_SHARED_FORWARD_LDD "ldd" +# define KWSYS_SHARED_FORWARD_LDD_N 1 +# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" + +/* OS X */ +# elif defined(__APPLE__) +# define KWSYS_SHARED_FORWARD_LDD "otool", "-L" +# define KWSYS_SHARED_FORWARD_LDD_N 2 +# define KWSYS_SHARED_FORWARD_LDPATH "DYLD_LIBRARY_PATH" + +/* AIX */ +# elif defined(_AIX) +# define KWSYS_SHARED_FORWARD_LDD "dump", "-H" +# define KWSYS_SHARED_FORWARD_LDD_N 2 +# define KWSYS_SHARED_FORWARD_LDPATH "LIBPATH" + +/* SUN */ +# elif defined(__sun) +# define KWSYS_SHARED_FORWARD_LDD "ldd" +# define KWSYS_SHARED_FORWARD_LDD_N 1 +# include +# if defined(_ILP32) +# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" +# elif defined(_LP64) +# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH_64" +# endif + +/* HP-UX */ +# elif defined(__hpux) +# define KWSYS_SHARED_FORWARD_LDD "chatr" +# define KWSYS_SHARED_FORWARD_LDD_N 1 +# if defined(__LP64__) +# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" +# else +# define KWSYS_SHARED_FORWARD_LDPATH "SHLIB_PATH" +# endif + +/* SGI MIPS */ +# elif defined(__sgi) && defined(_MIPS_SIM) +# define KWSYS_SHARED_FORWARD_LDD "ldd" +# define KWSYS_SHARED_FORWARD_LDD_N 1 +# if _MIPS_SIM == _ABIO32 +# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" +# elif _MIPS_SIM == _ABIN32 +# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARYN32_PATH" +# elif _MIPS_SIM == _ABI64 +# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY64_PATH" +# endif + +/* Cygwin */ +# elif defined(__CYGWIN__) +# define KWSYS_SHARED_FORWARD_LDD \ + "cygcheck" /* TODO: cygwin 1.7 has ldd \ + */ +# define KWSYS_SHARED_FORWARD_LDD_N 1 +# define KWSYS_SHARED_FORWARD_LDPATH "PATH" + +/* Windows */ +# elif defined(_WIN32) +# define KWSYS_SHARED_FORWARD_LDPATH "PATH" + +/* Guess on this unknown system. */ +# else +# define KWSYS_SHARED_FORWARD_LDD "ldd" +# define KWSYS_SHARED_FORWARD_LDD_N 1 +# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" +# endif + +# ifdef KWSYS_SHARED_FORWARD_ESCAPE_ARGV +typedef struct kwsys_sf_arg_info_s +{ + const char* arg; + int size; + int quote; +} kwsys_sf_arg_info; + +static kwsys_sf_arg_info kwsys_sf_get_arg_info(const char* in) +{ + /* Initialize information. */ + kwsys_sf_arg_info info; + + /* String iterator. */ + const char* c; + + /* Keep track of how many backslashes have been encountered in a row. */ + int windows_backslashes = 0; + + /* Start with the length of the original argument, plus one for + either a terminating null or a separating space. */ + info.arg = in; + info.size = (int)strlen(in) + 1; + info.quote = 0; + + /* Scan the string for characters that require escaping or quoting. */ + for (c = in; *c; ++c) { + /* Check whether this character needs quotes. */ + if (strchr(" \t?'#&<>|^", *c)) { + info.quote = 1; + } + + /* On Windows only backslashes and double-quotes need escaping. */ + if (*c == '\\') { + /* Found a backslash. It may need to be escaped later. */ + ++windows_backslashes; + } else if (*c == '"') { + /* Found a double-quote. We need to escape it and all + immediately preceding backslashes. 
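+         For example (illustrative), the argument a\"b needs two extra
+         characters and is later emitted by kwsys_sf_get_arg as a\\\"b.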
*/ + info.size += windows_backslashes + 1; + windows_backslashes = 0; + } else { + /* Found another character. This eliminates the possibility + that any immediately preceding backslashes will be + escaped. */ + windows_backslashes = 0; + } + } + + /* Check whether the argument needs surrounding quotes. */ + if (info.quote) { + /* Surrounding quotes are needed. Allocate space for them. */ + info.size += 2; + + /* We must escape all ending backslashes when quoting on windows. */ + info.size += windows_backslashes; + } + + return info; +} + +static char* kwsys_sf_get_arg(kwsys_sf_arg_info info, char* out) +{ + /* String iterator. */ + const char* c; + + /* Keep track of how many backslashes have been encountered in a row. */ + int windows_backslashes = 0; + + /* Whether the argument must be quoted. */ + if (info.quote) { + /* Add the opening quote for this argument. */ + *out++ = '"'; + } + + /* Scan the string for characters that require escaping or quoting. */ + for (c = info.arg; *c; ++c) { + /* On Windows only backslashes and double-quotes need escaping. */ + if (*c == '\\') { + /* Found a backslash. It may need to be escaped later. */ + ++windows_backslashes; + } else if (*c == '"') { + /* Found a double-quote. Escape all immediately preceding + backslashes. */ + while (windows_backslashes > 0) { + --windows_backslashes; + *out++ = '\\'; + } + + /* Add the backslash to escape the double-quote. */ + *out++ = '\\'; + } else { + /* We encountered a normal character. This eliminates any + escaping needed for preceding backslashes. */ + windows_backslashes = 0; + } + + /* Store this character. */ + *out++ = *c; + } + + if (info.quote) { + /* Add enough backslashes to escape any trailing ones. */ + while (windows_backslashes > 0) { + --windows_backslashes; + *out++ = '\\'; + } + + /* Add the closing quote for this argument. */ + *out++ = '"'; + } + + /* Store a terminating null without incrementing. */ + *out = 0; + + return out; +} +# endif + +/* Function to convert a logical or relative path to a physical full path. */ +static int kwsys_shared_forward_realpath(const char* in_path, char* out_path) +{ +# if defined(_WIN32) && !defined(__CYGWIN__) + /* Implementation for Windows. */ + DWORD n = + GetFullPathNameA(in_path, KWSYS_SHARED_FORWARD_MAXPATH, out_path, 0); + return n > 0 && n <= KWSYS_SHARED_FORWARD_MAXPATH; +# else + /* Implementation for UNIX. 
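+     realpath() writes the canonical form of in_path into out_path (which
+     callers size to KWSYS_SHARED_FORWARD_MAXPATH) and returns NULL on
+     failure, so the comparison below yields the 0/1 result expected here.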
*/ + return realpath(in_path, out_path) != 0; +# endif +} + +static int kwsys_shared_forward_samepath(const char* file1, const char* file2) +{ +# if defined(_WIN32) + int result = 0; + HANDLE h1 = CreateFileA(file1, GENERIC_READ, FILE_SHARE_READ, NULL, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); + HANDLE h2 = CreateFileA(file2, GENERIC_READ, FILE_SHARE_READ, NULL, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); + if (h1 != INVALID_HANDLE_VALUE && h2 != INVALID_HANDLE_VALUE) { + BY_HANDLE_FILE_INFORMATION fi1; + BY_HANDLE_FILE_INFORMATION fi2; + GetFileInformationByHandle(h1, &fi1); + GetFileInformationByHandle(h2, &fi2); + result = (fi1.dwVolumeSerialNumber == fi2.dwVolumeSerialNumber && + fi1.nFileIndexHigh == fi2.nFileIndexHigh && + fi1.nFileIndexLow == fi2.nFileIndexLow); + } + CloseHandle(h1); + CloseHandle(h2); + return result; +# else + struct stat fs1, fs2; + return (stat(file1, &fs1) == 0 && stat(file2, &fs2) == 0 && + memcmp(&fs2.st_dev, &fs1.st_dev, sizeof(fs1.st_dev)) == 0 && + memcmp(&fs2.st_ino, &fs1.st_ino, sizeof(fs1.st_ino)) == 0 && + fs2.st_size == fs1.st_size); +# endif +} + +/* Function to report a system error message. */ +static void kwsys_shared_forward_strerror(char* message) +{ +# if defined(_WIN32) && !defined(__CYGWIN__) + /* Implementation for Windows. */ + DWORD original = GetLastError(); + DWORD length = + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + 0, original, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + message, KWSYS_SHARED_FORWARD_MAXPATH, 0); + if (length < 1 || length > KWSYS_SHARED_FORWARD_MAXPATH) { + /* FormatMessage failed. Use a default message. */ + _snprintf(message, KWSYS_SHARED_FORWARD_MAXPATH, + "Error 0x%X (FormatMessage failed with error 0x%X)", original, + GetLastError()); + } +# else + /* Implementation for UNIX. */ + strcpy(message, strerror(errno)); +# endif +} + +/* Functions to execute a child process. */ +static void kwsys_shared_forward_execvp(const char* cmd, + char const* const* argv) +{ +# ifdef KWSYS_SHARED_FORWARD_ESCAPE_ARGV + /* Count the number of arguments. */ + int argc = 0; + { + char const* const* argvc; + for (argvc = argv; *argvc; ++argvc, ++argc) { + } + } + + /* Create the escaped arguments. */ + { + char** nargv = (char**)malloc((argc + 1) * sizeof(char*)); + int i; + for (i = 0; i < argc; ++i) { + kwsys_sf_arg_info info = kwsys_sf_get_arg_info(argv[i]); + nargv[i] = (char*)malloc(info.size); + kwsys_sf_get_arg(info, nargv[i]); + } + nargv[argc] = 0; + + /* Replace the command line to be used. */ + argv = (char const* const*)nargv; + } +# endif + +/* Invoke the child process. */ +# if defined(_MSC_VER) + _execvp(cmd, argv); +# elif defined(__MINGW32__) && !defined(__MINGW64__) + execvp(cmd, argv); +# else + execvp(cmd, (char* const*)argv); +# endif +} + +/* Function to get the directory containing the given file or directory. */ +static void kwsys_shared_forward_dirname(const char* begin, char* result) +{ + /* Find the location of the last slash. */ + int last_slash_index = -1; + const char* end = begin + strlen(begin); + for (; begin <= end && last_slash_index < 0; --end) { + if (*end == '/' || *end == '\\') { + last_slash_index = (int)(end - begin); + } + } + + /* Handle each case of the index of the last slash. */ + if (last_slash_index < 0) { + /* No slashes. */ + strcpy(result, "."); + } else if (last_slash_index == 0) { + /* Only one leading slash. 
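+       For example (illustrative), the dirname of "/foo" is just "/".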
*/ + strcpy(result, kwsys_shared_forward_path_slash); + } +# if defined(_WIN32) + else if (last_slash_index == 2 && begin[1] == ':') { + /* Only one leading drive letter and slash. */ + strncpy(result, begin, (size_t)last_slash_index); + result[last_slash_index] = KWSYS_SHARED_FORWARD_PATH_SLASH; + result[last_slash_index + 1] = 0; + } +# endif + else { + /* A non-leading slash. */ + strncpy(result, begin, (size_t)last_slash_index); + result[last_slash_index] = 0; + } +} + +/* Function to check if a file exists and is executable. */ +static int kwsys_shared_forward_is_executable(const char* f) +{ +# if defined(_MSC_VER) +# define KWSYS_SHARED_FORWARD_ACCESS _access +# else +# define KWSYS_SHARED_FORWARD_ACCESS access +# endif +# if defined(X_OK) +# define KWSYS_SHARED_FORWARD_ACCESS_OK X_OK +# else +# define KWSYS_SHARED_FORWARD_ACCESS_OK 04 +# endif + if (KWSYS_SHARED_FORWARD_ACCESS(f, KWSYS_SHARED_FORWARD_ACCESS_OK) == 0) { + return 1; + } else { + return 0; + } +} + +/* Function to locate the executable currently running. */ +static int kwsys_shared_forward_self_path(const char* argv0, char* result) +{ + /* Check whether argv0 has a slash. */ + int has_slash = 0; + const char* p = argv0; + for (; *p && !has_slash; ++p) { + if (*p == '/' || *p == '\\') { + has_slash = 1; + } + } + + if (has_slash) { + /* There is a slash. Use the dirname of the given location. */ + kwsys_shared_forward_dirname(argv0, result); + return 1; + } else { + /* There is no slash. Search the PATH for the executable. */ + const char* path = getenv("PATH"); + const char* begin = path; + const char* end = begin + (begin ? strlen(begin) : 0); + const char* first = begin; + while (first != end) { + /* Store the end of this path entry. */ + const char* last; + + /* Skip all path separators. */ + for (; *first && *first == KWSYS_SHARED_FORWARD_PATH_SEP; ++first) + ; + + /* Find the next separator. */ + for (last = first; *last && *last != KWSYS_SHARED_FORWARD_PATH_SEP; + ++last) + ; + + /* If we got a non-empty directory, look for the executable there. */ + if (first < last) { + /* Determine the length without trailing slash. */ + size_t length = (size_t)(last - first); + if (*(last - 1) == '/' || *(last - 1) == '\\') { + --length; + } + + /* Construct the name of the executable in this location. */ + strncpy(result, first, length); + result[length] = KWSYS_SHARED_FORWARD_PATH_SLASH; + strcpy(result + (length) + 1, argv0); + + /* Check if it exists and is executable. */ + if (kwsys_shared_forward_is_executable(result)) { + /* Found it. */ + result[length] = 0; + return 1; + } + } + + /* Move to the next directory in the path. */ + first = last; + } + } + + /* We could not find the executable. */ + return 0; +} + +/* Function to convert a specified path to a full path. If it is not + already full, it is taken relative to the self path. */ +static int kwsys_shared_forward_fullpath(const char* self_path, + const char* in_path, char* result, + const char* desc) +{ + /* Check the specified path type. */ + if (in_path[0] == '/') { + /* Already a full path. */ + strcpy(result, in_path); + } +# if defined(_WIN32) + else if (in_path[0] && in_path[1] == ':') { + /* Already a full path. */ + strcpy(result, in_path); + } +# endif + else { + /* Relative to self path. 
*/ + char temp_path[KWSYS_SHARED_FORWARD_MAXPATH]; + strcpy(temp_path, self_path); + strcat(temp_path, kwsys_shared_forward_path_slash); + strcat(temp_path, in_path); + if (!kwsys_shared_forward_realpath(temp_path, result)) { + if (desc) { + char msgbuf[KWSYS_SHARED_FORWARD_MAXPATH]; + kwsys_shared_forward_strerror(msgbuf); + fprintf(stderr, "Error converting %s \"%s\" to real path: %s\n", desc, + temp_path, msgbuf); + } + return 0; + } + } + return 1; +} + +/* Function to compute the library search path and executable name + based on the self path. */ +static int kwsys_shared_forward_get_settings(const char* self_path, + char* ldpath, char* exe) +{ + /* Possible search paths. */ + static const char* search_path_build[] = { KWSYS_SHARED_FORWARD_PATH_BUILD, + 0 }; + static const char* search_path_install[] = { + KWSYS_SHARED_FORWARD_PATH_INSTALL, 0 + }; + + /* Chosen paths. */ + const char** search_path; + const char* exe_path; + +/* Get the real name of the build and self paths. */ +# if defined(KWSYS_SHARED_FORWARD_CONFIG_NAME) + char build_path[] = + KWSYS_SHARED_FORWARD_DIR_BUILD "/" KWSYS_SHARED_FORWARD_CONFIG_NAME; + char self_path_logical[KWSYS_SHARED_FORWARD_MAXPATH]; +# else + char build_path[] = KWSYS_SHARED_FORWARD_DIR_BUILD; + const char* self_path_logical = self_path; +# endif + char build_path_real[KWSYS_SHARED_FORWARD_MAXPATH]; + char self_path_real[KWSYS_SHARED_FORWARD_MAXPATH]; + if (!kwsys_shared_forward_realpath(self_path, self_path_real)) { + char msgbuf[KWSYS_SHARED_FORWARD_MAXPATH]; + kwsys_shared_forward_strerror(msgbuf); + fprintf(stderr, "Error converting self path \"%s\" to real path: %s\n", + self_path, msgbuf); + return 0; + } + + /* Check whether we are running in the build tree or an install tree. */ + if (kwsys_shared_forward_realpath(build_path, build_path_real) && + kwsys_shared_forward_samepath(self_path_real, build_path_real)) { + /* Running in build tree. Use the build path and exe. */ + search_path = search_path_build; +# if defined(_WIN32) + exe_path = KWSYS_SHARED_FORWARD_EXE_BUILD ".exe"; +# else + exe_path = KWSYS_SHARED_FORWARD_EXE_BUILD; +# endif + +# if defined(KWSYS_SHARED_FORWARD_CONFIG_NAME) + /* Remove the configuration directory from self_path. */ + kwsys_shared_forward_dirname(self_path, self_path_logical); +# endif + } else { + /* Running in install tree. Use the install path and exe. */ + search_path = search_path_install; +# if defined(_WIN32) + exe_path = KWSYS_SHARED_FORWARD_EXE_INSTALL ".exe"; +# else + exe_path = KWSYS_SHARED_FORWARD_EXE_INSTALL; +# endif + +# if defined(KWSYS_SHARED_FORWARD_CONFIG_NAME) + /* Use the original self path directory. */ + strcpy(self_path_logical, self_path); +# endif + } + + /* Construct the runtime search path. */ + { + const char** dir; + for (dir = search_path; *dir; ++dir) { + /* Add separator between path components. */ + if (dir != search_path) { + strcat(ldpath, kwsys_shared_forward_path_sep); + } + + /* Add this path component. */ + if (!kwsys_shared_forward_fullpath(self_path_logical, *dir, + ldpath + strlen(ldpath), + "runtime path entry")) { + return 0; + } + } + } + + /* Construct the executable location. */ + if (!kwsys_shared_forward_fullpath(self_path_logical, exe_path, exe, + "executable file")) { + return 0; + } + return 1; +} + +/* Function to print why execution of a command line failed. 
*/ +static void kwsys_shared_forward_print_failure(char const* const* argv) +{ + char msg[KWSYS_SHARED_FORWARD_MAXPATH]; + char const* const* arg = argv; + kwsys_shared_forward_strerror(msg); + fprintf(stderr, "Error running"); + for (; *arg; ++arg) { + fprintf(stderr, " \"%s\"", *arg); + } + fprintf(stderr, ": %s\n", msg); +} + +/* Static storage space to store the updated environment variable. */ +static char kwsys_shared_forward_ldpath[65535] = + KWSYS_SHARED_FORWARD_LDPATH "="; + +/* Main driver function to be called from main. */ +static int @KWSYS_NAMESPACE@_shared_forward_to_real(int argc, char** argv_in) +{ + char const** argv = (char const**)argv_in; + /* Get the directory containing this executable. */ + char self_path[KWSYS_SHARED_FORWARD_MAXPATH]; + if (kwsys_shared_forward_self_path(argv[0], self_path)) { + /* Found this executable. Use it to get the library directory. */ + char exe[KWSYS_SHARED_FORWARD_MAXPATH]; + if (kwsys_shared_forward_get_settings(self_path, + kwsys_shared_forward_ldpath, exe)) { + /* Append the old runtime search path. */ + const char* old_ldpath = getenv(KWSYS_SHARED_FORWARD_LDPATH); + if (old_ldpath) { + strcat(kwsys_shared_forward_ldpath, kwsys_shared_forward_path_sep); + strcat(kwsys_shared_forward_ldpath, old_ldpath); + } + + /* Store the environment variable. */ + putenv(kwsys_shared_forward_ldpath); + +# if defined(KWSYS_SHARED_FORWARD_OPTION_COMMAND) + /* Look for the command line replacement option. */ + if (argc > 1 && + strcmp(argv[1], KWSYS_SHARED_FORWARD_OPTION_COMMAND) == 0) { + if (argc > 2) { + /* Use the command line given. */ + strcpy(exe, argv[2]); + argv += 2; + argc -= 2; + } else { + /* The option was not given an executable. */ + fprintf(stderr, + "Option " KWSYS_SHARED_FORWARD_OPTION_COMMAND + " must be followed by a command line.\n"); + return 1; + } + } +# endif + +# if defined(KWSYS_SHARED_FORWARD_OPTION_PRINT) + /* Look for the print command line option. */ + if (argc > 1 && + strcmp(argv[1], KWSYS_SHARED_FORWARD_OPTION_PRINT) == 0) { + fprintf(stdout, "%s\n", kwsys_shared_forward_ldpath); + fprintf(stdout, "%s\n", exe); + return 0; + } +# endif + +# if defined(KWSYS_SHARED_FORWARD_OPTION_LDD) + /* Look for the ldd command line option. */ + if (argc > 1 && strcmp(argv[1], KWSYS_SHARED_FORWARD_OPTION_LDD) == 0) { +# if defined(KWSYS_SHARED_FORWARD_LDD) + /* Use the named ldd-like executable and arguments. */ + char const* ldd_argv[] = { KWSYS_SHARED_FORWARD_LDD, 0, 0 }; + ldd_argv[KWSYS_SHARED_FORWARD_LDD_N] = exe; + kwsys_shared_forward_execvp(ldd_argv[0], ldd_argv); + + /* Report why execution failed. */ + kwsys_shared_forward_print_failure(ldd_argv); + return 1; +# else + /* We have no ldd-like executable available on this platform. */ + fprintf(stderr, "No ldd-like tool is known to this executable.\n"); + return 1; +# endif + } +# endif + + /* Replace this process with the real executable. */ + argv[0] = exe; + kwsys_shared_forward_execvp(argv[0], argv); + + /* Report why execution failed. */ + kwsys_shared_forward_print_failure(argv); + } else { + /* Could not convert self path to the library directory. */ + } + } else { + /* Could not find this executable. */ + fprintf(stderr, "Error locating executable \"%s\".\n", argv[0]); + } + + /* Avoid unused argument warning. */ + (void)argc; + + /* Exit with failure. */ + return 1; +} + +/* Restore warning stack. 
*/ +# if defined(__clang__) && defined(__has_warning) +# if __has_warning("-Wcast-qual") +# pragma clang diagnostic pop +# endif +# endif + +#else +# error "@KWSYS_NAMESPACE@/SharedForward.h should be included only once." +#endif diff --git a/test/API/driver/kwsys/String.c b/test/API/driver/kwsys/String.c new file mode 100644 index 00000000000..daf7ad1a0f5 --- /dev/null +++ b/test/API/driver/kwsys/String.c @@ -0,0 +1,100 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifdef KWSYS_STRING_C +/* +All code in this source file is conditionally compiled to work-around +template definition auto-search on VMS. Other source files in this +directory that use the stl string cause the compiler to load this +source to try to get the definition of the string template. This +condition blocks the compiler from seeing the symbols defined here. +*/ +# include "kwsysPrivate.h" +# include KWSYS_HEADER(String.h) + +/* Work-around CMake dependency scanning limitation. This must + duplicate the above list of headers. */ +# if 0 +# include "String.h.in" +# endif + +/* Select an implementation for strcasecmp. */ +# if defined(_MSC_VER) +# define KWSYS_STRING_USE_STRICMP +# include +# elif defined(__GNUC__) +# define KWSYS_STRING_USE_STRCASECMP +# include +# else +/* Table to convert upper case letters to lower case and leave all + other characters alone. */ +static char kwsysString_strcasecmp_tolower[] = { + '\000', '\001', '\002', '\003', '\004', '\005', '\006', '\007', '\010', + '\011', '\012', '\013', '\014', '\015', '\016', '\017', '\020', '\021', + '\022', '\023', '\024', '\025', '\026', '\027', '\030', '\031', '\032', + '\033', '\034', '\035', '\036', '\037', '\040', '\041', '\042', '\043', + '\044', '\045', '\046', '\047', '\050', '\051', '\052', '\053', '\054', + '\055', '\056', '\057', '\060', '\061', '\062', '\063', '\064', '\065', + '\066', '\067', '\070', '\071', '\072', '\073', '\074', '\075', '\076', + '\077', '\100', '\141', '\142', '\143', '\144', '\145', '\146', '\147', + '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157', '\160', + '\161', '\162', '\163', '\164', '\165', '\166', '\167', '\170', '\171', + '\172', '\133', '\134', '\135', '\136', '\137', '\140', '\141', '\142', + '\143', '\144', '\145', '\146', '\147', '\150', '\151', '\152', '\153', + '\154', '\155', '\156', '\157', '\160', '\161', '\162', '\163', '\164', + '\165', '\166', '\167', '\170', '\171', '\172', '\173', '\174', '\175', + '\176', '\177', '\200', '\201', '\202', '\203', '\204', '\205', '\206', + '\207', '\210', '\211', '\212', '\213', '\214', '\215', '\216', '\217', + '\220', '\221', '\222', '\223', '\224', '\225', '\226', '\227', '\230', + '\231', '\232', '\233', '\234', '\235', '\236', '\237', '\240', '\241', + '\242', '\243', '\244', '\245', '\246', '\247', '\250', '\251', '\252', + '\253', '\254', '\255', '\256', '\257', '\260', '\261', '\262', '\263', + '\264', '\265', '\266', '\267', '\270', '\271', '\272', '\273', '\274', + '\275', '\276', '\277', '\300', '\301', '\302', '\303', '\304', '\305', + '\306', '\307', '\310', '\311', '\312', '\313', '\314', '\315', '\316', + '\317', '\320', '\321', '\322', '\323', '\324', '\325', '\326', '\327', + '\330', '\331', '\332', '\333', '\334', '\335', '\336', '\337', '\340', + '\341', '\342', '\343', '\344', '\345', '\346', '\347', '\350', '\351', + '\352', '\353', '\354', '\355', '\356', '\357', '\360', '\361', '\362', + '\363', '\364', '\365', '\366', '\367', 
'\370', '\371', '\372', '\373', + '\374', '\375', '\376', '\377' +}; +# endif + +/*--------------------------------------------------------------------------*/ +int kwsysString_strcasecmp(const char* lhs, const char* rhs) +{ +# if defined(KWSYS_STRING_USE_STRICMP) + return _stricmp(lhs, rhs); +# elif defined(KWSYS_STRING_USE_STRCASECMP) + return strcasecmp(lhs, rhs); +# else + const char* const lower = kwsysString_strcasecmp_tolower; + unsigned char const* us1 = (unsigned char const*)lhs; + unsigned char const* us2 = (unsigned char const*)rhs; + int result; + while ((result = lower[*us1] - lower[*us2++], result == 0) && *us1++) { + } + return result; +# endif +} + +/*--------------------------------------------------------------------------*/ +int kwsysString_strncasecmp(const char* lhs, const char* rhs, size_t n) +{ +# if defined(KWSYS_STRING_USE_STRICMP) + return _strnicmp(lhs, rhs, n); +# elif defined(KWSYS_STRING_USE_STRCASECMP) + return strncasecmp(lhs, rhs, n); +# else + const char* const lower = kwsysString_strcasecmp_tolower; + unsigned char const* us1 = (unsigned char const*)lhs; + unsigned char const* us2 = (unsigned char const*)rhs; + int result = 0; + while (n && (result = lower[*us1] - lower[*us2++], result == 0) && *us1++) { + --n; + } + return result; +# endif +} + +#endif /* KWSYS_STRING_C */ diff --git a/test/API/driver/kwsys/String.h.in b/test/API/driver/kwsys/String.h.in new file mode 100644 index 00000000000..7c9348af134 --- /dev/null +++ b/test/API/driver/kwsys/String.h.in @@ -0,0 +1,57 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_String_h +#define @KWSYS_NAMESPACE@_String_h + +#include <@KWSYS_NAMESPACE@/Configure.h> + +#include /* size_t */ + +/* Redefine all public interface symbol names to be in the proper + namespace. These macros are used internally to kwsys only, and are + not visible to user code. Use kwsysHeaderDump.pl to reproduce + these macros after making changes to the interface. */ +#if !defined(KWSYS_NAMESPACE) +# define kwsys_ns(x) @KWSYS_NAMESPACE@##x +# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT +#endif +#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# define kwsysString_strcasecmp kwsys_ns(String_strcasecmp) +# define kwsysString_strncasecmp kwsys_ns(String_strncasecmp) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * Compare two strings ignoring the case of the characters. The + * integer returned is negative, zero, or positive if the first string + * is found to be less than, equal to, or greater than the second + * string, respectively. + */ +kwsysEXPORT int kwsysString_strcasecmp(const char* lhs, const char* rhs); + +/** + * Identical to String_strcasecmp except that only the first n + * characters are considered. + */ +kwsysEXPORT int kwsysString_strncasecmp(const char* lhs, const char* rhs, + size_t n); + +#if defined(__cplusplus) +} /* extern "C" */ +#endif + +/* If we are building a kwsys .c or .cxx file, let it use these macros. + Otherwise, undefine them to keep the namespace clean. 
*/ +#if !defined(KWSYS_NAMESPACE) +# undef kwsys_ns +# undef kwsysEXPORT +# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# undef kwsysString_strcasecmp +# undef kwsysString_strncasecmp +# endif +#endif + +#endif diff --git a/test/API/driver/kwsys/String.hxx.in b/test/API/driver/kwsys/String.hxx.in new file mode 100644 index 00000000000..db1cf22a93a --- /dev/null +++ b/test/API/driver/kwsys/String.hxx.in @@ -0,0 +1,65 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_String_hxx +#define @KWSYS_NAMESPACE@_String_hxx + +#include + +namespace @KWSYS_NAMESPACE@ { + +/** \class String + * \brief Short-name version of the STL basic_string class template. + * + * The standard library "string" type is actually a typedef for + * "basic_string<..long argument list..>". This string class is + * simply a subclass of this type with the same interface so that the + * name is shorter in debugging symbols and error messages. + */ +class String : public std::string +{ + /** The original string type. */ + typedef std::string stl_string; + +public: + /** String member types. */ + typedef stl_string::value_type value_type; + typedef stl_string::pointer pointer; + typedef stl_string::reference reference; + typedef stl_string::const_reference const_reference; + typedef stl_string::size_type size_type; + typedef stl_string::difference_type difference_type; + typedef stl_string::iterator iterator; + typedef stl_string::const_iterator const_iterator; + typedef stl_string::reverse_iterator reverse_iterator; + typedef stl_string::const_reverse_iterator const_reverse_iterator; + + /** String constructors. */ + String() + : stl_string() + { + } + String(const value_type* s) + : stl_string(s) + { + } + String(const value_type* s, size_type n) + : stl_string(s, n) + { + } + String(const stl_string& s, size_type pos = 0, size_type n = npos) + : stl_string(s, pos, n) + { + } +}; // End Class: String + +#if defined(__WATCOMC__) +inline bool operator<(String const& l, String const& r) +{ + return (static_cast(l) < + static_cast(r)); +} +#endif + +} // namespace @KWSYS_NAMESPACE@ + +#endif diff --git a/test/API/driver/kwsys/System.c b/test/API/driver/kwsys/System.c new file mode 100644 index 00000000000..d43cc6fbbce --- /dev/null +++ b/test/API/driver/kwsys/System.c @@ -0,0 +1,236 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(System.h) + +/* Work-around CMake dependency scanning limitation. This must + duplicate the above list of headers. */ +#if 0 +# include "System.h.in" +#endif + +#include /* isspace */ +#include /* ptrdiff_t */ +#include /* malloc, free */ +#include /* memcpy */ + +#include + +#if defined(KWSYS_C_HAS_PTRDIFF_T) && KWSYS_C_HAS_PTRDIFF_T +typedef ptrdiff_t kwsysSystem_ptrdiff_t; +#else +typedef int kwsysSystem_ptrdiff_t; +#endif + +static int kwsysSystem__AppendByte(char* local, char** begin, char** end, + int* size, char c) +{ + /* Allocate space for the character. 
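+     The buffer starts as a small stack-local array; when it fills up, a
+     heap copy of twice the size is made, and the "*begin != local" check
+     below ensures only heap copies are ever passed to free().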
*/ + if ((*end - *begin) >= *size) { + kwsysSystem_ptrdiff_t length = *end - *begin; + char* newBuffer = (char*)malloc((size_t)(*size * 2)); + if (!newBuffer) { + return 0; + } + memcpy(newBuffer, *begin, (size_t)(length) * sizeof(char)); + if (*begin != local) { + free(*begin); + } + *begin = newBuffer; + *end = *begin + length; + *size *= 2; + } + + /* Store the character. */ + *(*end)++ = c; + return 1; +} + +static int kwsysSystem__AppendArgument(char** local, char*** begin, + char*** end, int* size, char* arg_local, + char** arg_begin, char** arg_end, + int* arg_size) +{ + /* Append a null-terminator to the argument string. */ + if (!kwsysSystem__AppendByte(arg_local, arg_begin, arg_end, arg_size, + '\0')) { + return 0; + } + + /* Allocate space for the argument pointer. */ + if ((*end - *begin) >= *size) { + kwsysSystem_ptrdiff_t length = *end - *begin; + char** newPointers = (char**)malloc((size_t)(*size) * 2 * sizeof(char*)); + if (!newPointers) { + return 0; + } + memcpy(newPointers, *begin, (size_t)(length) * sizeof(char*)); + if (*begin != local) { + free(*begin); + } + *begin = newPointers; + *end = *begin + length; + *size *= 2; + } + + /* Allocate space for the argument string. */ + **end = (char*)malloc((size_t)(*arg_end - *arg_begin)); + if (!**end) { + return 0; + } + + /* Store the argument in the command array. */ + memcpy(**end, *arg_begin, (size_t)(*arg_end - *arg_begin)); + ++(*end); + + /* Reset the argument to be empty. */ + *arg_end = *arg_begin; + + return 1; +} + +#define KWSYSPE_LOCAL_BYTE_COUNT 1024 +#define KWSYSPE_LOCAL_ARGS_COUNT 32 +static char** kwsysSystem__ParseUnixCommand(const char* command, int flags) +{ + /* Create a buffer for argument pointers during parsing. */ + char* local_pointers[KWSYSPE_LOCAL_ARGS_COUNT]; + int pointers_size = KWSYSPE_LOCAL_ARGS_COUNT; + char** pointer_begin = local_pointers; + char** pointer_end = pointer_begin; + + /* Create a buffer for argument strings during parsing. */ + char local_buffer[KWSYSPE_LOCAL_BYTE_COUNT]; + int buffer_size = KWSYSPE_LOCAL_BYTE_COUNT; + char* buffer_begin = local_buffer; + char* buffer_end = buffer_begin; + + /* Parse the command string. Try to behave like a UNIX shell. */ + char** newCommand = 0; + const char* c = command; + int in_argument = 0; + int in_escape = 0; + int in_single = 0; + int in_double = 0; + int failed = 0; + for (; *c; ++c) { + if (in_escape) { + /* This character is escaped so do no special handling. */ + if (!in_argument) { + in_argument = 1; + } + if (!kwsysSystem__AppendByte(local_buffer, &buffer_begin, &buffer_end, + &buffer_size, *c)) { + failed = 1; + break; + } + in_escape = 0; + } else if (*c == '\\') { + /* The next character should be escaped. */ + in_escape = 1; + } else if (*c == '\'' && !in_double) { + /* Enter or exit single-quote state. */ + if (in_single) { + in_single = 0; + } else { + in_single = 1; + if (!in_argument) { + in_argument = 1; + } + } + } else if (*c == '"' && !in_single) { + /* Enter or exit double-quote state. */ + if (in_double) { + in_double = 0; + } else { + in_double = 1; + if (!in_argument) { + in_argument = 1; + } + } + } else if (isspace((unsigned char)*c)) { + if (in_argument) { + if (in_single || in_double) { + /* This space belongs to a quoted argument. */ + if (!kwsysSystem__AppendByte(local_buffer, &buffer_begin, + &buffer_end, &buffer_size, *c)) { + failed = 1; + break; + } + } else { + /* This argument has been terminated by whitespace. 
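+             kwsysSystem__AppendArgument copies the bytes buffered so far
+             into a fresh heap string, stores its pointer, and resets the
+             byte buffer for the next argument.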
*/ + if (!kwsysSystem__AppendArgument( + local_pointers, &pointer_begin, &pointer_end, &pointers_size, + local_buffer, &buffer_begin, &buffer_end, &buffer_size)) { + failed = 1; + break; + } + in_argument = 0; + } + } + } else { + /* This character belong to an argument. */ + if (!in_argument) { + in_argument = 1; + } + if (!kwsysSystem__AppendByte(local_buffer, &buffer_begin, &buffer_end, + &buffer_size, *c)) { + failed = 1; + break; + } + } + } + + /* Finish the last argument. */ + if (in_argument) { + if (!kwsysSystem__AppendArgument( + local_pointers, &pointer_begin, &pointer_end, &pointers_size, + local_buffer, &buffer_begin, &buffer_end, &buffer_size)) { + failed = 1; + } + } + + /* If we still have memory allocate space for the new command + buffer. */ + if (!failed) { + kwsysSystem_ptrdiff_t n = pointer_end - pointer_begin; + newCommand = (char**)malloc((size_t)(n + 1) * sizeof(char*)); + } + + if (newCommand) { + /* Copy the arguments into the new command buffer. */ + kwsysSystem_ptrdiff_t n = pointer_end - pointer_begin; + memcpy(newCommand, pointer_begin, sizeof(char*) * (size_t)(n)); + newCommand[n] = 0; + } else { + /* Free arguments already allocated. */ + while (pointer_end != pointer_begin) { + free(*(--pointer_end)); + } + } + + /* Free temporary buffers. */ + if (pointer_begin != local_pointers) { + free(pointer_begin); + } + if (buffer_begin != local_buffer) { + free(buffer_begin); + } + + /* The flags argument is currently unused. */ + (void)flags; + + /* Return the final command buffer. */ + return newCommand; +} + +char** kwsysSystem_Parse_CommandForUnix(const char* command, int flags) +{ + /* Validate the flags. */ + if (flags != 0) { + return 0; + } + + /* Forward to our internal implementation. */ + return kwsysSystem__ParseUnixCommand(command, flags); +} diff --git a/test/API/driver/kwsys/System.h.in b/test/API/driver/kwsys/System.h.in new file mode 100644 index 00000000000..a9d4f5e690b --- /dev/null +++ b/test/API/driver/kwsys/System.h.in @@ -0,0 +1,60 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_System_h +#define @KWSYS_NAMESPACE@_System_h + +#include <@KWSYS_NAMESPACE@/Configure.h> + +/* Redefine all public interface symbol names to be in the proper + namespace. These macros are used internally to kwsys only, and are + not visible to user code. Use kwsysHeaderDump.pl to reproduce + these macros after making changes to the interface. */ +#if !defined(KWSYS_NAMESPACE) +# define kwsys_ns(x) @KWSYS_NAMESPACE@##x +# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT +#endif +#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# define kwsysSystem_Parse_CommandForUnix \ + kwsys_ns(System_Parse_CommandForUnix) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * Parse a unix-style command line string into separate arguments. + * + * On success, returns a pointer to an array of pointers to individual + * argument strings. Each string is null-terminated and the last + * entry in the array is a NULL pointer (just like argv). It is the + * caller's responsibility to free() the strings and the array of + * pointers to them. + * + * On failure, returns NULL. Failure occurs only on invalid flags or + * when memory cannot be allocated; never due to content of the input + * string. Missing close-quotes are treated as if the necessary + * closing quote appears. 
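+ *
+ * For example (illustrative only), parsing the command string
+ *
+ *   cc -o "my prog" main.c
+ *
+ * yields the argument array { "cc", "-o", "my prog", "main.c", NULL },
+ * with the quotes removed and the quoted space preserved.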
+ * + * By default single- and double-quoted arguments are supported, and + * any character may be escaped by a backslash. The flags argument is + * reserved for future use, and must be zero (or the call will fail). + */ +kwsysEXPORT char** kwsysSystem_Parse_CommandForUnix(const char* command, + int flags); + +#if defined(__cplusplus) +} /* extern "C" */ +#endif + +/* If we are building a kwsys .c or .cxx file, let it use these macros. + Otherwise, undefine them to keep the namespace clean. */ +#if !defined(KWSYS_NAMESPACE) +# undef kwsys_ns +# undef kwsysEXPORT +# if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# undef kwsysSystem_Parse_CommandForUnix +# endif +#endif + +#endif diff --git a/test/API/driver/kwsys/SystemInformation.cxx b/test/API/driver/kwsys/SystemInformation.cxx new file mode 100644 index 00000000000..6ec6e48ffb0 --- /dev/null +++ b/test/API/driver/kwsys/SystemInformation.cxx @@ -0,0 +1,5466 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#if defined(_WIN32) +# define NOMINMAX // use our min,max +# if !defined(_WIN32_WINNT) && defined(_MSC_VER) && _MSC_VER >= 1800 +# define _WIN32_WINNT 0x0600 // vista +# endif +# if !defined(_WIN32_WINNT) && !(defined(_MSC_VER) && _MSC_VER < 1300) +# define _WIN32_WINNT 0x0501 +# endif +# include // WSADATA, include before sys/types.h +#endif + +#if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE) +# define _GNU_SOURCE +#endif + +// TODO: +// We need an alternative implementation for many functions in this file +// when USE_ASM_INSTRUCTIONS gets defined as 0. +// +// Consider using these on Win32/Win64 for some of them: +// +// IsProcessorFeaturePresent +// http://msdn.microsoft.com/en-us/library/ms724482(VS.85).aspx +// +// GetProcessMemoryInfo +// http://msdn.microsoft.com/en-us/library/ms683219(VS.85).aspx + +#include "kwsysPrivate.h" +#include KWSYS_HEADER(SystemInformation.hxx) +#include KWSYS_HEADER(Process.h) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. 
+#if 0 +# include "Process.h.in" +# include "SystemInformation.hxx.in" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(_WIN32) +# include +# if defined(_MSC_VER) && _MSC_VER >= 1800 +# define KWSYS_WINDOWS_DEPRECATED_GetVersionEx +# endif +# include +# if defined(KWSYS_SYS_HAS_PSAPI) +# include +# endif +# if !defined(siginfo_t) +typedef int siginfo_t; +# endif +#else +# include + +# include // extern int errno; +# include +# include +# include // getrlimit +# include +# include // int uname(struct utsname *buf); +# include +#endif + +#if defined(__CYGWIN__) && !defined(_WIN32) +# include +# undef _WIN32 +#endif + +#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__DragonFly__) +# include +# include +# include +# include +# include +# if defined(KWSYS_SYS_HAS_IFADDRS_H) +# include +# include +# define KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN +# endif +#endif + +#if defined(KWSYS_SYS_HAS_MACHINE_CPU_H) +# include +#endif + +#ifdef __APPLE__ +# include +# include +# include +# include +# include +# include +# include +# include +# if defined(KWSYS_SYS_HAS_IFADDRS_H) +# include +# include +# define KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN +# endif +# if !(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ - 0 >= 1050) +# undef KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE +# endif +#endif + +#if defined(__linux) || defined(__sun) || defined(_SCO_DS) || \ + defined(__GLIBC__) || defined(__GNU__) +# include +# include +# include +# if defined(KWSYS_SYS_HAS_IFADDRS_H) +# include +# include +# if defined(__LSB_VERSION__) +/* LSB has no getifaddrs */ +# elif defined(__ANDROID_API__) && __ANDROID_API__ < 24 +/* Android has no getifaddrs prior to API 24. */ +# else +# define KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN +# endif +# endif +# if defined(KWSYS_CXX_HAS_RLIMIT64) +typedef struct rlimit64 ResourceLimitType; +# define GetResourceLimit getrlimit64 +# else +typedef struct rlimit ResourceLimitType; +# define GetResourceLimit getrlimit +# endif +#elif defined(__hpux) +# include +# include +# if defined(KWSYS_SYS_HAS_MPCTL_H) +# include +# endif +#endif + +#ifdef __HAIKU__ +# include +#endif + +#if defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE) +# include +# if defined(KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE) +# include +# endif +# if defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP) +# include +# endif +#else +# undef KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE +# undef KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP +#endif + +#include // int isdigit(int c); +#include +#include +#include +#include + +#if defined(KWSYS_USE_LONG_LONG) +# if defined(KWSYS_IOS_HAS_OSTREAM_LONG_LONG) +# define iostreamLongLong(x) (x) +# else +# define iostreamLongLong(x) ((long)(x)) +# endif +#elif defined(KWSYS_USE___INT64) +# if defined(KWSYS_IOS_HAS_OSTREAM___INT64) +# define iostreamLongLong(x) (x) +# else +# define iostreamLongLong(x) ((long)(x)) +# endif +#else +# error "No Long Long" +#endif + +#if defined(KWSYS_CXX_HAS_ATOLL) +# define atoLongLong atoll +#else +# if defined(KWSYS_CXX_HAS__ATOI64) +# define atoLongLong _atoi64 +# elif defined(KWSYS_CXX_HAS_ATOL) +# define atoLongLong atol +# else +# define atoLongLong atoi +# endif +#endif + +#if defined(_MSC_VER) && (_MSC_VER >= 1300) && !defined(_WIN64) && \ + !defined(__clang__) +# define USE_ASM_INSTRUCTIONS 1 +#else +# define USE_ASM_INSTRUCTIONS 0 +#endif + +#if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(__clang__) +# include +# define USE_CPUID_INTRINSICS 1 
+#else +# define USE_CPUID_INTRINSICS 0 +#endif + +#if USE_ASM_INSTRUCTIONS || USE_CPUID_INTRINSICS || \ + defined(KWSYS_CXX_HAS_BORLAND_ASM_CPUID) +# define USE_CPUID 1 +#else +# define USE_CPUID 0 +#endif + +#if USE_CPUID + +# define CPUID_AWARE_COMPILER + +/** + * call CPUID instruction + * + * Will return false if the instruction failed. + */ +static bool call_cpuid(int select, int result[4]) +{ +# if USE_CPUID_INTRINSICS + __cpuid(result, select); + return true; +# else + int tmp[4]; +# if defined(_MSC_VER) + // Use SEH to determine CPUID presence + __try { + _asm { +# ifdef CPUID_AWARE_COMPILER + ; we must push/pop the registers <> writes to, as the + ; optimiser does not know about <>, and so does not expect + ; these registers to change. + push eax + push ebx + push ecx + push edx +# endif + ; <> + mov eax, select +# ifdef CPUID_AWARE_COMPILER + cpuid +# else + _asm _emit 0x0f + _asm _emit 0xa2 +# endif + mov tmp[0 * TYPE int], eax + mov tmp[1 * TYPE int], ebx + mov tmp[2 * TYPE int], ecx + mov tmp[3 * TYPE int], edx + +# ifdef CPUID_AWARE_COMPILER + pop edx + pop ecx + pop ebx + pop eax +# endif + } + } __except (1) { + return false; + } + + memcpy(result, tmp, sizeof(tmp)); +# elif defined(KWSYS_CXX_HAS_BORLAND_ASM_CPUID) + unsigned int a, b, c, d; + __asm { + mov EAX, select; + cpuid + mov a, EAX; + mov b, EBX; + mov c, ECX; + mov d, EDX; + } + + result[0] = a; + result[1] = b; + result[2] = c; + result[3] = d; +# endif + + // The cpuid instruction succeeded. + return true; +# endif +} +#endif + +namespace KWSYS_NAMESPACE { +template +T min(T a, T b) +{ + return a < b ? a : b; +} + +extern "C" { +typedef void (*SigAction)(int, siginfo_t*, void*); +} + +// Define SystemInformationImplementation class +typedef void (*DELAY_FUNC)(unsigned int uiMS); + +class SystemInformationImplementation +{ +public: + typedef SystemInformation::LongLong LongLong; + SystemInformationImplementation(); + ~SystemInformationImplementation(); + + const char* GetVendorString(); + const char* GetVendorID(); + std::string GetTypeID(); + std::string GetFamilyID(); + std::string GetModelID(); + std::string GetModelName(); + std::string GetSteppingCode(); + const char* GetExtendedProcessorName(); + const char* GetProcessorSerialNumber(); + int GetProcessorCacheSize(); + unsigned int GetLogicalProcessorsPerPhysical(); + float GetProcessorClockFrequency(); + int GetProcessorAPICID(); + int GetProcessorCacheXSize(long int); + bool DoesCPUSupportFeature(long int); + + const char* GetOSName(); + const char* GetHostname(); + int GetFullyQualifiedDomainName(std::string& fqdn); + const char* GetOSRelease(); + const char* GetOSVersion(); + const char* GetOSPlatform(); + + bool Is64Bits(); + + unsigned int GetNumberOfLogicalCPU(); // per physical cpu + unsigned int GetNumberOfPhysicalCPU(); + + bool DoesCPUSupportCPUID(); + + // Retrieve memory information in MiB. + size_t GetTotalVirtualMemory(); + size_t GetAvailableVirtualMemory(); + size_t GetTotalPhysicalMemory(); + size_t GetAvailablePhysicalMemory(); + + LongLong GetProcessId(); + + // Retrieve memory information in KiB. + LongLong GetHostMemoryTotal(); + LongLong GetHostMemoryAvailable(const char* envVarName); + LongLong GetHostMemoryUsed(); + + LongLong GetProcMemoryAvailable(const char* hostLimitEnvVarName, + const char* procLimitEnvVarName); + LongLong GetProcMemoryUsed(); + + double GetLoadAverage(); + + // enable/disable stack trace signal handler. 
+ static void SetStackTraceOnError(int enable); + + // get current stack + static std::string GetProgramStack(int firstFrame, int wholePath); + + /** Run the different checks */ + void RunCPUCheck(); + void RunOSCheck(); + void RunMemoryCheck(); + +public: + typedef struct tagID + { + int Type; + int Family; + int Model; + int Revision; + int ExtendedFamily; + int ExtendedModel; + std::string ProcessorName; + std::string Vendor; + std::string SerialNumber; + std::string ModelName; + } ID; + + typedef struct tagCPUPowerManagement + { + bool HasVoltageID; + bool HasFrequencyID; + bool HasTempSenseDiode; + } CPUPowerManagement; + + typedef struct tagCPUExtendedFeatures + { + bool Has3DNow; + bool Has3DNowPlus; + bool SupportsMP; + bool HasMMXPlus; + bool HasSSEMMX; + unsigned int LogicalProcessorsPerPhysical; + int APIC_ID; + CPUPowerManagement PowerManagement; + } CPUExtendedFeatures; + + typedef struct CPUtagFeatures + { + bool HasFPU; + bool HasTSC; + bool HasMMX; + bool HasSSE; + bool HasSSEFP; + bool HasSSE2; + bool HasIA64; + bool HasAPIC; + bool HasCMOV; + bool HasMTRR; + bool HasACPI; + bool HasSerial; + bool HasThermal; + int CPUSpeed; + int L1CacheSize; + int L2CacheSize; + int L3CacheSize; + CPUExtendedFeatures ExtendedFeatures; + } CPUFeatures; + + enum Manufacturer + { + AMD, + Intel, + NSC, + UMC, + Cyrix, + NexGen, + IDT, + Rise, + Transmeta, + Sun, + IBM, + Motorola, + HP, + Hygon, + UnknownManufacturer + }; + +protected: + // For windows + bool RetrieveCPUFeatures(); + bool RetrieveCPUIdentity(); + bool RetrieveCPUCacheDetails(); + bool RetrieveClassicalCPUCacheDetails(); + bool RetrieveCPUClockSpeed(); + bool RetrieveClassicalCPUClockSpeed(); + bool RetrieveCPUExtendedLevelSupport(int); + bool RetrieveExtendedCPUFeatures(); + bool RetrieveProcessorSerialNumber(); + bool RetrieveCPUPowerManagement(); + bool RetrieveClassicalCPUIdentity(); + bool RetrieveExtendedCPUIdentity(); + + // Processor information + Manufacturer ChipManufacturer; + CPUFeatures Features; + ID ChipID; + float CPUSpeedInMHz; + unsigned int NumberOfLogicalCPU; + unsigned int NumberOfPhysicalCPU; + + void CPUCountWindows(); // For windows + unsigned char GetAPICId(); // For windows + bool IsSMTSupported(); + static LongLong GetCyclesDifference(DELAY_FUNC, unsigned int); // For windows + + // For Linux and Cygwin, /proc/cpuinfo formats are slightly different + bool RetreiveInformationFromCpuInfoFile(); + std::string ExtractValueFromCpuInfoFile(std::string buffer, const char* word, + size_t init = 0); + + bool QueryLinuxMemory(); + bool QueryCygwinMemory(); + + static void Delay(unsigned int); + static void DelayOverhead(unsigned int); + + void FindManufacturer(const std::string& family = ""); + + // For Mac + bool ParseSysCtl(); + int CallSwVers(const char* arg, std::string& ver); + void TrimNewline(std::string&); + std::string ExtractValueFromSysCtl(const char* word); + std::string SysCtlBuffer; + + // For Solaris + bool QuerySolarisMemory(); + bool QuerySolarisProcessor(); + std::string ParseValueFromKStat(const char* arguments); + std::string RunProcess(std::vector args); + + // For Haiku OS + bool QueryHaikuInfo(); + + // For QNX + bool QueryQNXMemory(); + bool QueryQNXProcessor(); + + // For OpenBSD, FreeBSD, NetBSD, DragonFly + bool QueryBSDMemory(); + bool QueryBSDProcessor(); + + // For HP-UX + bool QueryHPUXMemory(); + bool QueryHPUXProcessor(); + + // For Microsoft Windows + bool QueryWindowsMemory(); + + // For AIX + bool QueryAIXMemory(); + + bool QueryProcessorBySysconf(); + bool 
QueryProcessor(); + + // Evaluate the memory information. + bool QueryMemoryBySysconf(); + bool QueryMemory(); + size_t TotalVirtualMemory; + size_t AvailableVirtualMemory; + size_t TotalPhysicalMemory; + size_t AvailablePhysicalMemory; + + size_t CurrentPositionInFile; + + // Operating System information + bool QueryOSInformation(); + std::string OSName; + std::string Hostname; + std::string OSRelease; + std::string OSVersion; + std::string OSPlatform; + bool OSIs64Bit; +}; + +SystemInformation::SystemInformation() +{ + this->Implementation = new SystemInformationImplementation; +} + +SystemInformation::~SystemInformation() +{ + delete this->Implementation; +} + +const char* SystemInformation::GetVendorString() +{ + return this->Implementation->GetVendorString(); +} + +const char* SystemInformation::GetVendorID() +{ + return this->Implementation->GetVendorID(); +} + +std::string SystemInformation::GetTypeID() +{ + return this->Implementation->GetTypeID(); +} + +std::string SystemInformation::GetFamilyID() +{ + return this->Implementation->GetFamilyID(); +} + +std::string SystemInformation::GetModelID() +{ + return this->Implementation->GetModelID(); +} + +std::string SystemInformation::GetModelName() +{ + return this->Implementation->GetModelName(); +} + +std::string SystemInformation::GetSteppingCode() +{ + return this->Implementation->GetSteppingCode(); +} + +const char* SystemInformation::GetExtendedProcessorName() +{ + return this->Implementation->GetExtendedProcessorName(); +} + +const char* SystemInformation::GetProcessorSerialNumber() +{ + return this->Implementation->GetProcessorSerialNumber(); +} + +int SystemInformation::GetProcessorCacheSize() +{ + return this->Implementation->GetProcessorCacheSize(); +} + +unsigned int SystemInformation::GetLogicalProcessorsPerPhysical() +{ + return this->Implementation->GetLogicalProcessorsPerPhysical(); +} + +float SystemInformation::GetProcessorClockFrequency() +{ + return this->Implementation->GetProcessorClockFrequency(); +} + +int SystemInformation::GetProcessorAPICID() +{ + return this->Implementation->GetProcessorAPICID(); +} + +int SystemInformation::GetProcessorCacheXSize(long int l) +{ + return this->Implementation->GetProcessorCacheXSize(l); +} + +bool SystemInformation::DoesCPUSupportFeature(long int i) +{ + return this->Implementation->DoesCPUSupportFeature(i); +} + +std::string SystemInformation::GetCPUDescription() +{ + std::ostringstream oss; + oss << this->GetNumberOfPhysicalCPU() << " core "; + if (this->GetModelName().empty()) { + oss << this->GetProcessorClockFrequency() << " MHz " + << this->GetVendorString() << " " << this->GetExtendedProcessorName(); + } else { + oss << this->GetModelName(); + } + + // remove extra spaces + std::string tmp = oss.str(); + size_t pos; + while ((pos = tmp.find(" ")) != std::string::npos) { + tmp.replace(pos, 2, " "); + } + + return tmp; +} + +const char* SystemInformation::GetOSName() +{ + return this->Implementation->GetOSName(); +} + +const char* SystemInformation::GetHostname() +{ + return this->Implementation->GetHostname(); +} + +std::string SystemInformation::GetFullyQualifiedDomainName() +{ + std::string fqdn; + this->Implementation->GetFullyQualifiedDomainName(fqdn); + return fqdn; +} + +const char* SystemInformation::GetOSRelease() +{ + return this->Implementation->GetOSRelease(); +} + +const char* SystemInformation::GetOSVersion() +{ + return this->Implementation->GetOSVersion(); +} + +const char* SystemInformation::GetOSPlatform() +{ + return 
this->Implementation->GetOSPlatform(); +} + +int SystemInformation::GetOSIsWindows() +{ +#if defined(_WIN32) + return 1; +#else + return 0; +#endif +} + +int SystemInformation::GetOSIsLinux() +{ +#if defined(__linux) + return 1; +#else + return 0; +#endif +} + +int SystemInformation::GetOSIsApple() +{ +#if defined(__APPLE__) + return 1; +#else + return 0; +#endif +} + +std::string SystemInformation::GetOSDescription() +{ + std::ostringstream oss; + oss << this->GetOSName() << " " << this->GetOSRelease() << " " + << this->GetOSVersion(); + + return oss.str(); +} + +bool SystemInformation::Is64Bits() +{ + return this->Implementation->Is64Bits(); +} + +unsigned int SystemInformation::GetNumberOfLogicalCPU() // per physical cpu +{ + return this->Implementation->GetNumberOfLogicalCPU(); +} + +unsigned int SystemInformation::GetNumberOfPhysicalCPU() +{ + return this->Implementation->GetNumberOfPhysicalCPU(); +} + +bool SystemInformation::DoesCPUSupportCPUID() +{ + return this->Implementation->DoesCPUSupportCPUID(); +} + +// Retrieve memory information in MiB. +size_t SystemInformation::GetTotalVirtualMemory() +{ + return this->Implementation->GetTotalVirtualMemory(); +} + +size_t SystemInformation::GetAvailableVirtualMemory() +{ + return this->Implementation->GetAvailableVirtualMemory(); +} + +size_t SystemInformation::GetTotalPhysicalMemory() +{ + return this->Implementation->GetTotalPhysicalMemory(); +} + +size_t SystemInformation::GetAvailablePhysicalMemory() +{ + return this->Implementation->GetAvailablePhysicalMemory(); +} + +std::string SystemInformation::GetMemoryDescription( + const char* hostLimitEnvVarName, const char* procLimitEnvVarName) +{ + std::ostringstream oss; + oss << "Host Total: " << iostreamLongLong(this->GetHostMemoryTotal()) + << " KiB, Host Available: " + << iostreamLongLong(this->GetHostMemoryAvailable(hostLimitEnvVarName)) + << " KiB, Process Available: " + << iostreamLongLong(this->GetProcMemoryAvailable(hostLimitEnvVarName, + procLimitEnvVarName)) + << " KiB"; + return oss.str(); +} + +// host memory info in units of KiB. +SystemInformation::LongLong SystemInformation::GetHostMemoryTotal() +{ + return this->Implementation->GetHostMemoryTotal(); +} + +SystemInformation::LongLong SystemInformation::GetHostMemoryAvailable( + const char* hostLimitEnvVarName) +{ + return this->Implementation->GetHostMemoryAvailable(hostLimitEnvVarName); +} + +SystemInformation::LongLong SystemInformation::GetHostMemoryUsed() +{ + return this->Implementation->GetHostMemoryUsed(); +} + +// process memory info in units of KiB. 
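(Illustrative only, not part of the patch: the SystemInformation facade methods in this file forward to the pimpl; a typical caller runs the checks once and then queries, as sketched below. The include path and namespace assume KWSYS_NAMESPACE is configured as "kwsys".)

  #include <iostream>
  #include <kwsys/SystemInformation.hxx>

  int main()
  {
    kwsys::SystemInformation info;
    info.RunCPUCheck();
    info.RunOSCheck();
    info.RunMemoryCheck();

    std::cout << info.GetOSDescription() << "\n"
              << info.GetCPUDescription() << "\n"
              << "Physical memory: " << info.GetTotalPhysicalMemory() << " MiB\n"
              << "Host memory total: " << info.GetHostMemoryTotal() << " KiB\n";
    return 0;
  }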
+SystemInformation::LongLong SystemInformation::GetProcMemoryAvailable( + const char* hostLimitEnvVarName, const char* procLimitEnvVarName) +{ + return this->Implementation->GetProcMemoryAvailable(hostLimitEnvVarName, + procLimitEnvVarName); +} + +SystemInformation::LongLong SystemInformation::GetProcMemoryUsed() +{ + return this->Implementation->GetProcMemoryUsed(); +} + +double SystemInformation::GetLoadAverage() +{ + return this->Implementation->GetLoadAverage(); +} + +SystemInformation::LongLong SystemInformation::GetProcessId() +{ + return this->Implementation->GetProcessId(); +} + +void SystemInformation::SetStackTraceOnError(int enable) +{ + SystemInformationImplementation::SetStackTraceOnError(enable); +} + +std::string SystemInformation::GetProgramStack(int firstFrame, int wholePath) +{ + return SystemInformationImplementation::GetProgramStack(firstFrame, + wholePath); +} + +/** Run the different checks */ +void SystemInformation::RunCPUCheck() +{ + this->Implementation->RunCPUCheck(); +} + +void SystemInformation::RunOSCheck() +{ + this->Implementation->RunOSCheck(); +} + +void SystemInformation::RunMemoryCheck() +{ + this->Implementation->RunMemoryCheck(); +} + +// SystemInformationImplementation starts here + +#if USE_CPUID +# define STORE_TLBCACHE_INFO(x, y) x = (x < (y)) ? (y) : x +# define TLBCACHE_INFO_UNITS (15) +#endif + +#if USE_ASM_INSTRUCTIONS +# define CLASSICAL_CPU_FREQ_LOOP 10000000 +# define RDTSC_INSTRUCTION _asm _emit 0x0f _asm _emit 0x31 +#endif + +#define INITIAL_APIC_ID_BITS 0xFF000000 +// initial APIC ID for the processor this code is running on. +// Default value = 0xff if HT is not supported + +// Hide implementation details in an anonymous namespace. +namespace { +// ***************************************************************************** +#if defined(__linux) || defined(__APPLE__) +int LoadLines(FILE* file, std::vector& lines) +{ + // Load each line in the given file into a the vector. 
+ int nRead = 0; + const int bufSize = 1024; + char buf[bufSize] = { '\0' }; + while (!feof(file) && !ferror(file)) { + errno = 0; + if (fgets(buf, bufSize, file) == nullptr) { + if (ferror(file) && (errno == EINTR)) { + clearerr(file); + } + continue; + } + char* pBuf = buf; + while (*pBuf) { + if (*pBuf == '\n') + *pBuf = '\0'; + pBuf += 1; + } + lines.push_back(buf); + ++nRead; + } + if (ferror(file)) { + return 0; + } + return nRead; +} + +# if defined(__linux) +// ***************************************************************************** +int LoadLines(const char* fileName, std::vector& lines) +{ + FILE* file = fopen(fileName, "r"); + if (file == 0) { + return 0; + } + int nRead = LoadLines(file, lines); + fclose(file); + return nRead; +} +# endif + +// **************************************************************************** +template +int NameValue(std::vector const& lines, std::string const& name, + T& value) +{ + size_t nLines = lines.size(); + for (size_t i = 0; i < nLines; ++i) { + size_t at = lines[i].find(name); + if (at == std::string::npos) { + continue; + } + std::istringstream is(lines[i].substr(at + name.size())); + is >> value; + return 0; + } + return -1; +} +#endif + +#if defined(__linux) +// **************************************************************************** +template +int GetFieldsFromFile(const char* fileName, const char** fieldNames, T* values) +{ + std::vector fields; + if (!LoadLines(fileName, fields)) { + return -1; + } + int i = 0; + while (fieldNames[i] != nullptr) { + int ierr = NameValue(fields, fieldNames[i], values[i]); + if (ierr) { + return -(i + 2); + } + i += 1; + } + return 0; +} + +// **************************************************************************** +template +int GetFieldFromFile(const char* fileName, const char* fieldName, T& value) +{ + const char* fieldNames[2] = { fieldName, nullptr }; + T values[1] = { T(0) }; + int ierr = GetFieldsFromFile(fileName, fieldNames, values); + if (ierr) { + return ierr; + } + value = values[0]; + return 0; +} +#endif + +// **************************************************************************** +#if defined(__APPLE__) +template +int GetFieldsFromCommand(const char* command, const char** fieldNames, + T* values) +{ + FILE* file = popen(command, "r"); + if (file == nullptr) { + return -1; + } + std::vector fields; + int nl = LoadLines(file, fields); + pclose(file); + if (nl == 0) { + return -1; + } + int i = 0; + while (fieldNames[i] != nullptr) { + int ierr = NameValue(fields, fieldNames[i], values[i]); + if (ierr) { + return -(i + 2); + } + i += 1; + } + return 0; +} +#endif + +// **************************************************************************** +#if !defined(_WIN32) && !defined(__MINGW32__) && !defined(__CYGWIN__) +void StacktraceSignalHandler(int sigNo, siginfo_t* sigInfo, + void* /*sigContext*/) +{ +# if defined(__linux) || defined(__APPLE__) + std::ostringstream oss; + oss << std::endl + << "=========================================================" + << std::endl + << "Process id " << getpid() << " "; + switch (sigNo) { + case SIGINT: + oss << "Caught SIGINT"; + break; + + case SIGTERM: + oss << "Caught SIGTERM"; + break; + + case SIGABRT: + oss << "Caught SIGABRT"; + break; + + case SIGFPE: + oss << "Caught SIGFPE at " << (sigInfo->si_addr == nullptr ? 
"0x" : "") + << sigInfo->si_addr << " "; + switch (sigInfo->si_code) { +# if defined(FPE_INTDIV) + case FPE_INTDIV: + oss << "integer division by zero"; + break; +# endif + +# if defined(FPE_INTOVF) + case FPE_INTOVF: + oss << "integer overflow"; + break; +# endif + + case FPE_FLTDIV: + oss << "floating point divide by zero"; + break; + + case FPE_FLTOVF: + oss << "floating point overflow"; + break; + + case FPE_FLTUND: + oss << "floating point underflow"; + break; + + case FPE_FLTRES: + oss << "floating point inexact result"; + break; + + case FPE_FLTINV: + oss << "floating point invalid operation"; + break; + +# if defined(FPE_FLTSUB) + case FPE_FLTSUB: + oss << "floating point subscript out of range"; + break; +# endif + + default: + oss << "code " << sigInfo->si_code; + break; + } + break; + + case SIGSEGV: + oss << "Caught SIGSEGV at " << (sigInfo->si_addr == nullptr ? "0x" : "") + << sigInfo->si_addr << " "; + switch (sigInfo->si_code) { + case SEGV_MAPERR: + oss << "address not mapped to object"; + break; + + case SEGV_ACCERR: + oss << "invalid permission for mapped object"; + break; + + default: + oss << "code " << sigInfo->si_code; + break; + } + break; + + case SIGBUS: + oss << "Caught SIGBUS at " << (sigInfo->si_addr == nullptr ? "0x" : "") + << sigInfo->si_addr << " "; + switch (sigInfo->si_code) { + case BUS_ADRALN: + oss << "invalid address alignment"; + break; + +# if defined(BUS_ADRERR) + case BUS_ADRERR: + oss << "nonexistent physical address"; + break; +# endif + +# if defined(BUS_OBJERR) + case BUS_OBJERR: + oss << "object-specific hardware error"; + break; +# endif + +# if defined(BUS_MCEERR_AR) + case BUS_MCEERR_AR: + oss << "Hardware memory error consumed on a machine check; action " + "required."; + break; +# endif + +# if defined(BUS_MCEERR_AO) + case BUS_MCEERR_AO: + oss << "Hardware memory error detected in process but not consumed; " + "action optional."; + break; +# endif + + default: + oss << "code " << sigInfo->si_code; + break; + } + break; + + case SIGILL: + oss << "Caught SIGILL at " << (sigInfo->si_addr == nullptr ? "0x" : "") + << sigInfo->si_addr << " "; + switch (sigInfo->si_code) { + case ILL_ILLOPC: + oss << "illegal opcode"; + break; + +# if defined(ILL_ILLOPN) + case ILL_ILLOPN: + oss << "illegal operand"; + break; +# endif + +# if defined(ILL_ILLADR) + case ILL_ILLADR: + oss << "illegal addressing mode."; + break; +# endif + + case ILL_ILLTRP: + oss << "illegal trap"; + break; + + case ILL_PRVOPC: + oss << "privileged opcode"; + break; + +# if defined(ILL_PRVREG) + case ILL_PRVREG: + oss << "privileged register"; + break; +# endif + +# if defined(ILL_COPROC) + case ILL_COPROC: + oss << "co-processor error"; + break; +# endif + +# if defined(ILL_BADSTK) + case ILL_BADSTK: + oss << "internal stack error"; + break; +# endif + + default: + oss << "code " << sigInfo->si_code; + break; + } + break; + + default: + oss << "Caught " << sigNo << " code " << sigInfo->si_code; + break; + } + oss << std::endl + << "Program Stack:" << std::endl + << SystemInformationImplementation::GetProgramStack(2, 0) + << "=========================================================" + << std::endl; + std::cerr << oss.str() << std::endl; + + // restore the previously registered handlers + // and abort + SystemInformationImplementation::SetStackTraceOnError(0); + abort(); +# else + // avoid warning C4100 + (void)sigNo; + (void)sigInfo; +# endif +} +#endif + +#if defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE) +# define safes(_arg) ((_arg) ? 
(_arg) : "???") + +// Description: +// A container for symbol properties. Each instance +// must be Initialized. +class SymbolProperties +{ +public: + SymbolProperties(); + + // Description: + // The SymbolProperties instance must be initialized by + // passing a stack address. + void Initialize(void* address); + + // Description: + // Get the symbol's stack address. + void* GetAddress() const { return this->Address; } + + // Description: + // If not set paths will be removed. eg, from a binary + // or source file. + void SetReportPath(int rp) { this->ReportPath = rp; } + + // Description: + // Set/Get the name of the binary file that the symbol + // is found in. + void SetBinary(const char* binary) { this->Binary = safes(binary); } + + std::string GetBinary() const; + + // Description: + // Set the name of the function that the symbol is found in. + // If c++ demangling is supported it will be demangled. + void SetFunction(const char* function) + { + this->Function = this->Demangle(function); + } + + std::string GetFunction() const { return this->Function; } + + // Description: + // Set/Get the name of the source file where the symbol + // is defined. + void SetSourceFile(const char* sourcefile) + { + this->SourceFile = safes(sourcefile); + } + + std::string GetSourceFile() const + { + return this->GetFileName(this->SourceFile); + } + + // Description: + // Set/Get the line number where the symbol is defined + void SetLineNumber(long linenumber) { this->LineNumber = linenumber; } + long GetLineNumber() const { return this->LineNumber; } + + // Description: + // Set the address where the binary image is mapped + // into memory. + void SetBinaryBaseAddress(void* address) + { + this->BinaryBaseAddress = address; + } + +private: + void* GetRealAddress() const + { + return (void*)((char*)this->Address - (char*)this->BinaryBaseAddress); + } + + std::string GetFileName(const std::string& path) const; + std::string Demangle(const char* symbol) const; + +private: + std::string Binary; + void* BinaryBaseAddress; + void* Address; + std::string SourceFile; + std::string Function; + long LineNumber; + int ReportPath; +}; + +std::ostream& operator<<(std::ostream& os, const SymbolProperties& sp) +{ +# if defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP) + os << std::hex << sp.GetAddress() << " : " << sp.GetFunction() << " [(" + << sp.GetBinary() << ") " << sp.GetSourceFile() << ":" << std::dec + << sp.GetLineNumber() << "]"; +# elif defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE) + void* addr = sp.GetAddress(); + char** syminfo = backtrace_symbols(&addr, 1); + os << safes(syminfo[0]); + free(syminfo); +# else + (void)os; + (void)sp; +# endif + return os; +} + +SymbolProperties::SymbolProperties() +{ + // not using an initializer list + // to avoid some PGI compiler warnings + this->SetBinary("???"); + this->SetBinaryBaseAddress(nullptr); + this->Address = nullptr; + this->SetSourceFile("???"); + this->SetFunction("???"); + this->SetLineNumber(-1); + this->SetReportPath(0); + // avoid PGI compiler warnings + this->GetRealAddress(); + this->GetFunction(); + this->GetSourceFile(); + this->GetLineNumber(); +} + +std::string SymbolProperties::GetFileName(const std::string& path) const +{ + std::string file(path); + if (!this->ReportPath) { + size_t at = file.rfind("/"); + if (at != std::string::npos) { + file = file.substr(at + 1); + } + } + return file; +} + +std::string SymbolProperties::GetBinary() const +{ +// only linux has proc fs +# if defined(__linux__) + if (this->Binary == "/proc/self/exe") { + 
std::string binary; + char buf[1024] = { '\0' }; + ssize_t ll = 0; + if ((ll = readlink("/proc/self/exe", buf, 1024)) > 0 && ll < 1024) { + buf[ll] = '\0'; + binary = buf; + } else { + binary = "/proc/self/exe"; + } + return this->GetFileName(binary); + } +# endif + return this->GetFileName(this->Binary); +} + +std::string SymbolProperties::Demangle(const char* symbol) const +{ + std::string result = safes(symbol); +# if defined(KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE) + int status = 0; + size_t bufferLen = 1024; + char* buffer = (char*)malloc(1024); + char* demangledSymbol = + abi::__cxa_demangle(symbol, buffer, &bufferLen, &status); + if (!status) { + result = demangledSymbol; + } + free(buffer); +# else + (void)symbol; +# endif + return result; +} + +void SymbolProperties::Initialize(void* address) +{ + this->Address = address; +# if defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP) + // first fallback option can demangle c++ functions + Dl_info info; + int ierr = dladdr(this->Address, &info); + if (ierr && info.dli_sname && info.dli_saddr) { + this->SetBinary(info.dli_fname); + this->SetFunction(info.dli_sname); + } +# else +// second fallback use builtin backtrace_symbols +// to decode the bactrace. +# endif +} +#endif // don't define this class if we're not using it + +#if defined(_WIN32) || defined(__CYGWIN__) +# define KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes +#endif +#if defined(_MSC_VER) && _MSC_VER < 1310 +# undef KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes +#endif +#if defined(KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes) +double calculateCPULoad(unsigned __int64 idleTicks, + unsigned __int64 totalTicks) +{ + static double previousLoad = -0.0; + static unsigned __int64 previousIdleTicks = 0; + static unsigned __int64 previousTotalTicks = 0; + + unsigned __int64 const idleTicksSinceLastTime = + idleTicks - previousIdleTicks; + unsigned __int64 const totalTicksSinceLastTime = + totalTicks - previousTotalTicks; + + double load; + if (previousTotalTicks == 0 || totalTicksSinceLastTime == 0) { + // No new information. Use previous result. + load = previousLoad; + } else { + // Calculate load since last time. + load = 1.0 - double(idleTicksSinceLastTime) / totalTicksSinceLastTime; + + // Smooth if possible. 
+ if (previousLoad > 0) { + load = 0.25 * load + 0.75 * previousLoad; + } + } + + previousLoad = load; + previousIdleTicks = idleTicks; + previousTotalTicks = totalTicks; + + return load; +} + +unsigned __int64 fileTimeToUInt64(FILETIME const& ft) +{ + LARGE_INTEGER out; + out.HighPart = ft.dwHighDateTime; + out.LowPart = ft.dwLowDateTime; + return out.QuadPart; +} +#endif + +} // anonymous namespace + +SystemInformationImplementation::SystemInformationImplementation() +{ + this->TotalVirtualMemory = 0; + this->AvailableVirtualMemory = 0; + this->TotalPhysicalMemory = 0; + this->AvailablePhysicalMemory = 0; + this->CurrentPositionInFile = 0; + this->ChipManufacturer = UnknownManufacturer; + memset(&this->Features, 0, sizeof(CPUFeatures)); + this->ChipID.Type = 0; + this->ChipID.Family = 0; + this->ChipID.Model = 0; + this->ChipID.Revision = 0; + this->ChipID.ExtendedFamily = 0; + this->ChipID.ExtendedModel = 0; + this->CPUSpeedInMHz = 0; + this->NumberOfLogicalCPU = 0; + this->NumberOfPhysicalCPU = 0; + this->OSName = ""; + this->Hostname = ""; + this->OSRelease = ""; + this->OSVersion = ""; + this->OSPlatform = ""; + this->OSIs64Bit = (sizeof(void*) == 8); +} + +SystemInformationImplementation::~SystemInformationImplementation() +{ +} + +void SystemInformationImplementation::RunCPUCheck() +{ +#ifdef _WIN32 + // Check to see if this processor supports CPUID. + bool supportsCPUID = DoesCPUSupportCPUID(); + + if (supportsCPUID) { + // Retrieve the CPU details. + RetrieveCPUIdentity(); + this->FindManufacturer(); + RetrieveCPUFeatures(); + } + + // These two may be called without support for the CPUID instruction. + // (But if the instruction is there, they should be called *after* + // the above call to RetrieveCPUIdentity... that's why the two if + // blocks exist with the same "if (supportsCPUID)" logic... + // + if (!RetrieveCPUClockSpeed()) { + RetrieveClassicalCPUClockSpeed(); + } + + if (supportsCPUID) { + // Retrieve cache information. + if (!RetrieveCPUCacheDetails()) { + RetrieveClassicalCPUCacheDetails(); + } + + // Retrieve the extended CPU details. + if (!RetrieveExtendedCPUIdentity()) { + RetrieveClassicalCPUIdentity(); + } + + RetrieveExtendedCPUFeatures(); + RetrieveCPUPowerManagement(); + + // Now attempt to retrieve the serial number (if possible). 
+ RetrieveProcessorSerialNumber(); + } + + this->CPUCountWindows(); + +#elif defined(__APPLE__) + this->ParseSysCtl(); +#elif defined(__SVR4) && defined(__sun) + this->QuerySolarisProcessor(); +#elif defined(__HAIKU__) + this->QueryHaikuInfo(); +#elif defined(__QNX__) + this->QueryQNXProcessor(); +#elif defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__DragonFly__) + this->QueryBSDProcessor(); +#elif defined(__hpux) + this->QueryHPUXProcessor(); +#elif defined(__linux) || defined(__CYGWIN__) + this->RetreiveInformationFromCpuInfoFile(); +#else + this->QueryProcessor(); +#endif +} + +void SystemInformationImplementation::RunOSCheck() +{ + this->QueryOSInformation(); +} + +void SystemInformationImplementation::RunMemoryCheck() +{ +#if defined(__APPLE__) + this->ParseSysCtl(); +#elif defined(__SVR4) && defined(__sun) + this->QuerySolarisMemory(); +#elif defined(__HAIKU__) + this->QueryHaikuInfo(); +#elif defined(__QNX__) + this->QueryQNXMemory(); +#elif defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__DragonFly__) + this->QueryBSDMemory(); +#elif defined(__CYGWIN__) + this->QueryCygwinMemory(); +#elif defined(_WIN32) + this->QueryWindowsMemory(); +#elif defined(__hpux) + this->QueryHPUXMemory(); +#elif defined(__linux) + this->QueryLinuxMemory(); +#elif defined(_AIX) + this->QueryAIXMemory(); +#else + this->QueryMemory(); +#endif +} + +/** Get the vendor string */ +const char* SystemInformationImplementation::GetVendorString() +{ + return this->ChipID.Vendor.c_str(); +} + +/** Get the OS Name */ +const char* SystemInformationImplementation::GetOSName() +{ + return this->OSName.c_str(); +} + +/** Get the hostname */ +const char* SystemInformationImplementation::GetHostname() +{ + if (this->Hostname.empty()) { + this->Hostname = "localhost"; +#if defined(_WIN32) + WORD wVersionRequested; + WSADATA wsaData; + char name[255]; + wVersionRequested = MAKEWORD(2, 0); + if (WSAStartup(wVersionRequested, &wsaData) == 0) { + gethostname(name, sizeof(name)); + WSACleanup(); + } + this->Hostname = name; +#else + struct utsname unameInfo; + int errorFlag = uname(&unameInfo); + if (errorFlag == 0) { + this->Hostname = unameInfo.nodename; + } +#endif + } + return this->Hostname.c_str(); +} + +/** Get the FQDN */ +int SystemInformationImplementation::GetFullyQualifiedDomainName( + std::string& fqdn) +{ + // in the event of absolute failure return localhost. + fqdn = "localhost"; + +#if defined(_WIN32) + int ierr; + // TODO - a more robust implementation for windows, see comments + // in unix implementation. + WSADATA wsaData; + WORD ver = MAKEWORD(2, 0); + ierr = WSAStartup(ver, &wsaData); + if (ierr) { + return -1; + } + + char base[256] = { '\0' }; + ierr = gethostname(base, 256); + if (ierr) { + WSACleanup(); + return -2; + } + fqdn = base; + + HOSTENT* hent = gethostbyname(base); + if (hent) { + fqdn = hent->h_name; + } + + WSACleanup(); + return 0; + +#elif defined(KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN) + // gethostname typical returns an alias for loopback interface + // we want the fully qualified domain name. Because there are + // any number of interfaces on this system we look for the + // first of these that contains the name returned by gethostname + // and is longer. failing that we return gethostname and indicate + // with a failure code. Return of a failure code is not necessarily + // an indication of an error. 
for instance gethostname may return + // the fully qualified domain name, or there may not be one if the + // system lives on a private network such as in the case of a cluster + // node. + + int ierr = 0; + char base[NI_MAXHOST]; + ierr = gethostname(base, NI_MAXHOST); + if (ierr) { + return -1; + } + size_t baseSize = strlen(base); + fqdn = base; + + struct ifaddrs* ifas; + struct ifaddrs* ifa; + ierr = getifaddrs(&ifas); + if (ierr) { + return -2; + } + + for (ifa = ifas; ifa != nullptr; ifa = ifa->ifa_next) { + int fam = ifa->ifa_addr ? ifa->ifa_addr->sa_family : -1; + // Skip Loopback interfaces + if (((fam == AF_INET) || (fam == AF_INET6)) && + !(ifa->ifa_flags & IFF_LOOPBACK)) { + char host[NI_MAXHOST] = { '\0' }; + + const size_t addrlen = (fam == AF_INET ? sizeof(struct sockaddr_in) + : sizeof(struct sockaddr_in6)); + + ierr = getnameinfo(ifa->ifa_addr, static_cast(addrlen), host, + NI_MAXHOST, nullptr, 0, NI_NAMEREQD); + if (ierr) { + // don't report the failure now since we may succeed on another + // interface. If all attempts fail then return the failure code. + ierr = -3; + continue; + } + + std::string candidate = host; + if ((candidate.find(base) != std::string::npos) && + baseSize < candidate.size()) { + // success, stop now. + ierr = 0; + fqdn = candidate; + break; + } + } + } + freeifaddrs(ifas); + + return ierr; +#else + /* TODO: Implement on more platforms. */ + fqdn = this->GetHostname(); + return -1; +#endif +} + +/** Get the OS release */ +const char* SystemInformationImplementation::GetOSRelease() +{ + return this->OSRelease.c_str(); +} + +/** Get the OS version */ +const char* SystemInformationImplementation::GetOSVersion() +{ + return this->OSVersion.c_str(); +} + +/** Get the OS platform */ +const char* SystemInformationImplementation::GetOSPlatform() +{ + return this->OSPlatform.c_str(); +} + +/** Get the vendor ID */ +const char* SystemInformationImplementation::GetVendorID() +{ + // Return the vendor ID. + switch (this->ChipManufacturer) { + case Intel: + return "Intel Corporation"; + case AMD: + return "Advanced Micro Devices"; + case NSC: + return "National Semiconductor"; + case Cyrix: + return "Cyrix Corp., VIA Inc."; + case NexGen: + return "NexGen Inc., Advanced Micro Devices"; + case IDT: + return "IDT\\Centaur, Via Inc."; + case UMC: + return "United Microelectronics Corp."; + case Rise: + return "Rise"; + case Transmeta: + return "Transmeta"; + case Sun: + return "Sun Microelectronics"; + case IBM: + return "IBM"; + case Motorola: + return "Motorola"; + case HP: + return "Hewlett-Packard"; + case Hygon: + return "Chengdu Haiguang IC Design Co., Ltd."; + case UnknownManufacturer: + default: + return "Unknown Manufacturer"; + } +} + +/** Return the type ID of the CPU */ +std::string SystemInformationImplementation::GetTypeID() +{ + std::ostringstream str; + str << this->ChipID.Type; + return str.str(); +} + +/** Return the family of the CPU present */ +std::string SystemInformationImplementation::GetFamilyID() +{ + std::ostringstream str; + str << this->ChipID.Family; + return str.str(); +} + +// Return the model of CPU present */ +std::string SystemInformationImplementation::GetModelID() +{ + std::ostringstream str; + str << this->ChipID.Model; + return str.str(); +} + +// Return the model name of CPU present */ +std::string SystemInformationImplementation::GetModelName() +{ + return this->ChipID.ModelName; +} + +/** Return the stepping code of the CPU present. 
*/ +std::string SystemInformationImplementation::GetSteppingCode() +{ + std::ostringstream str; + str << this->ChipID.Revision; + return str.str(); +} + +/** Return the stepping code of the CPU present. */ +const char* SystemInformationImplementation::GetExtendedProcessorName() +{ + return this->ChipID.ProcessorName.c_str(); +} + +/** Return the serial number of the processor + * in hexadecimal: xxxx-xxxx-xxxx-xxxx-xxxx-xxxx. */ +const char* SystemInformationImplementation::GetProcessorSerialNumber() +{ + return this->ChipID.SerialNumber.c_str(); +} + +/** Return the logical processors per physical */ +unsigned int SystemInformationImplementation::GetLogicalProcessorsPerPhysical() +{ + return this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical; +} + +/** Return the processor clock frequency. */ +float SystemInformationImplementation::GetProcessorClockFrequency() +{ + return this->CPUSpeedInMHz; +} + +/** Return the APIC ID. */ +int SystemInformationImplementation::GetProcessorAPICID() +{ + return this->Features.ExtendedFeatures.APIC_ID; +} + +/** Return the L1 cache size. */ +int SystemInformationImplementation::GetProcessorCacheSize() +{ + return this->Features.L1CacheSize; +} + +/** Return the chosen cache size. */ +int SystemInformationImplementation::GetProcessorCacheXSize(long int dwCacheID) +{ + switch (dwCacheID) { + case SystemInformation::CPU_FEATURE_L1CACHE: + return this->Features.L1CacheSize; + case SystemInformation::CPU_FEATURE_L2CACHE: + return this->Features.L2CacheSize; + case SystemInformation::CPU_FEATURE_L3CACHE: + return this->Features.L3CacheSize; + } + return -1; +} + +bool SystemInformationImplementation::DoesCPUSupportFeature(long int dwFeature) +{ + bool bHasFeature = false; + + // Check for MMX instructions. + if (((dwFeature & SystemInformation::CPU_FEATURE_MMX) != 0) && + this->Features.HasMMX) + bHasFeature = true; + + // Check for MMX+ instructions. + if (((dwFeature & SystemInformation::CPU_FEATURE_MMX_PLUS) != 0) && + this->Features.ExtendedFeatures.HasMMXPlus) + bHasFeature = true; + + // Check for SSE FP instructions. + if (((dwFeature & SystemInformation::CPU_FEATURE_SSE) != 0) && + this->Features.HasSSE) + bHasFeature = true; + + // Check for SSE FP instructions. + if (((dwFeature & SystemInformation::CPU_FEATURE_SSE_FP) != 0) && + this->Features.HasSSEFP) + bHasFeature = true; + + // Check for SSE MMX instructions. + if (((dwFeature & SystemInformation::CPU_FEATURE_SSE_MMX) != 0) && + this->Features.ExtendedFeatures.HasSSEMMX) + bHasFeature = true; + + // Check for SSE2 instructions. + if (((dwFeature & SystemInformation::CPU_FEATURE_SSE2) != 0) && + this->Features.HasSSE2) + bHasFeature = true; + + // Check for 3DNow! instructions. + if (((dwFeature & SystemInformation::CPU_FEATURE_AMD_3DNOW) != 0) && + this->Features.ExtendedFeatures.Has3DNow) + bHasFeature = true; + + // Check for 3DNow+ instructions. + if (((dwFeature & SystemInformation::CPU_FEATURE_AMD_3DNOW_PLUS) != 0) && + this->Features.ExtendedFeatures.Has3DNowPlus) + bHasFeature = true; + + // Check for IA64 instructions. + if (((dwFeature & SystemInformation::CPU_FEATURE_IA64) != 0) && + this->Features.HasIA64) + bHasFeature = true; + + // Check for MP capable. + if (((dwFeature & SystemInformation::CPU_FEATURE_MP_CAPABLE) != 0) && + this->Features.ExtendedFeatures.SupportsMP) + bHasFeature = true; + + // Check for a serial number for the processor. 
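(Illustrative only, not part of the patch, assuming KWSYS_NAMESPACE=kwsys: DoesCPUSupportFeature() takes an OR-able CPU_FEATURE_* mask and, as the checks in this function show, reports true if at least one requested feature is present. A consumer-side sketch:)

  #include <kwsys/SystemInformation.hxx>

  bool host_has_sse2()
  {
    kwsys::SystemInformation info;
    info.RunCPUCheck(); // populates the feature flags tested in this function
    return info.DoesCPUSupportFeature(
      kwsys::SystemInformation::CPU_FEATURE_SSE2);
  }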
+ if (((dwFeature & SystemInformation::CPU_FEATURE_SERIALNUMBER) != 0) && + this->Features.HasSerial) + bHasFeature = true; + + // Check for a local APIC in the processor. + if (((dwFeature & SystemInformation::CPU_FEATURE_APIC) != 0) && + this->Features.HasAPIC) + bHasFeature = true; + + // Check for CMOV instructions. + if (((dwFeature & SystemInformation::CPU_FEATURE_CMOV) != 0) && + this->Features.HasCMOV) + bHasFeature = true; + + // Check for MTRR instructions. + if (((dwFeature & SystemInformation::CPU_FEATURE_MTRR) != 0) && + this->Features.HasMTRR) + bHasFeature = true; + + // Check for L1 cache size. + if (((dwFeature & SystemInformation::CPU_FEATURE_L1CACHE) != 0) && + (this->Features.L1CacheSize != -1)) + bHasFeature = true; + + // Check for L2 cache size. + if (((dwFeature & SystemInformation::CPU_FEATURE_L2CACHE) != 0) && + (this->Features.L2CacheSize != -1)) + bHasFeature = true; + + // Check for L3 cache size. + if (((dwFeature & SystemInformation::CPU_FEATURE_L3CACHE) != 0) && + (this->Features.L3CacheSize != -1)) + bHasFeature = true; + + // Check for ACPI capability. + if (((dwFeature & SystemInformation::CPU_FEATURE_ACPI) != 0) && + this->Features.HasACPI) + bHasFeature = true; + + // Check for thermal monitor support. + if (((dwFeature & SystemInformation::CPU_FEATURE_THERMALMONITOR) != 0) && + this->Features.HasThermal) + bHasFeature = true; + + // Check for temperature sensing diode support. + if (((dwFeature & SystemInformation::CPU_FEATURE_TEMPSENSEDIODE) != 0) && + this->Features.ExtendedFeatures.PowerManagement.HasTempSenseDiode) + bHasFeature = true; + + // Check for frequency ID support. + if (((dwFeature & SystemInformation::CPU_FEATURE_FREQUENCYID) != 0) && + this->Features.ExtendedFeatures.PowerManagement.HasFrequencyID) + bHasFeature = true; + + // Check for voltage ID support. + if (((dwFeature & SystemInformation::CPU_FEATURE_VOLTAGEID_FREQUENCY) != + 0) && + this->Features.ExtendedFeatures.PowerManagement.HasVoltageID) + bHasFeature = true; + + // Check for FPU support. + if (((dwFeature & SystemInformation::CPU_FEATURE_FPU) != 0) && + this->Features.HasFPU) + bHasFeature = true; + + return bHasFeature; +} + +void SystemInformationImplementation::Delay(unsigned int uiMS) +{ +#ifdef _WIN32 + LARGE_INTEGER Frequency, StartCounter, EndCounter; + __int64 x; + + // Get the frequency of the high performance counter. + if (!QueryPerformanceFrequency(&Frequency)) + return; + x = Frequency.QuadPart / 1000 * uiMS; + + // Get the starting position of the counter. + QueryPerformanceCounter(&StartCounter); + + do { + // Get the ending position of the counter. + QueryPerformanceCounter(&EndCounter); + } while (EndCounter.QuadPart - StartCounter.QuadPart < x); +#endif + (void)uiMS; +} + +bool SystemInformationImplementation::DoesCPUSupportCPUID() +{ +#if USE_CPUID + int dummy[4] = { 0, 0, 0, 0 }; + +# if USE_ASM_INSTRUCTIONS + return call_cpuid(0, dummy); +# else + call_cpuid(0, dummy); + return dummy[0] || dummy[1] || dummy[2] || dummy[3]; +# endif +#else + // Assume no cpuid instruction. + return false; +#endif +} + +bool SystemInformationImplementation::RetrieveCPUFeatures() +{ +#if USE_CPUID + int cpuinfo[4] = { 0, 0, 0, 0 }; + + if (!call_cpuid(1, cpuinfo)) { + return false; + } + + // Retrieve the features of CPU present. 
+ this->Features.HasFPU = + ((cpuinfo[3] & 0x00000001) != 0); // FPU Present --> Bit 0 + this->Features.HasTSC = + ((cpuinfo[3] & 0x00000010) != 0); // TSC Present --> Bit 4 + this->Features.HasAPIC = + ((cpuinfo[3] & 0x00000200) != 0); // APIC Present --> Bit 9 + this->Features.HasMTRR = + ((cpuinfo[3] & 0x00001000) != 0); // MTRR Present --> Bit 12 + this->Features.HasCMOV = + ((cpuinfo[3] & 0x00008000) != 0); // CMOV Present --> Bit 15 + this->Features.HasSerial = + ((cpuinfo[3] & 0x00040000) != 0); // Serial Present --> Bit 18 + this->Features.HasACPI = + ((cpuinfo[3] & 0x00400000) != 0); // ACPI Capable --> Bit 22 + this->Features.HasMMX = + ((cpuinfo[3] & 0x00800000) != 0); // MMX Present --> Bit 23 + this->Features.HasSSE = + ((cpuinfo[3] & 0x02000000) != 0); // SSE Present --> Bit 25 + this->Features.HasSSE2 = + ((cpuinfo[3] & 0x04000000) != 0); // SSE2 Present --> Bit 26 + this->Features.HasThermal = + ((cpuinfo[3] & 0x20000000) != 0); // Thermal Monitor Present --> Bit 29 + this->Features.HasIA64 = + ((cpuinfo[3] & 0x40000000) != 0); // IA64 Present --> Bit 30 + +# if USE_ASM_INSTRUCTIONS + // Retrieve extended SSE capabilities if SSE is available. + if (this->Features.HasSSE) { + + // Attempt to __try some SSE FP instructions. + __try { + // Perform: orps xmm0, xmm0 + _asm + { + _emit 0x0f + _emit 0x56 + _emit 0xc0 + } + + // SSE FP capable processor. + this->Features.HasSSEFP = true; + } __except (1) { + // bad instruction - processor or OS cannot handle SSE FP. + this->Features.HasSSEFP = false; + } + } else { + // Set the advanced SSE capabilities to not available. + this->Features.HasSSEFP = false; + } +# else + this->Features.HasSSEFP = false; +# endif + + // Retrieve Intel specific extended features. + if (this->ChipManufacturer == Intel) { + bool SupportsSMT = + ((cpuinfo[3] & 0x10000000) != 0); // Intel specific: SMT --> Bit 28 + + if ((SupportsSMT) && (this->Features.HasAPIC)) { + // Retrieve APIC information if there is one present. + this->Features.ExtendedFeatures.APIC_ID = + ((cpuinfo[1] & 0xFF000000) >> 24); + } + } + + return true; + +#else + return false; +#endif +} + +/** Find the manufacturer given the vendor id */ +void SystemInformationImplementation::FindManufacturer( + const std::string& family) +{ + if (this->ChipID.Vendor == "GenuineIntel") + this->ChipManufacturer = Intel; // Intel Corp. + else if (this->ChipID.Vendor == "UMC UMC UMC ") + this->ChipManufacturer = UMC; // United Microelectronics Corp. + else if (this->ChipID.Vendor == "AuthenticAMD") + this->ChipManufacturer = AMD; // Advanced Micro Devices + else if (this->ChipID.Vendor == "AMD ISBETTER") + this->ChipManufacturer = AMD; // Advanced Micro Devices (1994) + else if (this->ChipID.Vendor == "HygonGenuine") + this->ChipManufacturer = Hygon; // Chengdu Haiguang IC Design Co., Ltd. + else if (this->ChipID.Vendor == "CyrixInstead") + this->ChipManufacturer = Cyrix; // Cyrix Corp., VIA Inc. + else if (this->ChipID.Vendor == "NexGenDriven") + this->ChipManufacturer = NexGen; // NexGen Inc. 
(now AMD) + else if (this->ChipID.Vendor == "CentaurHauls") + this->ChipManufacturer = IDT; // IDT/Centaur (now VIA) + else if (this->ChipID.Vendor == "RiseRiseRise") + this->ChipManufacturer = Rise; // Rise + else if (this->ChipID.Vendor == "GenuineTMx86") + this->ChipManufacturer = Transmeta; // Transmeta + else if (this->ChipID.Vendor == "TransmetaCPU") + this->ChipManufacturer = Transmeta; // Transmeta + else if (this->ChipID.Vendor == "Geode By NSC") + this->ChipManufacturer = NSC; // National Semiconductor + else if (this->ChipID.Vendor == "Sun") + this->ChipManufacturer = Sun; // Sun Microelectronics + else if (this->ChipID.Vendor == "IBM") + this->ChipManufacturer = IBM; // IBM Microelectronics + else if (this->ChipID.Vendor == "Hewlett-Packard") + this->ChipManufacturer = HP; // Hewlett-Packard + else if (this->ChipID.Vendor == "Motorola") + this->ChipManufacturer = Motorola; // Motorola Microelectronics + else if (family.substr(0, 7) == "PA-RISC") + this->ChipManufacturer = HP; // Hewlett-Packard + else + this->ChipManufacturer = UnknownManufacturer; // Unknown manufacturer +} + +/** */ +bool SystemInformationImplementation::RetrieveCPUIdentity() +{ +#if USE_CPUID + int localCPUVendor[4]; + int localCPUSignature[4]; + + if (!call_cpuid(0, localCPUVendor)) { + return false; + } + if (!call_cpuid(1, localCPUSignature)) { + return false; + } + + // Process the returned information. + // ; eax = 0 --> eax: maximum value of CPUID instruction. + // ; ebx: part 1 of 3; CPU signature. + // ; edx: part 2 of 3; CPU signature. + // ; ecx: part 3 of 3; CPU signature. + char vbuf[13]; + memcpy(&(vbuf[0]), &(localCPUVendor[1]), sizeof(int)); + memcpy(&(vbuf[4]), &(localCPUVendor[3]), sizeof(int)); + memcpy(&(vbuf[8]), &(localCPUVendor[2]), sizeof(int)); + vbuf[12] = '\0'; + this->ChipID.Vendor = vbuf; + + // Retrieve the family of CPU present. + // ; eax = 1 --> eax: CPU ID - bits 31..16 - unused, bits 15..12 - type, + // bits 11..8 - family, bits 7..4 - model, bits 3..0 - mask revision + // ; ebx: 31..24 - default APIC ID, 23..16 - logical processor ID, + // 15..8 - CFLUSH chunk size , 7..0 - brand ID + // ; edx: CPU feature flags + this->ChipID.ExtendedFamily = + ((localCPUSignature[0] & 0x0FF00000) >> 20); // Bits 27..20 Used + this->ChipID.ExtendedModel = + ((localCPUSignature[0] & 0x000F0000) >> 16); // Bits 19..16 Used + this->ChipID.Type = + ((localCPUSignature[0] & 0x0000F000) >> 12); // Bits 15..12 Used + this->ChipID.Family = + ((localCPUSignature[0] & 0x00000F00) >> 8); // Bits 11..8 Used + this->ChipID.Model = + ((localCPUSignature[0] & 0x000000F0) >> 4); // Bits 7..4 Used + this->ChipID.Revision = + ((localCPUSignature[0] & 0x0000000F) >> 0); // Bits 3..0 Used + + return true; + +#else + return false; +#endif +} + +/** */ +bool SystemInformationImplementation::RetrieveCPUCacheDetails() +{ +#if USE_CPUID + int L1Cache[4] = { 0, 0, 0, 0 }; + int L2Cache[4] = { 0, 0, 0, 0 }; + + // Check to see if what we are about to do is supported... + if (RetrieveCPUExtendedLevelSupport(0x80000005)) { + if (!call_cpuid(0x80000005, L1Cache)) { + return false; + } + // Save the L1 data cache size (in KB) from ecx: bits 31..24 as well as + // data cache size from edx: bits 31..24. + this->Features.L1CacheSize = ((L1Cache[2] & 0xFF000000) >> 24); + this->Features.L1CacheSize += ((L1Cache[3] & 0xFF000000) >> 24); + } else { + // Store -1 to indicate the cache could not be queried. + this->Features.L1CacheSize = -1; + } + + // Check to see if what we are about to do is supported... 
+ if (RetrieveCPUExtendedLevelSupport(0x80000006)) { + if (!call_cpuid(0x80000006, L2Cache)) { + return false; + } + // Save the L2 unified cache size (in KB) from ecx: bits 31..16. + this->Features.L2CacheSize = ((L2Cache[2] & 0xFFFF0000) >> 16); + } else { + // Store -1 to indicate the cache could not be queried. + this->Features.L2CacheSize = -1; + } + + // Define L3 as being not present as we cannot test for it. + this->Features.L3CacheSize = -1; + +#endif + + // Return failure if we cannot detect either cache with this method. + return ((this->Features.L1CacheSize == -1) && + (this->Features.L2CacheSize == -1)) + ? false + : true; +} + +/** */ +bool SystemInformationImplementation::RetrieveClassicalCPUCacheDetails() +{ +#if USE_CPUID + int TLBCode = -1, TLBData = -1, L1Code = -1, L1Data = -1, L1Trace = -1, + L2Unified = -1, L3Unified = -1; + int TLBCacheData[4] = { 0, 0, 0, 0 }; + int TLBPassCounter = 0; + int TLBCacheUnit = 0; + + do { + if (!call_cpuid(2, TLBCacheData)) { + return false; + } + + int bob = ((TLBCacheData[0] & 0x00FF0000) >> 16); + (void)bob; + // Process the returned TLB and cache information. + for (int nCounter = 0; nCounter < TLBCACHE_INFO_UNITS; nCounter++) { + // First of all - decide which unit we are dealing with. + switch (nCounter) { + // eax: bits 8..15 : bits 16..23 : bits 24..31 + case 0: + TLBCacheUnit = ((TLBCacheData[0] & 0x0000FF00) >> 8); + break; + case 1: + TLBCacheUnit = ((TLBCacheData[0] & 0x00FF0000) >> 16); + break; + case 2: + TLBCacheUnit = ((TLBCacheData[0] & 0xFF000000) >> 24); + break; + + // ebx: bits 0..7 : bits 8..15 : bits 16..23 : bits 24..31 + case 3: + TLBCacheUnit = ((TLBCacheData[1] & 0x000000FF) >> 0); + break; + case 4: + TLBCacheUnit = ((TLBCacheData[1] & 0x0000FF00) >> 8); + break; + case 5: + TLBCacheUnit = ((TLBCacheData[1] & 0x00FF0000) >> 16); + break; + case 6: + TLBCacheUnit = ((TLBCacheData[1] & 0xFF000000) >> 24); + break; + + // ecx: bits 0..7 : bits 8..15 : bits 16..23 : bits 24..31 + case 7: + TLBCacheUnit = ((TLBCacheData[2] & 0x000000FF) >> 0); + break; + case 8: + TLBCacheUnit = ((TLBCacheData[2] & 0x0000FF00) >> 8); + break; + case 9: + TLBCacheUnit = ((TLBCacheData[2] & 0x00FF0000) >> 16); + break; + case 10: + TLBCacheUnit = ((TLBCacheData[2] & 0xFF000000) >> 24); + break; + + // edx: bits 0..7 : bits 8..15 : bits 16..23 : bits 24..31 + case 11: + TLBCacheUnit = ((TLBCacheData[3] & 0x000000FF) >> 0); + break; + case 12: + TLBCacheUnit = ((TLBCacheData[3] & 0x0000FF00) >> 8); + break; + case 13: + TLBCacheUnit = ((TLBCacheData[3] & 0x00FF0000) >> 16); + break; + case 14: + TLBCacheUnit = ((TLBCacheData[3] & 0xFF000000) >> 24); + break; + + // Default case - an error has occurred. + default: + return false; + } + + // Now process the resulting unit to see what it means.... 
+ switch (TLBCacheUnit) { + case 0x00: + break; + case 0x01: + STORE_TLBCACHE_INFO(TLBCode, 4); + break; + case 0x02: + STORE_TLBCACHE_INFO(TLBCode, 4096); + break; + case 0x03: + STORE_TLBCACHE_INFO(TLBData, 4); + break; + case 0x04: + STORE_TLBCACHE_INFO(TLBData, 4096); + break; + case 0x06: + STORE_TLBCACHE_INFO(L1Code, 8); + break; + case 0x08: + STORE_TLBCACHE_INFO(L1Code, 16); + break; + case 0x0a: + STORE_TLBCACHE_INFO(L1Data, 8); + break; + case 0x0c: + STORE_TLBCACHE_INFO(L1Data, 16); + break; + case 0x10: + STORE_TLBCACHE_INFO(L1Data, 16); + break; // <-- FIXME: IA-64 Only + case 0x15: + STORE_TLBCACHE_INFO(L1Code, 16); + break; // <-- FIXME: IA-64 Only + case 0x1a: + STORE_TLBCACHE_INFO(L2Unified, 96); + break; // <-- FIXME: IA-64 Only + case 0x22: + STORE_TLBCACHE_INFO(L3Unified, 512); + break; + case 0x23: + STORE_TLBCACHE_INFO(L3Unified, 1024); + break; + case 0x25: + STORE_TLBCACHE_INFO(L3Unified, 2048); + break; + case 0x29: + STORE_TLBCACHE_INFO(L3Unified, 4096); + break; + case 0x39: + STORE_TLBCACHE_INFO(L2Unified, 128); + break; + case 0x3c: + STORE_TLBCACHE_INFO(L2Unified, 256); + break; + case 0x40: + STORE_TLBCACHE_INFO(L2Unified, 0); + break; // <-- FIXME: No integrated L2 cache (P6 core) or L3 cache (P4 + // core). + case 0x41: + STORE_TLBCACHE_INFO(L2Unified, 128); + break; + case 0x42: + STORE_TLBCACHE_INFO(L2Unified, 256); + break; + case 0x43: + STORE_TLBCACHE_INFO(L2Unified, 512); + break; + case 0x44: + STORE_TLBCACHE_INFO(L2Unified, 1024); + break; + case 0x45: + STORE_TLBCACHE_INFO(L2Unified, 2048); + break; + case 0x50: + STORE_TLBCACHE_INFO(TLBCode, 4096); + break; + case 0x51: + STORE_TLBCACHE_INFO(TLBCode, 4096); + break; + case 0x52: + STORE_TLBCACHE_INFO(TLBCode, 4096); + break; + case 0x5b: + STORE_TLBCACHE_INFO(TLBData, 4096); + break; + case 0x5c: + STORE_TLBCACHE_INFO(TLBData, 4096); + break; + case 0x5d: + STORE_TLBCACHE_INFO(TLBData, 4096); + break; + case 0x66: + STORE_TLBCACHE_INFO(L1Data, 8); + break; + case 0x67: + STORE_TLBCACHE_INFO(L1Data, 16); + break; + case 0x68: + STORE_TLBCACHE_INFO(L1Data, 32); + break; + case 0x70: + STORE_TLBCACHE_INFO(L1Trace, 12); + break; + case 0x71: + STORE_TLBCACHE_INFO(L1Trace, 16); + break; + case 0x72: + STORE_TLBCACHE_INFO(L1Trace, 32); + break; + case 0x77: + STORE_TLBCACHE_INFO(L1Code, 16); + break; // <-- FIXME: IA-64 Only + case 0x79: + STORE_TLBCACHE_INFO(L2Unified, 128); + break; + case 0x7a: + STORE_TLBCACHE_INFO(L2Unified, 256); + break; + case 0x7b: + STORE_TLBCACHE_INFO(L2Unified, 512); + break; + case 0x7c: + STORE_TLBCACHE_INFO(L2Unified, 1024); + break; + case 0x7e: + STORE_TLBCACHE_INFO(L2Unified, 256); + break; + case 0x81: + STORE_TLBCACHE_INFO(L2Unified, 128); + break; + case 0x82: + STORE_TLBCACHE_INFO(L2Unified, 256); + break; + case 0x83: + STORE_TLBCACHE_INFO(L2Unified, 512); + break; + case 0x84: + STORE_TLBCACHE_INFO(L2Unified, 1024); + break; + case 0x85: + STORE_TLBCACHE_INFO(L2Unified, 2048); + break; + case 0x88: + STORE_TLBCACHE_INFO(L3Unified, 2048); + break; // <-- FIXME: IA-64 Only + case 0x89: + STORE_TLBCACHE_INFO(L3Unified, 4096); + break; // <-- FIXME: IA-64 Only + case 0x8a: + STORE_TLBCACHE_INFO(L3Unified, 8192); + break; // <-- FIXME: IA-64 Only + case 0x8d: + STORE_TLBCACHE_INFO(L3Unified, 3096); + break; // <-- FIXME: IA-64 Only + case 0x90: + STORE_TLBCACHE_INFO(TLBCode, 262144); + break; // <-- FIXME: IA-64 Only + case 0x96: + STORE_TLBCACHE_INFO(TLBCode, 262144); + break; // <-- FIXME: IA-64 Only + case 0x9b: + STORE_TLBCACHE_INFO(TLBCode, 262144); + break; // 
<-- FIXME: IA-64 Only + + // Default case - an error has occurred. + default: + return false; + } + } + + // Increment the TLB pass counter. + TLBPassCounter++; + } while ((TLBCacheData[0] & 0x000000FF) > TLBPassCounter); + + // Ok - we now have the maximum TLB, L1, L2, and L3 sizes... + if ((L1Code == -1) && (L1Data == -1) && (L1Trace == -1)) { + this->Features.L1CacheSize = -1; + } else if ((L1Code == -1) && (L1Data == -1) && (L1Trace != -1)) { + this->Features.L1CacheSize = L1Trace; + } else if ((L1Code != -1) && (L1Data == -1)) { + this->Features.L1CacheSize = L1Code; + } else if ((L1Code == -1) && (L1Data != -1)) { + this->Features.L1CacheSize = L1Data; + } else if ((L1Code != -1) && (L1Data != -1)) { + this->Features.L1CacheSize = L1Code + L1Data; + } else { + this->Features.L1CacheSize = -1; + } + + // Ok - we now have the maximum TLB, L1, L2, and L3 sizes... + if (L2Unified == -1) { + this->Features.L2CacheSize = -1; + } else { + this->Features.L2CacheSize = L2Unified; + } + + // Ok - we now have the maximum TLB, L1, L2, and L3 sizes... + if (L3Unified == -1) { + this->Features.L3CacheSize = -1; + } else { + this->Features.L3CacheSize = L3Unified; + } + + return true; + +#else + return false; +#endif +} + +/** */ +bool SystemInformationImplementation::RetrieveCPUClockSpeed() +{ + bool retrieved = false; + +#if defined(_WIN32) + unsigned int uiRepetitions = 1; + unsigned int uiMSecPerRepetition = 50; + __int64 i64Total = 0; + __int64 i64Overhead = 0; + + // Check if the TSC implementation works at all + if (this->Features.HasTSC && + GetCyclesDifference(SystemInformationImplementation::Delay, + uiMSecPerRepetition) > 0) { + for (unsigned int nCounter = 0; nCounter < uiRepetitions; nCounter++) { + i64Total += GetCyclesDifference(SystemInformationImplementation::Delay, + uiMSecPerRepetition); + i64Overhead += GetCyclesDifference( + SystemInformationImplementation::DelayOverhead, uiMSecPerRepetition); + } + + // Calculate the MHz speed. + i64Total -= i64Overhead; + i64Total /= uiRepetitions; + i64Total /= uiMSecPerRepetition; + i64Total /= 1000; + + // Save the CPU speed. + this->CPUSpeedInMHz = (float)i64Total; + + retrieved = true; + } + + // If RDTSC is not supported, we fallback to trying to read this value + // from the registry: + if (!retrieved) { + HKEY hKey = nullptr; + LONG err = + RegOpenKeyExW(HKEY_LOCAL_MACHINE, + L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0, + KEY_READ, &hKey); + + if (ERROR_SUCCESS == err) { + DWORD dwType = 0; + DWORD data = 0; + DWORD dwSize = sizeof(DWORD); + + err = + RegQueryValueExW(hKey, L"~MHz", 0, &dwType, (LPBYTE)&data, &dwSize); + + if (ERROR_SUCCESS == err) { + this->CPUSpeedInMHz = (float)data; + retrieved = true; + } + + RegCloseKey(hKey); + hKey = nullptr; + } + } +#endif + + return retrieved; +} + +/** */ +bool SystemInformationImplementation::RetrieveClassicalCPUClockSpeed() +{ +#if USE_ASM_INSTRUCTIONS + LARGE_INTEGER liStart, liEnd, liCountsPerSecond; + double dFrequency, dDifference; + + // Attempt to get a starting tick count. + QueryPerformanceCounter(&liStart); + + __try { + _asm { + mov eax, 0x80000000 + mov ebx, CLASSICAL_CPU_FREQ_LOOP + Timer_Loop: + bsf ecx,eax + dec ebx + jnz Timer_Loop + } + } __except (1) { + return false; + } + + // Attempt to get a starting tick count. + QueryPerformanceCounter(&liEnd); + + // Get the difference... NB: This is in seconds.... 
+ QueryPerformanceFrequency(&liCountsPerSecond); + dDifference = (((double)liEnd.QuadPart - (double)liStart.QuadPart) / + (double)liCountsPerSecond.QuadPart); + + // Calculate the clock speed. + if (this->ChipID.Family == 3) { + // 80386 processors.... Loop time is 115 cycles! + dFrequency = (((CLASSICAL_CPU_FREQ_LOOP * 115) / dDifference) / 1000000); + } else if (this->ChipID.Family == 4) { + // 80486 processors.... Loop time is 47 cycles! + dFrequency = (((CLASSICAL_CPU_FREQ_LOOP * 47) / dDifference) / 1000000); + } else if (this->ChipID.Family == 5) { + // Pentium processors.... Loop time is 43 cycles! + dFrequency = (((CLASSICAL_CPU_FREQ_LOOP * 43) / dDifference) / 1000000); + } + + // Save the clock speed. + this->Features.CPUSpeed = (int)dFrequency; + + return true; + +#else + return false; +#endif +} + +/** */ +bool SystemInformationImplementation::RetrieveCPUExtendedLevelSupport( + int CPULevelToCheck) +{ + int cpuinfo[4] = { 0, 0, 0, 0 }; + + // The extended CPUID is supported by various vendors starting with the + // following CPU models: + // + // Manufacturer & Chip Name | Family Model Revision + // + // AMD K6, K6-2 | 5 6 x + // Cyrix GXm, Cyrix III "Joshua" | 5 4 x + // IDT C6-2 | 5 8 x + // VIA Cyrix III | 6 5 x + // Transmeta Crusoe | 5 x x + // Intel Pentium 4 | f x x + // + + // We check to see if a supported processor is present... + if (this->ChipManufacturer == AMD) { + if (this->ChipID.Family < 5) + return false; + if ((this->ChipID.Family == 5) && (this->ChipID.Model < 6)) + return false; + } else if (this->ChipManufacturer == Cyrix) { + if (this->ChipID.Family < 5) + return false; + if ((this->ChipID.Family == 5) && (this->ChipID.Model < 4)) + return false; + if ((this->ChipID.Family == 6) && (this->ChipID.Model < 5)) + return false; + } else if (this->ChipManufacturer == IDT) { + if (this->ChipID.Family < 5) + return false; + if ((this->ChipID.Family == 5) && (this->ChipID.Model < 8)) + return false; + } else if (this->ChipManufacturer == Transmeta) { + if (this->ChipID.Family < 5) + return false; + } else if (this->ChipManufacturer == Intel) { + if (this->ChipID.Family < 0xf) { + return false; + } + } + +#if USE_CPUID + if (!call_cpuid(0x80000000, cpuinfo)) { + return false; + } +#endif + + // Now we have to check the level wanted vs level returned... + int nLevelWanted = (CPULevelToCheck & 0x7FFFFFFF); + int nLevelReturn = (cpuinfo[0] & 0x7FFFFFFF); + + // Check to see if the level provided is supported... + if (nLevelWanted > nLevelReturn) { + return false; + } + + return true; +} + +/** */ +bool SystemInformationImplementation::RetrieveExtendedCPUFeatures() +{ + + // Check that we are not using an Intel processor as it does not support + // this. + if (this->ChipManufacturer == Intel) { + return false; + } + + // Check to see if what we are about to do is supported... + if (!RetrieveCPUExtendedLevelSupport(static_cast(0x80000001))) { + return false; + } + +#if USE_CPUID + int localCPUExtendedFeatures[4] = { 0, 0, 0, 0 }; + + if (!call_cpuid(0x80000001, localCPUExtendedFeatures)) { + return false; + } + + // Retrieve the extended features of CPU present. + this->Features.ExtendedFeatures.Has3DNow = + ((localCPUExtendedFeatures[3] & 0x80000000) != + 0); // 3DNow Present --> Bit 31. + this->Features.ExtendedFeatures.Has3DNowPlus = + ((localCPUExtendedFeatures[3] & 0x40000000) != + 0); // 3DNow+ Present -- > Bit 30. + this->Features.ExtendedFeatures.HasSSEMMX = + ((localCPUExtendedFeatures[3] & 0x00400000) != + 0); // SSE MMX Present --> Bit 22. 
+ this->Features.ExtendedFeatures.SupportsMP = + ((localCPUExtendedFeatures[3] & 0x00080000) != + 0); // MP Capable -- > Bit 19. + + // Retrieve AMD specific extended features. + if (this->ChipManufacturer == AMD || this->ChipManufacturer == Hygon) { + this->Features.ExtendedFeatures.HasMMXPlus = + ((localCPUExtendedFeatures[3] & 0x00400000) != + 0); // AMD specific: MMX-SSE --> Bit 22 + } + + // Retrieve Cyrix specific extended features. + if (this->ChipManufacturer == Cyrix) { + this->Features.ExtendedFeatures.HasMMXPlus = + ((localCPUExtendedFeatures[3] & 0x01000000) != + 0); // Cyrix specific: Extended MMX --> Bit 24 + } + + return true; + +#else + return false; +#endif +} + +/** */ +bool SystemInformationImplementation::RetrieveProcessorSerialNumber() +{ + // Check to see if the processor supports the processor serial number. + if (!this->Features.HasSerial) { + return false; + } + +#if USE_CPUID + int SerialNumber[4]; + + if (!call_cpuid(3, SerialNumber)) { + return false; + } + + // Process the returned information. + // ; eax = 3 --> ebx: top 32 bits are the processor signature bits --> NB: + // Transmeta only ?!? + // ; ecx: middle 32 bits are the processor signature bits + // ; edx: bottom 32 bits are the processor signature bits + char sn[128]; + sprintf(sn, "%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x", + ((SerialNumber[1] & 0xff000000) >> 24), + ((SerialNumber[1] & 0x00ff0000) >> 16), + ((SerialNumber[1] & 0x0000ff00) >> 8), + ((SerialNumber[1] & 0x000000ff) >> 0), + ((SerialNumber[2] & 0xff000000) >> 24), + ((SerialNumber[2] & 0x00ff0000) >> 16), + ((SerialNumber[2] & 0x0000ff00) >> 8), + ((SerialNumber[2] & 0x000000ff) >> 0), + ((SerialNumber[3] & 0xff000000) >> 24), + ((SerialNumber[3] & 0x00ff0000) >> 16), + ((SerialNumber[3] & 0x0000ff00) >> 8), + ((SerialNumber[3] & 0x000000ff) >> 0)); + this->ChipID.SerialNumber = sn; + return true; + +#else + return false; +#endif +} + +/** */ +bool SystemInformationImplementation::RetrieveCPUPowerManagement() +{ + // Check to see if what we are about to do is supported... + if (!RetrieveCPUExtendedLevelSupport(static_cast(0x80000007))) { + this->Features.ExtendedFeatures.PowerManagement.HasFrequencyID = false; + this->Features.ExtendedFeatures.PowerManagement.HasVoltageID = false; + this->Features.ExtendedFeatures.PowerManagement.HasTempSenseDiode = false; + return false; + } + +#if USE_CPUID + int localCPUPowerManagement[4] = { 0, 0, 0, 0 }; + + if (!call_cpuid(0x80000007, localCPUPowerManagement)) { + return false; + } + + // Check for the power management capabilities of the CPU. + this->Features.ExtendedFeatures.PowerManagement.HasTempSenseDiode = + ((localCPUPowerManagement[3] & 0x00000001) != 0); + this->Features.ExtendedFeatures.PowerManagement.HasFrequencyID = + ((localCPUPowerManagement[3] & 0x00000002) != 0); + this->Features.ExtendedFeatures.PowerManagement.HasVoltageID = + ((localCPUPowerManagement[3] & 0x00000004) != 0); + + return true; + +#else + return false; +#endif +} + +#if USE_CPUID +// Used only in USE_CPUID implementation below. +static void SystemInformationStripLeadingSpace(std::string& str) +{ + // Because some manufacturers have leading white space - we have to + // post-process the name. + std::string::size_type pos = str.find_first_not_of(" "); + if (pos != std::string::npos) { + str = str.substr(pos); + } +} +#endif + +/** */ +bool SystemInformationImplementation::RetrieveExtendedCPUIdentity() +{ + // Check to see if what we are about to do is supported... 
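That support check boils down to comparing the requested leaf against the maximum extended leaf reported in EAX of CPUID 0x80000000. A standalone sketch of the comparison (assuming <cpuid.h> on a GCC/Clang x86 toolchain; the helper name is illustrative):

#include <cpuid.h>

// Illustrative helper: true if the CPU reports support for the given
// extended CPUID leaf (e.g. 0x80000002..0x80000004 for the brand string).
static bool ExtendedLeafSupported(unsigned int leaf)
{
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx)) {
    return false; // No extended leaves at all.
  }
  // EAX of leaf 0x80000000 holds the highest supported extended leaf.
  return eax >= leaf;
}

The per-vendor family gating performed by RetrieveCPUExtendedLevelSupport() above is an additional, conservative filter on top of this EAX comparison.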
+ if (!RetrieveCPUExtendedLevelSupport(static_cast(0x80000002))) + return false; + if (!RetrieveCPUExtendedLevelSupport(static_cast(0x80000003))) + return false; + if (!RetrieveCPUExtendedLevelSupport(static_cast(0x80000004))) + return false; + +#if USE_CPUID + int CPUExtendedIdentity[12]; + + if (!call_cpuid(0x80000002, CPUExtendedIdentity)) { + return false; + } + if (!call_cpuid(0x80000003, CPUExtendedIdentity + 4)) { + return false; + } + if (!call_cpuid(0x80000004, CPUExtendedIdentity + 8)) { + return false; + } + + // Process the returned information. + char nbuf[49]; + memcpy(&(nbuf[0]), &(CPUExtendedIdentity[0]), sizeof(int)); + memcpy(&(nbuf[4]), &(CPUExtendedIdentity[1]), sizeof(int)); + memcpy(&(nbuf[8]), &(CPUExtendedIdentity[2]), sizeof(int)); + memcpy(&(nbuf[12]), &(CPUExtendedIdentity[3]), sizeof(int)); + memcpy(&(nbuf[16]), &(CPUExtendedIdentity[4]), sizeof(int)); + memcpy(&(nbuf[20]), &(CPUExtendedIdentity[5]), sizeof(int)); + memcpy(&(nbuf[24]), &(CPUExtendedIdentity[6]), sizeof(int)); + memcpy(&(nbuf[28]), &(CPUExtendedIdentity[7]), sizeof(int)); + memcpy(&(nbuf[32]), &(CPUExtendedIdentity[8]), sizeof(int)); + memcpy(&(nbuf[36]), &(CPUExtendedIdentity[9]), sizeof(int)); + memcpy(&(nbuf[40]), &(CPUExtendedIdentity[10]), sizeof(int)); + memcpy(&(nbuf[44]), &(CPUExtendedIdentity[11]), sizeof(int)); + nbuf[48] = '\0'; + this->ChipID.ProcessorName = nbuf; + this->ChipID.ModelName = nbuf; + + // Because some manufacturers have leading white space - we have to + // post-process the name. + SystemInformationStripLeadingSpace(this->ChipID.ProcessorName); + return true; +#else + return false; +#endif +} + +/** */ +bool SystemInformationImplementation::RetrieveClassicalCPUIdentity() +{ + // Start by decided which manufacturer we are using.... + switch (this->ChipManufacturer) { + case Intel: + // Check the family / model / revision to determine the CPU ID. 
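The identity tables that follow branch on family, model, extended family, and stepping; those fields come from EAX of CPUID leaf 1. A minimal decode sketch for reference (assumes x86 and <cpuid.h>; the field layout is the standard CPUID signature format, and the struct name is illustrative):

#include <cpuid.h>

struct CpuSignature
{
  unsigned int Stepping;
  unsigned int Model;
  unsigned int Family;
  unsigned int ExtendedModel;
  unsigned int ExtendedFamily;
};

// Decode the processor signature fields from CPUID leaf 1, EAX.
static bool GetCpuSignature(CpuSignature& sig)
{
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
    return false;
  }
  sig.Stepping = eax & 0xF;                // bits  3..0
  sig.Model = (eax >> 4) & 0xF;            // bits  7..4
  sig.Family = (eax >> 8) & 0xF;           // bits 11..8
  sig.ExtendedModel = (eax >> 16) & 0xF;   // bits 19..16
  sig.ExtendedFamily = (eax >> 20) & 0xFF; // bits 27..20
  return true;
}

With those fields in hand, the per-vendor name tables follow.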
+ switch (this->ChipID.Family) { + case 3: + this->ChipID.ProcessorName = "Newer i80386 family"; + break; + case 4: + switch (this->ChipID.Model) { + case 0: + this->ChipID.ProcessorName = "i80486DX-25/33"; + break; + case 1: + this->ChipID.ProcessorName = "i80486DX-50"; + break; + case 2: + this->ChipID.ProcessorName = "i80486SX"; + break; + case 3: + this->ChipID.ProcessorName = "i80486DX2"; + break; + case 4: + this->ChipID.ProcessorName = "i80486SL"; + break; + case 5: + this->ChipID.ProcessorName = "i80486SX2"; + break; + case 7: + this->ChipID.ProcessorName = "i80486DX2 WriteBack"; + break; + case 8: + this->ChipID.ProcessorName = "i80486DX4"; + break; + case 9: + this->ChipID.ProcessorName = "i80486DX4 WriteBack"; + break; + default: + this->ChipID.ProcessorName = "Unknown 80486 family"; + return false; + } + break; + case 5: + switch (this->ChipID.Model) { + case 0: + this->ChipID.ProcessorName = "P5 A-Step"; + break; + case 1: + this->ChipID.ProcessorName = "P5"; + break; + case 2: + this->ChipID.ProcessorName = "P54C"; + break; + case 3: + this->ChipID.ProcessorName = "P24T OverDrive"; + break; + case 4: + this->ChipID.ProcessorName = "P55C"; + break; + case 7: + this->ChipID.ProcessorName = "P54C"; + break; + case 8: + this->ChipID.ProcessorName = "P55C (0.25micron)"; + break; + default: + this->ChipID.ProcessorName = "Unknown Pentium family"; + return false; + } + break; + case 6: + switch (this->ChipID.Model) { + case 0: + this->ChipID.ProcessorName = "P6 A-Step"; + break; + case 1: + this->ChipID.ProcessorName = "P6"; + break; + case 3: + this->ChipID.ProcessorName = "Pentium II (0.28 micron)"; + break; + case 5: + this->ChipID.ProcessorName = "Pentium II (0.25 micron)"; + break; + case 6: + this->ChipID.ProcessorName = "Pentium II With On-Die L2 Cache"; + break; + case 7: + this->ChipID.ProcessorName = "Pentium III (0.25 micron)"; + break; + case 8: + this->ChipID.ProcessorName = + "Pentium III (0.18 micron) With 256 KB On-Die L2 Cache "; + break; + case 0xa: + this->ChipID.ProcessorName = + "Pentium III (0.18 micron) With 1 Or 2 MB On-Die L2 Cache "; + break; + case 0xb: + this->ChipID.ProcessorName = "Pentium III (0.13 micron) With " + "256 Or 512 KB On-Die L2 Cache "; + break; + case 23: + this->ChipID.ProcessorName = + "Intel(R) Core(TM)2 Duo CPU T9500 @ 2.60GHz"; + break; + default: + this->ChipID.ProcessorName = "Unknown P6 family"; + return false; + } + break; + case 7: + this->ChipID.ProcessorName = "Intel Merced (IA-64)"; + break; + case 0xf: + // Check the extended family bits... + switch (this->ChipID.ExtendedFamily) { + case 0: + switch (this->ChipID.Model) { + case 0: + this->ChipID.ProcessorName = "Pentium IV (0.18 micron)"; + break; + case 1: + this->ChipID.ProcessorName = "Pentium IV (0.18 micron)"; + break; + case 2: + this->ChipID.ProcessorName = "Pentium IV (0.13 micron)"; + break; + default: + this->ChipID.ProcessorName = "Unknown Pentium 4 family"; + return false; + } + break; + case 1: + this->ChipID.ProcessorName = "Intel McKinley (IA-64)"; + break; + default: + this->ChipID.ProcessorName = "Pentium"; + } + break; + default: + this->ChipID.ProcessorName = "Unknown Intel family"; + return false; + } + break; + + case AMD: + // Check the family / model / revision to determine the CPU ID. 
+ switch (this->ChipID.Family) { + case 4: + switch (this->ChipID.Model) { + case 3: + this->ChipID.ProcessorName = "80486DX2"; + break; + case 7: + this->ChipID.ProcessorName = "80486DX2 WriteBack"; + break; + case 8: + this->ChipID.ProcessorName = "80486DX4"; + break; + case 9: + this->ChipID.ProcessorName = "80486DX4 WriteBack"; + break; + case 0xe: + this->ChipID.ProcessorName = "5x86"; + break; + case 0xf: + this->ChipID.ProcessorName = "5x86WB"; + break; + default: + this->ChipID.ProcessorName = "Unknown 80486 family"; + return false; + } + break; + case 5: + switch (this->ChipID.Model) { + case 0: + this->ChipID.ProcessorName = "SSA5 (PR75, PR90 = PR100)"; + break; + case 1: + this->ChipID.ProcessorName = "5k86 (PR120 = PR133)"; + break; + case 2: + this->ChipID.ProcessorName = "5k86 (PR166)"; + break; + case 3: + this->ChipID.ProcessorName = "5k86 (PR200)"; + break; + case 6: + this->ChipID.ProcessorName = "K6 (0.30 micron)"; + break; + case 7: + this->ChipID.ProcessorName = "K6 (0.25 micron)"; + break; + case 8: + this->ChipID.ProcessorName = "K6-2"; + break; + case 9: + this->ChipID.ProcessorName = "K6-III"; + break; + case 0xd: + this->ChipID.ProcessorName = "K6-2+ or K6-III+ (0.18 micron)"; + break; + default: + this->ChipID.ProcessorName = "Unknown 80586 family"; + return false; + } + break; + case 6: + switch (this->ChipID.Model) { + case 1: + this->ChipID.ProcessorName = "Athlon- (0.25 micron)"; + break; + case 2: + this->ChipID.ProcessorName = "Athlon- (0.18 micron)"; + break; + case 3: + this->ChipID.ProcessorName = "Duron- (SF core)"; + break; + case 4: + this->ChipID.ProcessorName = "Athlon- (Thunderbird core)"; + break; + case 6: + this->ChipID.ProcessorName = "Athlon- (Palomino core)"; + break; + case 7: + this->ChipID.ProcessorName = "Duron- (Morgan core)"; + break; + case 8: + if (this->Features.ExtendedFeatures.SupportsMP) + this->ChipID.ProcessorName = "Athlon - MP (Thoroughbred core)"; + else + this->ChipID.ProcessorName = "Athlon - XP (Thoroughbred core)"; + break; + default: + this->ChipID.ProcessorName = "Unknown K7 family"; + return false; + } + break; + default: + this->ChipID.ProcessorName = "Unknown AMD family"; + return false; + } + break; + + case Hygon: + this->ChipID.ProcessorName = "Unknown Hygon family"; + return false; + + case Transmeta: + switch (this->ChipID.Family) { + case 5: + switch (this->ChipID.Model) { + case 4: + this->ChipID.ProcessorName = "Crusoe TM3x00 and TM5x00"; + break; + default: + this->ChipID.ProcessorName = "Unknown Crusoe family"; + return false; + } + break; + default: + this->ChipID.ProcessorName = "Unknown Transmeta family"; + return false; + } + break; + + case Rise: + switch (this->ChipID.Family) { + case 5: + switch (this->ChipID.Model) { + case 0: + this->ChipID.ProcessorName = "mP6 (0.25 micron)"; + break; + case 2: + this->ChipID.ProcessorName = "mP6 (0.18 micron)"; + break; + default: + this->ChipID.ProcessorName = "Unknown Rise family"; + return false; + } + break; + default: + this->ChipID.ProcessorName = "Unknown Rise family"; + return false; + } + break; + + case UMC: + switch (this->ChipID.Family) { + case 4: + switch (this->ChipID.Model) { + case 1: + this->ChipID.ProcessorName = "U5D"; + break; + case 2: + this->ChipID.ProcessorName = "U5S"; + break; + default: + this->ChipID.ProcessorName = "Unknown UMC family"; + return false; + } + break; + default: + this->ChipID.ProcessorName = "Unknown UMC family"; + return false; + } + break; + + case IDT: + switch (this->ChipID.Family) { + case 5: + switch 
(this->ChipID.Model) { + case 4: + this->ChipID.ProcessorName = "C6"; + break; + case 8: + this->ChipID.ProcessorName = "C2"; + break; + case 9: + this->ChipID.ProcessorName = "C3"; + break; + default: + this->ChipID.ProcessorName = "Unknown IDT\\Centaur family"; + return false; + } + break; + case 6: + switch (this->ChipID.Model) { + case 6: + this->ChipID.ProcessorName = "VIA Cyrix III - Samuel"; + break; + default: + this->ChipID.ProcessorName = "Unknown IDT\\Centaur family"; + return false; + } + break; + default: + this->ChipID.ProcessorName = "Unknown IDT\\Centaur family"; + return false; + } + break; + + case Cyrix: + switch (this->ChipID.Family) { + case 4: + switch (this->ChipID.Model) { + case 4: + this->ChipID.ProcessorName = "MediaGX GX = GXm"; + break; + case 9: + this->ChipID.ProcessorName = "5x86"; + break; + default: + this->ChipID.ProcessorName = "Unknown Cx5x86 family"; + return false; + } + break; + case 5: + switch (this->ChipID.Model) { + case 2: + this->ChipID.ProcessorName = "Cx6x86"; + break; + case 4: + this->ChipID.ProcessorName = "MediaGX GXm"; + break; + default: + this->ChipID.ProcessorName = "Unknown Cx6x86 family"; + return false; + } + break; + case 6: + switch (this->ChipID.Model) { + case 0: + this->ChipID.ProcessorName = "6x86MX"; + break; + case 5: + this->ChipID.ProcessorName = "Cyrix M2 Core"; + break; + case 6: + this->ChipID.ProcessorName = "WinChip C5A Core"; + break; + case 7: + this->ChipID.ProcessorName = "WinChip C5B\\C5C Core"; + break; + case 8: + this->ChipID.ProcessorName = "WinChip C5C-T Core"; + break; + default: + this->ChipID.ProcessorName = "Unknown 6x86MX\\Cyrix III family"; + return false; + } + break; + default: + this->ChipID.ProcessorName = "Unknown Cyrix family"; + return false; + } + break; + + case NexGen: + switch (this->ChipID.Family) { + case 5: + switch (this->ChipID.Model) { + case 0: + this->ChipID.ProcessorName = "Nx586 or Nx586FPU"; + break; + default: + this->ChipID.ProcessorName = "Unknown NexGen family"; + return false; + } + break; + default: + this->ChipID.ProcessorName = "Unknown NexGen family"; + return false; + } + break; + + case NSC: + this->ChipID.ProcessorName = "Cx486SLC \\ DLC \\ Cx486S A-Step"; + break; + + case Sun: + case IBM: + case Motorola: + case HP: + case UnknownManufacturer: + default: + this->ChipID.ProcessorName = + "Unknown family"; // We cannot identify the processor. + return false; + } + + return true; +} + +/** Extract a value from the CPUInfo file */ +std::string SystemInformationImplementation::ExtractValueFromCpuInfoFile( + std::string buffer, const char* word, size_t init) +{ + size_t pos = buffer.find(word, init); + if (pos != std::string::npos) { + this->CurrentPositionInFile = pos; + pos = buffer.find(":", pos); + size_t pos2 = buffer.find("\n", pos); + if (pos != std::string::npos && pos2 != std::string::npos) { + // It may happen that the beginning matches, but this is still not the + // requested key. + // An example is looking for "cpu" when "cpu family" comes first. So we + // check that + // we have only spaces from here to pos, otherwise we search again. 
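The loop that follows implements that whitespace check. For reference, an equivalent standalone parser that sidesteps the prefix problem ("cpu" vs "cpu family") by requiring an exact key before the colon; this is a sketch only, not the routine used by the patch:

#include <sstream>
#include <string>

// Return the value for an exact /proc/cpuinfo key ("cpu", "cpu family", ...),
// or an empty string if the key is not present.
static std::string CpuInfoValue(const std::string& cpuinfo,
                                const std::string& key)
{
  std::istringstream in(cpuinfo);
  std::string line;
  while (std::getline(in, line)) {
    std::string::size_type colon = line.find(':');
    if (colon == std::string::npos) {
      continue;
    }
    // The key is everything before ':' with trailing spaces/tabs removed.
    std::string k = line.substr(0, colon);
    k.erase(k.find_last_not_of(" \t") + 1);
    if (k == key) {
      std::string v = line.substr(colon + 1);
      v.erase(0, v.find_first_not_of(" \t"));
      return v;
    }
  }
  return std::string();
}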
+ for (size_t i = this->CurrentPositionInFile + strlen(word); i < pos; + ++i) { + if (buffer[i] != ' ' && buffer[i] != '\t') { + return this->ExtractValueFromCpuInfoFile(buffer, word, pos2); + } + } + return buffer.substr(pos + 2, pos2 - pos - 2); + } + } + this->CurrentPositionInFile = std::string::npos; + return ""; +} + +/** Query for the cpu status */ +bool SystemInformationImplementation::RetreiveInformationFromCpuInfoFile() +{ + this->NumberOfLogicalCPU = 0; + this->NumberOfPhysicalCPU = 0; + std::string buffer; + + FILE* fd = fopen("/proc/cpuinfo", "r"); + if (!fd) { + std::cout << "Problem opening /proc/cpuinfo" << std::endl; + return false; + } + + size_t fileSize = 0; + while (!feof(fd)) { + buffer += static_cast(fgetc(fd)); + fileSize++; + } + fclose(fd); + buffer.resize(fileSize - 2); + // Number of logical CPUs (combination of multiple processors, multi-core + // and SMT) + size_t pos = buffer.find("processor\t"); + while (pos != std::string::npos) { + this->NumberOfLogicalCPU++; + pos = buffer.find("processor\t", pos + 1); + } + +#ifdef __linux + // Count sockets. + std::set PhysicalIDs; + std::string idc = this->ExtractValueFromCpuInfoFile(buffer, "physical id"); + while (this->CurrentPositionInFile != std::string::npos) { + int id = atoi(idc.c_str()); + PhysicalIDs.insert(id); + idc = this->ExtractValueFromCpuInfoFile(buffer, "physical id", + this->CurrentPositionInFile + 1); + } + uint64_t NumberOfSockets = PhysicalIDs.size(); + NumberOfSockets = std::max(NumberOfSockets, (uint64_t)1); + // Physical ids returned by Linux don't distinguish cores. + // We want to record the total number of cores in this->NumberOfPhysicalCPU + // (checking only the first proc) + std::string Cores = this->ExtractValueFromCpuInfoFile(buffer, "cpu cores"); + unsigned int NumberOfCoresPerSocket = (unsigned int)atoi(Cores.c_str()); + NumberOfCoresPerSocket = std::max(NumberOfCoresPerSocket, 1u); + this->NumberOfPhysicalCPU = + NumberOfCoresPerSocket * (unsigned int)NumberOfSockets; + +#else // __CYGWIN__ + // does not have "physical id" entries, neither "cpu cores" + // this has to be fixed for hyper-threading. + std::string cpucount = + this->ExtractValueFromCpuInfoFile(buffer, "cpu count"); + this->NumberOfPhysicalCPU = this->NumberOfLogicalCPU = + atoi(cpucount.c_str()); +#endif + // gotta have one, and if this is 0 then we get a / by 0n + // better to have a bad answer than a crash + if (this->NumberOfPhysicalCPU <= 0) { + this->NumberOfPhysicalCPU = 1; + } + // LogicalProcessorsPerPhysical>1 => SMT. 
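The ratio assigned just below is what later flags SMT. The same socket/core bookkeeping can be shown as a self-contained sketch that reads /proc/cpuinfo directly; it assumes the x86-style "physical id" and "cpu cores" fields are present, which is not true of every architecture:

#include <algorithm>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <set>
#include <string>

int main()
{
  std::ifstream in("/proc/cpuinfo");
  std::string line;
  unsigned int logical = 0;
  unsigned int coresPerSocket = 0;
  std::set<int> sockets;
  while (std::getline(in, line)) {
    if (line.compare(0, 9, "processor") == 0) {
      ++logical; // one "processor" stanza per logical CPU
    } else if (line.compare(0, 11, "physical id") == 0) {
      sockets.insert(std::atoi(line.substr(line.find(':') + 1).c_str()));
    } else if (line.compare(0, 9, "cpu cores") == 0) {
      coresPerSocket = static_cast<unsigned int>(
        std::atoi(line.substr(line.find(':') + 1).c_str()));
    }
  }
  unsigned int nSockets =
    sockets.empty() ? 1u : static_cast<unsigned int>(sockets.size());
  unsigned int physical = nSockets * std::max(1u, coresPerSocket);
  std::cout << logical << " logical / " << physical << " physical CPUs\n";
  return 0;
}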
+ this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical = + this->NumberOfLogicalCPU / this->NumberOfPhysicalCPU; + + // CPU speed (checking only the first processor) + std::string CPUSpeed = this->ExtractValueFromCpuInfoFile(buffer, "cpu MHz"); + if (!CPUSpeed.empty()) { + this->CPUSpeedInMHz = static_cast(atof(CPUSpeed.c_str())); + } +#ifdef __linux + else { + // Linux Sparc: CPU speed is in Hz and encoded in hexadecimal + CPUSpeed = this->ExtractValueFromCpuInfoFile(buffer, "Cpu0ClkTck"); + this->CPUSpeedInMHz = + static_cast(strtoull(CPUSpeed.c_str(), 0, 16)) / 1000000.0f; + } +#endif + + // Chip family + std::string familyStr = + this->ExtractValueFromCpuInfoFile(buffer, "cpu family"); + if (familyStr.empty()) { + familyStr = this->ExtractValueFromCpuInfoFile(buffer, "CPU architecture"); + } + this->ChipID.Family = atoi(familyStr.c_str()); + + // Chip Vendor + this->ChipID.Vendor = this->ExtractValueFromCpuInfoFile(buffer, "vendor_id"); + this->FindManufacturer(familyStr); + + // second try for setting family + if (this->ChipID.Family == 0 && this->ChipManufacturer == HP) { + if (familyStr == "PA-RISC 1.1a") + this->ChipID.Family = 0x11a; + else if (familyStr == "PA-RISC 2.0") + this->ChipID.Family = 0x200; + // If you really get CMake to work on a machine not belonging to + // any of those families I owe you a dinner if you get it to + // contribute nightly builds regularly. + } + + // Chip Model + this->ChipID.Model = + atoi(this->ExtractValueFromCpuInfoFile(buffer, "model").c_str()); + if (!this->RetrieveClassicalCPUIdentity()) { + // Some platforms (e.g. PA-RISC) tell us their CPU name here. + // Note: x86 does not. + std::string cpuname = this->ExtractValueFromCpuInfoFile(buffer, "cpu"); + if (!cpuname.empty()) { + this->ChipID.ProcessorName = cpuname; + } + } + + // Chip revision + std::string cpurev = this->ExtractValueFromCpuInfoFile(buffer, "stepping"); + if (cpurev.empty()) { + cpurev = this->ExtractValueFromCpuInfoFile(buffer, "CPU revision"); + } + this->ChipID.Revision = atoi(cpurev.c_str()); + + // Chip Model Name + this->ChipID.ModelName = + this->ExtractValueFromCpuInfoFile(buffer, "model name"); + + // L1 Cache size + // Different architectures may show different names for the caches. + // Sum up everything we find. + std::vector cachename; + cachename.clear(); + + cachename.push_back("cache size"); // e.g. x86 + cachename.push_back("I-cache"); // e.g. PA-RISC + cachename.push_back("D-cache"); // e.g. 
PA-RISC + + this->Features.L1CacheSize = 0; + for (size_t index = 0; index < cachename.size(); index++) { + std::string cacheSize = + this->ExtractValueFromCpuInfoFile(buffer, cachename[index]); + if (!cacheSize.empty()) { + pos = cacheSize.find(" KB"); + if (pos != std::string::npos) { + cacheSize = cacheSize.substr(0, pos); + } + this->Features.L1CacheSize += atoi(cacheSize.c_str()); + } + } + + // processor feature flags (probably x86 specific) + std::string cpuflags = this->ExtractValueFromCpuInfoFile(buffer, "flags"); + if (!cpurev.empty()) { + // now we can match every flags as space + flag + space + cpuflags = " " + cpuflags + " "; + if ((cpuflags.find(" fpu ") != std::string::npos)) { + this->Features.HasFPU = true; + } + if ((cpuflags.find(" tsc ") != std::string::npos)) { + this->Features.HasTSC = true; + } + if ((cpuflags.find(" mmx ") != std::string::npos)) { + this->Features.HasMMX = true; + } + if ((cpuflags.find(" sse ") != std::string::npos)) { + this->Features.HasSSE = true; + } + if ((cpuflags.find(" sse2 ") != std::string::npos)) { + this->Features.HasSSE2 = true; + } + if ((cpuflags.find(" apic ") != std::string::npos)) { + this->Features.HasAPIC = true; + } + if ((cpuflags.find(" cmov ") != std::string::npos)) { + this->Features.HasCMOV = true; + } + if ((cpuflags.find(" mtrr ") != std::string::npos)) { + this->Features.HasMTRR = true; + } + if ((cpuflags.find(" acpi ") != std::string::npos)) { + this->Features.HasACPI = true; + } + if ((cpuflags.find(" 3dnow ") != std::string::npos)) { + this->Features.ExtendedFeatures.Has3DNow = true; + } + } + + return true; +} + +bool SystemInformationImplementation::QueryProcessorBySysconf() +{ +#if defined(_SC_NPROC_ONLN) && !defined(_SC_NPROCESSORS_ONLN) +// IRIX names this slightly different +# define _SC_NPROCESSORS_ONLN _SC_NPROC_ONLN +#endif + +#ifdef _SC_NPROCESSORS_ONLN + long c = sysconf(_SC_NPROCESSORS_ONLN); + if (c <= 0) { + return false; + } + + this->NumberOfPhysicalCPU = static_cast(c); + this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU; + + return true; +#else + return false; +#endif +} + +bool SystemInformationImplementation::QueryProcessor() +{ + return this->QueryProcessorBySysconf(); +} + +/** +Get total system RAM in units of KiB. +*/ +SystemInformation::LongLong +SystemInformationImplementation::GetHostMemoryTotal() +{ +#if defined(_WIN32) +# if defined(_MSC_VER) && _MSC_VER < 1300 + MEMORYSTATUS stat; + stat.dwLength = sizeof(stat); + GlobalMemoryStatus(&stat); + return stat.dwTotalPhys / 1024; +# else + MEMORYSTATUSEX statex; + statex.dwLength = sizeof(statex); + GlobalMemoryStatusEx(&statex); + return statex.ullTotalPhys / 1024; +# endif +#elif defined(__linux) + SystemInformation::LongLong memTotal = 0; + int ierr = GetFieldFromFile("/proc/meminfo", "MemTotal:", memTotal); + if (ierr) { + return -1; + } + return memTotal; +#elif defined(__APPLE__) + uint64_t mem; + size_t len = sizeof(mem); + int ierr = sysctlbyname("hw.memsize", &mem, &len, nullptr, 0); + if (ierr) { + return -1; + } + return mem / 1024; +#else + return 0; +#endif +} + +/** +Get total system RAM in units of KiB. This may differ from the +host total if a host-wide resource limit is applied. +*/ +SystemInformation::LongLong +SystemInformationImplementation::GetHostMemoryAvailable( + const char* hostLimitEnvVarName) +{ + SystemInformation::LongLong memTotal = this->GetHostMemoryTotal(); + + // the following mechanism is provided for systems that + // apply resource limits across groups of processes. 
+ // this is of use on certain SMP systems (eg. SGI UV) + // where the host has a large amount of ram but a given user's + // access to it is severely restricted. The system will + // apply a limit across a set of processes. Units are in KiB. + if (hostLimitEnvVarName) { + const char* hostLimitEnvVarValue = getenv(hostLimitEnvVarName); + if (hostLimitEnvVarValue) { + SystemInformation::LongLong hostLimit = + atoLongLong(hostLimitEnvVarValue); + if (hostLimit > 0) { + memTotal = min(hostLimit, memTotal); + } + } + } + + return memTotal; +} + +/** +Get total system RAM in units of KiB. This may differ from the +host total if a per-process resource limit is applied. +*/ +SystemInformation::LongLong +SystemInformationImplementation::GetProcMemoryAvailable( + const char* hostLimitEnvVarName, const char* procLimitEnvVarName) +{ + SystemInformation::LongLong memAvail = + this->GetHostMemoryAvailable(hostLimitEnvVarName); + + // the following mechanism is provide for systems where rlimits + // are not employed. Units are in KiB. + if (procLimitEnvVarName) { + const char* procLimitEnvVarValue = getenv(procLimitEnvVarName); + if (procLimitEnvVarValue) { + SystemInformation::LongLong procLimit = + atoLongLong(procLimitEnvVarValue); + if (procLimit > 0) { + memAvail = min(procLimit, memAvail); + } + } + } + +#if defined(__linux) + int ierr; + ResourceLimitType rlim; + ierr = GetResourceLimit(RLIMIT_DATA, &rlim); + if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) { + memAvail = + min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail); + } + + ierr = GetResourceLimit(RLIMIT_AS, &rlim); + if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) { + memAvail = + min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail); + } +#elif defined(__APPLE__) + struct rlimit rlim; + int ierr; + ierr = getrlimit(RLIMIT_DATA, &rlim); + if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) { + memAvail = + min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail); + } + + ierr = getrlimit(RLIMIT_RSS, &rlim); + if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) { + memAvail = + min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail); + } +#endif + + return memAvail; +} + +/** +Get RAM used by all processes in the host, in units of KiB. 
+*/ +SystemInformation::LongLong +SystemInformationImplementation::GetHostMemoryUsed() +{ +#if defined(_WIN32) +# if defined(_MSC_VER) && _MSC_VER < 1300 + MEMORYSTATUS stat; + stat.dwLength = sizeof(stat); + GlobalMemoryStatus(&stat); + return (stat.dwTotalPhys - stat.dwAvailPhys) / 1024; +# else + MEMORYSTATUSEX statex; + statex.dwLength = sizeof(statex); + GlobalMemoryStatusEx(&statex); + return (statex.ullTotalPhys - statex.ullAvailPhys) / 1024; +# endif +#elif defined(__linux) + // First try to use MemAvailable, but it only works on newer kernels + const char* names2[3] = { "MemTotal:", "MemAvailable:", nullptr }; + SystemInformation::LongLong values2[2] = { SystemInformation::LongLong(0) }; + int ierr = GetFieldsFromFile("/proc/meminfo", names2, values2); + if (ierr) { + const char* names4[5] = { "MemTotal:", "MemFree:", "Buffers:", "Cached:", + nullptr }; + SystemInformation::LongLong values4[4] = { SystemInformation::LongLong( + 0) }; + ierr = GetFieldsFromFile("/proc/meminfo", names4, values4); + if (ierr) { + return ierr; + } + SystemInformation::LongLong& memTotal = values4[0]; + SystemInformation::LongLong& memFree = values4[1]; + SystemInformation::LongLong& memBuffers = values4[2]; + SystemInformation::LongLong& memCached = values4[3]; + return memTotal - memFree - memBuffers - memCached; + } + SystemInformation::LongLong& memTotal = values2[0]; + SystemInformation::LongLong& memAvail = values2[1]; + return memTotal - memAvail; +#elif defined(__APPLE__) + SystemInformation::LongLong psz = getpagesize(); + if (psz < 1) { + return -1; + } + const char* names[3] = { "Pages wired down:", "Pages active:", nullptr }; + SystemInformation::LongLong values[2] = { SystemInformation::LongLong(0) }; + int ierr = GetFieldsFromCommand("vm_stat", names, values); + if (ierr) { + return -1; + } + SystemInformation::LongLong& vmWired = values[0]; + SystemInformation::LongLong& vmActive = values[1]; + return ((vmActive + vmWired) * psz) / 1024; +#else + return 0; +#endif +} + +/** +Get system RAM used by the process associated with the given +process id in units of KiB. 
+*/ +SystemInformation::LongLong +SystemInformationImplementation::GetProcMemoryUsed() +{ +#if defined(_WIN32) && defined(KWSYS_SYS_HAS_PSAPI) + long pid = GetCurrentProcessId(); + HANDLE hProc; + hProc = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, false, pid); + if (hProc == 0) { + return -1; + } + PROCESS_MEMORY_COUNTERS pmc; + int ok = GetProcessMemoryInfo(hProc, &pmc, sizeof(pmc)); + CloseHandle(hProc); + if (!ok) { + return -2; + } + return pmc.WorkingSetSize / 1024; +#elif defined(__linux) + SystemInformation::LongLong memUsed = 0; + int ierr = GetFieldFromFile("/proc/self/status", "VmRSS:", memUsed); + if (ierr) { + return -1; + } + return memUsed; +#elif defined(__APPLE__) + SystemInformation::LongLong memUsed = 0; + pid_t pid = getpid(); + std::ostringstream oss; + oss << "ps -o rss= -p " << pid; + FILE* file = popen(oss.str().c_str(), "r"); + if (file == nullptr) { + return -1; + } + oss.str(""); + while (!feof(file) && !ferror(file)) { + char buf[256] = { '\0' }; + errno = 0; + size_t nRead = fread(buf, 1, 256, file); + if (ferror(file) && (errno == EINTR)) { + clearerr(file); + } + if (nRead) + oss << buf; + } + int ierr = ferror(file); + pclose(file); + if (ierr) { + return -2; + } + std::istringstream iss(oss.str()); + iss >> memUsed; + return memUsed; +#else + return 0; +#endif +} + +double SystemInformationImplementation::GetLoadAverage() +{ +#if defined(KWSYS_CXX_HAS_GETLOADAVG) + double loadavg[3] = { 0.0, 0.0, 0.0 }; + if (getloadavg(loadavg, 3) > 0) { + return loadavg[0]; + } + return -0.0; +#elif defined(KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes) + // Old windows.h headers do not provide GetSystemTimes. + typedef BOOL(WINAPI * GetSystemTimesType)(LPFILETIME, LPFILETIME, + LPFILETIME); + static GetSystemTimesType pGetSystemTimes = + (GetSystemTimesType)GetProcAddress(GetModuleHandleW(L"kernel32"), + "GetSystemTimes"); + FILETIME idleTime, kernelTime, userTime; + if (pGetSystemTimes && pGetSystemTimes(&idleTime, &kernelTime, &userTime)) { + unsigned __int64 const idleTicks = fileTimeToUInt64(idleTime); + unsigned __int64 const totalTicks = + fileTimeToUInt64(kernelTime) + fileTimeToUInt64(userTime); + return calculateCPULoad(idleTicks, totalTicks) * GetNumberOfPhysicalCPU(); + } + return -0.0; +#else + // Not implemented on this platform. + return -0.0; +#endif +} + +/** +Get the process id of the running process. +*/ +SystemInformation::LongLong SystemInformationImplementation::GetProcessId() +{ +#if defined(_WIN32) + return GetCurrentProcessId(); +#elif defined(__linux) || defined(__APPLE__) || defined(__OpenBSD__) || \ + defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) + return getpid(); +#else + return -1; +#endif +} + +/** + * Used in GetProgramStack(...) below + */ +#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0600 && defined(_MSC_VER) && \ + _MSC_VER >= 1800 +# define KWSYS_SYSTEMINFORMATION_HAS_DBGHELP +# define TRACE_MAX_STACK_FRAMES 1024 +# define TRACE_MAX_FUNCTION_NAME_LENGTH 1024 +# pragma warning(push) +# pragma warning(disable : 4091) /* 'typedef ': ignored on left of '' */ +# include "dbghelp.h" +# pragma warning(pop) +#endif + +/** +return current program stack in a string +demangle cxx symbols if possible. 
+*/ +std::string SystemInformationImplementation::GetProgramStack(int firstFrame, + int wholePath) +{ + std::ostringstream oss; + std::string programStack = ""; + +#ifdef KWSYS_SYSTEMINFORMATION_HAS_DBGHELP + (void)wholePath; + + void* stack[TRACE_MAX_STACK_FRAMES]; + HANDLE process = GetCurrentProcess(); + SymInitialize(process, nullptr, TRUE); + WORD numberOfFrames = + CaptureStackBackTrace(firstFrame, TRACE_MAX_STACK_FRAMES, stack, nullptr); + SYMBOL_INFO* symbol = static_cast( + malloc(sizeof(SYMBOL_INFO) + + (TRACE_MAX_FUNCTION_NAME_LENGTH - 1) * sizeof(TCHAR))); + symbol->MaxNameLen = TRACE_MAX_FUNCTION_NAME_LENGTH; + symbol->SizeOfStruct = sizeof(SYMBOL_INFO); + DWORD displacement; + IMAGEHLP_LINE64 line; + line.SizeOfStruct = sizeof(IMAGEHLP_LINE64); + for (int i = 0; i < numberOfFrames; i++) { + DWORD64 address = reinterpret_cast(stack[i]); + SymFromAddr(process, address, nullptr, symbol); + if (SymGetLineFromAddr64(process, address, &displacement, &line)) { + oss << " at " << symbol->Name << " in " << line.FileName << " line " + << line.LineNumber << std::endl; + } else { + oss << " at " << symbol->Name << std::endl; + } + } + free(symbol); + +#else + programStack += "" +# if !defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE) + "WARNING: The stack could not be examined " + "because backtrace is not supported.\n" +# elif !defined(KWSYS_SYSTEMINFORMATION_HAS_DEBUG_BUILD) + "WARNING: The stack trace will not use advanced " + "capabilities because this is a release build.\n" +# else +# if !defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP) + "WARNING: Function names will not be demangled " + "because dladdr is not available.\n" +# endif +# if !defined(KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE) + "WARNING: Function names will not be demangled " + "because cxxabi is not available.\n" +# endif +# endif + ; + +# if defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE) + void* stackSymbols[256]; + int nFrames = backtrace(stackSymbols, 256); + for (int i = firstFrame; i < nFrames; ++i) { + SymbolProperties symProps; + symProps.SetReportPath(wholePath); + symProps.Initialize(stackSymbols[i]); + oss << symProps << std::endl; + } +# else + (void)firstFrame; + (void)wholePath; +# endif +#endif + + programStack += oss.str(); + + return programStack; +} + +/** +when set print stack trace in response to common signals. 
+*/ +void SystemInformationImplementation::SetStackTraceOnError(int enable) +{ +#if !defined(_WIN32) && !defined(__MINGW32__) && !defined(__CYGWIN__) + static int saOrigValid = 0; + static struct sigaction saABRTOrig; + static struct sigaction saSEGVOrig; + static struct sigaction saTERMOrig; + static struct sigaction saINTOrig; + static struct sigaction saILLOrig; + static struct sigaction saBUSOrig; + static struct sigaction saFPEOrig; + + if (enable && !saOrigValid) { + // save the current actions + sigaction(SIGABRT, nullptr, &saABRTOrig); + sigaction(SIGSEGV, nullptr, &saSEGVOrig); + sigaction(SIGTERM, nullptr, &saTERMOrig); + sigaction(SIGINT, nullptr, &saINTOrig); + sigaction(SIGILL, nullptr, &saILLOrig); + sigaction(SIGBUS, nullptr, &saBUSOrig); + sigaction(SIGFPE, nullptr, &saFPEOrig); + + // enable read, disable write + saOrigValid = 1; + + // install ours + struct sigaction sa; + sa.sa_sigaction = (SigAction)StacktraceSignalHandler; + sa.sa_flags = SA_SIGINFO | SA_RESETHAND; +# ifdef SA_RESTART + sa.sa_flags |= SA_RESTART; +# endif + sigemptyset(&sa.sa_mask); + + sigaction(SIGABRT, &sa, nullptr); + sigaction(SIGSEGV, &sa, nullptr); + sigaction(SIGTERM, &sa, nullptr); + sigaction(SIGINT, &sa, nullptr); + sigaction(SIGILL, &sa, nullptr); + sigaction(SIGBUS, &sa, nullptr); + sigaction(SIGFPE, &sa, nullptr); + } else if (!enable && saOrigValid) { + // restore previous actions + sigaction(SIGABRT, &saABRTOrig, nullptr); + sigaction(SIGSEGV, &saSEGVOrig, nullptr); + sigaction(SIGTERM, &saTERMOrig, nullptr); + sigaction(SIGINT, &saINTOrig, nullptr); + sigaction(SIGILL, &saILLOrig, nullptr); + sigaction(SIGBUS, &saBUSOrig, nullptr); + sigaction(SIGFPE, &saFPEOrig, nullptr); + + // enable write, disable read + saOrigValid = 0; + } +#else + // avoid warning C4100 + (void)enable; +#endif +} + +bool SystemInformationImplementation::QueryWindowsMemory() +{ +#if defined(_WIN32) +# if defined(_MSC_VER) && _MSC_VER < 1300 + MEMORYSTATUS ms; + unsigned long tv, tp, av, ap; + ms.dwLength = sizeof(ms); + GlobalMemoryStatus(&ms); +# define MEM_VAL(value) dw##value +# else + MEMORYSTATUSEX ms; + DWORDLONG tv, tp, av, ap; + ms.dwLength = sizeof(ms); + if (0 == GlobalMemoryStatusEx(&ms)) { + return 0; + } +# define MEM_VAL(value) ull##value +# endif + tv = ms.MEM_VAL(TotalPageFile); + tp = ms.MEM_VAL(TotalPhys); + av = ms.MEM_VAL(AvailPageFile); + ap = ms.MEM_VAL(AvailPhys); + this->TotalVirtualMemory = tv >> 10 >> 10; + this->TotalPhysicalMemory = tp >> 10 >> 10; + this->AvailableVirtualMemory = av >> 10 >> 10; + this->AvailablePhysicalMemory = ap >> 10 >> 10; + return true; +#else + return false; +#endif +} + +bool SystemInformationImplementation::QueryLinuxMemory() +{ +#if defined(__linux) + unsigned long tv = 0; + unsigned long tp = 0; + unsigned long av = 0; + unsigned long ap = 0; + + char buffer[1024]; // for reading lines + + int linuxMajor = 0; + int linuxMinor = 0; + + // Find the Linux kernel version first + struct utsname unameInfo; + int errorFlag = uname(&unameInfo); + if (errorFlag != 0) { + std::cout << "Problem calling uname(): " << strerror(errno) << std::endl; + return false; + } + + if (strlen(unameInfo.release) >= 3) { + // release looks like "2.6.3-15mdk-i686-up-4GB" + char majorChar = unameInfo.release[0]; + char minorChar = unameInfo.release[2]; + + if (isdigit(majorChar)) { + linuxMajor = majorChar - '0'; + } + + if (isdigit(minorChar)) { + linuxMinor = minorChar - '0'; + } + } + + FILE* fd = fopen("/proc/meminfo", "r"); + if (!fd) { + std::cout << "Problem opening 
/proc/meminfo" << std::endl; + return false; + } + + if (linuxMajor >= 3 || ((linuxMajor >= 2) && (linuxMinor >= 6))) { + // new /proc/meminfo format since kernel 2.6.x + // Rigorously, this test should check from the developing version 2.5.x + // that introduced the new format... + + enum + { + mMemTotal, + mMemFree, + mBuffers, + mCached, + mSwapTotal, + mSwapFree + }; + const char* format[6] = { "MemTotal:%lu kB", "MemFree:%lu kB", + "Buffers:%lu kB", "Cached:%lu kB", + "SwapTotal:%lu kB", "SwapFree:%lu kB" }; + bool have[6] = { false, false, false, false, false, false }; + unsigned long value[6]; + int count = 0; + while (fgets(buffer, static_cast(sizeof(buffer)), fd)) { + for (int i = 0; i < 6; ++i) { + if (!have[i] && sscanf(buffer, format[i], &value[i]) == 1) { + have[i] = true; + ++count; + } + } + } + if (count == 6) { + this->TotalPhysicalMemory = value[mMemTotal] / 1024; + this->AvailablePhysicalMemory = + (value[mMemFree] + value[mBuffers] + value[mCached]) / 1024; + this->TotalVirtualMemory = value[mSwapTotal] / 1024; + this->AvailableVirtualMemory = value[mSwapFree] / 1024; + } else { + std::cout << "Problem parsing /proc/meminfo" << std::endl; + fclose(fd); + return false; + } + } else { + // /proc/meminfo format for kernel older than 2.6.x + + unsigned long temp; + unsigned long cachedMem; + unsigned long buffersMem; + // Skip "total: used:..." + char* r = fgets(buffer, static_cast(sizeof(buffer)), fd); + int status = 0; + if (r == buffer) { + status += fscanf(fd, "Mem: %lu %lu %lu %lu %lu %lu\n", &tp, &temp, &ap, + &temp, &buffersMem, &cachedMem); + } + if (status == 6) { + status += fscanf(fd, "Swap: %lu %lu %lu\n", &tv, &temp, &av); + } + if (status == 9) { + this->TotalVirtualMemory = tv >> 10 >> 10; + this->TotalPhysicalMemory = tp >> 10 >> 10; + this->AvailableVirtualMemory = av >> 10 >> 10; + this->AvailablePhysicalMemory = + (ap + buffersMem + cachedMem) >> 10 >> 10; + } else { + std::cout << "Problem parsing /proc/meminfo" << std::endl; + fclose(fd); + return false; + } + } + fclose(fd); + + return true; +#else + return false; +#endif +} + +bool SystemInformationImplementation::QueryCygwinMemory() +{ +#ifdef __CYGWIN__ + // _SC_PAGE_SIZE does return the mmap() granularity on Cygwin, + // see http://cygwin.com/ml/cygwin/2006-06/msg00350.html + // Therefore just use 4096 as the page size of Windows. + long m = sysconf(_SC_PHYS_PAGES); + if (m < 0) { + return false; + } + this->TotalPhysicalMemory = m >> 8; + return true; +#else + return false; +#endif +} + +bool SystemInformationImplementation::QueryAIXMemory() +{ +#if defined(_AIX) && defined(_SC_AIX_REALMEM) + long c = sysconf(_SC_AIX_REALMEM); + if (c <= 0) { + return false; + } + + this->TotalPhysicalMemory = c / 1024; + + return true; +#else + return false; +#endif +} + +bool SystemInformationImplementation::QueryMemoryBySysconf() +{ +#if defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE) + // Assume the mmap() granularity as returned by _SC_PAGESIZE is also + // the system page size. The only known system where this isn't true + // is Cygwin. 
+ long p = sysconf(_SC_PHYS_PAGES); + long m = sysconf(_SC_PAGESIZE); + + if (p < 0 || m < 0) { + return false; + } + + // assume pagesize is a power of 2 and smaller 1 MiB + size_t pagediv = (1024 * 1024 / m); + + this->TotalPhysicalMemory = p; + this->TotalPhysicalMemory /= pagediv; + +# if defined(_SC_AVPHYS_PAGES) + p = sysconf(_SC_AVPHYS_PAGES); + if (p < 0) { + return false; + } + + this->AvailablePhysicalMemory = p; + this->AvailablePhysicalMemory /= pagediv; +# endif + + return true; +#else + return false; +#endif +} + +/** Query for the memory status */ +bool SystemInformationImplementation::QueryMemory() +{ + return this->QueryMemoryBySysconf(); +} + +/** */ +size_t SystemInformationImplementation::GetTotalVirtualMemory() +{ + return this->TotalVirtualMemory; +} + +/** */ +size_t SystemInformationImplementation::GetAvailableVirtualMemory() +{ + return this->AvailableVirtualMemory; +} + +size_t SystemInformationImplementation::GetTotalPhysicalMemory() +{ + return this->TotalPhysicalMemory; +} + +/** */ +size_t SystemInformationImplementation::GetAvailablePhysicalMemory() +{ + return this->AvailablePhysicalMemory; +} + +/** Get Cycle differences */ +SystemInformation::LongLong +SystemInformationImplementation::GetCyclesDifference(DELAY_FUNC DelayFunction, + unsigned int uiParameter) +{ +#if defined(_MSC_VER) && (_MSC_VER >= 1400) + unsigned __int64 stamp1, stamp2; + + stamp1 = __rdtsc(); + DelayFunction(uiParameter); + stamp2 = __rdtsc(); + + return stamp2 - stamp1; +#elif USE_ASM_INSTRUCTIONS + + unsigned int edx1, eax1; + unsigned int edx2, eax2; + + // Calculate the frequency of the CPU instructions. + __try { + _asm { + push uiParameter ; push parameter param + mov ebx, DelayFunction ; store func in ebx + + RDTSC_INSTRUCTION + + mov esi, eax ; esi = eax + mov edi, edx ; edi = edx + + call ebx ; call the delay functions + + RDTSC_INSTRUCTION + + pop ebx + + mov edx2, edx ; edx2 = edx + mov eax2, eax ; eax2 = eax + + mov edx1, edi ; edx2 = edi + mov eax1, esi ; eax2 = esi + } + } __except (1) { + return -1; + } + + return ((((__int64)edx2 << 32) + eax2) - (((__int64)edx1 << 32) + eax1)); + +#else + (void)DelayFunction; + (void)uiParameter; + return -1; +#endif +} + +/** Compute the delay overhead */ +void SystemInformationImplementation::DelayOverhead(unsigned int uiMS) +{ +#if defined(_WIN32) + LARGE_INTEGER Frequency, StartCounter, EndCounter; + __int64 x; + + // Get the frequency of the high performance counter. + if (!QueryPerformanceFrequency(&Frequency)) { + return; + } + x = Frequency.QuadPart / 1000 * uiMS; + + // Get the starting position of the counter. + QueryPerformanceCounter(&StartCounter); + + do { + // Get the ending position of the counter. + QueryPerformanceCounter(&EndCounter); + } while (EndCounter.QuadPart - StartCounter.QuadPart == x); +#endif + (void)uiMS; +} + +/** Works only for windows */ +bool SystemInformationImplementation::IsSMTSupported() +{ + return this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical > 1; +} + +/** Return the APIC Id. Works only for windows. */ +unsigned char SystemInformationImplementation::GetAPICId() +{ + int Regs[4] = { 0, 0, 0, 0 }; + +#if USE_CPUID + if (!this->IsSMTSupported()) { + return static_cast(-1); // HT not supported + } // Logical processor = 1 + call_cpuid(1, Regs); +#endif + + return static_cast((Regs[1] & INITIAL_APIC_ID_BITS) >> 24); +} + +/** Count the number of CPUs. Works only on windows. 
*/ +void SystemInformationImplementation::CPUCountWindows() +{ +#if defined(_WIN32) + this->NumberOfPhysicalCPU = 0; + this->NumberOfLogicalCPU = 0; + + typedef BOOL(WINAPI * GetLogicalProcessorInformationType)( + PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD); + static GetLogicalProcessorInformationType pGetLogicalProcessorInformation = + (GetLogicalProcessorInformationType)GetProcAddress( + GetModuleHandleW(L"kernel32"), "GetLogicalProcessorInformation"); + + if (!pGetLogicalProcessorInformation) { + // Fallback to approximate implementation on ancient Windows versions. + SYSTEM_INFO info; + ZeroMemory(&info, sizeof(info)); + GetSystemInfo(&info); + this->NumberOfPhysicalCPU = + static_cast(info.dwNumberOfProcessors); + this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU; + return; + } + + std::vector ProcInfo; + { + DWORD Length = 0; + DWORD rc = pGetLogicalProcessorInformation(nullptr, &Length); + assert(FALSE == rc); + (void)rc; // Silence unused variable warning in Borland C++ 5.81 + assert(GetLastError() == ERROR_INSUFFICIENT_BUFFER); + ProcInfo.resize(Length / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION)); + rc = pGetLogicalProcessorInformation(&ProcInfo[0], &Length); + assert(rc != FALSE); + (void)rc; // Silence unused variable warning in Borland C++ 5.81 + } + + typedef std::vector::iterator + pinfoIt_t; + for (pinfoIt_t it = ProcInfo.begin(); it != ProcInfo.end(); ++it) { + SYSTEM_LOGICAL_PROCESSOR_INFORMATION PInfo = *it; + if (PInfo.Relationship != RelationProcessorCore) { + continue; + } + + std::bitset::digits> ProcMask( + (unsigned long long)PInfo.ProcessorMask); + unsigned int count = (unsigned int)ProcMask.count(); + if (count == 0) { // I think this should never happen, but just to be safe. + continue; + } + this->NumberOfPhysicalCPU++; + this->NumberOfLogicalCPU += (unsigned int)count; + this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical = count; + } + this->NumberOfPhysicalCPU = std::max(1u, this->NumberOfPhysicalCPU); + this->NumberOfLogicalCPU = std::max(1u, this->NumberOfLogicalCPU); +#else +#endif +} + +/** Return the number of logical CPUs on the system */ +unsigned int SystemInformationImplementation::GetNumberOfLogicalCPU() +{ + return this->NumberOfLogicalCPU; +} + +/** Return the number of physical CPUs on the system */ +unsigned int SystemInformationImplementation::GetNumberOfPhysicalCPU() +{ + return this->NumberOfPhysicalCPU; +} + +/** For Mac use sysctlbyname calls to find system info */ +bool SystemInformationImplementation::ParseSysCtl() +{ +#if defined(__APPLE__) + char retBuf[128]; + int err = 0; + uint64_t value = 0; + size_t len = sizeof(value); + sysctlbyname("hw.memsize", &value, &len, nullptr, 0); + this->TotalPhysicalMemory = static_cast(value / 1048576); + + // Parse values for Mac + this->AvailablePhysicalMemory = 0; + vm_statistics_data_t vmstat; + mach_msg_type_number_t count = HOST_VM_INFO_COUNT; + if (host_statistics(mach_host_self(), HOST_VM_INFO, (host_info_t)&vmstat, + &count) == KERN_SUCCESS) { + len = sizeof(value); + err = sysctlbyname("hw.pagesize", &value, &len, nullptr, 0); + int64_t available_memory = + (vmstat.free_count + vmstat.inactive_count) * value; + this->AvailablePhysicalMemory = + static_cast(available_memory / 1048576); + } + +# ifdef VM_SWAPUSAGE + // Virtual memory. 
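The swap query under VM_SWAPUSAGE continues below; the scalar queries used throughout this routine all follow the same sysctlbyname() pattern, shown standalone here (a macOS-only sketch, illustrative rather than part of the patch):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <cstdio>

int main()
{
  uint64_t memBytes = 0;
  size_t len = sizeof(memBytes);
  // sysctlbyname() returns 0 on success and fills 'len' with the value size.
  if (sysctlbyname("hw.memsize", &memBytes, &len, nullptr, 0) != 0) {
    return 1;
  }
  int logicalCpus = 0;
  len = sizeof(logicalCpus);
  if (sysctlbyname("hw.logicalcpu", &logicalCpus, &len, nullptr, 0) != 0) {
    logicalCpus = -1;
  }
  std::printf("RAM: %llu MiB, logical CPUs: %d\n",
              (unsigned long long)(memBytes / (1024 * 1024)), logicalCpus);
  return 0;
}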
+ int mib[2] = { CTL_VM, VM_SWAPUSAGE }; + unsigned int miblen = + static_cast(sizeof(mib) / sizeof(mib[0])); + struct xsw_usage swap; + len = sizeof(swap); + err = sysctl(mib, miblen, &swap, &len, nullptr, 0); + if (err == 0) { + this->AvailableVirtualMemory = + static_cast(swap.xsu_avail / 1048576); + this->TotalVirtualMemory = static_cast(swap.xsu_total / 1048576); + } +# else + this->AvailableVirtualMemory = 0; + this->TotalVirtualMemory = 0; +# endif + + // CPU Info + len = sizeof(this->NumberOfPhysicalCPU); + sysctlbyname("hw.physicalcpu", &this->NumberOfPhysicalCPU, &len, nullptr, 0); + len = sizeof(this->NumberOfLogicalCPU); + sysctlbyname("hw.logicalcpu", &this->NumberOfLogicalCPU, &len, nullptr, 0); + + int cores_per_package = 0; + len = sizeof(cores_per_package); + err = sysctlbyname("machdep.cpu.cores_per_package", &cores_per_package, &len, + nullptr, 0); + // That name was not found, default to 1 + this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical = + err != 0 ? 1 : static_cast(cores_per_package); + + len = sizeof(value); + sysctlbyname("hw.cpufrequency", &value, &len, nullptr, 0); + this->CPUSpeedInMHz = static_cast(value) / 1000000; + + // Chip family + len = sizeof(this->ChipID.Family); + // Seems only the intel chips will have this name so if this fails it is + // probably a PPC machine + err = + sysctlbyname("machdep.cpu.family", &this->ChipID.Family, &len, nullptr, 0); + if (err != 0) // Go back to names we know but are less descriptive + { + this->ChipID.Family = 0; + ::memset(retBuf, 0, 128); + len = 32; + err = sysctlbyname("hw.machine", &retBuf, &len, nullptr, 0); + std::string machineBuf(retBuf); + if (machineBuf.find_first_of("Power") != std::string::npos) { + this->ChipID.Vendor = "IBM"; + len = sizeof(this->ChipID.Family); + err = sysctlbyname("hw.cputype", &this->ChipID.Family, &len, nullptr, 0); + len = sizeof(this->ChipID.Model); + err = + sysctlbyname("hw.cpusubtype", &this->ChipID.Model, &len, nullptr, 0); + this->FindManufacturer(); + } + } else // Should be an Intel Chip. 
+ { + len = sizeof(this->ChipID.Family); + err = sysctlbyname("machdep.cpu.family", &this->ChipID.Family, &len, + nullptr, 0); + + ::memset(retBuf, 0, 128); + len = 128; + err = sysctlbyname("machdep.cpu.vendor", retBuf, &len, nullptr, 0); + // Chip Vendor + this->ChipID.Vendor = retBuf; + this->FindManufacturer(); + + // Chip Model + len = sizeof(value); + err = sysctlbyname("machdep.cpu.model", &value, &len, nullptr, 0); + this->ChipID.Model = static_cast(value); + + // Chip Stepping + len = sizeof(value); + value = 0; + err = sysctlbyname("machdep.cpu.stepping", &value, &len, nullptr, 0); + if (!err) { + this->ChipID.Revision = static_cast(value); + } + + // feature string + char* buf = nullptr; + size_t allocSize = 128; + + err = 0; + len = 0; + + // sysctlbyname() will return with err==0 && len==0 if the buffer is too + // small + while (err == 0 && len == 0) { + delete[] buf; + allocSize *= 2; + buf = new char[allocSize]; + if (!buf) { + break; + } + buf[0] = ' '; + len = allocSize - 2; // keep space for leading and trailing space + err = sysctlbyname("machdep.cpu.features", buf + 1, &len, nullptr, 0); + } + if (!err && buf && len) { + // now we can match every flags as space + flag + space + buf[len + 1] = ' '; + std::string cpuflags(buf, len + 2); + + if ((cpuflags.find(" FPU ") != std::string::npos)) { + this->Features.HasFPU = true; + } + if ((cpuflags.find(" TSC ") != std::string::npos)) { + this->Features.HasTSC = true; + } + if ((cpuflags.find(" MMX ") != std::string::npos)) { + this->Features.HasMMX = true; + } + if ((cpuflags.find(" SSE ") != std::string::npos)) { + this->Features.HasSSE = true; + } + if ((cpuflags.find(" SSE2 ") != std::string::npos)) { + this->Features.HasSSE2 = true; + } + if ((cpuflags.find(" APIC ") != std::string::npos)) { + this->Features.HasAPIC = true; + } + if ((cpuflags.find(" CMOV ") != std::string::npos)) { + this->Features.HasCMOV = true; + } + if ((cpuflags.find(" MTRR ") != std::string::npos)) { + this->Features.HasMTRR = true; + } + if ((cpuflags.find(" ACPI ") != std::string::npos)) { + this->Features.HasACPI = true; + } + } + delete[] buf; + } + + // brand string + ::memset(retBuf, 0, sizeof(retBuf)); + len = sizeof(retBuf); + err = sysctlbyname("machdep.cpu.brand_string", retBuf, &len, nullptr, 0); + if (!err) { + this->ChipID.ProcessorName = retBuf; + this->ChipID.ModelName = retBuf; + } + + // Cache size + len = sizeof(value); + err = sysctlbyname("hw.l1icachesize", &value, &len, nullptr, 0); + this->Features.L1CacheSize = static_cast(value); + len = sizeof(value); + err = sysctlbyname("hw.l2cachesize", &value, &len, nullptr, 0); + this->Features.L2CacheSize = static_cast(value); + + return true; +#else + return false; +#endif +} + +/** Extract a value from sysctl command */ +std::string SystemInformationImplementation::ExtractValueFromSysCtl( + const char* word) +{ + size_t pos = this->SysCtlBuffer.find(word); + if (pos != std::string::npos) { + pos = this->SysCtlBuffer.find(": ", pos); + size_t pos2 = this->SysCtlBuffer.find("\n", pos); + if (pos != std::string::npos && pos2 != std::string::npos) { + return this->SysCtlBuffer.substr(pos + 2, pos2 - pos - 2); + } + } + return ""; +} + +/** Run a given process */ +std::string SystemInformationImplementation::RunProcess( + std::vector args) +{ + std::string buffer; + + // Run the application + kwsysProcess* gp = kwsysProcess_New(); + kwsysProcess_SetCommand(gp, args.data()); + kwsysProcess_SetOption(gp, kwsysProcess_Option_HideWindow, 1); + + kwsysProcess_Execute(gp); + + char* data = 
nullptr; + int length; + double timeout = 255; + int pipe; // pipe id as returned by kwsysProcess_WaitForData() + + while ((static_cast( + pipe = kwsysProcess_WaitForData(gp, &data, &length, &timeout)), + (pipe == kwsysProcess_Pipe_STDOUT || + pipe == kwsysProcess_Pipe_STDERR))) // wait for 1s + { + buffer.append(data, length); + } + kwsysProcess_WaitForExit(gp, nullptr); + + int result = 0; + switch (kwsysProcess_GetState(gp)) { + case kwsysProcess_State_Exited: { + result = kwsysProcess_GetExitValue(gp); + } break; + case kwsysProcess_State_Error: { + std::cerr << "Error: Could not run " << args[0] << ":\n"; + std::cerr << kwsysProcess_GetErrorString(gp) << "\n"; + } break; + case kwsysProcess_State_Exception: { + std::cerr << "Error: " << args[0] << " terminated with an exception: " + << kwsysProcess_GetExceptionString(gp) << "\n"; + } break; + case kwsysProcess_State_Starting: + case kwsysProcess_State_Executing: + case kwsysProcess_State_Expired: + case kwsysProcess_State_Killed: { + // Should not get here. + std::cerr << "Unexpected ending state after running " << args[0] + << std::endl; + } break; + } + kwsysProcess_Delete(gp); + if (result) { + std::cerr << "Error " << args[0] << " returned :" << result << "\n"; + } + return buffer; +} + +std::string SystemInformationImplementation::ParseValueFromKStat( + const char* arguments) +{ + std::vector args_string; + std::string command = arguments; + size_t start = std::string::npos; + size_t pos = command.find(' ', 0); + while (pos != std::string::npos) { + bool inQuotes = false; + // Check if we are between quotes + size_t b0 = command.find('"', 0); + size_t b1 = command.find('"', b0 + 1); + while (b0 != std::string::npos && b1 != std::string::npos && b1 > b0) { + if (pos > b0 && pos < b1) { + inQuotes = true; + break; + } + b0 = command.find('"', b1 + 1); + b1 = command.find('"', b0 + 1); + } + + if (!inQuotes) { + args_string.push_back(command.substr(start + 1, pos - start - 1)); + std::string& arg = args_string.back(); + + // Remove the quotes if any + arg.erase(std::remove(arg.begin(), arg.end(), '"'), arg.end()); + start = pos; + } + pos = command.find(' ', pos + 1); + } + args_string.push_back(command.substr(start + 1, command.size() - start - 1)); + + std::vector args; + args.reserve(3 + args_string.size()); + args.push_back("kstat"); + args.push_back("-p"); + for (size_t i = 0; i < args_string.size(); ++i) { + args.push_back(args_string[i].c_str()); + } + args.push_back(nullptr); + + std::string buffer = this->RunProcess(args); + + std::string value; + for (size_t i = buffer.size() - 1; i > 0; i--) { + if (buffer[i] == ' ' || buffer[i] == '\t') { + break; + } + if (buffer[i] != '\n' && buffer[i] != '\r') { + value.insert(0u, 1, buffer[i]); + } + } + return value; +} + +/** Querying for system information from Solaris */ +bool SystemInformationImplementation::QuerySolarisMemory() +{ +#if defined(__SVR4) && defined(__sun) +// Solaris allows querying this value by sysconf, but if this is +// a 32 bit process on a 64 bit host the returned memory will be +// limited to 4GiB. So if this is a 32 bit process or if the sysconf +// method fails use the kstat interface. 
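// A sketch of the sysconf-based query referred to above and of the 4 GiB
// limit it can hit in a 32-bit process (illustration only; the helper name
// below is hypothetical and assumes _SC_PHYS_PAGES / _SC_PAGESIZE):
#if 0
#include <unistd.h>
#include <cstdint>

// Returns total physical memory in MiB, or 0 on failure.
static uint64_t PhysicalMemoryMiBBySysconf()
{
  long pages = sysconf(_SC_PHYS_PAGES);
  long pagesize = sysconf(_SC_PAGESIZE);
  if (pages <= 0 || pagesize <= 0) {
    return 0;
  }
  // The product pages * pagesize easily exceeds 4 GiB, so it must be done in
  // 64-bit arithmetic; a 32-bit build therefore falls through to the kstat
  // path used below.
  return (static_cast<uint64_t>(pages) * static_cast<uint64_t>(pagesize)) /
    (1024 * 1024);
}
#endif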
+# if SIZEOF_VOID_P == 8 + if (this->QueryMemoryBySysconf()) { + return true; + } +# endif + + char* tail; + unsigned long totalMemory = + strtoul(this->ParseValueFromKStat("-s physmem").c_str(), &tail, 0); + this->TotalPhysicalMemory = totalMemory / 128; + + return true; +#else + return false; +#endif +} + +bool SystemInformationImplementation::QuerySolarisProcessor() +{ + if (!this->QueryProcessorBySysconf()) { + return false; + } + + // Parse values + this->CPUSpeedInMHz = static_cast( + atoi(this->ParseValueFromKStat("-s clock_MHz").c_str())); + + // Chip family + this->ChipID.Family = 0; + + // Chip Model + this->ChipID.ProcessorName = this->ParseValueFromKStat("-s cpu_type"); + this->ChipID.Model = 0; + + // Chip Vendor + if (this->ChipID.ProcessorName != "i386") { + this->ChipID.Vendor = "Sun"; + this->FindManufacturer(); + } + + return true; +} + +/** Querying for system information from Haiku OS */ +bool SystemInformationImplementation::QueryHaikuInfo() +{ +#if defined(__HAIKU__) + + // CPU count + system_info info; + get_system_info(&info); + this->NumberOfPhysicalCPU = info.cpu_count; + + // CPU speed + uint32 topologyNodeCount = 0; + cpu_topology_node_info* topology = 0; + get_cpu_topology_info(0, &topologyNodeCount); + if (topologyNodeCount != 0) + topology = new cpu_topology_node_info[topologyNodeCount]; + get_cpu_topology_info(topology, &topologyNodeCount); + + for (uint32 i = 0; i < topologyNodeCount; i++) { + if (topology[i].type == B_TOPOLOGY_CORE) { + this->CPUSpeedInMHz = + topology[i].data.core.default_frequency / 1000000.0f; + break; + } + } + + delete[] topology; + + // Physical Memory + this->TotalPhysicalMemory = (info.max_pages * B_PAGE_SIZE) / (1024 * 1024); + this->AvailablePhysicalMemory = this->TotalPhysicalMemory - + ((info.used_pages * B_PAGE_SIZE) / (1024 * 1024)); + + // NOTE: get_system_info_etc is currently a private call so just set to 0 + // until it becomes public + this->TotalVirtualMemory = 0; + this->AvailableVirtualMemory = 0; + + // Retrieve cpuid_info union for cpu 0 + cpuid_info cpu_info; + get_cpuid(&cpu_info, 0, 0); + + // Chip Vendor + // Use a temporary buffer so that we can add NULL termination to the string + char vbuf[13]; + strncpy(vbuf, cpu_info.eax_0.vendor_id, 12); + vbuf[12] = '\0'; + this->ChipID.Vendor = vbuf; + + this->FindManufacturer(); + + // Retrieve cpuid_info union for cpu 0 this time using a register value of 1 + get_cpuid(&cpu_info, 1, 0); + + this->NumberOfLogicalCPU = cpu_info.eax_1.logical_cpus; + + // Chip type + this->ChipID.Type = cpu_info.eax_1.type; + + // Chip family + this->ChipID.Family = cpu_info.eax_1.family; + + // Chip Model + this->ChipID.Model = cpu_info.eax_1.model; + + // Chip Revision + this->ChipID.Revision = cpu_info.eax_1.stepping; + + // Chip Extended Family + this->ChipID.ExtendedFamily = cpu_info.eax_1.extended_family; + + // Chip Extended Model + this->ChipID.ExtendedModel = cpu_info.eax_1.extended_model; + + // Get ChipID.ProcessorName from other information already gathered + this->RetrieveClassicalCPUIdentity(); + + // Cache size + this->Features.L1CacheSize = 0; + this->Features.L2CacheSize = 0; + + return true; + +#else + return false; +#endif +} + +bool SystemInformationImplementation::QueryQNXMemory() +{ +#if defined(__QNX__) + std::string buffer; + std::vector args; + args.clear(); + + args.push_back("showmem"); + args.push_back("-S"); + args.push_back(0); + buffer = this->RunProcess(args); + args.clear(); + + size_t pos = buffer.find("System RAM:"); + if (pos == std::string::npos) + 
return false; + pos = buffer.find(":", pos); + size_t pos2 = buffer.find("M (", pos); + if (pos2 == std::string::npos) + return false; + + pos++; + while (buffer[pos] == ' ') + pos++; + + this->TotalPhysicalMemory = atoi(buffer.substr(pos, pos2 - pos).c_str()); + return true; +#endif + return false; +} + +bool SystemInformationImplementation::QueryBSDMemory() +{ +#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__DragonFly__) + int ctrl[2] = { CTL_HW, HW_PHYSMEM }; +# if defined(HW_PHYSMEM64) + int64_t k; + ctrl[1] = HW_PHYSMEM64; +# else + int k; +# endif + size_t sz = sizeof(k); + + if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) { + return false; + } + + this->TotalPhysicalMemory = k >> 10 >> 10; + + return true; +#else + return false; +#endif +} + +bool SystemInformationImplementation::QueryQNXProcessor() +{ +#if defined(__QNX__) + // the output on my QNX 6.4.1 looks like this: + // Processor1: 686 Pentium II Stepping 3 2175MHz FPU + std::string buffer; + std::vector args; + args.clear(); + + args.push_back("pidin"); + args.push_back("info"); + args.push_back(0); + buffer = this->RunProcess(args); + args.clear(); + + size_t pos = buffer.find("Processor1:"); + if (pos == std::string::npos) + return false; + + size_t pos2 = buffer.find("MHz", pos); + if (pos2 == std::string::npos) + return false; + + size_t pos3 = pos2; + while (buffer[pos3] != ' ') + --pos3; + + this->CPUSpeedInMHz = atoi(buffer.substr(pos3 + 1, pos2 - pos3 - 1).c_str()); + + pos2 = buffer.find(" Stepping", pos); + if (pos2 != std::string::npos) { + pos2 = buffer.find(" ", pos2 + 1); + if (pos2 != std::string::npos && pos2 < pos3) { + this->ChipID.Revision = + atoi(buffer.substr(pos2 + 1, pos3 - pos2).c_str()); + } + } + + this->NumberOfPhysicalCPU = 0; + do { + pos = buffer.find("\nProcessor", pos + 1); + ++this->NumberOfPhysicalCPU; + } while (pos != std::string::npos); + this->NumberOfLogicalCPU = 1; + + return true; +#else + return false; +#endif +} + +bool SystemInformationImplementation::QueryBSDProcessor() +{ +#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__DragonFly__) + int k; + size_t sz = sizeof(k); + int ctrl[2] = { CTL_HW, HW_NCPU }; + + if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) { + return false; + } + + this->NumberOfPhysicalCPU = k; + this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU; + +# if defined(HW_CPUSPEED) + ctrl[1] = HW_CPUSPEED; + + if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) { + return false; + } + + this->CPUSpeedInMHz = (float)k; +# endif + +# if defined(CPU_SSE) + ctrl[0] = CTL_MACHDEP; + ctrl[1] = CPU_SSE; + + if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) { + return false; + } + + this->Features.HasSSE = (k > 0); +# endif + +# if defined(CPU_SSE2) + ctrl[0] = CTL_MACHDEP; + ctrl[1] = CPU_SSE2; + + if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) { + return false; + } + + this->Features.HasSSE2 = (k > 0); +# endif + +# if defined(CPU_CPUVENDOR) + ctrl[0] = CTL_MACHDEP; + ctrl[1] = CPU_CPUVENDOR; + char vbuf[25]; + ::memset(vbuf, 0, sizeof(vbuf)); + sz = sizeof(vbuf) - 1; + if (sysctl(ctrl, 2, vbuf, &sz, nullptr, 0) != 0) { + return false; + } + + this->ChipID.Vendor = vbuf; + this->FindManufacturer(); +# endif + + return true; +#else + return false; +#endif +} + +bool SystemInformationImplementation::QueryHPUXMemory() +{ +#if defined(__hpux) + unsigned long tv = 0; + unsigned long tp = 0; + unsigned long av = 0; + unsigned long ap = 0; + struct pst_static pst; + struct pst_dynamic pdy; + + unsigned long ps 
= 0; + if (pstat_getstatic(&pst, sizeof(pst), (size_t)1, 0) == -1) { + return false; + } + + ps = pst.page_size; + tp = pst.physical_memory * ps; + tv = (pst.physical_memory + pst.pst_maxmem) * ps; + if (pstat_getdynamic(&pdy, sizeof(pdy), (size_t)1, 0) == -1) { + return false; + } + + ap = tp - pdy.psd_rm * ps; + av = tv - pdy.psd_vm; + this->TotalVirtualMemory = tv >> 10 >> 10; + this->TotalPhysicalMemory = tp >> 10 >> 10; + this->AvailableVirtualMemory = av >> 10 >> 10; + this->AvailablePhysicalMemory = ap >> 10 >> 10; + return true; +#else + return false; +#endif +} + +bool SystemInformationImplementation::QueryHPUXProcessor() +{ +#if defined(__hpux) +# if defined(KWSYS_SYS_HAS_MPCTL_H) + int c = mpctl(MPC_GETNUMSPUS_SYS, 0, 0); + if (c <= 0) { + return false; + } + + this->NumberOfPhysicalCPU = c; + this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU; + + long t = sysconf(_SC_CPU_VERSION); + + if (t == -1) { + return false; + } + + switch (t) { + case CPU_PA_RISC1_0: + this->ChipID.Vendor = "Hewlett-Packard"; + this->ChipID.Family = 0x100; + break; + case CPU_PA_RISC1_1: + this->ChipID.Vendor = "Hewlett-Packard"; + this->ChipID.Family = 0x110; + break; + case CPU_PA_RISC2_0: + this->ChipID.Vendor = "Hewlett-Packard"; + this->ChipID.Family = 0x200; + break; +# if defined(CPU_HP_INTEL_EM_1_0) || defined(CPU_IA64_ARCHREV_0) +# ifdef CPU_HP_INTEL_EM_1_0 + case CPU_HP_INTEL_EM_1_0: +# endif +# ifdef CPU_IA64_ARCHREV_0 + case CPU_IA64_ARCHREV_0: +# endif + this->ChipID.Vendor = "GenuineIntel"; + this->Features.HasIA64 = true; + break; +# endif + default: + return false; + } + + this->FindManufacturer(); + + return true; +# else + return false; +# endif +#else + return false; +#endif +} + +/** Query the operating system information */ +bool SystemInformationImplementation::QueryOSInformation() +{ +#if defined(_WIN32) + + this->OSName = "Windows"; + + OSVERSIONINFOEXW osvi; + BOOL bIsWindows64Bit; + BOOL bOsVersionInfoEx; + char operatingSystem[256]; + + // Try calling GetVersionEx using the OSVERSIONINFOEX structure. + ZeroMemory(&osvi, sizeof(OSVERSIONINFOEXW)); + osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEXW); +# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx +# pragma warning(push) +# ifdef __INTEL_COMPILER +# pragma warning(disable : 1478) +# elif defined __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" +# else +# pragma warning(disable : 4996) +# endif +# endif + bOsVersionInfoEx = GetVersionExW((OSVERSIONINFOW*)&osvi); + if (!bOsVersionInfoEx) { + osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOW); + if (!GetVersionExW((OSVERSIONINFOW*)&osvi)) { + return false; + } + } +# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx +# ifdef __clang__ +# pragma clang diagnostic pop +# else +# pragma warning(pop) +# endif +# endif + + switch (osvi.dwPlatformId) { + case VER_PLATFORM_WIN32_NT: + // Test for the product. + if (osvi.dwMajorVersion <= 4) { + this->OSRelease = "NT"; + } + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 0) { + this->OSRelease = "2000"; + } + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { + this->OSRelease = "XP"; + } + // XP Professional x64 + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 2) { + this->OSRelease = "XP"; + } +# ifdef VER_NT_WORKSTATION + // Test for product type. 
+ if (bOsVersionInfoEx) { + if (osvi.wProductType == VER_NT_WORKSTATION) { + if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 0) { + this->OSRelease = "Vista"; + } + if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 1) { + this->OSRelease = "7"; + } +// VER_SUITE_PERSONAL may not be defined +# ifdef VER_SUITE_PERSONAL + else { + if (osvi.wSuiteMask & VER_SUITE_PERSONAL) { + this->OSRelease += " Personal"; + } else { + this->OSRelease += " Professional"; + } + } +# endif + } else if (osvi.wProductType == VER_NT_SERVER) { + // Check for .NET Server instead of Windows XP. + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { + this->OSRelease = ".NET"; + } + + // Continue with the type detection. + if (osvi.wSuiteMask & VER_SUITE_DATACENTER) { + this->OSRelease += " DataCenter Server"; + } else if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) { + this->OSRelease += " Advanced Server"; + } else { + this->OSRelease += " Server"; + } + } + + sprintf(operatingSystem, "%ls (Build %ld)", osvi.szCSDVersion, + osvi.dwBuildNumber & 0xFFFF); + this->OSVersion = operatingSystem; + } else +# endif // VER_NT_WORKSTATION + { + HKEY hKey; + wchar_t szProductType[80]; + DWORD dwBufLen; + + // Query the registry to retrieve information. + RegOpenKeyExW(HKEY_LOCAL_MACHINE, + L"SYSTEM\\CurrentControlSet\\Control\\ProductOptions", 0, + KEY_QUERY_VALUE, &hKey); + RegQueryValueExW(hKey, L"ProductType", nullptr, nullptr, + (LPBYTE)szProductType, &dwBufLen); + RegCloseKey(hKey); + + if (lstrcmpiW(L"WINNT", szProductType) == 0) { + this->OSRelease += " Professional"; + } + if (lstrcmpiW(L"LANMANNT", szProductType) == 0) { + // Decide between Windows 2000 Advanced Server and Windows .NET + // Enterprise Server. + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { + this->OSRelease += " Standard Server"; + } else { + this->OSRelease += " Server"; + } + } + if (lstrcmpiW(L"SERVERNT", szProductType) == 0) { + // Decide between Windows 2000 Advanced Server and Windows .NET + // Enterprise Server. + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { + this->OSRelease += " Enterprise Server"; + } else { + this->OSRelease += " Advanced Server"; + } + } + } + + // Display version, service pack (if any), and build number. + if (osvi.dwMajorVersion <= 4) { + // NB: NT 4.0 and earlier. + sprintf(operatingSystem, "version %ld.%ld %ls (Build %ld)", + osvi.dwMajorVersion, osvi.dwMinorVersion, osvi.szCSDVersion, + osvi.dwBuildNumber & 0xFFFF); + this->OSVersion = operatingSystem; + } else if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { + // Windows XP and .NET server. + typedef BOOL(CALLBACK * LPFNPROC)(HANDLE, BOOL*); + HINSTANCE hKernelDLL; + LPFNPROC DLLProc; + + // Load the Kernel32 DLL. + hKernelDLL = LoadLibraryW(L"kernel32"); + if (hKernelDLL != nullptr) { + // Only XP and .NET Server support IsWOW64Process so... Load + // dynamically! + DLLProc = (LPFNPROC)GetProcAddress(hKernelDLL, "IsWow64Process"); + + // If the function address is valid, call the function. + if (DLLProc != nullptr) + (DLLProc)(GetCurrentProcess(), &bIsWindows64Bit); + else + bIsWindows64Bit = false; + + // Free the DLL module. + FreeLibrary(hKernelDLL); + } + } else { + // Windows 2000 and everything else. + sprintf(operatingSystem, "%ls (Build %ld)", osvi.szCSDVersion, + osvi.dwBuildNumber & 0xFFFF); + this->OSVersion = operatingSystem; + } + break; + + case VER_PLATFORM_WIN32_WINDOWS: + // Test for the product. 
+ if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 0) { + this->OSRelease = "95"; + if (osvi.szCSDVersion[1] == 'C') { + this->OSRelease += "OSR 2.5"; + } else if (osvi.szCSDVersion[1] == 'B') { + this->OSRelease += "OSR 2"; + } + } + + if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 10) { + this->OSRelease = "98"; + if (osvi.szCSDVersion[1] == 'A') { + this->OSRelease += "SE"; + } + } + + if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 90) { + this->OSRelease = "Me"; + } + break; + + case VER_PLATFORM_WIN32s: + this->OSRelease = "Win32s"; + break; + + default: + this->OSRelease = "Unknown"; + break; + } + + // Get the hostname + WORD wVersionRequested; + WSADATA wsaData; + char name[255]; + wVersionRequested = MAKEWORD(2, 0); + + if (WSAStartup(wVersionRequested, &wsaData) == 0) { + gethostname(name, sizeof(name)); + WSACleanup(); + } + this->Hostname = name; + + const char* arch = getenv("PROCESSOR_ARCHITECTURE"); + const char* wow64 = getenv("PROCESSOR_ARCHITEW6432"); + if (arch) { + this->OSPlatform = arch; + } + + if (wow64) { + // the PROCESSOR_ARCHITEW6432 is only defined when running 32bit programs + // on 64bit OS + this->OSIs64Bit = true; + } else if (arch) { + // all values other than x86 map to 64bit architectures + this->OSIs64Bit = (strncmp(arch, "x86", 3) != 0); + } + +#else + + struct utsname unameInfo; + int errorFlag = uname(&unameInfo); + if (errorFlag == 0) { + this->OSName = unameInfo.sysname; + this->Hostname = unameInfo.nodename; + this->OSRelease = unameInfo.release; + this->OSVersion = unameInfo.version; + this->OSPlatform = unameInfo.machine; + + // This is still insufficient to capture 64bit architecture such + // powerpc and possible mips and sparc + if (this->OSPlatform.find_first_of("64") != std::string::npos) { + this->OSIs64Bit = true; + } + } + +# ifdef __APPLE__ + this->OSName = "Unknown Apple OS"; + this->OSRelease = "Unknown product version"; + this->OSVersion = "Unknown build version"; + + this->CallSwVers("-productName", this->OSName); + this->CallSwVers("-productVersion", this->OSRelease); + this->CallSwVers("-buildVersion", this->OSVersion); +# endif + +#endif + + return true; +} + +int SystemInformationImplementation::CallSwVers(const char* arg, + std::string& ver) +{ +#ifdef __APPLE__ + std::vector args; + args.push_back("sw_vers"); + args.push_back(arg); + args.push_back(nullptr); + ver = this->RunProcess(args); + this->TrimNewline(ver); +#else + // avoid C4100 + (void)arg; + (void)ver; +#endif + return 0; +} + +void SystemInformationImplementation::TrimNewline(std::string& output) +{ + // remove \r + std::string::size_type pos = 0; + while ((pos = output.find("\r", pos)) != std::string::npos) { + output.erase(pos); + } + + // remove \n + pos = 0; + while ((pos = output.find("\n", pos)) != std::string::npos) { + output.erase(pos); + } +} + +/** Return true if the machine is 64 bits */ +bool SystemInformationImplementation::Is64Bits() +{ + return this->OSIs64Bit; +} +} diff --git a/test/API/driver/kwsys/SystemInformation.hxx.in b/test/API/driver/kwsys/SystemInformation.hxx.in new file mode 100644 index 00000000000..fc42e9dc72d --- /dev/null +++ b/test/API/driver/kwsys/SystemInformation.hxx.in @@ -0,0 +1,170 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/ +#ifndef @KWSYS_NAMESPACE@_SystemInformation_h +#define @KWSYS_NAMESPACE@_SystemInformation_h + +#include <@KWSYS_NAMESPACE@/Configure.hxx> + +#include /* size_t */ +#include + +namespace @KWSYS_NAMESPACE@ { + +// forward declare the implementation class +class SystemInformationImplementation; + +class @KWSYS_NAMESPACE@_EXPORT SystemInformation +{ +#if @KWSYS_USE_LONG_LONG@ + typedef long long LongLong; +#elif @KWSYS_USE___INT64@ + typedef __int64 LongLong; +#else +# error "No Long Long" +#endif + friend class SystemInformationImplementation; + SystemInformationImplementation* Implementation; + +public: + // possible parameter values for DoesCPUSupportFeature() + static const long int CPU_FEATURE_MMX = 1 << 0; + static const long int CPU_FEATURE_MMX_PLUS = 1 << 1; + static const long int CPU_FEATURE_SSE = 1 << 2; + static const long int CPU_FEATURE_SSE2 = 1 << 3; + static const long int CPU_FEATURE_AMD_3DNOW = 1 << 4; + static const long int CPU_FEATURE_AMD_3DNOW_PLUS = 1 << 5; + static const long int CPU_FEATURE_IA64 = 1 << 6; + static const long int CPU_FEATURE_MP_CAPABLE = 1 << 7; + static const long int CPU_FEATURE_HYPERTHREAD = 1 << 8; + static const long int CPU_FEATURE_SERIALNUMBER = 1 << 9; + static const long int CPU_FEATURE_APIC = 1 << 10; + static const long int CPU_FEATURE_SSE_FP = 1 << 11; + static const long int CPU_FEATURE_SSE_MMX = 1 << 12; + static const long int CPU_FEATURE_CMOV = 1 << 13; + static const long int CPU_FEATURE_MTRR = 1 << 14; + static const long int CPU_FEATURE_L1CACHE = 1 << 15; + static const long int CPU_FEATURE_L2CACHE = 1 << 16; + static const long int CPU_FEATURE_L3CACHE = 1 << 17; + static const long int CPU_FEATURE_ACPI = 1 << 18; + static const long int CPU_FEATURE_THERMALMONITOR = 1 << 19; + static const long int CPU_FEATURE_TEMPSENSEDIODE = 1 << 20; + static const long int CPU_FEATURE_FREQUENCYID = 1 << 21; + static const long int CPU_FEATURE_VOLTAGEID_FREQUENCY = 1 << 22; + static const long int CPU_FEATURE_FPU = 1 << 23; + +public: + SystemInformation(); + ~SystemInformation(); + + SystemInformation(const SystemInformation&) = delete; + SystemInformation& operator=(const SystemInformation&) = delete; + + const char* GetVendorString(); + const char* GetVendorID(); + std::string GetTypeID(); + std::string GetFamilyID(); + std::string GetModelID(); + std::string GetModelName(); + std::string GetSteppingCode(); + const char* GetExtendedProcessorName(); + const char* GetProcessorSerialNumber(); + int GetProcessorCacheSize(); + unsigned int GetLogicalProcessorsPerPhysical(); + float GetProcessorClockFrequency(); + int GetProcessorAPICID(); + int GetProcessorCacheXSize(long int); + bool DoesCPUSupportFeature(long int); + + // returns an informative general description of the cpu + // on this system. + std::string GetCPUDescription(); + + const char* GetHostname(); + std::string GetFullyQualifiedDomainName(); + + const char* GetOSName(); + const char* GetOSRelease(); + const char* GetOSVersion(); + const char* GetOSPlatform(); + + int GetOSIsWindows(); + int GetOSIsLinux(); + int GetOSIsApple(); + + // returns an informative general description of the os + // on this system. + std::string GetOSDescription(); + + // returns if the operating system is 64bit or not. + bool Is64Bits(); + + unsigned int GetNumberOfLogicalCPU(); + unsigned int GetNumberOfPhysicalCPU(); + + bool DoesCPUSupportCPUID(); + + // Retrieve id of the current running process + LongLong GetProcessId(); + + // Retrieve memory information in MiB. 
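// A usage sketch for this class as a whole (illustration only; "kwsys" and
// the include path stand in for the configured @KWSYS_NAMESPACE@ header):
#if 0
#include <iostream>
#include <kwsys/SystemInformation.hxx> // generated from this .hxx.in

int main()
{
  kwsys::SystemInformation info;
  info.RunCPUCheck();
  info.RunOSCheck();
  info.RunMemoryCheck();

  std::cout << "CPU: " << info.GetCPUDescription() << "\n"
            << "OS:  " << info.GetOSDescription() << "\n"
            // the memory accessors declared next report MiB
            << "RAM: " << info.GetTotalPhysicalMemory() << " MiB\n";
  return 0;
}
#endif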
+ size_t GetTotalVirtualMemory(); + size_t GetAvailableVirtualMemory(); + size_t GetTotalPhysicalMemory(); + size_t GetAvailablePhysicalMemory(); + + // returns an informative general description if the installed and + // available ram on this system. See the GetHostMemoryTotal, and + // Get{Host,Proc}MemoryAvailable methods for more information. + std::string GetMemoryDescription(const char* hostLimitEnvVarName = nullptr, + const char* procLimitEnvVarName = nullptr); + + // Retrieve amount of physical memory installed on the system in KiB + // units. + LongLong GetHostMemoryTotal(); + + // Get total system RAM in units of KiB available colectivley to all + // processes in a process group. An example of a process group + // are the processes comprising an mpi program which is running in + // parallel. The amount of memory reported may differ from the host + // total if a host wide resource limit is applied. Such reource limits + // are reported to us via an application specified environment variable. + LongLong GetHostMemoryAvailable(const char* hostLimitEnvVarName = nullptr); + + // Get total system RAM in units of KiB available to this process. + // This may differ from the host available if a per-process resource + // limit is applied. per-process memory limits are applied on unix + // system via rlimit API. Resource limits that are not imposed via + // rlimit API may be reported to us via an application specified + // environment variable. + LongLong GetProcMemoryAvailable(const char* hostLimitEnvVarName = nullptr, + const char* procLimitEnvVarName = nullptr); + + // Get the system RAM used by all processes on the host, in units of KiB. + LongLong GetHostMemoryUsed(); + + // Get system RAM used by this process id in units of KiB. + LongLong GetProcMemoryUsed(); + + // Return the load average of the machine or -0.0 if it cannot + // be determined. + double GetLoadAverage(); + + // enable/disable stack trace signal handler. In order to + // produce an informative stack trace the application should + // be dynamically linked and compiled with debug symbols. + static void SetStackTraceOnError(int enable); + + // format and return the current program stack in a string. In + // order to produce an informative stack trace the application + // should be dynamically linked and compiled with debug symbols. + static std::string GetProgramStack(int firstFrame, int wholePath); + + /** Run the different checks */ + void RunCPUCheck(); + void RunOSCheck(); + void RunMemoryCheck(); +}; + +} // namespace @KWSYS_NAMESPACE@ + +#endif diff --git a/test/API/driver/kwsys/SystemTools.cxx b/test/API/driver/kwsys/SystemTools.cxx new file mode 100644 index 00000000000..ce4d6ef9505 --- /dev/null +++ b/test/API/driver/kwsys/SystemTools.cxx @@ -0,0 +1,4703 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/ +#ifdef __osf__ +# define _OSF_SOURCE +# define _POSIX_C_SOURCE 199506L +# define _XOPEN_SOURCE_EXTENDED +#endif + +#if defined(_WIN32) && \ + (defined(_MSC_VER) || defined(__WATCOMC__) || defined(__BORLANDC__) || \ + defined(__MINGW32__)) +# define KWSYS_WINDOWS_DIRS +#else +# if defined(__SUNPRO_CC) +# include +# endif +#endif + +#include "kwsysPrivate.h" +#include KWSYS_HEADER(RegularExpression.hxx) +#include KWSYS_HEADER(SystemTools.hxx) +#include KWSYS_HEADER(Directory.hxx) +#include KWSYS_HEADER(FStream.hxx) +#include KWSYS_HEADER(Encoding.h) +#include KWSYS_HEADER(Encoding.hxx) + +#include +#include +#include +#include +#include +#include + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "Directory.hxx.in" +# include "Encoding.hxx.in" +# include "FStream.hxx.in" +# include "RegularExpression.hxx.in" +# include "SystemTools.hxx.in" +#endif + +#ifdef _MSC_VER +# pragma warning(disable : 4786) +#endif + +#if defined(__sgi) && !defined(__GNUC__) +# pragma set woff 1375 /* base class destructor not virtual */ +#endif + +#include +#include +#ifdef __QNX__ +# include /* for malloc/free on QNX */ +#endif +#include +#include +#include +#include + +#if defined(_WIN32) && !defined(_MSC_VER) && defined(__GNUC__) +# include /* for strcasecmp */ +#endif + +#ifdef _MSC_VER +# define umask _umask // Note this is still umask on Borland +#endif + +// support for realpath call +#ifndef _WIN32 +# include +# include +# include +# include +# include +# include +# include +# ifndef __VMS +# include +# include +# endif +# include /* sigprocmask */ +#endif + +#ifdef __linux +# include +#endif + +// Windows API. +#if defined(_WIN32) +# include +# include +# ifndef INVALID_FILE_ATTRIBUTES +# define INVALID_FILE_ATTRIBUTES ((DWORD)-1) +# endif +# if defined(_MSC_VER) && _MSC_VER >= 1800 +# define KWSYS_WINDOWS_DEPRECATED_GetVersionEx +# endif +#elif defined(__CYGWIN__) +# include +# undef _WIN32 +#endif + +#if !KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H +extern char** environ; +#endif + +#ifdef __CYGWIN__ +# include +#endif + +// getpwnam doesn't exist on Windows and Cray Xt3/Catamount +// same for TIOCGWINSZ +#if defined(_WIN32) || defined(__LIBCATAMOUNT__) || \ + (defined(HAVE_GETPWNAM) && HAVE_GETPWNAM == 0) +# undef HAVE_GETPWNAM +# undef HAVE_TTY_INFO +#else +# define HAVE_GETPWNAM 1 +# define HAVE_TTY_INFO 1 +#endif + +#define VTK_URL_PROTOCOL_REGEX "([a-zA-Z0-9]*)://(.*)" +#define VTK_URL_REGEX \ + "([a-zA-Z0-9]*)://(([A-Za-z0-9]+)(:([^:@]+))?@)?([^:@/]+)(:([0-9]+))?/" \ + "(.+)?" + +#ifdef _MSC_VER +# include +#else +# include +#endif + +// This is a hack to prevent warnings about these functions being +// declared but not referenced. +#if defined(__sgi) && !defined(__GNUC__) +# include +namespace KWSYS_NAMESPACE { +class SystemToolsHack +{ +public: + enum + { + Ref1 = sizeof(cfgetospeed(0)), + Ref2 = sizeof(cfgetispeed(0)), + Ref3 = sizeof(tcgetattr(0, 0)), + Ref4 = sizeof(tcsetattr(0, 0, 0)), + Ref5 = sizeof(cfsetospeed(0, 0)), + Ref6 = sizeof(cfsetispeed(0, 0)) + }; +}; +} +#endif + +#if defined(_WIN32) && \ + (defined(_MSC_VER) || defined(__WATCOMC__) || defined(__BORLANDC__) || \ + defined(__MINGW32__)) +# include +# include +# define _unlink unlink +#endif + +/* The maximum length of a file name. 
*/ +#if defined(PATH_MAX) +# define KWSYS_SYSTEMTOOLS_MAXPATH PATH_MAX +#elif defined(MAXPATHLEN) +# define KWSYS_SYSTEMTOOLS_MAXPATH MAXPATHLEN +#else +# define KWSYS_SYSTEMTOOLS_MAXPATH 16384 +#endif +#if defined(__WATCOMC__) +# include +# define _mkdir mkdir +# define _rmdir rmdir +# define _getcwd getcwd +# define _chdir chdir +#endif + +#if defined(__BEOS__) && !defined(__ZETA__) +# include +# include + +// BeOS 5 doesn't have usleep(), but it has snooze(), which is identical. +static inline void usleep(unsigned int msec) +{ + ::snooze(msec); +} + +// BeOS 5 also doesn't have realpath(), but its C++ API offers something close. +static inline char* realpath(const char* path, char* resolved_path) +{ + const size_t maxlen = KWSYS_SYSTEMTOOLS_MAXPATH; + snprintf(resolved_path, maxlen, "%s", path); + BPath normalized(resolved_path, nullptr, true); + const char* resolved = normalized.Path(); + if (resolved != nullptr) // nullptr == No such file. + { + if (snprintf(resolved_path, maxlen, "%s", resolved) < maxlen) { + return resolved_path; + } + } + return nullptr; // something went wrong. +} +#endif + +#ifdef _WIN32 +static time_t windows_filetime_to_posix_time(const FILETIME& ft) +{ + LARGE_INTEGER date; + date.HighPart = ft.dwHighDateTime; + date.LowPart = ft.dwLowDateTime; + + // removes the diff between 1970 and 1601 + date.QuadPart -= ((LONGLONG)(369 * 365 + 89) * 24 * 3600 * 10000000); + + // converts back from 100-nanoseconds to seconds + return date.QuadPart / 10000000; +} +#endif + +#ifdef KWSYS_WINDOWS_DIRS +# include + +inline int Mkdir(const std::string& dir) +{ + return _wmkdir( + KWSYS_NAMESPACE::Encoding::ToWindowsExtendedPath(dir).c_str()); +} +inline int Rmdir(const std::string& dir) +{ + return _wrmdir( + KWSYS_NAMESPACE::Encoding::ToWindowsExtendedPath(dir).c_str()); +} +inline const char* Getcwd(char* buf, unsigned int len) +{ + std::vector w_buf(len); + if (_wgetcwd(&w_buf[0], len)) { + size_t nlen = kwsysEncoding_wcstombs(buf, &w_buf[0], len); + if (nlen == static_cast(-1)) { + return 0; + } + if (nlen < len) { + // make sure the drive letter is capital + if (nlen > 1 && buf[1] == ':') { + buf[0] = toupper(buf[0]); + } + return buf; + } + } + return 0; +} +inline int Chdir(const std::string& dir) +{ +# if defined(__BORLANDC__) + return chdir(dir.c_str()); +# else + return _wchdir(KWSYS_NAMESPACE::Encoding::ToWide(dir).c_str()); +# endif +} +inline void Realpath(const std::string& path, std::string& resolved_path, + std::string* errorMessage = 0) +{ + std::wstring tmp = KWSYS_NAMESPACE::Encoding::ToWide(path); + wchar_t* ptemp; + wchar_t fullpath[MAX_PATH]; + DWORD bufferLen = GetFullPathNameW( + tmp.c_str(), sizeof(fullpath) / sizeof(fullpath[0]), fullpath, &ptemp); + if (bufferLen < sizeof(fullpath) / sizeof(fullpath[0])) { + resolved_path = KWSYS_NAMESPACE::Encoding::ToNarrow(fullpath); + KWSYS_NAMESPACE::SystemTools::ConvertToUnixSlashes(resolved_path); + } else if (errorMessage) { + if (bufferLen) { + *errorMessage = "Destination path buffer size too small."; + } else if (unsigned int errorId = GetLastError()) { + LPSTR message = nullptr; + DWORD size = FormatMessageA( + FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + nullptr, errorId, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPSTR)&message, 0, nullptr); + *errorMessage = std::string(message, size); + LocalFree(message); + } else { + *errorMessage = "Unknown error."; + } + + resolved_path = ""; + } else { + resolved_path = path; + } +} +#else +# include + +# 
include +# include +inline int Mkdir(const std::string& dir) +{ + return mkdir(dir.c_str(), 00777); +} +inline int Rmdir(const std::string& dir) +{ + return rmdir(dir.c_str()); +} +inline const char* Getcwd(char* buf, unsigned int len) +{ + return getcwd(buf, len); +} + +inline int Chdir(const std::string& dir) +{ + return chdir(dir.c_str()); +} +inline void Realpath(const std::string& path, std::string& resolved_path, + std::string* errorMessage = nullptr) +{ + char resolved_name[KWSYS_SYSTEMTOOLS_MAXPATH]; + + errno = 0; + char* ret = realpath(path.c_str(), resolved_name); + if (ret) { + resolved_path = ret; + } else if (errorMessage) { + if (errno) { + *errorMessage = strerror(errno); + } else { + *errorMessage = "Unknown error."; + } + + resolved_path = ""; + } else { + // if path resolution fails, return what was passed in + resolved_path = path; + } +} +#endif + +#if !defined(_WIN32) && defined(__COMO__) +// Hack for como strict mode to avoid defining _SVID_SOURCE or _BSD_SOURCE. +extern "C" { +extern FILE* popen(__const char* __command, __const char* __modes) __THROW; +extern int pclose(FILE* __stream) __THROW; +extern char* realpath(__const char* __restrict __name, + char* __restrict __resolved) __THROW; +extern char* strdup(__const char* __s) __THROW; +extern int putenv(char* __string) __THROW; +} +#endif + +namespace KWSYS_NAMESPACE { + +double SystemTools::GetTime(void) +{ +#if defined(_WIN32) && !defined(__CYGWIN__) + FILETIME ft; + GetSystemTimeAsFileTime(&ft); + return (429.4967296 * ft.dwHighDateTime + 0.0000001 * ft.dwLowDateTime - + 11644473600.0); +#else + struct timeval t; + gettimeofday(&t, nullptr); + return 1.0 * double(t.tv_sec) + 0.000001 * double(t.tv_usec); +#endif +} + +/* Type of character storing the environment. */ +#if defined(_WIN32) +typedef wchar_t envchar; +#else +typedef char envchar; +#endif + +/* Order by environment key only (VAR from VAR=VALUE). */ +struct kwsysEnvCompare +{ + bool operator()(const envchar* l, const envchar* r) const + { +#if defined(_WIN32) + const wchar_t* leq = wcschr(l, L'='); + const wchar_t* req = wcschr(r, L'='); + size_t llen = leq ? (leq - l) : wcslen(l); + size_t rlen = req ? (req - r) : wcslen(r); + if (llen == rlen) { + return wcsncmp(l, r, llen) < 0; + } else { + return wcscmp(l, r) < 0; + } +#else + const char* leq = strchr(l, '='); + const char* req = strchr(r, '='); + size_t llen = leq ? static_cast(leq - l) : strlen(l); + size_t rlen = req ? static_cast(req - r) : strlen(r); + if (llen == rlen) { + return strncmp(l, r, llen) < 0; + } else { + return strcmp(l, r) < 0; + } +#endif + } +}; + +class kwsysEnvSet : public std::set +{ +public: + class Free + { + const envchar* Env; + + public: + Free(const envchar* env) + : Env(env) + { + } + ~Free() { free(const_cast(this->Env)); } + + Free(const Free&) = delete; + Free& operator=(const Free&) = delete; + }; + + const envchar* Release(const envchar* env) + { + const envchar* old = nullptr; + iterator i = this->find(env); + if (i != this->end()) { + old = *i; + this->erase(i); + } + return old; + } +}; + +#ifdef _WIN32 +struct SystemToolsPathCaseCmp +{ + bool operator()(std::string const& l, std::string const& r) const + { +# ifdef _MSC_VER + return _stricmp(l.c_str(), r.c_str()) < 0; +# elif defined(__GNUC__) + return strcasecmp(l.c_str(), r.c_str()) < 0; +# else + return SystemTools::Strucmp(l.c_str(), r.c_str()) < 0; +# endif + } +}; +#endif + +/** + * SystemTools static variables singleton class. 
+ */ +class SystemToolsStatic +{ +public: + typedef std::map StringMap; +#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP + /** + * Path translation table from dir to refdir + * Each time 'dir' will be found it will be replace by 'refdir' + */ + StringMap TranslationMap; +#endif +#ifdef _WIN32 + static std::string GetCasePathName(std::string const& pathIn); + static std::string GetActualCaseForPathCached(std::string const& path); + static const char* GetEnvBuffered(const char* key); + std::map PathCaseMap; + std::map EnvMap; +#endif +#ifdef __CYGWIN__ + StringMap Cyg2Win32Map; +#endif + + /** + * Actual implementation of ReplaceString. + */ + static void ReplaceString(std::string& source, const char* replace, + size_t replaceSize, const std::string& with); + + /** + * Actual implementation of FileIsFullPath. + */ + static bool FileIsFullPath(const char*, size_t); + + /** + * Find a filename (file or directory) in the system PATH, with + * optional extra paths. + */ + static std::string FindName( + const std::string& name, + const std::vector& path = std::vector(), + bool no_system_path = false); +}; + +#ifdef _WIN32 +std::string SystemToolsStatic::GetCasePathName(std::string const& pathIn) +{ + std::string casePath; + + // First check if the file is relative. We don't fix relative paths since the + // real case depends on the root directory and the given path fragment may + // have meaning elsewhere in the project. + if (!SystemTools::FileIsFullPath(pathIn)) { + // This looks unnecessary, but it allows for the return value optimization + // since all return paths return the same local variable. + casePath = pathIn; + return casePath; + } + + std::vector path_components; + SystemTools::SplitPath(pathIn, path_components); + + // Start with root component. + std::vector::size_type idx = 0; + casePath = path_components[idx++]; + // make sure drive letter is always upper case + if (casePath.size() > 1 && casePath[1] == ':') { + casePath[0] = toupper(casePath[0]); + } + const char* sep = ""; + + // If network path, fill casePath with server/share so FindFirstFile + // will work after that. Maybe someday call other APIs to get + // actual case of servers and shares. + if (path_components.size() > 2 && path_components[0] == "//") { + casePath += path_components[idx++]; + casePath += "/"; + casePath += path_components[idx++]; + sep = "/"; + } + + // Convert case of all components that exist. + bool converting = true; + for (; idx < path_components.size(); idx++) { + casePath += sep; + sep = "/"; + + if (converting) { + // If path component contains wildcards, we skip matching + // because these filenames are not allowed on windows, + // and we do not want to match a different file. 
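// The check just below implements that. At the public level this conversion
// is reached through SystemTools::GetActualCaseForPath(), declared in
// SystemTools.hxx.in (not shown here); a caller-side sketch, with "kwsys"
// standing in for the configured namespace:
#if 0
#include <iostream>
#include <kwsys/SystemTools.hxx>

int main()
{
  // On a typical Windows install this prints "C:/Program Files", i.e. the
  // on-disk case, regardless of the case used in the query; components
  // containing '*' or '?' are passed through unchanged.
  std::cout << kwsys::SystemTools::GetActualCaseForPath("c:/program files")
            << std::endl;
  return 0;
}
#endif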
+ if (path_components[idx].find('*') != std::string::npos || + path_components[idx].find('?') != std::string::npos) { + converting = false; + } else { + std::string test_str = casePath; + test_str += path_components[idx]; + WIN32_FIND_DATAW findData; + HANDLE hFind = + ::FindFirstFileW(Encoding::ToWide(test_str).c_str(), &findData); + if (INVALID_HANDLE_VALUE != hFind) { + path_components[idx] = Encoding::ToNarrow(findData.cFileName); + ::FindClose(hFind); + } else { + converting = false; + } + } + } + + casePath += path_components[idx]; + } + return casePath; +} + +std::string SystemToolsStatic::GetActualCaseForPathCached(std::string const& p) +{ + // Check to see if actual case has already been called + // for this path, and the result is stored in the PathCaseMap + auto& pcm = SystemTools::Statics->PathCaseMap; + { + auto itr = pcm.find(p); + if (itr != pcm.end()) { + return itr->second; + } + } + std::string casePath = SystemToolsStatic::GetCasePathName(p); + if (casePath.size() <= MAX_PATH) { + pcm[p] = casePath; + } + return casePath; +} +#endif + +// adds the elements of the env variable path to the arg passed in +void SystemTools::GetPath(std::vector& path, const char* env) +{ + size_t const old_size = path.size(); +#if defined(_WIN32) && !defined(__CYGWIN__) + const char pathSep = ';'; +#else + const char pathSep = ':'; +#endif + if (!env) { + env = "PATH"; + } + std::string pathEnv; + if (!SystemTools::GetEnv(env, pathEnv)) { + return; + } + + // A hack to make the below algorithm work. + if (!pathEnv.empty() && pathEnv.back() != pathSep) { + pathEnv += pathSep; + } + std::string::size_type start = 0; + bool done = false; + while (!done) { + std::string::size_type endpos = pathEnv.find(pathSep, start); + if (endpos != std::string::npos) { + path.push_back(pathEnv.substr(start, endpos - start)); + start = endpos + 1; + } else { + done = true; + } + } + for (std::vector::iterator i = path.begin() + old_size; + i != path.end(); ++i) { + SystemTools::ConvertToUnixSlashes(*i); + } +} + +#if defined(_WIN32) +const char* SystemToolsStatic::GetEnvBuffered(const char* key) +{ + std::string env; + if (SystemTools::GetEnv(key, env)) { + std::string& menv = SystemTools::Statics->EnvMap[key]; + if (menv != env) { + menv = std::move(env); + } + return menv.c_str(); + } + return nullptr; +} +#endif + +const char* SystemTools::GetEnv(const char* key) +{ +#if defined(_WIN32) + return SystemToolsStatic::GetEnvBuffered(key); +#else + return getenv(key); +#endif +} + +const char* SystemTools::GetEnv(const std::string& key) +{ +#if defined(_WIN32) + return SystemToolsStatic::GetEnvBuffered(key.c_str()); +#else + return getenv(key.c_str()); +#endif +} + +bool SystemTools::GetEnv(const char* key, std::string& result) +{ +#if defined(_WIN32) + const std::wstring wkey = Encoding::ToWide(key); + const wchar_t* wv = _wgetenv(wkey.c_str()); + if (wv) { + result = Encoding::ToNarrow(wv); + return true; + } +#else + const char* v = getenv(key); + if (v) { + result = v; + return true; + } +#endif + return false; +} + +bool SystemTools::GetEnv(const std::string& key, std::string& result) +{ + return SystemTools::GetEnv(key.c_str(), result); +} + +bool SystemTools::HasEnv(const char* key) +{ +#if defined(_WIN32) + const std::wstring wkey = Encoding::ToWide(key); + const wchar_t* v = _wgetenv(wkey.c_str()); +#else + const char* v = getenv(key); +#endif + return v != nullptr; +} + +bool SystemTools::HasEnv(const std::string& key) +{ + return SystemTools::HasEnv(key.c_str()); +} + +#if KWSYS_CXX_HAS_UNSETENV +/* 
unsetenv("A") removes A from the environment. + On older platforms it returns void instead of int. */ +static int kwsysUnPutEnv(const std::string& env) +{ + size_t pos = env.find('='); + if (pos != std::string::npos) { + std::string name = env.substr(0, pos); + unsetenv(name.c_str()); + } else { + unsetenv(env.c_str()); + } + return 0; +} + +#elif defined(__CYGWIN__) || defined(__GLIBC__) +/* putenv("A") removes A from the environment. It must not put the + memory in the environment because it does not have any "=" syntax. */ +static int kwsysUnPutEnv(const std::string& env) +{ + int err = 0; + size_t pos = env.find('='); + size_t const len = pos == std::string::npos ? env.size() : pos; + size_t const sz = len + 1; + char local_buf[256]; + char* buf = sz > sizeof(local_buf) ? (char*)malloc(sz) : local_buf; + if (!buf) { + return -1; + } + strncpy(buf, env.c_str(), len); + buf[len] = 0; + if (putenv(buf) < 0 && errno != EINVAL) { + err = errno; + } + if (buf != local_buf) { + free(buf); + } + if (err) { + errno = err; + return -1; + } + return 0; +} + +#elif defined(_WIN32) +/* putenv("A=") places "A=" in the environment, which is as close to + removal as we can get with the putenv API. We have to leak the + most recent value placed in the environment for each variable name + on program exit in case exit routines access it. */ + +static kwsysEnvSet kwsysUnPutEnvSet; + +static int kwsysUnPutEnv(std::string const& env) +{ + std::wstring wEnv = Encoding::ToWide(env); + size_t const pos = wEnv.find('='); + size_t const len = pos == std::string::npos ? wEnv.size() : pos; + wEnv.resize(len + 1, L'='); + wchar_t* newEnv = _wcsdup(wEnv.c_str()); + if (!newEnv) { + return -1; + } + kwsysEnvSet::Free oldEnv(kwsysUnPutEnvSet.Release(newEnv)); + kwsysUnPutEnvSet.insert(newEnv); + return _wputenv(newEnv); +} + +#else +/* Manipulate the "environ" global directly. */ +static int kwsysUnPutEnv(const std::string& env) +{ + size_t pos = env.find('='); + size_t const len = pos == std::string::npos ? env.size() : pos; + int in = 0; + int out = 0; + while (environ[in]) { + if (strlen(environ[in]) > len && environ[in][len] == '=' && + strncmp(env.c_str(), environ[in], len) == 0) { + ++in; + } else { + environ[out++] = environ[in++]; + } + } + while (out < in) { + environ[out++] = 0; + } + return 0; +} +#endif + +#if KWSYS_CXX_HAS_SETENV + +/* setenv("A", "B", 1) will set A=B in the environment and makes its + own copies of the strings. */ +bool SystemTools::PutEnv(const std::string& env) +{ + size_t pos = env.find('='); + if (pos != std::string::npos) { + std::string name = env.substr(0, pos); + return setenv(name.c_str(), env.c_str() + pos + 1, 1) == 0; + } else { + return kwsysUnPutEnv(env) == 0; + } +} + +bool SystemTools::UnPutEnv(const std::string& env) +{ + return kwsysUnPutEnv(env) == 0; +} + +#else + +/* putenv("A=B") will set A=B in the environment. Most putenv implementations + put their argument directly in the environment. They never free the memory + on program exit. Keep an active set of pointers to memory we allocate and + pass to putenv, one per environment key. At program exit remove any + environment values that may still reference memory we allocated. Then free + the memory. This will not affect any environment values we never set. 
*/ + +# ifdef __INTEL_COMPILER +# pragma warning disable 444 /* base has non-virtual destructor */ +# endif + +class kwsysEnv : public kwsysEnvSet +{ +public: + ~kwsysEnv() + { + for (iterator i = this->begin(); i != this->end(); ++i) { +# if defined(_WIN32) + const std::string s = Encoding::ToNarrow(*i); + kwsysUnPutEnv(s); +# else + kwsysUnPutEnv(*i); +# endif + free(const_cast(*i)); + } + } + bool Put(const char* env) + { +# if defined(_WIN32) + const std::wstring wEnv = Encoding::ToWide(env); + wchar_t* newEnv = _wcsdup(wEnv.c_str()); +# else + char* newEnv = strdup(env); +# endif + Free oldEnv(this->Release(newEnv)); + this->insert(newEnv); +# if defined(_WIN32) + return _wputenv(newEnv) == 0; +# else + return putenv(newEnv) == 0; +# endif + } + bool UnPut(const char* env) + { +# if defined(_WIN32) + const std::wstring wEnv = Encoding::ToWide(env); + Free oldEnv(this->Release(wEnv.c_str())); +# else + Free oldEnv(this->Release(env)); +# endif + return kwsysUnPutEnv(env) == 0; + } +}; + +static kwsysEnv kwsysEnvInstance; + +bool SystemTools::PutEnv(const std::string& env) +{ + return kwsysEnvInstance.Put(env.c_str()); +} + +bool SystemTools::UnPutEnv(const std::string& env) +{ + return kwsysEnvInstance.UnPut(env.c_str()); +} + +#endif + +const char* SystemTools::GetExecutableExtension() +{ +#if defined(_WIN32) || defined(__CYGWIN__) || defined(__VMS) + return ".exe"; +#else + return ""; +#endif +} + +FILE* SystemTools::Fopen(const std::string& file, const char* mode) +{ +#ifdef _WIN32 + return _wfopen(Encoding::ToWindowsExtendedPath(file).c_str(), + Encoding::ToWide(mode).c_str()); +#else + return fopen(file.c_str(), mode); +#endif +} + +bool SystemTools::MakeDirectory(const char* path, const mode_t* mode) +{ + if (!path) { + return false; + } + return SystemTools::MakeDirectory(std::string(path), mode); +} + +bool SystemTools::MakeDirectory(const std::string& path, const mode_t* mode) +{ + if (SystemTools::PathExists(path)) { + return SystemTools::FileIsDirectory(path); + } + if (path.empty()) { + return false; + } + std::string dir = path; + SystemTools::ConvertToUnixSlashes(dir); + + std::string::size_type pos = 0; + std::string topdir; + while ((pos = dir.find('/', pos)) != std::string::npos) { + topdir = dir.substr(0, pos); + + if (Mkdir(topdir) == 0 && mode != nullptr) { + SystemTools::SetPermissions(topdir, *mode); + } + + ++pos; + } + topdir = dir; + if (Mkdir(topdir) != 0) { + // There is a bug in the Borland Run time library which makes MKDIR + // return EACCES when it should return EEXISTS + // if it is some other error besides directory exists + // then return false + if ((errno != EEXIST) +#ifdef __BORLANDC__ + && (errno != EACCES) +#endif + ) { + return false; + } + } else if (mode != nullptr) { + SystemTools::SetPermissions(topdir, *mode); + } + + return true; +} + +// replace replace with with as many times as it shows up in source. +// write the result into source. +void SystemTools::ReplaceString(std::string& source, + const std::string& replace, + const std::string& with) +{ + // do while hangs if replaceSize is 0 + if (replace.empty()) { + return; + } + + SystemToolsStatic::ReplaceString(source, replace.c_str(), replace.size(), + with); +} + +void SystemTools::ReplaceString(std::string& source, const char* replace, + const char* with) +{ + // do while hangs if replaceSize is 0 + if (!*replace) { + return; + } + + SystemToolsStatic::ReplaceString(source, replace, strlen(replace), + with ? 
with : ""); +} + +void SystemToolsStatic::ReplaceString(std::string& source, const char* replace, + size_t replaceSize, + const std::string& with) +{ + const char* src = source.c_str(); + char* searchPos = const_cast(strstr(src, replace)); + + // get out quick if string is not found + if (!searchPos) { + return; + } + + // perform replacements until done + char* orig = strdup(src); + char* currentPos = orig; + searchPos = searchPos - src + orig; + + // initialize the result + source.erase(source.begin(), source.end()); + do { + *searchPos = '\0'; + source += currentPos; + currentPos = searchPos + replaceSize; + // replace + source += with; + searchPos = strstr(currentPos, replace); + } while (searchPos); + + // copy any trailing text + source += currentPos; + free(orig); +} + +#if defined(_WIN32) && !defined(__CYGWIN__) + +# if defined(KEY_WOW64_32KEY) && defined(KEY_WOW64_64KEY) +# define KWSYS_ST_KEY_WOW64_32KEY KEY_WOW64_32KEY +# define KWSYS_ST_KEY_WOW64_64KEY KEY_WOW64_64KEY +# else +# define KWSYS_ST_KEY_WOW64_32KEY 0x0200 +# define KWSYS_ST_KEY_WOW64_64KEY 0x0100 +# endif + +static bool SystemToolsParseRegistryKey(const std::string& key, + HKEY& primaryKey, std::string& second, + std::string& valuename) +{ + std::string primary = key; + + size_t start = primary.find('\\'); + if (start == std::string::npos) { + return false; + } + + size_t valuenamepos = primary.find(';'); + if (valuenamepos != std::string::npos) { + valuename = primary.substr(valuenamepos + 1); + } + + second = primary.substr(start + 1, valuenamepos - start - 1); + primary = primary.substr(0, start); + + if (primary == "HKEY_CURRENT_USER") { + primaryKey = HKEY_CURRENT_USER; + } + if (primary == "HKEY_CURRENT_CONFIG") { + primaryKey = HKEY_CURRENT_CONFIG; + } + if (primary == "HKEY_CLASSES_ROOT") { + primaryKey = HKEY_CLASSES_ROOT; + } + if (primary == "HKEY_LOCAL_MACHINE") { + primaryKey = HKEY_LOCAL_MACHINE; + } + if (primary == "HKEY_USERS") { + primaryKey = HKEY_USERS; + } + + return true; +} + +static DWORD SystemToolsMakeRegistryMode(DWORD mode, + SystemTools::KeyWOW64 view) +{ + // only add the modes when on a system that supports Wow64. + static FARPROC wow64p = + GetProcAddress(GetModuleHandleW(L"kernel32"), "IsWow64Process"); + if (wow64p == nullptr) { + return mode; + } + + if (view == SystemTools::KeyWOW64_32) { + return mode | KWSYS_ST_KEY_WOW64_32KEY; + } else if (view == SystemTools::KeyWOW64_64) { + return mode | KWSYS_ST_KEY_WOW64_64KEY; + } + return mode; +} +#endif + +#if defined(_WIN32) && !defined(__CYGWIN__) +bool SystemTools::GetRegistrySubKeys(const std::string& key, + std::vector& subkeys, + KeyWOW64 view) +{ + HKEY primaryKey = HKEY_CURRENT_USER; + std::string second; + std::string valuename; + if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) { + return false; + } + + HKEY hKey; + if (RegOpenKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0, + SystemToolsMakeRegistryMode(KEY_READ, view), + &hKey) != ERROR_SUCCESS) { + return false; + } else { + wchar_t name[1024]; + DWORD dwNameSize = sizeof(name) / sizeof(name[0]); + + DWORD i = 0; + while (RegEnumKeyW(hKey, i, name, dwNameSize) == ERROR_SUCCESS) { + subkeys.push_back(Encoding::ToNarrow(name)); + ++i; + } + + RegCloseKey(hKey); + } + + return true; +} +#else +bool SystemTools::GetRegistrySubKeys(const std::string&, + std::vector&, KeyWOW64) +{ + return false; +} +#endif + +// Read a registry value. 
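// Key strings follow the syntax handled by SystemToolsParseRegistryKey()
// above; an in-file sketch of the decomposition it performs:
#if 0
  HKEY primaryKey = HKEY_CURRENT_USER;
  std::string second;
  std::string valuename;
  SystemToolsParseRegistryKey(
    "HKEY_LOCAL_MACHINE\\SOFTWARE\\Scriptics\\Tcl\\8.4;Root", primaryKey,
    second, valuename);
  // primaryKey == HKEY_LOCAL_MACHINE
  // second     == "SOFTWARE\\Scriptics\\Tcl\\8.4"
  // valuename  == "Root"   (left empty when the key string has no ';')
#endif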
+// Example : +// HKEY_LOCAL_MACHINE\SOFTWARE\Python\PythonCore\2.1\InstallPath +// => will return the data of the "default" value of the key +// HKEY_LOCAL_MACHINE\SOFTWARE\Scriptics\Tcl\8.4;Root +// => will return the data of the "Root" value of the key + +#if defined(_WIN32) && !defined(__CYGWIN__) +bool SystemTools::ReadRegistryValue(const std::string& key, std::string& value, + KeyWOW64 view) +{ + bool valueset = false; + HKEY primaryKey = HKEY_CURRENT_USER; + std::string second; + std::string valuename; + if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) { + return false; + } + + HKEY hKey; + if (RegOpenKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0, + SystemToolsMakeRegistryMode(KEY_READ, view), + &hKey) != ERROR_SUCCESS) { + return false; + } else { + DWORD dwType, dwSize; + dwSize = 1023; + wchar_t data[1024]; + if (RegQueryValueExW(hKey, Encoding::ToWide(valuename).c_str(), nullptr, + &dwType, (BYTE*)data, &dwSize) == ERROR_SUCCESS) { + if (dwType == REG_SZ) { + value = Encoding::ToNarrow(data); + valueset = true; + } else if (dwType == REG_EXPAND_SZ) { + wchar_t expanded[1024]; + DWORD dwExpandedSize = sizeof(expanded) / sizeof(expanded[0]); + if (ExpandEnvironmentStringsW(data, expanded, dwExpandedSize)) { + value = Encoding::ToNarrow(expanded); + valueset = true; + } + } + } + + RegCloseKey(hKey); + } + + return valueset; +} +#else +bool SystemTools::ReadRegistryValue(const std::string&, std::string&, KeyWOW64) +{ + return false; +} +#endif + +// Write a registry value. +// Example : +// HKEY_LOCAL_MACHINE\SOFTWARE\Python\PythonCore\2.1\InstallPath +// => will set the data of the "default" value of the key +// HKEY_LOCAL_MACHINE\SOFTWARE\Scriptics\Tcl\8.4;Root +// => will set the data of the "Root" value of the key + +#if defined(_WIN32) && !defined(__CYGWIN__) +bool SystemTools::WriteRegistryValue(const std::string& key, + const std::string& value, KeyWOW64 view) +{ + HKEY primaryKey = HKEY_CURRENT_USER; + std::string second; + std::string valuename; + if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) { + return false; + } + + HKEY hKey; + DWORD dwDummy; + wchar_t lpClass[] = L""; + if (RegCreateKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0, lpClass, + REG_OPTION_NON_VOLATILE, + SystemToolsMakeRegistryMode(KEY_WRITE, view), nullptr, + &hKey, &dwDummy) != ERROR_SUCCESS) { + return false; + } + + std::wstring wvalue = Encoding::ToWide(value); + if (RegSetValueExW(hKey, Encoding::ToWide(valuename).c_str(), 0, REG_SZ, + (CONST BYTE*)wvalue.c_str(), + (DWORD)(sizeof(wchar_t) * (wvalue.size() + 1))) == + ERROR_SUCCESS) { + return true; + } + return false; +} +#else +bool SystemTools::WriteRegistryValue(const std::string&, const std::string&, + KeyWOW64) +{ + return false; +} +#endif + +// Delete a registry value. 
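// A combined usage sketch for the Write/Read/Delete registry helpers
// (illustration only; "kwsys" stands in for the configured namespace, the
// key is a placeholder, and the default KeyWOW64 view argument is assumed):
#if 0
#include <iostream>
#include <kwsys/SystemTools.hxx>

int main()
{
  const std::string key =
    "HKEY_CURRENT_USER\\Software\\ExampleVendor\\ExampleApp;InstallDir";

  kwsys::SystemTools::WriteRegistryValue(key, "C:/ExampleApp");

  std::string value;
  if (kwsys::SystemTools::ReadRegistryValue(key, value)) {
    std::cout << value << std::endl; // prints "C:/ExampleApp"
  }

  kwsys::SystemTools::DeleteRegistryValue(key);
  return 0;
}
#endif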
+// Example : +// HKEY_LOCAL_MACHINE\SOFTWARE\Python\PythonCore\2.1\InstallPath +// => will delete the data of the "default" value of the key +// HKEY_LOCAL_MACHINE\SOFTWARE\Scriptics\Tcl\8.4;Root +// => will delete the data of the "Root" value of the key + +#if defined(_WIN32) && !defined(__CYGWIN__) +bool SystemTools::DeleteRegistryValue(const std::string& key, KeyWOW64 view) +{ + HKEY primaryKey = HKEY_CURRENT_USER; + std::string second; + std::string valuename; + if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) { + return false; + } + + HKEY hKey; + if (RegOpenKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0, + SystemToolsMakeRegistryMode(KEY_WRITE, view), + &hKey) != ERROR_SUCCESS) { + return false; + } else { + if (RegDeleteValue(hKey, (LPTSTR)valuename.c_str()) == ERROR_SUCCESS) { + RegCloseKey(hKey); + return true; + } + } + return false; +} +#else +bool SystemTools::DeleteRegistryValue(const std::string&, KeyWOW64) +{ + return false; +} +#endif + +bool SystemTools::SameFile(const std::string& file1, const std::string& file2) +{ +#ifdef _WIN32 + HANDLE hFile1, hFile2; + + hFile1 = + CreateFileW(Encoding::ToWide(file1).c_str(), GENERIC_READ, FILE_SHARE_READ, + nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr); + hFile2 = + CreateFileW(Encoding::ToWide(file2).c_str(), GENERIC_READ, FILE_SHARE_READ, + nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr); + if (hFile1 == INVALID_HANDLE_VALUE || hFile2 == INVALID_HANDLE_VALUE) { + if (hFile1 != INVALID_HANDLE_VALUE) { + CloseHandle(hFile1); + } + if (hFile2 != INVALID_HANDLE_VALUE) { + CloseHandle(hFile2); + } + return false; + } + + BY_HANDLE_FILE_INFORMATION fiBuf1; + BY_HANDLE_FILE_INFORMATION fiBuf2; + GetFileInformationByHandle(hFile1, &fiBuf1); + GetFileInformationByHandle(hFile2, &fiBuf2); + CloseHandle(hFile1); + CloseHandle(hFile2); + return (fiBuf1.dwVolumeSerialNumber == fiBuf2.dwVolumeSerialNumber && + fiBuf1.nFileIndexHigh == fiBuf2.nFileIndexHigh && + fiBuf1.nFileIndexLow == fiBuf2.nFileIndexLow); +#else + struct stat fileStat1, fileStat2; + if (stat(file1.c_str(), &fileStat1) == 0 && + stat(file2.c_str(), &fileStat2) == 0) { + // see if the files are the same file + // check the device inode and size + if (memcmp(&fileStat2.st_dev, &fileStat1.st_dev, + sizeof(fileStat1.st_dev)) == 0 && + memcmp(&fileStat2.st_ino, &fileStat1.st_ino, + sizeof(fileStat1.st_ino)) == 0 && + fileStat2.st_size == fileStat1.st_size) { + return true; + } + } + return false; +#endif +} + +bool SystemTools::PathExists(const std::string& path) +{ + if (path.empty()) { + return false; + } +#if defined(__CYGWIN__) + // Convert path to native windows path if possible. + char winpath[MAX_PATH]; + if (SystemTools::PathCygwinToWin32(path.c_str(), winpath)) { + return (GetFileAttributesA(winpath) != INVALID_FILE_ATTRIBUTES); + } + struct stat st; + return lstat(path.c_str(), &st) == 0; +#elif defined(_WIN32) + return (GetFileAttributesW(Encoding::ToWindowsExtendedPath(path).c_str()) != + INVALID_FILE_ATTRIBUTES); +#else + struct stat st; + return lstat(path.c_str(), &st) == 0; +#endif +} + +bool SystemTools::FileExists(const char* filename) +{ + if (!filename) { + return false; + } + return SystemTools::FileExists(std::string(filename)); +} + +bool SystemTools::FileExists(const std::string& filename) +{ + if (filename.empty()) { + return false; + } +#if defined(__CYGWIN__) + // Convert filename to native windows path if possible. 
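// A behavioral sketch contrasting PathExists() and FileExists() above:
// PathExists() uses lstat()/attribute queries and so reports a dangling
// symlink, while FileExists() checks readability of the target (the path is
// a placeholder; "kwsys" stands in for the configured namespace):
#if 0
#include <iostream>
#include <kwsys/SystemTools.hxx>

int main()
{
  // Suppose /tmp/dangling is a symlink whose target has been removed.
  std::cout << "PathExists: "
            << kwsys::SystemTools::PathExists("/tmp/dangling") // 1
            << "  FileExists: "
            << kwsys::SystemTools::FileExists("/tmp/dangling") // 0
            << std::endl;
  return 0;
}
#endif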
+ char winpath[MAX_PATH]; + if (SystemTools::PathCygwinToWin32(filename.c_str(), winpath)) { + return (GetFileAttributesA(winpath) != INVALID_FILE_ATTRIBUTES); + } + return access(filename.c_str(), R_OK) == 0; +#elif defined(_WIN32) + DWORD attr = + GetFileAttributesW(Encoding::ToWindowsExtendedPath(filename).c_str()); + if (attr == INVALID_FILE_ATTRIBUTES) { + return false; + } + + if (attr & FILE_ATTRIBUTE_REPARSE_POINT) { + // Using 0 instead of GENERIC_READ as it allows reading of file attributes + // even if we do not have permission to read the file itself + HANDLE handle = + CreateFileW(Encoding::ToWindowsExtendedPath(filename).c_str(), 0, 0, + nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr); + + if (handle == INVALID_HANDLE_VALUE) { + return false; + } + + CloseHandle(handle); + } + + return true; +#else +// SCO OpenServer 5.0.7/3.2's command has 711 permission. +# if defined(_SCO_DS) + return access(filename.c_str(), F_OK) == 0; +# else + return access(filename.c_str(), R_OK) == 0; +# endif +#endif +} + +bool SystemTools::FileExists(const char* filename, bool isFile) +{ + if (!filename) { + return false; + } + return SystemTools::FileExists(std::string(filename), isFile); +} + +bool SystemTools::FileExists(const std::string& filename, bool isFile) +{ + if (SystemTools::FileExists(filename)) { + // If isFile is set return not FileIsDirectory, + // so this will only be true if it is a file + return !isFile || !SystemTools::FileIsDirectory(filename); + } + return false; +} + +bool SystemTools::TestFileAccess(const char* filename, + TestFilePermissions permissions) +{ + if (!filename) { + return false; + } + return SystemTools::TestFileAccess(std::string(filename), permissions); +} + +bool SystemTools::TestFileAccess(const std::string& filename, + TestFilePermissions permissions) +{ + if (filename.empty()) { + return false; + } +#if defined(_WIN32) && !defined(__CYGWIN__) + // If execute set, change to read permission (all files on Windows + // are executable if they are readable). The CRT will always fail + // if you pass an execute bit. + if (permissions & TEST_FILE_EXECUTE) { + permissions &= ~TEST_FILE_EXECUTE; + permissions |= TEST_FILE_READ; + } + return _waccess(Encoding::ToWindowsExtendedPath(filename).c_str(), + permissions) == 0; +#else + return access(filename.c_str(), permissions) == 0; +#endif +} + +int SystemTools::Stat(const char* path, SystemTools::Stat_t* buf) +{ + if (!path) { + errno = EFAULT; + return -1; + } + return SystemTools::Stat(std::string(path), buf); +} + +int SystemTools::Stat(const std::string& path, SystemTools::Stat_t* buf) +{ + if (path.empty()) { + errno = ENOENT; + return -1; + } +#if defined(_WIN32) && !defined(__CYGWIN__) + // Ideally we should use Encoding::ToWindowsExtendedPath to support + // long paths, but _wstat64 rejects paths with '?' in them, thinking + // they are wildcards. 
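+  // (The extended-path form is prefixed with "\\?\", and that literal '?'
+  // is what _wstat64 would mistake for a wildcard.)  A caller-side sketch,
+  // assuming the default "kwsys" namespace:
+  //
+  //   kwsys::SystemTools::Stat_t st;
+  //   if (kwsys::SystemTools::Stat("notes.txt", &st) == 0) {
+  //     // st.st_size and st.st_mtime are valid here
+  //   }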
+ std::wstring const& wpath = Encoding::ToWide(path); +# if defined(__BORLANDC__) + return _wstati64(wpath.c_str(), buf); +# else + return _wstat64(wpath.c_str(), buf); +# endif +#else + return stat(path.c_str(), buf); +#endif +} + +#ifdef __CYGWIN__ +bool SystemTools::PathCygwinToWin32(const char* path, char* win32_path) +{ + auto itr = SystemTools::Statics->Cyg2Win32Map.find(path); + if (itr != SystemTools::Statics->Cyg2Win32Map.end()) { + strncpy(win32_path, itr->second.c_str(), MAX_PATH); + } else { + if (cygwin_conv_path(CCP_POSIX_TO_WIN_A, path, win32_path, MAX_PATH) != + 0) { + win32_path[0] = 0; + } + SystemTools::Statics->Cyg2Win32Map.insert( + SystemToolsStatic::StringMap::value_type(path, win32_path)); + } + return win32_path[0] != 0; +} +#endif + +bool SystemTools::Touch(const std::string& filename, bool create) +{ + if (!SystemTools::PathExists(filename)) { + if (create) { + FILE* file = Fopen(filename, "a+b"); + if (file) { + fclose(file); + return true; + } + return false; + } else { + return true; + } + } +#if defined(_WIN32) && !defined(__CYGWIN__) + HANDLE h = CreateFileW(Encoding::ToWindowsExtendedPath(filename).c_str(), + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, 0, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0); + if (!h) { + return false; + } + FILETIME mtime; + GetSystemTimeAsFileTime(&mtime); + if (!SetFileTime(h, 0, 0, &mtime)) { + CloseHandle(h); + return false; + } + CloseHandle(h); +#elif KWSYS_CXX_HAS_UTIMENSAT + // utimensat is only available on newer Unixes and macOS 10.13+ + if (utimensat(AT_FDCWD, filename.c_str(), nullptr, 0) < 0) { + return false; + } +#else + // fall back to utimes + if (utimes(filename.c_str(), nullptr) < 0) { + return false; + } +#endif + return true; +} + +bool SystemTools::FileTimeCompare(const std::string& f1, const std::string& f2, + int* result) +{ + // Default to same time. + *result = 0; +#if !defined(_WIN32) || defined(__CYGWIN__) + // POSIX version. Use stat function to get file modification time. + struct stat s1; + if (stat(f1.c_str(), &s1) != 0) { + return false; + } + struct stat s2; + if (stat(f2.c_str(), &s2) != 0) { + return false; + } +# if KWSYS_CXX_STAT_HAS_ST_MTIM + // Compare using nanosecond resolution. + if (s1.st_mtim.tv_sec < s2.st_mtim.tv_sec) { + *result = -1; + } else if (s1.st_mtim.tv_sec > s2.st_mtim.tv_sec) { + *result = 1; + } else if (s1.st_mtim.tv_nsec < s2.st_mtim.tv_nsec) { + *result = -1; + } else if (s1.st_mtim.tv_nsec > s2.st_mtim.tv_nsec) { + *result = 1; + } +# elif KWSYS_CXX_STAT_HAS_ST_MTIMESPEC + // Compare using nanosecond resolution. + if (s1.st_mtimespec.tv_sec < s2.st_mtimespec.tv_sec) { + *result = -1; + } else if (s1.st_mtimespec.tv_sec > s2.st_mtimespec.tv_sec) { + *result = 1; + } else if (s1.st_mtimespec.tv_nsec < s2.st_mtimespec.tv_nsec) { + *result = -1; + } else if (s1.st_mtimespec.tv_nsec > s2.st_mtimespec.tv_nsec) { + *result = 1; + } +# else + // Compare using 1 second resolution. + if (s1.st_mtime < s2.st_mtime) { + *result = -1; + } else if (s1.st_mtime > s2.st_mtime) { + *result = 1; + } +# endif +#else + // Windows version. Get the modification time from extended file attributes. + WIN32_FILE_ATTRIBUTE_DATA f1d; + WIN32_FILE_ATTRIBUTE_DATA f2d; + if (!GetFileAttributesExW(Encoding::ToWindowsExtendedPath(f1).c_str(), + GetFileExInfoStandard, &f1d)) { + return false; + } + if (!GetFileAttributesExW(Encoding::ToWindowsExtendedPath(f2).c_str(), + GetFileExInfoStandard, &f2d)) { + return false; + } + + // Compare the file times using resolution provided by system call. 
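+  // (CompareFileTime() already returns -1, 0, or +1, matching the -1/0/+1
+  // convention of the POSIX branch above.)  A usage sketch, assuming the
+  // default "kwsys" namespace:
+  //
+  //   int order = 0;
+  //   if (kwsys::SystemTools::FileTimeCompare("app.o", "app.c", &order) &&
+  //       order < 0) {
+  //     // "app.o" is older than "app.c", e.g. trigger a rebuild
+  //   }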
+  *result = (int)CompareFileTime(&f1d.ftLastWriteTime, &f2d.ftLastWriteTime);
+#endif
+  return true;
+}
+
+// Return a capitalized string (i.e. the first letter is uppercased, all
+// others are lowercased)
+std::string SystemTools::Capitalized(const std::string& s)
+{
+  std::string n;
+  if (s.empty()) {
+    return n;
+  }
+  n.resize(s.size());
+  n[0] = static_cast<char>(toupper(s[0]));
+  for (size_t i = 1; i < s.size(); i++) {
+    n[i] = static_cast<char>(tolower(s[i]));
+  }
+  return n;
+}
+
+// Return capitalized words
+std::string SystemTools::CapitalizedWords(const std::string& s)
+{
+  std::string n(s);
+  for (size_t i = 0; i < s.size(); i++) {
+#if defined(_MSC_VER) && defined(_MT) && defined(_DEBUG)
+    // MS has an assert that will fail if s[i] < 0; setting
+    // LC_CTYPE using setlocale() does *not* help. Painful.
+    if ((int)s[i] >= 0 && isalpha(s[i]) &&
+        (i == 0 || ((int)s[i - 1] >= 0 && isspace(s[i - 1]))))
+#else
+    if (isalpha(s[i]) && (i == 0 || isspace(s[i - 1])))
+#endif
+    {
+      n[i] = static_cast<char>(toupper(s[i]));
+    }
+  }
+  return n;
+}
+
+// Return uncapitalized words
+std::string SystemTools::UnCapitalizedWords(const std::string& s)
+{
+  std::string n(s);
+  for (size_t i = 0; i < s.size(); i++) {
+#if defined(_MSC_VER) && defined(_MT) && defined(_DEBUG)
+    // MS has an assert that will fail if s[i] < 0; setting
+    // LC_CTYPE using setlocale() does *not* help. Painful.
+    if ((int)s[i] >= 0 && isalpha(s[i]) &&
+        (i == 0 || ((int)s[i - 1] >= 0 && isspace(s[i - 1]))))
+#else
+    if (isalpha(s[i]) && (i == 0 || isspace(s[i - 1])))
+#endif
+    {
+      n[i] = static_cast<char>(tolower(s[i]));
+    }
+  }
+  return n;
+}
+
+// only works for words with at least two letters
+std::string SystemTools::AddSpaceBetweenCapitalizedWords(const std::string& s)
+{
+  std::string n;
+  if (!s.empty()) {
+    n.reserve(s.size());
+    n += s[0];
+    for (size_t i = 1; i < s.size(); i++) {
+      if (isupper(s[i]) && !isspace(s[i - 1]) && !isupper(s[i - 1])) {
+        n += ' ';
+      }
+      n += s[i];
+    }
+  }
+  return n;
+}
+
+char* SystemTools::AppendStrings(const char* str1, const char* str2)
+{
+  if (!str1) {
+    return SystemTools::DuplicateString(str2);
+  }
+  if (!str2) {
+    return SystemTools::DuplicateString(str1);
+  }
+  size_t len1 = strlen(str1);
+  char* newstr = new char[len1 + strlen(str2) + 1];
+  if (!newstr) {
+    return nullptr;
+  }
+  strcpy(newstr, str1);
+  strcat(newstr + len1, str2);
+  return newstr;
+}
+
+char* SystemTools::AppendStrings(const char* str1, const char* str2,
+                                 const char* str3)
+{
+  if (!str1) {
+    return SystemTools::AppendStrings(str2, str3);
+  }
+  if (!str2) {
+    return SystemTools::AppendStrings(str1, str3);
+  }
+  if (!str3) {
+    return SystemTools::AppendStrings(str1, str2);
+  }
+
+  size_t len1 = strlen(str1), len2 = strlen(str2);
+  char* newstr = new char[len1 + len2 + strlen(str3) + 1];
+  if (!newstr) {
+    return nullptr;
+  }
+  strcpy(newstr, str1);
+  strcat(newstr + len1, str2);
+  strcat(newstr + len1 + len2, str3);
+  return newstr;
+}
+
+// Return a lower case string
+std::string SystemTools::LowerCase(const std::string& s)
+{
+  std::string n;
+  n.resize(s.size());
+  for (size_t i = 0; i < s.size(); i++) {
+    n[i] = static_cast<char>(tolower(s[i]));
+  }
+  return n;
+}
+
+// Return an upper case string
+std::string SystemTools::UpperCase(const std::string& s)
+{
+  std::string n;
+  n.resize(s.size());
+  for (size_t i = 0; i < s.size(); i++) {
+    n[i] = static_cast<char>(toupper(s[i]));
+  }
+  return n;
+}
+
+// Count char in string
+size_t SystemTools::CountChar(const char* str, char c)
+{
+  size_t count = 0;
+
+  if (str) {
+    while (*str) {
+      if
(*str == c) { + ++count; + } + ++str; + } + } + return count; +} + +// Remove chars in string +char* SystemTools::RemoveChars(const char* str, const char* toremove) +{ + if (!str) { + return nullptr; + } + char* clean_str = new char[strlen(str) + 1]; + char* ptr = clean_str; + while (*str) { + const char* str2 = toremove; + while (*str2 && *str != *str2) { + ++str2; + } + if (!*str2) { + *ptr++ = *str; + } + ++str; + } + *ptr = '\0'; + return clean_str; +} + +// Remove chars in string +char* SystemTools::RemoveCharsButUpperHex(const char* str) +{ + if (!str) { + return nullptr; + } + char* clean_str = new char[strlen(str) + 1]; + char* ptr = clean_str; + while (*str) { + if ((*str >= '0' && *str <= '9') || (*str >= 'A' && *str <= 'F')) { + *ptr++ = *str; + } + ++str; + } + *ptr = '\0'; + return clean_str; +} + +// Replace chars in string +char* SystemTools::ReplaceChars(char* str, const char* toreplace, + char replacement) +{ + if (str) { + char* ptr = str; + while (*ptr) { + const char* ptr2 = toreplace; + while (*ptr2) { + if (*ptr == *ptr2) { + *ptr = replacement; + } + ++ptr2; + } + ++ptr; + } + } + return str; +} + +// Returns if string starts with another string +bool SystemTools::StringStartsWith(const char* str1, const char* str2) +{ + if (!str1 || !str2) { + return false; + } + size_t len1 = strlen(str1), len2 = strlen(str2); + return len1 >= len2 && !strncmp(str1, str2, len2) ? true : false; +} + +// Returns if string starts with another string +bool SystemTools::StringStartsWith(const std::string& str1, const char* str2) +{ + if (!str2) { + return false; + } + size_t len1 = str1.size(), len2 = strlen(str2); + return len1 >= len2 && !strncmp(str1.c_str(), str2, len2) ? true : false; +} + +// Returns if string ends with another string +bool SystemTools::StringEndsWith(const char* str1, const char* str2) +{ + if (!str1 || !str2) { + return false; + } + size_t len1 = strlen(str1), len2 = strlen(str2); + return len1 >= len2 && !strncmp(str1 + (len1 - len2), str2, len2) ? true + : false; +} + +// Returns if string ends with another string +bool SystemTools::StringEndsWith(const std::string& str1, const char* str2) +{ + if (!str2) { + return false; + } + size_t len1 = str1.size(), len2 = strlen(str2); + return len1 >= len2 && !strncmp(str1.c_str() + (len1 - len2), str2, len2) + ? 
true + : false; +} + +// Returns a pointer to the last occurrence of str2 in str1 +const char* SystemTools::FindLastString(const char* str1, const char* str2) +{ + if (!str1 || !str2) { + return nullptr; + } + + size_t len1 = strlen(str1), len2 = strlen(str2); + if (len1 >= len2) { + const char* ptr = str1 + len1 - len2; + do { + if (!strncmp(ptr, str2, len2)) { + return ptr; + } + } while (ptr-- != str1); + } + + return nullptr; +} + +// Duplicate string +char* SystemTools::DuplicateString(const char* str) +{ + if (str) { + char* newstr = new char[strlen(str) + 1]; + return strcpy(newstr, str); + } + return nullptr; +} + +// Return a cropped string +std::string SystemTools::CropString(const std::string& s, size_t max_len) +{ + if (!s.size() || max_len == 0 || max_len >= s.size()) { + return s; + } + + std::string n; + n.reserve(max_len); + + size_t middle = max_len / 2; + + n += s.substr(0, middle); + n += s.substr(s.size() - (max_len - middle)); + + if (max_len > 2) { + n[middle] = '.'; + if (max_len > 3) { + n[middle - 1] = '.'; + if (max_len > 4) { + n[middle + 1] = '.'; + } + } + } + + return n; +} + +std::vector SystemTools::SplitString(const std::string& p, + char sep, bool isPath) +{ + std::string path = p; + std::vector paths; + if (path.empty()) { + return paths; + } + if (isPath && path[0] == '/') { + path.erase(path.begin()); + paths.push_back("/"); + } + std::string::size_type pos1 = 0; + std::string::size_type pos2 = path.find(sep, pos1 + 1); + while (pos2 != std::string::npos) { + paths.push_back(path.substr(pos1, pos2 - pos1)); + pos1 = pos2 + 1; + pos2 = path.find(sep, pos1 + 1); + } + paths.push_back(path.substr(pos1, pos2 - pos1)); + + return paths; +} + +int SystemTools::EstimateFormatLength(const char* format, va_list ap) +{ + if (!format) { + return 0; + } + + // Quick-hack attempt at estimating the length of the string. + // Should never under-estimate. + + // Start with the length of the format string itself. + + size_t length = strlen(format); + + // Increase the length for every argument in the format. + + const char* cur = format; + while (*cur) { + if (*cur++ == '%') { + // Skip "%%" since it doesn't correspond to a va_arg. + if (*cur != '%') { + while (!int(isalpha(*cur))) { + ++cur; + } + switch (*cur) { + case 's': { + // Check the length of the string. + char* s = va_arg(ap, char*); + if (s) { + length += strlen(s); + } + } break; + case 'e': + case 'f': + case 'g': { + // Assume the argument contributes no more than 64 characters. + length += 64; + + // Eat the argument. + static_cast(va_arg(ap, double)); + } break; + default: { + // Assume the argument contributes no more than 64 characters. + length += 64; + + // Eat the argument. + static_cast(va_arg(ap, int)); + } break; + } + } + + // Move past the characters just tested. 
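+      // (The isalpha() scan above walks over any flags, field width, and
+      // precision characters, stopping at the first letter: a length
+      // modifier or the conversion character.  Only "%s" measures the
+      // actual argument; the other cases just consume it and add a flat
+      // 64 characters to the estimate.)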
+ ++cur; + } + } + + return static_cast(length); +} + +std::string SystemTools::EscapeChars(const char* str, + const char* chars_to_escape, + char escape_char) +{ + std::string n; + if (str) { + if (!chars_to_escape || !*chars_to_escape) { + n.append(str); + } else { + n.reserve(strlen(str)); + while (*str) { + const char* ptr = chars_to_escape; + while (*ptr) { + if (*str == *ptr) { + n += escape_char; + break; + } + ++ptr; + } + n += *str; + ++str; + } + } + } + return n; +} + +#ifdef __VMS +static void ConvertVMSToUnix(std::string& path) +{ + std::string::size_type rootEnd = path.find(":["); + std::string::size_type pathEnd = path.find("]"); + if (rootEnd != std::string::npos) { + std::string root = path.substr(0, rootEnd); + std::string pathPart = path.substr(rootEnd + 2, pathEnd - rootEnd - 2); + const char* pathCString = pathPart.c_str(); + const char* pos0 = pathCString; + for (std::string::size_type pos = 0; *pos0; ++pos) { + if (*pos0 == '.') { + pathPart[pos] = '/'; + } + pos0++; + } + path = "/" + root + "/" + pathPart; + } +} +#endif + +// convert windows slashes to unix slashes +void SystemTools::ConvertToUnixSlashes(std::string& path) +{ + if (path.empty()) { + return; + } + + const char* pathCString = path.c_str(); + bool hasDoubleSlash = false; +#ifdef __VMS + ConvertVMSToUnix(path); +#else + const char* pos0 = pathCString; + for (std::string::size_type pos = 0; *pos0; ++pos) { + if (*pos0 == '\\') { + path[pos] = '/'; + } + + // Also, reuse the loop to check for slash followed by another slash + if (!hasDoubleSlash && *(pos0 + 1) == '/' && *(pos0 + 2) == '/') { +# ifdef _WIN32 + // However, on windows if the first characters are both slashes, + // then keep them that way, so that network paths can be handled. + if (pos > 0) { + hasDoubleSlash = true; + } +# else + hasDoubleSlash = true; +# endif + } + + pos0++; + } + + if (hasDoubleSlash) { + SystemTools::ReplaceString(path, "//", "/"); + } +#endif + + // remove any trailing slash + // if there is a tilda ~ then replace it with HOME + pathCString = path.c_str(); + if (pathCString[0] == '~' && + (pathCString[1] == '/' || pathCString[1] == '\0')) { + std::string homeEnv; + if (SystemTools::GetEnv("HOME", homeEnv)) { + path.replace(0, 1, homeEnv); + } + } +#ifdef HAVE_GETPWNAM + else if (pathCString[0] == '~') { + std::string::size_type idx = path.find_first_of("/\0"); + std::string user = path.substr(1, idx - 1); + passwd* pw = getpwnam(user.c_str()); + if (pw) { + path.replace(0, idx, pw->pw_dir); + } + } +#endif + // remove trailing slash if the path is more than + // a single / + pathCString = path.c_str(); + size_t size = path.size(); + if (size > 1 && path.back() == '/') { + // if it is c:/ then do not remove the trailing slash + if (!((size == 3 && pathCString[1] == ':'))) { + path.resize(size - 1); + } + } +} + +#ifdef _WIN32 +std::wstring SystemTools::ConvertToWindowsExtendedPath( + const std::string& source) +{ + return Encoding::ToWindowsExtendedPath(source); +} +#endif + +// change // to /, and escape any spaces in the path +std::string SystemTools::ConvertToUnixOutputPath(const std::string& path) +{ + std::string ret = path; + + // remove // except at the beginning might be a cygwin drive + std::string::size_type pos = 1; + while ((pos = ret.find("//", pos)) != std::string::npos) { + ret.erase(pos, 1); + } + // escape spaces and () in the path + if (ret.find_first_of(" ") != std::string::npos) { + std::string result; + char lastch = 1; + for (const char* ch = ret.c_str(); *ch != '\0'; ++ch) { + // if it is 
already escaped then don't try to escape it again + if ((*ch == ' ') && lastch != '\\') { + result += '\\'; + } + result += *ch; + lastch = *ch; + } + ret = result; + } + return ret; +} + +std::string SystemTools::ConvertToOutputPath(const std::string& path) +{ +#if defined(_WIN32) && !defined(__CYGWIN__) + return SystemTools::ConvertToWindowsOutputPath(path); +#else + return SystemTools::ConvertToUnixOutputPath(path); +#endif +} + +// remove double slashes not at the start +std::string SystemTools::ConvertToWindowsOutputPath(const std::string& path) +{ + std::string ret; + // make it big enough for all of path and double quotes + ret.reserve(path.size() + 3); + // put path into the string + ret = path; + std::string::size_type pos = 0; + // first convert all of the slashes + while ((pos = ret.find('/', pos)) != std::string::npos) { + ret[pos] = '\\'; + pos++; + } + // check for really small paths + if (ret.size() < 2) { + return ret; + } + // now clean up a bit and remove double slashes + // Only if it is not the first position in the path which is a network + // path on windows + pos = 1; // start at position 1 + if (ret[0] == '\"') { + pos = 2; // if the string is already quoted then start at 2 + if (ret.size() < 3) { + return ret; + } + } + while ((pos = ret.find("\\\\", pos)) != std::string::npos) { + ret.erase(pos, 1); + } + // now double quote the path if it has spaces in it + // and is not already double quoted + if (ret.find(' ') != std::string::npos && ret[0] != '\"') { + ret.insert(static_cast(0), + static_cast(1), '\"'); + ret.append(1, '\"'); + } + return ret; +} + +/** + * Append the filename from the path source to the directory name dir. + */ +static std::string FileInDir(const std::string& source, const std::string& dir) +{ + std::string new_destination = dir; + SystemTools::ConvertToUnixSlashes(new_destination); + return new_destination + '/' + SystemTools::GetFilenameName(source); +} + +bool SystemTools::CopyFileIfDifferent(const std::string& source, + const std::string& destination) +{ + // special check for a destination that is a directory + // FilesDiffer does not handle file to directory compare + if (SystemTools::FileIsDirectory(destination)) { + const std::string new_destination = FileInDir(source, destination); + return SystemTools::CopyFileIfDifferent(source, new_destination); + } + // source and destination are files so do a copy if they + // are different + if (SystemTools::FilesDiffer(source, destination)) { + return SystemTools::CopyFileAlways(source, destination); + } + // at this point the files must be the same so return true + return true; +} + +#define KWSYS_ST_BUFFER 4096 + +bool SystemTools::FilesDiffer(const std::string& source, + const std::string& destination) +{ + +#if defined(_WIN32) + WIN32_FILE_ATTRIBUTE_DATA statSource; + if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(source).c_str(), + GetFileExInfoStandard, &statSource) == 0) { + return true; + } + + WIN32_FILE_ATTRIBUTE_DATA statDestination; + if (GetFileAttributesExW( + Encoding::ToWindowsExtendedPath(destination).c_str(), + GetFileExInfoStandard, &statDestination) == 0) { + return true; + } + + if (statSource.nFileSizeHigh != statDestination.nFileSizeHigh || + statSource.nFileSizeLow != statDestination.nFileSizeLow) { + return true; + } + + if (statSource.nFileSizeHigh == 0 && statSource.nFileSizeLow == 0) { + return false; + } + off_t nleft = + ((__int64)statSource.nFileSizeHigh << 32) + statSource.nFileSizeLow; + +#else + + struct stat statSource; + if (stat(source.c_str(), 
&statSource) != 0) { + return true; + } + + struct stat statDestination; + if (stat(destination.c_str(), &statDestination) != 0) { + return true; + } + + if (statSource.st_size != statDestination.st_size) { + return true; + } + + if (statSource.st_size == 0) { + return false; + } + off_t nleft = statSource.st_size; +#endif + +#if defined(_WIN32) + kwsys::ifstream finSource(source.c_str(), (std::ios::binary | std::ios::in)); + kwsys::ifstream finDestination(destination.c_str(), + (std::ios::binary | std::ios::in)); +#else + kwsys::ifstream finSource(source.c_str()); + kwsys::ifstream finDestination(destination.c_str()); +#endif + if (!finSource || !finDestination) { + return true; + } + + // Compare the files a block at a time. + char source_buf[KWSYS_ST_BUFFER]; + char dest_buf[KWSYS_ST_BUFFER]; + while (nleft > 0) { + // Read a block from each file. + std::streamsize nnext = (nleft > KWSYS_ST_BUFFER) + ? KWSYS_ST_BUFFER + : static_cast(nleft); + finSource.read(source_buf, nnext); + finDestination.read(dest_buf, nnext); + + // If either failed to read assume they are different. + if (static_cast(finSource.gcount()) != nnext || + static_cast(finDestination.gcount()) != nnext) { + return true; + } + + // If this block differs the file differs. + if (memcmp(static_cast(source_buf), + static_cast(dest_buf), + static_cast(nnext)) != 0) { + return true; + } + + // Update the byte count remaining. + nleft -= nnext; + } + + // No differences found. + return false; +} + +bool SystemTools::TextFilesDiffer(const std::string& path1, + const std::string& path2) +{ + kwsys::ifstream if1(path1.c_str()); + kwsys::ifstream if2(path2.c_str()); + if (!if1 || !if2) { + return true; + } + + for (;;) { + std::string line1, line2; + bool hasData1 = GetLineFromStream(if1, line1); + bool hasData2 = GetLineFromStream(if2, line2); + if (hasData1 != hasData2) { + return true; + } + if (!hasData1) { + break; + } + if (line1 != line2) { + return true; + } + } + return false; +} + +/** + * Blockwise copy source to destination file + */ +static bool CopyFileContentBlockwise(const std::string& source, + const std::string& destination) +{ +// Open files +#if defined(_WIN32) + kwsys::ifstream fin( + Encoding::ToNarrow(Encoding::ToWindowsExtendedPath(source)).c_str(), + std::ios::in | std::ios::binary); +#else + kwsys::ifstream fin(source.c_str(), std::ios::in | std::ios::binary); +#endif + if (!fin) { + return false; + } + + // try and remove the destination file so that read only destination files + // can be written to. + // If the remove fails continue so that files in read only directories + // that do not allow file removal can be modified. + SystemTools::RemoveFile(destination); + +#if defined(_WIN32) + kwsys::ofstream fout( + Encoding::ToNarrow(Encoding::ToWindowsExtendedPath(destination)).c_str(), + std::ios::out | std::ios::trunc | std::ios::binary); +#else + kwsys::ofstream fout(destination.c_str(), + std::ios::out | std::ios::trunc | std::ios::binary); +#endif + if (!fout) { + return false; + } + + // This copy loop is very sensitive on certain platforms with + // slightly broken stream libraries (like HPUX). Normally, it is + // incorrect to not check the error condition on the fin.read() + // before using the data, but the fin.gcount() will be zero if an + // error occurred. Therefore, the loop should be safe everywhere. 
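+  // (A terser alternative would be "fout << fin.rdbuf();", but the explicit
+  // fixed-size loop below keeps gcount() available as the EOF / short-read
+  // signal, which matters on the platforms described above.)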
+ while (fin) { + const int bufferSize = 4096; + char buffer[bufferSize]; + + fin.read(buffer, bufferSize); + if (fin.gcount()) { + fout.write(buffer, fin.gcount()); + } else { + break; + } + } + + // Make sure the operating system has finished writing the file + // before closing it. This will ensure the file is finished before + // the check below. + fout.flush(); + + fin.close(); + fout.close(); + + if (!fout) { + return false; + } + + return true; +} + +/** + * Clone the source file to the destination file + * + * If available, the Linux FICLONE ioctl is used to create a check + * copy-on-write clone of the source file. + * + * The method returns false for the following cases: + * - The code has not been compiled on Linux or the ioctl was unknown + * - The source and destination is on different file systems + * - The underlying filesystem does not support file cloning + * - An unspecified error occurred + */ +static bool CloneFileContent(const std::string& source, + const std::string& destination) +{ +#if defined(__linux) && defined(FICLONE) + int in = open(source.c_str(), O_RDONLY); + if (in < 0) { + return false; + } + + SystemTools::RemoveFile(destination); + + int out = + open(destination.c_str(), O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR); + if (out < 0) { + close(in); + return false; + } + + int result = ioctl(out, FICLONE, in); + close(in); + close(out); + + if (result < 0) { + return false; + } + + return true; +#else + (void)source; + (void)destination; + return false; +#endif +} + +/** + * Copy a file named by "source" to the file named by "destination". + */ +bool SystemTools::CopyFileAlways(const std::string& source, + const std::string& destination) +{ + mode_t perm = 0; + bool perms = SystemTools::GetPermissions(source, perm); + std::string real_destination = destination; + + if (SystemTools::FileIsDirectory(source)) { + SystemTools::MakeDirectory(destination); + } else { + // If destination is a directory, try to create a file with the same + // name as the source in that directory. + + std::string destination_dir; + if (SystemTools::FileIsDirectory(destination)) { + destination_dir = real_destination; + SystemTools::ConvertToUnixSlashes(real_destination); + real_destination += '/'; + std::string source_name = source; + real_destination += SystemTools::GetFilenameName(source_name); + } else { + destination_dir = SystemTools::GetFilenamePath(destination); + } + // If files are the same do not copy + if (SystemTools::SameFile(source, real_destination)) { + return true; + } + + // Create destination directory + + SystemTools::MakeDirectory(destination_dir); + + if (!CloneFileContent(source, real_destination)) { + // if cloning did not succeed, fall back to blockwise copy + if (!CopyFileContentBlockwise(source, real_destination)) { + return false; + } + } + } + if (perms) { + if (!SystemTools::SetPermissions(real_destination, perm)) { + return false; + } + } + return true; +} + +bool SystemTools::CopyAFile(const std::string& source, + const std::string& destination, bool always) +{ + if (always) { + return SystemTools::CopyFileAlways(source, destination); + } else { + return SystemTools::CopyFileIfDifferent(source, destination); + } +} + +/** + * Copy a directory content from "source" directory to the directory named by + * "destination". 
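+ *
+ * For illustration (default "kwsys" namespace assumed):
+ *
+ *   kwsys::SystemTools::CopyADirectory("srcdir", "dstdir", false);
+ *
+ * creates "dstdir" if needed, copies the regular files it finds, and
+ * recurses into subdirectories, skipping the "." and ".." entries.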
+ */ +bool SystemTools::CopyADirectory(const std::string& source, + const std::string& destination, bool always) +{ + Directory dir; + if (dir.Load(source) == 0) { + return false; + } + size_t fileNum; + if (!SystemTools::MakeDirectory(destination)) { + return false; + } + for (fileNum = 0; fileNum < dir.GetNumberOfFiles(); ++fileNum) { + if (strcmp(dir.GetFile(static_cast(fileNum)), ".") && + strcmp(dir.GetFile(static_cast(fileNum)), "..")) { + std::string fullPath = source; + fullPath += "/"; + fullPath += dir.GetFile(static_cast(fileNum)); + if (SystemTools::FileIsDirectory(fullPath)) { + std::string fullDestPath = destination; + fullDestPath += "/"; + fullDestPath += dir.GetFile(static_cast(fileNum)); + if (!SystemTools::CopyADirectory(fullPath, fullDestPath, always)) { + return false; + } + } else { + if (!SystemTools::CopyAFile(fullPath, destination, always)) { + return false; + } + } + } + } + + return true; +} + +// return size of file; also returns zero if no file exists +unsigned long SystemTools::FileLength(const std::string& filename) +{ + unsigned long length = 0; +#ifdef _WIN32 + WIN32_FILE_ATTRIBUTE_DATA fs; + if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(filename).c_str(), + GetFileExInfoStandard, &fs) != 0) { + /* To support the full 64-bit file size, use fs.nFileSizeHigh + * and fs.nFileSizeLow to construct the 64 bit size + + length = ((__int64)fs.nFileSizeHigh << 32) + fs.nFileSizeLow; + */ + length = static_cast(fs.nFileSizeLow); + } +#else + struct stat fs; + if (stat(filename.c_str(), &fs) == 0) { + length = static_cast(fs.st_size); + } +#endif + return length; +} + +int SystemTools::Strucmp(const char* l, const char* r) +{ + int lc; + int rc; + do { + lc = tolower(*l++); + rc = tolower(*r++); + } while (lc == rc && lc); + return lc - rc; +} + +// return file's modified time +long int SystemTools::ModifiedTime(const std::string& filename) +{ + long int mt = 0; +#ifdef _WIN32 + WIN32_FILE_ATTRIBUTE_DATA fs; + if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(filename).c_str(), + GetFileExInfoStandard, &fs) != 0) { + mt = windows_filetime_to_posix_time(fs.ftLastWriteTime); + } +#else + struct stat fs; + if (stat(filename.c_str(), &fs) == 0) { + mt = static_cast(fs.st_mtime); + } +#endif + return mt; +} + +// return file's creation time +long int SystemTools::CreationTime(const std::string& filename) +{ + long int ct = 0; +#ifdef _WIN32 + WIN32_FILE_ATTRIBUTE_DATA fs; + if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(filename).c_str(), + GetFileExInfoStandard, &fs) != 0) { + ct = windows_filetime_to_posix_time(fs.ftCreationTime); + } +#else + struct stat fs; + if (stat(filename.c_str(), &fs) == 0) { + ct = fs.st_ctime >= 0 ? static_cast(fs.st_ctime) : 0; + } +#endif + return ct; +} + +std::string SystemTools::GetLastSystemError() +{ + int e = errno; + return strerror(e); +} + +bool SystemTools::RemoveFile(const std::string& source) +{ +#ifdef _WIN32 + std::wstring const& ws = Encoding::ToWindowsExtendedPath(source); + if (DeleteFileW(ws.c_str())) { + return true; + } + DWORD err = GetLastError(); + if (err == ERROR_FILE_NOT_FOUND || err == ERROR_PATH_NOT_FOUND) { + return true; + } + if (err != ERROR_ACCESS_DENIED) { + return false; + } + /* The file may be read-only. Try adding write permission. 
*/ + mode_t mode; + if (!SystemTools::GetPermissions(source, mode) || + !SystemTools::SetPermissions(source, S_IWRITE)) { + SetLastError(err); + return false; + } + + const DWORD DIRECTORY_SOFT_LINK_ATTRS = + FILE_ATTRIBUTE_DIRECTORY | FILE_ATTRIBUTE_REPARSE_POINT; + DWORD attrs = GetFileAttributesW(ws.c_str()); + if (attrs != INVALID_FILE_ATTRIBUTES && + (attrs & DIRECTORY_SOFT_LINK_ATTRS) == DIRECTORY_SOFT_LINK_ATTRS && + RemoveDirectoryW(ws.c_str())) { + return true; + } + if (DeleteFileW(ws.c_str()) || GetLastError() == ERROR_FILE_NOT_FOUND || + GetLastError() == ERROR_PATH_NOT_FOUND) { + return true; + } + /* Try to restore the original permissions. */ + SystemTools::SetPermissions(source, mode); + SetLastError(err); + return false; +#else + return unlink(source.c_str()) == 0 || errno == ENOENT; +#endif +} + +bool SystemTools::RemoveADirectory(const std::string& source) +{ + // Add write permission to the directory so we can modify its + // content to remove files and directories from it. + mode_t mode; + if (SystemTools::GetPermissions(source, mode)) { +#if defined(_WIN32) && !defined(__CYGWIN__) + mode |= S_IWRITE; +#else + mode |= S_IWUSR; +#endif + SystemTools::SetPermissions(source, mode); + } + + Directory dir; + dir.Load(source); + size_t fileNum; + for (fileNum = 0; fileNum < dir.GetNumberOfFiles(); ++fileNum) { + if (strcmp(dir.GetFile(static_cast(fileNum)), ".") && + strcmp(dir.GetFile(static_cast(fileNum)), "..")) { + std::string fullPath = source; + fullPath += "/"; + fullPath += dir.GetFile(static_cast(fileNum)); + if (SystemTools::FileIsDirectory(fullPath) && + !SystemTools::FileIsSymlink(fullPath)) { + if (!SystemTools::RemoveADirectory(fullPath)) { + return false; + } + } else { + if (!SystemTools::RemoveFile(fullPath)) { + return false; + } + } + } + } + + return (Rmdir(source) == 0); +} + +/** + */ +size_t SystemTools::GetMaximumFilePathLength() +{ + return KWSYS_SYSTEMTOOLS_MAXPATH; +} + +/** + * Find the file the given name. Searches the given path and then + * the system search path. Returns the full path to the file if it is + * found. Otherwise, the empty string is returned. + */ +std::string SystemToolsStatic::FindName( + const std::string& name, const std::vector& userPaths, + bool no_system_path) +{ + // Add the system search path to our path first + std::vector path; + if (!no_system_path) { + SystemTools::GetPath(path, "CMAKE_FILE_PATH"); + SystemTools::GetPath(path); + } + // now add the additional paths + path.reserve(path.size() + userPaths.size()); + path.insert(path.end(), userPaths.begin(), userPaths.end()); + // now look for the file + std::string tryPath; + for (std::string const& p : path) { + tryPath = p; + if (tryPath.empty() || tryPath.back() != '/') { + tryPath += '/'; + } + tryPath += name; + if (SystemTools::FileExists(tryPath)) { + return tryPath; + } + } + // Couldn't find the file. + return ""; +} + +/** + * Find the file the given name. Searches the given path and then + * the system search path. Returns the full path to the file if it is + * found. Otherwise, the empty string is returned. + */ +std::string SystemTools::FindFile(const std::string& name, + const std::vector& userPaths, + bool no_system_path) +{ + std::string tryPath = + SystemToolsStatic::FindName(name, userPaths, no_system_path); + if (!tryPath.empty() && !SystemTools::FileIsDirectory(tryPath)) { + return SystemTools::CollapseFullPath(tryPath); + } + // Couldn't find the file. + return ""; +} + +/** + * Find the directory the given name. 
Searches the given path and then + * the system search path. Returns the full path to the directory if it is + * found. Otherwise, the empty string is returned. + */ +std::string SystemTools::FindDirectory( + const std::string& name, const std::vector& userPaths, + bool no_system_path) +{ + std::string tryPath = + SystemToolsStatic::FindName(name, userPaths, no_system_path); + if (!tryPath.empty() && SystemTools::FileIsDirectory(tryPath)) { + return SystemTools::CollapseFullPath(tryPath); + } + // Couldn't find the file. + return ""; +} + +/** + * Find the executable with the given name. Searches the given path and then + * the system search path. Returns the full path to the executable if it is + * found. Otherwise, the empty string is returned. + */ +std::string SystemTools::FindProgram(const char* nameIn, + const std::vector& userPaths, + bool no_system_path) +{ + if (!nameIn || !*nameIn) { + return ""; + } + return SystemTools::FindProgram(std::string(nameIn), userPaths, + no_system_path); +} + +std::string SystemTools::FindProgram(const std::string& name, + const std::vector& userPaths, + bool no_system_path) +{ + std::string tryPath; + +#if defined(_WIN32) || defined(__CYGWIN__) || defined(__MINGW32__) + std::vector extensions; + // check to see if the name already has a .xxx at + // the end of it + // on windows try .com then .exe + if (name.size() <= 3 || name[name.size() - 4] != '.') { + extensions.emplace_back(".com"); + extensions.emplace_back(".exe"); + + // first try with extensions if the os supports them + for (std::string const& ext : extensions) { + tryPath = name; + tryPath += ext; + if (SystemTools::FileExists(tryPath, true)) { + return SystemTools::CollapseFullPath(tryPath); + } + } + } +#endif + + // now try just the name + if (SystemTools::FileExists(name, true)) { + return SystemTools::CollapseFullPath(name); + } + // now construct the path + std::vector path; + // Add the system search path to our path. + if (!no_system_path) { + SystemTools::GetPath(path); + } + // now add the additional paths + path.reserve(path.size() + userPaths.size()); + path.insert(path.end(), userPaths.begin(), userPaths.end()); + // Add a trailing slash to all paths to aid the search process. + for (std::string& p : path) { + if (p.empty() || p.back() != '/') { + p += '/'; + } + } + // Try each path + for (std::string& p : path) { +#ifdef _WIN32 + // Remove double quotes from the path on windows + SystemTools::ReplaceString(p, "\"", ""); +#endif +#if defined(_WIN32) || defined(__CYGWIN__) || defined(__MINGW32__) + // first try with extensions + for (std::string const& ext : extensions) { + tryPath = p; + tryPath += name; + tryPath += ext; + if (SystemTools::FileExists(tryPath, true)) { + return SystemTools::CollapseFullPath(tryPath); + } + } +#endif + // now try it without them + tryPath = p; + tryPath += name; + if (SystemTools::FileExists(tryPath, true)) { + return SystemTools::CollapseFullPath(tryPath); + } + } + // Couldn't find the program. + return ""; +} + +std::string SystemTools::FindProgram(const std::vector& names, + const std::vector& path, + bool noSystemPath) +{ + for (std::string const& name : names) { + // Try to find the program. + std::string result = SystemTools::FindProgram(name, path, noSystemPath); + if (!result.empty()) { + return result; + } + } + return ""; +} + +/** + * Find the library with the given name. Searches the given path and then + * the system search path. Returns the full path to the library if it is + * found. Otherwise, the empty string is returned. 
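+ * (As the body below shows, platform-specific candidates are tried in each
+ * search directory: "<name>.framework" on Apple, "<name>.lib" on native
+ * Windows, and "lib<name>" with the .so, .a, .sl, .dylib and .dll suffixes
+ * elsewhere.)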
+ */ +std::string SystemTools::FindLibrary(const std::string& name, + const std::vector& userPaths) +{ + // See if the executable exists as written. + if (SystemTools::FileExists(name, true)) { + return SystemTools::CollapseFullPath(name); + } + + // Add the system search path to our path. + std::vector path; + SystemTools::GetPath(path); + // now add the additional paths + path.reserve(path.size() + userPaths.size()); + path.insert(path.end(), userPaths.begin(), userPaths.end()); + // Add a trailing slash to all paths to aid the search process. + for (std::string& p : path) { + if (p.empty() || p.back() != '/') { + p += '/'; + } + } + std::string tryPath; + for (std::string const& p : path) { +#if defined(__APPLE__) + tryPath = p; + tryPath += name; + tryPath += ".framework"; + if (SystemTools::FileIsDirectory(tryPath)) { + return SystemTools::CollapseFullPath(tryPath); + } +#endif +#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__MINGW32__) + tryPath = p; + tryPath += name; + tryPath += ".lib"; + if (SystemTools::FileExists(tryPath, true)) { + return SystemTools::CollapseFullPath(tryPath); + } +#else + tryPath = p; + tryPath += "lib"; + tryPath += name; + tryPath += ".so"; + if (SystemTools::FileExists(tryPath, true)) { + return SystemTools::CollapseFullPath(tryPath); + } + tryPath = p; + tryPath += "lib"; + tryPath += name; + tryPath += ".a"; + if (SystemTools::FileExists(tryPath, true)) { + return SystemTools::CollapseFullPath(tryPath); + } + tryPath = p; + tryPath += "lib"; + tryPath += name; + tryPath += ".sl"; + if (SystemTools::FileExists(tryPath, true)) { + return SystemTools::CollapseFullPath(tryPath); + } + tryPath = p; + tryPath += "lib"; + tryPath += name; + tryPath += ".dylib"; + if (SystemTools::FileExists(tryPath, true)) { + return SystemTools::CollapseFullPath(tryPath); + } + tryPath = p; + tryPath += "lib"; + tryPath += name; + tryPath += ".dll"; + if (SystemTools::FileExists(tryPath, true)) { + return SystemTools::CollapseFullPath(tryPath); + } +#endif + } + + // Couldn't find the library. + return ""; +} + +std::string SystemTools::GetRealPath(const std::string& path, + std::string* errorMessage) +{ + std::string ret; + Realpath(path, ret, errorMessage); + return ret; +} + +bool SystemTools::FileIsDirectory(const std::string& inName) +{ + if (inName.empty()) { + return false; + } + size_t length = inName.size(); + const char* name = inName.c_str(); + + // Remove any trailing slash from the name except in a root component. + char local_buffer[KWSYS_SYSTEMTOOLS_MAXPATH]; + std::string string_buffer; + size_t last = length - 1; + if (last > 0 && (name[last] == '/' || name[last] == '\\') && + strcmp(name, "/") != 0 && name[last - 1] != ':') { + if (last < sizeof(local_buffer)) { + memcpy(local_buffer, name, last); + local_buffer[last] = '\0'; + name = local_buffer; + } else { + string_buffer.append(name, last); + name = string_buffer.c_str(); + } + } + +// Now check the file node type. 
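+// (Windows checks FILE_ATTRIBUTE_DIRECTORY; other platforms use stat() and
+// S_ISDIR(), which follows symbolic links.)  Usage sketch, assuming the
+// default "kwsys" namespace:
+//
+//   if (kwsys::SystemTools::FileIsDirectory("/usr/local/")) {
+//     // the trailing slash is tolerated; it was stripped above
+//   }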
+#if defined(_WIN32) + DWORD attr = + GetFileAttributesW(Encoding::ToWindowsExtendedPath(name).c_str()); + if (attr != INVALID_FILE_ATTRIBUTES) { + return (attr & FILE_ATTRIBUTE_DIRECTORY) != 0; +#else + struct stat fs; + if (stat(name, &fs) == 0) { + return S_ISDIR(fs.st_mode); +#endif + } else { + return false; + } +} + +bool SystemTools::FileIsSymlink(const std::string& name) +{ +#if defined(_WIN32) + std::wstring path = Encoding::ToWindowsExtendedPath(name); + DWORD attr = GetFileAttributesW(path.c_str()); + if (attr != INVALID_FILE_ATTRIBUTES) { + if ((attr & FILE_ATTRIBUTE_REPARSE_POINT) != 0) { + // FILE_ATTRIBUTE_REPARSE_POINT means: + // * a file or directory that has an associated reparse point, or + // * a file that is a symbolic link. + HANDLE hFile = CreateFileW( + path.c_str(), GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING, + FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, nullptr); + if (hFile == INVALID_HANDLE_VALUE) { + return false; + } + byte buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; + DWORD bytesReturned = 0; + if (!DeviceIoControl(hFile, FSCTL_GET_REPARSE_POINT, nullptr, 0, buffer, + MAXIMUM_REPARSE_DATA_BUFFER_SIZE, &bytesReturned, + nullptr)) { + CloseHandle(hFile); + // Since FILE_ATTRIBUTE_REPARSE_POINT is set this file must be + // a symbolic link if it is not a reparse point. + return GetLastError() == ERROR_NOT_A_REPARSE_POINT; + } + CloseHandle(hFile); + ULONG reparseTag = + reinterpret_cast(&buffer[0])->ReparseTag; + return (reparseTag == IO_REPARSE_TAG_SYMLINK) || + (reparseTag == IO_REPARSE_TAG_MOUNT_POINT); + } + return false; + } else { + return false; + } +#else + struct stat fs; + if (lstat(name.c_str(), &fs) == 0) { + return S_ISLNK(fs.st_mode); + } else { + return false; + } +#endif +} + +bool SystemTools::FileIsFIFO(const std::string& name) +{ +#if defined(_WIN32) + HANDLE hFile = + CreateFileW(Encoding::ToWide(name).c_str(), GENERIC_READ, FILE_SHARE_READ, + nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr); + if (hFile == INVALID_HANDLE_VALUE) { + return false; + } + const DWORD type = GetFileType(hFile); + CloseHandle(hFile); + return type == FILE_TYPE_PIPE; +#else + struct stat fs; + if (lstat(name.c_str(), &fs) == 0) { + return S_ISFIFO(fs.st_mode); + } else { + return false; + } +#endif +} + +#if defined(_WIN32) && !defined(__CYGWIN__) +bool SystemTools::CreateSymlink(const std::string&, const std::string&) +{ + return false; +} +#else +bool SystemTools::CreateSymlink(const std::string& origName, + const std::string& newName) +{ + return symlink(origName.c_str(), newName.c_str()) >= 0; +} +#endif + +#if defined(_WIN32) && !defined(__CYGWIN__) +bool SystemTools::ReadSymlink(const std::string&, std::string&) +{ + return false; +} +#else +bool SystemTools::ReadSymlink(const std::string& newName, + std::string& origName) +{ + char buf[KWSYS_SYSTEMTOOLS_MAXPATH + 1]; + int count = static_cast( + readlink(newName.c_str(), buf, KWSYS_SYSTEMTOOLS_MAXPATH)); + if (count >= 0) { + // Add null-terminator. 
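+    // (readlink() does not null-terminate, and count is capped at
+    // KWSYS_SYSTEMTOOLS_MAXPATH, so the terminator is added by hand.)
+    // Usage sketch, assuming the default "kwsys" namespace:
+    //
+    //   std::string target;
+    //   if (kwsys::SystemTools::ReadSymlink("/usr/bin/cc", target)) {
+    //     // target now holds the link contents, e.g. "gcc"
+    //   }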
+ buf[count] = 0; + origName = buf; + return true; + } else { + return false; + } +} +#endif + +int SystemTools::ChangeDirectory(const std::string& dir) +{ + return Chdir(dir); +} + +std::string SystemTools::GetCurrentWorkingDirectory(bool collapse) +{ + char buf[2048]; + const char* cwd = Getcwd(buf, 2048); + std::string path; + if (cwd) { + path = cwd; + } + if (collapse) { + return SystemTools::CollapseFullPath(path); + } + return path; +} + +std::string SystemTools::GetProgramPath(const std::string& in_name) +{ + std::string dir, file; + SystemTools::SplitProgramPath(in_name, dir, file); + return dir; +} + +bool SystemTools::SplitProgramPath(const std::string& in_name, + std::string& dir, std::string& file, bool) +{ + dir = in_name; + file = ""; + SystemTools::ConvertToUnixSlashes(dir); + + if (!SystemTools::FileIsDirectory(dir)) { + std::string::size_type slashPos = dir.rfind("/"); + if (slashPos != std::string::npos) { + file = dir.substr(slashPos + 1); + dir = dir.substr(0, slashPos); + } else { + file = dir; + dir = ""; + } + } + if (!(dir.empty()) && !SystemTools::FileIsDirectory(dir)) { + std::string oldDir = in_name; + SystemTools::ConvertToUnixSlashes(oldDir); + dir = in_name; + return false; + } + return true; +} + +bool SystemTools::FindProgramPath(const char* argv0, std::string& pathOut, + std::string& errorMsg, const char* exeName, + const char* buildDir, + const char* installPrefix) +{ + std::vector failures; + std::string self = argv0 ? argv0 : ""; + failures.push_back(self); + SystemTools::ConvertToUnixSlashes(self); + self = SystemTools::FindProgram(self); + if (!SystemTools::FileExists(self)) { + if (buildDir) { + std::string intdir = "."; +#ifdef CMAKE_INTDIR + intdir = CMAKE_INTDIR; +#endif + self = buildDir; + self += "/bin/"; + self += intdir; + self += "/"; + self += exeName; + self += SystemTools::GetExecutableExtension(); + } + } + if (installPrefix) { + if (!SystemTools::FileExists(self)) { + failures.push_back(self); + self = installPrefix; + self += "/bin/"; + self += exeName; + } + } + if (!SystemTools::FileExists(self)) { + failures.push_back(self); + std::ostringstream msg; + msg << "Can not find the command line program "; + if (exeName) { + msg << exeName; + } + msg << "\n"; + if (argv0) { + msg << " argv[0] = \"" << argv0 << "\"\n"; + } + msg << " Attempted paths:\n"; + for (std::string const& ff : failures) { + msg << " \"" << ff << "\"\n"; + } + errorMsg = msg.str(); + return false; + } + pathOut = self; + return true; +} + +std::string SystemTools::CollapseFullPath(const std::string& in_relative) +{ + return SystemTools::CollapseFullPath(in_relative, nullptr); +} + +#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP +void SystemTools::AddTranslationPath(const std::string& a, + const std::string& b) +{ + std::string path_a = a; + std::string path_b = b; + SystemTools::ConvertToUnixSlashes(path_a); + SystemTools::ConvertToUnixSlashes(path_b); + // First check this is a directory path, since we don't want the table to + // grow too fat + if (SystemTools::FileIsDirectory(path_a)) { + // Make sure the path is a full path and does not contain no '..' + // Ken--the following code is incorrect. .. 
can be in a valid path + // for example /home/martink/MyHubba...Hubba/Src + if (SystemTools::FileIsFullPath(path_b) && + path_b.find("..") == std::string::npos) { + // Before inserting make sure path ends with '/' + if (!path_a.empty() && path_a.back() != '/') { + path_a += '/'; + } + if (!path_b.empty() && path_b.back() != '/') { + path_b += '/'; + } + if (!(path_a == path_b)) { + SystemTools::Statics->TranslationMap.insert( + SystemToolsStatic::StringMap::value_type(std::move(path_a), + std::move(path_b))); + } + } + } +} + +void SystemTools::AddKeepPath(const std::string& dir) +{ + std::string cdir; + Realpath(SystemTools::CollapseFullPath(dir), cdir); + SystemTools::AddTranslationPath(cdir, dir); +} + +void SystemTools::CheckTranslationPath(std::string& path) +{ + // Do not translate paths that are too short to have meaningful + // translations. + if (path.size() < 2) { + return; + } + + // Always add a trailing slash before translation. It does not + // matter if this adds an extra slash, but we do not want to + // translate part of a directory (like the foo part of foo-dir). + path += '/'; + + // In case a file was specified we still have to go through this: + // Now convert any path found in the table back to the one desired: + for (auto const& pair : SystemTools::Statics->TranslationMap) { + // We need to check of the path is a substring of the other path + if (path.find(pair.first) == 0) { + path = path.replace(0, pair.first.size(), pair.second); + } + } + + // Remove the trailing slash we added before. + path.pop_back(); +} +#endif + +static void SystemToolsAppendComponents( + std::vector& out_components, + std::vector::iterator first, + std::vector::iterator last) +{ + static const std::string up = ".."; + static const std::string cur = "."; + for (std::vector::const_iterator i = first; i != last; ++i) { + if (*i == up) { + // Remove the previous component if possible. Ignore ../ components + // that try to go above the root. Keep ../ components if they are + // at the beginning of a relative path (base path is relative). + if (out_components.size() > 1 && out_components.back() != up) { + out_components.resize(out_components.size() - 1); + } else if (!out_components.empty() && out_components[0].empty()) { + out_components.emplace_back(std::move(*i)); + } + } else if (!i->empty() && *i != cur) { + out_components.emplace_back(std::move(*i)); + } + } +} + +std::string SystemTools::CollapseFullPath(const std::string& in_path, + const char* in_base) +{ + // Use the current working directory as a base path. + char buf[2048]; + const char* res_in_base = in_base; + if (!res_in_base) { + if (const char* cwd = Getcwd(buf, 2048)) { + res_in_base = cwd; + } else { + res_in_base = ""; + } + } + + return SystemTools::CollapseFullPath(in_path, std::string(res_in_base)); +} + +std::string SystemTools::CollapseFullPath(const std::string& in_path, + const std::string& in_base) +{ + // Collect the output path components. + std::vector out_components; + + // Split the input path components. + std::vector path_components; + SystemTools::SplitPath(in_path, path_components); + out_components.reserve(path_components.size()); + + // If the input path is relative, start with a base path. + if (path_components[0].empty()) { + std::vector base_components; + // Use the given base path. + SystemTools::SplitPath(in_base, base_components); + + // Append base path components to the output path. 
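+  // (base_components[0] is the root component, typically "/" or "C:/"; the
+  // remaining base components go through SystemToolsAppendComponents() so
+  // that "." and ".." inside the base collapse as well.)  For illustration,
+  // assuming the default "kwsys" namespace:
+  //
+  //   kwsys::SystemTools::CollapseFullPath("../lib", "/usr/local/bin")
+  //   // yields "/usr/local/lib"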
+ out_components.push_back(base_components[0]); + SystemToolsAppendComponents(out_components, base_components.begin() + 1, + base_components.end()); + } + + // Append input path components to the output path. + SystemToolsAppendComponents(out_components, path_components.begin(), + path_components.end()); + + // Transform the path back to a string. + std::string newPath = SystemTools::JoinPath(out_components); + +#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP + // Update the translation table with this potentially new path. I am not + // sure why this line is here, it seems really questionable, but yet I + // would put good money that if I remove it something will break, basically + // from what I can see it created a mapping from the collapsed path, to be + // replaced by the input path, which almost completely does the opposite of + // this function, the only thing preventing this from happening a lot is + // that if the in_path has a .. in it, then it is not added to the + // translation table. So for most calls this either does nothing due to the + // .. or it adds a translation between identical paths as nothing was + // collapsed, so I am going to try to comment it out, and see what hits the + // fan, hopefully quickly. + // Commented out line below: + // SystemTools::AddTranslationPath(newPath, in_path); + + SystemTools::CheckTranslationPath(newPath); +#endif +#ifdef _WIN32 + newPath = SystemTools::Statics->GetActualCaseForPathCached(newPath); + SystemTools::ConvertToUnixSlashes(newPath); +#endif + // Return the reconstructed path. + return newPath; +} + +// compute the relative path from here to there +std::string SystemTools::RelativePath(const std::string& local, + const std::string& remote) +{ + if (!SystemTools::FileIsFullPath(local)) { + return ""; + } + if (!SystemTools::FileIsFullPath(remote)) { + return ""; + } + + std::string l = SystemTools::CollapseFullPath(local); + std::string r = SystemTools::CollapseFullPath(remote); + + // split up both paths into arrays of strings using / as a separator + std::vector localSplit = SystemTools::SplitString(l, '/', true); + std::vector remoteSplit = + SystemTools::SplitString(r, '/', true); + std::vector + commonPath; // store shared parts of path in this array + std::vector finalPath; // store the final relative path here + // count up how many matching directory names there are from the start + unsigned int sameCount = 0; + while (((sameCount <= (localSplit.size() - 1)) && + (sameCount <= (remoteSplit.size() - 1))) && +// for Windows and Apple do a case insensitive string compare +#if defined(_WIN32) || defined(__APPLE__) + SystemTools::Strucmp(localSplit[sameCount].c_str(), + remoteSplit[sameCount].c_str()) == 0 +#else + localSplit[sameCount] == remoteSplit[sameCount] +#endif + ) { + // put the common parts of the path into the commonPath array + commonPath.push_back(localSplit[sameCount]); + // erase the common parts of the path from the original path arrays + localSplit[sameCount] = ""; + remoteSplit[sameCount] = ""; + sameCount++; + } + + // If there is nothing in common at all then just return the full + // path. This is the case only on windows when the paths have + // different drive letters. On unix two full paths always at least + // have the root "/" in common so we will return a relative path + // that passes through the root directory. 
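+  // For illustration, assuming the default "kwsys" namespace:
+  //
+  //   kwsys::SystemTools::RelativePath("/usr/local/bin", "/usr/share/doc")
+  //   // yields "../../share/doc"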
+ if (sameCount == 0) { + return remote; + } + + // for each entry that is not common in the local path + // add a ../ to the finalpath array, this gets us out of the local + // path into the remote dir + for (std::string const& lp : localSplit) { + if (!lp.empty()) { + finalPath.emplace_back("../"); + } + } + // for each entry that is not common in the remote path add it + // to the final path. + for (std::string const& rp : remoteSplit) { + if (!rp.empty()) { + finalPath.push_back(rp); + } + } + std::string relativePath; // result string + // now turn the array of directories into a unix path by puttint / + // between each entry that does not already have one + for (std::string const& fp : finalPath) { + if (!relativePath.empty() && relativePath.back() != '/') { + relativePath += '/'; + } + relativePath += fp; + } + return relativePath; +} + +std::string SystemTools::GetActualCaseForPath(const std::string& p) +{ +#ifdef _WIN32 + return SystemToolsStatic::GetCasePathName(p); +#else + return p; +#endif +} + +const char* SystemTools::SplitPathRootComponent(const std::string& p, + std::string* root) +{ + // Identify the root component. + const char* c = p.c_str(); + if ((c[0] == '/' && c[1] == '/') || (c[0] == '\\' && c[1] == '\\')) { + // Network path. + if (root) { + *root = "//"; + } + c += 2; + } else if (c[0] == '/' || c[0] == '\\') { + // Unix path (or Windows path w/out drive letter). + if (root) { + *root = "/"; + } + c += 1; + } else if (c[0] && c[1] == ':' && (c[2] == '/' || c[2] == '\\')) { + // Windows path. + if (root) { + (*root) = "_:/"; + (*root)[0] = c[0]; + } + c += 3; + } else if (c[0] && c[1] == ':') { + // Path relative to a windows drive working directory. + if (root) { + (*root) = "_:"; + (*root)[0] = c[0]; + } + c += 2; + } else if (c[0] == '~') { + // Home directory. The returned root should always have a + // trailing slash so that appending components as + // c[0]c[1]/c[2]/... works. The remaining path returned should + // skip the first slash if it exists: + // + // "~" : root = "~/" , return "" + // "~/ : root = "~/" , return "" + // "~/x : root = "~/" , return "x" + // "~u" : root = "~u/", return "" + // "~u/" : root = "~u/", return "" + // "~u/x" : root = "~u/", return "x" + size_t n = 1; + while (c[n] && c[n] != '/') { + ++n; + } + if (root) { + root->assign(c, n); + *root += '/'; + } + if (c[n] == '/') { + ++n; + } + c += n; + } else { + // Relative path. + if (root) { + *root = ""; + } + } + + // Return the remaining path. + return c; +} + +void SystemTools::SplitPath(const std::string& p, + std::vector& components, + bool expand_home_dir) +{ + const char* c; + components.clear(); + + // Identify the root component. + { + std::string root; + c = SystemTools::SplitPathRootComponent(p, &root); + + // Expand home directory references if requested. + if (expand_home_dir && !root.empty() && root[0] == '~') { + std::string homedir; + root = root.substr(0, root.size() - 1); + if (root.size() == 1) { +#if defined(_WIN32) && !defined(__CYGWIN__) + if (!SystemTools::GetEnv("USERPROFILE", homedir)) +#endif + SystemTools::GetEnv("HOME", homedir); + } +#ifdef HAVE_GETPWNAM + else if (passwd* pw = getpwnam(root.c_str() + 1)) { + if (pw->pw_dir) { + homedir = pw->pw_dir; + } + } +#endif + if (!homedir.empty() && + (homedir.back() == '/' || homedir.back() == '\\')) { + homedir.resize(homedir.size() - 1); + } + SystemTools::SplitPath(homedir, components); + } else { + components.push_back(root); + } + } + + // Parse the remaining components. 
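+  // For illustration (comps being a std::vector<std::string>; empty
+  // components are kept verbatim here and filtered later by callers such as
+  // SystemToolsAppendComponents()):
+  //
+  //   SplitPath("/usr/local/bin/cc", comps);
+  //   // comps == {"/", "usr", "local", "bin", "cc"}
+  //   SplitPath("C:/temp/cache.txt", comps);
+  //   // comps == {"C:/", "temp", "cache.txt"}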
+  const char* first = c;
+  const char* last = first;
+  for (; *last; ++last) {
+    if (*last == '/' || *last == '\\') {
+      // End of a component.  Save it.
+      components.push_back(std::string(first, last));
+      first = last + 1;
+    }
+  }
+
+  // Save the last component unless there were no components.
+  if (last != c) {
+    components.push_back(std::string(first, last));
+  }
+}
+
+std::string SystemTools::JoinPath(const std::vector<std::string>& components)
+{
+  return SystemTools::JoinPath(components.begin(), components.end());
+}
+
+std::string SystemTools::JoinPath(
+  std::vector<std::string>::const_iterator first,
+  std::vector<std::string>::const_iterator last)
+{
+  // Construct result in a single string.
+  std::string result;
+  size_t len = 0;
+  for (std::vector<std::string>::const_iterator i = first; i != last; ++i) {
+    len += 1 + i->size();
+  }
+  result.reserve(len);
+
+  // The first two components do not add a slash.
+  if (first != last) {
+    result.append(*first++);
+  }
+  if (first != last) {
+    result.append(*first++);
+  }
+
+  // All remaining components are always separated with a slash.
+  while (first != last) {
+    result.push_back('/');
+    result.append((*first++));
+  }
+
+  // Return the concatenated result.
+  return result;
+}
+
+bool SystemTools::ComparePath(const std::string& c1, const std::string& c2)
+{
+#if defined(_WIN32) || defined(__APPLE__)
+# ifdef _MSC_VER
+  return _stricmp(c1.c_str(), c2.c_str()) == 0;
+# elif defined(__APPLE__) || defined(__GNUC__)
+  return strcasecmp(c1.c_str(), c2.c_str()) == 0;
+# else
+  return SystemTools::Strucmp(c1.c_str(), c2.c_str()) == 0;
+# endif
+#else
+  return c1 == c2;
+#endif
+}
+
+bool SystemTools::Split(const std::string& str,
+                        std::vector<std::string>& lines, char separator)
+{
+  std::string data(str);
+  std::string::size_type lpos = 0;
+  while (lpos < data.length()) {
+    std::string::size_type rpos = data.find_first_of(separator, lpos);
+    if (rpos == std::string::npos) {
+      // String ends at end of string without a separator.
+      lines.push_back(data.substr(lpos));
+      return false;
+    } else {
+      // String ends in a separator, remove the character.
+      lines.push_back(data.substr(lpos, rpos - lpos));
+    }
+    lpos = rpos + 1;
+  }
+  return true;
+}
+
+bool SystemTools::Split(const std::string& str,
+                        std::vector<std::string>& lines)
+{
+  std::string data(str);
+  std::string::size_type lpos = 0;
+  while (lpos < data.length()) {
+    std::string::size_type rpos = data.find_first_of('\n', lpos);
+    if (rpos == std::string::npos) {
+      // Line ends at end of string without a newline.
+      lines.push_back(data.substr(lpos));
+      return false;
+    }
+    if ((rpos > lpos) && (data[rpos - 1] == '\r')) {
+      // Line ends in a "\r\n" pair, remove both characters.
+      lines.push_back(data.substr(lpos, (rpos - 1) - lpos));
+    } else {
+      // Line ends in a "\n", remove the character.
+      lines.push_back(data.substr(lpos, rpos - lpos));
+    }
+    lpos = rpos + 1;
+  }
+  return true;
+}
+
+/**
+ * Return path of a full filename (no trailing slashes).
+ * Warning: returned path is converted to Unix slashes format.
+ */
+std::string SystemTools::GetFilenamePath(const std::string& filename)
+{
+  std::string fn = filename;
+  SystemTools::ConvertToUnixSlashes(fn);
+
+  std::string::size_type slash_pos = fn.rfind("/");
+  if (slash_pos != std::string::npos) {
+    std::string ret = fn.substr(0, slash_pos);
+    if (ret.size() == 2 && ret[1] == ':') {
+      return ret + '/';
+    }
+    if (ret.empty()) {
+      return "/";
+    }
+    return ret;
+  } else {
+    return "";
+  }
+}
+
+/**
+ * Return file name of a full filename (i.e. file name without path).
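+ * Example: "/foo/bar/baz.txt" yields "baz.txt".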
+ */ +std::string SystemTools::GetFilenameName(const std::string& filename) +{ +#if defined(_WIN32) || defined(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES) + const char* separators = "/\\"; +#else + char separators = '/'; +#endif + std::string::size_type slash_pos = filename.find_last_of(separators); + if (slash_pos != std::string::npos) { + return filename.substr(slash_pos + 1); + } else { + return filename; + } +} + +/** + * Return file extension of a full filename (dot included). + * Warning: this is the longest extension (for example: .tar.gz) + */ +std::string SystemTools::GetFilenameExtension(const std::string& filename) +{ + std::string name = SystemTools::GetFilenameName(filename); + std::string::size_type dot_pos = name.find('.'); + if (dot_pos != std::string::npos) { + return name.substr(dot_pos); + } else { + return ""; + } +} + +/** + * Return file extension of a full filename (dot included). + * Warning: this is the shortest extension (for example: .gz of .tar.gz) + */ +std::string SystemTools::GetFilenameLastExtension(const std::string& filename) +{ + std::string name = SystemTools::GetFilenameName(filename); + std::string::size_type dot_pos = name.rfind('.'); + if (dot_pos != std::string::npos) { + return name.substr(dot_pos); + } else { + return ""; + } +} + +/** + * Return file name without extension of a full filename (i.e. without path). + * Warning: it considers the longest extension (for example: .tar.gz) + */ +std::string SystemTools::GetFilenameWithoutExtension( + const std::string& filename) +{ + std::string name = SystemTools::GetFilenameName(filename); + std::string::size_type dot_pos = name.find('.'); + if (dot_pos != std::string::npos) { + return name.substr(0, dot_pos); + } else { + return name; + } +} + +/** + * Return file name without extension of a full filename (i.e. without path). + * Warning: it considers the last extension (for example: removes .gz + * from .tar.gz) + */ +std::string SystemTools::GetFilenameWithoutLastExtension( + const std::string& filename) +{ + std::string name = SystemTools::GetFilenameName(filename); + std::string::size_type dot_pos = name.rfind('.'); + if (dot_pos != std::string::npos) { + return name.substr(0, dot_pos); + } else { + return name; + } +} + +bool SystemTools::FileHasSignature(const char* filename, const char* signature, + long offset) +{ + if (!filename || !signature) { + return false; + } + + FILE* fp = Fopen(filename, "rb"); + if (!fp) { + return false; + } + + fseek(fp, offset, SEEK_SET); + + bool res = false; + size_t signature_len = strlen(signature); + char* buffer = new char[signature_len]; + + if (fread(buffer, 1, signature_len, fp) == signature_len) { + res = (!strncmp(buffer, signature, signature_len) ? 
true : false); + } + + delete[] buffer; + + fclose(fp); + return res; +} + +SystemTools::FileTypeEnum SystemTools::DetectFileType(const char* filename, + unsigned long length, + double percent_bin) +{ + if (!filename || percent_bin < 0) { + return SystemTools::FileTypeUnknown; + } + + if (SystemTools::FileIsDirectory(filename)) { + return SystemTools::FileTypeUnknown; + } + + FILE* fp = Fopen(filename, "rb"); + if (!fp) { + return SystemTools::FileTypeUnknown; + } + + // Allocate buffer and read bytes + + unsigned char* buffer = new unsigned char[length]; + size_t read_length = fread(buffer, 1, length, fp); + fclose(fp); + if (read_length == 0) { + delete[] buffer; + return SystemTools::FileTypeUnknown; + } + + // Loop over contents and count + + size_t text_count = 0; + + const unsigned char* ptr = buffer; + const unsigned char* buffer_end = buffer + read_length; + + while (ptr != buffer_end) { + if ((*ptr >= 0x20 && *ptr <= 0x7F) || *ptr == '\n' || *ptr == '\r' || + *ptr == '\t') { + text_count++; + } + ptr++; + } + + delete[] buffer; + + double current_percent_bin = (static_cast(read_length - text_count) / + static_cast(read_length)); + + if (current_percent_bin >= percent_bin) { + return SystemTools::FileTypeBinary; + } + + return SystemTools::FileTypeText; +} + +bool SystemTools::LocateFileInDir(const char* filename, const char* dir, + std::string& filename_found, + int try_filename_dirs) +{ + if (!filename || !dir) { + return false; + } + + // Get the basename of 'filename' + + std::string filename_base = SystemTools::GetFilenameName(filename); + + // Check if 'dir' is really a directory + // If win32 and matches something like C:, accept it as a dir + + std::string real_dir; + if (!SystemTools::FileIsDirectory(dir)) { +#if defined(_WIN32) + size_t dir_len = strlen(dir); + if (dir_len < 2 || dir[dir_len - 1] != ':') { +#endif + real_dir = SystemTools::GetFilenamePath(dir); + dir = real_dir.c_str(); +#if defined(_WIN32) + } +#endif + } + + // Try to find the file in 'dir' + + bool res = false; + if (!filename_base.empty() && dir) { + size_t dir_len = strlen(dir); + int need_slash = + (dir_len && dir[dir_len - 1] != '/' && dir[dir_len - 1] != '\\'); + + std::string temp = dir; + if (need_slash) { + temp += "/"; + } + temp += filename_base; + + if (SystemTools::FileExists(temp)) { + res = true; + filename_found = temp; + } + + // If not found, we can try harder by appending part of the file to + // to the directory to look inside. + // Example: if we were looking for /foo/bar/yo.txt in /d1/d2, then + // try to find yo.txt in /d1/d2/bar, then /d1/d2/foo/bar, etc. 
+ + else if (try_filename_dirs) { + std::string filename_dir(filename); + std::string filename_dir_base; + std::string filename_dir_bases; + do { + filename_dir = SystemTools::GetFilenamePath(filename_dir); + filename_dir_base = SystemTools::GetFilenameName(filename_dir); +#if defined(_WIN32) + if (filename_dir_base.empty() || filename_dir_base.back() == ':') +#else + if (filename_dir_base.empty()) +#endif + { + break; + } + + filename_dir_bases = filename_dir_base + "/" + filename_dir_bases; + + temp = dir; + if (need_slash) { + temp += "/"; + } + temp += filename_dir_bases; + + res = SystemTools::LocateFileInDir(filename_base.c_str(), temp.c_str(), + filename_found, 0); + + } while (!res && !filename_dir_base.empty()); + } + } + + return res; +} + +bool SystemTools::FileIsFullPath(const std::string& in_name) +{ + return SystemToolsStatic::FileIsFullPath(in_name.c_str(), in_name.size()); +} + +bool SystemTools::FileIsFullPath(const char* in_name) +{ + return SystemToolsStatic::FileIsFullPath( + in_name, in_name[0] ? (in_name[1] ? 2 : 1) : 0); +} + +bool SystemToolsStatic::FileIsFullPath(const char* in_name, size_t len) +{ +#if defined(_WIN32) || defined(__CYGWIN__) + // On Windows, the name must be at least two characters long. + if (len < 2) { + return false; + } + if (in_name[1] == ':') { + return true; + } + if (in_name[0] == '\\') { + return true; + } +#else + // On UNIX, the name must be at least one character long. + if (len < 1) { + return false; + } +#endif +#if !defined(_WIN32) + if (in_name[0] == '~') { + return true; + } +#endif + // On UNIX, the name must begin in a '/'. + // On Windows, if the name begins in a '/', then it is a full + // network path. + if (in_name[0] == '/') { + return true; + } + return false; +} + +bool SystemTools::GetShortPath(const std::string& path, std::string& shortPath) +{ +#if defined(_WIN32) && !defined(__CYGWIN__) + std::string tempPath = path; // create a buffer + + // if the path passed in has quotes around it, first remove the quotes + if (!path.empty() && path[0] == '"' && path.back() == '"') { + tempPath = path.substr(1, path.length() - 2); + } + + std::wstring wtempPath = Encoding::ToWide(tempPath); + DWORD ret = GetShortPathNameW(wtempPath.c_str(), nullptr, 0); + std::vector buffer(ret); + if (ret != 0) { + ret = GetShortPathNameW(wtempPath.c_str(), &buffer[0], + static_cast(buffer.size())); + } + + if (ret == 0) { + return false; + } else { + shortPath = Encoding::ToNarrow(&buffer[0]); + return true; + } +#else + shortPath = path; + return true; +#endif +} + +std::string SystemTools::GetCurrentDateTime(const char* format) +{ + char buf[1024]; + time_t t; + time(&t); + strftime(buf, sizeof(buf), format, localtime(&t)); + return std::string(buf); +} + +std::string SystemTools::MakeCidentifier(const std::string& s) +{ + std::string str(s); + if (str.find_first_of("0123456789") == 0) { + str = "_" + str; + } + + std::string permited_chars("_" + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789"); + std::string::size_type pos = 0; + while ((pos = str.find_first_not_of(permited_chars, pos)) != + std::string::npos) { + str[pos] = '_'; + } + return str; +} + +// Convenience function around std::getline which removes a trailing carriage +// return and can truncate the buffer as needed. Returns true +// if any data were read before the end-of-file was reached. +bool SystemTools::GetLineFromStream(std::istream& is, std::string& line, + bool* has_newline /* = 0 */, + long sizeLimit /* = -1 */) +{ + // Start with an empty line. 
+ line = ""; + + // Early short circuit return if stream is no good. Just return + // false and the empty line. (Probably means caller tried to + // create a file stream with a non-existent file name...) + // + if (!is) { + if (has_newline) { + *has_newline = false; + } + return false; + } + + std::getline(is, line); + bool haveData = !line.empty() || !is.eof(); + if (!line.empty()) { + // Avoid storing a carriage return character. + if (line.back() == '\r') { + line.resize(line.size() - 1); + } + + // if we read too much then truncate the buffer + if (sizeLimit >= 0 && line.size() >= static_cast(sizeLimit)) { + line.resize(sizeLimit); + } + } + + // Return the results. + if (has_newline) { + *has_newline = !is.eof(); + } + return haveData; +} + +int SystemTools::GetTerminalWidth() +{ + int width = -1; +#ifdef HAVE_TTY_INFO + struct winsize ws; + std::string columns; /* Unix98 environment variable */ + if (ioctl(1, TIOCGWINSZ, &ws) != -1 && ws.ws_col > 0 && ws.ws_row > 0) { + width = ws.ws_col; + } + if (!isatty(STDOUT_FILENO)) { + width = -1; + } + if (SystemTools::GetEnv("COLUMNS", columns) && !columns.empty()) { + long t; + char* endptr; + t = strtol(columns.c_str(), &endptr, 0); + if (endptr && !*endptr && (t > 0) && (t < 1000)) { + width = static_cast(t); + } + } + if (width < 9) { + width = -1; + } +#endif + return width; +} + +bool SystemTools::GetPermissions(const char* file, mode_t& mode) +{ + if (!file) { + return false; + } + return SystemTools::GetPermissions(std::string(file), mode); +} + +bool SystemTools::GetPermissions(const std::string& file, mode_t& mode) +{ +#if defined(_WIN32) + DWORD attr = + GetFileAttributesW(Encoding::ToWindowsExtendedPath(file).c_str()); + if (attr == INVALID_FILE_ATTRIBUTES) { + return false; + } + if ((attr & FILE_ATTRIBUTE_READONLY) != 0) { + mode = (_S_IREAD | (_S_IREAD >> 3) | (_S_IREAD >> 6)); + } else { + mode = (_S_IWRITE | (_S_IWRITE >> 3) | (_S_IWRITE >> 6)) | + (_S_IREAD | (_S_IREAD >> 3) | (_S_IREAD >> 6)); + } + if ((attr & FILE_ATTRIBUTE_DIRECTORY) != 0) { + mode |= S_IFDIR | (_S_IEXEC | (_S_IEXEC >> 3) | (_S_IEXEC >> 6)); + } else { + mode |= S_IFREG; + } + size_t dotPos = file.rfind('.'); + const char* ext = dotPos == std::string::npos ? 
0 : (file.c_str() + dotPos); + if (ext && + (Strucmp(ext, ".exe") == 0 || Strucmp(ext, ".com") == 0 || + Strucmp(ext, ".cmd") == 0 || Strucmp(ext, ".bat") == 0)) { + mode |= (_S_IEXEC | (_S_IEXEC >> 3) | (_S_IEXEC >> 6)); + } +#else + struct stat st; + if (stat(file.c_str(), &st) < 0) { + return false; + } + mode = st.st_mode; +#endif + return true; +} + +bool SystemTools::SetPermissions(const char* file, mode_t mode, + bool honor_umask) +{ + if (!file) { + return false; + } + return SystemTools::SetPermissions(std::string(file), mode, honor_umask); +} + +bool SystemTools::SetPermissions(const std::string& file, mode_t mode, + bool honor_umask) +{ + if (!SystemTools::PathExists(file)) { + return false; + } + if (honor_umask) { + mode_t currentMask = umask(0); + umask(currentMask); + mode &= ~currentMask; + } +#ifdef _WIN32 + if (_wchmod(Encoding::ToWindowsExtendedPath(file).c_str(), mode) < 0) +#else + if (chmod(file.c_str(), mode) < 0) +#endif + { + return false; + } + + return true; +} + +std::string SystemTools::GetParentDirectory(const std::string& fileOrDir) +{ + return SystemTools::GetFilenamePath(fileOrDir); +} + +bool SystemTools::IsSubDirectory(const std::string& cSubdir, + const std::string& cDir) +{ + if (cDir.empty()) { + return false; + } + std::string subdir = cSubdir; + std::string dir = cDir; + SystemTools::ConvertToUnixSlashes(subdir); + SystemTools::ConvertToUnixSlashes(dir); + if (subdir.size() <= dir.size() || dir.empty()) { + return false; + } + bool isRootPath = dir.back() == '/'; // like "/" or "C:/" + size_t expectedSlashPosition = isRootPath ? dir.size() - 1u : dir.size(); + if (subdir[expectedSlashPosition] != '/') { + return false; + } + std::string s = subdir.substr(0, dir.size()); + return SystemTools::ComparePath(s, dir); +} + +void SystemTools::Delay(unsigned int msec) +{ +#ifdef _WIN32 + Sleep(msec); +#else + // The sleep function gives 1 second resolution and the usleep + // function gives 1e-6 second resolution but on some platforms has a + // maximum sleep time of 1 second. This could be re-implemented to + // use select with masked signals or pselect to mask signals + // atomically. If select is given empty sets and zero as the max + // file descriptor but a non-zero timeout it can be used to block + // for a precise amount of time. + if (msec >= 1000) { + sleep(msec / 1000); + usleep((msec % 1000) * 1000); + } else { + usleep(msec * 1000); + } +#endif +} + +std::string SystemTools::GetOperatingSystemNameAndVersion() +{ + std::string res; + +#ifdef _WIN32 + char buffer[256]; + + OSVERSIONINFOEXA osvi; + BOOL bOsVersionInfoEx; + + ZeroMemory(&osvi, sizeof(osvi)); + osvi.dwOSVersionInfoSize = sizeof(osvi); + +# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx +# pragma warning(push) +# ifdef __INTEL_COMPILER +# pragma warning(disable : 1478) +# elif defined __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" +# else +# pragma warning(disable : 4996) +# endif +# endif + bOsVersionInfoEx = GetVersionExA((OSVERSIONINFOA*)&osvi); + if (!bOsVersionInfoEx) { + return 0; + } +# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx +# ifdef __clang__ +# pragma clang diagnostic pop +# else +# pragma warning(pop) +# endif +# endif + + switch (osvi.dwPlatformId) { + // Test for the Windows NT product family. + + case VER_PLATFORM_WIN32_NT: + + // Test for the specific product family. 
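+      // Note that the version numbers tested below come from GetVersionExA;
+      // on Windows 8.1 and later the values it reports depend on the
+      // application's compatibility manifest.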
+ if (osvi.dwMajorVersion == 10 && osvi.dwMinorVersion == 0) { + if (osvi.wProductType == VER_NT_WORKSTATION) { + res += "Microsoft Windows 10"; + } else { + res += "Microsoft Windows Server 2016 family"; + } + } + + if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 3) { + if (osvi.wProductType == VER_NT_WORKSTATION) { + res += "Microsoft Windows 8.1"; + } else { + res += "Microsoft Windows Server 2012 R2 family"; + } + } + + if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 2) { + if (osvi.wProductType == VER_NT_WORKSTATION) { + res += "Microsoft Windows 8"; + } else { + res += "Microsoft Windows Server 2012 family"; + } + } + + if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 1) { + if (osvi.wProductType == VER_NT_WORKSTATION) { + res += "Microsoft Windows 7"; + } else { + res += "Microsoft Windows Server 2008 R2 family"; + } + } + + if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 0) { + if (osvi.wProductType == VER_NT_WORKSTATION) { + res += "Microsoft Windows Vista"; + } else { + res += "Microsoft Windows Server 2008 family"; + } + } + + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 2) { + res += "Microsoft Windows Server 2003 family"; + } + + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { + res += "Microsoft Windows XP"; + } + + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 0) { + res += "Microsoft Windows 2000"; + } + + if (osvi.dwMajorVersion <= 4) { + res += "Microsoft Windows NT"; + } + + // Test for specific product on Windows NT 4.0 SP6 and later. + + if (bOsVersionInfoEx) { + // Test for the workstation type. + + if (osvi.wProductType == VER_NT_WORKSTATION) { + if (osvi.dwMajorVersion == 4) { + res += " Workstation 4.0"; + } else if (osvi.dwMajorVersion == 5) { + if (osvi.wSuiteMask & VER_SUITE_PERSONAL) { + res += " Home Edition"; + } else { + res += " Professional"; + } + } + } + + // Test for the server type. 
+ + else if (osvi.wProductType == VER_NT_SERVER) { + if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 2) { + if (osvi.wSuiteMask & VER_SUITE_DATACENTER) { + res += " Datacenter Edition"; + } else if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) { + res += " Enterprise Edition"; + } else if (osvi.wSuiteMask == VER_SUITE_BLADE) { + res += " Web Edition"; + } else { + res += " Standard Edition"; + } + } + + else if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 0) { + if (osvi.wSuiteMask & VER_SUITE_DATACENTER) { + res += " Datacenter Server"; + } else if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) { + res += " Advanced Server"; + } else { + res += " Server"; + } + } + + else if (osvi.dwMajorVersion <= 4) // Windows NT 4.0 + { + if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) { + res += " Server 4.0, Enterprise Edition"; + } else { + res += " Server 4.0"; + } + } + } + } + + // Test for specific product on Windows NT 4.0 SP5 and earlier + + else { + HKEY hKey; +# define BUFSIZE 80 + wchar_t szProductType[BUFSIZE]; + DWORD dwBufLen = BUFSIZE; + LONG lRet; + + lRet = + RegOpenKeyExW(HKEY_LOCAL_MACHINE, + L"SYSTEM\\CurrentControlSet\\Control\\ProductOptions", + 0, KEY_QUERY_VALUE, &hKey); + if (lRet != ERROR_SUCCESS) { + return 0; + } + + lRet = RegQueryValueExW(hKey, L"ProductType", nullptr, nullptr, + (LPBYTE)szProductType, &dwBufLen); + + if ((lRet != ERROR_SUCCESS) || (dwBufLen > BUFSIZE)) { + return 0; + } + + RegCloseKey(hKey); + + if (lstrcmpiW(L"WINNT", szProductType) == 0) { + res += " Workstation"; + } + if (lstrcmpiW(L"LANMANNT", szProductType) == 0) { + res += " Server"; + } + if (lstrcmpiW(L"SERVERNT", szProductType) == 0) { + res += " Advanced Server"; + } + + res += " "; + sprintf(buffer, "%ld", osvi.dwMajorVersion); + res += buffer; + res += "."; + sprintf(buffer, "%ld", osvi.dwMinorVersion); + res += buffer; + } + + // Display service pack (if any) and build number. + + if (osvi.dwMajorVersion == 4 && + lstrcmpiA(osvi.szCSDVersion, "Service Pack 6") == 0) { + HKEY hKey; + LONG lRet; + + // Test for SP6 versus SP6a. + + lRet = RegOpenKeyExW( + HKEY_LOCAL_MACHINE, + L"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Hotfix\\Q246009", + 0, KEY_QUERY_VALUE, &hKey); + + if (lRet == ERROR_SUCCESS) { + res += " Service Pack 6a (Build "; + sprintf(buffer, "%ld", osvi.dwBuildNumber & 0xFFFF); + res += buffer; + res += ")"; + } else // Windows NT 4.0 prior to SP6a + { + res += " "; + res += osvi.szCSDVersion; + res += " (Build "; + sprintf(buffer, "%ld", osvi.dwBuildNumber & 0xFFFF); + res += buffer; + res += ")"; + } + + RegCloseKey(hKey); + } else // Windows NT 3.51 and earlier or Windows 2000 and later + { + res += " "; + res += osvi.szCSDVersion; + res += " (Build "; + sprintf(buffer, "%ld", osvi.dwBuildNumber & 0xFFFF); + res += buffer; + res += ")"; + } + + break; + + // Test for the Windows 95 product family. 
+ + case VER_PLATFORM_WIN32_WINDOWS: + + if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 0) { + res += "Microsoft Windows 95"; + if (osvi.szCSDVersion[1] == 'C' || osvi.szCSDVersion[1] == 'B') { + res += " OSR2"; + } + } + + if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 10) { + res += "Microsoft Windows 98"; + if (osvi.szCSDVersion[1] == 'A') { + res += " SE"; + } + } + + if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 90) { + res += "Microsoft Windows Millennium Edition"; + } + break; + + case VER_PLATFORM_WIN32s: + + res += "Microsoft Win32s"; + break; + } +#endif + + return res; +} + +bool SystemTools::ParseURLProtocol(const std::string& URL, + std::string& protocol, + std::string& dataglom) +{ + // match 0 entire url + // match 1 protocol + // match 2 dataglom following protocol:// + kwsys::RegularExpression urlRe(VTK_URL_PROTOCOL_REGEX); + + if (!urlRe.find(URL)) + return false; + + protocol = urlRe.match(1); + dataglom = urlRe.match(2); + + return true; +} + +bool SystemTools::ParseURL(const std::string& URL, std::string& protocol, + std::string& username, std::string& password, + std::string& hostname, std::string& dataport, + std::string& database) +{ + kwsys::RegularExpression urlRe(VTK_URL_REGEX); + if (!urlRe.find(URL)) + return false; + + // match 0 URL + // match 1 protocol + // match 2 mangled user + // match 3 username + // match 4 mangled password + // match 5 password + // match 6 hostname + // match 7 mangled port + // match 8 dataport + // match 9 database name + + protocol = urlRe.match(1); + username = urlRe.match(3); + password = urlRe.match(5); + hostname = urlRe.match(6); + dataport = urlRe.match(8); + database = urlRe.match(9); + + return true; +} + +// These must NOT be initialized. Default initialization to zero is +// necessary. +static unsigned int SystemToolsManagerCount; +SystemToolsStatic* SystemTools::Statics; + +// SystemToolsManager manages the SystemTools singleton. +// SystemToolsManager should be included in any translation unit +// that will use SystemTools or that implements the singleton +// pattern. It makes sure that the SystemTools singleton is created +// before and destroyed after all other singletons in CMake. + +SystemToolsManager::SystemToolsManager() +{ + if (++SystemToolsManagerCount == 1) { + SystemTools::ClassInitialize(); + } +} + +SystemToolsManager::~SystemToolsManager() +{ + if (--SystemToolsManagerCount == 0) { + SystemTools::ClassFinalize(); + } +} + +#if defined(__VMS) +// On VMS we configure the run time C library to be more UNIX like. +// http://h71000.www7.hp.com/doc/732final/5763/5763pro_004.html +extern "C" int decc$feature_get_index(char* name); +extern "C" int decc$feature_set_value(int index, int mode, int value); +static int SetVMSFeature(char* name, int value) +{ + int i; + errno = 0; + i = decc$feature_get_index(name); + return i >= 0 && (decc$feature_set_value(i, 1, value) >= 0 || errno == 0); +} +#endif + +void SystemTools::ClassInitialize() +{ +#ifdef __VMS + SetVMSFeature("DECC$FILENAME_UNIX_ONLY", 1); +#endif + + // Create statics singleton instance + SystemTools::Statics = new SystemToolsStatic; + +#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP +// Add some special translation paths for unix. These are not added +// for windows because drive letters need to be maintained. Also, +// there are not sym-links and mount points on windows anyway. 
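+// For example, if $PWD is "/home/user/proj" but getcwd() reports
+// "/data/vol1/home/user/proj", the loop below finds the shortest pair of
+// logical/physical prefixes that still map to the same directory and adds a
+// translation so the logical spelling from $PWD is kept.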
+# if !defined(_WIN32) || defined(__CYGWIN__)
+  // The tmp path is frequently a logical path so always keep it:
+  SystemTools::AddKeepPath("/tmp/");
+
+  // If the current working directory is a logical path then keep the
+  // logical name.
+  std::string pwd_str;
+  if (SystemTools::GetEnv("PWD", pwd_str)) {
+    char buf[2048];
+    if (const char* cwd = Getcwd(buf, 2048)) {
+      // The current working directory may be a logical path.  Find
+      // the shortest logical path that still produces the correct
+      // physical path.
+      std::string cwd_changed;
+      std::string pwd_changed;
+
+      // Test progressively shorter logical-to-physical mappings.
+      std::string cwd_str = cwd;
+      std::string pwd_path;
+      Realpath(pwd_str, pwd_path);
+      while (cwd_str == pwd_path && cwd_str != pwd_str) {
+        // The current pair of paths is a working logical mapping.
+        cwd_changed = cwd_str;
+        pwd_changed = pwd_str;
+
+        // Strip off one directory level and see if the logical
+        // mapping still works.
+        pwd_str = SystemTools::GetFilenamePath(pwd_str);
+        cwd_str = SystemTools::GetFilenamePath(cwd_str);
+        Realpath(pwd_str, pwd_path);
+      }
+
+      // Add the translation to keep the logical path name.
+      if (!cwd_changed.empty() && !pwd_changed.empty()) {
+        SystemTools::AddTranslationPath(cwd_changed, pwd_changed);
+      }
+    }
+  }
+# endif
+#endif
+}
+
+void SystemTools::ClassFinalize()
+{
+  delete SystemTools::Statics;
+}
+
+} // namespace KWSYS_NAMESPACE
+
+#if defined(_MSC_VER) && defined(_DEBUG)
+# include <crtdbg.h>
+# include <stdio.h>
+# include <stdlib.h>
+namespace KWSYS_NAMESPACE {
+
+static int SystemToolsDebugReport(int, char* message, int*)
+{
+  fprintf(stderr, "%s", message);
+  fflush(stderr);
+  return 1; // no further reporting required
+}
+
+void SystemTools::EnableMSVCDebugHook()
+{
+  if (SystemTools::HasEnv("DART_TEST_FROM_DART") ||
+      SystemTools::HasEnv("DASHBOARD_TEST_FROM_CTEST")) {
+    _CrtSetReportHook(SystemToolsDebugReport);
+  }
+}
+
+} // namespace KWSYS_NAMESPACE
+#else
+namespace KWSYS_NAMESPACE {
+void SystemTools::EnableMSVCDebugHook()
+{
+}
+} // namespace KWSYS_NAMESPACE
+#endif
diff --git a/test/API/driver/kwsys/SystemTools.hxx.in b/test/API/driver/kwsys/SystemTools.hxx.in
new file mode 100644
index 00000000000..c4ab9d4f36a
--- /dev/null
+++ b/test/API/driver/kwsys/SystemTools.hxx.in
@@ -0,0 +1,981 @@
+/* Distributed under the OSI-approved BSD 3-Clause License.  See accompanying
+   file Copyright.txt or https://cmake.org/licensing#kwsys for details.  */
+#ifndef @KWSYS_NAMESPACE@_SystemTools_hxx
+#define @KWSYS_NAMESPACE@_SystemTools_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <iosfwd>
+#include <map>
+#include <string>
+#include <vector>
+
+#include <sys/types.h>
+// include sys/stat.h after sys/types.h
+#include <sys/stat.h>
+
+#if !defined(_WIN32) || defined(__CYGWIN__)
+# include <unistd.h> // For access permissions for use with access()
+#endif
+
+// Required for va_list
+#include <stdarg.h>
+// Required for FILE*
+#include <stdio.h>
+#if !defined(va_list)
+// Some compilers move va_list into the std namespace and there is no way to
+// tell that this has been done. Playing with things being included before or
+// after stdarg.h does not solve things because we do not have control over
+// what the user does. This hack solves this problem by moving va_list to our
+// own namespace that is local for kwsys.
+namespace std { +} // Required for platforms that do not have std namespace +namespace @KWSYS_NAMESPACE@_VA_LIST { +using namespace std; +typedef va_list hack_va_list; +} +namespace @KWSYS_NAMESPACE@ { +typedef @KWSYS_NAMESPACE@_VA_LIST::hack_va_list va_list; +} +#endif // va_list + +namespace @KWSYS_NAMESPACE@ { + +class SystemToolsStatic; + +/** \class SystemToolsManager + * \brief Use to make sure SystemTools is initialized before it is used + * and is the last static object destroyed + */ +class @KWSYS_NAMESPACE@_EXPORT SystemToolsManager +{ +public: + SystemToolsManager(); + ~SystemToolsManager(); + + SystemToolsManager(const SystemToolsManager&) = delete; + SystemToolsManager& operator=(const SystemToolsManager&) = delete; +}; + +// This instance will show up in any translation unit that uses +// SystemTools. It will make sure SystemTools is initialized +// before it is used and is the last static object destroyed. +static SystemToolsManager SystemToolsManagerInstance; + +// Flags for use with TestFileAccess. Use a typedef in case any operating +// system in the future needs a special type. These are flags that may be +// combined using the | operator. +typedef int TestFilePermissions; +#if defined(_WIN32) && !defined(__CYGWIN__) +// On Windows (VC and Borland), no system header defines these constants... +static const TestFilePermissions TEST_FILE_OK = 0; +static const TestFilePermissions TEST_FILE_READ = 4; +static const TestFilePermissions TEST_FILE_WRITE = 2; +static const TestFilePermissions TEST_FILE_EXECUTE = 1; +#else +// Standard POSIX constants +static const TestFilePermissions TEST_FILE_OK = F_OK; +static const TestFilePermissions TEST_FILE_READ = R_OK; +static const TestFilePermissions TEST_FILE_WRITE = W_OK; +static const TestFilePermissions TEST_FILE_EXECUTE = X_OK; +#endif + +/** \class SystemTools + * \brief A collection of useful platform-independent system functions. + */ +class @KWSYS_NAMESPACE@_EXPORT SystemTools +{ +public: + /** ----------------------------------------------------------------- + * String Manipulation Routines + * ----------------------------------------------------------------- + */ + + /** + * Replace symbols in str that are not valid in C identifiers as + * defined by the 1999 standard, ie. anything except [A-Za-z0-9_]. + * They are replaced with `_' and if the first character is a digit + * then an underscore is prepended. Note that this can produce + * identifiers that the standard reserves (_[A-Z].* and __.*). + */ + static std::string MakeCidentifier(const std::string& s); + + static std::string MakeCindentifier(const std::string& s) + { + return MakeCidentifier(s); + } + + /** + * Replace replace all occurrences of the string in the source string. + */ + static void ReplaceString(std::string& source, const char* replace, + const char* with); + static void ReplaceString(std::string& source, const std::string& replace, + const std::string& with); + + /** + * Return a capitalized string (i.e the first letter is uppercased, + * all other are lowercased). + */ + static std::string Capitalized(const std::string&); + + /** + * Return a 'capitalized words' string (i.e the first letter of each word + * is uppercased all other are left untouched though). + */ + static std::string CapitalizedWords(const std::string&); + + /** + * Return a 'uncapitalized words' string (i.e the first letter of each word + * is lowercased all other are left untouched though). 
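+   * For example, "Hello World" becomes "hello world".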
+ */ + static std::string UnCapitalizedWords(const std::string&); + + /** + * Return a lower case string + */ + static std::string LowerCase(const std::string&); + + /** + * Return a lower case string + */ + static std::string UpperCase(const std::string&); + + /** + * Count char in string + */ + static size_t CountChar(const char* str, char c); + + /** + * Remove some characters from a string. + * Return a pointer to the new resulting string (allocated with 'new') + */ + static char* RemoveChars(const char* str, const char* toremove); + + /** + * Remove remove all but 0->9, A->F characters from a string. + * Return a pointer to the new resulting string (allocated with 'new') + */ + static char* RemoveCharsButUpperHex(const char* str); + + /** + * Replace some characters by another character in a string (in-place) + * Return a pointer to string + */ + static char* ReplaceChars(char* str, const char* toreplace, + char replacement); + + /** + * Returns true if str1 starts (respectively ends) with str2 + */ + static bool StringStartsWith(const char* str1, const char* str2); + static bool StringStartsWith(const std::string& str1, const char* str2); + static bool StringEndsWith(const char* str1, const char* str2); + static bool StringEndsWith(const std::string& str1, const char* str2); + + /** + * Returns a pointer to the last occurrence of str2 in str1 + */ + static const char* FindLastString(const char* str1, const char* str2); + + /** + * Make a duplicate of the string similar to the strdup C function + * but use new to create the 'new' string, so one can use + * 'delete' to remove it. Returns 0 if the input is empty. + */ + static char* DuplicateString(const char* str); + + /** + * Return the string cropped to a given length by removing chars in the + * center of the string and replacing them with an ellipsis (...) + */ + static std::string CropString(const std::string&, size_t max_len); + + /** split a path by separator into an array of strings, default is /. + If isPath is true then the string is treated like a path and if + s starts with a / then the first element of the returned array will + be /, so /foo/bar will be [/, foo, bar] + */ + static std::vector SplitString(const std::string& s, + char separator = '/', + bool isPath = false); + /** + * Perform a case-independent string comparison + */ + static int Strucmp(const char* s1, const char* s2); + + /** + * Split a string on its newlines into multiple lines + * Return false only if the last line stored had no newline + */ + static bool Split(const std::string& s, std::vector& l); + static bool Split(const std::string& s, std::vector& l, + char separator); + + /** + * Return string with space added between capitalized words + * (i.e. EatMyShorts becomes Eat My Shorts ) + * (note that IEatShorts becomes IEat Shorts) + */ + static std::string AddSpaceBetweenCapitalizedWords(const std::string&); + + /** + * Append two or more strings and produce new one. + * Programmer must 'delete []' the resulting string, which was allocated + * with 'new'. + * Return 0 if inputs are empty or there was an error + */ + static char* AppendStrings(const char* str1, const char* str2); + static char* AppendStrings(const char* str1, const char* str2, + const char* str3); + + /** + * Estimate the length of the string that will be produced + * from printing the given format string and arguments. The + * returned length will always be at least as large as the string + * that will result from printing. 
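+   * For example, for format "%d-%s" with arguments 42 and "abc" the
+   * estimate is at least 6, the length of "42-abc".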
+ * WARNING: since va_arg is called to iterate of the argument list, + * you will not be able to use this 'ap' anymore from the beginning. + * It's up to you to call va_end though. + */ + static int EstimateFormatLength(const char* format, va_list ap); + + /** + * Escape specific characters in 'str'. + */ + static std::string EscapeChars(const char* str, const char* chars_to_escape, + char escape_char = '\\'); + + /** ----------------------------------------------------------------- + * Filename Manipulation Routines + * ----------------------------------------------------------------- + */ + + /** + * Replace Windows file system slashes with Unix-style slashes. + */ + static void ConvertToUnixSlashes(std::string& path); + +#ifdef _WIN32 + /** Calls Encoding::ToWindowsExtendedPath. */ + static std::wstring ConvertToWindowsExtendedPath(const std::string&); +#endif + + /** + * For windows this calls ConvertToWindowsOutputPath and for unix + * it calls ConvertToUnixOutputPath + */ + static std::string ConvertToOutputPath(const std::string&); + + /** + * Convert the path to a string that can be used in a unix makefile. + * double slashes are removed, and spaces are escaped. + */ + static std::string ConvertToUnixOutputPath(const std::string&); + + /** + * Convert the path to string that can be used in a windows project or + * makefile. Double slashes are removed if they are not at the start of + * the string, the slashes are converted to windows style backslashes, and + * if there are spaces in the string it is double quoted. + */ + static std::string ConvertToWindowsOutputPath(const std::string&); + + /** + * Return true if a path with the given name exists in the current directory. + */ + static bool PathExists(const std::string& path); + + /** + * Return true if a file exists in the current directory. + * If isFile = true, then make sure the file is a file and + * not a directory. If isFile = false, then return true + * if it is a file or a directory. Note that the file will + * also be checked for read access. (Currently, this check + * for read access is only done on POSIX systems.) + */ + static bool FileExists(const char* filename, bool isFile); + static bool FileExists(const std::string& filename, bool isFile); + static bool FileExists(const char* filename); + static bool FileExists(const std::string& filename); + + /** + * Test if a file exists and can be accessed with the requested + * permissions. Symbolic links are followed. Returns true if + * the access test was successful. + * + * On POSIX systems (including Cygwin), this maps to the access + * function. On Windows systems, all existing files are + * considered readable, and writable files are considered to + * have the read-only file attribute cleared. + */ + static bool TestFileAccess(const char* filename, + TestFilePermissions permissions); + static bool TestFileAccess(const std::string& filename, + TestFilePermissions permissions); +/** + * Cross platform wrapper for stat struct + */ +#if defined(_WIN32) && !defined(__CYGWIN__) +# if defined(__BORLANDC__) + typedef struct stati64 Stat_t; +# else + typedef struct _stat64 Stat_t; +# endif +#else + typedef struct stat Stat_t; +#endif + + /** + * Cross platform wrapper for stat system call + * + * On Windows this may not work for paths longer than 250 characters + * due to limitations of the underlying '_wstat64' call. + */ + static int Stat(const char* path, Stat_t* buf); + static int Stat(const std::string& path, Stat_t* buf); + +/** + * Converts Cygwin path to Win32 path. 
Uses dictionary container for + * caching and calls to cygwin_conv_to_win32_path from Cygwin dll + * for actual translation. Returns true on success, else false. + */ +#ifdef __CYGWIN__ + static bool PathCygwinToWin32(const char* path, char* win32_path); +#endif + + /** + * Return file length + */ + static unsigned long FileLength(const std::string& filename); + + /** + Change the modification time or create a file + */ + static bool Touch(const std::string& filename, bool create); + + /** + * Compare file modification times. + * Return true for successful comparison and false for error. + * When true is returned, result has -1, 0, +1 for + * f1 older, same, or newer than f2. + */ + static bool FileTimeCompare(const std::string& f1, const std::string& f2, + int* result); + + /** + * Get the file extension (including ".") needed for an executable + * on the current platform ("" for unix, ".exe" for Windows). + */ + static const char* GetExecutableExtension(); + + /** + * Given a path on a Windows machine, return the actual case of + * the path as it exists on disk. Path components that do not + * exist on disk are returned unchanged. Relative paths are always + * returned unchanged. Drive letters are always made upper case. + * This does nothing on non-Windows systems but return the path. + */ + static std::string GetActualCaseForPath(const std::string& path); + + /** + * Given the path to a program executable, get the directory part of + * the path with the file stripped off. If there is no directory + * part, the empty string is returned. + */ + static std::string GetProgramPath(const std::string&); + static bool SplitProgramPath(const std::string& in_name, std::string& dir, + std::string& file, bool errorReport = true); + + /** + * Given argv[0] for a unix program find the full path to a running + * executable. argv0 can be null for windows WinMain programs + * in this case GetModuleFileName will be used to find the path + * to the running executable. If argv0 is not a full path, + * then this will try to find the full path. If the path is not + * found false is returned, if found true is returned. An error + * message of the attempted paths is stored in errorMsg. + * exeName is the name of the executable. + * buildDir is a possibly null path to the build directory. + * installPrefix is a possibly null pointer to the install directory. + */ + static bool FindProgramPath(const char* argv0, std::string& pathOut, + std::string& errorMsg, + const char* exeName = nullptr, + const char* buildDir = nullptr, + const char* installPrefix = nullptr); + + /** + * Given a path to a file or directory, convert it to a full path. + * This collapses away relative paths relative to the cwd argument + * (which defaults to the current working directory). The full path + * is returned. + */ + static std::string CollapseFullPath(const std::string& in_relative); + static std::string CollapseFullPath(const std::string& in_relative, + const char* in_base); + static std::string CollapseFullPath(const std::string& in_relative, + const std::string& in_base); + + /** + * Get the real path for a given path, removing all symlinks. In + * the event of an error (non-existent path, permissions issue, + * etc.) the original path is returned if errorMessage pointer is + * nullptr. Otherwise empty string is returned and errorMessage + * contains error description. 
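+   * For example, if "/tmp" is a symlink to "/private/tmp", then
+   * GetRealPath("/tmp/log.txt") returns "/private/tmp/log.txt".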
+   */
+  static std::string GetRealPath(const std::string& path,
+                                 std::string* errorMessage = nullptr);
+
+  /**
+   * Split a path name into its root component and the rest of the
+   * path.  The root component is one of the following:
+   *    "/"   = UNIX full path
+   *    "c:/" = Windows full path (can be any drive letter)
+   *    "c:"  = Windows drive-letter relative path (can be any drive letter)
+   *    "//"  = Network path
+   *    "~/"  = Home path for current user
+   *    "~u/" = Home path for user 'u'
+   *    ""    = Relative path
+   *
+   * A pointer to the rest of the path after the root component is
+   * returned.  The root component is stored in the "root" string if
+   * given.
+   */
+  static const char* SplitPathRootComponent(const std::string& p,
+                                            std::string* root = nullptr);
+
+  /**
+   * Split a path name into its basic components.  The first component
+   * always exists and is the root returned by SplitPathRootComponent.
+   * The remaining components form the path.  If there is a trailing
+   * slash then the last component is the empty string.  The
+   * components can be recombined as "c[0]c[1]/c[2]/.../c[n]" to
+   * produce the original path.  Home directory references are
+   * automatically expanded if expand_home_dir is true and this
+   * platform supports them.
+   *
+   * This does *not* normalize the input path.  All components are
+   * preserved, including empty ones.  Typically callers should use
+   * this only on paths that have already been normalized.
+   */
+  static void SplitPath(const std::string& p,
+                        std::vector<std::string>& components,
+                        bool expand_home_dir = true);
+
+  /**
+   * Join components of a path name into a single string.  See
+   * SplitPath for the format of the components.
+   *
+   * This does *not* normalize the input path.  All components are
+   * preserved, including empty ones.  Typically callers should use
+   * this only on paths that have already been normalized.
+   */
+  static std::string JoinPath(const std::vector<std::string>& components);
+  static std::string JoinPath(std::vector<std::string>::const_iterator first,
+                              std::vector<std::string>::const_iterator last);
+
+  /**
+   * Compare a path or components of a path.
+   */
+  static bool ComparePath(const std::string& c1, const std::string& c2);
+
+  /**
+   * Return path of a full filename (no trailing slashes)
+   */
+  static std::string GetFilenamePath(const std::string&);
+
+  /**
+   * Return file name of a full filename (i.e. file name without path)
+   */
+  static std::string GetFilenameName(const std::string&);
+
+  /**
+   * Return longest file extension of a full filename (dot included)
+   */
+  static std::string GetFilenameExtension(const std::string&);
+
+  /**
+   * Return shortest file extension of a full filename (dot included)
+   */
+  static std::string GetFilenameLastExtension(const std::string& filename);
+
+  /**
+   * Return file name without extension of a full filename
+   */
+  static std::string GetFilenameWithoutExtension(const std::string&);
+
+  /**
+   * Return file name without its last (shortest) extension
+   */
+  static std::string GetFilenameWithoutLastExtension(const std::string&);
+
+  /**
+   * Return whether the path represents a full path (not relative)
+   */
+  static bool FileIsFullPath(const std::string&);
+  static bool FileIsFullPath(const char*);
+
+  /**
+   * For windows return the short path for the given path,
+   * Unix just a pass through
+   */
+  static bool GetShortPath(const std::string& path, std::string& result);
+
+  /**
+   * Read line from file. Make sure to read a full line and truncates it if
+   * requested via sizeLimit. Returns true if any data were read before the
+   * end-of-file was reached.
If the has_newline argument is specified, it will + * be true when the line read had a newline character. + */ + static bool GetLineFromStream(std::istream& istr, std::string& line, + bool* has_newline = nullptr, + long sizeLimit = -1); + + /** + * Get the parent directory of the directory or file + */ + static std::string GetParentDirectory(const std::string& fileOrDir); + + /** + * Check if the given file or directory is in subdirectory of dir + */ + static bool IsSubDirectory(const std::string& fileOrDir, + const std::string& dir); + + /** ----------------------------------------------------------------- + * File Manipulation Routines + * ----------------------------------------------------------------- + */ + + /** + * Open a file considering unicode. + */ + static FILE* Fopen(const std::string& file, const char* mode); + +/** + * Visual C++ does not define mode_t (note that Borland does, however). + */ +#if defined(_MSC_VER) + typedef unsigned short mode_t; +#endif + + /** + * Make a new directory if it is not there. This function + * can make a full path even if none of the directories existed + * prior to calling this function. + */ + static bool MakeDirectory(const char* path, const mode_t* mode = nullptr); + static bool MakeDirectory(const std::string& path, + const mode_t* mode = nullptr); + + /** + * Copy the source file to the destination file only + * if the two files differ. + */ + static bool CopyFileIfDifferent(const std::string& source, + const std::string& destination); + + /** + * Compare the contents of two files. Return true if different + */ + static bool FilesDiffer(const std::string& source, + const std::string& destination); + + /** + * Compare the contents of two files, ignoring line ending differences. + * Return true if different + */ + static bool TextFilesDiffer(const std::string& path1, + const std::string& path2); + + /** + * Return true if the two files are the same file + */ + static bool SameFile(const std::string& file1, const std::string& file2); + + /** + * Copy a file. + */ + static bool CopyFileAlways(const std::string& source, + const std::string& destination); + + /** + * Copy a file. If the "always" argument is true the file is always + * copied. If it is false, the file is copied only if it is new or + * has changed. + */ + static bool CopyAFile(const std::string& source, + const std::string& destination, bool always = true); + + /** + * Copy content directory to another directory with all files and + * subdirectories. If the "always" argument is true all files are + * always copied. If it is false, only files that have changed or + * are new are copied. 
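+   * For example, CopyADirectory("data", "backup/data", false) copies the
+   * tree recursively but skips files whose content is unchanged.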
+ */ + static bool CopyADirectory(const std::string& source, + const std::string& destination, + bool always = true); + + /** + * Remove a file + */ + static bool RemoveFile(const std::string& source); + + /** + * Remove a directory + */ + static bool RemoveADirectory(const std::string& source); + + /** + * Get the maximum full file path length + */ + static size_t GetMaximumFilePathLength(); + + /** + * Find a file in the system PATH, with optional extra paths + */ + static std::string FindFile( + const std::string& name, + const std::vector& path = std::vector(), + bool no_system_path = false); + + /** + * Find a directory in the system PATH, with optional extra paths + */ + static std::string FindDirectory( + const std::string& name, + const std::vector& path = std::vector(), + bool no_system_path = false); + + /** + * Find an executable in the system PATH, with optional extra paths + */ + static std::string FindProgram( + const char* name, + const std::vector& path = std::vector(), + bool no_system_path = false); + static std::string FindProgram( + const std::string& name, + const std::vector& path = std::vector(), + bool no_system_path = false); + static std::string FindProgram( + const std::vector& names, + const std::vector& path = std::vector(), + bool no_system_path = false); + + /** + * Find a library in the system PATH, with optional extra paths + */ + static std::string FindLibrary(const std::string& name, + const std::vector& path); + + /** + * Return true if the file is a directory + */ + static bool FileIsDirectory(const std::string& name); + + /** + * Return true if the file is a symlink + */ + static bool FileIsSymlink(const std::string& name); + + /** + * Return true if the file is a FIFO + */ + static bool FileIsFIFO(const std::string& name); + + /** + * Return true if the file has a given signature (first set of bytes) + */ + static bool FileHasSignature(const char* filename, const char* signature, + long offset = 0); + + /** + * Attempt to detect and return the type of a file. + * Up to 'length' bytes are read from the file, if more than 'percent_bin' % + * of the bytes are non-textual elements, the file is considered binary, + * otherwise textual. Textual elements are bytes in the ASCII [0x20, 0x7E] + * range, but also \\n, \\r, \\t. + * The algorithm is simplistic, and should probably check for usual file + * extensions, 'magic' signature, unicode, etc. + */ + enum FileTypeEnum + { + FileTypeUnknown, + FileTypeBinary, + FileTypeText + }; + static SystemTools::FileTypeEnum DetectFileType(const char* filename, + unsigned long length = 256, + double percent_bin = 0.05); + + /** + * Create a symbolic link if the platform supports it. Returns whether + * creation succeeded. + */ + static bool CreateSymlink(const std::string& origName, + const std::string& newName); + + /** + * Read the contents of a symbolic link. Returns whether reading + * succeeded. + */ + static bool ReadSymlink(const std::string& newName, std::string& origName); + + /** + * Try to locate the file 'filename' in the directory 'dir'. + * If 'filename' is a fully qualified filename, the basename of the file is + * used to check for its existence in 'dir'. + * If 'dir' is not a directory, GetFilenamePath() is called on 'dir' to + * get its directory first (thus, you can pass a filename as 'dir', as + * a convenience). + * 'filename_found' is assigned the fully qualified name/path of the file + * if it is found (not touched otherwise). 
+ * If 'try_filename_dirs' is true, try to find the file using the + * components of its path, i.e. if we are looking for c:/foo/bar/bill.txt, + * first look for bill.txt in 'dir', then in 'dir'/bar, then in 'dir'/foo/bar + * etc. + * Return true if the file was found, false otherwise. + */ + static bool LocateFileInDir(const char* filename, const char* dir, + std::string& filename_found, + int try_filename_dirs = 0); + + /** compute the relative path from local to remote. local must + be a directory. remote can be a file or a directory. + Both remote and local must be full paths. Basically, if + you are in directory local and you want to access the file in remote + what is the relative path to do that. For example: + /a/b/c/d to /a/b/c1/d1 -> ../../c1/d1 + from /usr/src to /usr/src/test/blah/foo.cpp -> test/blah/foo.cpp + */ + static std::string RelativePath(const std::string& local, + const std::string& remote); + + /** + * Return file's modified time + */ + static long int ModifiedTime(const std::string& filename); + + /** + * Return file's creation time (Win32: works only for NTFS, not FAT) + */ + static long int CreationTime(const std::string& filename); + + /** + * Get and set permissions of the file. If honor_umask is set, the umask + * is queried and applied to the given permissions. Returns false if + * failure. + * + * WARNING: A non-thread-safe method is currently used to get the umask + * if a honor_umask parameter is set to true. + */ + static bool GetPermissions(const char* file, mode_t& mode); + static bool GetPermissions(const std::string& file, mode_t& mode); + static bool SetPermissions(const char* file, mode_t mode, + bool honor_umask = false); + static bool SetPermissions(const std::string& file, mode_t mode, + bool honor_umask = false); + + /** ----------------------------------------------------------------- + * Time Manipulation Routines + * ----------------------------------------------------------------- + */ + + /** Get current time in seconds since Posix Epoch (Jan 1, 1970). */ + static double GetTime(); + + /** + * Get current date/time + */ + static std::string GetCurrentDateTime(const char* format); + + /** ----------------------------------------------------------------- + * Registry Manipulation Routines + * ----------------------------------------------------------------- + */ + + /** + * Specify access to the 32-bit or 64-bit application view of + * registry values. The default is to match the currently running + * binary type. + */ + enum KeyWOW64 + { + KeyWOW64_Default, + KeyWOW64_32, + KeyWOW64_64 + }; + + /** + * Get a list of subkeys. + */ + static bool GetRegistrySubKeys(const std::string& key, + std::vector& subkeys, + KeyWOW64 view = KeyWOW64_Default); + + /** + * Read a registry value + */ + static bool ReadRegistryValue(const std::string& key, std::string& value, + KeyWOW64 view = KeyWOW64_Default); + + /** + * Write a registry value + */ + static bool WriteRegistryValue(const std::string& key, + const std::string& value, + KeyWOW64 view = KeyWOW64_Default); + + /** + * Delete a registry value + */ + static bool DeleteRegistryValue(const std::string& key, + KeyWOW64 view = KeyWOW64_Default); + + /** ----------------------------------------------------------------- + * Environment Manipulation Routines + * ----------------------------------------------------------------- + */ + + /** + * Add the paths from the environment variable PATH to the + * string vector passed in. If env is set then the value + * of env will be used instead of PATH. 
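+   * For example, GetPath(dirs, "LD_LIBRARY_PATH") appends one entry to
+   * 'dirs' for each directory listed in LD_LIBRARY_PATH.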
+ */ + static void GetPath(std::vector& path, + const char* env = nullptr); + + /** + * Read an environment variable + */ + static const char* GetEnv(const char* key); + static const char* GetEnv(const std::string& key); + static bool GetEnv(const char* key, std::string& result); + static bool GetEnv(const std::string& key, std::string& result); + static bool HasEnv(const char* key); + static bool HasEnv(const std::string& key); + + /** Put a string into the environment + of the form var=value */ + static bool PutEnv(const std::string& env); + + /** Remove a string from the environment. + Input is of the form "var" or "var=value" (value is ignored). */ + static bool UnPutEnv(const std::string& env); + + /** + * Get current working directory CWD + */ + static std::string GetCurrentWorkingDirectory(bool collapse = true); + + /** + * Change directory to the directory specified + */ + static int ChangeDirectory(const std::string& dir); + + /** + * Get the result of strerror(errno) + */ + static std::string GetLastSystemError(); + + /** + * When building DEBUG with MSVC, this enables a hook that prevents + * error dialogs from popping up if the program is being run from + * DART. + */ + static void EnableMSVCDebugHook(); + + /** + * Get the width of the terminal window. The code may or may not work, so + * make sure you have some reasonable defaults prepared if the code returns + * some bogus size. + */ + static int GetTerminalWidth(); + +#if @KWSYS_NAMESPACE@_SYSTEMTOOLS_USE_TRANSLATION_MAP + /** + * Add an entry in the path translation table. + */ + static void AddTranslationPath(const std::string& dir, + const std::string& refdir); + + /** + * If dir is different after CollapseFullPath is called, + * Then insert it into the path translation table + */ + static void AddKeepPath(const std::string& dir); + + /** + * Update path by going through the Path Translation table; + */ + static void CheckTranslationPath(std::string& path); +#endif + + /** + * Delay the execution for a specified amount of time specified + * in milliseconds + */ + static void Delay(unsigned int msec); + + /** + * Get the operating system name and version + * This is implemented for Win32 only for the moment + */ + static std::string GetOperatingSystemNameAndVersion(); + + /** ----------------------------------------------------------------- + * URL Manipulation Routines + * ----------------------------------------------------------------- + */ + + /** + * Parse a character string : + * protocol://dataglom + * and fill protocol as appropriate. + * Return false if the URL does not have the required form, true otherwise. + */ + static bool ParseURLProtocol(const std::string& URL, std::string& protocol, + std::string& dataglom); + + /** + * Parse a string (a URL without protocol prefix) with the form: + * protocol://[[username[':'password]'@']hostname[':'dataport]]'/'[datapath] + * and fill protocol, username, password, hostname, dataport, and datapath + * when values are found. + * Return true if the string matches the format; false otherwise. + */ + static bool ParseURL(const std::string& URL, std::string& protocol, + std::string& username, std::string& password, + std::string& hostname, std::string& dataport, + std::string& datapath); + +private: + /** + * Allocate the stl map that serve as the Path Translation table. + */ + static void ClassInitialize(); + + /** + * Deallocate the stl map that serve as the Path Translation table. 
+
+  /**
+   * This method prevents warning on SGI
+   */
+  SystemToolsManager* GetSystemToolsManager()
+  {
+    return &SystemToolsManagerInstance;
+  }
+
+  static SystemToolsStatic* Statics;
+  friend class SystemToolsStatic;
+  friend class SystemToolsManager;
+};
+
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
diff --git a/test/API/driver/kwsys/Terminal.c b/test/API/driver/kwsys/Terminal.c
new file mode 100644
index 00000000000..4dd246148c5
--- /dev/null
+++ b/test/API/driver/kwsys/Terminal.c
@@ -0,0 +1,414 @@
+/* Distributed under the OSI-approved BSD 3-Clause License.  See accompanying
+   file Copyright.txt or https://cmake.org/licensing#kwsys for details.  */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Terminal.h)
+
+/* Work-around CMake dependency scanning limitation.  This must
+   duplicate the above list of headers.  */
+#if 0
+#  include "Terminal.h.in"
+#endif
+
+/* Configure support for this platform.  */
+#if defined(_WIN32) || defined(__CYGWIN__)
+#  define KWSYS_TERMINAL_SUPPORT_CONSOLE
+#endif
+#if !defined(_WIN32)
+#  define KWSYS_TERMINAL_ISATTY_WORKS
+#endif
+
+/* Include needed system APIs.  */
+
+#include <stdarg.h> /* va_list */
+#include <stdlib.h> /* getenv */
+#include <string.h> /* strcmp */
+
+#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
+#  include <io.h>      /* _get_osfhandle */
+#  include <windows.h> /* SetConsoleTextAttribute */
+#endif
+
+#if defined(KWSYS_TERMINAL_ISATTY_WORKS)
+#  include <unistd.h> /* isatty */
+#else
+#  include <sys/stat.h> /* fstat */
+#endif
+
+static int kwsysTerminalStreamIsVT100(FILE* stream, int default_vt100,
+                                      int default_tty);
+static void kwsysTerminalSetVT100Color(FILE* stream, int color);
+#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
+static HANDLE kwsysTerminalGetStreamHandle(FILE* stream);
+static void kwsysTerminalSetConsoleColor(HANDLE hOut,
+                                         CONSOLE_SCREEN_BUFFER_INFO* hOutInfo,
+                                         FILE* stream, int color);
+#endif
+
+void kwsysTerminal_cfprintf(int color, FILE* stream, const char* format, ...)
+{
+  /* Setup the stream with the given color if possible.  */
+  int pipeIsConsole = 0;
+  int pipeIsVT100 = 0;
+  int default_vt100 = color & kwsysTerminal_Color_AssumeVT100;
+  int default_tty = color & kwsysTerminal_Color_AssumeTTY;
+#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
+  CONSOLE_SCREEN_BUFFER_INFO hOutInfo;
+  HANDLE hOut = kwsysTerminalGetStreamHandle(stream);
+  if (GetConsoleScreenBufferInfo(hOut, &hOutInfo)) {
+    pipeIsConsole = 1;
+    kwsysTerminalSetConsoleColor(hOut, &hOutInfo, stream, color);
+  }
+#endif
+  if (!pipeIsConsole &&
+      kwsysTerminalStreamIsVT100(stream, default_vt100, default_tty)) {
+    pipeIsVT100 = 1;
+    kwsysTerminalSetVT100Color(stream, color);
+  }
+
+  /* Format the text into the stream.  */
+  {
+    va_list var_args;
+    va_start(var_args, format);
+    vfprintf(stream, format, var_args);
+    va_end(var_args);
+  }
+
+/* Restore the normal color state for the stream.  */
+#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
+  if (pipeIsConsole) {
+    kwsysTerminalSetConsoleColor(hOut, &hOutInfo, stream,
+                                 kwsysTerminal_Color_Normal);
+  }
+#endif
+  if (pipeIsVT100) {
+    kwsysTerminalSetVT100Color(stream, kwsysTerminal_Color_Normal);
+  }
+}
+
+/* Detect cases when a stream is definitely not interactive.  */
+#if !defined(KWSYS_TERMINAL_ISATTY_WORKS)
+static int kwsysTerminalStreamIsNotInteractive(FILE* stream)
+{
+  /* The given stream is definitely not interactive if it is a regular
+     file.
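For reference, a minimal usage sketch of kwsysTerminal_cfprintf() as implemented above (not part of the patch). The unmangled "kwsys" names and the <kwsys/Terminal.h> install path are assumptions of this example:

// Sketch only: color is applied only when the stream is detected as a Windows
// console or a VT100-capable terminal; otherwise plain text is written.
#include <kwsys/Terminal.h>

int main()
{
  kwsysTerminal_cfprintf(kwsysTerminal_Color_ForegroundRed |
                           kwsysTerminal_Color_ForegroundBold,
                         stderr, "error: %s\n", "demo message");
  return 0;
}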
*/ + struct stat stream_stat; + if (fstat(fileno(stream), &stream_stat) == 0) { + if (stream_stat.st_mode & S_IFREG) { + return 1; + } + } + return 0; +} +#endif + +/* List of terminal names known to support VT100 color escape sequences. */ +static const char* kwsysTerminalVT100Names[] = { "Eterm", + "alacritty", + "alacritty-direct", + "ansi", + "color-xterm", + "con132x25", + "con132x30", + "con132x43", + "con132x60", + "con80x25", + "con80x28", + "con80x30", + "con80x43", + "con80x50", + "con80x60", + "cons25", + "console", + "cygwin", + "dtterm", + "eterm-color", + "gnome", + "gnome-256color", + "konsole", + "konsole-256color", + "kterm", + "linux", + "msys", + "linux-c", + "mach-color", + "mlterm", + "putty", + "putty-256color", + "rxvt", + "rxvt-256color", + "rxvt-cygwin", + "rxvt-cygwin-native", + "rxvt-unicode", + "rxvt-unicode-256color", + "screen", + "screen-256color", + "screen-256color-bce", + "screen-bce", + "screen-w", + "screen.linux", + "tmux", + "tmux-256color", + "vt100", + "xterm", + "xterm-16color", + "xterm-256color", + "xterm-88color", + "xterm-color", + "xterm-debian", + "xterm-kitty", + "xterm-termite", + 0 }; + +/* Detect whether a stream is displayed in a VT100-compatible terminal. */ +static int kwsysTerminalStreamIsVT100(FILE* stream, int default_vt100, + int default_tty) +{ + /* Force color according to http://bixense.com/clicolors/ convention. */ + { + const char* clicolor_force = getenv("CLICOLOR_FORCE"); + if (clicolor_force && *clicolor_force && + strcmp(clicolor_force, "0") != 0) { + return 1; + } + } + + /* If running inside emacs the terminal is not VT100. Some emacs + seem to claim the TERM is xterm even though they do not support + VT100 escapes. */ + { + const char* emacs = getenv("EMACS"); + if (emacs && *emacs == 't') { + return 0; + } + } + + /* Check for a valid terminal. */ + if (!default_vt100) { + const char** t = 0; + const char* term = getenv("TERM"); + if (term) { + for (t = kwsysTerminalVT100Names; *t && strcmp(term, *t) != 0; ++t) { + } + } + if (!(t && *t)) { + return 0; + } + } + +#if defined(KWSYS_TERMINAL_ISATTY_WORKS) + /* Make sure the stream is a tty. */ + (void)default_tty; + return isatty(fileno(stream)) ? 1 : 0; +#else + /* Check for cases in which the stream is definitely not a tty. */ + if (kwsysTerminalStreamIsNotInteractive(stream)) { + return 0; + } + + /* Use the provided default for whether this is a tty. */ + return default_tty; +#endif +} + +/* VT100 escape sequence strings. 
*/ +#if defined(__MVS__) +/* if building on z/OS (aka MVS), assume we are using EBCDIC */ +# define ESCAPE_CHAR "\47" +#else +# define ESCAPE_CHAR "\33" +#endif + +#define KWSYS_TERMINAL_VT100_NORMAL ESCAPE_CHAR "[0m" +#define KWSYS_TERMINAL_VT100_BOLD ESCAPE_CHAR "[1m" +#define KWSYS_TERMINAL_VT100_UNDERLINE ESCAPE_CHAR "[4m" +#define KWSYS_TERMINAL_VT100_BLINK ESCAPE_CHAR "[5m" +#define KWSYS_TERMINAL_VT100_INVERSE ESCAPE_CHAR "[7m" +#define KWSYS_TERMINAL_VT100_FOREGROUND_BLACK ESCAPE_CHAR "[30m" +#define KWSYS_TERMINAL_VT100_FOREGROUND_RED ESCAPE_CHAR "[31m" +#define KWSYS_TERMINAL_VT100_FOREGROUND_GREEN ESCAPE_CHAR "[32m" +#define KWSYS_TERMINAL_VT100_FOREGROUND_YELLOW ESCAPE_CHAR "[33m" +#define KWSYS_TERMINAL_VT100_FOREGROUND_BLUE ESCAPE_CHAR "[34m" +#define KWSYS_TERMINAL_VT100_FOREGROUND_MAGENTA ESCAPE_CHAR "[35m" +#define KWSYS_TERMINAL_VT100_FOREGROUND_CYAN ESCAPE_CHAR "[36m" +#define KWSYS_TERMINAL_VT100_FOREGROUND_WHITE ESCAPE_CHAR "[37m" +#define KWSYS_TERMINAL_VT100_BACKGROUND_BLACK ESCAPE_CHAR "[40m" +#define KWSYS_TERMINAL_VT100_BACKGROUND_RED ESCAPE_CHAR "[41m" +#define KWSYS_TERMINAL_VT100_BACKGROUND_GREEN ESCAPE_CHAR "[42m" +#define KWSYS_TERMINAL_VT100_BACKGROUND_YELLOW ESCAPE_CHAR "[43m" +#define KWSYS_TERMINAL_VT100_BACKGROUND_BLUE ESCAPE_CHAR "[44m" +#define KWSYS_TERMINAL_VT100_BACKGROUND_MAGENTA ESCAPE_CHAR "[45m" +#define KWSYS_TERMINAL_VT100_BACKGROUND_CYAN ESCAPE_CHAR "[46m" +#define KWSYS_TERMINAL_VT100_BACKGROUND_WHITE ESCAPE_CHAR "[47m" + +/* Write VT100 escape sequences to the stream for the given color. */ +static void kwsysTerminalSetVT100Color(FILE* stream, int color) +{ + if (color == kwsysTerminal_Color_Normal) { + fprintf(stream, KWSYS_TERMINAL_VT100_NORMAL); + return; + } + + switch (color & kwsysTerminal_Color_ForegroundMask) { + case kwsysTerminal_Color_Normal: + fprintf(stream, KWSYS_TERMINAL_VT100_NORMAL); + break; + case kwsysTerminal_Color_ForegroundBlack: + fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_BLACK); + break; + case kwsysTerminal_Color_ForegroundRed: + fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_RED); + break; + case kwsysTerminal_Color_ForegroundGreen: + fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_GREEN); + break; + case kwsysTerminal_Color_ForegroundYellow: + fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_YELLOW); + break; + case kwsysTerminal_Color_ForegroundBlue: + fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_BLUE); + break; + case kwsysTerminal_Color_ForegroundMagenta: + fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_MAGENTA); + break; + case kwsysTerminal_Color_ForegroundCyan: + fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_CYAN); + break; + case kwsysTerminal_Color_ForegroundWhite: + fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_WHITE); + break; + } + switch (color & kwsysTerminal_Color_BackgroundMask) { + case kwsysTerminal_Color_BackgroundBlack: + fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_BLACK); + break; + case kwsysTerminal_Color_BackgroundRed: + fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_RED); + break; + case kwsysTerminal_Color_BackgroundGreen: + fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_GREEN); + break; + case kwsysTerminal_Color_BackgroundYellow: + fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_YELLOW); + break; + case kwsysTerminal_Color_BackgroundBlue: + fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_BLUE); + break; + case kwsysTerminal_Color_BackgroundMagenta: + fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_MAGENTA); + break; + case kwsysTerminal_Color_BackgroundCyan: + 
fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_CYAN); + break; + case kwsysTerminal_Color_BackgroundWhite: + fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_WHITE); + break; + } + if (color & kwsysTerminal_Color_ForegroundBold) { + fprintf(stream, KWSYS_TERMINAL_VT100_BOLD); + } +} + +#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE) + +# define KWSYS_TERMINAL_MASK_FOREGROUND \ + (FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED | \ + FOREGROUND_INTENSITY) +# define KWSYS_TERMINAL_MASK_BACKGROUND \ + (BACKGROUND_BLUE | BACKGROUND_GREEN | BACKGROUND_RED | \ + BACKGROUND_INTENSITY) + +/* Get the Windows handle for a FILE stream. */ +static HANDLE kwsysTerminalGetStreamHandle(FILE* stream) +{ + /* Get the C-library file descriptor from the stream. */ + int fd = fileno(stream); + +# if defined(__CYGWIN__) + /* Cygwin seems to have an extra pipe level. If the file descriptor + corresponds to stdout or stderr then obtain the matching windows + handle directly. */ + if (fd == fileno(stdout)) { + return GetStdHandle(STD_OUTPUT_HANDLE); + } else if (fd == fileno(stderr)) { + return GetStdHandle(STD_ERROR_HANDLE); + } +# endif + + /* Get the underlying Windows handle for the descriptor. */ + return (HANDLE)_get_osfhandle(fd); +} + +/* Set color attributes in a Windows console. */ +static void kwsysTerminalSetConsoleColor(HANDLE hOut, + CONSOLE_SCREEN_BUFFER_INFO* hOutInfo, + FILE* stream, int color) +{ + WORD attributes = 0; + switch (color & kwsysTerminal_Color_ForegroundMask) { + case kwsysTerminal_Color_Normal: + attributes |= hOutInfo->wAttributes & KWSYS_TERMINAL_MASK_FOREGROUND; + break; + case kwsysTerminal_Color_ForegroundBlack: + attributes |= 0; + break; + case kwsysTerminal_Color_ForegroundRed: + attributes |= FOREGROUND_RED; + break; + case kwsysTerminal_Color_ForegroundGreen: + attributes |= FOREGROUND_GREEN; + break; + case kwsysTerminal_Color_ForegroundYellow: + attributes |= FOREGROUND_RED | FOREGROUND_GREEN; + break; + case kwsysTerminal_Color_ForegroundBlue: + attributes |= FOREGROUND_BLUE; + break; + case kwsysTerminal_Color_ForegroundMagenta: + attributes |= FOREGROUND_RED | FOREGROUND_BLUE; + break; + case kwsysTerminal_Color_ForegroundCyan: + attributes |= FOREGROUND_BLUE | FOREGROUND_GREEN; + break; + case kwsysTerminal_Color_ForegroundWhite: + attributes |= FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED; + break; + } + switch (color & kwsysTerminal_Color_BackgroundMask) { + case kwsysTerminal_Color_Normal: + attributes |= hOutInfo->wAttributes & KWSYS_TERMINAL_MASK_BACKGROUND; + break; + case kwsysTerminal_Color_BackgroundBlack: + attributes |= 0; + break; + case kwsysTerminal_Color_BackgroundRed: + attributes |= BACKGROUND_RED; + break; + case kwsysTerminal_Color_BackgroundGreen: + attributes |= BACKGROUND_GREEN; + break; + case kwsysTerminal_Color_BackgroundYellow: + attributes |= BACKGROUND_RED | BACKGROUND_GREEN; + break; + case kwsysTerminal_Color_BackgroundBlue: + attributes |= BACKGROUND_BLUE; + break; + case kwsysTerminal_Color_BackgroundMagenta: + attributes |= BACKGROUND_RED | BACKGROUND_BLUE; + break; + case kwsysTerminal_Color_BackgroundCyan: + attributes |= BACKGROUND_BLUE | BACKGROUND_GREEN; + break; + case kwsysTerminal_Color_BackgroundWhite: + attributes |= BACKGROUND_BLUE | BACKGROUND_GREEN | BACKGROUND_RED; + break; + } + if (color & kwsysTerminal_Color_ForegroundBold) { + attributes |= FOREGROUND_INTENSITY; + } + if (color & kwsysTerminal_Color_BackgroundBold) { + attributes |= BACKGROUND_INTENSITY; + } + fflush(stream); + SetConsoleTextAttribute(hOut, 
attributes); +} +#endif diff --git a/test/API/driver/kwsys/Terminal.h.in b/test/API/driver/kwsys/Terminal.h.in new file mode 100644 index 00000000000..1a2c7452fa1 --- /dev/null +++ b/test/API/driver/kwsys/Terminal.h.in @@ -0,0 +1,170 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_Terminal_h +#define @KWSYS_NAMESPACE@_Terminal_h + +#include <@KWSYS_NAMESPACE@/Configure.h> + +#include /* For file stream type FILE. */ + +/* Redefine all public interface symbol names to be in the proper + namespace. These macros are used internally to kwsys only, and are + not visible to user code. Use kwsysHeaderDump.pl to reproduce + these macros after making changes to the interface. */ +#if !defined(KWSYS_NAMESPACE) +# define kwsys_ns(x) @KWSYS_NAMESPACE@##x +# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT +#endif +#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# define kwsysTerminal_cfprintf kwsys_ns(Terminal_cfprintf) +# define kwsysTerminal_Color_e kwsys_ns(Terminal_Color_e) +# define kwsysTerminal_Color_Normal kwsys_ns(Terminal_Color_Normal) +# define kwsysTerminal_Color_ForegroundBlack \ + kwsys_ns(Terminal_Color_ForegroundBlack) +# define kwsysTerminal_Color_ForegroundRed \ + kwsys_ns(Terminal_Color_ForegroundRed) +# define kwsysTerminal_Color_ForegroundGreen \ + kwsys_ns(Terminal_Color_ForegroundGreen) +# define kwsysTerminal_Color_ForegroundYellow \ + kwsys_ns(Terminal_Color_ForegroundYellow) +# define kwsysTerminal_Color_ForegroundBlue \ + kwsys_ns(Terminal_Color_ForegroundBlue) +# define kwsysTerminal_Color_ForegroundMagenta \ + kwsys_ns(Terminal_Color_ForegroundMagenta) +# define kwsysTerminal_Color_ForegroundCyan \ + kwsys_ns(Terminal_Color_ForegroundCyan) +# define kwsysTerminal_Color_ForegroundWhite \ + kwsys_ns(Terminal_Color_ForegroundWhite) +# define kwsysTerminal_Color_ForegroundMask \ + kwsys_ns(Terminal_Color_ForegroundMask) +# define kwsysTerminal_Color_BackgroundBlack \ + kwsys_ns(Terminal_Color_BackgroundBlack) +# define kwsysTerminal_Color_BackgroundRed \ + kwsys_ns(Terminal_Color_BackgroundRed) +# define kwsysTerminal_Color_BackgroundGreen \ + kwsys_ns(Terminal_Color_BackgroundGreen) +# define kwsysTerminal_Color_BackgroundYellow \ + kwsys_ns(Terminal_Color_BackgroundYellow) +# define kwsysTerminal_Color_BackgroundBlue \ + kwsys_ns(Terminal_Color_BackgroundBlue) +# define kwsysTerminal_Color_BackgroundMagenta \ + kwsys_ns(Terminal_Color_BackgroundMagenta) +# define kwsysTerminal_Color_BackgroundCyan \ + kwsys_ns(Terminal_Color_BackgroundCyan) +# define kwsysTerminal_Color_BackgroundWhite \ + kwsys_ns(Terminal_Color_BackgroundWhite) +# define kwsysTerminal_Color_BackgroundMask \ + kwsys_ns(Terminal_Color_BackgroundMask) +# define kwsysTerminal_Color_ForegroundBold \ + kwsys_ns(Terminal_Color_ForegroundBold) +# define kwsysTerminal_Color_BackgroundBold \ + kwsys_ns(Terminal_Color_BackgroundBold) +# define kwsysTerminal_Color_AssumeTTY kwsys_ns(Terminal_Color_AssumeTTY) +# define kwsysTerminal_Color_AssumeVT100 kwsys_ns(Terminal_Color_AssumeVT100) +# define kwsysTerminal_Color_AttributeMask \ + kwsys_ns(Terminal_Color_AttributeMask) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * Write colored and formatted text to a stream. Color is used only + * for streams supporting it. The color specification is constructed + * by bitwise-OR-ing enumeration values. At most one foreground and + * one background value may be given. 
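A note for reviewers (not part of the patch): the kwsys_ns() block above renames the public symbols into the configured namespace. A sketch of the effect for a hypothetical namespace "itksys" and a hypothetical <itksys/Terminal.h> install path:

/* Sketch only: with KWSYS_NAMESPACE configured as the hypothetical "itksys",
   the renaming above yields, for example:
     kwsys_ns(Terminal_cfprintf)     -> itksysTerminal_cfprintf
     kwsys_ns(Terminal_Color_Normal) -> itksysTerminal_Color_Normal */
#include <itksys/Terminal.h> /* hypothetical configured header */

static void demo(void)
{
  itksysTerminal_cfprintf(itksysTerminal_Color_ForegroundGreen, stdout,
                          "hello from %s\n", "itksys");
}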
+ * + * Whether the a stream supports color is usually automatically + * detected, but with two exceptions: + * + * - When the stream is displayed in a terminal supporting VT100 + * color but using an intermediate pipe for communication the + * detection of a tty fails. (This typically occurs for a shell + * running in an rxvt terminal in MSYS.) If the caller knows this + * to be the case, the attribute Color_AssumeTTY may be included in + * the color specification. + * + * - When the stream is displayed in a terminal whose TERM + * environment variable is not set or is set to a value that is not + * known to support VT100 colors. If the caller knows this to be + * the case, the attribute Color_AssumeVT100 may be included in the + * color specification. + */ +kwsysEXPORT void kwsysTerminal_cfprintf(int color, FILE* stream, + const char* format, ...); +enum kwsysTerminal_Color_e +{ + /* Normal Text */ + kwsysTerminal_Color_Normal = 0, + + /* Foreground Color */ + kwsysTerminal_Color_ForegroundBlack = 0x1, + kwsysTerminal_Color_ForegroundRed = 0x2, + kwsysTerminal_Color_ForegroundGreen = 0x3, + kwsysTerminal_Color_ForegroundYellow = 0x4, + kwsysTerminal_Color_ForegroundBlue = 0x5, + kwsysTerminal_Color_ForegroundMagenta = 0x6, + kwsysTerminal_Color_ForegroundCyan = 0x7, + kwsysTerminal_Color_ForegroundWhite = 0x8, + kwsysTerminal_Color_ForegroundMask = 0xF, + + /* Background Color */ + kwsysTerminal_Color_BackgroundBlack = 0x10, + kwsysTerminal_Color_BackgroundRed = 0x20, + kwsysTerminal_Color_BackgroundGreen = 0x30, + kwsysTerminal_Color_BackgroundYellow = 0x40, + kwsysTerminal_Color_BackgroundBlue = 0x50, + kwsysTerminal_Color_BackgroundMagenta = 0x60, + kwsysTerminal_Color_BackgroundCyan = 0x70, + kwsysTerminal_Color_BackgroundWhite = 0x80, + kwsysTerminal_Color_BackgroundMask = 0xF0, + + /* Attributes */ + kwsysTerminal_Color_ForegroundBold = 0x100, + kwsysTerminal_Color_BackgroundBold = 0x200, + kwsysTerminal_Color_AssumeTTY = 0x400, + kwsysTerminal_Color_AssumeVT100 = 0x800, + kwsysTerminal_Color_AttributeMask = 0xF00 +}; + +#if defined(__cplusplus) +} /* extern "C" */ +#endif + +/* If we are building a kwsys .c or .cxx file, let it use these macros. + Otherwise, undefine them to keep the namespace clean. 
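As an aside (not part of the patch), a sketch of how the flag bits above combine; it assumes the default "kwsys" prefix and the <kwsys/Terminal.h> install path:

/* Sketch only: one foreground value, one background value, plus attribute
   bits from the enum above.  AssumeTTY helps when output is piped to a
   terminal that the automatic detection cannot see. */
#include <kwsys/Terminal.h> /* assumed install location */

static void print_status(void)
{
  int color = kwsysTerminal_Color_ForegroundWhite | /* 0x8   */
              kwsysTerminal_Color_BackgroundBlue |  /* 0x50  */
              kwsysTerminal_Color_ForegroundBold |  /* 0x100 */
              kwsysTerminal_Color_AssumeTTY;        /* 0x400 */
  kwsysTerminal_cfprintf(color, stdout, "status: %s\n", "ok");
}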
*/ +#if !defined(KWSYS_NAMESPACE) +# undef kwsys_ns +# undef kwsysEXPORT +# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS +# undef kwsysTerminal_cfprintf +# undef kwsysTerminal_Color_e +# undef kwsysTerminal_Color_Normal +# undef kwsysTerminal_Color_ForegroundBlack +# undef kwsysTerminal_Color_ForegroundRed +# undef kwsysTerminal_Color_ForegroundGreen +# undef kwsysTerminal_Color_ForegroundYellow +# undef kwsysTerminal_Color_ForegroundBlue +# undef kwsysTerminal_Color_ForegroundMagenta +# undef kwsysTerminal_Color_ForegroundCyan +# undef kwsysTerminal_Color_ForegroundWhite +# undef kwsysTerminal_Color_ForegroundMask +# undef kwsysTerminal_Color_BackgroundBlack +# undef kwsysTerminal_Color_BackgroundRed +# undef kwsysTerminal_Color_BackgroundGreen +# undef kwsysTerminal_Color_BackgroundYellow +# undef kwsysTerminal_Color_BackgroundBlue +# undef kwsysTerminal_Color_BackgroundMagenta +# undef kwsysTerminal_Color_BackgroundCyan +# undef kwsysTerminal_Color_BackgroundWhite +# undef kwsysTerminal_Color_BackgroundMask +# undef kwsysTerminal_Color_ForegroundBold +# undef kwsysTerminal_Color_BackgroundBold +# undef kwsysTerminal_Color_AssumeTTY +# undef kwsysTerminal_Color_AssumeVT100 +# undef kwsysTerminal_Color_AttributeMask +# endif +#endif + +#endif diff --git a/test/API/driver/kwsys/clang-format.bash b/test/API/driver/kwsys/clang-format.bash new file mode 100644 index 00000000000..b0282abc8aa --- /dev/null +++ b/test/API/driver/kwsys/clang-format.bash @@ -0,0 +1,128 @@ +#!/usr/bin/env bash +#============================================================================= +# Copyright 2015-2017 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +usage='usage: clang-format.bash [] [--] + + --help Print usage plus more detailed help. + + --clang-format Use given clang-format tool. + + --amend Filter files changed by HEAD. + --cached Filter files locally staged for commit. + --modified Filter files locally modified from HEAD. + --tracked Filter files tracked by Git. +' + +help="$usage"' +Example to format locally modified files: + + ./clang-format.bash --modified + +Example to format locally modified files staged for commit: + + ./clang-format.bash --cached + +Example to format files modified by the most recent commit: + + ./clang-format.bash --amend + +Example to format all files: + + ./clang-format.bash --tracked + +Example to format the current topic: + + git filter-branch \ + --tree-filter "./clang-format.bash --tracked" \ + master.. +' + +die() { + echo "$@" 1>&2; exit 1 +} + +#----------------------------------------------------------------------------- + +# Parse command-line arguments. 
+clang_format='' +mode='' +while test "$#" != 0; do + case "$1" in + --amend) mode="amend" ;; + --cached) mode="cached" ;; + --clang-format) shift; clang_format="$1" ;; + --help) echo "$help"; exit 0 ;; + --modified) mode="modified" ;; + --tracked) mode="tracked" ;; + --) shift ; break ;; + -*) die "$usage" ;; + *) break ;; + esac + shift +done +test "$#" = 0 || die "$usage" + +# Find a default tool. +tools=' + clang-format-6.0 + clang-format +' +if test "x$clang_format" = "x"; then + for tool in $tools; do + if type -p "$tool" >/dev/null; then + clang_format="$tool" + break + fi + done +fi + +# Verify that we have a tool. +if ! type -p "$clang_format" >/dev/null; then + echo "Unable to locate a 'clang-format' tool." + exit 1 +fi + +if ! "$clang_format" --version | grep 'clang-format version 6\.0' >/dev/null 2>/dev/null; then + echo "clang-format version 6.0 is required (exactly)" + exit 1 +fi + +# Select listing mode. +case "$mode" in + '') echo "$usage"; exit 0 ;; + amend) git_ls='git diff-tree --diff-filter=AM --name-only HEAD -r --no-commit-id' ;; + cached) git_ls='git diff-index --diff-filter=AM --name-only HEAD --cached' ;; + modified) git_ls='git diff-index --diff-filter=AM --name-only HEAD' ;; + tracked) git_ls='git ls-files' ;; + *) die "invalid mode: $mode" ;; +esac + +# List files as selected above. +list_files() { + $git_ls | + + # Select sources with our attribute. + git check-attr --stdin format.clang-format | + sed -n '/: format\.clang-format: set$/ {s/:[^:]*:[^:]*$//p}' +} + +# Transform configured sources to protect @SYMBOLS@. +list_files | xargs -d '\n' sed -i 's/@\(KWSYS_[A-Z0-9_]\+\)@/x\1x/g' +# Update sources in-place. +list_files | xargs -d '\n' "$clang_format" -i +# Transform configured sources to restore @SYMBOLS@. +list_files | xargs -d '\n' sed -i 's/x\(KWSYS_[A-Z0-9_]\+\)x/@\1@/g' diff --git a/test/API/driver/kwsys/hash_fun.hxx.in b/test/API/driver/kwsys/hash_fun.hxx.in new file mode 100644 index 00000000000..8626c2aa2a1 --- /dev/null +++ b/test/API/driver/kwsys/hash_fun.hxx.in @@ -0,0 +1,166 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +/* + * Copyright (c) 1996 + * Silicon Graphics Computer Systems, Inc. + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Silicon Graphics makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. + * + * + * Copyright (c) 1994 + * Hewlett-Packard Company + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Hewlett-Packard Company makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. 
+ * + */ +#ifndef @KWSYS_NAMESPACE@_hash_fun_hxx +#define @KWSYS_NAMESPACE@_hash_fun_hxx + +#include <@KWSYS_NAMESPACE@/Configure.hxx> + +#include // size_t +#include + +namespace @KWSYS_NAMESPACE@ { + +template +struct hash +{ +}; + +inline size_t _stl_hash_string(const char* __s) +{ + unsigned long __h = 0; + for (; *__s; ++__s) + __h = 5 * __h + *__s; + + return size_t(__h); +} + +template <> +struct hash +{ + size_t operator()(const char* __s) const { return _stl_hash_string(__s); } +}; + +template <> +struct hash +{ + size_t operator()(const char* __s) const { return _stl_hash_string(__s); } +}; + +template <> +struct hash +{ + size_t operator()(const std::string& __s) const + { + return _stl_hash_string(__s.c_str()); + } +}; + +#if !defined(__BORLANDC__) +template <> +struct hash +{ + size_t operator()(const std::string& __s) const + { + return _stl_hash_string(__s.c_str()); + } +}; +#endif + +template <> +struct hash +{ + size_t operator()(char __x) const { return __x; } +}; + +template <> +struct hash +{ + size_t operator()(unsigned char __x) const { return __x; } +}; + +template <> +struct hash +{ + size_t operator()(unsigned char __x) const { return __x; } +}; + +template <> +struct hash +{ + size_t operator()(short __x) const { return __x; } +}; + +template <> +struct hash +{ + size_t operator()(unsigned short __x) const { return __x; } +}; + +template <> +struct hash +{ + size_t operator()(int __x) const { return __x; } +}; + +template <> +struct hash +{ + size_t operator()(unsigned int __x) const { return __x; } +}; + +template <> +struct hash +{ + size_t operator()(long __x) const { return __x; } +}; + +template <> +struct hash +{ + size_t operator()(unsigned long __x) const { return __x; } +}; + +// use long long or __int64 +#if @KWSYS_USE_LONG_LONG@ +template <> +struct hash +{ + size_t operator()(long long __x) const { return __x; } +}; + +template <> +struct hash +{ + size_t operator()(unsigned long long __x) const { return __x; } +}; +#elif @KWSYS_USE___INT64@ +template <> +struct hash<__int64> +{ + size_t operator()(__int64 __x) const { return __x; } +}; +template <> +struct hash +{ + size_t operator()(unsigned __int64 __x) const { return __x; } +}; +#endif // use long long or __int64 + +} // namespace @KWSYS_NAMESPACE@ + +#endif diff --git a/test/API/driver/kwsys/hash_map.hxx.in b/test/API/driver/kwsys/hash_map.hxx.in new file mode 100644 index 00000000000..5f04e9c1680 --- /dev/null +++ b/test/API/driver/kwsys/hash_map.hxx.in @@ -0,0 +1,423 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +/* + * Copyright (c) 1996 + * Silicon Graphics Computer Systems, Inc. + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Silicon Graphics makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. 
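For reference, a usage sketch of the hash<> functors defined above in hash_fun.hxx (not part of the patch; assumes the configured namespace is "kwsys" and the header installs as <kwsys/hash_fun.hxx>):

// Sketch only: strings hash via _stl_hash_string (h = 5*h + c over the
// characters); integral types hash to their own value.
#include <kwsys/hash_fun.hxx>
#include <iostream>
#include <string>

int main()
{
  kwsys::hash<std::string> hs;
  kwsys::hash<int> hi;
  std::cout << hs(std::string("kwsys")) << " " << hi(42) << "\n";
  return 0;
}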
+ * + * + * Copyright (c) 1994 + * Hewlett-Packard Company + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Hewlett-Packard Company makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. + * + */ +#ifndef @KWSYS_NAMESPACE@_hash_map_hxx +#define @KWSYS_NAMESPACE@_hash_map_hxx + +#include <@KWSYS_NAMESPACE@/hashtable.hxx> + +#include <@KWSYS_NAMESPACE@/hash_fun.hxx> + +#include // equal_to + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4284) +# pragma warning(disable : 4786) +#endif + +#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32) +# pragma set woff 1174 +# pragma set woff 1375 +#endif + +namespace @KWSYS_NAMESPACE@ { + +// select1st is an extension: it is not part of the standard. +template +struct hash_select1st +{ + const T1& operator()(const std::pair& __x) const + { + return __x.first; + } +}; + +// Forward declaration of equality operator; needed for friend declaration. + +template , + class _EqualKey = std::equal_to<_Key>, + class _Alloc = std::allocator > +class hash_map; + +template +bool operator==(const hash_map<_Key, _Tp, _HashFn, _EqKey, _Alloc>&, + const hash_map<_Key, _Tp, _HashFn, _EqKey, _Alloc>&); + +template +class hash_map +{ +private: + typedef hashtable, _Key, _HashFcn, + hash_select1st, _EqualKey, _Alloc> + _Ht; + _Ht _M_ht; + +public: + typedef typename _Ht::key_type key_type; + typedef _Tp data_type; + typedef _Tp mapped_type; + typedef typename _Ht::value_type value_type; + typedef typename _Ht::hasher hasher; + typedef typename _Ht::key_equal key_equal; + + typedef typename _Ht::size_type size_type; + typedef typename _Ht::difference_type difference_type; + typedef typename _Ht::pointer pointer; + typedef typename _Ht::const_pointer const_pointer; + typedef typename _Ht::reference reference; + typedef typename _Ht::const_reference const_reference; + + typedef typename _Ht::iterator iterator; + typedef typename _Ht::const_iterator const_iterator; + + typedef typename _Ht::allocator_type allocator_type; + + hasher hash_funct() const { return _M_ht.hash_funct(); } + key_equal key_eq() const { return _M_ht.key_eq(); } + allocator_type get_allocator() const { return _M_ht.get_allocator(); } + +public: + hash_map() + : _M_ht(100, hasher(), key_equal(), allocator_type()) + { + } + explicit hash_map(size_type __n) + : _M_ht(__n, hasher(), key_equal(), allocator_type()) + { + } + hash_map(size_type __n, const hasher& __hf) + : _M_ht(__n, __hf, key_equal(), allocator_type()) + { + } + hash_map(size_type __n, const hasher& __hf, const key_equal& __eql, + const allocator_type& __a = allocator_type()) + : _M_ht(__n, __hf, __eql, __a) + { + } + + template + hash_map(_InputIterator __f, _InputIterator __l) + : _M_ht(100, hasher(), key_equal(), allocator_type()) + { + _M_ht.insert_unique(__f, __l); + } + template + hash_map(_InputIterator __f, _InputIterator __l, size_type __n) + : _M_ht(__n, hasher(), key_equal(), allocator_type()) + { + _M_ht.insert_unique(__f, __l); + } + template + hash_map(_InputIterator __f, _InputIterator __l, size_type __n, + const hasher& __hf) + : _M_ht(__n, __hf, key_equal(), allocator_type()) + { + _M_ht.insert_unique(__f, __l); + } + 
template + hash_map(_InputIterator __f, _InputIterator __l, size_type __n, + const hasher& __hf, const key_equal& __eql, + const allocator_type& __a = allocator_type()) + : _M_ht(__n, __hf, __eql, __a) + { + _M_ht.insert_unique(__f, __l); + } + +public: + size_type size() const { return _M_ht.size(); } + size_type max_size() const { return _M_ht.max_size(); } + bool empty() const { return _M_ht.empty(); } + void swap(hash_map& __hs) { _M_ht.swap(__hs._M_ht); } + + friend bool operator==<>(const hash_map&, const hash_map&); + + iterator begin() { return _M_ht.begin(); } + iterator end() { return _M_ht.end(); } + const_iterator begin() const { return _M_ht.begin(); } + const_iterator end() const { return _M_ht.end(); } + +public: + std::pair insert(const value_type& __obj) + { + return _M_ht.insert_unique(__obj); + } + template + void insert(_InputIterator __f, _InputIterator __l) + { + _M_ht.insert_unique(__f, __l); + } + std::pair insert_noresize(const value_type& __obj) + { + return _M_ht.insert_unique_noresize(__obj); + } + + iterator find(const key_type& __key) { return _M_ht.find(__key); } + const_iterator find(const key_type& __key) const + { + return _M_ht.find(__key); + } + + _Tp& operator[](const key_type& __key) + { + return _M_ht.find_or_insert(value_type(__key, _Tp())).second; + } + + size_type count(const key_type& __key) const { return _M_ht.count(__key); } + + std::pair equal_range(const key_type& __key) + { + return _M_ht.equal_range(__key); + } + std::pair equal_range( + const key_type& __key) const + { + return _M_ht.equal_range(__key); + } + + size_type erase(const key_type& __key) { return _M_ht.erase(__key); } + void erase(iterator __it) { _M_ht.erase(__it); } + void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); } + void clear() { _M_ht.clear(); } + + void resize(size_type __hint) { _M_ht.resize(__hint); } + size_type bucket_count() const { return _M_ht.bucket_count(); } + size_type max_bucket_count() const { return _M_ht.max_bucket_count(); } + size_type elems_in_bucket(size_type __n) const + { + return _M_ht.elems_in_bucket(__n); + } +}; + +template +bool operator==(const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1, + const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2) +{ + return __hm1._M_ht == __hm2._M_ht; +} + +template +inline bool operator!=( + const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1, + const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2) +{ + return !(__hm1 == __hm2); +} + +template +inline void swap(hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1, + hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2) +{ + __hm1.swap(__hm2); +} + +// Forward declaration of equality operator; needed for friend declaration. 
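Before the multimap variant below, a usage sketch of the hash_map defined above (not part of the patch; assumes the "kwsys" namespace and the <kwsys/hash_map.hxx> install path):

// Sketch only: basic hash_map usage with the members defined above.
#include <kwsys/hash_map.hxx>
#include <iostream>
#include <string>

int main()
{
  kwsys::hash_map<std::string, int> counts;
  counts["apple"] = 3; // find_or_insert via operator[]
  counts.insert(std::make_pair(std::string("pear"), 5));

  kwsys::hash_map<std::string, int>::iterator it = counts.find("apple");
  if (it != counts.end()) {
    std::cout << it->first << " -> " << it->second << "\n";
  }
  std::cout << "buckets: " << counts.bucket_count() << "\n";
  return 0;
}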
+ +template , + class _EqualKey = std::equal_to<_Key>, + class _Alloc = std::allocator > +class hash_multimap; + +template +bool operator==(const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm1, + const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm2); + +template +class hash_multimap +{ +private: + typedef hashtable, _Key, _HashFcn, + hash_select1st, _EqualKey, _Alloc> + _Ht; + _Ht _M_ht; + +public: + typedef typename _Ht::key_type key_type; + typedef _Tp data_type; + typedef _Tp mapped_type; + typedef typename _Ht::value_type value_type; + typedef typename _Ht::hasher hasher; + typedef typename _Ht::key_equal key_equal; + + typedef typename _Ht::size_type size_type; + typedef typename _Ht::difference_type difference_type; + typedef typename _Ht::pointer pointer; + typedef typename _Ht::const_pointer const_pointer; + typedef typename _Ht::reference reference; + typedef typename _Ht::const_reference const_reference; + + typedef typename _Ht::iterator iterator; + typedef typename _Ht::const_iterator const_iterator; + + typedef typename _Ht::allocator_type allocator_type; + + hasher hash_funct() const { return _M_ht.hash_funct(); } + key_equal key_eq() const { return _M_ht.key_eq(); } + allocator_type get_allocator() const { return _M_ht.get_allocator(); } + +public: + hash_multimap() + : _M_ht(100, hasher(), key_equal(), allocator_type()) + { + } + explicit hash_multimap(size_type __n) + : _M_ht(__n, hasher(), key_equal(), allocator_type()) + { + } + hash_multimap(size_type __n, const hasher& __hf) + : _M_ht(__n, __hf, key_equal(), allocator_type()) + { + } + hash_multimap(size_type __n, const hasher& __hf, const key_equal& __eql, + const allocator_type& __a = allocator_type()) + : _M_ht(__n, __hf, __eql, __a) + { + } + + template + hash_multimap(_InputIterator __f, _InputIterator __l) + : _M_ht(100, hasher(), key_equal(), allocator_type()) + { + _M_ht.insert_equal(__f, __l); + } + template + hash_multimap(_InputIterator __f, _InputIterator __l, size_type __n) + : _M_ht(__n, hasher(), key_equal(), allocator_type()) + { + _M_ht.insert_equal(__f, __l); + } + template + hash_multimap(_InputIterator __f, _InputIterator __l, size_type __n, + const hasher& __hf) + : _M_ht(__n, __hf, key_equal(), allocator_type()) + { + _M_ht.insert_equal(__f, __l); + } + template + hash_multimap(_InputIterator __f, _InputIterator __l, size_type __n, + const hasher& __hf, const key_equal& __eql, + const allocator_type& __a = allocator_type()) + : _M_ht(__n, __hf, __eql, __a) + { + _M_ht.insert_equal(__f, __l); + } + +public: + size_type size() const { return _M_ht.size(); } + size_type max_size() const { return _M_ht.max_size(); } + bool empty() const { return _M_ht.empty(); } + void swap(hash_multimap& __hs) { _M_ht.swap(__hs._M_ht); } + + friend bool operator==<>(const hash_multimap&, const hash_multimap&); + + iterator begin() { return _M_ht.begin(); } + iterator end() { return _M_ht.end(); } + const_iterator begin() const { return _M_ht.begin(); } + const_iterator end() const { return _M_ht.end(); } + +public: + iterator insert(const value_type& __obj) + { + return _M_ht.insert_equal(__obj); + } + template + void insert(_InputIterator __f, _InputIterator __l) + { + _M_ht.insert_equal(__f, __l); + } + iterator insert_noresize(const value_type& __obj) + { + return _M_ht.insert_equal_noresize(__obj); + } + + iterator find(const key_type& __key) { return _M_ht.find(__key); } + const_iterator find(const key_type& __key) const + { + return _M_ht.find(__key); + } + + size_type count(const key_type& __key) 
const { return _M_ht.count(__key); } + + std::pair equal_range(const key_type& __key) + { + return _M_ht.equal_range(__key); + } + std::pair equal_range( + const key_type& __key) const + { + return _M_ht.equal_range(__key); + } + + size_type erase(const key_type& __key) { return _M_ht.erase(__key); } + void erase(iterator __it) { _M_ht.erase(__it); } + void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); } + void clear() { _M_ht.clear(); } + +public: + void resize(size_type __hint) { _M_ht.resize(__hint); } + size_type bucket_count() const { return _M_ht.bucket_count(); } + size_type max_bucket_count() const { return _M_ht.max_bucket_count(); } + size_type elems_in_bucket(size_type __n) const + { + return _M_ht.elems_in_bucket(__n); + } +}; + +template +bool operator==(const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm1, + const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm2) +{ + return __hm1._M_ht == __hm2._M_ht; +} + +template +inline bool operator!=( + const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm1, + const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm2) +{ + return !(__hm1 == __hm2); +} + +template +inline void swap(hash_multimap<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1, + hash_multimap<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2) +{ + __hm1.swap(__hm2); +} + +} // namespace @KWSYS_NAMESPACE@ + +#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32) +# pragma reset woff 1174 +# pragma reset woff 1375 +#endif + +#if defined(_MSC_VER) +# pragma warning(pop) +#endif + +#endif diff --git a/test/API/driver/kwsys/hash_set.hxx.in b/test/API/driver/kwsys/hash_set.hxx.in new file mode 100644 index 00000000000..f4a37eebdb3 --- /dev/null +++ b/test/API/driver/kwsys/hash_set.hxx.in @@ -0,0 +1,392 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +/* + * Copyright (c) 1996 + * Silicon Graphics Computer Systems, Inc. + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Silicon Graphics makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. + * + * + * Copyright (c) 1994 + * Hewlett-Packard Company + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Hewlett-Packard Company makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. 
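For reference, a usage sketch of the hash_multimap defined above (not part of the patch; assumes the "kwsys" namespace and the <kwsys/hash_map.hxx> install path):

// Sketch only: hash_multimap stores duplicates via insert_equal, unlike hash_map.
#include <kwsys/hash_map.hxx>
#include <iostream>
#include <string>

int main()
{
  kwsys::hash_multimap<std::string, int> mm;
  mm.insert(std::make_pair(std::string("k"), 1));
  mm.insert(std::make_pair(std::string("k"), 2)); // kept: duplicate keys allowed
  std::cout << mm.count("k") << "\n";             // prints 2
  return 0;
}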
+ * + */ +#ifndef @KWSYS_NAMESPACE@_hash_set_hxx +#define @KWSYS_NAMESPACE@_hash_set_hxx + +#include <@KWSYS_NAMESPACE@/hashtable.hxx> + +#include <@KWSYS_NAMESPACE@/hash_fun.hxx> + +#include // equal_to + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4284) +# pragma warning(disable : 4786) +#endif + +#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32) +# pragma set woff 1174 +# pragma set woff 1375 +#endif + +namespace @KWSYS_NAMESPACE@ { + +// identity is an extension: it is not part of the standard. +template +struct _Identity +{ + const _Tp& operator()(const _Tp& __x) const { return __x; } +}; + +// Forward declaration of equality operator; needed for friend declaration. + +template , + class _EqualKey = std::equal_to<_Value>, + class _Alloc = std::allocator > +class hash_set; + +template +bool operator==(const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs1, + const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs2); + +template +class hash_set +{ +private: + typedef hashtable<_Value, _Value, _HashFcn, _Identity<_Value>, _EqualKey, + _Alloc> + _Ht; + _Ht _M_ht; + +public: + typedef typename _Ht::key_type key_type; + typedef typename _Ht::value_type value_type; + typedef typename _Ht::hasher hasher; + typedef typename _Ht::key_equal key_equal; + + typedef typename _Ht::size_type size_type; + typedef typename _Ht::difference_type difference_type; + typedef typename _Ht::const_pointer pointer; + typedef typename _Ht::const_pointer const_pointer; + typedef typename _Ht::const_reference reference; + typedef typename _Ht::const_reference const_reference; + + typedef typename _Ht::const_iterator iterator; + typedef typename _Ht::const_iterator const_iterator; + + typedef typename _Ht::allocator_type allocator_type; + + hasher hash_funct() const { return _M_ht.hash_funct(); } + key_equal key_eq() const { return _M_ht.key_eq(); } + allocator_type get_allocator() const { return _M_ht.get_allocator(); } + +public: + hash_set() + : _M_ht(100, hasher(), key_equal(), allocator_type()) + { + } + explicit hash_set(size_type __n) + : _M_ht(__n, hasher(), key_equal(), allocator_type()) + { + } + hash_set(size_type __n, const hasher& __hf) + : _M_ht(__n, __hf, key_equal(), allocator_type()) + { + } + hash_set(size_type __n, const hasher& __hf, const key_equal& __eql, + const allocator_type& __a = allocator_type()) + : _M_ht(__n, __hf, __eql, __a) + { + } + + template + hash_set(_InputIterator __f, _InputIterator __l) + : _M_ht(100, hasher(), key_equal(), allocator_type()) + { + _M_ht.insert_unique(__f, __l); + } + template + hash_set(_InputIterator __f, _InputIterator __l, size_type __n) + : _M_ht(__n, hasher(), key_equal(), allocator_type()) + { + _M_ht.insert_unique(__f, __l); + } + template + hash_set(_InputIterator __f, _InputIterator __l, size_type __n, + const hasher& __hf) + : _M_ht(__n, __hf, key_equal(), allocator_type()) + { + _M_ht.insert_unique(__f, __l); + } + template + hash_set(_InputIterator __f, _InputIterator __l, size_type __n, + const hasher& __hf, const key_equal& __eql, + const allocator_type& __a = allocator_type()) + : _M_ht(__n, __hf, __eql, __a) + { + _M_ht.insert_unique(__f, __l); + } + +public: + size_type size() const { return _M_ht.size(); } + size_type max_size() const { return _M_ht.max_size(); } + bool empty() const { return _M_ht.empty(); } + void swap(hash_set& __hs) { _M_ht.swap(__hs._M_ht); } + + friend bool operator==<>(const hash_set&, const hash_set&); + + iterator begin() const { return _M_ht.begin(); } 
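For reference, a usage sketch of the hash_set being defined here (not part of the patch; assumes the "kwsys" namespace and the <kwsys/hash_set.hxx> install path):

// Sketch only: hash_set keeps unique values; a second insert of the same
// value is refused by insert_unique.
#include <kwsys/hash_set.hxx>
#include <iostream>
#include <string>

int main()
{
  kwsys::hash_set<std::string> seen;
  seen.insert("a.txt");
  seen.insert("a.txt"); // no effect: already present
  std::cout << seen.size() << "\n";                       // prints 1
  std::cout << (seen.find("a.txt") != seen.end()) << "\n"; // prints 1
  return 0;
}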
+ iterator end() const { return _M_ht.end(); } + +public: + std::pair insert(const value_type& __obj) + { + typedef typename _Ht::iterator _Ht_iterator; + std::pair<_Ht_iterator, bool> __p = _M_ht.insert_unique(__obj); + return std::pair(__p.first, __p.second); + } + template + void insert(_InputIterator __f, _InputIterator __l) + { + _M_ht.insert_unique(__f, __l); + } + std::pair insert_noresize(const value_type& __obj) + { + typedef typename _Ht::iterator _Ht_iterator; + std::pair<_Ht_iterator, bool> __p = _M_ht.insert_unique_noresize(__obj); + return std::pair(__p.first, __p.second); + } + + iterator find(const key_type& __key) const { return _M_ht.find(__key); } + + size_type count(const key_type& __key) const { return _M_ht.count(__key); } + + std::pair equal_range(const key_type& __key) const + { + return _M_ht.equal_range(__key); + } + + size_type erase(const key_type& __key) { return _M_ht.erase(__key); } + void erase(iterator __it) { _M_ht.erase(__it); } + void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); } + void clear() { _M_ht.clear(); } + +public: + void resize(size_type __hint) { _M_ht.resize(__hint); } + size_type bucket_count() const { return _M_ht.bucket_count(); } + size_type max_bucket_count() const { return _M_ht.max_bucket_count(); } + size_type elems_in_bucket(size_type __n) const + { + return _M_ht.elems_in_bucket(__n); + } +}; + +template +bool operator==(const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs1, + const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs2) +{ + return __hs1._M_ht == __hs2._M_ht; +} + +template +inline bool operator!=( + const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs1, + const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs2) +{ + return !(__hs1 == __hs2); +} + +template +inline void swap(hash_set<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1, + hash_set<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2) +{ + __hs1.swap(__hs2); +} + +template , + class _EqualKey = std::equal_to<_Value>, + class _Alloc = std::allocator > +class hash_multiset; + +template +bool operator==(const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1, + const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2); + +template +class hash_multiset +{ +private: + typedef hashtable<_Value, _Value, _HashFcn, _Identity<_Value>, _EqualKey, + _Alloc> + _Ht; + _Ht _M_ht; + +public: + typedef typename _Ht::key_type key_type; + typedef typename _Ht::value_type value_type; + typedef typename _Ht::hasher hasher; + typedef typename _Ht::key_equal key_equal; + + typedef typename _Ht::size_type size_type; + typedef typename _Ht::difference_type difference_type; + typedef typename _Ht::const_pointer pointer; + typedef typename _Ht::const_pointer const_pointer; + typedef typename _Ht::const_reference reference; + typedef typename _Ht::const_reference const_reference; + + typedef typename _Ht::const_iterator iterator; + typedef typename _Ht::const_iterator const_iterator; + + typedef typename _Ht::allocator_type allocator_type; + + hasher hash_funct() const { return _M_ht.hash_funct(); } + key_equal key_eq() const { return _M_ht.key_eq(); } + allocator_type get_allocator() const { return _M_ht.get_allocator(); } + +public: + hash_multiset() + : _M_ht(100, hasher(), key_equal(), allocator_type()) + { + } + explicit hash_multiset(size_type __n) + : _M_ht(__n, hasher(), key_equal(), allocator_type()) + { + } + hash_multiset(size_type __n, const hasher& __hf) + : _M_ht(__n, __hf, key_equal(), allocator_type()) + { + } + hash_multiset(size_type __n, const 
hasher& __hf, const key_equal& __eql, + const allocator_type& __a = allocator_type()) + : _M_ht(__n, __hf, __eql, __a) + { + } + + template + hash_multiset(_InputIterator __f, _InputIterator __l) + : _M_ht(100, hasher(), key_equal(), allocator_type()) + { + _M_ht.insert_equal(__f, __l); + } + template + hash_multiset(_InputIterator __f, _InputIterator __l, size_type __n) + : _M_ht(__n, hasher(), key_equal(), allocator_type()) + { + _M_ht.insert_equal(__f, __l); + } + template + hash_multiset(_InputIterator __f, _InputIterator __l, size_type __n, + const hasher& __hf) + : _M_ht(__n, __hf, key_equal(), allocator_type()) + { + _M_ht.insert_equal(__f, __l); + } + template + hash_multiset(_InputIterator __f, _InputIterator __l, size_type __n, + const hasher& __hf, const key_equal& __eql, + const allocator_type& __a = allocator_type()) + : _M_ht(__n, __hf, __eql, __a) + { + _M_ht.insert_equal(__f, __l); + } + +public: + size_type size() const { return _M_ht.size(); } + size_type max_size() const { return _M_ht.max_size(); } + bool empty() const { return _M_ht.empty(); } + void swap(hash_multiset& hs) { _M_ht.swap(hs._M_ht); } + + friend bool operator==<>(const hash_multiset&, const hash_multiset&); + + iterator begin() const { return _M_ht.begin(); } + iterator end() const { return _M_ht.end(); } + +public: + iterator insert(const value_type& __obj) + { + return _M_ht.insert_equal(__obj); + } + template + void insert(_InputIterator __f, _InputIterator __l) + { + _M_ht.insert_equal(__f, __l); + } + iterator insert_noresize(const value_type& __obj) + { + return _M_ht.insert_equal_noresize(__obj); + } + + iterator find(const key_type& __key) const { return _M_ht.find(__key); } + + size_type count(const key_type& __key) const { return _M_ht.count(__key); } + + std::pair equal_range(const key_type& __key) const + { + return _M_ht.equal_range(__key); + } + + size_type erase(const key_type& __key) { return _M_ht.erase(__key); } + void erase(iterator __it) { _M_ht.erase(__it); } + void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); } + void clear() { _M_ht.clear(); } + +public: + void resize(size_type __hint) { _M_ht.resize(__hint); } + size_type bucket_count() const { return _M_ht.bucket_count(); } + size_type max_bucket_count() const { return _M_ht.max_bucket_count(); } + size_type elems_in_bucket(size_type __n) const + { + return _M_ht.elems_in_bucket(__n); + } +}; + +template +bool operator==(const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1, + const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2) +{ + return __hs1._M_ht == __hs2._M_ht; +} + +template +inline bool operator!=( + const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1, + const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2) +{ + return !(__hs1 == __hs2); +} + +template +inline void swap(hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1, + hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2) +{ + __hs1.swap(__hs2); +} + +} // namespace @KWSYS_NAMESPACE@ + +#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32) +# pragma reset woff 1174 +# pragma reset woff 1375 +#endif + +#if defined(_MSC_VER) +# pragma warning(pop) +#endif + +#endif diff --git a/test/API/driver/kwsys/hashtable.hxx.in b/test/API/driver/kwsys/hashtable.hxx.in new file mode 100644 index 00000000000..8c4b0025f53 --- /dev/null +++ b/test/API/driver/kwsys/hashtable.hxx.in @@ -0,0 +1,995 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. 
See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +/* + * Copyright (c) 1996 + * Silicon Graphics Computer Systems, Inc. + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Silicon Graphics makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. + * + * + * Copyright (c) 1994 + * Hewlett-Packard Company + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Hewlett-Packard Company makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. + * + */ +#ifdef __BORLANDC__ +# pragma warn - 8027 /* 'for' not inlined. */ +# pragma warn - 8026 /* 'exception' not inlined. */ +#endif + +#ifndef @KWSYS_NAMESPACE@_hashtable_hxx +# define @KWSYS_NAMESPACE@_hashtable_hxx + +# include <@KWSYS_NAMESPACE@/Configure.hxx> + +# include // lower_bound +# include // iterator_traits +# include // allocator +# include // size_t +# include // pair +# include // vector + +# if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4284) +# pragma warning(disable : 4786) +# pragma warning(disable : 4512) /* no assignment operator for class */ +# endif +# if defined(__sgi) && !defined(__GNUC__) +# pragma set woff 3970 /* pointer to int conversion */ 3321 3968 +# endif + +// In C++11, clang will warn about using dynamic exception specifications +// as they are deprecated. But as this class is trying to faithfully +// mimic unordered_set and unordered_map, we want to keep the 'throw()' +// decorations below. So we suppress the warning. 
+# if defined(__clang__) && defined(__has_warning) +# if __has_warning("-Wdeprecated") +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated" +# endif +# endif + +namespace @KWSYS_NAMESPACE@ { + +template +struct _Hashtable_node +{ + _Hashtable_node* _M_next; + _Val _M_val; + void public_method_to_quiet_warning_about_all_methods_private(); + +private: + void operator=(_Hashtable_node<_Val> const&) = delete; +}; + +template > +class hashtable; + +template +struct _Hashtable_iterator; + +template +struct _Hashtable_const_iterator; + +template +struct _Hashtable_iterator +{ + typedef hashtable<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, _Alloc> + _Hashtable; + typedef _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, + _Alloc> + iterator; + typedef _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey, + _EqualKey, _Alloc> + const_iterator; + typedef _Hashtable_node<_Val> _Node; + + typedef std::forward_iterator_tag iterator_category; + typedef _Val value_type; + typedef ptrdiff_t difference_type; + typedef size_t size_type; + typedef _Val& reference; + typedef _Val* pointer; + + _Node* _M_cur; + _Hashtable* _M_ht; + + _Hashtable_iterator(_Node* __n, _Hashtable* __tab) + : _M_cur(__n) + , _M_ht(__tab) + { + } + _Hashtable_iterator() {} + reference operator*() const { return _M_cur->_M_val; } + pointer operator->() const { return &(operator*()); } + iterator& operator++(); + iterator operator++(int); + bool operator==(const iterator& __it) const { return _M_cur == __it._M_cur; } + bool operator!=(const iterator& __it) const { return _M_cur != __it._M_cur; } +}; + +template +struct _Hashtable_const_iterator +{ + typedef hashtable<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, _Alloc> + _Hashtable; + typedef _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, + _Alloc> + iterator; + typedef _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey, + _EqualKey, _Alloc> + const_iterator; + typedef _Hashtable_node<_Val> _Node; + + typedef std::forward_iterator_tag iterator_category; + typedef _Val value_type; + typedef ptrdiff_t difference_type; + typedef size_t size_type; + typedef const _Val& reference; + typedef const _Val* pointer; + + const _Node* _M_cur; + const _Hashtable* _M_ht; + + _Hashtable_const_iterator(const _Node* __n, const _Hashtable* __tab) + : _M_cur(__n) + , _M_ht(__tab) + { + } + _Hashtable_const_iterator() {} + _Hashtable_const_iterator(const iterator& __it) + : _M_cur(__it._M_cur) + , _M_ht(__it._M_ht) + { + } + reference operator*() const { return _M_cur->_M_val; } + pointer operator->() const { return &(operator*()); } + const_iterator& operator++(); + const_iterator operator++(int); + bool operator==(const const_iterator& __it) const + { + return _M_cur == __it._M_cur; + } + bool operator!=(const const_iterator& __it) const + { + return _M_cur != __it._M_cur; + } +}; + +// Note: assumes long is at least 32 bits. 
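A note for reviewers (not part of the patch): the bucket count of these containers grows along the prime table defined just below, and _stl_next_prime() picks the smallest listed prime not less than the requested size. A sketch, assuming the "kwsys" namespace and noting that _stl_next_prime is an internal helper shown here only to illustrate the growth policy:

// Sketch only: prime-based bucket growth, e.g. 6 -> 11, 100 -> 193, 50000 -> 98317.
#include <kwsys/hashtable.hxx>
#include <iostream>

int main()
{
  std::cout << kwsys::_stl_next_prime(6) << " "   // 11
            << kwsys::_stl_next_prime(100) << " " // 193
            << kwsys::_stl_next_prime(50000)      // 98317
            << "\n";
  return 0;
}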
+enum +{ + _stl_num_primes = 31 +}; + +// create a function with a static local to that function that returns +// the static +static inline const unsigned long* get_stl_prime_list() +{ + + static const unsigned long _stl_prime_list[_stl_num_primes] = { + 5ul, 11ul, 23ul, 53ul, 97ul, + 193ul, 389ul, 769ul, 1543ul, 3079ul, + 6151ul, 12289ul, 24593ul, 49157ul, 98317ul, + 196613ul, 393241ul, 786433ul, 1572869ul, 3145739ul, + 6291469ul, 12582917ul, 25165843ul, 50331653ul, 100663319ul, + 201326611ul, 402653189ul, 805306457ul, 1610612741ul, 3221225473ul, + 4294967291ul + }; + + return &_stl_prime_list[0]; +} + +static inline size_t _stl_next_prime(size_t __n) +{ + const unsigned long* __first = get_stl_prime_list(); + const unsigned long* __last = get_stl_prime_list() + (int)_stl_num_primes; + const unsigned long* pos = std::lower_bound(__first, __last, __n); + return pos == __last ? *(__last - 1) : *pos; +} + +// Forward declaration of operator==. + +template +class hashtable; + +template +bool operator==(const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht1, + const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht2); + +// Hashtables handle allocators a bit differently than other containers +// do. If we're using standard-conforming allocators, then a hashtable +// unconditionally has a member variable to hold its allocator, even if +// it so happens that all instances of the allocator type are identical. +// This is because, for hashtables, this extra storage is negligible. +// Additionally, a base class wouldn't serve any other purposes; it +// wouldn't, for example, simplify the exception-handling code. + +template +class hashtable +{ +public: + typedef _Key key_type; + typedef _Val value_type; + typedef _HashFcn hasher; + typedef _EqualKey key_equal; + + typedef size_t size_type; + typedef ptrdiff_t difference_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + + hasher hash_funct() const { return _M_hash; } + key_equal key_eq() const { return _M_equals; } + +private: + typedef _Hashtable_node<_Val> _Node; + +public: + typedef typename _Alloc::template rebind<_Val>::other allocator_type; + allocator_type get_allocator() const { return _M_node_allocator; } + +private: + typedef + typename _Alloc::template rebind<_Node>::other _M_node_allocator_type; + typedef + typename _Alloc::template rebind<_Node*>::other _M_node_ptr_allocator_type; + typedef std::vector<_Node*, _M_node_ptr_allocator_type> _M_buckets_type; + +private: + _M_node_allocator_type _M_node_allocator; + hasher _M_hash; + key_equal _M_equals; + _ExtractKey _M_get_key; + _M_buckets_type _M_buckets; + size_type _M_num_elements; + + _Node* _M_get_node() { return _M_node_allocator.allocate(1); } + void _M_put_node(_Node* __p) { _M_node_allocator.deallocate(__p, 1); } + +public: + typedef _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, + _Alloc> + iterator; + typedef _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey, + _EqualKey, _Alloc> + const_iterator; + + friend struct _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey, + _EqualKey, _Alloc>; + friend struct _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey, + _EqualKey, _Alloc>; + +public: + hashtable(size_type __n, const _HashFcn& __hf, const _EqualKey& __eql, + const _ExtractKey& __ext, + const allocator_type& __a = allocator_type()) + : _M_node_allocator(__a) + , _M_hash(__hf) + , _M_equals(__eql) + , _M_get_key(__ext) + , 
_M_buckets(__a) + , _M_num_elements(0) + { + _M_initialize_buckets(__n); + } + + hashtable(size_type __n, const _HashFcn& __hf, const _EqualKey& __eql, + const allocator_type& __a = allocator_type()) + : _M_node_allocator(__a) + , _M_hash(__hf) + , _M_equals(__eql) + , _M_get_key(_ExtractKey()) + , _M_buckets(__a) + , _M_num_elements(0) + { + _M_initialize_buckets(__n); + } + + hashtable(const hashtable& __ht) + : _M_node_allocator(__ht.get_allocator()) + , _M_hash(__ht._M_hash) + , _M_equals(__ht._M_equals) + , _M_get_key(__ht._M_get_key) + , _M_buckets(__ht.get_allocator()) + , _M_num_elements(0) + { + _M_copy_from(__ht); + } + + hashtable& operator=(const hashtable& __ht) + { + if (&__ht != this) { + clear(); + _M_hash = __ht._M_hash; + _M_equals = __ht._M_equals; + _M_get_key = __ht._M_get_key; + _M_copy_from(__ht); + } + return *this; + } + + ~hashtable() { clear(); } + + size_type size() const { return _M_num_elements; } + size_type max_size() const { return size_type(-1); } + bool empty() const { return size() == 0; } + + void swap(hashtable& __ht) + { + std::swap(_M_hash, __ht._M_hash); + std::swap(_M_equals, __ht._M_equals); + std::swap(_M_get_key, __ht._M_get_key); + _M_buckets.swap(__ht._M_buckets); + std::swap(_M_num_elements, __ht._M_num_elements); + } + + iterator begin() + { + for (size_type __n = 0; __n < _M_buckets.size(); ++__n) + if (_M_buckets[__n]) + return iterator(_M_buckets[__n], this); + return end(); + } + + iterator end() { return iterator(nullptr, this); } + + const_iterator begin() const + { + for (size_type __n = 0; __n < _M_buckets.size(); ++__n) + if (_M_buckets[__n]) + return const_iterator(_M_buckets[__n], this); + return end(); + } + + const_iterator end() const { return const_iterator(nullptr, this); } + + friend bool operator==<>(const hashtable&, const hashtable&); + +public: + size_type bucket_count() const { return _M_buckets.size(); } + + size_type max_bucket_count() const + { + return get_stl_prime_list()[(int)_stl_num_primes - 1]; + } + + size_type elems_in_bucket(size_type __bucket) const + { + size_type __result = 0; + for (_Node* __cur = _M_buckets[__bucket]; __cur; __cur = __cur->_M_next) + __result += 1; + return __result; + } + + std::pair insert_unique(const value_type& __obj) + { + resize(_M_num_elements + 1); + return insert_unique_noresize(__obj); + } + + iterator insert_equal(const value_type& __obj) + { + resize(_M_num_elements + 1); + return insert_equal_noresize(__obj); + } + + std::pair insert_unique_noresize(const value_type& __obj); + iterator insert_equal_noresize(const value_type& __obj); + + template + void insert_unique(_InputIterator __f, _InputIterator __l) + { + insert_unique( + __f, __l, + typename std::iterator_traits<_InputIterator>::iterator_category()); + } + + template + void insert_equal(_InputIterator __f, _InputIterator __l) + { + insert_equal( + __f, __l, + typename std::iterator_traits<_InputIterator>::iterator_category()); + } + + template + void insert_unique(_InputIterator __f, _InputIterator __l, + std::input_iterator_tag) + { + for (; __f != __l; ++__f) + insert_unique(*__f); + } + + template + void insert_equal(_InputIterator __f, _InputIterator __l, + std::input_iterator_tag) + { + for (; __f != __l; ++__f) + insert_equal(*__f); + } + + template + void insert_unique(_ForwardIterator __f, _ForwardIterator __l, + std::forward_iterator_tag) + { + size_type __n = 0; + std::distance(__f, __l, __n); + resize(_M_num_elements + __n); + for (; __n > 0; --__n, ++__f) + insert_unique_noresize(*__f); + } + + template 
+ void insert_equal(_ForwardIterator __f, _ForwardIterator __l, + std::forward_iterator_tag) + { + size_type __n = 0; + std::distance(__f, __l, __n); + resize(_M_num_elements + __n); + for (; __n > 0; --__n, ++__f) + insert_equal_noresize(*__f); + } + + reference find_or_insert(const value_type& __obj); + + iterator find(const key_type& __key) + { + size_type __n = _M_bkt_num_key(__key); + _Node* __first; + for (__first = _M_buckets[__n]; + __first && !_M_equals(_M_get_key(__first->_M_val), __key); + __first = __first->_M_next) { + } + return iterator(__first, this); + } + + const_iterator find(const key_type& __key) const + { + size_type __n = _M_bkt_num_key(__key); + const _Node* __first; + for (__first = _M_buckets[__n]; + __first && !_M_equals(_M_get_key(__first->_M_val), __key); + __first = __first->_M_next) { + } + return const_iterator(__first, this); + } + + size_type count(const key_type& __key) const + { + const size_type __n = _M_bkt_num_key(__key); + size_type __result = 0; + + for (const _Node* __cur = _M_buckets[__n]; __cur; __cur = __cur->_M_next) + if (_M_equals(_M_get_key(__cur->_M_val), __key)) + ++__result; + return __result; + } + + std::pair equal_range(const key_type& __key); + + std::pair equal_range( + const key_type& __key) const; + + size_type erase(const key_type& __key); + void erase(const iterator& __it); + void erase(iterator __first, iterator __last); + + void erase(const const_iterator& __it); + void erase(const_iterator __first, const_iterator __last); + + void resize(size_type __num_elements_hint); + void clear(); + +private: + size_type _M_next_size(size_type __n) const { return _stl_next_prime(__n); } + + void _M_initialize_buckets(size_type __n) + { + const size_type __n_buckets = _M_next_size(__n); + _M_buckets.reserve(__n_buckets); + _M_buckets.insert(_M_buckets.end(), __n_buckets, (_Node*)nullptr); + _M_num_elements = 0; + } + + size_type _M_bkt_num_key(const key_type& __key) const + { + return _M_bkt_num_key(__key, _M_buckets.size()); + } + + size_type _M_bkt_num(const value_type& __obj) const + { + return _M_bkt_num_key(_M_get_key(__obj)); + } + + size_type _M_bkt_num_key(const key_type& __key, size_t __n) const + { + return _M_hash(__key) % __n; + } + + size_type _M_bkt_num(const value_type& __obj, size_t __n) const + { + return _M_bkt_num_key(_M_get_key(__obj), __n); + } + + void construct(_Val* p, const _Val& v) { new (p) _Val(v); } + void destroy(_Val* p) + { + (void)p; + p->~_Val(); + } + + _Node* _M_new_node(const value_type& __obj) + { + _Node* __n = _M_get_node(); + __n->_M_next = nullptr; + try { + construct(&__n->_M_val, __obj); + return __n; + } catch (...) 
{ + _M_put_node(__n); + throw; + } + } + + void _M_delete_node(_Node* __n) + { + destroy(&__n->_M_val); + _M_put_node(__n); + } + + void _M_erase_bucket(const size_type __n, _Node* __first, _Node* __last); + void _M_erase_bucket(const size_type __n, _Node* __last); + + void _M_copy_from(const hashtable& __ht); +}; + +template +_Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>& +_Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++() +{ + const _Node* __old = _M_cur; + _M_cur = _M_cur->_M_next; + if (!_M_cur) { + size_type __bucket = _M_ht->_M_bkt_num(__old->_M_val); + while (!_M_cur && ++__bucket < _M_ht->_M_buckets.size()) + _M_cur = _M_ht->_M_buckets[__bucket]; + } + return *this; +} + +template +inline _Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All> +_Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++(int) +{ + iterator __tmp = *this; + ++*this; + return __tmp; +} + +template +_Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>& +_Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++() +{ + const _Node* __old = _M_cur; + _M_cur = _M_cur->_M_next; + if (!_M_cur) { + size_type __bucket = _M_ht->_M_bkt_num(__old->_M_val); + while (!_M_cur && ++__bucket < _M_ht->_M_buckets.size()) + _M_cur = _M_ht->_M_buckets[__bucket]; + } + return *this; +} + +template +inline _Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All> +_Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++(int) +{ + const_iterator __tmp = *this; + ++*this; + return __tmp; +} + +template +bool operator==(const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht1, + const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht2) +{ + typedef typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_Node _Node; + if (__ht1._M_buckets.size() != __ht2._M_buckets.size()) + return false; + for (int __n = 0; __n < __ht1._M_buckets.size(); ++__n) { + _Node* __cur1 = __ht1._M_buckets[__n]; + _Node* __cur2 = __ht2._M_buckets[__n]; + for (; __cur1 && __cur2 && __cur1->_M_val == __cur2->_M_val; + __cur1 = __cur1->_M_next, __cur2 = __cur2->_M_next) { + } + if (__cur1 || __cur2) + return false; + } + return true; +} + +template +inline bool operator!=(const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht1, + const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht2) +{ + return !(__ht1 == __ht2); +} + +template +inline void swap(hashtable<_Val, _Key, _HF, _Extract, _EqKey, _All>& __ht1, + hashtable<_Val, _Key, _HF, _Extract, _EqKey, _All>& __ht2) +{ + __ht1.swap(__ht2); +} + +template +std::pair::iterator, bool> +hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::insert_unique_noresize( + const value_type& __obj) +{ + const size_type __n = _M_bkt_num(__obj); + _Node* __first = _M_buckets[__n]; + + for (_Node* __cur = __first; __cur; __cur = __cur->_M_next) + if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj))) + return std::pair(iterator(__cur, this), false); + + _Node* __tmp = _M_new_node(__obj); + __tmp->_M_next = __first; + _M_buckets[__n] = __tmp; + ++_M_num_elements; + return std::pair(iterator(__tmp, this), true); +} + +template +typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::iterator +hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::insert_equal_noresize( + const value_type& __obj) +{ + const size_type __n = _M_bkt_num(__obj); + _Node* __first = _M_buckets[__n]; + + for (_Node* __cur = __first; __cur; __cur = __cur->_M_next) + if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj))) { + _Node* __tmp = _M_new_node(__obj); + __tmp->_M_next = 
__cur->_M_next; + __cur->_M_next = __tmp; + ++_M_num_elements; + return iterator(__tmp, this); + } + + _Node* __tmp = _M_new_node(__obj); + __tmp->_M_next = __first; + _M_buckets[__n] = __tmp; + ++_M_num_elements; + return iterator(__tmp, this); +} + +template +typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::reference +hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::find_or_insert( + const value_type& __obj) +{ + resize(_M_num_elements + 1); + + size_type __n = _M_bkt_num(__obj); + _Node* __first = _M_buckets[__n]; + + for (_Node* __cur = __first; __cur; __cur = __cur->_M_next) + if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj))) + return __cur->_M_val; + + _Node* __tmp = _M_new_node(__obj); + __tmp->_M_next = __first; + _M_buckets[__n] = __tmp; + ++_M_num_elements; + return __tmp->_M_val; +} + +template +std::pair::iterator, + typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::iterator> +hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::equal_range(const key_type& __key) +{ + typedef std::pair _Pii; + const size_type __n = _M_bkt_num_key(__key); + + for (_Node* __first = _M_buckets[__n]; __first; __first = __first->_M_next) + if (_M_equals(_M_get_key(__first->_M_val), __key)) { + for (_Node* __cur = __first->_M_next; __cur; __cur = __cur->_M_next) + if (!_M_equals(_M_get_key(__cur->_M_val), __key)) + return _Pii(iterator(__first, this), iterator(__cur, this)); + for (size_type __m = __n + 1; __m < _M_buckets.size(); ++__m) + if (_M_buckets[__m]) + return _Pii(iterator(__first, this), + iterator(_M_buckets[__m], this)); + return _Pii(iterator(__first, this), end()); + } + return _Pii(end(), end()); +} + +template +std::pair::const_iterator, + typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::const_iterator> +hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::equal_range( + const key_type& __key) const +{ + typedef std::pair _Pii; + const size_type __n = _M_bkt_num_key(__key); + + for (const _Node* __first = _M_buckets[__n]; __first; + __first = __first->_M_next) { + if (_M_equals(_M_get_key(__first->_M_val), __key)) { + for (const _Node* __cur = __first->_M_next; __cur; + __cur = __cur->_M_next) + if (!_M_equals(_M_get_key(__cur->_M_val), __key)) + return _Pii(const_iterator(__first, this), + const_iterator(__cur, this)); + for (size_type __m = __n + 1; __m < _M_buckets.size(); ++__m) + if (_M_buckets[__m]) + return _Pii(const_iterator(__first, this), + const_iterator(_M_buckets[__m], this)); + return _Pii(const_iterator(__first, this), end()); + } + } + return _Pii(end(), end()); +} + +template +typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::size_type +hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase(const key_type& __key) +{ + const size_type __n = _M_bkt_num_key(__key); + _Node* __first = _M_buckets[__n]; + size_type __erased = 0; + + if (__first) { + _Node* __cur = __first; + _Node* __next = __cur->_M_next; + while (__next) { + if (_M_equals(_M_get_key(__next->_M_val), __key)) { + __cur->_M_next = __next->_M_next; + _M_delete_node(__next); + __next = __cur->_M_next; + ++__erased; + --_M_num_elements; + } else { + __cur = __next; + __next = __cur->_M_next; + } + } + if (_M_equals(_M_get_key(__first->_M_val), __key)) { + _M_buckets[__n] = __first->_M_next; + _M_delete_node(__first); + ++__erased; + --_M_num_elements; + } + } + return __erased; +} + +template +void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase(const iterator& __it) +{ + _Node* __p = __it._M_cur; + if (__p) { + const size_type __n = _M_bkt_num(__p->_M_val); + _Node* __cur = _M_buckets[__n]; + + if (__cur == 
__p) { + _M_buckets[__n] = __cur->_M_next; + _M_delete_node(__cur); + --_M_num_elements; + } else { + _Node* __next = __cur->_M_next; + while (__next) { + if (__next == __p) { + __cur->_M_next = __next->_M_next; + _M_delete_node(__next); + --_M_num_elements; + break; + } else { + __cur = __next; + __next = __cur->_M_next; + } + } + } + } +} + +template +void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase(iterator __first, + iterator __last) +{ + size_type __f_bucket = + __first._M_cur ? _M_bkt_num(__first._M_cur->_M_val) : _M_buckets.size(); + size_type __l_bucket = + __last._M_cur ? _M_bkt_num(__last._M_cur->_M_val) : _M_buckets.size(); + + if (__first._M_cur == __last._M_cur) + return; + else if (__f_bucket == __l_bucket) + _M_erase_bucket(__f_bucket, __first._M_cur, __last._M_cur); + else { + _M_erase_bucket(__f_bucket, __first._M_cur, nullptr); + for (size_type __n = __f_bucket + 1; __n < __l_bucket; ++__n) + _M_erase_bucket(__n, nullptr); + if (__l_bucket != _M_buckets.size()) + _M_erase_bucket(__l_bucket, __last._M_cur); + } +} + +template +inline void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase( + const_iterator __first, const_iterator __last) +{ + erase(iterator(const_cast<_Node*>(__first._M_cur), + const_cast(__first._M_ht)), + iterator(const_cast<_Node*>(__last._M_cur), + const_cast(__last._M_ht))); +} + +template +inline void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase( + const const_iterator& __it) +{ + erase(iterator(const_cast<_Node*>(__it._M_cur), + const_cast(__it._M_ht))); +} + +template +void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::resize( + size_type __num_elements_hint) +{ + const size_type __old_n = _M_buckets.size(); + if (__num_elements_hint > __old_n) { + const size_type __n = _M_next_size(__num_elements_hint); + if (__n > __old_n) { + _M_buckets_type __tmp(__n, (_Node*)(nullptr), + _M_buckets.get_allocator()); + try { + for (size_type __bucket = 0; __bucket < __old_n; ++__bucket) { + _Node* __first = _M_buckets[__bucket]; + while (__first) { + size_type __new_bucket = _M_bkt_num(__first->_M_val, __n); + _M_buckets[__bucket] = __first->_M_next; + __first->_M_next = __tmp[__new_bucket]; + __tmp[__new_bucket] = __first; + __first = _M_buckets[__bucket]; + } + } + _M_buckets.swap(__tmp); + } catch (...) 
{ + for (size_type __bucket = 0; __bucket < __tmp.size(); ++__bucket) { + while (__tmp[__bucket]) { + _Node* __next = __tmp[__bucket]->_M_next; + _M_delete_node(__tmp[__bucket]); + __tmp[__bucket] = __next; + } + } + throw; + } + } + } +} + +template +void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_M_erase_bucket( + const size_type __n, _Node* __first, _Node* __last) +{ + _Node* __cur = _M_buckets[__n]; + if (__cur == __first) + _M_erase_bucket(__n, __last); + else { + _Node* __next; + for (__next = __cur->_M_next; __next != __first; + __cur = __next, __next = __cur->_M_next) + ; + while (__next != __last) { + __cur->_M_next = __next->_M_next; + _M_delete_node(__next); + __next = __cur->_M_next; + --_M_num_elements; + } + } +} + +template +void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_M_erase_bucket( + const size_type __n, _Node* __last) +{ + _Node* __cur = _M_buckets[__n]; + while (__cur != __last) { + _Node* __next = __cur->_M_next; + _M_delete_node(__cur); + __cur = __next; + _M_buckets[__n] = __cur; + --_M_num_elements; + } +} + +template +void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::clear() +{ + for (size_type __i = 0; __i < _M_buckets.size(); ++__i) { + _Node* __cur = _M_buckets[__i]; + while (__cur != nullptr) { + _Node* __next = __cur->_M_next; + _M_delete_node(__cur); + __cur = __next; + } + _M_buckets[__i] = nullptr; + } + _M_num_elements = 0; +} + +template +void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_M_copy_from( + const hashtable& __ht) +{ + _M_buckets.clear(); + _M_buckets.reserve(__ht._M_buckets.size()); + _M_buckets.insert(_M_buckets.end(), __ht._M_buckets.size(), (_Node*)nullptr); + try { + for (size_type __i = 0; __i < __ht._M_buckets.size(); ++__i) { + const _Node* __cur = __ht._M_buckets[__i]; + if (__cur) { + _Node* __copy = _M_new_node(__cur->_M_val); + _M_buckets[__i] = __copy; + + for (_Node* __next = __cur->_M_next; __next; + __cur = __next, __next = __cur->_M_next) { + __copy->_M_next = _M_new_node(__next->_M_val); + __copy = __copy->_M_next; + } + } + } + _M_num_elements = __ht._M_num_elements; + } catch (...) { + clear(); + throw; + } +} + +} // namespace @KWSYS_NAMESPACE@ + +// Undo warning suppression. +# if defined(__clang__) && defined(__has_warning) +# if __has_warning("-Wdeprecated") +# pragma clang diagnostic pop +# endif +# endif + +# if defined(_MSC_VER) +# pragma warning(pop) +# endif + +#endif diff --git a/test/API/driver/kwsys/kwsysHeaderDump.pl b/test/API/driver/kwsys/kwsysHeaderDump.pl new file mode 100644 index 00000000000..e3391e76232 --- /dev/null +++ b/test/API/driver/kwsys/kwsysHeaderDump.pl @@ -0,0 +1,41 @@ +#!/usr/bin/perl +# Distributed under the OSI-approved BSD 3-Clause License. See accompanying +# file Copyright.txt or https://cmake.org/licensing#kwsys for details. + +if ( $#ARGV+1 < 2 ) +{ + print "Usage: ./kwsysHeaderDump.pl
    \n"; + exit(1); +} + +$name = $ARGV[0]; +$max = 0; +open(INFILE, $ARGV[1]); +while (chomp ($line = )) +{ + if (($line !~ /^\#/) && + ($line =~ s/.*kwsys${name}_([A-Za-z0-9_]*).*/\1/) && + ($i{$line}++ < 1)) + { + push(@lines, "$line"); + if (length($line) > $max) + { + $max = length($line); + } + } +} +close(INFILE); + +$width = $max + 13; +print sprintf("#define %-${width}s kwsys_ns(${name})\n", "kwsys${name}"); +foreach $l (@lines) +{ + print sprintf("#define %-${width}s kwsys_ns(${name}_$l)\n", + "kwsys${name}_$l"); +} +print "\n"; +print sprintf("# undef kwsys${name}\n"); +foreach $l (@lines) +{ + print sprintf("# undef kwsys${name}_$l\n"); +} diff --git a/test/API/driver/kwsys/kwsysPlatformTests.cmake b/test/API/driver/kwsys/kwsysPlatformTests.cmake new file mode 100644 index 00000000000..28d3f68e252 --- /dev/null +++ b/test/API/driver/kwsys/kwsysPlatformTests.cmake @@ -0,0 +1,216 @@ +# Distributed under the OSI-approved BSD 3-Clause License. See accompanying +# file Copyright.txt or https://cmake.org/licensing#kwsys for details. + +SET(KWSYS_PLATFORM_TEST_FILE_C kwsysPlatformTestsC.c) +SET(KWSYS_PLATFORM_TEST_FILE_CXX kwsysPlatformTestsCXX.cxx) + +MACRO(KWSYS_PLATFORM_TEST lang var description invert) + IF(NOT DEFINED ${var}_COMPILED) + MESSAGE(STATUS "${description}") + set(maybe_cxx_standard "") + if(CMAKE_VERSION VERSION_LESS 3.8 AND CMAKE_CXX_STANDARD) + set(maybe_cxx_standard "-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}") + endif() + TRY_COMPILE(${var}_COMPILED + ${CMAKE_CURRENT_BINARY_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/${KWSYS_PLATFORM_TEST_FILE_${lang}} + COMPILE_DEFINITIONS -DTEST_${var} ${KWSYS_PLATFORM_TEST_DEFINES} ${KWSYS_PLATFORM_TEST_EXTRA_FLAGS} + CMAKE_FLAGS "-DLINK_LIBRARIES:STRING=${KWSYS_PLATFORM_TEST_LINK_LIBRARIES}" + ${maybe_cxx_standard} + OUTPUT_VARIABLE OUTPUT) + IF(${var}_COMPILED) + FILE(APPEND + ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log + "${description} compiled with the following output:\n${OUTPUT}\n\n") + ELSE() + FILE(APPEND + ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log + "${description} failed to compile with the following output:\n${OUTPUT}\n\n") + ENDIF() + IF(${invert} MATCHES INVERT) + IF(${var}_COMPILED) + MESSAGE(STATUS "${description} - no") + ELSE() + MESSAGE(STATUS "${description} - yes") + ENDIF() + ELSE() + IF(${var}_COMPILED) + MESSAGE(STATUS "${description} - yes") + ELSE() + MESSAGE(STATUS "${description} - no") + ENDIF() + ENDIF() + ENDIF() + IF(${invert} MATCHES INVERT) + IF(${var}_COMPILED) + SET(${var} 0) + ELSE() + SET(${var} 1) + ENDIF() + ELSE() + IF(${var}_COMPILED) + SET(${var} 1) + ELSE() + SET(${var} 0) + ENDIF() + ENDIF() +ENDMACRO() + +MACRO(KWSYS_PLATFORM_TEST_RUN lang var description invert) + IF(NOT DEFINED ${var}) + MESSAGE(STATUS "${description}") + TRY_RUN(${var} ${var}_COMPILED + ${CMAKE_CURRENT_BINARY_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/${KWSYS_PLATFORM_TEST_FILE_${lang}} + COMPILE_DEFINITIONS -DTEST_${var} ${KWSYS_PLATFORM_TEST_DEFINES} ${KWSYS_PLATFORM_TEST_EXTRA_FLAGS} + OUTPUT_VARIABLE OUTPUT) + + # Note that ${var} will be a 0 return value on success. 
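To make the nested result handling easier to follow: TRY_RUN above stores the compile result in ${var}_COMPILED and the probe's exit status in ${var}, and the IF blocks that follow collapse those into a final 1/0 answer (flipped when INVERT is passed). A standalone sketch, with hypothetical names, of the kind of probe source such a run test builds:

// run_probe.cxx (hypothetical): a *_TEST_RUN check compiles and executes
// this; the exit status (0 on success) becomes ${var} before the INVERT
// post-processing below maps it to 1 or 0.
#include <cstdio>

int main()
{
  std::FILE* f = std::fopen("kwsys_run_probe.tmp", "w+b");
  if (!f)
    return 1; // feature probe failed at run time
  std::fclose(f);
  std::remove("kwsys_run_probe.tmp");
  return 0; // feature probe succeeded
}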
+ IF(${var}_COMPILED) + IF(${var}) + FILE(APPEND + ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log + "${description} compiled but failed to run with the following output:\n${OUTPUT}\n\n") + ELSE() + FILE(APPEND + ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log + "${description} compiled and ran with the following output:\n${OUTPUT}\n\n") + ENDIF() + ELSE() + FILE(APPEND + ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log + "${description} failed to compile with the following output:\n${OUTPUT}\n\n") + SET(${var} -1 CACHE INTERNAL "${description} failed to compile.") + ENDIF() + + IF(${invert} MATCHES INVERT) + IF(${var}_COMPILED) + IF(${var}) + MESSAGE(STATUS "${description} - yes") + ELSE() + MESSAGE(STATUS "${description} - no") + ENDIF() + ELSE() + MESSAGE(STATUS "${description} - failed to compile") + ENDIF() + ELSE() + IF(${var}_COMPILED) + IF(${var}) + MESSAGE(STATUS "${description} - no") + ELSE() + MESSAGE(STATUS "${description} - yes") + ENDIF() + ELSE() + MESSAGE(STATUS "${description} - failed to compile") + ENDIF() + ENDIF() + ENDIF() + + IF(${invert} MATCHES INVERT) + IF(${var}_COMPILED) + IF(${var}) + SET(${var} 1) + ELSE() + SET(${var} 0) + ENDIF() + ELSE() + SET(${var} 1) + ENDIF() + ELSE() + IF(${var}_COMPILED) + IF(${var}) + SET(${var} 0) + ELSE() + SET(${var} 1) + ENDIF() + ELSE() + SET(${var} 0) + ENDIF() + ENDIF() +ENDMACRO() + +MACRO(KWSYS_PLATFORM_C_TEST var description invert) + SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_C_TEST_DEFINES}) + SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_C_TEST_EXTRA_FLAGS}) + KWSYS_PLATFORM_TEST(C "${var}" "${description}" "${invert}") + SET(KWSYS_PLATFORM_TEST_DEFINES) + SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS) +ENDMACRO() + +MACRO(KWSYS_PLATFORM_C_TEST_RUN var description invert) + SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_C_TEST_DEFINES}) + SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_C_TEST_EXTRA_FLAGS}) + KWSYS_PLATFORM_TEST_RUN(C "${var}" "${description}" "${invert}") + SET(KWSYS_PLATFORM_TEST_DEFINES) + SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS) +ENDMACRO() + +MACRO(KWSYS_PLATFORM_CXX_TEST var description invert) + SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_CXX_TEST_DEFINES}) + SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_CXX_TEST_EXTRA_FLAGS}) + SET(KWSYS_PLATFORM_TEST_LINK_LIBRARIES ${KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES}) + KWSYS_PLATFORM_TEST(CXX "${var}" "${description}" "${invert}") + SET(KWSYS_PLATFORM_TEST_DEFINES) + SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS) + SET(KWSYS_PLATFORM_TEST_LINK_LIBRARIES) +ENDMACRO() + +MACRO(KWSYS_PLATFORM_CXX_TEST_RUN var description invert) + SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_CXX_TEST_DEFINES}) + SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_CXX_TEST_EXTRA_FLAGS}) + KWSYS_PLATFORM_TEST_RUN(CXX "${var}" "${description}" "${invert}") + SET(KWSYS_PLATFORM_TEST_DEFINES) + SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS) +ENDMACRO() + +#----------------------------------------------------------------------------- +# KWSYS_PLATFORM_INFO_TEST(lang var description) +# +# Compile test named by ${var} and store INFO strings extracted from binary. +MACRO(KWSYS_PLATFORM_INFO_TEST lang var description) + # We can implement this macro on CMake 2.6 and above. + IF("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.6) + SET(${var} "") + ELSE() + # Choose a location for the result binary. + SET(KWSYS_PLATFORM_INFO_FILE + ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/${var}.bin) + + # Compile the test binary. 
+ IF(NOT EXISTS ${KWSYS_PLATFORM_INFO_FILE}) + MESSAGE(STATUS "${description}") + TRY_COMPILE(${var}_COMPILED + ${CMAKE_CURRENT_BINARY_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/${KWSYS_PLATFORM_TEST_FILE_${lang}} + COMPILE_DEFINITIONS -DTEST_${var} + ${KWSYS_PLATFORM_${lang}_TEST_DEFINES} + ${KWSYS_PLATFORM_${lang}_TEST_EXTRA_FLAGS} + OUTPUT_VARIABLE OUTPUT + COPY_FILE ${KWSYS_PLATFORM_INFO_FILE} + ) + IF(${var}_COMPILED) + FILE(APPEND + ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log + "${description} compiled with the following output:\n${OUTPUT}\n\n") + ELSE() + FILE(APPEND + ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log + "${description} failed to compile with the following output:\n${OUTPUT}\n\n") + ENDIF() + IF(${var}_COMPILED) + MESSAGE(STATUS "${description} - compiled") + ELSE() + MESSAGE(STATUS "${description} - failed") + ENDIF() + ENDIF() + + # Parse info strings out of the compiled binary. + IF(${var}_COMPILED) + FILE(STRINGS ${KWSYS_PLATFORM_INFO_FILE} ${var} REGEX "INFO:[A-Za-z0-9]+\\[[^]]*\\]") + ELSE() + SET(${var} "") + ENDIF() + + SET(KWSYS_PLATFORM_INFO_FILE) + ENDIF() +ENDMACRO() diff --git a/test/API/driver/kwsys/kwsysPlatformTestsC.c b/test/API/driver/kwsys/kwsysPlatformTestsC.c new file mode 100644 index 00000000000..b0cf7ad3b0c --- /dev/null +++ b/test/API/driver/kwsys/kwsysPlatformTestsC.c @@ -0,0 +1,108 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +/* + Macros to define main() in a cross-platform way. + + Usage: + + int KWSYS_PLATFORM_TEST_C_MAIN() + { + return 0; + } + + int KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv) + { + (void)argc; (void)argv; + return 0; + } +*/ +#if defined(__CLASSIC_C__) +# define KWSYS_PLATFORM_TEST_C_MAIN() main() +# define KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv) \ + main(argc, argv) int argc; \ + char* argv[]; +#else +# define KWSYS_PLATFORM_TEST_C_MAIN() main(void) +# define KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv) \ + main(int argc, char* argv[]) +#endif + +#ifdef TEST_KWSYS_C_HAS_PTRDIFF_T +# include +int f(ptrdiff_t n) +{ + return n > 0; +} +int KWSYS_PLATFORM_TEST_C_MAIN() +{ + char* p = 0; + ptrdiff_t d = p - p; + (void)d; + return f(p - p); +} +#endif + +#ifdef TEST_KWSYS_C_HAS_SSIZE_T +# include +int f(ssize_t n) +{ + return (int)n; +} +int KWSYS_PLATFORM_TEST_C_MAIN() +{ + ssize_t n = 0; + return f(n); +} +#endif + +#ifdef TEST_KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC +# if defined(__APPLE__) +# include +# if MAC_OS_X_VERSION_MIN_REQUIRED < 101200 +# error "clock_gettime not available on macOS < 10.12" +# endif +# endif +# include +int KWSYS_PLATFORM_TEST_C_MAIN() +{ + struct timespec ts; + return clock_gettime(CLOCK_MONOTONIC, &ts); +} +#endif + +#ifdef TEST_KWSYS_C_TYPE_MACROS +char* info_macros = +# if defined(__SIZEOF_SHORT__) + "INFO:macro[__SIZEOF_SHORT__]\n" +# endif +# if defined(__SIZEOF_INT__) + "INFO:macro[__SIZEOF_INT__]\n" +# endif +# if defined(__SIZEOF_LONG__) + "INFO:macro[__SIZEOF_LONG__]\n" +# endif +# if defined(__SIZEOF_LONG_LONG__) + "INFO:macro[__SIZEOF_LONG_LONG__]\n" +# endif +# if defined(__SHORT_MAX__) + "INFO:macro[__SHORT_MAX__]\n" +# endif +# if defined(__INT_MAX__) + "INFO:macro[__INT_MAX__]\n" +# endif +# if defined(__LONG_MAX__) + "INFO:macro[__LONG_MAX__]\n" +# endif +# if defined(__LONG_LONG_MAX__) + "INFO:macro[__LONG_LONG_MAX__]\n" +# endif + ""; + +int KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv) +{ + int require = 0; + require += info_macros[argc]; + (void)argv; 
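The odd-looking 'require += info_macros[argc]' statement above is what forces the "INFO:macro[...]" string table to survive into the emitted object; the KWSYS_PLATFORM_INFO_TEST macro earlier in this patch then recovers the markers from the unexecuted binary with FILE(STRINGS ... REGEX). A standalone sketch of the same embed-and-grep idea, with hypothetical names:

// probe.cxx (hypothetical): the marker string is kept in the binary so it
// can be grepped out afterwards without ever running the program, e.g.
// "strings probe.o | grep INFO:" (FILE(STRINGS) does the equivalent).
static const char info_sizeof_long[] =
#if defined(__SIZEOF_LONG__) && __SIZEOF_LONG__ == 8
  "INFO:sizeof_long[8]";
#else
  "INFO:sizeof_long[other]";
#endif

int main(int argc, char* argv[])
{
  (void)argv;
  // Indexing the array with a value unknown at compile time keeps the
  // optimizer from discarding the string, the same trick the
  // TEST_KWSYS_C_TYPE_MACROS probe above uses with info_macros.
  return (int)info_sizeof_long[argc % (int)(sizeof(info_sizeof_long) - 1)];
}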
+ return require; +} +#endif diff --git a/test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx b/test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx new file mode 100644 index 00000000000..cfd5666f304 --- /dev/null +++ b/test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx @@ -0,0 +1,335 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifdef TEST_KWSYS_CXX_HAS_CSTDIO +# include +int main() +{ + return 0; +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_LONG_LONG +long long f(long long n) +{ + return n; +} +int main() +{ + long long n = 0; + return static_cast(f(n)); +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS___INT64 +__int64 f(__int64 n) +{ + return n; +} +int main() +{ + __int64 n = 0; + return static_cast(f(n)); +} +#endif + +#ifdef TEST_KWSYS_CXX_STAT_HAS_ST_MTIM +# include + +# include +# include +int main() +{ + struct stat stat1; + (void)stat1.st_mtim.tv_sec; + (void)stat1.st_mtim.tv_nsec; + return 0; +} +#endif + +#ifdef TEST_KWSYS_CXX_STAT_HAS_ST_MTIMESPEC +# include + +# include +# include +int main() +{ + struct stat stat1; + (void)stat1.st_mtimespec.tv_sec; + (void)stat1.st_mtimespec.tv_nsec; + return 0; +} +#endif + +#ifdef TEST_KWSYS_CXX_SAME_LONG_AND___INT64 +void function(long**) +{ +} +int main() +{ + __int64** p = 0; + function(p); + return 0; +} +#endif + +#ifdef TEST_KWSYS_CXX_SAME_LONG_LONG_AND___INT64 +void function(long long**) +{ +} +int main() +{ + __int64** p = 0; + function(p); + return 0; +} +#endif + +#ifdef TEST_KWSYS_IOS_HAS_ISTREAM_LONG_LONG +# include +int test_istream(std::istream& is, long long& x) +{ + return (is >> x) ? 1 : 0; +} +int main() +{ + long long x = 0; + return test_istream(std::cin, x); +} +#endif + +#ifdef TEST_KWSYS_IOS_HAS_OSTREAM_LONG_LONG +# include +int test_ostream(std::ostream& os, long long x) +{ + return (os << x) ? 1 : 0; +} +int main() +{ + long long x = 0; + return test_ostream(std::cout, x); +} +#endif + +#ifdef TEST_KWSYS_IOS_HAS_ISTREAM___INT64 +# include +int test_istream(std::istream& is, __int64& x) +{ + return (is >> x) ? 1 : 0; +} +int main() +{ + __int64 x = 0; + return test_istream(std::cin, x); +} +#endif + +#ifdef TEST_KWSYS_IOS_HAS_OSTREAM___INT64 +# include +int test_ostream(std::ostream& os, __int64 x) +{ + return (os << x) ? 1 : 0; +} +int main() +{ + __int64 x = 0; + return test_ostream(std::cout, x); +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_SETENV +# include +int main() +{ + return setenv("A", "B", 1); +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_UNSETENV +# include +int main() +{ + unsetenv("A"); + return 0; +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H +# include +int main() +{ + char* e = environ[0]; + return e ? 
0 : 1; +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_GETLOADAVG +// Match feature definitions from SystemInformation.cxx +# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE) +# define _GNU_SOURCE +# endif +# include +int main() +{ + double loadavg[3] = { 0.0, 0.0, 0.0 }; + return getloadavg(loadavg, 3); +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_RLIMIT64 +# include +int main() +{ + struct rlimit64 rlim; + return getrlimit64(0, &rlim); +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_ATOLL +# include +int main() +{ + const char* str = "1024"; + return static_cast(atoll(str)); +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_ATOL +# include +int main() +{ + const char* str = "1024"; + return static_cast(atol(str)); +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS__ATOI64 +# include +int main() +{ + const char* str = "1024"; + return static_cast(_atoi64(str)); +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_UTIMES +# include +int main() +{ + struct timeval* current_time = 0; + return utimes("/example", current_time); +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_UTIMENSAT +# include +# include +# if defined(__APPLE__) +# include +# if MAC_OS_X_VERSION_MIN_REQUIRED < 101300 +# error "utimensat not available on macOS < 10.13" +# endif +# endif +int main() +{ + struct timespec times[2] = { { 0, UTIME_OMIT }, { 0, UTIME_NOW } }; + return utimensat(AT_FDCWD, "/example", times, AT_SYMLINK_NOFOLLOW); +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_BACKTRACE +# if defined(__PATHSCALE__) || defined(__PATHCC__) || \ + (defined(__LSB_VERSION__) && (__LSB_VERSION__ < 41)) +backtrace does not work with this compiler or os +# endif +# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE) +# define _GNU_SOURCE +# endif +# include +int main() +{ + void* stackSymbols[256]; + backtrace(stackSymbols, 256); + backtrace_symbols(&stackSymbols[0], 1); + return 0; +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_DLADDR +# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE) +# define _GNU_SOURCE +# endif +# include +int main() +{ + Dl_info info; + int ierr = dladdr((void*)main, &info); + return 0; +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_CXXABI +# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE) +# define _GNU_SOURCE +# endif +# if defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5130 && __linux && \ + __SUNPRO_CC_COMPAT == 'G' +# include +# endif +# include +int main() +{ + int status = 0; + size_t bufferLen = 512; + char buffer[512] = { '\0' }; + const char* function = "_ZN5kwsys17SystemInformation15GetProgramStackEii"; + char* demangledFunction = + abi::__cxa_demangle(function, buffer, &bufferLen, &status); + return status; +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_BORLAND_ASM +int main() +{ + int a = 1; + __asm { + xor EBX, EBX; + mov a, EBX; + } + + return a; +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_BORLAND_ASM_CPUID +int main() +{ + int a = 0; + __asm { + xor EAX, EAX; + cpuid; + mov a, EAX; + } + + return a; +} +#endif + +#ifdef TEST_KWSYS_STL_HAS_WSTRING +# include +void f(std::wstring*) +{ +} +int main() +{ + return 0; +} +#endif + +#ifdef TEST_KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H +# include +int main() +{ + return 0; +} +#endif diff --git a/test/API/driver/kwsys/kwsysPrivate.h b/test/API/driver/kwsys/kwsysPrivate.h new file mode 100644 index 00000000000..dd9c1277fb6 --- /dev/null +++ b/test/API/driver/kwsys/kwsysPrivate.h @@ -0,0 +1,34 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/
+#ifndef KWSYS_NAMESPACE
+# error "Do not include kwsysPrivate.h outside of kwsys c and cxx files."
+#endif
+
+#ifndef _kwsysPrivate_h
+# define _kwsysPrivate_h
+
+/*
+  Define KWSYS_HEADER macro to help the c and cxx files include kwsys
+  headers from the configured namespace directory. The macro can be
+  used like this:
+
+  #include KWSYS_HEADER(Directory.hxx)
+  #include KWSYS_HEADER(std/vector)
+*/
+/* clang-format off */
+#define KWSYS_HEADER(x) KWSYS_HEADER0(KWSYS_NAMESPACE/x)
+/* clang-format on */
+# define KWSYS_HEADER0(x) KWSYS_HEADER1(x)
+# define KWSYS_HEADER1(x) <x>
+
+/*
+  Define KWSYS_NAMESPACE_STRING to be a string constant containing the
+  name configured for this instance of the kwsys library.
+*/
+# define KWSYS_NAMESPACE_STRING KWSYS_NAMESPACE_STRING0(KWSYS_NAMESPACE)
+# define KWSYS_NAMESPACE_STRING0(x) KWSYS_NAMESPACE_STRING1(x)
+# define KWSYS_NAMESPACE_STRING1(x) # x
+
+#else
+# error "kwsysPrivate.h included multiple times."
+#endif
diff --git a/test/API/driver/kwsys/testCommandLineArguments.cxx b/test/API/driver/kwsys/testCommandLineArguments.cxx
new file mode 100644
index 00000000000..1778a9ba8bd
--- /dev/null
+++ b/test/API/driver/kwsys/testCommandLineArguments.cxx
@@ -0,0 +1,209 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+   file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(CommandLineArguments.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "CommandLineArguments.hxx.in"
+#endif
+
+#include <iostream>
+#include <vector>
+
+#include <stddef.h> /* size_t */
+#include <string.h> /* strcmp */
+
+static void* random_ptr = reinterpret_cast<void*>(0x123);
+
+static int argument(const char* arg, const char* value, void* call_data)
+{
+  std::cout << "Got argument: \"" << arg << "\" value: \""
+            << (value ? 
value : "(null)") << "\"" << std::endl; + if (call_data != random_ptr) { + std::cerr << "Problem processing call_data" << std::endl; + return 0; + } + return 1; +} + +static int unknown_argument(const char* argument, void* call_data) +{ + std::cout << "Got unknown argument: \"" << argument << "\"" << std::endl; + if (call_data != random_ptr) { + std::cerr << "Problem processing call_data" << std::endl; + return 0; + } + return 1; +} + +static bool CompareTwoItemsOnList(bool i1, bool i2) +{ + return i1 == i2; +} +static bool CompareTwoItemsOnList(int i1, int i2) +{ + return i1 == i2; +} +static bool CompareTwoItemsOnList(double i1, double i2) +{ + return i1 == i2; +} +static bool CompareTwoItemsOnList(const char* i1, const char* i2) +{ + return strcmp(i1, i2) == 0; +} +static bool CompareTwoItemsOnList(const std::string& i1, const std::string& i2) +{ + return i1 == i2; +} + +int testCommandLineArguments(int argc, char* argv[]) +{ + // Example run: ./testCommandLineArguments --some-int-variable 4 + // --another-bool-variable --some-bool-variable=yes + // --some-stl-string-variable=foobar --set-bool-arg1 --set-bool-arg2 + // --some-string-variable=hello + + int res = 0; + kwsys::CommandLineArguments arg; + arg.Initialize(argc, argv); + + // For error handling + arg.SetClientData(random_ptr); + arg.SetUnknownArgumentCallback(unknown_argument); + + int some_int_variable = 10; + double some_double_variable = 10.10; + char* some_string_variable = nullptr; + std::string some_stl_string_variable; + bool some_bool_variable = false; + bool some_bool_variable1 = false; + bool bool_arg1 = false; + int bool_arg2 = 0; + + std::vector numbers_argument; + int valid_numbers[] = { 5, 1, 8, 3, 7, 1, 3, 9, 7, 1 }; + + std::vector doubles_argument; + double valid_doubles[] = { 12.5, 1.31, 22 }; + + std::vector bools_argument; + bool valid_bools[] = { true, true, false }; + + std::vector strings_argument; + const char* valid_strings[] = { "andy", "bill", "brad", "ken" }; + + std::vector stl_strings_argument; + std::string valid_stl_strings[] = { "ken", "brad", "bill", "andy" }; + + typedef kwsys::CommandLineArguments argT; + + arg.AddArgument("--some-int-variable", argT::SPACE_ARGUMENT, + &some_int_variable, "Set some random int variable"); + arg.AddArgument("--some-double-variable", argT::CONCAT_ARGUMENT, + &some_double_variable, "Set some random double variable"); + arg.AddArgument("--some-string-variable", argT::EQUAL_ARGUMENT, + &some_string_variable, "Set some random string variable"); + arg.AddArgument("--some-stl-string-variable", argT::EQUAL_ARGUMENT, + &some_stl_string_variable, + "Set some random stl string variable"); + arg.AddArgument("--some-bool-variable", argT::EQUAL_ARGUMENT, + &some_bool_variable, "Set some random bool variable"); + arg.AddArgument("--another-bool-variable", argT::NO_ARGUMENT, + &some_bool_variable1, "Set some random bool variable 1"); + arg.AddBooleanArgument("--set-bool-arg1", &bool_arg1, + "Test AddBooleanArgument 1"); + arg.AddBooleanArgument("--set-bool-arg2", &bool_arg2, + "Test AddBooleanArgument 2"); + arg.AddArgument("--some-multi-argument", argT::MULTI_ARGUMENT, + &numbers_argument, "Some multiple values variable"); + arg.AddArgument("-N", argT::SPACE_ARGUMENT, &doubles_argument, + "Some explicit multiple values variable"); + arg.AddArgument("-BB", argT::CONCAT_ARGUMENT, &bools_argument, + "Some explicit multiple values variable"); + arg.AddArgument("-SS", argT::EQUAL_ARGUMENT, &strings_argument, + "Some explicit multiple values variable"); + arg.AddArgument("-SSS", 
argT::MULTI_ARGUMENT, &stl_strings_argument, + "Some explicit multiple values variable"); + + arg.AddCallback("-A", argT::NO_ARGUMENT, argument, random_ptr, + "Some option -A. This option has a multiline comment. It " + "should demonstrate how the code splits lines."); + arg.AddCallback("-B", argT::SPACE_ARGUMENT, argument, random_ptr, + "Option -B takes argument with space"); + arg.AddCallback("-C", argT::EQUAL_ARGUMENT, argument, random_ptr, + "Option -C takes argument after ="); + arg.AddCallback("-D", argT::CONCAT_ARGUMENT, argument, random_ptr, + "This option takes concatenated argument"); + arg.AddCallback("--long1", argT::NO_ARGUMENT, argument, random_ptr, "-A"); + arg.AddCallback("--long2", argT::SPACE_ARGUMENT, argument, random_ptr, "-B"); + arg.AddCallback("--long3", argT::EQUAL_ARGUMENT, argument, random_ptr, + "Same as -C but a bit different"); + arg.AddCallback("--long4", argT::CONCAT_ARGUMENT, argument, random_ptr, + "-C"); + + if (!arg.Parse()) { + std::cerr << "Problem parsing arguments" << std::endl; + res = 1; + } + std::cout << "Help: " << arg.GetHelp() << std::endl; + + std::cout << "Some int variable was set to: " << some_int_variable + << std::endl; + std::cout << "Some double variable was set to: " << some_double_variable + << std::endl; + if (some_string_variable && + strcmp(some_string_variable, "test string with space") == 0) { + std::cout << "Some string variable was set to: " << some_string_variable + << std::endl; + delete[] some_string_variable; + } else { + std::cerr << "Problem setting string variable" << std::endl; + res = 1; + } + size_t cc; +#define CompareTwoLists(list1, list_valid, lsize) \ + do { \ + if (list1.size() != lsize) { \ + std::cerr << "Problem setting " #list1 ". Size is: " << list1.size() \ + << " should be: " << lsize << std::endl; \ + res = 1; \ + } else { \ + std::cout << #list1 " argument set:"; \ + for (cc = 0; cc < lsize; ++cc) { \ + std::cout << " " << list1[cc]; \ + if (!CompareTwoItemsOnList(list1[cc], list_valid[cc])) { \ + std::cerr << "Problem setting " #list1 ". Value of " << cc \ + << " is: [" << list1[cc] << "] <> [" << list_valid[cc] \ + << "]" << std::endl; \ + res = 1; \ + break; \ + } \ + } \ + std::cout << std::endl; \ + } \ + } while (0) + CompareTwoLists(numbers_argument, valid_numbers, 10); + CompareTwoLists(doubles_argument, valid_doubles, 3); + CompareTwoLists(bools_argument, valid_bools, 3); + CompareTwoLists(strings_argument, valid_strings, 4); + CompareTwoLists(stl_strings_argument, valid_stl_strings, 4); + + std::cout << "Some STL String variable was set to: " + << some_stl_string_variable << std::endl; + std::cout << "Some bool variable was set to: " << some_bool_variable + << std::endl; + std::cout << "Some bool variable was set to: " << some_bool_variable1 + << std::endl; + std::cout << "bool_arg1 variable was set to: " << bool_arg1 << std::endl; + std::cout << "bool_arg2 variable was set to: " << bool_arg2 << std::endl; + std::cout << std::endl; + + for (cc = 0; cc < strings_argument.size(); ++cc) { + delete[] strings_argument[cc]; + strings_argument[cc] = nullptr; + } + return res; +} diff --git a/test/API/driver/kwsys/testCommandLineArguments1.cxx b/test/API/driver/kwsys/testCommandLineArguments1.cxx new file mode 100644 index 00000000000..64561b1e3b4 --- /dev/null +++ b/test/API/driver/kwsys/testCommandLineArguments1.cxx @@ -0,0 +1,93 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(CommandLineArguments.hxx) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "CommandLineArguments.hxx.in" +#endif + +#include +#include + +#include /* assert */ +#include /* strcmp */ + +int testCommandLineArguments1(int argc, char* argv[]) +{ + kwsys::CommandLineArguments arg; + arg.Initialize(argc, argv); + + int n = 0; + char* m = nullptr; + std::string p; + int res = 0; + + typedef kwsys::CommandLineArguments argT; + arg.AddArgument("-n", argT::SPACE_ARGUMENT, &n, "Argument N"); + arg.AddArgument("-m", argT::EQUAL_ARGUMENT, &m, "Argument M"); + arg.AddBooleanArgument("-p", &p, "Argument P"); + + arg.StoreUnusedArguments(true); + + if (!arg.Parse()) { + std::cerr << "Problem parsing arguments" << std::endl; + res = 1; + } + if (n != 24) { + std::cout << "Problem setting N. Value of N: " << n << std::endl; + res = 1; + } + if (!m || strcmp(m, "test value") != 0) { + std::cout << "Problem setting M. Value of M: " << m << std::endl; + res = 1; + } + if (p != "1") { + std::cout << "Problem setting P. Value of P: " << p << std::endl; + res = 1; + } + std::cout << "Value of N: " << n << std::endl; + std::cout << "Value of M: " << m << std::endl; + std::cout << "Value of P: " << p << std::endl; + if (m) { + delete[] m; + } + + char** newArgv = nullptr; + int newArgc = 0; + arg.GetUnusedArguments(&newArgc, &newArgv); + int cc; + const char* valid_unused_args[9] = { nullptr, + "--ignored", + "--second-ignored", + "third-ignored", + "some", + "junk", + "at", + "the", + "end" }; + if (newArgc != 9) { + std::cerr << "Bad number of unused arguments: " << newArgc << std::endl; + res = 1; + } + for (cc = 0; cc < newArgc; ++cc) { + assert(newArgv[cc]); /* Quiet Clang scan-build. */ + std::cout << "Unused argument[" << cc << "] = [" << newArgv[cc] << "]" + << std::endl; + if (cc >= 9) { + std::cerr << "Too many unused arguments: " << cc << std::endl; + res = 1; + } else if (valid_unused_args[cc] && + strcmp(valid_unused_args[cc], newArgv[cc]) != 0) { + std::cerr << "Bad unused argument [" << cc << "] \"" << newArgv[cc] + << "\" should be: \"" << valid_unused_args[cc] << "\"" + << std::endl; + res = 1; + } + } + arg.DeleteRemainingArguments(newArgc, &newArgv); + + return res; +} diff --git a/test/API/driver/kwsys/testConfigure.cxx b/test/API/driver/kwsys/testConfigure.cxx new file mode 100644 index 00000000000..a3c2ed3aeda --- /dev/null +++ b/test/API/driver/kwsys/testConfigure.cxx @@ -0,0 +1,30 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying +file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(Configure.hxx) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "Configure.hxx.in" +#endif + +static bool testFallthrough(int n) +{ + int r = 0; + switch (n) { + case 1: + ++r; + KWSYS_FALLTHROUGH; + default: + ++r; + } + return r == 2; +} + +int testConfigure(int, char* []) +{ + bool res = true; + res = testFallthrough(1) && res; + return res ? 0 : 1; +} diff --git a/test/API/driver/kwsys/testConsoleBuf.cxx b/test/API/driver/kwsys/testConsoleBuf.cxx new file mode 100644 index 00000000000..4b7ddf053fc --- /dev/null +++ b/test/API/driver/kwsys/testConsoleBuf.cxx @@ -0,0 +1,782 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. 
See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" + +// Ignore Windows version levels defined by command-line flags. This +// source needs access to all APIs available on the host in order for +// the test to run properly. The test binary is not installed anyway. +#undef _WIN32_WINNT +#undef NTDDI_VERSION + +#include KWSYS_HEADER(Encoding.hxx) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "Encoding.hxx.in" +#endif + +#if defined(_WIN32) + +# include +# include +# include +# include +# include +# include +# include + +# include "testConsoleBuf.hxx" + +# if defined(_MSC_VER) && _MSC_VER >= 1800 +# define KWSYS_WINDOWS_DEPRECATED_GetVersion +# endif +// يونيكود +static const WCHAR UnicodeInputTestString[] = + L"\u064A\u0648\u0646\u064A\u0643\u0648\u062F!"; +static UINT TestCodepage = KWSYS_ENCODING_DEFAULT_CODEPAGE; + +static const DWORD waitTimeout = 10 * 1000; +static STARTUPINFO startupInfo; +static PROCESS_INFORMATION processInfo; +static HANDLE beforeInputEvent; +static HANDLE afterOutputEvent; +static std::string encodedInputTestString; +static std::string encodedTestString; + +static void displayError(DWORD errorCode) +{ + std::cerr.setf(std::ios::hex, std::ios::basefield); + std::cerr << "Failed with error: 0x" << errorCode << "!" << std::endl; + LPWSTR message; + if (FormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM, + nullptr, errorCode, 0, (LPWSTR)&message, 0, nullptr)) { + std::cerr << "Error message: " << kwsys::Encoding::ToNarrow(message) + << std::endl; + HeapFree(GetProcessHeap(), 0, message); + } else { + std::cerr << "FormatMessage() failed with error: 0x" << GetLastError() + << "!" 
<< std::endl; + } + std::cerr.unsetf(std::ios::hex); +} + +std::basic_streambuf* errstream(const char* unused) +{ + static_cast(unused); + return std::cerr.rdbuf(); +} + +std::basic_streambuf* errstream(const wchar_t* unused) +{ + static_cast(unused); + return std::wcerr.rdbuf(); +} + +template +static void dumpBuffers(const T* expected, const T* received, size_t size) +{ + std::basic_ostream err(errstream(expected)); + err << "Expected output: '" << std::basic_string(expected, size) << "'" + << std::endl; + if (err.fail()) { + err.clear(); + err << "--- Error while outputting ---" << std::endl; + } + err << "Received output: '" << std::basic_string(received, size) << "'" + << std::endl; + if (err.fail()) { + err.clear(); + err << "--- Error while outputting ---" << std::endl; + } + std::cerr << "Expected output | Received output" << std::endl; + for (size_t i = 0; i < size; i++) { + std::cerr << std::setbase(16) << std::setfill('0') << " " + << "0x" << std::setw(8) << static_cast(expected[i]) + << " | " + << "0x" << std::setw(8) + << static_cast(received[i]); + if (static_cast(expected[i]) != + static_cast(received[i])) { + std::cerr << " MISMATCH!"; + } + std::cerr << std::endl; + } + std::cerr << std::endl; +} + +static bool createProcess(HANDLE hIn, HANDLE hOut, HANDLE hErr) +{ + BOOL bInheritHandles = FALSE; + DWORD dwCreationFlags = 0; + memset(&processInfo, 0, sizeof(processInfo)); + memset(&startupInfo, 0, sizeof(startupInfo)); + startupInfo.cb = sizeof(startupInfo); + startupInfo.dwFlags = STARTF_USESHOWWINDOW; + startupInfo.wShowWindow = SW_HIDE; + if (hIn || hOut || hErr) { + startupInfo.dwFlags |= STARTF_USESTDHANDLES; + startupInfo.hStdInput = hIn; + startupInfo.hStdOutput = hOut; + startupInfo.hStdError = hErr; + bInheritHandles = TRUE; + } + + WCHAR cmd[MAX_PATH]; + if (GetModuleFileNameW(nullptr, cmd, MAX_PATH) == 0) { + std::cerr << "GetModuleFileName failed!" << std::endl; + return false; + } + WCHAR* p = cmd + wcslen(cmd); + while (p > cmd && *p != L'\\') + p--; + *(p + 1) = 0; + wcscat(cmd, cmdConsoleBufChild); + wcscat(cmd, L".exe"); + + bool success = + CreateProcessW(nullptr, // No module name (use command line) + cmd, // Command line + nullptr, // Process handle not inheritable + nullptr, // Thread handle not inheritable + bInheritHandles, // Set handle inheritance + dwCreationFlags, + nullptr, // Use parent's environment block + nullptr, // Use parent's starting directory + &startupInfo, // Pointer to STARTUPINFO structure + &processInfo) != + 0; // Pointer to PROCESS_INFORMATION structure + if (!success) { + DWORD lastError = GetLastError(); + std::cerr << "CreateProcess(" << kwsys::Encoding::ToNarrow(cmd) << ")" + << std::endl; + displayError(lastError); + } + return success; +} + +static void finishProcess(bool success) +{ + if (success) { + success = + WaitForSingleObject(processInfo.hProcess, waitTimeout) == WAIT_OBJECT_0; + }; + if (!success) { + TerminateProcess(processInfo.hProcess, 1); + } + CloseHandle(processInfo.hProcess); + CloseHandle(processInfo.hThread); +} + +static bool createPipe(PHANDLE readPipe, PHANDLE writePipe) +{ + SECURITY_ATTRIBUTES securityAttributes; + securityAttributes.nLength = sizeof(SECURITY_ATTRIBUTES); + securityAttributes.bInheritHandle = TRUE; + securityAttributes.lpSecurityDescriptor = nullptr; + return CreatePipe(readPipe, writePipe, &securityAttributes, 0) == 0 ? 
false + : true; +} + +static void finishPipe(HANDLE readPipe, HANDLE writePipe) +{ + if (readPipe != INVALID_HANDLE_VALUE) { + CloseHandle(readPipe); + } + if (writePipe != INVALID_HANDLE_VALUE) { + CloseHandle(writePipe); + } +} + +static HANDLE createFile(LPCWSTR fileName) +{ + SECURITY_ATTRIBUTES securityAttributes; + securityAttributes.nLength = sizeof(SECURITY_ATTRIBUTES); + securityAttributes.bInheritHandle = TRUE; + securityAttributes.lpSecurityDescriptor = nullptr; + + HANDLE file = + CreateFileW(fileName, GENERIC_READ | GENERIC_WRITE, + 0, // do not share + &securityAttributes, + CREATE_ALWAYS, // overwrite existing + FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE, + nullptr); // no template + if (file == INVALID_HANDLE_VALUE) { + DWORD lastError = GetLastError(); + std::cerr << "CreateFile(" << kwsys::Encoding::ToNarrow(fileName) << ")" + << std::endl; + displayError(lastError); + } + return file; +} + +static void finishFile(HANDLE file) +{ + if (file != INVALID_HANDLE_VALUE) { + CloseHandle(file); + } +} + +# ifndef MAPVK_VK_TO_VSC +# define MAPVK_VK_TO_VSC (0) +# endif + +static void writeInputKeyEvent(INPUT_RECORD inputBuffer[], WCHAR chr) +{ + inputBuffer[0].EventType = KEY_EVENT; + inputBuffer[0].Event.KeyEvent.bKeyDown = TRUE; + inputBuffer[0].Event.KeyEvent.wRepeatCount = 1; + SHORT keyCode = VkKeyScanW(chr); + if (keyCode == -1) { + // Character can't be entered with current keyboard layout + // Just set any, it doesn't really matter + keyCode = 'K'; + } + inputBuffer[0].Event.KeyEvent.wVirtualKeyCode = LOBYTE(keyCode); + inputBuffer[0].Event.KeyEvent.wVirtualScanCode = MapVirtualKey( + inputBuffer[0].Event.KeyEvent.wVirtualKeyCode, MAPVK_VK_TO_VSC); + inputBuffer[0].Event.KeyEvent.uChar.UnicodeChar = chr; + inputBuffer[0].Event.KeyEvent.dwControlKeyState = 0; + if ((HIBYTE(keyCode) & 1) == 1) { + inputBuffer[0].Event.KeyEvent.dwControlKeyState |= SHIFT_PRESSED; + } + if ((HIBYTE(keyCode) & 2) == 2) { + inputBuffer[0].Event.KeyEvent.dwControlKeyState |= RIGHT_CTRL_PRESSED; + } + if ((HIBYTE(keyCode) & 4) == 4) { + inputBuffer[0].Event.KeyEvent.dwControlKeyState |= RIGHT_ALT_PRESSED; + } + inputBuffer[1].EventType = inputBuffer[0].EventType; + inputBuffer[1].Event.KeyEvent.bKeyDown = FALSE; + inputBuffer[1].Event.KeyEvent.wRepeatCount = 1; + inputBuffer[1].Event.KeyEvent.wVirtualKeyCode = + inputBuffer[0].Event.KeyEvent.wVirtualKeyCode; + inputBuffer[1].Event.KeyEvent.wVirtualScanCode = + inputBuffer[0].Event.KeyEvent.wVirtualScanCode; + inputBuffer[1].Event.KeyEvent.uChar.UnicodeChar = + inputBuffer[0].Event.KeyEvent.uChar.UnicodeChar; + inputBuffer[1].Event.KeyEvent.dwControlKeyState = 0; +} + +static int testPipe() +{ + int didFail = 1; + HANDLE inPipeRead = INVALID_HANDLE_VALUE; + HANDLE inPipeWrite = INVALID_HANDLE_VALUE; + HANDLE outPipeRead = INVALID_HANDLE_VALUE; + HANDLE outPipeWrite = INVALID_HANDLE_VALUE; + HANDLE errPipeRead = INVALID_HANDLE_VALUE; + HANDLE errPipeWrite = INVALID_HANDLE_VALUE; + UINT currentCodepage = GetConsoleCP(); + char buffer[200]; + char buffer2[200]; + try { + if (!createPipe(&inPipeRead, &inPipeWrite) || + !createPipe(&outPipeRead, &outPipeWrite) || + !createPipe(&errPipeRead, &errPipeWrite)) { + throw std::runtime_error("createFile failed!"); + } + if (TestCodepage == CP_ACP) { + TestCodepage = GetACP(); + } + if (!SetConsoleCP(TestCodepage)) { + throw std::runtime_error("SetConsoleCP failed!"); + } + + DWORD bytesWritten = 0; + if (!WriteFile(inPipeWrite, encodedInputTestString.c_str(), + (DWORD)encodedInputTestString.size(), 
&bytesWritten, + nullptr) || + bytesWritten == 0) { + throw std::runtime_error("WriteFile failed!"); + } + + if (createProcess(inPipeRead, outPipeWrite, errPipeWrite)) { + try { + DWORD status; + if ((status = WaitForSingleObject(afterOutputEvent, waitTimeout)) != + WAIT_OBJECT_0) { + std::cerr.setf(std::ios::hex, std::ios::basefield); + std::cerr << "WaitForSingleObject returned unexpected status 0x" + << status << std::endl; + std::cerr.unsetf(std::ios::hex); + throw std::runtime_error("WaitForSingleObject failed!"); + } + DWORD bytesRead = 0; + if (!ReadFile(outPipeRead, buffer, sizeof(buffer), &bytesRead, + nullptr) || + bytesRead == 0) { + throw std::runtime_error("ReadFile#1 failed!"); + } + buffer[bytesRead] = 0; + if ((bytesRead < + encodedTestString.size() + 1 + encodedInputTestString.size() && + !ReadFile(outPipeRead, buffer + bytesRead, + sizeof(buffer) - bytesRead, &bytesRead, nullptr)) || + bytesRead == 0) { + throw std::runtime_error("ReadFile#2 failed!"); + } + if (memcmp(buffer, encodedTestString.c_str(), + encodedTestString.size()) == 0 && + memcmp(buffer + encodedTestString.size() + 1, + encodedInputTestString.c_str(), + encodedInputTestString.size()) == 0) { + bytesRead = 0; + if (!ReadFile(errPipeRead, buffer2, sizeof(buffer2), &bytesRead, + nullptr) || + bytesRead == 0) { + throw std::runtime_error("ReadFile#3 failed!"); + } + buffer2[bytesRead] = 0; + didFail = encodedTestString.compare(0, std::string::npos, buffer2, + encodedTestString.size()) == 0 + ? 0 + : 1; + } + if (didFail != 0) { + std::cerr << "Pipe's output didn't match expected output!" + << std::endl; + dumpBuffers(encodedTestString.c_str(), buffer, + encodedTestString.size()); + dumpBuffers(encodedInputTestString.c_str(), + buffer + encodedTestString.size() + 1, + encodedInputTestString.size()); + dumpBuffers(encodedTestString.c_str(), buffer2, + encodedTestString.size()); + } + } catch (const std::runtime_error& ex) { + DWORD lastError = GetLastError(); + std::cerr << "In function testPipe, line " << __LINE__ << ": " + << ex.what() << std::endl; + displayError(lastError); + } + finishProcess(didFail == 0); + } + } catch (const std::runtime_error& ex) { + DWORD lastError = GetLastError(); + std::cerr << "In function testPipe, line " << __LINE__ << ": " << ex.what() + << std::endl; + displayError(lastError); + } + finishPipe(inPipeRead, inPipeWrite); + finishPipe(outPipeRead, outPipeWrite); + finishPipe(errPipeRead, errPipeWrite); + SetConsoleCP(currentCodepage); + return didFail; +} + +static int testFile() +{ + int didFail = 1; + HANDLE inFile = INVALID_HANDLE_VALUE; + HANDLE outFile = INVALID_HANDLE_VALUE; + HANDLE errFile = INVALID_HANDLE_VALUE; + try { + if ((inFile = createFile(L"stdinFile.txt")) == INVALID_HANDLE_VALUE || + (outFile = createFile(L"stdoutFile.txt")) == INVALID_HANDLE_VALUE || + (errFile = createFile(L"stderrFile.txt")) == INVALID_HANDLE_VALUE) { + throw std::runtime_error("createFile failed!"); + } + DWORD bytesWritten = 0; + char buffer[200]; + char buffer2[200]; + + int length; + if ((length = WideCharToMultiByte(TestCodepage, 0, UnicodeInputTestString, + -1, buffer, sizeof(buffer), nullptr, + nullptr)) == 0) { + throw std::runtime_error("WideCharToMultiByte failed!"); + } + buffer[length - 1] = '\n'; + if (!WriteFile(inFile, buffer, length, &bytesWritten, nullptr) || + bytesWritten == 0) { + throw std::runtime_error("WriteFile failed!"); + } + if (SetFilePointer(inFile, 0, 0, FILE_BEGIN) == INVALID_SET_FILE_POINTER) { + throw std::runtime_error("SetFilePointer failed!"); + } + + 
if (createProcess(inFile, outFile, errFile)) { + DWORD bytesRead = 0; + try { + DWORD status; + if ((status = WaitForSingleObject(afterOutputEvent, waitTimeout)) != + WAIT_OBJECT_0) { + std::cerr.setf(std::ios::hex, std::ios::basefield); + std::cerr << "WaitForSingleObject returned unexpected status 0x" + << status << std::endl; + std::cerr.unsetf(std::ios::hex); + throw std::runtime_error("WaitForSingleObject failed!"); + } + if (SetFilePointer(outFile, 0, 0, FILE_BEGIN) == + INVALID_SET_FILE_POINTER) { + throw std::runtime_error("SetFilePointer#1 failed!"); + } + if (!ReadFile(outFile, buffer, sizeof(buffer), &bytesRead, nullptr) || + bytesRead == 0) { + throw std::runtime_error("ReadFile#1 failed!"); + } + buffer[bytesRead] = 0; + if (memcmp(buffer, encodedTestString.c_str(), + encodedTestString.size()) == 0 && + memcmp(buffer + encodedTestString.size() + 1, + encodedInputTestString.c_str(), + encodedInputTestString.size()) == 0) { + bytesRead = 0; + if (SetFilePointer(errFile, 0, 0, FILE_BEGIN) == + INVALID_SET_FILE_POINTER) { + throw std::runtime_error("SetFilePointer#2 failed!"); + } + + if (!ReadFile(errFile, buffer2, sizeof(buffer2), &bytesRead, + nullptr) || + bytesRead == 0) { + throw std::runtime_error("ReadFile#2 failed!"); + } + buffer2[bytesRead] = 0; + didFail = encodedTestString.compare(0, std::string::npos, buffer2, + encodedTestString.size()) == 0 + ? 0 + : 1; + } + if (didFail != 0) { + std::cerr << "File's output didn't match expected output!" + << std::endl; + dumpBuffers(encodedTestString.c_str(), buffer, + encodedTestString.size()); + dumpBuffers(encodedInputTestString.c_str(), + buffer + encodedTestString.size() + 1, + encodedInputTestString.size()); + dumpBuffers(encodedTestString.c_str(), buffer2, + encodedTestString.size()); + } + } catch (const std::runtime_error& ex) { + DWORD lastError = GetLastError(); + std::cerr << "In function testFile, line " << __LINE__ << ": " + << ex.what() << std::endl; + displayError(lastError); + } + finishProcess(didFail == 0); + } + } catch (const std::runtime_error& ex) { + DWORD lastError = GetLastError(); + std::cerr << "In function testFile, line " << __LINE__ << ": " << ex.what() + << std::endl; + displayError(lastError); + } + finishFile(inFile); + finishFile(outFile); + finishFile(errFile); + return didFail; +} + +# ifndef _WIN32_WINNT_VISTA +# define _WIN32_WINNT_VISTA 0x0600 +# endif + +static int testConsole() +{ + int didFail = 1; + HANDLE parentIn = GetStdHandle(STD_INPUT_HANDLE); + HANDLE parentOut = GetStdHandle(STD_OUTPUT_HANDLE); + HANDLE parentErr = GetStdHandle(STD_ERROR_HANDLE); + HANDLE hIn = parentIn; + HANDLE hOut = parentOut; + DWORD consoleMode; + bool newConsole = false; + bool forceNewConsole = false; + bool restoreConsole = false; + LPCWSTR TestFaceName = L"Lucida Console"; + const DWORD TestFontFamily = 0x00000036; + const DWORD TestFontSize = 0x000c0000; + HKEY hConsoleKey; + WCHAR FaceName[200]; + FaceName[0] = 0; + DWORD FaceNameSize = sizeof(FaceName); + DWORD FontFamily = TestFontFamily; + DWORD FontSize = TestFontSize; +# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersion +# pragma warning(push) +# ifdef __INTEL_COMPILER +# pragma warning(disable : 1478) +# elif defined __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" +# else +# pragma warning(disable : 4996) +# endif +# endif + const bool isVistaOrGreater = + LOBYTE(LOWORD(GetVersion())) >= HIBYTE(_WIN32_WINNT_VISTA); +# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersion +# ifdef __clang__ +# pragma clang 
diagnostic pop +# else +# pragma warning(pop) +# endif +# endif + if (!isVistaOrGreater) { + if (RegOpenKeyExW(HKEY_CURRENT_USER, L"Console", 0, KEY_READ | KEY_WRITE, + &hConsoleKey) == ERROR_SUCCESS) { + DWORD dwordSize = sizeof(DWORD); + if (RegQueryValueExW(hConsoleKey, L"FontFamily", nullptr, nullptr, + (LPBYTE)&FontFamily, &dwordSize) == ERROR_SUCCESS) { + if (FontFamily != TestFontFamily) { + RegQueryValueExW(hConsoleKey, L"FaceName", nullptr, nullptr, + (LPBYTE)FaceName, &FaceNameSize); + RegQueryValueExW(hConsoleKey, L"FontSize", nullptr, nullptr, + (LPBYTE)&FontSize, &dwordSize); + + RegSetValueExW(hConsoleKey, L"FontFamily", 0, REG_DWORD, + (BYTE*)&TestFontFamily, sizeof(TestFontFamily)); + RegSetValueExW(hConsoleKey, L"FaceName", 0, REG_SZ, + (BYTE*)TestFaceName, + (DWORD)((wcslen(TestFaceName) + 1) * sizeof(WCHAR))); + RegSetValueExW(hConsoleKey, L"FontSize", 0, REG_DWORD, + (BYTE*)&TestFontSize, sizeof(TestFontSize)); + + restoreConsole = true; + forceNewConsole = true; + } + } else { + std::cerr << "RegGetValueW(FontFamily) failed!" << std::endl; + } + RegCloseKey(hConsoleKey); + } else { + std::cerr << "RegOpenKeyExW(HKEY_CURRENT_USER\\Console) failed!" + << std::endl; + } + } + if (forceNewConsole || GetConsoleMode(parentOut, &consoleMode) == 0) { + // Not a real console, let's create new one. + FreeConsole(); + if (!AllocConsole()) { + std::cerr << "AllocConsole failed!" << std::endl; + return didFail; + } + SECURITY_ATTRIBUTES securityAttributes; + securityAttributes.nLength = sizeof(SECURITY_ATTRIBUTES); + securityAttributes.bInheritHandle = TRUE; + securityAttributes.lpSecurityDescriptor = nullptr; + hIn = CreateFileW(L"CONIN$", GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, &securityAttributes, + OPEN_EXISTING, 0, nullptr); + if (hIn == INVALID_HANDLE_VALUE) { + DWORD lastError = GetLastError(); + std::cerr << "CreateFile(CONIN$)" << std::endl; + displayError(lastError); + } + hOut = CreateFileW(L"CONOUT$", GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, &securityAttributes, + OPEN_EXISTING, 0, nullptr); + if (hOut == INVALID_HANDLE_VALUE) { + DWORD lastError = GetLastError(); + std::cerr << "CreateFile(CONOUT$)" << std::endl; + displayError(lastError); + } + SetStdHandle(STD_INPUT_HANDLE, hIn); + SetStdHandle(STD_OUTPUT_HANDLE, hOut); + SetStdHandle(STD_ERROR_HANDLE, hOut); + newConsole = true; + } + +# if _WIN32_WINNT >= _WIN32_WINNT_VISTA + if (isVistaOrGreater) { + CONSOLE_FONT_INFOEX consoleFont; + memset(&consoleFont, 0, sizeof(consoleFont)); + consoleFont.cbSize = sizeof(consoleFont); + HMODULE kernel32 = LoadLibraryW(L"kernel32.dll"); + typedef BOOL(WINAPI * GetCurrentConsoleFontExFunc)( + HANDLE hConsoleOutput, BOOL bMaximumWindow, + PCONSOLE_FONT_INFOEX lpConsoleCurrentFontEx); + typedef BOOL(WINAPI * SetCurrentConsoleFontExFunc)( + HANDLE hConsoleOutput, BOOL bMaximumWindow, + PCONSOLE_FONT_INFOEX lpConsoleCurrentFontEx); + GetCurrentConsoleFontExFunc getConsoleFont = + (GetCurrentConsoleFontExFunc)GetProcAddress(kernel32, + "GetCurrentConsoleFontEx"); + SetCurrentConsoleFontExFunc setConsoleFont = + (SetCurrentConsoleFontExFunc)GetProcAddress(kernel32, + "SetCurrentConsoleFontEx"); + if (getConsoleFont(hOut, FALSE, &consoleFont)) { + if (consoleFont.FontFamily != TestFontFamily) { + consoleFont.FontFamily = TestFontFamily; + wcscpy(consoleFont.FaceName, TestFaceName); + if (!setConsoleFont(hOut, FALSE, &consoleFont)) { + std::cerr << "SetCurrentConsoleFontEx failed!" 
<< std::endl; + } + } + } else { + std::cerr << "GetCurrentConsoleFontEx failed!" << std::endl; + } + } else { +# endif + if (restoreConsole && + RegOpenKeyExW(HKEY_CURRENT_USER, L"Console", 0, KEY_WRITE, + &hConsoleKey) == ERROR_SUCCESS) { + RegSetValueExW(hConsoleKey, L"FontFamily", 0, REG_DWORD, + (BYTE*)&FontFamily, sizeof(FontFamily)); + if (FaceName[0] != 0) { + RegSetValueExW(hConsoleKey, L"FaceName", 0, REG_SZ, (BYTE*)FaceName, + FaceNameSize); + } else { + RegDeleteValueW(hConsoleKey, L"FaceName"); + } + RegSetValueExW(hConsoleKey, L"FontSize", 0, REG_DWORD, (BYTE*)&FontSize, + sizeof(FontSize)); + RegCloseKey(hConsoleKey); + } +# if _WIN32_WINNT >= _WIN32_WINNT_VISTA + } +# endif + + if (createProcess(nullptr, nullptr, nullptr)) { + try { + DWORD status; + if ((status = WaitForSingleObject(beforeInputEvent, waitTimeout)) != + WAIT_OBJECT_0) { + std::cerr.setf(std::ios::hex, std::ios::basefield); + std::cerr << "WaitForSingleObject returned unexpected status 0x" + << status << std::endl; + std::cerr.unsetf(std::ios::hex); + throw std::runtime_error("WaitForSingleObject#1 failed!"); + } + INPUT_RECORD inputBuffer[(sizeof(UnicodeInputTestString) / + sizeof(UnicodeInputTestString[0])) * + 2]; + memset(&inputBuffer, 0, sizeof(inputBuffer)); + unsigned int i; + for (i = 0; i < (sizeof(UnicodeInputTestString) / + sizeof(UnicodeInputTestString[0]) - + 1); + i++) { + writeInputKeyEvent(&inputBuffer[i * 2], UnicodeInputTestString[i]); + } + writeInputKeyEvent(&inputBuffer[i * 2], VK_RETURN); + DWORD eventsWritten = 0; + // We need to wait a bit before writing to console so child process have + // started waiting for input on stdin. + Sleep(300); + if (!WriteConsoleInputW(hIn, inputBuffer, + sizeof(inputBuffer) / sizeof(inputBuffer[0]), + &eventsWritten) || + eventsWritten == 0) { + throw std::runtime_error("WriteConsoleInput failed!"); + } + if ((status = WaitForSingleObject(afterOutputEvent, waitTimeout)) != + WAIT_OBJECT_0) { + std::cerr.setf(std::ios::hex, std::ios::basefield); + std::cerr << "WaitForSingleObject returned unexpected status 0x" + << status << std::endl; + std::cerr.unsetf(std::ios::hex); + throw std::runtime_error("WaitForSingleObject#2 failed!"); + } + CONSOLE_SCREEN_BUFFER_INFO screenBufferInfo; + if (!GetConsoleScreenBufferInfo(hOut, &screenBufferInfo)) { + throw std::runtime_error("GetConsoleScreenBufferInfo failed!"); + } + + COORD coord; + DWORD charsRead = 0; + coord.X = 0; + coord.Y = screenBufferInfo.dwCursorPosition.Y - 4; + WCHAR* outputBuffer = new WCHAR[screenBufferInfo.dwSize.X * 4]; + if (!ReadConsoleOutputCharacterW(hOut, outputBuffer, + screenBufferInfo.dwSize.X * 4, coord, + &charsRead) || + charsRead == 0) { + delete[] outputBuffer; + throw std::runtime_error("ReadConsoleOutputCharacter failed!"); + } + std::wstring wideTestString = kwsys::Encoding::ToWide(encodedTestString); + std::replace(wideTestString.begin(), wideTestString.end(), '\0', ' '); + std::wstring wideInputTestString = + kwsys::Encoding::ToWide(encodedInputTestString); + if (memcmp(outputBuffer, wideTestString.c_str(), + wideTestString.size() * sizeof(wchar_t)) == 0 && + memcmp(outputBuffer + screenBufferInfo.dwSize.X * 1, + wideTestString.c_str(), + wideTestString.size() * sizeof(wchar_t)) == 0 && + memcmp(outputBuffer + screenBufferInfo.dwSize.X * 2, + UnicodeInputTestString, + sizeof(UnicodeInputTestString) - sizeof(WCHAR)) == 0 && + memcmp(outputBuffer + screenBufferInfo.dwSize.X * 3, + wideInputTestString.c_str(), + (wideInputTestString.size() - 1) * sizeof(wchar_t)) == 0) { + 
didFail = 0; + } else { + std::cerr << "Console's output didn't match expected output!" + << std::endl; + dumpBuffers(wideTestString.c_str(), outputBuffer, + wideTestString.size()); + dumpBuffers(wideTestString.c_str(), + outputBuffer + screenBufferInfo.dwSize.X * 1, + wideTestString.size()); + dumpBuffers( + UnicodeInputTestString, outputBuffer + screenBufferInfo.dwSize.X * 2, + (sizeof(UnicodeInputTestString) - 1) / sizeof(WCHAR)); + dumpBuffers(wideInputTestString.c_str(), + outputBuffer + screenBufferInfo.dwSize.X * 3, + wideInputTestString.size() - 1); + } + delete[] outputBuffer; + } catch (const std::runtime_error& ex) { + DWORD lastError = GetLastError(); + std::cerr << "In function testConsole, line " << __LINE__ << ": " + << ex.what() << std::endl; + displayError(lastError); + } + finishProcess(didFail == 0); + } + if (newConsole) { + SetStdHandle(STD_INPUT_HANDLE, parentIn); + SetStdHandle(STD_OUTPUT_HANDLE, parentOut); + SetStdHandle(STD_ERROR_HANDLE, parentErr); + CloseHandle(hIn); + CloseHandle(hOut); + FreeConsole(); + } + return didFail; +} + +#endif + +int testConsoleBuf(int, char* []) +{ + int ret = 0; + +#if defined(_WIN32) + beforeInputEvent = CreateEventW(nullptr, + FALSE, // auto-reset event + FALSE, // initial state is nonsignaled + BeforeInputEventName); // object name + if (!beforeInputEvent) { + std::cerr << "CreateEvent#1 failed " << GetLastError() << std::endl; + return 1; + } + + afterOutputEvent = CreateEventW(nullptr, FALSE, FALSE, AfterOutputEventName); + if (!afterOutputEvent) { + std::cerr << "CreateEvent#2 failed " << GetLastError() << std::endl; + return 1; + } + + encodedTestString = kwsys::Encoding::ToNarrow(std::wstring( + UnicodeTestString, sizeof(UnicodeTestString) / sizeof(wchar_t) - 1)); + encodedInputTestString = kwsys::Encoding::ToNarrow( + std::wstring(UnicodeInputTestString, + sizeof(UnicodeInputTestString) / sizeof(wchar_t) - 1)); + encodedInputTestString += "\n"; + + ret |= testPipe(); + ret |= testFile(); + ret |= testConsole(); + + CloseHandle(beforeInputEvent); + CloseHandle(afterOutputEvent); +#endif + + return ret; +} diff --git a/test/API/driver/kwsys/testConsoleBuf.hxx b/test/API/driver/kwsys/testConsoleBuf.hxx new file mode 100644 index 00000000000..e93cb4f0a1c --- /dev/null +++ b/test/API/driver/kwsys/testConsoleBuf.hxx @@ -0,0 +1,17 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef testConsoleBuf_hxx +#define testConsoleBuf_hxx + +static const wchar_t cmdConsoleBufChild[] = L"testConsoleBufChild"; + +static const wchar_t BeforeInputEventName[] = L"BeforeInputEvent"; +static const wchar_t AfterOutputEventName[] = L"AfterOutputEvent"; + +// यूनिकोड είναι здорово! +static const wchar_t UnicodeTestString[] = + L"\u092F\u0942\u0928\u093F\u0915\u094B\u0921 " + L"\u03B5\u03AF\u03BD\0\u03B1\u03B9 " + L"\u0437\u0434\u043E\u0440\u043E\u0432\u043E!"; + +#endif diff --git a/test/API/driver/kwsys/testConsoleBufChild.cxx b/test/API/driver/kwsys/testConsoleBufChild.cxx new file mode 100644 index 00000000000..3c8fdc2e13b --- /dev/null +++ b/test/API/driver/kwsys/testConsoleBufChild.cxx @@ -0,0 +1,55 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" + +#include KWSYS_HEADER(ConsoleBuf.hxx) +#include KWSYS_HEADER(Encoding.hxx) + +// Work-around CMake dependency scanning limitation. 
This must +// duplicate the above list of headers. +#if 0 +# include "ConsoleBuf.hxx.in" +# include "Encoding.hxx.in" +#endif + +#include + +#include "testConsoleBuf.hxx" + +int main(int argc, const char* argv[]) +{ +#if defined(_WIN32) + kwsys::ConsoleBuf::Manager out(std::cout); + kwsys::ConsoleBuf::Manager err(std::cerr, true); + kwsys::ConsoleBuf::Manager in(std::cin); + + if (argc > 1) { + std::cout << argv[1] << std::endl; + std::cerr << argv[1] << std::endl; + } else { + std::string str = kwsys::Encoding::ToNarrow(std::wstring( + UnicodeTestString, sizeof(UnicodeTestString) / sizeof(wchar_t) - 1)); + std::cout << str << std::endl; + std::cerr << str << std::endl; + } + + std::string input; + HANDLE event = OpenEventW(EVENT_MODIFY_STATE, FALSE, BeforeInputEventName); + if (event) { + SetEvent(event); + CloseHandle(event); + } + + std::cin >> input; + std::cout << input << std::endl; + event = OpenEventW(EVENT_MODIFY_STATE, FALSE, AfterOutputEventName); + if (event) { + SetEvent(event); + CloseHandle(event); + } +#else + static_cast(argc); + static_cast(argv); +#endif + return 0; +} diff --git a/test/API/driver/kwsys/testDirectory.cxx b/test/API/driver/kwsys/testDirectory.cxx new file mode 100644 index 00000000000..b1ab0c87270 --- /dev/null +++ b/test/API/driver/kwsys/testDirectory.cxx @@ -0,0 +1,110 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying +file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(Directory.hxx) +#include KWSYS_HEADER(Encoding.hxx) +#include KWSYS_HEADER(SystemTools.hxx) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "Directory.hxx.in" +# include "Encoding.hxx.in" +# include "SystemTools.hxx.in" +#endif + +#include +#include +#include + +#include + +int _doLongPathTest() +{ + using namespace kwsys; + static const int LONG_PATH_THRESHOLD = 512; + int res = 0; + std::string topdir(TEST_SYSTEMTOOLS_BINARY_DIR "/directory_testing/"); + std::stringstream testpathstrm; + std::string testdirpath; + std::string extendedtestdirpath; + + testpathstrm << topdir; + size_t pathlen = testpathstrm.str().length(); + testpathstrm.seekp(0, std::ios_base::end); + while (pathlen < LONG_PATH_THRESHOLD) { + testpathstrm << "0123456789/"; + pathlen = testpathstrm.str().length(); + } + + testdirpath = testpathstrm.str(); +#ifdef _WIN32 + extendedtestdirpath = + Encoding::ToNarrow(SystemTools::ConvertToWindowsExtendedPath(testdirpath)); +#else + extendedtestdirpath = testdirpath; +#endif + + if (SystemTools::MakeDirectory(extendedtestdirpath)) { + std::ofstream testfile1( + (extendedtestdirpath + "longfilepathtest1.txt").c_str()); + std::ofstream testfile2( + (extendedtestdirpath + "longfilepathtest2.txt").c_str()); + testfile1 << "foo"; + testfile2 << "bar"; + testfile1.close(); + testfile2.close(); + + Directory testdir; + // Set res to failure if the directory doesn't load + res += !testdir.Load(testdirpath); + // Increment res failure if the directory appears empty + res += testdir.GetNumberOfFiles() == 0; + // Increment res failures if the path has changed from + // what was provided. 
+ res += testdirpath != testdir.GetPath(); + + SystemTools::RemoveADirectory(topdir); + } else { + std::cerr << "Failed to create directory with long path: " + << extendedtestdirpath << std::endl; + res += 1; + } + return res; +} + +int _copyDirectoryTest() +{ + using namespace kwsys; + const std::string source(TEST_SYSTEMTOOLS_BINARY_DIR + "/directory_testing/copyDirectoryTestSrc"); + if (SystemTools::PathExists(source)) { + std::cerr << source << " shouldn't exist before test" << std::endl; + return 1; + } + const std::string destination(TEST_SYSTEMTOOLS_BINARY_DIR + "/directory_testing/copyDirectoryTestDst"); + if (SystemTools::PathExists(destination)) { + std::cerr << destination << " shouldn't exist before test" << std::endl; + return 2; + } + const bool copysuccess = SystemTools::CopyADirectory(source, destination); + const bool destinationexists = SystemTools::PathExists(destination); + if (copysuccess) { + std::cerr << "CopyADirectory should have returned false" << std::endl; + SystemTools::RemoveADirectory(destination); + return 3; + } + if (destinationexists) { + std::cerr << "CopyADirectory returned false, but destination directory" + << " has been created" << std::endl; + SystemTools::RemoveADirectory(destination); + return 4; + } + return 0; +} + +int testDirectory(int, char* []) +{ + return _doLongPathTest() + _copyDirectoryTest(); +} diff --git a/test/API/driver/kwsys/testDynamicLoader.cxx b/test/API/driver/kwsys/testDynamicLoader.cxx new file mode 100644 index 00000000000..2421ac0e154 --- /dev/null +++ b/test/API/driver/kwsys/testDynamicLoader.cxx @@ -0,0 +1,133 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" + +#include KWSYS_HEADER(DynamicLoader.hxx) + +#if defined(__BEOS__) || defined(__HAIKU__) +# include /* disable_debugger() API. */ +#endif + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "DynamicLoader.hxx.in" +#endif + +#include +#include + +// Include with <> instead of "" to avoid getting any in-source copy +// left on disk. +#include + +static std::string GetLibName(const char* lname, const char* subdir = nullptr) +{ + // Construct proper name of lib + std::string slname; + slname = EXECUTABLE_OUTPUT_PATH; + if (subdir) { + slname += "/"; + slname += subdir; + } +#ifdef CMAKE_INTDIR + slname += "/"; + slname += CMAKE_INTDIR; +#endif + slname += "/"; + slname += kwsys::DynamicLoader::LibPrefix(); + slname += lname; + slname += kwsys::DynamicLoader::LibExtension(); + + return slname; +} + +/* libname = Library name (proper prefix, proper extension) + * System = symbol to lookup in libname + * r1: should OpenLibrary succeed ? + * r2: should GetSymbolAddress succeed ? + * r3: should CloseLibrary succeed ? 
+ */ +static int TestDynamicLoader(const char* libname, const char* symbol, int r1, + int r2, int r3, int flags = 0) +{ + std::cerr << "Testing: " << libname << std::endl; + kwsys::DynamicLoader::LibraryHandle l = + kwsys::DynamicLoader::OpenLibrary(libname, flags); + // If result is incompatible with expectation just fails (xor): + if ((r1 && !l) || (!r1 && l)) { + std::cerr << "OpenLibrary: " << kwsys::DynamicLoader::LastError() + << std::endl; + return 1; + } + kwsys::DynamicLoader::SymbolPointer f = + kwsys::DynamicLoader::GetSymbolAddress(l, symbol); + if ((r2 && !f) || (!r2 && f)) { + std::cerr << "GetSymbolAddress: " << kwsys::DynamicLoader::LastError() + << std::endl; + return 1; + } +#ifndef __APPLE__ + int s = kwsys::DynamicLoader::CloseLibrary(l); + if ((r3 && !s) || (!r3 && s)) { + std::cerr << "CloseLibrary: " << kwsys::DynamicLoader::LastError() + << std::endl; + return 1; + } +#else + (void)r3; +#endif + return 0; +} + +int testDynamicLoader(int argc, char* argv[]) +{ +#if defined(_WIN32) + SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); +#elif defined(__BEOS__) || defined(__HAIKU__) + disable_debugger(1); +#endif + int res = 0; + if (argc == 3) { + // User specify a libname and symbol to check. + res = TestDynamicLoader(argv[1], argv[2], 1, 1, 1); + return res; + } + +// dlopen() on Syllable before 11/22/2007 doesn't return 0 on error +#ifndef __SYLLABLE__ + // Make sure that inexistent lib is giving correct result + res += TestDynamicLoader("azerty_", "foo_bar", 0, 0, 0); + // Make sure that random binary file cannot be assimilated as dylib + res += TestDynamicLoader(TEST_SYSTEMTOOLS_SOURCE_DIR "/testSystemTools.bin", + "wp", 0, 0, 0); +#endif + +#ifdef __linux__ + // This one is actually fun to test, since dlopen is by default + // loaded...wonder why :) + res += TestDynamicLoader("foobar.lib", "dlopen", 0, 1, 0); + res += TestDynamicLoader("libdl.so", "dlopen", 1, 1, 1); + res += TestDynamicLoader("libdl.so", "TestDynamicLoader", 1, 0, 1); +#endif + // Now try on the generated library + std::string libname = GetLibName(KWSYS_NAMESPACE_STRING "TestDynload"); + res += TestDynamicLoader(libname.c_str(), "dummy", 1, 0, 1); + res += TestDynamicLoader(libname.c_str(), "TestDynamicLoaderSymbolPointer", + 1, 1, 1); + res += TestDynamicLoader(libname.c_str(), "_TestDynamicLoaderSymbolPointer", + 1, 0, 1); + res += TestDynamicLoader(libname.c_str(), "TestDynamicLoaderData", 1, 1, 1); + res += TestDynamicLoader(libname.c_str(), "_TestDynamicLoaderData", 1, 0, 1); + +#ifdef _WIN32 + libname = GetLibName(KWSYS_NAMESPACE_STRING "TestDynloadUse", "dynloaddir"); + res += TestDynamicLoader(libname.c_str(), "dummy", 0, 0, 0); + res += TestDynamicLoader(libname.c_str(), "TestLoad", 1, 1, 1, + kwsys::DynamicLoader::SearchBesideLibrary); + res += TestDynamicLoader(libname.c_str(), "_TestLoad", 1, 0, 1, + kwsys::DynamicLoader::SearchBesideLibrary); +#endif + + return res; +} diff --git a/test/API/driver/kwsys/testDynload.c b/test/API/driver/kwsys/testDynload.c new file mode 100644 index 00000000000..c49f747df43 --- /dev/null +++ b/test/API/driver/kwsys/testDynload.c @@ -0,0 +1,13 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/ +#ifdef _WIN32 +# define DL_EXPORT __declspec(dllexport) +#else +# define DL_EXPORT +#endif + +DL_EXPORT int TestDynamicLoaderData = 0; + +DL_EXPORT void TestDynamicLoaderSymbolPointer() +{ +} diff --git a/test/API/driver/kwsys/testDynloadImpl.c b/test/API/driver/kwsys/testDynloadImpl.c new file mode 100644 index 00000000000..2b9069bc7a4 --- /dev/null +++ b/test/API/driver/kwsys/testDynloadImpl.c @@ -0,0 +1,10 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ + +#include "testDynloadImpl.h" + +int TestDynamicLoaderImplData = 0; + +void TestDynamicLoaderImplSymbolPointer() +{ +} diff --git a/test/API/driver/kwsys/testDynloadImpl.h b/test/API/driver/kwsys/testDynloadImpl.h new file mode 100644 index 00000000000..d0c9dfb756b --- /dev/null +++ b/test/API/driver/kwsys/testDynloadImpl.h @@ -0,0 +1,15 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifdef _WIN32 +# ifdef BUILDING_TestDynloadImpl +# define DLIMPL_EXPORT __declspec(dllexport) +# else +# define DLIMPL_EXPORT __declspec(dllimport) +# endif +#else +# define DLIMPL_EXPORT +#endif + +DLIMPL_EXPORT int TestDynamicLoaderImplData; + +DLIMPL_EXPORT void TestDynamicLoaderImplSymbolPointer(); diff --git a/test/API/driver/kwsys/testDynloadUse.c b/test/API/driver/kwsys/testDynloadUse.c new file mode 100644 index 00000000000..5402add6a20 --- /dev/null +++ b/test/API/driver/kwsys/testDynloadUse.c @@ -0,0 +1,15 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "testDynloadImpl.h" + +#ifdef _WIN32 +# define DL_EXPORT __declspec(dllexport) +#else +# define DL_EXPORT +#endif + +DL_EXPORT int TestLoad() +{ + TestDynamicLoaderImplSymbolPointer(); + return TestDynamicLoaderImplData; +} diff --git a/test/API/driver/kwsys/testEncode.c b/test/API/driver/kwsys/testEncode.c new file mode 100644 index 00000000000..b7b6dd8458f --- /dev/null +++ b/test/API/driver/kwsys/testEncode.c @@ -0,0 +1,67 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(MD5.h) + +/* Work-around CMake dependency scanning limitation. This must + duplicate the above list of headers. */ +#if 0 +# include "MD5.h.in" +#endif + +#include +#include + +static const unsigned char testMD5input1[] = + " A quick brown fox jumps over the lazy dog.\n" + " This is sample text for MD5 sum input.\n"; +static const char testMD5output1[] = "8f146af46ed4f267921bb937d4d3500c"; + +static const int testMD5input2len = 28; +static const unsigned char testMD5input2[] = "the cow jumped over the moon"; +static const char testMD5output2[] = "a2ad137b746138fae4e5adca9c85d3ae"; + +static int testMD5_1(kwsysMD5* md5) +{ + char md5out[33]; + kwsysMD5_Initialize(md5); + kwsysMD5_Append(md5, testMD5input1, -1); + kwsysMD5_FinalizeHex(md5, md5out); + md5out[32] = 0; + printf("md5sum 1: expected [%s]\n" + " got [%s]\n", + testMD5output1, md5out); + return (strcmp(md5out, testMD5output1) != 0) ? 
1 : 0; +} + +static int testMD5_2(kwsysMD5* md5) +{ + unsigned char digest[16]; + char md5out[33]; + kwsysMD5_Initialize(md5); + kwsysMD5_Append(md5, testMD5input2, testMD5input2len); + kwsysMD5_Finalize(md5, digest); + kwsysMD5_DigestToHex(digest, md5out); + md5out[32] = 0; + printf("md5sum 2: expected [%s]\n" + " got [%s]\n", + testMD5output2, md5out); + return (strcmp(md5out, testMD5output2) != 0) ? 1 : 0; +} + +int testEncode(int argc, char* argv[]) +{ + int result = 0; + (void)argc; + (void)argv; + + /* Test MD5 digest. */ + { + kwsysMD5* md5 = kwsysMD5_New(); + result |= testMD5_1(md5); + result |= testMD5_2(md5); + kwsysMD5_Delete(md5); + } + + return result; +} diff --git a/test/API/driver/kwsys/testEncoding.cxx b/test/API/driver/kwsys/testEncoding.cxx new file mode 100644 index 00000000000..988697bff8d --- /dev/null +++ b/test/API/driver/kwsys/testEncoding.cxx @@ -0,0 +1,286 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" + +#if defined(_MSC_VER) +# pragma warning(disable : 4786) +#endif + +#include KWSYS_HEADER(Encoding.hxx) +#include KWSYS_HEADER(Encoding.h) + +#include +#include +#include +#include +#include + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. +#if 0 +# include "Encoding.h.in" +# include "Encoding.hxx.in" +#endif + +static const unsigned char helloWorldStrings[][32] = { + // English + { 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', 0 }, + // Japanese + { 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x93, 0xE3, 0x81, 0xAB, 0xE3, 0x81, + 0xA1, 0xE3, 0x81, 0xAF, 0xE4, 0xB8, 0x96, 0xE7, 0x95, 0x8C, 0 }, + // Arabic + { 0xD9, 0x85, 0xD8, 0xB1, 0xD8, 0xAD, 0xD8, 0xA8, 0xD8, 0xA7, 0x20, 0xD8, + 0xA7, 0xD9, 0x84, 0xD8, 0xB9, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x85, 0 }, + // Yiddish + { 0xD7, 0x94, 0xD7, 0xA2, 0xD7, 0x9C, 0xD7, 0x90, 0x20, 0xD7, + 0x95, 0xD7, 0x95, 0xD7, 0xA2, 0xD7, 0x9C, 0xD7, 0x98, 0 }, + // Russian + { 0xD0, 0xBF, 0xD1, 0x80, 0xD0, 0xB8, 0xD0, 0xB2, 0xD0, 0xB5, + 0xD1, 0x82, 0x20, 0xD0, 0xBC, 0xD0, 0xB8, 0xD1, 0x80, 0 }, + // Latin + { 0x4D, 0x75, 0x6E, 0x64, 0x75, 0x73, 0x20, 0x73, 0x61, 0x6C, 0x76, 0x65, + 0 }, + // Swahili + { 0x68, 0x75, 0x6A, 0x61, 0x6D, 0x62, 0x6F, 0x20, 0x44, 0x75, 0x6E, 0x69, + 0x61, 0 }, + // Icelandic + { 0x48, 0x61, 0x6C, 0x6C, 0xC3, 0xB3, 0x20, 0x68, 0x65, 0x69, 0x6D, 0x75, + 0x72, 0 }, + { 0 } +}; + +static int testHelloWorldEncoding() +{ + int ret = 0; + for (int i = 0; helloWorldStrings[i][0] != 0; i++) { + std::string str = reinterpret_cast(helloWorldStrings[i]); + std::cout << str << std::endl; + std::wstring wstr = kwsys::Encoding::ToWide(str); + std::string str2 = kwsys::Encoding::ToNarrow(wstr); + wchar_t* c_wstr = kwsysEncoding_DupToWide(str.c_str()); + char* c_str2 = kwsysEncoding_DupToNarrow(c_wstr); + if (!wstr.empty() && (str != str2 || strcmp(c_str2, str.c_str()))) { + std::cout << "converted string was different: " << str2 << std::endl; + std::cout << "converted string was different: " << c_str2 << std::endl; + ret++; + } + free(c_wstr); + free(c_str2); + } + return ret; +} + +static int testRobustEncoding() +{ + // test that the conversion functions handle invalid + // unicode correctly/gracefully + + // we manipulate the format flags of stdout, remember + // the original state here to restore before return + std::ios::fmtflags const& flags = std::cout.flags(); + + int ret = 0; + char cstr[] = { (char)-1, 0 }; + // this conversion 
could fail
+  std::wstring wstr = kwsys::Encoding::ToWide(cstr);
+
+  wstr = kwsys::Encoding::ToWide(nullptr);
+  if (wstr != L"") {
+    const wchar_t* wcstr = wstr.c_str();
+    std::cout << "ToWide(NULL) returned";
+    for (size_t i = 0; i < wstr.size(); i++) {
+      std::cout << " " << std::hex << (int)wcstr[i];
+    }
+    std::cout << std::endl;
+    ret++;
+  }
+  wstr = kwsys::Encoding::ToWide("");
+  if (wstr != L"") {
+    const wchar_t* wcstr = wstr.c_str();
+    std::cout << "ToWide(\"\") returned";
+    for (size_t i = 0; i < wstr.size(); i++) {
+      std::cout << " " << std::hex << (int)wcstr[i];
+    }
+    std::cout << std::endl;
+    ret++;
+  }
+
+#ifdef _WIN32
+  // 16 bit wchar_t - we make an invalid surrogate pair
+  wchar_t cwstr[] = { 0xD801, 0xDA00, 0 };
+  // this conversion could fail
+  std::string win_str = kwsys::Encoding::ToNarrow(cwstr);
+#endif
+
+  std::string str = kwsys::Encoding::ToNarrow(nullptr);
+  if (str != "") {
+    std::cout << "ToNarrow(NULL) returned " << str << std::endl;
+    ret++;
+  }
+
+  str = kwsys::Encoding::ToNarrow(L"");
+  if (wstr != L"") {
+    std::cout << "ToNarrow(\"\") returned " << str << std::endl;
+    ret++;
+  }
+
+  std::cout.flags(flags);
+  return ret;
+}
+
+static int testWithNulls()
+{
+  int ret = 0;
+  std::vector<std::string> strings;
+  strings.push_back(std::string("ab") + '\0' + 'c');
+  strings.push_back(std::string("d") + '\0' + '\0' + 'e');
+  strings.push_back(std::string() + '\0' + 'f');
+  strings.push_back(std::string() + '\0' + '\0' + "gh");
+  strings.push_back(std::string("ij") + '\0');
+  strings.push_back(std::string("k") + '\0' + '\0');
+  strings.push_back(std::string("\0\0\0\0", 4) + "lmn" +
+                    std::string("\0\0\0\0", 4));
+  for (std::vector<std::string>::iterator it = strings.begin();
+       it != strings.end(); ++it) {
+    std::wstring wstr = kwsys::Encoding::ToWide(*it);
+    std::string str = kwsys::Encoding::ToNarrow(wstr);
+    std::string s(*it);
+    std::replace(s.begin(), s.end(), '\0', ' ');
+    std::cout << "'" << s << "' (" << it->size() << ")" << std::endl;
+    if (str != *it) {
+      std::replace(str.begin(), str.end(), '\0', ' ');
+      std::cout << "string with null was different: '" << str << "' ("
+                << str.size() << ")" << std::endl;
+      ret++;
+    }
+  }
+  return ret;
+}
+
+static int testCommandLineArguments()
+{
+  int status = 0;
+
+  char const* argv[2] = { "./app.exe", (char const*)helloWorldStrings[1] };
+
+  kwsys::Encoding::CommandLineArguments args(2, argv);
+  kwsys::Encoding::CommandLineArguments arg2 =
+    kwsys::Encoding::CommandLineArguments(args);
+
+  char const* const* u8_argv = args.argv();
+  for (int i = 0; i < args.argc(); i++) {
+    char const* u8_arg = u8_argv[i];
+    if (strcmp(argv[i], u8_arg) != 0) {
+      std::cout << "argv[" << i << "] " << argv[i] << " != " << u8_arg
+                << std::endl;
+      status++;
+    }
+  }
+
+  kwsys::Encoding::CommandLineArguments args3 =
+    kwsys::Encoding::CommandLineArguments::Main(2, argv);
+
+  return status;
+}
+
+static int testToWindowsExtendedPath()
+{
+#ifdef _WIN32
+  int ret = 0;
+  if (kwsys::Encoding::ToWindowsExtendedPath(
+        "L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") !=
+      L"\\\\?\\L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") {
+    std::cout << "Problem with ToWindowsExtendedPath "
+              << "\"L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\""
+              << std::endl;
+    ++ret;
+  }
+
+  if (kwsys::Encoding::ToWindowsExtendedPath(
+        "L:/Local Mojo/Hex Power Pack/Iffy Voodoo") !=
+      L"\\\\?\\L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") {
+    std::cout << "Problem with ToWindowsExtendedPath "
+              << "\"L:/Local Mojo/Hex Power Pack/Iffy Voodoo\"" << std::endl;
+    ++ret;
+  }
+
+  if
(kwsys::Encoding::ToWindowsExtendedPath( + "\\\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") != + L"\\\\?\\UNC\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") { + std::cout << "Problem with ToWindowsExtendedPath " + << "\"\\\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\"" + << std::endl; + ++ret; + } + + if (kwsys::Encoding::ToWindowsExtendedPath( + "//Foo/Local Mojo/Hex Power Pack/Iffy Voodoo") != + L"\\\\?\\UNC\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") { + std::cout << "Problem with ToWindowsExtendedPath " + << "\"//Foo/Local Mojo/Hex Power Pack/Iffy Voodoo\"" + << std::endl; + ++ret; + } + + if (kwsys::Encoding::ToWindowsExtendedPath("//") != L"//") { + std::cout << "Problem with ToWindowsExtendedPath " + << "\"//\"" << std::endl; + ++ret; + } + + if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\") != L"\\\\.\\") { + std::cout << "Problem with ToWindowsExtendedPath " + << "\"\\\\.\\\"" << std::endl; + ++ret; + } + + if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\X") != L"\\\\.\\X") { + std::cout << "Problem with ToWindowsExtendedPath " + << "\"\\\\.\\X\"" << std::endl; + ++ret; + } + + if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\X:") != L"\\\\?\\X:") { + std::cout << "Problem with ToWindowsExtendedPath " + << "\"\\\\.\\X:\"" << std::endl; + ++ret; + } + + if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\X:\\") != + L"\\\\?\\X:\\") { + std::cout << "Problem with ToWindowsExtendedPath " + << "\"\\\\.\\X:\\\"" << std::endl; + ++ret; + } + + if (kwsys::Encoding::ToWindowsExtendedPath("NUL") != L"\\\\.\\NUL") { + std::cout << "Problem with ToWindowsExtendedPath " + << "\"NUL\"" << std::endl; + ++ret; + } + + return ret; +#else + return 0; +#endif +} + +int testEncoding(int, char* []) +{ + const char* loc = setlocale(LC_ALL, ""); + if (loc) { + std::cout << "Locale: " << loc << std::endl; + } else { + std::cout << "Locale: None" << std::endl; + } + + int ret = 0; + + ret |= testHelloWorldEncoding(); + ret |= testRobustEncoding(); + ret |= testCommandLineArguments(); + ret |= testWithNulls(); + ret |= testToWindowsExtendedPath(); + + return ret; +} diff --git a/test/API/driver/kwsys/testFStream.cxx b/test/API/driver/kwsys/testFStream.cxx new file mode 100644 index 00000000000..5009e988773 --- /dev/null +++ b/test/API/driver/kwsys/testFStream.cxx @@ -0,0 +1,113 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" + +#if defined(_MSC_VER) +# pragma warning(disable : 4786) +#endif + +#include KWSYS_HEADER(FStream.hxx) +#include +#ifdef __BORLANDC__ +# include /* memcmp */ +#endif + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. 
+#if 0 +# include "FStream.hxx.in" +#endif + +#include + +static int testNoFile() +{ + kwsys::ifstream in_file("NoSuchFile.txt"); + if (in_file) { + return 1; + } + + return 0; +} + +static const int num_test_files = 7; +static const int max_test_file_size = 45; + +static kwsys::FStream::BOM expected_bom[num_test_files] = { + kwsys::FStream::BOM_None, kwsys::FStream::BOM_None, + kwsys::FStream::BOM_UTF8, kwsys::FStream::BOM_UTF16LE, + kwsys::FStream::BOM_UTF16BE, kwsys::FStream::BOM_UTF32LE, + kwsys::FStream::BOM_UTF32BE +}; + +static unsigned char expected_bom_data[num_test_files][5] = { + { 0 }, + { 0 }, + { 3, 0xEF, 0xBB, 0xBF }, + { 2, 0xFF, 0xFE }, + { 2, 0xFE, 0xFF }, + { 4, 0xFF, 0xFE, 0x00, 0x00 }, + { 4, 0x00, 0x00, 0xFE, 0xFF }, +}; + +static unsigned char file_data[num_test_files][max_test_file_size] = { + { 1, 'H' }, + { 11, 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd' }, + { 11, 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd' }, + { 22, 0x48, 0x00, 0x65, 0x00, 0x6C, 0x00, 0x6C, 0x00, 0x6F, 0x00, 0x20, + 0x00, 0x57, 0x00, 0x6F, 0x00, 0x72, 0x00, 0x6C, 0x00, 0x64, 0x00 }, + { 22, 0x00, 0x48, 0x00, 0x65, 0x00, 0x6C, 0x00, 0x6C, 0x00, 0x6F, 0x00, + 0x20, 0x00, 0x57, 0x00, 0x6F, 0x00, 0x72, 0x00, 0x6C, 0x00, 0x64 }, + { 44, 0x48, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, + 0x00, 0x6C, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x57, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00, + 0x00, 0x6C, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00 }, + { 44, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, + 0x6C, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, + 0x72, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x64 }, +}; + +static int testBOM() +{ + // test various encodings in binary mode + for (int i = 0; i < num_test_files; i++) { + { + kwsys::ofstream out("bom.txt", kwsys::ofstream::binary); + out.write(reinterpret_cast(expected_bom_data[i] + 1), + *expected_bom_data[i]); + out.write(reinterpret_cast(file_data[i] + 1), + file_data[i][0]); + } + + kwsys::ifstream in("bom.txt", kwsys::ofstream::binary); + kwsys::FStream::BOM bom = kwsys::FStream::ReadBOM(in); + if (bom != expected_bom[i]) { + std::cout << "Unexpected BOM " << i << std::endl; + return 1; + } + char data[max_test_file_size]; + in.read(data, file_data[i][0]); + if (!in.good()) { + std::cout << "Unable to read data " << i << std::endl; + return 1; + } + + if (memcmp(data, file_data[i] + 1, file_data[i][0]) != 0) { + std::cout << "Incorrect read data " << i << std::endl; + return 1; + } + } + + return 0; +} + +int testFStream(int, char* []) +{ + int ret = 0; + + ret |= testNoFile(); + ret |= testBOM(); + + return ret; +} diff --git a/test/API/driver/kwsys/testFail.c b/test/API/driver/kwsys/testFail.c new file mode 100644 index 00000000000..82caeac37f4 --- /dev/null +++ b/test/API/driver/kwsys/testFail.c @@ -0,0 +1,24 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int testFail(int argc, char* argv[])
+{
+  char* env = getenv("DASHBOARD_TEST_FROM_CTEST");
+  int oldCtest = 0;
+  if (env) {
+    if (strcmp(env, "1") == 0) {
+      oldCtest = 1;
+    }
+    printf("DASHBOARD_TEST_FROM_CTEST = %s\n", env);
+  }
+  printf("%s: This test intentionally fails\n", argv[0]);
+  if (oldCtest) {
+    printf("The version of ctest is not able to handle intentionally failing "
+           "tests, so pass.\n");
+    return 0;
+  }
+  return argc;
+}
diff --git a/test/API/driver/kwsys/testHashSTL.cxx b/test/API/driver/kwsys/testHashSTL.cxx
new file mode 100644
index 00000000000..4ed2f899d1c
--- /dev/null
+++ b/test/API/driver/kwsys/testHashSTL.cxx
@@ -0,0 +1,64 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+   file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(hash_map.hxx)
+#include KWSYS_HEADER(hash_set.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "hash_map.hxx.in"
+# include "hash_set.hxx.in"
+#endif
+
+#include <iostream>
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4786)
+#endif
+
+#if defined(__sgi) && !defined(__GNUC__)
+# pragma set woff 1468 /* inline function cannot be explicitly instantiated \
+                        */
+#endif
+
+template class kwsys::hash_map<const char*, int>;
+template class kwsys::hash_set<int>;
+
+static bool test_hash_map()
+{
+  typedef kwsys::hash_map<const char*, int> mtype;
+  mtype m;
+  const char* keys[] = { "hello", "world" };
+  m[keys[0]] = 1;
+  m.insert(mtype::value_type(keys[1], 2));
+  int sum = 0;
+  for (mtype::iterator mi = m.begin(); mi != m.end(); ++mi) {
+    std::cout << "Found entry [" << mi->first << "," << mi->second << "]"
+              << std::endl;
+    sum += mi->second;
+  }
+  return sum == 3;
+}
+
+static bool test_hash_set()
+{
+  typedef kwsys::hash_set<int> stype;
+  stype s;
+  s.insert(1);
+  s.insert(2);
+  int sum = 0;
+  for (stype::iterator si = s.begin(); si != s.end(); ++si) {
+    std::cout << "Found entry [" << *si << "]" << std::endl;
+    sum += *si;
+  }
+  return sum == 3;
+}
+
+int testHashSTL(int, char* [])
+{
+  bool result = true;
+  result = test_hash_map() && result;
+  result = test_hash_set() && result;
+  return result ? 0 : 1;
+}
diff --git a/test/API/driver/kwsys/testProcess.c b/test/API/driver/kwsys/testProcess.c
new file mode 100644
index 00000000000..39aaa23ba85
--- /dev/null
+++ b/test/API/driver/kwsys/testProcess.c
@@ -0,0 +1,728 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+   file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Process.h)
+#include KWSYS_HEADER(Encoding.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+   duplicate the above list of headers. */
+#if 0
+# include "Encoding.h.in"
+# include "Process.h.in"
+#endif
+
+#include <assert.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if defined(_WIN32)
+# include <windows.h>
+#else
+# include <signal.h>
+# include <unistd.h>
+#endif
+
+#if defined(__BORLANDC__)
+# pragma warn - 8060 /* possibly incorrect assignment */
+#endif
+
+/* Platform-specific sleep functions. */
+
+#if defined(__BEOS__) && !defined(__ZETA__)
+/* BeOS 5 doesn't have usleep(), but it has snooze(), which is identical. */
+# include <be/kernel/OS.h>
+static inline void testProcess_usleep(unsigned int usec)
+{
+  snooze(usec);
+}
+#elif defined(_WIN32)
+/* Windows can only sleep in millisecond intervals.
*/ +static void testProcess_usleep(unsigned int usec) +{ + Sleep(usec / 1000); +} +#else +# define testProcess_usleep usleep +#endif + +#if defined(_WIN32) +static void testProcess_sleep(unsigned int sec) +{ + Sleep(sec * 1000); +} +#else +static void testProcess_sleep(unsigned int sec) +{ + sleep(sec); +} +#endif + +int runChild(const char* cmd[], int state, int exception, int value, int share, + int output, int delay, double timeout, int poll, int repeat, + int disown, int createNewGroup, unsigned int interruptDelay); + +static int test1(int argc, const char* argv[]) +{ + /* This is a very basic functional test of kwsysProcess. It is repeated + numerous times to verify that there are no resource leaks in kwsysProcess + that eventually lead to an error. Many versions of OS X will fail after + 256 leaked file handles, so 257 iterations seems to be a good test. On + the other hand, too many iterations will cause the test to time out - + especially if the test is instrumented with e.g. valgrind. + + If you have problems with this test timing out on your system, or want to + run more than 257 iterations, you can change the number of iterations by + setting the KWSYS_TEST_PROCESS_1_COUNT environment variable. */ + (void)argc; + (void)argv; + fprintf(stdout, "Output on stdout from test returning 0.\n"); + fprintf(stderr, "Output on stderr from test returning 0.\n"); + return 0; +} + +static int test2(int argc, const char* argv[]) +{ + (void)argc; + (void)argv; + fprintf(stdout, "Output on stdout from test returning 123.\n"); + fprintf(stderr, "Output on stderr from test returning 123.\n"); + return 123; +} + +static int test3(int argc, const char* argv[]) +{ + (void)argc; + (void)argv; + fprintf(stdout, "Output before sleep on stdout from timeout test.\n"); + fprintf(stderr, "Output before sleep on stderr from timeout test.\n"); + fflush(stdout); + fflush(stderr); + testProcess_sleep(15); + fprintf(stdout, "Output after sleep on stdout from timeout test.\n"); + fprintf(stderr, "Output after sleep on stderr from timeout test.\n"); + return 0; +} + +static int test4(int argc, const char* argv[]) +{ +#ifndef CRASH_USING_ABORT + /* Prepare a pointer to an invalid address. Don't use null, because + dereferencing null is undefined behaviour and compilers are free to + do whatever they want. ex: Clang will warn at compile time, or even + optimize away the write. We hope to 'outsmart' them by using + 'volatile' and a slightly larger address, based on a runtime value. */ + volatile int* invalidAddress = 0; + invalidAddress += argc ? 1 : 2; +#endif + +#if defined(_WIN32) + /* Avoid error diagnostic popups since we are crashing on purpose. */ + SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); +#elif defined(__BEOS__) || defined(__HAIKU__) + /* Avoid error diagnostic popups since we are crashing on purpose. */ + disable_debugger(1); +#endif + (void)argc; + (void)argv; + fprintf(stdout, "Output before crash on stdout from crash test.\n"); + fprintf(stderr, "Output before crash on stderr from crash test.\n"); + fflush(stdout); + fflush(stderr); +#ifdef CRASH_USING_ABORT + abort(); +#else + assert(invalidAddress); /* Quiet Clang scan-build. */ + /* Provoke deliberate crash by writing to the invalid address. 
*/ + *invalidAddress = 0; +#endif + fprintf(stdout, "Output after crash on stdout from crash test.\n"); + fprintf(stderr, "Output after crash on stderr from crash test.\n"); + return 0; +} + +static int test5(int argc, const char* argv[]) +{ + int r; + const char* cmd[4]; + (void)argc; + cmd[0] = argv[0]; + cmd[1] = "run"; + cmd[2] = "4"; + cmd[3] = 0; + fprintf(stdout, "Output on stdout before recursive test.\n"); + fprintf(stderr, "Output on stderr before recursive test.\n"); + fflush(stdout); + fflush(stderr); + r = runChild(cmd, kwsysProcess_State_Exception, +#ifdef CRASH_USING_ABORT + kwsysProcess_Exception_Other, +#else + kwsysProcess_Exception_Fault, +#endif + 1, 1, 1, 0, 15, 0, 1, 0, 0, 0); + fprintf(stdout, "Output on stdout after recursive test.\n"); + fprintf(stderr, "Output on stderr after recursive test.\n"); + fflush(stdout); + fflush(stderr); + return r; +} + +#define TEST6_SIZE (4096 * 2) +static void test6(int argc, const char* argv[]) +{ + int i; + char runaway[TEST6_SIZE + 1]; + (void)argc; + (void)argv; + for (i = 0; i < TEST6_SIZE; ++i) { + runaway[i] = '.'; + } + runaway[TEST6_SIZE] = '\n'; + + /* Generate huge amounts of output to test killing. */ + for (;;) { + fwrite(runaway, 1, TEST6_SIZE + 1, stdout); + fflush(stdout); + } +} + +/* Define MINPOLL to be one more than the number of times output is + written. Define MAXPOLL to be the largest number of times a loop + delaying 1/10th of a second should ever have to poll. */ +#define MINPOLL 5 +#define MAXPOLL 20 +static int test7(int argc, const char* argv[]) +{ + (void)argc; + (void)argv; + fprintf(stdout, "Output on stdout before sleep.\n"); + fprintf(stderr, "Output on stderr before sleep.\n"); + fflush(stdout); + fflush(stderr); + /* Sleep for 1 second. */ + testProcess_sleep(1); + fprintf(stdout, "Output on stdout after sleep.\n"); + fprintf(stderr, "Output on stderr after sleep.\n"); + fflush(stdout); + fflush(stderr); + return 0; +} + +static int test8(int argc, const char* argv[]) +{ + /* Create a disowned grandchild to test handling of processes + that exit before their children. */ + int r; + const char* cmd[4]; + (void)argc; + cmd[0] = argv[0]; + cmd[1] = "run"; + cmd[2] = "108"; + cmd[3] = 0; + fprintf(stdout, "Output on stdout before grandchild test.\n"); + fprintf(stderr, "Output on stderr before grandchild test.\n"); + fflush(stdout); + fflush(stderr); + r = runChild(cmd, kwsysProcess_State_Disowned, kwsysProcess_Exception_None, + 1, 1, 1, 0, 10, 0, 1, 1, 0, 0); + fprintf(stdout, "Output on stdout after grandchild test.\n"); + fprintf(stderr, "Output on stderr after grandchild test.\n"); + fflush(stdout); + fflush(stderr); + return r; +} + +static int test8_grandchild(int argc, const char* argv[]) +{ + (void)argc; + (void)argv; + fprintf(stdout, "Output on stdout from grandchild before sleep.\n"); + fprintf(stderr, "Output on stderr from grandchild before sleep.\n"); + fflush(stdout); + fflush(stderr); + /* TODO: Instead of closing pipes here leave them open to make sure + the grandparent can stop listening when the parent exits. This + part of the test cannot be enabled until the feature is + implemented. */ + fclose(stdout); + fclose(stderr); + testProcess_sleep(15); + return 0; +} + +static int test9(int argc, const char* argv[]) +{ + /* Test Ctrl+C behavior: the root test program will send a Ctrl+C to this + process. Here, we start a child process that sleeps for a long time + while ignoring signals. 
The test is successful if this process waits + for the child to return before exiting from the Ctrl+C handler. + + WARNING: This test will falsely pass if the share parameter of runChild + was set to 0 when invoking the test9 process. */ + int r; + const char* cmd[4]; + (void)argc; + cmd[0] = argv[0]; + cmd[1] = "run"; + cmd[2] = "109"; + cmd[3] = 0; + fprintf(stdout, "Output on stdout before grandchild test.\n"); + fprintf(stderr, "Output on stderr before grandchild test.\n"); + fflush(stdout); + fflush(stderr); + r = runChild(cmd, kwsysProcess_State_Exited, kwsysProcess_Exception_None, 0, + 1, 1, 0, 30, 0, 1, 0, 0, 0); + /* This sleep will avoid a race condition between this function exiting + normally and our Ctrl+C handler exiting abnormally after the process + exits. */ + testProcess_sleep(1); + fprintf(stdout, "Output on stdout after grandchild test.\n"); + fprintf(stderr, "Output on stderr after grandchild test.\n"); + fflush(stdout); + fflush(stderr); + return r; +} + +#if defined(_WIN32) +static BOOL WINAPI test9_grandchild_handler(DWORD dwCtrlType) +{ + /* Ignore all Ctrl+C/Break signals. We must use an actual handler function + instead of using SetConsoleCtrlHandler(NULL, TRUE) so that we can also + ignore Ctrl+Break in addition to Ctrl+C. */ + (void)dwCtrlType; + return TRUE; +} +#endif + +static int test9_grandchild(int argc, const char* argv[]) +{ + /* The grandchild just sleeps for a few seconds while ignoring signals. */ + (void)argc; + (void)argv; +#if defined(_WIN32) + if (!SetConsoleCtrlHandler(test9_grandchild_handler, TRUE)) { + return 1; + } +#else + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sa.sa_handler = SIG_IGN; + sigemptyset(&sa.sa_mask); + if (sigaction(SIGINT, &sa, 0) < 0) { + return 1; + } +#endif + fprintf(stdout, "Output on stdout from grandchild before sleep.\n"); + fprintf(stderr, "Output on stderr from grandchild before sleep.\n"); + fflush(stdout); + fflush(stderr); + /* Sleep for 9 seconds. */ + testProcess_sleep(9); + fprintf(stdout, "Output on stdout from grandchild after sleep.\n"); + fprintf(stderr, "Output on stderr from grandchild after sleep.\n"); + fflush(stdout); + fflush(stderr); + return 0; +} + +static int test10(int argc, const char* argv[]) +{ + /* Test Ctrl+C behavior: the root test program will send a Ctrl+C to this + process. Here, we start a child process that sleeps for a long time and + processes signals normally. However, this grandchild is created in a new + process group - ensuring that Ctrl+C we receive is sent to our process + groups. We make sure it exits anyway. */ + int r; + const char* cmd[4]; + (void)argc; + cmd[0] = argv[0]; + cmd[1] = "run"; + cmd[2] = "110"; + cmd[3] = 0; + fprintf(stdout, "Output on stdout before grandchild test.\n"); + fprintf(stderr, "Output on stderr before grandchild test.\n"); + fflush(stdout); + fflush(stderr); + r = + runChild(cmd, kwsysProcess_State_Exception, + kwsysProcess_Exception_Interrupt, 0, 1, 1, 0, 30, 0, 1, 0, 1, 0); + fprintf(stdout, "Output on stdout after grandchild test.\n"); + fprintf(stderr, "Output on stderr after grandchild test.\n"); + fflush(stdout); + fflush(stderr); + return r; +} + +static int test10_grandchild(int argc, const char* argv[]) +{ + /* The grandchild just sleeps for a few seconds and handles signals. */ + (void)argc; + (void)argv; + fprintf(stdout, "Output on stdout from grandchild before sleep.\n"); + fprintf(stderr, "Output on stderr from grandchild before sleep.\n"); + fflush(stdout); + fflush(stderr); + /* Sleep for 6 seconds. 
*/ + testProcess_sleep(6); + fprintf(stdout, "Output on stdout from grandchild after sleep.\n"); + fprintf(stderr, "Output on stderr from grandchild after sleep.\n"); + fflush(stdout); + fflush(stderr); + return 0; +} + +static int runChild2(kwsysProcess* kp, const char* cmd[], int state, + int exception, int value, int share, int output, + int delay, double timeout, int poll, int disown, + int createNewGroup, unsigned int interruptDelay) +{ + int result = 0; + char* data = 0; + int length = 0; + double userTimeout = 0; + double* pUserTimeout = 0; + kwsysProcess_SetCommand(kp, cmd); + if (timeout >= 0) { + kwsysProcess_SetTimeout(kp, timeout); + } + if (share) { + kwsysProcess_SetPipeShared(kp, kwsysProcess_Pipe_STDOUT, 1); + kwsysProcess_SetPipeShared(kp, kwsysProcess_Pipe_STDERR, 1); + } + if (disown) { + kwsysProcess_SetOption(kp, kwsysProcess_Option_Detach, 1); + } + if (createNewGroup) { + kwsysProcess_SetOption(kp, kwsysProcess_Option_CreateProcessGroup, 1); + } + kwsysProcess_Execute(kp); + + if (poll) { + pUserTimeout = &userTimeout; + } + + if (interruptDelay) { + testProcess_sleep(interruptDelay); + kwsysProcess_Interrupt(kp); + } + + if (!share && !disown) { + int p; + while ((p = kwsysProcess_WaitForData(kp, &data, &length, pUserTimeout))) { + if (output) { + if (poll && p == kwsysProcess_Pipe_Timeout) { + fprintf(stdout, "WaitForData timeout reached.\n"); + fflush(stdout); + + /* Count the number of times we polled without getting data. + If it is excessive then kill the child and fail. */ + if (++poll >= MAXPOLL) { + fprintf(stdout, "Poll count reached limit %d.\n", MAXPOLL); + kwsysProcess_Kill(kp); + } + } else { + fwrite(data, 1, (size_t)length, stdout); + fflush(stdout); + } + } + if (poll) { + /* Delay to avoid busy loop during polling. */ + testProcess_usleep(100000); + } + if (delay) { +/* Purposely sleeping only on Win32 to let pipe fill up. */ +#if defined(_WIN32) + testProcess_usleep(100000); +#endif + } + } + } + + if (disown) { + kwsysProcess_Disown(kp); + } else { + kwsysProcess_WaitForExit(kp, 0); + } + + switch (kwsysProcess_GetState(kp)) { + case kwsysProcess_State_Starting: + printf("No process has been executed.\n"); + break; + case kwsysProcess_State_Executing: + printf("The process is still executing.\n"); + break; + case kwsysProcess_State_Expired: + printf("Child was killed when timeout expired.\n"); + break; + case kwsysProcess_State_Exited: + printf("Child exited with value = %d\n", kwsysProcess_GetExitValue(kp)); + result = ((exception != kwsysProcess_GetExitException(kp)) || + (value != kwsysProcess_GetExitValue(kp))); + break; + case kwsysProcess_State_Killed: + printf("Child was killed by parent.\n"); + break; + case kwsysProcess_State_Exception: + printf("Child terminated abnormally: %s\n", + kwsysProcess_GetExceptionString(kp)); + result = ((exception != kwsysProcess_GetExitException(kp)) || + (value != kwsysProcess_GetExitValue(kp))); + break; + case kwsysProcess_State_Disowned: + printf("Child was disowned.\n"); + break; + case kwsysProcess_State_Error: + printf("Error in administrating child process: [%s]\n", + kwsysProcess_GetErrorString(kp)); + break; + } + + if (result) { + if (exception != kwsysProcess_GetExitException(kp)) { + fprintf(stderr, + "Mismatch in exit exception. " + "Should have been %d, was %d.\n", + exception, kwsysProcess_GetExitException(kp)); + } + if (value != kwsysProcess_GetExitValue(kp)) { + fprintf(stderr, + "Mismatch in exit value. 
" + "Should have been %d, was %d.\n", + value, kwsysProcess_GetExitValue(kp)); + } + } + + if (kwsysProcess_GetState(kp) != state) { + fprintf(stderr, + "Mismatch in state. " + "Should have been %d, was %d.\n", + state, kwsysProcess_GetState(kp)); + result = 1; + } + + /* We should have polled more times than there were data if polling + was enabled. */ + if (poll && poll < MINPOLL) { + fprintf(stderr, "Poll count is %d, which is less than %d.\n", poll, + MINPOLL); + result = 1; + } + + return result; +} + +/** + * Runs a child process and blocks until it returns. Arguments as follows: + * + * cmd = Command line to run. + * state = Expected return value of kwsysProcess_GetState after exit. + * exception = Expected return value of kwsysProcess_GetExitException. + * value = Expected return value of kwsysProcess_GetExitValue. + * share = Whether to share stdout/stderr child pipes with our pipes + * by way of kwsysProcess_SetPipeShared. If false, new pipes + * are created. + * output = If !share && !disown, whether to write the child's stdout + * and stderr output to our stdout. + * delay = If !share && !disown, adds an additional short delay to + * the pipe loop to allow the pipes to fill up; Windows only. + * timeout = Non-zero to sets a timeout in seconds via + * kwsysProcess_SetTimeout. + * poll = If !share && !disown, we count the number of 0.1 second + * intervals where the child pipes had no new data. We fail + * if not in the bounds of MINPOLL/MAXPOLL. + * repeat = Number of times to run the process. + * disown = If set, the process is disowned. + * createNewGroup = If set, the process is created in a new process group. + * interruptDelay = If non-zero, number of seconds to delay before + * interrupting the process. Note that this delay will occur + * BEFORE any reading/polling of pipes occurs and before any + * detachment occurs. + */ +int runChild(const char* cmd[], int state, int exception, int value, int share, + int output, int delay, double timeout, int poll, int repeat, + int disown, int createNewGroup, unsigned int interruptDelay) +{ + int result = 1; + kwsysProcess* kp = kwsysProcess_New(); + if (!kp) { + fprintf(stderr, "kwsysProcess_New returned NULL!\n"); + return 1; + } + while (repeat-- > 0) { + result = runChild2(kp, cmd, state, exception, value, share, output, delay, + timeout, poll, disown, createNewGroup, interruptDelay); + if (result) { + break; + } + } + kwsysProcess_Delete(kp); + return result; +} + +int main(int argc, const char* argv[]) +{ + int n = 0; + +#ifdef _WIN32 + int i; + char new_args[10][_MAX_PATH]; + LPWSTR* w_av = CommandLineToArgvW(GetCommandLineW(), &argc); + for (i = 0; i < argc; i++) { + kwsysEncoding_wcstombs(new_args[i], w_av[i], _MAX_PATH); + argv[i] = new_args[i]; + } + LocalFree(w_av); +#endif + +#if 0 + { + HANDLE out = GetStdHandle(STD_OUTPUT_HANDLE); + DuplicateHandle(GetCurrentProcess(), out, + GetCurrentProcess(), &out, 0, FALSE, + DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE); + SetStdHandle(STD_OUTPUT_HANDLE, out); + } + { + HANDLE out = GetStdHandle(STD_ERROR_HANDLE); + DuplicateHandle(GetCurrentProcess(), out, + GetCurrentProcess(), &out, 0, FALSE, + DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE); + SetStdHandle(STD_ERROR_HANDLE, out); + } +#endif + if (argc == 2) { + n = atoi(argv[1]); + } else if (argc == 3 && strcmp(argv[1], "run") == 0) { + n = atoi(argv[2]); + } + /* Check arguments. */ + if (((n >= 1 && n <= 10) || n == 108 || n == 109 || n == 110) && argc == 3) { + /* This is the child process for a requested test number. 
*/ + switch (n) { + case 1: + return test1(argc, argv); + case 2: + return test2(argc, argv); + case 3: + return test3(argc, argv); + case 4: + return test4(argc, argv); + case 5: + return test5(argc, argv); + case 6: + test6(argc, argv); + return 0; + case 7: + return test7(argc, argv); + case 8: + return test8(argc, argv); + case 9: + return test9(argc, argv); + case 10: + return test10(argc, argv); + case 108: + return test8_grandchild(argc, argv); + case 109: + return test9_grandchild(argc, argv); + case 110: + return test10_grandchild(argc, argv); + } + fprintf(stderr, "Invalid test number %d.\n", n); + return 1; + } else if (n >= 1 && n <= 10) { + /* This is the parent process for a requested test number. */ + int states[10] = { + kwsysProcess_State_Exited, kwsysProcess_State_Exited, + kwsysProcess_State_Expired, kwsysProcess_State_Exception, + kwsysProcess_State_Exited, kwsysProcess_State_Expired, + kwsysProcess_State_Exited, kwsysProcess_State_Exited, + kwsysProcess_State_Expired, /* Ctrl+C handler test */ + kwsysProcess_State_Exception /* Process group test */ + }; + int exceptions[10] = { + kwsysProcess_Exception_None, kwsysProcess_Exception_None, + kwsysProcess_Exception_None, +#ifdef CRASH_USING_ABORT + kwsysProcess_Exception_Other, +#else + kwsysProcess_Exception_Fault, +#endif + kwsysProcess_Exception_None, kwsysProcess_Exception_None, + kwsysProcess_Exception_None, kwsysProcess_Exception_None, + kwsysProcess_Exception_None, kwsysProcess_Exception_Interrupt + }; + int values[10] = { 0, 123, 1, 1, 0, 0, 0, 0, 1, 1 }; + int shares[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 }; + int outputs[10] = { 1, 1, 1, 1, 1, 0, 1, 1, 1, 1 }; + int delays[10] = { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 }; + double timeouts[10] = { 10, 10, 10, 30, 30, 10, -1, 10, 6, 4 }; + int polls[10] = { 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 }; + int repeat[10] = { 257, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; + int createNewGroups[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 }; + unsigned int interruptDelays[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 2 }; + int r; + const char* cmd[4]; +#ifdef _WIN32 + char* argv0 = 0; +#endif + char* test1IterationsStr = getenv("KWSYS_TEST_PROCESS_1_COUNT"); + if (test1IterationsStr) { + long int test1Iterations = strtol(test1IterationsStr, 0, 10); + if (test1Iterations > 10 && test1Iterations != LONG_MAX) { + repeat[0] = (int)test1Iterations; + } + } +#ifdef _WIN32 + if (n == 0 && (argv0 = strdup(argv[0]))) { + /* Try converting to forward slashes to see if it works. */ + char* c; + for (c = argv0; *c; ++c) { + if (*c == '\\') { + *c = '/'; + } + } + cmd[0] = argv0; + } else { + cmd[0] = argv[0]; + } +#else + cmd[0] = argv[0]; +#endif + cmd[1] = "run"; + cmd[2] = argv[1]; + cmd[3] = 0; + fprintf(stdout, "Output on stdout before test %d.\n", n); + fprintf(stderr, "Output on stderr before test %d.\n", n); + fflush(stdout); + fflush(stderr); + r = runChild(cmd, states[n - 1], exceptions[n - 1], values[n - 1], + shares[n - 1], outputs[n - 1], delays[n - 1], timeouts[n - 1], + polls[n - 1], repeat[n - 1], 0, createNewGroups[n - 1], + interruptDelays[n - 1]); + fprintf(stdout, "Output on stdout after test %d.\n", n); + fprintf(stderr, "Output on stderr after test %d.\n", n); + fflush(stdout); + fflush(stderr); +#if defined(_WIN32) + free(argv0); +#endif + return r; + } else if (argc > 2 && strcmp(argv[1], "0") == 0) { + /* This is the special debugging test to run a given command + line. 
*/ + const char** cmd = argv + 2; + int state = kwsysProcess_State_Exited; + int exception = kwsysProcess_Exception_None; + int value = 0; + double timeout = 0; + int r = + runChild(cmd, state, exception, value, 0, 1, 0, timeout, 0, 1, 0, 0, 0); + return r; + } else { + /* Improper usage. */ + fprintf(stdout, "Usage: %s \n", argv[0]); + return 1; + } +} diff --git a/test/API/driver/kwsys/testSharedForward.c.in b/test/API/driver/kwsys/testSharedForward.c.in new file mode 100644 index 00000000000..b3eb4138289 --- /dev/null +++ b/test/API/driver/kwsys/testSharedForward.c.in @@ -0,0 +1,27 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#if defined(CMAKE_INTDIR) +# define CONFIG_DIR_PRE CMAKE_INTDIR "/" +# define CONFIG_DIR_POST "/" CMAKE_INTDIR +#else +# define CONFIG_DIR_PRE "" +# define CONFIG_DIR_POST "" +#endif +#define @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD "@EXEC_DIR@" +#define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD "." CONFIG_DIR_POST +#define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL 0 +#define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD \ + CONFIG_DIR_PRE "@KWSYS_NAMESPACE@TestProcess" +#define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL \ + "@KWSYS_NAMESPACE@TestProcess" +#define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND "--command" +#define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT "--print" +#define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD "--ldd" +#if defined(CMAKE_INTDIR) +# define @KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME CMAKE_INTDIR +#endif +#include <@KWSYS_NAMESPACE@/SharedForward.h> +int main(int argc, char** argv) +{ + return @KWSYS_NAMESPACE@_shared_forward_to_real(argc, argv); +} diff --git a/test/API/driver/kwsys/testSystemInformation.cxx b/test/API/driver/kwsys/testSystemInformation.cxx new file mode 100644 index 00000000000..154517eae4b --- /dev/null +++ b/test/API/driver/kwsys/testSystemInformation.cxx @@ -0,0 +1,106 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(SystemInformation.hxx) + +// Work-around CMake dependency scanning limitation. This must +// duplicate the above list of headers. 
+#if 0
+# include "SystemInformation.hxx.in"
+#endif
+
+#include <iostream>
+
+#if defined(KWSYS_USE_LONG_LONG)
+# if defined(KWSYS_IOS_HAS_OSTREAM_LONG_LONG)
+# define iostreamLongLong(x) (x)
+# else
+# define iostreamLongLong(x) ((long)x)
+# endif
+#elif defined(KWSYS_USE___INT64)
+# if defined(KWSYS_IOS_HAS_OSTREAM___INT64)
+# define iostreamLongLong(x) (x)
+# else
+# define iostreamLongLong(x) ((long)x)
+# endif
+#else
+# error "No Long Long"
+#endif
+
+#define printMethod(info, m) std::cout << #m << ": " << info.m() << "\n"
+
+#define printMethod2(info, m, unit) \
+  std::cout << #m << ": " << info.m() << " " << unit << "\n"
+
+#define printMethod3(info, m, unit) \
+  std::cout << #m << ": " << iostreamLongLong(info.m) << " " << unit << "\n"
+
+int testSystemInformation(int, char* [])
+{
+  std::cout << "CTEST_FULL_OUTPUT\n"; // avoid truncation
+
+  kwsys::SystemInformation info;
+  info.RunCPUCheck();
+  info.RunOSCheck();
+  info.RunMemoryCheck();
+  printMethod(info, GetOSName);
+  printMethod(info, GetOSIsLinux);
+  printMethod(info, GetOSIsApple);
+  printMethod(info, GetOSIsWindows);
+  printMethod(info, GetHostname);
+  printMethod(info, GetFullyQualifiedDomainName);
+  printMethod(info, GetOSRelease);
+  printMethod(info, GetOSVersion);
+  printMethod(info, GetOSPlatform);
+  printMethod(info, Is64Bits);
+  printMethod(info, GetVendorString);
+  printMethod(info, GetVendorID);
+  printMethod(info, GetTypeID);
+  printMethod(info, GetFamilyID);
+  printMethod(info, GetModelID);
+  printMethod(info, GetExtendedProcessorName);
+  printMethod(info, GetSteppingCode);
+  printMethod(info, GetProcessorSerialNumber);
+  printMethod2(info, GetProcessorCacheSize, "KB");
+  printMethod(info, GetLogicalProcessorsPerPhysical);
+  printMethod2(info, GetProcessorClockFrequency, "MHz");
+  printMethod(info, GetNumberOfLogicalCPU);
+  printMethod(info, GetNumberOfPhysicalCPU);
+  printMethod(info, DoesCPUSupportCPUID);
+  printMethod(info, GetProcessorAPICID);
+  printMethod2(info, GetTotalVirtualMemory, "MB");
+  printMethod2(info, GetAvailableVirtualMemory, "MB");
+  printMethod2(info, GetTotalPhysicalMemory, "MB");
+  printMethod2(info, GetAvailablePhysicalMemory, "MB");
+  printMethod3(info, GetHostMemoryTotal(), "KiB");
+  printMethod3(info, GetHostMemoryAvailable("KWSHL"), "KiB");
+  printMethod3(info, GetProcMemoryAvailable("KWSHL", "KWSPL"), "KiB");
+  printMethod3(info, GetHostMemoryUsed(), "KiB");
+  printMethod3(info, GetProcMemoryUsed(), "KiB");
+  printMethod(info, GetLoadAverage);
+
+  for (long int i = 0; i <= 31; i++) {
+    if (info.DoesCPUSupportFeature(static_cast<long int>(1) << i)) {
+      std::cout << "CPU feature " << i << "\n";
+    }
+  }
+
+  /* test stack trace
+  */
+  std::cout << "Program Stack:" << std::endl
+            << kwsys::SystemInformation::GetProgramStack(0, 0) << std::endl
+            << std::endl;
+
+  /* test segv handler
+  info.SetStackTraceOnError(1);
+  double *d = (double*)100;
+  *d=0;
+  */
+
+  /* test abort handler
+  info.SetStackTraceOnError(1);
+  abort();
+  */
+
+  return 0;
+}
diff --git a/test/API/driver/kwsys/testSystemTools.bin b/test/API/driver/kwsys/testSystemTools.bin
new file mode 100644
index 0000000000000000000000000000000000000000..961a4043b9b2785351ab26a33cfcb1f366c1391b
GIT binary patch
literal 766
zcmbV~J8r`;5JX3D078KQr%G#;xrK8icPgzTq*cc{r`QAV5H3@?bYXZsLq;FVX_BRe
z%g5!cJ`hlGG|ej%-%r3B^E=g0A5?{B&Opc7@6oZyO4pUdnM;@%vkIOsxNAjmXiL+d
z<7
zAIM?AiVDY~%?XgU=c3&OkPg?P^8282@1&KxNx}UnZQM`N*8ME)+%M9>{YuT^22-rR
A-v9sr
literal 0
HcmV?d00001
diff --git a/test/API/driver/kwsys/testSystemTools.cxx b/test/API/driver/kwsys/testSystemTools.cxx
new file mode 100644
index 00000000000..1f3a15b5912
--- /dev/null
+++ b/test/API/driver/kwsys/testSystemTools.cxx
@@ -0,0 +1,1128 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+   file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4786)
+#endif
+
+#include KWSYS_HEADER(FStream.hxx)
+#include KWSYS_HEADER(SystemTools.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "FStream.hxx.in"
+# include "SystemTools.hxx.in"
+#endif
+
+// Include with <> instead of "" to avoid getting any in-source copy
+// left on disk.
+#include <testSystemTools.h>
+
+#include <iostream>
+#include <stdlib.h> /* free */
+#include <string.h> /* strcmp */
+#if defined(_WIN32) && !defined(__CYGWIN__)
+# include <io.h> /* _umask (MSVC) / umask (Borland) */
+# ifdef _MSC_VER
+# define umask _umask // Note this is still umask on Borland
+# endif
+#endif
+#include <sys/stat.h> /* umask (POSIX), _S_I* constants (Windows) */
+// Visual C++ does not define mode_t (note that Borland does, however).
+#if defined(_MSC_VER)
+typedef unsigned short mode_t;
+#endif
+
+static const char* toUnixPaths[][2] = {
+  { "/usr/local/bin/passwd", "/usr/local/bin/passwd" },
+  { "/usr/lo cal/bin/pa sswd", "/usr/lo cal/bin/pa sswd" },
+  { "/usr/lo\\ cal/bin/pa\\ sswd", "/usr/lo/ cal/bin/pa/ sswd" },
+  { "c:/usr/local/bin/passwd", "c:/usr/local/bin/passwd" },
+  { "c:/usr/lo cal/bin/pa sswd", "c:/usr/lo cal/bin/pa sswd" },
+  { "c:/usr/lo\\ cal/bin/pa\\ sswd", "c:/usr/lo/ cal/bin/pa/ sswd" },
+  { "\\usr\\local\\bin\\passwd", "/usr/local/bin/passwd" },
+  { "\\usr\\lo cal\\bin\\pa sswd", "/usr/lo cal/bin/pa sswd" },
+  { "\\usr\\lo\\ cal\\bin\\pa\\ sswd", "/usr/lo/ cal/bin/pa/ sswd" },
+  { "c:\\usr\\local\\bin\\passwd", "c:/usr/local/bin/passwd" },
+  { "c:\\usr\\lo cal\\bin\\pa sswd", "c:/usr/lo cal/bin/pa sswd" },
+  { "c:\\usr\\lo\\ cal\\bin\\pa\\ sswd", "c:/usr/lo/ cal/bin/pa/ sswd" },
+  { "\\\\usr\\local\\bin\\passwd", "//usr/local/bin/passwd" },
+  { "\\\\usr\\lo cal\\bin\\pa sswd", "//usr/lo cal/bin/pa sswd" },
+  { "\\\\usr\\lo\\ cal\\bin\\pa\\ sswd", "//usr/lo/ cal/bin/pa/ sswd" },
+  { nullptr, nullptr }
+};
+
+static bool CheckConvertToUnixSlashes(std::string const& input,
+                                      std::string const& output)
+{
+  std::string result = input;
+  kwsys::SystemTools::ConvertToUnixSlashes(result);
+  if (result != output) {
+    std::cerr << "Problem with ConvertToUnixSlashes - input: " << input
+              << " output: " << result << " expected: " << output << std::endl;
+    return false;
+  }
+  return true;
+}
+
+static const char* checkEscapeChars[][4] = {
+  { "1 foo 2 bar 2", "12", "\\", "\\1 foo \\2 bar \\2" },
+  { " {} ", "{}", "#", " #{#} " },
+  { nullptr, nullptr, nullptr, nullptr }
+};
+
+static bool CheckEscapeChars(std::string const& input,
+                             const char* chars_to_escape, char escape_char,
+                             std::string const& output)
+{
+  std::string result = kwsys::SystemTools::EscapeChars(
+    input.c_str(), chars_to_escape, escape_char);
+  if (result != output) {
+    std::cerr << "Problem with CheckEscapeChars - input: " << input
+              << " output: " << result << " expected: " << output << std::endl;
+    return false;
+  }
+  return true;
+}
+
+static bool CheckFileOperations()
+{
+  bool res = true;
+  const std::string testNonExistingFile(TEST_SYSTEMTOOLS_SOURCE_DIR
+                                        "/testSystemToolsNonExistingFile");
+  const std::string testDotFile(TEST_SYSTEMTOOLS_SOURCE_DIR "/.");
+  const std::string testBinFile(TEST_SYSTEMTOOLS_SOURCE_DIR
"/testSystemTools.bin"); + const std::string testTxtFile(TEST_SYSTEMTOOLS_SOURCE_DIR + "/testSystemTools.cxx"); + const std::string testNewDir(TEST_SYSTEMTOOLS_BINARY_DIR + "/testSystemToolsNewDir"); + const std::string testNewFile(testNewDir + "/testNewFile.txt"); + + if (kwsys::SystemTools::DetectFileType(testNonExistingFile.c_str()) != + kwsys::SystemTools::FileTypeUnknown) { + std::cerr << "Problem with DetectFileType - failed to detect type of: " + << testNonExistingFile << std::endl; + res = false; + } + + if (kwsys::SystemTools::DetectFileType(testDotFile.c_str()) != + kwsys::SystemTools::FileTypeUnknown) { + std::cerr << "Problem with DetectFileType - failed to detect type of: " + << testDotFile << std::endl; + res = false; + } + + if (kwsys::SystemTools::DetectFileType(testBinFile.c_str()) != + kwsys::SystemTools::FileTypeBinary) { + std::cerr << "Problem with DetectFileType - failed to detect type of: " + << testBinFile << std::endl; + res = false; + } + + if (kwsys::SystemTools::DetectFileType(testTxtFile.c_str()) != + kwsys::SystemTools::FileTypeText) { + std::cerr << "Problem with DetectFileType - failed to detect type of: " + << testTxtFile << std::endl; + res = false; + } + + if (kwsys::SystemTools::FileLength(testBinFile) != 766) { + std::cerr << "Problem with FileLength - incorrect length for: " + << testBinFile << std::endl; + res = false; + } + + kwsys::SystemTools::Stat_t buf; + if (kwsys::SystemTools::Stat(testTxtFile.c_str(), &buf) != 0) { + std::cerr << "Problem with Stat - unable to stat text file: " + << testTxtFile << std::endl; + res = false; + } + + if (kwsys::SystemTools::Stat(testBinFile, &buf) != 0) { + std::cerr << "Problem with Stat - unable to stat bin file: " << testBinFile + << std::endl; + res = false; + } + + if (!kwsys::SystemTools::MakeDirectory(testNewDir)) { + std::cerr << "Problem with MakeDirectory for: " << testNewDir << std::endl; + res = false; + } + // calling it again should just return true + if (!kwsys::SystemTools::MakeDirectory(testNewDir)) { + std::cerr << "Problem with second call to MakeDirectory for: " + << testNewDir << std::endl; + res = false; + } + // calling with 0 pointer should return false + if (kwsys::SystemTools::MakeDirectory(nullptr)) { + std::cerr << "Problem with MakeDirectory(0)" << std::endl; + res = false; + } + // calling with an empty string should return false + if (kwsys::SystemTools::MakeDirectory(std::string())) { + std::cerr << "Problem with MakeDirectory(std::string())" << std::endl; + res = false; + } + // check existence + if (!kwsys::SystemTools::FileExists(testNewDir.c_str(), false)) { + std::cerr << "Problem with FileExists as C string and not file for: " + << testNewDir << std::endl; + res = false; + } + // check existence + if (!kwsys::SystemTools::PathExists(testNewDir)) { + std::cerr << "Problem with PathExists for: " << testNewDir << std::endl; + res = false; + } + // remove it + if (!kwsys::SystemTools::RemoveADirectory(testNewDir)) { + std::cerr << "Problem with RemoveADirectory for: " << testNewDir + << std::endl; + res = false; + } + // check existence + if (kwsys::SystemTools::FileExists(testNewDir.c_str(), false)) { + std::cerr << "After RemoveADirectory: " + << "Problem with FileExists as C string and not file for: " + << testNewDir << std::endl; + res = false; + } + // check existence + if (kwsys::SystemTools::PathExists(testNewDir)) { + std::cerr << "After RemoveADirectory: " + << "Problem with PathExists for: " << testNewDir << std::endl; + res = false; + } + // create it using the char* 
version + if (!kwsys::SystemTools::MakeDirectory(testNewDir.c_str())) { + std::cerr << "Problem with second call to MakeDirectory as C string for: " + << testNewDir << std::endl; + res = false; + } + + if (!kwsys::SystemTools::Touch(testNewFile, true)) { + std::cerr << "Problem with Touch for: " << testNewFile << std::endl; + res = false; + } + // calling MakeDirectory with something that is no file should fail + if (kwsys::SystemTools::MakeDirectory(testNewFile)) { + std::cerr << "Problem with to MakeDirectory for: " << testNewFile + << std::endl; + res = false; + } + + // calling with 0 pointer should return false + if (kwsys::SystemTools::FileExists(nullptr)) { + std::cerr << "Problem with FileExists(0)" << std::endl; + res = false; + } + if (kwsys::SystemTools::FileExists(nullptr, true)) { + std::cerr << "Problem with FileExists(0) as file" << std::endl; + res = false; + } + // calling with an empty string should return false + if (kwsys::SystemTools::FileExists(std::string())) { + std::cerr << "Problem with FileExists(std::string())" << std::endl; + res = false; + } + // FileExists(x, true) should return false on a directory + if (kwsys::SystemTools::FileExists(testNewDir, true)) { + std::cerr << "Problem with FileExists as file for: " << testNewDir + << std::endl; + res = false; + } + if (kwsys::SystemTools::FileExists(testNewDir.c_str(), true)) { + std::cerr << "Problem with FileExists as C string and file for: " + << testNewDir << std::endl; + res = false; + } + // FileExists(x, false) should return true even on a directory + if (!kwsys::SystemTools::FileExists(testNewDir, false)) { + std::cerr << "Problem with FileExists as not file for: " << testNewDir + << std::endl; + res = false; + } + if (!kwsys::SystemTools::FileExists(testNewDir.c_str(), false)) { + std::cerr << "Problem with FileExists as C string and not file for: " + << testNewDir << std::endl; + res = false; + } + // should work, was created as new file before + if (!kwsys::SystemTools::FileExists(testNewFile)) { + std::cerr << "Problem with FileExists for: " << testNewFile << std::endl; + res = false; + } + if (!kwsys::SystemTools::FileExists(testNewFile.c_str())) { + std::cerr << "Problem with FileExists as C string for: " << testNewFile + << std::endl; + res = false; + } + if (!kwsys::SystemTools::FileExists(testNewFile, true)) { + std::cerr << "Problem with FileExists as file for: " << testNewFile + << std::endl; + res = false; + } + if (!kwsys::SystemTools::FileExists(testNewFile.c_str(), true)) { + std::cerr << "Problem with FileExists as C string and file for: " + << testNewFile << std::endl; + res = false; + } + + // calling with an empty string should return false + if (kwsys::SystemTools::PathExists(std::string())) { + std::cerr << "Problem with PathExists(std::string())" << std::endl; + res = false; + } + // PathExists(x) should return true on a directory + if (!kwsys::SystemTools::PathExists(testNewDir)) { + std::cerr << "Problem with PathExists for: " << testNewDir << std::endl; + res = false; + } + // should work, was created as new file before + if (!kwsys::SystemTools::PathExists(testNewFile)) { + std::cerr << "Problem with PathExists for: " << testNewFile << std::endl; + res = false; + } + +// Reset umask +#if defined(_WIN32) && !defined(__CYGWIN__) + // NOTE: Windows doesn't support toggling _S_IREAD. + mode_t fullMask = _S_IWRITE; +#else + // On a normal POSIX platform, we can toggle all permissions. 
+ mode_t fullMask = S_IRWXU | S_IRWXG | S_IRWXO; +#endif + mode_t orig_umask = umask(fullMask); + + // Test file permissions without umask + mode_t origPerm, thisPerm; + if (!kwsys::SystemTools::GetPermissions(testNewFile, origPerm)) { + std::cerr << "Problem with GetPermissions (1) for: " << testNewFile + << std::endl; + res = false; + } + + if (!kwsys::SystemTools::SetPermissions(testNewFile, 0)) { + std::cerr << "Problem with SetPermissions (1) for: " << testNewFile + << std::endl; + res = false; + } + + if (!kwsys::SystemTools::GetPermissions(testNewFile, thisPerm)) { + std::cerr << "Problem with GetPermissions (2) for: " << testNewFile + << std::endl; + res = false; + } + + if ((thisPerm & fullMask) != 0) { + std::cerr << "SetPermissions failed to set permissions (1) for: " + << testNewFile << ": actual = " << thisPerm + << "; expected = " << 0 << std::endl; + res = false; + } + + // While we're at it, check proper TestFileAccess functionality. + if (kwsys::SystemTools::TestFileAccess(testNewFile, + kwsys::TEST_FILE_WRITE)) { + std::cerr + << "TestFileAccess incorrectly indicated that this is a writable file:" + << testNewFile << std::endl; + res = false; + } + + if (!kwsys::SystemTools::TestFileAccess(testNewFile, kwsys::TEST_FILE_OK)) { + std::cerr + << "TestFileAccess incorrectly indicated that this file does not exist:" + << testNewFile << std::endl; + res = false; + } + + // Test restoring/setting full permissions. + if (!kwsys::SystemTools::SetPermissions(testNewFile, fullMask)) { + std::cerr << "Problem with SetPermissions (2) for: " << testNewFile + << std::endl; + res = false; + } + + if (!kwsys::SystemTools::GetPermissions(testNewFile, thisPerm)) { + std::cerr << "Problem with GetPermissions (3) for: " << testNewFile + << std::endl; + res = false; + } + + if ((thisPerm & fullMask) != fullMask) { + std::cerr << "SetPermissions failed to set permissions (2) for: " + << testNewFile << ": actual = " << thisPerm + << "; expected = " << fullMask << std::endl; + res = false; + } + + // Test setting file permissions while honoring umask + if (!kwsys::SystemTools::SetPermissions(testNewFile, fullMask, true)) { + std::cerr << "Problem with SetPermissions (3) for: " << testNewFile + << std::endl; + res = false; + } + + if (!kwsys::SystemTools::GetPermissions(testNewFile, thisPerm)) { + std::cerr << "Problem with GetPermissions (4) for: " << testNewFile + << std::endl; + res = false; + } + + if ((thisPerm & fullMask) != 0) { + std::cerr << "SetPermissions failed to honor umask for: " << testNewFile + << ": actual = " << thisPerm << "; expected = " << 0 + << std::endl; + res = false; + } + + // Restore umask + umask(orig_umask); + + // Restore file permissions + if (!kwsys::SystemTools::SetPermissions(testNewFile, origPerm)) { + std::cerr << "Problem with SetPermissions (4) for: " << testNewFile + << std::endl; + res = false; + } + + // Remove the test file + if (!kwsys::SystemTools::RemoveFile(testNewFile)) { + std::cerr << "Problem with RemoveFile: " << testNewFile << std::endl; + res = false; + } + + std::string const testFileMissing(testNewDir + "/testMissingFile.txt"); + if (!kwsys::SystemTools::RemoveFile(testFileMissing)) { + std::string const& msg = kwsys::SystemTools::GetLastSystemError(); + std::cerr << "RemoveFile(\"" << testFileMissing << "\") failed: " << msg + << "\n"; + res = false; + } + + std::string const testFileMissingDir(testNewDir + "/missing/file.txt"); + if (!kwsys::SystemTools::RemoveFile(testFileMissingDir)) { + std::string const& msg = 
kwsys::SystemTools::GetLastSystemError(); + std::cerr << "RemoveFile(\"" << testFileMissingDir << "\") failed: " << msg + << "\n"; + res = false; + } + + kwsys::SystemTools::Touch(testNewFile, true); + if (!kwsys::SystemTools::RemoveADirectory(testNewDir)) { + std::cerr << "Problem with RemoveADirectory for: " << testNewDir + << std::endl; + res = false; + } + +#ifdef KWSYS_TEST_SYSTEMTOOLS_LONG_PATHS + // Perform the same file and directory creation and deletion tests but + // with paths > 256 characters in length. + + const std::string testNewLongDir( + TEST_SYSTEMTOOLS_BINARY_DIR + "/" + "012345678901234567890123456789012345678901234567890123456789" + "012345678901234567890123456789012345678901234567890123456789" + "012345678901234567890123456789012345678901234567890123456789" + "012345678901234567890123456789012345678901234567890123456789" + "01234567890123"); + const std::string testNewLongFile( + testNewLongDir + + "/" + "012345678901234567890123456789012345678901234567890123456789" + "012345678901234567890123456789012345678901234567890123456789" + "012345678901234567890123456789012345678901234567890123456789" + "012345678901234567890123456789012345678901234567890123456789" + "0123456789.txt"); + + if (!kwsys::SystemTools::MakeDirectory(testNewLongDir)) { + std::cerr << "Problem with MakeDirectory for: " << testNewLongDir + << std::endl; + res = false; + } + + if (!kwsys::SystemTools::Touch(testNewLongFile.c_str(), true)) { + std::cerr << "Problem with Touch for: " << testNewLongFile << std::endl; + res = false; + } + + if (!kwsys::SystemTools::RemoveFile(testNewLongFile)) { + std::cerr << "Problem with RemoveFile: " << testNewLongFile << std::endl; + res = false; + } + + kwsys::SystemTools::Touch(testNewLongFile.c_str(), true); + if (!kwsys::SystemTools::RemoveADirectory(testNewLongDir)) { + std::cerr << "Problem with RemoveADirectory for: " << testNewLongDir + << std::endl; + res = false; + } +#endif + + return res; +} + +static bool CheckStringOperations() +{ + bool res = true; + + std::string test = "mary had a little lamb."; + if (kwsys::SystemTools::CapitalizedWords(test) != + "Mary Had A Little Lamb.") { + std::cerr << "Problem with CapitalizedWords " << '"' << test << '"' + << std::endl; + res = false; + } + + test = "Mary Had A Little Lamb."; + if (kwsys::SystemTools::UnCapitalizedWords(test) != + "mary had a little lamb.") { + std::cerr << "Problem with UnCapitalizedWords " << '"' << test << '"' + << std::endl; + res = false; + } + + test = "MaryHadTheLittleLamb."; + if (kwsys::SystemTools::AddSpaceBetweenCapitalizedWords(test) != + "Mary Had The Little Lamb.") { + std::cerr << "Problem with AddSpaceBetweenCapitalizedWords " << '"' << test + << '"' << std::endl; + res = false; + } + + char* cres = + kwsys::SystemTools::AppendStrings("Mary Had A", " Little Lamb."); + if (strcmp(cres, "Mary Had A Little Lamb.")) { + std::cerr << "Problem with AppendStrings " + << "\"Mary Had A\" \" Little Lamb.\"" << std::endl; + res = false; + } + delete[] cres; + + cres = kwsys::SystemTools::AppendStrings("Mary Had", " A ", "Little Lamb."); + if (strcmp(cres, "Mary Had A Little Lamb.")) { + std::cerr << "Problem with AppendStrings " + << "\"Mary Had\" \" A \" \"Little Lamb.\"" << std::endl; + res = false; + } + delete[] cres; + + if (kwsys::SystemTools::CountChar("Mary Had A Little Lamb.", 'a') != 3) { + std::cerr << "Problem with CountChar " + << "\"Mary Had A Little Lamb.\"" << std::endl; + res = false; + } + + cres = kwsys::SystemTools::RemoveChars("Mary Had A Little Lamb.", "aeiou"); + 
if (strcmp(cres, "Mry Hd A Lttl Lmb.")) { + std::cerr << "Problem with RemoveChars " + << "\"Mary Had A Little Lamb.\"" << std::endl; + res = false; + } + delete[] cres; + + cres = kwsys::SystemTools::RemoveCharsButUpperHex("Mary Had A Little Lamb."); + if (strcmp(cres, "A")) { + std::cerr << "Problem with RemoveCharsButUpperHex " + << "\"Mary Had A Little Lamb.\"" << std::endl; + res = false; + } + delete[] cres; + + char* cres2 = strdup("Mary Had A Little Lamb."); + kwsys::SystemTools::ReplaceChars(cres2, "aeiou", 'X'); + if (strcmp(cres2, "MXry HXd A LXttlX LXmb.")) { + std::cerr << "Problem with ReplaceChars " + << "\"Mary Had A Little Lamb.\"" << std::endl; + res = false; + } + free(cres2); + + if (!kwsys::SystemTools::StringStartsWith("Mary Had A Little Lamb.", + "Mary ")) { + std::cerr << "Problem with StringStartsWith " + << "\"Mary Had A Little Lamb.\"" << std::endl; + res = false; + } + + if (!kwsys::SystemTools::StringEndsWith("Mary Had A Little Lamb.", + " Lamb.")) { + std::cerr << "Problem with StringEndsWith " + << "\"Mary Had A Little Lamb.\"" << std::endl; + res = false; + } + + cres = kwsys::SystemTools::DuplicateString("Mary Had A Little Lamb."); + if (strcmp(cres, "Mary Had A Little Lamb.")) { + std::cerr << "Problem with DuplicateString " + << "\"Mary Had A Little Lamb.\"" << std::endl; + res = false; + } + delete[] cres; + + test = "Mary Had A Little Lamb."; + if (kwsys::SystemTools::CropString(test, 13) != "Mary ...Lamb.") { + std::cerr << "Problem with CropString " + << "\"Mary Had A Little Lamb.\"" << std::endl; + res = false; + } + + std::vector lines; + kwsys::SystemTools::Split("Mary Had A Little Lamb.", lines, ' '); + if (lines[0] != "Mary" || lines[1] != "Had" || lines[2] != "A" || + lines[3] != "Little" || lines[4] != "Lamb.") { + std::cerr << "Problem with Split " + << "\"Mary Had A Little Lamb.\"" << std::endl; + res = false; + } + + if (kwsys::SystemTools::ConvertToWindowsOutputPath( + "L://Local Mojo/Hex Power Pack/Iffy Voodoo") != + "\"L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\"") { + std::cerr << "Problem with ConvertToWindowsOutputPath " + << "\"L://Local Mojo/Hex Power Pack/Iffy Voodoo\"" << std::endl; + res = false; + } + + if (kwsys::SystemTools::ConvertToWindowsOutputPath( + "//grayson/Local Mojo/Hex Power Pack/Iffy Voodoo") != + "\"\\\\grayson\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\"") { + std::cerr << "Problem with ConvertToWindowsOutputPath " + << "\"//grayson/Local Mojo/Hex Power Pack/Iffy Voodoo\"" + << std::endl; + res = false; + } + + if (kwsys::SystemTools::ConvertToUnixOutputPath( + "//Local Mojo/Hex Power Pack/Iffy Voodoo") != + "//Local\\ Mojo/Hex\\ Power\\ Pack/Iffy\\ Voodoo") { + std::cerr << "Problem with ConvertToUnixOutputPath " + << "\"//Local Mojo/Hex Power Pack/Iffy Voodoo\"" << std::endl; + res = false; + } + + return res; +} + +static bool CheckPutEnv(const std::string& env, const char* name, + const char* value) +{ + if (!kwsys::SystemTools::PutEnv(env)) { + std::cerr << "PutEnv(\"" << env << "\") failed!" << std::endl; + return false; + } + std::string v = "(null)"; + kwsys::SystemTools::GetEnv(name, v); + if (v != value) { + std::cerr << "GetEnv(\"" << name << "\") returned \"" << v << "\", not \"" + << value << "\"!" << std::endl; + return false; + } + return true; +} + +static bool CheckUnPutEnv(const char* env, const char* name) +{ + if (!kwsys::SystemTools::UnPutEnv(env)) { + std::cerr << "UnPutEnv(\"" << env << "\") failed!" 
<< std::endl;
+    return false;
+  }
+  std::string v;
+  if (kwsys::SystemTools::GetEnv(name, v)) {
+    std::cerr << "GetEnv(\"" << name << "\") returned \"" << v
+              << "\", not (null)!" << std::endl;
+    return false;
+  }
+  return true;
+}
+
+static bool CheckEnvironmentOperations()
+{
+  bool res = true;
+  res &= CheckPutEnv("A=B", "A", "B");
+  res &= CheckPutEnv("B=C", "B", "C");
+  res &= CheckPutEnv("C=D", "C", "D");
+  res &= CheckPutEnv("D=E", "D", "E");
+  res &= CheckUnPutEnv("A", "A");
+  res &= CheckUnPutEnv("B=", "B");
+  res &= CheckUnPutEnv("C=D", "C");
+  /* Leave "D=E" in environment so a memory checker can test for leaks. */
+  return res;
+}
+
+static bool CheckRelativePath(const std::string& local,
+                              const std::string& remote,
+                              const std::string& expected)
+{
+  std::string result = kwsys::SystemTools::RelativePath(local, remote);
+  if (!kwsys::SystemTools::ComparePath(expected, result)) {
+    std::cerr << "RelativePath(" << local << ", " << remote << ") yielded "
+              << result << " instead of " << expected << std::endl;
+    return false;
+  }
+  return true;
+}
+
+static bool CheckRelativePaths()
+{
+  bool res = true;
+  res &= CheckRelativePath("/usr/share", "/bin/bash", "../../bin/bash");
+  res &= CheckRelativePath("/usr/./share/", "/bin/bash", "../../bin/bash");
+  res &= CheckRelativePath("/usr//share/", "/bin/bash", "../../bin/bash");
+  res &=
+    CheckRelativePath("/usr/share/../bin/", "/bin/bash", "../../bin/bash");
+  res &= CheckRelativePath("/usr/share", "/usr/share//bin", "bin");
+  return res;
+}
+
+static bool CheckCollapsePath(const std::string& path,
+                              const std::string& expected,
+                              const char* base = nullptr)
+{
+  std::string result = kwsys::SystemTools::CollapseFullPath(path, base);
+  if (!kwsys::SystemTools::ComparePath(expected, result)) {
+    std::cerr << "CollapseFullPath(" << path << ") yielded " << result
+              << " instead of " << expected << std::endl;
+    return false;
+  }
+  return true;
+}
+
+static bool CheckCollapsePath()
+{
+  bool res = true;
+  res &= CheckCollapsePath("/usr/share/*", "/usr/share/*");
+  res &= CheckCollapsePath("C:/Windows/*", "C:/Windows/*");
+  res &= CheckCollapsePath("/usr/share/../lib", "/usr/lib");
+  res &= CheckCollapsePath("/usr/share/./lib", "/usr/share/lib");
+  res &= CheckCollapsePath("/usr/share/../../lib", "/lib");
+  res &= CheckCollapsePath("/usr/share/.././../lib", "/lib");
+  res &= CheckCollapsePath("/../lib", "/lib");
+  res &= CheckCollapsePath("/../lib/", "/lib");
+  res &= CheckCollapsePath("/", "/");
+  res &= CheckCollapsePath("C:/", "C:/");
+  res &= CheckCollapsePath("C:/../", "C:/");
+  res &= CheckCollapsePath("C:/../../", "C:/");
+  res &= CheckCollapsePath("../b", "../../b", "../");
+  res &= CheckCollapsePath("../a/../b", "../b", "../rel");
+  res &= CheckCollapsePath("a/../b", "../rel/b", "../rel");
+  return res;
+}
+
+static std::string StringVectorToString(const std::vector<std::string>& vec)
+{
+  std::stringstream ss;
+  ss << "vector(";
+  for (std::vector<std::string>::const_iterator i = vec.begin();
+       i != vec.end(); ++i) {
+    if (i != vec.begin()) {
+      ss << ", ";
+    }
+    ss << *i;
+  }
+  ss << ")";
+  return ss.str();
+}
+
+static bool CheckGetPath()
+{
+  const char* envName = "S";
+#ifdef _WIN32
+  const char* envValue = "C:\\Somewhere\\something;D:\\Temp";
+#else
+  const char* envValue = "/Somewhere/something:/tmp";
+#endif
+  const char* registryPath = "[HKEY_LOCAL_MACHINE\\SOFTWARE\\MyApp; MyKey]";
+
+  std::vector<std::string> originalPaths;
+  originalPaths.push_back(registryPath);
+
+  std::vector<std::string> expectedPaths;
+  expectedPaths.push_back(registryPath);
+#ifdef _WIN32
+  expectedPaths.push_back("C:/Somewhere/something");
+  expectedPaths.push_back("D:/Temp");
+#else
+  expectedPaths.push_back("/Somewhere/something");
+  expectedPaths.push_back("/tmp");
+#endif
+
+  bool res = true;
+  res &= CheckPutEnv(std::string(envName) + "=" + envValue, envName, envValue);
+
+  std::vector<std::string> paths = originalPaths;
+  kwsys::SystemTools::GetPath(paths, envName);
+
+  if (paths != expectedPaths) {
+    std::cerr << "GetPath(" << StringVectorToString(originalPaths) << ", "
+              << envName << ") yielded " << StringVectorToString(paths)
+              << " instead of " << StringVectorToString(expectedPaths)
+              << std::endl;
+    res = false;
+  }
+
+  res &= CheckUnPutEnv(envName, envName);
+  return res;
+}
+
+static bool CheckGetFilenameName()
+{
+  const char* windowsFilepath = "C:\\somewhere\\something";
+  const char* unixFilepath = "/somewhere/something";
+
+#if defined(_WIN32) || defined(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES)
+  std::string expectedWindowsFilename = "something";
+#else
+  std::string expectedWindowsFilename = "C:\\somewhere\\something";
+#endif
+  std::string expectedUnixFilename = "something";
+
+  bool res = true;
+  std::string filename = kwsys::SystemTools::GetFilenameName(windowsFilepath);
+  if (filename != expectedWindowsFilename) {
+    std::cerr << "GetFilenameName(" << windowsFilepath << ") yielded "
+              << filename << " instead of " << expectedWindowsFilename
+              << std::endl;
+    res = false;
+  }
+
+  filename = kwsys::SystemTools::GetFilenameName(unixFilepath);
+  if (filename != expectedUnixFilename) {
+    std::cerr << "GetFilenameName(" << unixFilepath << ") yielded " << filename
+              << " instead of " << expectedUnixFilename << std::endl;
+    res = false;
+  }
+  return res;
+}
+
+static bool CheckFind()
+{
+  bool res = true;
+  const std::string testFindFileName("testFindFile.txt");
+  const std::string testFindFile(TEST_SYSTEMTOOLS_BINARY_DIR "/" +
+                                 testFindFileName);
+
+  if (!kwsys::SystemTools::Touch(testFindFile, true)) {
+    std::cerr << "Problem with Touch for: " << testFindFile << std::endl;
+    // abort here as the existence of the file only makes the test meaningful
+    return false;
+  }
+
+  std::vector<std::string> searchPaths;
+  searchPaths.push_back(TEST_SYSTEMTOOLS_BINARY_DIR);
+  if (kwsys::SystemTools::FindFile(testFindFileName, searchPaths, true)
+        .empty()) {
+    std::cerr << "Problem with FindFile without system paths for: "
+              << testFindFileName << std::endl;
+    res = false;
+  }
+  if (kwsys::SystemTools::FindFile(testFindFileName, searchPaths, false)
+        .empty()) {
+    std::cerr << "Problem with FindFile with system paths for: "
+              << testFindFileName << std::endl;
+    res = false;
+  }
+
+  return res;
+}
+
+static bool CheckIsSubDirectory()
+{
+  bool res = true;
+
+  if (kwsys::SystemTools::IsSubDirectory("/foo", "/") == false) {
+    std::cerr << "Problem with IsSubDirectory (root - unix): " << std::endl;
+    res = false;
+  }
+  if (kwsys::SystemTools::IsSubDirectory("c:/foo", "c:/") == false) {
+    std::cerr << "Problem with IsSubDirectory (root - dos): " << std::endl;
+    res = false;
+  }
+  if (kwsys::SystemTools::IsSubDirectory("/foo/bar", "/foo") == false) {
+    std::cerr << "Problem with IsSubDirectory (deep): " << std::endl;
+    res = false;
+  }
+  if (kwsys::SystemTools::IsSubDirectory("/foo", "/foo") == true) {
+    std::cerr << "Problem with IsSubDirectory (identity): " << std::endl;
+    res = false;
+  }
+  if (kwsys::SystemTools::IsSubDirectory("/fooo", "/foo") == true) {
+    std::cerr << "Problem with IsSubDirectory (substring): " << std::endl;
+    res = false;
+  }
+  if (kwsys::SystemTools::IsSubDirectory("/foo/",
"/foo") == true) { + std::cerr << "Problem with IsSubDirectory (prepended slash): " + << std::endl; + res = false; + } + + return res; +} + +static bool CheckGetLineFromStream() +{ + const std::string fileWithFiveCharsOnFirstLine(TEST_SYSTEMTOOLS_SOURCE_DIR + "/README.rst"); + + kwsys::ifstream file(fileWithFiveCharsOnFirstLine.c_str(), std::ios::in); + + if (!file) { + std::cerr << "Problem opening: " << fileWithFiveCharsOnFirstLine + << std::endl; + return false; + } + + std::string line; + bool has_newline = false; + bool result; + + file.seekg(0, std::ios::beg); + result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1); + if (!result || line.size() != 5) { + std::cerr << "First line does not have five characters: " << line.size() + << std::endl; + return false; + } + + file.seekg(0, std::ios::beg); + result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1); + if (!result || line.size() != 5) { + std::cerr << "First line does not have five characters after rewind: " + << line.size() << std::endl; + return false; + } + + bool ret = true; + + for (size_t size = 1; size <= 5; ++size) { + file.seekg(0, std::ios::beg); + result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, + static_cast(size)); + if (!result || line.size() != size) { + std::cerr << "Should have read " << size << " characters but got " + << line.size() << std::endl; + ret = false; + } + } + + return ret; +} + +static bool CheckGetLineFromStreamLongLine() +{ + const std::string fileWithLongLine("longlines.txt"); + std::string firstLine, secondLine; + // First line: large buffer, containing a carriage return for some reason. + firstLine.assign(2050, ' '); + firstLine += "\rfirst"; + secondLine.assign(2050, 'y'); + secondLine += "second"; + + // Create file with long lines. + { + kwsys::ofstream out(fileWithLongLine.c_str(), std::ios::binary); + if (!out) { + std::cerr << "Problem opening for write: " << fileWithLongLine + << std::endl; + return false; + } + out << firstLine << "\r\n\n" << secondLine << "\n"; + } + + kwsys::ifstream file(fileWithLongLine.c_str(), std::ios::binary); + if (!file) { + std::cerr << "Problem opening: " << fileWithLongLine << std::endl; + return false; + } + + std::string line; + bool has_newline = false; + bool result; + + // Read first line. + result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1); + if (!result || line != firstLine) { + std::cerr << "First line does not match, expected " << firstLine.size() + << " characters, got " << line.size() << std::endl; + return false; + } + if (!has_newline) { + std::cerr << "Expected new line to be read from first line" << std::endl; + return false; + } + + // Read empty line. + has_newline = false; + result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1); + if (!result || !line.empty()) { + std::cerr << "Expected successful read with an empty line, got " + << line.size() << " characters" << std::endl; + return false; + } + if (!has_newline) { + std::cerr << "Expected new line to be read for an empty line" << std::endl; + return false; + } + + // Read second line. 
+ has_newline = false; + result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1); + if (!result || line != secondLine) { + std::cerr << "Second line does not match, expected " << secondLine.size() + << " characters, got " << line.size() << std::endl; + return false; + } + if (!has_newline) { + std::cerr << "Expected new line to be read from second line" << std::endl; + return false; + } + + return true; +} + +static bool writeFile(const char* fileName, const char* data) +{ + kwsys::ofstream out(fileName, std::ios::binary); + out << data; + if (!out) { + std::cerr << "Failed to write file: " << fileName << std::endl; + return false; + } + return true; +} + +static std::string readFile(const char* fileName) +{ + kwsys::ifstream in(fileName, std::ios::binary); + std::stringstream sstr; + sstr << in.rdbuf(); + std::string data = sstr.str(); + if (!in) { + std::cerr << "Failed to read file: " << fileName << std::endl; + return std::string(); + } + return data; +} + +struct +{ + const char* a; + const char* b; + bool differ; +} diff_test_cases[] = { { "one", "one", false }, + { "one", "two", true }, + { "", "", false }, + { "\n", "\r\n", false }, + { "one\n", "one\n", false }, + { "one\r\n", "one\n", false }, + { "one\n", "one", false }, + { "one\ntwo", "one\ntwo", false }, + { "one\ntwo", "one\r\ntwo", false } }; + +static bool CheckTextFilesDiffer() +{ + const int num_test_cases = + sizeof(diff_test_cases) / sizeof(diff_test_cases[0]); + for (int i = 0; i < num_test_cases; ++i) { + if (!writeFile("file_a", diff_test_cases[i].a) || + !writeFile("file_b", diff_test_cases[i].b)) { + return false; + } + if (kwsys::SystemTools::TextFilesDiffer("file_a", "file_b") != + diff_test_cases[i].differ) { + std::cerr << "Incorrect TextFilesDiffer result for test case " << i + 1 + << "." << std::endl; + return false; + } + } + + return true; +} + +static bool CheckCopyFileIfDifferent() +{ + bool ret = true; + const int num_test_cases = + sizeof(diff_test_cases) / sizeof(diff_test_cases[0]); + for (int i = 0; i < num_test_cases; ++i) { + if (!writeFile("file_a", diff_test_cases[i].a) || + !writeFile("file_b", diff_test_cases[i].b)) { + return false; + } + const char* cptarget = + i < 4 ? TEST_SYSTEMTOOLS_BINARY_DIR "/file_b" : "file_b"; + if (!kwsys::SystemTools::CopyFileIfDifferent("file_a", cptarget)) { + std::cerr << "CopyFileIfDifferent() returned false for test case " + << i + 1 << "." << std::endl; + ret = false; + continue; + } + std::string bdata = readFile("file_b"); + if (diff_test_cases[i].a != bdata) { + std::cerr << "Incorrect CopyFileIfDifferent file contents in test case " + << i + 1 << "." 
<< std::endl; + ret = false; + continue; + } + } + + return ret; +} + +int testSystemTools(int, char* []) +{ + bool res = true; + + int cc; + for (cc = 0; toUnixPaths[cc][0]; cc++) { + res &= CheckConvertToUnixSlashes(toUnixPaths[cc][0], toUnixPaths[cc][1]); + } + + // Special check for ~ + std::string output; + if (kwsys::SystemTools::GetEnv("HOME", output)) { + output += "/foo bar/lala"; + res &= CheckConvertToUnixSlashes("~/foo bar/lala", output); + } + + for (cc = 0; checkEscapeChars[cc][0]; cc++) { + res &= CheckEscapeChars(checkEscapeChars[cc][0], checkEscapeChars[cc][1], + *checkEscapeChars[cc][2], checkEscapeChars[cc][3]); + } + + res &= CheckFileOperations(); + + res &= CheckStringOperations(); + + res &= CheckEnvironmentOperations(); + + res &= CheckRelativePaths(); + + res &= CheckCollapsePath(); + + res &= CheckGetPath(); + + res &= CheckFind(); + + res &= CheckIsSubDirectory(); + + res &= CheckGetLineFromStream(); + + res &= CheckGetLineFromStreamLongLine(); + + res &= CheckGetFilenameName(); + + res &= CheckTextFilesDiffer(); + + res &= CheckCopyFileIfDifferent(); + + return res ? 0 : 1; +} diff --git a/test/API/driver/kwsys/testSystemTools.h.in b/test/API/driver/kwsys/testSystemTools.h.in new file mode 100644 index 00000000000..022e36e2f44 --- /dev/null +++ b/test/API/driver/kwsys/testSystemTools.h.in @@ -0,0 +1,12 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#ifndef @KWSYS_NAMESPACE@_testSystemtools_h +#define @KWSYS_NAMESPACE@_testSystemtools_h + +#define EXECUTABLE_OUTPUT_PATH "@CMAKE_CURRENT_BINARY_DIR@" + +#define TEST_SYSTEMTOOLS_SOURCE_DIR "@TEST_SYSTEMTOOLS_SOURCE_DIR@" +#define TEST_SYSTEMTOOLS_BINARY_DIR "@TEST_SYSTEMTOOLS_BINARY_DIR@" +#cmakedefine KWSYS_TEST_SYSTEMTOOLS_LONG_PATHS + +#endif diff --git a/test/API/driver/kwsys/testTerminal.c b/test/API/driver/kwsys/testTerminal.c new file mode 100644 index 00000000000..652830ccd91 --- /dev/null +++ b/test/API/driver/kwsys/testTerminal.c @@ -0,0 +1,22 @@ +/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying + file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ +#include "kwsysPrivate.h" +#include KWSYS_HEADER(Terminal.h) + +/* Work-around CMake dependency scanning limitation. This must + duplicate the above list of headers. */ +#if 0 +# include "Terminal.h.in" +#endif + +int testTerminal(int argc, char* argv[]) +{ + (void)argc; + (void)argv; + kwsysTerminal_cfprintf(kwsysTerminal_Color_ForegroundYellow | + kwsysTerminal_Color_BackgroundBlue | + kwsysTerminal_Color_AssumeTTY, + stdout, "Hello %s!", "World"); + fprintf(stdout, "\n"); + return 0; +} diff --git a/test/API/driver/kwsys/update-gitsetup.bash b/test/API/driver/kwsys/update-gitsetup.bash new file mode 100644 index 00000000000..aa83cb8079e --- /dev/null +++ b/test/API/driver/kwsys/update-gitsetup.bash @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -e +set -x +shopt -s dotglob + +readonly name="GitSetup" +readonly ownership="GitSetup Upstream " +readonly subtree="GitSetup" +readonly repo="https://gitlab.kitware.com/utils/gitsetup.git" +readonly tag="setup" +readonly shortlog=false +readonly paths=" +" + +extract_source () { + git_archive +} + +. 
"${BASH_SOURCE%/*}/update-third-party.bash" diff --git a/test/API/driver/kwsys/update-third-party.bash b/test/API/driver/kwsys/update-third-party.bash new file mode 100644 index 00000000000..3b8358e0114 --- /dev/null +++ b/test/API/driver/kwsys/update-third-party.bash @@ -0,0 +1,169 @@ +#============================================================================= +# Copyright 2015-2016 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +######################################################################## +# Script for updating third party packages. +# +# This script should be sourced in a project-specific script which sets +# the following variables: +# +# name +# The name of the project. +# ownership +# A git author name/email for the commits. +# subtree +# The location of the thirdparty package within the main source +# tree. +# repo +# The git repository to use as upstream. +# tag +# The tag, branch or commit hash to use for upstream. +# shortlog +# Optional. Set to 'true' to get a shortlog in the commit message. +# +# Additionally, an "extract_source" function must be defined. It will be +# run within the checkout of the project on the requested tag. It should +# should place the desired tree into $extractdir/$name-reduced. This +# directory will be used as the newest commit for the project. +# +# For convenience, the function may use the "git_archive" function which +# does a standard "git archive" extraction using the (optional) "paths" +# variable to only extract a subset of the source tree. 
+######################################################################## + +######################################################################## +# Utility functions +######################################################################## +git_archive () { + git archive --worktree-attributes --prefix="$name-reduced/" HEAD -- $paths | \ + tar -C "$extractdir" -x +} + +die () { + echo >&2 "$@" + exit 1 +} + +warn () { + echo >&2 "warning: $@" +} + +readonly regex_date='20[0-9][0-9]-[0-9][0-9]-[0-9][0-9]' +readonly basehash_regex="$name $regex_date ([0-9a-f]*)" +readonly basehash="$( git rev-list --author="$ownership" --grep="$basehash_regex" -n 1 HEAD )" +readonly upstream_old_short="$( git cat-file commit "$basehash" | sed -n '/'"$basehash_regex"'/ {s/.*(//;s/)//;p}' | egrep '^[0-9a-f]+$' )" + +######################################################################## +# Sanity checking +######################################################################## +[ -n "$name" ] || \ + die "'name' is empty" +[ -n "$ownership" ] || \ + die "'ownership' is empty" +[ -n "$subtree" ] || \ + die "'subtree' is empty" +[ -n "$repo" ] || \ + die "'repo' is empty" +[ -n "$tag" ] || \ + die "'tag' is empty" +[ -n "$basehash" ] || \ + warn "'basehash' is empty; performing initial import" +readonly do_shortlog="${shortlog-false}" + +readonly workdir="$PWD/work" +readonly upstreamdir="$workdir/upstream" +readonly extractdir="$workdir/extract" + +[ -d "$workdir" ] && \ + die "error: workdir '$workdir' already exists" + +trap "rm -rf '$workdir'" EXIT + +# Get upstream +git clone "$repo" "$upstreamdir" + +if [ -n "$basehash" ]; then + # Use the existing package's history + git worktree add "$extractdir" "$basehash" + # Clear out the working tree + pushd "$extractdir" + git ls-files | xargs rm -v + find . -type d -empty -delete + popd +else + # Create a repo to hold this package's history + mkdir -p "$extractdir" + git -C "$extractdir" init +fi + +# Extract the subset of upstream we care about +pushd "$upstreamdir" +git checkout "$tag" +readonly upstream_hash="$( git rev-parse HEAD )" +readonly upstream_hash_short="$( git rev-parse --short=8 "$upstream_hash" )" +readonly upstream_datetime="$( git rev-list "$upstream_hash" --format='%ci' -n 1 | grep -e "^$regex_date" )" +readonly upstream_date="$( echo "$upstream_datetime" | grep -o -e "$regex_date" )" +if $do_shortlog && [ -n "$basehash" ]; then + readonly commit_shortlog=" + +Upstream Shortlog +----------------- + +$( git shortlog --no-merges --abbrev=8 --format='%h %s' "$upstream_old_short".."$upstream_hash" )" +else + readonly commit_shortlog="" +fi +extract_source || \ + die "failed to extract source" +popd + +[ -d "$extractdir/$name-reduced" ] || \ + die "expected directory to extract does not exist" +readonly commit_summary="$name $upstream_date ($upstream_hash_short)" + +# Commit the subset +pushd "$extractdir" +mv -v "$name-reduced/"* . +rmdir "$name-reduced/" +git add -A . 
+git commit -n --author="$ownership" --date="$upstream_datetime" -F - <<-EOF +$commit_summary + +Code extracted from: + + $repo + +at commit $upstream_hash ($tag).$commit_shortlog +EOF +git branch -f "upstream-$name" +popd + +# Merge the subset into this repository +if [ -n "$basehash" ]; then + git merge --log -s recursive "-Xsubtree=$subtree/" --no-commit "upstream-$name" +else + unrelated_histories_flag="" + if git merge --help | grep -q -e allow-unrelated-histories; then + unrelated_histories_flag="--allow-unrelated-histories " + fi + readonly unrelated_histories_flag + + git fetch "$extractdir" "upstream-$name:upstream-$name" + git merge --log -s ours --no-commit $unrelated_histories_flag "upstream-$name" + git read-tree -u --prefix="$subtree/" "upstream-$name" +fi +git commit --no-edit +git branch -d "upstream-$name" diff --git a/test/API/tarray.c b/test/API/tarray.c new file mode 100644 index 00000000000..214a022aa0f --- /dev/null +++ b/test/API/tarray.c @@ -0,0 +1,2250 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: tarray + * + * Test the Array Datatype functionality + * + *************************************************************/ + +#include "testhdf5.h" +/* #include "H5srcdir.h" */ + +#define FILENAME "tarray1.h5" +#define TESTFILE "tarrold.h5" + +/* 1-D array datatype */ +#define ARRAY1_RANK 1 +#define ARRAY1_DIM1 4 + +/* 3-D array datatype */ +#define ARRAY2_RANK 3 +#define ARRAY2_DIM1 3 +#define ARRAY2_DIM2 4 +#define ARRAY2_DIM3 5 + +/* 2-D array datatype */ +#define ARRAY3_RANK 2 +#define ARRAY3_DIM1 6 +#define ARRAY3_DIM2 3 + +/* 1-D dataset with fixed dimensions */ +#define SPACE1_RANK 1 +#define SPACE1_DIM1 4 + +/* Parameters used with the test_array_bkg() test */ +#define FIELDNAME "ArrayofStructures" +#define LENGTH 5 +#define ALEN 10 +#define RANK 1 +#define NMAX 100 + +/* Struct used with test_array_bkg() test */ +typedef struct { + int nsubfields; + char *name[NMAX]; + size_t offset[NMAX]; + hid_t datatype[NMAX]; + +} CmpDTSinfo; + +/* Forward declarations for custom vlen memory manager functions */ +void *test_array_alloc_custom(size_t size, void *info); +void test_array_free_custom(void *mem, void *info); + +/*------------------------------------------------------------------------- + * Function: test_array_atomic_1d + * + * Purpose: Test basic array datatype code. + * Tests 1-D array of atomic datatypes. 
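+ *              Writes a dataset whose elements are 1-D arrays of native ints,
+ *              then re-opens the file and verifies both the array datatype
+ *              metadata and the element values read back.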
+ * + * Return: void + * + *------------------------------------------------------------------------- + */ +static void +test_array_atomic_1d(void) +{ + int wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */ + int rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Datatype ID */ + hsize_t sdims1[] = {SPACE1_DIM1}; + hsize_t tdims1[] = {ARRAY1_DIM1}; + int ndims; /* Array rank for reading */ + hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ + int i, j; /* counting variables */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing 1-D Array of Atomic Datatypes Functionality\n")); + + /* Allocate and initialize array data to write */ + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < ARRAY1_DIM1; j++) + wdata[i][j] = i * 10 + j; + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a datatype to refer to */ + tid1 = H5Tarray_create2(H5T_NATIVE_INT, ARRAY1_RANK, tdims1); + CHECK(tid1, FAIL, "H5Tarray_create2"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get the datatype */ + tid1 = H5Dget_type(dataset); + CHECK(tid1, FAIL, "H5Dget_type"); + + /* Check the array rank */ + ndims = H5Tget_array_ndims(tid1); + VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); + + /* Get the array dimensions */ + ret = H5Tget_array_dims2(tid1, rdims1); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i < ndims; i++) + if (rdims1[i] != tdims1[i]) { + TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE + ", tdims1[%d]=%" PRIuHSIZE "\n", + i, rdims1[i], i, tdims1[i]); + continue; + } /* end if */ + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < ARRAY1_DIM1; j++) + if (wdata[i][j] != rdata[i][j]) { + TestErrPrintf("Array data information doesn't match!, wdata[%d][%d]=%d, rdata[%d][%d]=%d\n", + (int)i, (int)j, (int)wdata[i][j], (int)i, (int)j, (int)rdata[i][j]); + continue; + } /* end if */ + + /* Close Datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* end 
test_array_atomic_1d() */ + +/*------------------------------------------------------------------------- + * Function: test_array_funcs + * + * Purpose: Test some type functions that are and aren't supposed to + * work with array type. + * + * Return: void + * + *------------------------------------------------------------------------- + */ +static void +test_array_funcs(void) +{ + hid_t type; /* Datatype ID */ + hsize_t tdims1[] = {ARRAY1_DIM1}; + size_t size; + H5T_pad_t inpad; + H5T_norm_t norm; + H5T_cset_t cset; + H5T_str_t strpad; + herr_t ret; /* Generic return value */ + + /* Create a datatype to refer to */ + type = H5Tarray_create2(H5T_IEEE_F32BE, ARRAY1_RANK, tdims1); + CHECK(type, FAIL, "H5Tarray_create2"); + + size = H5Tget_precision(type); + CHECK(size, 0, "H5Tget_precision"); + + size = H5Tget_size(type); + CHECK(size, 0, "H5Tget_size"); + + size = H5Tget_ebias(type); + CHECK(size, 0, "H5Tget_ebias"); + + ret = H5Tset_pad(type, H5T_PAD_ZERO, H5T_PAD_ONE); + CHECK(ret, FAIL, "H5Tset_pad"); + + inpad = H5Tget_inpad(type); + CHECK(inpad, FAIL, "H5Tget_inpad"); + + norm = H5Tget_norm(type); + CHECK(norm, FAIL, "H5Tget_norm"); + + ret = H5Tset_offset(type, (size_t)16); + CHECK(ret, FAIL, "H5Tset_offset"); + + H5E_BEGIN_TRY + { + cset = H5Tget_cset(type); + } + H5E_END_TRY; + VERIFY(cset, FAIL, "H5Tget_cset"); + + H5E_BEGIN_TRY + { + strpad = H5Tget_strpad(type); + } + H5E_END_TRY; + VERIFY(strpad, FAIL, "H5Tget_strpad"); + + /* Close datatype */ + ret = H5Tclose(type); + CHECK(ret, FAIL, "H5Tclose"); +} /* end test_array_funcs() */ + +/*------------------------------------------------------------------------- + * Function: test_array_atomic_3d + * + * Purpose: Test basic array datatype code. + * Tests 3-D array of atomic datatypes. + * + * Return: void + * + *------------------------------------------------------------------------- + */ +static void +test_array_atomic_3d(void) +{ + int wdata[SPACE1_DIM1][ARRAY2_DIM1][ARRAY2_DIM2][ARRAY2_DIM3]; /* Information to write */ + int rdata[SPACE1_DIM1][ARRAY2_DIM1][ARRAY2_DIM2][ARRAY2_DIM3]; /* Information read in */ + hid_t fid; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + hsize_t sdims1[] = {SPACE1_DIM1}; + hsize_t tdims2[] = {ARRAY2_DIM1, ARRAY2_DIM2, ARRAY2_DIM3}; + int ndims; /* Array rank for reading */ + hsize_t rdims2[H5S_MAX_RANK]; /* Array dimensions for reading */ + int i, j, k, l; /* counting variables */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing 3-D Array of Atomic Datatypes Functionality\n")); + + /* Allocate and initialize array data to write */ + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < ARRAY2_DIM1; j++) + for (k = 0; k < ARRAY2_DIM2; k++) + for (l = 0; l < ARRAY2_DIM3; l++) + wdata[i][j][k][l] = i * 1000 + j * 100 + k * 10 + l; + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid = H5Screate_simple(SPACE1_RANK, sdims1, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create a datatype to refer to */ + tid = H5Tarray_create2(H5T_NATIVE_INT, ARRAY2_RANK, tdims2); + CHECK(tid, FAIL, "H5Tarray_create2"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid, "Dataset1", tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, 
wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get the datatype */ + tid = H5Dget_type(dataset); + CHECK(tid, FAIL, "H5Dget_type"); + + /* Check the array rank */ + ndims = H5Tget_array_ndims(tid); + VERIFY(ndims, ARRAY2_RANK, "H5Tget_array_ndims"); + + /* Get the array dimensions */ + ret = H5Tget_array_dims2(tid, rdims2); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i < ndims; i++) + if (rdims2[i] != tdims2[i]) { + TestErrPrintf("Array dimension information doesn't match!, rdims2[%d]=%d, tdims2[%d]=%d\n", + (int)i, (int)rdims2[i], (int)i, (int)tdims2[i]); + continue; + } /* end if */ + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < ARRAY2_DIM1; j++) + for (k = 0; k < ARRAY2_DIM2; k++) + for (l = 0; l < ARRAY2_DIM3; l++) + if (wdata[i][j][k][l] != rdata[i][j][k][l]) { + TestErrPrintf("Array data information doesn't match!, wdata[%d][%d][%d][%d]=%d, " + "rdata[%d][%d][%d][%d]=%d\n", + (int)i, (int)j, (int)k, (int)l, (int)wdata[i][j][k][l], (int)i, (int)j, + (int)k, (int)l, (int)rdata[i][j][k][l]); + continue; + } /* end if */ + + /* Close Datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_array_atomic_3d() */ + +/*------------------------------------------------------------------------- + * Function: test_array_array_atomic + * + * Purpose: Test basic array datatype code. + * Tests 1-D array 2-D arrays of atomic datatypes. 
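+ *              Each dataset element is a 1-D array whose base type is itself
+ *              a 2-D array of native ints; the nested array type is retrieved
+ *              with H5Tget_super() and checked after re-opening the file.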
+ * + * Return: void + * + *------------------------------------------------------------------------- + */ +static void +test_array_array_atomic(void) +{ + int wdata[SPACE1_DIM1][ARRAY1_DIM1][ARRAY3_DIM1][ARRAY3_DIM2]; /* Information to write */ + int rdata[SPACE1_DIM1][ARRAY1_DIM1][ARRAY3_DIM1][ARRAY3_DIM2]; /* Information read in */ + hid_t fid; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid1; /* 1-D array Datatype ID */ + hid_t tid2; /* 2-D array Datatype ID */ + hsize_t sdims1[] = {SPACE1_DIM1}; + hsize_t tdims1[] = {ARRAY1_DIM1}; + hsize_t tdims2[] = {ARRAY3_DIM1, ARRAY3_DIM2}; + int ndims1; /* Array rank for reading */ + int ndims2; /* Array rank for reading */ + hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ + hsize_t rdims2[H5S_MAX_RANK]; /* Array dimensions for reading */ + int i, j, k, l; /* counting variables */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing 1-D Array 2-D Arrays of Atomic Datatypes Functionality\n")); + + /* Allocate and initialize array data to write */ + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < ARRAY1_DIM1; j++) + for (k = 0; k < ARRAY3_DIM1; k++) + for (l = 0; l < ARRAY3_DIM2; l++) + wdata[i][j][k][l] = i * 1000 + j * 100 + k * 10 + l; + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid = H5Screate_simple(SPACE1_RANK, sdims1, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create a 2-D datatype to refer to */ + tid2 = H5Tarray_create2(H5T_NATIVE_INT, ARRAY3_RANK, tdims2); + CHECK(tid2, FAIL, "H5Tarray_create2"); + + /* Create a 1-D datatype to refer to */ + tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1); + CHECK(tid1, FAIL, "H5Tarray_create2"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid, "Dataset1", tid1, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatypes */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get the 1-D datatype */ + tid1 = H5Dget_type(dataset); + CHECK(tid1, FAIL, "H5Dget_type"); + + /* Check the 1-D array rank */ + ndims1 = H5Tget_array_ndims(tid1); + VERIFY(ndims1, ARRAY1_RANK, "H5Tget_array_ndims"); + + /* Get the 1-D array dimensions */ + ret = H5Tget_array_dims2(tid1, rdims1); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i < ndims1; i++) + if (rdims1[i] != tdims1[i]) { + TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE + ", tdims1[%d]=%" PRIuHSIZE "\n", + i, rdims1[i], i, tdims1[i]); + continue; + } /* end if */ + + /* Get the 2-D datatype */ + tid2 = H5Tget_super(tid1); + CHECK(tid2, FAIL, "H5Tget_super"); + + /* Check the 2-D array rank */ + ndims2 = 
H5Tget_array_ndims(tid2); + VERIFY(ndims2, ARRAY3_RANK, "H5Tget_array_ndims"); + + /* Get the 2-D array dimensions */ + ret = H5Tget_array_dims2(tid2, rdims2); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i < ndims2; i++) + if (rdims2[i] != tdims2[i]) { + TestErrPrintf("Array dimension information doesn't match!, rdims2[%d]=%d, tdims2[%d]=%d\n", + (int)i, (int)rdims2[i], (int)i, (int)tdims2[i]); + continue; + } /* end if */ + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < ARRAY1_DIM1; j++) + for (k = 0; k < ARRAY3_DIM1; k++) + for (l = 0; l < ARRAY3_DIM2; l++) + if (wdata[i][j][k][l] != rdata[i][j][k][l]) { + TestErrPrintf("Array data information doesn't match!, wdata[%d][%d][%d][%d]=%d, " + "rdata[%d][%d][%d][%d]=%d\n", + (int)i, (int)j, (int)k, (int)l, (int)wdata[i][j][k][l], (int)i, (int)j, + (int)k, (int)l, (int)rdata[i][j][k][l]); + continue; + } /* end if */ + + /* Close Datatypes */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_array_array_atomic() */ + +/*------------------------------------------------------------------------- + * Function: test_array_compound_atomic + * + * Purpose: Test basic array datatype code. + * Tests 1-D array of compound datatypes (with no array fields). + * + * Return: void + * + *------------------------------------------------------------------------- + */ +static void +test_array_compound_atomic(void) +{ + typedef struct { /* Typedef for compound datatype */ + int i; + float f; + } s1_t; + + s1_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */ + s1_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Array Datatype ID */ + hid_t tid2; /* Compound Datatype ID */ + hsize_t sdims1[] = {SPACE1_DIM1}; + hsize_t tdims1[] = {ARRAY1_DIM1}; + int ndims; /* Array rank for reading */ + hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ + int nmemb; /* Number of compound members */ + char *mname; /* Name of compound field */ + size_t off; /* Offset of compound field */ + hid_t mtid; /* Datatype ID for field */ + int i, j; /* counting variables */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing 1-D Array of Compound Atomic Datatypes Functionality\n")); + + /* Initialize array data to write */ + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < ARRAY1_DIM1; j++) { + wdata[i][j].i = i * 10 + j; + wdata[i][j].f = (float)i * 2.5F + (float)j; + } /* end for */ + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a compound datatype to refer to */ + tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(tid2, FAIL, "H5Tcreate"); + + /* Insert integer field */ + ret = H5Tinsert(tid2, "i", HOFFSET(s1_t, i), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Insert float field */ + 
ret = H5Tinsert(tid2, "f", HOFFSET(s1_t, f), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create an array datatype to refer to */ + tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1); + CHECK(tid1, FAIL, "H5Tarray_create2"); + + /* Close compound datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get the datatype */ + tid1 = H5Dget_type(dataset); + CHECK(tid1, FAIL, "H5Dget_type"); + + /* Check the array rank */ + ndims = H5Tget_array_ndims(tid1); + VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); + + /* Get the array dimensions */ + ret = H5Tget_array_dims2(tid1, rdims1); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i < ndims; i++) + if (rdims1[i] != tdims1[i]) { + TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE + ", tdims1[%d]=%" PRIuHSIZE "\n", + i, rdims1[i], i, tdims1[i]); + continue; + } /* end if */ + + /* Get the compound datatype */ + tid2 = H5Tget_super(tid1); + CHECK(tid2, FAIL, "H5Tget_super"); + + /* Check the number of members */ + nmemb = H5Tget_nmembers(tid2); + VERIFY(nmemb, 2, "H5Tget_nmembers"); + + /* Check the 1st field's name */ + mname = H5Tget_member_name(tid2, 0); + CHECK_PTR(mname, "H5Tget_member_name"); + if (HDstrcmp(mname, "i") != 0) + TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); + H5free_memory(mname); + + /* Check the 1st field's offset */ + off = H5Tget_member_offset(tid2, 0); + VERIFY(off, HOFFSET(s1_t, i), "H5Tget_member_offset"); + + /* Check the 1st field's datatype */ + mtid = H5Tget_member_type(tid2, 0); + CHECK(mtid, FAIL, "H5Tget_member_type"); + if ((ret = H5Tequal(mtid, H5T_NATIVE_INT)) <= 0) + TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); + ret = H5Tclose(mtid); + CHECK(mtid, FAIL, "H5Tclose"); + + /* Check the 2nd field's name */ + mname = H5Tget_member_name(tid2, 1); + CHECK_PTR(mname, "H5Tget_member_name"); + if (HDstrcmp(mname, "f") != 0) + TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); + H5free_memory(mname); + + /* Check the 2nd field's offset */ + off = H5Tget_member_offset(tid2, 1); + VERIFY(off, HOFFSET(s1_t, f), "H5Tget_member_offset"); + + /* Check the 2nd field's datatype */ + mtid = H5Tget_member_type(tid2, 1); + CHECK(mtid, FAIL, "H5Tget_member_type"); + if ((ret = H5Tequal(mtid, H5T_NATIVE_FLOAT)) <= 0) + TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); + ret = H5Tclose(mtid); + CHECK(mtid, FAIL, "H5Tclose"); + + /* Close Compound Datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, 
H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < ARRAY1_DIM1; j++) { + if (wdata[i][j].i != rdata[i][j].i) { + TestErrPrintf( + "Array data information doesn't match!, wdata[%d][%d].i=%d, rdata[%d][%d].i=%d\n", (int)i, + (int)j, (int)wdata[i][j].i, (int)i, (int)j, (int)rdata[i][j].i); + continue; + } /* end if */ + if (!H5_FLT_ABS_EQUAL(wdata[i][j].f, rdata[i][j].f)) { + TestErrPrintf( + "Array data information doesn't match!, wdata[%d][%d].f=%f, rdata[%d][%d].f=%f\n", (int)i, + (int)j, (double)wdata[i][j].f, (int)i, (int)j, (double)rdata[i][j].f); + continue; + } /* end if */ + } /* end for */ + + /* Close Datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_array_compound_atomic() */ + +/*------------------------------------------------------------------------- + * Function: test_array_compound_array + * + * Purpose: Test basic array datatype code. + * Tests 1-D array of compound datatypes (with array fields). + * + * Return: void + * + *------------------------------------------------------------------------- + */ +static void +test_array_compound_array(void) +{ + typedef struct { /* Typedef for compound datatype */ + int i; + float f[ARRAY1_DIM1]; + } s1_t; + + s1_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */ + s1_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Array Datatype ID */ + hid_t tid2; /* Compound Datatype ID */ + hid_t tid3; /* Nested Array Datatype ID */ + hsize_t sdims1[] = {SPACE1_DIM1}; + hsize_t tdims1[] = {ARRAY1_DIM1}; + int ndims; /* Array rank for reading */ + hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ + int nmemb; /* Number of compound members */ + char *mname; /* Name of compound field */ + size_t off; /* Offset of compound field */ + hid_t mtid; /* Datatype ID for field */ + H5T_class_t mclass; /* Datatype class for field */ + int i, j, k; /* counting variables */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing 1-D Array of Compound Array Datatypes Functionality\n")); + + /* Initialize array data to write */ + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < ARRAY1_DIM1; j++) { + wdata[i][j].i = i * 10 + j; + for (k = 0; k < ARRAY1_DIM1; k++) + wdata[i][j].f[k] = (float)i * 10.0F + (float)j * 2.5F + (float)k; + } /* end for */ + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a compound datatype to refer to */ + tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(tid2, FAIL, "H5Tcreate"); + + /* Insert integer field */ + ret = H5Tinsert(tid2, "i", HOFFSET(s1_t, i), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create an array of floats datatype */ + tid3 = H5Tarray_create2(H5T_NATIVE_FLOAT, ARRAY1_RANK, tdims1); + CHECK(tid3, FAIL, "H5Tarray_create2"); + + /* Insert float array field */ + ret = H5Tinsert(tid2, "f", HOFFSET(s1_t, f), tid3); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Close array of floats field datatype */ + 
ret = H5Tclose(tid3); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create an array datatype to refer to */ + tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1); + CHECK(tid1, FAIL, "H5Tarray_create2"); + + /* Close compound datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get the datatype */ + tid1 = H5Dget_type(dataset); + CHECK(tid1, FAIL, "H5Dget_type"); + + /* Check the array rank */ + ndims = H5Tget_array_ndims(tid1); + VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); + + /* Get the array dimensions */ + ret = H5Tget_array_dims2(tid1, rdims1); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i < ndims; i++) + if (rdims1[i] != tdims1[i]) { + TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE + ", tdims1[%d]=%" PRIuHSIZE "\n", + i, rdims1[i], i, tdims1[i]); + continue; + } /* end if */ + + /* Get the compound datatype */ + tid2 = H5Tget_super(tid1); + CHECK(tid2, FAIL, "H5Tget_super"); + + /* Check the number of members */ + nmemb = H5Tget_nmembers(tid2); + VERIFY(nmemb, 2, "H5Tget_nmembers"); + + /* Check the 1st field's name */ + mname = H5Tget_member_name(tid2, 0); + CHECK_PTR(mname, "H5Tget_member_name"); + if (HDstrcmp(mname, "i") != 0) + TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); + H5free_memory(mname); + + /* Check the 1st field's offset */ + off = H5Tget_member_offset(tid2, 0); + VERIFY(off, HOFFSET(s1_t, i), "H5Tget_member_offset"); + + /* Check the 1st field's datatype */ + mtid = H5Tget_member_type(tid2, 0); + CHECK(mtid, FAIL, "H5Tget_member_type"); + if ((ret = H5Tequal(mtid, H5T_NATIVE_INT)) <= 0) + TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); + ret = H5Tclose(mtid); + CHECK(mtid, FAIL, "H5Tclose"); + + /* Check the 2nd field's name */ + mname = H5Tget_member_name(tid2, 1); + CHECK_PTR(mname, "H5Tget_member_name"); + if (HDstrcmp(mname, "f") != 0) + TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); + H5free_memory(mname); + + /* Check the 2nd field's offset */ + off = H5Tget_member_offset(tid2, 1); + VERIFY(off, HOFFSET(s1_t, f), "H5Tget_member_offset"); + + /* Check the 2nd field's datatype */ + mtid = H5Tget_member_type(tid2, 1); + CHECK(mtid, FAIL, "H5Tget_member_type"); + + /* Get the 2nd field's class */ + mclass = H5Tget_class(mtid); + VERIFY(mclass, H5T_ARRAY, "H5Tget_class"); + + /* Check the array rank */ + ndims = H5Tget_array_ndims(mtid); + VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); + + /* Get the array dimensions */ + ret = H5Tget_array_dims2(mtid, rdims1); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i 
< ndims; i++) + if (rdims1[i] != tdims1[i]) { + TestErrPrintf("Nested array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE + ", tdims1[%d]=%" PRIuHSIZE "\n", + i, rdims1[i], i, tdims1[i]); + continue; + } /* end if */ + + /* Check the nested array's datatype */ + tid3 = H5Tget_super(mtid); + CHECK(tid3, FAIL, "H5Tget_super"); + + if ((ret = H5Tequal(tid3, H5T_NATIVE_FLOAT)) <= 0) + TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); + + /* Close the array's base type datatype */ + ret = H5Tclose(tid3); + CHECK(mtid, FAIL, "H5Tclose"); + + /* Close the member datatype */ + ret = H5Tclose(mtid); + CHECK(mtid, FAIL, "H5Tclose"); + + /* Close Compound Datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + for (j = 0; j < ARRAY1_DIM1; j++) { + if (wdata[i][j].i != rdata[i][j].i) { + TestErrPrintf( + "Array data information doesn't match!, wdata[%d][%d].i=%d, rdata[%d][%d].i=%d\n", (int)i, + (int)j, (int)wdata[i][j].i, (int)i, (int)j, (int)rdata[i][j].i); + continue; + } /* end if */ + for (k = 0; k < ARRAY1_DIM1; k++) + if (!H5_FLT_ABS_EQUAL(wdata[i][j].f[k], rdata[i][j].f[k])) { + TestErrPrintf("Array data information doesn't match!, wdata[%d][%d].f[%d]=%f, " + "rdata[%d][%d].f[%d]=%f\n", + (int)i, (int)j, (int)k, (double)wdata[i][j].f[k], (int)i, (int)j, (int)k, + (double)rdata[i][j].f[k]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Close Datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_array_compound_array() */ + +/**************************************************************** +** +** test_array_alloc_custom(): Test VL datatype custom memory +** allocation routines. This routine just uses malloc to +** allocate the memory and increments the amount of memory +** allocated. +** +****************************************************************/ + +/*------------------------------------------------------------------------- + * Function: test_array_alloc_custom + * + * Purpose: Memory allocator for testing VL datatype custom memory + * allocation routines. + * + * This routine just uses malloc to allocate the memory and + * increments the amount of memory allocated. 
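+ *              The requested size is stored in the alignment padding placed
+ *              just before the pointer returned to the caller, so that
+ *              test_array_free_custom() can recover it and decrement the
+ *              running total pointed to by the info argument.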
+ * + * Return: + * + * Success: A memory buffer + * Failure: NULL + * + *------------------------------------------------------------------------- + */ +void * +test_array_alloc_custom(size_t size, void *info) +{ + void *ret_value = NULL; /* Pointer to return */ + size_t *mem_used = (size_t *)info; /* Pointer to the memory used */ + size_t extra; /* Extra space needed */ + + /* + * This weird contortion is required on the DEC Alpha to keep the + * alignment correct - QAK + */ + extra = MAX(sizeof(void *), sizeof(size_t)); + + if ((ret_value = HDmalloc(extra + size)) != NULL) { + *(size_t *)ret_value = size; + *mem_used += size; + } /* end if */ + + ret_value = ((unsigned char *)ret_value) + extra; + return ret_value; +} /* end test_array_alloc_custom() */ + +/*------------------------------------------------------------------------- + * Function: test_array_free_custom + * + * Purpose: Memory free function for testing VL datatype custom memory + * allocation routines. + * + * This routine just uses free to free the memory and + * decrements the amount of memory allocated. + * + * Return: void + * + *------------------------------------------------------------------------- + */ +void +test_array_free_custom(void *_mem, void *info) +{ + unsigned char *mem = NULL; /* Pointer to mem to be freed */ + size_t *mem_used = (size_t *)info; /* Pointer to the memory used */ + size_t extra; /* Extra space needed */ + + /* + * This weird contortion is required on the DEC Alpha to keep the + * alignment correct - QAK + */ + extra = MAX(sizeof(void *), sizeof(size_t)); + + if (_mem != NULL) { + mem = ((unsigned char *)_mem) - extra; + *mem_used -= *(size_t *)((void *)mem); + HDfree(mem); + } /* end if */ + +} /* end test_array_free_custom() */ + +/*------------------------------------------------------------------------- + * Function: test_array_vlen_atomic + * + * Purpose: Test basic array datatype code. + * Tests 1-D array of atomic VL datatypes. 
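+ *              Also exercises H5Pset_vlen_mem_manager() with the custom
+ *              allocator above, checking via H5Dvlen_get_buf_size() and the
+ *              byte counter that the expected amount of VL memory is
+ *              allocated and later released by H5Treclaim().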
+ * + * Return: void + * + *------------------------------------------------------------------------- + */ +static void +test_array_vlen_atomic(void) +{ + hvl_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */ + hvl_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Array Datatype ID */ + hid_t tid2; /* VL Datatype ID */ + hid_t tid3; /* Atomic Datatype ID */ + hsize_t sdims1[] = {SPACE1_DIM1}; + hsize_t tdims1[] = {ARRAY1_DIM1}; + int ndims; /* Array rank for reading */ + hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ + H5T_class_t mclass; /* Datatype class for VL */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hsize_t size; /* Number of bytes which will be used */ + size_t mem_used = 0; /* Memory used during allocation */ + int i, j, k; /* counting variables */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing 1-D Array of Atomic Variable-Length Datatypes Functionality\n")); + + /* Initialize array data to write */ + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < ARRAY1_DIM1; j++) { + wdata[i][j].p = HDmalloc((size_t)(i + j + 1) * sizeof(unsigned int)); + wdata[i][j].len = (size_t)(i + j + 1); + for (k = 0; k < (i + j + 1); k++) + ((unsigned int *)wdata[i][j].p)[k] = (unsigned int)(i * 100 + j * 10 + k); + } /* end for */ + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a compound datatype to refer to */ + tid2 = H5Tvlen_create(H5T_NATIVE_UINT); + CHECK(tid2, FAIL, "H5Tcreate"); + + /* Create an array datatype to refer to */ + tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1); + CHECK(tid1, FAIL, "H5Tarray_create2"); + + /* Close VL datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get the dataspace */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Get the datatype */ + tid1 = H5Dget_type(dataset); + CHECK(tid1, FAIL, "H5Dget_type"); + + /* Check the array rank */ + ndims = H5Tget_array_ndims(tid1); + VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); + + /* Get the array dimensions */ + ret = H5Tget_array_dims2(tid1, rdims1); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i < ndims; i++) + if (rdims1[i] != tdims1[i]) { + TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" 
PRIuHSIZE + ", tdims1[%d]=%" PRIuHSIZE "\n", + i, rdims1[i], i, tdims1[i]); + continue; + } /* end if */ + + /* Get the VL datatype */ + tid2 = H5Tget_super(tid1); + CHECK(tid2, FAIL, "H5Tget_super"); + + /* Get the 2nd field's class */ + mclass = H5Tget_class(tid2); + VERIFY(mclass, H5T_VLEN, "H5Tget_class"); + + /* Check the VL datatype's base type */ + tid3 = H5Tget_super(tid2); + CHECK(tid3, FAIL, "H5Tget_super"); + + if ((ret = H5Tequal(tid3, H5T_NATIVE_UINT)) <= 0) + TestErrPrintf("VL base datatype is incorrect!, ret=%d\n", (int)ret); + + /* Close the array's base type datatype */ + ret = H5Tclose(tid3); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close VL Datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_array_alloc_custom, &mem_used, test_array_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory will be used */ + ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) + + * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64 elements + */ + VERIFY(size, 64 * sizeof(unsigned int), "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) + + * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64 elements + */ + VERIFY(mem_used, 64 * sizeof(unsigned int), "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + for (j = 0; j < ARRAY1_DIM1; j++) { + if (wdata[i][j].len != rdata[i][j].len) { + TestErrPrintf("VL data length don't match!, wdata[%d][%d].len=%d, rdata[%d][%d].len=%d\n", + (int)i, (int)j, (int)wdata[i][j].len, (int)i, (int)j, (int)rdata[i][j].len); + continue; + } /* end if */ + for (k = 0; k < (int)rdata[i][j].len; k++) { + if (((unsigned int *)wdata[i][j].p)[k] != ((unsigned int *)rdata[i][j].p)[k]) { + TestErrPrintf( + "VL data values don't match!, wdata[%d][%d].p[%d]=%d, rdata[%d][%d].p[%d]=%d\n", + (int)i, (int)j, (int)k, (int)((unsigned int *)wdata[i][j].p)[k], (int)i, (int)j, + (int)k, (int)((unsigned int *)rdata[i][j].p)[k]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + } /* end for */ + + /* Reclaim the read VL data */ + ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close Datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_array_vlen_atomic() */ + +/*------------------------------------------------------------------------- + * Function: test_array_vlen_array + * + * Purpose: Test basic array datatype code. + * Tests 1-D array of 1-D array VL datatypes. 
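+ *              Same pattern as test_array_vlen_atomic(), but each VL element
+ *              is itself a 1-D array, so the expected buffer sizes scale by
+ *              ARRAY1_DIM1.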
+ * + * Return: void + * + *------------------------------------------------------------------------- + */ +static void +test_array_vlen_array(void) +{ + hvl_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */ + hvl_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Array Datatype ID */ + hid_t tid2; /* VL Datatype ID */ + hid_t tid3; /* Nested Array Datatype ID */ + hid_t tid4; /* Atomic Datatype ID */ + hsize_t sdims1[] = {SPACE1_DIM1}; + hsize_t tdims1[] = {ARRAY1_DIM1}; + int ndims; /* Array rank for reading */ + hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ + H5T_class_t mclass; /* Datatype class for VL */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hsize_t size; /* Number of bytes which will be used */ + size_t mem_used = 0; /* Memory used during allocation */ + int i, j, k, l; /* Index variables */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing 1-D Array of 1-D Array Variable-Length Datatypes Functionality\n")); + + /* Initialize array data to write */ + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < ARRAY1_DIM1; j++) { + wdata[i][j].p = HDmalloc((size_t)(i + j + 1) * sizeof(unsigned int) * (size_t)ARRAY1_DIM1); + wdata[i][j].len = (size_t)(i + j + 1); + for (k = 0; k < (i + j + 1); k++) + for (l = 0; l < ARRAY1_DIM1; l++) + ((unsigned int *)wdata[i][j].p)[k * ARRAY1_DIM1 + l] = + (unsigned int)(i * 1000 + j * 100 + k * 10 + l); + } + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create the nested array datatype to refer to */ + tid3 = H5Tarray_create2(H5T_NATIVE_UINT, ARRAY1_RANK, tdims1); + CHECK(tid3, FAIL, "H5Tarray_create2"); + + /* Create a VL datatype of 1-D arrays to refer to */ + tid2 = H5Tvlen_create(tid3); + CHECK(tid2, FAIL, "H5Tcreate"); + + /* Close nested array datatype */ + ret = H5Tclose(tid3); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create an array datatype to refer to */ + tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1); + CHECK(tid1, FAIL, "H5Tarray_create2"); + + /* Close VL datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get the dataspace */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Get the datatype */ + tid1 = H5Dget_type(dataset); + CHECK(tid1, FAIL, "H5Dget_type"); + + /* Check the array rank */ + ndims = 
H5Tget_array_ndims(tid1); + VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); + + /* Get the array dimensions */ + ret = H5Tget_array_dims2(tid1, rdims1); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i < ndims; i++) + if (rdims1[i] != tdims1[i]) { + TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE + ", tdims1[%d]=%" PRIuHSIZE "\n", + i, rdims1[i], i, tdims1[i]); + continue; + } /* end if */ + + /* Get the VL datatype */ + tid2 = H5Tget_super(tid1); + CHECK(tid2, FAIL, "H5Tget_super"); + + /* Get the VL datatype's class */ + mclass = H5Tget_class(tid2); + VERIFY(mclass, H5T_VLEN, "H5Tget_class"); + + /* Check the VL datatype's base type */ + tid3 = H5Tget_super(tid2); + CHECK(tid3, FAIL, "H5Tget_super"); + + /* Get the nested array datatype's class */ + mclass = H5Tget_class(tid3); + VERIFY(mclass, H5T_ARRAY, "H5Tget_class"); + + /* Check the array rank */ + ndims = H5Tget_array_ndims(tid3); + VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); + + /* Get the array dimensions */ + ret = H5Tget_array_dims2(tid3, rdims1); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i < ndims; i++) + if (rdims1[i] != tdims1[i]) { + TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE + ", tdims1[%d]=%" PRIuHSIZE "\n", + i, rdims1[i], i, tdims1[i]); + continue; + } /* end if */ + + /* Check the array's base type */ + tid4 = H5Tget_super(tid3); + CHECK(tid4, FAIL, "H5Tget_super"); + + if ((ret = H5Tequal(tid4, H5T_NATIVE_UINT)) <= 0) + TestErrPrintf("VL base datatype is incorrect!, ret=%d\n", (int)ret); + + /* Close the array's base type datatype */ + ret = H5Tclose(tid4); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close the nested array datatype */ + ret = H5Tclose(tid3); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close VL Datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_array_alloc_custom, &mem_used, test_array_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory will be used */ + ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) + + * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64*ARRAY1_DIM1 elements + */ + VERIFY(size, 64 * (sizeof(unsigned int) * ARRAY1_DIM1), "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) + + * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64*ARRAY1_DIM1 elements + */ + VERIFY(mem_used, 64 * (sizeof(unsigned int) * ARRAY1_DIM1), "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + for (j = 0; j < ARRAY1_DIM1; j++) { + if (wdata[i][j].len != rdata[i][j].len) { + TestErrPrintf("VL data length don't match!, wdata[%d][%d].len=%d, rdata[%d][%d].len=%d\n", + (int)i, (int)j, (int)wdata[i][j].len, (int)i, (int)j, (int)rdata[i][j].len); + continue; + } /* end if */ + for (k = 0; k < (int)rdata[i][j].len; k++) { + for (l = 0; l < ARRAY1_DIM1; l++) { + if (((unsigned int *)wdata[i][j].p)[k * ARRAY1_DIM1 + 
l] != + ((unsigned int *)rdata[i][j].p)[k * ARRAY1_DIM1 + l]) { + TestErrPrintf("VL data values don't match!, wdata[%d][%d].p[%d][%d]=%d, " + "rdata[%d][%d].p[%d][%d]=%d\n", + (int)i, (int)j, (int)k, (int)l, + (int)((unsigned int *)wdata[i][j].p)[k * ARRAY1_DIM1 + l], (int)i, + (int)j, (int)k, (int)l, + (int)((unsigned int *)rdata[i][j].p)[k * ARRAY1_DIM1 + l]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + } /* end for */ + } /* end for */ + + /* Reclaim the read VL data */ + ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close Datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_array_vlen_array() */ + +/*------------------------------------------------------------------------- + * Function: test_array_bkg + * + * Purpose: Test basic array datatype code. + * Tests reading compound datatype with array fields and + * writing partial fields. + * + * Return: void + * + *------------------------------------------------------------------------- + */ +static void +test_array_bkg(void) +{ + herr_t status = -1; + + hid_t fid, array_dt; + hid_t space; + hid_t type; + hid_t dataset; + + hsize_t dim[] = {LENGTH}; + hsize_t dima[] = {ALEN}; + + int i, j; + unsigned ndims[3] = {1, 1, 1}; + + typedef struct { + int a[ALEN]; + float b[ALEN]; + double c[ALEN]; + } CmpField; + + CmpField cf[LENGTH]; + CmpField cfr[LENGTH]; + CmpDTSinfo *dtsinfo = NULL; + + typedef struct { + float b[ALEN]; + } fld_t; + + fld_t fld[LENGTH]; + fld_t fldr[LENGTH]; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Partial I/O of Array Fields in Compound Datatype Functionality\n")); + + /* Initialize the data */ + /* ------------------- */ + dtsinfo = (CmpDTSinfo *)HDmalloc(sizeof(CmpDTSinfo)); + CHECK_PTR(dtsinfo, "HDmalloc"); + HDmemset(dtsinfo, 0, sizeof(CmpDTSinfo)); + for (i = 0; i < LENGTH; i++) { + for (j = 0; j < ALEN; j++) { + cf[i].a[j] = 100 * (i + 1) + j; + cf[i].b[j] = 100.0F * ((float)i + 1.0F) + 0.01F * (float)j; + cf[i].c[j] = (double)(100.0F * ((float)i + 1.0F) + 0.02F * (float)j); + } /* end for */ + } /* end for */ + + /* Set the number of data members */ + /* ------------------------------ */ + dtsinfo->nsubfields = 3; + + /* Initialize the offsets */ + /* ----------------------- */ + dtsinfo->offset[0] = HOFFSET(CmpField, a); + dtsinfo->offset[1] = HOFFSET(CmpField, b); + dtsinfo->offset[2] = HOFFSET(CmpField, c); + + /* Initialize the data type IDs */ + /* ---------------------------- */ + dtsinfo->datatype[0] = H5T_NATIVE_INT; + dtsinfo->datatype[1] = H5T_NATIVE_FLOAT; + dtsinfo->datatype[2] = H5T_NATIVE_DOUBLE; + + /* Initialize the names of data members */ + /* ------------------------------------ */ + for (i = 0; i < dtsinfo->nsubfields; i++) + dtsinfo->name[i] = (char *)HDcalloc((size_t)20, sizeof(char)); + + HDstrcpy(dtsinfo->name[0], "One"); + HDstrcpy(dtsinfo->name[1], "Two"); + HDstrcpy(dtsinfo->name[2], "Three"); + + /* Create file */ + /* ----------- */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, 
H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create data space */ + /* ----------------- */ + space = H5Screate_simple(RANK, dim, NULL); + CHECK(space, FAIL, "H5Screate_simple"); + + /* Create the memory data type */ + /* --------------------------- */ + type = H5Tcreate(H5T_COMPOUND, sizeof(CmpField)); + CHECK(type, FAIL, "H5Tcreate"); + + /* Add members to the compound data type */ + /* -------------------------------------- */ + for (i = 0; i < dtsinfo->nsubfields; i++) { + array_dt = H5Tarray_create2(dtsinfo->datatype[i], ndims[i], dima); + CHECK(array_dt, FAIL, "H5Tarray_create2"); + + status = H5Tinsert(type, dtsinfo->name[i], dtsinfo->offset[i], array_dt); + CHECK(status, FAIL, "H5Tinsert"); + + status = H5Tclose(array_dt); + CHECK(status, FAIL, "H5Tclose"); + } /* end for */ + + /* Create the dataset */ + /* ------------------ */ + dataset = H5Dcreate2(fid, FIELDNAME, type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write data to the dataset */ + /* ------------------------- */ + status = H5Dwrite(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cf); + CHECK(status, FAIL, "H5Dwrite"); + + status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cfr); + CHECK(status, FAIL, "H5Dread"); + + /* Verify correct data */ + /* ------------------- */ + for (i = 0; i < LENGTH; i++) { + for (j = 0; j < ALEN; j++) { + if (cf[i].a[j] != cfr[i].a[j]) { + TestErrPrintf("Field a data doesn't match, cf[%d].a[%d]=%d, cfr[%d].a[%d]=%d\n", (int)i, + (int)j, (int)cf[i].a[j], (int)i, (int)j, (int)cfr[i].a[j]); + continue; + } /* end if */ + if (!H5_FLT_ABS_EQUAL(cf[i].b[j], cfr[i].b[j])) { + TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, + (int)j, (double)cf[i].b[j], (int)i, (int)j, (double)cfr[i].b[j]); + continue; + } /* end if */ + if (!H5_DBL_ABS_EQUAL(cf[i].c[j], cfr[i].c[j])) { + TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, + (int)j, (double)cf[i].c[j], (int)i, (int)j, (double)cfr[i].c[j]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Release memory resources */ + /* ------------------------ */ + for (i = 0; i < dtsinfo->nsubfields; i++) + HDfree(dtsinfo->name[i]); + + /* Release IDs */ + /* ----------- */ + status = H5Tclose(type); + CHECK(status, FAIL, "H5Tclose"); + + status = H5Sclose(space); + CHECK(status, FAIL, "H5Sclose"); + + status = H5Dclose(dataset); + CHECK(status, FAIL, "H5Dclose"); + + status = H5Fclose(fid); + CHECK(status, FAIL, "H5Fclose"); + + /******************************/ + /* Reopen the file and update */ + /******************************/ + + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + dataset = H5Dopen2(fid, FIELDNAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + type = H5Tcreate(H5T_COMPOUND, sizeof(fld_t)); + CHECK(type, FAIL, "H5Tcreate"); + + array_dt = H5Tarray_create2(H5T_NATIVE_FLOAT, 1, dima); + CHECK(array_dt, FAIL, "H5Tarray_create2"); + + status = H5Tinsert(type, "Two", HOFFSET(fld_t, b), array_dt); + CHECK(status, FAIL, "H5Tinsert"); + + /* Initialize the data to overwrite */ + /* -------------------------------- */ + for (i = 0; i < LENGTH; i++) + for (j = 0; j < ALEN; j++) + cf[i].b[j] = fld[i].b[j] = 1.313F; + + status = H5Dwrite(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, fld); + CHECK(status, FAIL, "H5Dwrite"); + + /* Read just the field changed */ + status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, fldr); + 
CHECK(status, FAIL, "H5Dread"); + + for (i = 0; i < LENGTH; i++) + for (j = 0; j < ALEN; j++) + if (!H5_FLT_ABS_EQUAL(fld[i].b[j], fldr[i].b[j])) { + TestErrPrintf("Field data doesn't match, fld[%d].b[%d]=%f, fldr[%d].b[%d]=%f\n", (int)i, + (int)j, (double)fld[i].b[j], (int)i, (int)j, (double)fldr[i].b[j]); + continue; + } /* end if */ + + status = H5Tclose(type); + CHECK(status, FAIL, "H5Tclose"); + + status = H5Tclose(array_dt); + CHECK(status, FAIL, "H5Tclose"); + + type = H5Dget_type(dataset); + CHECK(type, FAIL, "H5Dget_type"); + + /* Read the entire dataset again */ + status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cfr); + CHECK(status, FAIL, "H5Dread"); + + /* Verify correct data */ + /* ------------------- */ + for (i = 0; i < LENGTH; i++) { + for (j = 0; j < ALEN; j++) { + if (cf[i].a[j] != cfr[i].a[j]) { + TestErrPrintf("Field a data doesn't match, cf[%d].a[%d]=%d, cfr[%d].a[%d]=%d\n", (int)i, + (int)j, (int)cf[i].a[j], (int)i, (int)j, (int)cfr[i].a[j]); + continue; + } /* end if */ + if (!H5_FLT_ABS_EQUAL(cf[i].b[j], cfr[i].b[j])) { + TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, + (int)j, (double)cf[i].b[j], (int)i, (int)j, (double)cfr[i].b[j]); + continue; + } /* end if */ + if (!H5_DBL_ABS_EQUAL(cf[i].c[j], cfr[i].c[j])) { + TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, + (int)j, (double)cf[i].c[j], (int)i, (int)j, (double)cfr[i].c[j]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + status = H5Dclose(dataset); + CHECK(status, FAIL, "H5Dclose"); + + status = H5Tclose(type); + CHECK(status, FAIL, "H5Tclose"); + + status = H5Fclose(fid); + CHECK(status, FAIL, "H5Fclose"); + + /****************************************************/ + /* Reopen the file and print out all the data again */ + /****************************************************/ + + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + dataset = H5Dopen2(fid, FIELDNAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + type = H5Dget_type(dataset); + CHECK(type, FAIL, "H5Dget_type"); + + /* Reset the data to read in */ + /* ------------------------- */ + HDmemset(cfr, 0, sizeof(CmpField) * LENGTH); + + status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cfr); + CHECK(status, FAIL, "H5Dread"); + + /* Verify correct data */ + /* ------------------- */ + for (i = 0; i < LENGTH; i++) { + for (j = 0; j < ALEN; j++) { + if (cf[i].a[j] != cfr[i].a[j]) { + TestErrPrintf("Field a data doesn't match, cf[%d].a[%d]=%d, cfr[%d].a[%d]=%d\n", (int)i, + (int)j, (int)cf[i].a[j], (int)i, (int)j, (int)cfr[i].a[j]); + continue; + } /* end if */ + if (!H5_FLT_ABS_EQUAL(cf[i].b[j], cfr[i].b[j])) { + TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, + (int)j, (double)cf[i].b[j], (int)i, (int)j, (double)cfr[i].b[j]); + continue; + } /* end if */ + if (!H5_DBL_ABS_EQUAL(cf[i].c[j], cfr[i].c[j])) { + TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, + (int)j, (double)cf[i].c[j], (int)i, (int)j, (double)cfr[i].c[j]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + status = H5Dclose(dataset); + CHECK(status, FAIL, "H5Dclose"); + + status = H5Tclose(type); + CHECK(status, FAIL, "H5Tclose"); + + status = H5Fclose(fid); + CHECK(status, FAIL, "H5Fclose"); + + HDfree(dtsinfo); +} /* end test_array_bkg() */ + +/*------------------------------------------------------------------------- 
+ * Function: test_compat + * + * Purpose: Test array datatype compatibility code. + * + * Reads file containing old version of datatype object header + * messages for compound datatypes and verifies reading the older + * version of the is working correctly. + * + * Return: void + * + *------------------------------------------------------------------------- + */ +#if 0 +static void +test_compat(void) +{ + const char *testfile = H5_get_srcdir_filename(TESTFILE); /* Corrected test file name */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t tid1; /* Array Datatype ID */ + hid_t tid2; /* Datatype ID */ + hsize_t tdims1[] = {ARRAY1_DIM1}; + int ndims; /* Array rank for reading */ + hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ + H5T_class_t mclass; /* Datatype class for VL */ + int nmemb; /* Number of compound members */ + char *mname; /* Name of compound field */ + size_t off; /* Offset of compound field */ + hid_t mtid; /* Datatype ID for field */ + int i; /* Index variables */ + hbool_t driver_is_default_compatible; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Array Datatypes Compatibility Functionality\n")); + + /* + * Try reading a file that has been prepared that has datasets with + * compound datatypes which use an older version (version 1) of the + * datatype object header message for describing the datatype. + * + * If this test fails and the datatype object header message version has + * changed, follow the instructions in gen_old_array.c for regenerating + * the tarrold.h5 file. + */ + + if (h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible) < 0) + TestErrPrintf("can't check if VFD is default VFD compatible\n"); + if (!driver_is_default_compatible) { + HDprintf(" -- SKIPPED --\n"); + return; + } + + /* Open the testfile */ + fid1 = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK_I(fid1, "H5Fopen"); + + /* Only try to proceed if the file is around */ + if (fid1 >= 0) { + /* Open the first dataset (with no array fields) */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK_I(dataset, "H5Dopen2"); + + /* Get the datatype */ + tid1 = H5Dget_type(dataset); + CHECK_I(tid1, "H5Dget_type"); + + /* Verify datatype class */ + mclass = H5Tget_class(tid1); + VERIFY(mclass, H5T_COMPOUND, "H5Tget_class"); + + /* Get the number of compound datatype fields */ + nmemb = H5Tget_nmembers(tid1); + VERIFY(nmemb, 3, "H5Tget_nmembers"); + + /* Check the 1st field's name */ + mname = H5Tget_member_name(tid1, 0); + CHECK_PTR(mname, "H5Tget_member_name"); + if (HDstrcmp(mname, "i") != 0) + TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); + H5free_memory(mname); + + /* Check the 1st field's offset */ + off = H5Tget_member_offset(tid1, 0); + VERIFY(off, 0, "H5Tget_member_offset"); + + /* Check the 1st field's datatype */ + mtid = H5Tget_member_type(tid1, 0); + CHECK(mtid, FAIL, "H5Tget_member_type"); + if ((ret = H5Tequal(mtid, H5T_STD_I16LE)) <= 0) + TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); + ret = H5Tclose(mtid); + CHECK(mtid, FAIL, "H5Tclose"); + + /* Check the 2nd field's name */ + mname = H5Tget_member_name(tid1, 1); + CHECK_PTR(mname, "H5Tget_member_name"); + if (HDstrcmp(mname, "f") != 0) + TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); + H5free_memory(mname); + + /* Check the 2nd field's offset */ + off = H5Tget_member_offset(tid1, 1); + VERIFY(off, 4, 
"H5Tget_member_offset"); + + /* Check the 2nd field's datatype */ + mtid = H5Tget_member_type(tid1, 1); + CHECK(mtid, FAIL, "H5Tget_member_type"); + if ((ret = H5Tequal(mtid, H5T_IEEE_F32LE)) <= 0) + TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); + ret = H5Tclose(mtid); + CHECK(mtid, FAIL, "H5Tclose"); + + /* Check the 3rd field's name */ + mname = H5Tget_member_name(tid1, 2); + CHECK_PTR(mname, "H5Tget_member_name"); + if (HDstrcmp(mname, "l") != 0) + TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); + H5free_memory(mname); + + /* Check the 3rd field's offset */ + off = H5Tget_member_offset(tid1, 2); + VERIFY(off, 8, "H5Tget_member_offset"); + + /* Check the 3rd field's datatype */ + mtid = H5Tget_member_type(tid1, 2); + CHECK(mtid, FAIL, "H5Tget_member_type"); + if ((ret = H5Tequal(mtid, H5T_STD_I32LE)) <= 0) + TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); + ret = H5Tclose(mtid); + CHECK(mtid, FAIL, "H5Tclose"); + + /* Close the datatype */ + ret = H5Tclose(tid1); + CHECK_I(ret, "H5Tclose"); + + /* Close the dataset */ + ret = H5Dclose(dataset); + CHECK_I(ret, "H5Dclose"); + + /* Open the second dataset (with array fields) */ + dataset = H5Dopen2(fid1, "Dataset2", H5P_DEFAULT); + CHECK_I(dataset, "H5Dopen2"); + + /* Get the datatype */ + tid1 = H5Dget_type(dataset); + CHECK_I(tid1, "H5Dget_type"); + + /* Verify datatype class */ + mclass = H5Tget_class(tid1); + VERIFY(mclass, H5T_COMPOUND, "H5Tget_class"); + + /* Get the number of compound datatype fields */ + nmemb = H5Tget_nmembers(tid1); + VERIFY(nmemb, 4, "H5Tget_nmembers"); + + /* Check the 1st field's name */ + mname = H5Tget_member_name(tid1, 0); + CHECK_PTR(mname, "H5Tget_member_name"); + if (mname && HDstrcmp(mname, "i") != 0) + TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); + if (mname) + H5free_memory(mname); + + /* Check the 1st field's offset */ + off = H5Tget_member_offset(tid1, 0); + VERIFY(off, 0, "H5Tget_member_offset"); + + /* Check the 1st field's datatype */ + mtid = H5Tget_member_type(tid1, 0); + CHECK(mtid, FAIL, "H5Tget_member_type"); + if ((ret = H5Tequal(mtid, H5T_STD_I16LE)) <= 0) + TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); + ret = H5Tclose(mtid); + CHECK(mtid, FAIL, "H5Tclose"); + + /* Check the 2nd field's name */ + mname = H5Tget_member_name(tid1, 1); + CHECK_PTR(mname, "H5Tget_member_name"); + if (mname && HDstrcmp(mname, "f") != 0) + TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); + if (mname) + H5free_memory(mname); + + /* Check the 2nd field's offset */ + off = H5Tget_member_offset(tid1, 1); + VERIFY(off, 4, "H5Tget_member_offset"); + + /* Check the 2nd field's datatype */ + mtid = H5Tget_member_type(tid1, 1); + CHECK(mtid, FAIL, "H5Tget_member_type"); + + /* Verify datatype class */ + mclass = H5Tget_class(mtid); + VERIFY(mclass, H5T_ARRAY, "H5Tget_class"); + + /* Check the array rank */ + ndims = H5Tget_array_ndims(mtid); + VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); + + /* Get the array dimensions */ + ret = H5Tget_array_dims2(mtid, rdims1); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i < ndims; i++) + if (rdims1[i] != tdims1[i]) { + TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE + ", tdims1[%d]=%" PRIuHSIZE "\n", + i, rdims1[i], i, tdims1[i]); + continue; + } /* end if */ + + /* Check the array's base datatype */ + tid2 = H5Tget_super(mtid); + CHECK(tid2, FAIL, 
"H5Tget_super"); + + if ((ret = H5Tequal(tid2, H5T_IEEE_F32LE)) <= 0) + TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(mtid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Check the 3rd field's name */ + mname = H5Tget_member_name(tid1, 2); + CHECK_PTR(mname, "H5Tget_member_name"); + if (mname && HDstrcmp(mname, "l") != 0) + TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); + if (mname) + H5free_memory(mname); + + /* Check the 3rd field's offset */ + off = H5Tget_member_offset(tid1, 2); + VERIFY(off, 20, "H5Tget_member_offset"); + + /* Check the 3rd field's datatype */ + mtid = H5Tget_member_type(tid1, 2); + CHECK(mtid, FAIL, "H5Tget_member_type"); + + /* Verify datatype class */ + mclass = H5Tget_class(mtid); + VERIFY(mclass, H5T_ARRAY, "H5Tget_class"); + + /* Check the array rank */ + ndims = H5Tget_array_ndims(mtid); + VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); + + /* Get the array dimensions */ + ret = H5Tget_array_dims2(mtid, rdims1); + CHECK(ret, FAIL, "H5Tget_array_dims2"); + + /* Check the array dimensions */ + for (i = 0; i < ndims; i++) + if (rdims1[i] != tdims1[i]) { + TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE + ", tdims1[%d]=%" PRIuHSIZE "\n", + i, rdims1[i], i, tdims1[i]); + continue; + } /* end if */ + + /* Check the array's base datatype */ + tid2 = H5Tget_super(mtid); + CHECK(tid2, FAIL, "H5Tget_super"); + + if ((ret = H5Tequal(tid2, H5T_STD_I32LE)) <= 0) + TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(mtid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Check the 4th field's name */ + mname = H5Tget_member_name(tid1, 3); + CHECK_PTR(mname, "H5Tget_member_name"); + if (mname && HDstrcmp(mname, "d") != 0) + TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); + if (mname) + H5free_memory(mname); + + /* Check the 4th field's offset */ + off = H5Tget_member_offset(tid1, 3); + VERIFY(off, 36, "H5Tget_member_offset"); + + /* Check the 4th field's datatype */ + mtid = H5Tget_member_type(tid1, 3); + CHECK(mtid, FAIL, "H5Tget_member_type"); + if ((ret = H5Tequal(mtid, H5T_IEEE_F64LE)) <= 0) + TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); + ret = H5Tclose(mtid); + CHECK(mtid, FAIL, "H5Tclose"); + + /* Close the datatype */ + ret = H5Tclose(tid1); + CHECK_I(ret, "H5Tclose"); + + /* Close the dataset */ + ret = H5Dclose(dataset); + CHECK_I(ret, "H5Dclose"); + + /* Close the file */ + ret = H5Fclose(fid1); + CHECK_I(ret, "H5Fclose"); + } /* end if */ + else + HDprintf("***cannot open the pre-created compound datatype test file (%s)\n", testfile); + +} /* end test_compat() */ +#endif + +/*------------------------------------------------------------------------- + * Function: test_array + * + * Purpose: Main array datatype testing routine. + * + * Return: void + * + *------------------------------------------------------------------------- + */ +void +test_array(void) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing Array Datatypes\n")); + + /* These tests use the same file... 
*/ + test_array_atomic_1d(); /* Test 1-D array of atomic datatypes */ + test_array_atomic_3d(); /* Test 3-D array of atomic datatypes */ + test_array_array_atomic(); /* Test 1-D array of 2-D arrays of atomic datatypes */ + test_array_compound_atomic(); /* Test 1-D array of compound datatypes (with no array fields) */ + test_array_compound_array(); /* Test 1-D array of compound datatypes (with array fields) */ + test_array_vlen_atomic(); /* Test 1-D array of atomic VL datatypes */ + test_array_vlen_array(); /* Test 1-D array of 1-D array VL datatypes */ + test_array_funcs(); /* Test type functions with array types */ + + test_array_bkg(); /* Read compound datatype with array fields and background fields read */ +#if 0 + /* This test uses a custom file */ + test_compat(); /* Test compatibility changes for compound datatype fields */ +#endif +} /* end test_array() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_array + * + * Purpose: Cleanup temporary test files + * + * Return: void + * + * Programmer: Quincey Koziol + * June 8, 1999 + * + *------------------------------------------------------------------------- + */ +void +cleanup_array(void) +{ + H5Fdelete(FILENAME, H5P_DEFAULT); +} /* end cleanup_array() */ diff --git a/test/API/tattr.c b/test/API/tattr.c new file mode 100644 index 00000000000..d006eb8fda4 --- /dev/null +++ b/test/API/tattr.c @@ -0,0 +1,11929 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: tattr + * + * Test the attribute functionality + * + *************************************************************/ + +#include "testhdf5.h" + +#if 0 +#include "H5VLnative_private.h" + +/* + * This file needs to access private information from the H5O package. + * This file also needs to access the object header testing code. + */ +#define H5O_FRIEND /*suppress error about including H5Opkg */ +#define H5O_TESTING +#include "H5Opkg.h" /* Object headers */ + +/* + * This file needs to access private information from the H5A package. + * This file also needs to access the attribute testing code. + */ +#define H5A_FRIEND /*suppress error about including H5Apkg */ +#define H5A_TESTING +#include "H5Apkg.h" /* Attributes */ + +/* + * This file needs to access private information from the H5F package. + * This file also needs to access the file testing code. 
+ */ +#define H5F_FRIEND /*suppress error about including H5Fpkg */ +#define H5F_TESTING +#include "H5Fpkg.h" /* File access */ +#endif + +#define FILENAME "tattr.h5" +#define NAME_BUF_SIZE 1024 +#define ATTR_NAME_LEN 16 +#define ATTR_MAX_DIMS 7 +#define ATTR_TMP_NAME "a really long temp_name" +#define CORDER_ITER_STOP 3 + +/* 3-D dataset with fixed dimensions */ +#define SPACE1_RANK 3 +#define SPACE1_DIM1 3 +#define SPACE1_DIM2 15 +#define SPACE1_DIM3 13 + +/* Dataset Information */ +#define DSET1_NAME "Dataset1" +#define DSET2_NAME "Dataset2" +#define DSET3_NAME "Dataset3" +#define NUM_DSETS 3 + +/* Group Information */ +#define GROUP1_NAME "/Group1" +#define GROUP2_NAME "/Group2" +#define GROUP3_NAME "/Group3" + +/* Named Datatype Information */ +#define TYPE1_NAME "/Type" + +/* Attribute Rank & Dimensions */ +#define ATTR1_NAME "Attr1" +#define ATTR1_RANK 1 +#define ATTR1_DIM1 3 +int attr_data1[ATTR1_DIM1] = {512, -234, 98123}; /* Test data for 1st attribute */ + +/* rank & dimensions for another attribute */ +#define ATTR1A_NAME "Attr1_a" +int attr_data1a[ATTR1_DIM1] = {256, 11945, -22107}; + +#define ATTR2_NAME "Attr2" +#define ATTR2_RANK 2 +#define ATTR2_DIM1 2 +#define ATTR2_DIM2 2 +int attr_data2[ATTR2_DIM1][ATTR2_DIM2] = {{7614, -416}, {197814, -3}}; /* Test data for 2nd attribute */ + +#define ATTR3_NAME "Attr3" +#define ATTR3_RANK 3 +#define ATTR3_DIM1 2 +#define ATTR3_DIM2 2 +#define ATTR3_DIM3 2 +double attr_data3[ATTR3_DIM1][ATTR3_DIM2][ATTR3_DIM3] = { + {{2.3, -26.1}, {0.123, -10.0}}, {{973.23, -0.91827}, {2.0, 23.0}}}; /* Test data for 3rd attribute */ + +#define ATTR4_NAME "Attr4" +#define ATTR4_RANK 2 +#define ATTR4_DIM1 2 +#define ATTR4_DIM2 2 +#define ATTR4_FIELDNAME1 "i" +#define ATTR4_FIELDNAME2 "d" +#define ATTR4_FIELDNAME3 "c" +size_t attr4_field1_off = 0; +size_t attr4_field2_off = 0; +size_t attr4_field3_off = 0; +struct attr4_struct { + int i; + double d; + char c; +} attr_data4[ATTR4_DIM1][ATTR4_DIM2] = { + {{3, -26.1, 'd'}, {-100000, 0.123, '3'}}, + {{-23, 981724.2, 'Q'}, {0, 2.0, '\n'}}}; /* Test data for 4th attribute */ + +#define ATTR5_NAME "Attr5" +#define ATTR5_RANK 0 +float attr_data5 = -5.123F; /* Test data for 5th attribute */ + +#define ATTR6_RANK 3 +#define ATTR6_DIM1 100 +#define ATTR6_DIM2 100 +#define ATTR6_DIM3 100 + +#define ATTR7_NAME "attr 1 - 000000" +#define ATTR8_NAME "attr 2" + +#define LINK1_NAME "Link1" + +#define NATTR_MANY_OLD 350 +#define NATTR_MANY_NEW 3500 + +#define BUG2_NATTR 100 +#define BUG2_NATTR2 16 + +#define BUG3_DSET_NAME "dset" +#define BUG3_DT_NAME "dt" +#define BUG3_ATTR_NAME "attr" + +/* Used by test_attr_delete_last_dense() */ +#define GRPNAME "grp" +#define ATTRNAME "attr" +#define DIM0 100 +#define DIM1 100 +#define RANK 2 + +/* Used by test_attr_info_null_info_pointer() */ +#define GET_INFO_NULL_POINTER_ATTR_NAME "NullInfoPointerAttr" + +/* Used by test_attr_rename_invalid_name() */ +#define INVALID_RENAME_TEST_ATTR_NAME "InvalidRenameTestAttr" +#define INVALID_RENAME_TEST_NEW_ATTR_NAME "InvalidRenameTestNewAttr" + +/* Used by test_attr_get_name_invalid_buf() */ +#define GET_NAME_INVALID_BUF_TEST_ATTR_NAME "InvalidNameBufferTestAttr" + +/* Attribute iteration struct */ +typedef struct { + H5_iter_order_t order; /* Direction of iteration */ + unsigned ncalled; /* # of times callback is entered */ + unsigned nskipped; /* # of attributes skipped */ + int stop; /* # of iterations to stop after */ + hsize_t curr; /* Current creation order value */ + size_t max_visit; /* Size of "visited attribute" flag array */ + 
hbool_t *visited; /* Pointer to array of "visited attribute" flags */
+} attr_iter_info_t;
+
+static herr_t attr_op1(hid_t loc_id, const char *name, const H5A_info_t *ainfo, void *op_data);
+
+/* Global dcpl ID, can be re-set as a generated dcpl for various operations
+ * across multiple tests.
+ * e.g., minimized dataset object headers
+ */
+static hid_t dcpl_g = H5P_DEFAULT;
+
+/****************************************************************
+**
+** test_attr_basic_write(): Test basic H5A (attribute) code.
+**     Tests integer attributes on both datasets and groups
+**
+****************************************************************/
+static void
+test_attr_basic_write(hid_t fapl)
+{
+    hid_t fid1;        /* HDF5 File IDs */
+    hid_t dataset;     /* Dataset ID */
+    hid_t group;       /* Group ID */
+    hid_t sid1, sid2;  /* Dataspace ID */
+    hid_t attr, attr2; /* Attribute ID */
+#if 0
+    hsize_t attr_size; /* storage size for attribute */
+#endif
+    ssize_t attr_name_size;   /* size of attribute name */
+    char   *attr_name = NULL; /* name of attribute */
+    hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+    hsize_t dims2[] = {ATTR1_DIM1};
+    hsize_t dims3[] = {ATTR2_DIM1, ATTR2_DIM2};
+    int     read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */
+    int     i;
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+    hid_t ret_id; /* Generic hid_t return value */
+#endif
+    herr_t ret; /* Generic return value */
+
+    /* Output message about test being performed */
+    MESSAGE(5, ("Testing Basic Scalar Attribute Writing Functions\n"));
+
+    /* Create file */
+    fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+    CHECK(fid1, FAIL, "H5Fcreate");
+
+    /* Create dataspace for dataset */
+    sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+    CHECK(sid1, FAIL, "H5Screate_simple");
+
+    /* Create a dataset */
+    dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+    CHECK(dataset, FAIL, "H5Dcreate2");
+
+    /* Create dataspace for attribute */
+    sid2 = H5Screate_simple(ATTR1_RANK, dims2, NULL);
+    CHECK(sid2, FAIL, "H5Screate_simple");
+
+    /* Try to create an attribute on the file (should create an attribute on root group) */
+    attr = H5Acreate2(fid1, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr, FAIL, "H5Acreate2");
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Open the root group */
+    group = H5Gopen2(fid1, "/", H5P_DEFAULT);
+    CHECK(group, FAIL, "H5Gopen2");
+
+    /* Open attribute again */
+    attr = H5Aopen(group, ATTR1_NAME, H5P_DEFAULT);
+    CHECK(attr, FAIL, "H5Aopen");
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Close root group */
+    ret = H5Gclose(group);
+    CHECK(ret, FAIL, "H5Gclose");
+
+    /* Create an attribute for the dataset */
+    attr = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr, FAIL, "H5Acreate2");
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+    /* Try to create the same attribute again (should fail) */
+    H5E_BEGIN_TRY
+    {
+        ret_id = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+    }
+    H5E_END_TRY;
+    VERIFY(ret_id, FAIL, "H5Acreate2");
+#endif
+    /* Write attribute information */
+    ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1);
+    CHECK(ret, FAIL, "H5Awrite");
+
+    /* Create another attribute for the dataset */
+    attr2 = H5Acreate2(dataset, ATTR1A_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr2, FAIL, "H5Acreate2");
+
+    /* Write attribute information */
+    ret = H5Awrite(attr2, H5T_NATIVE_INT, attr_data1a);
+    CHECK(ret, FAIL, "H5Awrite");
+#if 0
+    /* Check storage size for attribute */
+    attr_size = H5Aget_storage_size(attr);
+    VERIFY(attr_size, (ATTR1_DIM1 * sizeof(int)), "H5A_get_storage_size");
+#endif
+    /* Read attribute information immediately, without closing attribute */
+    ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+    CHECK(ret, FAIL, "H5Aread");
+
+    /* Verify values read in */
+    for (i = 0; i < ATTR1_DIM1; i++)
+        if (attr_data1[i] != read_data1[i])
+            TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i,
+                          attr_data1[i], i, read_data1[i]);
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Close attribute */
+    ret = H5Aclose(attr2);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* change attribute name */
+    ret = H5Arename(dataset, ATTR1_NAME, ATTR_TMP_NAME);
+    CHECK(ret, FAIL, "H5Arename");
+
+    /* Open attribute again */
+    attr = H5Aopen(dataset, ATTR_TMP_NAME, H5P_DEFAULT);
+    CHECK(attr, FAIL, "H5Aopen");
+
+    /* Verify new attribute name */
+    attr_name_size = H5Aget_name(attr, (size_t)0, NULL);
+    CHECK(attr_name_size, FAIL, "H5Aget_name");
+
+    if (attr_name_size > 0) {
+        attr_name = (char *)HDcalloc((size_t)(attr_name_size + 1), sizeof(char));
+        CHECK_PTR(attr_name, "HDcalloc");
+
+        if (attr_name) {
+            ret = (herr_t)H5Aget_name(attr, (size_t)(attr_name_size + 1), attr_name);
+            CHECK(ret, FAIL, "H5Aget_name");
+            ret = HDstrcmp(attr_name, ATTR_TMP_NAME);
+            VERIFY(ret, 0, "HDstrcmp");
+
+            HDfree(attr_name);
+            attr_name = NULL;
+        } /* end if */
+    }     /* end if */
+
+    /* Read attribute information immediately, without closing attribute */
+    ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+    CHECK(ret, FAIL, "H5Aread");
+
+    /* Verify values read in */
+    for (i = 0; i < ATTR1_DIM1; i++)
+        if (attr_data1[i] != read_data1[i])
+            TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i,
+                          attr_data1[i], i, read_data1[i]);
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Open the second attribute again */
+    attr2 = H5Aopen(dataset, ATTR1A_NAME, H5P_DEFAULT);
+    CHECK(attr2, FAIL, "H5Aopen");
+
+    /* Verify the second attribute's name */
+    attr_name_size = H5Aget_name(attr2, (size_t)0, NULL);
+    CHECK(attr_name_size, FAIL, "H5Aget_name");
+
+    if (attr_name_size > 0) {
+        attr_name = (char *)HDcalloc((size_t)(attr_name_size + 1), sizeof(char));
+        CHECK_PTR(attr_name, "HDcalloc");
+
+        if (attr_name) {
+            ret = (herr_t)H5Aget_name(attr2, (size_t)(attr_name_size + 1), attr_name);
+            CHECK(ret, FAIL, "H5Aget_name");
+            ret = HDstrcmp(attr_name, ATTR1A_NAME);
+            VERIFY(ret, 0, "HDstrcmp");
+
+            HDfree(attr_name);
+            attr_name = NULL;
+        } /* end if */
+    }     /* end if */
+
+    /* Read attribute information immediately, without closing attribute */
+    ret = H5Aread(attr2, H5T_NATIVE_INT, read_data1);
+    CHECK(ret, FAIL, "H5Aread");
+
+    /* Verify values read in */
+    for (i = 0; i < ATTR1_DIM1; i++)
+        if (attr_data1a[i] != read_data1[i])
+            TestErrPrintf("%d: attribute data different: attr_data1a[%d]=%d, read_data1[%d]=%d\n", __LINE__,
+                          i, attr_data1a[i], i, read_data1[i]);
+
+    /* Close attribute */
+    ret = H5Aclose(attr2);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    ret = H5Sclose(sid1);
+    CHECK(ret, FAIL, "H5Sclose");
+    ret = H5Sclose(sid2);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Create group */
+    group = H5Gcreate2(fid1, GROUP1_NAME, 
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, FAIL, "H5Gcreate2"); + + /* Create dataspace for attribute */ + sid2 = H5Screate_simple(ATTR2_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Create an attribute for the group */ + attr = H5Acreate2(group, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check storage size for attribute */ + attr_size = H5Aget_storage_size(attr); + VERIFY(attr_size, (ATTR2_DIM1 * ATTR2_DIM2 * sizeof(int)), "H5Aget_storage_size"); +#endif +#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE + /* Try to create the same attribute again (should fail) */ + H5E_BEGIN_TRY + { + ret_id = H5Acreate2(group, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Acreate2"); +#endif + /* Write attribute information */ + ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data2); + CHECK(ret, FAIL, "H5Awrite"); +#if 0 + /* Check storage size for attribute */ + attr_size = H5Aget_storage_size(attr); + VERIFY(attr_size, (ATTR2_DIM1 * ATTR2_DIM2 * sizeof(int)), "H5A_get_storage_size"); +#endif + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close Attribute dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_basic_write() */ + +/**************************************************************** +** +** test_attr_basic_read(): Test basic H5A (attribute) code. +** +****************************************************************/ +static void +test_attr_basic_read(hid_t fapl) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t group; /* Group ID */ + hid_t attr; /* Attribute ID */ + H5O_info2_t oinfo; /* Object info */ + int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */ + int read_data2[ATTR2_DIM1][ATTR2_DIM2] = {{0}}; /* Buffer for reading 2nd attribute */ + int i, j; /* Local index variables */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Attribute Functions\n")); + + /* Create file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Verify the correct number of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, 2, "H5Oget_info3"); + + /* Open first attribute for the dataset */ + attr = H5Aopen(dataset, ATTR_TMP_NAME, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); + + /* Read attribute information */ + ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); + CHECK(ret, FAIL, "H5Aread"); + + /* Verify values read in */ + for (i = 0; i < ATTR1_DIM1; i++) + if (attr_data1[i] != read_data1[i]) + TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, + attr_data1[i], i, read_data1[i]); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Open the group */ + group = H5Gopen2(fid1, GROUP1_NAME, H5P_DEFAULT); + CHECK(group, FAIL, "H5Gopen2"); + + /* Verify the correct number of attributes */ + ret = H5Oget_info3(group, &oinfo, H5O_INFO_NUM_ATTRS); + 
CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, 1, "H5Oget_info3"); + + /* Open the attribute for the group */ + attr = H5Aopen(group, ATTR2_NAME, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); + + /* Read attribute information */ + ret = H5Aread(attr, H5T_NATIVE_INT, read_data2); + CHECK(ret, FAIL, "H5Aread"); + + /* Verify values read in */ + for (i = 0; i < ATTR2_DIM1; i++) + for (j = 0; j < ATTR2_DIM2; j++) + if (attr_data2[i][j] != read_data2[i][j]) + TestErrPrintf("%d: attribute data different: attr_data2[%d][%d]=%d, read_data2[%d][%d]=%d\n", + __LINE__, i, j, attr_data2[i][j], i, j, read_data1[i]); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_basic_read() */ + +/**************************************************************** +** +** test_attr_flush(): Test H5A (attribute) code for performing +** I/O when H5Fflush is used. +** +****************************************************************/ +static void +test_attr_flush(hid_t fapl) +{ + hid_t fil, /* File ID */ + att, /* Attribute ID */ + spc, /* Dataspace ID */ + set; /* Dataset ID */ + double wdata = 3.14159; /* Data to write */ + double rdata; /* Data read in */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Attribute Flushing\n")); + + fil = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fil, FAIL, "H5Fcreate"); + + spc = H5Screate(H5S_SCALAR); + CHECK(spc, FAIL, "H5Screate"); + + set = H5Dcreate2(fil, DSET1_NAME, H5T_NATIVE_DOUBLE, spc, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(set, FAIL, "H5Dcreate2"); + + att = H5Acreate2(set, ATTR1_NAME, H5T_NATIVE_DOUBLE, spc, H5P_DEFAULT, H5P_DEFAULT); + CHECK(att, FAIL, "H5Acreate2"); +#ifndef NO_ATTR_FILL_VALUE_SUPPORT + ret = H5Aread(att, H5T_NATIVE_DOUBLE, &rdata); + CHECK(ret, FAIL, "H5Aread"); + + if (!H5_DBL_ABS_EQUAL(rdata, 0.0)) + TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n", rdata, 0.0); + + ret = H5Fflush(fil, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + ret = H5Aread(att, H5T_NATIVE_DOUBLE, &rdata); + CHECK(ret, FAIL, "H5Awrite"); + + if (!H5_DBL_ABS_EQUAL(rdata, 0.0)) + TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n", rdata, 0.0); +#else + HDprintf("** SKIPPED attribute pre-read temporarily until attribute fill values supported **\n"); +#endif + ret = H5Awrite(att, H5T_NATIVE_DOUBLE, &wdata); + CHECK(ret, FAIL, "H5Awrite"); + + ret = H5Aread(att, H5T_NATIVE_DOUBLE, &rdata); + CHECK(ret, FAIL, "H5Awrite"); + + if (!H5_DBL_ABS_EQUAL(rdata, wdata)) + TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n", rdata, wdata); + + ret = H5Sclose(spc); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Aclose(att); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Dclose(set); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(fil); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_flush() */ + +/**************************************************************** +** +** test_attr_plist(): Test Attribute Creation Property Lists +** +****************************************************************/ +static void +test_attr_plist(hid_t fapl) +{ + hid_t fid = H5I_INVALID_HID; /* File ID */ + hid_t did = H5I_INVALID_HID; /* Dataset ID */ + hid_t dsid = H5I_INVALID_HID; /* Dataspace ID (for dataset) */ + hid_t asid = H5I_INVALID_HID; /* Dataspace ID (for 
attribute) */ + hid_t aid = H5I_INVALID_HID; /* Attribute ID */ + hid_t acpl_id = H5I_INVALID_HID; /* Attribute creation property list ID */ + hid_t aapl_id = H5I_INVALID_HID; /* Attribute access property list ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {ATTR1_DIM1}; + H5T_cset_t cset; /* Character set for attributes */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Attribute Property Lists\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for dataset */ + dsid = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(dsid, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create a dataset */ + did = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, dsid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); + + /* Create dataspace for attribute */ + asid = H5Screate_simple(ATTR1_RANK, dims2, NULL); + CHECK(asid, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create default creation property list for attribute */ + acpl_id = H5Pcreate(H5P_ATTRIBUTE_CREATE); + CHECK(acpl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Create default access property list for attribute + * This currently has no properties, but we need to test its creation + * and use. + */ + aapl_id = H5Pcreate(H5P_ATTRIBUTE_ACCESS); + CHECK(aapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Get the character encoding and ensure that it is the default (ASCII) */ + ret = H5Pget_char_encoding(acpl_id, &cset); + CHECK(ret, FAIL, "H5Pget_char_encoding"); + VERIFY(cset, H5T_CSET_ASCII, "H5Pget_char_encoding"); + + /* Create an attribute for the dataset using the property list */ + aid = H5Acreate2(did, ATTR1_NAME, H5T_NATIVE_INT, asid, acpl_id, aapl_id); + CHECK(aid, H5I_INVALID_HID, "H5Acreate2"); + + /* Close the property list, and get the attribute's creation property list */ + ret = H5Pclose(acpl_id); + CHECK(ret, FAIL, "H5Pclose"); + acpl_id = H5Aget_create_plist(aid); + CHECK(acpl_id, H5I_INVALID_HID, "H5Aget_create_plist"); + + /* Get the character encoding and ensure that it is the default (ASCII) */ + ret = H5Pget_char_encoding(acpl_id, &cset); + CHECK(ret, FAIL, "H5Pget_char_encoding"); + VERIFY(cset, H5T_CSET_ASCII, "H5Pget_char_encoding"); + + /* Close the property list and attribute */ + ret = H5Pclose(acpl_id); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Create a new property list and modify it to use a different encoding */ + acpl_id = H5Pcreate(H5P_ATTRIBUTE_CREATE); + CHECK(acpl_id, H5I_INVALID_HID, "H5Pcreate"); + ret = H5Pset_char_encoding(acpl_id, H5T_CSET_UTF8); + CHECK(ret, FAIL, "H5Pset_char_encoding"); + + /* Get the character encoding and ensure that it has been changed */ + ret = H5Pget_char_encoding(acpl_id, &cset); + CHECK(ret, FAIL, "H5Pget_char_encoding"); + VERIFY(cset, H5T_CSET_UTF8, "H5Pget_char_encoding"); + + /* Create an attribute for the dataset using the modified property list */ + aid = H5Acreate2(did, ATTR2_NAME, H5T_NATIVE_INT, asid, acpl_id, aapl_id); + CHECK(aid, H5I_INVALID_HID, "H5Acreate2"); + + /* Close the property list and attribute */ + ret = H5Pclose(acpl_id); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Re-open the second attribute and ensure that its character encoding is correct */ + aid = H5Aopen(did, ATTR2_NAME, H5P_DEFAULT); + CHECK(aid, H5I_INVALID_HID, 
"H5Aopen"); + acpl_id = H5Aget_create_plist(aid); + CHECK(acpl_id, H5I_INVALID_HID, "H5Aget_create_plist"); + ret = H5Pget_char_encoding(acpl_id, &cset); + CHECK(ret, FAIL, "H5Pget_char_encoding"); + VERIFY(cset, H5T_CSET_UTF8, "H5Pget_char_encoding"); + + /* Close everything */ + ret = H5Sclose(dsid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(asid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Pclose(aapl_id); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(acpl_id); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_plist() */ + +/**************************************************************** +** +** test_attr_compound_write(): Test H5A (attribute) code. +** Tests compound datatype attributes +** +****************************************************************/ +static void +test_attr_compound_write(hid_t fapl) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t tid1; /* Attribute datatype ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {ATTR4_DIM1, ATTR4_DIM2}; +#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE + hid_t ret_id; /* Generic hid_t return value */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Multiple Attribute Functions\n")); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close dataset's dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create the attribute datatype. 
*/ + tid1 = H5Tcreate(H5T_COMPOUND, sizeof(struct attr4_struct)); + CHECK(tid1, FAIL, "H5Tcreate"); + attr4_field1_off = HOFFSET(struct attr4_struct, i); + ret = H5Tinsert(tid1, ATTR4_FIELDNAME1, attr4_field1_off, H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + attr4_field2_off = HOFFSET(struct attr4_struct, d); + ret = H5Tinsert(tid1, ATTR4_FIELDNAME2, attr4_field2_off, H5T_NATIVE_DOUBLE); + CHECK(ret, FAIL, "H5Tinsert"); + attr4_field3_off = HOFFSET(struct attr4_struct, c); + ret = H5Tinsert(tid1, ATTR4_FIELDNAME3, attr4_field3_off, H5T_NATIVE_SCHAR); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create dataspace for 1st attribute */ + sid2 = H5Screate_simple(ATTR4_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Create complex attribute for the dataset */ + attr = H5Acreate2(dataset, ATTR4_NAME, tid1, sid2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE + /* Try to create the same attribute again (should fail) */ + H5E_BEGIN_TRY + { + ret_id = H5Acreate2(dataset, ATTR4_NAME, tid1, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Acreate2"); +#endif + /* Write complex attribute data */ + ret = H5Awrite(attr, tid1, attr_data4); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close attribute's dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close attribute's datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_compound_write() */ + +/**************************************************************** +** +** test_attr_compound_read(): Test basic H5A (attribute) code. 
+** +****************************************************************/ +static void +test_attr_compound_read(hid_t fapl) +{ + hid_t fid1; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t space; /* Attribute dataspace */ + hid_t type; /* Attribute datatype */ + hid_t attr; /* Attribute ID */ + char attr_name[ATTR_NAME_LEN]; /* Buffer for attribute names */ + int rank; /* Attribute rank */ + hsize_t dims[ATTR_MAX_DIMS]; /* Attribute dimensions */ + H5T_class_t t_class; /* Attribute datatype class */ + H5T_order_t order; /* Attribute datatype order */ + size_t size; /* Attribute datatype size as stored in file */ + int fields; /* # of Attribute datatype fields */ + char *fieldname; /* Name of a field */ + size_t offset; /* Attribute datatype field offset */ + hid_t field; /* Attribute field datatype */ + struct attr4_struct read_data4[ATTR4_DIM1][ATTR4_DIM2]; /* Buffer for reading 4th attribute */ + ssize_t name_len; /* Length of attribute name */ + H5O_info2_t oinfo; /* Object info */ + int i, j; /* Local index variables */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Attribute Functions\n")); + + /* Open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Verify the correct number of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, 1, "H5Oget_info3"); + + /* Open 1st attribute for the dataset */ + attr = + H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen_by_idx"); + + /* Verify Dataspace */ + space = H5Aget_space(attr); + CHECK(space, FAIL, "H5Aget_space"); + rank = H5Sget_simple_extent_ndims(space); + VERIFY(rank, ATTR4_RANK, "H5Sget_simple_extent_ndims"); + ret = H5Sget_simple_extent_dims(space, dims, NULL); + CHECK(ret, FAIL, "H5Sget_simple_extent_dims"); + if (dims[0] != ATTR4_DIM1) + TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR4_DIM1); + if (dims[1] != ATTR4_DIM2) + TestErrPrintf("attribute dimensions different: dims[1]=%d, should be %d\n", (int)dims[1], ATTR4_DIM2); + H5Sclose(space); + + /* Verify Datatype */ + type = H5Aget_type(attr); + CHECK(type, FAIL, "H5Aget_type"); + t_class = H5Tget_class(type); + VERIFY(t_class, H5T_COMPOUND, "H5Tget_class"); + fields = H5Tget_nmembers(type); + VERIFY(fields, 3, "H5Tget_nmembers"); + for (i = 0; i < fields; i++) { + fieldname = H5Tget_member_name(type, (unsigned)i); + if (!(HDstrcmp(fieldname, ATTR4_FIELDNAME1) != 0 || HDstrcmp(fieldname, ATTR4_FIELDNAME2) != 0 || + HDstrcmp(fieldname, ATTR4_FIELDNAME3) != 0)) + TestErrPrintf("invalid field name for field #%d: %s\n", i, fieldname); + H5free_memory(fieldname); + } /* end for */ + offset = H5Tget_member_offset(type, 0); + VERIFY(offset, attr4_field1_off, "H5Tget_member_offset"); + offset = H5Tget_member_offset(type, 1); + VERIFY(offset, attr4_field2_off, "H5Tget_member_offset"); + offset = H5Tget_member_offset(type, 2); + VERIFY(offset, attr4_field3_off, "H5Tget_member_offset"); + + /* Verify each field's type, class & size */ + field = H5Tget_member_type(type, 0); + CHECK(field, FAIL, "H5Tget_member_type"); + t_class = H5Tget_class(field); + VERIFY(t_class, H5T_INTEGER, "H5Tget_class"); + order = H5Tget_order(field); + 
VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_INT), H5T_order_t, "%d", "H5Tget_order"); + size = H5Tget_size(field); + VERIFY(size, H5Tget_size(H5T_NATIVE_INT), "H5Tget_size"); + H5Tclose(field); + field = H5Tget_member_type(type, 1); + CHECK(field, FAIL, "H5Tget_member_type"); + t_class = H5Tget_class(field); + VERIFY(t_class, H5T_FLOAT, "H5Tget_class"); + order = H5Tget_order(field); + VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_DOUBLE), H5T_order_t, "%d", "H5Tget_order"); + size = H5Tget_size(field); + VERIFY(size, H5Tget_size(H5T_NATIVE_DOUBLE), "H5Tget_size"); + H5Tclose(field); + field = H5Tget_member_type(type, 2); + CHECK(field, FAIL, "H5Tget_member_type"); + t_class = H5Tget_class(field); + VERIFY(t_class, H5T_INTEGER, "H5Tget_class"); + order = H5Tget_order(field); + VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_SCHAR), H5T_order_t, "%d", "H5Tget_order"); + size = H5Tget_size(field); + VERIFY(size, H5Tget_size(H5T_NATIVE_SCHAR), "H5Tget_size"); + H5Tclose(field); + + /* Read attribute information */ + ret = H5Aread(attr, type, read_data4); + CHECK(ret, FAIL, "H5Aread"); + + /* Verify values read in */ + for (i = 0; i < ATTR4_DIM1; i++) + for (j = 0; j < ATTR4_DIM2; j++) + if (HDmemcmp(&attr_data4[i][j], &read_data4[i][j], sizeof(struct attr4_struct)) != 0) { + HDprintf("%d: attribute data different: attr_data4[%d][%d].i=%d, read_data4[%d][%d].i=%d\n", + __LINE__, i, j, attr_data4[i][j].i, i, j, read_data4[i][j].i); + HDprintf("%d: attribute data different: attr_data4[%d][%d].d=%f, read_data4[%d][%d].d=%f\n", + __LINE__, i, j, attr_data4[i][j].d, i, j, read_data4[i][j].d); + TestErrPrintf( + "%d: attribute data different: attr_data4[%d][%d].c=%c, read_data4[%d][%d].c=%c\n", + __LINE__, i, j, attr_data4[i][j].c, i, j, read_data4[i][j].c); + } /* end if */ + + /* Verify Name */ + name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); + VERIFY(name_len, HDstrlen(ATTR4_NAME), "H5Aget_name"); + if (HDstrcmp(attr_name, ATTR4_NAME) != 0) + TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR4_NAME); + + /* Close attribute datatype */ + ret = H5Tclose(type); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_compound_read() */ + +/**************************************************************** +** +** test_attr_scalar_write(): Test scalar H5A (attribute) writing code. 
+**
+****************************************************************/
+static void
+test_attr_scalar_write(hid_t fapl)
+{
+    hid_t fid1;       /* HDF5 File IDs */
+    hid_t dataset;    /* Dataset ID */
+    hid_t sid1, sid2; /* Dataspace ID */
+    hid_t attr;       /* Attribute ID */
+    hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+    hid_t ret_id; /* Generic hid_t return value */
+#endif
+    herr_t ret; /* Generic return value */
+
+    /* Output message about test being performed */
+    MESSAGE(5, ("Testing Basic Attribute Functions\n"));
+
+    /* Create file */
+    fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+    CHECK(fid1, FAIL, "H5Fcreate");
+
+    /* Create dataspace for dataset */
+    sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+    CHECK(sid1, FAIL, "H5Screate_simple");
+
+    /* Create a dataset */
+    dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+    CHECK(dataset, FAIL, "H5Dcreate2");
+
+    /* Create dataspace for attribute */
+    sid2 = H5Screate_simple(ATTR5_RANK, NULL, NULL);
+    CHECK(sid2, FAIL, "H5Screate_simple");
+
+    /* Create an attribute for the dataset */
+    attr = H5Acreate2(dataset, ATTR5_NAME, H5T_NATIVE_FLOAT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr, FAIL, "H5Acreate2");
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+    /* Try to create the same attribute again (should fail) */
+    H5E_BEGIN_TRY
+    {
+        ret_id = H5Acreate2(dataset, ATTR5_NAME, H5T_NATIVE_FLOAT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+    }
+    H5E_END_TRY;
+    VERIFY(ret_id, FAIL, "H5Acreate2");
+#endif
+    /* Write attribute information */
+    ret = H5Awrite(attr, H5T_NATIVE_FLOAT, &attr_data5);
+    CHECK(ret, FAIL, "H5Awrite");
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    ret = H5Sclose(sid1);
+    CHECK(ret, FAIL, "H5Sclose");
+    ret = H5Sclose(sid2);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close file */
+    ret = H5Fclose(fid1);
+    CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_scalar_write() */
+
+/****************************************************************
+**
+** test_attr_scalar_read(): Test scalar H5A (attribute) reading code.
+**
+****************************************************************/
+static void
+test_attr_scalar_read(hid_t fapl)
+{
+    hid_t fid1;    /* HDF5 File IDs */
+    hid_t dataset; /* Dataset ID */
+    hid_t sid;     /* Dataspace ID */
+    hid_t attr;    /* Attribute ID */
+    H5S_class_t stype;  /* Dataspace class */
+    float rdata = 0.0F; /* Buffer for reading the attribute */
+    H5O_info2_t oinfo;  /* Object info */
+    herr_t ret;         /* Generic return value */
+
+    /* Output message about test being performed */
+    MESSAGE(5, ("Testing Basic Scalar Attribute Reading Functions\n"));
+
+    /* Create file */
+    fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+    CHECK(fid1, FAIL, "H5Fopen");
+
+    /* Open the dataset */
+    dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT);
+    CHECK(dataset, FAIL, "H5Dopen2");
+
+    /* Verify the correct number of attributes */
+    ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+    CHECK(ret, FAIL, "H5Oget_info3");
+    VERIFY(oinfo.num_attrs, 1, "H5Oget_info3");
+
+    /* Open an attribute for the dataset */
+    attr = H5Aopen(dataset, ATTR5_NAME, H5P_DEFAULT);
+    CHECK(attr, FAIL, "H5Aopen");
+
+    /* Read attribute information */
+    ret = H5Aread(attr, H5T_NATIVE_FLOAT, &rdata);
+    CHECK(ret, FAIL, "H5Aread");
+
+    /* Verify the floating-point value in this way to avoid compiler warning. 
*/ + if (!H5_FLT_ABS_EQUAL(rdata, attr_data5)) + HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Aread", + (double)attr_data5, (double)rdata, (int)__LINE__, __FILE__); + + /* Get the attribute's dataspace */ + sid = H5Aget_space(attr); + CHECK(sid, FAIL, "H5Aget_space"); + + /* Make certain the dataspace is scalar */ + stype = H5Sget_simple_extent_type(sid); + VERIFY(stype, H5S_SCALAR, "H5Sget_simple_extent_type"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_scalar_read() */ + +/**************************************************************** +** +** test_attr_mult_write(): Test basic H5A (attribute) code. +** Tests integer attributes on both datasets and groups +** +****************************************************************/ +static void +test_attr_mult_write(hid_t fapl) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {ATTR1_DIM1}; + hsize_t dims3[] = {ATTR2_DIM1, ATTR2_DIM2}; + hsize_t dims4[] = {ATTR3_DIM1, ATTR3_DIM2, ATTR3_DIM3}; +#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE + hid_t ret_id; /* Generic hid_t return value */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Multiple Attribute Functions\n")); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close dataset's dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for 1st attribute */ + sid2 = H5Screate_simple(ATTR1_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Create 1st attribute for the dataset */ + attr = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE + /* Try to create the same attribute again (should fail) */ + H5E_BEGIN_TRY + { + ret_id = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Acreate2"); +#endif + /* Write 1st attribute data */ + ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close 1st attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close attribute's dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for 2nd attribute */ + sid2 = H5Screate_simple(ATTR2_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Create 2nd attribute for the dataset */ + attr = H5Acreate2(dataset, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE + /* Try to create the same attribute again (should fail) */ + H5E_BEGIN_TRY + { + ret_id = 
H5Acreate2(dataset, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Acreate2"); +#endif + /* Write 2nd attribute information */ + ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data2); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close 2nd attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close 2nd attribute's dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for 3rd attribute */ + sid2 = H5Screate_simple(ATTR3_RANK, dims4, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Create 3rd attribute for the dataset */ + attr = H5Acreate2(dataset, ATTR3_NAME, H5T_NATIVE_DOUBLE, sid2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE + /* Try to create the same attribute again (should fail) */ + H5E_BEGIN_TRY + { + ret_id = H5Acreate2(dataset, ATTR3_NAME, H5T_NATIVE_DOUBLE, sid2, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Acreate2"); +#endif + /* Write 3rd attribute information */ + ret = H5Awrite(attr, H5T_NATIVE_DOUBLE, attr_data3); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close 3rd attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close 3rd attribute's dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_mult_write() */ + +/**************************************************************** +** +** test_attr_mult_read(): Test basic H5A (attribute) code. +** +****************************************************************/ +static void +test_attr_mult_read(hid_t fapl) +{ + hid_t fid1; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t space; /* Attribute dataspace */ + hid_t type; /* Attribute datatype */ + hid_t attr; /* Attribute ID */ + char attr_name[ATTR_NAME_LEN]; /* Buffer for attribute names */ + char temp_name[ATTR_NAME_LEN]; /* Buffer for mangling attribute names */ + int rank; /* Attribute rank */ + hsize_t dims[ATTR_MAX_DIMS]; /* Attribute dimensions */ + H5T_class_t t_class; /* Attribute datatype class */ + H5T_order_t order; /* Attribute datatype order */ + size_t size; /* Attribute datatype size as stored in file */ + int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */ + int read_data2[ATTR2_DIM1][ATTR2_DIM2] = {{0}}; /* Buffer for reading 2nd attribute */ + double read_data3[ATTR3_DIM1][ATTR3_DIM2][ATTR3_DIM3] = {{{0}}}; /* Buffer for reading 3rd attribute */ + ssize_t name_len; /* Length of attribute name */ + H5O_info2_t oinfo; /* Object info */ + int i, j, k; /* Local index values */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Attribute Functions\n")); + + /* Open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Verify the correct number of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, 3, "H5Oget_info3"); + + /* Open 1st attribute for the dataset */ + attr = + H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen_by_idx"); + + /* 
Verify Dataspace */ + space = H5Aget_space(attr); + CHECK(space, FAIL, "H5Aget_space"); + rank = H5Sget_simple_extent_ndims(space); + VERIFY(rank, ATTR1_RANK, "H5Sget_simple_extent_ndims"); + ret = H5Sget_simple_extent_dims(space, dims, NULL); + CHECK(ret, FAIL, "H5Sget_simple_extent_dims"); + if (dims[0] != ATTR1_DIM1) + TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR1_DIM1); + H5Sclose(space); + + /* Verify Datatype */ + type = H5Aget_type(attr); + CHECK(type, FAIL, "H5Aget_type"); + t_class = H5Tget_class(type); + VERIFY(t_class, H5T_INTEGER, "H5Tget_class"); + order = H5Tget_order(type); + VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_INT), H5T_order_t, "%d", "H5Tget_order"); + size = H5Tget_size(type); + VERIFY(size, H5Tget_size(H5T_NATIVE_INT), "H5Tget_size"); + H5Tclose(type); + + /* Read attribute information */ + ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); + CHECK(ret, FAIL, "H5Aread"); + + /* Verify values read in */ + for (i = 0; i < ATTR1_DIM1; i++) + if (attr_data1[i] != read_data1[i]) + TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, + attr_data1[i], i, read_data1[i]); + + /* Verify Name */ + name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); + VERIFY(name_len, HDstrlen(ATTR1_NAME), "H5Aget_name"); + if (HDstrcmp(attr_name, ATTR1_NAME) != 0) + TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR1_NAME); + + /* Verify Name with too small of a buffer */ + name_len = H5Aget_name(attr, HDstrlen(ATTR1_NAME), attr_name); + VERIFY(name_len, HDstrlen(ATTR1_NAME), "H5Aget_name"); + HDstrcpy(temp_name, ATTR1_NAME); /* make a copy of the name */ + temp_name[HDstrlen(ATTR1_NAME) - 1] = '\0'; /* truncate it to match the one retrieved */ + if (HDstrcmp(attr_name, temp_name) != 0) + TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, temp_name); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Open 2nd attribute for the dataset */ + attr = + H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)1, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen_by_idx"); + + /* Verify Dataspace */ + space = H5Aget_space(attr); + CHECK(space, FAIL, "H5Aget_space"); + rank = H5Sget_simple_extent_ndims(space); + VERIFY(rank, ATTR2_RANK, "H5Sget_simple_extent_ndims"); + ret = H5Sget_simple_extent_dims(space, dims, NULL); + CHECK(ret, FAIL, "H5Sget_simple_extent_dims"); + if (dims[0] != ATTR2_DIM1) + TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR2_DIM1); + if (dims[1] != ATTR2_DIM2) + TestErrPrintf("attribute dimensions different: dims[1]=%d, should be %d\n", (int)dims[1], ATTR2_DIM2); + H5Sclose(space); + + /* Verify Datatype */ + type = H5Aget_type(attr); + CHECK(type, FAIL, "H5Aget_type"); + t_class = H5Tget_class(type); + VERIFY(t_class, H5T_INTEGER, "H5Tget_class"); + order = H5Tget_order(type); + VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_INT), H5T_order_t, "%d", "H5Tget_order"); + size = H5Tget_size(type); + VERIFY(size, H5Tget_size(H5T_NATIVE_INT), "H5Tget_size"); + H5Tclose(type); + + /* Read attribute information */ + ret = H5Aread(attr, H5T_NATIVE_INT, read_data2); + CHECK(ret, FAIL, "H5Aread"); + + /* Verify values read in */ + for (i = 0; i < ATTR2_DIM1; i++) + for (j = 0; j < ATTR2_DIM2; j++) + if (attr_data2[i][j] != read_data2[i][j]) + TestErrPrintf("%d: attribute data different: attr_data2[%d][%d]=%d, 
read_data2[%d][%d]=%d\n", + __LINE__, i, j, attr_data2[i][j], i, j, read_data2[i][j]); + + /* Verify Name */ + name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); + VERIFY(name_len, HDstrlen(ATTR2_NAME), "H5Aget_name"); + if (HDstrcmp(attr_name, ATTR2_NAME) != 0) + TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR2_NAME); + + /* Verify Name with too small of a buffer */ + name_len = H5Aget_name(attr, HDstrlen(ATTR2_NAME), attr_name); + VERIFY(name_len, HDstrlen(ATTR2_NAME), "H5Aget_name"); + HDstrcpy(temp_name, ATTR2_NAME); /* make a copy of the name */ + temp_name[HDstrlen(ATTR2_NAME) - 1] = '\0'; /* truncate it to match the one retrieved */ + if (HDstrcmp(attr_name, temp_name) != 0) + TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, temp_name); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Open 2nd attribute for the dataset */ + attr = + H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen_by_idx"); + + /* Verify Dataspace */ + space = H5Aget_space(attr); + CHECK(space, FAIL, "H5Aget_space"); + rank = H5Sget_simple_extent_ndims(space); + VERIFY(rank, ATTR3_RANK, "H5Sget_simple_extent_ndims"); + ret = H5Sget_simple_extent_dims(space, dims, NULL); + CHECK(ret, FAIL, "H5Sget_simple_extent_dims"); + if (dims[0] != ATTR3_DIM1) + TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR3_DIM1); + if (dims[1] != ATTR3_DIM2) + TestErrPrintf("attribute dimensions different: dims[1]=%d, should be %d\n", (int)dims[1], ATTR3_DIM2); + if (dims[2] != ATTR3_DIM3) + TestErrPrintf("attribute dimensions different: dims[2]=%d, should be %d\n", (int)dims[2], ATTR3_DIM3); + H5Sclose(space); + + /* Verify Datatype */ + type = H5Aget_type(attr); + CHECK(type, FAIL, "H5Aget_type"); + t_class = H5Tget_class(type); + VERIFY(t_class, H5T_FLOAT, "H5Tget_class"); + order = H5Tget_order(type); + VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_DOUBLE), H5T_order_t, "%d", "H5Tget_order"); + size = H5Tget_size(type); + VERIFY(size, H5Tget_size(H5T_NATIVE_DOUBLE), "H5Tget_size"); + H5Tclose(type); + + /* Read attribute information */ + ret = H5Aread(attr, H5T_NATIVE_DOUBLE, read_data3); + CHECK(ret, FAIL, "H5Aread"); + + /* Verify values read in */ + for (i = 0; i < ATTR3_DIM1; i++) + for (j = 0; j < ATTR3_DIM2; j++) + for (k = 0; k < ATTR3_DIM3; k++) + if (!H5_DBL_ABS_EQUAL(attr_data3[i][j][k], read_data3[i][j][k])) + TestErrPrintf("%d: attribute data different: attr_data3[%d][%d][%d]=%f, " + "read_data3[%d][%d][%d]=%f\n", + __LINE__, i, j, k, attr_data3[i][j][k], i, j, k, read_data3[i][j][k]); + + /* Verify Name */ + name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); + VERIFY(name_len, HDstrlen(ATTR3_NAME), "H5Aget_name"); + if (HDstrcmp(attr_name, ATTR3_NAME) != 0) + TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR3_NAME); + + /* Verify Name with too small of a buffer */ + name_len = H5Aget_name(attr, HDstrlen(ATTR3_NAME), attr_name); + VERIFY(name_len, HDstrlen(ATTR3_NAME), "H5Aget_name"); + HDstrcpy(temp_name, ATTR3_NAME); /* make a copy of the name */ + temp_name[HDstrlen(ATTR3_NAME) - 1] = '\0'; /* truncate it to match the one retrieved */ + if (HDstrcmp(attr_name, temp_name) != 0) + TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, temp_name); + + /* Close attribute */ + ret = H5Aclose(attr); + 
CHECK(ret, FAIL, "H5Aclose"); + + /* Close dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_mult_read() */ + +/**************************************************************** +** +** attr_op1(): Attribute operator +** +****************************************************************/ +static herr_t +attr_op1(hid_t H5_ATTR_UNUSED loc_id, const char *name, const H5A_info_t H5_ATTR_UNUSED *ainfo, void *op_data) +{ + int *count = (int *)op_data; + herr_t ret = 0; + + switch (*count) { + case 0: + if (HDstrcmp(name, ATTR1_NAME) != 0) + TestErrPrintf("attribute name different: name=%s, should be %s\n", name, ATTR1_NAME); + (*count)++; + break; + + case 1: + if (HDstrcmp(name, ATTR2_NAME) != 0) + TestErrPrintf("attribute name different: name=%s, should be %s\n", name, ATTR2_NAME); + (*count)++; + break; + + case 2: + if (HDstrcmp(name, ATTR3_NAME) != 0) + TestErrPrintf("attribute name different: name=%s, should be %s\n", name, ATTR3_NAME); + (*count)++; + break; + + default: + ret = -1; + break; + } /* end switch() */ + + return (ret); +} /* end attr_op1() */ + +/**************************************************************** +** +** test_attr_iterate(): Test H5A (attribute) iterator code. +** +****************************************************************/ +static void +test_attr_iterate(hid_t fapl) +{ + hid_t file; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + int count; /* operator data for the iterator */ + H5O_info2_t oinfo; /* Object info */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Attribute Functions\n")); + + /* Open file */ + file = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(file, FAIL, "H5Fopen"); + + /* Create a dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create a new dataset */ + dataset = H5Dcreate2(file, DSET2_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Verify the correct number of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, 0, "H5Oget_info3"); + + /* Iterate over attributes on dataset */ + count = 0; + ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_op1, &count); + VERIFY(ret, 0, "H5Aiterate2"); + + /* Close dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Open existing dataset w/attributes */ + dataset = H5Dopen2(file, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Verify the correct number of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, 3, "H5Oget_info3"); + + /* Iterate over attributes on dataset */ + count = 0; + ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_op1, &count); + VERIFY(ret, 0, "H5Aiterate2"); + + /* Close dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_iterate() */ + +/**************************************************************** +** +** test_attr_delete(): Test H5A (attribute) code for deleting objects. 
+** +****************************************************************/ +static void +test_attr_delete(hid_t fapl) +{ + hid_t fid1; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t attr; /* Attribute ID */ + char attr_name[ATTR_NAME_LEN]; /* Buffer for attribute names */ + ssize_t name_len; /* Length of attribute name */ + H5O_info2_t oinfo; /* Object info */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Attribute Deletion Functions\n")); + + /* Open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Verify the correct number of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, 3, "H5Oget_info3"); +#ifndef NO_DELETE_NONEXISTENT_ATTRIBUTE + /* Try to delete bogus attribute */ + H5E_BEGIN_TRY + { + ret = H5Adelete(dataset, "Bogus"); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Adelete"); +#endif + /* Verify the correct number of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, 3, "H5Oget_info3"); + + /* Delete middle (2nd) attribute */ + ret = H5Adelete(dataset, ATTR2_NAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Verify the correct number of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, 2, "H5Oget_info3"); + + /* Open 1st attribute for the dataset */ + attr = + H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen_by_idx"); + + /* Verify Name */ + name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); + VERIFY(name_len, HDstrlen(ATTR1_NAME), "H5Aget_name"); + if (HDstrcmp(attr_name, ATTR1_NAME) != 0) + TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR1_NAME); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Open last (formally 3rd) attribute for the dataset */ + attr = + H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)1, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen_by_idx"); + + /* Verify Name */ + name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); + VERIFY(name_len, HDstrlen(ATTR3_NAME), "H5Aget_name"); + if (HDstrcmp(attr_name, ATTR3_NAME) != 0) + TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR3_NAME); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Delete first attribute */ + ret = H5Adelete(dataset, ATTR1_NAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Verify the correct number of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, 1, "H5Oget_info3"); + + /* Open last (formally 3rd) attribute for the dataset */ + attr = + H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen_by_idx"); + + /* Verify Name */ + name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); + VERIFY(name_len, HDstrlen(ATTR3_NAME), "H5Aget_name"); + if (HDstrcmp(attr_name, ATTR3_NAME) != 0) + TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", 
attr_name, ATTR3_NAME); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Delete first attribute */ + ret = H5Adelete(dataset, ATTR3_NAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Verify the correct number of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, 0, "H5Oget_info3"); + + /* Close dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_delete() */ + +/**************************************************************** +** +** test_attr_dtype_shared(): Test H5A (attribute) code for using +** shared datatypes in attributes. +** +****************************************************************/ +static void +test_attr_dtype_shared(hid_t fapl) +{ +#ifndef NO_SHARED_DATATYPES + hid_t file_id; /* File ID */ + hid_t dset_id; /* Dataset ID */ + hid_t space_id; /* Dataspace ID for dataset & attribute */ + hid_t type_id; /* Datatype ID for named datatype */ + hid_t attr_id; /* Attribute ID */ + int data = 8; /* Data to write */ + int rdata = 0; /* Read read in */ + H5O_info2_t oinfo; /* Object's information */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + herr_t ret; /* Generic return value */ +#else + (void)fapl; +#endif + /* Output message about test being performed */ + MESSAGE(5, ("Testing Shared Datatypes with Attributes - SKIPPED for now due to no support for shared " + "datatypes\n")); +#ifndef NO_SHARED_DATATYPES + /* Create a file */ + file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file_id, FAIL, "H5Fopen"); + + /* Close file */ + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + + /* Re-open file */ + file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(file_id, FAIL, "H5Fopen"); + + /* Create a datatype to commit and use */ + type_id = H5Tcopy(H5T_NATIVE_INT); + CHECK(type_id, FAIL, "H5Tcopy"); + + /* Commit datatype to file */ + ret = H5Tcommit2(file_id, TYPE1_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Check reference count on named datatype */ + ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 1, "H5Oget_info_by_name3"); + + /* Create dataspace for dataset */ + space_id = H5Screate(H5S_SCALAR); + CHECK(space_id, FAIL, "H5Screate"); + + /* Create dataset */ + dset_id = H5Dcreate2(file_id, DSET1_NAME, type_id, space_id, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dcreate2"); + + /* Check reference count on named datatype */ + ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 2, "H5Oget_info_by_name3"); + + /* Create attribute on dataset */ + attr_id = H5Acreate2(dset_id, ATTR1_NAME, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Acreate2"); + + /* Check reference count on named datatype */ + ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 3, 
"H5Oget_info_by_name3"); + + /* Close attribute */ + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + + /* Delete attribute */ + ret = H5Adelete(dset_id, ATTR1_NAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Check reference count on named datatype */ + ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 2, "H5Oget_info_by_name3"); + + /* Create attribute on dataset */ + attr_id = H5Acreate2(dset_id, ATTR1_NAME, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Acreate2"); + + /* Check reference count on named datatype */ + ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 3, "H5Oget_info_by_name3"); + + /* Write data into the attribute */ + ret = H5Awrite(attr_id, H5T_NATIVE_INT, &data); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close dataset */ + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataspace */ + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close datatype */ + ret = H5Tclose(type_id); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close file */ + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(file_id, FAIL, "H5Fopen"); + + /* Open dataset */ + dset_id = H5Dopen2(file_id, DSET1_NAME, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dopen2"); + + /* Open attribute */ + attr_id = H5Aopen(dset_id, ATTR1_NAME, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Aopen"); + + /* Read data from the attribute */ + ret = H5Aread(attr_id, H5T_NATIVE_INT, &rdata); + CHECK(ret, FAIL, "H5Aread"); + VERIFY(data, rdata, "H5Aread"); + + /* Close attribute */ + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close dataset */ + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Check reference count on named datatype */ + ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 3, "H5Oget_info_by_name3"); + + /* Unlink the dataset */ + ret = H5Ldelete(file_id, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Check reference count on named datatype */ + ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 1, "H5Oget_info_by_name3"); + + /* Unlink the named datatype */ + ret = H5Ldelete(file_id, TYPE1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close file */ + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); +#endif +#endif +} /* test_attr_dtype_shared() */ + +/**************************************************************** +** +** test_attr_duplicate_ids(): Test operations with more than +** one ID handles. 
+** +****************************************************************/ +static void +test_attr_duplicate_ids(hid_t fapl) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t gid1, gid2; /* Group ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hid_t attr, attr2; /* Attribute ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {ATTR1_DIM1}; + int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */ + int rewrite_data[ATTR1_DIM1] = {1234, -423, 9907256}; /* Test data for rewrite */ + int i; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing operations with two ID handles\n")); + + /*----------------------------------------------------------------------------------- + * Create an attribute in a new file and fill it with fill value. + */ + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Create dataspace for attribute */ + sid2 = H5Screate_simple(ATTR1_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Try to create an attribute on the dataset */ + attr = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Open the attribute just created and get a second ID */ + attr2 = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT); + CHECK(attr2, FAIL, "H5Aopen"); + + /* Close attribute */ + ret = H5Aclose(attr2); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /*----------------------------------------------------------------------------------- + * Reopen the file and verify the fill value for attribute. Also write + * some real data. 
+ */ + + /* Open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Open first attribute for the dataset */ + attr = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); + + /* Read attribute with fill value */ + ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); + CHECK(ret, FAIL, "H5Aread"); + + /* Verify values read in */ + for (i = 0; i < ATTR1_DIM1; i++) + if (0 != read_data1[i]) + TestErrPrintf("%d: attribute data different: read_data1[%d]=%d\n", __LINE__, i, read_data1[i]); + + /* Open attribute for the second time */ + attr2 = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); + + /* Write attribute information */ + ret = H5Awrite(attr2, H5T_NATIVE_INT, attr_data1); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr2); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /*----------------------------------------------------------------------------------- + * Reopen the file and verify the data. Also rewrite the data and verify it. + */ + + /* Open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Open first attribute for the dataset */ + attr = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); + + /* Read attribute information */ + ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); + CHECK(ret, FAIL, "H5Aread"); + + /* Verify values read in */ + for (i = 0; i < ATTR1_DIM1; i++) + if (attr_data1[i] != read_data1[i]) + TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, + attr_data1[i], i, read_data1[i]); + + /* Open attribute for the second time */ + attr2 = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); + + /* Write attribute information */ + ret = H5Awrite(attr2, H5T_NATIVE_INT, rewrite_data); + CHECK(ret, FAIL, "H5Awrite"); + + /* Read attribute information */ + ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); + CHECK(ret, FAIL, "H5Aread"); + + /* Verify values read in */ + for (i = 0; i < ATTR1_DIM1; i++) + if (read_data1[i] != rewrite_data[i]) + TestErrPrintf("%d: attribute data different: read_data1[%d]=%d, rewrite_data[%d]=%d\n", __LINE__, + i, read_data1[i], i, rewrite_data[i]); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Aclose(attr2); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /*----------------------------------------------------------------------------------- + * Verify that the attribute being pointed to by different paths shares + * the same data. 
+ */ + /* Open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Create a group */ + gid1 = H5Gcreate2(fid1, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gcreate2"); + + /* Create hard link to the first group */ + ret = H5Lcreate_hard(gid1, GROUP1_NAME, H5L_SAME_LOC, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_hard"); + + /* Try to create an attribute on the group */ + attr = H5Acreate2(gid1, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Open the hard link just created */ + gid2 = H5Gopen2(fid1, GROUP2_NAME, H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gopen2"); + + /* Open the attribute of the group for the second time */ + attr2 = H5Aopen(gid2, ATTR2_NAME, H5P_DEFAULT); + CHECK(attr2, FAIL, "H5Aopen"); + + /* Write attribute information with the first attribute handle */ + ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1); + CHECK(ret, FAIL, "H5Awrite"); + + /* Read attribute information with the second attribute handle */ + ret = H5Aread(attr2, H5T_NATIVE_INT, read_data1); + CHECK(ret, FAIL, "H5Aread"); + + /* Verify values read in */ + for (i = 0; i < ATTR1_DIM1; i++) + if (attr_data1[i] != read_data1[i]) + TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, + attr_data1[i], i, read_data1[i]); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Aclose(attr2); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close group */ + ret = H5Gclose(gid1); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close Attribute dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_duplicate_ids() */ + +/**************************************************************** +** +** test_attr_dense_verify(): Test basic H5A (attribute) code. 
+** Verify attributes on object +** +****************************************************************/ +static int +test_attr_dense_verify(hid_t loc_id, unsigned max_attr) +{ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + hid_t attr; /* Attribute ID */ + unsigned value; /* Attribute value */ + unsigned u; /* Local index variable */ + int old_nerrs; /* Number of errors when entering this check */ + herr_t ret; /* Generic return value */ + + /* Retrieve the current # of reported errors */ + old_nerrs = nerrors; + + /* Re-open all the attributes by name and verify the data */ + for (u = 0; u < max_attr; u++) { + /* Open attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Aopen(loc_id, attrname, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); + + /* Read data from the attribute */ + ret = H5Aread(attr, H5T_NATIVE_UINT, &value); + CHECK(ret, FAIL, "H5Aread"); + VERIFY(value, u, "H5Aread"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ + + /* Re-open all the attributes by index and verify the data */ + for (u = 0; u < max_attr; u++) { + ssize_t name_len; /* Length of attribute name */ + char check_name[ATTR_NAME_LEN]; /* Buffer for checking attribute names */ + + /* Open attribute */ + attr = H5Aopen_by_idx(loc_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, H5P_DEFAULT, + H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen_by_idx"); + + /* Verify Name */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, check_name); + VERIFY(name_len, HDstrlen(attrname), "H5Aget_name"); + if (HDstrcmp(check_name, attrname) != 0) + TestErrPrintf("attribute name different: attrname = '%s', should be '%s'\n", check_name, + attrname); + + /* Read data from the attribute */ + ret = H5Aread(attr, H5T_NATIVE_UINT, &value); + CHECK(ret, FAIL, "H5Aread"); + VERIFY(value, u, "H5Aread"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ + + /* Retrieve current # of errors */ + if (old_nerrs == nerrors) + return (0); + else + return (-1); +} /* test_attr_dense_verify() */ + +/**************************************************************** +** +** test_attr_dense_create(): Test basic H5A (attribute) code. +** Tests "dense" attribute storage creation +** +****************************************************************/ +static void +test_attr_dense_create(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned max_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense; /* Minimum # of attributes to store "densely" */ +#if 0 + htri_t is_dense; /* Are attributes stored densely? 
*/ +#endif + unsigned u; /* Local index variable */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Dense Attribute Storage Creation\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* need DCPL to query the group creation properties */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Create a dataset */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add attributes, until just before converting to dense storage */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add one more attribute, to push into "dense" storage */ + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE + /* Attempt to add attribute again, which should fail */ + H5E_BEGIN_TRY + { + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(attr, FAIL, "H5Acreate2"); +#endif + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Unlink dataset with attributes */ + 
ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif +} /* test_attr_dense_create() */ + +/**************************************************************** +** +** test_attr_dense_open(): Test basic H5A (attribute) code. +** Tests opening attributes in "dense" storage +** +****************************************************************/ +static void +test_attr_dense_open(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned max_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense; /* Minimum # of attributes to store "densely" */ +#if 0 + htri_t is_dense; /* Are attributes stored densely? */ +#endif + unsigned u; /* Local index variable */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Opening Attributes in Dense Storage\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* need DCPL to query the group creation properties */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Enable creation order tracking on attributes, so creation order tests work */ + ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add attributes, until just before converting to dense storage */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = 
H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Verify attributes written so far */ + ret = test_attr_dense_verify(dataset, u); + CHECK(ret, FAIL, "test_attr_dense_verify"); + } /* end for */ +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add one more attribute, to push into "dense" storage */ + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Verify all the attributes written */ + ret = test_attr_dense_verify(dataset, (u + 1)); + CHECK(ret, FAIL, "test_attr_dense_verify"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Unlink dataset with attributes */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif +} /* test_attr_dense_open() */ + +/**************************************************************** +** +** test_attr_dense_delete(): Test basic H5A (attribute) code. +** Tests deleting attributes in "dense" storage +** +****************************************************************/ +static void +test_attr_dense_delete(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned max_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense; /* Minimum # of attributes to store "densely" */ +#if 0 + htri_t is_dense; /* Are attributes stored densely? */ +#endif + unsigned u; /* Local index variable */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + H5O_info2_t oinfo; /* Object info */ + int use_min_dset_oh = (dcpl_g != H5P_DEFAULT); + herr_t ret; /* Generic return value */ + + /* Only run this test for sec2/default driver */ + if (!h5_using_default_driver(NULL)) + return; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Deleting Attributes in Dense Storage\n")); + + if (use_min_dset_oh) { /* using minimized dataset headers */ + /* modify fcpl... 
+ * sidestep "bug" where file space is lost with minimized dset ohdrs + */ + fcpl = H5Pcopy(fcpl); + CHECK(fcpl, FAIL, "H5Pcopy"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, 1); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + } + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + if (use_min_dset_oh) + CHECK(H5Pclose(fcpl), FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* need DCPL to query the group creation properties */ + if (use_min_dset_oh) { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + else { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + + /* Enable creation order tracking on attributes, so creation order tests work */ + ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add attributes, until well into dense storage */ + for (u = 0; u < (max_compact * 2); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Check # of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3"); + } /* end for */ +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open dataset */ + dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Delete attributes until the attributes revert to compact storage again */ + for (u--; u >= min_dense; u--) { + /* Delete attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); + + /* Verify attributes still left */ + ret = 
test_attr_dense_verify(dataset, u); + CHECK(ret, FAIL, "test_attr_dense_verify"); + } /* end for */ +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + /* Delete one more attribute, which should cause reversion to compact storage */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Verify attributes still left */ + ret = test_attr_dense_verify(dataset, (u - 1)); + CHECK(ret, FAIL, "test_attr_dense_verify"); + + /* Delete another attribute, to verify deletion in compact storage */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", (u - 1)); + ret = H5Adelete(dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Verify attributes still left */ + ret = test_attr_dense_verify(dataset, (u - 2)); + CHECK(ret, FAIL, "test_attr_dense_verify"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Unlink dataset with attributes */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif +} /* test_attr_dense_delete() */ + +/**************************************************************** +** +** test_attr_dense_rename(): Test basic H5A (attribute) code. +** Tests renaming attributes in "dense" storage +** +****************************************************************/ +static void +test_attr_dense_rename(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + char new_attrname[NAME_BUF_SIZE]; /* New name of attribute */ + unsigned max_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense; /* Minimum # of attributes to store "densely" */ +#if 0 + htri_t is_dense; /* Are attributes stored densely? */ + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + H5O_info2_t oinfo; /* Object info */ + unsigned u; /* Local index variable */ + int use_min_dset_oh = (dcpl_g != H5P_DEFAULT); + unsigned use_corder; /* Track creation order or not */ + herr_t ret; /* Generic return value */ + + /* Only run this test for sec2/default driver */ + if (!h5_using_default_driver(NULL)) + return; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Renaming Attributes in Dense Storage\n")); + + if (use_min_dset_oh) { /* using minimized dataset headers */ + /* modify fcpl... 
+ * sidestep "bug" where file space is lost with minimized dset ohdrs + */ + fcpl = H5Pcopy(fcpl); + CHECK(fcpl, FAIL, "H5Pcopy"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, 1); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + } + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + if (use_min_dset_oh) + CHECK(H5Pclose(fcpl), FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fopen"); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, H5I_INVALID_HID, "H5Screate"); + + /* need DCPL to query the group creation properties */ + if (use_min_dset_oh) { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, H5I_INVALID_HID, "H5Pcopy"); + } + else { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate"); + } + + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Using creation order or not */ + for (use_corder = FALSE; use_corder <= TRUE; use_corder++) { + + if (use_corder) { + ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + } + + /* Create a dataset */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add attributes, until well into dense storage */ + for (u = 0; u < (max_compact * 2); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Rename attribute */ + HDsnprintf(new_attrname, sizeof(new_attrname), "new attr %02u", u); + + /* Rename attribute */ + ret = H5Arename_by_name(fid, DSET1_NAME, attrname, new_attrname, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Arename_by_name"); + + /* Check # of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3"); + } /* end for */ +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + if (!use_corder) { + /* Unlink dataset with attributes */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + } + + } /* end for use_corder */ + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, 
FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fopen"); + + /* Open dataset */ + dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + /* Verify renamed attributes */ + for (u = 0; u < (max_compact * 2); u++) { + unsigned value; /* Attribute value */ + + /* Open attribute */ + HDsnprintf(attrname, sizeof(attrname), "new attr %02u", u); + attr = H5Aopen(dataset, attrname, H5P_DEFAULT); + CHECK(attr, H5I_INVALID_HID, "H5Aopen"); + + /* Read data from the attribute */ + ret = H5Aread(attr, H5T_NATIVE_UINT, &value); + CHECK(ret, FAIL, "H5Aread"); + VERIFY(value, u, "H5Aread"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Unlink dataset with attributes */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif +} /* test_attr_dense_rename() */ + +/**************************************************************** +** +** test_attr_dense_unlink(): Test basic H5A (attribute) code. +** Tests unlinking object with attributes in "dense" storage +** +****************************************************************/ +static void +test_attr_dense_unlink(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned max_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense; /* Minimum # of attributes to store "densely" */ +#if 0 + htri_t is_dense; /* Are attributes stored densely? */ + size_t mesg_count; /* # of shared messages */ + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + H5O_info2_t oinfo; /* Object info */ + unsigned u; /* Local index variable */ + int use_min_dset_oh = (dcpl_g != H5P_DEFAULT); + herr_t ret; /* Generic return value */ + + /* Only run this test for sec2/default driver */ + if (!h5_using_default_driver(NULL)) + return; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Unlinking Object with Attributes in Dense Storage\n")); + + if (use_min_dset_oh) { /* using minimized dataset headers */ + /* modify fcpl... 
+ * sidestep "bug" where file space is lost with minimized dset ohdrs + */ + fcpl = H5Pcopy(fcpl); + CHECK(fcpl, FAIL, "H5Pcopy"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, 1); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + } + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + if (use_min_dset_oh) + CHECK(H5Pclose(fcpl), FAIL, "H5Pclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* need DCPL to query the group creation properties */ + if (use_min_dset_oh) { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + else { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + + /* Create a dataset */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add attributes, until well into dense storage */ + for (u = 0; u < (max_compact * 2); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Check # of attributes */ + ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info3"); + VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3"); + } /* end for */ +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Unlink dataset */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); +#if 0 + /* Check on dataset's attribute storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); +#endif + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif +} /* test_attr_dense_unlink() */ + 
+/**************************************************************** +** +** test_attr_dense_limits(): Test basic H5A (attribute) code. +** Tests attribute in "dense" storage limits +** +****************************************************************/ +static void +test_attr_dense_limits(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned max_compact, rmax_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense, rmin_dense; /* Minimum # of attributes to store "densely" */ +#if 0 + htri_t is_dense; /* Are attributes stored densely? */ +#endif + unsigned u; /* Local index variable */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Phase Change Limits For Attributes in Dense Storage\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* need DCPL to query the group creation properties */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Change limits on compact/dense attribute storage */ + max_compact = 0; + min_dense = 0; + ret = H5Pset_attr_phase_change(dcpl, max_compact, min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &rmax_compact, &rmin_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + VERIFY(rmax_compact, max_compact, "H5Pget_attr_phase_change"); + VERIFY(rmin_dense, min_dense, "H5Pget_attr_phase_change"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + + /* Add first attribute, which should be immediately in dense storage */ + + /* Create attribute */ + u = 0; + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + + /* Add 
second attribute, to allow deletions to be checked easily */ + + /* Create attribute */ + u = 1; + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + + /* Delete second attribute, attributes should still be stored densely */ + + /* Delete attribute */ + ret = H5Adelete(dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + + /* Delete first attribute, attributes should not be stored densely */ + + /* Delete attribute */ + u = 0; + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Unlink dataset */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif +} /* test_attr_dense_limits() */ + +/**************************************************************** +** +** test_attr_dense_dup_ids(): Test operations with multiple ID +** handles with "dense" attribute storage creation +** +****************************************************************/ +static void +test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t gid1, gid2; /* Group ID */ + hid_t sid, sid2; /* Dataspace ID */ + hid_t attr, attr2, add_attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + hsize_t dims[] = {ATTR1_DIM1}; + int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading attribute */ + int rewrite_data[ATTR1_DIM1] = {1234, -423, 9907256}; /* Test data for rewrite */ + unsigned scalar_data = 1317; /* scalar data for attribute */ + unsigned read_scalar; /* variable for reading attribute*/ + unsigned max_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense; /* Minimum # of attributes to store "densely" */ +#if 0 + htri_t is_dense; /* Are attributes stored densely? */ +#endif + unsigned u, i; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing operations with two IDs for Dense Storage\n")); + + /*----------------------------------------------------------------------------------- + * Create an attribute in dense storage and fill it with fill value. 
+ */ + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* need DCPL to query the group creation properties */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Create a dataset */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add attributes, until just before converting to dense storage */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add one more attribute, to push into "dense" storage */ + /* Create dataspace for attribute */ + sid2 = H5Screate_simple(ATTR1_RANK, dims, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + /* Open the attribute just created and get a second ID */ + attr2 = H5Aopen(dataset, attrname, H5P_DEFAULT); + CHECK(attr2, FAIL, "H5Aopen"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Aclose(attr2); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /*----------------------------------------------------------------------------------- + * Reopen the file and verify the fill value for attribute. Also write + * some real data. 
+     */
+    /* Open file */
+    fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+    CHECK(fid, FAIL, "H5Fopen");
+
+    /* Open the dataset */
+    dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+    CHECK(dataset, FAIL, "H5Dopen2");
+#if 0
+    /* Check on dataset's attribute storage status */
+    is_dense = H5O__is_attr_dense_test(dataset);
+    VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+    /* Open first attribute for the dataset */
+    attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
+    CHECK(attr, FAIL, "H5Aopen");
+
+    /* Read attribute with fill value */
+    ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+    CHECK(ret, FAIL, "H5Aread");
+
+    /* Verify values read in */
+    for (i = 0; i < ATTR1_DIM1; i++)
+        if (0 != read_data1[i])
+            TestErrPrintf("%d: attribute data different: read_data1[%d]=%d\n", __LINE__, i, read_data1[i]);
+
+    /* Open attribute for the second time */
+    attr2 = H5Aopen(dataset, attrname, H5P_DEFAULT);
+    CHECK(attr2, FAIL, "H5Aopen");
+
+    /* Write attribute information */
+    ret = H5Awrite(attr2, H5T_NATIVE_INT, attr_data1);
+    CHECK(ret, FAIL, "H5Awrite");
+
+    /* Close attribute */
+    ret = H5Aclose(attr2);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close file */
+    ret = H5Fclose(fid);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /*-----------------------------------------------------------------------------------
+     * Reopen the file and verify the data. Also rewrite the data and verify it.
+     */
+    /* Open file */
+    fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+    CHECK(fid, FAIL, "H5Fopen");
+
+    /* Open the dataset */
+    dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+    CHECK(dataset, FAIL, "H5Dopen2");
+#if 0
+    /* Check on dataset's attribute storage status */
+    is_dense = H5O__is_attr_dense_test(dataset);
+    VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+    /* Open first attribute for the dataset */
+    attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
+    CHECK(attr, FAIL, "H5Aopen");
+
+    /* Read attribute information */
+    ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+    CHECK(ret, FAIL, "H5Aread");
+
+    /* Verify values read in */
+    for (i = 0; i < ATTR1_DIM1; i++)
+        if (attr_data1[i] != read_data1[i])
+            TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i,
+                          attr_data1[i], i, read_data1[i]);
+
+    /* Open attribute for the second time */
+    attr2 = H5Aopen(dataset, attrname, H5P_DEFAULT);
+    CHECK(attr2, FAIL, "H5Aopen");
+
+    /* Write attribute information with the second ID */
+    ret = H5Awrite(attr2, H5T_NATIVE_INT, rewrite_data);
+    CHECK(ret, FAIL, "H5Awrite");
+
+    /* Read attribute information with the first ID */
+    ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+    CHECK(ret, FAIL, "H5Aread");
+
+    /* Verify values read in */
+    for (i = 0; i < ATTR1_DIM1; i++)
+        if (read_data1[i] != rewrite_data[i])
+            TestErrPrintf("%d: attribute data different: read_data1[%d]=%d, rewrite_data[%d]=%d\n", __LINE__,
+                          i, read_data1[i], i, rewrite_data[i]);
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    ret = H5Aclose(attr2);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close file */
+    ret = H5Fclose(fid);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /*-----------------------------------------------------------------------------------
+     * Open the attribute by index. Verify the data is shared when the attribute
+     * is opened twice.
+     */
+    /* Open file */
+    fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+    CHECK(fid, FAIL, "H5Fopen");
+
+    /* Open the dataset */
+    dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+    CHECK(dataset, FAIL, "H5Dopen2");
+#if 0
+    /* Check on dataset's attribute storage status */
+    is_dense = H5O__is_attr_dense_test(dataset);
+    VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+    /* Open first attribute for the dataset */
+    attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)4, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr, FAIL, "H5Aopen_by_idx");
+
+    /* Open attribute for the second time */
+    attr2 = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)4, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr2, FAIL, "H5Aopen_by_idx");
+
+    /* Write attribute information with the second ID */
+    ret = H5Awrite(attr2, H5T_NATIVE_UINT, &scalar_data);
+    CHECK(ret, FAIL, "H5Awrite");
+
+    /* Read attribute information with the first ID */
+    ret = H5Aread(attr, H5T_NATIVE_INT, &read_scalar);
+    CHECK(ret, FAIL, "H5Aread");
+
+    /* Verify values read in */
+    if (read_scalar != scalar_data)
+        TestErrPrintf("%d: attribute data different: read_scalar=%d, scalar_data=%d\n", __LINE__, read_scalar,
+                      scalar_data);
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    ret = H5Aclose(attr2);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close file */
+    ret = H5Fclose(fid);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /*-----------------------------------------------------------------------------------
+     * Open one attribute. As it remains open, delete some attributes. The
+     * attribute storage should switch from dense to compact. Then open the
+     * same attribute for the second time and verify that the attribute data
+     * is shared.
+     */
+    /* Open file */
+    fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+    CHECK(fid, FAIL, "H5Fopen");
+
+    /* Open the dataset */
+    dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+    CHECK(dataset, FAIL, "H5Dopen2");
+#if 0
+    /* Check on dataset's attribute storage status */
+    is_dense = H5O__is_attr_dense_test(dataset);
+    VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+    /* Open attribute of the dataset for the first time */
+    attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)2, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr, FAIL, "H5Aopen_by_idx");
+
+    /* Delete a few attributes until the storage switches to compact */
+    for (u = max_compact; u >= min_dense - 1; u--) {
+        ret = H5Adelete_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)u, H5P_DEFAULT);
+        CHECK(ret, FAIL, "H5Adelete_by_idx");
+    }
+#if 0
+    /* Check on dataset's attribute storage status */
+    is_dense = H5O__is_attr_dense_test(dataset);
+    VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+    /* Open attribute for the second time */
+    attr2 = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)2, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr2, FAIL, "H5Aopen_by_idx");
+
+    /* Write attribute information with the second ID */
+    ret = H5Awrite(attr2, H5T_NATIVE_UINT, &scalar_data);
+    CHECK(ret, FAIL, "H5Awrite");
+
+    /* Read attribute information with the first ID */
+    ret = H5Aread(attr, H5T_NATIVE_INT, &read_scalar);
+    CHECK(ret, FAIL, "H5Aread");
+
+    /* Verify values read in */
+    if (read_scalar != scalar_data)
+        TestErrPrintf("%d: attribute data different: read_scalar=%d, scalar_data=%d\n", __LINE__, read_scalar,
+                      scalar_data);
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    ret = H5Aclose(attr2);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close file */
+    ret = H5Fclose(fid);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /*-----------------------------------------------------------------------------------
+     * Open one attribute. As it remains open, create some attributes. The
+     * attribute storage should switch from compact to dense. Then open the
+     * same attribute for the second time and verify that the attribute data
+     * is shared.
+     */
+    /* Open file */
+    fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+    CHECK(fid, FAIL, "H5Fopen");
+
+    /* Open the dataset */
+    dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+    CHECK(dataset, FAIL, "H5Dopen2");
+#if 0
+    /* Check on dataset's attribute storage status */
+    is_dense = H5O__is_attr_dense_test(dataset);
+    VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+    /* Open attribute of the dataset for the first time */
+    attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)3, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr, FAIL, "H5Aopen_by_idx");
+
+    /* Create a few attributes until the storage switches to dense */
+    for (u = min_dense - 1; u <= max_compact; u++) {
+        /* Create attribute */
+        HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+        add_attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+        CHECK(add_attr, FAIL, "H5Acreate2");
+
+        /* Write data into the attribute */
+        ret = H5Awrite(add_attr, H5T_NATIVE_UINT, &u);
+        CHECK(ret, FAIL, "H5Awrite");
+
+        /* Close attribute */
+        ret = H5Aclose(add_attr);
+        CHECK(ret, FAIL, "H5Aclose");
+    }
+#if 0
+    /* Check on dataset's attribute storage status */
+    is_dense = H5O__is_attr_dense_test(dataset);
+    VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+    /* Open attribute for the second time */
+    attr2 = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)3, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr2, FAIL, "H5Aopen_by_idx");
+
+    /* Write attribute information with the second ID */
+    ret = H5Awrite(attr2, H5T_NATIVE_UINT, &scalar_data);
+    CHECK(ret, FAIL, "H5Awrite");
+
+    /* Read attribute information with the first ID */
+    ret = H5Aread(attr, H5T_NATIVE_INT, &read_scalar);
+    CHECK(ret, FAIL, "H5Aread");
+
+    /* Verify values read in */
+    if (read_scalar != scalar_data)
+        TestErrPrintf("%d: attribute data different: read_scalar=%d, scalar_data=%d\n", __LINE__, read_scalar,
+                      scalar_data);
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    ret = H5Aclose(attr2);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close file */
+    ret = H5Fclose(fid);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /*-----------------------------------------------------------------------------------
+     * Verify that the attribute being pointed to by different paths shares
+     * the same data.
+ */ + /* Open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create a group */ + gid1 = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gcreate2"); + + /* Create hard link to the first group */ + ret = H5Lcreate_hard(gid1, GROUP1_NAME, H5L_SAME_LOC, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_hard"); + + /* Add attributes, until just before converting to dense storage */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(gid1, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ + + /* Try to create another attribute to make dense storage */ + attr = H5Acreate2(gid1, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check on group's attribute storage status */ + is_dense = H5O__is_attr_dense_test(gid1); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + /* Open the hard link just created */ + gid2 = H5Gopen2(fid, GROUP2_NAME, H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gopen2"); + + /* Open the attribute of the group for the second time */ + attr2 = H5Aopen(gid2, ATTR2_NAME, H5P_DEFAULT); + CHECK(attr2, FAIL, "H5Aopen"); + + /* Write attribute information with the first attribute handle */ + ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1); + CHECK(ret, FAIL, "H5Awrite"); + + /* Read attribute information with the second attribute handle */ + ret = H5Aread(attr2, H5T_NATIVE_INT, read_data1); + CHECK(ret, FAIL, "H5Aread"); + + /* Verify values read in */ + for (i = 0; i < ATTR1_DIM1; i++) + if (attr_data1[i] != read_data1[i]) + TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, + attr_data1[i], i, read_data1[i]); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Aclose(attr2); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close group */ + ret = H5Gclose(gid1); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close Attribute dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_dense_dup_ids() */ + +/**************************************************************** +** +** test_attr_big(): Test basic H5A (attribute) code. 
+** Tests storing "big" attribute in dense storage immediately, if available +** +****************************************************************/ +static void +test_attr_big(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t big_sid; /* "Big" dataspace ID */ + hsize_t dims[ATTR6_RANK] = {ATTR6_DIM1, ATTR6_DIM2, ATTR6_DIM3}; /* Attribute dimensions */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned max_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense; /* Minimum # of attributes to store "densely" */ + unsigned nshared_indices; /* # of shared message indices */ + H5F_libver_t low, high; /* File format bounds */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? */ +#endif + unsigned u; /* Local index variable */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Storing 'Big' Attributes in Dense Storage\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create dataspace for dataset & "small" attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create "big" dataspace for "big" attributes */ + big_sid = H5Screate_simple(ATTR6_RANK, dims, NULL); + CHECK(big_sid, FAIL, "H5Screate_simple"); + + /* need DCPL to query the group creation properties */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Retrieve # of shared message indices (ie. 
whether attributes are shared or not) */ + ret = H5Pget_shared_mesg_nindexes(fcpl, &nshared_indices); + CHECK(ret, FAIL, "H5Pget_shared_mesg_nindexes"); + + /* Retrieve the format bounds for creating objects in the file */ + ret = H5Pget_libver_bounds(fapl, &low, &high); + CHECK(ret, FAIL, "H5Pget_libver_bounds"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + + /* Add first "small" attribute, which should be in compact storage */ + + /* Create attribute */ + u = 0; + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + + /* Add second "small" attribute, which should stay in compact storage */ + + /* Create attribute */ + u = 1; + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + + /* Add first "big" attribute, which should push storage into dense form */ + + /* Create attribute */ + u = 2; + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, big_sid, H5P_DEFAULT, H5P_DEFAULT); + if (low == H5F_LIBVER_LATEST || attr >= 0) { + CHECK(attr, FAIL, "H5Acreate2"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Check on dataset's attribute storage status */ + /* (when attributes are shared, the "big" attribute goes into the shared + * message heap instead of forcing the attribute storage into the dense + * form - QAK) + */ +#if 0 + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, (nshared_indices ? 
FALSE : TRUE), "H5O__is_attr_dense_test"); +#endif + + /* Add second "big" attribute, which should leave storage in dense form */ + + /* Create attribute */ + u = 3; + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, big_sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Check on dataset's attribute storage status */ + /* (when attributes are shared, the "big" attribute goes into the shared + * message heap instead of forcing the attribute storage into the dense + * form - QAK) + */ +#if 0 + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, (nshared_indices ? FALSE : TRUE), "H5O__is_attr_dense_test"); +#endif + + /* Delete second "small" attribute, attributes should still be stored densely */ + + /* Delete attribute */ + u = 1; + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, (nshared_indices ? FALSE : TRUE), "H5O__is_attr_dense_test"); +#endif + + /* Delete second "big" attribute, attributes should still be stored densely */ + + /* Delete attribute */ + u = 3; + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, (nshared_indices ? 
FALSE : TRUE), "H5O__is_attr_dense_test"); +#endif + + /* Delete first "big" attribute, attributes should _not_ be stored densely */ + + /* Delete attribute */ + u = 2; + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + + /* Delete first "small" attribute, should be no attributes now */ + + /* Delete attribute */ + u = 0; + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); +#endif + } /* end if */ +#if 0 + else { + /* Shouldn't be able to create "big" attributes with older version of format */ + VERIFY(attr, FAIL, "H5Acreate2"); + + /* Check on dataset's attribute storage status */ + /* (when attributes are shared, the "big" attribute goes into the shared + * message heap instead of forcing the attribute storage into the dense + * form - QAK) + */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + } /* end else */ +#endif + + /* Close dataspaces */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(big_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Unlink dataset */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif +} /* test_attr_big() */ + +/**************************************************************** +** +** test_attr_null_space(): Test basic H5A (attribute) code. 
+** Tests storing attribute with "null" dataspace +** +****************************************************************/ +static void +test_attr_null_space(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t null_sid; /* "null" dataspace ID */ + hid_t attr_sid; /* Attribute's dataspace ID */ + hid_t attr; /* Attribute ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned value; /* Attribute value */ + htri_t cmp; /* Results of comparison */ +#if 0 + hsize_t storage_size; /* Size of storage for attribute */ +#endif + H5A_info_t ainfo; /* Attribute info */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Storing Attributes with 'null' dataspace\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create dataspace for dataset attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create "null" dataspace for attribute */ + null_sid = H5Screate(H5S_NULL); + CHECK(null_sid, FAIL, "H5Screate"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Add attribute with 'null' dataspace */ + + /* Create attribute */ + HDstrcpy(attrname, "null attr"); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, null_sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Try to read data from the attribute */ + /* (shouldn't fail, but should leave buffer alone) */ + value = 23; + ret = H5Aread(attr, H5T_NATIVE_UINT, &value); + CHECK(ret, FAIL, "H5Aread"); + VERIFY(value, 23, "H5Aread"); + + /* Get the dataspace for the attribute and make certain it's 'null' */ + attr_sid = H5Aget_space(attr); + CHECK(attr_sid, FAIL, "H5Aget_space"); + + /* Compare the dataspaces */ + cmp = H5Sextent_equal(attr_sid, null_sid); + CHECK(cmp, FAIL, "H5Sextent_equal"); + VERIFY(cmp, TRUE, "H5Sextent_equal"); + + /* Close dataspace */ + ret = H5Sclose(attr_sid); + CHECK(ret, FAIL, "H5Sclose"); +#if 0 + /* Check the storage size for the attribute */ + storage_size = H5Aget_storage_size(attr); + VERIFY(storage_size, 0, "H5Aget_storage_size"); +#endif + /* Get the attribute info */ + ret = H5Aget_info(attr, &ainfo); + CHECK(ret, FAIL, "H5Aget_info"); +#if 0 + VERIFY(ainfo.data_size, storage_size, "H5Aget_info"); +#endif + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Add another attribute with 'null' dataspace */ + + /* Create attribute */ + HDstrcpy(attrname, "null attr #2"); + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, null_sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Try to write data to the attribute */ + /* (shouldn't fail, but should leave buffer alone) */ + value = 23; + ret = H5Awrite(attr, H5T_NATIVE_UINT, &value); + CHECK(ret, FAIL, "H5Awrite"); + VERIFY(value, 23, 
"H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file and check on the attributes */ + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open dataset */ + dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Open first attribute */ + HDstrcpy(attrname, "null attr #2"); + attr = H5Aopen(dataset, attrname, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); + + /* Try to read data from the attribute */ + /* (shouldn't fail, but should leave buffer alone) */ + value = 23; + ret = H5Aread(attr, H5T_NATIVE_UINT, &value); + CHECK(ret, FAIL, "H5Aread"); + VERIFY(value, 23, "H5Aread"); + + /* Get the dataspace for the attribute and make certain it's 'null' */ + attr_sid = H5Aget_space(attr); + CHECK(attr_sid, FAIL, "H5Aget_space"); + + /* Compare the dataspaces */ + cmp = H5Sextent_equal(attr_sid, null_sid); + CHECK(cmp, FAIL, "H5Sextent_equal"); + VERIFY(cmp, TRUE, "H5Sextent_equal"); + + /* Close dataspace */ + ret = H5Sclose(attr_sid); + CHECK(ret, FAIL, "H5Sclose"); +#if 0 + /* Check the storage size for the attribute */ + storage_size = H5Aget_storage_size(attr); + VERIFY(storage_size, 0, "H5Aget_storage_size"); +#endif + /* Get the attribute info */ + ret = H5Aget_info(attr, &ainfo); + CHECK(ret, FAIL, "H5Aget_info"); +#if 0 + VERIFY(ainfo.data_size, storage_size, "H5Aget_info"); +#endif + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Open second attribute */ + HDstrcpy(attrname, "null attr"); + attr = H5Aopen(dataset, attrname, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); + + /* Try to write data to the attribute */ + /* (shouldn't fail, but should leave buffer alone) */ + value = 23; + ret = H5Awrite(attr, H5T_NATIVE_UINT, &value); + CHECK(ret, FAIL, "H5Awrite"); + VERIFY(value, 23, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Unlink dataset */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close dataspaces */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(null_sid); + CHECK(ret, FAIL, "H5Sclose"); + +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif +} /* test_attr_null_space() */ + +/**************************************************************** +** +** test_attr_deprec(): Test basic H5A (attribute) code. 
+** Tests deprecated API routines +** +****************************************************************/ +static void +test_attr_deprec(hid_t fcpl, hid_t fapl) +{ +#ifndef H5_NO_DEPRECATED_SYMBOLS + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Deprecated Attribute Routines\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Add attribute to dataset */ + + /* Create attribute */ + attr = H5Acreate1(dataset, "attr", H5T_NATIVE_UINT, sid, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate1"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close dataspaces */ + ret = H5Sclose(sid); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file and operate on the attribute */ + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open dataset */ + dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + +#if 0 + /* Get number of attributes with bad ID */ + H5E_BEGIN_TRY + { + ret = H5Aget_num_attrs((hid_t)-1); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aget_num_attrs"); + + /* Get number of attributes */ + ret = H5Aget_num_attrs(dataset); + VERIFY(ret, 1, "H5Aget_num_attrs"); +#endif + /* Open the attribute by index */ + attr = H5Aopen_idx(dataset, 0); + CHECK(attr, FAIL, "H5Aopen_idx"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Open the attribute by name */ + attr = H5Aopen_name(dataset, "attr"); + CHECK(attr, FAIL, "H5Aopen_name"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#else /* H5_NO_DEPRECATED_SYMBOLS */ + /* Shut compiler up */ + (void)fcpl; + (void)fapl; + + /* Output message about test being skipped */ + MESSAGE(5, ("Skipping Test On Deprecated Attribute Routines\n")); + +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +} /* test_attr_deprec() */ + +/**************************************************************** +** +** test_attr_many(): Test basic H5A (attribute) code. +** Tests storing lots of attributes +** +****************************************************************/ +static void +test_attr_many(hbool_t new_format, hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t gid; /* Group ID */ + hid_t sid; /* Dataspace ID */ + hid_t aid; /* Attribute ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned nattr = (new_format ? 
NATTR_MANY_NEW : NATTR_MANY_OLD); /* Number of attributes */ + htri_t exists; /* Whether the attribute exists or not */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Storing Many Attributes\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataspace for attribute */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create group for attributes */ + gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Create many attributes */ + for (u = 0; u < nattr; u++) { + HDsnprintf(attrname, sizeof(attrname), "a-%06u", u); + + exists = H5Aexists(gid, attrname); + VERIFY(exists, FALSE, "H5Aexists"); + + exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT); + VERIFY(exists, FALSE, "H5Aexists_by_name"); + + aid = H5Acreate2(gid, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + exists = H5Aexists(gid, attrname); + VERIFY(exists, TRUE, "H5Aexists"); + + exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT); + VERIFY(exists, TRUE, "H5Aexists_by_name"); + + ret = H5Awrite(aid, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + exists = H5Aexists(gid, attrname); + VERIFY(exists, TRUE, "H5Aexists"); + + exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT); + VERIFY(exists, TRUE, "H5Aexists_by_name"); + } /* end for */ + + /* Close group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file and check on the attributes */ + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Re-open group */ + gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Verify attributes */ + for (u = 0; u < nattr; u++) { + unsigned value; /* Attribute value */ + + HDsnprintf(attrname, sizeof(attrname), "a-%06u", u); + + exists = H5Aexists(gid, attrname); + VERIFY(exists, TRUE, "H5Aexists"); + + exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT); + VERIFY(exists, TRUE, "H5Aexists_by_name"); + + aid = H5Aopen(gid, attrname, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Aopen"); + + exists = H5Aexists(gid, attrname); + VERIFY(exists, TRUE, "H5Aexists"); + + exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT); + VERIFY(exists, TRUE, "H5Aexists_by_name"); + + ret = H5Aread(aid, H5T_NATIVE_UINT, &value); + CHECK(ret, FAIL, "H5Aread"); + VERIFY(value, u, "H5Aread"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ + + /* Close group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close dataspaces */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_many() */ + +/**************************************************************** +** +** test_attr_corder_create_empty(): Test basic H5A (attribute) code. 
+** Tests basic code to create objects with attribute creation order info +** +****************************************************************/ +static void +test_attr_corder_create_basic(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t dcpl; /* Dataset creation property list ID */ + unsigned crt_order_flags; /* Creation order flags */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Code for Attributes with Creation Order Info\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataset creation property list */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } +#if 0 + /* Get creation order indexing on object */ + ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags); + CHECK(ret, FAIL, "H5Pget_attr_creation_order"); + VERIFY(crt_order_flags, 0, "H5Pget_attr_creation_order"); +#endif + /* Setting invalid combination of a attribute order creation order indexing on should fail */ + H5E_BEGIN_TRY + { + ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_INDEXED); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_attr_creation_order"); + +#if 0 + ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags); + CHECK(ret, FAIL, "H5Pget_attr_creation_order"); + VERIFY(crt_order_flags, 0, "H5Pget_attr_creation_order"); +#endif + + /* Set attribute creation order tracking & indexing for object */ + ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags); + CHECK(ret, FAIL, "H5Pget_attr_creation_order"); + VERIFY(crt_order_flags, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED), "H5Pget_attr_creation_order"); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open dataset created */ + dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + + /* Retrieve dataset creation property list for 
group */ + dcpl = H5Dget_create_plist(dataset); + CHECK(dcpl, FAIL, "H5Dget_create_plist"); + + /* Query the attribute creation properties */ + ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags); + CHECK(ret, FAIL, "H5Pget_attr_creation_order"); + VERIFY(crt_order_flags, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED), "H5Pget_attr_creation_order"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_corder_create_basic() */ + +/**************************************************************** +** +** test_attr_corder_create_compact(): Test basic H5A (attribute) code. +** Tests compact attribute storage on objects with attribute creation order info +** +****************************************************************/ +static void +test_attr_corder_create_compact(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dset1, dset2, dset3; /* Dataset IDs */ + hid_t my_dataset; /* Current dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + unsigned max_compact; /* Maximum # of links to store in group compactly */ + unsigned min_dense; /* Minimum # of links to store in group "densely" */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? */ + hsize_t nattrs; /* Number of attributes on object */ +#endif + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned curr_dset; /* Current dataset to work on */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Compact Storage of Attributes with Creation Order Info\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataset creation property list */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Set attribute creation order tracking & indexing for object */ + ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + + /* Query the attribute creation properties */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Create dataspace for dataset & attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create datasets */ + dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dcreate2"); + dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dcreate2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = 
H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Create several attributes, but keep storage in compact form */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + } /* end for */ + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open datasets created */ + dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dopen2"); + dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dopen2"); + dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dopen2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Loop through attributes, checking their creation order values */ + /* (the name index is used, but the creation order value is in the same order) */ + for (u = 0; u < max_compact; u++) { + H5A_info_t ainfo; /* Attribute information */ + + /* Retrieve information for attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Aget_info_by_name(my_dataset, ".", attrname, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + + /* Verify creation order of attribute */ + VERIFY(ainfo.corder_valid, TRUE, "H5Aget_info_by_name"); + VERIFY(ainfo.corder, u, "H5Aget_info_by_name"); + } /* end for */ + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, 
"H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_corder_create_compact() */ + +/**************************************************************** +** +** test_attr_corder_create_dense(): Test basic H5A (attribute) code. +** Tests dense attribute storage on objects with attribute creation order info +** +****************************************************************/ +static void +test_attr_corder_create_dense(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dset1, dset2, dset3; /* Dataset IDs */ + hid_t my_dataset; /* Current dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + unsigned max_compact; /* Maximum # of links to store in group compactly */ + unsigned min_dense; /* Minimum # of links to store in group "densely" */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? */ + hsize_t nattrs; /* Number of attributes on object */ + hsize_t name_count; /* # of records in name index */ + hsize_t corder_count; /* # of records in creation order index */ +#endif + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned curr_dset; /* Current dataset to work on */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Dense Storage of Attributes with Creation Order Info\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataset creation property list */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Set attribute creation order tracking & indexing for object */ + ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + + /* Query the attribute creation properties */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Create dataspace for dataset & attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create datasets */ + dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dcreate2"); + dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dcreate2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Create several attributes, but keep storage in compact form */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = 
H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + } /* end for */ + + /* Create another attribute, to push into dense storage */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", max_compact); + attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); + + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); +#endif + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open datasets created */ + dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dopen2"); + dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dopen2"); + dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dopen2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + /* Loop through attributes, checking their creation order values */ + /* (the name index is used, but the 
creation order value is in the same order) */ + for (u = 0; u < (max_compact + 1); u++) { + H5A_info_t ainfo; /* Attribute information */ + + /* Retrieve information for attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Aget_info_by_name(my_dataset, ".", attrname, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + + /* Verify creation order of attribute */ + VERIFY(ainfo.corder_valid, TRUE, "H5Aget_info_by_name"); + VERIFY(ainfo.corder, u, "H5Aget_info_by_name"); + } /* end for */ + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_corder_create_dense() */ + +/**************************************************************** +** +** test_attr_corder_create_reopen(): Test basic H5A (attribute) code. +** Test creating attributes w/reopening file from using new format +** to using old format +** +****************************************************************/ +static void +test_attr_corder_create_reopen(hid_t fcpl, hid_t fapl) +{ + hid_t fid = -1; /* File ID */ + hid_t gcpl_id = -1; /* Group creation property list ID */ + hid_t gid = -1; /* Group ID */ + hid_t sid = -1; /* Dataspace ID */ + hid_t aid = -1; /* Attribute ID */ + int buf; /* Attribute data */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Creating Attributes w/New & Old Format\n")); + + /* Create dataspace for attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create group */ + gcpl_id = H5Pcreate(H5P_GROUP_CREATE); + CHECK(gcpl_id, FAIL, "H5Pcreate"); + ret = H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, gcpl_id, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Create a couple of attributes */ + aid = H5Acreate2(gid, "attr-003", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + buf = 3; + ret = H5Awrite(aid, H5T_NATIVE_INT, &buf); + CHECK(ret, FAIL, "H5Awrite"); + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + aid = H5Acreate2(gid, "attr-004", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + buf = 4; + ret = H5Awrite(aid, H5T_NATIVE_INT, &buf); + CHECK(ret, FAIL, "H5Awrite"); + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /***** Close group & GCPL *****/ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Pclose(gcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file, without "use the latest format" flag */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Re-open group */ + gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Delete attribute */ + ret = H5Adelete(gid, "attr-003"); + CHECK(ret, FAIL, "H5Adelete"); + + /* Create some additional attributes */ + aid = H5Acreate2(gid, "attr-008", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + buf = 8; + ret = H5Awrite(aid, H5T_NATIVE_INT, 
&buf); + CHECK(ret, FAIL, "H5Awrite"); + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + aid = H5Acreate2(gid, "attr-006", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + buf = 6; + ret = H5Awrite(aid, H5T_NATIVE_INT, &buf); + CHECK(ret, FAIL, "H5Awrite"); + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /***** Close group *****/ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close attribute dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_corder_create_reopen() */ + +/**************************************************************** +** +** test_attr_corder_transition(): Test basic H5A (attribute) code. +** Tests attribute storage transitions on objects with attribute creation order info +** +****************************************************************/ +static void +test_attr_corder_transition(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dset1, dset2, dset3; /* Dataset IDs */ + hid_t my_dataset; /* Current dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + unsigned max_compact; /* Maximum # of links to store in group compactly */ + unsigned min_dense; /* Minimum # of links to store in group "densely" */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? */ + hsize_t nattrs; /* Number of attributes on object */ + hsize_t name_count; /* # of records in name index */ + hsize_t corder_count; /* # of records in creation order index */ +#endif + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned curr_dset; /* Current dataset to work on */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Storage Transitions of Attributes with Creation Order Info\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataset creation property list */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Set attribute creation order tracking & indexing for object */ + ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + + /* Query the attribute creation properties */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Create dataspace for dataset & attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* XXX: Try to find a way to resize dataset's object header so that the object + * header can have one chunk, then retrieve "empty" file size and check + * that size after everything is deleted -QAK + */ + /* Create datasets */ + dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dcreate2"); + dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dcreate2"); + + /* Work on all the datasets */ + for (curr_dset = 0; 
curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open datasets created */ + dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dopen2"); + dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dopen2"); + dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dopen2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ + + /* Create several attributes, but keep storage in compact form */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + } /* end for */ + + /* Create another attribute, to push into dense storage */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", max_compact); + attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); + + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + 
CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); +#endif + /* Delete several attributes from object, until attribute storage resumes compact form */ + for (u = max_compact; u >= min_dense; u--) { + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(my_dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, u, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); + + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); +#endif + } /* end for */ + + /* Delete another attribute, to push attribute storage into compact form */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", (min_dense - 1)); + ret = H5Adelete(my_dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (min_dense - 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Re-add attributes to get back into dense form */ + for (u = (min_dense - 1); u < (max_compact + 1); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); + + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); +#endif + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open datasets created */ + dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dopen2"); + dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dopen2"); + dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT); + 
CHECK(dset3, FAIL, "H5Dopen2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); + + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); +#endif + /* Delete several attributes from object, until attribute storage resumes compact form */ + for (u = max_compact; u >= min_dense; u--) { + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(my_dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, u, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); + + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); +#endif + } /* end for */ + + /* Delete another attribute, to push attribute storage into compact form */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", (min_dense - 1)); + ret = H5Adelete(my_dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (min_dense - 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Re-add attributes to get back into dense form */ + for (u = (min_dense - 1); u < (max_compact + 1); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, TRUE, 
"H5O__is_attr_dense_test"); + + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); +#endif + /* Delete all attributes */ + for (u = max_compact; u > 0; u--) { + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(my_dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); + } /* end for */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", 0); + ret = H5Adelete(my_dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_corder_transition() */ + +/**************************************************************** +** +** test_attr_corder_delete(): Test basic H5A (attribute) code. +** Tests deleting object w/dense attribute storage on objects with attribute creation order info +** +****************************************************************/ +static void +test_attr_corder_delete(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dset1, dset2, dset3; /* Dataset IDs */ + hid_t my_dataset; /* Current dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + unsigned max_compact; /* Maximum # of links to store in group compactly */ + unsigned min_dense; /* Minimum # of links to store in group "densely" */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? 
*/ + hsize_t nattrs; /* Number of attributes on object */ + hsize_t name_count; /* # of records in name index */ + hsize_t corder_count; /* # of records in creation order index */ +#endif + unsigned reopen_file; /* Whether to re-open the file before deleting the datasets */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ +#ifdef LATER + h5_stat_size_t empty_size; /* Size of empty file */ + h5_stat_size_t file_size; /* Size of file after operating on it */ +#endif /* LATER */ + unsigned curr_dset; /* Current dataset to work on */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Deleting Object w/Dense Attribute Storage and Creation Order Info\n")); + + /* Create dataspace for dataset & attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataset creation property list */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Set attribute creation order tracking & indexing for object */ + ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + + /* Query the attribute creation properties */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + +/* XXX: Try to find a way to resize dataset's object header so that the object + * header can have one chunk, then retrieve "empty" file size and check + * that size after everything is deleted -QAK + */ +#ifdef LATER + /* Create empty file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Get the size of an empty file */ + empty_size = h5_get_file_size(FILENAME); + CHECK(empty_size, FAIL, "h5_get_file_size"); +#endif /* LATER */ + + /* Loop to leave file open when deleting dataset, or to close & re-open file + * before deleting dataset */ + for (reopen_file = FALSE; reopen_file <= TRUE; reopen_file++) { + /* Create test file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create datasets */ + dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dcreate2"); + dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dcreate2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Create attributes, until attribute storage is in dense form */ + for (u = 0; u < max_compact * 2; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = 
H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); + + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); +#endif + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Check for deleting datasets without re-opening file */ + if (!reopen_file) { + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + ret = H5Ldelete(fid, DSET3_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + } /* end if */ + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Check for deleting dataset after re-opening file */ + if (reopen_file) { + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Delete the datasets */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + ret = H5Ldelete(fid, DSET3_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } /* end if */ + +#ifdef LATER + /* Get the size of the file now */ + file_size = h5_get_file_size(FILENAME); + CHECK(file_size, FAIL, "h5_get_file_size"); + VERIFY(file_size, empty_size, "h5_get_file_size"); +#endif /* LATER */ + } /* end for */ + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_corder_delete() */ + +/*------------------------------------------------------------------------- + * Function: attr_info_by_idx_check + * + * Purpose: Support routine for attr_info_by_idx, to verify the attribute + * info is correct for an attribute + * + * Note: This routine assumes that the attributes have been added to the + * object in alphabetical order. 
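 + * (The callers name attributes "attr %02u", so increasing name order matches increasing creation order.)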
+ * + * Return: Success: 0 + * Failure: -1 + * + * Programmer: Quincey Koziol + * Tuesday, February 13, 2007 + * + *------------------------------------------------------------------------- + */ +static int +attr_info_by_idx_check(hid_t obj_id, const char *attrname, hsize_t n, hbool_t use_index) +{ + char tmpname[NAME_BUF_SIZE]; /* Temporary attribute name */ + H5A_info_t ainfo; /* Attribute info struct */ + int old_nerrs; /* Number of errors when entering this check */ + herr_t ret; /* Generic return value */ + + /* Retrieve the current # of reported errors */ + old_nerrs = nerrors; + + /* Verify the information for first attribute, in increasing creation order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_idx"); + VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx"); + + /* Verify the information for new attribute, in increasing creation order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, n, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_idx"); + VERIFY(ainfo.corder, n, "H5Aget_info_by_idx"); + + /* Verify the name for new link, in increasing creation order */ + HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE); + ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, n, tmpname, + (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_name_by_idx"); + if (HDstrcmp(attrname, tmpname) != 0) + TestErrPrintf("Line %d: attribute name size wrong!\n", __LINE__); + + /* Don't test "native" order if there is no creation order index, since + * there's not a good way to easily predict the attribute's order in the name + * index. 
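 + * (Where the creation order index is present, H5_ITER_INC is used below as a stand-in for H5_ITER_NATIVE.)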
+ */ + if (use_index) { + /* Verify the information for first attribute, in native creation order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC /* H5_ITER_NATIVE */, + (hsize_t)0, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_idx"); + VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx"); + + /* Verify the information for new attribute, in native creation order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC /* H5_ITER_NATIVE */, n, &ainfo, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_idx"); + VERIFY(ainfo.corder, n, "H5Aget_info_by_idx"); + + /* Verify the name for new link, in increasing native order */ + HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE); + ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC /* H5_ITER_NATIVE */, n, + tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_name_by_idx"); + if (HDstrcmp(attrname, tmpname) != 0) + TestErrPrintf("Line %d: attribute name size wrong!\n", __LINE__); + } /* end if */ + + /* Verify the information for first attribute, in decreasing creation order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, n, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_idx"); + VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx"); + + /* Verify the information for new attribute, in increasing creation order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)0, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_idx"); + VERIFY(ainfo.corder, n, "H5Aget_info_by_idx"); + + /* Verify the name for new link, in increasing creation order */ + HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE); + ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)0, tmpname, + (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_name_by_idx"); + if (HDstrcmp(attrname, tmpname) != 0) + TestErrPrintf("Line %d: attribute name size wrong!\n", __LINE__); + + /* Verify the information for first attribute, in increasing name order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_idx"); + VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx"); + + /* Verify the information for new attribute, in increasing name order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_INC, n, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_idx"); + VERIFY(ainfo.corder, n, "H5Aget_info_by_idx"); + + /* Verify the name for new link, in increasing name order */ + HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE); + ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_INC, n, tmpname, + (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_name_by_idx"); + if (HDstrcmp(attrname, tmpname) != 0) + TestErrPrintf("Line %d: attribute name size wrong!\n", __LINE__); + + /* Don't test "native" order queries on link name order, since there's not + * a good way to easily predict the order of the links in the name index. 
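 + * (The decreasing-name-order checks that follow are also guarded by NO_DECREASING_ALPHA_ITER_ORDER.)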
+ */ + +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + /* Verify the information for first attribute, in decreasing name order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_DEC, n, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_idx"); + VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx"); + + /* Verify the information for new attribute, in increasing name order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_DEC, (hsize_t)0, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_idx"); + VERIFY(ainfo.corder, n, "H5Aget_info_by_idx"); + + /* Verify the name for new link, in increasing name order */ + HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE); + ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_DEC, (hsize_t)0, tmpname, + (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_name_by_idx"); + if (HDstrcmp(attrname, tmpname) != 0) + TestErrPrintf("Line %d: attribute name size wrong!\n", __LINE__); +#endif + /* Retrieve current # of errors */ + if (old_nerrs == nerrors) + return (0); + else + return (-1); +} /* end attr_info_by_idx_check() */ + +/**************************************************************** +** +** test_attr_info_by_idx(): Test basic H5A (attribute) code. +** Tests querying attribute info by index +** +****************************************************************/ +static void +test_attr_info_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dset1, dset2, dset3; /* Dataset IDs */ + hid_t my_dataset; /* Current dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + H5A_info_t ainfo; /* Attribute information */ + unsigned max_compact; /* Maximum # of links to store in group compactly */ + unsigned min_dense; /* Minimum # of links to store in group "densely" */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? 
*/ + hsize_t nattrs; /* Number of attributes on object */ + hsize_t name_count; /* # of records in name index */ + hsize_t corder_count; /* # of records in creation order index */ +#endif + unsigned use_index; /* Use index on creation order values */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + char tmpname[NAME_BUF_SIZE]; /* Temporary attribute name */ + unsigned curr_dset; /* Current dataset to work on */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Create dataspace for dataset & attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataset creation property list */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Query the attribute creation properties */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Loop over using index for creation order value */ + for (use_index = FALSE; use_index <= TRUE; use_index++) { + /* Output message about test being performed */ + if (use_index) + MESSAGE(5, ("Testing Querying Attribute Info By Index w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Querying Attribute Info By Index w/o Creation Order Index\n")) + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Set attribute creation order tracking & indexing for object */ + if (new_format == TRUE) { + ret = H5Pset_attr_creation_order( + dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0))); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + } /* end if */ + + /* Create datasets */ + dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dcreate2"); + dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dcreate2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Check for query on non-existent attribute */ + H5E_BEGIN_TRY + { + ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, &ainfo, + H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aget_info_by_idx"); + H5E_BEGIN_TRY + { + ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, + tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aget_name_by_idx"); + + /* Create attributes, up to limit of compact form */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, 
"H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Check for out of bound offset queries */ + H5E_BEGIN_TRY + { + ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, &ainfo, + H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aget_info_by_idx"); + H5E_BEGIN_TRY + { + ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)u, &ainfo, + H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aget_info_by_idx"); + H5E_BEGIN_TRY + { + ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, + tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aget_name_by_idx"); + + /* Create more attributes, to push into dense form */ + for (; u < (max_compact * 2); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test"); +#endif + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? 
TRUE : FALSE), "H5O__is_attr_dense_test"); + + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } /* end if */ +#endif + /* Check for out of bound offset queries */ + H5E_BEGIN_TRY + { + ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, &ainfo, + H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aget_info_by_idx"); + H5E_BEGIN_TRY + { + ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)u, &ainfo, + H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aget_info_by_idx"); + H5E_BEGIN_TRY + { + ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, + tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aget_name_by_idx"); + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_info_by_idx() */ + +/*************************************************************** +** +** test_attr_info_null_info_pointer(): A test to ensure that +** passing a NULL attribute info pointer to H5Aget_info +** (_by_name/_by_idx) doesn't cause bad behavior. +** +****************************************************************/ +static void +test_attr_info_null_info_pointer(hid_t fcpl, hid_t fapl) +{ + herr_t err_ret = -1; + hid_t fid; + hid_t attr; + hid_t sid; + + /* Create dataspace for attribute */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create attribute */ + attr = H5Acreate2(fid, GET_INFO_NULL_POINTER_ATTR_NAME, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info(attr, NULL); + } + H5E_END_TRY; + + CHECK(err_ret, SUCCEED, "H5Aget_info"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_name(fid, ".", GET_INFO_NULL_POINTER_ATTR_NAME, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + CHECK(err_ret, SUCCEED, "H5Aget_info_by_name"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_info_by_idx(fid, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + CHECK(err_ret, SUCCEED, "H5Aget_info_by_idx"); + + /* Close dataspace */ + err_ret = H5Sclose(sid); + CHECK(err_ret, FAIL, "H5Sclose"); + + /* Close attribute */ + err_ret = H5Aclose(attr); + CHECK(err_ret, FAIL, "H5Aclose"); + + /* Close file */ + err_ret = H5Fclose(fid); + CHECK(err_ret, FAIL, "H5Fclose"); +} + +/*************************************************************** +** +** test_attr_rename_invalid_name(): A test to ensure that +** passing a NULL or empty attribute name to +** H5Arename(_by_name) doesn't cause bad behavior. 
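+** Each call is wrapped in H5E_BEGIN_TRY / H5E_END_TRY and is expected to fail rather than crash.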
+** +****************************************************************/ +static void +test_attr_rename_invalid_name(hid_t fcpl, hid_t fapl) +{ + herr_t err_ret = -1; + hid_t fid; + hid_t attr; + hid_t sid; + + /* Create dataspace for attribute */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create attribute */ + attr = H5Acreate2(fid, INVALID_RENAME_TEST_ATTR_NAME, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename(fid, NULL, INVALID_RENAME_TEST_NEW_ATTR_NAME); + } + H5E_END_TRY; + + CHECK(err_ret, SUCCEED, "H5Arename"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename(fid, "", INVALID_RENAME_TEST_NEW_ATTR_NAME); + } + H5E_END_TRY; + + CHECK(err_ret, SUCCEED, "H5Arename"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename(fid, INVALID_RENAME_TEST_ATTR_NAME, NULL); + } + H5E_END_TRY; + + CHECK(err_ret, SUCCEED, "H5Arename"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename(fid, INVALID_RENAME_TEST_ATTR_NAME, ""); + } + H5E_END_TRY; + + CHECK(err_ret, SUCCEED, "H5Arename"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename_by_name(fid, ".", NULL, INVALID_RENAME_TEST_NEW_ATTR_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + CHECK(err_ret, SUCCEED, "H5Arename_by_name"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename_by_name(fid, ".", "", INVALID_RENAME_TEST_NEW_ATTR_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + + CHECK(err_ret, SUCCEED, "H5Arename_by_name"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename_by_name(fid, ".", INVALID_RENAME_TEST_ATTR_NAME, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + + CHECK(err_ret, SUCCEED, "H5Arename_by_name"); + + H5E_BEGIN_TRY + { + err_ret = H5Arename_by_name(fid, ".", INVALID_RENAME_TEST_ATTR_NAME, "", H5P_DEFAULT); + } + H5E_END_TRY; + + CHECK(err_ret, SUCCEED, "H5Arename_by_name"); + + /* Close dataspace */ + err_ret = H5Sclose(sid); + CHECK(err_ret, FAIL, "H5Sclose"); + + /* Close attribute */ + err_ret = H5Aclose(attr); + CHECK(err_ret, FAIL, "H5Aclose"); + + /* Close file */ + err_ret = H5Fclose(fid); + CHECK(err_ret, FAIL, "H5Fclose"); +} + +/*************************************************************** +** +** test_attr_get_name_invalid_buf(): A test to ensure that +** passing a NULL buffer to H5Aget_name(_by_idx) when +** the 'size' parameter is non-zero doesn't cause bad +** behavior. 
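+** Both calls pass a NULL buffer with a non-zero size and are expected to return FAIL.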
+** +****************************************************************/ +static void +test_attr_get_name_invalid_buf(hid_t fcpl, hid_t fapl) +{ + ssize_t err_ret = -1; + hid_t fid; + hid_t attr; + hid_t sid; + + /* Create dataspace for attribute */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create attribute */ + attr = + H5Acreate2(fid, GET_NAME_INVALID_BUF_TEST_ATTR_NAME, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_name(attr, 1, NULL); + } + H5E_END_TRY; + + VERIFY(err_ret, FAIL, "H5Aget_name"); + + H5E_BEGIN_TRY + { + err_ret = H5Aget_name_by_idx(fid, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, 1, H5P_DEFAULT); + } + H5E_END_TRY; + + VERIFY(err_ret, FAIL, "H5Aget_name_by_idx"); + + /* Close dataspace */ + err_ret = H5Sclose(sid); + CHECK(err_ret, FAIL, "H5Sclose"); + + /* Close attribute */ + err_ret = H5Aclose(attr); + CHECK(err_ret, FAIL, "H5Aclose"); + + /* Close file */ + err_ret = H5Fclose(fid); + CHECK(err_ret, FAIL, "H5Fclose"); +} + +/**************************************************************** +** +** test_attr_delete_by_idx(): Test basic H5A (attribute) code. +** Tests deleting attribute by index +** +****************************************************************/ +static void +test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dset1, dset2, dset3; /* Dataset IDs */ + hid_t my_dataset; /* Current dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + H5A_info_t ainfo; /* Attribute information */ + unsigned max_compact; /* Maximum # of links to store in group compactly */ + unsigned min_dense; /* Minimum # of links to store in group "densely" */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? 
*/ + hsize_t nattrs; /* Number of attributes on object */ + hsize_t name_count; /* # of records in name index */ + hsize_t corder_count; /* # of records in creation order index */ +#endif + H5_index_t idx_type; /* Type of index to operate on */ + H5_iter_order_t order; /* Order within in the index */ + unsigned use_index; /* Use index on creation order values */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + char tmpname[NAME_BUF_SIZE]; /* Temporary attribute name */ + unsigned curr_dset; /* Current dataset to work on */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + MESSAGE(5, ("Testing Deleting Attribute By Index\n")) + + /* Create dataspace for dataset & attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataset creation property list */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Query the attribute creation properties */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Loop over operating on different indices on link fields */ + for (idx_type = H5_INDEX_NAME; idx_type <= H5_INDEX_CRT_ORDER; idx_type++) { + /* Loop over operating in different orders */ + for (order = H5_ITER_INC; order <= H5_ITER_DEC; order++) { + /* Loop over using index for creation order value */ + for (use_index = FALSE; use_index <= TRUE; use_index++) { + /* Print appropriate test message */ + if (idx_type == H5_INDEX_CRT_ORDER) { + if (order == H5_ITER_INC) { + if (use_index) + MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Increasing " + "Order w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Increasing " + "Order w/o Creation Order Index\n")) + } /* end if */ + else { + if (use_index) + MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Decreasing " + "Order w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Decreasing " + "Order w/o Creation Order Index\n")) + } /* end else */ + } /* end if */ + else { + if (order == H5_ITER_INC) { + if (use_index) + MESSAGE(5, ("Testing Deleting Attribute By Name Index in Increasing Order " + "w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Deleting Attribute By Name Index in Increasing Order w/o " + "Creation Order Index\n")) + } /* end if */ + else { +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if (use_index) + MESSAGE(5, ("Testing Deleting Attribute By Name Index in Decreasing Order " + "w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Deleting Attribute By Name Index in Decreasing Order w/o " + "Creation Order Index\n")) +#else + continue; +#endif + } /* end else */ + } /* end else */ + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Set attribute creation order tracking & indexing for object */ + if (new_format == TRUE) { + ret = H5Pset_attr_creation_order( + dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? 
H5P_CRT_ORDER_INDEXED : (unsigned)0))); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + } /* end if */ + + /* Create datasets */ + dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dcreate2"); + dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dcreate2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Check for deleting non-existent attribute */ + H5E_BEGIN_TRY + { + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Adelete_by_idx"); + + /* Create attributes, up to limit of compact form */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = + H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Check for out of bound deletions */ + H5E_BEGIN_TRY + { + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Adelete_by_idx"); + } /* end for */ + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ + + /* Delete attributes from compact storage */ + for (u = 0; u < (max_compact - 1); u++) { + /* Delete first attribute in appropriate order */ + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Adelete_by_idx"); + + /* Verify the attribute information for first attribute in appropriate order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, &ainfo, + H5P_DEFAULT); + if (new_format) { + if (order == H5_ITER_INC) { + VERIFY(ainfo.corder, (u + 1), 
"H5Aget_info_by_idx"); + } /* end if */ + else { + VERIFY(ainfo.corder, (max_compact - (u + 2)), "H5Aget_info_by_idx"); + } /* end else */ + } /* end if */ + + /* Verify the name for first attribute in appropriate order */ + HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE); + ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, + tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + if (order == H5_ITER_INC) + HDsnprintf(attrname, sizeof(attrname), "attr %02u", (u + 1)); + else + HDsnprintf(attrname, sizeof(attrname), "attr %02u", (max_compact - (u + 2))); + ret = HDstrcmp(attrname, tmpname); + VERIFY(ret, 0, "H5Aget_name_by_idx"); + } /* end for */ + + /* Delete last attribute */ + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Adelete_by_idx"); +#if 0 + /* Verify state of attribute storage (empty) */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); +#endif + } /* end for */ + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ + + /* Create more attributes, to push into dense form */ + for (u = 0; u < (max_compact * 2); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = + H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + if (u >= max_compact) { + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test"); + } /* end if */ +#endif + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? 
TRUE : FALSE), "H5O__is_attr_dense_test"); + + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } /* end if */ +#endif + /* Check for out of bound deletion */ + H5E_BEGIN_TRY + { + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Adelete_by_idx"); + } /* end for */ + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ + + /* Delete attributes from dense storage */ + for (u = 0; u < ((max_compact * 2) - 1); u++) { + /* Delete first attribute in appropriate order */ + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Adelete_by_idx"); + + /* Verify the attribute information for first attribute in appropriate order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, &ainfo, + H5P_DEFAULT); + if (new_format) { + if (order == H5_ITER_INC) { + VERIFY(ainfo.corder, (u + 1), "H5Aget_info_by_idx"); + } /* end if */ + else { + VERIFY(ainfo.corder, ((max_compact * 2) - (u + 2)), "H5Aget_info_by_idx"); + } /* end else */ + } /* end if */ + + /* Verify the name for first attribute in appropriate order */ + HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE); + ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, + tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + if (order == H5_ITER_INC) + HDsnprintf(attrname, sizeof(attrname), "attr %02u", (u + 1)); + else + HDsnprintf(attrname, sizeof(attrname), "attr %02u", + ((max_compact * 2) - (u + 2))); + ret = HDstrcmp(attrname, tmpname); + VERIFY(ret, 0, "H5Aget_name_by_idx"); + } /* end for */ + + /* Delete last attribute */ + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Adelete_by_idx"); +#if 0 + /* Verify state of attribute storage (empty) */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); +#endif + /* Check for deletion on empty attribute storage again */ + H5E_BEGIN_TRY + { + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Adelete_by_idx"); + } /* end for */ + + /* Delete attributes in middle */ + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ + + /* Create attributes, to push into dense form */ + for (u = 0; u < (max_compact * 2); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = + H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, 
H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + if (u >= max_compact) { + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test"); + } /* end if */ +#endif + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ + } /* end for */ + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ + + /* Delete every other attribute from dense storage, in appropriate order */ + for (u = 0; u < max_compact; u++) { + /* Delete attribute */ + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Adelete_by_idx"); + + /* Verify the attribute information for first attribute in appropriate order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, &ainfo, + H5P_DEFAULT); + if (new_format) { + if (order == H5_ITER_INC) { + VERIFY(ainfo.corder, ((u * 2) + 1), "H5Aget_info_by_idx"); + } /* end if */ + else { + VERIFY(ainfo.corder, ((max_compact * 2) - ((u * 2) + 2)), + "H5Aget_info_by_idx"); + } /* end else */ + } /* end if */ + + /* Verify the name for first attribute in appropriate order */ + HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE); + ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, + tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + if (order == H5_ITER_INC) + HDsnprintf(attrname, sizeof(attrname), "attr %02u", ((u * 2) + 1)); + else + HDsnprintf(attrname, sizeof(attrname), "attr %02u", + ((max_compact * 2) - ((u * 2) + 2))); + ret = HDstrcmp(attrname, tmpname); + VERIFY(ret, 0, "H5Aget_name_by_idx"); + } /* end for */ + } /* end for */ + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ + + /* Delete remaining attributes from dense storage, in appropriate order */ + for (u = 0; u < (max_compact - 1); u++) { + /* Delete attribute */ + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Adelete_by_idx"); + + /* Verify the attribute information for first attribute in appropriate order */ + HDmemset(&ainfo, 0, sizeof(ainfo)); + ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, &ainfo, + H5P_DEFAULT); + if (new_format) { + if (order == H5_ITER_INC) { + VERIFY(ainfo.corder, ((u * 2) + 3), "H5Aget_info_by_idx"); + } /* end if */ + else { + VERIFY(ainfo.corder, ((max_compact * 2) - ((u * 2) + 4)), + "H5Aget_info_by_idx"); + } /* end else */ + } /* end if */ + + /* Verify the name for first attribute in appropriate order */ + HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE); + ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, + tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); + if (order == H5_ITER_INC) + HDsnprintf(attrname, sizeof(attrname), 
"attr %02u", ((u * 2) + 3)); + else + HDsnprintf(attrname, sizeof(attrname), "attr %02u", + ((max_compact * 2) - ((u * 2) + 4))); + ret = HDstrcmp(attrname, tmpname); + VERIFY(ret, 0, "H5Aget_name_by_idx"); + } /* end for */ + + /* Delete last attribute */ + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Adelete_by_idx"); +#if 0 + /* Verify state of attribute storage (empty) */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); +#endif + /* Check for deletion on empty attribute storage again */ + H5E_BEGIN_TRY + { + ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Adelete_by_idx"); + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ + } /* end for */ + } /* end for */ + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_delete_by_idx() */ + +/**************************************************************** +** +** attr_iterate2_cb(): Revised attribute operator +** +****************************************************************/ +static herr_t +attr_iterate2_cb(hid_t loc_id, const char *attr_name, const H5A_info_t *info, void *_op_data) +{ + attr_iter_info_t *op_data = (attr_iter_info_t *)_op_data; /* User data */ + char attrname[NAME_BUF_SIZE]; /* Object name */ + H5A_info_t my_info; /* Local attribute info */ + + /* Increment # of times the callback was called */ + op_data->ncalled++; + + /* Get the attribute information directly to compare */ + if (H5Aget_info_by_name(loc_id, ".", attr_name, &my_info, H5P_DEFAULT) < 0) + return (H5_ITER_ERROR); + + /* Check more things for revised attribute iteration (vs. 
older attribute iteration) */ + if (info) { + /* Check for correct order of iteration */ + /* (if we are operating in increasing or decreasing order) */ + if (op_data->order != H5_ITER_NATIVE) + if (info->corder != op_data->curr) + return (H5_ITER_ERROR); + + /* Compare attribute info structs */ + if (info->corder_valid != my_info.corder_valid) + return (H5_ITER_ERROR); + if (info->corder != my_info.corder) + return (H5_ITER_ERROR); + if (info->cset != my_info.cset) + return (H5_ITER_ERROR); + if (info->data_size != my_info.data_size) + return (H5_ITER_ERROR); + } /* end if */ + + /* Verify name of link */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", (unsigned)my_info.corder); + if (HDstrcmp(attr_name, attrname) != 0) + return (H5_ITER_ERROR); + + /* Check if we've visited this link before */ + if ((size_t)op_data->curr >= op_data->max_visit) + return (H5_ITER_ERROR); + if (op_data->visited[op_data->curr]) + return (H5_ITER_ERROR); + op_data->visited[op_data->curr] = TRUE; + + /* Advance to next value, in correct direction */ + if (op_data->order != H5_ITER_DEC) + op_data->curr++; + else + op_data->curr--; + + /* Check for stopping in the middle of iterating */ + if (op_data->stop > 0) + if (--op_data->stop == 0) + return (CORDER_ITER_STOP); + + return (H5_ITER_CONT); +} /* end attr_iterate2_cb() */ + +#ifndef H5_NO_DEPRECATED_SYMBOLS + +/**************************************************************** +** +** attr_iterate1_cb(): Attribute operator +** +****************************************************************/ +#if 0 +static herr_t +attr_iterate1_cb(hid_t loc_id, const char *attr_name, void *_op_data) +{ + return (attr_iterate2_cb(loc_id, attr_name, NULL, _op_data)); +} /* end attr_iterate1_cb() */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif + +#ifndef NO_ITERATION_RESTART +/*------------------------------------------------------------------------- + * Function: attr_iterate2_fail_cb + * + * Purpose: Callback routine for iterating over attributes on object that + * always returns failure + * + * Return: Success: 0 + * Failure: -1 + * + * Programmer: Quincey Koziol + * Tuesday, February 20, 2007 + * + *------------------------------------------------------------------------- + */ +static int +attr_iterate2_fail_cb(hid_t H5_ATTR_UNUSED group_id, const char H5_ATTR_UNUSED *attr_name, + const H5A_info_t H5_ATTR_UNUSED *info, void H5_ATTR_UNUSED *_op_data) +{ + return (H5_ITER_ERROR); +} /* end attr_iterate2_fail_cb() */ + +/*------------------------------------------------------------------------- + * Function: attr_iterate_check + * + * Purpose: Check iteration over attributes on an object + * + * Return: Success: 0 + * Failure: -1 + * + * Programmer: Quincey Koziol + * Tuesday, February 20, 2007 + * + *------------------------------------------------------------------------- + */ +static int +attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, + unsigned max_attrs, attr_iter_info_t *iter_info) +{ + unsigned v; /* Local index variable */ + hsize_t skip; /* # of attributes to skip on object */ +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS + unsigned oskip; /* # of attributes to skip on object, with H5Aiterate1 */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif + int old_nerrs; /* Number of errors when entering this check */ + herr_t ret; /* Generic return value */ + + /* Retrieve the current # of reported errors */ + old_nerrs = nerrors; + + /* Iterate over attributes on object */ + iter_info->nskipped = (unsigned)(skip = 
0); + iter_info->order = order; + iter_info->stop = -1; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_cb, iter_info); + CHECK(ret, FAIL, "H5Aiterate2"); + + /* Verify that we visited all the attributes */ + VERIFY(skip, max_attrs, "H5Aiterate2"); + for (v = 0; v < max_attrs; v++) + VERIFY(iter_info->visited[v], TRUE, "H5Aiterate2"); + + /* Iterate over attributes on object */ + iter_info->nskipped = (unsigned)(skip = 0); + iter_info->order = order; + iter_info->stop = -1; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aiterate_by_name"); + + /* Verify that we visited all the attributes */ + VERIFY(skip, max_attrs, "H5Aiterate_by_name"); + for (v = 0; v < max_attrs; v++) + VERIFY(iter_info->visited[v], TRUE, "H5Aiterate_by_name"); + + /* Iterate over attributes on object */ + iter_info->nskipped = (unsigned)(skip = 0); + iter_info->order = order; + iter_info->stop = -1; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aiterate_by_name"); + + /* Verify that we visited all the attributes */ + VERIFY(skip, max_attrs, "H5Aiterate_by_name"); + for (v = 0; v < max_attrs; v++) + VERIFY(iter_info->visited[v], TRUE, "H5Aiterate_by_name"); + +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS + /* Iterate over attributes on object, with H5Aiterate1 */ + iter_info->nskipped = oskip = 0; + iter_info->order = order; + iter_info->stop = -1; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); + CHECK(ret, FAIL, "H5Aiterate1"); + + /* Verify that we visited all the attributes */ + VERIFY(skip, max_attrs, "H5Aiterate1"); + for (v = 0; v < max_attrs; v++) + VERIFY(iter_info->visited[v], TRUE, "H5Aiterate1"); +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif + + /* Skip over some attributes on object */ + iter_info->nskipped = (unsigned)(skip = max_attrs / 2); + iter_info->order = order; + iter_info->stop = -1; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 
skip : ((max_attrs - 1) - skip); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_cb, iter_info); + CHECK(ret, FAIL, "H5Aiterate2"); + + /* Verify that we visited all the attributes */ + VERIFY(skip, max_attrs, "H5Aiterate2"); + if (order == H5_ITER_INC) { + for (v = 0; v < (max_attrs / 2); v++) + VERIFY(iter_info->visited[v + (max_attrs / 2)], TRUE, "H5Aiterate2"); + } /* end if */ + else if (order == H5_ITER_DEC) { + for (v = 0; v < (max_attrs / 2); v++) + VERIFY(iter_info->visited[v], TRUE, "H5Aiterate2"); + } /* end if */ + else { + unsigned nvisit = 0; /* # of links visited */ + + HDassert(order == H5_ITER_NATIVE); + for (v = 0; v < max_attrs; v++) + if (iter_info->visited[v] == TRUE) + nvisit++; + + VERIFY(skip, (max_attrs / 2), "H5Aiterate2"); + } /* end else */ + + /* Skip over some attributes on object */ + iter_info->nskipped = (unsigned)(skip = max_attrs / 2); + iter_info->order = order; + iter_info->stop = -1; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? skip : ((max_attrs - 1) - skip); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aiterate_by_name"); + + /* Verify that we visited all the attributes */ + VERIFY(skip, max_attrs, "H5Aiterate_by_name"); + if (order == H5_ITER_INC) { + for (v = 0; v < (max_attrs / 2); v++) + VERIFY(iter_info->visited[v + (max_attrs / 2)], TRUE, "H5Aiterate_by_name"); + } /* end if */ + else if (order == H5_ITER_DEC) { + for (v = 0; v < (max_attrs / 2); v++) + VERIFY(iter_info->visited[v], TRUE, "H5Aiterate_by_name"); + } /* end if */ + else { + unsigned nvisit = 0; /* # of links visited */ + + HDassert(order == H5_ITER_NATIVE); + for (v = 0; v < max_attrs; v++) + if (iter_info->visited[v] == TRUE) + nvisit++; + + VERIFY(skip, (max_attrs / 2), "H5Aiterate_by_name"); + } /* end else */ + + /* Skip over some attributes on object */ + iter_info->nskipped = (unsigned)(skip = max_attrs / 2); + iter_info->order = order; + iter_info->stop = -1; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? skip : ((max_attrs - 1) - skip); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aiterate_by_name"); + + /* Verify that we visited all the attributes */ + VERIFY(skip, max_attrs, "H5Aiterate_by_name"); + if (order == H5_ITER_INC) { + for (v = 0; v < (max_attrs / 2); v++) + VERIFY(iter_info->visited[v + (max_attrs / 2)], TRUE, "H5Aiterate_by_name"); + } /* end if */ + else if (order == H5_ITER_DEC) { + for (v = 0; v < (max_attrs / 2); v++) + VERIFY(iter_info->visited[v], TRUE, "H5Aiterate_by_name"); + } /* end if */ + else { + unsigned nvisit = 0; /* # of links visited */ + + HDassert(order == H5_ITER_NATIVE); + for (v = 0; v < max_attrs; v++) + if (iter_info->visited[v] == TRUE) + nvisit++; + + VERIFY(skip, (max_attrs / 2), "H5Aiterate_by_name"); + } /* end else */ + +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS + /* Skip over some attributes on object, with H5Aiterate1 */ + iter_info->nskipped = oskip = max_attrs / 2; + iter_info->order = order; + iter_info->stop = -1; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 
(unsigned)oskip : ((max_attrs - 1) - oskip); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); + CHECK(ret, FAIL, "H5Aiterate1"); + + /* Verify that we visited all the links */ + VERIFY(oskip, max_attrs, "H5Aiterate1"); + if (order == H5_ITER_INC) { + for (v = 0; v < (max_attrs / 2); v++) + VERIFY(iter_info->visited[v + (max_attrs / 2)], TRUE, "H5Aiterate1"); + } /* end if */ + else if (order == H5_ITER_DEC) { + for (v = 0; v < (max_attrs / 2); v++) + VERIFY(iter_info->visited[v], TRUE, "H5Aiterate1"); + } /* end if */ + else { + unsigned nvisit = 0; /* # of links visited */ + + HDassert(order == H5_ITER_NATIVE); + for (v = 0; v < max_attrs; v++) + if (iter_info->visited[v] == TRUE) + nvisit++; + + VERIFY(skip, (max_attrs / 2), "H5Aiterate1"); + } /* end else */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif + + /* Iterate over attributes on object, stopping in the middle */ + iter_info->nskipped = (unsigned)(skip = 0); + iter_info->order = order; + iter_info->stop = 3; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_cb, iter_info); + CHECK(ret, FAIL, "H5Aiterate2"); + VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate2"); + VERIFY(iter_info->ncalled, 3, "H5Aiterate2"); + + /* Iterate over attributes on object, stopping in the middle */ + iter_info->nskipped = (unsigned)(skip = 0); + iter_info->order = order; + iter_info->stop = 3; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aiterate_by_name"); + VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate_by_name"); + VERIFY(iter_info->ncalled, 3, "H5Aiterate_by_name"); + + /* Iterate over attributes on object, stopping in the middle */ + iter_info->nskipped = (unsigned)(skip = 0); + iter_info->order = order; + iter_info->stop = 3; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aiterate_by_name"); + VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate_by_name"); + VERIFY(iter_info->ncalled, 3, "H5Aiterate_by_name"); + +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS + /* Iterate over attributes on object, stopping in the middle, with H5Aiterate1() */ + iter_info->nskipped = oskip = 0; + iter_info->order = order; + iter_info->stop = 3; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 
0 : (max_attrs - 1); + HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit); + ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); + CHECK(ret, FAIL, "H5Aiterate1"); + VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate1"); + VERIFY(iter_info->ncalled, 3, "H5Aiterate1"); +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif + + /* Check for iteration routine indicating failure */ + skip = 0; + H5E_BEGIN_TRY + { + ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_fail_cb, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aiterate2"); + + skip = 0; + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_fail_cb, NULL, + H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aiterate_by_name"); + + skip = 0; + H5E_BEGIN_TRY + { + ret = + H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_fail_cb, NULL, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aiterate_by_name"); + + /* Retrieve current # of errors */ + if (old_nerrs == nerrors) + return (0); + else + return (-1); +} /* end attr_iterate_check() */ +#endif + +/**************************************************************** +** +** test_attr_iterate2(): Test basic H5A (attribute) code. +** Tests iterating over attributes by index +** +****************************************************************/ +static void +test_attr_iterate2(hbool_t new_format, hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dset1, dset2, dset3; /* Dataset IDs */ + hid_t my_dataset; /* Current dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + unsigned max_compact; /* Maximum # of links to store in group compactly */ + unsigned min_dense; /* Minimum # of links to store in group "densely" */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? 
*/ + hsize_t nattrs; /* Number of attributes on object */ + hsize_t name_count; /* # of records in name index */ + hsize_t corder_count; /* # of records in creation order index */ +#endif + H5_index_t idx_type; /* Type of index to operate on */ + H5_iter_order_t order; /* Order within in the index */ + attr_iter_info_t iter_info; /* Iterator info */ + hbool_t *visited = NULL; /* Array of flags for visiting links */ +#ifndef NO_ITERATION_RESTART + hsize_t idx; /* Start index for iteration */ +#endif + unsigned use_index; /* Use index on creation order values */ + const char *dsetname; /* Name of dataset for attributes */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned curr_dset; /* Current dataset to work on */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Create dataspace for dataset & attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataset creation property list */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Query the attribute creation properties */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Allocate the "visited link" array */ + iter_info.max_visit = max_compact * 2; + visited = (hbool_t *)HDmalloc(sizeof(hbool_t) * iter_info.max_visit); + CHECK_PTR(visited, "HDmalloc"); + iter_info.visited = visited; + + /* Loop over operating on different indices on link fields */ + for (idx_type = H5_INDEX_NAME; idx_type <= H5_INDEX_CRT_ORDER; idx_type++) { + /* Loop over operating in different orders */ + for (order = H5_ITER_INC; order <= H5_ITER_DEC; order++) { + /* Loop over using index for creation order value */ + for (use_index = FALSE; use_index <= TRUE; use_index++) { + /* Print appropriate test message */ + if (idx_type == H5_INDEX_CRT_ORDER) { + if (order == H5_ITER_INC) { + if (use_index) + MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in " + "Increasing Order w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in " + "Increasing Order w/o Creation Order Index\n")) + } /* end if */ + else { + if (use_index) + MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in " + "Decreasing Order w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in " + "Decreasing Order w/o Creation Order Index\n")) + } /* end else */ + } /* end if */ + else { + if (order == H5_ITER_INC) { + if (use_index) + MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Increasing Order " + "w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Increasing Order " + "w/o Creation Order Index\n")) + } /* end if */ + else { +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if (use_index) + MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Decreasing Order " + "w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Decreasing Order " + "w/o Creation Order Index\n")) +#else + continue; +#endif + } /* end else */ + } /* end else */ + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Set attribute creation order tracking & indexing for object */ + if (new_format == TRUE) { + ret = 
H5Pset_attr_creation_order( + dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0))); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + } /* end if */ + + /* Create datasets */ + dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dcreate2"); + dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dcreate2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + dsetname = DSET1_NAME; + break; + + case 1: + my_dataset = dset2; + dsetname = DSET2_NAME; + break; + + case 2: + my_dataset = dset3; + dsetname = DSET3_NAME; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Check for iterating over object with no attributes (should be OK) */ + ret = H5Aiterate2(my_dataset, idx_type, order, NULL, attr_iterate2_cb, NULL); + CHECK(ret, FAIL, "H5Aiterate2"); + + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, NULL, attr_iterate2_cb, NULL, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aiterate_by_name"); + + ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, NULL, attr_iterate2_cb, NULL, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aiterate_by_name"); + + /* Create attributes, up to limit of compact form */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = + H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif +#ifndef NO_ITERATION_RESTART + /* Check for out of bound iteration */ + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aiterate2"); + + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, NULL, + H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aiterate_by_name"); + + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb, + NULL, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aiterate_by_name"); + + /* Test iteration over attributes stored compactly */ + ret = 
attr_iterate_check(fid, dsetname, my_dataset, idx_type, order, u, &iter_info); + CHECK(ret, FAIL, "attr_iterate_check"); +#endif + } /* end for */ + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + dsetname = DSET1_NAME; + break; + + case 1: + my_dataset = dset2; + dsetname = DSET2_NAME; + break; + + case 2: + my_dataset = dset3; + dsetname = DSET3_NAME; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ + + /* Create more attributes, to push into dense form */ + for (u = max_compact; u < (max_compact * 2); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = + H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + if (u >= max_compact) { + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test"); + } /* end if */ +#endif + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? 
TRUE : FALSE), "H5O__is_attr_dense_test"); + + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } /* end if */ +#endif +#ifndef NO_ITERATION_RESTART + /* Check for out of bound iteration */ + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aiterate2"); + + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, NULL, + H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aiterate_by_name"); + + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb, + NULL, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aiterate_by_name"); + + /* Test iteration over attributes stored densely */ + ret = attr_iterate_check(fid, dsetname, my_dataset, idx_type, order, u, &iter_info); + CHECK(ret, FAIL, "attr_iterate_check"); +#endif + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ + } /* end for */ + } /* end for */ + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Free the "visited link" array */ + HDfree(visited); +} /* test_attr_iterate2() */ + +/*------------------------------------------------------------------------- + * Function: attr_open_by_idx_check + * + * Purpose: Check opening attribute by index on an object + * + * Return: Success: 0 + * Failure: -1 + * + * Programmer: Quincey Koziol + * Wednesday, February 21, 2007 + * + *------------------------------------------------------------------------- + */ +static int +attr_open_by_idx_check(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, unsigned max_attrs) +{ + hid_t attr_id; /* ID of attribute to test */ + H5A_info_t ainfo; /* Attribute info */ + int old_nerrs; /* Number of errors when entering this check */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Retrieve the current # of reported errors */ + old_nerrs = nerrors; + + /* Open each attribute on object by index and check that it's the correct one */ + for (u = 0; u < max_attrs; u++) { + /* Open the attribute */ + attr_id = H5Aopen_by_idx(obj_id, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Aopen_by_idx"); + + /* Get the attribute's information */ + ret = H5Aget_info(attr_id, &ainfo); + CHECK(ret, FAIL, "H5Aget_info"); + + /* Check that the object is the correct one */ + if (order == H5_ITER_INC) { + VERIFY(ainfo.corder, u, "H5Aget_info"); + } /* end if */ + else if (order == H5_ITER_DEC) { + VERIFY(ainfo.corder, (max_attrs - (u + 1)), "H5Aget_info"); + } /* end if */ + else { + /* XXX: What to do about native order? 
*/ + } /* end else */ + + /* Close attribute */ + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ + + /* Retrieve current # of errors */ + if (old_nerrs == nerrors) + return (0); + else + return (-1); +} /* end attr_open_by_idx_check() */ + +/**************************************************************** +** +** test_attr_open_by_idx(): Test basic H5A (attribute) code. +** Tests opening attributes by index +** +****************************************************************/ +static void +test_attr_open_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dset1, dset2, dset3; /* Dataset IDs */ + hid_t my_dataset; /* Current dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + unsigned max_compact; /* Maximum # of links to store in group compactly */ + unsigned min_dense; /* Minimum # of links to store in group "densely" */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? */ + hsize_t nattrs; /* Number of attributes on object */ + hsize_t name_count; /* # of records in name index */ + hsize_t corder_count; /* # of records in creation order index */ +#endif + H5_index_t idx_type; /* Type of index to operate on */ + H5_iter_order_t order; /* Order within in the index */ + unsigned use_index; /* Use index on creation order values */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned curr_dset; /* Current dataset to work on */ + unsigned u; /* Local index variable */ + hid_t ret_id; /* Generic hid_t return value */ + herr_t ret; /* Generic return value */ + + /* Create dataspace for dataset & attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataset creation property list */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Query the attribute creation properties */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Loop over operating on different indices on link fields */ + for (idx_type = H5_INDEX_NAME; idx_type <= H5_INDEX_CRT_ORDER; idx_type++) { + /* Loop over operating in different orders */ + for (order = H5_ITER_INC; order <= H5_ITER_DEC; order++) { + /* Loop over using index for creation order value */ + for (use_index = FALSE; use_index <= TRUE; use_index++) { + /* Print appropriate test message */ + if (idx_type == H5_INDEX_CRT_ORDER) { + if (order == H5_ITER_INC) { + if (use_index) + MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Increasing " + "Order w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Increasing " + "Order w/o Creation Order Index\n")) + } /* end if */ + else { + if (use_index) + MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Decreasing " + "Order w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Decreasing " + "Order w/o Creation Order Index\n")) + } /* end else */ + } /* end if */ + else { + if (order == H5_ITER_INC) { + if (use_index) + MESSAGE(5, ("Testing Opening Attributes By Name Index in Increasing Order " + "w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Opening Attributes By Name Index in Increasing Order w/o " + "Creation Order 
Index\n")) + } /* end if */ + else { +#ifndef NO_DECREASING_ALPHA_ITER_ORDER + if (use_index) + MESSAGE(5, ("Testing Opening Attributes By Name Index in Decreasing Order " + "w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Opening Attributes By Name Index in Decreasing Order w/o " + "Creation Order Index\n")) +#else + continue; +#endif + } /* end else */ + } /* end else */ + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Set attribute creation order tracking & indexing for object */ + if (new_format == TRUE) { + ret = H5Pset_attr_creation_order( + dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0))); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + } /* end if */ + + /* Create datasets */ + dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dcreate2"); + dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dcreate2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Check for opening an attribute on an object with no attributes */ + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen_by_idx"); + + /* Create attributes, up to limit of compact form */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = + H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Check for out of bound opening an attribute on an object */ + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen_by_idx"); + + /* Test opening attributes by index stored compactly */ + ret = attr_open_by_idx_check(my_dataset, idx_type, order, u); + CHECK(ret, FAIL, 
"attr_open_by_idx_check"); + } /* end for */ + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + break; + + case 1: + my_dataset = dset2; + break; + + case 2: + my_dataset = dset3; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ + + /* Create more attributes, to push into dense form */ + for (u = max_compact; u < (max_compact * 2); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = + H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + if (u >= max_compact) { + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test"); + } /* end if */ +#endif + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test"); + + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } /* end if */ +#endif + /* Check for out of bound opening an attribute on an object */ + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT, + H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen_by_idx"); + + /* Test opening attributes by index stored compactly */ + ret = attr_open_by_idx_check(my_dataset, idx_type, order, u); + CHECK(ret, FAIL, "attr_open_by_idx_check"); + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ + } /* end for */ + } /* end for */ + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_open_by_idx() */ + +/*------------------------------------------------------------------------- + * Function: attr_open_check + * + * Purpose: Check opening attribute on an object + * + * Return: Success: 0 + * Failure: -1 + * + * Programmer: Quincey Koziol + * Wednesday, February 21, 2007 + * + *------------------------------------------------------------------------- + */ +static int +attr_open_check(hid_t fid, const char *dsetname, hid_t obj_id, unsigned max_attrs) +{ + hid_t attr_id; /* ID of 
attribute to test */ + H5A_info_t ainfo; /* Attribute info */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + int old_nerrs; /* Number of errors when entering this check */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Retrieve the current # of reported errors */ + old_nerrs = nerrors; + + /* Open each attribute on object by index and check that it's the correct one */ + for (u = 0; u < max_attrs; u++) { + /* Open the attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr_id = H5Aopen(obj_id, attrname, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Aopen"); + + /* Get the attribute's information */ + ret = H5Aget_info(attr_id, &ainfo); + CHECK(ret, FAIL, "H5Aget_info"); + + /* Check that the object is the correct one */ + VERIFY(ainfo.corder, u, "H5Aget_info"); + + /* Close attribute */ + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + + /* Open the attribute */ + attr_id = H5Aopen_by_name(obj_id, ".", attrname, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Aopen_by_name"); + + /* Get the attribute's information */ + ret = H5Aget_info(attr_id, &ainfo); + CHECK(ret, FAIL, "H5Aget_info"); + + /* Check that the object is the correct one */ + VERIFY(ainfo.corder, u, "H5Aget_info"); + + /* Close attribute */ + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + + /* Open the attribute */ + attr_id = H5Aopen_by_name(fid, dsetname, attrname, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Aopen_by_name"); + + /* Get the attribute's information */ + ret = H5Aget_info(attr_id, &ainfo); + CHECK(ret, FAIL, "H5Aget_info"); + + /* Check that the object is the correct one */ + VERIFY(ainfo.corder, u, "H5Aget_info"); + + /* Close attribute */ + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ + + /* Retrieve current # of errors */ + if (old_nerrs == nerrors) + return (0); + else + return (-1); +} /* end attr_open_check() */ + +/**************************************************************** +** +** test_attr_open_by_name(): Test basic H5A (attribute) code. +** Tests opening attributes by name +** +****************************************************************/ +static void +test_attr_open_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dset1, dset2, dset3; /* Dataset IDs */ + hid_t my_dataset; /* Current dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + unsigned max_compact; /* Maximum # of links to store in group compactly */ + unsigned min_dense; /* Minimum # of links to store in group "densely" */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? 
*/ + hsize_t nattrs; /* Number of attributes on object */ + hsize_t name_count; /* # of records in name index */ + hsize_t corder_count; /* # of records in creation order index */ +#endif + unsigned use_index; /* Use index on creation order values */ + const char *dsetname; /* Name of dataset for attributes */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned curr_dset; /* Current dataset to work on */ + unsigned u; /* Local index variable */ + hid_t ret_id; /* Generic hid_t return value */ + herr_t ret; /* Generic return value */ + + /* Create dataspace for dataset & attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataset creation property list */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Query the attribute creation properties */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Loop over using index for creation order value */ + for (use_index = FALSE; use_index <= TRUE; use_index++) { + /* Print appropriate test message */ + if (use_index) + MESSAGE(5, ("Testing Opening Attributes By Name w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Opening Attributes By Name w/o Creation Order Index\n")) + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Set attribute creation order tracking & indexing for object */ + if (new_format == TRUE) { + ret = H5Pset_attr_creation_order( + dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0))); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + } /* end if */ + + /* Create datasets */ + dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dcreate2"); + dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dcreate2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + dsetname = DSET1_NAME; + break; + + case 1: + my_dataset = dset2; + dsetname = DSET2_NAME; + break; + + case 2: + my_dataset = dset3; + dsetname = DSET3_NAME; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Check for opening a non-existent attribute on an object with no attributes */ + H5E_BEGIN_TRY + { + ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen"); + + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen_by_name"); + + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen_by_name"); + + /* Create attributes, up to limit of compact form */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, 
sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Check for opening a non-existent attribute on an object with compact attribute storage */ + H5E_BEGIN_TRY + { + ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen"); + + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen_by_name"); + + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen_by_name"); + + /* Test opening attributes stored compactly */ + ret = attr_open_check(fid, dsetname, my_dataset, u); + CHECK(ret, FAIL, "attr_open_check"); + } /* end for */ + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + dsetname = DSET1_NAME; + break; + + case 1: + my_dataset = dset2; + dsetname = DSET2_NAME; + break; + + case 2: + my_dataset = dset3; + dsetname = DSET3_NAME; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ + + /* Create more attributes, to push into dense form */ + for (u = max_compact; u < (max_compact * 2); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + if (u >= max_compact) { + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test"); + } /* end if */ +#endif + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? 
TRUE : FALSE), "H5O__is_attr_dense_test"); + + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } /* end if */ +#endif + /* Check for opening a non-existent attribute on an object with dense attribute storage */ + H5E_BEGIN_TRY + { + ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen"); + + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen_by_name"); + + H5E_BEGIN_TRY + { + ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Aopen_by_name"); + + /* Test opening attributes stored compactly */ + ret = attr_open_check(fid, dsetname, my_dataset, u); + CHECK(ret, FAIL, "attr_open_check"); + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_open_by_name() */ + +/**************************************************************** +** +** test_attr_create_by_name(): Test basic H5A (attribute) code. +** Tests creating attributes by name +** +****************************************************************/ +static void +test_attr_create_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dset1, dset2, dset3; /* Dataset IDs */ + hid_t my_dataset; /* Current dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + unsigned max_compact; /* Maximum # of links to store in group compactly */ + unsigned min_dense; /* Minimum # of links to store in group "densely" */ +#if 0 + htri_t is_empty; /* Are there any attributes? */ + htri_t is_dense; /* Are attributes stored densely? 
*/ + hsize_t nattrs; /* Number of attributes on object */ + hsize_t name_count; /* # of records in name index */ + hsize_t corder_count; /* # of records in creation order index */ +#endif + unsigned use_index; /* Use index on creation order values */ + const char *dsetname; /* Name of dataset for attributes */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned curr_dset; /* Current dataset to work on */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Create dataspace for dataset & attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataset creation property list */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Query the attribute creation properties */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Loop over using index for creation order value */ + for (use_index = FALSE; use_index <= TRUE; use_index++) { + /* Print appropriate test message */ + if (use_index) + MESSAGE(5, ("Testing Creating Attributes By Name w/Creation Order Index\n")) + else + MESSAGE(5, ("Testing Creating Attributes By Name w/o Creation Order Index\n")) + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Set attribute creation order tracking & indexing for object */ + if (new_format == TRUE) { + ret = H5Pset_attr_creation_order( + dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0))); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + } /* end if */ + + /* Create datasets */ + dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dcreate2"); + dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset3, FAIL, "H5Dcreate2"); + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + dsetname = DSET1_NAME; + break; + + case 1: + my_dataset = dset2; + dsetname = DSET2_NAME; + break; + + case 2: + my_dataset = dset3; + dsetname = DSET3_NAME; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ +#if 0 + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Create attributes, up to limit of compact form */ + for (u = 0; u < max_compact; u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate_by_name(fid, dsetname, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate_by_name"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ 
+#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Test opening attributes stored compactly */ + ret = attr_open_check(fid, dsetname, my_dataset, u); + CHECK(ret, FAIL, "attr_open_check"); + } /* end for */ + + /* Work on all the datasets */ + for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { + switch (curr_dset) { + case 0: + my_dataset = dset1; + dsetname = DSET1_NAME; + break; + + case 1: + my_dataset = dset2; + dsetname = DSET2_NAME; + break; + + case 2: + my_dataset = dset3; + dsetname = DSET3_NAME; + break; + + default: + HDassert(0 && "Too many datasets!"); + } /* end switch */ + + /* Create more attributes, to push into dense form */ + for (u = max_compact; u < (max_compact * 2); u++) { + /* Create attribute */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + attr = H5Acreate_by_name(fid, dsetname, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate_by_name"); + + /* Write data into the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Verify state of object */ + if (u >= max_compact) { + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test"); + } /* end if */ +#endif + /* Verify information for new attribute */ + ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); + CHECK(ret, FAIL, "attr_info_by_idx_check"); + } /* end for */ +#if 0 + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test"); + + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } /* end if */ +#endif + /* Test opening attributes stored compactly */ + ret = attr_open_check(fid, dsetname, my_dataset, u); + CHECK(ret, FAIL, "attr_open_check"); + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_create_by_name() */ + +/**************************************************************** +** +** test_attr_shared_write(): Test basic H5A (attribute) code. 
+** Tests writing mix of shared & un-shared attributes in "compact" & "dense" storage +** +****************************************************************/ +static void +test_attr_shared_write(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + hid_t my_fcpl; /* File creation property list ID */ + hid_t dataset, dataset2; /* Dataset IDs */ + hid_t attr_tid; /* Attribute's datatype ID */ + hid_t sid, big_sid; /* Dataspace IDs */ + hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute */ + unsigned max_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense; /* Minimum # of attributes to store "densely" */ +#if 0 + htri_t is_dense; /* Are attributes stored densely? */ + htri_t is_shared; /* Is attributes shared? */ + hsize_t shared_refcount; /* Reference count of shared attribute */ +#endif + unsigned attr_value; /* Attribute value */ + unsigned *big_value; /* Data for "big" attribute */ +#if 0 + size_t mesg_count; /* # of shared messages */ +#endif + unsigned test_shared; /* Index over shared component type */ + unsigned u; /* Local index variable */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Writing Shared & Unshared Attributes in Compact & Dense Storage\n")); + + /* Allocate & initialize "big" attribute data */ + big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned)); + CHECK_PTR(big_value, "HDmalloc"); + HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3)); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create "big" dataspace for "large" attributes */ + big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL); + CHECK(big_sid, FAIL, "H5Screate_simple"); + + /* Loop over type of shared components */ + for (test_shared = 0; test_shared < 3; test_shared++) { + /* Make copy of file creation property list */ + my_fcpl = H5Pcopy(fcpl); + CHECK(my_fcpl, FAIL, "H5Pcopy"); + + /* Set up datatype for attributes */ + attr_tid = H5Tcopy(H5T_NATIVE_UINT); + CHECK(attr_tid, FAIL, "H5Tcopy"); + + /* Special setup for each type of shared components */ + if (test_shared == 0) { + /* Make attributes > 500 bytes shared */ + ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + } /* end if */ + else { + /* Set up copy of file creation property list */ + + ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3); + CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); + + /* Make attributes > 500 bytes shared */ + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + + /* Make datatypes & dataspaces > 1 byte shared (i.e. 
all of them :-) */ + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + } /* end else */ + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close FCPL copy */ + ret = H5Pclose(my_fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Commit datatype to file */ + if (test_shared == 2) { + ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + } /* end if */ + + /* Set up to query the object creation properties */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Create datasets */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset2, FAIL, "H5Dcreate2"); + + /* Check on dataset's message storage status */ + if (test_shared != 0) { +#if 0 + /* Datasets' datatypes can be shared */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); + + /* Datasets' dataspace can be shared */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); +#endif + } /* end if */ + + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); +#if 0 + /* Check on datasets' attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + is_dense = H5O__is_attr_dense_test(dataset2); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add attributes to each dataset, until after converting to dense storage */ + for (u = 0; u < max_compact * 2; u++) { + /* Create attribute name */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + + /* Alternate between creating "small" & "big" attributes */ + if (u % 2) { + /* Create "small" attribute on first dataset */ + attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); +#endif + /* Write data into the attribute */ + attr_value = u + 1; + ret = H5Awrite(attr, attr_tid, &attr_value); + CHECK(ret, FAIL, "H5Awrite"); + } /* end if */ + else { + /* Create "big" attribute on first dataset */ + attr = H5Acreate2(dataset, attrname, 
attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + /* Write data into the attribute */ + big_value[0] = u + 1; + ret = H5Awrite(attr, attr_tid, big_value); + CHECK(ret, FAIL, "H5Awrite"); +#if 0 + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + } /* end else */ + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + if (u < max_compact) + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + + /* Alternate between creating "small" & "big" attributes */ + if (u % 2) { + /* Create "small" attribute on second dataset */ + attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); +#endif + /* Write data into the attribute */ + attr_value = u + 1; + ret = H5Awrite(attr, attr_tid, &attr_value); + CHECK(ret, FAIL, "H5Awrite"); + } /* end if */ + else { + /* Create "big" attribute on second dataset */ + attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + /* Write data into the attribute */ + big_value[0] = u + 1; + ret = H5Awrite(attr, attr_tid, big_value); + CHECK(ret, FAIL, "H5Awrite"); +#if 0 + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); +#endif + } /* end else */ + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset2); + if (u < max_compact) + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + } /* end for */ + + /* Close attribute's datatype */ + ret = H5Tclose(attr_tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Datasets */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dataset2); + CHECK(ret, FAIL, "H5Dclose"); +#if 0 + /* Check on shared message status now */ + if (test_shared != 0) { + if (test_shared == 1) { + /* Check on datatype storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); + } /* end if */ + + /* Check on dataspace storage status 
*/ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); + } /* end if */ +#endif + /* Unlink datasets with attributes */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Unlink committed datatype */ + if (test_shared == 2) { + ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + } /* end if */ +#if 0 + /* Check on attribute storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + if (test_shared != 0) { + /* Check on datatype storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + /* Check on dataspace storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + } /* end if */ +#endif + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif + } /* end for */ + + /* Close dataspaces */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(big_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Release memory */ + HDfree(big_value); +} /* test_attr_shared_write() */ + +/**************************************************************** +** +** test_attr_shared_rename(): Test basic H5A (attribute) code. +** Tests renaming shared attributes in "compact" & "dense" storage +** +****************************************************************/ +static void +test_attr_shared_rename(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* HDF5 File ID */ + hid_t my_fcpl; /* File creation property list ID */ + hid_t dataset, dataset2; /* Dataset ID2 */ + hid_t attr_tid; /* Attribute's datatype ID */ + hid_t sid, big_sid; /* Dataspace IDs */ + hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute on first dataset */ + char attrname2[NAME_BUF_SIZE]; /* Name of attribute on second dataset */ + unsigned max_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense; /* Minimum # of attributes to store "densely" */ +#if 0 + htri_t is_dense; /* Are attributes stored densely? */ + htri_t is_shared; /* Is attributes shared? 
*/ + hsize_t shared_refcount; /* Reference count of shared attribute */ +#endif + unsigned attr_value; /* Attribute value */ + unsigned *big_value; /* Data for "big" attribute */ +#if 0 + size_t mesg_count; /* # of shared messages */ +#endif + unsigned test_shared; /* Index over shared component type */ + unsigned u; /* Local index variable */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Renaming Shared & Unshared Attributes in Compact & Dense Storage\n")); + + /* Allocate & initialize "big" attribute data */ + big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned)); + CHECK_PTR(big_value, "HDmalloc"); + HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3)); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create "big" dataspace for "large" attributes */ + big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL); + CHECK(big_sid, FAIL, "H5Screate_simple"); + + /* Loop over type of shared components */ + for (test_shared = 0; test_shared < 3; test_shared++) { + /* Make copy of file creation property list */ + my_fcpl = H5Pcopy(fcpl); + CHECK(my_fcpl, FAIL, "H5Pcopy"); + + /* Set up datatype for attributes */ + attr_tid = H5Tcopy(H5T_NATIVE_UINT); + CHECK(attr_tid, FAIL, "H5Tcopy"); + + /* Special setup for each type of shared components */ + if (test_shared == 0) { + /* Make attributes > 500 bytes shared */ + ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + } /* end if */ + else { + /* Set up copy of file creation property list */ + + ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3); + CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); + + /* Make attributes > 500 bytes shared */ + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + + /* Make datatypes & dataspaces > 1 byte shared (i.e. 
all of them :-) */ + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + } /* end else */ + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close FCPL copy */ + ret = H5Pclose(my_fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Commit datatype to file */ + if (test_shared == 2) { + ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + } /* end if */ + + /* Set up to query the object creation properties */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Create datasets */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset2, FAIL, "H5Dcreate2"); +#if 0 + /* Check on dataset's message storage status */ + if (test_shared != 0) { + /* Datasets' datatypes can be shared */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); + + /* Datasets' dataspace can be shared */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); + } /* end if */ +#endif + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); +#if 0 + /* Check on datasets' attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + is_dense = H5O__is_attr_dense_test(dataset2); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add attributes to each dataset, until after converting to dense storage */ + for (u = 0; u < max_compact * 2; u++) { + /* Create attribute name */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + + /* Alternate between creating "small" & "big" attributes */ + if (u % 2) { + /* Create "small" attribute on first dataset */ + attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); +#endif + /* Write data into the attribute */ + attr_value = u + 1; + ret = H5Awrite(attr, attr_tid, &attr_value); + CHECK(ret, FAIL, "H5Awrite"); + } /* end if */ + else { + /* Create "big" attribute on first dataset */ + attr = H5Acreate2(dataset, attrname, attr_tid, 
big_sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + /* Write data into the attribute */ + big_value[0] = u + 1; + ret = H5Awrite(attr, attr_tid, big_value); + CHECK(ret, FAIL, "H5Awrite"); +#if 0 + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + } /* end else */ + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + if (u < max_compact) + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + + /* Alternate between creating "small" & "big" attributes */ + if (u % 2) { + /* Create "small" attribute on second dataset */ + attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); +#endif + /* Write data into the attribute */ + attr_value = u + 1; + ret = H5Awrite(attr, attr_tid, &attr_value); + CHECK(ret, FAIL, "H5Awrite"); + } /* end if */ + else { + /* Create "big" attribute on second dataset */ + attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + /* Write data into the attribute */ + big_value[0] = u + 1; + ret = H5Awrite(attr, attr_tid, big_value); + CHECK(ret, FAIL, "H5Awrite"); +#if 0 + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); +#endif + } /* end else */ + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset2); + if (u < max_compact) + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + + /* Create new attribute name */ + HDsnprintf(attrname2, sizeof(attrname2), "new attr %02u", u); + + /* Change second dataset's attribute's name */ + ret = H5Arename_by_name(fid, DSET2_NAME, attrname, attrname2, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Arename_by_name"); + + /* Check refcount on attributes now */ + + /* Check refcount on renamed attribute */ + attr = H5Aopen(dataset2, attrname2, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); +#if 0 + if (u % 2) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute 
is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* end else */ +#endif + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Check refcount on original attribute */ + attr = H5Aopen(dataset, attrname, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); +#if 0 + if (u % 2) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* end else */ +#endif + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Change second dataset's attribute's name back to original */ + ret = H5Arename_by_name(fid, DSET2_NAME, attrname2, attrname, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Arename_by_name"); + + /* Check refcount on attributes now */ + + /* Check refcount on renamed attribute */ + attr = H5Aopen(dataset2, attrname, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); +#if 0 + if (u % 2) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + } /* end else */ +#endif + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Check refcount on original attribute */ + attr = H5Aopen(dataset, attrname, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); +#if 0 + if (u % 2) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + } /* end else */ +#endif + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ + + /* Close attribute's datatype */ + ret = H5Tclose(attr_tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Datasets */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dataset2); + CHECK(ret, FAIL, "H5Dclose"); +#if 0 + /* Check on shared message status now */ + if (test_shared != 0) { + if (test_shared == 1) { + /* Check on datatype storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); + } /* end if */ + + /* Check on dataspace storage status */ + ret = 
H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); + } /* end if */ +#endif + /* Unlink datasets with attributes */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "HLdelete"); + ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Unlink committed datatype */ + if (test_shared == 2) { + ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + } /* end if */ +#if 0 + /* Check on attribute storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + if (test_shared != 0) { + /* Check on datatype storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + /* Check on dataspace storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + } /* end if */ +#endif + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif + } /* end for */ + + /* Close dataspaces */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(big_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Release memory */ + HDfree(big_value); +} /* test_attr_shared_rename() */ + +/**************************************************************** +** +** test_attr_shared_delete(): Test basic H5A (attribute) code. +** Tests deleting shared attributes in "compact" & "dense" storage +** +****************************************************************/ +static void +test_attr_shared_delete(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + hid_t my_fcpl; /* File creation property list ID */ + hid_t dataset, dataset2; /* Dataset IDs */ + hid_t attr_tid; /* Attribute's datatype ID */ + hid_t sid, big_sid; /* Dataspace IDs */ + hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute on first dataset */ + unsigned max_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense; /* Minimum # of attributes to store "densely" */ +#if 0 + htri_t is_dense; /* Are attributes stored densely? */ + htri_t is_shared; /* Is attributes shared? 
*/ + hsize_t shared_refcount; /* Reference count of shared attribute */ +#endif + unsigned attr_value; /* Attribute value */ + unsigned *big_value; /* Data for "big" attribute */ +#if 0 + size_t mesg_count; /* # of shared messages */ +#endif + unsigned test_shared; /* Index over shared component type */ + unsigned u; /* Local index variable */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Deleting Shared & Unshared Attributes in Compact & Dense Storage\n")); + + /* Allocate & initialize "big" attribute data */ + big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned)); + CHECK_PTR(big_value, "HDmalloc"); + HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3)); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create "big" dataspace for "large" attributes */ + big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL); + CHECK(big_sid, FAIL, "H5Screate_simple"); + + /* Loop over type of shared components */ + for (test_shared = 0; test_shared < 3; test_shared++) { + /* Make copy of file creation property list */ + my_fcpl = H5Pcopy(fcpl); + CHECK(my_fcpl, FAIL, "H5Pcopy"); + + /* Set up datatype for attributes */ + attr_tid = H5Tcopy(H5T_NATIVE_UINT); + CHECK(attr_tid, FAIL, "H5Tcopy"); + + /* Special setup for each type of shared components */ + if (test_shared == 0) { + /* Make attributes > 500 bytes shared */ + ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + } /* end if */ + else { + /* Set up copy of file creation property list */ + + ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3); + CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); + + /* Make attributes > 500 bytes shared */ + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + + /* Make datatypes & dataspaces > 1 byte shared (i.e. 
all of them :-) */ + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + } /* end else */ + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close FCPL copy */ + ret = H5Pclose(my_fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Commit datatype to file */ + if (test_shared == 2) { + ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + } /* end if */ + + /* Set up to query the object creation properties */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Create datasets */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset2, FAIL, "H5Dcreate2"); +#if 0 + /* Check on dataset's message storage status */ + if (test_shared != 0) { + /* Datasets' datatypes can be shared */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); + + /* Datasets' dataspace can be shared */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); + } /* end if */ +#endif + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); +#if 0 + /* Check on datasets' attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + is_dense = H5O__is_attr_dense_test(dataset2); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add attributes to each dataset, until after converting to dense storage */ + for (u = 0; u < max_compact * 2; u++) { + /* Create attribute name */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + + /* Alternate between creating "small" & "big" attributes */ + if (u % 2) { + /* Create "small" attribute on first dataset */ + attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); +#endif + /* Write data into the attribute */ + attr_value = u + 1; + ret = H5Awrite(attr, attr_tid, &attr_value); + CHECK(ret, FAIL, "H5Awrite"); + } /* end if */ + else { + /* Create "big" attribute on first dataset */ + attr = H5Acreate2(dataset, attrname, attr_tid, 
big_sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + /* Write data into the attribute */ + big_value[0] = u + 1; + ret = H5Awrite(attr, attr_tid, big_value); + CHECK(ret, FAIL, "H5Awrite"); +#if 0 + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + } /* end else */ + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + if (u < max_compact) + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + + /* Alternate between creating "small" & "big" attributes */ + if (u % 2) { + /* Create "small" attribute on second dataset */ + attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); +#endif + /* Write data into the attribute */ + attr_value = u + 1; + ret = H5Awrite(attr, attr_tid, &attr_value); + CHECK(ret, FAIL, "H5Awrite"); + } /* end if */ + else { + /* Create "big" attribute on second dataset */ + attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + /* Write data into the attribute */ + big_value[0] = u + 1; + ret = H5Awrite(attr, attr_tid, big_value); + CHECK(ret, FAIL, "H5Awrite"); +#if 0 + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); +#endif + } /* end else */ + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset2); + if (u < max_compact) + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + } /* end for */ + + /* Delete attributes from second dataset */ + for (u = 0; u < max_compact * 2; u++) { + /* Create attribute name */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + + /* Delete second dataset's attribute */ + ret = H5Adelete_by_name(fid, DSET2_NAME, attrname, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Adelete_by_name"); + + /* Check refcount on attributes now */ + + /* Check refcount on first dataset's attribute */ + attr = H5Aopen(dataset, attrname, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); +#if 0 + if (u % 2) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, 
FALSE, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* end else */ +#endif + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ + + /* Close attribute's datatype */ + ret = H5Tclose(attr_tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Datasets */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dataset2); + CHECK(ret, FAIL, "H5Dclose"); +#if 0 + /* Check on shared message status now */ + if (test_shared != 0) { + if (test_shared == 1) { + /* Check on datatype storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); + } /* end if */ + + /* Check on dataspace storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); + } /* end if */ +#endif + /* Unlink datasets with attributes */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Unlink committed datatype */ + if (test_shared == 2) { + ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + } /* end if */ +#if 0 + /* Check on attribute storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + if (test_shared != 0) { + /* Check on datatype storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + /* Check on dataspace storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + } /* end if */ +#endif + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif + } /* end for */ + + /* Close dataspaces */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(big_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Release memory */ + HDfree(big_value); +} /* test_attr_shared_delete() */ + +/**************************************************************** +** +** test_attr_shared_unlink(): Test basic H5A (attribute) code. 
+** Tests unlinking object with shared attributes in "compact" & "dense" storage +** +****************************************************************/ +static void +test_attr_shared_unlink(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + hid_t my_fcpl; /* File creation property list ID */ + hid_t dataset, dataset2; /* Dataset IDs */ + hid_t attr_tid; /* Attribute's datatype ID */ + hid_t sid, big_sid; /* Dataspace IDs */ + hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */ + hid_t attr; /* Attribute ID */ + hid_t dcpl; /* Dataset creation property list ID */ + char attrname[NAME_BUF_SIZE]; /* Name of attribute on first dataset */ + unsigned max_compact; /* Maximum # of attributes to store compactly */ + unsigned min_dense; /* Minimum # of attributes to store "densely" */ +#if 0 + htri_t is_dense; /* Are attributes stored densely? */ + htri_t is_shared; /* Is attributes shared? */ + hsize_t shared_refcount; /* Reference count of shared attribute */ +#endif + unsigned attr_value; /* Attribute value */ + unsigned *big_value; /* Data for "big" attribute */ +#if 0 + size_t mesg_count; /* # of shared messages */ +#endif + unsigned test_shared; /* Index over shared component type */ + unsigned u; /* Local index variable */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Unlinking Object with Shared Attributes in Compact & Dense Storage\n")); + + /* Allocate & initialize "big" attribute data */ + big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned)); + CHECK_PTR(big_value, "HDmalloc"); + HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3)); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create "big" dataspace for "large" attributes */ + big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL); + CHECK(big_sid, FAIL, "H5Screate_simple"); + + /* Loop over type of shared components */ + for (test_shared = 0; test_shared < 3; test_shared++) { + /* Make copy of file creation property list */ + my_fcpl = H5Pcopy(fcpl); + CHECK(my_fcpl, FAIL, "H5Pcopy"); + + /* Set up datatype for attributes */ + attr_tid = H5Tcopy(H5T_NATIVE_UINT); + CHECK(attr_tid, FAIL, "H5Tcopy"); + + /* Special setup for each type of shared components */ + if (test_shared == 0) { + /* Make attributes > 500 bytes shared */ + ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + } /* end if */ + else { + /* Set up copy of file creation property list */ + + ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3); + CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); + + /* Make attributes > 500 bytes shared */ + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + + /* Make datatypes & dataspaces > 1 byte shared (i.e. 
all of them :-) */ + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + } /* end else */ + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close FCPL copy */ + ret = H5Pclose(my_fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); +#endif + + /* Re-open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Commit datatype to file */ + if (test_shared == 2) { + ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + } /* end if */ + + /* Set up to query the object creation properties */ + if (dcpl_g == H5P_DEFAULT) { + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + } + else { + dcpl = H5Pcopy(dcpl_g); + CHECK(dcpl, FAIL, "H5Pcopy"); + } + + /* Create datasets */ + dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset2, FAIL, "H5Dcreate2"); +#if 0 + /* Check on dataset's message storage status */ + if (test_shared != 0) { + /* Datasets' datatypes can be shared */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); + + /* Datasets' dataspace can be shared */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); + } /* end if */ +#endif + /* Retrieve limits for compact/dense attribute storage */ + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Close property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); +#if 0 + /* Check on datasets' attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + is_dense = H5O__is_attr_dense_test(dataset2); + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); +#endif + /* Add attributes to each dataset, until after converting to dense storage */ + for (u = 0; u < max_compact * 2; u++) { + /* Create attribute name */ + HDsnprintf(attrname, sizeof(attrname), "attr %02u", u); + + /* Alternate between creating "small" & "big" attributes */ + if (u % 2) { + /* Create "small" attribute on first dataset */ + attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); +#endif + /* Write data into the attribute */ + attr_value = u + 1; + ret = H5Awrite(attr, attr_tid, &attr_value); + CHECK(ret, FAIL, "H5Awrite"); + } /* end if */ + else { + /* Create "big" attribute on first dataset */ + attr = H5Acreate2(dataset, attrname, attr_tid, 
big_sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* ChecFk that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + /* Write data into the attribute */ + big_value[0] = u + 1; + ret = H5Awrite(attr, attr_tid, big_value); + CHECK(ret, FAIL, "H5Awrite"); +#if 0 + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + } /* end else */ + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + if (u < max_compact) + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + + /* Alternate between creating "small" & "big" attributes */ + if (u % 2) { + /* Create "small" attribute on second dataset */ + attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); +#endif + /* Write data into the attribute */ + attr_value = u + 1; + ret = H5Awrite(attr, attr_tid, &attr_value); + CHECK(ret, FAIL, "H5Awrite"); + } /* end if */ + else { + /* Create "big" attribute on second dataset */ + attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); +#if 0 + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); +#endif + /* Write data into the attribute */ + big_value[0] = u + 1; + ret = H5Awrite(attr, attr_tid, big_value); + CHECK(ret, FAIL, "H5Awrite"); +#if 0 + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); +#endif + } /* end else */ + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); +#if 0 + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset2); + if (u < max_compact) + VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + } /* end for */ + + /* Close attribute's datatype */ + ret = H5Tclose(attr_tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close second dataset */ + ret = H5Dclose(dataset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Unlink second dataset */ + ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + +#if 0 + /* Check on first dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test"); +#endif + /* Check ref count on attributes of first dataset */ + for (u = 0; u < max_compact * 2; u++) { + /* Create attribute name */ + HDsnprintf(attrname, 
sizeof(attrname), "attr %02u", u); + + /* Open attribute on first dataset */ + attr = H5Aopen(dataset, attrname, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); +#if 0 + if (u % 2) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, FALSE, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, TRUE, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* end else */ +#endif + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + } /* end for */ + + /* Close Datasets */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Unlink first dataset */ + ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Unlink committed datatype */ + if (test_shared == 2) { + ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + } /* end if */ +#if 0 + /* Check on attribute storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + if (test_shared != 0) { + /* Check on datatype storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + /* Check on dataspace storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + } /* end if */ +#endif + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + if (h5_using_default_driver(NULL)) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } +#endif + } /* end for */ + + /* Close dataspaces */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(big_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Release memory */ + HDfree(big_value); +} /* test_attr_shared_unlink() */ + +/**************************************************************** +** +** test_attr_bug1(): Test basic H5A (attribute) code. +** Tests odd sequence of allocating and deallocating space in the file. +** The series of actions below constructs a file with an attribute +** in each object header chunk, except the first. Then, the attributes +** are removed and re-created in a way that makes the object header +** allocation code remove an object header chunk "in the middle" of +** the sequence of the chunks. 
+** +****************************************************************/ +static void +test_attr_bug1(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + hid_t gid; /* Group ID */ + hid_t aid; /* Attribute ID */ + hid_t sid; /* Dataspace ID */ + herr_t ret; /* Generic return status */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Allocating and De-allocating Attributes in Unusual Way\n")); + + /* Create dataspace ID for attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create main group to operate on */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file and create another group, then attribute on first group */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create second group */ + gid = H5Gcreate2(fid, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Re-open first group */ + gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Create attribute on first group */ + aid = H5Acreate2(gid, ATTR7_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file and create another group, then another attribute on first group */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create third group */ + gid = H5Gcreate2(fid, GROUP3_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Unlink second group */ + ret = H5Ldelete(fid, GROUP2_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Re-open first group */ + gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Create another attribute on first group */ + aid = H5Acreate2(gid, ATTR8_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file and re-create attributes on first group */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Re-open first group */ + gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Delete first attribute */ + ret = H5Adelete(gid, ATTR7_NAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Re-create first attribute */ + aid = H5Acreate2(gid, ATTR7_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Delete second attribute */ + ret = H5Adelete(gid, ATTR8_NAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Re-create second attribute */ + aid = H5Acreate2(gid, ATTR8_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, 
"H5Gclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close dataspace ID */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Gclose"); +} /* test_attr_bug1() */ + +/**************************************************************** +** +** test_attr_bug2(): Test basic H5A (attribute) code. +** Tests deleting a large number of attributes with the +** intention of creating a null message with a size that +** is too large. This routine deletes every other +** attribute, but the original bug could also be +** reproduced by deleting every attribute except a few to +** keep the chunk open. +** +****************************************************************/ +static void +test_attr_bug2(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + hid_t gid; /* Group ID */ + hid_t aid; /* Attribute ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + hid_t gcpl; /* Group creation property list */ + hsize_t dims[2] = {10, 100}; /* Attribute dimensions */ + char aname[16]; /* Attribute name */ + unsigned i; /* index */ + herr_t ret; /* Generic return status */ + htri_t tri_ret; /* htri_t return status */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Allocating and De-allocating Attributes in Unusual Way\n")); + + /* Create group creation property list */ + gcpl = H5Pcreate(H5P_GROUP_CREATE); + CHECK(gcpl, FAIL, "H5Pcreate"); + + /* Prevent the library from switching to dense attribute storage */ + /* Not doing this with the latest format actually triggers a different bug. + * This will be tested here as soon as it is fixed. -NAF + */ + ret = H5Pset_attr_phase_change(gcpl, BUG2_NATTR + 10, BUG2_NATTR + 5); + CHECK(ret, FAIL, "H5Pset_attr_phase_change"); + + /* Create dataspace ID for attributes */ + sid = H5Screate_simple(2, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create main group to operate on */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, gcpl, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Create attributes on group */ + for (i = 0; i < BUG2_NATTR; i++) { + HDsnprintf(aname, sizeof(aname), "%03u", i); + aid = H5Acreate2(gid, aname, H5T_STD_I32LE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + } + + /* Delete every other attribute */ + for (i = 1; i < BUG2_NATTR; i += 2) { + HDsnprintf(aname, sizeof(aname), "%03u", i); + ret = H5Adelete(gid, aname); + CHECK(ret, FAIL, "H5Adelete"); + } + + /* Close IDs */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Reopen file and group */ + fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen"); + + /* Open an attribute in the middle */ + i = (BUG2_NATTR / 4) * 2; + HDsnprintf(aname, sizeof(aname), "%03u", i); + aid = H5Aopen(gid, aname, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Aopen"); + + /* Verify that the attribute has the correct datatype */ + tid = H5Aget_type(aid); + CHECK(tid, FAIL, "H5Aget_type"); + + tri_ret = H5Tequal(tid, H5T_STD_I32LE); + VERIFY(tri_ret, TRUE, "H5Tequal"); + + /* Close IDs */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + 
ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Now test a variation on this bug - where either the size of chunk 0 goes + * down a "notch" or two, or chunk 1 becomes completely null at the same + * time that a null message that is too large is formed */ + dims[0] = 25; + dims[1] = 41; /* 1025*4 byte attribute size */ + + /* Create dataspace ID for attributes */ + sid = H5Screate_simple(2, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create main group to operate on */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, gcpl, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Create attributes on group */ + for (i = 0; i < BUG2_NATTR2; i++) { + HDsnprintf(aname, sizeof(aname), "%03u", i); + aid = H5Acreate2(gid, aname, H5T_STD_I32LE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + } + + /* Delete every other attribute */ + for (i = 0; i < BUG2_NATTR2; i++) { + HDsnprintf(aname, sizeof(aname), "%03u", i); + ret = H5Adelete(gid, aname); + CHECK(ret, FAIL, "H5Adelete"); + } + + /* Close IDs */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Pclose(gcpl); + CHECK(ret, FAIL, "H5Pclose"); +} /* test_attr_bug2() */ + +/**************************************************************** +** +** test_attr_bug3(): Test basic H5A (attribute) code. +** Tests creating and deleting attributes which use a +** datatype and/or dataspace stored in the same object +** header. +** +****************************************************************/ +static void +test_attr_bug3(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + hid_t aid1, aid2; /* Attribute IDs */ + hid_t sid1, sid2; /* Dataspace ID */ + hid_t tid1, tid2; /* Datatype IDs */ + hid_t did; /* Dataset ID */ + hsize_t dims1[2] = {2, 2}, dims2[2] = {3, 3}; /* Dimensions */ + int wdata1[2][2]; + unsigned wdata2[3][3]; /* Write buffers */ + unsigned u, v; /* Local index variables */ + herr_t ret; /* Generic return status */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Attributes in the Same Header as their Datatypes\n")); + + /* Create dataspaces */ + sid1 = H5Screate_simple(2, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + sid2 = H5Screate_simple(2, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Create file to operate on */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create datatypes and commit tid1 */ + tid1 = H5Tcopy(H5T_STD_I16BE); + CHECK(tid1, FAIL, "H5Tcopy"); + tid2 = H5Tcopy(H5T_STD_U64LE); + CHECK(tid1, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid, "dtype", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Create dataset */ + did = H5Dcreate2(fid, "dset", tid2, sid2, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Create attribute on datatype, using that datatype as its datatype */ + aid1 = H5Acreate2(tid1, "attr", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid1, FAIL, "H5Acreate2"); + + /* Create attribute on dataset, using its datatype and dataspace */ + aid2 = H5Acreate2(did, "attr", tid2, sid2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid2, FAIL, "H5Acreate2"); + + /* Close attributes */ + ret = H5Aclose(aid1); + CHECK(ret, FAIL, "H5Aclose"); + ret = 
H5Aclose(aid2); + CHECK(ret, FAIL, "H5Aclose"); + + /* Reopen attributes */ + aid1 = H5Aopen(tid1, "attr", H5P_DEFAULT); + CHECK(aid1, FAIL, "H5Aopen"); + aid2 = H5Aopen(did, "attr", H5P_DEFAULT); + CHECK(aid2, FAIL, "H5Aopen"); + + /* Initialize the write buffers */ + for (u = 0; u < dims1[0]; u++) + for (v = 0; v < dims1[1]; v++) + wdata1[u][v] = (int)((u * dims1[1]) + v); + for (u = 0; u < dims2[0]; u++) + for (v = 0; v < dims2[1]; v++) + wdata2[u][v] = (unsigned)((u * dims2[1]) + v); + + /* Write data to the attributes */ + ret = H5Awrite(aid1, H5T_NATIVE_INT, wdata1); + CHECK(ret, FAIL, "H5Awrite"); + ret = H5Awrite(aid2, H5T_NATIVE_UINT, wdata2); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attributes */ + ret = H5Aclose(aid1); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Aclose(aid2); + CHECK(ret, FAIL, "H5Aclose"); + + /* Delete attributes */ + ret = H5Adelete(tid1, "attr"); + CHECK(ret, FAIL, "H5Adelete"); + ret = H5Adelete(did, "attr"); + CHECK(ret, FAIL, "H5Adelete"); + + /* Recreate attributes */ + aid1 = H5Acreate2(tid1, "attr", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid1, FAIL, "H5Acreate2"); + aid2 = H5Acreate2(did, "attr", tid2, sid2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid2, FAIL, "H5Acreate2"); + + /* Delete attributes (note they are still open) */ + ret = H5Adelete(tid1, "attr"); + CHECK(ret, FAIL, "H5Adelete"); + ret = H5Adelete(did, "attr"); + CHECK(ret, FAIL, "H5Adelete"); + + /* Close dataspaces and transient datatype */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close dataset and committed datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Delete dataset and committed datatype */ + ret = H5Ldelete(fid, "dtype", H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Ldelete(fid, "dset", H5P_DEFAULT); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close attributes */ + ret = H5Aclose(aid1); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Aclose(aid2); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_bug3() */ + +/**************************************************************** +** +** test_attr_bug4(): Test basic H5A (attribute) code. +** Attempts to trigger a bug which would result in being +** unable to add an attribute to a named datatype. This +** happened when an object header chunk was too small to +** hold a continuation message and could not be extended. 
+** +****************************************************************/ +static void +test_attr_bug4(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + hid_t gid; /* Group ID */ + hid_t aid1, aid2, aid3; /* Attribute IDs */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + hid_t did; /* Dataset ID */ + hsize_t dims[1] = {5}; /* Attribute dimensions */ + herr_t ret; /* Generic return status */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing that attributes can always be added to named datatypes\n")); + + /* Create dataspace */ + sid = H5Screate_simple(1, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Open root group */ + gid = H5Gopen2(fid, "/", H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Create committed datatype */ + tid = H5Tcopy(H5T_STD_I32LE); + CHECK(tid, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid, "dtype", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Create dataset */ + did = H5Dcreate2(fid, "dset", tid, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Create attributes on group and dataset */ + aid1 = H5Acreate2(gid, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid1, FAIL, "H5Acreate2"); + aid2 = H5Acreate2(did, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid2, FAIL, "H5Acreate2"); + + /* Create attribute on datatype (this is the main test) */ + aid3 = H5Acreate2(tid, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid3, FAIL, "H5Acreate2"); + + /* Close IDs */ + ret = H5Aclose(aid3); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Aclose(aid2); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Aclose(aid1); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_bug4() */ + +/**************************************************************** +** +** test_attr_bug5(): Test basic H5A (attribute) code. +** Tests opening an attribute multiple times through +** objects opened through different file handles. 
+** +****************************************************************/ +static void +test_attr_bug5(hid_t fcpl, hid_t fapl) +{ + hid_t fid1, fid2; /* File IDs */ + hid_t gid1, gid2; /* Group IDs */ + hid_t did1, did2; /* Dataset IDs */ + hid_t tid1, tid2; /* Datatype IDs */ + hid_t aidg1, aidg2, aidd1, aidd2, aidt1, aidt2; /* Attribute IDs */ + hid_t sid; /* Dataspace ID */ + hsize_t dims[1] = {5}; /* Attribute dimensions */ + herr_t ret; /* Generic return status */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Opening an Attribute Through Multiple Files Concurrently\n")); + + /* Create dataspace ID for attributes and datasets */ + sid = H5Screate_simple(1, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Open root group */ + gid1 = H5Gopen2(fid1, "/", H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gopen2"); + + /* Create and commit datatype */ + tid1 = H5Tcopy(H5T_STD_I32LE); + CHECK(tid1, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid1, BUG3_DT_NAME, tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Create dataset */ + did1 = H5Dcreate2(fid1, BUG3_DSET_NAME, tid1, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); + CHECK(did1, FAIL, "H5Dcreate2"); + + /* Create attribute on root group */ + aidg1 = H5Acreate2(gid1, BUG3_ATTR_NAME, tid1, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aidg1, FAIL, "H5Acreate2"); + + /* Create attribute on dataset */ + aidd1 = H5Acreate2(did1, BUG3_ATTR_NAME, tid1, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aidd1, FAIL, "H5Acreate2"); + + /* Create attribute on datatype */ + aidt1 = H5Acreate2(tid1, BUG3_ATTR_NAME, tid1, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aidt1, FAIL, "H5Acreate2"); + + /* Close all IDs */ + ret = H5Aclose(aidt1); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Aclose(aidd1); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Aclose(aidg1); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Dclose(did1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Gclose(gid1); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Open file twice */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + fid2 = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Open the root group twice */ + gid1 = H5Gopen2(fid1, "/", H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gopen2"); + gid2 = H5Gopen2(fid2, "/", H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gopen2"); + + /* Open the root group attribute twice */ + aidg1 = H5Aopen(gid1, BUG3_ATTR_NAME, H5P_DEFAULT); + CHECK(aidg1, FAIL, "H5Aopen"); + aidg2 = H5Aopen(gid2, BUG3_ATTR_NAME, H5P_DEFAULT); + CHECK(aidg1, FAIL, "H5Aopen"); + + /* Open the dataset twice */ + did1 = H5Dopen2(fid1, BUG3_DSET_NAME, H5P_DEFAULT); + CHECK(did1, FAIL, "H5Dopen2"); + did2 = H5Dopen2(fid2, BUG3_DSET_NAME, H5P_DEFAULT); + CHECK(did2, FAIL, "H5Dopen2"); + + /* Open the dataset attribute twice */ + aidd1 = H5Aopen(did1, BUG3_ATTR_NAME, H5P_DEFAULT); + CHECK(aidd1, FAIL, "H5Aopen"); + aidd2 = H5Aopen(did2, BUG3_ATTR_NAME, H5P_DEFAULT); + CHECK(aidd1, FAIL, "H5Aopen"); + + /* Open the datatype twice */ + tid1 = H5Topen2(fid1, BUG3_DT_NAME, H5P_DEFAULT); + CHECK(tid1, FAIL, "H5Topen2"); + tid2 = H5Topen2(fid2, BUG3_DT_NAME, H5P_DEFAULT); + CHECK(tid2, FAIL, "H5Topen2"); + + /* Open the datatype attribute twice */ + aidt1 = 
H5Aopen(tid1, BUG3_ATTR_NAME, H5P_DEFAULT); + CHECK(aidt1, FAIL, "H5Aopen"); + aidt2 = H5Aopen(tid2, BUG3_ATTR_NAME, H5P_DEFAULT); + CHECK(aidt2, FAIL, "H5Aopen"); + + /* Close all attributes */ + ret = H5Aclose(aidg1); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Aclose(aidg2); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Aclose(aidd1); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Aclose(aidd2); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Aclose(aidt1); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Aclose(aidt2); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close root groups */ + ret = H5Gclose(gid1); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close datasets */ + ret = H5Dclose(did1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(did2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatypes */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close files */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_bug5() */ + +/**************************************************************** +** +** test_attr_bug6(): Test basic H5A (attribute) code. +** Tests if reading an empty attribute is OK. +** +****************************************************************/ +static void +test_attr_bug6(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + hid_t gid; /* Group ID */ + hid_t aid1, aid2; /* Attribute IDs */ + hid_t sid; /* Dataspace ID */ + hsize_t dims[ATTR1_RANK] = {ATTR1_DIM1}; /* Attribute dimensions */ + int intar[ATTR1_DIM1]; /* Data reading buffer */ + herr_t ret; /* Generic return status */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing that empty attribute can be read\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Open root group */ + gid = H5Gopen2(fid, "/", H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Create dataspace */ + sid = H5Screate_simple(1, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create attribute on group */ + aid1 = H5Acreate2(gid, ATTR1_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid1, FAIL, "H5Acreate2"); + + ret = H5Aclose(aid1); + CHECK(ret, FAIL, "H5Aclose"); + + /* Open the attribute again */ + aid2 = H5Aopen(gid, ATTR1_NAME, H5P_DEFAULT); + CHECK(aid2, FAIL, "H5Aopen"); + + ret = H5Aread(aid2, H5T_NATIVE_INT, intar); + CHECK(ret, FAIL, "H5Aread"); + + /* Close IDs */ + ret = H5Aclose(aid2); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_bug6() */ + +/**************************************************************** +** +** test_attr_bug7(): Test basic H5A (attribute) code. +** (Really tests object header allocation code). +** Tests creating and deleting attributes in such a way as +** to change the size of the "chunk #0 size" field. +** Includes testing "skipping" a possible size of the +** field, i.e. going from 1 to 4 bytes or 4 to 1 byte. 
+** +****************************************************************/ +#if 0 +static void +test_attr_bug7(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + hid_t aid; /* Attribute ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + hsize_t dims_s = 140; /* Small attribute dimensions */ + hsize_t dims_l = 65480; /* Large attribute dimensions */ + H5A_info_t ainfo; /* Attribute info */ + herr_t ret; /* Generic return status */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing adding and deleting large attributes\n")); + + /* Create committed datatype to operate on. Use a committed datatype so that + * there is nothing after the object header and the first chunk can expand and + * contract as necessary. */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + tid = H5Tcopy(H5T_STD_I32LE); + CHECK(tid, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid, TYPE1_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* + * Create small attribute + */ + sid = H5Screate_simple(1, &dims_s, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + aid = H5Acreate2(tid, ATTR1_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close file */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Check attribute */ + tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT); + CHECK(tid, FAIL, "H5Topen2"); + ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + if (ainfo.data_size != dims_s) + TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", + (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); + + /* + * Create another small attribute. Should cause chunk size field to expand by + * 1 byte (1->2). + */ + aid = H5Acreate2(tid, ATTR2_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close file */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Check attributes */ + tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT); + CHECK(tid, FAIL, "H5Topen2"); + ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + if (ainfo.data_size != dims_s) + TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", + (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); + ret = H5Aget_info_by_name(tid, ".", ATTR2_NAME, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + if (ainfo.data_size != dims_s) + TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", + (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); + + /* + * Create large attribute. Should cause chunk size field to expand by 2 bytes + * (2->4). 
+ */ + ret = H5Sset_extent_simple(sid, 1, &dims_l, NULL); + CHECK(ret, FAIL, "H5Sset_extent_simple"); + aid = H5Acreate2(tid, ATTR3_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close file */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Check attributes */ + tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT); + CHECK(tid, FAIL, "H5Topen2"); + ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + if (ainfo.data_size != dims_s) + TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", + (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); + ret = H5Aget_info_by_name(tid, ".", ATTR2_NAME, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + if (ainfo.data_size != dims_s) + TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", + (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); + ret = H5Aget_info_by_name(tid, ".", ATTR3_NAME, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + if (ainfo.data_size != dims_l) + TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", + (long long unsigned)ainfo.data_size, (long long unsigned)dims_l); + + /* + * Delete last two attributes - should merge into a null message that is too + * large, causing the chunk size field to shrink by 3 bytes (4->1). + */ + ret = H5Sset_extent_simple(sid, 1, &dims_l, NULL); + CHECK(ret, FAIL, "H5Sset_extent_simple"); + ret = H5Adelete(tid, ATTR2_NAME); + CHECK(ret, FAIL, "H5Adelete"); + ret = H5Adelete(tid, ATTR3_NAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Check attribute */ + tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT); + CHECK(tid, FAIL, "H5Topen2"); + ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + if (ainfo.data_size != dims_s) + TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", + (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); + + /* + * Create large attribute. Should cause chunk size field to expand by 3 bytes + * (1->4). 
+ */ + aid = H5Acreate2(tid, ATTR2_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close file */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Check attributes */ + tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT); + CHECK(tid, FAIL, "H5Topen2"); + ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + if (ainfo.data_size != dims_s) + TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", + (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); + ret = H5Aget_info_by_name(tid, ".", ATTR2_NAME, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + if (ainfo.data_size != dims_l) + TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", + (long long unsigned)ainfo.data_size, (long long unsigned)dims_l); + + /* Close IDs */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_bug7() */ +#endif + +/**************************************************************** +** +** test_attr_bug8(): Test basic H5A (attribute) code. +** (Really tests object header code). +** Tests adding a link and attribute to a group in such a +** way as to cause the "chunk #0 size" field to expand +** when some object header messages are not loaded into +** cache. Before the bug was fixed, this would prevent +** these messages from being shifted to the correct +** position as the expansion algorithm marked them dirty, +** invalidating the raw form, when there was no native +** form to encode. +** +****************************************************************/ +static void +test_attr_bug8(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + hid_t aid; /* Attribute ID */ + hid_t sid; /* Dataspace ID */ + hid_t gid; /* Group ID */ + hid_t oid; /* Object ID */ + hsize_t dims = 256; /* Attribute dimensions */ + H5O_info2_t oinfo; /* Object info */ + H5A_info_t ainfo; /* Attribute info */ + H5O_token_t root_token; /* Root group token */ + int cmp_value; /* Result from H5Otoken_cmp */ + herr_t ret; /* Generic return status */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing attribute expanding object header with undecoded messages\n")); + + /* Create committed datatype to operate on. Use a committed datatype so that + * there is nothing after the object header and the first chunk can expand and + * contract as necessary. 
*/ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Get root group token */ + ret = H5Oget_info3(fid, &oinfo, H5O_INFO_BASIC); + CHECK(ret, FAIL, "H5Oget_info"); + root_token = oinfo.token; + + /* + * Create link to root group + */ + ret = H5Lcreate_hard(fid, "/", gid, LINK1_NAME, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_hard"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Check link */ + gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + oid = H5Oopen(gid, LINK1_NAME, H5P_DEFAULT); + CHECK(oid, FAIL, "H5Oopen"); + ret = H5Oget_info3(oid, &oinfo, H5O_INFO_BASIC); + CHECK(ret, FAIL, "H5Oget_info"); + ret = H5Otoken_cmp(oid, &oinfo.token, &root_token, &cmp_value); + CHECK(ret, FAIL, "H5Otoken_cmp"); + VERIFY(cmp_value, 0, "H5Otoken_cmp"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Oclose(oid); + CHECK(ret, FAIL, "H5Oclose"); + + /* Open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* + * Create attribute. Should cause chunk size field to expand by 1 byte + * (1->2). + */ + gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + sid = H5Screate_simple(1, &dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + aid = H5Acreate2(gid, ATTR1_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close file */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Open file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Check link and attribute */ + gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + oid = H5Oopen(gid, LINK1_NAME, H5P_DEFAULT); + CHECK(oid, FAIL, "H5Oopen"); + ret = H5Oget_info3(oid, &oinfo, H5O_INFO_BASIC); + CHECK(ret, FAIL, "H5Oget_info"); + ret = H5Otoken_cmp(oid, &oinfo.token, &root_token, &cmp_value); + CHECK(ret, FAIL, "H5Otoken_cmp"); + VERIFY(cmp_value, 0, "H5Otoken_cmp"); + ret = H5Aget_info_by_name(gid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Aget_info_by_name"); + if (ainfo.data_size != dims) + TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", + (long long unsigned)ainfo.data_size, (long long unsigned)dims); + + /* Close IDs */ + ret = H5Oclose(oid); + CHECK(ret, FAIL, "H5Oclose"); + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_attr_bug8() */ + +/**************************************************************** +** +** test_attr_bug9(): Test basic H5A (attribute) code. +** (Really tests object header code). +** Tests adding several large attributes to an object until +** they convert to dense storage. The total size of all +** attributes is larger than 64K, causing the internal +** object header code to, after merging the deleted +** messages in to a NULL message, shrink the object header +** chunk. 
Do this twice: once with only attributes in the +** object header chunk and once with a (small) soft link in +** the chunk as well. In both cases, the shrunk chunk will +** initially be too small and a new NULL message must be +** created. +** +****************************************************************/ +static void +test_attr_bug9(hid_t fcpl, hid_t fapl) +{ + hid_t fid = -1; /* File ID */ + hid_t gid = -1; /* Group ID */ + hid_t aid = -1; /* Attribute ID */ + hid_t sid = -1; /* Dataspace ID */ + hsize_t dims[1] = {32768}; /* Attribute dimensions */ + int create_link; /* Whether to create a soft link */ + unsigned max_compact; /* Setting from fcpl */ + unsigned min_dense; /* Setting from fcpl */ + char aname[11]; /* Attribute name */ + unsigned i; /* Local index variable */ + herr_t ret; /* Generic return status */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing that attributes can always be added to named datatypes\n")); + + /* Create dataspace */ + sid = H5Screate_simple(1, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Obtain attribute phase change settings */ + ret = H5Pget_attr_phase_change(fcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Run with and without the soft link */ + for (create_link = 0; create_link < 2; create_link++) { + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create second group */ + gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Close second group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Open root group */ + gid = H5Gopen2(fid, "/", H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Create enough attributes to cause a change to dense storage */ + for (i = 0; i < max_compact + 1; i++) { + /* Create attribute */ + HDsnprintf(aname, sizeof(aname), "%u", i); + aid = H5Acreate2(gid, aname, H5T_NATIVE_CHAR, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Create enough soft links that exactly one goes into chunk 1 if + * requested */ + if (i == 0 && create_link) { + ret = H5Lcreate_soft("b", gid, "a", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_soft"); + ret = H5Lcreate_soft("d", gid, "c", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_soft"); + ret = H5Lcreate_soft("f", gid, "e", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_soft"); + } /* end if */ + } /* end for */ + + /* Close IDs */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_bug9() */ + +/**************************************************************** +** +** test_attr_bug10(): Test basic H5A (attribute) code. +** Attempts to trigger a bug which would result in a +** segfault. Create a vlen attribute through a file +** handle, then open the same file through a different +** handle, open the same attribute through the second file +** handle, then close the second file and attribute +** handles, then write to the attribute through the first +** handle. 
+** +****************************************************************/ +static void +test_attr_bug10(hid_t fcpl, hid_t fapl) +{ + hid_t fid1, fid2; /* File IDs */ + hid_t aid1, aid2; /* Attribute IDs */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + hsize_t dims[1] = {1}; /* Attribute dimensions */ + const char *wbuf[1] = {"foo"}; /* Write buffer */ + herr_t ret; /* Generic return status */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing that vlen attributes can be written to after a second file handle is closed\n")); + + /* Create dataspace */ + sid = H5Screate_simple(1, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create VL string datatype */ + tid = H5Tcopy(H5T_C_S1); + CHECK(tid, FAIL, "H5Tcreate"); + ret = H5Tset_size(tid, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create attribute on root group */ + aid1 = H5Acreate2(fid1, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid1, FAIL, "H5Acreate2"); + + /* Open the same file again */ + fid2 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid2, FAIL, "H5Fcreate"); + + /* Open the same attribute through the second file handle */ + aid2 = H5Aopen(fid2, "attr", H5P_DEFAULT); + CHECK(aid2, FAIL, "H5Aopen"); + + /* Close the second attribute and file handles */ + ret = H5Aclose(aid2); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + + /* Write to the attribute through the first handle */ + ret = H5Awrite(aid1, tid, wbuf); + + /* Close IDs */ + ret = H5Aclose(aid1); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_attr_bug10() */ + +/**************************************************************** +** +** test_attr_delete_dense(): +** This is to verify the error as described in HDFFV-9277 +** is fixed when deleting the last "large" attribute that +** is stored densely. 
+** +****************************************************************/ +#if 0 /* Native VOL connector only supports large attributes with latest format */ +static void +test_attr_delete_last_dense(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + hid_t gid; /* Group ID */ + hid_t aid; /* Attribute ID */ + hid_t sid; /* Dataspace ID */ + hsize_t dim2[2] = {DIM0, DIM1}; /* Dimension sizes */ + int i, j; /* Local index variables */ + double *data = NULL; /* Pointer to the data buffer */ + herr_t ret; /* Generic return status */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Deleting the last large attribute stored densely\n")); + + /* Create the file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create the group */ + gid = H5Gcreate2(fid, GRPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate"); + + /* Create the dataspace */ + sid = H5Screate_simple(RANK, dim2, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Attach the attribute to the group */ + aid = H5Acreate2(gid, ATTRNAME, H5T_IEEE_F64LE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Allocate the data buffer */ + data = (double *)HDmalloc((size_t)(DIM0 * DIM1) * sizeof(double)); + CHECK_PTR(data, "HDmalloc"); + + /* Initialize the data */ + for (i = 0; i < DIM0; i++) + for (j = 0; j < DIM1; j++) + *(data + i * DIM1 + j) = i + j; + + /* Write to the attribute */ + ret = H5Awrite(aid, H5T_NATIVE_DOUBLE, data); + CHECK(ret, FAIL, "H5Awrite"); + + /* Closing */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open the group */ + gid = H5Gopen2(fid, GRPNAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen"); + + /* Delete the attribute */ + ret = H5Adelete(gid, ATTRNAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Closing */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free the data buffer */ + if (data) + HDfree(data); + +} /* test_attr_delete_last_dense() */ +#endif + +/**************************************************************** +** +** test_attr(): Main H5A (attribute) testing routine. +** +****************************************************************/ +void +test_attr(void) +{ + hid_t fapl = (-1), fapl2 = (-1); /* File access property lists */ + hid_t fcpl = (-1), fcpl2 = (-1); /* File creation property lists */ + hid_t dcpl = -1; /* Dataset creation property list */ + unsigned new_format; /* Whether to use the new format or not */ + unsigned use_shared; /* Whether to use shared attributes or not */ + unsigned minimize_dset_oh; /* Whether to use minimized dataset object headers */ + herr_t ret; /* Generic return value */ + + MESSAGE(5, ("Testing Attributes\n")); + + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* fapl2 uses "latest version of the format" for creating objects in the file */ + fapl2 = H5Pcopy(fapl); + CHECK(fapl2, FAIL, "H5Pcopy"); + ret = H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + + /* files with fcpl2 make all attributes ( > 1 byte) shared + * (i.e. 
all of them :-) */ + fcpl2 = H5Pcopy(fcpl); + CHECK(fcpl2, FAIL, "H5Pcopy"); + ret = H5Pset_shared_mesg_nindexes(fcpl2, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); + ret = H5Pset_shared_mesg_index(fcpl2, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)1); + CHECK_I(ret, "H5Pset_shared_mesg_index"); + + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate"); + + ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED); + CHECK(ret, FAIL, ""); + + dcpl_g = dcpl; + + for (minimize_dset_oh = 0; minimize_dset_oh <= 1; minimize_dset_oh++) { + if (minimize_dset_oh != 0) + continue; + +#if 0 + if (minimize_dset_oh == 0) { + MESSAGE(7, ("testing with default dataset object headers\n")); + dcpl_g = H5P_DEFAULT; + } + else { + MESSAGE(7, ("testing with minimzied dataset object headers\n")); + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + ret = H5Pset_dset_no_attrs_hint(dcpl, TRUE); + CHECK_I(ret, "H5Pset_dset_no_attrs_hint"); + dcpl_g = dcpl; + } +#endif + + for (new_format = FALSE; new_format <= TRUE; new_format++) { + hid_t my_fapl = fapl; + + if (new_format) + continue; + +#if 0 + /* Set the FAPL for the type of format */ + if (new_format) { + MESSAGE(7, ("testing with new file format\n")); + my_fapl = fapl2; + } + else { + MESSAGE(7, ("testing with old file format\n")); + my_fapl = fapl; + } +#endif + + /* These next two tests use the same file information */ + test_attr_basic_write(my_fapl); /* Test basic H5A writing code */ + test_attr_basic_read(my_fapl); /* Test basic H5A reading code */ + + /* These next two tests use their own file information */ + test_attr_flush(my_fapl); /* Test H5A I/O in the presence of H5Fflush calls */ + test_attr_plist(my_fapl); /* Test attribute property lists */ + + /* These next two tests use the same file information */ + test_attr_compound_write(my_fapl); /* Test complex datatype H5A writing code */ + test_attr_compound_read(my_fapl); /* Test complex datatype H5A reading code */ + + /* These next two tests use the same file information */ + test_attr_scalar_write(my_fapl); /* Test scalar dataspace H5A writing code */ + test_attr_scalar_read(my_fapl); /* Test scalar dataspace H5A reading code */ + + /* These next four tests use the same file information */ + test_attr_mult_write(my_fapl); /* Test H5A writing code for multiple attributes */ + test_attr_mult_read(my_fapl); /* Test H5A reading code for multiple attributes */ + test_attr_iterate(my_fapl); /* Test H5A iterator code */ + test_attr_delete(my_fapl); /* Test H5A code for deleting attributes */ + + /* This next test uses its own file information */ + test_attr_dtype_shared(my_fapl); /* Test using shared dataypes in attributes */ + + /* This next test uses its own file information */ + test_attr_duplicate_ids(my_fapl); + + for (use_shared = FALSE; use_shared <= TRUE; use_shared++) { + hid_t my_fcpl; + + if (new_format == TRUE && use_shared) { + MESSAGE(7, ("testing with shared attributes\n")); + my_fcpl = fcpl2; + } + else { + MESSAGE(7, ("testing without shared attributes\n")); + my_fcpl = fcpl; + } + + test_attr_big(my_fcpl, my_fapl); /* Test storing big attribute */ + test_attr_null_space(my_fcpl, my_fapl); /* Test storing attribute with NULL dataspace */ + test_attr_deprec(fcpl, my_fapl); /* Test deprecated API routines */ + test_attr_many(new_format, my_fcpl, my_fapl); /* Test storing lots of attributes */ + test_attr_info_null_info_pointer(my_fcpl, + my_fapl); /* Test passing a NULL attribute info pointer to + 
H5Aget_info(_by_name/_by_idx) */ + test_attr_rename_invalid_name( + my_fcpl, + my_fapl); /* Test passing a NULL or empty attribute name to H5Arename(_by_name) */ + test_attr_get_name_invalid_buf( + my_fcpl, my_fapl); /* Test passing NULL buffer to H5Aget_name(_by_idx) */ + + /* New attribute API routine tests */ + test_attr_info_by_idx(new_format, my_fcpl, + my_fapl); /* Test querying attribute info by index */ + test_attr_delete_by_idx(new_format, my_fcpl, my_fapl); /* Test deleting attribute by index */ + test_attr_iterate2(new_format, my_fcpl, + my_fapl); /* Test iterating over attributes by index */ + test_attr_open_by_idx(new_format, my_fcpl, my_fapl); /* Test opening attributes by index */ + test_attr_open_by_name(new_format, my_fcpl, my_fapl); /* Test opening attributes by name */ + test_attr_create_by_name(new_format, my_fcpl, my_fapl); /* Test creating attributes by name */ + + /* Tests that address specific bugs */ + test_attr_bug1(my_fcpl, my_fapl); /* Test odd allocation operations */ + test_attr_bug2(my_fcpl, my_fapl); /* Test many deleted attributes */ + test_attr_bug3(my_fcpl, my_fapl); /* Test "self referential" attributes */ + test_attr_bug4(my_fcpl, my_fapl); /* Test attributes on named datatypes */ + test_attr_bug5(my_fcpl, + my_fapl); /* Test opening/closing attributes through different file handles */ + test_attr_bug6(my_fcpl, my_fapl); /* Test reading empty attribute */ + /* test_attr_bug7 is specific to the "new" object header format, + * and in fact fails if used with the old format due to the + * attributes being larger than 64K */ + test_attr_bug8(my_fcpl, + my_fapl); /* Test attribute expanding object header with undecoded messages */ + test_attr_bug9(my_fcpl, my_fapl); /* Test large attributes converting to dense storage */ + test_attr_bug10(my_fcpl, my_fapl); /* Test writing an attribute after opening and closing + through a different file handle */ + + /* tests specific to the "new format" */ + if (new_format == TRUE) { + /* General attribute tests */ + test_attr_dense_create(my_fcpl, my_fapl); /* Test dense attribute storage creation */ + test_attr_dense_open(my_fcpl, my_fapl); /* Test opening attributes in dense storage */ + test_attr_dense_delete(my_fcpl, my_fapl); /* Test deleting attributes in dense storage */ + test_attr_dense_rename(my_fcpl, my_fapl); /* Test renaming attributes in dense storage */ + test_attr_dense_unlink( + my_fcpl, my_fapl); /* Test unlinking object with attributes in dense storage */ + test_attr_dense_limits(my_fcpl, my_fapl); /* Test dense attribute storage limits */ + test_attr_dense_dup_ids(my_fcpl, + my_fapl); /* Test duplicated IDs for dense attribute storage */ + + /* Attribute creation order tests */ + test_attr_corder_create_basic( + my_fcpl, my_fapl); /* Test creating an object w/attribute creation order info */ + test_attr_corder_create_compact(my_fcpl, + my_fapl); /* Test compact attribute storage on an object + w/attribute creation order info */ + test_attr_corder_create_dense(my_fcpl, + my_fapl); /* Test dense attribute storage on an object + w/attribute creation order info */ + test_attr_corder_create_reopen(my_fcpl, + my_fapl); /* Test creating attributes w/reopening file from + using new format to using old format */ + test_attr_corder_transition(my_fcpl, + my_fapl); /* Test attribute storage transitions on an object + w/attribute creation order info */ + test_attr_corder_delete(my_fcpl, my_fapl); /* Test deleting object using dense storage + w/attribute creation order info */ + + /* More complex tests with 
exclusively both "new format" and "shared" attributes */ + if (use_shared == TRUE) { + test_attr_shared_write( + my_fcpl, + my_fapl); /* Test writing to shared attributes in compact & dense storage */ + test_attr_shared_rename( + my_fcpl, + my_fapl); /* Test renaming shared attributes in compact & dense storage */ + test_attr_shared_delete( + my_fcpl, + my_fapl); /* Test deleting shared attributes in compact & dense storage */ + test_attr_shared_unlink(my_fcpl, my_fapl); /* Test unlinking object with shared + attributes in compact & dense storage */ + } /* if using shared attributes */ + +#if 0 /* Native VOL connector only supports large attributes with latest format */ + test_attr_delete_last_dense(my_fcpl, my_fapl); + + /* test_attr_bug7 is specific to the "new" object header format, + * and in fact fails if used with the old format due to the + * attributes being larger than 64K */ + test_attr_bug7(my_fcpl, + my_fapl); /* Test creating and deleting large attributes in ohdr chunk 0 */ +#endif + + } /* if using "new format" */ + } /* for unshared/shared attributes */ + } /* for old/new format */ + + if (minimize_dset_oh != 0) { + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + dcpl_g = H5P_DEFAULT; + } + + } /* for default/minimized dataset object headers */ + + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close FCPLs */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fcpl2); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close FAPLs */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl2); + CHECK(ret, FAIL, "H5Pclose"); +} /* test_attr() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_attr + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Albert Cheng + * July 2, 1998 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +cleanup_attr(void) +{ + H5Fdelete(FILENAME, H5P_DEFAULT); +} diff --git a/test/API/tchecksum.c b/test/API/tchecksum.c new file mode 100644 index 00000000000..a77ffcd7497 --- /dev/null +++ b/test/API/tchecksum.c @@ -0,0 +1,251 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*------------------------------------------------------------------------- + * + * Created: tchecksum.c + * Aug 21 2006 + * Quincey Koziol + * + * Purpose: Test internal checksum routine(s) + * + *------------------------------------------------------------------------- + */ + +/***********/ +/* Headers */ +/***********/ +#include "testhdf5.h" + +/**********/ +/* Macros */ +/**********/ +#define BUF_LEN 3093 /* No particular value */ + +/*******************/ +/* Local variables */ +/*******************/ + +/**************************************************************** +** +** test_chksum_size_one(): Checksum 1 byte buffer +** +****************************************************************/ +static void +test_chksum_size_one(void) +{ + uint8_t buf[1] = {23}; /* Buffer to checksum */ + uint32_t chksum; /* Checksum value */ + + /* Buffer w/real data */ + chksum = H5_checksum_fletcher32(buf, sizeof(buf)); + VERIFY(chksum, 0x17001700, "H5_checksum_fletcher32"); + + chksum = H5_checksum_crc(buf, sizeof(buf)); + VERIFY(chksum, 0xfa2568b7, "H5_checksum_crc"); + + chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); + VERIFY(chksum, 0xa209c931, "H5_checksum_lookup3"); + + /* Buffer w/zero(s) for data */ + HDmemset(buf, 0, sizeof(buf)); + chksum = H5_checksum_fletcher32(buf, sizeof(buf)); + VERIFY(chksum, 0, "H5_checksum_fletcher32"); + + chksum = H5_checksum_crc(buf, sizeof(buf)); + VERIFY(chksum, 0xfa60fb57, "H5_checksum_crc"); + + chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); + VERIFY(chksum, 0x8ba9414b, "H5_checksum_lookup3"); +} /* test_chksum_size_one() */ + +/**************************************************************** +** +** test_chksum_size_two(): Checksum 2 byte buffer +** +****************************************************************/ +static void +test_chksum_size_two(void) +{ + uint8_t buf[2] = {23, 187}; /* Buffer to checksum */ + uint32_t chksum; /* Checksum value */ + + /* Buffer w/real data */ + chksum = H5_checksum_fletcher32(buf, sizeof(buf)); + VERIFY(chksum, 0x17bb17bb, "H5_checksum_fletcher32"); + + chksum = H5_checksum_crc(buf, sizeof(buf)); + VERIFY(chksum, 0xfc856608, "H5_checksum_crc"); + + chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); + VERIFY(chksum, 0x8ba7a6c9, "H5_checksum_lookup3"); + + /* Buffer w/zero(s) for data */ + HDmemset(buf, 0, sizeof(buf)); + chksum = H5_checksum_fletcher32(buf, sizeof(buf)); + VERIFY(chksum, 0, "H5_checksum_fletcher32"); + + chksum = H5_checksum_crc(buf, sizeof(buf)); + VERIFY(chksum, 0xfc7e9b20, "H5_checksum_crc"); + + chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); + VERIFY(chksum, 0x62cd61b3, "H5_checksum_lookup3"); +} /* test_chksum_size_two() */ + +/**************************************************************** +** +** test_chksum_size_three(): Checksum 3 byte buffer +** +****************************************************************/ +static void +test_chksum_size_three(void) +{ + uint8_t buf[3] = {23, 187, 98}; /* Buffer to checksum */ + uint32_t chksum; /* Checksum value */ + + /* Buffer w/real data */ + chksum = H5_checksum_fletcher32(buf, sizeof(buf)); + VERIFY(chksum, 0x917679bb, "H5_checksum_fletcher32"); + + chksum = H5_checksum_crc(buf, sizeof(buf)); + VERIFY(chksum, 0xfebc5d70, "H5_checksum_crc"); + + chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); + VERIFY(chksum, 0xcebdf4f0, "H5_checksum_lookup3"); + + /* Buffer w/zero(s) for data */ + HDmemset(buf, 0, sizeof(buf)); + chksum = H5_checksum_fletcher32(buf, 
sizeof(buf)); + VERIFY(chksum, 0, "H5_checksum_fletcher32"); + + chksum = H5_checksum_crc(buf, sizeof(buf)); + VERIFY(chksum, 0xf9cc4c7a, "H5_checksum_crc"); + + chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); + VERIFY(chksum, 0x6bd0060f, "H5_checksum_lookup3"); +} /* test_chksum_size_three() */ + +/**************************************************************** +** +** test_chksum_size_four(): Checksum 4 byte buffer +** +****************************************************************/ +static void +test_chksum_size_four(void) +{ + uint8_t buf[4] = {23, 187, 98, 217}; /* Buffer to checksum */ + uint32_t chksum; /* Checksum value */ + + /* Buffer w/real data */ + chksum = H5_checksum_fletcher32(buf, sizeof(buf)); + VERIFY(chksum, 0x924f7a94, "H5_checksum_fletcher32"); + + chksum = H5_checksum_crc(buf, sizeof(buf)); + VERIFY(chksum, 0xff398a46, "H5_checksum_crc"); + + chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); + VERIFY(chksum, 0x2c88bb51, "H5_checksum_lookup3"); + + /* Buffer w/zero(s) for data */ + HDmemset(buf, 0, sizeof(buf)); + chksum = H5_checksum_fletcher32(buf, sizeof(buf)); + VERIFY(chksum, 0, "H5_checksum_fletcher32"); + + chksum = H5_checksum_crc(buf, sizeof(buf)); + VERIFY(chksum, 0xff117081, "H5_checksum_crc"); + + chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); + VERIFY(chksum, 0x049396b8, "H5_checksum_lookup3"); +} /* test_chksum_size_four() */ + +/**************************************************************** +** +** test_chksum_large(): Checksum larger buffer +** +****************************************************************/ +static void +test_chksum_large(void) +{ + uint8_t *large_buf; /* Buffer for checksum calculations */ + uint32_t chksum; /* Checksum value */ + size_t u; /* Local index variable */ + + /* Allocate the buffer */ + large_buf = (uint8_t *)HDmalloc((size_t)BUF_LEN); + CHECK_PTR(large_buf, "HDmalloc"); + + /* Initialize buffer w/known data */ + for (u = 0; u < BUF_LEN; u++) + large_buf[u] = (uint8_t)(u * 3); + + /* Buffer w/real data */ + chksum = H5_checksum_fletcher32(large_buf, (size_t)BUF_LEN); + VERIFY(chksum, 0x85b4e2a, "H5_checksum_fletcher32"); + + chksum = H5_checksum_crc(large_buf, (size_t)BUF_LEN); + VERIFY(chksum, 0xfbd0f7c0, "H5_checksum_crc"); + + chksum = H5_checksum_lookup3(large_buf, (size_t)BUF_LEN, 0); + VERIFY(chksum, 0x1bd2ee7b, "H5_checksum_lookup3"); + + /* Buffer w/zero(s) for data */ + HDmemset(large_buf, 0, (size_t)BUF_LEN); + chksum = H5_checksum_fletcher32(large_buf, (size_t)BUF_LEN); + VERIFY(chksum, 0, "H5_checksum_fletcher32"); + + chksum = H5_checksum_crc(large_buf, (size_t)BUF_LEN); + VERIFY(chksum, 0xfac8b4c4, "H5_checksum_crc"); + + chksum = H5_checksum_lookup3(large_buf, (size_t)BUF_LEN, 0); + VERIFY(chksum, 0x930c7afc, "H5_checksum_lookup3"); + + /* Release memory for buffer */ + HDfree(large_buf); +} /* test_chksum_large() */ + +/**************************************************************** +** +** test_checksum(): Main checksum testing routine. 
+** +****************************************************************/ +void +test_checksum(void) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing checksum algorithms\n")); + + /* Various checks for fletcher32 checksum algorithm */ + test_chksum_size_one(); /* Test buffer w/only 1 byte */ + test_chksum_size_two(); /* Test buffer w/only 2 bytes */ + test_chksum_size_three(); /* Test buffer w/only 3 bytes */ + test_chksum_size_four(); /* Test buffer w/only 4 bytes */ + test_chksum_large(); /* Test buffer w/larger # of bytes */ + +} /* test_checksum() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_checksum + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Quincey Koziol + * August 21, 2006 + * + *------------------------------------------------------------------------- + */ +void +cleanup_checksum(void) +{ + /* no file to clean */ +} diff --git a/test/API/tconfig.c b/test/API/tconfig.c new file mode 100644 index 00000000000..fdab5ef5d1d --- /dev/null +++ b/test/API/tconfig.c @@ -0,0 +1,199 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: tconfig + * + * Test the definitions in the H5config.h as much as possible + * + *************************************************************/ + +#include "testhdf5.h" + +/* macros definitions */ +/* verify C int type: verify the size of signed and unsigned int type + * with the macro size. + */ +#define vrfy_cint_type(ctype, uctype, ctype_macro) \ + /* check signed type size */ \ + vrfy_macrosize(ctype, ctype_macro, #ctype_macro); \ + /* check unsigned type size */ \ + vrfy_macrosize(uctype, ctype_macro, #ctype_macro); + +/* verify C type sizes: verify the sizeof type with the macro size. */ +#define vrfy_ctype(type, macro) vrfy_macrosize(type, macro, #macro); + +/* verify if the sizeof(type) matches size defined in macro. */ +/* Needs this extra step so that we can print the macro name. 
*/ +#define vrfy_macrosize(type, macro, macroname) \ + if (sizeof(type) != (macro)) \ + TestErrPrintf("Error: sizeof(%s) is %zu but %s is %d\n", #type, sizeof(type), macroname, \ + (int)(macro)); + +/* local routine prototypes */ +void test_config_ctypes(void); +void test_exit_definitions(void); + +/*------------------------------------------------------------------------- + * Function: test_configure + * + * Purpose: Main configure definitions testing routine + * + * Return: none (error is fed back via global variable num_errs) + * + * Programmer: Albert Cheng + * September 25, 2001 + * + *------------------------------------------------------------------------- + */ +void +test_configure(void) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing configure definitions\n")); + test_config_ctypes(); + test_exit_definitions(); +} + +/*------------------------------------------------------------------------- + * Function: cleanup_configure + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Albert Cheng + * September 25, 2001 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +cleanup_configure(void) +{ + /* no file to clean */ +} + +/*------------------------------------------------------------------------- + * Function: test_config_ctypes + * + * Purpose: test C language data type sizes + * + * Return: none (error is fed back via global variable num_errs) + * + * Programmer: Albert Cheng + * September 25, 2001 + * + * Modifications: + * Albert Cheng, 2004/10/14 + * Verified both signed and unsigned int types. + * + *------------------------------------------------------------------------- + */ +void +test_config_ctypes(void) +{ + /* standard C89 basic types */ + /* char, signed char, unsigned char are three distinct types. */ + vrfy_ctype(char, H5_SIZEOF_CHAR); + vrfy_cint_type(signed char, unsigned char, H5_SIZEOF_CHAR); + vrfy_cint_type(int, unsigned int, H5_SIZEOF_INT); + vrfy_cint_type(short, unsigned short, H5_SIZEOF_SHORT); + vrfy_cint_type(long, unsigned long, H5_SIZEOF_LONG); + vrfy_ctype(float, H5_SIZEOF_FLOAT); + vrfy_ctype(double, H5_SIZEOF_DOUBLE); + vrfy_ctype(long double, H5_SIZEOF_LONG_DOUBLE); + + /* standard C99 basic types */ + vrfy_cint_type(long long, unsigned long long, H5_SIZEOF_LONG_LONG); + vrfy_cint_type(int8_t, uint8_t, H5_SIZEOF_INT8_T); + vrfy_cint_type(int16_t, uint16_t, H5_SIZEOF_INT16_T); + vrfy_cint_type(int32_t, uint32_t, H5_SIZEOF_INT32_T); + vrfy_cint_type(int64_t, uint64_t, H5_SIZEOF_INT64_T); + + /* Some vendors have different sizes for the signed and unsigned */ + /* fast8_t. Need to check them individually. 
*/ +#if H5_SIZEOF_INT_FAST8_T > 0 + vrfy_ctype(int_fast8_t, H5_SIZEOF_INT_FAST8_T); +#endif + +#if H5_SIZEOF_UINT_FAST8_T > 0 + vrfy_ctype(uint_fast8_t, H5_SIZEOF_UINT_FAST8_T); +#endif + +#if H5_SIZEOF_INT_FAST16_T > 0 + vrfy_cint_type(int_fast16_t, uint_fast16_t, H5_SIZEOF_INT_FAST16_T); +#endif + +#if H5_SIZEOF_INT_FAST32_T > 0 + vrfy_cint_type(int_fast32_t, uint_fast32_t, H5_SIZEOF_INT_FAST32_T); +#endif + +#if H5_SIZEOF_INT_FAST64_T > 0 + vrfy_cint_type(int_fast64_t, uint_fast64_t, H5_SIZEOF_INT_FAST64_T); +#endif + +#if H5_SIZEOF_INT_LEAST8_T > 0 + vrfy_cint_type(int_least8_t, uint_least8_t, H5_SIZEOF_INT_LEAST8_T); +#endif + +#if H5_SIZEOF_INT_LEAST16_T > 0 + vrfy_cint_type(int_least16_t, uint_least16_t, H5_SIZEOF_INT_LEAST16_T); +#endif + +#if H5_SIZEOF_INT_LEAST32_T > 0 + vrfy_cint_type(int_least32_t, uint_least32_t, H5_SIZEOF_INT_LEAST32_T); +#endif + +#if H5_SIZEOF_INT_LEAST64_T > 0 + vrfy_cint_type(int_least64_t, uint_least64_t, H5_SIZEOF_INT_LEAST64_T); +#endif + +#if H5_SIZEOF_OFF_T > 0 + vrfy_ctype(off_t, H5_SIZEOF_OFF_T); +#endif + +#if H5_SIZEOF_SIZE_T > 0 + vrfy_ctype(size_t, H5_SIZEOF_SIZE_T); +#endif + +#if H5_SIZEOF_SSIZE_T > 0 + vrfy_ctype(ssize_t, H5_SIZEOF_SSIZE_T); +#endif +} + +/*------------------------------------------------------------------------- + * Function: test_exit_definitions + * + * Purpose: test the exit macros values + * + * Return: none (error is fed back via global variable num_errs) + * + * Programmer: Albert Cheng + * October 12, 2009 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +test_exit_definitions(void) +{ + /* Verify the EXIT_SUCCESS and EXIT_FAILURE are 0 and 1 respectively. */ + /* This should be true for POSIX compliant systems. */ + if (EXIT_SUCCESS != 0) + TestErrPrintf("Error: EXIT_SUCCESS is %d, should be %d\n", EXIT_SUCCESS, 0); + if (EXIT_FAILURE != 1) + TestErrPrintf("Error: EXIT_FAILURE is %d, should be %d\n", EXIT_FAILURE, 1); +} diff --git a/test/API/tcoords.c b/test/API/tcoords.c new file mode 100644 index 00000000000..9c66b40b065 --- /dev/null +++ b/test/API/tcoords.c @@ -0,0 +1,724 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: tcoords + * + * Test the element coordinates for dataspace selection. For + * chunked dataset, when the hyperslab selection of some + * dimensions is full, the library optimize it by "flattening" + * the fully selected dimensions. This program tests if the + * coordinates of selected elements are correctly calculated. + * + *************************************************************/ + +#include "testhdf5.h" + +#define FILENAME "coord.h5" + +#define SINGLE_END_DSET "single_end_dset" +#define MULTI_ENDS_SEL_HYPER_DSET "multiple_ends_dset" + +#define NAME_LEN 128 + +/* Data written to the dataset for single block test. Global variable + * for convenience. 
*/ +int da_buffer[2][3][6][2]; + +/*********************************************************** +** +** test_singleEnd_selElements(): Test element selection of only +** one block. +** +*************************************************************/ +static void +test_singleEnd_selElements(hid_t file, hbool_t is_chunked) +{ + hid_t sid, plid, did, msid; + char dset_name[NAME_LEN]; /* Dataset name */ + size_t elmts_numb; + herr_t ret; /* Generic error return */ + int i, j, k; + hsize_t da_dims[4] = {2, 3, 6, 2}; + hsize_t da_chunksize[4] = {1, 3, 3, 2}; + + /* For testing the full selection in the fastest-growing end */ + int mem1_buffer[1][1][6][2]; + hsize_t mem1_dims[4] = {1, 1, 6, 2}; + hsize_t da_elmts1[12][4] = {{0, 0, 0, 0}, {0, 0, 0, 1}, {0, 0, 1, 0}, {0, 0, 1, 1}, + {0, 0, 2, 0}, {0, 0, 2, 1}, {0, 0, 3, 0}, {0, 0, 3, 1}, + {0, 0, 4, 0}, {0, 0, 4, 1}, {0, 0, 5, 0}, {0, 0, 5, 1}}; + + /* For testing the full selection in the slowest-growing end */ + int mem2_buffer[2][3][1][1]; + hsize_t mem2_dims[4] = {2, 3, 1, 1}; + hsize_t da_elmts2[6][4] = {{0, 0, 0, 0}, {0, 1, 0, 0}, {0, 2, 0, 0}, + {1, 0, 0, 0}, {1, 1, 0, 0}, {1, 2, 0, 0}}; + + /* For testing the full selection in the middle dimensions */ + int mem3_buffer[1][3][6][1]; + hsize_t mem3_dims[4] = {1, 3, 6, 1}; + hsize_t da_elmts3[18][4] = {{0, 0, 0, 0}, {0, 0, 1, 0}, {0, 0, 2, 0}, {0, 0, 3, 0}, {0, 0, 4, 0}, + {0, 0, 5, 0}, {0, 1, 0, 0}, {0, 1, 1, 0}, {0, 1, 2, 0}, {0, 1, 3, 0}, + {0, 1, 4, 0}, {0, 1, 5, 0}, {0, 2, 0, 0}, {0, 2, 1, 0}, {0, 2, 2, 0}, + {0, 2, 3, 0}, {0, 2, 4, 0}, {0, 2, 5, 0}}; + + /* Create and write the dataset */ + sid = H5Screate_simple(4, da_dims, da_dims); + CHECK(sid, FAIL, "H5Screate_simple"); + + plid = H5Pcreate(H5P_DATASET_CREATE); + CHECK(plid, FAIL, "H5Pcreate"); + + if (is_chunked) { + ret = H5Pset_chunk(plid, 4, da_chunksize); + CHECK(ret, FAIL, "H5Pset_chunk"); + } + + /* Construct dataset's name */ + HDmemset(dset_name, 0, (size_t)NAME_LEN); + HDstrcat(dset_name, SINGLE_END_DSET); + if (is_chunked) + HDstrcat(dset_name, "_chunked"); + + did = H5Dcreate2(file, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, plid, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Initialize the data to be written to file */ + for (i = 0; i < 2; i++) { + for (j = 0; j < 3; j++) { + for (k = 0; k < 6; k++) { + da_buffer[i][j][k][0] = i * 100 + j * 10 + k; + da_buffer[i][j][k][1] = i * 100 + j * 10 + k + 1; + } + } + } + + ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, da_buffer); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* ****** Case 1: ****** + * Testing the full selection in the fastest-growing end */ + did = H5Dopen2(file, dset_name, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Select the elements in the dataset */ + elmts_numb = 12; + + ret = H5Sselect_elements(sid, H5S_SELECT_SET, elmts_numb, (const hsize_t *)da_elmts1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Dataspace for memory buffer */ + msid = H5Screate_simple(4, mem1_dims, mem1_dims); + CHECK(msid, FAIL, "H5Screate_simple"); + + ret = H5Sselect_all(msid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem1_buffer); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + for (i = 0; i < 6; i++) + for (j = 0; j < 2; j++) + if (da_buffer[0][0][i][j] != mem1_buffer[0][0][i][j]) { + TestErrPrintf("%u: Read different values than written at 
index 0,0,%d,%d\n", __LINE__, i, j); + } + + /* ****** Case 2: ****** + * Testing the full selection in the slowest-growing end */ + did = H5Dopen2(file, dset_name, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Select the elements in the dataset */ + elmts_numb = 6; + + ret = H5Sselect_elements(sid, H5S_SELECT_SET, elmts_numb, (const hsize_t *)da_elmts2); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Dataspace for memory buffer */ + msid = H5Screate_simple(4, mem2_dims, mem2_dims); + CHECK(msid, FAIL, "H5Screate_simple"); + + ret = H5Sselect_all(msid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem2_buffer); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + for (i = 0; i < 2; i++) + for (j = 0; j < 3; j++) + if (da_buffer[i][j][0][0] != mem2_buffer[i][j][0][0]) { + TestErrPrintf("%u: Read different values than written at index %d,%d,0,0, da_buffer = %d, " + "mem2_buffer = %d\n", + __LINE__, i, j, da_buffer[i][j][0][0], mem2_buffer[i][j][0][0]); + } + + /* ****** Case 3: ****** + * Testing the full selection in the middle dimensions */ + did = H5Dopen2(file, dset_name, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Select the elements in the dataset */ + elmts_numb = 18; + + ret = H5Sselect_elements(sid, H5S_SELECT_SET, elmts_numb, (const hsize_t *)da_elmts3); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Dataspace for memory buffer */ + msid = H5Screate_simple(4, mem3_dims, mem3_dims); + CHECK(msid, FAIL, "H5Screate_simple"); + + ret = H5Sselect_all(msid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem3_buffer); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + for (i = 0; i < 3; i++) + for (j = 0; j < 6; j++) + if (da_buffer[0][i][j][0] != mem3_buffer[0][i][j][0]) { + TestErrPrintf("%u: Read different values than written at index 0,%d,%d,0\n", __LINE__, i, j); + } + + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Pclose(plid); + CHECK(ret, FAIL, "H5Pclose"); +} + +/*********************************************************** +** +** test_singleEnd_selHyperslab(): Test full hyperslab selection +** of only one block. 
+** +*************************************************************/ +static void +test_singleEnd_selHyperslab(hid_t file, hbool_t is_chunked) +{ + hid_t sid, did, msid; + char dset_name[NAME_LEN]; /* Dataset name */ + herr_t ret; /* Generic error return */ + int i, j; + hsize_t da_dims[4] = {2, 3, 6, 2}; + + /* For testing the full selection in the fastest-growing end */ + int mem1_buffer[1][1][6][2]; + hsize_t mem1_dims[4] = {1, 1, 6, 2}; + hsize_t mem1_start[4] = {0, 0, 0, 0}; + hsize_t mem1_count[4] = {1, 1, 1, 1}; + hsize_t mem1_stride[4] = {1, 1, 1, 1}; + hsize_t mem1_block[4] = {1, 1, 6, 2}; + + /* For testing the full selection in the slowest-growing end */ + int mem2_buffer[2][3][1][1]; + hsize_t mem2_dims[4] = {2, 3, 1, 1}; + hsize_t mem2_start[4] = {0, 0, 0, 0}; + hsize_t mem2_count[4] = {1, 1, 1, 1}; + hsize_t mem2_stride[4] = {1, 1, 1, 1}; + hsize_t mem2_block[4] = {2, 3, 1, 1}; + + /* For testing the full selection in the middle dimensions */ + int mem3_buffer[1][3][6][1]; + hsize_t mem3_dims[4] = {1, 3, 6, 1}; + hsize_t mem3_start[4] = {0, 0, 0, 0}; + hsize_t mem3_count[4] = {1, 1, 1, 1}; + hsize_t mem3_stride[4] = {1, 1, 1, 1}; + hsize_t mem3_block[4] = {1, 3, 6, 1}; + + /* Construct dataset's name */ + HDmemset(dset_name, 0, NAME_LEN); + HDstrcat(dset_name, SINGLE_END_DSET); + if (is_chunked) + HDstrcat(dset_name, "_chunked"); + + /* Dataspace for the dataset in file */ + sid = H5Screate_simple(4, da_dims, da_dims); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* ****** Case 1: ****** + * Testing the full selection in the fastest-growing end */ + did = H5Dopen2(file, dset_name, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Select the elements in the dataset */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem1_start, mem1_stride, mem1_count, mem1_block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Dataspace for memory buffer */ + msid = H5Screate_simple(4, mem1_dims, mem1_dims); + CHECK(msid, FAIL, "H5Screate_simple"); + + ret = H5Sselect_all(msid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem1_buffer); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + for (i = 0; i < 6; i++) + for (j = 0; j < 2; j++) + if (da_buffer[0][0][i][j] != mem1_buffer[0][0][i][j]) { + TestErrPrintf("%u: Read different values than written at index 0,0,%d,%d\n", __LINE__, i, j); + } + + /* ****** Case 2: ****** + * Testing the full selection in the slowest-growing end */ + did = H5Dopen2(file, dset_name, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Select the elements in the dataset */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem2_start, mem2_stride, mem2_count, mem2_block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Dataspace for memory buffer */ + msid = H5Screate_simple(4, mem2_dims, mem2_dims); + CHECK(msid, FAIL, "H5Screate_simple"); + + ret = H5Sselect_all(msid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem2_buffer); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + for (i = 0; i < 2; i++) + for (j = 0; j < 3; j++) + if (da_buffer[i][j][0][0] != mem2_buffer[i][j][0][0]) { + TestErrPrintf("%u: Read different values than written at index %d,%d,0,0\n", __LINE__, i, j); + } + + /* ****** Case 3: ****** + * Testing the full selection in the middle 
dimensions */ + did = H5Dopen2(file, dset_name, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Select the elements in the dataset */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem3_start, mem3_stride, mem3_count, mem3_block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Dataspace for memory buffer */ + msid = H5Screate_simple(4, mem3_dims, mem3_dims); + CHECK(msid, FAIL, "H5Screate_simple"); + + ret = H5Sselect_all(msid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem3_buffer); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + for (i = 0; i < 3; i++) + for (j = 0; j < 6; j++) + if (da_buffer[0][i][j][0] != mem3_buffer[0][i][j][0]) { + TestErrPrintf("%u: Read different values than written at index 0,%d,%d,0\n", __LINE__, i, j); + } + + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} + +/*********************************************************** +** +** test_multiple_end(): Test full hyperslab selection of +** multiple blocks. +** +*************************************************************/ +static void +test_multiple_ends(hid_t file, hbool_t is_chunked) +{ + hid_t sid, plid, did, msid; + char dset_name[NAME_LEN]; /* Dataset name */ + herr_t ret; /* Generic error return */ + int i, j, k, l, m, n, p; + hsize_t da_dims[8] = {4, 5, 3, 4, 2, 3, 6, 2}; + hsize_t da_chunksize[8] = {1, 5, 3, 2, 2, 3, 3, 2}; + struct { + int arr[4][5][3][4][2][3][6][2]; + } *data_buf = NULL; + + /* For testing the full selections in the fastest-growing end and in the middle dimensions */ + struct { + int arr[1][1][1][4][2][1][6][2]; + } *mem1_buffer = NULL; + hsize_t mem1_dims[8] = {1, 1, 1, 4, 2, 1, 6, 2}; + hsize_t mem1_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + hsize_t mem1_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; + hsize_t mem1_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1}; + hsize_t mem1_block[8] = {1, 1, 1, 4, 2, 1, 6, 2}; + + /* For testing the full selections in the slowest-growing end and in the middle dimensions */ + struct { + int arr[4][5][1][4][2][1][1][1]; + } *mem2_buffer = NULL; + hsize_t mem2_dims[8] = {4, 5, 1, 4, 2, 1, 1, 1}; + hsize_t mem2_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + hsize_t mem2_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; + hsize_t mem2_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1}; + hsize_t mem2_block[8] = {4, 5, 1, 4, 2, 1, 1, 1}; + + /* For testing two unadjacent full selections in the middle dimensions */ + struct { + int arr[1][5][3][1][1][3][6][1]; + } *mem3_buffer = NULL; + hsize_t mem3_dims[8] = {1, 5, 3, 1, 1, 3, 6, 1}; + hsize_t mem3_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + hsize_t mem3_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; + hsize_t mem3_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1}; + hsize_t mem3_block[8] = {1, 5, 3, 1, 1, 3, 6, 1}; + + /* For testing the full selections in the fastest-growing end and the slowest-growing end */ + struct { + int arr[4][5][1][1][1][1][6][2]; + } *mem4_buffer = NULL; + hsize_t mem4_dims[8] = {4, 5, 1, 1, 1, 1, 6, 2}; + hsize_t mem4_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + hsize_t mem4_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; + hsize_t mem4_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1}; + hsize_t mem4_block[8] = {4, 5, 1, 1, 1, 1, 6, 2}; + + /* For testing the full selections in the fastest-growing end and slowest-growing end, + * also in the middle dimensions */ + struct { + int arr[4][5][1][4][2][1][6][2]; + } *mem5_buffer = NULL; + hsize_t mem5_dims[8] = {4, 5, 1, 4, 2, 1, 6, 2}; + hsize_t mem5_start[8] = {0, 0, 0, 
0, 0, 0, 0, 0}; + hsize_t mem5_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; + hsize_t mem5_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1}; + hsize_t mem5_block[8] = {4, 5, 1, 4, 2, 1, 6, 2}; + + /* Initialize dynamic arrays */ + data_buf = HDcalloc(1, sizeof(*data_buf)); + CHECK_PTR(data_buf, "HDcalloc"); + mem1_buffer = HDcalloc(1, sizeof(*mem1_buffer)); + CHECK_PTR(mem1_buffer, "HDcalloc"); + mem2_buffer = HDcalloc(1, sizeof(*mem2_buffer)); + CHECK_PTR(mem2_buffer, "HDcalloc"); + mem3_buffer = HDcalloc(1, sizeof(*mem3_buffer)); + CHECK_PTR(mem3_buffer, "HDcalloc"); + mem4_buffer = HDcalloc(1, sizeof(*mem4_buffer)); + CHECK_PTR(mem4_buffer, "HDcalloc"); + mem5_buffer = HDcalloc(1, sizeof(*mem5_buffer)); + CHECK_PTR(mem5_buffer, "HDcalloc"); + + /* Create and write the dataset */ + sid = H5Screate_simple(8, da_dims, da_dims); + CHECK(sid, FAIL, "H5Screate_simple"); + + plid = H5Pcreate(H5P_DATASET_CREATE); + CHECK(plid, FAIL, "H5Pcreate"); + + if (is_chunked) { + ret = H5Pset_chunk(plid, 8, da_chunksize); + CHECK(ret, FAIL, "H5Pset_chunk"); + } + + /* Construct dataset's name */ + HDmemset(dset_name, 0, NAME_LEN); + HDstrcat(dset_name, MULTI_ENDS_SEL_HYPER_DSET); + if (is_chunked) + HDstrcat(dset_name, "_chunked"); + + did = H5Dcreate2(file, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, plid, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + for (i = 0; i < 4; i++) + for (j = 0; j < 5; j++) + for (k = 0; k < 3; k++) + for (l = 0; l < 4; l++) + for (m = 0; m < 2; m++) + for (n = 0; n < 3; n++) + for (p = 0; p < 6; p++) { + data_buf->arr[i][j][k][l][m][n][p][0] = + i * 1000000 + j * 100000 + k * 10000 + l * 1000 + m * 100 + n * 10 + p; + data_buf->arr[i][j][k][l][m][n][p][1] = i * 1000000 + j * 100000 + k * 10000 + + l * 1000 + m * 100 + n * 10 + p + 1; + } + + ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* ****** Case 1: ****** + * Testing the full selections in the fastest-growing end and in the middle dimensions*/ + did = H5Dopen2(file, dset_name, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Select the elements in the dataset */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem1_start, mem1_stride, mem1_count, mem1_block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + msid = H5Screate_simple(8, mem1_dims, mem1_dims); + CHECK(msid, FAIL, "H5Screate_simple"); + + ret = H5Sselect_all(msid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem1_buffer); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + for (i = 0; i < 4; i++) + for (j = 0; j < 2; j++) + for (k = 0; k < 6; k++) + for (l = 0; l < 2; l++) + if (data_buf->arr[0][0][0][i][j][0][k][l] != mem1_buffer->arr[0][0][0][i][j][0][k][l]) { + TestErrPrintf("%u: Read different values than written at index 0,0,0,%d,%d,0,%d,%d\n", + __LINE__, i, j, k, l); + } + + /* ****** Case 2: ****** + * Testing the full selections in the slowest-growing end and in the middle dimensions*/ + did = H5Dopen2(file, dset_name, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Select the elements in the dataset */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem2_start, mem2_stride, mem2_count, mem2_block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + msid = H5Screate_simple(8, mem2_dims, mem2_dims); + CHECK(msid, FAIL, "H5Screate_simple"); + + ret = H5Sselect_all(msid); + CHECK(ret, FAIL, "H5Sselect_all"); +
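+ /* Read the data back through the memory dataspace; the element coordinates must still map to the values written above even when the library "flattens" the fully selected dimensions of the chunked dataset (see the note at the top of this file) */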
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem2_buffer); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + for (i = 0; i < 4; i++) + for (j = 0; j < 5; j++) + for (k = 0; k < 4; k++) + for (l = 0; l < 2; l++) + if (data_buf->arr[i][j][0][k][l][0][0][0] != mem2_buffer->arr[i][j][0][k][l][0][0][0]) { + TestErrPrintf("%u: Read different values than written at index %d,%d,0,%d,%d,0,0,0\n", + __LINE__, i, j, k, l); + } + + /* ****** Case 3: ****** + * Testing two unadjacent full selections in the middle dimensions */ + did = H5Dopen2(file, dset_name, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Select the elements in the dataset */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem3_start, mem3_stride, mem3_count, mem3_block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + msid = H5Screate_simple(8, mem3_dims, mem3_dims); + CHECK(msid, FAIL, "H5Screate_simple"); + + ret = H5Sselect_all(msid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem3_buffer); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + for (i = 0; i < 5; i++) + for (j = 0; j < 3; j++) + for (k = 0; k < 3; k++) + for (l = 0; l < 6; l++) + if (data_buf->arr[0][i][j][0][0][k][l][0] != mem3_buffer->arr[0][i][j][0][0][k][l][0]) { + TestErrPrintf("%u: Read different values than written at index 0,%d,%d,0,0,%d,%d,0\n", + __LINE__, i, j, k, l); + } + + /* ****** Case 4: ****** + * Testing the full selections in the fastest-growing end and the slowest-growing end */ + did = H5Dopen2(file, dset_name, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Select the elements in the dataset */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem4_start, mem4_stride, mem4_count, mem4_block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + msid = H5Screate_simple(8, mem4_dims, mem4_dims); + CHECK(msid, FAIL, "H5Screate_simple"); + + ret = H5Sselect_all(msid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem4_buffer); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + for (i = 0; i < 4; i++) + for (j = 0; j < 5; j++) + for (k = 0; k < 6; k++) + for (l = 0; l < 2; l++) + if (data_buf->arr[i][j][0][0][0][0][k][l] != mem4_buffer->arr[i][j][0][0][0][0][k][l]) { + TestErrPrintf("%u: Read different values than written at index %d,%d,0,0,0,0,%d,%d\n", + __LINE__, i, j, k, l); + } + + /* ****** Case 5: ****** + * Testing the full selections in the fastest-growing end and the slowest-growing end, + * and also in the middle dimensions */ + did = H5Dopen2(file, dset_name, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Select the elements in the dataset */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem5_start, mem5_stride, mem5_count, mem5_block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + msid = H5Screate_simple(8, mem5_dims, mem5_dims); + CHECK(msid, FAIL, "H5Screate_simple"); + + ret = H5Sselect_all(msid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem5_buffer); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + for (i = 0; i < 4; i++) + for (j = 0; j < 5; 
j++) + for (k = 0; k < 4; k++) + for (l = 0; l < 2; l++) + for (m = 0; m < 6; m++) + for (n = 0; n < 2; n++) + if (data_buf->arr[i][j][0][k][l][0][m][n] != + mem5_buffer->arr[i][j][0][k][l][0][m][n]) { + TestErrPrintf( + "%u: Read different values than written at index %d,%d,0,%d,%d,0,%d,%d\n", + __LINE__, i, j, k, l, m, n); + } + + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Pclose(plid); + CHECK(ret, FAIL, "H5Pclose"); + + HDfree(data_buf); + HDfree(mem1_buffer); + HDfree(mem2_buffer); + HDfree(mem3_buffer); + HDfree(mem4_buffer); + HDfree(mem5_buffer); +} + +/**************************************************************** +** +** test_coords(): Main testing routine. +** +****************************************************************/ +void +test_coords(void) +{ + hid_t fid; + hbool_t is_chunk[2] = {TRUE, FALSE}; + int i; + herr_t ret; /* Generic error return */ + + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + for (i = 0; i < 2; i++) { + /* Test H5Sselect_elements with selection of one block of data */ + test_singleEnd_selElements(fid, is_chunk[i]); + + /* Test H5Sselect_hyperslab with selection of one block of data */ + test_singleEnd_selHyperslab(fid, is_chunk[i]); + + /* Test H5Sselect_hyperslab with selection of multiple blocks of data */ + test_multiple_ends(fid, is_chunk[i]); + } + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} + +/*------------------------------------------------------------------------- + * Function: cleanup_coords + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Raymond Lu + * 20 Dec. 2007 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +cleanup_coords(void) +{ + H5Fdelete(FILENAME, H5P_DEFAULT); +} diff --git a/test/API/testhdf5.c b/test/API/testhdf5.c new file mode 100644 index 00000000000..f29b6030eba --- /dev/null +++ b/test/API/testhdf5.c @@ -0,0 +1,729 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + FILE + testhdf5.c - HDF5 testing framework main file. + + REMARKS + General test wrapper for HDF5 base library test programs + + DESIGN + Each test function should be implemented as function having no + parameters and returning void (i.e. no return value). They should be put + into the list of AddTest() calls in main() below. Functions which depend + on other functionality should be placed below the AddTest() call for the + base functionality testing. + Each test module should include testhdf5.h and define a unique set of + names for test files they create. 
+ + BUGS/LIMITATIONS + + + */ + +/* ANY new test needs to have a prototype in testhdf5.h */ +#include "testhdf5.h" + +int nerrors = 0; + +char *paraprefix = NULL; /* for command line option para-prefix */ + +/* Length of multi-file VFD filename buffers */ +#define H5TEST_MULTI_FILENAME_LEN 1024 + +/* + * This routine is designed to provide equivalent functionality to 'printf' + * and allow easy replacement for environments which don't have stdin/stdout + * available. (i.e. Windows & the Mac) + */ +H5_ATTR_FORMAT(printf, 1, 2) +int +print_func(const char *format, ...) +{ + va_list arglist; + int ret_value; + + HDva_start(arglist, format); + ret_value = HDvprintf(format, arglist); + HDva_end(arglist); + return ret_value; +} + +/* + * This routine is designed to provide equivalent functionality to 'printf' + * and also increment the error count for the testing framework. + */ +int +TestErrPrintf(const char *format, ...) +{ + va_list arglist; + int ret_value; + + /* Increment the error count */ + nerrors++; + + /* Print the requested information */ + HDva_start(arglist, format); + ret_value = HDvprintf(format, arglist); + HDva_end(arglist); + + /* Return the length of the string produced (like printf() does) */ + return ret_value; +} + +#ifdef H5_HAVE_PARALLEL +/*------------------------------------------------------------------------- + * Function: getenv_all + * + * Purpose: Used to get the environment that the root MPI task has. + * name specifies which environment variable to look for + * val is the string to which the value of that environment + * variable will be copied. + * + * NOTE: The pointer returned by this function is only + * valid until the next call to getenv_all and the data + * stored there must be copied somewhere else before any + * further calls to getenv_all take place. + * + * Return: pointer to a string containing the value of the environment variable + * NULL if the variable doesn't exist in task 'root's environment. + * + * Programmer: Leon Arber + * 4/4/05 + * + * Modifications: + * Use original getenv if MPI is not initialized. This happens + * when one uses the PHDF5 library to build a serial program.
+ * Albert 2006/04/07 + * + *------------------------------------------------------------------------- + */ +char * +getenv_all(MPI_Comm comm, int root, const char *name) +{ + int mpi_size, mpi_rank, mpi_initialized, mpi_finalized; + int len; + static char *env = NULL; + + HDassert(name); + + MPI_Initialized(&mpi_initialized); + MPI_Finalized(&mpi_finalized); + + if (mpi_initialized && !mpi_finalized) { + MPI_Comm_rank(comm, &mpi_rank); + MPI_Comm_size(comm, &mpi_size); + HDassert(root < mpi_size); + + /* The root task does the getenv call + * and sends the result to the other tasks */ + if (mpi_rank == root) { + env = HDgetenv(name); + if (env) { + len = (int)HDstrlen(env); + MPI_Bcast(&len, 1, MPI_INT, root, comm); + MPI_Bcast(env, len, MPI_CHAR, root, comm); + } + else { + /* len -1 indicates that the variable was not in the environment */ + len = -1; + MPI_Bcast(&len, 1, MPI_INT, root, comm); + } + } + else { + MPI_Bcast(&len, 1, MPI_INT, root, comm); + if (len >= 0) { + if (env == NULL) + env = (char *)HDmalloc((size_t)len + 1); + else if (HDstrlen(env) < (size_t)len) + env = (char *)HDrealloc(env, (size_t)len + 1); + + MPI_Bcast(env, len, MPI_CHAR, root, comm); + env[len] = '\0'; + } + else { + if (env) + HDfree(env); + env = NULL; + } + } +#ifndef NDEBUG + MPI_Barrier(comm); +#endif + } + else { + /* use original getenv */ + if (env) + HDfree(env); + env = HDgetenv(name); + } /* end if */ + + return env; +} + +#endif + +/*------------------------------------------------------------------------- + * Function: h5_fileaccess + * + * Purpose: Returns a file access template which is the default template + * but with a file driver, VOL connector, or libver bound set + * according to a constant or environment variable + * + * Return: Success: A file access property list + * Failure: H5I_INVALID_HID + * + * Programmer: Robb Matzke + * Thursday, November 19, 1998 + * + *------------------------------------------------------------------------- + */ +hid_t +h5_fileaccess(void) +{ + hid_t fapl_id = H5I_INVALID_HID; + + if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) + goto error; + + /* Finally, check for libver bounds */ + if (h5_get_libver_fapl(fapl_id) < 0) + goto error; + + return fapl_id; + +error: + if (fapl_id != H5I_INVALID_HID) + H5Pclose(fapl_id); + return H5I_INVALID_HID; +} /* end h5_fileaccess() */ + +/*------------------------------------------------------------------------- + * Function: h5_get_libver_fapl + * + * Purpose: Sets the library version bounds for a FAPL according to the + * value in the constant or environment variable "HDF5_LIBVER_BOUNDS". + * + * Return: Success: 0 + * Failure: -1 + * + * Programmer: Quincey Koziol + * November 2018 + * + *------------------------------------------------------------------------- + */ +herr_t +h5_get_libver_fapl(hid_t fapl) +{ + const char *env = NULL; /* HDF5_DRIVER environment variable */ + const char *tok = NULL; /* strtok pointer */ + char *lasts = NULL; /* Context pointer for strtok_r() call */ + char buf[1024]; /* buffer for tokenizing HDF5_DRIVER */ + + /* Get the environment variable, if it exists */ + env = HDgetenv("HDF5_LIBVER_BOUNDS"); +#ifdef HDF5_LIBVER_BOUNDS + /* Use the environment variable, then the compile-time constant */ + if (!env) + env = HDF5_LIBVER_BOUNDS; +#endif + + /* If the environment variable was not set, just return + * without modifying the FAPL. + */ + if (!env || !*env) + goto done; + + /* Get the first 'word' of the environment variable. 
+ * If it's nothing (environment variable was whitespace) + * just return the default fapl. + */ + HDstrncpy(buf, env, sizeof(buf)); + buf[sizeof(buf) - 1] = '\0'; + if (NULL == (tok = HDstrtok_r(buf, " \t\n\r", &lasts))) + goto done; + + if (!HDstrcmp(tok, "latest")) { + /* use the latest format */ + if (H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + goto error; + } /* end if */ + else { + /* Unknown setting */ + goto error; + } /* end else */ + +done: + return 0; + +error: + return -1; +} /* end h5_get_libver_fapl() */ + +#ifndef HDF5_PARAPREFIX +#define HDF5_PARAPREFIX "" +#endif +static char * +h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fullname, size_t size, + hbool_t nest_printf, hbool_t subst_for_superblock) +{ + const char *prefix = NULL; + const char *driver_env_var = NULL; /* HDF5_DRIVER environment variable */ + char *ptr, last = '\0'; + const char *suffix = _suffix; + size_t i, j; + hid_t driver = -1; + int isppdriver = 0; /* if the driver is MPI parallel */ + + if (!base_name || !fullname || size < 1) + return NULL; + + HDmemset(fullname, 0, size); + + /* Determine if driver is set by environment variable. If it is, + * only generate a suffix if fixing the filename for the superblock + * file. */ + driver_env_var = HDgetenv(HDF5_DRIVER); + if (driver_env_var && (H5P_DEFAULT == fapl) && subst_for_superblock) + fapl = H5P_FILE_ACCESS_DEFAULT; + + /* figure out the suffix */ + if (H5P_DEFAULT != fapl) { + if ((driver = H5Pget_driver(fapl)) < 0) + return NULL; + + if (suffix) { + if (H5FD_FAMILY == driver) { + if (subst_for_superblock) + suffix = "-000000.h5"; + else + suffix = nest_printf ? "-%%06d.h5" : "-%06d.h5"; + } + else if (H5FD_MULTI == driver) { + + /* Check the HDF5_DRIVER environment variable in case + * we are using the split driver since both of those + * use the multi VFD under the hood. + */ + if (driver_env_var && !HDstrcmp(driver_env_var, "split")) { + /* split VFD */ + if (subst_for_superblock) + suffix = ".h5.meta"; + } + else { + /* multi VFD */ + if (subst_for_superblock) + suffix = "-s.h5"; + else + suffix = NULL; + } + } + } + } + + /* Must first check fapl is not H5P_DEFAULT (-1) because H5FD_XXX + * could be of value -1 if it is not defined. + */ + isppdriver = ((H5P_DEFAULT != fapl) || driver_env_var) && (H5FD_MPIO == driver); +#if 0 + /* Check HDF5_NOCLEANUP environment setting. + * (The #ifdef is needed to prevent compile failure in case MPI is not + * configured.) + */ + if (isppdriver) { +#ifdef H5_HAVE_PARALLEL + if (getenv_all(MPI_COMM_WORLD, 0, HDF5_NOCLEANUP)) + SetTestNoCleanup(); +#endif /* H5_HAVE_PARALLEL */ + } + else { + if (HDgetenv(HDF5_NOCLEANUP)) + SetTestNoCleanup(); + } +#endif + /* Check what prefix to use for test files. Process HDF5_PARAPREFIX and + * HDF5_PREFIX. + * Use different ones depending on parallel or serial driver used. + * (The #ifdef is needed to prevent compile failure in case MPI is not + * configured.) + */ + if (isppdriver) { +#ifdef H5_HAVE_PARALLEL + /* + * For parallel: + * First use command line option, then the environment + * variable, then try the constant + */ + static int explained = 0; + + prefix = (paraprefix ? paraprefix : getenv_all(MPI_COMM_WORLD, 0, "HDF5_PARAPREFIX")); + + if (!prefix && !explained) { + /* print hint by process 0 once. 
*/ + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + if (mpi_rank == 0) + HDprintf("*** Hint ***\n" + "You can use environment variable HDF5_PARAPREFIX to " + "run parallel test files in a\n" + "different directory or to add file type prefix. e.g.,\n" + " HDF5_PARAPREFIX=pfs:/PFS/user/me\n" + " export HDF5_PARAPREFIX\n" + "*** End of Hint ***\n"); + + explained = TRUE; +#ifdef HDF5_PARAPREFIX + prefix = HDF5_PARAPREFIX; +#endif /* HDF5_PARAPREFIX */ + } +#endif /* H5_HAVE_PARALLEL */ + } + else { + /* + * For serial: + * First use the environment variable, then try the constant + */ + prefix = HDgetenv("HDF5_PREFIX"); + +#ifdef HDF5_PREFIX + if (!prefix) + prefix = HDF5_PREFIX; +#endif /* HDF5_PREFIX */ + } + + /* Prepend the prefix value to the base name */ + if (prefix && *prefix) { + if (isppdriver) { + /* This is a parallel system */ + char *subdir; + + if (!HDstrcmp(prefix, HDF5_PARAPREFIX)) { + /* + * If the prefix specifies the HDF5_PARAPREFIX directory, then + * default to using the "/tmp/$USER" or "/tmp/$LOGIN" + * directory instead. + */ + char *user, *login; + + user = HDgetenv("USER"); + login = HDgetenv("LOGIN"); + subdir = (user ? user : login); + + if (subdir) { + for (i = 0; i < size && prefix[i]; i++) + fullname[i] = prefix[i]; + + fullname[i++] = '/'; + + for (j = 0; i < size && subdir[j]; ++i, ++j) + fullname[i] = subdir[j]; + } + } + + if (!fullname[0]) { + /* We didn't append the prefix yet */ + HDstrncpy(fullname, prefix, size); + fullname[size - 1] = '\0'; + } + + if (HDstrlen(fullname) + HDstrlen(base_name) + 1 < size) { + /* + * Append the base_name with a slash first. Multiple + * slashes are handled below. + */ + h5_stat_t buf; + + if (HDstat(fullname, &buf) < 0) + /* The directory doesn't exist just yet */ + if (HDmkdir(fullname, (mode_t)0755) < 0 && errno != EEXIST) + /* + * We couldn't make the "/tmp/${USER,LOGIN}" + * subdirectory. Default to PREFIX's original + * prefix value. 
+ */ + HDstrcpy(fullname, prefix); + + HDstrcat(fullname, "/"); + HDstrcat(fullname, base_name); + } + else { + /* Buffer is too small */ + return NULL; + } + } + else { + if (HDsnprintf(fullname, size, "%s/%s", prefix, base_name) == (int)size) + /* Buffer is too small */ + return NULL; + } + } + else if (HDstrlen(base_name) >= size) { + /* Buffer is too small */ + return NULL; + } + else { + HDstrcpy(fullname, base_name); + } + + /* Append a suffix */ + if (suffix) { + if (HDstrlen(fullname) + HDstrlen(suffix) >= size) + return NULL; + + HDstrcat(fullname, suffix); + } + + /* Remove any double slashes in the filename */ + for (ptr = fullname, i = j = 0; ptr && i < size; i++, ptr++) { + if (*ptr != '/' || last != '/') + fullname[j++] = *ptr; + + last = *ptr; + } + + return fullname; +} + +char * +h5_fixname(const char *base_name, hid_t fapl, char *fullname, size_t size) +{ + return (h5_fixname_real(base_name, fapl, ".h5", fullname, size, FALSE, FALSE)); +} + +char * +h5_fixname_superblock(const char *base_name, hid_t fapl_id, char *fullname, size_t size) +{ + return (h5_fixname_real(base_name, fapl_id, ".h5", fullname, size, FALSE, TRUE)); +} + +hbool_t +h5_using_default_driver(const char *drv_name) +{ + hbool_t ret_val = TRUE; + + HDassert(H5_DEFAULT_VFD == H5FD_SEC2); + + if (!drv_name) + drv_name = HDgetenv(HDF5_DRIVER); + + if (drv_name) + return (!HDstrcmp(drv_name, "sec2") || !HDstrcmp(drv_name, "nomatch")); + + return ret_val; +} + +herr_t +h5_driver_is_default_vfd_compatible(hid_t fapl_id, hbool_t *default_vfd_compatible) +{ + unsigned long feat_flags = 0; + hid_t driver_id = H5I_INVALID_HID; + herr_t ret_value = SUCCEED; + + HDassert(fapl_id >= 0); + HDassert(default_vfd_compatible); + + if (fapl_id == H5P_DEFAULT) + fapl_id = H5P_FILE_ACCESS_DEFAULT; + + if ((driver_id = H5Pget_driver(fapl_id)) < 0) + return FAIL; + + if (H5FDdriver_query(driver_id, &feat_flags) < 0) + return FAIL; + + *default_vfd_compatible = (feat_flags & H5FD_FEAT_DEFAULT_VFD_COMPATIBLE); + + return ret_value; +} /* end h5_driver_is_default_vfd_compatible() */ + +int +main(int argc, char *argv[]) +{ +#if defined(H5_PARALLEL_TEST) + MPI_Init(&argc, &argv); +#else + (void)argc; + (void)argv; +#endif + + HDprintf("===================================\n"); + HDprintf("HDF5 TESTS START\n"); + HDprintf("===================================\n"); + + /* Initialize testing framework */ + /* TestInit(argv[0], NULL, NULL); */ + + /* Tests are generally arranged from least to most complexity... 
*/ + /* AddTest("config", test_configure, cleanup_configure, "Configure definitions", NULL); */ + HDprintf("** CONFIGURE DEFINITIONS **\n"); + test_configure(); + HDprintf("\n"); + + /* AddTest("metadata", test_metadata, cleanup_metadata, "Encoding/decoding metadata", NULL); */ + + /* AddTest("checksum", test_checksum, cleanup_checksum, "Checksum algorithm", NULL); */ + HDprintf("** CHECKSUM ALGORITHM **\n"); + test_checksum(); + HDprintf("\n"); + + /* AddTest("tst", test_tst, NULL, "Ternary Search Trees", NULL); */ + + /* AddTest("heap", test_heap, NULL, "Memory Heaps", NULL); */ + + /* AddTest("skiplist", test_skiplist, NULL, "Skip Lists", NULL); */ + + /* AddTest("refstr", test_refstr, NULL, "Reference Counted Strings", NULL); */ + + /* AddTest("file", test_file, cleanup_file, "Low-Level File I/O", NULL); */ + HDprintf("** LOW-LEVEL FILE I/O **\n"); + test_file(); + HDprintf("\n"); + + /* AddTest("objects", test_h5o, cleanup_h5o, "Generic Object Functions", NULL); */ + HDprintf("** GENERIC OBJECT FUNCTIONS **\n"); + test_h5o(); + HDprintf("\n"); + + /* AddTest("h5s", test_h5s, cleanup_h5s, "Dataspaces", NULL); */ + HDprintf("** DATASPACES **\n"); + test_h5s(); + HDprintf("\n"); + + /* AddTest("coords", test_coords, cleanup_coords, "Dataspace coordinates", NULL); */ + HDprintf("** DATASPACE COORDINATES **\n"); + test_coords(); + HDprintf("\n"); + + /* AddTest("sohm", test_sohm, cleanup_sohm, "Shared Object Header Messages", NULL); */ + + /* AddTest("attr", test_attr, cleanup_attr, "Attributes", NULL); */ + HDprintf("** ATTRIBUTES **\n"); + test_attr(); + HDprintf("\n"); + + /* AddTest("select", test_select, cleanup_select, "Selections", NULL); */ + HDprintf("** SELECTIONS **\n"); + test_select(); + HDprintf("\n"); + + /* AddTest("time", test_time, cleanup_time, "Time Datatypes", NULL); */ + HDprintf("** TIME DATATYPES**\n"); + test_time(); + HDprintf("\n"); + + /* AddTest("ref_deprec", test_reference_deprec, cleanup_reference_deprec, "Deprecated References", NULL); + */ + + /* AddTest("ref", test_reference, cleanup_reference, "References", NULL); */ + HDprintf("** REFERENCES **\n"); + test_reference(); + HDprintf("\n"); + + /* AddTest("vltypes", test_vltypes, cleanup_vltypes, "Variable-Length Datatypes", NULL); */ + HDprintf("** VARIABLE-LENGTH DATATYPES **\n"); + test_vltypes(); + HDprintf("\n"); + + /* AddTest("vlstrings", test_vlstrings, cleanup_vlstrings, "Variable-Length Strings", NULL); */ + HDprintf("** VARIABLE-LENGTH STRINGS **\n"); + test_vlstrings(); + HDprintf("\n"); + + /* AddTest("iterate", test_iterate, cleanup_iterate, "Group & Attribute Iteration", NULL); */ + HDprintf("** GROUP & ATTRIBUTE ITERATION **\n"); + test_iterate(); + HDprintf("\n"); + + /* AddTest("array", test_array, cleanup_array, "Array Datatypes", NULL); */ + HDprintf("** ARRAY DATATYPES **\n"); + test_array(); + HDprintf("\n"); + + /* AddTest("genprop", test_genprop, cleanup_genprop, "Generic Properties", NULL); */ + HDprintf("** GENERIC PROPERTIES **\n"); + test_genprop(); + HDprintf("\n"); + + /* AddTest("unicode", test_unicode, cleanup_unicode, "UTF-8 Encoding", NULL); */ + HDprintf("** UTF-8 ENCODING **\n"); + test_unicode(); + HDprintf("\n"); + + /* AddTest("id", test_ids, NULL, "User-Created Identifiers", NULL); */ + HDprintf("** USER-CREATED IDENTIFIERS **\n"); + test_ids(); + HDprintf("\n"); + + /* AddTest("misc", test_misc, cleanup_misc, "Miscellaneous", NULL); */ + HDprintf("** MISCELLANEOUS **\n"); + test_misc(); + HDprintf("\n"); + + /* Display testing information */ + /* TestInfo(argv[0]); */ 
+ + /* Parse command line arguments */ + /* TestParseCmdLine(argc,argv); */ + + /* Perform requested testing */ + /* PerformTests(); */ + + /* Display test summary, if requested */ + /* if (GetTestSummary()) + TestSummary(); */ + + /* Clean up test files, if allowed */ + if (/* GetTestCleanup() && */ !getenv("HDF5_NOCLEANUP")) { + /* TestCleanup(); */ + + HDprintf("TEST CLEANUP\n"); + + H5E_BEGIN_TRY + cleanup_configure(); + cleanup_checksum(); + cleanup_file(); + cleanup_h5o(); + cleanup_h5s(); + cleanup_coords(); + cleanup_attr(); + cleanup_select(); + cleanup_time(); + cleanup_reference(); + cleanup_vltypes(); + cleanup_vlstrings(); + cleanup_iterate(); + cleanup_array(); + cleanup_genprop(); + cleanup_unicode(); + cleanup_misc(); + H5E_END_TRY; + + HDprintf("\n"); + } + + /* Release test infrastructure */ + /* TestShutdown(); */ + + /* Exit failure if errors encountered; else exit success. */ + /* No need to print anything since PerformTests() already does. */ + if (nerrors /* GetTestNumErrs() */ > 0) { + HDprintf("** HDF5 tests failed with %d errors **\n", nerrors); + HDexit(EXIT_FAILURE); + } + else { + HDprintf("** HDF5 tests ran successfully **\n"); + HDexit(EXIT_SUCCESS); + } +} /* end main() */ diff --git a/test/API/testhdf5.h b/test/API/testhdf5.h new file mode 100644 index 00000000000..44ccfe000c3 --- /dev/null +++ b/test/API/testhdf5.h @@ -0,0 +1,349 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * This header file contains information required for testing the HDF5 library. + */ + +#ifndef TESTHDF5_H +#define TESTHDF5_H + +/* Include generic testing header also */ +/* #include "h5test.h" */ +#include "hdf5.h" +#include "H5private.h" +#include "H5_api_tests_disabled.h" + +#define VERBO_NONE 0 /* None */ +#define VERBO_DEF 3 /* Default */ +#define VERBO_LO 5 /* Low */ +#define VERBO_MED 7 /* Medium */ +#define VERBO_HI 9 /* High */ + +/* Turn off verbose reporting by default */ +#define VERBOSE_MED (FALSE) +#define VERBOSE_HI (FALSE) + +/* Use %ld to print the value because long should cover most cases. 
*/ +/* Used to make certain a return value _is_not_ a value */ +#define CHECK(ret, val, where) \ + do { \ + if (VERBOSE_HI) { \ + print_func(" Call to routine: %15s at line %4d " \ + "in %s returned %ld \n", \ + where, (int)__LINE__, __FILE__, (long)(ret)); \ + } \ + if ((ret) == (val)) { \ + TestErrPrintf("*** UNEXPECTED RETURN from %s is %ld at line %4d " \ + "in %s\n", \ + where, (long)(ret), (int)__LINE__, __FILE__); \ + H5Eprint2(H5E_DEFAULT, stdout); \ + } \ + } while (0) + +#define CHECK_I(ret, where) \ + { \ + if (VERBOSE_HI) { \ + print_func(" Call to routine: %15s at line %4d in %s returned %ld\n", (where), (int)__LINE__, \ + __FILE__, (long)(ret)); \ + } \ + if ((ret) < 0) { \ + TestErrPrintf("*** UNEXPECTED RETURN from %s is %ld line %4d in %s\n", (where), (long)(ret), \ + (int)__LINE__, __FILE__); \ + H5Eprint2(H5E_DEFAULT, stdout); \ + } \ + } + +/* Check that a pointer is valid (i.e.: not NULL) */ +#define CHECK_PTR(ret, where) \ + { \ + if (VERBOSE_HI) { \ + print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ + __FILE__, ((const void *)ret)); \ + } \ + if (!(ret)) { \ + TestErrPrintf("*** UNEXPECTED RETURN from %s is NULL line %4d in %s\n", (where), (int)__LINE__, \ + __FILE__); \ + H5Eprint2(H5E_DEFAULT, stdout); \ + } \ + } + +/* Check that a pointer is NULL */ +#define CHECK_PTR_NULL(ret, where) \ + { \ + if (VERBOSE_HI) { \ + print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ + __FILE__, ((const void *)ret)); \ + } \ + if (ret) { \ + TestErrPrintf("*** UNEXPECTED RETURN from %s is not NULL line %4d in %s\n", (where), \ + (int)__LINE__, __FILE__); \ + H5Eprint2(H5E_DEFAULT, stdout); \ + } \ + } + +/* Check that two pointers are equal */ +#define CHECK_PTR_EQ(ret, val, where) \ + { \ + if (VERBOSE_HI) { \ + print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ + __FILE__, (const void *)(ret)); \ + } \ + if (ret != val) { \ + TestErrPrintf( \ + "*** UNEXPECTED RETURN from %s: returned value of %p is not equal to %p line %4d in %s\n", \ + (where), (const void *)(ret), (const void *)(val), (int)__LINE__, __FILE__); \ + H5Eprint2(H5E_DEFAULT, stdout); \ + } \ + } + +/* Used to make certain a return value _is_ a value */ +#define VERIFY(_x, _val, where) \ + do { \ + long __x = (long)_x, __val = (long)_val; \ + if (VERBOSE_HI) { \ + print_func(" Call to routine: %15s at line %4d in %s had value " \ + "%ld \n", \ + (where), (int)__LINE__, __FILE__, __x); \ + } \ + if ((__x) != (__val)) { \ + TestErrPrintf("*** UNEXPECTED VALUE from %s should be %ld, but is %ld at line %4d " \ + "in %s\n", \ + (where), __val, __x, (int)__LINE__, __FILE__); \ + H5Eprint2(H5E_DEFAULT, stdout); \ + } \ + } while (0) + +/* Used to make certain a (non-'long' type's) return value _is_ a value */ +#define VERIFY_TYPE(_x, _val, _type, _format, where) \ + do { \ + _type __x = (_type)_x, __val = (_type)_val; \ + if (VERBOSE_HI) { \ + print_func(" Call to routine: %15s at line %4d in %s had value " _format " \n", (where), \ + (int)__LINE__, __FILE__, __x); \ + } \ + if ((__x) != (__val)) { \ + TestErrPrintf("*** UNEXPECTED VALUE from %s should be " _format ", but is " _format \ + " at line %4d " \ + "in %s\n", \ + (where), __val, __x, (int)__LINE__, __FILE__); \ + H5Eprint2(H5E_DEFAULT, stdout); \ + } \ + } while (0) + +/* Used to make certain a string return value _is_ a value */ +#define VERIFY_STR(x, val, where) \ + do { \ + if (VERBOSE_HI) { \ + print_func(" Call to routine: 
%15s at line %4d in %s had value " \ + "%s \n", \ + (where), (int)__LINE__, __FILE__, x); \ + } \ + if (HDstrcmp(x, val) != 0) { \ + TestErrPrintf("*** UNEXPECTED VALUE from %s should be %s, but is %s at line %4d " \ + "in %s\n", \ + where, val, x, (int)__LINE__, __FILE__); \ + H5Eprint2(H5E_DEFAULT, stdout); \ + } \ + } while (0) + +/* Used to document process through a test and to check for errors */ +#define RESULT(ret, func) \ + do { \ + if (VERBOSE_MED) { \ + print_func(" Call to routine: %15s at line %4d in %s returned " \ + "%ld\n", \ + func, (int)__LINE__, __FILE__, (long)(ret)); \ + } \ + if (VERBOSE_HI) \ + H5Eprint2(H5E_DEFAULT, stdout); \ + if ((ret) == FAIL) { \ + TestErrPrintf("*** UNEXPECTED RETURN from %s is %ld at line %4d " \ + "in %s\n", \ + func, (long)(ret), (int)__LINE__, __FILE__); \ + H5Eprint2(H5E_DEFAULT, stdout); \ + } \ + } while (0) + +/* Used to document process through a test */ +#if defined(H5_HAVE_PARALLEL) && defined(H5_PARALLEL_TEST) +#define MESSAGE(V, A) \ + { \ + int mpi_rank; \ + \ + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); \ + if (mpi_rank == 0 && VERBO_LO /* HDGetTestVerbosity() */ >= (V)) \ + print_func A; \ + } +#else /* H5_HAVE_PARALLEL */ +#define MESSAGE(V, A) \ + { \ + if (VERBO_LO /* HDGetTestVerbosity() */ >= (V)) \ + print_func A; \ + } +#endif /* H5_HAVE_PARALLEL */ + +/* Used to indicate an error that is complex to check for */ +#define ERROR(where) \ + do { \ + if (VERBOSE_HI) \ + print_func(" Call to routine: %15s at line %4d in %s returned " \ + "invalid result\n", \ + where, (int)__LINE__, __FILE__); \ + TestErrPrintf("*** UNEXPECTED RESULT from %s at line %4d in %s\n", where, (int)__LINE__, __FILE__); \ + } while (0) + +/* definitions for command strings */ +#define VERBOSITY_STR "Verbosity" +#define SKIP_STR "Skip" +#define TEST_STR "Test" +#define CLEAN_STR "Cleanup" + +#define AT() HDprintf(" at %s:%d in %s()...\n", __FILE__, __LINE__, __func__); +#define TESTING(WHAT) \ + { \ + HDprintf("Testing %-62s", WHAT); \ + HDfflush(stdout); \ + } +#define TESTING_2(WHAT) \ + { \ + HDprintf(" Testing %-60s", WHAT); \ + HDfflush(stdout); \ + } +#define PASSED() \ + { \ + HDputs(" PASSED"); \ + HDfflush(stdout); \ + } +#define H5_FAILED() \ + { \ + HDputs("*FAILED*"); \ + HDfflush(stdout); \ + } +#define H5_WARNING() \ + { \ + HDputs("*WARNING*"); \ + HDfflush(stdout); \ + } +#define SKIPPED() \ + { \ + HDputs(" -SKIP-"); \ + HDfflush(stdout); \ + } +#define PUTS_ERROR(s) \ + { \ + HDputs(s); \ + AT(); \ + goto error; \ + } +#define TEST_ERROR \ + { \ + H5_FAILED(); \ + AT(); \ + goto error; \ + } +#define STACK_ERROR \ + { \ + H5Eprint2(H5E_DEFAULT, stdout); \ + goto error; \ + } +#define FAIL_STACK_ERROR \ + { \ + H5_FAILED(); \ + AT(); \ + H5Eprint2(H5E_DEFAULT, stdout); \ + goto error; \ + } +#define FAIL_PUTS_ERROR(s) \ + { \ + H5_FAILED(); \ + AT(); \ + HDputs(s); \ + goto error; \ + } + +#ifdef __cplusplus +extern "C" { +#endif + +extern int nerrors; + +int print_func(const char *format, ...); +int TestErrPrintf(const char *format, ...); +hid_t h5_fileaccess(void); +/* Functions that will replace components of a FAPL */ +herr_t h5_get_vfd_fapl(hid_t fapl_id); +herr_t h5_get_libver_fapl(hid_t fapl_id); +char *h5_fixname(const char *base_name, hid_t fapl, char *fullname, size_t size); +char *h5_fixname_superblock(const char *base_name, hid_t fapl, char *fullname, size_t size); +hbool_t h5_using_default_driver(const char *drv_name); +herr_t h5_driver_is_default_vfd_compatible(hid_t fapl_id, hbool_t *default_vfd_compatible); + +#ifdef 
H5_HAVE_PARALLEL +char *getenv_all(MPI_Comm comm, int root, const char *name); +#endif + +/* Prototypes for the test routines */ +void test_metadata(void); +void test_checksum(void); +void test_refstr(void); +void test_file(void); +void test_h5o(void); +void test_h5t(void); +void test_h5s(void); +void test_coords(void); +void test_h5d(void); +void test_attr(void); +void test_select(void); +void test_time(void); +void test_reference(void); +void test_reference_deprec(void); +void test_vltypes(void); +void test_vlstrings(void); +void test_iterate(void); +void test_array(void); +void test_genprop(void); +void test_configure(void); +void test_h5_system(void); +void test_misc(void); +void test_ids(void); +void test_skiplist(void); +void test_sohm(void); +void test_unicode(void); + +/* Prototypes for the cleanup routines */ +void cleanup_metadata(void); +void cleanup_checksum(void); +void cleanup_file(void); +void cleanup_h5o(void); +void cleanup_h5s(void); +void cleanup_coords(void); +void cleanup_attr(void); +void cleanup_select(void); +void cleanup_time(void); +void cleanup_reference(void); +void cleanup_reference_deprec(void); +void cleanup_vltypes(void); +void cleanup_vlstrings(void); +void cleanup_iterate(void); +void cleanup_array(void); +void cleanup_genprop(void); +void cleanup_configure(void); +void cleanup_h5_system(void); +void cleanup_sohm(void); +void cleanup_misc(void); +void cleanup_unicode(void); + +#ifdef __cplusplus +} +#endif +#endif /* TESTHDF5_H */ diff --git a/test/API/tfile.c b/test/API/tfile.c new file mode 100644 index 00000000000..bc0f18ee301 --- /dev/null +++ b/test/API/tfile.c @@ -0,0 +1,8381 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: tfile + * + * Test the low-level file I/O features. + * + *************************************************************/ + +#include "testhdf5.h" +/* #include "H5srcdir.h" */ + +/* #include "H5Iprivate.h" */ +/* #include "H5Pprivate.h" */ +/* #include "H5VLprivate.h" */ /* Virtual Object Layer */ + +#if 0 +/* + * This file needs to access private information from the H5F package. + * This file also needs to access the file testing code. 
+ */ +#define H5F_FRIEND /*suppress error about including H5Fpkg */ +#define H5F_TESTING +#include "H5Fpkg.h" /* File access */ + +#define H5FD_FRIEND /*suppress error about including H5FDpkg.h */ +#define H5FD_TESTING +#include "H5FDpkg.h" + +#define H5D_FRIEND /*suppress error about including H5Dpkg */ +#include "H5Dpkg.h" /* Dataset access */ + +#define H5S_FRIEND /*suppress error about including H5Spkg */ +#include "H5Spkg.h" /* Dataspace */ + +#define H5T_FRIEND /*suppress error about including H5Tpkg */ +#include "H5Tpkg.h" /* Datatype */ + +#define H5A_FRIEND /*suppress error about including H5Apkg */ +#include "H5Apkg.h" /* Attributes */ + +#define H5O_FRIEND /*suppress error about including H5Opkg */ +#include "H5Opkg.h" /* Object headers */ +#endif + +#define BAD_USERBLOCK_SIZE1 (hsize_t)1 +#define BAD_USERBLOCK_SIZE2 (hsize_t)2 +#define BAD_USERBLOCK_SIZE3 (hsize_t)3 +#define BAD_USERBLOCK_SIZE4 (hsize_t)64 +#define BAD_USERBLOCK_SIZE5 (hsize_t)511 +#define BAD_USERBLOCK_SIZE6 (hsize_t)513 +#define BAD_USERBLOCK_SIZE7 (hsize_t)6144 + +#define F1_USERBLOCK_SIZE (hsize_t)0 +#define F1_OFFSET_SIZE sizeof(haddr_t) +#define F1_LENGTH_SIZE sizeof(hsize_t) +#define F1_SYM_LEAF_K 4 +#define F1_SYM_INTERN_K 16 +#define FILE1 "tfile1.h5" +#define SFILE1 "sys_file1" + +#define REOPEN_FILE "tfile_reopen.h5" +#define REOPEN_DSET "dset" + +#define F2_USERBLOCK_SIZE (hsize_t)512 +#define F2_OFFSET_SIZE 8 +#define F2_LENGTH_SIZE 8 +#define F2_SYM_LEAF_K 8 +#define F2_SYM_INTERN_K 32 +#define F2_RANK 2 +#define F2_DIM0 4 +#define F2_DIM1 6 +#define F2_DSET "dset" +#define FILE2 "tfile2.h5" + +#define F3_USERBLOCK_SIZE (hsize_t)0 +#define F3_OFFSET_SIZE F2_OFFSET_SIZE +#define F3_LENGTH_SIZE F2_LENGTH_SIZE +#define F3_SYM_LEAF_K F2_SYM_LEAF_K +#define F3_SYM_INTERN_K F2_SYM_INTERN_K +#define FILE3 "tfile3.h5" + +#define GRP_NAME "/group" +#define DSET_NAME "dataset" +#define ATTR_NAME "attr" +#define TYPE_NAME "type" +#define FILE4 "tfile4.h5" + +#define OBJ_ID_COUNT_0 0 +#define OBJ_ID_COUNT_1 1 +#define OBJ_ID_COUNT_2 2 +#define OBJ_ID_COUNT_3 3 +#define OBJ_ID_COUNT_4 4 +#define OBJ_ID_COUNT_6 6 +#define OBJ_ID_COUNT_8 8 + +#define GROUP1 "Group1" +#define DSET1 "Dataset1" +#define DSET2 "/Group1/Dataset2" + +#define TESTA_GROUPNAME "group" +#define TESTA_DSETNAME "dataset" +#define TESTA_ATTRNAME "attribute" +#define TESTA_DTYPENAME "compound" +#define TESTA_NAME_BUF_SIZE 64 +#define TESTA_RANK 2 +#define TESTA_NX 4 +#define TESTA_NY 5 + +#define USERBLOCK_SIZE ((hsize_t)512) + +/* Declarations for test_filespace_*() */ +#define FILENAME_LEN 1024 /* length of file name */ +#define DSETNAME "dset" /* Name of dataset */ +#define NELMTS(X) (sizeof(X) / sizeof(X[0])) /* # of elements */ +#define READ_OLD_BUFSIZE 1024 /* Buffer for holding file data */ +#define FILE5 "tfile5.h5" /* Test file */ +#define TEST_THRESHOLD10 10 /* Free space section threshold */ +#define FSP_SIZE_DEF 4096 /* File space page size default */ +#define FSP_SIZE512 512 /* File space page size */ +#define FSP_SIZE1G (1024 * 1024 * 1024) /* File space page size */ + +/* Declaration for test_libver_macros2() */ +#define FILE6 "tfile6.h5" /* Test file */ + +/* Declaration for test_get_obj_ids() */ +#define FILE7 "tfile7.h5" /* Test file */ +#define NGROUPS 2 +#define NDSETS 4 + +/* Declaration for test_incr_filesize() */ +#define FILE8 "tfile8.h5" /* Test file */ + +/* Files created under 1.6 branch and 1.8 branch--used in test_filespace_compatible() */ +const char *OLD_FILENAME[] = { + "filespace_1_6.h5", /* 1.6 HDF5 file */ + 
"filespace_1_8.h5" /* 1.8 HDF5 file */ +}; + +/* Files created in 1.10.0 release --used in test_filespace_1.10.0_compatible() */ +/* These files are copied from release 1.10.0 tools/h5format_convert/testfiles */ +const char *OLD_1_10_0_FILENAME[] = { + "h5fc_ext1_i.h5", /* 0 */ + "h5fc_ext1_f.h5", /* 1 */ + "h5fc_ext2_if.h5", /* 2 */ + "h5fc_ext2_sf.h5", /* 3 */ + "h5fc_ext3_isf.h5", /* 4 */ + "h5fc_ext_none.h5" /* 5 */ +}; + +/* Files used in test_filespace_round_compatible() */ +const char *FSPACE_FILENAMES[] = { + "fsm_aggr_nopersist.h5", /* H5F_FILE_SPACE_AGGR, not persisting free-space */ + "fsm_aggr_persist.h5", /* H5F_FILE_SPACE_AGGR, persisting free-space */ + "paged_nopersist.h5", /* H5F_FILE_SPACE_PAGE, not persisting free-space */ + "paged_persist.h5", /* H5F_FILE_SPACE_PAGE, persisting free-space */ + "aggr.h5", /* H5F_FILE_SPACE_AGGR */ + "none.h5" /* H5F_FILE_SPACE_NONE */ +}; + +const char *FILESPACE_NAME[] = {"tfilespace.h5", NULL}; + +/* Declarations for test_libver_bounds_copy(): */ +/* SRC_FILE: source file created under 1.8 branch with latest format */ +/* DST_FILE: destination file for copying the dataset in SRC_FILE */ +/* DSET_DS1: the dataset created in SRC_FILE to be copied to DST_FILE */ +#define SRC_FILE "fill18.h5" +#define DST_FILE "fill18_copy.h5" +#define DSET_DS1 "DS1" + +#if 0 +/* Local test function declarations for version bounds */ +static void test_libver_bounds_low_high(const char *env_h5_drvr); +static void test_libver_bounds_super(hid_t fapl, const char *env_h5_drvr); +static void test_libver_bounds_super_create(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm); +static void test_libver_bounds_super_open(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm); +static void test_libver_bounds_obj(hid_t fapl); +static void test_libver_bounds_dataset(hid_t fapl); +static void test_libver_bounds_dataspace(hid_t fapl); +static void test_libver_bounds_datatype(hid_t fapl); +static void test_libver_bounds_datatype_check(hid_t fapl, hid_t tid); +static void test_libver_bounds_attributes(hid_t fapl); +#endif + +#define DSET_NULL "DSET_NULL" +#define DSET "DSET" +#define DSETA "DSETA" +#define DSETB "DSETB" +#define DSETC "DSETC" + +#if 0 +static void +create_objects(hid_t, hid_t, hid_t *, hid_t *, hid_t *, hid_t *); +static void +test_obj_count_and_id(hid_t, hid_t, hid_t, hid_t, hid_t, hid_t); +static void +check_file_id(hid_t, hid_t); +#endif + +#if 0 +/* Helper routine used by test_rw_noupdate() */ +static int cal_chksum(const char *file, uint32_t *chksum); + +static void test_rw_noupdate(void); +#endif + +/**************************************************************** +** +** test_file_create(): Low-level file creation I/O test routine. 
+** +****************************************************************/ +static void +test_file_create(void) +{ + hid_t fid1 = H5I_INVALID_HID; + hid_t fid2 = H5I_INVALID_HID; + hid_t fid3 = H5I_INVALID_HID; /* HDF5 File IDs */ + hid_t tmpl1, tmpl2; /* file creation templates */ + hsize_t ublock; /* sizeof userblock */ + size_t parm; /* file-creation parameters */ + size_t parm2; /* file-creation parameters */ + unsigned iparm; + unsigned iparm2; + herr_t ret; /*generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Low-Level File Creation I/O\n")); + + /* First ensure the file does not exist */ + H5E_BEGIN_TRY + { + H5Fdelete(FILE1, H5P_DEFAULT); + } + H5E_END_TRY; + + /* Try opening a non-existent file */ + H5E_BEGIN_TRY + { + fid1 = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(fid1, FAIL, "H5Fopen"); + + /* Test create with various sequences of H5F_ACC_EXCL and */ + /* H5F_ACC_TRUNC flags */ + + /* Create with H5F_ACC_EXCL */ + fid1 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); +#ifndef NO_TRUNCATE_OPEN_FILE + /* + * try to create the same file with H5F_ACC_TRUNC. This should fail + * because fid1 is the same file and is currently open. + */ + H5E_BEGIN_TRY + { + fid2 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(fid2, FAIL, "H5Fcreate"); +#endif + /* Close all files */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + H5E_BEGIN_TRY + { + ret = H5Fclose(fid2); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Fclose"); /*file should not have been open */ + + /* + * Try again with H5F_ACC_EXCL. This should fail because the file already + * exists from the previous steps. + */ + H5E_BEGIN_TRY + { + fid1 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(fid1, FAIL, "H5Fcreate"); + + /* Test create with H5F_ACC_TRUNC. This will truncate the existing file. */ + fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); +#ifndef NO_TRUNCATE_OPEN_FILE + /* + * Try to truncate first file again. This should fail because fid1 is the + * same file and is currently open. + */ + H5E_BEGIN_TRY + { + fid2 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(fid2, FAIL, "H5Fcreate"); +#endif + /* + * Try with H5F_ACC_EXCL. This should fail too because the file already + * exists. 
+ */ + H5E_BEGIN_TRY + { + fid2 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(fid2, FAIL, "H5Fcreate"); + + /* Get the file-creation template */ + tmpl1 = H5Fget_create_plist(fid1); + CHECK(tmpl1, FAIL, "H5Fget_create_plist"); + + /* Get the file-creation parameters */ + ret = H5Pget_userblock(tmpl1, &ublock); + CHECK(ret, FAIL, "H5Pget_userblock"); + VERIFY(ublock, F1_USERBLOCK_SIZE, "H5Pget_userblock"); + + ret = H5Pget_sizes(tmpl1, &parm, &parm2); + CHECK(ret, FAIL, "H5Pget_sizes"); + VERIFY(parm, F1_OFFSET_SIZE, "H5Pget_sizes"); + VERIFY(parm2, F1_LENGTH_SIZE, "H5Pget_sizes"); + + ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2); + CHECK(ret, FAIL, "H5Pget_sym_k"); + VERIFY(iparm, F1_SYM_INTERN_K, "H5Pget_sym_k"); + VERIFY(iparm2, F1_SYM_LEAF_K, "H5Pget_sym_k"); + + /* Release file-creation template */ + ret = H5Pclose(tmpl1); + CHECK(ret, FAIL, "H5Pclose"); + +#ifdef LATER + /* Double-check that the atom has been vaporized */ + ret = H5Pclose(tmpl1); + VERIFY(ret, FAIL, "H5Pclose"); +#endif + + if (h5_using_default_driver(NULL)) { + + /* Create a new file with a non-standard file-creation template */ + tmpl1 = H5Pcreate(H5P_FILE_CREATE); + CHECK(tmpl1, FAIL, "H5Pcreate"); + + /* Try setting some bad userblock sizes */ + H5E_BEGIN_TRY + { + ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE1); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_userblock"); + H5E_BEGIN_TRY + { + ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE2); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_userblock"); + H5E_BEGIN_TRY + { + ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE3); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_userblock"); + H5E_BEGIN_TRY + { + ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE4); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_userblock"); + H5E_BEGIN_TRY + { + ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE5); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_userblock"); + H5E_BEGIN_TRY + { + ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE6); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_userblock"); + H5E_BEGIN_TRY + { + ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE7); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_userblock"); + + /* Set the new file-creation parameters */ + ret = H5Pset_userblock(tmpl1, F2_USERBLOCK_SIZE); + CHECK(ret, FAIL, "H5Pset_userblock"); + + ret = H5Pset_sizes(tmpl1, (size_t)F2_OFFSET_SIZE, (size_t)F2_LENGTH_SIZE); + CHECK(ret, FAIL, "H5Pset_sizes"); + + ret = H5Pset_sym_k(tmpl1, F2_SYM_INTERN_K, F2_SYM_LEAF_K); + CHECK(ret, FAIL, "H5Pset_sym_k"); + + /* + * Try to create second file, with non-standard file-creation template + * params. + */ + fid2 = H5Fcreate(FILE2, H5F_ACC_TRUNC, tmpl1, H5P_DEFAULT); + CHECK(fid2, FAIL, "H5Fcreate"); + + /* Release file-creation template */ + ret = H5Pclose(tmpl1); + CHECK(ret, FAIL, "H5Pclose"); + + /* Make certain we can create a dataset properly in the file with the userblock */ + { + hid_t dataset_id, dataspace_id; /* identifiers */ + hsize_t dims[F2_RANK]; + unsigned data[F2_DIM0][F2_DIM1]; + unsigned i, j; + + /* Create the data space for the dataset. */ + dims[0] = F2_DIM0; + dims[1] = F2_DIM1; + dataspace_id = H5Screate_simple(F2_RANK, dims, NULL); + CHECK(dataspace_id, FAIL, "H5Screate_simple"); + + /* Create the dataset. 
*/ + dataset_id = H5Dcreate2(fid2, F2_DSET, H5T_NATIVE_UINT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK(dataset_id, FAIL, "H5Dcreate2"); + + for (i = 0; i < F2_DIM0; i++) + for (j = 0; j < F2_DIM1; j++) + data[i][j] = i * 10 + j; + + /* Write data to the new dataset */ + ret = H5Dwrite(dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + CHECK(ret, FAIL, "H5Dwrite"); + + /* End access to the dataset and release resources used by it. */ + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Terminate access to the data space. */ + ret = H5Sclose(dataspace_id); + CHECK(ret, FAIL, "H5Sclose"); + } + + /* Get the file-creation template */ + tmpl1 = H5Fget_create_plist(fid2); + CHECK(tmpl1, FAIL, "H5Fget_create_plist"); + + /* Get the file-creation parameters */ + ret = H5Pget_userblock(tmpl1, &ublock); + CHECK(ret, FAIL, "H5Pget_userblock"); + VERIFY(ublock, F2_USERBLOCK_SIZE, "H5Pget_userblock"); + + ret = H5Pget_sizes(tmpl1, &parm, &parm2); + CHECK(ret, FAIL, "H5Pget_sizes"); + VERIFY(parm, F2_OFFSET_SIZE, "H5Pget_sizes"); + VERIFY(parm2, F2_LENGTH_SIZE, "H5Pget_sizes"); + + ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2); + CHECK(ret, FAIL, "H5Pget_sym_k"); + VERIFY(iparm, F2_SYM_INTERN_K, "H5Pget_sym_k"); + VERIFY(iparm2, F2_SYM_LEAF_K, "H5Pget_sym_k"); + + /* Clone the file-creation template */ + tmpl2 = H5Pcopy(tmpl1); + CHECK(tmpl2, FAIL, "H5Pcopy"); + + /* Release file-creation template */ + ret = H5Pclose(tmpl1); + CHECK(ret, FAIL, "H5Pclose"); + + /* Set the new file-creation parameter */ + ret = H5Pset_userblock(tmpl2, F3_USERBLOCK_SIZE); + CHECK(ret, FAIL, "H5Pset_userblock"); + + /* + * Try to create second file, with non-standard file-creation template + * params + */ + fid3 = H5Fcreate(FILE3, H5F_ACC_TRUNC, tmpl2, H5P_DEFAULT); + CHECK(fid3, FAIL, "H5Fcreate"); + + /* Release file-creation template */ + ret = H5Pclose(tmpl2); + CHECK(ret, FAIL, "H5Pclose"); + + /* Get the file-creation template */ + tmpl1 = H5Fget_create_plist(fid3); + CHECK(tmpl1, FAIL, "H5Fget_create_plist"); + + /* Get the file-creation parameters */ + ret = H5Pget_userblock(tmpl1, &ublock); + CHECK(ret, FAIL, "H5Pget_userblock"); + VERIFY(ublock, F3_USERBLOCK_SIZE, "H5Pget_userblock"); + + ret = H5Pget_sizes(tmpl1, &parm, &parm2); + CHECK(ret, FAIL, "H5Pget_sizes"); + VERIFY(parm, F3_OFFSET_SIZE, "H5Pget_sizes"); + VERIFY(parm2, F3_LENGTH_SIZE, "H5Pget_sizes"); + + ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2); + CHECK(ret, FAIL, "H5Pget_sym_k"); + VERIFY(iparm, F3_SYM_INTERN_K, "H5Pget_sym_k"); + VERIFY(iparm2, F3_SYM_LEAF_K, "H5Pget_sym_k"); + + /* Release file-creation template */ + ret = H5Pclose(tmpl1); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close second file */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close third file */ + ret = H5Fclose(fid3); + CHECK(ret, FAIL, "H5Fclose"); + } + + /* Close first file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_file_create() */ + +/**************************************************************** +** +** test_file_open(): Low-level file open I/O test routine. 
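+**
+** The open/intent pattern checked below is, roughly:
+**
+**     fid1 = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT);
+**     H5Fget_intent(fid1, &intent);        -- returns H5F_ACC_RDWR here
+**     tmpl1 = H5Fget_create_plist(fid1);   -- creation parameters set at
+**                                             H5Fcreate time (userblock,
+**                                             sizes, sym_k) are still
+**                                             readable from the open file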
+** +****************************************************************/ +static void +test_file_open(const char *env_h5_drvr) +{ + hid_t fid1; /*HDF5 File IDs */ +#if 0 + hid_t fid2; + hid_t did; /*dataset ID */ + hid_t fapl_id; /*file access property list ID */ +#endif + hid_t tmpl1; /*file creation templates */ + hsize_t ublock; /*sizeof user block */ + size_t parm; /*file-creation parameters */ + size_t parm2; /*file-creation parameters */ + unsigned iparm; + unsigned iparm2; + unsigned intent; + herr_t ret; /*generic return value */ + + /* + * Test single file open + */ + + /* Only run this test with sec2/default driver */ + if (!h5_using_default_driver(env_h5_drvr)) + return; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Low-Level File Opening I/O\n")); + + /* Open first file */ + fid1 = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Get the intent */ + ret = H5Fget_intent(fid1, &intent); + CHECK(ret, FAIL, "H5Fget_intent"); + VERIFY(intent, H5F_ACC_RDWR, "H5Fget_intent"); + + /* Get the file-creation template */ + tmpl1 = H5Fget_create_plist(fid1); + CHECK(tmpl1, FAIL, "H5Fget_create_plist"); + + /* Get the file-creation parameters */ + ret = H5Pget_userblock(tmpl1, &ublock); + CHECK(ret, FAIL, "H5Pget_userblock"); + VERIFY(ublock, F2_USERBLOCK_SIZE, "H5Pget_userblock"); + + ret = H5Pget_sizes(tmpl1, &parm, &parm2); + CHECK(ret, FAIL, "H5Pget_sizes"); + VERIFY(parm, F2_OFFSET_SIZE, "H5Pget_sizes"); + VERIFY(parm2, F2_LENGTH_SIZE, "H5Pget_sizes"); + + ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2); + CHECK(ret, FAIL, "H5Pget_sym_k"); + VERIFY(iparm, F2_SYM_INTERN_K, "H5Pget_sym_k"); + VERIFY(iparm2, F2_SYM_LEAF_K, "H5Pget_sym_k"); + + /* Release file-creation template */ + ret = H5Pclose(tmpl1); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close first file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* + * Test two file opens: one is opened H5F_ACC_RDONLY and H5F_CLOSE_WEAK. + * It's closed with an object left open. Then another is opened + * H5F_ACC_RDWR, which should fail. + */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing 2 File Openings - SKIPPED for now due to no file close degree support\n")); +#if 0 + /* Create file access property list */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl_id, FAIL, "H5Pcreate"); + + /* Set file close mode to H5F_CLOSE_WEAK */ + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + /* Open file for first time */ + fid1 = H5Fopen(FILE2, H5F_ACC_RDONLY, fapl_id); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Check the intent */ + ret = H5Fget_intent(fid1, &intent); + CHECK(ret, FAIL, "H5Fget_intent"); + VERIFY(intent, H5F_ACC_RDONLY, "H5Fget_intent"); + + /* Open dataset */ + did = H5Dopen2(fid1, F2_DSET, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Check that the intent works even if NULL is passed in */ + ret = H5Fget_intent(fid1, NULL); + CHECK(ret, FAIL, "H5Fget_intent"); + + /* Close first open */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open file for second time, which should fail. 
*/ + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE2, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; + VERIFY(fid2, FAIL, "H5Fopen"); + + /* Check that the intent fails for an invalid ID */ + H5E_BEGIN_TRY + { + ret = H5Fget_intent(fid1, &intent); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Fget_intent"); + + /* Close dataset from first open */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Pclose(fapl_id); + CHECK(ret, FAIL, "H5Pclose"); +#endif +} /* test_file_open() */ + +/**************************************************************** +** +** test_file_reopen(): File reopen test routine. +** +****************************************************************/ +static void +test_file_reopen(void) +{ + hid_t fid = -1; /* file ID from initial open */ + hid_t rfid = -1; /* file ID from reopen */ + hid_t did = -1; /* dataset ID (both opens) */ + hid_t sid = -1; /* dataspace ID for dataset creation */ + hsize_t dims = 6; /* dataspace size */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing File Re-opening\n")); + + /* Create file via first ID */ + fid = H5Fcreate(REOPEN_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK_I(fid, "H5Fcreate"); + + /* Create a dataset in the file */ + sid = H5Screate_simple(1, &dims, &dims); + CHECK_I(sid, "H5Screate_simple") + did = H5Dcreate2(fid, REOPEN_DSET, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK_I(did, "H5Dcreate2"); + + /* Close dataset and dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Reopen the file with a different file ID */ + rfid = H5Freopen(fid); + CHECK_I(rfid, "H5Freopen"); + + /* Reopen the dataset through the reopen file ID */ + did = H5Dopen2(rfid, REOPEN_DSET, H5P_DEFAULT); + CHECK_I(did, "H5Dopen2"); + + /* Close and clean up */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(rfid); + CHECK(ret, FAIL, "H5Fclose"); + H5Fdelete(REOPEN_FILE, H5P_DEFAULT); + +} /* test_file_reopen() */ + +/**************************************************************** +** +** test_file_close(): low-level file close test routine. +** It mainly tests behavior with close degree. 
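+**
+** For reference, the close degrees exercised by the (currently
+** disabled) body behave roughly as follows; they are selected through
+** the file access property list, e.g.
+**
+**     H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI);
+**
+**   H5F_CLOSE_WEAK    - H5Fclose succeeds, the file lingers until the
+**                       last object open in it is closed
+**   H5F_CLOSE_SEMI    - H5Fclose fails while objects are still open
+**   H5F_CLOSE_STRONG  - H5Fclose also closes any remaining open objects
+**   H5F_CLOSE_DEFAULT - the file driver chooses the behavior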
+** +*****************************************************************/ +static void +test_file_close(void) +{ +#if 0 + hid_t fid1, fid2; + hid_t fapl_id, access_id; + hid_t dataset_id, group_id1, group_id2, group_id3; + H5F_close_degree_t fc_degree; + herr_t ret; +#endif + + /* Output message about test being performed */ + MESSAGE(5, ("Testing File Closing with file close degrees - SKIPPED for now due to no file close degree " + "support\n")); +#if 0 + /* Test behavior while opening file multiple times with different + * file close degree value + */ + fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl_id, FAIL, "H5Pcreate"); + + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_STRONG); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + ret = H5Pget_fclose_degree(fapl_id, &fc_degree); + VERIFY(fc_degree, H5F_CLOSE_STRONG, "H5Pget_fclose_degree"); + + /* should fail */ + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; + VERIFY(fid2, FAIL, "H5Fopen"); + + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + /* should succeed */ + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Close first open */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close second open */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + + /* Test behavior while opening file multiple times with different file + * close degree + */ + fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + ret = H5Pget_fclose_degree(fapl_id, &fc_degree); + VERIFY(fc_degree, H5F_CLOSE_WEAK, "H5Pget_fclose_degree"); + + /* should succeed */ + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Close first open */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close second open */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + + /* Test behavior while opening file multiple times with file close + * degree STRONG */ + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_STRONG); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + CHECK(fid1, FAIL, "H5Fcreate"); + + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + /* should fail */ + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; + VERIFY(fid2, FAIL, "H5Fopen"); + + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_STRONG); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + /* should succeed */ + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Create a dataset and a group in each file open respectively */ + create_objects(fid1, fid2, NULL, NULL, NULL, NULL); + + /* Close first open */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close second open */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + + /* Test behavior while opening file multiple times with file close + * degree SEMI */ + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + CHECK(fid1, FAIL, "H5Fcreate"); + + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); + 
CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + /* should fail */ + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; + VERIFY(fid2, FAIL, "H5Fopen"); + + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + /* should succeed */ + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Create a dataset and a group in each file open respectively */ + create_objects(fid1, fid2, &dataset_id, &group_id1, &group_id2, &group_id3); + + /* Close first open, should fail since it is SEMI and objects are + * still open. */ + H5E_BEGIN_TRY + { + ret = H5Fclose(fid1); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Fclose"); + + /* Close second open, should fail since it is SEMI and objects are + * still open. */ + H5E_BEGIN_TRY + { + ret = H5Fclose(fid2); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Fclose"); + + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close first open */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Gclose(group_id1); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Gclose(group_id2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close second open, should fail since it is SEMI and one group ID is + * still open. */ + H5E_BEGIN_TRY + { + ret = H5Fclose(fid2); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Fclose"); + + /* Same check with H5Idec_ref() (should fail also) */ + H5E_BEGIN_TRY + { + ret = H5Idec_ref(fid2); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Idec_ref"); + + ret = H5Gclose(group_id3); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close second open again. Should succeed. */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + + /* Test behavior while opening file multiple times with file close + * degree WEAK */ + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + CHECK(fid1, FAIL, "H5Fcreate"); + + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + /* should fail */ + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; + VERIFY(fid2, FAIL, "H5Fopen"); + + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + /* should succeed */ + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Create a dataset and a group in each file open respectively */ + create_objects(fid1, fid2, &dataset_id, &group_id1, &group_id2, &group_id3); + + /* Create more new files and test object count and ID list functions */ + test_obj_count_and_id(fid1, fid2, dataset_id, group_id1, group_id2, group_id3); + + /* Close first open */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close second open. File will be finally closed after all objects + * are closed. 
*/ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Gclose(group_id1); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Gclose(group_id2); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Gclose(group_id3); + CHECK(ret, FAIL, "H5Gclose"); + + /* Test behavior while opening file multiple times with file close + * degree DEFAULT */ + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + CHECK(fid1, FAIL, "H5Fcreate"); + + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + /* should fail */ + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY; + VERIFY(fid2, FAIL, "H5Fopen"); + + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); + + /* should succeed */ + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Create a dataset and a group in each file open respectively */ + create_objects(fid1, fid2, &dataset_id, &group_id1, &group_id2, &group_id3); + + access_id = H5Fget_access_plist(fid1); + CHECK(access_id, FAIL, "H5Fget_access_plist"); + + ret = H5Pget_fclose_degree(access_id, &fc_degree); + CHECK(ret, FAIL, "H5Pget_fclose_degree"); + + switch (fc_degree) { + case H5F_CLOSE_STRONG: + /* Close first open */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + /* Close second open */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + break; + case H5F_CLOSE_SEMI: + /* Close first open */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Gclose(group_id1); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Gclose(group_id2); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Gclose(group_id3); + CHECK(ret, FAIL, "H5Gclose"); + /* Close second open */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + break; + case H5F_CLOSE_WEAK: + /* Close first open */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + /* Close second open */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Gclose(group_id1); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Gclose(group_id2); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Gclose(group_id3); + CHECK(ret, FAIL, "H5Gclose"); + break; + case H5F_CLOSE_DEFAULT: + default: + CHECK(fc_degree, H5F_CLOSE_DEFAULT, "H5Pget_fclose_degree"); + break; + } + + /* Close file access property list */ + ret = H5Pclose(fapl_id); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(access_id); + CHECK(ret, FAIL, "H5Pclose"); +#endif +} + +/**************************************************************** +** +** create_objects(): routine called by test_file_close to create +** a dataset and a group in file. +** +****************************************************************/ +#if 0 +static void +create_objects(hid_t fid1, hid_t fid2, hid_t *ret_did, hid_t *ret_gid1, hid_t *ret_gid2, hid_t *ret_gid3) +{ + ssize_t oid_count; + herr_t ret; + + /* Check reference counts of file IDs and opened object IDs. + * The verification is hard-coded. If in any case, this testing + * is changed, remember to check this part and update the macros. 
+ */ + { + oid_count = H5Fget_obj_count(fid1, H5F_OBJ_ALL); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_2, "H5Fget_obj_count"); + + oid_count = H5Fget_obj_count(fid1, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); + + oid_count = H5Fget_obj_count(fid2, H5F_OBJ_ALL); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_2, "H5Fget_obj_count"); + + oid_count = H5Fget_obj_count(fid2, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); + } + + /* create a dataset in the first file open */ + { + hid_t dataset_id, dataspace_id; /* identifiers */ + hsize_t dims[F2_RANK]; + unsigned data[F2_DIM0][F2_DIM1]; + unsigned i, j; + + /* Create the data space for the dataset. */ + dims[0] = F2_DIM0; + dims[1] = F2_DIM1; + dataspace_id = H5Screate_simple(F2_RANK, dims, NULL); + CHECK(dataspace_id, FAIL, "H5Screate_simple"); + + /* Create the dataset. */ + dataset_id = + H5Dcreate2(fid1, "/dset", H5T_NATIVE_UINT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset_id, FAIL, "H5Dcreate2"); + + for (i = 0; i < F2_DIM0; i++) + for (j = 0; j < F2_DIM1; j++) + data[i][j] = i * 10 + j; + + /* Write data to the new dataset */ + ret = H5Dwrite(dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + CHECK(ret, FAIL, "H5Dwrite"); + + if (ret_did != NULL) + *ret_did = dataset_id; + + /* Terminate access to the data space. */ + ret = H5Sclose(dataspace_id); + CHECK(ret, FAIL, "H5Sclose"); + } + + /* Create a group in the second file open */ + { + hid_t gid1, gid2, gid3; + gid1 = H5Gcreate2(fid2, "/group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gcreate2"); + if (ret_gid1 != NULL) + *ret_gid1 = gid1; + + gid2 = H5Gopen2(fid2, "/group", H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gopen2"); + if (ret_gid2 != NULL) + *ret_gid2 = gid2; + + gid3 = H5Gopen2(fid2, "/group", H5P_DEFAULT); + CHECK(gid3, FAIL, "H5Gopen2"); + if (ret_gid3 != NULL) + *ret_gid3 = gid3; + } + + /* Check reference counts of file IDs and opened object IDs. + * The verification is hard-coded. If in any case, this testing + * is changed, remember to check this part and update the macros. + */ + { + oid_count = H5Fget_obj_count(fid1, H5F_OBJ_ALL); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_6, "H5Fget_obj_count"); + + oid_count = H5Fget_obj_count(fid1, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_4, "H5Fget_obj_count"); + + oid_count = H5Fget_obj_count(fid2, H5F_OBJ_ALL); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_6, "H5Fget_obj_count"); + + oid_count = H5Fget_obj_count(fid2, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_4, "H5Fget_obj_count"); + } +} +#endif + +/**************************************************************** +** +** test_get_obj_ids(): Test the bug and the fix for Jira 8528. +** H5Fget_obj_ids overfilled the list of +** object IDs by one. This is an enhancement +** for test_obj_count_and_id(). 
+** +****************************************************************/ +static void +test_get_obj_ids(void) +{ + hid_t fid, gid[NGROUPS], dset[NDSETS]; + hid_t filespace; + hsize_t file_dims[F2_RANK] = {F2_DIM0, F2_DIM1}; + ssize_t oid_count, ret_count; + hid_t *oid_list = NULL; + herr_t ret; + int i, m, n; + ssize_t oid_list_size = NDSETS; + char gname[64], dname[64]; + + MESSAGE(5, ("Testing retrieval of object IDs\n")); + + /* Create a new file */ + fid = H5Fcreate(FILE7, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + filespace = H5Screate_simple(F2_RANK, file_dims, NULL); + CHECK(filespace, FAIL, "H5Screate_simple"); + + /* creates NGROUPS groups under the root group */ + for (m = 0; m < NGROUPS; m++) { + HDsnprintf(gname, sizeof(gname), "group%d", m); + gid[m] = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid[m], FAIL, "H5Gcreate2"); + } + + /* create NDSETS datasets under the root group */ + for (n = 0; n < NDSETS; n++) { + HDsnprintf(dname, sizeof(dname), "dataset%d", n); + dset[n] = H5Dcreate2(fid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset[n], FAIL, "H5Dcreate2"); + } + + /* The number of opened objects should be NGROUPS + NDSETS + 1. One is opened file. */ + oid_count = H5Fget_obj_count(fid, H5F_OBJ_ALL); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, (NGROUPS + NDSETS + 1), "H5Fget_obj_count"); + + oid_list = (hid_t *)HDcalloc((size_t)oid_list_size, sizeof(hid_t)); + CHECK_PTR(oid_list, "HDcalloc"); + + /* Call the public function H5F_get_obj_ids to use H5F__get_objects. User reported having problem here. + * that the returned size (ret_count) from H5Fget_obj_ids is one greater than the size passed in + * (oid_list_size) */ + ret_count = H5Fget_obj_ids(fid, H5F_OBJ_ALL, (size_t)oid_list_size, oid_list); + CHECK(ret_count, FAIL, "H5Fget_obj_ids"); + VERIFY(ret_count, oid_list_size, "H5Fget_obj_count"); + + /* Close all object IDs on the list except the file ID. The first ID is supposed to be file ID according + * to the library design */ + for (i = 0; i < ret_count; i++) { + if (fid != oid_list[i]) { + ret = H5Oclose(oid_list[i]); + CHECK(ret, FAIL, "H5Oclose"); + } + } + + /* The number of opened objects should be NGROUPS + 1 + 1. The first one is opened file. The second one + * is the dataset ID left open from the previous around of H5Fget_obj_ids */ + oid_count = H5Fget_obj_count(fid, H5F_OBJ_ALL); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, NGROUPS + 2, "H5Fget_obj_count"); + + /* Get the IDs of the left opened objects */ + ret_count = H5Fget_obj_ids(fid, H5F_OBJ_ALL, (size_t)oid_list_size, oid_list); + CHECK(ret_count, FAIL, "H5Fget_obj_ids"); + VERIFY(ret_count, oid_list_size, "H5Fget_obj_count"); + + /* Close all object IDs on the list except the file ID. 
The first ID is still the file ID */ + for (i = 0; i < ret_count; i++) { + if (fid != oid_list[i]) { + ret = H5Oclose(oid_list[i]); + CHECK(ret, FAIL, "H5Oclose"); + } + } + + H5Sclose(filespace); + H5Fclose(fid); + + HDfree(oid_list); + + /* Reopen the file to check whether H5Fget_obj_count and H5Fget_obj_ids still works + * when the file is closed first */ + fid = H5Fopen(FILE7, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open NDSETS datasets under the root group */ + for (n = 0; n < NDSETS; n++) { + HDsnprintf(dname, sizeof(dname), "dataset%d", n); + dset[n] = H5Dopen2(fid, dname, H5P_DEFAULT); + CHECK(dset[n], FAIL, "H5Dcreate2"); + } + + /* Close the file first */ + H5Fclose(fid); +#ifndef WRONG_DATATYPE_OBJ_COUNT + /* Get the number of all opened objects */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, NDSETS, "H5Fget_obj_count"); + + oid_list = (hid_t *)HDcalloc((size_t)oid_count, sizeof(hid_t)); + CHECK_PTR(oid_list, "HDcalloc"); + + /* Get the list of all opened objects */ + ret_count = H5Fget_obj_ids((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL, (size_t)oid_count, oid_list); + CHECK(ret_count, FAIL, "H5Fget_obj_ids"); + VERIFY(ret_count, NDSETS, "H5Fget_obj_ids"); + + H5E_BEGIN_TRY + { + /* Close all open objects with H5Oclose */ + for (n = 0; n < oid_count; n++) + H5Oclose(oid_list[n]); + } + H5E_END_TRY; + + HDfree(oid_list); +#endif +} + +/**************************************************************** +** +** test_get_file_id(): Test H5Iget_file_id() +** +*****************************************************************/ +static void +test_get_file_id(void) +{ +#if 0 + hid_t fid, fid2, fid3; + hid_t datatype_id, dataset_id, dataspace_id, group_id, attr_id; + hid_t plist; + hsize_t dims[F2_RANK]; + unsigned intent; + herr_t ret; +#endif + + MESSAGE(5, ("Testing H5Iget_file_id - SKIPPED for now due to no H5Iget_file_id support\n")); +#if 0 + /* Create a file */ + fid = H5Fcreate(FILE4, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Check the intent */ + ret = H5Fget_intent(fid, &intent); + CHECK(ret, FAIL, "H5Fget_intent"); + VERIFY(intent, H5F_ACC_RDWR, "H5Fget_intent"); + + /* Test H5Iget_file_id() */ + check_file_id(fid, fid); + + /* Create a group in the file. Make a duplicated file ID from the group. + * And close this duplicated ID + */ + group_id = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group_id, FAIL, "H5Gcreate2"); + + /* Test H5Iget_file_id() */ + check_file_id(fid, group_id); + + /* Close the file and get file ID from the group ID */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Test H5Iget_file_id() */ + check_file_id((hid_t)-1, group_id); + + ret = H5Gclose(group_id); + CHECK(ret, FAIL, "H5Gclose"); + + /* Open the file again. Test H5Iget_file_id() */ + fid = H5Fopen(FILE4, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + group_id = H5Gopen2(fid, GRP_NAME, H5P_DEFAULT); + CHECK(group_id, FAIL, "H5Gopen2"); + + /* Test H5Iget_file_id() */ + check_file_id(fid, group_id); + + /* Open the file for second time. Test H5Iget_file_id() */ + fid3 = H5Freopen(fid); + CHECK(fid3, FAIL, "H5Freopen"); + + /* Test H5Iget_file_id() */ + check_file_id(fid3, fid3); + + ret = H5Fclose(fid3); + CHECK(ret, FAIL, "H5Fclose"); + + /* Create a dataset in the group. Make a duplicated file ID from the + * dataset. And close this duplicated ID. 
+ */ + dims[0] = F2_DIM0; + dims[1] = F2_DIM1; + dataspace_id = H5Screate_simple(F2_RANK, dims, NULL); + CHECK(dataspace_id, FAIL, "H5Screate_simple"); + + dataset_id = + H5Dcreate2(group_id, DSET_NAME, H5T_NATIVE_INT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset_id, FAIL, "H5Dcreate2"); + + /* Test H5Iget_file_id() */ + check_file_id(fid, dataset_id); + + /* Create an attribute for the dataset. Make a duplicated file ID from + * this attribute. And close it. + */ + attr_id = H5Acreate2(dataset_id, ATTR_NAME, H5T_NATIVE_INT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Acreate2"); + + /* Test H5Iget_file_id() */ + check_file_id(fid, attr_id); + + /* Create a named datatype. Make a duplicated file ID from + * this attribute. And close it. + */ + datatype_id = H5Tcopy(H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tcopy"); + + ret = H5Tcommit2(fid, TYPE_NAME, datatype_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Test H5Iget_file_id() */ + check_file_id(fid, datatype_id); + + /* Create a property list and try to get file ID from it. + * Supposed to fail. + */ + plist = H5Pcreate(H5P_FILE_ACCESS); + CHECK(plist, FAIL, "H5Pcreate"); + + H5E_BEGIN_TRY + { + fid2 = H5Iget_file_id(plist); + } + H5E_END_TRY; + VERIFY(fid2, FAIL, "H5Iget_file_id"); + + /* Close objects */ + ret = H5Pclose(plist); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Tclose(datatype_id); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Sclose(dataspace_id); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Gclose(group_id); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +#endif +} + +/**************************************************************** +** +** check_file_id(): Internal function of test_get_file_id() +** +*****************************************************************/ +#if 0 +static void +check_file_id(hid_t fid, hid_t object_id) +{ + hid_t new_fid; + herr_t ret; + + /* Return a duplicated file ID even not expecting user to do it. + * And close this duplicated ID + */ + new_fid = H5Iget_file_id(object_id); + + if (fid >= 0) + VERIFY(new_fid, fid, "H5Iget_file_id"); + else + CHECK(new_fid, FAIL, "H5Iget_file_id"); + + ret = H5Fclose(new_fid); + CHECK(ret, FAIL, "H5Fclose"); +} +#endif + +/**************************************************************** +** +** test_obj_count_and_id(): test object count and ID list functions. 
+** +****************************************************************/ +#if 0 +static void +test_obj_count_and_id(hid_t fid1, hid_t fid2, hid_t did, hid_t gid1, hid_t gid2, hid_t gid3) +{ + hid_t fid3, fid4; + ssize_t oid_count, ret_count; + herr_t ret; + + /* Create two new files */ + fid3 = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid3, FAIL, "H5Fcreate"); + fid4 = H5Fcreate(FILE3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid4, FAIL, "H5Fcreate"); + + /* test object count of all files IDs open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_FILE); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_4, "H5Fget_obj_count"); + + /* test object count of all datasets open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATASET); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_1, "H5Fget_obj_count"); + + /* test object count of all groups open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_GROUP); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_3, "H5Fget_obj_count"); + + /* test object count of all named datatypes open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATATYPE); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); + + /* test object count of all attributes open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ATTR); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); + + /* test object count of all objects currently open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_8, "H5Fget_obj_count"); + + if (oid_count > 0) { + hid_t *oid_list; + + oid_list = (hid_t *)HDcalloc((size_t)oid_count, sizeof(hid_t)); + if (oid_list != NULL) { + int i; + + ret_count = H5Fget_obj_ids((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL, (size_t)oid_count, oid_list); + CHECK(ret_count, FAIL, "H5Fget_obj_ids"); + + for (i = 0; i < oid_count; i++) { + H5I_type_t id_type; + + id_type = H5Iget_type(oid_list[i]); + switch (id_type) { + case H5I_FILE: + if (oid_list[i] != fid1 && oid_list[i] != fid2 && oid_list[i] != fid3 && + oid_list[i] != fid4) + ERROR("H5Fget_obj_ids"); + break; + + case H5I_GROUP: + if (oid_list[i] != gid1 && oid_list[i] != gid2 && oid_list[i] != gid3) + ERROR("H5Fget_obj_ids"); + break; + + case H5I_DATASET: + VERIFY(oid_list[i], did, "H5Fget_obj_ids"); + break; + + case H5I_MAP: + /* TODO: Not supported in native VOL connector yet */ + + case H5I_UNINIT: + case H5I_BADID: + case H5I_DATATYPE: + case H5I_DATASPACE: + case H5I_ATTR: + case H5I_VFL: + case H5I_VOL: + case H5I_GENPROP_CLS: + case H5I_GENPROP_LST: + case H5I_ERROR_CLASS: + case H5I_ERROR_MSG: + case H5I_ERROR_STACK: + case H5I_SPACE_SEL_ITER: + case H5I_EVENTSET: + case H5I_NTYPES: + default: + ERROR("H5Fget_obj_ids"); + } /* end switch */ + } /* end for */ + + HDfree(oid_list); + } /* end if */ + } /* end if */ + + /* close the two new files */ + ret = H5Fclose(fid3); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(fid4); + CHECK(ret, FAIL, "H5Fclose"); +} +#endif + +/**************************************************************** +** +** test_file_perm(): low-level file test routine. +** This test verifies that a file can be opened for both +** read-only and read-write access and things will be handled +** appropriately. 
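+**
+** The expected-failure check used below follows the usual pattern of
+** suppressing the error stack around the call that must fail:
+**
+**     filero = H5Fopen(FILE2, H5F_ACC_RDONLY, H5P_DEFAULT);
+**     H5E_BEGIN_TRY
+**     {
+**         dset = H5Dcreate2(filero, F2_DSET, H5T_NATIVE_INT, dspace,
+**                           H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+**     }
+**     H5E_END_TRY;
+**     VERIFY(dset, FAIL, "H5Dcreate2");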
+** +*****************************************************************/ +static void +test_file_perm(void) +{ + hid_t file; /* File opened with read-write permission */ + hid_t filero; /* Same file opened with read-only permission */ + hid_t dspace; /* Dataspace ID */ + hid_t dset; /* Dataset ID */ + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Low-Level File Permissions\n")); + + dspace = H5Screate(H5S_SCALAR); + CHECK(dspace, FAIL, "H5Screate"); + + /* Create the file (with read-write permission) */ + file = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + /* Create a dataset with the read-write file handle */ + dset = H5Dcreate2(file, F2_DSET, H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Open the file (with read-only permission) */ + filero = H5Fopen(FILE2, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(filero, FAIL, "H5Fopen"); + + /* Create a dataset with the read-only file handle (should fail) */ + H5E_BEGIN_TRY + { + dset = H5Dcreate2(filero, F2_DSET, H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(dset, FAIL, "H5Dcreate2"); + if (dset != FAIL) { + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + } /* end if */ + + ret = H5Fclose(filero); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + +} /* end test_file_perm() */ + +/**************************************************************** +** +** test_file_perm2(): low-level file test routine. +** This test verifies that no object can be created in a +** file that is opened for read-only. 
+** +*****************************************************************/ +static void +test_file_perm2(void) +{ + hid_t file; /* File opened with read-write permission */ + hid_t filero; /* Same file opened with read-only permission */ + hid_t dspace; /* Dataspace ID */ + hid_t group; /* Group ID */ + hid_t dset; /* Dataset ID */ + hid_t type; /* Datatype ID */ + hid_t attr; /* Attribute ID */ + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Low-Level File Permissions again\n")); + + dspace = H5Screate(H5S_SCALAR); + CHECK(dspace, FAIL, "H5Screate"); + + /* Create the file (with read-write permission) */ + file = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open the file (with read-only permission) */ + filero = H5Fopen(FILE2, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(filero, FAIL, "H5Fopen"); + + /* Create a group with the read-only file handle (should fail) */ + H5E_BEGIN_TRY + { + group = H5Gcreate2(filero, "MY_GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(group, FAIL, "H5Gcreate2"); + + /* Create a dataset with the read-only file handle (should fail) */ + H5E_BEGIN_TRY + { + dset = H5Dcreate2(filero, F2_DSET, H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(dset, FAIL, "H5Dcreate2"); + + /* Create an attribute with the read-only file handle (should fail) */ + H5E_BEGIN_TRY + { + attr = H5Acreate2(filero, "MY_ATTR", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(attr, FAIL, "H5Acreate2"); + + type = H5Tcopy(H5T_NATIVE_SHORT); + CHECK(type, FAIL, "H5Tcopy"); + + /* Commit a datatype with the read-only file handle (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Tcommit2(filero, "MY_DTYPE", type, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Tcommit2"); + + ret = H5Tclose(type); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Fclose(filero); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); +} /* end test_file_perm2() */ + +/**************************************************************** +** +** test_file_is_accessible(): low-level file test routine. +** Clone of test_file_ishdf5 but uses the newer VOL-enabled +** H5Fis_accessible() API call. 
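+**
+** The routine under test takes a file name plus a FAPL (so the correct
+** VFD/VOL connector is consulted) and returns a tri-state htri_t,
+** per the checks below:
+**
+**     is_hdf5 = H5Fis_accessible(filename, fapl_id);
+**         -- TRUE for an accessible HDF5 file, FALSE for a file that
+**            is not an HDF5 file, negative on error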
+** +*****************************************************************/ +#define FILE_IS_ACCESSIBLE "tfile_is_accessible" +#define FILE_IS_ACCESSIBLE_NON_HDF5 "tfile_is_accessible_non_hdf5" +static void +test_file_is_accessible(const char *env_h5_drvr) +{ + hid_t fid = H5I_INVALID_HID; /* File opened with read-write permission */ + hid_t fcpl_id = H5I_INVALID_HID; /* File creation property list */ + hid_t fapl_id = H5I_INVALID_HID; /* File access property list */ +#if 0 + int fd; /* POSIX file descriptor */ +#endif + char filename[FILENAME_LEN]; /* Filename to use */ + char non_hdf5_filename[FILENAME_LEN]; /* Base name of non-hdf5 file */ + char non_hdf5_sb_filename[FILENAME_LEN]; /* Name of non-hdf5 superblock file */ +#if 0 + ssize_t nbytes; /* Number of bytes written */ + unsigned u; /* Local index variable */ + unsigned char buf[1024]; /* Buffer of data to write */ +#endif + htri_t is_hdf5; /* Whether a file is an HDF5 file */ +#if 0 + int posix_ret; /* Return value from POSIX calls */ +#endif + hbool_t driver_is_default_compatible; + herr_t ret; /* Return value from HDF5 calls */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Detection of HDF5 Files\n")); + + /* Get FAPL */ + fapl_id = h5_fileaccess(); + CHECK(fapl_id, H5I_INVALID_HID, "H5Pcreate"); + + if (h5_driver_is_default_vfd_compatible(fapl_id, &driver_is_default_compatible) < 0) { + TestErrPrintf("Can't check if VFD is compatible with default VFD"); + return; + } + + /* Fix up filenames */ + h5_fixname(FILE_IS_ACCESSIBLE, fapl_id, filename, sizeof(filename)); + h5_fixname(FILE_IS_ACCESSIBLE_NON_HDF5, fapl_id, non_hdf5_filename, sizeof(non_hdf5_filename)); + h5_fixname_superblock(FILE_IS_ACCESSIBLE_NON_HDF5, fapl_id, non_hdf5_sb_filename, + sizeof(non_hdf5_sb_filename)); + + /****************/ + /* Normal usage */ + /****************/ + + /* Create a file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Verify that the file is an HDF5 file */ + is_hdf5 = H5Fis_accessible(filename, fapl_id); + VERIFY(is_hdf5, TRUE, "H5Fis_accessible"); + + /*****************************************/ + /* Newly created file that is still open */ + /*****************************************/ + + /* On Windows, file locking is mandatory so this check ensures that + * H5Fis_accessible() works on files that have an exclusive lock. + * Previous versions of this API call created an additional file handle + * and attempted to read through it, which will not work when locks + * are enforced by the OS. + */ + + /* Create a file and hold it open */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Verify that the file is an HDF5 file */ + is_hdf5 = H5Fis_accessible(filename, fapl_id); + VERIFY(is_hdf5, TRUE, "H5Fis_accessible"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /*******************************/ + /* Non-default user block size */ + /*******************************/ + + /* This test is not currently working for the family VFD. + * There are failures when creating files with userblocks. 
+ */ + if (0 != HDstrcmp(env_h5_drvr, "family")) { + /* Create a file creation property list with a non-default user block size */ + fcpl_id = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl_id, H5I_INVALID_HID, "H5Pcreate"); + + ret = H5Pset_userblock(fcpl_id, (hsize_t)2048); + CHECK(ret, FAIL, "H5Pset_userblock"); + + /* Create file with non-default user block */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, fapl_id); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Release file-creation property list */ + ret = H5Pclose(fcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Verify that the file is an HDF5 file */ + is_hdf5 = H5Fis_accessible(filename, fapl_id); + VERIFY(is_hdf5, TRUE, "H5Fis_accessible"); + } /* end if */ +#if 0 + if (driver_is_default_compatible) { + /***********************/ + /* EMPTY non-HDF5 file */ + /***********************/ + + /* Create non-HDF5 file and check it */ + fd = HDopen(non_hdf5_sb_filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); + CHECK(fd, (-1), "HDopen"); + + /* Close the file */ + posix_ret = HDclose(fd); + CHECK(posix_ret, (-1), "HDclose"); + + /* Verify that the file is NOT an HDF5 file using the base filename */ + is_hdf5 = H5Fis_accessible(non_hdf5_filename, fapl_id); + VERIFY(is_hdf5, FALSE, "H5Fis_accessible (empty non-HDF5 file)"); + + /***************************/ + /* Non-empty non-HDF5 file */ + /***************************/ + + /* Create non-HDF5 file and check it */ + fd = HDopen(non_hdf5_sb_filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); + CHECK(fd, (-1), "HDopen"); + + /* Initialize information to write */ + for (u = 0; u < 1024; u++) + buf[u] = (unsigned char)u; + + /* Write some information */ + nbytes = HDwrite(fd, buf, (size_t)1024); + VERIFY(nbytes, 1024, "HDwrite"); + + /* Close the file */ + posix_ret = HDclose(fd); + CHECK(posix_ret, (-1), "HDclose"); + + /* Verify that the file is not an HDF5 file */ + is_hdf5 = H5Fis_accessible(non_hdf5_filename, fapl_id); + VERIFY(is_hdf5, FALSE, "H5Fis_accessible (non-HDF5 file)"); + } + + /* Clean up files */ + h5_delete_test_file(filename, fapl_id); + h5_delete_test_file(non_hdf5_filename, fapl_id); +#endif + H5Fdelete(filename, fapl_id); + + /* Close property list */ + ret = H5Pclose(fapl_id); + CHECK(ret, FAIL, "H5Pclose"); + +} /* end test_file_is_accessible() */ + +/**************************************************************** +** +** test_file_ishdf5(): low-level file test routine. +** This test checks whether the H5Fis_hdf5() routine is working +** correctly in various situations. 
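+**    Unlike H5Fis_accessible(), the deprecated routine takes only a
+**    file name and no FAPL:
+**
+**        htri_t is_hdf5 = H5Fis_hdf5(sb_filename);
+**
+**    so the test only runs with the default VFD and passes the name of
+**    the file that actually contains the superblock.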
+** +*****************************************************************/ +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS +static void +test_file_ishdf5(const char *env_h5_drvr) +{ + hid_t fid = H5I_INVALID_HID; /* File opened with read-write permission */ + hid_t fcpl_id = H5I_INVALID_HID; /* File creation property list */ + hid_t fapl_id = H5I_INVALID_HID; /* File access property list */ + int fd; /* POSIX file descriptor */ + char filename[FILENAME_LEN]; /* Filename to use */ + char sb_filename[FILENAME_LEN]; /* Name of file w/ superblock */ + ssize_t nbytes; /* Number of bytes written */ + unsigned u; /* Local index variable */ + unsigned char buf[1024]; /* Buffer of data to write */ + htri_t is_hdf5; /* Whether a file is an HDF5 file */ + int posix_ret; /* Return value from POSIX calls */ + herr_t ret; /* Return value from HDF5 calls */ + + if (!h5_using_default_driver(env_h5_drvr)) + return; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Detection of HDF5 Files (using deprecated H5Fis_hdf5() call)\n")); + + /* Get FAPL */ + fapl_id = h5_fileaccess(); + CHECK(fapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Fix up filenames + * For VFDs that create multiple files, we also need the name + * of the file with the superblock. With single-file VFDs, this + * will be equal to the one from h5_fixname(). + */ + h5_fixname(FILE_IS_ACCESSIBLE, fapl_id, filename, sizeof(filename)); + h5_fixname_superblock(FILE_IS_ACCESSIBLE, fapl_id, sb_filename, sizeof(filename)); + + /****************/ + /* Normal usage */ + /****************/ + + /* Create a file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Verify that the file is an HDF5 file */ + is_hdf5 = H5Fis_hdf5(sb_filename); + VERIFY(is_hdf5, TRUE, "H5Fis_hdf5"); + + /*******************************/ + /* Non-default user block size */ + /*******************************/ + + /* Create a file creation property list with a non-default user block size */ + fcpl_id = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl_id, H5I_INVALID_HID, "H5Pcreate"); + + ret = H5Pset_userblock(fcpl_id, (hsize_t)2048); + CHECK(ret, FAIL, "H5Pset_userblock"); + + /* Create file with non-default user block */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, fapl_id); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Release file creation property list */ + ret = H5Pclose(fcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Verify that the file is an HDF5 file */ + is_hdf5 = H5Fis_hdf5(sb_filename); + VERIFY(is_hdf5, TRUE, "H5Fis_hdf5"); + + /***************************/ + /* Non-empty non-HDF5 file */ + /***************************/ + + /* Create non-HDF5 file. Use the calculated superblock + * filename to avoid the format strings that will make + * open(2) sad. 
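+     * (For the family VFD, for instance, the base name contains a
+     * printf-style integer conversion such as %d rather than the name
+     * of a real file on disk.)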
+ */ + fd = HDopen(sb_filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); + CHECK(fd, (-1), "HDopen"); + + /* Initialize information to write */ + for (u = 0; u < 1024; u++) + buf[u] = (unsigned char)u; + + /* Write some information */ + nbytes = HDwrite(fd, buf, (size_t)1024); + VERIFY(nbytes, 1024, "HDwrite"); + + /* Close the file */ + posix_ret = HDclose(fd); + CHECK(posix_ret, (-1), "HDclose"); + + /* Verify that the file is not an HDF5 file */ + is_hdf5 = H5Fis_hdf5(sb_filename); + VERIFY(is_hdf5, FALSE, "H5Fis_hdf5"); + + /* Clean up files */ +#if 0 + h5_delete_test_file(filename, fapl_id); +#endif + H5Fdelete(filename, fapl_id); + + /* Close property list */ + ret = H5Pclose(fapl_id); + CHECK(ret, FAIL, "H5Pclose"); + +} /* end test_file_ishdf5() */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif + +/**************************************************************** +** +** test_file_delete(): tests H5Fdelete for all VFDs +** +*****************************************************************/ +#define FILE_DELETE "test_file_delete.h5" +#define FILE_DELETE_NOT_HDF5 "test_file_delete_not_hdf5" +static void +test_file_delete(hid_t fapl_id) +{ + hid_t fid = H5I_INVALID_HID; /* File to be deleted */ + char filename[FILENAME_LEN]; /* Filename to use */ + htri_t is_hdf5; /* Whether a file is an HDF5 file */ +#if 0 + int fd; /* POSIX file descriptor */ + int iret; +#endif + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Deletion of HDF5 Files\n")); + + /*************/ + /* HDF5 FILE */ + /*************/ + + /* Get fapl-dependent filename */ + h5_fixname(FILE_DELETE, fapl_id, filename, sizeof(filename)); + + /* Create a file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Close file */ + ret = H5Fclose(fid); + VERIFY(ret, SUCCEED, "H5Fclose"); + + /* Verify that the file is an HDF5 file */ + is_hdf5 = H5Fis_accessible(filename, fapl_id); + VERIFY(is_hdf5, TRUE, "H5Fis_accessible"); + + /* Delete the file */ + ret = H5Fdelete(filename, fapl_id); + VERIFY(ret, SUCCEED, "H5Fdelete"); + + /* Verify that the file is NO LONGER an HDF5 file */ + /* This should fail since there is no file */ + H5E_BEGIN_TRY + { + is_hdf5 = H5Fis_accessible(filename, fapl_id); + } + H5E_END_TRY; + VERIFY(is_hdf5, FAIL, "H5Fis_accessible"); + +#if 0 + /* Just in case deletion fails - silent on errors */ + h5_delete_test_file(FILE_DELETE, fapl_id); + + /*****************/ + /* NON-HDF5 FILE */ + /*****************/ + + /* Get fapl-dependent filename */ + h5_fixname(FILE_DELETE_NOT_HDF5, fapl_id, filename, sizeof(filename)); + + /* Create a non-HDF5 file */ + fd = HDopen(filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); + CHECK_I(fd, "HDopen"); + + /* Close the file */ + ret = HDclose(fd); + VERIFY(ret, 0, "HDclose"); + + /* Verify that the file is not an HDF5 file */ + /* Note that you can get a FAIL result when h5_fixname() + * perturbs the filename as a file with that exact name + * may not have been created since we created it with + * open(2) and not the library. 
+ */ + H5E_BEGIN_TRY + { + is_hdf5 = H5Fis_accessible(filename, fapl_id); + } + H5E_END_TRY; + CHECK(is_hdf5, TRUE, "H5Fis_accessible"); + + /* Try to delete it (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Fdelete(filename, fapl_id); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Fdelete"); + + /* Delete the file */ + iret = H5Fdelete(filename, H5P_DEFAULT); + VERIFY(iret, 0, "H5Fdelete"); +#endif +} /* end test_file_delete() */ + +/**************************************************************** +** +** test_file_open_dot(): low-level file test routine. +** This test checks whether opening objects with "." for a name +** works correctly in various situations. +** +*****************************************************************/ +static void +test_file_open_dot(void) +{ + hid_t fid; /* File ID */ + hid_t gid, gid2; /* Group IDs */ + hid_t did; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid, tid2; /* Datatype IDs */ + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing opening objects with \".\" for a name\n")); + + /* Create a new HDF5 file to work with */ + fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create a group in the HDF5 file */ + gid = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Create a dataspace for creating datasets */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create a dataset with no name using the file ID */ + H5E_BEGIN_TRY + { + did = H5Dcreate2(fid, ".", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(did, FAIL, "H5Dcreate2"); + + /* Create a dataset with no name using the group ID */ + H5E_BEGIN_TRY + { + did = H5Dcreate2(gid, ".", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(did, FAIL, "H5Dcreate2"); + + /* Open a dataset with no name using the file ID */ + H5E_BEGIN_TRY + { + did = H5Dopen2(fid, ".", H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(did, FAIL, "H5Dopen2"); + + /* Open a dataset with no name using the group ID */ + H5E_BEGIN_TRY + { + did = H5Dopen2(gid, ".", H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(did, FAIL, "H5Dopen2"); + + /* Make a copy of a datatype to use for creating a named datatype */ + tid = H5Tcopy(H5T_NATIVE_INT); + CHECK(tid, FAIL, "H5Tcopy"); + + /* Create a named datatype with no name using the file ID */ + H5E_BEGIN_TRY + { + ret = H5Tcommit2(fid, ".", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Tcommit2"); + + /* Create a named datatype with no name using the group ID */ + H5E_BEGIN_TRY + { + ret = H5Tcommit2(gid, ".", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Tcommit2"); + + /* Open a named datatype with no name using the file ID */ + H5E_BEGIN_TRY + { + tid2 = H5Topen2(fid, ".", H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tid2, FAIL, "H5Topen2"); + + /* Open a named datatype with no name using the group ID */ + H5E_BEGIN_TRY + { + tid2 = H5Topen2(gid, ".", H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tid2, FAIL, "H5Topen2"); + + /* Create a group with no name using the file ID */ + H5E_BEGIN_TRY + { + gid2 = H5Gcreate2(fid, ".", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(gid2, FAIL, "H5Gcreate2"); + + /* Create a group with no name using the group ID */ + H5E_BEGIN_TRY + { + gid2 = H5Gcreate2(gid, ".", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + 
H5E_END_TRY; + VERIFY(gid2, FAIL, "H5Gcreate2"); + + /* Open a group with no name using the file ID (should open the root group) */ + gid2 = H5Gopen2(fid, ".", H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gopen2"); + + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Open a group with no name using the group ID (should open the group again) */ + gid2 = H5Gopen2(gid, ".", H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gopen2"); + + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close everything */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_file_open_dot() */ + +/**************************************************************** +** +** test_file_open_overlap(): low-level file test routine. +** This test checks whether opening files in an overlapping way +** (as opposed to a nested manner) works correctly. +** +*****************************************************************/ +static void +test_file_open_overlap(void) +{ + hid_t fid1, fid2; + hid_t did1, did2; + hid_t gid; + hid_t sid; + ssize_t nobjs; /* # of open objects */ + unsigned intent; +#if 0 + unsigned long fileno1, fileno2; /* File number */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing opening overlapping file opens\n")); + + /* Create file */ + fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Open file also */ + fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Check the intent */ + ret = H5Fget_intent(fid1, &intent); + CHECK(ret, FAIL, "H5Fget_intent"); + VERIFY(intent, H5F_ACC_RDWR, "H5Fget_intent"); +#if 0 + /* Check the file numbers */ + fileno1 = 0; + ret = H5Fget_fileno(fid1, &fileno1); + CHECK(ret, FAIL, "H5Fget_fileno"); + fileno2 = 0; + ret = H5Fget_fileno(fid2, &fileno2); + CHECK(ret, FAIL, "H5Fget_fileno"); + VERIFY(fileno1, fileno2, "H5Fget_fileno"); + + /* Check that a file number pointer of NULL is ignored */ + ret = H5Fget_fileno(fid1, NULL); + CHECK(ret, FAIL, "H5Fget_fileno"); +#endif + + /* Create a group in file */ + gid = H5Gcreate2(fid1, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataset in group w/first file ID */ + did1 = H5Dcreate2(gid, DSET1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did1, FAIL, "H5Dcreate2"); +#ifndef WRONG_DATATYPE_OBJ_COUNT + /* Check number of objects opened in first file */ + nobjs = H5Fget_obj_count(fid1, H5F_OBJ_LOCAL | H5F_OBJ_ALL); + VERIFY(nobjs, 3, "H5Fget_obj_count"); /* 3 == file, dataset & group */ +#endif + /* Close dataset */ + ret = H5Dclose(did1); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close first file ID */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Create dataset with second file ID */ + did2 = H5Dcreate2(fid2, DSET2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did2, FAIL, "H5Dcreate2"); + + /* Check number of objects opened in first file */ + nobjs = H5Fget_obj_count(fid2, H5F_OBJ_ALL); + VERIFY(nobjs, 2, "H5Fget_obj_count"); /* 3 == file & dataset */ + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close 
second dataset */ + ret = H5Dclose(did2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close second file */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_file_open_overlap() */ + +/**************************************************************** +** +** test_file_getname(): low-level file test routine. +** This test checks whether H5Fget_name works correctly. +** +*****************************************************************/ +static void +test_file_getname(void) +{ + /* Compound datatype */ + typedef struct s1_t { + unsigned int a; + float b; + } s1_t; + + hid_t file_id; + hid_t group_id; + hid_t dataset_id; + hid_t space_id; + hid_t type_id; + hid_t attr_id; + hsize_t dims[TESTA_RANK] = {TESTA_NX, TESTA_NY}; + char name[TESTA_NAME_BUF_SIZE]; + ssize_t name_len; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Fget_name() functionality\n")); + + /* Create a new file_id using default properties. */ + file_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fcreate"); + + /* Get and verify file name */ + name_len = H5Fget_name(file_id, name, (size_t)TESTA_NAME_BUF_SIZE); + CHECK(name_len, FAIL, "H5Fget_name"); + VERIFY_STR(name, FILE1, "H5Fget_name"); + + /* Create a group in the root group */ + group_id = H5Gcreate2(file_id, TESTA_GROUPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group_id, FAIL, "H5Gcreate2"); + + /* Get and verify file name */ + name_len = H5Fget_name(group_id, name, (size_t)TESTA_NAME_BUF_SIZE); + CHECK(name_len, FAIL, "H5Fget_name"); + VERIFY_STR(name, FILE1, "H5Fget_name"); + + /* Create the data space */ + space_id = H5Screate_simple(TESTA_RANK, dims, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + + /* Try get file name from data space. Supposed to fail because + * it's illegal operation. 
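+     * (A dataspace is not an object stored in a file, so it has no
+     * associated file name for H5Fget_name() to return.)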
*/ + H5E_BEGIN_TRY + { + name_len = H5Fget_name(space_id, name, (size_t)TESTA_NAME_BUF_SIZE); + } + H5E_END_TRY; + VERIFY(name_len, FAIL, "H5Fget_name"); + + /* Create a new dataset */ + dataset_id = + H5Dcreate2(file_id, TESTA_DSETNAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset_id, FAIL, "H5Dcreate2"); + + /* Get and verify file name */ + name_len = H5Fget_name(dataset_id, name, (size_t)TESTA_NAME_BUF_SIZE); + CHECK(name_len, FAIL, "H5Fget_name"); + VERIFY_STR(name, FILE1, "H5Fget_name"); + + /* Create an attribute for the dataset */ + attr_id = H5Acreate2(dataset_id, TESTA_ATTRNAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Acreate2"); + + /* Get and verify file name */ + name_len = H5Fget_name(attr_id, name, (size_t)TESTA_NAME_BUF_SIZE); + CHECK(name_len, FAIL, "H5Fget_name"); + VERIFY_STR(name, FILE1, "H5Fget_name"); + + /* Create a compound datatype */ + type_id = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(type_id, FAIL, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(type_id, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(type_id, "b", HOFFSET(s1_t, b), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Save it on file */ + ret = H5Tcommit2(file_id, TESTA_DTYPENAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Get and verify file name */ + name_len = H5Fget_name(type_id, name, (size_t)TESTA_NAME_BUF_SIZE); + CHECK(name_len, FAIL, "H5Fget_name"); + VERIFY_STR(name, FILE1, "H5Fget_name"); + + /* Close things down */ + ret = H5Tclose(type_id); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Gclose(group_id); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_file_getname() */ + +/**************************************************************** +** +** test_file_double_root_open(): low-level file test routine. +** This test checks whether opening the root group from two +** different files works correctly. +** +*****************************************************************/ +static void +test_file_double_root_open(void) +{ + hid_t file1_id, file2_id; + hid_t grp1_id, grp2_id; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing double root group open\n")); + + file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file1_id, FAIL, "H5Fcreate"); + file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file2_id, FAIL, "H5Fopen"); + + grp1_id = H5Gopen2(file1_id, "/", H5P_DEFAULT); + CHECK(grp1_id, FAIL, "H5Gopen2"); + grp2_id = H5Gopen2(file2_id, "/", H5P_DEFAULT); + CHECK(grp2_id, FAIL, "H5Gopen2"); + + /* Note "asymmetric" close order */ + ret = H5Gclose(grp1_id); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Gclose(grp2_id); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(file1_id); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(file2_id); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_file_double_root_open() */ + +/**************************************************************** +** +** test_file_double_group_open(): low-level file test routine. +** This test checks whether opening the same group from two +** different files works correctly. 
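+**    Both IDs refer to the same underlying file, so a group created
+**    through one file ID can be opened through the other, e.g.:
+**
+**        grp1_id = H5Gcreate2(file1_id, GRP_NAME, ...);
+**        grp2_id = H5Gopen2(file2_id, GRP_NAME, H5P_DEFAULT);
+**
+**    and both group IDs must remain usable until they are closed, in an
+**    "asymmetric" order, before the two file IDs.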
+** +*****************************************************************/ +static void +test_file_double_group_open(void) +{ + hid_t file1_id, file2_id; + hid_t grp1_id, grp2_id; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing double non-root group open\n")); + + file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file1_id, FAIL, "H5Fcreate"); + file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file2_id, FAIL, "H5Fopen"); + + grp1_id = H5Gcreate2(file1_id, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp1_id, FAIL, "H5Gcreate2"); + grp2_id = H5Gopen2(file2_id, GRP_NAME, H5P_DEFAULT); + CHECK(grp2_id, FAIL, "H5Gopen2"); + + /* Note "asymmetric" close order */ + ret = H5Gclose(grp1_id); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Gclose(grp2_id); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(file1_id); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(file2_id); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_file_double_group_open() */ + +/**************************************************************** +** +** test_file_double_dataset_open(): low-level file test routine. +** This test checks whether opening the same dataset from two +** different files works correctly. +** +*****************************************************************/ +static void +test_file_double_dataset_open(void) +{ + hid_t file1_id, file2_id; + hid_t dset1_id, dset2_id; + hid_t space_id; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing double dataset open\n")); + + file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file1_id, FAIL, "H5Fcreate"); + file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file2_id, FAIL, "H5Fopen"); + + /* Create dataspace for dataset */ + space_id = H5Screate(H5S_SCALAR); + CHECK(space_id, FAIL, "H5Screate"); + + dset1_id = + H5Dcreate2(file1_id, DSET_NAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset1_id, FAIL, "H5Dcreate2"); + dset2_id = H5Dopen2(file2_id, DSET_NAME, H5P_DEFAULT); + CHECK(dset2_id, FAIL, "H5Dopen2"); + + /* Close "supporting" dataspace */ + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + + /* Note "asymmetric" close order */ + ret = H5Dclose(dset1_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2_id); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Fclose(file1_id); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(file2_id); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_file_double_dataset_open() */ + +/**************************************************************** +** +** test_file_double_file_dataset_open(): +** This test checks multi-opens of files & datasets: +** It simulates the multi-thread test program from DLS +** which exposes the file pointer segmentation fault failure. +** NOTE: The order on when the files and datasets are open/close +** is important. 
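+**    Each scenario interleaves opens and closes across two file IDs,
+**    roughly:
+**
+**        fid1 = H5Fopen(name, ...);   did1 = H5Dopen2(fid1, dset, ...);
+**        fid2 = H5Fopen(name, ...);   did2 = H5Dopen2(fid2, dset, ...);
+**        H5Dclose(did1);              H5Fclose(fid1);
+**        ... did2 is then written, read or extended and must still work ...
+**        H5Dclose(did2);              H5Fclose(fid2);
+**
+**    i.e. the second dataset has to stay fully usable after the first
+**    file ID (and its dataset) have been closed.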
+** +*****************************************************************/ +static void +test_file_double_file_dataset_open(hbool_t new_format) +{ + hid_t fapl = -1; /* File access property list */ + hid_t dcpl = -1; /* Dataset creation property list */ + hid_t fid1 = -1, fid2 = -1; /* File IDs */ + hid_t did1 = -1, did2 = -1; /* Dataset IDs */ + hid_t sid1 = -1, sid2 = -1; /* Dataspace IDs */ + hid_t tid1 = -1, tid2 = -1; /* Datatype IDs */ + hsize_t dims[1] = {5}, dims2[2] = {1, 4}; /* Dimension sizes */ + hsize_t e_ext_dims[1] = {7}; /* Expanded dimension sizes */ + hsize_t s_ext_dims[1] = {3}; /* Shrunk dimension sizes */ + hsize_t max_dims0[1] = {8}; /* Maximum dimension sizes */ + hsize_t max_dims1[1] = {H5S_UNLIMITED}; /* Maximum dimesion sizes for extensible array index */ + hsize_t max_dims2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes for v2 B-tree index */ + hsize_t chunks[1] = {2}, chunks2[2] = {4, 5}; /* Chunk dimension sizes */ +#if 0 + hsize_t size; /* File size */ +#endif + char filename[FILENAME_LEN]; /* Filename to use */ + const char *data[] = {"String 1", "String 2", "String 3", "String 4", "String 5"}; /* Input Data */ + const char *e_data[] = {"String 1", "String 2", "String 3", "String 4", + "String 5", "String 6", "String 7"}; /* Input Data */ + char *buffer[5]; /* Output buffer */ + int wbuf[4] = {1, 2, 3, 4}; /* Input data */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing double file and dataset open/close\n")); + + /* Setting up test file */ + fapl = h5_fileaccess(); + CHECK(fapl, FAIL, "H5Pcreate"); + if (new_format) { + ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + } /* end if */ + h5_fixname(FILE1, fapl, filename, sizeof filename); + + /* Create the test file */ + fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create a chunked dataset with fixed array indexing */ + sid1 = H5Screate_simple(1, dims, max_dims0); + CHECK(sid1, FAIL, "H5Screate_simple"); + tid1 = H5Tcopy(H5T_C_S1); + CHECK(tid1, FAIL, "H5Tcopy"); + ret = H5Tset_size(tid1, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + ret = H5Pset_chunk(dcpl, 1, chunks); + CHECK(ret, FAIL, "H5Pset_chunk"); + + did1 = H5Dcreate2(fid1, "dset_fa", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did1, FAIL, "H5Dcreate2"); + + /* Closing */ + ret = H5Dclose(did1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create a chunked dataset with extensible array indexing */ + sid1 = H5Screate_simple(1, dims, max_dims1); + CHECK(sid1, FAIL, "H5Screate_simple"); + tid1 = H5Tcopy(H5T_C_S1); + CHECK(tid1, FAIL, "H5Tcopy"); + ret = H5Tset_size(tid1, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + ret = H5Pset_chunk(dcpl, 1, chunks); + CHECK(ret, FAIL, "H5Pset_chunk"); + + did1 = H5Dcreate2(fid1, "dset_ea", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did1, FAIL, "H5Dcreate2"); + + /* Write to the dataset */ + ret = H5Dwrite(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Closing */ + /* (Leave sid1 open for later use) */ + ret = H5Dclose(did1); + 
CHECK(ret, FAIL, "H5Dclose"); + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create a chunked dataset with v2 btree indexing */ + sid2 = H5Screate_simple(2, dims2, max_dims2); + CHECK(sid2, FAIL, "H5Screate_simple"); + + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + ret = H5Pset_chunk(dcpl, 2, chunks2); + CHECK(ret, FAIL, "H5Pset_chunk"); + + did2 = H5Dcreate2(fid1, "dset_bt2", H5T_NATIVE_INT, sid2, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did2, FAIL, "H5Dcreate2"); + + /* Write to the dataset */ + ret = H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Closing */ + ret = H5Dclose(did2); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* + * Scenario 1 + */ + + /* First file open */ + fid1 = H5Fopen(filename, H5F_ACC_RDWR, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + + /* First file's dataset open */ + did1 = H5Dopen2(fid1, "/dset_fa", H5P_DEFAULT); + CHECK(did1, FAIL, "H5Dopen2"); + + tid1 = H5Tcopy(did1); + CHECK(tid1, FAIL, "H5Tcopy"); + + /* First file's dataset write */ + ret = H5Dwrite(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Second file open */ + fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Second file's dataset open */ + did2 = H5Dopen2(fid2, "/dset_fa", H5P_DEFAULT); + CHECK(did2, FAIL, "H5Dopen2"); + + tid2 = H5Tcopy(did2); + CHECK(tid2, FAIL, "H5Tcopy"); + + /* First file's dataset close */ + ret = H5Dclose(did1); + CHECK(ret, FAIL, "H5Dclose"); + + /* First file close */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Second file's dataset write */ + ret = H5Dwrite(did2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Second file's dataset close */ + ret = H5Dclose(did2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Second file close */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + + /* Closing */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* + * Scenario 2 + */ + + /* First file open */ + fid1 = H5Fopen(filename, H5F_ACC_RDONLY, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Second file open */ + fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Second file's dataset open */ + did2 = H5Dopen2(fid2, "/dset_ea", H5P_DEFAULT); + CHECK(did2, FAIL, "H5Dopen2"); + + tid2 = H5Tcopy(did2); + CHECK(tid2, FAIL, "H5Tcopy"); + + /* First file's dataset open */ + did1 = H5Dopen2(fid1, "/dset_ea", H5P_DEFAULT); + CHECK(did1, FAIL, "H5Dopen2"); + + tid1 = H5Tcopy(did1); + CHECK(tid1, FAIL, "H5Tcopy"); + + /* Second file's dataset read */ + HDmemset(buffer, 0, sizeof(char *) * 5); + ret = H5Dread(did2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer); + CHECK(ret, FAIL, "H5Dread"); + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, buffer); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Second file's dataset close */ + ret = H5Dclose(did2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Second file close */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + + /* First file's dataset read */ + HDmemset(buffer, 0, sizeof(char *) * 5); + ret = H5Dread(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer); + CHECK(ret, FAIL, "H5Dread"); + ret = H5Treclaim(tid2, 
sid1, H5P_DEFAULT, buffer); + CHECK(ret, FAIL, "H5Treclaim"); + + /* First file's dataset close */ + ret = H5Dclose(did1); + CHECK(ret, FAIL, "H5Dclose"); + + /* First file close */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Closing */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* + * Scenario 3 + */ + + /* First file open */ + fid1 = H5Fopen(filename, H5F_ACC_RDONLY, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + + /* First file's dataset open */ + did1 = H5Dopen2(fid1, "/dset_bt2", H5P_DEFAULT); + CHECK(did1, FAIL, "H5Dopen2"); +#if 0 + /* First file's get storage size */ + size = H5Dget_storage_size(did1); + CHECK(size, 0, "H5Dget_storage_size"); +#endif + /* Second file open */ + fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Second file's dataset open */ + did2 = H5Dopen2(fid2, "/dset_bt2", H5P_DEFAULT); + CHECK(did2, FAIL, "H5Dopen2"); + + /* First file's dataset close */ + ret = H5Dclose(did1); + CHECK(ret, FAIL, "H5Dclose"); + + /* First file close */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Second file's get storage size */ + size = H5Dget_storage_size(did2); + CHECK(size, 0, "H5Dget_storage_size"); +#endif + /* Second file's dataset close */ + ret = H5Dclose(did2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Second file close */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + + /* + * Scenario 4 + * --trigger H5AC_protect: Assertion `f->shared' failed + * from second call to + * H5Dset_extent->...H5D__earray_idx_remove->H5EA_get...H5EA__iblock_protect...H5AC_protect + */ + /* First file open */ + fid1 = H5Fopen(filename, H5F_ACC_RDWR, fapl); + CHECK(fid1, FAIL, "H5Fopen"); + + /* First file's dataset open */ + did1 = H5Dopen2(fid1, "/dset_ea", H5P_DEFAULT); + CHECK(did1, FAIL, "H5Dopen2"); + + tid1 = H5Tcopy(did1); + CHECK(tid1, FAIL, "H5Tcopy"); + + /* Extend the dataset */ + ret = H5Dset_extent(did1, e_ext_dims); + CHECK(ret, FAIL, "H5Dset_extent"); + + /* Write to the dataset */ + ret = H5Dwrite(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, e_data); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Second file open */ + fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Second file's dataset open */ + did2 = H5Dopen2(fid2, "/dset_ea", H5P_DEFAULT); + CHECK(did2, FAIL, "H5Dopen2"); + + /* First file's dataset close */ + ret = H5Dclose(did1); + CHECK(ret, FAIL, "H5Dclose"); + + /* First file close */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Shrink the dataset */ + ret = H5Dset_extent(did2, s_ext_dims); + CHECK(ret, FAIL, "H5Dset_extent"); + + /* Second file's dataset close */ + ret = H5Dclose(did2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Second file close */ + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close the data type */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close FAPL */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); +} /* end test_file_double_dataset_open() */ + +/**************************************************************** +** +** test_file_double_datatype_open(): low-level file test routine. +** This test checks whether opening the same named datatype from two +** different files works correctly. 
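+**    The datatype is committed through one file ID and then opened
+**    through the other, e.g.:
+**
+**        H5Tcommit2(file1_id, TYPE_NAME, type1_id, ...);
+**        type2_id = H5Topen2(file2_id, TYPE_NAME, H5P_DEFAULT);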
+** +*****************************************************************/ +static void +test_file_double_datatype_open(void) +{ + hid_t file1_id, file2_id; + hid_t type1_id, type2_id; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing double datatype open\n")); + + file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file1_id, FAIL, "H5Fcreate"); + file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file2_id, FAIL, "H5Fopen"); + + type1_id = H5Tcopy(H5T_NATIVE_INT); + CHECK(type1_id, FAIL, "H5Tcopy"); + ret = H5Tcommit2(file1_id, TYPE_NAME, type1_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + type2_id = H5Topen2(file2_id, TYPE_NAME, H5P_DEFAULT); + CHECK(type2_id, FAIL, "H5Topen2"); + + /* Note "asymmetric" close order */ + ret = H5Tclose(type1_id); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(type2_id); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Fclose(file1_id); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(file2_id); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_file_double_dataset_open() */ + +/**************************************************************** +** +** test_userblock_file_size(): low-level file test routine. +** This test checks that the presence of a userblock +** affects the file size in the expected manner, and that +** the filesize is not changed by reopening the file. It +** creates two files which are identical except that one +** contains a userblock, and verifies that their file sizes +** differ exactly by the userblock size. +** +*****************************************************************/ +#if 0 +static void +test_userblock_file_size(const char *env_h5_drvr) +{ + hid_t file1_id, file2_id; + hid_t group1_id, group2_id; + hid_t dset1_id, dset2_id; + hid_t space_id; + hid_t fcpl2_id; + hsize_t dims[2] = {3, 4}; +#if 0 + hsize_t filesize1, filesize2, filesize; + unsigned long fileno1, fileno2; /* File number */ +#endif + herr_t ret; /* Generic return value */ + + /* Don't run with multi/split, family or direct drivers */ + if (!HDstrcmp(env_h5_drvr, "multi") || !HDstrcmp(env_h5_drvr, "split") || + !HDstrcmp(env_h5_drvr, "family") || !HDstrcmp(env_h5_drvr, "direct")) + return; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing file size with user block\n")); + + /* Create property list with userblock size set */ + fcpl2_id = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl2_id, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl2_id, USERBLOCK_SIZE); + CHECK(ret, FAIL, "H5Pset_userblock"); + + /* Create files. Only file2 with have a userblock. 
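+     * (The intended size relationship is
+     *  filesize2 == filesize1 + USERBLOCK_SIZE.)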
*/ + file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file1_id, FAIL, "H5Fcreate"); + file2_id = H5Fcreate(FILE2, H5F_ACC_TRUNC, fcpl2_id, H5P_DEFAULT); + CHECK(file2_id, FAIL, "H5Fcreate"); +#if 0 + /* Check the file numbers */ + fileno1 = 0; + ret = H5Fget_fileno(file1_id, &fileno1); + CHECK(ret, FAIL, "H5Fget_fileno"); + fileno2 = 0; + ret = H5Fget_fileno(file2_id, &fileno2); + CHECK(ret, FAIL, "H5Fget_fileno"); + CHECK(fileno1, fileno2, "H5Fget_fileno"); +#endif + /* Create groups */ + group1_id = H5Gcreate2(file1_id, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group1_id, FAIL, "H5Gcreate2"); + group2_id = H5Gcreate2(file2_id, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group2_id, FAIL, "H5Gcreate2"); + + /* Create dataspace */ + space_id = H5Screate_simple(2, dims, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + + /* Create datasets */ + dset1_id = H5Dcreate2(file1_id, DSET2, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset1_id, FAIL, "H5Dcreate2"); + dset2_id = H5Dcreate2(file2_id, DSET2, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset2_id, FAIL, "H5Dcreate2"); + + /* Close IDs */ + ret = H5Dclose(dset1_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(dset2_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Gclose(group1_id); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Gclose(group2_id); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Pclose(fcpl2_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close files */ + ret = H5Fclose(file1_id); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(file2_id); + CHECK(ret, FAIL, "H5Fclose"); + + /* Reopen files */ + file1_id = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file1_id, FAIL, "H5Fopen"); + file2_id = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file2_id, FAIL, "H5Fopen"); +#if 0 + /* Check file sizes */ + ret = H5Fget_filesize(file1_id, &filesize1); + CHECK(ret, FAIL, "H5Fget_filesize"); + ret = H5Fget_filesize(file2_id, &filesize2); + CHECK(ret, FAIL, "H5Fget_filesize"); + + /* Verify that the file sizes differ exactly by the userblock size */ + VERIFY_TYPE((unsigned long long)filesize2, (unsigned long long)(filesize1 + USERBLOCK_SIZE), + unsigned long long, "%llu", "H5Fget_filesize"); +#endif + /* Close files */ + ret = H5Fclose(file1_id); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(file2_id); + CHECK(ret, FAIL, "H5Fclose"); + + /* Reopen files */ + file1_id = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file1_id, FAIL, "H5Fopen"); + file2_id = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file2_id, FAIL, "H5Fopen"); +#if 0 + /* Verify file sizes did not change */ + ret = H5Fget_filesize(file1_id, &filesize); + CHECK(ret, FAIL, "H5Fget_filesize"); + VERIFY(filesize, filesize1, "H5Fget_filesize"); + ret = H5Fget_filesize(file2_id, &filesize); + CHECK(ret, FAIL, "H5Fget_filesize"); + VERIFY(filesize, filesize2, "H5Fget_filesize"); +#endif + /* Close files */ + ret = H5Fclose(file1_id); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(file2_id); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_userblock_file_size() */ +#endif + +/**************************************************************** +** +** test_cached_stab_info(): low-level file test routine. +** This test checks that new files are created with cached +** symbol table information in the superblock (when using +** the old format). 
This is necessary to ensure backwards +** compatibility with versions from 1.3.0 to 1.6.3. +** +*****************************************************************/ +#if 0 +static void +test_cached_stab_info(void) +{ + hid_t file_id; + hid_t group_id; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing cached symbol table information\n")); + + /* Create file */ + file_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fcreate"); + + /* Create group */ + group_id = H5Gcreate2(file_id, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group_id, FAIL, "H5Gcreate2"); + + /* Close file and group */ + ret = H5Gclose(group_id); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + + /* Reopen file */ + file_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fopen"); +#if 0 + /* Verify the cached symbol table information */ + ret = H5F__check_cached_stab_test(file_id); + CHECK(ret, FAIL, "H5F__check_cached_stab_test"); +#endif + /* Close file */ + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_cached_stab_info() */ +#endif + +#if 0 +/* + * To calculate the checksum for a file. + * This is a helper routine for test_rw_noupdate(). + */ +static int +cal_chksum(const char *file, uint32_t *chksum) +{ + int curr_num_errs = nerrors; /* Retrieve the current # of errors */ + int fdes = -1; /* File descriptor */ + void *file_data = NULL; /* Copy of file data */ + ssize_t bytes_read; /* # of bytes read */ + h5_stat_t sb; /* Stat buffer for file */ + herr_t ret; /* Generic return value */ + + /* Open the file */ + fdes = HDopen(file, O_RDONLY); + CHECK(fdes, FAIL, "HDopen"); + + /* Retrieve the file's size */ + ret = HDfstat(fdes, &sb); + CHECK(fdes, FAIL, "HDfstat"); + + /* Allocate space for the file data */ + file_data = HDmalloc((size_t)sb.st_size); + CHECK_PTR(file_data, "HDmalloc"); + + if (file_data) { + /* Read file's data into memory */ + bytes_read = HDread(fdes, file_data, (size_t)sb.st_size); + CHECK(bytes_read == sb.st_size, FALSE, "HDmalloc"); + + /* Calculate checksum */ + *chksum = H5_checksum_lookup3(file_data, sizeof(file_data), 0); + + /* Free memory */ + HDfree(file_data); + } + + /* Close the file */ + ret = HDclose(fdes); + CHECK(ret, FAIL, "HDclose"); + + return ((nerrors == curr_num_errs) ? 0 : -1); +} /* cal_chksum() */ +#endif + +/**************************************************************** +** +** test_rw_noupdate(): low-level file test routine. +** This test checks to ensure that opening and closing a file +** with read/write permissions does not write anything to the +** file if the file does not change. +** Due to the implementation of file locking (status_flags in +** the superblock is used), this test is changed to use checksum +** instead of timestamp to verify the file is not changed. 
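+**    The check itself is straightforward: checksum the file after it is
+**    created and closed, reopen it with H5F_ACC_RDWR and close it again
+**    without modifying anything, checksum it a second time, and require
+**    both checksums to be identical.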
+** +** Programmer: Vailin Choi; July 2013 +** +*****************************************************************/ +#if 0 +static void +test_rw_noupdate(void) +{ + herr_t ret; /* Generic return value */ + hid_t fid; /* File ID */ + uint32_t chksum1, chksum2; /* Checksum value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing to verify that nothing is written if nothing is changed.\n")); + + /* Create and Close a HDF5 File */ + fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Calculate checksum for the file */ + ret = cal_chksum(FILE1, &chksum1); + CHECK(ret, FAIL, "cal_chksum"); + + /* Open and close File With Read/Write Permission */ + fid = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Calculate checksum for the file */ + ret = cal_chksum(FILE1, &chksum2); + CHECK(ret, FAIL, "cal_chksum"); + + /* The two checksums are the same, i.e. the file is not changed */ + VERIFY(chksum1, chksum2, "Checksum"); + +} /* end test_rw_noupdate() */ +#endif + +/**************************************************************** +** +** test_userblock_alignment_helper1(): helper routine for +** test_userblock_alignment() test, to handle common testing +** +** Programmer: Quincey Koziol +** Septmber 10, 2009 +** +*****************************************************************/ +#if 0 +static int +test_userblock_alignment_helper1(hid_t fcpl, hid_t fapl) +{ + hid_t fid; /* File ID */ + int curr_num_errs = nerrors(); /* Retrieve the current # of errors */ + herr_t ret; /* Generic return value */ + + /* Create a file with FAPL & FCPL */ + fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Only proceed further if file ID is OK */ + if (fid > 0) { + hid_t gid; /* Group ID */ + hid_t sid; /* Dataspace ID */ + hid_t did; /* Dataset ID */ + int val = 2; /* Dataset value */ + + /* Create a group */ + gid = H5Gcreate2(fid, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Create a dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + did = H5Dcreate2(gid, "dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Write value to dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } /* end if */ + + return ((nerrors == curr_num_errs) ? 
0 : -1);
+} /* end test_userblock_alignment_helper1() */
+
+/****************************************************************
+**
+** test_userblock_alignment_helper2(): helper routine for
+**    test_userblock_alignment() test, to handle common testing
+**
+** Programmer: Quincey Koziol
+**             September 10, 2009
+**
+*****************************************************************/
+static int
+test_userblock_alignment_helper2(hid_t fapl, hbool_t open_rw)
+{
+    hid_t  fid;                       /* File ID */
+    int    curr_num_errs = nerrors(); /* Retrieve the current # of errors */
+    herr_t ret;                       /* Generic return value */
+
+    /* Re-open file */
+    fid = H5Fopen(FILE1, (open_rw ? H5F_ACC_RDWR : H5F_ACC_RDONLY), fapl);
+    CHECK(fid, FAIL, "H5Fopen");
+
+    /* Only proceed further if file ID is OK */
+    if (fid > 0) {
+        hid_t gid;      /* Group ID */
+        hid_t did;      /* Dataset ID */
+        int   val = -1; /* Dataset value */
+
+        /* Open group */
+        gid = H5Gopen2(fid, "group1", H5P_DEFAULT);
+        CHECK(gid, FAIL, "H5Gopen2");
+
+        /* Open dataset */
+        did = H5Dopen2(gid, "dataset", H5P_DEFAULT);
+        CHECK(did, FAIL, "H5Dopen2");
+
+        /* Read value from dataset */
+        ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val);
+        CHECK(ret, FAIL, "H5Dread");
+        VERIFY(val, 2, "H5Dread");
+
+        /* Close dataset */
+        ret = H5Dclose(did);
+        CHECK(ret, FAIL, "H5Dclose");
+
+        /* Only create new objects if file is open R/W */
+        if (open_rw) {
+            hid_t gid2; /* Group ID */
+
+            /* Create a new group */
+            gid2 = H5Gcreate2(gid, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+            CHECK(gid2, FAIL, "H5Gcreate2");
+
+            /* Close new group */
+            ret = H5Gclose(gid2);
+            CHECK(ret, FAIL, "H5Gclose");
+        } /* end if */
+
+        /* Close group */
+        ret = H5Gclose(gid);
+        CHECK(ret, FAIL, "H5Gclose");
+
+        /* Close file */
+        ret = H5Fclose(fid);
+        CHECK(ret, FAIL, "H5Fclose");
+    } /* end if */
+
+    return ((nerrors == curr_num_errs) ? 0 : -1);
+} /* end test_userblock_alignment_helper2() */
+
+/****************************************************************
+**
+** test_userblock_alignment(): low-level file test routine.
+**    This test checks to ensure that files with both a userblock and an
+**    object [allocation] alignment size set interact properly. 
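+**    The cases below exercise the expected rule: H5Fcreate() succeeds
+**    when the userblock size is 0 or an integral multiple of the
+**    alignment (including equal to it), and fails when the userblock is
+**    a non-integral multiple of, or smaller than, the alignment.  For
+**    example, a 512-byte userblock works with alignment 16 or 512 but
+**    fails with alignment 3 or 1024.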
+** +** Programmer: Quincey Koziol +** Septmber 8, 2009 +** +*****************************************************************/ +static void +test_userblock_alignment(const char *env_h5_drvr) +{ + hid_t fid; /* File ID */ + hid_t fcpl; /* File creation property list ID */ + hid_t fapl; /* File access property list ID */ + herr_t ret; /* Generic return value */ + + /* Only run with sec2 driver */ + if (!h5_using_default_driver(env_h5_drvr)) + return; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing that non-zero userblocks and object alignment interact correctly.\n")); + + /* Case 1: + * Userblock size = 0, alignment != 0 + * Outcome: + * Should succeed + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)0); + CHECK(ret, FAIL, "H5Pset_userblock"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper1(fcpl, fapl); + CHECK(ret, FAIL, "test_userblock_alignment_helper1"); + ret = test_userblock_alignment_helper2(fapl, TRUE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Case 2: + * Userblock size = 512, alignment = 16 + * (userblock is integral mult. of alignment) + * Outcome: + * Should succeed + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_userblock"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper1(fcpl, fapl); + CHECK(ret, FAIL, "test_userblock_alignment_helper1"); + ret = test_userblock_alignment_helper2(fapl, TRUE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Case 3: + * Userblock size = 512, alignment = 512 + * (userblock is equal to alignment) + * Outcome: + * Should succeed + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_userblock"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper1(fcpl, fapl); + CHECK(ret, FAIL, "test_userblock_alignment_helper1"); + ret = test_userblock_alignment_helper2(fapl, TRUE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, 
"H5Pclose"); + + /* Case 4: + * Userblock size = 512, alignment = 3 + * (userblock & alignment each individually valid, but userblock is + * non-integral multiple of alignment) + * Outcome: + * Should fail at file creation + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_userblock"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Create a file with FAPL & FCPL */ + H5E_BEGIN_TRY + { + fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); + } + H5E_END_TRY; + VERIFY(fid, FAIL, "H5Fcreate"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Case 5: + * Userblock size = 512, alignment = 1024 + * (userblock & alignment each individually valid, but userblock is + * less than alignment) + * Outcome: + * Should fail at file creation + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_userblock"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Create a file with FAPL & FCPL */ + H5E_BEGIN_TRY + { + fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); + } + H5E_END_TRY; + VERIFY(fid, FAIL, "H5Fcreate"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Case 6: + * File created with: + * Userblock size = 512, alignment = 512 + * File re-opened for read-only & read-write access with: + * Userblock size = 512, alignment = 1024 + * Outcome: + * Should succeed + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_userblock"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper1(fcpl, fapl); + CHECK(ret, FAIL, "test_userblock_alignment_helper1"); + + /* Change alignment in FAPL */ + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper2(fapl, FALSE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + ret = test_userblock_alignment_helper2(fapl, TRUE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); +} /* end test_userblock_alignment() */ + +/**************************************************************** +** +** test_userblock_alignment_paged(): low-level file test routine. 
+** This test checks to ensure that files with both a userblock and +** alignment interact properly: +** -- alignment via H5Pset_alignment +** -- alignment via paged aggregation +** +** Programmer: Vailin Choi; March 2013 +** +*****************************************************************/ +static void +test_userblock_alignment_paged(const char *env_h5_drvr) +{ + hid_t fid; /* File ID */ + hid_t fcpl; /* File creation property list ID */ + hid_t fapl; /* File access property list ID */ + herr_t ret; /* Generic return value */ + + /* Only run with sec2 driver */ + if (!h5_using_default_driver(env_h5_drvr)) + return; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing interaction between userblock and alignment (via paged aggregation and " + "H5Pset_alignment)\n")); + + /* + * Case 1: + * Userblock size = 0 + * Alignment in use = 4096 + * Strategy = H5F_FILE_SPACE_PAGE; fsp_size = alignment = 4096 + * Outcome: + * Should succeed: + * userblock is 0 and alignment != 0 + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)0); + CHECK(ret, FAIL, "H5Pset_userblock"); + + /* Create file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Set the "use the latest version of the format" bounds */ + ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper1(fcpl, fapl); + CHECK(ret, FAIL, "test_userblock_alignment_helper1"); + ret = test_userblock_alignment_helper2(fapl, TRUE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* + * Case 2a: + * Userblock size = 1024 + * Alignment in use = 512 + * Strategy = H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512 + * H5Pset_alignment() is 3 + * Outcome: + * Should succeed: + * userblock (1024) is integral mult. 
of alignment (512) + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)1024); + CHECK(ret, FAIL, "H5Pset_userblock"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1); + ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); + + /* Create file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper1(fcpl, fapl); + CHECK(ret, FAIL, "test_userblock_alignment_helper1"); + ret = test_userblock_alignment_helper2(fapl, TRUE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* + * Case 2b: + * Userblock size = 1024 + * Alignment in use = 3 + * Strategy = H5F_FILE_SPACE_AGGR; fsp_size = 512 + * (via default file creation property) + * H5Pset_alignment() is 3 + * Outcome: + * Should fail at file creation: + * userblock (1024) is non-integral mult. of alignment (3) + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)1024); + CHECK(ret, FAIL, "H5Pset_userblock"); + ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); + + /* Create file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Create a file with FAPL & FCPL */ + H5E_BEGIN_TRY + { + fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); + } + H5E_END_TRY; + VERIFY(fid, FAIL, "H5Fcreate"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* + * Case 3a: + * Userblock size = 512 + * Alignment in use = 512 + * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512 + * H5Pset_alignment() is 3 + * Outcome: + * Should succeed: + * userblock (512) is equal to alignment (512) + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_userblock"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_file_space_page_size"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper1(fcpl, fapl); + CHECK(ret, FAIL, "test_userblock_alignment_helper1"); + ret = test_userblock_alignment_helper2(fapl, TRUE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* + * Case 3b: + * Userblock size = 512 + * Alignment in use = 3 + * 
Strategy is H5F_FILE_SPACE_NONE; fsp_size = 512 + * H5Pset_alignment() is 3 + * Outcome: + * Should fail at file creation: + * userblock (512) is non-integral mult. of alignment (3) + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_userblock"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, FALSE, (hsize_t)1); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_file_space_page_size"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Create a file with FAPL & FCPL */ + H5E_BEGIN_TRY + { + fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); + } + H5E_END_TRY; + VERIFY(fid, FAIL, "H5Fcreate"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* + * Case 4a: + * Userblock size = 1024 + * Alignment in use = 1023 + * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 1023 + * H5Pset_alignment() is 16 + * Outcome: + * Should fail at file creation: + * userblock (1024) is non-integral multiple of alignment (1023) + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)1024); + CHECK(ret, FAIL, "H5Pset_userblock"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1023); + CHECK(ret, FAIL, "H5Pset_file_space_page_size"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Create a file with FAPL & FCPL */ + H5E_BEGIN_TRY + { + fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); + } + H5E_END_TRY; + VERIFY(fid, FAIL, "H5Fcreate"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* + * Case 4b: + * Userblock size = 1024 + * Alignment in use = 16 + * Strategy is H5F_FILE_SPACE_FSM_AGGR; fsp_size = 1023 + * H5Pset_alignment() is 16 + * Outcome: + * Should succeed: + * userblock (512) is integral multiple of alignment (16) + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)1024); + CHECK(ret, FAIL, "H5Pset_userblock"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, FALSE, (hsize_t)1); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1023); + CHECK(ret, FAIL, "H5Pset_file_space_page_size"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper1(fcpl, fapl); + CHECK(ret, FAIL, 
"test_userblock_alignment_helper1"); + ret = test_userblock_alignment_helper2(fapl, TRUE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* + * Case 5a: + * Userblock size = 512 + * Alignment in use = 1024 + * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 1024 + * H5Pset_alignment() is 16 + * Outcome: + * Should fail at file creation: + * userblock (512) is less than alignment (1024) + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_userblock"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1024); + CHECK(ret, FAIL, "H5Pset_file_space_page_size"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Create a file with FAPL & FCPL */ + H5E_BEGIN_TRY + { + fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); + } + H5E_END_TRY; + VERIFY(fid, FAIL, "H5Fcreate"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* + * Case 5b: + * Userblock size = 512 + * Alignment in use = 16 + * Strategy is H5F_FILE_SPACE_NONE; fsp_size = 1024 + * H5Pset_alignment() is 16 + * Outcome: + * Should succeed: + * userblock (512) is integral multiple of alignment (16) + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_userblock"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, FALSE, (hsize_t)1); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1024); + CHECK(ret, FAIL, "H5Pset_file_space_page_size"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper1(fcpl, fapl); + CHECK(ret, FAIL, "test_userblock_alignment_helper1"); + ret = test_userblock_alignment_helper2(fapl, TRUE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* + * Case 6: + * Userblock size = 512 + * Alignment in use = 512 + * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512 + * H5Pset_alignment() is 3 + * Reopen the file; H5Pset_alignment() is 1024 + * Outcome: + * Should succeed: + * Userblock (512) is the same as alignment (512); + * The H5Pset_alignment() calls have no effect + */ + /* Create file creation property list with user block */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + ret = H5Pset_userblock(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_userblock"); + ret = H5Pset_file_space_strategy(fcpl, 
H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); + CHECK(ret, FAIL, "H5Pset_file_space_page_size"); + + /* Create file access property list with alignment */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper1(fcpl, fapl); + CHECK(ret, FAIL, "test_userblock_alignment_helper1"); + + /* Change alignment in FAPL */ + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Call helper routines to perform file manipulations */ + ret = test_userblock_alignment_helper2(fapl, FALSE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + ret = test_userblock_alignment_helper2(fapl, TRUE); + CHECK(ret, FAIL, "test_userblock_alignment_helper2"); + + /* Release property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + +} /* end test_userblock_alignment_paged() */ +#endif + +/**************************************************************** +** +** test_filespace_info(): +** Verify the following public routines retrieve and set file space +** information correctly: +** (1) H5Pget/set_file_space_strategy(): +** Retrieve and set file space strategy, persisting free-space, +** and free-space section threshold as specified +** (2) H5Pget/set_file_space_page_size(): +** Retrieve and set the page size for paged aggregation +** +****************************************************************/ +#if 0 +static void +test_filespace_info(const char *env_h5_drvr) +{ + hid_t fid; /* File IDs */ + hid_t fapl, new_fapl; /* File access property lists */ + hid_t fcpl, fcpl1, fcpl2; /* File creation property lists */ + H5F_fspace_strategy_t strategy; /* File space strategy */ + hbool_t persist; /* Persist free-space or not */ + hsize_t threshold; /* Free-space section threshold */ + unsigned new_format; /* New or old format */ + H5F_fspace_strategy_t fs_strategy; /* File space strategy--iteration variable */ + unsigned fs_persist; /* Persist free-space or not--iteration variable */ + hsize_t fs_threshold; /* Free-space section threshold--iteration variable */ + hsize_t fsp_size; /* File space page size */ + char filename[FILENAME_LEN]; /* Filename to use */ + hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */ + herr_t ret; /* Return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing file creation public routines: H5Pget/set_file_space_strategy & " + "H5Pget/set_file_space_page_size\n")); + + contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") != 0 && HDstrcmp(env_h5_drvr, "multi") != 0); + + fapl = h5_fileaccess(); + h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename); + + /* Get a copy of the file access property list */ + new_fapl = H5Pcopy(fapl); + CHECK(new_fapl, FAIL, "H5Pcopy"); + + /* Set the "use the latest version of the format" bounds */ + ret = H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* + * Case (1) + * Check file space information from a default file creation property list. 
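+     *          (Editorial aside, not part of the original test: the retrieval
+     *          pattern exercised throughout this routine is simply
+     *
+     *              H5F_fspace_strategy_t strategy;
+     *              hbool_t               persist;
+     *              hsize_t               threshold, fsp_size;
+     *
+     *              H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+     *              H5Pget_file_space_page_size(fcpl, &fsp_size);
+     *
+     *          run here against a list fresh from H5Pcreate(H5P_FILE_CREATE).)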
+ * Values expected: + * strategy--H5F_FILE_SPACE_AGGR + * persist--FALSE + * threshold--1 + * file space page size--4096 + */ + /* Create file creation property list template */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + + /* Retrieve file space information */ + ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + + /* Verify file space information */ + VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); + VERIFY(persist, FALSE, "H5Pget_file_space_strategy"); + VERIFY(threshold, 1, "H5Pget_file_space_strategy"); + + /* Retrieve file space page size */ + ret = H5Pget_file_space_page_size(fcpl, &fsp_size); + CHECK(ret, FAIL, "H5Pget_file_space_page_size"); + VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size"); + + /* Close property list */ + H5Pclose(fcpl); + + /* + * Case (2) + * File space page size has a minimum size of 512. + * Setting value less than 512 will return an error; + * --setting file space page size to 0 + * --setting file space page size to 511 + * + * File space page size has a maximum size of 1 gigabyte. + * Setting value greater than 1 gigabyte will return an error. + */ + /* Create file creation property list template */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + + /* Setting to 0: should fail */ + H5E_BEGIN_TRY + { + ret = H5Pset_file_space_page_size(fcpl, 0); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_file_space_page_size"); + + /* Setting to 511: should fail */ + H5E_BEGIN_TRY + { + ret = H5Pset_file_space_page_size(fcpl, 511); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_file_space_page_size"); + + /* Setting to 1GB+1: should fail */ + H5E_BEGIN_TRY + { + ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE1G + 1); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_file_space_page_size"); + + /* Setting to 512: should succeed */ + ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE512); + CHECK(ret, FAIL, "H5Pset_file_space_page_size"); + ret = H5Pget_file_space_page_size(fcpl, &fsp_size); + CHECK(ret, FAIL, "H5Pget_file_space_page_size"); + VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size"); + + /* Setting to 1GB: should succeed */ + ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE1G); + CHECK(ret, FAIL, "H5Pset_file_space_page_size"); + ret = H5Pget_file_space_page_size(fcpl, &fsp_size); + CHECK(ret, FAIL, "H5Pget_file_space_page_size"); + VERIFY(fsp_size, FSP_SIZE1G, "H5Pget_file_space_page_size"); + + /* Close property list */ + H5Pclose(fcpl); + + /* + * Case (3) + * Check file space information when creating a file with default properties. 
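+     *          (Illustrative sketch only, hypothetical file name: this case and
+     *          the next run the same queries against an actual file by fetching
+     *          its creation property list,
+     *
+     *              hid_t fid   = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+     *              hid_t fcpl1 = H5Fget_create_plist(fid);
+     *
+     *          and then calling H5Pget_file_space_strategy() and
+     *          H5Pget_file_space_page_size() on fcpl1.)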
+ * Values expected: + * strategy--H5F_FILE_SPACE_AGGR + * persist--FALSE + * threshold--1 + * file space page size--4096 + */ + /* Create a file with default file creation and access property lists */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Get the file's creation property list */ + fcpl1 = H5Fget_create_plist(fid); + CHECK(fcpl1, FAIL, "H5Fget_create_plist"); + + /* Retrieve file space information */ + ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + + /* Verify file space information */ + VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); + VERIFY(persist, FALSE, "H5Pget_file_space_strategy"); + VERIFY(threshold, 1, "H5Pget_file_space_strategy"); + + /* Retrieve file space page size */ + ret = H5Pget_file_space_page_size(fcpl1, &fsp_size); + CHECK(ret, FAIL, "H5Pget_file_space_page_size"); + VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size"); + + /* Close property lists */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Pclose(fcpl1); + CHECK(ret, FAIL, "H5Pclose"); + + /* + * Case (4) + * Check file space information when creating a file with the + * latest library format and default properties. + * Values expected: + * strategy--H5F_FILE_SPACE_AGGR + * persist--FALSE + * threshold--1 + * file space page size--4096 + */ + /* Create a file with the latest library format */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, new_fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Get the file's creation property */ + fcpl1 = H5Fget_create_plist(fid); + CHECK(fcpl1, FAIL, "H5Fget_create_plist"); + + /* Retrieve file space information */ + ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + + /* Verify file space information */ + VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); + VERIFY(persist, FALSE, "H5Pget_file_space_strategy"); + VERIFY(threshold, 1, "H5Pget_file_space_strategy"); + + /* Retrieve file space page size */ + ret = H5Pget_file_space_page_size(fcpl1, &fsp_size); + CHECK(ret, FAIL, "H5Pget_file_space_page_size"); + VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size"); + + /* Close property lists */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Pclose(fcpl1); + CHECK(ret, FAIL, "H5Pclose"); + + /* + * Case (5) + * Check file space information with the following combinations: + * Create file with -- + * New or old format + * Persist or not persist free-space + * Different sizes for free-space section threshold (0 to 10) + * The four file space strategies: + * H5F_FSPACE_STRATEGY_FSM_AGGR, H5F_FSPACE_STRATEGY_PAGE, + * H5F_FSPACE_STRATEGY_AGGR, H5F_FSPACE_STRATEGY_NONE + * File space page size: set to 512 + * + */ + for (new_format = FALSE; new_format <= TRUE; new_format++) { + hid_t my_fapl; + + /* Set the FAPL for the type of format */ + if (new_format) { + MESSAGE(5, ("Testing with new group format\n")); + my_fapl = new_fapl; + } /* end if */ + else { + MESSAGE(5, ("Testing with old group format\n")); + my_fapl = fapl; + } /* end else */ + + /* Test with TRUE or FALSE for persisting free-space */ + for (fs_persist = FALSE; fs_persist <= TRUE; fs_persist++) { + + /* Test with free-space section threshold size: 0 to 10 */ + for (fs_threshold = 0; fs_threshold <= TEST_THRESHOLD10; fs_threshold++) { + + /* Test with 4 file space strategies */ + for 
(fs_strategy = H5F_FSPACE_STRATEGY_FSM_AGGR; fs_strategy < H5F_FSPACE_STRATEGY_NTYPES; + fs_strategy++) { + + if (!contig_addr_vfd && (fs_strategy == H5F_FSPACE_STRATEGY_PAGE || fs_persist)) + continue; + + /* Create file creation property list template */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + + /* Set file space information */ + ret = H5Pset_file_space_strategy(fcpl, fs_strategy, (hbool_t)fs_persist, fs_threshold); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + + ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE512); + CHECK(ret, FAIL, "H5Pset_file_space_strategy"); + + /* Retrieve file space information */ + ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + + /* Verify file space information */ + VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy"); + + if (fs_strategy < H5F_FSPACE_STRATEGY_AGGR) { + VERIFY(persist, (hbool_t)fs_persist, "H5Pget_file_space_strategy"); + VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy"); + } + else { + VERIFY(persist, FALSE, "H5Pget_file_space_strategy"); + VERIFY(threshold, 1, "H5Pget_file_space_strategy"); + } + + /* Retrieve and verify file space page size */ + ret = H5Pget_file_space_page_size(fcpl, &fsp_size); + CHECK(ret, FAIL, "H5Pget_file_space_page_size"); + VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size"); + + /* Create the file with the specified file space info */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, my_fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Get the file's creation property */ + fcpl1 = H5Fget_create_plist(fid); + CHECK(fcpl1, FAIL, "H5Fget_create_plist"); + + /* Retrieve file space information */ + ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + + /* Verify file space information */ + VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy"); + + if (fs_strategy < H5F_FSPACE_STRATEGY_AGGR) { + VERIFY(persist, fs_persist, "H5Pget_file_space_strategy"); + VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy"); + } + else { + VERIFY(persist, FALSE, "H5Pget_file_space_strategy"); + VERIFY(threshold, 1, "H5Pget_file_space_strategy"); + } + + /* Retrieve and verify file space page size */ + ret = H5Pget_file_space_page_size(fcpl1, &fsp_size); + CHECK(ret, FAIL, "H5Pget_file_space_page_size"); + VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + fid = H5Fopen(filename, H5F_ACC_RDWR, my_fapl); + CHECK(ret, FAIL, "H5Fopen"); + + /* Get the file's creation property */ + fcpl2 = H5Fget_create_plist(fid); + CHECK(fcpl2, FAIL, "H5Fget_create_plist"); + + /* Retrieve file space information */ + ret = H5Pget_file_space_strategy(fcpl2, &strategy, &persist, &threshold); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + + /* Verify file space information */ + VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy"); + if (fs_strategy < H5F_FSPACE_STRATEGY_AGGR) { + VERIFY(persist, fs_persist, "H5Pget_file_space_strategy"); + VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy"); + } + else { + VERIFY(persist, FALSE, "H5Pget_file_space_strategy"); + VERIFY(threshold, 1, "H5Pget_file_space_strategy"); + } + + /* Retrieve and verify file space page size */ + ret = H5Pget_file_space_page_size(fcpl2, &fsp_size); + CHECK(ret, FAIL, "H5Pget_file_space_page_size"); + VERIFY(fsp_size, 
FSP_SIZE512, "H5Pget_file_space_page_size"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Release file creation property lists */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fcpl1); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fcpl2); + CHECK(ret, FAIL, "H5Pclose"); + } /* end for file space strategy type */ + } /* end for free-space section threshold */ + } /* end for fs_persist */ + + /* close fapl_ and remove the file */ +#if 0 + h5_clean_files(FILESPACE_NAME, my_fapl); +#endif + + H5E_BEGIN_TRY + { + H5Fdelete(FILESPACE_NAME[0], my_fapl); + } + H5E_END_TRY; + } /* end for new_format */ + +} /* test_filespace_info() */ +#endif + +/**************************************************************** +** +** set_multi_split(): +** Internal routine to set up page-aligned address space for multi/split driver +** when testing paged aggregation. +** This is used by test_file_freespace() and test_sects_freespace(). +** +*****************************************************************/ +#if 0 +static int +set_multi_split(hid_t fapl, hsize_t pagesize, hbool_t split) +{ + H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; + hid_t memb_fapl_arr[H5FD_MEM_NTYPES]; + char *memb_name[H5FD_MEM_NTYPES]; + haddr_t memb_addr[H5FD_MEM_NTYPES]; + hbool_t relax; + H5FD_mem_t mt; + + HDassert(split); + + HDmemset(memb_name, 0, sizeof memb_name); + + /* Get current split settings */ + if (H5Pget_fapl_multi(fapl, memb_map, memb_fapl_arr, memb_name, memb_addr, &relax) < 0) + TEST_ERROR; + + if (split) { + /* Set memb_addr aligned */ + memb_addr[H5FD_MEM_SUPER] = ((memb_addr[H5FD_MEM_SUPER] + pagesize - 1) / pagesize) * pagesize; + memb_addr[H5FD_MEM_DRAW] = ((memb_addr[H5FD_MEM_DRAW] + pagesize - 1) / pagesize) * pagesize; + } + else { + /* Set memb_addr aligned */ + for (mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; mt++) + memb_addr[mt] = ((memb_addr[mt] + pagesize - 1) / pagesize) * pagesize; + } /* end else */ + + /* Set multi driver with new FAPLs */ + if (H5Pset_fapl_multi(fapl, memb_map, memb_fapl_arr, (const char *const *)memb_name, memb_addr, relax) < + 0) + TEST_ERROR; + + /* Free memb_name */ + for (mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; mt++) + HDfree(memb_name[mt]); + + return 0; + +error: + return (-1); + +} /* set_multi_split() */ +#endif + +/**************************************************************** +** +** test_file_freespace(): +** This routine checks the free space available in a file as +** returned by the public routine H5Fget_freespace(). 
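+**
+**      (Editorial aside -- a minimal, hypothetical use of the query, assuming
+**      an already-open file identifier file_id:
+**
+**          hssize_t free_space = H5Fget_freespace(file_id);
+**
+**      a negative return signals failure; otherwise the value is the total
+**      size, in bytes, of the free-space sections currently tracked for the
+**      file.)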
+** +** +*****************************************************************/ +#if 0 +static void +test_file_freespace(const char *env_h5_drvr) +{ + hid_t file; /* File opened with read-write permission */ +#if 0 + h5_stat_size_t empty_filesize; /* Size of file when empty */ + h5_stat_size_t mod_filesize; /* Size of file after being modified */ + hssize_t free_space; /* Amount of free space in file */ +#endif + hid_t fcpl; /* File creation property list */ + hid_t fapl, new_fapl; /* File access property list IDs */ + hid_t dspace; /* Dataspace ID */ + hid_t dset; /* Dataset ID */ + hid_t dcpl; /* Dataset creation property list */ + int k; /* Local index variable */ + unsigned u; /* Local index variable */ + char filename[FILENAME_LEN]; /* Filename to use */ + char name[32]; /* Dataset name */ + unsigned new_format; /* To use old or new format */ + hbool_t split_vfd, multi_vfd; /* Indicate multi/split driver */ + hsize_t expected_freespace; /* Freespace expected */ + hsize_t expected_fs_del; /* Freespace expected after delete */ + herr_t ret; /* Return value */ + + split_vfd = !HDstrcmp(env_h5_drvr, "split"); + multi_vfd = !HDstrcmp(env_h5_drvr, "multi"); + + if (!split_vfd && !multi_vfd) { + fapl = h5_fileaccess(); + h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename); + + new_fapl = H5Pcopy(fapl); + CHECK(new_fapl, FAIL, "H5Pcopy"); + + /* Set the "use the latest version of the format" bounds */ + ret = H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + + /* Test with old & new format */ + for (new_format = FALSE; new_format <= TRUE; new_format++) { + hid_t my_fapl; + + /* Set the FAPL for the type of format */ + if (new_format) { + MESSAGE(5, ("Testing with new group format\n")); + + my_fapl = new_fapl; + + if (multi_vfd || split_vfd) { + ret = set_multi_split(new_fapl, FSP_SIZE_DEF, split_vfd); + CHECK(ret, FAIL, "set_multi_split"); + } + + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1); + CHECK(ret, FAIL, "H5P_set_file_space_strategy"); + + expected_freespace = 4534; + if (split_vfd) + expected_freespace = 427; + if (multi_vfd) + expected_freespace = 248; + expected_fs_del = 0; + } /* end if */ + else { + MESSAGE(5, ("Testing with old group format\n")); + /* Default: non-paged aggregation, non-persistent free-space */ + my_fapl = fapl; + expected_freespace = 2464; + if (split_vfd) + expected_freespace = 264; + if (multi_vfd) + expected_freespace = 0; + expected_fs_del = 4096; + + } /* end else */ + + /* Create an "empty" file */ + file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, my_fapl); + CHECK(file, FAIL, "H5Fcreate"); + + ret = H5Fclose(file); + CHECK_I(ret, "H5Fclose"); +#if 0 + /* Get the "empty" file size */ + empty_filesize = h5_get_file_size(filename, H5P_DEFAULT); +#endif + /* Re-open the file (with read-write permission) */ + file = H5Fopen(filename, H5F_ACC_RDWR, my_fapl); + CHECK_I(file, "H5Fopen"); +#if 0 + /* Check that the free space is 0 */ + free_space = H5Fget_freespace(file); + CHECK(free_space, FAIL, "H5Fget_freespace"); + VERIFY(free_space, 0, "H5Fget_freespace"); +#endif + /* Create dataspace for datasets */ + dspace = H5Screate(H5S_SCALAR); + CHECK(dspace, FAIL, "H5Screate"); + + /* Create a dataset creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Set the space allocation time to early */ + ret = H5Pset_alloc_time(dcpl, 
H5D_ALLOC_TIME_EARLY); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create datasets in file */ + for (u = 0; u < 10; u++) { + HDsnprintf(name, sizeof(name), "Dataset %u", u); + dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + } /* end for */ + + /* Close dataspace */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); +#if 0 + /* Check that there is the right amount of free space in the file */ + free_space = H5Fget_freespace(file); + CHECK(free_space, FAIL, "H5Fget_freespace"); + VERIFY(free_space, expected_freespace, "H5Fget_freespace"); +#endif + /* Delete datasets in file */ + for (k = 9; k >= 0; k--) { + HDsnprintf(name, sizeof(name), "Dataset %u", (unsigned)k); + ret = H5Ldelete(file, name, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + } /* end for */ +#if 0 + /* Check that there is the right amount of free space in the file */ + free_space = H5Fget_freespace(file); + CHECK(free_space, FAIL, "H5Fget_freespace"); + VERIFY(free_space, expected_fs_del, "H5Fget_freespace"); +#endif + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +#if 0 + /* Get the file size after modifications*/ + mod_filesize = h5_get_file_size(filename, H5P_DEFAULT); + + /* Check that the file reverted to empty size */ + VERIFY(mod_filesize, empty_filesize, "H5Fget_freespace"); + + h5_clean_files(FILESPACE_NAME, my_fapl); +#endif + H5Fdelete(FILESPACE_NAME[0], my_fapl); + } /* end for */ + } + +} /* end test_file_freespace() */ + +/**************************************************************** +** +** test_sects_freespace(): +** This routine checks free-space section information for the +** file as returned by the public routine H5Fget_free_sections(). 
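+**
+**      (Editorial aside -- the usual two-pass calling pattern, sketched with a
+**      hypothetical open file identifier file_id and an assumed upper bound of
+**      64 sections:
+**
+**          hssize_t        nall = H5Fget_free_sections(file_id, H5FD_MEM_DEFAULT, (size_t)0, NULL);
+**          H5F_sect_info_t sects[64];
+**
+**          if (nall > 0 && nall <= 64)
+**              H5Fget_free_sections(file_id, H5FD_MEM_DEFAULT, (size_t)nall, sects);
+**
+**      the first call, with a zero count and NULL buffer, returns how many
+**      sections exist; the second fills in their addresses and sizes.)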
+** +*****************************************************************/ +static void +test_sects_freespace(const char *env_h5_drvr, hbool_t new_format) +{ + char filename[FILENAME_LEN]; /* Filename to use */ + hid_t file; /* File ID */ + hid_t fcpl; /* File creation property list template */ + hid_t fapl; /* File access property list template */ +#if 0 + hssize_t free_space; /* Amount of free-space in the file */ +#endif + hid_t dspace; /* Dataspace ID */ + hid_t dset; /* Dataset ID */ + hid_t dcpl; /* Dataset creation property list */ + char name[32]; /* Dataset name */ + hssize_t nsects = 0; /* # of free-space sections */ + hssize_t nall; /* # of free-space sections for all types of data */ + hssize_t nmeta = 0, nraw = 0; /* # of free-space sections for meta/raw/generic data */ + H5F_sect_info_t sect_info[15]; /* Array to hold free-space information */ + H5F_sect_info_t all_sect_info[15]; /* Array to hold free-space information for all types of data */ + H5F_sect_info_t meta_sect_info[15]; /* Array to hold free-space information for metadata */ + H5F_sect_info_t raw_sect_info[15]; /* Array to hold free-space information for raw data */ + hsize_t total = 0; /* sum of the free-space section sizes */ + hsize_t tmp_tot = 0; /* Sum of the free-space section sizes */ + hsize_t last_size; /* Size of last free-space section */ + hsize_t dims[1]; /* Dimension sizes */ + unsigned u; /* Local index variable */ + H5FD_mem_t type; + hbool_t split_vfd = FALSE, multi_vfd = FALSE; + herr_t ret; /* Return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Fget_free_sections()--free-space section info in the file\n")); + + split_vfd = !HDstrcmp(env_h5_drvr, "split"); + multi_vfd = !HDstrcmp(env_h5_drvr, "multi"); + + if (!split_vfd && !multi_vfd) { + + fapl = h5_fileaccess(); + h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename); + + /* Create file-creation template */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + + if (new_format) { + ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Set to paged aggregation and persistent free-space */ + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + + /* Set up paged aligned address space for multi/split driver */ + if (multi_vfd || split_vfd) { + ret = set_multi_split(fapl, FSP_SIZE_DEF, split_vfd); + CHECK(ret, FAIL, "set_multi_split"); + } + } + else { + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + } + + /* Create the file */ + file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(file, FAIL, "H5Fcreate"); + + /* Create a dataset creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Set the space allocation time to early */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create 1 large dataset */ + dims[0] = 1200; + dspace = H5Screate_simple(1, dims, NULL); + dset = H5Dcreate2(file, "Dataset_large", H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + + /* Close dataset */ + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataspace */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for datasets */ + dspace = H5Screate(H5S_SCALAR); + 
CHECK(dspace, FAIL, "H5Screate"); + + /* Create datasets in file */ + for (u = 0; u < 10; u++) { + HDsnprintf(name, sizeof(name), "Dataset %u", u); + dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + } /* end for */ + + /* Close dataspace */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Delete odd-numbered datasets in file */ + for (u = 0; u < 10; u++) { + HDsnprintf(name, sizeof(name), "Dataset %u", u); + if (u % 2) { + ret = H5Ldelete(file, name, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + } /* end if */ + } /* end for */ + + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file with read-only permission */ + file = H5Fopen(filename, H5F_ACC_RDONLY, fapl); + CHECK_I(file, "H5Fopen"); +#if 0 + /* Get the amount of free space in the file */ + free_space = H5Fget_freespace(file); + CHECK(free_space, FAIL, "H5Fget_freespace"); +#endif + /* Get the total # of free-space sections in the file */ + nall = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, NULL); + CHECK(nall, FAIL, "H5Fget_free_sections"); + + /* Should return failure when nsects is 0 with a nonnull sect_info */ + nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, all_sect_info); + VERIFY(nsects, FAIL, "H5Fget_free_sections"); + + /* Retrieve and verify free space info for all the sections */ + HDmemset(all_sect_info, 0, sizeof(all_sect_info)); + nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)nall, all_sect_info); + VERIFY(nsects, nall, "H5Fget_free_sections"); + + /* Verify the amount of free-space is correct */ + for (u = 0; u < nall; u++) + total += all_sect_info[u].size; +#if 0 + VERIFY(free_space, total, "H5Fget_free_sections"); +#endif + /* Save the last section's size */ + last_size = all_sect_info[nall - 1].size; + + /* Retrieve and verify free space info for -1 sections */ + HDmemset(sect_info, 0, sizeof(sect_info)); + nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall - 1), sect_info); + VERIFY(nsects, nall, "H5Fget_free_sections"); + + /* Verify the amount of free-space is correct */ + total = 0; + for (u = 0; u < (nall - 1); u++) { + VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections"); + VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections"); + total += sect_info[u].size; + } +#if 0 + VERIFY(((hsize_t)free_space - last_size), total, "H5Fget_free_sections"); +#endif + /* Retrieve and verify free-space info for +1 sections */ + HDmemset(sect_info, 0, sizeof(sect_info)); + nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall + 1), sect_info); + VERIFY(nsects, nall, "H5Fget_free_sections"); + + /* Verify amount of free-space is correct */ + total = 0; + for (u = 0; u < nall; u++) { + VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections"); + VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections"); + total += sect_info[u].size; + } + VERIFY(sect_info[nall].addr, 0, "H5Fget_free_sections"); + VERIFY(sect_info[nall].size, 0, "H5Fget_free_sections"); +#if 0 + VERIFY(free_space, total, "H5Fget_free_sections"); +#endif + + HDmemset(meta_sect_info, 0, sizeof(meta_sect_info)); + if (multi_vfd) { + hssize_t ntmp; + + for (type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; type++) { + if (type == 
H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP) + continue; + /* Get the # of free-space sections in the file for metadata */ + ntmp = H5Fget_free_sections(file, type, (size_t)0, NULL); + CHECK(ntmp, FAIL, "H5Fget_free_sections"); + + if (ntmp > 0) { + nsects = H5Fget_free_sections(file, type, (size_t)ntmp, &meta_sect_info[nmeta]); + VERIFY(nsects, ntmp, "H5Fget_free_sections"); + nmeta += ntmp; + } + } + } + else { + /* Get the # of free-space sections in the file for metadata */ + nmeta = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)0, NULL); + CHECK(nmeta, FAIL, "H5Fget_free_sections"); + + /* Retrieve and verify free-space sections for metadata */ + nsects = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)nmeta, meta_sect_info); + VERIFY(nsects, nmeta, "H5Fget_free_sections"); + } + + /* Get the # of free-space sections in the file for raw data */ + nraw = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)0, NULL); + CHECK(nraw, FAIL, "H5Fget_free_sections"); + + /* Retrieve and verify free-space sections for raw data */ + HDmemset(raw_sect_info, 0, sizeof(raw_sect_info)); + nsects = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)nraw, raw_sect_info); + VERIFY(nsects, nraw, "H5Fget_free_sections"); + + /* Sum all the free-space sections */ + for (u = 0; u < nmeta; u++) + tmp_tot += meta_sect_info[u].size; + + for (u = 0; u < nraw; u++) + tmp_tot += raw_sect_info[u].size; + + /* Verify free-space info */ + VERIFY(nmeta + nraw, nall, "H5Fget_free_sections"); + VERIFY(tmp_tot, total, "H5Fget_free_sections"); + + /* Closing */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Pclose(fcpl); + CHECK(fcpl, FAIL, "H5Pclose"); +#if 0 + h5_clean_files(FILESPACE_NAME, fapl); +#endif + H5Fdelete(FILESPACE_NAME[0], fapl); + } + +} /* end test_sects_freespace() */ +#endif + +/**************************************************************** +** +** test_filespace_compatible(): +** Verify that the trunk with the latest file space management +** can open, read and modify 1.6 HDF5 file and 1.8 HDF5 file. +** Also verify the correct file space handling information +** and the amount of free space. 
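+**
+**      (Editorial aside -- before opening the canned files the test makes a
+**      byte-for-byte working copy with POSIX-style I/O so the originals stay
+**      pristine; roughly, with srcname/dstname standing in for the real
+**      file names:
+**
+**          uint8_t buf[1024];
+**          ssize_t n;
+**          int     fd_old = HDopen(srcname, O_RDONLY);
+**          int     fd_new = HDopen(dstname, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW);
+**
+**          while ((n = HDread(fd_old, buf, sizeof(buf))) > 0)
+**              HDwrite(fd_new, buf, (size_t)n);
+**
+**      followed by HDclose() on both descriptors.)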
+** +****************************************************************/ +#if 0 +static void +test_filespace_compatible(void) +{ + int fd_old = (-1), fd_new = (-1); /* File descriptors for copying data */ + hid_t fid = -1; /* File id */ + hid_t did = -1; /* Dataset id */ + hid_t fcpl; /* File creation property list template */ + int check[100]; /* Temporary buffer for verifying dataset data */ + int rdbuf[100]; /* Temporary buffer for reading in dataset data */ + uint8_t buf[READ_OLD_BUFSIZE]; /* temporary buffer for reading */ + ssize_t nread; /* Number of bytes read in */ + unsigned i, j; /* Local index variable */ + hssize_t free_space; /* Amount of free-space in the file */ + hbool_t persist; /* Persist free-space or not */ + hsize_t threshold; /* Free-space section threshold */ + H5F_fspace_strategy_t strategy; /* File space handling strategy */ + herr_t ret; /* Return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("File space compatibility testing for 1.6 and 1.8 files\n")); + + for (j = 0; j < NELMTS(OLD_FILENAME); j++) { + const char *filename = H5_get_srcdir_filename(OLD_FILENAME[j]); /* Corrected test file name */ + + /* Open and copy the test file into a temporary file */ + fd_old = HDopen(filename, O_RDONLY); + CHECK(fd_old, FAIL, "HDopen"); + fd_new = HDopen(FILE5, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); + CHECK(fd_new, FAIL, "HDopen"); + + /* Copy data */ + while ((nread = HDread(fd_old, buf, (size_t)READ_OLD_BUFSIZE)) > 0) { + ssize_t write_err = HDwrite(fd_new, buf, (size_t)nread); + CHECK(write_err, -1, "HDwrite"); + } /* end while */ + + /* Close the files */ + ret = HDclose(fd_old); + CHECK(ret, FAIL, "HDclose"); + ret = HDclose(fd_new); + CHECK(ret, FAIL, "HDclose"); + + /* Open the temporary test file */ + fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* There should not be any free space in the file */ + free_space = H5Fget_freespace(fid); + CHECK(free_space, FAIL, "H5Fget_freespace"); + VERIFY(free_space, (hssize_t)0, "H5Fget_freespace"); + + /* Get the file's file creation property list */ + fcpl = H5Fget_create_plist(fid); + CHECK(fcpl, FAIL, "H5Fget_create_plist"); + + /* Retrieve the file space info */ + ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + + /* File space handling strategy should be H5F_FSPACE_STRATEGY_FSM_AGGR */ + /* Persisting free-space should be FALSE */ + /* Free-space section threshold should be 1 */ + VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); + VERIFY(persist, FALSE, "H5Pget_file_space_strategy"); + VERIFY(threshold, 1, "H5Pget_file_space_strategy"); + + /* Generate raw data */ + for (i = 0; i < 100; i++) + check[i] = (int)i; + + /* Open and read the dataset */ + did = H5Dopen2(fid, DSETNAME, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the data read is correct */ + for (i = 0; i < 100; i++) + VERIFY(rdbuf[i], check[i], "test_compatible"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Remove the dataset */ + ret = H5Ldelete(fid, DSETNAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close the plist */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-Open the file */ + fid = H5Fopen(FILE5, 
H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* The dataset should not be there */ + did = H5Dopen2(fid, DSETNAME, H5P_DEFAULT); + VERIFY(did, FAIL, "H5Dopen"); + + /* There should not be any free space in the file */ + free_space = H5Fget_freespace(fid); + CHECK(free_space, FAIL, "H5Fget_freespace"); + VERIFY(free_space, (hssize_t)0, "H5Fget_freespace"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ +} /* test_filespace_compatible */ +#endif + +/**************************************************************** +** +** test_filespace_1.10.0_compatible(): +** Verify that the latest file space management can open, read and +** modify 1.10.0 HDF5 files : +** h5fc_ext1_i.h5: H5F_FILE_SPACE_ALL, default threshold; has superblock extension but no fsinfo message +** h5fc_ext1_f.h5: H5F_FILE_SPACE_ALL_PERSIST, default threshold; has superblock extension with fsinfo +*message +** h5fc_ext2_if.h5: H5F_FILE_SPACE_ALL, non-default threshold; has superblock extension with fsinfo +*message +** h5fc_ext2_sf.h5: H5F_FILE_SPACE_VFD, default threshold; has superblock extension with fsinfo message +** h5fc_ext3_isf.h5: H5F_FILE_SPACE_AGGR_VFD, default threshold; has superblock extension with fsinfo +*message +** h5fc_ext_none.h5: H5F_FILE_SPACE_ALL, default threshold; without superblock extension +** The above files are copied from release 1.10.0 tools/h5format_convert/testfiles. +** +****************************************************************/ +#if 0 +static void +test_filespace_1_10_0_compatible(void) +{ + hid_t fid = -1; /* File id */ + hid_t did = -1; /* Dataset id */ + hid_t fcpl; /* File creation property list */ + hbool_t persist; /* Persist free-space or not */ + hsize_t threshold; /* Free-space section threshold */ + H5F_fspace_strategy_t strategy; /* File space handling strategy */ + int wbuf[24]; /* Buffer for dataset data */ + int rdbuf[24]; /* Buffer for dataset data */ + int status; /* Status from copying the existing file */ + unsigned i, j; /* Local index variable */ + herr_t ret; /* Return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("File space compatibility testing for 1.10.0 files\n")); + + for (j = 0; j < NELMTS(OLD_1_10_0_FILENAME); j++) { + /* Make a copy of the test file */ + status = h5_make_local_copy(OLD_1_10_0_FILENAME[j], FILE5); + CHECK(status, FAIL, "h5_make_local_copy"); + + /* Open the temporary test file */ + fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Get the file's file creation property list */ + fcpl = H5Fget_create_plist(fid); + CHECK(fcpl, FAIL, "H5Fget_create_plist"); + + /* Retrieve the file space info */ + ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + + switch (j) { + case 0: +#if 0 + VERIFY(strategy, H5F_FILE_SPACE_STRATEGY_DEF, "H5Pget_file_space_strategy"); + VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy"); + VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy"); +#endif + /* Open the dataset */ + did = H5Dopen2(fid, "/DSET_EA", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + for (i = 0; i < 24; i++) + wbuf[i] = (int)j + 1; + + /* Write to the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + + case 1: + VERIFY(strategy, 
H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); + VERIFY(persist, TRUE, "H5Pget_file_space_strategy"); +#if 0 + VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy"); +#endif + + /* Open the dataset */ + did = H5Dopen2(fid, "/DSET_NDATA_BT2", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + for (i = 0; i < 24; i++) + wbuf[i] = (int)j + 1; + + /* Write to the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + + case 2: + VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); +#if 0 + VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy"); +#endif + VERIFY(threshold, 2, "H5Pget_file_space_strategy"); + + /* Open the dataset */ + did = H5Dopen2(fid, "/DSET_NONE", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + for (i = 0; i < 24; i++) + wbuf[i] = (int)j + 1; + + /* Write to the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + + case 3: + VERIFY(strategy, H5F_FSPACE_STRATEGY_NONE, "H5Pget_file_space_strategy"); +#if 0 + VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy"); + VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy"); +#endif + /* Open the dataset */ + did = H5Dopen2(fid, "/GROUP/DSET_NDATA_EA", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + for (i = 0; i < 24; i++) + wbuf[i] = (int)j + 1; + + /* Write to the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + + case 4: + VERIFY(strategy, H5F_FSPACE_STRATEGY_AGGR, "H5Pget_file_space_strategy"); +#if 0 + VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy"); + VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy"); +#endif + /* Open the dataset */ + did = H5Dopen2(fid, "/GROUP/DSET_NDATA_FA", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + for (i = 0; i < 24; i++) + wbuf[i] = (int)j + 1; + + /* Write to the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + case 5: + VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); +#if 0 + VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy"); + VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy"); +#endif + /* Open the dataset */ + did = H5Dopen2(fid, "/GROUP/DSET_NDATA_NONE", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + for (i = 0; i < 24; i++) + wbuf[i] = (int)j + 1; + + /* Write to the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + + default: + break; + } + + /* Close the plist */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-Open the file */ + fid = H5Fopen(FILE5, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + switch (j) { + case 0: + 
/* Open and read the dataset */ + did = H5Dopen2(fid, "/DSET_EA", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the data read is correct */ + for (i = 0; i < 24; i++) + VERIFY(rdbuf[i], j + 1, "test_compatible"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + + case 1: + /* Open and read the dataset */ + did = H5Dopen2(fid, "/DSET_NDATA_BT2", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the data read is correct */ + for (i = 0; i < 24; i++) + VERIFY(rdbuf[i], j + 1, "test_compatible"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + + case 2: + /* Open and read the dataset */ + did = H5Dopen2(fid, "/DSET_NONE", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the data read is correct */ + for (i = 0; i < 24; i++) + VERIFY(rdbuf[i], j + 1, "test_compatible"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + + case 3: + /* Open and read the dataset */ + did = H5Dopen2(fid, "/GROUP/DSET_NDATA_EA", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the data read is correct */ + for (i = 0; i < 24; i++) + VERIFY(rdbuf[i], j + 1, "test_compatible"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + + case 4: + + /* Open and read the dataset */ + did = H5Dopen2(fid, "/GROUP/DSET_NDATA_FA", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the data read is correct */ + for (i = 0; i < 24; i++) + VERIFY(rdbuf[i], j + 1, "test_compatible"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + + case 5: + + /* Open and read the dataset */ + did = H5Dopen2(fid, "/GROUP/DSET_NDATA_NONE", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen"); + + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the data read is correct */ + for (i = 0; i < 24; i++) + VERIFY(rdbuf[i], j + 1, "test_compatible"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + break; + + default: + break; + } + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ + +} /* test_filespace_1_10_0_compatible */ +#endif + +/**************************************************************** +** +** test_filespace_round_compatible(): +** Verify that the trunk can open, read and modify these files-- +** 1) They are initially created (via gen_filespace.c) in the trunk +** with combinations of file space strategies, default/non-default +** threshold, and file spacing paging enabled/disabled. +** The library creates the file space info message with +** "mark if unknown" in these files. +** 2) They are copied to the 1.8 branch, and are opened/read/modified +** there via test_filespace_compatible() in test/tfile.c. +** The 1.8 library marks the file space info message as "unknown" +** in these files. 
+** 3) They are then copied back from the 1.8 branch to the trunk for +** compatibility testing via this routine. +** 4) Upon encountering the file space info message which is marked +** as "unknown", the library will use the default file space management +** from then on: non-persistent free-space managers, default threshold, +** and non-paging file space. +** +****************************************************************/ +#if 0 +static void +test_filespace_round_compatible(void) +{ + hid_t fid = -1; /* File id */ + hid_t fcpl = -1; /* File creation property list ID */ + unsigned j; /* Local index variable */ + H5F_fspace_strategy_t strategy; /* File space strategy */ + hbool_t persist; /* Persist free-space or not */ + hsize_t threshold; /* Free-space section threshold */ + hssize_t free_space; /* Amount of free space in the file */ + int status; /* Status from copying the existing file */ + herr_t ret; /* Return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("File space compatibility testing for files from trunk to 1_8 to trunk\n")); + + for (j = 0; j < NELMTS(FSPACE_FILENAMES); j++) { + /* Make a copy of the test file */ + status = h5_make_local_copy(FSPACE_FILENAMES[j], FILE5); + CHECK(status, FAIL, "h5_make_local_copy"); + + /* Open the temporary test file */ + fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Get the file's creation property list */ + fcpl = H5Fget_create_plist(fid); + CHECK(fcpl, FAIL, "H5Fget_create_plist"); + + ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); + VERIFY(persist, FALSE, "H5Pget_file_space_strategy"); + VERIFY(threshold, 1, "H5Pget_file_space_strategy"); + + /* There should not be any free space in the file */ + free_space = H5Fget_freespace(fid); + CHECK(free_space, FAIL, "H5Fget_freespace"); + VERIFY(free_space, (hssize_t)0, "H5Fget_freespace"); + + /* Closing */ + ret = H5Fclose(fid); + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ + +} /* test_filespace_round_compatible */ + +/**************************************************************** +** +** test_libver_bounds_real(): +** Verify that a file created and modified with the +** specified libver bounds has the specified object header +** versions for the right objects. +** +****************************************************************/ +static void +test_libver_bounds_real(H5F_libver_t libver_create, unsigned oh_vers_create, H5F_libver_t libver_mod, + unsigned oh_vers_mod) +{ + hid_t file, group; /* Handles */ + hid_t fapl; /* File access property list */ + H5O_native_info_t ninfo; /* Object info */ + herr_t ret; /* Return value */ + + /* + * Create a new file using the creation properties. 
+ */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + ret = H5Pset_libver_bounds(fapl, libver_create, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + file = H5Fcreate("tfile5.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file, FAIL, "H5Fcreate"); + + /* + * Make sure the root group has the correct object header version + */ + ret = H5Oget_native_info_by_name(file, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.version, oh_vers_create, "H5Oget_native_info_by_name"); + + /* + * Reopen the file and make sure the root group still has the correct version + */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Pset_libver_bounds(fapl, libver_mod, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + file = H5Fopen("tfile5.h5", H5F_ACC_RDWR, fapl); + CHECK(file, FAIL, "H5Fopen"); + + ret = H5Oget_native_info_by_name(file, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.version, oh_vers_create, "H5Oget_native_info_by_name"); + + /* + * Create a group named "G1" in the file, and make sure it has the correct + * object header version + */ + group = H5Gcreate2(file, "/G1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, FAIL, "H5Gcreate"); + + //! [H5Oget_native_info_snip] + + ret = H5Oget_native_info(group, &ninfo, H5O_NATIVE_INFO_HDR); + + //! [H5Oget_native_info_snip] + + CHECK(ret, FAIL, "H5Oget_native)info"); + VERIFY(ninfo.hdr.version, oh_vers_mod, "H5Oget_native_info"); + + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* + * Create a group named "/G1/G3" in the file, and make sure it has the + * correct object header version + */ + group = H5Gcreate2(file, "/G1/G3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, FAIL, "H5Gcreate"); + + ret = H5Oget_native_info(group, &ninfo, H5O_NATIVE_INFO_HDR); + CHECK(ret, FAIL, "H5Oget_native_info"); + VERIFY(ninfo.hdr.version, oh_vers_mod, "H5Oget_native_info"); + + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + //! [H5Oget_native_info_by_name_snip] + + /* + * Make sure the root group still has the correct object header version + */ + ret = H5Oget_native_info_by_name(file, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + + //! [H5Oget_native_info_by_name_snip] + + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.version, oh_vers_create, "H5Oget_native_info_by_name"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); +} /* end test_libver_bounds_real() */ +#endif + +/*------------------------------------------------------------------------- + * Function: test_libver_bounds_open + * + * Purpose: Tests opening latest file with various low/high bounds. 
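A compact sketch of the pattern test_libver_bounds_real() relies on: create a file under a given low bound and inspect the root group's object header version through the public API. The file name and the expected version passed in are placeholders, and error checks are omitted:

    #include <stdio.h>
    #include "hdf5.h"

    static void check_root_header_version(H5F_libver_t low, unsigned expected_vers)
    {
        /* Create a file with the requested low bound (high bound = latest) */
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_libver_bounds(fapl, low, H5F_LIBVER_LATEST);
        hid_t fid = H5Fcreate("bounds_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

        /* Query the root group's object header version */
        H5O_native_info_t ninfo;
        H5Oget_native_info_by_name(fid, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
        if (ninfo.hdr.version != expected_vers)
            fprintf(stderr, "unexpected object header version %u\n", ninfo.hdr.version);

        H5Fclose(fid);
        H5Pclose(fapl);
    }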
+ * + * Return: Success: 0 + * Failure: number of errors + * + *------------------------------------------------------------------------- + */ +#if 0 +#define VERBFNAME "tverbounds_dspace.h5" +#define VERBDSNAME "dataset 1" +#define SPACE1_DIM1 3 +static void +test_libver_bounds_open(void) +{ + hid_t file = -1; /* File ID */ + hid_t space = -1; /* Dataspace ID */ + hid_t dset = -1; /* Dataset ID */ + hid_t fapl = -1; /* File access property list ID */ + hid_t new_fapl = -1; /* File access property list ID for reopened file */ + hid_t dcpl = -1; /* Dataset creation property list ID */ + hsize_t dim[1] = {SPACE1_DIM1}; /* Dataset dimensions */ + H5F_libver_t low, high; /* File format bounds */ + hsize_t chunk_dim[1] = {SPACE1_DIM1}; /* Chunk dimensions */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Opening File in Various Version Bounds\n")); + + /* Create a file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Create dataspace */ + space = H5Screate_simple(1, dim, NULL); + CHECK(space, FAIL, "H5Screate_simple"); + + /* Create a dataset creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Create and set chunk plist */ + ret = H5Pset_chunk(dcpl, 1, chunk_dim); + CHECK(ret, FAIL, "H5Pset_chunk"); + ret = H5Pset_deflate(dcpl, 9); + CHECK(ret, FAIL, "H5Pset_deflate"); + ret = H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); + CHECK(ret, FAIL, "H5Pset_chunk_opts"); + + /* Create a file with (LATEST, LATEST) bounds, create a layout version 4 + dataset, then close the file */ + + /* Set version bounds to (LATEST, LATEST) */ + low = H5F_LIBVER_LATEST; + high = H5F_LIBVER_LATEST; + ret = H5Pset_libver_bounds(fapl, low, high); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Create the file */ + file = H5Fcreate(VERBFNAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file, FAIL, "H5Fcreate"); + + /* Create dataset */ + dset = H5Dcreate2(file, VERBDSNAME, H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + + /* Close dataset and file */ + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Attempt to open latest file with (earliest, v18), should fail */ + ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_EARLIEST, H5F_LIBVER_V18); + H5E_BEGIN_TRY + { + file = H5Fopen(VERBFNAME, H5F_ACC_RDONLY, fapl); + } + H5E_END_TRY; + VERIFY(file, FAIL, "Attempted to open latest file with earliest version"); + + /* Attempt to open latest file with (v18, v18), should fail */ + ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_V18, H5F_LIBVER_V18); + H5E_BEGIN_TRY + { + file = H5Fopen(VERBFNAME, H5F_ACC_RDONLY, fapl); + } + H5E_END_TRY; + VERIFY(file, FAIL, "Attempted to open latest file with v18 bounds"); + + /* Opening VERBFNAME in these combination should succeed. 
+ For each low bound, verify that it is upgraded properly */ + high = H5F_LIBVER_LATEST; + for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { + H5F_libver_t new_low = H5F_LIBVER_EARLIEST; + + /* Set version bounds for opening file */ + ret = H5Pset_libver_bounds(fapl, low, high); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Open the file */ + file = H5Fopen(VERBFNAME, H5F_ACC_RDONLY, fapl); + CHECK(file, FAIL, "H5Fopen"); + + /* Get the new file access property */ + new_fapl = H5Fget_access_plist(file); + CHECK(new_fapl, FAIL, "H5Fget_access_plist"); + + /* Get new low bound and verify that it has been upgraded properly */ + ret = H5Pget_libver_bounds(new_fapl, &new_low, NULL); + CHECK(ret, FAIL, "H5Pget_libver_bounds"); + VERIFY(new_low >= H5F_LIBVER_V110, TRUE, "Low bound should be upgraded to at least H5F_LIBVER_V110"); + + ret = H5Pclose(new_fapl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + } /* for low */ + + /* Close dataspace and property lists */ + ret = H5Sclose(space); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); +} /* end test_libver_bounds_open() */ +#endif + +/*------------------------------------------------------------------------- + * Function: test_libver_bounds_copy + * + * Purpose: Test to verify HDFFV-10800 is fixed: + * This test is copied from the user test program: copy10.c. + * (See attached programs in the jira issue.) + * + * The source file used in the test is generated by the user test + * program "fill18.c" with the 1.8 library. The file is created + * with the latest format and the dataset created in the file + * has version 3 fill value message (latest). + * + * The test creates the destination file with (v18, v18) version bounds. + * H5Ocopy() should succeed in copying the dataset in the source file + * to the destination file. 
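A minimal sketch of the H5Ocopy() scenario described above, with the destination file bounded to (v18, v18). The source file and dataset names are caller-supplied placeholders, the destination file name is illustrative, and error checks are omitted:

    #include "hdf5.h"

    static void copy_into_v18_bounded_file(const char *src_name, const char *dset_name)
    {
        /* Open the existing source file read-only */
        hid_t src = H5Fopen(src_name, H5F_ACC_RDONLY, H5P_DEFAULT);

        /* Create a destination file whose fapl is bounded to (v18, v18) */
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_libver_bounds(fapl, H5F_LIBVER_V18, H5F_LIBVER_V18);
        hid_t dst = H5Fcreate("copy_dst.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

        /* Per the test above, the copy is expected to succeed even though the
         * source dataset was written with latest-format message versions */
        H5Ocopy(src, dset_name, dst, dset_name, H5P_DEFAULT, H5P_DEFAULT);

        H5Fclose(dst);
        H5Fclose(src);
        H5Pclose(fapl);
    }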
+ * + * Return: Success: 0 + * Failure: number of errors + * + *------------------------------------------------------------------------- + */ +#if 0 +static void +test_libver_bounds_copy(void) +{ + hid_t src_fid = -1; /* File ID */ + hid_t dst_fid = -1; /* File ID */ + hid_t fapl = -1; /* File access property list ID */ + const char *src_fname; /* Source file name */ + herr_t ret; /* Generic return value */ + hbool_t driver_is_default_compatible; + + /* Output message about the test being performed */ + MESSAGE(5, ("Testing H5Ocopy a dataset in a 1.8 library file to a 1.10 library file\n")); + + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); + CHECK_I(ret, "h5_driver_is_default_vfd_compatible"); + + if (!driver_is_default_compatible) { + HDprintf("-- SKIPPED --\n"); + return; + } + + /* Get the test file name */ + src_fname = H5_get_srcdir_filename(SRC_FILE); + + /* Open the source test file */ + src_fid = H5Fopen(src_fname, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(src_fid, FAIL, "H5Fopen"); + + /* Create file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Set library version bounds to (v18, v18) */ + ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_V18, H5F_LIBVER_V18); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Create the destination file with the fapl */ + dst_fid = H5Fcreate(DST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(dst_fid, FAIL, "H5Pcreate"); + + /* Close the fapl */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Copy the dataset in the source file to the destination file */ + ret = H5Ocopy(src_fid, DSET_DS1, dst_fid, DSET_DS1, H5P_DEFAULT, H5P_DEFAULT); + VERIFY(ret, SUCCEED, "H5Ocopy"); + + /* Close the source file */ + ret = H5Fclose(src_fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close the destination file */ + ret = H5Fclose(dst_fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Remove the destination file */ + H5Fdelete(DST_FILE, H5P_DEFAULT); + +} /* end test_libver_bounds_copy() */ +#endif + +/**************************************************************** +** +** test_libver_bounds(): +** Verify that a file created and modified with various +** libver bounds is handled correctly. 
(Further testing +** welcome) +** +****************************************************************/ +#if 0 +static void +test_libver_bounds(void) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing setting library version bounds\n")); + + /* Run the tests */ + test_libver_bounds_real(H5F_LIBVER_EARLIEST, 1, H5F_LIBVER_LATEST, 2); + test_libver_bounds_real(H5F_LIBVER_LATEST, 2, H5F_LIBVER_EARLIEST, 2); + test_libver_bounds_open(); +#if 0 + test_libver_bounds_copy(); +#endif +} /* end test_libver_bounds() */ +#endif + +/************************************************************************************** +** +** test_libver_bounds_low_high(): +** Tests to verify that format versions are correct with the following five +** pairs of low/high version bounds set in fapl via H5Pset_libver_bounds(): +** (1) (earliest, v18) +** (2) (earliest, v110) +** (3) (v18, v18) +** (4) (v18, v110) +** (5) (v110, v110) +** +** For each pair of setting in fapl, verify format versions with the following +** six tests: +** (1) test_libver_bounds_super(fapl): superblock versions +** (2) test_libver_bounds_obj(fapl): object header versions +** (3) test_libver_bounds_dataset(fapl): message versions associated with dataset +** (4) test_libver_bounds_dataspace(fapl): dataspace message versions +** (5) test_libver_bounds_datatype(fapl): datatype message versions +** (6) test_libver_bounds_attributes(fapl): attribute message versions +** +**************************************************************************************/ +#if 0 +static void +test_libver_bounds_low_high(const char *env_h5_drvr) +{ + hid_t fapl = H5I_INVALID_HID; /* File access property list */ + H5F_libver_t low, high; /* Low and high bounds */ + herr_t ret; /* The return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing setting (low, high) format version bounds\n")); + + /* Create a file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, H5I_INVALID_HID, "H5Pcreate"); + + /* Loop through all the combinations of low/high version bounds */ + for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) + for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { + + H5E_BEGIN_TRY + { + /* Set the low/high version bounds */ + ret = H5Pset_libver_bounds(fapl, low, high); + } + H5E_END_TRY; + + /* Should fail: invalid combinations */ + if (high == H5F_LIBVER_EARLIEST) { + VERIFY(ret, FAIL, "H5Pset_libver_bounds"); + continue; + } + + /* Should fail: invalid combinations */ + if (high < low) { + VERIFY(ret, FAIL, "H5Pset_libver_bounds"); + continue; + } + + /* All other combinations are valid and should succeed */ + VERIFY(ret, SUCCEED, "H5Pset_libver_bounds"); + + /* Tests to verify version bounds */ + test_libver_bounds_super(fapl, env_h5_drvr); + test_libver_bounds_obj(fapl); + test_libver_bounds_dataset(fapl); + test_libver_bounds_dataspace(fapl); + test_libver_bounds_datatype(fapl); + test_libver_bounds_attributes(fapl); + } + + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + +} /* end test_libver_bounds_low_high() */ +#endif + +/*********************************************************************** +** +** test_libver_bounds_super(): +** Verify superblock version with the following two tests: +** (1) test_libver_bounds_super_create(): +** --when creating a file with the input fapl and the fcpl +** that has the following feature enabled: +** (A) default fcpl +** (B) fcpl with v1-btee K value enabled +** (C) fcpl with shared messages enabled +** 
(D) fcpl with persistent free-space manager enabled +** +** (2) test_libver_bounds_super_open(): +** --when opening a file which is created with the input fapl +** and the fcpl setting as #A to #D above. +** +** These two tests are run with or without SWMR file access. +** +*************************************************************************/ +#if 0 +static void +test_libver_bounds_super(hid_t fapl, const char *env_h5_drvr) +{ + hid_t fcpl = H5I_INVALID_HID; /* File creation property list */ + herr_t ret; /* The return value */ + + /* Create a default fcpl: #A */ + /* This will result in superblock version 0 */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate"); + + /* Verify superblock version when creating a file with input fapl, + fcpl #A and with/without SWMR access */ + if (H5FD__supports_swmr_test(env_h5_drvr)) + test_libver_bounds_super_create(fapl, fcpl, TRUE, FALSE); + test_libver_bounds_super_create(fapl, fcpl, FALSE, FALSE); + + /* Verify superblock version when opening a file which is created + with input fapl, fcpl #A and with/without SWMR access */ + if (H5FD__supports_swmr_test(env_h5_drvr)) + test_libver_bounds_super_open(fapl, fcpl, TRUE, FALSE); + test_libver_bounds_super_open(fapl, fcpl, FALSE, FALSE); + + /* Close the fcpl */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Create a fcpl with v1-btree K value enabled: #B */ + /* This will result in superblock version 1 */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate"); + ret = H5Pset_istore_k(fcpl, 64); + CHECK(ret, FAIL, "H5Pset_istore_k"); + + /* Verify superblock version when creating a file with input fapl, + fcpl #B and with/without SWMR access */ + if (H5FD__supports_swmr_test(env_h5_drvr)) + test_libver_bounds_super_create(fapl, fcpl, TRUE, FALSE); + test_libver_bounds_super_create(fapl, fcpl, FALSE, FALSE); + + /* Verify superblock version when opening a file which is created + with input fapl, fcpl #B and with/without SWMR access */ + if (H5FD__supports_swmr_test(env_h5_drvr)) + test_libver_bounds_super_open(fapl, fcpl, TRUE, FALSE); + test_libver_bounds_super_open(fapl, fcpl, FALSE, FALSE); + + /* Close the fcpl */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Create a fcpl with shared messages enabled: #C */ + /* This will result in superblock version 2 */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate"); + ret = H5Pset_shared_mesg_nindexes(fcpl, 1); + CHECK(ret, FAIL, "H5Pset_shared_mesg_nindexes"); + ret = H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_ATTR_FLAG, 2); + CHECK(ret, FAIL, "H5Pset_shared_mesg_index"); + + /* Verify superblock version when creating a file with input fapl, + fcpl #C and with/without SWMR access */ + if (H5FD__supports_swmr_test(env_h5_drvr)) + test_libver_bounds_super_create(fapl, fcpl, TRUE, FALSE); + test_libver_bounds_super_create(fapl, fcpl, FALSE, FALSE); + + /* Verify superblock version when opening a file which is created + with input fapl, fcpl #C and with/without SWMR access */ + if (H5FD__supports_swmr_test(env_h5_drvr)) + test_libver_bounds_super_open(fapl, fcpl, TRUE, FALSE); + test_libver_bounds_super_open(fapl, fcpl, FALSE, FALSE); + + /* Close the fcpl */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + if (h5_using_default_driver(env_h5_drvr)) { + /* Create a fcpl with persistent free-space manager enabled: #D */ + /* This will result in superblock version 2 */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, 
H5I_INVALID_HID, "H5Pcreate"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, 1, (hsize_t)1); + CHECK(ret, FAIL, "H5Pset_file_space"); + + /* Verify superblock version when creating a file with input fapl, + fcpl #D and with/without SWMR access */ + if (H5FD__supports_swmr_test(env_h5_drvr)) + test_libver_bounds_super_create(fapl, fcpl, TRUE, TRUE); + test_libver_bounds_super_create(fapl, fcpl, FALSE, TRUE); + + /* Verify superblock version when opening a file which is created + with input fapl, fcpl #D and with/without SWMR access */ + if (H5FD__supports_swmr_test(env_h5_drvr)) + test_libver_bounds_super_open(fapl, fcpl, TRUE, TRUE); + test_libver_bounds_super_open(fapl, fcpl, FALSE, TRUE); + + /* Close the fcpl */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + } + +} /* end test_libver_bounds_super() */ + +/************************************************************************************************** +** +** test_libver_bounds_super_create(): +** Verify the following when the file is created with the input fapl, fcpl, +** and with/without SWMR access: +** (a) the superblock version # +** (b) the file's low bound setting +** (c) fail or succeed in creating the file +** +** For file creation, the bounds setting in fapl, the feature enabled in fcpl, +** and with/without SWMR file access will determine the results for #a to #c. +** +** The first row for the following two tables is the 5 pairs of low/high bounds setting +** in the input fapl. The next three rows list the expected results for #a to #c. +** "-->" indicates "upgrade to" +** +** The last table lists the expected results in creating the file when non-default +** free-space info (fsinfo) is enabled in fcpl. +** +** Creating a file with write access +** -------------------------------------------------------------------------------- +** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | +** |______________________________________________________________________________| +** Superblock version | vers 0, 1, 2 | vers 0, 1, 2 | vers 2 | vers 2 | vers 3 | +** |------------------------------------------------------------------------------| +** File's low bound | no change | +** |------------------------------------------------------------------------------| +** File creation | succeed | +** |______________________________________________________________________________| +** +** Creating a file with SWMR-write access +** -------------------------------------------------------------------------------- +** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | +** |______________________________________________________________________________| +** Superblock version | -- | vers 3 | -- | vers 3 | vers 3 | +** |------------------------------------------------------------------------------| +** File's low bound | -- | ->v110 | -- | ->v110 | no change | +** |------------------------------------------------------------------------------| +** File creation | fail | succeed | fail | succeed | succeed | +** |______________________________________________________________________________| +** +** Creating a file with write/SWMR-write access + non-default fsinfo +** -------------------------------------------------------------------------------- +** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | +** |______________________________________________________________________________| +** File creation | fail | succeed | fail | 
succeed | succeed | +** |______________________________________________________________________________| +** +******************************************************************************************************/ +static void +test_libver_bounds_super_create(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm) +{ + hid_t fid = H5I_INVALID_HID; /* File ID */ +#if 0 + H5F_t *f = NULL; /* Internal file pointer */ +#endif + H5F_libver_t low, high; /* Low and high bounds */ +#if 0 + hbool_t ok; /* The result is ok or not */ +#endif + herr_t ret; /* The return value */ + + /* Try to create the file */ + H5E_BEGIN_TRY + { + fid = H5Fcreate(FILE8, H5F_ACC_TRUNC | (is_swmr ? H5F_ACC_SWMR_WRITE : 0), fcpl, fapl); + } + H5E_END_TRY; + +#if 0 + /* Get the internal file pointer if the create succeeds */ + if (fid >= 0) { + f = (H5F_t *)H5VL_object(fid); + CHECK_PTR(f, "H5VL_object"); + } +#endif + /* Retrieve the low/high bounds */ + ret = H5Pget_libver_bounds(fapl, &low, &high); + CHECK(ret, FAIL, "H5Pget_libver_bounds"); + + if (non_def_fsm && high < H5F_LIBVER_V110) + VERIFY(fid, H5I_INVALID_HID, "H5Fcreate"); + + else if (is_swmr) { /* SWMR is enabled */ + if (high >= H5F_LIBVER_V110) { /* Should succeed */ + VERIFY(fid >= 0, TRUE, "H5Fcreate"); +#if 0 + VERIFY(HDF5_SUPERBLOCK_VERSION_3, f->shared->sblock->super_vers, "HDF5_superblock_ver_bounds"); + VERIFY(f->shared->low_bound >= H5F_LIBVER_V110, TRUE, "HDF5_superblock_ver_bounds"); +#endif + } + else /* Should fail */ + VERIFY(fid >= 0, FALSE, "H5Fcreate"); + } + else { /* Should succeed */ + VERIFY(fid >= 0, TRUE, "H5Fcreate"); +#if 0 + VERIFY(low, f->shared->low_bound, "HDF5_superblock_ver_bounds"); + + switch (low) { + case H5F_LIBVER_EARLIEST: + ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_DEF || + f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_1 || + f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_2); + VERIFY(ok, TRUE, "HDF5_superblock_ver_bounds"); + break; + + case H5F_LIBVER_V18: + ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_2); + VERIFY(ok, TRUE, "HDF5_superblock_ver_bounds"); + break; + + case H5F_LIBVER_V110: + case H5F_LIBVER_V112: + case H5F_LIBVER_V114: + case H5F_LIBVER_V116: + ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_3); + VERIFY(ok, TRUE, "HDF5_superblock_ver_bounds"); + break; + + case H5F_LIBVER_ERROR: + case H5F_LIBVER_NBOUNDS: + default: + ERROR("H5Pget_libver_bounds"); + + } /* end switch */ +#endif + } /* end else */ + + if (fid >= 0) { /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } + +} /* end test_libver_bounds_super_create() */ + +/************************************************************************************************** +** +** test_libver_bounds_super_open(): +** Verify the following when opening a file which is created with the input fapl, fcpl, +** and with/without SWMR access: +** (a) the file's low bound setting +** (b) fail or succeed in opening the file +** +** (1) Create a file with the input fapl, fcpl and with/without SWMR access +** (2) Close the file +** (3) Reopen the file with a new fapl that is set to the 5 pairs of low/high bounds +** in a for loop. For each pair of setting in the new fapl: +** --Verify the expected results for #a and #b above. +** --Close the file. +** +** For file open, the file's superblock version, the low/high bounds setting in fapl, +** and with/without SWMR file access will determine the results for #a and #b. 
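The creation tables above boil down to one rule for SWMR-write: creating the file only succeeds when the high bound is at least v110. A small illustrative helper (file name is an assumption; the error stack is suppressed the same way the tests do):

    #include "hdf5.h"

    static hid_t try_swmr_create(H5F_libver_t low, H5F_libver_t high)
    {
        /* Attempt to create a SWMR-write file under the given bounds; per the
         * tables above this returns a negative ID unless high >= H5F_LIBVER_V110 */
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_libver_bounds(fapl, low, high);

        hid_t fid = H5I_INVALID_HID;
        H5E_BEGIN_TRY
        {
            fid = H5Fcreate("swmr_example.h5", H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE,
                            H5P_DEFAULT, fapl);
        }
        H5E_END_TRY;

        H5Pclose(fapl);
        return fid; /* caller closes with H5Fclose() on success */
    }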
+** +** The first row for the following tables (#A - #B) is the 5 pairs of low/high bounds setting +** in the input fapl. The next two rows list the expected results for #a and #b. +** "-->" indicates "upgrade to" +** +** The last table (#C) lists the expected results in opening the file when non-default +** free-space info (fsinfo) is enabled in fcpl. +** +** (A) Opening a file with write access +** +** Superblock version 0, 1 +** -------------------------------------------------------------------------------- +** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | +** |______________________________________________________________________________| +** File's low bound | no change | +** |------------------------------------------------------------------------------| +** File open | succeed | +** |______________________________________________________________________________| +** +** +** Superblock version 2 +** -------------------------------------------------------------------------------- +** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | +** |______________________________________________________________________________| +** File's low bound | -->v18 | no change | +** |------------------------------------------------------------------------------| +** File open | succeed | +** |______________________________________________________________________________| +** +** Superblock version 3 +** -------------------------------------------------------------------------------- +** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | +** |______________________________________________________________________________| +** File's low bound | -- | -->v110 | -- | -->v110 | no change | +** |------------------------------------------------------------------------------| +** File open | fail | succeed | fail | succeed | succeed | +** |______________________________________________________________________________| +** +** +** +** (B) Opening a file with SWMR-write access +** +** Superblock version 0, 1, 2 +** ------------------------------------------------------------------------------- +** | (earliest, v18) | (earliest, v10) | (v18, v18) | (v18, v110) | (v110, v110) | +** |_____________________________________________________________________________| +** File's low bound | ---- +** |-----------------------------------------------------------------------------| +** File open | fail +** |_____________________________________________________________________________| +** +** +** Superblock version 3 +** ------------------------------------------------------------------------------- +** | (earliest, v18) | (earliest, v10) | (v18, v18) | (v18, v110) | (v110, v110) | +** |_____________________________________________________________________________| +** File's low bound | -- | -->v110 | -- | -->v110 | no change | +** |-----------------------------------------------------------------------------| +** File open | fail | succeed | fail | succeed | succeed | +** |_____________________________________________________________________________| +** +** +** (C) Opening a file with write/SWMR-write access + non-default fsinfo +** ------------------------------------------------------------------------------- +** | (earliest, v18) | (earliest, v10) | (v18, v18) | (v18, v110) | (v110, v110) | +** |_____________________________________________________________________________| +** File open | fail | succeed | fail | succeed | 
succeed | +** |_____________________________________________________________________________| +** +** +******************************************************************************************************/ +static void +test_libver_bounds_super_open(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm) +{ + hid_t fid = H5I_INVALID_HID; /* File ID */ +#if 0 + H5F_t *f = NULL; /* Internal file pointer */ +#endif + hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ +#if 0 + unsigned super_vers; /* Superblock version */ +#endif + H5F_libver_t low, high; /* Low and high bounds */ + herr_t ret; /* Return value */ + + /* Create the file with the input fcpl and fapl */ + H5E_BEGIN_TRY + { + fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, fcpl, fapl); + } + H5E_END_TRY; + + /* Retrieve the low/high bounds */ + ret = H5Pget_libver_bounds(fapl, &low, &high); + CHECK(ret, FAIL, "H5Pget_libver_bounds"); + + if (non_def_fsm && high < H5F_LIBVER_V110) { + VERIFY(fid, H5I_INVALID_HID, "H5Fcreate"); + } + else { + VERIFY(fid >= 0, TRUE, "H5Fcreate"); +#if 0 + /* Get the internal file pointer */ + f = (H5F_t *)H5VL_object(fid); + CHECK_PTR(f, "H5VL_object"); + + /* The file's superblock version */ + super_vers = f->shared->sblock->super_vers; +#endif + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Create a default file access property list */ + new_fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(new_fapl, FAIL, "H5Pcreate"); + + /* Loop through all the combinations of low/high bounds in new_fapl */ + for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { + for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { + H5E_BEGIN_TRY + { + ret = H5Pset_libver_bounds(new_fapl, low, high); + } + H5E_END_TRY; + + /* Invalid combinations */ + if (ret < 0) + continue; + + /* Open the file with or without SWMR access */ + H5E_BEGIN_TRY + { + fid = H5Fopen(FILE8, H5F_ACC_RDWR | (is_swmr ? 
H5F_ACC_SWMR_WRITE : 0), new_fapl); + } + H5E_END_TRY; + + if (non_def_fsm && high < H5F_LIBVER_V110) { + VERIFY(fid, H5I_INVALID_HID, "H5Fopen"); + continue; + } +#if 0 + /* Get the internal file pointer if the open succeeds */ + if (fid >= 0) { + f = (H5F_t *)H5VL_object(fid); + CHECK_PTR(f, "H5VL_object"); + } + + /* Verify the file open succeeds or fails */ + switch (super_vers) { + case 3: + if (high >= H5F_LIBVER_V110) { + /* Should succeed */ + VERIFY(fid >= 0, TRUE, "H5Fopen"); + VERIFY(f->shared->low_bound >= H5F_LIBVER_V110, TRUE, + "HDF5_superblock_ver_bounds"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } + else /* Should fail */ + VERIFY(fid >= 0, FALSE, "H5Fopen"); + break; + + case 2: + if (is_swmr) /* Should fail */ + VERIFY(fid >= 0, FALSE, "H5Fopen"); + else { /* Should succeed */ + VERIFY(fid >= 0, TRUE, "H5Fopen"); + VERIFY(f->shared->low_bound >= H5F_LIBVER_V18, TRUE, + "HDF5_superblock_ver_bounds"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } + break; + + case 1: + case 0: + if (is_swmr) /* Should fail */ + VERIFY(fid >= 0, FALSE, "H5Fopen"); + else { /* Should succeed */ + VERIFY(fid >= 0, TRUE, "H5Fopen"); + VERIFY(f->shared->low_bound, low, "HDF5_superblock_ver_bounds"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } + break; + + default: + break; + } /* end switch */ +#endif + } /* end for */ + } /* end for */ + + /* Close the file access property list */ + ret = H5Pclose(new_fapl); + CHECK(ret, FAIL, "H5Pclose"); + } /* end else */ + +} /* end test_libver_bounds_super_open() */ +#endif + +/**************************************************************** +** +** test_libver_bounds_obj(): +** Verify object header versions: +** +** (a) Create a file with: +** --the input fapl +** --a fcpl that has shared message enabled +** Verify the root group's object header version. +** Close the file. +** +** (b) Create another file with: +** --the input fapl +** --a default fcpl +** Verify the root group's object header version. +** Close the file. +** +** (c) Reopen the same file in (b) with a new fapl. +** The new fapl is set to the 5 pairs of low/high +** bounds in a "for" loop. 
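The open side can be sketched the same way: try to reopen an existing file under a (low, high) pair and report whether the open succeeded. For a superblock-version-3 file the tables above expect failure whenever the high bound is below v110. The helper below is illustrative only (error checks omitted):

    #include "hdf5.h"

    static int can_open_with_bounds(const char *name, H5F_libver_t low, H5F_libver_t high)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_libver_bounds(fapl, low, high);

        /* Suppress the expected error stack for the failing combinations */
        hid_t fid = H5I_INVALID_HID;
        H5E_BEGIN_TRY
        {
            fid = H5Fopen(name, H5F_ACC_RDWR, fapl);
        }
        H5E_END_TRY;

        int ok = (fid >= 0);
        if (ok)
            H5Fclose(fid);
        H5Pclose(fapl);
        return ok;
    }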
For each setting in fapl: +** --Create a group in the file +** --Verify the group's object header version +** --Close and delete the group +** --Close the file +** +****************************************************************/ +#if 0 +static void +test_libver_bounds_obj(hid_t fapl) +{ + hid_t fid = H5I_INVALID_HID; /* File ID */ + hid_t gid = H5I_INVALID_HID; /* Group ID */ + hid_t fcpl = H5I_INVALID_HID; /* File creation property list */ + hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ + H5F_t *f = NULL; /* Internal file pointer */ + H5F_libver_t low, high; /* Low and high bounds */ + H5O_native_info_t ninfo; /* Object info */ + H5G_info_t ginfo; /* Group info */ + herr_t ret; /* Return value */ + + /* Retrieve the low/high bounds from the input fapl */ + ret = H5Pget_libver_bounds(fapl, &low, &high); + CHECK(ret, FAIL, "H5Pget_libver_bounds"); + + /* Create a default file creation property list */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate"); + + /* Enable shared message in the fcpl */ + /* This will result in a version 2 object header */ + ret = H5Pset_shared_mesg_nindexes(fcpl, 1); + CHECK(ret, FAIL, "H5Pset_shared_mesg_nindexes"); + ret = H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_ATTR_FLAG, 2); + CHECK(ret, FAIL, "H5Pset_shared_mesg_index"); + + /* Create the file with the fcpl and the input fapl */ + fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Get root group's object info */ + ret = H5Oget_native_info_by_name(fid, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + + /* Verify object header version is 2 because shared message is enabled */ + VERIFY(ninfo.hdr.version, H5O_VERSION_2, "H5O_obj_ver_bounds"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close the file creation property list */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Create a file with the default fcpl and input fapl */ + fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Get root group's object info */ + ret = H5Oget_native_info_by_name(fid, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + + /* Verify object header version is as indicated by low_bound */ + VERIFY(ninfo.hdr.version, H5O_obj_ver_bounds[low], "H5O_obj_ver_bounds"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Create a new default file access property list which + is used to open the file in the "for" loop */ + new_fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate"); + + /* Loop through all the combinations of low/high bounds in new_fapl */ + /* Open the file with the fapl; create a group and verify the + object header version, then delete the group and close the file.*/ + for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { + for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { + H5E_BEGIN_TRY + { + ret = H5Pset_libver_bounds(new_fapl, low, high); + } + H5E_END_TRY; + + if (ret < 0) /* Invalid combinations */ + continue; + + /* Open the file */ + H5E_BEGIN_TRY + { + fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl); + } + H5E_END_TRY; + + if (fid >= 0) { /* The file open succeeds */ + + /* Get the internal file pointer */ + f = (H5F_t *)H5VL_object(fid); + CHECK_PTR(f, "H5VL_object"); + + /* Create a group in the file */ + 
gid = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Get group information */ + ret = H5Gget_info(gid, &ginfo); + CHECK(ret, FAIL, "H5Gget_info"); + + /* Verify group storage type */ + if (f->shared->low_bound >= H5F_LIBVER_V18) + /* Links in group are stored in object header */ + VERIFY(ginfo.storage_type, H5G_STORAGE_TYPE_COMPACT, "H5Gget_info"); + else + /* Links in group are stored with a "symbol table" */ + VERIFY(ginfo.storage_type, H5G_STORAGE_TYPE_SYMBOL_TABLE, "H5Gget_info"); + + /* Get object header information */ + ret = H5Oget_native_info_by_name(gid, GRP_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + + /* Verify object header version as indicated by low_bound */ + VERIFY(ninfo.hdr.version, H5O_obj_ver_bounds[f->shared->low_bound], "H5O_obj_ver_bounds"); + + /* Close the group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Delete the group */ + ret = H5Ldelete(fid, GRP_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + } /* end if */ + } /* end for */ + } /* end for */ + + /* Close the file access property list */ + ret = H5Pclose(new_fapl); + CHECK(ret, FAIL, "H5Pclose"); + +} /* end test_libver_bounds_obj() */ + +/**************************************************************** +** +** test_libver_bounds_dataset(): +** Verify message versions associated with datasets: +** +** (a) Create a file with default fcpl and the input fapl. +** Create the following two datasets: +** --A contiguous dataset +** --A chunked dataset with "no filter edge chunks" +** For both datasets, verify the versions for the layout, +** fill value and filter pipeline messages. +** Close the file. +** +** (b) Create a new fapl that is set to the 5 pairs of low/high +** bounds in a "for" loop. 
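The group-storage check in the loop above can be illustrated in isolation: with an effective low bound of v18 or later the group's links are stored compactly in its object header, otherwise in an old-style symbol table. The group name below is a placeholder and error checks are omitted:

    #include <stdio.h>
    #include "hdf5.h"

    static void check_group_storage(hid_t fid)
    {
        /* Create a group in an already-open file and inspect its link storage */
        hid_t      gid = H5Gcreate2(fid, "example_group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        H5G_info_t ginfo;
        H5Gget_info(gid, &ginfo);

        if (ginfo.storage_type == H5G_STORAGE_TYPE_COMPACT)
            printf("links stored in the object header (new-style group)\n");
        else if (ginfo.storage_type == H5G_STORAGE_TYPE_SYMBOL_TABLE)
            printf("links stored in a symbol table (old-style group)\n");

        H5Gclose(gid);
        H5Ldelete(fid, "example_group", H5P_DEFAULT);
    }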
For each pair of setting in the +** new fapl: +** --Open the same file in (a) with the fapl +** --Create a chunked dataset with 2 unlimited +** dimensions +** --Verify the versions for the layout, fill value +** and filter pipeline messages +** --Close and delete the dataset +** --Close the file +** +****************************************************************/ +static void +test_libver_bounds_dataset(hid_t fapl) +{ + hid_t fid = H5I_INVALID_HID; /* File ID */ + hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ + hid_t did = H5I_INVALID_HID; /* Dataset ID */ + hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ + hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ + H5D_t *dset = NULL; /* Internal dataset pointer */ + H5F_t *f = NULL; /* Internal file pointer */ + H5F_libver_t low, high; /* Low and high bounds */ + herr_t ret; /* Return value */ + hsize_t fix_dims2[2] = {10, 4}; /* Dimension sizes */ + hsize_t fix_chunks2[2] = {4, 3}; /* Chunk dimension sizes */ + hsize_t dims2[2] = {1, 4}; /* Dimension sizes */ + hsize_t max_dims2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes */ + hsize_t chunks2[2] = {4, 5}; /* Chunk dimension sizes */ + + /* Retrieve the low/high bounds from the input fapl */ + ret = H5Pget_libver_bounds(fapl, &low, &high); + CHECK(ret, FAIL, "H5Pget_libver_bounds"); + + /* Create the file with the input fapl */ + fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Create the dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, H5I_INVALID_HID, "H5Screate"); + + /* Create a contiguous dataset */ + did = H5Dcreate2(fid, DSETA, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did, H5I_INVALID_HID, "H5Dcreate"); + + /* Get the internal dataset pointer */ + dset = (H5D_t *)H5VL_object(did); + CHECK_PTR(dset, "H5VL_object"); + + /* Verify version for layout and fill value messages */ + if (low == H5F_LIBVER_EARLIEST) { + /* For layout message: the earliest version the library will set is 3 */ + /* For fill value message: the earliest version the library will set is 2 */ + VERIFY(dset->shared->layout.version, H5O_LAYOUT_VERSION_DEFAULT, "H5O_layout_ver_bounds"); + VERIFY(dset->shared->dcpl_cache.fill.version, H5O_FILL_VERSION_2, "H5O_fill_ver_bounds"); + } + else { + VERIFY(dset->shared->layout.version, H5O_layout_ver_bounds[low], "H5O_layout_ver_bounds"); + VERIFY(dset->shared->dcpl_cache.fill.version, H5O_fill_ver_bounds[low], "H5O_fill_ver_bounds"); + } + + /* Verify filter pipeline message version */ + VERIFY(dset->shared->dcpl_cache.pline.version, H5O_pline_ver_bounds[low], "H5O_pline_ver_bounds"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Set up dataspace and dcpl for creating a chunked dataset + with "no filter edge chunks" enabled. 
+ This will result in a version 4 layout message */ + sid = H5Screate_simple(2, fix_dims2, NULL); + CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate"); + ret = H5Pset_chunk(dcpl, 2, fix_chunks2); + CHECK(ret, FAIL, "H5Pset_chunk"); + ret = H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); + CHECK(ret, FAIL, "H5Pset_chunk_opts"); + + /* Create the chunked dataset */ + H5E_BEGIN_TRY + { + did = H5Dcreate2(fid, DSETB, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + } + H5E_END_TRY; + + if (did >= 0) { + + /* Get the internal dataset pointer */ + dset = (H5D_t *)H5VL_object(did); + CHECK_PTR(dset, "H5VL_object"); + + /* Verify layout message version and chunk indexing type */ + VERIFY(dset->shared->layout.version, H5O_LAYOUT_VERSION_4, "H5O_layout_ver_bounds"); + VERIFY(dset->shared->layout.u.chunk.idx_type, H5D_CHUNK_IDX_FARRAY, "chunk_index_type"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + } + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Create a default file access property list which is used + to open the file in the 'for' loop */ + new_fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate"); + + /* Set up dataspace and dcpl for creating a chunked dataset with + 2 unlimited dimensions in the 'for' loop */ + sid = H5Screate_simple(2, dims2, max_dims2); + CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate"); + ret = H5Pset_chunk(dcpl, 2, chunks2); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* Loop through all the combinations of low/high bounds in new_fapl */ + /* Open the file with the fapl and create the chunked dataset */ + /* Verify the dataset's layout, fill value and filter pipeline message versions */ + for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { + for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { + H5E_BEGIN_TRY + { + ret = H5Pset_libver_bounds(new_fapl, low, high); + } + H5E_END_TRY; + + if (ret < 0) /* Invalid low/high combinations */ + continue; + + /* Open the file */ + H5E_BEGIN_TRY + { + fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl); + } + H5E_END_TRY; + + if (fid >= 0) { /* The file open succeeds */ + + /* Get the internal file pointer */ + f = (H5F_t *)H5VL_object(fid); + CHECK_PTR(f, "H5VL_object"); + + /* Create the chunked dataset */ + did = H5Dcreate2(fid, DSETC, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); + + /* Get the internal file pointer */ + dset = (H5D_t *)H5VL_object(did); + CHECK_PTR(dset, "H5VL_object"); + + if (dset) { + /* Verify the dataset's layout, fill value and filter pipeline message versions */ + /* Also verify the chunk indexing type */ + if (f->shared->low_bound == H5F_LIBVER_EARLIEST) { + /* For layout message: the earliest version the library will set is 3 */ + /* For fill value message: the earliest version the library will set is 2 */ + VERIFY(dset->shared->layout.version, H5O_LAYOUT_VERSION_DEFAULT, + "H5O_layout_ver_bounds"); + VERIFY(dset->shared->dcpl_cache.fill.version, H5O_FILL_VERSION_2, + "H5O_fill_ver_bounds"); + } + else { + VERIFY(dset->shared->layout.version, 
H5O_layout_ver_bounds[f->shared->low_bound], + "H5O_layout_ver_bounds"); + VERIFY(dset->shared->dcpl_cache.fill.version, + H5O_fill_ver_bounds[f->shared->low_bound], "H5O_fill_ver_bounds"); + } + + /* Verify the filter pipeline message version */ + VERIFY(dset->shared->dcpl_cache.pline.version, H5O_pline_ver_bounds[f->shared->low_bound], + "H5O_pline_ver_bounds"); + + /* Verify the dataset's chunk indexing type */ + if (dset->shared->layout.version == H5O_LAYOUT_VERSION_LATEST) + VERIFY(dset->shared->layout.u.chunk.idx_type, H5D_CHUNK_IDX_BT2, "chunk_index_type"); + else + VERIFY(dset->shared->layout.u.chunk.idx_type, H5D_CHUNK_IDX_BTREE, + "chunk_index_type"); + } + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Delete the dataset */ + ret = H5Ldelete(fid, DSETC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + } /* end if */ + } /* end for */ + } /* end for */ + + /* Close the file access property list */ + ret = H5Pclose(new_fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + +} /* end test_libver_bounds_dataset() */ + +/**************************************************************** +** +** test_libver_bounds_dataspace(): +** Verify dataspace message versions: +** +** (a) Create a file with default fcpl and the input fapl. +** Create the following two datasets: +** --A dataset with scalar dataspace +** --A dataset with null dataspace +** For both datasets, verify the dataspace message versions. +** Close the file. +** +** (b) Create a new fapl that is set to the 5 pairs of low/high +** bounds in a "for" loop. 
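The dcpl used for the version-4 layout case above can be sketched on its own. Per the test, enabling H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS produces a version 4 layout message, which is why the corresponding H5Dcreate2() call is wrapped in H5E_BEGIN_TRY and checked with "if (did >= 0)": it can fail when the high bound predates that layout version. Chunk sizes here are illustrative:

    #include "hdf5.h"

    static hid_t make_chunked_dcpl(void)
    {
        /* Chunked layout with the "don't filter partial edge chunks" option */
        hsize_t chunk_dims[2] = {4, 3};

        hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 2, chunk_dims);
        H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);

        return dcpl; /* caller closes with H5Pclose() */
    }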
For each pair of setting in the +** new fapl: +** --Open the same file in (a) with the fapl +** --Create a chunked dataset, a compact dataset and +** a contiguous dataset +** --Verify the dataspace message version for these +** three datasets +** --Delete the three datasets and the dataspaces +** --Close the file +** +****************************************************************/ +static void +test_libver_bounds_dataspace(hid_t fapl) +{ + hid_t fid = H5I_INVALID_HID; /* File ID */ + hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ + hid_t did = H5I_INVALID_HID, did_null = H5I_INVALID_HID; /* Dataset IDs */ + hid_t did_compact = H5I_INVALID_HID, did_contig = H5I_INVALID_HID; /* Dataset IDs */ + hid_t sid = H5I_INVALID_HID, sid_null = H5I_INVALID_HID; /* Dataspace IDs */ + hid_t sid_compact = H5I_INVALID_HID, sid_contig = H5I_INVALID_HID; /* Dataspace IDs */ + hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ + hid_t dcpl_compact = H5I_INVALID_HID, dcpl_contig = H5I_INVALID_HID; /* Dataset creation property lists */ + H5S_t *space = NULL, *space_null = NULL; /* Internal dataspace pointers */ + H5F_t *f = NULL; /* Internal file pointer */ + H5F_libver_t low, high; /* Low and high bounds */ + hsize_t dims[1] = {1}; /* Dimension sizes */ + hsize_t dims2[2] = {5, 4}; /* Dimension sizes */ + hsize_t max_dims[1] = {H5S_UNLIMITED}; /* Maximum dimension sizes */ + hsize_t chunks[1] = {4}; /* Chunk dimension sizes */ + herr_t ret; /* Return value */ + + /* Retrieve the low/high bounds from the input fapl */ + ret = H5Pget_libver_bounds(fapl, &low, &high); + CHECK(ret, FAIL, "H5Pget_libver_bounds"); + + /* Create the file with the input fapl */ + fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Create scalar dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, H5I_INVALID_HID, "H5Screate"); + + /* Create a dataset with the scalar dataspace */ + did = H5Dcreate2(fid, DSET, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did, H5I_INVALID_HID, "H5Dcreate"); + + /* Get the internal dataspace pointer */ + sid = H5Dget_space(did); + CHECK(sid, H5I_INVALID_HID, "H5Dget_space"); + space = (H5S_t *)H5I_object(sid); + CHECK_PTR(space, "H5I_object"); + + /* Verify the dataspace version */ + VERIFY(space->extent.version, H5O_sdspace_ver_bounds[low], "H5O_sdspace_ver_bounds"); + + /* Create null dataspace */ + sid_null = H5Screate(H5S_NULL); + CHECK(sid_null, H5I_INVALID_HID, "H5Screate"); + + /* Create a dataset with the null dataspace */ + did_null = H5Dcreate2(fid, DSET_NULL, H5T_NATIVE_INT, sid_null, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did_null, H5I_INVALID_HID, "H5Dcreate"); + + /* Get the internal dataspace pointer */ + sid_null = H5Dget_space(did_null); + CHECK(sid_null, H5I_INVALID_HID, "H5Dget_space"); + space_null = (H5S_t *)H5I_object(sid_null); + CHECK_PTR(space_null, "H5I_object"); + + /* Verify the dataspace version */ + VERIFY(space_null->extent.version, H5O_SDSPACE_VERSION_2, "H5O_sdspace_ver_bounds"); + + /* Close the datasets */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(did_null); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the dataspaces */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(sid_null); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Create a default file access property list which is used + to open the file in the 'for' loop */ 
+ new_fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate"); + + /* Set up dataspace and dcpl for creating a chunked dataset */ + sid = H5Screate_simple(1, dims, max_dims); + CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate"); + ret = H5Pset_chunk(dcpl, 1, chunks); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* Set up dataspace and dcpl for creating a compact dataset */ + sid_compact = H5Screate_simple(1, dims, NULL); + CHECK(sid_compact, H5I_INVALID_HID, "H5Screate_simple"); + dcpl_compact = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl_compact, H5I_INVALID_HID, "H5Pcreate"); + ret = H5Pset_layout(dcpl_compact, H5D_COMPACT); + CHECK(ret, FAIL, "H5Pset_layout"); + + /* Set up dataspace and dcpl for creating a contiguous dataset */ + sid_contig = H5Screate_simple(2, dims2, NULL); + CHECK(sid_contig, H5I_INVALID_HID, "H5Screate_simple"); + dcpl_contig = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl_contig, H5I_INVALID_HID, "H5Pcreate"); + ret = H5Pset_layout(dcpl_contig, H5D_CONTIGUOUS); + CHECK(ret, FAIL, "H5Pset_layout"); + + /* Loop through all the combinations of low/high bounds in new_fapl */ + /* Open the file and create the chunked/compact/contiguous datasets */ + /* Verify the dataspace message version for the three datasets */ + for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { + for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { + hid_t tmp_sid, tmp_sid_compact, tmp_sid_contig; /* Dataspace IDs */ + H5S_t *tmp_space, *tmp_space_compact, *tmp_space_contig; /* Internal dataspace pointers */ + + H5E_BEGIN_TRY + { + ret = H5Pset_libver_bounds(new_fapl, low, high); + } + H5E_END_TRY; + + if (ret < 0) /* Invalid low/high combinations */ + continue; + + /* Open the file */ + H5E_BEGIN_TRY + { + fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl); + } + H5E_END_TRY; + + if (fid >= 0) { /* The file open succeeds */ + + /* Get the internal file pointer */ + f = (H5F_t *)H5VL_object(fid); + CHECK_PTR(f, "H5VL_object"); + + /* Create the chunked dataset */ + did = H5Dcreate2(fid, DSETA, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); + + /* Get the internal dataspace pointer for the chunked dataset */ + tmp_sid = H5Dget_space(did); + CHECK(tmp_sid, H5I_INVALID_HID, "H5Dget_space"); + tmp_space = (H5S_t *)H5I_object(tmp_sid); + CHECK_PTR(tmp_space, "H5I_object"); + + /* Create the compact dataset */ + did_compact = H5Dcreate2(fid, DSETB, H5T_NATIVE_INT, sid_compact, H5P_DEFAULT, dcpl_compact, + H5P_DEFAULT); + CHECK(did_compact, H5I_INVALID_HID, "H5Dcreate2"); + + /* Get the internal dataspace pointer for the compact dataset */ + tmp_sid_compact = H5Dget_space(did_compact); + CHECK(tmp_sid_compact, H5I_INVALID_HID, "H5Dget_space"); + tmp_space_compact = (H5S_t *)H5I_object(tmp_sid_compact); + CHECK_PTR(tmp_space_compact, "H5I_object"); + + /* Create the contiguous dataset */ + did_contig = + H5Dcreate2(fid, DSETC, H5T_NATIVE_INT, sid_contig, H5P_DEFAULT, dcpl_contig, H5P_DEFAULT); + CHECK(did_contig, H5I_INVALID_HID, "H5Dcreate2"); + + /* Get the internal dataspace pointer for the contiguous dataset */ + tmp_sid_contig = H5Dget_space(did_contig); + CHECK(tmp_sid_contig, H5I_INVALID_HID, "H5Dget_space"); + tmp_space_contig = (H5S_t *)H5I_object(tmp_sid_contig); + CHECK_PTR(tmp_space_contig, "H5I_object"); + + if (tmp_space) { + /* Verify versions for the three dataspaces */ + VERIFY(tmp_space->extent.version, 
H5O_sdspace_ver_bounds[f->shared->low_bound], + "H5O_sdspace_ver_bounds"); + } + if (tmp_space_compact) { + VERIFY(tmp_space_compact->extent.version, H5O_sdspace_ver_bounds[f->shared->low_bound], + "H5O_sdspace_ver_bounds"); + } + if (tmp_space_contig) { + VERIFY(tmp_space_contig->extent.version, H5O_sdspace_ver_bounds[f->shared->low_bound], + "H5O_sdspace_ver_bounds"); + } + + /* Close the three datasets */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(did_compact); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Dclose(did_contig); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the three dataspaces */ + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(tmp_sid_compact); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(tmp_sid_contig); + CHECK(ret, FAIL, "H5Sclose"); + + /* Delete the three datasets */ + ret = H5Ldelete(fid, DSETA, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + ret = H5Ldelete(fid, DSETB, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + ret = H5Ldelete(fid, DSETC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + } /* end if */ + } /* end for */ + } /* end for */ + + /* Close the file access property list */ + ret = H5Pclose(new_fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close the three dataspaces */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(sid_compact); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(sid_contig); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the three dataset creation property lists */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(dcpl_compact); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(dcpl_contig); + CHECK(ret, FAIL, "H5Pclose"); + +} /* end test_libver_bounds_dataspace() */ + +/**************************************************************** +** +** test_libver_bounds_datatype(): +** Verify the datatype message version: +** +** (a) Create the following datatypes: +** 1) integer +** 2) enum +** 3) array +** 4) compound +** 5) vlen +** (b) Call test_libver_bounds_datatype_check() for each +** datatype in (a) to verify the datatype message version. 
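Of the datatypes listed in (a), arrays are the case the version check below treats specially: even under the earliest low bound the library writes at least a version 2 datatype message for them. A minimal sketch of building such an array type (dimensions are illustrative; error checks omitted):

    #include "hdf5.h"

    static hid_t make_array_type(void)
    {
        hsize_t dims[1] = {4};

        /* Array of native ints; the base type is copied by H5Tarray_create2,
         * so the local copy can be closed immediately */
        hid_t base = H5Tcopy(H5T_NATIVE_INT);
        hid_t atyp = H5Tarray_create2(base, 1, dims);
        H5Tclose(base);

        return atyp; /* caller closes with H5Tclose() */
    }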
+** +****************************************************************/ +static void +test_libver_bounds_datatype(hid_t fapl) +{ + hid_t tid = H5I_INVALID_HID, tid_enum = H5I_INVALID_HID, tid_array = H5I_INVALID_HID; /* Datatype IDs */ + hid_t tid_compound = H5I_INVALID_HID, tid_vlen = H5I_INVALID_HID; /* Datatype IDs */ + int enum_value; /* Value for enum datatype */ + typedef struct s1 { /* Data structure for compound datatype */ + char c; + int i; + } s1; + hsize_t dims[1] = {1}; /* Dimension sizes */ + herr_t ret; /* Return value */ + + /* Create integer datatype */ + tid = H5Tcopy(H5T_NATIVE_INT); + + /* Verify datatype message version */ + test_libver_bounds_datatype_check(fapl, tid); + + /* Create enum datatype */ + tid_enum = H5Tenum_create(tid); + enum_value = 0; + H5Tenum_insert(tid_enum, "val1", &enum_value); + enum_value = 1; + H5Tenum_insert(tid_enum, "val2", &enum_value); + + /* Verify datatype message version */ + test_libver_bounds_datatype_check(fapl, tid_enum); + + /* Create array datatype */ + tid_array = H5Tarray_create2(tid, 1, dims); + + /* Verify datatype message version */ + test_libver_bounds_datatype_check(fapl, tid_array); + + /* Create compound datatype */ + tid_compound = H5Tcreate(H5T_COMPOUND, sizeof(s1)); + H5Tinsert(tid_compound, "c", HOFFSET(s1, c), H5T_STD_U8LE); + H5Tinsert(tid_compound, "i", HOFFSET(s1, i), H5T_NATIVE_INT); + + /* Verify datatype message version */ + test_libver_bounds_datatype_check(fapl, tid_compound); + + /* Create vlen datatype */ + tid_vlen = H5Tvlen_create(tid); + + /* Verify datatype message version */ + test_libver_bounds_datatype_check(fapl, tid_vlen); + + /* Close the datatypes */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Tclose(tid_enum); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Tclose(tid_array); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Tclose(tid_compound); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Tclose(tid_vlen); + CHECK(ret, FAIL, "H5Tclose"); + +} /* end test_libver_bounds_datatype() */ + +/**************************************************************** +** +** test_libver_bounds_datatype_check(): +** Helper routine called by test_libver_bounds_datatype() +** to verify the datatype message version for the input tid: +** +** (a) Create a file with default fcpl and the input fapl. +** Create a contiguous dataset with the input tid. +** Verify the datatype message version. +** Create a committed datatype of string to be +** used later. +** Close the file. +** +** (b) Create a new fapl that is set to the 5 pairs of low/high +** bounds in a "for" loop. 
For each pair of setting in +** the new fapl: +** --Open the same file in (a) with the fapl +** --Verify the message version for the committed +** datatype created earlier +** --Create a chunked dataset with the input tid +** --Verify the datatype message version +** --Close and delete the dataset +** --Close the file +** +****************************************************************/ +static void +test_libver_bounds_datatype_check(hid_t fapl, hid_t tid) +{ + hid_t fid = H5I_INVALID_HID; /* File ID */ + hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ + hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ + hid_t dtid = H5I_INVALID_HID; /* Datatype ID for the dataset */ + hid_t str_tid = H5I_INVALID_HID; /* String datatype ID */ + hid_t did = H5I_INVALID_HID; /* Dataset ID */ + hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ + hsize_t dims[1] = {1}; /* Dimension sizes */ + hsize_t dims2[2] = {5, 4}; /* Dimension sizes */ + hsize_t max_dims2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes */ + hsize_t chunks[2] = {2, 3}; /* Chunk dimension sizes */ + H5T_t *dtype = NULL; /* Internal datatype pointer */ + H5T_t *str_dtype = NULL; /* Internal datatype pointer for the string datatype */ + H5F_t *f = NULL; /* Internal file pointer */ + H5F_libver_t low, high; /* Low and high bounds */ + herr_t ret; /* Return value */ + + /* Retrieve the low/high version bounds from the input fapl */ + ret = H5Pget_libver_bounds(fapl, &low, &high); + CHECK(ret, FAIL, "H5Pget_libver_bounds"); + + /* Create the file with the input fapl */ + fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Create a committed datatype of string which will be used + later inside the 'for' loop */ + str_tid = H5Tcopy(H5T_C_S1); + CHECK(str_tid, H5I_INVALID_HID, "H5Tcopy"); + ret = H5Tset_size(str_tid, (size_t)10); + CHECK(ret, FAIL, "H5Tset_size"); + ret = H5Tcommit2(fid, "datatype", str_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + ret = H5Tclose(str_tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create dataspace */ + sid = H5Screate_simple(1, dims, NULL); + CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create a dataset with the input tid */ + did = H5Dcreate2(fid, DSET1, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); + + /* Get the dataset's datatype */ + dtid = H5Dget_type(did); + CHECK(dtid, H5I_INVALID_HID, "H5Dget_type"); + + /* Get the internal datatype pointer */ + dtype = (H5T_t *)H5I_object(dtid); + CHECK_PTR(dtype, "H5I_object"); + + /* Verify the datatype message version */ + /* H5T_COMPOUND, H5T_ENUM, H5T_ARRAY: + * --the library will set version according to low_bound + * --H5T_ARRAY: the earliest version the library will set is 2 + * H5T_INTEGER, H5T_FLOAT, H5T_TIME, H5T_STRING, H5T_BITFIELD, H5T_OPAQUE, H5T_REFERENCE: + * --the library will only use basic version + */ + + if (dtype->shared->type == H5T_COMPOUND || dtype->shared->type == H5T_ENUM || + dtype->shared->type == H5T_ARRAY) { + if (dtype->shared->type == H5T_ARRAY && low == H5F_LIBVER_EARLIEST) + VERIFY(dtype->shared->version, H5O_DTYPE_VERSION_2, "H5O_dtype_ver_bounds"); + else + VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[low], "H5O_dtype_ver_bounds"); + } + else + VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[H5F_LIBVER_EARLIEST], "H5O_dtype_ver_bounds"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* 
Close the dataspace */
+    ret = H5Sclose(sid);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close the datatype */
+    ret = H5Tclose(dtid);
+    CHECK(ret, FAIL, "H5Tclose");
+
+    /* Close the file */
+    ret = H5Fclose(fid);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /* Create a default file access property list */
+    new_fapl = H5Pcreate(H5P_FILE_ACCESS);
+    CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate");
+
+    /* Set up dataspace and dcpl for creating a chunked dataset */
+    sid = H5Screate_simple(2, dims2, max_dims2);
+    CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+    dcpl = H5Pcreate(H5P_DATASET_CREATE);
+    CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate");
+    ret = H5Pset_chunk(dcpl, 2, chunks);
+    CHECK(ret, FAIL, "H5Pset_chunk");
+
+    /* Loop through all the combinations of low/high bounds */
+    /* Open the file and create the chunked dataset with the input tid */
+    /* Verify the dataset's datatype message version */
+    /* Also verify the committed datatype message version */
+    for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
+        for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
+            H5E_BEGIN_TRY
+            {
+                ret = H5Pset_libver_bounds(new_fapl, low, high);
+            }
+            H5E_END_TRY;
+
+            if (ret < 0) /* Invalid low/high combinations */
+                continue;
+
+            /* Open the file */
+            H5E_BEGIN_TRY
+            {
+                fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl);
+            }
+            H5E_END_TRY;
+
+            if (fid >= 0) { /* The file open succeeds */
+
+                /* Get the internal file pointer */
+                f = (H5F_t *)H5VL_object(fid);
+                CHECK_PTR(f, "H5VL_object");
+
+                /* Open the committed datatype */
+                str_tid = H5Topen2(fid, "datatype", H5P_DEFAULT);
+                CHECK(str_tid, FAIL, "H5Topen2");
+                str_dtype = (H5T_t *)H5VL_object(str_tid);
+                CHECK_PTR(str_dtype, "H5VL_object");
+
+                /* Verify the committed datatype message version */
+                VERIFY(str_dtype->shared->version, H5O_dtype_ver_bounds[H5F_LIBVER_EARLIEST],
+                       "H5O_dtype_ver_bounds");
+
+                /* Close the committed datatype */
+                ret = H5Tclose(str_tid);
+                CHECK(ret, FAIL, "H5Tclose");
+
+                /* Create the chunked dataset */
+                did = H5Dcreate2(fid, DSETNAME, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+                CHECK(did, H5I_INVALID_HID, "H5Dcreate2");
+
+                /* Get the dataset's datatype */
+                dtid = H5Dget_type(did);
+                CHECK(dtid, H5I_INVALID_HID, "H5Dget_type");
+
+                /* Get the internal datatype pointer */
+                dtype = (H5T_t *)H5I_object(dtid);
+                CHECK_PTR(dtype, "H5I_object");
+
+                if (dtype) {
+                    /* Verify the dataset's datatype message version */
+                    /* H5T_COMPOUND, H5T_ENUM, H5T_ARRAY:
+                     * --the library will set version according to low_bound
+                     * --H5T_ARRAY: the earliest version the library will set is 2
+                     * H5T_INTEGER, H5T_FLOAT, H5T_TIME, H5T_STRING, H5T_BITFIELD, H5T_OPAQUE, H5T_REFERENCE:
+                     * --the library will only use basic version
+                     */
+                    if (dtype->shared->type == H5T_COMPOUND || dtype->shared->type == H5T_ENUM ||
+                        dtype->shared->type == H5T_ARRAY) {
+                        if (dtype->shared->type == H5T_ARRAY && f->shared->low_bound == H5F_LIBVER_EARLIEST)
+                            VERIFY(dtype->shared->version, H5O_DTYPE_VERSION_2, "H5O_dtype_ver_bounds");
+                        else
+                            VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[f->shared->low_bound],
+                                   "H5O_dtype_ver_bounds");
+                    }
+                    else
+                        VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[H5F_LIBVER_EARLIEST],
+                               "H5O_dtype_ver_bounds");
+                }
+
+                /* Close the dataset */
+                ret = H5Dclose(did);
+                CHECK(ret, FAIL, "H5Dclose");
+
+                /* Close the dataset's datatype */
+                ret = H5Tclose(dtid);
+                CHECK(ret, FAIL, "H5Tclose");
+
+                /* Delete the dataset */
+                ret = H5Ldelete(fid, DSETNAME, H5P_DEFAULT);
+                CHECK(ret, FAIL, "H5Ldelete");
+
+                
/* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + } /* end if */ + } /* end for */ + } /* end for */ + + /* Close the file access property list */ + ret = H5Pclose(new_fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + +} /* end test_libver_bounds_datatype_check() */ + +/**************************************************************** +** +** test_libver_bounds_attributes(): +** Verify the attribute message versions: +** +** (a) Create a file with default fcpl and the input fapl. +** Create a group and attach the following three attributes +** to the group: +** (1) Attribute with a committed datatype +** (2) Attribute with integer type +** (3) Attribute with character encoding set +** Verify the three attributes' message versions. +** Close the file. +** +** (b) Create a fcpl that has shared datatype message enabled. +** Create a file with the fcpl and the input fapl. +** Create a group and attach an attribute with shared +** integer type to the group. +** Verify the attribute message version. +** Close the file +** +** (b) Create a new fapl that is set to the 5 pairs of low/high +** bounds in a "for" loop. For each pair of setting in +** the new fapl: +** --Open the same file in (b) with the fapl +** --Open the group and attach an attribute with integer +** type to the group +** --Verify the attribute message version +** --Delete the attribute +** --Close the group and the file +** +****************************************************************/ +static void +test_libver_bounds_attributes(hid_t fapl) +{ + hid_t fid = H5I_INVALID_HID; /* File ID */ + hid_t fcpl = H5I_INVALID_HID; /* File creation property list */ + hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ + hid_t tid = H5I_INVALID_HID; /* Datatype ID */ + hid_t gid = H5I_INVALID_HID; /* Group ID */ + hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ + hid_t aid = H5I_INVALID_HID; /* Attribute ID */ + hid_t attr_cpl = H5I_INVALID_HID; /* Attribute creation property list */ + H5A_t *attr = NULL; /* Internal attribute pointer */ + H5F_t *f = NULL; /* Internal file pointer */ + H5F_libver_t low, high; /* Low and high bounds */ + herr_t ret; /* Return value */ + + /* Retrieve the low/high bounds from the input fapl */ + ret = H5Pget_libver_bounds(fapl, &low, &high); + CHECK(ret, FAIL, "H5Pget_libver_bounds"); + + /* Create the file */ + fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Integer datatype */ + tid = H5Tcopy(H5T_NATIVE_INT); + CHECK(tid, H5I_INVALID_HID, "H5Tcopy"); + + /* Create a committed datatype */ + ret = H5Tcommit2(fid, "datatype", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Create dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, H5I_INVALID_HID, "H5Screate"); + + /* Create a group */ + gid = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, H5I_INVALID_HID, "H5Gcreate2"); + + /* Attach an attribute to the group with the committed datatype */ + aid = H5Acreate2(gid, "attr1", tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, H5I_INVALID_HID, "H5Acreate2"); + + /* Get the internal attribute pointer */ + attr = (H5A_t *)H5VL_object(aid); + CHECK_PTR(attr, "H5VL_object"); + + /* Verify the attribute version */ + if (low == H5F_LIBVER_EARLIEST) + /* The earliest 
version the library can set for an attribute with committed datatype is 2 */ + VERIFY(attr->shared->version, H5O_ATTR_VERSION_2, "H5O_attr_ver_bounds"); + else + VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds"); + + /* Close the attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Create an attribute to the group with integer type */ + aid = H5Acreate2(gid, "attr2", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Get the internal attribute pointer */ + attr = (H5A_t *)H5VL_object(aid); + CHECK_PTR(attr, "H5VL_object"); + + /* Verify attribute version */ + VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds"); + + /* Close the attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Enable character encoding in attribute creation property list */ + attr_cpl = H5Pcreate(H5P_ATTRIBUTE_CREATE); + CHECK(attr_cpl, H5I_INVALID_HID, "H5Pcreate"); + ret = H5Pset_char_encoding(attr_cpl, H5T_CSET_UTF8); + CHECK(ret, FAIL, "H5Pset_char_encoding"); + + /* Attach an attribute to the group with character encoding set */ + aid = H5Acreate2(gid, "attr3", H5T_NATIVE_INT, sid, attr_cpl, H5P_DEFAULT); + CHECK(aid, H5I_INVALID_HID, "H5Acreate2"); + + /* Get internal attribute pointer */ + attr = (H5A_t *)H5VL_object(aid); + CHECK_PTR(attr, "H5VL_object"); + + /* Verify attribute version */ + if (low == H5F_LIBVER_EARLIEST) + /* The earliest version the library can set for an attribute with character encoding is 3 */ + VERIFY(attr->shared->version, H5O_ATTR_VERSION_3, "H5O_attr_ver_bounds"); + else + VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds"); + + /* Close the attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close the attribute creation property list */ + ret = H5Pclose(attr_cpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close the group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Create a copy of the file creation property list */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate"); + + /* Enable shared datatype message */ + ret = H5Pset_shared_mesg_nindexes(fcpl, 1); + CHECK(ret, FAIL, "H5Pset_shared_mesg_nindexes"); + ret = H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_DTYPE_FLAG, 2); + CHECK(ret, FAIL, "H5Pset_shared_mesg_index"); + + /* Create the file with shared datatype message enabled */ + fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Create an integer datatype */ + tid = H5Tcopy(H5T_NATIVE_INT); + CHECK(tid, H5I_INVALID_HID, "H5Tcopy"); + + /* Create dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, H5I_INVALID_HID, "H5Screate"); + + /* Create a group */ + gid = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, H5I_INVALID_HID, "H5Gcreate2"); + + /* Attach an attribute to the group with shared integer datatype */ + aid = H5Acreate2(gid, ATTR_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, H5I_INVALID_HID, "H5Acreate2"); + + /* Get the internal attribute pointer */ + attr = (H5A_t *)H5VL_object(aid); + CHECK_PTR(attr, "H5VL_object"); + + /* Verify the attribute version */ + if (low == H5F_LIBVER_EARLIEST) + /* The 
earliest version the library can set for an attribute with shared datatype is 2 */ + VERIFY(attr->shared->version, H5O_ATTR_VERSION_2, "H5O_attr_ver_bounds"); + else + VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds"); + + /* Close the attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close the group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Create a default file access property list */ + new_fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(new_fapl, FAIL, "H5Pcreate"); + + /* Create a scalar dataspace to be used later for the attribute */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, H5I_INVALID_HID, "H5Screate"); + + /* Loop through all the combinations of low/high bounds */ + /* Open the file and group and attach an attribute to the group */ + /* Verify the attribute version */ + for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { + for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { + H5E_BEGIN_TRY + { + ret = H5Pset_libver_bounds(new_fapl, low, high); + } + H5E_END_TRY; + + if (ret < 0) /* Invalid low/high combinations */ + continue; + + /* Open the file */ + H5E_BEGIN_TRY + { + fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl); + } + H5E_END_TRY; + + if (fid >= 0) { /* The file open succeeds */ + + /* Get the internal file pointer */ + f = (H5F_t *)H5VL_object(fid); + CHECK_PTR(f, "H5VL_object"); + + /* Open the group */ + gid = H5Gopen2(fid, GRP_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Attach an attribute to the group */ + aid = H5Acreate2(gid, "attr1", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Get the internal attribute pointer */ + attr = (H5A_t *)H5VL_object(aid); + CHECK_PTR(attr, "H5VL_object"); + + /* Verify the attribute message version */ + VERIFY(attr->shared->version, H5O_attr_ver_bounds[f->shared->low_bound], + "H5O_attr_ver_bounds"); + + /* Close the attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Delete the attribute */ + ret = H5Adelete(gid, "attr1"); + CHECK(ret, FAIL, "H5Adelete"); + + /* Close the group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + } /* end if */ + } /* end for */ + } /* end for */ + + /* Close the file access property list */ + ret = H5Pclose(new_fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + +} /* end test_libver_bounds_attributes() */ + +/**************************************************************** +** +** test_libver_macros(): +** Verify that H5_VERSION_GE and H5_VERSION_LE work correactly. 
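+**
+**      (Illustrative compile-time behaviour, assuming the library in use
+**       is at least release 1.8.0:
+**           #if H5_VERSION_GE(1, 8, 0)   -- branch is compiled
+**           #if H5_VERSION_GE(99, 0, 0)  -- branch is skipped
+**       Both macros compare against H5_VERS_MAJOR, H5_VERS_MINOR and
+**       H5_VERS_RELEASE.)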
+**
+****************************************************************/
+static void
+test_libver_macros(void)
+{
+    int major   = H5_VERS_MAJOR;
+    int minor   = H5_VERS_MINOR;
+    int release = H5_VERS_RELEASE;
+
+    /* Output message about test being performed */
+    MESSAGE(5, ("Testing macros for library version comparison\n"));
+
+    VERIFY(H5_VERSION_GE(major, minor, release), TRUE, "H5_VERSION_GE");
+    VERIFY(H5_VERSION_GE(major - 1, minor, release), TRUE, "H5_VERSION_GE");
+    VERIFY(H5_VERSION_GE(major - 1, minor + 1, release), TRUE, "H5_VERSION_GE");
+    VERIFY(H5_VERSION_GE(major - 1, minor, release + 1), TRUE, "H5_VERSION_GE");
+    VERIFY(H5_VERSION_GE(major, minor - 1, release), TRUE, "H5_VERSION_GE");
+    VERIFY(H5_VERSION_GE(major, minor - 1, release + 1), TRUE, "H5_VERSION_GE");
+    if (H5_VERS_RELEASE > 0)
+        VERIFY(H5_VERSION_GE(major, minor, release - 1), TRUE, "H5_VERSION_GE");
+
+    VERIFY(H5_VERSION_GE(major + 1, minor, release), FALSE, "H5_VERSION_GE");
+    VERIFY(H5_VERSION_GE(major + 1, minor - 1, release), FALSE, "H5_VERSION_GE");
+    VERIFY(H5_VERSION_GE(major + 1, minor - 1, release - 1), FALSE, "H5_VERSION_GE");
+    VERIFY(H5_VERSION_GE(major, minor + 1, release), FALSE, "H5_VERSION_GE");
+    VERIFY(H5_VERSION_GE(major, minor + 1, release - 1), FALSE, "H5_VERSION_GE");
+    VERIFY(H5_VERSION_GE(major, minor, release + 1), FALSE, "H5_VERSION_GE");
+
+    VERIFY(H5_VERSION_LE(major, minor, release), TRUE, "H5_VERSION_LE");
+    VERIFY(H5_VERSION_LE(major + 1, minor, release), TRUE, "H5_VERSION_LE");
+    VERIFY(H5_VERSION_LE(major + 1, minor - 1, release), TRUE, "H5_VERSION_LE");
+    VERIFY(H5_VERSION_LE(major + 1, minor - 1, release - 1), TRUE, "H5_VERSION_LE");
+    VERIFY(H5_VERSION_LE(major, minor + 1, release), TRUE, "H5_VERSION_LE");
+    VERIFY(H5_VERSION_LE(major, minor + 1, release - 1), TRUE, "H5_VERSION_LE");
+    VERIFY(H5_VERSION_LE(major, minor, release + 1), TRUE, "H5_VERSION_LE");
+
+    VERIFY(H5_VERSION_LE(major - 1, minor, release), FALSE, "H5_VERSION_LE");
+    VERIFY(H5_VERSION_LE(major - 1, minor + 1, release), FALSE, "H5_VERSION_LE");
+    VERIFY(H5_VERSION_LE(major - 1, minor + 1, release + 1), FALSE, "H5_VERSION_LE");
+    VERIFY(H5_VERSION_LE(major, minor - 1, release), FALSE, "H5_VERSION_LE");
+    VERIFY(H5_VERSION_LE(major, minor - 1, release + 1), FALSE, "H5_VERSION_LE");
+    if (H5_VERS_RELEASE > 0)
+        VERIFY(H5_VERSION_LE(major, minor, release - 1), FALSE, "H5_VERSION_LE");
+} /* test_libver_macros() */
+
+/****************************************************************
+**
+** test_libver_macros2():
+**      Verify that H5_VERSION_GE works correctly and show how
+**      to use it.
+**
+****************************************************************/
+static void
+test_libver_macros2(void)
+{
+    hid_t  file;
+    hid_t  grp;
+    htri_t status;
+    herr_t ret; /* Return value */
+
+    /* Output message about test being performed */
+    MESSAGE(5, ("Testing macros for library version comparison with a file\n"));
+
+    /*
+     * Create a file.
+     */
+    file = H5Fcreate(FILE6, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(file, FAIL, "H5Fcreate");
+
+    /*
+     * Create a group in the file.
+     */
+    grp = H5Gcreate2(file, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(grp, FAIL, "H5Gcreate2");
+
+    /*
+     * Close the group
+     */
+    ret = H5Gclose(grp);
+    CHECK(ret, FAIL, "H5Gclose");
+
+    /*
+     * Delete the group using a different function based on the library version.
+     * And verify the action. 
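+     * (Both branches below use existing public APIs: H5Ldelete()/H5Lexists()
+     *  when building against 1.8 or newer, and the old H5Gunlink()/H5Gopen()
+     *  calls otherwise; only one branch is ever compiled in.)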
+ */ +#if H5_VERSION_GE(1, 8, 0) + ret = H5Ldelete(file, "Group", H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lunlink"); + + status = H5Lexists(file, "Group", H5P_DEFAULT); + VERIFY(status, FALSE, "H5Lexists"); +#else + ret = H5Gunlink(file, "Group"); + CHECK(ret, FAIL, "H5Gunlink"); + + H5E_BEGIN_TRY + { + grp = H5Gopen(file, "Group"); + } + H5E_END_TRY; + VERIFY(grp, FAIL, "H5Gopen"); +#endif + + /* + * Close the file. + */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + +} /* test_libver_macros2() */ +#endif + +#if 0 +/**************************************************************** +** +** test_filesize(): +** Verify H5Fincrement_filesize() and H5Fget_eoa() works as +** indicated in the "RFC: Enhancement to the tool h5clear". +** +****************************************************************/ +static void +test_incr_filesize(void) +{ + hid_t fid; /* File opened with read-write permission */ + h5_stat_size_t filesize; /* Size of file when empty */ + hid_t fcpl; /* File creation property list */ + hid_t fapl; /* File access property list */ + hid_t dspace; /* Dataspace ID */ + hid_t dset; /* Dataset ID */ + hid_t dcpl; /* Dataset creation property list */ + unsigned u; /* Local index variable */ + char filename[FILENAME_LEN]; /* Filename to use */ + char name[32]; /* Dataset name */ + haddr_t stored_eoa; /* The stored EOA value */ + hid_t driver_id = -1; /* ID for this VFD */ + unsigned long driver_flags = 0; /* VFD feature flags */ + herr_t ret; /* Return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Fincrement_filesize() and H5Fget_eoa())\n")); + + fapl = h5_fileaccess(); + h5_fixname(FILE8, fapl, filename, sizeof filename); + + /* Get the VFD feature flags */ + driver_id = H5Pget_driver(fapl); + CHECK(driver_id, FAIL, "H5Pget_driver"); + + ret = H5FDdriver_query(driver_id, &driver_flags); + CHECK(ret, FAIL, "H5PDdriver_query"); + + /* Check whether the VFD feature flag supports these two public routines */ + if (driver_flags & H5FD_FEAT_SUPPORTS_SWMR_IO) { + + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + + /* Set file space strategy */ + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, FALSE, (hsize_t)1); + CHECK(ret, FAIL, "H5P_set_file_space_strategy"); + + /* Create the test file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + dspace = H5Screate(H5S_SCALAR); + CHECK(dspace, FAIL, "H5Screate"); + + /* Create a dataset creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Set the space allocation time to early */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create datasets in file */ + for (u = 0; u < 10; u++) { + HDsnprintf(name, sizeof(name), "Dataset %u", u); + dset = H5Dcreate2(fid, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + } /* end for */ + + /* Close dataspace */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Get the file size */ + filesize = h5_get_file_size(filename, fapl); + + /* Open the file */ + fid = H5Fopen(filename, H5F_ACC_RDWR, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Get 
the stored EOA */ + ret = H5Fget_eoa(fid, &stored_eoa); + CHECK(ret, FAIL, "H5Fget_eoa"); + + /* Verify the stored EOA is the same as filesize */ + VERIFY(filesize, stored_eoa, "file size"); + + /* Set the EOA to the MAX(EOA, EOF) + 512 */ + ret = H5Fincrement_filesize(fid, 512); + CHECK(ret, FAIL, "H5Fincrement_filesize"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Get the file size */ + filesize = h5_get_file_size(filename, fapl); + + /* Verify the filesize is the previous stored_eoa + 512 */ + VERIFY(filesize, stored_eoa + 512, "file size"); + + /* Close the file access property list */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close the file creation property list */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + } +} /* end test_incr_filesize() */ +#endif + +/**************************************************************** +** +** test_min_dset_ohdr(): +** Test API calls to toggle dataset object header minimization. +** +** TODO (as separate function?): +** + setting persists between close and (re)open? +** + dataset header sizes created while changing value of toggle +** +****************************************************************/ +#if 0 +static void +test_min_dset_ohdr(void) +{ + const char basename[] = "min_dset_ohdr_testfile"; + char filename[FILENAME_LEN] = ""; + hid_t file_id = -1; + hid_t file2_id = -1; + hbool_t minimize; + herr_t ret; + + MESSAGE(5, ("Testing dataset object header minimization\n")); + + /*********/ + /* SETUP */ + /*********/ + + h5_fixname(basename, H5P_DEFAULT, filename, sizeof(filename)); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK_I(file_id, "H5Fcreate"); + + /*********/ + /* TESTS */ + /*********/ + + /*---------------------------------------- + * TEST default value + */ + ret = H5Fget_dset_no_attrs_hint(file_id, &minimize); + CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); + VERIFY(minimize, FALSE, "minimize flag"); + + /*---------------------------------------- + * TEST set to TRUE + */ + ret = H5Fset_dset_no_attrs_hint(file_id, TRUE); + CHECK(ret, FAIL, "H5Fset_dset_no_attrs_hint"); + + ret = H5Fget_dset_no_attrs_hint(file_id, &minimize); + CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); + VERIFY(minimize, TRUE, "minimize flag"); + + /*---------------------------------------- + * TEST second file open on same filename + */ + file2_id = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK_I(file2_id, "H5Fopen"); + + /* verify TRUE setting on second open + */ + ret = H5Fget_dset_no_attrs_hint(file_id, &minimize); + CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); + VERIFY(minimize, TRUE, "minimize flag"); + + /* re-set to FALSE on first open + */ + ret = H5Fset_dset_no_attrs_hint(file_id, FALSE); + CHECK(ret, FAIL, "H5Fset_dset_no_attrs_hint"); + + /* verify FALSE set on both opens + */ + ret = H5Fget_dset_no_attrs_hint(file_id, &minimize); + CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); + VERIFY(minimize, FALSE, "minimize flag"); + + ret = H5Fget_dset_no_attrs_hint(file2_id, &minimize); + CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); + VERIFY(minimize, FALSE, "minimize flag"); + + /* re-set to TRUE on second open + */ + ret = H5Fset_dset_no_attrs_hint(file2_id, TRUE); + CHECK(ret, FAIL, "H5Fset_dset_no_attrs_hint"); + + /* verify TRUE set on both opens + */ + ret = H5Fget_dset_no_attrs_hint(file_id, &minimize); + CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); + VERIFY(minimize, TRUE, "minimize flag"); + + ret = 
H5Fget_dset_no_attrs_hint(file2_id, &minimize); + CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); + VERIFY(minimize, TRUE, "minimize flag"); + + /*---------------------------------------- + * TEST error cases + */ + + /* trying to set with invalid file ID */ + H5E_BEGIN_TRY + { + ret = H5Fset_dset_no_attrs_hint(-1, TRUE); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Fset_dset_no_attrs_hint"); + + /* trying to get with invalid file ID */ + H5E_BEGIN_TRY + { + ret = H5Fget_dset_no_attrs_hint(-1, &minimize); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Fget_dset_no_attrs_hint"); + + /* trying to get with invalid pointer */ + H5E_BEGIN_TRY + { + ret = H5Fget_dset_no_attrs_hint(file_id, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Fget_dset_no_attrs_hint"); + + /************/ + /* TEARDOWN */ + /************/ + + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(file2_id); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_min_dset_ohdr() */ +#endif + +/**************************************************************** +** +** test_deprec(): +** Test deprecated functionality. +** +****************************************************************/ +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS +static void +test_deprec(const char *env_h5_drvr) +{ + hid_t file; /* File IDs for old & new files */ + hid_t fcpl; /* File creation property list */ + hid_t fapl; /* File creation property list */ + hid_t new_fapl; + hsize_t align; + unsigned super; /* Superblock version # */ + unsigned freelist; /* Free list version # */ + unsigned stab; /* Symbol table entry version # */ + unsigned shhdr; /* Shared object header version # */ + H5F_info1_t finfo; /* global information about file */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing deprecated routines\n")); + + /* Creating a file with the default file creation property list should + * create a version 0 superblock + */ + + /* Create file with default file creation property list */ + file = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + /* Get the file's version information */ + ret = H5Fget_info1(file, &finfo); + CHECK(ret, FAIL, "H5Fget_info1"); + VERIFY(finfo.super_ext_size, 0, "H5Fget_info1"); + VERIFY(finfo.sohm.hdr_size, 0, "H5Fget_info1"); + VERIFY(finfo.sohm.msgs_info.index_size, 0, "H5Fget_info1"); + VERIFY(finfo.sohm.msgs_info.heap_size, 0, "H5Fget_info1"); + + /* Get the file's dataset creation property list */ + fcpl = H5Fget_create_plist(file); + CHECK(fcpl, FAIL, "H5Fget_create_plist"); + + /* Get the file's version information */ + ret = H5Pget_version(fcpl, &super, &freelist, &stab, &shhdr); + CHECK(ret, FAIL, "H5Pget_version"); + VERIFY(super, 0, "H5Pget_version"); + VERIFY(freelist, 0, "H5Pget_version"); + VERIFY(stab, 0, "H5Pget_version"); + VERIFY(shhdr, 0, "H5Pget_version"); + + /* Close FCPL */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Only run this part of the test with the sec2/default driver */ + if (h5_using_default_driver(env_h5_drvr)) { + /* Create a file creation property list */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + + /* Set a property in the FCPL that will push the superblock version up */ + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0); + ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); + CHECK(ret, FAIL, 
"H5Pset_file_space_strategy"); + + fapl = H5Pcreate(H5P_FILE_ACCESS); + ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024); + CHECK(ret, FAIL, "H5Pset_alignment"); + + /* Creating a file with the non-default file creation property list should + * create a version 2 superblock + */ + + /* Create file with custom file creation property list */ + file = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(file, FAIL, "H5Fcreate"); + + new_fapl = H5Fget_access_plist(file); + H5Pget_alignment(new_fapl, NULL, &align); + + /* Close FCPL */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Get the file's version information */ + ret = H5Fget_info1(file, &finfo); + CHECK(ret, FAIL, "H5Fget_info1"); + VERIFY(finfo.super_ext_size, 152, "H5Fget_info1"); + VERIFY(finfo.sohm.hdr_size, 0, "H5Fget_info1"); + VERIFY(finfo.sohm.msgs_info.index_size, 0, "H5Fget_info1"); + VERIFY(finfo.sohm.msgs_info.heap_size, 0, "H5Fget_info1"); + + /* Get the file's dataset creation property list */ + fcpl = H5Fget_create_plist(file); + CHECK(fcpl, FAIL, "H5Fget_create_plist"); + + /* Get the file's version information */ + ret = H5Pget_version(fcpl, &super, &freelist, &stab, &shhdr); + CHECK(ret, FAIL, "H5Pget_version"); + VERIFY(super, 2, "H5Pget_version"); + VERIFY(freelist, 0, "H5Pget_version"); + VERIFY(stab, 0, "H5Pget_version"); + VERIFY(shhdr, 0, "H5Pget_version"); + + /* Close FCPL */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + file = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + /* Get the file's version information */ + ret = H5Fget_info1(file, &finfo); + CHECK(ret, FAIL, "H5Fget_info1"); + VERIFY(finfo.super_ext_size, 152, "H5Fget_info1"); + VERIFY(finfo.sohm.hdr_size, 0, "H5Fget_info1"); + VERIFY(finfo.sohm.msgs_info.index_size, 0, "H5Fget_info1"); + VERIFY(finfo.sohm.msgs_info.heap_size, 0, "H5Fget_info1"); + + /* Get the file's creation property list */ + fcpl = H5Fget_create_plist(file); + CHECK(fcpl, FAIL, "H5Fget_create_plist"); + + /* Get the file's version information */ + ret = H5Pget_version(fcpl, &super, &freelist, &stab, &shhdr); + CHECK(ret, FAIL, "H5Pget_version"); + VERIFY(super, 2, "H5Pget_version"); + VERIFY(freelist, 0, "H5Pget_version"); + VERIFY(stab, 0, "H5Pget_version"); + VERIFY(shhdr, 0, "H5Pget_version"); + + /* Close FCPL */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + { /* Test deprecated H5Pget/set_file_space() */ + + H5F_file_space_type_t old_strategy; + hsize_t old_threshold; + hid_t fid; + hid_t ffcpl; + + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + + ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold); + CHECK(ret, FAIL, "H5Pget_file_space"); + VERIFY(old_strategy, H5F_FILE_SPACE_ALL, "H5Pget_file_space"); + VERIFY(old_threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space"); + + /* Set file space strategy and free space section threshold */ + ret = H5Pset_file_space(fcpl, H5F_FILE_SPACE_ALL_PERSIST, (hsize_t)0); + CHECK(ret, FAIL, "H5Pget_file_space"); + + /* Get the file space info from the creation property */ + ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold); + CHECK(ret, FAIL, "H5Pget_file_space"); + VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space"); + VERIFY(old_threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space"); + + ret = 
H5Pset_file_space(fcpl, H5F_FILE_SPACE_DEFAULT, (hsize_t)3); + CHECK(ret, FAIL, "H5Pget_file_space"); + + ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold); + CHECK(ret, FAIL, "H5Pget_file_space"); + VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space"); + VERIFY(old_threshold, 3, "H5Pget_file_space"); + + /* Create a file */ + fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + old_strategy = H5F_FILE_SPACE_DEFAULT; + old_threshold = 0; + ffcpl = H5Fget_create_plist(fid); + ret = H5Pget_file_space(ffcpl, &old_strategy, &old_threshold); + CHECK(ret, FAIL, "H5Pget_file_space"); + VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space"); + VERIFY(old_threshold, 3, "H5Pget_file_space"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Pclose(ffcpl); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Reopen the file */ + fid = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + old_strategy = H5F_FILE_SPACE_DEFAULT; + old_threshold = 0; + ffcpl = H5Fget_create_plist(fid); + ret = H5Pget_file_space(ffcpl, &old_strategy, &old_threshold); + CHECK(ret, FAIL, "H5Pget_file_space"); + VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space"); + VERIFY(old_threshold, 3, "H5Pget_file_space"); + + ret = H5Pclose(ffcpl); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } + } + +} /* test_deprec */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif + +/**************************************************************** +** +** test_file(): Main low-level file I/O test routine. +** +****************************************************************/ +void +test_file(void) +{ + const char *env_h5_drvr; /* File Driver value from environment */ + hid_t fapl_id = H5I_INVALID_HID; /* VFD-dependent fapl ID */ + hbool_t driver_is_default_compatible; + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Low-Level File I/O\n")); + + /* Get the VFD to use */ + env_h5_drvr = HDgetenv(HDF5_DRIVER); + if (env_h5_drvr == NULL) + env_h5_drvr = "nomatch"; + + /* Improved version of VFD-dependent checks */ + fapl_id = h5_fileaccess(); + CHECK(fapl_id, H5I_INVALID_HID, "h5_fileaccess"); + + ret = h5_driver_is_default_vfd_compatible(fapl_id, &driver_is_default_compatible); + CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); + + test_file_create(); /* Test file creation(also creation templates)*/ + test_file_open(env_h5_drvr); /* Test file opening */ + test_file_reopen(); /* Test file reopening */ + test_file_close(); /* Test file close behavior */ + test_get_file_id(); /* Test H5Iget_file_id */ + test_get_obj_ids(); /* Test H5Fget_obj_ids for Jira Issue 8528 */ + test_file_perm(); /* Test file access permissions */ + test_file_perm2(); /* Test file access permission again */ + test_file_is_accessible(env_h5_drvr); /* Test detecting HDF5 files correctly */ + test_file_delete(fapl_id); /* Test H5Fdelete */ + test_file_open_dot(); /* Test opening objects with "." 
for a name */ + test_file_open_overlap(); /* Test opening files in an overlapping manner */ + test_file_getname(); /* Test basic H5Fget_name() functionality */ + test_file_double_root_open(); /* Test opening root group from two files works properly */ + test_file_double_group_open(); /* Test opening same group from two files works properly */ + test_file_double_dataset_open(); /* Test opening same dataset from two files works properly */ + test_file_double_datatype_open(); /* Test opening same named datatype from two files works properly */ + test_file_double_file_dataset_open(TRUE); + test_file_double_file_dataset_open(FALSE); +#if 0 + test_userblock_file_size( + env_h5_drvr); /* Tests that files created with a userblock have the correct size */ + test_cached_stab_info(); /* Tests that files are created with cached stab info in the superblock */ + + if (driver_is_default_compatible) { + test_rw_noupdate(); /* Test to ensure that RW permissions don't write the file unless dirtied */ + } + + test_userblock_alignment( + env_h5_drvr); /* Tests that files created with a userblock and alignment interact properly */ + test_userblock_alignment_paged(env_h5_drvr); /* Tests files created with a userblock and alignment (via + paged aggregation) interact properly */ + test_filespace_info(env_h5_drvr); /* Test file creation public routines: */ + /* H5Pget/set_file_space_strategy() & H5Pget/set_file_space_page_size() */ + /* Skipped testing for multi/split drivers */ + test_file_freespace(env_h5_drvr); /* Test file public routine H5Fget_freespace() */ + /* Skipped testing for multi/split drivers */ + /* Setup for multi/split drivers are there already */ + test_sects_freespace(env_h5_drvr, + TRUE); /* Test file public routine H5Fget_free_sections() for new format */ + /* Skipped testing for multi/split drivers */ + /* Setup for multi/split drivers are there already */ + test_sects_freespace(env_h5_drvr, FALSE); /* Test file public routine H5Fget_free_sections() */ + /* Skipped testing for multi/split drivers */ + + if (driver_is_default_compatible) { + test_filespace_compatible(); /* Test compatibility for file space management */ + + test_filespace_round_compatible(); /* Testing file space compatibility for files from trunk to 1_8 to + trunk */ + test_filespace_1_10_0_compatible(); /* Testing file space compatibility for files from release 1.10.0 + */ + } + + test_libver_bounds(); /* Test compatibility for file space management */ + test_libver_bounds_low_high(env_h5_drvr); + test_libver_macros(); /* Test the macros for library version comparison */ + test_libver_macros2(); /* Test the macros for library version comparison */ + test_incr_filesize(); /* Test H5Fincrement_filesize() and H5Fget_eoa() */ + test_min_dset_ohdr(); /* Test dataset object header minimization */ +#ifndef H5_NO_DEPRECATED_SYMBOLS + test_file_ishdf5(env_h5_drvr); /* Test detecting HDF5 files correctly */ + test_deprec(env_h5_drvr); /* Test deprecated routines */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif + + ret = H5Pclose(fapl_id); + CHECK(ret, FAIL, "H5Pclose"); + +} /* test_file() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_file + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Albert Cheng + * July 2, 1998 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +cleanup_file(void) +{ + H5E_BEGIN_TRY + { + H5Fdelete(SFILE1, H5P_DEFAULT); + H5Fdelete(FILE1, H5P_DEFAULT); 
+ H5Fdelete(FILE2, H5P_DEFAULT); + H5Fdelete(FILE3, H5P_DEFAULT); + H5Fdelete(FILE4, H5P_DEFAULT); + H5Fdelete(FILE5, H5P_DEFAULT); + H5Fdelete(FILE6, H5P_DEFAULT); + H5Fdelete(FILE7, H5P_DEFAULT); + H5Fdelete(DST_FILE, H5P_DEFAULT); + } + H5E_END_TRY; +} diff --git a/test/API/tgenprop.c b/test/API/tgenprop.c new file mode 100644 index 00000000000..c1ee8af990d --- /dev/null +++ b/test/API/tgenprop.c @@ -0,0 +1,2201 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: tgenprop + * + * Test the Generic Property functionality + * + *************************************************************/ + +#define H5P_FRIEND /*suppress error about including H5Ppkg */ + +/* Define this macro to indicate that the testing APIs should be available */ +#define H5P_TESTING + +#include "testhdf5.h" + +/* #include "H5Dprivate.h" */ /* For Dataset creation property list names */ +/* #include "H5Ppkg.h" */ /* Generic Properties */ + +#define FILENAME "tgenprop.h5" + +/* Property definitions */ +#define CLASS1_NAME "Class 1" +#define CLASS1_PATH "root/Class 1" + +#define CLASS2_NAME "Class 2" +#define CLASS2_PATH "root/Class 1/Class 2" + +/* Property definitions */ +#define PROP1_NAME "Property 1" +int prop1_def = 10; /* Property 1 default value */ +#define PROP1_SIZE sizeof(prop1_def) +#define PROP1_DEF_VALUE (&prop1_def) + +#define PROP2_NAME "Property 2" +float prop2_def = 3.14F; /* Property 2 default value */ +#define PROP2_SIZE sizeof(prop2_def) +#define PROP2_DEF_VALUE (&prop2_def) + +#define PROP3_NAME "Property 3" +char prop3_def[10] = "Ten chars"; /* Property 3 default value */ +#define PROP3_SIZE sizeof(prop3_def) +#define PROP3_DEF_VALUE (&prop3_def) + +#define PROP4_NAME "Property 4" +double prop4_def = 1.41; /* Property 4 default value */ +#define PROP4_SIZE sizeof(prop4_def) +#define PROP4_DEF_VALUE (&prop4_def) + +/* Structs used during iteration */ +typedef struct iter_data_t { + int iter_count; + char **names; +} iter_data_t; + +typedef struct count_data_t { + int count; + hid_t id; +} count_data_t; + +/**************************************************************** +** +** test_genprop_basic_class(): Test basic generic property list code. +** Tests creating new generic classes. 
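+**
+**      (Call pattern exercised below, with NULL passed for all six optional
+**       callback/data arguments:
+**           cid = H5Pcreate_class(H5P_ROOT, CLASS1_NAME,
+**                                 NULL, NULL, NULL, NULL, NULL, NULL);
+**           ...
+**           ret = H5Pclose_class(cid);)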
+** +****************************************************************/ +static void +test_genprop_basic_class(void) +{ + hid_t cid1; /* Generic Property class ID */ + hid_t cid2; /* Generic Property class ID */ + hid_t cid3; /* Generic Property class ID */ + char *name; /* Name of class */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Generic Property List Class Creation Functionality\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Check class name */ + name = H5Pget_class_name(cid1); + CHECK_PTR(name, "H5Pget_class_name"); + if (HDstrcmp(name, CLASS1_NAME) != 0) + TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME); + H5free_memory(name); + + /* Check class parent */ + cid2 = H5Pget_class_parent(cid1); + CHECK_I(cid2, "H5Pget_class_parent"); + + /* Verify class parent correct */ + ret = H5Pequal(cid2, H5P_ROOT); + VERIFY(ret, 1, "H5Pequal"); + + /* Make certain false positives aren't being returned */ + ret = H5Pequal(cid2, H5P_FILE_CREATE); + VERIFY(ret, 0, "H5Pequal"); + + /* Close parent class */ + ret = H5Pclose_class(cid2); + CHECK_I(ret, "H5Pclose_class"); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); + + /* Create another new generic class, derived from file creation class */ + cid1 = H5Pcreate_class(H5P_FILE_CREATE, CLASS2_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Check class name */ + name = H5Pget_class_name(cid1); + CHECK_PTR(name, "H5Pget_class_name"); + if (HDstrcmp(name, CLASS2_NAME) != 0) + TestErrPrintf("Class names don't match!, name=%s, CLASS2_NAME=%s\n", name, CLASS2_NAME); + H5free_memory(name); + + /* Check class parent */ + cid2 = H5Pget_class_parent(cid1); + CHECK_I(cid2, "H5Pget_class_parent"); + + /* Verify class parent correct */ + ret = H5Pequal(cid2, H5P_FILE_CREATE); + VERIFY(ret, 1, "H5Pequal"); + + /* Check class parent's parent */ + cid3 = H5Pget_class_parent(cid2); + CHECK_I(cid3, "H5Pget_class_parent"); + + /* Verify class parent's parent correct */ + ret = H5Pequal(cid3, H5P_GROUP_CREATE); + VERIFY(ret, 1, "H5Pequal"); + + /* Close parent class's parent */ + ret = H5Pclose_class(cid3); + CHECK_I(ret, "H5Pclose_class"); + + /* Close parent class */ + ret = H5Pclose_class(cid2); + CHECK_I(ret, "H5Pclose_class"); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); +} /* end test_genprop_basic_class() */ + +/**************************************************************** +** +** test_genprop_basic_class_prop(): Test basic generic property list code. +** Tests adding properties to generic classes. 
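+**
+**      (Registration pattern used below; name, size and default value are
+**       required, and the seven optional per-property callbacks are NULL:
+**           ret = H5Pregister2(cid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE,
+**                              NULL, NULL, NULL, NULL, NULL, NULL, NULL);)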
+** +****************************************************************/ +static void +test_genprop_basic_class_prop(void) +{ + hid_t cid1; /* Generic Property class ID */ + size_t size; /* Size of property */ + size_t nprops; /* Number of properties in class */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Generic Property List Class Properties Functionality\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 0, "H5Pget_nprops"); + + /* Check the existence of the first property (should fail) */ + ret = H5Pexist(cid1, PROP1_NAME); + VERIFY(ret, 0, "H5Pexist"); + + /* Insert first property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Try to insert the first property again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, + NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pregister2"); + + /* Check the existence of the first property */ + ret = H5Pexist(cid1, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check the size of the first property */ + ret = H5Pget_size(cid1, PROP1_NAME, &size); + CHECK_I(ret, "H5Pget_size"); + VERIFY(size, PROP1_SIZE, "H5Pget_size"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 1, "H5Pget_nprops"); + + /* Insert second property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Try to insert the second property again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, + NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pregister2"); + + /* Check the existence of the second property */ + ret = H5Pexist(cid1, PROP2_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check the size of the second property */ + ret = H5Pget_size(cid1, PROP2_NAME, &size); + CHECK_I(ret, "H5Pget_size"); + VERIFY(size, PROP2_SIZE, "H5Pget_size"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 2, "H5Pget_nprops"); + + /* Insert third property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Check the existence of the third property */ + ret = H5Pexist(cid1, PROP3_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check the size of the third property */ + ret = H5Pget_size(cid1, PROP3_NAME, &size); + CHECK_I(ret, "H5Pget_size"); + VERIFY(size, PROP3_SIZE, "H5Pget_size"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 3, "H5Pget_nprops"); + + /* Unregister first property */ + ret = H5Punregister(cid1, PROP1_NAME); + CHECK_I(ret, "H5Punregister"); + + /* Try to check the size of the first 
property (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Pget_size(cid1, PROP1_NAME, &size); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pget_size"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 2, "H5Pget_nprops"); + + /* Unregister second property */ + ret = H5Punregister(cid1, PROP2_NAME); + CHECK_I(ret, "H5Punregister"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 1, "H5Pget_nprops"); + + /* Unregister third property */ + ret = H5Punregister(cid1, PROP3_NAME); + CHECK_I(ret, "H5Punregister"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 0, "H5Pget_nprops"); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); +} /* end test_genprop_basic_class_prop() */ + +/**************************************************************** +** +** test_genprop_iter1(): Property iterator for test_genprop_class_iter +** +****************************************************************/ +static int +test_genprop_iter1(hid_t H5_ATTR_UNUSED id, const char *name, void *iter_data) +{ + iter_data_t *idata = (iter_data_t *)iter_data; + + return HDstrcmp(name, idata->names[idata->iter_count++]); +} + +/**************************************************************** +** +** test_genprop_class_iter(): Test basic generic property list code. +** Tests iterating over properties in a generic class. +** +****************************************************************/ +static void +test_genprop_class_iter(void) +{ + hid_t cid1; /* Generic Property class ID */ + size_t nprops; /* Number of properties in class */ + int idx; /* Index to start iteration at */ + struct { /* Struct for iterations */ + int iter_count; + const char **names; + } iter_struct; + const char *pnames[4] = {/* Names of properties for iterator */ + PROP1_NAME, PROP2_NAME, PROP3_NAME, PROP4_NAME}; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Generic Property List Class Property Iteration Functionality\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Insert first property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Insert second property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Insert third property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Insert third property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 4, "H5Pget_nprops"); + + /* Iterate over all properties in class */ + iter_struct.iter_count = 0; + 
iter_struct.names = pnames; + ret = H5Piterate(cid1, NULL, test_genprop_iter1, &iter_struct); + VERIFY(ret, 0, "H5Piterate"); + + /* Iterate over last three properties in class */ + idx = iter_struct.iter_count = 1; + ret = H5Piterate(cid1, &idx, test_genprop_iter1, &iter_struct); + VERIFY(ret, 0, "H5Piterate"); + VERIFY(idx, (int)nprops, "H5Piterate"); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); +} /* end test_genprop_class_iter() */ + +/**************************************************************** +** +** test_genprop_cls_*_cb1(): Property List callbacks for test_genprop_class_callback +** +****************************************************************/ +static herr_t +test_genprop_cls_crt_cb1(hid_t list_id, void *create_data) +{ + count_data_t *cdata = (count_data_t *)create_data; + + cdata->count++; + cdata->id = list_id; + + return SUCCEED; +} + +static herr_t +test_genprop_cls_cpy_cb1(hid_t new_list_id, hid_t H5_ATTR_UNUSED old_list_id, void *copy_data) +{ + count_data_t *cdata = (count_data_t *)copy_data; + + cdata->count++; + cdata->id = new_list_id; + + return SUCCEED; +} + +static herr_t +test_genprop_cls_cls_cb1(hid_t list_id, void *create_data) +{ + count_data_t *cdata = (count_data_t *)create_data; + + cdata->count++; + cdata->id = list_id; + + return SUCCEED; +} + +/**************************************************************** +** +** test_genprop_class_callback(): Test basic generic property list code. +** Tests callbacks for property lists in a generic class. +** +****************************************************************/ +static void +test_genprop_class_callback(void) +{ + hid_t cid1; /* Generic Property class ID */ + hid_t cid2; /* Generic Property class ID */ + hid_t lid1; /* Generic Property list ID */ + hid_t lid2; /* Generic Property list ID */ + hid_t lid3; /* Generic Property list ID */ + size_t nprops; /* Number of properties in class */ + struct { /* Struct for callbacks */ + int count; + hid_t id; + } crt_cb_struct, cpy_cb_struct, cls_cb_struct; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Generic Property List Class Callback Functionality\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = + H5Pcreate_class(H5P_ROOT, CLASS1_NAME, test_genprop_cls_crt_cb1, &crt_cb_struct, + test_genprop_cls_cpy_cb1, &cpy_cb_struct, test_genprop_cls_cls_cb1, &cls_cb_struct); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Insert first property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Insert second property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Insert third property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 3, "H5Pget_nprops"); + + /* Initialize class callback structs */ + crt_cb_struct.count = 0; + crt_cb_struct.id = (-1); + cpy_cb_struct.count = 0; + cpy_cb_struct.id = (-1); + cls_cb_struct.count = 0; + cls_cb_struct.id = (-1); + + /* Create a 
property list from the class */ + lid1 = H5Pcreate(cid1); + CHECK_I(lid1, "H5Pcreate"); + + /* Verify that the creation callback occurred */ + VERIFY(crt_cb_struct.count, 1, "H5Pcreate"); + VERIFY(crt_cb_struct.id, lid1, "H5Pcreate"); + + /* Check the number of properties in list */ + ret = H5Pget_nprops(lid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 3, "H5Pget_nprops"); + + /* Create another property list from the class */ + lid2 = H5Pcreate(cid1); + CHECK_I(lid2, "H5Pcreate"); + + /* Verify that the creation callback occurred */ + VERIFY(crt_cb_struct.count, 2, "H5Pcreate"); + VERIFY(crt_cb_struct.id, lid2, "H5Pcreate"); + + /* Check the number of properties in list */ + ret = H5Pget_nprops(lid2, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 3, "H5Pget_nprops"); + + /* Create another property list by copying an existing list */ + lid3 = H5Pcopy(lid1); + CHECK_I(lid3, "H5Pcopy"); + + /* Verify that the copy callback occurred */ + VERIFY(cpy_cb_struct.count, 1, "H5Pcopy"); + VERIFY(cpy_cb_struct.id, lid3, "H5Pcopy"); + + /* Check the number of properties in list */ + ret = H5Pget_nprops(lid3, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 3, "H5Pget_nprops"); + + /* Close first list */ + ret = H5Pclose(lid1); + CHECK_I(ret, "H5Pclose"); + + /* Verify that the close callback occurred */ + VERIFY(cls_cb_struct.count, 1, "H5Pclose"); + VERIFY(cls_cb_struct.id, lid1, "H5Pclose"); + + /* Close second list */ + ret = H5Pclose(lid2); + CHECK_I(ret, "H5Pclose"); + + /* Verify that the close callback occurred */ + VERIFY(cls_cb_struct.count, 2, "H5Pclose"); + VERIFY(cls_cb_struct.id, lid2, "H5Pclose"); + + /* Close third list */ + ret = H5Pclose(lid3); + CHECK_I(ret, "H5Pclose"); + + /* Verify that the close callback occurred */ + VERIFY(cls_cb_struct.count, 3, "H5Pclose"); + VERIFY(cls_cb_struct.id, lid3, "H5Pclose"); + + /* Create another new generic class, derived from first class */ + cid2 = + H5Pcreate_class(cid1, CLASS2_NAME, test_genprop_cls_crt_cb1, &crt_cb_struct, test_genprop_cls_cpy_cb1, + &cpy_cb_struct, test_genprop_cls_cls_cb1, &cls_cb_struct); + CHECK_I(cid2, "H5Pcreate_class"); + + /* Insert fourth property into class (with no callbacks) */ + ret = + H5Pregister2(cid2, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Check the number of properties in class */ + /* (only reports the number of properties in 2nd class) */ + ret = H5Pget_nprops(cid2, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 1, "H5Pget_nprops"); + + /* Create a property list from the 2nd class */ + lid1 = H5Pcreate(cid2); + CHECK_I(lid1, "H5Pcreate"); + + /* Verify that both of the creation callbacks occurred */ + VERIFY(crt_cb_struct.count, 4, "H5Pcreate"); + VERIFY(crt_cb_struct.id, lid1, "H5Pcreate"); + + /* Check the number of properties in list */ + ret = H5Pget_nprops(lid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 4, "H5Pget_nprops"); + + /* Create another property list by copying existing list */ + lid2 = H5Pcopy(lid1); + CHECK_I(lid2, "H5Pcopy"); + + /* Verify that both of the copy callbacks occurred */ + VERIFY(cpy_cb_struct.count, 3, "H5Pcopy"); + VERIFY(cpy_cb_struct.id, lid2, "H5Pcopy"); + + /* Check the number of properties in list */ + ret = H5Pget_nprops(lid2, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 4, "H5Pget_nprops"); + + /* Close first list */ + ret = H5Pclose(lid1); + CHECK_I(ret, "H5Pclose"); + + /* Verify that both of the 
close callbacks occurred */ + VERIFY(cls_cb_struct.count, 5, "H5Pclose"); + VERIFY(cls_cb_struct.id, lid1, "H5Pclose"); + + /* Close second list */ + ret = H5Pclose(lid2); + CHECK_I(ret, "H5Pclose"); + + /* Verify that both of the close callbacks occurred */ + VERIFY(cls_cb_struct.count, 7, "H5Pclose"); + VERIFY(cls_cb_struct.id, lid2, "H5Pclose"); + + /* Close classes */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); + ret = H5Pclose_class(cid2); + CHECK_I(ret, "H5Pclose_class"); +} /* end test_genprop_class_callback() */ + +/**************************************************************** +** +** test_genprop_basic_list(): Test basic generic property list code. +** Tests creating new generic property lists. +** +****************************************************************/ +static void +test_genprop_basic_list(void) +{ + hid_t cid1; /* Generic Property class ID */ + hid_t cid2; /* Generic Property class ID */ + hid_t lid1; /* Generic Property list ID */ + size_t nprops; /* Number of properties */ + size_t size; /* Size of property */ + int prop1_value; /* Value for property #1 */ + float prop2_value; /* Value for property #2 */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Generic Property List Creation Functionality\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Add several properties (w/default values) */ + + /* Insert first property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Insert second property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 2, "H5Pget_nprops"); + + /* Create a property list from the class */ + lid1 = H5Pcreate(cid1); + CHECK_I(lid1, "H5Pcreate"); + + /* Get the list's class */ + cid2 = H5Pget_class(lid1); + CHECK_I(cid2, "H5Pget_class"); + + /* Check that the list's class is correct */ + ret = H5Pequal(cid1, cid2); + VERIFY(ret, 1, "H5Pequal"); + + /* Check correct "is a" class/list relationship */ + ret = H5Pisa_class(lid1, cid1); + VERIFY(ret, 1, "H5Pisa_class"); + + /* Check "is a" class/list relationship another way */ + ret = H5Pisa_class(lid1, cid2); + VERIFY(ret, 1, "H5Pisa_class"); + + /* Close class */ + ret = H5Pclose_class(cid2); + CHECK_I(ret, "H5Pclose_class"); + + /* Check the number of properties in list */ + ret = H5Pget_nprops(lid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 2, "H5Pget_nprops"); + + /* Check existence of properties */ + ret = H5Pexist(lid1, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + ret = H5Pexist(lid1, PROP2_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check the sizes of the properties */ + ret = H5Pget_size(lid1, PROP1_NAME, &size); + CHECK_I(ret, "H5Pget_size"); + VERIFY(size, PROP1_SIZE, "H5Pget_size"); + ret = H5Pget_size(lid1, PROP2_NAME, &size); + CHECK_I(ret, "H5Pget_size"); + VERIFY(size, PROP2_SIZE, "H5Pget_size"); + + /* Check values of properties (set with default values) */ + ret = H5Pget(lid1, PROP1_NAME, &prop1_value); + 
CHECK_I(ret, "H5Pget"); + VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); + ret = H5Pget(lid1, PROP2_NAME, &prop2_value); + CHECK_I(ret, "H5Pget"); + /* Verify the floating-poing value in this way to avoid compiler warning. */ + if (!H5_FLT_ABS_EQUAL(prop2_value, *PROP2_DEF_VALUE)) + HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", + (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__); + + /* Close list */ + ret = H5Pclose(lid1); + CHECK_I(ret, "H5Pclose"); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); + +} /* end test_genprop_basic_list() */ + +/**************************************************************** +** +** test_genprop_basic_list_prop(): Test basic generic property list code. +** Tests creating new generic property lists and adding and +** removing properties from them. +** +****************************************************************/ +static void +test_genprop_basic_list_prop(void) +{ + hid_t cid1; /* Generic Property class ID */ + hid_t lid1; /* Generic Property list ID */ + size_t nprops; /* Number of properties */ + int prop1_value; /* Value for property #1 */ + float prop2_value; /* Value for property #2 */ + char prop3_value[10]; /* Property #3 value */ + double prop4_value; /* Property #4 value */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Generic Property List Property Functionality\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Add several properties (several w/default values) */ + + /* Insert first property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Insert second property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Create a property list from the class */ + lid1 = H5Pcreate(cid1); + CHECK_I(lid1, "H5Pcreate"); + + /* Check the number of properties in list */ + ret = H5Pget_nprops(lid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 2, "H5Pget_nprops"); + + /* Add temporary properties */ + + /* Insert first temporary property into list (with no callbacks) */ + ret = H5Pinsert2(lid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert2"); + + /* Insert second temporary property into list (with no callbacks) */ + ret = H5Pinsert2(lid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert2"); + + /* Check the number of properties in list */ + ret = H5Pget_nprops(lid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 4, "H5Pget_nprops"); + + /* Check existence of all properties */ + ret = H5Pexist(lid1, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + ret = H5Pexist(lid1, PROP2_NAME); + VERIFY(ret, 1, "H5Pexist"); + ret = H5Pexist(lid1, PROP3_NAME); + VERIFY(ret, 1, "H5Pexist"); + ret = H5Pexist(lid1, PROP4_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check values of permanent properties (set with default values) */ + ret = H5Pget(lid1, PROP1_NAME, &prop1_value); + CHECK_I(ret, "H5Pget"); + VERIFY(prop1_value, 
*PROP1_DEF_VALUE, "H5Pget"); + ret = H5Pget(lid1, PROP2_NAME, &prop2_value); + CHECK_I(ret, "H5Pget"); + /* Verify the floating-poing value in this way to avoid compiler warning. */ + if (!H5_FLT_ABS_EQUAL(prop2_value, *PROP2_DEF_VALUE)) + HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", + (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__); + + /* Check values of temporary properties (set with regular values) */ + ret = H5Pget(lid1, PROP3_NAME, &prop3_value); + CHECK_I(ret, "H5Pget"); + if (HDmemcmp(&prop3_value, PROP3_DEF_VALUE, PROP3_SIZE) != 0) + TestErrPrintf("Property #3 doesn't match!, line=%d\n", __LINE__); + ret = H5Pget(lid1, PROP4_NAME, &prop4_value); + CHECK_I(ret, "H5Pget"); + /* Verify the floating-poing value in this way to avoid compiler warning. */ + if (!H5_DBL_ABS_EQUAL(prop4_value, *PROP4_DEF_VALUE)) + HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", + *PROP4_DEF_VALUE, prop4_value, (int)__LINE__, __FILE__); + + /* Delete permanent property */ + ret = H5Premove(lid1, PROP2_NAME); + CHECK_I(ret, "H5Premove"); + + /* Check number of properties */ + ret = H5Pget_nprops(lid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 3, "H5Pget_nprops"); + + /* Delete temporary property */ + ret = H5Premove(lid1, PROP3_NAME); + CHECK_I(ret, "H5Premove"); + + /* Check number of properties */ + ret = H5Pget_nprops(lid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 2, "H5Pget_nprops"); + + /* Check existence of remaining properties */ + ret = H5Pexist(lid1, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + ret = H5Pexist(lid1, PROP4_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check values of permanent properties (set with default values) */ + ret = H5Pget(lid1, PROP1_NAME, &prop1_value); + CHECK_I(ret, "H5Pget"); + VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); + + /* Check values of temporary properties (set with regular values) */ + ret = H5Pget(lid1, PROP4_NAME, &prop4_value); + CHECK_I(ret, "H5Pget"); + /* Verify the floating-poing value in this way to avoid compiler warning. */ + if (!H5_DBL_ABS_EQUAL(prop4_value, *PROP4_DEF_VALUE)) + HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", + *PROP4_DEF_VALUE, prop4_value, (int)__LINE__, __FILE__); + + /* Close list */ + ret = H5Pclose(lid1); + CHECK_I(ret, "H5Pclose"); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); + +} /* end test_genprop_basic_list_prop() */ + +/**************************************************************** +** +** test_genprop_iter2(): Property iterator for test_genprop_list_iter +** +****************************************************************/ +static int +test_genprop_iter2(hid_t H5_ATTR_UNUSED id, const char *name, void *iter_data) +{ + iter_data_t *idata = (iter_data_t *)iter_data; + + return HDstrcmp(name, idata->names[idata->iter_count++]); +} + +/**************************************************************** +** +** test_genprop_list_iter(): Test basic generic property list code. +** Tests iterating over generic property list properties. 
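+** Note: the expected name order below (PROP3, PROP4, PROP1, PROP2) reflects that, in this test, properties inserted directly into the list with H5Pinsert2 are iterated before the properties registered in the list's class.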
+** +****************************************************************/ +static void +test_genprop_list_iter(void) +{ + hid_t cid1; /* Generic Property class ID */ + hid_t lid1; /* Generic Property list ID */ + size_t nprops; /* Number of properties */ + int idx; /* Index to start iteration at */ + struct { /* Struct for iterations */ + int iter_count; + const char **names; + } iter_struct; + const char *pnames[4] = {/* Names of properties for iterator */ + PROP3_NAME, PROP4_NAME, PROP1_NAME, PROP2_NAME}; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Generic Property List Iteration Functionality\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Add several properties (several w/default values) */ + + /* Insert first property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Insert second property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Create a property list from the class */ + lid1 = H5Pcreate(cid1); + CHECK_I(lid1, "H5Pcreate"); + + /* Check the number of properties in list */ + ret = H5Pget_nprops(lid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 2, "H5Pget_nprops"); + + /* Add temporary properties */ + + /* Insert first temporary property into class (with no callbacks) */ + ret = H5Pinsert2(lid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert2"); + + /* Insert second temporary property into class (with no callbacks) */ + ret = H5Pinsert2(lid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert2"); + + /* Check the number of properties in list */ + ret = H5Pget_nprops(lid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 4, "H5Pget_nprops"); + + /* Iterate over all properties in list */ + iter_struct.iter_count = 0; + iter_struct.names = pnames; + ret = H5Piterate(lid1, NULL, test_genprop_iter2, &iter_struct); + VERIFY(ret, 0, "H5Piterate"); + + /* Iterate over last three properties in list */ + idx = iter_struct.iter_count = 1; + ret = H5Piterate(lid1, &idx, test_genprop_iter2, &iter_struct); + VERIFY(ret, 0, "H5Piterate"); + VERIFY(idx, (int)nprops, "H5Piterate"); + + /* Close list */ + ret = H5Pclose(lid1); + CHECK_I(ret, "H5Pclose"); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); + +} /* end test_genprop_list_iter() */ + +typedef struct { + /* Creation information */ + int crt_count; + char *crt_name; + void *crt_value; + + /* Set information */ + int set_count; + hid_t set_plist_id; + char *set_name; + void *set_value; + + /* Get information */ + int get_count; + hid_t get_plist_id; + char *get_name; + void *get_value; + + /* Delete information */ + int del_count; + hid_t del_plist_id; + char *del_name; + void *del_value; + + /* Copy information */ + int cop_count; + char *cop_name; + void *cop_value; + + /* Compare information */ + int cmp_count; + + /* Close information */ + int cls_count; + char *cls_name; + void *cls_value; +} prop_cb_info; + +/* Global variables for Callback information */ 
+prop_cb_info prop1_cb_info; /* Callback statistics for property #1 */ +prop_cb_info prop2_cb_info; /* Callback statistics for property #2 */ +prop_cb_info prop3_cb_info; /* Callback statistics for property #3 */ + +/**************************************************************** +** +** test_genprop_cls_cpy_cb2(): Property Class callback for test_genprop_list_callback +** +****************************************************************/ +static herr_t +test_genprop_cls_cpy_cb2(hid_t new_list_id, hid_t H5_ATTR_UNUSED old_list_id, void *create_data) +{ + count_data_t *cdata = (count_data_t *)create_data; + + cdata->count++; + cdata->id = new_list_id; + + return SUCCEED; +} + +/**************************************************************** +** +** test_genprop_prop_crt_cb1(): Property creation callback for test_genprop_list_callback +** +****************************************************************/ +static herr_t +test_genprop_prop_crt_cb1(const char *name, size_t size, void *def_value) +{ + /* Set the information from the creation call */ + prop1_cb_info.crt_count++; + prop1_cb_info.crt_name = HDstrdup(name); + prop1_cb_info.crt_value = HDmalloc(size); + HDmemcpy(prop1_cb_info.crt_value, def_value, size); + + return (SUCCEED); +} + +/**************************************************************** +** +** test_genprop_prop_set_cb1(): Property set callback for test_genprop_list_callback +** +****************************************************************/ +static herr_t +test_genprop_prop_set_cb1(hid_t plist_id, const char *name, size_t size, void *value) +{ + /* Set the information from the set call */ + prop1_cb_info.set_count++; + prop1_cb_info.set_plist_id = plist_id; + if (prop1_cb_info.set_name == NULL) + prop1_cb_info.set_name = HDstrdup(name); + if (prop1_cb_info.set_value == NULL) + prop1_cb_info.set_value = HDmalloc(size); + HDmemcpy(prop1_cb_info.set_value, value, size); + + return (SUCCEED); +} + +/**************************************************************** +** +** test_genprop_prop_get_cb1(): Property get callback for test_genprop_list_callback +** +****************************************************************/ +static herr_t +test_genprop_prop_get_cb1(hid_t plist_id, const char *name, size_t size, void *value) +{ + /* Set the information from the get call */ + prop1_cb_info.get_count++; + prop1_cb_info.get_plist_id = plist_id; + if (prop1_cb_info.get_name == NULL) + prop1_cb_info.get_name = HDstrdup(name); + if (prop1_cb_info.get_value == NULL) + prop1_cb_info.get_value = HDmalloc(size); + HDmemcpy(prop1_cb_info.get_value, value, size); + + return (SUCCEED); +} + +/**************************************************************** +** +** test_genprop_prop_cop_cb1(): Property copy callback for test_genprop_list_callback +** +****************************************************************/ +static herr_t +test_genprop_prop_cop_cb1(const char *name, size_t size, void *value) +{ + /* Set the information from the get call */ + prop1_cb_info.cop_count++; + if (prop1_cb_info.cop_name == NULL) + prop1_cb_info.cop_name = HDstrdup(name); + if (prop1_cb_info.cop_value == NULL) + prop1_cb_info.cop_value = HDmalloc(size); + HDmemcpy(prop1_cb_info.cop_value, value, size); + + return (SUCCEED); +} + +/**************************************************************** +** +** test_genprop_prop_cmp_cb1(): Property comparison callback for test_genprop_list_callback +** +****************************************************************/ +static int +test_genprop_prop_cmp_cb1(const 
void *value1, const void *value2, size_t size) +{ + /* Set the information from the comparison call */ + prop1_cb_info.cmp_count++; + + return (HDmemcmp(value1, value2, size)); +} + +/**************************************************************** +** +** test_genprop_prop_cmp_cb3(): Property comparison callback for test_genprop_list_callback +** +****************************************************************/ +static int +test_genprop_prop_cmp_cb3(const void *value1, const void *value2, size_t size) +{ + /* Set the information from the comparison call */ + prop3_cb_info.cmp_count++; + + return (HDmemcmp(value1, value2, size)); +} + +/**************************************************************** +** +** test_genprop_prop_cls_cb1(): Property close callback for test_genprop_list_callback +** +****************************************************************/ +static herr_t +test_genprop_prop_cls_cb1(const char *name, size_t size, void *value) +{ + /* Set the information from the close call */ + prop1_cb_info.cls_count++; + if (prop1_cb_info.cls_name == NULL) + prop1_cb_info.cls_name = HDstrdup(name); + if (prop1_cb_info.cls_value == NULL) + prop1_cb_info.cls_value = HDmalloc(size); + HDmemcpy(prop1_cb_info.cls_value, value, size); + + return (SUCCEED); +} + +/**************************************************************** +** +** test_genprop_prop_del_cb2(): Property delete callback for test_genprop_list_callback +** +****************************************************************/ +static herr_t +test_genprop_prop_del_cb2(hid_t plist_id, const char *name, size_t size, void *value) +{ + /* Set the information from the delete call */ + prop2_cb_info.del_count++; + prop2_cb_info.del_plist_id = plist_id; + prop2_cb_info.del_name = HDstrdup(name); + prop2_cb_info.del_value = HDmalloc(size); + HDmemcpy(prop2_cb_info.del_value, value, size); + + return (SUCCEED); +} + +/**************************************************************** +** +** test_genprop_list_callback(): Test basic generic property list code. +** Tests callbacks for properties in a generic property list. 
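+** Exercises the per-property create, set, get, delete, copy, compare, and close callbacks registered through H5Pregister2, plus the class copy callback, using the prop*_cb_info globals above to record each invocation.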
+** +****************************************************************/ +static void +test_genprop_list_callback(void) +{ + hid_t cid1; /* Generic Property class ID */ + hid_t lid1; /* Generic Property list ID */ + hid_t lid2; /* 2nd Generic Property list ID */ + size_t nprops; /* Number of properties in class */ + int prop1_value; /* Value for property #1 */ + int prop1_new_value = 20; /* Property #1 new value */ + float prop2_value; /* Value for property #2 */ + char prop3_value[10]; /* Property #3 value */ + char prop3_new_value[10] = "10 chairs"; /* Property #3 new value */ + double prop4_value; /* Property #4 value */ + struct { /* Struct for callbacks */ + int count; + hid_t id; + } cop_cb_struct; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Generic Property List Property Callback Functionality\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, test_genprop_cls_cpy_cb2, &cop_cb_struct, NULL, + NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Insert first property into class (with callbacks) */ + ret = H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, test_genprop_prop_crt_cb1, + test_genprop_prop_set_cb1, test_genprop_prop_get_cb1, NULL, test_genprop_prop_cop_cb1, + test_genprop_prop_cmp_cb1, test_genprop_prop_cls_cb1); + CHECK_I(ret, "H5Pregister2"); + + /* Insert second property into class (with only delete callback) */ + ret = H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, + test_genprop_prop_del_cb2, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Insert third property into class (with only compare callback) */ + ret = H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, + test_genprop_prop_cmp_cb3, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Insert fourth property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 4, "H5Pget_nprops"); + + /* Initialize class callback structs */ + cop_cb_struct.count = 0; + cop_cb_struct.id = (-1); + + /* Initialize callback information for properties tracked */ + HDmemset(&prop1_cb_info, 0, sizeof(prop_cb_info)); + HDmemset(&prop2_cb_info, 0, sizeof(prop_cb_info)); + HDmemset(&prop3_cb_info, 0, sizeof(prop_cb_info)); + + /* Create a property list from the class */ + lid1 = H5Pcreate(cid1); + CHECK_I(lid1, "H5Pcreate"); + + /* The compare callback should not have been called once on property 1, as + * the property is always copied */ + VERIFY(prop1_cb_info.cmp_count, 0, "H5Pcreate"); + /* The compare callback should not have been called on property 3, as there + * is no create callback */ + VERIFY(prop3_cb_info.cmp_count, 0, "H5Pcreate"); + + /* Verify creation callback information for properties tracked */ + VERIFY(prop1_cb_info.crt_count, 1, "H5Pcreate"); + if (HDstrcmp(prop1_cb_info.crt_name, PROP1_NAME) != 0) + TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); + if (HDmemcmp(prop1_cb_info.crt_value, PROP1_DEF_VALUE, PROP1_SIZE) != 0) + TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); + + /* Check values of permanent properties (set with default values) */ + ret = H5Pget(lid1, 
PROP1_NAME, &prop1_value); + CHECK_I(ret, "H5Pget"); + VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); + /* The compare callback should not have been called */ + VERIFY(prop1_cb_info.cmp_count, 0, "H5Pget"); + ret = H5Pget(lid1, PROP2_NAME, &prop2_value); + CHECK_I(ret, "H5Pget"); + /* Verify the floating-poing value in this way to avoid compiler warning. */ + if (!H5_FLT_ABS_EQUAL(prop2_value, *PROP2_DEF_VALUE)) + HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", + (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__); + + /* Check values of temporary properties (set with regular values) */ + ret = H5Pget(lid1, PROP3_NAME, &prop3_value); + CHECK_I(ret, "H5Pget"); + if (HDmemcmp(&prop3_value, PROP3_DEF_VALUE, PROP3_SIZE) != 0) + TestErrPrintf("Property #3 doesn't match!, line=%d\n", __LINE__); + /* The compare callback should not have been called, as there is no get + * callback for this property */ + VERIFY(prop3_cb_info.cmp_count, 0, "H5Pget"); + ret = H5Pget(lid1, PROP4_NAME, &prop4_value); + CHECK_I(ret, "H5Pget"); + /* Verify the floating-poing value in this way to avoid compiler warning. */ + if (!H5_DBL_ABS_EQUAL(prop4_value, *PROP4_DEF_VALUE)) + HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", + *PROP4_DEF_VALUE, prop4_value, (int)__LINE__, __FILE__); + + /* Verify get callback information for properties tracked */ + VERIFY(prop1_cb_info.get_count, 1, "H5Pget"); + VERIFY(prop1_cb_info.get_plist_id, lid1, "H5Pget"); + if (HDstrcmp(prop1_cb_info.get_name, PROP1_NAME) != 0) + TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); + if (HDmemcmp(prop1_cb_info.get_value, PROP1_DEF_VALUE, PROP1_SIZE) != 0) + TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); + + /* Set value of property #1 to different value */ + ret = H5Pset(lid1, PROP1_NAME, &prop1_new_value); + CHECK_I(ret, "H5Pset"); + + /* Verify set callback information for properties tracked */ + VERIFY(prop1_cb_info.set_count, 1, "H5Pset"); + VERIFY(prop1_cb_info.set_plist_id, lid1, "H5Pset"); + if (HDstrcmp(prop1_cb_info.set_name, PROP1_NAME) != 0) + TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); + if (HDmemcmp(prop1_cb_info.set_value, &prop1_new_value, PROP1_SIZE) != 0) + TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); + + /* The compare callback should not have been called */ + VERIFY(prop1_cb_info.cmp_count, 0, "H5Pset"); + + /* Set value of property #3 to different value */ + ret = H5Pset(lid1, PROP3_NAME, prop3_new_value); + CHECK_I(ret, "H5Pset"); + + /* The compare callback should not have been called */ + VERIFY(prop3_cb_info.cmp_count, 0, "H5Pset"); + + /* Check new value of tracked properties */ + ret = H5Pget(lid1, PROP1_NAME, &prop1_value); + CHECK_I(ret, "H5Pget"); + VERIFY(prop1_value, prop1_new_value, "H5Pget"); + + /* Verify get callback information again for properties tracked */ + VERIFY(prop1_cb_info.get_count, 2, "H5Pget"); + VERIFY(prop1_cb_info.get_plist_id, lid1, "H5Pget"); + if (HDstrcmp(prop1_cb_info.get_name, PROP1_NAME) != 0) + TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); + if (HDmemcmp(prop1_cb_info.get_value, &prop1_new_value, PROP1_SIZE) != 0) + TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); + + /* Delete property #2 */ + ret = H5Premove(lid1, PROP2_NAME); + CHECK_I(ret, "H5Premove"); + + /* Verify delete callback information for properties tracked */ + 
VERIFY(prop2_cb_info.del_count, 1, "H5Premove"); + VERIFY(prop2_cb_info.del_plist_id, lid1, "H5Premove"); + if (HDstrcmp(prop2_cb_info.del_name, PROP2_NAME) != 0) + TestErrPrintf("Property #2 name doesn't match!, line=%d\n", __LINE__); + if (HDmemcmp(prop2_cb_info.del_value, PROP2_DEF_VALUE, PROP2_SIZE) != 0) + TestErrPrintf("Property #2 value doesn't match!, line=%d\n", __LINE__); + + /* Copy first list */ + lid2 = H5Pcopy(lid1); + CHECK_I(lid2, "H5Pcopy"); + + /* Verify copy callback information for properties tracked */ + VERIFY(prop1_cb_info.cop_count, 1, "H5Pcopy"); + if (HDstrcmp(prop1_cb_info.cop_name, PROP1_NAME) != 0) + TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); + if (HDmemcmp(prop1_cb_info.cop_value, &prop1_new_value, PROP1_SIZE) != 0) + TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); + + /* Verify that the class creation callback occurred */ + VERIFY(cop_cb_struct.count, 1, "H5Pcopy"); + VERIFY(cop_cb_struct.id, lid2, "H5Pcopy"); + + /* Compare the two lists */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 1, "H5Pequal"); + + /* Verify compare callback information for properties tracked */ + VERIFY(prop1_cb_info.cmp_count, 1, "H5Pequal"); + VERIFY(prop3_cb_info.cmp_count, 1, "H5Pequal"); + + /* Close first list */ + ret = H5Pclose(lid1); + CHECK_I(ret, "H5Pclose"); + + /* Verify close callback information for properties tracked */ + VERIFY(prop1_cb_info.cls_count, 1, "H5Pclose"); + if (HDstrcmp(prop1_cb_info.cls_name, PROP1_NAME) != 0) + TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); + if (HDmemcmp(prop1_cb_info.cls_value, &prop1_new_value, PROP1_SIZE) != 0) + TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); + + /* Close second list */ + ret = H5Pclose(lid2); + CHECK_I(ret, "H5Pclose"); + + /* Verify close callback information for properties tracked */ + VERIFY(prop1_cb_info.cls_count, 2, "H5Pclose"); + + /* Free memory allocated for tracking properties */ + HDfree(prop1_cb_info.crt_name); + HDfree(prop1_cb_info.crt_value); + HDfree(prop1_cb_info.get_name); + HDfree(prop1_cb_info.get_value); + HDfree(prop1_cb_info.set_name); + HDfree(prop1_cb_info.set_value); + HDfree(prop1_cb_info.cop_name); + HDfree(prop1_cb_info.cop_value); + HDfree(prop1_cb_info.cls_name); + HDfree(prop1_cb_info.cls_value); + HDfree(prop2_cb_info.del_name); + HDfree(prop2_cb_info.del_value); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); +} /* end test_genprop_list_callback() */ + +/**************************************************************** +** +** test_genprop_list_addprop(): Test adding properties to a +** standard HDF5 property list and verify that the library +** ignores the extra properties. 
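+** Note: H5Pinsert2 attaches a temporary property to this one list only; it is not registered in the list's class, so the H5Dcreate2 call below is expected to ignore it.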
+** +****************************************************************/ +static void +test_genprop_list_addprop(void) +{ + hid_t fid; /* File ID */ + hid_t did; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t pid; /* Property List ID */ + int prop1_value; /* Value for property #1 */ + herr_t ret; /* Generic return value */ + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create scalar dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create a dataset creation property list */ + pid = H5Pcreate(H5P_DATASET_CREATE); + CHECK(pid, FAIL, "H5Pcreate"); + + /* Insert temporary property into class (with no callbacks) */ + ret = H5Pinsert2(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert2"); + + /* Check existence of added property */ + ret = H5Pexist(pid, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check values of property (set with default value) */ + ret = H5Pget(pid, PROP1_NAME, &prop1_value); + CHECK_I(ret, "H5Pget"); + VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); + + /* Create a dataset */ + did = H5Dcreate2(fid, "Dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, pid, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check existence of added property (after using property list) */ + ret = H5Pexist(pid, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check values of property (set with default value) (after using property list) */ + ret = H5Pget(pid, PROP1_NAME, &prop1_value); + CHECK_I(ret, "H5Pget"); + VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); + + /* Close property list */ + ret = H5Pclose(pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_genprop_list_addprop() */ + +/**************************************************************** +** +** test_genprop_class_addprop(): Test adding properties to a +** standard HDF5 property class and verify that the library +** ignores the extra properties and continues to recognize the +** derived class as a valid version of the derived-from class. 
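+** Note: PROP2 is registered in the class only after the list is created, so the test expects it to be absent from that existing list; registering a property in a class affects lists created afterwards.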
+** +****************************************************************/ +static void +test_genprop_class_addprop(void) +{ + hid_t fid; /* File ID */ + hid_t did; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t cid; /* Property Class ID */ + hid_t pid; /* Property List ID */ + int prop1_value; /* Value for property #1 */ + herr_t ret; /* Generic return value */ + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create scalar dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create a new class, derived from the dataset creation property list class */ + cid = H5Pcreate_class(H5P_DATASET_CREATE, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid, "H5Pcreate_class"); +#if 0 + /* Check existence of an original property */ + ret = H5Pexist(cid, H5O_CRT_PIPELINE_NAME); + VERIFY(ret, 1, "H5Pexist"); +#endif + /* Insert first property into class (with no callbacks) */ + ret = + H5Pregister2(cid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); +#if 0 + /* Check existence of an original property */ + ret = H5Pexist(cid, H5O_CRT_PIPELINE_NAME); + VERIFY(ret, 1, "H5Pexist"); +#endif + /* Check existence of added property */ + ret = H5Pexist(cid, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Create a derived dataset creation property list */ + pid = H5Pcreate(cid); + CHECK(pid, FAIL, "H5Pcreate"); +#if 0 + /* Check existence of an original property */ + ret = H5Pexist(pid, H5O_CRT_PIPELINE_NAME); + VERIFY(ret, 1, "H5Pexist"); +#endif + /* Check existence of added property */ + ret = H5Pexist(pid, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check values of property (set with default value) */ + ret = H5Pget(pid, PROP1_NAME, &prop1_value); + CHECK_I(ret, "H5Pget"); + VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); + + /* Insert second property into class (with no callbacks) */ + ret = + H5Pregister2(cid, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); +#if 0 + /* Check existence of an original property (in class) */ + ret = H5Pexist(cid, H5O_CRT_PIPELINE_NAME); + VERIFY(ret, 1, "H5Pexist"); +#endif + /* Check existence of first added property (in class) */ + ret = H5Pexist(cid, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check existence of second added property (in class) */ + ret = H5Pexist(cid, PROP2_NAME); + VERIFY(ret, 1, "H5Pexist"); +#if 0 + /* Check existence of an original property (in property list) */ + ret = H5Pexist(pid, H5O_CRT_PIPELINE_NAME); + VERIFY(ret, 1, "H5Pexist"); +#endif + /* Check existence of first added property (in property list) */ + ret = H5Pexist(pid, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check existence of second added property (in property list) (should not exist) */ + ret = H5Pexist(pid, PROP2_NAME); + VERIFY(ret, 0, "H5Pexist"); + + /* Create a dataset */ + did = H5Dcreate2(fid, "Dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, pid, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check existence of added property (after using property list) */ + ret = H5Pexist(pid, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check values of property (set with default value) (after using property list) */ + ret = H5Pget(pid, PROP1_NAME, &prop1_value); + CHECK_I(ret, "H5Pget"); + VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); + + /* Close property class */ + ret = 
H5Pclose_class(cid); + CHECK(ret, FAIL, "H5Pclose_class"); + + /* Close property list */ + ret = H5Pclose(pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_genprop_class_addprop() */ + +/**************************************************************** +** +** test_genprop_list_add_remove_prop(): Test adding and then removing the +** same property to a standard HDF5 property list. This also checks for a +** memory leak that could be caused by not freeing the removed property's +** resources from the property list. +** +****************************************************************/ +static void +test_genprop_list_add_remove_prop(void) +{ + hid_t pid; /* Property List ID */ + herr_t ret; /* Generic return value */ + + /* Create a dataset creation property list */ + pid = H5Pcreate(H5P_DATASET_CREATE); + CHECK(pid, FAIL, "H5Pcreate"); + + /* Insert temporary property into list (with no callbacks) */ + ret = H5Pinsert2(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert2"); + + /* Delete added property */ + ret = H5Premove(pid, PROP1_NAME); + CHECK_I(ret, "H5Premove"); + + /* Insert temporary property into list (with no callbacks) */ + ret = H5Pinsert2(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert2"); + + /* Delete added property */ + ret = H5Premove(pid, PROP1_NAME); + CHECK_I(ret, "H5Premove"); + + /* Close property list */ + ret = H5Pclose(pid); + CHECK(ret, FAIL, "H5Pclose"); + +} /* end test_genprop_list_add_remove_prop() */ + +/**************************************************************** +** +** test_genprop_equal(): Test basic generic property list code. 
+** More tests for H5Pequal() +** +****************************************************************/ +static void +test_genprop_equal(void) +{ + hid_t cid1; /* Generic Property class ID */ + hid_t lid1; /* Generic Property list ID */ + hid_t lid2; /* Generic Property list ID */ + int prop1_new_value = 20; /* Property #1 new value */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Generic Property List Equal Functionality\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Insert first property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Insert second property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Create a property list from the class */ + lid1 = H5Pcreate(cid1); + CHECK_I(lid1, "H5Pcreate"); + + /* Copy the property list */ + lid2 = H5Pcopy(lid1); + CHECK_I(lid2, "H5Pcopy"); + + /* Check that the lists are equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 1, "H5Pequal"); + + /* Set property in first list to another value */ + ret = H5Pset(lid1, PROP1_NAME, &prop1_new_value); + CHECK_I(ret, "H5Pset"); + + /* Check that the lists are not equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 0, "H5Pequal"); + + /* Set property in first list back to default */ + ret = H5Pset(lid1, PROP1_NAME, PROP1_DEF_VALUE); + CHECK_I(ret, "H5Pset"); + + /* Check that the lists are still equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 1, "H5Pequal"); + + /* Insert first temporary property into first list (with no callbacks) */ + ret = H5Pinsert2(lid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert2"); + + /* Check that the lists are not equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 0, "H5Pequal"); + + /* Insert first temporary property into second list (with no callbacks) */ + ret = H5Pinsert2(lid2, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert2"); + + /* Check that the lists are equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 1, "H5Pequal"); + + /* Insert second temporary property into second list (with no callbacks) */ + ret = H5Pinsert2(lid2, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert2"); + + /* Check that the lists are not equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 0, "H5Pequal"); + + /* Insert second temporary property into first list (with no callbacks) */ + ret = H5Pinsert2(lid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert2"); + + /* Check that the lists are equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 1, "H5Pequal"); + + /* Remove first temporary property from first list */ + ret = H5Premove(lid1, PROP3_NAME); + CHECK_I(ret, "H5Premove"); + + /* Check that the lists are not equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 0, "H5Pequal"); + + /* Remove second temporary property from second list */ + ret = H5Premove(lid2, PROP4_NAME); + CHECK_I(ret, "H5Premove"); + + /* Check that the lists are not equal */ + 
ret = H5Pequal(lid1, lid2); + VERIFY(ret, 0, "H5Pequal"); + + /* Remove first temporary property from second list */ + ret = H5Premove(lid2, PROP3_NAME); + CHECK_I(ret, "H5Premove"); + + /* Check that the lists are not equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 0, "H5Pequal"); + + /* Remove first permanent property from first list */ + ret = H5Premove(lid1, PROP1_NAME); + CHECK_I(ret, "H5Premove"); + + /* Check that the lists are not equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 0, "H5Pequal"); + + /* Remove second temporary property from first list */ + ret = H5Premove(lid1, PROP4_NAME); + CHECK_I(ret, "H5Premove"); + + /* Check that the lists are not equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 0, "H5Pequal"); + + /* Remove first permanent property from second list */ + ret = H5Premove(lid2, PROP1_NAME); + CHECK_I(ret, "H5Premove"); + + /* Check that the lists are equal */ + ret = H5Pequal(lid1, lid2); + VERIFY(ret, 1, "H5Pequal"); + + /* Close property lists */ + ret = H5Pclose(lid1); + CHECK_I(ret, "H5Pclose"); + ret = H5Pclose(lid2); + CHECK_I(ret, "H5Pclose"); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); +} /* ent test_genprop_equal() */ + +/**************************************************************** +** +** test_genprop_path(): Test basic generic property list code. +** Tests for class paths +** +****************************************************************/ +static void +test_genprop_path(void) +{ + hid_t cid1; /* Generic Property class ID */ + hid_t cid2; /* Generic Property class ID */ +#if 0 + hid_t cid3; /* Generic Property class ID */ + char *path; /* Class path */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Generic Property List Class Path Functionality\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Insert first property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); +#if 0 + /* Get full path for first class */ + path = H5P__get_class_path_test(cid1); + CHECK_PTR(path, "H5P__get_class_path_test"); + if (HDstrcmp(path, CLASS1_PATH) != 0) + TestErrPrintf("Class names don't match!, path=%s, CLASS1_PATH=%s\n", path, CLASS1_PATH); + H5free_memory(path); +#endif + /* Create another new generic class, derived from first class */ + cid2 = H5Pcreate_class(cid1, CLASS2_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid2, "H5Pcreate_class"); + + /* Insert second property into class (with no callbacks) */ + ret = + H5Pregister2(cid2, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); +#if 0 + /* Get full path for second class */ + path = H5P__get_class_path_test(cid2); + CHECK_PTR(path, "H5P__get_class_path_test"); + if (HDstrcmp(path, CLASS2_PATH) != 0) + TestErrPrintf("Class names don't match!, path=%s, CLASS2_PATH=%s\n", path, CLASS2_PATH); + + /* Open a copy of the class with the path name */ + cid3 = H5P__open_class_path_test(path); + CHECK_I(cid3, "H5P__open_class_path_test"); + + /* Check that the classes are equal */ + ret = H5Pequal(cid2, cid3); + VERIFY(ret, 1, "H5Pequal"); + + /* Release the path string */ + H5free_memory(path); + + /* Close class */ + ret = 
H5Pclose_class(cid3); + CHECK_I(ret, "H5Pclose_class"); +#endif + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); + + /* Close class */ + ret = H5Pclose_class(cid2); + CHECK_I(ret, "H5Pclose_class"); + +} /* ent test_genprop_path() */ + +/**************************************************************** +** +** test_genprop_refcount(): Test basic generic property list code. +** Tests for correct reference counting +** +****************************************************************/ +static void +test_genprop_refcount(void) +{ + hid_t cid1; /* Generic Property class ID */ + hid_t lid1; /* Generic Property class ID */ + char *name; /* Name of class */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Generic Property List Reference Count Functionality\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Insert first property into class (with no callbacks) */ + ret = + H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister2"); + + /* Create a new generic list, derived from the root of the class hierarchy */ + lid1 = H5Pcreate(cid1); + CHECK_I(lid1, "H5Pcreate"); + + /* Check class name */ + name = H5Pget_class_name(cid1); + CHECK_PTR(name, "H5Pget_class_name"); + if (HDstrcmp(name, CLASS1_NAME) != 0) + TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME); + H5free_memory(name); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); + + /* Get the list's class */ + cid1 = H5Pget_class(lid1); + CHECK_I(cid1, "H5Pget_class"); + + /* Check correct "is a" class/list relationship */ + ret = H5Pisa_class(lid1, cid1); + VERIFY(ret, 1, "H5Pisa_class"); + + /* Check class name */ + name = H5Pget_class_name(cid1); + CHECK_PTR(name, "H5Pget_class_name"); + if (HDstrcmp(name, CLASS1_NAME) != 0) + TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME); + H5free_memory(name); + + /* Close list */ + ret = H5Pclose(lid1); + CHECK_I(ret, "H5Pclose"); + + /* Check class name */ + name = H5Pget_class_name(cid1); + CHECK_PTR(name, "H5Pget_class_name"); + if (HDstrcmp(name, CLASS1_NAME) != 0) + TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME); + H5free_memory(name); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); + +} /* ent test_genprop_refcount() */ + +#ifndef H5_NO_DEPRECATED_SYMBOLS +/**************************************************************** +** +** test_genprop_deprec_class(): Test basic generic property list code. +** Tests deprecated property class API routines. 
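+** Note: H5Pregister1 takes one fewer callback parameter than the H5Pregister2 calls used in the earlier tests (it has no property compare callback).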
+** +****************************************************************/ +static void +test_genprop_deprec_class(void) +{ + hid_t cid1; /* Generic Property class ID */ + size_t size; /* Size of property */ + size_t nprops; /* Number of properties in class */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Deprecated Generic Property List Functions\n")); + + /* Create a new generic class, derived from the root of the class hierarchy */ + cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(cid1, "H5Pcreate_class"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 0, "H5Pget_nprops"); + + /* Check the existence of the first property (should fail) */ + ret = H5Pexist(cid1, PROP1_NAME); + VERIFY(ret, 0, "H5Pexist"); + + /* Insert first property into class (with no callbacks) */ + ret = H5Pregister1(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister1"); + + /* Try to insert the first property again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Pregister1(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pregister1"); + + /* Check the existence of the first property */ + ret = H5Pexist(cid1, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check the size of the first property */ + ret = H5Pget_size(cid1, PROP1_NAME, &size); + CHECK_I(ret, "H5Pget_size"); + VERIFY(size, PROP1_SIZE, "H5Pget_size"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 1, "H5Pget_nprops"); + + /* Insert second property into class (with no callbacks) */ + ret = H5Pregister1(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister1"); + + /* Try to insert the second property again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Pregister1(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pregister1"); + + /* Check the existence of the second property */ + ret = H5Pexist(cid1, PROP2_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check the size of the second property */ + ret = H5Pget_size(cid1, PROP2_NAME, &size); + CHECK_I(ret, "H5Pget_size"); + VERIFY(size, PROP2_SIZE, "H5Pget_size"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 2, "H5Pget_nprops"); + + /* Insert third property into class (with no callbacks) */ + ret = H5Pregister1(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pregister1"); + + /* Check the existence of the third property */ + ret = H5Pexist(cid1, PROP3_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check the size of the third property */ + ret = H5Pget_size(cid1, PROP3_NAME, &size); + CHECK_I(ret, "H5Pget_size"); + VERIFY(size, PROP3_SIZE, "H5Pget_size"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 3, "H5Pget_nprops"); + + /* Unregister first property */ + ret = H5Punregister(cid1, PROP1_NAME); + CHECK_I(ret, "H5Punregister"); + + /* Try to check the size of the first property (should fail) */ + H5E_BEGIN_TRY + { + ret = 
H5Pget_size(cid1, PROP1_NAME, &size); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pget_size"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 2, "H5Pget_nprops"); + + /* Unregister second property */ + ret = H5Punregister(cid1, PROP2_NAME); + CHECK_I(ret, "H5Punregister"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 1, "H5Pget_nprops"); + + /* Unregister third property */ + ret = H5Punregister(cid1, PROP3_NAME); + CHECK_I(ret, "H5Punregister"); + + /* Check the number of properties in class */ + ret = H5Pget_nprops(cid1, &nprops); + CHECK_I(ret, "H5Pget_nprops"); + VERIFY(nprops, 0, "H5Pget_nprops"); + + /* Close class */ + ret = H5Pclose_class(cid1); + CHECK_I(ret, "H5Pclose_class"); +} /* end test_genprop_deprec_class() */ + +/**************************************************************** +** +** test_genprop_deprec2(): Test basic generic property list code. +** Tests deprecated property list API routines. +** +****************************************************************/ +static void +test_genprop_deprec_list(void) +{ + hid_t fid; /* File ID */ + hid_t did; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t pid; /* Property List ID */ + int prop1_value; /* Value for property #1 */ + herr_t ret; /* Generic return value */ + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create scalar dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create a dataset creation property list */ + pid = H5Pcreate(H5P_DATASET_CREATE); + CHECK(pid, FAIL, "H5Pcreate"); + + /* Insert temporary property into class (with no callbacks) */ + ret = H5Pinsert1(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL); + CHECK_I(ret, "H5Pinsert1"); + + /* Check existence of added property */ + ret = H5Pexist(pid, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check values of property (set with default value) */ + ret = H5Pget(pid, PROP1_NAME, &prop1_value); + CHECK_I(ret, "H5Pget"); + VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); + + /* Create a dataset */ + did = H5Dcreate2(fid, "Dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, pid, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check existence of added property (after using property list) */ + ret = H5Pexist(pid, PROP1_NAME); + VERIFY(ret, 1, "H5Pexist"); + + /* Check values of property (set with default value) (after using property list) */ + ret = H5Pget(pid, PROP1_NAME, &prop1_value); + CHECK_I(ret, "H5Pget"); + VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); + + /* Close property list */ + ret = H5Pclose(pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_genprop_deprec_list() */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + +/**************************************************************** +** +** test_genprop(): Main generic property testing routine. 
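+**      For orientation, the workflow the sub-tests below exercise boils down
+**      to the following sketch (error checking omitted; "size" and "def_value"
+**      are placeholders for a property's size and default value):
+**
+**          hid_t cls = H5Pcreate_class(H5P_ROOT, "class", NULL, NULL, NULL,
+**                                      NULL, NULL, NULL);
+**          H5Pregister2(cls, "prop", size, &def_value,
+**                       NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+**          hid_t plist = H5Pcreate(cls);
+**          H5Pclose(plist);
+**          H5Pclose_class(cls);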
+** +****************************************************************/ +void +test_genprop(void) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing Generic Properties\n")); + + /* These tests use the same file... */ + test_genprop_basic_class(); /* Test basic code for creating a generic class */ + test_genprop_basic_class_prop(); /* Test basic code for adding properties to a generic class */ + test_genprop_class_iter(); /* Test code for iterating over properties in a generic class */ + test_genprop_class_callback(); /* Test code for property class callbacks */ + + test_genprop_basic_list(); /* Test basic code for creating a generic property list */ + test_genprop_basic_list_prop(); /* Test basic code for adding properties to a generic property list */ + test_genprop_list_iter(); /* Test basic code for iterating over properties in a generic property list */ + test_genprop_list_callback(); /* Test code for property list callbacks */ + + test_genprop_list_addprop(); /* Test adding properties to HDF5 property list */ + test_genprop_class_addprop(); /* Test adding properties to HDF5 property class */ + + test_genprop_list_add_remove_prop(); /* Test adding and removing the same property several times to HDF5 + property list */ + + test_genprop_equal(); /* Tests for more H5Pequal verification */ + test_genprop_path(); /* Tests for class path verification */ + test_genprop_refcount(); /* Tests for class reference counting */ + +#ifndef H5_NO_DEPRECATED_SYMBOLS + test_genprop_deprec_class(); /* Tests for deprecated routines */ + test_genprop_deprec_list(); /* Tests for deprecated routines */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + +} /* test_genprop() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_genprop + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Quincey Koziol + * June 8, 1999 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +cleanup_genprop(void) +{ + H5Fdelete(FILENAME, H5P_DEFAULT); +} diff --git a/test/API/th5o.c b/test/API/th5o.c new file mode 100644 index 00000000000..916f005e9e4 --- /dev/null +++ b/test/API/th5o.c @@ -0,0 +1,1889 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: th5o + * + * Test public H5O functions for accessing + * + *************************************************************/ + +#include "testhdf5.h" + +#if 0 +#include "H5Fprivate.h" +#include "H5VLprivate.h" +#include "H5VLnative_private.h" +#endif + +#define TEST_FILENAME "th5o_file.h5" + +#define RANK 2 +#define DIM0 5 +#define DIM1 10 + +#define TEST6_DIM1 100 +#define TEST6_DIM2 100 + +/**************************************************************** +** +** test_h5o_open(): Test H5Oopen function. 
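+**      The pattern under test reduces to the sketch below ("fid" is an open
+**      file ID, as created in the test body; error checks omitted):
+**
+**          hid_t oid = H5Oopen(fid, "group/datatype", H5P_DEFAULT);
+**          if (oid >= 0)
+**              H5Oclose(oid);
+**
+**      where oid behaves as a regular group, datatype, or dataset ID.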
+** +****************************************************************/ +static void +test_h5o_open(void) +{ + hid_t fid; /* HDF5 File ID */ + hid_t grp, dset, dtype, dspace; /* Object identifiers */ + char filename[1024]; + hsize_t dims[RANK]; + H5I_type_t id_type; /* Type of IDs returned from H5Oopen */ + H5G_info_t ginfo; /* Group info struct */ + H5T_class_t type_class; /* Class of the datatype */ + herr_t ret; /* Value returned from API calls */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Oopen\n")); + + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + + /* Create a new HDF5 file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create a group, dataset, and committed datatype within the file */ + /* Create the group */ + grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gcreate2"); + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + + /* Commit the type inside the group */ + dtype = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create the data space for the dataset. */ + dims[0] = DIM0; + dims[1] = DIM1; + dspace = H5Screate_simple(RANK, dims, NULL); + CHECK(dspace, FAIL, "H5Screate_simple"); + + /* Create the dataset. */ + dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Now make sure that H5Oopen can open all three types of objects */ + grp = H5Oopen(fid, "group", H5P_DEFAULT); + CHECK(grp, FAIL, "H5Oopen"); + dtype = H5Oopen(fid, "group/datatype", H5P_DEFAULT); + CHECK(dtype, FAIL, "H5Oopen"); + /* Check that we can use the group as a valid location */ + dset = H5Oopen(grp, "/dataset", H5P_DEFAULT); + CHECK(dset, FAIL, "H5Oopen"); + + /* Make sure that each is the right kind of ID */ + id_type = H5Iget_type(grp); + VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID"); + id_type = H5Iget_type(dtype); + VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID"); + id_type = H5Iget_type(dset); + VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID"); + + /* Do something more complex with each of the IDs to make sure they "work" */ + ret = H5Gget_info(grp, &ginfo); + CHECK(ret, FAIL, "H5Gget_info"); + VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */ + + type_class = H5Tget_class(dtype); + VERIFY(type_class, H5T_INTEGER, "H5Tget_class"); + + dspace = H5Dget_space(dset); + CHECK(dspace, FAIL, "H5Dget_space"); + + /* Close the IDs */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Trying to open objects with bogus names should fail gracefully */ + H5E_BEGIN_TRY + { + grp = H5Oopen(fid, "bogus_group", H5P_DEFAULT); + VERIFY(grp, FAIL, "H5Oopen"); + dtype = H5Oopen(fid, "group/bogus_datatype", H5P_DEFAULT); + VERIFY(dtype, FAIL, "H5Oopen"); + dset = H5Oopen(fid, "/bogus_dataset", H5P_DEFAULT); + VERIFY(dset, FAIL, "H5Oopen"); + } + H5E_END_TRY + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, 
FAIL, "H5Fclose"); + + /* Trying to open an object with a bogus file ID should fail */ + H5E_BEGIN_TRY + { + dset = H5Oopen(fid, "dataset", H5P_DEFAULT); + VERIFY(dset, FAIL, "H5Oopen"); + } + H5E_END_TRY +} /* test_h5o_open() */ + +/**************************************************************** +** +** test_h5o_close(): Test H5Oclose function. +** +****************************************************************/ +static void +test_h5o_close(void) +{ + hid_t fid; /* HDF5 File ID */ + hid_t grp, dset, dtype, dspace; /* Object identifiers */ + char filename[1024]; + hsize_t dims[RANK]; + herr_t ret; /* Value returned from API calls */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Oclose\n")); + + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + + /* Create a new HDF5 file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create a group, dataset, and committed datatype within the file */ + /* Create the group and close it with H5Oclose */ + grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gcreate2"); + VERIFY_TYPE(H5Iget_type(grp), H5I_GROUP, H5I_type_t, "%d", "H5Iget_type"); + ret = H5Oclose(grp); + CHECK(ret, FAIL, "H5Oclose"); + + /* Commit the type inside the group */ + dtype = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + ret = H5Oclose(dtype); + CHECK(ret, FAIL, "H5Oclose"); + + /* Create the data space for the dataset. */ + dims[0] = DIM0; + dims[1] = DIM1; + dspace = H5Screate_simple(RANK, dims, NULL); + CHECK(dspace, FAIL, "H5Screate_simple"); + + /* Create the dataset. */ + dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + ret = H5Oclose(dset); + CHECK(ret, FAIL, "H5Oclose"); + + /* Attempting to close the data space with H5Oclose should fail */ + H5E_BEGIN_TRY + { + ret = H5Oclose(dspace); + VERIFY(ret, FAIL, "H5Oclose"); + } + H5E_END_TRY + /* Close the dataspace for real */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Make sure that H5Oclose can close objects opened with H5Oopen */ + grp = H5Oopen(fid, "group", H5P_DEFAULT); + CHECK(grp, FAIL, "H5Oopen"); + dtype = H5Oopen(fid, "group/datatype", H5P_DEFAULT); + CHECK(dtype, FAIL, "H5Oopen"); + dset = H5Oopen(fid, "dataset", H5P_DEFAULT); + CHECK(dset, FAIL, "H5Oopen"); + + ret = H5Oclose(grp); + CHECK(ret, FAIL, "H5Oclose"); + ret = H5Oclose(dtype); + CHECK(ret, FAIL, "H5Oclose"); + ret = H5Oclose(dset); + CHECK(ret, FAIL, "H5Oclose"); + + /* Make sure H5Oclose can close objects opened with H5*open */ + grp = H5Gopen2(fid, "group", H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gopen2"); + dtype = H5Topen2(fid, "group/datatype", H5P_DEFAULT); + CHECK(dtype, FAIL, "H5Topen2"); + dset = H5Dopen2(fid, "dataset", H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dopen2"); + + ret = H5Oclose(grp); + CHECK(ret, FAIL, "H5Oclose"); + ret = H5Oclose(dtype); + CHECK(ret, FAIL, "H5Oclose"); + ret = H5Oclose(dset); + CHECK(ret, FAIL, "H5Oclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} + +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS +/**************************************************************** +** +** test_h5o_open_by_addr(): Test H5Oopen_by_addr function. 
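+**      In outline, the open-by-address path exercised here is ("fid" is an
+**      open file ID; sketch only, error checks omitted):
+**
+**          H5L_info2_t li;
+**          haddr_t     addr;
+**          H5Lget_info2(fid, "group", &li, H5P_DEFAULT);
+**          H5VLnative_token_to_addr(fid, li.u.token, &addr);
+**          hid_t grp = H5Oopen_by_addr(fid, addr);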
+** +****************************************************************/ +static void +test_h5o_open_by_addr(void) +{ + hid_t fid; /* HDF5 File ID */ + hid_t grp, dset, dtype, dspace; /* Object identifiers */ + char filename[1024]; + H5L_info2_t li; /* Buffer for H5Lget_info2 */ + haddr_t grp_addr; /* Addresses for objects */ + haddr_t dset_addr; + haddr_t dtype_addr; + hsize_t dims[RANK]; + H5I_type_t id_type; /* Type of IDs returned from H5Oopen */ + H5G_info_t ginfo; /* Group info struct */ + H5T_class_t type_class; /* Class of the datatype */ + herr_t ret; /* Value returned from API calls */ + + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + + /* Create a new HDF5 file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create a group, dataset, and committed datatype within the file */ + /* Create the group */ + grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gcreate2"); + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + + /* Commit the type inside the group */ + dtype = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create the data space for the dataset. */ + dims[0] = DIM0; + dims[1] = DIM1; + dspace = H5Screate_simple(RANK, dims, NULL); + CHECK(dspace, FAIL, "H5Screate_simple"); + + /* Create the dataset. */ + dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Get address for each object */ + ret = H5Lget_info2(fid, "group", &li, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info2"); + ret = H5VLnative_token_to_addr(fid, li.u.token, &grp_addr); + CHECK(ret, FAIL, "H5VLnative_token_to_addr"); + + ret = H5Lget_info2(fid, "group/datatype", &li, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info2"); + ret = H5VLnative_token_to_addr(fid, li.u.token, &dtype_addr); + CHECK(ret, FAIL, "H5VLnative_token_to_addr"); + + ret = H5Lget_info2(fid, "dataset", &li, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info2"); + ret = H5VLnative_token_to_addr(fid, li.u.token, &dset_addr); + CHECK(ret, FAIL, "H5VLnative_token_to_addr"); + + /* Now make sure that H5Oopen_by_addr can open all three types of objects */ + grp = H5Oopen_by_addr(fid, grp_addr); + CHECK(grp, FAIL, "H5Oopen_by_addr"); + dtype = H5Oopen_by_addr(fid, dtype_addr); + CHECK(dtype, FAIL, "H5Oopen_by_addr"); + /* Check that we can use the group ID as a valid location */ + dset = H5Oopen_by_addr(grp, dset_addr); + CHECK(dset, FAIL, "H5Oopen_by_addr"); + + /* Make sure that each is the right kind of ID */ + id_type = H5Iget_type(grp); + VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID"); + id_type = H5Iget_type(dtype); + VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID"); + id_type = H5Iget_type(dset); + VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID"); + + /* Do something more complex with each of the IDs to make sure they "work" */ + ret = H5Gget_info(grp, &ginfo); + CHECK(ret, FAIL, "H5Gget_info"); + VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */ + + type_class = H5Tget_class(dtype); + VERIFY(type_class, H5T_INTEGER, "H5Tget_class"); + + dspace = H5Dget_space(dset); + 
CHECK(dspace, FAIL, "H5Dget_space"); + + /* Close the IDs */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Try giving some bogus values to H5O_open_by_addr. */ + /* Try to open an object with a bad address */ + grp_addr += 20; + H5E_BEGIN_TRY + { + grp = H5Oopen_by_addr(fid, grp_addr); + } + H5E_END_TRY + VERIFY(grp, FAIL, "H5Oopen_by_addr"); + + /* For instance, an objectno smaller than the end of the file's superblock should + * trigger an error */ + grp_addr = 10; + H5E_BEGIN_TRY + { + grp = H5Oopen_by_addr(fid, grp_addr); + } + H5E_END_TRY + VERIFY(grp, FAIL, "H5Oopen_by_addr"); + + /* Likewise, an objectno larger than the size of the file should fail */ + grp_addr = 0; + grp_addr = 1000000000; + H5E_BEGIN_TRY + { + grp = H5Oopen_by_addr(fid, grp_addr); + } + H5E_END_TRY + VERIFY(grp, FAIL, "H5Oopen_by_addr"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Also, trying to open an object without a valid location should fail */ + H5E_BEGIN_TRY + { + dtype = H5Oopen_by_addr(fid, dtype_addr); + } + H5E_END_TRY + VERIFY(dtype, FAIL, "H5Oopen_by_addr"); +} /* test_h5o_open_by_addr() */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif + +/**************************************************************** +** +** test_h5o_open_by_token(): Test H5Oopen_by_token function. +** +****************************************************************/ +static void +test_h5o_open_by_token(void) +{ + hid_t fid; /* HDF5 File ID */ + hid_t grp, dset, dtype, dspace; /* Object identifiers */ + char filename[1024]; + H5L_info2_t li; /* Buffer for H5Lget_info */ + hsize_t dims[RANK]; + H5I_type_t id_type; /* Type of IDs returned from H5Oopen */ + H5G_info_t ginfo; /* Group info struct */ + H5T_class_t type_class; /* Class of the datatype */ + herr_t ret; /* Value returned from API calls */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Oopen_by_token\n")); + + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + + /* Create a new HDF5 file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create a group, dataset, and committed datatype within the file */ + /* Create the group */ + grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gcreate2"); + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + + /* Commit the type inside the group */ + dtype = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create the data space for the dataset. */ + dims[0] = DIM0; + dims[1] = DIM1; + dspace = H5Screate_simple(RANK, dims, NULL); + CHECK(dspace, FAIL, "H5Screate_simple"); + + /* Create the dataset. 
*/ + dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Make sure that H5Oopen_by_token can open all three types of objects */ + ret = H5Lget_info2(fid, "group", &li, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info"); + grp = H5Oopen_by_token(fid, li.u.token); + CHECK(grp, FAIL, "H5Oopen_by_token"); + + ret = H5Lget_info2(fid, "group/datatype", &li, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info"); + dtype = H5Oopen_by_token(fid, li.u.token); + CHECK(dtype, FAIL, "H5Oopen_by_token"); + + ret = H5Lget_info2(fid, "dataset", &li, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info"); + /* Check that we can use the group ID as a valid location */ + dset = H5Oopen_by_token(grp, li.u.token); + CHECK(dset, FAIL, "H5Oopen_by_token"); + + /* Make sure that each is the right kind of ID */ + id_type = H5Iget_type(grp); + VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID"); + id_type = H5Iget_type(dtype); + VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID"); + id_type = H5Iget_type(dset); + VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID"); + + /* Do something more complex with each of the IDs to make sure they "work" */ + ret = H5Gget_info(grp, &ginfo); + CHECK(ret, FAIL, "H5Gget_info"); + VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */ + + type_class = H5Tget_class(dtype); + VERIFY(type_class, H5T_INTEGER, "H5Tget_class"); + + dspace = H5Dget_space(dset); + CHECK(dspace, FAIL, "H5Dget_space"); + + /* Close the IDs */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Try giving some bogus values to H5O_open_by_token */ + /* Try opening an object using H5O_TOKEN_UNDEF (should fail) */ + H5E_BEGIN_TRY + { + dtype = H5Oopen_by_token(fid, H5O_TOKEN_UNDEF); + } + H5E_END_TRY + VERIFY(dtype, FAIL, "H5Oopen_by_token"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Also, trying to open an object without a valid location (should fail) */ + H5E_BEGIN_TRY + { + dtype = H5Oopen_by_token(fid, li.u.token); + } + H5E_END_TRY + VERIFY(dtype, FAIL, "H5Oopen_by_token"); + +} /* test_h5o_open_by_token() */ + +/**************************************************************** +** +** test_h5o_refcount(): Test H5O refcounting functions. 
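+**      The round trip being verified is, in outline (sketch only; "fid" and
+**      "grp" are the file and group IDs created in the test body):
+**
+**          H5O_info2_t oinfo;
+**          H5Oincr_refcount(grp);
+**          H5Oget_info_by_name3(fid, "group", &oinfo,
+**                               H5O_INFO_BASIC, H5P_DEFAULT);
+**          H5Odecr_refcount(grp);
+**
+**      with oinfo.rc expected to read 2 between the increment and decrement.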
+** +****************************************************************/ +static void +test_h5o_refcount(void) +{ + hid_t fid; /* HDF5 File ID */ + hid_t grp, dset, dtype, dspace; /* Object identifiers */ + char filename[1024]; + H5O_info2_t oinfo; /* Object info struct */ + hsize_t dims[RANK]; + herr_t ret; /* Value returned from API calls */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing retrieval of object reference count with H5Oget_info\n")); + + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + + /* Create a new HDF5 file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create a group, dataset, and committed datatype within the file */ + /* Create the group */ + grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gcreate2"); + + /* Commit the type inside the group */ + dtype = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid, "datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Create the data space for the dataset. */ + dims[0] = DIM0; + dims[1] = DIM1; + dspace = H5Screate_simple(RANK, dims, NULL); + CHECK(dspace, FAIL, "H5Screate_simple"); + + /* Create the dataset. */ + dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Get ref counts for each object. They should all be 1, since each object has a hard link. */ + ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); + + /* Increment each object's reference count. */ + ret = H5Oincr_refcount(grp); + CHECK(ret, FAIL, "H5Oincr_refcount"); + ret = H5Oincr_refcount(dtype); + CHECK(ret, FAIL, "H5Oincr_refcount"); + ret = H5Oincr_refcount(dset); + CHECK(ret, FAIL, "H5Oincr_refcount"); + + /* Get ref counts for each object. They should all be 2 now. */ + ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); + + /* Decrement the reference counts and check that they decrease back to 1. 
*/ + ret = H5Odecr_refcount(grp); + CHECK(ret, FAIL, "H5Odecr_refcount"); + ret = H5Odecr_refcount(dtype); + CHECK(ret, FAIL, "H5Odecr_refcount"); + ret = H5Odecr_refcount(dset); + CHECK(ret, FAIL, "H5Odecr_refcount"); + + ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); + + /* Increment the reference counts and then close the file to make sure the increment is permanent */ + ret = H5Oincr_refcount(grp); + CHECK(ret, FAIL, "H5Oincr_refcount"); + ret = H5Oincr_refcount(dtype); + CHECK(ret, FAIL, "H5Oincr_refcount"); + ret = H5Oincr_refcount(dset); + CHECK(ret, FAIL, "H5Oincr_refcount"); + + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file and check that the reference counts were really incremented */ + fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + grp = H5Gopen2(fid, "group", H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gopen2"); + dtype = H5Topen2(fid, "datatype", H5P_DEFAULT); + CHECK(dtype, FAIL, "H5Topen2"); + dset = H5Dopen2(fid, "dataset", H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dopen2"); + + ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); + + /* Decrement the reference counts and close the file */ + ret = H5Odecr_refcount(grp); + CHECK(ret, FAIL, "H5Odecr_refcount"); + ret = H5Odecr_refcount(dtype); + CHECK(ret, FAIL, "H5Odecr_refcount"); + ret = H5Odecr_refcount(dset); + CHECK(ret, FAIL, "H5Odecr_refcount"); + + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file and check that the reference counts were really decremented */ + fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + grp = H5Gopen2(fid, "group", H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gopen2"); + dtype = H5Topen2(fid, "datatype", H5P_DEFAULT); + CHECK(dtype, FAIL, "H5Topen2"); + dset = H5Dopen2(fid, "dataset", H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dopen2"); + + ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, 
"H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); + + /* Close the IDs */ + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Make sure that bogus IDs return errors properly */ + H5E_BEGIN_TRY + { + ret = H5Oincr_refcount(grp); + VERIFY(ret, FAIL, "H5Oincr_refcount"); + ret = H5Oincr_refcount(dtype); + VERIFY(ret, FAIL, "H5Oincr_refcount"); + ret = H5Oincr_refcount(dset); + VERIFY(ret, FAIL, "H5Oincr_refcount"); + ret = H5Odecr_refcount(grp); + VERIFY(ret, FAIL, "H5Odecr_refcount"); + ret = H5Odecr_refcount(dtype); + VERIFY(ret, FAIL, "H5Odecr_refcount"); + ret = H5Odecr_refcount(dset); + VERIFY(ret, FAIL, "H5Odecr_refcount"); + } + H5E_END_TRY + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_h5o_refcount() */ + +/**************************************************************** +** +** test_h5o_plist(): Test object creation properties +** +****************************************************************/ +static void +test_h5o_plist(void) +{ + hid_t fid; /* HDF5 File ID */ + hid_t grp, dset, dtype, dspace; /* Object identifiers */ + hid_t fapl; /* File access property list */ + hid_t gcpl, dcpl, tcpl; /* Object creation properties */ + char filename[1024]; + unsigned def_max_compact, def_min_dense; /* Default phase change parameters */ + unsigned max_compact, min_dense; /* Actual phase change parameters */ + herr_t ret; /* Value returned from API calls */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Object creation properties\n")); + + /* Make a FAPL that uses the "use the latest version of the format" flag */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Set the "use the latest version of the format" bounds for creating objects in the file */ + ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + h5_fixname(TEST_FILENAME, fapl, filename, sizeof filename); + + /* Create a new HDF5 file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create group, dataset & named datatype creation property lists */ + gcpl = H5Pcreate(H5P_GROUP_CREATE); + CHECK(gcpl, FAIL, "H5Pcreate"); + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + tcpl = H5Pcreate(H5P_DATATYPE_CREATE); + CHECK(tcpl, FAIL, "H5Pcreate"); + + /* Retrieve default attribute phase change values */ + ret = H5Pget_attr_phase_change(gcpl, &def_max_compact, &def_min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + + /* Set non-default attribute phase change values on each creation property list */ + ret = H5Pset_attr_phase_change(gcpl, def_max_compact + 1, def_min_dense - 1); + CHECK(ret, FAIL, "H5Pset_attr_phase_change"); + ret = H5Pset_attr_phase_change(dcpl, def_max_compact + 1, def_min_dense - 1); + CHECK(ret, FAIL, "H5Pset_attr_phase_change"); + ret = H5Pset_attr_phase_change(tcpl, def_max_compact + 1, def_min_dense - 1); + CHECK(ret, FAIL, "H5Pset_attr_phase_change"); + + /* Retrieve attribute phase change values on each creation property list and verify */ + ret = H5Pget_attr_phase_change(gcpl, &max_compact, &min_dense); + 
CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); + VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); + VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); + ret = H5Pget_attr_phase_change(tcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); + VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); + + /* Create a group, dataset, and committed datatype within the file, + * using the respective type of creation property lists. + */ + + /* Create the group anonymously and link it in */ + grp = H5Gcreate_anon(fid, gcpl, H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gcreate_anon"); + ret = H5Olink(grp, fid, "group", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Olink"); + + /* Commit the type inside the group anonymously and link it in */ + dtype = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype, FAIL, "H5Tcopy"); + ret = H5Tcommit_anon(fid, dtype, tcpl, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit_anon"); + ret = H5Olink(dtype, fid, "datatype", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Olink"); + + /* Create the dataspace for the dataset. */ + dspace = H5Screate(H5S_SCALAR); + CHECK(dspace, FAIL, "H5Screate"); + + /* Create the dataset anonymously and link it in */ + dset = H5Dcreate_anon(fid, H5T_NATIVE_INT, dspace, dcpl, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate_anon"); + ret = H5Olink(dset, fid, "dataset", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Olink"); + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close current creation property lists */ + ret = H5Pclose(gcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(tcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Retrieve each object's creation property list */ + gcpl = H5Gget_create_plist(grp); + CHECK(gcpl, FAIL, "H5Gget_create_plist"); + tcpl = H5Tget_create_plist(dtype); + CHECK(tcpl, FAIL, "H5Tget_create_plist"); + dcpl = H5Dget_create_plist(dset); + CHECK(dcpl, FAIL, "H5Dget_create_plist"); + + /* Retrieve attribute phase change values on each creation property list and verify */ + ret = H5Pget_attr_phase_change(gcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); + VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); + VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); + ret = H5Pget_attr_phase_change(tcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); + VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); + + /* Close current objects */ + ret = H5Pclose(gcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(tcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + ret = 
H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file and check that the object creation properties persist */ + fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl); + CHECK(fid, FAIL, "H5Fopen"); + + /* Re-open objects */ + grp = H5Gopen2(fid, "group", H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gopen2"); + dtype = H5Topen2(fid, "datatype", H5P_DEFAULT); + CHECK(dtype, FAIL, "H5Topen2"); + dset = H5Dopen2(fid, "dataset", H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dopen2"); + + /* Retrieve each object's creation property list */ + gcpl = H5Gget_create_plist(grp); + CHECK(gcpl, FAIL, "H5Gget_create_plist"); + tcpl = H5Tget_create_plist(dtype); + CHECK(tcpl, FAIL, "H5Tget_create_plist"); + dcpl = H5Dget_create_plist(dset); + CHECK(dcpl, FAIL, "H5Dget_create_plist"); + + /* Retrieve attribute phase change values on each creation property list and verify */ + ret = H5Pget_attr_phase_change(gcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); + VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); + ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); + VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); + ret = H5Pget_attr_phase_change(tcpl, &max_compact, &min_dense); + CHECK(ret, FAIL, "H5Pget_attr_phase_change"); + VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); + VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); + + /* Close current objects */ + ret = H5Pclose(gcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(tcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close the FAPL */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); +} /* test_h5o_plist() */ + +/**************************************************************** +** +** test_h5o_link(): Test creating link to object +** +****************************************************************/ +static void +test_h5o_link(void) +{ + hid_t file_id = -1; + hid_t group_id = -1; + hid_t space_id = -1; + hid_t dset_id = -1; + hid_t type_id = -1; + hid_t fapl_id = -1; + hid_t lcpl_id = -1; + char filename[1024]; + hsize_t dims[2] = {TEST6_DIM1, TEST6_DIM2}; + htri_t committed; /* Whether the named datatype is committed */ + H5F_libver_t low, high; /* File format bounds */ + int *wdata; + int *rdata; + int i, n; + herr_t ret; /* Value returned from API calls */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Olink\n")); + + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + + /* Allocate memory buffers */ + /* (These are treated as 2-D buffers) */ + wdata = (int *)HDmalloc((size_t)(TEST6_DIM1 * TEST6_DIM2) * sizeof(int)); + CHECK_PTR(wdata, "HDmalloc"); + rdata = (int *)HDmalloc((size_t)(TEST6_DIM1 * TEST6_DIM2) * sizeof(int)); + CHECK_PTR(rdata, "HDmalloc"); + + /* Initialize the raw data */ + for (i = n = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++) + wdata[i] = n++; + + /* Create the dataspace */ + space_id = H5Screate_simple(2, dims, NULL); + CHECK(space_id, FAIL, 
"H5Screate_simple"); + + /* Create LCPL with intermediate group creation flag set */ + lcpl_id = H5Pcreate(H5P_LINK_CREATE); + CHECK(lcpl_id, FAIL, "H5Pcreate"); + ret = H5Pset_create_intermediate_group(lcpl_id, TRUE); + CHECK(ret, FAIL, "H5Pset_create_intermediate_group"); + + /* Create a file access property list */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl_id, FAIL, "H5Pcreate"); + + /* Loop through all the combinations of low/high library format bounds */ + for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { + for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { + + /* Set version bounds */ + H5E_BEGIN_TRY + { + ret = H5Pset_libver_bounds(fapl_id, low, high); + } + H5E_END_TRY; + + if (ret < 0) /* Invalid low/high combinations */ + continue; + + /* Create a new HDF5 file */ + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + CHECK(file_id, FAIL, "H5Fcreate"); + + /* Close the FAPL */ + ret = H5Pclose(fapl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Create and commit a datatype with no name */ + type_id = H5Tcopy(H5T_NATIVE_INT); + CHECK(type_id, FAIL, "H5Fcreate"); + ret = H5Tcommit_anon(file_id, type_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit_anon"); + committed = H5Tcommitted(type_id); + VERIFY(committed, TRUE, "H5Tcommitted"); + + /* Create a dataset with no name using the committed datatype*/ + dset_id = H5Dcreate_anon(file_id, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dcreate_anon"); + + /* Verify that we can write to and read from the dataset */ + + /* Write the data to the dataset */ + ret = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read the data back */ + ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the data */ + for (i = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++) + VERIFY(wdata[i], rdata[i], "H5Dread"); + + /* Create a group with no name*/ + group_id = H5Gcreate_anon(file_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group_id, FAIL, "H5Gcreate_anon"); + + /* Link nameless datatype into nameless group */ + ret = H5Olink(type_id, group_id, "datatype", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Olink"); + + /* Link nameless dataset into nameless group with intermediate group */ + ret = H5Olink(dset_id, group_id, "inter_group/dataset", lcpl_id, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Olink"); + + /* Close IDs for dataset and datatype */ + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Tclose(type_id); + CHECK(ret, FAIL, "H5Tclose"); + + /* Re-open datatype using new link */ + type_id = H5Topen2(group_id, "datatype", H5P_DEFAULT); + CHECK(type_id, FAIL, "H5Topen2"); + + /* Link nameless group to root group and close the group ID*/ + ret = H5Olink(group_id, file_id, "/group", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Olink"); + ret = H5Gclose(group_id); + CHECK(ret, FAIL, "H5Gclose"); + + /* Open dataset through root group and verify its data */ + dset_id = H5Dopen2(file_id, "/group/inter_group/dataset", H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dopen2"); + + /* Read data from dataset */ + ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + for (i = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++) + VERIFY(wdata[i], rdata[i], "H5Dread"); + + /* Close open IDs */ + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Tclose(type_id); + CHECK(ret, FAIL, 
"H5Tclose"); + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + } /* for high */ + } /* for low */ + + /* Close remaining IDs */ + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Pclose(lcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Release buffers */ + HDfree(wdata); + HDfree(rdata); +} /* end test_h5o_link() */ + +#if 0 +/**************************************************************** +** +** test_h5o_comment(): Test H5Oset(get)_comment functions. +** +****************************************************************/ +static void +test_h5o_comment(void) +{ + hid_t fid; /* HDF5 File ID */ + hid_t grp, dset, dtype, dspace; /* Object identifiers */ + hid_t attr_space, attr_id; + char filename[1024]; + hsize_t dims[RANK]; + hsize_t attr_dims = 1; + int attr_value = 5; + const char *file_comment = "file comment"; + const char *grp_comment = "group comment"; + const char *dset_comment = "dataset comment"; + const char *dtype_comment = "datatype comment"; + char check_comment[64]; + ssize_t comment_len = 0; + ssize_t len; + herr_t ret; /* Value returned from API calls */ + int ret_value; + + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + + /* Create a new HDF5 file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create an attribute for the file */ + attr_space = H5Screate_simple(1, &attr_dims, NULL); + CHECK(attr_space, FAIL, "H5Screate_simple"); + attr_id = H5Acreate2(fid, "file attribute", H5T_NATIVE_INT, attr_space, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Acreate2"); + ret = H5Awrite(attr_id, H5T_NATIVE_INT, &attr_value); + CHECK(ret, FAIL, "H5Awrite"); + + /* Putting a comment on the file through its attribute */ + ret = H5Oset_comment(attr_id, file_comment); + CHECK(ret, FAIL, "H5Oset_comment"); + + ret = H5Sclose(attr_space); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + + /* Create a group, dataset, and committed datatype within the file */ + /* Create the group */ + grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gcreate2"); + + /* Putting a comment on the group */ + ret = H5Oset_comment(grp, grp_comment); + CHECK(ret, FAIL, "H5Oset_comment"); + + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + + /* Commit the type inside the group */ + dtype = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Putting a comment on the committed data type */ + ret = H5Oset_comment(dtype, dtype_comment); + CHECK(ret, FAIL, "H5Oset_comment"); + + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create the data space for the dataset. */ + dims[0] = DIM0; + dims[1] = DIM1; + dspace = H5Screate_simple(RANK, dims, NULL); + CHECK(dspace, FAIL, "H5Screate_simple"); + + /* Create the dataset. */ + dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + + /* Putting a comment on the dataset */ + ret = H5Oset_comment(dset, dset_comment); + CHECK(ret, FAIL, "H5Oset_comment"); + + /* Putting a comment on the dataspace. It's supposed to fail. 
*/ + H5E_BEGIN_TRY + { + ret = H5Oset_comment(dspace, "dataspace comment"); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Oset_comment"); + + /* Close the file */ + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Now make sure that the comments are correct all 4 types of objects */ + /* Open file */ + fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Getting the comment on the file and verify it */ + comment_len = H5Oget_comment(fid, NULL, (size_t)0); + CHECK(comment_len, FAIL, "H5Oget_comment"); + + len = H5Oget_comment(fid, check_comment, (size_t)comment_len + 1); + CHECK(len, FAIL, "H5Oget_comment"); + + ret_value = HDstrcmp(file_comment, check_comment); + VERIFY(ret_value, 0, "H5Oget_comment"); + + /* Open the group */ + grp = H5Gopen2(fid, "group", H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gopen2"); + + /* Getting the comment on the group and verify it */ + comment_len = H5Oget_comment(grp, NULL, (size_t)0); + CHECK(comment_len, FAIL, "H5Oget_comment"); + + len = H5Oget_comment(grp, check_comment, (size_t)comment_len + 1); + CHECK(len, FAIL, "H5Oget_comment"); + + ret_value = HDstrcmp(grp_comment, check_comment); + VERIFY(ret_value, 0, "H5Oget_comment"); + + /* Open the datatype */ + dtype = H5Topen2(fid, "group/datatype", H5P_DEFAULT); + CHECK(dtype, FAIL, "H5Topen2"); + + /* Getting the comment on the datatype and verify it */ + comment_len = H5Oget_comment(dtype, NULL, (size_t)0); + CHECK(comment_len, FAIL, "H5Oget_comment"); + + len = H5Oget_comment(dtype, check_comment, (size_t)comment_len + 1); + CHECK(len, FAIL, "H5Oget_comment"); + + ret_value = HDstrcmp(dtype_comment, check_comment); + VERIFY(ret_value, 0, "H5Oget_comment"); + + /* Open the dataset */ + dset = H5Dopen2(fid, "dataset", H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dopen2"); + + /* Getting the comment on the dataset and verify it */ + comment_len = H5Oget_comment(dset, NULL, (size_t)0); + CHECK(comment_len, FAIL, "H5Oget_comment"); + + len = H5Oget_comment(dset, check_comment, (size_t)comment_len + 1); + CHECK(ret, len, "H5Oget_comment"); + + ret_value = HDstrcmp(dset_comment, check_comment); + VERIFY(ret_value, 0, "H5Oget_comment"); + + /* Close the IDs */ + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* test_h5o_comment() */ + +/**************************************************************** +** +** test_h5o_comment_by_name(): Test H5Oset(get)_comment_by_name functions. 
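+**      The by-name variant follows the same set/size-query/read pattern,
+**      e.g. (sketch only; "fid" is an open file ID and "buf" a
+**      caller-supplied buffer of at least len + 1 bytes):
+**
+**          H5Oset_comment_by_name(fid, "group", "group comment", H5P_DEFAULT);
+**          ssize_t len = H5Oget_comment_by_name(fid, "group", NULL, (size_t)0,
+**                                               H5P_DEFAULT);
+**          H5Oget_comment_by_name(fid, "group", buf, (size_t)len + 1,
+**                                 H5P_DEFAULT);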
+** +****************************************************************/ +static void +test_h5o_comment_by_name(void) +{ + hid_t fid; /* HDF5 File ID */ + hid_t grp, dset, dtype, dspace; /* Object identifiers */ + hid_t attr_space, attr_id; + char filename[1024]; + hsize_t dims[RANK]; + hsize_t attr_dims = 1; + int attr_value = 5; + const char *file_comment = "file comment by name"; + const char *grp_comment = "group comment by name"; + const char *dset_comment = "dataset comment by name"; + const char *dtype_comment = "datatype comment by name"; + char check_comment[64]; + ssize_t comment_len = 0; + ssize_t len; + herr_t ret; /* Value returned from API calls */ + int ret_value; + + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + + /* Create a new HDF5 file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create an attribute for the file */ + attr_space = H5Screate_simple(1, &attr_dims, NULL); + CHECK(attr_space, FAIL, "H5Screate_simple"); + attr_id = H5Acreate2(fid, "file attribute", H5T_NATIVE_INT, attr_space, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Acreate2"); + ret = H5Awrite(attr_id, H5T_NATIVE_INT, &attr_value); + CHECK(ret, FAIL, "H5Awrite"); + + /* Putting a comment on the file through its attribute */ + ret = H5Oset_comment_by_name(attr_id, ".", file_comment, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oset_comment_by_name"); + + ret = H5Sclose(attr_space); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + + /* Create a group, dataset, and committed datatype within the file */ + /* Create the group */ + grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gcreate2"); + + /* Putting a comment on the group */ + ret = H5Oset_comment_by_name(fid, "group", grp_comment, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oset_comment_by_name"); + + /* Commit the type inside the group */ + dtype = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Putting a comment on the committed data type */ + ret = H5Oset_comment_by_name(grp, "datatype", dtype_comment, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oset_comment_by_name"); + + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create the data space for the dataset. */ + dims[0] = DIM0; + dims[1] = DIM1; + dspace = H5Screate_simple(RANK, dims, NULL); + CHECK(dspace, FAIL, "H5Screate_simple"); + + /* Create the dataset. */ + dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + + /* Putting a comment on the dataset */ + ret = H5Oset_comment_by_name(fid, "dataset", dset_comment, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oset_comment_by_name"); + + /* Putting a comment on the dataspace. It's supposed to fail. 
*/ + H5E_BEGIN_TRY + { + ret = H5Oset_comment_by_name(dspace, ".", "dataspace comment", H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Oset_comment"); + + /* Close the file */ + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Now make sure that the comments are correct all 4 types of objects */ + /* Open file */ + fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Getting the comment on the file and verify it */ + comment_len = H5Oget_comment_by_name(fid, ".", NULL, (size_t)0, H5P_DEFAULT); + CHECK(comment_len, FAIL, "H5Oget_comment_by_name"); + + len = H5Oget_comment_by_name(fid, ".", check_comment, (size_t)comment_len + 1, H5P_DEFAULT); + CHECK(len, FAIL, "H5Oget_comment_by_name"); + + ret_value = HDstrcmp(file_comment, check_comment); + VERIFY(ret_value, 0, "H5Oget_comment_by_name"); + + /* Open the group */ + grp = H5Gopen2(fid, "group", H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gopen2"); + + /* Getting the comment on the group and verify it */ + comment_len = H5Oget_comment_by_name(fid, "group", NULL, (size_t)0, H5P_DEFAULT); + CHECK(comment_len, FAIL, "H5Oget_comment_by_name"); + + len = H5Oget_comment_by_name(fid, "group", check_comment, (size_t)comment_len + 1, H5P_DEFAULT); + CHECK(len, FAIL, "H5Oget_comment_by_name"); + + ret_value = HDstrcmp(grp_comment, check_comment); + VERIFY(ret_value, 0, "H5Oget_comment_by_name"); + + /* Getting the comment on the datatype and verify it */ + comment_len = H5Oget_comment_by_name(grp, "datatype", NULL, (size_t)0, H5P_DEFAULT); + CHECK(comment_len, FAIL, "H5Oget_comment_by_name"); + + len = H5Oget_comment_by_name(grp, "datatype", check_comment, (size_t)comment_len + 1, H5P_DEFAULT); + CHECK(len, FAIL, "H5Oget_comment"); + + ret_value = HDstrcmp(dtype_comment, check_comment); + VERIFY(ret_value, 0, "H5Oget_comment_by_name"); + + /* Getting the comment on the dataset and verify it */ + comment_len = H5Oget_comment_by_name(fid, "dataset", NULL, (size_t)0, H5P_DEFAULT); + CHECK(comment_len, FAIL, "H5Oget_comment_by_name"); + + len = H5Oget_comment_by_name(fid, "dataset", check_comment, (size_t)comment_len + 1, H5P_DEFAULT); + CHECK(len, FAIL, "H5Oget_comment_by_name"); + + ret_value = HDstrcmp(dset_comment, check_comment); + VERIFY(ret_value, 0, "H5Oget_comment_by_name"); + + /* Close the IDs */ + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* test_h5o_comment_by_name() */ +#endif + +/**************************************************************** +** +** test_h5o_getinfo_same_file(): Test that querying the object info for +** objects in the same file will return the same file "number" +** +****************************************************************/ +static void +test_h5o_getinfo_same_file(void) +{ + hid_t fid1, fid2; /* HDF5 File ID */ + hid_t gid1, gid2; /* Group IDs */ + char filename[1024]; + H5O_info2_t oinfo1, oinfo2; /* Object info structs */ + herr_t ret; /* Value returned from API calls */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing H5Oget_info on objects in same file\n")); + + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + + /* Create a new HDF5 file */ + fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create two groups in the file */ + gid1 = 
H5Gcreate2(fid1, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gcreate2"); + gid2 = H5Gcreate2(fid1, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gcreate2"); + + /* Reset object info */ + HDmemset(&oinfo1, 0, sizeof(oinfo1)); + HDmemset(&oinfo2, 0, sizeof(oinfo2)); + + /* Query the object info for each object, through group IDs */ + ret = H5Oget_info3(gid1, &oinfo1, H5O_INFO_BASIC); + CHECK(ret, FAIL, "H5Oget_info3"); + ret = H5Oget_info3(gid2, &oinfo2, H5O_INFO_BASIC); + CHECK(ret, FAIL, "H5Oget_info3"); + + VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3"); + + /* Reset object info */ + HDmemset(&oinfo1, 0, sizeof(oinfo1)); + HDmemset(&oinfo2, 0, sizeof(oinfo2)); + + /* Query the object info for each object, by name */ + ret = H5Oget_info_by_name3(fid1, "group1", &oinfo1, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid1, "group2", &oinfo2, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + + VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3"); + + /* Close everything */ + ret = H5Gclose(gid1); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open file twice */ + fid1 = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + fid2 = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid2, FAIL, "H5Fopen"); + + /* Open the two groups in the file */ + gid1 = H5Gopen2(fid1, "group1", H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gopen2"); + gid2 = H5Gopen2(fid2, "group2", H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gopen2"); + + /* Reset object info */ + HDmemset(&oinfo1, 0, sizeof(oinfo1)); + HDmemset(&oinfo2, 0, sizeof(oinfo2)); + + /* Query the object info for each object, through group IDs */ + ret = H5Oget_info3(gid1, &oinfo1, H5O_INFO_BASIC); + CHECK(ret, FAIL, "H5Oget_info3"); + ret = H5Oget_info3(gid2, &oinfo2, H5O_INFO_BASIC); + CHECK(ret, FAIL, "H5Oget_info3"); + + VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3"); + + /* Reset object info */ + HDmemset(&oinfo1, 0, sizeof(oinfo1)); + HDmemset(&oinfo2, 0, sizeof(oinfo2)); + + /* Query the object info for each object, by name */ + ret = H5Oget_info_by_name3(fid1, "group1", &oinfo1, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(fid1, "group2", &oinfo2, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + + VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3"); + + /* Close everything */ + ret = H5Gclose(gid1); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(fid2); + CHECK(ret, FAIL, "H5Fclose"); + +} /* test_h5o_getinfo_same_file() */ + +#ifndef H5_NO_DEPRECATED_SYMBOLS +#if 0 +/**************************************************************** +** +** test_h5o_open_by_addr_deprec(): Test H5Oopen_by_addr function. 
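+**      This deprecated variant reads the raw address straight from the
+**      H5L_info1_t struct instead of converting a token, roughly:
+**
+**          H5L_info1_t li;
+**          H5Lget_info1(fid, "group", &li, H5P_DEFAULT);
+**          hid_t grp = H5Oopen_by_addr(fid, li.u.address);
+**
+**      (Sketch only; "fid" is an open file ID and error checking is omitted.)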
+** +****************************************************************/ +static void +test_h5o_open_by_addr_deprec(void) +{ + hid_t fid; /* HDF5 File ID */ + hid_t grp, dset, dtype, dspace; /* Object identifiers */ + char filename[1024]; + H5L_info1_t li; /* Buffer for H5Lget_info1 */ + haddr_t grp_addr; /* Addresses for objects */ + haddr_t dset_addr; + haddr_t dtype_addr; + hsize_t dims[RANK]; + H5I_type_t id_type; /* Type of IDs returned from H5Oopen */ + H5G_info_t ginfo; /* Group info struct */ + H5T_class_t type_class; /* Class of the datatype */ + herr_t ret; /* Value returned from API calls */ + + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + + /* Create a new HDF5 file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create a group, dataset, and committed datatype within the file */ + /* Create the group */ + grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp, FAIL, "H5Gcreate2"); + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + + /* Commit the type inside the group */ + dtype = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype, FAIL, "H5Tcopy"); + ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create the data space for the dataset. */ + dims[0] = DIM0; + dims[1] = DIM1; + dspace = H5Screate_simple(RANK, dims, NULL); + CHECK(dspace, FAIL, "H5Screate_simple"); + + /* Create the dataset. */ + dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Get address for each object */ + ret = H5Lget_info1(fid, "group", &li, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info"); + grp_addr = li.u.address; + ret = H5Lget_info1(fid, "group/datatype", &li, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info"); + dtype_addr = li.u.address; + ret = H5Lget_info1(fid, "dataset", &li, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info"); + dset_addr = li.u.address; + + /* Now make sure that H5Oopen_by_addr can open all three types of objects */ + grp = H5Oopen_by_addr(fid, grp_addr); + CHECK(grp, FAIL, "H5Oopen_by_addr"); + dtype = H5Oopen_by_addr(fid, dtype_addr); + CHECK(dtype, FAIL, "H5Oopen_by_addr"); + /* Check that we can use the group ID as a valid location */ + dset = H5Oopen_by_addr(grp, dset_addr); + CHECK(dset, FAIL, "H5Oopen_by_addr"); + + /* Make sure that each is the right kind of ID */ + id_type = H5Iget_type(grp); + VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID"); + id_type = H5Iget_type(dtype); + VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID"); + id_type = H5Iget_type(dset); + VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID"); + + /* Do something more complex with each of the IDs to make sure they "work" */ + ret = H5Gget_info(grp, &ginfo); + CHECK(ret, FAIL, "H5Gget_info"); + VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */ + + type_class = H5Tget_class(dtype); + VERIFY(type_class, H5T_INTEGER, "H5Tget_class"); + + dspace = H5Dget_space(dset); + CHECK(dspace, FAIL, "H5Dget_space"); + + /* Close the IDs */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + ret = 
H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Try giving some bogus values to H5O_open_by_addr. */ + /* Try to open an object with a bad address */ + grp_addr += 20; + H5E_BEGIN_TRY + { + grp = H5Oopen_by_addr(fid, grp_addr); + } + H5E_END_TRY + VERIFY(grp, FAIL, "H5Oopen_by_addr"); + + /* For instance, an objectno smaller than the end of the file's superblock should + * trigger an error */ + grp_addr = 10; + H5E_BEGIN_TRY + { + grp = H5Oopen_by_addr(fid, grp_addr); + } + H5E_END_TRY + VERIFY(grp, FAIL, "H5Oopen_by_addr"); + + /* Likewise, an objectno larger than the size of the file should fail */ + grp_addr = 0; + grp_addr = 1000000000; + H5E_BEGIN_TRY + { + grp = H5Oopen_by_addr(fid, grp_addr); + } + H5E_END_TRY + VERIFY(grp, FAIL, "H5Oopen_by_addr"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Also, trying to open an object without a valid location should fail */ + H5E_BEGIN_TRY + { + dtype = H5Oopen_by_addr(fid, dtype_addr); + } + H5E_END_TRY + VERIFY(dtype, FAIL, "H5Oopen_by_addr"); +} /* test_h5o_open_by_addr_deprec() */ + +/**************************************************************** +** +** visit_obj_cb(): +** This is the callback function invoked by H5Ovisit1() in +** test_h5o_getinfo_visit(): +** --Verify that the object info returned to the callback +** function is the same as H5Oget_info2(). +** +****************************************************************/ +static int +visit_obj_cb(hid_t group_id, const char *name, const H5O_info1_t *oinfo1, void H5_ATTR_UNUSED *_op_data) +{ + H5O_info1_t oinfo2; /* Object info structs */ + + /* Verify the object info for "group1", "group2" and the root group */ + if (!(HDstrcmp(name, "group1"))) { + H5Oget_info_by_name2(group_id, name, &oinfo2, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); + VERIFY(oinfo1->num_attrs, oinfo2.num_attrs, "obj info from H5Ovisit1"); + } + else if (!(HDstrcmp(name, "group2"))) { + H5Oget_info_by_name2(group_id, name, &oinfo2, H5O_INFO_HDR, H5P_DEFAULT); + VERIFY(oinfo1->hdr.nmesgs, oinfo2.hdr.nmesgs, "obj info from H5Ovisit1/H5Oget_info2"); + VERIFY(oinfo1->hdr.nchunks, oinfo2.hdr.nchunks, "obj info from H5Ovisit1/H5Oget_info2"); + } + else if (!(HDstrcmp(name, "."))) { + H5Oget_info_by_name2(group_id, name, &oinfo2, H5O_INFO_META_SIZE, H5P_DEFAULT); + VERIFY(oinfo1->meta_size.obj.index_size, oinfo2.meta_size.obj.index_size, + "obj info from H5Ovisit1/H5Oget_info2"); + VERIFY(oinfo1->meta_size.obj.heap_size, oinfo2.meta_size.obj.heap_size, + "obj info from H5Ovisit1/H5Oget_info2"); + } + + return (H5_ITER_CONT); +} /* end visit_obj_cb() */ + +/**************************************************************** +** +** test_h5o_getinfo_visit(): +** Verify that the object info returned via H5Oget_info1() +** and H5Oget_info2() are the same. +** Verify that the object info retrieved via H5Ovisit1() is +** the same as H5Oget_info2(). 
+** +****************************************************************/ +static void +test_h5o_getinfo_visit(void) +{ + hid_t fid = -1; /* HDF5 File ID */ + hid_t gid1 = -1, gid2 = -1; /* Group IDs */ + hid_t sid = -1; /* Dataspace ID */ + hid_t aid = -1; /* Attribute ID */ + char filename[1024]; + H5O_info1_t oinfo1, oinfo2; /* Object info structs */ + char attrname[25]; /* Attribute name */ + int j; /* Local index variable */ + herr_t ret; /* Value returned from API calls */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing info returned by H5Oget_info vs H5Ovisit\n")); + + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + + /* Create an HDF5 file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create "group1" in the file */ + gid1 = H5Gcreate2(fid, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gcreate2"); + + /* Create dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Attach 10 attributes to "group1" */ + for (j = 0; j < 10; j++) { + /* Create the attribute name */ + HDsnprintf(attrname, sizeof(attrname), "attr%u", j); + /* Create the attribute */ + aid = H5Acreate2(gid1, attrname, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + /* Close the attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + } + + /* Create "group2" in the file */ + gid2 = H5Gcreate2(fid, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gcreate2"); + + /* Reset object info */ + HDmemset(&oinfo1, 0, sizeof(oinfo1)); + HDmemset(&oinfo2, 0, sizeof(oinfo2)); + + /* Query the object info for "group1" via H5Oget_info1 and H5Oget_info2 */ + ret = H5Oget_info1(gid1, &oinfo1); + CHECK(ret, FAIL, "H5Oget_info1"); + ret = H5Oget_info2(gid1, &oinfo2, H5O_INFO_BASIC | H5O_INFO_NUM_ATTRS); + CHECK(ret, FAIL, "H5Oget_info2"); + + /* Verify the object info for "group1" is correct */ + VERIFY(oinfo1.fileno, oinfo2.fileno, "obj info from H5Oget_info1/2"); + VERIFY(oinfo1.num_attrs, oinfo2.num_attrs, "obj info from H5Oget_info1/2"); + + /* Reset object info */ + HDmemset(&oinfo1, 0, sizeof(oinfo1)); + HDmemset(&oinfo2, 0, sizeof(oinfo2)); + + /* Query the object info for "group2" via H5Oget_info1 and H5Oget_info2 */ + ret = H5Oget_info_by_name1(fid, "group2", &oinfo1, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + ret = H5Oget_info_by_name2(fid, "group2", &oinfo2, H5O_INFO_HDR | H5O_INFO_META_SIZE, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + + /* Verify the object info for "group2" is correct */ + VERIFY(oinfo1.hdr.nmesgs, oinfo2.hdr.nmesgs, "obj info from H5Oget_info1/2"); + VERIFY(oinfo1.hdr.nchunks, oinfo2.hdr.nchunks, "obj info from H5Oget_info1/2"); + VERIFY(oinfo1.meta_size.obj.index_size, oinfo2.meta_size.obj.index_size, "obj info from H5Oget_info1/2"); + VERIFY(oinfo1.meta_size.obj.heap_size, oinfo2.meta_size.obj.heap_size, "obj info from H5Oget_info1/2"); + + /* Close everything */ + ret = H5Gclose(gid1); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Verify the object info returned to the callback function is correct */ + ret = H5Ovisit1(fid, H5_INDEX_NAME, H5_ITER_INC, visit_obj_cb, NULL); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* test_h5o_getinfo_visit() */ +#endif +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + 
+/**************************************************************** +** +** test_h5o(): Main H5O (generic object) testing routine. +** +****************************************************************/ +void +test_h5o(void) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing Objects\n")); + + test_h5o_open(); /* Test generic open function */ +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS + test_h5o_open_by_addr(); /* Test opening objects by address */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif + test_h5o_open_by_token(); /* Test opening objects by token */ + test_h5o_close(); /* Test generic close function */ + test_h5o_refcount(); /* Test incrementing and decrementing reference count */ + test_h5o_plist(); /* Test object creation properties */ + test_h5o_link(); /* Test object link routine */ +#if 0 + test_h5o_comment(); /* Test routines for comment */ + test_h5o_comment_by_name(); /* Test routines for comment by name */ +#endif + test_h5o_getinfo_same_file(); /* Test info for objects in the same file */ +#ifndef H5_NO_DEPRECATED_SYMBOLS +#if 0 + test_h5o_open_by_addr_deprec(); /* Test opening objects by address with H5Lget_info1 */ + test_h5o_getinfo_visit(); /* Test object info for H5Oget_info1/2 and H5Ovisit1 */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif +} /* test_h5o() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_h5o + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: James Laird + * June 3, 2006 + * + *------------------------------------------------------------------------- + */ +void +cleanup_h5o(void) +{ + char filename[1024]; + + H5E_BEGIN_TRY + { + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + H5Fdelete(filename, H5P_DEFAULT); + } + H5E_END_TRY; +} diff --git a/test/API/th5s.c b/test/API/th5s.c new file mode 100644 index 00000000000..cb1c8992ba4 --- /dev/null +++ b/test/API/th5s.c @@ -0,0 +1,3538 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: th5s + * + * Test the dataspace functionality + * + *************************************************************/ + +#include "testhdf5.h" +/* #include "H5srcdir.h" */ + +/* #include "H5Iprivate.h" */ +/* #include "H5Pprivate.h" */ + +#if 0 +/* + * This file needs to access private information from the H5S package. + * This file also needs to access the dataspace testing code. + */ +#define H5S_FRIEND /*suppress error about including H5Spkg */ +#define H5S_TESTING /*suppress warning about H5S testing funcs*/ +#include "H5Spkg.h" /* Dataspaces */ + +/* + * This file needs to access private information from the H5O package. + * This file also needs to access the dataspace testing code. 
+ */ +#define H5O_FRIEND /*suppress error about including H5Opkg */ +#define H5O_TESTING +#include "H5Opkg.h" /* Object header */ +#endif + +#define TESTFILE "th5s.h5" +#define DATAFILE "th5s1.h5" +#define NULLFILE "th5s2.h5" +#define BASICFILE "th5s3.h5" +#define ZEROFILE "th5s4.h5" +#define BASICDATASET "basic_dataset" +#define BASICDATASET1 "basic_dataset1" +#define BASICDATASET2 "basic_dataset2" +#define BASICDATASET3 "basic_dataset3" +#define BASICDATASET4 "basic_dataset4" +#define BASICATTR "basic_attribute" +#define NULLDATASET "null_dataset" +#define NULLATTR "null_attribute" +#define EXTFILE_NAME "ext_file" + +/* 3-D dataset with fixed dimensions */ +#define SPACE1_RANK 3 +#define SPACE1_DIM1 3 +#define SPACE1_DIM2 15 +#define SPACE1_DIM3 13 + +/* 4-D dataset with one unlimited dimension */ +#define SPACE2_RANK 4 +#define SPACE2_DIM1 0 +#define SPACE2_DIM2 15 +#define SPACE2_DIM3 13 +#define SPACE2_DIM4 23 +#define SPACE2_MAX1 H5S_UNLIMITED +#define SPACE2_MAX2 15 +#define SPACE2_MAX3 13 +#define SPACE2_MAX4 23 + +/* Scalar dataset with simple datatype */ +#define SPACE3_RANK 0 +unsigned space3_data = 65; + +/* Scalar dataset with compound datatype */ +#define SPACE4_FIELDNAME1 "c1" +#define SPACE4_FIELDNAME2 "u" +#define SPACE4_FIELDNAME3 "f" +#define SPACE4_FIELDNAME4 "c2" +size_t space4_field1_off = 0; +size_t space4_field2_off = 0; +size_t space4_field3_off = 0; +size_t space4_field4_off = 0; +struct space4_struct { + char c1; + unsigned u; + float f; + char c2; +} space4_data = {'v', 987123, -3.14F, 'g'}; /* Test data for 4th dataspace */ + +/* + * Testing configuration defines used by: + * test_h5s_encode_regular_hyper() + * test_h5s_encode_irregular_hyper() + * test_h5s_encode_points() + */ +#define CONFIG_8 1 +#define CONFIG_16 2 +#define CONFIG_32 3 +#define POWER8 256 /* 2^8 */ +#define POWER16 65536 /* 2^16 */ +#define POWER32 4294967296 /* 2^32 */ + +/**************************************************************** +** +** test_h5s_basic(): Test basic H5S (dataspace) code. 
+** +****************************************************************/ +static void +test_h5s_basic(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t sid1, sid2; /* Dataspace ID */ + hid_t dset1; /* Dataset ID */ +#ifndef NO_VALIDATE_DATASPACE + hid_t aid1; /* Attribute ID */ +#endif + int rank; /* Logical rank of dataspace */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2, SPACE2_DIM3, SPACE2_DIM4}; + hsize_t dims3[H5S_MAX_RANK + 1]; + hsize_t max2[] = {SPACE2_MAX1, SPACE2_MAX2, SPACE2_MAX3, SPACE2_MAX4}; + hsize_t tdims[4]; /* Dimension array to test with */ + hsize_t tmax[4]; + hssize_t n; /* Number of dataspace elements */ +#if 0 + hbool_t driver_is_default_compatible; +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Dataspace Manipulation\n")); + + sid1 = H5Screate_simple(SPACE1_RANK, dims1, max2); + CHECK(sid1, FAIL, "H5Screate_simple"); + + n = H5Sget_simple_extent_npoints(sid1); + CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); + VERIFY(n, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints"); + + rank = H5Sget_simple_extent_ndims(sid1); + CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); + VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims"); + + rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); + CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); + VERIFY(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); + + sid2 = H5Screate_simple(SPACE2_RANK, dims2, max2); + CHECK(sid2, FAIL, "H5Screate_simple"); + + n = H5Sget_simple_extent_npoints(sid2); + CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); + VERIFY(n, SPACE2_DIM1 * SPACE2_DIM2 * SPACE2_DIM3 * SPACE2_DIM4, "H5Sget_simple_extent_npoints"); + + rank = H5Sget_simple_extent_ndims(sid2); + CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); + VERIFY(rank, SPACE2_RANK, "H5Sget_simple_extent_ndims"); + + rank = H5Sget_simple_extent_dims(sid2, tdims, tmax); + CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); + VERIFY(HDmemcmp(tdims, dims2, SPACE2_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); + VERIFY(HDmemcmp(tmax, max2, SPACE2_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); + + /* Change max dims to be equal to the dimensions */ + ret = H5Sset_extent_simple(sid1, SPACE1_RANK, dims1, NULL); + CHECK(ret, FAIL, "H5Sset_extent_simple"); + rank = H5Sget_simple_extent_dims(sid1, tdims, tmax); + CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); + VERIFY(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); + VERIFY(HDmemcmp(tmax, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); + + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* + * Check to be sure we can't create a simple dataspace that has too many + * dimensions. + */ + H5E_BEGIN_TRY + { + sid1 = H5Screate_simple(H5S_MAX_RANK + 1, dims3, NULL); + } + H5E_END_TRY; + VERIFY(sid1, FAIL, "H5Screate_simple"); +#if 0 + /* + * Try reading a file that has been prepared that has a dataset with a + * higher dimensionality than what the library can handle. + * + * If this test fails and the H5S_MAX_RANK variable has changed, follow + * the instructions in space_overflow.c for regenerating the th5s.h5 file. 
+ */ + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); + CHECK_I(ret, "h5_driver_is_default_vfd_compatible"); + + if (driver_is_default_compatible) { + const char *testfile = H5_get_srcdir_filename(TESTFILE); /* Corrected test file name */ + + fid1 = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK_I(fid1, "H5Fopen"); + if (fid1 >= 0) { + dset1 = H5Dopen2(fid1, "dset", H5P_DEFAULT); + VERIFY(dset1, FAIL, "H5Dopen2"); + ret = H5Fclose(fid1); + CHECK_I(ret, "H5Fclose"); + } + else + HDprintf("***cannot open the pre-created H5S_MAX_RANK test file (%s)\n", testfile); + } +#endif + /* Verify that incorrect dimensions don't work */ + dims1[0] = H5S_UNLIMITED; + H5E_BEGIN_TRY + { + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + } + H5E_END_TRY; + VERIFY(sid1, FAIL, "H5Screate_simple"); + + dims1[0] = H5S_UNLIMITED; + sid1 = H5Screate(H5S_SIMPLE); + CHECK(sid1, FAIL, "H5Screate"); + + H5E_BEGIN_TRY + { + ret = H5Sset_extent_simple(sid1, SPACE1_RANK, dims1, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sset_extent_simple"); + + ret = H5Sclose(sid1); + CHECK_I(ret, "H5Sclose"); + + /* + * Try writing simple dataspaces without setting their extents + */ + /* Create the file */ + fid1 = H5Fcreate(BASICFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + dims1[0] = SPACE1_DIM1; + + sid1 = H5Screate(H5S_SIMPLE); + CHECK(sid1, FAIL, "H5Screate"); + sid2 = H5Screate_simple(1, dims1, dims1); + CHECK(sid2, FAIL, "H5Screate"); +#ifndef NO_VALIDATE_DATASPACE + /* This dataset's space has no extent; it should not be created */ + H5E_BEGIN_TRY + { + dset1 = H5Dcreate2(fid1, BASICDATASET, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY + VERIFY(dset1, FAIL, "H5Dcreate2"); +#endif + dset1 = H5Dcreate2(fid1, BASICDATASET2, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + + /* Try some writes with the bad dataspace (sid1) */ + H5E_BEGIN_TRY + { + ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, &n); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Dwrite"); +#ifndef NO_VALIDATE_DATASPACE + H5E_BEGIN_TRY + { + ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, sid1, H5P_DEFAULT, &n); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Dwrite"); + + H5E_BEGIN_TRY + { + ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, sid1, H5P_DEFAULT, &n); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Dwrite"); +#endif + /* Try to iterate using the bad dataspace */ + H5E_BEGIN_TRY + { + ret = H5Diterate(&n, H5T_NATIVE_INT, sid1, NULL, NULL); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Diterate"); + + /* Try to fill using the bad dataspace */ + H5E_BEGIN_TRY + { + ret = H5Dfill(NULL, H5T_NATIVE_INT, &n, H5T_NATIVE_INT, sid1); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Dfill"); +#ifndef NO_VALIDATE_DATASPACE + /* Now use the bad dataspace as the space for an attribute */ + H5E_BEGIN_TRY + { + aid1 = H5Acreate2(dset1, BASICATTR, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY + VERIFY(aid1, FAIL, "H5Acreate2"); +#endif + /* Make sure that dataspace reads using the bad dataspace fail */ + H5E_BEGIN_TRY + { + ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, &n); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Dread"); +#ifndef NO_VALIDATE_DATASPACE + H5E_BEGIN_TRY + { + ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, sid1, H5P_DEFAULT, &n); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Dread"); + + H5E_BEGIN_TRY + { + ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, sid1, 
H5P_DEFAULT, &n); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Dread"); +#endif + /* Clean up */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_h5s_basic() */ + +/**************************************************************** +** +** test_h5s_null(): Test NULL dataspace +** +****************************************************************/ +static void +test_h5s_null(void) +{ + hid_t fid; /* File ID */ + hid_t sid; /* Dataspace IDs */ + hid_t dset_sid, dset_sid2; /* Dataspace IDs */ + hid_t attr_sid; /* Dataspace IDs */ + hid_t did; /* Dataset ID */ + hid_t attr; /*Attribute ID */ + H5S_class_t stype; /* dataspace type */ + hssize_t nelem; /* Number of elements */ + unsigned uval = 2; /* Buffer for writing to dataset */ + int val = 1; /* Buffer for writing to attribute */ + H5S_sel_type sel_type; /* Type of selection currently */ + hsize_t dims[1] = {10}; /* Dimensions for converting null dataspace to simple */ + H5S_class_t space_type; /* Type of dataspace */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Null Dataspace\n")); + + /* Create the file */ + fid = H5Fcreate(NULLFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + sid = H5Screate(H5S_NULL); + CHECK(sid, FAIL, "H5Screate"); + + /* Check that the null dataspace actually has 0 elements */ + nelem = H5Sget_simple_extent_npoints(sid); + VERIFY(nelem, 0, "H5Sget_simple_extent_npoints"); + + /* Check that the dataspace was created with an "all" selection */ + sel_type = H5Sget_select_type(sid); + VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type"); + + /* Check that the null dataspace has 0 elements selected */ + nelem = H5Sget_select_npoints(sid); + VERIFY(nelem, 0, "H5Sget_select_npoints"); + + /* Change to "none" selection */ + ret = H5Sselect_none(sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Check that the null dataspace has 0 elements selected */ + nelem = H5Sget_select_npoints(sid); + VERIFY(nelem, 0, "H5Sget_select_npoints"); + + /* Check to be sure we can't set a hyperslab selection on a null dataspace */ + H5E_BEGIN_TRY + { + hsize_t start[1] = {0}; + hsize_t count[1] = {0}; + + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + + /* Check to be sure we can't set a point selection on a null dataspace */ + H5E_BEGIN_TRY + { + hsize_t coord[1][1]; /* Coordinates for point selection */ + + coord[0][0] = 0; + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sselect_elements"); + + /* Create first dataset */ + did = H5Dcreate2(fid, NULLDATASET, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Write "nothing" to the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &uval); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Write "nothing" to the dataset (with type conversion :-) */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Try reading from the dataset (make certain our buffer is unmodified) */ + ret = H5Dread(did, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &uval); + CHECK(ret, FAIL, "H5Dread"); + VERIFY(uval, 2, "H5Dread"); + + /* Try 
reading from the dataset (with type conversion :-) (make certain our buffer is unmodified) */ + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val); + CHECK(ret, FAIL, "H5Dread"); + VERIFY(val, 1, "H5Dread"); + + /* Create an attribute for the group */ + attr = H5Acreate2(did, NULLATTR, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write "nothing" to the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_INT, &val); + CHECK(ret, FAIL, "H5Awrite"); + + /* Write "nothing" to the attribute (with type conversion :-) */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, &uval); + CHECK(ret, FAIL, "H5Awrite"); + + /* Try reading from the attribute (make certain our buffer is unmodified) */ + ret = H5Aread(attr, H5T_NATIVE_INT, &val); + CHECK(ret, FAIL, "H5Aread"); + VERIFY(val, 1, "H5Aread"); + + /* Try reading from the attribute (with type conversion :-) (make certain our buffer is unmodified) */ + ret = H5Aread(attr, H5T_NATIVE_UINT, &uval); + CHECK(ret, FAIL, "H5Aread"); + VERIFY(uval, 2, "H5Aread"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Verify that we've got the right kind of dataspace */ + space_type = H5Sget_simple_extent_type(sid); + VERIFY(space_type, H5S_NULL, "H5Sget_simple_extent_type"); + + /* Convert the null dataspace to a simple dataspace */ + ret = H5Sset_extent_simple(sid, 1, dims, NULL); + CHECK(ret, FAIL, "H5Sset_extent_simple"); + + /* Verify that we've got the right kind of dataspace now */ + space_type = H5Sget_simple_extent_type(sid); + VERIFY(space_type, H5S_SIMPLE, "H5Sget_simple_extent_type"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /*============================================ + * Reopen the file to check the dataspace + *============================================ + */ + fid = H5Fopen(NULLFILE, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Reopen the dataset */ + did = H5Dopen2(fid, NULLDATASET, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Get the space of the dataset */ + dset_sid = H5Dget_space(did); + CHECK(dset_sid, FAIL, "H5Dget_space"); + + /* Query the NULL dataspace */ + dset_sid2 = H5Scopy(dset_sid); + CHECK(dset_sid2, FAIL, "H5Scopy"); + + /* Verify the class type of dataspace */ + stype = H5Sget_simple_extent_type(dset_sid2); + VERIFY(stype, H5S_NULL, "H5Sget_simple_extent_type"); + + /* Verify there is zero element in the dataspace */ + ret = (herr_t)H5Sget_simple_extent_npoints(dset_sid2); + VERIFY(ret, 0, "H5Sget_simple_extent_npoints"); + + /* Try reading from the dataset (make certain our buffer is unmodified) */ + ret = H5Dread(did, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &uval); + CHECK(ret, FAIL, "H5Dread"); + VERIFY(uval, 2, "H5Dread"); + + /* Close the dataspace */ + ret = H5Sclose(dset_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(dset_sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Open the attribute for the dataset */ + attr = H5Aopen(did, NULLATTR, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); + + /* Get the space of the dataset */ + attr_sid = H5Aget_space(attr); + CHECK(attr_sid, FAIL, "H5Aget_space"); + + /* Verify the class type of dataspace */ + stype = H5Sget_simple_extent_type(attr_sid); + VERIFY(stype, H5S_NULL, "H5Sget_simple_extent_type"); + + /* Verify there is zero element in 
the dataspace */ + ret = (herr_t)H5Sget_simple_extent_npoints(attr_sid); + VERIFY(ret, 0, "H5Sget_simple_extent_npoints"); + + /* Close the dataspace */ + ret = H5Sclose(attr_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Try reading from the attribute (make certain our buffer is unmodified) */ + ret = H5Aread(attr, H5T_NATIVE_INT, &val); + CHECK(ret, FAIL, "H5Aread"); + VERIFY(val, 1, "H5Aread"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_h5s_null() */ + +/**************************************************************** +** +** test_h5s_zero_dim(): Test the code for dataspace with zero dimension size +** +****************************************************************/ +static void +test_h5s_zero_dim(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t sid1, attr_sid; /* Dataspace ID */ + hid_t sid_chunk; /* Dataspace ID for chunked dataset */ + hid_t dset1; /* Dataset ID */ + hid_t plist_id; /* Dataset creation property list */ + hid_t attr; /* Attribute ID */ + int rank; /* Logical rank of dataspace */ + hsize_t dims1[] = {0, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t max_dims[] = {SPACE1_DIM1 + 1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t extend_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t chunk_dims[] = {SPACE1_DIM1, SPACE1_DIM2 / 3, SPACE1_DIM3}; + hsize_t tdims[SPACE1_RANK]; /* Dimension array to test with */ + int wdata[SPACE1_DIM2][SPACE1_DIM3]; + int rdata[SPACE1_DIM2][SPACE1_DIM3]; + short wdata_short[SPACE1_DIM2][SPACE1_DIM3]; + short rdata_short[SPACE1_DIM2][SPACE1_DIM3]; + int wdata_real[SPACE1_DIM1][SPACE1_DIM2][SPACE1_DIM3]; + int rdata_real[SPACE1_DIM1][SPACE1_DIM2][SPACE1_DIM3]; +#ifndef NO_CHECK_SELECTION_BOUNDS + int val = 3; + hsize_t start[] = {0, 0, 0}; + hsize_t count[] = {3, 15, 13}; + hsize_t coord[1][3]; /* Coordinates for point selection */ +#endif + hssize_t nelem; /* Number of elements */ + H5S_sel_type sel_type; /* Type of selection currently */ + H5S_class_t stype; /* dataspace type */ + H5D_alloc_time_t alloc_time; /* Space allocation time */ + herr_t ret; /* Generic return value */ + unsigned int i, j, k; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Dataspace with zero dimension size\n")); + + /* Initialize the data */ + for (i = 0; i < SPACE1_DIM2; i++) + for (j = 0; j < SPACE1_DIM3; j++) { + wdata[i][j] = (int)(i + j); + rdata[i][j] = 7; + wdata_short[i][j] = (short)(i + j); + rdata_short[i][j] = 7; + } + + for (i = 0; i < SPACE1_DIM1; i++) + for (j = 0; j < SPACE1_DIM2; j++) + for (k = 0; k < SPACE1_DIM3; k++) + wdata_real[i][j][k] = (int)(i + j + k); + + /* Test with different space allocation times */ + for (alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; alloc_time++) { + + /* Make sure we can create the space with the dimension size 0 (starting from v1.8.7). + * The dimension doesn't need to be unlimited. */ + dims1[0] = 0; + dims1[1] = SPACE1_DIM2; + dims1[2] = SPACE1_DIM3; + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + sid1 = H5Screate(H5S_SIMPLE); + CHECK(sid1, FAIL, "H5Screate"); + + /* SID1 has the 1st dimension size as zero. The maximal dimension will be + * the same as the dimension because of the NULL passed in. 
*/ + ret = H5Sset_extent_simple(sid1, SPACE1_RANK, dims1, NULL); + CHECK(ret, FAIL, "H5Sset_extent_simple"); + + /* Check that the dataspace actually has 0 elements */ + nelem = H5Sget_simple_extent_npoints(sid1); + VERIFY(nelem, 0, "H5Sget_simple_extent_npoints"); + + /* Check that the dataspace was created with an "all" selection */ + sel_type = H5Sget_select_type(sid1); + VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type"); + + /* Check that the dataspace has 0 elements selected */ + nelem = H5Sget_select_npoints(sid1); + VERIFY(nelem, 0, "H5Sget_select_npoints"); + + /* Change to "none" selection */ + ret = H5Sselect_none(sid1); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Check that the dataspace has 0 elements selected */ + nelem = H5Sget_select_npoints(sid1); + VERIFY(nelem, 0, "H5Sget_select_npoints"); + + /* Try to select all dataspace */ + ret = H5Sselect_all(sid1); + CHECK(ret, FAIL, "H5Sselect_all"); + + /* Check that the dataspace has 0 elements selected */ + nelem = H5Sget_select_npoints(sid1); + VERIFY(nelem, 0, "H5Sget_select_npoints"); + + /* Create the dataspace for chunked dataset with the first dimension size as zero. + * The maximal dimensions are bigger than the dimensions for later expansion. */ + sid_chunk = H5Screate_simple(SPACE1_RANK, dims1, max_dims); + CHECK(sid_chunk, FAIL, "H5Screate_simple"); + + /*============================================ + * Make sure we can use 0-dimension to create + * contiguous, chunked, compact, and external + * datasets, and also attribute. + *============================================ + */ + fid1 = H5Fcreate(ZEROFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /*===================== Contiguous dataset =======================*/ + plist_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(plist_id, FAIL, "H5Pcreate"); + + ret = H5Pset_alloc_time(plist_id, alloc_time); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + dset1 = H5Dcreate2(fid1, BASICDATASET, H5T_NATIVE_INT, sid1, H5P_DEFAULT, plist_id, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + + ret = H5Pclose(plist_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Write "nothing" to the dataset */ + ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + /* Try reading from the dataset (make certain our buffer is unmodified) */ + ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM2; i++) { + for (j = 0; j < SPACE1_DIM3; j++) { + if (rdata[i][j] != 7) { + H5_FAILED(); + HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); + } + } + } + + /* Write "nothing" to the dataset (with type conversion :-) */ + ret = H5Dwrite(dset1, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata_short); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + /* Try reading from the dataset (make certain our buffer is unmodified) */ + ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, rdata_short); + CHECK(ret, FAIL, "H5Dread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM2; i++) { + for (j = 0; j < SPACE1_DIM3; j++) { + if (rdata_short[i][j] != 7) { + H5_FAILED(); + HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata_short[i][j]); + } + } + } +#ifndef NO_CHECK_SELECTION_BOUNDS + /* Select a hyperslab beyond its current 
dimension sizes, then try to write + * the data. It should fail. */ + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + H5E_BEGIN_TRY + { + ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, sid1, H5P_DEFAULT, wdata); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Dwrite"); +#endif + /* Change to "none" selection */ + ret = H5Sselect_none(sid1); + CHECK(ret, FAIL, "H5Sselect_none"); +#ifndef NO_CHECK_SELECTION_BOUNDS + /* Select a point beyond the dimension size, then try to write the data. + * It should fail. */ + coord[0][0] = 2; + coord[0][1] = 5; + coord[0][2] = 3; + ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord); + CHECK(ret, FAIL, "H5Sselect_elements"); + + H5E_BEGIN_TRY + { + ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, sid1, H5P_DEFAULT, &val); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Dwrite"); +#endif + /* Restore the selection to all */ + ret = H5Sselect_all(sid1); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + + /*=================== Chunked dataset ====================*/ + plist_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(plist_id, FAIL, "H5Pcreate"); + + ret = H5Pset_chunk(plist_id, SPACE1_RANK, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* ret = H5Pset_alloc_time(plist_id, alloc_time); */ + /* CHECK(ret, FAIL, "H5Pset_alloc_time"); */ + + dset1 = + H5Dcreate2(fid1, BASICDATASET1, H5T_NATIVE_INT, sid_chunk, H5P_DEFAULT, plist_id, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + + /* Write "nothing" to the dataset */ + ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + /* Try reading from the dataset (make certain our buffer is unmodified) */ + ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM2; i++) + for (j = 0; j < SPACE1_DIM3; j++) { + if (rdata[i][j] != 7) { + H5_FAILED(); + HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); + } + } + + /* Now extend the dataset to SPACE1_DIM1*SPACE1_DIM2*SPACE1_DIM3 and make sure + * we can write data to it */ + extend_dims[0] = SPACE1_DIM1; + ret = H5Dset_extent(dset1, extend_dims); + CHECK(ret, FAIL, "H5Dset_extent"); + + ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata_real); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata_real); + CHECK(ret, FAIL, "H5Dread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM1; i++) { + for (j = 0; j < SPACE1_DIM2; j++) { + for (k = 0; k < SPACE1_DIM3; k++) { + if (rdata_real[i][j][k] != wdata_real[i][j][k]) { + H5_FAILED(); + HDprintf("element [%d][%d][%d] is %d but should have been %d\n", i, j, k, + rdata_real[i][j][k], wdata_real[i][j][k]); + } + } + } + } + + /* Now shrink the first dimension size of the dataset to 0 and make sure no data is in it */ + extend_dims[0] = 0; + ret = H5Dset_extent(dset1, extend_dims); + CHECK(ret, FAIL, "H5Dset_extent"); + + ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + /* Try reading from the dataset (make certain our buffer is unmodified) */ + ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, 
"H5Dread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM2; i++) + for (j = 0; j < SPACE1_DIM3; j++) { + if (rdata[i][j] != 7) { + H5_FAILED(); + HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); + } + } +#ifndef NO_CHECK_SELECTION_BOUNDS + /* Now extend the first dimension size of the dataset to SPACE1_DIM1*3 past the maximal size. + * It is supposed to fail. */ + extend_dims[0] = SPACE1_DIM1 * 3; + H5E_BEGIN_TRY + { + ret = H5Dset_extent(dset1, extend_dims); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Dset_extent"); +#endif + ret = H5Pclose(plist_id); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + + /*=================== Compact dataset =====================*/ + plist_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(plist_id, FAIL, "H5Pcreate"); + + ret = H5Pset_layout(plist_id, H5D_COMPACT); + CHECK(ret, FAIL, "H5Pset_layout"); + + /* Don't set the allocation time for compact storage datasets (must be early) */ + + dset1 = H5Dcreate2(fid1, BASICDATASET2, H5T_NATIVE_INT, sid1, H5P_DEFAULT, plist_id, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + + /* Write "nothing" to the dataset */ + ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + /* Try reading from the dataset (make certain our buffer is unmodified) */ + ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM2; i++) + for (j = 0; j < SPACE1_DIM3; j++) { + if (rdata[i][j] != 7) { + H5_FAILED(); + HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); + } + } + + ret = H5Pclose(plist_id); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + + /*=========== Contiguous dataset with external storage ============*/ + plist_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(plist_id, FAIL, "H5Pcreate"); + + /* Change the DCPL for contiguous layout with external storage. The size of the reserved + * space in the external file is the size of the dataset (zero because one dimension size is zero). + * There's no need to clean up the external file since the library doesn't create it + * until the data is written to it. 
*/ + ret = H5Pset_external(plist_id, EXTFILE_NAME, (off_t)0, (hsize_t)0); + CHECK(ret, FAIL, "H5Pset_external"); + + ret = H5Pset_alloc_time(plist_id, alloc_time); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + dset1 = H5Dcreate2(fid1, BASICDATASET3, H5T_NATIVE_INT, sid1, H5P_DEFAULT, plist_id, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + + /* Write "nothing" to the dataset */ + ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + /* Try reading from the dataset (make certain our buffer is unmodified) */ + ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM2; i++) { + for (j = 0; j < SPACE1_DIM3; j++) { + if (rdata[i][j] != 7) { + H5_FAILED(); + HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); + } + } + } + + ret = H5Pclose(plist_id); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + + /*=============== Create an attribute for the file ================*/ + attr = H5Acreate2(fid1, NULLATTR, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Acreate2"); + + /* Write "nothing" to the attribute */ + ret = H5Awrite(attr, H5T_NATIVE_INT, wdata); + CHECK(ret, FAIL, "H5Awrite"); + + ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + /* Try reading from the attribute (make certain our buffer is unmodified) */ + ret = H5Aread(attr, H5T_NATIVE_INT, rdata); + CHECK(ret, FAIL, "H5Aread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM2; i++) { + for (j = 0; j < SPACE1_DIM3; j++) { + if (rdata[i][j] != 7) { + H5_FAILED(); + HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); + } + } + } + + /* Write "nothing" to the attribute (with type conversion :-) */ + ret = H5Awrite(attr, H5T_NATIVE_SHORT, wdata_short); + CHECK(ret, FAIL, "H5Awrite"); + + ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + /* Try reading from the attribute (with type conversion :-) (make certain our buffer is unmodified) */ + ret = H5Aread(attr, H5T_NATIVE_SHORT, rdata_short); + CHECK(ret, FAIL, "H5Aread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM2; i++) { + for (j = 0; j < SPACE1_DIM3; j++) { + if (rdata_short[i][j] != 7) { + H5_FAILED(); + HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata_short[i][j]); + } + } + } + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /*=============================================================== + * Extend the dimension to make it a normal dataspace (3x15x13). + * Verify that data can be written to and read from the chunked + * dataset now. 
+ *=============================================================== + */ + dims1[0] = SPACE1_DIM1; + ret = H5Sset_extent_simple(sid_chunk, SPACE1_RANK, dims1, max_dims); + CHECK(ret, FAIL, "H5Sset_extent_simple"); + + nelem = H5Sget_simple_extent_npoints(sid_chunk); + CHECK(nelem, FAIL, "H5Sget_simple_extent_npoints"); + VERIFY(nelem, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints"); + + rank = H5Sget_simple_extent_ndims(sid_chunk); + CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); + VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims"); + + rank = H5Sget_simple_extent_dims(sid_chunk, tdims, NULL); + CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); + VERIFY(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); + + /* Set it to chunked dataset */ + plist_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(plist_id, FAIL, "H5Pcreate"); + + ret = H5Pset_chunk(plist_id, SPACE1_RANK, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + + ret = H5Pset_alloc_time(plist_id, alloc_time); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + dset1 = + H5Dcreate2(fid1, BASICDATASET4, H5T_NATIVE_INT, sid_chunk, H5P_DEFAULT, plist_id, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); + + ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata_real); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata_real); + CHECK(ret, FAIL, "H5Dread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM1; i++) { + for (j = 0; j < SPACE1_DIM2; j++) { + for (k = 0; k < SPACE1_DIM3; k++) { + if (rdata_real[i][j][k] != wdata_real[i][j][k]) { + H5_FAILED(); + HDprintf("element [%d][%d][%d] is %d but should have been %d\n", i, j, k, + rdata_real[i][j][k], wdata_real[i][j][k]); + } + } + } + } + + ret = H5Pclose(plist_id); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + + /* Change the dimensions to make them zero size again (0x0x0). Verify that + * no element is in the dataspace. 
*/ + dims1[0] = dims1[1] = dims1[2] = 0; + ret = H5Sset_extent_simple(sid_chunk, SPACE1_RANK, dims1, NULL); + CHECK(ret, FAIL, "H5Sset_extent_simple"); + + /* Check that the dataspace actually has 0 elements */ + nelem = H5Sget_simple_extent_npoints(sid_chunk); + VERIFY(nelem, 0, "H5Sget_simple_extent_npoints"); + + /* Check that the dataspace was created with an "all" selection */ + sel_type = H5Sget_select_type(sid_chunk); + VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type"); + + /* Check that the dataspace has 0 elements selected */ + nelem = H5Sget_select_npoints(sid_chunk); + VERIFY(nelem, 0, "H5Sget_select_npoints"); + + /* Change to "none" selection */ + ret = H5Sselect_none(sid_chunk); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Check that the dataspace has 0 elements selected */ + nelem = H5Sget_select_npoints(sid_chunk); + VERIFY(nelem, 0, "H5Sget_select_npoints"); + + ret = H5Sclose(sid_chunk); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /*============================================ + * Reopen the file to check the dataspace + *============================================ + */ + fid1 = H5Fopen(ZEROFILE, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Reopen the chunked dataset */ + dset1 = H5Dopen2(fid1, BASICDATASET1, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dopen2"); + + /* Get the space of the dataset and query it */ + sid1 = H5Dget_space(dset1); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Verify the class type of dataspace */ + stype = H5Sget_simple_extent_type(sid1); + VERIFY(stype, H5S_SIMPLE, "H5Sget_simple_extent_type"); + + /* Verify there is zero element in the dataspace */ + nelem = H5Sget_simple_extent_npoints(sid1); + VERIFY(nelem, 0, "H5Sget_simple_extent_npoints"); + + /* Verify the dimension sizes are correct */ + rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); + CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); + VERIFY(tdims[0], 0, "H5Sget_simple_extent_dims"); + VERIFY(tdims[1], SPACE1_DIM2, "H5Sget_simple_extent_dims"); + VERIFY(tdims[2], SPACE1_DIM3, "H5Sget_simple_extent_dims"); + + /* Try reading from the dataset (make certain our buffer is unmodified) */ + ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM2; i++) { + for (j = 0; j < SPACE1_DIM3; j++) { + if (rdata[i][j] != 7) { + H5_FAILED(); + HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); + } + } + } + + /* Close the dataset and its dataspace */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Open the attribute for the file */ + attr = H5Aopen(fid1, NULLATTR, H5P_DEFAULT); + CHECK(attr, FAIL, "H5Aopen"); + + /* Get the space of the dataset */ + attr_sid = H5Aget_space(attr); + CHECK(attr_sid, FAIL, "H5Aget_space"); + + /* Verify the class type of dataspace */ + stype = H5Sget_simple_extent_type(attr_sid); + VERIFY(stype, H5S_SIMPLE, "H5Sget_simple_extent_type"); + + /* Verify there is zero element in the dataspace */ + nelem = H5Sget_simple_extent_npoints(attr_sid); + VERIFY(nelem, 0, "H5Sget_simple_extent_npoints"); + + /* Try reading from the attribute (make certain our buffer is unmodified) */ + ret = H5Aread(attr, H5T_NATIVE_SHORT, rdata_short); + CHECK(ret, FAIL, "H5Aread"); + + /* Check results */ + for (i = 0; i < SPACE1_DIM2; i++) { + for (j = 0; j < 
SPACE1_DIM3; j++) { + if (rdata_short[i][j] != 7) { + H5_FAILED(); + HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata_short[i][j]); + } + } + } + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close the dataspace */ + ret = H5Sclose(attr_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ +} /* test_h5s_zero_dim() */ + +/**************************************************************** +** +** test_h5s_encode(): Test H5S (dataspace) encoding and decoding. +** +** Note: See "RFC: H5Sencode/H5Sdecode Format Change". +** +****************************************************************/ +static void +test_h5s_encode(H5F_libver_t low, H5F_libver_t high) +{ + hid_t sid1, sid2, sid3; /* Dataspace ID */ + hid_t decoded_sid1, decoded_sid2, decoded_sid3; + int rank; /* Logical rank of dataspace */ + hid_t fapl = -1; /* File access property list ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + size_t sbuf_size = 0, null_size = 0, scalar_size = 0; + unsigned char *sbuf = NULL, *null_sbuf = NULL, *scalar_buf = NULL; + hsize_t tdims[4]; /* Dimension array to test with */ + hssize_t n; /* Number of dataspace elements */ + hsize_t start[] = {0, 0, 0}; + hsize_t stride[] = {2, 5, 3}; + hsize_t count[] = {2, 2, 2}; + hsize_t block[] = {1, 3, 1}; + H5S_sel_type sel_type; + H5S_class_t space_type; + hssize_t nblocks; + hid_t ret_id; /* Generic hid_t return value */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Dataspace Encoding and Decoding\n")); + + /*------------------------------------------------------------------------- + * Test encoding and decoding of simple dataspace and hyperslab selection. 
+ *------------------------------------------------------------------------- + */ + + /* Create the file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Set low/high bounds in the fapl */ + ret = H5Pset_libver_bounds(fapl, low, high); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Create the dataspace */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Set the hyperslab selection */ + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Encode simple dataspace in a buffer with the fapl setting */ + ret = H5Sencode2(sid1, NULL, &sbuf_size, fapl); + CHECK(ret, FAIL, "H5Sencode2"); + + if (sbuf_size > 0) { + sbuf = (unsigned char *)HDcalloc((size_t)1, sbuf_size); + CHECK_PTR(sbuf, "HDcalloc"); + } + + /* Try decoding bogus buffer */ + H5E_BEGIN_TRY + { + ret_id = H5Sdecode(sbuf); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Sdecode"); + + /* Encode the simple dataspace in a buffer with the fapl setting */ + ret = H5Sencode2(sid1, sbuf, &sbuf_size, fapl); + CHECK(ret, FAIL, "H5Sencode"); + + /* Decode from the dataspace buffer and return an object handle */ + decoded_sid1 = H5Sdecode(sbuf); + CHECK(decoded_sid1, FAIL, "H5Sdecode"); + + /* Verify the decoded dataspace */ + n = H5Sget_simple_extent_npoints(decoded_sid1); + CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); + VERIFY(n, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints"); + + /* Retrieve and verify the dataspace rank */ + rank = H5Sget_simple_extent_ndims(decoded_sid1); + CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); + VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims"); + + /* Retrieve and verify the dataspace dimensions */ + rank = H5Sget_simple_extent_dims(decoded_sid1, tdims, NULL); + CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); + VERIFY(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); + + /* Verify the type of dataspace selection */ + sel_type = H5Sget_select_type(decoded_sid1); + VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); + + /* Verify the number of hyperslab blocks */ + nblocks = H5Sget_select_hyper_nblocks(decoded_sid1); + VERIFY(nblocks, 2 * 2 * 2, "H5Sget_select_hyper_nblocks"); + + /* Close the dataspaces */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(decoded_sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /*------------------------------------------------------------------------- + * Test encoding and decoding of null dataspace. 
+     *-------------------------------------------------------------------------
+     */
+    sid2 = H5Screate(H5S_NULL);
+    CHECK(sid2, FAIL, "H5Screate");
+
+    /* Encode null dataspace in a buffer */
+    ret = H5Sencode2(sid2, NULL, &null_size, fapl);
+    CHECK(ret, FAIL, "H5Sencode2");
+
+    if (null_size > 0) {
+        null_sbuf = (unsigned char *)HDcalloc((size_t)1, null_size);
+        CHECK_PTR(null_sbuf, "HDcalloc");
+    }
+
+    /* Encode the null dataspace in the buffer */
+    ret = H5Sencode2(sid2, null_sbuf, &null_size, fapl);
+    CHECK(ret, FAIL, "H5Sencode2");
+
+    /* Decode from the dataspace buffer and return an object handle */
+    decoded_sid2 = H5Sdecode(null_sbuf);
+    CHECK(decoded_sid2, FAIL, "H5Sdecode");
+
+    /* Verify the decoded dataspace type */
+    space_type = H5Sget_simple_extent_type(decoded_sid2);
+    VERIFY(space_type, H5S_NULL, "H5Sget_simple_extent_type");
+
+    /* Close the dataspaces */
+    ret = H5Sclose(sid2);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    ret = H5Sclose(decoded_sid2);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /*-------------------------------------------------------------------------
+     * Test encoding and decoding of scalar dataspace.
+     *-------------------------------------------------------------------------
+     */
+    /* Create scalar dataspace */
+    sid3 = H5Screate(H5S_SCALAR);
+    CHECK(sid3, FAIL, "H5Screate");
+
+    /* Encode scalar dataspace in a buffer */
+    ret = H5Sencode2(sid3, NULL, &scalar_size, fapl);
+    CHECK(ret, FAIL, "H5Sencode2");
+
+    if (scalar_size > 0) {
+        scalar_buf = (unsigned char *)HDcalloc((size_t)1, scalar_size);
+        CHECK_PTR(scalar_buf, "HDcalloc");
+    }
+
+    /* Encode the scalar dataspace in the buffer */
+    ret = H5Sencode2(sid3, scalar_buf, &scalar_size, fapl);
+    CHECK(ret, FAIL, "H5Sencode2");
+
+    /* Decode from the dataspace buffer and return an object handle */
+    decoded_sid3 = H5Sdecode(scalar_buf);
+    CHECK(decoded_sid3, FAIL, "H5Sdecode");
+
+    /* Verify extent type */
+    space_type = H5Sget_simple_extent_type(decoded_sid3);
+    VERIFY(space_type, H5S_SCALAR, "H5Sget_simple_extent_type");
+
+    /* Verify decoded dataspace */
+    n = H5Sget_simple_extent_npoints(decoded_sid3);
+    CHECK(n, FAIL, "H5Sget_simple_extent_npoints");
+    VERIFY(n, 1, "H5Sget_simple_extent_npoints");
+
+    /* Retrieve and verify the dataspace rank */
+    rank = H5Sget_simple_extent_ndims(decoded_sid3);
+    CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+    VERIFY(rank, 0, "H5Sget_simple_extent_ndims");
+
+    /* Close the dataspaces */
+    ret = H5Sclose(sid3);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    ret = H5Sclose(decoded_sid3);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close the file access property list */
+    ret = H5Pclose(fapl);
+    CHECK(ret, FAIL, "H5Pclose");
+
+    /* Release resources */
+    if (sbuf)
+        HDfree(sbuf);
+    if (null_sbuf)
+        HDfree(null_sbuf);
+    if (scalar_buf)
+        HDfree(scalar_buf);
+} /* test_h5s_encode() */
+
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+
+/****************************************************************
+**
+** test_h5s_encode1(): Test H5S (dataspace) encoding and decoding with the deprecated H5Sencode1().
+** +****************************************************************/ +static void +test_h5s_encode1(void) +{ + hid_t sid1, sid2, sid3; /* Dataspace ID */ + hid_t decoded_sid1, decoded_sid2, decoded_sid3; + int rank; /* Logical rank of dataspace */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + size_t sbuf_size = 0, null_size = 0, scalar_size = 0; + unsigned char *sbuf = NULL, *null_sbuf = NULL, *scalar_buf = NULL; + hsize_t tdims[4]; /* Dimension array to test with */ + hssize_t n; /* Number of dataspace elements */ + hsize_t start[] = {0, 0, 0}; + hsize_t stride[] = {2, 5, 3}; + hsize_t count[] = {2, 2, 2}; + hsize_t block[] = {1, 3, 1}; + H5S_sel_type sel_type; + H5S_class_t space_type; + hssize_t nblocks; + hid_t ret_id; /* Generic hid_t return value */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Dataspace Encoding (H5Sencode1) and Decoding\n")); + + /*------------------------------------------------------------------------- + * Test encoding and decoding of simple dataspace and hyperslab selection. + *------------------------------------------------------------------------- + */ + /* Create the dataspace */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Set the hyperslab selection */ + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Encode simple dataspace in a buffer with the fapl setting */ + ret = H5Sencode1(sid1, NULL, &sbuf_size); + CHECK(ret, FAIL, "H5Sencode2"); + + if (sbuf_size > 0) { + sbuf = (unsigned char *)HDcalloc((size_t)1, sbuf_size); + CHECK_PTR(sbuf, "HDcalloc"); + } + + /* Try decoding bogus buffer */ + H5E_BEGIN_TRY + { + ret_id = H5Sdecode(sbuf); + } + H5E_END_TRY; + VERIFY(ret_id, FAIL, "H5Sdecode"); + + /* Encode the simple dataspace in a buffer */ + ret = H5Sencode1(sid1, sbuf, &sbuf_size); + CHECK(ret, FAIL, "H5Sencode"); + + /* Decode from the dataspace buffer and return an object handle */ + decoded_sid1 = H5Sdecode(sbuf); + CHECK(decoded_sid1, FAIL, "H5Sdecode"); + + /* Verify the decoded dataspace */ + n = H5Sget_simple_extent_npoints(decoded_sid1); + CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); + VERIFY(n, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints"); + + /* Retrieve and verify the dataspace rank */ + rank = H5Sget_simple_extent_ndims(decoded_sid1); + CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); + VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims"); + + /* Retrieve and verify the dataspace dimensions */ + rank = H5Sget_simple_extent_dims(decoded_sid1, tdims, NULL); + CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); + VERIFY(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); + + /* Verify the type of dataspace selection */ + sel_type = H5Sget_select_type(decoded_sid1); + VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); + + /* Verify the number of hyperslab blocks */ + nblocks = H5Sget_select_hyper_nblocks(decoded_sid1); + VERIFY(nblocks, 2 * 2 * 2, "H5Sget_select_hyper_nblocks"); + + /* Close the dataspaces */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(decoded_sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /*------------------------------------------------------------------------- + * Test encoding and decoding of null dataspace. 
+ *------------------------------------------------------------------------- + */ + sid2 = H5Screate(H5S_NULL); + CHECK(sid2, FAIL, "H5Screate"); + + /* Encode null dataspace in a buffer */ + ret = H5Sencode1(sid2, NULL, &null_size); + CHECK(ret, FAIL, "H5Sencode"); + + if (null_size > 0) { + null_sbuf = (unsigned char *)HDcalloc((size_t)1, null_size); + CHECK_PTR(null_sbuf, "HDcalloc"); + } + + /* Encode the null dataspace in the buffer */ + ret = H5Sencode1(sid2, null_sbuf, &null_size); + CHECK(ret, FAIL, "H5Sencode2"); + + /* Decode from the dataspace buffer and return an object handle */ + decoded_sid2 = H5Sdecode(null_sbuf); + CHECK(decoded_sid2, FAIL, "H5Sdecode"); + + /* Verify the decoded dataspace type */ + space_type = H5Sget_simple_extent_type(decoded_sid2); + VERIFY(space_type, H5S_NULL, "H5Sget_simple_extent_type"); + + /* Close the dataspaces */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(decoded_sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /*------------------------------------------------------------------------- + * Test encoding and decoding of scalar dataspace. + *------------------------------------------------------------------------- + */ + /* Create scalar dataspace */ + sid3 = H5Screate(H5S_SCALAR); + CHECK(sid3, FAIL, "H5Screate"); + + /* Encode scalar dataspace in a buffer */ + ret = H5Sencode1(sid3, NULL, &scalar_size); + CHECK(ret, FAIL, "H5Sencode"); + + if (scalar_size > 0) { + scalar_buf = (unsigned char *)HDcalloc((size_t)1, scalar_size); + CHECK_PTR(scalar_buf, "HDcalloc"); + } + + /* Encode the scalar dataspace in the buffer */ + ret = H5Sencode1(sid3, scalar_buf, &scalar_size); + CHECK(ret, FAIL, "H5Sencode2"); + + /* Decode from the dataspace buffer and return an object handle */ + decoded_sid3 = H5Sdecode(scalar_buf); + CHECK(decoded_sid3, FAIL, "H5Sdecode"); + + /* Verify extent type */ + space_type = H5Sget_simple_extent_type(decoded_sid3); + VERIFY(space_type, H5S_SCALAR, "H5Sget_simple_extent_type"); + + /* Verify decoded dataspace */ + n = H5Sget_simple_extent_npoints(decoded_sid3); + CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); + VERIFY(n, 1, "H5Sget_simple_extent_npoints"); + + /* Retrieve and verify the dataspace rank */ + rank = H5Sget_simple_extent_ndims(decoded_sid3); + CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); + VERIFY(rank, 0, "H5Sget_simple_extent_ndims"); + + /* Close the dataspaces */ + ret = H5Sclose(sid3); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(decoded_sid3); + CHECK(ret, FAIL, "H5Sclose"); + + /* Release resources */ + if (sbuf) + HDfree(sbuf); + if (null_sbuf) + HDfree(null_sbuf); + if (scalar_buf) + HDfree(scalar_buf); +} /* test_h5s_encode1() */ + +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + +/**************************************************************** +** +** test_h5s_check_encoding(): +** This is the helper routine to verify that H5Sencode2() +** works as specified in the RFC for the library format setting +** in the file access property list. +** See "RFC: H5Sencode/H5Sdeocde Format Change". 
+** +** This routine is used by: +** test_h5s_encode_regular_hyper() +** test_h5s_encode_irregular_hyper() +** test_h5s_encode_points() +** +****************************************************************/ +static herr_t +test_h5s_check_encoding(hid_t in_fapl, hid_t in_sid, uint32_t expected_version, uint8_t expected_enc_size, + hbool_t expected_to_fail) +{ + char *buf = NULL; /* Pointer to the encoded buffer */ + size_t buf_size; /* Size of the encoded buffer */ + hid_t d_sid = -1; /* The decoded dataspace ID */ + htri_t check; + hsize_t in_low_bounds[1]; /* The low bounds for the selection for in_sid */ + hsize_t in_high_bounds[1]; /* The high bounds for the selection for in_sid */ + hsize_t d_low_bounds[1]; /* The low bounds for the selection for d_sid */ + hsize_t d_high_bounds[1]; /* The high bounds for the selection for d_sid */ + herr_t ret; /* Return value */ + + /* Get buffer size for encoding with the format setting in in_fapl */ + H5E_BEGIN_TRY + { + ret = H5Sencode2(in_sid, NULL, &buf_size, in_fapl); + } + H5E_END_TRY + + if (expected_to_fail) { + VERIFY(ret, FAIL, "H5Screate_simple"); + } + else { + + CHECK(ret, FAIL, "H5Sencode2"); + + /* Allocate the buffer for encoding */ + buf = (char *)HDmalloc(buf_size); + CHECK_PTR(buf, "HDmalloc"); + + /* Encode according to the setting in in_fapl */ + ret = H5Sencode2(in_sid, buf, &buf_size, in_fapl); + CHECK(ret, FAIL, "H5Sencode2"); + + /* Decode the buffer */ + d_sid = H5Sdecode(buf); + CHECK(d_sid, FAIL, "H5Sdecode"); + + /* Verify the number of selected points for in_sid and d_sid */ + VERIFY(H5Sget_select_npoints(in_sid), H5Sget_select_npoints(d_sid), "Compare npoints"); + + /* Verify if the two dataspace selections (in_sid, d_sid) are the same shape */ + check = H5Sselect_shape_same(in_sid, d_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare the starting/ending coordinates of the bounding box for in_sid and d_sid */ + ret = H5Sget_select_bounds(in_sid, in_low_bounds, in_high_bounds); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + ret = H5Sget_select_bounds(d_sid, d_low_bounds, d_high_bounds); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(in_low_bounds[0], d_low_bounds[0], "Compare selection low bounds"); + VERIFY(in_high_bounds[0], d_high_bounds[0], "Compare selection high bounds"); + + /* + * See "RFC: H5Sencode/H5Sdeocde Format Change" for the verification of: + * H5S_SEL_POINTS: + * --the expected version for point selection info + * --the expected encoded size (version 2 points selection info) + * H5S_SEL_HYPERSLABS: + * --the expected version for hyperslab selection info + * --the expected encoded size (version 3 hyperslab selection info) + */ + + if (H5Sget_select_type(in_sid) == H5S_SEL_POINTS) { + + /* Verify the version */ + VERIFY((uint32_t)buf[35], expected_version, "Version for point selection"); + + /* Verify the encoded size for version 2 */ + if (expected_version == 2) + VERIFY((uint8_t)buf[39], expected_enc_size, "Encoded size of point selection info"); + } + + if (H5Sget_select_type(in_sid) == H5S_SEL_HYPERSLABS) { + + /* Verify the version */ + VERIFY((uint32_t)buf[35], expected_version, "Version for hyperslab selection info"); + + /* Verify the encoded size for version 3 */ + if (expected_version == 3) + VERIFY((uint8_t)buf[40], expected_enc_size, "Encoded size of selection info"); + + } /* hyperslab selection */ + + ret = H5Sclose(d_sid); + CHECK(ret, FAIL, "H5Sclose"); + if (buf) + HDfree(buf); + } + + return (0); + +} /* test_h5s_check_encoding */ + 
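+/* Illustrative sketch, not part of the original test changes: a minimal,
+ * self-contained version of the H5Sencode2()/H5Sdecode() round trip that
+ * test_h5s_check_encoding() above builds on. The helper name
+ * encode_decode_roundtrip() is hypothetical, and "fapl" is assumed to be a
+ * file access property list whose low/high bounds were already set with
+ * H5Pset_libver_bounds(). It is wrapped in "#if 0" so it does not change the
+ * behavior of the test program.
+ */
+#if 0
+static herr_t
+encode_decode_roundtrip(hid_t sid, hid_t fapl)
+{
+    size_t         buf_size = 0;
+    unsigned char *buf      = NULL;
+    hid_t          d_sid    = H5I_INVALID_HID;
+
+    /* First call with a NULL buffer only reports the required buffer size */
+    if (H5Sencode2(sid, NULL, &buf_size, fapl) < 0)
+        return FAIL;
+    if (NULL == (buf = (unsigned char *)HDmalloc(buf_size)))
+        return FAIL;
+
+    /* Second call serializes the dataspace extent and selection */
+    if (H5Sencode2(sid, buf, &buf_size, fapl) < 0) {
+        HDfree(buf);
+        return FAIL;
+    }
+
+    /* Rebuild a dataspace from the buffer and compare the selections */
+    if ((d_sid = H5Sdecode(buf)) < 0) {
+        HDfree(buf);
+        return FAIL;
+    }
+    VERIFY(H5Sselect_shape_same(sid, d_sid), TRUE, "H5Sselect_shape_same");
+
+    H5Sclose(d_sid);
+    HDfree(buf);
+    return 0;
+}
+#endif
+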
+/**************************************************************** +** +** test_h5s_encode_regular_hyper(): +** This test verifies that H5Sencode2() works as specified in +** the RFC for regular hyperslabs. +** See "RFC: H5Sencode/H5Sdeocde Format Change". +** +****************************************************************/ +static void +test_h5s_encode_regular_hyper(H5F_libver_t low, H5F_libver_t high) +{ + hid_t fapl = -1; /* File access property list ID */ + hid_t sid = -1; /* Dataspace ID */ + hsize_t numparticles = 8388608; /* Used to calculate dimension size */ + unsigned num_dsets = 513; /* Used to calculate dimension size */ + hsize_t total_particles = numparticles * num_dsets; + hsize_t vdsdims[1] = {total_particles}; /* Dimension size */ + hsize_t start, stride, count, block; /* Selection info */ + unsigned config; /* Testing configuration */ + unsigned unlim; /* H5S_UNLIMITED setting or not */ + herr_t ret; /* Generic return value */ + uint32_t expected_version = 0; /* Expected version for selection info */ + uint8_t expected_enc_size = 0; /* Expected encoded size for selection info */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Dataspace encoding of regular hyperslabs\n")); + + /* Create the file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Set the low/high bounds in the fapl */ + ret = H5Pset_libver_bounds(fapl, low, high); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Create the dataspace */ + sid = H5Screate_simple(1, vdsdims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Testing with each configuration */ + for (config = CONFIG_16; config <= CONFIG_32; config++) { + hbool_t expected_to_fail = FALSE; + + /* Testing with unlimited or not */ + for (unlim = 0; unlim <= 1; unlim++) { + start = 0; + count = unlim ? H5S_UNLIMITED : 2; + + if ((high <= H5F_LIBVER_V18) && (unlim || config == CONFIG_32)) + expected_to_fail = TRUE; + + if (low >= H5F_LIBVER_V112) + expected_version = 3; + else if (config == CONFIG_16 && !unlim) + expected_version = 1; + else + expected_version = 2; + + /* test 1 */ + switch (config) { + case CONFIG_16: + stride = POWER16 - 1; + block = 4; + expected_enc_size = (uint8_t)(expected_version == 3 ? 2 : 4); + break; + case CONFIG_32: + stride = POWER32 - 1; + block = 4; + expected_enc_size = (uint8_t)(expected_version == 3 ? 4 : 8); + + break; + default: + HDassert(0); + break; + } /* end switch */ + + /* Set the hyperslab selection */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Verify the version and encoded size expected for this configuration */ + ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); + CHECK(ret, FAIL, "test_h5s_check_encoding"); + + /* test 2 */ + switch (config) { + case CONFIG_16: + stride = POWER16 - 1; + block = POWER16 - 2; + expected_enc_size = (uint8_t)(expected_version == 3 ? 2 : 4); + break; + case CONFIG_32: + stride = POWER32 - 1; + block = POWER32 - 2; + expected_enc_size = (uint8_t)(expected_version == 3 ? 
4 : 8); + break; + default: + HDassert(0); + break; + } /* end switch */ + + /* Set the hyperslab selection */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Verify the version and encoded size for this configuration */ + ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); + CHECK(ret, FAIL, "test_h5s_check_encoding"); + + /* test 3 */ + switch (config) { + case CONFIG_16: + stride = POWER16 - 1; + block = POWER16 - 1; + expected_enc_size = 4; + break; + case CONFIG_32: + stride = POWER32 - 1; + block = POWER32 - 1; + expected_enc_size = 8; + break; + default: + HDassert(0); + break; + } + + /* Set the hyperslab selection */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Verify the version and encoded size expected for this configuration */ + ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); + CHECK(ret, FAIL, "test_h5s_check_encoding"); + + /* test 4 */ + switch (config) { + case CONFIG_16: + stride = POWER16; + block = POWER16 - 2; + expected_enc_size = 4; + break; + case CONFIG_32: + stride = POWER32; + block = POWER32 - 2; + expected_enc_size = 8; + break; + default: + HDassert(0); + break; + } /* end switch */ + + /* Set the hyperslab selection */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Verify the version and encoded size expected for this configuration */ + ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); + CHECK(ret, FAIL, "test_h5s_check_encoding"); + + /* test 5 */ + switch (config) { + case CONFIG_16: + stride = POWER16; + block = 1; + expected_enc_size = 4; + break; + case CONFIG_32: + stride = POWER32; + block = 1; + expected_enc_size = 8; + break; + default: + HDassert(0); + break; + } + + /* Set the hyperslab selection */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Verify the version and encoded size expected for this configuration */ + ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); + CHECK(ret, FAIL, "test_h5s_check_encoding"); + + } /* for unlim */ + } /* for config */ + + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + +} /* test_h5s_encode_regular_hyper() */ + +/**************************************************************** +** +** test_h5s_encode_irregular_hyper(): +** This test verifies that H5Sencode2() works as specified in +** the RFC for irregular hyperslabs. +** See "RFC: H5Sencode/H5Sdeocde Format Change". 
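+**          (Here "irregular" means a selection built from two
+**          H5Sselect_hyperslab() calls, the second combined with
+**          H5S_SELECT_OR, so it cannot be described by a single
+**          start/stride/count/block set; the test confirms this with
+**          H5Sis_regular_hyperslab().)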
+**
+****************************************************************/
+static void
+test_h5s_encode_irregular_hyper(H5F_libver_t low, H5F_libver_t high)
+{
+    hid_t    fapl = -1;              /* File access property list ID */
+    hid_t    sid;                    /* Dataspace ID */
+    hsize_t  numparticles = 8388608; /* Used to calculate dimension size */
+    unsigned num_dsets    = 513;     /* Used to calculate dimension size */
+    hsize_t  total_particles = numparticles * num_dsets;
+    hsize_t  vdsdims[1]      = {total_particles}; /* Dimension size */
+    hsize_t  start, stride, count, block;         /* Selection info */
+    htri_t   is_regular;                          /* Is this a regular hyperslab */
+    unsigned config;                              /* Testing configuration */
+    herr_t   ret;                                 /* Generic return value */
+
+    /* Output message about test being performed */
+    MESSAGE(5, ("Testing Dataspace encoding of irregular hyperslabs\n"));
+
+    /* Create the file access property list */
+    fapl = H5Pcreate(H5P_FILE_ACCESS);
+    CHECK(fapl, FAIL, "H5Pcreate");
+
+    /* Set the low/high bounds in the fapl */
+    ret = H5Pset_libver_bounds(fapl, low, high);
+    CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+    /* Create the dataspace */
+    sid = H5Screate_simple(1, vdsdims, NULL);
+    CHECK(sid, FAIL, "H5Screate_simple");
+
+    /* Testing with each configuration */
+    for (config = CONFIG_8; config <= CONFIG_32; config++) {
+        hbool_t  expected_to_fail  = FALSE; /* Whether H5Sencode2 is expected to fail */
+        uint32_t expected_version  = 0;     /* Expected version for selection info */
+        uint32_t expected_enc_size = 0;     /* Expected encoded size for selection info */
+
+        start = 0;
+        count = 2;
+        block = 4;
+
+        /* H5Sencode2 is expected to fail for library v110 and below
+           when the selection exceeds the 32 bits integer limit */
+        if (high <= H5F_LIBVER_V110 && config == CONFIG_32)
+            expected_to_fail = TRUE;
+
+        if (low >= H5F_LIBVER_V112 || config == CONFIG_32)
+            expected_version = 3;
+        else
+            expected_version = 1;
+
+        switch (config) {
+            case CONFIG_8:
+                stride = POWER8 - 2;
+                break;
+
+            case CONFIG_16:
+                stride = POWER16 - 2;
+                break;
+
+            case CONFIG_32:
+                stride = POWER32 - 2;
+                break;
+
+            default:
+                HDassert(0);
+                break;
+        }
+
+        /* Set the hyperslab selection */
+        ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block);
+        CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+        start = 8;
+        count = 5;
+        block = 2;
+
+        switch (config) {
+            case CONFIG_8:
+                stride            = POWER8;
+                expected_enc_size = expected_version == 3 ? 2 : 4;
+                break;
+
+            case CONFIG_16:
+                stride            = POWER16;
+                expected_enc_size = 4;
+                break;
+
+            case CONFIG_32:
+                stride            = POWER32;
+                expected_enc_size = 8;
+                break;
+
+            default:
+                HDassert(0);
+                break;
+        }
+
+        /* Set the hyperslab selection */
+        ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, &start, &stride, &count, &block);
+        CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+        /* Should be irregular hyperslab */
+        is_regular = H5Sis_regular_hyperslab(sid);
+        VERIFY(is_regular, FALSE, "H5Sis_regular_hyperslab");
+
+        /* Verify the version and encoded size expected for the configuration */
+        HDassert(expected_enc_size <= 255);
+        ret = test_h5s_check_encoding(fapl, sid, expected_version, (uint8_t)expected_enc_size,
+                                      expected_to_fail);
+        CHECK(ret, FAIL, "test_h5s_check_encoding");
+
+    } /* for config */
+
+    ret = H5Sclose(sid);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close the file access property list */
+    ret = H5Pclose(fapl);
+    CHECK(ret, FAIL, "H5Pclose");
+
+} /* test_h5s_encode_irregular_hyper() */
+
+/****************************************************************
+**
+** test_h5s_encode_points():
+** This test verifies that H5Sencode2() works as specified in
+** the RFC for point selection.
+** See "RFC: H5Sencode/H5Sdeocde Format Change". +** +****************************************************************/ +static void +test_h5s_encode_points(H5F_libver_t low, H5F_libver_t high) +{ + hid_t fapl = -1; /* File access property list ID */ + hid_t sid; /* Dataspace ID */ + hsize_t numparticles = 8388608; /* Used to calculate dimension size */ + unsigned num_dsets = 513; /* used to calculate dimension size */ + hsize_t total_particles = numparticles * num_dsets; + hsize_t vdsdims[1] = {total_particles}; /* Dimension size */ + hsize_t coord[4]; /* The point coordinates */ + herr_t ret; /* Generic return value */ + hbool_t expected_to_fail = FALSE; /* Expected to fail or not */ + uint32_t expected_version = 0; /* Expected version for selection info */ + uint8_t expected_enc_size = 0; /* Expected encoded size of selection info */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Dataspace encoding of points selection\n")); + + /* Create the file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Set the low/high bounds in the fapl */ + ret = H5Pset_libver_bounds(fapl, low, high); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Create the dataspace */ + sid = H5Screate_simple(1, vdsdims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* test 1 */ + coord[0] = 5; + coord[1] = 15; + coord[2] = POWER16; + coord[3] = 19; + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)4, coord); + CHECK(ret, FAIL, "H5Sselect_elements"); + + expected_to_fail = FALSE; + expected_enc_size = 4; + expected_version = 1; + + if (low >= H5F_LIBVER_V112) + expected_version = 2; + + /* Verify the version and encoded size expected for the configuration */ + ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); + CHECK(ret, FAIL, "test_h5s_check_encoding"); + + /* test 2 */ + coord[0] = 5; + coord[1] = 15; + coord[2] = POWER32 - 1; + coord[3] = 19; + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)4, coord); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Expected result same as test 1 */ + ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); + CHECK(ret, FAIL, "test_h5s_check_encoding"); + + /* test 3 */ + if (high <= H5F_LIBVER_V110) + expected_to_fail = TRUE; + + if (high >= H5F_LIBVER_V112) { + expected_version = 2; + expected_enc_size = 8; + } + + coord[0] = 5; + coord[1] = 15; + coord[2] = POWER32 + 1; + coord[3] = 19; + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)4, coord); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Verify the version and encoded size expected for the configuration */ + ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); + CHECK(ret, FAIL, "test_h5s_check_encoding"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + +} /* test_h5s_encode_points() */ + +/**************************************************************** +** +** test_h5s_encode_length(): +** Test to verify HDFFV-10271 is fixed. +** Verify that version 2 hyperslab encoding length is correct. +** +** See "RFC: H5Sencode/H5Sdecode Format Change" for the +** description of the encoding format. 
+** +****************************************************************/ +static void +test_h5s_encode_length(void) +{ + hid_t sid; /* Dataspace ID */ + hid_t decoded_sid; /* Dataspace ID from H5Sdecode2 */ + size_t sbuf_size = 0; /* Buffer size for H5Sencode2/1 */ + unsigned char *sbuf = NULL; /* Buffer for H5Sencode2/1 */ + hsize_t dims[1] = {500}; /* Dimension size */ + hsize_t start, count, block, stride; /* Hyperslab selection specifications */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Version 2 Hyperslab Encoding Length is correct\n")); + + /* Create dataspace */ + sid = H5Screate_simple(1, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Setting H5S_UNLIMITED in count will use version 2 for hyperslab encoding */ + start = 0; + stride = 10; + block = 4; + count = H5S_UNLIMITED; + + /* Set hyperslab selection */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Encode simple dataspace in a buffer */ + ret = H5Sencode2(sid, NULL, &sbuf_size, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Sencode"); + + /* Allocate the buffer */ + if (sbuf_size > 0) { + sbuf = (unsigned char *)HDcalloc((size_t)1, sbuf_size); + CHECK_PTR(sbuf, "H5Sencode2"); + } + + /* Encode the dataspace */ + ret = H5Sencode2(sid, sbuf, &sbuf_size, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Sencode"); + + /* Verify that length stored at this location in the buffer is correct */ + VERIFY((uint32_t)sbuf[40], 36, "Length for encoding version 2"); + VERIFY((uint32_t)sbuf[35], 2, "Hyperslab encoding version is 2"); + + /* Decode from the dataspace buffer and return an object handle */ + decoded_sid = H5Sdecode(sbuf); + CHECK(decoded_sid, FAIL, "H5Sdecode"); + + /* Verify that the original and the decoded dataspace are equal */ + VERIFY(H5Sget_select_npoints(sid), H5Sget_select_npoints(decoded_sid), "Compare npoints"); + + /* Close the decoded dataspace */ + ret = H5Sclose(decoded_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Free the buffer */ + if (sbuf) + HDfree(sbuf); + + /* Close the original dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + +} /* test_h5s_encode_length() */ + +/**************************************************************** +** +** test_h5s_scalar_write(): Test scalar H5S (dataspace) writing code. +** +****************************************************************/ +static void +test_h5s_scalar_write(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + int rank; /* Logical rank of dataspace */ + hsize_t tdims[4]; /* Dimension array to test with */ + hssize_t n; /* Number of dataspace elements */ + H5S_class_t ext_type; /* Extent type */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Scalar Dataspace Manipulation during Writing\n")); + + /* Create file */ + fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Verify a non-zero rank fails with a NULL dimension. 
*/ + H5E_BEGIN_TRY + { + sid1 = H5Screate_simple(SPACE1_RANK, NULL, NULL); + } + H5E_END_TRY + VERIFY(sid1, FAIL, "H5Screate_simple"); + + /* Create scalar dataspace */ + sid1 = H5Screate_simple(SPACE3_RANK, NULL, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Retrieve the number of elements in the dataspace selection */ + n = H5Sget_simple_extent_npoints(sid1); + CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); + VERIFY(n, 1, "H5Sget_simple_extent_npoints"); + + /* Get the dataspace rank */ + rank = H5Sget_simple_extent_ndims(sid1); + CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); + VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims"); + + /* Get the dataspace dimension sizes */ + rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); + VERIFY(rank, 0, "H5Sget_simple_extent_dims"); + + /* Verify extent type */ + ext_type = H5Sget_simple_extent_type(sid1); + VERIFY(ext_type, H5S_SCALAR, "H5Sget_simple_extent_type"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write to the dataset */ + ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &space3_data); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close scalar dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_h5s_scalar_write() */ + +/**************************************************************** +** +** test_h5s_scalar_read(): Test scalar H5S (dataspace) reading code. +** +****************************************************************/ +static void +test_h5s_scalar_read(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + int rank; /* Logical rank of dataspace */ + hsize_t tdims[4]; /* Dimension array to test with */ + hssize_t n; /* Number of dataspace elements */ + unsigned rdata; /* Scalar data read in */ + herr_t ret; /* Generic return value */ + H5S_class_t ext_type; /* Extent type */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Scalar Dataspace Manipulation during Reading\n")); + + /* Create file */ + fid1 = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Create a dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + n = H5Sget_simple_extent_npoints(sid1); + CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); + VERIFY(n, 1, "H5Sget_simple_extent_npoints"); + + rank = H5Sget_simple_extent_ndims(sid1); + CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); + VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims"); + + rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); + VERIFY(rank, 0, "H5Sget_simple_extent_dims"); + + /* Verify extent type */ + ext_type = H5Sget_simple_extent_type(sid1); + VERIFY(ext_type, H5S_SCALAR, "H5Sget_simple_extent_type"); + + ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); + CHECK(ret, FAIL, "H5Dread"); + VERIFY(rdata, space3_data, "H5Dread"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close scalar dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* 
test_h5s_scalar_read() */ + +/**************************************************************** +** +** test_h5s_compound_scalar_write(): Test scalar H5S (dataspace) writing for +** compound datatypes. +** +****************************************************************/ +static void +test_h5s_compound_scalar_write(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t tid1; /* Attribute datatype ID */ + hid_t sid1; /* Dataspace ID */ + int rank; /* Logical rank of dataspace */ + hsize_t tdims[4]; /* Dimension array to test with */ + hssize_t n; /* Number of dataspace elements */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Scalar Dataspace Manipulation for Writing Compound Datatypes\n")); + + /* Create file */ + fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create the compound datatype. */ + tid1 = H5Tcreate(H5T_COMPOUND, sizeof(struct space4_struct)); + CHECK(tid1, FAIL, "H5Tcreate"); + space4_field1_off = HOFFSET(struct space4_struct, c1); + ret = H5Tinsert(tid1, SPACE4_FIELDNAME1, space4_field1_off, H5T_NATIVE_SCHAR); + CHECK(ret, FAIL, "H5Tinsert"); + space4_field2_off = HOFFSET(struct space4_struct, u); + ret = H5Tinsert(tid1, SPACE4_FIELDNAME2, space4_field2_off, H5T_NATIVE_UINT); + CHECK(ret, FAIL, "H5Tinsert"); + space4_field3_off = HOFFSET(struct space4_struct, f); + ret = H5Tinsert(tid1, SPACE4_FIELDNAME3, space4_field3_off, H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + space4_field4_off = HOFFSET(struct space4_struct, c2); + ret = H5Tinsert(tid1, SPACE4_FIELDNAME4, space4_field4_off, H5T_NATIVE_SCHAR); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create scalar dataspace */ + sid1 = H5Screate_simple(SPACE3_RANK, NULL, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + n = H5Sget_simple_extent_npoints(sid1); + CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); + VERIFY(n, 1, "H5Sget_simple_extent_npoints"); + + rank = H5Sget_simple_extent_ndims(sid1); + CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); + VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims"); + + rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); + VERIFY(rank, 0, "H5Sget_simple_extent_dims"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &space4_data); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close compound datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close scalar dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_h5s_compound_scalar_write() */ + +/**************************************************************** +** +** test_h5s_compound_scalar_read(): Test scalar H5S (dataspace) reading for +** compound datatypes. 
+** +****************************************************************/ +static void +test_h5s_compound_scalar_read(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t type; /* Datatype */ + int rank; /* Logical rank of dataspace */ + hsize_t tdims[4]; /* Dimension array to test with */ + hssize_t n; /* Number of dataspace elements */ + struct space4_struct rdata; /* Scalar data read in */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Scalar Dataspace Manipulation for Reading Compound Datatypes\n")); + + /* Create file */ + fid1 = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Create a dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + n = H5Sget_simple_extent_npoints(sid1); + CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); + VERIFY(n, 1, "H5Sget_simple_extent_npoints"); + + rank = H5Sget_simple_extent_ndims(sid1); + CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); + VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims"); + + rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); + VERIFY(rank, 0, "H5Sget_simple_extent_dims"); + + type = H5Dget_type(dataset); + CHECK(type, FAIL, "H5Dget_type"); + + ret = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); + CHECK(ret, FAIL, "H5Dread"); + if (HDmemcmp(&space4_data, &rdata, sizeof(struct space4_struct)) != 0) { + HDprintf("scalar data different: space4_data.c1=%c, read_data4.c1=%c\n", space4_data.c1, rdata.c1); + HDprintf("scalar data different: space4_data.u=%u, read_data4.u=%u\n", space4_data.u, rdata.u); + HDprintf("scalar data different: space4_data.f=%f, read_data4.f=%f\n", (double)space4_data.f, + (double)rdata.f); + TestErrPrintf("scalar data different: space4_data.c1=%c, read_data4.c1=%c\n", space4_data.c1, + rdata.c2); + } /* end if */ + + /* Close datatype */ + ret = H5Tclose(type); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close scalar dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_h5s_compound_scalar_read() */ + +/* Data array sizes for chunk test */ +#define CHUNK_DATA_NX 50000 +#define CHUNK_DATA_NY 3 + +/**************************************************************** +** +** test_h5s_chunk(): Exercise chunked I/O, testing when data conversion +** is necessary and the entire chunk read in doesn't fit into the +** conversion buffer +** +****************************************************************/ +static void +test_h5s_chunk(void) +{ + herr_t status; + hid_t fileID, dsetID; + hid_t plist_id; + hid_t space_id; + hsize_t dims[2]; + hsize_t csize[2]; + double **chunk_data_dbl = NULL; + double *chunk_data_dbl_data = NULL; + float **chunk_data_flt = NULL; + float *chunk_data_flt_data = NULL; + int i, j; + + /* Allocate memory */ + chunk_data_dbl_data = (double *)HDcalloc(CHUNK_DATA_NX * CHUNK_DATA_NY, sizeof(double)); + CHECK_PTR(chunk_data_dbl_data, "HDcalloc"); + chunk_data_dbl = (double **)HDcalloc(CHUNK_DATA_NX, sizeof(chunk_data_dbl_data)); + CHECK_PTR(chunk_data_dbl, "HDcalloc"); + for (i = 0; i < CHUNK_DATA_NX; i++) + chunk_data_dbl[i] = chunk_data_dbl_data + (i * CHUNK_DATA_NY); + + chunk_data_flt_data = (float 
*)HDcalloc(CHUNK_DATA_NX * CHUNK_DATA_NY, sizeof(float)); + CHECK_PTR(chunk_data_flt_data, "HDcalloc"); + chunk_data_flt = (float **)HDcalloc(CHUNK_DATA_NX, sizeof(chunk_data_flt_data)); + CHECK_PTR(chunk_data_flt, "HDcalloc"); + for (i = 0; i < CHUNK_DATA_NX; i++) + chunk_data_flt[i] = chunk_data_flt_data + (i * CHUNK_DATA_NY); + + fileID = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fileID, FAIL, "H5Fcreate"); + + plist_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(plist_id, FAIL, "H5Pcreate"); + + csize[0] = CHUNK_DATA_NX; + csize[1] = CHUNK_DATA_NY; + status = H5Pset_chunk(plist_id, 2, csize); + CHECK(status, FAIL, "H5Pset_chunk"); + + /* Create the dataspace */ + dims[0] = CHUNK_DATA_NX; + dims[1] = CHUNK_DATA_NY; + space_id = H5Screate_simple(2, dims, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + + dsetID = H5Dcreate2(fileID, "coords", H5T_NATIVE_FLOAT, space_id, H5P_DEFAULT, plist_id, H5P_DEFAULT); + CHECK(dsetID, FAIL, "H5Dcreate2"); + + /* Initialize float array */ + for (i = 0; i < CHUNK_DATA_NX; i++) + for (j = 0; j < CHUNK_DATA_NY; j++) + chunk_data_flt[i][j] = (float)(i + 1) * 2.5F - (float)j * 100.3F; + + status = H5Dwrite(dsetID, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chunk_data_flt_data); + CHECK(status, FAIL, "H5Dwrite"); + + status = H5Pclose(plist_id); + CHECK(status, FAIL, "H5Pclose"); + status = H5Sclose(space_id); + CHECK(status, FAIL, "H5Sclose"); + status = H5Dclose(dsetID); + CHECK(status, FAIL, "H5Dclose"); + status = H5Fclose(fileID); + CHECK(status, FAIL, "H5Fclose"); + + /* Reset/initialize the data arrays to read in */ + HDmemset(chunk_data_dbl_data, 0, sizeof(double) * CHUNK_DATA_NX * CHUNK_DATA_NY); + HDmemset(chunk_data_flt_data, 0, sizeof(float) * CHUNK_DATA_NX * CHUNK_DATA_NY); + + fileID = H5Fopen(DATAFILE, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fileID, FAIL, "H5Fopen"); + dsetID = H5Dopen2(fileID, "coords", H5P_DEFAULT); + CHECK(dsetID, FAIL, "H5Dopen2"); + + status = H5Dread(dsetID, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, chunk_data_dbl_data); + CHECK(status, FAIL, "H5Dread"); + status = H5Dread(dsetID, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chunk_data_flt_data); + CHECK(status, FAIL, "H5Dread"); + + status = H5Dclose(dsetID); + CHECK(status, FAIL, "H5Dclose"); + status = H5Fclose(fileID); + CHECK(status, FAIL, "H5Fclose"); + + for (i = 0; i < CHUNK_DATA_NX; i++) { + for (j = 0; j < CHUNK_DATA_NY; j++) { + /* Check if the two values are within 0.001% range. 
*/ + if (!H5_DBL_REL_EQUAL(chunk_data_dbl[i][j], (double)chunk_data_flt[i][j], 0.00001)) + TestErrPrintf("%u: chunk_data_dbl[%d][%d]=%e, chunk_data_flt[%d][%d]=%e\n", + (unsigned)__LINE__, i, j, chunk_data_dbl[i][j], i, j, + (double)chunk_data_flt[i][j]); + } /* end for */ + } /* end for */ + + HDfree(chunk_data_dbl); + HDfree(chunk_data_dbl_data); + HDfree(chunk_data_flt); + HDfree(chunk_data_flt_data); +} /* test_h5s_chunk() */ + +/**************************************************************** +** +** test_h5s_extent_equal(): Exercise extent comparison code +** +****************************************************************/ +static void +test_h5s_extent_equal(void) +{ + hid_t null_space; /* Null dataspace */ + hid_t scalar_space; /* Scalar dataspace */ + hid_t d1_space1, d1_space2, d1_space3, d1_space4; /* 1-D dataspaces */ + hid_t d2_space1, d2_space2, d2_space3, d2_space4; /* 2-D dataspaces */ + hid_t d3_space1, d3_space2, d3_space3, d3_space4; /* 3-D dataspaces */ + hsize_t d1_dims1[1] = {10}, /* 1-D dimensions */ + d1_dims2[1] = {20}, d1_dims3[1] = {H5S_UNLIMITED}; + hsize_t d2_dims1[2] = {10, 10}, /* 2-D dimensions */ + d2_dims2[2] = {20, 20}, d2_dims3[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; + hsize_t d3_dims1[3] = {10, 10, 10}, /* 3-D dimensions */ + d3_dims2[3] = {20, 20, 20}, d3_dims3[3] = {H5S_UNLIMITED, H5S_UNLIMITED, H5S_UNLIMITED}; + htri_t ext_equal; /* Whether two dataspace extents are equal */ + herr_t ret; /* Generic error return */ + + /* Create dataspaces */ + null_space = H5Screate(H5S_NULL); + CHECK(null_space, FAIL, "H5Screate"); + + scalar_space = H5Screate(H5S_SCALAR); + CHECK(scalar_space, FAIL, "H5Screate"); + + d1_space1 = H5Screate_simple(1, d1_dims1, NULL); + CHECK(d1_space1, FAIL, "H5Screate"); + d1_space2 = H5Screate_simple(1, d1_dims2, NULL); + CHECK(d1_space2, FAIL, "H5Screate"); + d1_space3 = H5Screate_simple(1, d1_dims1, d1_dims2); + CHECK(d1_space3, FAIL, "H5Screate"); + d1_space4 = H5Screate_simple(1, d1_dims1, d1_dims3); + CHECK(d1_space4, FAIL, "H5Screate"); + + d2_space1 = H5Screate_simple(2, d2_dims1, NULL); + CHECK(d2_space1, FAIL, "H5Screate"); + d2_space2 = H5Screate_simple(2, d2_dims2, NULL); + CHECK(d2_space2, FAIL, "H5Screate"); + d2_space3 = H5Screate_simple(2, d2_dims1, d2_dims2); + CHECK(d2_space3, FAIL, "H5Screate"); + d2_space4 = H5Screate_simple(2, d2_dims1, d2_dims3); + CHECK(d2_space4, FAIL, "H5Screate"); + + d3_space1 = H5Screate_simple(3, d3_dims1, NULL); + CHECK(d3_space1, FAIL, "H5Screate"); + d3_space2 = H5Screate_simple(3, d3_dims2, NULL); + CHECK(d3_space2, FAIL, "H5Screate"); + d3_space3 = H5Screate_simple(3, d3_dims1, d3_dims2); + CHECK(d3_space3, FAIL, "H5Screate"); + d3_space4 = H5Screate_simple(3, d3_dims1, d3_dims3); + CHECK(d3_space4, FAIL, "H5Screate"); + + /* Compare all dataspace combinations */ + + /* Compare null dataspace against all others, including itself */ + ext_equal = H5Sextent_equal(null_space, null_space); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d2_space1); + 
VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(null_space, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare scalar dataspace against all others, including itself */ + ext_equal = H5Sextent_equal(scalar_space, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, scalar_space); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(scalar_space, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare small 1-D dataspace w/no max. 
dims against all others, including itself */ + ext_equal = H5Sextent_equal(d1_space1, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d1_space1); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space1, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare larger 1-D dataspace w/no max. dims against all others, including itself */ + ext_equal = H5Sextent_equal(d1_space2, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d1_space2); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space2, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare small 1-D dataspace w/fixed max. 
dims against all others, including itself */ + ext_equal = H5Sextent_equal(d1_space3, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d1_space3); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space3, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare small 1-D dataspace w/unlimited max. dims against all others, including itself */ + ext_equal = H5Sextent_equal(d1_space4, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d1_space4); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d1_space4, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare small 2-D dataspace w/no max. 
dims against all others, including itself */ + ext_equal = H5Sextent_equal(d2_space1, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d2_space1); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space1, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare larger 2-D dataspace w/no max. dims against all others, including itself */ + ext_equal = H5Sextent_equal(d2_space2, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d2_space2); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space2, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare small 2-D dataspace w/fixed max. 
dims against all others, including itself */ + ext_equal = H5Sextent_equal(d2_space3, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d2_space3); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space3, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare small 2-D dataspace w/unlimited max. dims against all others, including itself */ + ext_equal = H5Sextent_equal(d2_space4, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d2_space4); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d2_space4, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare small 3-D dataspace w/no max. 
dims against all others, including itself */ + ext_equal = H5Sextent_equal(d3_space1, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d3_space1); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space1, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare larger 2-D dataspace w/no max. dims against all others, including itself */ + ext_equal = H5Sextent_equal(d3_space2, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d3_space2); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space2, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare small 2-D dataspace w/fixed max. 
dims against all others, including itself */ + ext_equal = H5Sextent_equal(d3_space3, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d3_space3); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space3, d3_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + + /* Compare small 2-D dataspace w/unlimited max. dims against all others, including itself */ + ext_equal = H5Sextent_equal(d3_space4, null_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, scalar_space); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d1_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d1_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d1_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d1_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d2_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d2_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d2_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d2_space4); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d3_space1); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d3_space2); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d3_space3); + VERIFY(ext_equal, FALSE, "H5Sextent_equal"); + ext_equal = H5Sextent_equal(d3_space4, d3_space4); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + + /* Close dataspaces */ + ret = H5Sclose(null_space); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(scalar_space); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(d1_space1); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(d1_space2); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(d1_space3); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(d1_space4); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(d2_space1); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(d2_space2); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(d2_space3); + CHECK(ret, FAIL, "H5Sclose"); + ret = 
H5Sclose(d2_space4); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(d3_space1); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(d3_space2); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(d3_space3); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(d3_space4); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_h5s_extent_equal() */ + +/**************************************************************** +** +** test_h5s_extent_copy(): Exercise extent copy code +** +****************************************************************/ +static void +test_h5s_extent_copy(void) +{ + hid_t spaces[14] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}; /* Array of all dataspaces */ + hid_t tmp_space = -1; + hsize_t d1_dims1[1] = {10}, /* 1-D dimensions */ + d1_dims2[1] = {20}, d1_dims3[1] = {H5S_UNLIMITED}; + hsize_t d2_dims1[2] = {10, 10}, /* 2-D dimensions */ + d2_dims2[2] = {20, 20}, d2_dims3[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; + hsize_t d3_dims1[3] = {10, 10, 10}, /* 3-D dimensions */ + d3_dims2[3] = {20, 20, 20}, d3_dims3[3] = {H5S_UNLIMITED, H5S_UNLIMITED, H5S_UNLIMITED}; + hsize_t npoints[14]; /* Expected number of points in selection for each element in spaces */ + hssize_t npoints_ret; /* Number of points returned by H5Sget_select_npoints() */ + htri_t ext_equal; /* Whether two dataspace extents are equal */ + const unsigned num_spaces = sizeof(spaces) / sizeof(spaces[0]); + unsigned i, j; + herr_t ret; /* Generic error return */ + + /* Create dataspaces */ + spaces[0] = H5Screate(H5S_NULL); + CHECK(spaces[0], FAIL, "H5Screate"); + npoints[0] = (hsize_t)0; + + spaces[1] = H5Screate(H5S_SCALAR); + CHECK(spaces[1], FAIL, "H5Screate"); + npoints[1] = (hsize_t)1; + + spaces[2] = H5Screate_simple(1, d1_dims1, NULL); + CHECK(spaces[2], FAIL, "H5Screate"); + npoints[2] = d1_dims1[0]; + spaces[3] = H5Screate_simple(1, d1_dims2, NULL); + CHECK(spaces[3], FAIL, "H5Screate"); + npoints[3] = d1_dims2[0]; + spaces[4] = H5Screate_simple(1, d1_dims1, d1_dims2); + CHECK(spaces[4], FAIL, "H5Screate"); + npoints[4] = d1_dims1[0]; + spaces[5] = H5Screate_simple(1, d1_dims1, d1_dims3); + CHECK(spaces[5], FAIL, "H5Screate"); + npoints[5] = d1_dims1[0]; + + spaces[6] = H5Screate_simple(2, d2_dims1, NULL); + CHECK(spaces[6], FAIL, "H5Screate"); + npoints[6] = d2_dims1[0] * d2_dims1[1]; + spaces[7] = H5Screate_simple(2, d2_dims2, NULL); + CHECK(spaces[7], FAIL, "H5Screate"); + npoints[7] = d2_dims2[0] * d2_dims2[1]; + spaces[8] = H5Screate_simple(2, d2_dims1, d2_dims2); + CHECK(spaces[8], FAIL, "H5Screate"); + npoints[8] = d2_dims1[0] * d2_dims1[1]; + spaces[9] = H5Screate_simple(2, d2_dims1, d2_dims3); + CHECK(spaces[9], FAIL, "H5Screate"); + npoints[9] = d2_dims1[0] * d2_dims1[1]; + + spaces[10] = H5Screate_simple(3, d3_dims1, NULL); + CHECK(spaces[10], FAIL, "H5Screate"); + npoints[10] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2]; + spaces[11] = H5Screate_simple(3, d3_dims2, NULL); + CHECK(spaces[11], FAIL, "H5Screate"); + npoints[11] = d3_dims2[0] * d3_dims2[1] * d3_dims2[2]; + spaces[12] = H5Screate_simple(3, d3_dims1, d3_dims2); + CHECK(spaces[12], FAIL, "H5Screate"); + npoints[12] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2]; + spaces[13] = H5Screate_simple(3, d3_dims1, d3_dims3); + CHECK(spaces[13], FAIL, "H5Screate"); + npoints[13] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2]; + + tmp_space = H5Screate(H5S_NULL); + CHECK(tmp_space, FAIL, "H5Screate"); + + /* Copy between all dataspace combinations. Note there are a few + * duplicates. 
*/ + for (i = 0; i < num_spaces; i++) + for (j = i; j < num_spaces; j++) { + /* Copy from i to j, unless the inner loop just restarted, in which + * case i and j are the same, so the second call to H5Sextent_copy() + * will test copying from i/j to i/j */ + ret = H5Sextent_copy(tmp_space, spaces[j]); + CHECK(ret, FAIL, "H5Sextent_copy"); + + /* Verify that the extents are equal */ + ext_equal = H5Sextent_equal(tmp_space, spaces[j]); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + + /* Verify that the correct number of elements is selected */ + npoints_ret = H5Sget_select_npoints(tmp_space); + VERIFY((hsize_t)npoints_ret, npoints[j], "H5Sget_select_npoints"); + + /* Copy from j to i */ + ret = H5Sextent_copy(tmp_space, spaces[i]); + CHECK(ret, FAIL, "H5Sextent_copy"); + + /* Verify that the extents are equal */ + ext_equal = H5Sextent_equal(tmp_space, spaces[i]); + VERIFY(ext_equal, TRUE, "H5Sextent_equal"); + + /* Verify that the correct number of elements is selected */ + npoints_ret = H5Sget_select_npoints(tmp_space); + VERIFY((hsize_t)npoints_ret, npoints[i], "H5Sget_select_npoints"); + } /* end for */ + + /* Close dataspaces */ + for (i = 0; i < num_spaces; i++) { + ret = H5Sclose(spaces[i]); + CHECK(ret, FAIL, "H5Sclose"); + spaces[i] = -1; + } /* end for */ + + ret = H5Sclose(tmp_space); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_h5s_extent_copy() */ + +/**************************************************************** +** +** test_h5s_bug1(): Test Creating dataspace with H5Screate then +* setting extent with H5Sextent_copy. +** +****************************************************************/ +static void +test_h5s_bug1(void) +{ + hid_t space1; /* Dataspace to copy extent to */ + hid_t space2; /* Scalar dataspace */ + hsize_t dims[2] = {10, 10}; /* Dimensions */ + hsize_t start[2] = {0, 0}; /* Hyperslab start */ + htri_t select_valid; /* Whether the dataspace selection is valid */ + herr_t ret; /* Generic error return */ + + /* Create dataspaces */ + space1 = H5Screate(H5S_SIMPLE); + CHECK(space1, FAIL, "H5Screate"); + space2 = H5Screate_simple(2, dims, NULL); + CHECK(space2, FAIL, "H5Screate"); + + /* Copy extent to space1 */ + ret = H5Sextent_copy(space1, space2); + CHECK(ret, FAIL, "H5Sextent_copy"); + + /* Select hyperslab in space1 containing entire extent */ + ret = H5Sselect_hyperslab(space1, H5S_SELECT_SET, start, NULL, dims, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Check that space1's selection is valid */ + select_valid = H5Sselect_valid(space1); + CHECK(select_valid, FAIL, "H5Sselect_valid"); + VERIFY(select_valid, TRUE, "H5Sselect_valid result"); + + /* Close dataspaces */ + ret = H5Sclose(space1); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(space2); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_h5s_bug1() */ + +/**************************************************************** +** +** test_h5s_bug2(): Test combining hyperslabs in a way that used +** to trip up H5S__hyper_update_diminfo() +** +****************************************************************/ +static void +test_h5s_bug2(void) +{ + hid_t space; /* Dataspace to copy extent to */ + hsize_t dims[2] = {1, 5}; /* Dimensions */ + hsize_t start[2] = {0, 0}; /* Hyperslab start */ + hsize_t count[2] = {1, 1}; /* Hyperslab start */ + htri_t select_valid; /* Whether the dataspace selection is valid */ + hssize_t elements_selected; /* Number of elements selected */ + herr_t ret; /* Generic error return */ + + /* Create dataspace */ + space = H5Screate_simple(2, dims, NULL); + CHECK(space, FAIL, 
"H5Screate"); + + /* Select hyperslab in space containing first element */ + ret = H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Add hyperslab in space containing last element */ + start[1] = 4; + ret = H5Sselect_hyperslab(space, H5S_SELECT_OR, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Add hyperslab in space containing the first 3 elements */ + start[1] = 0; + count[1] = 3; + ret = H5Sselect_hyperslab(space, H5S_SELECT_OR, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Check that space's selection is valid */ + select_valid = H5Sselect_valid(space); + CHECK(select_valid, FAIL, "H5Sselect_valid"); + VERIFY(select_valid, TRUE, "H5Sselect_valid result"); + + /* Check that 4 elements are selected */ + elements_selected = H5Sget_select_npoints(space); + CHECK(elements_selected, FAIL, "H5Sselect_valid"); + VERIFY(elements_selected, 4, "H5Sselect_valid result"); + + /* Close dataspaces */ + ret = H5Sclose(space); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_h5s_bug2() */ + +/*------------------------------------------------------------------------- + * Function: test_versionbounds + * + * Purpose: Tests version bounds with dataspace. + * + * Description: + * This function creates a file with lower bounds then later + * reopens it with higher bounds to show that the dataspace + * version is upgraded appropriately. + * + * Return: Success: 0 + * Failure: number of errors + * + *------------------------------------------------------------------------- + */ +#define VERBFNAME "tverbounds_dspace.h5" +#define BASIC_DSET "Basic Dataset" +#define LATEST_DSET "Latest Dataset" +static void +test_versionbounds(void) +{ + hid_t file = -1; /* File ID */ + hid_t space = -1; /* Dataspace ID */ + hid_t dset = -1; /* Dataset ID */ + hid_t fapl = -1; /* File access property list ID */ + hid_t dset_space = -1; /* Retrieved dataset's dataspace ID */ + hsize_t dim[1]; /* Dataset dimensions */ + H5F_libver_t low, high; /* File format bounds */ +#if 0 + H5S_t *spacep = NULL; /* Pointer to internal dataspace */ +#endif + herr_t ret = 0; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Version Bounds\n")); + + /* Create a file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Create dataspace */ + dim[0] = 10; + space = H5Screate_simple(1, dim, NULL); + CHECK(space, FAIL, "H5Screate"); +#if 0 + /* Its version should be H5O_SDSPACE_VERSION_1 */ + spacep = (H5S_t *)H5I_object(space); + CHECK_PTR(spacep, "H5I_object"); + VERIFY(spacep->extent.version, H5O_SDSPACE_VERSION_1, "basic dataspace version bound"); +#endif + + /* Set high bound to V18 */ + low = H5F_LIBVER_EARLIEST; + high = H5F_LIBVER_V18; + ret = H5Pset_libver_bounds(fapl, low, high); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Create the file */ + file = H5Fcreate(VERBFNAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file, FAIL, "H5Fcreate"); + + /* Create a basic dataset */ + dset = H5Dcreate2(file, BASIC_DSET, H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + if (dset > 0) /* dataset created successfully */ + { + /* Get the internal dataspace pointer */ + dset_space = H5Dget_space(dset); + CHECK(dset_space, FAIL, "H5Dget_space"); +#if 0 + spacep = (H5S_t *)H5I_object(dset_space); + CHECK_PTR(spacep, "H5I_object"); + + /* Dataspace version should remain as H5O_SDSPACE_VERSION_1 */ + 
VERIFY(spacep->extent.version, H5O_SDSPACE_VERSION_1, "basic dataspace version bound"); +#endif + /* Close dataspace */ + ret = H5Sclose(dset_space); + CHECK(ret, FAIL, "H5Sclose"); + } + + /* Close basic dataset and the file */ + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Set low and high bounds to latest to trigger the increment of the + dataspace version */ + low = H5F_LIBVER_LATEST; + high = H5F_LIBVER_LATEST; + ret = H5Pset_libver_bounds(fapl, low, high); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Reopen the file with new version bounds, LATEST/LATEST */ + file = H5Fopen(VERBFNAME, H5F_ACC_RDWR, fapl); + + /* Create another dataset using the same dspace as the previous dataset */ + dset = H5Dcreate2(file, LATEST_DSET, H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + + /* Dataset created successfully. Verify that dataspace version has been + upgraded per the low bound */ + + /* Get the internal dataspace pointer */ + dset_space = H5Dget_space(dset); + CHECK(dset_space, FAIL, "H5Dget_space"); +#if 0 + spacep = (H5S_t *)H5I_object(dset_space); + CHECK_PTR(spacep, "H5I_object"); + + /* Verify the dataspace version */ + VERIFY(spacep->extent.version, H5O_sdspace_ver_bounds[low], "upgraded dataspace version"); +#endif + /* Close everything */ + ret = H5Sclose(dset_space); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(space); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_versionbounds() */ + +/**************************************************************** +** +** test_h5s(): Main H5S (dataspace) testing routine. 
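+**
+**  A minimal sketch of how this routine and cleanup_h5s() would typically be
+**  wired into the testhdf5 driver (assuming the framework's usual AddTest()
+**  registration helper):
+**
+**      AddTest("h5s", test_h5s, cleanup_h5s, "Dataspaces", NULL);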
+** +****************************************************************/ +void +test_h5s(void) +{ + H5F_libver_t low, high; /* Low and high bounds */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Dataspaces\n")); + + test_h5s_basic(); /* Test basic H5S code */ + test_h5s_null(); /* Test Null dataspace H5S code */ + test_h5s_zero_dim(); /* Test dataspace with zero dimension size */ +#if 0 + /* Loop through all the combinations of low/high version bounds */ + for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { + for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { + + /* Invalid combinations, just continue */ + if (high == H5F_LIBVER_EARLIEST || high < low) + continue; +#else + low = H5F_LIBVER_LATEST; + high = H5F_LIBVER_LATEST; +#endif + test_h5s_encode(low, high); /* Test encoding and decoding */ + test_h5s_encode_regular_hyper(low, high); /* Test encoding regular hyperslabs */ + test_h5s_encode_irregular_hyper(low, high); /* Test encoding irregular hyperslabs */ + test_h5s_encode_points(low, high); /* Test encoding points */ +#if 0 + } /* end high bound */ + } /* end low bound */ +#endif + test_h5s_encode_length(); /* Test version 2 hyperslab encoding length is correct */ +#ifndef H5_NO_DEPRECATED_SYMBOLS + test_h5s_encode1(); /* Test operations with old API routine (H5Sencode1) */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + + test_h5s_scalar_write(); /* Test scalar H5S writing code */ + test_h5s_scalar_read(); /* Test scalar H5S reading code */ + + test_h5s_compound_scalar_write(); /* Test compound datatype scalar H5S writing code */ + test_h5s_compound_scalar_read(); /* Test compound datatype scalar H5S reading code */ + + /* This test was added later to exercise a bug in chunked I/O */ + test_h5s_chunk(); /* Exercise bug fix for chunked I/O */ + + test_h5s_extent_equal(); /* Test extent comparison code */ + test_h5s_extent_copy(); /* Test extent copy code */ + test_h5s_bug1(); /* Test bug in offset initialization */ + test_h5s_bug2(); /* Test bug found in H5S__hyper_update_diminfo() */ + test_versionbounds(); /* Test version bounds with dataspace */ +} /* test_h5s() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_h5s + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Albert Cheng + * July 2, 1998 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +cleanup_h5s(void) +{ + H5Fdelete(DATAFILE, H5P_DEFAULT); + H5Fdelete(NULLFILE, H5P_DEFAULT); + H5Fdelete(BASICFILE, H5P_DEFAULT); + H5Fdelete(ZEROFILE, H5P_DEFAULT); + H5Fdelete(VERBFNAME, H5P_DEFAULT); +} diff --git a/test/API/tid.c b/test/API/tid.c new file mode 100644 index 00000000000..2dd8851c0a9 --- /dev/null +++ b/test/API/tid.c @@ -0,0 +1,1413 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Test user-created identifiers (hid_t's) and identifier types. 
*/ + +#include "testhdf5.h" + +#if 0 +/* Include H5Ipkg.h to calculate max number of groups */ +#define H5I_FRIEND /*suppress error about including H5Ipkg */ +#include "H5Ipkg.h" +#endif + +/* + * Number of bits to use for ID Type in each ID. Increase if more types + * are needed (though this will decrease the number of available IDs per + * type). This is the only number that must be changed since all other bit + * field sizes and masks are calculated from TYPE_BITS. + */ +#define TYPE_BITS 7 +#define TYPE_MASK (((hid_t)1 << TYPE_BITS) - 1) + +#define H5I_MAX_NUM_TYPES TYPE_MASK + +static herr_t +free_wrapper(void *p, void H5_ATTR_UNUSED **_ctx) +{ + HDfree(p); + return SUCCEED; +} + +/* Test basic functionality of registering and deleting types and IDs */ +static int +basic_id_test(void) +{ + H5I_type_t myType = H5I_BADID; + hid_t arrayID = H5I_INVALID_HID; + void *testObj = NULL; + void *testPtr = NULL; + char nameString[10]; + hid_t testID; + ssize_t testSize = -1; + herr_t err; + int num_ref; + hsize_t num_members; + + /* Try to register an ID with fictitious types */ + H5E_BEGIN_TRY + arrayID = H5Iregister((H5I_type_t)420, testObj); + H5E_END_TRY + + VERIFY(arrayID, H5I_INVALID_HID, "H5Iregister"); + if (arrayID != H5I_INVALID_HID) + goto out; + + H5E_BEGIN_TRY + arrayID = H5Iregister((H5I_type_t)-1, testObj); + H5E_END_TRY + + VERIFY(arrayID, H5I_INVALID_HID, "H5Iregister"); + if (arrayID != H5I_INVALID_HID) + goto out; + + /* Try to access IDs with fictitious types */ + H5E_BEGIN_TRY + testPtr = H5Iobject_verify((hid_t)100, (H5I_type_t)0); + H5E_END_TRY + + CHECK_PTR_NULL(testPtr, "H5Iobject_verify"); + if (testPtr != NULL) + goto out; + + H5E_BEGIN_TRY + testPtr = H5Iobject_verify((hid_t)700, (H5I_type_t)700); + H5E_END_TRY + + CHECK_PTR_NULL(testPtr, "H5Iobject_verify"); + if (testPtr != NULL) + goto out; + + /* Register a type */ + myType = H5Iregister_type((size_t)64, 0, free_wrapper); + + CHECK(myType, H5I_BADID, "H5Iregister_type"); + if (myType == H5I_BADID) + goto out; + + /* Register an ID and retrieve the object it points to. + * Once the ID has been registered, testObj will be freed when + * its ID type is destroyed. 
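+ *
+ * Ownership sketch (illustrative, using this test's free_wrapper callback):
+ *
+ *     testObj = HDmalloc(...);
+ *     arrayID = H5Iregister(myType, testObj);   the type now owns testObj
+ *     H5Idestroy_type(myType);                  invokes free_wrapper(testObj)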
+ */ + testObj = HDmalloc(7 * sizeof(int)); + arrayID = H5Iregister(myType, testObj); + + CHECK(arrayID, H5I_INVALID_HID, "H5Iregister"); + if (arrayID == H5I_INVALID_HID) { + HDfree(testObj); + goto out; + } + + testPtr = (int *)H5Iobject_verify(arrayID, myType); + + CHECK_PTR_EQ(testPtr, testObj, "H5Iobject_verify"); + if (testPtr != testObj) + goto out; + + /* Ensure that H5Iget_file_id and H5Iget_name() fail, since this + * is an hid_t for the wrong kind of object + */ + H5E_BEGIN_TRY + testID = H5Iget_file_id(arrayID); + H5E_END_TRY + + VERIFY(testID, H5I_INVALID_HID, "H5Iget_file_id"); + if (testID != H5I_INVALID_HID) + goto out; + + H5E_BEGIN_TRY + testSize = H5Iget_name(arrayID, nameString, (size_t)9); + H5E_END_TRY + + VERIFY(testSize, -1, "H5Iget_name"); + if (testSize != -1) + goto out; + + /* Make sure H5Iremove_verify catches objects of the wrong type */ + H5E_BEGIN_TRY + testPtr = (int *)H5Iremove_verify(arrayID, (H5I_type_t)0); + H5E_END_TRY + + CHECK_PTR_NULL(testPtr, "H5Iremove_verify"); + if (testPtr != NULL) + goto out; + + H5E_BEGIN_TRY + testPtr = (int *)H5Iremove_verify(arrayID, (H5I_type_t)((int)myType - 1)); + H5E_END_TRY + + CHECK_PTR_NULL(testPtr, "H5Iremove_verify"); + if (testPtr != NULL) + goto out; + + /* Remove an ID and make sure we can't access it */ + testPtr = (int *)H5Iremove_verify(arrayID, myType); + + CHECK_PTR(testPtr, "H5Iremove_verify"); + if (testPtr == NULL) + goto out; + + H5E_BEGIN_TRY + testPtr = (int *)H5Iobject_verify(arrayID, myType); + H5E_END_TRY + + CHECK_PTR_NULL(testPtr, "H5Iobject_verify"); + if (testPtr != NULL) + goto out; + + /* Delete the type and make sure we can't access objects within it */ + arrayID = H5Iregister(myType, testObj); + + err = H5Idestroy_type(myType); + VERIFY(err, 0, "H5Idestroy_type"); + if (err != 0) + goto out; + VERIFY(H5Itype_exists(myType), 0, "H5Itype_exists"); + if (H5Itype_exists(myType) != 0) + goto out; + + H5E_BEGIN_TRY + VERIFY(H5Inmembers(myType, NULL), -1, "H5Inmembers"); + if (H5Inmembers(myType, NULL) != -1) + goto out; + H5E_END_TRY + + /* Register another type and another object in that type */ + myType = H5Iregister_type((size_t)64, 0, free_wrapper); + + CHECK(myType, H5I_BADID, "H5Iregister_type"); + if (myType == H5I_BADID) + goto out; + + /* The memory that testObj pointed to should already have been + * freed when the previous type was destroyed. Allocate new + * memory for it. 
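+ * (Reusing the old pointer here would be a use-after-free, since
+ * free_wrapper() already released it when the previous type was destroyed.)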
+ */ + testObj = HDmalloc(7 * sizeof(int)); + arrayID = H5Iregister(myType, testObj); + + CHECK(arrayID, H5I_INVALID_HID, "H5Iregister"); + if (arrayID == H5I_INVALID_HID) { + HDfree(testObj); + goto out; + } + + err = H5Inmembers(myType, &num_members); + CHECK(err, -1, "H5Inmembers"); + if (err < 0) + goto out; + VERIFY(num_members, 1, "H5Inmembers"); + if (num_members != 1) + goto out; + + /* Increment references to type and ensure that dec_type_ref + * doesn't destroy the type + */ + num_ref = H5Iinc_type_ref(myType); + VERIFY(num_ref, 2, "H5Iinc_type_ref"); + if (num_ref != 2) + goto out; + num_ref = H5Idec_type_ref(myType); + VERIFY(num_ref, 1, "H5Idec_type_ref"); + if (num_ref != 1) + goto out; + err = H5Inmembers(myType, &num_members); + CHECK(err, -1, "H5Inmembers"); + if (err < 0) + goto out; + VERIFY(num_members, 1, "H5Inmembers"); + if (num_members != 1) + goto out; + + /* This call to dec_type_ref should destroy the type */ + num_ref = H5Idec_type_ref(myType); + VERIFY(num_ref, 0, "H5Idec_type_ref"); + if (num_ref != 0) + goto out; + VERIFY(H5Itype_exists(myType), 0, "H5Itype_exists"); + if (H5Itype_exists(myType) != 0) + goto out; + + H5E_BEGIN_TRY + err = H5Inmembers(myType, &num_members); + if (err >= 0) + goto out; + H5E_END_TRY + + return 0; + +out: + /* Clean up type if it has been allocated and free memory used + * by testObj + */ + if (myType >= 0) + H5Idestroy_type(myType); + + return -1; +} + +/* A dummy search function for the next test */ +static int +test_search_func(void H5_ATTR_UNUSED *ptr1, hid_t H5_ATTR_UNUSED id, void H5_ATTR_UNUSED *ptr2) +{ + return 0; +} + +/* Ensure that public functions cannot access "predefined" ID types */ +static int +id_predefined_test(void) +{ + void *testObj; + hid_t testID; + hid_t typeID = H5I_INVALID_HID; + void *testPtr; + herr_t testErr; + + testObj = HDmalloc(sizeof(int)); + + /* + * Attempt to perform public functions on various library types + */ + + H5E_BEGIN_TRY + testID = H5Iregister(H5I_FILE, testObj); + H5E_END_TRY + + VERIFY(testID, H5I_INVALID_HID, "H5Iregister"); + if (testID != H5I_INVALID_HID) + goto out; + + H5E_BEGIN_TRY + testPtr = H5Isearch(H5I_GENPROP_LST, test_search_func, testObj); + H5E_END_TRY + + CHECK_PTR_NULL(testPtr, "H5Isearch"); + if (testPtr != NULL) + goto out; + + H5E_BEGIN_TRY + testErr = H5Inmembers(H5I_ERROR_STACK, NULL); + H5E_END_TRY + + VERIFY(testErr, -1, "H5Inmembers"); + if (testErr != -1) + goto out; + + H5E_BEGIN_TRY + testErr = H5Iclear_type(H5I_FILE, 0); + H5E_END_TRY + + VERIFY((testErr >= 0), 0, "H5Iclear_type"); + if (testErr >= 0) + goto out; + + H5E_BEGIN_TRY + testErr = H5Idestroy_type(H5I_DATASET); + H5E_END_TRY + + VERIFY((testErr >= 0), 0, "H5Idestroy_type"); + if (testErr >= 0) + goto out; + + H5E_BEGIN_TRY + testErr = H5Itype_exists(H5I_GROUP); + H5E_END_TRY + + VERIFY(testErr, -1, "H5Itype_exists"); + if (testErr != -1) + goto out; + + H5E_BEGIN_TRY + testErr = H5Itype_exists(H5I_ATTR); + H5E_END_TRY + + VERIFY(testErr, -1, "H5Itype_exists"); + if (testErr != -1) + goto out; + + /* + * Create a datatype ID and try to perform illegal functions on it + */ + + typeID = H5Tcreate(H5T_OPAQUE, (size_t)42); + CHECK(typeID, H5I_INVALID_HID, "H5Tcreate"); + if (typeID == H5I_INVALID_HID) + goto out; + + H5E_BEGIN_TRY + testPtr = H5Iremove_verify(typeID, H5I_DATATYPE); + H5E_END_TRY + + CHECK_PTR_NULL(testPtr, "H5Iremove_verify"); + if (testPtr != NULL) + goto out; + + H5E_BEGIN_TRY + testPtr = H5Iobject_verify(typeID, H5I_DATATYPE); + H5E_END_TRY + + CHECK_PTR_NULL(testPtr, 
"H5Iobject_verify"); + if (testPtr != NULL) + goto out; + + H5Tclose(typeID); + + /* testObj was never registered as an atom, so it will not be + * automatically freed. */ + HDfree(testObj); + return 0; + +out: + if (typeID != H5I_INVALID_HID) + H5Tclose(typeID); + if (testObj != NULL) + HDfree(testObj); + + return -1; +} + +/* Test the H5Iis_valid function */ +static int +test_is_valid(void) +{ + hid_t dtype; /* datatype id */ +#if 0 + int64_t nmembs1; /* number of type memnbers */ + int64_t nmembs2; +#endif + htri_t tri_ret; /* htri_t return value */ +#if 0 + herr_t ret; /* return value */ +#endif + + /* Create a datatype id */ + dtype = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype, FAIL, "H5Tcopy"); + if (dtype < 0) + goto out; + + /* Check that the ID is valid */ + tri_ret = H5Iis_valid(dtype); + VERIFY(tri_ret, TRUE, "H5Iis_valid"); + if (tri_ret != TRUE) + goto out; +#if 0 /* Cannot call internal APIs and cannot call public H5Inmembers on library types */ + /* Artificially manipulate the reference counts so app_count is 0, and dtype + * appears to be an internal id. This takes advantage of the fact that + * H5Ipkg is included. + */ + ret = H5I_inc_ref(dtype, FALSE); + CHECK(ret, FAIL, "H5I_inc_ref"); + if (ret < 0) + goto out; + ret = H5I_dec_app_ref(dtype); + CHECK(ret, FAIL, "H5I_dec_ref"); + if (ret < 0) + goto out; + + /* Check that dtype is invalid */ + tri_ret = H5Iis_valid(dtype); + VERIFY(tri_ret, FALSE, "H5Iis_valid"); + if (tri_ret != FALSE) + goto out; + + /* Close dtype and verify that it has been closed */ + nmembs1 = H5I_nmembers(H5I_DATATYPE); + CHECK(nmembs1, FAIL, "H5I_nmembers"); + if (nmembs1 < 0) + goto out; + ret = H5I_dec_ref(dtype); + CHECK(ret, FAIL, "H5I_dec_ref"); + if (ret < 0) + goto out; + nmembs2 = H5I_nmembers(H5I_DATATYPE); + VERIFY(nmembs2, nmembs1 - 1, "H5I_nmembers"); + if (nmembs2 != nmembs1 - 1) + goto out; + + /* Check that dtype is invalid */ + tri_ret = H5Iis_valid(dtype); + VERIFY(tri_ret, FALSE, "H5Iis_valid"); + if (tri_ret != FALSE) + goto out; +#endif + /* Check that an id of -1 is invalid */ + tri_ret = H5Iis_valid((hid_t)-1); + VERIFY(tri_ret, FALSE, "H4Iis_valid"); + if (tri_ret != FALSE) + goto out; + + return 0; + +out: + /* Don't attempt to close dtype as we don't know the exact state of the + * reference counts. Every state in this function will be automatically + * closed at library exit anyways, as internal count is never > 1. + */ + return -1; +} + +/* Test the H5Iget_type function */ +static int +test_get_type(void) +{ + hid_t dtype; /* datatype id */ + H5I_type_t type_ret; /* return value */ + + /* Create a datatype id */ + dtype = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype, FAIL, "H5Tcopy"); + if (dtype < 0) + goto out; + + /* Check that the ID is correct */ + type_ret = H5Iget_type(dtype); + VERIFY(type_ret, H5I_DATATYPE, "H5Iget_type"); + if (type_ret == H5I_BADID) + goto out; + + /* Check that the ID is correct */ + type_ret = H5Iget_type((hid_t)H5T_STRING); + VERIFY(type_ret, H5I_BADID, "H5Iget_type"); + if (type_ret != H5I_BADID) + goto out; + + /* Check that the ID is correct */ + type_ret = H5Iget_type((hid_t)-1); + VERIFY(type_ret, H5I_BADID, "H5Iget_type"); + if (type_ret != H5I_BADID) + goto out; + + H5Tclose(dtype); + + return 0; + +out: + if (dtype != H5I_INVALID_HID) + H5Tclose(dtype); + + return -1; +} + +/* Test boundary cases with lots of types */ + +/* Type IDs range from H5I_NTYPES to H5I_MAX_NUM_TYPES. 
The system will assign */ +/* IDs in sequential order until H5I_MAX_NUM_TYPES IDs have been given out, at which */ +/* point it will search for type IDs that were allocated but have since been */ +/* deleted. */ +/* This test will allocate IDs up to H5I_MAX_NUM_TYPES, ensure that IDs wrap around */ +/* to low values successfully, ensure that an error is thrown when all possible */ +/* type IDs are taken, then ensure that deleting types frees up their IDs. */ +/* Note that this test depends on the implementation of IDs, so may break */ +/* if the implementation changes. */ +/* Also note that if someone else registered a user-defined type and forgot to */ +/* destroy it, this test will mysteriously fail (because it will expect there to */ +/* be one more "free" type ID than there is). */ +/* H5I_NTYPES is defined in h5public.h, H5I_MAX_NUM_TYPES is defined in h5pkg.h */ +static int +test_id_type_list(void) +{ + H5I_type_t startType; /* The first type ID we were assigned in this test */ + H5I_type_t currentType; + H5I_type_t testType; + int i; /* Just a counter variable */ + + startType = H5Iregister_type((size_t)8, 0, free_wrapper); + CHECK(startType, H5I_BADID, "H5Iregister_type"); + if (startType == H5I_BADID) + goto out; + + /* Sanity check */ + if ((int)startType >= H5I_MAX_NUM_TYPES || startType < H5I_NTYPES) { + /* Error condition, throw an error */ + ERROR("H5Iregister_type"); + goto out; + } + /* Create types up to H5I_MAX_NUM_TYPES */ + for (i = startType + 1; i < H5I_MAX_NUM_TYPES; i++) { + currentType = H5Iregister_type((size_t)8, 0, free_wrapper); + CHECK(currentType, H5I_BADID, "H5Iregister_type"); + if (currentType == H5I_BADID) + goto out; + } + + /* Wrap around to low type ID numbers */ + for (i = H5I_NTYPES; i < startType; i++) { + currentType = H5Iregister_type((size_t)8, 0, free_wrapper); + CHECK(currentType, H5I_BADID, "H5Iregister_type"); + if (currentType == H5I_BADID) + goto out; + } + + /* There should be no room at the inn for a new ID type*/ + H5E_BEGIN_TRY + testType = H5Iregister_type((size_t)8, 0, free_wrapper); + H5E_END_TRY + + VERIFY(testType, H5I_BADID, "H5Iregister_type"); + if (testType != H5I_BADID) + goto out; + + /* Now delete a type and try to insert again */ + H5Idestroy_type(H5I_NTYPES); + testType = H5Iregister_type((size_t)8, 0, free_wrapper); + + VERIFY(testType, H5I_NTYPES, "H5Iregister_type"); + if (testType != H5I_NTYPES) + goto out; + + /* Cleanup. Destroy all types. */ + for (i = H5I_NTYPES; i < H5I_MAX_NUM_TYPES; i++) + H5Idestroy_type((H5I_type_t)i); + + return 0; + +out: + /* Cleanup. For simplicity, just destroy all types and ignore errors. */ + H5E_BEGIN_TRY + for (i = H5I_NTYPES; i < H5I_MAX_NUM_TYPES; i++) + H5Idestroy_type((H5I_type_t)i); + H5E_END_TRY + return -1; +} + +/* Test removing ids in callback for H5Iclear_type */ + +/* There was a rare bug where, if an id free callback being called by + * H5I_clear_type() removed another id in that type, a segfault could occur. + * This test tests for that error (and freeing ids "out of order" within + * H5Iclear_type() in general). 
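+ *
+ * Call pattern being exercised (illustrative):
+ *
+ *     H5Iclear_type(type, ...)
+ *         -> rct_free_cb(A)           free callback for object A
+ *                -> H5Idec_ref(id_B)  frees another object B of the same
+ *                                     type while the clear is still iterating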
+ * + * NB: RCT = "remove clear type" + */ + +/* Macro definitions */ +#define RCT_MAX_NOBJS 25 /* Maximum number of objects in the list */ +#define RCT_MIN_NOBJS 5 +#define RCT_NITER 50 /* Number of times we cycle through object creation and deletion */ + +/* Structure to hold the master list of objects */ +typedef struct rct_obj_list_t { + + /* Pointer to the objects */ + struct rct_obj_t *objects; + + /* The number of objects in the list */ + long count; + + /* The number of objects in the list that have not been freed */ + long remaining; +} rct_obj_list_t; + +/* Structure for an object */ +typedef struct rct_obj_t { + /* The ID for this object */ + hid_t id; + + /* The number of times this object has been freed */ + int nfrees; + + /* Whether we are currently freeing this object directly + * through H5Idec_ref(). + */ + hbool_t freeing; + + /* Pointer to the master list of all objects */ + rct_obj_list_t *list; +} rct_obj_t; + +/* Free callback passed to H5Iclear_type() + * + * When invoked on a closing object, frees a random unfreed ID in the + * master list of objects. + */ +static herr_t +rct_free_cb(void *_obj, void H5_ATTR_UNUSED **_ctx) +{ + rct_obj_t *obj = (rct_obj_t *)_obj; + long remove_nth; + long i; + herr_t ret; + + /* Mark this object as freed */ + obj->nfrees++; + + /* Decrement the number of objects in the list that have not been freed */ + obj->list->remaining--; + + /* If this object isn't already being freed by a callback free call and + * the master object list still contains objects to free, pick another + * object and free it. + */ + if (!obj->freeing && (obj->list->remaining > 0)) { + + /* Pick a random object from the list. This is done by picking a + * random number between 0 and the # of remaining unfreed objects + * and then scanning through the list to find that nth unfreed + * object. + */ + remove_nth = HDrandom() % obj->list->remaining; + for (i = 0; i < obj->list->count; i++) + if (obj->list->objects[i].nfrees == 0) { + if (remove_nth == 0) + break; + else + remove_nth--; + } + + /* Badness if we scanned through the list and didn't manage to + * select one to delete (the list stats were probably updated + * incorrectly). + */ + if (i == obj->list->count) { + ERROR("invalid obj_list"); + goto error; + } + + /* Mark the object we're about to free so its own callback does + * not free another object. We don't want to recursively free the + * entire list when we free the first ID. 
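+ * (Without this guard, A's callback could free B, B's callback could free
+ * C, and so on, recursively tearing down the whole list from the first
+ * free.)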
+ */ + obj->list->objects[i].freeing = TRUE; + + /* Decrement the reference count on the object */ + ret = H5Idec_ref(obj->list->objects[i].id); + CHECK(ret, FAIL, "H5Idec_ref"); + if (ret == FAIL) + goto error; + + /* Unset the "freeing" flag */ + obj->list->objects[i].freeing = FALSE; + } + + /* Verify the number of objects remaining in the master list is non-negative */ + if (obj->list->remaining < 0) { + ERROR("invalid number of objects remaining"); + goto error; + } + + return 0; + +error: + return -1; +} /* end rct_free_cb() */ + +/* Test function */ +static int +test_remove_clear_type(void) +{ + H5I_type_t obj_type; + rct_obj_list_t obj_list; + rct_obj_t *objects = NULL; /* Convenience pointer to objects stored in master list */ + size_t list_size; + long i, j; + herr_t ret; /* return value */ + + /* Register a user-defined type with our custom ID-deleting callback */ + obj_type = H5Iregister_type((size_t)8, 0, rct_free_cb); + CHECK(obj_type, H5I_BADID, "H5Iregister_type"); + if (obj_type == H5I_BADID) + goto error; + + /* Create an array to hold the objects in the master list */ + list_size = RCT_MAX_NOBJS * sizeof(rct_obj_t); + obj_list.objects = HDmalloc(list_size); + CHECK_PTR(obj_list.objects, "HDcalloc"); + if (NULL == obj_list.objects) + goto error; + + /* Set a convenience pointer to the object array */ + objects = obj_list.objects; + + for (i = 0; i < RCT_NITER; i++) { + + /* The number of members in the type, according to the HDF5 library */ + hsize_t nmembers = 1234567; /* (init to fake number) */ + + /* The number of objects found while scanning through the object list */ + int found; + + /********************* + * Build object list * + *********************/ + + HDmemset(obj_list.objects, 0, list_size); + + /* The number of objects used is a random number between the min and max */ + obj_list.count = obj_list.remaining = + RCT_MIN_NOBJS + (HDrandom() % (long)(RCT_MAX_NOBJS - RCT_MIN_NOBJS + 1)); + + /* Create the actual objects */ + for (j = 0; j < obj_list.count; j++) { + + /* Object setup */ + objects[j].nfrees = 0; + objects[j].freeing = FALSE; + objects[j].list = &obj_list; + + /* Register an ID for it */ + objects[j].id = H5Iregister(obj_type, &objects[j]); + CHECK(objects[j].id, FAIL, "H5Iregister"); + if (objects[j].id == FAIL) + goto error; + + /* Bump the reference count by 1 (to 2) 50% of the time */ + if (HDrandom() % 2) { + ret = H5Iinc_ref(objects[j].id); + CHECK(ret, FAIL, "H5Iinc_ref"); + if (ret == FAIL) + goto error; + } + } + + /****************************************** + * Clear the type with force set to FALSE * + ******************************************/ + + /* Clear the type. Since force is FALSE, only + * IDs with a reference count of 1 will be cleared. 
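+ * For example, the IDs whose reference count was bumped to 2 with
+ * H5Iinc_ref() above survive this call, while IDs left at their initial
+ * count of 1 are freed (rct_free_cb() runs once for each of them).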
+ */ + ret = H5Iclear_type(obj_type, FALSE); + CHECK(ret, FAIL, "H5Iclear_type"); + if (ret == FAIL) + goto error; + + /* Verify that the object struct fields are sane and count the + * number of unfreed objects + */ + found = 0; + for (j = 0; j < obj_list.count; j++) { + + if (objects[j].nfrees == 0) { + /* Count unfreed objects */ + found++; + } + else { + /* Every freed object should have been freed exactly once */ + VERIFY(objects[j].nfrees, 1, "object freed more than once"); + if (objects[j].nfrees != 1) + goto error; + } + + /* No object should still be marked as "freeing" */ + VERIFY(objects[j].freeing, FALSE, "object marked as freeing"); + if (objects[j].freeing != FALSE) + goto error; + } + + /* Verify the number of unfreed objects we found during our scan + * matches the number stored in the list + */ + VERIFY(obj_list.remaining, found, "incorrect number of objects remaining"); + if (obj_list.remaining != found) + goto error; + + /* Make sure the HDF5 library confirms our count */ + ret = H5Inmembers(obj_type, &nmembers); + CHECK(ret, FAIL, "H5Inmembers"); + if (ret == FAIL) + goto error; + VERIFY(nmembers, found, "The number of members remaining in the type did not match our count"); + if (nmembers != (hsize_t)found) + goto error; + + /***************************************** + * Clear the type with force set to TRUE * + *****************************************/ + + /* Clear the type. Since force is TRUE, all IDs will be cleared. */ + ret = H5Iclear_type(obj_type, TRUE); + CHECK(ret, FAIL, "H5Iclear_type"); + if (ret == FAIL) + goto error; + + /* Verify that the object struct fields are sane */ + for (j = 0; j < obj_list.count; j++) { + + /* Every object should have been freed exactly once */ + VERIFY(objects[j].nfrees, 1, "object freed more than once"); + if (objects[j].nfrees != 1) + goto error; + + /* No object should still be marked as "freeing" */ + VERIFY(objects[j].freeing, FALSE, "object marked as freeing"); + if (objects[j].freeing != FALSE) + goto error; + } + + /* Verify the number of objects is 0 */ + VERIFY(obj_list.remaining, 0, "objects remaining was not zero"); + if (obj_list.remaining != 0) + goto error; + + /* Make sure the HDF5 library confirms zero members in the type */ + ret = H5Inmembers(obj_type, &nmembers); + CHECK(ret, FAIL, "H5Inmembers"); + if (ret == FAIL) + goto error; + VERIFY(nmembers, 0, "The number of members remaining in the type was not zero"); + if (nmembers != 0) + goto error; + } + + /* Destroy the type */ + ret = H5Idestroy_type(obj_type); + CHECK(ret, FAIL, "H5Idestroy_type"); + if (ret == FAIL) + goto error; + + /* Free the object array */ + HDfree(obj_list.objects); + + return 0; + +error: + /* Cleanup. For simplicity, just destroy the types and ignore errors. 
*/ + H5E_BEGIN_TRY + { + H5Idestroy_type(obj_type); + } + H5E_END_TRY + + HDfree(obj_list.objects); + + return -1; +} /* end test_remove_clear_type() */ + +#if defined(H5VL_VERSION) && H5VL_VERSION >= 2 +/* Typedef for future objects */ +typedef struct { + H5I_type_t obj_type; /* ID type for actual object */ +} future_obj_t; + +/* Global (static) future ID object type */ +H5I_type_t future_obj_type_g = H5I_BADID; + +/* Callback to free the actual object for future object test */ +static herr_t +free_actual_object(void *_p, void H5_ATTR_UNUSED **_ctx) +{ + int *p = (int *)_p; + + if (7 != *p) + return FAIL; + + HDfree(p); + + return SUCCEED; +} + +/* Callback to realize a future object */ +static herr_t +realize_future_cb(void *_future_obj, hid_t *actual_id) +{ + future_obj_t *future_obj = (future_obj_t *)_future_obj; /* Future object */ + int *actual_obj; /* Pointer to the actual object */ + + /* Check for bad future object */ + if (NULL == future_obj) + return FAIL; + + /* Determine type of object to realize */ + if (H5I_DATASPACE == future_obj->obj_type) { + hsize_t dims = 13; + + if ((*actual_id = H5Screate_simple(1, &dims, NULL)) < 0) + return FAIL; + } + else if (H5I_DATATYPE == future_obj->obj_type) { + if ((*actual_id = H5Tcopy(H5T_NATIVE_INT)) < 0) + return FAIL; + } + else if (H5I_GENPROP_LST == future_obj->obj_type) { + if ((*actual_id = H5Pcreate(H5P_DATASET_XFER)) < 0) + return FAIL; + } + else { + /* Create a new object (the 'actual object') of the correct type */ + if (NULL == (actual_obj = HDmalloc(sizeof(int)))) + return FAIL; + *actual_obj = 7; + + /* Register actual object of the user-defined type */ + *actual_id = H5Iregister(future_obj->obj_type, actual_obj); + CHECK(*actual_id, FAIL, "H5Iregister"); + if (*actual_id == FAIL) + return FAIL; + } + + return SUCCEED; +} + +/* Callback to discard a future object */ +static herr_t +discard_future_cb(void *future_obj) +{ + if (NULL == future_obj) + return FAIL; + + HDfree(future_obj); + + return SUCCEED; +} + +/* Callback to realize a future object when future objects are NULL*/ +static herr_t +realize_future_generate_cb(void *_future_obj, hid_t *actual_id) +{ + future_obj_t *future_obj = (future_obj_t *)_future_obj; /* Future object */ + int *actual_obj; /* Pointer to the actual object */ + + if (NULL != future_obj) + return FAIL; + /* Create a new object (the 'actual object') of the correct type */ + if (NULL == (actual_obj = HDmalloc(sizeof(int)))) + return FAIL; + *actual_obj = 7; + + /* Register actual object without using future object info */ + *actual_id = H5Iregister(future_obj_type_g, actual_obj); + CHECK(*actual_id, FAIL, "H5Iregister"); + if (*actual_id == FAIL) + return FAIL; + + return SUCCEED; +} + +/* Callback to discard a future object when future objects are NULL */ +static herr_t +discard_future_generate_cb(void *future_obj) +{ + if (NULL != future_obj) + return FAIL; + + return SUCCEED; +} + +/* Test function */ +static int +test_future_ids(void) +{ + H5I_type_t obj_type; /* New user-defined ID type */ + hid_t future_id; /* ID for future object */ + int fake_future_obj; /* "Fake" future object for tests */ + future_obj_t *future_obj; /* Future object */ + int *actual_obj; /* Actual object */ + int *actual_obj2; /* Another actual object */ + H5I_type_t id_type; /* Type of ID */ + H5T_class_t type_class; /* Datatype class */ + herr_t ret; /* Return value */ + + /* Register a user-defined type with our custom ID-deleting callback */ + obj_type = H5Iregister_type((size_t)15, 0, free_actual_object); + 
CHECK(obj_type, H5I_BADID, "H5Iregister_type"); + if (H5I_BADID == obj_type) + goto error; + + /* Test basic error conditions */ + fake_future_obj = 0; + H5E_BEGIN_TRY + { + future_id = H5Iregister_future(obj_type, &fake_future_obj, NULL, NULL); + } + H5E_END_TRY + VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID != future_id) + goto error; + + H5E_BEGIN_TRY + { + future_id = H5Iregister_future(obj_type, &fake_future_obj, realize_future_cb, NULL); + } + H5E_END_TRY + VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID != future_id) + goto error; + + H5E_BEGIN_TRY + { + future_id = H5Iregister_future(obj_type, &fake_future_obj, NULL, discard_future_cb); + } + H5E_END_TRY + VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID != future_id) + goto error; + + H5E_BEGIN_TRY + { + future_id = H5Iregister_future(H5I_BADID, &fake_future_obj, realize_future_cb, discard_future_cb); + } + H5E_END_TRY + VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID != future_id) + goto error; + + /* Test base use-case: create a future object and destroy type without + * realizing the future object. + */ + future_obj = HDmalloc(sizeof(future_obj_t)); + future_obj->obj_type = obj_type; + future_id = H5Iregister_future(obj_type, future_obj, realize_future_cb, discard_future_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + /* Destroy the type */ + ret = H5Idestroy_type(obj_type); + CHECK(ret, FAIL, "H5Idestroy_type"); + if (FAIL == ret) + goto error; + + /* Re-register a user-defined type with our custom ID-deleting callback */ + obj_type = H5Iregister_type((size_t)15, 0, free_actual_object); + CHECK(obj_type, H5I_BADID, "H5Iregister_type"); + if (H5I_BADID == obj_type) + goto error; + + /* Test base use-case: create a future object and realize the actual object. */ + future_obj = HDmalloc(sizeof(future_obj_t)); + future_obj->obj_type = obj_type; + future_id = H5Iregister_future(obj_type, future_obj, realize_future_cb, discard_future_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + actual_obj = H5Iobject_verify(future_id, obj_type); + CHECK_PTR(actual_obj, "H5Iobject_verify"); + if (NULL == actual_obj) + goto error; + VERIFY(*actual_obj, 7, "H5Iobject_verify"); + if (7 != *actual_obj) + goto error; + + /* Retrieve the object again and verify that it's the same actual object */ + actual_obj2 = H5Iobject_verify(future_id, obj_type); + CHECK_PTR(actual_obj2, "H5Iobject_verify"); + if (NULL == actual_obj2) + goto error; + VERIFY(*actual_obj2, 7, "H5Iobject_verify"); + if (7 != *actual_obj2) + goto error; + CHECK_PTR_EQ(actual_obj, actual_obj2, "H5Iobject_verify"); + if (actual_obj != actual_obj2) + goto error; + + /* Destroy the type */ + ret = H5Idestroy_type(obj_type); + CHECK(ret, FAIL, "H5Idestroy_type"); + if (FAIL == ret) + goto error; + + /* Re-register a user-defined type with our custom ID-deleting callback */ + obj_type = H5Iregister_type((size_t)15, 0, free_actual_object); + CHECK(obj_type, H5I_BADID, "H5Iregister_type"); + if (H5I_BADID == obj_type) + goto error; + + /* Set the global future object type */ + future_obj_type_g = obj_type; + + /* Test "actual object generator" use-case: create a future object with + * NULL object pointer, to create new object of predefined type when + * future object is realized. 
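+ *
+ * Sketch of the flow exercised below (illustrative):
+ *
+ *     id = H5Iregister_future(obj_type, NULL,
+ *                             realize_future_generate_cb,
+ *                             discard_future_generate_cb);
+ *     ...                              no actual object exists yet
+ *     H5Iobject_verify(id, obj_type);  realize callback runs, allocating and
+ *                                      registering the actual object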
+ */ + future_id = H5Iregister_future(obj_type, NULL, realize_future_generate_cb, discard_future_generate_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + /* Realize the actual object, with will be dynamically allocated within + * the 'realize' callback. + */ + actual_obj = H5Iobject_verify(future_id, obj_type); + CHECK_PTR(actual_obj, "H5Iobject_verify"); + if (NULL == actual_obj) + goto error; + VERIFY(*actual_obj, 7, "H5Iobject_verify"); + if (7 != *actual_obj) + goto error; + + /* Reset the global future object type */ + future_obj_type_g = H5I_BADID; + + /* Retrieve the object again and verify that it's the same actual object */ + /* (Will fail if global future object type used) */ + actual_obj2 = H5Iobject_verify(future_id, obj_type); + CHECK_PTR(actual_obj2, "H5Iobject_verify"); + if (NULL == actual_obj2) + goto error; + VERIFY(*actual_obj2, 7, "H5Iobject_verify"); + if (7 != *actual_obj2) + goto error; + CHECK_PTR_EQ(actual_obj, actual_obj2, "H5Iobject_verify"); + if (actual_obj != actual_obj2) + goto error; + + /* Destroy the type */ + ret = H5Idestroy_type(obj_type); + CHECK(ret, FAIL, "H5Idestroy_type"); + if (FAIL == ret) + goto error; + + /* Test base use-case: create a future object for a pre-defined type */ + /* (DATASPACE) */ + future_obj = HDmalloc(sizeof(future_obj_t)); + future_obj->obj_type = H5I_DATASPACE; + future_id = H5Iregister_future(H5I_DATASPACE, future_obj, realize_future_cb, discard_future_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + /* (Can't verify the type of the future ID, because the library's current + * implementation realizes the object during sanity checks on the ID) + */ + + /* Close future object for pre-defined type without realizing it */ + ret = H5Idec_ref(future_id); + CHECK(ret, FAIL, "H5Idec_ref"); + if (FAIL == ret) + goto error; + + /* Test base use-case: create a future object for a pre-defined type */ + future_obj = HDmalloc(sizeof(future_obj_t)); + future_obj->obj_type = H5I_DATASPACE; + future_id = H5Iregister_future(H5I_DATASPACE, future_obj, realize_future_cb, discard_future_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + /* Verify that the application believes the future ID is a dataspace */ + /* (Currently realizes the object "implicitly" during a sanity check) */ + id_type = H5Iget_type(future_id); + CHECK(id_type, H5I_BADID, "H5Iget_type"); + if (H5I_BADID == id_type) + goto error; + if (H5I_DATASPACE != id_type) + goto error; + + /* Close future object for pre-defined type without realizing it */ + ret = H5Idec_ref(future_id); + CHECK(ret, FAIL, "H5Idec_ref"); + if (FAIL == ret) + goto error; + + /* Test base use-case: create a future object for a pre-defined type */ + future_obj = HDmalloc(sizeof(future_obj_t)); + future_obj->obj_type = H5I_DATASPACE; + future_id = H5Iregister_future(H5I_DATASPACE, future_obj, realize_future_cb, discard_future_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + /* Realize future dataspace by requesting its rank */ + ret = H5Sget_simple_extent_ndims(future_id); + CHECK(ret, FAIL, "H5Sget_simple_extent_ndims"); + if (FAIL == ret) + goto error; + if (1 != ret) + goto error; + + /* Verify that the application believes the ID is still a dataspace */ + id_type = H5Iget_type(future_id); + CHECK(id_type, H5I_BADID, "H5Iget_type"); 
+ if (H5I_BADID == id_type) + goto error; + if (H5I_DATASPACE != id_type) + goto error; + + /* Close future object for pre-defined type after realizing it */ + ret = H5Idec_ref(future_id); + CHECK(ret, FAIL, "H5Idec_ref"); + if (FAIL == ret) + goto error; + + /* Test base use-case: create a future object for a pre-defined type */ + /* (DATATYPE) */ + future_obj = HDmalloc(sizeof(future_obj_t)); + future_obj->obj_type = H5I_DATATYPE; + future_id = H5Iregister_future(H5I_DATATYPE, future_obj, realize_future_cb, discard_future_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + /* (Can't verify the type of the future ID, because the library's current + * implementation realizes the object during sanity checks on the ID) + */ + + /* Close future object for pre-defined type without realizing it */ + ret = H5Idec_ref(future_id); + CHECK(ret, FAIL, "H5Idec_ref"); + if (FAIL == ret) + goto error; + + /* Test base use-case: create a future object for a pre-defined type */ + future_obj = HDmalloc(sizeof(future_obj_t)); + future_obj->obj_type = H5I_DATATYPE; + future_id = H5Iregister_future(H5I_DATATYPE, future_obj, realize_future_cb, discard_future_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + /* Verify that the application believes the future ID is a datatype */ + /* (Currently realizes the object "implicitly" during a sanity check) */ + id_type = H5Iget_type(future_id); + CHECK(id_type, H5I_BADID, "H5Iget_type"); + if (H5I_BADID == id_type) + goto error; + if (H5I_DATATYPE != id_type) + goto error; + + /* Close future object for pre-defined type without realizing it */ + ret = H5Idec_ref(future_id); + CHECK(ret, FAIL, "H5Idec_ref"); + if (FAIL == ret) + goto error; + + /* Test base use-case: create a future object for a pre-defined type */ + future_obj = HDmalloc(sizeof(future_obj_t)); + future_obj->obj_type = H5I_DATATYPE; + future_id = H5Iregister_future(H5I_DATATYPE, future_obj, realize_future_cb, discard_future_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + /* Realize future datatype by requesting its class */ + type_class = H5Tget_class(future_id); + CHECK(ret, FAIL, "H5Tget_class"); + if (FAIL == ret) + goto error; + if (H5T_INTEGER != type_class) + goto error; + + /* Verify that the application believes the ID is still a datatype */ + id_type = H5Iget_type(future_id); + CHECK(id_type, H5I_BADID, "H5Iget_type"); + if (H5I_BADID == id_type) + goto error; + if (H5I_DATATYPE != id_type) + goto error; + + /* Close future object for pre-defined type after realizing it */ + ret = H5Idec_ref(future_id); + CHECK(ret, FAIL, "H5Idec_ref"); + if (FAIL == ret) + goto error; + + /* Test base use-case: create a future object for a pre-defined type */ + /* (PROPERTY LIST) */ + future_obj = HDmalloc(sizeof(future_obj_t)); + future_obj->obj_type = H5I_GENPROP_LST; + future_id = H5Iregister_future(H5I_GENPROP_LST, future_obj, realize_future_cb, discard_future_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + /* (Can't verify the type of the future ID, because the library's current + * implementation realizes the object during sanity checks on the ID) + */ + + /* Close future object for pre-defined type without realizing it */ + ret = H5Idec_ref(future_id); + CHECK(ret, FAIL, "H5Idec_ref"); + if (FAIL == ret) + goto error; + + /* Test 
base use-case: create a future object for a pre-defined type */ + future_obj = HDmalloc(sizeof(future_obj_t)); + future_obj->obj_type = H5I_GENPROP_LST; + future_id = H5Iregister_future(H5I_GENPROP_LST, future_obj, realize_future_cb, discard_future_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + /* Verify that the application believes the future ID is a property list */ + /* (Currently realizes the object "implicitly" during a sanity check) */ + id_type = H5Iget_type(future_id); + CHECK(id_type, H5I_BADID, "H5Iget_type"); + if (H5I_BADID == id_type) + goto error; + if (H5I_GENPROP_LST != id_type) + goto error; + + /* Close future object for pre-defined type without realizing it */ + ret = H5Idec_ref(future_id); + CHECK(ret, FAIL, "H5Idec_ref"); + if (FAIL == ret) + goto error; + + /* Test base use-case: create a future object for a pre-defined type */ + future_obj = HDmalloc(sizeof(future_obj_t)); + future_obj->obj_type = H5I_GENPROP_LST; + future_id = H5Iregister_future(H5I_GENPROP_LST, future_obj, realize_future_cb, discard_future_cb); + CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); + if (H5I_INVALID_HID == future_id) + goto error; + + /* Realize future property list by verifying its class */ + ret = H5Pisa_class(future_id, H5P_DATASET_XFER); + CHECK(ret, FAIL, "H5Pisa_class"); + if (FAIL == ret) + goto error; + if (TRUE != ret) + goto error; + + /* Verify that the application believes the ID is still a property list */ + id_type = H5Iget_type(future_id); + CHECK(id_type, H5I_BADID, "H5Iget_type"); + if (H5I_BADID == id_type) + goto error; + if (H5I_GENPROP_LST != id_type) + goto error; + + /* Close future object for pre-defined type after realizing it */ + ret = H5Idec_ref(future_id); + CHECK(ret, FAIL, "H5Idec_ref"); + if (FAIL == ret) + goto error; + + return 0; + +error: + /* Cleanup. For simplicity, just destroy the types and ignore errors. */ + H5E_BEGIN_TRY + { + H5Idestroy_type(obj_type); + } + H5E_END_TRY + + return -1; +} /* end test_future_ids() */ +#endif + +void +test_ids(void) +{ + /* Set the random # seed */ + HDsrandom((unsigned)HDtime(NULL)); + + if (basic_id_test() < 0) + TestErrPrintf("Basic ID test failed\n"); + if (id_predefined_test() < 0) + TestErrPrintf("Predefined ID type test failed\n"); + if (test_is_valid() < 0) + TestErrPrintf("H5Iis_valid test failed\n"); + if (test_get_type() < 0) + TestErrPrintf("H5Iget_type test failed\n"); + if (test_id_type_list() < 0) + TestErrPrintf("ID type list test failed\n"); + if (test_remove_clear_type() < 0) + TestErrPrintf("ID remove during H5Iclear_type test failed\n"); +#if defined(H5VL_VERSION) && H5VL_VERSION >= 2 + if (test_future_ids() < 0) + TestErrPrintf("Future ID test failed\n"); +#endif +} diff --git a/test/API/titerate.c b/test/API/titerate.c new file mode 100644 index 00000000000..6cbebbd8c55 --- /dev/null +++ b/test/API/titerate.c @@ -0,0 +1,1263 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: titerate + * + * Test the Group & Attribute functionality + * + *************************************************************/ + +#include "testhdf5.h" +/* #include "H5srcdir.h" */ + +#define DATAFILE "titerate.h5" + +/* Number of datasets for group iteration test */ +#define NDATASETS 50 + +/* Number of attributes for attribute iteration test */ +#define NATTR 50 + +/* Number of groups for second group iteration test */ +#define ITER_NGROUPS 150 + +/* General maximum length of names used */ +#define NAMELEN 80 + +/* 1-D dataset with fixed dimensions */ +#define SPACE1_RANK 1 +#define SPACE1_DIM1 4 + +typedef enum { RET_ZERO, RET_TWO, RET_CHANGE, RET_CHANGE2 } iter_enum; + +/* Custom group iteration callback data */ +typedef struct { + char name[NAMELEN]; /* The name of the object */ + H5O_type_t type; /* The type of the object */ + iter_enum command; /* The type of return value */ +} iter_info; + +/* Definition for test_corrupted_attnamelen */ +#define CORRUPTED_ATNAMELEN_FILE "memleak_H5O_dtype_decode_helper_H5Odtype.h5" +#define DSET_NAME "image" +typedef struct searched_err_t { + char message[256]; + hbool_t found; +} searched_err_t; +#if 0 +/* Call back function for test_corrupted_attnamelen */ +static int find_err_msg_cb(unsigned n, const H5E_error2_t *err_desc, void *_client_data); +#endif +/* Local functions */ +int iter_strcmp(const void *s1, const void *s2); +int iter_strcmp2(const void *s1, const void *s2); +#ifndef NO_ITERATION_RESTART +static herr_t liter_cb(hid_t group, const char *name, const H5L_info2_t *info, void *op_data); +static herr_t liter_cb2(hid_t group, const char *name, const H5L_info2_t *info, void *op_data); +#endif +herr_t aiter_cb(hid_t group, const char *name, const H5A_info_t *ainfo, void *op_data); + +/**************************************************************** +** +** iter_strcmp(): String comparison routine for qsort +** +****************************************************************/ +H5_ATTR_PURE int +iter_strcmp(const void *s1, const void *s2) +{ + return (HDstrcmp(*(const char *const *)s1, *(const char *const *)s2)); +} + +/**************************************************************** +** +** liter_cb(): Custom link iteration callback routine. +** +****************************************************************/ +#ifndef NO_ITERATION_RESTART +static herr_t +liter_cb(hid_t H5_ATTR_UNUSED group, const char *name, const H5L_info2_t H5_ATTR_UNUSED *link_info, + void *op_data) +{ + iter_info *info = (iter_info *)op_data; + static int count = 0; + static int count2 = 0; + + HDstrcpy(info->name, name); + + switch (info->command) { + case RET_ZERO: + return (0); + + case RET_TWO: + return (2); + + case RET_CHANGE: + count++; + return (count > 10 ? 1 : 0); + + case RET_CHANGE2: + count2++; + return (count2 > 10 ? 
1 : 0); + + default: + HDprintf("invalid iteration command"); + return (-1); + } /* end switch */ +} /* end liter_cb() */ +#endif + +/**************************************************************** +** +** test_iter_group(): Test group iteration functionality +** +****************************************************************/ +static void +test_iter_group(hid_t fapl, hbool_t new_format) +{ +#ifndef NO_ITERATION_RESTART + hid_t file; /* File ID */ + hid_t dataset; /* Dataset ID */ + hid_t datatype; /* Common datatype ID */ + hid_t filespace; /* Common dataspace ID */ + hid_t root_group, grp; /* Root group ID */ + int i; /* counting variable */ + hsize_t idx; /* Index in the group */ + char name[NAMELEN]; /* temporary name buffer */ + char *lnames[NDATASETS + 2]; /* Names of the links created */ + char dataset_name[NAMELEN]; /* dataset name */ + iter_info info; /* Custom iteration information */ + H5G_info_t ginfo; /* Buffer for querying object's info */ + herr_t ret; /* Generic return value */ +#else + (void)fapl; + (void)new_format; +#endif + + /* Output message about test being performed */ + MESSAGE( + 5, ("Testing Group Iteration Functionality - SKIPPED for now due to no iteration restart support\n")); +#ifndef NO_ITERATION_RESTART + /* Create the test file with the datasets */ + file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file, FAIL, "H5Fcreate"); + + /* Test iterating over empty group */ + info.command = RET_ZERO; + idx = 0; + ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); + VERIFY(ret, SUCCEED, "H5Literate2"); + + datatype = H5Tcopy(H5T_NATIVE_INT); + CHECK(datatype, FAIL, "H5Tcopy"); + + filespace = H5Screate(H5S_SCALAR); + CHECK(filespace, FAIL, "H5Screate"); + + for (i = 0; i < NDATASETS; i++) { + HDsnprintf(name, sizeof(name), "Dataset %d", i); + dataset = H5Dcreate2(file, name, datatype, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Keep a copy of the dataset names around for later */ + lnames[i] = HDstrdup(name); + CHECK_PTR(lnames[i], "strdup"); + + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + } /* end for */ + + /* Create a group and named datatype under root group for testing */ + grp = H5Gcreate2(file, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Gcreate2"); + + lnames[NDATASETS] = HDstrdup("grp"); + CHECK_PTR(lnames[NDATASETS], "strdup"); + + ret = H5Tcommit2(file, "dtype", datatype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + lnames[NDATASETS + 1] = HDstrdup("dtype"); + CHECK_PTR(lnames[NDATASETS], "strdup"); + + /* Close everything up */ + ret = H5Tclose(datatype); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Sclose(filespace); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Sort the dataset names */ + HDqsort(lnames, (size_t)(NDATASETS + 2), sizeof(char *), iter_strcmp); + + /* Iterate through the datasets in the root group in various ways */ + file = H5Fopen(DATAFILE, H5F_ACC_RDONLY, fapl); + CHECK(file, FAIL, "H5Fopen"); + + /* These two functions, H5Oget_info_by_idx and H5Lget_name_by_idx, actually + * iterate through B-tree for group members in internal library design. 
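+ * (Hypothetical aside, not part of the original patch: in isolation, assuming gid is an
+ * open group ID, the same by-name-index walk is roughly:
+ * H5G_info_t gi; H5Gget_info(gid, &gi);
+ * for (hsize_t u = 0; u < gi.nlinks; u++) {
+ * char nm[64]; H5O_info2_t oi;
+ * H5Lget_name_by_idx(gid, ".", H5_INDEX_NAME, H5_ITER_INC, u, nm, sizeof(nm), H5P_DEFAULT);
+ * H5Oget_info_by_idx3(gid, ".", H5_INDEX_NAME, H5_ITER_INC, u, &oi, H5O_INFO_BASIC, H5P_DEFAULT);
+ * })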
+ */ + root_group = H5Gopen2(file, "/", H5P_DEFAULT); + CHECK(root_group, FAIL, "H5Gopen2"); + + ret = H5Gget_info(root_group, &ginfo); + CHECK(ret, FAIL, "H5Gget_info"); + VERIFY(ginfo.nlinks, (NDATASETS + 2), "H5Gget_info"); + + for (i = 0; i < (int)ginfo.nlinks; i++) { + H5O_info2_t oinfo; /* Object info */ + + ret = (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, + dataset_name, (size_t)NAMELEN, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_name_by_idx"); + + //! [H5Oget_info_by_idx3_snip] + + ret = H5Oget_info_by_idx3(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, + H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_idx"); + + //! [H5Oget_info_by_idx3_snip] + + } /* end for */ + + H5E_BEGIN_TRY + { + ret = + (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)(NDATASETS + 3), + dataset_name, (size_t)NAMELEN, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Lget_name_by_idx"); + + ret = H5Gclose(root_group); + CHECK(ret, FAIL, "H5Gclose"); + + /* These two functions, H5Oget_info_by_idx and H5Lget_name_by_idx, actually + * iterate through B-tree for group members in internal library design. + * (Same as test above, but with the file ID instead of opening the root group) + */ + ret = H5Gget_info(file, &ginfo); + CHECK(ret, FAIL, "H5Gget_info"); + VERIFY(ginfo.nlinks, NDATASETS + 2, "H5Gget_info"); + + for (i = 0; i < (int)ginfo.nlinks; i++) { + H5O_info2_t oinfo; /* Object info */ + + ret = (herr_t)H5Lget_name_by_idx(file, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, dataset_name, + (size_t)NAMELEN, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_name_by_idx"); + + ret = H5Oget_info_by_idx3(file, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, H5O_INFO_BASIC, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_idx3"); + } /* end for */ + + H5E_BEGIN_TRY + { + ret = (herr_t)H5Lget_name_by_idx(file, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)(NDATASETS + 3), + dataset_name, (size_t)NAMELEN, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Lget_name_by_idx"); + + /* Test invalid indices for starting iteration */ + info.command = RET_ZERO; + idx = (hsize_t)-1; + H5E_BEGIN_TRY + { + ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Literate2"); + + /* Test skipping exactly as many entries as in the group */ + idx = NDATASETS + 2; + H5E_BEGIN_TRY + { + ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Literate2"); + + /* Test skipping more entries than are in the group */ + idx = NDATASETS + 3; + H5E_BEGIN_TRY + { + ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Literate2"); + + /* Test all objects in group, when callback always returns 0 */ + info.command = RET_ZERO; + idx = 0; + if ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) > 0) + TestErrPrintf("Group iteration function didn't return zero correctly!\n"); + + /* Test all objects in group, when callback always returns 1 */ + /* This also tests the "restarting" ability, because the index changes */ + info.command = RET_TWO; + i = 0; + idx = 0; + memset(info.name, 0, NAMELEN); + while ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) > 0) { + /* Verify return value from iterator gets propagated correctly */ + VERIFY(ret, 2, "H5Literate2"); + + /* Increment the number of times "2" 
is returned */ + i++; + + /* Verify that the index is the correct value */ + VERIFY(idx, (hsize_t)i, "H5Literate2"); + if (idx != (hsize_t)i) + break; + if (idx > (NDATASETS + 2)) + TestErrPrintf("Group iteration function walked too far!\n"); + + /* Verify that the correct name is retrieved */ + if (HDstrncmp(info.name, lnames[(size_t)(idx - 1)], NAMELEN) != 0) + TestErrPrintf( + "Group iteration function didn't return name correctly for link - lnames[%u] = '%s'!\n", + (unsigned)(idx - 1), lnames[(size_t)(idx - 1)]); + } /* end while */ + VERIFY(ret, -1, "H5Literate2"); + + if (i != (NDATASETS + 2)) + TestErrPrintf("%u: Group iteration function didn't perform multiple iterations correctly!\n", + __LINE__); + + /* Test all objects in group, when callback changes return value */ + /* This also tests the "restarting" ability, because the index changes */ + info.command = new_format ? RET_CHANGE2 : RET_CHANGE; + i = 0; + idx = 0; + memset(info.name, 0, NAMELEN); + while ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) >= 0) { + /* Verify return value from iterator gets propagated correctly */ + VERIFY(ret, 1, "H5Literate2"); + + /* Increment the number of times "1" is returned */ + i++; + + /* Verify that the index is the correct value */ + VERIFY(idx, (hsize_t)(i + 10), "H5Literate2"); + if (idx != (hsize_t)(i + 10)) + break; + if (idx > (NDATASETS + 2)) + TestErrPrintf("Group iteration function walked too far!\n"); + + /* Verify that the correct name is retrieved */ + if (HDstrncmp(info.name, lnames[(size_t)(idx - 1)], NAMELEN) != 0) + TestErrPrintf( + "Group iteration function didn't return name correctly for link - lnames[%u] = '%s'!\n", + (unsigned)(idx - 1), lnames[(size_t)(idx - 1)]); + } /* end while */ + VERIFY(ret, -1, "H5Literate2"); + + if (i != 42 || idx != 52) + TestErrPrintf("%u: Group iteration function didn't perform multiple iterations correctly!\n", + __LINE__); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free the dataset names */ + for (i = 0; i < (NDATASETS + 2); i++) + HDfree(lnames[i]); +#endif +} /* test_iter_group() */ + +/**************************************************************** +** +** aiter_cb(): Custom group iteration callback routine. +** +****************************************************************/ +herr_t +aiter_cb(hid_t H5_ATTR_UNUSED group, const char *name, const H5A_info_t H5_ATTR_UNUSED *ainfo, void *op_data) +{ + iter_info *info = (iter_info *)op_data; + static int count = 0; + static int count2 = 0; + + HDstrcpy(info->name, name); + + switch (info->command) { + case RET_ZERO: + return (0); + + case RET_TWO: + return (2); + + case RET_CHANGE: + count++; + return (count > 10 ? 1 : 0); + + case RET_CHANGE2: + count2++; + return (count2 > 10 ? 
1 : 0);
+
+ default:
+ HDprintf("invalid iteration command");
+ return (-1);
+ } /* end switch */
+} /* end aiter_cb() */
+
+/****************************************************************
+**
+** test_iter_attr(): Test attribute iteration functionality
+**
+****************************************************************/
+static void
+test_iter_attr(hid_t fapl, hbool_t new_format)
+{
+#ifndef NO_ITERATION_RESTART
+ hid_t file; /* File ID */
+ hid_t dataset; /* Common Dataset ID */
+ hid_t filespace; /* Common dataspace ID */
+ hid_t attribute; /* Attribute ID */
+ int i; /* counting variable */
+ hsize_t idx; /* Index in the attribute list */
+ char name[NAMELEN]; /* temporary name buffer */
+ char *anames[NATTR]; /* Names of the attributes created */
+ iter_info info; /* Custom iteration information */
+ herr_t ret; /* Generic return value */
+#else
+ (void)fapl;
+ (void)new_format;
+#endif
+
+ /* Output message about test being performed */
+ MESSAGE(
+ 5,
+ ("Testing Attribute Iteration Functionality - SKIPPED for now due to no iteration restart support\n"));
+#ifndef NO_ITERATION_RESTART
+ HDmemset(&info, 0, sizeof(iter_info));
+
+ /* Create the test file with the datasets */
+ file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ filespace = H5Screate(H5S_SCALAR);
+ CHECK(filespace, FAIL, "H5Screate");
+
+ dataset = H5Dcreate2(file, "Dataset", H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ for (i = 0; i < NATTR; i++) {
+ HDsnprintf(name, sizeof(name), "Attribute %02d", i);
+ attribute = H5Acreate2(dataset, name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attribute, FAIL, "H5Acreate2");
+
+ /* Keep a copy of the attribute names around for later */
+ anames[i] = HDstrdup(name);
+ CHECK_PTR(anames[i], "strdup");
+
+ ret = H5Aclose(attribute);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Close everything up */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(filespace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Iterate through the attributes on the dataset in various ways */
+ file = H5Fopen(DATAFILE, H5F_ACC_RDONLY, fapl);
+ CHECK(file, FAIL, "H5Fopen");
+
+ dataset = H5Dopen2(file, "Dataset", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Test invalid indices for starting iteration */
+ info.command = RET_ZERO;
+
+ /* Test skipping exactly as many attributes as there are */
+ idx = NATTR;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate2");
+
+ /* Test skipping more attributes than there are */
+ idx = NATTR + 1;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate2");
+
+ /* Test all attributes on dataset, when callback always returns 0 */
+ info.command = RET_ZERO;
+ idx = 0;
+ if ((ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info)) > 0)
+ TestErrPrintf("Attribute iteration function didn't return zero correctly!\n");
+
+ /* Test all attributes on dataset, when callback always returns 2 */
+ /* This also tests the "restarting" ability, because the index changes */
+ info.command = RET_TWO;
+ i = 0;
+ idx = 0;
+ while ((ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info)) > 0) {
+ /* Verify return value 
from iterator gets propagated correctly */ + VERIFY(ret, 2, "H5Aiterate2"); + + /* Increment the number of times "2" is returned */ + i++; + + /* Verify that the index is the correct value */ + VERIFY(idx, (unsigned)i, "H5Aiterate2"); + + /* Don't check name when new format is used */ + if (!new_format) { + /* Verify that the correct name is retrieved */ + if (idx > 0) { + if (HDstrcmp(info.name, anames[(size_t)idx - 1]) != 0) + TestErrPrintf("%u: Attribute iteration function didn't set names correctly, info.name = " + "'%s', anames[%u] = '%s'!\n", + __LINE__, info.name, (unsigned)(idx - 1), anames[(size_t)idx - 1]); + } /* end if */ + else + TestErrPrintf("%u: 'idx' was not set correctly!\n", __LINE__); + } /* end if */ + } /* end while */ + VERIFY(ret, -1, "H5Aiterate2"); + if (i != 50 || idx != 50) + TestErrPrintf("%u: Attribute iteration function didn't perform multiple iterations correctly!\n", + __LINE__); + + /* Test all attributes on dataset, when callback changes return value */ + /* This also tests the "restarting" ability, because the index changes */ + info.command = new_format ? RET_CHANGE2 : RET_CHANGE; + i = 0; + idx = 0; + while ((ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info)) > 0) { + /* Verify return value from iterator gets propagated correctly */ + VERIFY(ret, 1, "H5Aiterate2"); + + /* Increment the number of times "1" is returned */ + i++; + + /* Verify that the index is the correct value */ + VERIFY(idx, (unsigned)i + 10, "H5Aiterate2"); + + /* Don't check name when new format is used */ + if (!new_format) { + /* Verify that the correct name is retrieved */ + if (idx > 0) { + if (HDstrcmp(info.name, anames[(size_t)idx - 1]) != 0) + TestErrPrintf("%u: Attribute iteration function didn't set names correctly, info.name = " + "'%s', anames[%u] = '%s'!\n", + __LINE__, info.name, (unsigned)(idx - 1), anames[(size_t)idx - 1]); + } + else + TestErrPrintf("%u: 'idx' was not set correctly!\n", __LINE__); + } /* end if */ + } /* end while */ + VERIFY(ret, -1, "H5Aiterate2"); + if (i != 40 || idx != 50) + TestErrPrintf("%u: Attribute iteration function didn't perform multiple iterations correctly!\n", + __LINE__); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Free the attribute names */ + for (i = 0; i < NATTR; i++) + HDfree(anames[i]); +#endif +} /* test_iter_attr() */ + +/**************************************************************** +** +** iter_strcmp2(): String comparison routine for qsort +** +****************************************************************/ +H5_ATTR_PURE int +iter_strcmp2(const void *s1, const void *s2) +{ + return (HDstrcmp((const char *)s1, (const char *)s2)); +} /* end iter_strcmp2() */ + +/**************************************************************** +** +** liter_cb2(): Custom link iteration callback routine. +** +****************************************************************/ +#ifndef NO_ITERATION_RESTART +static herr_t +liter_cb2(hid_t loc_id, const char *name, const H5L_info2_t H5_ATTR_UNUSED *link_info, void *opdata) +{ + const iter_info *test_info = (const iter_info *)opdata; + H5O_info2_t oinfo; + herr_t ret; /* Generic return value */ + + if (HDstrcmp(name, test_info->name) != 0) { + TestErrPrintf("name = '%s', test_info = '%s'\n", name, test_info->name); + return (H5_ITER_ERROR); + } /* end if */ + + /* + * Get type of the object and check it. 
+ */ + ret = H5Oget_info_by_name3(loc_id, name, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + + if (test_info->type != oinfo.type) { + TestErrPrintf("test_info->type = %d, oinfo.type = %d\n", test_info->type, (int)oinfo.type); + return (H5_ITER_ERROR); + } /* end if */ + + return (H5_ITER_STOP); +} /* liter_cb2() */ +#endif + +/**************************************************************** +** +** test_iter_group_large(): Test group iteration functionality +** for groups with large #'s of objects +** +****************************************************************/ +static void +test_iter_group_large(hid_t fapl) +{ +#ifndef NO_ITERATION_RESTART + hid_t file; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t group; /* Group ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + hsize_t dims[] = {SPACE1_DIM1}; + herr_t ret; /* Generic return value */ + char gname[20]; /* Temporary group name */ + iter_info *names; /* Names of objects in the root group */ + iter_info *curr_name; /* Pointer to the current name in the root group */ + int i; + + /* Compound datatype */ + typedef struct s1_t { + unsigned int a; + unsigned int b; + float c; + } s1_t; + + /* Allocate & initialize array */ + names = (iter_info *)HDcalloc(sizeof(iter_info), (ITER_NGROUPS + 2)); + CHECK_PTR(names, "HDcalloc"); +#else + (void)fapl; +#endif + /* Output message about test being performed */ + MESSAGE(5, ("Testing Large Group Iteration Functionality - SKIPPED for now due to no iteration restart " + "support\n")); +#ifndef NO_ITERATION_RESTART + /* Create file */ + file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid = H5Screate_simple(SPACE1_RANK, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create a bunch of groups */ + for (i = 0; i < ITER_NGROUPS; i++) { + HDsnprintf(gname, sizeof(gname), "Group_%d", i); + + /* Add the name to the list of objects in the root group */ + HDstrcpy(names[i].name, gname); + names[i].type = H5O_TYPE_GROUP; + + /* Create a group */ + group = H5Gcreate2(file, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, FAIL, "H5Gcreate2"); + + /* Close a group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + } /* end for */ + + /* Create a dataset */ + dataset = H5Dcreate2(file, "Dataset1", H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Add the name to the list of objects in the root group */ + HDstrcpy(names[ITER_NGROUPS].name, "Dataset1"); + names[ITER_NGROUPS].type = H5O_TYPE_DATASET; + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close Dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create a datatype */ + tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(tid, FAIL, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Save datatype for later */ + ret = H5Tcommit2(file, "Datatype1", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Add the name to the list of objects in the root group */ + HDstrcpy(names[ITER_NGROUPS + 1].name, "Datatype1"); + names[ITER_NGROUPS + 
1].type = H5O_TYPE_NAMED_DATATYPE; + + /* Close datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Need to sort the names in the root group, cause that's what the library does */ + HDqsort(names, (size_t)(ITER_NGROUPS + 2), sizeof(iter_info), iter_strcmp2); + + /* Iterate through the file to see members of the root group */ + curr_name = &names[0]; + ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, NULL, liter_cb2, curr_name); + CHECK(ret, FAIL, "H5Literate2"); + for (i = 1; i < 100; i++) { + hsize_t idx = (hsize_t)i; + + curr_name = &names[i]; + ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb2, curr_name); + CHECK(ret, FAIL, "H5Literate2"); + } /* end for */ + + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Release memory */ + HDfree(names); +#endif +} /* test_iterate_group_large() */ + +/**************************************************************** +** +** test_grp_memb_funcs(): Test group member information +** functionality +** +****************************************************************/ +static void +test_grp_memb_funcs(hid_t fapl) +{ + hid_t file; /* File ID */ + hid_t dataset; /* Dataset ID */ + hid_t datatype; /* Common datatype ID */ + hid_t filespace; /* Common dataspace ID */ + hid_t root_group, grp; /* Root group ID */ + int i; /* counting variable */ + char name[NAMELEN]; /* temporary name buffer */ + char *dnames[NDATASETS + 2]; /* Names of the datasets created */ + char *obj_names[NDATASETS + 2]; /* Names of the objects in group */ + char dataset_name[NAMELEN]; /* dataset name */ + ssize_t name_len; /* Length of object's name */ + H5G_info_t ginfo; /* Buffer for querying object's info */ + herr_t ret = SUCCEED; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Group Member Information Functionality\n")); + + /* Create the test file with the datasets */ + file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file, FAIL, "H5Fcreate"); + + datatype = H5Tcopy(H5T_NATIVE_INT); + CHECK(datatype, FAIL, "H5Tcopy"); + + filespace = H5Screate(H5S_SCALAR); + CHECK(filespace, FAIL, "H5Screate"); + + for (i = 0; i < NDATASETS; i++) { + HDsnprintf(name, sizeof(name), "Dataset %d", i); + dataset = H5Dcreate2(file, name, datatype, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Keep a copy of the dataset names around for later */ + dnames[i] = HDstrdup(name); + CHECK_PTR(dnames[i], "strdup"); + + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + } /* end for */ + + /* Create a group and named datatype under root group for testing */ + grp = H5Gcreate2(file, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Gcreate2"); + + dnames[NDATASETS] = HDstrdup("grp"); + CHECK_PTR(dnames[NDATASETS], "strdup"); + + ret = H5Tcommit2(file, "dtype", datatype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + dnames[NDATASETS + 1] = HDstrdup("dtype"); + CHECK_PTR(dnames[NDATASETS], "strdup"); + + /* Close everything up */ + ret = H5Tclose(datatype); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Gclose(grp); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Sclose(filespace); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Sort the dataset names */ + HDqsort(dnames, (size_t)(NDATASETS + 2), sizeof(char *), iter_strcmp); + + /* Iterate through the datasets in the root group in various ways */ + file = 
H5Fopen(DATAFILE, H5F_ACC_RDONLY, fapl); + CHECK(file, FAIL, "H5Fopen"); + + /* These two functions, H5Oget_info_by_idx and H5Lget_name_by_idx, actually + * iterate through B-tree for group members in internal library design. + */ + root_group = H5Gopen2(file, "/", H5P_DEFAULT); + CHECK(root_group, FAIL, "H5Gopen2"); + + ret = H5Gget_info(root_group, &ginfo); + CHECK(ret, FAIL, "H5Gget_info"); + VERIFY(ginfo.nlinks, (NDATASETS + 2), "H5Gget_info"); + + for (i = 0; i < (int)ginfo.nlinks; i++) { + H5O_info2_t oinfo; /* Object info */ + + /* Test with NULL for name, to query length */ + name_len = H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, NULL, + (size_t)NAMELEN, H5P_DEFAULT); + CHECK(name_len, FAIL, "H5Lget_name_by_idx"); + + ret = (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, + dataset_name, (size_t)(name_len + 1), H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_name_by_idx"); + + /* Double-check that the length is the same */ + VERIFY(ret, name_len, "H5Lget_name_by_idx"); + + /* Keep a copy of the dataset names around for later */ + obj_names[i] = HDstrdup(dataset_name); + CHECK_PTR(obj_names[i], "strdup"); + + ret = H5Oget_info_by_idx3(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, + H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_idx3"); + + if (!HDstrcmp(dataset_name, "grp")) + VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx"); + if (!HDstrcmp(dataset_name, "dtype")) + VERIFY(oinfo.type, H5O_TYPE_NAMED_DATATYPE, "H5Lget_name_by_idx"); + if (!HDstrncmp(dataset_name, "Dataset", (size_t)7)) + VERIFY(oinfo.type, H5O_TYPE_DATASET, "H5Lget_name_by_idx"); + } /* end for */ + + H5E_BEGIN_TRY + { + ret = + (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)(NDATASETS + 3), + dataset_name, (size_t)NAMELEN, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Lget_name_by_idx"); + + /* Sort the dataset names */ + HDqsort(obj_names, (size_t)(NDATASETS + 2), sizeof(char *), iter_strcmp); + + /* Compare object names */ + for (i = 0; i < (int)ginfo.nlinks; i++) { + ret = HDstrcmp(dnames[i], obj_names[i]); + VERIFY(ret, 0, "HDstrcmp"); + } /* end for */ + + ret = H5Gclose(root_group); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free the dataset names */ + for (i = 0; i < (NDATASETS + 2); i++) { + HDfree(dnames[i]); + HDfree(obj_names[i]); + } /* end for */ +} /* test_grp_memb_funcs() */ + +/**************************************************************** +** +** test_links(): Test soft and hard link iteration +** +****************************************************************/ +static void +test_links(hid_t fapl) +{ + hid_t file; /* File ID */ + char obj_name[NAMELEN]; /* Names of the object in group */ + ssize_t name_len; /* Length of object's name */ + hid_t gid, gid1; + H5G_info_t ginfo; /* Buffer for querying object's info */ + hsize_t i; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Soft and Hard Link Iteration Functionality\n")); + + /* Create the test file with the datasets */ + file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file, FAIL, "H5Fcreate"); + + /* create groups */ + gid = H5Gcreate2(file, "/g1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + gid1 = H5Gcreate2(file, "/g1/g1.1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gcreate2"); + + /* create soft and 
hard links to the group "/g1". */ + ret = H5Lcreate_soft("something", gid, "softlink", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_soft"); + + ret = H5Lcreate_hard(gid, "/g1", H5L_SAME_LOC, "hardlink", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_hard"); + + ret = H5Gget_info(gid, &ginfo); + CHECK(ret, FAIL, "H5Gget_info"); + VERIFY(ginfo.nlinks, 3, "H5Gget_info"); + + /* Test these two functions, H5Oget_info_by_idx and H5Lget_name_by_idx */ + for (i = 0; i < ginfo.nlinks; i++) { + H5O_info2_t oinfo; /* Object info */ + H5L_info2_t linfo; /* Link info */ + + /* Get link name */ + name_len = H5Lget_name_by_idx(gid, ".", H5_INDEX_NAME, H5_ITER_INC, i, obj_name, (size_t)NAMELEN, + H5P_DEFAULT); + CHECK(name_len, FAIL, "H5Lget_name_by_idx"); + + /* Get link type */ + ret = H5Lget_info_by_idx2(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &linfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info_by_idx2"); + + /* Get object type */ + if (linfo.type == H5L_TYPE_HARD) { + ret = H5Oget_info_by_idx3(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, + H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_idx3"); + } /* end if */ + + if (!HDstrcmp(obj_name, "g1.1")) + VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx"); + else if (!HDstrcmp(obj_name, "hardlink")) + VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx"); + else if (!HDstrcmp(obj_name, "softlink")) + VERIFY(linfo.type, H5L_TYPE_SOFT, "H5Lget_name_by_idx"); + else + ERROR("unknown object name"); + } /* end for */ + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Gclose(gid1); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_links() */ + +/*------------------------------------------------------------------------- + * Function: find_err_msg_cb + * + * Purpose: Callback function to find the given error message. + * Helper function for test_corrupted_attnamelen(). + * + * Return: H5_ITER_STOP when the message is found + * H5_ITER_CONT, otherwise + * + *------------------------------------------------------------------------- + */ +#if 0 +static int +find_err_msg_cb(unsigned H5_ATTR_UNUSED n, const H5E_error2_t *err_desc, void *_client_data) +{ + int status = H5_ITER_CONT; + searched_err_t *searched_err = (searched_err_t *)_client_data; + + if (searched_err == NULL) + return H5_ITER_ERROR; + + /* If the searched error message is found, stop the iteration */ + if (err_desc->desc != NULL && HDstrcmp(err_desc->desc, searched_err->message) == 0) { + searched_err->found = TRUE; + status = H5_ITER_STOP; + } + + return status; +} /* end find_err_msg_cb() */ +#endif + +/************************************************************************** +** +** test_corrupted_attnamelen(): Test the fix for the JIRA issue HDFFV-10588, +** where corrupted attribute's name length can be +** detected and invalid read can be avoided. 
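+** (Hypothetical aside, not part of the original patch: the detection pattern used below is to
+** let H5Aiterate2 fail on the corrupted file and then walk the default error stack, e.g.
+** H5Ewalk2(H5E_DEFAULT, H5E_WALK_UPWARD, find_err_msg_cb, &err_caught), with the callback
+** returning H5_ITER_STOP once the expected message is matched.)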
+** +**************************************************************************/ +#if 0 +static void +test_corrupted_attnamelen(void) +{ + hid_t fid = -1; /* File ID */ + hid_t did = -1; /* Dataset ID */ + searched_err_t err_caught; /* Data to be passed to callback func */ + int err_status; /* Status returned by H5Aiterate2 */ + herr_t ret; /* Return value */ + hbool_t driver_is_default_compatible; + const char *testfile = H5_get_srcdir_filename(CORRUPTED_ATNAMELEN_FILE); /* Corrected test file name */ + + const char *err_message = "attribute name has different length than stored length"; + /* the error message produced when the failure occurs */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing the Handling of Corrupted Attribute's Name Length\n")); + + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); + CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); + + if (!driver_is_default_compatible) { + HDprintf("-- SKIPPED --\n"); + return; + } + + fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open the dataset */ + did = H5Dopen2(fid, DSET_NAME, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Call H5Aiterate2 to trigger the failure in HDFFV-10588. Failure should + occur in the decoding stage, so some arguments are not needed. */ + err_status = H5Aiterate2(did, H5_INDEX_NAME, H5_ITER_INC, NULL, NULL, NULL); + VERIFY(err_status, FAIL, "H5Aiterate2"); + + /* Make sure the intended error was caught */ + if (err_status == -1) { + /* Initialize client data */ + HDstrcpy(err_caught.message, err_message); + err_caught.found = FALSE; + + /* Look for the correct error message */ + ret = H5Ewalk2(H5E_DEFAULT, H5E_WALK_UPWARD, find_err_msg_cb, &err_caught); + CHECK(ret, FAIL, "H5Ewalk2"); + + /* Fail if the indicated message is not found */ + CHECK(err_caught.found, FALSE, "test_corrupted_attnamelen: Expected error not found"); + } + + /* Close the dataset and file */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* test_corrupted_attnamelen() */ +#endif + +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS +/**************************************************************** +** +** test_links_deprec(): Test soft and hard link iteration +** +****************************************************************/ +static void +test_links_deprec(hid_t fapl) +{ + hid_t file; /* File ID */ + char obj_name[NAMELEN]; /* Names of the object in group */ + ssize_t name_len; /* Length of object's name */ + hid_t gid, gid1; + H5G_info_t ginfo; /* Buffer for querying object's info */ + hsize_t i; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Soft and Hard Link Iteration Functionality Using Deprecated Routines\n")); + + /* Create the test file with the datasets */ + file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file, FAIL, "H5Fcreate"); + + /* create groups */ + gid = H5Gcreate2(file, "/g1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + gid1 = H5Gcreate2(file, "/g1/g1.1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gcreate2"); + + /* create soft and hard links to the group "/g1". 
*/ + ret = H5Lcreate_soft("something", gid, "softlink", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_soft"); + + ret = H5Lcreate_hard(gid, "/g1", H5L_SAME_LOC, "hardlink", H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_hard"); + + ret = H5Gget_info(gid, &ginfo); + CHECK(ret, FAIL, "H5Gget_info"); + VERIFY(ginfo.nlinks, 3, "H5Gget_info"); + + /* Test these two functions, H5Oget_info_by_idx and H5Lget_name_by_idx */ + for (i = 0; i < ginfo.nlinks; i++) { + H5O_info2_t oinfo; /* Object info */ + H5L_info2_t linfo; /* Link info */ + + /* Get link name */ + name_len = H5Lget_name_by_idx(gid, ".", H5_INDEX_NAME, H5_ITER_INC, i, obj_name, (size_t)NAMELEN, + H5P_DEFAULT); + CHECK(name_len, FAIL, "H5Lget_name_by_idx"); + + /* Get link type */ + ret = H5Lget_info_by_idx2(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &linfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info_by_idx1"); + + /* Get object type */ + if (linfo.type == H5L_TYPE_HARD) { + ret = H5Oget_info_by_idx3(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, + H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_idx"); + } /* end if */ + + if (!HDstrcmp(obj_name, "g1.1")) + VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx"); + else if (!HDstrcmp(obj_name, "hardlink")) + VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx"); + else if (!HDstrcmp(obj_name, "softlink")) + VERIFY(linfo.type, H5L_TYPE_SOFT, "H5Lget_name_by_idx"); + else + ERROR("unknown object name"); + } /* end for */ + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Gclose(gid1); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_links_deprec() */ +#endif +#endif + +/**************************************************************** +** +** test_iterate(): Main iteration testing routine. +** +****************************************************************/ +void +test_iterate(void) +{ + hid_t fapl, fapl2; /* File access property lists */ + unsigned new_format; /* Whether to use the new format or not */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Iteration Operations\n")); + + /* Get the default FAPL */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Copy the file access property list */ + fapl2 = H5Pcopy(fapl); + CHECK(fapl2, FAIL, "H5Pcopy"); + + /* Set the "use the latest version of the format" bounds for creating objects in the file */ + ret = H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* These next tests use the same file */ + for (new_format = FALSE; new_format <= TRUE; new_format++) { + test_iter_group(new_format ? fapl2 : fapl, new_format); /* Test group iteration */ + test_iter_group_large(new_format ? fapl2 : fapl); /* Test group iteration for large # of objects */ + test_iter_attr(new_format ? fapl2 : fapl, new_format); /* Test attribute iteration */ + test_grp_memb_funcs(new_format ? fapl2 : fapl); /* Test group member information functions */ + test_links(new_format ? fapl2 : fapl); /* Test soft and hard link iteration */ +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS + test_links_deprec(new_format ? 
fapl2 : fapl); /* Test soft and hard link iteration */ +#endif +#endif + } /* end for */ +#if 0 + /* Test the fix for issue HDFFV-10588 */ + test_corrupted_attnamelen(); +#endif + /* Close FAPLs */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl2); + CHECK(ret, FAIL, "H5Pclose"); +} /* test_iterate() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_iterate + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Quincey Koziol + * April 5, 2000 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +cleanup_iterate(void) +{ + H5Fdelete(DATAFILE, H5P_DEFAULT); +} diff --git a/test/API/tmisc.c b/test/API/tmisc.c new file mode 100644 index 00000000000..d35a00bc034 --- /dev/null +++ b/test/API/tmisc.c @@ -0,0 +1,6349 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: tmisc + * + * Test miscellaneous features not tested elsewhere. Generally + * regression tests for bugs that are reported and don't + * have an existing test to add them to. + * + *************************************************************/ + +#define H5D_FRIEND /*suppress error about including H5Dpkg */ + +/* Define this macro to indicate that the testing APIs should be available */ +#define H5D_TESTING + +#include "testhdf5.h" +/* #include "H5srcdir.h" */ +/* #include "H5Dpkg.h" */ /* Datasets */ +/* #include "H5MMprivate.h" */ /* Memory */ + +/* Definitions for misc. test #1 */ +#define MISC1_FILE "tmisc1.h5" +#define MISC1_VAL (13417386) /* 0xccbbaa */ +#define MISC1_VAL2 (15654348) /* 0xeeddcc */ +#define MISC1_DSET_NAME "/scalar_set" + +/* Definitions for misc. test #2 */ +#define MISC2_FILE_1 "tmisc2a.h5" +#define MISC2_FILE_2 "tmisc2b.h5" +#define MISC2_ATT_NAME_1 "scalar_att_1" +#define MISC2_ATT_NAME_2 "scalar_att_2" + +typedef struct { + char *string; +} misc2_struct; + +/* Definitions for misc. test #3 */ +#define MISC3_FILE "tmisc3.h5" +#define MISC3_RANK 2 +#define MISC3_DIM1 6 +#define MISC3_DIM2 6 +#define MISC3_CHUNK_DIM1 2 +#define MISC3_CHUNK_DIM2 2 +#define MISC3_FILL_VALUE 2 +#define MISC3_DSET_NAME "/chunked" + +/* Definitions for misc. test #4 */ +#define MISC4_FILE_1 "tmisc4a.h5" +#define MISC4_FILE_2 "tmisc4b.h5" +#define MISC4_GROUP_1 "/Group1" +#define MISC4_GROUP_2 "/Group2" + +/* Definitions for misc. 
test #5 */ +#define MISC5_FILE "tmisc5.h5" +#define MISC5_DSETNAME "dset1" +#define MISC5_DSETRANK 1 +#define MISC5_NELMTOPLVL 1 +#define MISC5_DBGNELM1 2 +#define MISC5_DBGNELM2 1 +#define MISC5_DBGNELM3 1 +#define MISC5_DBGELVAL1 999999999 +#define MISC5_DBGELVAL2 888888888 +#define MISC5_DBGELVAL3 777777777 + +typedef struct { + int st1_el1; + hvl_t st1_el2; +} misc5_struct1; + +typedef struct { + int st2_el1; + hvl_t st2_el2; +} misc5_struct2; + +typedef struct { + int st3_el1; +} misc5_struct3; + +typedef struct { + hid_t st3h_base; + hid_t st3h_id; +} misc5_struct3_hndl; + +typedef struct { + hid_t st2h_base; + hid_t st2h_id; + misc5_struct3_hndl *st2h_st3hndl; +} misc5_struct2_hndl; + +typedef struct { + hid_t st1h_base; + hid_t st1h_id; + misc5_struct2_hndl *st1h_st2hndl; +} misc5_struct1_hndl; + +/* Definitions for misc. test #6 */ +#define MISC6_FILE "tmisc6.h5" +#define MISC6_DSETNAME1 "dset1" +#define MISC6_DSETNAME2 "dset2" +#define MISC6_NUMATTR 16 + +/* Definitions for misc. test #7 */ +#define MISC7_FILE "tmisc7.h5" +#define MISC7_DSETNAME1 "Dataset1" +#define MISC7_DSETNAME2 "Dataset2" +#define MISC7_TYPENAME1 "Datatype1" +#define MISC7_TYPENAME2 "Datatype2" + +/* Definitions for misc. test #8 */ +#define MISC8_FILE "tmisc8.h5" +#define MISC8_DSETNAME1 "Dataset1" +#define MISC8_DSETNAME4 "Dataset4" +#define MISC8_DSETNAME5 "Dataset5" +#define MISC8_DSETNAME8 "Dataset8" + +#ifndef H5_HAVE_PARALLEL +#define MISC8_DSETNAME2 "Dataset2" +#define MISC8_DSETNAME3 "Dataset3" +#define MISC8_DSETNAME6 "Dataset6" +#define MISC8_DSETNAME7 "Dataset7" +#define MISC8_DSETNAME9 "Dataset9" +#define MISC8_DSETNAME10 "Dataset10" +#endif + +#define MISC8_RANK 2 +#define MISC8_DIM0 50 +#define MISC8_DIM1 50 +#define MISC8_CHUNK_DIM0 10 +#define MISC8_CHUNK_DIM1 10 + +/* Definitions for misc. test #9 */ +#define MISC9_FILE "tmisc9.h5" + +/* Definitions for misc. test #10 */ +#define MISC10_FILE_OLD "tmtimeo.h5" +#define MISC10_FILE_NEW "tmisc10.h5" +#define MISC10_DSETNAME "Dataset1" + +/* Definitions for misc. test #11 */ +#define MISC11_FILE "tmisc11.h5" +#define MISC11_USERBLOCK 1024 +#define MISC11_SIZEOF_OFF 4 +#define MISC11_SIZEOF_LEN 4 +#define MISC11_SYM_LK 8 +#define MISC11_SYM_IK 32 +#define MISC11_ISTORE_IK 64 +#define MISC11_NINDEXES 1 + +/* Definitions for misc. test #12 */ +#define MISC12_FILE "tmisc12.h5" +#define MISC12_DSET_NAME "Dataset" +#define MISC12_SPACE1_RANK 1 +#define MISC12_SPACE1_DIM1 4 +#define MISC12_CHUNK_SIZE 2 +#define MISC12_APPEND_SIZE 5 + +/* Definitions for misc. test #13 */ +#define MISC13_FILE_1 "tmisc13a.h5" +#define MISC13_FILE_2 "tmisc13b.h5" +#define MISC13_DSET1_NAME "Dataset1" +#define MISC13_DSET2_NAME "Dataset2" +#define MISC13_DSET3_NAME "Dataset3" +#define MISC13_GROUP1_NAME "Group1" +#define MISC13_GROUP2_NAME "Group2" +#define MISC13_DTYPE_NAME "Datatype" +#define MISC13_RANK 1 +#define MISC13_DIM1 600 +#define MISC13_CHUNK_DIM1 10 +#define MISC13_USERBLOCK_SIZE 512 +#define MISC13_COPY_BUF_SIZE 4096 + +/* Definitions for misc. test #14 */ +#define MISC14_FILE "tmisc14.h5" +#define MISC14_DSET1_NAME "Dataset1" +#define MISC14_DSET2_NAME "Dataset2" +#define MISC14_DSET3_NAME "Dataset3" +#define MISC14_METADATA_SIZE 4096 + +/* Definitions for misc. test #15 */ +#define MISC15_FILE "tmisc15.h5" +#define MISC15_BUF_SIZE 1024 + +/* Definitions for misc. test #16 */ +#define MISC16_FILE "tmisc16.h5" +#define MISC16_SPACE_DIM 4 +#define MISC16_SPACE_RANK 1 +#define MISC16_STR_SIZE 8 +#define MISC16_DSET_NAME "Dataset" + +/* Definitions for misc. 
test #17 */ +#define MISC17_FILE "tmisc17.h5" +#define MISC17_SPACE_RANK 2 +#define MISC17_SPACE_DIM1 4 +#define MISC17_SPACE_DIM2 8 +#define MISC17_DSET_NAME "Dataset" + +/* Definitions for misc. test #18 */ +#define MISC18_FILE "tmisc18.h5" +#define MISC18_DSET1_NAME "Dataset1" +#define MISC18_DSET2_NAME "Dataset2" + +/* Definitions for misc. test #19 */ +#define MISC19_FILE "tmisc19.h5" +#define MISC19_DSET_NAME "Dataset" +#define MISC19_ATTR_NAME "Attribute" +#define MISC19_GROUP_NAME "Group" + +/* Definitions for misc. test #20 */ +#define MISC20_FILE "tmisc20.h5" +#define MISC20_FILE_OLD "tlayouto.h5" +#define MISC20_DSET_NAME "Dataset" +#define MISC20_DSET2_NAME "Dataset2" +#define MISC20_SPACE_RANK 2 +/* Make sure the product of the following 2 does not get too close to */ +/* 64 bits, risking an overflow. */ +#define MISC20_SPACE_DIM0 (8 * 1024 * 1024 * (uint64_t)1024) +#define MISC20_SPACE_DIM1 ((256 * 1024 * (uint64_t)1024) + 1) +#define MISC20_SPACE2_DIM0 8 +#define MISC20_SPACE2_DIM1 4 + +#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS) +/* Definitions for misc. test #21 */ +#define MISC21_FILE "tmisc21.h5" +#define MISC21_DSET_NAME "Dataset" +#define MISC21_SPACE_RANK 2 +#define MISC21_SPACE_DIM0 7639 +#define MISC21_SPACE_DIM1 6308 +#define MISC21_CHUNK_DIM0 2048 +#define MISC21_CHUNK_DIM1 2048 + +/* Definitions for misc. test #22 */ +#define MISC22_FILE "tmisc22.h5" +#define MISC22_DSET_NAME "Dataset" +#define MISC22_SPACE_RANK 2 +#define MISC22_CHUNK_DIM0 512 +#define MISC22_CHUNK_DIM1 512 +#define MISC22_SPACE_DIM0 639 +#define MISC22_SPACE_DIM1 1308 +#endif /* H5_HAVE_FILTER_SZIP */ + +/* Definitions for misc. test #23 */ +#define MISC23_FILE "tmisc23.h5" +#define MISC23_NAME_BUF_SIZE 40 + +/* Definitions for misc. test #24 */ +#define MISC24_FILE "tmisc24.h5" +#define MISC24_GROUP_NAME "group" +#define MISC24_GROUP_LINK "group_link" +#define MISC24_DATASET_NAME "dataset" +#define MISC24_DATASET_LINK "dataset_link" +#define MISC24_DATATYPE_NAME "datatype" +#define MISC24_DATATYPE_LINK "datatype_link" + +/* Definitions for misc. test #25 'a', 'b' & 'c' */ +#define MISC25A_FILE "foo.h5" +#define MISC25A_GROUP0_NAME "grp0" +#define MISC25A_GROUP1_NAME "/grp0/grp1" +#define MISC25A_GROUP2_NAME "/grp0/grp2" +#define MISC25A_GROUP3_NAME "/grp0/grp3" +#define MISC25A_ATTR1_NAME "_long attribute_" +#define MISC25A_ATTR1_LEN 11 +#define MISC25A_ATTR2_NAME "_short attr__" +#define MISC25A_ATTR2_LEN 11 +#define MISC25A_ATTR3_NAME "_short attr__" +#define MISC25A_ATTR3_LEN 1 +#define MISC25B_FILE "mergemsg.h5" +#define MISC25B_GROUP "grp1" +#define MISC25C_FILE "nc4_rename.h5" +#define MISC25C_DSETNAME "da" +#define MISC25C_DSETNAME2 "dz" +#define MISC25C_DSETGRPNAME "ga" +#define MISC25C_GRPNAME "gb" +#define MISC25C_GRPNAME2 "gc" +#define MISC25C_ATTRNAME "aa" +#define MISC25C_ATTRNAME2 "ab" + +/* Definitions for misc. test #26 */ +#define MISC26_FILE "dcpl_file" + +/* Definitions for misc. test #27 */ +/* (Note that this test file is generated by the "gen_bad_ohdr.c" code) */ +#define MISC27_FILE "tbad_msg_count.h5" +#define MISC27_GROUP "Group" + +/* Definitions for misc. test #28 */ +#define MISC28_FILE "tmisc28.h5" +#define MISC28_SIZE 10 +#define MISC28_NSLOTS 10000 + +/* Definitions for misc. test #29 */ +#define MISC29_ORIG_FILE "specmetaread.h5" +#define MISC29_COPY_FILE "tmisc29.h5" +#define MISC29_DSETNAME "dset2" + +/* Definitions for misc. test #30 */ +#define MISC30_FILE "tmisc30.h5" + +#ifndef H5_NO_DEPRECATED_SYMBOLS +/* Definitions for misc. 
test #31 */ +#define MISC31_FILE "tmisc31.h5" +#define MISC31_DSETNAME "dset" +#define MISC31_ATTRNAME1 "attr1" +#define MISC31_ATTRNAME2 "attr2" +#define MISC31_GROUPNAME "group" +#define MISC31_PROPNAME "misc31_prop" +#define MISC31_DTYPENAME "dtype" +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + +/* Definitions for misc. test #33 */ +/* Note that this test file is generated by "gen_bad_offset.c" */ +/* and bad offset values are written to that file for testing */ +#define MISC33_FILE "bad_offset.h5" + +/* Definitions for misc. test #35 */ +#define MISC35_SPACE_RANK 3 +#define MISC35_SPACE_DIM1 3 +#define MISC35_SPACE_DIM2 15 +#define MISC35_SPACE_DIM3 13 +#define MISC35_NPOINTS 10 + +/* Definitions for misc. test #37 */ +/* The test file is formerly named h5_nrefs_POC. + See https://nvd.nist.gov/vuln/detail/CVE-2020-10812 */ +#define CVE_2020_10812_FILENAME "cve_2020_10812.h5" + +#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS) +/*------------------------------------------------------------------------- + * Function: h5_szip_can_encode + * + * Purpose: Retrieve the filter config flags for szip, tell if + * encoder is available. + * + * Return: 1: decode+encode is enabled + * 0: only decode is enabled + * -1: other + * + * Programmer: + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +int +h5_szip_can_encode(void) +{ + unsigned int filter_config_flags; + + H5Zget_filter_info(H5Z_FILTER_SZIP, &filter_config_flags); + if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == 0) { + /* filter present but neither encode nor decode is supported (???) */ + return -1; + } + else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == + H5Z_FILTER_CONFIG_DECODE_ENABLED) { + /* decoder only: read but not write */ + return 0; + } + else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == + H5Z_FILTER_CONFIG_ENCODE_ENABLED) { + /* encoder only: write but not read (???) */ + return -1; + } + else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == + (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) { + return 1; + } + return (-1); +} +#endif /* H5_HAVE_FILTER_SZIP */ + +/**************************************************************** +** +** test_misc1(): test unlinking a dataset from a group and immediately +** re-using the dataset name +** +****************************************************************/ +static void +test_misc1(void) +{ + int i; + int i_check; + hid_t file, dataspace, dataset; + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Unlinking Dataset and Re-creating It\n")); + + file = H5Fcreate(MISC1_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + dataspace = H5Screate(H5S_SCALAR); + CHECK(dataspace, FAIL, "H5Screate"); + + /* Write the dataset the first time. */ + dataset = + H5Dcreate2(file, MISC1_DSET_NAME, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + i = MISC1_VAL; + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &i); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Remove the dataset. 
*/ + ret = H5Ldelete(file, MISC1_DSET_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Write the dataset for the second time with a different value. */ + dataset = + H5Dcreate2(file, MISC1_DSET_NAME, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + i = MISC1_VAL2; + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &i); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(dataspace); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Now, check the value written to the dataset, after it was re-created */ + file = H5Fopen(MISC1_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fopen"); + + dataspace = H5Screate(H5S_SCALAR); + CHECK(dataspace, FAIL, "H5Screate"); + + dataset = H5Dopen2(file, MISC1_DSET_NAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &i_check); + CHECK(ret, FAIL, "H5Dread"); + VERIFY(i_check, MISC1_VAL2, "H5Dread"); + + ret = H5Sclose(dataspace); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_misc1() */ + +static hid_t +misc2_create_type(void) +{ + hid_t type, type_tmp; + herr_t ret; + + type_tmp = H5Tcopy(H5T_C_S1); + CHECK(type_tmp, FAIL, "H5Tcopy"); + + ret = H5Tset_size(type_tmp, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + type = H5Tcreate(H5T_COMPOUND, sizeof(misc2_struct)); + CHECK(type, FAIL, "H5Tcreate"); + + ret = H5Tinsert(type, "string", offsetof(misc2_struct, string), type_tmp); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tclose(type_tmp); + CHECK(ret, FAIL, "H5Tclose"); + + return type; +} + +static void +test_misc2_write_attribute(void) +{ + hid_t file1, file2, root1, root2, dataspace, att1, att2; + hid_t type; + herr_t ret; + misc2_struct data, data_check; + char *string_att1 = HDstrdup("string attribute in file one"); + char *string_att2 = HDstrdup("string attribute in file two"); + + HDmemset(&data, 0, sizeof(data)); + HDmemset(&data_check, 0, sizeof(data_check)); + + type = misc2_create_type(); + + dataspace = H5Screate(H5S_SCALAR); + CHECK(dataspace, FAIL, "H5Screate"); + + file2 = H5Fcreate(MISC2_FILE_2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file2, FAIL, "H5Fcreate"); + + file1 = H5Fcreate(MISC2_FILE_1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file1, FAIL, "H5Fcreate"); + + root1 = H5Gopen2(file1, "/", H5P_DEFAULT); + CHECK(root1, FAIL, "H5Gopen2"); + + att1 = H5Acreate2(root1, MISC2_ATT_NAME_1, type, dataspace, H5P_DEFAULT, H5P_DEFAULT); + CHECK(att1, FAIL, "H5Acreate2"); + + data.string = string_att1; + + ret = H5Awrite(att1, type, &data); + CHECK(ret, FAIL, "H5Awrite"); + + ret = H5Aread(att1, type, &data_check); + CHECK(ret, FAIL, "H5Aread"); + + ret = H5Treclaim(type, dataspace, H5P_DEFAULT, &data_check); + CHECK(ret, FAIL, "H5Treclaim"); + + ret = H5Aclose(att1); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Gclose(root1); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(file1); + CHECK(ret, FAIL, "H5Fclose"); + + root2 = H5Gopen2(file2, "/", H5P_DEFAULT); + CHECK(root2, FAIL, "H5Gopen2"); + + att2 = H5Acreate2(root2, MISC2_ATT_NAME_2, type, dataspace, H5P_DEFAULT, H5P_DEFAULT); + CHECK(att2, FAIL, "H5Acreate2"); + + data.string = string_att2; + + ret = H5Awrite(att2, type, &data); + CHECK(ret, FAIL, "H5Awrite"); + + 
ret = H5Aread(att2, type, &data_check); + CHECK(ret, FAIL, "H5Aread"); + + ret = H5Treclaim(type, dataspace, H5P_DEFAULT, &data_check); + CHECK(ret, FAIL, "H5Treclaim"); + + ret = H5Aclose(att2); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Gclose(root2); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Tclose(type); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Sclose(dataspace); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(file2); + CHECK(ret, FAIL, "H5Fclose"); + + HDfree(string_att1); + HDfree(string_att2); +} + +static void +test_misc2_read_attribute(const char *filename, const char *att_name) +{ + hid_t file, root, att; + hid_t type; + hid_t space; + herr_t ret; + misc2_struct data_check; + + type = misc2_create_type(); + + file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fopen"); + + root = H5Gopen2(file, "/", H5P_DEFAULT); + CHECK(root, FAIL, "H5Gopen2"); + + att = H5Aopen(root, att_name, H5P_DEFAULT); + CHECK(att, FAIL, "H5Aopen"); + + space = H5Aget_space(att); + CHECK(space, FAIL, "H5Aget_space"); + + ret = H5Aread(att, type, &data_check); + CHECK(ret, FAIL, "H5Aread"); + + ret = H5Treclaim(type, space, H5P_DEFAULT, &data_check); + CHECK(ret, FAIL, "H5Treclaim"); + + ret = H5Sclose(space); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Aclose(att); + CHECK(ret, FAIL, "H5Aclose"); + + ret = H5Tclose(type); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Gclose(root); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +} +/**************************************************************** +** +** test_misc2(): test using the same VL-derived datatype in two +** different files, which was causing problems with the +** datatype conversion functions +** +****************************************************************/ +static void +test_misc2(void) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing VL datatype in two different files\n")); + + test_misc2_write_attribute(); + test_misc2_read_attribute(MISC2_FILE_1, MISC2_ATT_NAME_1); + test_misc2_read_attribute(MISC2_FILE_2, MISC2_ATT_NAME_2); +} /* end test_misc2() */ + +/**************************************************************** +** +** test_misc3(): Test reading from chunked dataset with non-zero +** fill value +** +****************************************************************/ +static void +test_misc3(void) +{ + hid_t file, dataspace, dataset, dcpl; + int rank = MISC3_RANK; + hsize_t dims[MISC3_RANK] = {MISC3_DIM1, MISC3_DIM2}; + hsize_t chunk_dims[MISC3_RANK] = {MISC3_CHUNK_DIM1, MISC3_CHUNK_DIM2}; + int fill = MISC3_FILL_VALUE; + int read_buf[MISC3_DIM1][MISC3_DIM2]; + int i, j; + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing reading from chunked dataset with non-zero fill-value\n")); + + file = H5Fcreate(MISC3_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + /* Create a simple dataspace */ + dataspace = H5Screate_simple(rank, dims, NULL); + CHECK(dataspace, FAIL, "H5Screate_simple"); + + /* Create a dataset creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Set the chunk information */ + ret = H5Pset_chunk(dcpl, rank, chunk_dims); + CHECK(dcpl, FAIL, "H5Pset_chunk"); + + /* Set the fill-value information */ + ret = H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fill); + CHECK(dcpl, FAIL, "H5Pset_fill_value"); + + /* Create the dataset */ + dataset = H5Dcreate2(file, MISC3_DSET_NAME, H5T_NATIVE_INT, dataspace, 
H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Read from the dataset (should be fill-values) */ + ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &read_buf); + CHECK(ret, FAIL, "H5Dread"); + + for (i = 0; i < MISC3_DIM1; i++) + for (j = 0; j < MISC3_DIM2; j++) + VERIFY(read_buf[i][j], fill, "H5Dread"); + + /* Release resources */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Sclose(dataspace); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc3() */ + +/**************************************************************** +** +** test_misc4(): Test the that 'fileno' field in H5O_info_t is +** valid. +** +****************************************************************/ +static void +test_misc4(void) +{ + hid_t file1, file2, group1, group2, group3; + H5O_info2_t oinfo1, oinfo2, oinfo3; + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing fileno working in H5O_info2_t\n")); + + file1 = H5Fcreate(MISC4_FILE_1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file1, FAIL, "H5Fcreate"); + + /* Create the first group */ + group1 = H5Gcreate2(file1, MISC4_GROUP_1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group1, FAIL, "H5Gcreate2"); + + /* Create the second group */ + group2 = H5Gcreate2(file1, MISC4_GROUP_2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group2, FAIL, "H5Gcreate2"); + + file2 = H5Fcreate(MISC4_FILE_2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file2, FAIL, "H5Fcreate"); + + /* Create the first group */ + group3 = H5Gcreate2(file2, MISC4_GROUP_1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group3, FAIL, "H5Gcreate2"); + + /* Get the stat information for each group */ + ret = H5Oget_info_by_name3(file1, MISC4_GROUP_1, &oinfo1, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(file1, MISC4_GROUP_2, &oinfo2, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + ret = H5Oget_info_by_name3(file2, MISC4_GROUP_1, &oinfo3, H5O_INFO_BASIC, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name3"); + + /* Verify that the fileno values are the same for groups from file1 */ + VERIFY(oinfo1.fileno, oinfo2.fileno, "H5Oget_info_by_name"); + + /* Verify that the fileno values are not the same between file1 & file2 */ + if (oinfo1.fileno == oinfo3.fileno) + TestErrPrintf("Error on line %d: oinfo1.fileno != oinfo3.fileno\n", __LINE__); + if (oinfo2.fileno == oinfo3.fileno) + TestErrPrintf("Error on line %d: oinfo2.fileno != oinfo3.fileno\n", __LINE__); + + /* Close the objects */ + ret = H5Gclose(group1); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Gclose(group2); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Gclose(group3); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(file1); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Fclose(file2); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc4() */ + +/**************************************************************** +** +** test_misc5(): Test several level deep nested compound & VL datatypes +** +****************************************************************/ + +/*********************** struct3 ***********************/ + +static misc5_struct3_hndl * +create_struct3(void) +{ + misc5_struct3_hndl *str3hndl; /* New 'struct3' created */ + herr_t ret; /* For error checking */ + + str3hndl = (misc5_struct3_hndl 
*)HDmalloc(sizeof(misc5_struct3_hndl));
+    CHECK_PTR(str3hndl, "HDmalloc");
+
+    str3hndl->st3h_base = H5Tcreate(H5T_COMPOUND, sizeof(misc5_struct3));
+    CHECK(str3hndl->st3h_base, FAIL, "H5Tcreate");
+
+    ret = H5Tinsert(str3hndl->st3h_base, "st3_el1", HOFFSET(misc5_struct3, st3_el1), H5T_NATIVE_INT);
+    CHECK(ret, FAIL, "H5Tinsert");
+
+    str3hndl->st3h_id = H5Tvlen_create(str3hndl->st3h_base);
+    CHECK(str3hndl->st3h_id, FAIL, "H5Tvlen_create");
+
+    return str3hndl;
+}
+
+static void
+delete_struct3(misc5_struct3_hndl *str3hndl)
+{
+    herr_t ret; /* For error checking */
+
+    ret = H5Tclose(str3hndl->st3h_id);
+    CHECK(ret, FAIL, "H5Tclose");
+
+    ret = H5Tclose(str3hndl->st3h_base);
+    CHECK(ret, FAIL, "H5Tclose");
+
+    HDfree(str3hndl);
+}
+
+static void
+set_struct3(misc5_struct3 *buf)
+{
+    buf->st3_el1 = MISC5_DBGELVAL3;
+}
+
+/*********************** struct2 ***********************/
+
+static misc5_struct2_hndl *
+create_struct2(void)
+{
+    misc5_struct2_hndl *str2hndl; /* New 'struct2' created */
+    herr_t              ret;      /* For error checking */
+
+    str2hndl = (misc5_struct2_hndl *)HDmalloc(sizeof(misc5_struct2_hndl));
+    CHECK_PTR(str2hndl, "HDmalloc");
+
+    str2hndl->st2h_base = H5Tcreate(H5T_COMPOUND, sizeof(misc5_struct2));
+    CHECK(str2hndl->st2h_base, FAIL, "H5Tcreate");
+
+    ret = H5Tinsert(str2hndl->st2h_base, "st2_el1", HOFFSET(misc5_struct2, st2_el1), H5T_NATIVE_INT);
+    CHECK(ret, FAIL, "H5Tinsert");
+
+    str2hndl->st2h_st3hndl = create_struct3();
+    CHECK_PTR(str2hndl->st2h_st3hndl, "create_struct3");
+
+    ret = H5Tinsert(str2hndl->st2h_base, "st2_el2", HOFFSET(misc5_struct2, st2_el2),
+                    str2hndl->st2h_st3hndl->st3h_id);
+    CHECK(ret, FAIL, "H5Tinsert");
+
+    str2hndl->st2h_id = H5Tvlen_create(str2hndl->st2h_base);
+    CHECK(str2hndl->st2h_id, FAIL, "H5Tvlen_create");
+
+    return str2hndl;
+}
+
+static void
+delete_struct2(misc5_struct2_hndl *str2hndl)
+{
+    herr_t ret; /* For error checking */
+
+    ret = H5Tclose(str2hndl->st2h_id);
+    CHECK(ret, FAIL, "H5Tclose");
+
+    delete_struct3(str2hndl->st2h_st3hndl);
+
+    ret = H5Tclose(str2hndl->st2h_base);
+    CHECK(ret, FAIL, "H5Tclose");
+
+    HDfree(str2hndl);
+}
+
+static void
+set_struct2(misc5_struct2 *buf)
+{
+    unsigned i; /* Local index variable */
+
+    buf->st2_el1     = MISC5_DBGELVAL2;
+    buf->st2_el2.len = MISC5_DBGNELM3;
+
+    buf->st2_el2.p = HDmalloc((buf->st2_el2.len) * sizeof(misc5_struct3));
+    CHECK_PTR(buf->st2_el2.p, "HDmalloc");
+
+    for (i = 0; i < (buf->st2_el2.len); i++)
+        set_struct3(&(((misc5_struct3 *)(buf->st2_el2.p))[i]));
+}
+
+static void
+clear_struct2(misc5_struct2 *buf)
+{
+    HDfree(buf->st2_el2.p);
+}
+
+/*********************** struct1 ***********************/
+
+static misc5_struct1_hndl *
+create_struct1(void)
+{
+    misc5_struct1_hndl *str1hndl; /* New 'struct1' created */
+    herr_t              ret;      /* For error checking */
+
+    str1hndl = (misc5_struct1_hndl *)HDmalloc(sizeof(misc5_struct1_hndl));
+    CHECK_PTR(str1hndl, "HDmalloc");
+
+    str1hndl->st1h_base = H5Tcreate(H5T_COMPOUND, sizeof(misc5_struct1));
+    CHECK(str1hndl->st1h_base, FAIL, "H5Tcreate");
+
+    ret = H5Tinsert(str1hndl->st1h_base, "st1_el1", HOFFSET(misc5_struct1, st1_el1), H5T_NATIVE_INT);
+    CHECK(ret, FAIL, "H5Tinsert");
+
+    str1hndl->st1h_st2hndl = create_struct2();
+    CHECK_PTR(str1hndl->st1h_st2hndl, "create_struct2");
+
+    ret = H5Tinsert(str1hndl->st1h_base, "st1_el2", HOFFSET(misc5_struct1, st1_el2),
+                    str1hndl->st1h_st2hndl->st2h_id);
+    CHECK(ret, FAIL, "H5Tinsert");
+
+    str1hndl->st1h_id = H5Tvlen_create(str1hndl->st1h_base);
+    CHECK(str1hndl->st1h_id, FAIL, "H5Tvlen_create");
+
+    return
str1hndl; +} + +static void +delete_struct1(misc5_struct1_hndl *str1hndl) +{ + herr_t ret; /* For error checking */ + + ret = H5Tclose(str1hndl->st1h_id); + CHECK(ret, FAIL, "H5Tclose"); + + delete_struct2(str1hndl->st1h_st2hndl); + + ret = H5Tclose(str1hndl->st1h_base); + CHECK(ret, FAIL, "H5Tclose"); + + HDfree(str1hndl); +} + +static void +set_struct1(misc5_struct1 *buf) +{ + unsigned i; /* Local index variable */ + + buf->st1_el1 = MISC5_DBGELVAL1; + buf->st1_el2.len = MISC5_DBGNELM2; + + buf->st1_el2.p = HDmalloc((buf->st1_el2.len) * sizeof(misc5_struct2)); + CHECK_PTR(buf->st1_el2.p, "HDmalloc"); + + for (i = 0; i < (buf->st1_el2.len); i++) + set_struct2(&(((misc5_struct2 *)(buf->st1_el2.p))[i])); +} + +static void +clear_struct1(misc5_struct1 *buf) +{ + unsigned i; + + for (i = 0; i < buf->st1_el2.len; i++) + clear_struct2(&(((misc5_struct2 *)(buf->st1_el2.p))[i])); + HDfree(buf->st1_el2.p); +} + +static void +test_misc5(void) +{ + hid_t loc_id, space_id, dataset_id; + hid_t mem_type_id; + misc5_struct1_hndl *str1hndl; + hsize_t dims[MISC5_DSETRANK]; + hvl_t buf; + unsigned i, j, k; + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing several level deep nested compound & VL datatypes \n")); + + /* Write the dataset out */ + loc_id = H5Fcreate(MISC5_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(loc_id, FAIL, "H5Fcreate"); + + /* Create the memory structure to write */ + str1hndl = create_struct1(); + CHECK_PTR(str1hndl, "create_struct1"); + + /* Create the dataspace */ + dims[0] = MISC5_NELMTOPLVL; + space_id = H5Screate_simple(MISC5_DSETRANK, dims, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + + /* Create the dataset */ + dataset_id = H5Dcreate2(loc_id, MISC5_DSETNAME, str1hndl->st1h_id, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK(dataset_id, FAIL, "H5Dcreate2"); + + /* Create the variable-length buffer */ + buf.len = MISC5_DBGNELM1; + buf.p = HDmalloc((buf.len) * sizeof(misc5_struct1)); + CHECK_PTR(buf.p, "HDmalloc"); + + /* Create the top-level VL information */ + for (i = 0; i < MISC5_DBGNELM1; i++) + set_struct1(&(((misc5_struct1 *)(buf.p))[i])); + + /* Write the data out */ + ret = H5Dwrite(dataset_id, str1hndl->st1h_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Release the top-level VL information */ + for (j = 0; j < MISC5_DBGNELM1; j++) + clear_struct1(&(((misc5_struct1 *)(buf.p))[j])); + + /* Free the variable-length buffer */ + HDfree(buf.p); + + /* Close dataset */ + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataspace */ + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + + /* Delete memory structures */ + delete_struct1(str1hndl); + + /* Close file */ + ret = H5Fclose(loc_id); + CHECK(ret, FAIL, "H5Fclose"); + + /* Read the dataset back in & verify it */ + loc_id = H5Fopen(MISC5_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(loc_id, FAIL, "H5Fopen"); + + /* Open dataset again */ + dataset_id = H5Dopen2(loc_id, MISC5_DSETNAME, H5P_DEFAULT); + CHECK(dataset_id, FAIL, "H5Dopen2"); + + /* Get the dataset's datatype */ + mem_type_id = H5Dget_type(dataset_id); + CHECK(mem_type_id, FAIL, "H5Dget_type"); + + /* Get the dataset's dataspace */ + space_id = H5Dget_space(dataset_id); + CHECK(space_id, FAIL, "H5Dget_space"); + + /* Read the data back in */ + ret = H5Dread(dataset_id, mem_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &buf); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the correct information was read in */ + for (i = 0; i < 
(buf.len); i++) { + /* HDprintf("[%d]=%d\n",i, ((misc5_struct1 *)(buf.p))[i].st1_el1); */ + VERIFY(((misc5_struct1 *)(buf.p))[i].st1_el1, MISC5_DBGELVAL1, "H5Dread"); + for (j = 0; j < (((misc5_struct1 *)(buf.p))[i].st1_el2.len); j++) { + /* HDprintf(" [%d]=%d\n",j, ((misc5_struct2 *)(((misc5_struct1 *) + * (buf.p))[i].st1_el2.p))[j].st2_el1); */ + VERIFY(((misc5_struct2 *)(((misc5_struct1 *)(buf.p))[i].st1_el2.p))[j].st2_el1, MISC5_DBGELVAL2, + "H5Dread"); + for (k = 0; k < (((misc5_struct2 *)(((misc5_struct1 *)(buf.p))[i].st1_el2.p))[j].st2_el2.len); + k++) { + /* HDprintf(" [%d]=%d\n",k, ((misc5_struct3 *)(((misc5_struct2 *) (((misc5_struct1 + * *)(buf.p))[i]. st1_el2.p))[j].st2_el2.p))[k].st3_el1); */ + VERIFY(((misc5_struct3 *)(((misc5_struct2 *)(((misc5_struct1 *)(buf.p))[i].st1_el2.p))[j] + .st2_el2.p))[k] + .st3_el1, + MISC5_DBGELVAL3, "H5Dread"); + } /* end for */ + } + } + + /* Reclaim the memory for the VL information */ + ret = H5Treclaim(mem_type_id, space_id, H5P_DEFAULT, &buf); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close dataspace */ + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset */ + ret = H5Tclose(mem_type_id); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close dataset */ + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(loc_id); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_misc5() */ + +/**************************************************************** +** +** test_misc6(): Test that object header continuation messages are +** created correctly. +** +****************************************************************/ +static void +test_misc6(void) +{ + hid_t loc_id, space_id, dataset_id; + hid_t attr_id; + char attr_name[16]; + unsigned u; + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing object header continuation code \n")); + + /* Create the file */ + loc_id = H5Fcreate(MISC6_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(loc_id, FAIL, "H5Fcreate"); + + /* Create the dataspace */ + space_id = H5Screate(H5S_SCALAR); + CHECK(space_id, FAIL, "H5Screate"); + + /* Create the first dataset */ + dataset_id = + H5Dcreate2(loc_id, MISC6_DSETNAME1, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset_id, FAIL, "H5Dcreate2"); + + /* Close dataset */ + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create the second dataset */ + dataset_id = + H5Dcreate2(loc_id, MISC6_DSETNAME2, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset_id, FAIL, "H5Dcreate2"); + + /* Close dataset */ + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(loc_id); + CHECK(ret, FAIL, "H5Fclose"); + + /* Loop through adding attributes to each dataset */ + for (u = 0; u < MISC6_NUMATTR; u++) { + /* Create name for attribute */ + HDsnprintf(attr_name, sizeof(attr_name), "Attr#%u", u); + + /* Open the file */ + loc_id = H5Fopen(MISC6_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(loc_id, FAIL, "H5Fopen"); + + /* Open first dataset */ + dataset_id = H5Dopen2(loc_id, MISC6_DSETNAME1, H5P_DEFAULT); + CHECK(dataset_id, FAIL, "H5Dopen2"); + + /* Add attribute to dataset */ + attr_id = H5Acreate2(dataset_id, attr_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Acreate2"); + + /* Close attribute */ + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close dataset */ + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, 
"H5Dclose"); + + /* Open second dataset */ + dataset_id = H5Dopen2(loc_id, MISC6_DSETNAME2, H5P_DEFAULT); + CHECK(dataset_id, FAIL, "H5Dopen2"); + + /* Add attribute to dataset */ + attr_id = H5Acreate2(dataset_id, attr_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Acreate2"); + + /* Close attribute */ + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close dataset */ + ret = H5Dclose(dataset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(loc_id); + CHECK(ret, FAIL, "H5Fclose"); + } /* end for */ + + /* Close dataspace */ + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + +} /* end test_misc6() */ + +/**************************************************************** +** +** test_misc7(): Test that datatypes are sensible to store on +** disk. (i.e. not partially initialized) +** +****************************************************************/ +#if 0 +static void +test_misc7(void) +{ + hid_t fid, did, tid, sid; + int enum_value = 1; + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing sensible datatype on disk code \n")); + + /* Attempt to commit a non-sensible datatype */ + + /* Create the file */ + fid = H5Fcreate(MISC7_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create the dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create the compound datatype to commit*/ + tid = H5Tcreate(H5T_COMPOUND, (size_t)32); + CHECK(tid, FAIL, "H5Tcreate"); + + /* Attempt to commit an empty compound datatype */ + ret = H5Tcommit2(fid, MISC7_TYPENAME1, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VERIFY(ret, FAIL, "H5Tcommit2"); + + /* Attempt to use empty compound datatype to create dataset */ + did = H5Dcreate2(fid, MISC7_DSETNAME1, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VERIFY(ret, FAIL, "H5Dcreate2"); + + /* Add a field to the compound datatype */ + ret = H5Tinsert(tid, "a", (size_t)0, H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Attempt to commit the compound datatype now - should work */ + ret = H5Tcommit2(fid, MISC7_TYPENAME1, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Attempt to use compound datatype to create dataset now - should work */ + did = H5Dcreate2(fid, MISC7_DSETNAME1, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Close dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close compound datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create the enum datatype to commit*/ + tid = H5Tenum_create(H5T_NATIVE_INT); + CHECK(tid, FAIL, "H5Tenum_create"); + + /* Attempt to commit an empty enum datatype */ + ret = H5Tcommit2(fid, MISC7_TYPENAME2, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VERIFY(ret, FAIL, "H5Tcommit2"); + + /* Attempt to use empty enum datatype to create dataset */ + did = H5Dcreate2(fid, MISC7_DSETNAME2, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VERIFY(did, FAIL, "H5Dcreate2"); + + /* Add a member to the enum datatype */ + ret = H5Tenum_insert(tid, "a", &enum_value); + CHECK(ret, FAIL, "H5Tenum_insert"); + + /* Attempt to commit the enum datatype now - should work */ + ret = H5Tcommit2(fid, MISC7_TYPENAME2, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Attempt to use enum datatype to create dataset now - should work */ + did = H5Dcreate2(fid, MISC7_DSETNAME2, tid, 
sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Close dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close enum datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_misc7() */ +#endif + +/**************************************************************** +** +** test_misc8(): Test storage size of various types of dataset +** storage methods. +** +****************************************************************/ +#if 0 +static void +test_misc8(void) +{ + hid_t fid, did, sid; + hid_t fapl; /* File access property list */ + hid_t dcpl; /* Dataset creation property list */ + int rank = MISC8_RANK; + hsize_t dims[MISC8_RANK] = {MISC8_DIM0, MISC8_DIM1}; + hsize_t chunk_dims[MISC8_RANK] = {MISC8_CHUNK_DIM0, MISC8_CHUNK_DIM1}; + hsize_t storage_size; /* Number of bytes of raw data storage used */ + int *wdata; /* Data to write */ + int *tdata; /* Temporary pointer to data write */ +#ifdef VERIFY_DATA + int *rdata; /* Data to read */ + int *tdata2; /* Temporary pointer to data to read */ +#endif /* VERIFY_DATA */ + unsigned u, v; /* Local index variables */ + int mdc_nelmts; /* Metadata number of elements */ + size_t rdcc_nelmts; /* Raw data number of elements */ + size_t rdcc_nbytes; /* Raw data number of bytes */ + double rdcc_w0; /* Raw data write percentage */ + hsize_t start[MISC8_RANK]; /* Hyperslab start */ + hsize_t count[MISC8_RANK]; /* Hyperslab block count */ + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing dataset storage sizes\n")); + + /* Allocate space for the data to write & read */ + wdata = (int *)HDmalloc(sizeof(int) * MISC8_DIM0 * MISC8_DIM1); + CHECK_PTR(wdata, "HDmalloc"); +#ifdef VERIFY_DATA + rdata = (int *)HDmalloc(sizeof(int) * MISC8_DIM0 * MISC8_DIM1); + CHECK_PTR(rdata, "HDmalloc"); +#endif /* VERIFY_DATA */ + + /* Initialize values */ + tdata = wdata; + for (u = 0; u < MISC8_DIM0; u++) + for (v = 0; v < MISC8_DIM1; v++) + *tdata++ = (int)(((u * MISC8_DIM1) + v) % 13); + + /* Create a file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Get the default file access properties for caching */ + ret = H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0); + CHECK(ret, FAIL, "H5Pget_cache"); + + /* Decrease the size of the raw data cache */ + rdcc_nbytes = 0; + + /* Set the file access properties for caching */ + ret = H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0); + CHECK(ret, FAIL, "H5Pset_cache"); + + /* Create the file */ + fid = H5Fcreate(MISC8_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close file access property list */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Create a simple dataspace */ + sid = H5Screate_simple(rank, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Select a hyperslab which coincides with chunk boundaries */ + /* (For later use) */ + start[0] = 1; + start[1] = 1; + count[0] = (MISC8_CHUNK_DIM0 * 2) - 1; + count[1] = (MISC8_CHUNK_DIM1 * 2) - 1; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create a dataset creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* I. 
contiguous dataset tests */ + + ret = H5Pset_layout(dcpl, H5D_CONTIGUOUS); + CHECK(ret, FAIL, "H5Pset_layout"); + + /* Set the space allocation time to early */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create a contiguous dataset, with space allocation early */ + did = H5Dcreate2(fid, MISC8_DSETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check the storage size */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, "H5Dget_storage_size"); + VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), + "H5Dget_storage_size"); + + /* Close dataset ID */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + +#ifndef H5_HAVE_PARALLEL + /* Set the space allocation time to late */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create a contiguous dataset, with space allocation late */ + did = H5Dcreate2(fid, MISC8_DSETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check the storage size before data is written */ + storage_size = H5Dget_storage_size(did); + VERIFY(storage_size, 0, "H5Dget_storage_size"); + + /* Write data */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Check the storage size after data is written */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, "H5Dget_storage_size"); + VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), + "H5Dget_storage_size"); + + /* Close dataset ID */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Set the space allocation time to incremental */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create a contiguous dataset, with space allocation late */ + did = H5Dcreate2(fid, MISC8_DSETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check the storage size before data is written */ + storage_size = H5Dget_storage_size(did); + VERIFY(storage_size, 0, "H5Dget_storage_size"); + + /* Write data */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Check the storage size after data is written */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, "H5Dget_storage_size"); + VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), + "H5Dget_storage_size"); + + /* Close dataset ID */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); +#endif /* H5_HAVE_PARALLEL */ + + /* II. 
compact dataset tests */ + ret = H5Pset_layout(dcpl, H5D_COMPACT); + CHECK(ret, FAIL, "H5Pset_layout"); + + /* Set the space allocation time to late */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create a contiguous dataset, with space allocation late */ + /* Should fail */ + H5E_BEGIN_TRY + { + did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(did, FAIL, "H5Dcreate2"); + + /* Set the space allocation time to incremental */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create a contiguous dataset, with space allocation incremental */ + /* Should fail */ + H5E_BEGIN_TRY + { + did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(did, FAIL, "H5Dcreate2"); + + /* Set the space allocation time to early */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Set the fill time to allocation */ + ret = H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create a contiguous dataset, with space allocation early */ + did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check the storage size */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, "H5Dget_storage_size"); + VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), + "H5Dget_storage_size"); + + /* Close dataset ID */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* III. chunked dataset tests */ + + ret = H5Pset_layout(dcpl, H5D_CHUNKED); + CHECK(ret, FAIL, "H5Pset_layout"); + + /* Set the space allocation time to early */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Use chunked storage for this dataset */ + ret = H5Pset_chunk(dcpl, rank, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* Create a chunked dataset, with space allocation early */ + did = H5Dcreate2(fid, MISC8_DSETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check the storage size after data is written */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, "H5Dget_storage_size"); + VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), + "H5Dget_storage_size"); + + /* Close dataset ID */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + +#ifndef H5_HAVE_PARALLEL + /* Set the space allocation time to late */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Use chunked storage for this dataset */ + ret = H5Pset_chunk(dcpl, rank, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* Create a chunked dataset, with space allocation late */ + did = H5Dcreate2(fid, MISC8_DSETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check the storage size after dataset is created */ + storage_size = H5Dget_storage_size(did); + VERIFY(storage_size, 0, "H5Dget_storage_size"); + + /* Write part of the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Check the storage size after data is written */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, 
"H5Dget_storage_size"); + VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), + "H5Dget_storage_size"); + + /* Close dataset ID */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Set the space allocation time to incremental */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create a chunked dataset, with space allocation incremental */ + did = H5Dcreate2(fid, MISC8_DSETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check the storage size before data is written */ + storage_size = H5Dget_storage_size(did); + VERIFY(storage_size, 0, "H5Dget_storage_size"); + + /* Write part of the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Check the storage size after only four chunks are written */ + storage_size = H5Dget_storage_size(did); + VERIFY(storage_size, (hsize_t)(4 * MISC8_CHUNK_DIM0 * MISC8_CHUNK_DIM1 * H5Tget_size(H5T_NATIVE_INT)), + "H5Dget_storage_size"); + + /* Write entire dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + +#ifdef VERIFY_DATA + /* Read data */ + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check values written */ + tdata = wdata; + tdata2 = rdata; + for (u = 0; u < MISC8_DIM0; u++) + for (v = 0; v < MISC8_DIM1; v++, tdata++, tdata2++) + if (*tdata != *tdata2) + TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u, + (unsigned)v, (int)*tdata, (int)*tdata2); +#endif /* VERIFY_DATA */ + + /* Check the storage size after data is written */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, "H5Dget_storage_size"); + VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), + "H5Dget_storage_size"); + + /* Close dataset ID */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); +#endif /* H5_HAVE_PARALLEL */ + + /* Set the space allocation time to early */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Use compression as well as chunking for these datasets */ +#ifdef H5_HAVE_FILTER_DEFLATE + ret = H5Pset_deflate(dcpl, 9); + CHECK(ret, FAIL, "H5Pset_deflate"); +#endif /* end H5_HAVE_FILTER_DEFLATE */ + + /* Create a chunked dataset, with space allocation early */ + did = H5Dcreate2(fid, MISC8_DSETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Write part of the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Check the storage size after data is written */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, "H5Dget_storage_size"); +#ifdef H5_HAVE_FILTER_DEFLATE + if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) + TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__, + (unsigned)storage_size); +#else /* Compression is not configured */ + if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) + TestErrPrintf("Error on line %d: wrong storage size! 
storage_size=%u\n", __LINE__, + (unsigned)storage_size); +#endif /* H5_HAVE_FILTER_DEFLATE */ + + /* Close dataset ID */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + +#ifndef H5_HAVE_PARALLEL + /* Set the space allocation time to late */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create a chunked dataset, with space allocation late */ + did = H5Dcreate2(fid, MISC8_DSETNAME9, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check the storage size before data is written */ + storage_size = H5Dget_storage_size(did); + VERIFY(storage_size, 0, "H5Dget_storage_size"); + + /* Write part of the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Check the storage size after only four chunks are written */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, "H5Dget_storage_size"); +#ifdef H5_HAVE_FILTER_DEFLATE + if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) + TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__, + (unsigned)storage_size); +#else /* Compression is not configured */ + if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) + TestErrPrintf("Error on line %d: wrong storage size! storage_size=%u\n", __LINE__, + (unsigned)storage_size); +#endif /* H5_HAVE_FILTER_DEFLATE */ + + /* Write entire dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + +#ifdef VERIFY_DATA + /* Read data */ + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check values written */ + tdata = wdata; + tdata2 = rdata; + for (u = 0; u < MISC8_DIM0; u++) + for (v = 0; v < MISC8_DIM1; v++, tdata++, tdata2++) + if (*tdata != *tdata2) + TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u, + (unsigned)v, (int)*tdata, (int)*tdata2); +#endif /* VERIFY_DATA */ + + /* Check the storage size after data is written */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, "H5Dget_storage_size"); +#ifdef H5_HAVE_FILTER_DEFLATE + if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) + TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__, + (unsigned)storage_size); +#else + if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) + TestErrPrintf("Error on line %d: wrong storage size! 
storage_size=%u\n", __LINE__, + (unsigned)storage_size); +#endif /*H5_HAVE_FILTER_DEFLATE*/ + + /* Close dataset ID */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Set the space allocation time to incremental */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create a chunked dataset, with space allocation incremental */ + did = H5Dcreate2(fid, MISC8_DSETNAME10, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check the storage size before data is written */ + storage_size = H5Dget_storage_size(did); + VERIFY(storage_size, 0, "H5Dget_storage_size"); + + /* Write part of the dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Check the storage size after only four chunks are written */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, "H5Dget_storage_size"); +#ifdef H5_HAVE_FILTER_DEFLATE + if (storage_size >= (4 * MISC8_CHUNK_DIM0 * MISC8_CHUNK_DIM1 * H5Tget_size(H5T_NATIVE_INT))) + TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__, + (unsigned)storage_size); +#else /* Compression is not configured */ + if (storage_size != (4 * MISC8_CHUNK_DIM0 * MISC8_CHUNK_DIM1 * H5Tget_size(H5T_NATIVE_INT))) + TestErrPrintf("Error on line %d: wrong storage size! storage_size=%u\n", __LINE__, + (unsigned)storage_size); +#endif /* H5_HAVE_FILTER_DEFLATE */ + + /* Write entire dataset */ + ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + +#ifdef VERIFY_DATA + /* Read data */ + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check values written */ + tdata = wdata; + tdata2 = rdata; + for (u = 0; u < MISC8_DIM0; u++) + for (v = 0; v < MISC8_DIM1; v++, tdata++, tdata2++) + if (*tdata != *tdata2) + TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u, + (unsigned)v, (int)*tdata, (int)*tdata2); +#endif /* VERIFY_DATA */ + + /* Check the storage size after data is written */ + storage_size = H5Dget_storage_size(did); + CHECK(storage_size, 0, "H5Dget_storage_size"); +#ifdef H5_HAVE_FILTER_DEFLATE + if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) + TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__, + (unsigned)storage_size); +#else + if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) + TestErrPrintf("Error on line %d: wrong storage size! storage_size=%u\n", __LINE__, + (unsigned)storage_size); +#endif /*H5_HAVE_FILTER_DEFLATE*/ + + /* Close dataset ID */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); +#endif /* H5_HAVE_PARALLEL */ + + /* Close dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free the read & write buffers */ + HDfree(wdata); +#ifdef VERIFY_DATA + HDfree(rdata); +#endif /* VERIFY_DATA */ +} /* end test_misc8() */ +#endif + +/**************************************************************** +** +** test_misc9(): Test that H5Fopen() does not succeed for core +** files, H5Fcreate() must be used to open them. 
+**
+****************************************************************/
+static void
+test_misc9(void)
+{
+    hid_t  fapl, fid;
+    herr_t ret;
+
+    /* Output message about test being performed */
+    MESSAGE(5, ("Testing core file opening\n"));
+
+    fapl = H5Pcreate(H5P_FILE_ACCESS);
+    CHECK(fapl, FAIL, "H5Pcreate");
+
+    ret = H5Pset_fapl_core(fapl, (size_t)1024, 0);
+    CHECK(ret, FAIL, "H5Pset_fapl_core");
+
+    H5E_BEGIN_TRY
+    {
+        fid = H5Fopen(MISC9_FILE, H5F_ACC_RDWR, fapl);
+    }
+    H5E_END_TRY;
+    VERIFY(fid, FAIL, "H5Fopen");
+
+    ret = H5Pclose(fapl);
+    CHECK(ret, FAIL, "H5Pclose");
+} /* end test_misc9() */
+
+/****************************************************************
+**
+**  test_misc10(): Test opening a dataset created with an older
+**      version of the library (shares the tmtimeo.h5 file with the mtime.c
+**      test - see the notes in gen_old_mtime.c on generating this
+**      data file) and using the dataset creation property list from
+**      that dataset to create a dataset with the current version of
+**      the library.  Also tests using the file creation property list in the same way.
+**
+****************************************************************/
+#if 0
+static void
+test_misc10(void)
+{
+    hid_t       file, file_new;       /* File IDs for old & new files */
+    hid_t       fcpl;                 /* File creation property list */
+    hid_t       dataset, dataset_new; /* Dataset IDs for old & new datasets */
+    hid_t       dcpl;                 /* Dataset creation property list */
+    hid_t       space, type;          /* Old dataset's dataspace & datatype */
+    const char *testfile = H5_get_srcdir_filename(MISC10_FILE_OLD); /* Corrected test file name */
+    hbool_t     driver_is_default_compatible;
+    herr_t      ret;
+
+    /* Output message about test being performed */
+    MESSAGE(5, ("Testing using old dataset creation property list\n"));
+
+    ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
+    CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible");
+
+    if (!driver_is_default_compatible) {
+        HDprintf("-- SKIPPED --\n");
+        return;
+    }
+
+    /*
+     * Open the old file and the dataset and get old settings.
+ */ + file = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fopen"); + fcpl = H5Fget_create_plist(file); + CHECK(fcpl, FAIL, "H5Fget_create_plist"); + + dataset = H5Dopen2(file, MISC10_DSETNAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + dcpl = H5Dget_create_plist(dataset); + CHECK(dcpl, FAIL, "H5Dget_create_plist"); + space = H5Dget_space(dataset); + CHECK(space, FAIL, "H5Dget_space"); + type = H5Dget_type(dataset); + CHECK(type, FAIL, "H5Dget_type"); + + /* Create new file & dataset */ + file_new = H5Fcreate(MISC10_FILE_NEW, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT); + CHECK(file_new, FAIL, "H5Fcreate"); + + dataset_new = H5Dcreate2(file_new, MISC10_DSETNAME, type, space, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset_new, FAIL, "H5Dcreate2"); + + /* Close new dataset & file */ + ret = H5Dclose(dataset_new); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(file_new); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close old dataset information */ + ret = H5Tclose(type); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Sclose(space); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close old file information */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); +} /* end test_misc10() */ +#endif + +/**************************************************************** +** +** test_misc11(): Test that all properties in a file creation property +** list are stored correctly in the file and can be retrieved +** when the file is re-opened. +** +****************************************************************/ +static void +test_misc11(void) +{ + hid_t file; /* File IDs for old & new files */ + hid_t fcpl; /* File creation property list */ + hsize_t userblock; /* Userblock size retrieved from FCPL */ + size_t off_size; /* Size of offsets in the file */ + size_t len_size; /* Size of lengths in the file */ + unsigned sym_ik; /* Symbol table B-tree initial 'K' value */ + unsigned istore_ik; /* Indexed storage B-tree initial 'K' value */ + unsigned sym_lk; /* Symbol table B-tree leaf 'K' value */ + unsigned nindexes; /* Shared message number of indexes */ +#if 0 + H5F_info2_t finfo; /* global information about file */ +#endif + H5F_fspace_strategy_t strategy; /* File space strategy */ + hsize_t threshold; /* Free-space section threshold */ + hbool_t persist; /* To persist free-space or not */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing file creation properties retrieved correctly\n")); + + /* Creating a file with the default file creation property list should + * create a version 0 superblock + */ + + /* Create file with default file creation property list */ + file = H5Fcreate(MISC11_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); +#if 0 + /* Get the file's version information */ + ret = H5Fget_info2(file, &finfo); + CHECK(ret, FAIL, "H5Fget_info2"); + VERIFY(finfo.super.version, 0, "H5Fget_info2"); + VERIFY(finfo.free.version, 0, "H5Fget_info2"); + VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); +#endif + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Create a file creation property list */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); + + /* Set all the properties in the FCPL */ + ret = H5Pset_userblock(fcpl, (hsize_t)MISC11_USERBLOCK); + CHECK(ret, FAIL, 
"H5Pset_userblock"); + + ret = H5Pset_sizes(fcpl, (size_t)MISC11_SIZEOF_OFF, (size_t)MISC11_SIZEOF_LEN); + CHECK(ret, FAIL, "H5Pset_sizes"); + + /* This should fail as (32770*2) will exceed ^16 - 2 bytes for storing btree entries */ + H5E_BEGIN_TRY + { + ret = H5Pset_sym_k(fcpl, 32770, 0); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_sym_k"); + + ret = H5Pset_sym_k(fcpl, MISC11_SYM_IK, MISC11_SYM_LK); + CHECK(ret, FAIL, "H5Pset_sym_k"); + + /* This should fail as (32770*2) will exceed ^16 - 2 bytes for storing btree entries */ + H5E_BEGIN_TRY + { + ret = H5Pset_istore_k(fcpl, 32770); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pset_istore_k"); + + ret = H5Pset_istore_k(fcpl, MISC11_ISTORE_IK); + CHECK(ret, FAIL, "H5Pset_istore_k"); + + ret = H5Pset_shared_mesg_nindexes(fcpl, MISC11_NINDEXES); + CHECK(ret, FAIL, "H5Pset_shared_mesg"); + + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, FALSE, (hsize_t)1); + CHECK(ret, FAIL, "H5Pset_file_space"); + + /* Creating a file with the non-default file creation property list should + * create a version 2 superblock + */ + + /* Create file with custom file creation property list */ + file = H5Fcreate(MISC11_FILE, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + /* Close FCPL */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); +#if 0 + /* Get the file's version information */ + ret = H5Fget_info2(file, &finfo); + CHECK(ret, FAIL, "H5Fget_info2"); + VERIFY(finfo.super.version, 2, "H5Fget_info2"); + VERIFY(finfo.free.version, 0, "H5Fget_info2"); + VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); +#endif + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + file = H5Fopen(MISC11_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + /* Get the file's creation property list */ + fcpl = H5Fget_create_plist(file); + CHECK(fcpl, FAIL, "H5Fget_create_plist"); +#if 0 + /* Get the file's version information */ + ret = H5Fget_info2(file, &finfo); + CHECK(ret, FAIL, "H5Fget_info2"); + VERIFY(finfo.super.version, 2, "H5Fget_info2"); + VERIFY(finfo.free.version, 0, "H5Fget_info2"); + VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); +#endif + /* Retrieve all the property values & check them */ + ret = H5Pget_userblock(fcpl, &userblock); + CHECK(ret, FAIL, "H5Pget_userblock"); + VERIFY(userblock, MISC11_USERBLOCK, "H5Pget_userblock"); + + ret = H5Pget_sizes(fcpl, &off_size, &len_size); + CHECK(ret, FAIL, "H5Pget_sizes"); + VERIFY(off_size, MISC11_SIZEOF_OFF, "H5Pget_sizes"); + VERIFY(len_size, MISC11_SIZEOF_LEN, "H5Pget_sizes"); + + ret = H5Pget_sym_k(fcpl, &sym_ik, &sym_lk); + CHECK(ret, FAIL, "H5Pget_sym_k"); + VERIFY(sym_ik, MISC11_SYM_IK, "H5Pget_sym_k"); + VERIFY(sym_lk, MISC11_SYM_LK, "H5Pget_sym_k"); + + ret = H5Pget_istore_k(fcpl, &istore_ik); + CHECK(ret, FAIL, "H5Pget_istore_k"); + VERIFY(istore_ik, MISC11_ISTORE_IK, "H5Pget_istore_k"); + + ret = H5Pget_shared_mesg_nindexes(fcpl, &nindexes); + CHECK(ret, FAIL, "H5Pget_shared_mesg_nindexes"); + VERIFY(nindexes, MISC11_NINDEXES, "H5Pget_shared_mesg_nindexes"); + + ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + VERIFY(strategy, 3, "H5Pget_file_space_strategy"); + VERIFY(persist, FALSE, "H5Pget_file_space_strategy"); + VERIFY(threshold, 1, "H5Pget_file_space_strategy"); + + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close FCPL */ + ret = H5Pclose(fcpl); + CHECK(ret, FAIL, "H5Pclose"); +} 
/* end test_misc11() */ + +/**************************************************************** +** +** test_misc12(): Test that VL-types operate correctly in chunked +** datasets that are extended. +** +****************************************************************/ +static void +test_misc12(void) +{ + const char *wdata[MISC12_SPACE1_DIM1] = { + "Four score and seven years ago our forefathers brought forth on this continent a new nation,", + "conceived in liberty and dedicated to the proposition that all men are created equal.", + "Now we are engaged in a great civil war,", + "testing whether that nation or any nation so conceived and so dedicated can long endure."}; + const char *wdata1[MISC12_APPEND_SIZE] = { + "O Gloria inmarcesible! O Jubilo inmortal! En surcos de dolores, el", + "bien germina ya! Ceso la horrible noche, La libertad sublime", + "derrama las auroras de su invencible luz.", "La humanidad entera, que entre cadenas gime, comprende", + "las palabras del que murio en la cruz."}; + char *rdata[MISC12_SPACE1_DIM1 + MISC12_APPEND_SIZE]; /* Information read in */ + hid_t fid1; + hid_t dataset; + hid_t sid1, space, memspace; + hid_t tid1, cparms; + hsize_t dims1[] = {MISC12_SPACE1_DIM1}; + hsize_t dimsn[] = {MISC12_APPEND_SIZE}; + hsize_t maxdims1[1] = {H5S_UNLIMITED}; + hsize_t chkdims1[1] = {MISC12_CHUNK_SIZE}; + hsize_t newsize[1] = {MISC12_SPACE1_DIM1 + MISC12_APPEND_SIZE}; + hsize_t offset[1] = {MISC12_SPACE1_DIM1}; + hsize_t count[1] = {MISC12_APPEND_SIZE}; + int i; /* counting variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing VL-type in chunked dataset\n")); + + /* This test requirese a relatively "fresh" library environment */ + ret = H5garbage_collect(); + CHECK(ret, FAIL, "H5garbage_collect"); + + /* Create file */ + fid1 = H5Fcreate(MISC12_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(MISC12_SPACE1_RANK, dims1, maxdims1); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a datatype to refer to */ + tid1 = H5Tcopy(H5T_C_S1); + CHECK(tid1, FAIL, "H5Tcopy"); + + ret = H5Tset_size(tid1, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + cparms = H5Pcreate(H5P_DATASET_CREATE); + CHECK(cparms, FAIL, "H5Pcreate"); + + ret = H5Pset_chunk(cparms, 1, chkdims1); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, MISC12_DSET_NAME, tid1, sid1, H5P_DEFAULT, cparms, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Extend dataset */ + ret = H5Dset_extent(dataset, newsize); + CHECK(ret, FAIL, "H5Dset_extent"); + + memspace = H5Screate_simple(MISC12_SPACE1_RANK, dimsn, NULL); + CHECK(memspace, FAIL, "H5Screate_simple"); + + space = H5Dget_space(dataset); + CHECK(space, FAIL, "H5Dget_space"); + + ret = H5Sselect_hyperslab(space, H5S_SELECT_SET, offset, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Write data to new portion of dataset */ + ret = H5Dwrite(dataset, tid1, memspace, space, H5P_DEFAULT, wdata1); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read all data back */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + for (i = 0; i < MISC12_SPACE1_DIM1; i++) + if (HDstrcmp(wdata[i], rdata[i]) != 0) + TestErrPrintf("Error on line %d: wdata[%d]=%s, 
rdata[%d]=%s\n", __LINE__, i, wdata[i], i, + rdata[i]); + for (; i < (MISC12_SPACE1_DIM1 + MISC12_APPEND_SIZE); i++) + if (HDstrcmp(wdata1[i - MISC12_SPACE1_DIM1], rdata[i]) != 0) + TestErrPrintf("Error on line %d: wdata1[%d]=%s, rdata[%d]=%s\n", __LINE__, i - MISC12_SPACE1_DIM1, + wdata1[i - MISC12_SPACE1_DIM1], i, rdata[i]); + + ret = H5Sselect_all(space); + CHECK(ret, FAIL, "H5Sselect_all"); + + /* Reclaim VL data memory */ + ret = H5Treclaim(tid1, space, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Everything */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Sclose(space); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(memspace); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Pclose(cparms); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc12() */ +#if 0 +/* Various routines for misc. 13 test */ +static void +misc13_init_data(unsigned *original_data) +{ + unsigned u; + + for (u = 0; u < MISC13_DIM1; u++) + original_data[u] = u; +} + +static hbool_t +misc13_verify_data_match(const unsigned *original_data, const unsigned *read_data) +{ + unsigned u; + + for (u = 0; u < MISC13_DIM1; u++) + if (original_data[u] != read_data[u]) + return FALSE; + + return TRUE; +} + +static void +misc13_create_dataset(hid_t loc_id, const char *name, hid_t dcpl, const unsigned *data) +{ + hid_t dsid = -1; /* Dataset ID */ + hid_t sid = -1; /* Dataspace ID */ + hsize_t dims[MISC13_RANK]; /* Dataset dimensions */ + herr_t ret; /* Generic return value */ + + /* Create dataspace for use with dataset */ + dims[0] = MISC13_DIM1; + sid = H5Screate_simple(MISC13_RANK, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create contiguous dataset in root group */ + dsid = H5Dcreate2(loc_id, name, H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dsid, FAIL, "H5Dcreate2"); + + /* Write some data to dataset */ + ret = H5Dwrite(dsid, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close the contiguous dataset */ + ret = H5Dclose(dsid); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + +} /* end misc13_create_dataset() */ + +static void +misc13_verify_dataset(hid_t loc_id, const char *name, const unsigned *data) +{ + unsigned *read_data = NULL; /* Data to write to dataset */ + hid_t dsid = -1; /* Dataset ID */ + herr_t ret; /* Generic return value */ + + /* Create a data buffer for the dataset read */ + read_data = (unsigned *)HDcalloc(MISC13_DIM1, sizeof(unsigned)); + CHECK_PTR(read_data, "HDcalloc"); + + /* Open the contiguous dataset in the root group */ + dsid = H5Dopen2(loc_id, name, H5P_DEFAULT); + CHECK(dsid, FAIL, "H5Dopen2"); + + /* Read the data */ + ret = H5Dread(dsid, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_data); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify that the data are correct */ + ret = misc13_verify_data_match(data, read_data); + CHECK(ret, FAIL, "misc13_verify_data_match"); + + /* Close the contiguous dataset */ + ret = H5Dclose(dsid); + CHECK(ret, FAIL, "H5Dclose"); + + /* Free the dataset read buffer */ + HDfree(read_data); + +} /* end misc13_verify_dataset() */ + +static void +misc13_create_hdf_file(const char *name, const unsigned *data) +{ + hid_t fid = -1; /* File ID */ + hid_t gid1 = -1; /* Group ID (level 1) */ + hid_t gid2 = -1; /* Group 
ID (level 2) */ + hid_t tid = -1; /* Datatype ID */ + hid_t dcplid = -1; /* Dataset creation property list ID */ + hsize_t chunk_dims[MISC13_RANK]; /* Chunk dimensions */ + herr_t ret; /* Generic return value */ + + /* Create file */ + fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create DCPL for use with datasets */ + dcplid = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcplid, FAIL, "H5Pcreate"); + + /* Set the DCPL to be chunked */ + ret = H5Pset_layout(dcplid, H5D_CHUNKED); + CHECK(ret, FAIL, "H5Pset_layout"); + + /* Use chunked storage for this DCPL */ + chunk_dims[0] = MISC13_CHUNK_DIM1; + ret = H5Pset_chunk(dcplid, MISC13_RANK, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* Create contiguous dataset in root group */ + misc13_create_dataset(fid, MISC13_DSET1_NAME, H5P_DEFAULT, data); + + /* Create chunked dataset in root group */ + misc13_create_dataset(fid, MISC13_DSET2_NAME, dcplid, data); + + /* Create a datatype to commit to the file */ + tid = H5Tcopy(H5T_NATIVE_INT); + CHECK(tid, FAIL, "H5Tcopy"); + + /* Create a named datatype in the root group */ + ret = H5Tcommit2(fid, MISC13_DTYPE_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Close named datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Create a group in the root group */ + gid1 = H5Gcreate2(fid, MISC13_GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gcreate2"); + + /* Create another group in the new group */ + gid2 = H5Gcreate2(gid1, MISC13_GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gcreate2"); + + /* Close the second group */ + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create contiguous dataset in new group */ + misc13_create_dataset(gid1, MISC13_DSET1_NAME, H5P_DEFAULT, data); + + /* Create chunked dataset in new group */ + misc13_create_dataset(gid1, MISC13_DSET2_NAME, dcplid, data); + + /* Create a datatype to commit to the new group */ + tid = H5Tcopy(H5T_NATIVE_INT); + CHECK(tid, FAIL, "H5Tcopy"); + + /* Create a named datatype in the new group */ + ret = H5Tcommit2(gid1, MISC13_DTYPE_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Close named datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close the first group */ + ret = H5Gclose(gid1); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close the DCPL */ + ret = H5Pclose(dcplid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end misc13_create_hdf_file() */ + +static void +misc13_insert_user_block(const char *old_name, const char *new_name, const char *str, size_t size) +{ + FILE *new_fp = NULL; /* Pointers to new & old files */ + FILE *old_fp = NULL; + void *user_block = NULL; /* Pointer to user block to write to file */ + void *copy_buf = NULL; /* Pointer to buffer for copying data */ + size_t written; /* Amount of data written to new file */ + size_t read_in; /* Amount of data read in from old file */ + int ret; /* Generic status value */ + + /* Allocate space for the user block */ + user_block = HDcalloc(size, (size_t)1); + CHECK_PTR(user_block, "HDcalloc"); + + /* Copy in the user block data */ + HDmemcpy(user_block, str, HDstrlen(str)); + + /* Open the new file */ + new_fp = HDfopen(new_name, "wb"); + CHECK_PTR(new_fp, "HDfopen"); + + /* Write the user block to the new file */ + written = HDfwrite(user_block, (size_t)1, 
size, new_fp); + VERIFY(written, size, "HDfwrite"); + + /* Open the old file */ + old_fp = HDfopen(old_name, "rb"); + CHECK_PTR(old_fp, "HDfopen"); + + /* Allocate space for the copy buffer */ + copy_buf = HDmalloc((size_t)MISC13_COPY_BUF_SIZE); + CHECK_PTR(copy_buf, "HDmalloc"); + + /* Copy data from the old file to the new file */ + while ((read_in = HDfread(copy_buf, (size_t)1, (size_t)MISC13_COPY_BUF_SIZE, old_fp)) > 0) { + /* Write the data to the new file */ + written = HDfwrite(copy_buf, (size_t)1, read_in, new_fp); + VERIFY(written, read_in, "HDfwrite"); + } + + /* Close the old file */ + ret = HDfclose(old_fp); + VERIFY(ret, 0, "HDfclose"); + + /* Close the new file */ + ret = HDfclose(new_fp); + VERIFY(ret, 0, "HDfclose"); + + /* Free the copy buffer */ + HDfree(copy_buf); + + /* Free the user block */ + HDfree(user_block); + +} /* end misc13_insert_user_block() */ + +static void +misc13_verify_file(const char *name, const unsigned *data, hsize_t userblock_size, + hbool_t check_for_new_dataset) +{ + hid_t fid = -1; /* File ID */ + hid_t gid1 = -1; /* Group IDs */ + hid_t gid2 = -1; /* Group IDs */ + hid_t tid = -1; /* Datatype ID */ + hid_t fcplid = -1; /* File creation property list ID */ + hsize_t ub_size_out; /* Userblock size retrieved from FCPL */ + herr_t ret; /* Generic return value */ + + /* Open the file */ + fid = H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Get the file's FCPL */ + fcplid = H5Fget_create_plist(fid); + CHECK(fcplid, FAIL, "H5Fget_create_plist"); + + /* Get the user block size for the file */ + ret = H5Pget_userblock(fcplid, &ub_size_out); + CHECK(ret, FAIL, "H5Pget_userblock"); + + /* Check the userblock size */ + VERIFY(userblock_size, ub_size_out, "H5Pget_userblock"); + + /* Close the FCPL */ + ret = H5Pclose(fcplid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Verify the contiguous dataset in the root group */ + misc13_verify_dataset(fid, MISC13_DSET1_NAME, data); + + /* Verify the chunked dataset in the root group */ + misc13_verify_dataset(fid, MISC13_DSET2_NAME, data); + + /* Verify the "new" contiguous dataset in the root group, if asked */ + if (check_for_new_dataset) + misc13_verify_dataset(fid, MISC13_DSET3_NAME, data); + + /* Open the named datatype in the root group */ + tid = H5Topen2(fid, MISC13_DTYPE_NAME, H5P_DEFAULT); + CHECK(tid, FAIL, "H5Topen2"); + + /* Verify the type is correct */ + VERIFY(H5Tequal(tid, H5T_NATIVE_INT), TRUE, "H5Tequal"); + + /* Close named datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Open the first group */ + gid1 = H5Gopen2(fid, MISC13_GROUP1_NAME, H5P_DEFAULT); + CHECK(gid1, FAIL, "H5Gopen2"); + + /* Verify the contiguous dataset in the first group */ + misc13_verify_dataset(gid1, MISC13_DSET1_NAME, data); + + /* Verify the chunked dataset in the first group */ + misc13_verify_dataset(gid1, MISC13_DSET2_NAME, data); + + /* Open the named datatype in the first group */ + tid = H5Topen2(gid1, MISC13_DTYPE_NAME, H5P_DEFAULT); + CHECK(tid, FAIL, "H5Topen2"); + + /* Verify the type is correct */ + VERIFY(H5Tequal(tid, H5T_NATIVE_INT), TRUE, "H5Tequal"); + + /* Close named datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Open the second group */ + gid2 = H5Gopen2(gid1, MISC13_GROUP2_NAME, H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gopen2"); + + /* Close the second group */ + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close the first group */ + ret = H5Gclose(gid1); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close the file */ 
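/*
 * Aside: misc13_insert_user_block() above splices the user block in by hand with stdio, and this
 * routine then checks that the library still recognizes the file. When a user block is wanted from
 * the start, it can instead be reserved at creation time through the file creation property list.
 * A minimal sketch, assuming a hypothetical file name "ub.h5" and a 512-byte block (the size must
 * be 0 or a power of two >= 512):
 */
#if 0
    hid_t   fcpl = H5Pcreate(H5P_FILE_CREATE);
    H5Pset_userblock(fcpl, (hsize_t)512);
    hid_t   f   = H5Fcreate("ub.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
    hid_t   chk = H5Fget_create_plist(f);
    hsize_t ub  = 0;
    H5Pget_userblock(chk, &ub); /* ub is now 512 */
    H5Pclose(chk);
    H5Pclose(fcpl);
    H5Fclose(f);
#endif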
+ ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end misc13_verify_file() */ + +static void +misc13_add_to_new_file(const char *name, const unsigned *data) +{ + hid_t fid = -1; /* File ID */ + herr_t ret; /* Generic return value */ + + /* Open the file */ + fid = H5Fopen(name, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create new contiguous dataset in root group */ + misc13_create_dataset(fid, MISC13_DSET3_NAME, H5P_DEFAULT, data); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end misc13_add_to_new_file() */ + +/**************************************************************** +** +** test_misc13(): Test that file contents can be "slid down" by +** inserting a user block in front of an existing file. +** +****************************************************************/ +static void +test_misc13(void) +{ + unsigned *data = NULL; /* Data to write to dataset */ + hsize_t userblock_size; /* Correct size of userblock */ + hbool_t check_for_new_dataset; /* Whether to check for the post-userblock-creation dataset */ + + /* Create a data buffer for the datasets */ + data = (unsigned *)HDcalloc(MISC13_DIM1, sizeof(unsigned)); + CHECK_PTR(data, "HDcalloc"); + + /* Initialize data to write */ + misc13_init_data(data); + + /* Create first file, with no user block */ + misc13_create_hdf_file(MISC13_FILE_1, data); + + /* Verify file contents are correct */ + userblock_size = 0; + check_for_new_dataset = FALSE; + misc13_verify_file(MISC13_FILE_1, data, userblock_size, check_for_new_dataset); + + /* Create a new file by inserting a user block in front of the first file */ + misc13_insert_user_block(MISC13_FILE_1, MISC13_FILE_2, "Test String", (size_t)MISC13_USERBLOCK_SIZE); + + /* Verify file contents are still correct */ + userblock_size = MISC13_USERBLOCK_SIZE; + check_for_new_dataset = FALSE; + misc13_verify_file(MISC13_FILE_2, data, userblock_size, check_for_new_dataset); + + /* Make certain we can modify the new file */ + misc13_add_to_new_file(MISC13_FILE_2, data); + + /* Verify file contents are still correct */ + userblock_size = MISC13_USERBLOCK_SIZE; + check_for_new_dataset = TRUE; + misc13_verify_file(MISC13_FILE_2, data, userblock_size, check_for_new_dataset); + + /* Free the dataset buffer */ + HDfree(data); + +} /* end test_misc13() */ +#endif + +/**************************************************************** +** +** test_misc14(): Test that deleting a dataset does not disturb the raw +** data of the sibling datasets that remain in the file. 
+** +****************************************************************/ +static void +test_misc14(void) +{ + hid_t file_id; /* File ID */ + hid_t fapl; /* File access property list ID */ + hid_t DataSpace; /* Dataspace ID */ + hid_t Dataset1; /* Dataset ID #1 */ + hid_t Dataset2; /* Dataset ID #2 */ + hid_t Dataset3; /* Dataset ID #3 */ + double data1 = 5.0; /* Data to write for dataset #1 */ + double data2 = 10.0; /* Data to write for dataset #2 */ + double data3 = 15.0; /* Data to write for dataset #3 */ + double rdata; /* Data read in */ + herr_t ret; /* Generic return value */ + + /* Test creating two datasets and deleting the second */ + + /* Increase the metadata block size */ + /* (This makes certain that all the data blocks are allocated together) */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + ret = H5Pset_meta_block_size(fapl, (hsize_t)MISC14_METADATA_SIZE); + CHECK(ret, FAIL, "H5Pset_meta_block_size"); + + /* Create dataspace to use */ + DataSpace = H5Screate(H5S_SCALAR); + CHECK(DataSpace, FAIL, "H5Screate"); + + /* Open the file */ + file_id = H5Fcreate(MISC14_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file_id, FAIL, "H5Fcreate"); + + /* Create first dataset & write data */ + Dataset1 = H5Dcreate2(file_id, MISC14_DSET1_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK(Dataset1, FAIL, "H5Dcreate2"); + + ret = H5Dwrite(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data1); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Create second dataset (to be unlinked). */ + Dataset2 = H5Dcreate2(file_id, MISC14_DSET2_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK(Dataset2, FAIL, "H5Dcreate2"); + + ret = H5Dwrite(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data2); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Check data from first dataset */ + ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); + CHECK(ret, FAIL, "H5Dread"); + if (!H5_DBL_ABS_EQUAL(rdata, data1)) + TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__); + + /* Unlink second dataset */ + ret = H5Ldelete(file_id, MISC14_DSET2_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close second dataset */ + ret = H5Dclose(Dataset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Verify the data from dataset #1 */ + ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); + CHECK(ret, FAIL, "H5Dread"); + if (!H5_DBL_ABS_EQUAL(rdata, data1)) + TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__); + + /* Close first dataset */ + ret = H5Dclose(Dataset1); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the file */ + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + + /* Test creating two datasets and deleting the first */ + + /* Open the file */ + file_id = H5Fcreate(MISC14_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file_id, FAIL, "H5Fcreate"); + + /* Create first dataset & write data */ + Dataset1 = H5Dcreate2(file_id, MISC14_DSET1_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK(Dataset1, FAIL, "H5Dcreate2"); + + ret = H5Dwrite(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data1); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Create second dataset */ + Dataset2 = H5Dcreate2(file_id, MISC14_DSET2_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK(Dataset2, FAIL, "H5Dcreate2"); + + ret = H5Dwrite(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, 
H5P_DEFAULT, &data2); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Check data from second dataset */ + ret = H5Dread(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); + CHECK(ret, FAIL, "H5Dread"); + if (!H5_DBL_ABS_EQUAL(rdata, data2)) + TestErrPrintf("Error on line %d: data2!=rdata\n", __LINE__); + + /* Unlink first dataset */ + ret = H5Ldelete(file_id, MISC14_DSET1_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close first dataset */ + ret = H5Dclose(Dataset1); + CHECK(ret, FAIL, "H5Dclose"); + + /* Verify the data from dataset #2 */ + ret = H5Dread(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); + CHECK(ret, FAIL, "H5Dread"); + if (!H5_DBL_ABS_EQUAL(rdata, data2)) + TestErrPrintf("Error on line %d: data2!=rdata\n", __LINE__); + + /* Close second dataset */ + ret = H5Dclose(Dataset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the file */ + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + + /* Test creating three datasets and deleting the second */ + + /* Open the file */ + file_id = H5Fcreate(MISC14_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file_id, FAIL, "H5Fcreate"); + + /* Create first dataset & write data */ + Dataset1 = H5Dcreate2(file_id, MISC14_DSET1_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK(Dataset1, FAIL, "H5Dcreate2"); + + ret = H5Dwrite(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data1); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Create second dataset */ + Dataset2 = H5Dcreate2(file_id, MISC14_DSET2_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK(Dataset2, FAIL, "H5Dcreate2"); + + ret = H5Dwrite(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data2); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Create third dataset */ + Dataset3 = H5Dcreate2(file_id, MISC14_DSET3_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK(Dataset3, FAIL, "H5Dcreate2"); + + ret = H5Dwrite(Dataset3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data3); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Check data from first dataset */ + ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); + CHECK(ret, FAIL, "H5Dread"); + if (!H5_DBL_ABS_EQUAL(rdata, data1)) + TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__); + + /* Check data from third dataset */ + ret = H5Dread(Dataset3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); + CHECK(ret, FAIL, "H5Dread"); + if (!H5_DBL_ABS_EQUAL(rdata, data3)) + TestErrPrintf("Error on line %d: data3!=rdata\n", __LINE__); + + /* Unlink second dataset */ + ret = H5Ldelete(file_id, MISC14_DSET2_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close second dataset */ + ret = H5Dclose(Dataset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Verify the data from dataset #1 */ + ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); + CHECK(ret, FAIL, "H5Dread"); + if (!H5_DBL_ABS_EQUAL(rdata, data1)) + TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__); + + /* Verify the data from dataset #3 */ + ret = H5Dread(Dataset3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); + CHECK(ret, FAIL, "H5Dread"); + if (!H5_DBL_ABS_EQUAL(rdata, data3)) + TestErrPrintf("Error on line %d: data3!=rdata\n", __LINE__); + + /* Close first dataset */ + ret = H5Dclose(Dataset1); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close third dataset */ + ret = H5Dclose(Dataset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* 
Close the file */ + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close shared objects (dataspace & fapl) */ + ret = H5Sclose(DataSpace); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + +} /* end test_misc14() */ + +/**************************************************************** +** +** test_misc15(): Test that checking a file's access property list +** more than once correctly increments internal reference counts. +** +****************************************************************/ +static void +test_misc15(void) +{ + char filename[MISC15_BUF_SIZE]; + hid_t file; /* File ID */ + hid_t fapl; /* File access property list */ + herr_t ret; /* Generic return value */ + + fapl = h5_fileaccess(); + h5_fixname(MISC15_FILE, fapl, filename, MISC15_BUF_SIZE); + + /* Create the file & get it's FAPL */ + file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(file, FAIL, "H5Fcreate"); + + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + fapl = H5Fget_access_plist(file); + CHECK(fapl, FAIL, "H5Fget_access_plist"); + + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open the file & get it's FAPL again */ + file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fopen"); + + fapl = H5Fget_access_plist(file); + CHECK(fapl, FAIL, "H5Fget_access_plist"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Verify that the file is still OK */ + ret = H5Fis_accessible(filename, fapl); + CHECK(ret, FAIL, "H5Fis_accessible"); + + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fopen"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc15() */ + +/**************************************************************** +** +** test_misc16(): Test array of NULL-terminated +** fixed-length string. It creates a dataset of fixed-length +** strings. Each string is MISC16_STR_SIZE long. There are +** totally MISC16_SPACE_DIM by MISC16_SPACE_RANK strings. 
+** +****************************************************************/ +static void +test_misc16(void) +{ + hid_t file; /* File ID */ + herr_t ret; /* Generic return value */ + char wdata[MISC16_SPACE_DIM][MISC16_STR_SIZE]; + char rdata[MISC16_SPACE_DIM][MISC16_STR_SIZE]; /* Information read in */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + hsize_t dims[] = {MISC16_SPACE_DIM}; + int i; + + HDmemset(wdata, 0, sizeof(wdata)); + HDmemset(rdata, 0, sizeof(rdata)); + + /* Initialize the data */ + /* (Note that these are supposed to stress the code, so are a little weird) */ + HDmemcpy(wdata[0], "1234567", MISC16_STR_SIZE); + HDmemcpy(wdata[1], "1234567\0", MISC16_STR_SIZE); + HDmemcpy(wdata[2], "12345678", MISC16_STR_SIZE); + HDmemcpy(wdata[3], "\0\0\0\0\0\0\0\0", MISC16_STR_SIZE); + + /* Create the file */ + file = H5Fcreate(MISC16_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid = H5Screate_simple(MISC16_SPACE_RANK, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create a datatype to refer to */ + tid = H5Tcopy(H5T_C_S1); + CHECK(tid, FAIL, "H5Tcopy"); + + ret = H5Tset_size(tid, (size_t)MISC16_STR_SIZE); + CHECK(ret, FAIL, "H5Tset_size"); + + /*ret = H5Tset_strpad(tid,H5T_STR_NULLPAD); + CHECK(ret, FAIL, "H5Tset_strpad");*/ + + /* Create a dataset */ + dataset = H5Dcreate2(file, MISC16_DSET_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < MISC16_SPACE_DIM; i++) { + if (HDstrlen(wdata[i]) != HDstrlen(rdata[i])) { + TestErrPrintf( + "Line %u: VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", + (unsigned)__LINE__, (int)i, (int)HDstrlen(wdata[i]), (int)i, (int)HDstrlen(rdata[i])); + continue; + } /* end if */ + if (HDstrcmp(wdata[i], rdata[i]) != 0) { + TestErrPrintf("Line %u: VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", + (unsigned)__LINE__, (int)i, wdata[i], (int)i, rdata[i]); + continue; + } /* end if */ + } /* end for */ + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc16() */ + +/**************************************************************** +** +** test_misc17(): Test array of characters. It creates a dataset +** of ASCII characters, with dimensionality of MISC17_SPACE_DIM1 +** by MISC17_SPACE_DIM2. 
+** +****************************************************************/ +static void +test_misc17(void) +{ + hid_t file; /* File ID */ + herr_t ret; /* Generic return value */ + char wdata[MISC17_SPACE_DIM1][MISC17_SPACE_DIM2]; + char rdata[MISC17_SPACE_DIM1][MISC17_SPACE_DIM2]; /* Information read in */ + hid_t dataset; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + hsize_t dims[] = {MISC17_SPACE_DIM1, MISC17_SPACE_DIM2}; + int i; + + HDmemset(wdata, 0, sizeof(wdata)); + HDmemset(rdata, 0, sizeof(rdata)); + + /* Initialize the data */ + /* (Note that these are supposed to stress the code, so are a little weird) */ + HDmemcpy(wdata[0], "1234567", MISC17_SPACE_DIM2); + HDmemcpy(wdata[1], "1234567\0", MISC17_SPACE_DIM2); + HDmemcpy(wdata[2], "12345678", MISC17_SPACE_DIM2); + HDmemcpy(wdata[3], "\0\0\0\0\0\0\0\0", MISC17_SPACE_DIM2); + + /* Create the file */ + file = H5Fcreate(MISC17_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid = H5Screate_simple(MISC17_SPACE_RANK, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create a datatype to refer to */ + tid = H5Tcopy(H5T_C_S1); + CHECK(tid, FAIL, "H5Tcopy"); + + ret = H5Tset_strpad(tid, H5T_STR_NULLPAD); + CHECK(ret, FAIL, "H5Tset_strpad"); + + /* Create a dataset */ + dataset = H5Dcreate2(file, MISC17_DSET_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data in the way of strings. */ + for (i = 0; i < MISC17_SPACE_DIM1; i++) { + if (HDstrlen(wdata[i]) != HDstrlen(rdata[i])) { + TestErrPrintf( + "Line %u: VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", + (unsigned)__LINE__, (int)i, (int)HDstrlen(wdata[i]), (int)i, (int)HDstrlen(rdata[i])); + continue; + } /* end if */ + if (HDstrcmp(wdata[i], rdata[i]) != 0) { + TestErrPrintf("Line %u: VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", + (unsigned)__LINE__, (int)i, wdata[i], (int)i, rdata[i]); + continue; + } /* end if */ + } /* end for */ + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc17() */ + +/**************************************************************** +** +** test_misc18(): Test new object header information in H5O_info_t +** struct. 
+** +****************************************************************/ +static void +test_misc18(void) +{ + hid_t fid; /* File ID */ + hid_t sid; /* 'Space ID */ + hid_t did1, did2; /* Dataset IDs */ + hid_t aid; /* Attribute ID */ +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS + H5O_info1_t old_oinfo; /* (deprecated) information about object */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif + H5O_info2_t oinfo; /* Data model information about object */ +#if 0 + H5O_native_info_t ninfo; /* Native file format information about object */ +#endif + char attr_name[32]; /* Attribute name buffer */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Create the file */ + fid = H5Fcreate(MISC18_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataspace for attributes */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create first dataset */ + did1 = H5Dcreate2(fid, MISC18_DSET1_NAME, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did1, FAIL, "H5Dcreate2"); + + /* Get object information */ + ret = H5Oget_info_by_name3(fid, MISC18_DSET1_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name"); + VERIFY(oinfo.num_attrs, 0, "H5Oget_info_by_name"); +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS + ret = H5Oget_info_by_name2(fid, MISC18_DSET1_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nmesgs, 6, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nchunks, 1, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.total, 272, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.free, 152, "H5Oget_info_by_name"); + VERIFY(old_oinfo.num_attrs, 0, "H5Oget_info_by_name"); +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + ret = H5Oget_native_info_by_name(fid, MISC18_DSET1_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nmesgs, 6, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nchunks, 1, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.total, 272, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.free, 152, "H5Oget_native_info_by_name"); +#endif + + /* Create second dataset */ + did2 = H5Dcreate2(fid, MISC18_DSET2_NAME, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did2, FAIL, "H5Dcreate2"); + + /* Get object information */ + ret = H5Oget_info_by_name3(fid, MISC18_DSET2_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name"); + VERIFY(oinfo.num_attrs, 0, "H5Oget_info_by_name"); +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS + ret = H5Oget_info_by_name2(fid, MISC18_DSET2_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nmesgs, 6, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nchunks, 1, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.total, 272, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.free, 152, "H5Oget_info_by_name"); + VERIFY(old_oinfo.num_attrs, 0, "H5Oget_info_by_name"); +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + ret = H5Oget_native_info_by_name(fid, MISC18_DSET2_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nmesgs, 6, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nchunks, 1, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.total, 272, "H5Oget_native_info_by_name"); + 
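/*
 * Note: the header byte counts checked in these #if 0 blocks (272 total / 152 free here, and
 * 888 total / 16 free after the attribute loop below) are tied to this exact sequence of creates
 * and flushes and to the native object-header layout; they are not general-purpose constants.
 */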
VERIFY(ninfo.hdr.space.free, 152, "H5Oget_native_info_by_name"); +#endif + + /* Loop creating attributes on each dataset, flushing them to the file each time */ + for (u = 0; u < 10; u++) { + /* Set up attribute name */ + HDsnprintf(attr_name, sizeof(attr_name), "Attr %u", u); + + /* Create & close attribute on first dataset */ + aid = H5Acreate2(did1, attr_name, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Create & close attribute on second dataset */ + aid = H5Acreate2(did2, attr_name, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Flush file, to 'fix' size of dataset object headers */ + ret = H5Fflush(fid, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + } /* end for */ + + /* Get object information for dataset #1 now */ + ret = H5Oget_info_by_name3(fid, MISC18_DSET1_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name"); + VERIFY(oinfo.num_attrs, 10, "H5Oget_info_by_name"); +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS + ret = H5Oget_info_by_name2(fid, MISC18_DSET1_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nmesgs, 24, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nchunks, 9, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.total, 888, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.free, 16, "H5Oget_info_by_name"); + VERIFY(old_oinfo.num_attrs, 10, "H5Oget_info_by_name"); +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + ret = H5Oget_native_info_by_name(fid, MISC18_DSET1_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nmesgs, 24, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nchunks, 9, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.total, 888, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.free, 16, "H5Oget_native_info_by_name"); +#endif + + /* Get object information for dataset #2 now */ + ret = H5Oget_info_by_name3(fid, MISC18_DSET2_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name"); + VERIFY(oinfo.num_attrs, 10, "H5Oget_info_by_name"); +#if 0 +#ifndef H5_NO_DEPRECATED_SYMBOLS + ret = H5Oget_info_by_name2(fid, MISC18_DSET2_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nmesgs, 24, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nchunks, 9, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.total, 888, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.free, 16, "H5Oget_info_by_name"); + VERIFY(old_oinfo.num_attrs, 10, "H5Oget_info_by_name"); +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + ret = H5Oget_native_info_by_name(fid, MISC18_DSET2_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_mative_info_by_name"); + VERIFY(ninfo.hdr.nmesgs, 24, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nchunks, 9, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.total, 888, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.free, 16, "H5Oget_native_info_by_name"); +#endif + + /* Close second dataset */ + ret = H5Dclose(did2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close first dataset */ + ret = H5Dclose(did1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + 
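/*
 * Aside: the inc/dec pattern that test_misc19() below applies to every ID type, in minimal form
 * (hypothetical file name "refcnt.h5"). An extra H5Iinc_ref() keeps the ID alive across one close
 * call, and the ID only goes away once its reference count drops to zero:
 */
#if 0
    hid_t f = H5Fcreate("refcnt.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    H5Iinc_ref(f);     /* reference count 1 -> 2 */
    H5Fclose(f);       /* 2 -> 1, the ID is still valid */
    if (H5Iget_ref(f) == 1)
        H5Idec_ref(f); /* 1 -> 0, the ID is released */
#endif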
+ ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc18() */ + +/**************************************************************** +** +** test_misc19(): Test incrementing & decrementing ref count on IDs +** +****************************************************************/ +static void +test_misc19(void) +{ + hid_t fid = -1; /* File ID */ + hid_t sid = -1; /* Dataspace ID */ + hid_t did = -1; /* Dataset ID */ + hid_t tid = -1; /* Datatype ID */ + hid_t aid = -1; /* Attribute ID */ + hid_t plid = -1; /* Property List ID */ + hid_t pcid = -1; /* Property Class ID */ + hid_t gid = -1; /* Group ID */ + hid_t ecid = -1; /* Error Class ID */ + hid_t emid = -1; /* Error Message ID */ + hid_t esid = -1; /* Error Stack ID */ +#if 0 + hid_t vfdid = -1; /* Virtual File Driver ID */ + hid_t volid = -1; /* Virtual Object Layer ID */ + H5FD_class_t *vfd_cls = NULL; /* VFD class */ + H5VL_class_t *vol_cls = NULL; /* VOL class */ +#endif + int rc; /* Reference count */ + herr_t ret; /* Generic return value */ + + /* Check H5I operations on files */ + + /* Create the file */ + fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Check the reference count */ + rc = H5Iget_ref(fid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Inc the reference count */ + rc = H5Iinc_ref(fid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Close the file normally */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Check the reference count */ + rc = H5Iget_ref(fid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Close the file by decrementing the reference count */ + rc = H5Idec_ref(fid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try closing the file again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Fclose(fid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Fclose"); + + /* Check H5I operations on property lists */ + + /* Create the property list */ + plid = H5Pcreate(H5P_DATASET_CREATE); + CHECK(plid, FAIL, "H5Pcreate"); + + /* Check the reference count */ + rc = H5Iget_ref(plid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Inc the reference count */ + rc = H5Iinc_ref(plid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Close the property list normally */ + ret = H5Pclose(plid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Check the reference count */ + rc = H5Iget_ref(plid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Close the property list by decrementing the reference count */ + rc = H5Idec_ref(plid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try closing the property list again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Pclose(plid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pclose"); + + /* Check H5I operations on property classes */ + + /* Create a property class */ + pcid = H5Pcreate_class(H5P_DATASET_CREATE, "foo", NULL, NULL, NULL, NULL, NULL, NULL); + CHECK(pcid, FAIL, "H5Pcreate_class"); + + /* Check the reference count */ + rc = H5Iget_ref(pcid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Inc the reference count */ + rc = H5Iinc_ref(pcid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Close the property class normally */ + ret = H5Pclose_class(pcid); + CHECK(ret, FAIL, "H5Pclose_class"); + + /* Check the reference count */ + rc = H5Iget_ref(pcid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Close the property class by decrementing the reference count */ + rc = H5Idec_ref(pcid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try closing the property class again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Pclose_class(pcid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Pclose_class"); + + /* Check H5I 
operations on datatypes */ + + /* Create a datatype */ + tid = H5Tcreate(H5T_OPAQUE, (size_t)16); + CHECK(tid, FAIL, "H5Tcreate"); + + /* Check the reference count */ + rc = H5Iget_ref(tid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Inc the reference count */ + rc = H5Iinc_ref(tid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Close the datatype normally */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Check the reference count */ + rc = H5Iget_ref(tid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Close the datatype by decrementing the reference count */ + rc = H5Idec_ref(tid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try closing the datatype again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Tclose(tid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Tclose"); + + /* Check H5I operations on dataspaces */ + + /* Create a dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Check the reference count */ + rc = H5Iget_ref(sid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Inc the reference count */ + rc = H5Iinc_ref(sid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Close the dataspace normally */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Check the reference count */ + rc = H5Iget_ref(sid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Close the dataspace by decrementing the reference count */ + rc = H5Idec_ref(sid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try closing the dataspace again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Sclose(sid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sclose"); + + /* Check H5I operations on datasets */ + + /* Create a file */ + fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create a dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create a dataset */ + did = H5Dcreate2(fid, MISC19_DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Check the reference count */ + rc = H5Iget_ref(did); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Inc the reference count */ + rc = H5Iinc_ref(did); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Close the dataset normally */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Check the reference count */ + rc = H5Iget_ref(did); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Close the dataset by decrementing the reference count */ + rc = H5Idec_ref(did); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try closing the dataset again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Dclose(did); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Dclose"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Check H5I operations on attributes */ + + /* Create a file */ + fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Open the root group */ + gid = H5Gopen2(fid, "/", H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Create a dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create an attribute */ + aid = H5Acreate2(gid, MISC19_ATTR_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Check the reference count */ + rc = H5Iget_ref(aid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Inc the reference count */ + rc = H5Iinc_ref(aid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Close the dataset normally */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, 
"H5Aclose"); + + /* Check the reference count */ + rc = H5Iget_ref(aid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Close the attribute by decrementing the reference count */ + rc = H5Idec_ref(aid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try closing the attribute again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Aclose(aid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Aclose"); + + /* Close the root group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Check H5I operations on groups */ + + /* Create a file */ + fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create a group */ + gid = H5Gcreate2(fid, MISC19_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Check the reference count */ + rc = H5Iget_ref(gid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Inc the reference count */ + rc = H5Iinc_ref(gid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Close the group normally */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Check the reference count */ + rc = H5Iget_ref(gid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Close the group by decrementing the reference count */ + rc = H5Idec_ref(gid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try closing the group again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Gclose(gid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Gclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Check H5I operations on error classes */ + + /* Create an error class */ + ecid = H5Eregister_class("foo", "bar", "baz"); + CHECK(ecid, FAIL, "H5Eregister_class"); + + /* Check the reference count */ + rc = H5Iget_ref(ecid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Inc the reference count */ + rc = H5Iinc_ref(ecid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Close the error class normally */ + ret = H5Eunregister_class(ecid); + CHECK(ret, FAIL, "H5Eunregister_class"); + + /* Check the reference count */ + rc = H5Iget_ref(ecid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Close the error class by decrementing the reference count */ + rc = H5Idec_ref(ecid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try closing the error class again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Eunregister_class(ecid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Eunregister_class"); + + /* Check H5I operations on error messages */ + + /* Create an error class */ + ecid = H5Eregister_class("foo", "bar", "baz"); + CHECK(ecid, FAIL, "H5Eregister_class"); + + /* Create an error message */ + emid = H5Ecreate_msg(ecid, H5E_MAJOR, "mumble"); + CHECK(emid, FAIL, "H5Ecreate_msg"); + + /* Check the reference count */ + rc = H5Iget_ref(emid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Inc the reference count */ + rc = H5Iinc_ref(emid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Close the error message normally */ + ret = H5Eclose_msg(emid); + CHECK(ret, FAIL, "H5Eclose_msg"); + + /* Check the reference count */ + rc = H5Iget_ref(emid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Close the error message by decrementing the reference count */ + rc = H5Idec_ref(emid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try closing the error message again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Eclose_msg(emid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Eclose_msg"); + + /* Close the error class */ + ret = H5Eunregister_class(ecid); + CHECK(ret, 
FAIL, "H5Eunregister_class"); + + /* Check H5I operations on error stacks */ + + /* Create an error stack */ + esid = H5Eget_current_stack(); + CHECK(esid, FAIL, "H5Eget_current_stack"); + + /* Check the reference count */ + rc = H5Iget_ref(esid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Inc the reference count */ + rc = H5Iinc_ref(esid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Close the error stack normally */ + ret = H5Eclose_stack(esid); + CHECK(ret, FAIL, "H5Eclose_stack"); + + /* Check the reference count */ + rc = H5Iget_ref(esid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Close the error stack by decrementing the reference count */ + rc = H5Idec_ref(esid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try closing the error stack again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Eclose_stack(esid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Eclose_stack"); + +#if 0 + /* Check H5I operations on virtual file drivers */ + + /* Get a VFD class to register */ + vfd_cls = h5_get_dummy_vfd_class(); + CHECK_PTR(vfd_cls, "h5_get_dummy_vfd_class"); + + /* Register a virtual file driver */ + vfdid = H5FDregister(vfd_cls); + CHECK(vfdid, FAIL, "H5FDregister"); + + /* Check the reference count */ + rc = H5Iget_ref(vfdid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Increment the reference count */ + rc = H5Iinc_ref(vfdid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Unregister the VFD normally */ + ret = H5FDunregister(vfdid); + CHECK(ret, FAIL, "H5FDunregister"); + + /* Check the reference count */ + rc = H5Iget_ref(vfdid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Unregister the VFD by decrementing the reference count */ + rc = H5Idec_ref(vfdid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try unregistering the VFD again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5FDunregister(vfdid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5FDunregister"); + + HDfree(vfd_cls); + + /* Check H5I operations on virtual object connectors */ + + /* Get a VOL class to register */ + vol_cls = h5_get_dummy_vol_class(); + CHECK_PTR(vol_cls, "h5_get_dummy_vol_class"); + + /* Register a VOL connector */ + volid = H5VLregister_connector(vol_cls, H5P_DEFAULT); + CHECK(volid, FAIL, "H5VLregister_connector"); + + /* Check the reference count */ + rc = H5Iget_ref(volid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Increment the reference count */ + rc = H5Iinc_ref(volid); + VERIFY(rc, 2, "H5Iinc_ref"); + + /* Unregister the VOL connector normally */ + ret = H5VLunregister_connector(volid); + CHECK(ret, FAIL, "H5VLunregister_connector"); + + /* Check the reference count */ + rc = H5Iget_ref(volid); + VERIFY(rc, 1, "H5Iget_ref"); + + /* Unregister the VOL connector by decrementing the reference count */ + rc = H5Idec_ref(volid); + VERIFY(rc, 0, "H5Idec_ref"); + + /* Try unregistering the VOL connector again (should fail) */ + H5E_BEGIN_TRY + { + ret = H5VLunregister_connector(volid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5VLunregister_connector"); + + HDfree(vol_cls); +#endif +} /* end test_misc19() */ + +/**************************************************************** +** +** test_misc20(): Test problems with version 2 of storage layout +** message truncating dimensions +** +****************************************************************/ +#if 0 +static void +test_misc20(void) +{ + hid_t fid; /* File ID */ + hid_t sid; /* 'Space ID */ + hid_t did; /* Dataset ID */ + hid_t dcpl; /* Dataset creation property list ID */ + int rank = MISC20_SPACE_RANK; /* Rank of dataspace */ + hsize_t big_dims[MISC20_SPACE_RANK] = {MISC20_SPACE_DIM0, MISC20_SPACE_DIM1}; /* Large 
dimensions */ + hsize_t small_dims[MISC20_SPACE_RANK] = {MISC20_SPACE2_DIM0, MISC20_SPACE2_DIM1}; /* Small dimensions */ + unsigned version; /* Version of storage layout info */ + hsize_t contig_size; /* Size of contiguous storage size from layout into */ + const char *testfile = H5_get_srcdir_filename(MISC20_FILE_OLD); /* Corrected test file name */ + hbool_t driver_is_default_compatible; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing large dimension truncation fix\n")); + + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); + CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); + + if (!driver_is_default_compatible) { + HDprintf("-- SKIPPED --\n"); + return; + } + + /* Verify that chunks with dimensions that are too large get rejected */ + + /* Create a dataset creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Try to use chunked storage for this dataset */ + ret = H5Pset_chunk(dcpl, rank, big_dims); + VERIFY(ret, FAIL, "H5Pset_chunk"); + + /* Verify that the storage for the dataset is the correct size and hasn't + * been truncated. + */ + + /* Create the file */ + fid = H5Fcreate(MISC20_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataspace with _really_ big dimensions */ + sid = H5Screate_simple(rank, big_dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Make certain that the dataset's storage doesn't get allocated :-) */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create dataset with big dataspace */ + did = H5Dcreate2(fid, MISC20_DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Close datasset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace with small dimensions */ + sid = H5Screate_simple(rank, small_dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create dataset with big dataspace */ + did = H5Dcreate2(fid, MISC20_DSET2_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Close datasset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + fid = H5Fopen(MISC20_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open dataset with big dimensions */ + did = H5Dopen2(fid, MISC20_DSET_NAME, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Get the layout version */ + ret = H5D__layout_version_test(did, &version); + CHECK(ret, FAIL, "H5D__layout_version_test"); + VERIFY(version, 3, "H5D__layout_version_test"); + + /* Get the layout contiguous storage size */ + ret = H5D__layout_contig_size_test(did, &contig_size); + CHECK(ret, FAIL, "H5D__layout_contig_size_test"); + VERIFY(contig_size, (MISC20_SPACE_DIM0 * MISC20_SPACE_DIM1 * H5Tget_size(H5T_NATIVE_INT)), + "H5D__layout_contig_size_test"); + + /* Close datasset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Open dataset with small dimensions */ + did = H5Dopen2(fid, MISC20_DSET2_NAME, H5P_DEFAULT); + 
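/*
 * Note on the version checks around here: layout message versions 1 and 2 stored each dataspace
 * dimension in a 32-bit field, which is how very large dimensions could be silently truncated;
 * version 3, expected by these checks, no longer stores dimensions in the layout message, so the
 * contiguous storage size must cover the full dataspace.
 */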
CHECK(did, FAIL, "H5Dopen2"); + + /* Get the layout version */ + ret = H5D__layout_version_test(did, &version); + CHECK(ret, FAIL, "H5D__layout_version_test"); + VERIFY(version, 3, "H5D__layout_version_test"); + + /* Get the layout contiguous storage size */ + ret = H5D__layout_contig_size_test(did, &contig_size); + CHECK(ret, FAIL, "H5D__layout_contig_size_test"); + VERIFY(contig_size, (MISC20_SPACE2_DIM0 * MISC20_SPACE2_DIM1 * H5Tget_size(H5T_NATIVE_INT)), + "H5D__layout_contig_size_test"); + + /* Close datasset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Verify that the storage size is computed correctly for older versions of layout info */ + + /* + * Open the old file and the dataset and get old settings. + */ + fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open dataset with small dimensions */ + did = H5Dopen2(fid, MISC20_DSET_NAME, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); + + /* Get the layout version */ + ret = H5D__layout_version_test(did, &version); + CHECK(ret, FAIL, "H5D__layout_version_test"); + VERIFY(version, 2, "H5D__layout_version_test"); + + /* Get the layout contiguous storage size */ + ret = H5D__layout_contig_size_test(did, &contig_size); + CHECK(ret, FAIL, "H5D__layout_contig_size_test"); + VERIFY(contig_size, (MISC20_SPACE_DIM0 * MISC20_SPACE_DIM1 * H5Tget_size(H5T_STD_I32LE)), + "H5D__layout_contig_size_test"); + + /* Close datasset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_misc20() */ +#endif + +/* + test_misc21 and test_misc22 should be executed when SZIP is present + and encoder is available. + EIP 2004/8/04 +*/ +#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS) + +/**************************************************************** +** +** test_misc21(): Test that late allocation time is treated the same +** as incremental allocation time, for chunked datasets +** when overwriting entire dataset where the chunks +** don't exactly match the dataspace. 
+** +****************************************************************/ +static void +test_misc21(void) +{ + hid_t fid, sid, dcpl, dsid; + char *buf; + hsize_t dims[2] = {MISC21_SPACE_DIM0, MISC21_SPACE_DIM1}, + chunk_size[2] = {MISC21_CHUNK_DIM0, MISC21_CHUNK_DIM1}; + herr_t ret; /* Generic return value */ + + if (h5_szip_can_encode() != 1) + return; + /* Output message about test being performed */ + MESSAGE(5, ("Testing late allocation time w/chunks & filters\n")); + + /* Allocate space for the buffer */ + buf = (char *)HDcalloc(MISC21_SPACE_DIM0 * MISC21_SPACE_DIM1, 1); + CHECK(buf, NULL, "HDcalloc"); + + /* Create the file */ + fid = H5Fcreate(MISC21_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create the DCPL */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Set custom DCPL properties */ + ret = H5Pset_chunk(dcpl, MISC21_SPACE_RANK, chunk_size); + CHECK(ret, FAIL, "H5Pset_chunk"); + ret = H5Pset_szip(dcpl, H5_SZIP_NN_OPTION_MASK, 8); + CHECK(ret, FAIL, "H5Pset_deflate"); + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + /* Create the dataspace for the dataset */ + sid = H5Screate_simple(MISC21_SPACE_RANK, dims, NULL); + CHECK(ret, FAIL, "H5Screate_simple"); + + /* Create the dataset */ + dsid = H5Dcreate2(fid, MISC21_DSET_NAME, H5T_NATIVE_UINT8, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dsid, FAIL, "H5Dcreate2"); + + /* Write out the whole dataset */ + ret = H5Dwrite(dsid, H5T_NATIVE_UINT8, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close everything */ + ret = H5Dclose(dsid); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + HDfree(buf); +} /* end test_misc21() */ + +/**************************************************************** +** +** test_misc22(): Test SZIP bits-per-pixel parameter. +** This should be set according to the datatype. +** Tests for precision and offset combo's. 
+** +****************************************************************/ +static void +test_misc22(void) +{ + hid_t fid, sid, dcpl, dsid, dcpl2; + char *buf; + hsize_t dims[2] = {MISC22_SPACE_DIM0, MISC22_SPACE_DIM1}, + chunk_size[2] = {MISC22_CHUNK_DIM0, MISC22_CHUNK_DIM1}; + herr_t ret; /* Generic return value */ + hid_t dtype; + /* should extend test to signed ints */ + hid_t idts[4]; + /* do the same for floats + hid_t fdts[2]={H5T_NATIVE_FLOAT32, + H5T_NATIVE_FLOAT64} + */ + size_t prec[4] = {3, 11, 19, 27}; + size_t offsets[5] = {0, 3, 11, 19, 27}; + int i, j, k; + unsigned int flags; + size_t cd_nelmts = 32; + unsigned int cd_values[32]; + size_t correct; + + if (h5_szip_can_encode() != 1) + return; + idts[0] = H5Tcopy(H5T_NATIVE_UINT8); + idts[1] = H5Tcopy(H5T_NATIVE_UINT16); + idts[2] = H5Tcopy(H5T_NATIVE_UINT32); + idts[3] = H5Tcopy(H5T_NATIVE_UINT64); + + /* Output message about test being performed */ + MESSAGE(5, ("Testing datatypes with SZIP filter\n")); + + /* Allocate space for the buffer */ + buf = (char *)HDcalloc(MISC22_SPACE_DIM0 * MISC22_SPACE_DIM1, 8); + CHECK(buf, NULL, "HDcalloc"); + + /* Create the file */ + fid = H5Fcreate(MISC22_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create the dataspace for the dataset */ + sid = H5Screate_simple(MISC22_SPACE_RANK, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + if (prec[j] > (H5Tget_size(idts[i]) * 8)) + continue; /* skip irrelevant combination */ + for (k = 0; k < 5; k++) { + if (offsets[k] > (H5Tget_size(idts[i]) * 8)) + continue; /* skip irrelevant combinations */ + if ((prec[j] + offsets[k]) > (H5Tget_size(idts[i]) * 8)) + continue; + + MESSAGE(5, (" Testing datatypes size=%zu precision=%u offset=%d\n", H5Tget_size(idts[i]), + (unsigned)prec[j], (unsigned)offsets[k])); + + /* Create the DCPL */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Set DCPL properties */ + ret = H5Pset_chunk(dcpl, MISC22_SPACE_RANK, chunk_size); + CHECK(ret, FAIL, "H5Pset_chunk"); + /* Set custom DCPL properties */ + ret = H5Pset_szip(dcpl, H5_SZIP_NN_OPTION_MASK, 32); /* vary the PPB */ + CHECK(ret, FAIL, "H5Pset_szip"); + + /* set up the datatype according to the loop */ + dtype = H5Tcopy(idts[i]); + CHECK(dtype, FAIL, "H5Tcopy"); + ret = H5Tset_precision(dtype, prec[j]); + CHECK(ret, FAIL, "H5Tset_precision"); + ret = H5Tset_offset(dtype, offsets[k]); + CHECK(ret, FAIL, "H5Tset_precision"); + + /* compute the correct PPB that should be set by SZIP */ + if (offsets[k] == 0) + correct = prec[j]; + else + correct = H5Tget_size(idts[i]) * 8; + if (correct > 24) { + if (correct <= 32) + correct = 32; + else if (correct <= 64) + correct = 64; + } /* end if */ + + /* Create the dataset */ + dsid = H5Dcreate2(fid, MISC22_DSET_NAME, dtype, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dsid, FAIL, "H5Dcreate2"); + + /* Write out the whole dataset */ + ret = H5Dwrite(dsid, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close everything */ + ret = H5Dclose(dsid); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + dsid = H5Dopen2(fid, MISC22_DSET_NAME, H5P_DEFAULT); + CHECK(dsid, FAIL, "H5Dopen2"); + + dcpl2 = H5Dget_create_plist(dsid); + CHECK(dcpl2, FAIL, "H5Dget_create_plist"); + + ret = H5Pget_filter_by_id2(dcpl2, H5Z_FILTER_SZIP, &flags, &cd_nelmts, cd_values, 0, NULL, + NULL); 
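/*
 * Worked examples of the value computed above in "correct" and read back below as cd_values[2]:
 *   uint16, precision 11, offset 0 -> 11 (precision used directly)
 *   uint16, precision 11, offset 3 -> 16 (full type width once offset != 0)
 *   uint32, precision 27, offset 0 -> 32 (anything over 24 bits rounds up to 32)
 */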
+ CHECK(ret, FAIL, "H5Pget_filter_by_id2"); + + VERIFY(cd_values[2], (unsigned)correct, "SZIP filter returned value for precision"); + + ret = H5Dclose(dsid); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Ldelete(fid, MISC22_DSET_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + ret = H5Pclose(dcpl2); + CHECK(ret, FAIL, "H5Pclose"); + } + } + } + ret = H5Tclose(idts[0]); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(idts[1]); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(idts[2]); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(idts[3]); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + HDfree(buf); +} /* end test_misc22() */ +#endif /* H5_HAVE_FILTER_SZIP */ + +/**************************************************************** +** +** test_misc23(): Test intermediate group creation. +** +****************************************************************/ +static void +test_misc23(void) +{ + hsize_t dims[] = {10}; + hid_t file_id = 0, group_id = 0, type_id = 0, space_id = 0, tmp_id = 0, create_id = H5P_DEFAULT, + access_id = H5P_DEFAULT; +#ifndef NO_OBJECT_GET_NAME + char objname[MISC23_NAME_BUF_SIZE]; /* Name of object */ +#endif + H5O_info2_t oinfo; + htri_t tri_status; +#ifndef NO_OBJECT_GET_NAME + ssize_t namelen; +#endif + herr_t status; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing intermediate group creation\n")); + + /* Create a new file using default properties. */ + file_id = H5Fcreate(MISC23_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fcreate"); + + /* Build some infrastructure */ + group_id = H5Gcreate2(file_id, "/A", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group_id, FAIL, "H5Gcreate2"); + + space_id = H5Screate_simple(1, dims, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + + type_id = H5Tcopy(H5T_STD_I32BE); + CHECK(type_id, FAIL, "H5Tcopy"); + +#ifndef H5_NO_DEPRECATED_SYMBOLS + /********************************************************************** + * test the old APIs + **********************************************************************/ + + H5E_BEGIN_TRY + { + tmp_id = H5Gcreate1(file_id, "/A/B00a/grp", (size_t)0); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Gcreate1"); + + /* Make sure that size_hint values that can't fit into a 32-bit + * unsigned integer are rejected. Only necessary on systems where + * size_t is a 64-bit type. 
+ */ + if (SIZE_MAX > UINT32_MAX) { + H5E_BEGIN_TRY + { + tmp_id = H5Gcreate1(file_id, "/size_hint_too_large", SIZE_MAX); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Gcreate1"); + } + + /* Make sure the largest size_hint value works */ + H5E_BEGIN_TRY + { + tmp_id = H5Gcreate1(file_id, "/largest_size_hint", UINT32_MAX); + } + H5E_END_TRY; + CHECK(tmp_id, FAIL, "H5Gcreate1"); + status = H5Gclose(tmp_id); + CHECK(status, FAIL, "H5Gclose"); + + tmp_id = H5Gcreate1(file_id, "/A/grp", (size_t)0); + CHECK(tmp_id, FAIL, "H5Gcreate1"); + status = H5Gclose(tmp_id); + CHECK(status, FAIL, "H5Gclose"); + + H5E_BEGIN_TRY + { + tmp_id = H5Dcreate1(file_id, "/A/B00c/dset", type_id, space_id, create_id); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Dcreate1"); + + tmp_id = H5Dcreate1(file_id, "/A/dset", type_id, space_id, create_id); + CHECK(tmp_id, FAIL, "H5Dcreate1"); + status = H5Dclose(tmp_id); + CHECK(status, FAIL, "H5Dclose"); +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + + /********************************************************************** + * test H5Gcreate2() + **********************************************************************/ + + /* Create link creation property list */ + create_id = H5Pcreate(H5P_LINK_CREATE); + CHECK(create_id, FAIL, "H5Pcreate"); + + /* Set flag for intermediate group creation */ + status = H5Pset_create_intermediate_group(create_id, TRUE); + CHECK(status, FAIL, "H5Pset_create_intermediate_group"); + + tmp_id = H5Gcreate2(file_id, "/A/B01/grp", create_id, H5P_DEFAULT, access_id); + CHECK(tmp_id, FAIL, "H5Gcreate2"); +#ifndef NO_OBJECT_GET_NAME + /* Query that the name of the new group is correct */ + namelen = H5Iget_name(tmp_id, objname, (size_t)MISC23_NAME_BUF_SIZE); + CHECK(namelen, FAIL, "H5Iget_name"); + VERIFY_STR(objname, "/A/B01/grp", "H5Iget_name"); +#endif + status = H5Gclose(tmp_id); + CHECK(status, FAIL, "H5Gclose"); + + /* Check that intermediate group is set up correctly */ + tmp_id = H5Gopen2(file_id, "/A/B01", H5P_DEFAULT); + CHECK(tmp_id, FAIL, "H5Gopen2"); + + status = H5Oget_info3(tmp_id, &oinfo, H5O_INFO_BASIC); + CHECK(status, FAIL, "H5Oget_info3"); + VERIFY(oinfo.rc, 1, "H5Oget_info3"); + + status = H5Gclose(tmp_id); + CHECK(status, FAIL, "H5Gclose"); + + tmp_id = H5Gcreate2(file_id, "/A/B02/C02/grp", create_id, H5P_DEFAULT, access_id); + CHECK(tmp_id, FAIL, "H5Gcreate2"); + + status = H5Gclose(tmp_id); + CHECK(status, FAIL, "H5Gclose"); + + tmp_id = H5Gcreate2(group_id, "B03/grp/", create_id, H5P_DEFAULT, access_id); + CHECK(tmp_id, FAIL, "H5Gcreate2"); + + status = H5Gclose(tmp_id); + CHECK(status, FAIL, "H5Gclose"); + + tmp_id = H5Gcreate2(group_id, "/A/B04/grp/", create_id, H5P_DEFAULT, access_id); + CHECK(tmp_id, FAIL, "H5Gcreate2"); + + status = H5Gclose(tmp_id); + CHECK(status, FAIL, "H5Gclose"); + + tmp_id = H5Gcreate2(file_id, "/A/B05/C05/A", create_id, H5P_DEFAULT, access_id); + CHECK(tmp_id, FAIL, "H5Gcreate2"); + + status = H5Gclose(tmp_id); + CHECK(status, FAIL, "H5Gclose"); + + status = H5Pclose(create_id); + CHECK(status, FAIL, "H5Pclose"); + + /********************************************************************** + * test H5Dcreate2() + **********************************************************************/ + + /* Create link creation property list */ + create_id = H5Pcreate(H5P_LINK_CREATE); + CHECK(create_id, FAIL, "H5Pcreate"); + + /* Set flag for intermediate group creation */ + status = H5Pset_create_intermediate_group(create_id, TRUE); + CHECK(status, FAIL, "H5Pset_create_intermediate_group"); + + tmp_id = H5Dcreate2(file_id, 
"/A/B06/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(tmp_id, FAIL, "H5Dcreate2"); + + status = H5Dclose(tmp_id); + CHECK(status, FAIL, "H5Dclose"); + + tmp_id = H5Dcreate2(file_id, "/A/B07/B07/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(tmp_id, FAIL, "H5Dcreate2"); + + status = H5Dclose(tmp_id); + CHECK(status, FAIL, "H5Dclose"); + + tmp_id = H5Dcreate2(group_id, "B08/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(tmp_id, FAIL, "H5Dcreate2"); + + status = H5Dclose(tmp_id); + CHECK(status, FAIL, "H5Dclose"); + + tmp_id = H5Dcreate2(group_id, "/A/B09/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(tmp_id, FAIL, "H5Dcreate2"); + + status = H5Dclose(tmp_id); + CHECK(status, FAIL, "H5Dclose"); + + tmp_id = H5Dcreate2(file_id, "/A/B10/C10/A/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(tmp_id, FAIL, "H5Dcreate2"); + + status = H5Dclose(tmp_id); + CHECK(status, FAIL, "H5Dclose"); + + status = H5Tclose(type_id); + CHECK(status, FAIL, "H5Tclose"); + + status = H5Sclose(space_id); + CHECK(status, FAIL, "H5Sclose"); + + status = H5Pclose(create_id); + CHECK(status, FAIL, "H5Pclose"); + + /********************************************************************** + * test H5Tcommit2() + **********************************************************************/ + + /* Create link creation property list */ + create_id = H5Pcreate(H5P_LINK_CREATE); + CHECK(create_id, FAIL, "H5Pcreate"); + + /* Set flag for intermediate group creation */ + status = H5Pset_create_intermediate_group(create_id, TRUE); + CHECK(status, FAIL, "H5Pset_create_intermediate_group"); + + tmp_id = H5Tcopy(H5T_NATIVE_INT16); + CHECK(tmp_id, FAIL, "H5Tcopy"); + + status = H5Tcommit2(file_id, "/A/B11/dtype", tmp_id, create_id, H5P_DEFAULT, access_id); + CHECK(status, FAIL, "H5Tcommit2"); + + status = H5Tclose(tmp_id); + CHECK(status, FAIL, "H5Tclose"); + + tmp_id = H5Tcopy(H5T_NATIVE_INT32); + CHECK(tmp_id, FAIL, "H5Tcopy"); + + status = H5Tcommit2(file_id, "/A/B12/C12/dtype", tmp_id, create_id, H5P_DEFAULT, access_id); + CHECK(status, FAIL, "H5Tcommit2"); + + status = H5Tclose(tmp_id); + CHECK(status, FAIL, "H5Tclose"); + + tmp_id = H5Tcopy(H5T_NATIVE_INT64); + CHECK(tmp_id, FAIL, "H5Tcopy"); + + status = H5Tcommit2(group_id, "B13/C12/dtype", tmp_id, create_id, H5P_DEFAULT, access_id); + CHECK(status, FAIL, "H5Tcommit2"); + + status = H5Tclose(tmp_id); + CHECK(status, FAIL, "H5Tclose"); + + tmp_id = H5Tcopy(H5T_NATIVE_FLOAT); + CHECK(tmp_id, FAIL, "H5Tcopy"); + + status = H5Tcommit2(group_id, "/A/B14/dtype", tmp_id, create_id, H5P_DEFAULT, access_id); + CHECK(status, FAIL, "H5Tcommit2"); + + status = H5Tclose(tmp_id); + CHECK(status, FAIL, "H5Tclose"); + + tmp_id = H5Tcopy(H5T_NATIVE_DOUBLE); + CHECK(tmp_id, FAIL, "H5Tcopy"); + + status = H5Tcommit2(file_id, "/A/B15/C15/A/dtype", tmp_id, create_id, H5P_DEFAULT, access_id); + CHECK(status, FAIL, "H5Tcommit2"); + + status = H5Tclose(tmp_id); + CHECK(status, FAIL, "H5Tclose"); + + status = H5Pclose(create_id); + CHECK(status, FAIL, "H5Pclose"); + + /********************************************************************** + * test H5Lcopy() + **********************************************************************/ + + /* Create link creation property list */ + create_id = H5Pcreate(H5P_LINK_CREATE); + CHECK(create_id, FAIL, "H5Pcreate"); + + /* Set flag for intermediate group creation */ + status = H5Pset_create_intermediate_group(create_id, TRUE); + CHECK(status, FAIL, 
"H5Pset_create_intermediate_group"); + + status = H5Lcopy(file_id, "/A/B01/grp", file_id, "/A/B16/grp", create_id, access_id); + CHECK(status, FAIL, "H5Lcopy"); + + tri_status = H5Lexists(file_id, "/A/B16/grp", access_id); + VERIFY(tri_status, TRUE, "H5Lexists"); + + tri_status = H5Lexists(file_id, "/A/B01/grp", access_id); + VERIFY(tri_status, TRUE, "H5Lexists"); + + /********************************************************************** + * test H5Lmove() + **********************************************************************/ + + status = H5Lmove(file_id, "/A/B16/grp", file_id, "/A/B17/grp", create_id, access_id); + CHECK(status, FAIL, "H5Lmove"); + + tri_status = H5Lexists(file_id, "/A/B17/grp", access_id); + VERIFY(tri_status, TRUE, "H5Lexists"); + + tri_status = H5Lexists(file_id, "/A/B16/grp", access_id); + VERIFY(tri_status, FALSE, "H5Lexists"); + + /********************************************************************** + * test H5Lcreate_hard() + **********************************************************************/ + + status = H5Lcreate_hard(file_id, "/A/B01/grp", file_id, "/A/B18/grp", create_id, access_id); + CHECK(status, FAIL, "H5Lcreate_hard"); + + tri_status = H5Lexists(file_id, "/A/B18/grp", access_id); + VERIFY(tri_status, TRUE, "H5Lexists"); + + /********************************************************************** + * test H5Lcreate_soft() + **********************************************************************/ + + status = H5Lcreate_soft("/A/B01/grp", file_id, "/A/B19/grp", create_id, access_id); + CHECK(status, FAIL, "H5Lcreate_soft"); + + tri_status = H5Lexists(file_id, "/A/B19/grp", access_id); + VERIFY(tri_status, TRUE, "H5Lexists"); + + /********************************************************************** + * test H5Lcreate_external() + **********************************************************************/ +#ifndef NO_EXTERNAL_LINKS + status = H5Lcreate_external("fake_filename", "fake_path", file_id, "/A/B20/grp", create_id, access_id); + CHECK(status, FAIL, "H5Lcreate_external"); + + tri_status = H5Lexists(file_id, "/A/B20/grp", access_id); + VERIFY(tri_status, TRUE, "H5Lexists"); +#endif + /********************************************************************** + * test H5Lcreate_ud() + **********************************************************************/ +#ifndef NO_USER_DEFINED_LINKS + status = + H5Lcreate_ud(file_id, "/A/B21/grp", H5L_TYPE_EXTERNAL, "file\0obj", (size_t)9, create_id, access_id); + CHECK(status, FAIL, "H5Lcreate_ud"); + + tri_status = H5Lexists(file_id, "/A/B21/grp", access_id); + VERIFY(tri_status, TRUE, "H5Lexists"); +#endif + /********************************************************************** + * close + **********************************************************************/ + + status = H5Pclose(create_id); + CHECK(status, FAIL, "H5Pclose"); + + status = H5Gclose(group_id); + CHECK(status, FAIL, "H5Gclose"); + + status = H5Fclose(file_id); + CHECK(status, FAIL, "H5Fclose"); + +} /* end test_misc23() */ + +/**************************************************************** +** +** test_misc24(): Test opening objects with inappropriate APIs +** +****************************************************************/ +static void +test_misc24(void) +{ +#if 0 + hid_t file_id = 0, group_id = 0, type_id = 0, space_id = 0, dset_id = 0, tmp_id = 0; + herr_t ret; /* Generic return value */ +#endif + + /* Output message about test being performed */ + MESSAGE(5, + ("Testing opening objects with inappropriate APIs - SKIPPED due to causing 
problems in HDF5\n")); +#if 0 + /* Create a new file using default properties. */ + file_id = H5Fcreate(MISC24_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + space_id = H5Screate(H5S_SCALAR); + CHECK(space_id, FAIL, "H5Screate"); + + /* Create group, dataset & named datatype objects */ + group_id = H5Gcreate2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group_id, FAIL, "H5Gcreate2"); + + dset_id = H5Dcreate2(file_id, MISC24_DATASET_NAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dcreate2"); + + type_id = H5Tcopy(H5T_NATIVE_INT); + CHECK(type_id, FAIL, "H5Tcopy"); + + ret = H5Tcommit2(file_id, MISC24_DATATYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Create soft links to the objects created */ + ret = H5Lcreate_soft(MISC24_GROUP_NAME, file_id, MISC24_GROUP_LINK, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_soft"); + + ret = H5Lcreate_soft(MISC24_DATASET_NAME, file_id, MISC24_DATASET_LINK, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_soft"); + + ret = H5Lcreate_soft(MISC24_DATATYPE_NAME, file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_soft"); + + /* Close IDs for objects */ + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Gclose(group_id); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Tclose(type_id); + CHECK(ret, FAIL, "H5Tclose"); + + /* Attempt to open each kind of object with wrong API, including using soft links */ + H5E_BEGIN_TRY + { + tmp_id = H5Dopen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Dopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Dopen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Dopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Topen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Topen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Topen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Topen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Gopen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Gopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Gopen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Gopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Topen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Topen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Topen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Topen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Gopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Gopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Dopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Dopen2"); + + /* Try again, with the object already open through valid call */ + /* Open group */ + group_id = H5Gopen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT); + 
CHECK(group_id, FAIL, "H5Gopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Dopen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Dopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Dopen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Dopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Topen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Topen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Topen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Topen2"); + + ret = H5Gclose(group_id); + CHECK(ret, FAIL, "H5Gclose"); + + /* Open dataset */ + dset_id = H5Dopen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Gopen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Gopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Gopen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Gopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Topen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Topen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Topen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Topen2"); + + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Open named datatype */ + type_id = H5Topen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Topen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Gopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Gopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Dopen2"); + + H5E_BEGIN_TRY + { + tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(tmp_id, FAIL, "H5Dopen2"); + + ret = H5Tclose(type_id); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close file */ + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); +#endif +} /* end test_misc24() */ + +/**************************************************************** +** +** test_misc25a(): Exercise null object header message merge bug +** with new file +** +****************************************************************/ +static void +test_misc25a(void) +{ + hid_t fid; /* File ID */ + hid_t gid, gid2, gid3; /* Group IDs */ + hid_t aid; /* Attribute ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Exercise null object header message bug\n")); + + /* Create file */ + fid = H5Fcreate(MISC25A_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create top group */ + gid = H5Gcreate2(fid, MISC25A_GROUP0_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Close top group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create first group */ + gid = H5Gcreate2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Close first group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create second group */ + gid2 = H5Gcreate2(fid, MISC25A_GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, 
H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gcreate2"); + + /* Close second group */ + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Re-open first group */ + gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Create dataspace for attribute */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataype for attribute */ + tid = H5Tcopy(H5T_C_S1); + CHECK(tid, FAIL, "H5Tcopy"); + ret = H5Tset_size(tid, (size_t)MISC25A_ATTR1_LEN); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Add 1st attribute on first group */ + aid = H5Acreate2(gid, MISC25A_ATTR1_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Create dataspace for 2nd attribute */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataype for attribute */ + tid = H5Tcopy(H5T_C_S1); + CHECK(tid, FAIL, "H5Tcopy"); + ret = H5Tset_size(tid, (size_t)MISC25A_ATTR2_LEN); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Add 2nd attribute on first group */ + aid = H5Acreate2(gid, MISC25A_ATTR2_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close 2nd attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close first group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create third group */ + gid3 = H5Gcreate2(fid, MISC25A_GROUP3_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid3, FAIL, "H5Gcreate2"); + + /* Close third group */ + ret = H5Gclose(gid3); + CHECK(ret, FAIL, "H5Gclose"); + + /* Re-open first group */ + gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Delete 2nd attribute */ + ret = H5Adelete(gid, MISC25A_ATTR2_NAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Close first group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Re-open first group */ + gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Create dataspace for 3rd attribute */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataype for attribute */ + tid = H5Tcopy(H5T_C_S1); + CHECK(tid, FAIL, "H5Tcopy"); + ret = H5Tset_size(tid, (size_t)MISC25A_ATTR3_LEN); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Add 3rd attribute on first group (smaller than 2nd attribute) */ + aid = H5Acreate2(gid, MISC25A_ATTR3_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close datatype */ + ret = 
H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close 3rd attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close first group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Re-open first group */ + gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Delete 3rd attribute */ + ret = H5Adelete(gid, MISC25A_ATTR3_NAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Create dataspace for 3rd attribute */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataype for attribute */ + tid = H5Tcopy(H5T_C_S1); + CHECK(tid, FAIL, "H5Tcopy"); + ret = H5Tset_size(tid, (size_t)MISC25A_ATTR2_LEN); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Re-create 2nd attribute on first group */ + aid = H5Acreate2(gid, MISC25A_ATTR2_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close 2nd attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close first group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Re-open first group */ + gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Delete 2nd attribute */ + ret = H5Adelete(gid, MISC25A_ATTR2_NAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Close first group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file */ + fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Re-open first group */ + gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Create dataspace for 3rd attribute */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create dataype for attribute */ + tid = H5Tcopy(H5T_C_S1); + CHECK(tid, FAIL, "H5Tcopy"); + ret = H5Tset_size(tid, (size_t)MISC25A_ATTR2_LEN); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Re-create 2nd attribute on first group */ + aid = H5Acreate2(gid, MISC25A_ATTR2_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close 2nd attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close first group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc25a() */ + +/**************************************************************** +** +** test_misc25b(): Exercise null object header message merge bug +** with existing file (This test relies on +** the file produced by test/gen_mergemsg.c) +** +****************************************************************/ +#if 0 +static void +test_misc25b(void) +{ + hid_t fid; /* File ID */ + hid_t gid; /* Group ID */ + const char *testfile = 
H5_get_srcdir_filename(MISC25B_FILE); /* Corrected test file name */ + hbool_t driver_is_default_compatible; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Exercise null object header message bug\n")); + + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); + CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); + + if (!driver_is_default_compatible) { + HDprintf("-- SKIPPED --\n"); + return; + } + + /* Open file */ + fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Re-open group with object header messages that will merge */ + gid = H5Gopen2(fid, MISC25B_GROUP, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Close first group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc25b() */ +#endif + +/**************************************************************** +** +** test_misc25c(): Exercise another null object header message merge bug. +** +****************************************************************/ +static void +test_misc25c(void) +{ + hid_t fid; /* File ID */ + hid_t fapl; /* File access property list ID */ + hid_t gcpl; /* Group creation property list ID */ + hid_t sid; /* Dataspace ID */ + hid_t did; /* Dataset ID */ + hid_t gid; /* Group ID */ + hid_t gid2; /* Group ID */ + hid_t aid; /* Attribute ID */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Exercise another null object header message bug\n")); + + /* Compose file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Create the file */ + fid = H5Fcreate(MISC25C_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Compose group creation property list */ + gcpl = H5Pcreate(H5P_GROUP_CREATE); + CHECK(gcpl, FAIL, "H5Pcreate"); + ret = H5Pset_link_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); + CHECK(ret, FAIL, "H5Pset_link_creation_order"); + ret = H5Pset_attr_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); + CHECK(ret, FAIL, "H5Pset_attr_creation_order"); + ret = H5Pset_est_link_info(gcpl, 1, 18); + CHECK(ret, FAIL, "H5Pset_est_link_info"); + + /* Create a group for the dataset */ + gid = H5Gcreate2(fid, MISC25C_DSETGRPNAME, H5P_DEFAULT, gcpl, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + /* Create the dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create the dataset */ + did = H5Dcreate2(gid, MISC25C_DSETNAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Create an extra group */ + gid2 = H5Gcreate2(fid, MISC25C_GRPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid2, FAIL, "H5Gcreate2"); + + /* Close the extra group */ + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Add an attribute to the dataset group */ + aid = H5Acreate2(gid, MISC25C_ATTRNAME, H5T_NATIVE_CHAR, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close the attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Create a second extra group */ + gid2 = H5Gcreate2(fid, MISC25C_GRPNAME2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid2, FAIL, 
"H5Gcreate2"); + + /* Close the second extra group */ + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Add second attribute to the dataset group */ + aid = H5Acreate2(gid, MISC25C_ATTRNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(aid, FAIL, "H5Acreate2"); + + /* Close the attribute */ + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the dataset group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Close the property lists */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(gcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Re-open the file */ + fid = H5Fopen(MISC25C_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Re-open the dataset group */ + gid = H5Gopen2(fid, MISC25C_DSETGRPNAME, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Rename the dataset */ + ret = H5Lmove(gid, MISC25C_DSETNAME, H5L_SAME_LOC, MISC25C_DSETNAME2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lmove"); + + /* Delete the first attribute */ + ret = H5Adelete(gid, MISC25C_ATTRNAME); + CHECK(ret, FAIL, "H5Adelete"); + + /* Close the dataset group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc25c() */ + +/**************************************************************** +** +** test_misc26(): Regression test: ensure that copying filter +** pipelines works properly. +** +****************************************************************/ +static void +test_misc26(void) +{ + hid_t fid; /* File ID */ + hid_t sid; /* Dataspace ID */ + hid_t did; /* Dataset ID */ + hid_t dcpl1, dcpl2, dcpl3; /* Property List IDs */ + hsize_t dims[] = {1}; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Copying filter pipelines\n")); + + /* Create the property list. 
It needs chunking so we can add filters */ + dcpl1 = H5Pcreate(H5P_DATASET_CREATE); + CHECK_I(dcpl1, "H5Pcreate"); + ret = H5Pset_chunk(dcpl1, 1, dims); + CHECK_I(ret, "H5Pset_chunk"); + + /* Add a filter with a data value to the property list */ + ret = H5Pset_deflate(dcpl1, 1); + CHECK_I(ret, "H5Pset_deflate"); + + /* Copy the property list */ + dcpl2 = H5Pcopy(dcpl1); + CHECK_I(dcpl2, "H5Pcopy"); + + /* Add a filter with no data values to the copy */ + ret = H5Pset_shuffle(dcpl2); + CHECK_I(ret, "H5Pset_shuffle"); + + /* Copy the copy */ + dcpl3 = H5Pcopy(dcpl2); + CHECK_I(dcpl3, "H5Pcopy"); + + /* Add another filter */ + ret = H5Pset_deflate(dcpl3, 2); + CHECK_I(ret, "H5Pset_deflate"); + + /* Create a new file and datasets within that file that use these + * property lists + */ + fid = H5Fcreate(MISC26_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + sid = H5Screate_simple(1, dims, dims); + CHECK(sid, FAIL, "H5Screate_simple"); + + did = H5Dcreate2(fid, "dataset1", H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, dcpl1, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + ret = H5Dclose(did); + CHECK_I(ret, "H5Dclose"); + + did = H5Dcreate2(fid, "dataset2", H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, dcpl2, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + ret = H5Dclose(did); + CHECK_I(ret, "H5Dclose"); + + did = H5Dcreate2(fid, "dataset3", H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, dcpl3, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + ret = H5Dclose(did); + CHECK_I(ret, "H5Dclose"); + + /* Close the dataspace and file */ + ret = H5Sclose(sid); + CHECK_I(ret, "H5Sclose"); + ret = H5Fclose(fid); + CHECK_I(ret, "H5Fclose"); + + /* Close the property lists. */ + ret = H5Pclose(dcpl1); + CHECK_I(ret, "H5Pclose"); + ret = H5Pclose(dcpl2); + CHECK_I(ret, "H5Pclose"); + ret = H5Pclose(dcpl3); + CHECK_I(ret, "H5Pclose"); +} + +/**************************************************************** +** +** test_misc27(): Ensure that objects with incorrect # of object +** header messages are handled appropriately. 
+** +** (Note that this test file is generated by the "gen_bad_ohdr.c" code) +** +****************************************************************/ +#if 0 +static void +test_misc27(void) +{ + hid_t fid; /* File ID */ + hid_t gid; /* Group ID */ + const char *testfile = H5_get_srcdir_filename(MISC27_FILE); /* Corrected test file name */ + hbool_t driver_is_default_compatible; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Corrupt object header handling\n")); + + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); + CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); + + if (!driver_is_default_compatible) { + HDprintf("-- SKIPPED --\n"); + return; + } + + /* Open the file */ + fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + +#ifdef H5_STRICT_FORMAT_CHECKS + /* Open group with incorrect # of object header messages (should fail) */ + H5E_BEGIN_TRY + { + gid = H5Gopen2(fid, MISC27_GROUP, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(gid, FAIL, "H5Gopen2"); +#else /* H5_STRICT_FORMAT_CHECKS */ + /* Open group with incorrect # of object header messages */ + gid = H5Gopen2(fid, MISC27_GROUP, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gopen2"); + + /* Close group */ + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); +#endif /* H5_STRICT_FORMAT_CHECKS */ + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc27() */ +#endif + +/**************************************************************** +** +** test_misc28(): Ensure that the dataset chunk cache will hold +** the correct number of chunks in cache without +** evicting them. +** +****************************************************************/ +static void +test_misc28(void) +{ + hid_t fid; /* File ID */ + hid_t sidf; /* File Dataspace ID */ + hid_t sidm; /* Memory Dataspace ID */ + hid_t did; /* Dataset ID */ + hid_t dcpl, fapl; /* Property List IDs */ + hsize_t dims[] = {MISC28_SIZE, MISC28_SIZE}; + hsize_t mdims[] = {MISC28_SIZE}; + hsize_t cdims[] = {1, 1}; + hsize_t start[] = {0, 0}; + hsize_t count[] = {MISC28_SIZE, 1}; +#if 0 + size_t nbytes_used; + int nused; +#endif + char buf[MISC28_SIZE]; + int i; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Dataset chunk cache\n")); + + /* Create the fapl and set the cache size. Set nelmts to larger than the + * file size so we can be guaranteed that no chunks will be evicted due to + * a hash collision. Set nbytes to fit exactly 1 column of chunks (10 + * bytes). 
*/ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + ret = H5Pset_cache(fapl, MISC28_NSLOTS, MISC28_NSLOTS, MISC28_SIZE, 0.75); + CHECK(ret, FAIL, "H5Pset_cache"); + + /* Create the dcpl and set the chunk size */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + ret = H5Pset_chunk(dcpl, 2, cdims); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* Create a new file and datasets within that file that use these + * property lists + */ + fid = H5Fcreate(MISC28_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + sidf = H5Screate_simple(2, dims, NULL); + CHECK(sidf, FAIL, "H5Screate_simple"); + + did = H5Dcreate2(fid, "dataset", H5T_NATIVE_CHAR, sidf, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); +#if 0 + /* Verify that the chunk cache is empty */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + VERIFY(nbytes_used, (size_t)0, "H5D__current_cache_size_test"); + VERIFY(nused, 0, "H5D__current_cache_size_test"); +#endif + /* Initialize write buffer */ + for (i = 0; i < MISC28_SIZE; i++) + buf[i] = (char)i; + + /* Create memory dataspace and selection in file dataspace */ + sidm = H5Screate_simple(1, mdims, NULL); + CHECK(sidm, FAIL, "H5Screate_simple"); + + ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Write hypserslab */ + ret = H5Dwrite(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf); + CHECK(ret, FAIL, "H5Dwrite"); +#if 0 + /* Verify that all 10 chunks written have been cached */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); + VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); +#endif + /* Initialize write buffer */ + for (i = 0; i < MISC28_SIZE; i++) + buf[i] = (char)(MISC28_SIZE - 1 - i); + + /* Select new hyperslab */ + start[1] = 1; + ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Write hyperslab */ + ret = H5Dwrite(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf); + CHECK(ret, FAIL, "H5Dwrite"); +#if 0 + /* Verify that the size of the cache remains at 10 */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); + VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); +#endif + /* Close dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Re open dataset */ + did = H5Dopen2(fid, "dataset", H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); +#if 0 + /* Verify that the chunk cache is empty */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + VERIFY(nbytes_used, (size_t)0, "H5D__current_cache_size_test"); + VERIFY(nused, 0, "H5D__current_cache_size_test"); +#endif + /* Select hyperslabe for reading */ + start[1] = 0; + ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read hypserslab */ + ret = H5Dread(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the data read */ + for (i = 0; i < MISC28_SIZE; i++) + VERIFY(buf[i], i, "H5Dread"); +#if 0 + /* Verify that all 10 chunks read have 
been cached */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); + VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); +#endif + /* Select new hyperslab */ + start[1] = 1; + ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read hyperslab */ + ret = H5Dread(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the data read */ + for (i = 0; i < MISC28_SIZE; i++) + VERIFY(buf[i], MISC28_SIZE - 1 - i, "H5Dread"); +#if 0 + /* Verify that the size of the cache remains at 10 */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); + VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); +#endif + /* Close dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the dataspaces and file */ + ret = H5Sclose(sidf); + CHECK_I(ret, "H5Sclose"); + ret = H5Sclose(sidm); + CHECK_I(ret, "H5Sclose"); + ret = H5Fclose(fid); + CHECK_I(ret, "H5Fclose"); + + /* Close the property lists. */ + ret = H5Pclose(dcpl); + CHECK_I(ret, "H5Pclose"); + ret = H5Pclose(fapl); + CHECK_I(ret, "H5Pclose"); +} /* end test_misc28() */ + +/**************************************************************** +** +** test_misc29(): Ensure that speculative metadata reads don't +** get raw data into the metadata accumulator. +** +****************************************************************/ +#if 0 +static void +test_misc29(void) +{ + hbool_t driver_is_default_compatible; + hid_t fid; /* File ID */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Speculative metadata reads\n")); + + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); + CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); + + if (!driver_is_default_compatible) { + HDprintf("-- SKIPPED --\n"); + return; + } + + /* Make a copy of the data file from svn. */ + ret = h5_make_local_copy(MISC29_ORIG_FILE, MISC29_COPY_FILE); + CHECK(ret, -1, "h5_make_local_copy"); + + /* Open the copied file */ + fid = H5Fopen(MISC29_COPY_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Delete the last dataset */ + ret = H5Ldelete(fid, MISC29_DSETNAME, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_misc29() */ +#endif + +#if 0 +static int +test_misc30_get_info_cb(hid_t loc_id, const char *name, const H5L_info2_t H5_ATTR_UNUSED *info, + void H5_ATTR_UNUSED *op_data) +{ + H5O_info2_t object_info; + + return H5Oget_info_by_name3(loc_id, name, &object_info, H5O_INFO_BASIC, H5P_DEFAULT); +} + +static int +test_misc30_get_info(hid_t loc_id) +{ + return H5Literate2(loc_id, H5_INDEX_NAME, H5_ITER_INC, NULL, test_misc30_get_info_cb, NULL); +} +#endif + +/**************************************************************** +** +** test_misc30(): Exercise local heap code that loads prefix +** separately from data block, causing the free +** block information to get lost. 
+** +****************************************************************/ +#if 0 +static void +test_misc30(void) +{ + hsize_t file_size[] = {0, 0}; /* Sizes of file created */ + unsigned get_info; /* Whether to perform the get info call */ + + /* Output message about test being performed */ + MESSAGE(5, ("Local heap dropping free block info\n")); + + for (get_info = FALSE; get_info <= TRUE; get_info++) { + hid_t fid; /* File ID */ + hid_t gid; /* Group ID */ + int i; /* Local index counter */ + herr_t ret; /* Generic return value */ + + fid = H5Fcreate(MISC30_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + gid = H5Gcreate2(fid, "/g0", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + for (i = 0; i < 20; i++) { + char gname[32]; + + fid = H5Fopen(MISC30_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + if (get_info) { + ret = test_misc30_get_info(fid); + CHECK(ret, FAIL, "test_misc30_get_info"); + } + + HDsnprintf(gname, sizeof(gname), "/g0/group%d", i); + gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, FAIL, "H5Gcreate2"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } + + fid = H5Fopen(MISC30_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + ret = H5Fget_filesize(fid, &file_size[get_info]); + CHECK(fid, FAIL, "H5Fget_filesize"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } + + VERIFY(file_size[0], file_size[1], "test_misc30"); +} /* end test_misc30() */ +#endif + +/**************************************************************** +** +** test_misc31(): Test reentering library through deprecated +* routines that register an id after calling +* H5close(). 
+** +****************************************************************/ +#if 0 +static void +test_misc31(void) +{ +#ifndef H5_NO_DEPRECATED_SYMBOLS + hid_t file_id; /* File id */ + hid_t space_id; /* Dataspace id */ + hid_t dset_id; /* Dataset id */ + hid_t attr_id; /* Attribute id */ + hid_t group_id; /* Group id */ + hid_t dtype_id; /* Datatype id */ + herr_t ret; /* Generic return value */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ + + /* Output message about test being performed */ + MESSAGE(5, ("Deprecated routines initialize after H5close()\n")); + +#ifndef H5_NO_DEPRECATED_SYMBOLS + file_id = H5Fcreate(MISC31_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fcreate"); + + /* Test dataset package */ + space_id = H5Screate(H5S_SCALAR); + CHECK(space_id, FAIL, "H5Screate"); + dset_id = H5Dcreate1(file_id, MISC31_DSETNAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dcreate1"); + ret = H5close(); + CHECK(ret, FAIL, "H5close"); + file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fopen"); + dset_id = H5Dopen1(file_id, MISC31_DSETNAME); + CHECK(dset_id, FAIL, "H5Dopen1"); + + /* Test attribute package */ + space_id = H5Screate(H5S_SCALAR); + CHECK(space_id, FAIL, "H5Screate"); + attr_id = H5Acreate1(dset_id, MISC31_ATTRNAME1, H5T_NATIVE_INT, space_id, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Acreate1"); + ret = H5close(); + CHECK(ret, FAIL, "H5close"); + file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fopen"); + dset_id = H5Dopen1(file_id, MISC31_DSETNAME); + CHECK(dset_id, FAIL, "H5Dopen1"); + space_id = H5Screate(H5S_SCALAR); + CHECK(space_id, FAIL, "H5Screate"); + attr_id = H5Acreate1(dset_id, MISC31_ATTRNAME2, H5T_NATIVE_INT, space_id, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Acreate1"); + + /* Test group package */ + group_id = H5Gcreate1(file_id, MISC31_GROUPNAME, 0); + CHECK(group_id, FAIL, "H5Gcreate1"); + ret = H5close(); + CHECK(ret, FAIL, "H5close"); + file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fopen"); + group_id = H5Gopen1(file_id, MISC31_GROUPNAME); + CHECK(group_id, FAIL, "H5Gopen1"); + + /* Test property list package */ + ret = H5Pregister1(H5P_OBJECT_CREATE, MISC31_PROPNAME, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK(ret, FAIL, "H5Pregister1"); + ret = H5close(); + CHECK(ret, FAIL, "H5close"); + ret = H5Pregister1(H5P_OBJECT_CREATE, MISC31_PROPNAME, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + CHECK(ret, FAIL, "H5Pregister1"); + ret = H5close(); + CHECK(ret, FAIL, "H5close"); + + /* Test datatype package */ + file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fopen"); + dtype_id = H5Tcopy(H5T_NATIVE_INT); + CHECK(dtype_id, FAIL, "H5Tcopy"); + ret = H5Tcommit1(file_id, MISC31_DTYPENAME, dtype_id); + CHECK(ret, FAIL, "H5Tcommit1"); + ret = H5close(); + CHECK(ret, FAIL, "H5close"); + file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fopen"); + dtype_id = H5Topen1(file_id, MISC31_DTYPENAME); + CHECK(ret, FAIL, "H5Topen1"); + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Tclose(dtype_id); + CHECK(ret, FAIL, "H5Tclose"); + +#else /* H5_NO_DEPRECATED_SYMBOLS */ + /* Output message about test being skipped */ + MESSAGE(5, (" ...Skipped")); +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +} /* end test_misc31() */ +#endif + +/**************************************************************** + * + * test_misc32(): Simple test of filter 
memory allocation + * functions. + * + ***************************************************************/ +static void +test_misc32(void) +{ + void *buffer; + void *resized; + size_t size; + + /* Output message about test being performed */ + MESSAGE(5, ("Edge case test of filter memory allocation functions\n")); + + /* Test that the filter memory allocation functions behave correctly + * at edge cases. + */ + + /* FREE */ + + /* Test freeing a NULL pointer. + * No real confirmation check here, but Valgrind will confirm no + * shenanigans. + */ + buffer = NULL; + H5free_memory(buffer); + + /* ALLOCATE */ + + /* Size zero returns NULL. + * Also checks that a size of zero and setting the buffer clear flag + * to TRUE can be used together. + * + * Note that we have asserts in the code, so only check when NDEBUG + * is defined. + */ +#ifdef NDEBUG + buffer = H5allocate_memory(0, FALSE); + CHECK_PTR_NULL(buffer, "H5allocate_memory"); /*BAD*/ + buffer = H5allocate_memory(0, TRUE); + CHECK_PTR_NULL(buffer, "H5allocate_memory"); /*BAD*/ +#endif /* NDEBUG */ + + /* RESIZE */ + + /* Size zero returns NULL. Valgrind will confirm buffer is freed. */ + size = 1024; + buffer = H5allocate_memory(size, TRUE); + resized = H5resize_memory(buffer, 0); + CHECK_PTR_NULL(resized, "H5resize_memory"); + + /* NULL input pointer returns new buffer */ + resized = H5resize_memory(NULL, 1024); + CHECK_PTR(resized, "H5resize_memory"); + H5free_memory(resized); + + /* NULL input pointer and size zero returns NULL */ +#ifdef NDEBUG + resized = H5resize_memory(NULL, 0); + CHECK_PTR_NULL(resized, "H5resize_memory"); /*BAD*/ +#endif /* NDEBUG */ + +} /* end test_misc32() */ + +/**************************************************************** +** +** test_misc33(): Test for H5FFV-10216 +** --verify that H5HL_offset_into() returns error if the +** input parameter "offset" exceeds heap data block size. +** --case (1), (2), (3) are scenarios that will traverse to the +** the 3 locations in the file having bad offset values to +** the heap. 
(See description in gen_bad_offset.c) +** +****************************************************************/ +#if 0 +static void +test_misc33(void) +{ + hid_t fid = -1; /* File ID */ + const char *testfile = H5_get_srcdir_filename(MISC33_FILE); /* Corrected test file name */ + H5O_info2_t oinfo; /* Structure for object metadata information */ + hbool_t driver_is_default_compatible; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing that bad offset into the heap returns error")); + + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); + CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); + + if (!driver_is_default_compatible) { + HDprintf("-- SKIPPED --\n"); + return; + } + + /* Open the test file */ + fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Case (1) */ + H5E_BEGIN_TRY + { + ret = H5Oget_info_by_name3(fid, "/soft_two", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Oget_info_by_name3"); + + /* Case (2) */ + H5E_BEGIN_TRY + { + ret = H5Oget_info_by_name3(fid, "/dsetA", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Oget_info_by_name3"); + + /* Case (3) */ + H5E_BEGIN_TRY + { + ret = H5Oget_info_by_name3(fid, "/soft_one", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Oget_info_by_name3"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(fid, FAIL, "H5Fclose"); + +} /* end test_misc33() */ +#endif + +/**************************************************************** +** +** test_misc34(): Ensure zero-size memory allocations work +** +****************************************************************/ +#if 0 +static void +test_misc34(void) +{ + void *mem = NULL; /* allocated buffer */ + char *dup = NULL; /* 'duplicated' string */ + size_t sz = 0; /* buffer size */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing O and NULL behavior in H5MM API calls")); + + /* H5MM_xfree(): Ensure that passing NULL is allowed and returns NULL */ + mem = H5MM_xfree(mem); + CHECK_PTR_NULL(mem, "H5MM_xfree"); + + /* H5MM_realloc(): Check behavior: + * + * H5MM_realloc(NULL, size) <==> H5MM_malloc(size) + * H5MM_realloc(ptr, 0) <==> H5MM_xfree(ptr) + * H5MM_realloc(NULL, 0) <==> NULL + */ + mem = H5MM_xfree(mem); + + sz = 1024; + mem = H5MM_realloc(mem, sz); + CHECK_PTR(mem, "H5MM_realloc (case 1)"); + /* Don't free mem here! 
*/ + + sz = 0; + mem = H5MM_realloc(mem, sz); + CHECK_PTR_NULL(mem, "H5MM_realloc (case 2)"); + mem = H5MM_xfree(mem); + + mem = H5MM_realloc(mem, sz); + CHECK_PTR_NULL(mem, "H5MM_realloc (case 3)"); + mem = H5MM_xfree(mem); + + /* H5MM_xstrdup(): Ensure NULL returns NULL */ + dup = H5MM_xstrdup((const char *)mem); + CHECK_PTR_NULL(dup, "H5MM_xstrdup"); + dup = (char *)H5MM_xfree((void *)dup); + +} /* end test_misc34() */ + +/**************************************************************** +** +** test_misc35(): Check operation of free-list routines +** +****************************************************************/ +static void +test_misc35(void) +{ + hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ + hsize_t dims[] = {MISC35_SPACE_DIM1, MISC35_SPACE_DIM2, MISC35_SPACE_DIM3}; /* Dataspace dims */ + hsize_t coord[MISC35_NPOINTS][MISC35_SPACE_RANK] = /* Coordinates for point selection */ + {{0, 10, 5}, {1, 2, 7}, {2, 4, 9}, {0, 6, 11}, {1, 8, 13}, + {2, 12, 0}, {0, 14, 2}, {1, 0, 4}, {2, 1, 6}, {0, 3, 8}}; + size_t reg_size_start; /* Initial amount of regular memory allocated */ + size_t arr_size_start; /* Initial amount of array memory allocated */ + size_t blk_size_start; /* Initial amount of block memory allocated */ + size_t fac_size_start; /* Initial amount of factory memory allocated */ + size_t reg_size_final; /* Final amount of regular memory allocated */ + size_t arr_size_final; /* Final amount of array memory allocated */ + size_t blk_size_final; /* Final amount of block memory allocated */ + size_t fac_size_final; /* Final amount of factory memory allocated */ + herr_t ret; /* Return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Free-list API calls")); + + /* Create dataspace */ + /* (Allocates array free-list nodes) */ + sid = H5Screate_simple(MISC35_SPACE_RANK, dims, NULL); + CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); + + /* Select sequence of ten points */ + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)MISC35_NPOINTS, (const hsize_t *)coord); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Retrieve initial free list values */ + ret = H5get_free_list_sizes(®_size_start, &arr_size_start, &blk_size_start, &fac_size_start); + CHECK(ret, FAIL, "H5get_free_list_sizes"); + +#if !defined H5_NO_FREE_LISTS && !defined H5_USING_MEMCHECKER + /* All the free list values should be >0 */ + CHECK(reg_size_start, 0, "H5get_free_list_sizes"); + CHECK(arr_size_start, 0, "H5get_free_list_sizes"); + CHECK(blk_size_start, 0, "H5get_free_list_sizes"); + CHECK(fac_size_start, 0, "H5get_free_list_sizes"); +#else + /* All the values should be == 0 */ + VERIFY(reg_size_start, 0, "H5get_free_list_sizes"); + VERIFY(arr_size_start, 0, "H5get_free_list_sizes"); + VERIFY(blk_size_start, 0, "H5get_free_list_sizes"); + VERIFY(fac_size_start, 0, "H5get_free_list_sizes"); +#endif + + /* Garbage collect the free lists */ + ret = H5garbage_collect(); + CHECK(ret, FAIL, "H5garbage_collect"); + + /* Retrieve free list values again */ + ret = H5get_free_list_sizes(®_size_final, &arr_size_final, &blk_size_final, &fac_size_final); + CHECK(ret, FAIL, "H5get_free_list_sizes"); + + /* All the free list values should be <= previous values */ + if (reg_size_final > reg_size_start) + ERROR("reg_size_final > reg_size_start"); + if (arr_size_final > arr_size_start) + ERROR("arr_size_final > arr_size_start"); + if (blk_size_final > blk_size_start) + ERROR("blk_size_final > blk_size_start"); + if 
(fac_size_final > fac_size_start) + ERROR("fac_size_final > fac_size_start"); + +} /* end test_misc35() */ +#endif + +/* Context to pass to 'atclose' callbacks */ +static int test_misc36_context; + +/* 'atclose' callbacks for test_misc36 */ +static void +test_misc36_cb1(void *_ctx) +{ + int *ctx = (int *)_ctx; /* Set up context pointer */ + hbool_t is_terminating; /* Flag indicating the library is terminating */ + herr_t ret; /* Return value */ + + /* Check whether the library thinks it's terminating */ + is_terminating = FALSE; + ret = H5is_library_terminating(&is_terminating); + CHECK(ret, FAIL, "H5is_library_terminating"); + VERIFY(is_terminating, TRUE, "H5is_library_terminating"); + + /* Verify correct ordering for 'atclose' callbacks */ + if (0 != *ctx) + HDabort(); + + /* Update context value */ + *ctx = 1; +} + +static void +test_misc36_cb2(void *_ctx) +{ + int *ctx = (int *)_ctx; /* Set up context pointer */ + hbool_t is_terminating; /* Flag indicating the library is terminating */ + herr_t ret; /* Return value */ + + /* Check whether the library thinks it's terminating */ + is_terminating = FALSE; + ret = H5is_library_terminating(&is_terminating); + CHECK(ret, FAIL, "H5is_library_terminating"); + VERIFY(is_terminating, TRUE, "H5is_library_terminating"); + + /* Verify correct ordering for 'atclose' callbacks */ + if (1 != *ctx) + HDabort(); + + /* Update context value */ + *ctx = 2; +} + +/**************************************************************** +** +** test_misc36(): Exercise H5atclose and H5is_library_terminating +** +****************************************************************/ +static void +test_misc36(void) +{ + hbool_t is_terminating; /* Flag indicating the library is terminating */ + herr_t ret; /* Return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("H5atclose and H5is_library_terminating API calls")); + + /* Check whether the library thinks it's terminating */ + is_terminating = TRUE; + ret = H5is_library_terminating(&is_terminating); + CHECK(ret, FAIL, "H5is_library_terminating"); + VERIFY(is_terminating, FALSE, "H5is_library_terminating"); + + /* Shut the library down */ + test_misc36_context = 0; + H5close(); + + /* Check whether the library thinks it's terminating */ + is_terminating = TRUE; + ret = H5is_library_terminating(&is_terminating); + CHECK(ret, FAIL, "H5is_library_terminating"); + VERIFY(is_terminating, FALSE, "H5is_library_terminating"); + + /* Check the close context was not changed */ + VERIFY(test_misc36_context, 0, "H5atclose"); + + /* Restart the library */ + H5open(); + + /* Check whether the library thinks it's terminating */ + is_terminating = TRUE; + ret = H5is_library_terminating(&is_terminating); + CHECK(ret, FAIL, "H5is_library_terminating"); + VERIFY(is_terminating, FALSE, "H5is_library_terminating"); + + /* Register the 'atclose' callbacks */ + /* (Note that these will be called in reverse order, which is checked) */ + ret = H5atclose(&test_misc36_cb2, &test_misc36_context); + CHECK(ret, FAIL, "H5atclose"); + ret = H5atclose(&test_misc36_cb1, &test_misc36_context); + CHECK(ret, FAIL, "H5atclose"); + + /* Shut the library down */ + test_misc36_context = 0; + H5close(); + + /* Check the close context was changed correctly */ + VERIFY(test_misc36_context, 2, "H5atclose"); + + /* Restart the library */ + H5open(); + + /* Close the library again */ + test_misc36_context = 0; + H5close(); + + /* Check the close context was not changed */ + VERIFY(test_misc36_context, 0, "H5atclose"); +} /* end test_misc36() */ 
+ +#if 0 +/**************************************************************** +** +** test_misc37(): +** Test for seg fault issue when closing the provided test file +** which has an illegal file size in its cache image. +** See HDFFV-11052/CVE-2020-10812 for details. +** +****************************************************************/ +static void +test_misc37(void) +{ + const char *testfile = H5_get_srcdir_filename(CVE_2020_10812_FILENAME); + hbool_t driver_is_default_compatible; + hid_t fid; + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Fix for HDFFV-11052/CVE-2020-10812")); + + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); + CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); + + if (!driver_is_default_compatible) { + HDprintf("-- SKIPPED --\n"); + return; + } + + fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* This should fail due to the illegal file size. + It should fail gracefully and not seg fault */ + H5E_BEGIN_TRY + { + ret = H5Fclose(fid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Fclose"); + +} /* end test_misc37() */ +#endif + +/**************************************************************** +** +** test_misc(): Main misc. test routine. +** +****************************************************************/ +void +test_misc(void) +{ + hbool_t default_driver = h5_using_default_driver(NULL); + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Miscellaneous Routines\n")); + + test_misc1(); /* Test unlinking a dataset & immediately re-using name */ + test_misc2(); /* Test storing a VL-derived datatype in two different files */ + test_misc3(); /* Test reading from chunked dataset with non-zero fill value */ + test_misc4(); /* Test retrieving the fileno for various objects with H5Oget_info() */ + test_misc5(); /* Test several level deep nested compound & VL datatypes */ + test_misc6(); /* Test object header continuation code */ +#if 0 + test_misc7(); /* Test for sensible datatypes stored on disk */ + test_misc8(); /* Test storage sizes of various types of dataset storage */ +#endif + test_misc9(); /* Test for opening (not creating) core files */ +#if 0 + test_misc10(); /* Test for using dataset creation property lists from old files */ +#endif + + if (default_driver) { + test_misc11(); /* Test for all properties of a file creation property list being stored */ + } + + test_misc12(); /* Test VL-strings in chunked datasets operating correctly */ +#if 0 + if (default_driver) { + test_misc13(); /* Test that a user block can be insert in front of file contents */ + } +#endif + test_misc14(); /* Test that deleted dataset's data is removed from sieve buffer correctly */ + test_misc15(); /* Test that checking a file's access property list more than once works */ + test_misc16(); /* Test array of fixed-length string */ + test_misc17(); /* Test array of ASCII character */ + test_misc18(); /* Test new object header information in H5O_info2_t struct */ + test_misc19(); /* Test incrementing & decrementing ref count on IDs */ +#if 0 + test_misc20(); /* Test problems with truncated dimensions in version 2 of storage layout message */ +#endif +#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS) + test_misc21(); /* Test that "late" allocation time is treated the same as "incremental", for chunked + datasets w/a filters */ + test_misc22(); /* check szip bits per pixel */ +#endif /* H5_HAVE_FILTER_SZIP */ + test_misc23(); /* Test 
intermediate group creation */ + test_misc24(); /* Test inappropriate API opens of objects */ + test_misc25a(); /* Exercise null object header message merge bug */ +#if 0 + test_misc25b(); /* Exercise null object header message merge bug on existing file */ +#endif + test_misc25c(); /* Exercise another null object header message merge bug */ + test_misc26(); /* Test closing property lists with long filter pipelines */ +#if 0 + test_misc27(); /* Test opening file with object that has bad # of object header messages */ +#endif + test_misc28(); /* Test that chunks are cached appropriately */ +#if 0 + test_misc29(); /* Test that speculative metadata reads are handled correctly */ + test_misc30(); /* Exercise local heap loading bug where free lists were getting dropped */ + + if (default_driver) { + test_misc31(); /* Test Reentering library through deprecated routines after H5close() */ + } +#endif + test_misc32(); /* Test filter memory allocation functions */ +#if 0 + test_misc33(); /* Test to verify that H5HL_offset_into() returns error if offset exceeds heap block */ + test_misc34(); /* Test behavior of 0 and NULL in H5MM API calls */ + test_misc35(); /* Test behavior of free-list & allocation statistics API calls */ +#endif + test_misc36(); /* Exercise H5atclose and H5is_library_terminating */ +#if 0 + test_misc37(); /* Test for seg fault failure at file close */ +#endif +} /* test_misc() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_misc + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Albert Cheng + * July 2, 1998 + *------------------------------------------------------------------------- + */ +void +cleanup_misc(void) +{ + H5Fdelete(MISC1_FILE, H5P_DEFAULT); + H5Fdelete(MISC2_FILE_1, H5P_DEFAULT); + H5Fdelete(MISC2_FILE_2, H5P_DEFAULT); + H5Fdelete(MISC3_FILE, H5P_DEFAULT); + H5Fdelete(MISC4_FILE_1, H5P_DEFAULT); + H5Fdelete(MISC4_FILE_2, H5P_DEFAULT); + H5Fdelete(MISC5_FILE, H5P_DEFAULT); + H5Fdelete(MISC6_FILE, H5P_DEFAULT); + H5Fdelete(MISC7_FILE, H5P_DEFAULT); + H5Fdelete(MISC8_FILE, H5P_DEFAULT); + H5Fdelete(MISC9_FILE, H5P_DEFAULT); + H5Fdelete(MISC10_FILE_NEW, H5P_DEFAULT); + H5Fdelete(MISC11_FILE, H5P_DEFAULT); + H5Fdelete(MISC12_FILE, H5P_DEFAULT); + H5Fdelete(MISC13_FILE_1, H5P_DEFAULT); + H5Fdelete(MISC13_FILE_2, H5P_DEFAULT); + H5Fdelete(MISC14_FILE, H5P_DEFAULT); + H5Fdelete(MISC15_FILE, H5P_DEFAULT); + H5Fdelete(MISC16_FILE, H5P_DEFAULT); + H5Fdelete(MISC17_FILE, H5P_DEFAULT); + H5Fdelete(MISC18_FILE, H5P_DEFAULT); + H5Fdelete(MISC19_FILE, H5P_DEFAULT); + H5Fdelete(MISC20_FILE, H5P_DEFAULT); +#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS) + H5Fdelete(MISC21_FILE, H5P_DEFAULT); + H5Fdelete(MISC22_FILE, H5P_DEFAULT); +#endif /* H5_HAVE_FILTER_SZIP */ + H5Fdelete(MISC23_FILE, H5P_DEFAULT); + H5Fdelete(MISC24_FILE, H5P_DEFAULT); + H5Fdelete(MISC25A_FILE, H5P_DEFAULT); + H5Fdelete(MISC25C_FILE, H5P_DEFAULT); + H5Fdelete(MISC26_FILE, H5P_DEFAULT); + H5Fdelete(MISC28_FILE, H5P_DEFAULT); + H5Fdelete(MISC29_COPY_FILE, H5P_DEFAULT); + H5Fdelete(MISC30_FILE, H5P_DEFAULT); +#ifndef H5_NO_DEPRECATED_SYMBOLS + H5Fdelete(MISC31_FILE, H5P_DEFAULT); +#endif /* H5_NO_DEPRECATED_SYMBOLS */ +} /* end cleanup_misc() */ diff --git a/test/API/trefer.c b/test/API/trefer.c new file mode 100644 index 00000000000..af0b11b9dcd --- /dev/null +++ b/test/API/trefer.c @@ -0,0 +1,3641 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The 
HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: trefer + * + * Test the Reference functionality + * + *************************************************************/ + +#include "testhdf5.h" + +#define FILE_REF_PARAM "trefer_param.h5" +#define FILE_REF_OBJ "trefer_obj.h5" +#define FILE_REF_VL_OBJ "trefer_vl_obj.h5" +#define FILE_REF_CMPND_OBJ "trefer_cmpnd_obj.h5" +#define FILE_REF_REG "trefer_reg.h5" +#define FILE_REF_REG_1D "trefer_reg_1d.h5" +#define FILE_REF_OBJ_DEL "trefer_obj_del.h5" +#define FILE_REF_GRP "trefer_grp.h5" +#define FILE_REF_ATTR "trefer_attr.h5" +#define FILE_REF_EXT1 "trefer_ext1.h5" +#define FILE_REF_EXT2 "trefer_ext2.h5" +#define FILE_REF_COMPAT "trefer_compat.h5" + +/* 1-D dataset with fixed dimensions */ +#define SPACE1_RANK 1 +#define SPACE1_DIM1 4 + +/* 2-D dataset with fixed dimensions */ +#define SPACE2_RANK 2 +#define SPACE2_DIM1 10 +#define SPACE2_DIM2 10 + +/* Larger 1-D dataset with fixed dimensions */ +#define SPACE3_RANK 1 +#define SPACE3_DIM1 100 + +/* Element selection information */ +#define POINT1_NPOINTS 10 + +/* Compound datatype */ +typedef struct s1_t { + unsigned int a; + unsigned int b; + float c; +} s1_t; + +/* Compound datatype with reference */ +typedef struct s2_t { + H5R_ref_t ref0; /* reference */ + H5R_ref_t ref1; /* reference */ + H5R_ref_t ref2; /* reference */ + H5R_ref_t ref3; /* reference */ + unsigned int dim_idx; /* dimension index of the dataset */ +} s2_t; + +#define GROUPNAME "/group" +#define GROUPNAME2 "group2" +#define GROUPNAME3 "group3" +#define DSETNAME "/dset" +#define DSETNAME2 "dset2" +#define NAME_SIZE 16 + +#define MAX_ITER_CREATE 1000 +#define MAX_ITER_WRITE MAX_ITER_CREATE +#define MAX_ITER_READ MAX_ITER_CREATE + +/**************************************************************** +** +** test_reference_params(): Test basic H5R (reference) parameters +** for correct processing +** +****************************************************************/ +static void +test_reference_params(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset, /* Dataset ID */ + dset2; /* Dereferenced dataset ID */ + hid_t group; /* Group ID */ + hid_t attr; /* Attribute ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Datatype ID */ + hid_t aapl_id; /* Attribute access property list */ + hid_t dapl_id; /* Dataset access property list */ + hsize_t dims1[] = {SPACE1_DIM1}; + H5R_ref_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf; /* temp. 
buffer read from disk */ + unsigned *obuf; + H5R_type_t type; /* Reference type */ + unsigned int i; /* Counters */ +#if 0 + const char *write_comment = "Foo!"; /* Comments for group */ +#endif + hid_t ret_id; /* Generic hid_t return value */ + ssize_t name_size; /* Size of reference name */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Reference Parameters\n")); + + /* Allocate write & read buffers */ + wbuf = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + rbuf = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + tbuf = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + obuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1); + + for (i = 0; i < SPACE1_DIM1; i++) + obuf[i] = i * 3; + + /* Create file */ + fid1 = H5Fcreate(FILE_REF_PARAM, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create attribute access property list */ + aapl_id = H5Pcreate(H5P_ATTRIBUTE_ACCESS); + CHECK(aapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Create dataset access property list */ + dapl_id = H5Pcreate(H5P_DATASET_ACCESS); + CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Create a group */ + group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); +#if 0 + /* Set group's comment */ + ret = H5Oset_comment(group, write_comment); + CHECK(ret, FAIL, "H5Oset_comment"); +#endif + /* Create a dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create another dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Create an attribute for the dataset */ + attr = H5Acreate2(dataset, "Attr", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); + + /* Write attribute to disk */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, obuf); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create a datatype to refer to */ + tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Save datatype for later */ + ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF, 
sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, H5I_INVALID_HID, "H5Dcreate2"); + + /* Test parameters to H5Rcreate_object */ + H5E_BEGIN_TRY + { + ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_object ref"); + H5E_BEGIN_TRY + { + ret = H5Rcreate_object(H5I_INVALID_HID, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_object loc_id"); + H5E_BEGIN_TRY + { + ret = H5Rcreate_object(fid1, NULL, H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_object name"); + H5E_BEGIN_TRY + { + ret = H5Rcreate_object(fid1, "", H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_object null name"); + + /* Test parameters to H5Rcreate_region */ + H5E_BEGIN_TRY + { + ret = H5Rcreate_region(fid1, "/Group1/Dataset1", sid1, H5P_DEFAULT, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_region ref"); + H5E_BEGIN_TRY + { + ret = H5Rcreate_region(H5I_INVALID_HID, "/Group1/Dataset1", sid1, H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_region loc_id"); + H5E_BEGIN_TRY + { + ret = H5Rcreate_region(fid1, NULL, sid1, H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_region name"); + H5E_BEGIN_TRY + { + ret = H5Rcreate_region(fid1, "/Group1/Dataset1", H5I_INVALID_HID, H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_region dataspace"); + + /* Test parameters to H5Rcreate_attr */ + H5E_BEGIN_TRY + { + ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", "Attr", H5P_DEFAULT, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_attr ref"); + H5E_BEGIN_TRY + { + ret = H5Rcreate_attr(H5I_INVALID_HID, "/Group1/Dataset2", "Attr", H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_attr loc_id"); + H5E_BEGIN_TRY + { + ret = H5Rcreate_attr(fid1, NULL, "Attr", H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_attr name"); + H5E_BEGIN_TRY + { + ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", NULL, H5P_DEFAULT, &wbuf[0]); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcreate_attr attr_name"); + + /* Test parameters to H5Rdestroy */ + H5E_BEGIN_TRY + { + ret = H5Rdestroy(NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rdestroy"); + + /* Test parameters to H5Rget_type */ + H5E_BEGIN_TRY + { + type = H5Rget_type(NULL); + } + H5E_END_TRY; + VERIFY(type, H5R_BADTYPE, "H5Rget_type ref"); + + /* Test parameters to H5Requal */ + H5E_BEGIN_TRY + { + ret = H5Requal(NULL, &rbuf[0]); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Requal ref1"); + H5E_BEGIN_TRY + { + ret = H5Requal(&rbuf[0], NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Requal ref2"); + + /* Test parameters to H5Rcopy */ + H5E_BEGIN_TRY + { + ret = H5Rcopy(NULL, &wbuf[0]); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcopy src_ref"); + H5E_BEGIN_TRY + { + ret = H5Rcopy(&rbuf[0], NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rcopy dest_ref"); + + /* Test parameters to H5Ropen_object */ + H5E_BEGIN_TRY + { + dset2 = H5Ropen_object(&rbuf[0], H5I_INVALID_HID, H5I_INVALID_HID); + } + H5E_END_TRY; + VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object oapl_id"); + H5E_BEGIN_TRY + { + dset2 = H5Ropen_object(NULL, H5P_DEFAULT, dapl_id); + } + H5E_END_TRY; + VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object ref"); + + /* Test parameters to H5Ropen_region */ + H5E_BEGIN_TRY + { + ret_id = H5Ropen_region(NULL, H5I_INVALID_HID, H5I_INVALID_HID); + } + H5E_END_TRY; + VERIFY(ret_id, H5I_INVALID_HID, 
"H5Ropen_region ref"); + + /* Test parameters to H5Ropen_attr */ + H5E_BEGIN_TRY + { + ret_id = H5Ropen_attr(NULL, H5P_DEFAULT, aapl_id); + } + H5E_END_TRY; + VERIFY(ret_id, H5I_INVALID_HID, "H5Ropen_attr ref"); + + /* Test parameters to H5Rget_obj_type3 */ + H5E_BEGIN_TRY + { + ret = H5Rget_obj_type3(NULL, H5P_DEFAULT, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rget_obj_type3 ref"); + + /* Test parameters to H5Rget_file_name */ + H5E_BEGIN_TRY + { + name_size = H5Rget_file_name(NULL, NULL, 0); + } + H5E_END_TRY; + VERIFY(name_size, (-1), "H5Rget_file_name ref"); + + /* Test parameters to H5Rget_obj_name */ + H5E_BEGIN_TRY + { + name_size = H5Rget_obj_name(NULL, H5P_DEFAULT, NULL, 0); + } + H5E_END_TRY; + VERIFY(name_size, (-1), "H5Rget_obj_name ref"); + + /* Test parameters to H5Rget_attr_name */ + H5E_BEGIN_TRY + { + name_size = H5Rget_attr_name(NULL, NULL, 0); + } + H5E_END_TRY; + VERIFY(name_size, (-1), "H5Rget_attr_name ref"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset access property list */ + ret = H5Pclose(dapl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close attribute access property list */ + ret = H5Pclose(aapl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); + HDfree(tbuf); + HDfree(obuf); +} /* test_reference_params() */ + +/**************************************************************** +** +** test_reference_obj(): Test basic H5R (reference) object reference code. +** Tests references to various kinds of objects +** +****************************************************************/ +static void +test_reference_obj(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset, /* Dataset ID */ + dset2; /* Dereferenced dataset ID */ + hid_t group; /* Group ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Datatype ID */ + hsize_t dims1[] = {SPACE1_DIM1}; + hid_t dapl_id; /* Dataset access property list */ + H5R_ref_t *wbuf, /* buffer to write to disk */ + *rbuf; /* buffer read from disk */ + unsigned *ibuf, *obuf; + unsigned i, j; /* Counters */ + H5O_type_t obj_type; /* Object type */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Object Reference Functions\n")); + + /* Allocate write & read buffers */ + wbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + rbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + ibuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1); + obuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1); + + for (i = 0; i < SPACE1_DIM1; i++) + obuf[i] = i * 3; + + /* Create file */ + fid1 = H5Fcreate(FILE_REF_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create dataset access property list */ + dapl_id = H5Pcreate(H5P_DATASET_ACCESS); + CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Create a group */ + group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); + + /* Create a dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Write 
selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create another dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create a datatype to refer to */ + tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Save datatype for later */ + ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Create reference to dataset */ + ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Create reference to dataset */ + ret = H5Rcreate_object(fid1, "/Group1/Dataset2", H5P_DEFAULT, &wbuf[1]); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&wbuf[1], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Create reference to group */ + ret = H5Rcreate_object(fid1, "/Group1", H5P_DEFAULT, &wbuf[2]); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&wbuf[2], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3"); + + /* Create reference to named datatype */ + ret = H5Rcreate_object(fid1, "/Group1/Datatype1", H5P_DEFAULT, &wbuf[3]); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&wbuf[3], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + fid1 = H5Fopen(FILE_REF_OBJ, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Open dataset object */ + dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, 
dapl_id); + CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); + + /* Check information in referenced dataset */ + sid1 = H5Dget_space(dset2); + CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); + + ret = (int)H5Sget_simple_extent_npoints(sid1); + VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints"); + + /* Read from disk */ + ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf); + CHECK(ret, FAIL, "H5Dread"); + + for (i = 0; i < SPACE1_DIM1; i++) + VERIFY(ibuf[i], i * 3, "Data"); + + /* Close dereferenced Dataset */ + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Open group object. GAPL isn't supported yet. But it's harmless to pass in */ + group = H5Ropen_object(&rbuf[2], H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, H5I_INVALID_HID, "H5Ropen_object"); + + /* Close group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Open datatype object. TAPL isn't supported yet. But it's harmless to pass in */ + tid1 = H5Ropen_object(&rbuf[3], H5P_DEFAULT, H5P_DEFAULT); + CHECK(tid1, H5I_INVALID_HID, "H5Ropen_object"); + + /* Verify correct datatype */ + { + H5T_class_t tclass; + + tclass = H5Tget_class(tid1); + VERIFY(tclass, H5T_COMPOUND, "H5Tget_class"); + + ret = H5Tget_nmembers(tid1); + VERIFY(ret, 3, "H5Tget_nmembers"); + } + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataset access property list */ + ret = H5Pclose(dapl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Destroy references */ + for (j = 0; j < SPACE1_DIM1; j++) { + ret = H5Rdestroy(&wbuf[j]); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&rbuf[j]); + CHECK(ret, FAIL, "H5Rdestroy"); + } + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); + HDfree(ibuf); + HDfree(obuf); +} /* test_reference_obj() */ + +/**************************************************************** +** +** test_reference_vlen_obj(): Test basic H5R (reference) object reference +** within a vlen type. 
+** Tests references to various kinds of objects +** +****************************************************************/ +static void +test_reference_vlen_obj(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset, /* Dataset ID */ + dset2; /* Dereferenced dataset ID */ + hid_t group; /* Group ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Datatype ID */ + hsize_t dims1[] = {SPACE1_DIM1}; + hsize_t vl_dims[] = {1}; + hid_t dapl_id; /* Dataset access property list */ + H5R_ref_t *wbuf, /* buffer to write to disk */ + *rbuf = NULL; /* buffer read from disk */ + unsigned *ibuf, *obuf; + unsigned i, j; /* Counters */ + H5O_type_t obj_type; /* Object type */ + herr_t ret; /* Generic return value */ + hvl_t vl_wbuf = {0, NULL}, vl_rbuf = {0, NULL}; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Object Reference Functions within VLEN type\n")); + + /* Allocate write & read buffers */ + wbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + ibuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1); + obuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1); + + for (i = 0; i < SPACE1_DIM1; i++) + obuf[i] = i * 3; + + /* Create file */ + fid1 = H5Fcreate(FILE_REF_VL_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create dataset access property list */ + dapl_id = H5Pcreate(H5P_DATASET_ACCESS); + CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Create a group */ + group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); + + /* Create a dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create another dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create a datatype to refer to */ + tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Save datatype for later */ + ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create vlen type */ + tid1 = H5Tvlen_create(H5T_STD_REF); + CHECK(tid1, H5I_INVALID_HID, "H5Tvlen_create"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, vl_dims, NULL); + CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create a 
dataset */ + dataset = H5Dcreate2(fid1, "Dataset3", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Create reference to dataset */ + ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Create reference to dataset */ + ret = H5Rcreate_object(fid1, "/Group1/Dataset2", H5P_DEFAULT, &wbuf[1]); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&wbuf[1], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Create reference to group */ + ret = H5Rcreate_object(fid1, "/Group1", H5P_DEFAULT, &wbuf[2]); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&wbuf[2], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3"); + + /* Create reference to named datatype */ + ret = H5Rcreate_object(fid1, "/Group1/Datatype1", H5P_DEFAULT, &wbuf[3]); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&wbuf[3], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3"); + + /* Store references into vlen */ + vl_wbuf.len = SPACE1_DIM1; + vl_wbuf.p = wbuf; + + /* Write selection to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &vl_wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + fid1 = H5Fopen(FILE_REF_VL_OBJ, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + tid1 = H5Dget_type(dataset); + CHECK(tid1, H5I_INVALID_HID, "H5Dget_type"); + + /* Read selection from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &vl_rbuf); + CHECK(ret, FAIL, "H5Dread"); + + VERIFY(vl_rbuf.len, SPACE1_DIM1, "H5Dread"); + rbuf = vl_rbuf.p; + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Open dataset object */ + dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id); + CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); + + /* Check information in referenced dataset */ + sid1 = H5Dget_space(dset2); + CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); + + ret = (int)H5Sget_simple_extent_npoints(sid1); + VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints"); + + /* Read from disk */ + ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf); + CHECK(ret, FAIL, "H5Dread"); + + for (i = 0; i < SPACE1_DIM1; i++) + VERIFY(ibuf[i], i * 3, "Data"); + + /* Close dereferenced Dataset */ + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Open group object. GAPL isn't supported yet. 
But it's harmless to pass in */ + group = H5Ropen_object(&rbuf[2], H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, H5I_INVALID_HID, "H5Ropen_object"); + + /* Close group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Open datatype object. TAPL isn't supported yet. But it's harmless to pass in */ + tid1 = H5Ropen_object(&rbuf[3], H5P_DEFAULT, H5P_DEFAULT); + CHECK(tid1, H5I_INVALID_HID, "H5Ropen_object"); + + /* Verify correct datatype */ + { + H5T_class_t tclass; + + tclass = H5Tget_class(tid1); + VERIFY(tclass, H5T_COMPOUND, "H5Tget_class"); + + ret = H5Tget_nmembers(tid1); + VERIFY(ret, 3, "H5Tget_nmembers"); + } + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataset access property list */ + ret = H5Pclose(dapl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Destroy references */ + for (j = 0; j < SPACE1_DIM1; j++) { + ret = H5Rdestroy(&wbuf[j]); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&rbuf[j]); + CHECK(ret, FAIL, "H5Rdestroy"); + } + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); + HDfree(ibuf); + HDfree(obuf); +} /* test_reference_vlen_obj() */ + +/**************************************************************** +** +** test_reference_cmpnd_obj(): Test basic H5R (reference) object reference +** within a compound type. +** Tests references to various kinds of objects +** +****************************************************************/ +static void +test_reference_cmpnd_obj(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset, /* Dataset ID */ + dset2; /* Dereferenced dataset ID */ + hid_t group; /* Group ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Datatype ID */ + hsize_t dims1[] = {SPACE1_DIM1}; + hsize_t cmpnd_dims[] = {1}; + hid_t dapl_id; /* Dataset access property list */ + unsigned *ibuf, *obuf; + unsigned i; /* Counter */ + H5O_type_t obj_type; /* Object type */ + herr_t ret; /* Generic return value */ + s2_t cmpnd_wbuf, cmpnd_rbuf; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Object Reference Functions within compound type\n")); + + /* Allocate write & read buffers */ + ibuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1); + obuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1); + + for (i = 0; i < SPACE1_DIM1; i++) + obuf[i] = i * 3; + + /* Create file */ + fid1 = H5Fcreate(FILE_REF_CMPND_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create dataset access property list */ + dapl_id = H5Pcreate(H5P_DATASET_ACCESS); + CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Create a group */ + group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); + + /* Create a dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create another dataset (inside Group1) */ + dataset = H5Dcreate2(group, 
"Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create a datatype to refer to */ + tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Save datatype for later */ + ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create compound type */ + tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s2_t)); + CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid1, "ref0", HOFFSET(s2_t, ref0), H5T_STD_REF); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "ref1", HOFFSET(s2_t, ref1), H5T_STD_REF); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "ref2", HOFFSET(s2_t, ref2), H5T_STD_REF); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "ref3", HOFFSET(s2_t, ref3), H5T_STD_REF); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "dim_idx", HOFFSET(s2_t, dim_idx), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, cmpnd_dims, NULL); + CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset3", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Reset buffer for writing */ + HDmemset(&cmpnd_wbuf, 0, sizeof(cmpnd_wbuf)); + + /* Create reference to dataset */ + ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &cmpnd_wbuf.ref0); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&cmpnd_wbuf.ref0, H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Create reference to dataset */ + ret = H5Rcreate_object(fid1, "/Group1/Dataset2", H5P_DEFAULT, &cmpnd_wbuf.ref1); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&cmpnd_wbuf.ref1, H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Create reference to group */ + ret = H5Rcreate_object(fid1, "/Group1", H5P_DEFAULT, &cmpnd_wbuf.ref2); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&cmpnd_wbuf.ref2, H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3"); + + /* Create reference to named datatype */ + ret = H5Rcreate_object(fid1, "/Group1/Datatype1", H5P_DEFAULT, &cmpnd_wbuf.ref3); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&cmpnd_wbuf.ref3, H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3"); + + /* Store dimensions */ + cmpnd_wbuf.dim_idx = SPACE1_DIM1; + + /* Write selection to disk */ + ret = H5Dwrite(dataset, 
tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &cmpnd_wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + fid1 = H5Fopen(FILE_REF_CMPND_OBJ, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + tid1 = H5Dget_type(dataset); + CHECK(tid1, H5I_INVALID_HID, "H5Dget_type"); + + /* Read selection from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &cmpnd_rbuf); + CHECK(ret, FAIL, "H5Dread"); + + VERIFY(cmpnd_rbuf.dim_idx, SPACE1_DIM1, "H5Dread"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Open dataset object */ + dset2 = H5Ropen_object(&cmpnd_rbuf.ref0, H5P_DEFAULT, dapl_id); + CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); + + /* Check information in referenced dataset */ + sid1 = H5Dget_space(dset2); + CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); + + ret = (int)H5Sget_simple_extent_npoints(sid1); + VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints"); + + /* Read from disk */ + ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf); + CHECK(ret, FAIL, "H5Dread"); + + for (i = 0; i < SPACE1_DIM1; i++) + VERIFY(ibuf[i], i * 3, "Data"); + + /* Close dereferenced Dataset */ + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Open group object. GAPL isn't supported yet. But it's harmless to pass in */ + group = H5Ropen_object(&cmpnd_rbuf.ref2, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, H5I_INVALID_HID, "H5Ropen_object"); + + /* Close group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Open datatype object. TAPL isn't supported yet. 
But it's harmless to pass in */ + tid1 = H5Ropen_object(&cmpnd_rbuf.ref3, H5P_DEFAULT, H5P_DEFAULT); + CHECK(tid1, H5I_INVALID_HID, "H5Ropen_object"); + + /* Verify correct datatype */ + { + H5T_class_t tclass; + + tclass = H5Tget_class(tid1); + VERIFY(tclass, H5T_COMPOUND, "H5Tget_class"); + + ret = H5Tget_nmembers(tid1); + VERIFY(ret, 3, "H5Tget_nmembers"); + } + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataset access property list */ + ret = H5Pclose(dapl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Destroy references */ + ret = H5Rdestroy(&cmpnd_wbuf.ref0); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&cmpnd_wbuf.ref1); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&cmpnd_wbuf.ref2); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&cmpnd_wbuf.ref3); + CHECK(ret, FAIL, "H5Rdestroy"); + + ret = H5Rdestroy(&cmpnd_rbuf.ref0); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&cmpnd_rbuf.ref1); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&cmpnd_rbuf.ref2); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&cmpnd_rbuf.ref3); + CHECK(ret, FAIL, "H5Rdestroy"); + + /* Free memory buffers */ + HDfree(ibuf); + HDfree(obuf); +} /* test_reference_cmpnd_obj() */ + +/**************************************************************** +** +** test_reference_region(): Test basic H5R (reference) object reference code. +** Tests references to various kinds of objects +** +** Note: The libver_low/libver_high parameters are added to create the file +** with the low and high bounds setting in fapl. +** Please see the RFC for "H5Sencode/H5Sdecode Format Change". 
+** +****************************************************************/ +static void +test_reference_region(H5F_libver_t libver_low, H5F_libver_t libver_high) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t fapl; /* File access property list */ + hid_t dset1, /* Dataset ID */ + dset2; /* Dereferenced dataset ID */ + hid_t sid1, /* Dataspace ID #1 */ + sid2; /* Dataspace ID #2 */ + hid_t dapl_id; /* Dataset access property list */ + hsize_t dims1[] = {SPACE1_DIM1}, dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ + hsize_t coord1[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ + hsize_t *coords; /* Coordinate buffer */ + hsize_t low[SPACE2_RANK]; /* Selection bounds */ + hsize_t high[SPACE2_RANK]; /* Selection bounds */ + H5R_ref_t *wbuf = NULL, /* buffer to write to disk */ + *rbuf = NULL; /* buffer read from disk */ + H5R_ref_t nvrbuf[3] = {{{{0}}}, {{{101}}}, {{{255}}}}; /* buffer with non-valid refs */ + uint8_t *dwbuf = NULL, /* Buffer for writing numeric data to disk */ + *drbuf = NULL; /* Buffer for reading numeric data from disk */ + uint8_t *tu8; /* Temporary pointer to uint8 data */ + H5O_type_t obj_type; /* Type of object */ + int i, j; /* Counters */ + hssize_t hssize_ret; /* hssize_t return value */ + htri_t tri_ret; /* htri_t return value */ + herr_t ret; /* Generic return value */ + hid_t dset_NA; /* Dataset id for undefined reference */ + hid_t space_NA; /* Dataspace id for undefined reference */ + hsize_t dims_NA[1] = {1}; /* Dims array for undefined reference */ + H5R_ref_t rdata_NA[1]; /* Read buffer */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Dataset Region Reference Functions\n")); + + /* Allocate write & read buffers */ + wbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + rbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + dwbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); + drbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); + + for (tu8 = dwbuf, i = 0; i < (SPACE2_DIM1 * SPACE2_DIM2); i++) + *tu8++ = (uint8_t)(i * 3); + + /* Create file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, H5I_INVALID_HID, "H5Pcreate"); + + /* Set the low/high version bounds in fapl */ + ret = H5Pset_libver_bounds(fapl, libver_low, libver_high); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Create file with the fapl */ + fid1 = H5Fcreate(FILE_REF_REG, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create dataset access property list */ + dapl_id = H5Pcreate(H5P_DATASET_ACCESS); + CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Create a dataset */ + dset2 = H5Dcreate2(fid1, "Dataset2", H5T_STD_U8LE, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset2, H5I_INVALID_HID, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dset2, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, dwbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create dataspace for the reference dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, 
H5I_INVALID_HID, "H5Screate_simple"); + + /* Create a dataset */ + H5E_BEGIN_TRY + { + dset1 = H5Dcreate2(fid1, "Dataset1", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset1 < 0) { + VERIFY(libver_high <= H5F_LIBVER_V110, TRUE, "H5Dcreate2"); + + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + } + else { + + CHECK(dset1, H5I_INVALID_HID, "H5Dcreate2"); + + /* Create references */ + + /* Select 6x6 hyperslab for first reference */ + start[0] = 2; + start[1] = 2; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 6; + block[1] = 6; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + ret = (int)H5Sget_select_npoints(sid2); + VERIFY(ret, 36, "H5Sget_select_npoints"); + + /* Store first dataset region */ + ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[0]); + CHECK(ret, FAIL, "H5Rcreate_region"); + ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Select sequence of ten points for second reference */ + coord1[0][0] = 6; + coord1[0][1] = 9; + coord1[1][0] = 2; + coord1[1][1] = 2; + coord1[2][0] = 8; + coord1[2][1] = 4; + coord1[3][0] = 1; + coord1[3][1] = 6; + coord1[4][0] = 2; + coord1[4][1] = 8; + coord1[5][0] = 3; + coord1[5][1] = 2; + coord1[6][0] = 0; + coord1[6][1] = 4; + coord1[7][0] = 9; + coord1[7][1] = 0; + coord1[8][0] = 7; + coord1[8][1] = 1; + coord1[9][0] = 3; + coord1[9][1] = 3; + ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + ret = (int)H5Sget_select_npoints(sid2); + VERIFY(ret, SPACE2_DIM2, "H5Sget_select_npoints"); + + /* Store second dataset region */ + ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[1]); + CHECK(ret, FAIL, "H5Rcreate_region"); + + /* Select unlimited hyperslab for third reference */ + start[0] = 1; + start[1] = 8; + stride[0] = 4; + stride[1] = 1; + count[0] = H5S_UNLIMITED; + count[1] = 1; + block[0] = 2; + block[1] = 2; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + hssize_ret = H5Sget_select_npoints(sid2); + VERIFY(hssize_ret, (hssize_t)H5S_UNLIMITED, "H5Sget_select_npoints"); + + /* Store third dataset region */ + ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[2]); + CHECK(ret, FAIL, "H5Rcreate_region"); + + ret = H5Rget_obj_type3(&wbuf[2], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Store fourth dataset region */ + ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[3]); + CHECK(ret, FAIL, "H5Rcreate_region"); + + /* Write selection to disk */ + ret = H5Dwrite(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + + /* + * Store a dataset region reference which will not get written to disk + */ + + /* Create the dataspace of the region references */ + space_NA = H5Screate_simple(1, dims_NA, NULL); + CHECK(space_NA, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create the dataset and write the region references to it */ + dset_NA = H5Dcreate2(fid1, "DS_NA", H5T_STD_REF, space_NA, 
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset_NA, H5I_INVALID_HID, "H5Dcreate"); + + /* Close and release resources for undefined region reference tests */ + ret = H5Dclose(dset_NA); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(space_NA); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close uint8 dataset dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + fid1 = H5Fopen(FILE_REF_REG, H5F_ACC_RDWR, fapl); + CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); + + /* + * Start the test of an undefined reference + */ + + /* Open the dataset of the undefined references */ + dset_NA = H5Dopen2(fid1, "DS_NA", H5P_DEFAULT); + CHECK(dset_NA, H5I_INVALID_HID, "H5Dopen2"); + + /* Read the data */ + ret = H5Dread(dset_NA, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata_NA); + CHECK(ret, FAIL, "H5Dread"); + + /* + * Dereference an undefined reference (should fail) + */ + H5E_BEGIN_TRY + { + dset2 = H5Ropen_object(&rdata_NA[0], H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object"); + + /* Close and release resources. */ + ret = H5Dclose(dset_NA); + CHECK(ret, FAIL, "H5Dclose"); + + /* This close should fail since H5Ropen_object never created + * the id of the referenced object. */ + H5E_BEGIN_TRY + { + ret = H5Dclose(dset2); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Dclose"); + + /* + * End the test of an undefined reference + */ + + /* Open the dataset */ + dset1 = H5Dopen2(fid1, "/Dataset1", H5P_DEFAULT); + CHECK(dset1, H5I_INVALID_HID, "H5Dopen2"); + + /* Read selection from disk */ + ret = H5Dread(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Try to open objects */ + dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id); + CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); + + /* Check what H5Rget_obj_type3 function returns */ + ret = H5Rget_obj_type3(&rbuf[0], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Check information in referenced dataset */ + sid1 = H5Dget_space(dset2); + CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); + + ret = (int)H5Sget_simple_extent_npoints(sid1); + VERIFY(ret, (SPACE2_DIM1 * SPACE2_DIM2), "H5Sget_simple_extent_npoints"); + + /* Read from disk */ + ret = H5Dread(dset2, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, drbuf); + CHECK(ret, FAIL, "H5Dread"); + + for (tu8 = (uint8_t *)drbuf, i = 0; i < (SPACE2_DIM1 * SPACE2_DIM2); i++, tu8++) + VERIFY(*tu8, (uint8_t)(i * 3), "Data"); + + /* Get the hyperslab selection */ + sid2 = H5Ropen_region(&rbuf[0], H5P_DEFAULT, H5P_DEFAULT); + CHECK(sid2, H5I_INVALID_HID, "H5Ropen_region"); + + /* Verify correct hyperslab selected */ + ret = (int)H5Sget_select_npoints(sid2); + VERIFY(ret, 36, "H5Sget_select_npoints"); + ret = (int)H5Sget_select_hyper_nblocks(sid2); + VERIFY(ret, 1, "H5Sget_select_hyper_nblocks"); + + /* allocate space for the hyperslab blocks */ + coords = (hsize_t *)HDmalloc((size_t)ret * SPACE2_RANK * sizeof(hsize_t) * 2); + + ret = H5Sget_select_hyper_blocklist(sid2, (hsize_t)0, (hsize_t)ret, coords); + CHECK(ret, FAIL, "H5Sget_select_hyper_blocklist"); + VERIFY(coords[0], 2, "Hyperslab Coordinates"); + VERIFY(coords[1], 2, "Hyperslab Coordinates"); + VERIFY(coords[2], 
7, "Hyperslab Coordinates"); + VERIFY(coords[3], 7, "Hyperslab Coordinates"); + HDfree(coords); + ret = H5Sget_select_bounds(sid2, low, high); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low[0], 2, "Selection Bounds"); + VERIFY(low[1], 2, "Selection Bounds"); + VERIFY(high[0], 7, "Selection Bounds"); + VERIFY(high[1], 7, "Selection Bounds"); + + /* Close region space */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Get the element selection */ + sid2 = H5Ropen_region(&rbuf[1], H5P_DEFAULT, H5P_DEFAULT); + CHECK(sid2, H5I_INVALID_HID, "H5Ropen_region"); + + /* Verify correct elements selected */ + ret = (int)H5Sget_select_npoints(sid2); + VERIFY(ret, SPACE2_DIM2, "H5Sget_select_npoints"); + ret = (int)H5Sget_select_elem_npoints(sid2); + VERIFY(ret, SPACE2_DIM2, "H5Sget_select_elem_npoints"); + + /* allocate space for the element points */ + coords = (hsize_t *)HDmalloc((size_t)ret * SPACE2_RANK * sizeof(hsize_t)); + + ret = H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)ret, coords); + CHECK(ret, FAIL, "H5Sget_select_elem_pointlist"); + VERIFY(coords[0], coord1[0][0], "Element Coordinates"); + VERIFY(coords[1], coord1[0][1], "Element Coordinates"); + VERIFY(coords[2], coord1[1][0], "Element Coordinates"); + VERIFY(coords[3], coord1[1][1], "Element Coordinates"); + VERIFY(coords[4], coord1[2][0], "Element Coordinates"); + VERIFY(coords[5], coord1[2][1], "Element Coordinates"); + VERIFY(coords[6], coord1[3][0], "Element Coordinates"); + VERIFY(coords[7], coord1[3][1], "Element Coordinates"); + VERIFY(coords[8], coord1[4][0], "Element Coordinates"); + VERIFY(coords[9], coord1[4][1], "Element Coordinates"); + VERIFY(coords[10], coord1[5][0], "Element Coordinates"); + VERIFY(coords[11], coord1[5][1], "Element Coordinates"); + VERIFY(coords[12], coord1[6][0], "Element Coordinates"); + VERIFY(coords[13], coord1[6][1], "Element Coordinates"); + VERIFY(coords[14], coord1[7][0], "Element Coordinates"); + VERIFY(coords[15], coord1[7][1], "Element Coordinates"); + VERIFY(coords[16], coord1[8][0], "Element Coordinates"); + VERIFY(coords[17], coord1[8][1], "Element Coordinates"); + VERIFY(coords[18], coord1[9][0], "Element Coordinates"); + VERIFY(coords[19], coord1[9][1], "Element Coordinates"); + HDfree(coords); + ret = H5Sget_select_bounds(sid2, low, high); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low[0], 0, "Selection Bounds"); + VERIFY(low[1], 0, "Selection Bounds"); + VERIFY(high[0], 9, "Selection Bounds"); + VERIFY(high[1], 9, "Selection Bounds"); + + /* Close region space */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Get the unlimited selection */ + sid2 = H5Ropen_region(&rbuf[2], H5P_DEFAULT, H5P_DEFAULT); + CHECK(sid2, H5I_INVALID_HID, "H5Ropen_region"); + + /* Verify correct hyperslab selected */ + hssize_ret = H5Sget_select_npoints(sid2); + VERIFY(hssize_ret, (hssize_t)H5S_UNLIMITED, "H5Sget_select_npoints"); + tri_ret = H5Sis_regular_hyperslab(sid2); + CHECK(tri_ret, FAIL, "H5Sis_regular_hyperslab"); + VERIFY(tri_ret, TRUE, "H5Sis_regular_hyperslab Result"); + ret = H5Sget_regular_hyperslab(sid2, start, stride, count, block); + CHECK(ret, FAIL, "H5Sget_regular_hyperslab"); + VERIFY(start[0], (hsize_t)1, "Hyperslab Coordinates"); + VERIFY(start[1], (hsize_t)8, "Hyperslab Coordinates"); + VERIFY(stride[0], (hsize_t)4, "Hyperslab Coordinates"); + VERIFY(stride[1], (hsize_t)1, "Hyperslab Coordinates"); + VERIFY(count[0], H5S_UNLIMITED, "Hyperslab Coordinates"); + VERIFY(count[1], (hsize_t)1, "Hyperslab Coordinates"); + 
VERIFY(block[0], (hsize_t)2, "Hyperslab Coordinates"); + VERIFY(block[1], (hsize_t)2, "Hyperslab Coordinates"); + + /* Close region space */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close first space */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dereferenced Dataset */ + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Attempting to retrieve type of object using non-valid refs */ + for (j = 0; j < 3; j++) { + H5E_BEGIN_TRY + { + ret = H5Rget_obj_type3(&nvrbuf[j], H5P_DEFAULT, &obj_type); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Rget_obj_type3"); + } /* end for */ + + /* Close Dataset */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataset access property list */ + ret = H5Pclose(dapl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Destroy references */ + for (j = 0; j < SPACE1_DIM1; j++) { + ret = H5Rdestroy(&wbuf[j]); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&rbuf[j]); + CHECK(ret, FAIL, "H5Rdestroy"); + } + } + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); + HDfree(dwbuf); + HDfree(drbuf); + +} /* test_reference_region() */ + +/**************************************************************** +** +** test_reference_region_1D(): Test H5R (reference) object reference code. +** Tests 1-D references to various kinds of objects +** +** Note: The libver_low/libver_high parameters are added to create the file +** with the low and high bounds setting in fapl. +** Please see the RFC for "H5Sencode/H5Sdecode Format Change". +** +****************************************************************/ +static void +test_reference_region_1D(H5F_libver_t libver_low, H5F_libver_t libver_high) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t fapl; /* File access property list */ + hid_t dset1, /* Dataset ID */ + dset3; /* Dereferenced dataset ID */ + hid_t sid1, /* Dataspace ID #1 */ + sid3; /* Dataspace ID #3 */ + hid_t dapl_id; /* Dataset access property list */ + hsize_t dims1[] = {2}, /* Must be 2 */ + dims3[] = {SPACE3_DIM1}; + hsize_t start[SPACE3_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE3_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE3_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE3_RANK]; /* Block size of hyperslab */ + hsize_t coord1[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */ + hsize_t *coords; /* Coordinate buffer */ + hsize_t low[SPACE3_RANK]; /* Selection bounds */ + hsize_t high[SPACE3_RANK]; /* Selection bounds */ + H5R_ref_t *wbuf = NULL, /* buffer to write to disk */ + *rbuf = NULL; /* buffer read from disk */ + uint8_t *dwbuf = NULL, /* Buffer for writing numeric data to disk */ + *drbuf = NULL; /* Buffer for reading numeric data from disk */ + uint8_t *tu8; /* Temporary pointer to uint8 data */ + H5O_type_t obj_type; /* Object type */ + int i; /* Counter */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing 1-D Dataset Region Reference Functions\n")); + + /* Allocate write & read buffers */ + wbuf = HDcalloc(sizeof(H5R_ref_t), (size_t)SPACE1_DIM1); + rbuf = HDcalloc(sizeof(H5R_ref_t), (size_t)SPACE1_DIM1); + dwbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)SPACE3_DIM1); + drbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)SPACE3_DIM1); + + for (tu8 = dwbuf, i = 0; i < SPACE3_DIM1; i++) + *tu8++ = (uint8_t)(i * 3); + + /* Create the file access property list */ + fapl = 
H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, H5I_INVALID_HID, "H5Pcreate"); + + /* Set the low/high version bounds in fapl */ + ret = H5Pset_libver_bounds(fapl, libver_low, libver_high); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + + /* Create file with the fapl */ + fid1 = H5Fcreate(FILE_REF_REG_1D, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid3 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid3, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create dataset access property list */ + dapl_id = H5Pcreate(H5P_DATASET_ACCESS); + CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Create a dataset */ + dset3 = H5Dcreate2(fid1, "Dataset2", H5T_STD_U8LE, sid3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset3, H5I_INVALID_HID, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dset3, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, dwbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create dataspace for the reference dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create a dataset */ + H5E_BEGIN_TRY + { + dset1 = H5Dcreate2(fid1, "Dataset1", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY; + + if (dset1 < 0) { + + VERIFY(libver_high <= H5F_LIBVER_V110, TRUE, "H5Dcreate2"); + + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(sid3); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + } + else { + + CHECK(ret, FAIL, "H5Dcreate2"); + + /* Create references */ + + /* Select 15 2x1 hyperslabs for first reference */ + start[0] = 2; + stride[0] = 5; + count[0] = 15; + block[0] = 2; + ret = H5Sselect_hyperslab(sid3, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + ret = (int)H5Sget_select_npoints(sid3); + VERIFY(ret, (block[0] * count[0]), "H5Sget_select_npoints"); + + /* Store first dataset region */ + ret = H5Rcreate_region(fid1, "/Dataset2", sid3, H5P_DEFAULT, &wbuf[0]); + CHECK(ret, FAIL, "H5Rcreate_region"); + ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Select sequence of ten points for second reference */ + coord1[0][0] = 16; + coord1[1][0] = 22; + coord1[2][0] = 38; + coord1[3][0] = 41; + coord1[4][0] = 52; + coord1[5][0] = 63; + coord1[6][0] = 70; + coord1[7][0] = 89; + coord1[8][0] = 97; + coord1[9][0] = 03; + ret = H5Sselect_elements(sid3, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + ret = (int)H5Sget_select_npoints(sid3); + VERIFY(ret, POINT1_NPOINTS, "H5Sget_select_npoints"); + + /* Store second dataset region */ + ret = H5Rcreate_region(fid1, "/Dataset2", sid3, H5P_DEFAULT, &wbuf[1]); + CHECK(ret, FAIL, "H5Rcreate_region"); + + /* Write selection to disk */ + ret = H5Dwrite(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close uint8 dataset dataspace */ + ret = H5Sclose(sid3); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + 
CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + fid1 = H5Fopen(FILE_REF_REG_1D, H5F_ACC_RDWR, fapl); + CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); + + /* Open the dataset */ + dset1 = H5Dopen2(fid1, "/Dataset1", H5P_DEFAULT); + CHECK(dset1, H5I_INVALID_HID, "H5Dopen2"); + + /* Read selection from disk */ + ret = H5Dread(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Try to open objects */ + dset3 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id); + CHECK(dset3, H5I_INVALID_HID, "H5Ropen_object"); + + /* Check what H5Rget_obj_type3 function returns */ + ret = H5Rget_obj_type3(&rbuf[0], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Check information in referenced dataset */ + sid1 = H5Dget_space(dset3); + CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); + + ret = (int)H5Sget_simple_extent_npoints(sid1); + VERIFY(ret, SPACE3_DIM1, "H5Sget_simple_extent_npoints"); + + /* Read from disk */ + ret = H5Dread(dset3, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, drbuf); + CHECK(ret, FAIL, "H5Dread"); + + for (tu8 = (uint8_t *)drbuf, i = 0; i < SPACE3_DIM1; i++, tu8++) + VERIFY(*tu8, (uint8_t)(i * 3), "Data"); + + /* Get the hyperslab selection */ + sid3 = H5Ropen_region(&rbuf[0], H5P_DEFAULT, H5P_DEFAULT); + CHECK(sid3, H5I_INVALID_HID, "H5Ropen_region"); + + /* Verify correct hyperslab selected */ + ret = (int)H5Sget_select_npoints(sid3); + VERIFY(ret, 30, "H5Sget_select_npoints"); + ret = (int)H5Sget_select_hyper_nblocks(sid3); + VERIFY(ret, 15, "H5Sget_select_hyper_nblocks"); + + /* allocate space for the hyperslab blocks */ + coords = (hsize_t *)HDmalloc((size_t)ret * SPACE3_RANK * sizeof(hsize_t) * 2); + + ret = H5Sget_select_hyper_blocklist(sid3, (hsize_t)0, (hsize_t)ret, coords); + CHECK(ret, FAIL, "H5Sget_select_hyper_blocklist"); + VERIFY(coords[0], 2, "Hyperslab Coordinates"); + VERIFY(coords[1], 3, "Hyperslab Coordinates"); + VERIFY(coords[2], 7, "Hyperslab Coordinates"); + VERIFY(coords[3], 8, "Hyperslab Coordinates"); + VERIFY(coords[4], 12, "Hyperslab Coordinates"); + VERIFY(coords[5], 13, "Hyperslab Coordinates"); + VERIFY(coords[6], 17, "Hyperslab Coordinates"); + VERIFY(coords[7], 18, "Hyperslab Coordinates"); + VERIFY(coords[8], 22, "Hyperslab Coordinates"); + VERIFY(coords[9], 23, "Hyperslab Coordinates"); + VERIFY(coords[10], 27, "Hyperslab Coordinates"); + VERIFY(coords[11], 28, "Hyperslab Coordinates"); + VERIFY(coords[12], 32, "Hyperslab Coordinates"); + VERIFY(coords[13], 33, "Hyperslab Coordinates"); + VERIFY(coords[14], 37, "Hyperslab Coordinates"); + VERIFY(coords[15], 38, "Hyperslab Coordinates"); + VERIFY(coords[16], 42, "Hyperslab Coordinates"); + VERIFY(coords[17], 43, "Hyperslab Coordinates"); + VERIFY(coords[18], 47, "Hyperslab Coordinates"); + VERIFY(coords[19], 48, "Hyperslab Coordinates"); + VERIFY(coords[20], 52, "Hyperslab Coordinates"); + VERIFY(coords[21], 53, "Hyperslab Coordinates"); + VERIFY(coords[22], 57, "Hyperslab Coordinates"); + VERIFY(coords[23], 58, "Hyperslab Coordinates"); + VERIFY(coords[24], 62, "Hyperslab Coordinates"); + VERIFY(coords[25], 63, "Hyperslab Coordinates"); + VERIFY(coords[26], 67, "Hyperslab Coordinates"); + VERIFY(coords[27], 68, "Hyperslab Coordinates"); + VERIFY(coords[28], 72, "Hyperslab Coordinates"); + VERIFY(coords[29], 73, "Hyperslab Coordinates"); + HDfree(coords); + ret = H5Sget_select_bounds(sid3, low, high); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low[0], 2, "Selection 
Bounds"); + VERIFY(high[0], 73, "Selection Bounds"); + + /* Close region space */ + ret = H5Sclose(sid3); + CHECK(ret, FAIL, "H5Sclose"); + + /* Get the element selection */ + sid3 = H5Ropen_region(&rbuf[1], H5P_DEFAULT, H5P_DEFAULT); + CHECK(sid3, H5I_INVALID_HID, "H5Ropen_region"); + + /* Verify correct elements selected */ + ret = (int)H5Sget_select_npoints(sid3); + VERIFY(ret, 10, "H5Sget_select_npoints"); + ret = (int)H5Sget_select_elem_npoints(sid3); + VERIFY(ret, 10, "H5Sget_select_elem_npoints"); + + /* allocate space for the element points */ + coords = (hsize_t *)HDmalloc((size_t)ret * SPACE3_RANK * sizeof(hsize_t)); + + ret = H5Sget_select_elem_pointlist(sid3, (hsize_t)0, (hsize_t)ret, coords); + CHECK(ret, FAIL, "H5Sget_select_elem_pointlist"); + VERIFY(coords[0], coord1[0][0], "Element Coordinates"); + VERIFY(coords[1], coord1[1][0], "Element Coordinates"); + VERIFY(coords[2], coord1[2][0], "Element Coordinates"); + VERIFY(coords[3], coord1[3][0], "Element Coordinates"); + VERIFY(coords[4], coord1[4][0], "Element Coordinates"); + VERIFY(coords[5], coord1[5][0], "Element Coordinates"); + VERIFY(coords[6], coord1[6][0], "Element Coordinates"); + VERIFY(coords[7], coord1[7][0], "Element Coordinates"); + VERIFY(coords[8], coord1[8][0], "Element Coordinates"); + VERIFY(coords[9], coord1[9][0], "Element Coordinates"); + HDfree(coords); + ret = H5Sget_select_bounds(sid3, low, high); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low[0], 3, "Selection Bounds"); + VERIFY(high[0], 97, "Selection Bounds"); + + /* Close region space */ + ret = H5Sclose(sid3); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close first space */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dereferenced Dataset */ + ret = H5Dclose(dset3); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close Dataset */ + ret = H5Dclose(dset1); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataset access property list */ + ret = H5Pclose(dapl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file access property list */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Destroy references */ + for (i = 0; i < 2; i++) { + ret = H5Rdestroy(&wbuf[i]); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&rbuf[i]); + CHECK(ret, FAIL, "H5Rdestroy"); + } + } + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); + HDfree(dwbuf); + HDfree(drbuf); + +} /* test_reference_region_1D() */ + +/**************************************************************** +** +** test_reference_obj_deleted(): Test H5R (reference) object reference code. 
+** Tests for correct failures for deleted and non-existent objects
+**
+****************************************************************/
+static void
+test_reference_obj_deleted(void)
+{
+#ifndef NO_REFERENCE_TO_DELETED
+    hid_t fid1;          /* HDF5 File IDs */
+    hid_t dataset,       /* Dataset ID */
+        dset2;           /* Dereferenced dataset ID */
+    hid_t      sid1;     /* Dataspace ID */
+    H5R_ref_t  oref;     /* Object Reference to test */
+    H5O_type_t obj_type; /* Object type */
+    herr_t     ret;      /* Generic return value */
+#endif
+    MESSAGE(5, ("Testing References to Deleted Objects - SKIPPED for now due to no support\n"));
+#ifndef NO_REFERENCE_TO_DELETED
+    /* Create file */
+    fid1 = H5Fcreate(FILE_REF_OBJ_DEL, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(fid1, H5I_INVALID_HID, "H5Fcreate");
+
+    /* Create scalar dataspace for datasets */
+    sid1 = H5Screate_simple(0, NULL, NULL);
+    CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+    /* Create a dataset to reference (deleted later) */
+    dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Create a dataset */
+    dataset = H5Dcreate2(fid1, "Dataset2", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+    /* Create reference to dataset */
+    ret = H5Rcreate_object(fid1, "/Dataset1", H5P_DEFAULT, &oref);
+    CHECK(ret, FAIL, "H5Rcreate_object");
+    ret = H5Rget_obj_type3(&oref, H5P_DEFAULT, &obj_type);
+    CHECK(ret, FAIL, "H5Rget_obj_type3");
+    VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+    /* Write selection to disk */
+    ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &oref);
+    CHECK(ret, FAIL, "H5Dwrite");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Delete referenced dataset */
+    ret = H5Ldelete(fid1, "/Dataset1", H5P_DEFAULT);
+    CHECK(ret, FAIL, "H5Ldelete");
+
+    /* Close disk dataspace */
+    ret = H5Sclose(sid1);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close file */
+    ret = H5Fclose(fid1);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /* Destroy reference */
+    ret = H5Rdestroy(&oref);
+    CHECK(ret, FAIL, "H5Rdestroy");
+
+    /* Re-open the file */
+    fid1 = H5Fopen(FILE_REF_OBJ_DEL, H5F_ACC_RDWR, H5P_DEFAULT);
+    CHECK(fid1, H5I_INVALID_HID, "H5Fopen");
+
+    /* Open the dataset */
+    dataset = H5Dopen2(fid1, "/Dataset2", H5P_DEFAULT);
+    CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+    /* Read selection from disk */
+    ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &oref);
+    CHECK(ret, FAIL, "H5Dread");
+
+    /* Open deleted dataset object */
+    dset2 = H5Ropen_object(&oref, H5P_DEFAULT, H5P_DEFAULT);
+    VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close file */
+    ret = H5Fclose(fid1);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /* Destroy reference */
+    ret = H5Rdestroy(&oref);
+    CHECK(ret, FAIL, "H5Rdestroy");
+#endif
+} /* test_reference_obj_deleted() */
+
+/****************************************************************
+**
+** test_deref_iter_op(): Iterator callback for test_reference_group_iterate()
+** test.
+** +****************************************************************/ +static herr_t +test_deref_iter_op(hid_t H5_ATTR_UNUSED group, const char *name, const H5L_info2_t H5_ATTR_UNUSED *info, + void *op_data) +{ + int *count = (int *)op_data; /* Pointer to name counter */ + herr_t ret_value; + + /* Simple check for correct names */ + if (*count == 0) { + if (HDstrcmp(name, DSETNAME2) == 0) + ret_value = 0; + else + ret_value = -1; + } /* end if */ + else if (*count == 1) { + if (HDstrcmp(name, GROUPNAME2) == 0) + ret_value = 0; + else + ret_value = -1; + } /* end if */ + else if (*count == 2) { + if (HDstrcmp(name, GROUPNAME3) == 0) + ret_value = 0; + else + ret_value = -1; + } /* end if */ + else + ret_value = -1; + + (*count)++; + + return (ret_value); +} /* end test_deref_iter_op() */ + +/**************************************************************** +** +** test_reference_group(): Test H5R (reference) object reference code. +** Tests for correct behavior of various routines on dereferenced group +** +****************************************************************/ +static void +test_reference_group(void) +{ + hid_t fid = -1; /* File ID */ + hid_t gid = -1, gid2 = -1; /* Group IDs */ + hid_t did; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + H5R_ref_t wref; /* Reference to write */ + H5R_ref_t rref; /* Reference to read */ + H5G_info_t ginfo; /* Group info struct */ + char objname[NAME_SIZE]; /* Buffer to store name */ + H5O_info2_t oinfo; /* Object info struct */ + int count = 0; /* Count within iterated group */ + ssize_t size; /* Name length */ + herr_t ret; + + /* Create file with a group and a dataset containing an object reference to the group */ + fid = H5Fcreate(FILE_REF_GRP, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace to use for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, H5I_INVALID_HID, "H5Screate"); + + /* Create group to refer to */ + gid = H5Gcreate2(fid, GROUPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, H5I_INVALID_HID, "H5Gcreate2"); + + /* Create nested groups */ + gid2 = H5Gcreate2(gid, GROUPNAME2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid2, H5I_INVALID_HID, "H5Gcreate2"); + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + gid2 = H5Gcreate2(gid, GROUPNAME3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid2, H5I_INVALID_HID, "H5Gcreate2"); + ret = H5Gclose(gid2); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create bottom dataset */ + did = H5Dcreate2(gid, DSETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create dataset */ + did = H5Dcreate2(fid, DSETNAME, H5T_STD_REF, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); + + /* Create reference to group */ + ret = H5Rcreate_object(fid, GROUPNAME, H5P_DEFAULT, &wref); + CHECK(ret, FAIL, "H5Rcreate_object"); + + /* Write reference to disk */ + ret = H5Dwrite(did, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wref); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close objects */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Destroy reference */ + ret = H5Rdestroy(&wref); + CHECK(ret, FAIL, "H5Rdestroy"); + + /* Re-open file */ + fid = H5Fopen(FILE_REF_GRP, H5F_ACC_RDWR, 
H5P_DEFAULT); + CHECK(fid, H5I_INVALID_HID, "H5Fopen"); + + /* Re-open dataset */ + did = H5Dopen2(fid, DSETNAME, H5P_DEFAULT); + CHECK(did, H5I_INVALID_HID, "H5Dopen2"); + + /* Read in the reference */ + ret = H5Dread(did, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rref); + CHECK(ret, FAIL, "H5Dread"); + + /* Dereference to get the group */ + gid = H5Ropen_object(&rref, H5P_DEFAULT, H5P_DEFAULT); + CHECK(gid, H5I_INVALID_HID, "H5Ropen_object"); + + /* Iterate through objects in dereferenced group */ + ret = H5Literate2(gid, H5_INDEX_NAME, H5_ITER_INC, NULL, test_deref_iter_op, &count); + CHECK(ret, FAIL, "H5Literate"); + + /* Various queries on the group opened */ + ret = H5Gget_info(gid, &ginfo); + CHECK(ret, FAIL, "H5Gget_info"); + VERIFY(ginfo.nlinks, 3, "H5Gget_info"); + + size = H5Lget_name_by_idx(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, objname, (size_t)NAME_SIZE, + H5P_DEFAULT); + CHECK(size, (-1), "H5Lget_name_by_idx"); + VERIFY_STR(objname, DSETNAME2, "H5Lget_name_by_idx"); + + ret = H5Oget_info_by_idx3(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, &oinfo, H5O_INFO_BASIC, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_idx3"); + VERIFY(oinfo.type, H5O_TYPE_DATASET, "H5Oget_info_by_idx3"); + + /* Unlink one of the objects in the dereferenced group */ + ret = H5Ldelete(gid, GROUPNAME2, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Delete dataset object in dereferenced group (with other dataset still open) */ + ret = H5Ldelete(gid, DSETNAME2, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + + /* Close objects */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Destroy reference */ + ret = H5Rdestroy(&rref); + CHECK(ret, FAIL, "H5Rdestroy"); +} /* test_reference_group() */ + +/**************************************************************** +** +** test_reference_attr(): Test basic H5R (reference) attribute reference code. 
+** Tests references to attributes on various kinds of objects +** +****************************************************************/ +static void +test_reference_attr(void) +{ + hid_t fid; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t group; /* Group ID */ + hid_t attr; /* Attribute ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + hsize_t dims[] = {SPACE1_DIM1}; + hid_t dapl_id; /* Dataset access property list */ + H5R_ref_t ref_wbuf[SPACE1_DIM1], /* Buffer to write to disk */ + ref_rbuf[SPACE1_DIM1]; /* Buffer read from disk */ + unsigned wbuf[SPACE1_DIM1], rbuf[SPACE1_DIM1]; + unsigned i; /* Local index variables */ + H5O_type_t obj_type; /* Object type */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Attribute Reference Functions\n")); + + /* Create file */ + fid = H5Fcreate(FILE_REF_ATTR, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid = H5Screate_simple(SPACE1_RANK, dims, NULL); + CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create dataset access property list */ + dapl_id = H5Pcreate(H5P_DATASET_ACCESS); + CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Create a group */ + group = H5Gcreate2(fid, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); + + /* Create an attribute for the dataset */ + attr = H5Acreate2(group, "Attr2", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); + + for (i = 0; i < SPACE1_DIM1; i++) + wbuf[i] = (i * 3) + 1; + + /* Write attribute to disk */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Create a dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Create an attribute for the dataset */ + attr = H5Acreate2(dataset, "Attr1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); + + for (i = 0; i < SPACE1_DIM1; i++) + wbuf[i] = i * 3; + + /* Write attribute to disk */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create another dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create a datatype to refer to */ + tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(tid, H5I_INVALID_HID, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Save datatype for later */ + ret = H5Tcommit2(group, "Datatype1", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Create an attribute for the datatype */ + attr = H5Acreate2(tid, "Attr3", H5T_NATIVE_UINT, 
sid, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr, H5I_INVALID_HID, "H5Acreate2");
+
+    for (i = 0; i < SPACE1_DIM1; i++)
+        wbuf[i] = (i * 3) + 2;
+
+    /* Write attribute to disk */
+    ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf);
+    CHECK(ret, FAIL, "H5Awrite");
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Close datatype */
+    ret = H5Tclose(tid);
+    CHECK(ret, FAIL, "H5Tclose");
+
+    /* Close group */
+    ret = H5Gclose(group);
+    CHECK(ret, FAIL, "H5Gclose");
+
+    /* Create a dataset */
+    dataset = H5Dcreate2(fid, "Dataset3", H5T_STD_REF, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+    /* Create reference to dataset1 attribute */
+    ret = H5Rcreate_attr(fid, "/Group1/Dataset1", "Attr1", H5P_DEFAULT, &ref_wbuf[0]);
+    CHECK(ret, FAIL, "H5Rcreate_attr");
+    ret = H5Rget_obj_type3(&ref_wbuf[0], H5P_DEFAULT, &obj_type);
+    CHECK(ret, FAIL, "H5Rget_obj_type3");
+    VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+    /* Create reference to dataset2 attribute */
+    ret = H5Rcreate_attr(fid, "/Group1/Dataset2", "Attr1", H5P_DEFAULT, &ref_wbuf[1]);
+    CHECK(ret, FAIL, "H5Rcreate_attr");
+    ret = H5Rget_obj_type3(&ref_wbuf[1], H5P_DEFAULT, &obj_type);
+    CHECK(ret, FAIL, "H5Rget_obj_type3");
+    VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+    /* Create reference to group attribute */
+    ret = H5Rcreate_attr(fid, "/Group1", "Attr2", H5P_DEFAULT, &ref_wbuf[2]);
+    CHECK(ret, FAIL, "H5Rcreate_attr");
+    ret = H5Rget_obj_type3(&ref_wbuf[2], H5P_DEFAULT, &obj_type);
+    CHECK(ret, FAIL, "H5Rget_obj_type3");
+    VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3");
+
+    /* Create reference to named datatype attribute */
+    ret = H5Rcreate_attr(fid, "/Group1/Datatype1", "Attr3", H5P_DEFAULT, &ref_wbuf[3]);
+    CHECK(ret, FAIL, "H5Rcreate_attr");
+    ret = H5Rget_obj_type3(&ref_wbuf[3], H5P_DEFAULT, &obj_type);
+    CHECK(ret, FAIL, "H5Rget_obj_type3");
+    VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3");
+
+    /* Write selection to disk */
+    ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_wbuf);
+    CHECK(ret, FAIL, "H5Dwrite");
+
+    /* Close disk dataspace */
+    ret = H5Sclose(sid);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close file */
+    ret = H5Fclose(fid);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /* Re-open the file */
+    fid = H5Fopen(FILE_REF_ATTR, H5F_ACC_RDWR, H5P_DEFAULT);
+    CHECK(fid, H5I_INVALID_HID, "H5Fopen");
+
+    /* Open the dataset */
+    dataset = H5Dopen2(fid, "/Dataset3", H5P_DEFAULT);
+    CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+    /* Read selection from disk */
+    ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_rbuf);
+    CHECK(ret, FAIL, "H5Dread");
+
+    /* Open attribute on dataset object */
+    attr = H5Ropen_attr(&ref_rbuf[0], H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr");
+
+    /* Check information in referenced dataset */
+    sid = H5Aget_space(attr);
+    CHECK(sid, H5I_INVALID_HID, "H5Aget_space");
+
+    ret = (int)H5Sget_simple_extent_npoints(sid);
+    VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints");
+
+    /* Read from disk */
+    ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf);
+    CHECK(ret, FAIL, "H5Aread");
+
+    for (i = 0; i < SPACE1_DIM1; i++)
+        VERIFY(rbuf[i], i * 3, "Data");
+
+    /* Close dereferenced attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Open attribute on group object */
+    attr = H5Ropen_attr(&ref_rbuf[2], H5P_DEFAULT, H5P_DEFAULT);
+
CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr"); + + /* Read from disk */ + ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf); + CHECK(ret, FAIL, "H5Aread"); + + for (i = 0; i < SPACE1_DIM1; i++) + VERIFY(rbuf[i], (i * 3) + 1, "Data"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Open attribute on named datatype object */ + attr = H5Ropen_attr(&ref_rbuf[3], H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr"); + + /* Read from disk */ + ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf); + CHECK(ret, FAIL, "H5Aread"); + + for (i = 0; i < SPACE1_DIM1; i++) + VERIFY(rbuf[i], (i * 3) + 2, "Data"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close dataset access property list */ + ret = H5Pclose(dapl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + for (i = 0; i < SPACE1_DIM1; i++) { + ret = H5Rdestroy(&ref_wbuf[i]); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&ref_rbuf[i]); + CHECK(ret, FAIL, "H5Rdestroy"); + } +} /* test_reference_attr() */ + +/**************************************************************** +** +** test_reference_external(): +** Tests external references on various kinds of objects +** +****************************************************************/ +static void +test_reference_external(void) +{ + hid_t fid1, fid2; /* HDF5 File ID */ + hid_t dataset; /* Dataset ID */ + hid_t group; /* Group ID */ + hid_t attr; /* Attribute ID */ + hid_t sid; /* Dataspace ID */ + hid_t tid; /* Datatype ID */ + hsize_t dims[] = {SPACE1_DIM1}; + hid_t dapl_id; /* Dataset access property list */ + H5R_ref_t ref_wbuf[SPACE1_DIM1], /* Buffer to write to disk */ + ref_rbuf[SPACE1_DIM1]; /* Buffer read from disk */ + unsigned wbuf[SPACE1_DIM1], rbuf[SPACE1_DIM1]; + unsigned i; /* Local index variables */ + H5O_type_t obj_type; /* Object type */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing External References Functions\n")); + + /* Create file */ + fid1 = H5Fcreate(FILE_REF_EXT1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid = H5Screate_simple(SPACE1_RANK, dims, NULL); + CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create dataset access property list */ + dapl_id = H5Pcreate(H5P_DATASET_ACCESS); + CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Create a group */ + group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); + + /* Create an attribute for the dataset */ + attr = H5Acreate2(group, "Attr2", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); + + for (i = 0; i < SPACE1_DIM1; i++) + wbuf[i] = (i * 3) + 1; + + /* Write attribute to disk */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Create a dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Create an attribute for the dataset */ + attr = H5Acreate2(dataset, "Attr1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, 
H5I_INVALID_HID, "H5Acreate2"); + + for (i = 0; i < SPACE1_DIM1; i++) + wbuf[i] = i * 3; + + /* Write attribute to disk */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create another dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create a datatype to refer to */ + tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(tid, H5I_INVALID_HID, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Save datatype for later */ + ret = H5Tcommit2(group, "Datatype1", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Create an attribute for the datatype */ + attr = H5Acreate2(tid, "Attr3", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); + + for (i = 0; i < SPACE1_DIM1; i++) + wbuf[i] = (i * 3) + 2; + + /* Write attribute to disk */ + ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf); + CHECK(ret, FAIL, "H5Awrite"); + + /* Close attribute */ + ret = H5Aclose(attr); + CHECK(ret, FAIL, "H5Aclose"); + + /* Close datatype */ + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create reference to dataset1 attribute */ + ret = H5Rcreate_attr(fid1, "/Group1/Dataset1", "Attr1", H5P_DEFAULT, &ref_wbuf[0]); + CHECK(ret, FAIL, "H5Rcreate_attr"); + ret = H5Rget_obj_type3(&ref_wbuf[0], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Create reference to dataset2 attribute */ + ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", "Attr1", H5P_DEFAULT, &ref_wbuf[1]); + CHECK(ret, FAIL, "H5Rcreate_attr"); + ret = H5Rget_obj_type3(&ref_wbuf[1], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Create reference to group attribute */ + ret = H5Rcreate_attr(fid1, "/Group1", "Attr2", H5P_DEFAULT, &ref_wbuf[2]); + CHECK(ret, FAIL, "H5Rcreate_attr"); + ret = H5Rget_obj_type3(&ref_wbuf[2], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3"); + + /* Create reference to named datatype attribute */ + ret = H5Rcreate_attr(fid1, "/Group1/Datatype1", "Attr3", H5P_DEFAULT, &ref_wbuf[3]); + CHECK(ret, FAIL, "H5Rcreate_attr"); + ret = H5Rget_obj_type3(&ref_wbuf[3], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3"); + + /* Close disk dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Create file */ + fid2 = H5Fcreate(FILE_REF_EXT2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid2, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid = 
H5Screate_simple(SPACE1_RANK, dims, NULL);
+    CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+
+    /* Create a dataset */
+    dataset = H5Dcreate2(fid2, "Dataset3", H5T_STD_REF, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+    /* Write selection to disk */
+    ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_wbuf);
+    CHECK(ret, FAIL, "H5Dwrite");
+
+    /* Close disk dataspace */
+    ret = H5Sclose(sid);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close file */
+    ret = H5Fclose(fid2);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /* Re-open the file */
+    fid2 = H5Fopen(FILE_REF_EXT2, H5F_ACC_RDWR, H5P_DEFAULT);
+    CHECK(fid2, H5I_INVALID_HID, "H5Fopen");
+
+    /* Open the dataset */
+    dataset = H5Dopen2(fid2, "/Dataset3", H5P_DEFAULT);
+    CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+    /* Read selection from disk */
+    ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_rbuf);
+    CHECK(ret, FAIL, "H5Dread");
+
+    /* Open attribute on dataset object */
+    attr = H5Ropen_attr(&ref_rbuf[0], H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr");
+
+    /* Check information in referenced dataset */
+    sid = H5Aget_space(attr);
+    CHECK(sid, H5I_INVALID_HID, "H5Aget_space");
+
+    ret = (int)H5Sget_simple_extent_npoints(sid);
+    VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints");
+
+    /* Read from disk */
+    ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf);
+    CHECK(ret, FAIL, "H5Aread");
+
+    for (i = 0; i < SPACE1_DIM1; i++)
+        VERIFY(rbuf[i], i * 3, "Data");
+
+    /* Close dereferenced attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Open attribute on group object */
+    attr = H5Ropen_attr(&ref_rbuf[2], H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr");
+
+    /* Read from disk */
+    ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf);
+    CHECK(ret, FAIL, "H5Aread");
+
+    for (i = 0; i < SPACE1_DIM1; i++)
+        VERIFY(rbuf[i], (i * 3) + 1, "Data");
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Open attribute on named datatype object */
+    attr = H5Ropen_attr(&ref_rbuf[3], H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr");
+
+    /* Read from disk */
+    ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf);
+    CHECK(ret, FAIL, "H5Aread");
+
+    for (i = 0; i < SPACE1_DIM1; i++)
+        VERIFY(rbuf[i], (i * 3) + 2, "Data");
+
+    /* Close attribute */
+    ret = H5Aclose(attr);
+    CHECK(ret, FAIL, "H5Aclose");
+
+    /* Close dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close dataset access property list */
+    ret = H5Pclose(dapl_id);
+    CHECK(ret, FAIL, "H5Pclose");
+
+    /* Close file */
+    ret = H5Fclose(fid2);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /* Free memory buffers */
+    for (i = 0; i < SPACE1_DIM1; i++) {
+        ret = H5Rdestroy(&ref_wbuf[i]);
+        CHECK(ret, FAIL, "H5Rdestroy");
+        ret = H5Rdestroy(&ref_rbuf[i]);
+        CHECK(ret, FAIL, "H5Rdestroy");
+    }
+} /* test_reference_external() */
+
+/****************************************************************
+**
+** test_reference_compat_conv(): Test basic H5R (reference) object reference code.
+** Tests deprecated API routines and type conversion.
+** +****************************************************************/ +#if 0 +static void +test_reference_compat_conv(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset, dset2; /* Dataset ID */ + hid_t group, group2; /* Group ID */ + hid_t sid1, sid2, sid3; /* Dataspace IDs */ + hid_t tid1, tid2; /* Datatype ID */ + hsize_t dims1[] = {SPACE1_DIM1}, dims2[] = {SPACE2_DIM1, SPACE2_DIM2}, + dims3[] = {SPACE1_DIM1}; /* Purposely set dimension larger to test NULL references */ + hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ + hsize_t coord1[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ + hobj_ref_t *wbuf_obj = NULL; /* Buffer to write to disk */ + H5R_ref_t *rbuf_obj = NULL; /* Buffer read from disk */ + hdset_reg_ref_t *wbuf_reg = NULL; /* Buffer to write to disk */ + H5R_ref_t *rbuf_reg = NULL; /* Buffer read from disk */ + H5O_type_t obj_type; /* Object type */ + herr_t ret; /* Generic return value */ + unsigned int i; /* Counter */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Deprecated Object Reference Functions\n")); + + /* Allocate write & read buffers */ + wbuf_obj = (hobj_ref_t *)HDcalloc(sizeof(hobj_ref_t), SPACE1_DIM1); + rbuf_obj = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + wbuf_reg = HDcalloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1); + rbuf_reg = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + + /* Create file */ + fid1 = H5Fcreate(FILE_REF_COMPAT, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create another dataspace for datasets */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create another dataspace for datasets */ + sid3 = H5Screate_simple(SPACE1_RANK, dims3, NULL); + CHECK(sid3, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create a group */ + group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); + + /* Create a dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create another dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create a datatype to refer to */ + tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Save datatype for later */ + ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Close datatype */ + ret = H5Tclose(tid1); + 
CHECK(ret, FAIL, "H5Tclose"); + + /* Close group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create a dataset with object reference datatype */ + dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF_OBJ, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Create reference to dataset */ + ret = H5Rcreate(&wbuf_obj[0], fid1, "/Group1/Dataset1", H5R_OBJECT, H5I_INVALID_HID); + CHECK(ret, FAIL, "H5Rcreate"); + + /* Create reference to dataset */ + ret = H5Rcreate(&wbuf_obj[1], fid1, "/Group1/Dataset2", H5R_OBJECT, H5I_INVALID_HID); + CHECK(ret, FAIL, "H5Rcreate"); + + /* Create reference to group */ + ret = H5Rcreate(&wbuf_obj[2], fid1, "/Group1", H5R_OBJECT, H5I_INVALID_HID); + CHECK(ret, FAIL, "H5Rcreate"); + + /* Create reference to named datatype */ + ret = H5Rcreate(&wbuf_obj[3], fid1, "/Group1/Datatype1", H5R_OBJECT, H5I_INVALID_HID); + CHECK(ret, FAIL, "H5Rcreate"); + + /* Write references to disk */ + ret = H5Dwrite(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_obj); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create a dataset with region reference datatype */ + dataset = H5Dcreate2(fid1, "Dataset4", H5T_STD_REF_DSETREG, sid3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Select 6x6 hyperslab for first reference */ + start[0] = 2; + start[1] = 2; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 6; + block[1] = 6; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create first dataset region */ + ret = H5Rcreate(&wbuf_reg[0], fid1, "/Group1/Dataset1", H5R_DATASET_REGION, sid2); + CHECK(ret, FAIL, "H5Rcreate"); + + /* Select sequence of ten points for second reference */ + coord1[0][0] = 6; + coord1[0][1] = 9; + coord1[1][0] = 2; + coord1[1][1] = 2; + coord1[2][0] = 8; + coord1[2][1] = 4; + coord1[3][0] = 1; + coord1[3][1] = 6; + coord1[4][0] = 2; + coord1[4][1] = 8; + coord1[5][0] = 3; + coord1[5][1] = 2; + coord1[6][0] = 0; + coord1[6][1] = 4; + coord1[7][0] = 9; + coord1[7][1] = 0; + coord1[8][0] = 7; + coord1[8][1] = 1; + coord1[9][0] = 3; + coord1[9][1] = 3; + ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Create second dataset region */ + ret = H5Rcreate(&wbuf_reg[1], fid1, "/Group1/Dataset2", H5R_DATASET_REGION, sid2); + CHECK(ret, FAIL, "H5Rcreate"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_reg); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close disk dataspaces */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(sid3); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + fid1 = H5Fopen(FILE_REF_COMPAT, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); + + /* Open the object reference dataset */ + dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_obj); + CHECK(ret, FAIL, 
"H5Dread"); + + /* Verify type of objects pointed at */ + ret = H5Rget_obj_type3(&rbuf_obj[0], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + ret = H5Rget_obj_type3(&rbuf_obj[1], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + ret = H5Rget_obj_type3(&rbuf_obj[2], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3"); + + ret = H5Rget_obj_type3(&rbuf_obj[3], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3"); + + /* Make sure the referenced objects can be opened */ + dset2 = H5Ropen_object(&rbuf_obj[0], H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); + + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + + dset2 = H5Ropen_object(&rbuf_obj[1], H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); + + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + + group2 = H5Ropen_object(&rbuf_obj[2], H5P_DEFAULT, H5P_DEFAULT); + CHECK(group2, H5I_INVALID_HID, "H5Ropen_object"); + + ret = H5Gclose(group2); + CHECK(ret, FAIL, "H5Gclose"); + + tid2 = H5Ropen_object(&rbuf_obj[3], H5P_DEFAULT, H5P_DEFAULT); + CHECK(tid2, H5I_INVALID_HID, "H5Ropen_object"); + + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Open the dataset region reference dataset */ + dataset = H5Dopen2(fid1, "/Dataset4", H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify type of objects pointed at */ + ret = H5Rget_obj_type3(&rbuf_reg[0], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + ret = H5Rget_obj_type3(&rbuf_reg[1], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + /* Make sure the referenced objects can be opened */ + dset2 = H5Ropen_object(&rbuf_reg[0], H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); + + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + + dset2 = H5Ropen_object(&rbuf_reg[1], H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); + + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Destroy references */ + for (i = 0; i < dims1[0]; i++) { + ret = H5Rdestroy(&rbuf_obj[i]); + CHECK(ret, FAIL, "H5Rdestroy"); + } + for (i = 0; i < dims3[0]; i++) { + ret = H5Rdestroy(&rbuf_reg[i]); + CHECK(ret, FAIL, "H5Rdestroy"); + } + + /* Free memory buffers */ + HDfree(wbuf_obj); + HDfree(rbuf_obj); + HDfree(wbuf_reg); + HDfree(rbuf_reg); +} /* test_reference_compat() */ +#endif + +/**************************************************************** +** +** test_reference_perf(): Test basic H5R (reference) object reference +** performance. 
+** +****************************************************************/ +static void +test_reference_perf(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset, /* Dataset ID */ + dset2; /* Dereferenced dataset ID */ + hid_t group; /* Group ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Datatype ID */ + hsize_t dims1[] = {1}; + hid_t dapl_id; /* Dataset access property list */ + H5R_ref_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf; /* temp. buffer read from disk */ + H5R_ref_t *wbuf_reg, /* buffer to write to disk */ + *rbuf_reg; /* buffer read from disk */ + hobj_ref_t *wbuf_deprec, /* deprecated references */ + *rbuf_deprec; /* deprecated references */ + hdset_reg_ref_t *wbuf_reg_deprec, /* deprecated references*/ + *rbuf_reg_deprec; /* deprecated references*/ + unsigned *ibuf, *obuf; + unsigned i, j; /* Counters */ + H5O_type_t obj_type; /* Object type */ + herr_t ret; /* Generic return value */ + double t1, t2, t; /* Timers */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Object Reference Performance\n")); + + /* Allocate write & read buffers */ + wbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + obuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1); + ibuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1); + wbuf_deprec = (hobj_ref_t *)HDcalloc(sizeof(hobj_ref_t), SPACE1_DIM1); + rbuf = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + rbuf_deprec = (hobj_ref_t *)HDcalloc(sizeof(hobj_ref_t), SPACE1_DIM1); + tbuf = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + wbuf_reg = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + rbuf_reg = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1); + wbuf_reg_deprec = (hdset_reg_ref_t *)HDcalloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1); + rbuf_reg_deprec = (hdset_reg_ref_t *)HDcalloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1); + + for (i = 0; i < SPACE1_DIM1; i++) + obuf[i] = i * 3; + + /* Create file */ + fid1 = H5Fcreate(FILE_REF_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); + + /* Create dataset access property list */ + dapl_id = H5Pcreate(H5P_DATASET_ACCESS); + CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); + + /* Create a group */ + group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); + + /* Create a dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create another dataset (inside Group1) */ + dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create a datatype to refer to */ + tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); + CHECK(ret, FAIL, 
"H5Tinsert"); + + ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Save datatype for later */ + ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close group */ + ret = H5Gclose(group); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + t = 0; + for (i = 0; i < MAX_ITER_CREATE; i++) { + t1 = H5_get_time(); + ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); + CHECK(ret, FAIL, "H5Rcreate_object"); + t2 = H5_get_time(); + t += t2 - t1; + ret = H5Rdestroy(&wbuf[0]); + CHECK(ret, FAIL, "H5Rdestroy"); + } + if (VERBOSE_MED) + HDprintf("--- Object reference create time: %lfs\n", t / MAX_ITER_CREATE); + + /* Create reference to dataset */ + ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); + CHECK(ret, FAIL, "H5Rcreate_object"); + ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type); + CHECK(ret, FAIL, "H5Rget_obj_type3"); + VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); + + t = 0; + for (i = 0; i < MAX_ITER_WRITE; i++) { + t1 = H5_get_time(); + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + HDprintf("--- Object reference write time: %lfs\n", t / MAX_ITER_WRITE); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); +#if 0 + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset4", H5T_STD_REF_OBJ, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + t = 0; + for (i = 0; i < MAX_ITER_CREATE; i++) { + t1 = H5_get_time(); + ret = H5Rcreate(&wbuf_deprec[0], fid1, "/Group1/Dataset1", H5R_OBJECT1, H5I_INVALID_HID); + CHECK(ret, FAIL, "H5Rcreate"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + HDprintf("--- Deprecated object reference create time: %lfs\n", t / MAX_ITER_CREATE); + + /* Create reference to dataset */ + ret = H5Rcreate(&wbuf_deprec[0], fid1, "/Group1/Dataset1", H5R_OBJECT1, H5I_INVALID_HID); + CHECK(ret, FAIL, "H5Rcreate"); + + t = 0; + for (i = 0; i < MAX_ITER_WRITE; i++) { + t1 = H5_get_time(); + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_deprec); + CHECK(ret, FAIL, "H5Dwrite"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + HDprintf("--- Deprecated object reference write time: %lfs\n", t / MAX_ITER_WRITE); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); +#endif + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset5", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + t = 0; + for (i = 0; i < MAX_ITER_CREATE; i++) { + t1 = H5_get_time(); + /* Store first dataset region */ + ret = H5Rcreate_region(fid1, "/Group1/Dataset1", sid1, H5P_DEFAULT, &wbuf_reg[0]); + CHECK(ret, FAIL, "H5Rcreate_region"); + t2 = H5_get_time(); + t += t2 - t1; + ret = H5Rdestroy(&wbuf_reg[0]); + CHECK(ret, FAIL, "H5Rdestroy"); + } + if (VERBOSE_MED) + HDprintf("--- Region reference create time: %lfs\n", t / MAX_ITER_CREATE); + + /* Store first dataset region */ 
+ ret = H5Rcreate_region(fid1, "/Group1/Dataset1", sid1, H5P_DEFAULT, &wbuf_reg[0]); + CHECK(ret, FAIL, "H5Rcreate_region"); + + t = 0; + for (i = 0; i < MAX_ITER_WRITE; i++) { + t1 = H5_get_time(); + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_reg); + CHECK(ret, FAIL, "H5Dwrite"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + HDprintf("--- Region reference write time: %lfs\n", t / MAX_ITER_WRITE); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); +#if 0 + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset6", H5T_STD_REF_DSETREG, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + t = 0; + for (i = 0; i < MAX_ITER_CREATE; i++) { + t1 = H5_get_time(); + /* Store first dataset region */ + ret = H5Rcreate(&wbuf_reg_deprec[0], fid1, "/Group1/Dataset1", H5R_DATASET_REGION1, sid1); + CHECK(ret, FAIL, "H5Rcreate"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + HDprintf("--- Deprecated region reference create time: %lfs\n", t / MAX_ITER_CREATE); + + t = 0; + for (i = 0; i < MAX_ITER_WRITE; i++) { + t1 = H5_get_time(); + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_reg_deprec); + CHECK(ret, FAIL, "H5Dwrite"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + HDprintf("--- Deprecated region reference write time: %lfs\n", t / MAX_ITER_WRITE); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); +#endif + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open the file */ + fid1 = H5Fopen(FILE_REF_OBJ, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + t = 0; + for (i = 0; i < MAX_ITER_READ; i++) { + t1 = H5_get_time(); + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + t2 = H5_get_time(); + t += t2 - t1; + ret = H5Rdestroy(&rbuf[0]); + CHECK(ret, FAIL, "H5Rdestroy"); + } + if (VERBOSE_MED) + HDprintf("--- Object reference read time: %lfs\n", t / MAX_ITER_READ); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Open dataset object */ + dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id); + CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); + + /* Check information in referenced dataset */ + sid1 = H5Dget_space(dset2); + CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); + + ret = (int)H5Sget_simple_extent_npoints(sid1); + VERIFY(ret, dims1[0], "H5Sget_simple_extent_npoints"); + + /* Read from disk */ + ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf); + CHECK(ret, FAIL, "H5Dread"); + + for (i = 0; i < dims1[0]; i++) + VERIFY(ibuf[i], i * 3, "Data"); + + /* Close dereferenced Dataset */ + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); +#if 0 + /* Open the dataset */ + dataset = H5Dopen2(fid1, "/Dataset4", H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + t = 0; + for (i = 0; i < MAX_ITER_READ; i++) { + t1 = H5_get_time(); + /* Read 
selection from disk */ + ret = H5Dread(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_deprec); + CHECK(ret, FAIL, "H5Dread"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + HDprintf("--- Deprecated object reference read time: %lfs\n", t / MAX_ITER_READ); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); +#endif + /* Open the dataset */ + dataset = H5Dopen2(fid1, "/Dataset5", H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + t = 0; + for (i = 0; i < MAX_ITER_READ; i++) { + t1 = H5_get_time(); + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg); + CHECK(ret, FAIL, "H5Dread"); + t2 = H5_get_time(); + t += t2 - t1; + ret = H5Rdestroy(&rbuf_reg[0]); + CHECK(ret, FAIL, "H5Rdestroy"); + } + if (VERBOSE_MED) + HDprintf("--- Region reference read time: %lfs\n", t / MAX_ITER_READ); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg); + CHECK(ret, FAIL, "H5Dread"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); +#if 0 + /* Open the dataset */ + dataset = H5Dopen2(fid1, "/Dataset6", H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + t = 0; + for (i = 0; i < MAX_ITER_READ; i++) { + t1 = H5_get_time(); + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg_deprec); + CHECK(ret, FAIL, "H5Dread"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + HDprintf("--- Deprecated region reference read time: %lfs\n", t / MAX_ITER_READ); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); +#endif + /* Close dataset access property list */ + ret = H5Pclose(dapl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Destroy references */ + for (j = 0; j < dims1[0]; j++) { + ret = H5Rdestroy(&wbuf[j]); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&wbuf_reg[j]); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&rbuf[j]); + CHECK(ret, FAIL, "H5Rdestroy"); + ret = H5Rdestroy(&rbuf_reg[j]); + CHECK(ret, FAIL, "H5Rdestroy"); + } + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); + HDfree(wbuf_reg); + HDfree(rbuf_reg); + HDfree(wbuf_deprec); + HDfree(rbuf_deprec); + HDfree(wbuf_reg_deprec); + HDfree(rbuf_reg_deprec); + HDfree(tbuf); + HDfree(ibuf); + HDfree(obuf); +} /* test_reference_perf() */ + +/**************************************************************** +** +** test_reference(): Main H5R reference testing routine. 
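/* For context, the H5R_ref_t life cycle exercised by the sub-tests invoked below boils
 * down to create -> inspect/open -> destroy.  A minimal sketch (error checking omitted;
 * `file_id` and the path "/Group1/Dataset1" follow the objects set up by these tests):
 */
static void
example_object_reference(hid_t file_id)
{
    H5R_ref_t  ref;
    H5O_type_t obj_type;
    hid_t      obj_id;

    /* Create an in-memory reference to an existing object */
    H5Rcreate_object(file_id, "/Group1/Dataset1", H5P_DEFAULT, &ref);

    /* Query the referenced object's type, then open and close it */
    H5Rget_obj_type3(&ref, H5P_DEFAULT, &obj_type);
    obj_id = H5Ropen_object(&ref, H5P_DEFAULT, H5P_DEFAULT);
    H5Oclose(obj_id);

    /* References hold internal resources and must always be destroyed */
    H5Rdestroy(&ref);
}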
+** +****************************************************************/ +void +test_reference(void) +{ + H5F_libver_t low, high; /* Low and high bounds */ + const char *env_h5_drvr; /* File Driver value from environment */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing References\n")); + + /* Get the VFD to use */ + env_h5_drvr = HDgetenv(HDF5_DRIVER); + if (env_h5_drvr == NULL) + env_h5_drvr = "nomatch"; + + test_reference_params(); /* Test for correct parameter checking */ + test_reference_obj(); /* Test basic H5R object reference code */ + test_reference_vlen_obj(); /* Test reference within vlen */ + test_reference_cmpnd_obj(); /* Test reference within compound type */ + + /* Loop through all the combinations of low/high version bounds */ + for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { + for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { + + /* Invalid combinations, just continue */ + if (high == H5F_LIBVER_EARLIEST || high < low) + continue; + + test_reference_region(low, high); /* Test basic H5R dataset region reference code */ + test_reference_region_1D(low, high); /* Test H5R dataset region reference code for 1-D datasets */ + + } /* end high bound */ + } /* end low bound */ + + /* The following test is currently broken with the Direct VFD */ + if (HDstrcmp(env_h5_drvr, "direct") != 0) { + test_reference_obj_deleted(); /* Test H5R object reference code for deleted objects */ + } + + test_reference_group(); /* Test operations on dereferenced groups */ + test_reference_attr(); /* Test attribute references */ + test_reference_external(); /* Test external references */ +#if 0 + test_reference_compat_conv(); /* Test operations with old types */ +#endif + + test_reference_perf(); + +} /* test_reference() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_reference + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Quincey Koziol + * September 8, 1998 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +cleanup_reference(void) +{ + H5Fdelete(FILE_REF_PARAM, H5P_DEFAULT); + H5Fdelete(FILE_REF_OBJ, H5P_DEFAULT); + H5Fdelete(FILE_REF_VL_OBJ, H5P_DEFAULT); + H5Fdelete(FILE_REF_CMPND_OBJ, H5P_DEFAULT); + H5Fdelete(FILE_REF_REG, H5P_DEFAULT); + H5Fdelete(FILE_REF_REG_1D, H5P_DEFAULT); + H5Fdelete(FILE_REF_OBJ_DEL, H5P_DEFAULT); + H5Fdelete(FILE_REF_GRP, H5P_DEFAULT); + H5Fdelete(FILE_REF_ATTR, H5P_DEFAULT); + H5Fdelete(FILE_REF_EXT1, H5P_DEFAULT); + H5Fdelete(FILE_REF_EXT2, H5P_DEFAULT); + H5Fdelete(FILE_REF_COMPAT, H5P_DEFAULT); +} diff --git a/test/API/tselect.c b/test/API/tselect.c new file mode 100644 index 00000000000..a2f377d29c3 --- /dev/null +++ b/test/API/tselect.c @@ -0,0 +1,16314 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: tselect + * + * Test the Dataspace selection functionality + * + *************************************************************/ + +#define H5S_FRIEND /*suppress error about including H5Spkg */ + +/* Define this macro to indicate that the testing APIs should be available */ +#define H5S_TESTING + +#include "testhdf5.h" +#include "hdf5.h" +/* #include "H5Spkg.h" */ /* Dataspaces */ + +#define FILENAME "tselect.h5" + +/* 3-D dataset with fixed dimensions */ +#define SPACE1_NAME "Space1" +#define SPACE1_RANK 3 +#define SPACE1_DIM1 3 +#define SPACE1_DIM2 15 +#define SPACE1_DIM3 13 + +/* 2-D dataset with fixed dimensions */ +#define SPACE2_NAME "Space2" +#define SPACE2_RANK 2 +#define SPACE2_DIM1 30 +#define SPACE2_DIM2 26 +#define SPACE2A_RANK 1 +#define SPACE2A_DIM1 (SPACE2_DIM1 * SPACE2_DIM2) + +/* 2-D dataset with fixed dimensions */ +#define SPACE3_NAME "Space3" +#define SPACE3_RANK 2 +#define SPACE3_DIM1 15 +#define SPACE3_DIM2 26 + +/* 3-D dataset with fixed dimensions */ +#define SPACE4_NAME "Space4" +#define SPACE4_RANK 3 +#define SPACE4_DIM1 11 +#define SPACE4_DIM2 13 +#define SPACE4_DIM3 17 + +/* Number of random hyperslabs to test */ +#define NHYPERSLABS 10 + +/* Number of random hyperslab tests performed */ +#define NRAND_HYPER 100 + +/* 5-D dataset with fixed dimensions */ +#define SPACE5_NAME "Space5" +#define SPACE5_RANK 5 +#define SPACE5_DIM1 10 +#define SPACE5_DIM2 10 +#define SPACE5_DIM3 10 +#define SPACE5_DIM4 10 +#define SPACE5_DIM5 10 + +/* 1-D dataset with same size as 5-D dataset */ +#define SPACE6_RANK 1 +#define SPACE6_DIM1 (SPACE5_DIM1 * SPACE5_DIM2 * SPACE5_DIM3 * SPACE5_DIM4 * SPACE5_DIM5) + +/* 2-D dataset with easy dimension sizes */ +#define SPACE7_NAME "Space7" +#define SPACE7_RANK 2 +#define SPACE7_DIM1 10 +#define SPACE7_DIM2 10 +#define SPACE7_FILL 254 +#define SPACE7_CHUNK_DIM1 5 +#define SPACE7_CHUNK_DIM2 5 +#define SPACE7_NPOINTS 8 + +/* 4-D dataset with fixed dimensions */ +#define SPACE8_NAME "Space8" +#define SPACE8_RANK 4 +#define SPACE8_DIM1 11 +#define SPACE8_DIM2 13 +#define SPACE8_DIM3 17 +#define SPACE8_DIM4 19 + +/* Another 2-D dataset with easy dimension sizes */ +#define SPACE9_RANK 2 +#define SPACE9_DIM1 12 +#define SPACE9_DIM2 12 + +/* Element selection information */ +#define POINT1_NPOINTS 10 + +/* Chunked dataset information */ +#define DATASETNAME "ChunkArray" +#define NX_SUB 87 /* hyperslab dimensions */ +#define NY_SUB 61 +#define NZ_SUB 181 +#define NX 87 /* output buffer dimensions */ +#define NY 61 +#define NZ 181 +#define RANK_F 3 /* File dataspace rank */ +#define RANK_M 3 /* Memory dataspace rank */ +#define X 87 /* dataset dimensions */ +#define Y 61 +#define Z 181 +#define CHUNK_X 87 /* chunk dimensions */ +#define CHUNK_Y 61 +#define CHUNK_Z 181 + +/* Basic chunk size */ +#define SPACE10_DIM1 180 +#define SPACE10_CHUNK_SIZE 12 + +/* Information for bounds checking test */ +#define SPACE11_RANK 2 +#define SPACE11_DIM1 100 +#define SPACE11_DIM2 100 +#define SPACE11_NPOINTS 4 + +/* Information for offsets w/chunks test #2 */ +#define SPACE12_RANK 1 +#define SPACE12_DIM0 25 +#define SPACE12_CHUNK_DIM0 5 + +/* Information for Space rebuild test */ +#define SPACERE1_RANK 1 +#define SPACERE1_DIM0 20 +#define SPACERE2_RANK 2 +#define SPACERE2_DIM0 8 +#define SPACERE2_DIM1 12 +#define SPACERE3_RANK 3 +#define SPACERE3_DIM0 8 +#define SPACERE3_DIM1 12 +#define 
SPACERE3_DIM2 8 +#define SPACERE4_RANK 4 +#define SPACERE4_DIM0 8 +#define SPACERE4_DIM1 12 +#define SPACERE4_DIM2 8 +#define SPACERE4_DIM3 12 +#define SPACERE5_RANK 5 +#define SPACERE5_DIM0 8 +#define SPACERE5_DIM1 12 +#define SPACERE5_DIM2 8 +#define SPACERE5_DIM3 12 +#define SPACERE5_DIM4 8 + +/* Information for Space update diminfo test */ +#define SPACEUD1_DIM0 20 +#define SPACEUD3_DIM0 9 +#define SPACEUD3_DIM1 12 +#define SPACEUD3_DIM2 13 + +/* #defines for shape same / different rank tests */ +#define SS_DR_MAX_RANK 5 + +/* Information for regular hyperslab query test */ +#define SPACE13_RANK 3 +#define SPACE13_DIM1 50 +#define SPACE13_DIM2 50 +#define SPACE13_DIM3 50 +#define SPACE13_NPOINTS 4 + +/* Information for testing selection iterators */ +#define SEL_ITER_MAX_SEQ 256 + +/* Defines for test_hyper_io_1d() */ +#define DNAME "DSET_1D" +#define RANK 1 +#define NUMCHUNKS 3 +#define CHUNKSZ 20 +#define NUM_ELEMENTS NUMCHUNKS *CHUNKSZ + +/* Location comparison function */ +static int compare_size_t(const void *s1, const void *s2); + +static herr_t test_select_hyper_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, + void *operator_data); +static herr_t test_select_point_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, + void *operator_data); +static herr_t test_select_all_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, + void *operator_data); +static herr_t test_select_none_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, + void *operator_data); +static herr_t test_select_hyper_iter2(void *_elem, hid_t type_id, unsigned ndim, const hsize_t *point, + void *_operator_data); +static herr_t test_select_hyper_iter3(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, + void *operator_data); + +/**************************************************************** +** +** test_select_hyper_iter1(): Iterator for checking hyperslab iteration +** +****************************************************************/ +static herr_t +test_select_hyper_iter1(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned H5_ATTR_UNUSED ndim, + const hsize_t H5_ATTR_UNUSED *point, void *_operator_data) +{ + uint8_t *tbuf = (uint8_t *)_elem, /* temporary buffer pointer */ + **tbuf2 = (uint8_t **)_operator_data; /* temporary buffer handle */ + + if (*tbuf != **tbuf2) + return (-1); + else { + (*tbuf2)++; + return (0); + } +} /* end test_select_hyper_iter1() */ + +/**************************************************************** +** +** test_select_hyper(): Test basic H5S (dataspace) selection code. +** Tests hyperslabs of various sizes and dimensionalities. 
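/* For context, a hyperslab selection is described by per-dimension start/stride/count/block
 * arrays.  A minimal sketch with illustrative sizes (not this test's dataspaces); passing
 * NULL for stride and block selects the default of 1:
 */
static herr_t
example_hyperslab_selection(void)
{
    hsize_t dims[2]  = {20, 30};
    hsize_t start[2] = {2, 4};  /* offset of the block of elements           */
    hsize_t count[2] = {5, 10}; /* number of elements selected per dimension */
    hid_t   space    = H5Screate_simple(2, dims, NULL);
    herr_t  status   = H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, NULL);

    /* The selection can now serve as the memory or file dataspace of H5Dread/H5Dwrite */
    H5Sclose(space);
    return status;
}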
+** +****************************************************************/ +static void +test_select_hyper(hid_t xfer_plist) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; + hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + H5S_class_t ext_type; /* Extent type */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslab Selection Functions\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Verify extent type */ + ext_type = H5Sget_simple_extent_type(sid1); + VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type"); + + /* Test selecting stride==0 to verify failure */ + start[0] = 1; + start[1] = 0; + start[2] = 0; + stride[0] = 0; + stride[1] = 0; + stride[2] = 0; + count[0] = 2; + count[1] = 15; + count[2] = 13; + block[0] = 1; + block[1] = 1; + block[2] = 1; + H5E_BEGIN_TRY + { + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + + /* Test selecting stridebuf + (pnt_info->coord[pnt_info->offset][0] * SPACE2_DIM2) + + pnt_info->coord[pnt_info->offset][1]; + if (*elem != *tmp) + return (-1); + else { + pnt_info->offset++; + return (0); + } +} /* end test_select_point_iter1() */ + +/**************************************************************** +** +** test_select_point(): Test basic H5S (dataspace) selection code. +** Tests element selections between dataspaces of various sizes +** and dimensionalities. 
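/* For context, an element (point) selection takes a flattened num_points x rank coordinate
 * array.  A minimal sketch selecting three points in a 2-D dataspace (illustrative values,
 * not this test's coordinate lists):
 */
static herr_t
example_point_selection(void)
{
    hsize_t dims[2]      = {30, 26};
    hsize_t coords[3][2] = {{0, 1}, {5, 7}, {12, 3}};
    hid_t   space        = H5Screate_simple(2, dims, NULL);
    herr_t  status       = H5Sselect_elements(space, H5S_SELECT_SET, (size_t)3, (const hsize_t *)coords);

    /* H5S_SELECT_APPEND / H5S_SELECT_PREPEND add further points, as the test below does */
    H5Sclose(space);
    return status;
}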
+** +****************************************************************/ +static void +test_select_point(hid_t xfer_plist) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; + hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */ + hsize_t temp_coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */ + hsize_t coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ + hsize_t temp_coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ + hsize_t coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */ + hsize_t temp_coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */ + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf; /* temporary buffer pointer */ + int i, j; /* Counters */ + struct pnt_iter pi; /* Custom Pointer iterator struct */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Element Selection Functions\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for write buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select sequence of ten points for disk dataset */ + coord1[0][0] = 0; + coord1[0][1] = 10; + coord1[0][2] = 5; + coord1[1][0] = 1; + coord1[1][1] = 2; + coord1[1][2] = 7; + coord1[2][0] = 2; + coord1[2][1] = 4; + coord1[2][2] = 9; + coord1[3][0] = 0; + coord1[3][1] = 6; + coord1[3][2] = 11; + coord1[4][0] = 1; + coord1[4][1] = 8; + coord1[4][2] = 13; + coord1[5][0] = 2; + coord1[5][1] = 12; + coord1[5][2] = 0; + coord1[6][0] = 0; + coord1[6][1] = 14; + coord1[6][2] = 2; + coord1[7][0] = 1; + coord1[7][1] = 0; + coord1[7][2] = 4; + coord1[8][0] = 2; + coord1[8][1] = 1; + coord1[8][2] = 6; + coord1[9][0] = 0; + coord1[9][1] = 3; + coord1[9][2] = 8; + ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Verify correct elements selected */ + H5Sget_select_elem_pointlist(sid1, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord1); + for (i = 0; i < POINT1_NPOINTS; i++) { + VERIFY(temp_coord1[i][0], coord1[i][0], "H5Sget_select_elem_pointlist"); + VERIFY(temp_coord1[i][1], coord1[i][1], "H5Sget_select_elem_pointlist"); + VERIFY(temp_coord1[i][2], coord1[i][2], "H5Sget_select_elem_pointlist"); + } /* end for */ + + ret = (int)H5Sget_select_npoints(sid1); + VERIFY(ret, 10, "H5Sget_select_npoints"); + + /* Append another sequence of ten points to disk dataset */ + coord1[0][0] = 0; + coord1[0][1] = 2; + coord1[0][2] = 0; + coord1[1][0] = 
1; + coord1[1][1] = 10; + coord1[1][2] = 8; + coord1[2][0] = 2; + coord1[2][1] = 8; + coord1[2][2] = 10; + coord1[3][0] = 0; + coord1[3][1] = 7; + coord1[3][2] = 12; + coord1[4][0] = 1; + coord1[4][1] = 3; + coord1[4][2] = 11; + coord1[5][0] = 2; + coord1[5][1] = 1; + coord1[5][2] = 1; + coord1[6][0] = 0; + coord1[6][1] = 13; + coord1[6][2] = 7; + coord1[7][0] = 1; + coord1[7][1] = 14; + coord1[7][2] = 6; + coord1[8][0] = 2; + coord1[8][1] = 2; + coord1[8][2] = 5; + coord1[9][0] = 0; + coord1[9][1] = 6; + coord1[9][2] = 13; + ret = H5Sselect_elements(sid1, H5S_SELECT_APPEND, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Verify correct elements selected */ + H5Sget_select_elem_pointlist(sid1, (hsize_t)POINT1_NPOINTS, (hsize_t)POINT1_NPOINTS, + (hsize_t *)temp_coord1); + for (i = 0; i < POINT1_NPOINTS; i++) { + VERIFY(temp_coord1[i][0], coord1[i][0], "H5Sget_select_elem_pointlist"); + VERIFY(temp_coord1[i][1], coord1[i][1], "H5Sget_select_elem_pointlist"); + VERIFY(temp_coord1[i][2], coord1[i][2], "H5Sget_select_elem_pointlist"); + } /* end for */ + + ret = (int)H5Sget_select_npoints(sid1); + VERIFY(ret, 20, "H5Sget_select_npoints"); + + /* Select sequence of ten points for memory dataset */ + coord2[0][0] = 12; + coord2[0][1] = 3; + coord2[1][0] = 15; + coord2[1][1] = 13; + coord2[2][0] = 7; + coord2[2][1] = 25; + coord2[3][0] = 0; + coord2[3][1] = 6; + coord2[4][0] = 13; + coord2[4][1] = 0; + coord2[5][0] = 24; + coord2[5][1] = 11; + coord2[6][0] = 12; + coord2[6][1] = 21; + coord2[7][0] = 29; + coord2[7][1] = 4; + coord2[8][0] = 8; + coord2[8][1] = 8; + coord2[9][0] = 19; + coord2[9][1] = 17; + ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Verify correct elements selected */ + H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord2); + for (i = 0; i < POINT1_NPOINTS; i++) { + VERIFY(temp_coord2[i][0], coord2[i][0], "H5Sget_select_elem_pointlist"); + VERIFY(temp_coord2[i][1], coord2[i][1], "H5Sget_select_elem_pointlist"); + } /* end for */ + + /* Save points for later iteration */ + /* (these are in the second half of the buffer, because we are prepending */ + /* the next list of points to the beginning of the point selection list) */ + HDmemcpy(((char *)pi.coord) + sizeof(coord2), coord2, sizeof(coord2)); + + ret = (int)H5Sget_select_npoints(sid2); + VERIFY(ret, 10, "H5Sget_select_npoints"); + + /* Append another sequence of ten points to memory dataset */ + coord2[0][0] = 24; + coord2[0][1] = 0; + coord2[1][0] = 2; + coord2[1][1] = 25; + coord2[2][0] = 13; + coord2[2][1] = 17; + coord2[3][0] = 8; + coord2[3][1] = 3; + coord2[4][0] = 29; + coord2[4][1] = 4; + coord2[5][0] = 11; + coord2[5][1] = 14; + coord2[6][0] = 5; + coord2[6][1] = 22; + coord2[7][0] = 12; + coord2[7][1] = 2; + coord2[8][0] = 21; + coord2[8][1] = 12; + coord2[9][0] = 9; + coord2[9][1] = 18; + ret = H5Sselect_elements(sid2, H5S_SELECT_PREPEND, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Verify correct elements selected */ + H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord2); + for (i = 0; i < POINT1_NPOINTS; i++) { + VERIFY(temp_coord2[i][0], coord2[i][0], "H5Sget_select_elem_pointlist"); + VERIFY(temp_coord2[i][1], coord2[i][1], "H5Sget_select_elem_pointlist"); + } /* end for */ + + ret = (int)H5Sget_select_npoints(sid2); + 
VERIFY(ret, 20, "H5Sget_select_npoints"); + + /* Save points for later iteration */ + HDmemcpy(pi.coord, coord2, sizeof(coord2)); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select sequence of points for read dataset */ + coord3[0][0] = 0; + coord3[0][1] = 2; + coord3[1][0] = 4; + coord3[1][1] = 8; + coord3[2][0] = 13; + coord3[2][1] = 13; + coord3[3][0] = 14; + coord3[3][1] = 20; + coord3[4][0] = 7; + coord3[4][1] = 9; + coord3[5][0] = 2; + coord3[5][1] = 0; + coord3[6][0] = 9; + coord3[6][1] = 19; + coord3[7][0] = 1; + coord3[7][1] = 22; + coord3[8][0] = 12; + coord3[8][1] = 21; + coord3[9][0] = 11; + coord3[9][1] = 6; + ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Verify correct elements selected */ + H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord3); + for (i = 0; i < POINT1_NPOINTS; i++) { + VERIFY(temp_coord3[i][0], coord3[i][0], "H5Sget_select_elem_pointlist"); + VERIFY(temp_coord3[i][1], coord3[i][1], "H5Sget_select_elem_pointlist"); + } /* end for */ + + ret = (int)H5Sget_select_npoints(sid2); + VERIFY(ret, 10, "H5Sget_select_npoints"); + + /* Append another sequence of ten points to disk dataset */ + coord3[0][0] = 14; + coord3[0][1] = 25; + coord3[1][0] = 0; + coord3[1][1] = 0; + coord3[2][0] = 11; + coord3[2][1] = 11; + coord3[3][0] = 5; + coord3[3][1] = 14; + coord3[4][0] = 3; + coord3[4][1] = 5; + coord3[5][0] = 2; + coord3[5][1] = 2; + coord3[6][0] = 7; + coord3[6][1] = 13; + coord3[7][0] = 9; + coord3[7][1] = 16; + coord3[8][0] = 12; + coord3[8][1] = 22; + coord3[9][0] = 13; + coord3[9][1] = 9; + ret = H5Sselect_elements(sid2, H5S_SELECT_APPEND, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Verify correct elements selected */ + H5Sget_select_elem_pointlist(sid2, (hsize_t)POINT1_NPOINTS, (hsize_t)POINT1_NPOINTS, + (hsize_t *)temp_coord3); + for (i = 0; i < POINT1_NPOINTS; i++) { + VERIFY(temp_coord3[i][0], coord3[i][0], "H5Sget_select_elem_pointlist"); + VERIFY(temp_coord3[i][1], coord3[i][1], "H5Sget_select_elem_pointlist"); + } /* end for */ + ret = (int)H5Sget_select_npoints(sid2); + VERIFY(ret, 20, "H5Sget_select_npoints"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Check that the values match with a dataset iterator */ + pi.buf = wbuf; + pi.offset = 0; + ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid2, test_select_point_iter1, &pi); + CHECK(ret, FAIL, "H5Diterate"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* 
test_select_point() */ + +/**************************************************************** +** +** test_select_all_iter1(): Iterator for checking all iteration +** +** +****************************************************************/ +static herr_t +test_select_all_iter1(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned H5_ATTR_UNUSED ndim, + const hsize_t H5_ATTR_UNUSED *point, void *_operator_data) +{ + uint8_t *tbuf = (uint8_t *)_elem, /* temporary buffer pointer */ + **tbuf2 = (uint8_t **)_operator_data; /* temporary buffer handle */ + + if (*tbuf != **tbuf2) + return (-1); + else { + (*tbuf2)++; + return (0); + } +} /* end test_select_all_iter1() */ + +/**************************************************************** +** +** test_select_none_iter1(): Iterator for checking none iteration +** (This is never supposed to be called, so it always returns -1) +** +****************************************************************/ +static herr_t +test_select_none_iter1(void H5_ATTR_UNUSED *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned H5_ATTR_UNUSED ndim, + const hsize_t H5_ATTR_UNUSED *point, void H5_ATTR_UNUSED *_operator_data) +{ + return (-1); +} /* end test_select_none_iter1() */ + +/**************************************************************** +** +** test_select_all(): Test basic H5S (dataspace) selection code. +** Tests "all" selections. +** +****************************************************************/ +static void +test_select_all(hid_t xfer_plist) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hsize_t dims1[] = {SPACE4_DIM1, SPACE4_DIM2, SPACE4_DIM3}; + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf; /* temporary buffer pointer */ + int i, j, k; /* Counters */ + herr_t ret; /* Generic return value */ + H5S_class_t ext_type; /* Extent type */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing 'All' Selection Functions\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE4_DIM1 * SPACE4_DIM2 * SPACE4_DIM3); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE4_DIM1 * SPACE4_DIM2 * SPACE4_DIM3)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE4_DIM1; i++) + for (j = 0; j < SPACE4_DIM2; j++) + for (k = 0; k < SPACE4_DIM3; k++) + *tbuf++ = (uint8_t)((((i * SPACE4_DIM2) + j) * SPACE4_DIM3) + k); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE4_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Verify extent type */ + ext_type = H5Sget_simple_extent_type(sid1); + VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE4_NAME, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, xfer_plist, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, xfer_plist, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Check that the values match with a dataset iterator */ + tbuf = wbuf; + ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid1, test_select_all_iter1, &tbuf); + CHECK(ret, FAIL, 
"H5Diterate"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_all() */ + +/**************************************************************** +** +** test_select_all_hyper(): Test basic H5S (dataspace) selection code. +** Tests "all" and hyperslab selections. +** +****************************************************************/ +static void +test_select_all_hyper(hid_t xfer_plist) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE3_DIM1, SPACE3_DIM2}; + hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; + hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + H5S_class_t ext_type; /* Extent type */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing 'All' Selection Functions\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE3_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Verify extent type */ + ext_type = H5Sget_simple_extent_type(sid1); + VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type"); + + /* Select entire 15x26 extent for disk dataset */ + ret = H5Sselect_all(sid1); + CHECK(ret, FAIL, "H5Sselect_all"); + + /* Select 15x26 hyperslab for memory dataset */ + start[0] = 15; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 15; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE3_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 15x26 hyperslab for reading memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + 
stride[1] = 1; + count[0] = 15; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select no extent for disk dataset */ + ret = H5Sselect_none(sid1); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Read selection from disk (should fail with no selection defined) */ + H5E_BEGIN_TRY + { + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Dread"); + + /* Select entire 15x26 extent for disk dataset */ + ret = H5Sselect_all(sid1); + CHECK(ret, FAIL, "H5Sselect_all"); + + /* Read selection from disk (should work now) */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Check that the values match with a dataset iterator */ + tbuf = wbuf + (15 * SPACE2_DIM2); + ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid2, test_select_all_iter1, &tbuf); + CHECK(ret, FAIL, "H5Diterate"); + + /* A quick check to make certain that iterating through a "none" selection works */ + ret = H5Sselect_none(sid2); + CHECK(ret, FAIL, "H5Sselect_none"); + ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid2, test_select_none_iter1, &tbuf); + CHECK(ret, FAIL, "H5Diterate"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_all_hyper() */ + +/**************************************************************** +** +** test_select_combo(): Test basic H5S (dataspace) selection code. +** Tests combinations of element and hyperslab selections between +** dataspaces of various sizes and dimensionalities. 
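/* For context, H5Dread/H5Dwrite accept different selection types on the memory and file
 * dataspaces as long as both select the same number of elements.  A minimal sketch with
 * illustrative handles (assumes `dset`, a file dataspace of at least 4 x 5, and a memory
 * dataspace of at least 1 x 2 have already been created):
 */
static herr_t
example_mixed_selection_write(hid_t dset, hid_t file_space, hid_t mem_space, const unsigned char *buf)
{
    hsize_t coords[2][2] = {{0, 0}, {3, 4}}; /* two points in the file dataspace      */
    hsize_t start[2]     = {0, 0};
    hsize_t count[2]     = {1, 2};           /* a 1x2 hyperslab in memory: 2 elements */

    H5Sselect_elements(file_space, H5S_SELECT_SET, (size_t)2, (const hsize_t *)coords);
    H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, NULL, count, NULL);

    /* Both selections contain two elements, so the transfer is legal */
    return H5Dwrite(dset, H5T_NATIVE_UCHAR, mem_space, file_space, H5P_DEFAULT, buf);
}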
+** +****************************************************************/ +static void +test_select_combo(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; + hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */ + hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf, /* temporary buffer pointer */ + *tbuf2; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Combination of Hyperslab & Element Selection Functions\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for write buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select sequence of ten points for disk dataset */ + coord1[0][0] = 0; + coord1[0][1] = 10; + coord1[0][2] = 5; + coord1[1][0] = 1; + coord1[1][1] = 2; + coord1[1][2] = 7; + coord1[2][0] = 2; + coord1[2][1] = 4; + coord1[2][2] = 9; + coord1[3][0] = 0; + coord1[3][1] = 6; + coord1[3][2] = 11; + coord1[4][0] = 1; + coord1[4][1] = 8; + coord1[4][2] = 13; + coord1[5][0] = 2; + coord1[5][1] = 12; + coord1[5][2] = 0; + coord1[6][0] = 0; + coord1[6][1] = 14; + coord1[6][2] = 2; + coord1[7][0] = 1; + coord1[7][1] = 0; + coord1[7][2] = 4; + coord1[8][0] = 2; + coord1[8][1] = 1; + coord1[8][2] = 6; + coord1[9][0] = 0; + coord1[9][1] = 3; + coord1[9][2] = 8; + ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Select 1x10 hyperslab for writing memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 10; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, 
"H5Screate_simple"); + + /* Select 10x1 hyperslab for reading memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 10; + count[1] = 1; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + for (i = 0; i < POINT1_NPOINTS; i++) { + tbuf = wbuf + i; + tbuf2 = rbuf + (i * SPACE3_DIM2); + if (*tbuf != *tbuf2) + TestErrPrintf("element values don't match!, i=%d\n", i); + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_combo() */ + +static int +compare_size_t(const void *s1, const void *s2) +{ + if (*(const size_t *)s1 < *(const size_t *)s2) + return (-1); + else if (*(const size_t *)s1 > *(const size_t *)s2) + return (1); + else + return (0); +} + +/**************************************************************** +** +** test_select_hyper_stride(): Test H5S (dataspace) selection code. +** Tests strided hyperslabs of various sizes and dimensionalities. +** +****************************************************************/ +static void +test_select_hyper_stride(hid_t xfer_plist) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; + hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ + uint16_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf, /* temporary buffer pointer */ + *tbuf2; /* temporary buffer pointer */ + size_t loc1[72] = { + /* Gruesomely ugly way to make certain hyperslab locations are checked correctly */ + 27, 28, 29, 53, 54, 55, 79, 80, 81, /* Block #1 */ + 32, 33, 34, 58, 59, 60, 84, 85, 86, /* Block #2 */ + 157, 158, 159, 183, 184, 185, 209, 210, 211, /* Block #3 */ + 162, 163, 164, 188, 189, 190, 214, 215, 216, /* Block #4 */ + 287, 288, 289, 313, 314, 315, 339, 340, 341, /* Block #5 */ + 292, 293, 294, 318, 319, 320, 344, 345, 346, /* Block #6 */ + 417, 418, 419, 443, 444, 445, 469, 470, 471, /* Block #7 */ + 422, 423, 424, 448, 449, 450, 474, 475, 476, /* Block #8 */ + }; + size_t loc2[72] = { + 0, 1, 2, 26, 27, 28, /* Block #1 */ + 4, 5, 6, 30, 31, 32, /* Block #2 */ + 8, 9, 10, 34, 35, 36, /* Block #3 */ + 12, 13, 14, 38, 39, 40, /* Block #4 */ + 104, 105, 106, 130, 131, 132, /* Block #5 */ + 108, 109, 110, 134, 135, 136, /* Block #6 */ + 112, 113, 114, 138, 139, 140, /* Block #7 */ + 116, 117, 118, 142, 143, 144, /* Block #8 */ + 208, 209, 210, 234, 235, 236, /* Block #9 */ + 212, 213, 214, 238, 239, 240, /* Block #10 */ + 216, 217, 218, 242, 243, 244, /* Block #11 */ + 220, 221, 222, 246, 247, 248, /* Block #12 */ + }; + int i, j; /* 
Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslabs with Strides Functionality\n")); + + /* Allocate write & read buffers */ + wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 2x3x3 count with a stride of 2x4x3 & 1x2x2 block hyperslab for disk dataset */ + start[0] = 0; + start[1] = 0; + start[2] = 0; + stride[0] = 2; + stride[1] = 4; + stride[2] = 3; + count[0] = 2; + count[1] = 3; + count[2] = 3; + block[0] = 1; + block[1] = 2; + block[2] = 2; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select 4x2 count with a stride of 5x5 & 3x3 block hyperslab for memory dataset */ + start[0] = 1; + start[1] = 1; + stride[0] = 5; + stride[1] = 5; + count[0] = 4; + count[1] = 2; + block[0] = 3; + block[1] = 3; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 3x4 count with a stride of 4x4 & 2x3 block hyperslab for memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 4; + stride[1] = 4; + count[0] = 3; + count[1] = 4; + block[0] = 2; + block[1] = 3; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Sort the locations into the proper order */ + HDqsort(loc1, (size_t)72, sizeof(size_t), compare_size_t); + HDqsort(loc2, (size_t)72, sizeof(size_t), compare_size_t); + /* Compare data read with data written out */ + for (i = 0; i < 72; i++) { + tbuf = wbuf + loc1[i]; + tbuf2 = rbuf + loc2[i]; + if (*tbuf != *tbuf2) { + HDprintf("%d: hyperslab values don't match!, loc1[%d]=%d, loc2[%d]=%d\n", __LINE__, i, + (int)loc1[i], i, (int)loc2[i]); + HDprintf("wbuf=%p, tbuf=%p, rbuf=%p, tbuf2=%p\n", (void *)wbuf, (void *)tbuf, (void *)rbuf, + (void *)tbuf2); + TestErrPrintf("*tbuf=%u, *tbuf2=%u\n", (unsigned)*tbuf, (unsigned)*tbuf2); + } /* end if */ + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close 
disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_stride() */ + +/**************************************************************** +** +** test_select_hyper_contig(): Test H5S (dataspace) selection code. +** Tests contiguous hyperslabs of various sizes and dimensionalities. +** +****************************************************************/ +static void +test_select_hyper_contig(hid_t dset_type, hid_t xfer_plist) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims2[] = {SPACE2_DIM2, SPACE2_DIM1}; + hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ + uint16_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Contiguous Hyperslabs Functionality\n")); + + /* Allocate write & read buffers */ + wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 12x10 count with a stride of 1x3 & 3x3 block hyperslab for disk dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 3; + count[0] = 12; + count[1] = 10; + block[0] = 1; + block[1] = 3; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select 4x5 count with a stride of 3x6 & 3x6 block hyperslab for memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 3; + stride[1] = 6; + count[0] = 4; + count[1] = 5; + block[0] = 3; + block[1] = 6; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE2_NAME, dset_type, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 6x5 count with a stride of 2x6 & 2x6 block hyperslab for disk dataset */ + start[0] 
= 0; + start[1] = 0; + stride[0] = 2; + stride[1] = 6; + count[0] = 6; + count[1] = 5; + block[0] = 2; + block[1] = 6; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select 3x15 count with a stride of 4x2 & 4x2 block hyperslab for memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 4; + stride[1] = 2; + count[0] = 3; + count[1] = 15; + block[0] = 4; + block[1] = 2; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + if (HDmemcmp(rbuf, wbuf, sizeof(uint16_t) * 30 * 12) != 0) + TestErrPrintf("hyperslab values don't match! Line=%d\n", __LINE__); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_contig() */ + +/**************************************************************** +** +** test_select_hyper_contig2(): Test H5S (dataspace) selection code. +** Tests more contiguous hyperslabs of various sizes and dimensionalities. +** +****************************************************************/ +static void +test_select_hyper_contig2(hid_t dset_type, hid_t xfer_plist) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims2[] = {SPACE8_DIM4, SPACE8_DIM3, SPACE8_DIM2, SPACE8_DIM1}; + hsize_t start[SPACE8_RANK]; /* Starting location of hyperslab */ + hsize_t count[SPACE8_RANK]; /* Element count of hyperslab */ + uint16_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf; /* temporary buffer pointer */ + int i, j, k, l; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing More Contiguous Hyperslabs Functionality\n")); + + /* Allocate write & read buffers */ + wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t), + (size_t)(SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE8_DIM1; i++) + for (j = 0; j < SPACE8_DIM2; j++) + for (k = 0; k < SPACE8_DIM3; k++) + for (l = 0; l < SPACE8_DIM4; l++) + *tbuf++ = (uint16_t)((i * SPACE8_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE8_RANK, dims2, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select contiguous hyperslab for disk dataset */ + start[0] = 0; + start[1] = 0; + start[2] = 0; + start[3] = 0; + count[0] = 2; + count[1] = SPACE8_DIM3; + count[2] = SPACE8_DIM2; + count[3] = SPACE8_DIM1; + ret = H5Sselect_hyperslab(sid1, 
H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select contiguous hyperslab in memory */ + start[0] = 0; + start[1] = 0; + start[2] = 0; + start[3] = 0; + count[0] = 2; + count[1] = SPACE8_DIM3; + count[2] = SPACE8_DIM2; + count[3] = SPACE8_DIM1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE8_NAME, dset_type, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select contiguous hyperslab in memory */ + start[0] = 0; + start[1] = 0; + start[2] = 0; + start[3] = 0; + count[0] = 2; + count[1] = SPACE8_DIM3; + count[2] = SPACE8_DIM2; + count[3] = SPACE8_DIM1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select contiguous hyperslab in memory */ + start[0] = 0; + start[1] = 0; + start[2] = 0; + start[3] = 0; + count[0] = 2; + count[1] = SPACE8_DIM3; + count[2] = SPACE8_DIM2; + count[3] = SPACE8_DIM1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + if (HDmemcmp(rbuf, wbuf, sizeof(uint16_t) * 2 * SPACE8_DIM3 * SPACE8_DIM2 * SPACE8_DIM1) != 0) + TestErrPrintf("Error: hyperslab values don't match!\n"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_contig2() */ + +/**************************************************************** +** +** test_select_hyper_contig3(): Test H5S (dataspace) selection code. +** Tests contiguous hyperslabs of various sizes and dimensionalities. +** This test uses a hyperslab that is contiguous in the lowest dimension, +** not contiguous in a dimension, then has a selection across the entire next +** dimension (which should be "flattened" out also). 
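/* For context, when a hyperslab covers the full extent of the fastest-varying dimension(s),
 * the library can treat each partial slice as one contiguous run ("flattening").  A minimal
 * sketch with illustrative 3-D sizes (NULL stride/block default to 1):
 */
static herr_t
example_flattenable_hyperslab(void)
{
    hsize_t dims[3]  = {8, 12, 16};
    hsize_t start[3] = {2, 0, 0};   /* partial selection in dim 0 ...      */
    hsize_t count[3] = {4, 12, 16}; /* ... but full extent in dims 1 and 2 */
    hid_t   space    = H5Screate_simple(3, dims, NULL);
    herr_t  status   = H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, NULL);

    /* Dims 1 and 2 are fully selected, so each dim-0 slice is one contiguous block */
    H5Sclose(space);
    return status;
}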
+** +****************************************************************/ +static void +test_select_hyper_contig3(hid_t dset_type, hid_t xfer_plist) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims2[] = {SPACE8_DIM4, SPACE8_DIM3, SPACE8_DIM2, SPACE8_DIM1}; + hsize_t start[SPACE8_RANK]; /* Starting location of hyperslab */ + hsize_t count[SPACE8_RANK]; /* Element count of hyperslab */ + uint16_t *wbuf, /* Buffer to write to disk */ + *rbuf, /* Buffer read from disk */ + *tbuf, *tbuf2; /* Temporary buffer pointers */ + unsigned i, j, k, l; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Yet More Contiguous Hyperslabs Functionality\n")); + + /* Allocate write & read buffers */ + wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t), + (size_t)(SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE8_DIM4; i++) + for (j = 0; j < SPACE8_DIM3; j++) + for (k = 0; k < SPACE8_DIM2; k++) + for (l = 0; l < SPACE8_DIM1; l++) + *tbuf++ = (uint16_t)((k * SPACE8_DIM2) + l); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE8_RANK, dims2, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select semi-contiguous hyperslab for disk dataset */ + start[0] = 0; + start[1] = 0; + start[2] = SPACE8_DIM2 / 2; + start[3] = 0; + count[0] = 2; + count[1] = SPACE8_DIM3; + count[2] = SPACE8_DIM2 / 2; + count[3] = SPACE8_DIM1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select semi-contiguous hyperslab in memory */ + start[0] = 0; + start[1] = 0; + start[2] = SPACE8_DIM2 / 2; + start[3] = 0; + count[0] = 2; + count[1] = SPACE8_DIM3; + count[2] = SPACE8_DIM2 / 2; + count[3] = SPACE8_DIM1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE8_NAME, dset_type, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select semi-contiguous hyperslab in memory */ + start[0] = 0; + start[1] = 0; + start[2] = SPACE8_DIM2 / 2; + start[3] = 0; + count[0] = 2; + count[1] = SPACE8_DIM3; + count[2] = SPACE8_DIM2 / 2; + count[3] = SPACE8_DIM1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select semi-contiguous hyperslab in memory */ + start[0] = 0; + start[1] = 0; + start[2] = SPACE8_DIM2 / 2; + start[3] = 0; + count[0] = 2; + count[1] = SPACE8_DIM3; + count[2] = SPACE8_DIM2 / 2; + 
count[3] = SPACE8_DIM1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + for (i = 0, tbuf = wbuf, tbuf2 = rbuf; i < SPACE8_DIM4; i++) + for (j = 0; j < SPACE8_DIM3; j++) + for (k = 0; k < SPACE8_DIM2; k++) + for (l = 0; l < SPACE8_DIM1; l++, tbuf++, tbuf2++) + if ((i >= start[0] && i < (start[0] + count[0])) && + (j >= start[1] && j < (start[1] + count[1])) && + (k >= start[2] && k < (start[2] + count[2])) && + (l >= start[3] && l < (start[3] + count[3]))) { + if (*tbuf != *tbuf2) { + HDprintf("Error: hyperslab values don't match!\n"); + TestErrPrintf("Line: %d, i=%u, j=%u, k=%u, l=%u, *tbuf=%u,*tbuf2=%u\n", __LINE__, + i, j, k, l, (unsigned)*tbuf, (unsigned)*tbuf2); + } /* end if */ + } /* end if */ + else { + if (*tbuf2 != 0) { + HDprintf("Error: invalid data in read buffer!\n"); + TestErrPrintf("Line: %d, i=%u, j=%u, k=%u, l=%u, *tbuf=%u,*tbuf2=%u\n", __LINE__, + i, j, k, l, (unsigned)*tbuf, (unsigned)*tbuf2); + } /* end if */ + } /* end else */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_contig3() */ + +#if 0 +/**************************************************************** +** +** verify_select_hyper_contig_dr__run_test(): Verify data from +** test_select_hyper_contig_dr__run_test() +** +****************************************************************/ +static void +verify_select_hyper_contig_dr__run_test(const uint16_t *cube_buf, size_t cube_size, + unsigned edge_size, unsigned cube_rank) +{ + const uint16_t *cube_ptr; /* Pointer into the cube buffer */ + uint16_t expected_value; /* Expected value in dataset */ + unsigned i, j, k, l, m; /* Local index variables */ + size_t s; /* Local index variable */ + hbool_t mis_match; /* Flag to indicate mismatch in expected value */ + + HDassert(cube_buf); + HDassert(cube_size > 0); + + expected_value = 0; + mis_match = FALSE; + cube_ptr = cube_buf; + s = 0; + i = 0; + do { + j = 0; + do { + k = 0; + do { + l = 0; + do { + m = 0; + do { + /* Sanity check */ + HDassert(s < cube_size); + + /* Check for correct value */ + if (*cube_ptr != expected_value) + mis_match = TRUE; + + /* Advance to next element */ + cube_ptr++; + expected_value++; + s++; + m++; + } while ((cube_rank > 0) && (m < edge_size)); + l++; + } while ((cube_rank > 1) && (l < edge_size)); + k++; + } while ((cube_rank > 2) && (k < edge_size)); + j++; + } while ((cube_rank > 3) && (j < edge_size)); + i++; + } while ((cube_rank > 4) && (i < edge_size)); + if (mis_match) + TestErrPrintf("Initial cube data don't match! Line = %d\n", __LINE__); +} /* verify_select_hyper_contig_dr__run_test() */ +#endif +#if 0 + +/**************************************************************** +** +** test_select_hyper_contig_dr__run_test(): Test H5S (dataspace) +** selection code with contiguous source and target having +** different ranks but the same shape. We have already +** tested H5Sselect_shape_same in isolation, so now we try to do +** I/O. 
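+**
+** For example, with edge_size = 6, a 2-D 6 x 6 slice selected in a
+** 3-D 6 x 6 x 6 cube is reported by H5Sselect_shape_same() as having
+** the same shape as a full 2-D 6 x 6 dataspace, so reads and writes
+** between the two selections are expected to succeed.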
+** +****************************************************************/ +static void +test_select_hyper_contig_dr__run_test(int test_num, const uint16_t *cube_buf, const uint16_t *zero_buf, + unsigned edge_size, unsigned chunk_edge_size, unsigned small_rank, + unsigned large_rank, hid_t dset_type, hid_t xfer_plist) +{ + hbool_t mis_match; /* Flag indicating a value read in wasn't what was expected */ + hid_t fapl; /* File access property list */ + hid_t fid1; /* File ID */ + hid_t small_cube_sid; /* Dataspace ID for small cube in memory & file */ + hid_t mem_large_cube_sid; /* Dataspace ID for large cube in memory */ + hid_t file_large_cube_sid; /* Dataspace ID for large cube in file */ + hid_t small_cube_dcpl_id = H5P_DEFAULT; /* DCPL for small cube dataset */ + hid_t large_cube_dcpl_id = H5P_DEFAULT; /* DCPL for large cube dataset */ + hid_t small_cube_dataset; /* Dataset ID */ + hid_t large_cube_dataset; /* Dataset ID */ + size_t start_index; /* Offset within buffer to begin inspecting */ + size_t stop_index; /* Offset within buffer to end inspecting */ + uint16_t expected_value; /* Expected value in dataset */ + uint16_t *small_cube_buf_1; /* Buffer for small cube data */ + uint16_t *large_cube_buf_1; /* Buffer for large cube data */ + uint16_t *ptr_1; /* Temporary pointer into cube data */ + hsize_t dims[SS_DR_MAX_RANK]; /* Dataspace dimensions */ + hsize_t start[SS_DR_MAX_RANK]; /* Shared hyperslab start offset */ + hsize_t stride[SS_DR_MAX_RANK]; /* Shared hyperslab stride */ + hsize_t count[SS_DR_MAX_RANK]; /* Shared hyperslab count */ + hsize_t block[SS_DR_MAX_RANK]; /* Shared hyperslab block size */ + hsize_t *start_ptr; /* Actual hyperslab start offset */ + hsize_t *stride_ptr; /* Actual hyperslab stride */ + hsize_t *count_ptr; /* Actual hyperslab count */ + hsize_t *block_ptr; /* Actual hyperslab block size */ + size_t small_cube_size; /* Number of elements in small cube */ + size_t large_cube_size; /* Number of elements in large cube */ + unsigned u, v, w, x; /* Local index variables */ + size_t s; /* Local index variable */ + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + MESSAGE(7, ("\tn-cube slice through m-cube I/O test %d.\n", test_num)); + MESSAGE(7, ("\tranks = %u/%u, edge_size = %u, chunk_edge_size = %u.\n", small_rank, large_rank, edge_size, + chunk_edge_size)); + + HDassert(edge_size >= 6); + HDassert(edge_size >= chunk_edge_size); + HDassert((chunk_edge_size == 0) || (chunk_edge_size >= 3)); + HDassert(small_rank > 0); + HDassert(small_rank < large_rank); + HDassert(large_rank <= SS_DR_MAX_RANK); + + /* Compute cube sizes */ + small_cube_size = large_cube_size = (size_t)1; + for (u = 0; u < large_rank; u++) { + if (u < small_rank) + small_cube_size *= (size_t)edge_size; + + large_cube_size *= (size_t)edge_size; + } /* end for */ + + HDassert(large_cube_size < (size_t)UINT_MAX); + + /* set up the start, stride, count, and block pointers */ + start_ptr = &(start[SS_DR_MAX_RANK - large_rank]); + stride_ptr = &(stride[SS_DR_MAX_RANK - large_rank]); + count_ptr = &(count[SS_DR_MAX_RANK - large_rank]); + block_ptr = &(block[SS_DR_MAX_RANK - large_rank]); + + /* Allocate buffers */ + small_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), small_cube_size); + CHECK_PTR(small_cube_buf_1, "HDcalloc"); + large_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), large_cube_size); + CHECK_PTR(large_cube_buf_1, "HDcalloc"); + + /* Create a dataset transfer property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, 
"H5Pcreate"); + + /* Use the 'core' VFD for this test */ + ret = H5Pset_fapl_core(fapl, (size_t)(1024 * 1024), FALSE); + CHECK(ret, FAIL, "H5Pset_fapl_core"); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Close file access property list */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* setup dims: */ + dims[0] = dims[1] = dims[2] = dims[3] = dims[4] = (hsize_t)edge_size; + + /* Create small cube dataspaces */ + small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL); + CHECK(small_cube_sid, FAIL, "H5Screate_simple"); + + /* Create large cube dataspace */ + mem_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL); + CHECK(mem_large_cube_sid, FAIL, "H5Screate_simple"); + file_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL); + CHECK(file_large_cube_sid, FAIL, "H5Screate_simple"); + + /* if chunk edge size is greater than zero, set up the small and + * large data set creation property lists to specify chunked + * datasets. + */ + if (chunk_edge_size > 0) { + hsize_t chunk_dims[SS_DR_MAX_RANK]; /* Chunk dimensions */ + + chunk_dims[0] = chunk_dims[1] = chunk_dims[2] = chunk_dims[3] = chunk_dims[4] = + (hsize_t)chunk_edge_size; + + small_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(small_cube_dcpl_id, FAIL, "H5Pcreate"); + + ret = H5Pset_layout(small_cube_dcpl_id, H5D_CHUNKED); + CHECK(ret, FAIL, "H5Pset_layout"); + + ret = H5Pset_chunk(small_cube_dcpl_id, (int)small_rank, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + + large_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(large_cube_dcpl_id, FAIL, "H5Pcreate"); + + ret = H5Pset_layout(large_cube_dcpl_id, H5D_CHUNKED); + CHECK(ret, FAIL, "H5Pset_layout"); + + ret = H5Pset_chunk(large_cube_dcpl_id, (int)large_rank, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + } /* end if */ + + /* create the small cube dataset */ + small_cube_dataset = H5Dcreate2(fid1, "small_cube_dataset", dset_type, small_cube_sid, H5P_DEFAULT, + small_cube_dcpl_id, H5P_DEFAULT); + CHECK(small_cube_dataset, FAIL, "H5Dcreate2"); + + /* Close non-default small dataset DCPL */ + if (small_cube_dcpl_id != H5P_DEFAULT) { + ret = H5Pclose(small_cube_dcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + } /* end if */ + + /* create the large cube dataset */ + large_cube_dataset = H5Dcreate2(fid1, "large_cube_dataset", dset_type, file_large_cube_sid, H5P_DEFAULT, + large_cube_dcpl_id, H5P_DEFAULT); + CHECK(large_cube_dataset, FAIL, "H5Dcreate2"); + + /* Close non-default large dataset DCPL */ + if (large_cube_dcpl_id != H5P_DEFAULT) { + ret = H5Pclose(large_cube_dcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + } /* end if */ + + /* write initial data to the on disk datasets */ + ret = + H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid, xfer_plist, cube_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, file_large_cube_sid, xfer_plist, + cube_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* read initial data from disk and verify that it is as expected. 
*/ + ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid, xfer_plist, + small_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + /* Check that the data is valid */ + verify_select_hyper_contig_dr__run_test(small_cube_buf_1, small_cube_size, edge_size, small_rank); + + ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, file_large_cube_sid, xfer_plist, + large_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + /* Check that the data is valid */ + verify_select_hyper_contig_dr__run_test(large_cube_buf_1, large_cube_size, edge_size, large_rank); + + /* first, verify that we can read from disk correctly using selections + * of different rank that H5Sselect_shape_same() views as being of the + * same shape. + * + * Start by reading small_rank-D slice from the on disk large cube, and + * verifying that the data read is correct. Verify that H5Sselect_shape_same() + * returns true on the memory and file selections. + */ + + /* set up start, stride, count, and block -- note that we will + * change start[] so as to read slices of the large cube. + */ + for (u = 0; u < SS_DR_MAX_RANK; u++) { + start[u] = 0; + stride[u] = 1; + count[u] = 1; + if ((SS_DR_MAX_RANK - u) > small_rank) + block[u] = 1; + else + block[u] = (hsize_t)edge_size; + } /* end for */ + + u = 0; + do { + v = 0; + do { + w = 0; + do { + x = 0; + do { + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. + */ + start[0] = (hsize_t)u; + start[1] = (hsize_t)v; + start[2] = (hsize_t)w; + start[3] = (hsize_t)x; + start[4] = (hsize_t)0; + + ret = H5Sselect_hyperslab(file_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr, + count_ptr, block_ptr); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. + */ + check = H5Sselect_shape_same(small_cube_sid, file_large_cube_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Read selection from disk */ + ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, file_large_cube_sid, + xfer_plist, small_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + /* verify that expected data is retrieved */ + mis_match = FALSE; + ptr_1 = small_cube_buf_1; + expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) + + (v * edge_size * edge_size * edge_size) + + (w * edge_size * edge_size) + (x * edge_size)); + for (s = 0; s < small_cube_size; s++) { + if (*ptr_1 != expected_value) + mis_match = TRUE; + ptr_1++; + expected_value++; + } /* end for */ + if (mis_match) + TestErrPrintf("small cube read from largecube has bad data! Line=%d\n", __LINE__); + + x++; + } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size)); + w++; + } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size)); + v++; + } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size)); + u++; + } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size)); + + /* similarly, read the on disk small cube into slices through the in memory + * large cube, and verify that the correct data (and only the correct data) + * is read. + */ + + /* zero out the in-memory large cube */ + HDmemset(large_cube_buf_1, 0, large_cube_size * sizeof(uint16_t)); + + u = 0; + do { + v = 0; + do { + w = 0; + do { + x = 0; + do { + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. 
Thus no + * need for another inner loop. + */ + start[0] = (hsize_t)u; + start[1] = (hsize_t)v; + start[2] = (hsize_t)w; + start[3] = (hsize_t)x; + start[4] = (hsize_t)0; + + ret = H5Sselect_hyperslab(mem_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr, + count_ptr, block_ptr); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. + */ + check = H5Sselect_shape_same(small_cube_sid, mem_large_cube_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Read selection from disk */ + ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, small_cube_sid, + xfer_plist, large_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + /* verify that the expected data and only the + * expected data was read. + */ + start_index = (u * edge_size * edge_size * edge_size * edge_size) + + (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) + + (x * edge_size); + stop_index = start_index + small_cube_size - 1; + + HDassert(start_index < stop_index); + HDassert(stop_index <= large_cube_size); + + mis_match = FALSE; + ptr_1 = large_cube_buf_1; + expected_value = 0; + for (s = 0; s < start_index; s++) { + if (*ptr_1 != 0) + mis_match = TRUE; + ptr_1++; + } /* end for */ + for (; s <= stop_index; s++) { + if (*ptr_1 != expected_value) + mis_match = TRUE; + expected_value++; + ptr_1++; + } /* end for */ + for (; s < large_cube_size; s++) { + if (*ptr_1 != 0) + mis_match = TRUE; + ptr_1++; + } /* end for */ + if (mis_match) + TestErrPrintf("large cube read from small cube has bad data! Line=%u\n", __LINE__); + + /* Zero out the buffer for the next pass */ + HDmemset(large_cube_buf_1 + start_index, 0, small_cube_size * sizeof(uint16_t)); + + x++; + } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size)); + w++; + } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size)); + v++; + } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size)); + u++; + } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size)); + + /* now we go in the opposite direction, verifying that we can write + * from memory to file using selections of different rank that + * H5Sselect_shape_same() views as being of the same shape. + * + * Start by writing small_rank D slices from the in memory large cube, to + * the the on disk small cube dataset. After each write, read the small + * cube dataset back from disk, and verify that it contains the expected + * data. Verify that H5Sselect_shape_same() returns true on the + * memory and file selections. + */ + + u = 0; + do { + v = 0; + do { + w = 0; + do { + x = 0; + do { + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. + */ + + /* zero out the on disk small cube */ + ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid, + xfer_plist, zero_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* select the portion of the in memory large cube from which we + * are going to write data. 
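+ * (Only start[] changes from pass to pass; stride, count and
+ * block keep the values set up before the outer loops.)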
+ */ + start[0] = (hsize_t)u; + start[1] = (hsize_t)v; + start[2] = (hsize_t)w; + start[3] = (hsize_t)x; + start[4] = (hsize_t)0; + + ret = H5Sselect_hyperslab(mem_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr, + count_ptr, block_ptr); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* verify that H5Sselect_shape_same() reports the in + * memory slice through the cube selection and the + * on disk full small cube selections as having the same shape. + */ + check = H5Sselect_shape_same(small_cube_sid, mem_large_cube_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* write the slice from the in memory large cube to the on disk small cube */ + ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, small_cube_sid, + xfer_plist, cube_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* read the on disk small cube into memory */ + ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid, + xfer_plist, small_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + /* verify that expected data is retrieved */ + mis_match = FALSE; + ptr_1 = small_cube_buf_1; + expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) + + (v * edge_size * edge_size * edge_size) + + (w * edge_size * edge_size) + (x * edge_size)); + for (s = 0; s < small_cube_size; s++) { + if (*ptr_1 != expected_value) + mis_match = TRUE; + expected_value++; + ptr_1++; + } /* end for */ + if (mis_match) + TestErrPrintf("small cube data don't match! Line=%d\n", __LINE__); + + x++; + } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size)); + w++; + } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size)); + v++; + } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size)); + u++; + } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size)); + + /* Now write the contents of the in memory small cube to slices of + * the on disk cube. After each write, read the on disk cube + * into memory, and verify that it contains the expected + * data. Verify that H5Sselect_shape_same() returns true on + * the memory and file selections. + */ + + /* select the entire memory and file cube dataspaces */ + ret = H5Sselect_all(mem_large_cube_sid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Sselect_all(file_large_cube_sid); + CHECK(ret, FAIL, "H5Sselect_all"); + + u = 0; + do { + v = 0; + do { + w = 0; + do { + x = 0; + do { + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. + */ + + /* zero out the on disk cube */ + ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_USHORT, mem_large_cube_sid, + file_large_cube_sid, xfer_plist, zero_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* select the portion of the in memory large cube to which we + * are going to write data. + */ + start[0] = (hsize_t)u; + start[1] = (hsize_t)v; + start[2] = (hsize_t)w; + start[3] = (hsize_t)x; + start[4] = (hsize_t)0; + + ret = H5Sselect_hyperslab(file_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr, + count_ptr, block_ptr); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* verify that H5Sselect_shape_same() reports the in + * memory full selection of the small cube and the + * on disk slice through the large cube selection + * as having the same shape. 
+ */ + check = H5Sselect_shape_same(small_cube_sid, file_large_cube_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* write the cube from memory to the target slice of the disk cube */ + ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, file_large_cube_sid, + xfer_plist, cube_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* read the on disk cube into memory */ + ret = H5Sselect_all(file_large_cube_sid); + CHECK(ret, FAIL, "H5Sselect_all"); + + ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, + file_large_cube_sid, xfer_plist, large_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + /* verify that the expected data and only the + * expected data was read. + */ + start_index = (u * edge_size * edge_size * edge_size * edge_size) + + (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) + + (x * edge_size); + stop_index = start_index + small_cube_size - 1; + + HDassert(start_index < stop_index); + HDassert(stop_index <= large_cube_size); + + mis_match = FALSE; + ptr_1 = large_cube_buf_1; + expected_value = 0; + for (s = 0; s < start_index; s++) { + if (*ptr_1 != 0) + mis_match = TRUE; + ptr_1++; + } /* end for */ + for (; s <= stop_index; s++) { + if (*ptr_1 != expected_value) + mis_match = TRUE; + expected_value++; + ptr_1++; + } /* end for */ + for (; s < large_cube_size; s++) { + if (*ptr_1 != 0) + mis_match = TRUE; + ptr_1++; + } /* end for */ + if (mis_match) + TestErrPrintf("large cube written from small cube has bad data! Line=%d\n", __LINE__); + + x++; + } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size)); + w++; + } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size)); + v++; + } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size)); + u++; + } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size)); + + /* Close memory dataspaces */ + ret = H5Sclose(small_cube_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(mem_large_cube_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(file_large_cube_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Datasets */ + ret = H5Dclose(small_cube_dataset); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Dclose(large_cube_dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(small_cube_buf_1); + HDfree(large_cube_buf_1); + +} /* test_select_hyper_contig_dr__run_test() */ +#endif +#if 0 +/**************************************************************** +** +** test_select_hyper_contig_dr(): Test H5S (dataspace) +** selection code with contiguous source and target having +** different ranks but the same shape. We have already +** tested H5Sselect_shape_same in isolation, so now we try to do +** I/O. +** +****************************************************************/ +static void +test_select_hyper_contig_dr(hid_t dset_type, hid_t xfer_plist) +{ + int test_num = 0; + unsigned chunk_edge_size; /* Size of chunk's dataspace dimensions */ + unsigned edge_size = 6; /* Size of dataset's dataspace dimensions */ + unsigned small_rank; /* Current rank of small dataset */ + unsigned large_rank; /* Current rank of large dataset */ + uint16_t *cube_buf; /* Buffer for writing cube data */ + uint16_t *zero_buf; /* Buffer for writing zeroed cube data */ + uint16_t *cube_ptr; /* Temporary pointer into cube data */ + unsigned max_rank = 5; /* Max. rank to use */ + size_t max_cube_size; /* Max. 
number of elements in largest cube */ + size_t s; /* Local index variable */ + unsigned u; /* Local index variable */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Contiguous Hyperslabs With Different Rank I/O Functionality\n")); + + /* Compute max. cube size */ + max_cube_size = (size_t)1; + for (u = 0; u < max_rank; u++) + max_cube_size *= (size_t)edge_size; + + /* Allocate cube buffer for writing values */ + cube_buf = (uint16_t *)HDmalloc(sizeof(uint16_t) * max_cube_size); + CHECK_PTR(cube_buf, "HDmalloc"); + + /* Initialize the cube buffer */ + cube_ptr = cube_buf; + for (s = 0; s < max_cube_size; s++) + *cube_ptr++ = (uint16_t)s; + + /* Allocate cube buffer for zeroing values on disk */ + zero_buf = (uint16_t *)HDcalloc(sizeof(uint16_t), max_cube_size); + CHECK_PTR(zero_buf, "HDcalloc"); + + for (large_rank = 1; large_rank <= max_rank; large_rank++) { + for (small_rank = 1; small_rank < large_rank; small_rank++) { + chunk_edge_size = 0; + test_select_hyper_contig_dr__run_test(test_num, cube_buf, zero_buf, edge_size, chunk_edge_size, + small_rank, large_rank, dset_type, xfer_plist); + test_num++; + + chunk_edge_size = 3; + test_select_hyper_contig_dr__run_test(test_num, cube_buf, zero_buf, edge_size, chunk_edge_size, + small_rank, large_rank, dset_type, xfer_plist); + test_num++; + } /* for loop on small rank */ + } /* for loop on large rank */ + + HDfree(cube_buf); + HDfree(zero_buf); + +} /* test_select_hyper_contig_dr() */ +#endif +/**************************************************************** +** +** test_select_hyper_checker_board_dr__select_checker_board(): +** Given an n-cube dataspace with each edge of length +** edge_size, and a checker_edge_size either select a checker +** board selection of the entire cube(if sel_rank == n), +** or select a checker board selection of a +** sel_rank dimensional slice through n-cube parallel to the +** sel_rank fastest changing indices, with origin (in the +** higher indices) as indicated by the start array. +** +** Note that this function, like all its relatives, is +** hard coded to presume a maximum n-cube rank of 5. +** While this maximum is declared as a constant, increasing +** it will require extensive coding in addition to changing +** the value of the constant. 
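+**
+** For example, with edge_size = 6 and checker_edge_size = 2, each
+** selected axis is split into checkers [0,1], [2,3] and [4,5]:
+** base_count = 2 covers the blocks starting at 0 and 4, while
+** offset_count = 1 covers the block starting at 2.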
+** +** JRM -- 9/9/09 +** +****************************************************************/ +#if 0 +static void +test_select_hyper_checker_board_dr__select_checker_board(hid_t tgt_n_cube_sid, unsigned tgt_n_cube_rank, + unsigned edge_size, unsigned checker_edge_size, + unsigned sel_rank, const hsize_t sel_start[]) +{ + hbool_t first_selection = TRUE; + unsigned n_cube_offset; + unsigned sel_offset; + hsize_t base_count; + hsize_t offset_count; + hsize_t start[SS_DR_MAX_RANK]; /* Offset of hyperslab selection */ + hsize_t stride[SS_DR_MAX_RANK]; /* Stride of hyperslab selection */ + hsize_t count[SS_DR_MAX_RANK]; /* Count of hyperslab selection */ + hsize_t block[SS_DR_MAX_RANK]; /* Block size of hyperslab selection */ + unsigned i, j, k, l, m; /* Local index variable */ + unsigned u; /* Local index variables */ + herr_t ret; /* Generic return value */ + + HDassert(edge_size >= 6); + HDassert(0 < checker_edge_size); + HDassert(checker_edge_size <= edge_size); + HDassert(0 < sel_rank); + HDassert(sel_rank <= tgt_n_cube_rank); + HDassert(tgt_n_cube_rank <= SS_DR_MAX_RANK); + + sel_offset = SS_DR_MAX_RANK - sel_rank; + n_cube_offset = SS_DR_MAX_RANK - tgt_n_cube_rank; + HDassert(n_cube_offset <= sel_offset); + + /* First, compute the base count (which assumes start == 0 + * for the associated offset) and offset_count (which + * assumes start == checker_edge_size for the associated + * offset). + */ + base_count = edge_size / (checker_edge_size * 2); + if ((edge_size % (checker_edge_size * 2)) > 0) + base_count++; + + offset_count = (edge_size - checker_edge_size) / (checker_edge_size * 2); + if (((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0) + offset_count++; + + /* Now set up the stride and block arrays, and portions of the start + * and count arrays that will not be altered during the selection of + * the checker board. 
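+ * (For every axis that takes part in the selection, block is
+ * checker_edge_size and stride is twice that, so consecutive
+ * selected blocks along an axis are separated by one unselected
+ * checker.)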
+ */ + u = 0; + while (u < n_cube_offset) { + /* these values should never be used */ + start[u] = 0; + stride[u] = 0; + count[u] = 0; + block[u] = 0; + + u++; + } /* end while */ + + while (u < sel_offset) { + start[u] = sel_start[u]; + stride[u] = 2 * edge_size; + count[u] = 1; + block[u] = 1; + + u++; + } /* end while */ + + while (u < SS_DR_MAX_RANK) { + stride[u] = 2 * checker_edge_size; + block[u] = checker_edge_size; + + u++; + } /* end while */ + + i = 0; + do { + if (0 >= sel_offset) { + if (i == 0) { + start[0] = 0; + count[0] = base_count; + } /* end if */ + else { + start[0] = checker_edge_size; + count[0] = offset_count; + } /* end else */ + } /* end if */ + + j = 0; + do { + if (1 >= sel_offset) { + if (j == 0) { + start[1] = 0; + count[1] = base_count; + } /* end if */ + else { + start[1] = checker_edge_size; + count[1] = offset_count; + } /* end else */ + } /* end if */ + + k = 0; + do { + if (2 >= sel_offset) { + if (k == 0) { + start[2] = 0; + count[2] = base_count; + } /* end if */ + else { + start[2] = checker_edge_size; + count[2] = offset_count; + } /* end else */ + } /* end if */ + + l = 0; + do { + if (3 >= sel_offset) { + if (l == 0) { + start[3] = 0; + count[3] = base_count; + } /* end if */ + else { + start[3] = checker_edge_size; + count[3] = offset_count; + } /* end else */ + } /* end if */ + + m = 0; + do { + if (4 >= sel_offset) { + if (m == 0) { + start[4] = 0; + count[4] = base_count; + } /* end if */ + else { + start[4] = checker_edge_size; + count[4] = offset_count; + } /* end else */ + } /* end if */ + + if (((i + j + k + l + m) % 2) == 0) { + if (first_selection) { + first_selection = FALSE; + + ret = H5Sselect_hyperslab(tgt_n_cube_sid, H5S_SELECT_SET, + &(start[n_cube_offset]), &(stride[n_cube_offset]), + &(count[n_cube_offset]), &(block[n_cube_offset])); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + } /* end if */ + else { + ret = H5Sselect_hyperslab(tgt_n_cube_sid, H5S_SELECT_OR, + &(start[n_cube_offset]), &(stride[n_cube_offset]), + &(count[n_cube_offset]), &(block[n_cube_offset])); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + } /* end else */ + } /* end if */ + + m++; + } while ((m <= 1) && (4 >= sel_offset)); + l++; + } while ((l <= 1) && (3 >= sel_offset)); + k++; + } while ((k <= 1) && (2 >= sel_offset)); + j++; + } while ((j <= 1) && (1 >= sel_offset)); + i++; + } while ((i <= 1) && (0 >= sel_offset)); + + /* Weirdness alert: + * + * Some how, it seems that selections can extend beyond the + * boundaries of the target dataspace -- hence the following + * code to manually clip the selection back to the dataspace + * proper. + */ + for (u = 0; u < SS_DR_MAX_RANK; u++) { + start[u] = 0; + stride[u] = edge_size; + count[u] = 1; + block[u] = edge_size; + } /* end for */ + + ret = H5Sselect_hyperslab(tgt_n_cube_sid, H5S_SELECT_AND, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +} /* test_select_hyper_checker_board_dr__select_checker_board() */ +#endif + +/**************************************************************** +** +** test_select_hyper_checker_board_dr__verify_data(): +** +** Examine the supplied buffer to see if it contains the +** expected data. Return TRUE if it does, and FALSE +** otherwise. +** +** The supplied buffer is presumed to contain the results +** of read or writing a checkerboard selection of an +** n-cube, or a checkerboard selection of an m (1 <= m < n) +** dimensional slice through an n-cube parallel to the +** fastest changing indices. 
+** +** It is further presumed that the buffer was zeroed before +** the read, and that the n-cube was initialize with the +** natural numbers listed in order from the origin along +** the fastest changing axis. +** +** Thus for a 10x10x10 3-cube, the value stored in location +** (x, y, z) (assuming that z is the fastest changing index +** and x the slowest) is assumed to be: +** +** (10 * 10 * x) + (10 * y) + z +** +** Thus, if the buffer contains the result of reading a +** checker board selection of a 10x10x10 3-cube, location +** (x, y, z) will contain zero if it is not in a checker, +** and 100x + 10y + z if (x, y, z) is in a checker. +** +** If the buffer contains the result of reading a 3 +** dimensional slice (parallel to the three fastest changing +** indices) through an n cube (n > 3), then the expected +** values in the buffer will be the same, save that we will +** add a constant determined by the origin of the 3-cube +** in the n-cube. +** +** Finally, the function presumes that the first element +** of the buffer resides either at the origin of either +** a selected or an unselected checker. +** +****************************************************************/ +#if 0 +H5_ATTR_PURE static hbool_t +test_select_hyper_checker_board_dr__verify_data(uint16_t *buf_ptr, unsigned rank, unsigned edge_size, + unsigned checker_edge_size, uint16_t first_expected_val, + hbool_t buf_starts_in_checker) +{ + hbool_t good_data = TRUE; + hbool_t in_checker; + hbool_t start_in_checker[5]; + uint16_t expected_value; + uint16_t *val_ptr; + unsigned i, j, k, l, m; /* to track position in n-cube */ + unsigned v, w, x, y, z; /* to track position in checker */ + const unsigned test_max_rank = 5; /* code changes needed if this is increased */ + + HDassert(buf_ptr != NULL); + HDassert(0 < rank); + HDassert(rank <= test_max_rank); + HDassert(edge_size >= 6); + HDassert(0 < checker_edge_size); + HDassert(checker_edge_size <= edge_size); + HDassert(test_max_rank <= SS_DR_MAX_RANK); + + val_ptr = buf_ptr; + expected_value = first_expected_val; + + i = 0; + v = 0; + start_in_checker[0] = buf_starts_in_checker; + do { + if (v >= checker_edge_size) { + start_in_checker[0] = !start_in_checker[0]; + v = 0; + } /* end if */ + + j = 0; + w = 0; + start_in_checker[1] = start_in_checker[0]; + do { + if (w >= checker_edge_size) { + start_in_checker[1] = !start_in_checker[1]; + w = 0; + } /* end if */ + + k = 0; + x = 0; + start_in_checker[2] = start_in_checker[1]; + do { + if (x >= checker_edge_size) { + start_in_checker[2] = !start_in_checker[2]; + x = 0; + } /* end if */ + + l = 0; + y = 0; + start_in_checker[3] = start_in_checker[2]; + do { + if (y >= checker_edge_size) { + start_in_checker[3] = !start_in_checker[3]; + y = 0; + } /* end if */ + + m = 0; + z = 0; + in_checker = start_in_checker[3]; + do { + if (z >= checker_edge_size) { + in_checker = !in_checker; + z = 0; + } /* end if */ + + if (in_checker) { + if (*val_ptr != expected_value) + good_data = FALSE; + } /* end if */ + else { + if (*val_ptr != 0) + good_data = FALSE; + } /* end else */ + + val_ptr++; + expected_value++; + + m++; + z++; + } while ((rank >= (test_max_rank - 4)) && (m < edge_size)); + l++; + y++; + } while ((rank >= (test_max_rank - 3)) && (l < edge_size)); + k++; + x++; + } while ((rank >= (test_max_rank - 2)) && (k < edge_size)); + j++; + w++; + } while ((rank >= (test_max_rank - 1)) && (j < edge_size)); + i++; + v++; + } while ((rank >= test_max_rank) && (i < edge_size)); + + return (good_data); +} /* 
test_select_hyper_checker_board_dr__verify_data() */ +#endif + +/**************************************************************** +** +** test_select_hyper_checker_board_dr__run_test(): Test H5S +** (dataspace) selection code with checker board source and +** target selections having different ranks but the same +** shape. We have already tested H5Sselect_shape_same in +** isolation, so now we try to do I/O. +** +****************************************************************/ +#if 0 +static void +test_select_hyper_checker_board_dr__run_test(int test_num, const uint16_t *cube_buf, const uint16_t *zero_buf, + unsigned edge_size, unsigned checker_edge_size, + unsigned chunk_edge_size, unsigned small_rank, + unsigned large_rank, hid_t dset_type, hid_t xfer_plist) +{ + hbool_t data_ok; + hid_t fapl; /* File access property list */ + hid_t fid; /* HDF5 File IDs */ + hid_t full_small_cube_sid; /* Dataspace for small cube w/all selection */ + hid_t mem_small_cube_sid; + hid_t file_small_cube_sid; + hid_t full_large_cube_sid; /* Dataspace for large cube w/all selection */ + hid_t mem_large_cube_sid; + hid_t file_large_cube_sid; + hid_t small_cube_dcpl_id = H5P_DEFAULT; /* DCPL for small cube dataset */ + hid_t large_cube_dcpl_id = H5P_DEFAULT; /* DCPL for large cube dataset */ + hid_t small_cube_dataset; /* Dataset ID */ + hid_t large_cube_dataset; /* Dataset ID */ + unsigned small_rank_offset; /* Rank offset of slice */ + const unsigned test_max_rank = 5; /* must update code if this changes */ + size_t start_index; /* Offset within buffer to begin inspecting */ + size_t stop_index; /* Offset within buffer to end inspecting */ + uint16_t expected_value; + uint16_t *small_cube_buf_1; + uint16_t *large_cube_buf_1; + uint16_t *ptr_1; + size_t small_cube_size; /* Number of elements in small cube */ + size_t large_cube_size; /* Number of elements in large cube */ + hsize_t dims[SS_DR_MAX_RANK]; + hsize_t chunk_dims[SS_DR_MAX_RANK]; + hsize_t sel_start[SS_DR_MAX_RANK]; + unsigned u, v, w, x; /* Local index variables */ + size_t s; /* Local index variable */ + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + MESSAGE(7, ("\tn-cube slice through m-cube I/O test %d.\n", test_num)); + MESSAGE(7, ("\tranks = %d/%d, edge_size = %d, checker_edge_size = %d, chunk_edge_size = %d.\n", + small_rank, large_rank, edge_size, checker_edge_size, chunk_edge_size)); + + HDassert(edge_size >= 6); + HDassert(checker_edge_size > 0); + HDassert(checker_edge_size <= edge_size); + HDassert(edge_size >= chunk_edge_size); + HDassert((chunk_edge_size == 0) || (chunk_edge_size >= 3)); + HDassert(small_rank > 0); + HDassert(small_rank < large_rank); + HDassert(large_rank <= test_max_rank); + HDassert(test_max_rank <= SS_DR_MAX_RANK); + + /* Compute cube sizes */ + small_cube_size = large_cube_size = (size_t)1; + for (u = 0; u < large_rank; u++) { + if (u < small_rank) + small_cube_size *= (size_t)edge_size; + + large_cube_size *= (size_t)edge_size; + } /* end for */ + HDassert(large_cube_size < (size_t)(UINT_MAX)); + + small_rank_offset = test_max_rank - small_rank; + HDassert(small_rank_offset >= 1); + + /* also, at present, we use 16 bit values in this test -- + * hence the following assertion. Delete it if we convert + * to 32 bit values. 
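+ * (As currently exercised by test_select_hyper_checker_board_dr(),
+ * edge_size is at most 7 and large_rank at most 5, so the largest
+ * cube holds 7^5 = 16807 elements, well below 64 * 1024.)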
+ */ + HDassert(large_cube_size < (size_t)(64 * 1024)); + + /* Allocate & initialize buffers */ + small_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), small_cube_size); + CHECK_PTR(small_cube_buf_1, "HDcalloc"); + large_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), large_cube_size); + CHECK_PTR(large_cube_buf_1, "HDcalloc"); + + /* Create a dataset transfer property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Use the 'core' VFD for this test */ + ret = H5Pset_fapl_core(fapl, (size_t)(1024 * 1024), FALSE); + CHECK(ret, FAIL, "H5Pset_fapl_core"); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Close file access property list */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* setup dims: */ + dims[0] = dims[1] = dims[2] = dims[3] = dims[4] = edge_size; + + /* Create small cube dataspaces */ + full_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL); + CHECK(full_small_cube_sid, FAIL, "H5Screate_simple"); + + mem_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL); + CHECK(mem_small_cube_sid, FAIL, "H5Screate_simple"); + + file_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL); + CHECK(file_small_cube_sid, FAIL, "H5Screate_simple"); + + /* Create large cube dataspace */ + full_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL); + CHECK(full_large_cube_sid, FAIL, "H5Screate_simple"); + + mem_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL); + CHECK(mem_large_cube_sid, FAIL, "H5Screate_simple"); + + file_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL); + CHECK(file_large_cube_sid, FAIL, "H5Screate_simple"); + + /* if chunk edge size is greater than zero, set up the small and + * large data set creation property lists to specify chunked + * datasets. 
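+ * (test_select_hyper_checker_board_dr() passes a chunk_edge_size of
+ * either 0, which leaves both datasets with the default contiguous
+ * layout, or 3.)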
+ */ + if (chunk_edge_size > 0) { + chunk_dims[0] = chunk_dims[1] = chunk_dims[2] = chunk_dims[3] = chunk_dims[4] = chunk_edge_size; + + small_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(small_cube_dcpl_id, FAIL, "H5Pcreate"); + + ret = H5Pset_layout(small_cube_dcpl_id, H5D_CHUNKED); + CHECK(ret, FAIL, "H5Pset_layout"); + + ret = H5Pset_chunk(small_cube_dcpl_id, (int)small_rank, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + + large_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(large_cube_dcpl_id, FAIL, "H5Pcreate"); + + ret = H5Pset_layout(large_cube_dcpl_id, H5D_CHUNKED); + CHECK(ret, FAIL, "H5Pset_layout"); + + ret = H5Pset_chunk(large_cube_dcpl_id, (int)large_rank, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + } /* end if */ + + /* create the small cube dataset */ + small_cube_dataset = H5Dcreate2(fid, "small_cube_dataset", dset_type, file_small_cube_sid, H5P_DEFAULT, + small_cube_dcpl_id, H5P_DEFAULT); + CHECK(small_cube_dataset, FAIL, "H5Dcreate2"); + + /* Close non-default small dataset DCPL */ + if (small_cube_dcpl_id != H5P_DEFAULT) { + ret = H5Pclose(small_cube_dcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + } /* end if */ + + /* create the large cube dataset */ + large_cube_dataset = H5Dcreate2(fid, "large_cube_dataset", dset_type, file_large_cube_sid, H5P_DEFAULT, + large_cube_dcpl_id, H5P_DEFAULT); + CHECK(large_cube_dataset, FAIL, "H5Dcreate2"); + + /* Close non-default large dataset DCPL */ + if (large_cube_dcpl_id != H5P_DEFAULT) { + ret = H5Pclose(large_cube_dcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + } /* end if */ + + /* write initial data to the on disk datasets */ + ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid, full_small_cube_sid, + xfer_plist, cube_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, full_large_cube_sid, full_large_cube_sid, + xfer_plist, cube_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* read initial small cube data from disk and verify that it is as expected. */ + ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid, full_small_cube_sid, xfer_plist, + small_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + /* Check that the data is valid */ + verify_select_hyper_contig_dr__run_test(small_cube_buf_1, small_cube_size, edge_size, small_rank); + + /* read initial large cube data from disk and verify that it is as expected. */ + ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, full_large_cube_sid, full_large_cube_sid, xfer_plist, + large_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + /* Check that the data is valid */ + verify_select_hyper_contig_dr__run_test(large_cube_buf_1, large_cube_size, edge_size, large_rank); + + /* first, verify that we can read from disk correctly using selections + * of different rank that H5Sselect_shape_same() views as being of the + * same shape. + * + * Start by reading small_rank-D slice from the on disk large cube, and + * verifying that the data read is correct. Verify that H5Sselect_shape_same() + * returns true on the memory and file selections. + * + * The first step is to set up the needed checker board selection in the + * in memory small small cube + */ + + sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; + + test_select_hyper_checker_board_dr__select_checker_board(mem_small_cube_sid, small_rank, edge_size, + checker_edge_size, small_rank, sel_start); + + /* now read slices from the large, on-disk cube into the small cube. 
+ * Note how we adjust sel_start only in the dimensions peculiar to the + * large cube. + */ + + u = 0; + do { + if (small_rank_offset > 0) + sel_start[0] = u; + + v = 0; + do { + if (small_rank_offset > 1) + sel_start[1] = v; + + w = 0; + do { + if (small_rank_offset > 2) + sel_start[2] = w; + + x = 0; + do { + if (small_rank_offset > 3) + sel_start[3] = x; + + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. + */ + + HDassert((sel_start[0] == 0) || (0 < small_rank_offset)); + HDassert((sel_start[1] == 0) || (1 < small_rank_offset)); + HDassert((sel_start[2] == 0) || (2 < small_rank_offset)); + HDassert((sel_start[3] == 0) || (3 < small_rank_offset)); + HDassert((sel_start[4] == 0) || (4 < small_rank_offset)); + + test_select_hyper_checker_board_dr__select_checker_board( + file_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. + */ + check = H5Sselect_shape_same(mem_small_cube_sid, file_large_cube_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* zero the buffer that we will be using for reading */ + HDmemset(small_cube_buf_1, 0, sizeof(*small_cube_buf_1) * small_cube_size); + + /* Read selection from disk */ + ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, mem_small_cube_sid, + file_large_cube_sid, xfer_plist, small_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) + + (v * edge_size * edge_size * edge_size) + + (w * edge_size * edge_size) + (x * edge_size)); + + data_ok = test_select_hyper_checker_board_dr__verify_data(small_cube_buf_1, small_rank, + edge_size, checker_edge_size, + expected_value, (hbool_t)TRUE); + if (!data_ok) + TestErrPrintf("small cube read from largecube has bad data! Line=%d\n", __LINE__); + + x++; + } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) && + (x < edge_size)); + w++; + } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) && + (w < edge_size)); + v++; + } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) && + (v < edge_size)); + u++; + } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size)); + + /* similarly, read the on disk small cube into slices through the in memory + * large cube, and verify that the correct data (and only the correct data) + * is read. + */ + + /* select a checker board in the file small cube dataspace */ + sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; + test_select_hyper_checker_board_dr__select_checker_board(file_small_cube_sid, small_rank, edge_size, + checker_edge_size, small_rank, sel_start); + + u = 0; + do { + if (0 < small_rank_offset) + sel_start[0] = u; + + v = 0; + do { + if (1 < small_rank_offset) + sel_start[1] = v; + + w = 0; + do { + if (2 < small_rank_offset) + sel_start[2] = w; + + x = 0; + do { + if (3 < small_rank_offset) + sel_start[3] = x; + + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. 
+ */ + + HDassert((sel_start[0] == 0) || (0 < small_rank_offset)); + HDassert((sel_start[1] == 0) || (1 < small_rank_offset)); + HDassert((sel_start[2] == 0) || (2 < small_rank_offset)); + HDassert((sel_start[3] == 0) || (3 < small_rank_offset)); + HDassert((sel_start[4] == 0) || (4 < small_rank_offset)); + + test_select_hyper_checker_board_dr__select_checker_board( + mem_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. + */ + check = H5Sselect_shape_same(file_small_cube_sid, mem_large_cube_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* zero out the in memory large cube */ + HDmemset(large_cube_buf_1, 0, sizeof(*large_cube_buf_1) * large_cube_size); + + /* Read selection from disk */ + ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, + file_small_cube_sid, xfer_plist, large_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + /* verify that the expected data and only the + * expected data was read. + */ + data_ok = TRUE; + ptr_1 = large_cube_buf_1; + expected_value = 0; + start_index = (u * edge_size * edge_size * edge_size * edge_size) + + (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) + + (x * edge_size); + stop_index = start_index + small_cube_size - 1; + + HDassert(start_index < stop_index); + HDassert(stop_index <= large_cube_size); + + /* verify that the large cube contains only zeros before the slice */ + for (s = 0; s < start_index; s++) { + if (*ptr_1 != 0) + data_ok = FALSE; + ptr_1++; + } /* end for */ + HDassert(s == start_index); + + data_ok &= test_select_hyper_checker_board_dr__verify_data( + ptr_1, small_rank, edge_size, checker_edge_size, (uint16_t)0, (hbool_t)TRUE); + + ptr_1 += small_cube_size; + s += small_cube_size; + + HDassert(s == stop_index + 1); + + /* verify that the large cube contains only zeros after the slice */ + for (s = stop_index + 1; s < large_cube_size; s++) { + if (*ptr_1 != 0) + data_ok = FALSE; + ptr_1++; + } /* end for */ + if (!data_ok) + TestErrPrintf("large cube read from small cube has bad data! Line=%d\n", __LINE__); + + x++; + } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) && + (x < edge_size)); + w++; + } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) && + (w < edge_size)); + v++; + } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) && + (v < edge_size)); + u++; + } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size)); + + /* now we go in the opposite direction, verifying that we can write + * from memory to file using selections of different rank that + * H5Sselect_shape_same() views as being of the same shape. + * + * Start by writing small_rank D slices from the in memory large cube, to + * the the on disk small cube dataset. After each write, read the small + * cube dataset back from disk, and verify that it contains the expected + * data. Verify that H5Sselect_shape_same() returns true on the + * memory and file selections. 
+ */ + + /* select a checker board in the file small cube dataspace */ + sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; + test_select_hyper_checker_board_dr__select_checker_board(file_small_cube_sid, small_rank, edge_size, + checker_edge_size, small_rank, sel_start); + + u = 0; + do { + if (small_rank_offset > 0) + sel_start[0] = u; + + v = 0; + do { + if (small_rank_offset > 1) + sel_start[1] = v; + + w = 0; + do { + if (small_rank_offset > 2) + sel_start[2] = w; + + x = 0; + do { + if (small_rank_offset > 3) + sel_start[3] = x; + + /* zero out the on disk small cube */ + ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid, + full_small_cube_sid, xfer_plist, zero_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. + */ + + HDassert((sel_start[0] == 0) || (0 < small_rank_offset)); + HDassert((sel_start[1] == 0) || (1 < small_rank_offset)); + HDassert((sel_start[2] == 0) || (2 < small_rank_offset)); + HDassert((sel_start[3] == 0) || (3 < small_rank_offset)); + HDassert((sel_start[4] == 0) || (4 < small_rank_offset)); + + test_select_hyper_checker_board_dr__select_checker_board( + mem_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. + */ + check = H5Sselect_shape_same(file_small_cube_sid, mem_large_cube_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* write the slice from the in memory large cube to the + * on disk small cube + */ + ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, + file_small_cube_sid, xfer_plist, cube_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* zero the buffer that we will be using for reading */ + HDmemset(small_cube_buf_1, 0, sizeof(*small_cube_buf_1) * small_cube_size); + + /* read the on disk small cube into memory */ + ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid, + full_small_cube_sid, xfer_plist, small_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) + + (v * edge_size * edge_size * edge_size) + + (w * edge_size * edge_size) + (x * edge_size)); + + data_ok = test_select_hyper_checker_board_dr__verify_data(small_cube_buf_1, small_rank, + edge_size, checker_edge_size, + expected_value, (hbool_t)TRUE); + if (!data_ok) + TestErrPrintf("small cube read from largecube has bad data! Line=%d\n", __LINE__); + + x++; + } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) && + (x < edge_size)); + w++; + } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) && + (w < edge_size)); + v++; + } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) && + (v < edge_size)); + u++; + } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size)); + + /* Now write checker board selections of the entries in memory + * small cube to slices of the on disk cube. After each write, + * read the on disk large cube * into memory, and verify that + * it contains the expected * data. Verify that + * H5Sselect_shape_same() returns true on the memory and file + * selections. 
+ */ + + /* select a checker board in the in memory small cube dataspace */ + sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; + test_select_hyper_checker_board_dr__select_checker_board(mem_small_cube_sid, small_rank, edge_size, + checker_edge_size, small_rank, sel_start); + + u = 0; + do { + if (small_rank_offset > 0) + sel_start[0] = u; + + v = 0; + do { + if (small_rank_offset > 1) + sel_start[1] = v; + + w = 0; + do { + if (small_rank_offset > 2) + sel_start[2] = w; + + x = 0; + do { + if (small_rank_offset > 3) + sel_start[3] = x; + + /* zero out the on disk cube */ + ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_USHORT, full_large_cube_sid, + full_large_cube_sid, xfer_plist, zero_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. + */ + + HDassert((sel_start[0] == 0) || (0 < small_rank_offset)); + HDassert((sel_start[1] == 0) || (1 < small_rank_offset)); + HDassert((sel_start[2] == 0) || (2 < small_rank_offset)); + HDassert((sel_start[3] == 0) || (3 < small_rank_offset)); + HDassert((sel_start[4] == 0) || (4 < small_rank_offset)); + + test_select_hyper_checker_board_dr__select_checker_board( + file_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. + */ + check = H5Sselect_shape_same(file_large_cube_sid, mem_small_cube_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* write the checker board selection of the in memory + * small cube to a slice through the on disk large + * cube. + */ + ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, mem_small_cube_sid, + file_large_cube_sid, xfer_plist, cube_buf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* zero out the in memory large cube */ + HDmemset(large_cube_buf_1, 0, sizeof(*large_cube_buf_1) * large_cube_size); + + /* read the on disk large cube into memory */ + ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, full_large_cube_sid, + full_large_cube_sid, xfer_plist, large_cube_buf_1); + CHECK(ret, FAIL, "H5Dread"); + + /* verify that the expected data and only the + * expected data was written to the on disk large + * cube. + */ + data_ok = TRUE; + ptr_1 = large_cube_buf_1; + expected_value = 0; + start_index = (u * edge_size * edge_size * edge_size * edge_size) + + (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) + + (x * edge_size); + stop_index = start_index + small_cube_size - 1; + + HDassert(start_index < stop_index); + HDassert(stop_index <= large_cube_size); + + /* verify that the large cube contains only zeros before the slice */ + for (s = 0; s < start_index; s++) { + if (*ptr_1 != 0) + data_ok = FALSE; + ptr_1++; + } /* end for */ + HDassert(s == start_index); + + /* verify that the slice contains the expected data */ + data_ok &= test_select_hyper_checker_board_dr__verify_data( + ptr_1, small_rank, edge_size, checker_edge_size, (uint16_t)0, (hbool_t)TRUE); + + ptr_1 += small_cube_size; + s += small_cube_size; + + HDassert(s == stop_index + 1); + + /* verify that the large cube contains only zeros after the slice */ + for (s = stop_index + 1; s < large_cube_size; s++) { + if (*ptr_1 != 0) + data_ok = FALSE; + ptr_1++; + } /* end for */ + if (!data_ok) + TestErrPrintf("large cube written from small cube has bad data! 
Line=%d\n", __LINE__); + + x++; + } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) && + (x < edge_size)); + w++; + } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) && + (w < edge_size)); + v++; + } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) && + (v < edge_size)); + u++; + } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size)); + + /* Close memory dataspaces */ + ret = H5Sclose(full_small_cube_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(full_large_cube_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(mem_small_cube_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(mem_large_cube_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(file_small_cube_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(file_large_cube_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Datasets */ + ret = H5Dclose(small_cube_dataset); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Dclose(large_cube_dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(small_cube_buf_1); + HDfree(large_cube_buf_1); + +} /* test_select_hyper_checker_board_dr__run_test() */ +#endif +/**************************************************************** +** +** test_select_hyper_checker_board_dr(): Test H5S (dataspace) +** selection code with checkerboard source and target having +** different ranks but the same shape. We have already +** tested H5Sselect_shape_same in isolation, so now we try to do +** I/O. +** +** This is just an initial smoke check, so we will work +** with a slice through a cube only. +** +****************************************************************/ +#if 0 +static void +test_select_hyper_checker_board_dr(hid_t dset_type, hid_t xfer_plist) +{ + uint16_t *cube_buf; /* Buffer for writing cube data */ + uint16_t *cube_ptr; /* Temporary pointer into cube data */ + uint16_t *zero_buf; /* Buffer for writing zeroed cube data */ + int test_num = 0; + unsigned checker_edge_size = 2; /* Size of checkerboard dimension */ + unsigned chunk_edge_size; /* Size of chunk's dataspace dimensions */ + unsigned edge_size = 6; /* Size of dataset's dataspace dimensions */ + unsigned small_rank; /* Current rank of small dataset */ + unsigned large_rank; /* Current rank of large dataset */ + unsigned max_rank = 5; /* Max. rank to use */ + size_t max_cube_size; /* Max. number of elements in largest cube */ + size_t s; /* Local index variable */ + unsigned u; /* Local index variable */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Checker Board Hyperslabs With Different Rank I/O Functionality\n")); + + /* Compute max. 
cube size */ + max_cube_size = (size_t)1; + for (u = 0; u < max_rank; u++) + max_cube_size *= (size_t)(edge_size + 1); + + /* Allocate cube buffer for writing values */ + cube_buf = (uint16_t *)HDmalloc(sizeof(uint16_t) * max_cube_size); + CHECK_PTR(cube_buf, "HDmalloc"); + + /* Initialize the cube buffer */ + cube_ptr = cube_buf; + for (s = 0; s < max_cube_size; s++) + *cube_ptr++ = (uint16_t)s; + + /* Allocate cube buffer for zeroing values on disk */ + zero_buf = (uint16_t *)HDcalloc(sizeof(uint16_t), max_cube_size); + CHECK_PTR(zero_buf, "HDcalloc"); + + for (large_rank = 1; large_rank <= max_rank; large_rank++) { + for (small_rank = 1; small_rank < large_rank; small_rank++) { + chunk_edge_size = 0; + test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size, + checker_edge_size, chunk_edge_size, small_rank, + large_rank, dset_type, xfer_plist); + test_num++; + + test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size + 1, + checker_edge_size, chunk_edge_size, small_rank, + large_rank, dset_type, xfer_plist); + test_num++; + + chunk_edge_size = 3; + test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size, + checker_edge_size, chunk_edge_size, small_rank, + large_rank, dset_type, xfer_plist); + test_num++; + + test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size + 1, + checker_edge_size, chunk_edge_size, small_rank, + large_rank, dset_type, xfer_plist); + test_num++; + } /* for loop on small rank */ + } /* for loop on large rank */ + + HDfree(cube_buf); + HDfree(zero_buf); + +} /* test_select_hyper_checker_board_dr() */ +#endif +/**************************************************************** +** +** test_select_hyper_copy(): Test H5S (dataspace) selection code. 
+** Tests copying hyperslab selections +** +****************************************************************/ +static void +test_select_hyper_copy(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t data1, data2; /* Dataset IDs */ + hid_t sid1, sid2, sid3; /* Dataspace IDs */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; + hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ + uint16_t *wbuf, /* buffer to write to disk */ + *rbuf, /* 1st buffer read from disk */ + *rbuf2, /* 2nd buffer read from disk */ + *tbuf; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslabs with Strides Functionality\n")); + + /* Allocate write & read buffers */ + wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + rbuf2 = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); + CHECK_PTR(rbuf2, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 2x3x3 count with a stride of 2x4x3 & 1x2x2 block hyperslab for disk dataset */ + start[0] = 0; + start[1] = 0; + start[2] = 0; + stride[0] = 2; + stride[1] = 4; + stride[2] = 3; + count[0] = 2; + count[1] = 3; + count[2] = 3; + block[0] = 1; + block[1] = 2; + block[2] = 2; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select 4x2 count with a stride of 5x5 & 3x3 block hyperslab for memory dataset */ + start[0] = 1; + start[1] = 1; + stride[0] = 5; + stride[1] = 5; + count[0] = 4; + count[1] = 2; + block[0] = 3; + block[1] = 3; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Make a copy of the dataspace to write */ + sid3 = H5Scopy(sid2); + CHECK(sid3, FAIL, "H5Scopy"); + + /* Create a dataset */ + data1 = H5Dcreate2(fid1, SPACE1_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(data1, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create another dataset */ + data2 = H5Dcreate2(fid1, SPACE2_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(data2, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close 
memory dataspace */ + ret = H5Sclose(sid3); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 3x4 count with a stride of 4x4 & 2x3 block hyperslab for memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 4; + stride[1] = 4; + count[0] = 3; + count[1] = 4; + block[0] = 2; + block[1] = 3; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Make a copy of the dataspace to read */ + sid3 = H5Scopy(sid2); + CHECK(sid3, FAIL, "H5Scopy"); + + /* Read selection from disk */ + ret = H5Dread(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Read selection from disk */ + ret = H5Dread(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, rbuf2); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + if (HDmemcmp(rbuf, rbuf2, sizeof(uint16_t) * SPACE3_DIM1 * SPACE3_DIM2) != 0) + TestErrPrintf("hyperslab values don't match! Line=%d\n", __LINE__); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close 2nd memory dataspace */ + ret = H5Sclose(sid3); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(data1); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close Dataset */ + ret = H5Dclose(data2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); + HDfree(rbuf2); +} /* test_select_hyper_copy() */ + +/**************************************************************** +** +** test_select_point_copy(): Test H5S (dataspace) selection code. 
+** Tests copying point selections +** +****************************************************************/ +static void +test_select_point_copy(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t data1, data2; /* Dataset IDs */ + hid_t sid1, sid2, sid3; /* Dataspace IDs */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; + hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */ + hsize_t coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ + hsize_t coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */ + uint16_t *wbuf, /* buffer to write to disk */ + *rbuf, /* 1st buffer read from disk */ + *rbuf2, /* 2nd buffer read from disk */ + *tbuf; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslabs with Strides Functionality\n")); + + /* Allocate write & read buffers */ + wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + rbuf2 = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); + CHECK_PTR(rbuf2, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select sequence of ten points for disk dataset */ + coord1[0][0] = 0; + coord1[0][1] = 10; + coord1[0][2] = 5; + coord1[1][0] = 1; + coord1[1][1] = 2; + coord1[1][2] = 7; + coord1[2][0] = 2; + coord1[2][1] = 4; + coord1[2][2] = 9; + coord1[3][0] = 0; + coord1[3][1] = 6; + coord1[3][2] = 11; + coord1[4][0] = 1; + coord1[4][1] = 8; + coord1[4][2] = 13; + coord1[5][0] = 2; + coord1[5][1] = 12; + coord1[5][2] = 0; + coord1[6][0] = 0; + coord1[6][1] = 14; + coord1[6][2] = 2; + coord1[7][0] = 1; + coord1[7][1] = 0; + coord1[7][2] = 4; + coord1[8][0] = 2; + coord1[8][1] = 1; + coord1[8][2] = 6; + coord1[9][0] = 0; + coord1[9][1] = 3; + coord1[9][2] = 8; + ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Select sequence of ten points for write dataset */ + coord2[0][0] = 12; + coord2[0][1] = 3; + coord2[1][0] = 15; + coord2[1][1] = 13; + coord2[2][0] = 7; + coord2[2][1] = 25; + coord2[3][0] = 0; + coord2[3][1] = 6; + coord2[4][0] = 13; + coord2[4][1] = 0; + coord2[5][0] = 24; + coord2[5][1] = 11; + coord2[6][0] = 12; + coord2[6][1] = 21; + coord2[7][0] = 29; + coord2[7][1] = 4; + coord2[8][0] = 8; + coord2[8][1] = 8; + coord2[9][0] = 19; + coord2[9][1] = 17; + ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Make a copy of the dataspace to write */ + sid3 = H5Scopy(sid2); + CHECK(sid3, FAIL, "H5Scopy"); + + /* Create a dataset */ + data1 = 
H5Dcreate2(fid1, SPACE1_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(data1, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create another dataset */ + data2 = H5Dcreate2(fid1, SPACE2_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(data2, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid3); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select sequence of points for read dataset */ + coord3[0][0] = 0; + coord3[0][1] = 2; + coord3[1][0] = 4; + coord3[1][1] = 8; + coord3[2][0] = 13; + coord3[2][1] = 13; + coord3[3][0] = 14; + coord3[3][1] = 25; + coord3[4][0] = 7; + coord3[4][1] = 9; + coord3[5][0] = 2; + coord3[5][1] = 0; + coord3[6][0] = 9; + coord3[6][1] = 19; + coord3[7][0] = 1; + coord3[7][1] = 22; + coord3[8][0] = 12; + coord3[8][1] = 21; + coord3[9][0] = 11; + coord3[9][1] = 6; + ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Make a copy of the dataspace to read */ + sid3 = H5Scopy(sid2); + CHECK(sid3, FAIL, "H5Scopy"); + + /* Read selection from disk */ + ret = H5Dread(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Read selection from disk */ + ret = H5Dread(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, rbuf2); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + if (HDmemcmp(rbuf, rbuf2, sizeof(uint16_t) * SPACE3_DIM1 * SPACE3_DIM2) != 0) + TestErrPrintf("point values don't match!\n"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close 2nd memory dataspace */ + ret = H5Sclose(sid3); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(data1); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close Dataset */ + ret = H5Dclose(data2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); + HDfree(rbuf2); +} /* test_select_point_copy() */ + +/**************************************************************** +** +** test_select_hyper_offset(): Test basic H5S (dataspace) selection code. +** Tests hyperslabs of various sizes and dimensionalities with selection +** offsets. 
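+** Offsets are applied with H5Soffset_simple() and checked with
+** H5Sselect_valid() (including one deliberately invalid offset)
+** before any hyperslab I/O is performed.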
+** +****************************************************************/ +static void +test_select_hyper_offset(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; + hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ + hssize_t offset[SPACE1_RANK]; /* Offset of selection */ + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf, /* temporary buffer pointer */ + *tbuf2; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + htri_t valid; /* Generic boolean return value */ + H5S_class_t ext_type; /* Extent type */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslab Selection Functions with Offsets\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Verify extent type */ + ext_type = H5Sget_simple_extent_type(sid1); + VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type"); + + /* Select 2x15x13 hyperslab for disk dataset */ + start[0] = 1; + start[1] = 0; + start[2] = 0; + stride[0] = 1; + stride[1] = 1; + stride[2] = 1; + count[0] = 2; + count[1] = 15; + count[2] = 13; + block[0] = 1; + block[1] = 1; + block[2] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Check a valid offset */ + offset[0] = -1; + offset[1] = 0; + offset[2] = 0; + ret = H5Soffset_simple(sid1, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + valid = H5Sselect_valid(sid1); + VERIFY(valid, TRUE, "H5Sselect_valid"); + + /* Check an invalid offset */ + offset[0] = 10; + offset[1] = 0; + offset[2] = 0; + ret = H5Soffset_simple(sid1, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + valid = H5Sselect_valid(sid1); + VERIFY(valid, FALSE, "H5Sselect_valid"); + + /* Reset offset */ + offset[0] = 0; + offset[1] = 0; + offset[2] = 0; + ret = H5Soffset_simple(sid1, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + valid = H5Sselect_valid(sid1); + VERIFY(valid, TRUE, "H5Sselect_valid"); + + /* Select 15x26 hyperslab for memory dataset */ + start[0] = 15; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 15; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Choose a valid offset for the memory dataspace */ + 
offset[0] = -10; + offset[1] = 0; + ret = H5Soffset_simple(sid2, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + valid = H5Sselect_valid(sid2); + VERIFY(valid, TRUE, "H5Sselect_valid"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 15x26 hyperslab for reading memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 15; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + for (i = 0; i < SPACE3_DIM1; i++) { + tbuf = wbuf + ((i + 5) * SPACE2_DIM2); + tbuf2 = rbuf + (i * SPACE3_DIM2); + for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) { + if (*tbuf != *tbuf2) + TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%u, *tbuf2=%u\n", + __LINE__, i, j, (unsigned)*tbuf, (unsigned)*tbuf2); + } /* end for */ + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_offset() */ + +/**************************************************************** +** +** test_select_hyper_offset2(): Test basic H5S (dataspace) selection code. +** Tests optimized hyperslab I/O with selection offsets. 
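+** Both the file and memory dataspaces are given nonzero offsets via
+** H5Soffset_simple() before the data is written and read back.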
+** +****************************************************************/ +static void +test_select_hyper_offset2(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; + hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2}; + hsize_t start[SPACE7_RANK]; /* Starting location of hyperslab */ + hsize_t count[SPACE7_RANK]; /* Element count of hyperslab */ + hssize_t offset[SPACE7_RANK]; /* Offset of selection */ + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf, /* temporary buffer pointer */ + *tbuf2; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + htri_t valid; /* Generic boolean return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing More Hyperslab Selection Functions with Offsets\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE7_DIM1 * SPACE7_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE7_DIM1 * SPACE7_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE7_DIM1; i++) + for (j = 0; j < SPACE7_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE7_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE7_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 4x10 hyperslab for disk dataset */ + start[0] = 1; + start[1] = 0; + count[0] = 4; + count[1] = 10; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Set offset */ + offset[0] = 1; + offset[1] = 0; + ret = H5Soffset_simple(sid1, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + valid = H5Sselect_valid(sid1); + VERIFY(valid, TRUE, "H5Sselect_valid"); + + /* Select 4x10 hyperslab for memory dataset */ + start[0] = 1; + start[1] = 0; + count[0] = 4; + count[1] = 10; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Choose a valid offset for the memory dataspace */ + offset[0] = 2; + offset[1] = 0; + ret = H5Soffset_simple(sid2, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + valid = H5Sselect_valid(sid2); + VERIFY(valid, TRUE, "H5Sselect_valid"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE7_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + for (i = 0; i < 4; i++) { + tbuf = wbuf + ((i + 3) * SPACE7_DIM2); + tbuf2 = rbuf + ((i + 3) * SPACE7_DIM2); + for (j = 0; j < SPACE7_DIM2; j++, tbuf++, tbuf2++) { + if (*tbuf != *tbuf2) + TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%u, *tbuf2=%u\n", + __LINE__, i, j, (unsigned)*tbuf, (unsigned)*tbuf2); + } /* end for */ + } /* end for */ + + /* 
Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_offset2() */ + +/**************************************************************** +** +** test_select_point_offset(): Test basic H5S (dataspace) selection code. +** Tests element selections between dataspaces of various sizes +** and dimensionalities with selection offsets. +** +****************************************************************/ +static void +test_select_point_offset(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; + hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */ + hsize_t coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ + hsize_t coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */ + hssize_t offset[SPACE1_RANK]; /* Offset of selection */ + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf, /* temporary buffer pointer */ + *tbuf2; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + htri_t valid; /* Generic boolean return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Element Selection Functions\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for write buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select sequence of ten points for disk dataset */ + coord1[0][0] = 0; + coord1[0][1] = 10; + coord1[0][2] = 5; + coord1[1][0] = 1; + coord1[1][1] = 2; + coord1[1][2] = 7; + coord1[2][0] = 2; + coord1[2][1] = 4; + coord1[2][2] = 9; + coord1[3][0] = 0; + coord1[3][1] = 6; + coord1[3][2] = 11; + coord1[4][0] = 1; + coord1[4][1] = 8; + coord1[4][2] = 12; + coord1[5][0] = 2; + coord1[5][1] = 12; + coord1[5][2] = 0; + coord1[6][0] = 0; + coord1[6][1] = 14; + coord1[6][2] = 2; + coord1[7][0] = 1; + coord1[7][1] = 0; + coord1[7][2] = 4; + coord1[8][0] = 2; + coord1[8][1] = 1; + coord1[8][2] = 6; + coord1[9][0] = 0; + coord1[9][1] = 3; + coord1[9][2] = 8; + ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Check a valid offset */ + offset[0] = 0; + offset[1] = 0; + offset[2] = 1; + ret = H5Soffset_simple(sid1, offset); + CHECK(ret, FAIL, 
"H5Soffset_simple"); + valid = H5Sselect_valid(sid1); + VERIFY(valid, TRUE, "H5Sselect_valid"); + + /* Check an invalid offset */ + offset[0] = 10; + offset[1] = 0; + offset[2] = 0; + ret = H5Soffset_simple(sid1, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + valid = H5Sselect_valid(sid1); + VERIFY(valid, FALSE, "H5Sselect_valid"); + + /* Reset offset */ + offset[0] = 0; + offset[1] = 0; + offset[2] = 0; + ret = H5Soffset_simple(sid1, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + valid = H5Sselect_valid(sid1); + VERIFY(valid, TRUE, "H5Sselect_valid"); + + /* Select sequence of ten points for write dataset */ + coord2[0][0] = 12; + coord2[0][1] = 3; + coord2[1][0] = 15; + coord2[1][1] = 13; + coord2[2][0] = 7; + coord2[2][1] = 24; + coord2[3][0] = 0; + coord2[3][1] = 6; + coord2[4][0] = 13; + coord2[4][1] = 0; + coord2[5][0] = 24; + coord2[5][1] = 11; + coord2[6][0] = 12; + coord2[6][1] = 21; + coord2[7][0] = 23; + coord2[7][1] = 4; + coord2[8][0] = 8; + coord2[8][1] = 8; + coord2[9][0] = 19; + coord2[9][1] = 17; + ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Choose a valid offset for the memory dataspace */ + offset[0] = 5; + offset[1] = 1; + ret = H5Soffset_simple(sid2, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + valid = H5Sselect_valid(sid2); + VERIFY(valid, TRUE, "H5Sselect_valid"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select sequence of points for read dataset */ + coord3[0][0] = 0; + coord3[0][1] = 2; + coord3[1][0] = 4; + coord3[1][1] = 8; + coord3[2][0] = 13; + coord3[2][1] = 13; + coord3[3][0] = 14; + coord3[3][1] = 25; + coord3[4][0] = 7; + coord3[4][1] = 9; + coord3[5][0] = 2; + coord3[5][1] = 0; + coord3[6][0] = 9; + coord3[6][1] = 19; + coord3[7][0] = 1; + coord3[7][1] = 22; + coord3[8][0] = 12; + coord3[8][1] = 21; + coord3[9][0] = 11; + coord3[9][1] = 6; + ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + for (i = 0; i < POINT1_NPOINTS; i++) { + tbuf = wbuf + ((coord2[i][0] + (hsize_t)offset[0]) * SPACE2_DIM2) + coord2[i][1] + (hsize_t)offset[1]; + tbuf2 = rbuf + (coord3[i][0] * SPACE3_DIM2) + coord3[i][1]; + if (*tbuf != *tbuf2) + TestErrPrintf("element values don't match!, i=%d\n", i); + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_point_offset() */ + 
+/****************************************************************
+**
+** test_select_hyper_union(): Test basic H5S (dataspace) selection code.
+** Tests unions of hyperslabs of various sizes and dimensionalities.
+**
+****************************************************************/
+static void
+test_select_hyper_union(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hid_t xfer; /* Dataset Transfer Property List ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */
+ size_t begin[SPACE2_DIM1] = /* Offset within irregular block */
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* First ten rows start at offset 0 */
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}; /* Next eighteen rows start at offset 5 */
+ size_t len[SPACE2_DIM1] = /* Len of each row within irregular block */
+ {10, 10, 10, 10, 10, 10, 10, 10, /* First eight rows are 10 long */
+ 20, 20, /* Next two rows are 20 long */
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}; /* Next eighteen rows are 15 long */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+ hssize_t npoints; /* Number of elements in selection */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslab Selection Functions with unions of hyperslabs\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Test simple case of one block overlapping another */
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 2x15x13 hyperslab for disk dataset */
+ start[0] = 1;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 2;
+ count[1] = 15;
+ count[2] = 13;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid1);
+ VERIFY(npoints, 2 * 15 * 13, "H5Sget_select_npoints");
+
+ /* Select 8x26 hyperslab for memory dataset */
+ start[0] = 15;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 8;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Union overlapping 8x26 hyperslab 
for memory dataset (to form a 15x26 selection) */ + start[0] = 22; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 8; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints = H5Sget_select_npoints(sid2); + VERIFY(npoints, 15 * 26, "H5Sget_select_npoints"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 15x26 hyperslab for reading memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 15; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + for (i = 0; i < SPACE3_DIM1; i++) { + tbuf = wbuf + ((i + 15) * SPACE2_DIM2); + tbuf2 = rbuf + (i * SPACE3_DIM2); + for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) { + if (*tbuf != *tbuf2) + TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", + __LINE__, i, j, (int)*tbuf, (int)*tbuf2); + } /* end for */ + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Test simple case of several block overlapping another */ + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 2x15x13 hyperslab for disk dataset */ + start[0] = 1; + start[1] = 0; + start[2] = 0; + stride[0] = 1; + stride[1] = 1; + stride[2] = 1; + count[0] = 2; + count[1] = 15; + count[2] = 13; + block[0] = 1; + block[1] = 1; + block[2] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select 8x15 hyperslab for memory dataset */ + start[0] = 15; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 8; + count[1] = 15; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Union overlapping 8x15 hyperslab for memory dataset (to form a 15x15 selection) */ + start[0] = 22; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 8; + count[1] = 15; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Union overlapping 15x15 hyperslab for memory dataset (to form a 15x26 selection) */ + start[0] = 15; + start[1] = 11; + 
stride[0] = 1; + stride[1] = 1; + count[0] = 15; + count[1] = 15; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints = H5Sget_select_npoints(sid2); + VERIFY(npoints, 15 * 26, "H5Sget_select_npoints"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 15x26 hyperslab for reading memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 15; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + for (i = 0; i < SPACE3_DIM1; i++) { + tbuf = wbuf + ((i + 15) * SPACE2_DIM2); + tbuf2 = rbuf + (i * SPACE3_DIM2); + for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) { + if (*tbuf != *tbuf2) + TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", + __LINE__, i, j, (int)*tbuf, (int)*tbuf2); + } /* end for */ + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Test disjoint case of two non-overlapping blocks */ + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 2x15x13 hyperslab for disk dataset */ + start[0] = 1; + start[1] = 0; + start[2] = 0; + stride[0] = 1; + stride[1] = 1; + stride[2] = 1; + count[0] = 2; + count[1] = 15; + count[2] = 13; + block[0] = 1; + block[1] = 1; + block[2] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select 7x26 hyperslab for memory dataset */ + start[0] = 1; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 7; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Union non-overlapping 8x26 hyperslab for memory dataset (to form a 15x26 disjoint selection) */ + start[0] = 22; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 8; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints = H5Sget_select_npoints(sid2); + VERIFY(npoints, 15 * 26, "H5Sget_select_npoints"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE3_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, 
H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 15x26 hyperslab for reading memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 15; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + for (i = 0; i < SPACE3_DIM1; i++) { + /* Jump over gap in middle */ + if (i < 7) + tbuf = wbuf + ((i + 1) * SPACE2_DIM2); + else + tbuf = wbuf + ((i + 15) * SPACE2_DIM2); + tbuf2 = rbuf + (i * SPACE3_DIM2); + for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) { + if (*tbuf != *tbuf2) + TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", + __LINE__, i, j, (int)*tbuf, (int)*tbuf2); + } /* end for */ + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Test disjoint case of two non-overlapping blocks with hyperslab caching turned off */ + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 2x15x13 hyperslab for disk dataset */ + start[0] = 1; + start[1] = 0; + start[2] = 0; + stride[0] = 1; + stride[1] = 1; + stride[2] = 1; + count[0] = 2; + count[1] = 15; + count[2] = 13; + block[0] = 1; + block[1] = 1; + block[2] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select 7x26 hyperslab for memory dataset */ + start[0] = 1; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 7; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Union non-overlapping 8x26 hyperslab for memory dataset (to form a 15x26 disjoint selection) */ + start[0] = 22; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 8; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints = H5Sget_select_npoints(sid2); + VERIFY(npoints, 15 * 26, "H5Sget_select_npoints"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE4_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + xfer = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer, FAIL, "H5Pcreate"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory 
dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 15x26 hyperslab for reading memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 15; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Close transfer property list */ + ret = H5Pclose(xfer); + CHECK(ret, FAIL, "H5Pclose"); + + /* Compare data read with data written out */ + for (i = 0; i < SPACE3_DIM1; i++) { + /* Jump over gap in middle */ + if (i < 7) + tbuf = wbuf + ((i + 1) * SPACE2_DIM2); + else + tbuf = wbuf + ((i + 15) * SPACE2_DIM2); + tbuf2 = rbuf + (i * SPACE3_DIM2); + for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) { + if (*tbuf != *tbuf2) + TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", + __LINE__, i, j, (int)*tbuf, (int)*tbuf2); + } /* end for */ + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Test case of two blocks which overlap corners and must be split */ + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 2x15x13 hyperslab for disk dataset */ + start[0] = 1; + start[1] = 0; + start[2] = 0; + stride[0] = 1; + stride[1] = 1; + stride[2] = 1; + count[0] = 2; + count[1] = 15; + count[2] = 13; + block[0] = 1; + block[1] = 1; + block[2] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select 10x10 hyperslab for memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 10; + count[1] = 10; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Union overlapping 15x20 hyperslab for memory dataset (forming a irregularly shaped region) */ + start[0] = 8; + start[1] = 5; + stride[0] = 1; + stride[1] = 1; + count[0] = 20; + count[1] = 15; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints = H5Sget_select_npoints(sid2); + VERIFY(npoints, 15 * 26, "H5Sget_select_npoints"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE5_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 15x26 
hyperslab for reading memory dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 15; + count[1] = 26; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read with data written out */ + for (i = 0, tbuf2 = rbuf; i < SPACE2_DIM1; i++) { + tbuf = wbuf + (i * SPACE2_DIM2) + begin[i]; + for (j = 0; j < (int)len[i]; j++, tbuf++, tbuf2++) { + if (*tbuf != *tbuf2) + TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", + __LINE__, i, j, (int)*tbuf, (int)*tbuf2); + } /* end for */ + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_union() */ + +/**************************************************************** +** +** test_select_hyper_union_stagger(): Test basic H5S (dataspace) selection code. +** Tests unions of staggered hyperslabs. (Uses H5Scombine_hyperslab +** and H5Smodify_select instead of H5Sselect_hyperslab) +** +****************************************************************/ +static void +test_select_hyper_union_stagger(void) +{ + hid_t file_id; /* File ID */ + hid_t dset_id; /* Dataset ID */ + hid_t dataspace; /* File dataspace ID */ + hid_t memspace; /* Memory dataspace ID */ + hid_t tmp_space; /* Temporary dataspace ID */ + hid_t tmp2_space; /* Another emporary dataspace ID */ + hsize_t dimsm[2] = {7, 7}; /* Memory array dimensions */ + hsize_t dimsf[2] = {6, 5}; /* File array dimensions */ + hsize_t count[2] = {3, 1}; /* 1st Hyperslab size */ + hsize_t count2[2] = {3, 1}; /* 2nd Hyperslab size */ + hsize_t count3[2] = {2, 1}; /* 3rd Hyperslab size */ + hsize_t start[2] = {0, 0}; /* 1st Hyperslab offset */ + hsize_t start2[2] = {2, 1}; /* 2nd Hyperslab offset */ + hsize_t start3[2] = {4, 2}; /* 3rd Hyperslab offset */ + hsize_t count_out[2] = {4, 2}; /* Hyperslab size in memory */ + hsize_t start_out[2] = {0, 3}; /* Hyperslab offset in memory */ + int data[6][5]; /* Data to write */ + int data_out[7][7]; /* Data read in */ + int input_loc[8][2] = {{0, 0}, {1, 0}, {2, 0}, {2, 1}, {3, 1}, {4, 1}, {4, 2}, {5, 2}}; + int output_loc[8][2] = {{0, 3}, {0, 4}, {1, 3}, {1, 4}, {2, 3}, {2, 4}, {3, 3}, {3, 4}}; + int dsetrank = 2; /* File Dataset rank */ + int memrank = 2; /* Memory Dataset rank */ + int i, j; /* Local counting variables */ + herr_t error; + hsize_t stride[2] = {1, 1}; + hsize_t block[2] = {1, 1}; + + /* Initialize data to write */ + for (i = 0; i < 6; i++) + for (j = 0; j < 5; j++) + data[i][j] = j * 10 + i; + + /* Create file */ + file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fcreate"); + + /* Create File Dataspace */ + dataspace = H5Screate_simple(dsetrank, dimsf, NULL); + CHECK(dataspace, FAIL, "H5Screate_simple"); + + /* Create File Dataset */ + dset_id = + H5Dcreate2(file_id, "IntArray", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dcreate2"); + + /* Write File 
Dataset */ + error = H5Dwrite(dset_id, H5T_NATIVE_INT, dataspace, dataspace, H5P_DEFAULT, data); + CHECK(error, FAIL, "H5Dwrite"); + + /* Close things */ + error = H5Sclose(dataspace); + CHECK(error, FAIL, "H5Sclose"); + error = H5Dclose(dset_id); + CHECK(error, FAIL, "H5Dclose"); + error = H5Fclose(file_id); + CHECK(error, FAIL, "H5Fclose"); + + /* Initialize input buffer */ + HDmemset(data_out, 0, 7 * 7 * sizeof(int)); + + /* Open file */ + file_id = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fopen"); + + /* Open dataset */ + dset_id = H5Dopen2(file_id, "IntArray", H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dopen2"); + + /* Get the dataspace */ + dataspace = H5Dget_space(dset_id); + CHECK(dataspace, FAIL, "H5Dget_space"); + + /* Select the hyperslabs */ + error = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + tmp_space = H5Scombine_hyperslab(dataspace, H5S_SELECT_OR, start2, stride, count2, block); + CHECK(tmp_space, FAIL, "H5Scombine_hyperslab"); + + /* Copy the file dataspace and select hyperslab */ + tmp2_space = H5Scopy(dataspace); + CHECK(tmp2_space, FAIL, "H5Scopy"); + error = H5Sselect_hyperslab(tmp2_space, H5S_SELECT_SET, start3, stride, count3, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Combine the copied dataspace with the temporary dataspace */ + error = H5Smodify_select(tmp_space, H5S_SELECT_OR, tmp2_space); + CHECK(error, FAIL, "H5Smodify_select"); + + /* Create Memory Dataspace */ + memspace = H5Screate_simple(memrank, dimsm, NULL); + CHECK(memspace, FAIL, "H5Screate_simple"); + + /* Select hyperslab in memory */ + error = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start_out, stride, count_out, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Read File Dataset */ + error = H5Dread(dset_id, H5T_NATIVE_INT, memspace, tmp_space, H5P_DEFAULT, data_out); + CHECK(error, FAIL, "H5Dread"); + + /* Verify input data */ + for (i = 0; i < 8; i++) { + if (data[input_loc[i][0]][input_loc[i][1]] != data_out[output_loc[i][0]][output_loc[i][1]]) { + HDprintf("input data #%d is wrong!\n", i); + HDprintf("input_loc=[%d][%d]\n", input_loc[i][0], input_loc[i][1]); + HDprintf("output_loc=[%d][%d]\n", output_loc[i][0], output_loc[i][1]); + HDprintf("data=%d\n", data[input_loc[i][0]][input_loc[i][1]]); + TestErrPrintf("data_out=%d\n", data_out[output_loc[i][0]][output_loc[i][1]]); + } /* end if */ + } /* end for */ + + /* Close things */ + error = H5Sclose(tmp2_space); + CHECK(error, FAIL, "H5Sclose"); + error = H5Sclose(tmp_space); + CHECK(error, FAIL, "H5Sclose"); + error = H5Sclose(dataspace); + CHECK(error, FAIL, "H5Sclose"); + error = H5Sclose(memspace); + CHECK(error, FAIL, "H5Sclose"); + error = H5Dclose(dset_id); + CHECK(error, FAIL, "H5Dclose"); + error = H5Fclose(file_id); + CHECK(error, FAIL, "H5Fclose"); +} + +/**************************************************************** +** +** test_select_hyper_union_3d(): Test basic H5S (dataspace) selection code. 
+** Tests unions of hyperslabs in 3-D (Uses H5Scombine_hyperslab +** and H5Scombine_select instead of H5Sselect_hyperslab) +** +****************************************************************/ +static void +test_select_hyper_union_3d(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hid_t tmp_space; /* Temporary Dataspace ID */ + hid_t tmp2_space; /* Another temporary Dataspace ID */ + hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; + hsize_t dims2[] = {SPACE4_DIM1, SPACE4_DIM2, SPACE4_DIM3}; + hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; + hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ + struct row_list { + size_t z; + size_t y; + size_t x; + size_t l; + } rows[] = { + /* Array of x,y,z coordinates & length for each row written from memory */ + {0, 0, 0, 6}, /* 1st face of 3-D object */ + {0, 1, 0, 6}, {0, 2, 0, 6}, {0, 3, 0, 6}, {0, 4, 0, 6}, {1, 0, 0, 6}, /* 2nd face of 3-D object */ + {1, 1, 0, 6}, {1, 2, 0, 6}, {1, 3, 0, 6}, {1, 4, 0, 6}, {2, 0, 0, 6}, /* 3rd face of 3-D object */ + {2, 1, 0, 10}, {2, 2, 0, 10}, {2, 3, 0, 10}, {2, 4, 0, 10}, {2, 5, 2, 8}, + {2, 6, 2, 8}, {3, 0, 0, 6}, /* 4th face of 3-D object */ + {3, 1, 0, 10}, {3, 2, 0, 10}, {3, 3, 0, 10}, {3, 4, 0, 10}, {3, 5, 2, 8}, + {3, 6, 2, 8}, {4, 0, 0, 6}, /* 5th face of 3-D object */ + {4, 1, 0, 10}, {4, 2, 0, 10}, {4, 3, 0, 10}, {4, 4, 0, 10}, {4, 5, 2, 8}, + {4, 6, 2, 8}, {5, 1, 2, 8}, /* 6th face of 3-D object */ + {5, 2, 2, 8}, {5, 3, 2, 8}, {5, 4, 2, 8}, {5, 5, 2, 8}, {5, 6, 2, 8}, + {6, 1, 2, 8}, /* 7th face of 3-D object */ + {6, 2, 2, 8}, {6, 3, 2, 8}, {6, 4, 2, 8}, {6, 5, 2, 8}, {6, 6, 2, 8}, + {7, 1, 2, 8}, /* 8th face of 3-D object */ + {7, 2, 2, 8}, {7, 3, 2, 8}, {7, 4, 2, 8}, {7, 5, 2, 8}, {7, 6, 2, 8}}; + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf, /* temporary buffer pointer */ + *tbuf2; /* temporary buffer pointer */ + int i, j, k; /* Counters */ + herr_t ret; /* Generic return value */ + hsize_t npoints; /* Number of elements in selection */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslab Selection Functions with unions of 3-D hyperslabs\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE4_DIM1 * SPACE4_DIM2 * SPACE4_DIM3); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), SPACE3_DIM1 * SPACE3_DIM2); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE4_DIM1; i++) + for (j = 0; j < SPACE4_DIM2; j++) + for (k = 0; k < SPACE4_DIM3; k++) + *tbuf++ = (uint8_t)((((i * SPACE4_DIM2) + j) * SPACE4_DIM3) + k); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Test case of two blocks which overlap corners and must be split */ + /* Create dataspace for dataset on disk */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE4_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 2x15x13 hyperslab for disk dataset */ + start[0] = 1; + start[1] = 0; + start[2] = 0; + stride[0] = 1; + stride[1] = 1; + stride[2] = 1; + 
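+    /* (Together with the start/stride values above, the count/block values
+     *  below select one 2x15x13 block: planes 1-2, rows 0-14, columns 0-12
+     *  of the 3-D file dataspace.) */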
count[0] = 2;
+    count[1] = 15;
+    count[2] = 13;
+    block[0] = 1;
+    block[1] = 1;
+    block[2] = 1;
+    ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Select 5x5x6 hyperslab for memory dataset */
+    start[0] = 0;
+    start[1] = 0;
+    start[2] = 0;
+    stride[0] = 1;
+    stride[1] = 1;
+    stride[2] = 1;
+    count[0] = 5;
+    count[1] = 5;
+    count[2] = 6;
+    block[0] = 1;
+    block[1] = 1;
+    block[2] = 1;
+    ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Union overlapping 6x6x8 hyperslab for memory dataset (forming an irregularly shaped region) */
+    start[0] = 2;
+    start[1] = 1;
+    start[2] = 2;
+    stride[0] = 1;
+    stride[1] = 1;
+    stride[2] = 1;
+    count[0] = 6;
+    count[1] = 6;
+    count[2] = 8;
+    block[0] = 1;
+    block[1] = 1;
+    block[2] = 1;
+    tmp_space = H5Scombine_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(tmp_space, FAIL, "H5Scombine_hyperslab");
+
+    /* Combine dataspaces and create new dataspace */
+    tmp2_space = H5Scombine_select(sid2, H5S_SELECT_OR, tmp_space);
+    CHECK(tmp2_space, FAIL, "H5Scombine_select");
+
+    npoints = (hsize_t)H5Sget_select_npoints(tmp2_space);
+    VERIFY(npoints, 15 * 26, "H5Sget_select_npoints");
+
+    /* Create a dataset */
+    dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(dataset, FAIL, "H5Dcreate2");
+
+    /* Write selection to disk */
+    ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, tmp2_space, sid1, H5P_DEFAULT, wbuf);
+    CHECK(ret, FAIL, "H5Dwrite");
+
+    /* Close temporary dataspaces */
+    ret = H5Sclose(tmp_space);
+    CHECK(ret, FAIL, "H5Sclose");
+    ret = H5Sclose(tmp2_space);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close memory dataspace */
+    ret = H5Sclose(sid2);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Create dataspace for reading buffer */
+    sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+    CHECK(sid2, FAIL, "H5Screate_simple");
+
+    /* Select 15x26 hyperslab for reading memory dataset */
+    start[0] = 0;
+    start[1] = 0;
+    stride[0] = 1;
+    stride[1] = 1;
+    count[0] = 15;
+    count[1] = 26;
+    block[0] = 1;
+    block[1] = 1;
+    ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Read selection from disk */
+    ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
+    CHECK(ret, FAIL, "H5Dread");
+
+    /* Compare data read with data written out */
+    for (i = 0, tbuf2 = rbuf; i < (int)(sizeof(rows) / sizeof(struct row_list)); i++) {
+        tbuf = wbuf + (rows[i].z * SPACE4_DIM3 * SPACE4_DIM2) + (rows[i].y * SPACE4_DIM3) + rows[i].x;
+        for (j = 0; j < (int)rows[i].l; j++, tbuf++, tbuf2++) {
+            if (*tbuf != *tbuf2)
+                TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n",
+                              __LINE__, i, j, (int)*tbuf, (int)*tbuf2);
+        } /* end for */
+    } /* end for */
+
+    /* Close memory dataspace */
+    ret = H5Sclose(sid2);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close disk dataspace */
+    ret = H5Sclose(sid1);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close file */
+    ret = H5Fclose(fid1);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /* Free memory buffers */
+    HDfree(wbuf);
+    HDfree(rbuf);
+} /* test_select_hyper_union_3d() */
+
+/****************************************************************
+**
+** test_select_hyper_valid_combination(): Tests invalid and valid
+** combinations of selections on 
dataspace for H5Scombine_select +** and H5Smodify_select. +** +****************************************************************/ +static void +test_select_hyper_valid_combination(void) +{ + hid_t single_pt_sid; /* Dataspace ID with single point selection */ + hid_t single_hyper_sid; /* Dataspace ID with single block hyperslab selection */ + hid_t regular_hyper_sid; /* Dataspace ID with regular hyperslab selection */ + hid_t non_existent_sid = -1; /* A non-existent space id */ + hid_t tmp_sid; /* Temporary dataspace ID */ + hsize_t dims2D[] = {SPACE9_DIM1, SPACE9_DIM2}; + hsize_t dims3D[] = {SPACE4_DIM1, SPACE4_DIM2, SPACE4_DIM3}; + + hsize_t coord1[1][SPACE2_RANK]; /* Coordinates for single point selection */ + hsize_t start[SPACE4_RANK]; /* Hyperslab start */ + hsize_t stride[SPACE4_RANK]; /* Hyperslab stride */ + hsize_t count[SPACE4_RANK]; /* Hyperslab block count */ + hsize_t block[SPACE4_RANK]; /* Hyperslab block size */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(6, ("Testing Selection Combination Validity\n")); + HDassert(SPACE9_DIM2 >= POINT1_NPOINTS); + + /* Create dataspace for single point selection */ + single_pt_sid = H5Screate_simple(SPACE9_RANK, dims2D, NULL); + CHECK(single_pt_sid, FAIL, "H5Screate_simple"); + + /* Select sequence of ten points for multiple point selection */ + coord1[0][0] = 2; + coord1[0][1] = 2; + ret = H5Sselect_elements(single_pt_sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Create dataspace for single hyperslab selection */ + single_hyper_sid = H5Screate_simple(SPACE9_RANK, dims2D, NULL); + CHECK(single_hyper_sid, FAIL, "H5Screate_simple"); + + /* Select 10x10 hyperslab for single hyperslab selection */ + start[0] = 1; + start[1] = 1; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = (SPACE9_DIM1 - 2); + block[1] = (SPACE9_DIM2 - 2); + ret = H5Sselect_hyperslab(single_hyper_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create dataspace for regular hyperslab selection */ + regular_hyper_sid = H5Screate_simple(SPACE4_RANK, dims3D, NULL); + CHECK(regular_hyper_sid, FAIL, "H5Screate_simple"); + + /* Select regular, strided hyperslab selection */ + start[0] = 2; + start[1] = 2; + start[2] = 2; + stride[0] = 2; + stride[1] = 2; + stride[2] = 2; + count[0] = 5; + count[1] = 2; + count[2] = 5; + block[0] = 1; + block[1] = 1; + block[2] = 1; + ret = H5Sselect_hyperslab(regular_hyper_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Test all the selections created */ + + /* Test the invalid combinations between point and hyperslab */ + H5E_BEGIN_TRY + { + tmp_sid = H5Scombine_select(single_pt_sid, H5S_SELECT_AND, single_hyper_sid); + } + H5E_END_TRY; + VERIFY(tmp_sid, FAIL, "H5Scombine_select"); + + H5E_BEGIN_TRY + { + tmp_sid = H5Smodify_select(single_pt_sid, H5S_SELECT_AND, single_hyper_sid); + } + H5E_END_TRY; + VERIFY(tmp_sid, FAIL, "H5Smodify_select"); + + /* Test the invalid combination between two hyperslab but of different dimension size */ + H5E_BEGIN_TRY + { + tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_AND, regular_hyper_sid); + } + H5E_END_TRY; + VERIFY(tmp_sid, FAIL, "H5Scombine_select"); + + H5E_BEGIN_TRY + { + tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_AND, regular_hyper_sid); + } + H5E_END_TRY; + VERIFY(tmp_sid, FAIL, "H5Smodify_select"); + + /* Test invalid operation 
inputs to the two functions */ + H5E_BEGIN_TRY + { + tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_SET, single_hyper_sid); + } + H5E_END_TRY; + VERIFY(tmp_sid, FAIL, "H5Scombine_select"); + + H5E_BEGIN_TRY + { + tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_SET, single_hyper_sid); + } + H5E_END_TRY; + VERIFY(tmp_sid, FAIL, "H5Smodify_select"); + + /* Test inputs in case of non-existent space ids */ + H5E_BEGIN_TRY + { + tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_AND, non_existent_sid); + } + H5E_END_TRY; + VERIFY(tmp_sid, FAIL, "H5Scombine_select"); + + H5E_BEGIN_TRY + { + tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_AND, non_existent_sid); + } + H5E_END_TRY; + VERIFY(tmp_sid, FAIL, "H5Smodify_select"); + + /* Close dataspaces */ + ret = H5Sclose(single_pt_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(single_hyper_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(regular_hyper_sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_select_hyper_valid_combination() */ + +/**************************************************************** +** +** test_select_hyper_and_2d(): Test basic H5S (dataspace) selection code. +** Tests 'and' of hyperslabs in 2-D +** +****************************************************************/ +static void +test_select_hyper_and_2d(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims2[] = {SPACE2A_DIM1}; + hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf, /* temporary buffer pointer */ + *tbuf2; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + hssize_t npoints; /* Number of elements in selection */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslab Selection Functions with intersection of 2-D hyperslabs\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset on disk */ + sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 10x10 hyperslab for disk dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 10; + count[1] = 10; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Intersect overlapping 10x10 hyperslab */ + start[0] = 5; + start[1] = 5; + stride[0] = 1; + stride[1] = 1; + count[0] = 10; + count[1] = 10; + block[0] = 1; + block[1] = 1; + ret = 
H5Sselect_hyperslab(sid1, H5S_SELECT_AND, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    npoints = H5Sget_select_npoints(sid1);
+    VERIFY(npoints, 5 * 5, "H5Sget_select_npoints");
+
+    /* Select 25-element hyperslab for memory dataset */
+    start[0] = 0;
+    stride[0] = 1;
+    count[0] = 25;
+    block[0] = 1;
+    ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    npoints = H5Sget_select_npoints(sid2);
+    VERIFY(npoints, 5 * 5, "H5Sget_select_npoints");
+
+    /* Create a dataset */
+    dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(dataset, FAIL, "H5Dcreate2");
+
+    /* Write selection to disk */
+    ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+    CHECK(ret, FAIL, "H5Dwrite");
+
+    /* Read entire dataset from disk */
+    ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+    CHECK(ret, FAIL, "H5Dread");
+
+    /* Compare data read with data written out */
+    for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++)
+        for (j = 0; j < SPACE2_DIM2; j++, tbuf++) {
+            if ((i >= 5 && i <= 9) && (j >= 5 && j <= 9)) {
+                if (*tbuf != *tbuf2)
+                    HDprintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", __LINE__,
+                             i, j, (int)*tbuf, (int)*tbuf2);
+                tbuf2++;
+            } /* end if */
+            else {
+                if (*tbuf != 0)
+                    HDprintf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__, i, j,
+                             (int)*tbuf);
+            } /* end else */
+        } /* end for */
+
+    /* Close memory dataspace */
+    ret = H5Sclose(sid2);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close disk dataspace */
+    ret = H5Sclose(sid1);
+    CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close Dataset */
+    ret = H5Dclose(dataset);
+    CHECK(ret, FAIL, "H5Dclose");
+
+    /* Close file */
+    ret = H5Fclose(fid1);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /* Free memory buffers */
+    HDfree(wbuf);
+    HDfree(rbuf);
+} /* test_select_hyper_and_2d() */
+
+/****************************************************************
+**
+** test_select_hyper_xor_2d(): Test basic H5S (dataspace) selection code. 
+** Tests 'xor' of hyperslabs in 2-D +** +****************************************************************/ +static void +test_select_hyper_xor_2d(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims2[] = {SPACE2A_DIM1}; + hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf, /* temporary buffer pointer */ + *tbuf2; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + hssize_t npoints; /* Number of elements in selection */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslab Selection Functions with XOR of 2-D hyperslabs\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset on disk */ + sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 10x10 hyperslab for disk dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 10; + count[1] = 10; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Intersect overlapping 10x10 hyperslab */ + start[0] = 5; + start[1] = 5; + stride[0] = 1; + stride[1] = 1; + count[0] = 10; + count[1] = 10; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_XOR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints = H5Sget_select_npoints(sid1); + VERIFY(npoints, 150, "H5Sget_select_npoints"); + + /* Select 25 hyperslab for memory dataset */ + start[0] = 0; + stride[0] = 1; + count[0] = 150; + block[0] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints = H5Sget_select_npoints(sid2); + VERIFY(npoints, 150, "H5Sget_select_npoints"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read entire dataset from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Initialize write buffer */ + for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++, tbuf++) { + if (((i >= 0 && i <= 4) && (j >= 0 && j <= 9)) || + ((i 
>= 5 && i <= 9) && ((j >= 0 && j <= 4) || (j >= 10 && j <= 14))) || + ((i >= 10 && i <= 14) && (j >= 5 && j <= 14))) { + if (*tbuf != *tbuf2) + HDprintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", __LINE__, + i, j, (int)*tbuf, (int)*tbuf2); + tbuf2++; + } /* end if */ + else { + if (*tbuf != 0) + HDprintf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__, i, j, + (int)*tbuf); + } /* end else */ + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_xor_2d() */ + +/**************************************************************** +** +** test_select_hyper_notb_2d(): Test basic H5S (dataspace) selection code. +** Tests 'notb' of hyperslabs in 2-D +** +****************************************************************/ +static void +test_select_hyper_notb_2d(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims2[] = {SPACE2A_DIM1}; + hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf, /* temporary buffer pointer */ + *tbuf2; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + hssize_t npoints; /* Number of elements in selection */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslab Selection Functions with NOTB of 2-D hyperslabs\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset on disk */ + sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 10x10 hyperslab for disk dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 10; + count[1] = 10; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Intersect overlapping 10x10 hyperslab */ + start[0] = 5; + start[1] = 5; + stride[0] = 1; + stride[1] = 1; + count[0] = 10; + count[1] = 10; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_NOTB, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints = 
H5Sget_select_npoints(sid1); + VERIFY(npoints, 75, "H5Sget_select_npoints"); + + /* Select 75 hyperslab for memory dataset */ + start[0] = 0; + stride[0] = 1; + count[0] = 75; + block[0] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints = H5Sget_select_npoints(sid2); + VERIFY(npoints, 75, "H5Sget_select_npoints"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read entire dataset from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Initialize write buffer */ + for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++, tbuf++) { + if (((i >= 0 && i <= 4) && (j >= 0 && j <= 9)) || ((i >= 5 && i <= 9) && (j >= 0 && j <= 4))) { + if (*tbuf != *tbuf2) + HDprintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", __LINE__, + i, j, (int)*tbuf, (int)*tbuf2); + tbuf2++; + } /* end if */ + else { + if (*tbuf != 0) + HDprintf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__, i, j, + (int)*tbuf); + } /* end else */ + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_notb_2d() */ + +/**************************************************************** +** +** test_select_hyper_nota_2d(): Test basic H5S (dataspace) selection code. 
+** Tests 'nota' of hyperslabs in 2-D +** +****************************************************************/ +static void +test_select_hyper_nota_2d(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2}; + hsize_t dims2[] = {SPACE2A_DIM1}; + hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ + hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ + hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ + hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf, /* temporary buffer pointer */ + *tbuf2; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + hssize_t npoints; /* Number of elements in selection */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslab Selection Functions with NOTA of 2-D hyperslabs\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset on disk */ + sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Select 10x10 hyperslab for disk dataset */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 10; + count[1] = 10; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Intersect overlapping 10x10 hyperslab */ + start[0] = 5; + start[1] = 5; + stride[0] = 1; + stride[1] = 1; + count[0] = 10; + count[1] = 10; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_NOTA, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints = H5Sget_select_npoints(sid1); + VERIFY(npoints, 75, "H5Sget_select_npoints"); + + /* Select 75 hyperslab for memory dataset */ + start[0] = 0; + stride[0] = 1; + count[0] = 75; + block[0] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints = H5Sget_select_npoints(sid2); + VERIFY(npoints, 75, "H5Sget_select_npoints"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read entire dataset from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Initialize write buffer */ + for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++) + for (j = 0; j < SPACE2_DIM2; j++, tbuf++) { + if (((i >= 10 && i <= 14) && (j >= 5 && j <= 14)) || + 
((i >= 5 && i <= 9) && (j >= 10 && j <= 14))) { + if (*tbuf != *tbuf2) + TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", + __LINE__, i, j, (int)*tbuf, (int)*tbuf2); + tbuf2++; + } /* end if */ + else { + if (*tbuf != 0) + TestErrPrintf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__, + i, j, (int)*tbuf); + } /* end else */ + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_nota_2d() */ + +/**************************************************************** +** +** test_select_hyper_iter2(): Iterator for checking hyperslab iteration +** +****************************************************************/ +static herr_t +test_select_hyper_iter2(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned ndim, const hsize_t *point, + void *_operator_data) +{ + int *tbuf = (int *)_elem, /* temporary buffer pointer */ + **tbuf2 = (int **)_operator_data; /* temporary buffer handle */ + unsigned u; /* Local counting variable */ + + if (*tbuf != **tbuf2) { + TestErrPrintf("Error in hyperslab iteration!\n"); + HDprintf("location: { "); + for (u = 0; u < ndim; u++) { + HDprintf("%2d", (int)point[u]); + if (u < (ndim - 1)) + HDprintf(", "); + } /* end for */ + HDprintf("}\n"); + HDprintf("*tbuf=%d, **tbuf2=%d\n", *tbuf, **tbuf2); + return (-1); + } /* end if */ + else { + (*tbuf2)++; + return (0); + } +} /* end test_select_hyper_iter2() */ + +/**************************************************************** +** +** test_select_hyper_union_random_5d(): Test basic H5S (dataspace) selection code. 
+** Tests random unions of 5-D hyperslabs +** +****************************************************************/ +static void +test_select_hyper_union_random_5d(hid_t read_plist) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE5_DIM1, SPACE5_DIM2, SPACE5_DIM3, SPACE5_DIM4, SPACE5_DIM5}; + hsize_t dims2[] = {SPACE6_DIM1}; + hsize_t start[SPACE5_RANK]; /* Starting location of hyperslab */ + hsize_t count[SPACE5_RANK]; /* Element count of hyperslab */ + int *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer read from disk */ + *tbuf; /* temporary buffer pointer */ + int i, j, k, l, m; /* Counters */ + herr_t ret; /* Generic return value */ + hssize_t npoints, /* Number of elements in file selection */ + npoints2; /* Number of elements in memory selection */ + unsigned seed; /* Random number seed for each test */ + unsigned test_num; /* Count of tests being executed */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslab Selection Functions with random unions of 5-D hyperslabs\n")); + + /* Allocate write & read buffers */ + wbuf = (int *)HDmalloc(sizeof(int) * SPACE5_DIM1 * SPACE5_DIM2 * SPACE5_DIM3 * SPACE5_DIM4 * SPACE5_DIM5); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (int *)HDcalloc(sizeof(int), + (size_t)(SPACE5_DIM1 * SPACE5_DIM2 * SPACE5_DIM3 * SPACE5_DIM4 * SPACE5_DIM5)); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE5_DIM1; i++) + for (j = 0; j < SPACE5_DIM2; j++) + for (k = 0; k < SPACE5_DIM3; k++) + for (l = 0; l < SPACE5_DIM4; l++) + for (m = 0; m < SPACE5_DIM5; m++) + *tbuf++ = (int)(((((((i * SPACE5_DIM2) + j) * SPACE5_DIM3) + k) * SPACE5_DIM4) + l) * + SPACE5_DIM5) + + m; + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset on disk */ + sid1 = H5Screate_simple(SPACE5_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, SPACE5_NAME, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write entire dataset to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Create dataspace for reading buffer */ + sid2 = H5Screate_simple(SPACE6_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Get initial random # seed */ + seed = (unsigned)HDtime(NULL) + (unsigned)HDclock(); + + /* Crunch through a bunch of random hyperslab reads from the file dataset */ + for (test_num = 0; test_num < NRAND_HYPER; test_num++) { + /* Save random # seed for later use */ + /* (Used in case of errors, to regenerate the hyperslab sequence) */ + seed += (unsigned)HDclock(); + HDsrandom(seed); + + for (i = 0; i < NHYPERSLABS; i++) { + /* Select random hyperslab location & size for selection */ + for (j = 0; j < SPACE5_RANK; j++) { + start[j] = ((hsize_t)HDrandom() % dims1[j]); + count[j] = (((hsize_t)HDrandom() % (dims1[j] - start[j])) + 1); + } /* end for */ + + /* Select hyperslab */ + ret = H5Sselect_hyperslab(sid1, (i == 0 ? 
H5S_SELECT_SET : H5S_SELECT_OR), start, NULL, count, + NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + if (ret < 0) { + TestErrPrintf("Random hyperslabs for seed %u failed!\n", seed); + break; + } /* end if */ + } /* end for */ + + /* Get the number of elements selected */ + npoints = H5Sget_select_npoints(sid1); + CHECK(npoints, 0, "H5Sget_select_npoints"); + + /* Select linear 1-D hyperslab for memory dataset */ + start[0] = 0; + count[0] = (hsize_t)npoints; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + npoints2 = H5Sget_select_npoints(sid2); + VERIFY(npoints, npoints2, "H5Sget_select_npoints"); + + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_NATIVE_INT, sid2, sid1, read_plist, rbuf); + CHECK(ret, FAIL, "H5Dread"); + if (ret < 0) { + TestErrPrintf("Random hyperslabs for seed %u failed!\n", seed); + break; + } /* end if */ + + /* Compare data read with data written out */ + tbuf = rbuf; + ret = H5Diterate(wbuf, H5T_NATIVE_INT, sid1, test_select_hyper_iter2, &tbuf); + if (ret < 0) { + TestErrPrintf("Random hyperslabs for seed %u failed!\n", seed); + break; + } /* end if */ + + /* Set the read buffer back to all zeroes */ + HDmemset(rbuf, 0, (size_t)SPACE6_DIM1); + } /* end for */ + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_union_random_5d() */ + +/**************************************************************** +** +** test_select_hyper_chunk(): Test basic H5S (dataspace) selection code. +** Tests large hyperslab selection in chunked dataset +** +****************************************************************/ +static void +test_select_hyper_chunk(hid_t fapl_plist, hid_t xfer_plist) +{ + hsize_t dimsf[3]; /* dataset dimensions */ + hsize_t chunk_dimsf[3] = {CHUNK_X, CHUNK_Y, CHUNK_Z}; /* chunk sizes */ + short *data; /* data to write */ + short *tmpdata; /* data to write */ + + /* + * Data and output buffer initialization. + */ + hid_t file, dataset; /* handles */ + hid_t dataspace; + hid_t memspace; + hid_t plist; + hsize_t dimsm[3]; /* memory space dimensions */ + hsize_t dims_out[3]; /* dataset dimensions */ + herr_t status; + + short *data_out; /* output buffer */ + short *tmpdata_out; /* output buffer */ + + hsize_t count[3]; /* size of the hyperslab in the file */ + hsize_t offset[3]; /* hyperslab offset in the file */ + hsize_t count_out[3]; /* size of the hyperslab in memory */ + hsize_t offset_out[3]; /* hyperslab offset in memory */ + int i, j, k, status_n, rank; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Hyperslab I/O on Large Chunks\n")); + + /* Allocate the transfer buffers */ + data = (short *)HDmalloc(sizeof(short) * X * Y * Z); + CHECK_PTR(data, "HDmalloc"); + data_out = (short *)HDcalloc((size_t)(NX * NY * NZ), sizeof(short)); + CHECK_PTR(data_out, "HDcalloc"); + + /* + * Data buffer initialization. 
+ */ + tmpdata = data; + for (j = 0; j < X; j++) + for (i = 0; i < Y; i++) + for (k = 0; k < Z; k++) + *tmpdata++ = (short)((k + 1) % 256); + + /* + * Create a new file using H5F_ACC_TRUNC access, + * the default file creation properties, and the default file + * access properties. + */ + file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_plist); + CHECK(file, FAIL, "H5Fcreate"); + + /* + * Describe the size of the array and create the dataspace for fixed + * size dataset. + */ + dimsf[0] = X; + dimsf[1] = Y; + dimsf[2] = Z; + dataspace = H5Screate_simple(RANK_F, dimsf, NULL); + CHECK(dataspace, FAIL, "H5Screate_simple"); + + /* + * Create a new dataset within the file using defined dataspace and + * chunking properties. + */ + plist = H5Pcreate(H5P_DATASET_CREATE); + CHECK(plist, FAIL, "H5Pcreate"); + status = H5Pset_chunk(plist, RANK_F, chunk_dimsf); + CHECK(status, FAIL, "H5Pset_chunk"); + dataset = H5Dcreate2(file, DATASETNAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, plist, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* + * Define hyperslab in the dataset. + */ + offset[0] = 0; + offset[1] = 0; + offset[2] = 0; + count[0] = NX_SUB; + count[1] = NY_SUB; + count[2] = NZ_SUB; + status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); + CHECK(status, FAIL, "H5Sselect_hyperslab"); + + /* + * Define the memory dataspace. + */ + dimsm[0] = NX; + dimsm[1] = NY; + dimsm[2] = NZ; + memspace = H5Screate_simple(RANK_M, dimsm, NULL); + CHECK(memspace, FAIL, "H5Screate_simple"); + + /* + * Define memory hyperslab. + */ + offset_out[0] = 0; + offset_out[1] = 0; + offset_out[2] = 0; + count_out[0] = NX_SUB; + count_out[1] = NY_SUB; + count_out[2] = NZ_SUB; + status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count_out, NULL); + CHECK(status, FAIL, "H5Sselect_hyperslab"); + + /* + * Write the data to the dataset using hyperslabs + */ + status = H5Dwrite(dataset, H5T_NATIVE_SHORT, memspace, dataspace, xfer_plist, data); + CHECK(status, FAIL, "H5Dwrite"); + + /* + * Close/release resources. + */ + status = H5Pclose(plist); + CHECK(status, FAIL, "H5Pclose"); + status = H5Sclose(dataspace); + CHECK(status, FAIL, "H5Sclose"); + status = H5Sclose(memspace); + CHECK(status, FAIL, "H5Sclose"); + status = H5Dclose(dataset); + CHECK(status, FAIL, "H5Dclose"); + status = H5Fclose(file); + CHECK(status, FAIL, "H5Fclose"); + + /************************************************************* + + This reads the hyperslab from the test.h5 file just + created, into a 3-dimensional plane of the 3-dimensional + array. + + ************************************************************/ + + /* + * Open the file and the dataset. + */ + file = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl_plist); + CHECK(file, FAIL, "H5Fopen"); + dataset = H5Dopen2(file, DATASETNAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + dataspace = H5Dget_space(dataset); /* dataspace handle */ + CHECK(dataspace, FAIL, "H5Dget_space"); + rank = H5Sget_simple_extent_ndims(dataspace); + VERIFY(rank, 3, "H5Sget_simple_extent_ndims"); + status_n = H5Sget_simple_extent_dims(dataspace, dims_out, NULL); + CHECK(status_n, FAIL, "H5Sget_simple_extent_dims"); + VERIFY(dims_out[0], dimsf[0], "Dataset dimensions"); + VERIFY(dims_out[1], dimsf[1], "Dataset dimensions"); + VERIFY(dims_out[2], dimsf[2], "Dataset dimensions"); + + /* + * Define hyperslab in the dataset. 
+ */ + offset[0] = 0; + offset[1] = 0; + offset[2] = 0; + count[0] = NX_SUB; + count[1] = NY_SUB; + count[2] = NZ_SUB; + status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); + CHECK(status, FAIL, "H5Sselect_hyperslab"); + + /* + * Define the memory dataspace. + */ + dimsm[0] = NX; + dimsm[1] = NY; + dimsm[2] = NZ; + memspace = H5Screate_simple(RANK_M, dimsm, NULL); + CHECK(memspace, FAIL, "H5Screate_simple"); + + /* + * Define memory hyperslab. + */ + offset_out[0] = 0; + offset_out[1] = 0; + offset_out[2] = 0; + count_out[0] = NX_SUB; + count_out[1] = NY_SUB; + count_out[2] = NZ_SUB; + status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count_out, NULL); + CHECK(status, FAIL, "H5Sselect_hyperslab"); + + /* + * Read data from hyperslab in the file into the hyperslab in + * memory and display. + */ + status = H5Dread(dataset, H5T_NATIVE_SHORT, memspace, dataspace, xfer_plist, data_out); + CHECK(status, FAIL, "H5Dread"); + + /* Compare data written with data read in */ + tmpdata = data; + tmpdata_out = data_out; + for (j = 0; j < X; j++) + for (i = 0; i < Y; i++) + for (k = 0; k < Z; k++, tmpdata++, tmpdata_out++) { + if (*tmpdata != *tmpdata_out) + TestErrPrintf("Line %d: Error! j=%d, i=%d, k=%d, *tmpdata=%x, *tmpdata_out=%x\n", + __LINE__, j, i, k, (unsigned)*tmpdata, (unsigned)*tmpdata_out); + } /* end for */ + + /* + * Close and release resources. + */ + status = H5Dclose(dataset); + CHECK(status, FAIL, "H5Dclose"); + status = H5Sclose(dataspace); + CHECK(status, FAIL, "H5Sclose"); + status = H5Sclose(memspace); + CHECK(status, FAIL, "H5Sclose"); + status = H5Fclose(file); + CHECK(status, FAIL, "H5Fclose"); + HDfree(data); + HDfree(data_out); +} /* test_select_hyper_chunk() */ + +/**************************************************************** +** +** test_select_point_chunk(): Test basic H5S (dataspace) selection code. +** Tests combinations of hyperslab and point selections on +** chunked datasets. +** +****************************************************************/ +static void +test_select_point_chunk(void) +{ + hsize_t dimsf[SPACE7_RANK]; /* dataset dimensions */ + hsize_t chunk_dimsf[SPACE7_RANK] = {SPACE7_CHUNK_DIM1, SPACE7_CHUNK_DIM2}; /* chunk sizes */ + unsigned *data; /* data to write */ + unsigned *tmpdata; /* data to write */ + + /* + * Data and output buffer initialization. + */ + hid_t file, dataset; /* handles */ + hid_t dataspace; + hid_t pnt1_space; /* Dataspace to hold 1st point selection */ + hid_t pnt2_space; /* Dataspace to hold 2nd point selection */ + hid_t hyp1_space; /* Dataspace to hold 1st hyperslab selection */ + hid_t hyp2_space; /* Dataspace to hold 2nd hyperslab selection */ + hid_t dcpl; + herr_t ret; /* Generic return value */ + + unsigned *data_out; /* output buffer */ + + hsize_t start[SPACE7_RANK]; /* hyperslab offset */ + hsize_t count[SPACE7_RANK]; /* size of the hyperslab */ + hsize_t points[SPACE7_NPOINTS][SPACE7_RANK]; /* points for selection */ + unsigned i, j; /* Local index variables */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Point Selections on Chunked Datasets\n")); + + /* Allocate the transfer buffers */ + data = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); + CHECK_PTR(data, "HDmalloc"); + data_out = (unsigned *)HDcalloc((size_t)(SPACE7_DIM1 * SPACE7_DIM2), sizeof(unsigned)); + CHECK_PTR(data_out, "HDcalloc"); + + /* + * Data buffer initialization. 
+ */ + tmpdata = data; + for (i = 0; i < SPACE7_DIM1; i++) + for (j = 0; j < SPACE7_DIM1; j++) + *tmpdata++ = ((i * SPACE7_DIM2) + j) % 256; + + /* + * Create a new file using H5F_ACC_TRUNC access, + * the default file creation properties and file + * access properties. + */ + file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + /* Create file dataspace */ + dimsf[0] = SPACE7_DIM1; + dimsf[1] = SPACE7_DIM2; + dataspace = H5Screate_simple(SPACE7_RANK, dimsf, NULL); + CHECK(dataspace, FAIL, "H5Screate_simple"); + + /* + * Create a new dataset within the file using defined dataspace and + * chunking properties. + */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + ret = H5Pset_chunk(dcpl, SPACE7_RANK, chunk_dimsf); + CHECK(ret, FAIL, "H5Pset_chunk"); + dataset = H5Dcreate2(file, DATASETNAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Create 1st point selection */ + pnt1_space = H5Scopy(dataspace); + CHECK(pnt1_space, FAIL, "H5Scopy"); + + points[0][0] = 3; + points[0][1] = 3; + points[1][0] = 3; + points[1][1] = 8; + points[2][0] = 8; + points[2][1] = 3; + points[3][0] = 8; + points[3][1] = 8; + points[4][0] = 1; /* In same chunk as point #0, but "earlier" in chunk */ + points[4][1] = 1; + points[5][0] = 1; /* In same chunk as point #1, but "earlier" in chunk */ + points[5][1] = 6; + points[6][0] = 6; /* In same chunk as point #2, but "earlier" in chunk */ + points[6][1] = 1; + points[7][0] = 6; /* In same chunk as point #3, but "earlier" in chunk */ + points[7][1] = 6; + ret = H5Sselect_elements(pnt1_space, H5S_SELECT_SET, (size_t)SPACE7_NPOINTS, (const hsize_t *)points); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Create 1st hyperslab selection */ + hyp1_space = H5Scopy(dataspace); + CHECK(hyp1_space, FAIL, "H5Scopy"); + + start[0] = 2; + start[1] = 2; + count[0] = 4; + count[1] = 2; + ret = H5Sselect_hyperslab(hyp1_space, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Write out data using 1st point selection for file & hyperslab for memory */ + ret = H5Dwrite(dataset, H5T_NATIVE_UINT, hyp1_space, pnt1_space, H5P_DEFAULT, data); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Create 2nd point selection */ + pnt2_space = H5Scopy(dataspace); + CHECK(pnt2_space, FAIL, "H5Scopy"); + + points[0][0] = 4; + points[0][1] = 4; + points[1][0] = 4; + points[1][1] = 9; + points[2][0] = 9; + points[2][1] = 4; + points[3][0] = 9; + points[3][1] = 9; + points[4][0] = 2; /* In same chunk as point #0, but "earlier" in chunk */ + points[4][1] = 2; + points[5][0] = 2; /* In same chunk as point #1, but "earlier" in chunk */ + points[5][1] = 7; + points[6][0] = 7; /* In same chunk as point #2, but "earlier" in chunk */ + points[6][1] = 2; + points[7][0] = 7; /* In same chunk as point #3, but "earlier" in chunk */ + points[7][1] = 7; + ret = H5Sselect_elements(pnt2_space, H5S_SELECT_SET, (size_t)SPACE7_NPOINTS, (const hsize_t *)points); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Create 2nd hyperslab selection */ + hyp2_space = H5Scopy(dataspace); + CHECK(hyp2_space, FAIL, "H5Scopy"); + + start[0] = 2; + start[1] = 4; + count[0] = 4; + count[1] = 2; + ret = H5Sselect_hyperslab(hyp2_space, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Write out data using 2nd hyperslab selection for file & point for memory */ + ret = H5Dwrite(dataset, H5T_NATIVE_UINT, pnt2_space, hyp2_space, 
H5P_DEFAULT, data); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close everything (except selections) */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Sclose(dataspace); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Re-open file & dataset */ + file = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fopen"); + dataset = H5Dopen2(file, DATASETNAME, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Read data using 1st point selection for file and hyperslab for memory */ + ret = H5Dread(dataset, H5T_NATIVE_UINT, hyp1_space, pnt1_space, H5P_DEFAULT, data_out); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify data (later) */ + + /* Read data using 2nd hyperslab selection for file and point for memory */ + ret = H5Dread(dataset, H5T_NATIVE_UINT, pnt2_space, hyp2_space, H5P_DEFAULT, data_out); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify data (later) */ + + /* Close everything (including selections) */ + ret = H5Sclose(pnt1_space); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(pnt2_space); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(hyp1_space); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(hyp2_space); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + HDfree(data); + HDfree(data_out); +} /* test_select_point_chunk() */ + +/**************************************************************** +** +** test_select_sclar_chunk(): Test basic H5S (dataspace) selection code. +** Tests using a scalar dataspace (in memory) to access chunked datasets. +** +****************************************************************/ +static void +test_select_scalar_chunk(void) +{ + hid_t file_id; /* File ID */ + hid_t dcpl; /* Dataset creation property list */ + hid_t dsid; /* Dataset ID */ + hid_t sid; /* Dataspace ID */ + hid_t m_sid; /* Memory dataspace */ + hsize_t dims[] = {2}; /* Dataset dimensions */ + hsize_t maxdims[] = {H5S_UNLIMITED}; /* Dataset maximum dimensions */ + hsize_t offset[] = {0}; /* Hyperslab start */ + hsize_t count[] = {1}; /* Hyperslab count */ + unsigned data = 2; /* Data to write */ + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Scalar Dataspaces and Chunked Datasets\n")); + + file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fcreate"); + + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + dims[0] = 1024U; + ret = H5Pset_chunk(dcpl, 1, dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* Create 1-D dataspace */ + sid = H5Screate_simple(1, dims, maxdims); + CHECK(sid, FAIL, "H5Screate_simple"); + + dsid = H5Dcreate2(file_id, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dsid, FAIL, "H5Dcreate2"); + + /* Select scalar area (offset 0, count 1) */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create scalar memory dataspace */ + m_sid = H5Screate(H5S_SCALAR); + CHECK(m_sid, FAIL, "H5Screate"); + + /* Write out data using scalar dataspace for memory dataspace */ + ret = H5Dwrite(dsid, H5T_NATIVE_UINT, m_sid, sid, H5P_DEFAULT, &data); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close resources */ + ret = H5Sclose(m_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Dclose(dsid); + 
CHECK(ret, FAIL, "H5Dclose"); + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_select_scalar_chunk() */ + +/**************************************************************** +** +** test_select_valid(): Test basic H5S (dataspace) selection code. +** Tests selection validity +** +****************************************************************/ +static void +test_select_valid(void) +{ + herr_t error; + htri_t valid; + hid_t main_space, sub_space; + hsize_t safe_start[2] = {1, 1}; + hsize_t safe_count[2] = {1, 1}; + hsize_t start[2]; + hsize_t dims[2], maxdims[2], size[2], count[2]; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Selection Validity\n")); + + MESSAGE(8, ("Case 1 : sub_space is not a valid dataspace\n")); + dims[0] = dims[1] = H5S_UNLIMITED; + + H5E_BEGIN_TRY + { + sub_space = H5Screate_simple(2, dims, NULL); + } + H5E_END_TRY; + VERIFY(sub_space, FAIL, "H5Screate_simple"); + + H5E_BEGIN_TRY + { + valid = H5Sselect_valid(sub_space); + } + H5E_END_TRY; + VERIFY(valid, FAIL, "H5Sselect_valid"); + + /* Set arrays and dataspace for the rest of the cases */ + count[0] = count[1] = 1; + dims[0] = dims[1] = maxdims[0] = maxdims[1] = 10; + + main_space = H5Screate_simple(2, dims, maxdims); + CHECK(main_space, FAIL, "H5Screate_simple"); + + MESSAGE(8, ("Case 2 : sub_space is a valid but closed dataspace\n")); + sub_space = H5Scopy(main_space); + CHECK(sub_space, FAIL, "H5Scopy"); + + error = H5Sclose(sub_space); + CHECK(error, FAIL, "H5Sclose"); + + H5E_BEGIN_TRY + { + valid = H5Sselect_valid(sub_space); + } + H5E_END_TRY; + VERIFY(valid, FAIL, "H5Sselect_valid"); + + MESSAGE(8, ("Case 3 : in the dimensions\nTry offset (4,4) and size(6,6), the original space is of size " + "(10,10)\n")); + start[0] = start[1] = 4; + size[0] = size[1] = 6; + + sub_space = H5Scopy(main_space); + CHECK(sub_space, FAIL, "H5Scopy"); + + error = H5Sselect_hyperslab(sub_space, H5S_SELECT_SET, start, size, count, size); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + valid = H5Sselect_valid(sub_space); + VERIFY(valid, TRUE, "H5Sselect_valid"); + + error = H5Sselect_hyperslab(sub_space, H5S_SELECT_OR, safe_start, NULL, safe_count, NULL); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + valid = H5Sselect_valid(sub_space); + VERIFY(valid, TRUE, "H5Sselect_valid"); + + error = H5Sclose(sub_space); + CHECK(error, FAIL, "H5Sclose"); + + MESSAGE(8, ("Case 4 : exceed dimensions by 1\nTry offset (5,5) and size(6,6), the original space is of " + "size (10,10)\n")); + start[0] = start[1] = 5; + size[0] = size[1] = 6; + + sub_space = H5Scopy(main_space); + CHECK(sub_space, FAIL, "H5Scopy"); + + error = H5Sselect_hyperslab(sub_space, H5S_SELECT_SET, start, size, count, size); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + valid = H5Sselect_valid(sub_space); + VERIFY(valid, FALSE, "H5Sselect_valid"); + + error = H5Sselect_hyperslab(sub_space, H5S_SELECT_OR, safe_start, NULL, safe_count, NULL); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + valid = H5Sselect_valid(sub_space); + VERIFY(valid, FALSE, "H5Sselect_valid"); + + error = H5Sclose(sub_space); + CHECK(error, FAIL, "H5Sclose"); + + MESSAGE(8, ("Case 5 : exceed dimensions by 2\nTry offset (6,6) and size(6,6), the original space is of " + "size (10,10)\n")); + start[0] = start[1] = 6; + size[0] = size[1] = 6; + + sub_space = H5Scopy(main_space); + CHECK(sub_space, FAIL, "H5Scopy"); + + error = H5Sselect_hyperslab(sub_space, H5S_SELECT_SET, start, size, count, 
size); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + valid = H5Sselect_valid(sub_space); + VERIFY(valid, FALSE, "H5Sselect_valid"); + + error = H5Sselect_hyperslab(sub_space, H5S_SELECT_OR, safe_start, NULL, safe_count, NULL); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + valid = H5Sselect_valid(sub_space); + VERIFY(valid, FALSE, "H5Sselect_valid"); + + error = H5Sclose(sub_space); + CHECK(error, FAIL, "H5Sclose"); + error = H5Sclose(main_space); + CHECK(error, FAIL, "H5Sclose"); +} /* test_select_valid() */ + +/**************************************************************** +** +** test_select_combine(): Test basic H5S (dataspace) selection code. +** Tests combining "all" and "none" selections with hyperslab +** operations. +** +****************************************************************/ +static void +test_select_combine(void) +{ + hid_t base_id; /* Base dataspace for test */ + hid_t all_id; /* Dataspace for "all" selection */ + hid_t none_id; /* Dataspace for "none" selection */ + hid_t space1; /* Temporary dataspace #1 */ + hsize_t start[SPACE7_RANK]; /* Hyperslab start */ + hsize_t stride[SPACE7_RANK]; /* Hyperslab stride */ + hsize_t count[SPACE7_RANK]; /* Hyperslab count */ + hsize_t block[SPACE7_RANK]; /* Hyperslab block */ + hsize_t dims[SPACE7_RANK] = {SPACE7_DIM1, SPACE7_DIM2}; /* Dimensions of dataspace */ + H5S_sel_type sel_type; /* Selection type */ + hssize_t nblocks; /* Number of hyperslab blocks */ + hsize_t blocks[16][2][SPACE7_RANK]; /* List of blocks */ + herr_t error; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Selection Combinations\n")); + + /* Create dataspace for dataset on disk */ + base_id = H5Screate_simple(SPACE7_RANK, dims, NULL); + CHECK(base_id, FAIL, "H5Screate_simple"); + + /* Copy base dataspace and set selection to "all" */ + all_id = H5Scopy(base_id); + CHECK(all_id, FAIL, "H5Scopy"); + error = H5Sselect_all(all_id); + CHECK(error, FAIL, "H5Sselect_all"); + sel_type = H5Sget_select_type(all_id); + VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type"); + + /* Copy base dataspace and set selection to "none" */ + none_id = H5Scopy(base_id); + CHECK(none_id, FAIL, "H5Scopy"); + error = H5Sselect_none(none_id); + CHECK(error, FAIL, "H5Sselect_none"); + sel_type = H5Sget_select_type(none_id); + VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type"); + + /* Copy "all" selection & space */ + space1 = H5Scopy(all_id); + CHECK(space1, FAIL, "H5Scopy"); + + /* 'OR' "all" selection with another hyperslab */ + start[0] = start[1] = 0; + stride[0] = stride[1] = 1; + count[0] = count[1] = 1; + block[0] = block[1] = 5; + error = H5Sselect_hyperslab(space1, H5S_SELECT_OR, start, stride, count, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Verify that it's still "all" selection */ + sel_type = H5Sget_select_type(space1); + VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type"); + + /* Close temporary dataspace */ + error = H5Sclose(space1); + CHECK(error, FAIL, "H5Sclose"); + + /* Copy "all" selection & space */ + space1 = H5Scopy(all_id); + CHECK(space1, FAIL, "H5Scopy"); + + /* 'AND' "all" selection with another hyperslab */ + start[0] = start[1] = 0; + stride[0] = stride[1] = 1; + count[0] = count[1] = 1; + block[0] = block[1] = 5; + error = H5Sselect_hyperslab(space1, H5S_SELECT_AND, start, stride, count, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Verify that the new selection is the same at the original block */ + sel_type = H5Sget_select_type(space1); + VERIFY(sel_type, H5S_SEL_HYPERSLABS, 
"H5Sget_select_type"); + + /* Verify that there is only one block */ + nblocks = H5Sget_select_hyper_nblocks(space1); + VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks"); + + /* Retrieve the block defined */ + HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */ + error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); + CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); + + /* Verify that the correct block is defined */ + VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist"); + + /* Close temporary dataspace */ + error = H5Sclose(space1); + CHECK(error, FAIL, "H5Sclose"); + + /* Copy "all" selection & space */ + space1 = H5Scopy(all_id); + CHECK(space1, FAIL, "H5Scopy"); + + /* 'XOR' "all" selection with another hyperslab */ + start[0] = start[1] = 0; + stride[0] = stride[1] = 1; + count[0] = count[1] = 1; + block[0] = block[1] = 5; + error = H5Sselect_hyperslab(space1, H5S_SELECT_XOR, start, stride, count, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Verify that the new selection is an inversion of the original block */ + sel_type = H5Sget_select_type(space1); + VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); + + /* Verify that there are two blocks */ + nblocks = H5Sget_select_hyper_nblocks(space1); + VERIFY(nblocks, 2, "H5Sget_select_hyper_nblocks"); + + /* Retrieve the block defined */ + HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */ + error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); + CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); + + /* Verify that the correct block is defined */ + VERIFY(blocks[0][0][0], 0, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][0][1], 5, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][0], 4, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][1], 9, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[1][0][0], 5, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[1][0][1], 0, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[1][1][0], 9, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[1][1][1], 9, "H5Sget_select_hyper_blocklist"); + + /* Close temporary dataspace */ + error = H5Sclose(space1); + CHECK(error, FAIL, "H5Sclose"); + + /* Copy "all" selection & space */ + space1 = H5Scopy(all_id); + CHECK(space1, FAIL, "H5Scopy"); + + /* 'NOTB' "all" selection with another hyperslab */ + start[0] = start[1] = 0; + stride[0] = stride[1] = 1; + count[0] = count[1] = 1; + block[0] = block[1] = 5; + error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTB, start, stride, count, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Verify that the new selection is an inversion of the original block */ + sel_type = H5Sget_select_type(space1); + VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); + + /* Verify that there are two blocks */ + nblocks = H5Sget_select_hyper_nblocks(space1); + VERIFY(nblocks, 2, "H5Sget_select_hyper_nblocks"); + + /* Retrieve the block defined */ + HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */ + error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); + CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); + + /* Verify that the correct block is defined */ + 
VERIFY(blocks[0][0][0], 0, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][0][1], 5, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][0], 4, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][1], 9, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[1][0][0], 5, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[1][0][1], 0, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[1][1][0], 9, "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[1][1][1], 9, "H5Sget_select_hyper_blocklist"); + + /* Close temporary dataspace */ + error = H5Sclose(space1); + CHECK(error, FAIL, "H5Sclose"); + + /* Copy "all" selection & space */ + space1 = H5Scopy(all_id); + CHECK(space1, FAIL, "H5Scopy"); + + /* 'NOTA' "all" selection with another hyperslab */ + start[0] = start[1] = 0; + stride[0] = stride[1] = 1; + count[0] = count[1] = 1; + block[0] = block[1] = 5; + error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTA, start, stride, count, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Verify that the new selection is the "none" selection */ + sel_type = H5Sget_select_type(space1); + VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type"); + + /* Close temporary dataspace */ + error = H5Sclose(space1); + CHECK(error, FAIL, "H5Sclose"); + + /* Copy "none" selection & space */ + space1 = H5Scopy(none_id); + CHECK(space1, FAIL, "H5Scopy"); + + /* 'OR' "none" selection with another hyperslab */ + start[0] = start[1] = 0; + stride[0] = stride[1] = 1; + count[0] = count[1] = 1; + block[0] = block[1] = 5; + error = H5Sselect_hyperslab(space1, H5S_SELECT_OR, start, stride, count, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Verify that the new selection is the same as the original hyperslab */ + sel_type = H5Sget_select_type(space1); + VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); + + /* Verify that there is only one block */ + nblocks = H5Sget_select_hyper_nblocks(space1); + VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks"); + + /* Retrieve the block defined */ + HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */ + error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); + CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); + + /* Verify that the correct block is defined */ + VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist"); + + /* Close temporary dataspace */ + error = H5Sclose(space1); + CHECK(error, FAIL, "H5Sclose"); + + /* Copy "none" selection & space */ + space1 = H5Scopy(none_id); + CHECK(space1, FAIL, "H5Scopy"); + + /* 'AND' "none" selection with another hyperslab */ + start[0] = start[1] = 0; + stride[0] = stride[1] = 1; + count[0] = count[1] = 1; + block[0] = block[1] = 5; + error = H5Sselect_hyperslab(space1, H5S_SELECT_AND, start, stride, count, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Verify that the new selection is the "none" selection */ + sel_type = H5Sget_select_type(space1); + VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type"); + + /* Close temporary dataspace */ + error = H5Sclose(space1); + CHECK(error, FAIL, "H5Sclose"); + + /* Copy "none" selection & space */ + space1 = H5Scopy(none_id); + CHECK(space1, FAIL, "H5Scopy"); + + /* 'XOR' "none" selection with another hyperslab */ + start[0] = start[1] = 0; + stride[0] = 
stride[1] = 1; + count[0] = count[1] = 1; + block[0] = block[1] = 5; + error = H5Sselect_hyperslab(space1, H5S_SELECT_XOR, start, stride, count, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Verify that the new selection is the same as the original hyperslab */ + sel_type = H5Sget_select_type(space1); + VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); + + /* Verify that there is only one block */ + nblocks = H5Sget_select_hyper_nblocks(space1); + VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks"); + + /* Retrieve the block defined */ + HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */ + error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); + CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); + + /* Verify that the correct block is defined */ + VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist"); + + /* Close temporary dataspace */ + error = H5Sclose(space1); + CHECK(error, FAIL, "H5Sclose"); + + /* Copy "none" selection & space */ + space1 = H5Scopy(none_id); + CHECK(space1, FAIL, "H5Scopy"); + + /* 'NOTB' "none" selection with another hyperslab */ + start[0] = start[1] = 0; + stride[0] = stride[1] = 1; + count[0] = count[1] = 1; + block[0] = block[1] = 5; + error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTB, start, stride, count, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Verify that the new selection is the "none" selection */ + sel_type = H5Sget_select_type(space1); + VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type"); + + /* Close temporary dataspace */ + error = H5Sclose(space1); + CHECK(error, FAIL, "H5Sclose"); + + /* Copy "none" selection & space */ + space1 = H5Scopy(none_id); + CHECK(space1, FAIL, "H5Scopy"); + + /* 'NOTA' "none" selection with another hyperslab */ + start[0] = start[1] = 0; + stride[0] = stride[1] = 1; + count[0] = count[1] = 1; + block[0] = block[1] = 5; + error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTA, start, stride, count, block); + CHECK(error, FAIL, "H5Sselect_hyperslab"); + + /* Verify that the new selection is the same as the original hyperslab */ + sel_type = H5Sget_select_type(space1); + VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); + + /* Verify that there is only one block */ + nblocks = H5Sget_select_hyper_nblocks(space1); + VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks"); + + /* Retrieve the block defined */ + HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */ + error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); + CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); + + /* Verify that the correct block is defined */ + VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist"); + VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist"); + + /* Close temporary dataspace */ + error = H5Sclose(space1); + CHECK(error, FAIL, "H5Sclose"); + + /* Close dataspaces */ + error = H5Sclose(base_id); + CHECK(error, FAIL, "H5Sclose"); + + error = H5Sclose(all_id); + CHECK(error, FAIL, "H5Sclose"); + + error = H5Sclose(none_id); + CHECK(error, FAIL, 
"H5Sclose"); +} /* test_select_combine() */ + +/* + * Typedef for iteration structure used in the fill value tests + */ +typedef struct { + unsigned short fill_value; /* The fill value to check */ + size_t curr_coord; /* Current coordinate to examine */ + hsize_t *coords; /* Pointer to selection's coordinates */ +} fill_iter_info; + +/**************************************************************** +** +** test_select_hyper_iter3(): Iterator for checking hyperslab iteration +** +****************************************************************/ +static herr_t +test_select_hyper_iter3(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned ndim, const hsize_t *point, + void *_operator_data) +{ + unsigned *tbuf = (unsigned *)_elem; /* temporary buffer pointer */ + fill_iter_info *iter_info = + (fill_iter_info *)_operator_data; /* Get the pointer to the iterator information */ + hsize_t *coord_ptr; /* Pointer to the coordinate information for a point*/ + + /* Check value in current buffer location */ + if (*tbuf != iter_info->fill_value) + return (-1); + else { + /* Check number of dimensions */ + if (ndim != SPACE7_RANK) + return (-1); + else { + /* Check Coordinates */ + coord_ptr = iter_info->coords + (2 * iter_info->curr_coord); + iter_info->curr_coord++; + if (coord_ptr[0] != point[0]) + return (-1); + else if (coord_ptr[1] != point[1]) + return (-1); + else + return (0); + } /* end else */ + } /* end else */ +} /* end test_select_hyper_iter3() */ + +/**************************************************************** +** +** test_select_fill_all(): Test basic H5S (dataspace) selection code. +** Tests filling "all" selections +** +****************************************************************/ +static void +test_select_fill_all(void) +{ + hid_t sid1; /* Dataspace ID */ + hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; + unsigned fill_value; /* Fill value */ + fill_iter_info iter_info; /* Iterator information structure */ + hsize_t points[SPACE7_DIM1 * SPACE7_DIM2][SPACE7_RANK]; /* Coordinates of selection */ + unsigned *wbuf, /* buffer to write to disk */ + *tbuf; /* temporary buffer pointer */ + unsigned u, v; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Filling 'all' Selections\n")); + + /* Allocate memory buffer */ + wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + + /* Initialize memory buffer */ + for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) + for (v = 0; v < SPACE7_DIM2; v++) + *tbuf++ = (u * SPACE7_DIM2) + v; + + /* Create dataspace for dataset on disk */ + sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Space defaults to "all" selection */ + + /* Set fill value */ + fill_value = SPACE7_FILL; + + /* Fill selection in memory */ + ret = H5Dfill(&fill_value, H5T_NATIVE_UINT, wbuf, H5T_NATIVE_UINT, sid1); + CHECK(ret, FAIL, "H5Dfill"); + + /* Verify memory buffer the hard way... */ + for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) + for (v = 0; v < SPACE7_DIM2; v++) + if (*tbuf != fill_value) + TestErrPrintf("Error! 
v=%d, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, fill_value); + + /* Set the coordinates of the selection */ + for (u = 0; u < SPACE7_DIM1; u++) + for (v = 0; v < SPACE7_DIM2; v++) { + points[(u * SPACE7_DIM2) + v][0] = u; + points[(u * SPACE7_DIM2) + v][1] = v; + } /* end for */ + + /* Initialize the iterator structure */ + iter_info.fill_value = SPACE7_FILL; + iter_info.curr_coord = 0; + iter_info.coords = (hsize_t *)points; + + /* Iterate through selection, verifying correct data */ + ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info); + CHECK(ret, FAIL, "H5Diterate"); + + /* Close dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Free memory buffers */ + HDfree(wbuf); +} /* test_select_fill_all() */ + +/**************************************************************** +** +** test_select_fill_point(): Test basic H5S (dataspace) selection code. +** Tests filling "point" selections +** +****************************************************************/ +static void +test_select_fill_point(hssize_t *offset) +{ + hid_t sid1; /* Dataspace ID */ + hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; + hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */ + hsize_t points[5][SPACE7_RANK] = {{2, 4}, {3, 8}, {8, 4}, {7, 5}, {7, 7}}; + size_t num_points = 5; /* Number of points selected */ + int fill_value; /* Fill value */ + fill_iter_info iter_info; /* Iterator information structure */ + unsigned *wbuf, /* buffer to write to disk */ + *tbuf; /* temporary buffer pointer */ + unsigned u, v, w; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Filling 'point' Selections\n")); + + /* Allocate memory buffer */ + wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + + /* Initialize memory buffer */ + for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) + for (v = 0; v < SPACE7_DIM2; v++) + *tbuf++ = (unsigned short)(u * SPACE7_DIM2) + v; + + /* Create dataspace for dataset on disk */ + sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Select "point" selection */ + ret = H5Sselect_elements(sid1, H5S_SELECT_SET, num_points, (const hsize_t *)points); + CHECK(ret, FAIL, "H5Sselect_elements"); + + if (offset != NULL) { + HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t)); + + /* Set offset, if provided */ + ret = H5Soffset_simple(sid1, real_offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + } /* end if */ + else + HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t)); + + /* Set fill value */ + fill_value = SPACE7_FILL; + + /* Fill selection in memory */ + ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1); + CHECK(ret, FAIL, "H5Dfill"); + + /* Verify memory buffer the hard way... */ + for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) + for (v = 0; v < SPACE7_DIM2; v++, tbuf++) { + for (w = 0; w < (unsigned)num_points; w++) { + if (u == (unsigned)(points[w][0] + (hsize_t)real_offset[0]) && + v == (unsigned)(points[w][1] + (hsize_t)real_offset[1])) { + if (*tbuf != (unsigned)fill_value) + TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, + (unsigned)fill_value); + break; + } /* end if */ + } /* end for */ + if (w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v)) + TestErrPrintf("Error! 
v=%d, u=%d, *tbuf=%u, should be: %u\n", v, u, *tbuf, + ((u * SPACE7_DIM2) + v)); + } /* end for */ + + /* Initialize the iterator structure */ + iter_info.fill_value = SPACE7_FILL; + iter_info.curr_coord = 0; + iter_info.coords = (hsize_t *)points; + + /* Add in the offset */ + for (u = 0; u < (unsigned)num_points; u++) { + points[u][0] = (hsize_t)((hssize_t)points[u][0] + real_offset[0]); + points[u][1] = (hsize_t)((hssize_t)points[u][1] + real_offset[1]); + } /* end for */ + + /* Iterate through selection, verifying correct data */ + ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info); + CHECK(ret, FAIL, "H5Diterate"); + + /* Close dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Free memory buffers */ + HDfree(wbuf); +} /* test_select_fill_point() */ + +/**************************************************************** +** +** test_select_fill_hyper_simple(): Test basic H5S (dataspace) selection code. +** Tests filling "simple" (i.e. one block) hyperslab selections +** +****************************************************************/ +static void +test_select_fill_hyper_simple(hssize_t *offset) +{ + hid_t sid1; /* Dataspace ID */ + hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; + hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */ + hsize_t start[SPACE7_RANK]; /* Hyperslab start */ + hsize_t count[SPACE7_RANK]; /* Hyperslab block size */ + size_t num_points; /* Number of points in selection */ + hsize_t points[16][SPACE7_RANK]; /* Coordinates selected */ + int fill_value; /* Fill value */ + fill_iter_info iter_info; /* Iterator information structure */ + unsigned *wbuf, /* buffer to write to disk */ + *tbuf; /* temporary buffer pointer */ + unsigned u, v; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Filling Simple 'hyperslab' Selections\n")); + + /* Allocate memory buffer */ + wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + + /* Initialize memory buffer */ + for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) + for (v = 0; v < SPACE7_DIM2; v++) + *tbuf++ = (unsigned short)(u * SPACE7_DIM2) + v; + + /* Create dataspace for dataset on disk */ + sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Select "hyperslab" selection */ + start[0] = 3; + start[1] = 3; + count[0] = 4; + count[1] = 4; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + if (offset != NULL) { + HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t)); + + /* Set offset, if provided */ + ret = H5Soffset_simple(sid1, real_offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + } /* end if */ + else + HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t)); + + /* Set fill value */ + fill_value = SPACE7_FILL; + + /* Fill selection in memory */ + ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1); + CHECK(ret, FAIL, "H5Dfill"); + + /* Verify memory buffer the hard way... 
*/ + for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) + for (v = 0; v < SPACE7_DIM2; v++, tbuf++) { + if ((u >= (unsigned)((hssize_t)start[0] + real_offset[0]) && + u < (unsigned)((hssize_t)(start[0] + count[0]) + real_offset[0])) && + (v >= (unsigned)((hssize_t)start[1] + real_offset[1]) && + v < (unsigned)((hssize_t)(start[1] + count[1]) + real_offset[1]))) { + if (*tbuf != (unsigned)fill_value) + TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, + (unsigned)fill_value); + } /* end if */ + else { + if (*tbuf != ((unsigned)(u * SPACE7_DIM2) + v)) + TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf, + ((u * SPACE7_DIM2) + v)); + } /* end else */ + } /* end for */ + + /* Initialize the iterator structure */ + iter_info.fill_value = SPACE7_FILL; + iter_info.curr_coord = 0; + iter_info.coords = (hsize_t *)points; + + /* Set the coordinates of the selection (with the offset) */ + for (u = 0, num_points = 0; u < (unsigned)count[0]; u++) + for (v = 0; v < (unsigned)count[1]; v++, num_points++) { + points[num_points][0] = (hsize_t)((hssize_t)(u + start[0]) + real_offset[0]); + points[num_points][1] = (hsize_t)((hssize_t)(v + start[1]) + real_offset[1]); + } /* end for */ + + /* Iterate through selection, verifying correct data */ + ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info); + CHECK(ret, FAIL, "H5Diterate"); + + /* Close dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Free memory buffers */ + HDfree(wbuf); +} /* test_select_fill_hyper_simple() */ + +/**************************************************************** +** +** test_select_fill_hyper_regular(): Test basic H5S (dataspace) selection code. +** Tests filling "regular" (i.e. strided block) hyperslab selections +** +****************************************************************/ +static void +test_select_fill_hyper_regular(hssize_t *offset) +{ + hid_t sid1; /* Dataspace ID */ + hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; + hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */ + hsize_t start[SPACE7_RANK]; /* Hyperslab start */ + hsize_t stride[SPACE7_RANK]; /* Hyperslab stride size */ + hsize_t count[SPACE7_RANK]; /* Hyperslab block count */ + hsize_t block[SPACE7_RANK]; /* Hyperslab block size */ + hsize_t points[16][SPACE7_RANK] = { + {2, 2}, {2, 3}, {2, 6}, {2, 7}, {3, 2}, {3, 3}, {3, 6}, {3, 7}, + {6, 2}, {6, 3}, {6, 6}, {6, 7}, {7, 2}, {7, 3}, {7, 6}, {7, 7}, + }; + size_t num_points = 16; /* Number of points selected */ + int fill_value; /* Fill value */ + fill_iter_info iter_info; /* Iterator information structure */ + unsigned *wbuf, /* buffer to write to disk */ + *tbuf; /* temporary buffer pointer */ + unsigned u, v, w; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Filling Regular 'hyperslab' Selections\n")); + + /* Allocate memory buffer */ + wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + + /* Initialize memory buffer */ + for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) + for (v = 0; v < SPACE7_DIM2; v++) + *tbuf++ = (u * SPACE7_DIM2) + v; + + /* Create dataspace for dataset on disk */ + sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Select "hyperslab" selection */ + start[0] = 2; + start[1] = 2; + stride[0] = 4; + stride[1] = 4; + count[0] = 2; + count[1] = 2; + block[0] = 2; + block[1] = 2; + ret = 
H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + if (offset != NULL) { + HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t)); + + /* Set offset, if provided */ + ret = H5Soffset_simple(sid1, real_offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + } /* end if */ + else + HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t)); + + /* Set fill value */ + fill_value = SPACE7_FILL; + + /* Fill selection in memory */ + ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1); + CHECK(ret, FAIL, "H5Dfill"); + + /* Verify memory buffer the hard way... */ + for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) + for (v = 0; v < SPACE7_DIM2; v++, tbuf++) { + for (w = 0; w < (unsigned)num_points; w++) { + if (u == (unsigned)((hssize_t)points[w][0] + real_offset[0]) && + v == (unsigned)((hssize_t)points[w][1] + real_offset[1])) { + if (*tbuf != (unsigned)fill_value) + TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, + (unsigned)fill_value); + break; + } /* end if */ + } /* end for */ + if (w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v)) + TestErrPrintf("Error! v=%d, u=%d, *tbuf=%u, should be: %u\n", v, u, *tbuf, + ((u * SPACE7_DIM2) + v)); + } /* end for */ + + /* Initialize the iterator structure */ + iter_info.fill_value = SPACE7_FILL; + iter_info.curr_coord = 0; + iter_info.coords = (hsize_t *)points; + + /* Add in the offset */ + for (u = 0; u < (unsigned)num_points; u++) { + points[u][0] = (hsize_t)((hssize_t)points[u][0] + real_offset[0]); + points[u][1] = (hsize_t)((hssize_t)points[u][1] + real_offset[1]); + } /* end for */ + + /* Iterate through selection, verifying correct data */ + ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info); + CHECK(ret, FAIL, "H5Diterate"); + + /* Close dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Free memory buffers */ + HDfree(wbuf); +} /* test_select_fill_hyper_regular() */ + +/**************************************************************** +** +** test_select_fill_hyper_irregular(): Test basic H5S (dataspace) selection code. +** Tests filling "irregular" (i.e. combined blocks) hyperslab selections +** +****************************************************************/ +static void +test_select_fill_hyper_irregular(hssize_t *offset) +{ + hid_t sid1; /* Dataspace ID */ + hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; + hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */ + hsize_t start[SPACE7_RANK]; /* Hyperslab start */ + hsize_t count[SPACE7_RANK]; /* Hyperslab block count */ + hsize_t points[32][SPACE7_RANK] = { + /* Yes, some of the are duplicated.. 
*/ + {2, 2}, {2, 3}, {2, 4}, {2, 5}, {3, 2}, {3, 3}, {3, 4}, {3, 5}, {4, 2}, {4, 3}, {4, 4}, + {4, 5}, {5, 2}, {5, 3}, {5, 4}, {5, 5}, {4, 4}, {4, 5}, {4, 6}, {4, 7}, {5, 4}, {5, 5}, + {5, 6}, {5, 7}, {6, 4}, {6, 5}, {6, 6}, {6, 7}, {7, 4}, {7, 5}, {7, 6}, {7, 7}, + }; + hsize_t iter_points[28][SPACE7_RANK] = { + /* Coordinates, as iterated through */ + {2, 2}, {2, 3}, {2, 4}, {2, 5}, {3, 2}, {3, 3}, {3, 4}, {3, 5}, {4, 2}, {4, 3}, + {4, 4}, {4, 5}, {4, 6}, {4, 7}, {5, 2}, {5, 3}, {5, 4}, {5, 5}, {5, 6}, {5, 7}, + {6, 4}, {6, 5}, {6, 6}, {6, 7}, {7, 4}, {7, 5}, {7, 6}, {7, 7}, + }; + size_t num_points = 32; /* Number of points selected */ + size_t num_iter_points = 28; /* Number of resulting points */ + int fill_value; /* Fill value */ + fill_iter_info iter_info; /* Iterator information structure */ + unsigned *wbuf, /* buffer to write to disk */ + *tbuf; /* temporary buffer pointer */ + unsigned u, v, w; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Filling Irregular 'hyperslab' Selections\n")); + + /* Allocate memory buffer */ + wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + + /* Initialize memory buffer */ + for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) + for (v = 0; v < SPACE7_DIM2; v++) + *tbuf++ = (u * SPACE7_DIM2) + v; + + /* Create dataspace for dataset on disk */ + sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Select first "hyperslab" selection */ + start[0] = 2; + start[1] = 2; + count[0] = 4; + count[1] = 4; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Combine with second "hyperslab" selection */ + start[0] = 4; + start[1] = 4; + count[0] = 4; + count[1] = 4; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_OR, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + if (offset != NULL) { + HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t)); + + /* Set offset, if provided */ + ret = H5Soffset_simple(sid1, real_offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + } /* end if */ + else + HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t)); + + /* Set fill value */ + fill_value = SPACE7_FILL; + + /* Fill selection in memory */ + ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1); + CHECK(ret, FAIL, "H5Dfill"); + + /* Verify memory buffer the hard way... */ + for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) + for (v = 0; v < SPACE7_DIM2; v++, tbuf++) { + for (w = 0; w < (unsigned)num_points; w++) { + if (u == (unsigned)((hssize_t)points[w][0] + real_offset[0]) && + v == (unsigned)((hssize_t)points[w][1] + real_offset[1])) { + if (*tbuf != (unsigned)fill_value) + TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, + (unsigned)fill_value); + break; + } /* end if */ + } /* end for */ + if (w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v)) + TestErrPrintf("Error! 
v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf, + ((u * SPACE7_DIM2) + v)); + } /* end for */ + + /* Initialize the iterator structure */ + iter_info.fill_value = SPACE7_FILL; + iter_info.curr_coord = 0; + iter_info.coords = (hsize_t *)iter_points; + + /* Add in the offset */ + for (u = 0; u < (unsigned)num_iter_points; u++) { + iter_points[u][0] = (hsize_t)((hssize_t)iter_points[u][0] + real_offset[0]); + iter_points[u][1] = (hsize_t)((hssize_t)iter_points[u][1] + real_offset[1]); + } /* end for */ + + /* Iterate through selection, verifying correct data */ + ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info); + CHECK(ret, FAIL, "H5Diterate"); + + /* Close dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Free memory buffers */ + HDfree(wbuf); +} /* test_select_fill_hyper_irregular() */ + +/**************************************************************** +** +** test_select_none(): Test basic H5S (dataspace) selection code. +** Tests I/O on 0-sized point selections +** +****************************************************************/ +static void +test_select_none(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; + hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2}; + uint8_t *wbuf, /* buffer to write to disk */ + *rbuf, /* buffer to read from disk */ + *tbuf; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing I/O on 0-sized Selections\n")); + + /* Allocate write & read buffers */ + wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE7_DIM1 * SPACE7_DIM2); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), SPACE7_DIM1 * SPACE7_DIM2); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize write buffer */ + for (i = 0, tbuf = wbuf; i < SPACE7_DIM1; i++) + for (j = 0; j < SPACE7_DIM2; j++) + *tbuf++ = (uint8_t)((i * SPACE7_DIM2) + j); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE7_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Make "none" selection in both disk and memory datasets */ + ret = H5Sselect_none(sid1); + CHECK(ret, FAIL, "H5Sselect_none"); + + ret = H5Sselect_none(sid2); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Attempt to read "nothing" from disk (before space is allocated) */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Write "nothing" to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Write "nothing" to disk (with a datatype conversion :-) */ + ret = H5Dwrite(dataset, H5T_NATIVE_INT, sid2, sid1, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Write "nothing" to disk (with NULL buffer argument) */ + ret = H5Dwrite(dataset, H5T_NATIVE_INT, sid2, sid1, H5P_DEFAULT, NULL); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read "nothing" from disk (with NULL 
buffer argument) */ + ret = H5Dread(dataset, H5T_NATIVE_INT, sid2, sid1, H5P_DEFAULT, NULL); + CHECK(ret, FAIL, "H5Dread"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_none() */ + +/**************************************************************** +** +** test_scalar_select(): Test basic H5S (dataspace) selection code. +** Tests selections on scalar dataspaces +** +****************************************************************/ +static void +test_scalar_select(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2}; + hsize_t coord1[SPACE7_RANK]; /* Coordinates for point selection */ + hsize_t start[SPACE7_RANK]; /* Hyperslab start */ + hsize_t count[SPACE7_RANK]; /* Hyperslab block count */ + uint8_t *wbuf_uint8, /* buffer to write to disk */ + rval_uint8, /* value read back in */ + *tbuf_uint8; /* temporary buffer pointer */ + unsigned short *wbuf_ushort, /* another buffer to write to disk */ + rval_ushort, /* value read back in */ + *tbuf_ushort; /* temporary buffer pointer */ + int i, j; /* Counters */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing I/O on Selections in Scalar Dataspaces\n")); + + /* Allocate write & read buffers */ + wbuf_uint8 = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE7_DIM1 * SPACE7_DIM2); + CHECK_PTR(wbuf_uint8, "HDmalloc"); + wbuf_ushort = (unsigned short *)HDmalloc(sizeof(unsigned short) * SPACE7_DIM1 * SPACE7_DIM2); + CHECK_PTR(wbuf_ushort, "HDmalloc"); + + /* Initialize write buffers */ + for (i = 0, tbuf_uint8 = wbuf_uint8, tbuf_ushort = wbuf_ushort; i < SPACE7_DIM1; i++) + for (j = 0; j < SPACE7_DIM2; j++) { + *tbuf_uint8++ = (uint8_t)((i * SPACE7_DIM2) + j); + *tbuf_ushort++ = (unsigned short)((j * SPACE7_DIM2) + i); + } /* end for */ + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate(H5S_SCALAR); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate_simple(SPACE7_RANK, dims2, NULL); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Select one element in memory with a point selection */ + coord1[0] = 0; + coord1[1] = 2; + ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)1, (const hsize_t *)&coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Write single point to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf_uint8); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read scalar element from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid1, sid1, H5P_DEFAULT, &rval_uint8); + CHECK(ret, FAIL, "H5Dread"); + + /* Check value read back in */ + if (rval_uint8 != *(wbuf_uint8 + 2)) + TestErrPrintf("Error! 
rval=%u, should be: *(wbuf+2)=%u\n", (unsigned)rval_uint8, + (unsigned)*(wbuf_uint8 + 2)); + + /* Write single point to disk (with a datatype conversion) */ + ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, wbuf_ushort); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read scalar element from disk */ + ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid1, sid1, H5P_DEFAULT, &rval_ushort); + CHECK(ret, FAIL, "H5Dread"); + + /* Check value read back in */ + if (rval_ushort != *(wbuf_ushort + 2)) + TestErrPrintf("Error! rval=%u, should be: *(wbuf+2)=%u\n", (unsigned)rval_ushort, + (unsigned)*(wbuf_ushort + 2)); + + /* Select one element in memory with a hyperslab selection */ + start[0] = 4; + start[1] = 3; + count[0] = 1; + count[1] = 1; + ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Write single hyperslab element to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf_uint8); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read scalar element from disk */ + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid1, sid1, H5P_DEFAULT, &rval_uint8); + CHECK(ret, FAIL, "H5Dread"); + + /* Check value read back in */ + if (rval_uint8 != *(wbuf_uint8 + (SPACE7_DIM2 * 4) + 3)) + TestErrPrintf("Error! rval=%u, should be: *(wbuf+(SPACE7_DIM2*4)+3)=%u\n", (unsigned)rval_uint8, + (unsigned)*(wbuf_uint8 + (SPACE7_DIM2 * 4) + 3)); + + /* Write single hyperslab element to disk (with a datatype conversion) */ + ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, wbuf_ushort); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read scalar element from disk */ + ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid1, sid1, H5P_DEFAULT, &rval_ushort); + CHECK(ret, FAIL, "H5Dread"); + + /* Check value read back in */ + if (rval_ushort != *(wbuf_ushort + (SPACE7_DIM2 * 4) + 3)) + TestErrPrintf("Error! rval=%u, should be: *(wbuf+(SPACE7_DIM2*4)+3)=%u\n", (unsigned)rval_ushort, + (unsigned)*(wbuf_ushort + (SPACE7_DIM2 * 4) + 3)); + + /* Select no elements in memory & file with "none" selections */ + ret = H5Sselect_none(sid1); + CHECK(ret, FAIL, "H5Sselect_none"); + + ret = H5Sselect_none(sid2); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Write no data to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf_uint8); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Write no data to disk (with a datatype conversion) */ + ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, wbuf_ushort); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free memory buffers */ + HDfree(wbuf_uint8); + HDfree(wbuf_ushort); +} /* test_scalar_select() */ + +/**************************************************************** +** +** test_scalar_select2(): Tests selections on scalar dataspace, +** verify H5Sselect_hyperslab and H5Sselect_elements fails for +** scalar dataspace. 
+** +****************************************************************/ +static void +test_scalar_select2(void) +{ + hid_t sid; /* Dataspace ID */ + hsize_t coord1[1]; /* Coordinates for point selection */ + hsize_t start[1]; /* Hyperslab start */ + hsize_t count[1]; /* Hyperslab block count */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(6, ("Testing Selections in Scalar Dataspaces\n")); + + /* Create dataspace for dataset */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Select one element in memory with a point selection */ + coord1[0] = 0; + H5E_BEGIN_TRY + { + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)&coord1); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sselect_elements"); + + /* Select one element in memory with a hyperslab selection */ + start[0] = 0; + count[0] = 0; + H5E_BEGIN_TRY + { + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select no elements in memory & file with "none" selection */ + ret = H5Sselect_none(sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Select all elements in memory & file with "all" selection */ + ret = H5Sselect_all(sid); + CHECK(ret, FAIL, "H5Sselect_all"); + + /* Close disk dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_scalar_select2() */ + +/**************************************************************** +** +** test_scalar_select3(): Test basic H5S (dataspace) selection code. +** Tests selections on scalar dataspaces in memory +** +****************************************************************/ +static void +test_scalar_select3(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1, sid2; /* Dataspace ID */ + hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2}; + hsize_t coord1[SPACE7_RANK]; /* Coordinates for point selection */ + hsize_t start[SPACE7_RANK]; /* Hyperslab start */ + hsize_t count[SPACE7_RANK]; /* Hyperslab block count */ + uint8_t wval_uint8, /* Value written out */ + rval_uint8; /* Value read in */ + unsigned short wval_ushort, /* Another value written out */ + rval_ushort; /* Another value read in */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing I/O on Selections in Scalar Dataspaces in Memory\n")); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid1 = H5Screate_simple(SPACE7_RANK, dims2, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create dataspace for writing buffer */ + sid2 = H5Screate(H5S_SCALAR); + CHECK(sid2, FAIL, "H5Screate_simple"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Select one element in file with a point selection */ + coord1[0] = 0; + coord1[1] = 2; + ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)1, (const hsize_t *)&coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Write single point to disk */ + wval_uint8 = 12; + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &wval_uint8); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read scalar element from disk */ + rval_uint8 = 0; + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &rval_uint8); + CHECK(ret, FAIL, 
"H5Dread"); + + /* Check value read back in */ + if (rval_uint8 != wval_uint8) + TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_uint8, + (unsigned)wval_uint8); + + /* Write single point to disk (with a datatype conversion) */ + wval_ushort = 23; + ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &wval_ushort); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read scalar element from disk */ + rval_ushort = 0; + ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &rval_ushort); + CHECK(ret, FAIL, "H5Dread"); + + /* Check value read back in */ + if (rval_ushort != wval_ushort) + TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_ushort, + (unsigned)wval_ushort); + + /* Select one element in file with a hyperslab selection */ + start[0] = 4; + start[1] = 3; + count[0] = 1; + count[1] = 1; + ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Write single hyperslab element to disk */ + wval_uint8 = 92; + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &wval_uint8); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read scalar element from disk */ + rval_uint8 = 0; + ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &rval_uint8); + CHECK(ret, FAIL, "H5Dread"); + + /* Check value read back in */ + if (rval_uint8 != wval_uint8) + TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_uint8, + (unsigned)wval_uint8); + + /* Write single hyperslab element to disk (with a datatype conversion) */ + wval_ushort = 107; + ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &wval_ushort); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read scalar element from disk */ + rval_ushort = 0; + ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &rval_ushort); + CHECK(ret, FAIL, "H5Dread"); + + /* Check value read back in */ + if (rval_ushort != wval_ushort) + TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_ushort, + (unsigned)wval_ushort); + + /* Select no elements in memory & file with "none" selections */ + ret = H5Sselect_none(sid1); + CHECK(ret, FAIL, "H5Sselect_none"); + + ret = H5Sselect_none(sid2); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Write no data to disk */ + ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &wval_uint8); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Write no data to disk (with a datatype conversion) */ + ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &wval_ushort); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close memory dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_scalar_select3() */ + +/**************************************************************** +** +** test_shape_same(): Tests selections on dataspace, verify that +** "shape same" routine is working correctly. 
+** +****************************************************************/ +static void +test_shape_same(void) +{ + hid_t all_sid; /* Dataspace ID with "all" selection */ + hid_t none_sid; /* Dataspace ID with "none" selection */ + hid_t single_pt_sid; /* Dataspace ID with single point selection */ + hid_t mult_pt_sid; /* Dataspace ID with multiple point selection */ + hid_t single_hyper_sid; /* Dataspace ID with single block hyperslab selection */ + hid_t single_hyper_all_sid; /* Dataspace ID with single block hyperslab + * selection that is the entire dataspace + */ + hid_t single_hyper_pt_sid; /* Dataspace ID with single block hyperslab + * selection that is the same as the single + * point selection + */ + hid_t regular_hyper_sid; /* Dataspace ID with regular hyperslab selection */ + hid_t irreg_hyper_sid; /* Dataspace ID with irregular hyperslab selection */ + hid_t none_hyper_sid; /* Dataspace ID with "no hyperslabs" selection */ + hid_t scalar_all_sid; /* ID for scalar dataspace with "all" selection */ + hid_t scalar_none_sid; /* ID for scalar dataspace with "none" selection */ + hid_t tmp_sid; /* Temporary dataspace ID */ + hsize_t dims[] = {SPACE9_DIM1, SPACE9_DIM2}; + hsize_t coord1[1][SPACE2_RANK]; /* Coordinates for single point selection */ + hsize_t coord2[SPACE9_DIM2][SPACE9_RANK]; /* Coordinates for multiple point selection */ + hsize_t start[SPACE9_RANK]; /* Hyperslab start */ + hsize_t stride[SPACE9_RANK]; /* Hyperslab stride */ + hsize_t count[SPACE9_RANK]; /* Hyperslab block count */ + hsize_t block[SPACE9_RANK]; /* Hyperslab block size */ + unsigned u, v; /* Local index variables */ + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(6, ("Testing Same Shape Comparisons\n")); + HDassert(SPACE9_DIM2 >= POINT1_NPOINTS); + + /* Create dataspace for "all" selection */ + all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(all_sid, FAIL, "H5Screate_simple"); + + /* Select entire extent for dataspace */ + ret = H5Sselect_all(all_sid); + CHECK(ret, FAIL, "H5Sselect_all"); + + /* Create dataspace for "none" selection */ + none_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(none_sid, FAIL, "H5Screate_simple"); + + /* Un-Select entire extent for dataspace */ + ret = H5Sselect_none(none_sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Create dataspace for single point selection */ + single_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(single_pt_sid, FAIL, "H5Screate_simple"); + + /* Select sequence of ten points for multiple point selection */ + coord1[0][0] = 2; + coord1[0][1] = 2; + ret = H5Sselect_elements(single_pt_sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Create dataspace for multiple point selection */ + mult_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(mult_pt_sid, FAIL, "H5Screate_simple"); + + /* Select sequence of ten points for multiple point selection */ + coord2[0][0] = 2; + coord2[0][1] = 2; + coord2[1][0] = 7; + coord2[1][1] = 2; + coord2[2][0] = 1; + coord2[2][1] = 4; + coord2[3][0] = 2; + coord2[3][1] = 6; + coord2[4][0] = 0; + coord2[4][1] = 8; + coord2[5][0] = 3; + coord2[5][1] = 2; + coord2[6][0] = 4; + coord2[6][1] = 4; + coord2[7][0] = 1; + coord2[7][1] = 0; + coord2[8][0] = 5; + coord2[8][1] = 1; + coord2[9][0] = 9; + coord2[9][1] = 3; + ret = H5Sselect_elements(mult_pt_sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2); + 
CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Create dataspace for single hyperslab selection */ + single_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(single_hyper_sid, FAIL, "H5Screate_simple"); + + /* Select 10x10 hyperslab for single hyperslab selection */ + start[0] = 1; + start[1] = 1; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = (SPACE9_DIM1 - 2); + block[1] = (SPACE9_DIM2 - 2); + ret = H5Sselect_hyperslab(single_hyper_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create dataspace for single hyperslab selection with entire extent selected */ + single_hyper_all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(single_hyper_all_sid, FAIL, "H5Screate_simple"); + + /* Select entire extent for hyperslab selection */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = SPACE9_DIM1; + block[1] = SPACE9_DIM2; + ret = H5Sselect_hyperslab(single_hyper_all_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create dataspace for single hyperslab selection with single point selected */ + single_hyper_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(single_hyper_pt_sid, FAIL, "H5Screate_simple"); + + /* Select entire extent for hyperslab selection */ + start[0] = 2; + start[1] = 2; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(single_hyper_pt_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create dataspace for regular hyperslab selection */ + regular_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(regular_hyper_sid, FAIL, "H5Screate_simple"); + + /* Select regular, strided hyperslab selection */ + start[0] = 2; + start[1] = 2; + stride[0] = 2; + stride[1] = 2; + count[0] = 5; + count[1] = 2; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(regular_hyper_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create dataspace for irregular hyperslab selection */ + irreg_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(irreg_hyper_sid, FAIL, "H5Screate_simple"); + + /* Create irregular hyperslab selection by OR'ing two blocks together */ + start[0] = 2; + start[1] = 2; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(irreg_hyper_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 4; + start[1] = 4; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 3; + block[1] = 3; + ret = H5Sselect_hyperslab(irreg_hyper_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create dataspace for "no" hyperslab selection */ + none_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(none_hyper_sid, FAIL, "H5Screate_simple"); + + /* Create "no" hyperslab selection by XOR'ing same blocks together */ + start[0] = 2; + start[1] = 2; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_XOR, start, stride, count, block); + CHECK(ret, 
FAIL, "H5Sselect_hyperslab"); + + /* Create scalar dataspace for "all" selection */ + scalar_all_sid = H5Screate(H5S_SCALAR); + CHECK(scalar_all_sid, FAIL, "H5Screate"); + + /* Create scalar dataspace for "none" selection */ + scalar_none_sid = H5Screate(H5S_SCALAR); + CHECK(scalar_none_sid, FAIL, "H5Screate"); + + /* Un-Select entire extent for dataspace */ + ret = H5Sselect_none(scalar_none_sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Compare "all" selection to all the selections created */ + /* Compare against itself */ + check = H5Sselect_shape_same(all_sid, all_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against copy of itself */ + tmp_sid = H5Scopy(all_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); + + check = H5Sselect_shape_same(all_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against "none" selection */ + check = H5Sselect_shape_same(all_sid, none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against single point selection */ + check = H5Sselect_shape_same(all_sid, single_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against multiple point selection */ + check = H5Sselect_shape_same(all_sid, mult_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "plain" single hyperslab selection */ + check = H5Sselect_shape_same(all_sid, single_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "all" single hyperslab selection */ + check = H5Sselect_shape_same(all_sid, single_hyper_all_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against "single point" single hyperslab selection */ + check = H5Sselect_shape_same(all_sid, single_hyper_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against regular, strided hyperslab selection */ + check = H5Sselect_shape_same(all_sid, regular_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against irregular hyperslab selection */ + check = H5Sselect_shape_same(all_sid, irreg_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "no" hyperslab selection */ + check = H5Sselect_shape_same(all_sid, none_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "all" hyperslab selection */ + check = H5Sselect_shape_same(all_sid, scalar_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "none" hyperslab selection */ + check = H5Sselect_shape_same(all_sid, scalar_none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare "none" selection to all the selections created */ + /* Compare against itself */ + check = H5Sselect_shape_same(none_sid, none_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against copy of itself */ + tmp_sid = H5Scopy(none_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); + + check = H5Sselect_shape_same(none_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against "all" selection */ + check = H5Sselect_shape_same(none_sid, all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against single point selection */ + check = H5Sselect_shape_same(none_sid, single_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against multiple point selection */ + check = H5Sselect_shape_same(none_sid, mult_pt_sid); + 
VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "plain" single hyperslab selection */ + check = H5Sselect_shape_same(none_sid, single_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "all" single hyperslab selection */ + check = H5Sselect_shape_same(none_sid, single_hyper_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "single point" single hyperslab selection */ + check = H5Sselect_shape_same(none_sid, single_hyper_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against regular, strided hyperslab selection */ + check = H5Sselect_shape_same(none_sid, regular_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against irregular hyperslab selection */ + check = H5Sselect_shape_same(none_sid, irreg_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "no" hyperslab selection */ + check = H5Sselect_shape_same(none_sid, none_hyper_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against scalar "all" hyperslab selection */ + check = H5Sselect_shape_same(none_sid, scalar_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "none" hyperslab selection */ + check = H5Sselect_shape_same(none_sid, scalar_none_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare single point selection to all the selections created */ + /* Compare against itself */ + check = H5Sselect_shape_same(single_pt_sid, single_pt_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against copy of itself */ + tmp_sid = H5Scopy(single_pt_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); + + check = H5Sselect_shape_same(single_pt_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against "all" selection */ + check = H5Sselect_shape_same(single_pt_sid, all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "none" selection */ + check = H5Sselect_shape_same(single_pt_sid, none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against multiple point selection */ + check = H5Sselect_shape_same(single_pt_sid, mult_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "plain" single hyperslab selection */ + check = H5Sselect_shape_same(single_pt_sid, single_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "all" single hyperslab selection */ + check = H5Sselect_shape_same(single_pt_sid, single_hyper_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "single point" single hyperslab selection */ + check = H5Sselect_shape_same(single_pt_sid, single_hyper_pt_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against regular, strided hyperslab selection */ + check = H5Sselect_shape_same(single_pt_sid, regular_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against irregular hyperslab selection */ + check = H5Sselect_shape_same(single_pt_sid, irreg_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "no" hyperslab selection */ + check = H5Sselect_shape_same(single_pt_sid, none_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "all" hyperslab selection */ + check = H5Sselect_shape_same(single_pt_sid, scalar_all_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare 
against scalar "none" hyperslab selection */ + check = H5Sselect_shape_same(single_pt_sid, scalar_none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare multiple point selection to all the selections created */ + /* Compare against itself */ + check = H5Sselect_shape_same(mult_pt_sid, mult_pt_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against copy of itself */ + tmp_sid = H5Scopy(mult_pt_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); + + check = H5Sselect_shape_same(mult_pt_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against "all" selection */ + check = H5Sselect_shape_same(mult_pt_sid, all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "none" selection */ + check = H5Sselect_shape_same(mult_pt_sid, none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against single point selection */ + check = H5Sselect_shape_same(mult_pt_sid, single_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "plain" single hyperslab selection */ + check = H5Sselect_shape_same(mult_pt_sid, single_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "all" single hyperslab selection */ + check = H5Sselect_shape_same(mult_pt_sid, single_hyper_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "single point" single hyperslab selection */ + check = H5Sselect_shape_same(mult_pt_sid, single_hyper_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against regular, strided hyperslab selection */ + check = H5Sselect_shape_same(mult_pt_sid, regular_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against irregular hyperslab selection */ + check = H5Sselect_shape_same(mult_pt_sid, irreg_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "no" hyperslab selection */ + check = H5Sselect_shape_same(mult_pt_sid, none_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "all" hyperslab selection */ + check = H5Sselect_shape_same(mult_pt_sid, scalar_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "none" hyperslab selection */ + check = H5Sselect_shape_same(mult_pt_sid, scalar_none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare single "normal" hyperslab selection to all the selections created */ + /* Compare against itself */ + check = H5Sselect_shape_same(single_hyper_sid, single_hyper_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against copy of itself */ + tmp_sid = H5Scopy(single_hyper_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); + + check = H5Sselect_shape_same(single_hyper_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against "all" selection */ + check = H5Sselect_shape_same(single_hyper_sid, all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "none" selection */ + check = H5Sselect_shape_same(single_hyper_sid, none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against single point selection */ + check = H5Sselect_shape_same(single_hyper_sid, single_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against multiple point selection */ + check = H5Sselect_shape_same(single_hyper_sid, mult_pt_sid); + 
VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "all" single hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_sid, single_hyper_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "single point" single hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_sid, single_hyper_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against regular, strided hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_sid, regular_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against irregular hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_sid, irreg_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "no" hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_sid, none_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + +#ifdef NOT_YET + /* In theory, these two selections are the same shape, but the + * H5Sselect_shape_same() routine is just not this sophisticated yet and it + * would take too much effort to make this work. The worst case is that the + * non-optimized chunk mapping routines will be invoked instead of the more + * optimized routines, so this only hurts performance, not correctness + */ + /* Construct point selection which matches "plain" hyperslab selection */ + /* Create dataspace for point selection */ + tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(tmp_sid, FAIL, "H5Screate_simple"); + + /* Select sequence of points for point selection */ + for (u = 1; u < (SPACE9_DIM1 - 1); u++) { + for (v = 1; v < (SPACE9_DIM2 - 1); v++) { + coord2[v - 1][0] = u; + coord2[v - 1][1] = v; + } /* end for */ + + ret = H5Sselect_elements(tmp_sid, H5S_SELECT_APPEND, (SPACE9_DIM2 - 2), coord2); + CHECK(ret, FAIL, "H5Sselect_elements"); + } /* end for */ + + /* Compare against hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); +#endif /* NOT_YET */ + + /* Construct hyperslab selection which matches "plain" hyperslab selection */ + /* Create dataspace for hyperslab selection */ + tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(tmp_sid, FAIL, "H5Screate_simple"); + + /* Un-select entire extent */ + ret = H5Sselect_none(tmp_sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Select sequence of rows for hyperslab selection */ + for (u = 1; u < (SPACE9_DIM1 - 1); u++) { + start[0] = u; + start[1] = 1; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 1; + block[1] = (SPACE9_DIM2 - 2); + ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + } /* end for */ + + /* Compare against hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against scalar "all" hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_sid, scalar_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "none" hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_sid, scalar_none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare single "all" hyperslab selection to all the selections created */ + /* Compare against itself */ + check = 
H5Sselect_shape_same(single_hyper_all_sid, single_hyper_all_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against copy of itself */ + tmp_sid = H5Scopy(single_hyper_all_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); + + check = H5Sselect_shape_same(single_hyper_all_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against "all" selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, all_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against "none" selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against single point selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, single_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against multiple point selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, mult_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "plain" single hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, single_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "single point" single hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, single_hyper_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against regular, strided hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, regular_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against irregular hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, irreg_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "no" hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, none_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + +#ifdef NOT_YET + /* In theory, these two selections are the same shape, but the + * H5S_select_shape_same() routine is just not this sophisticated yet and it + * would take too much effort to make this work. 
The worst case is that the + * non-optimized chunk mapping routines will be invoked instead of the more + * optimized routines, so this only hurts performance, not correctness + */ + /* Construct point selection which matches "all" hyperslab selection */ + /* Create dataspace for point selection */ + tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(tmp_sid, FAIL, "H5Screate_simple"); + + /* Select sequence of points for point selection */ + for (u = 0; u < SPACE9_DIM1; u++) { + for (v = 0; v < SPACE9_DIM2; v++) { + coord2[v][0] = u; + coord2[v][1] = v; + } /* end for */ + ret = H5Sselect_elements(tmp_sid, H5S_SELECT_APPEND, SPACE9_DIM2, coord2); + CHECK(ret, FAIL, "H5Sselect_elements"); + } /* end for */ + + /* Compare against hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); +#endif /* NOT_YET */ + + /* Construct hyperslab selection which matches "all" hyperslab selection */ + /* Create dataspace for hyperslab selection */ + tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(tmp_sid, FAIL, "H5Screate_simple"); + + /* Un-select entire extent */ + ret = H5Sselect_none(tmp_sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Select sequence of rows for hyperslab selection */ + for (u = 0; u < SPACE9_DIM2; u++) { + start[0] = u; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 1; + block[1] = SPACE9_DIM2; + ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + } /* end for */ + + /* Compare against hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against scalar "all" hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, scalar_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "none" hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_all_sid, scalar_none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare single "point" hyperslab selection to all the selections created */ + /* Compare against itself */ + check = H5Sselect_shape_same(single_hyper_pt_sid, single_hyper_pt_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against copy of itself */ + tmp_sid = H5Scopy(single_hyper_pt_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); + + check = H5Sselect_shape_same(single_hyper_pt_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against "all" selection */ + check = H5Sselect_shape_same(single_hyper_pt_sid, all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "none" selection */ + check = H5Sselect_shape_same(single_hyper_pt_sid, none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against single point selection */ + check = H5Sselect_shape_same(single_hyper_pt_sid, single_pt_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against multiple point selection */ + check = H5Sselect_shape_same(single_hyper_pt_sid, mult_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "plain" single hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_pt_sid, single_hyper_sid); + 
VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "all" single hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_pt_sid, single_hyper_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against regular, strided hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_pt_sid, regular_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against irregular hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_pt_sid, irreg_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "no" hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_pt_sid, none_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "all" hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_pt_sid, scalar_all_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against scalar "none" hyperslab selection */ + check = H5Sselect_shape_same(single_hyper_pt_sid, scalar_none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare regular, strided hyperslab selection to all the selections created */ + /* Compare against itself */ + check = H5Sselect_shape_same(regular_hyper_sid, regular_hyper_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against copy of itself */ + tmp_sid = H5Scopy(regular_hyper_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); + + check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against "all" selection */ + check = H5Sselect_shape_same(regular_hyper_sid, all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "none" selection */ + check = H5Sselect_shape_same(regular_hyper_sid, none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against single point selection */ + check = H5Sselect_shape_same(regular_hyper_sid, single_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against multiple point selection */ + check = H5Sselect_shape_same(regular_hyper_sid, mult_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "plain" single hyperslab selection */ + check = H5Sselect_shape_same(regular_hyper_sid, single_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "all" single hyperslab selection */ + check = H5Sselect_shape_same(regular_hyper_sid, single_hyper_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "single point" single hyperslab selection */ + check = H5Sselect_shape_same(regular_hyper_sid, single_hyper_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against irregular hyperslab selection */ + check = H5Sselect_shape_same(regular_hyper_sid, irreg_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "no" hyperslab selection */ + check = H5Sselect_shape_same(regular_hyper_sid, none_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Construct point selection which matches regular, strided hyperslab selection */ + /* Create dataspace for point selection */ + tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(tmp_sid, FAIL, "H5Screate_simple"); + + /* Select sequence of points for point selection */ + for (u = 2; u < 11; u += 2) { + for (v = 0; v < 2; v++) { + coord2[v][0] = u; + coord2[v][1] = (v * 2) + 2; + } /* end for */ + 
ret = H5Sselect_elements(tmp_sid, H5S_SELECT_APPEND, (size_t)2, (const hsize_t *)coord2); + CHECK(ret, FAIL, "H5Sselect_elements"); + } /* end for */ + + /* Compare against hyperslab selection */ + check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Construct hyperslab selection which matches regular, strided hyperslab selection */ + /* Create dataspace for hyperslab selection */ + tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(tmp_sid, FAIL, "H5Screate_simple"); + + /* Un-select entire extent */ + ret = H5Sselect_none(tmp_sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Select sequence of rows for hyperslab selection */ + for (u = 2; u < 11; u += 2) { + start[0] = u; + start[1] = 3; + stride[0] = 1; + stride[1] = 2; + count[0] = 1; + count[1] = 2; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + } /* end for */ + + /* Compare against hyperslab selection */ + check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Construct regular hyperslab selection with an offset which matches regular, strided hyperslab selection + */ + /* Create dataspace for hyperslab selection */ + tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(tmp_sid, FAIL, "H5Screate_simple"); + + /* Select regular, strided hyperslab selection at an offset */ + start[0] = 1; + start[1] = 1; + stride[0] = 2; + stride[1] = 2; + count[0] = 5; + count[1] = 2; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Compare against hyperslab selection */ + check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against scalar "all" hyperslab selection */ + check = H5Sselect_shape_same(regular_hyper_sid, scalar_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "none" hyperslab selection */ + check = H5Sselect_shape_same(regular_hyper_sid, scalar_none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare irregular hyperslab selection to all the selections created */ + /* Compare against itself */ + check = H5Sselect_shape_same(irreg_hyper_sid, irreg_hyper_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against copy of itself */ + tmp_sid = H5Scopy(irreg_hyper_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); + + check = H5Sselect_shape_same(irreg_hyper_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against "all" selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "none" selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against single point selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, single_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against multiple point selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, mult_pt_sid); + VERIFY(check, FALSE, 
"H5Sselect_shape_same"); + + /* Compare against "plain" single hyperslab selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, single_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "all" single hyperslab selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, single_hyper_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "single point" single hyperslab selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, single_hyper_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against regular, strided hyperslab selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, regular_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "no" hyperslab selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, none_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Construct hyperslab selection which matches irregular hyperslab selection */ + /* Create dataspace for hyperslab selection */ + tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(tmp_sid, FAIL, "H5Screate_simple"); + + start[0] = 2; + start[1] = 2; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Select sequence of columns for hyperslab selection */ + for (u = 0; u < 3; u++) { + start[0] = 4; + start[1] = u + 4; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 3; + block[1] = 1; + ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + } /* end for */ + + /* Compare against hyperslab selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against scalar "all" hyperslab selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, scalar_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "none" hyperslab selection */ + check = H5Sselect_shape_same(irreg_hyper_sid, scalar_none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare scalar "all" dataspace with all selections created */ + + /* Compare against itself */ + check = H5Sselect_shape_same(scalar_all_sid, scalar_all_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against copy of itself */ + tmp_sid = H5Scopy(scalar_all_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); + + check = H5Sselect_shape_same(scalar_all_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against "all" selection */ + check = H5Sselect_shape_same(scalar_all_sid, all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "none" selection */ + check = H5Sselect_shape_same(scalar_all_sid, none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against single point selection */ + check = H5Sselect_shape_same(scalar_all_sid, single_pt_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against multiple point selection */ + check = H5Sselect_shape_same(scalar_all_sid, mult_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "plain" single hyperslab selection */ + check = H5Sselect_shape_same(scalar_all_sid, 
single_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "all" single hyperslab selection */ + check = H5Sselect_shape_same(scalar_all_sid, single_hyper_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "single point" single hyperslab selection */ + check = H5Sselect_shape_same(scalar_all_sid, single_hyper_pt_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against regular, strided hyperslab selection */ + check = H5Sselect_shape_same(scalar_all_sid, regular_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against irregular hyperslab selection */ + check = H5Sselect_shape_same(scalar_all_sid, irreg_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "no" hyperslab selection */ + check = H5Sselect_shape_same(scalar_all_sid, none_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against scalar "none" hyperslab selection */ + check = H5Sselect_shape_same(scalar_all_sid, scalar_none_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare scalar "none" dataspace with all selections created */ + + /* Compare against itself */ + check = H5Sselect_shape_same(scalar_none_sid, scalar_none_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against copy of itself */ + tmp_sid = H5Scopy(scalar_none_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); + + check = H5Sselect_shape_same(scalar_none_sid, tmp_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Compare against "all" selection */ + check = H5Sselect_shape_same(scalar_none_sid, all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "none" selection */ + check = H5Sselect_shape_same(scalar_none_sid, none_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against single point selection */ + check = H5Sselect_shape_same(scalar_none_sid, single_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against multiple point selection */ + check = H5Sselect_shape_same(scalar_none_sid, mult_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "plain" single hyperslab selection */ + check = H5Sselect_shape_same(scalar_none_sid, single_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "all" single hyperslab selection */ + check = H5Sselect_shape_same(scalar_none_sid, single_hyper_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "single point" single hyperslab selection */ + check = H5Sselect_shape_same(scalar_none_sid, single_hyper_pt_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against regular, strided hyperslab selection */ + check = H5Sselect_shape_same(scalar_none_sid, regular_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against irregular hyperslab selection */ + check = H5Sselect_shape_same(scalar_none_sid, irreg_hyper_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "no" hyperslab selection */ + check = H5Sselect_shape_same(scalar_none_sid, none_hyper_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Compare against scalar "all" hyperslab selection */ + check = H5Sselect_shape_same(scalar_none_sid, scalar_all_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Close dataspaces */ + ret = H5Sclose(all_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = 
H5Sclose(none_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(single_pt_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(mult_pt_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(single_hyper_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(single_hyper_all_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(single_hyper_pt_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(regular_hyper_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(irreg_hyper_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(none_hyper_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(scalar_all_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(scalar_none_sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_shape_same() */ + +/**************************************************************** +** +** test_shape_same_dr__smoke_check_1(): +** +** Create a square, 2-D dataspace (10 X 10), and select +** all of it. +** +** Similarly, create nine, 3-D dataspaces (10 X 10 X 10), +** and select (10 X 10 X 1) hyperslabs in each, three with +** the slab parallel to the xy plane, three parallel to the +** xz plane, and three parallel to the yz plane. +** +** Assuming that z is the fastest changing dimension, +** H5Sselect_shape_same() should return TRUE when comparing +** the full 2-D space against any hyperslab parallel to the +** yz plane in the 3-D space, and FALSE when comparing the +** full 2-D space against the other two hyperslabs. +** +** Also create two additional 3-D dataspaces (10 X 10 X 10), +** and select a (10 X 10 X 2) hyperslab parallel to the yz +** axis in one of them, and two parallel (10 X 10 X 1) hyper +** slabs parallel to the yz axis in the other. +** H5Sselect_shape_same() should return FALSE when comparing +** each to the 2-D selection. 
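+**
+** For reference, a minimal sketch of the kind of comparison this
+** check relies on (illustrative only, not executed by the test;
+** the variable names here are hypothetical):
+**
+**   hsize_t sq_dims[2]   = {10, 10};
+**   hsize_t cube_dims[3] = {10, 10, 10};
+**   hsize_t start[3] = {4, 0, 0}, count[3] = {1, 1, 1}, block[3] = {1, 10, 10};
+**   hid_t   sq_sid = H5Screate_simple(2, sq_dims, NULL);  /* default "all" selection */
+**   hid_t   yz_sid = H5Screate_simple(3, cube_dims, NULL);
+**   /* one 1 x 10 x 10 slab parallel to the yz plane (NULL stride => 1) */
+**   H5Sselect_hyperslab(yz_sid, H5S_SELECT_SET, start, NULL, count, block);
+**   htri_t same = H5Sselect_shape_same(sq_sid, yz_sid);   /* expected: TRUE */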
+** +****************************************************************/ +static void +test_shape_same_dr__smoke_check_1(void) +{ + hid_t small_square_sid; + hid_t small_cube_xy_slice_0_sid; + hid_t small_cube_xy_slice_1_sid; + hid_t small_cube_xy_slice_2_sid; + hid_t small_cube_xz_slice_0_sid; + hid_t small_cube_xz_slice_1_sid; + hid_t small_cube_xz_slice_2_sid; + hid_t small_cube_yz_slice_0_sid; + hid_t small_cube_yz_slice_1_sid; + hid_t small_cube_yz_slice_2_sid; + hid_t small_cube_yz_slice_3_sid; + hid_t small_cube_yz_slice_4_sid; + hsize_t small_cube_dims[] = {10, 10, 10}; + hsize_t start[3]; + hsize_t stride[3]; + hsize_t count[3]; + hsize_t block[3]; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + MESSAGE(7, (" Smoke check 1: Slices through a cube.\n")); + + /* Create the 10 x 10 dataspace */ + small_square_sid = H5Screate_simple(2, small_cube_dims, NULL); + CHECK(small_square_sid, FAIL, "H5Screate_simple"); + + /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xy axis */ + small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple"); + + small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple"); + + small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple"); + + start[0] = 0; /* x */ + start[1] = 0; /* y */ + start[2] = 0; /* z */ + + /* stride is a bit silly here, since we are only selecting a single */ + /* contiguous plane, but include it anyway, with values large enough */ + /* to ensure that we will only get the single block selected. */ + stride[0] = 20; /* x */ + stride[1] = 20; /* y */ + stride[2] = 20; /* z */ + + count[0] = 1; /* x */ + count[1] = 1; /* y */ + count[2] = 1; /* z */ + + block[0] = 10; /* x */ + block[1] = 10; /* y */ + block[2] = 1; /* z */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[2] = 5; + ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[2] = 9; + ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xz axis */ + small_cube_xz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple"); + + small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple"); + + small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple"); + + start[0] = 0; /* x */ + start[1] = 0; /* y */ + start[2] = 0; /* z */ + + /* stride is a bit silly here, since we are only selecting a single */ + /* contiguous chunk, but include it anyway, with values large enough */ + /* to ensure that we will only get the single chunk. 
*/ + stride[0] = 20; /* x */ + stride[1] = 20; /* y */ + stride[2] = 20; /* z */ + + count[0] = 1; /* x */ + count[1] = 1; /* y */ + count[2] = 1; /* z */ + + block[0] = 10; /* x */ + block[1] = 1; /* y */ + block[2] = 10; /* z */ + ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[1] = 4; + ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[1] = 9; + ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz axis */ + small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple"); + + small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple"); + + small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple"); + + small_cube_yz_slice_3_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_yz_slice_3_sid, FAIL, "H5Screate_simple"); + + small_cube_yz_slice_4_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_yz_slice_4_sid, FAIL, "H5Screate_simple"); + + start[0] = 0; /* x */ + start[1] = 0; /* y */ + start[2] = 0; /* z */ + + /* stride is a bit silly here, since we are only selecting a single */ + /* contiguous chunk, but include it anyway, with values large enough */ + /* to ensure that we will only get the single chunk. */ + stride[0] = 20; /* x */ + stride[1] = 20; /* y */ + stride[2] = 20; /* z */ + + count[0] = 1; /* x */ + count[1] = 1; /* y */ + count[2] = 1; /* z */ + + block[0] = 1; /* x */ + block[1] = 10; /* y */ + block[2] = 10; /* z */ + + ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 4; + ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 9; + ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 4; + block[0] = 2; + ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 3; + block[0] = 1; + ret = H5Sselect_hyperslab(small_cube_yz_slice_4_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 6; + ret = H5Sselect_hyperslab(small_cube_yz_slice_4_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* setup is done -- run the tests: */ + + /* Compare against "xy" selection */ + check = H5Sselect_shape_same(small_cube_xy_slice_0_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xy_slice_1_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xy_slice_2_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "xz" selection */ + check = H5Sselect_shape_same(small_cube_xz_slice_0_sid, small_square_sid); + 
VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xz_slice_1_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xz_slice_2_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "yz" selection */ + check = H5Sselect_shape_same(small_cube_yz_slice_0_sid, small_square_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_yz_slice_1_sid, small_square_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_yz_slice_2_sid, small_square_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_yz_slice_3_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_yz_slice_4_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Close dataspaces */ + ret = H5Sclose(small_square_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xy_slice_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xy_slice_1_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xy_slice_2_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xz_slice_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xz_slice_1_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xz_slice_2_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_1_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_2_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_3_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_4_sid); + CHECK(ret, FAIL, "H5Sclose"); + +} /* test_shape_same_dr__smoke_check_1() */ + +/**************************************************************** +** +** test_shape_same_dr__smoke_check_2(): +** +** Create a square, 2-D dataspace (10 X 10), and select +** a "checker board" hyperslab as follows: +** +** * * - - * * - - * * +** * * - - * * - - * * +** - - * * - - * * - - +** - - * * - - * * - - +** * * - - * * - - * * +** * * - - * * - - * * +** - - * * - - * * - - +** - - * * - - * * - - +** * * - - * * - - * * +** * * - - * * - - * * +** +** where asterisks indicate selected elements, and dashes +** indicate unselected elements. +** +** Similarly, create nine, 3-D dataspaces (10 X 10 X 10), +** and select similar (10 X 10 X 1) checker board hyper +** slabs in each, three with the slab parallel to the xy +** plane, three parallel to the xz plane, and three parallel +** to the yz plane. +** +** Assuming that z is the fastest changing dimension, +** H5Sselect_shape_same() should return TRUE when comparing +** the 2-D space checker board selection against a checker +** board hyperslab parallel to the yz plane in the 3-D +** space, and FALSE when comparing the 2-D checkerboard +** selection against two hyperslabs parallel to the xy +** or xz planes. +** +** Also create an additional 3-D dataspaces (10 X 10 X 10), +** and select a checker board parallel with the yz axis, +** save with some squares being on different planes. +** H5Sselect_shape_same() should return FALSE when +** comparing this selection to the 2-D selection. 
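+**
+** For reference, the 2-D checker board above is built from two
+** OR'd hyperslab selections (a sketch of what the test body does
+** below; "sid" is a placeholder for the 10 x 10 dataspace id):
+**
+**   hsize_t start[2]  = {0, 0};
+**   hsize_t stride[2] = {4, 4};
+**   hsize_t count[2]  = {3, 3};
+**   hsize_t block[2]  = {2, 2};
+**   H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+**   start[0] = start[1] = 2;
+**   count[0] = count[1] = 2;
+**   H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);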
+** +****************************************************************/ +static void +test_shape_same_dr__smoke_check_2(void) +{ + hid_t small_square_sid; + hid_t small_cube_xy_slice_0_sid; + hid_t small_cube_xy_slice_1_sid; + hid_t small_cube_xy_slice_2_sid; + hid_t small_cube_xz_slice_0_sid; + hid_t small_cube_xz_slice_1_sid; + hid_t small_cube_xz_slice_2_sid; + hid_t small_cube_yz_slice_0_sid; + hid_t small_cube_yz_slice_1_sid; + hid_t small_cube_yz_slice_2_sid; + hid_t small_cube_yz_slice_3_sid; + hsize_t small_cube_dims[] = {10, 10, 10}; + hsize_t start[3]; + hsize_t stride[3]; + hsize_t count[3]; + hsize_t block[3]; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + MESSAGE(7, (" Smoke check 2: Checker board slices through a cube.\n")); + + /* Create the 10 x 10 dataspace */ + small_square_sid = H5Screate_simple(2, small_cube_dims, NULL); + CHECK(small_square_sid, FAIL, "H5Screate_simple"); + + start[0] = 0; /* x */ + start[1] = 0; /* y */ + + stride[0] = 4; /* x */ + stride[1] = 4; /* y */ + + count[0] = 3; /* x */ + count[1] = 3; /* y */ + + block[0] = 2; /* x */ + block[1] = 2; /* y */ + ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 2; /* x */ + start[1] = 2; /* y */ + + stride[0] = 4; /* x */ + stride[1] = 4; /* y */ + + count[0] = 2; /* x */ + count[1] = 2; /* y */ + + block[0] = 2; /* x */ + block[1] = 2; /* y */ + ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xy axis */ + small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple"); + + small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple"); + + small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple"); + + start[0] = 0; /* x */ + start[1] = 0; /* y */ + start[2] = 0; /* z */ + + stride[0] = 4; /* x */ + stride[1] = 4; /* y */ + stride[2] = 20; /* z -- large enough that there will only be one slice */ + + count[0] = 3; /* x */ + count[1] = 3; /* y */ + count[2] = 1; /* z */ + + block[0] = 2; /* x */ + block[1] = 2; /* y */ + block[2] = 1; /* z */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[2] = 3; + ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[2] = 9; + ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 2; /* x */ + start[1] = 2; /* y */ + start[2] = 0; /* z */ + + stride[0] = 4; /* x */ + stride[1] = 4; /* y */ + stride[2] = 20; /* z -- large enough that there will only be one slice */ + + count[0] = 2; /* x */ + count[1] = 2; /* y */ + count[2] = 1; /* z */ + + block[0] = 2; /* x */ + block[1] = 2; /* y */ + block[2] = 1; /* z */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[2] = 3; + ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR, start, stride, count, 
block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[2] = 9; + ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xz axis */ + small_cube_xz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple"); + + small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple"); + + small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple"); + + start[0] = 0; /* x */ + start[1] = 0; /* y */ + start[2] = 0; /* z */ + + stride[0] = 4; /* x */ + stride[1] = 20; /* y -- large enough that there will only be one slice */ + stride[2] = 4; /* z */ + + count[0] = 3; /* x */ + count[1] = 1; /* y */ + count[2] = 3; /* z */ + + block[0] = 2; /* x */ + block[1] = 1; /* y */ + block[2] = 2; /* z */ + ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[1] = 5; + ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[1] = 9; + ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 2; /* x */ + start[1] = 0; /* y */ + start[2] = 2; /* z */ + + stride[0] = 4; /* x */ + stride[1] = 20; /* y -- large enough that there will only be one slice */ + stride[2] = 4; /* z */ + + count[0] = 2; /* x */ + count[1] = 1; /* y */ + count[2] = 2; /* z */ + + block[0] = 2; /* x */ + block[1] = 1; /* y */ + block[2] = 2; /* z */ + ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[1] = 5; + ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[1] = 9; + ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz axis */ + small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple"); + + small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple"); + + small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple"); + + small_cube_yz_slice_3_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_yz_slice_3_sid, FAIL, "H5Screate_simple"); + + start[0] = 0; /* x */ + start[1] = 0; /* y */ + start[2] = 0; /* z */ + + stride[0] = 20; /* x -- large enough that there will only be one slice */ + stride[1] = 4; /* y */ + stride[2] = 4; /* z */ + + count[0] = 1; /* x */ + count[1] = 3; /* y */ + count[2] = 3; /* z */ + + block[0] = 1; /* x */ + block[1] = 2; /* y */ + block[2] = 2; /* z */ + ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 8; + ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, 
H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 9; + ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 3; + ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 0; /* x */ + start[1] = 2; /* y */ + start[2] = 2; /* z */ + + stride[0] = 20; /* x -- large enough that there will only be one slice */ + stride[1] = 4; /* y */ + stride[2] = 4; /* z */ + + count[0] = 1; /* x */ + count[1] = 2; /* y */ + count[2] = 2; /* z */ + + block[0] = 1; /* x */ + block[1] = 2; /* y */ + block[2] = 2; /* z */ + ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 8; + ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 9; + ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 4; + /* This test gets the right answer, but it fails the shape same + * test in an unexpected point. Bring this up with Quincey, as + * the oddness looks like it is not related to my code. + * -- JRM + */ + ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* setup is done -- run the tests: */ + + /* Compare against "xy" selection */ + check = H5Sselect_shape_same(small_cube_xy_slice_0_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xy_slice_1_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xy_slice_2_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "xz" selection */ + check = H5Sselect_shape_same(small_cube_xz_slice_0_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xz_slice_1_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xz_slice_2_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "yz" selection */ + check = H5Sselect_shape_same(small_cube_yz_slice_0_sid, small_square_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_yz_slice_1_sid, small_square_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_yz_slice_2_sid, small_square_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_yz_slice_3_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Close dataspaces */ + ret = H5Sclose(small_square_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xy_slice_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xy_slice_1_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xy_slice_2_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xz_slice_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xz_slice_1_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xz_slice_2_sid); 
+ CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_1_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_2_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_3_sid); + CHECK(ret, FAIL, "H5Sclose"); + +} /* test_shape_same_dr__smoke_check_2() */ + +/**************************************************************** +** +** test_shape_same_dr__smoke_check_3(): +** +** Create a square, 2-D dataspace (10 X 10), and select an +** irregular hyperslab as follows: +** +** y +** 9 - - - - - - - - - - +** 8 - - - - - - - - - - +** 7 - - - * * * * - - - +** 6 - - * * * * * - - - +** 5 - - * * - - - - - - +** 4 - - * * - * * - - - +** 3 - - * * - * * - - - +** 2 - - - - - - - - - - +** 1 - - - - - - - - - - +** 0 - - - - - - - - - - +** 0 1 2 3 4 5 6 7 8 9 x +** +** where asterisks indicate selected elements, and dashes +** indicate unselected elements. +** +** Similarly, create nine, 3-D dataspaces (10 X 10 X 10), +** and select similar irregular hyperslabs in each, three +** with the slab parallel to the xy plane, three parallel +** to the xz plane, and three parallel to the yz plane. +** Further, translate the irregular slab in 2/3rds of the +** cases. +** +** Assuming that z is the fastest changing dimension, +** H5Sselect_shape_same() should return TRUE when +** comparing the 2-D irregular hyperslab selection +** against the irregular hyperslab selections parallel +** to the yz plane in the 3-D space, and FALSE when +** comparing it against the irregular hyperslabs +** selections parallel to the xy or xz planes. +** +****************************************************************/ +static void +test_shape_same_dr__smoke_check_3(void) +{ + hid_t small_square_sid; + hid_t small_cube_xy_slice_0_sid; + hid_t small_cube_xy_slice_1_sid; + hid_t small_cube_xy_slice_2_sid; + hid_t small_cube_xz_slice_0_sid; + hid_t small_cube_xz_slice_1_sid; + hid_t small_cube_xz_slice_2_sid; + hid_t small_cube_yz_slice_0_sid; + hid_t small_cube_yz_slice_1_sid; + hid_t small_cube_yz_slice_2_sid; + hsize_t small_cube_dims[] = {10, 10, 10}; + hsize_t start[3]; + hsize_t stride[3]; + hsize_t count[3]; + hsize_t block[3]; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + MESSAGE(7, (" Smoke check 3: Offset subsets of slices through a cube.\n")); + + /* Create the 10 x 10 dataspace */ + small_square_sid = H5Screate_simple(2, small_cube_dims, NULL); + CHECK(small_square_sid, FAIL, "H5Screate_simple"); + + start[0] = 2; /* x */ + start[1] = 3; /* y */ + + stride[0] = 20; /* x */ + stride[1] = 20; /* y */ + + count[0] = 1; /* x */ + count[1] = 1; /* y */ + + block[0] = 2; /* x */ + block[1] = 4; /* y */ + ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 3; /* x */ + start[1] = 6; /* y */ + + stride[0] = 20; /* x */ + stride[1] = 20; /* y */ + + count[0] = 1; /* x */ + count[1] = 1; /* y */ + + block[0] = 4; /* x */ + block[1] = 2; /* y */ + ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 5; /* x */ + start[1] = 3; /* y */ + + stride[0] = 20; /* x */ + stride[1] = 20; /* y */ + + count[0] = 1; /* x */ + count[1] = 1; /* y */ + + block[0] = 2; /* x */ + block[1] = 2; /* y */ + ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR, start, 
stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xy axis */ + small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple"); + + small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple"); + + small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); + CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple"); + + start[0] = 2; /* x */ + start[1] = 3; /* y */ + start[2] = 5; /* z */ + + stride[0] = 20; /* x */ + stride[1] = 20; /* y */ + stride[2] = 20; /* z */ + + count[0] = 1; /* x */ + count[1] = 1; /* y */ + count[2] = 1; /* z */ + + block[0] = 2; /* x */ + block[1] = 4; /* y */ + block[2] = 1; /* z */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* move the starting point to the origin */ + start[0] -= 1; /* x */ + start[1] -= 2; /* y */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* move the irregular selection to the upper right hand corner */ + start[0] += 5; /* x */ + start[1] += 5; /* y */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 3; /* x */ + start[1] = 6; /* y */ + start[2] = 5; /* z */ + + stride[0] = 20; /* x */ + stride[1] = 20; /* y */ + stride[2] = 20; /* z */ + + count[0] = 1; /* x */ + count[1] = 1; /* y */ + count[2] = 1; /* z */ + + block[0] = 4; /* x */ + block[1] = 2; /* y */ + block[2] = 1; /* z */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* move the starting point to the origin */ + start[0] -= 1; /* x */ + start[1] -= 2; /* y */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* move the irregular selection to the upper right hand corner */ + start[0] += 5; /* x */ + start[1] += 5; /* y */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 5; /* x */ + start[1] = 3; /* y */ + start[2] = 5; /* z */ + + stride[0] = 20; /* x */ + stride[1] = 20; /* y */ + stride[2] = 20; /* z */ + + count[0] = 1; /* x */ + count[1] = 1; /* y */ + count[2] = 1; /* z */ + + block[0] = 2; /* x */ + block[1] = 2; /* y */ + block[2] = 1; /* z */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* move the starting point to the origin */ + start[0] -= 1; /* x */ + start[1] -= 2; /* y */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* move the irregular selection to the upper right hand corner */ + start[0] += 5; /* x */ + start[1] += 5; /* y */ + ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xz axis */ + small_cube_xz_slice_0_sid = H5Screate_simple(3, 
small_cube_dims, NULL);
+    CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple");
+
+    small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+    CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple");
+
+    small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+    CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple");
+
+    start[0] = 2; /* x */
+    start[1] = 5; /* y */
+    start[2] = 3; /* z */
+
+    stride[0] = 20; /* x */
+    stride[1] = 20; /* y */
+    stride[2] = 20; /* z */
+
+    count[0] = 1; /* x */
+    count[1] = 1; /* y */
+    count[2] = 1; /* z */
+
+    block[0] = 2; /* x */
+    block[1] = 1; /* y */
+    block[2] = 4; /* z */
+    ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the starting point to the origin */
+    start[0] -= 1; /* x */
+    start[2] -= 2; /* z */
+    ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the irregular selection to the upper right hand corner */
+    start[0] += 5; /* x */
+    start[2] += 5; /* z */
+    ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    start[0] = 3; /* x */
+    start[1] = 5; /* y */
+    start[2] = 6; /* z */
+
+    stride[0] = 20; /* x */
+    stride[1] = 20; /* y */
+    stride[2] = 20; /* z */
+
+    count[0] = 1; /* x */
+    count[1] = 1; /* y */
+    count[2] = 1; /* z */
+
+    block[0] = 4; /* x */
+    block[1] = 1; /* y */
+    block[2] = 2; /* z */
+    ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the starting point to the origin */
+    start[0] -= 1; /* x */
+    start[2] -= 2; /* z */
+    ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the irregular selection to the upper right hand corner */
+    start[0] += 5; /* x */
+    start[2] += 5; /* z */
+    ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    start[0] = 5; /* x */
+    start[1] = 5; /* y */
+    start[2] = 3; /* z */
+
+    stride[0] = 20; /* x */
+    stride[1] = 20; /* y */
+    stride[2] = 20; /* z */
+
+    count[0] = 1; /* x */
+    count[1] = 1; /* y */
+    count[2] = 1; /* z */
+
+    block[0] = 2; /* x */
+    block[1] = 1; /* y */
+    block[2] = 2; /* z */
+    ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the starting point to the origin */
+    start[0] -= 1; /* x */
+    start[2] -= 2; /* z */
+    ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the irregular selection to the upper right hand corner */
+    start[0] += 5; /* x */
+    start[2] += 5; /* z */
+    ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* QAK: Start here.
+     */
+    /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz axis */
+    small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+    CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple");
+
+    small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+    CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple");
+
+    small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+    CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple");
+
+    start[0] = 8; /* x */
+    start[1] = 2; /* y */
+    start[2] = 3; /* z */
+
+    stride[0] = 20; /* x -- large enough that there will only be one slice */
+    stride[1] = 20; /* y */
+    stride[2] = 20; /* z */
+
+    count[0] = 1; /* x */
+    count[1] = 1; /* y */
+    count[2] = 1; /* z */
+
+    block[0] = 1; /* x */
+    block[1] = 2; /* y */
+    block[2] = 4; /* z */
+    ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the starting point to the origin */
+    start[1] -= 1; /* y */
+    start[2] -= 2; /* z */
+    ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the irregular selection to the upper right hand corner */
+    start[0] += 5; /* x */
+    start[2] += 5; /* z */
+    ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    start[0] = 8; /* x */
+    start[1] = 3; /* y */
+    start[2] = 6; /* z */
+
+    stride[0] = 20; /* x */
+    stride[1] = 20; /* y */
+    stride[2] = 20; /* z */
+
+    count[0] = 1; /* x */
+    count[1] = 1; /* y */
+    count[2] = 1; /* z */
+
+    block[0] = 1; /* x */
+    block[1] = 4; /* y */
+    block[2] = 2; /* z */
+    ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the starting point to the origin */
+    start[1] -= 1; /* y */
+    start[2] -= 2; /* z */
+    ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the irregular selection to the upper right hand corner */
+    start[0] += 5; /* x */
+    start[2] += 5; /* z */
+    ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    start[0] = 8; /* x */
+    start[1] = 5; /* y */
+    start[2] = 3; /* z */
+
+    stride[0] = 20; /* x */
+    stride[1] = 20; /* y */
+    stride[2] = 20; /* z */
+
+    count[0] = 1; /* x */
+    count[1] = 1; /* y */
+    count[2] = 1; /* z */
+
+    block[0] = 1; /* x */
+    block[1] = 2; /* y */
+    block[2] = 2; /* z */
+    ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the starting point to the origin */
+    start[1] -= 1; /* y */
+    start[2] -= 2; /* z */
+    ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* move the irregular selection to the upper right hand corner */
+    start[0] += 5; /* x */
+    start[2] += 5; /* z */
+    ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* setup is done -- run the tests: */
+
+    /* Compare against "xy" selection */
+    check = H5Sselect_shape_same(small_cube_xy_slice_0_sid,
small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xy_slice_1_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xy_slice_2_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "xz" selection */ + check = H5Sselect_shape_same(small_cube_xz_slice_0_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xz_slice_1_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_xz_slice_2_sid, small_square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Compare against "yz" selection */ + check = H5Sselect_shape_same(small_cube_yz_slice_0_sid, small_square_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_yz_slice_1_sid, small_square_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(small_cube_yz_slice_2_sid, small_square_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + /* Close dataspaces */ + ret = H5Sclose(small_square_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xy_slice_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xy_slice_1_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xy_slice_2_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xz_slice_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xz_slice_1_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_xz_slice_2_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_1_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_cube_yz_slice_2_sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_shape_same_dr__smoke_check_3() */ + +/**************************************************************** +** +** test_shape_same_dr__smoke_check_4(): +** +** Create a square, 2-D dataspace (10 X 10), and select +** the entire space. +** +** Similarly, create 3-D and 4-D dataspaces: +** +** (1 X 10 X 10) +** (10 X 1 X 10) +** (10 X 10 X 1) +** (10 X 10 X 10) +** +** (1 X 1 X 10 X 10) +** (1 X 10 X 1 X 10) +** (1 X 10 X 10 X 1) +** (10 X 1 X 1 X 10) +** (10 X 1 X 10 X 1) +** (10 X 10 X 1 X 1) +** (10 X 1 X 10 X 10) +** +** And select these entire spaces as well. +** +** Compare the 2-D space against all the other spaces +** with H5Sselect_shape_same(). The (1 X 10 X 10) & +** (1 X 1 X 10 X 10) should return TRUE. All others +** should return FALSE. 
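+**
+**    (In other words, the expected results above seem to amount to
+**    the following rule: a space compares as the same shape as the
+**    fully selected 2-D square only when its two fastest changing
+**    dimensions are 10 X 10 and every additional, slower changing
+**    dimension has extent 1.)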
+** +****************************************************************/ +static void +test_shape_same_dr__smoke_check_4(void) +{ + hid_t square_sid; + hid_t three_d_space_0_sid; + hid_t three_d_space_1_sid; + hid_t three_d_space_2_sid; + hid_t three_d_space_3_sid; + hid_t four_d_space_0_sid; + hid_t four_d_space_1_sid; + hid_t four_d_space_2_sid; + hid_t four_d_space_3_sid; + hid_t four_d_space_4_sid; + hid_t four_d_space_5_sid; + hid_t four_d_space_6_sid; + hsize_t dims[] = {10, 10, 10, 10}; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + MESSAGE(7, (" Smoke check 4: Spaces of different dimension but same size.\n")); + + /* Create the 10 x 10 dataspace */ + square_sid = H5Screate_simple(2, dims, NULL); + CHECK(square_sid, FAIL, "H5Screate_simple"); + + /* create (1 X 10 X 10) dataspace */ + dims[0] = 1; + dims[1] = 10; + dims[2] = 10; + three_d_space_0_sid = H5Screate_simple(3, dims, NULL); + CHECK(three_d_space_0_sid, FAIL, "H5Screate_simple"); + + /* create (10 X 1 X 10) dataspace */ + dims[0] = 10; + dims[1] = 1; + dims[2] = 10; + three_d_space_1_sid = H5Screate_simple(3, dims, NULL); + CHECK(three_d_space_1_sid, FAIL, "H5Screate_simple"); + + /* create (10 X 10 X 1) dataspace */ + dims[0] = 10; + dims[1] = 10; + dims[2] = 1; + three_d_space_2_sid = H5Screate_simple(3, dims, NULL); + CHECK(three_d_space_2_sid, FAIL, "H5Screate_simple"); + + /* create (10 X 10 X 10) dataspace */ + dims[0] = 10; + dims[1] = 10; + dims[2] = 10; + three_d_space_3_sid = H5Screate_simple(3, dims, NULL); + CHECK(three_d_space_3_sid, FAIL, "H5Screate_simple"); + + /* create (1 X 1 X 10 X 10) dataspace */ + dims[0] = 1; + dims[1] = 1; + dims[2] = 10; + dims[3] = 10; + four_d_space_0_sid = H5Screate_simple(4, dims, NULL); + CHECK(four_d_space_0_sid, FAIL, "H5Screate_simple"); + + /* create (1 X 10 X 1 X 10) dataspace */ + dims[0] = 1; + dims[1] = 10; + dims[2] = 1; + dims[3] = 10; + four_d_space_1_sid = H5Screate_simple(4, dims, NULL); + CHECK(four_d_space_1_sid, FAIL, "H5Screate_simple"); + + /* create (1 X 10 X 10 X 1) dataspace */ + dims[0] = 1; + dims[1] = 10; + dims[2] = 10; + dims[3] = 1; + four_d_space_2_sid = H5Screate_simple(4, dims, NULL); + CHECK(four_d_space_2_sid, FAIL, "H5Screate_simple"); + + /* create (10 X 1 X 1 X 10) dataspace */ + dims[0] = 10; + dims[1] = 1; + dims[2] = 1; + dims[3] = 10; + four_d_space_3_sid = H5Screate_simple(4, dims, NULL); + CHECK(four_d_space_3_sid, FAIL, "H5Screate_simple"); + + /* create (10 X 1 X 10 X 1) dataspace */ + dims[0] = 10; + dims[1] = 1; + dims[2] = 10; + dims[3] = 1; + four_d_space_4_sid = H5Screate_simple(4, dims, NULL); + CHECK(four_d_space_4_sid, FAIL, "H5Screate_simple"); + + /* create (10 X 10 X 1 X 1) dataspace */ + dims[0] = 10; + dims[1] = 10; + dims[2] = 1; + dims[3] = 1; + four_d_space_5_sid = H5Screate_simple(4, dims, NULL); + CHECK(four_d_space_5_sid, FAIL, "H5Screate_simple"); + + /* create (10 X 1 X 10 X 10) dataspace */ + dims[0] = 10; + dims[1] = 1; + dims[2] = 10; + dims[3] = 10; + four_d_space_6_sid = H5Screate_simple(4, dims, NULL); + CHECK(four_d_space_6_sid, FAIL, "H5Screate_simple"); + + /* setup is done -- run the tests: */ + + check = H5Sselect_shape_same(three_d_space_0_sid, square_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(three_d_space_1_sid, square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(three_d_space_2_sid, square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = 
H5Sselect_shape_same(three_d_space_3_sid, square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(four_d_space_0_sid, square_sid); + VERIFY(check, TRUE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(four_d_space_1_sid, square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(four_d_space_2_sid, square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(four_d_space_3_sid, square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(four_d_space_4_sid, square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(four_d_space_5_sid, square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + check = H5Sselect_shape_same(four_d_space_6_sid, square_sid); + VERIFY(check, FALSE, "H5Sselect_shape_same"); + + /* Close dataspaces */ + ret = H5Sclose(square_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(three_d_space_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(three_d_space_1_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(three_d_space_2_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(three_d_space_3_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(four_d_space_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(four_d_space_1_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(four_d_space_2_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(four_d_space_3_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(four_d_space_4_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(four_d_space_5_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(four_d_space_6_sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_shape_same_dr__smoke_check_4() */ + +/**************************************************************** +** +** test_shape_same_dr__full_space_vs_slice(): Tests selection +** of a full n-cube dataspace vs an n-dimensional slice of +** of an m-cube (m > n) in a call to H5Sselect_shape_same(). +** Note that this test does not require the n-cube and the +** n-dimensional slice to have the same rank (although +** H5Sselect_shape_same() should always return FALSE if +** they don't). +** +** Per Quincey's suggestion, only test up to 5 dimensional +** spaces. 
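+**
+**    (A "slice" here is a hyperslab that selects the full edge_size
+**    extent in each selected dimension, and a single element -- at
+**    the given offset -- in each unselected dimension; see the
+**    start/block setup in the function body below.)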
+** +****************************************************************/ +static void +test_shape_same_dr__full_space_vs_slice(int test_num, int small_rank, int large_rank, int offset, + hsize_t edge_size, hbool_t dim_selected[], hbool_t expected_result) +{ + char test_desc_0[128]; + char test_desc_1[256]; + int i; + hid_t n_cube_0_sid; /* the fully selected hyper cube */ + hid_t n_cube_1_sid; /* the hyper cube in which a slice is selected */ + hsize_t dims[SS_DR_MAX_RANK]; + hsize_t start[SS_DR_MAX_RANK]; + hsize_t *start_ptr; + hsize_t stride[SS_DR_MAX_RANK]; + hsize_t *stride_ptr; + hsize_t count[SS_DR_MAX_RANK]; + hsize_t *count_ptr; + hsize_t block[SS_DR_MAX_RANK]; + hsize_t *block_ptr; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + HDassert(0 < small_rank); + HDassert(small_rank <= large_rank); + HDassert(large_rank <= SS_DR_MAX_RANK); + HDassert(0 <= offset); + HDassert(offset < large_rank); + HDassert(edge_size > 0); + HDassert(edge_size <= 1000); + + HDsnprintf(test_desc_0, sizeof(test_desc_0), "\tn-cube slice through m-cube (n <= m) test %d.\n", + test_num); + MESSAGE(7, ("%s", test_desc_0)); + + /* This statement must be updated if SS_DR_MAX_RANK is changed */ + HDsnprintf(test_desc_1, sizeof(test_desc_1), + "\t\tranks: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d.\n", small_rank, large_rank, offset, + (int)dim_selected[0], (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3], + (int)dim_selected[4]); + MESSAGE(7, ("%s", test_desc_1)); + + /* copy the edge size into the dims array */ + for (i = 0; i < SS_DR_MAX_RANK; i++) + dims[i] = edge_size; + + /* Create the small n-cube */ + n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL); + CHECK(n_cube_0_sid, FAIL, "H5Screate_simple"); + + /* Create the large n-cube */ + n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL); + CHECK(n_cube_1_sid, FAIL, "H5Screate_simple"); + + /* set up start, stride, count, and block for the hyperslab selection */ + for (i = 0; i < SS_DR_MAX_RANK; i++) { + stride[i] = 2 * edge_size; /* a bit silly in this case */ + count[i] = 1; + if (dim_selected[i]) { + start[i] = 0; + block[i] = edge_size; + } + else { + start[i] = (hsize_t)offset; + block[i] = 1; + } + } + + /* since large rank may be less than SS_DR_MAX_RANK, we may not + * use the entire start, stride, count, and block arrays. This + * is a problem, since it is inconvenient to set up the dim_selected + * array to reflect the large rank, and thus if large_rank < + * SS_DR_MAX_RANK, we need to hide the lower index entries + * from H5Sselect_hyperslab(). + * + * Do this by setting up pointers to the first valid entry in start, + * stride, count, and block below, and pass these pointers in + * to H5Sselect_hyperslab() instead of the array base addresses. 
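+     *
+     * (For example, with SS_DR_MAX_RANK at its current value of 5 and
+     * large_rank == 3, the offset computed below is i == 2, so
+     * start_ptr == &(start[2]) and H5Sselect_hyperslab() sees only the
+     * last three entries of each array.)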
+ */ + + i = SS_DR_MAX_RANK - large_rank; + HDassert(i >= 0); + + start_ptr = &(start[i]); + stride_ptr = &(stride[i]); + count_ptr = &(count[i]); + block_ptr = &(block[i]); + + /* select the hyperslab */ + ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_SET, start_ptr, stride_ptr, count_ptr, block_ptr); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* setup is done -- run the test: */ + check = H5Sselect_shape_same(n_cube_0_sid, n_cube_1_sid); + VERIFY(check, expected_result, "H5Sselect_shape_same"); + + /* Close dataspaces */ + ret = H5Sclose(n_cube_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(n_cube_1_sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_shape_same_dr__full_space_vs_slice() */ + +/**************************************************************** +** +** test_shape_same_dr__run_full_space_vs_slice_tests(): +** +** Run the test_shape_same_dr__full_space_vs_slice() test +** over a variety of ranks and offsets. +** +** At present, we test H5Sselect_shape_same() with +** fully selected 1, 2, 3, and 4 cubes as one parameter, and +** 1, 2, 3, and 4 dimensional slices through a n-cube of rank +** no more than 5 (and at least the rank of the slice). +** We stop at rank 5, as Quincey suggested that it would be +** sufficient. +** +** All the n-cubes will have lengths of the same size, so +** H5Sselect_shape_same() should return true iff: +** +** 1) the rank for the fully selected n cube equals the +** number of dimensions selected in the slice through the +** m-cube (m >= n). +** +** 2) The dimensions selected in the slice through the m-cube +** are the dimensions with the most quickly changing +** indices. +** +****************************************************************/ +static void +test_shape_same_dr__run_full_space_vs_slice_tests(void) +{ + hbool_t dim_selected[5]; + hbool_t expected_result; + int i, j; + int v, w, x, y, z; + int test_num = 0; + int small_rank; + int large_rank; + hsize_t edge_size = 10; + + for (large_rank = 1; large_rank <= 5; large_rank++) { + for (small_rank = 1; small_rank <= large_rank; small_rank++) { + v = 0; + do { + if (v == 0) + dim_selected[0] = FALSE; + else + dim_selected[0] = TRUE; + + w = 0; + do { + if (w == 0) + dim_selected[1] = FALSE; + else + dim_selected[1] = TRUE; + + x = 0; + do { + if (x == 0) + dim_selected[2] = FALSE; + else + dim_selected[2] = TRUE; + + y = 0; + do { + if (y == 0) + dim_selected[3] = FALSE; + else + dim_selected[3] = TRUE; + + z = 0; + do { + if (z == 0) + dim_selected[4] = FALSE; + else + dim_selected[4] = TRUE; + + /* compute the expected result: */ + i = 0; + j = 4; + expected_result = TRUE; + while ((i < small_rank) && expected_result) { + if (!dim_selected[j]) + expected_result = FALSE; + i++; + j--; + } + + while ((i < large_rank) && expected_result) { + if (dim_selected[j]) + expected_result = FALSE; + i++; + j--; + } + + /* everything is set up -- run the tests */ + + test_shape_same_dr__full_space_vs_slice(test_num++, small_rank, large_rank, 0, + edge_size, dim_selected, + expected_result); + + test_shape_same_dr__full_space_vs_slice(test_num++, small_rank, large_rank, + large_rank / 2, edge_size, + dim_selected, expected_result); + + test_shape_same_dr__full_space_vs_slice(test_num++, small_rank, large_rank, + large_rank - 1, edge_size, + dim_selected, expected_result); + + z++; + } while ((z < 2) && (large_rank >= 1)); + + y++; + } while ((y < 2) && (large_rank >= 2)); + + x++; + } while ((x < 2) && (large_rank >= 3)); + + w++; + } while ((w < 2) && (large_rank >= 4)); + + v++; + } 
while ((v < 2) && (large_rank >= 5)); + } /* end for */ + } /* end for */ +} /* test_shape_same_dr__run_full_space_vs_slice_tests() */ + +/**************************************************************** +** +** test_shape_same_dr__checkerboard(): Tests selection of a +** "checker board" subset of a full n-cube dataspace vs +** a "checker board" n-dimensional slice of an m-cube (m > n). +** in a call to H5Sselect_shape_same(). +** +** Note that this test does not require the n-cube and the +** n-dimensional slice to have the same rank (although +** H5Sselect_shape_same() should always return FALSE if +** they don't). +** +** Per Quincey's suggestion, only test up to 5 dimensional +** spaces. +** +****************************************************************/ +static void +test_shape_same_dr__checkerboard(int test_num, int small_rank, int large_rank, int offset, hsize_t edge_size, + hsize_t checker_size, hbool_t dim_selected[], hbool_t expected_result) +{ + char test_desc_0[128]; + char test_desc_1[256]; + int i; + int dims_selected = 0; + hid_t n_cube_0_sid; /* the checker board selected + * hyper cube + */ + hid_t n_cube_1_sid; /* the hyper cube in which a + * checkerboard slice is selected + */ + hsize_t dims[SS_DR_MAX_RANK]; + hsize_t base_start[2]; + hsize_t start[SS_DR_MAX_RANK]; + hsize_t *start_ptr; + hsize_t base_stride[2]; + hsize_t stride[SS_DR_MAX_RANK]; + hsize_t *stride_ptr; + hsize_t base_count[2]; + hsize_t count[SS_DR_MAX_RANK]; + hsize_t *count_ptr; + hsize_t base_block[2]; + hsize_t block[SS_DR_MAX_RANK]; + hsize_t *block_ptr; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + HDassert(0 < small_rank); + HDassert(small_rank <= large_rank); + HDassert(large_rank <= SS_DR_MAX_RANK); + HDassert(0 < checker_size); + HDassert(checker_size <= edge_size); + HDassert(edge_size <= 1000); + HDassert(0 <= offset); + HDassert(offset < (int)edge_size); + + for (i = SS_DR_MAX_RANK - large_rank; i < SS_DR_MAX_RANK; i++) + if (dim_selected[i] == TRUE) + dims_selected++; + + HDassert(dims_selected >= 0); + HDassert(dims_selected <= large_rank); + + HDsnprintf(test_desc_0, sizeof(test_desc_0), + "\tcheckerboard n-cube slice through m-cube (n <= m) test %d.\n", test_num); + MESSAGE(7, ("%s", test_desc_0)); + + /* This statement must be updated if SS_DR_MAX_RANK is changed */ + HDsnprintf(test_desc_1, sizeof(test_desc_1), + "\tranks: %d/%d edge/chkr size: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d:%d.\n", + small_rank, large_rank, (int)edge_size, (int)checker_size, offset, (int)dim_selected[0], + (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3], (int)dim_selected[4], + dims_selected); + MESSAGE(7, ("%s", test_desc_1)); + + /* copy the edge size into the dims array */ + for (i = 0; i < SS_DR_MAX_RANK; i++) + dims[i] = edge_size; + + /* Create the small n-cube */ + n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL); + CHECK(n_cube_0_sid, FAIL, "H5Screate_simple"); + + /* Select a "checkerboard" pattern in the small n-cube. + * + * In the 1-D case, the "checkerboard" would look like this: + * + * * * - - * * - - * * + * + * and in the 2-D case, it would look like this: + * + * * * - - * * - - * * + * * * - - * * - - * * + * - - * * - - * * - - + * - - * * - - * * - - + * * * - - * * - - * * + * * * - - * * - - * * + * - - * * - - * * - - + * - - * * - - * * - - + * * * - - * * - - * * + * * * - - * * - - * * + * + * In both cases, asterisks indicate selected elements, + * and dashes indicate unselected elements. 
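+     *
+     * (The pattern is built below from two hyperslab calls: the first,
+     * anchored at base_start[0] == 0, selects the checkers whose
+     * coordinates all fall in even numbered checker_size blocks, and
+     * the second, anchored at base_start[1] == checker_size, ORs in
+     * the checkers whose coordinates all fall in odd numbered blocks.)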
+ * + * 3-D and 4-D ascii art is somewhat painful, so I'll + * leave those selections to your imagination. :-) + * + * Note, that since the edge_size and checker_size are + * parameters that are passed in, the selection need + * not look exactly like the selection shown above. + * At present, the function allows checker sizes that + * are not even divisors of the edge size -- thus + * something like the following is also possible: + * + * * * * - - - * * * - + * * * * - - - * * * - + * * * * - - - * * * - + * - - - * * * - - - * + * - - - * * * - - - * + * - - - * * * - - - * + * * * * - - - * * * - + * * * * - - - * * * - + * * * * - - - * * * - + * - - - * * * - - - * + * + * As the above pattern can't be selected in one + * call to H5Sselect_hyperslab(), and since the + * values in the start, stride, count, and block + * arrays will be repeated over all entries in + * the selected space case, and over all selected + * dimensions in the selected hyperslab case, we + * compute these values first and store them in + * in the base_start, base_stride, base_count, + * and base_block arrays. + */ + + base_start[0] = 0; + base_start[1] = checker_size; + + base_stride[0] = 2 * checker_size; + base_stride[1] = 2 * checker_size; + + /* Note that the following computation depends on the C99 + * requirement that integer division discard any fraction + * (truncation towards zero) to function correctly. As we + * now require C99, this shouldn't be a problem, but noting + * it may save us some pain if we are ever obliged to support + * pre-C99 compilers again. + */ + + base_count[0] = edge_size / (checker_size * 2); + if ((edge_size % (checker_size * 2)) > 0) + base_count[0]++; + + base_count[1] = (edge_size - checker_size) / (checker_size * 2); + if (((edge_size - checker_size) % (checker_size * 2)) > 0) + base_count[1]++; + + base_block[0] = checker_size; + base_block[1] = checker_size; + + /* now setup start, stride, count, and block arrays for + * the first call to H5Sselect_hyperslab(). + */ + for (i = 0; i < SS_DR_MAX_RANK; i++) { + start[i] = base_start[0]; + stride[i] = base_stride[0]; + count[i] = base_count[0]; + block[i] = base_block[0]; + } /* end for */ + + ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* if small_rank == 1, or if edge_size == checker_size, we + * are done, as either there is no added dimension in which + * to place offset selected "checkers". + * + * Otherwise, set up start, stride, count and block, and + * make the additional selection. + */ + + if ((small_rank > 1) && (checker_size < edge_size)) { + for (i = 0; i < SS_DR_MAX_RANK; i++) { + start[i] = base_start[1]; + stride[i] = base_stride[1]; + count[i] = base_count[1]; + block[i] = base_block[1]; + } /* end for */ + + ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + } /* end if */ + + /* Weirdness alert: + * + * Some how, it seems that selections can extend beyond the + * boundaries of the target dataspace -- hence the following + * code to manually clip the selection back to the dataspace + * proper. 
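+     *
+     * (The H5S_SELECT_AND hyperslab below intersects the current
+     * selection with a single block covering the full [0, edge_size)
+     * range in every dimension, which discards any selected elements
+     * that fall outside the dataspace extent.)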
+ */ + for (i = 0; i < SS_DR_MAX_RANK; i++) { + start[i] = 0; + stride[i] = edge_size; + count[i] = 1; + block[i] = edge_size; + } /* end for */ + + ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_AND, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create the large n-cube */ + n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL); + CHECK(n_cube_1_sid, FAIL, "H5Screate_simple"); + + /* Now select the checkerboard selection in the (possibly larger) n-cube. + * + * Since we have already calculated the base start, stride, count, + * and block, re-use the values in setting up start, stride, count, + * and block. + */ + for (i = 0; i < SS_DR_MAX_RANK; i++) { + if (dim_selected[i]) { + start[i] = base_start[0]; + stride[i] = base_stride[0]; + count[i] = base_count[0]; + block[i] = base_block[0]; + } /* end if */ + else { + start[i] = (hsize_t)offset; + stride[i] = (hsize_t)(2 * edge_size); + count[i] = 1; + block[i] = 1; + } /* end else */ + } /* end for */ + + /* Since large rank may be less than SS_DR_MAX_RANK, we may not + * use the entire start, stride, count, and block arrays. This + * is a problem, since it is inconvenient to set up the dim_selected + * array to reflect the large rank, and thus if large_rank < + * SS_DR_MAX_RANK, we need to hide the lower index entries + * from H5Sselect_hyperslab(). + * + * Do this by setting up pointers to the first valid entry in start, + * stride, count, and block below, and pass these pointers in + * to H5Sselect_hyperslab() instead of the array base addresses. + */ + + i = SS_DR_MAX_RANK - large_rank; + HDassert(i >= 0); + + start_ptr = &(start[i]); + stride_ptr = &(stride[i]); + count_ptr = &(count[i]); + block_ptr = &(block[i]); + + /* select the hyperslab */ + ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_SET, start_ptr, stride_ptr, count_ptr, block_ptr); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* As before, if the number of dimensions selected is less than or + * equal to 1, or if edge_size == checker_size, we are done, as + * either there is no added dimension in which to place offset selected + * "checkers", or the hyperslab is completely occupied by one + * "checker". + * + * Otherwise, set up start, stride, count and block, and + * make the additional selection. + */ + if ((dims_selected > 1) && (checker_size < edge_size)) { + for (i = 0; i < SS_DR_MAX_RANK; i++) { + if (dim_selected[i]) { + start[i] = base_start[1]; + stride[i] = base_stride[1]; + count[i] = base_count[1]; + block[i] = base_block[1]; + } /* end if */ + else { + start[i] = (hsize_t)offset; + stride[i] = (hsize_t)(2 * edge_size); + count[i] = 1; + block[i] = 1; + } /* end else */ + } /* end for */ + + ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_OR, start_ptr, stride_ptr, count_ptr, block_ptr); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + } /* end if */ + + /* Weirdness alert: + * + * Again, it seems that selections can extend beyond the + * boundaries of the target dataspace -- hence the following + * code to manually clip the selection back to the dataspace + * proper. 
+ */ + for (i = 0; i < SS_DR_MAX_RANK; i++) { + start[i] = 0; + stride[i] = edge_size; + count[i] = 1; + block[i] = edge_size; + } /* end for */ + + ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_AND, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* setup is done -- run the test: */ + check = H5Sselect_shape_same(n_cube_0_sid, n_cube_1_sid); + VERIFY(check, expected_result, "H5Sselect_shape_same"); + + /* Close dataspaces */ + ret = H5Sclose(n_cube_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(n_cube_1_sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_shape_same_dr__checkerboard() */ + +/**************************************************************** +** +** test_shape_same_dr__run_checkerboard_tests(): +** +** In this set of tests, we test H5Sselect_shape_same() +** with a "checkerboard" selection of 1, 2, 3, and 4 cubes as +** one parameter, and 1, 2, 3, and 4 dimensional checkerboard +** slices through a n-cube of rank no more than 5 (and at +** least the rank of the slice). +** +** All the n-cubes will have lengths of the same size, so +** H5Sselect_shape_same() should return true iff: +** +** 1) the rank of the n cube equals the number of dimensions +** selected in the checker board slice through the m-cube +** (m >= n). +** +** 2) The dimensions selected in the checkerboard slice +** through the m-cube are the dimensions with the most +** quickly changing indices. +** +****************************************************************/ +static void +test_shape_same_dr__run_checkerboard_tests(void) +{ + hbool_t dim_selected[5]; + hbool_t expected_result; + int i, j; + int v, w, x, y, z; + int test_num = 0; + int small_rank; + int large_rank; + + for (large_rank = 1; large_rank <= 5; large_rank++) { + for (small_rank = 1; small_rank <= large_rank; small_rank++) { + v = 0; + do { + if (v == 0) + dim_selected[0] = FALSE; + else + dim_selected[0] = TRUE; + + w = 0; + do { + if (w == 0) + dim_selected[1] = FALSE; + else + dim_selected[1] = TRUE; + + x = 0; + do { + if (x == 0) + dim_selected[2] = FALSE; + else + dim_selected[2] = TRUE; + + y = 0; + do { + if (y == 0) + dim_selected[3] = FALSE; + else + dim_selected[3] = TRUE; + + z = 0; + do { + if (z == 0) + dim_selected[4] = FALSE; + else + dim_selected[4] = TRUE; + + /* compute the expected result: */ + i = 0; + j = 4; + expected_result = TRUE; + while ((i < small_rank) && expected_result) { + if (!dim_selected[j]) + expected_result = FALSE; + i++; + j--; + } /* end while */ + + while ((i < large_rank) && expected_result) { + if (dim_selected[j]) + expected_result = FALSE; + i++; + j--; + } /* end while */ + + /* everything is set up -- run the tests */ + + /* run test with edge size 16, checker + * size 1, and a variety of offsets + */ + test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 0, + /* edge_size */ 16, + /* checker_size */ 1, dim_selected, + expected_result); + + test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 5, + /* edge_size */ 16, + /* checker_size */ 1, dim_selected, + expected_result); + + test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 15, + /* edge_size */ 16, + /* checker_size */ 1, dim_selected, + expected_result); + + /* run test with edge size 10, checker + * size 2, and a variety of offsets + */ + test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 0, + /* edge_size */ 10, + /* checker_size */ 2, dim_selected, + expected_result); + + 
test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 5, + /* edge_size */ 10, + /* checker_size */ 2, dim_selected, + expected_result); + + test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 9, + /* edge_size */ 10, + /* checker_size */ 2, dim_selected, + expected_result); + + /* run test with edge size 10, checker + * size 3, and a variety of offsets + */ + test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 0, + /* edge_size */ 10, + /* checker_size */ 3, dim_selected, + expected_result); + + test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 5, + /* edge_size */ 10, + /* checker_size */ 3, dim_selected, + expected_result); + + test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 9, + /* edge_size */ 10, + /* checker_size */ 3, dim_selected, + expected_result); + + /* run test with edge size 8, checker + * size 8, and a variety of offsets + */ + test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 0, + /* edge_size */ 8, + /* checker_size */ 8, dim_selected, + expected_result); + + test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 4, + /* edge_size */ 8, + /* checker_size */ 8, dim_selected, + expected_result); + + test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, + /* offset */ 7, + /* edge_size */ 8, + /* checker_size */ 8, dim_selected, + expected_result); + + z++; + } while ((z < 2) && (large_rank >= 1)); + + y++; + } while ((y < 2) && (large_rank >= 2)); + + x++; + } while ((x < 2) && (large_rank >= 3)); + + w++; + } while ((w < 2) && (large_rank >= 4)); + + v++; + } while ((v < 2) && (large_rank >= 5)); + } /* end for */ + } /* end for */ +} /* test_shape_same_dr__run_checkerboard_tests() */ + +/**************************************************************** +** +** test_shape_same_dr__irregular(): +** +** Tests selection of an "irregular" subset of a full +** n-cube dataspace vs an identical "irregular" subset +** of an n-dimensional slice of an m-cube (m > n). +** in a call to H5Sselect_shape_same(). +** +** Note that this test does not require the n-cube and the +** n-dimensional slice to have the same rank (although +** H5Sselect_shape_same() should always return FALSE if +** they don't). +** +****************************************************************/ +static void +test_shape_same_dr__irregular(int test_num, int small_rank, int large_rank, int pattern_offset, + int slice_offset, hbool_t dim_selected[], hbool_t expected_result) +{ + char test_desc_0[128]; + char test_desc_1[256]; + int edge_size = 10; + int i; + int j; + int k; + int dims_selected = 0; + hid_t n_cube_0_sid; /* the hyper cube containing + * an irregular selection + */ + hid_t n_cube_1_sid; /* the hyper cube in which a + * slice contains an irregular + * selection. 
+ */ + hsize_t dims[SS_DR_MAX_RANK]; + hsize_t start_0[SS_DR_MAX_RANK] = {2, 2, 2, 2, 5}; + hsize_t stride_0[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; + hsize_t count_0[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; + hsize_t block_0[SS_DR_MAX_RANK] = {2, 2, 2, 2, 3}; + + hsize_t start_1[SS_DR_MAX_RANK] = {2, 2, 2, 5, 2}; + hsize_t stride_1[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; + hsize_t count_1[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; + hsize_t block_1[SS_DR_MAX_RANK] = {2, 2, 2, 3, 2}; + + hsize_t start_2[SS_DR_MAX_RANK] = {2, 2, 5, 2, 2}; + hsize_t stride_2[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; + hsize_t count_2[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; + hsize_t block_2[SS_DR_MAX_RANK] = {2, 2, 3, 2, 2}; + + hsize_t start_3[SS_DR_MAX_RANK] = {2, 5, 2, 2, 2}; + hsize_t stride_3[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; + hsize_t count_3[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; + hsize_t block_3[SS_DR_MAX_RANK] = {2, 3, 2, 2, 2}; + + hsize_t start_4[SS_DR_MAX_RANK] = {5, 2, 2, 2, 2}; + hsize_t stride_4[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; + hsize_t count_4[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; + hsize_t block_4[SS_DR_MAX_RANK] = {3, 2, 2, 2, 2}; + + hsize_t clip_start[SS_DR_MAX_RANK] = {0, 0, 0, 0, 0}; + hsize_t clip_stride[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; + hsize_t clip_count[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; + hsize_t clip_block[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; + + hsize_t *(starts[SS_DR_MAX_RANK]) = {start_0, start_1, start_2, start_3, start_4}; + hsize_t *(strides[SS_DR_MAX_RANK]) = {stride_0, stride_1, stride_2, stride_3, stride_4}; + hsize_t *(counts[SS_DR_MAX_RANK]) = {count_0, count_1, count_2, count_3, count_4}; + hsize_t *(blocks[SS_DR_MAX_RANK]) = {block_0, block_1, block_2, block_3, block_4}; + + hsize_t start[SS_DR_MAX_RANK]; + hsize_t *start_ptr; + hsize_t stride[SS_DR_MAX_RANK]; + hsize_t *stride_ptr; + hsize_t count[SS_DR_MAX_RANK]; + hsize_t *count_ptr; + hsize_t block[SS_DR_MAX_RANK]; + hsize_t *block_ptr; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + HDassert(0 < small_rank); + HDassert(small_rank <= large_rank); + HDassert(large_rank <= SS_DR_MAX_RANK); + HDassert(9 <= edge_size); + HDassert(edge_size <= 1000); + HDassert(0 <= slice_offset); + HDassert(slice_offset < edge_size); + HDassert(-2 <= pattern_offset); + HDassert(pattern_offset <= 2); + + for (i = SS_DR_MAX_RANK - large_rank; i < SS_DR_MAX_RANK; i++) + if (dim_selected[i] == TRUE) + dims_selected++; + + HDassert(dims_selected >= 0); + HDassert(dims_selected <= large_rank); + + HDsnprintf(test_desc_0, sizeof(test_desc_0), + "\tirregular sub set of n-cube slice through m-cube (n <= m) test %d.\n", test_num); + MESSAGE(7, ("%s", test_desc_0)); + + /* This statement must be updated if SS_DR_MAX_RANK is changed */ + HDsnprintf(test_desc_1, sizeof(test_desc_1), + "\tranks: %d/%d edge: %d s/p offset: %d/%d dim_selected: %d/%d/%d/%d/%d:%d.\n", small_rank, + large_rank, edge_size, slice_offset, pattern_offset, (int)dim_selected[0], + (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3], (int)dim_selected[4], + dims_selected); + MESSAGE(7, ("%s", test_desc_1)); + + /* copy the edge size into the dims array */ + for (i = 0; i < SS_DR_MAX_RANK; i++) + dims[i] = (hsize_t)edge_size; + + /* Create the small n-cube */ + n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL); + CHECK(n_cube_0_sid, FAIL, "H5Screate_simple"); + + /* Select an "irregular" pattern in the small n-cube. 
This pattern can be thought of as a set of four 3 x 2 x 2 x 2
+     * four dimensional prisms, each parallel to one of the
+     * axes and none of them intersecting with the others.
+     *
+     * In the lesser dimensional cases, this 4D pattern is
+     * projected onto the lower dimensional space.
+     *
+     * In the 1-D case, the projection of the pattern looks
+     * like this:
+     *
+     *   - - * * - * * * - -
+     *   0 1 2 3 4 5 6 7 8 9 x
+     *
+     * and in the 2-D case, it would look like this:
+     *
+     *
+     *     y
+     *     9 - - - - - - - - - -
+     *     8 - - - - - - - - - -
+     *     7 - - * * - - - - - -
+     *     6 - - * * - - - - - -
+     *     5 - - * * - - - - - -
+     *     4 - - - - - - - - - -
+     *     3 - - * * - * * * - -
+     *     2 - - * * - * * * - -
+     *     1 - - - - - - - - - -
+     *     0 - - - - - - - - - -
+     *       0 1 2 3 4 5 6 7 8 9 x
+     *
+     * In both cases, asterisks indicate selected elements,
+     * and dashes indicate unselected elements.
+     *
+     * Note that in this case, since the edge size is fixed,
+     * the pattern does not change. However, we do use the
+     * displacement parameter to allow it to be moved around
+     * within the n-cube or hyperslab.
+     */
+
+    /* first, ensure that the small n-cube has no selection */
+    ret = H5Sselect_none(n_cube_0_sid);
+    CHECK(ret, FAIL, "H5Sselect_none");
+
+    /* now, select the irregular pattern */
+    for (i = 0; i < SS_DR_MAX_RANK; i++) {
+        ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_OR, starts[i], strides[i], counts[i], blocks[i]);
+        CHECK(ret, FAIL, "H5Sselect_hyperslab");
+    } /* end for */
+
+    /* finally, clip the selection to ensure that it lies fully
+     * within the n-cube.
+     */
+    ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_AND, clip_start, clip_stride, clip_count, clip_block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Create the large n-cube */
+    n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL);
+    CHECK(n_cube_1_sid, FAIL, "H5Screate_simple");
+
+    /* Ensure that the large n-cube has no selection */
+    ret = H5Sselect_none(n_cube_1_sid);
+    CHECK(ret, FAIL, "H5Sselect_none");
+
+    /* Since large rank may be less than SS_DR_MAX_RANK, we may not
+     * use the entire start, stride, count, and block arrays. This
+     * is a problem, since it is inconvenient to set up the dim_selected
+     * array to reflect the large rank, and thus if large_rank <
+     * SS_DR_MAX_RANK, we need to hide the lower index entries
+     * from H5Sselect_hyperslab().
+     *
+     * Do this by setting up pointers to the first valid entry in start,
+     * stride, count, and block below, and pass these pointers in
+     * to H5Sselect_hyperslab() instead of the array base addresses.
+     */
+
+    i = SS_DR_MAX_RANK - large_rank;
+    HDassert(i >= 0);
+
+    start_ptr  = &(start[i]);
+    stride_ptr = &(stride[i]);
+    count_ptr  = &(count[i]);
+    block_ptr  = &(block[i]);
+
+    /* Now select the irregular selection in the (possibly larger) n-cube.
+     *
+     * The basic idea is to project the pattern used in the smaller n-cube
+     * onto the dimensions selected in the larger n-cube, with the displacement
+     * specified.
+ */ + for (i = 0; i < SS_DR_MAX_RANK; i++) { + j = 0; + for (k = 0; k < SS_DR_MAX_RANK; k++) { + if (dim_selected[k]) { + start[k] = (starts[i])[j] + (hsize_t)pattern_offset; + stride[k] = (strides[i])[j]; + count[k] = (counts[i])[j]; + block[k] = (blocks[i])[j]; + j++; + } /* end if */ + else { + start[k] = (hsize_t)slice_offset; + stride[k] = (hsize_t)(2 * edge_size); + count[k] = 1; + block[k] = 1; + } /* end else */ + } /* end for */ + + /* select the hyperslab */ + ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_OR, start_ptr, stride_ptr, count_ptr, block_ptr); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + } /* end for */ + + /* it is possible that the selection extends beyond the dataspace. + * clip the selection to ensure that it doesn't. + */ + ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_AND, clip_start, clip_stride, clip_count, clip_block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* setup is done -- run the test: */ + check = H5Sselect_shape_same(n_cube_0_sid, n_cube_1_sid); + VERIFY(check, expected_result, "H5Sselect_shape_same"); + + /* Close dataspaces */ + ret = H5Sclose(n_cube_0_sid); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(n_cube_1_sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_shape_same_dr__irregular() */ + +/**************************************************************** +** +** test_shape_same_dr__run_irregular_tests(): +** +** In this set of tests, we test H5Sselect_shape_same() +** with an "irregular" subselection of 1, 2, 3, and 4 cubes as +** one parameter, and irregular subselections of 1, 2, 3, +** and 4 dimensional slices through a n-cube of rank no more +** than 5 (and at least the rank of the slice) as the other. +** Note that the "irregular" selection may be offset between +** the n-cube and the slice. +** +** All the irregular selections will be identical (modulo rank) +** so H5Sselect_shape_same() should return true iff: +** +** 1) the rank of the n cube equals the number of dimensions +** selected in the irregular slice through the m-cube +** (m >= n). +** +** 2) The dimensions selected in the irregular slice +** through the m-cube are the dimensions with the most +** quickly changing indices. 
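+**
+**    (For example, with small_rank == 2 and large_rank == 4, the
+**    expected result is TRUE only when dim_selected[] is
+**    {FALSE, FALSE, FALSE, TRUE, TRUE} -- i.e. only the two fastest
+**    changing dimensions of the 4-cube are selected.)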
+** +****************************************************************/ +static void +test_shape_same_dr__run_irregular_tests(void) +{ + hbool_t dim_selected[5]; + hbool_t expected_result; + int i, j; + int v, w, x, y, z; + int test_num = 0; + int small_rank; + int large_rank; + + for (large_rank = 1; large_rank <= 5; large_rank++) { + for (small_rank = 1; small_rank <= large_rank; small_rank++) { + v = 0; + do { + if (v == 0) + dim_selected[0] = FALSE; + else + dim_selected[0] = TRUE; + + w = 0; + do { + if (w == 0) + dim_selected[1] = FALSE; + else + dim_selected[1] = TRUE; + + x = 0; + do { + if (x == 0) + dim_selected[2] = FALSE; + else + dim_selected[2] = TRUE; + + y = 0; + do { + if (y == 0) + dim_selected[3] = FALSE; + else + dim_selected[3] = TRUE; + + z = 0; + do { + if (z == 0) + dim_selected[4] = FALSE; + else + dim_selected[4] = TRUE; + + /* compute the expected result: */ + i = 0; + j = 4; + expected_result = TRUE; + while ((i < small_rank) && expected_result) { + if (!dim_selected[j]) + expected_result = FALSE; + i++; + j--; + } /* end while */ + + while ((i < large_rank) && expected_result) { + if (dim_selected[j]) + expected_result = FALSE; + i++; + j--; + } /* end while */ + + /* everything is set up -- run the tests */ + + test_shape_same_dr__irregular(test_num++, small_rank, large_rank, + /* pattern_offset */ -2, + /* slice_offset */ 0, dim_selected, + expected_result); + + test_shape_same_dr__irregular(test_num++, small_rank, large_rank, + /* pattern_offset */ -2, + /* slice_offset */ 4, dim_selected, + expected_result); + + test_shape_same_dr__irregular(test_num++, small_rank, large_rank, + /* pattern_offset */ -2, + /* slice_offset */ 9, dim_selected, + expected_result); + + test_shape_same_dr__irregular(test_num++, small_rank, large_rank, + /* pattern_offset */ 0, + /* slice_offset */ 0, dim_selected, + expected_result); + + test_shape_same_dr__irregular(test_num++, small_rank, large_rank, + /* pattern_offset */ 0, + /* slice_offset */ 6, dim_selected, + expected_result); + + test_shape_same_dr__irregular(test_num++, small_rank, large_rank, + /* pattern_offset */ 0, + /* slice_offset */ 9, dim_selected, + expected_result); + + test_shape_same_dr__irregular(test_num++, small_rank, large_rank, + /* pattern_offset */ 2, + /* slice_offset */ 0, dim_selected, + expected_result); + + test_shape_same_dr__irregular(test_num++, small_rank, large_rank, + /* pattern_offset */ 2, + /* slice_offset */ 5, dim_selected, + expected_result); + + test_shape_same_dr__irregular(test_num++, small_rank, large_rank, + /* pattern_offset */ 2, + /* slice_offset */ 9, dim_selected, + expected_result); + + z++; + } while ((z < 2) && (large_rank >= 1)); + + y++; + } while ((y < 2) && (large_rank >= 2)); + + x++; + } while ((x < 2) && (large_rank >= 3)); + + w++; + } while ((w < 2) && (large_rank >= 4)); + + v++; + } while ((v < 2) && (large_rank >= 5)); + } /* end for */ + } /* end for */ +} /* test_shape_same_dr__run_irregular_tests() */ + +/**************************************************************** +** +** test_shape_same_dr(): Tests selections on dataspace with +** different ranks, to verify that "shape same" routine +** is now handling this case correctly. 
+** +****************************************************************/ +static void +test_shape_same_dr(void) +{ + /* Output message about test being performed */ + MESSAGE(6, ("Testing Same Shape/Different Rank Comparisons\n")); + + /* first run some smoke checks */ + test_shape_same_dr__smoke_check_1(); + test_shape_same_dr__smoke_check_2(); + test_shape_same_dr__smoke_check_3(); + test_shape_same_dr__smoke_check_4(); + + /* now run more intensive tests. */ + test_shape_same_dr__run_full_space_vs_slice_tests(); + test_shape_same_dr__run_checkerboard_tests(); + test_shape_same_dr__run_irregular_tests(); +} /* test_shape_same_dr() */ + +/**************************************************************** +** +** test_space_rebuild(): Tests selection rebuild routine, +** We will test whether selection in span-tree form can be rebuilt +** into a regular selection. +** +** +****************************************************************/ +static void +test_space_rebuild(void) +{ + /* regular space IDs in span-tree form */ + hid_t sid_reg1, sid_reg2, sid_reg3, sid_reg4, sid_reg5; + + /* Original regular Space IDs */ + hid_t sid_reg_ori1, sid_reg_ori2, sid_reg_ori3, sid_reg_ori4, sid_reg_ori5; + + /* Irregular space IDs */ + hid_t sid_irreg1, sid_irreg2, sid_irreg3, sid_irreg4, sid_irreg5; + + /* rebuild status state */ +#if 0 + H5S_diminfo_valid_t rebuild_stat1, rebuild_stat2; + htri_t rebuild_check; +#endif + herr_t ret; + + /* dimensions of rank 1 to rank 5 */ + hsize_t dims1[] = {SPACERE1_DIM0}; + hsize_t dims2[] = {SPACERE2_DIM0, SPACERE2_DIM1}; + hsize_t dims3[] = {SPACERE3_DIM0, SPACERE3_DIM1, SPACERE3_DIM2}; + hsize_t dims4[] = {SPACERE4_DIM0, SPACERE4_DIM1, SPACERE4_DIM2, SPACERE4_DIM3}; + hsize_t dims5[] = {SPACERE5_DIM0, SPACERE5_DIM1, SPACERE5_DIM2, SPACERE5_DIM3, SPACERE5_DIM4}; + + /* The start of the hyperslab */ + hsize_t start1[SPACERE1_RANK], start2[SPACERE2_RANK], start3[SPACERE3_RANK], start4[SPACERE4_RANK], + start5[SPACERE5_RANK]; + + /* The stride of the hyperslab */ + hsize_t stride1[SPACERE1_RANK], stride2[SPACERE2_RANK], stride3[SPACERE3_RANK], stride4[SPACERE4_RANK], + stride5[SPACERE5_RANK]; + + /* The number of blocks for the hyperslab */ + hsize_t count1[SPACERE1_RANK], count2[SPACERE2_RANK], count3[SPACERE3_RANK], count4[SPACERE4_RANK], + count5[SPACERE5_RANK]; + + /* The size of each block for the hyperslab */ + hsize_t block1[SPACERE1_RANK], block2[SPACERE2_RANK], block3[SPACERE3_RANK], block4[SPACERE4_RANK], + block5[SPACERE5_RANK]; + + /* Declarations for special test of rebuild */ + hid_t sid_spec; + + /* Output message about test being performed */ + MESSAGE(6, ("Testing functionality to rebuild regular hyperslab selection\n")); + + MESSAGE(7, ("Testing functionality to rebuild 1-D hyperslab selection\n")); + + /* Create 1-D dataspace */ + sid_reg1 = H5Screate_simple(SPACERE1_RANK, dims1, NULL); + sid_reg_ori1 = H5Screate_simple(SPACERE1_RANK, dims1, NULL); + + /* Build up the original one dimensional regular selection */ + start1[0] = 1; + count1[0] = 3; + stride1[0] = 5; + block1[0] = 4; + ret = H5Sselect_hyperslab(sid_reg_ori1, H5S_SELECT_SET, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Build up one dimensional regular selection with H5_SELECT_OR, + inside HDF5, it will be treated as an irregular selection. 
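+       (The H5S_SELECT_SET / H5S_SELECT_OR pair below produces blocks
+       at elements 1-4, 6-9 and 11-14, which is exactly the block
+       layout of the original regular selection above (start 1,
+       stride 5, count 3, block 4), so the library should be able to
+       rebuild it into a regular hyperslab.)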
*/ + + start1[0] = 1; + count1[0] = 2; + stride1[0] = 5; + block1[0] = 4; + ret = H5Sselect_hyperslab(sid_reg1, H5S_SELECT_SET, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start1[0] = 11; + count1[0] = 1; + stride1[0] = 5; + block1[0] = 4; + ret = H5Sselect_hyperslab(sid_reg1, H5S_SELECT_OR, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + ret = H5S__get_rebuild_status_test(sid_reg1, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 and rebuild_stat2 should be + * H5S_DIMINFO_VALID_YES. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (ret != FAIL) { + /* In this case, rebuild_check should be TRUE. */ + rebuild_check = H5Sselect_shape_same(sid_reg1, sid_reg_ori1); + CHECK(rebuild_check, FALSE, "H5Sselect_shape_same"); + } +#endif + /* For irregular hyperslab */ + sid_irreg1 = H5Screate_simple(SPACERE1_RANK, dims1, NULL); + + /* Build up one dimensional irregular selection with H5_SELECT_OR */ + start1[0] = 1; + count1[0] = 2; + stride1[0] = 5; + block1[0] = 4; + ret = H5Sselect_hyperslab(sid_irreg1, H5S_SELECT_SET, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start1[0] = 12; /* Just one position switch */ + count1[0] = 1; + stride1[0] = 5; + block1[0] = 4; + ret = H5Sselect_hyperslab(sid_irreg1, H5S_SELECT_OR, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + ret = H5S__get_rebuild_status_test(sid_irreg1, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and + * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + /* No need to do shape comparison */ +#endif + + MESSAGE(7, ("Testing functionality to rebuild 2-D hyperslab selection\n")); + /* Create 2-D dataspace */ + sid_reg2 = H5Screate_simple(SPACERE2_RANK, dims2, NULL); + sid_reg_ori2 = H5Screate_simple(SPACERE2_RANK, dims2, NULL); + + /* Build up the original two dimensional regular selection */ + start2[0] = 2; + count2[0] = 2; + stride2[0] = 7; + block2[0] = 5; + start2[1] = 1; + count2[1] = 3; + stride2[1] = 3; + block2[1] = 2; + + ret = H5Sselect_hyperslab(sid_reg_ori2, H5S_SELECT_SET, start2, stride2, count2, block2); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Build up two dimensional regular selection with H5_SELECT_OR, inside HDF5, + it will be treated as an irregular selection. 
*/ + + start2[1] = 1; + count2[1] = 2; + stride2[1] = 3; + block2[1] = 2; + + ret = H5Sselect_hyperslab(sid_reg2, H5S_SELECT_SET, start2, stride2, count2, block2); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start2[1] = 7; /* 7 = start(1) + count(2) * stride(3) */ + count2[1] = 1; + stride2[1] = 3; + block2[1] = 2; + + ret = H5Sselect_hyperslab(sid_reg2, H5S_SELECT_OR, start2, stride2, count2, block2); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + ret = H5S__get_rebuild_status_test(sid_reg2, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 and rebuild_stat2 should be + * H5S_DIMINFO_VALID_YES. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } /* end if */ + if (ret != FAIL) { + /* In this case, rebuild_check should be TRUE. */ + rebuild_check = H5Sselect_shape_same(sid_reg2, sid_reg_ori2); + CHECK(rebuild_check, FALSE, "H5Sselect_shape_same"); + } +#endif + /* 2-D irregular case */ + sid_irreg2 = H5Screate_simple(SPACERE2_RANK, dims2, NULL); + /* Build up two dimensional irregular selection with H5_SELECT_OR */ + + start2[0] = 2; + count2[0] = 2; + stride2[0] = 7; + block2[0] = 5; + start2[1] = 1; + count2[1] = 1; + stride2[1] = 3; + block2[1] = 2; + ret = H5Sselect_hyperslab(sid_irreg2, H5S_SELECT_SET, start2, stride2, count2, block2); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start2[1] = 4; + count2[1] = 2; + stride2[1] = 4; + block2[1] = 3; /* Just add one element for the block */ + + ret = H5Sselect_hyperslab(sid_irreg2, H5S_SELECT_OR, start2, stride2, count2, block2); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + ret = H5S__get_rebuild_status_test(sid_irreg2, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and + * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + /* No need to do shape comparison */ +#endif + + MESSAGE(7, ("Testing functionality to rebuild 3-D hyperslab selection\n")); + + /* Create 3-D dataspace */ + sid_reg3 = H5Screate_simple(SPACERE3_RANK, dims3, NULL); + sid_reg_ori3 = H5Screate_simple(SPACERE3_RANK, dims3, NULL); + + /* Build up the original three dimensional regular selection */ + start3[0] = 2; + count3[0] = 2; + stride3[0] = 3; + block3[0] = 2; + start3[1] = 1; + count3[1] = 3; + stride3[1] = 3; + block3[1] = 2; + + start3[2] = 1; + count3[2] = 2; + stride3[2] = 4; + block3[2] = 2; + + ret = H5Sselect_hyperslab(sid_reg_ori3, H5S_SELECT_SET, start3, stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Build up three dimensional regular selection with H5_SELECT_OR, inside HDF5, + it will be treated as an irregular selection. 
*/ + start3[2] = 1; + count3[2] = 1; + stride3[2] = 4; + block3[2] = 2; + + ret = H5Sselect_hyperslab(sid_reg3, H5S_SELECT_SET, start3, stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start3[2] = 5; + count3[2] = 1; + stride3[2] = 4; + block3[2] = 2; + + ret = H5Sselect_hyperslab(sid_reg3, H5S_SELECT_OR, start3, stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + ret = H5S__get_rebuild_status_test(sid_reg3, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 and rebuild_stat2 should be + * H5S_DIMINFO_VALID_YES. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (ret != FAIL) { + /* In this case, rebuild_check should be TRUE. */ + rebuild_check = H5Sselect_shape_same(sid_reg3, sid_reg_ori3); + CHECK(rebuild_check, FALSE, "H5Sselect_shape_same"); + } +#endif + + sid_irreg3 = H5Screate_simple(SPACERE3_RANK, dims3, NULL); + + /* Build up three dimensional irregular selection with H5_SELECT_OR */ + start3[0] = 2; + count3[0] = 2; + stride3[0] = 3; + block3[0] = 2; + start3[1] = 1; + count3[1] = 3; + stride3[1] = 3; + block3[1] = 2; + + start3[2] = 1; + count3[2] = 2; + stride3[2] = 2; + block3[2] = 1; + + ret = H5Sselect_hyperslab(sid_irreg3, H5S_SELECT_SET, start3, stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start3[2] = 3; + count3[2] = 2; + stride3[2] = 3; /* Just add one element for the stride */ + block3[2] = 1; + + ret = H5Sselect_hyperslab(sid_irreg3, H5S_SELECT_OR, start3, stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + ret = H5S__get_rebuild_status_test(sid_irreg3, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and + * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + /* No need to do shape comparison */ +#endif + + MESSAGE(7, ("Testing functionality to rebuild 4-D hyperslab selection\n")); + + /* Create 4-D dataspace */ + sid_reg4 = H5Screate_simple(SPACERE4_RANK, dims4, NULL); + sid_reg_ori4 = H5Screate_simple(SPACERE4_RANK, dims4, NULL); + + /* Build up the original four dimensional regular selection */ + start4[0] = 2; + count4[0] = 2; + stride4[0] = 3; + block4[0] = 2; + + start4[1] = 1; + count4[1] = 3; + stride4[1] = 3; + block4[1] = 2; + + start4[2] = 1; + count4[2] = 2; + stride4[2] = 4; + block4[2] = 2; + + start4[3] = 1; + count4[3] = 2; + stride4[3] = 4; + block4[3] = 2; + + ret = H5Sselect_hyperslab(sid_reg_ori4, H5S_SELECT_SET, start4, stride4, count4, block4); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Build up four dimensional regular selection with H5_SELECT_OR, inside HDF5, + it will be treated as an irregular selection. 
*/ + start4[3] = 1; + count4[3] = 1; + stride4[3] = 4; + block4[3] = 2; + + ret = H5Sselect_hyperslab(sid_reg4, H5S_SELECT_SET, start4, stride4, count4, block4); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start4[3] = 5; + count4[3] = 1; + stride4[3] = 4; + block4[3] = 2; + + ret = H5Sselect_hyperslab(sid_reg4, H5S_SELECT_OR, start4, stride4, count4, block4); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + +#if 0 + ret = H5S__get_rebuild_status_test(sid_reg4, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 and rebuild_stat2 should be + * H5S_DIMINFO_VALID_YES. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (ret != FAIL) { + /* In this case, rebuild_check should be TRUE. */ + rebuild_check = H5Sselect_shape_same(sid_reg4, sid_reg_ori4); + CHECK(rebuild_check, FALSE, "H5Sselect_shape_same"); + } +#endif + + /* Testing irregular selection */ + sid_irreg4 = H5Screate_simple(SPACERE4_RANK, dims4, NULL); + + /* Build up four dimensional irregular selection with H5_SELECT_OR */ + start4[0] = 2; + count4[0] = 2; + stride4[0] = 3; + block4[0] = 2; + start4[1] = 1; + count4[1] = 3; + stride4[1] = 3; + block4[1] = 2; + + start4[2] = 1; + count4[2] = 1; + stride4[2] = 4; + block4[2] = 2; + + start4[3] = 1; + count4[3] = 2; + stride4[3] = 4; + block4[3] = 2; /* sub-block is one element difference */ + + ret = H5Sselect_hyperslab(sid_irreg4, H5S_SELECT_SET, start4, stride4, count4, block4); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start4[2] = 5; + count4[2] = 1; + stride4[2] = 4; + block4[2] = 2; + + start4[3] = 1; + count4[3] = 2; + stride4[3] = 4; + block4[3] = 3; /* sub-block is one element difference */ + + ret = H5Sselect_hyperslab(sid_irreg4, H5S_SELECT_OR, start4, stride4, count4, block4); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + ret = H5S__get_rebuild_status_test(sid_irreg4, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and + * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + /* No need to do shape comparison */ +#endif + + MESSAGE(7, ("Testing functionality to rebuild 5-D hyperslab selection\n")); + + /* Create 5-D dataspace */ + sid_reg5 = H5Screate_simple(SPACERE5_RANK, dims5, NULL); + sid_reg_ori5 = H5Screate_simple(SPACERE5_RANK, dims5, NULL); + + /* Build up the original five dimensional regular selection */ + start5[0] = 2; + count5[0] = 2; + stride5[0] = 3; + block5[0] = 2; + + start5[1] = 1; + count5[1] = 3; + stride5[1] = 3; + block5[1] = 2; + + start5[2] = 1; + count5[2] = 2; + stride5[2] = 4; + block5[2] = 2; + + start5[3] = 1; + count5[3] = 2; + stride5[3] = 4; + block5[3] = 2; + + start5[4] = 1; + count5[4] = 2; + stride5[4] = 4; + block5[4] = 2; + + ret = H5Sselect_hyperslab(sid_reg_ori5, H5S_SELECT_SET, start5, stride5, count5, block5); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Build up five dimensional regular selection with H5_SELECT_OR, inside HDF5, + it will be treated as an irregular selection. 
*/ + start5[4] = 1; + count5[4] = 1; + stride5[4] = 4; + block5[4] = 2; + + ret = H5Sselect_hyperslab(sid_reg5, H5S_SELECT_SET, start5, stride5, count5, block5); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start5[4] = 5; + count5[4] = 1; + stride5[4] = 4; + block5[4] = 2; + + ret = H5Sselect_hyperslab(sid_reg5, H5S_SELECT_OR, start5, stride5, count5, block5); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + +#if 0 + ret = H5S__get_rebuild_status_test(sid_reg5, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 and rebuild_stat2 should be + * H5S_DIMINFO_VALID_YES. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (ret != FAIL) { + /* In this case, rebuild_check should be TRUE. */ + rebuild_check = H5Sselect_shape_same(sid_reg5, sid_reg_ori5); + CHECK(rebuild_check, FALSE, "H5Sselect_shape_same"); + } +#endif + + sid_irreg5 = H5Screate_simple(SPACERE5_RANK, dims5, NULL); + + /* Build up five dimensional irregular selection with H5_SELECT_OR */ + start5[0] = 2; + count5[0] = 2; + stride5[0] = 3; + block5[0] = 2; + + start5[1] = 1; + count5[1] = 3; + stride5[1] = 3; + block5[1] = 2; + + start5[2] = 1; + count5[2] = 2; + stride5[2] = 4; + block5[2] = 2; + + start5[3] = 1; + count5[3] = 1; + stride5[3] = 4; + block5[3] = 2; + + start5[4] = 2; /* One element difference */ + count5[4] = 1; + stride5[4] = 4; + block5[4] = 2; + + ret = H5Sselect_hyperslab(sid_irreg5, H5S_SELECT_SET, start5, stride5, count5, block5); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start5[3] = 5; + count5[3] = 1; + stride5[3] = 4; + block5[3] = 2; + + start5[4] = 1; /* One element difference */ + count5[4] = 2; + stride5[4] = 4; + block5[4] = 2; + + ret = H5Sselect_hyperslab(sid_irreg5, H5S_SELECT_OR, start5, stride5, count5, block5); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + ret = H5S__get_rebuild_status_test(sid_irreg5, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and + * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + /* No need to do shape comparison */ +#endif + + /* We use 5-D to test a special case with + rebuilding routine TRUE, FALSE and TRUE */ + sid_spec = H5Screate_simple(SPACERE5_RANK, dims5, NULL); + + /* Build up the original five dimensional regular selection */ + start5[0] = 2; + count5[0] = 2; + stride5[0] = 3; + block5[0] = 2; + + start5[1] = 1; + count5[1] = 3; + stride5[1] = 3; + block5[1] = 2; + + start5[2] = 1; + count5[2] = 2; + stride5[2] = 4; + block5[2] = 2; + + start5[3] = 1; + count5[3] = 2; + stride5[3] = 4; + block5[3] = 2; + + start5[4] = 1; + count5[4] = 1; + stride5[4] = 4; + block5[4] = 2; + + ret = H5Sselect_hyperslab(sid_spec, H5S_SELECT_SET, start5, stride5, count5, block5); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + ret = H5S__get_rebuild_status_test(sid_spec, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 and rebuild_stat2 should both be + * H5S_DIMINFO_VALID_YES. 
*/ + if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + /* No need to do shape comparison */ +#endif + + /* Adding some selections to make it real irregular */ + start5[3] = 1; + count5[3] = 1; + stride5[3] = 4; + block5[3] = 2; + + start5[4] = 5; + count5[4] = 1; + stride5[4] = 4; + block5[4] = 2; + + ret = H5Sselect_hyperslab(sid_spec, H5S_SELECT_OR, start5, stride5, count5, block5); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + ret = H5S__get_rebuild_status_test(sid_spec, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and + * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + /* No need to do shape comparison */ +#endif + + /* Add more selections to make it regular again */ + start5[3] = 5; + count5[3] = 1; + stride5[3] = 4; + block5[3] = 2; + + start5[4] = 5; + count5[4] = 1; + stride5[4] = 4; + block5[4] = 2; + + ret = H5Sselect_hyperslab(sid_spec, H5S_SELECT_OR, start5, stride5, count5, block5); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + ret = H5S__get_rebuild_status_test(sid_spec, &rebuild_stat1, &rebuild_stat2); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and + * rebuild_stat2 should be H5S_DIMINFO_VALID_YES. */ + if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } + /* No need to do shape comparison */ +#endif + + H5Sclose(sid_reg1); + CHECK(ret, FAIL, "H5Sclose"); + H5Sclose(sid_irreg1); + CHECK(ret, FAIL, "H5Sclose"); + + H5Sclose(sid_reg2); + CHECK(ret, FAIL, "H5Sclose"); + H5Sclose(sid_irreg2); + CHECK(ret, FAIL, "H5Sclose"); + + H5Sclose(sid_reg3); + CHECK(ret, FAIL, "H5Sclose"); + H5Sclose(sid_irreg3); + CHECK(ret, FAIL, "H5Sclose"); + + H5Sclose(sid_reg4); + CHECK(ret, FAIL, "H5Sclose"); + H5Sclose(sid_irreg4); + CHECK(ret, FAIL, "H5Sclose"); + + H5Sclose(sid_reg5); + CHECK(ret, FAIL, "H5Sclose"); + H5Sclose(sid_irreg5); + CHECK(ret, FAIL, "H5Sclose"); + + H5Sclose(sid_spec); + CHECK(ret, FAIL, "H5Sclose"); +} + +/**************************************************************** +** +** test_space_update_diminfo(): Tests selection diminfo update +** routine. We will test whether regular selections can be +** quickly updated when the selection is modified. 
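+**  ("diminfo" is the cached regular description of a hyperslab selection:
+**  one start/stride/count/block per dimension.  Keeping it valid as selections
+**  are combined lets the library avoid the full rebuild path exercised above.)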
+** +** +****************************************************************/ +static void +test_space_update_diminfo(void) +{ + hid_t space_id; /* Dataspace id */ +#if 0 + H5S_diminfo_valid_t diminfo_valid; /* Diminfo status */ + H5S_diminfo_valid_t rebuild_status; /* Diminfo status after rebuid */ +#endif + H5S_sel_type sel_type; /* Selection type */ + herr_t ret; /* Return value */ + + /* dimensions of rank 1 to rank 5 */ + hsize_t dims1[] = {SPACEUD1_DIM0}; + hsize_t dims3[] = {SPACEUD3_DIM0, SPACEUD3_DIM1, SPACEUD3_DIM2}; + + /* The start of the hyperslab */ + hsize_t start1[1], start3[3]; + + /* The stride of the hyperslab */ + hsize_t stride1[1], stride3[3]; + + /* The number of blocks for the hyperslab */ + hsize_t count1[1], count3[3]; + + /* The size of each block for the hyperslab */ + hsize_t block1[1], block3[3]; + + /* Output message about test being performed */ + MESSAGE(6, ("Testing functionality to update hyperslab dimension info\n")); + + MESSAGE(7, ("Testing functionality to update 1-D hyperslab dimension info\n")); + + /* + * Test adding regularly spaced distinct blocks + */ + + /* Create 1-D dataspace */ + space_id = H5Screate_simple(1, dims1, NULL); + + /* Create single block */ + start1[0] = 3; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add block after first, with OR */ + start1[0] = 6; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add block before first, this time with XOR */ + start1[0] = 0; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add two blocks after current block */ + start1[0] = 9; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add two blocks overlapping current block, with OR */ + start1[0] = 9; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be 
YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add two blocks partially overlapping current block, with OR */ + start1[0] = 12; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add two blocks partially overlapping current block, with XOR */ + start1[0] = 15; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO, after rebuild it should be IMPOSSIBLE */ + ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ + if (rebuild_status != H5S_DIMINFO_VALID_IMPOSSIBLE) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } /* end if */ +#endif + + /* Fill in missing block */ + start1[0] = 15; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO, after rebuild it should be YES */ + ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ + if (rebuild_status != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } /* end if */ +#endif + /* + * Test adding contiguous blocks + */ + + /* Create single block */ + start1[0] = 3; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add block immediately after first, with OR */ + start1[0] = 5; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add block immediately before first, with XOR */ + start1[0] = 1; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* 
diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add differently size block immediately after current, with OR */ + start1[0] = 7; + count1[0] = 1; + block1[0] = 7; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* + * Test adding overlapping blocks + */ + + /* Create single block */ + start1[0] = 3; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add block completely overlapping first, with OR */ + start1[0] = 3; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add block partially overlapping first, with OR */ + start1[0] = 4; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add block completely enclosing current, with OR */ + start1[0] = 2; + count1[0] = 1; + block1[0] = 5; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add block completely enclosed by current, with OR */ + start1[0] = 3; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add equally sized block partially overlapping current, with XOR 
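+       (the current selection covers elements 2-6 and the XOR'd block covers 3-7,
+       leaving only elements 2 and 7 -- still a regular start/stride/count/block
+       pattern, so the disabled check below expects H5S_DIMINFO_VALID_YES)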
*/ + start1[0] = 3; + count1[0] = 1; + block1[0] = 5; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Fill in hole in block */ + start1[0] = 3; + count1[0] = 1; + block1[0] = 4; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO, after rebuild it should be YES */ + ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ + if (rebuild_status != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } /* end if */ +#endif + + /* Add differently sized block partially overlapping current, with XOR */ + start1[0] = 4; + count1[0] = 1; + block1[0] = 5; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Fill in hole in block */ + start1[0] = 4; + count1[0] = 1; + block1[0] = 4; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO, after rebuild it should be YES */ + ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status); + CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ + if (rebuild_status != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_rebuild"); + } /* end if */ +#endif + + /* Add block completely overlapping current, with XOR */ + start1[0] = 2; + count1[0] = 1; + block1[0] = 7; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + sel_type = H5Sget_select_type(space_id); + VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type"); + + /* + * Test various conditions that break the fast algorithm + */ + + /* Create multiple blocks */ + start1[0] = 3; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create single block with start out of phase */ + start1[0] = 8; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* 
diminfo_valid should be NO */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create multiple blocks */ + start1[0] = 3; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create multiple blocks with start out of phase */ + start1[0] = 8; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create multiple blocks */ + start1[0] = 3; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create multiple blocks with wrong stride */ + start1[0] = 9; + stride1[0] = 4; + count1[0] = 2; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create single block */ + start1[0] = 3; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create single block with wrong size */ + start1[0] = 6; + count1[0] = 1; + block1[0] = 1; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create single block */ + start1[0] = 3; + count1[0] = 1; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, 
H5S_SELECT_SET, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create multiple blocks with wrong size */ + start1[0] = 6; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 1; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create multiple blocks */ + start1[0] = 3; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create single block with wrong size */ + start1[0] = 9; + count1[0] = 1; + block1[0] = 1; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create multiple blocks */ + start1[0] = 3; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create multiple blocks with wrong size */ + start1[0] = 9; + stride1[0] = 3; + count1[0] = 2; + block1[0] = 1; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + + MESSAGE(7, ("Testing functionality to update 3-D hyperslab dimension info\n")); + + /* Create 3-D dataspace */ + space_id = H5Screate_simple(3, dims3, NULL); + + /* Create multiple blocks */ + start3[0] = 0; + start3[1] = 1; + start3[2] = 2; + stride3[0] = 2; + stride3[1] = 3; + stride3[2] = 4; + count3[0] = 4; + count3[1] = 3; + count3[2] = 2; + block3[0] = 1; + block3[1] = 2; + block3[2] = 3; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start3, 
stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add blocks with same values in all dimensions */ + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add blocks with same values in two dimensions */ + start3[0] = 8; + stride3[0] = 1; + count3[0] = 1; + block3[0] = 1; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create multiple blocks */ + start3[0] = 0; + start3[1] = 1; + start3[2] = 2; + stride3[0] = 2; + stride3[1] = 3; + stride3[2] = 4; + count3[0] = 4; + count3[1] = 3; + count3[2] = 2; + block3[0] = 1; + block3[1] = 2; + block3[2] = 3; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start3, stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add blocks with same values in one dimension */ + start3[0] = 8; + start3[1] = 10; + stride3[0] = 1; + stride3[1] = 1; + count3[0] = 1; + count3[1] = 1; + block3[0] = 1; + block3[1] = 2; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Create multiple blocks */ + start3[0] = 0; + start3[1] = 1; + start3[2] = 2; + stride3[0] = 2; + stride3[1] = 3; + stride3[2] = 4; + count3[0] = 4; + count3[1] = 3; + count3[2] = 2; + block3[0] = 1; + block3[1] = 2; + block3[2] = 3; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start3, stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be YES */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_YES) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + /* Add blocks with same values in no dimensions */ + start3[0] = 8; + start3[1] = 10; + start3[2] = 10; + stride3[0] = 1; + stride3[1] = 1; + stride3[2] = 1; + count3[0] = 1; + count3[1] = 1; + count3[2] = 1; + block3[0] = 
1; + block3[1] = 2; + block3[2] = 3; + ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); +#if 0 + /* diminfo_valid should be NO */ + ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); + CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); + if (diminfo_valid != H5S_DIMINFO_VALID_NO) { + ret = FAIL; + CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); + } /* end if */ +#endif + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); +} /* end test_space_update_diminfo() */ + +/**************************************************************** +** +** test_select_hyper_chunk_offset(): Tests selections on dataspace, +** verify that offsets for hyperslab selections are working in +** chunked datasets. +** +****************************************************************/ +#if 0 +static void +test_select_hyper_chunk_offset(void) +{ + hid_t fid; /* File ID */ + hid_t sid; /* Dataspace ID */ + hid_t msid; /* Memory dataspace ID */ + hid_t did; /* Dataset ID */ + const hsize_t mem_dims[1] = {SPACE10_DIM1}; /* Dataspace dimensions for memory */ + const hsize_t dims[1] = {0}; /* Dataspace initial dimensions */ + const hsize_t maxdims[1] = {H5S_UNLIMITED}; /* Dataspace mam dims */ + int *wbuf; /* Buffer for writing data */ + int *rbuf; /* Buffer for reading data */ + hid_t dcpl; /* Dataset creation property list ID */ + hsize_t chunks[1] = {SPACE10_CHUNK_SIZE}; /* Chunk size */ + hsize_t start[1] = {0}; /* The start of the hyperslab */ + hsize_t count[1] = {SPACE10_CHUNK_SIZE}; /* The size of the hyperslab */ + int i, j; /* Local index */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(6, ("Testing hyperslab selections using offsets in chunked datasets\n")); + + /* Allocate buffers */ + wbuf = (int *)HDmalloc(sizeof(int) * SPACE10_DIM1); + CHECK_PTR(wbuf, "HDmalloc"); + rbuf = (int *)HDcalloc(sizeof(int), SPACE10_DIM1); + CHECK_PTR(rbuf, "HDcalloc"); + + /* Initialize the write buffer */ + for (i = 0; i < SPACE10_DIM1; i++) + wbuf[i] = i; + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create a dataset creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Set to chunked storage layout */ + ret = H5Pset_layout(dcpl, H5D_CHUNKED); + CHECK(ret, FAIL, "H5Pset_layout"); + + /* Set the chunk size */ + ret = H5Pset_chunk(dcpl, 1, chunks); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* Create dataspace for memory */ + msid = H5Screate_simple(1, mem_dims, NULL); + CHECK(msid, FAIL, "H5Screate_simple"); + + /* Select the correct chunk in the memory dataspace */ + ret = H5Sselect_hyperslab(msid, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create dataspace for dataset */ + sid = H5Screate_simple(1, dims, maxdims); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Create the dataset */ + did = H5Dcreate2(fid, "fooData", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Loop over writing out each chunk */ + for (i = SPACE10_CHUNK_SIZE; i <= SPACE10_DIM1; i += SPACE10_CHUNK_SIZE) { + hssize_t offset[1]; /* Offset of selection */ + hid_t fsid; /* File dataspace ID */ + 
hsize_t size[1]; /* The size to extend the dataset to */ + + /* Extend the dataset */ + size[0] = (hsize_t)i; /* The size to extend the dataset to */ + ret = H5Dset_extent(did, size); + CHECK(ret, FAIL, "H5Dset_extent"); + + /* Get the (extended) dataspace from the dataset */ + fsid = H5Dget_space(did); + CHECK(fsid, FAIL, "H5Dget_space"); + + /* Select the correct chunk in the dataset */ + ret = H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Set the selection offset for the file dataspace */ + offset[0] = i - SPACE10_CHUNK_SIZE; + ret = H5Soffset_simple(fsid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Set the selection offset for the memory dataspace */ + offset[0] = SPACE10_DIM1 - i; + ret = H5Soffset_simple(msid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Write the data to the chunk */ + ret = H5Dwrite(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close the file dataspace copy */ + ret = H5Sclose(fsid); + CHECK(ret, FAIL, "H5Sclose"); + } + + /* Read the data back in */ + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Verify the information read in */ + for (i = 0; i < SPACE10_DIM1; i += SPACE10_CHUNK_SIZE) + for (j = 0; j < SPACE10_CHUNK_SIZE; j++) + if (wbuf[i + j] != rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j]) + TestErrPrintf("Line: %d - Error! i=%d, j=%d, rbuf=%d, wbuf=%d\n", __LINE__, i, j, + rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j], wbuf[i + j]); + + /* Check with 'OR'ed set of hyperslab selections, which makes certain the + * hyperslab spanlist code gets tested. -QAK + */ + + /* Re-initialize the write buffer */ + for (i = 0; i < SPACE10_DIM1; i++) + wbuf[i] = i * 2; + + /* Change the selected the region in the memory dataspace */ + start[0] = 0; + count[0] = SPACE10_CHUNK_SIZE / 3; + ret = H5Sselect_hyperslab(msid, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + start[0] = (2 * SPACE10_CHUNK_SIZE) / 3; + ret = H5Sselect_hyperslab(msid, H5S_SELECT_OR, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Loop over writing out each chunk */ + for (i = SPACE10_CHUNK_SIZE; i <= SPACE10_DIM1; i += SPACE10_CHUNK_SIZE) { + hssize_t offset[1]; /* Offset of selection */ + hid_t fsid; /* File dataspace ID */ + hsize_t size[1]; /* The size to extend the dataset to */ + + /* Extend the dataset */ + size[0] = (hsize_t)i; /* The size to extend the dataset to */ + ret = H5Dset_extent(did, size); + CHECK(ret, FAIL, "H5Dset_extent"); + + /* Get the (extended) dataspace from the dataset */ + fsid = H5Dget_space(did); + CHECK(fsid, FAIL, "H5Dget_space"); + + /* Select the correct region in the dataset */ + start[0] = 0; + ret = H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + start[0] = (2 * SPACE10_CHUNK_SIZE) / 3; + ret = H5Sselect_hyperslab(fsid, H5S_SELECT_OR, start, NULL, count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Set the selection offset for the file dataspace */ + offset[0] = i - SPACE10_CHUNK_SIZE; + ret = H5Soffset_simple(fsid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Set the selection offset for the memory dataspace */ + offset[0] = SPACE10_DIM1 - i; + ret = H5Soffset_simple(msid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Write the data to the chunk */ + ret = H5Dwrite(did, H5T_NATIVE_INT, 
msid, fsid, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Close the file dataspace copy */ + ret = H5Sclose(fsid); + CHECK(ret, FAIL, "H5Sclose"); + } + + /* Read the data back in */ + ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Verify the information read in */ + for (i = 0; i < SPACE10_DIM1; i += SPACE10_CHUNK_SIZE) + for (j = 0; j < SPACE10_CHUNK_SIZE; j++) + /* We're not writing out the "middle" of each chunk, so don't check that */ + if (j < (SPACE10_CHUNK_SIZE / 3) || j >= ((2 * SPACE10_CHUNK_SIZE) / 3)) + if (wbuf[i + j] != rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j]) + TestErrPrintf("Line: %d - Error! i=%d, j=%d, rbuf=%d, wbuf=%d\n", __LINE__, i, j, + rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j], wbuf[i + j]); + + /* Close the memory dataspace */ + ret = H5Sclose(msid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close the dataset */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Free the buffers */ + HDfree(wbuf); + HDfree(rbuf); +} /* test_select_hyper_chunk_offset() */ +#endif +/**************************************************************** +** +** test_select_hyper_chunk_offset2(): Tests selections on dataspace, +** another test to verify that offsets for hyperslab selections are +** working in chunked datasets. +** +****************************************************************/ +#if 0 +static void +test_select_hyper_chunk_offset2(void) +{ + hid_t file, dataset; /* handles */ + hid_t dataspace; + hid_t memspace; + hid_t dcpl; /* Dataset creation property list */ + herr_t status; + unsigned data_out[SPACE12_DIM0]; /* output buffer */ + unsigned data_in[SPACE12_CHUNK_DIM0]; /* input buffer */ + hsize_t dims[SPACE12_RANK] = {SPACE12_DIM0}; /* Dimension size */ + hsize_t chunk_dims[SPACE12_RANK] = {SPACE12_CHUNK_DIM0}; /* Chunk size */ + hsize_t start[SPACE12_RANK]; /* Start of hyperslab */ + hsize_t count[SPACE12_RANK]; /* Size of hyperslab */ + hssize_t offset[SPACE12_RANK]; /* hyperslab offset in the file */ + unsigned u, v; /* Local index variables */ + + /* Output message about test being performed */ + MESSAGE(6, ("Testing more hyperslab selections using offsets in chunked datasets\n")); + + /* Initialize data to write out */ + for (u = 0; u < SPACE12_DIM0; u++) + data_out[u] = u; + + /* Create the file */ + file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fcreate"); + + /* Create dataspace */ + dataspace = H5Screate_simple(SPACE12_RANK, dims, NULL); + CHECK(dataspace, FAIL, "H5Screate_simple"); + + /* Create dataset creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Set chunk sizes */ + status = H5Pset_chunk(dcpl, SPACE12_RANK, chunk_dims); + CHECK(status, FAIL, "H5Pset_chunk"); + + /* Create dataset */ + dataset = H5Dcreate2(file, DATASETNAME, H5T_NATIVE_UINT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close DCPL */ + status = H5Pclose(dcpl); + CHECK(status, FAIL, "H5Pclose"); + + /* Write out entire dataset */ + status = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_out); + CHECK(status, FAIL, "H5Dclose"); + + /* Create memory dataspace (same size as a chunk) */ + memspace = H5Screate_simple(SPACE12_RANK, chunk_dims, NULL); + CHECK(dataspace, FAIL, "H5Screate_simple"); + + /* + * Define hyperslab in the 
file dataspace. + */ + start[0] = 0; + count[0] = SPACE12_CHUNK_DIM0; + status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, NULL, count, NULL); + CHECK(status, FAIL, "H5Sselect_hyperslab"); + + /* Loop through retrieving data from file, checking it against data written */ + for (u = 0; u < SPACE12_DIM0; u += SPACE12_CHUNK_DIM0) { + /* Set the offset of the file selection */ + offset[0] = u; + status = H5Soffset_simple(dataspace, offset); + CHECK(status, FAIL, "H5Soffset_simple"); + + /* Read in buffer of data */ + status = H5Dread(dataset, H5T_NATIVE_UINT, memspace, dataspace, H5P_DEFAULT, data_in); + CHECK(status, FAIL, "H5Dread"); + + /* Check data read in */ + for (v = 0; v < SPACE12_CHUNK_DIM0; v++) + if (data_out[u + v] != data_in[v]) + TestErrPrintf("Error! data_out[%u]=%u, data_in[%u]=%u\n", (unsigned)(u + v), data_out[u + v], + v, data_in[v]); + } /* end for */ + + status = H5Dclose(dataset); + CHECK(status, FAIL, "H5Dclose"); + + status = H5Sclose(dataspace); + CHECK(status, FAIL, "H5Sclose"); + + status = H5Sclose(memspace); + CHECK(status, FAIL, "H5Sclose"); + + status = H5Fclose(file); + CHECK(status, FAIL, "H5Fclose"); +} /* test_select_hyper_chunk_offset2() */ +#endif +/**************************************************************** +** +** test_select_bounds(): Tests selection bounds on dataspaces, +** both with and without offsets. +** +****************************************************************/ +static void +test_select_bounds(void) +{ + hid_t sid; /* Dataspace ID */ + const hsize_t dims[SPACE11_RANK] = {SPACE11_DIM1, SPACE11_DIM2}; /* Dataspace dimensions */ + hsize_t coord[SPACE11_NPOINTS][SPACE11_RANK]; /* Coordinates for point selection */ + hsize_t start[SPACE11_RANK]; /* The start of the hyperslab */ + hsize_t stride[SPACE11_RANK]; /* The stride between block starts for the hyperslab */ + hsize_t count[SPACE11_RANK]; /* The number of blocks for the hyperslab */ + hsize_t block[SPACE11_RANK]; /* The size of each block for the hyperslab */ + hssize_t offset[SPACE11_RANK]; /* Offset amount for selection */ + hsize_t low_bounds[SPACE11_RANK]; /* The low bounds for the selection */ + hsize_t high_bounds[SPACE11_RANK]; /* The high bounds for the selection */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(6, ("Testing selection bounds\n")); + + /* Create dataspace */ + sid = H5Screate_simple(SPACE11_RANK, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Get bounds for 'all' selection */ + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low_bounds[0], 0, "H5Sget_select_bounds"); + VERIFY(low_bounds[1], 0, "H5Sget_select_bounds"); + VERIFY(high_bounds[0], SPACE11_DIM1 - 1, "H5Sget_select_bounds"); + VERIFY(high_bounds[1], SPACE11_DIM2 - 1, "H5Sget_select_bounds"); + + /* Set offset for selection */ + offset[0] = 1; + offset[1] = 1; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Get bounds for 'all' selection with offset (which should be ignored) */ + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low_bounds[0], 0, "H5Sget_select_bounds"); + VERIFY(low_bounds[1], 0, "H5Sget_select_bounds"); + VERIFY(high_bounds[0], SPACE11_DIM1 - 1, "H5Sget_select_bounds"); + VERIFY(high_bounds[1], SPACE11_DIM2 - 1, "H5Sget_select_bounds"); + + /* Reset offset for selection */ + offset[0] = 0; + offset[1] = 0; + ret = H5Soffset_simple(sid, 
offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Set 'none' selection */ + ret = H5Sselect_none(sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Get bounds for 'none' selection */ + H5E_BEGIN_TRY + { + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sget_select_bounds"); + + /* Set point selection */ + coord[0][0] = 3; + coord[0][1] = 3; + coord[1][0] = 3; + coord[1][1] = 96; + coord[2][0] = 96; + coord[2][1] = 3; + coord[3][0] = 96; + coord[3][1] = 96; + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)SPACE11_NPOINTS, (const hsize_t *)coord); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Get bounds for point selection */ + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low_bounds[0], 3, "H5Sget_select_bounds"); + VERIFY(low_bounds[1], 3, "H5Sget_select_bounds"); + VERIFY(high_bounds[0], SPACE11_DIM1 - 4, "H5Sget_select_bounds"); + VERIFY(high_bounds[1], SPACE11_DIM2 - 4, "H5Sget_select_bounds"); + + /* Set bad offset for selection */ + offset[0] = 5; + offset[1] = -5; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Get bounds for hyperslab selection with negative offset */ + H5E_BEGIN_TRY + { + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sget_select_bounds"); + + /* Set valid offset for selection */ + offset[0] = 2; + offset[1] = -2; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Get bounds for point selection with offset */ + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low_bounds[0], 5, "H5Sget_select_bounds"); + VERIFY(low_bounds[1], 1, "H5Sget_select_bounds"); + VERIFY(high_bounds[0], SPACE11_DIM1 - 2, "H5Sget_select_bounds"); + VERIFY(high_bounds[1], SPACE11_DIM2 - 6, "H5Sget_select_bounds"); + + /* Reset offset for selection */ + offset[0] = 0; + offset[1] = 0; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Set "regular" hyperslab selection */ + start[0] = 2; + start[1] = 2; + stride[0] = 10; + stride[1] = 10; + count[0] = 4; + count[1] = 4; + block[0] = 5; + block[1] = 5; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Get bounds for hyperslab selection */ + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low_bounds[0], 2, "H5Sget_select_bounds"); + VERIFY(low_bounds[1], 2, "H5Sget_select_bounds"); + VERIFY(high_bounds[0], 36, "H5Sget_select_bounds"); + VERIFY(high_bounds[1], 36, "H5Sget_select_bounds"); + + /* Set bad offset for selection */ + offset[0] = 5; + offset[1] = -5; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Get bounds for hyperslab selection with negative offset */ + H5E_BEGIN_TRY + { + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sget_select_bounds"); + + /* Set valid offset for selection */ + offset[0] = 5; + offset[1] = -2; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Get bounds for hyperslab selection with offset */ + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low_bounds[0], 7, "H5Sget_select_bounds"); + VERIFY(low_bounds[1], 0, "H5Sget_select_bounds"); + 
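+    /* With the (5, -2) offset applied, the un-offset 2..36 extent of this
+     * hyperslab shifts to 7..41 in dimension 0 and 0..34 in dimension 1,
+     * which is what the four VERIFY calls around this comment check. */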
VERIFY(high_bounds[0], 41, "H5Sget_select_bounds"); + VERIFY(high_bounds[1], 34, "H5Sget_select_bounds"); + + /* Reset offset for selection */ + offset[0] = 0; + offset[1] = 0; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Make "irregular" hyperslab selection */ + start[0] = 20; + start[1] = 20; + stride[0] = 20; + stride[1] = 20; + count[0] = 2; + count[1] = 2; + block[0] = 10; + block[1] = 10; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Get bounds for hyperslab selection */ + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low_bounds[0], 2, "H5Sget_select_bounds"); + VERIFY(low_bounds[1], 2, "H5Sget_select_bounds"); + VERIFY(high_bounds[0], 49, "H5Sget_select_bounds"); + VERIFY(high_bounds[1], 49, "H5Sget_select_bounds"); + + /* Set bad offset for selection */ + offset[0] = 5; + offset[1] = -5; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Get bounds for hyperslab selection with negative offset */ + H5E_BEGIN_TRY + { + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sget_select_bounds"); + + /* Set valid offset for selection */ + offset[0] = 5; + offset[1] = -2; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Get bounds for hyperslab selection with offset */ + ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(low_bounds[0], 7, "H5Sget_select_bounds"); + VERIFY(low_bounds[1], 0, "H5Sget_select_bounds"); + VERIFY(high_bounds[0], 54, "H5Sget_select_bounds"); + VERIFY(high_bounds[1], 47, "H5Sget_select_bounds"); + + /* Reset offset for selection */ + offset[0] = 0; + offset[1] = 0; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_select_bounds() */ + +/**************************************************************** +** +** test_hyper_regular(): Tests query operations on regular hyperslabs +** +****************************************************************/ +static void +test_hyper_regular(void) +{ + hid_t sid; /* Dataspace ID */ + const hsize_t dims[SPACE13_RANK] = {SPACE13_DIM1, SPACE13_DIM2, SPACE13_DIM3}; /* Dataspace dimensions */ + hsize_t coord[SPACE13_NPOINTS][SPACE13_RANK]; /* Coordinates for point selection */ + hsize_t start[SPACE13_RANK]; /* The start of the hyperslab */ + hsize_t stride[SPACE13_RANK]; /* The stride between block starts for the hyperslab */ + hsize_t count[SPACE13_RANK]; /* The number of blocks for the hyperslab */ + hsize_t block[SPACE13_RANK]; /* The size of each block for the hyperslab */ + hsize_t t_start[SPACE13_RANK]; /* Temporary start of the hyperslab */ + hsize_t t_count[SPACE13_RANK]; /* Temporary number of blocks for the hyperslab */ + hsize_t q_start[SPACE13_RANK]; /* The queried start of the hyperslab */ + hsize_t q_stride[SPACE13_RANK]; /* The queried stride between block starts for the hyperslab */ + hsize_t q_count[SPACE13_RANK]; /* The queried number of blocks for the hyperslab */ + hsize_t q_block[SPACE13_RANK]; /* The queried size of each block for the hyperslab */ + htri_t is_regular; /* Whether a hyperslab selection is regular */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test 
being performed */ + MESSAGE(6, ("Testing queries on regular hyperslabs\n")); + + /* Create dataspace */ + sid = H5Screate_simple(SPACE13_RANK, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Query if 'all' selection is regular hyperslab (should fail) */ + H5E_BEGIN_TRY + { + is_regular = H5Sis_regular_hyperslab(sid); + } + H5E_END_TRY; + VERIFY(is_regular, FAIL, "H5Sis_regular_hyperslab"); + + /* Query regular hyperslab selection info (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sget_regular_hyperslab"); + + /* Set 'none' selection */ + ret = H5Sselect_none(sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Query if 'none' selection is regular hyperslab (should fail) */ + H5E_BEGIN_TRY + { + is_regular = H5Sis_regular_hyperslab(sid); + } + H5E_END_TRY; + VERIFY(is_regular, FAIL, "H5Sis_regular_hyperslab"); + + /* Query regular hyperslab selection info (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sget_regular_hyperslab"); + + /* Set point selection */ + coord[0][0] = 3; + coord[0][1] = 3; + coord[0][2] = 3; + coord[1][0] = 3; + coord[1][1] = 48; + coord[1][2] = 48; + coord[2][0] = 48; + coord[2][1] = 3; + coord[2][2] = 3; + coord[3][0] = 48; + coord[3][1] = 48; + coord[3][2] = 48; + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)SPACE13_NPOINTS, (const hsize_t *)coord); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Query if 'point' selection is regular hyperslab (should fail) */ + H5E_BEGIN_TRY + { + is_regular = H5Sis_regular_hyperslab(sid); + } + H5E_END_TRY; + VERIFY(is_regular, FAIL, "H5Sis_regular_hyperslab"); + + /* Query regular hyperslab selection info (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sget_regular_hyperslab"); + + /* Set "regular" hyperslab selection */ + start[0] = 2; + start[1] = 2; + start[2] = 2; + stride[0] = 5; + stride[1] = 5; + stride[2] = 5; + count[0] = 3; + count[1] = 3; + count[2] = 3; + block[0] = 4; + block[1] = 4; + block[2] = 4; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Query if 'hyperslab' selection is regular hyperslab (should be TRUE) */ + is_regular = H5Sis_regular_hyperslab(sid); + VERIFY(is_regular, TRUE, "H5Sis_regular_hyperslab"); + + /* Retrieve the hyperslab parameters */ + ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); + CHECK(ret, FAIL, "H5Sget_regular_hyperslab"); + + /* Verify the hyperslab parameters */ + for (u = 0; u < SPACE13_RANK; u++) { + if (start[u] != q_start[u]) + ERROR("H5Sget_regular_hyperslab, start"); + if (stride[u] != q_stride[u]) + ERROR("H5Sget_regular_hyperslab, stride"); + if (count[u] != q_count[u]) + ERROR("H5Sget_regular_hyperslab, count"); + if (block[u] != q_block[u]) + ERROR("H5Sget_regular_hyperslab, block"); + } /* end for */ + + /* 'OR' in another point */ + t_start[0] = 0; + t_start[1] = 0; + t_start[2] = 0; + t_count[0] = 1; + t_count[1] = 1; + t_count[2] = 1; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, t_start, NULL, t_count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Query if 'hyperslab' selection is regular hyperslab (should be FALSE) */ + is_regular = H5Sis_regular_hyperslab(sid); + VERIFY(is_regular, FALSE, 
"H5Sis_regular_hyperslab"); + + /* Query regular hyperslab selection info (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Sget_regular_hyperslab"); + + /* 'XOR' in the point again, to remove it, which should make it regular again */ + t_start[0] = 0; + t_start[1] = 0; + t_start[2] = 0; + t_count[0] = 1; + t_count[1] = 1; + t_count[2] = 1; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, t_start, NULL, t_count, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Query if 'hyperslab' selection is regular hyperslab (should be TRUE) */ + is_regular = H5Sis_regular_hyperslab(sid); + VERIFY(is_regular, TRUE, "H5Sis_regular_hyperslab"); + + /* Retrieve the hyperslab parameters */ + ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); + CHECK(ret, FAIL, "H5Sget_regular_hyperslab"); + + /* Verify the hyperslab parameters */ + for (u = 0; u < SPACE13_RANK; u++) { + if (start[u] != q_start[u]) + ERROR("H5Sget_regular_hyperslab, start"); + if (stride[u] != q_stride[u]) + ERROR("H5Sget_regular_hyperslab, stride"); + if (count[u] != q_count[u]) + ERROR("H5Sget_regular_hyperslab, count"); + if (block[u] != q_block[u]) + ERROR("H5Sget_regular_hyperslab, block"); + } /* end for */ + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_hyper_regular() */ + +/**************************************************************** +** +** test_hyper_unlim(): Tests unlimited hyperslab selections +** +****************************************************************/ +static void +test_hyper_unlim_check(hid_t sid, hsize_t *dims, hssize_t endpoints, hssize_t enblocks, hsize_t *eblock1, + hsize_t *eblock2) +{ + hid_t lim_sid; + hsize_t start[3]; + H5S_sel_type sel_type; + hssize_t npoints; + hssize_t nblocks; + hsize_t blocklist[12]; + herr_t ret; + + HDassert(enblocks <= 2); + + /* Copy sid to lim_sid */ + lim_sid = H5Scopy(sid); + CHECK(lim_sid, FAIL, "H5Scopy"); + + /* "And" lim_sid with dims to create limited selection */ + HDmemset(start, 0, sizeof(start)); + ret = H5Sselect_hyperslab(lim_sid, H5S_SELECT_AND, start, NULL, dims, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Check number of elements */ + npoints = H5Sget_select_npoints(lim_sid); + CHECK(npoints, FAIL, "H5Sget_select_npoints"); + VERIFY(npoints, endpoints, "H5Sget_select_npoints"); + + /* Get selection type */ + sel_type = H5Sget_select_type(lim_sid); + CHECK(sel_type, H5S_SEL_ERROR, "H5Sget_select_type"); + + /* Only examine blocks for hyperslab selection */ + if (sel_type == H5S_SEL_HYPERSLABS) { + /* Get number of blocks */ + nblocks = H5Sget_select_hyper_nblocks(lim_sid); + CHECK(nblocks, FAIL, "H5Sget_select_hyper_nblocks"); + VERIFY(nblocks, enblocks, "H5Sget_select_hyper_nblocks"); + + if (nblocks > 0) { + /* Get blocklist */ + ret = H5Sget_select_hyper_blocklist(lim_sid, (hsize_t)0, (hsize_t)nblocks, blocklist); + CHECK(ret, FAIL, "H5Sget_select_hyper_blocklist"); + + /* Verify blocklist */ + if (nblocks == (hssize_t)1) { + if (HDmemcmp(blocklist, eblock1, 6 * sizeof(eblock1[0])) != 0) + ERROR("H5Sget_select_hyper_blocklist"); + } /* end if */ + else { + HDassert(nblocks == (hssize_t)2); + if (HDmemcmp(blocklist, eblock1, 6 * sizeof(eblock1[0])) != 0) { + if (HDmemcmp(blocklist, eblock2, 6 * sizeof(eblock2[0])) != 0) + ERROR("H5Sget_select_hyper_blocklist"); + if (HDmemcmp(&blocklist[6], eblock1, 6 * sizeof(eblock1[0])) != 0) + 
ERROR("H5Sget_select_hyper_blocklist"); + } /* end if */ + else if (HDmemcmp(&blocklist[6], eblock2, 6 * sizeof(eblock2[0])) != 0) + ERROR("H5Sget_select_hyper_blocklist"); + } /* end else */ + } /* end if */ + } /* end if */ + else if (sel_type != H5S_SEL_NONE) + ERROR("H5Sget_select_type"); + + /* Close the limited dataspace */ + ret = H5Sclose(lim_sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* end test_hyper_unlim_check() */ + +static void +test_hyper_unlim(void) +{ + hid_t sid; + hsize_t dims[3] = {4, 4, 7}; + hsize_t mdims[3] = {4, H5S_UNLIMITED, 7}; + hsize_t start[3] = {1, 2, 1}; + hsize_t stride[3] = {1, 1, 3}; + hsize_t count[3] = {1, 1, 2}; + hsize_t block[3] = {2, H5S_UNLIMITED, 2}; + hsize_t start2[3]; + hsize_t count2[3]; + hsize_t eblock1[6] = {1, 2, 1, 2, 3, 2}; + hsize_t eblock2[6] = {1, 2, 4, 2, 3, 5}; + hssize_t offset[3] = {0, -1, 0}; + hssize_t ssize_out; + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(6, ("Testing unlimited hyperslab selections\n")); + + /* Create dataspace */ + sid = H5Screate_simple(3, dims, mdims); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Select unlimited hyperslab */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Check with unlimited dimension clipped to 4 */ + test_hyper_unlim_check(sid, dims, (hssize_t)16, (hssize_t)2, eblock1, eblock2); + + /* Check with unlimited dimension clipped to 3 */ + dims[1] = 3; + eblock1[4] = 2; + eblock2[4] = 2; + test_hyper_unlim_check(sid, dims, (hssize_t)8, (hssize_t)2, eblock1, eblock2); + + /* Check with unlimited dimension clipped to 2 */ + dims[1] = 2; + test_hyper_unlim_check(sid, dims, (hssize_t)0, (hssize_t)0, eblock1, eblock2); + + /* Check with unlimited dimension clipped to 1 */ + dims[1] = 1; + test_hyper_unlim_check(sid, dims, (hssize_t)0, (hssize_t)0, eblock1, eblock2); + + /* Check with unlimited dimension clipped to 7 */ + dims[1] = 7; + eblock1[4] = 6; + eblock2[4] = 6; + test_hyper_unlim_check(sid, dims, (hssize_t)40, (hssize_t)2, eblock1, eblock2); + + /* Set offset of selection */ + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Check with adjusted offset (should not affect result) */ + test_hyper_unlim_check(sid, dims, (hssize_t)40, (hssize_t)2, eblock1, eblock2); + + /* Reset offset of selection */ + offset[1] = (hssize_t)0; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* + * Now try with multiple blocks in unlimited dimension + */ + stride[1] = 3; + stride[2] = 1; + count[1] = H5S_UNLIMITED; + count[2] = 1; + block[1] = 2; + + /* Select unlimited hyperslab */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Check with new selection */ + eblock1[1] = 2; + eblock1[4] = 3; + eblock2[1] = 5; + eblock2[2] = 1; + eblock2[4] = 6; + eblock2[5] = 2; + test_hyper_unlim_check(sid, dims, (hssize_t)16, (hssize_t)2, eblock1, eblock2); + + /* Check with unlimited dimension clipped to 3 */ + dims[1] = 3; + eblock1[4] = 2; + test_hyper_unlim_check(sid, dims, (hssize_t)4, (hssize_t)1, eblock1, eblock2); + + /* Check with unlimited dimension clipped to 4 */ + dims[1] = 4; + eblock1[4] = 3; + test_hyper_unlim_check(sid, dims, (hssize_t)8, (hssize_t)1, eblock1, eblock2); + + /* Check with unlimited dimension clipped to 5 */ + dims[1] = 5; + eblock1[4] = 3; + test_hyper_unlim_check(sid, dims, (hssize_t)8, (hssize_t)1, eblock1, eblock2); + + /* Check 
with unlimited dimension clipped to 6 */ + dims[1] = 6; + eblock1[4] = 3; + eblock2[4] = 5; + test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2); + + /* Set offset of selection */ + offset[1] = (hssize_t)-1; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Check with adjusted offset (should not affect result) */ + test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2); + + /* Set offset of selection */ + offset[1] = (hssize_t)3; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* Check with adjusted offset (should not affect result) */ + test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2); + + /* Reset offset of selection */ + offset[1] = (hssize_t)0; + ret = H5Soffset_simple(sid, offset); + CHECK(ret, FAIL, "H5Soffset_simple"); + + /* + * Now try invalid operations + */ + H5E_BEGIN_TRY + { + /* Try multiple unlimited dimensions */ + start[0] = 1; + start[1] = 2; + start[2] = 1; + stride[0] = 1; + stride[1] = 3; + stride[2] = 3; + count[0] = 1; + count[1] = H5S_UNLIMITED; + count[2] = H5S_UNLIMITED; + block[0] = 2; + block[1] = 2; + block[2] = 2; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + + /* Try unlimited count and block */ + count[2] = 2; + block[1] = H5S_UNLIMITED; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + } + H5E_END_TRY + + /* Try operations with two unlimited selections */ + block[1] = 2; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + H5E_BEGIN_TRY + { + ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, NULL, count, NULL); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + ret = H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, NULL, count, NULL); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, start, NULL, count, NULL); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTB, start, NULL, count, NULL); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTA, start, NULL, count, NULL); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + } + H5E_END_TRY + + /* Try invalid combination operations */ + H5E_BEGIN_TRY + { + ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, NULL, block, NULL); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, start, NULL, block, NULL); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTB, start, NULL, block, NULL); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + } + H5E_END_TRY + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, block, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + H5E_BEGIN_TRY + { + ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, start, stride, count, block); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTA, start, stride, count, block); + VERIFY(ret, FAIL, "H5Sselect_hyperslab"); + } + H5E_END_TRY + + /* + * Now test valid combination operations + */ + /* unlim AND non-unlim */ + count[0] = 1; + count[1] = H5S_UNLIMITED; + count[2] = 2; + block[0] = 2; + block[1] = 2; + block[2] = 2; + 
ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + start2[0] = 2; + start2[1] = 2; + start2[2] = 0; + count2[0] = 5; + count2[1] = 4; + count2[2] = 2; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_AND, start2, NULL, count2, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + eblock1[0] = 2; + eblock1[3] = 2; + eblock1[1] = 2; + eblock1[4] = 3; + eblock1[2] = 1; + eblock1[5] = 1; + eblock2[0] = 2; + eblock2[3] = 2; + eblock2[1] = 5; + eblock2[4] = 5; + eblock2[2] = 1; + eblock2[5] = 1; + dims[0] = 50; + dims[1] = 50; + dims[2] = 50; + test_hyper_unlim_check(sid, dims, (hssize_t)3, (hssize_t)2, eblock1, eblock2); + + /* unlim NOTA non-unlim */ + count[0] = 1; + count[1] = H5S_UNLIMITED; + count[2] = 2; + block[0] = 2; + block[1] = 2; + block[2] = 2; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + start2[0] = 1; + start2[1] = 5; + start2[2] = 2; + count2[0] = 2; + count2[1] = 2; + count2[2] = 6; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTA, start2, NULL, count2, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + eblock1[0] = 1; + eblock1[3] = 2; + eblock1[1] = 5; + eblock1[4] = 6; + eblock1[2] = 3; + eblock1[5] = 3; + eblock2[0] = 1; + eblock2[3] = 2; + eblock2[1] = 5; + eblock2[4] = 6; + eblock2[2] = 6; + eblock2[5] = 7; + dims[0] = 50; + dims[1] = 50; + dims[2] = 50; + test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2); + + /* non-unlim AND unlim */ + start2[0] = 2; + start2[1] = 2; + start2[2] = 0; + count2[0] = 5; + count2[1] = 4; + count2[2] = 2; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start2, NULL, count2, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + count[0] = 1; + count[1] = H5S_UNLIMITED; + count[2] = 2; + block[0] = 2; + block[1] = 2; + block[2] = 2; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + eblock1[0] = 2; + eblock1[3] = 2; + eblock1[1] = 2; + eblock1[4] = 3; + eblock1[2] = 1; + eblock1[5] = 1; + eblock2[0] = 2; + eblock2[3] = 2; + eblock2[1] = 5; + eblock2[4] = 5; + eblock2[2] = 1; + eblock2[5] = 1; + dims[0] = 50; + dims[1] = 50; + dims[2] = 50; + test_hyper_unlim_check(sid, dims, (hssize_t)3, (hssize_t)2, eblock1, eblock2); + + /* non-unlim NOTB unlim */ + start2[0] = 1; + start2[1] = 5; + start2[2] = 2; + count2[0] = 2; + count2[1] = 2; + count2[2] = 6; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start2, NULL, count2, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + count[0] = 1; + count[1] = H5S_UNLIMITED; + count[2] = 2; + block[0] = 2; + block[1] = 2; + block[2] = 2; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTB, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + eblock1[0] = 1; + eblock1[3] = 2; + eblock1[1] = 5; + eblock1[4] = 6; + eblock1[2] = 3; + eblock1[5] = 3; + eblock2[0] = 1; + eblock2[3] = 2; + eblock2[1] = 5; + eblock2[4] = 6; + eblock2[2] = 6; + eblock2[5] = 7; + dims[0] = 50; + dims[1] = 50; + dims[2] = 50; + test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2); + + /* Test H5Sget_select_npoints() */ + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + ssize_out = H5Sget_select_npoints(sid); + VERIFY(ssize_out, (hssize_t)H5S_UNLIMITED, "H5Sget_select_npoints"); + + /* Test H5Sget_select_hyper_nblocks() */ + H5E_BEGIN_TRY + { + ssize_out = 
H5Sget_select_hyper_nblocks(sid); + } + H5E_END_TRY; + VERIFY(ssize_out, (hssize_t)H5S_UNLIMITED, "H5Sget_select_hyper_nblocks"); + + /* Test H5Sget_select_bounds() */ + ret = H5Sget_select_bounds(sid, start2, count2); + CHECK(ret, FAIL, "H5Sget_select_bounds"); + VERIFY(start2[0], start[0], "H5Sget_select_bounds"); + VERIFY(start2[1], start[1], "H5Sget_select_bounds"); + VERIFY(start2[2], start[2], "H5Sget_select_bounds"); + VERIFY(count2[0], (long)(start[0] + (stride[0] * (count[0] - 1)) + block[0] - 1), "H5Sget_select_bounds"); + VERIFY(count2[1], H5S_UNLIMITED, "H5Sget_select_bounds"); + VERIFY(count2[2], (long)(start[2] + (stride[2] * (count[2] - 1)) + block[2] - 1), "H5Sget_select_bounds"); + + /* Close the dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* end test_hyper_unlim() */ + +/**************************************************************** +** +** test_internal_consistency(): Tests selections on dataspace, then +** verify that internal states of data structures of selections are +** consistent. +** +****************************************************************/ +static void +test_internal_consistency(void) +{ + hid_t all_sid; /* Dataspace ID with "all" selection */ + hid_t none_sid; /* Dataspace ID with "none" selection */ + hid_t single_pt_sid; /* Dataspace ID with single point selection */ + hid_t mult_pt_sid; /* Dataspace ID with multiple point selection */ + hid_t single_hyper_sid; /* Dataspace ID with single block hyperslab selection */ + hid_t single_hyper_all_sid; /* Dataspace ID with single block hyperslab + * selection that is the entire dataspace + */ + hid_t single_hyper_pt_sid; /* Dataspace ID with single block hyperslab + * selection that is the same as the single + * point selection + */ + hid_t regular_hyper_sid; /* Dataspace ID with regular hyperslab selection */ + hid_t irreg_hyper_sid; /* Dataspace ID with irregular hyperslab selection */ + hid_t none_hyper_sid; /* Dataspace ID with "no hyperslabs" selection */ + hid_t scalar_all_sid; /* ID for scalar dataspace with "all" selection */ + hid_t scalar_none_sid; /* ID for scalar dataspace with "none" selection */ + hid_t tmp_sid; /* Temporary dataspace ID */ + hsize_t dims[] = {SPACE9_DIM1, SPACE9_DIM2}; + hsize_t coord1[1][SPACE2_RANK]; /* Coordinates for single point selection */ + hsize_t coord2[SPACE9_DIM2][SPACE9_RANK]; /* Coordinates for multiple point selection */ + hsize_t start[SPACE9_RANK]; /* Hyperslab start */ + hsize_t stride[SPACE9_RANK]; /* Hyperslab stride */ + hsize_t count[SPACE9_RANK]; /* Hyperslab block count */ + hsize_t block[SPACE9_RANK]; /* Hyperslab block size */ +#if 0 + htri_t check; /* Shape comparison return value */ +#endif + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(6, ("Testing Consistency of Internal States\n")); + HDassert(SPACE9_DIM2 >= POINT1_NPOINTS); + + /* Create dataspace for "all" selection */ + all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(all_sid, FAIL, "H5Screate_simple"); + + /* Select entire extent for dataspace */ + ret = H5Sselect_all(all_sid); + CHECK(ret, FAIL, "H5Sselect_all"); + + /* Create dataspace for "none" selection */ + none_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(none_sid, FAIL, "H5Screate_simple"); + + /* Un-Select entire extent for dataspace */ + ret = H5Sselect_none(none_sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Create dataspace for single point selection */ + single_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + 
CHECK(single_pt_sid, FAIL, "H5Screate_simple");
+
+    /* Select single point for single point selection */
+    coord1[0][0] = 2;
+    coord1[0][1] = 2;
+    ret = H5Sselect_elements(single_pt_sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord1);
+    CHECK(ret, FAIL, "H5Sselect_elements");
+
+    /* Create dataspace for multiple point selection */
+    mult_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+    CHECK(mult_pt_sid, FAIL, "H5Screate_simple");
+
+    /* Select sequence of ten points for multiple point selection */
+    coord2[0][0] = 2;
+    coord2[0][1] = 2;
+    coord2[1][0] = 7;
+    coord2[1][1] = 2;
+    coord2[2][0] = 1;
+    coord2[2][1] = 4;
+    coord2[3][0] = 2;
+    coord2[3][1] = 6;
+    coord2[4][0] = 0;
+    coord2[4][1] = 8;
+    coord2[5][0] = 3;
+    coord2[5][1] = 2;
+    coord2[6][0] = 4;
+    coord2[6][1] = 4;
+    coord2[7][0] = 1;
+    coord2[7][1] = 0;
+    coord2[8][0] = 5;
+    coord2[8][1] = 1;
+    coord2[9][0] = 9;
+    coord2[9][1] = 3;
+    ret = H5Sselect_elements(mult_pt_sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2);
+    CHECK(ret, FAIL, "H5Sselect_elements");
+
+    /* Create dataspace for single hyperslab selection */
+    single_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+    CHECK(single_hyper_sid, FAIL, "H5Screate_simple");
+
+    /* Select 10x10 hyperslab for single hyperslab selection */
+    start[0] = 1;
+    start[1] = 1;
+    stride[0] = 1;
+    stride[1] = 1;
+    count[0] = 1;
+    count[1] = 1;
+    block[0] = (SPACE9_DIM1 - 2);
+    block[1] = (SPACE9_DIM2 - 2);
+    ret = H5Sselect_hyperslab(single_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Create dataspace for single hyperslab selection with entire extent selected */
+    single_hyper_all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+    CHECK(single_hyper_all_sid, FAIL, "H5Screate_simple");
+
+    /* Select entire extent for hyperslab selection */
+    start[0] = 0;
+    start[1] = 0;
+    stride[0] = 1;
+    stride[1] = 1;
+    count[0] = 1;
+    count[1] = 1;
+    block[0] = SPACE9_DIM1;
+    block[1] = SPACE9_DIM2;
+    ret = H5Sselect_hyperslab(single_hyper_all_sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Create dataspace for single hyperslab selection with single point selected */
+    single_hyper_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+    CHECK(single_hyper_pt_sid, FAIL, "H5Screate_simple");
+
+    /* Select single point for hyperslab selection */
+    start[0] = 2;
+    start[1] = 2;
+    stride[0] = 1;
+    stride[1] = 1;
+    count[0] = 1;
+    count[1] = 1;
+    block[0] = 1;
+    block[1] = 1;
+    ret = H5Sselect_hyperslab(single_hyper_pt_sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Create dataspace for regular hyperslab selection */
+    regular_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+    CHECK(regular_hyper_sid, FAIL, "H5Screate_simple");
+
+    /* Select regular, strided hyperslab selection */
+    start[0] = 2;
+    start[1] = 2;
+    stride[0] = 2;
+    stride[1] = 2;
+    count[0] = 5;
+    count[1] = 2;
+    block[0] = 1;
+    block[1] = 1;
+    ret = H5Sselect_hyperslab(regular_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Create dataspace for irregular hyperslab selection */
+    irreg_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+    CHECK(irreg_hyper_sid, FAIL, "H5Screate_simple");
+
+    /* Create irregular hyperslab selection by OR'ing two blocks together */
+    start[0] = 2;
+    start[1] = 2;
+    stride[0] = 1;
+    stride[1] = 1;
+    count[0] = 1;
+    count[1] = 1;
+
block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(irreg_hyper_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 4; + start[1] = 4; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 3; + block[1] = 3; + ret = H5Sselect_hyperslab(irreg_hyper_sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create dataspace for "no" hyperslab selection */ + none_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); + CHECK(none_hyper_sid, FAIL, "H5Screate_simple"); + + /* Create "no" hyperslab selection by XOR'ing same blocks together */ + start[0] = 2; + start[1] = 2; + stride[0] = 1; + stride[1] = 1; + count[0] = 1; + count[1] = 1; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_XOR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create scalar dataspace for "all" selection */ + scalar_all_sid = H5Screate(H5S_SCALAR); + CHECK(scalar_all_sid, FAIL, "H5Screate"); + + /* Create scalar dataspace for "none" selection */ + scalar_none_sid = H5Screate(H5S_SCALAR); + CHECK(scalar_none_sid, FAIL, "H5Screate"); + + /* Un-Select entire extent for dataspace */ + ret = H5Sselect_none(scalar_none_sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Test all the selections created */ + + /* Test the copy of itself */ + tmp_sid = H5Scopy(all_sid); + CHECK(tmp_sid, FAIL, "H5Scopy"); +#if 0 + check = H5S__internal_consistency_test(tmp_sid); + VERIFY(check, TRUE, "H5S__internal_consistency_test"); +#endif + ret = H5Sclose(tmp_sid); + CHECK(ret, FAIL, "H5Sclose"); +#if 0 + /* Test "none" selection */ + check = H5S__internal_consistency_test(none_sid); + VERIFY(check, TRUE, "H5S__internal_consistency_test"); + + /* Test single point selection */ + check = H5S__internal_consistency_test(single_pt_sid); + VERIFY(check, TRUE, "H5S__internal_consistency_test"); + + /* Test multiple point selection */ + check = H5S__internal_consistency_test(mult_pt_sid); + VERIFY(check, TRUE, "H5S__internal_consistency_test"); + + /* Test "plain" single hyperslab selection */ + check = H5S__internal_consistency_test(single_hyper_sid); + VERIFY(check, TRUE, "H5S__internal_consistency_test"); + + /* Test "all" single hyperslab selection */ + check = H5S__internal_consistency_test(single_hyper_all_sid); + VERIFY(check, TRUE, "H5S__internal_consistency_test"); + + /* Test "single point" single hyperslab selection */ + check = H5S__internal_consistency_test(single_hyper_pt_sid); + VERIFY(check, TRUE, "H5S__internal_consistency_test"); + + /* Test regular, strided hyperslab selection */ + check = H5S__internal_consistency_test(regular_hyper_sid); + VERIFY(check, TRUE, "H5S__internal_consistency_test"); + + /* Test irregular hyperslab selection */ + check = H5S__internal_consistency_test(irreg_hyper_sid); + VERIFY(check, TRUE, "H5S__internal_consistency_test"); + + /* Test "no" hyperslab selection */ + check = H5S__internal_consistency_test(none_hyper_sid); + VERIFY(check, TRUE, "H5S__internal_consistency_test"); + + /* Test scalar "all" hyperslab selection */ + check = H5S__internal_consistency_test(scalar_all_sid); + VERIFY(check, TRUE, "H5S__internal_consistency_test"); + + /* Test scalar "none" hyperslab selection */ + check = H5S__internal_consistency_test(scalar_none_sid); + VERIFY(check, TRUE, 
"H5S__internal_consistency_test"); +#endif + + /* Close dataspaces */ + ret = H5Sclose(all_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(none_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(single_pt_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(mult_pt_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(single_hyper_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(single_hyper_all_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(single_hyper_pt_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(regular_hyper_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(irreg_hyper_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(none_hyper_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(scalar_all_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(scalar_none_sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_internal_consistency() */ + +/**************************************************************** +** +** test_irreg_io(): Tests unusual selections on datasets, to stress the +** new hyperslab code. +** +****************************************************************/ +static void +test_irreg_io(void) +{ + hid_t fid; /* File ID */ + hid_t did; /* Dataset ID */ + hid_t dcpl_id; /* Dataset creation property list ID */ + hid_t sid; /* File dataspace ID */ + hid_t mem_sid; /* Memory dataspace ID */ + hsize_t dims[] = {6, 12}; /* Dataspace dimensions */ + hsize_t chunk_dims[] = {2, 2}; /* Chunk dimensions */ + hsize_t mem_dims[] = {32}; /* Memory dataspace dimensions */ + hsize_t start[2]; /* Hyperslab start */ + hsize_t stride[2]; /* Hyperslab stride */ + hsize_t count[2]; /* Hyperslab block count */ + hsize_t block[2]; /* Hyperslab block size */ + unsigned char wbuf[72]; /* Write buffer */ + unsigned char rbuf[32]; /* Read buffer */ + unsigned u; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(6, ("Testing Irregular Hyperslab I/O\n")); + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create dataspace for dataset */ + sid = H5Screate_simple(2, dims, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Set chunk dimensions for dataset */ + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl_id, FAIL, "H5Pcreate"); + ret = H5Pset_chunk(dcpl_id, 2, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* Create a dataset */ + did = H5Dcreate2(fid, SPACE1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dcreate2"); + + /* Initialize the write buffer */ + for (u = 0; u < 72; u++) + wbuf[u] = (unsigned char)u; + + /* Write entire dataset to disk */ + ret = H5Dwrite(did, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close the DCPL */ + ret = H5Pclose(dcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Create dataspace for memory selection */ + mem_sid = H5Screate_simple(1, mem_dims, NULL); + CHECK(mem_sid, FAIL, "H5Screate_simple"); + + /* Select 'L'-shaped region within dataset */ + start[0] = 0; + start[1] = 10; + stride[0] = 1; + stride[1] = 1; + count[0] = 4; + count[1] = 2; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 4; + start[1] = 0; + stride[0] = 1; + stride[1] = 1; + count[0] = 2; + count[1] = 12; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid, 
H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Reset the buffer */ + HDmemset(rbuf, 0, sizeof(rbuf)); + + /* Read selection from disk */ + ret = H5Dread(did, H5T_NATIVE_UCHAR, mem_sid, sid, H5P_DEFAULT, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Close everything */ + ret = H5Sclose(mem_sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); +} /* test_irreg_io() */ + +/**************************************************************** +** +** test_sel_iter(): Test selection iterator API routines. +** +****************************************************************/ +static void +test_sel_iter(void) +{ + hid_t sid; /* Dataspace ID */ + hid_t iter_id; /* Dataspace selection iterator ID */ + hsize_t dims1[] = {6, 12}; /* 2-D Dataspace dimensions */ + hsize_t coord1[POINT1_NPOINTS][2]; /* Coordinates for point selection */ + hsize_t start[2]; /* Hyperslab start */ + hsize_t stride[2]; /* Hyperslab stride */ + hsize_t count[2]; /* Hyperslab block count */ + hsize_t block[2]; /* Hyperslab block size */ + size_t nseq; /* # of sequences retrieved */ + size_t nbytes; /* # of bytes retrieved */ + hsize_t off[SEL_ITER_MAX_SEQ]; /* Offsets for retrieved sequences */ + size_t len[SEL_ITER_MAX_SEQ]; /* Lengths for retrieved sequences */ + H5S_sel_type sel_type; /* Selection type */ + unsigned sel_share; /* Whether to share selection with dataspace */ + unsigned sel_iter_flags; /* Flags for selection iterator creation */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(6, ("Testing Dataspace Selection Iterators\n")); + + /* Create dataspace */ + sid = H5Screate_simple(2, dims1, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Try creating selection iterator object with bad parameters */ + H5E_BEGIN_TRY + { /* Bad dataspace ID */ + iter_id = H5Ssel_iter_create(H5I_INVALID_HID, (size_t)1, (unsigned)0); + } + H5E_END_TRY; + VERIFY(iter_id, FAIL, "H5Ssel_iter_create"); + H5E_BEGIN_TRY + { /* Bad element size */ + iter_id = H5Ssel_iter_create(sid, (size_t)0, (unsigned)0); + } + H5E_END_TRY; + VERIFY(iter_id, FAIL, "H5Ssel_iter_create"); + H5E_BEGIN_TRY + { /* Bad flag(s) */ + iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)0xffff); + } + H5E_END_TRY; + VERIFY(iter_id, FAIL, "H5Ssel_iter_create"); + + /* Try closing selection iterator, with bad parameters */ + H5E_BEGIN_TRY + { /* Invalid ID */ + ret = H5Ssel_iter_close(H5I_INVALID_HID); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Ssel_iter_close"); + H5E_BEGIN_TRY + { /* Not a selection iterator ID */ + ret = H5Ssel_iter_close(sid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Ssel_iter_close"); + + /* Try with no selection sharing, and with sharing */ + for (sel_share = 0; sel_share < 2; sel_share++) { + /* Set selection iterator sharing flags */ + if (sel_share) + sel_iter_flags = H5S_SEL_ITER_SHARE_WITH_DATASPACE; + else + sel_iter_flags = 0; + + /* Create selection iterator object */ + iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags); + CHECK(iter_id, FAIL, "H5Ssel_iter_create"); + + /* Close selection iterator */ + ret = H5Ssel_iter_close(iter_id); + CHECK(ret, FAIL, "H5Ssel_iter_close"); + + /* Try closing selection iterator twice */ + H5E_BEGIN_TRY + { /* Invalid ID */ + ret = H5Ssel_iter_close(iter_id); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Ssel_iter_close"); + + /* Create 
selection iterator object */ + iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags); + CHECK(iter_id, FAIL, "H5Ssel_iter_create"); + + /* Try resetting selection iterator with bad parameters */ + H5E_BEGIN_TRY + { + ret = H5Ssel_iter_reset(H5I_INVALID_HID, sid); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Ssel_iter_reset"); + H5E_BEGIN_TRY + { + ret = H5Ssel_iter_reset(iter_id, H5I_INVALID_HID); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Ssel_iter_reset"); + + /* Try retrieving sequences, with bad parameters */ + H5E_BEGIN_TRY + { /* Invalid ID */ + ret = H5Ssel_iter_get_seq_list(H5I_INVALID_HID, (size_t)1, (size_t)1, &nseq, &nbytes, off, len); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list"); + H5E_BEGIN_TRY + { /* Invalid nseq pointer */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, NULL, &nbytes, off, len); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list"); + H5E_BEGIN_TRY + { /* Invalid nbytes pointer */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, &nseq, NULL, off, len); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list"); + H5E_BEGIN_TRY + { /* Invalid offset array */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, &nseq, &nbytes, NULL, len); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list"); + H5E_BEGIN_TRY + { /* Invalid length array */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, &nseq, &nbytes, off, NULL); + } + H5E_END_TRY; + VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list"); + + /* Close selection iterator */ + ret = H5Ssel_iter_close(iter_id); + CHECK(ret, FAIL, "H5Ssel_iter_close"); + + /* Test iterators on various basic selection types */ + for (sel_type = H5S_SEL_NONE; sel_type <= H5S_SEL_ALL; sel_type = (H5S_sel_type)(sel_type + 1)) { + switch (sel_type) { + case H5S_SEL_NONE: /* "None" selection */ + ret = H5Sselect_none(sid); + CHECK(ret, FAIL, "H5Sselect_none"); + break; + + case H5S_SEL_POINTS: /* Point selection */ + /* Select sequence of ten points */ + coord1[0][0] = 0; + coord1[0][1] = 9; + coord1[1][0] = 1; + coord1[1][1] = 2; + coord1[2][0] = 2; + coord1[2][1] = 4; + coord1[3][0] = 0; + coord1[3][1] = 6; + coord1[4][0] = 1; + coord1[4][1] = 8; + coord1[5][0] = 2; + coord1[5][1] = 10; + coord1[6][0] = 0; + coord1[6][1] = 11; + coord1[7][0] = 1; + coord1[7][1] = 4; + coord1[8][0] = 2; + coord1[8][1] = 1; + coord1[9][0] = 0; + coord1[9][1] = 3; + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, + (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + break; + + case H5S_SEL_HYPERSLABS: /* Hyperslab selection */ + /* Select regular hyperslab */ + start[0] = 3; + start[1] = 0; + stride[0] = 2; + stride[1] = 2; + count[0] = 2; + count[1] = 5; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + break; + + case H5S_SEL_ALL: /* "All" selection */ + ret = H5Sselect_all(sid); + CHECK(ret, FAIL, "H5Sselect_all"); + break; + + case H5S_SEL_ERROR: + case H5S_SEL_N: + default: + HDassert(0 && "Can't occur"); + break; + } /* end switch */ + + /* Create selection iterator object */ + iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags); + CHECK(iter_id, FAIL, "H5Ssel_iter_create"); + + /* Try retrieving no sequences, with 0 for maxseq & maxbytes */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)0, (size_t)1, &nseq, &nbytes, off, len); + CHECK(ret, FAIL, 
"H5Ssel_iter_get_seq_list"); + VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)0, &nseq, &nbytes, off, len); + CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); + VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); + + /* Try retrieving all sequences */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, + &nbytes, off, len); + CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); + + /* Check results from retrieving sequence list */ + switch (sel_type) { + case H5S_SEL_NONE: /* "None" selection */ + VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_POINTS: /* Point selection */ + VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_HYPERSLABS: /* Hyperslab selection */ + VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_ALL: /* "All" selection */ + VERIFY(nseq, 1, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 72, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_ERROR: + case H5S_SEL_N: + default: + HDassert(0 && "Can't occur"); + break; + } /* end switch */ + + /* Close selection iterator */ + ret = H5Ssel_iter_close(iter_id); + CHECK(ret, FAIL, "H5Ssel_iter_close"); + } /* end for */ + + /* Create selection iterator object */ + iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags); + CHECK(iter_id, FAIL, "H5Ssel_iter_create"); + + /* Test iterators on various basic selection types using + * H5Ssel_iter_reset instead of creating multiple iterators */ + for (sel_type = H5S_SEL_NONE; sel_type <= H5S_SEL_ALL; sel_type = (H5S_sel_type)(sel_type + 1)) { + switch (sel_type) { + case H5S_SEL_NONE: /* "None" selection */ + ret = H5Sselect_none(sid); + CHECK(ret, FAIL, "H5Sselect_none"); + break; + + case H5S_SEL_POINTS: /* Point selection */ + /* Select sequence of ten points */ + coord1[0][0] = 0; + coord1[0][1] = 9; + coord1[1][0] = 1; + coord1[1][1] = 2; + coord1[2][0] = 2; + coord1[2][1] = 4; + coord1[3][0] = 0; + coord1[3][1] = 6; + coord1[4][0] = 1; + coord1[4][1] = 8; + coord1[5][0] = 2; + coord1[5][1] = 10; + coord1[6][0] = 0; + coord1[6][1] = 11; + coord1[7][0] = 1; + coord1[7][1] = 4; + coord1[8][0] = 2; + coord1[8][1] = 1; + coord1[9][0] = 0; + coord1[9][1] = 3; + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, + (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + break; + + case H5S_SEL_HYPERSLABS: /* Hyperslab selection */ + /* Select regular hyperslab */ + start[0] = 3; + start[1] = 0; + stride[0] = 2; + stride[1] = 2; + count[0] = 2; + count[1] = 5; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + break; + + case H5S_SEL_ALL: /* "All" selection */ + ret = H5Sselect_all(sid); + CHECK(ret, FAIL, "H5Sselect_all"); + break; + + case H5S_SEL_ERROR: + case H5S_SEL_N: + default: + HDassert(0 && "Can't occur"); + break; + } /* end switch */ + + /* Try retrieving no sequences, with 0 for maxseq & maxbytes */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)0, (size_t)1, &nseq, &nbytes, off, len); + CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); + VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 0, 
"H5Ssel_iter_get_seq_list"); + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)0, &nseq, &nbytes, off, len); + CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); + VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); + + /* Reset iterator */ + ret = H5Ssel_iter_reset(iter_id, sid); + CHECK(ret, FAIL, "H5Ssel_iter_reset"); + + /* Try retrieving all sequences */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, + &nbytes, off, len); + CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); + + /* Check results from retrieving sequence list */ + switch (sel_type) { + case H5S_SEL_NONE: /* "None" selection */ + VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_POINTS: /* Point selection */ + VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_HYPERSLABS: /* Hyperslab selection */ + VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_ALL: /* "All" selection */ + VERIFY(nseq, 1, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 72, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_ERROR: + case H5S_SEL_N: + default: + HDassert(0 && "Can't occur"); + break; + } /* end switch */ + + /* Reset iterator */ + ret = H5Ssel_iter_reset(iter_id, sid); + CHECK(ret, FAIL, "H5Ssel_iter_reset"); + + /* Try retrieving all sequences again */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, + &nbytes, off, len); + CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); + + /* Check results from retrieving sequence list */ + switch (sel_type) { + case H5S_SEL_NONE: /* "None" selection */ + VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_POINTS: /* Point selection */ + VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_HYPERSLABS: /* Hyperslab selection */ + VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_ALL: /* "All" selection */ + VERIFY(nseq, 1, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 72, "H5Ssel_iter_get_seq_list"); + break; + + case H5S_SEL_ERROR: + case H5S_SEL_N: + default: + HDassert(0 && "Can't occur"); + break; + } /* end switch */ + + /* Reset iterator */ + ret = H5Ssel_iter_reset(iter_id, sid); + CHECK(ret, FAIL, "H5Ssel_iter_reset"); + } /* end for */ + + /* Close selection iterator */ + ret = H5Ssel_iter_close(iter_id); + CHECK(ret, FAIL, "H5Ssel_iter_close"); + + /* Point selection which will merge into smaller # of sequences */ + coord1[0][0] = 0; + coord1[0][1] = 9; + coord1[1][0] = 0; + coord1[1][1] = 10; + coord1[2][0] = 0; + coord1[2][1] = 11; + coord1[3][0] = 0; + coord1[3][1] = 6; + coord1[4][0] = 1; + coord1[4][1] = 8; + coord1[5][0] = 2; + coord1[5][1] = 10; + coord1[6][0] = 0; + coord1[6][1] = 11; + coord1[7][0] = 1; + coord1[7][1] = 4; + coord1[8][0] = 1; + coord1[8][1] = 5; + coord1[9][0] = 1; + coord1[9][1] = 6; + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Create selection iterator object */ + iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags); + CHECK(iter_id, FAIL, "H5Ssel_iter_create"); + + /* Try retrieving all 
sequences */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, + &nbytes, off, len); + CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); + VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); + + /* Reset iterator */ + ret = H5Ssel_iter_reset(iter_id, sid); + CHECK(ret, FAIL, "H5Ssel_iter_reset"); + + /* Try retrieving all sequences again */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, + &nbytes, off, len); + CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); + VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); + + /* Close selection iterator */ + ret = H5Ssel_iter_close(iter_id); + CHECK(ret, FAIL, "H5Ssel_iter_close"); + + /* Select irregular hyperslab, which will merge into smaller # of sequences */ + start[0] = 3; + start[1] = 0; + stride[0] = 2; + stride[1] = 2; + count[0] = 2; + count[1] = 5; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + start[0] = 3; + start[1] = 3; + stride[0] = 2; + stride[1] = 2; + count[0] = 2; + count[1] = 5; + block[0] = 1; + block[1] = 1; + ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create selection iterator object */ + iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags); + CHECK(iter_id, FAIL, "H5Ssel_iter_create"); + + /* Try retrieving all sequences */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, + &nbytes, off, len); + CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); + VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 20, "H5Ssel_iter_get_seq_list"); + + /* Reset iterator */ + ret = H5Ssel_iter_reset(iter_id, sid); + CHECK(ret, FAIL, "H5Ssel_iter_reset"); + + /* Try retrieving all sequences again */ + ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, + &nbytes, off, len); + CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); + VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list"); + VERIFY(nbytes, 20, "H5Ssel_iter_get_seq_list"); + + /* Close selection iterator */ + ret = H5Ssel_iter_close(iter_id); + CHECK(ret, FAIL, "H5Ssel_iter_close"); + + } /* end for */ + + /* Close dataspace */ + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); +} /* test_sel_iter() */ + +/**************************************************************** +** +** test_select_intersect_block(): Test selections on dataspace, +** verify that "intersect block" routine is working correctly. 
+** +****************************************************************/ +static void +test_select_intersect_block(void) +{ + hid_t sid; /* Dataspace ID */ + hsize_t dims1[] = {6, 12}; /* 2-D Dataspace dimensions */ + hsize_t block_start[] = {1, 3}; /* Start offset for block */ + hsize_t block_end[] = {2, 5}; /* End offset for block */ + hsize_t block_end2[] = {0, 5}; /* Bad end offset for block */ + hsize_t block_end3[] = {2, 2}; /* Another bad end offset for block */ + hsize_t block_end4[] = {1, 3}; /* End offset that makes a single element block */ + hsize_t coord[10][2]; /* Coordinates for point selection */ + hsize_t start[2]; /* Starting location of hyperslab */ + hsize_t stride[2]; /* Stride of hyperslab */ + hsize_t count[2]; /* Element count of hyperslab */ + hsize_t block[2]; /* Block size of hyperslab */ + htri_t status; /* Intersection status */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(6, ("Testing Dataspace Selection Block Intersection\n")); + + /* Create dataspace */ + sid = H5Screate_simple(2, dims1, NULL); + CHECK(sid, FAIL, "H5Screate_simple"); + + /* Try intersection calls with bad parameters */ + H5E_BEGIN_TRY + { /* Bad dataspace ID */ + status = H5Sselect_intersect_block(H5I_INVALID_HID, block_start, block_end); + } + H5E_END_TRY; + VERIFY(status, FAIL, "H5Sselect_intersect_block"); + H5E_BEGIN_TRY + { /* Bad start pointer */ + status = H5Sselect_intersect_block(sid, NULL, block_end); + } + H5E_END_TRY; + VERIFY(status, FAIL, "H5Sselect_intersect_block"); + H5E_BEGIN_TRY + { /* Bad end pointer */ + status = H5Sselect_intersect_block(sid, block_start, NULL); + } + H5E_END_TRY; + VERIFY(status, FAIL, "H5Sselect_intersect_block"); + H5E_BEGIN_TRY + { /* Invalid block */ + status = H5Sselect_intersect_block(sid, block_start, block_end2); + } + H5E_END_TRY; + VERIFY(status, FAIL, "H5Sselect_intersect_block"); + H5E_BEGIN_TRY + { /* Another invalid block */ + status = H5Sselect_intersect_block(sid, block_start, block_end3); + } + H5E_END_TRY; + VERIFY(status, FAIL, "H5Sselect_intersect_block"); + + /* Set selection to 'none' */ + ret = H5Sselect_none(sid); + CHECK(ret, FAIL, "H5Sselect_none"); + + /* Test block intersection with 'none' selection (always false) */ + status = H5Sselect_intersect_block(sid, block_start, block_end); + VERIFY(status, FALSE, "H5Sselect_intersect_block"); + + /* Set selection to 'all' */ + ret = H5Sselect_all(sid); + CHECK(ret, FAIL, "H5Sselect_all"); + + /* Test block intersection with 'all' selection (always true) */ + status = H5Sselect_intersect_block(sid, block_start, block_end); + VERIFY(status, TRUE, "H5Sselect_intersect_block"); + + /* Select sequence of ten points */ + coord[0][0] = 0; + coord[0][1] = 10; + coord[1][0] = 1; + coord[1][1] = 2; + coord[2][0] = 2; + coord[2][1] = 4; + coord[3][0] = 0; + coord[3][1] = 6; + coord[4][0] = 1; + coord[4][1] = 8; + coord[5][0] = 2; + coord[5][1] = 11; + coord[6][0] = 0; + coord[6][1] = 4; + coord[7][0] = 1; + coord[7][1] = 0; + coord[8][0] = 2; + coord[8][1] = 1; + coord[9][0] = 0; + coord[9][1] = 3; + ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)10, (const hsize_t *)coord); + CHECK(ret, FAIL, "H5Sselect_elements"); + + /* Test block intersection with 'point' selection */ + status = H5Sselect_intersect_block(sid, block_start, block_end); + VERIFY(status, TRUE, "H5Sselect_intersect_block"); + status = H5Sselect_intersect_block(sid, block_start, block_end4); + VERIFY(status, FALSE, "H5Sselect_intersect_block"); + + /* Select single 
4x6 hyperslab block at (2,1) */
+    start[0] = 2;
+    start[1] = 1;
+    stride[0] = 1;
+    stride[1] = 1;
+    count[0] = 4;
+    count[1] = 6;
+    block[0] = 1;
+    block[1] = 1;
+    ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Test block intersection with single 'hyperslab' selection */
+    status = H5Sselect_intersect_block(sid, block_start, block_end);
+    VERIFY(status, TRUE, "H5Sselect_intersect_block");
+    status = H5Sselect_intersect_block(sid, block_start, block_end4);
+    VERIFY(status, FALSE, "H5Sselect_intersect_block");
+
+    /* 'OR' another hyperslab block in, making an irregular hyperslab selection */
+    start[0] = 3;
+    start[1] = 2;
+    stride[0] = 1;
+    stride[1] = 1;
+    count[0] = 4;
+    count[1] = 6;
+    block[0] = 1;
+    block[1] = 1;
+    ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Test block intersection with irregular 'hyperslab' selection */
+    status = H5Sselect_intersect_block(sid, block_start, block_end);
+    VERIFY(status, TRUE, "H5Sselect_intersect_block");
+    status = H5Sselect_intersect_block(sid, block_start, block_end4);
+    VERIFY(status, FALSE, "H5Sselect_intersect_block");
+
+    /* Select regular, strided hyperslab selection */
+    start[0] = 2;
+    start[1] = 1;
+    stride[0] = 2;
+    stride[1] = 2;
+    count[0] = 2;
+    count[1] = 4;
+    block[0] = 1;
+    block[1] = 1;
+    ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Test block intersection with regular, strided 'hyperslab' selection */
+    status = H5Sselect_intersect_block(sid, block_start, block_end);
+    VERIFY(status, TRUE, "H5Sselect_intersect_block");
+    status = H5Sselect_intersect_block(sid, block_start, block_end4);
+    VERIFY(status, FALSE, "H5Sselect_intersect_block");
+
+    /* Close dataspace */
+    ret = H5Sclose(sid);
+    CHECK(ret, FAIL, "H5Sclose");
+} /* test_select_intersect_block() */
+
+/****************************************************************
+**
+** test_hyper_io_1d():
+** Test to verify that every selected 10th element in the 1-d file
+** dataspace is read correctly into the 1-d contiguous memory space.
+** This is modeled after the test scenario described in HDFFV-10585,
+** which demonstrated the hyperslab slowness. A fix to speed up
+** performance is in place to handle the special case of reading a
+** 1-d disjoint file dataspace into a 1-d single-block contiguous
+** memory space.
+**
+****************************************************************/
+static void
+test_hyper_io_1d(void)
+{
+    hid_t fid;  /* File ID */
+    hid_t did;  /* Dataset ID */
+    hid_t sid, mid; /* Dataspace IDs */
+    hid_t dcpl; /* Dataset creation property list ID */
+    hsize_t dims[1], maxdims[1], dimsm[1]; /* Dataset dimension sizes */
+    hsize_t chunk_dims[1]; /* Chunk dimension size */
+    hsize_t offset[1]; /* Starting offset for hyperslab */
+    hsize_t stride[1]; /* Distance between blocks in the hyperslab selection */
+    hsize_t count[1]; /* # of blocks in the hyperslab selection */
+    hsize_t block[1]; /* Size of block in the hyperslab selection */
+    unsigned int wdata[CHUNKSZ]; /* Data to be written */
+    unsigned int rdata[NUM_ELEMENTS / 10]; /* Data to be read */
+    herr_t ret; /* Generic return value */
+    unsigned i; /* Local index variable */
+
+    /* Output message about test being performed */
+    MESSAGE(6, ("Testing Hyperslab I/O for 1-d single block memory space\n"));
+
+    for (i = 0; i < CHUNKSZ; i++)
+        wdata[i] = i;
+
+    /* Create the file */
+    fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+    /* Create file dataspace */
+    dims[0] = CHUNKSZ;
+    maxdims[0] = H5S_UNLIMITED;
+    sid = H5Screate_simple(RANK, dims, maxdims);
+    CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+
+    /* Create memory dataspace */
+    dimsm[0] = CHUNKSZ;
+    mid = H5Screate_simple(RANK, dimsm, NULL);
+    CHECK(mid, H5I_INVALID_HID, "H5Screate_simple");
+
+    /* Set up to create a chunked dataset */
+    dcpl = H5Pcreate(H5P_DATASET_CREATE);
+    CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate");
+
+    chunk_dims[0] = CHUNKSZ;
+    ret = H5Pset_chunk(dcpl, RANK, chunk_dims);
+    CHECK(ret, FAIL, "H5Pset_chunk");
+
+    /* Create a chunked dataset */
+    did = H5Dcreate2(fid, DNAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+    CHECK(did, H5I_INVALID_HID, "H5Dcreate2");
+
+    /* Set up hyperslab selection for file dataspace */
+    offset[0] = 0;
+    stride[0] = 1;
+    count[0] = 1;
+    block[0] = CHUNKSZ;
+
+    /* Write to each chunk in the dataset */
+    for (i = 0; i < NUMCHUNKS; i++) {
+        /* Set the hyperslab selection */
+        ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, stride, count, block);
+        CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+        /* Write to the dataset */
+        ret = H5Dwrite(did, H5T_NATIVE_INT, mid, sid, H5P_DEFAULT, wdata);
+        CHECK(ret, FAIL, "H5Dwrite");
+
+        /* Extend the dataset's dataspace */
+        if (i < (NUMCHUNKS - 1)) {
+            offset[0] = offset[0] + CHUNKSZ;
+            dims[0] = dims[0] + CHUNKSZ;
+            ret = H5Dset_extent(did, dims);
+            CHECK(ret, FAIL, "H5Dset_extent");
+
+            /* Get the dataset's current dataspace */
+            sid = H5Dget_space(did);
+            CHECK(sid, H5I_INVALID_HID, "H5Dget_space");
+        }
+    }
+
+    /* Closing */
+    ret = H5Sclose(sid);
+    CHECK(ret, FAIL, "H5Sclose");
+    ret = H5Sclose(mid);
+    CHECK(ret, FAIL, "H5Sclose");
+    ret = H5Dclose(did);
+    CHECK(ret, FAIL, "H5Dclose");
+    ret = H5Pclose(dcpl);
+    CHECK(ret, FAIL, "H5Pclose");
+    ret = H5Fclose(fid);
+    CHECK(ret, FAIL, "H5Fclose");
+
+    /* Open the file */
+    fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+    CHECK(fid, H5I_INVALID_HID, "H5Fopen");
+
+    /* Open the dataset */
+    did = H5Dopen2(fid, DNAME, H5P_DEFAULT);
+    CHECK(did, H5I_INVALID_HID, "H5Dopen");
+
+    /* Set up to read every 10th element in file dataspace */
+    offset[0] = 1;
+    stride[0] = 10;
+    count[0] = NUM_ELEMENTS / 10;
+    block[0] = 1;
+
+    /* Get the dataset's dataspace */
+    sid = H5Dget_space(did);
+    CHECK(sid, H5I_INVALID_HID, "H5Dget_space");
+    ret =
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Set up contiguous memory dataspace for the selected elements */
+    dimsm[0] = count[0];
+    mid = H5Screate_simple(RANK, dimsm, NULL);
+    CHECK(mid, H5I_INVALID_HID, "H5Screate_simple");
+
+    /* Read all the selected 10th elements in the dataset into "rdata" */
+    ret = H5Dread(did, H5T_NATIVE_INT, mid, sid, H5P_DEFAULT, rdata);
+    CHECK(ret, FAIL, "H5Dread");
+
+    /* Verify data read is correct */
+    for (i = 0; i < 6; i += 2) {
+        VERIFY(rdata[i], 1, "H5Dread\n");
+        VERIFY(rdata[i + 1], 11, "H5Dread\n");
+    }
+
+    /* Closing */
+    ret = H5Sclose(mid);
+    CHECK(ret, FAIL, "H5Sclose");
+    ret = H5Sclose(sid);
+    CHECK(ret, FAIL, "H5Sclose");
+    ret = H5Dclose(did);
+    CHECK(ret, FAIL, "H5Dclose");
+    ret = H5Fclose(fid);
+    CHECK(ret, FAIL, "H5Fclose");
+
+} /* test_hyper_io_1d() */
+
+/****************************************************************
+**
+** test_h5s_set_extent_none:
+** Test to verify the behavior of dataspace code when passed
+** a dataspace modified by H5Sset_extent_none().
+**
+****************************************************************/
+static void
+test_h5s_set_extent_none(void)
+{
+    hid_t       sid = H5I_INVALID_HID;
+    hid_t       dst_sid = H5I_INVALID_HID;
+    hid_t       null_sid = H5I_INVALID_HID;
+    int         rank = 1;
+    hsize_t     current_dims = 123;
+    H5S_class_t cls;
+    int         out_rank;
+    hsize_t     out_dims;
+    hsize_t     out_maxdims;
+    hssize_t    out_points;
+    htri_t      equal;
+    herr_t      ret;
+
+    /* Specific values here don't matter as we're just going to reset */
+    sid = H5Screate_simple(rank, &current_dims, NULL);
+    CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+
+    /* Dataspace class will be H5S_NULL after this.
+     * In versions prior to 1.10.7 / 1.12.1 this would produce a
+     * dataspace with the internal H5S_NO_CLASS class.
+     */
+    ret = H5Sset_extent_none(sid);
+    CHECK(ret, FAIL, "H5Sset_extent_none");
+    cls = H5Sget_simple_extent_type(sid);
+    VERIFY(cls, H5S_NULL, "H5Sget_simple_extent_type");
+
+    /* Extent getters should generate normal results and not segfault.
+     */
+    out_rank = H5Sget_simple_extent_dims(sid, &out_dims, &out_maxdims);
+    VERIFY(out_rank, 0, "H5Sget_simple_extent_dims");
+    out_rank = H5Sget_simple_extent_ndims(sid);
+    VERIFY(out_rank, 0, "H5Sget_simple_extent_ndims");
+    out_points = H5Sget_simple_extent_npoints(sid);
+    VERIFY(out_points, 0, "H5Sget_simple_extent_npoints");
+
+    /* Check that copying the new (non-)extent works.
+     */
+    dst_sid = H5Screate_simple(rank, &current_dims, NULL);
+    CHECK(dst_sid, H5I_INVALID_HID, "H5Screate_simple");
+    ret = H5Sextent_copy(dst_sid, sid);
+    CHECK(ret, FAIL, "H5Sextent_copy");
+
+    /* Check that H5Sset_extent_none() produces the same extent as
+     * H5Screate(H5S_NULL).
+     */
+    null_sid = H5Screate(H5S_NULL);
+    CHECK(null_sid, H5I_INVALID_HID, "H5Screate");
+    equal = H5Sextent_equal(sid, null_sid);
+    VERIFY(equal, TRUE, "H5Sextent_equal");
+
+    /* Close */
+    ret = H5Sclose(sid);
+    CHECK(ret, FAIL, "H5Sclose");
+    ret = H5Sclose(dst_sid);
+    CHECK(ret, FAIL, "H5Sclose");
+    ret = H5Sclose(null_sid);
+    CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_h5s_set_extent_none() */
+
+/****************************************************************
+**
+** test_select(): Main H5S selection testing routine.
+** +****************************************************************/ +void +test_select(void) +{ + hid_t plist_id; /* Property list for reading random hyperslabs */ + hid_t fapl; /* Property list accessing the file */ + int mdc_nelmts; /* Metadata number of elements */ + size_t rdcc_nelmts; /* Raw data number of elements */ + size_t rdcc_nbytes; /* Raw data number of bytes */ + double rdcc_w0; /* Raw data write percentage */ + hssize_t offset[SPACE7_RANK] = {1, 1}; /* Offset for testing selection offsets */ + const char *env_h5_drvr; /* File Driver value from environment */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Selections\n")); + + /* Get the VFD to use */ + env_h5_drvr = HDgetenv(HDF5_DRIVER); + if (env_h5_drvr == NULL) + env_h5_drvr = "nomatch"; + + /* Create a dataset transfer property list */ + plist_id = H5Pcreate(H5P_DATASET_XFER); + CHECK(plist_id, FAIL, "H5Pcreate"); + + /* test I/O with a very small buffer for reads */ + ret = H5Pset_buffer(plist_id, (size_t)59, NULL, NULL); + CHECK(ret, FAIL, "H5Pset_buffer"); + + /* These next tests use the same file */ + test_select_hyper(H5P_DEFAULT); /* Test basic H5S hyperslab selection code */ + test_select_hyper(plist_id); /* Test basic H5S hyperslab selection code */ + test_select_point(H5P_DEFAULT); /* Test basic H5S element selection code, also tests appending to existing + element selections */ + test_select_point(plist_id); /* Test basic H5S element selection code, also tests appending to existing + element selections */ + test_select_all(H5P_DEFAULT); /* Test basic all & none selection code */ + test_select_all(plist_id); /* Test basic all & none selection code */ + test_select_all_hyper(H5P_DEFAULT); /* Test basic all & none selection code */ + test_select_all_hyper(plist_id); /* Test basic all & none selection code */ + + /* These next tests use the same file */ + test_select_combo(); /* Test combined hyperslab & element selection code */ + test_select_hyper_stride(H5P_DEFAULT); /* Test strided hyperslab selection code */ + test_select_hyper_stride(plist_id); /* Test strided hyperslab selection code */ + test_select_hyper_contig(H5T_STD_U16LE, H5P_DEFAULT); /* Test contiguous hyperslab selection code */ + test_select_hyper_contig(H5T_STD_U16LE, plist_id); /* Test contiguous hyperslab selection code */ + test_select_hyper_contig(H5T_STD_U16BE, H5P_DEFAULT); /* Test contiguous hyperslab selection code */ + test_select_hyper_contig(H5T_STD_U16BE, plist_id); /* Test contiguous hyperslab selection code */ + test_select_hyper_contig2(H5T_STD_U16LE, + H5P_DEFAULT); /* Test more contiguous hyperslab selection cases */ + test_select_hyper_contig2(H5T_STD_U16LE, plist_id); /* Test more contiguous hyperslab selection cases */ + test_select_hyper_contig2(H5T_STD_U16BE, + H5P_DEFAULT); /* Test more contiguous hyperslab selection cases */ + test_select_hyper_contig2(H5T_STD_U16BE, plist_id); /* Test more contiguous hyperslab selection cases */ + test_select_hyper_contig3(H5T_STD_U16LE, + H5P_DEFAULT); /* Test yet more contiguous hyperslab selection cases */ + test_select_hyper_contig3(H5T_STD_U16LE, + plist_id); /* Test yet more contiguous hyperslab selection cases */ + test_select_hyper_contig3(H5T_STD_U16BE, + H5P_DEFAULT); /* Test yet more contiguous hyperslab selection cases */ + test_select_hyper_contig3(H5T_STD_U16BE, + plist_id); /* Test yet more contiguous hyperslab selection cases */ +#if 0 + test_select_hyper_contig_dr(H5T_STD_U16LE, H5P_DEFAULT); + 
test_select_hyper_contig_dr(H5T_STD_U16LE, plist_id); + test_select_hyper_contig_dr(H5T_STD_U16BE, H5P_DEFAULT); + test_select_hyper_contig_dr(H5T_STD_U16BE, plist_id); +#else + HDprintf("** SKIPPED a test due to file creation issues\n"); +#endif +#if 0 + test_select_hyper_checker_board_dr(H5T_STD_U16LE, H5P_DEFAULT); + test_select_hyper_checker_board_dr(H5T_STD_U16LE, plist_id); + test_select_hyper_checker_board_dr(H5T_STD_U16BE, H5P_DEFAULT); + test_select_hyper_checker_board_dr(H5T_STD_U16BE, plist_id); +#else + HDprintf("** SKIPPED a test due to assertion in HDF5\n"); +#endif + test_select_hyper_copy(); /* Test hyperslab selection copying code */ + test_select_point_copy(); /* Test point selection copying code */ + test_select_hyper_offset(); /* Test selection offset code with hyperslabs */ + test_select_hyper_offset2(); /* Test more selection offset code with hyperslabs */ + test_select_point_offset(); /* Test selection offset code with elements */ + test_select_hyper_union(); /* Test hyperslab union code */ + + /* Fancy hyperslab API tests */ + test_select_hyper_union_stagger(); /* Test hyperslab union code for staggered slabs */ + test_select_hyper_union_3d(); /* Test hyperslab union code for 3-D dataset */ + test_select_hyper_valid_combination(); /* Test different input combinations */ + + /* The following tests are currently broken with the Direct VFD */ + if (HDstrcmp(env_h5_drvr, "direct") != 0) { + test_select_hyper_and_2d(); /* Test hyperslab intersection (AND) code for 2-D dataset */ + test_select_hyper_xor_2d(); /* Test hyperslab XOR code for 2-D dataset */ + test_select_hyper_notb_2d(); /* Test hyperslab NOTB code for 2-D dataset */ + test_select_hyper_nota_2d(); /* Test hyperslab NOTA code for 2-D dataset */ + } + + /* test the random hyperslab I/O with the default property list for reading */ + test_select_hyper_union_random_5d(H5P_DEFAULT); /* Test hyperslab union code for random 5-D hyperslabs */ + + /* test random hyperslab I/O with a small buffer for reads */ + test_select_hyper_union_random_5d(plist_id); /* Test hyperslab union code for random 5-D hyperslabs */ + + /* Create a dataset transfer property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Get the default file access properties for caching */ + ret = H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0); + CHECK(ret, FAIL, "H5Pget_cache"); + + /* Increase the size of the raw data cache */ + rdcc_nbytes = 10 * 1024 * 1024; + + /* Set the file access properties for caching */ + ret = H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0); + CHECK(ret, FAIL, "H5Pset_cache"); + + /* Test reading in a large hyperslab with a chunked dataset */ + test_select_hyper_chunk(fapl, H5P_DEFAULT); + + /* Test reading in a large hyperslab with a chunked dataset a small amount at a time */ + test_select_hyper_chunk(fapl, plist_id); + + /* Close file access property list */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(plist_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* More tests for checking validity of selections */ + test_select_valid(); + + /* Tests for combining "all" and "none" selections with hyperslabs */ + test_select_combine(); + + /* Test filling selections */ + /* (Also tests iterating through each selection */ + test_select_fill_all(); + test_select_fill_point(NULL); + test_select_fill_point(offset); + test_select_fill_hyper_simple(NULL); + 
test_select_fill_hyper_simple(offset); + test_select_fill_hyper_regular(NULL); + test_select_fill_hyper_regular(offset); + test_select_fill_hyper_irregular(NULL); + test_select_fill_hyper_irregular(offset); + + /* Test 0-sized selections */ + test_select_none(); + + /* Test selections on scalar dataspaces */ + test_scalar_select(); + test_scalar_select2(); + test_scalar_select3(); + + /* Test "same shape" routine */ + test_shape_same(); + + /* Test "same shape" routine for selections of different rank */ + test_shape_same_dr(); + + /* Test "re-build" routine */ + test_space_rebuild(); + + /* Test "update diminfo" routine */ + test_space_update_diminfo(); + + /* Test point selections in chunked datasets */ + test_select_point_chunk(); + + /* Test scalar dataspaces in chunked datasets */ + test_select_scalar_chunk(); +#if 0 + /* Test using selection offset on hyperslab in chunked dataset */ + test_select_hyper_chunk_offset(); + test_select_hyper_chunk_offset2(); +#else + HDprintf("** SKIPPED a test due to assertion in HDF5\n"); +#endif + + /* Test selection bounds with & without offsets */ + test_select_bounds(); + + /* Test 'regular' hyperslab query routines */ + test_hyper_regular(); + + /* Test unlimited hyperslab selections */ + test_hyper_unlim(); + + /* Test the consistency of internal data structures of selection */ + test_internal_consistency(); + + /* Test irregular selection I/O */ + test_irreg_io(); + + /* Test selection iterators */ + test_sel_iter(); + + /* Test selection intersection with block */ + test_select_intersect_block(); + + /* Test reading of 1-d disjoint file space to 1-d single block memory space */ + test_hyper_io_1d(); + + /* Test H5Sset_extent_none() functionality after we updated it to set + * the class to H5S_NULL instead of H5S_NO_CLASS. + */ + test_h5s_set_extent_none(); + +} /* test_select() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_select + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Albert Cheng + * July 2, 1998 + * + *------------------------------------------------------------------------- + */ +void +cleanup_select(void) +{ + H5Fdelete(FILENAME, H5P_DEFAULT); +} diff --git a/test/API/ttime.c b/test/API/ttime.c new file mode 100644 index 00000000000..74128fd50e9 --- /dev/null +++ b/test/API/ttime.c @@ -0,0 +1,231 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: ttime + * + * Test the Time Datatype functionality + * + *************************************************************/ + +#include "testhdf5.h" + +#define DATAFILE "ttime.h5" +#ifdef NOT_YET +#define DATASETNAME "Dataset" +#endif /* NOT_YET */ + +/**************************************************************** +** +** test_time_commit(): Test committing time datatypes to a file +** +****************************************************************/ +static void +test_time_commit(void) +{ + hid_t file_id, tid; /* identifiers */ + herr_t status; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Committing Time Datatypes\n")); + + /* Create a new file using default properties. */ + file_id = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fcreate"); + + tid = H5Tcopy(H5T_UNIX_D32LE); + CHECK(tid, FAIL, "H5Tcopy"); + status = H5Tcommit2(file_id, "Committed D32LE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(status, FAIL, "H5Tcommit2"); + status = H5Tclose(tid); + CHECK(status, FAIL, "H5Tclose"); + + tid = H5Tcopy(H5T_UNIX_D32BE); + CHECK(tid, FAIL, "H5Tcopy"); + status = H5Tcommit2(file_id, "Committed D32BE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(status, FAIL, "H5Tcommit2"); + status = H5Tclose(tid); + CHECK(status, FAIL, "H5Tclose"); + + tid = H5Tcopy(H5T_UNIX_D64LE); + CHECK(tid, FAIL, "H5Tcopy"); + status = H5Tcommit2(file_id, "Committed D64LE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(status, FAIL, "H5Tcommit2"); + status = H5Tclose(tid); + CHECK(status, FAIL, "H5Tclose"); + + tid = H5Tcopy(H5T_UNIX_D64BE); + CHECK(tid, FAIL, "H5Tcopy"); + status = H5Tcommit2(file_id, "Committed D64BE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(status, FAIL, "H5Tcommit2"); + status = H5Tclose(tid); + CHECK(status, FAIL, "H5Tclose"); + + /* Close the file. 
*/ + status = H5Fclose(file_id); + CHECK(status, FAIL, "H5Fclose"); + + file_id = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fopen"); + + tid = H5Topen2(file_id, "Committed D32LE type", H5P_DEFAULT); + CHECK(tid, FAIL, "H5Topen2"); + + if (!H5Tequal(tid, H5T_UNIX_D32LE)) + TestErrPrintf("H5T_UNIX_D32LE datatype not found\n"); + + status = H5Tclose(tid); + CHECK(status, FAIL, "H5Tclose"); + + tid = H5Topen2(file_id, "Committed D32BE type", H5P_DEFAULT); + CHECK(tid, FAIL, "H5Topen2"); + + if (!H5Tequal(tid, H5T_UNIX_D32BE)) + TestErrPrintf("H5T_UNIX_D32BE datatype not found\n"); + + status = H5Tclose(tid); + CHECK(status, FAIL, "H5Tclose"); + + tid = H5Topen2(file_id, "Committed D64LE type", H5P_DEFAULT); + CHECK(tid, FAIL, "H5Topen2"); + + if (!H5Tequal(tid, H5T_UNIX_D64LE)) + TestErrPrintf("H5T_UNIX_D64LE datatype not found"); + + status = H5Tclose(tid); + CHECK(status, FAIL, "H5Tclose"); + + tid = H5Topen2(file_id, "Committed D64BE type", H5P_DEFAULT); + CHECK(tid, FAIL, "H5Topen2"); + + if (!H5Tequal(tid, H5T_UNIX_D64BE)) + TestErrPrintf("H5T_UNIX_D64BE datatype not found"); + + status = H5Tclose(tid); + CHECK(status, FAIL, "H5Tclose"); + + status = H5Fclose(file_id); + CHECK(status, FAIL, "H5Fclose"); +} + +#ifdef NOT_YET +/**************************************************************** +** +** test_time_io(): Test writing time data to a dataset +** +****************************************************************/ +static void +test_time_io(void) +{ + hid_t fid; /* File identifier */ + hid_t dsid; /* Dataset identifier */ + hid_t tid; /* Datatype identifier */ + hid_t sid; /* Dataspace identifier */ + time_t timenow, timethen; /* Times */ + herr_t status; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Committing Time Datatypes\n")); + + /* Create a new file using default properties. 
*/ + fid = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Create a scalar dataspace */ + sid = H5Screate(H5S_SCALAR); + CHECK(sid, FAIL, "H5Screate"); + + /* Create a dataset with a time datatype */ + dsid = H5Dcreate2(fid, DATASETNAME, H5T_UNIX_D32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dsid, FAIL, "H5Dcreate2"); + + /* Initialize time data value */ + timenow = HDtime(NULL); + + /* Write time to dataset */ + status = H5Dwrite(dsid, H5T_UNIX_D32LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &timenow); + CHECK(status, FAIL, "H5Dwrite"); + + /* Close objects */ + status = H5Dclose(dsid); + CHECK(status, FAIL, "H5Dclose"); + + status = H5Sclose(sid); + CHECK(status, FAIL, "H5Sclose"); + + status = H5Fclose(fid); + CHECK(status, FAIL, "H5Fclose"); + + /* Open file and dataset, read time back and print it in calendar format */ + fid = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + dsid = H5Dopen2(fid, DATASETNAME, H5P_DEFAULT); + CHECK(dsid, FAIL, "H5Dopen2"); + + tid = H5Dget_type(dsid); + CHECK(tid, FAIL, "H5Dget_type"); + if (H5Tget_class(tid) == H5T_TIME) + HDfprintf(stderr, "datatype class is H5T_TIME\n"); + status = H5Tclose(tid); + CHECK(status, FAIL, "H5Tclose"); + + status = H5Dread(dsid, H5T_UNIX_D32LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &timethen); + CHECK(status, FAIL, "H5Dread"); + HDfprintf(stderr, "time written was: %s\n", HDctime(&timethen)); + + status = H5Dclose(dsid); + CHECK(status, FAIL, "H5Dclose"); + + status = H5Fclose(fid); + CHECK(status, FAIL, "H5Fclose"); +} +#endif /* NOT_YET */ + +/**************************************************************** +** +** test_time(): Main time datatype testing routine. +** +****************************************************************/ +void +test_time(void) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing Time Datatypes\n")); + + test_time_commit(); /* Test committing time datatypes to a file */ +#ifdef NOT_YET + test_time_io(); /* Test writing time data to a dataset */ +#endif /* NOT_YET */ + +} /* test_time() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_time + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Quincey Koziol + * October 19, 2000 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +cleanup_time(void) +{ + H5Fdelete(DATAFILE, H5P_DEFAULT); +} diff --git a/test/API/tunicode.c b/test/API/tunicode.c new file mode 100644 index 00000000000..fa594562f43 --- /dev/null +++ b/test/API/tunicode.c @@ -0,0 +1,867 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Unicode test */ +#include "testhdf5.h" + +#define NUM_CHARS 16 +#define MAX_STRING_LENGTH ((NUM_CHARS * 4) + 1) /* Max length in bytes */ +#define MAX_PATH_LENGTH (MAX_STRING_LENGTH + 20) /* Max length in bytes */ +#define MAX_CODE_POINT 0x200000 +#define FILENAME "unicode.h5" +/* A buffer to hold two copies of the UTF-8 string */ +#define LONG_BUF_SIZE (2 * MAX_STRING_LENGTH + 4) + +#define DSET1_NAME "fl_string_dataset" +#define DSET3_NAME "dataset3" +#define DSET4_NAME "dataset4" +#define VL_DSET1_NAME "vl_dset_1" +#define GROUP1_NAME "group1" +#define GROUP2_NAME "group2" +#define GROUP3_NAME "group3" +#define GROUP4_NAME "group4" + +#define RANK 1 +#define COMP_INT_VAL 7 +#define COMP_FLOAT_VAL (-42.0F) +#define COMP_DOUBLE_VAL 42.0 + +/* Test function prototypes */ +void test_fl_string(hid_t fid, const char *string); +void test_strpad(hid_t fid, const char *string); +void test_vl_string(hid_t fid, const char *string); +void test_objnames(hid_t fid, const char *string); +void test_attrname(hid_t fid, const char *string); +void test_compound(hid_t fid, const char *string); +void test_enum(hid_t fid, const char *string); +void test_opaque(hid_t fid, const char *string); + +/* Utility function prototypes */ +static hid_t mkstr(size_t len, H5T_str_t strpad); +unsigned int write_char(unsigned int c, char *test_string, unsigned int cur_pos); +void dump_string(const char *string); + +/* + * test_fl_string + * Tests that UTF-8 can be used for fixed-length string data. + * Writes the string to a dataset and reads it back again. + */ +void +test_fl_string(hid_t fid, const char *string) +{ + hid_t dtype_id, space_id, dset_id; + hsize_t dims = 1; + char read_buf[MAX_STRING_LENGTH]; + H5T_cset_t cset; + herr_t ret; + + /* Create the datatype, ensure that the character set behaves + * correctly (it should default to ASCII and can be set to UTF8) + */ + dtype_id = H5Tcopy(H5T_C_S1); + CHECK(dtype_id, FAIL, "H5Tcopy"); + ret = H5Tset_size(dtype_id, (size_t)MAX_STRING_LENGTH); + CHECK(ret, FAIL, "H5Tset_size"); + cset = H5Tget_cset(dtype_id); + VERIFY(cset, H5T_CSET_ASCII, "H5Tget_cset"); + ret = H5Tset_cset(dtype_id, H5T_CSET_UTF8); + CHECK(ret, FAIL, "H5Tset_cset"); + cset = H5Tget_cset(dtype_id); + VERIFY(cset, H5T_CSET_UTF8, "H5Tget_cset"); + + /* Create dataspace for a dataset */ + space_id = H5Screate_simple(RANK, &dims, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + + /* Create a dataset */ + dset_id = H5Dcreate2(fid, DSET1_NAME, dtype_id, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dcreate2"); + + /* Write UTF-8 string to dataset */ + ret = H5Dwrite(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, string); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read string back and make sure it is unchanged */ + ret = H5Dread(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf); + CHECK(ret, FAIL, "H5Dread"); + + VERIFY(HDstrcmp(string, read_buf), 0, "strcmp"); + + /* Close all */ + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Tclose(dtype_id); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); +} + +/* + * test_strpad + * Tests string padding for a UTF-8 string. + * Converts strings to shorter and then longer strings. + * Borrows heavily from dtypes.c, but is more complicated because + * the string is randomly generated. 
+ */ +void +test_strpad(hid_t H5_ATTR_UNUSED fid, const char *string) +{ + /* buf is used to hold the data that H5Tconvert operates on. */ + char buf[LONG_BUF_SIZE]; + + /* cmpbuf holds the output that H5Tconvert should produce, + * to compare against the actual output. */ + char cmpbuf[LONG_BUF_SIZE]; + + /* new_string is a slightly modified version of the UTF-8 + * string to make the tests run more smoothly. */ + char new_string[MAX_STRING_LENGTH + 2]; + + size_t length; /* Length of new_string in bytes */ + size_t small_len; /* Size of the small datatype */ + size_t big_len; /* Size of the larger datatype */ + hid_t src_type, dst_type; + herr_t ret; + + /* The following tests are simpler if the UTF-8 string contains + * the right number of bytes (even or odd, depending on the test). + * We create a 'new_string' whose length is convenient by prepending + * an 'x' to 'string' when necessary. */ + length = HDstrlen(string); + if (length % 2 != 1) { + HDstrcpy(new_string, "x"); + HDstrcat(new_string, string); + length++; + } + else { + HDstrcpy(new_string, string); + } + + /* Convert a null-terminated string to a shorter and longer null + * terminated string. */ + + /* Create a src_type that holds the UTF-8 string and its final NULL */ + big_len = length + 1; /* +1 byte for final NULL */ + HDassert((2 * big_len) <= sizeof(cmpbuf)); + src_type = mkstr(big_len, H5T_STR_NULLTERM); + CHECK(src_type, FAIL, "mkstr"); + /* Create a dst_type that holds half of the UTF-8 string and a final + * NULL */ + small_len = (length + 1) / 2; + dst_type = mkstr(small_len, H5T_STR_NULLTERM); + CHECK(dst_type, FAIL, "mkstr"); + + /* Fill the buffer with two copies of the UTF-8 string, each with a + * terminating NULL. It will look like "abcdefg\0abcdefg\0". */ + HDstrncpy(buf, new_string, big_len); + HDstrncpy(&buf[big_len], new_string, big_len); + + ret = H5Tconvert(src_type, dst_type, (size_t)2, buf, NULL, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tconvert"); + + /* After conversion, the buffer should look like + * "abc\0abc\0abcdefg\0". Note that this is just what the bytes look + * like; UTF-8 characters may well have been truncated. + * To check that the conversion worked properly, we'll build this + * string manually. */ + HDstrncpy(cmpbuf, new_string, small_len - 1); + cmpbuf[small_len - 1] = '\0'; + HDstrncpy(&cmpbuf[small_len], new_string, small_len - 1); + cmpbuf[2 * small_len - 1] = '\0'; + HDstrcpy(&cmpbuf[2 * small_len], new_string); + + VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp"); + + /* Now convert from smaller datatype to bigger datatype. This should + * leave our buffer looking like: "abc\0\0\0\0\0abc\0\0\0\0\0" */ + ret = H5Tconvert(dst_type, src_type, (size_t)2, buf, NULL, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tconvert"); + + /* First fill the buffer with NULLs */ + HDmemset(cmpbuf, '\0', (size_t)LONG_BUF_SIZE); + /* Copy in the characters */ + HDstrncpy(cmpbuf, new_string, small_len - 1); + HDstrncpy(&cmpbuf[big_len], new_string, small_len - 1); + + VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp"); + + ret = H5Tclose(src_type); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(dst_type); + CHECK(ret, FAIL, "H5Tclose"); + + /* Now test null padding. Null-padded strings do *not* need + * terminating NULLs, so the sizes of the datatypes are slightly + * different and we want a string with an even number of characters. 
*/ + length = HDstrlen(string); + if (length % 2 != 0) { + HDstrcpy(new_string, "x"); + HDstrcat(new_string, string); + length++; + } + else { + HDstrcpy(new_string, string); + } + + /* Create a src_type that holds the UTF-8 string */ + big_len = length; + HDassert((2 * big_len) <= sizeof(cmpbuf)); + src_type = mkstr(big_len, H5T_STR_NULLPAD); + CHECK(src_type, FAIL, "mkstr"); + /* Create a dst_type that holds half of the UTF-8 string */ + small_len = length / 2; + dst_type = mkstr(small_len, H5T_STR_NULLPAD); + CHECK(dst_type, FAIL, "mkstr"); + + /* Fill the buffer with two copies of the UTF-8 string. + * It will look like "abcdefghabcdefgh". */ + HDstrncpy(buf, new_string, big_len); + HDstrncpy(&buf[big_len], new_string, big_len); + + ret = H5Tconvert(src_type, dst_type, (size_t)2, buf, NULL, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tconvert"); + + /* After conversion, the buffer should look like + * "abcdabcdabcdefgh". Note that this is just what the bytes look + * like; UTF-8 characters may well have been truncated. + * To check that the conversion worked properly, we'll build this + * string manually. */ + HDstrncpy(cmpbuf, new_string, small_len); + HDstrncpy(&cmpbuf[small_len], new_string, small_len); + HDstrncpy(&cmpbuf[2 * small_len], new_string, big_len); + + VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp"); + + /* Now convert from smaller datatype to bigger datatype. This should + * leave our buffer looking like: "abcd\0\0\0\0abcd\0\0\0\0" */ + ret = H5Tconvert(dst_type, src_type, (size_t)2, buf, NULL, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tconvert"); + + /* First fill the buffer with NULLs */ + HDmemset(cmpbuf, '\0', (size_t)LONG_BUF_SIZE); + /* Copy in the characters */ + HDstrncpy(cmpbuf, new_string, small_len); + HDstrncpy(&cmpbuf[big_len], new_string, small_len); + + VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp"); + + ret = H5Tclose(src_type); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(dst_type); + CHECK(ret, FAIL, "H5Tclose"); + + /* Test space padding. This is very similar to null-padding; we can + use the same values of length, small_len, and big_len. */ + + src_type = mkstr(big_len, H5T_STR_SPACEPAD); + CHECK(src_type, FAIL, "mkstr"); + dst_type = mkstr(small_len, H5T_STR_SPACEPAD); + CHECK(src_type, FAIL, "mkstr"); + + /* Fill the buffer with two copies of the UTF-8 string. + * It will look like "abcdefghabcdefgh". */ + HDstrcpy(buf, new_string); + HDstrcpy(&buf[big_len], new_string); + + ret = H5Tconvert(src_type, dst_type, (size_t)2, buf, NULL, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tconvert"); + + /* After conversion, the buffer should look like + * "abcdabcdabcdefgh". Note that this is just what the bytes look + * like; UTF-8 characters may have been truncated. + * To check that the conversion worked properly, we'll build this + * string manually. */ + HDstrncpy(cmpbuf, new_string, small_len); + HDstrncpy(&cmpbuf[small_len], new_string, small_len); + HDstrncpy(&cmpbuf[2 * small_len], new_string, big_len); + + VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp"); + + /* Now convert from smaller datatype to bigger datatype. 
This should + * leave our buffer looking like: "abcd abcd " */ + ret = H5Tconvert(dst_type, src_type, (size_t)2, buf, NULL, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tconvert"); + + /* First fill the buffer with spaces */ + HDmemset(cmpbuf, ' ', (size_t)LONG_BUF_SIZE); + /* Copy in the characters */ + HDstrncpy(cmpbuf, new_string, small_len); + HDstrncpy(&cmpbuf[big_len], new_string, small_len); + + VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp"); + + ret = H5Tclose(src_type); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(dst_type); + CHECK(ret, FAIL, "H5Tclose"); +} + +/* + * test_vl_string + * Tests variable-length string datatype with UTF-8 strings. + */ +void +test_vl_string(hid_t fid, const char *string) +{ + hid_t type_id, space_id, dset_id; + hsize_t dims = 1; + hsize_t size; /* Number of bytes used */ + char *read_buf[1]; + herr_t ret; + + /* Create dataspace for datasets */ + space_id = H5Screate_simple(RANK, &dims, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + + /* Create a datatype to refer to */ + type_id = H5Tcopy(H5T_C_S1); + CHECK(type_id, FAIL, "H5Tcopy"); + ret = H5Tset_size(type_id, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Create a dataset */ + dset_id = H5Dcreate2(fid, VL_DSET1_NAME, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &string); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Make certain the correct amount of memory will be used */ + ret = H5Dvlen_get_buf_size(dset_id, type_id, space_id, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + VERIFY(size, (hsize_t)HDstrlen(string) + 1, "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + VERIFY(HDstrcmp(string, read_buf[0]), 0, "strcmp"); + + /* Reclaim the read VL data */ + ret = H5Treclaim(type_id, space_id, H5P_DEFAULT, read_buf); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close all */ + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Tclose(type_id); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); +} + +/* + * test_objnames + * Tests that UTF-8 can be used for object names in the file. + * Tests groups, datasets, named datatypes, and soft links. + * Note that this test doesn't actually mark the names as being + * in UTF-8. At the time this test was written, that feature + * didn't exist in HDF5, and when the character encoding property + * was added to links it didn't change how they were stored in the file, + * -JML 2/2/2006 + */ +void +test_objnames(hid_t fid, const char *string) +{ + hid_t grp_id, grp1_id, grp2_id, grp3_id; + hid_t type_id, dset_id, space_id; +#if 0 + char read_buf[MAX_STRING_LENGTH]; +#endif + char path_buf[MAX_PATH_LENGTH]; + hsize_t dims = 1; +#if 0 + hobj_ref_t obj_ref; + ssize_t size; +#endif + herr_t ret; + + /* Create a group with a UTF-8 name */ + grp_id = H5Gcreate2(fid, string, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp_id, FAIL, "H5Gcreate2"); +#if 0 + /* Set a comment on the group to test that we can access the group + * Also test that UTF-8 comments can be read. 
+ */ + ret = H5Oset_comment_by_name(fid, string, string, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oset_comment_by_name"); + size = H5Oget_comment_by_name(fid, string, read_buf, (size_t)MAX_STRING_LENGTH, H5P_DEFAULT); + CHECK(size, FAIL, "H5Oget_comment_by_name"); +#endif + ret = H5Gclose(grp_id); + CHECK(ret, FAIL, "H5Gclose"); +#if 0 + VERIFY(HDstrcmp(string, read_buf), 0, "strcmp"); +#endif + /* Create a new dataset with a UTF-8 name */ + grp1_id = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp1_id, FAIL, "H5Gcreate2"); + + space_id = H5Screate_simple(RANK, &dims, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + dset_id = H5Dcreate2(grp1_id, string, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dcreate2"); + + /* Make sure that dataset can be opened again */ + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + + dset_id = H5Dopen2(grp1_id, string, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Dopen2"); + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Gclose(grp1_id); + CHECK(ret, FAIL, "H5Gclose"); + + /* Do the same for a named datatype */ + grp2_id = H5Gcreate2(fid, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp2_id, FAIL, "H5Gcreate2"); + + type_id = H5Tcreate(H5T_OPAQUE, (size_t)1); + CHECK(type_id, FAIL, "H5Tcreate"); + ret = H5Tcommit2(grp2_id, string, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(type_id, FAIL, "H5Tcommit2"); + ret = H5Tclose(type_id); + CHECK(type_id, FAIL, "H5Tclose"); + + type_id = H5Topen2(grp2_id, string, H5P_DEFAULT); + CHECK(type_id, FAIL, "H5Topen2"); + ret = H5Tclose(type_id); + CHECK(type_id, FAIL, "H5Tclose"); + + /* Don't close the group -- use it to test that object references + * can refer to objects named in UTF-8 */ +#if 0 + space_id = H5Screate_simple(RANK, &dims, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + dset_id = + H5Dcreate2(grp2_id, DSET3_NAME, H5T_STD_REF_OBJ, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Dcreate2"); + + /* Create reference to named datatype */ + ret = H5Rcreate(&obj_ref, grp2_id, string, H5R_OBJECT, (hid_t)-1); + CHECK(ret, FAIL, "H5Rcreate"); + /* Write selection and read it back*/ + ret = H5Dwrite(dset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, &obj_ref); + CHECK(ret, FAIL, "H5Dwrite"); + ret = H5Dread(dset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, &obj_ref); + CHECK(ret, FAIL, "H5Dread"); + + /* Ensure that we can open named datatype using object reference */ + type_id = H5Rdereference2(dset_id, H5P_DEFAULT, H5R_OBJECT, &obj_ref); + CHECK(type_id, FAIL, "H5Rdereference2"); + ret = H5Tcommitted(type_id); + VERIFY(ret, 1, "H5Tcommitted"); + + ret = H5Tclose(type_id); + CHECK(type_id, FAIL, "H5Tclose"); + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); +#endif + ret = H5Gclose(grp2_id); + CHECK(ret, FAIL, "H5Gclose"); + + /* Create "group3". Build a hard link from group3 to group2, which has + * a datatype with the UTF-8 name. Create a soft link in group3 + * pointing through the hard link to the datatype. Give the soft + * link a name in UTF-8. Ensure that the soft link works. 
*/ + + grp3_id = H5Gcreate2(fid, GROUP3_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(grp3_id, FAIL, "H5Gcreate2"); + + ret = H5Lcreate_hard(fid, GROUP2_NAME, grp3_id, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_hard"); + HDstrcpy(path_buf, GROUP2_NAME); + HDstrcat(path_buf, "/"); + HDstrcat(path_buf, string); + ret = H5Lcreate_hard(grp3_id, path_buf, H5L_SAME_LOC, string, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lcreate_hard"); + + /* Open named datatype using soft link */ + type_id = H5Topen2(grp3_id, string, H5P_DEFAULT); + CHECK(type_id, FAIL, "H5Topen2"); + + ret = H5Tclose(type_id); + CHECK(type_id, FAIL, "H5Tclose"); + ret = H5Gclose(grp3_id); + CHECK(ret, FAIL, "H5Gclose"); +} + +/* + * test_attrname + * Test that attributes can deal with UTF-8 strings + */ +void +test_attrname(hid_t fid, const char *string) +{ + hid_t group_id, attr_id; + hid_t dtype_id, space_id; + hsize_t dims = 1; + char read_buf[MAX_STRING_LENGTH]; + ssize_t size; + herr_t ret; + + /* Create a new group and give it an attribute whose + * name and value are UTF-8 strings. + */ + group_id = H5Gcreate2(fid, GROUP4_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(group_id, FAIL, "H5Gcreate2"); + + space_id = H5Screate_simple(RANK, &dims, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + dtype_id = H5Tcopy(H5T_C_S1); + CHECK(dtype_id, FAIL, "H5Tcopy"); + ret = H5Tset_size(dtype_id, (size_t)MAX_STRING_LENGTH); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Create the attribute and check that its name is correct */ + attr_id = H5Acreate2(group_id, string, dtype_id, space_id, H5P_DEFAULT, H5P_DEFAULT); + CHECK(attr_id, FAIL, "H5Acreate2"); + size = H5Aget_name(attr_id, (size_t)MAX_STRING_LENGTH, read_buf); + CHECK(size, FAIL, "H5Aget_name"); + ret = HDstrcmp(read_buf, string); + VERIFY(ret, 0, "strcmp"); + read_buf[0] = '\0'; + + /* Try writing and reading from the attribute */ + ret = H5Awrite(attr_id, dtype_id, string); + CHECK(ret, FAIL, "H5Awrite"); + ret = H5Aread(attr_id, dtype_id, read_buf); + CHECK(ret, FAIL, "H5Aread"); + ret = HDstrcmp(read_buf, string); + VERIFY(ret, 0, "strcmp"); + + /* Clean up */ + ret = H5Aclose(attr_id); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Tclose(dtype_id); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Gclose(group_id); + CHECK(ret, FAIL, "H5Gclose"); +} + +/* + * test_compound + * Test that compound datatypes can have UTF-8 field names. + */ +void +test_compound(hid_t fid, const char *string) +{ + /* Define two compound structures, s1_t and s2_t. + * s2_t is a subset of s1_t, with two out of three + * fields. + * This is stolen from the h5_compound example. 
+ */ + typedef struct s1_t { + int a; + double c; + float b; + } s1_t; + typedef struct s2_t { + double c; + int a; + } s2_t; + /* Actual variable declarations */ + s1_t s1; + s2_t s2; + hid_t s1_tid, s2_tid; + hid_t space_id, dset_id; + hsize_t dim = 1; + char *readbuf; + herr_t ret; + + /* Initialize compound data */ + HDmemset(&s1, 0, sizeof(s1_t)); /* To make purify happy */ + s1.a = COMP_INT_VAL; + s1.c = COMP_DOUBLE_VAL; + s1.b = COMP_FLOAT_VAL; + + /* Create compound datatypes using UTF-8 field name */ + s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + CHECK(s1_tid, FAIL, "H5Tcreate"); + ret = H5Tinsert(s1_tid, string, HOFFSET(s1_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Check that the field name was stored correctly */ + readbuf = H5Tget_member_name(s1_tid, 0); + ret = HDstrcmp(readbuf, string); + VERIFY(ret, 0, "strcmp"); + H5free_memory(readbuf); + + /* Add the other fields to the datatype */ + ret = H5Tinsert(s1_tid, "c_name", HOFFSET(s1_t, c), H5T_NATIVE_DOUBLE); + CHECK(ret, FAIL, "H5Tinsert"); + ret = H5Tinsert(s1_tid, "b_name", HOFFSET(s1_t, b), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create second datatype, with only two fields. */ + s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t)); + CHECK(s2_tid, FAIL, "H5Tcreate"); + ret = H5Tinsert(s2_tid, "c_name", HOFFSET(s2_t, c), H5T_NATIVE_DOUBLE); + CHECK(ret, FAIL, "H5Tinsert"); + ret = H5Tinsert(s2_tid, string, HOFFSET(s2_t, a), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create the dataspace and dataset. */ + space_id = H5Screate_simple(1, &dim, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + dset_id = H5Dcreate2(fid, DSET4_NAME, s1_tid, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dcreate2"); + + /* Write data to the dataset. */ + ret = H5Dwrite(dset_id, s1_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, &s1); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Ensure that data can be read back by field name into s2 struct */ + ret = H5Dread(dset_id, s2_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, &s2); + CHECK(ret, FAIL, "H5Dread"); + + VERIFY(s2.a, COMP_INT_VAL, "H5Dread"); + VERIFY(s2.c, COMP_DOUBLE_VAL, "H5Dread"); + + /* Clean up */ + ret = H5Tclose(s1_tid); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Tclose(s2_tid); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); +} + +/* + * test_enum + * Test that enumerated datatypes can have UTF-8 member names. + */ +void +test_enum(hid_t H5_ATTR_UNUSED fid, const char *string) +{ + /* Define an enumerated type */ + typedef enum { E1_RED, E1_GREEN, E1_BLUE, E1_WHITE } c_e1; + /* Variable declarations */ + c_e1 val; + herr_t ret; + hid_t type_id; + char readbuf[MAX_STRING_LENGTH]; + + /* Create an enumerated datatype in HDF5 with a UTF-8 member name*/ + type_id = H5Tcreate(H5T_ENUM, sizeof(c_e1)); + CHECK(type_id, FAIL, "H5Tcreate"); + val = E1_RED; + ret = H5Tenum_insert(type_id, "RED", &val); + CHECK(ret, FAIL, "H5Tenum_insert"); + val = E1_GREEN; + ret = H5Tenum_insert(type_id, "GREEN", &val); + CHECK(ret, FAIL, "H5Tenum_insert"); + val = E1_BLUE; + ret = H5Tenum_insert(type_id, "BLUE", &val); + CHECK(ret, FAIL, "H5Tenum_insert"); + val = E1_WHITE; + ret = H5Tenum_insert(type_id, string, &val); + CHECK(ret, FAIL, "H5Tenum_insert"); + + /* Ensure that UTF-8 member name gives the right value and vice versa. 
*/ + ret = H5Tenum_valueof(type_id, string, &val); + CHECK(ret, FAIL, "H5Tenum_valueof"); + VERIFY(val, E1_WHITE, "H5Tenum_valueof"); + ret = H5Tenum_nameof(type_id, &val, readbuf, (size_t)MAX_STRING_LENGTH); + CHECK(ret, FAIL, "H5Tenum_nameof"); + ret = HDstrcmp(readbuf, string); + VERIFY(ret, 0, "strcmp"); + + /* Close the datatype */ + ret = H5Tclose(type_id); + CHECK(ret, FAIL, "H5Tclose"); +} + +/* + * test_opaque + * Test comments on opaque datatypes + */ +void +test_opaque(hid_t H5_ATTR_UNUSED fid, const char *string) +{ + hid_t type_id; + char *read_buf; + herr_t ret; + + /* Create an opaque type and give it a UTF-8 tag */ + type_id = H5Tcreate(H5T_OPAQUE, (size_t)4); + CHECK(type_id, FAIL, "H5Tcreate"); + ret = H5Tset_tag(type_id, string); + CHECK(ret, FAIL, "H5Tset_tag"); + + /* Read the tag back. */ + read_buf = H5Tget_tag(type_id); + ret = HDstrcmp(read_buf, string); + VERIFY(ret, 0, "H5Tget_tag"); + H5free_memory(read_buf); + + ret = H5Tclose(type_id); + CHECK(ret, FAIL, "H5Tclose"); +} + +/*********************/ +/* Utility functions */ +/*********************/ + +/* mkstr + * Borrwed from dtypes.c. + * Creates a new string data type. Used in string padding tests */ +static hid_t +mkstr(size_t len, H5T_str_t strpad) +{ + hid_t t; + if ((t = H5Tcopy(H5T_C_S1)) < 0) + return -1; + if (H5Tset_size(t, len) < 0) + return -1; + if (H5Tset_strpad(t, strpad) < 0) + return -1; + return t; +} + +/* write_char + * Append a unicode code point c to test_string in UTF-8 encoding. + * Return the new end of the string. + */ +unsigned int +write_char(unsigned int c, char *test_string, unsigned int cur_pos) +{ + if (c < 0x80) { + test_string[cur_pos] = (char)c; + cur_pos++; + } + else if (c < 0x800) { + test_string[cur_pos] = (char)(0xC0 | c >> 6); + test_string[cur_pos + 1] = (char)(0x80 | (c & 0x3F)); + cur_pos += 2; + } + else if (c < 0x10000) { + test_string[cur_pos] = (char)(0xE0 | c >> 12); + test_string[cur_pos + 1] = (char)(0x80 | (c >> 6 & 0x3F)); + test_string[cur_pos + 2] = (char)(0x80 | (c & 0x3F)); + cur_pos += 3; + } + else if (c < 0x200000) { + test_string[cur_pos] = (char)(0xF0 | c >> 18); + test_string[cur_pos + 1] = (char)(0x80 | (c >> 12 & 0x3F)); + test_string[cur_pos + 2] = (char)(0x80 | (c >> 6 & 0x3F)); + test_string[cur_pos + 3] = (char)(0x80 | (c & 0x3F)); + cur_pos += 4; + } + + return cur_pos; +} + +/* dump_string + * Print a string both as text (which will look like garbage) and as hex. + * The text display is not guaranteed to be accurate--certain characters + * could confuse printf (e.g., '\n'). */ +void +dump_string(const char *string) +{ + size_t length; + size_t x; + + HDprintf("The string was:\n %s", string); + HDprintf("Or in hex:\n"); + + length = HDstrlen(string); + + for (x = 0; x < length; x++) + HDprintf("%x ", string[x] & (0x000000FF)); + + HDprintf("\n"); +} + +/* Main test. + * Create a string of random Unicode characters, then run each test with + * that string. 
+ */ +void +test_unicode(void) +{ + char test_string[MAX_STRING_LENGTH]; + unsigned int cur_pos = 0; /* Current position in test_string */ + unsigned int unicode_point; /* Unicode code point for a single character */ + hid_t fid; /* ID of file */ + int x; /* Temporary variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing UTF-8 Encoding\n")); + + /* Create a random string with length NUM_CHARS */ + HDsrandom((unsigned)HDtime(NULL)); + + HDmemset(test_string, 0, sizeof(test_string)); + for (x = 0; x < NUM_CHARS; x++) { + /* We need to avoid unprintable characters (codes 0-31) and the + * . and / characters, since they aren't allowed in path names. + */ + unicode_point = (unsigned)(HDrandom() % (MAX_CODE_POINT - 32)) + 32; + if (unicode_point != 46 && unicode_point != 47) + cur_pos = write_char(unicode_point, test_string, cur_pos); + } + + /* Avoid unlikely case of the null string */ + if (cur_pos == 0) { + test_string[cur_pos] = 'Q'; + cur_pos++; + } + test_string[cur_pos] = '\0'; + + /* Create file */ + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + + test_fl_string(fid, test_string); + test_strpad(fid, "abcdefgh"); + test_strpad(fid, test_string); + test_vl_string(fid, test_string); + test_objnames(fid, test_string); + test_attrname(fid, test_string); + test_compound(fid, test_string); + test_enum(fid, test_string); + test_opaque(fid, test_string); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* This function could be useful in debugging if certain strings + * create errors. + */ +#ifdef DEBUG + dump_string(test_string); +#endif /* DEBUG */ +} + +/* cleanup_unicode(void) + * Delete the file this test created. + */ +void +cleanup_unicode(void) +{ + H5Fdelete(FILENAME, H5P_DEFAULT); +} diff --git a/test/API/tvlstr.c b/test/API/tvlstr.c new file mode 100644 index 00000000000..b05ff667e57 --- /dev/null +++ b/test/API/tvlstr.c @@ -0,0 +1,1013 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: tvlstr + * + * Test the Variable-Length String functionality + * + *************************************************************/ + +#include "testhdf5.h" + +#define DATAFILE "tvlstr.h5" +#define DATAFILE2 "tvlstr2.h5" +#define DATAFILE3 "sel2el.h5" + +#define DATASET "1Darray" + +/* 1-D dataset with fixed dimensions */ +#define SPACE1_RANK 1 +#define SPACE1_DIM1 4 +#define NUMP 4 + +#define VLSTR_TYPE "vl_string_type" + +/* Definitions for the VL re-writing test */ +#define REWRITE_NDATASETS 32 + +/* String for testing attributes */ +static const char *string_att = "This is the string for the attribute"; +static char *string_att_write = NULL; + +void *test_vlstr_alloc_custom(size_t size, void *info); +void test_vlstr_free_custom(void *mem, void *info); + +/**************************************************************** +** +** test_vlstr_alloc_custom(): Test VL datatype custom memory +** allocation routines. This routine just uses malloc to +** allocate the memory and increments the amount of memory +** allocated. +** +****************************************************************/ +void * +test_vlstr_alloc_custom(size_t size, void *info) +{ + void *ret_value = NULL; /* Pointer to return */ + size_t *mem_used = (size_t *)info; /* Get the pointer to the memory used */ + size_t extra; /* Extra space needed */ + + /* + * This weird contortion is required on the DEC Alpha to keep the + * alignment correct - QAK + */ + extra = MAX(sizeof(void *), sizeof(size_t)); + + if ((ret_value = HDmalloc(extra + size)) != NULL) { + *(size_t *)ret_value = size; + *mem_used += size; + } /* end if */ + ret_value = ((unsigned char *)ret_value) + extra; + return (ret_value); +} + +/**************************************************************** +** +** test_vlstr_free_custom(): Test VL datatype custom memory +** allocation routines. This routine just uses free to +** release the memory and decrements the amount of memory +** allocated. +** +****************************************************************/ +void +test_vlstr_free_custom(void *_mem, void *info) +{ + unsigned char *mem; + size_t *mem_used = (size_t *)info; /* Get the pointer to the memory used */ + size_t extra; /* Extra space needed */ + + /* + * This weird contortion is required on the DEC Alpha to keep the + * alignment correct - QAK + */ + extra = MAX(sizeof(void *), sizeof(size_t)); + + if (_mem != NULL) { + mem = ((unsigned char *)_mem) - extra; + *mem_used -= *(size_t *)((void *)mem); + HDfree(mem); + } /* end if */ +} + +/**************************************************************** +** +** test_vlstrings_basic(): Test basic VL string code. 
+** Tests simple VL string I/O +** +****************************************************************/ +static void +test_vlstrings_basic(void) +{ + /* Information to write */ + const char *wdata[SPACE1_DIM1] = { + "Four score and seven years ago our forefathers brought forth on this continent a new nation,", + "conceived in liberty and dedicated to the proposition that all men are created equal.", + "Now we are engaged in a great civil war,", + "testing whether that nation or any nation so conceived and so dedicated can long endure."}; + + char *rdata[SPACE1_DIM1]; /* Information read in */ + char *wdata2; + hid_t dataspace, dataset2; + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Datatype ID */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hsize_t dims1[] = {SPACE1_DIM1}; + hsize_t size; /* Number of bytes which will be used */ + unsigned i; /* counting variable */ + size_t str_used; /* String data in memory */ + size_t mem_used = 0; /* Memory used during allocation */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic VL String Functionality\n")); + + /* Create file */ + fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a datatype to refer to */ + tid1 = H5Tcopy(H5T_C_S1); + CHECK(tid1, FAIL, "H5Tcopy"); + + ret = H5Tset_size(tid1, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + dataspace = H5Screate(H5S_SCALAR); + + dataset2 = H5Dcreate2(fid1, "Dataset2", tid1, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + wdata2 = (char *)HDcalloc((size_t)65534, sizeof(char)); + HDmemset(wdata2, 'A', (size_t)65533); + + ret = H5Dwrite(dataset2, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata2); + CHECK(ret, FAIL, "H5Dwrite"); + + H5Sclose(dataspace); + H5Dclose(dataset2); + HDfree(wdata2); + + /* Change to the custom memory allocation routines for reading VL string */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vlstr_alloc_custom, &mem_used, test_vlstr_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory will be used */ + ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* Count the actual number of bytes used by the strings */ + for (i = 0, str_used = 0; i < SPACE1_DIM1; i++) + str_used += HDstrlen(wdata[i]) + 1; + + /* Compare against the strings actually written */ + VERIFY(size, (hsize_t)str_used, "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + VERIFY(mem_used, str_used, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (HDstrlen(wdata[i]) != HDstrlen(rdata[i])) { + TestErrPrintf("VL data length 
don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", (int)i, + (int)HDstrlen(wdata[i]), (int)i, (int)HDstrlen(rdata[i])); + continue; + } /* end if */ + if (HDstrcmp(wdata[i], rdata[i]) != 0) { + TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", (int)i, wdata[i], + (int)i, rdata[i]); + continue; + } /* end if */ + } /* end for */ + + /* Reclaim the read VL data */ + ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_vlstrings_basic() */ + +/**************************************************************** +** +** test_vlstrings_special(): Test VL string code for special +** string cases, nil and zero-sized. +** +****************************************************************/ +static void +test_vlstrings_special(void) +{ + const char *wdata[SPACE1_DIM1] = {"", "two", "three", "\0"}; + const char *wdata2[SPACE1_DIM1] = {NULL, NULL, NULL, NULL}; + char *rdata[SPACE1_DIM1]; /* Information read in */ + char *fill; /* Fill value */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Datatype ID */ + hid_t dcpl; /* Dataset creation property list ID */ + hsize_t dims1[] = {SPACE1_DIM1}; + unsigned i; /* counting variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Special VL Strings\n")); + + /* Create file */ + fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a datatype to refer to */ + tid1 = H5Tcopy(H5T_C_S1); + CHECK(tid1, FAIL, "H5Tcopy"); + + ret = H5Tset_size(tid1, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset3", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Read from dataset before writing data */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + if (rdata[i] != NULL) + TestErrPrintf("VL doesn't match!, rdata[%d]=%s\n", (int)i, rdata[i]); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (HDstrlen(wdata[i]) != HDstrlen(rdata[i])) { + TestErrPrintf("VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", (int)i, + (int)HDstrlen(wdata[i]), (int)i, (int)HDstrlen(rdata[i])); + continue; + } /* end if */ + if ((wdata[i] == NULL && rdata[i] != NULL) || (rdata[i] == NULL && wdata[i] != NULL)) { + TestErrPrintf("VL data values 
don't match!\n"); + continue; + } /* end if */ + if (HDstrcmp(wdata[i], rdata[i]) != 0) { + TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", (int)i, wdata[i], + (int)i, rdata[i]); + continue; + } /* end if */ + } /* end for */ + + /* Reclaim the read VL data */ + ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create another dataset to test nil strings */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Set the fill value for the second dataset */ + fill = NULL; + ret = H5Pset_fill_value(dcpl, tid1, &fill); + CHECK(ret, FAIL, "H5Pset_fill_value"); + + dataset = H5Dcreate2(fid1, "Dataset4", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Read from dataset before writing data */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + if (rdata[i] != NULL) + TestErrPrintf("VL doesn't match!, rdata[%d]=%s\n", (int)i, rdata[i]); + + /* Try to write nil strings to disk. */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata2); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read nil strings back from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + if (rdata[i] != NULL) + TestErrPrintf("VL doesn't match!, rdata[%d]=%s\n", (int)i, rdata[i]); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} + +/**************************************************************** +** +** test_vlstring_type(): Test VL string type. +** Tests if VL string is treated as string. 
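+**      The test copies H5T_C_S1, switches the padding to H5T_STR_NULLPAD,
+**      converts the type to variable length with H5Tset_size(tid, H5T_VARIABLE),
+**      commits it to the file as VLSTR_TYPE, then closes and re-opens the
+**      file to check that the character set and padding were preserved.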
+** +****************************************************************/ +static void +test_vlstring_type(void) +{ + hid_t fid; /* HDF5 File IDs */ + hid_t tid_vlstr; + H5T_cset_t cset; + H5T_str_t pad; + htri_t vl_str; /* Whether string is VL */ + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Testing VL String type\n")); + + /* Open file */ + fid = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Create a datatype to refer to */ + tid_vlstr = H5Tcopy(H5T_C_S1); + CHECK(tid_vlstr, FAIL, "H5Tcopy"); + + /* Change padding and verify it */ + ret = H5Tset_strpad(tid_vlstr, H5T_STR_NULLPAD); + CHECK(ret, FAIL, "H5Tset_strpad"); + pad = H5Tget_strpad(tid_vlstr); + VERIFY(pad, H5T_STR_NULLPAD, "H5Tget_strpad"); + + /* Convert to variable-length string */ + ret = H5Tset_size(tid_vlstr, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Check if datatype is VL string */ + ret = H5Tget_class(tid_vlstr); + VERIFY(ret, H5T_STRING, "H5Tget_class"); + ret = H5Tis_variable_str(tid_vlstr); + VERIFY(ret, TRUE, "H5Tis_variable_str"); + + /* Verify that the class detects as a string */ + vl_str = H5Tdetect_class(tid_vlstr, H5T_STRING); + CHECK(vl_str, FAIL, "H5Tdetect_class"); + VERIFY(vl_str, TRUE, "H5Tdetect_class"); + + /* Check default character set and padding */ + cset = H5Tget_cset(tid_vlstr); + VERIFY(cset, H5T_CSET_ASCII, "H5Tget_cset"); + pad = H5Tget_strpad(tid_vlstr); + VERIFY(pad, H5T_STR_NULLPAD, "H5Tget_strpad"); + + /* Commit variable-length string datatype to storage */ + ret = H5Tcommit2(fid, VLSTR_TYPE, tid_vlstr, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Tcommit2"); + + /* Close datatype */ + ret = H5Tclose(tid_vlstr); + CHECK(ret, FAIL, "H5Tclose"); + + tid_vlstr = H5Topen2(fid, VLSTR_TYPE, H5P_DEFAULT); + CHECK(tid_vlstr, FAIL, "H5Topen2"); + + ret = H5Tclose(tid_vlstr); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + fid = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* Open the variable-length string datatype just created */ + tid_vlstr = H5Topen2(fid, VLSTR_TYPE, H5P_DEFAULT); + CHECK(tid_vlstr, FAIL, "H5Topen2"); + + /* Verify character set and padding */ + cset = H5Tget_cset(tid_vlstr); + VERIFY(cset, H5T_CSET_ASCII, "H5Tget_cset"); + pad = H5Tget_strpad(tid_vlstr); + VERIFY(pad, H5T_STR_NULLPAD, "H5Tget_strpad"); + + /* Close datatype and file */ + ret = H5Tclose(tid_vlstr); + CHECK(ret, FAIL, "H5Tclose"); + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_vlstring_type() */ + +/**************************************************************** +** +** test_compact_vlstring(): Test code for storing VL strings in +** compact datasets. 
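+**      The dataset creation property list selects H5D_COMPACT layout via
+**      H5Pset_layout(); note that only the dataset's raw data (the VL
+**      references) is stored compactly, while the string characters
+**      themselves still go to the file's global heap.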
+** +****************************************************************/ +static void +test_compact_vlstring(void) +{ + const char *wdata[SPACE1_DIM1] = {"one", "two", "three", "four"}; + char *rdata[SPACE1_DIM1]; /* Information read in */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Datatype ID */ + hid_t plist; /* Dataset creation property list */ + hsize_t dims1[] = {SPACE1_DIM1}; + unsigned i; /* counting variable */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing VL Strings in compact dataset\n")); + + /* Create file */ + fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a datatype to refer to */ + tid1 = H5Tcopy(H5T_C_S1); + CHECK(tid1, FAIL, "H5Tcopy"); + + ret = H5Tset_size(tid1, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + plist = H5Pcreate(H5P_DATASET_CREATE); + CHECK(plist, FAIL, "H5Pcreate"); + + ret = H5Pset_layout(plist, H5D_COMPACT); + CHECK(ret, FAIL, "H5Pset_layout"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset5", tid1, sid1, H5P_DEFAULT, plist, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (HDstrlen(wdata[i]) != HDstrlen(rdata[i])) { + TestErrPrintf("VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", (int)i, + (int)HDstrlen(wdata[i]), (int)i, (int)HDstrlen(rdata[i])); + continue; + } /* end if */ + if (HDstrcmp(wdata[i], rdata[i]) != 0) { + TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", (int)i, wdata[i], + (int)i, rdata[i]); + continue; + } /* end if */ + } /* end for */ + + /* Reclaim the read VL data */ + ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset create property list */ + ret = H5Pclose(plist); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /*test_compact_vlstrings*/ + +/**************************************************************** +** +** test_write_vl_string_attribute(): Test basic VL string code. +** Tests writing VL strings as attributes +** +****************************************************************/ +static void +test_write_vl_string_attribute(void) +{ + hid_t file, root, dataspace, att; + hid_t type; + herr_t ret; + char *string_att_check = NULL; + + /* Open the file */ + file = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fopen"); + + /* Create a datatype to refer to. 
*/ + type = H5Tcopy(H5T_C_S1); + CHECK(type, FAIL, "H5Tcopy"); + + ret = H5Tset_size(type, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + root = H5Gopen2(file, "/", H5P_DEFAULT); + CHECK(root, FAIL, "H5Gopen2"); + + dataspace = H5Screate(H5S_SCALAR); + CHECK(dataspace, FAIL, "H5Screate"); + + /* Test creating a "normal" sized string attribute */ + att = H5Acreate2(root, "test_scalar", type, dataspace, H5P_DEFAULT, H5P_DEFAULT); + CHECK(att, FAIL, "H5Acreate2"); + + ret = H5Awrite(att, type, &string_att); + CHECK(ret, FAIL, "H5Awrite"); + + ret = H5Aread(att, type, &string_att_check); + CHECK(ret, FAIL, "H5Aread"); + + if (HDstrcmp(string_att_check, string_att) != 0) + TestErrPrintf("VL string attributes don't match!, string_att=%s, string_att_check=%s\n", string_att, + string_att_check); + + H5free_memory(string_att_check); + string_att_check = NULL; + + ret = H5Aclose(att); + CHECK(ret, FAIL, "HAclose"); + + /* Test creating a "large" sized string attribute */ + att = H5Acreate2(root, "test_scalar_large", type, dataspace, H5P_DEFAULT, H5P_DEFAULT); + CHECK(att, FAIL, "H5Acreate2"); + + string_att_write = (char *)HDcalloc((size_t)8192, sizeof(char)); + HDmemset(string_att_write, 'A', (size_t)8191); + + ret = H5Awrite(att, type, &string_att_write); + CHECK(ret, FAIL, "H5Awrite"); + + ret = H5Aread(att, type, &string_att_check); + CHECK(ret, FAIL, "H5Aread"); + + if (HDstrcmp(string_att_check, string_att_write) != 0) + TestErrPrintf("VL string attributes don't match!, string_att_write=%s, string_att_check=%s\n", + string_att_write, string_att_check); + + H5free_memory(string_att_check); + string_att_check = NULL; + + /* The attribute string written is freed below, in the test_read_vl_string_attribute() test */ + /* HDfree(string_att_write); */ + + ret = H5Aclose(att); + CHECK(ret, FAIL, "HAclose"); + + ret = H5Gclose(root); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Tclose(type); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Sclose(dataspace); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +} + +/**************************************************************** +** +** test_read_vl_string_attribute(): Test basic VL string code. +** Tests reading VL strings from attributes +** +****************************************************************/ +static void +test_read_vl_string_attribute(void) +{ + hid_t file, root, att; + hid_t type; + herr_t ret; + char *string_att_check = NULL; + + /* Open file */ + file = H5Fopen(DATAFILE, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file, FAIL, "H5Fopen"); + + /* Create a datatype to refer to. 
*/ + type = H5Tcopy(H5T_C_S1); + CHECK(type, FAIL, "H5Tcopy"); + + ret = H5Tset_size(type, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + root = H5Gopen2(file, "/", H5P_DEFAULT); + CHECK(root, FAIL, "H5Gopen2"); + + /* Test reading "normal" sized string attribute */ + att = H5Aopen(root, "test_scalar", H5P_DEFAULT); + CHECK(att, FAIL, "H5Aopen"); + + ret = H5Aread(att, type, &string_att_check); + CHECK(ret, FAIL, "H5Aread"); + + if (HDstrcmp(string_att_check, string_att) != 0) + TestErrPrintf("VL string attributes don't match!, string_att=%s, string_att_check=%s\n", string_att, + string_att_check); + + H5free_memory(string_att_check); + string_att_check = NULL; + + ret = H5Aclose(att); + CHECK(ret, FAIL, "HAclose"); + + /* Test reading "large" sized string attribute */ + att = H5Aopen(root, "test_scalar_large", H5P_DEFAULT); + CHECK(att, FAIL, "H5Aopen"); + + if (string_att_write) { + ret = H5Aread(att, type, &string_att_check); + CHECK(ret, FAIL, "H5Aread"); + + if (HDstrcmp(string_att_check, string_att_write) != 0) + TestErrPrintf("VL string attributes don't match!, string_att_write=%s, string_att_check=%s\n", + string_att_write, string_att_check); + + H5free_memory(string_att_check); + string_att_check = NULL; + } + + /* Free string allocated in test_write_vl_string_attribute */ + if (string_att_write) + HDfree(string_att_write); + + ret = H5Aclose(att); + CHECK(ret, FAIL, "HAclose"); + + ret = H5Tclose(type); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Gclose(root); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); +} + +/* Helper routine for test_vl_rewrite() */ +static void +write_scalar_dset(hid_t file, hid_t type, hid_t space, char *name, char *data) +{ + hid_t dset; + herr_t ret; + + dset = H5Dcreate2(file, name, type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); + + ret = H5Dwrite(dset, type, space, space, H5P_DEFAULT, &data); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); +} + +/* Helper routine for test_vl_rewrite() */ +static void +read_scalar_dset(hid_t file, hid_t type, hid_t space, char *name, char *data) +{ + hid_t dset; + herr_t ret; + char *data_read; + + dset = H5Dopen2(file, name, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dopen2"); + + ret = H5Dread(dset, type, space, space, H5P_DEFAULT, &data_read); + CHECK(ret, FAIL, "H5Dread"); + + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + + if (HDstrcmp(data, data_read) != 0) + TestErrPrintf("Expected %s for dataset %s but read %s\n", data, name, data_read); + + ret = H5Treclaim(type, space, H5P_DEFAULT, &data_read); + CHECK(ret, FAIL, "H5Treclaim"); +} + +/**************************************************************** +** +** test_vl_rewrite(): Test basic VL string code. +** Tests I/O on VL strings when lots of objects in the file +** have been linked/unlinked. 
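+**      The test creates REWRITE_NDATASETS scalar VL-string datasets in the
+**      first file, copies them into the second file, unlinks them from the
+**      second file with H5Ldelete(), and then copies them again, exercising
+**      reuse of the space freed by the deletions.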
+** +****************************************************************/ +static void +test_vl_rewrite(void) +{ + hid_t file1, file2; /* File IDs */ + hid_t type; /* VL string datatype ID */ + hid_t space; /* Scalar dataspace */ + char name[256]; /* Buffer for names & data */ + int i; /* Local index variable */ + herr_t ret; /* Generic return value */ + + /* Create the VL string datatype */ + type = H5Tcopy(H5T_C_S1); + CHECK(type, FAIL, "H5Tcopy"); + + ret = H5Tset_size(type, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Create the scalar dataspace */ + space = H5Screate(H5S_SCALAR); + CHECK(space, FAIL, "H5Screate"); + + /* Open the files */ + file1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file1, FAIL, "H5Fcreate"); + + file2 = H5Fcreate(DATAFILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file1, FAIL, "H5Fcreate"); + + /* Create in file 1 */ + for (i = 0; i < REWRITE_NDATASETS; i++) { + HDsnprintf(name, sizeof(name), "/set_%d", i); + write_scalar_dset(file1, type, space, name, name); + } + + /* Effectively copy data from file 1 to 2 */ + for (i = 0; i < REWRITE_NDATASETS; i++) { + HDsnprintf(name, sizeof(name), "/set_%d", i); + read_scalar_dset(file1, type, space, name, name); + write_scalar_dset(file2, type, space, name, name); + } + + /* Read back from file 2 */ + for (i = 0; i < REWRITE_NDATASETS; i++) { + HDsnprintf(name, sizeof(name), "/set_%d", i); + read_scalar_dset(file2, type, space, name, name); + } /* end for */ + + /* Remove from file 2. */ + for (i = 0; i < REWRITE_NDATASETS; i++) { + HDsnprintf(name, sizeof(name), "/set_%d", i); + ret = H5Ldelete(file2, name, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + } /* end for */ + + /* Effectively copy from file 1 to file 2 */ + for (i = 0; i < REWRITE_NDATASETS; i++) { + HDsnprintf(name, sizeof(name), "/set_%d", i); + read_scalar_dset(file1, type, space, name, name); + write_scalar_dset(file2, type, space, name, name); + } /* end for */ + + /* Close everything */ + ret = H5Tclose(type); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Sclose(space); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(file1); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Fclose(file2); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_vl_rewrite() */ + +/**************************************************************** + ** + ** test_write_same_element(): + ** Tests writing to the same element of VL string using + ** H5Sselect_element. 
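+ **      The body is compiled only when NO_WRITE_SAME_ELEMENT_TWICE is not
+ **      defined; it re-opens the dataset and uses H5Sselect_elements() with
+ **      coordinates (0, 2, 2, 0) so that elements 0 and 2 are each written
+ **      twice.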
+ ** + ****************************************************************/ +static void +test_write_same_element(void) +{ +#ifndef NO_WRITE_SAME_ELEMENT_TWICE + hid_t file1, dataset1; + hid_t mspace, fspace, dtype; + hsize_t fdim[] = {SPACE1_DIM1}; + const char *wdata[SPACE1_DIM1] = {"Parting", "is such a", "sweet", "sorrow."}; + const char *val[SPACE1_DIM1] = {"But", "reuniting", "is a", "great joy"}; + hsize_t marray[] = {NUMP}; + hsize_t coord[SPACE1_RANK][NUMP]; + herr_t ret; +#endif + + MESSAGE( + 5, + ("Testing writing to same element of VL string dataset twice - SKIPPED for now due to no support\n")); +#ifndef NO_WRITE_SAME_ELEMENT_TWICE + file1 = H5Fcreate(DATAFILE3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file1, FAIL, "H5Fcreate"); + + dtype = H5Tcopy(H5T_C_S1); + CHECK(dtype, FAIL, "H5Tcopy"); + + ret = H5Tset_size(dtype, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + fspace = H5Screate_simple(SPACE1_RANK, fdim, NULL); + CHECK(fspace, FAIL, "H5Screate_simple"); + + dataset1 = H5Dcreate2(file1, DATASET, dtype, fspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset1, FAIL, "H5Dcreate"); + + ret = H5Dwrite(dataset1, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Dclose(dataset1); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Sclose(fspace); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(file1); + CHECK(ret, FAIL, "H5Fclose"); + + /* + * Open the file. Select the same points, write values to those point locations. + */ + file1 = H5Fopen(DATAFILE3, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file1, FAIL, "H5Fopen"); + + dataset1 = H5Dopen2(file1, DATASET, H5P_DEFAULT); + CHECK(dataset1, FAIL, "H5Dopen"); + + fspace = H5Dget_space(dataset1); + CHECK(fspace, FAIL, "H5Dget_space"); + + dtype = H5Dget_type(dataset1); + CHECK(dtype, FAIL, "H5Dget_type"); + + mspace = H5Screate_simple(1, marray, NULL); + CHECK(mspace, FAIL, "H5Screate_simple"); + + coord[0][0] = 0; + coord[0][1] = 2; + coord[0][2] = 2; + coord[0][3] = 0; + + ret = H5Sselect_elements(fspace, H5S_SELECT_SET, NUMP, (const hsize_t *)&coord); + CHECK(ret, FAIL, "H5Sselect_elements"); + + ret = H5Dwrite(dataset1, dtype, mspace, fspace, H5P_DEFAULT, val); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Tclose(dtype); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Dclose(dataset1); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(fspace); + CHECK(ret, FAIL, "H5Dclose"); + + ret = H5Sclose(mspace); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Fclose(file1); + CHECK(ret, FAIL, "H5Fclose"); +#endif +} /* test_write_same_element */ + +/**************************************************************** +** +** test_vlstrings(): Main VL string testing routine. 
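+**      The sub-tests share DATAFILE and are order dependent; in particular,
+**      test_write_vl_string_attribute() allocates string_att_write, which is
+**      later compared against and freed by test_read_vl_string_attribute().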
+** +****************************************************************/ +void +test_vlstrings(void) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing Variable-Length Strings\n")); + + /* These tests use the same file */ + /* Test basic VL string datatype */ + test_vlstrings_basic(); + test_vlstrings_special(); + test_vlstring_type(); + test_compact_vlstring(); + + /* Test using VL strings in attributes */ + test_write_vl_string_attribute(); + test_read_vl_string_attribute(); + + /* Test writing VL datasets in files with lots of unlinking */ + test_vl_rewrite(); + /* Test writing to the same element more than once using H5Sselect_elements */ + test_write_same_element(); +} /* test_vlstrings() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_vlstrings + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Quincey Koziol + * September 10, 1999 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +cleanup_vlstrings(void) +{ + H5Fdelete(DATAFILE, H5P_DEFAULT); + H5Fdelete(DATAFILE2, H5P_DEFAULT); + H5Fdelete(DATAFILE3, H5P_DEFAULT); +} diff --git a/test/API/tvltypes.c b/test/API/tvltypes.c new file mode 100644 index 00000000000..eca534b8b78 --- /dev/null +++ b/test/API/tvltypes.c @@ -0,0 +1,3268 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** + * + * Test program: tvltypes + * + * Test the Variable-Length Datatype functionality + * + *************************************************************/ + +#include "testhdf5.h" + +/* #include "H5Dprivate.h" */ + +#define FILENAME "tvltypes.h5" + +/* 1-D dataset with fixed dimensions */ +#define SPACE1_RANK 1 +#define SPACE1_DIM1 4 + +/* 1-D dataset with fixed dimensions */ +#define SPACE3_RANK 1 +#define SPACE3_DIM1 128 +#define L1_INCM 16 +#define L2_INCM 8 +#define L3_INCM 3 + +/* Default temporary buffer size - Pulled from H5Dprivate.h */ +#define H5D_TEMP_BUF_SIZE (1024 * 1024) + +/* 1-D dataset with fixed dimensions */ +#define SPACE4_RANK 1 +#define SPACE4_DIM_SMALL 128 +#define SPACE4_DIM_LARGE (H5D_TEMP_BUF_SIZE / 64) + +void *test_vltypes_alloc_custom(size_t size, void *info); +void test_vltypes_free_custom(void *mem, void *info); + +/**************************************************************** +** +** test_vltypes_alloc_custom(): Test VL datatype custom memory +** allocation routines. This routine just uses malloc to +** allocate the memory and increments the amount of memory +** allocated. 
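+**      Each block is over-allocated by 'extra' bytes (the larger of
+**      sizeof(void *) and sizeof(size_t)); the requested size is stored at
+**      the front of the block so that test_vltypes_free_custom() can
+**      subtract it from the running total kept in *mem_used.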
+** +****************************************************************/ +void * +test_vltypes_alloc_custom(size_t size, void *mem_used) +{ + void *ret_value; /* Pointer to return */ + const size_t extra = MAX(sizeof(void *), sizeof(size_t)); /* Extra space needed */ + /* (This weird contortion is required on the + * DEC Alpha to keep the alignment correct - QAK) + */ + + if ((ret_value = HDmalloc(extra + size)) != NULL) { + *(size_t *)ret_value = size; + *(size_t *)mem_used += size; + } /* end if */ + + ret_value = ((unsigned char *)ret_value) + extra; + + return (ret_value); +} + +/**************************************************************** +** +** test_vltypes_free_custom(): Test VL datatype custom memory +** allocation routines. This routine just uses free to +** release the memory and decrements the amount of memory +** allocated. +** +****************************************************************/ +void +test_vltypes_free_custom(void *_mem, void *mem_used) +{ + if (_mem) { + const size_t extra = MAX(sizeof(void *), sizeof(size_t)); /* Extra space needed */ + /* (This weird contortion is required + * on the DEC Alpha to keep the + * alignment correct - QAK) + */ + unsigned char *mem = ((unsigned char *)_mem) - extra; /* Pointer to actual block allocated */ + + *(size_t *)mem_used -= *(size_t *)((void *)mem); + HDfree(mem); + } /* end if */ +} + +/**************************************************************** +** +** test_vltypes_data_create(): Dataset of VL is supposed to +** fail when fill value is never written to dataset. +** +****************************************************************/ +static void +test_vltypes_dataset_create(void) +{ + hid_t fid1; /* HDF5 File IDs */ + hid_t dcpl; /* Dataset Property list */ + hid_t dataset; /* Dataset ID */ + hsize_t dims1[] = {SPACE1_DIM1}; + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Datatype ID */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Dataset of VL Datatype Functionality\n")); + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a datatype to refer to */ + tid1 = H5Tvlen_create(H5T_NATIVE_UINT); + CHECK(tid1, FAIL, "H5Tvlen_create"); + + /* Create dataset property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); + + /* Set fill value writing time to be NEVER */ + ret = H5Pset_fill_time(dcpl, H5D_FILL_TIME_NEVER); + CHECK(ret, FAIL, "H5Pset_fill_time"); + + /* Create a dataset, supposed to fail */ + H5E_BEGIN_TRY + { + dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT); + } + H5E_END_TRY; + VERIFY(dataset, FAIL, "H5Dcreate2"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} + +/**************************************************************** +** +** test_vltypes_funcs(): Test some type functions that are and +** aren't supposed to work with VL type. 
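+**      The size, precision, ebias, pad, norm, and offset calls are expected
+**      to succeed on a VL type of H5T_IEEE_F32BE, while the string-specific
+**      H5Tget_cset() and H5Tget_strpad() calls are expected to fail and are
+**      checked inside H5E_BEGIN_TRY blocks.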
+** +****************************************************************/ +static void +test_vltypes_funcs(void) +{ + hid_t type; /* Datatype ID */ + size_t size; + H5T_pad_t inpad; + H5T_norm_t norm; + H5T_cset_t cset; + H5T_str_t strpad; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing some type functions for VL\n")); + + /* Create a datatype to refer to */ + type = H5Tvlen_create(H5T_IEEE_F32BE); + CHECK(type, FAIL, "H5Tvlen_create"); + + size = H5Tget_precision(type); + CHECK(size, 0, "H5Tget_precision"); + + size = H5Tget_size(type); + CHECK(size, 0, "H5Tget_size"); + + size = H5Tget_ebias(type); + CHECK(size, 0, "H5Tget_ebias"); + + ret = H5Tset_pad(type, H5T_PAD_ZERO, H5T_PAD_ONE); + CHECK(ret, FAIL, "H5Tset_pad"); + + inpad = H5Tget_inpad(type); + CHECK(inpad, FAIL, "H5Tget_inpad"); + + norm = H5Tget_norm(type); + CHECK(norm, FAIL, "H5Tget_norm"); + + ret = H5Tset_offset(type, (size_t)16); + CHECK(ret, FAIL, "H5Tset_offset"); + + H5E_BEGIN_TRY + { + cset = H5Tget_cset(type); + } + H5E_END_TRY; + VERIFY(cset, FAIL, "H5Tget_cset"); + + H5E_BEGIN_TRY + { + strpad = H5Tget_strpad(type); + } + H5E_END_TRY; + VERIFY(strpad, FAIL, "H5Tget_strpad"); + + /* Close datatype */ + ret = H5Tclose(type); + CHECK(ret, FAIL, "H5Tclose"); +} + +/**************************************************************** +** +** test_vltypes_vlen_atomic(): Test basic VL datatype code. +** Tests VL datatypes of atomic datatypes +** +****************************************************************/ +static void +test_vltypes_vlen_atomic(void) +{ + hvl_t wdata[SPACE1_DIM1]; /* Information to write */ + hvl_t wdata2[SPACE1_DIM1]; /* Information to write */ + hvl_t rdata[SPACE1_DIM1]; /* Information read in */ + hvl_t fill; /* Fill value */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t sid2; /* ID of bad dataspace (no extent set) */ + hid_t tid1; /* Datatype ID */ + hid_t dcpl_pid; /* Dataset creation property list ID */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hsize_t dims1[] = {SPACE1_DIM1}; + hsize_t size; /* Number of bytes which will be used */ + unsigned i, j; /* counting variables */ + size_t mem_used = 0; /* Memory used during allocation */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Atomic VL Datatype Functionality\n")); + + /* Allocate and initialize VL data to write */ + for (i = 0; i < SPACE1_DIM1; i++) { + wdata[i].p = HDmalloc((i + 1) * sizeof(unsigned int)); + wdata[i].len = i + 1; + for (j = 0; j < (i + 1); j++) + ((unsigned int *)wdata[i].p)[j] = i * 10 + j; + + wdata2[i].p = NULL; + wdata2[i].len = 0; + } /* end for */ + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a datatype to refer to */ + tid1 = H5Tvlen_create(H5T_NATIVE_UINT); + CHECK(tid1, FAIL, "H5Tvlen_create"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Read from dataset before writing data */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + if (rdata[i].len 
!= 0 || rdata[i].p != NULL) + TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i, + (unsigned)rdata[i].len, (int)i, rdata[i].p); + + /* Write "nil" data to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata2); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read from dataset with "nil" data */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + if (rdata[i].len != 0 || rdata[i].p != NULL) + TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i, + (unsigned)rdata[i].len, (int)i, rdata[i].p); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create second dataset, with fill value */ + dcpl_pid = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl_pid, FAIL, "H5Pcreate"); + + /* Set the fill value for the second dataset */ + fill.p = NULL; + fill.len = 0; + ret = H5Pset_fill_value(dcpl_pid, tid1, &fill); + CHECK(ret, FAIL, "H5Pset_fill_value"); + + /* Create a second dataset */ + dataset = H5Dcreate2(fid1, "Dataset2", tid1, sid1, H5P_DEFAULT, dcpl_pid, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close dataset creation property list */ + ret = H5Pclose(dcpl_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Read from dataset before writing data */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + if (rdata[i].len != 0 || rdata[i].p != NULL) + TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i, + (unsigned)rdata[i].len, (int)i, rdata[i].p); + + /* Write "nil" data to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata2); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read from dataset with "nil" data */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + if (rdata[i].len != 0 || rdata[i].p != NULL) + TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i, + (unsigned)rdata[i].len, (int)i, rdata[i].p); + + /* Write data to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open the file for data checking */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open a dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get dataspace for datasets */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Get datatype for dataset */ + tid1 = H5Dget_type(dataset); + CHECK(tid1, FAIL, "H5Dget_type"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, 
test_vltypes_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory will be used */ + ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata[i].len != rdata[i].len) { + TestErrPrintf("%d: VL data lengths don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, + (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); + continue; + } /* end if */ + for (j = 0; j < rdata[i].len; j++) { + if (((unsigned int *)wdata[i].p)[j] != ((unsigned int *)rdata[i].p)[j]) { + TestErrPrintf("VL data values don't match!, wdata[%d].p[%d]=%d, rdata[%d].p[%d]=%d\n", (int)i, + (int)j, (int)((unsigned int *)wdata[i].p)[j], (int)i, (int)j, + (int)((unsigned int *)rdata[i].p)[j]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Reclaim the read VL data */ + ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Open second dataset */ + dataset = H5Dopen2(fid1, "Dataset2", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get dataspace for datasets */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Get datatype for dataset */ + tid1 = H5Dget_type(dataset); + CHECK(tid1, FAIL, "H5Dget_type"); + + /* Create a "bad" dataspace with no extent set */ + sid2 = H5Screate(H5S_SIMPLE); + CHECK(sid2, FAIL, "H5Screate"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory will be used */ + ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dvlen_get_buf_size"); + + /* Try to call H5Dvlen_get_buf with bad dataspace */ + H5E_BEGIN_TRY + { + ret = H5Dvlen_get_buf_size(dataset, tid1, sid2, &size); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory 
has been used */ + /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata[i].len != rdata[i].len) { + TestErrPrintf("%d: VL data lengths don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, + (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); + continue; + } /* end if */ + for (j = 0; j < rdata[i].len; j++) { + if (((unsigned int *)wdata[i].p)[j] != ((unsigned int *)rdata[i].p)[j]) { + TestErrPrintf("VL data values don't match!, wdata[%d].p[%d]=%d, rdata[%d].p[%d]=%d\n", (int)i, + (int)j, (int)((unsigned int *)wdata[i].p)[j], (int)i, (int)j, + (int)((unsigned int *)rdata[i].p)[j]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Try to reclaim read data using "bad" dataspace with no extent + * Should fail */ + H5E_BEGIN_TRY + { + ret = H5Treclaim(tid1, sid2, xfer_pid, rdata); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Treclaim"); + + /* Reclaim the read VL data */ + ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_vltypes_vlen_atomic() */ + +/**************************************************************** +** +** rewrite_vltypes_vlen_atomic(): check memory leak for basic VL datatype. 
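+**      (Reopens "Dataset1" written by test_vltypes_vlen_atomic() and
+**      overwrites it with longer sequences of 4 + 5 + 6 + 7 elements.)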
+** Check memory leak for VL datatypes of atomic datatypes +** +****************************************************************/ +static void +rewrite_vltypes_vlen_atomic(void) +{ + hvl_t wdata[SPACE1_DIM1]; /* Information to write */ + hvl_t rdata[SPACE1_DIM1]; /* Information read in */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1; /* Datatype ID */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hsize_t size; /* Number of bytes which will be used */ + unsigned i, j; /* counting variables */ + size_t mem_used = 0; /* Memory used during allocation */ + unsigned increment = 4; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Check Memory Leak for Basic Atomic VL Datatype Functionality\n")); + + /* Allocate and initialize VL data to write */ + for (i = 0; i < SPACE1_DIM1; i++) { + wdata[i].p = HDmalloc((i + increment) * sizeof(unsigned int)); + wdata[i].len = i + increment; + for (j = 0; j < (i + increment); j++) + ((unsigned int *)wdata[i].p)[j] = i * 20 + j; + } /* end for */ + + /* Open file created in test_vltypes_vlen_atomic() */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset created in test_vltypes_vlen_atomic() */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Open dataspace for dataset */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Get datatype for dataset */ + tid1 = H5Dget_type(dataset); + CHECK(tid1, FAIL, "H5Dget_type"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open the file for data checking */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open a dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get dataspace for datasets */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Get datatype for dataset */ + tid1 = H5Dget_type(dataset); + CHECK(tid1, FAIL, "H5Dget_type"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory will be used */ + ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* 22 elements allocated = 4+5+6+7 elements for each array position */ + VERIFY(size, 22 * sizeof(unsigned int), "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + /* 22 elements allocated = 4+5+6+7 elements for each array position */ + VERIFY(mem_used, 22 * sizeof(unsigned int), "H5Dread"); + + /* Compare data read in */ + 
for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata[i].len != rdata[i].len) { + TestErrPrintf("%d: VL data lengths don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, + (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); + continue; + } /* end if */ + for (j = 0; j < rdata[i].len; j++) { + if (((unsigned int *)wdata[i].p)[j] != ((unsigned int *)rdata[i].p)[j]) { + TestErrPrintf("VL data values don't match!, wdata[%d].p[%d]=%d, rdata[%d].p[%d]=%d\n", (int)i, + (int)j, (int)((unsigned int *)wdata[i].p)[j], (int)i, (int)j, + (int)((unsigned int *)rdata[i].p)[j]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Reclaim the read VL data */ + ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end rewrite_vltypes_vlen_atomic() */ + +/**************************************************************** +** +** test_vltypes_vlen_compound(): Test basic VL datatype code. +** Test VL datatypes of compound datatypes +** +****************************************************************/ +static void +test_vltypes_vlen_compound(void) +{ + typedef struct { /* Struct that the VL sequences are composed of */ + int i; + float f; + } s1; + hvl_t wdata[SPACE1_DIM1]; /* Information to write */ + hvl_t rdata[SPACE1_DIM1]; /* Information read in */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1, tid2; /* Datatype IDs */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hsize_t dims1[] = {SPACE1_DIM1}; + hsize_t size; /* Number of bytes which will be used */ + unsigned i, j; /* counting variables */ + size_t mem_used = 0; /* Memory used during allocation */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Basic Compound VL Datatype Functionality\n")); + + /* Allocate and initialize VL data to write */ + for (i = 0; i < SPACE1_DIM1; i++) { + wdata[i].p = HDmalloc((i + 1) * sizeof(s1)); + wdata[i].len = i + 1; + for (j = 0; j < (i + 1); j++) { + ((s1 *)wdata[i].p)[j].i = (int)(i * 10 + j); + ((s1 *)wdata[i].p)[j].f = (float)(i * 20 + j) / 3.0F; + } /* end for */ + } /* end for */ + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create the base compound type */ + tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); + CHECK(tid2, FAIL, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create a datatype to refer to */ + tid1 = H5Tvlen_create(tid2); + CHECK(tid1, FAIL, "H5Tvlen_create"); + 
+ /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory will be used */ + ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(s1), "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(s1), "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata[i].len != rdata[i].len) { + TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, + (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); + continue; + } /* end if */ + for (j = 0; j < rdata[i].len; j++) { + if (((s1 *)wdata[i].p)[j].i != ((s1 *)rdata[i].p)[j].i) { + TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].i=%d, rdata[%d].p[%d].i=%d\n", + (int)i, (int)j, (int)((s1 *)wdata[i].p)[j].i, (int)i, (int)j, + (int)((s1 *)rdata[i].p)[j].i); + continue; + } /* end if */ + if (!H5_FLT_ABS_EQUAL(((s1 *)wdata[i].p)[j].f, ((s1 *)rdata[i].p)[j].f)) { + TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].f=%f, rdata[%d].p[%d].f=%f\n", + (int)i, (int)j, (double)((s1 *)wdata[i].p)[j].f, (int)i, (int)j, + (double)((s1 *)rdata[i].p)[j].f); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Reclaim the VL data */ + ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_vltypes_vlen_compound() */ + +/**************************************************************** +** +** rewrite_vltypes_vlen_compound(): Check memory leak for basic VL datatype. 
+** Checks memory leak for VL datatypes of compound datatypes +** +****************************************************************/ +static void +rewrite_vltypes_vlen_compound(void) +{ + typedef struct { /* Struct that the VL sequences are composed of */ + int i; + float f; + } s1; + hvl_t wdata[SPACE1_DIM1]; /* Information to write */ + hvl_t rdata[SPACE1_DIM1]; /* Information read in */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1, tid2; /* Datatype IDs */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hsize_t size; /* Number of bytes which will be used */ + unsigned i, j; /* counting variables */ + size_t mem_used = 0; /* Memory used during allocation */ + unsigned increment = 4; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Check Memory Leak for Basic Compound VL Datatype Functionality\n")); + + /* Allocate and initialize VL data to write */ + for (i = 0; i < SPACE1_DIM1; i++) { + wdata[i].p = HDmalloc((i + increment) * sizeof(s1)); + wdata[i].len = i + increment; + for (j = 0; j < (i + increment); j++) { + ((s1 *)wdata[i].p)[j].i = (int)(i * 40 + j); + ((s1 *)wdata[i].p)[j].f = (float)(i * 60 + j) / 3.0F; + } /* end for */ + } /* end for */ + + /* Create file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Create the base compound type */ + tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); + CHECK(tid2, FAIL, "H5Tcreate"); + + ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create a datatype to refer to */ + tid1 = H5Tvlen_create(tid2); + CHECK(tid1, FAIL, "H5Tvlen_create"); + + /* Create a dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Create dataspace for datasets */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory will be used */ + ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* 22 elements allocated = 4 + 5 + 6 + 7 elements for each array position */ + VERIFY(size, 22 * sizeof(s1), "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + /* 22 elements allocated = 4 + 5 + 6 + 7 elements for each array position */ + VERIFY(mem_used, 22 * sizeof(s1), "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata[i].len != rdata[i].len) { + TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, + (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); + continue; + } /* end if */ + for (j = 0; j < rdata[i].len; j++) { + if (((s1 *)wdata[i].p)[j].i != ((s1 *)rdata[i].p)[j].i) { + 
TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].i=%d, rdata[%d].p[%d].i=%d\n", + (int)i, (int)j, (int)((s1 *)wdata[i].p)[j].i, (int)i, (int)j, + (int)((s1 *)rdata[i].p)[j].i); + continue; + } /* end if */ + if (!H5_FLT_ABS_EQUAL(((s1 *)wdata[i].p)[j].f, ((s1 *)rdata[i].p)[j].f)) { + TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].f=%f, rdata[%d].p[%d].f=%f\n", + (int)i, (int)j, (double)((s1 *)wdata[i].p)[j].f, (int)i, (int)j, + (double)((s1 *)rdata[i].p)[j].f); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Reclaim the VL data */ + ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end rewrite_vltypes_vlen_compound() */ + +/**************************************************************** +** +** test_vltypes_compound_vlen_vlen(): Test basic VL datatype code. +** Tests compound datatypes with VL datatypes of VL datatypes. +** +****************************************************************/ +static void +test_vltypes_compound_vlen_vlen(void) +{ + typedef struct { /* Struct that the compound type are composed of */ + int i; + float f; + hvl_t v; + } s1; + s1 *wdata; /* data to write */ + s1 *rdata; /* data to read */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1, tid2, tid3; /* Datatype IDs */ + hsize_t dims1[] = {SPACE3_DIM1}; + unsigned i, j, k; /* counting variables */ + hvl_t *t1, *t2; /* Temporary pointer to VL information */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Compound Datatypes with VL Atomic Datatype Component Functionality\n")); + + /* Allocate and initialize VL data to write */ + wdata = (s1 *)HDmalloc(sizeof(s1) * SPACE3_DIM1); + CHECK_PTR(wdata, "HDmalloc"); + rdata = (s1 *)HDmalloc(sizeof(s1) * SPACE3_DIM1); + CHECK_PTR(rdata, "HDmalloc"); + for (i = 0; i < SPACE3_DIM1; i++) { + wdata[i].i = (int)(i * 10); + wdata[i].f = (float)(i * 20) / 3.0F; + wdata[i].v.p = HDmalloc((i + L1_INCM) * sizeof(hvl_t)); + wdata[i].v.len = i + L1_INCM; + for (t1 = (hvl_t *)((wdata[i].v).p), j = 0; j < (i + L1_INCM); j++, t1++) { + t1->p = HDmalloc((j + L2_INCM) * sizeof(unsigned int)); + t1->len = j + L2_INCM; + for (k = 0; k < j + L2_INCM; k++) + ((unsigned int *)t1->p)[k] = i * 100 + j * 10 + k; + } /* end for */ + } /* end for */ + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE3_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a VL datatype to refer to */ + tid3 = H5Tvlen_create(H5T_NATIVE_UINT); + CHECK(tid3, FAIL, "H5Tvlen_create"); + + /* Create a VL datatype to refer to */ + 
tid1 = H5Tvlen_create(tid3); + CHECK(tid1, FAIL, "H5Tvlen_create"); + + /* Create the base compound type */ + tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); + CHECK(tid2, FAIL, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open a dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE3_DIM1; i++) { + if (wdata[i].i != rdata[i].i) { + TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i, + (int)wdata[i].i, (int)i, (int)rdata[i].i); + continue; + } /* end if */ + if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) { + TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i, + (double)wdata[i].f, (int)i, (double)rdata[i].f); + continue; + } /* end if */ + + if (wdata[i].v.len != rdata[i].v.len) { + TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n", + __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len); + continue; + } /* end if */ + + for (t1 = (hvl_t *)(wdata[i].v.p), t2 = (hvl_t *)(rdata[i].v.p), j = 0; j < rdata[i].v.len; + j++, t1++, t2++) { + if (t1->len != t2->len) { + TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n", + __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len); + continue; + } /* end if */ + for (k = 0; k < t2->len; k++) { + if (((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) { + TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k, + (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int *)t2->p)[k]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + } /* end for */ + + /* Reclaim the VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid3); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Release buffers */ + HDfree(wdata); + HDfree(rdata); +} /* end test_vltypes_compound_vlen_vlen() */ + 
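+/****************************************************************
+**
+**  Note: the VL tests in this file all follow the same round-trip
+**  pattern.  A rough sketch of that pattern (the identifier names
+**  below are illustrative only and are not part of this test file):
+**
+**      hvl_t   buf[SPACE1_DIM1];
+**      hsize_t nbytes;
+**
+**      ret = H5Dwrite(dset, vltype, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
+**      ret = H5Dvlen_get_buf_size(dset, vltype, space, &nbytes);
+**      ret = H5Dread(dset, vltype, H5S_ALL, H5S_ALL, xfer_pid, buf);
+**      ret = H5Treclaim(vltype, space, xfer_pid, buf);
+**
+**  where xfer_pid is either H5P_DEFAULT or a dataset transfer property
+**  list carrying H5Pset_vlen_mem_manager() callbacks, so the number of
+**  bytes handed out by H5Dread() and returned by H5Treclaim() can be
+**  verified against each other.
+**
+****************************************************************/
+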
+/**************************************************************** +** +** test_vltypes_compound_vlstr(): Test VL datatype code. +** Tests VL datatypes of compound datatypes with VL string. +** Dataset is extensible chunked, and data is rewritten with +** shorter VL data. +** +****************************************************************/ +static void +test_vltypes_compound_vlstr(void) +{ + typedef enum { red, blue, green } e1; + typedef struct { + char *string; + e1 color; + } s2; + typedef struct { /* Struct that the compound type are composed of */ + hvl_t v; + } s1; + s1 wdata[SPACE1_DIM1]; /* data to write */ + s1 wdata2[SPACE1_DIM1]; /* data to write */ + s1 rdata[SPACE1_DIM1]; /* data to read */ + s1 rdata2[SPACE1_DIM1]; /* data to read */ + char str[64] = "a\0"; + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset, dset2; /* Dataset ID */ + hid_t sid1, sid2, filespace, filespace2; /* Dataspace ID */ + hid_t tid1, tid2, tid3, tid4, tid5; /* Datatype IDs */ + hid_t cparms; + hsize_t dims1[] = {SPACE1_DIM1}; + hsize_t chunk_dims[] = {SPACE1_DIM1 / 2}; + hsize_t maxdims[] = {H5S_UNLIMITED}; + hsize_t size[] = {SPACE1_DIM1}; + hsize_t offset[] = {0}; + unsigned i, j; /* counting variables */ + s2 *t1, *t2; /* Temporary pointer to VL information */ + int val; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing VL Datatype of Compound Datatype with VL String Functionality\n")); + + /* Allocate and initialize VL data to write */ + for (i = 0; i < SPACE1_DIM1; i++) { + wdata[i].v.p = (s2 *)HDmalloc((i + L3_INCM) * sizeof(s2)); + wdata[i].v.len = i + L3_INCM; + for (t1 = (s2 *)((wdata[i].v).p), j = 0; j < (i + L3_INCM); j++, t1++) { + HDstrcat(str, "m"); + t1->string = (char *)HDmalloc(HDstrlen(str) * sizeof(char) + 1); + HDstrcpy(t1->string, str); + /*t1->color = red;*/ + t1->color = blue; + } + } /* end for */ + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, maxdims); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a VL string type*/ + tid4 = H5Tcopy(H5T_C_S1); + CHECK(tid4, FAIL, "H5Tcopy"); + ret = H5Tset_size(tid4, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Create an enum type */ + tid3 = H5Tenum_create(H5T_STD_I32LE); + val = 0; + ret = H5Tenum_insert(tid3, "RED", &val); + CHECK(ret, FAIL, "H5Tenum_insert"); + val = 1; + ret = H5Tenum_insert(tid3, "BLUE", &val); + CHECK(ret, FAIL, "H5Tenum_insert"); + val = 2; + ret = H5Tenum_insert(tid3, "GREEN", &val); + CHECK(ret, FAIL, "H5Tenum_insert"); + + /* Create the first layer compound type */ + tid5 = H5Tcreate(H5T_COMPOUND, sizeof(s2)); + CHECK(tid5, FAIL, "H5Tcreate"); + /* Insert fields */ + ret = H5Tinsert(tid5, "string", HOFFSET(s2, string), tid4); + CHECK(ret, FAIL, "H5Tinsert"); + /* Insert fields */ + ret = H5Tinsert(tid5, "enumerate", HOFFSET(s2, color), tid3); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create a VL datatype of first layer compound type */ + tid1 = H5Tvlen_create(tid5); + CHECK(tid1, FAIL, "H5Tvlen_create"); + + /* Create the base compound type */ + tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); + CHECK(tid2, FAIL, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Modify dataset creation properties, i.e. 
enable chunking */ + cparms = H5Pcreate(H5P_DATASET_CREATE); + ret = H5Pset_chunk(cparms, SPACE1_RANK, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, cparms, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Extend the dataset. This call assures that dataset is 4.*/ + ret = H5Dset_extent(dataset, size); + CHECK(ret, FAIL, "H5Dset_extent"); + + /* Select a hyperslab */ + filespace = H5Dget_space(dataset); + ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, dims1, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid2, sid1, filespace, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); + CHECK(ret, FAIL, "H5Fflush"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close disk dataspace */ + ret = H5Sclose(filespace); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close datatype */ + ret = H5Tclose(tid4); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid5); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid3); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close Property list */ + ret = H5Pclose(cparms); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dset2 = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dset2, FAIL, "H5Dopen2"); + + /* Get the data type */ + tid2 = H5Dget_type(dset2); + CHECK(tid2, FAIL, "H5Dget_type"); + + /* Read dataset from disk */ + ret = H5Dread(dset2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata[i].v.len != rdata[i].v.len) { + TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n", + __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len); + continue; + } /* end if */ + + for (t1 = (s2 *)(wdata[i].v.p), t2 = (s2 *)(rdata[i].v.p), j = 0; j < rdata[i].v.len; + j++, t1++, t2++) { + if (HDstrcmp(t1->string, t2->string) != 0) { + TestErrPrintf("VL data values don't match!, t1->string=%s, t2->string=%s\n", t1->string, + t2->string); + continue; + } /* end if */ + if (t1->color != t2->color) { + TestErrPrintf("VL data values don't match!, t1->color=%d, t2->color=%d\n", t1->color, + t2->color); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Reclaim the VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Use this part for new data */ + HDstrcpy(str, "bbbbbbbb\0"); + for (i = 0; i < SPACE1_DIM1; i++) { + wdata2[i].v.p = (s2 *)HDmalloc((i + 1) * sizeof(s2)); + wdata2[i].v.len = i + 1; + for (t1 = (s2 *)(wdata2[i].v).p, j = 0; j < i + 1; j++, t1++) { + HDstrcat(str, "pp"); + t1->string = (char *)HDmalloc(HDstrlen(str) * sizeof(char) + 1); + HDstrcpy(t1->string, str); + t1->color = green; + } + } /* end for */ + + /* Select a hyperslab */ + filespace2 = H5Dget_space(dset2); + 
ret = H5Sselect_hyperslab(filespace2, H5S_SELECT_SET, offset, NULL, dims1, NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create dataspace for datasets */ + sid2 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Write dataset to disk */ + ret = H5Dwrite(dset2, tid2, sid2, filespace2, H5P_DEFAULT, &wdata2); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read dataset from disk */ + ret = H5Dread(dset2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata2); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata2[i].v.len != rdata2[i].v.len) { + TestErrPrintf("%d: VL data length don't match!, wdata2[%d].v.len=%d, rdata2[%d].v.len=%d\n", + __LINE__, (int)i, (int)wdata2[i].v.len, (int)i, (int)rdata2[i].v.len); + continue; + } /* end if */ + + for (t1 = (s2 *)(wdata2[i].v.p), t2 = (s2 *)(rdata2[i].v.p), j = 0; j < rdata2[i].v.len; + j++, t1++, t2++) { + if (HDstrcmp(t1->string, t2->string) != 0) { + TestErrPrintf("VL data values don't match!, t1->string=%s, t2->string=%s\n", t1->string, + t2->string); + continue; + } /* end if */ + if (t1->color != t2->color) { + TestErrPrintf("VL data values don't match!, t1->color=%d, t2->color=%d\n", t1->color, + t2->color); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata2); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Reclaim the VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata2); + CHECK(ret, FAIL, "H5Treclaim"); + + ret = H5Dclose(dset2); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close disk dataspace */ + ret = H5Sclose(filespace2); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); +} /* end test_vltypes_compound_vlstr() */ + +/**************************************************************** +** +** test_vltypes_compound_vlen_atomic(): Test basic VL datatype code. +** Tests compound datatypes with VL datatypes of atomic datatypes. 
+** +****************************************************************/ +static void +test_vltypes_compound_vlen_atomic(void) +{ + typedef struct { /* Struct that the VL sequences are composed of */ + int i; + float f; + hvl_t v; + } s1; + s1 wdata[SPACE1_DIM1]; /* Information to write */ + s1 rdata[SPACE1_DIM1]; /* Information read in */ + s1 fill; /* Fill value */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1, tid2; /* Datatype IDs */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hid_t dcpl_pid; /* Dataset creation property list ID */ + hsize_t dims1[] = {SPACE1_DIM1}; + hsize_t size; /* Number of bytes which will be used */ + unsigned i, j; /* counting variables */ + size_t mem_used = 0; /* Memory used during allocation */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing Compound Datatypes with VL Atomic Datatype Component Functionality\n")); + + /* Allocate and initialize VL data to write */ + for (i = 0; i < SPACE1_DIM1; i++) { + wdata[i].i = (int)(i * 10); + wdata[i].f = (float)(i * 20) / 3.0F; + wdata[i].v.p = HDmalloc((i + 1) * sizeof(unsigned int)); + wdata[i].v.len = i + 1; + for (j = 0; j < (i + 1); j++) + ((unsigned int *)wdata[i].v.p)[j] = i * 10 + j; + } /* end for */ + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a VL datatype to refer to */ + tid1 = H5Tvlen_create(H5T_NATIVE_UINT); + CHECK(tid1, FAIL, "H5Tvlen_create"); + + /* Create the base compound type */ + tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); + CHECK(tid2, FAIL, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory will be used */ + ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < 
SPACE1_DIM1; i++) { + if (wdata[i].i != rdata[i].i) { + TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i, + (int)wdata[i].i, (int)i, (int)rdata[i].i); + continue; + } /* end if */ + if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) { + TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i, + (double)wdata[i].f, (int)i, (double)rdata[i].f); + continue; + } /* end if */ + if (wdata[i].v.len != rdata[i].v.len) { + TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n", + __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len); + continue; + } /* end if */ + for (j = 0; j < rdata[i].v.len; j++) { + if (((unsigned int *)wdata[i].v.p)[j] != ((unsigned int *)rdata[i].v.p)[j]) { + TestErrPrintf("VL data values don't match!, wdata[%d].v.p[%d]=%d, rdata[%d].v.p[%d]=%d\n", + (int)i, (int)j, (int)((unsigned int *)wdata[i].v.p)[j], (int)i, (int)j, + (int)((unsigned int *)rdata[i].v.p)[j]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Reclaim the VL data */ + ret = H5Treclaim(tid2, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create a second dataset, with a fill value */ + dcpl_pid = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl_pid, FAIL, "H5Pcreate"); + + /* Set the fill value for the second dataset */ + HDmemset(&fill, 0, sizeof(s1)); + ret = H5Pset_fill_value(dcpl_pid, tid2, &fill); + CHECK(ret, FAIL, "H5Pset_fill_value"); + + dataset = H5Dcreate2(fid1, "Dataset2", tid2, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Close dataset creation property list */ + ret = H5Pclose(dcpl_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Read from dataset before writing data */ + ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Check data read in */ + for (i = 0; i < SPACE1_DIM1; i++) + if (rdata[i].i != 0 || !H5_FLT_ABS_EQUAL(rdata[i].f, 0.0F) || rdata[i].v.len != 0 || + rdata[i].v.p != NULL) + TestErrPrintf( + "VL doesn't match!, rdata[%d].i=%d, rdata[%d].f=%f, rdata[%d].v.len=%u, rdata[%d].v.p=%p\n", + (int)i, rdata[i].i, (int)i, (double)rdata[i].f, (int)i, (unsigned)rdata[i].v.len, (int)i, + rdata[i].v.p); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata[i].i != rdata[i].i) { + TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i, + (int)wdata[i].i, (int)i, (int)rdata[i].i); + continue; + } /* end if */ + if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) { + TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i, + (double)wdata[i].f, (int)i, (double)rdata[i].f); + continue; + } /* end if */ + if (wdata[i].v.len != rdata[i].v.len) { + TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n", + __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len); + continue; + } /* end if */ + for (j = 0; j < rdata[i].v.len; j++) { + if (((unsigned int *)wdata[i].v.p)[j] != ((unsigned int 
*)rdata[i].v.p)[j]) { + TestErrPrintf("VL data values don't match!, wdata[%d].v.p[%d]=%d, rdata[%d].v.p[%d]=%d\n", + (int)i, (int)j, (int)((unsigned int *)wdata[i].v.p)[j], (int)i, (int)j, + (int)((unsigned int *)rdata[i].v.p)[j]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Reclaim the VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_vltypes_compound_vlen_atomic() */ + +/**************************************************************** +** +** rewrite_vltypes_compound_vlen_atomic(): Check memory leak for +** basic VL datatype code. +** Check memory leak for compound datatypes with VL datatypes +** of atomic datatypes. +** +****************************************************************/ +static void +rewrite_vltypes_compound_vlen_atomic(void) +{ + typedef struct { /* Struct that the VL sequences are composed of */ + int i; + float f; + hvl_t v; + } s1; + s1 wdata[SPACE1_DIM1]; /* Information to write */ + s1 rdata[SPACE1_DIM1]; /* Information read in */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1, tid2; /* Datatype IDs */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hsize_t size; /* Number of bytes which will be used */ + unsigned i, j; /* counting variables */ + size_t mem_used = 0; /* Memory used during allocation */ + unsigned increment = 4; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, + ("Checking memory leak for compound datatype with VL Atomic Datatype Component Functionality\n")); + + /* Allocate and initialize VL data to write */ + for (i = 0; i < SPACE1_DIM1; i++) { + wdata[i].i = (int)(i * 40); + wdata[i].f = (float)(i * 50) / 3.0F; + wdata[i].v.p = HDmalloc((i + increment) * sizeof(unsigned int)); + wdata[i].v.len = i + increment; + for (j = 0; j < (i + increment); j++) + ((unsigned int *)wdata[i].v.p)[j] = i * 60 + j; + } /* end for */ + + /* Create file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Create a VL datatype to refer to */ + tid1 = H5Tvlen_create(H5T_NATIVE_UINT); + CHECK(tid1, FAIL, "H5Tvlen_create"); + + /* Create the base compound type */ + tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); + CHECK(tid2, FAIL, "H5Tcreate"); + + /* Insert fields */ + ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT); + CHECK(ret, FAIL, "H5Tinsert"); + ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT); + CHECK(ret, FAIL, "H5Tinsert"); + ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Create a dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Create dataspace for datasets */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Write 
dataset to disk */ + ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory will be used */ + ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* 22 elements allocated = 4+5+6+7 elements for each array position */ + VERIFY(size, 22 * sizeof(unsigned int), "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + /* 22 elements allocated = 4+5+6+7 elements for each array position */ + VERIFY(mem_used, 22 * sizeof(unsigned int), "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata[i].i != rdata[i].i) { + TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i, + (int)wdata[i].i, (int)i, (int)rdata[i].i); + continue; + } /* end if */ + if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) { + TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i, + (double)wdata[i].f, (int)i, (double)rdata[i].f); + continue; + } /* end if */ + if (wdata[i].v.len != rdata[i].v.len) { + TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n", + __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len); + continue; + } /* end if */ + for (j = 0; j < rdata[i].v.len; j++) { + if (((unsigned int *)wdata[i].v.p)[j] != ((unsigned int *)rdata[i].v.p)[j]) { + TestErrPrintf("VL data values don't match!, wdata[%d].v.p[%d]=%d, rdata[%d].v.p[%d]=%d\n", + (int)i, (int)j, (int)((unsigned int *)wdata[i].v.p)[j], (int)i, (int)j, + (int)((unsigned int *)rdata[i].v.p)[j]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + + /* Reclaim the VL data */ + ret = H5Treclaim(tid2, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end rewrite_vltypes_compound_vlen_atomic() */ + +/**************************************************************** +** +** vlen_size_func(): Test basic VL datatype code. +** Tests VL datatype with VL datatypes of atomic datatypes. 
+** +****************************************************************/ +static size_t +vlen_size_func(unsigned long n) +{ + size_t u = 1; + size_t tmp = 1; + size_t result = 1; + + while (u < n) { + u++; + tmp += u; + result += tmp; + } + return (result); +} + +/**************************************************************** +** +** test_vltypes_vlen_vlen_atomic(): Test basic VL datatype code. +** Tests VL datatype with VL datatypes of atomic datatypes. +** +****************************************************************/ +static void +test_vltypes_vlen_vlen_atomic(void) +{ + hvl_t wdata[SPACE1_DIM1]; /* Information to write */ + hvl_t rdata[SPACE1_DIM1]; /* Information read in */ + hvl_t *t1, *t2; /* Temporary pointer to VL information */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid1, tid2; /* Datatype IDs */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hsize_t dims1[] = {SPACE1_DIM1}; + hsize_t size; /* Number of bytes which will be used */ + unsigned i, j, k; /* counting variables */ + size_t mem_used = 0; /* Memory used during allocation */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Testing VL Datatypes with VL Atomic Datatype Component Functionality\n")); + + /* Allocate and initialize VL data to write */ + for (i = 0; i < SPACE1_DIM1; i++) { + wdata[i].p = HDmalloc((i + 1) * sizeof(hvl_t)); + if (wdata[i].p == NULL) { + TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i); + return; + } /* end if */ + wdata[i].len = i + 1; + for (t1 = (hvl_t *)(wdata[i].p), j = 0; j < (i + 1); j++, t1++) { + t1->p = HDmalloc((j + 1) * sizeof(unsigned int)); + if (t1->p == NULL) { + TestErrPrintf("Cannot allocate memory for VL data! 
i=%u, j=%u\n", i, j); + return; + } /* end if */ + t1->len = j + 1; + for (k = 0; k < (j + 1); k++) + ((unsigned int *)t1->p)[k] = i * 100 + j * 10 + k; + } /* end for */ + } /* end for */ + + /* Create file */ + fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fcreate"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a VL datatype to refer to */ + tid1 = H5Tvlen_create(H5T_NATIVE_UINT); + CHECK(tid1, FAIL, "H5Tvlen_create"); + + /* Create the base VL type */ + tid2 = H5Tvlen_create(tid1); + CHECK(tid2, FAIL, "H5Tvlen_create"); + + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dcreate2"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Create dataspace for datasets */ + sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); + CHECK(sid1, FAIL, "H5Screate_simple"); + + /* Create a VL datatype to refer to */ + tid1 = H5Tvlen_create(H5T_NATIVE_UINT); + CHECK(tid1, FAIL, "H5Tvlen_create"); + + /* Create the base VL type */ + tid2 = H5Tvlen_create(tid1); + CHECK(tid2, FAIL, "H5Tvlen_create"); + + /* Open a dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory was used */ + ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */ + VERIFY(size, + (hsize_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) + + vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)), + "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */ + VERIFY(mem_used, + (size_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) + + vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)), + "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata[i].len != rdata[i].len) { + TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, + (int)i, (int)wdata[i].len, 
(int)i, (int)rdata[i].len); + continue; + } /* end if */ + for (t1 = (hvl_t *)wdata[i].p, t2 = (hvl_t *)(rdata[i].p), j = 0; j < rdata[i].len; j++, t1++, t2++) { + if (t1->len != t2->len) { + TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n", + __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len); + continue; + } /* end if */ + for (k = 0; k < t2->len; k++) { + if (((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) { + TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k, + (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int *)t2->p)[k]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + } /* end for */ + + /* Reclaim all the (nested) VL data */ + ret = H5Treclaim(tid2, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close datatype */ + ret = H5Tclose(tid1); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end test_vltypes_vlen_vlen_atomic() */ + +/**************************************************************** +** +** rewrite_longer_vltypes_vlen_vlen_atomic(): Test basic VL datatype code. +** Tests VL datatype with VL datatypes of atomic datatypes. +** +****************************************************************/ +static void +rewrite_longer_vltypes_vlen_vlen_atomic(void) +{ + hvl_t wdata[SPACE1_DIM1]; /* Information to write */ + hvl_t rdata[SPACE1_DIM1]; /* Information read in */ + hvl_t *t1, *t2; /* Temporary pointer to VL information */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid2; /* Datatype IDs */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hsize_t size; /* Number of bytes which will be used */ + unsigned i, j, k; /* counting variables */ + size_t mem_used = 0; /* Memory used during allocation */ + unsigned increment = 1; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Check memory leak for VL Datatypes with VL Atomic Datatype Component Functionality\n")); + + /* Allocate and initialize VL data to write */ + for (i = 0; i < SPACE1_DIM1; i++) { + wdata[i].p = HDmalloc((i + increment) * sizeof(hvl_t)); + if (wdata[i].p == NULL) { + TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i); + return; + } /* end if */ + wdata[i].len = i + increment; + for (t1 = (hvl_t *)(wdata[i].p), j = 0; j < (i + increment); j++, t1++) { + t1->p = HDmalloc((j + 1) * sizeof(unsigned int)); + if (t1->p == NULL) { + TestErrPrintf("Cannot allocate memory for VL data! 
i=%u, j=%u\n", i, j); + return; + } /* end if */ + t1->len = j + 1; + for (k = 0; k < (j + 1); k++) + ((unsigned int *)t1->p)[k] = i * 1000 + j * 100 + k * 10; + } /* end for */ + } /* end for */ + + /* Open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get dataspace for datasets */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Open datatype of the dataset */ + tid2 = H5Dget_type(dataset); + CHECK(tid2, FAIL, "H5Dget_type"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open the file for data checking */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open a dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get dataspace for datasets */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Get datatype for dataset */ + tid2 = H5Dget_type(dataset); + CHECK(tid2, FAIL, "H5Dget_type"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory was used */ + ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* 18 hvl_t elements allocated = 3 + 4 + 5 + 6 elements for each array position */ + /* 52 unsigned int elements allocated = 6 + 10 + 15 + 21 elements */ + /*VERIFY(size, 18 * sizeof(hvl_t) + 52 * sizeof(unsigned int), "H5Dvlen_get_buf_size");*/ + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + /* 18 hvl_t elements allocated = 3+4+5+6elements for each array position */ + /* 52 unsigned int elements allocated = 6+10+15+21 elements */ + /*VERIFY(mem_used,18*sizeof(hvl_t)+52*sizeof(unsigned int),"H5Dread");*/ + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata[i].len != rdata[i].len) { + TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, + (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); + continue; + } /* end if */ + for (t1 = (hvl_t *)(wdata[i].p), t2 = (hvl_t *)(rdata[i].p), j = 0; j < rdata[i].len; + j++, t1++, t2++) { + if (t1->len != t2->len) { + TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n", + __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len); + continue; + } /* end if */ + for (k = 0; k < t2->len; k++) { + if (((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) { + TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k, + (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int 
*)t2->p)[k]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + } /* end for */ + + /* Reclaim all the (nested) VL data */ + ret = H5Treclaim(tid2, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end rewrite_longer_vltypes_vlen_vlen_atomic() */ + +/**************************************************************** +** +** rewrite_shorter_vltypes_vlen_vlen_atomic(): Test basic VL datatype code. +** Tests VL datatype with VL datatypes of atomic datatypes. +** +****************************************************************/ +static void +rewrite_shorter_vltypes_vlen_vlen_atomic(void) +{ + hvl_t wdata[SPACE1_DIM1]; /* Information to write */ + hvl_t rdata[SPACE1_DIM1]; /* Information read in */ + hvl_t *t1, *t2; /* Temporary pointer to VL information */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t sid1; /* Dataspace ID */ + hid_t tid2; /* Datatype IDs */ + hid_t xfer_pid; /* Dataset transfer property list ID */ + hsize_t size; /* Number of bytes which will be used */ + unsigned i, j, k; /* counting variables */ + size_t mem_used = 0; /* Memory used during allocation */ + unsigned increment = 1; + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + MESSAGE(5, ("Check memory leak for VL Datatypes with VL Atomic Datatype Component Functionality\n")); + + /* Allocate and initialize VL data to write */ + for (i = 0; i < SPACE1_DIM1; i++) { + wdata[i].p = HDmalloc((i + increment) * sizeof(hvl_t)); + if (wdata[i].p == NULL) { + TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i); + return; + } /* end if */ + wdata[i].len = i + increment; + for (t1 = (hvl_t *)(wdata[i].p), j = 0; j < (i + increment); j++, t1++) { + t1->p = HDmalloc((j + 1) * sizeof(unsigned int)); + if (t1->p == NULL) { + TestErrPrintf("Cannot allocate memory for VL data! 
i=%u, j=%u\n", i, j); + return; + } /* end if */ + t1->len = j + 1; + for (k = 0; k < (j + 1); k++) + ((unsigned int *)t1->p)[k] = i * 100000 + j * 1000 + k * 10; + } /* end for */ + } /* end for */ + + /* Open file */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open the dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get dataspace for datasets */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Open datatype of the dataset */ + tid2 = H5Dget_type(dataset); + CHECK(tid2, FAIL, "H5Dget_type"); + + /* Write dataset to disk */ + ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open the file for data checking */ + fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid1, FAIL, "H5Fopen"); + + /* Open a dataset */ + dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); + CHECK(dataset, FAIL, "H5Dopen2"); + + /* Get dataspace for datasets */ + sid1 = H5Dget_space(dataset); + CHECK(sid1, FAIL, "H5Dget_space"); + + /* Get datatype for dataset */ + tid2 = H5Dget_type(dataset); + CHECK(tid2, FAIL, "H5Dget_type"); + + /* Change to the custom memory allocation routines for reading VL data */ + xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Make certain the correct amount of memory was used */ + ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size); + CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */ + VERIFY(size, + (hsize_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) + + vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)), + "H5Dvlen_get_buf_size"); + + /* Read dataset from disk */ + ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Dread"); + + /* Make certain the correct amount of memory has been used */ + /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */ + /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */ + VERIFY(mem_used, + (size_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) + + vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)), + "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < SPACE1_DIM1; i++) { + if (wdata[i].len != rdata[i].len) { + TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, + (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); + continue; + } /* end if */ + for (t1 = (hvl_t *)(wdata[i].p), t2 = (hvl_t *)(rdata[i].p), j = 0; j < rdata[i].len; + j++, t1++, t2++) { + if (t1->len != t2->len) { + TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n", + __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len); + continue; + } /* end if */ + for (k = 0; k < t2->len; k++) { + if 
(((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) { + TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k, + (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int *)t2->p)[k]); + continue; + } /* end if */ + } /* end for */ + } /* end for */ + } /* end for */ + + /* Reclaim all the (nested) VL data */ + ret = H5Treclaim(tid2, sid1, xfer_pid, rdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Make certain the VL memory has been freed */ + VERIFY(mem_used, 0, "H5Treclaim"); + + /* Reclaim the write VL data */ + ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close datatype */ + ret = H5Tclose(tid2); + CHECK(ret, FAIL, "H5Tclose"); + + /* Close disk dataspace */ + ret = H5Sclose(sid1); + CHECK(ret, FAIL, "H5Sclose"); + + /* Close dataset transfer property list */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); + +} /* end rewrite_shorter_vltypes_vlen_vlen_atomic() */ + +/**************************************************************** +** +** test_vltypes_fill_value(): Test fill value for VL data. +** One tests data space isn't allocated; another tests data +** space is allocated. +** +****************************************************************/ +static void +test_vltypes_fill_value(void) +{ + typedef struct dtype1_struct { + unsigned int gui; + unsigned int pgui; + const char *str_id; + const char *str_name; + const char *str_desc; + const char *str_orig; + const char *str_stat; + unsigned int ver; + double val; + double ma; + double mi; + const char *str_form; + const char *str_unit; + } dtype1_struct; + + herr_t ret; + hid_t file_id; + hid_t dtype1_id = -1; + hid_t str_id = -1; + hid_t small_dspace_id; /* Dataspace ID for small datasets */ + hid_t large_dspace_id; /* Dataspace ID for large datasets */ + hid_t small_select_dspace_id; /* Dataspace ID for selection in small datasets */ + hid_t large_select_dspace_id; /* Dataspace ID for selection in large datasets */ + hid_t dset_dspace_id = -1; /* Dataspace ID for a particular dataset */ + hid_t dset_select_dspace_id = -1; /* Dataspace ID for selection in a particular dataset */ + hid_t scalar_dspace_id; /* Dataspace ID for scalar dataspace */ + hid_t single_dspace_id; /* Dataspace ID for single element selection */ + hsize_t single_offset[] = {2}; /* Offset of single element selection */ + hsize_t single_block[] = {1}; /* Block size of single element selection */ + hsize_t select_offset[] = {0}; /* Offset of non-contiguous element selection */ + hsize_t select_stride[] = {2}; /* Stride size of non-contiguous element selection */ + hsize_t small_select_count[] = {SPACE4_DIM_SMALL / + 2}; /* Count of small non-contiguous element selection */ + hsize_t large_select_count[] = {SPACE4_DIM_LARGE / + 2}; /* Count of large non-contiguous element selection */ + hsize_t select_block[] = {1}; /* Block size of non-contiguous element selection */ + hid_t dcpl_id, xfer_pid; + hid_t dset_id; + hsize_t small_dims[] = {SPACE4_DIM_SMALL}; + hsize_t large_dims[] = {SPACE4_DIM_LARGE}; + size_t dset_elmts = 0; /* Number of elements in a particular dataset */ + const dtype1_struct fill1 = {1, 2, "foobar", "", NULL, "\0", "dead", + 3, 4.0, 100.0, 1.0, "liquid", "meter"}; + const dtype1_struct wdata = {3, 4, "", NULL, "\0", "foo", "two", 6, 8.0, 200.0, 2.0, "solid", "yard"}; + dtype1_struct *rbuf = NULL; 
/* Buffer for reading data */ + size_t mem_used = 0; /* Memory used during allocation */ + H5D_layout_t layout; /* Dataset storage layout */ + char dset_name1[64], dset_name2[64]; /* Dataset names */ + unsigned i; + + /* Output message about test being performed */ + MESSAGE(5, ("Check fill value for VL data\n")); + + /* Create a string datatype */ + str_id = H5Tcopy(H5T_C_S1); + CHECK(str_id, FAIL, "H5Tcopy"); + ret = H5Tset_size(str_id, H5T_VARIABLE); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Create a compound data type */ + dtype1_id = H5Tcreate(H5T_COMPOUND, sizeof(struct dtype1_struct)); + CHECK(dtype1_id, FAIL, "H5Tcreate"); + + ret = H5Tinsert(dtype1_id, "guid", HOFFSET(struct dtype1_struct, gui), H5T_NATIVE_UINT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "pguid", HOFFSET(struct dtype1_struct, pgui), H5T_NATIVE_UINT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "str_id", HOFFSET(dtype1_struct, str_id), str_id); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "str_name", HOFFSET(dtype1_struct, str_name), str_id); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "str_desc", HOFFSET(dtype1_struct, str_desc), str_id); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "str_orig", HOFFSET(dtype1_struct, str_orig), str_id); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "str_stat", HOFFSET(dtype1_struct, str_stat), str_id); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "ver", HOFFSET(struct dtype1_struct, ver), H5T_NATIVE_UINT); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "val", HOFFSET(struct dtype1_struct, val), H5T_NATIVE_DOUBLE); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "ma", HOFFSET(struct dtype1_struct, ma), H5T_NATIVE_DOUBLE); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "mi", HOFFSET(struct dtype1_struct, mi), H5T_NATIVE_DOUBLE); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "str_form", HOFFSET(dtype1_struct, str_form), str_id); + CHECK(ret, FAIL, "H5Tinsert"); + + ret = H5Tinsert(dtype1_id, "str_unit", HOFFSET(dtype1_struct, str_unit), str_id); + CHECK(ret, FAIL, "H5Tinsert"); + + /* Close string datatype */ + ret = H5Tclose(str_id); + CHECK(ret, FAIL, "H5Tclose"); + + /* Allocate space for the buffer to read data */ + rbuf = (dtype1_struct *)HDmalloc(SPACE4_DIM_LARGE * sizeof(dtype1_struct)); + CHECK_PTR(rbuf, "HDmalloc"); + + /* Create the small & large dataspaces to use */ + small_dspace_id = H5Screate_simple(SPACE4_RANK, small_dims, NULL); + CHECK(small_dspace_id, FAIL, "H5Screate_simple"); + + large_dspace_id = H5Screate_simple(SPACE4_RANK, large_dims, NULL); + CHECK(large_dspace_id, FAIL, "H5Screate_simple"); + + /* Create small & large dataspaces w/non-contiguous selections */ + small_select_dspace_id = H5Scopy(small_dspace_id); + CHECK(small_select_dspace_id, FAIL, "H5Scopy"); + + ret = H5Sselect_hyperslab(small_select_dspace_id, H5S_SELECT_SET, select_offset, select_stride, + small_select_count, select_block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + large_select_dspace_id = H5Scopy(large_dspace_id); + CHECK(large_select_dspace_id, FAIL, "H5Scopy"); + + ret = H5Sselect_hyperslab(large_select_dspace_id, H5S_SELECT_SET, select_offset, select_stride, + large_select_count, select_block); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Create a scalar dataspace */ + scalar_dspace_id = H5Screate(H5S_SCALAR); + CHECK(scalar_dspace_id, FAIL, "H5Screate"); + + /* Create 
dataset create property list and set the fill value */ + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl_id, FAIL, "H5Pcreate"); + + ret = H5Pset_fill_value(dcpl_id, dtype1_id, &fill1); + CHECK(ret, FAIL, "H5Pset_fill_value"); + + /* Create the file */ + file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fcreate"); + + /* Create datasets with different storage layouts */ + for (layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) { + unsigned compress_loop; /* # of times to run loop, for testing compressed chunked dataset */ + unsigned test_loop; /* Loop over datasets */ + +#ifdef H5_HAVE_FILTER_DEFLATE + if (layout == H5D_CHUNKED) + compress_loop = 2; + else +#endif /* H5_HAVE_FILTER_DEFLATE */ + compress_loop = 1; + + /* Loop over dataset operations */ + for (test_loop = 0; test_loop < compress_loop; test_loop++) { + hid_t tmp_dcpl_id; /* Temporary copy of the dataset creation property list */ + + /* Make a copy of the dataset creation property list */ + tmp_dcpl_id = H5Pcopy(dcpl_id); + CHECK(tmp_dcpl_id, FAIL, "H5Pcopy"); + + /* Layout specific actions */ + switch (layout) { + case H5D_COMPACT: + HDstrcpy(dset_name1, "dataset1-compact"); + HDstrcpy(dset_name2, "dataset2-compact"); + dset_dspace_id = small_dspace_id; + ret = H5Pset_layout(tmp_dcpl_id, H5D_COMPACT); + CHECK(ret, FAIL, "H5Pset_layout"); + break; + + case H5D_CONTIGUOUS: + HDstrcpy(dset_name1, "dataset1-contig"); + HDstrcpy(dset_name2, "dataset2-contig"); + dset_dspace_id = large_dspace_id; + break; + + case H5D_CHUNKED: { + hsize_t chunk_dims[1] = {SPACE4_DIM_LARGE / 4}; + + dset_dspace_id = large_dspace_id; + ret = H5Pset_chunk(tmp_dcpl_id, 1, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); +#ifdef H5_HAVE_FILTER_DEFLATE + if (test_loop == 1) { + HDstrcpy(dset_name1, "dataset1-chunked-compressed"); + HDstrcpy(dset_name2, "dataset2-chunked-compressed"); + ret = H5Pset_deflate(tmp_dcpl_id, 3); + CHECK(ret, FAIL, "H5Pset_deflate"); + } /* end if */ + else { +#endif /* H5_HAVE_FILTER_DEFLATE */ + HDstrcpy(dset_name1, "dataset1-chunked"); + HDstrcpy(dset_name2, "dataset2-chunked"); +#ifdef H5_HAVE_FILTER_DEFLATE + } /* end else */ +#endif /* H5_HAVE_FILTER_DEFLATE */ + } break; + + case H5D_VIRTUAL: + HDassert(0 && "Invalid layout type!"); + break; + + case H5D_LAYOUT_ERROR: + case H5D_NLAYOUTS: + default: + HDassert(0 && "Unknown layout type!"); + break; + } /* end switch */ + + /* Create first data set with default setting - no space is allocated */ + dset_id = H5Dcreate2(file_id, dset_name1, dtype1_id, dset_dspace_id, H5P_DEFAULT, tmp_dcpl_id, + H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dcreate2"); + + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Create a second data set with space allocated and fill value written */ + ret = H5Pset_fill_time(tmp_dcpl_id, H5D_FILL_TIME_IFSET); + CHECK(ret, FAIL, "H5Pset_fill_time"); + + ret = H5Pset_alloc_time(tmp_dcpl_id, H5D_ALLOC_TIME_EARLY); + CHECK(ret, FAIL, "H5Pset_alloc_time"); + + dset_id = H5Dcreate2(file_id, dset_name2, dtype1_id, dset_dspace_id, H5P_DEFAULT, tmp_dcpl_id, + H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dcreate2"); + + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close temporary DCPL */ + ret = H5Pclose(tmp_dcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + } /* end for */ + } /* end for */ + + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + + ret = H5Pclose(dcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + + /* Change to the custom memory allocation routines for reading VL data */ 
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER); + CHECK(xfer_pid, FAIL, "H5Pcreate"); + + ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, + &mem_used); + CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); + + /* Open the file to check data set value */ + file_id = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fopen"); + + /* Read empty datasets with different storage layouts */ + for (layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) { + unsigned compress_loop; /* # of times to run loop, for testing compressed chunked dataset */ + unsigned test_loop; /* Loop over datasets */ + +#ifdef H5_HAVE_FILTER_DEFLATE + if (layout == H5D_CHUNKED) + compress_loop = 2; + else +#endif /* H5_HAVE_FILTER_DEFLATE */ + compress_loop = 1; + + /* Loop over dataset operations */ + for (test_loop = 0; test_loop < compress_loop; test_loop++) { + + /* Layout specific actions */ + switch (layout) { + case H5D_COMPACT: + HDstrcpy(dset_name1, "dataset1-compact"); + HDstrcpy(dset_name2, "dataset2-compact"); + dset_dspace_id = small_dspace_id; + dset_select_dspace_id = small_select_dspace_id; + dset_elmts = SPACE4_DIM_SMALL; + break; + + case H5D_CONTIGUOUS: + HDstrcpy(dset_name1, "dataset1-contig"); + HDstrcpy(dset_name2, "dataset2-contig"); + dset_dspace_id = large_dspace_id; + dset_select_dspace_id = large_select_dspace_id; + dset_elmts = SPACE4_DIM_LARGE; + break; + + case H5D_CHUNKED: +#ifdef H5_HAVE_FILTER_DEFLATE + if (test_loop == 1) { + HDstrcpy(dset_name1, "dataset1-chunked-compressed"); + HDstrcpy(dset_name2, "dataset2-chunked-compressed"); + } /* end if */ + else { +#endif /* H5_HAVE_FILTER_DEFLATE */ + HDstrcpy(dset_name1, "dataset1-chunked"); + HDstrcpy(dset_name2, "dataset2-chunked"); +#ifdef H5_HAVE_FILTER_DEFLATE + } /* end else */ +#endif /* H5_HAVE_FILTER_DEFLATE */ + dset_dspace_id = large_dspace_id; + dset_select_dspace_id = large_select_dspace_id; + dset_elmts = SPACE4_DIM_LARGE; + break; + + case H5D_VIRTUAL: + HDassert(0 && "Invalid layout type!"); + break; + + case H5D_LAYOUT_ERROR: + case H5D_NLAYOUTS: + default: + HDassert(0 && "Unknown layout type!"); + break; + } /* end switch */ + + /* Open first data set */ + dset_id = H5Dopen2(file_id, dset_name1, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dopen2"); + + /* Read in the entire 'empty' dataset of fill value */ + ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < dset_elmts; i++) { + if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 || + rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 || + HDstrcmp(rbuf[i].str_stat, "dead") != 0 || HDstrcmp(rbuf[i].str_form, "liquid") != 0 || + HDstrcmp(rbuf[i].str_unit, "meter") != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end for */ + + /* Release the space */ + ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Clear the read buffer */ + HDmemset(rbuf, 0, dset_elmts * sizeof(dtype1_struct)); + + /* Read in non-contiguous selection from 'empty' dataset of fill value */ + ret = H5Dread(dset_id, dtype1_id, dset_select_dspace_id, dset_select_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < dset_elmts; i++) { + if ((i % 2) == select_offset[0]) { + if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || 
HDstrcmp(rbuf[i].str_name, "") != 0 || + rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 || + HDstrcmp(rbuf[i].str_stat, "dead") != 0 || + HDstrcmp(rbuf[i].str_form, "liquid") != 0 || + HDstrcmp(rbuf[i].str_unit, "meter") != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end if */ + else { + if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig || + rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) { + TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end else */ + } /* end for */ + + /* Release the space */ + ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Treclaim"); + + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Open the second data set to check the value of data */ + dset_id = H5Dopen2(file_id, dset_name2, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dopen2"); + + /* Read in the entire 'empty' dataset of fill value */ + ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < dset_elmts; i++) { + if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 || + rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 || + HDstrcmp(rbuf[i].str_stat, "dead") != 0 || HDstrcmp(rbuf[i].str_form, "liquid") != 0 || + HDstrcmp(rbuf[i].str_unit, "meter") != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end for */ + + /* Release the space */ + ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Clear the read buffer */ + HDmemset(rbuf, 0, dset_elmts * sizeof(dtype1_struct)); + + /* Read in non-contiguous selection from 'empty' dataset of fill value */ + ret = H5Dread(dset_id, dtype1_id, dset_select_dspace_id, dset_select_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < dset_elmts; i++) { + if ((i % 2) == select_offset[0]) { + if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 || + rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 || + HDstrcmp(rbuf[i].str_stat, "dead") != 0 || + HDstrcmp(rbuf[i].str_form, "liquid") != 0 || + HDstrcmp(rbuf[i].str_unit, "meter") != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end if */ + else { + if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig || + rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) { + TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end else */ + } /* end for */ + + /* Release the space */ + ret = H5Treclaim(dtype1_id, dset_select_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Treclaim"); + + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + } /* end for */ + } /* end for */ + + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + + /* Open the file to check data set value */ + file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(file_id, FAIL, "H5Fopen"); + + /* Write one element & fill values to datasets with different storage layouts */ + for (layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) { + unsigned compress_loop; /* # of times to run loop, for 
testing compressed chunked dataset */ + unsigned test_loop; /* Loop over datasets */ + +#ifdef H5_HAVE_FILTER_DEFLATE + if (layout == H5D_CHUNKED) + compress_loop = 2; + else +#endif /* H5_HAVE_FILTER_DEFLATE */ + compress_loop = 1; + + /* Loop over dataset operations */ + for (test_loop = 0; test_loop < compress_loop; test_loop++) { + + /* Layout specific actions */ + switch (layout) { + case H5D_COMPACT: + HDstrcpy(dset_name1, "dataset1-compact"); + HDstrcpy(dset_name2, "dataset2-compact"); + dset_dspace_id = small_dspace_id; + dset_select_dspace_id = small_select_dspace_id; + dset_elmts = SPACE4_DIM_SMALL; + break; + + case H5D_CONTIGUOUS: + HDstrcpy(dset_name1, "dataset1-contig"); + HDstrcpy(dset_name2, "dataset2-contig"); + dset_dspace_id = large_dspace_id; + dset_select_dspace_id = large_select_dspace_id; + dset_elmts = SPACE4_DIM_LARGE; + break; + + case H5D_CHUNKED: +#ifdef H5_HAVE_FILTER_DEFLATE + if (test_loop == 1) { + HDstrcpy(dset_name1, "dataset1-chunked-compressed"); + HDstrcpy(dset_name2, "dataset2-chunked-compressed"); + } /* end if */ + else { +#endif /* H5_HAVE_FILTER_DEFLATE */ + HDstrcpy(dset_name1, "dataset1-chunked"); + HDstrcpy(dset_name2, "dataset2-chunked"); +#ifdef H5_HAVE_FILTER_DEFLATE + } /* end else */ +#endif /* H5_HAVE_FILTER_DEFLATE */ + dset_dspace_id = large_dspace_id; + dset_select_dspace_id = large_select_dspace_id; + dset_elmts = SPACE4_DIM_LARGE; + break; + + case H5D_VIRTUAL: + HDassert(0 && "Invalid layout type!"); + break; + + case H5D_LAYOUT_ERROR: + case H5D_NLAYOUTS: + default: + HDassert(0 && "Unknown layout type!"); + break; + } /* end switch */ + + /* Copy the dataset's dataspace */ + single_dspace_id = H5Scopy(dset_dspace_id); + CHECK(single_dspace_id, FAIL, "H5Scopy"); + + /* Set a single element in the dataspace */ + ret = H5Sselect_hyperslab(single_dspace_id, H5S_SELECT_SET, single_offset, NULL, single_block, + NULL); + CHECK(ret, FAIL, "H5Sselect_hyperslab"); + + /* Open first data set */ + dset_id = H5Dopen2(file_id, dset_name1, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dopen2"); + + /* Write one element in the dataset */ + ret = H5Dwrite(dset_id, dtype1_id, scalar_dspace_id, single_dspace_id, xfer_pid, &wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < dset_elmts; i++) { + if (i == single_offset[0]) { + if (HDstrcmp(rbuf[i].str_id, wdata.str_id) != 0 || rbuf[i].str_name || + HDstrcmp(rbuf[i].str_desc, wdata.str_desc) != 0 || + HDstrcmp(rbuf[i].str_orig, wdata.str_orig) != 0 || + HDstrcmp(rbuf[i].str_stat, wdata.str_stat) != 0 || + HDstrcmp(rbuf[i].str_form, wdata.str_form) != 0 || + HDstrcmp(rbuf[i].str_unit, wdata.str_unit) != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end if */ + else { + if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 || + rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 || + HDstrcmp(rbuf[i].str_stat, "dead") != 0 || + HDstrcmp(rbuf[i].str_form, "liquid") != 0 || + HDstrcmp(rbuf[i].str_unit, "meter") != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end if */ + } /* end for */ + + /* Release the space */ + ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Clear the read buffer */ + HDmemset(rbuf, 0, dset_elmts * 
sizeof(dtype1_struct)); + + /* Read in non-contiguous selection from dataset */ + ret = H5Dread(dset_id, dtype1_id, dset_select_dspace_id, dset_select_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < dset_elmts; i++) { + if (i == single_offset[0]) { + if (HDstrcmp(rbuf[i].str_id, wdata.str_id) != 0 || rbuf[i].str_name || + HDstrcmp(rbuf[i].str_desc, wdata.str_desc) != 0 || + HDstrcmp(rbuf[i].str_orig, wdata.str_orig) != 0 || + HDstrcmp(rbuf[i].str_stat, wdata.str_stat) != 0 || + HDstrcmp(rbuf[i].str_form, wdata.str_form) != 0 || + HDstrcmp(rbuf[i].str_unit, wdata.str_unit) != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end if */ + else { + if ((i % 2) == select_offset[0]) { + if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 || + rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 || + HDstrcmp(rbuf[i].str_stat, "dead") != 0 || + HDstrcmp(rbuf[i].str_form, "liquid") != 0 || + HDstrcmp(rbuf[i].str_unit, "meter") != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end if */ + else { + if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig || + rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) { + TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end else */ + } /* end else */ + } /* end for */ + + /* Release the space */ + ret = H5Treclaim(dtype1_id, dset_select_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Treclaim"); + + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Open the second data set to check the value of data */ + dset_id = H5Dopen2(file_id, dset_name2, H5P_DEFAULT); + CHECK(dset_id, FAIL, "H5Dopen2"); + + /* Write one element in the dataset */ + ret = H5Dwrite(dset_id, dtype1_id, scalar_dspace_id, single_dspace_id, xfer_pid, &wdata); + CHECK(ret, FAIL, "H5Dwrite"); + + ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < dset_elmts; i++) { + if (i == single_offset[0]) { + if (HDstrcmp(rbuf[i].str_id, wdata.str_id) != 0 || rbuf[i].str_name || + HDstrcmp(rbuf[i].str_desc, wdata.str_desc) != 0 || + HDstrcmp(rbuf[i].str_orig, wdata.str_orig) != 0 || + HDstrcmp(rbuf[i].str_stat, wdata.str_stat) != 0 || + HDstrcmp(rbuf[i].str_form, wdata.str_form) != 0 || + HDstrcmp(rbuf[i].str_unit, wdata.str_unit) != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end if */ + else { + if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 || + rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 || + HDstrcmp(rbuf[i].str_stat, "dead") != 0 || + HDstrcmp(rbuf[i].str_form, "liquid") != 0 || + HDstrcmp(rbuf[i].str_unit, "meter") != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end if */ + } /* end for */ + + /* Release the space */ + ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Treclaim"); + + /* Clear the read buffer */ + HDmemset(rbuf, 0, dset_elmts * sizeof(dtype1_struct)); + + /* Read in non-contiguous selection from dataset */ + ret = H5Dread(dset_id, dtype1_id, dset_select_dspace_id, dset_select_dspace_id, 
xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Dread"); + + /* Compare data read in */ + for (i = 0; i < dset_elmts; i++) { + if (i == single_offset[0]) { + if (HDstrcmp(rbuf[i].str_id, wdata.str_id) != 0 || rbuf[i].str_name || + HDstrcmp(rbuf[i].str_desc, wdata.str_desc) != 0 || + HDstrcmp(rbuf[i].str_orig, wdata.str_orig) != 0 || + HDstrcmp(rbuf[i].str_stat, wdata.str_stat) != 0 || + HDstrcmp(rbuf[i].str_form, wdata.str_form) != 0 || + HDstrcmp(rbuf[i].str_unit, wdata.str_unit) != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end if */ + else { + if ((i % 2) == select_offset[0]) { + if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 || + rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 || + HDstrcmp(rbuf[i].str_stat, "dead") != 0 || + HDstrcmp(rbuf[i].str_form, "liquid") != 0 || + HDstrcmp(rbuf[i].str_unit, "meter") != 0) { + TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end if */ + else { + if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig || + rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) { + TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); + continue; + } /* end if */ + } /* end else */ + } /* end else */ + } /* end for */ + + /* Release the space */ + ret = H5Treclaim(dtype1_id, dset_select_dspace_id, xfer_pid, rbuf); + CHECK(ret, FAIL, "H5Treclaim"); + + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + + /* Close the dataspace for the writes */ + ret = H5Sclose(single_dspace_id); + CHECK(ret, FAIL, "H5Sclose"); + } /* end for */ + } /* end for */ + + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + + /* Clean up rest of IDs */ + ret = H5Pclose(xfer_pid); + CHECK(ret, FAIL, "H5Pclose"); + + ret = H5Sclose(small_dspace_id); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(large_dspace_id); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(small_select_dspace_id); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(large_select_dspace_id); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Sclose(scalar_dspace_id); + CHECK(ret, FAIL, "H5Sclose"); + + ret = H5Tclose(dtype1_id); + CHECK(ret, FAIL, "H5Tclose"); + + /* Release buffer */ + HDfree(rbuf); +} /* end test_vltypes_fill_value() */ + +/**************************************************************** +** +** test_vltypes(): Main VL datatype testing routine. 
+** +****************************************************************/ +void +test_vltypes(void) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing Variable-Length Datatypes\n")); + + /* These next tests use the same file */ + test_vltypes_dataset_create(); /* Check dataset of VL when fill value + * won't be rewritten to it.*/ + test_vltypes_funcs(); /* Test functions with VL types */ + test_vltypes_vlen_atomic(); /* Test VL atomic datatypes */ + rewrite_vltypes_vlen_atomic(); /* Check VL memory leak */ + test_vltypes_vlen_compound(); /* Test VL compound datatypes */ + rewrite_vltypes_vlen_compound(); /* Check VL memory leak */ + test_vltypes_compound_vlen_atomic(); /* Test compound datatypes with VL atomic components */ + rewrite_vltypes_compound_vlen_atomic(); /* Check VL memory leak */ + test_vltypes_vlen_vlen_atomic(); /* Test VL datatype with VL atomic components */ + rewrite_longer_vltypes_vlen_vlen_atomic(); /*overwrite with VL data of longer sequence*/ + rewrite_shorter_vltypes_vlen_vlen_atomic(); /*overwrite with VL data of shorted sequence*/ + test_vltypes_compound_vlen_vlen(); /* Test compound datatypes with VL atomic components */ + test_vltypes_compound_vlstr(); /* Test data rewritten of nested VL data */ + test_vltypes_fill_value(); /* Test fill value for VL data */ +} /* test_vltypes() */ + +/*------------------------------------------------------------------------- + * Function: cleanup_vltypes + * + * Purpose: Cleanup temporary test files + * + * Return: none + * + * Programmer: Quincey Koziol + * June 8, 1999 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +void +cleanup_vltypes(void) +{ + H5Fdelete(FILENAME, H5P_DEFAULT); +} diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index c3365b7b081..d52beb01d46 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -395,6 +395,24 @@ if (HDF5_BUILD_UTILS) set (H5_TESTS ${H5_TESTS} mirror_vfd) endif () +set (HDF5_API_TESTS + attribute + dataset + datatype + file + group + link + misc + object +) + +if (HDF5_TEST_API_ENABLE_ASYNC) + set (HDF5_API_TESTS + ${HDF5_API_TESTS} + async + ) +endif () + macro (ADD_H5_EXE file) add_executable (${file} ${HDF5_TEST_SOURCE_DIR}/${file}.c) target_include_directories (${file} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};${HDF5_TEST_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") @@ -879,6 +897,8 @@ if (HDF5_ENABLE_FORMATTERS) clang_format (HDF5_TEST_use_disable_mdc_flushes_FORMAT use_disable_mdc_flushes) endif () +add_subdirectory (API) + if (HDF5_TEST_SERIAL) include (CMakeTests.cmake) endif () diff --git a/test/h5test.c b/test/h5test.c index c667ffdfcb5..50131351e5a 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -116,6 +116,13 @@ const char *LIBVER_NAMES[] = {"earliest", /* H5F_LIBVER_EARLIEST = 0 */ /* Previous error reporting function */ static H5E_auto2_t err_func = NULL; +/* Global variables for testing */ +size_t n_tests_run_g = 0; +size_t n_tests_passed_g = 0; +size_t n_tests_failed_g = 0; +size_t n_tests_skipped_g = 0; +uint64_t vol_cap_flags_g = H5VL_CAP_FLAG_NONE; + static herr_t h5_errors(hid_t estack, void *client_data); static char *h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fullname, size_t size, hbool_t nest_printf, hbool_t subst_for_superblock); diff --git a/test/h5test.h b/test/h5test.h index ea7ab4d75df..b2c2cda7255 100644 --- a/test/h5test.h +++ b/test/h5test.h @@ -106,21 +106,25 @@ H5TEST_DLLVAR MPI_Info h5_io_info_g; /* MPI INFO 
object for IO */ do { \ HDprintf("Testing %-62s", WHAT); \ HDfflush(stdout); \ + n_tests_run_g++; \ } while (0) #define TESTING_2(WHAT) \ do { \ HDprintf(" Testing %-60s", WHAT); \ HDfflush(stdout); \ + n_tests_run_g++; \ } while (0) #define PASSED() \ do { \ HDputs(" PASSED"); \ HDfflush(stdout); \ + n_tests_passed_g++; \ } while (0) #define H5_FAILED() \ do { \ HDputs("*FAILED*"); \ HDfflush(stdout); \ + n_tests_failed_g++; \ } while (0) #define H5_WARNING() \ do { \ @@ -131,6 +135,7 @@ H5TEST_DLLVAR MPI_Info h5_io_info_g; /* MPI INFO object for IO */ do { \ HDputs(" -SKIP-"); \ HDfflush(stdout); \ + n_tests_skipped_g++; \ } while (0) #define PUTS_ERROR(s) \ do { \ @@ -164,6 +169,66 @@ H5TEST_DLLVAR MPI_Info h5_io_info_g; /* MPI INFO object for IO */ goto error; \ } while (0) +/* + * Testing macros used for multi-part tests. + */ +#define TESTING_MULTIPART(WHAT) \ + do { \ + HDprintf("Testing %-62s", WHAT); \ + HDputs(""); \ + HDfflush(stdout); \ + } while (0) + +/* + * Begin and end an entire section of multi-part tests. By placing all the + * parts of a test between these macros, skipping to the 'error' cleanup + * section of a test is deferred until all parts have finished. + */ +#define BEGIN_MULTIPART \ + { \ + int part_nerrors = 0; + +#define END_MULTIPART \ + if (part_nerrors > 0) \ + goto error; \ + } + +/* + * Begin, end and handle errors within a single part of a multi-part test. + * The PART_END macro creates a goto label based on the given "part name". + * When a failure occurs in the current part, the PART_ERROR macro uses + * this label to skip to the next part of the multi-part test. The PART_ERROR + * macro also increments the error count so that the END_MULTIPART macro + * knows to skip to the test's 'error' label once all test parts have finished. + */ +#define PART_BEGIN(part_name) { +#define PART_END(part_name) \ + } \ + part_##part_name##_end: +#define PART_ERROR(part_name) \ + do { \ + n_tests_failed_g++; \ + part_nerrors++; \ + goto part_##part_name##_end; \ + } while (0) +#define PART_TEST_ERROR(part_name) \ + do { \ + H5_FAILED(); \ + AT(); \ + part_nerrors++; \ + goto part_##part_name##_end; \ + } while (0) + +/* + * Simply skips to the goto label for this test part and moves on to the + * next test part. Useful for when a test part needs to be skipped for + * some reason or is currently unimplemented and empty. 
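+ *
+ * For reference, the multi-part macros defined above are intended to be
+ * used roughly as in the following sketch (the part name and the
+ * hypothetical do_something() call are illustrative only, not taken from
+ * an existing test):
+ *
+ *     BEGIN_MULTIPART
+ *     {
+ *         PART_BEGIN(example_part)
+ *         {
+ *             TESTING_2("example part");
+ *
+ *             if (do_something() < 0)
+ *                 PART_TEST_ERROR(example_part);
+ *
+ *             PASSED();
+ *         }
+ *         PART_END(example_part);
+ *     }
+ *     END_MULTIPART;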
+ */ +#define PART_EMPTY(part_name) \ + do { \ + goto part_##part_name##_end; \ + } while (0) + /* Number of seconds to wait before killing a test (requires alarm(2)) */ #define H5_ALARM_SEC 1200 /* default is 20 minutes */ @@ -285,7 +350,12 @@ H5TEST_DLL char *getenv_all(MPI_Comm comm, int root, const char *name); #endif /* Extern global variables */ -H5TEST_DLLVAR int TestVerbosity; +H5TEST_DLLVAR int TestVerbosity; +H5TEST_DLLVAR size_t n_tests_run_g; +H5TEST_DLLVAR size_t n_tests_passed_g; +H5TEST_DLLVAR size_t n_tests_failed_g; +H5TEST_DLLVAR size_t n_tests_skipped_g; +H5TEST_DLLVAR uint64_t vol_cap_flags_g; H5TEST_DLL void h5_send_message(const char *file, const char *arg1, const char *arg2); H5TEST_DLL herr_t h5_wait_message(const char *file); diff --git a/test/vol.c b/test/vol.c index 29bbb0654b8..6bcae6bdfd3 100644 --- a/test/vol.c +++ b/test/vol.c @@ -2076,11 +2076,12 @@ test_async_vol_props(void) hid_t fapl_id = H5I_INVALID_HID; hid_t vol_id = H5I_INVALID_HID; H5VL_pass_through_info_t passthru_info; - uint64_t cap_flags = H5VL_CAP_FLAG_NONE; char *conn_env_str = NULL; TESTING("Async VOL props"); + vol_cap_flags_g = H5VL_CAP_FLAG_NONE; + /* Retrieve the file access property for testing */ fapl_id = h5_fileaccess(); @@ -2104,11 +2105,11 @@ test_async_vol_props(void) /* Test query w/default VOL, which should indicate no async, since native connector * doesn't support async. */ - if (H5Pget_vol_cap_flags(fapl_id, &cap_flags) < 0) + if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0) FAIL_STACK_ERROR; - if ((cap_flags & H5VL_CAP_FLAG_ASYNC) > 0) + if ((vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC) > 0) TEST_ERROR; - if ((cap_flags & H5VL_CAP_FLAG_NATIVE_FILES) == 0) + if ((vol_cap_flags_g & H5VL_CAP_FLAG_NATIVE_FILES) == 0) TEST_ERROR; /* Close FAPL */ @@ -2129,12 +2130,12 @@ test_async_vol_props(void) fapl_id = h5_fileaccess(); /* Test query w/fake async VOL, which should succeed */ - cap_flags = H5VL_CAP_FLAG_NONE; - if (H5Pget_vol_cap_flags(fapl_id, &cap_flags) < 0) + vol_cap_flags_g = H5VL_CAP_FLAG_NONE; + if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0) FAIL_STACK_ERROR; - if ((cap_flags & H5VL_CAP_FLAG_ASYNC) == 0) + if ((vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC) == 0) TEST_ERROR; - if ((cap_flags & H5VL_CAP_FLAG_NATIVE_FILES) > 0) + if ((vol_cap_flags_g & H5VL_CAP_FLAG_NATIVE_FILES) > 0) TEST_ERROR; /* Reset environment variable & re-init default connector */ @@ -2155,12 +2156,12 @@ test_async_vol_props(void) FAIL_STACK_ERROR; /* Test query w/fake async VOL, which should succeed */ - cap_flags = H5VL_CAP_FLAG_NONE; - if (H5Pget_vol_cap_flags(fapl_id, &cap_flags) < 0) + vol_cap_flags_g = H5VL_CAP_FLAG_NONE; + if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0) FAIL_STACK_ERROR; - if ((cap_flags & H5VL_CAP_FLAG_ASYNC) == 0) + if ((vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC) == 0) TEST_ERROR; - if ((cap_flags & H5VL_CAP_FLAG_NATIVE_FILES) > 0) + if ((vol_cap_flags_g & H5VL_CAP_FLAG_NATIVE_FILES) > 0) TEST_ERROR; /* Stack the [internal] passthrough VOL connector on top of the fake async connector */ @@ -2170,12 +2171,12 @@ test_async_vol_props(void) FAIL_STACK_ERROR; /* Test query w/passthru -> fake async VOL, which should succeed */ - cap_flags = H5VL_CAP_FLAG_NONE; - if (H5Pget_vol_cap_flags(fapl_id, &cap_flags) < 0) + vol_cap_flags_g = H5VL_CAP_FLAG_NONE; + if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0) FAIL_STACK_ERROR; - if ((cap_flags & H5VL_CAP_FLAG_ASYNC) == 0) + if ((vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC) == 0) TEST_ERROR; - if ((cap_flags & 
H5VL_CAP_FLAG_NATIVE_FILES) > 0) + if ((vol_cap_flags_g & H5VL_CAP_FLAG_NATIVE_FILES) > 0) TEST_ERROR; /* Unregister the fake async VOL ID */ @@ -2224,14 +2225,15 @@ test_async_vol_props(void) static herr_t test_vol_cap_flags(void) { - hid_t fapl_id = H5I_INVALID_HID; - hid_t vol_id = H5I_INVALID_HID; - uint64_t vol_cap_flags = H5VL_CAP_FLAG_NONE; - char *vol_env = NULL; + hid_t fapl_id = H5I_INVALID_HID; + hid_t vol_id = H5I_INVALID_HID; + char *vol_env = NULL; H5VL_pass_through_info_t passthru_info; TESTING("VOL capability flags"); + vol_cap_flags_g = H5VL_CAP_FLAG_NONE; + /* Register a fake VOL */ if ((vol_id = H5VLregister_connector(&fake_vol_g, H5P_DEFAULT)) < 0) TEST_ERROR; @@ -2243,13 +2245,13 @@ test_vol_cap_flags(void) TEST_ERROR; /* Verify the correctness of the VOL capacity flags */ - if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags) < 0) + if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0) TEST_ERROR; - if (!(vol_cap_flags & H5VL_CAP_FLAG_FILE_BASIC)) + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) TEST_ERROR; - if (vol_cap_flags & H5VL_CAP_FLAG_ATTR_BASIC) + if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) TEST_ERROR; /* If using the native VOL by default, check flags again with H5P_DEFAULT */ @@ -2263,12 +2265,12 @@ test_vol_cap_flags(void) if (NULL == (cls = H5I_object(connector_id))) TEST_ERROR; - vol_cap_flags = H5VL_CAP_FLAG_NONE; + vol_cap_flags_g = H5VL_CAP_FLAG_NONE; - if (H5Pget_vol_cap_flags(H5P_DEFAULT, &vol_cap_flags) < 0) + if (H5Pget_vol_cap_flags(H5P_DEFAULT, &vol_cap_flags_g) < 0) TEST_ERROR; - if (vol_cap_flags != cls->cap_flags) + if (vol_cap_flags_g != cls->cap_flags) TEST_ERROR; if (H5VLclose(connector_id) < 0) @@ -2283,15 +2285,15 @@ test_vol_cap_flags(void) FAIL_STACK_ERROR; /* Verify the correctness of the VOL capacity flags */ - vol_cap_flags = H5VL_CAP_FLAG_NONE; + vol_cap_flags_g = H5VL_CAP_FLAG_NONE; - if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags) < 0) + if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0) TEST_ERROR; - if (!(vol_cap_flags & H5VL_CAP_FLAG_FILE_BASIC)) + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) TEST_ERROR; - if (vol_cap_flags & H5VL_CAP_FLAG_ATTR_BASIC) + if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) TEST_ERROR; if (H5Pclose(fapl_id) < 0) diff --git a/testpar/API/CMakeLists.txt b/testpar/API/CMakeLists.txt new file mode 100644 index 00000000000..5eb69c4d58c --- /dev/null +++ b/testpar/API/CMakeLists.txt @@ -0,0 +1,279 @@ +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. 
+# + +#------------------------------------------------------------------------------ +# Set module path +#------------------------------------------------------------------------------ +set(HDF5_TEST_API_CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake") +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${HDF5_TEST_API_CMAKE_MODULE_PATH}) + +#------------------------------------------------------------------------------ +# Setup for API tests +#------------------------------------------------------------------------------ + +# Ported HDF5 tests +set (HDF5_API_PAR_TESTS_EXTRA + t_bigio + t_pshutdown + t_shapesame + testphdf5 +) + +# List of files generated by the HDF5 API tests which +# should be cleaned up in case the test failed to remove +# them +set (HDF5_API_PAR_TESTS_FILES + H5_api_test_parallel.h5 + H5_api_async_test_parallel.h5 + H5_api_async_test_parallel_0.h5 + H5_api_async_test_parallel_1.h5 + H5_api_async_test_parallel_2.h5 + H5_api_async_test_parallel_3.h5 + H5_api_async_test_parallel_4.h5 + test_file_parallel.h5 + split_comm_file.h5 +) + +#----------------------------------------------------------------------------- +# Build the main API test executable +#----------------------------------------------------------------------------- +foreach (api_test ${HDF5_API_TESTS}) + set (HDF5_API_PAR_TEST_SRCS + ${HDF5_API_PAR_TEST_SRCS} + ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_${api_test}_test_parallel.c + ) +endforeach () + +set (HDF5_API_PAR_TEST_SRCS + ${HDF5_API_PAR_TEST_SRCS} + ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_test_parallel.c + ${HDF5_TEST_API_SRC_DIR}/H5_api_test_util.c +) + +add_executable (h5_api_test_parallel ${HDF5_API_PAR_TEST_SRCS}) +target_include_directories ( + h5_api_test_parallel + PRIVATE + "${HDF5_SRC_INCLUDE_DIRS}" + "${HDF5_TEST_PAR_DIR}" + "${HDF5_TEST_API_SRC_DIR}" + "${HDF5_TEST_API_PAR_SRC_DIR}" + "${HDF5_SRC_BINARY_DIR}" + "${HDF5_TEST_BINARY_DIR}" + "${HDF5_TEST_API_SRC_DIR}" + "$<$:${MPI_C_INCLUDE_DIRS}>" +) +target_compile_options ( + h5_api_test_parallel + PRIVATE + "${HDF5_CMAKE_C_FLAGS}" +) +target_compile_definitions ( + h5_api_test_parallel + PRIVATE + $<$:${HDF5_DEVELOPER_DEFS}> +) +if (NOT BUILD_SHARED_LIBS) + TARGET_C_PROPERTIES (h5_api_test_parallel STATIC) + target_link_libraries ( + h5_api_test_parallel + PRIVATE + ${HDF5_TEST_LIB_TARGET} + ${HDF5_LIB_TARGET} + "$<$:MPI::MPI_C>" + ) +else () + TARGET_C_PROPERTIES (h5_api_test_parallel SHARED) + target_link_libraries ( + h5_api_test_parallel + PRIVATE + ${HDF5_TEST_LIBSH_TARGET} + ${HDF5_LIBSH_TARGET} + "$<$:MPI::MPI_C>" + ) +endif () +set_target_properties ( + h5_api_test_parallel + PROPERTIES + FOLDER test/par/API +) +# Add Target to clang-format +if (HDF5_ENABLE_FORMATTERS) + clang_format (HDF5_TEST_h5_api_test_parallel_FORMAT h5_api_test_parallel) +endif () + +#----------------------------------------------------------------------------- +# Build the ported HDF5 test executables +#----------------------------------------------------------------------------- +foreach (api_test_extra ${HDF5_API_PAR_TESTS_EXTRA}) + unset (HDF5_API_PAR_TEST_EXTRA_SRCS) + + set (HDF5_API_PAR_TEST_EXTRA_SRCS + ${HDF5_API_PAR_TEST_EXTRA_SRCS} + ${CMAKE_CURRENT_SOURCE_DIR}/${api_test_extra}.c + ) + + if (${api_test_extra} STREQUAL "testphdf5") + set (HDF5_API_PAR_TEST_EXTRA_SRCS + ${HDF5_API_PAR_TEST_EXTRA_SRCS} + ${CMAKE_CURRENT_SOURCE_DIR}/t_ph5basic.c + ${CMAKE_CURRENT_SOURCE_DIR}/t_file.c + ${CMAKE_CURRENT_SOURCE_DIR}/t_dset.c + ${CMAKE_CURRENT_SOURCE_DIR}/t_mdset.c + ${CMAKE_CURRENT_SOURCE_DIR}/t_coll_chunk.c + 
${CMAKE_CURRENT_SOURCE_DIR}/t_span_tree.c + ${CMAKE_CURRENT_SOURCE_DIR}/t_prop.c + ${CMAKE_CURRENT_SOURCE_DIR}/t_file_image.c + ${CMAKE_CURRENT_SOURCE_DIR}/t_coll_md_read.c + ${CMAKE_CURRENT_SOURCE_DIR}/t_chunk_alloc.c + ${CMAKE_CURRENT_SOURCE_DIR}/t_filter_read.c + ) + endif () + + add_executable (h5_api_test_parallel_${api_test_extra} ${HDF5_API_PAR_TEST_EXTRA_SRCS}) + target_include_directories ( + h5_api_test_parallel_${api_test_extra} + PRIVATE + "${HDF5_SRC_INCLUDE_DIRS}" + "${HDF5_TEST_PAR_DIR}" + "${HDF5_TEST_API_SRC_DIR}" + "${HDF5_TEST_API_PAR_SRC_DIR}" + "${HDF5_SRC_BINARY_DIR}" + "${HDF5_TEST_BINARY_DIR}" + "$<$:${MPI_C_INCLUDE_DIRS}>" + ) + target_compile_options ( + h5_api_test_parallel_${api_test_extra} + PRIVATE + "${HDF5_CMAKE_C_FLAGS}" + ) + target_compile_definitions ( + h5_api_test_parallel_${api_test_extra} + PRIVATE + $<$:${HDF5_DEVELOPER_DEFS}> + ) + if (NOT BUILD_SHARED_LIBS) + TARGET_C_PROPERTIES (h5_api_test_parallel_${api_test_extra} STATIC) + target_link_libraries ( + h5_api_test_parallel_${api_test_extra} + PRIVATE + ${HDF5_TEST_LIB_TARGET} + ${HDF5_LIB_TARGET} + "$<$:MPI::MPI_C>" + ) + else () + TARGET_C_PROPERTIES (h5_api_test_parallel_${api_test_extra} SHARED) + target_link_libraries ( + h5_api_test_parallel_${api_test_extra} + PRIVATE + ${HDF5_TEST_LIBSH_TARGET} + ${HDF5_LIBSH_TARGET} + "$<$:MPI::MPI_C>" + ) + endif () + set_target_properties ( + h5_api_test_parallel_${api_test_extra} + PROPERTIES + FOLDER test/par/API + ) + # Add Target to clang-format + if (HDF5_ENABLE_FORMATTERS) + clang_format (HDF5_TEST_h5_api_test_parallel_${api_test_extra}_FORMAT h5_api_test_parallel_${api_test_extra}) + endif () +endforeach () + +#----------------------------------------------------------------------------- +# Add tests if HDF5 parallel testing is enabled +#----------------------------------------------------------------------------- +if (HDF5_TEST_PARALLEL) + if (HDF5_TEST_API_ENABLE_DRIVER) + if ("${HDF5_TEST_API_SERVER}" STREQUAL "") + message (FATAL_ERROR "Please set HDF5_TEST_API_SERVER to point to a server executable for the test driver program.") + endif () + + # Driver options + if (HDF5_TEST_API_SERVER_ALLOW_ERRORS) + set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS --allow-server-errors) + endif () + if (HDF5_TEST_API_CLIENT_HELPER) + set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + --client-helper ${HDF5_TEST_API_CLIENT_HELPER} + ) + endif () + if (HDF5_TEST_API_CLIENT_INIT) + set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + --client-init ${HDF5_TEST_API_CLIENT_INIT} + ) + endif () + + set(last_api_test "") + foreach (api_test ${HDF5_API_TESTS}) + add_test ( + NAME "h5_api_test_parallel_${api_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ "${api_test}" + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + + set_tests_properties("h5_api_test_parallel_${api_test}" PROPERTIES DEPENDS "${last_api_test}") + + set(last_api_test "h5_api_test_parallel_${api_test}") + endforeach () + + foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA}) + add_test ( + NAME "h5_api_test_parallel_${hdf5_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + endforeach () + + # Hook external tests to same test suite + foreach (ext_api_test ${HDF5_API_EXT_PARALLEL_TESTS}) + add_test ( + NAME "h5_api_ext_test_parallel_${ext_api_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + endforeach () 
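+
+    # As an illustrative (unverified) sketch, an external VOL connector could
+    # hook its own parallel API test into the loop above by defining, at
+    # configure time, something like:
+    #   set (HDF5_API_EXT_PARALLEL_TESTS my_connector_parallel_test)
+    #   set (HDF5_TEST_API_SERVER /path/to/my_connector_server)
+    # where the test name and the server path are hypothetical values.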
+ else () + set(last_api_test "") + foreach (api_test ${HDF5_API_TESTS}) + add_test ( + NAME "h5_api_test_parallel_${api_test}" + COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} + ${MPIEXEC_PREFLAGS} $ "${api_test}" + ${MPIEXEC_POSTFLAGS} + ) + + set_tests_properties("h5_api_test_parallel_${api_test}" PROPERTIES DEPENDS "${last_api_test}") + + set(last_api_test "h5_api_test_parallel_${api_test}") + endforeach () + + foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA}) + add_test ( + NAME "h5_api_test_parallel_${hdf5_test}" + COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} + ${MPIEXEC_PREFLAGS} $ + ${MPIEXEC_POSTFLAGS} + ) + endforeach () + endif () +endif () diff --git a/testpar/API/H5_api_async_test_parallel.c b/testpar/API/H5_api_async_test_parallel.c new file mode 100644 index 00000000000..dcb5e8d8b8d --- /dev/null +++ b/testpar/API/H5_api_async_test_parallel.c @@ -0,0 +1,3668 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_async_test_parallel.h" + +#ifdef H5ESpublic_H + +static int test_one_dataset_io(void); +static int test_multi_dataset_io(void); +static int test_multi_file_dataset_io(void); +static int test_multi_file_grp_dset_io(void); +static int test_set_extent(void); +static int test_attribute_exists(void); +static int test_attribute_io(void); +static int test_attribute_io_tconv(void); +static int test_attribute_io_compound(void); +static int test_group(void); +static int test_link(void); +static int test_ocopy_orefresh(void); +static int test_file_reopen(void); + +/* + * The array of parallel async tests to be performed. + */ +static int (*par_async_tests[])(void) = { + test_one_dataset_io, + test_multi_dataset_io, + test_multi_file_dataset_io, + test_multi_file_grp_dset_io, + test_set_extent, + test_attribute_exists, + test_attribute_io, + test_attribute_io_tconv, + test_attribute_io_compound, + test_group, + test_link, + test_ocopy_orefresh, + test_file_reopen, +}; + +hbool_t coll_metadata_read = TRUE; + +/* Highest "printf" file created (starting at 0) */ +int max_printf_file = -1; + +/* + * Create file and dataset. Each rank writes to a portion + * of the dataset. 
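+ *
+ * As a concrete (illustrative) example of the decomposition used below:
+ * with 4 MPI ranks and generated dims of, say, {4, 100}, rank r selects
+ * the hyperslab start = {r, 0}, block = {1, 100}, so each rank owns one
+ * "row" of the dataset.  The actual dims come from
+ * generate_random_parallel_dimensions() and vary from run to run.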
+ */ +#define ONE_DATASET_IO_TEST_SPACE_RANK 2 +static int +test_one_dataset_io(void) +{ + hsize_t *dims = NULL; + hsize_t start[ONE_DATASET_IO_TEST_SPACE_RANK]; + hsize_t stride[ONE_DATASET_IO_TEST_SPACE_RANK]; + hsize_t count[ONE_DATASET_IO_TEST_SPACE_RANK]; + hsize_t block[ONE_DATASET_IO_TEST_SPACE_RANK]; + hbool_t op_failed = false; + hbool_t is_native_vol = false; + size_t i, data_size, num_in_progress; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + int *write_buf = NULL; + int *read_buf = NULL; + + TESTING_MULTIPART("single dataset I/O") + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf( + " API functions for basic file, dataset, or flush aren't supported with this connector\n"); + } + + return 0; + } + + TESTING_2("test setup"); + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create dataspace */ + if (generate_random_parallel_dimensions(ONE_DATASET_IO_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if ((space_id = H5Screate_simple(ONE_DATASET_IO_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Create file asynchronously */ + if ((file_id = H5Fcreate_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0) + TEST_ERROR; + + /* Find out if the native connector is used */ + if (H5VLobject_is_native(file_id, &is_native_vol) < 0) + TEST_ERROR; + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Calculate size of data buffers - first dimension is skipped in calculation */ + for (i = 1, data_size = 1; i < ONE_DATASET_IO_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= sizeof(int); + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + TEST_ERROR; + } + + if (NULL == (read_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + TEST_ERROR; + } + + /* Select this rank's portion of the dataspace */ + for (i = 0; i < ONE_DATASET_IO_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + goto error; + } + + /* Setup memory space for write_buf */ + { + hsize_t mdims[] = {data_size / sizeof(int)}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(single_dset_eswait) + { + TESTING_2("synchronization using H5ESwait()"); + + /* Initialize write_buf */ + for (i = 0; i < data_size / sizeof(int); i++) + ((int *)write_buf)[i] = mpi_rank; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, 
mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) < + 0) + PART_TEST_ERROR(single_dset_eswait); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(single_dset_eswait); + if (op_failed) + PART_TEST_ERROR(single_dset_eswait); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0) + PART_TEST_ERROR(single_dset_eswait); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(single_dset_eswait); + if (op_failed) + PART_TEST_ERROR(single_dset_eswait); + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(single_dset_eswait); + } /* end if */ + + PASSED(); + } + PART_END(single_dset_eswait); + + PART_BEGIN(single_dset_dclose) + { + TESTING_2("synchronization using H5Dclose()"); + + /* Initialize write_buf */ + for (i = 0; i < data_size / sizeof(int); i++) + ((int *)write_buf)[i] = (int)i; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) < + 0) + PART_TEST_ERROR(single_dset_dclose); + + /* Close the dataset synchronously */ + if (H5Dclose(dset_id) < 0) + PART_TEST_ERROR(single_dset_dclose); + + /* Re-open the dataset asynchronously */ + if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(single_dset_dclose); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0) + PART_TEST_ERROR(single_dset_dclose); + + /* Close the dataset synchronously */ + if (H5Dclose(dset_id) < 0) + PART_TEST_ERROR(single_dset_dclose); + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(single_dset_dclose); + } /* end if */ + + /* Re-open the dataset asynchronously */ + if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(single_dset_dclose); + + PASSED(); + } + PART_END(single_dset_dclose); + + PART_BEGIN(single_dset_dflush) + { + TESTING_2("synchronization using H5Oflush_async()"); + + /* Initialize write_buf */ + for (i = 0; i < data_size / sizeof(int); i++) + ((int *)write_buf)[i] = 10 * (int)i; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) < + 0) + PART_TEST_ERROR(single_dset_dflush); + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. Skip this + * function because it isn't supported for the native vol in parallel. 
*/ + if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0) + PART_TEST_ERROR(single_dset_dflush); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0) + PART_TEST_ERROR(single_dset_dflush); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(single_dset_dflush); + if (op_failed) + PART_TEST_ERROR(single_dset_dflush); + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(single_dset_dflush); + } /* end if */ + + PASSED(); + } + PART_END(single_dset_dflush); + + PART_BEGIN(single_dset_fclose) + { + TESTING_2("synchronization using H5Fclose()"); + + /* Initialize write_buf */ + for (i = 0; i < data_size / sizeof(int); i++) + ((int *)write_buf)[i] = (int)i + 5; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) < + 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Close the file synchronously */ + if (H5Fclose(file_id) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Reopen the file asynchronously. */ + if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDONLY, fapl_id, es_id)) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Re-open the dataset asynchronously */ + if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Close the file synchronously */ + if (H5Fclose(file_id) < 0) + PART_TEST_ERROR(single_dset_fclose); + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(single_dset_fclose); + } /* end if */ + + PASSED(); + } + PART_END(single_dset_fclose); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(space_id); + H5Sclose(mspace_id); + H5Dclose(dset_id); + H5Pclose(fapl_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} +#undef ONE_DATASET_IO_TEST_SPACE_RANK + +/* + * Create file and multiple datasets. 
Each rank writes to a + * portion of each dataset and reads back their portion of + * each dataset. + */ +#define MULTI_DATASET_IO_TEST_SPACE_RANK 2 +#define MULTI_DATASET_IO_TEST_NDSETS 5 +static int +test_multi_dataset_io(void) +{ + hsize_t *dims = NULL; + hsize_t start[MULTI_DATASET_IO_TEST_SPACE_RANK]; + hsize_t stride[MULTI_DATASET_IO_TEST_SPACE_RANK]; + hsize_t count[MULTI_DATASET_IO_TEST_SPACE_RANK]; + hsize_t block[MULTI_DATASET_IO_TEST_SPACE_RANK]; + hbool_t op_failed; + size_t i, j, data_size, num_in_progress; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id[MULTI_DATASET_IO_TEST_NDSETS] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, + H5I_INVALID_HID, H5I_INVALID_HID}; + hid_t space_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + char dset_name[32]; + int *write_buf = NULL; + int *read_buf = NULL; + + TESTING_MULTIPART("multi dataset I/O") + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf( + " API functions for basic file, dataset, or flush aren't supported with this connector\n"); + } + + return 0; + } + + TESTING_2("test setup"); + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create dataspace */ + if (generate_random_parallel_dimensions(MULTI_DATASET_IO_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* Create dataspace */ + if ((space_id = H5Screate_simple(MULTI_DATASET_IO_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Create file asynchronously */ + if ((file_id = H5Fcreate_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0) + TEST_ERROR; + + /* Calculate size of data buffers - first dimension is skipped in calculation */ + for (i = 1, data_size = 1; i < MULTI_DATASET_IO_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= sizeof(int); + data_size *= MULTI_DATASET_IO_TEST_NDSETS; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + TEST_ERROR; + } + + if (NULL == (read_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + TEST_ERROR; + } + + /* Select this rank's portion of the dataspace */ + for (i = 0; i < MULTI_DATASET_IO_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + goto error; + } + + /* Setup memory space for write_buf */ + { + hsize_t mdims[] = {data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int)}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(multi_dset_open) + { + size_t buf_start_idx; + + TESTING_2("keeping datasets open"); + + /* Loop over datasets */ + for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) { + size_t buf_end_idx; + + /* Set dataset name */ + sprintf(dset_name, "dset%d", 
(int)i); + + /* Create the dataset asynchronously */ + if ((dset_id[i] = H5Dcreate_async(file_id, dset_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_dset_open); + + /* Initialize write_buf. Must use a new slice of write_buf for + * each dset since we can't overwrite the buffers until I/O is done. */ + buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int)); + buf_end_idx = buf_start_idx + (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int)); + for (j = buf_start_idx; j < buf_end_idx; j++) + ((int *)write_buf)[j] = mpi_rank; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &write_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_dset_open); + } /* end for */ + + /* Flush the file asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. */ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + PART_TEST_ERROR(multi_dset_open); + + /* Loop over datasets */ + for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) { + buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int)); + + /* Read the dataset asynchronously */ + if (H5Dread_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &read_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_dset_open); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_dset_open); + if (op_failed) + PART_TEST_ERROR(multi_dset_open); + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_dset_open); + } /* end if */ + + /* Close the datasets */ + for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) + if (H5Dclose(dset_id[i]) < 0) + PART_TEST_ERROR(multi_dset_open); + + PASSED(); + } + PART_END(multi_dset_open); + + PART_BEGIN(multi_dset_close) + { + size_t buf_start_idx; + + TESTING_2("closing datasets between I/O"); + + /* Loop over datasets */ + for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) { + size_t buf_end_idx; + + /* Set dataset name */ + sprintf(dset_name, "dset%d", (int)i); + + /* Open the dataset asynchronously */ + if ((dset_id[0] = H5Dopen_async(file_id, dset_name, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_dset_close); + + /* Initialize write_buf. */ + buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int)); + buf_end_idx = buf_start_idx + (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int)); + for (j = buf_start_idx; j < buf_end_idx; j++) + ((int *)write_buf)[j] = mpi_rank * 10; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &write_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_dset_close); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id[0], es_id) < 0) + PART_TEST_ERROR(multi_dset_close); + } /* end for */ + + /* Flush the file asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. 
*/ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + PART_TEST_ERROR(multi_dset_close); + + /* Loop over datasets */ + for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) { + /* Set dataset name */ + sprintf(dset_name, "dset%d", (int)i); + + /* Open the dataset asynchronously */ + if ((dset_id[0] = H5Dopen_async(file_id, dset_name, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_dset_close); + + /* Read the dataset asynchronously */ + buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int)); + if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &read_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_dset_close); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id[0], es_id) < 0) + PART_TEST_ERROR(multi_dset_close); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_dset_close); + if (op_failed) + PART_TEST_ERROR(multi_dset_close); + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_dset_close); + } /* end if */ + + PASSED(); + } + PART_END(multi_dset_close); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(space_id); + H5Sclose(mspace_id); + for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) + H5Dclose(dset_id[i]); + H5Pclose(fapl_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} +#undef MULTI_DATASET_IO_TEST_SPACE_RANK +#undef MULTI_DATASET_IO_TEST_NDSETS + +/* + * Create multiple files, each with a single dataset. Each rank writes + * to a portion of each dataset and reads from a portion of each dataset. 
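+ *
+ * The test is split into three parts: one that keeps every file and
+ * dataset open across the I/O, one that closes the datasets between I/O
+ * calls, and one that also closes the files between I/O calls.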
+ */ +#define MULTI_FILE_DATASET_IO_TEST_SPACE_RANK 2 +#define MULTI_FILE_DATASET_IO_TEST_NFILES 5 +static int +test_multi_file_dataset_io(void) +{ + hsize_t *dims = NULL; + hsize_t start[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK]; + hsize_t stride[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK]; + hsize_t count[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK]; + hsize_t block[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK]; + hbool_t op_failed = false; + hbool_t is_native_vol = false; + size_t i, j, data_size, num_in_progress; + hid_t fapl_id = H5I_INVALID_HID; + hid_t file_id[MULTI_FILE_DATASET_IO_TEST_NFILES] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, + H5I_INVALID_HID, H5I_INVALID_HID}; + hid_t dset_id[MULTI_FILE_DATASET_IO_TEST_NFILES] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, + H5I_INVALID_HID, H5I_INVALID_HID}; + hid_t space_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + char file_name[32]; + int *write_buf = NULL; + int *read_buf = NULL; + + TESTING_MULTIPART("multi file dataset I/O") + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf( + " API functions for basic file, dataset, or flush aren't supported with this connector\n"); + } + + return 0; + } + + TESTING_2("test setup"); + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create dataspace */ + if (generate_random_parallel_dimensions(MULTI_FILE_DATASET_IO_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* Create dataspace */ + if ((space_id = H5Screate_simple(MULTI_FILE_DATASET_IO_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Calculate size of data buffers - first dimension is skipped in calculation */ + for (i = 1, data_size = 1; i < MULTI_FILE_DATASET_IO_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= sizeof(int); + data_size *= MULTI_FILE_DATASET_IO_TEST_NFILES; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + TEST_ERROR; + } + + if (NULL == (read_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + TEST_ERROR; + } + + /* Select this rank's portion of the dataspace */ + for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + goto error; + } + + /* Setup memory space for write_buf */ + { + hsize_t mdims[] = {data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int)}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(multi_file_dset_open) + { + size_t buf_start_idx; + + TESTING_2("keeping files and datasets open"); + + /* Loop over files */ + for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) { + size_t buf_end_idx; + + /* Set file name */ + sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, 
(int)i); + + /* Create file asynchronously */ + if ((file_id[i] = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_open); + if ((int)i > max_printf_file) + max_printf_file = (int)i; + + /* Create the dataset asynchronously */ + if ((dset_id[i] = H5Dcreate_async(file_id[i], "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_open); + + /* Initialize write_buf. Must use a new slice of write_buf for + * each dset since we can't overwrite the buffers until I/O is done. */ + buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int)); + buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int)); + for (j = buf_start_idx; j < buf_end_idx; j++) + ((int *)write_buf)[j] = mpi_rank; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &write_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_open); + } /* end for */ + + /* Find out if the native connector is used */ + if (H5VLobject_is_native(file_id[0], &is_native_vol) < 0) + PART_TEST_ERROR(multi_file_dset_open); + + /* Loop over files */ + for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) { + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. Skip this + * function because it isn't supported for the native vol in parallel. */ + if (!is_native_vol && H5Oflush_async(dset_id[i], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_open); + + /* Read the dataset asynchronously */ + buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int)); + if (H5Dread_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &read_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_open); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_dset_open); + if (op_failed) + PART_TEST_ERROR(multi_file_dset_open); + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_file_dset_open); + } /* end if */ + + /* Close the datasets */ + for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) + if (H5Dclose(dset_id[i]) < 0) + PART_TEST_ERROR(multi_file_dset_open); + + PASSED(); + } + PART_END(multi_file_dset_open); + + PART_BEGIN(multi_file_dset_dclose) + { + size_t buf_start_idx; + + TESTING_2("closing datasets between I/O"); + + /* Loop over files */ + for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) { + size_t buf_end_idx; + + /* Open the dataset asynchronously */ + if ((dset_id[0] = H5Dopen_async(file_id[i], "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_dclose); + + /* Initialize write_buf. 
*/
+                buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+                buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+                for (j = buf_start_idx; j < buf_end_idx; j++)
+                    ((int *)write_buf)[j] = mpi_rank * 10;
+
+                /* Write the dataset asynchronously */
+                if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+                                   &write_buf[buf_start_idx], es_id) < 0)
+                    PART_TEST_ERROR(multi_file_dset_dclose);
+
+                /* Close the dataset asynchronously */
+                if (H5Dclose_async(dset_id[0], es_id) < 0)
+                    PART_TEST_ERROR(multi_file_dset_dclose);
+            } /* end for */
+
+            /* Loop over files */
+            for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+                /* Flush the file asynchronously. This will effectively work as a
+                 * barrier, guaranteeing the read takes place after the write. */
+                if (H5Fflush_async(file_id[i], H5F_SCOPE_LOCAL, es_id) < 0)
+                    PART_TEST_ERROR(multi_file_dset_dclose);
+
+                /* Open the dataset asynchronously */
+                if ((dset_id[0] = H5Dopen_async(file_id[i], "dset", H5P_DEFAULT, es_id)) < 0)
+                    PART_TEST_ERROR(multi_file_dset_dclose);
+
+                /* Read the dataset asynchronously */
+                buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+                if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+                                  &read_buf[buf_start_idx], es_id) < 0)
+                    PART_TEST_ERROR(multi_file_dset_dclose);
+
+                /* Close the dataset asynchronously */
+                if (H5Dclose_async(dset_id[0], es_id) < 0)
+                    PART_TEST_ERROR(multi_file_dset_dclose);
+            } /* end for */
+
+            /* Wait for the event stack to complete */
+            if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+                PART_TEST_ERROR(multi_file_dset_dclose);
+            if (op_failed)
+                PART_TEST_ERROR(multi_file_dset_dclose);
+
+            /* Verify the read data */
+            for (i = 0; i < data_size / sizeof(int); i++)
+                if (write_buf[i] != read_buf[i]) {
+                    H5_FAILED();
+                    HDprintf("    data verification failed\n");
+                    PART_ERROR(multi_file_dset_dclose);
+                } /* end if */
+
+            /* Close the files */
+            for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++)
+                if (H5Fclose(file_id[i]) < 0)
+                    PART_TEST_ERROR(multi_file_dset_dclose);
+
+            PASSED();
+        }
+        PART_END(multi_file_dset_dclose);
+
+        PART_BEGIN(multi_file_dset_fclose)
+        {
+            size_t buf_start_idx;
+
+            TESTING_2("closing files between I/O");
+
+            /* Loop over files */
+            for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+                size_t buf_end_idx;
+
+                /* Set file name */
+                sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+                /* Open the file asynchronously */
+                if ((file_id[0] = H5Fopen_async(file_name, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+                    PART_TEST_ERROR(multi_file_dset_fclose);
+
+                /* Open the dataset asynchronously */
+                if ((dset_id[0] = H5Dopen_async(file_id[0], "dset", H5P_DEFAULT, es_id)) < 0)
+                    PART_TEST_ERROR(multi_file_dset_fclose);
+
+                /* Initialize write_buf. 
*/ + buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int)); + buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int)); + for (j = buf_start_idx; j < buf_end_idx; j++) + ((int *)write_buf)[j] = mpi_rank + 5; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &write_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id[0], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id[0], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + if (op_failed) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Loop over files */ + for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) { + /* Set file name */ + sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i); + + /* Open the file asynchronously */ + if ((file_id[0] = H5Fopen_async(file_name, H5F_ACC_RDONLY, fapl_id, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Open the dataset asynchronously */ + if ((dset_id[0] = H5Dopen_async(file_id[0], "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Read the dataset asynchronously */ + buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int)); + if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &read_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id[0], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id[0], es_id) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_dset_fclose); + if (op_failed) + PART_TEST_ERROR(multi_file_dset_fclose); + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_file_dset_fclose); + } /* end if */ + + PASSED(); + } + PART_END(multi_file_dset_fclose); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(space_id); + H5Sclose(mspace_id); + for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) { + H5Dclose(dset_id[i]); + H5Fclose(file_id[i]); + } + H5Pclose(fapl_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, 
&num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} +#undef MULTI_FILE_DATASET_IO_TEST_SPACE_RANK +#undef MULTI_FILE_DATASET_IO_TEST_NFILES + +/* + * Create multiple files, each with a single group and dataset. Each rank + * writes to a portion of each dataset and reads from a portion of each dataset. + */ +#define MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK 2 +#define MULTI_FILE_GRP_DSET_IO_TEST_NFILES 5 +static int +test_multi_file_grp_dset_io(void) +{ + hsize_t *dims = NULL; + hsize_t start[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK]; + hsize_t stride[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK]; + hsize_t count[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK]; + hsize_t block[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK]; + hbool_t op_failed; + size_t i, j, data_size, num_in_progress; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t grp_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + char file_name[32]; + int *write_buf = NULL; + int *read_buf = NULL; + + TESTING_MULTIPART("multi file dataset I/O with groups") + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + } + + return 0; + } + + TESTING_2("test setup"); + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create dataspace */ + if (generate_random_parallel_dimensions(MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* Create dataspace */ + if ((space_id = H5Screate_simple(MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Calculate size of data buffers - first dimension is skipped in calculation */ + for (i = 1, data_size = 1; i < MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= sizeof(int); + data_size *= MULTI_FILE_GRP_DSET_IO_TEST_NFILES; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + TEST_ERROR; + } + + if (NULL == (read_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + TEST_ERROR; + } + + /* Select this rank's portion of the dataspace */ + for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + goto error; + } + + /* Setup memory space for write_buf */ + { + hsize_t mdims[] = {data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int)}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(multi_file_grp_dset_no_kick) + { + size_t buf_start_idx; + + TESTING_2("without intermediate calls to H5ESwait()"); + + /* Loop over files */ + 
for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) { + size_t buf_end_idx; + + /* Set file name */ + sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i); + + /* Create file asynchronously */ + if ((file_id = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + if ((int)i > max_printf_file) + max_printf_file = (int)i; + + /* Create the group asynchronously */ + if ((grp_id = H5Gcreate_async(file_id, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(grp_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Initialize write_buf. Must use a new slice of write_buf for + * each dset since we can't overwrite the buffers until I/O is done. */ + buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int)); + buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int)); + for (j = buf_start_idx; j < buf_end_idx; j++) + ((int *)write_buf)[j] = mpi_rank; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &write_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the group asynchronously */ + if (H5Gclose_async(grp_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Loop over files */ + for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) { + /* Set file name */ + sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i); + + /* Open the file asynchronously */ + if ((file_id = H5Fopen_async(file_name, H5F_ACC_RDONLY, fapl_id, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Open the group asynchronously */ + if ((grp_id = H5Gopen_async(file_id, "grp", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Open the dataset asynchronously */ + if ((dset_id = H5Dopen_async(grp_id, "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Read the dataset asynchronously */ + buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int)); + if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &read_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the group asynchronously */ + if (H5Gclose_async(grp_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, 
&num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_no_kick); + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_file_grp_dset_no_kick); + } /* end if */ + + PASSED(); + } + PART_END(multi_file_grp_dset_no_kick); + + PART_BEGIN(multi_file_grp_dset_kick) + { + size_t buf_start_idx; + + TESTING_2("with intermediate calls to H5ESwait() (0 timeout)"); + + /* Loop over files */ + for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) { + size_t buf_end_idx; + + /* Set file name */ + sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i); + + /* Create file asynchronously */ + if ((file_id = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + if ((int)i > max_printf_file) + max_printf_file = (int)i; + + /* Create the group asynchronously */ + if ((grp_id = H5Gcreate_async(file_id, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(grp_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Initialize write_buf. Must use a new slice of write_buf for + * each dset since we can't overwrite the buffers until I/O is done. */ + buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int)); + buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int)); + for (j = buf_start_idx; j < buf_end_idx; j++) + ((int *)write_buf)[j] = mpi_rank; + + /* Write the dataset asynchronously */ + if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &write_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the group asynchronously */ + if (H5Gclose_async(grp_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Kick the event stack to make progress */ + if (H5ESwait(es_id, 0, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_kick); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Loop over files */ + for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) { + /* Set file name */ + sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i); + + /* Open the file asynchronously */ + if ((file_id = H5Fopen_async(file_name, H5F_ACC_RDONLY, fapl_id, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Open the group asynchronously */ + if ((grp_id = H5Gopen_async(file_id, "grp", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Open the dataset asynchronously */ + if ((dset_id = H5Dopen_async(grp_id, "dset", H5P_DEFAULT, es_id)) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Read the 
dataset asynchronously */ + buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int)); + if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, + &read_buf[buf_start_idx], es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the group asynchronously */ + if (H5Gclose_async(grp_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Close the file asynchronously */ + if (H5Fclose_async(file_id, es_id) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Kick the event stack to make progress */ + if (H5ESwait(es_id, 0, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_kick); + } /* end for */ + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + PART_TEST_ERROR(multi_file_grp_dset_kick); + if (op_failed) + PART_TEST_ERROR(multi_file_grp_dset_kick); + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + PART_ERROR(multi_file_grp_dset_kick); + } /* end if */ + + PASSED(); + } + PART_END(multi_file_grp_dset_kick); + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(space_id); + H5Sclose(mspace_id); + H5Dclose(dset_id); + H5Gclose(grp_id); + H5Fclose(file_id); + H5Pclose(fapl_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} +#undef MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK +#undef MULTI_FILE_GRP_DSET_IO_TEST_NFILES + +/* + * Creates a single file and dataset, then each rank writes to a portion + * of the dataset. Next, the dataset is continually extended in the first + * dimension by 1 "row" per mpi rank and partially written to by each rank. + * Finally, each rank reads from a portion of the dataset. 
+ */ +#define SET_EXTENT_TEST_SPACE_RANK 2 +#define SET_EXTENT_TEST_NUM_EXTENDS 6 +static int +test_set_extent(void) +{ + hsize_t *dims = NULL; + hsize_t *maxdims = NULL; + hsize_t *cdims = NULL; + hsize_t start[SET_EXTENT_TEST_SPACE_RANK]; + hsize_t stride[SET_EXTENT_TEST_SPACE_RANK]; + hsize_t count[SET_EXTENT_TEST_SPACE_RANK]; + hsize_t block[SET_EXTENT_TEST_SPACE_RANK]; + hbool_t op_failed = false; + hbool_t is_native_vol = false; + size_t i, j, data_size, num_in_progress; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t space_id_out = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + htri_t tri_ret; + int *write_buf = NULL; + int *read_buf = NULL; + + TESTING("extending dataset"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf(" API functions for basic file, dataset, dataset more, or flush aren't supported " + "with this connector\n"); + } + + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create dataspace */ + if (generate_random_parallel_dimensions(SET_EXTENT_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if (NULL == (maxdims = HDmalloc(SET_EXTENT_TEST_SPACE_RANK * sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate max dataspace dimension buffer\n"); + TEST_ERROR; + } + + if (NULL == (cdims = HDmalloc(SET_EXTENT_TEST_SPACE_RANK * sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate chunk dimension buffer\n"); + TEST_ERROR; + } + + for (i = 0; i < SET_EXTENT_TEST_SPACE_RANK; i++) { + maxdims[i] = (i == 0) ? dims[i] + (hsize_t)(SET_EXTENT_TEST_NUM_EXTENDS * mpi_size) : dims[i]; + cdims[i] = (dims[i] == 1) ? 
1 : dims[i] / 2; + } + + /* Create file dataspace */ + if ((space_id = H5Screate_simple(SET_EXTENT_TEST_SPACE_RANK, dims, maxdims)) < 0) + TEST_ERROR; + + /* Create DCPL */ + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + /* Set chunking */ + if (H5Pset_chunk(dcpl_id, SET_EXTENT_TEST_SPACE_RANK, cdims) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Create file asynchronously */ + if ((file_id = H5Fcreate_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0) + TEST_ERROR; + + /* Find out if the native connector is used */ + if (H5VLobject_is_native(file_id, &is_native_vol) < 0) + TEST_ERROR; + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id, + H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Calculate size of data buffers - first dimension is skipped in calculation */ + for (i = 1, data_size = 1; i < SET_EXTENT_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= sizeof(int); + data_size *= SET_EXTENT_TEST_NUM_EXTENDS; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + TEST_ERROR; + } + + if (NULL == (read_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + TEST_ERROR; + } + + /* Select this rank's portion of the dataspace */ + for (i = 0; i < SET_EXTENT_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + goto error; + } + + /* Setup memory space for write_buf */ + { + hsize_t mdims[] = {data_size / SET_EXTENT_TEST_NUM_EXTENDS / sizeof(int)}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + /* Initialize write_buf */ + for (i = 0; i < data_size / sizeof(int); i++) + ((int *)write_buf)[i] = mpi_rank; + + /* Extend the dataset in the first dimension n times, extending by 1 "row" per + * mpi rank involved on each iteration. Each rank will claim one of the new + * "rows" for I/O in an interleaved fashion. 
*/
+    for (i = 0; i < SET_EXTENT_TEST_NUM_EXTENDS; i++) {
+        /* No need to extend on the first iteration */
+        if (i) {
+            /* Extend dataspace */
+            dims[0] += (hsize_t)mpi_size;
+            if (H5Sset_extent_simple(space_id, SET_EXTENT_TEST_SPACE_RANK, dims, maxdims) < 0)
+                TEST_ERROR;
+
+            /* Extend dataset asynchronously */
+            if (H5Dset_extent_async(dset_id, dims, es_id) < 0)
+                TEST_ERROR;
+
+            /* Select hyperslab in file space to match new region */
+            for (j = 0; j < SET_EXTENT_TEST_SPACE_RANK; j++) {
+                if (j == 0) {
+                    start[j] = (hsize_t)mpi_rank;
+                    block[j] = 1;
+                    stride[j] = (hsize_t)mpi_size;
+                    count[j] = i + 1;
+                }
+                else {
+                    start[j] = 0;
+                    block[j] = dims[j];
+                    stride[j] = 1;
+                    count[j] = 1;
+                }
+            }
+
+            if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+                H5_FAILED();
+                HDprintf("    couldn't select hyperslab for dataset write\n");
+                goto error;
+            }
+
+            /* Adjust memory dataspace to match as well */
+            {
+                hsize_t mdims[] = {(i + 1) * (data_size / SET_EXTENT_TEST_NUM_EXTENDS / sizeof(int))};
+
+                if (H5Sset_extent_simple(mspace_id, 1, mdims, NULL) < 0)
+                    TEST_ERROR;
+
+                if (H5Sselect_all(mspace_id) < 0)
+                    TEST_ERROR;
+            }
+        } /* end if */
+
+        /* Get dataset dataspace */
+        if ((space_id_out = H5Dget_space_async(dset_id, es_id)) < 0)
+            TEST_ERROR;
+
+        /* Verify extent is correct */
+        if ((tri_ret = H5Sextent_equal(space_id, space_id_out)) < 0)
+            TEST_ERROR;
+        if (!tri_ret)
+            FAIL_PUTS_ERROR("    dataspaces are not equal\n");
+
+        /* Close output dataspace */
+        if (H5Sclose(space_id_out) < 0)
+            TEST_ERROR;
+
+        /* Write the dataset slice asynchronously */
+        if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) < 0)
+            TEST_ERROR;
+    }
+
+    /* Flush the dataset asynchronously. This will effectively work as a
+     * barrier, guaranteeing the read takes place after the write. Skip this
+     * function because it isn't supported for the native vol in parallel. 
*/ + if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0) + TEST_ERROR; + + /* Read the entire dataset asynchronously */ + if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed, expected %d but got %d\n", write_buf[i], read_buf[i]); + goto error; + } /* end if */ + + /* Close dataset asynchronously */ + if (H5Dclose_async(dset_id, es_id) < 0) + TEST_ERROR; + + /* Open dataset asynchronously */ + if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Get dataset dataspace asynchronously */ + if ((space_id_out = H5Dget_space_async(dset_id, es_id)) < 0) + TEST_ERROR; + + /* Verify the extents match */ + if ((tri_ret = H5Sextent_equal(space_id, space_id_out)) < 0) + TEST_ERROR; + if (!tri_ret) + FAIL_PUTS_ERROR(" dataspaces are not equal\n"); + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (cdims) { + HDfree(cdims); + cdims = NULL; + } + + if (maxdims) { + HDfree(maxdims); + maxdims = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (cdims) + HDfree(cdims); + if (maxdims) + HDfree(maxdims); + if (dims) + HDfree(dims); + H5Sclose(space_id); + H5Sclose(mspace_id); + H5Sclose(space_id_out); + H5Dclose(dset_id); + H5Pclose(dcpl_id); + H5Fclose(file_id); + H5Pclose(fapl_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} +#undef SET_EXTENT_TEST_SPACE_RANK +#undef SET_EXTENT_TEST_NUM_EXTENDS + +/* + * Creates an attribute on a dataset. All ranks check to see + * if the attribute exists before and after creating the + * attribute on the dataset. 
+ */ +#define ATTRIBUTE_EXISTS_TEST_SPACE_RANK 2 +static int +test_attribute_exists(void) +{ + hsize_t *dims = NULL; + hbool_t op_failed = false; + hbool_t is_native_vol = false; + size_t num_in_progress; + hbool_t exists1 = false; + hbool_t exists2 = false; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + + TESTING("H5Aexists()"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf(" API functions for basic file, dataset, dataset more, attribute, or flush aren't " + "supported with this connector\n"); + } + + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create dataspace */ + if (generate_random_parallel_dimensions(ATTRIBUTE_EXISTS_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* Create dataspace */ + if ((space_id = H5Screate_simple(ATTRIBUTE_EXISTS_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0) + TEST_ERROR; + + /* Find out if the native connector is used */ + if (H5VLobject_is_native(file_id, &is_native_vol) < 0) + TEST_ERROR; + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(file_id, "attr_exists_dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Check if the attribute exists asynchronously */ + if (H5Aexists_async(dset_id, "attr", &exists1, es_id) < 0) + TEST_ERROR; + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the create takes place after the existence check. + * Skip this function because it isn't supported for the native vol in parallel. + */ + if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0) + TEST_ERROR; + + /* Create the attribute asynchronously */ + if ((attr_id = + H5Acreate_async(dset_id, "attr", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the existence check takes place after the create. + * Skip this function because it isn't supported for the native vol in parallel. 
+ */ + if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0) + TEST_ERROR; + + /* Check if the attribute exists asynchronously */ + if (H5Aexists_async(dset_id, "attr", &exists2, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Check if H5Aexists returned the correct values */ + if (exists1) + FAIL_PUTS_ERROR(" H5Aexists returned TRUE for an attribute that should not exist") + if (!exists2) + FAIL_PUTS_ERROR(" H5Aexists returned FALSE for an attribute that should exist") + + /* Close */ + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + if (H5Dclose_async(dset_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (dims) + HDfree(dims); + H5Sclose(space_id); + H5Aclose(attr_id); + H5Dclose(dset_id); + H5Pclose(fapl_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} +#undef ATTRIBUTE_EXISTS_TEST_SPACE_RANK + +/* + * Creates a file, dataset and attribute. Each rank writes to + * the attribute. Then, each rank reads the attribute and + * verifies the data is correct. + */ +#define ATTRIBUTE_IO_TEST_SPACE_RANK 2 +static int +test_attribute_io(void) +{ + hsize_t *dims = NULL; + hbool_t op_failed = false; + hbool_t is_native_vol = false; + size_t num_in_progress; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + int *write_buf = NULL; + int *read_buf = NULL; + + TESTING("attribute I/O"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf(" API functions for basic file, dataset, dataset more, attribute, or flush aren't " + "supported with this connector\n"); + } + + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create dataspace */ + if (generate_random_parallel_dimensions(ATTRIBUTE_IO_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* Create dataspace */ + if ((space_id = H5Screate_simple(ATTRIBUTE_IO_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0) + TEST_ERROR; + + /* Find out if the native connector is used */ + if (H5VLobject_is_native(file_id, &is_native_vol) < 0) + TEST_ERROR; + + /* Create the dataset asynchronously */ + if ((dset_id = H5Dcreate_async(file_id, "attr_dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT, 
es_id)) < 0) + TEST_ERROR; + + /* Create the attribute asynchronously */ + if ((attr_id = + H5Acreate_async(dset_id, "attr", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Calculate size of data buffers */ + for (i = 0, data_size = 1; i < ATTRIBUTE_IO_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= sizeof(int); + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for attribute write\n"); + TEST_ERROR; + } + + if (NULL == (read_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for attribute read\n"); + TEST_ERROR; + } + + /* Initialize write_buf. */ + for (i = 0; i < data_size / sizeof(int); i++) + write_buf[i] = 10 * (int)i; + + /* Write the attribute asynchronously */ + if (H5Awrite_async(attr_id, H5T_NATIVE_INT, write_buf, es_id) < 0) + TEST_ERROR; + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. + * Skip this function because it isn't supported for the native vol in parallel. + */ + if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0) + TEST_ERROR; + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } /* end if */ + + /* Close the attribute asynchronously */ + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + + /* Open the attribute asynchronously */ + if ((attr_id = H5Aopen_async(dset_id, "attr", H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } /* end if */ + + /* Close out of order to see if it trips things up */ + if (H5Dclose_async(dset_id, es_id) < 0) + TEST_ERROR; + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(space_id); + H5Aclose(attr_id); + H5Dclose(dset_id); + H5Pclose(fapl_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} + +/* 
+ * Creates a file, dataset and attribute in parallel. Each rank writes to + * the attribute with datatype conversion involved, then reads back the + * attribute and verifies the data is correct. + */ +#define ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK 2 +static int +test_attribute_io_tconv(void) +{ + hsize_t *dims = NULL; + hbool_t op_failed; + size_t num_in_progress; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + int *write_buf = NULL; + int *read_buf = NULL; + + TESTING("attribute I/O with type conversion"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf(" API functions for basic file, attribute, or flush aren't supported with this " + "connector\n"); + } + + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create dataspace */ + if (generate_random_parallel_dimensions(ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* Create dataspace */ + if ((space_id = H5Screate_simple(ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0) + TEST_ERROR; + + /* Create the attribute asynchronously by name */ + if ((attr_id = H5Acreate_by_name_async(file_id, "attr_dset", "attr_tconv", H5T_STD_U16BE, space_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Calculate size of data buffers */ + for (i = 0, data_size = 1; i < ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= sizeof(int); + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for attribute write\n"); + TEST_ERROR; + } + + if (NULL == (read_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for attribute read\n"); + TEST_ERROR; + } + + /* Initialize write_buf. */ + for (i = 0; i < data_size / sizeof(int); i++) + write_buf[i] = 10 * (int)i; + + /* Write the attribute asynchronously */ + if (H5Awrite_async(attr_id, H5T_NATIVE_INT, write_buf, es_id) < 0) + TEST_ERROR; + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. 
*/ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + TEST_ERROR; + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } /* end if */ + + /* Close the attribute asynchronously */ + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + + /* Open the attribute asynchronously */ + if ((attr_id = + H5Aopen_by_name_async(file_id, "attr_dset", "attr_tconv", H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(int); i++) + if (write_buf[i] != read_buf[i]) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } /* end if */ + + /* Close */ + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(space_id); + H5Aclose(attr_id); + H5Dclose(dset_id); + H5Pclose(fapl_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Creates a file, dataset and attribute in parallel. Each rank writes to + * the attribute with a compound datatype, then reads back the attribute + * and verifies the data is correct. 
+ */ +typedef struct tattr_cmpd_t { + int a; + int b; +} tattr_cmpd_t; + +#define ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK 2 +static int +test_attribute_io_compound(void) +{ + hsize_t *dims = NULL; + hbool_t op_failed; + size_t num_in_progress; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t mtype_id = H5I_INVALID_HID; + hid_t ftype_id = H5I_INVALID_HID; + hid_t mtypea_id = H5I_INVALID_HID; + hid_t mtypeb_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + tattr_cmpd_t *write_buf = NULL; + tattr_cmpd_t *read_buf = NULL; + tattr_cmpd_t *fbuf = NULL; + + TESTING("attribute I/O with compound type conversion"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf(" API functions for basic file, dataset, dataset more, attribute, or flush aren't " + "supported with this connector\n"); + } + + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create dataspace */ + if (generate_random_parallel_dimensions(ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* Create datatype */ + if ((mtype_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0) + TEST_ERROR; + if (H5Tinsert(mtype_id, "a_name", HOFFSET(tattr_cmpd_t, a), H5T_NATIVE_INT) < 0) + TEST_ERROR; + if (H5Tinsert(mtype_id, "b_name", HOFFSET(tattr_cmpd_t, b), H5T_NATIVE_INT) < 0) + TEST_ERROR; + + if ((mtypea_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0) + TEST_ERROR; + if (H5Tinsert(mtypea_id, "a_name", HOFFSET(tattr_cmpd_t, a), H5T_NATIVE_INT) < 0) + TEST_ERROR; + + if ((mtypeb_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0) + TEST_ERROR; + if (H5Tinsert(mtypeb_id, "b_name", HOFFSET(tattr_cmpd_t, b), H5T_NATIVE_INT) < 0) + TEST_ERROR; + + if ((ftype_id = H5Tcreate(H5T_COMPOUND, 2 + 8)) < 0) + TEST_ERROR; + if (H5Tinsert(ftype_id, "a_name", 0, H5T_STD_U16BE) < 0) + TEST_ERROR; + if (H5Tinsert(ftype_id, "b_name", 2, H5T_STD_I64LE) < 0) + TEST_ERROR; + + /* Create dataspace */ + if ((space_id = H5Screate_simple(ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0) + TEST_ERROR; + + /* Create the attribute asynchronously by name */ + if ((attr_id = H5Acreate_by_name_async(file_id, "attr_dset", "attr_cmpd", ftype_id, space_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Calculate size of data buffers */ + for (i = 0, data_size = 1; i < ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= sizeof(tattr_cmpd_t); + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for attribute write\n"); + TEST_ERROR; + } + + if (NULL == (read_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for attribute read\n"); + TEST_ERROR; + } + + if (NULL == (fbuf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for attribute read verification\n"); + TEST_ERROR; + } + + /* Initialize write_buf. 
*/ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + write_buf[i].a = 10 * (int)i; + write_buf[i].b = (10 * (int)i) + 1; + } + + /* Write the attribute asynchronously */ + if (H5Awrite_async(attr_id, mtype_id, write_buf, es_id) < 0) + TEST_ERROR; + + /* Update fbuf */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + fbuf[i].a = write_buf[i].a; + fbuf[i].b = write_buf[i].b; + } + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. */ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + TEST_ERROR; + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, mtype_id, read_buf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + if (read_buf[i].a != fbuf[i].a) { + H5_FAILED(); + HDprintf(" data verification failed for field 'a'\n"); + goto error; + } /* end if */ + if (read_buf[i].b != fbuf[i].b) { + H5_FAILED(); + HDprintf(" data verification failed for field 'b'\n"); + goto error; + } /* end if */ + } + + /* Clear the read buffer */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + read_buf[i].a = -2; + read_buf[i].b = -2; + } + + /* Read the attribute asynchronously (element a only) */ + if (H5Aread_async(attr_id, mtypea_id, read_buf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + if (read_buf[i].a != fbuf[i].a) { + H5_FAILED(); + HDprintf(" data verification failed for field 'a'\n"); + goto error; + } /* end if */ + if (read_buf[i].b != -2) { + H5_FAILED(); + HDprintf(" data verification failed for field 'b'\n"); + goto error; + } /* end if */ + } + + /* Clear the read buffer */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + read_buf[i].a = -2; + read_buf[i].b = -2; + } + + /* Read the attribute asynchronously (element b only) */ + if (H5Aread_async(attr_id, mtypeb_id, read_buf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + if (read_buf[i].a != -2) { + H5_FAILED(); + HDprintf(" data verification failed for field 'a'\n"); + goto error; + } /* end if */ + if (read_buf[i].b != fbuf[i].b) { + H5_FAILED(); + HDprintf(" data verification failed for field 'b'\n"); + goto error; + } /* end if */ + } + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Barrier failed\n"); + goto error; + } + + /* Update write_buf */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + write_buf[i].a += 2 * 6 * 10; + write_buf[i].b += 2 * 6 * 10; + } + + /* Write the attribute asynchronously (element a only) */ + if (H5Awrite_async(attr_id, mtypea_id, write_buf, es_id) < 0) + TEST_ERROR; + + /* Update fbuf */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + fbuf[i].a = write_buf[i].a; + } + + /* Flush the dataset asynchronously. 
This will effectively work as a + * barrier, guaranteeing the read takes place after the write. */ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + TEST_ERROR; + + /* Clear the read buffer */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + read_buf[i].a = -2; + read_buf[i].b = -2; + } + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, mtype_id, read_buf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + if (read_buf[i].a != fbuf[i].a) { + H5_FAILED(); + HDprintf(" data verification failed for field 'a'\n"); + goto error; + } /* end if */ + if (read_buf[i].b != fbuf[i].b) { + H5_FAILED(); + HDprintf(" data verification failed for field 'b'\n"); + goto error; + } /* end if */ + } + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Barrier failed\n"); + goto error; + } + + /* Update write_buf */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + write_buf[i].a += 2 * 6 * 10; + write_buf[i].b += 2 * 6 * 10; + } + + /* Write the attribute asynchronously (element b only) */ + if (H5Awrite_async(attr_id, mtypeb_id, write_buf, es_id) < 0) + TEST_ERROR; + + /* Update fbuf */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + fbuf[i].b = write_buf[i].b; + } + + /* Flush the dataset asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the write. */ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + TEST_ERROR; + + /* Clear the read buffer */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + read_buf[i].a = -2; + read_buf[i].b = -2; + } + + /* Read the attribute asynchronously */ + if (H5Aread_async(attr_id, mtype_id, read_buf, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify the read data */ + for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) { + if (read_buf[i].a != fbuf[i].a) { + H5_FAILED(); + HDprintf(" data verification failed for field 'a'\n"); + goto error; + } /* end if */ + if (read_buf[i].b != fbuf[i].b) { + H5_FAILED(); + HDprintf(" data verification failed for field 'b'\n"); + goto error; + } /* end if */ + } + + /* Close */ + if (H5Aclose_async(attr_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + if (H5Sclose(space_id) < 0) + TEST_ERROR; + if (H5Tclose(mtype_id) < 0) + TEST_ERROR; + if (H5Tclose(ftype_id) < 0) + TEST_ERROR; + if (H5Tclose(mtypea_id) < 0) + TEST_ERROR; + if (H5Tclose(mtypeb_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (fbuf) { + HDfree(fbuf); + fbuf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (fbuf) + HDfree(fbuf); + if (dims) + 
HDfree(dims); + H5Sclose(space_id); + H5Tclose(mtype_id); + H5Tclose(ftype_id); + H5Tclose(mtypea_id); + H5Tclose(mtypeb_id); + H5Aclose(attr_id); + H5Pclose(fapl_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Tests async group interfaces in parallel + */ +static int +test_group(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t parent_group_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t subgroup_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + H5G_info_t info1; + H5G_info_t info2; + H5G_info_t info3; + size_t num_in_progress; + hbool_t op_failed; + + TESTING("group operations"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf(" API functions for basic file, group, group more, creation order, or flush aren't " + "supported with this connector\n"); + } + + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create GCPL */ + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) + TEST_ERROR; + + /* Track creation order */ + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0) + TEST_ERROR; + + /* Create the parent group asynchronously */ + if ((parent_group_id = + H5Gcreate_async(file_id, "group_parent", H5P_DEFAULT, gcpl_id, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create 3 subgroups asynchronously, the first with no sub-subgroups, the + * second with 1, and the third with 2 */ + if ((group_id = + H5Gcreate_async(parent_group_id, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + if (H5Gclose_async(group_id, es_id) < 0) + TEST_ERROR; + + if ((group_id = + H5Gcreate_async(parent_group_id, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + if ((subgroup_id = H5Gcreate_async(group_id, "subgroup1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + TEST_ERROR; + if (H5Gclose_async(subgroup_id, es_id) < 0) + TEST_ERROR; + if (H5Gclose_async(group_id, es_id) < 0) + TEST_ERROR; + + if ((group_id = + H5Gcreate_async(parent_group_id, "group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + if ((subgroup_id = H5Gcreate_async(group_id, "subgroup1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + TEST_ERROR; + if (H5Gclose_async(subgroup_id, es_id) < 0) + TEST_ERROR; + if ((subgroup_id = H5Gcreate_async(group_id, "subgroup2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + TEST_ERROR; + if (H5Gclose_async(subgroup_id, es_id) < 0) + TEST_ERROR; + if (H5Gclose_async(group_id, es_id) < 0) + TEST_ERROR; + + /* Flush the file asynchronously. This will effectively work as a barrier, + * guaranteeing the read takes place after the write. 
*/ + if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0) + TEST_ERROR; + + /* Test H5Gget_info_async */ + /* Open group1 asynchronously */ + if ((group_id = H5Gopen_async(parent_group_id, "group1", H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Get info */ + if (H5Gget_info_async(group_id, &info1, es_id) < 0) + TEST_ERROR; + + /* Test H5Gget_info_by_idx_async */ + if (H5Gget_info_by_idx_async(parent_group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &info2, + H5P_DEFAULT, es_id) < 0) + TEST_ERROR; + + /* Test H5Gget_info_by_name_async */ + if (H5Gget_info_by_name_async(parent_group_id, "group3", &info3, H5P_DEFAULT, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Verify group infos */ + if (info1.nlinks != 0) + FAIL_PUTS_ERROR(" incorrect number of links") + if (info2.nlinks != 1) + FAIL_PUTS_ERROR(" incorrect number of links") + if (info3.nlinks != 2) + FAIL_PUTS_ERROR(" incorrect number of links") + + /* Close */ + if (H5Gclose_async(group_id, es_id) < 0) + TEST_ERROR; + if (H5Gclose_async(parent_group_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Pclose(gcpl_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Gclose(subgroup_id); + H5Gclose(group_id); + H5Gclose(parent_group_id); + H5Fclose(file_id); + H5Pclose(fapl_id); + H5Pclose(gcpl_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Tests async link interfaces in parallel + */ +static int +test_link(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t parent_group_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t gcpl_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + hbool_t existsh1; + hbool_t existsh2; + hbool_t existsh3; + hbool_t existss1; + hbool_t existss2; + hbool_t existss3; + size_t num_in_progress; + hbool_t op_failed = false; + hbool_t is_native_vol = false; + + TESTING("link operations"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf(" API functions for basic file, link, hard link, soft link, flush, or creation order " + "aren't supported with this connector\n"); + } + + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create GCPL */ + if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) + TEST_ERROR; + + /* Track creation order */ + if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, 
fapl_id, es_id)) < 0) + TEST_ERROR; + + /* Find out if the native connector is used */ + if (H5VLobject_is_native(file_id, &is_native_vol) < 0) + TEST_ERROR; + + /* Create the parent group asynchronously */ + if ((parent_group_id = + H5Gcreate_async(file_id, "link_parent", H5P_DEFAULT, gcpl_id, H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Create subgroup asynchronously. */ + if ((group_id = H5Gcreate_async(parent_group_id, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < + 0) + TEST_ERROR; + if (H5Gclose_async(group_id, es_id) < 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the link to the subgroup is visible to later tasks. + * Skip this function for the native vol because it isn't supported in parallel. + */ + if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0) + TEST_ERROR; + + /* Create hard link asynchronously */ + if (H5Lcreate_hard_async(parent_group_id, "group", parent_group_id, "hard_link", H5P_DEFAULT, H5P_DEFAULT, + es_id) < 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the soft link create takes place after the hard + * link create. Skip this function for the native vol because it isn't supported in parallel. + */ + if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0) + TEST_ERROR; + + /* Create soft link asynchronously */ + if (H5Lcreate_soft_async("/link_parent/group", parent_group_id, "soft_link", H5P_DEFAULT, H5P_DEFAULT, + es_id) < 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the writes. + * Skip this function for the native vol because it isn't supported in parallel. + */ + if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Barrier failed\n"); + goto error; + } + + /* Check if hard link exists */ + if (H5Lexists_async(parent_group_id, "hard_link", &existsh1, H5P_DEFAULT, es_id) < 0) + TEST_ERROR; + + /* Check if soft link exists */ + if (H5Lexists_async(parent_group_id, "soft_link", &existss1, H5P_DEFAULT, es_id) < 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the delete takes place after the reads. + * Skip this function for the native vol because it isn't supported in parallel. + */ + if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0) + TEST_ERROR; + + /* Delete soft link by index */ + if (H5Ldelete_by_idx_async(parent_group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, H5P_DEFAULT, es_id) < + 0) + TEST_ERROR; + + /* Flush the parent group asynchronously. This will effectively work as a + * barrier, guaranteeing the read takes place after the delete. + * Skip this function for the native vol because it isn't supported in parallel. 
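
The repeated "skip this function for the native vol" guard can be read as a small helper: query the connector once with H5VLobject_is_native() and turn H5Oflush_async() into a no-op when the native VOL is driving the file, since the object flush is not usable in parallel there. A minimal editor's sketch of that guard, assuming the same headers the test already pulls in:

#include "hdf5.h"

/* Illustrative sketch (not part of this patch): flush an object asynchronously
 * unless the file is backed by the native VOL connector, in which case the
 * call is skipped rather than allowed to fail in parallel. */
static herr_t
flush_object_if_supported(hid_t file_id, hid_t obj_id, hid_t es_id)
{
    hbool_t is_native = 0;

    /* Ask the VOL layer whether this file uses the native connector */
    if (H5VLobject_is_native(file_id, &is_native) < 0)
        return -1;

    if (is_native)
        return 0; /* treat the flush as a no-op for the native VOL in parallel */

    return H5Oflush_async(obj_id, es_id);
}
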
+ */
+    if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+        TEST_ERROR;
+
+    /* Wait for the event stack to complete */
+    if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+        TEST_ERROR;
+    if (op_failed)
+        TEST_ERROR;
+
+    if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+        H5_FAILED();
+        HDprintf("    MPI_Barrier failed\n");
+        goto error;
+    }
+
+    /* Check if hard link exists */
+    if (H5Lexists_async(parent_group_id, "hard_link", &existsh2, H5P_DEFAULT, es_id) < 0)
+        TEST_ERROR;
+
+    /* Check if soft link exists */
+    if (H5Lexists_async(parent_group_id, "soft_link", &existss2, H5P_DEFAULT, es_id) < 0)
+        TEST_ERROR;
+
+    /* Flush the parent group asynchronously. This will effectively work as a
+     * barrier, guaranteeing the delete takes place after the reads.
+     * Skip this function for the native vol because it isn't supported in parallel.
+     */
+    if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+        TEST_ERROR;
+
+    /* Delete hard link */
+    if (H5Ldelete_async(parent_group_id, "hard_link", H5P_DEFAULT, es_id) < 0)
+        TEST_ERROR;
+
+    /* Flush the parent group asynchronously. This will effectively work as a
+     * barrier, guaranteeing the read takes place after the delete.
+     * Skip this function for the native vol because it isn't supported in parallel.
+     */
+    if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+        TEST_ERROR;
+
+    /* Wait for the event stack to complete */
+    if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+        TEST_ERROR;
+    if (op_failed)
+        TEST_ERROR;
+
+    if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+        H5_FAILED();
+        HDprintf("    MPI_Barrier failed\n");
+        goto error;
+    }
+
+    /* Check if hard link exists */
+    if (H5Lexists_async(parent_group_id, "hard_link", &existsh3, H5P_DEFAULT, es_id) < 0)
+        TEST_ERROR;
+
+    /* Check if soft link exists */
+    if (H5Lexists_async(parent_group_id, "soft_link", &existss3, H5P_DEFAULT, es_id) < 0)
+        TEST_ERROR;
+
+    /* Wait for the event stack to complete */
+    if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+        TEST_ERROR;
+    if (op_failed)
+        TEST_ERROR;
+
+    /* Check if existence returns were correct */
+    if (!existsh1)
+        FAIL_PUTS_ERROR("    link exists returned FALSE for link that should exist")
+    if (!existss1)
+        FAIL_PUTS_ERROR("    link exists returned FALSE for link that should exist")
+    if (!existsh2)
+        FAIL_PUTS_ERROR("    link exists returned FALSE for link that should exist")
+    if (existss2)
+        FAIL_PUTS_ERROR("    link exists returned TRUE for link that should not exist")
+    if (existsh3)
+        FAIL_PUTS_ERROR("    link exists returned TRUE for link that should not exist")
+    if (existss3)
+        FAIL_PUTS_ERROR("    link exists returned TRUE for link that should not exist")
+
+    /* Close */
+    if (H5Gclose_async(parent_group_id, es_id) < 0)
+        TEST_ERROR;
+    if (H5Fclose_async(file_id, es_id) < 0)
+        TEST_ERROR;
+    if (H5Pclose(fapl_id) < 0)
+        TEST_ERROR;
+    if (H5Pclose(gcpl_id) < 0)
+        TEST_ERROR;
+
+    /* Wait for the event stack to complete */
+    if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+        TEST_ERROR;
+    if (op_failed)
+        TEST_ERROR;
+
+    if (H5ESclose(es_id) < 0)
+        TEST_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY
+    {
+        H5Gclose(group_id);
+        H5Gclose(parent_group_id);
+        H5Fclose(file_id);
+        H5Pclose(fapl_id);
+        H5Pclose(gcpl_id);
+        H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+        H5ESclose(es_id);
+    }
+    H5E_END_TRY;
+
+    return 1;
+}
+
+/*
+ * Tests H5Ocopy_async and
H5Orefresh_async in parallel
+ */
+#define OCOPY_REFRESH_TEST_SPACE_RANK 2
+static int
+test_ocopy_orefresh(void)
+{
+    hsize_t *dims = NULL;
+    hid_t    file_id = H5I_INVALID_HID;
+    hid_t    fapl_id = H5I_INVALID_HID;
+    hid_t    parent_group_id = H5I_INVALID_HID;
+    hid_t    dset_id = H5I_INVALID_HID;
+    hid_t    space_id = H5I_INVALID_HID;
+    hid_t    es_id = H5I_INVALID_HID;
+    size_t   num_in_progress;
+    hbool_t  op_failed = false;
+    hbool_t  is_native_vol = false;
+
+    TESTING("H5Ocopy() and H5Orefresh()");
+
+    /* Make sure the connector supports the API functions being tested */
+    if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+        !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) ||
+        !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+        if (MAINPROCESS) {
+            SKIPPED();
+            HDprintf("    API functions for basic file, group, dataset, object more, flush, or refresh "
+                     "aren't supported with this connector\n");
+        }
+
+        return 0;
+    }
+
+    if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+        TEST_ERROR;
+
+    /* Generate dataspace dimensions */
+    if (generate_random_parallel_dimensions(OCOPY_REFRESH_TEST_SPACE_RANK, &dims) < 0)
+        TEST_ERROR;
+
+    /* Create dataspace */
+    if ((space_id = H5Screate_simple(OCOPY_REFRESH_TEST_SPACE_RANK, dims, NULL)) < 0)
+        TEST_ERROR;
+
+    /* Create event stack */
+    if ((es_id = H5EScreate()) < 0)
+        TEST_ERROR;
+
+    /* Open file asynchronously */
+    if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+        TEST_ERROR;
+
+    /* Find out if the native connector is used */
+    if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+        TEST_ERROR;
+
+    /* Create the parent group asynchronously */
+    if ((parent_group_id =
+             H5Gcreate_async(file_id, "ocopy_parent", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+        TEST_ERROR;
+
+    /* Create dataset asynchronously. */
+    if ((dset_id = H5Dcreate_async(parent_group_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+                                   H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+        TEST_ERROR;
+    if (H5Dclose_async(dset_id, es_id) < 0)
+        TEST_ERROR;
+
+    /* Flush the parent group asynchronously. This will effectively work as a
+     * barrier, guaranteeing the copy takes place after the dataset create.
+     * Skip this function for the native vol because it isn't supported in parallel.
+     */
+    if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+        TEST_ERROR;
+
+    /* Copy dataset */
+    if (H5Ocopy_async(parent_group_id, "dset", parent_group_id, "copied_dset", H5P_DEFAULT, H5P_DEFAULT,
+                      es_id) < 0)
+        TEST_ERROR;
+
+    /* Flush the parent group asynchronously. This will effectively work as a
+     * barrier, guaranteeing the dataset open takes place after the copy.
+     * Skip this function for the native vol because it isn't supported in parallel.
+ */ + if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0) + TEST_ERROR; + + if (!coll_metadata_read) { + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Barrier failed\n"); + goto error; + } + } + + /* Open the copied dataset asynchronously */ + if ((dset_id = H5Dopen_async(parent_group_id, "copied_dset", H5P_DEFAULT, es_id)) < 0) + TEST_ERROR; + + /* Refresh the copied dataset asynchronously */ + if (H5Orefresh(dset_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Close */ + if (H5Dclose_async(dset_id, es_id) < 0) + TEST_ERROR; + if (H5Gclose_async(parent_group_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (dims) + HDfree(dims); + H5Sclose(space_id); + H5Dclose(dset_id); + H5Gclose(parent_group_id); + H5Pclose(fapl_id); + H5Fclose(file_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} +#undef OCOPY_REFRESH_TEST_SPACE_RANK + +/* + * Tests H5Freopen_async in parallel + */ +static int +test_file_reopen(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t reopened_file_id = H5I_INVALID_HID; + hid_t es_id = H5I_INVALID_HID; + size_t num_in_progress; + hbool_t op_failed; + + TESTING("H5Freopen()"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf(" API functions for basic file or file more aren't supported with this connector\n"); + } + + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0) + TEST_ERROR; + + /* Create event stack */ + if ((es_id = H5EScreate()) < 0) + TEST_ERROR; + + /* Open file asynchronously */ + if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0) + TEST_ERROR; + + /* Reopen file asynchronously */ + if ((reopened_file_id = H5Freopen_async(file_id, es_id)) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + /* Close */ + if (H5Fclose_async(reopened_file_id, es_id) < 0) + TEST_ERROR; + if (H5Fclose_async(file_id, es_id) < 0) + TEST_ERROR; + + /* Wait for the event stack to complete */ + if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0) + TEST_ERROR; + if (op_failed) + TEST_ERROR; + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5ESclose(es_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Fclose(reopened_file_id); + H5Fclose(file_id); + H5Pclose(fapl_id); + H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, 
&num_in_progress, &op_failed); + H5ESclose(es_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Cleanup temporary test files + */ +static void +cleanup_files(void) +{ + char file_name[64]; + int i; + + if (MAINPROCESS) { + H5Fdelete(PAR_ASYNC_API_TEST_FILE, H5P_DEFAULT); + for (i = 0; i <= max_printf_file; i++) { + snprintf(file_name, 64, PAR_ASYNC_API_TEST_FILE_PRINTF, i); + H5Fdelete(file_name, H5P_DEFAULT); + } /* end for */ + } +} + +int +H5_api_async_test_parallel(void) +{ + size_t i; + int nerrors; + + if (MAINPROCESS) { + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Parallel Async Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + } + + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC)) { + if (MAINPROCESS) { + SKIPPED(); + HDprintf(" Async APIs aren't supported with this connector\n"); + } + + return 0; + } + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_async_tests); i++) { + nerrors += (*par_async_tests[i])() ? 1 : 0; + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" MPI_Barrier() failed!\n"); + } + } + + if (MAINPROCESS) { + HDprintf("\n"); + HDprintf("Cleaning up testing files\n"); + } + + cleanup_files(); + + if (MAINPROCESS) { + HDprintf("\n * Re-testing with independent metadata reads *\n"); + } + + coll_metadata_read = FALSE; + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_async_tests); i++) { + nerrors += (*par_async_tests[i])() ? 1 : 0; + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" MPI_Barrier() failed!\n"); + } + } + + if (MAINPROCESS) { + HDprintf("\n"); + HDprintf("Cleaning up testing files\n"); + } + + cleanup_files(); + + return nerrors; +} + +#else /* H5ESpublic_H */ + +int +H5_api_async_test_parallel(void) +{ + if (MAINPROCESS) { + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Parallel Async Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + } + + HDprintf("SKIPPED due to no async support in HDF5 library\n"); + + return 0; +} + +#endif diff --git a/testpar/API/H5_api_async_test_parallel.h b/testpar/API/H5_api_async_test_parallel.h new file mode 100644 index 00000000000..9e4340c48a9 --- /dev/null +++ b/testpar/API/H5_api_async_test_parallel.h @@ -0,0 +1,29 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_ASYNC_TEST_PARALLEL_H_ +#define H5_API_ASYNC_TEST_PARALLEL_H_ + +#include "H5_api_test_parallel.h" + +int H5_api_async_test_parallel(void); + +/******************************************************** + * * + * API parallel async test defines * + * * + ********************************************************/ + +#define PAR_ASYNC_API_TEST_FILE "H5_api_async_test_parallel.h5" +#define PAR_ASYNC_API_TEST_FILE_PRINTF "H5_api_async_test_parallel_%d.h5" + +#endif /* H5_API_ASYNC_TEST_PARALLEL_H_ */ diff --git a/testpar/API/H5_api_attribute_test_parallel.c b/testpar/API/H5_api_attribute_test_parallel.c new file mode 100644 index 00000000000..cffbfcd8187 --- /dev/null +++ b/testpar/API/H5_api_attribute_test_parallel.c @@ -0,0 +1,47 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_attribute_test_parallel.h" + +/* + * The array of parallel attribute tests to be performed. + */ +static int (*par_attribute_tests[])(void) = {NULL}; + +int +H5_api_attribute_test_parallel(void) +{ + size_t i; + int nerrors; + + if (MAINPROCESS) { + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Parallel Attribute Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + } + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_attribute_tests); i++) { + /* nerrors += (*par_attribute_tests[i])() ? 1 : 0; */ + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" MPI_Barrier() failed!\n"); + } + } + + if (MAINPROCESS) + HDprintf("\n"); + + return nerrors; +} diff --git a/testpar/API/H5_api_attribute_test_parallel.h b/testpar/API/H5_api_attribute_test_parallel.h new file mode 100644 index 00000000000..81802ae8633 --- /dev/null +++ b/testpar/API/H5_api_attribute_test_parallel.h @@ -0,0 +1,20 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_ATTRIBUTE_TEST_PARALLEL_H_ +#define H5_API_ATTRIBUTE_TEST_PARALLEL_H_ + +#include "H5_api_test_parallel.h" + +int H5_api_attribute_test_parallel(void); + +#endif /* H5_API_ATTRIBUTE_TEST_PARALLEL_H_ */ diff --git a/testpar/API/H5_api_dataset_test_parallel.c b/testpar/API/H5_api_dataset_test_parallel.c new file mode 100644 index 00000000000..fd02a7ffbd3 --- /dev/null +++ b/testpar/API/H5_api_dataset_test_parallel.c @@ -0,0 +1,8149 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * XXX: Better documentation for each test about how the selections get + * split up among MPI ranks. + */ +#include "H5_api_dataset_test_parallel.h" + +static int test_write_dataset_data_verification(void); +static int test_write_dataset_independent(void); +static int test_write_dataset_one_proc_0_selection(void); +static int test_write_dataset_one_proc_none_selection(void); +static int test_write_dataset_one_proc_all_selection(void); +static int test_write_dataset_hyper_file_all_mem(void); +static int test_write_dataset_all_file_hyper_mem(void); +static int test_write_dataset_point_file_all_mem(void); +static int test_write_dataset_all_file_point_mem(void); +static int test_write_dataset_hyper_file_point_mem(void); +static int test_write_dataset_point_file_hyper_mem(void); +static int test_read_dataset_one_proc_0_selection(void); +static int test_read_dataset_one_proc_none_selection(void); +static int test_read_dataset_one_proc_all_selection(void); +static int test_read_dataset_hyper_file_all_mem(void); +static int test_read_dataset_all_file_hyper_mem(void); +static int test_read_dataset_point_file_all_mem(void); +static int test_read_dataset_all_file_point_mem(void); +static int test_read_dataset_hyper_file_point_mem(void); +static int test_read_dataset_point_file_hyper_mem(void); + +/* + * Chunking tests + */ +static int test_write_multi_chunk_dataset_same_shape_read(void); +static int test_write_multi_chunk_dataset_diff_shape_read(void); +static int test_overwrite_multi_chunk_dataset_same_shape_read(void); +static int test_overwrite_multi_chunk_dataset_diff_shape_read(void); + +/* + * The array of parallel dataset tests to be performed. 
+ */ +static int (*par_dataset_tests[])(void) = { + test_write_dataset_data_verification, + test_write_dataset_independent, + test_write_dataset_one_proc_0_selection, + test_write_dataset_one_proc_none_selection, + test_write_dataset_one_proc_all_selection, + test_write_dataset_hyper_file_all_mem, + test_write_dataset_all_file_hyper_mem, + test_write_dataset_point_file_all_mem, + test_write_dataset_all_file_point_mem, + test_write_dataset_hyper_file_point_mem, + test_write_dataset_point_file_hyper_mem, + test_read_dataset_one_proc_0_selection, + test_read_dataset_one_proc_none_selection, + test_read_dataset_one_proc_all_selection, + test_read_dataset_hyper_file_all_mem, + test_read_dataset_all_file_hyper_mem, + test_read_dataset_point_file_all_mem, + test_read_dataset_all_file_point_mem, + test_read_dataset_hyper_file_point_mem, + test_read_dataset_point_file_hyper_mem, + test_write_multi_chunk_dataset_same_shape_read, + test_write_multi_chunk_dataset_diff_shape_read, + test_overwrite_multi_chunk_dataset_same_shape_read, + test_overwrite_multi_chunk_dataset_diff_shape_read, +}; + +/* + * A test to ensure that data is read back correctly from + * a dataset after it has been written in parallel. The test + * covers simple examples of using H5S_ALL selections, + * hyperslab selections and point selections. + */ +#define DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK 3 +#define DATASET_WRITE_DATA_VERIFY_TEST_NUM_POINTS 10 +#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME "dataset_write_data_verification_test" +#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1 "dataset_write_data_verification_all" +#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2 "dataset_write_data_verification_hyperslab" +#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3 "dataset_write_data_verification_points" +static int +test_write_dataset_data_verification(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + hsize_t start[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK]; + hsize_t stride[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK]; + hsize_t count[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK]; + hsize_t block[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK]; + hsize_t *points = NULL; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING_MULTIPART("verification of dataset data using H5Dwrite then H5Dread"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + 
H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME); + goto error; + } + + if (generate_random_parallel_dimensions(DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if ((fspace_id = H5Screate_simple(DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1, + DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1); + goto error; + } + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2, + DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2); + goto error; + } + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3, + DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3); + goto error; + } + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Dwrite_all_read) + { + hbool_t op_failed = FALSE; + + TESTING_2("H5Dwrite using H5S_ALL then H5Dread"); + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1); + PART_ERROR(H5Dwrite_all_read); + } + + /* + * Write data to dataset on rank 0 only. All ranks will read the data back. 
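
Having only rank 0 write while every rank reads back means the writer's success has to be agreed on collectively before the verification step; the test does this with an MPI_Allreduce on a boolean flag. The sketch below shows the same idea in isolation, reduced with MPI_LOR so that a failure on any rank is visible to all. It is an editor's illustration only; dset_id and the write buffer are assumed to exist.

#include "hdf5.h"
#include <mpi.h>

/* Illustrative sketch (not part of this patch): rank 0 writes, then all ranks
 * agree on whether the write succeeded before moving on to read/verify. */
static int
rank0_write_then_agree(hid_t dset_id, const int *wbuf)
{
    int mpi_rank = 0;
    int failed   = 0; /* set to 1 if the writing rank hits an error */

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    if (mpi_rank == 0)
        if (H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
            failed = 1;

    /* Logical OR across ranks: if any rank failed, every rank sees it */
    if (MPI_Allreduce(MPI_IN_PLACE, &failed, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD) != MPI_SUCCESS)
        return -1;

    return failed ? -1 : 0;
}
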
+ */ + if (MAINPROCESS) { + for (i = 0, data_size = 1; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; + + if (NULL != (write_buf = HDmalloc(data_size))) { + for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++) + ((int *)write_buf)[i] = (int)i; + + if (H5Dwrite(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) + op_failed = TRUE; + } + else + op_failed = TRUE; + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + } + + if (MPI_SUCCESS != + MPI_Allreduce(MPI_IN_PLACE, &op_failed, 1, MPI_C_BOOL, MPI_LAND, MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" couldn't determine if dataset write on rank 0 succeeded\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if (op_failed == TRUE) { + H5_FAILED(); + HDprintf(" dataset write on rank 0 failed!\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. + */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + PART_ERROR(H5Dwrite_all_read); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + PART_ERROR(H5Dwrite_all_read); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + PART_ERROR(H5Dwrite_all_read); + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + PART_ERROR(H5Dwrite_all_read); + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + PART_ERROR(H5Dwrite_all_read); + } + if ((group_id = + H5Gopen2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME); + PART_ERROR(H5Dwrite_all_read); + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1); + PART_ERROR(H5Dwrite_all_read); + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if (H5Dread(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1); + PART_ERROR(H5Dwrite_all_read); + } + + for (i = 0; i < (hsize_t)space_npoints; i++) + if (((int *)read_buf)[i] != (int)i) { + 
H5_FAILED(); + HDprintf(" H5S_ALL selection data verification failed\n"); + PART_ERROR(H5Dwrite_all_read); + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + PASSED(); + } + PART_END(H5Dwrite_all_read); + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Dwrite_hyperslab_read) + { + TESTING_2("H5Dwrite using hyperslab selection then H5Dread"); + + for (i = 1, data_size = 1; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++) + ((int *)write_buf)[i] = mpi_rank; + + /* Each MPI rank writes to a single row in the second dimension + * and the entirety of the following dimensions. The combined + * selections from all MPI ranks spans the first dimension. + */ + for (i = 0; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + { + hsize_t mdims[] = {data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + } + + if (H5Dwrite(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + PART_ERROR(H5Dwrite_hyperslab_read); + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + PART_ERROR(H5Dwrite_hyperslab_read); + } + if ((group_id = + H5Gopen2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + if (H5Dread(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2); + PART_ERROR(H5Dwrite_hyperslab_read); + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + + for (j = 0; j < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; j++) { + if (((int *) + read_buf)[j + (i * (data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))] != + (int)i) { + H5_FAILED(); + HDprintf(" hyperslab selection data verification failed\n"); + PART_ERROR(H5Dwrite_hyperslab_read); + } + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + PASSED(); + } + PART_END(H5Dwrite_hyperslab_read); + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Dwrite_point_sel_read) + { + TESTING_2("H5Dwrite using point selection then H5Dread"); + + for (i = 1, data_size = 1; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + 
H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + /* Use different data than the previous test to ensure that the data actually changed. */ + for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++) + ((int *)write_buf)[i] = mpi_size - mpi_rank; + + if (NULL == (points = HDmalloc(DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK * + (data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE) * + sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for point selection\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + /* Each MPI rank writes to a single row in the second dimension + * and the entirety of the following dimensions. The combined + * selections from all MPI ranks spans the first dimension. + */ + for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++) { + size_t j; + + for (j = 0; j < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; j++) { + size_t idx = (i * DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK) + j; + + if (j == 0) + points[idx] = (hsize_t)mpi_rank; + else if (j != DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK - 1) + points[idx] = i / dims[j + 1]; + else + points[idx] = i % dims[j]; + } + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, + data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE, points) < 0) { + H5_FAILED(); + HDprintf(" couldn't select elements in dataspace\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + { + hsize_t mdims[] = {data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + } + + if (H5Dwrite(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + PART_ERROR(H5Dwrite_point_sel_read); + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + PART_ERROR(H5Dwrite_point_sel_read); + } + if ((group_id = + H5Gopen2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + + if (H5Dread(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3); + PART_ERROR(H5Dwrite_point_sel_read); + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + + for (j = 0; j < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; j++) { + if (((int *) + read_buf)[j + (i * (data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))] != + (mpi_size - (int)i)) { + H5_FAILED(); + HDprintf(" point selection data verification failed\n"); + PART_ERROR(H5Dwrite_point_sel_read); + } + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + PASSED(); + } + PART_END(H5Dwrite_point_sel_read); + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + if (points) { + HDfree(points); + points = NULL; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (points) { + HDfree(points); + points = NULL; + } + + if (dims) { + 
HDfree(dims); + dims = NULL; + } + + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (points) + HDfree(points); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that independent dataset writes function + * as expected. First, two datasets are created in the file. + * Then, the even MPI ranks first write to dataset 1, followed + * by dataset 2. The odd MPI ranks first write to dataset 2, + * followed by dataset 1. After this, the data is read back from + * each dataset and verified. + */ +#define DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK 3 +#define DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME "independent_dataset_write_test" +#define DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1 "dset1" +#define DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2 "dset2" +static int +test_write_dataset_independent(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + hsize_t start[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK]; + hsize_t stride[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK]; + hsize_t count[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK]; + hsize_t block[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id1 = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("independent writing to different datasets by different ranks"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME); + goto error; + } + + /* + * Setup dimensions of overall datasets and slabs local + * to the MPI rank. 
+ */ + if (generate_random_parallel_dimensions(DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if ((fspace_id = H5Screate_simple(DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + /* create a dataset collectively */ + if ((dset_id1 = H5Dcreate2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1, + DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create first dataset\n"); + goto error; + } + if ((dset_id2 = H5Dcreate2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2, + DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" failed to create second dataset\n"); + goto error; + } + + for (i = 1, data_size = 1; i < DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + goto error; + } + + for (i = 0; i < data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE; i++) + ((int *)write_buf)[i] = mpi_rank; + + for (i = 0; i < DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + goto error; + } + + { + hsize_t mdims[] = {data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + /* + * To test the independent orders of writes between processes, all + * even number processes write to dataset1 first, then dataset2. + * All odd number processes write to dataset2 first, then dataset1. + */ + BEGIN_INDEPENDENT_OP(dset_write) + { + if (mpi_rank % 2 == 0) { + if (H5Dwrite(dset_id1, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" even ranks failed to write to dataset 1\n"); + INDEPENDENT_OP_ERROR(dset_write); + } + if (H5Dwrite(dset_id2, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" even ranks failed to write to dataset 2\n"); + INDEPENDENT_OP_ERROR(dset_write); + } + } + else { + if (H5Dwrite(dset_id2, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" odd ranks failed to write to dataset 2\n"); + INDEPENDENT_OP_ERROR(dset_write); + } + if (H5Dwrite(dset_id1, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" odd ranks failed to write to dataset 1\n"); + INDEPENDENT_OP_ERROR(dset_write); + } + } + } + END_INDEPENDENT_OP(dset_write); + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + H5Sclose(mspace_id); + mspace_id = H5I_INVALID_HID; + H5Sclose(fspace_id); + fspace_id = H5I_INVALID_HID; + H5Dclose(dset_id1); + dset_id1 = H5I_INVALID_HID; + H5Dclose(dset_id2); + dset_id2 = H5I_INVALID_HID; + + /* + * Close and re-open the file to ensure that the data gets written. 
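
Before the close-and-reopen step that follows, it is worth spelling out the even/odd ordering used above: with independent (default) transfers, even ranks may write dataset 1 then dataset 2 while odd ranks do the opposite, so nothing may depend on a matching call sequence across ranks. A compact editor's sketch of that decision, assuming the datasets, selections, and buffer are set up as in the test:

#include "hdf5.h"
#include <mpi.h>

/* Illustrative sketch (not part of this patch): even ranks write dset1 then
 * dset2, odd ranks write dset2 then dset1, all with independent transfers. */
static herr_t
independent_order_write(hid_t dset1, hid_t dset2, hid_t mspace, hid_t fspace, const int *buf)
{
    int mpi_rank = 0;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    if (mpi_rank % 2 == 0) {
        if (H5Dwrite(dset1, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf) < 0)
            return -1;
        if (H5Dwrite(dset2, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf) < 0)
            return -1;
    }
    else {
        if (H5Dwrite(dset2, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf) < 0)
            return -1;
        if (H5Dwrite(dset1, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf) < 0)
            return -1;
    }

    return 0;
}
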
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + goto error; + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + goto error; + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + goto error; + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id1 = H5Dopen2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1); + goto error; + } + if ((dset_id2 = H5Dopen2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2); + goto error; + } + + /* + * Verify that data has been written correctly. + */ + if ((fspace_id = H5Dget_space(dset_id1)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + if (H5Dread(dset_id1, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1); + goto error; + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + + for (j = 0; j < data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE; j++) { + if (((int *)read_buf)[j + (i * (data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE))] != + (int)i) { + H5_FAILED(); + HDprintf(" dataset 1 data verification failed\n"); + goto error; + } + } + } + + if (H5Dread(dset_id2, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2); + goto error; + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + + for (j = 0; j < data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE; j++) { + if (((int *)read_buf)[j + (i * (data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE))] != + (int)i) { + H5_FAILED(); + HDprintf(" dataset 2 data verification failed\n"); + goto error; + } + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id1) < 0) + TEST_ERROR; + if (H5Dclose(dset_id2) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + 
TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id1); + H5Dclose(dset_id2); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be written to by having + * one of the MPI ranks select 0 rows in a hyperslab selection. + */ +#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK 2 +#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME "one_rank_0_sel_write_test" +#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME "one_rank_0_sel_dset" +static int +test_write_dataset_one_proc_0_selection(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + hsize_t start[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK]; + hsize_t stride[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK]; + hsize_t count[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK]; + hsize_t block[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("write to dataset with one rank selecting 0 rows"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME); + goto error; + } + + if (generate_random_parallel_dimensions(DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if ((fspace_id = H5Screate_simple(DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME, + DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME); + goto error; + } + + for (i = 1, data_size = 1; i < DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= 
DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE; + + BEGIN_INDEPENDENT_OP(write_buf_alloc) + { + if (!MAINPROCESS) { + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(write_buf_alloc); + } + + for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE; i++) + ((int *)write_buf)[i] = mpi_rank; + } + } + END_INDEPENDENT_OP(write_buf_alloc); + + for (i = 0; i < DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = MAINPROCESS ? 0 : 1; + } + else { + start[i] = 0; + block[i] = MAINPROCESS ? 0 : dims[i]; + } + + stride[i] = 1; + count[i] = MAINPROCESS ? 0 : 1; + } + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + goto error; + } + + { + hsize_t mdims[] = {data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE}; + + if (MAINPROCESS) + mdims[0] = 0; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + BEGIN_INDEPENDENT_OP(dset_write) + { + if (H5Dwrite(dset_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT, + write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_write); + } + } + END_INDEPENDENT_OP(dset_write); + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + goto error; + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + goto error; + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + goto error; + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + + if (i != 0) { + for (j = 0; j < data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE; j++) { + if (((int *)read_buf)[j + (i * (data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE))] != + (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be written to by having + * one of the MPI ranks call H5Sselect_none. 
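+ * Rank 0 makes a 'none' selection in the file dataspace and supplies a
+ * zero-sized memory dataspace, while the remaining ranks each write their
+ * own hyperslab-selected portion of the dataset.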
+ */ +#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK 2 +#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME "one_rank_none_sel_write_test" +#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME "one_rank_none_sel_dset" +static int +test_write_dataset_one_proc_none_selection(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + hsize_t start[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK]; + hsize_t stride[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK]; + hsize_t count[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK]; + hsize_t block[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("write to dataset with one rank using 'none' selection"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME); + goto error; + } + + if (generate_random_parallel_dimensions(DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if ((fspace_id = H5Screate_simple(DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME, + DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME); + goto error; + } + + for (i = 1, data_size = 1; i < DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE; + + BEGIN_INDEPENDENT_OP(write_buf_alloc) + { + if (!MAINPROCESS) { + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(write_buf_alloc); + } + + for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE; i++) + ((int *)write_buf)[i] = mpi_rank; + } + } + END_INDEPENDENT_OP(write_buf_alloc); + + for (i = 0; i < 
DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + BEGIN_INDEPENDENT_OP(set_space_sel) + { + if (MAINPROCESS) { + if (H5Sselect_none(fspace_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't set 'none' selection for dataset write\n"); + INDEPENDENT_OP_ERROR(set_space_sel); + } + } + else { + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + INDEPENDENT_OP_ERROR(set_space_sel); + } + } + } + END_INDEPENDENT_OP(set_space_sel); + + { + hsize_t mdims[] = {data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE}; + + if (MAINPROCESS) + mdims[0] = 0; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + BEGIN_INDEPENDENT_OP(dset_write) + { + if (H5Dwrite(dset_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_write); + } + } + END_INDEPENDENT_OP(dset_write); + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + goto error; + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + goto error; + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + goto error; + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + + if (i != 0) { + for (j = 0; j < data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE; j++) { + if (((int *) + read_buf)[j + (i * (data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE))] != + (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be written to by having + * one of the MPI ranks use an ALL selection, while the other + * ranks write nothing. 
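+ * Rank 0 selects the entire file dataspace and writes all of the data,
+ * while the remaining ranks make 'none' selections and supply zero-sized
+ * memory dataspaces.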
+ */ +#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK 2 +#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME "one_rank_all_sel_write_test" +#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME "one_rank_all_sel_dset" +static int +test_write_dataset_one_proc_all_selection(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("write to dataset with one rank using all selection; others none selection"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME); + goto error; + } + + if (generate_random_parallel_dimensions(DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if ((fspace_id = H5Screate_simple(DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME, + DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME); + goto error; + } + + for (i = 0, data_size = 1; i < DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE; + + BEGIN_INDEPENDENT_OP(write_buf_alloc) + { + if (MAINPROCESS) { + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(write_buf_alloc); + } + + for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE; i++) + ((int *)write_buf)[i] = (int)i; + } + } + END_INDEPENDENT_OP(write_buf_alloc); + + BEGIN_INDEPENDENT_OP(set_space_sel) + { + if (MAINPROCESS) { + if (H5Sselect_all(fspace_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't set 'all' selection for dataset write\n"); + INDEPENDENT_OP_ERROR(set_space_sel); + } + } + else { + if (H5Sselect_none(fspace_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't set 
'none' selection for dataset write\n"); + INDEPENDENT_OP_ERROR(set_space_sel); + } + } + } + END_INDEPENDENT_OP(set_space_sel); + + { + hsize_t mdims[] = {data_size / DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE}; + + if (!MAINPROCESS) + mdims[0] = 0; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + BEGIN_INDEPENDENT_OP(dset_write) + { + if (H5Dwrite(dset_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_write); + } + } + END_INDEPENDENT_OP(dset_write); + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. + */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + goto error; + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + goto error; + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + goto error; + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE; i++) { + if (((int *)read_buf)[i] != (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + 
HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be written to by having + * a hyperslab selection in the file dataspace and an all selection + * in the memory dataspace. + * + * XXX: Currently pulls from invalid memory locations. + */ +#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK 2 +#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME "hyper_sel_file_all_sel_mem_write_test" +#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME "hyper_sel_file_all_sel_mem_dset" +static int +test_write_dataset_hyper_file_all_mem(void) +{ +#ifdef BROKEN + hssize_t space_npoints; + hsize_t *dims = NULL; + hsize_t start[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK]; + hsize_t stride[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK]; + hsize_t count[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK]; + hsize_t block[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; +#endif + + TESTING("write to dataset with hyperslab sel. for file space; all sel. 
for memory"); + +#ifdef BROKEN + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME); + goto error; + } + + if (generate_random_parallel_dimensions(DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if ((fspace_id = H5Screate_simple(DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME, + DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 1, data_size = 1; i < DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + goto error; + } + + for (i = 0; i < data_size / DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE; i++) + ((int *)write_buf)[i] = mpi_rank; + + for (i = 0; i < DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + goto error; + } + + if (H5Dwrite(dset_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE, H5S_ALL, fspace_id, H5P_DEFAULT, + write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME); + goto error; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + goto error; + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + goto error; + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + goto error; + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = + H5Gopen2(container_group, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + + for (j = 0; j < data_size / DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE; j++) { + if (((int *)read_buf)[j + (i * (data_size / DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE))] != + (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); +#else + SKIPPED(); +#endif + + return 0; + +#ifdef BROKEN +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +#endif +} + +/* + * A test to ensure that a dataset can be written to by having + * an all selection in the file dataspace and a hyperslab + * selection in the memory dataspace. 
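+ * Only rank 0 performs the write; it selects every other element of an
+ * oversized memory buffer with a strided hyperslab (stride 2, block 1) so
+ * that the unused elements are skipped.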
+ */ +#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK 2 +#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME "all_sel_file_hyper_sel_mem_write_test" +#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME "all_sel_file_hyper_sel_mem_dset" +static int +test_write_dataset_all_file_hyper_mem(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("write to dataset with all sel. for file space; hyperslab sel. for memory"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME); + goto error; + } + + if (generate_random_parallel_dimensions(DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if ((fspace_id = H5Screate_simple(DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME, + DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 0, data_size = 1; i < DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE; + + BEGIN_INDEPENDENT_OP(write_buf_alloc) + { + if (MAINPROCESS) { + /* + * Allocate twice the amount of memory needed and leave "holes" in the memory + * buffer in order to prove that the mapping from hyperslab selection <-> all + * selection works correctly. 
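+ * Real data goes into the even-indexed elements; the odd-indexed elements
+ * are zeroed and must be skipped by the strided memory selection.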
+ */ + if (NULL == (write_buf = HDmalloc(2 * data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(write_buf_alloc); + } + + for (i = 0; i < 2 * (data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE); i++) { + /* Write actual data to even indices */ + if (i % 2 == 0) + ((int *)write_buf)[i] = (int)((i / 2) + (i % 2)); + else + ((int *)write_buf)[i] = 0; + } + } + } + END_INDEPENDENT_OP(write_buf_alloc); + + /* + * Only have rank 0 perform the dataset write, as writing the entire dataset on all ranks + * might be stressful on system resources. There's also no guarantee as to what the outcome + * would be, since the writes would be overlapping with each other. + */ + BEGIN_INDEPENDENT_OP(dset_write) + { + if (MAINPROCESS) { + hsize_t start[1] = {0}; + hsize_t stride[1] = {2}; + hsize_t count[1] = {data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE}; + hsize_t block[1] = {1}; + hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE)}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + INDEPENDENT_OP_ERROR(dset_write); + } + + if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_write); + } + + if (H5Dwrite(dset_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_write); + } + } + } + END_INDEPENDENT_OP(dset_write); + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + goto error; + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + goto error; + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + goto error; + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = + H5Gopen2(container_group, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE; i++) { + if (((int *)read_buf)[i] != (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be written to by having + * a point selection in the file dataspace and an all selection + * in the memory dataspace. + */ +static int +test_write_dataset_point_file_all_mem(void) +{ + TESTING("write to dataset with point sel. for file space; all sel. for memory"); + + SKIPPED(); + + return 0; +} + +/* + * A test to ensure that a dataset can be written to by having + * an all selection in the file dataspace and a point selection + * in the memory dataspace. 
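+ * Only rank 0 performs the write; it uses H5Sselect_elements to pick
+ * every other element of an oversized memory buffer.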
+ */ +#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK 2 +#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME "all_sel_file_point_sel_mem_write_test" +#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME "all_sel_file_point_sel_mem_dset" +static int +test_write_dataset_all_file_point_mem(void) +{ + hssize_t space_npoints; + hsize_t *points = NULL; + hsize_t *dims = NULL; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("write to dataset with all sel. for file space; point sel. for memory"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME); + goto error; + } + + if (generate_random_parallel_dimensions(DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if ((fspace_id = H5Screate_simple(DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME, + DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 0, data_size = 1; i < DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE; + + BEGIN_INDEPENDENT_OP(write_buf_alloc) + { + if (MAINPROCESS) { + /* + * Allocate twice the amount of memory needed and leave "holes" in the memory + * buffer in order to prove that the mapping from point selection <-> all + * selection works correctly. 
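+ * Only the even-indexed elements hold real data; the odd-indexed elements
+ * are zeroed and should be skipped by the point selection.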
+ */ + if (NULL == (write_buf = HDmalloc(2 * data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(write_buf_alloc); + } + + for (i = 0; i < 2 * (data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) { + /* Write actual data to even indices */ + if (i % 2 == 0) + ((int *)write_buf)[i] = (int)((i / 2) + (i % 2)); + else + ((int *)write_buf)[i] = 0; + } + } + } + END_INDEPENDENT_OP(write_buf_alloc); + + /* + * Only have rank 0 perform the dataset write, as writing the entire dataset on all ranks + * might be stressful on system resources. There's also no guarantee as to what the outcome + * would be, since the writes would be overlapping with each other. + */ + BEGIN_INDEPENDENT_OP(dset_write) + { + if (MAINPROCESS) { + hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE)}; + int j; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + INDEPENDENT_OP_ERROR(dset_write); + } + + if (NULL == (points = HDmalloc((data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE) * + sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for point selection\n"); + INDEPENDENT_OP_ERROR(dset_write); + } + + /* Select every other point in the 1-dimensional memory dataspace */ + for (i = 0, j = 0; i < 2 * (data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) { + if (i % 2 == 0) + points[j++] = (hsize_t)i; + } + + if (H5Sselect_elements(mspace_id, H5S_SELECT_SET, + data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE, + points) < 0) { + H5_FAILED(); + HDprintf(" couldn't set point selection for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_write); + } + + if (H5Dwrite(dset_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_write); + } + } + } + END_INDEPENDENT_OP(dset_write); + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (points) { + HDfree(points); + points = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + goto error; + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + goto error; + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + goto error; + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = + H5Gopen2(container_group, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE; i++) { + if (((int *)read_buf)[i] != (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (points) + HDfree(points); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be written to by having + * a hyperslab selection in the file dataspace and a point + * selection in the memory dataspace. 
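+ * Each rank selects its own hyperslab of the file dataspace and uses
+ * H5Sselect_elements to pull the data from every other element of an
+ * oversized memory buffer.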
+ */ +#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK 2 +#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME "hyper_sel_file_point_sel_mem_write_test" +#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME "hyper_sel_file_point_sel_mem_dset" +static int +test_write_dataset_hyper_file_point_mem(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + hsize_t *points = NULL; + hsize_t start[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK]; + hsize_t stride[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK]; + hsize_t count[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK]; + hsize_t block[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("write to dataset with hyperslab sel. for file space; point sel. for memory"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME); + goto error; + } + + if (generate_random_parallel_dimensions(DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if ((fspace_id = H5Screate_simple(DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME, + DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 1, data_size = 1; i < DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE; + + /* + * Allocate twice the amount of memory needed and leave "holes" in the memory + * buffer in order to prove that the mapping from point selection <-> hyperslab + * selection works correctly. 
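+ * Even-indexed elements hold this rank's data; odd-indexed elements are
+ * zeroed and are skipped by the memory-side point selection.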
+ */ + if (NULL == (write_buf = HDmalloc(2 * data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + goto error; + } + + for (i = 0; i < 2 * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) { + /* Write actual data to even indices */ + if (i % 2 == 0) + ((int *)write_buf)[i] = mpi_rank; + else + ((int *)write_buf)[i] = 0; + } + + for (i = 0; i < DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset write\n"); + goto error; + } + + { + hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE)}; + int j; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + + if (NULL == (points = HDmalloc((data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE) * + sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for point selection\n"); + goto error; + } + + /* Select every other point in the 1-dimensional memory dataspace */ + for (i = 0, j = 0; i < 2 * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) { + if (i % 2 == 0) + points[j++] = (hsize_t)i; + } + + if (H5Sselect_elements(mspace_id, H5S_SELECT_SET, + data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE, points) < 0) { + H5_FAILED(); + HDprintf(" couldn't set point selection for dataset write\n"); + goto error; + } + } + + if (H5Dwrite(dset_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME); + goto error; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (points) { + HDfree(points); + points = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + goto error; + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + goto error; + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + goto error; + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + + for (j = 0; j < data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE; j++) { + if (((int *) + read_buf)[j + (i * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE))] != + (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (points) + HDfree(points); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be written to by having + * a point selection in the file dataspace and a hyperslab + * selection in the memory dataspace. 
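+ * Each rank selects the elements of its own row of the dataset as individual
+ * points in the file dataspace, while a stride-2 hyperslab picks the matching
+ * elements out of the memory buffer.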
+ */ +#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK 2 +#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME "point_sel_file_hyper_sel_mem_write_test" +#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME "point_sel_file_hyper_sel_mem_dset" +static int +test_write_dataset_point_file_hyper_mem(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + hsize_t *points = NULL; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("write to dataset with point sel. for file space; hyperslab sel. for memory"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + + if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME); + goto error; + } + + if (generate_random_parallel_dimensions(DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + if ((fspace_id = H5Screate_simple(DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) < 0) + TEST_ERROR; + + if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME, + DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 1, data_size = 1; i < DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE; + + /* + * Allocate twice the amount of memory needed and leave "holes" in the memory + * buffer in order to prove that the mapping from hyperslab selection <-> point + * selection works correctly. 
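+ * Only the even indices are covered by the stride-2 memory hyperslab below,
+ * so the zero-filled odd indices must never reach the file.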
+ */ + if (NULL == (write_buf = HDmalloc(2 * data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + goto error; + } + + for (i = 0; i < 2 * (data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE); i++) { + /* Write actual data to even indices */ + if (i % 2 == 0) + ((int *)write_buf)[i] = mpi_rank; + else + ((int *)write_buf)[i] = 0; + } + + if (NULL == (points = HDmalloc((data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE) * + DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK * sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for point selection\n"); + goto error; + } + + for (i = 0; i < data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE; i++) { + size_t j; + + for (j = 0; j < DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; j++) { + size_t idx = (i * (size_t)DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK) + j; + + if (j == 0) + points[idx] = (hsize_t)mpi_rank; + else if (j != (size_t)DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK - 1) + points[idx] = i / dims[j + 1]; + else + points[idx] = i % dims[j]; + } + } + + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, + data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE, points) < 0) { + H5_FAILED(); + HDprintf(" couldn't set point selection for dataset write\n"); + goto error; + } + + { + hsize_t start[1] = {0}; + hsize_t stride[1] = {2}; + hsize_t count[1] = {data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE}; + hsize_t block[1] = {1}; + hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE)}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + + if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't set hyperslab selection for dataset write\n"); + goto error; + } + } + + if (H5Dwrite(dset_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME); + goto error; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (points) { + HDfree(points); + points = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + goto error; + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + goto error; + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + goto error; + } + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + if (NULL == + (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + if (H5Dread(dset_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + + for (j = 0; j < data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE; j++) { + if (((int *) + read_buf)[j + (i * (data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE))] != + (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (points) + HDfree(points); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be read from by having + * one of the MPI ranks select 0 rows in a hyperslab selection. 
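+ * Rank 0 passes a zero-sized memory dataspace and a hyperslab with zero count
+ * and block, so it takes part in the read without transferring any elements,
+ * while every other rank reads and verifies its own row.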
+ */ +#define DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK 2 +#define DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME "one_rank_0_sel_read_test" +#define DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME "one_rank_0_sel_dset" +static int +test_read_dataset_one_proc_0_selection(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + hsize_t start[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK]; + hsize_t stride[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK]; + hsize_t count[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK]; + hsize_t block[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK]; + size_t i, data_size, read_buf_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("read from dataset with one rank selecting 0 rows"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if (generate_random_parallel_dimensions(DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* + * Have rank 0 create the dataset and completely fill it with data. + */ + BEGIN_INDEPENDENT_OP(dset_create) + { + if (MAINPROCESS) { + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((group_id = H5Gcreate2(container_group, DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((fspace_id = H5Screate_simple(DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK, dims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create file dataspace for dataset\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME, + DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0, data_size = 1; i < DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + size_t elem_per_proc = (data_size / 
DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE) / dims[0]; + + for (j = 0; j < elem_per_proc; j++) { + size_t idx = (i * elem_per_proc) + j; + + ((int *)write_buf)[idx] = (int)i; + } + } + + { + hsize_t mdims[] = {data_size / DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + + if (H5Dwrite(dset_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE, mspace_id, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. + */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + } + END_INDEPENDENT_OP(dset_create); + + /* + * Re-open file on all ranks. + */ + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + BEGIN_INDEPENDENT_OP(read_buf_alloc) + { + if (!MAINPROCESS) { + read_buf_size = + ((size_t)(space_npoints / mpi_size) * DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE); + + if (NULL == (read_buf = HDmalloc(read_buf_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + INDEPENDENT_OP_ERROR(read_buf_alloc); + } + } + } + END_INDEPENDENT_OP(read_buf_alloc); + + { + hsize_t mdims[] = {(hsize_t)space_npoints / (hsize_t)mpi_size}; + + if (MAINPROCESS) + mdims[0] = 0; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + 
H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + for (i = 0; i < DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = MAINPROCESS ? 0 : 1; + } + else { + start[i] = 0; + block[i] = MAINPROCESS ? 0 : dims[i]; + } + + stride[i] = 1; + count[i] = MAINPROCESS ? 0 : 1; + } + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset read\n"); + goto error; + } + + BEGIN_INDEPENDENT_OP(dset_read) + { + if (H5Dread(dset_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_read); + } + } + END_INDEPENDENT_OP(dset_read); + + BEGIN_INDEPENDENT_OP(data_verify) + { + if (!MAINPROCESS) { + for (i = 0; i < (size_t)space_npoints / (size_t)mpi_size; i++) { + if (((int *)read_buf)[i] != mpi_rank) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + INDEPENDENT_OP_ERROR(data_verify); + } + } + } + } + END_INDEPENDENT_OP(data_verify); + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be read from by having + * one of the MPI ranks call H5Sselect_none. 
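+ * Rank 0 calls H5Sselect_none() on the file dataspace and uses a zero-sized
+ * memory dataspace, so it reads nothing while the other ranks each read and
+ * verify their own row.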
+ */ +#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK 2 +#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME "one_rank_none_sel_read_test" +#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME "one_rank_none_sel_dset" +static int +test_read_dataset_one_proc_none_selection(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + hsize_t start[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK]; + hsize_t stride[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK]; + hsize_t count[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK]; + hsize_t block[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK]; + size_t i, data_size, read_buf_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("read from dataset with one rank using 'none' selection"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if (generate_random_parallel_dimensions(DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* + * Have rank 0 create the dataset and completely fill it with data. + */ + BEGIN_INDEPENDENT_OP(dset_create) + { + if (MAINPROCESS) { + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((group_id = H5Gcreate2(container_group, DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((fspace_id = H5Screate_simple(DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, dims, NULL)) < + 0) { + H5_FAILED(); + HDprintf(" failed to create file dataspace for dataset\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME, + DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0, data_size = 1; i < DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0; i < (size_t)mpi_size; i++) { + 
size_t j; + size_t elem_per_proc = (data_size / DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE) / dims[0]; + + for (j = 0; j < elem_per_proc; j++) { + size_t idx = (i * elem_per_proc) + j; + + ((int *)write_buf)[idx] = (int)i; + } + } + + { + hsize_t mdims[] = {data_size / DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + + if (H5Dwrite(dset_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, mspace_id, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. + */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + } + END_INDEPENDENT_OP(dset_create); + + /* + * Re-open file on all ranks. 
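+ * (The dataset was created by rank 0 alone above; an MPI FAPL is used here so
+ * that all ranks can open the file for the read phase.)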
+ */ + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + BEGIN_INDEPENDENT_OP(read_buf_alloc) + { + if (!MAINPROCESS) { + read_buf_size = + ((size_t)(space_npoints / mpi_size) * DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE); + + if (NULL == (read_buf = HDmalloc(read_buf_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + INDEPENDENT_OP_ERROR(read_buf_alloc); + } + } + } + END_INDEPENDENT_OP(read_buf_alloc); + + { + hsize_t mdims[] = {(hsize_t)space_npoints / (hsize_t)mpi_size}; + + if (MAINPROCESS) + mdims[0] = 0; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + for (i = 0; i < DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + BEGIN_INDEPENDENT_OP(set_space_sel) + { + if (MAINPROCESS) { + if (H5Sselect_none(fspace_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't set 'none' selection for dataset read\n"); + INDEPENDENT_OP_ERROR(set_space_sel); + } + } + else { + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset read\n"); + INDEPENDENT_OP_ERROR(set_space_sel); + } + } + } + END_INDEPENDENT_OP(set_space_sel); + + BEGIN_INDEPENDENT_OP(dset_read) + { + if (H5Dread(dset_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_read); + } + } + END_INDEPENDENT_OP(dset_read); + + BEGIN_INDEPENDENT_OP(data_verify) + { + if (!MAINPROCESS) { + for (i = 0; i < (size_t)space_npoints / (size_t)mpi_size; i++) { + if (((int *)read_buf)[i] != mpi_rank) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + INDEPENDENT_OP_ERROR(data_verify); + } + } + } + } + END_INDEPENDENT_OP(data_verify); + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) 
+ TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be read from by having + * one of the MPI ranks use an ALL selection, while the other + * ranks read nothing. + */ +#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK 2 +#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME "one_rank_all_sel_read_test" +#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME "one_rank_all_sel_dset" +static int +test_read_dataset_one_proc_all_selection(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + size_t i, data_size, read_buf_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("read from dataset with one rank using all selection; others none selection"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if (generate_random_parallel_dimensions(DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* + * Have rank 0 create the dataset and completely fill it with data. 
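+ * (Rank 0 does this with the default serial FAPL; the other ranks do not touch
+ * the file until it is re-opened on all ranks with an MPI FAPL below.)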
+ */ + BEGIN_INDEPENDENT_OP(dset_create) + { + if (MAINPROCESS) { + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((group_id = H5Gcreate2(container_group, DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((fspace_id = H5Screate_simple(DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, dims, NULL)) < + 0) { + H5_FAILED(); + HDprintf(" failed to create file dataspace for dataset\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME, + DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0, data_size = 1; i < DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + size_t elem_per_proc = (data_size / DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE) / dims[0]; + + for (j = 0; j < elem_per_proc; j++) { + size_t idx = (i * elem_per_proc) + j; + + ((int *)write_buf)[idx] = (int)i; + } + } + + { + hsize_t mdims[] = {data_size / DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + + if (H5Dwrite(dset_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, mspace_id, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + } + END_INDEPENDENT_OP(dset_create); + + /* + * Re-open file on all ranks. + */ + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + BEGIN_INDEPENDENT_OP(read_buf_alloc) + { + if (MAINPROCESS) { + read_buf_size = (size_t)space_npoints * DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE; + + if (NULL == (read_buf = HDmalloc(read_buf_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + INDEPENDENT_OP_ERROR(read_buf_alloc); + } + } + } + END_INDEPENDENT_OP(read_buf_alloc); + + { + hsize_t mdims[] = {(hsize_t)space_npoints}; + + if (!MAINPROCESS) + mdims[0] = 0; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + } + + BEGIN_INDEPENDENT_OP(set_space_sel) + { + if (MAINPROCESS) { + if (H5Sselect_all(fspace_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't set 'all' selection for dataset read\n"); + INDEPENDENT_OP_ERROR(set_space_sel); + } + } + else { + if (H5Sselect_none(fspace_id) < 0) { + H5_FAILED(); + HDprintf(" couldn't set 'none' selection for dataset read\n"); + INDEPENDENT_OP_ERROR(set_space_sel); + } + } + } + END_INDEPENDENT_OP(set_space_sel); + + BEGIN_INDEPENDENT_OP(dset_read) + { + if (H5Dread(dset_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_read); + } + } + END_INDEPENDENT_OP(dset_read); + + BEGIN_INDEPENDENT_OP(data_verify) + { + if (MAINPROCESS) { + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + size_t elem_per_proc = (size_t)(space_npoints / mpi_size); + + for (j = 0; j < elem_per_proc; j++) { + int idx = (int)((i * elem_per_proc) + j); + + if (((int *)read_buf)[idx] != (int)i) { + H5_FAILED(); + HDprintf(" data verification 
failed\n"); + INDEPENDENT_OP_ERROR(data_verify); + } + } + } + } + } + END_INDEPENDENT_OP(data_verify); + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be read from by having + * a hyperslab selection in the file dataspace and an all + * selection in the memory dataspace. + */ +static int +test_read_dataset_hyper_file_all_mem(void) +{ + TESTING("read from dataset with hyperslab sel. for file space; all sel. for memory"); + + SKIPPED(); + + return 0; +} + +/* + * A test to ensure that a dataset can be read from by having + * an all selection in the file dataspace and a hyperslab + * selection in the memory dataspace. + */ +#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK 2 +#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME "all_sel_file_hyper_sel_mem_read_test" +#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME "all_sel_file_hyper_sel_mem_dset" +static int +test_read_dataset_all_file_hyper_mem(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + size_t i, data_size, read_buf_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("read from dataset with all sel. for file space; hyperslab sel. for memory"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if (generate_random_parallel_dimensions(DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* + * Have rank 0 create the dataset and completely fill it with data. 
+ */ + BEGIN_INDEPENDENT_OP(dset_create) + { + if (MAINPROCESS) { + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((group_id = H5Gcreate2(container_group, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((fspace_id = H5Screate_simple(DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) < + 0) { + H5_FAILED(); + HDprintf(" failed to create file dataspace for dataset\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME, + DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0, data_size = 1; i < DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + size_t elem_per_proc = + (data_size / DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE) / dims[0]; + + for (j = 0; j < elem_per_proc; j++) { + size_t idx = (i * elem_per_proc) + j; + + ((int *)write_buf)[idx] = (int)i; + } + } + + { + hsize_t mdims[] = {data_size / DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + + if (H5Dwrite(dset_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + } + END_INDEPENDENT_OP(dset_create); + + /* + * Re-open file on all ranks. + */ + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + /* + * Only have rank 0 perform the dataset read, as reading the entire dataset on all ranks + * might be stressful on system resources. + */ + BEGIN_INDEPENDENT_OP(dset_read) + { + if (MAINPROCESS) { + hsize_t start[1] = {0}; + hsize_t stride[1] = {2}; + hsize_t count[1] = {(hsize_t)space_npoints}; + hsize_t block[1] = {1}; + hsize_t mdims[] = {(hsize_t)(2 * space_npoints)}; + + /* + * Allocate twice the amount of memory needed and leave "holes" in the memory + * buffer in order to prove that the mapping from all selection <-> hyperslab + * selection works correctly. 
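+ * The buffer is calloc'd so that the odd, unselected indices can be checked
+ * to still be zero after the stride-2 hyperslab read below.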
+ */ + read_buf_size = (size_t)(2 * space_npoints) * DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE; + if (NULL == (read_buf = HDcalloc(1, read_buf_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + INDEPENDENT_OP_ERROR(dset_read); + } + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + INDEPENDENT_OP_ERROR(dset_read); + } + + if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset read\n"); + INDEPENDENT_OP_ERROR(dset_read); + } + + if (H5Dread(dset_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL, + H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_read); + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + size_t elem_per_proc = (size_t)(space_npoints / mpi_size); + + for (j = 0; j < 2 * elem_per_proc; j++) { + size_t idx = (i * 2 * elem_per_proc) + j; + + if (j % 2 == 0) { + if (((int *)read_buf)[idx] != (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + INDEPENDENT_OP_ERROR(dset_read); + } + } + else { + if (((int *)read_buf)[idx] != 0) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + INDEPENDENT_OP_ERROR(dset_read); + } + } + } + } + } + } + END_INDEPENDENT_OP(dset_read); + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be read from by having + * a point selection in the file dataspace and an all selection + * in the memory dataspace. + */ +static int +test_read_dataset_point_file_all_mem(void) +{ + TESTING("read from dataset with point sel. for file space; all sel. for memory"); + + SKIPPED(); + + return 0; +} + +/* + * A test to ensure that a dataset can be read from by having + * an all selection in the file dataspace and a point selection + * in the memory dataspace. 
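+ * Rank 0 reads the entire dataset with an all selection in the file dataspace
+ * and scatters it into every other element of its memory buffer through a
+ * point selection.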
+ */ +#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK 2 +#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME "all_sel_file_point_sel_mem_read_test" +#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME "all_sel_file_point_sel_mem_dset" +static int +test_read_dataset_all_file_point_mem(void) +{ + hssize_t space_npoints; + hsize_t *points = NULL; + hsize_t *dims = NULL; + size_t i, data_size, read_buf_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("read from dataset with all sel. for file space; point sel. for memory"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if (generate_random_parallel_dimensions(DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* + * Have rank 0 create the dataset and completely fill it with data. + */ + BEGIN_INDEPENDENT_OP(dset_create) + { + if (MAINPROCESS) { + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((group_id = H5Gcreate2(container_group, DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((fspace_id = H5Screate_simple(DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) < + 0) { + H5_FAILED(); + HDprintf(" failed to create file dataspace for dataset\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME, + DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0, data_size = 1; i < DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + size_t elem_per_proc = + (data_size / DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE) / dims[0]; + + for (j = 0; j < elem_per_proc; j++) { + size_t idx = (i * elem_per_proc) + j; + 
+ ((int *)write_buf)[idx] = (int)i; + } + } + + { + hsize_t mdims[] = {data_size / DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + + if (H5Dwrite(dset_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. + */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + } + END_INDEPENDENT_OP(dset_create); + + /* + * Re-open file on all ranks. + */ + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < + 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + /* + * Only have rank 0 perform the dataset read, as reading the entire dataset on all ranks + * might be stressful on system resources. + */ + BEGIN_INDEPENDENT_OP(dset_read) + { + if (MAINPROCESS) { + hsize_t mdims[] = {(hsize_t)(2 * space_npoints)}; + size_t j; + + /* + * Allocate twice the amount of memory needed and leave "holes" in the memory + * buffer in order to prove that the mapping from all selection <-> point + * selection works correctly. 
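+ * As in the previous test, the buffer is zero-initialized so the unselected
+ * odd indices can be verified to remain zero after the read.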
+ */ + read_buf_size = (size_t)(2 * space_npoints) * DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE; + if (NULL == (read_buf = HDcalloc(1, read_buf_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + INDEPENDENT_OP_ERROR(dset_read); + } + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + INDEPENDENT_OP_ERROR(dset_read); + } + + if (NULL == (points = HDmalloc((size_t)space_npoints * sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for point selection\n"); + INDEPENDENT_OP_ERROR(dset_read); + } + + /* Select every other point in the 1-dimensional memory dataspace */ + for (i = 0, j = 0; i < 2 * (size_t)space_npoints; i++) { + if (i % 2 == 0) + points[j++] = (hsize_t)i; + } + + if (H5Sselect_elements(mspace_id, H5S_SELECT_SET, (size_t)space_npoints, points) < 0) { + H5_FAILED(); + HDprintf(" couldn't set point selection for dataset read\n"); + INDEPENDENT_OP_ERROR(dset_read); + } + + if (H5Dread(dset_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL, + H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_read); + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t elem_per_proc = (size_t)(space_npoints / mpi_size); + + for (j = 0; j < 2 * elem_per_proc; j++) { + size_t idx = (i * 2 * elem_per_proc) + j; + + if (j % 2 == 0) { + if (((int *)read_buf)[idx] != (int)i) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + INDEPENDENT_OP_ERROR(dset_read); + } + } + else { + if (((int *)read_buf)[idx] != 0) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + INDEPENDENT_OP_ERROR(dset_read); + } + } + } + } + } + } + END_INDEPENDENT_OP(dset_read); + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (points) { + HDfree(points); + points = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (points) + HDfree(points); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be read from by having + * a hyperslab selection in the file dataspace and a point + * selection in the memory dataspace. 
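+ * Each rank selects the row of the file dataset that corresponds to its MPI rank
+ * and scatters it into every other element of a memory buffer that is allocated
+ * at twice the required size.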
+ */ +#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK 2 +#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME "hyper_sel_file_point_sel_mem_read_test" +#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME "hyper_sel_file_point_sel_mem_dset" +static int +test_read_dataset_hyper_file_point_mem(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + hsize_t *points = NULL; + hsize_t start[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK]; + hsize_t stride[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK]; + hsize_t count[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK]; + hsize_t block[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK]; + size_t i, data_size, read_buf_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("read from dataset with hyperslab sel. for file space; point sel. for memory"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if (generate_random_parallel_dimensions(DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* + * Have rank 0 create the dataset and completely fill it with data. 
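+     * Only rank 0 touches the file in this block (opened with the default, serial
+     * FAPL); the remaining ranks skip it and rejoin at the collective re-open below.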
+ */ + BEGIN_INDEPENDENT_OP(dset_create) + { + if (MAINPROCESS) { + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((group_id = H5Gcreate2(container_group, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((fspace_id = + H5Screate_simple(DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create file dataspace for dataset\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME, + DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0, data_size = 1; i < DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + size_t elem_per_proc = + (data_size / DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE) / dims[0]; + + for (j = 0; j < elem_per_proc; j++) { + size_t idx = (i * elem_per_proc) + j; + + ((int *)write_buf)[idx] = (int)i; + } + } + + { + hsize_t mdims[] = {data_size / DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + + if (H5Dwrite(dset_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. 
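+             * Closing the file on rank 0 flushes its cached metadata and raw data to
+             * storage so that the collective re-open sees the complete dataset.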
+ */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + } + END_INDEPENDENT_OP(dset_create); + + /* + * Re-open file on all ranks. + */ + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = + H5Gopen2(container_group, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + /* + * Allocate twice the amount of memory needed and leave "holes" in the memory + * buffer in order to prove that the mapping from hyperslab selection <-> point + * selection works correctly. 
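+     * As in the previous test, every even index of read_buf should hold file data
+     * after the read while every odd index should still be 0 from HDcalloc(); here
+     * each rank checks only the values from its own row of the dataset.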
+ */ + read_buf_size = (2 * (size_t)(space_npoints / mpi_size) * DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE); + if (NULL == (read_buf = HDcalloc(1, read_buf_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + for (i = 0; i < DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++) { + if (i == 0) { + start[i] = (hsize_t)mpi_rank; + block[i] = 1; + } + else { + start[i] = 0; + block[i] = dims[i]; + } + + stride[i] = 1; + count[i] = 1; + } + + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't select hyperslab for dataset read\n"); + goto error; + } + + { + hsize_t mdims[] = {(hsize_t)(2 * (space_npoints / mpi_size))}; + size_t j; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + + if (NULL == (points = HDmalloc((size_t)(space_npoints / mpi_size) * sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for point selection\n"); + goto error; + } + + /* Select every other point in the 1-dimensional memory dataspace */ + for (i = 0, j = 0; i < (size_t)(2 * (space_npoints / mpi_size)); i++) { + if (i % 2 == 0) + points[j++] = (hsize_t)i; + } + + if (H5Sselect_elements(mspace_id, H5S_SELECT_SET, (size_t)(space_npoints / mpi_size), points) < 0) { + H5_FAILED(); + HDprintf(" couldn't set point selection for dataset read\n"); + goto error; + } + } + + if (H5Dread(dset_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < (size_t)(2 * (space_npoints / mpi_size)); i++) { + if (i % 2 == 0) { + if (((int *)read_buf)[i] != (int)mpi_rank) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + else { + if (((int *)read_buf)[i] != 0) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (points) { + HDfree(points); + points = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (points) + HDfree(points); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a dataset can be read from by having + * a point selection in the file dataspace and a hyperslab + * selection in the memory dataspace. 
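+ * Each rank builds an explicit list of the coordinates making up its own row of
+ * the file dataset and gathers those elements through a stride-2 hyperslab in a
+ * 1-dimensional memory dataspace of twice the required size.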
+ */ +#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK 2 +#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int) +#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME "point_sel_file_hyper_sel_mem_read_test" +#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME "point_sel_file_hyper_sel_mem_dset" +static int +test_read_dataset_point_file_hyper_mem(void) +{ + hssize_t space_npoints; + hsize_t *dims = NULL; + hsize_t *points = NULL; + size_t i, data_size, read_buf_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + void *read_buf = NULL; + + TESTING("read from dataset with point sel. for file space; hyperslab sel. for memory"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + SKIPPED(); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + return 0; + } + + if (generate_random_parallel_dimensions(DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0) + TEST_ERROR; + + /* + * Have rank 0 create the dataset and completely fill it with data. + */ + BEGIN_INDEPENDENT_OP(dset_create) + { + if (MAINPROCESS) { + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((group_id = H5Gcreate2(container_group, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((fspace_id = + H5Screate_simple(DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create file dataspace for dataset\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME, + DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0, data_size = 1; i < DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE; + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t j; + size_t elem_per_proc = + (data_size / DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE) / dims[0]; + + for (j = 0; j < elem_per_proc; j++) { 
+ size_t idx = (i * elem_per_proc) + j; + + ((int *)write_buf)[idx] = (int)i; + } + } + + { + hsize_t mdims[] = {data_size / DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + + if (H5Dwrite(dset_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + if (mspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(mspace_id); + } + H5E_END_TRY; + mspace_id = H5I_INVALID_HID; + } + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. + */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + } + END_INDEPENDENT_OP(dset_create); + + /* + * Re-open file on all ranks. + */ + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = + H5Gopen2(container_group, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataspace num points\n"); + goto error; + } + + /* + * Allocate twice the amount of memory needed and leave "holes" in the memory + * buffer in order to prove that the mapping from point selection <-> hyperslab + * selection works correctly. 
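+     * This time the holes come from a stride-2 hyperslab selection in the
+     * 1-dimensional memory dataspace rather than from a point selection, so the
+     * same even/odd pattern is expected in read_buf.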
+ */ + read_buf_size = + (2 * (size_t)(space_npoints / mpi_size) * DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE); + if (NULL == (read_buf = HDcalloc(1, read_buf_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset read\n"); + goto error; + } + + if (NULL == (points = HDmalloc((size_t)((space_npoints / mpi_size) * + DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK) * + sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for point selection\n"); + goto error; + } + + for (i = 0; i < (size_t)(space_npoints / mpi_size); i++) { + size_t j; + + for (j = 0; j < DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; j++) { + size_t idx = (i * DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK) + j; + + if (j == 0) + points[idx] = (hsize_t)mpi_rank; + else if (j != DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK - 1) + points[idx] = i / dims[j + 1]; + else + points[idx] = i % dims[j]; + } + } + + if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, (size_t)(space_npoints / mpi_size), points) < 0) { + H5_FAILED(); + HDprintf(" couldn't set point selection for dataset read\n"); + goto error; + } + + { + hsize_t start[1] = {0}; + hsize_t stride[1] = {2}; + hsize_t count[1] = {(hsize_t)(space_npoints / mpi_size)}; + hsize_t block[1] = {1}; + hsize_t mdims[] = {(hsize_t)(2 * (space_npoints / mpi_size))}; + + if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create memory dataspace\n"); + goto error; + } + + if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) { + H5_FAILED(); + HDprintf(" couldn't set hyperslab selection for dataset write\n"); + goto error; + } + } + + if (H5Dread(dset_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT, + read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME); + goto error; + } + + for (i = 0; i < (size_t)(2 * (space_npoints / mpi_size)); i++) { + if (i % 2 == 0) { + if (((int *)read_buf)[i] != (int)mpi_rank) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + else { + if (((int *)read_buf)[i] != 0) { + H5_FAILED(); + HDprintf(" data verification failed\n"); + goto error; + } + } + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + if (points) { + HDfree(points); + points = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (read_buf) + HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (points) + HDfree(points); + if (dims) + HDfree(dims); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset composed of multiple chunks + * can be written and read correctly. When reading back the + * chunks of the dataset, the file dataspace and memory dataspace + * used are the same shape. The dataset's first dimension grows + * with the number of MPI ranks, while the other dimensions are fixed. 
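+ * Rank 0 writes the entire dataset up front; every rank then reads it back one
+ * chunk at a time, restricted to the chunks in its own row of the chunk grid,
+ * through a memory dataspace shaped exactly like a single chunk.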
+ */ +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \ + 100 /* Should be an even divisor of fixed dimension size */ +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE 1000 +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK 2 +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME \ + "multi_chunk_dataset_write_same_space_read_test" +#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset" +static int +test_write_multi_chunk_dataset_same_shape_read(void) +{ + hsize_t *dims = NULL; + hsize_t *chunk_dims = NULL; + hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t start[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t count[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + size_t i, data_size, chunk_size, n_chunks_per_rank; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + int read_buf[1][DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE]; + + TESTING("write to dataset with multiple chunks using same shaped dataspaces"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or getting property list aren't " + "supported with this connector\n"); + return 0; + } + + if (NULL == + (dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK * sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset dimensionality\n"); + goto error; + } + + if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK * + sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset dimensionality\n"); + goto error; + } + + for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (i == 0) { + dims[i] = (hsize_t)mpi_size; + chunk_dims[i] = 1; + } + else { + dims[i] = DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE; + chunk_dims[i] = DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE; + } + } + + for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + chunk_size *= chunk_dims[i]; + chunk_size *= DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; + + for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; + + /* + * Have rank 0 create the dataset and completely fill it with data. 
+ */ + BEGIN_INDEPENDENT_OP(dset_create) + { + if (MAINPROCESS) { + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((group_id = + H5Gcreate2(container_group, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, + dims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create file dataspace for dataset\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" failed to create DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, + chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME, + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, fspace_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + /* + * See if a copy of the DCPL reports the correct chunking. + */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, + retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + /* + * Ensure that each underlying chunk contains the values + * + * chunk_index .. (chunk_nelemts - 1) + chunk_index. + * + * That is to say, for a chunk size of 10 x 10, chunk 0 + * contains the values + * + * 0 .. 99 + * + * while the next chunk contains the values + * + * 1 .. 100 + * + * and so on. 
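+             *
+             * With the 1 x 100 chunks used here (ten chunks along each row of the
+             * dataset), this works out to the row-major index of the chunk holding
+             * the element plus the element's offset within that chunk: element (r, c)
+             * is written as r * 10 + c / 100 + c % 100, e.g. element (1, 205) becomes
+             * 10 + 2 + 5 = 17.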
+ */ + for (i = 0; i < data_size / DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; i++) { + size_t j; + size_t base; + size_t tot_adjust; + + /* + * Calculate a starting base value by taking the index value mod + * the size of a chunk in each dimension. + */ + for (j = 0, base = i; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) + if (chunk_dims[j] > 1 && base >= chunk_dims[j]) + base %= chunk_dims[j]; + + /* + * Calculate the adjustment in each dimension. + */ + for (j = 0, tot_adjust = 0; + j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == (DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + tot_adjust += (i % dims[j]) / chunk_dims[j]; + else { + size_t k; + size_t n_faster_elemts; + + /* + * Calculate the number of elements in faster dimensions. + */ + for (k = j + 1, n_faster_elemts = 1; + k < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; k++) + n_faster_elemts *= dims[k]; + + tot_adjust += + (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) + + (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]); + } + } + + ((int *)write_buf)[i] = (int)(base + tot_adjust); + } + + /* + * Write every chunk in the dataset. + */ + if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dcpl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + } + H5E_END_TRY; + dcpl_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. + */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + } + END_INDEPENDENT_OP(dset_create); + + /* + * Re-open file on all ranks. 
+ */ + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = + H5Dopen2(group_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + /* + * Create 2-dimensional memory dataspace for read buffer. + */ + { + hsize_t mdims[] = {chunk_dims[0], chunk_dims[1]}; + + if ((mspace_id = H5Screate_simple(2, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create memory dataspace\n"); + goto error; + } + } + + for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + count[i] = chunk_dims[i]; + } + + /* + * Each rank reads their respective chunks in the dataset, checking the data for each one. + */ + if (MAINPROCESS) + HDprintf("\n"); + for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank; i++) { + size_t j, k; + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Barrier failed\n"); + goto error; + } + + if (MAINPROCESS) + HDprintf("\r All ranks reading chunk %zu", i); + + for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == 0) + start[j] = (hsize_t)mpi_rank; + else if (j == (DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + /* Fastest changing dimension */ + start[j] = (i * chunk_dims[j]) % dims[j]; + else + start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]); + } + + /* + * Adjust file dataspace selection for next chunk. 
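+         * With the fixed 1000 / 100 layout each rank reads ten chunks, all in the row
+         * of the chunk grid matching its MPI rank, so iteration i selects the chunk
+         * starting at (mpi_rank, (i * 100) % 1000).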
+ */ + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to set hyperslab selection\n"); + goto error; + } + + for (j = 0; j < chunk_dims[0]; j++) + for (k = 0; k < chunk_dims[1]; k++) + read_buf[j][k] = 0; + + if (H5Dread(dset_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + for (j = 0; j < chunk_dims[0]; j++) { + for (k = 0; k < chunk_dims[1]; k++) { + size_t val = + ((j * chunk_dims[0]) + k + i) + + ((hsize_t)mpi_rank * n_chunks_per_rank); /* Additional value offset for each rank */ + if (read_buf[j][k] != (int)val) { + H5_FAILED(); + HDprintf(" data verification failed for chunk %lld\n", (long long)i); + goto error; + } + } + } + } + + if (chunk_dims) { + HDfree(chunk_dims); + chunk_dims = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (write_buf) + HDfree(write_buf); + if (chunk_dims) + HDfree(chunk_dims); + if (dims) + HDfree(dims); + H5Pclose(dcpl_id); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset composed of multiple chunks + * can be written and read correctly. When reading back the + * chunks of the dataset, the file dataspace and memory dataspace + * used are differently shaped. The dataset's first dimension grows + * with the number of MPI ranks, while the other dimensions are fixed. 
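+ * This test mirrors the previous one except that each 1 x 100 chunk selected in
+ * the file is read back through a 10 x 10 memory dataspace, so the two selections
+ * match in element count but not in shape.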
+ */ +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \ + 100 /* Should be an even divisor of fixed dimension size */ +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE \ + (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE / 10) +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE 1000 +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK 2 +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME \ + "multi_chunk_dataset_write_diff_space_read_test" +#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset" +static int +test_write_multi_chunk_dataset_diff_shape_read(void) +{ + hsize_t *dims = NULL; + hsize_t *chunk_dims = NULL; + hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t start[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t count[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK]; + size_t i, data_size, chunk_size, n_chunks_per_rank; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + int read_buf[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE] + [DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE]; + + TESTING("write to dataset with multiple chunks using differently shaped dataspaces"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or getting property list aren't " + "supported with this connector\n"); + return 0; + } + + if (NULL == + (dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK * sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset dimensionality\n"); + goto error; + } + + if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK * + sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset dimensionality\n"); + goto error; + } + + for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (i == 0) { + dims[i] = (hsize_t)mpi_size; + chunk_dims[i] = 1; + } + else { + dims[i] = DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE; + chunk_dims[i] = DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE; + } + } + + for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + chunk_size *= chunk_dims[i]; + chunk_size *= DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; + + for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; + + /* + * Have rank 0 create the dataset 
and completely fill it with data. + */ + BEGIN_INDEPENDENT_OP(dset_create) + { + if (MAINPROCESS) { + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((group_id = + H5Gcreate2(container_group, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, + dims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create file dataspace for dataset\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" failed to create DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, + chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME, + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, fspace_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + /* + * See if a copy of the DCPL reports the correct chunking. + */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, + retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + /* + * Ensure that each underlying chunk contains the values + * + * chunk_index .. (chunk_nelemts - 1) + chunk_index. + * + * That is to say, for a chunk size of 10 x 10, chunk 0 + * contains the values + * + * 0 .. 99 + * + * while the next chunk contains the values + * + * 1 .. 100 + * + * and so on. 
+ */ + for (i = 0; i < data_size / DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; i++) { + size_t j; + size_t base; + size_t tot_adjust; + + /* + * Calculate a starting base value by taking the index value mod + * the size of a chunk in each dimension. + */ + for (j = 0, base = i; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) + if (chunk_dims[j] > 1 && base >= chunk_dims[j]) + base %= chunk_dims[j]; + + /* + * Calculate the adjustment in each dimension. + */ + for (j = 0, tot_adjust = 0; + j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + tot_adjust += (i % dims[j]) / chunk_dims[j]; + else { + size_t k; + size_t n_faster_elemts; + + /* + * Calculate the number of elements in faster dimensions. + */ + for (k = j + 1, n_faster_elemts = 1; + k < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; k++) + n_faster_elemts *= dims[k]; + + tot_adjust += + (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) + + (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]); + } + } + + ((int *)write_buf)[i] = (int)(base + tot_adjust); + } + + /* + * Write every chunk in the dataset. + */ + if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, + H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dcpl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + } + H5E_END_TRY; + dcpl_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file to ensure that the data gets written. + */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + } + END_INDEPENDENT_OP(dset_create); + + /* + * Re-open file on all ranks. 
+ */ + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME); + goto error; + } + + if ((dset_id = + H5Dopen2(group_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + /* + * Create memory dataspace for read buffer. + */ + { + hsize_t mdims[] = {DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE, + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE}; + + if ((mspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, + mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create memory dataspace\n"); + goto error; + } + } + + for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + count[i] = chunk_dims[i]; + } + + /* + * Each rank reads their respective chunks in the dataset, checking the data for each one. + */ + if (MAINPROCESS) + HDprintf("\n"); + for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank; i++) { + size_t j, k; + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Barrier failed\n"); + goto error; + } + + if (MAINPROCESS) + HDprintf("\r All ranks reading chunk %zu", i); + + for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == 0) + start[j] = (hsize_t)mpi_rank; + else if (j == (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + /* Fastest changing dimension */ + start[j] = (i * chunk_dims[j]) % dims[j]; + else + start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]); + } + + /* + * Adjust file dataspace selection for next chunk. 
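+         * H5Dread() only requires the file and memory selections to contain the same
+         * number of elements, not to have the same shape, so each 1 x 100 file
+         * selection fills the 10 x 10 read_buf in row-major order.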
+ */ + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to set hyperslab selection\n"); + goto error; + } + + for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++) + for (k = 0; k < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++) + read_buf[j][k] = 0; + + if (H5Dread(dset_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, mspace_id, fspace_id, + H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++) { + for (k = 0; k < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++) { + size_t val = ((j * DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE) + k + i) + + ((hsize_t)mpi_rank * n_chunks_per_rank); + + if (read_buf[j][k] != (int)val) { + H5_FAILED(); + HDprintf(" data verification failed for chunk %lld\n", (long long)i); + goto error; + } + } + } + } + + if (chunk_dims) { + HDfree(chunk_dims); + chunk_dims = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Sclose(fspace_id) < 0) + TEST_ERROR; + if (H5Dclose(dset_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (write_buf) + HDfree(write_buf); + if (chunk_dims) + HDfree(chunk_dims); + if (dims) + HDfree(dims); + H5Pclose(dcpl_id); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset composed of multiple chunks + * can be written and read correctly several times in a row. + * When reading back the chunks of the dataset, the file + * dataspace and memory dataspace used are the same shape. + * The dataset's first dimension grows with the number of MPI + * ranks, while the other dimensions are fixed. 
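+ * Each iteration rewrites the whole dataset with every value incremented by one
+ * and then checks the chunks again, exercising in-place overwrites of
+ * already-allocated chunks.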
+ */ +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \ + 100 /* Should be an even divisor of fixed dimension size */ +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE 1000 +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK 2 +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME \ + "multi_chunk_dataset_same_space_overwrite_test" +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset" +#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_NITERS 10 +static int +test_overwrite_multi_chunk_dataset_same_shape_read(void) +{ + hsize_t *dims = NULL; + hsize_t *chunk_dims = NULL; + hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t start[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t count[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + size_t i, data_size, chunk_size, n_chunks_per_rank; + size_t niter; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + int read_buf[1][DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE]; + + TESTING("several overwrites to dataset with multiple chunks using same shaped dataspaces"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or getting property list aren't " + "supported with this connector\n"); + return 0; + } + + if (NULL == (dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK * + sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset dimensionality\n"); + goto error; + } + + if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK * + sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset dimensionality\n"); + goto error; + } + + for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (i == 0) { + dims[i] = (hsize_t)mpi_size; + chunk_dims[i] = 1; + } + else { + dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE; + chunk_dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE; + } + } + + for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + chunk_size *= chunk_dims[i]; + chunk_size *= DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; + + for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; + + /* + * Have rank 0 create the dataset, but don't fill it with data yet. 
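+     * The data itself is written (and then rewritten) inside the per-iteration
+     * loop further below.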
+ */ + BEGIN_INDEPENDENT_OP(dset_create) + { + if (MAINPROCESS) { + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((group_id = H5Gcreate2(container_group, + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((fspace_id = H5Screate_simple( + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, dims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create file dataspace for dataset\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" failed to create DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, + chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + /* Set dataset space allocation time to Early to ensure all chunk-related metadata is available to + * all other processes when they open the dataset */ + if (H5Pset_alloc_time(dcpl_id, H5D_ALLOC_TIME_EARLY) < 0) { + H5_FAILED(); + HDprintf(" failed to set allocation time on DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME, + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, + fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + /* + * See if a copy of the DCPL reports the correct chunking. 
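+             * H5Dget_create_plist() returns a copy of the dataset's creation property
+             * list; the chunk dimensions it reports are compared against the requested
+             * chunk_dims and the copy is closed again afterwards.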
+ */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, + retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dcpl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + } + H5E_END_TRY; + dcpl_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file on all ranks. + */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + } + END_INDEPENDENT_OP(dset_create); + + /* + * Re-open file on all ranks. + */ + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME); + goto error; + } + + /* + * Create 2-dimensional memory dataspace for read buffer. 
+ */ + { + hsize_t mdims[] = {chunk_dims[0], chunk_dims[1]}; + + if ((mspace_id = H5Screate_simple(2, mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create memory dataspace\n"); + goto error; + } + } + + for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + count[i] = chunk_dims[i]; + } + + if (MAINPROCESS) + HDprintf("\n"); + for (niter = 0; niter < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_NITERS; niter++) { + if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + BEGIN_INDEPENDENT_OP(dset_write) + { + if (MAINPROCESS) { + memset(write_buf, 0, data_size); + + /* + * Ensure that each underlying chunk contains the values + * + * chunk_index .. (chunk_nelemts - 1) + chunk_index. + * + * That is to say, for a chunk size of 10 x 10, chunk 0 + * contains the values + * + * 0 .. 99 + * + * while the next chunk contains the values + * + * 1 .. 100 + * + * and so on. On each iteration, we add 1 to the previous + * values. + */ + for (i = 0; i < data_size / DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; + i++) { + size_t j; + size_t base; + size_t tot_adjust; + + /* + * Calculate a starting base value by taking the index value mod + * the size of a chunk in each dimension. + */ + for (j = 0, base = i; + j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) + if (chunk_dims[j] > 1 && base >= chunk_dims[j]) + base %= chunk_dims[j]; + + /* + * Calculate the adjustment in each dimension. + */ + for (j = 0, tot_adjust = 0; + j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == (DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + tot_adjust += (i % dims[j]) / chunk_dims[j]; + else { + size_t k; + size_t n_faster_elemts; + + /* + * Calculate the number of elements in faster dimensions. + */ + for (k = j + 1, n_faster_elemts = 1; + k < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; k++) + n_faster_elemts *= dims[k]; + + tot_adjust += (((i / n_faster_elemts) / chunk_dims[j]) * + (dims[j + 1] / chunk_dims[j + 1])) + + (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]); + } + } + + ((int *)write_buf)[i] = (int)(base + tot_adjust + niter); + } + + /* + * Write every chunk in the dataset. 
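
The index arithmetic in the loop above is written for an arbitrary rank, but for the 2-D shape this test uses (dims = {mpi_size, 1000}, chunk_dims = {1, 100}) it collapses to a simple closed form: element (row, col) receives (col % 100) + row * 10 + (col / 100) + niter, which matches what the per-chunk verification loop further below expects. A hypothetical helper illustrating that specialization (names are illustrative only):

    #include <assert.h>
    #include <stddef.h>

    /* Value the writer above stores at element (row, col) on iteration `niter`,
     * specialized to dims = {nrows, 1000} and chunk_dims = {1, 100}. */
    static int
    expected_value(size_t row, size_t col, size_t niter)
    {
        size_t base       = col % 100;            /* offset within the chunk          */
        size_t tot_adjust = row * (1000 / 100)    /* 10 chunks in every earlier row   */
                            + col / 100;          /* chunk index within this row      */

        return (int)(base + tot_adjust + niter);
    }

    int
    main(void)
    {
        /* Rank 2 reading chunk 3 at in-chunk offset 7 on iteration 5
         * expects 7 + 3 + 2 * 10 + 5 = 35. */
        assert(expected_value(2, 3 * 100 + 7, 5) == 35);
        return 0;
    }
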
+ */ + if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, + H5S_ALL, H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_write); + } + } + } + END_INDEPENDENT_OP(dset_write); + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Barrier failed\n"); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + /* + * Each rank reads their respective chunks in the dataset, checking the data for each one. + */ + for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank; + i++) { + size_t j, k; + + if (MAINPROCESS) + HDprintf("\r All ranks reading chunk %zu", i); + + for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == 0) + start[j] = (hsize_t)mpi_rank; + else if (j == (DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + /* Fastest changing dimension */ + start[j] = (i * chunk_dims[j]) % dims[j]; + else + start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]); + } + + /* + * Adjust file dataspace selection for next chunk. + */ + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to set hyperslab selection\n"); + goto error; + } + + for (j = 0; j < chunk_dims[0]; j++) + for (k = 0; k < chunk_dims[1]; k++) + read_buf[j][k] = 0; + + if (H5Dread(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, mspace_id, + fspace_id, H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + for (j = 0; j < chunk_dims[0]; j++) { + for (k = 0; k < chunk_dims[1]; k++) { + size_t val = + ((j * chunk_dims[0]) + k + i) + + ((hsize_t)mpi_rank * n_chunks_per_rank) /* Additional value offset for each rank */ + + niter; + if (read_buf[j][k] != (int)val) { + H5_FAILED(); + HDprintf(" data verification failed for chunk %lld\n", (long long)i); + goto error; + } + } + } + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Barrier failed\n"); + goto error; + } + } + + if (chunk_dims) { + HDfree(chunk_dims); + chunk_dims = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (write_buf) + 
HDfree(write_buf); + if (chunk_dims) + HDfree(chunk_dims); + if (dims) + HDfree(dims); + H5Pclose(dcpl_id); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to check that a dataset composed of multiple chunks + * can be written and read correctly several times in a row. + * When reading back the chunks of the dataset, the file + * dataspace and memory dataspace used are differently shaped. + * The dataset's first dimension grows with the number of MPI + * ranks, while the other dimensions are fixed. + */ +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \ + 100 /* Should be an even divisor of fixed dimension size */ +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE \ + (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE / 10) +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE 1000 +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK 2 +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int) +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME \ + "multi_chunk_dataset_diff_space_overwrite_test" +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset" +#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_NITERS 10 +static int +test_overwrite_multi_chunk_dataset_diff_shape_read(void) +{ + hsize_t *dims = NULL; + hsize_t *chunk_dims = NULL; + hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t start[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + hsize_t count[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK]; + size_t i, data_size, chunk_size, n_chunks_per_rank; + size_t niter; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + void *write_buf = NULL; + int read_buf[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE] + [DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE]; + + TESTING("several overwrites to dataset with multiple chunks using differently shaped dataspaces"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) { + SKIPPED(); + HDprintf(" API functions for basic file, group, dataset, or getting property list aren't " + "supported with this connector\n"); + return 0; + } + + if (NULL == (dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK * + sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset dimensionality\n"); + goto error; + } + + if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK * + sizeof(hsize_t)))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset 
dimensionality\n"); + goto error; + } + + for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (i == 0) { + dims[i] = (hsize_t)mpi_size; + chunk_dims[i] = 1; + } + else { + dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE; + chunk_dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE; + } + } + + for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + chunk_size *= chunk_dims[i]; + chunk_size *= DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; + + for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) + data_size *= dims[i]; + data_size *= DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; + + /* + * Have rank 0 create the dataset, but don't fill it with data yet. + */ + BEGIN_INDEPENDENT_OP(dset_create) + { + if (MAINPROCESS) { + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((group_id = H5Gcreate2(container_group, + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create container sub-group '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((fspace_id = H5Screate_simple( + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, dims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create file dataspace for dataset\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + H5_FAILED(); + HDprintf(" failed to create DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, + chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to set chunking on DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + /* Set dataset space allocation time to Early to ensure all chunk-related metadata is available to + * all other processes when they open the dataset */ + if (H5Pset_alloc_time(dcpl_id, H5D_ALLOC_TIME_EARLY) < 0) { + H5_FAILED(); + HDprintf(" failed to set allocation time on DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME, + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, + fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_create); + } + + /* + * See if a copy of the DCPL reports the correct chunking. 
+ */ + if (H5Pclose(dcpl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve copy of DCPL\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims)); + if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, + retrieved_chunk_dims) < 0) { + H5_FAILED(); + HDprintf(" failed to retrieve chunking info\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + if (chunk_dims[i] != retrieved_chunk_dims[i]) { + H5_FAILED(); + HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified " + "dimensionality\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + + if (NULL == (write_buf = HDmalloc(data_size))) { + H5_FAILED(); + HDprintf(" couldn't allocate buffer for dataset write\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dcpl_id >= 0) { + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + } + H5E_END_TRY; + dcpl_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + /* + * Close and re-open the file on all ranks. + */ + if (H5Gclose(group_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close test's container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Gclose(container_group) < 0) { + H5_FAILED(); + HDprintf(" failed to close container group\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file for data flushing\n"); + INDEPENDENT_OP_ERROR(dset_create); + } + } + } + END_INDEPENDENT_OP(dset_create); + + /* + * Re-open file on all ranks. + */ + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename); + goto error; + } + if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME); + goto error; + } + if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open container sub-group '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME); + goto error; + } + + /* + * Create memory dataspace for read buffer. 
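
Only the read path distinguishes this second test from the previous one: each 1 x 100 chunk selection in the file is read into a 10 x 10 memory buffer. HDF5 requires only that the two selections contain the same number of elements and pairs them in row-major order, so file element (0, k) of a chunk lands at buf[k / 10][k % 10]. A minimal standalone sketch of that dataspace pairing, with the dimensions assumed as in this test:

    #include <assert.h>
    #include "hdf5.h"

    int
    main(void)
    {
        hsize_t fdims[2] = {4, 1000}; /* file dataspace, assuming 4 ranks   */
        hsize_t mdims[2] = {10, 10};  /* differently shaped read buffer     */
        hsize_t start[2] = {0, 0};
        hsize_t count[2] = {1, 100};  /* one chunk's worth of elements      */
        hid_t   fspace_id, mspace_id;
        herr_t  status;

        fspace_id = H5Screate_simple(2, fdims, NULL);
        mspace_id = H5Screate_simple(2, mdims, NULL);

        status = H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL);
        assert(status >= 0);

        /* Both selections hold 100 elements, so H5Dread() accepts the pair and
         * maps them in row-major order: file element (0, k) of the chunk lands
         * at buf[k / 10][k % 10] in the memory buffer. */
        assert(H5Sget_select_npoints(fspace_id) == H5Sget_select_npoints(mspace_id));

        H5Sclose(mspace_id);
        H5Sclose(fspace_id);
        return 0;
    }
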
+ */ + { + hsize_t mdims[] = {DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE, + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE}; + + if ((mspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, + mdims, NULL)) < 0) { + H5_FAILED(); + HDprintf(" failed to create memory dataspace\n"); + goto error; + } + } + + for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) { + count[i] = chunk_dims[i]; + } + + if (MAINPROCESS) + HDprintf("\n"); + for (niter = 0; niter < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_NITERS; niter++) { + if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + BEGIN_INDEPENDENT_OP(dset_write) + { + if (MAINPROCESS) { + memset(write_buf, 0, data_size); + + /* + * Ensure that each underlying chunk contains the values + * + * chunk_index .. (chunk_nelemts - 1) + chunk_index. + * + * That is to say, for a chunk size of 10 x 10, chunk 0 + * contains the values + * + * 0 .. 99 + * + * while the next chunk contains the values + * + * 1 .. 100 + * + * and so on. On each iteration, we add 1 to the previous + * values. + */ + for (i = 0; i < data_size / DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; + i++) { + size_t j; + size_t base; + size_t tot_adjust; + + /* + * Calculate a starting base value by taking the index value mod + * the size of a chunk in each dimension. + */ + for (j = 0, base = i; + j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) + if (chunk_dims[j] > 1 && base >= chunk_dims[j]) + base %= chunk_dims[j]; + + /* + * Calculate the adjustment in each dimension. + */ + for (j = 0, tot_adjust = 0; + j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + tot_adjust += (i % dims[j]) / chunk_dims[j]; + else { + size_t k; + size_t n_faster_elemts; + + /* + * Calculate the number of elements in faster dimensions. + */ + for (k = j + 1, n_faster_elemts = 1; + k < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; k++) + n_faster_elemts *= dims[k]; + + tot_adjust += (((i / n_faster_elemts) / chunk_dims[j]) * + (dims[j + 1] / chunk_dims[j + 1])) + + (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]); + } + } + + ((int *)write_buf)[i] = (int)(base + tot_adjust + niter); + } + + /* + * Write every chunk in the dataset. 
+ */ + if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, + H5S_ALL, H5P_DEFAULT, write_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't write to dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + INDEPENDENT_OP_ERROR(dset_write); + } + } + } + END_INDEPENDENT_OP(dset_write); + + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Barrier failed\n"); + goto error; + } + + if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME, + H5P_DEFAULT)) < 0) { + H5_FAILED(); + HDprintf(" couldn't open dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + if ((fspace_id = H5Dget_space(dset_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't get dataset dataspace\n"); + goto error; + } + + /* + * Each rank reads their respective chunks in the dataset, checking the data for each one. + */ + for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank; + i++) { + size_t j, k; + + if (MAINPROCESS) + HDprintf("\r All ranks reading chunk %zu", i); + + for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) { + if (j == 0) + start[j] = (hsize_t)mpi_rank; + else if (j == (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1)) + /* Fastest changing dimension */ + start[j] = (i * chunk_dims[j]) % dims[j]; + else + start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]); + } + + /* + * Adjust file dataspace selection for next chunk. + */ + if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { + H5_FAILED(); + HDprintf(" failed to set hyperslab selection\n"); + goto error; + } + + for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++) + for (k = 0; k < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++) + read_buf[j][k] = 0; + + if (H5Dread(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, mspace_id, + fspace_id, H5P_DEFAULT, read_buf) < 0) { + H5_FAILED(); + HDprintf(" couldn't read from dataset '%s'\n", + DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME); + goto error; + } + + for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++) { + for (k = 0; k < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++) { + size_t val = + ((j * DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE) + k + i) + + ((hsize_t)mpi_rank * n_chunks_per_rank) + niter; + + if (read_buf[j][k] != (int)val) { + H5_FAILED(); + HDprintf(" data verification failed for chunk %lld\n", (long long)i); + goto error; + } + } + } + } + + if (fspace_id >= 0) { + H5E_BEGIN_TRY + { + H5Sclose(fspace_id); + } + H5E_END_TRY; + fspace_id = H5I_INVALID_HID; + } + if (dset_id >= 0) { + H5E_BEGIN_TRY + { + H5Dclose(dset_id); + } + H5E_END_TRY; + dset_id = H5I_INVALID_HID; + } + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Barrier failed\n"); + goto error; + } + } + + if (chunk_dims) { + HDfree(chunk_dims); + chunk_dims = NULL; + } + + if (dims) { + HDfree(dims); + dims = NULL; + } + + if (write_buf) { + HDfree(write_buf); + write_buf = NULL; + } + + if (H5Sclose(mspace_id) < 0) + TEST_ERROR; + if (H5Gclose(group_id) < 0) + 
TEST_ERROR; + if (H5Gclose(container_group) < 0) + TEST_ERROR; + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + if (write_buf) + HDfree(write_buf); + if (chunk_dims) + HDfree(chunk_dims); + if (dims) + HDfree(dims); + H5Pclose(dcpl_id); + H5Sclose(mspace_id); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Gclose(group_id); + H5Gclose(container_group); + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +int +H5_api_dataset_test_parallel(void) +{ + size_t i; + int nerrors; + + if (MAINPROCESS) { + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Parallel Dataset Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + } + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_dataset_tests); i++) { + nerrors += (*par_dataset_tests[i])() ? 1 : 0; + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" MPI_Barrier() failed!\n"); + } + } + + if (MAINPROCESS) + HDprintf("\n"); + + return nerrors; +} diff --git a/testpar/API/H5_api_dataset_test_parallel.h b/testpar/API/H5_api_dataset_test_parallel.h new file mode 100644 index 00000000000..1e2cbd06369 --- /dev/null +++ b/testpar/API/H5_api_dataset_test_parallel.h @@ -0,0 +1,20 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_DATASET_TEST_PARALLEL_H_ +#define H5_API_DATASET_TEST_PARALLEL_H_ + +#include "H5_api_test_parallel.h" + +int H5_api_dataset_test_parallel(void); + +#endif /* H5_API_DATASET_TEST_PARALLEL_H_ */ diff --git a/testpar/API/H5_api_datatype_test_parallel.c b/testpar/API/H5_api_datatype_test_parallel.c new file mode 100644 index 00000000000..7d090c0478d --- /dev/null +++ b/testpar/API/H5_api_datatype_test_parallel.c @@ -0,0 +1,47 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_datatype_test_parallel.h" + +/* + * The array of parallel datatype tests to be performed. 
+ */ +static int (*par_datatype_tests[])(void) = {NULL}; + +int +H5_api_datatype_test_parallel(void) +{ + size_t i; + int nerrors; + + if (MAINPROCESS) { + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Parallel Datatype Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + } + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_datatype_tests); i++) { + /* nerrors += (*par_datatype_tests[i])() ? 1 : 0; */ + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" MPI_Barrier() failed!\n"); + } + } + + if (MAINPROCESS) + HDprintf("\n"); + + return nerrors; +} diff --git a/testpar/API/H5_api_datatype_test_parallel.h b/testpar/API/H5_api_datatype_test_parallel.h new file mode 100644 index 00000000000..0a2ba5070d9 --- /dev/null +++ b/testpar/API/H5_api_datatype_test_parallel.h @@ -0,0 +1,20 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_DATATYPE_TEST_PARALLEL_H_ +#define H5_API_DATATYPE_TEST_PARALLEL_H_ + +#include "H5_api_test_parallel.h" + +int H5_api_datatype_test_parallel(void); + +#endif /* H5_API_DATATYPE_TEST_PARALLEL_H_ */ diff --git a/testpar/API/H5_api_file_test_parallel.c b/testpar/API/H5_api_file_test_parallel.c new file mode 100644 index 00000000000..20fb2baea8e --- /dev/null +++ b/testpar/API/H5_api_file_test_parallel.c @@ -0,0 +1,367 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_file_test_parallel.h" + +static int test_create_file(void); +static int test_open_file(void); +static int test_split_comm_file_access(void); + +/* + * The array of parallel file tests to be performed. + */ +static int (*par_file_tests[])(void) = { + test_create_file, + test_open_file, + test_split_comm_file_access, +}; + +/* + * A test to ensure that a file can be created in parallel. 
+ */ +#define FILE_CREATE_TEST_FILENAME "test_file_parallel.h5" +static int +test_create_file(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + + TESTING("H5Fcreate"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + if ((file_id = H5Fcreate(FILE_CREATE_TEST_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", FILE_CREATE_TEST_FILENAME); + goto error; + } + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * A test to ensure that a file can be opened in parallel. + */ +static int +test_open_file(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + + TESTING_MULTIPART("H5Fopen"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + TESTING_2("test setup"); + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) + TEST_ERROR; + + PASSED(); + + BEGIN_MULTIPART + { + PART_BEGIN(H5Fopen_rdonly) + { + TESTING_2("H5Fopen in read-only mode"); + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDONLY, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" unable to open file '%s' in read-only mode\n", H5_api_test_parallel_filename); + PART_ERROR(H5Fopen_rdonly); + } + + PASSED(); + } + PART_END(H5Fopen_rdonly); + + if (file_id >= 0) { + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + file_id = H5I_INVALID_HID; + } + + PART_BEGIN(H5Fopen_rdwrite) + { + TESTING_2("H5Fopen in read-write mode"); + + if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" unable to open file '%s' in read-write mode\n", H5_api_test_parallel_filename); + PART_ERROR(H5Fopen_rdwrite); + } + + PASSED(); + } + PART_END(H5Fopen_rdwrite); + + if (file_id >= 0) { + H5E_BEGIN_TRY + { + H5Fclose(file_id); + } + H5E_END_TRY; + file_id = H5I_INVALID_HID; + } + + /* + * XXX: SWMR open flags + */ + } + END_MULTIPART; + + TESTING_2("test cleanup"); + + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Tests file access by a communicator other than MPI_COMM_WORLD. + * + * Splits MPI_COMM_WORLD into two groups, where one (even_comm) contains + * the original processes of even ranks. The other (odd_comm) contains + * the original processes of odd ranks. Processes in even_comm create a + * file, then close it, using even_comm. Processes in old_comm just do + * a barrier using odd_comm. Then they all do a barrier using MPI_COMM_WORLD. + * If the file creation and close does not do correct collective action + * according to the communicator argument, the processes will freeze up + * sooner or later due to MPI_Barrier calls being mixed up. 
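
The split in the test below is driven entirely by the color and key arguments of MPI_Comm_split(): using the rank's parity as the color places even and odd ranks into two disjoint communicators, and using the original rank as the key preserves the relative ordering inside each one. A minimal sketch of just that split, with illustrative variable names:

    #include <stdio.h>
    #include <mpi.h>

    int
    main(int argc, char **argv)
    {
        MPI_Comm sub_comm;
        int      world_rank, sub_rank;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

        /* color = rank parity chooses the sub-communicator (even vs. odd);
         * key = world_rank keeps the original relative ordering inside it. */
        MPI_Comm_split(MPI_COMM_WORLD, world_rank % 2, world_rank, &sub_comm);
        MPI_Comm_rank(sub_comm, &sub_rank);

        printf("world rank %d -> %s communicator, sub rank %d\n",
               world_rank, (world_rank % 2) ? "odd" : "even", sub_rank);

        MPI_Comm_free(&sub_comm);
        MPI_Finalize();
        return 0;
    }
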
+ */ +#define SPLIT_FILE_COMM_TEST_FILE_NAME "split_comm_file.h5" +static int +test_split_comm_file_access(void) +{ + MPI_Comm comm; + MPI_Info info = MPI_INFO_NULL; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + int is_old; + int newrank; + int err_occurred = 0; + + TESTING("file access with a split communicator"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + SKIPPED(); + HDprintf(" API functions for basic file aren't supported with this connector\n"); + return 0; + } + + /* set up MPI parameters */ + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + is_old = mpi_rank % 2; + if (MPI_SUCCESS != MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm)) { + H5_FAILED(); + HDprintf(" failed to split communicator!\n"); + goto error; + } + MPI_Comm_rank(comm, &newrank); + + if (is_old) { + /* odd-rank processes */ + if (MPI_SUCCESS != MPI_Barrier(comm)) { + err_occurred = 1; + goto access_end; + } + } + else { + /* even-rank processes */ + int sub_mpi_rank; /* rank in the sub-comm */ + + MPI_Comm_rank(comm, &sub_mpi_rank); + + /* setup file access template */ + if ((fapl_id = create_mpi_fapl(comm, info, TRUE)) < 0) { + err_occurred = 1; + goto access_end; + } + + /* create the file collectively */ + if ((file_id = H5Fcreate(SPLIT_FILE_COMM_TEST_FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) { + H5_FAILED(); + HDprintf(" couldn't create file '%s'\n", SPLIT_FILE_COMM_TEST_FILE_NAME); + err_occurred = 1; + goto access_end; + } + + /* close the file */ + if (H5Fclose(file_id) < 0) { + H5_FAILED(); + HDprintf(" failed to close file '%s'\n", SPLIT_FILE_COMM_TEST_FILE_NAME); + err_occurred = 1; + goto access_end; + } + + /* delete the test file */ + if (H5Fdelete(SPLIT_FILE_COMM_TEST_FILE_NAME, fapl_id) < 0) { + H5_FAILED(); + HDprintf(" failed to delete file '%s'\n", SPLIT_FILE_COMM_TEST_FILE_NAME); + err_occurred = 1; + goto access_end; + } + + /* Release file-access template */ + if (H5Pclose(fapl_id) < 0) { + err_occurred = 1; + goto access_end; + } + } +access_end: + + /* Get the collective results about whether an error occurred */ + if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &err_occurred, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Allreduce failed\n"); + goto error; + } + + if (err_occurred) { + H5_FAILED(); + HDprintf(" an error occurred on only some ranks during split-communicator file access! 
- " + "collectively failing\n"); + goto error; + } + + if (MPI_SUCCESS != MPI_Comm_free(&comm)) { + H5_FAILED(); + HDprintf(" MPI_Comm_free failed\n"); + goto error; + } + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + H5_FAILED(); + HDprintf(" MPI_Barrier on MPI_COMM_WORLD failed\n"); + goto error; + } + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(fapl_id); + H5Fclose(file_id); + } + H5E_END_TRY; + + return 1; +} + +/* + * Cleanup temporary test files + */ +static void +cleanup_files(void) +{ + hid_t fapl_id = H5I_INVALID_HID; + + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) { + if (MAINPROCESS) + HDprintf(" failed to create FAPL for deleting test files\n"); + return; + } + + H5Fdelete(FILE_CREATE_TEST_FILENAME, fapl_id); + + /* The below file is deleted as part of the test */ + /* H5Fdelete(SPLIT_FILE_COMM_TEST_FILE_NAME, H5P_DEFAULT); */ + + if (H5Pclose(fapl_id) < 0) { + if (MAINPROCESS) + HDprintf(" failed to close FAPL used for deleting test files\n"); + return; + } +} + +int +H5_api_file_test_parallel(void) +{ + size_t i; + int nerrors; + + if (MAINPROCESS) { + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Parallel File Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + } + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_file_tests); i++) { + nerrors += (*par_file_tests[i])() ? 1 : 0; + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" MPI_Barrier() failed!\n"); + } + } + + if (MAINPROCESS) { + HDprintf("\n"); + HDprintf("Cleaning up testing files\n"); + } + + cleanup_files(); + + return nerrors; +} diff --git a/testpar/API/H5_api_file_test_parallel.h b/testpar/API/H5_api_file_test_parallel.h new file mode 100644 index 00000000000..aac98008c3e --- /dev/null +++ b/testpar/API/H5_api_file_test_parallel.h @@ -0,0 +1,20 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_FILE_TEST_PARALLEL_H_ +#define H5_API_FILE_TEST_PARALLEL_H_ + +#include "H5_api_test_parallel.h" + +int H5_api_file_test_parallel(void); + +#endif /* H5_API_FILE_TEST_PARALLEL_H_ */ diff --git a/testpar/API/H5_api_group_test_parallel.c b/testpar/API/H5_api_group_test_parallel.c new file mode 100644 index 00000000000..d6d8f188376 --- /dev/null +++ b/testpar/API/H5_api_group_test_parallel.c @@ -0,0 +1,47 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. 
* + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_group_test_parallel.h" + +/* + * The array of parallel group tests to be performed. + */ +static int (*par_group_tests[])(void) = {NULL}; + +int +H5_api_group_test_parallel(void) +{ + size_t i; + int nerrors; + + if (MAINPROCESS) { + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Parallel Group Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + } + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_group_tests); i++) { + /* nerrors += (*par_group_tests[i])() ? 1 : 0; */ + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" MPI_Barrier() failed!\n"); + } + } + + if (MAINPROCESS) + HDprintf("\n"); + + return nerrors; +} diff --git a/testpar/API/H5_api_group_test_parallel.h b/testpar/API/H5_api_group_test_parallel.h new file mode 100644 index 00000000000..87dd24fa76c --- /dev/null +++ b/testpar/API/H5_api_group_test_parallel.h @@ -0,0 +1,20 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_GROUP_TEST_PARALLEL_H_ +#define H5_API_GROUP_TEST_PARALLEL_H_ + +#include "H5_api_test_parallel.h" + +int H5_api_group_test_parallel(void); + +#endif /* H5_API_GROUP_TEST_PARALLEL_H_ */ diff --git a/testpar/API/H5_api_link_test_parallel.c b/testpar/API/H5_api_link_test_parallel.c new file mode 100644 index 00000000000..fb865a0e71f --- /dev/null +++ b/testpar/API/H5_api_link_test_parallel.c @@ -0,0 +1,47 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_link_test_parallel.h" + +/* + * The array of parallel link tests to be performed. + */ +static int (*par_link_tests[])(void) = {NULL}; + +int +H5_api_link_test_parallel(void) +{ + size_t i; + int nerrors; + + if (MAINPROCESS) { + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Parallel Link Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + } + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_link_tests); i++) { + /* nerrors += (*par_link_tests[i])() ? 
1 : 0; */ + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" MPI_Barrier() failed!\n"); + } + } + + if (MAINPROCESS) + HDprintf("\n"); + + return nerrors; +} diff --git a/testpar/API/H5_api_link_test_parallel.h b/testpar/API/H5_api_link_test_parallel.h new file mode 100644 index 00000000000..dbf0fc7b89f --- /dev/null +++ b/testpar/API/H5_api_link_test_parallel.h @@ -0,0 +1,20 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_LINK_TEST_PARALLEL_H_ +#define H5_API_LINK_TEST_PARALLEL_H_ + +#include "H5_api_test_parallel.h" + +int H5_api_link_test_parallel(void); + +#endif /* H5_API_LINK_TEST_PARALLEL_H_ */ diff --git a/testpar/API/H5_api_misc_test_parallel.c b/testpar/API/H5_api_misc_test_parallel.c new file mode 100644 index 00000000000..0dc85eba651 --- /dev/null +++ b/testpar/API/H5_api_misc_test_parallel.c @@ -0,0 +1,47 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_misc_test_parallel.h" + +/* + * The array of parallel miscellaneous tests to be performed. + */ +static int (*par_misc_tests[])(void) = {NULL}; + +int +H5_api_misc_test_parallel(void) +{ + size_t i; + int nerrors; + + if (MAINPROCESS) { + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Parallel Miscellaneous Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + } + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_misc_tests); i++) { + /* nerrors += (*par_misc_tests[i])() ? 1 : 0; */ + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" MPI_Barrier() failed!\n"); + } + } + + if (MAINPROCESS) + HDprintf("\n"); + + return nerrors; +} diff --git a/testpar/API/H5_api_misc_test_parallel.h b/testpar/API/H5_api_misc_test_parallel.h new file mode 100644 index 00000000000..84553a98fb9 --- /dev/null +++ b/testpar/API/H5_api_misc_test_parallel.h @@ -0,0 +1,20 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_MISC_TEST_PARALLEL_H_ +#define H5_API_MISC_TEST_PARALLEL_H_ + +#include "H5_api_test_parallel.h" + +int H5_api_misc_test_parallel(void); + +#endif /* H5_API_MISC_TEST_PARALLEL_H_ */ diff --git a/testpar/API/H5_api_object_test_parallel.c b/testpar/API/H5_api_object_test_parallel.c new file mode 100644 index 00000000000..a264eb23890 --- /dev/null +++ b/testpar/API/H5_api_object_test_parallel.c @@ -0,0 +1,47 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_object_test_parallel.h" + +/* + * The array of parallel object tests to be performed. + */ +static int (*par_object_tests[])(void) = {NULL}; + +int +H5_api_object_test_parallel(void) +{ + size_t i; + int nerrors; + + if (MAINPROCESS) { + HDprintf("**********************************************\n"); + HDprintf("* *\n"); + HDprintf("* API Parallel Object Tests *\n"); + HDprintf("* *\n"); + HDprintf("**********************************************\n\n"); + } + + for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_object_tests); i++) { + /* nerrors += (*par_object_tests[i])() ? 1 : 0; */ + + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" MPI_Barrier() failed!\n"); + } + } + + if (MAINPROCESS) + HDprintf("\n"); + + return nerrors; +} diff --git a/testpar/API/H5_api_object_test_parallel.h b/testpar/API/H5_api_object_test_parallel.h new file mode 100644 index 00000000000..6a8569f025d --- /dev/null +++ b/testpar/API/H5_api_object_test_parallel.h @@ -0,0 +1,20 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_OBJECT_TEST_PARALLEL_H_ +#define H5_API_OBJECT_TEST_PARALLEL_H_ + +#include "H5_api_test_parallel.h" + +int H5_api_object_test_parallel(void); + +#endif /* H5_API_OBJECT_TEST_PARALLEL_H_ */ diff --git a/testpar/API/H5_api_test_parallel.c b/testpar/API/H5_api_test_parallel.c new file mode 100644 index 00000000000..45fa4ecb887 --- /dev/null +++ b/testpar/API/H5_api_test_parallel.c @@ -0,0 +1,338 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "H5_api_test_util.h" +#include "H5_api_test_parallel.h" + +#include "H5_api_attribute_test_parallel.h" +#include "H5_api_dataset_test_parallel.h" +#include "H5_api_datatype_test_parallel.h" +#include "H5_api_file_test_parallel.h" +#include "H5_api_group_test_parallel.h" +#include "H5_api_link_test_parallel.h" +#include "H5_api_misc_test_parallel.h" +#include "H5_api_object_test_parallel.h" +#ifdef H5_API_TEST_HAVE_ASYNC +#include "H5_api_async_test_parallel.h" +#endif + +char H5_api_test_parallel_filename[H5_API_TEST_FILENAME_MAX_LENGTH]; + +const char *test_path_prefix; + +size_t n_tests_run_g; +size_t n_tests_passed_g; +size_t n_tests_failed_g; +size_t n_tests_skipped_g; + +int mpi_size; +int mpi_rank; + +/* X-macro to define the following for each test: + * - enum type + * - name + * - test function + * - enabled by default + */ +#ifdef H5_API_TEST_HAVE_ASYNC +#define H5_API_PARALLEL_TESTS \ + X(H5_API_TEST_NULL, "", NULL, 0) \ + X(H5_API_TEST_FILE, "file", H5_api_file_test_parallel, 1) \ + X(H5_API_TEST_GROUP, "group", H5_api_group_test_parallel, 1) \ + X(H5_API_TEST_DATASET, "dataset", H5_api_dataset_test_parallel, 1) \ + X(H5_API_TEST_DATATYPE, "datatype", H5_api_datatype_test_parallel, 1) \ + X(H5_API_TEST_ATTRIBUTE, "attribute", H5_api_attribute_test_parallel, 1) \ + X(H5_API_TEST_LINK, "link", H5_api_link_test_parallel, 1) \ + X(H5_API_TEST_OBJECT, "object", H5_api_object_test_parallel, 1) \ + X(H5_API_TEST_MISC, "misc", H5_api_misc_test_parallel, 1) \ + X(H5_API_TEST_ASYNC, "async", H5_api_async_test_parallel, 1) \ + X(H5_API_TEST_MAX, "", NULL, 0) +#else +#define H5_API_PARALLEL_TESTS \ + X(H5_API_TEST_NULL, "", NULL, 0) \ + X(H5_API_TEST_FILE, "file", H5_api_file_test_parallel, 1) \ + X(H5_API_TEST_GROUP, "group", H5_api_group_test_parallel, 1) \ + X(H5_API_TEST_DATASET, "dataset", H5_api_dataset_test_parallel, 1) \ + X(H5_API_TEST_DATATYPE, "datatype", H5_api_datatype_test_parallel, 1) \ + X(H5_API_TEST_ATTRIBUTE, "attribute", H5_api_attribute_test_parallel, 1) \ + X(H5_API_TEST_LINK, "link", H5_api_link_test_parallel, 1) \ + X(H5_API_TEST_OBJECT, "object", H5_api_object_test_parallel, 1) \ + X(H5_API_TEST_MISC, "misc", H5_api_misc_test_parallel, 1) \ + X(H5_API_TEST_MAX, "", NULL, 0) +#endif + +#define X(a, b, c, d) a, +enum H5_api_test_type { H5_API_PARALLEL_TESTS }; +#undef X +#define X(a, b, c, d) b, +static const char *const H5_api_test_name[] = 
{H5_API_PARALLEL_TESTS}; +#undef X +#define X(a, b, c, d) c, +static int (*H5_api_test_func[])(void) = {H5_API_PARALLEL_TESTS}; +#undef X +#define X(a, b, c, d) d, +static int H5_api_test_enabled[] = {H5_API_PARALLEL_TESTS}; +#undef X + +static enum H5_api_test_type +H5_api_test_name_to_type(const char *test_name) +{ + enum H5_api_test_type i = 0; + + while (strcmp(H5_api_test_name[i], test_name) && i != H5_API_TEST_MAX) + i++; + + return ((i == H5_API_TEST_MAX) ? H5_API_TEST_NULL : i); +} + +static void +H5_api_test_run(void) +{ + enum H5_api_test_type i; + + for (i = H5_API_TEST_FILE; i < H5_API_TEST_MAX; i++) + if (H5_api_test_enabled[i]) + (void)H5_api_test_func[i](); +} + +hid_t +create_mpi_fapl(MPI_Comm comm, MPI_Info info, hbool_t coll_md_read) +{ + hid_t ret_pl = H5I_INVALID_HID; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + if ((ret_pl = H5Pcreate(H5P_FILE_ACCESS)) < 0) + goto error; + + if (H5Pset_fapl_mpio(ret_pl, comm, info) < 0) + goto error; + if (H5Pset_all_coll_metadata_ops(ret_pl, coll_md_read) < 0) + goto error; + if (H5Pset_coll_metadata_write(ret_pl, TRUE) < 0) + goto error; + + return ret_pl; + +error: + return H5I_INVALID_HID; +} /* end create_mpi_fapl() */ + +/* + * Generates random dimensions for a dataspace. The first dimension + * is always `mpi_size` to allow for convenient subsetting; the rest + * of the dimensions are randomized. + */ +int +generate_random_parallel_dimensions(int space_rank, hsize_t **dims_out) +{ + hsize_t *dims = NULL; + size_t i; + + if (space_rank <= 0) + goto error; + + if (NULL == (dims = HDmalloc((size_t)space_rank * sizeof(hsize_t)))) + goto error; + if (MAINPROCESS) { + for (i = 0; i < (size_t)space_rank; i++) { + if (i == 0) + dims[i] = (hsize_t)mpi_size; + else + dims[i] = (hsize_t)((rand() % MAX_DIM_SIZE) + 1); + } + } + + /* + * Ensure that the dataset dimensions are uniform across ranks. + */ + if (MPI_SUCCESS != MPI_Bcast(dims, space_rank, MPI_UNSIGNED_LONG_LONG, 0, MPI_COMM_WORLD)) + goto error; + + *dims_out = dims; + + return 0; + +error: + if (dims) + HDfree(dims); + + return -1; +} + +int +main(int argc, char **argv) +{ + const char *vol_connector_name; + unsigned seed; + hid_t fapl_id = H5I_INVALID_HID; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Simple argument checking, TODO can improve that later */ + if (argc > 1) { + enum H5_api_test_type i = H5_api_test_name_to_type(argv[1]); + if (i != H5_API_TEST_NULL) { + /* Run only specific API test */ + memset(H5_api_test_enabled, 0, sizeof(H5_api_test_enabled)); + H5_api_test_enabled[i] = 1; + } + } + + /* + * Make sure that HDF5 is initialized on all MPI ranks before proceeding. + * This is important for certain VOL connectors which may require a + * collective initialization. 
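
The test table above is an X-macro: H5_API_PARALLEL_TESTS is expanded four times with different definitions of X to generate the enum constants, the name strings, the function-pointer table, and the enabled flags from a single list, which keeps the four tables in sync by construction. A stripped-down sketch of the same idiom, using illustrative names:

    #include <stdio.h>

    /* One list drives every table; each expansion redefines X to pick a column. */
    #define COLOR_TABLE          \
        X(COLOR_RED,   "red")    \
        X(COLOR_GREEN, "green")  \
        X(COLOR_BLUE,  "blue")

    #define X(sym, name) sym,
    enum color { COLOR_TABLE COLOR_MAX };
    #undef X

    #define X(sym, name) name,
    static const char *const color_name[] = {COLOR_TABLE};
    #undef X

    int
    main(void)
    {
        int i;

        /* Index i in the enum always matches index i in the name table because
         * both expansions come from the same COLOR_TABLE list. */
        for (i = 0; i < COLOR_MAX; i++)
            printf("%d -> %s\n", i, color_name[i]);

        return 0;
    }
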
+ */ + H5open(); + + n_tests_run_g = 0; + n_tests_passed_g = 0; + n_tests_failed_g = 0; + n_tests_skipped_g = 0; + + if (MAINPROCESS) { + seed = (unsigned)HDtime(NULL); + } + + if (mpi_size > 1) { + if (MPI_SUCCESS != MPI_Bcast(&seed, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf("Couldn't broadcast test seed\n"); + goto error; + } + } + + srand(seed); + + if (NULL == (test_path_prefix = HDgetenv(HDF5_API_TEST_PATH_PREFIX))) + test_path_prefix = ""; + + HDsnprintf(H5_api_test_parallel_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s%s", test_path_prefix, + PARALLEL_TEST_FILE_NAME); + + if (NULL == (vol_connector_name = HDgetenv(HDF5_VOL_CONNECTOR))) { + if (MAINPROCESS) + HDprintf("No VOL connector selected; using native VOL connector\n"); + vol_connector_name = "native"; + } + + if (MAINPROCESS) { + HDprintf("Running parallel API tests with VOL connector '%s'\n\n", vol_connector_name); + HDprintf("Test parameters:\n"); + HDprintf(" - Test file name: '%s'\n", H5_api_test_parallel_filename); + HDprintf(" - Number of MPI ranks: %d\n", mpi_size); + HDprintf(" - Test seed: %u\n", seed); + HDprintf("\n\n"); + } + + /* Retrieve the VOL cap flags - work around an HDF5 + * library issue by creating a FAPL + */ + BEGIN_INDEPENDENT_OP(get_capability_flags) + { + if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, FALSE)) < 0) { + if (MAINPROCESS) + HDfprintf(stderr, "Unable to create FAPL\n"); + INDEPENDENT_OP_ERROR(get_capability_flags); + } + + vol_cap_flags_g = H5VL_CAP_FLAG_NONE; + if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0) { + if (MAINPROCESS) + HDfprintf(stderr, "Unable to retrieve VOL connector capability flags\n"); + INDEPENDENT_OP_ERROR(get_capability_flags); + } + } + END_INDEPENDENT_OP(get_capability_flags); + + /* + * Create the file that will be used for all of the tests, + * except for those which test file creation. + */ + BEGIN_INDEPENDENT_OP(create_test_container) + { + if (MAINPROCESS) { + if (create_test_container(H5_api_test_parallel_filename, vol_cap_flags_g) < 0) { + HDprintf(" failed to create testing container file '%s'\n", H5_api_test_parallel_filename); + INDEPENDENT_OP_ERROR(create_test_container); + } + } + } + END_INDEPENDENT_OP(create_test_container); + + /* Run all the tests that are enabled */ + H5_api_test_run(); + + if (MAINPROCESS) + HDprintf("Cleaning up testing files\n"); + H5Fdelete(H5_api_test_parallel_filename, fapl_id); + + if (n_tests_run_g > 0) { + if (MAINPROCESS) + HDprintf("The below statistics are minimum values due to the possibility of some ranks failing a " + "test while others pass:\n"); + + if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &n_tests_passed_g, 1, MPI_UNSIGNED_LONG_LONG, MPI_MIN, + MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" failed to collect consensus about the minimum number of tests that passed -- " + "reporting rank 0's (possibly inaccurate) value\n"); + } + + if (MAINPROCESS) + HDprintf("%s%zu/%zu (%.2f%%) API tests passed across all ranks with VOL connector '%s'\n", + n_tests_passed_g > 0 ? 
"At least " : "", n_tests_passed_g, n_tests_run_g, + ((double)n_tests_passed_g / (double)n_tests_run_g * 100.0), vol_connector_name); + + if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &n_tests_failed_g, 1, MPI_UNSIGNED_LONG_LONG, MPI_MIN, + MPI_COMM_WORLD)) { + if (MAINPROCESS) + HDprintf(" failed to collect consensus about the minimum number of tests that failed -- " + "reporting rank 0's (possibly inaccurate) value\n"); + } + + if (MAINPROCESS) { + HDprintf("%s%zu/%zu (%.2f%%) API tests did not pass across all ranks with VOL connector '%s'\n", + n_tests_failed_g > 0 ? "At least " : "", n_tests_failed_g, n_tests_run_g, + ((double)n_tests_failed_g / (double)n_tests_run_g * 100.0), vol_connector_name); + + HDprintf("%zu/%zu (%.2f%%) API tests were skipped with VOL connector '%s'\n", n_tests_skipped_g, + n_tests_run_g, ((double)n_tests_skipped_g / (double)n_tests_run_g * 100.0), + vol_connector_name); + } + } + + if (fapl_id >= 0 && H5Pclose(fapl_id) < 0) { + if (MAINPROCESS) + HDprintf(" failed to close MPI FAPL\n"); + } + + H5close(); + + MPI_Finalize(); + + HDexit(EXIT_SUCCESS); + +error: + H5E_BEGIN_TRY + { + H5Pclose(fapl_id); + } + H5E_END_TRY; + + MPI_Finalize(); + + HDexit(EXIT_FAILURE); +} diff --git a/testpar/API/H5_api_test_parallel.h b/testpar/API/H5_api_test_parallel.h new file mode 100644 index 00000000000..6df83e87216 --- /dev/null +++ b/testpar/API/H5_api_test_parallel.h @@ -0,0 +1,188 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifndef H5_API_TEST_PARALLEL_H +#define H5_API_TEST_PARALLEL_H + +#include + +#include "testpar.h" + +#include "H5_api_test.h" + +/* Define H5VL_VERSION if not already defined */ +#ifndef H5VL_VERSION +#define H5VL_VERSION 0 +#endif + +/* Define macro to wait forever depending on version */ +#if H5VL_VERSION >= 2 +#define H5_API_TEST_WAIT_FOREVER H5ES_WAIT_FOREVER +#else +#define H5_API_TEST_WAIT_FOREVER UINT64_MAX +#endif + +#define PARALLEL_TEST_FILE_NAME "H5_api_test_parallel.h5" +extern char H5_api_test_parallel_filename[]; + +#undef TESTING +#undef TESTING_2 +#undef PASSED +#undef H5_FAILED +#undef H5_WARNING +#undef SKIPPED +#undef PUTS_ERROR +#undef TEST_ERROR +#undef STACK_ERROR +#undef FAIL_STACK_ERROR +#undef FAIL_PUTS_ERROR +#undef TESTING_MULTIPART + +#define TESTING(WHAT) \ + { \ + if (MAINPROCESS) { \ + printf("Testing %-62s", WHAT); \ + fflush(stdout); \ + } \ + n_tests_run_g++; \ + } +#define TESTING_2(WHAT) \ + { \ + if (MAINPROCESS) { \ + printf(" Testing %-60s", WHAT); \ + fflush(stdout); \ + } \ + n_tests_run_g++; \ + } +#define PASSED() \ + { \ + if (MAINPROCESS) { \ + puts(" PASSED"); \ + fflush(stdout); \ + } \ + n_tests_passed_g++; \ + } +#define H5_FAILED() \ + { \ + if (MAINPROCESS) { \ + puts("*FAILED*"); \ + fflush(stdout); \ + } \ + n_tests_failed_g++; \ + } +#define H5_WARNING() \ + { \ + if (MAINPROCESS) { \ + puts("*WARNING*"); \ + fflush(stdout); \ + } \ + } +#define SKIPPED() \ + { \ + if (MAINPROCESS) { \ + puts(" -SKIP-"); \ + fflush(stdout); \ + } \ + n_tests_skipped_g++; \ + } +#define PUTS_ERROR(s) \ + { \ + if (MAINPROCESS) { \ + puts(s); \ + AT(); \ + } \ + goto error; \ + } +#define TEST_ERROR \ + { \ + H5_FAILED(); \ + if (MAINPROCESS) { \ + AT(); \ + } \ + goto error; \ + } +#define STACK_ERROR \ + { \ + if (MAINPROCESS) { \ + H5Eprint2(H5E_DEFAULT, stdout); \ + } \ + goto error; \ + } +#define FAIL_STACK_ERROR \ + { \ + H5_FAILED(); \ + if (MAINPROCESS) { \ + AT(); \ + H5Eprint2(H5E_DEFAULT, stdout); \ + } \ + goto error; \ + } +#define FAIL_PUTS_ERROR(s) \ + { \ + H5_FAILED(); \ + if (MAINPROCESS) { \ + AT(); \ + puts(s); \ + } \ + goto error; \ + } +#define TESTING_MULTIPART(WHAT) \ + { \ + if (MAINPROCESS) { \ + printf("Testing %-62s", WHAT); \ + HDputs(""); \ + fflush(stdout); \ + } \ + } + +/* + * Macros to surround an action that will be performed non-collectively. Once the + * operation has completed, a consensus will be formed by all ranks on whether the + * operation failed. 
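+ *
+ * A typical usage pattern looks like the sketch below (the operation name,
+ * the H5Fopen() call, and the variables are illustrative only; the enclosing
+ * function is expected to provide an "error:" label, as main() does):
+ *
+ *     BEGIN_INDEPENDENT_OP(open_file)
+ *     {
+ *         if (MAINPROCESS) {
+ *             if ((file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id)) < 0)
+ *                 INDEPENDENT_OP_ERROR(open_file);
+ *         }
+ *     }
+ *     END_INDEPENDENT_OP(open_file);
+ *
+ * INDEPENDENT_OP_ERROR() records the local failure and jumps to the end of the
+ * block; END_INDEPENDENT_OP() then reduces the failure flag across
+ * MPI_COMM_WORLD so that every rank takes the same error path.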
+ */ +#define BEGIN_INDEPENDENT_OP(op_name) \ + { \ + hbool_t ind_op_failed = FALSE; \ + \ + { + +#define END_INDEPENDENT_OP(op_name) \ + } \ + \ + op_##op_name##_end : if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &ind_op_failed, 1, MPI_C_BOOL, \ + MPI_LOR, MPI_COMM_WORLD)) \ + { \ + if (MAINPROCESS) \ + HDprintf( \ + " failed to collect consensus about whether non-collective operation was successful\n"); \ + goto error; \ + } \ + \ + if (ind_op_failed) { \ + if (MAINPROCESS) \ + HDprintf(" failure detected during non-collective operation - all other ranks will now fail " \ + "too\n"); \ + goto error; \ + } \ + } + +#define INDEPENDENT_OP_ERROR(op_name) \ + ind_op_failed = TRUE; \ + goto op_##op_name##_end; + +hid_t create_mpi_fapl(MPI_Comm comm, MPI_Info info, hbool_t coll_md_read); +int generate_random_parallel_dimensions(int space_rank, hsize_t **dims_out); + +extern int mpi_size; +extern int mpi_rank; + +#endif diff --git a/testpar/API/t_bigio.c b/testpar/API/t_bigio.c new file mode 100644 index 00000000000..3e18c8f275f --- /dev/null +++ b/testpar/API/t_bigio.c @@ -0,0 +1,1942 @@ + +#include "hdf5.h" +#include "testphdf5.h" + +#if 0 +#include "H5Dprivate.h" /* For Chunk tests */ +#endif + +/* FILENAME and filenames must have the same number of names */ +const char *FILENAME[3] = {"bigio_test.h5", "single_rank_independent_io.h5", NULL}; + +/* Constants definitions */ +#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */ + +/* Define some handy debugging shorthands, routines, ... */ +/* debugging tools */ + +#define MAIN_PROCESS (mpi_rank_g == 0) /* define process 0 as main process */ + +/* Constants definitions */ +#define RANK 2 + +#define IN_ORDER 1 +#define OUT_OF_ORDER 2 + +#define DATASET1 "DSET1" +#define DATASET2 "DSET2" +#define DATASET3 "DSET3" +#define DATASET4 "DSET4" +#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/ +#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */ +#define DXFER_BIGCOUNT (1 << 29) + +#define HYPER 1 +#define POINT 2 +#define ALL 3 + +/* Dataset data type. Int's can be easily octo dumped. */ +typedef hsize_t B_DATATYPE; + +int facc_type = FACC_MPIO; /*Test file access type */ +int dxfer_coll_type = DXFER_COLLECTIVE_IO; +size_t bigcount = (size_t) /* DXFER_BIGCOUNT */ 1310720; +int nerrors = 0; +static int mpi_size_g, mpi_rank_g; + +hsize_t space_dim1 = SPACE_DIM1 * 256; // 4096 +hsize_t space_dim2 = SPACE_DIM2; + +static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, + int file_selection, int mem_selection, int mode); + +/* + * Setup the coordinates for point selection. + */ +static void +set_coords(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, + hsize_t coords[], int order) +{ + hsize_t i, j, k = 0, m, n, s1, s2; + + if (OUT_OF_ORDER == order) + k = (num_points * RANK) - 1; + else if (IN_ORDER == order) + k = 0; + + s1 = start[0]; + s2 = start[1]; + + for (i = 0; i < count[0]; i++) + for (j = 0; j < count[1]; j++) + for (m = 0; m < block[0]; m++) + for (n = 0; n < block[1]; n++) + if (OUT_OF_ORDER == order) { + coords[k--] = s2 + (stride[1] * j) + n; + coords[k--] = s1 + (stride[0] * i) + m; + } + else if (IN_ORDER == order) { + coords[k++] = s1 + stride[0] * i + m; + coords[k++] = s2 + stride[1] * j + n; + } +} + +/* + * Fill the dataset with trivial data for testing. + * Assume dimension rank is 2 and data is stored contiguous. 
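+ * Each element is assigned (i + start[0]) * 100 + (j + start[1] + 1), i.e. a
+ * value derived from its global (row, column) position, so the reader side can
+ * regenerate the expected values for any selection and compare against them.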
+ */ +static void +fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE *dataset) +{ + B_DATATYPE *dataptr = dataset; + hsize_t i, j; + + /* put some trivial data in the data_array */ + for (i = 0; i < block[0]; i++) { + for (j = 0; j < block[1]; j++) { + *dataptr = (B_DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1)); + dataptr++; + } + } +} + +/* + * Setup the coordinates for point selection. + */ +void +point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, + hsize_t coords[], int order) +{ + hsize_t i, j, k = 0, m, n, s1, s2; + + HDcompile_assert(RANK == 2); + + if (OUT_OF_ORDER == order) + k = (num_points * RANK) - 1; + else if (IN_ORDER == order) + k = 0; + + s1 = start[0]; + s2 = start[1]; + + for (i = 0; i < count[0]; i++) + for (j = 0; j < count[1]; j++) + for (m = 0; m < block[0]; m++) + for (n = 0; n < block[1]; n++) + if (OUT_OF_ORDER == order) { + coords[k--] = s2 + (stride[1] * j) + n; + coords[k--] = s1 + (stride[0] * i) + m; + } + else if (IN_ORDER == order) { + coords[k++] = s1 + stride[0] * i + m; + coords[k++] = s2 + stride[1] * j + n; + } + + if (VERBOSE_MED) { + HDprintf("start[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " + "count[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " + "stride[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " + "block[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " + "total datapoints=%" PRIuHSIZE "\n", + start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1], + block[0] * block[1] * count[0] * count[1]); + k = 0; + for (i = 0; i < num_points; i++) { + HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + k += 2; + } + } +} + +/* + * Print the content of the dataset. + */ +static void +dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE *dataset) +{ + B_DATATYPE *dataptr = dataset; + hsize_t i, j; + + /* print the column heading */ + HDprintf("%-8s", "Cols:"); + for (j = 0; j < block[1]; j++) { + HDprintf("%3" PRIuHSIZE " ", start[1] + j); + } + HDprintf("\n"); + + /* print the slab data */ + for (i = 0; i < block[0]; i++) { + HDprintf("Row %2" PRIuHSIZE ": ", i + start[0]); + for (j = 0; j < block[1]; j++) { + HDprintf("%" PRIuHSIZE " ", *dataptr++); + } + HDprintf("\n"); + } +} + +/* + * Print the content of the dataset. 
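+ * More precisely: dump both buffers when VERBOSE_MED is set, then compare the
+ * data read back against the original values element by element and return
+ * the number of mismatches found.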
+ */ +static int +verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], B_DATATYPE *dataset, + B_DATATYPE *original) +{ + hsize_t i, j; + int vrfyerrs; + + /* print it if VERBOSE_MED */ + if (VERBOSE_MED) { + HDprintf("verify_data dumping:::\n"); + HDprintf("start(%" PRIuHSIZE ", %" PRIuHSIZE "), " + "count(%" PRIuHSIZE ", %" PRIuHSIZE "), " + "stride(%" PRIuHSIZE ", %" PRIuHSIZE "), " + "block(%" PRIuHSIZE ", %" PRIuHSIZE ")\n", + start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1]); + HDprintf("original values:\n"); + dataset_print(start, block, original); + HDprintf("compared values:\n"); + dataset_print(start, block, dataset); + } + + vrfyerrs = 0; + for (i = 0; i < block[0]; i++) { + for (j = 0; j < block[1]; j++) { + if (*dataset != *original) { + if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) { + HDprintf("Dataset Verify failed at [%" PRIuHSIZE "][%" PRIuHSIZE "]" + "(row %" PRIuHSIZE ", col %" PRIuHSIZE "): " + "expect %" PRIuHSIZE ", got %" PRIuHSIZE "\n", + i, j, i + start[0], j + start[1], *(original), *(dataset)); + } + dataset++; + original++; + } + } + } + if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("[more errors ...]\n"); + if (vrfyerrs) + HDprintf("%d errors found in verify_data\n", vrfyerrs); + return (vrfyerrs); +} + +/* Set up the selection */ +static void +ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], + int mode) +{ + + switch (mode) { + + case BYROW_CONT: + /* Each process takes a slabs of rows. */ + block[0] = 1; + block[1] = 1; + stride[0] = 1; + stride[1] = 1; + count[0] = space_dim1; + count[1] = space_dim2; + start[0] = (hsize_t)mpi_rank * count[0]; + start[1] = 0; + + break; + + case BYROW_DISCONT: + /* Each process takes several disjoint blocks. */ + block[0] = 1; + block[1] = 1; + stride[0] = 3; + stride[1] = 3; + count[0] = space_dim1 / (stride[0] * block[0]); + count[1] = (space_dim2) / (stride[1] * block[1]); + start[0] = space_dim1 * (hsize_t)mpi_rank; + start[1] = 0; + + break; + + case BYROW_SELECTNONE: + /* Each process takes a slabs of rows, there are + no selections for the last process. */ + block[0] = 1; + block[1] = 1; + stride[0] = 1; + stride[1] = 1; + count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : space_dim1); + count[1] = space_dim2; + start[0] = (hsize_t)mpi_rank * count[0]; + start[1] = 0; + + break; + + case BYROW_SELECTUNBALANCE: + /* The first one-third of the number of processes only + select top half of the domain, The rest will select the bottom + half of the domain. */ + + block[0] = 1; + count[0] = 2; + stride[0] = (hsize_t)(space_dim1 * (hsize_t)mpi_size / 4 + 1); + block[1] = space_dim2; + count[1] = 1; + start[1] = 0; + stride[1] = 1; + if ((mpi_rank * 3) < (mpi_size * 2)) + start[0] = (hsize_t)mpi_rank; + else + start[0] = 1 + space_dim1 * (hsize_t)mpi_size / 2 + (hsize_t)(mpi_rank - 2 * mpi_size / 3); + break; + + case BYROW_SELECTINCHUNK: + /* Each process will only select one chunk */ + + block[0] = 1; + count[0] = 1; + start[0] = (hsize_t)mpi_rank * space_dim1; + stride[0] = 1; + block[1] = space_dim2; + count[1] = 1; + stride[1] = 1; + start[1] = 0; + + break; + + default: + /* Unknown mode. Set it to cover the whole dataset. 
*/ + block[0] = space_dim1 * (hsize_t)mpi_size; + block[1] = space_dim2; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = 0; + + break; + } + if (VERBOSE_MED) { + HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + } +} + +/* + * Fill the dataset with trivial data for testing. + * Assume dimension rank is 2. + */ +static void +ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset, + int mem_selection) +{ + DATATYPE *dataptr = dataset; + DATATYPE *tmptr; + hsize_t i, j, k1, k2, k = 0; + /* put some trivial data in the data_array */ + tmptr = dataptr; + + /* assign the disjoint block (two-dimensional)data array value + through the pointer */ + + for (k1 = 0; k1 < count[0]; k1++) { + for (i = 0; i < block[0]; i++) { + for (k2 = 0; k2 < count[1]; k2++) { + for (j = 0; j < block[1]; j++) { + + if (ALL != mem_selection) { + dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] + + k2 * stride[1] + j); + } + else { + dataptr = tmptr + k; + k++; + } + + *dataptr = (DATATYPE)(k1 + k2 + i + j); + } + } + } + } +} + +/* + * Print the first block of the content of the dataset. + */ +static void +ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset) + +{ + DATATYPE *dataptr = dataset; + hsize_t i, j; + + /* print the column heading */ + HDprintf("Print only the first block of the dataset\n"); + HDprintf("%-8s", "Cols:"); + for (j = 0; j < block[1]; j++) { + HDprintf("%3lu ", (unsigned long)(start[1] + j)); + } + HDprintf("\n"); + + /* print the slab data */ + for (i = 0; i < block[0]; i++) { + HDprintf("Row %2lu: ", (unsigned long)(i + start[0])); + for (j = 0; j < block[1]; j++) { + HDprintf("%03d ", *dataptr++); + } + HDprintf("\n"); + } +} + +/* + * Print the content of the dataset. 
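+ * As with verify_data() above, the dumping only happens under VERBOSE_MED;
+ * the routine's main job is to compare the chunked data read back against the
+ * original values and return the number of mismatches.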
+ */ +static int +ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, + DATATYPE *original, int mem_selection) +{ + hsize_t i, j, k1, k2, k = 0; + int vrfyerrs; + DATATYPE *dataptr, *oriptr; + + /* print it if VERBOSE_MED */ + if (VERBOSE_MED) { + HDprintf("dataset_vrfy dumping:::\n"); + HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1]); + HDprintf("original values:\n"); + ccdataset_print(start, block, original); + HDprintf("compared values:\n"); + ccdataset_print(start, block, dataset); + } + + vrfyerrs = 0; + + for (k1 = 0; k1 < count[0]; k1++) { + for (i = 0; i < block[0]; i++) { + for (k2 = 0; k2 < count[1]; k2++) { + for (j = 0; j < block[1]; j++) { + if (ALL != mem_selection) { + dataptr = dataset + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] + + k2 * stride[1] + j); + oriptr = original + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] + + k2 * stride[1] + j); + } + else { + dataptr = dataset + k; + oriptr = original + k; + k++; + } + if (*dataptr != *oriptr) { + if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) { + HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n", + (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr)); + } + } + } + } + } + } + if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("[more errors ...]\n"); + if (vrfyerrs) + HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs); + return (vrfyerrs); +} + +/* + * Example of using the parallel HDF5 library to create two datasets + * in one HDF5 file with collective parallel access support. + * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. + * Each process controls only a slab of size dim0 x dim1 within each + * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and + * each process controls a hyperslab within.] + */ + +static void +dataset_big_write(void) +{ + + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset; + hsize_t dims[RANK]; /* dataset dim sizes */ + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + hsize_t *coords = NULL; + herr_t ret; /* Generic return value */ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + size_t num_points; + B_DATATYPE *wdata; + + /* allocate memory for data buffer */ + wdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE)); + VRFY_G((wdata != NULL), "wdata malloc succeeded"); + + /* setup file access template */ + acc_tpl = H5Pcreate(H5P_FILE_ACCESS); + VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS"); + H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL); + + /* create the file collectively */ + fid = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY_G((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY_G((ret >= 0), ""); + + /* Each process takes a slabs of rows. 
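+     * Rank r writes rows [r * block[0], (r + 1) * block[0]) with
+     * block[0] = dims[0] / mpi_size_g, covering all columns, so the ranks tile
+     * Dataset1 without overlap.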
*/ + if (mpi_rank_g == 0) + HDprintf("\nTesting Dataset1 write by ROW\n"); + /* Create a large dataset */ + dims[0] = bigcount; + dims[1] = (hsize_t)mpi_size_g; + + sid = H5Screate_simple(RANK, dims, NULL); + VRFY_G((sid >= 0), "H5Screate_simple succeeded"); + dataset = H5Dcreate2(fid, DATASET1, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY_G((dataset >= 0), "H5Dcreate2 succeeded"); + H5Sclose(sid); + + block[0] = dims[0] / (hsize_t)mpi_size_g; + block[1] = dims[1]; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)mpi_rank_g * block[0]; + start[1] = 0; + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY_G((mem_dataspace >= 0), ""); + + /* fill the local slab with some trivial data */ + fill_datasets(start, block, wdata); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, wdata); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((ret >= 0), "set independent IO collectively succeeded"); + } + + ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata); + VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded"); + + /* release all temporary handles. */ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + ret = H5Dclose(dataset); + VRFY_G((ret >= 0), "H5Dclose1 succeeded"); + + /* Each process takes a slabs of cols. 
*/ + if (mpi_rank_g == 0) + HDprintf("\nTesting Dataset2 write by COL\n"); + /* Create a large dataset */ + dims[0] = bigcount; + dims[1] = (hsize_t)mpi_size_g; + + sid = H5Screate_simple(RANK, dims, NULL); + VRFY_G((sid >= 0), "H5Screate_simple succeeded"); + dataset = H5Dcreate2(fid, DATASET2, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY_G((dataset >= 0), "H5Dcreate2 succeeded"); + H5Sclose(sid); + + block[0] = dims[0]; + block[1] = dims[1] / (hsize_t)mpi_size_g; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = (hsize_t)mpi_rank_g * block[1]; + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY_G((mem_dataspace >= 0), ""); + + /* fill the local slab with some trivial data */ + fill_datasets(start, block, wdata); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, wdata); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((ret >= 0), "set independent IO collectively succeeded"); + } + + ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata); + VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded"); + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + ret = H5Dclose(dataset); + VRFY_G((ret >= 0), "H5Dclose1 succeeded"); + + /* ALL selection */ + if (mpi_rank_g == 0) + HDprintf("\nTesting Dataset3 write select ALL proc 0, NONE others\n"); + /* Create a large dataset */ + dims[0] = bigcount; + dims[1] = 1; + + sid = H5Screate_simple(RANK, dims, NULL); + VRFY_G((sid >= 0), "H5Screate_simple succeeded"); + dataset = H5Dcreate2(fid, DATASET3, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY_G((dataset >= 0), "H5Dcreate2 succeeded"); + H5Sclose(sid); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); + if (mpi_rank_g == 0) { + ret = H5Sselect_all(file_dataspace); + VRFY_G((ret >= 0), "H5Sset_all succeeded"); + } + else { + ret = H5Sselect_none(file_dataspace); + VRFY_G((ret >= 0), "H5Sset_none succeeded"); + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, dims, NULL); + VRFY_G((mem_dataspace >= 0), ""); + if (mpi_rank_g != 0) { + ret = H5Sselect_none(mem_dataspace); + VRFY_G((ret >= 0), "H5Sset_none succeeded"); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((ret >= 0), "set independent IO collectively succeeded"); + } + + /* fill the local slab with some trivial data */ + fill_datasets(start, dims, wdata); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + } + + ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata); + VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded"); + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + ret = H5Dclose(dataset); + VRFY_G((ret >= 0), "H5Dclose1 succeeded"); + + /* Point selection */ + if (mpi_rank_g == 0) + HDprintf("\nTesting Dataset4 write point selection\n"); + /* Create a large dataset */ + dims[0] = bigcount; + dims[1] = (hsize_t)(mpi_size_g * 4); + + sid = H5Screate_simple(RANK, dims, NULL); + VRFY_G((sid >= 0), "H5Screate_simple succeeded"); + dataset = H5Dcreate2(fid, DATASET4, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY_G((dataset >= 0), "H5Dcreate2 succeeded"); + H5Sclose(sid); + + block[0] = dims[0] / 2; + block[1] = 2; + stride[0] = dims[0] / 2; + stride[1] = 2; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g; + + num_points = bigcount; + + coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t)); + VRFY_G((coords != NULL), "coords malloc succeeded"); + + set_coords(start, count, stride, block, num_points, coords, IN_ORDER); + /* create a file dataspace */ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY_G((ret >= 0), "H5Sselect_elements succeeded"); + + if (coords) + free(coords); + + fill_datasets(start, block, wdata); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, wdata); + } + + /* create a memory dataspace */ + /* Warning: H5Screate_simple requires an array of hsize_t elements + * even if we only pass only a single value. Attempting anything else + * appears to cause problems with 32 bit compilers. + */ + mem_dataspace = H5Screate_simple(1, dims, NULL); + VRFY_G((mem_dataspace >= 0), ""); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((ret >= 0), "set independent IO collectively succeeded"); + } + + ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata); + VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded"); + + /* release all temporary handles. */ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + ret = H5Dclose(dataset); + VRFY_G((ret >= 0), "H5Dclose1 succeeded"); + + HDfree(wdata); + H5Fclose(fid); +} + +/* + * Example of using the parallel HDF5 library to read two datasets + * in one HDF5 file with collective parallel access support. + * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. + * Each process controls only a slab of size dim0 x dim1 within each + * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and + * each process controls a hyperslab within.] 
+ */ + +static void +dataset_big_read(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset; + B_DATATYPE *rdata = NULL; /* data buffer */ + B_DATATYPE *wdata = NULL; /* expected data buffer */ + hsize_t dims[RANK]; /* dataset dim sizes */ + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + size_t num_points; + hsize_t *coords = NULL; + herr_t ret; /* Generic return value */ + + /* allocate memory for data buffer */ + rdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE)); + VRFY_G((rdata != NULL), "rdata malloc succeeded"); + wdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE)); + VRFY_G((wdata != NULL), "wdata malloc succeeded"); + + HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE)); + + /* setup file access template */ + acc_tpl = H5Pcreate(H5P_FILE_ACCESS); + VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS"); + H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL); + + /* open the file collectively */ + fid = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_tpl); + VRFY_G((fid >= 0), "H5Fopen succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY_G((ret >= 0), ""); + + if (mpi_rank_g == 0) + HDprintf("\nRead Testing Dataset1 by COL\n"); + + dataset = H5Dopen2(fid, DATASET1, H5P_DEFAULT); + VRFY_G((dataset >= 0), "H5Dopen2 succeeded"); + + dims[0] = bigcount; + dims[1] = (hsize_t)mpi_size_g; + /* Each process takes a slabs of cols. */ + block[0] = dims[0]; + block[1] = dims[1] / (hsize_t)mpi_size_g; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = (hsize_t)mpi_rank_g * block[1]; + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY_G((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + fill_datasets(start, block, wdata); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata); + VRFY_G((ret >= 0), "H5Dread dataset1 succeeded"); + + /* verify the read data with original expected data */ + ret = verify_data(start, count, stride, block, rdata, wdata); + if (ret) { + HDfprintf(stderr, "verify failed\n"); + exit(1); + } + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + ret = H5Dclose(dataset); + VRFY_G((ret >= 0), "H5Dclose1 succeeded"); + + if (mpi_rank_g == 0) + HDprintf("\nRead Testing Dataset2 by ROW\n"); + HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE)); + dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT); + VRFY_G((dataset >= 0), "H5Dopen2 succeeded"); + + dims[0] = bigcount; + dims[1] = (hsize_t)mpi_size_g; + /* Each process takes a slabs of rows. */ + block[0] = dims[0] / (hsize_t)mpi_size_g; + block[1] = dims[1]; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)mpi_rank_g * block[0]; + start[1] = 0; + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY_G((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + fill_datasets(start, block, wdata); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata); + VRFY_G((ret >= 0), "H5Dread dataset2 succeeded"); + + /* verify the read data with original expected data */ + ret = verify_data(start, count, stride, block, rdata, wdata); + if (ret) { + HDfprintf(stderr, "verify failed\n"); + exit(1); + } + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + ret = H5Dclose(dataset); + VRFY_G((ret >= 0), "H5Dclose1 succeeded"); + + if (mpi_rank_g == 0) + HDprintf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n"); + HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE)); + dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT); + VRFY_G((dataset >= 0), "H5Dopen2 succeeded"); + + dims[0] = bigcount; + dims[1] = 1; + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); + if (mpi_rank_g == 0) { + ret = H5Sselect_all(file_dataspace); + VRFY_G((ret >= 0), "H5Sset_all succeeded"); + } + else { + ret = H5Sselect_none(file_dataspace); + VRFY_G((ret >= 0), "H5Sset_none succeeded"); + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, dims, NULL); + VRFY_G((mem_dataspace >= 0), ""); + if (mpi_rank_g != 0) { + ret = H5Sselect_none(mem_dataspace); + VRFY_G((ret >= 0), "H5Sset_none succeeded"); + } + + /* fill dataset with test data */ + fill_datasets(start, dims, wdata); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata); + VRFY_G((ret >= 0), "H5Dread dataset3 succeeded"); + + if (mpi_rank_g == 0) { + /* verify the read data with original expected data */ + ret = verify_data(start, count, stride, block, rdata, wdata); + if (ret) { + HDfprintf(stderr, "verify failed\n"); + exit(1); + } + } + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + ret = H5Dclose(dataset); + VRFY_G((ret >= 0), "H5Dclose1 succeeded"); + + if (mpi_rank_g == 0) + HDprintf("\nRead Testing Dataset4 with Point selection\n"); + dataset = H5Dopen2(fid, DATASET4, H5P_DEFAULT); + VRFY_G((dataset >= 0), "H5Dopen2 succeeded"); + + dims[0] = bigcount; + dims[1] = (hsize_t)(mpi_size_g * 4); + + block[0] = dims[0] / 2; + block[1] = 2; + stride[0] = dims[0] / 2; + stride[1] = 2; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g; + + fill_datasets(start, block, wdata); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, wdata); + } + + num_points = bigcount; + + coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t)); + VRFY_G((coords != NULL), "coords malloc succeeded"); + + set_coords(start, count, stride, block, num_points, coords, IN_ORDER); + /* create a file dataspace */ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY_G((ret >= 0), "H5Sselect_elements succeeded"); + + if (coords) + HDfree(coords); + + /* create a memory dataspace */ + /* Warning: H5Screate_simple requires an array of hsize_t elements + * even if we only pass only a single value. Attempting anything else + * appears to cause problems with 32 bit compilers. + */ + mem_dataspace = H5Screate_simple(1, dims, NULL); + VRFY_G((mem_dataspace >= 0), ""); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata); + VRFY_G((ret >= 0), "H5Dread dataset1 succeeded"); + + ret = verify_data(start, count, stride, block, rdata, wdata); + if (ret) { + HDfprintf(stderr, "verify failed\n"); + exit(1); + } + + /* release all temporary handles. */ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + ret = H5Dclose(dataset); + VRFY_G((ret >= 0), "H5Dclose1 succeeded"); + + HDfree(wdata); + HDfree(rdata); + + wdata = NULL; + rdata = NULL; + /* We never wrote Dataset5 in the write section, so we can't + * expect to read it... + */ + file_dataspace = -1; + mem_dataspace = -1; + xfer_plist = -1; + dataset = -1; + + /* release all temporary handles. 
*/ + if (file_dataspace != -1) + H5Sclose(file_dataspace); + if (mem_dataspace != -1) + H5Sclose(mem_dataspace); + if (xfer_plist != -1) + H5Pclose(xfer_plist); + if (dataset != -1) { + ret = H5Dclose(dataset); + VRFY_G((ret >= 0), "H5Dclose1 succeeded"); + } + H5Fclose(fid); + + /* release data buffers */ + if (rdata) + HDfree(rdata); + if (wdata) + HDfree(wdata); + +} /* dataset_large_readAll */ + +static void +single_rank_independent_io(void) +{ + if (mpi_rank_g == 0) + HDprintf("single_rank_independent_io\n"); + + if (MAIN_PROCESS) { + hsize_t dims[1]; + hid_t file_id = -1; + hid_t fapl_id = -1; + hid_t dset_id = -1; + hid_t fspace_id = -1; + herr_t ret; + int *data = NULL; + uint64_t i; + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY_G((fapl_id >= 0), "H5P_FILE_ACCESS"); + + H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL); + file_id = H5Fcreate(FILENAME[1], H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY_G((file_id >= 0), "H5Dcreate2 succeeded"); + + /* + * Calculate the number of elements needed to exceed + * MPI's INT_MAX limitation + */ + dims[0] = (INT_MAX / sizeof(int)) + 10; + + fspace_id = H5Screate_simple(1, dims, NULL); + VRFY_G((fspace_id >= 0), "H5Screate_simple fspace_id succeeded"); + + /* + * Create and write to a >2GB dataset from a single rank. + */ + dset_id = H5Dcreate2(file_id, "test_dset", H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + + VRFY_G((dset_id >= 0), "H5Dcreate2 succeeded"); + + data = malloc(dims[0] * sizeof(int)); + + /* Initialize data */ + for (i = 0; i < dims[0]; i++) + data[i] = (int)(i % (uint64_t)DXFER_BIGCOUNT); + + /* Write data */ + ret = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data); + VRFY_G((ret >= 0), "H5Dwrite succeeded"); + + /* Wipe buffer */ + HDmemset(data, 0, dims[0] * sizeof(int)); + + /* Read data back */ + ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data); + VRFY_G((ret >= 0), "H5Dread succeeded"); + + /* Verify data */ + for (i = 0; i < dims[0]; i++) + if (data[i] != (int)(i % (uint64_t)DXFER_BIGCOUNT)) { + HDfprintf(stderr, "verify failed\n"); + exit(1); + } + + free(data); + H5Sclose(fspace_id); + H5Dclose(dset_id); + H5Fclose(file_id); + + H5Fdelete(FILENAME[1], fapl_id); + + H5Pclose(fapl_id); + } + MPI_Barrier(MPI_COMM_WORLD); +} + +/* + * Create the appropriate File access property list + */ +hid_t +create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) +{ + hid_t ret_pl = -1; + herr_t ret; /* generic return value */ + int mpi_rank; /* mpi variables */ + + /* need the rank for error checking macros */ + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + ret_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY_G((ret_pl >= 0), "H5P_FILE_ACCESS"); + + if (l_facc_type == FACC_DEFAULT) + return (ret_pl); + + if (l_facc_type == FACC_MPIO) { + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(ret_pl, comm, info); + VRFY_G((ret >= 0), ""); + ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE); + VRFY_G((ret >= 0), ""); + ret = H5Pset_coll_metadata_write(ret_pl, TRUE); + VRFY_G((ret >= 0), ""); + return (ret_pl); + } + + if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { + hid_t mpio_pl; + + mpio_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY_G((mpio_pl >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(mpio_pl, comm, info); + VRFY_G((ret >= 0), ""); + + /* setup file access template */ + ret_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY_G((ret_pl >= 0), ""); + /* set Parallel access with communicator */ + ret = 
H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); + VRFY_G((ret >= 0), "H5Pset_fapl_split succeeded"); + H5Pclose(mpio_pl); + return (ret_pl); + } + + /* unknown file access types */ + return (ret_pl); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk1 + * + * Purpose: Wrapper to test the collective chunk IO for regular JOINT + selection with a single chunk + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: One big singular selection inside one chunk + * Two dimensions, + * + * dim1 = space_dim1(5760)*mpi_size + * dim2 = space_dim2(3) + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = space_dim1(5760) + * count1 = space_dim2(3) + * start0 = mpi_rank*space_dim1 + * start1 = 0 + * ------------------------------------------------------------------------ + */ + +void +coll_chunk1(void) +{ + const char *filename = FILENAME[0]; + if (mpi_rank_g == 0) + HDprintf("coll_chunk1\n"); + + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk2 + * + * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT + selection with a single chunk + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: many disjoint selections inside one chunk + * Two dimensions, + * + * dim1 = space_dim1*mpi_size(5760) + * dim2 = space_dim2(3) + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 3 for all dimensions + * count0 = space_dim1/stride0(5760/3) + * count1 = space_dim2/stride(3/3 = 1) + * start0 = mpi_rank*space_dim1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ +void +coll_chunk2(void) +{ + const char *filename = FILENAME[0]; + if (mpi_rank_g == 0) + HDprintf("coll_chunk2\n"); + + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 1, 
BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk3 + * + * Purpose: Wrapper to test the collective chunk IO for regular JOINT + selection with at least number of 2*mpi_size chunks + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: one singular selection across many chunks + * Two dimensions, Num of chunks = 2* mpi_size + * + * dim1 = space_dim1*mpi_size + * dim2 = space_dim2(3) + * chunk_dim1 = space_dim1 + * chunk_dim2 = dim2/2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = space_dim1 + * count1 = space_dim2(3) + * start0 = mpi_rank*space_dim1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ + +void +coll_chunk3(void) +{ + const char *filename = FILENAME[0]; + if (mpi_rank_g == 0) + HDprintf("coll_chunk3\n"); + + coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER); + coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER); + coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER); +} + +//------------------------------------------------------------------------- +// Borrowed/Modified (slightly) from t_coll_chunk.c +/*------------------------------------------------------------------------- + * Function: coll_chunktest + * + * Purpose: The real testing routine for regular selection of collective + chunking storage + testing both write and read, + If anything fails, it may be read or write. There is no + separation test between read and write. 
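+ *              The arguments select the variant under test: chunk_factor sets
+ *              how many chunks dimension 0 is split into, select_factor picks
+ *              one of the BYROW_* selection modes, api_option chooses which
+ *              H5Pset_dxpl_mpio_chunk_opt* call (if any) is exercised,
+ *              file_selection and mem_selection pick HYPER, POINT or ALL
+ *              selections, and mode orders point selections IN_ORDER or
+ *              OUT_OF_ORDER.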
+ * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + *------------------------------------------------------------------------- + */ + +static void +coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection, + int mem_selection, int mode) +{ + hid_t file, dataset, file_dataspace, mem_dataspace; + hid_t acc_plist, xfer_plist, crp_plist; + + hsize_t dims[RANK], chunk_dims[RANK]; + int *data_array1 = NULL; + int *data_origin1 = NULL; + + hsize_t start[RANK], count[RANK], stride[RANK], block[RANK]; + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + unsigned prop_value; +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + herr_t status; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + size_t num_points; /* for point selection */ + hsize_t *coords = NULL; /* for point selection */ + + /* Create the data space */ + + acc_plist = create_faccess_plist(comm, info, facc_type); + VRFY_G((acc_plist >= 0), ""); + + file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist); + VRFY_G((file >= 0), "H5Fcreate succeeded"); + + status = H5Pclose(acc_plist); + VRFY_G((status >= 0), ""); + + /* setup dimensionality object */ + dims[0] = space_dim1 * (hsize_t)mpi_size_g; + dims[1] = space_dim2; + + /* allocate memory for data buffer */ + data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int)); + VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded"); + + /* set up dimensions of the slab this process accesses */ + ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor); + + /* set up the coords array selection */ + num_points = block[0] * block[1] * count[0] * count[1]; + coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t)); + VRFY_G((coords != NULL), "coords malloc succeeded"); + point_set(start, count, stride, block, num_points, coords, mode); + + /* Warning: H5Screate_simple requires an array of hsize_t elements + * even if we only pass only a single value. Attempting anything else + * appears to cause problems with 32 bit compilers. + */ + file_dataspace = H5Screate_simple(2, dims, NULL); + VRFY_G((file_dataspace >= 0), "file dataspace created succeeded"); + + if (ALL != mem_selection) { + mem_dataspace = H5Screate_simple(2, dims, NULL); + VRFY_G((mem_dataspace >= 0), "mem dataspace created succeeded"); + } + else { + /* Putting the warning about H5Screate_simple (above) into practice... */ + hsize_t dsdims[1] = {num_points}; + mem_dataspace = H5Screate_simple(1, dsdims, NULL); + VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded"); + } + + crp_plist = H5Pcreate(H5P_DATASET_CREATE); + VRFY_G((crp_plist >= 0), ""); + + /* Set up chunk information. */ + chunk_dims[0] = dims[0] / (hsize_t)chunk_factor; + + /* to decrease the testing time, maintain bigger chunk size */ + (chunk_factor == 1) ? 
(chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2 / 2); + status = H5Pset_chunk(crp_plist, 2, chunk_dims); + VRFY_G((status >= 0), "chunk creation property list succeeded"); + + dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT, + crp_plist, H5P_DEFAULT); + VRFY_G((dataset >= 0), "dataset created succeeded"); + + status = H5Pclose(crp_plist); + VRFY_G((status >= 0), ""); + + /*put some trivial data in the data array */ + ccdataset_fill(start, stride, count, block, data_array1, mem_selection); + + MESG("data_array initialized"); + + switch (file_selection) { + case HYPER: + status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY_G((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(file_dataspace); + VRFY_G((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(file_dataspace); + VRFY_G((status >= 0), "H5Sselect_all succeeded"); + break; + } + + switch (mem_selection) { + case HYPER: + status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY_G((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(mem_dataspace); + VRFY_G((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(mem_dataspace); + VRFY_G((status >= 0), "H5Sselect_all succeeded"); + break; + } + + /* set up the collective transfer property list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), ""); + + status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((status >= 0), "MPIO collective transfer property succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((status >= 0), "set independent IO collectively succeeded"); + } + + switch (api_option) { + case API_LINK_HARD: + status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO); + VRFY_G((status >= 0), "collective chunk optimization succeeded"); + break; + + case API_MULTI_HARD: + status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO); + VRFY_G((status >= 0), "collective chunk optimization succeeded "); + break; + + case API_LINK_TRUE: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2); + VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded"); + break; + + case API_LINK_FALSE: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6); + VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded"); + break; + + case API_MULTI_COLL: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */ + VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded"); + status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50); + VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded"); + break; + + case API_MULTI_IND: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */ + VRFY_G((status >= 0), 
"collective chunk optimization set chunk number succeeded"); + status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100); + VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded"); + break; + + default:; + } + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + if (facc_type == FACC_MPIO) { + switch (api_option) { + case API_LINK_HARD: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + case API_MULTI_HARD: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + case API_LINK_TRUE: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + case API_LINK_FALSE: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + case API_MULTI_COLL: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, + H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + case API_MULTI_IND: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + default:; + } + } +#endif + + /* write data collectively */ + status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY_G((status >= 0), "dataset write succeeded"); + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + if (facc_type == FACC_MPIO) { + switch (api_option) { + case API_LINK_HARD: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value); + VRFY_G((status >= 0), "testing property list get succeeded"); + VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded"); + break; + + case API_MULTI_HARD: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value); + VRFY_G((status >= 0), "testing property list get succeeded"); + VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded"); + break; + + case API_LINK_TRUE: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value); + VRFY_G((status >= 0), "testing property list get succeeded"); + VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded"); + break; + + case API_LINK_FALSE: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value); + VRFY_G((status >= 0), "testing property list get succeeded"); + VRFY_G((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded"); + break; + + case API_MULTI_COLL: + status = H5Pget(xfer_plist, 
H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value); + VRFY_G((status >= 0), "testing property list get succeeded"); + VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded"); + break; + + case API_MULTI_IND: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value); + VRFY_G((status >= 0), "testing property list get succeeded"); + VRFY_G((prop_value == 0), + "API to set MULTI-CHUNK IO transferring to independent IO succeeded"); + break; + + default:; + } + } +#endif + + status = H5Dclose(dataset); + VRFY_G((status >= 0), ""); + + status = H5Pclose(xfer_plist); + VRFY_G((status >= 0), "property list closed"); + + status = H5Sclose(file_dataspace); + VRFY_G((status >= 0), ""); + + status = H5Sclose(mem_dataspace); + VRFY_G((status >= 0), ""); + + status = H5Fclose(file); + VRFY_G((status >= 0), ""); + + if (data_array1) + HDfree(data_array1); + + /* Use collective read to verify the correctness of collective write. */ + + /* allocate memory for data buffer */ + data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int)); + VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded"); + + /* allocate memory for data buffer */ + data_origin1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int)); + VRFY_G((data_origin1 != NULL), "data_origin1 malloc succeeded"); + + acc_plist = create_faccess_plist(comm, info, facc_type); + VRFY_G((acc_plist >= 0), "MPIO creation property list succeeded"); + + file = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_plist); + VRFY_G((file >= 0), "H5Fcreate succeeded"); + + status = H5Pclose(acc_plist); + VRFY_G((status >= 0), ""); + + /* open the collective dataset*/ + dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT); + VRFY_G((dataset >= 0), ""); + + /* set up dimensions of the slab this process accesses */ + ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor); + + /* obtain the file and mem dataspace*/ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), ""); + + if (ALL != mem_selection) { + mem_dataspace = H5Dget_space(dataset); + VRFY_G((mem_dataspace >= 0), ""); + } + else { + /* Warning: H5Screate_simple requires an array of hsize_t elements + * even if we only pass only a single value. Attempting anything else + * appears to cause problems with 32 bit compilers. 
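 * For reference, a minimal sketch of how such a point selection is built
 * (hypothetical names; the coords buffer passed to H5Sselect_elements()
 * holds the full set of dataspace coordinates for point 0, then point 1,
 * and so on):
 *
 *     hsize_t pts[4 * 2] = {0, 0, 0, 1, 1, 0, 1, 1};
 *     hsize_t npts[1]    = {4};
 *     hid_t   mspace     = H5Screate_simple(1, npts, NULL);
 *     herr_t  ret        = H5Sselect_elements(fspace, H5S_SELECT_SET, 4, pts);
 *
 * where fspace is an existing rank-2 file dataspace and mspace is the
 * matching one-dimensional memory dataspace for the four selected values.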
+ */ + hsize_t dsdims[1] = {num_points}; + mem_dataspace = H5Screate_simple(1, dsdims, NULL); + VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded"); + } + + switch (file_selection) { + case HYPER: + status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY_G((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(file_dataspace); + VRFY_G((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(file_dataspace); + VRFY_G((status >= 0), "H5Sselect_all succeeded"); + break; + } + + switch (mem_selection) { + case HYPER: + status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY_G((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(mem_dataspace); + VRFY_G((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(mem_dataspace); + VRFY_G((status >= 0), "H5Sselect_all succeeded"); + break; + } + + /* fill dataset with test data */ + ccdataset_fill(start, stride, count, block, data_origin1, mem_selection); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), ""); + + status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((status >= 0), "MPIO collective transfer property succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((status >= 0), "set independent IO collectively succeeded"); + } + + status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY_G((status >= 0), "dataset read succeeded"); + + /* verify the read data with original expected data */ + status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection); + if (status) + nerrors++; + + status = H5Pclose(xfer_plist); + VRFY_G((status >= 0), "property list closed"); + + /* close dataset collectively */ + status = H5Dclose(dataset); + VRFY_G((status >= 0), "H5Dclose"); + + /* release all IDs created */ + status = H5Sclose(file_dataspace); + VRFY_G((status >= 0), "H5Sclose"); + + status = H5Sclose(mem_dataspace); + VRFY_G((status >= 0), "H5Sclose"); + + /* close the file collectively */ + status = H5Fclose(file); + VRFY_G((status >= 0), "H5Fclose"); + + /* release data buffers */ + if (coords) + HDfree(coords); + if (data_array1) + HDfree(data_array1); + if (data_origin1) + HDfree(data_origin1); +} + +int +main(int argc, char **argv) +{ + hid_t acc_plist = H5I_INVALID_HID; + + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank_g); + + /* Attempt to turn off atexit post processing so that in case errors + * happen during the test and the process is aborted, it will not get + * hang in the atexit post processing in which it may try to make MPI + * calls. By then, MPI calls may not work. + */ + if (H5dont_atexit() < 0) + HDprintf("Failed to turn off atexit processing. Continue.\n"); + + /* set alarm. 
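 * What "set alarm" refers to: the test framework normally arms a watchdog
 * so a hung collective call cannot block the run forever. A rough,
 * illustrative sketch of that idea using plain POSIX calls (not the test
 * framework's actual implementation):
 *
 *     #include <signal.h>
 *     #include <unistd.h>
 *
 *     static void timed_out(int sig) { (void)sig; _exit(1); }
 *     ...
 *     signal(SIGALRM, timed_out);
 *     alarm(1200);
 *     ... run the tests ...
 *     alarm(0);
 *
 * i.e. install a SIGALRM handler, start a timer before the tests run and
 * cancel it once they finish.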
*/ + /* TestAlarmOn(); */ + + acc_plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + + /* Get the capability flag of the VOL connector being used */ + if (H5Pget_vol_cap_flags(acc_plist, &vol_cap_flags_g) < 0) { + if (MAIN_PROCESS) + HDprintf("Failed to get the capability flag of the VOL connector being used\n"); + + MPI_Finalize(); + return 0; + } + + /* Make sure the connector supports the API functions being tested. This test only + * uses a few API functions, such as H5Fcreate/open/close/delete, H5Dcreate/write/read/close, + * and H5Dget_space. */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAIN_PROCESS) + HDprintf( + "API functions for basic file, dataset basic or more aren't supported with this connector\n"); + + MPI_Finalize(); + return 0; + } + + dataset_big_write(); + MPI_Barrier(MPI_COMM_WORLD); + + dataset_big_read(); + MPI_Barrier(MPI_COMM_WORLD); + + coll_chunk1(); + MPI_Barrier(MPI_COMM_WORLD); + coll_chunk2(); + MPI_Barrier(MPI_COMM_WORLD); + coll_chunk3(); + MPI_Barrier(MPI_COMM_WORLD); + + single_rank_independent_io(); + + /* turn off alarm */ + /* TestAlarmOff(); */ + + if (mpi_rank_g == 0) { + hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS); + + H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL); + + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME[0], fapl_id); + H5Fdelete(FILENAME[1], fapl_id); + } + H5E_END_TRY; + + H5Pclose(fapl_id); + } + + H5Pclose(acc_plist); + + /* close HDF5 library */ + H5close(); + + MPI_Finalize(); + + return 0; +} diff --git a/testpar/API/t_chunk_alloc.c b/testpar/API/t_chunk_alloc.c new file mode 100644 index 00000000000..dd78225483c --- /dev/null +++ b/testpar/API/t_chunk_alloc.c @@ -0,0 +1,512 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * This verifies if the storage space allocation methods are compatible between + * serial and parallel modes. 
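 * The allocation method at the heart of this test is the dataset's
 * allocation time. A minimal sketch of requesting early allocation for a
 * chunked, extendible dataset (hypothetical ids; fid is an already open
 * file):
 *
 *     hsize_t dims[1]    = {1000 * 1000};
 *     hsize_t maxdims[1] = {H5S_UNLIMITED};
 *     hsize_t chunk[1]   = {1000};
 *     hid_t   space      = H5Screate_simple(1, dims, maxdims);
 *     hid_t   dcpl       = H5Pcreate(H5P_DATASET_CREATE);
 *     H5Pset_chunk(dcpl, 1, chunk);
 *     H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
 *     hid_t dset = H5Dcreate2(fid, "ExtendibleArray", H5T_NATIVE_UCHAR, space,
 *                             H5P_DEFAULT, dcpl, H5P_DEFAULT);
 *
 * With H5D_ALLOC_TIME_EARLY the space for every chunk is allocated in the
 * file at creation time, so a file created serially can be reopened in
 * parallel with all of its chunks already present, whether or not anything
 * has been written to them.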
+ * + * Created by: Christian Chilan and Albert Cheng + * Date: 2006/05/25 + */ + +#include "hdf5.h" +#include "testphdf5.h" +static int mpi_size, mpi_rank; + +#define DSET_NAME "ExtendibleArray" +#define CHUNK_SIZE 1000 /* #elements per chunk */ +#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */ +#define CLOSE 1 +#define NO_CLOSE 0 + +#if 0 +static MPI_Offset +get_filesize(const char *filename) +{ + int mpierr; + MPI_File fd; + MPI_Offset filesize; + + mpierr = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fd); + VRFY((mpierr == MPI_SUCCESS), ""); + + mpierr = MPI_File_get_size(fd, &filesize); + VRFY((mpierr == MPI_SUCCESS), ""); + + mpierr = MPI_File_close(&fd); + VRFY((mpierr == MPI_SUCCESS), ""); + + return (filesize); +} +#endif + +typedef enum write_pattern { none, sec_last, all } write_type; + +typedef enum access_ { write_all, open_only, extend_only } access_type; + +/* + * This creates a dataset serially with chunks, each of CHUNK_SIZE + * elements. The allocation time is set to H5D_ALLOC_TIME_EARLY. Another + * routine will open this in parallel for extension test. + */ +static void +create_chunked_dataset(const char *filename, int chunk_factor, write_type write_pattern) +{ + hid_t file_id, dataset; /* handles */ + hid_t dataspace, memspace; + hid_t cparms; + hsize_t dims[1]; + hsize_t maxdims[1] = {H5S_UNLIMITED}; + + hsize_t chunk_dims[1] = {CHUNK_SIZE}; + hsize_t count[1]; + hsize_t stride[1]; + hsize_t block[1]; + hsize_t offset[1]; /* Selection offset within dataspace */ + /* Variables used in reading data back */ + char buffer[CHUNK_SIZE]; + long nchunks; + herr_t hrc; +#if 0 + MPI_Offset filesize, /* actual file size */ + est_filesize; /* estimated file size */ +#endif + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Only MAINPROCESS should create the file. Others just wait. */ + if (MAINPROCESS) { + nchunks = chunk_factor * mpi_size; + dims[0] = (hsize_t)(nchunks * CHUNK_SIZE); + /* Create the data space with unlimited dimensions. */ + dataspace = H5Screate_simple(1, dims, maxdims); + VRFY((dataspace >= 0), ""); + + memspace = H5Screate_simple(1, chunk_dims, NULL); + VRFY((memspace >= 0), ""); + + /* Create a new file. If file exists its contents will be overwritten. */ + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + VRFY((file_id >= 0), "H5Fcreate"); + + /* Modify dataset creation properties, i.e. enable chunking */ + cparms = H5Pcreate(H5P_DATASET_CREATE); + VRFY((cparms >= 0), ""); + + hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY); + VRFY((hrc >= 0), ""); + + hrc = H5Pset_chunk(cparms, 1, chunk_dims); + VRFY((hrc >= 0), ""); + + /* Create a new dataset within the file using cparms creation properties. 
*/ + dataset = + H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT); + VRFY((dataset >= 0), ""); + + if (write_pattern == sec_last) { + HDmemset(buffer, 100, CHUNK_SIZE); + + count[0] = 1; + stride[0] = 1; + block[0] = chunk_dims[0]; + offset[0] = (hsize_t)(nchunks - 2) * chunk_dims[0]; + + hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); + VRFY((hrc >= 0), ""); + + /* Write sec_last chunk */ + hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer); + VRFY((hrc >= 0), "H5Dwrite"); + } /* end if */ + + /* Close resources */ + hrc = H5Dclose(dataset); + VRFY((hrc >= 0), ""); + dataset = -1; + + hrc = H5Sclose(dataspace); + VRFY((hrc >= 0), ""); + + hrc = H5Sclose(memspace); + VRFY((hrc >= 0), ""); + + hrc = H5Pclose(cparms); + VRFY((hrc >= 0), ""); + + hrc = H5Fclose(file_id); + VRFY((hrc >= 0), ""); + file_id = -1; + +#if 0 + /* verify file size */ + filesize = get_filesize(filename); + est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char); + VRFY((filesize >= est_filesize), "file size check"); +#endif + } + + /* Make sure all processes are done before exiting this routine. Otherwise, + * other tests may start and change the test data file before some processes + * of this test are still accessing the file. + */ + + MPI_Barrier(MPI_COMM_WORLD); +} + +/* + * This program performs three different types of parallel access. It writes on + * the entire dataset, it extends the dataset to nchunks*CHUNK_SIZE, and it only + * opens the dataset. At the end, it verifies the size of the dataset to be + * consistent with argument 'chunk_factor'. + */ +static void +parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id, + hid_t *dataset) +{ + /* HDF5 gubbins */ + hid_t memspace, dataspace; /* HDF5 file identifier */ + hid_t access_plist; /* HDF5 ID for file access property list */ + herr_t hrc; /* HDF5 return code */ + hsize_t size[1]; + + hsize_t chunk_dims[1] = {CHUNK_SIZE}; + hsize_t count[1]; + hsize_t stride[1]; + hsize_t block[1]; + hsize_t offset[1]; /* Selection offset within dataspace */ + hsize_t dims[1]; + hsize_t maxdims[1]; + + /* Variables used in reading data back */ + char buffer[CHUNK_SIZE]; + int i; + long nchunks; +#if 0 + /* MPI Gubbins */ + MPI_Offset filesize, /* actual file size */ + est_filesize; /* estimated file size */ +#endif + + /* Initialize MPI */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + nchunks = chunk_factor * mpi_size; + + /* Set up MPIO file access property lists */ + access_plist = H5Pcreate(H5P_FILE_ACCESS); + VRFY((access_plist >= 0), ""); + + hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL); + VRFY((hrc >= 0), ""); + + /* Open the file */ + if (*file_id < 0) { + *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist); + VRFY((*file_id >= 0), ""); + } + + /* Open dataset*/ + if (*dataset < 0) { + *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT); + VRFY((*dataset >= 0), ""); + } + + /* Make sure all processes are done before continuing. Otherwise, one + * process could change the dataset extent before another finishes opening + * it, resulting in only some of the processes calling H5Dset_extent(). 
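 * In parallel HDF5, changing the extent modifies shared file metadata, so
 * every process that has the dataset open is expected to make the same
 * H5Dset_extent() call. A minimal sketch of the pattern used here, with
 * hypothetical names:
 *
 *     hsize_t new_size[1] = {(hsize_t)nchunks * CHUNK_SIZE};
 *     MPI_Barrier(MPI_COMM_WORLD);
 *     herr_t ret = H5Dset_extent(dset, new_size);
 *
 * with every rank passing an identical new_size after the barrier.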
*/ + MPI_Barrier(MPI_COMM_WORLD); + + memspace = H5Screate_simple(1, chunk_dims, NULL); + VRFY((memspace >= 0), ""); + + dataspace = H5Dget_space(*dataset); + VRFY((dataspace >= 0), ""); + + size[0] = (hsize_t)nchunks * CHUNK_SIZE; + + switch (action) { + + /* all chunks are written by all the processes in an interleaved way*/ + case write_all: + + HDmemset(buffer, mpi_rank + 1, CHUNK_SIZE); + count[0] = 1; + stride[0] = 1; + block[0] = chunk_dims[0]; + for (i = 0; i < nchunks / mpi_size; i++) { + offset[0] = (hsize_t)(i * mpi_size + mpi_rank) * chunk_dims[0]; + + hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); + VRFY((hrc >= 0), ""); + + /* Write the buffer out */ + hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer); + VRFY((hrc >= 0), "H5Dwrite"); + } + + break; + + /* only extends the dataset */ + case extend_only: + /* check if new size is larger than old size */ + hrc = H5Sget_simple_extent_dims(dataspace, dims, maxdims); + VRFY((hrc >= 0), ""); + + /* Extend dataset*/ + if (size[0] > dims[0]) { + hrc = H5Dset_extent(*dataset, size); + VRFY((hrc >= 0), ""); + } + break; + + /* only opens the *dataset */ + case open_only: + break; + default: + HDassert(0); + } + + /* Close up */ + hrc = H5Dclose(*dataset); + VRFY((hrc >= 0), ""); + *dataset = -1; + + hrc = H5Sclose(dataspace); + VRFY((hrc >= 0), ""); + + hrc = H5Sclose(memspace); + VRFY((hrc >= 0), ""); + + hrc = H5Fclose(*file_id); + VRFY((hrc >= 0), ""); + *file_id = -1; + +#if 0 + /* verify file size */ + filesize = get_filesize(filename); + est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char); + VRFY((filesize >= est_filesize), "file size check"); +#endif + + /* Can close some plists */ + hrc = H5Pclose(access_plist); + VRFY((hrc >= 0), ""); + + /* Make sure all processes are done before exiting this routine. Otherwise, + * other tests may start and change the test data file before some processes + * of this test are still accessing the file. + */ + MPI_Barrier(MPI_COMM_WORLD); +} + +/* + * This routine verifies the data written in the dataset. It does one of the + * three cases according to the value of parameter `write_pattern'. + * 1. it returns correct fill values though the dataset has not been written; + * 2. it still returns correct fill values though only a small part is written; + * 3. it returns correct values when the whole dataset has been written in an + * interleaved pattern. 
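 * The "correct fill values" are whatever the dataset creation property list
 * defines; this test relies on the library default of zero. A short sketch
 * of choosing an explicit fill value instead, with hypothetical names:
 *
 *     unsigned char fill = 0xAA;
 *     hid_t         dcpl = H5Pcreate(H5P_DATASET_CREATE);
 *     H5Pset_fill_value(dcpl, H5T_NATIVE_UCHAR, &fill);
 *
 * Chunks that have been allocated but never written then read back as that
 * value rather than 0.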
+ */ +static void +verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, hid_t *file_id, + hid_t *dataset) +{ + /* HDF5 gubbins */ + hid_t dataspace, memspace; /* HDF5 file identifier */ + hid_t access_plist; /* HDF5 ID for file access property list */ + herr_t hrc; /* HDF5 return code */ + + hsize_t chunk_dims[1] = {CHUNK_SIZE}; + hsize_t count[1]; + hsize_t stride[1]; + hsize_t block[1]; + hsize_t offset[1]; /* Selection offset within dataspace */ + /* Variables used in reading data back */ + char buffer[CHUNK_SIZE]; + int value, i; + int index_l; + long nchunks; + /* Initialize MPI */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + nchunks = chunk_factor * mpi_size; + + /* Set up MPIO file access property lists */ + access_plist = H5Pcreate(H5P_FILE_ACCESS); + VRFY((access_plist >= 0), ""); + + hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL); + VRFY((hrc >= 0), ""); + + /* Open the file */ + if (*file_id < 0) { + *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist); + VRFY((*file_id >= 0), ""); + } + + /* Open dataset*/ + if (*dataset < 0) { + *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT); + VRFY((*dataset >= 0), ""); + } + + memspace = H5Screate_simple(1, chunk_dims, NULL); + VRFY((memspace >= 0), ""); + + dataspace = H5Dget_space(*dataset); + VRFY((dataspace >= 0), ""); + + /* all processes check all chunks. */ + count[0] = 1; + stride[0] = 1; + block[0] = chunk_dims[0]; + for (i = 0; i < nchunks; i++) { + /* reset buffer values */ + HDmemset(buffer, -1, CHUNK_SIZE); + + offset[0] = (hsize_t)i * chunk_dims[0]; + + hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); + VRFY((hrc >= 0), ""); + + /* Read the chunk */ + hrc = H5Dread(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer); + VRFY((hrc >= 0), "H5Dread"); + + /* set expected value according the write pattern */ + switch (write_pattern) { + case all: + value = i % mpi_size + 1; + break; + case none: + value = 0; + break; + case sec_last: + if (i == nchunks - 2) + value = 100; + else + value = 0; + break; + default: + HDassert(0); + } + + /* verify content of the chunk */ + for (index_l = 0; index_l < CHUNK_SIZE; index_l++) + VRFY((buffer[index_l] == value), "data verification"); + } + + hrc = H5Sclose(dataspace); + VRFY((hrc >= 0), ""); + + hrc = H5Sclose(memspace); + VRFY((hrc >= 0), ""); + + /* Can close some plists */ + hrc = H5Pclose(access_plist); + VRFY((hrc >= 0), ""); + + /* Close up */ + if (vclose) { + hrc = H5Dclose(*dataset); + VRFY((hrc >= 0), ""); + *dataset = -1; + + hrc = H5Fclose(*file_id); + VRFY((hrc >= 0), ""); + *file_id = -1; + } + + /* Make sure all processes are done before exiting this routine. Otherwise, + * other tests may start and change the test data file before some processes + * of this test are still accessing the file. + */ + MPI_Barrier(MPI_COMM_WORLD); +} + +/* + * Test following possible scenarios, + * Case 1: + * Sequential create a file and dataset with H5D_ALLOC_TIME_EARLY and large + * size, no write, close, reopen in parallel, read to verify all return + * the fill value. + * Case 2: + * Sequential create a file and dataset with H5D_ALLOC_TIME_EARLY but small + * size, no write, close, reopen in parallel, extend to large size, then close, + * then reopen in parallel and read to verify all return the fill value. 
+ * Case 3: + * Sequential create a file and dataset with H5D_ALLOC_TIME_EARLY and large + * size, write just a small part of the dataset (second to the last), close, + * then reopen in parallel, read to verify all return the fill value except + * those small portion that has been written. Without closing it, writes + * all parts of the dataset in a interleave pattern, close it, and reopen + * it, read to verify all data are as written. + */ +void +test_chunk_alloc(void) +{ + const char *filename; + hid_t file_id, dataset; + + file_id = dataset = -1; + + /* Initialize MPI */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + filename = (const char *)PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Extend Chunked allocation test on file %s\n", filename); + + /* Case 1 */ + /* Create chunked dataset without writing anything.*/ + create_chunked_dataset(filename, CHUNK_FACTOR, none); + /* reopen dataset in parallel and check for file size */ + parallel_access_dataset(filename, CHUNK_FACTOR, open_only, &file_id, &dataset); + /* reopen dataset in parallel, read and verify the data */ + verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset); + + /* Case 2 */ + /* Create chunked dataset without writing anything */ + create_chunked_dataset(filename, 20, none); + /* reopen dataset in parallel and only extend it */ + parallel_access_dataset(filename, CHUNK_FACTOR, extend_only, &file_id, &dataset); + /* reopen dataset in parallel, read and verify the data */ + verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset); + + /* Case 3 */ + /* Create chunked dataset and write in the second to last chunk */ + create_chunked_dataset(filename, CHUNK_FACTOR, sec_last); + /* Reopen dataset in parallel, read and verify the data. The file and dataset are not closed*/ + verify_data(filename, CHUNK_FACTOR, sec_last, NO_CLOSE, &file_id, &dataset); + /* All processes write in all the chunks in a interleaved way */ + parallel_access_dataset(filename, CHUNK_FACTOR, write_all, &file_id, &dataset); + /* reopen dataset in parallel, read and verify the data */ + verify_data(filename, CHUNK_FACTOR, all, CLOSE, &file_id, &dataset); +} diff --git a/testpar/API/t_coll_chunk.c b/testpar/API/t_coll_chunk.c new file mode 100644 index 00000000000..57ee605e0a5 --- /dev/null +++ b/testpar/API/t_coll_chunk.c @@ -0,0 +1,1417 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "hdf5.h" +#include "testphdf5.h" + +#define HYPER 1 +#define POINT 2 +#define ALL 3 + +/* some commonly used routines for collective chunk IO tests*/ + +static void ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], + hsize_t block[], int mode); + +static void ccdataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], + DATATYPE *dataset, int mem_selection); + +static void ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset); + +static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], + DATATYPE *dataset, DATATYPE *original, int mem_selection); + +static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, + int file_selection, int mem_selection, int mode); + +/*------------------------------------------------------------------------- + * Function: coll_chunk1 + * + * Purpose: Wrapper to test the collective chunk IO for regular JOINT + selection with a single chunk + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: One big singular selection inside one chunk + * Two dimensions, + * + * dim1 = SPACE_DIM1(5760)*mpi_size + * dim2 = SPACE_DIM2(3) + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = SPACE_DIM1(5760) + * count1 = SPACE_DIM2(3) + * start0 = mpi_rank*SPACE_DIM1 + * start1 = 0 + * ------------------------------------------------------------------------ + */ + +void +coll_chunk1(void) +{ + const char *filename = PARATESTFILE /* GetTestParameters() */; + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk2 + * + * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT + selection with a single chunk + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* 
------------------------------------------------------------------------ + * Descriptions for the selection: many disjoint selections inside one chunk + * Two dimensions, + * + * dim1 = SPACE_DIM1*mpi_size(5760) + * dim2 = SPACE_DIM2(3) + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 3 for all dimensions + * count0 = SPACE_DIM1/stride0(5760/3) + * count1 = SPACE_DIM2/stride(3/3 = 1) + * start0 = mpi_rank*SPACE_DIM1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ +void +coll_chunk2(void) +{ + const char *filename = PARATESTFILE /* GetTestParameters() */; + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk3 + * + * Purpose: Wrapper to test the collective chunk IO for regular JOINT + selection with at least number of 2*mpi_size chunks + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: one singular selection across many chunks + * Two dimensions, Num of chunks = 2* mpi_size + * + * dim1 = SPACE_DIM1*mpi_size + * dim2 = SPACE_DIM2(3) + * chunk_dim1 = SPACE_DIM1 + * chunk_dim2 = dim2/2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = SPACE_DIM1 + * count1 = SPACE_DIM2(3) + * start0 = mpi_rank*SPACE_DIM1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ + +void +coll_chunk3(void) +{ + const char *filename = PARATESTFILE /* GetTestParameters() */; + int mpi_size; + int mpi_rank; + + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); + 
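/* The selection described above reduces, inside coll_chunktest(), to the
 * following creation-property and hyperslab calls (a condensed sketch with
 * hypothetical ids; SPACE_DIM1 is the number of rows per process and
 * SPACE_DIM2 the number of columns used throughout these tests):
 *
 *     hsize_t dims[2]  = {(hsize_t)(SPACE_DIM1 * mpi_size), SPACE_DIM2};
 *     hsize_t chunk[2] = {SPACE_DIM1, SPACE_DIM2 / 2};
 *     hsize_t start[2] = {(hsize_t)mpi_rank * SPACE_DIM1, 0};
 *     hsize_t count[2] = {SPACE_DIM1, SPACE_DIM2};
 *     hsize_t ones[2]  = {1, 1};
 *     H5Pset_chunk(dcpl, 2, chunk);
 *     H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, ones, count, ones);
 *
 * giving 2 * mpi_size chunks in the file and one contiguous row band per
 * rank. For contrast, the BYROW_DISCONT pattern used by coll_chunk2() keeps
 * the same start but uses stride = 3 with count[0] = SPACE_DIM1 / 3 and
 * count[1] = 1, so rank r touches only the elements
 * (r * SPACE_DIM1, 0), (r * SPACE_DIM1 + 3, 0), ... - many small, disjoint
 * pieces inside a single chunk. */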
coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER); + coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER); + coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk4 + * + * Purpose: Wrapper to test the collective chunk IO for regular JOINT + selection with at least number of 2*mpi_size chunks + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: one singular selection across many chunks + * Two dimensions, Num of chunks = 2* mpi_size + * + * dim1 = SPACE_DIM1*mpi_size + * dim2 = SPACE_DIM2 + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = SPACE_DIM1 + * count1 = SPACE_DIM2(3) + * start0 = mpi_rank*SPACE_DIM1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ + +void +coll_chunk4(void) +{ + const char *filename = PARATESTFILE /* GetTestParameters() */; + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk4 + * + * Purpose: Wrapper to test the collective chunk IO for regular JOINT + selection with at least number of 2*mpi_size chunks + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: one singular selection across many chunks + * Two dimensions, Num of chunks = 2* mpi_size + * + * dim1 = SPACE_DIM1*mpi_size + * dim2 
= SPACE_DIM2 + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = SPACE_DIM1 + * count1 = SPACE_DIM2(3) + * start0 = mpi_rank*SPACE_DIM1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ + +void +coll_chunk5(void) +{ + const char *filename = PARATESTFILE /* GetTestParameters() */; + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk6 + * + * Purpose: Test direct request for multi-chunk-io. + * Wrapper to test the collective chunk IO for regular JOINT + * selection with at least number of 2*mpi_size chunks + * Test for direct to Multi Chunk I/O. 
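 * A condensed sketch of the transfer-property knobs that coll_chunk5()
 * through coll_chunk10() drive (dxpl is a hypothetical H5P_DATASET_XFER
 * property list):
 *
 *     H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
 *     H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO);
 *     H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 2);
 *     H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 50);
 *
 * H5Pset_dxpl_mpio_chunk_opt() forces link-chunk or multi-chunk collective
 * I/O outright, while the _num and _ratio variants adjust the thresholds
 * the library consults when it makes that choice on its own.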
+ * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: one singular selection across many chunks + * Two dimensions, Num of chunks = 2* mpi_size + * + * dim1 = SPACE_DIM1*mpi_size + * dim2 = SPACE_DIM2 + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = SPACE_DIM1 + * count1 = SPACE_DIM2(3) + * start0 = mpi_rank*SPACE_DIM1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ + +void +coll_chunk6(void) +{ + const char *filename = PARATESTFILE /* GetTestParameters() */; + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk7 + * + * Purpose: Wrapper to test the collective chunk IO for regular JOINT + selection with at least number of 2*mpi_size chunks + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: one singular selection across many chunks + * Two dimensions, Num of chunks = 2* mpi_size + * + * dim1 = SPACE_DIM1*mpi_size + * dim2 = SPACE_DIM2 + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = SPACE_DIM1 + * count1 = SPACE_DIM2(3) + * start0 = mpi_rank*SPACE_DIM1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ + +void +coll_chunk7(void) +{ + const char *filename = PARATESTFILE /* GetTestParameters() */; + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + 
printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk8 + * + * Purpose: Wrapper to test the collective chunk IO for regular JOINT + selection with at least number of 2*mpi_size chunks + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: one singular selection across many chunks + * Two dimensions, Num of chunks = 2* mpi_size + * + * dim1 = SPACE_DIM1*mpi_size + * dim2 = SPACE_DIM2 + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = SPACE_DIM1 + * count1 = SPACE_DIM2(3) + * start0 = mpi_rank*SPACE_DIM1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ + +void +coll_chunk8(void) +{ + const char *filename = PARATESTFILE /* GetTestParameters() */; + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk9 + * + * Purpose: Wrapper to test the collective chunk IO for regular JOINT + selection with at least number of 2*mpi_size chunks + * + * Return: Success: 0 + * + * Failure: -1 + * + 
* Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: one singular selection across many chunks + * Two dimensions, Num of chunks = 2* mpi_size + * + * dim1 = SPACE_DIM1*mpi_size + * dim2 = SPACE_DIM2 + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = SPACE_DIM1 + * count1 = SPACE_DIM2(3) + * start0 = mpi_rank*SPACE_DIM1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ + +void +coll_chunk9(void) +{ + const char *filename = PARATESTFILE /* GetTestParameters() */; + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk10 + * + * Purpose: Wrapper to test the collective chunk IO for regular JOINT + selection with at least number of 2*mpi_size chunks + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: one singular selection across many chunks + * Two dimensions, Num of chunks = 2* mpi_size + * + * dim1 = SPACE_DIM1*mpi_size + * dim2 = SPACE_DIM2 + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = SPACE_DIM1 + * count1 = SPACE_DIM2(3) + * start0 = mpi_rank*SPACE_DIM1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ + +void +coll_chunk10(void) +{ + const char *filename = PARATESTFILE /* GetTestParameters() */; + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset 
more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunktest + * + * Purpose: The real testing routine for regular selection of collective + chunking storage + testing both write and read, + If anything fails, it may be read or write. There is no + separation test between read and write. + * + * Return: Success: 0 + * + * Failure: -1 + * + * Modifications: + * Remove invalid temporary property checkings for API_LINK_HARD and + * API_LINK_TRUE cases. + * Programmer: Jonathan Kim + * Date: 2012-10-10 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection, + int mem_selection, int mode) +{ + hid_t file, dataset, file_dataspace, mem_dataspace; + hid_t acc_plist, xfer_plist, crp_plist; + + hsize_t dims[RANK], chunk_dims[RANK]; + int *data_array1 = NULL; + int *data_origin1 = NULL; + + hsize_t start[RANK], count[RANK], stride[RANK], block[RANK]; + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + unsigned prop_value; +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + int mpi_size, mpi_rank; + + herr_t status; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + size_t num_points; /* for point selection */ + hsize_t *coords = NULL; /* for point selection */ + hsize_t current_dims; /* for point selection */ + + /* set up MPI parameters */ + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + /* Create the data space */ + + acc_plist = create_faccess_plist(comm, info, facc_type); + VRFY((acc_plist >= 0), ""); + + file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist); + VRFY((file >= 0), "H5Fcreate succeeded"); + + status = H5Pclose(acc_plist); + VRFY((status >= 0), ""); + + /* setup dimensionality object */ + dims[0] = (hsize_t)(SPACE_DIM1 * mpi_size); + dims[1] = SPACE_DIM2; + + /* allocate memory for data buffer */ + data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + + /* set up dimensions of the slab this process accesses */ + ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor); + + /* set up the coords array selection */ + num_points = block[0] * block[1] * count[0] * count[1]; + coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t)); + VRFY((coords != NULL), "coords malloc succeeded"); + point_set(start, count, stride, block, num_points, coords, mode); + + file_dataspace = H5Screate_simple(2, dims, NULL); + VRFY((file_dataspace >= 0), "file dataspace created 
succeeded");
+
+    if (ALL != mem_selection) {
+        mem_dataspace = H5Screate_simple(2, dims, NULL);
+        VRFY((mem_dataspace >= 0), "mem dataspace created succeeded");
+    }
+    else {
+        current_dims  = num_points;
+        mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
+        VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+    }
+
+    crp_plist = H5Pcreate(H5P_DATASET_CREATE);
+    VRFY((crp_plist >= 0), "");
+
+    /* Set up chunk information. */
+    chunk_dims[0] = dims[0] / (hsize_t)chunk_factor;
+
+    /* to decrease the testing time, maintain bigger chunk size */
+    (chunk_factor == 1) ? (chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2 / 2);
+    status = H5Pset_chunk(crp_plist, 2, chunk_dims);
+    VRFY((status >= 0), "chunk creation property list succeeded");
+
+    dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT,
+                         crp_plist, H5P_DEFAULT);
+    VRFY((dataset >= 0), "dataset created succeeded");
+
+    status = H5Pclose(crp_plist);
+    VRFY((status >= 0), "");
+
+    /*put some trivial data in the data array */
+    ccdataset_fill(start, stride, count, block, data_array1, mem_selection);
+
+    MESG("data_array initialized");
+
+    switch (file_selection) {
+        case HYPER:
+            status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+            VRFY((status >= 0), "hyperslab selection succeeded");
+            break;
+
+        case POINT:
+            if (num_points) {
+                status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+                VRFY((status >= 0), "Element selection succeeded");
+            }
+            else {
+                status = H5Sselect_none(file_dataspace);
+                VRFY((status >= 0), "none selection succeeded");
+            }
+            break;
+
+        case ALL:
+            status = H5Sselect_all(file_dataspace);
+            VRFY((status >= 0), "H5Sselect_all succeeded");
+            break;
+    }
+
+    switch (mem_selection) {
+        case HYPER:
+            status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+            VRFY((status >= 0), "hyperslab selection succeeded");
+            break;
+
+        case POINT:
+            if (num_points) {
+                status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+                VRFY((status >= 0), "Element selection succeeded");
+            }
+            else {
+                status = H5Sselect_none(mem_dataspace);
+                VRFY((status >= 0), "none selection succeeded");
+            }
+            break;
+
+        case ALL:
+            status = H5Sselect_all(mem_dataspace);
+            VRFY((status >= 0), "H5Sselect_all succeeded");
+            break;
+    }
+
+    /* set up the collective transfer property list */
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+    VRFY((xfer_plist >= 0), "");
+
+    status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+    VRFY((status >= 0), "MPIO collective transfer property succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((status >= 0), "set independent IO collectively succeeded");
+    }
+
+    switch (api_option) {
+        case API_LINK_HARD:
+            status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO);
+            VRFY((status >= 0), "collective chunk optimization succeeded");
+            break;
+
+        case API_MULTI_HARD:
+            status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO);
+            VRFY((status >= 0), "collective chunk optimization succeeded ");
+            break;
+
+        case API_LINK_TRUE:
+            status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2);
+            VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+            break;
+
+        case API_LINK_FALSE:
+            status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6);
+            VRFY((status >= 0), "collective chunk optimization set chunk 
number succeeded"); + break; + + case API_MULTI_COLL: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */ + VRFY((status >= 0), "collective chunk optimization set chunk number succeeded"); + status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50); + VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded"); + break; + + case API_MULTI_IND: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */ + VRFY((status >= 0), "collective chunk optimization set chunk number succeeded"); + status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100); + VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded"); + break; + + default:; + } + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + if (facc_type == FACC_MPIO) { + switch (api_option) { + case API_LINK_HARD: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; + + case API_MULTI_HARD: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; + + case API_LINK_TRUE: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; + + case API_LINK_FALSE: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; + + case API_MULTI_COLL: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, + H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; + + case API_MULTI_IND: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; + + default:; + } + } +#endif + + /* write data collectively */ + status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((status >= 0), "dataset write succeeded"); + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + /* Only check chunk optimization mode if selection I/O is not being used - + * selection I/O bypasses this IO mode decision - it's effectively always + * multi chunk currently */ + if (facc_type == FACC_MPIO && /* !H5_use_selection_io_g */ TRUE) { + switch (api_option) { + case API_LINK_HARD: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value); + VRFY((status >= 0), "testing property list get succeeded"); + VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded"); + break; + + case API_MULTI_HARD: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value); + VRFY((status >= 0), "testing property 
list get succeeded"); + VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded"); + break; + + case API_LINK_TRUE: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value); + VRFY((status >= 0), "testing property list get succeeded"); + VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded"); + break; + + case API_LINK_FALSE: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value); + VRFY((status >= 0), "testing property list get succeeded"); + VRFY((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded"); + break; + + case API_MULTI_COLL: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value); + VRFY((status >= 0), "testing property list get succeeded"); + VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded"); + break; + + case API_MULTI_IND: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value); + VRFY((status >= 0), "testing property list get succeeded"); + VRFY((prop_value == 0), + "API to set MULTI-CHUNK IO transferring to independent IO succeeded"); + break; + + default:; + } + } +#endif + + status = H5Dclose(dataset); + VRFY((status >= 0), ""); + + status = H5Pclose(xfer_plist); + VRFY((status >= 0), "property list closed"); + + status = H5Sclose(file_dataspace); + VRFY((status >= 0), ""); + + status = H5Sclose(mem_dataspace); + VRFY((status >= 0), ""); + + status = H5Fclose(file); + VRFY((status >= 0), ""); + + if (data_array1) + HDfree(data_array1); + + /* Use collective read to verify the correctness of collective write. */ + + /* allocate memory for data buffer */ + data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + + /* allocate memory for data buffer */ + data_origin1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int)); + VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); + + acc_plist = create_faccess_plist(comm, info, facc_type); + VRFY((acc_plist >= 0), "MPIO creation property list succeeded"); + + file = H5Fopen(filename, H5F_ACC_RDONLY, acc_plist); + VRFY((file >= 0), "H5Fcreate succeeded"); + + status = H5Pclose(acc_plist); + VRFY((status >= 0), ""); + + /* open the collective dataset*/ + dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT); + VRFY((dataset >= 0), ""); + + /* set up dimensions of the slab this process accesses */ + ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor); + + /* obtain the file and mem dataspace*/ + file_dataspace = H5Dget_space(dataset); + VRFY((file_dataspace >= 0), ""); + + if (ALL != mem_selection) { + mem_dataspace = H5Dget_space(dataset); + VRFY((mem_dataspace >= 0), ""); + } + else { + current_dims = num_points; + mem_dataspace = H5Screate_simple(1, ¤t_dims, NULL); + VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded"); + } + + switch (file_selection) { + case HYPER: + status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(file_dataspace); + VRFY((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(file_dataspace); + VRFY((status >= 0), 
"H5Sselect_all succeeded"); + break; + } + + switch (mem_selection) { + case HYPER: + status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(mem_dataspace); + VRFY((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(mem_dataspace); + VRFY((status >= 0), "H5Sselect_all succeeded"); + break; + } + + /* fill dataset with test data */ + ccdataset_fill(start, stride, count, block, data_origin1, mem_selection); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + + status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((status >= 0), "MPIO collective transfer property succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((status >= 0), "set independent IO collectively succeeded"); + } + + status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((status >= 0), "dataset read succeeded"); + + /* verify the read data with original expected data */ + status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection); + if (status) + nerrors++; + + status = H5Pclose(xfer_plist); + VRFY((status >= 0), "property list closed"); + + /* close dataset collectively */ + status = H5Dclose(dataset); + VRFY((status >= 0), "H5Dclose"); + + /* release all IDs created */ + status = H5Sclose(file_dataspace); + VRFY((status >= 0), "H5Sclose"); + + status = H5Sclose(mem_dataspace); + VRFY((status >= 0), "H5Sclose"); + + /* close the file collectively */ + status = H5Fclose(file); + VRFY((status >= 0), "H5Fclose"); + + /* release data buffers */ + if (coords) + HDfree(coords); + if (data_array1) + HDfree(data_array1); + if (data_origin1) + HDfree(data_origin1); +} + +/* Set up the selection */ +static void +ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], + int mode) +{ + + switch (mode) { + + case BYROW_CONT: + /* Each process takes a slabs of rows. */ + block[0] = 1; + block[1] = 1; + stride[0] = 1; + stride[1] = 1; + count[0] = SPACE_DIM1; + count[1] = SPACE_DIM2; + start[0] = (hsize_t)mpi_rank * count[0]; + start[1] = 0; + + break; + + case BYROW_DISCONT: + /* Each process takes several disjoint blocks. */ + block[0] = 1; + block[1] = 1; + stride[0] = 3; + stride[1] = 3; + count[0] = SPACE_DIM1 / (stride[0] * block[0]); + count[1] = (SPACE_DIM2) / (stride[1] * block[1]); + start[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_rank; + start[1] = 0; + + break; + + case BYROW_SELECTNONE: + /* Each process takes a slabs of rows, there are + no selections for the last process. */ + block[0] = 1; + block[1] = 1; + stride[0] = 1; + stride[1] = 1; + count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : SPACE_DIM1); + count[1] = SPACE_DIM2; + start[0] = (hsize_t)mpi_rank * count[0]; + start[1] = 0; + + break; + + case BYROW_SELECTUNBALANCE: + /* The first one-third of the number of processes only + select top half of the domain, The rest will select the bottom + half of the domain. 
*/ + + block[0] = 1; + count[0] = 2; + stride[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size / 4 + 1; + block[1] = SPACE_DIM2; + count[1] = 1; + start[1] = 0; + stride[1] = 1; + if ((mpi_rank * 3) < (mpi_size * 2)) + start[0] = (hsize_t)mpi_rank; + else + start[0] = (hsize_t)(1 + SPACE_DIM1 * mpi_size / 2 + (mpi_rank - 2 * mpi_size / 3)); + break; + + case BYROW_SELECTINCHUNK: + /* Each process will only select one chunk */ + + block[0] = 1; + count[0] = 1; + start[0] = (hsize_t)(mpi_rank * SPACE_DIM1); + stride[0] = 1; + block[1] = SPACE_DIM2; + count[1] = 1; + stride[1] = 1; + start[1] = 0; + + break; + + default: + /* Unknown mode. Set it to cover the whole dataset. */ + block[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size; + block[1] = SPACE_DIM2; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = 0; + + break; + } + if (VERBOSE_MED) { + HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + } +} + +/* + * Fill the dataset with trivial data for testing. + * Assume dimension rank is 2. + */ +static void +ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset, + int mem_selection) +{ + DATATYPE *dataptr = dataset; + DATATYPE *tmptr; + hsize_t i, j, k1, k2, k = 0; + /* put some trivial data in the data_array */ + tmptr = dataptr; + + /* assign the disjoint block (two-dimensional)data array value + through the pointer */ + + for (k1 = 0; k1 < count[0]; k1++) { + for (i = 0; i < block[0]; i++) { + for (k2 = 0; k2 < count[1]; k2++) { + for (j = 0; j < block[1]; j++) { + + if (ALL != mem_selection) { + dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] + + k2 * stride[1] + j); + } + else { + dataptr = tmptr + k; + k++; + } + + *dataptr = (DATATYPE)(k1 + k2 + i + j); + } + } + } + } +} + +/* + * Print the first block of the content of the dataset. + */ +static void +ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset) + +{ + DATATYPE *dataptr = dataset; + hsize_t i, j; + + /* print the column heading */ + HDprintf("Print only the first block of the dataset\n"); + HDprintf("%-8s", "Cols:"); + for (j = 0; j < block[1]; j++) { + HDprintf("%3lu ", (unsigned long)(start[1] + j)); + } + HDprintf("\n"); + + /* print the slab data */ + for (i = 0; i < block[0]; i++) { + HDprintf("Row %2lu: ", (unsigned long)(i + start[0])); + for (j = 0; j < block[1]; j++) { + HDprintf("%03d ", *dataptr++); + } + HDprintf("\n"); + } +} + +/* + * Print the content of the dataset. 
+ */ +static int +ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, + DATATYPE *original, int mem_selection) +{ + hsize_t i, j, k1, k2, k = 0; + int vrfyerrs; + DATATYPE *dataptr, *oriptr; + + /* print it if VERBOSE_MED */ + if (VERBOSE_MED) { + HDprintf("dataset_vrfy dumping:::\n"); + HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1]); + HDprintf("original values:\n"); + ccdataset_print(start, block, original); + HDprintf("compared values:\n"); + ccdataset_print(start, block, dataset); + } + + vrfyerrs = 0; + + for (k1 = 0; k1 < count[0]; k1++) { + for (i = 0; i < block[0]; i++) { + for (k2 = 0; k2 < count[1]; k2++) { + for (j = 0; j < block[1]; j++) { + if (ALL != mem_selection) { + dataptr = dataset + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] + + k2 * stride[1] + j); + oriptr = original + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] + + k2 * stride[1] + j); + } + else { + dataptr = dataset + k; + oriptr = original + k; + k++; + } + if (*dataptr != *oriptr) { + if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) { + HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n", + (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr)); + } + } + } + } + } + } + if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("[more errors ...]\n"); + if (vrfyerrs) + HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs); + return (vrfyerrs); +} diff --git a/testpar/API/t_coll_md_read.c b/testpar/API/t_coll_md_read.c new file mode 100644 index 00000000000..f6f99bf16e3 --- /dev/null +++ b/testpar/API/t_coll_md_read.c @@ -0,0 +1,654 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * A test suite to test HDF5's collective metadata read and write capabilities, + * as enabled by making a call to H5Pset_all_coll_metadata_ops() and/or + * H5Pset_coll_metadata_write(). + */ + +#include "hdf5.h" +#include "testphdf5.h" + +#include +#include +#include + +/* + * Define the non-participating process as the "last" + * rank to avoid any weirdness potentially caused by + * an if (mpi_rank == 0) check. 
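[Editor's note: for reference, the two FAPL calls named in the file comment above can be combined on one parallel file-access property list. A minimal sketch, assuming an MPI-enabled build; the helper name is illustrative and error checks are omitted.]

#include "hdf5.h"
#include <mpi.h>
#include <stdbool.h>

/* Sketch: enable collective metadata reads and writes on an MPIO FAPL.
 * Assumes MPI_Init() has already been called. */
static hid_t
make_coll_md_fapl(MPI_Comm comm)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);

    /* Metadata read operations through this file handle are issued
     * collectively by all ranks. */
    H5Pset_all_coll_metadata_ops(fapl, true);

    /* Metadata cache flushes (writes) are performed collectively. */
    H5Pset_coll_metadata_write(fapl, true);

    return fapl;
}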
+ */ +#define PARTIAL_NO_SELECTION_NO_SEL_PROCESS (mpi_rank == mpi_size - 1) +#define PARTIAL_NO_SELECTION_DATASET_NAME "partial_no_selection_dset" +#define PARTIAL_NO_SELECTION_DATASET_NDIMS 2 +#define PARTIAL_NO_SELECTION_Y_DIM_SCALE 5 +#define PARTIAL_NO_SELECTION_X_DIM_SCALE 5 + +#define MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS 2 + +#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM 10000 +#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue" +#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1 + +#define COLL_GHEAP_WRITE_ATTR_NELEMS 10 +#define COLL_GHEAP_WRITE_ATTR_NAME "coll_gheap_write_attr" +#define COLL_GHEAP_WRITE_ATTR_DIMS 1 + +/* + * A test for issue HDFFV-10501. A parallel hang was reported which occurred + * in linked-chunk I/O when collective metadata reads are enabled and some ranks + * do not have any selection in a dataset's dataspace, while others do. The ranks + * which have no selection during the read/write operation called H5D__chunk_addrmap() + * to retrieve the lowest chunk address, since we require that the read/write be done + * in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees, + * this caused the non-participating ranks to issue a collective MPI_Bcast() call + * which the other ranks did not issue, thus causing a hang. + * + * However, since these ranks are not actually reading/writing anything, this call + * can simply be removed and the address used for the read/write can be set to an + * arbitrary number (0 was chosen). + */ +void +test_partial_no_selection_coll_md_read(void) +{ + const char *filename; + hsize_t *dataset_dims = NULL; + hsize_t max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS]; + hsize_t sel_dims[1]; + hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = {PARTIAL_NO_SELECTION_Y_DIM_SCALE, + PARTIAL_NO_SELECTION_X_DIM_SCALE}; + hsize_t start[PARTIAL_NO_SELECTION_DATASET_NDIMS]; + hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS]; + hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS]; + hsize_t block[PARTIAL_NO_SELECTION_DATASET_NDIMS]; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + int mpi_rank, mpi_size; + void *data = NULL; + void *read_buf = NULL; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or file flush aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + filename = PARATESTFILE /* GetTestParameters() */; + + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + + /* + * Even though the testphdf5 framework currently sets collective metadata reads + * on the FAPL, we call it here just to be sure this is futureproof, since + * demonstrating this issue relies upon it. 
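[Editor's note: to make the HDFFV-10501 scenario described above concrete, the essential requirement is that a rank with nothing to transfer still enters the collective call, with empty selections on both dataspaces. A condensed sketch of that pattern; identifiers are illustrative and error checks are omitted.]

#include "hdf5.h"

/* Sketch: a rank with nothing to read participates in a collective,
 * linked-chunk read with an empty selection; other ranks pass their
 * already-made selections. `dset`, `fspace` and `mspace` are assumed
 * to be valid IDs. */
static void
collective_read_maybe_empty(hid_t dset, hid_t fspace, hid_t mspace,
                            int have_nothing, int *buf)
{
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    /* Force linked-chunk I/O, the code path HDFFV-10501 exercised. */
    H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_ONE_IO);

    if (have_nothing) {
        /* Empty selections on both spaces; the rank still makes the call. */
        H5Sselect_none(fspace);
        H5Sselect_none(mspace);
    }

    H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, buf);

    H5Pclose(dxpl);
}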
+ */ + VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded"); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + dataset_dims = HDmalloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims)); + VRFY((dataset_dims != NULL), "malloc succeeded"); + + dataset_dims[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_size; + dataset_dims[1] = (hsize_t)PARTIAL_NO_SELECTION_X_DIM_SCALE * (hsize_t)mpi_size; + max_dataset_dims[0] = H5S_UNLIMITED; + max_dataset_dims[1] = H5S_UNLIMITED; + + fspace_id = H5Screate_simple(PARTIAL_NO_SELECTION_DATASET_NDIMS, dataset_dims, max_dataset_dims); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + /* + * Set up chunking on the dataset in order to reproduce the problem. + */ + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); + + VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0), + "H5Pset_chunk succeeded"); + + dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, + dcpl_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); + + /* + * Setup hyperslab selection to split the dataset among the ranks. + * + * The ranks will write rows across the dataset. + */ + start[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_rank; + start[1] = 0; + stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE; + stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE; + count[0] = 1; + count[1] = (hsize_t)mpi_size; + block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE; + block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE; + + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), + "H5Sselect_hyperslab succeeded"); + + sel_dims[0] = count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE); + + mspace_id = H5Screate_simple(1, sel_dims, NULL); + VRFY((mspace_id >= 0), "H5Screate_simple succeeded"); + + data = HDcalloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * + sizeof(int)); + VRFY((data != NULL), "calloc succeeded"); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); + + /* + * Enable collective access for the data transfer. + */ + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); + + VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded"); + + VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded"); + + /* + * Ensure that linked-chunk I/O is performed since this is + * the particular code path where the issue lies and we don't + * want the library doing multi-chunk I/O behind our backs. + */ + VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), + "H5Pset_dxpl_mpio_chunk_opt succeeded"); + + read_buf = HDmalloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * + sizeof(int)); + VRFY((read_buf != NULL), "malloc succeeded"); + + /* + * Make sure to call H5Sselect_none() on the non-participating process. + */ + if (PARTIAL_NO_SELECTION_NO_SEL_PROCESS) { + VRFY((H5Sselect_none(fspace_id) >= 0), "H5Sselect_none succeeded"); + VRFY((H5Sselect_none(mspace_id) >= 0), "H5Sselect_none succeeded"); + } + + /* + * Finally have each rank read their section of data back from the dataset. 
+ */ + VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), + "H5Dread succeeded"); + + /* + * Check data integrity just to be sure. + */ + if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) { + VRFY((!HDmemcmp(data, read_buf, + count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * + sizeof(int))), + "memcmp succeeded"); + } + + if (dataset_dims) { + HDfree(dataset_dims); + dataset_dims = NULL; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); + VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); +} + +/* + * A test for HDFFV-10562 which attempts to verify that using multi-chunk + * I/O with collective metadata reads enabled doesn't causes issues due to + * collective metadata reads being made only by process 0 in H5D__chunk_addrmap(). + * + * Failure in this test may either cause a hang, or, due to how the MPI calls + * pertaining to this issue might mistakenly match up, may cause an MPI error + * message similar to: + * + * #008: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): MPI_BCast failed + * major: Internal error (too specific to document in detail) + * minor: Some MPI function failed + * #009: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): Message truncated, error stack: + *PMPI_Bcast(1600)..................: MPI_Bcast(buf=0x1df98e0, count=18, MPI_BYTE, root=0, comm=0x84000006) + *failed MPIR_Bcast_impl(1452).............: MPIR_Bcast(1476)..................: + *MPIR_Bcast_intra(1249)............: + *MPIR_SMP_Bcast(1088)..............: + *MPIR_Bcast_binomial(239)..........: + *MPIDI_CH3U_Receive_data_found(131): Message from rank 0 and tag 2 truncated; 2616 bytes received but buffer + *size is 18 major: Internal error (too specific to document in detail) minor: MPI Error String + * + */ +void +test_multi_chunk_io_addrmap_issue(void) +{ + const char *filename; + hsize_t start[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; + hsize_t stride[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; + hsize_t count[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; + hsize_t block[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; + hsize_t dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {10, 5}; + hsize_t chunk_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {5, 5}; + hsize_t max_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {H5S_UNLIMITED, H5S_UNLIMITED}; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + void *read_buf = NULL; + int mpi_rank; + int data[5][5] = {{0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}}; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or file flush aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + filename = PARATESTFILE /* 
GetTestParameters() */; + + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + + /* + * Even though the testphdf5 framework currently sets collective metadata reads + * on the FAPL, we call it here just to be sure this is futureproof, since + * demonstrating this issue relies upon it. + */ + VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded"); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + space_id = H5Screate_simple(MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, dims, max_dims); + VRFY((space_id >= 0), "H5Screate_simple succeeded"); + + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); + + VRFY((H5Pset_chunk(dcpl_id, MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, chunk_dims) >= 0), + "H5Pset_chunk succeeded"); + + dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); + + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); + VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO) >= 0), + "H5Pset_dxpl_mpio_chunk_opt succeeded"); + + start[1] = 0; + stride[0] = stride[1] = 1; + count[0] = count[1] = 5; + block[0] = block[1] = 1; + + if (mpi_rank == 0) + start[0] = 0; + else + start[0] = 5; + + VRFY((H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) >= 0), + "H5Sselect_hyperslab succeeded"); + if (mpi_rank != 0) + VRFY((H5Sselect_none(space_id) >= 0), "H5Sselect_none succeeded"); + + VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, space_id, dxpl_id, data) >= 0), "H5Dwrite succeeded"); + + VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded"); + + read_buf = HDmalloc(50 * sizeof(int)); + VRFY((read_buf != NULL), "malloc succeeded"); + + VRFY((H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "H5Dread succeeded"); + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + VRFY((H5Sclose(space_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); + VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); +} + +/* + * A test for HDFFV-10562 which attempts to verify that using linked-chunk + * I/O with collective metadata reads enabled doesn't cause issues due to + * collective metadata reads being made only by process 0 in H5D__sort_chunk(). + * + * NOTE: Due to the way that the threshold value which pertains to this test + * is currently calculated within HDF5, the following two conditions must be + * true to trigger the issue: + * + * Condition 1: A certain threshold ratio must be met in order to have HDF5 + * obtain all chunk addresses collectively inside H5D__sort_chunk(). 
This is + * given by the following: + * + * (sum_chunk * 100) / (dataset_nchunks * mpi_size) >= 30% + * + * where: + * * `sum_chunk` is the combined sum of the number of chunks selected in + * the dataset by all ranks (chunks selected by more than one rank count + * individually toward the sum for each rank selecting that chunk) + * * `dataset_nchunks` is the number of chunks in the dataset (selected + * or not) + * * `mpi_size` is the size of the MPI Communicator + * + * Condition 2: `sum_chunk` divided by `mpi_size` must exceed or equal a certain + * threshold (as of this writing, 10000). + * + * To satisfy both these conditions, we #define a macro, + * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM, which corresponds to the + * value of the H5D_ALL_CHUNK_ADDR_THRES_COL_NUM macro in H5Dmpio.c (the + * 10000 threshold from condition 2). We then create a dataset of that many + * chunks and have each MPI rank write to and read from a piece of every single + * chunk in the dataset. This ensures chunk utilization is the max possible + * and exceeds our 30% target ratio, while always exactly matching the numeric + * chunk threshold value of condition 2. + * + * Failure in this test may either cause a hang, or, due to how the MPI calls + * pertaining to this issue might mistakenly match up, may cause an MPI error + * message similar to: + * + * #008: H5Dmpio.c line 2338 in H5D__sort_chunk(): MPI_BCast failed + * major: Internal error (too specific to document in detail) + * minor: Some MPI function failed + * #009: H5Dmpio.c line 2338 in H5D__sort_chunk(): Other MPI error, error stack: + *PMPI_Bcast(1600)........: MPI_Bcast(buf=0x7eae610, count=320000, MPI_BYTE, root=0, comm=0x84000006) failed + *MPIR_Bcast_impl(1452)...: + *MPIR_Bcast(1476)........: + *MPIR_Bcast_intra(1249)..: + *MPIR_SMP_Bcast(1088)....: + *MPIR_Bcast_binomial(250): message sizes do not match across processes in the collective routine: Received + *2096 but expected 320000 major: Internal error (too specific to document in detail) minor: MPI Error String + */ +void +test_link_chunk_io_sort_chunk_issue(void) +{ + const char *filename; + hsize_t dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t sel_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t block[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + int mpi_rank, mpi_size; + void *data = NULL; + void *read_buf = NULL; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or file flush aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + filename = PARATESTFILE /* GetTestParameters() */; + + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); 
+ + /* + * Even though the testphdf5 framework currently sets collective metadata reads + * on the FAPL, we call it here just to be sure this is futureproof, since + * demonstrating this issue relies upon it. + */ + VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded"); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + /* + * Create a one-dimensional dataset of exactly LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM + * chunks, where every rank writes to a piece of every single chunk to keep utilization high. + */ + dataset_dims[0] = (hsize_t)mpi_size * (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM; + + fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, NULL); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + /* + * Set up chunking on the dataset in order to reproduce the problem. + */ + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); + + /* Chunk size is equal to MPI size since each rank writes to a piece of every chunk */ + chunk_dims[0] = (hsize_t)mpi_size; + + VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0), + "H5Pset_chunk succeeded"); + + dset_id = H5Dcreate2(file_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME, H5T_NATIVE_INT, fspace_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); + + /* + * Setup hyperslab selection to split the dataset among the ranks. + */ + start[0] = (hsize_t)mpi_rank; + stride[0] = (hsize_t)mpi_size; + count[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM; + block[0] = 1; + + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), + "H5Sselect_hyperslab succeeded"); + + sel_dims[0] = count[0]; + + mspace_id = H5Screate_simple(1, sel_dims, NULL); + VRFY((mspace_id >= 0), "H5Screate_simple succeeded"); + + data = HDcalloc(1, count[0] * sizeof(int)); + VRFY((data != NULL), "calloc succeeded"); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); + + /* + * Enable collective access for the data transfer. + */ + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); + + VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded"); + + VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded"); + + /* + * Ensure that linked-chunk I/O is performed since this is + * the particular code path where the issue lies and we don't + * want the library doing multi-chunk I/O behind our backs. + */ + VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), + "H5Pset_dxpl_mpio_chunk_opt succeeded"); + + read_buf = HDmalloc(count[0] * sizeof(int)); + VRFY((read_buf != NULL), "malloc succeeded"); + + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), + "H5Sselect_hyperslab succeeded"); + + sel_dims[0] = count[0]; + + VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded"); + + mspace_id = H5Screate_simple(1, sel_dims, NULL); + VRFY((mspace_id >= 0), "H5Screate_simple succeeded"); + + /* + * Finally have each rank read their section of data back from the dataset. 
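[Editor's note: the hyperslab used above interleaves the ranks element by element, so every chunk (chunk_dims[0] = mpi_size elements) holds exactly one element per rank. The same selection is written out below for a concrete size as an illustrative sketch.]

/* Worked example (illustrative): mpi_size = 4, chunk size 4, 10000 chunks.
 *   rank 0 selects elements 0, 4, 8, ...   (one element in every chunk)
 *   rank 1 selects elements 1, 5, 9, ...
 *   rank 2 selects elements 2, 6, 10, ...
 *   rank 3 selects elements 3, 7, 11, ...
 * With start = rank, stride = mpi_size, count = 10000 and block = 1 each
 * rank touches all 10000 chunks, so chunk utilization is 100% and the
 * ratio in the threshold check above evaluates to 100. */
static void
select_one_element_per_chunk(hid_t fspace, int mpi_rank, int mpi_size)
{
    hsize_t start[1]  = {(hsize_t)mpi_rank};
    hsize_t stride[1] = {(hsize_t)mpi_size};
    hsize_t count[1]  = {10000};
    hsize_t block[1]  = {1};

    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, block);
}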
+ */ + VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), + "H5Dread succeeded"); + + if (data) { + HDfree(data); + data = NULL; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); + VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); +} + +/* + * A test for GitHub issue #2433 which causes a collective metadata write + * of global heap data. This test is meant to ensure that global heap data + * gets correctly mapped as raw data during a collective metadata write + * using vector I/O. + * + * An assertion exists in the library that should be triggered if global + * heap data is not correctly mapped as raw data. + */ +void +test_collective_global_heap_write(void) +{ + const char *filename; + hsize_t attr_dims[COLL_GHEAP_WRITE_ATTR_DIMS]; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t attr_id = H5I_INVALID_HID; + hid_t vl_type = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hvl_t vl_data; + int mpi_rank, mpi_size; + int data_buf[COLL_GHEAP_WRITE_ATTR_NELEMS]; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or file flush aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + filename = PARATESTFILE /* GetTestParameters() */; + + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + + /* + * Even though the testphdf5 framework currently sets collective metadata + * writes on the FAPL, we call it here just to be sure this is futureproof, + * since demonstrating this issue relies upon it. 
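[Editor's note: for context on why the following test routes data through the global heap, variable-length data is not stored inline; each element is an hvl_t {len, p} descriptor whose payload lands in a global heap collection. A minimal write/read sketch follows; identifiers are illustrative, error checks are omitted, and H5Treclaim is assumed to be available (HDF5 1.12 or newer).]

#include "hdf5.h"

/* Sketch: write one variable-length integer sequence as an attribute on
 * `loc` (a file or object ID), read it back, and free the buffer the
 * library allocated for the VL payload. */
static void
vl_attr_roundtrip(hid_t loc)
{
    int     values[4] = {1, 2, 3, 4};
    hvl_t   wdata, rdata;
    hsize_t dims[1] = {1};

    hid_t vl_type = H5Tvlen_create(H5T_NATIVE_INT);
    hid_t space   = H5Screate_simple(1, dims, NULL);
    hid_t attr    = H5Acreate2(loc, "vl_attr_example", vl_type, space,
                               H5P_DEFAULT, H5P_DEFAULT);

    wdata.len = 4;
    wdata.p   = values;
    H5Awrite(attr, vl_type, &wdata);   /* payload goes to the global heap */

    H5Aread(attr, vl_type, &rdata);    /* library allocates rdata.p */
    H5Treclaim(vl_type, space, H5P_DEFAULT, &rdata);

    H5Aclose(attr);
    H5Sclose(space);
    H5Tclose(vl_type);
}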
+ */ + VRFY((H5Pset_coll_metadata_write(fapl_id, true) >= 0), "Set collective metadata writes succeeded"); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + attr_dims[0] = 1; + + fspace_id = H5Screate_simple(COLL_GHEAP_WRITE_ATTR_DIMS, attr_dims, NULL); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + vl_type = H5Tvlen_create(H5T_NATIVE_INT); + VRFY((vl_type >= 0), "H5Tvlen_create succeeded"); + + vl_data.len = COLL_GHEAP_WRITE_ATTR_NELEMS; + vl_data.p = data_buf; + + /* + * Create a variable-length attribute that will get written to the global heap + */ + attr_id = H5Acreate2(file_id, COLL_GHEAP_WRITE_ATTR_NAME, vl_type, fspace_id, H5P_DEFAULT, H5P_DEFAULT); + VRFY((attr_id >= 0), "H5Acreate2 succeeded"); + + for (size_t i = 0; i < COLL_GHEAP_WRITE_ATTR_NELEMS; i++) + data_buf[i] = (int)i; + + VRFY((H5Awrite(attr_id, vl_type, &vl_data) >= 0), "H5Awrite succeeded"); + + VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Tclose(vl_type) >= 0), "H5Sclose succeeded"); + VRFY((H5Aclose(attr_id) >= 0), "H5Aclose succeeded"); + VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); +} diff --git a/testpar/API/t_dset.c b/testpar/API/t_dset.c new file mode 100644 index 00000000000..d00524364dd --- /dev/null +++ b/testpar/API/t_dset.c @@ -0,0 +1,4335 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Parallel tests for datasets + */ + +/* + * Example of using the parallel HDF5 library to access datasets. + * + * This program contains three major parts. Part 1 tests fixed dimension + * datasets, for both independent and collective transfer modes. + * Part 2 tests extendible datasets, for independent transfer mode + * only. + * Part 3 tests extendible datasets, for collective transfer mode + * only. + */ + +#include "hdf5.h" +#include "testphdf5.h" + +/* + * The following are various utility routines used by the tests. + */ + +/* + * Setup the dimensions of the hyperslab. + * Two modes--by rows or by columns. + * Assume dimension rank is 2. + * BYROW divide into slabs of rows + * BYCOL divide into blocks of columns + * ZROW same as BYROW except process 0 gets 0 rows + * ZCOL same as BYCOL except process 0 gets 0 columns + */ +static void +slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], + int mode) +{ + switch (mode) { + case BYROW: + /* Each process takes a slabs of rows. */ + block[0] = (hsize_t)(dim0 / mpi_size); + block[1] = (hsize_t)dim1; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + if (VERBOSE_MED) + HDprintf("slab_set BYROW\n"); + break; + case BYCOL: + /* Each process takes a block of columns. 
*/ + block[0] = (hsize_t)dim0; + block[1] = (hsize_t)(dim1 / mpi_size); + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = (hsize_t)mpi_rank * block[1]; + if (VERBOSE_MED) + HDprintf("slab_set BYCOL\n"); + break; + case ZROW: + /* Similar to BYROW except process 0 gets 0 row */ + block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0); + block[1] = (hsize_t)dim1; + stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */ + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (mpi_rank ? (hsize_t)mpi_rank * block[0] : 0); + start[1] = 0; + if (VERBOSE_MED) + HDprintf("slab_set ZROW\n"); + break; + case ZCOL: + /* Similar to BYCOL except process 0 gets 0 column */ + block[0] = (hsize_t)dim0; + block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0); + stride[0] = block[0]; + stride[1] = (hsize_t)(mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */ + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = (mpi_rank ? (hsize_t)mpi_rank * block[1] : 0); + if (VERBOSE_MED) + HDprintf("slab_set ZCOL\n"); + break; + default: + /* Unknown mode. Set it to cover the whole dataset. */ + HDprintf("unknown slab_set mode (%d)\n", mode); + block[0] = (hsize_t)dim0; + block[1] = (hsize_t)dim1; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = 0; + if (VERBOSE_MED) + HDprintf("slab_set wholeset\n"); + break; + } + if (VERBOSE_MED) { + HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + } +} + +/* + * Setup the coordinates for point selection. + */ +void +point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, + hsize_t coords[], int order) +{ + hsize_t i, j, k = 0, m, n, s1, s2; + + HDcompile_assert(RANK == 2); + + if (OUT_OF_ORDER == order) + k = (num_points * RANK) - 1; + else if (IN_ORDER == order) + k = 0; + + s1 = start[0]; + s2 = start[1]; + + for (i = 0; i < count[0]; i++) + for (j = 0; j < count[1]; j++) + for (m = 0; m < block[0]; m++) + for (n = 0; n < block[1]; n++) + if (OUT_OF_ORDER == order) { + coords[k--] = s2 + (stride[1] * j) + n; + coords[k--] = s1 + (stride[0] * i) + m; + } + else if (IN_ORDER == order) { + coords[k++] = s1 + stride[0] * i + m; + coords[k++] = s2 + stride[1] * j + n; + } + + if (VERBOSE_MED) { + HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + k = 0; + for (i = 0; i < num_points; i++) { + HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + k += 2; + } + } +} + +/* + * Fill the dataset with trivial data for testing. + * Assume dimension rank is 2 and data is stored contiguous. 
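[Editor's note: the coords array built by point_set() above is a flat list of interleaved (row, column) pairs, which is the layout H5Sselect_elements() expects for a rank-2 dataspace. A tiny concrete sketch with illustrative values:]

/* Sketch: select the four points (2,0), (2,1), (3,0), (3,1) of a 2-D
 * dataspace `space`. The coordinates are laid out row0, col0, row1, col1,
 * ..., matching what point_set() produces for IN_ORDER traversal. */
static void
select_four_points(hid_t space)
{
    hsize_t coords[4 * 2] = {
        2, 0,
        2, 1,
        3, 0,
        3, 1,
    };

    H5Sselect_elements(space, H5S_SELECT_SET, 4, coords);
}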
+ */ +static void +dataset_fill(hsize_t start[], hsize_t block[], DATATYPE *dataset) +{ + DATATYPE *dataptr = dataset; + hsize_t i, j; + + /* put some trivial data in the data_array */ + for (i = 0; i < block[0]; i++) { + for (j = 0; j < block[1]; j++) { + *dataptr = (DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1)); + dataptr++; + } + } +} + +/* + * Print the content of the dataset. + */ +static void +dataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset) +{ + DATATYPE *dataptr = dataset; + hsize_t i, j; + + /* print the column heading */ + HDprintf("%-8s", "Cols:"); + for (j = 0; j < block[1]; j++) { + HDprintf("%3lu ", (unsigned long)(start[1] + j)); + } + HDprintf("\n"); + + /* print the slab data */ + for (i = 0; i < block[0]; i++) { + HDprintf("Row %2lu: ", (unsigned long)(i + start[0])); + for (j = 0; j < block[1]; j++) { + HDprintf("%03d ", *dataptr++); + } + HDprintf("\n"); + } +} + +/* + * Print the content of the dataset. + */ +int +dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, + DATATYPE *original) +{ + hsize_t i, j; + int vrfyerrs; + + /* print it if VERBOSE_MED */ + if (VERBOSE_MED) { + HDprintf("dataset_vrfy dumping:::\n"); + HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1]); + HDprintf("original values:\n"); + dataset_print(start, block, original); + HDprintf("compared values:\n"); + dataset_print(start, block, dataset); + } + + vrfyerrs = 0; + for (i = 0; i < block[0]; i++) { + for (j = 0; j < block[1]; j++) { + if (*dataset != *original) { + if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) { + HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n", + (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]), + (unsigned long)(j + start[1]), *(original), *(dataset)); + } + dataset++; + original++; + } + } + } + if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("[more errors ...]\n"); + if (vrfyerrs) + HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs); + return (vrfyerrs); +} + +/* + * Part 1.a--Independent read/write for fixed dimension datasets. + */ + +/* + * Example of using the parallel HDF5 library to create two datasets + * in one HDF5 files with parallel MPIO access support. + * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. + * Each process controls only a slab of size dim0 x dim1 within each + * dataset. 
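[Editor's note: the fill pattern used by dataset_fill() above encodes the global position of each element: local element (i, j) of a slab starting at (start[0], start[1]) receives (i + start[0]) * 100 + (j + start[1] + 1). A small helper restating the formula, with a few worked values for a slab starting at row 2, column 0:]

/* Sketch: expected value from dataset_fill() at local position (i, j).
 *   start = {2, 0}, (i, j) = (0, 0) -> 2*100 + 0+0+1 = 201
 *   start = {2, 0}, (i, j) = (0, 3) -> 2*100 + 0+3+1 = 204
 *   start = {2, 0}, (i, j) = (1, 0) -> 3*100 + 0+0+1 = 301 */
static int
expected_value(hsize_t start0, hsize_t start1, hsize_t i, hsize_t j)
{
    return (int)((i + start0) * 100 + (j + start1 + 1));
}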
+ */ + +void +dataset_writeInd(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + hsize_t dims[RANK]; /* dataset dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ + const char *filename; + + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + filename = PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Independent write test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + + /* ---------------------------------------- + * CREATE AN HDF5 FILE WITH PARALLEL ACCESS + * ---------------------------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* --------------------------------------------- + * Define the dimensions of the overall datasets + * and the slabs local to the MPI process. + * ------------------------------------------- */ + /* setup dimensionality object */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create a dataset collectively */ + dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + /* create another dataset collectively */ + dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); + + /* + * To test the independent orders of writes between processes, all + * even number processes write to dataset1 first, then dataset2. + * All odd number processes write to dataset2 first, then dataset1. 
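[Editor's note: create_faccess_plist() belongs to the test harness and is not part of this patch hunk; for the FACC_MPIO case it presumably reduces to something like the sketch below. The body shown is an assumption, not the harness code, and error checks are omitted.]

#include "hdf5.h"
#include <mpi.h>

/* Sketch of a minimal MPIO file-access property list, standing in for
 * the harness helper create_faccess_plist(comm, info, FACC_MPIO). */
static hid_t
mpio_fapl_sketch(MPI_Comm comm, MPI_Info info)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    /* Route file I/O through the MPI-IO virtual file driver. */
    H5Pset_fapl_mpio(fapl, comm, info);

    return fapl;
}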
+ */ + + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* put some trivial data in the data_array */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* write data independently */ + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); + /* write data independently */ + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); + + /* setup dimensions again to write with zero rows for process 0 */ + if (VERBOSE_MED) + HDprintf("writeInd by some with zero row\n"); + slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + /* need to make mem_dataspace to match for process 0 */ + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + } + MESG("writeInd by some with zero row"); + if ((mpi_rank / 2) * 2 != mpi_rank) { + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded"); + } +#ifdef BARRIER_CHECKS + MPI_Barrier(MPI_COMM_WORLD); +#endif /* BARRIER_CHECKS */ + + /* release dataspace ID */ + H5Sclose(file_dataspace); + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), "H5Dclose1 succeeded"); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose2 succeeded"); + + /* release all IDs created */ + H5Sclose(sid); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_array1) + HDfree(data_array1); +} + +/* Example of using the parallel HDF5 library to read a dataset */ +void +dataset_readInd(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + DATATYPE *data_array1 = NULL; /* data buffer */ + DATATYPE *data_origin1 = NULL; /* expected data buffer */ + const char *filename; + + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + filename = PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Independent read test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g 
& H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); + + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* open the file collectively */ + fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); + VRFY((fid >= 0), ""); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* open the dataset1 collectively */ + dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset1 >= 0), ""); + + /* open another dataset collectively */ + dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset2 >= 0), ""); + + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), ""); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), ""); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + + /* read data independently */ + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), ""); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* read data independently */ + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), ""); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), ""); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), ""); + + /* release all IDs created */ + H5Sclose(file_dataspace); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_array1) + HDfree(data_array1); + if (data_origin1) + HDfree(data_origin1); +} + +/* + * Part 1.b--Collective read/write for fixed dimension datasets. + */ + +/* + * Example of using the parallel HDF5 library to create two datasets + * in one HDF5 file with collective parallel access support. + * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. + * Each process controls only a slab of size dim0 x dim1 within each + * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and + * each process controls a hyperslab within.] 
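[Editor's note: throughout these tests, H5Pset_dxpl_mpio(..., H5FD_MPIO_COLLECTIVE) followed by H5Pset_dxpl_mpio_collective_opt(..., H5FD_MPIO_INDIVIDUAL_IO) appears whenever dxfer_coll_type == DXFER_INDEPENDENT_IO: the collective call path is kept, but the library performs the transfer independently. A condensed sketch of the two DXPL variants; error checks are omitted.]

#include "hdf5.h"

/* Sketch: build a transfer property list that either does true collective
 * I/O or goes through the collective call path with independent I/O
 * underneath, mirroring the dxfer_coll_type switch used in these tests. */
static hid_t
make_xfer_plist(int want_true_collective)
{
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    if (!want_true_collective)
        /* Keep the collective interface but do the I/O independently. */
        H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);

    return dxpl;
}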
+ */ + +void +dataset_writeAll(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */ + hid_t dataset5, dataset6, dataset7; /* Dataset ID */ + hid_t datatype; /* Datatype ID */ + hsize_t dims[RANK]; /* dataset dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ + const char *filename; + + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + + size_t num_points; /* for point selection */ + hsize_t *coords = NULL; /* for point selection */ + hsize_t current_dims; /* for point selection */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + filename = PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Collective write test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + /* set up the coords array selection */ + num_points = (size_t)dim1; + coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t)); + VRFY((coords != NULL), "coords malloc succeeded"); + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + + /* ------------------- + * START AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* -------------------------- + * Define the dimensions of the overall datasets + * and create the dataset + * ------------------------- */ + /* setup 2-D dimensionality object */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create a dataset collectively */ + dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + /* create another dataset collectively */ + datatype = H5Tcopy(H5T_NATIVE_INT); + ret = H5Tset_order(datatype, H5T_ORDER_LE); + VRFY((ret >= 0), "H5Tset_order succeeded"); + + dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dcreate2 2 succeeded"); + + /* create a third dataset collectively */ + dataset3 = H5Dcreate2(fid, DATASETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + 
VRFY((dataset3 >= 0), "H5Dcreate2 succeeded"); + + dataset5 = H5Dcreate2(fid, DATASETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset5 >= 0), "H5Dcreate2 succeeded"); + dataset6 = H5Dcreate2(fid, DATASETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset6 >= 0), "H5Dcreate2 succeeded"); + dataset7 = H5Dcreate2(fid, DATASETNAME9, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset7 >= 0), "H5Dcreate2 succeeded"); + + /* release 2-D space ID created */ + H5Sclose(sid); + + /* setup scalar dimensionality object */ + sid = H5Screate(H5S_SCALAR); + VRFY((sid >= 0), "H5Screate succeeded"); + + /* create a fourth dataset collectively */ + dataset4 = H5Dcreate2(fid, DATASETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset4 >= 0), "H5Dcreate2 succeeded"); + + /* release scalar space ID created */ + H5Sclose(sid); + + /* + * Set up dimensions of the slab this process accesses. + */ + + /* Dataset1: each process takes a block of rows. */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill the local slab with some trivial data */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* write data collectively */ + MESG("writeAll by Row"); + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); + + /* setup dimensions again to writeAll with zero rows for process 0 */ + if (VERBOSE_MED) + HDprintf("writeAll by some with zero row\n"); + slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + /* need to make mem_dataspace to match for process 0 */ + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + } + MESG("writeAll by some with zero row"); + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded"); + + /* release all temporary handles. */ + /* Could have used them for dataset2 but it is cleaner */ + /* to create them again.*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* Dataset2: each process takes a block of columns. 
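+ A follow-up write then uses a zero-column (ZCOL) selection on rank 0 to exercise empty selections.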
*/ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + /* put some trivial data in the data_array */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill the local slab with some trivial data */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* write data independently */ + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); + + /* setup dimensions again to writeAll with zero columns for process 0 */ + if (VERBOSE_MED) + HDprintf("writeAll by some with zero col\n"); + slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + /* need to make mem_dataspace to match for process 0 */ + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + } + MESG("writeAll by some with zero col"); + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded"); + + /* release all temporary handles. */ + /* Could have used them for dataset3 but it is cleaner */ + /* to create them again.*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* Dataset3: each process takes a block of rows, except process zero uses "none" selection. 
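+ Rank 0 still participates in the collective H5Dwrite() calls but contributes no elements, and the write is repeated with H5T_NATIVE_UCHAR to force a datatype conversion.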
*/ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset3); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + if (MAINPROCESS) { + ret = H5Sselect_none(file_dataspace); + VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded"); + } /* end if */ + else { + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab succeeded"); + } /* end else */ + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + if (MAINPROCESS) { + ret = H5Sselect_none(mem_dataspace); + VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded"); + } /* end if */ + + /* fill the local slab with some trivial data */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } /* end if */ + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* write data collectively */ + MESG("writeAll with none"); + ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset3 succeeded"); + + /* write data collectively (with datatype conversion) */ + MESG("writeAll with none"); + ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset3 succeeded"); + + /* release all temporary handles. */ + /* Could have used them for dataset4 but it is cleaner */ + /* to create them again.*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* Dataset4: each process writes no data, except process zero uses "all" selection. 
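+ Every rank still takes part in the collective H5Dwrite() calls, and the write is repeated with H5T_NATIVE_UCHAR to cover the datatype-conversion path.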
*/ + /* Additionally, these are in a scalar dataspace */ + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset4); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + if (MAINPROCESS) { + ret = H5Sselect_none(file_dataspace); + VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded"); + } /* end if */ + else { + ret = H5Sselect_all(file_dataspace); + VRFY((ret >= 0), "H5Sselect_none succeeded"); + } /* end else */ + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate(H5S_SCALAR); + VRFY((mem_dataspace >= 0), ""); + if (MAINPROCESS) { + ret = H5Sselect_none(mem_dataspace); + VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded"); + } /* end if */ + else { + ret = H5Sselect_all(mem_dataspace); + VRFY((ret >= 0), "H5Sselect_none succeeded"); + } /* end else */ + + /* fill the local slab with some trivial data */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } /* end if */ + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* write data collectively */ + MESG("writeAll with scalar dataspace"); + ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset4 succeeded"); + + /* write data collectively (with datatype conversion) */ + MESG("writeAll with scalar dataspace"); + ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset4 succeeded"); + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + if (data_array1) + free(data_array1); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + + block[0] = 1; + block[1] = (hsize_t)dim1; + stride[0] = 1; + stride[1] = (hsize_t)dim1; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); + start[1] = 0; + + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* Dataset5: point selection in File - Hyperslab selection in Memory*/ + /* create a file dataspace independently */ + point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + file_dataspace = H5Dget_space(dataset5); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + start[0] = 0; + start[1] = 0; + mem_dataspace = H5Dget_space(dataset5); + VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* write data collectively */ + ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset5 succeeded"); + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* Dataset6: point selection in File - Point selection in Memory*/ + /* create a file dataspace independently */ + start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); + start[1] = 0; + point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + file_dataspace = H5Dget_space(dataset6); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + start[0] = 0; + start[1] = 0; + point_set(start, count, stride, block, num_points, coords, IN_ORDER); + mem_dataspace = H5Dget_space(dataset6); + VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* write data collectively */ + ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset6 succeeded"); + + /* release all temporary handles. */ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* Dataset7: point selection in File - All selection in Memory*/ + /* create a file dataspace independently */ + start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); + start[1] = 0; + point_set(start, count, stride, block, num_points, coords, IN_ORDER); + file_dataspace = H5Dget_space(dataset7); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + current_dims = num_points; + mem_dataspace = H5Screate_simple(1, &current_dims, NULL); + VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded"); + + ret = H5Sselect_all(mem_dataspace); + VRFY((ret >= 0), "H5Sselect_all succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* write data collectively */ + ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset7 succeeded"); + + /* release all temporary handles. */ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* + * All writes completed.
Close datasets collectively + */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), "H5Dclose1 succeeded"); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose2 succeeded"); + ret = H5Dclose(dataset3); + VRFY((ret >= 0), "H5Dclose3 succeeded"); + ret = H5Dclose(dataset4); + VRFY((ret >= 0), "H5Dclose4 succeeded"); + ret = H5Dclose(dataset5); + VRFY((ret >= 0), "H5Dclose5 succeeded"); + ret = H5Dclose(dataset6); + VRFY((ret >= 0), "H5Dclose6 succeeded"); + ret = H5Dclose(dataset7); + VRFY((ret >= 0), "H5Dclose7 succeeded"); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (coords) + HDfree(coords); + if (data_array1) + HDfree(data_array1); +} + +/* + * Example of using the parallel HDF5 library to read two datasets + * in one HDF5 file with collective parallel access support. + * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. + * Each process controls only a slab of size dim0 x dim1 within each + * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and + * each process controls a hyperslab within.] + */ + +void +dataset_readAll(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */ + DATATYPE *data_array1 = NULL; /* data buffer */ + DATATYPE *data_origin1 = NULL; /* expected data buffer */ + const char *filename; + + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + + size_t num_points; /* for point selection */ + hsize_t *coords = NULL; /* for point selection */ + int i, j, k; + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + filename = PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Collective read test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + /* set up the coords array selection */ + num_points = (size_t)dim1; + coords = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t)); + VRFY((coords != NULL), "coords malloc succeeded"); + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); + + /* ------------------- + * OPEN AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* open the file collectively */ + fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); + VRFY((fid >= 0), "H5Fopen succeeded"); + + 
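+ /* The opened file keeps its own copy of the access property list, so the template can be released right away. */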
/* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* -------------------------- + * Open the datasets in it + * ------------------------- */ + /* open the dataset1 collectively */ + dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dopen2 succeeded"); + + /* open another dataset collectively */ + dataset2 = H5Dopen2(fid, DATASETNAME2, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dopen2 2 succeeded"); + + /* open another dataset collectively */ + dataset5 = H5Dopen2(fid, DATASETNAME7, H5P_DEFAULT); + VRFY((dataset5 >= 0), "H5Dopen2 5 succeeded"); + dataset6 = H5Dopen2(fid, DATASETNAME8, H5P_DEFAULT); + VRFY((dataset6 >= 0), "H5Dopen2 6 succeeded"); + dataset7 = H5Dopen2(fid, DATASETNAME9, H5P_DEFAULT); + VRFY((dataset7 >= 0), "H5Dopen2 7 succeeded"); + + /* + * Set up dimensions of the slab this process accesses. + */ + + /* Dataset1: each process takes a block of columns. */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_origin1); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset1 succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* setup dimensions again to readAll with zero columns for process 0 */ + if (VERBOSE_MED) + HDprintf("readAll by some with zero col\n"); + slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + /* need to make mem_dataspace to match for process 0 */ + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + } + MESG("readAll by some with zero col"); + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* release all temporary handles. 
*/ + /* Could have used them for dataset2 but it is cleaner */ + /* to create them again.*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* Dataset2: each process takes a block of rows. */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_origin1); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset2 succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* setup dimensions again to readAll with zero rows for process 0 */ + if (VERBOSE_MED) + HDprintf("readAll by some with zero row\n"); + slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + /* need to make mem_dataspace to match for process 0 */ + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + } + MESG("readAll by some with zero row"); + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + if (data_array1) + free(data_array1); + if (data_origin1) + free(data_origin1); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); + + block[0] = 1; + block[1] = (hsize_t)dim1; + stride[0] = 1; + stride[1] = (hsize_t)dim1; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); + start[1] = 0; + + dataset_fill(start, block, data_origin1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_origin1); + } + + /* Dataset5: point selection in memory - Hyperslab selection in file*/ + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset5); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + start[0] = 0; + start[1] = 0; + point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + mem_dataspace = H5Dget_space(dataset5); + VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset5 succeeded"); + + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + if (data_array1) + free(data_array1); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + + /* Dataset6: point selection in File - Point selection in Memory*/ + /* create a file dataspace independently */ + start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); + start[1] = 0; + point_set(start, count, stride, block, num_points, coords, IN_ORDER); + file_dataspace = H5Dget_space(dataset6); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + start[0] = 0; + start[1] = 0; + point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + mem_dataspace = H5Dget_space(dataset6); + VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset6 succeeded"); + + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + if (data_array1) + free(data_array1); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + + /* Dataset7: point selection in memory - All selection in file*/ + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset7); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_all(file_dataspace); + VRFY((ret >= 0), "H5Sselect_all succeeded"); + + num_points = (size_t)(dim0 * dim1); + k = 0; + for (i = 0; i < dim0; i++) { + for (j = 0; j < dim1; j++) { + coords[k++] = (hsize_t)i; + coords[k++] = (hsize_t)j; + } + } + mem_dataspace = H5Dget_space(dataset7); + VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset7 succeeded"); + + start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); + start[1] = 0; + ret = dataset_vrfy(start, count, stride, block, data_array1 + (dim0 / mpi_size * dim1 * mpi_rank), + data_origin1); + if (ret) + nerrors++; + + /* release all temporary handles. */ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* + * All reads completed. Close datasets collectively + */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), "H5Dclose1 succeeded"); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose2 succeeded"); + ret = H5Dclose(dataset5); + VRFY((ret >= 0), "H5Dclose5 succeeded"); + ret = H5Dclose(dataset6); + VRFY((ret >= 0), "H5Dclose6 succeeded"); + ret = H5Dclose(dataset7); + VRFY((ret >= 0), "H5Dclose7 succeeded"); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (coords) + HDfree(coords); + if (data_array1) + HDfree(data_array1); + if (data_origin1) + HDfree(data_origin1); +} + +/* + * Part 2--Independent read/write for extendible datasets. + */ + +/* + * Example of using the parallel HDF5 library to create two extendible + * datasets in one HDF5 file with independent parallel MPIO access support. + * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. + * Each process controls only a slab of size dim0 x dim1 within each + * dataset. + */ + +void +extend_writeInd(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + const char *filename; + hsize_t dims[RANK]; /* dataset dim sizes */ + hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ + hsize_t chunk_dims[RANK]; /* chunk sizes */ + hid_t dataset_pl; /* dataset create prop. 
list */ + + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK]; /* for hyperslab setting */ + hsize_t stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + filename = PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Extend independent write test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + /* setup chunk-size. Make sure sizes are > 0 */ + chunk_dims[0] = (hsize_t)chunkdim0; + chunk_dims[1] = (hsize_t)chunkdim1; + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + + /* ------------------- + * START AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* Reduce the number of metadata cache slots, so that there are cache + * collisions during the raw data I/O on the chunked dataset. This stresses + * the metadata cache and tests for cache bugs. -QAK + */ + { + int mdc_nelmts; + size_t rdcc_nelmts; + size_t rdcc_nbytes; + double rdcc_w0; + + ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0); + VRFY((ret >= 0), "H5Pget_cache succeeded"); + mdc_nelmts = 4; + ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0); + VRFY((ret >= 0), "H5Pset_cache succeeded"); + } + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* -------------------------------------------------------------- + * Define the dimensions of the overall datasets and create them. + * ------------------------------------------------------------- */ + + /* set up dataset storage chunk sizes and creation property list */ + if (VERBOSE_MED) + HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); + dataset_pl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); + ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + + /* setup dimensionality object */ + /* start out with no rows, extend it later. 
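+ The dataspace starts with zero current dimensions and H5S_UNLIMITED maximum dimensions, so the datasets can later be grown with H5Dset_extent().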
*/ + dims[0] = dims[1] = 0; + sid = H5Screate_simple(RANK, dims, max_dims); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create an extendible dataset collectively */ + dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + /* create another extendible dataset collectively */ + dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); + + /* release resource */ + H5Sclose(sid); + H5Pclose(dataset_pl); + + /* ------------------------- + * Test writing to dataset1 + * -------------------------*/ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* put some trivial data in the data_array */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* Extend its current dim sizes before writing */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + ret = H5Dset_extent(dataset1, dims); + VRFY((ret >= 0), "H5Dset_extent succeeded"); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* write data independently */ + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* release resource */ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + + /* ------------------------- + * Test writing to dataset2 + * -------------------------*/ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + /* put some trivial data in the data_array */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* Try write to dataset2 beyond its current dim sizes. Should fail. */ + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* write data independently. Should fail. */ + H5E_BEGIN_TRY + { + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + } + H5E_END_TRY + VRFY((ret < 0), "H5Dwrite failed as expected"); + + H5Sclose(file_dataspace); + + /* Extend dataset2 and try again. Should succeed. 
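+ A fresh file dataspace is obtained after H5Dset_extent(), since the dataspace returned earlier by H5Dget_space() still describes the old, empty extent.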
*/ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + ret = H5Dset_extent(dataset2, dims); + VRFY((ret >= 0), "H5Dset_extent succeeded"); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* write data independently */ + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* release resource */ + ret = H5Sclose(file_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Sclose(mem_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), "H5Dclose1 succeeded"); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose2 succeeded"); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_array1) + HDfree(data_array1); +} + +/* + * Example of using the parallel HDF5 library to create an extendable dataset + * and perform I/O on it in a way that verifies that the chunk cache is + * bypassed for parallel I/O. + */ + +void +extend_writeInd2(void) +{ + const char *filename; + hid_t fid; /* HDF5 file ID */ + hid_t fapl; /* File access templates */ + hid_t fs; /* File dataspace ID */ + hid_t ms; /* Memory dataspace ID */ + hid_t dataset; /* Dataset ID */ + hsize_t orig_size = 10; /* Original dataset dim size */ + hsize_t new_size = 20; /* Extended dataset dim size */ + hsize_t one = 1; + hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */ + hsize_t chunk_size = 16384; /* chunk size */ + hid_t dcpl; /* dataset create prop. list */ + int written[10], /* Data to write */ + retrieved[10]; /* Data read in */ + int mpi_size, mpi_rank; /* MPI settings */ + int i; /* Local index variable */ + herr_t ret; /* Generic return value */ + + filename = PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Extend independent write test #2 on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + /* ------------------- + * START AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl >= 0), "create_faccess_plist succeeded"); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* -------------------------------------------------------------- + * Define the dimensions of the overall datasets and create them. 
+ * ------------------------------------------------------------- */ + + /* set up dataset storage chunk sizes and creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl >= 0), "H5Pcreate succeeded"); + ret = H5Pset_chunk(dcpl, 1, &chunk_size); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + + /* setup dimensionality object */ + fs = H5Screate_simple(1, &orig_size, &max_size); + VRFY((fs >= 0), "H5Screate_simple succeeded"); + + /* create an extendible dataset collectively */ + dataset = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, fs, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreat2e succeeded"); + + /* release resource */ + ret = H5Pclose(dcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* ------------------------- + * Test writing to dataset + * -------------------------*/ + /* create a memory dataspace independently */ + ms = H5Screate_simple(1, &orig_size, &max_size); + VRFY((ms >= 0), "H5Screate_simple succeeded"); + + /* put some trivial data in the data_array */ + for (i = 0; i < (int)orig_size; i++) + written[i] = i; + MESG("data array initialized"); + if (VERBOSE_MED) { + MESG("writing at offset zero: "); + for (i = 0; i < (int)orig_size; i++) + HDprintf("%s%d", i ? ", " : "", written[i]); + HDprintf("\n"); + } + ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* ------------------------- + * Read initial data from dataset. + * -------------------------*/ + ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved); + VRFY((ret >= 0), "H5Dread succeeded"); + for (i = 0; i < (int)orig_size; i++) + if (written[i] != retrieved[i]) { + HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i, + written[i], i, retrieved[i]); + nerrors++; + } + if (VERBOSE_MED) { + MESG("read at offset zero: "); + for (i = 0; i < (int)orig_size; i++) + HDprintf("%s%d", i ? ", " : "", retrieved[i]); + HDprintf("\n"); + } + + /* ------------------------- + * Extend the dataset & retrieve new dataspace + * -------------------------*/ + ret = H5Dset_extent(dataset, &new_size); + VRFY((ret >= 0), "H5Dset_extent succeeded"); + ret = H5Sclose(fs); + VRFY((ret >= 0), "H5Sclose succeeded"); + fs = H5Dget_space(dataset); + VRFY((fs >= 0), "H5Dget_space succeeded"); + + /* ------------------------- + * Write to the second half of the dataset + * -------------------------*/ + for (i = 0; i < (int)orig_size; i++) + written[i] = (int)orig_size + i; + MESG("data array re-initialized"); + if (VERBOSE_MED) { + MESG("writing at offset 10: "); + for (i = 0; i < (int)orig_size; i++) + HDprintf("%s%d", i ? ", " : "", written[i]); + HDprintf("\n"); + } + ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size); + VRFY((ret >= 0), "H5Sselect_hyperslab succeeded"); + ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* ------------------------- + * Read the new data + * -------------------------*/ + ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved); + VRFY((ret >= 0), "H5Dread succeeded"); + for (i = 0; i < (int)orig_size; i++) + if (written[i] != retrieved[i]) { + HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i, + written[i], i, retrieved[i]); + nerrors++; + } + if (VERBOSE_MED) { + MESG("read at offset 10: "); + for (i = 0; i < (int)orig_size; i++) + HDprintf("%s%d", i ? 
", " : "", retrieved[i]); + HDprintf("\n"); + } + + /* Close dataset collectively */ + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + + /* Close the file collectively */ + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); +} + +/* Example of using the parallel HDF5 library to read an extendible dataset */ +void +extend_readInd(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + hsize_t dims[RANK]; /* dataset dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ + DATATYPE *data_array2 = NULL; /* data buffer */ + DATATYPE *data_origin1 = NULL; /* expected data buffer */ + const char *filename; + + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + filename = PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Extend independent read test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded"); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); + + /* ------------------- + * OPEN AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* open the file collectively */ + fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); + VRFY((fid >= 0), ""); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* open the dataset1 collectively */ + dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset1 >= 0), ""); + + /* open another dataset collectively */ + dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset2 >= 0), ""); + + /* Try extend dataset1 which is open RDONLY. Should fail. 
*/ + + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL); + VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded"); + dims[0]++; + H5E_BEGIN_TRY + { + ret = H5Dset_extent(dataset1, dims); + } + H5E_END_TRY + VRFY((ret < 0), "H5Dset_extent failed as expected"); + + H5Sclose(file_dataspace); + + /* Read dataset1 using BYROW pattern */ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), ""); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), ""); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* read data independently */ + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + VRFY((ret == 0), "dataset1 read verified correct"); + if (ret) + nerrors++; + + H5Sclose(mem_dataspace); + H5Sclose(file_dataspace); + + /* Read dataset2 using BYCOL pattern */ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), ""); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), ""); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* read data independently */ + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + VRFY((ret == 0), "dataset2 read verified correct"); + if (ret) + nerrors++; + + H5Sclose(mem_dataspace); + H5Sclose(file_dataspace); + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), ""); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), ""); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_array1) + HDfree(data_array1); + if (data_array2) + HDfree(data_array2); + if (data_origin1) + HDfree(data_origin1); +} + +/* + * Part 3--Collective read/write for extendible datasets. + */ + +/* + * Example of using the parallel HDF5 library to create two extendible + * datasets in one HDF5 file with collective parallel MPIO access support. + * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. + * Each process controls only a slab of size dim0 x dim1 within each + * dataset. 
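+ * The datasets are created chunked with zero initial size and are extended with H5Dset_extent() before being written collectively.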
+ */ + +void +extend_writeAll(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + const char *filename; + hsize_t dims[RANK]; /* dataset dim sizes */ + hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ + hsize_t chunk_dims[RANK]; /* chunk sizes */ + hid_t dataset_pl; /* dataset create prop. list */ + + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK]; /* for hyperslab setting */ + hsize_t stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + filename = PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Extend independent write test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + /* setup chunk-size. Make sure sizes are > 0 */ + chunk_dims[0] = (hsize_t)chunkdim0; + chunk_dims[1] = (hsize_t)chunkdim1; + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + + /* ------------------- + * START AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* Reduce the number of metadata cache slots, so that there are cache + * collisions during the raw data I/O on the chunked dataset. This stresses + * the metadata cache and tests for cache bugs. -QAK + */ + { + int mdc_nelmts; + size_t rdcc_nelmts; + size_t rdcc_nbytes; + double rdcc_w0; + + ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0); + VRFY((ret >= 0), "H5Pget_cache succeeded"); + mdc_nelmts = 4; + ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0); + VRFY((ret >= 0), "H5Pset_cache succeeded"); + } + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* -------------------------------------------------------------- + * Define the dimensions of the overall datasets and create them. 
+ * ------------------------------------------------------------- */ + + /* set up dataset storage chunk sizes and creation property list */ + if (VERBOSE_MED) + HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); + dataset_pl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); + ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + + /* setup dimensionality object */ + /* start out with no rows, extend it later. */ + dims[0] = dims[1] = 0; + sid = H5Screate_simple(RANK, dims, max_dims); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create an extendible dataset collectively */ + dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + /* create another extendible dataset collectively */ + dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); + + /* release resource */ + H5Sclose(sid); + H5Pclose(dataset_pl); + + /* ------------------------- + * Test writing to dataset1 + * -------------------------*/ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* put some trivial data in the data_array */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* Extend its current dim sizes before writing */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + ret = H5Dset_extent(dataset1, dims); + VRFY((ret >= 0), "H5Dset_extent succeeded"); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* write data collectively */ + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* release resource */ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* ------------------------- + * Test writing to dataset2 + * -------------------------*/ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + /* put some trivial data in the data_array */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* set up 
the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* Try write to dataset2 beyond its current dim sizes. Should fail. */ + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* write data independently. Should fail. */ + H5E_BEGIN_TRY + { + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + } + H5E_END_TRY + VRFY((ret < 0), "H5Dwrite failed as expected"); + + H5Sclose(file_dataspace); + + /* Extend dataset2 and try again. Should succeed. */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + ret = H5Dset_extent(dataset2, dims); + VRFY((ret >= 0), "H5Dset_extent succeeded"); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* write data independently */ + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* release resource */ + ret = H5Sclose(file_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Sclose(mem_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Pclose(xfer_plist); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), "H5Dclose1 succeeded"); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose2 succeeded"); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_array1) + HDfree(data_array1); +} + +/* Example of using the parallel HDF5 library to read an extendible dataset */ +void +extend_readAll(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + const char *filename; + hsize_t dims[RANK]; /* dataset dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ + DATATYPE *data_array2 = NULL; /* data buffer */ + DATATYPE *data_origin1 = NULL; /* expected data buffer */ + + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + filename = PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Extend independent read test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being 
tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded"); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); + + /* ------------------- + * OPEN AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* open the file collectively */ + fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); + VRFY((fid >= 0), ""); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* open the dataset1 collectively */ + dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset1 >= 0), ""); + + /* open another dataset collectively */ + dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset2 >= 0), ""); + + /* Try extend dataset1 which is open RDONLY. Should fail. */ + + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL); + VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded"); + dims[0]++; + H5E_BEGIN_TRY + { + ret = H5Dset_extent(dataset1, dims); + } + H5E_END_TRY + VRFY((ret < 0), "H5Dset_extent failed as expected"); + + H5Sclose(file_dataspace); + + /* Read dataset1 using BYROW pattern */ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), ""); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), ""); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + VRFY((ret == 0), "dataset1 read verified correct"); + if (ret) + nerrors++; + 
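+
+    /* Illustrative sketch, kept disabled under #if 0 so the test's behavior is
+     * unchanged: one way to confirm that readers see the dim0 x dim1 extent
+     * produced by extend_writeAll(), using only variables already in scope.
+     */
+#if 0
+    {
+        hid_t   chk_space = H5Dget_space(dataset1);
+        hsize_t chk_dims[RANK];
+
+        VRFY((chk_space >= 0), "H5Dget_space succeeded");
+        VRFY((H5Sget_simple_extent_dims(chk_space, chk_dims, NULL) == RANK),
+             "H5Sget_simple_extent_dims succeeded");
+        VRFY((chk_dims[0] >= (hsize_t)dim0 && chk_dims[1] >= (hsize_t)dim1),
+             "readers see the extended extent");
+        H5Sclose(chk_space);
+    }
+#endif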
+ H5Sclose(mem_dataspace); + H5Sclose(file_dataspace); + H5Pclose(xfer_plist); + + /* Read dataset2 using BYCOL pattern */ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), ""); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), ""); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + VRFY((ret == 0), "dataset2 read verified correct"); + if (ret) + nerrors++; + + H5Sclose(mem_dataspace); + H5Sclose(file_dataspace); + H5Pclose(xfer_plist); + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), ""); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), ""); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_array1) + HDfree(data_array1); + if (data_array2) + HDfree(data_array2); + if (data_origin1) + HDfree(data_origin1); +} + +#ifdef H5_HAVE_FILTER_DEFLATE +static const char * +h5_rmprefix(const char *filename) +{ + const char *ret_ptr; + + if ((ret_ptr = HDstrstr(filename, ":")) == NULL) + ret_ptr = filename; + else + ret_ptr++; + + return (ret_ptr); +} + +/* + * Example of using the parallel HDF5 library to read a compressed + * dataset in an HDF5 file with collective parallel access support. 
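+ * Only the first MPI rank creates the file and writes the deflate-compressed
+ * (gzip level 9) dataset; every rank then reopens the file together and reads
+ * the data back through a collective transfer property list, verifying the values.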
+ */ +void +compress_readAll(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t dcpl; /* Dataset creation property list */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t dataspace; /* Dataspace ID */ + hid_t dataset; /* Dataset ID */ + int rank = 1; /* Dataspace rank */ + hsize_t dim = (hsize_t)dim0; /* Dataspace dimensions */ + unsigned u; /* Local index variable */ + unsigned chunk_opts; /* Chunk options */ + unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */ + DATATYPE *data_read = NULL; /* data buffer */ + DATATYPE *data_orig = NULL; /* expected data buffer */ + const char *filename; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + int mpi_size, mpi_rank; + herr_t ret; /* Generic return value */ + + filename = PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Collective chunked dataset read test on file %s\n", filename); + + /* Retrieve MPI parameters */ + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + /* Allocate data buffer */ + data_orig = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE)); + VRFY((data_orig != NULL), "data_origin1 HDmalloc succeeded"); + data_read = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE)); + VRFY((data_read != NULL), "data_array1 HDmalloc succeeded"); + + /* Initialize data buffers */ + for (u = 0; u < dim; u++) + data_orig[u] = (DATATYPE)u; + + /* Run test both with and without filters disabled on partial chunks */ + for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1; + disable_partial_chunk_filters++) { + /* Process zero creates the file with a compressed, chunked dataset */ + if (mpi_rank == 0) { + hsize_t chunk_dim; /* Chunk dimensions */ + + /* Create the file */ + fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + VRFY((fid > 0), "H5Fcreate succeeded"); + + /* Create property list for chunking and compression */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl > 0), "H5Pcreate succeeded"); + + ret = H5Pset_layout(dcpl, H5D_CHUNKED); + VRFY((ret >= 0), "H5Pset_layout succeeded"); + + /* Use eight chunks */ + chunk_dim = dim / 8; + ret = H5Pset_chunk(dcpl, rank, &chunk_dim); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + + /* Set chunk options appropriately */ + if (disable_partial_chunk_filters) { + ret = H5Pget_chunk_opts(dcpl, &chunk_opts); + VRFY((ret >= 0), "H5Pget_chunk_opts succeeded"); + + chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; + + ret = H5Pset_chunk_opts(dcpl, chunk_opts); + VRFY((ret >= 0), "H5Pset_chunk_opts succeeded"); + } /* end if */ + + ret = H5Pset_deflate(dcpl, 9); + VRFY((ret >= 0), "H5Pset_deflate succeeded"); + + /* Create dataspace */ + dataspace = H5Screate_simple(rank, &dim, NULL); + VRFY((dataspace > 0), "H5Screate_simple succeeded"); + + /* Create dataset */ + dataset = + H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset > 0), "H5Dcreate2 succeeded"); + + /* Write compressed data */ + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig); + VRFY((ret 
>= 0), "H5Dwrite succeeded"); + + /* Close objects */ + ret = H5Pclose(dcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Sclose(dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } + + /* Wait for file to be created */ + MPI_Barrier(comm); + + /* ------------------- + * OPEN AN HDF5 FILE + * -------------------*/ + + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* open the file collectively */ + fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl); + VRFY((fid > 0), "H5Fopen succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* Open dataset with compressed chunks */ + dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT); + VRFY((dataset > 0), "H5Dopen2 succeeded"); + + /* Try reading & writing data */ + if (dataset > 0) { + /* Create dataset transfer property list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist > 0), "H5Pcreate succeeded"); + + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* Try reading the data */ + ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* Verify data read */ + for (u = 0; u < dim; u++) + if (data_orig[u] != data_read[u]) { + HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n", __LINE__, + (unsigned)u, data_orig[u], (unsigned)u, data_read[u]); + nerrors++; + } + +#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); + VRFY((ret >= 0), "H5Dwrite succeeded"); +#endif + + ret = H5Pclose(xfer_plist); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + } /* end if */ + + /* Close file */ + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } /* end for */ + + /* release data buffers */ + if (data_read) + HDfree(data_read); + if (data_orig) + HDfree(data_orig); +} +#endif /* H5_HAVE_FILTER_DEFLATE */ + +/* + * Part 4--Non-selection for chunked dataset + */ + +/* + * Example of using the parallel HDF5 library to create chunked + * dataset in one HDF5 file with collective and independent parallel + * MPIO access support. The Datasets are of sizes dim0 x dim1. + * Each process controls only a slab of size dim0 x dim1 within the + * dataset with the exception that one processor selects no element. + */ + +void +none_selection_chunk(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + const char *filename; + hsize_t dims[RANK]; /* dataset dim sizes */ + DATATYPE *data_origin = NULL; /* data buffer */ + DATATYPE *data_array = NULL; /* data buffer */ + hsize_t chunk_dims[RANK]; /* chunk sizes */ + hid_t dataset_pl; /* dataset create prop. 
list */ + + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK]; /* for hyperslab setting */ + hsize_t stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + hsize_t mstart[RANK]; /* for data buffer in memory */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + filename = PARATESTFILE /* GetTestParameters() */; + if (VERBOSE_MED) + HDprintf("Extend independent write test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + /* setup chunk-size. Make sure sizes are > 0 */ + chunk_dims[0] = (hsize_t)chunkdim0; + chunk_dims[1] = (hsize_t)chunkdim1; + + /* ------------------- + * START AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* -------------------------------------------------------------- + * Define the dimensions of the overall datasets and create them. + * ------------------------------------------------------------- */ + + /* set up dataset storage chunk sizes and creation property list */ + if (VERBOSE_MED) + HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); + dataset_pl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); + ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + + /* setup dimensionality object */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create an extendible dataset collectively */ + dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + /* create another extendible dataset collectively */ + dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); + + /* release resource */ + H5Sclose(sid); + H5Pclose(dataset_pl); + + /* ------------------------- + * Test collective writing to dataset1 + * -------------------------*/ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* allocate memory for data buffer. Only allocate enough buffer for + * each processor's data. 
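+     * Process 0 deliberately allocates nothing and later selects no elements
+     * with H5Sselect_none(), so it takes part in the collective transfers with
+     * an empty selection.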
*/ + if (mpi_rank) { + data_origin = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE)); + VRFY((data_origin != NULL), "data_origin HDmalloc succeeded"); + + data_array = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE)); + VRFY((data_array != NULL), "data_array HDmalloc succeeded"); + + /* put some trivial data in the data_array */ + mstart[0] = mstart[1] = 0; + dataset_fill(mstart, block, data_origin); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(mstart, block, data_origin); + } + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* Process 0 has no selection */ + if (!mpi_rank) { + ret = H5Sselect_none(mem_dataspace); + VRFY((ret >= 0), "H5Sselect_none succeeded"); + } + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* Process 0 has no selection */ + if (!mpi_rank) { + ret = H5Sselect_none(file_dataspace); + VRFY((ret >= 0), "H5Sselect_none succeeded"); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* write data collectively */ + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* read data independently */ + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array); + VRFY((ret >= 0), ""); + + /* verify the read data with original expected data */ + if (mpi_rank) { + ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin); + if (ret) + nerrors++; + } + + /* ------------------------- + * Test independent writing to dataset2 + * -------------------------*/ + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* write data collectively */ + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* read data independently */ + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array); + VRFY((ret >= 0), ""); + + /* verify the read data with original expected data */ + if (mpi_rank) { + ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin); + if (ret) + nerrors++; + } + + /* release resource */ + ret = H5Sclose(file_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Sclose(mem_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Pclose(xfer_plist); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), "H5Dclose1 succeeded"); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose2 succeeded"); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_origin) + HDfree(data_origin); + if (data_array) + HDfree(data_array); +} + +/* Function: test_actual_io_mode + * + * Purpose: tests one specific case of collective I/O and checks that the + * actual_chunk_opt_mode property 
and the actual_io_mode
+ *          property in the DXPL have the correct values.
+ *
+ * Input:   selection_mode: changes the way processes select data from the space, as well
+ *          as some dxpl flags to get collective I/O to break in different ways.
+ *
+ *          The relevant I/O function and expected response for each mode:
+ *              TEST_ACTUAL_IO_MULTI_CHUNK_IND:
+ *                  H5D_mpi_chunk_collective_io, each process reports independent I/O
+ *
+ *              TEST_ACTUAL_IO_MULTI_CHUNK_COL:
+ *                  H5D_mpi_chunk_collective_io, each process reports collective I/O
+ *
+ *              TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
+ *                  H5D_mpi_chunk_collective_io, each process reports mixed I/O
+ *
+ *              TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
+ *                  H5D_mpi_chunk_collective_io, processes disagree. The root reports
+ *                  collective, the rest report independent I/O
+ *
+ *              TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
+ *                  Same test as TEST_ACTUAL_IO_MULTI_CHUNK_IND.
+ *                  Goes directly to multi-chunk-io without the num-threshold calculation.
+ *              TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
+ *                  Same test as TEST_ACTUAL_IO_MULTI_CHUNK_COL.
+ *                  Goes directly to multi-chunk-io without the num-threshold calculation.
+ *
+ *              TEST_ACTUAL_IO_LINK_CHUNK:
+ *                  H5D_link_chunk_collective_io, processes report linked chunk I/O
+ *
+ *              TEST_ACTUAL_IO_CONTIGUOUS:
+ *                  H5D__contig_collective_write or H5D__contig_collective_read
+ *                  each process reports contiguous collective I/O
+ *
+ *              TEST_ACTUAL_IO_NO_COLLECTIVE:
+ *                  Simple independent I/O. This tests that the defaults are properly set.
+ *
+ *              TEST_ACTUAL_IO_RESET:
+ *                  Performs collective and then independent I/O with the same dxpl to
+ *                  make sure the property is correctly reset to the default on each use.
+ *                  Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE
+ *                  (the most complex case that works on all builds) and then performs
+ *                  an independent read and write with the same dxpls.
+ *
+ *          Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
+ *          are not needed as they are covered by the MULTI_CHUNK_MIX and
+ *          MULTI_CHUNK_MIX_DISAGREE cases. The _DIRECT_ cases only test the
+ *          pathway to multi-chunk-io via H5FD_MPIO_CHUNK_MULTI_IO instead of the num-threshold.
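+ *
+ *          (Illustration, based on the chunk layout set up in this function: the
+ *          dataset is chunked by rows into one chunk per MPI rank, so a BYROW
+ *          slab keeps each rank inside its own chunk and the per-chunk I/O is
+ *          reported as independent, while a BYCOL slab makes every rank touch
+ *          every chunk and the per-chunk I/O is reported as collective.)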
+ * + * Modification: + * - Refctore to remove multi-chunk-without-opimization test and update for + * testing direct to multi-chunk-io + * Programmer: Jonathan Kim + * Date: 2012-10-10 + * + * + * Programmer: Jacob Gruber + * Date: 2011-04-06 + */ +static void +test_actual_io_mode(int selection_mode) +{ + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + H5D_mpio_actual_io_mode_t actual_io_mode_write = H5D_MPIO_NO_COLLECTIVE; + H5D_mpio_actual_io_mode_t actual_io_mode_read = H5D_MPIO_NO_COLLECTIVE; + H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; + const char *filename; + const char *test_name; + hbool_t direct_multi_chunk_io; + hbool_t multi_chunk_io; + hbool_t is_chunked; + hbool_t is_collective; + int mpi_size = -1; + int mpi_rank = -1; + int length; + int *buffer; + int i; + MPI_Comm mpi_comm = MPI_COMM_NULL; + MPI_Info mpi_info = MPI_INFO_NULL; + hid_t fid = -1; + hid_t sid = -1; + hid_t dataset = -1; + hid_t data_type = H5T_NATIVE_INT; + hid_t fapl = -1; + hid_t mem_space = -1; + hid_t file_space = -1; + hid_t dcpl = -1; + hid_t dxpl_write = -1; + hid_t dxpl_read = -1; + hsize_t dims[RANK]; + hsize_t chunk_dims[RANK]; + hsize_t start[RANK]; + hsize_t stride[RANK]; + hsize_t count[RANK]; + hsize_t block[RANK]; + char message[256]; + herr_t ret; + + /* Set up some flags to make some future if statements slightly more readable */ + direct_multi_chunk_io = (selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND || + selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); + + /* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then + * tests independent I/O + */ + multi_chunk_io = + (selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || selection_mode == TEST_ACTUAL_IO_RESET); + + is_chunked = + (selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE); + + is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE; + + /* Set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + MPI_Barrier(MPI_COMM_WORLD); + + HDassert(mpi_size >= 1); + + mpi_comm = MPI_COMM_WORLD; + mpi_info = MPI_INFO_NULL; + + filename = (const char *)PARATESTFILE /* GetTestParameters() */; + HDassert(filename != NULL); + + /* Setup the file access template */ + fapl = create_faccess_plist(mpi_comm, mpi_info, facc_type); + VRFY((fapl >= 0), "create_faccess_plist() succeeded"); + + /* Create the file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Create the basic Space */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* Create the dataset creation 
plist */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl >= 0), "dataset creation plist created successfully"); + + /* If we are not testing contiguous datasets */ + if (is_chunked) { + /* Set up chunk information. */ + chunk_dims[0] = dims[0] / (hsize_t)mpi_size; + chunk_dims[1] = dims[1]; + ret = H5Pset_chunk(dcpl, 2, chunk_dims); + VRFY((ret >= 0), "chunk creation property list succeeded"); + } + + /* Create the dataset */ + dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); + + /* Create the file dataspace */ + file_space = H5Dget_space(dataset); + VRFY((file_space >= 0), "H5Dget_space succeeded"); + + /* Choose a selection method based on the type of I/O we want to occur, + * and also set up some selection-dependeent test info. */ + switch (selection_mode) { + + /* Independent I/O with optimization */ + case TEST_ACTUAL_IO_MULTI_CHUNK_IND: + case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND: + /* Since the dataset is chunked by row and each process selects a row, + * each process writes to a different chunk. This forces all I/O to be + * independent. + */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Multi Chunk - Independent"; + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; + break; + + /* Collective I/O with optimization */ + case TEST_ACTUAL_IO_MULTI_CHUNK_COL: + case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL: + /* The dataset is chunked by rows, so each process takes a column which + * spans all chunks. Since the processes write non-overlapping regular + * selections to each chunk, the operation is purely collective. + */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + test_name = "Multi Chunk - Collective"; + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + if (mpi_size > 1) + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + else + actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; + break; + + /* Mixed I/O with optimization */ + case TEST_ACTUAL_IO_MULTI_CHUNK_MIX: + /* A chunk will be assigned collective I/O only if it is selected by each + * process. To get mixed I/O, have the root select all chunks and each + * subsequent process select the first and nth chunk. The first chunk, + * accessed by all, will be assigned collective I/O while each other chunk + * will be accessed only by the root and the nth process and will be + * assigned independent I/O. Each process will access one chunk collectively + * and at least one chunk independently, reporting mixed I/O. + */ + + if (mpi_rank == 0) { + /* Select the first column */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + } + else { + /* Select the first and the nth chunk in the nth column */ + block[0] = (hsize_t)(dim0 / mpi_size); + block[1] = (hsize_t)(dim1 / mpi_size); + count[0] = 2; + count[1] = 1; + stride[0] = (hsize_t)mpi_rank * block[0]; + stride[1] = 1; + start[0] = 0; + start[1] = (hsize_t)mpi_rank * block[1]; + } + + test_name = "Multi Chunk - Mixed"; + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; + break; + + /* RESET tests that the properties are properly reset to defaults each time I/O is + * performed. To achieve this, we have RESET perform collective I/O (which would change + * the values from the defaults) followed by independent I/O (which should report the + * default values). 
RESET doesn't need to have a unique selection, so we reuse + * MULTI_CHUMK_MIX_DISAGREE, which was chosen because it is a complex case that works + * on all builds. The independent section of RESET can be found at the end of this function. + */ + case TEST_ACTUAL_IO_RESET: + + /* Mixed I/O with optimization and internal disagreement */ + case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE: + /* A chunk will be assigned collective I/O only if it is selected by each + * process. To get mixed I/O with disagreement, assign process n to the + * first chunk and the nth chunk. The first chunk, selected by all, is + * assgigned collective I/O, while each other process gets independent I/O. + * Since the root process with only access the first chunk, it will report + * collective I/O. The subsequent processes will access the first chunk + * collectively, and their other chunk independently, reporting mixed I/O. + */ + + if (mpi_rank == 0) { + /* Select the first chunk in the first column */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + block[0] = block[0] / (hsize_t)mpi_size; + } + else { + /* Select the first and the nth chunk in the nth column */ + block[0] = (hsize_t)(dim0 / mpi_size); + block[1] = (hsize_t)(dim1 / mpi_size); + count[0] = 2; + count[1] = 1; + stride[0] = (hsize_t)mpi_rank * block[0]; + stride[1] = 1; + start[0] = 0; + start[1] = (hsize_t)mpi_rank * block[1]; + } + + /* If the testname was not already set by the RESET case */ + if (selection_mode == TEST_ACTUAL_IO_RESET) + test_name = "RESET"; + else + test_name = "Multi Chunk - Mixed (Disagreement)"; + + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + if (mpi_size > 1) { + if (mpi_rank == 0) + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + else + actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; + } + else + actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; + + break; + + /* Linked Chunk I/O */ + case TEST_ACTUAL_IO_LINK_CHUNK: + /* Nothing special; link chunk I/O is forced in the dxpl settings. */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Link Chunk"; + actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK; + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + break; + + /* Contiguous Dataset */ + case TEST_ACTUAL_IO_CONTIGUOUS: + /* A non overlapping, regular selection in a contiguous dataset leads to + * collective I/O */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Contiguous"; + actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE; + break; + + case TEST_ACTUAL_IO_NO_COLLECTIVE: + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Independent"; + actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; + break; + + default: + test_name = "Undefined Selection Mode"; + actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; + break; + } + + ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* Create a memory dataspace mirroring the dataset and select the same hyperslab + * as in the file space. 
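+     * Because the memory space has the full dataset dimensions and the same
+     * hyperslab selected in it, every element occupies the same (row, column)
+     * position in the buffer as it does in the file.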
+ */ + mem_space = H5Screate_simple(RANK, dims, NULL); + VRFY((mem_space >= 0), "mem_space created"); + + ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* Get the number of elements in the selection */ + length = dim0 * dim1; + + /* Allocate and initialize the buffer */ + buffer = (int *)HDmalloc(sizeof(int) * (size_t)length); + VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); + for (i = 0; i < length; i++) + buffer[i] = i; + + /* Set up the dxpl for the write */ + dxpl_write = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); + + /* Set collective I/O properties in the dxpl. */ + if (is_collective) { + /* Request collective I/O */ + ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* Set the threshold number of processes per chunk to twice mpi_size. + * This will prevent the threshold from ever being met, thus forcing + * multi chunk io instead of link chunk io. + * This is via default. + */ + if (multi_chunk_io) { + /* force multi-chunk-io by threshold */ + ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded"); + + /* set this to manipulate testing scenario about allocating processes + * to chunks */ + ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded"); + } + + /* Set directly go to multi-chunk-io without threshold calc. */ + if (direct_multi_chunk_io) { + /* set for multi chunk io by property*/ + ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + } + } + + /* Make a copy of the dxpl to test the read operation */ + dxpl_read = H5Pcopy(dxpl_write); + VRFY((dxpl_read >= 0), "H5Pcopy succeeded"); + + /* Write */ + ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); + VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); + + /* Retrieve Actual io values */ + ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); + + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); + + /* Read */ + ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); + VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); + + /* Retrieve Actual io values */ + ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); + + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); + + /* Check write vs read */ + VRFY((actual_io_mode_read == actual_io_mode_write), + "reading and writing are the same for actual_io_mode"); + VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write), + "reading and writing are the same for actual_chunk_opt_mode"); + + /* Test values */ + if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 && + actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) { + HDsnprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for 
%s.\n", + test_name); + VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message); + HDsnprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name); + VRFY((actual_io_mode_write == actual_io_mode_expected), message); + } + else { + HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, actual_chunk_opt_mode_write, + actual_io_mode_write); + } + + /* To test that the property is successfully reset to the default, we perform some + * independent I/O after the collective I/O + */ + if (selection_mode == TEST_ACTUAL_IO_RESET) { + if (mpi_rank == 0) { + /* Switch to independent io */ + ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + ret = H5Pset_dxpl_mpio(dxpl_read, H5FD_MPIO_INDEPENDENT); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* Write */ + ret = H5Dwrite(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_write, buffer); + VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); + + /* Check Properties */ + ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); + + VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION, + "actual_chunk_opt_mode has correct value for reset write (independent)"); + VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE, + "actual_io_mode has correct value for reset write (independent)"); + + /* Read */ + ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer); + VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); + + /* Check Properties */ + ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); + + VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION, + "actual_chunk_opt_mode has correct value for reset read (independent)"); + VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE, + "actual_io_mode has correct value for reset read (independent)"); + } + } + + /* Release some resources */ + ret = H5Sclose(sid); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Pclose(dcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Pclose(dxpl_write); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Pclose(dxpl_read); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Sclose(mem_space); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Sclose(file_space); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + HDfree(buffer); + return; +} + +/* Function: actual_io_mode_tests + * + * Purpose: Tests all possible cases of the actual_io_mode property. 
+ * + * Programmer: Jacob Gruber + * Date: 2011-04-06 + */ +void +actual_io_mode_tests(void) +{ + int mpi_size = -1; + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Only run these tests if selection I/O is not being used - selection I/O + * bypasses this IO mode decision - it's effectively always multi chunk + * currently */ + if (/* !H5_use_selection_io_g */ TRUE) { + test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); + + /* + * Test multi-chunk-io via proc_num threshold + */ + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND); + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL); + + /* The Multi Chunk Mixed test requires at least three processes. */ + if (mpi_size > 2) + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX); + else + HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n"); + + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE); + + /* + * Test multi-chunk-io via setting direct property + */ + test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND); + test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); + + test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK); + test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS); + + test_actual_io_mode(TEST_ACTUAL_IO_RESET); + } + + return; +} + +/* + * Function: test_no_collective_cause_mode + * + * Purpose: + * tests cases for broken collective I/O and checks that the + * H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values. + * + * Input: + * selection_mode: various mode to cause broken collective I/O + * Note: Originally, each TEST case is supposed to be used alone. + * After some discussion, this is updated to take multiple TEST cases + * with '|'. However there is no error check for any of combined + * test cases, so a tester is responsible to understand and feed + * proper combination of TESTs if needed. + * + * + * TEST_COLLECTIVE: + * Test for regular collective I/O without cause of breaking. + * Just to test normal behavior. + * + * TEST_SET_INDEPENDENT: + * Test for Independent I/O as the cause of breaking collective I/O. + * + * TEST_DATATYPE_CONVERSION: + * Test for Data Type Conversion as the cause of breaking collective I/O. + * + * TEST_DATA_TRANSFORMS: + * Test for Data Transform feature as the cause of breaking collective I/O. + * + * TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES: + * Test for NULL dataspace as the cause of breaking collective I/O. + * + * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT: + * Test for Compact layout as the cause of breaking collective I/O. + * + * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL: + * Test for Externl-File storage as the cause of breaking collective I/O. 
+ * + * Programmer: Jonathan Kim + * Date: Aug, 2012 + */ +#ifdef LATER +#define DSET_NOCOLCAUSE "nocolcause" +#endif +#define FILE_EXTERNAL "nocolcause_extern.data" +static void +test_no_collective_cause_mode(int selection_mode) +{ + uint32_t no_collective_cause_local_write = 0; + uint32_t no_collective_cause_local_read = 0; + uint32_t no_collective_cause_local_expected = 0; + uint32_t no_collective_cause_global_write = 0; + uint32_t no_collective_cause_global_read = 0; + uint32_t no_collective_cause_global_expected = 0; + + const char *filename; + const char *test_name; + hbool_t is_chunked = 1; + hbool_t is_independent = 0; + int mpi_size = -1; + int mpi_rank = -1; + int length; + int *buffer; + int i; + MPI_Comm mpi_comm; + MPI_Info mpi_info; + hid_t fid = -1; + hid_t sid = -1; + hid_t dataset = -1; + hid_t data_type = H5T_NATIVE_INT; + hid_t fapl = -1; + hid_t dcpl = -1; + hid_t dxpl_write = -1; + hid_t dxpl_read = -1; + hsize_t dims[RANK]; + hid_t mem_space = -1; + hid_t file_space = -1; + hsize_t chunk_dims[RANK]; + herr_t ret; + /* set to global value as default */ + int l_facc_type = facc_type; + char message[256]; + + /* Set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + MPI_Barrier(MPI_COMM_WORLD); + + HDassert(mpi_size >= 1); + + mpi_comm = MPI_COMM_WORLD; + mpi_info = MPI_INFO_NULL; + + /* Create the dataset creation plist */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl >= 0), "dataset creation plist created successfully"); + + if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) { + ret = H5Pset_layout(dcpl, H5D_COMPACT); + VRFY((ret >= 0), "set COMPACT layout succeeded"); + is_chunked = 0; + } + + if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { + ret = H5Pset_external(dcpl, FILE_EXTERNAL, (off_t)0, H5F_UNLIMITED); + VRFY((ret >= 0), "set EXTERNAL file layout succeeded"); + is_chunked = 0; + } + + if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { + sid = H5Screate(H5S_NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + is_chunked = 0; + } + else { + /* Create the basic Space */ + /* if this is a compact dataset, create a small dataspace that does not exceed 64K */ + if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) { + dims[0] = ROW_FACTOR * 6; + dims[1] = COL_FACTOR * 6; + } + else { + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + } + sid = H5Screate_simple(RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + } + + filename = (const char *)PARATESTFILE /* GetTestParameters() */; + HDassert(filename != NULL); + + /* Setup the file access template */ + fapl = create_faccess_plist(mpi_comm, mpi_info, l_facc_type); + VRFY((fapl >= 0), "create_faccess_plist() succeeded"); + + /* Create the file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* If we are not testing contiguous datasets */ + if (is_chunked) { + /* Set up chunk information. 
*/ + chunk_dims[0] = dims[0] / (hsize_t)mpi_size; + chunk_dims[1] = dims[1]; + ret = H5Pset_chunk(dcpl, 2, chunk_dims); + VRFY((ret >= 0), "chunk creation property list succeeded"); + } + + /* Create the dataset */ + dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); + + /* + * Set expected causes and some tweaks based on the type of test + */ + if (selection_mode & TEST_DATATYPE_CONVERSION) { + test_name = "Broken Collective I/O - Datatype Conversion"; + no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION; + no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION; + /* set different sign to trigger type conversion */ + data_type = H5T_NATIVE_UINT; + } + + if (selection_mode & TEST_DATA_TRANSFORMS) { + test_name = "Broken Collective I/O - DATA Transforms"; + no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS; + no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS; + } + + if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { + test_name = "Broken Collective I/O - No Simple or Scalar DataSpace"; + no_collective_cause_local_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; + no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; + } + + if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT || + selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { + test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset"; + no_collective_cause_local_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + } + + if (selection_mode & TEST_COLLECTIVE) { + test_name = "Broken Collective I/O - Not Broken"; + no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE; + no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE; + } + + if (selection_mode & TEST_SET_INDEPENDENT) { + test_name = "Broken Collective I/O - Independent"; + no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT; + no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT; + /* switch to independent io */ + is_independent = 1; + } + + /* use all spaces for certain tests */ + if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES || + selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { + file_space = H5S_ALL; + mem_space = H5S_ALL; + } + else { + /* Get the file dataspace */ + file_space = H5Dget_space(dataset); + VRFY((file_space >= 0), "H5Dget_space succeeded"); + + /* Create the memory dataspace */ + mem_space = H5Screate_simple(RANK, dims, NULL); + VRFY((mem_space >= 0), "mem_space created"); + } + + /* Get the number of elements in the selection */ + length = (int)(dims[0] * dims[1]); + + /* Allocate and initialize the buffer */ + buffer = (int *)HDmalloc(sizeof(int) * (size_t)length); + VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); + for (i = 0; i < length; i++) + buffer[i] = i; + + /* Set up the dxpl for the write */ + dxpl_write = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); + + if (is_independent) { + /* Set Independent I/O */ + ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + } + else { + /* Set Collective I/O */ + ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + } + + if (selection_mode & TEST_DATA_TRANSFORMS) { + ret 
= H5Pset_data_transform(dxpl_write, "x+1"); + VRFY((ret >= 0), "H5Pset_data_transform succeeded"); + } + + /*--------------------- + * Test Write access + *---------------------*/ + + /* Write */ + ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); + VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); + + /* Get the cause of broken collective I/O */ + ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write, + &no_collective_cause_global_write); + VRFY((ret >= 0), "retrieving no collective cause succeeded"); + + /*--------------------- + * Test Read access + *---------------------*/ + + /* Make a copy of the dxpl to test the read operation */ + dxpl_read = H5Pcopy(dxpl_write); + VRFY((dxpl_read >= 0), "H5Pcopy succeeded"); + + /* Read */ + ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); + + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); + VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); + + /* Get the cause of broken collective I/O */ + ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read, + &no_collective_cause_global_read); + VRFY((ret >= 0), "retrieving no collective cause succeeded"); + + /* Check write vs read */ + VRFY((no_collective_cause_local_read == no_collective_cause_local_write), + "reading and writing are the same for local cause of Broken Collective I/O"); + VRFY((no_collective_cause_global_read == no_collective_cause_global_write), + "reading and writing are the same for global cause of Broken Collective I/O"); + + /* Test values */ + HDmemset(message, 0, sizeof(message)); + HDsnprintf(message, sizeof(message), + "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name); + VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message); + HDmemset(message, 0, sizeof(message)); + HDsnprintf(message, sizeof(message), + "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name); + VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message); + + /* Release some resources */ + if (sid) + H5Sclose(sid); + if (dcpl) + H5Pclose(dcpl); + if (dxpl_write) + H5Pclose(dxpl_write); + if (dxpl_read) + H5Pclose(dxpl_read); + if (dataset) + H5Dclose(dataset); + if (mem_space) + H5Sclose(mem_space); + if (file_space) + H5Sclose(file_space); + if (fid) + H5Fclose(fid); + HDfree(buffer); + + /* clean up external file */ + if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) + H5Fdelete(FILE_EXTERNAL, fapl); + + if (fapl) + H5Pclose(fapl); + + return; +} + +/* Function: no_collective_cause_tests + * + * Purpose: Tests cases for broken collective IO. 
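+ *          Each call below exercises one cause (or an OR-ed combination) and
+ *          checks the local and global masks returned by
+ *          H5Pget_mpio_no_collective_cause() against the expected values.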
+ * + * Programmer: Jonathan Kim + * Date: Aug, 2012 + */ +void +no_collective_cause_tests(void) +{ + /* + * Test individual cause + */ + test_no_collective_cause_mode(TEST_COLLECTIVE); + test_no_collective_cause_mode(TEST_SET_INDEPENDENT); + test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION); + test_no_collective_cause_mode(TEST_DATA_TRANSFORMS); + test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES); + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT); + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL); + + /* + * Test combined causes + */ + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION); + test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS); + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | + TEST_DATA_TRANSFORMS); + + return; +} + +/* + * Test consistency semantics of atomic mode + */ + +/* + * Example of using the parallel HDF5 library to create a dataset, + * where process 0 writes and the other processes read at the same + * time. If atomic mode is set correctly, the other processes should + * read the old values in the dataset or the new ones. + */ + +void +dataset_atomicity(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t sid; /* Dataspace ID */ + hid_t dataset1; /* Dataset IDs */ + hsize_t dims[RANK]; /* dataset dim sizes */ + int *write_buf = NULL; /* data buffer */ + int *read_buf = NULL; /* data buffer */ + int buf_size; + hid_t dataset2; + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* Memory dataspace ID */ + hsize_t start[RANK]; + hsize_t stride[RANK]; + hsize_t count[RANK]; + hsize_t block[RANK]; + const char *filename; + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + int i, j, k; + hbool_t atomicity = FALSE; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + dim0 = 64; + dim1 = 32; + filename = PARATESTFILE /* GetTestParameters() */; + if (facc_type != FACC_MPIO) { + HDprintf("Atomicity tests will not work without the MPIO VFD\n"); + return; + } + if (VERBOSE_MED) + HDprintf("atomic writes to file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + buf_size = dim0 * dim1; + /* allocate memory for data buffer */ + write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int)); + VRFY((write_buf != NULL), "write_buf HDcalloc succeeded"); + /* allocate memory for data buffer */ + read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int)); + VRFY((read_buf != NULL), "read_buf HDcalloc succeeded"); + + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), 
"H5Pclose succeeded"); + + /* setup dimensionality object */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create datasets */ + dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); + + /* initialize datasets to 0s */ + if (0 == mpi_rank) { + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf); + VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); + + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf); + VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); + } + + ret = H5Dclose(dataset1); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Sclose(sid); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + + MPI_Barrier(comm); + + /* make sure setting atomicity fails on a serial file ID */ + /* file locking allows only one file open (serial) for writing */ + if (MAINPROCESS) { + fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); + VRFY((fid >= 0), "H5Fopen succeeded"); + + /* should fail */ + H5E_BEGIN_TRY + { + ret = H5Fset_mpi_atomicity(fid, TRUE); + } + H5E_END_TRY + VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } + + MPI_Barrier(comm); + + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* open the file collectively */ + fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl); + VRFY((fid >= 0), "H5Fopen succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + ret = H5Fset_mpi_atomicity(fid, TRUE); + VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded"); + + /* open dataset1 (contiguous case) */ + dataset1 = H5Dopen2(fid, DATASETNAME5, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dopen2 succeeded"); + + if (0 == mpi_rank) { + for (i = 0; i < buf_size; i++) { + write_buf[i] = 5; + } + } + else { + for (i = 0; i < buf_size; i++) { + read_buf[i] = 8; + } + } + + /* check that the atomicity flag is set */ + ret = H5Fget_mpi_atomicity(fid, &atomicity); + VRFY((ret >= 0), "atomcity get failed"); + VRFY((atomicity == TRUE), "atomcity set failed"); + + MPI_Barrier(comm); + + /* Process 0 writes contiguously to the entire dataset */ + if (0 == mpi_rank) { + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf); + VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); + } + /* The other processes read the entire dataset */ + else { + ret = H5Dread(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf); + VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); + } + + if (VERBOSE_MED) { + i = 0; + j = 0; + k = 0; + for (i = 0; i < dim0; i++) { + HDprintf("\n"); + for (j = 0; j < dim1; j++) + HDprintf("%d ", read_buf[k++]); + } + } + + /* The processes that read the dataset must either read all values + as 0 (read happened before process 0 wrote to dataset 1), or 5 + (read happened after process 0 wrote to dataset 1) */ + if (0 != mpi_rank) { + int compare = read_buf[0]; + + VRFY((compare == 0 || compare == 5), + 
"Atomicity Test Failed Process %d: Value read should be 0 or 5\n"); + for (i = 1; i < buf_size; i++) { + if (read_buf[i] != compare) { + HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, + read_buf[i], compare); + nerrors++; + } + } + } + + ret = H5Dclose(dataset1); + VRFY((ret >= 0), "H5D close succeeded"); + + /* release data buffers */ + if (write_buf) + HDfree(write_buf); + if (read_buf) + HDfree(read_buf); + + /* open dataset2 (non-contiguous case) */ + dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dopen2 succeeded"); + + /* allocate memory for data buffer */ + write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int)); + VRFY((write_buf != NULL), "write_buf HDcalloc succeeded"); + /* allocate memory for data buffer */ + read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int)); + VRFY((read_buf != NULL), "read_buf HDcalloc succeeded"); + + for (i = 0; i < buf_size; i++) { + write_buf[i] = 5; + } + for (i = 0; i < buf_size; i++) { + read_buf[i] = 8; + } + + atomicity = FALSE; + /* check that the atomicity flag is set */ + ret = H5Fget_mpi_atomicity(fid, &atomicity); + VRFY((ret >= 0), "atomcity get failed"); + VRFY((atomicity == TRUE), "atomcity set failed"); + + block[0] = (hsize_t)(dim0 / mpi_size - 1); + block[1] = (hsize_t)(dim1 / mpi_size - 1); + stride[0] = block[0] + 1; + stride[1] = block[1] + 1; + count[0] = (hsize_t)mpi_size; + count[1] = (hsize_t)mpi_size; + start[0] = 0; + start[1] = 0; + + /* create a file dataspace */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace */ + mem_dataspace = H5Screate_simple(RANK, dims, NULL); + VRFY((mem_dataspace >= 0), ""); + + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + MPI_Barrier(comm); + + /* Process 0 writes to the dataset */ + if (0 == mpi_rank) { + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, write_buf); + VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); + } + /* All processes wait for the write to finish. 
This works because + atomicity is set to true */ + MPI_Barrier(comm); + /* The other processes read the entire dataset */ + if (0 != mpi_rank) { + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, read_buf); + VRFY((ret >= 0), "H5Dread dataset2 succeeded"); + } + + if (VERBOSE_MED) { + if (mpi_rank == 1) { + i = 0; + j = 0; + k = 0; + for (i = 0; i < dim0; i++) { + HDprintf("\n"); + for (j = 0; j < dim1; j++) + HDprintf("%d ", read_buf[k++]); + } + HDprintf("\n"); + } + } + + /* The processes that read the dataset must either read all values + as 5 (read happened after process 0 wrote to dataset 1) */ + if (0 != mpi_rank) { + int compare; + i = 0; + j = 0; + k = 0; + + compare = 5; + + for (i = 0; i < dim0; i++) { + if (i >= mpi_rank * ((int)block[0] + 1)) { + break; + } + if ((i + 1) % ((int)block[0] + 1) == 0) { + k += dim1; + continue; + } + for (j = 0; j < dim1; j++) { + if (j >= mpi_rank * ((int)block[1] + 1)) { + k += dim1 - mpi_rank * ((int)block[1] + 1); + break; + } + if ((j + 1) % ((int)block[1] + 1) == 0) { + k++; + continue; + } + else if (compare != read_buf[k]) { + HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, + k, read_buf[k], compare); + nerrors++; + } + k++; + } + } + } + + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Sclose(file_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Sclose(mem_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + + /* release data buffers */ + if (write_buf) + HDfree(write_buf); + if (read_buf) + HDfree(read_buf); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); +} + +/* Function: dense_attr_test + * + * Purpose: Test cases for writing dense attributes in parallel + * + * Programmer: Quincey Koziol + * Date: April, 2013 + */ +void +test_dense_attr(void) +{ + int mpi_size, mpi_rank; + hid_t fpid, fid; + hid_t gid, gpid; + hid_t atFileSpace, atid; + hsize_t atDims[1] = {10000}; + herr_t status; + const char *filename; + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, group, dataset, or attribute aren't supported with " + "this connector\n"); + fflush(stdout); + } + + return; + } + + /* get filename */ + filename = (const char *)PARATESTFILE /* GetTestParameters() */; + HDassert(filename != NULL); + + fpid = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fpid > 0), "H5Pcreate succeeded"); + status = H5Pset_libver_bounds(fpid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + VRFY((status >= 0), "H5Pset_libver_bounds succeeded"); + status = H5Pset_fapl_mpio(fpid, MPI_COMM_WORLD, MPI_INFO_NULL); + VRFY((status >= 0), "H5Pset_fapl_mpio succeeded"); + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fpid); + VRFY((fid > 0), "H5Fcreate succeeded"); + status = H5Pclose(fpid); + VRFY((status >= 0), "H5Pclose succeeded"); + + gpid = H5Pcreate(H5P_GROUP_CREATE); + VRFY((gpid > 0), "H5Pcreate succeeded"); + status = H5Pset_attr_phase_change(gpid, 0, 0); + VRFY((status >= 0), "H5Pset_attr_phase_change succeeded"); + gid = H5Gcreate2(fid, "foo", H5P_DEFAULT, gpid, H5P_DEFAULT); + VRFY((gid > 0), 
"H5Gcreate2 succeeded"); + status = H5Pclose(gpid); + VRFY((status >= 0), "H5Pclose succeeded"); + + atFileSpace = H5Screate_simple(1, atDims, NULL); + VRFY((atFileSpace > 0), "H5Screate_simple succeeded"); + atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT); + VRFY((atid > 0), "H5Acreate succeeded"); + status = H5Sclose(atFileSpace); + VRFY((status >= 0), "H5Sclose succeeded"); + + status = H5Aclose(atid); + VRFY((status >= 0), "H5Aclose succeeded"); + + status = H5Gclose(gid); + VRFY((status >= 0), "H5Gclose succeeded"); + status = H5Fclose(fid); + VRFY((status >= 0), "H5Fclose succeeded"); + + return; +} diff --git a/testpar/API/t_file.c b/testpar/API/t_file.c new file mode 100644 index 00000000000..936454af7ea --- /dev/null +++ b/testpar/API/t_file.c @@ -0,0 +1,1032 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Parallel tests for file operations + */ + +#include "hdf5.h" +#include "testphdf5.h" + +#if 0 +#include "H5CXprivate.h" /* API Contexts */ +#include "H5Iprivate.h" +#include "H5PBprivate.h" + +/* + * This file needs to access private information from the H5F package. + */ +#define H5AC_FRIEND /*suppress error about including H5ACpkg */ +#include "H5ACpkg.h" +#define H5C_FRIEND /*suppress error about including H5Cpkg */ +#include "H5Cpkg.h" +#define H5F_FRIEND /*suppress error about including H5Fpkg */ +#define H5F_TESTING +#include "H5Fpkg.h" +#define H5MF_FRIEND /*suppress error about including H5MFpkg */ +#include "H5MFpkg.h" +#endif + +#define NUM_DSETS 5 + +int mpi_size, mpi_rank; + +#if 0 +static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy); +static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size, + size_t page_buffer_size); +#endif + +/* + * test file access by communicator besides COMM_WORLD. + * Split COMM_WORLD into two, one (even_comm) contains the original + * processes of even ranks. The other (odd_comm) contains the original + * processes of odd ranks. Processes in even_comm creates a file, then + * cloose it, using even_comm. Processes in old_comm just do a barrier + * using odd_comm. Then they all do a barrier using COMM_WORLD. + * If the file creation and cloose does not do correct collective action + * according to the communicator argument, the processes will freeze up + * sooner or later due to barrier mixed up. 
+ */ +void +test_split_comm_access(void) +{ + MPI_Comm comm; + MPI_Info info = MPI_INFO_NULL; + int is_old, mrc; + int newrank, newprocs; + hid_t fid; /* file IDs */ + hid_t acc_tpl; /* File access properties */ + herr_t ret; /* generic return value */ + const char *filename; + + filename = (const char *)PARATESTFILE /* GetTestParameters()*/; + if (VERBOSE_MED) + HDprintf("Split Communicator access test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + is_old = mpi_rank % 2; + mrc = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm); + VRFY((mrc == MPI_SUCCESS), ""); + MPI_Comm_size(comm, &newprocs); + MPI_Comm_rank(comm, &newrank); + + if (is_old) { + /* odd-rank processes */ + mrc = MPI_Barrier(comm); + VRFY((mrc == MPI_SUCCESS), ""); + } + else { + /* even-rank processes */ + int sub_mpi_rank; /* rank in the sub-comm */ + MPI_Comm_rank(comm, &sub_mpi_rank); + + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* close the file */ + ret = H5Fclose(fid); + VRFY((ret >= 0), ""); + + /* delete the test file */ + ret = H5Fdelete(filename, acc_tpl); + VRFY((ret >= 0), "H5Fdelete succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + } + mrc = MPI_Comm_free(&comm); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded"); + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "final MPI_Barrier succeeded"); +} + +#if 0 +void +test_page_buffer_access(void) +{ + hid_t file_id = -1; /* File ID */ + hid_t fcpl, fapl; + size_t page_count = 0; + int i, num_elements = 200; + haddr_t raw_addr, meta_addr; + int *data; + H5F_t *f = NULL; + herr_t ret; /* generic return value */ + const char *filename; + hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */ + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + filename = (const char *)GetTestParameters(); + + if (VERBOSE_MED) + HDprintf("Page Buffer Usage in Parallel %s\n", filename); + + fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl >= 0), "create_faccess_plist succeeded"); + fcpl = H5Pcreate(H5P_FILE_CREATE); + VRFY((fcpl >= 0), ""); + + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0); + VRFY((ret == 0), ""); + ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 128); + VRFY((ret == 0), ""); + ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 100000, 0, 0); + VRFY((ret == 0), ""); + + /* This should fail because collective metadata writes are not supported with page buffering */ + H5E_BEGIN_TRY + { + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); + } + H5E_END_TRY; + VRFY((file_id < 0), "H5Fcreate failed"); + + /* disable collective metadata writes for page buffering to work */ + ret = H5Pset_coll_metadata_write(fapl, FALSE); + VRFY((ret >= 0), ""); + + ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); + VRFY((ret == 0), ""); 
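
/*
 * (Illustrative aside, not part of the test flow.)  The property-list settings
 * that create_file()/open_file() depend on, gathered in one place: paged file
 * space management on the fcpl, a page buffer on the fapl, and collective
 * metadata writes switched off:
 *
 *     H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0);
 *     H5Pset_file_space_page_size(fcpl, sizeof(int) * 128);
 *     H5Pset_page_buffer_size(fapl, sizeof(int) * 100000, 0, 0);
 *     H5Pset_coll_metadata_write(fapl, FALSE);
 */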
+ ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, sizeof(int) * 100, + sizeof(int) * 100000); + VRFY((ret == 0), ""); + + ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY); + VRFY((ret == 0), ""); + ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, sizeof(int) * 100, + sizeof(int) * 100000); + VRFY((ret == 0), ""); + + ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 100); + VRFY((ret == 0), ""); + + data = (int *)HDmalloc(sizeof(int) * (size_t)num_elements); + + /* initialize all the elements to have a value of -1 */ + for (i = 0; i < num_elements; i++) + data[i] = -1; + if (MAINPROCESS) { + hid_t fapl_self = H5I_INVALID_HID; + fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type); + + ret = H5Pset_page_buffer_size(fapl_self, sizeof(int) * 1000, 0, 0); + VRFY((ret == 0), ""); + /* collective metadata writes do not work with page buffering */ + ret = H5Pset_coll_metadata_write(fapl_self, FALSE); + VRFY((ret >= 0), ""); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self); + VRFY((file_id >= 0), ""); + + /* Push API context */ + ret = H5CX_push(); + VRFY((ret == 0), "H5CX_push()"); + api_ctx_pushed = TRUE; + + /* Get a pointer to the internal file object */ + f = (H5F_t *)H5I_object(file_id); + + VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process"); + + /* allocate space for 200 raw elements */ + raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements); + VRFY((raw_addr != HADDR_UNDEF), ""); + + /* allocate space for 200 metadata elements */ + meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements); + VRFY((meta_addr != HADDR_UNDEF), ""); + + page_count = 0; + + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data); + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data); + VRFY((ret == 0), ""); + + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update the first 50 elements */ + for (i = 0; i < 50; i++) + data[i] = i; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + H5Eprint2(H5E_DEFAULT, stderr); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + page_count += 2; + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update the second 50 elements */ + for (i = 0; i < 50; i++) + data[i] = i + 50; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update 100 - 200 */ + for (i = 0; i < 100; i++) + data[i] = i + 100; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + ret = H5PB_flush(f->shared); + VRFY((ret 
== 0), ""); + + /* read elements 0 - 200 */ + ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 200; i++) + VRFY((data[i] == i), "Read different values than written"); + ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 200; i++) + VRFY((data[i] == i), "Read different values than written"); + + /* read elements 0 - 50 */ + ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == i), "Read different values than written"); + ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == i), "Read different values than written"); + + /* close the file */ + ret = H5Fclose(file_id); + VRFY((ret >= 0), "H5Fclose succeeded"); + ret = H5Pclose(fapl_self); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* Pop API context */ + if (api_ctx_pushed) { + ret = H5CX_pop(FALSE); + VRFY((ret == 0), "H5CX_pop()"); + api_ctx_pushed = FALSE; + } + } + + MPI_Barrier(MPI_COMM_WORLD); + + if (mpi_size > 1) { + ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 1000, 0, 0); + VRFY((ret == 0), ""); + /* collective metadata writes do not work with page buffering */ + ret = H5Pset_coll_metadata_write(fapl, FALSE); + VRFY((ret >= 0), ""); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); + VRFY((file_id >= 0), ""); + + /* Push API context */ + ret = H5CX_push(); + VRFY((ret == 0), "H5CX_push()"); + api_ctx_pushed = TRUE; + + /* Get a pointer to the internal file object */ + f = (H5F_t *)H5I_object(file_id); + + VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process"); + + /* allocate space for 200 raw elements */ + raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements); + VRFY((raw_addr != HADDR_UNDEF), ""); + /* allocate space for 200 metadata elements */ + meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements); + VRFY((meta_addr != HADDR_UNDEF), ""); + + page_count = 0; + + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data); + VRFY((ret == 0), ""); + + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update the first 50 elements */ + for (i = 0; i < 50; i++) + data[i] = i; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update the second 50 elements */ + for (i = 0; i < 50; i++) + data[i] = i + 50; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 
50), sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update 100 - 200 */ + for (i = 0; i < 100; i++) + data[i] = i + 100; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((ret == 0), ""); + + /* read elements 0 - 200 */ + ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 200; i++) + VRFY((data[i] == i), "Read different values than written"); + ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 200; i++) + VRFY((data[i] == i), "Read different values than written"); + + /* read elements 0 - 50 */ + ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == i), "Read different values than written"); + ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + page_count += 1; + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == i), "Read different values than written"); + + MPI_Barrier(MPI_COMM_WORLD); + /* reset the first 50 elements to -1*/ + for (i = 0; i < 50; i++) + data[i] = -1; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* read elements 0 - 50 */ + ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == -1), "Read different values than written"); + ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == -1), "Read different values than written"); + + /* close the file */ + ret = H5Fclose(file_id); + VRFY((ret >= 0), "H5Fclose succeeded"); + } + + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Pclose(fcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* Pop API context */ + if (api_ctx_pushed) { + ret = H5CX_pop(FALSE); + VRFY((ret == 0), "H5CX_pop()"); + api_ctx_pushed = FALSE; + } + + HDfree(data); + data = NULL; + MPI_Barrier(MPI_COMM_WORLD); +} + +static int +create_file(const char *filename, hid_t fcpl, hid_t fapl, int 
metadata_write_strategy) +{ + hid_t file_id, dset_id, grp_id; + hid_t sid, mem_dataspace; + hsize_t start[RANK]; + hsize_t count[RANK]; + hsize_t stride[RANK]; + hsize_t block[RANK]; + DATATYPE *data_array = NULL; + hsize_t dims[RANK], i; + hsize_t num_elements; + int k; + char dset_name[20]; + H5F_t *f = NULL; + H5C_t *cache_ptr = NULL; + H5AC_cache_config_t config; + hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */ + herr_t ret; + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); + VRFY((file_id >= 0), ""); + + ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((ret == 0), ""); + + /* Push API context */ + ret = H5CX_push(); + VRFY((ret == 0), "H5CX_push()"); + api_ctx_pushed = TRUE; + + f = (H5F_t *)H5I_object(file_id); + VRFY((f != NULL), ""); + + cache_ptr = f->shared->cache; + VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), ""); + + cache_ptr->ignore_tags = TRUE; + H5C_stats__reset(cache_ptr); + config.version = H5AC__CURR_CACHE_CONFIG_VERSION; + + ret = H5AC_get_cache_auto_resize_config(cache_ptr, &config); + VRFY((ret == 0), ""); + + config.metadata_write_strategy = metadata_write_strategy; + + ret = H5AC_set_cache_auto_resize_config(cache_ptr, &config); + VRFY((ret == 0), ""); + + grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((grp_id >= 0), ""); + + dims[0] = (hsize_t)(ROW_FACTOR * mpi_size); + dims[1] = (hsize_t)(COL_FACTOR * mpi_size); + sid = H5Screate_simple(RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* Each process takes a slabs of rows. */ + block[0] = dims[0] / (hsize_t)mpi_size; + block[1] = dims[1]; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + + num_elements = block[0] * block[1]; + /* allocate memory for data buffer */ + data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE)); + VRFY((data_array != NULL), "data_array HDmalloc succeeded"); + /* put some trivial data in the data_array */ + for (i = 0; i < num_elements; i++) + data_array[i] = mpi_rank + 1; + + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(1, &num_elements, NULL); + VRFY((mem_dataspace >= 0), ""); + + for (k = 0; k < NUM_DSETS; k++) { + HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k); + dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + ret = H5Dclose(dset_id); + VRFY((ret == 0), ""); + + HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k); + dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + ret = H5Dclose(dset_id); + VRFY((ret == 0), ""); + + HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k); + dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + ret = H5Dclose(dset_id); + VRFY((ret == 0), ""); + + HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k); + dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + + ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); + VRFY((ret == 0), ""); + + ret = H5Dclose(dset_id); + VRFY((ret == 0), ""); + + HDmemset(data_array, 0, num_elements * 
sizeof(DATATYPE)); + dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + + ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); + VRFY((ret == 0), ""); + + ret = H5Dclose(dset_id); + VRFY((ret == 0), ""); + + for (i = 0; i < num_elements; i++) + VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed"); + + HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k); + ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); + VRFY((ret == 0), ""); + HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k); + ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); + VRFY((ret == 0), ""); + HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k); + ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); + VRFY((ret == 0), ""); + } + + ret = H5Gclose(grp_id); + VRFY((ret == 0), ""); + ret = H5Fclose(file_id); + VRFY((ret == 0), ""); + ret = H5Sclose(sid); + VRFY((ret == 0), ""); + ret = H5Sclose(mem_dataspace); + VRFY((ret == 0), ""); + + /* Pop API context */ + if (api_ctx_pushed) { + ret = H5CX_pop(FALSE); + VRFY((ret == 0), "H5CX_pop()"); + api_ctx_pushed = FALSE; + } + + MPI_Barrier(MPI_COMM_WORLD); + HDfree(data_array); + return 0; +} /* create_file */ + +static int +open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size, + size_t page_buffer_size) +{ + hid_t file_id, dset_id, grp_id, grp_id2; + hid_t sid, mem_dataspace; + DATATYPE *data_array = NULL; + hsize_t dims[RANK]; + hsize_t start[RANK]; + hsize_t count[RANK]; + hsize_t stride[RANK]; + hsize_t block[RANK]; + int i, k, ndims; + hsize_t num_elements; + char dset_name[20]; + H5F_t *f = NULL; + H5C_t *cache_ptr = NULL; + H5AC_cache_config_t config; + hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */ + herr_t ret; + + config.version = H5AC__CURR_CACHE_CONFIG_VERSION; + ret = H5Pget_mdc_config(fapl, &config); + VRFY((ret == 0), ""); + + config.metadata_write_strategy = metadata_write_strategy; + + ret = H5Pget_mdc_config(fapl, &config); + VRFY((ret == 0), ""); + + file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl); + H5Eprint2(H5E_DEFAULT, stderr); + VRFY((file_id >= 0), ""); + + /* Push API context */ + ret = H5CX_push(); + VRFY((ret == 0), "H5CX_push()"); + api_ctx_pushed = TRUE; + + ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((ret == 0), ""); + + f = (H5F_t *)H5I_object(file_id); + VRFY((f != NULL), ""); + + cache_ptr = f->shared->cache; + VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), ""); + + MPI_Barrier(MPI_COMM_WORLD); + + VRFY((f->shared->page_buf != NULL), ""); + VRFY((f->shared->page_buf->page_size == page_size), ""); + VRFY((f->shared->page_buf->max_size == page_buffer_size), ""); + + grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT); + VRFY((grp_id >= 0), ""); + + dims[0] = (hsize_t)(ROW_FACTOR * mpi_size); + dims[1] = (hsize_t)(COL_FACTOR * mpi_size); + + /* Each process takes a slabs of rows. 
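 *
 * (Illustrative.)  With dims = {R, C} and mpi_size ranks, the selection set
 * up just below hands rank r the contiguous band of rows
 * [ r*R/mpi_size, (r+1)*R/mpi_size ):
 *
 *     block  = { R / mpi_size, C }      count = { 1, 1 }
 *     stride = block                    start = { r * block[0], 0 }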
*/ + block[0] = dims[0] / (hsize_t)mpi_size; + block[1] = dims[1]; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + + num_elements = block[0] * block[1]; + /* allocate memory for data buffer */ + data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE)); + VRFY((data_array != NULL), "data_array HDmalloc succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(1, &num_elements, NULL); + VRFY((mem_dataspace >= 0), ""); + + for (k = 0; k < NUM_DSETS; k++) { + HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k); + dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + + sid = H5Dget_space(dset_id); + VRFY((dset_id >= 0), "H5Dget_space succeeded"); + + ndims = H5Sget_simple_extent_dims(sid, dims, NULL); + VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded"); + VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions"); + VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions"); + + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); + VRFY((ret >= 0), ""); + + ret = H5Dclose(dset_id); + VRFY((ret >= 0), ""); + ret = H5Sclose(sid); + VRFY((ret == 0), ""); + + for (i = 0; i < (int)num_elements; i++) + VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed"); + } + + grp_id2 = H5Gcreate2(file_id, "GROUP/GROUP2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((grp_id2 >= 0), ""); + ret = H5Gclose(grp_id2); + VRFY((ret == 0), ""); + + ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((ret == 0), ""); + + MPI_Barrier(MPI_COMM_WORLD); + /* flush invalidate each ring, starting from the outermost ring and + * working inward. + */ + for (i = 0; i < H5C__HASH_TABLE_LEN; i++) { + H5C_cache_entry_t *entry_ptr = NULL; + + entry_ptr = cache_ptr->index[i]; + + while (entry_ptr != NULL) { + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(entry_ptr->is_dirty == FALSE); + + if (!entry_ptr->is_pinned && !entry_ptr->is_protected) { + ret = H5AC_expunge_entry(f, entry_ptr->type, entry_ptr->addr, 0); + VRFY((ret == 0), ""); + } + + entry_ptr = entry_ptr->ht_next; + } + } + MPI_Barrier(MPI_COMM_WORLD); + + grp_id2 = H5Gopen2(file_id, "GROUP/GROUP2", H5P_DEFAULT); + H5Eprint2(H5E_DEFAULT, stderr); + VRFY((grp_id2 >= 0), ""); + ret = H5Gclose(grp_id2); + H5Eprint2(H5E_DEFAULT, stderr); + VRFY((ret == 0), ""); + + ret = H5Gclose(grp_id); + VRFY((ret == 0), ""); + ret = H5Fclose(file_id); + VRFY((ret == 0), ""); + ret = H5Sclose(mem_dataspace); + VRFY((ret == 0), ""); + + /* Pop API context */ + if (api_ctx_pushed) { + ret = H5CX_pop(FALSE); + VRFY((ret == 0), "H5CX_pop()"); + api_ctx_pushed = FALSE; + } + + HDfree(data_array); + + return nerrors; +} +#endif + +/* + * NOTE: See HDFFV-10894 and add tests later to verify MPI-specific properties in the + * incoming fapl that could conflict with the existing values in H5F_shared_t on + * multiple opens of the same file. 
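 */

/*
 * A minimal sketch (ours, not part of the test below) of the
 * H5Pset_mpi_params()/H5Pget_mpi_params() round trip that
 * test_file_properties() verifies.  Error checking is omitted and the helper
 * name is illustrative.
 */
static void
sketch_mpi_params_roundtrip(void)
{
    hid_t    fapl     = H5Pcreate(H5P_FILE_ACCESS);
    MPI_Comm comm_out = MPI_COMM_NULL;
    MPI_Info info_out = MPI_INFO_NULL;
    int      cmp      = MPI_UNEQUAL;

    /* Store an MPI communicator/info pair on the file access plist ... */
    H5Pset_mpi_params(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

    /* ... and read it back: the library hands out duplicates, so the handles
     * differ while the communicators compare as congruent.
     */
    H5Pget_mpi_params(fapl, &comm_out, &info_out);
    MPI_Comm_compare(MPI_COMM_WORLD, comm_out, &cmp); /* expect MPI_CONGRUENT */

    if (comm_out != MPI_COMM_NULL)
        MPI_Comm_free(&comm_out);
    if (info_out != MPI_INFO_NULL)
        MPI_Info_free(&info_out);
    H5Pclose(fapl);
}

/*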
+ */ +void +test_file_properties(void) +{ + hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ + hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ + hid_t fapl_copy_id = H5I_INVALID_HID; /* File access plist */ + hbool_t is_coll; + htri_t are_equal; + const char *filename; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + MPI_Comm comm_out = MPI_COMM_NULL; + MPI_Info info_out = MPI_INFO_NULL; + herr_t ret; /* Generic return value */ + int mpi_ret; /* MPI return value */ + int cmp; /* Compare value */ + + /* set up MPI parameters */ + mpi_ret = MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + VRFY((mpi_ret >= 0), "MPI_Comm_size succeeded"); + mpi_ret = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + VRFY((mpi_ret >= 0), "MPI_Comm_rank succeeded"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + filename = (const char *)PARATESTFILE /* GetTestParameters() */; + + mpi_ret = MPI_Info_create(&info); + VRFY((mpi_ret >= 0), "MPI_Info_create succeeded"); + mpi_ret = MPI_Info_set(info, "hdf_info_prop1", "xyz"); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); + + /* setup file access plist */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate"); + ret = H5Pset_fapl_mpio(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_fapl_mpio"); + + /* Check getting and setting MPI properties + * (for use in VOL connectors, not the MPI-I/O VFD) + */ + ret = H5Pset_mpi_params(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); + ret = H5Pget_mpi_params(fapl_id, &comm_out, &info_out); + VRFY((ret >= 0), "H5Pget_mpi_params succeeded"); + + /* Check the communicator */ + VRFY((comm != comm_out), "Communicators should not be bitwise identical"); + cmp = MPI_UNEQUAL; + mpi_ret = MPI_Comm_compare(comm, comm_out, &cmp); + VRFY((ret >= 0), "MPI_Comm_compare succeeded"); + VRFY((cmp == MPI_CONGRUENT), "Communicators should be congruent via MPI_Comm_compare"); + + /* Check the info object */ + VRFY((info != info_out), "Info objects should not be bitwise identical"); + + /* Free the obtained comm and info object */ + mpi_ret = MPI_Comm_free(&comm_out); + VRFY((mpi_ret >= 0), "MPI_Comm_free succeeded"); + mpi_ret = MPI_Info_free(&info_out); + VRFY((mpi_ret >= 0), "MPI_Info_free succeeded"); + + /* Copy the fapl and ensure it's equal to the original */ + fapl_copy_id = H5Pcopy(fapl_id); + VRFY((fapl_copy_id != H5I_INVALID_HID), "H5Pcopy"); + are_equal = H5Pequal(fapl_id, fapl_copy_id); + VRFY((TRUE == are_equal), "H5Pequal"); + + /* Add a property to the copy and ensure it's different now */ + mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc"); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); + ret = H5Pset_mpi_params(fapl_copy_id, comm, info); + VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); + are_equal = H5Pequal(fapl_id, fapl_copy_id); + VRFY((FALSE == are_equal), "H5Pequal"); + + /* Add a property with the same key but a different value to the original + * and ensure they are still different. 
+ */ + mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "ijk"); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); + ret = H5Pset_mpi_params(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); + are_equal = H5Pequal(fapl_id, fapl_copy_id); + VRFY((FALSE == are_equal), "H5Pequal"); + + /* Set the second property in the original to the same + * value as the copy and ensure they are the same now. + */ + mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc"); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); + ret = H5Pset_mpi_params(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); + are_equal = H5Pequal(fapl_id, fapl_copy_id); + VRFY((TRUE == are_equal), "H5Pequal"); + + /* create the file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded"); + + /* verify settings for file access properties */ + + /* Collective metadata writes */ + ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); + VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes"); + + /* Collective metadata read API calling requirement */ + ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); + VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + + /* Open the file with the MPI-IO driver */ + ret = H5Pset_fapl_mpio(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_fapl_mpio failed"); + fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id); + VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded"); + + /* verify settings for file access properties */ + + /* Collective metadata writes */ + ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); + VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes"); + + /* Collective metadata read API calling requirement */ + ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); + VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + + /* Open the file with the MPI-IO driver w/ collective settings */ + ret = H5Pset_fapl_mpio(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_fapl_mpio failed"); + /* Collective metadata writes */ + ret = H5Pset_coll_metadata_write(fapl_id, TRUE); + VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); + /* Collective metadata read API calling requirement */ + ret = H5Pset_all_coll_metadata_ops(fapl_id, TRUE); + VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); + fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id); + VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded"); + + /* verify settings for file access properties */ + + /* Collective metadata writes */ + ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); + VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes"); + + /* Collective metadata read API calling requirement */ + ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); + VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement"); + + /* close fapl and retrieve 
it from file */ + ret = H5Pclose(fapl_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + fapl_id = H5I_INVALID_HID; + + fapl_id = H5Fget_access_plist(fid); + VRFY((fapl_id != H5I_INVALID_HID), "H5P_FILE_ACCESS"); + + /* verify settings for file access properties */ + + /* Collective metadata writes */ + ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); + VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes"); + + /* Collective metadata read API calling requirement */ + ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); + VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement"); + + /* close file */ + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + + /* Release file-access plist */ + ret = H5Pclose(fapl_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Pclose(fapl_copy_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* Free the MPI info object */ + mpi_ret = MPI_Info_free(&info); + VRFY((mpi_ret >= 0), "MPI_Info_free succeeded"); + +} /* end test_file_properties() */ + +void +test_delete(void) +{ + hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ + hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ + const char *filename = NULL; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + htri_t is_hdf5 = FAIL; /* Whether a file is an HDF5 file */ + herr_t ret; /* Generic return value */ + + filename = (const char *)PARATESTFILE /* GetTestParameters() */; + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* setup file access plist */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate"); + ret = H5Pset_fapl_mpio(fapl_id, comm, info); + VRFY((SUCCEED == ret), "H5Pset_fapl_mpio"); + + /* create the file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((fid != H5I_INVALID_HID), "H5Fcreate"); + + /* close the file */ + ret = H5Fclose(fid); + VRFY((SUCCEED == ret), "H5Fclose"); + + /* Verify that the file is an HDF5 file */ + is_hdf5 = H5Fis_accessible(filename, fapl_id); + VRFY((TRUE == is_hdf5), "H5Fis_accessible"); + + /* Delete the file */ + ret = H5Fdelete(filename, fapl_id); + VRFY((SUCCEED == ret), "H5Fdelete"); + + /* Verify that the file is NO LONGER an HDF5 file */ + /* This should fail since there is no file */ + H5E_BEGIN_TRY + { + is_hdf5 = H5Fis_accessible(filename, fapl_id); + } + H5E_END_TRY; + VRFY((is_hdf5 != SUCCEED), "H5Fis_accessible"); + + /* Release file-access plist */ + ret = H5Pclose(fapl_id); + VRFY((SUCCEED == ret), "H5Pclose"); + +} /* end test_delete() */ diff --git a/testpar/API/t_file_image.c b/testpar/API/t_file_image.c new file mode 100644 index 00000000000..4f4fa968e43 --- /dev/null +++ b/testpar/API/t_file_image.c @@ -0,0 +1,371 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Parallel tests for file image operations + */ + +#include "hdf5.h" +#include "testphdf5.h" + +/* file_image_daisy_chain_test + * + * Process zero: + * + * 1) Creates a core file with an integer vector data set of + * length n (= mpi_size), + * + * 2) Initializes the vector to zero in * location 0, and to -1 + * everywhere else. + * + * 3) Flushes the core file, and gets an image of it. Closes + * the core file. + * + * 4) Sends the image to process 1. + * + * 5) Awaits receipt on a file image from process n-1. + * + * 6) opens the image received from process n-1, verifies that + * it contains a vector of length equal to mpi_size, and + * that the vector contains (0, 1, 2, ... n-1) + * + * 7) closes the core file and exits. + * + * Process i (0 < i < n) + * + * 1) Await receipt of file image from process (i - 1). + * + * 2) Open the image with the core file driver, verify that i + * contains a vector v of length, and that v[j] = j for + * 0 <= j < i, and that v[j] == -1 for i <= j < n + * + * 3) Set v[i] = i in the core file. + * + * 4) Flush the core file and send it to process (i + 1) % n. + * + * 5) close the core file and exit. + * + * Test fails on a hang (if an image is not received), or on invalid data. + * + * JRM -- 11/28/11 + */ +void +file_image_daisy_chain_test(void) +{ + char file_name[1024] = "\0"; + int mpi_size, mpi_rank; + int mpi_result; + int i; + int space_ndims; + MPI_Status rcvstat; + int *vector_ptr = NULL; + hid_t fapl_id = -1; + hid_t file_id; /* file IDs */ + hid_t dset_id = -1; + hid_t dset_type_id = -1; + hid_t space_id = -1; + herr_t err; + hsize_t dims[1]; + void *image_ptr = NULL; + ssize_t bytes_read; + ssize_t image_len; + hbool_t vector_ok = TRUE; + htri_t tri_result; + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* setup file name */ + HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank); + + if (mpi_rank == 0) { + + /* 1) Creates a core file with an integer vector data set + * of length mpi_size, + */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id >= 0), "creating fapl"); + + err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE); + VRFY((err >= 0), "setting core file driver in fapl."); + + file_id = H5Fcreate(file_name, 0, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "created core file"); + + dims[0] = (hsize_t)mpi_size; + space_id = H5Screate_simple(1, dims, dims); + VRFY((space_id >= 0), "created data space"); + + dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), "created data set"); + + /* 2) Initialize the vector to zero in location 0, and + * to -1 everywhere else. + */ + + vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int)); + VRFY((vector_ptr != NULL), "allocated in memory representation of vector"); + + vector_ptr[0] = 0; + for (i = 1; i < mpi_size; i++) + vector_ptr[i] = -1; + + err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); + VRFY((err >= 0), "wrote initial data to vector."); + + HDfree(vector_ptr); + vector_ptr = NULL; + + /* 3) Flush the core file, and get an image of it. Close + * the core file. 
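 *
 *    A note on the idiom used below (illustrative): H5Fget_file_image() is
 *    called twice -- first with a NULL buffer, which only reports the number
 *    of bytes required, then with a buffer of that size to copy the image:
 *
 *        ssize_t len = H5Fget_file_image(file_id, NULL, (size_t)0);
 *        void   *buf = HDmalloc((size_t)len);
 *        H5Fget_file_image(file_id, buf, (size_t)len);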
+ */ + err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((err >= 0), "flushed core file."); + + image_len = H5Fget_file_image(file_id, NULL, (size_t)0); + VRFY((image_len > 0), "got image file size"); + + image_ptr = (void *)HDmalloc((size_t)image_len); + VRFY(image_ptr != NULL, "allocated file image buffer."); + + bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len); + VRFY(bytes_read == image_len, "wrote file into image buffer"); + + err = H5Sclose(space_id); + VRFY((err >= 0), "closed data space."); + + err = H5Dclose(dset_id); + VRFY((err >= 0), "closed data set."); + + err = H5Fclose(file_id); + VRFY((err >= 0), "closed core file(1)."); + + err = H5Pclose(fapl_id); + VRFY((err >= 0), "closed fapl(1)."); + + /* 4) Send the image to process 1. */ + + mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, 1, 0, MPI_COMM_WORLD); + VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1"); + + mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, 1, 0, MPI_COMM_WORLD); + VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1"); + + HDfree(image_ptr); + image_ptr = NULL; + image_len = 0; + + /* 5) Await receipt on a file image from process n-1. */ + + mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_size - 1, 0, + MPI_COMM_WORLD, &rcvstat); + VRFY((mpi_result == MPI_SUCCESS), "received image len from process n-1"); + + image_ptr = (void *)HDmalloc((size_t)image_len); + VRFY(image_ptr != NULL, "allocated file image receive buffer."); + + mpi_result = + MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD, &rcvstat); + VRFY((mpi_result == MPI_SUCCESS), "received file image from process n-1"); + + /* 6) open the image received from process n-1, verify that + * it contains a vector of length equal to mpi_size, and + * that the vector contains (0, 1, 2, ... n-1). 
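 *
 *    (Illustrative note.)  Reopening from the received buffer needs only the
 *    core VFD plus H5Pset_file_image(); by default the property list keeps
 *    its own copy of the image, so the receive buffer remains ours to free:
 *
 *        H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
 *        H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
 *        file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id);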
+ */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id >= 0), "creating fapl"); + + err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE); + VRFY((err >= 0), "setting core file driver in fapl."); + + err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len); + VRFY((err >= 0), "set file image in fapl."); + + file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "opened received file image file"); + + dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT); + VRFY((dset_id >= 0), "opened data set"); + + dset_type_id = H5Dget_type(dset_id); + VRFY((dset_type_id >= 0), "obtained data set type"); + + tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT); + VRFY((tri_result == TRUE), "verified data set type"); + + space_id = H5Dget_space(dset_id); + VRFY((space_id >= 0), "opened data space"); + + space_ndims = H5Sget_simple_extent_ndims(space_id); + VRFY((space_ndims == 1), "verified data space num dims(1)"); + + space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL); + VRFY((space_ndims == 1), "verified data space num dims(2)"); + VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims"); + + vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int)); + VRFY((vector_ptr != NULL), "allocated in memory rep of vector"); + + err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); + VRFY((err >= 0), "read received vector."); + + vector_ok = TRUE; + for (i = 0; i < mpi_size; i++) + if (vector_ptr[i] != i) + vector_ok = FALSE; + VRFY((vector_ok), "verified received vector."); + + HDfree(vector_ptr); + vector_ptr = NULL; + + /* 7) closes the core file and exit. */ + + err = H5Sclose(space_id); + VRFY((err >= 0), "closed data space."); + + err = H5Dclose(dset_id); + VRFY((err >= 0), "closed data set."); + + err = H5Fclose(file_id); + VRFY((err >= 0), "closed core file(1)."); + + err = H5Pclose(fapl_id); + VRFY((err >= 0), "closed fapl(1)."); + + HDfree(image_ptr); + image_ptr = NULL; + image_len = 0; + } + else { + /* 1) Await receipt of file image from process (i - 1). 
*/ + + mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_rank - 1, 0, + MPI_COMM_WORLD, &rcvstat); + VRFY((mpi_result == MPI_SUCCESS), "received image size from process mpi_rank-1"); + + image_ptr = (void *)HDmalloc((size_t)image_len); + VRFY(image_ptr != NULL, "allocated file image receive buffer."); + + mpi_result = + MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD, &rcvstat); + VRFY((mpi_result == MPI_SUCCESS), "received file image from process mpi_rank-1"); + + /* 2) Open the image with the core file driver, verify that it + * contains a vector v of length, and that v[j] = j for + * 0 <= j < i, and that v[j] == -1 for i <= j < n + */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id >= 0), "creating fapl"); + + err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE); + VRFY((err >= 0), "setting core file driver in fapl."); + + err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len); + VRFY((err >= 0), "set file image in fapl."); + + file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id); + H5Eprint2(H5P_DEFAULT, stderr); + VRFY((file_id >= 0), "opened received file image file"); + + dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT); + VRFY((dset_id >= 0), "opened data set"); + + dset_type_id = H5Dget_type(dset_id); + VRFY((dset_type_id >= 0), "obtained data set type"); + + tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT); + VRFY((tri_result == TRUE), "verified data set type"); + + space_id = H5Dget_space(dset_id); + VRFY((space_id >= 0), "opened data space"); + + space_ndims = H5Sget_simple_extent_ndims(space_id); + VRFY((space_ndims == 1), "verified data space num dims(1)"); + + space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL); + VRFY((space_ndims == 1), "verified data space num dims(2)"); + VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims"); + + vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int)); + VRFY((vector_ptr != NULL), "allocated in memory rep of vector"); + + err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); + VRFY((err >= 0), "read received vector."); + + vector_ok = TRUE; + for (i = 0; i < mpi_size; i++) { + if (i < mpi_rank) { + if (vector_ptr[i] != i) + vector_ok = FALSE; + } + else { + if (vector_ptr[i] != -1) + vector_ok = FALSE; + } + } + VRFY((vector_ok), "verified received vector."); + + /* 3) Set v[i] = i in the core file. */ + + vector_ptr[mpi_rank] = mpi_rank; + + err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); + VRFY((err >= 0), "wrote modified data to vector."); + + HDfree(vector_ptr); + vector_ptr = NULL; + + /* 4) Flush the core file and send it to process (mpi_rank + 1) % n. 
*/ + + err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((err >= 0), "flushed core file."); + + image_len = H5Fget_file_image(file_id, NULL, (size_t)0); + VRFY((image_len > 0), "got (possibly modified) image file len"); + + image_ptr = (void *)HDrealloc((void *)image_ptr, (size_t)image_len); + VRFY(image_ptr != NULL, "re-allocated file image buffer."); + + bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len); + VRFY(bytes_read == image_len, "wrote file into image buffer"); + + mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, + (mpi_rank + 1) % mpi_size, 0, MPI_COMM_WORLD); + VRFY((mpi_result == MPI_SUCCESS), "sent image size to process (mpi_rank + 1) % mpi_size"); + + mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, (mpi_rank + 1) % mpi_size, 0, + MPI_COMM_WORLD); + VRFY((mpi_result == MPI_SUCCESS), "sent image to process (mpi_rank + 1) % mpi_size"); + + HDfree(image_ptr); + image_ptr = NULL; + image_len = 0; + + /* 5) close the core file and exit. */ + + err = H5Sclose(space_id); + VRFY((err >= 0), "closed data space."); + + err = H5Dclose(dset_id); + VRFY((err >= 0), "closed data set."); + + err = H5Fclose(file_id); + VRFY((err >= 0), "closed core file(1)."); + + err = H5Pclose(fapl_id); + VRFY((err >= 0), "closed fapl(1)."); + } + + return; + +} /* file_image_daisy_chain_test() */ diff --git a/testpar/API/t_filter_read.c b/testpar/API/t_filter_read.c new file mode 100644 index 00000000000..f32c21b4392 --- /dev/null +++ b/testpar/API/t_filter_read.c @@ -0,0 +1,564 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * This verifies the correctness of parallel reading of a dataset that has been + * written serially using filters. + * + * Created by: Christian Chilan + * Date: 2007/05/15 + */ + +#include "hdf5.h" +#include "testphdf5.h" + +#ifdef H5_HAVE_SZLIB_H +#include "szlib.h" +#endif + +static int mpi_size, mpi_rank; + +/* Chunk sizes */ +#define CHUNK_DIM1 7 +#define CHUNK_DIM2 27 + +/* Sizes of the vertical hyperslabs. Total dataset size is + {HS_DIM1, HS_DIM2 * mpi_size } */ +#define HS_DIM1 200 +#define HS_DIM2 100 + +const char * +h5_rmprefix(const char *filename) +{ + const char *ret_ptr; + + if ((ret_ptr = HDstrstr(filename, ":")) == NULL) + ret_ptr = filename; + else + ret_ptr++; + + return (ret_ptr); +} + +#ifdef H5_HAVE_FILTER_SZIP + +/*------------------------------------------------------------------------- + * Function: h5_szip_can_encode + * + * Purpose: Retrieve the filter config flags for szip, tell if + * encoder is available. 
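 *
 * Typical use (illustrative; the dcpl is assumed to belong to the caller):
 *
 *     if (h5_szip_can_encode() != 1)
 *         ...skip the szip write test...
 *     else
 *         H5Pset_szip(dcpl, H5_SZIP_NN_OPTION_MASK, 8);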
+ * + * Return: 1: decode+encode is enabled + * 0: only decode is enabled + * -1: other + * + * Programmer: + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +int +h5_szip_can_encode(void) +{ + unsigned int filter_config_flags; + + H5Zget_filter_info(H5Z_FILTER_SZIP, &filter_config_flags); + if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == 0) { + /* filter present but neither encode nor decode is supported (???) */ + return -1; + } + else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == + H5Z_FILTER_CONFIG_DECODE_ENABLED) { + /* decoder only: read but not write */ + return 0; + } + else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == + H5Z_FILTER_CONFIG_ENCODE_ENABLED) { + /* encoder only: write but not read (???) */ + return -1; + } + else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == + (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) { + return 1; + } + return (-1); +} +#endif /* H5_HAVE_FILTER_SZIP */ + +/*------------------------------------------------------------------------- + * Function: filter_read_internal + * + * Purpose: Tests parallel reading of a 2D dataset written serially using + * filters. During the parallel reading phase, the dataset is + * divided evenly among the processors in vertical hyperslabs. + * + * Programmer: Christian Chilan + * Tuesday, May 15, 2007 + * + *------------------------------------------------------------------------- + */ +static void +filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) +{ + hid_t file, dataset; /* HDF5 IDs */ + hid_t access_plist; /* Access property list ID */ + hid_t sid, memspace; /* Dataspace IDs */ + hsize_t size[2]; /* Dataspace dimensions */ + hsize_t hs_offset[2]; /* Hyperslab offset */ + hsize_t hs_size[2]; /* Hyperslab size */ + size_t i, j; /* Local index variables */ + char name[32] = "dataset"; + herr_t hrc; /* Error status */ + int *points = NULL; /* Writing buffer for entire dataset */ + int *check = NULL; /* Reading buffer for selected hyperslab */ + + (void)dset_size; /* silence compiler */ + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* set sizes for dataset and hyperslabs */ + hs_size[0] = size[0] = HS_DIM1; + hs_size[1] = HS_DIM2; + + size[1] = hs_size[1] * (hsize_t)mpi_size; + + hs_offset[0] = 0; + hs_offset[1] = hs_size[1] * (hsize_t)mpi_rank; + + /* Create the data space */ + sid = H5Screate_simple(2, size, NULL); + VRFY(sid >= 0, "H5Screate_simple"); + + /* Create buffers */ + points = (int *)HDmalloc(size[0] * size[1] * sizeof(int)); + VRFY(points != NULL, "HDmalloc"); + + check = (int *)HDmalloc(hs_size[0] * hs_size[1] * sizeof(int)); + VRFY(check != NULL, "HDmalloc"); + + /* Initialize writing buffer with random data */ + for (i = 0; i < size[0]; i++) + for (j = 0; j < size[1]; j++) + points[i * size[1] + j] = (int)(i + j + 7); + + VRFY(H5Pall_filters_avail(dcpl), "Incorrect filter availability"); + + /* Serial write phase */ + if (MAINPROCESS) { + + file = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + VRFY(file >= 0, "H5Fcreate"); + + /* Create the dataset */ + dataset = H5Dcreate2(file, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY(dataset >= 0, "H5Dcreate2"); + + 
hrc = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, points); + VRFY(hrc >= 0, "H5Dwrite"); +#if 0 + *dset_size = H5Dget_storage_size(dataset); + VRFY(*dset_size > 0, "H5Dget_storage_size"); +#endif + + hrc = H5Dclose(dataset); + VRFY(hrc >= 0, "H5Dclose"); + + hrc = H5Fclose(file); + VRFY(hrc >= 0, "H5Fclose"); + } + + MPI_Barrier(MPI_COMM_WORLD); + + /* Parallel read phase */ + /* Set up MPIO file access property lists */ + access_plist = H5Pcreate(H5P_FILE_ACCESS); + VRFY((access_plist >= 0), "H5Pcreate"); + + hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL); + VRFY((hrc >= 0), "H5Pset_fapl_mpio"); + + /* Open the file */ + file = H5Fopen(filename, H5F_ACC_RDWR, access_plist); + VRFY((file >= 0), "H5Fopen"); + + dataset = H5Dopen2(file, name, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dopen2"); + + hrc = H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL); + VRFY(hrc >= 0, "H5Sselect_hyperslab"); + + memspace = H5Screate_simple(2, hs_size, NULL); + VRFY(memspace >= 0, "H5Screate_simple"); + + hrc = H5Dread(dataset, H5T_NATIVE_INT, memspace, sid, H5P_DEFAULT, check); + VRFY(hrc >= 0, "H5Dread"); + + /* Check that the values read are the same as the values written */ + for (i = 0; i < hs_size[0]; i++) { + for (j = 0; j < hs_size[1]; j++) { + if (points[i * size[1] + (size_t)hs_offset[1] + j] != check[i * hs_size[1] + j]) { + HDfprintf(stderr, " Read different values than written.\n"); + HDfprintf(stderr, " At index %lu,%lu\n", (unsigned long)(i), + (unsigned long)(hs_offset[1] + j)); + HDfprintf(stderr, " At original: %d\n", + (int)points[i * size[1] + (size_t)hs_offset[1] + j]); + HDfprintf(stderr, " At returned: %d\n", (int)check[i * hs_size[1] + j]); + VRFY(FALSE, ""); + } + } + } +#if 0 + /* Get the storage size of the dataset */ + *dset_size = H5Dget_storage_size(dataset); + VRFY(*dset_size != 0, "H5Dget_storage_size"); +#endif + + /* Clean up objects used for this test */ + hrc = H5Dclose(dataset); + VRFY(hrc >= 0, "H5Dclose"); + + hrc = H5Sclose(sid); + VRFY(hrc >= 0, "H5Sclose"); + + hrc = H5Sclose(memspace); + VRFY(hrc >= 0, "H5Sclose"); + + hrc = H5Pclose(access_plist); + VRFY(hrc >= 0, "H5Pclose"); + + hrc = H5Fclose(file); + VRFY(hrc >= 0, "H5Fclose"); + + HDfree(points); + HDfree(check); + + MPI_Barrier(MPI_COMM_WORLD); +} + +/*------------------------------------------------------------------------- + * Function: test_filter_read + * + * Purpose: Tests parallel reading of datasets written serially using + * several (combinations of) filters. 
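+ *
+ *              Every step below follows the same pattern, sketched here
+ *              with error checks omitted (the size argument varies per
+ *              step):
+ *
+ *                  dc = H5Pcreate(H5P_DATASET_CREATE);
+ *                  H5Pset_chunk(dc, 2, chunk_size);
+ *                  H5Pset_<filter>(dc, ...);    e.g. H5Pset_deflate(dc, 6)
+ *                  filter_read_internal(filename, dc, &size);
+ *                  H5Pclose(dc);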
+ * + * Programmer: Christian Chilan + * Tuesday, May 15, 2007 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +void +test_filter_read(void) +{ + hid_t dc; /* HDF5 IDs */ + const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */ +#if 0 + hsize_t null_size; /* Size of dataset without filters */ +#endif + unsigned chunk_opts; /* Chunk options */ + unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */ + herr_t hrc; + const char *filename; +#ifdef H5_HAVE_FILTER_FLETCHER32 + hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */ +#endif + +#ifdef H5_HAVE_FILTER_DEFLATE + hsize_t deflate_size; /* Size of dataset with deflate filter */ +#endif /* H5_HAVE_FILTER_DEFLATE */ + +#ifdef H5_HAVE_FILTER_SZIP + hsize_t szip_size; /* Size of dataset with szip filter */ + unsigned szip_options_mask = H5_SZIP_NN_OPTION_MASK; + unsigned szip_pixels_per_block = 4; +#endif /* H5_HAVE_FILTER_SZIP */ + +#if 0 + hsize_t shuffle_size; /* Size of dataset with shuffle filter */ +#endif + +#if (defined H5_HAVE_FILTER_DEFLATE || defined H5_HAVE_FILTER_SZIP) + hsize_t combo_size; /* Size of dataset with multiple filters */ +#endif /* H5_HAVE_FILTER_DEFLATE || H5_HAVE_FILTER_SZIP */ + + filename = PARATESTFILE /* GetTestParameters() */; + + if (VERBOSE_MED) + HDprintf("Parallel reading of dataset written with filters %s\n", filename); + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FILTERS)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf( + " API functions for basic file, dataset or filter aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + /*---------------------------------------------------------- + * STEP 0: Test without filters. + *---------------------------------------------------------- + */ + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); + + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); + + filter_read_internal(filename, dc, /* &null_size */ NULL); + + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + + /* Run steps 1-3 both with and without filters disabled on partial chunks */ + for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1; + disable_partial_chunk_filters++) { + /* Set chunk options appropriately */ + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); + + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_filter"); + + hrc = H5Pget_chunk_opts(dc, &chunk_opts); + VRFY(hrc >= 0, "H5Pget_chunk_opts"); + + if (disable_partial_chunk_filters) + chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; + + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + + /*---------------------------------------------------------- + * STEP 1: Test Fletcher32 Checksum by itself. 
+ *---------------------------------------------------------- + */ +#ifdef H5_HAVE_FILTER_FLETCHER32 + + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pset_filter"); + + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_filter"); + + hrc = H5Pset_chunk_opts(dc, chunk_opts); + VRFY(hrc >= 0, "H5Pset_chunk_opts"); + + hrc = H5Pset_filter(dc, H5Z_FILTER_FLETCHER32, 0, 0, NULL); + VRFY(hrc >= 0, "H5Pset_filter"); + + filter_read_internal(filename, dc, &fletcher32_size); + VRFY(fletcher32_size > null_size, "Size after checksumming is incorrect."); + + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + +#endif /* H5_HAVE_FILTER_FLETCHER32 */ + + /*---------------------------------------------------------- + * STEP 2: Test deflation by itself. + *---------------------------------------------------------- + */ +#ifdef H5_HAVE_FILTER_DEFLATE + + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); + + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); + + hrc = H5Pset_chunk_opts(dc, chunk_opts); + VRFY(hrc >= 0, "H5Pset_chunk_opts"); + + hrc = H5Pset_deflate(dc, 6); + VRFY(hrc >= 0, "H5Pset_deflate"); + + filter_read_internal(filename, dc, &deflate_size); + + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + +#endif /* H5_HAVE_FILTER_DEFLATE */ + + /*---------------------------------------------------------- + * STEP 3: Test szip compression by itself. + *---------------------------------------------------------- + */ +#ifdef H5_HAVE_FILTER_SZIP + if (h5_szip_can_encode() == 1) { + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); + + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); + + hrc = H5Pset_chunk_opts(dc, chunk_opts); + VRFY(hrc >= 0, "H5Pset_chunk_opts"); + + hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); + VRFY(hrc >= 0, "H5Pset_szip"); + + filter_read_internal(filename, dc, &szip_size); + + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + } +#endif /* H5_HAVE_FILTER_SZIP */ + } /* end for */ + + /*---------------------------------------------------------- + * STEP 4: Test shuffling by itself. + *---------------------------------------------------------- + */ + + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); + + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); + + hrc = H5Pset_shuffle(dc); + VRFY(hrc >= 0, "H5Pset_shuffle"); + + filter_read_internal(filename, dc, /* &shuffle_size */ NULL); +#if 0 + VRFY(shuffle_size == null_size, "Shuffled size not the same as uncompressed size."); +#endif + + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + + /*---------------------------------------------------------- + * STEP 5: Test shuffle + deflate + checksum in any order. 
+ *---------------------------------------------------------- + */ +#ifdef H5_HAVE_FILTER_DEFLATE + /* Testing shuffle+deflate+checksum filters (checksum first) */ + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); + + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); + + hrc = H5Pset_fletcher32(dc); + VRFY(hrc >= 0, "H5Pset_fletcher32"); + + hrc = H5Pset_shuffle(dc); + VRFY(hrc >= 0, "H5Pset_shuffle"); + + hrc = H5Pset_deflate(dc, 6); + VRFY(hrc >= 0, "H5Pset_deflate"); + + filter_read_internal(filename, dc, &combo_size); + + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + + /* Testing shuffle+deflate+checksum filters (checksum last) */ + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); + + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); + + hrc = H5Pset_shuffle(dc); + VRFY(hrc >= 0, "H5Pset_shuffle"); + + hrc = H5Pset_deflate(dc, 6); + VRFY(hrc >= 0, "H5Pset_deflate"); + + hrc = H5Pset_fletcher32(dc); + VRFY(hrc >= 0, "H5Pset_fletcher32"); + + filter_read_internal(filename, dc, &combo_size); + + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + +#endif /* H5_HAVE_FILTER_DEFLATE */ + + /*---------------------------------------------------------- + * STEP 6: Test shuffle + szip + checksum in any order. + *---------------------------------------------------------- + */ +#ifdef H5_HAVE_FILTER_SZIP + + /* Testing shuffle+szip(with encoder)+checksum filters(checksum first) */ + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); + + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); + + hrc = H5Pset_fletcher32(dc); + VRFY(hrc >= 0, "H5Pset_fletcher32"); + + hrc = H5Pset_shuffle(dc); + VRFY(hrc >= 0, "H5Pset_shuffle"); + + /* Make sure encoding is enabled */ + if (h5_szip_can_encode() == 1) { + hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); + VRFY(hrc >= 0, "H5Pset_szip"); + + filter_read_internal(filename, dc, &combo_size); + } + + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + + /* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */ + /* Make sure encoding is enabled */ + if (h5_szip_can_encode() == 1) { + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); + + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); + + hrc = H5Pset_shuffle(dc); + VRFY(hrc >= 0, "H5Pset_shuffle"); + + hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); + VRFY(hrc >= 0, "H5Pset_szip"); + + hrc = H5Pset_fletcher32(dc); + VRFY(hrc >= 0, "H5Pset_fletcher32"); + + filter_read_internal(filename, dc, &combo_size); + + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + } + +#endif /* H5_HAVE_FILTER_SZIP */ +} diff --git a/testpar/API/t_mdset.c b/testpar/API/t_mdset.c new file mode 100644 index 00000000000..e11818f5cf2 --- /dev/null +++ b/testpar/API/t_mdset.c @@ -0,0 +1,2814 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. 
* + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "hdf5.h" +#include "testphdf5.h" + +#if 0 +#include "H5Dprivate.h" +#include "H5private.h" +#endif + +#define DIM 2 +#define SIZE 32 +#define NDATASET 4 +#define GROUP_DEPTH 32 +enum obj_type { is_group, is_dset }; + +static int get_size(void); +static void write_dataset(hid_t, hid_t, hid_t); +static int read_dataset(hid_t, hid_t, hid_t); +static void create_group_recursive(hid_t, hid_t, hid_t, int); +static void recursive_read_group(hid_t, hid_t, hid_t, int); +static void group_dataset_read(hid_t fid, int mpi_rank, int m); +static void write_attribute(hid_t, int, int); +static int read_attribute(hid_t, int, int); +static int check_value(DATATYPE *, DATATYPE *, int); +static void get_slab(hsize_t[], hsize_t[], hsize_t[], hsize_t[], int); + +/* + * The size value computed by this function is used extensively in + * configuring tests for the current number of processes. + * + * This function was created as part of an effort to allow the + * test functions in this file to run on an arbitrary number of + * processors. + * JRM - 8/11/04 + */ + +static int +get_size(void) +{ + int mpi_rank; + int mpi_size; + int size = SIZE; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* needed for VRFY */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + if (mpi_size > size) { + if ((mpi_size % 2) == 0) { + size = mpi_size; + } + else { + size = mpi_size + 1; + } + } + + VRFY((mpi_size <= size), "mpi_size <= size"); + VRFY(((size % 2) == 0), "size isn't even"); + + return (size); + +} /* get_size() */ + +/* + * Example of using PHDF5 to create a zero sized dataset. + * + */ +void +zero_dim_dset(void) +{ + int mpi_size, mpi_rank; + const char *filename; + hid_t fid, plist, dcpl, dsid, sid; + hsize_t dim, chunk_dim; + herr_t ret; + int data[1]; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + filename = PARATESTFILE /* GetTestParameters() */; + + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((plist >= 0), "create_faccess_plist succeeded"); + + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + VRFY((fid >= 0), "H5Fcreate succeeded"); + ret = H5Pclose(plist); + VRFY((ret >= 0), "H5Pclose succeeded"); + + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl >= 0), "failed H5Pcreate"); + + /* Set 1 chunk size */ + chunk_dim = 1; + ret = H5Pset_chunk(dcpl, 1, &chunk_dim); + VRFY((ret >= 0), "failed H5Pset_chunk"); + + /* Create 1D dataspace with 0 dim size */ + dim = 0; + sid = H5Screate_simple(1, &dim, NULL); + VRFY((sid >= 0), "failed H5Screate_simple"); + + /* Create chunked dataset */ + dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dsid >= 0), "failed H5Dcreate2"); + + /* write 0 elements from dataset */ + ret = H5Dwrite(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data); + VRFY((ret >= 0), "failed H5Dwrite"); + + /* Read 0 elements from dataset */ + ret = H5Dread(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data); + VRFY((ret >= 0), "failed 
H5Dread"); + + H5Pclose(dcpl); + H5Dclose(dsid); + H5Sclose(sid); + H5Fclose(fid); +} + +/* + * Example of using PHDF5 to create ndatasets datasets. Each process write + * a slab of array to the file. + */ +void +multiple_dset_write(void) +{ + int i, j, n, mpi_size, mpi_rank, size; + hid_t iof, plist, dataset, memspace, filespace; + hid_t dcpl; /* Dataset creation property list */ + hsize_t chunk_origin[DIM]; + hsize_t chunk_dims[DIM], file_dims[DIM]; + hsize_t count[DIM] = {1, 1}; + double *outme = NULL; + double fill = 1.0; /* Fill value */ + char dname[100]; + herr_t ret; +#if 0 + const H5Ptest_param_t *pt; +#endif + char *filename; + int ndatasets; + +#if 0 + pt = GetTestParameters(); +#endif + /* filename = pt->name; */ filename = PARATESTFILE; + /* ndatasets = pt->count; */ ndatasets = NDATASETS; + + size = get_size(); + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + outme = HDmalloc((size_t)size * (size_t)size * sizeof(double)); + VRFY((outme != NULL), "HDmalloc succeeded for outme"); + + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((plist >= 0), "create_faccess_plist succeeded"); + iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + VRFY((iof >= 0), "H5Fcreate succeeded"); + ret = H5Pclose(plist); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* decide the hyperslab according to process number. */ + get_slab(chunk_origin, chunk_dims, count, file_dims, size); + + memspace = H5Screate_simple(DIM, chunk_dims, NULL); + filespace = H5Screate_simple(DIM, file_dims, NULL); + ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + VRFY((ret >= 0), "mdata hyperslab selection"); + + /* Create a dataset creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl >= 0), "dataset creation property list succeeded"); + + ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill); + VRFY((ret >= 0), "set fill-value succeeded"); + + for (n = 0; n < ndatasets; n++) { + HDsnprintf(dname, sizeof(dname), "dataset %d", n); + dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset > 0), dname); + + /* calculate data to write */ + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + outme[(i * size) + j] = n * 1000 + mpi_rank; + + H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme); + + H5Dclose(dataset); +#ifdef BARRIER_CHECKS + if (!((n + 1) % 10)) { + HDprintf("created %d datasets\n", n + 1); + MPI_Barrier(MPI_COMM_WORLD); + } +#endif /* BARRIER_CHECKS */ + } + + H5Sclose(filespace); + H5Sclose(memspace); + H5Pclose(dcpl); + H5Fclose(iof); + + HDfree(outme); +} + +/* Example of using PHDF5 to create, write, and read compact dataset. 
+ */ +void +compact_dataset(void) +{ + int i, j, mpi_size, mpi_rank, size, err_num = 0; + hid_t iof, plist, dcpl, dxpl, dataset, filespace; + hsize_t file_dims[DIM]; + double *outme; + double *inme; + char dname[] = "dataset"; + herr_t ret; + const char *filename; +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + hbool_t prop_value; +#endif + + size = get_size(); + + for (i = 0; i < DIM; i++) + file_dims[i] = (hsize_t)size; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + outme = HDmalloc((size_t)((size_t)size * (size_t)size * sizeof(double))); + VRFY((outme != NULL), "HDmalloc succeeded for outme"); + + inme = HDmalloc((size_t)size * (size_t)size * sizeof(double)); + VRFY((outme != NULL), "HDmalloc succeeded for inme"); + + filename = PARATESTFILE /* GetTestParameters() */; + VRFY((mpi_size <= size), "mpi_size <= size"); + + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + + /* Define data space */ + filespace = H5Screate_simple(DIM, file_dims, NULL); + + /* Create a compact dataset */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl >= 0), "dataset creation property list succeeded"); + ret = H5Pset_layout(dcpl, H5D_COMPACT); + VRFY((dcpl >= 0), "set property list for compact dataset"); + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); + VRFY((ret >= 0), "set space allocation time for compact dataset"); + + dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2 succeeded"); + + /* set up the collective transfer properties list */ + dxpl = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl >= 0), ""); + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* Recalculate data to write. Each process writes the same data. */ + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + outme[(i * size) + j] = (i + j) * 1000; + + ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + H5Pclose(dcpl); + H5Pclose(plist); + H5Dclose(dataset); + H5Sclose(filespace); + H5Fclose(iof); + + /* Open the file and dataset, read and compare the data. 
*/ + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + iof = H5Fopen(filename, H5F_ACC_RDONLY, plist); + VRFY((iof >= 0), "H5Fopen succeeded"); + + /* set up the collective transfer properties list */ + dxpl = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl >= 0), ""); + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + dataset = H5Dopen2(iof, dname, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dopen2 succeeded"); + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF; + ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL, + NULL, NULL, NULL, NULL, NULL); + VRFY((ret >= 0), "H5Pinsert2() succeeded"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + ret = H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, inme); + VRFY((ret >= 0), "H5Dread succeeded"); + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = FALSE; + ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); + VRFY((ret >= 0), "H5Pget succeeded"); + VRFY((prop_value == FALSE && dxfer_coll_type == DXFER_COLLECTIVE_IO), + "rank 0 Bcast optimization was performed for a compact dataset"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + /* Verify data value */ + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + if (!H5_DBL_ABS_EQUAL(inme[(i * size) + j], outme[(i * size) + j])) + if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) + HDprintf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, + outme[(i * size) + j], inme[(i * size) + j]); + + H5Pclose(plist); + H5Pclose(dxpl); + H5Dclose(dataset); + H5Fclose(iof); + HDfree(inme); + HDfree(outme); +} + +/* + * Example of using PHDF5 to create, write, and read dataset and attribute + * of Null dataspace. 
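+ *
+ * A null dataspace contains no elements, so the H5Dwrite/H5Awrite calls
+ * below transfer no data even though buffers are supplied. A sketch
+ * (error checks omitted):
+ *
+ *     sid = H5Screate(H5S_NULL);
+ *     H5Sget_simple_extent_npoints(sid);    returns 0
+ *     H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, &uval);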
+ */ +void +null_dataset(void) +{ + int mpi_size, mpi_rank; + hid_t iof, plist, dxpl, dataset, attr, sid; + unsigned uval = 2; /* Buffer for writing to dataset */ + int val = 1; /* Buffer for writing to attribute */ + hssize_t nelem; + char dname[] = "dataset"; + char attr_name[] = "attribute"; + herr_t ret; + const char *filename; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset, or attribute aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + filename = PARATESTFILE /* GetTestParameters() */; + + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + + /* Define data space */ + sid = H5Screate(H5S_NULL); + + /* Check that the null dataspace actually has 0 elements */ + nelem = H5Sget_simple_extent_npoints(sid); + VRFY((nelem == 0), "H5Sget_simple_extent_npoints"); + + /* Create a compact dataset */ + dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2 succeeded"); + + /* set up the collective transfer properties list */ + dxpl = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl >= 0), ""); + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* Write "nothing" to the dataset(with type conversion) */ + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, &uval); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* Create an attribute for the group */ + attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); + VRFY((attr >= 0), "H5Acreate2"); + + /* Write "nothing" to the attribute(with type conversion) */ + ret = H5Awrite(attr, H5T_NATIVE_INT, &val); + VRFY((ret >= 0), "H5Awrite"); + + H5Aclose(attr); + H5Dclose(dataset); + H5Pclose(plist); + H5Sclose(sid); + H5Fclose(iof); + + /* Open the file and dataset, read and compare the data. 
*/ + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + iof = H5Fopen(filename, H5F_ACC_RDONLY, plist); + VRFY((iof >= 0), "H5Fopen succeeded"); + + /* set up the collective transfer properties list */ + dxpl = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl >= 0), ""); + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + dataset = H5Dopen2(iof, dname, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dopen2 succeeded"); + + /* Try reading from the dataset(make certain our buffer is unmodified) */ + ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, dxpl, &uval); + VRFY((ret >= 0), "H5Dread"); + VRFY((uval == 2), "H5Dread"); + + /* Open the attribute for the dataset */ + attr = H5Aopen(dataset, attr_name, H5P_DEFAULT); + VRFY((attr >= 0), "H5Aopen"); + + /* Try reading from the attribute(make certain our buffer is unmodified) */ ret = + H5Aread(attr, H5T_NATIVE_INT, &val); + VRFY((ret >= 0), "H5Aread"); + VRFY((val == 1), "H5Aread"); + + H5Pclose(plist); + H5Pclose(dxpl); + H5Aclose(attr); + H5Dclose(dataset); + H5Fclose(iof); +} + +/* Example of using PHDF5 to create "large" datasets. (>2GB, >4GB, >8GB) + * Actual data is _not_ written to these datasets. Dataspaces are exact + * sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over + * the boundary of interest. + */ +void +big_dataset(void) +{ + int mpi_size, mpi_rank; /* MPI info */ + hid_t iof, /* File ID */ + fapl, /* File access property list ID */ + dataset, /* Dataset ID */ + filespace; /* Dataset's dataspace ID */ + hsize_t file_dims[4]; /* Dimensions of dataspace */ + char dname[] = "dataset"; /* Name of dataset */ +#if 0 + MPI_Offset file_size; /* Size of file on disk */ +#endif + herr_t ret; /* Generic return value */ + const char *filename; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + /* Verify MPI_Offset can handle larger than 2GB sizes */ + VRFY((sizeof(MPI_Offset) > 4), "sizeof(MPI_Offset)>4"); + + filename = PARATESTFILE /* GetTestParameters() */; + + fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl >= 0), "create_faccess_plist succeeded"); + + /* + * Create >2GB HDF5 file + */ + iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((iof >= 0), "H5Fcreate succeeded"); + + /* Define dataspace for 2GB dataspace */ + file_dims[0] = 2; + file_dims[1] = 1024; + file_dims[2] = 1024; + file_dims[3] = 1024; + filespace = H5Screate_simple(4, file_dims, NULL); + VRFY((filespace >= 0), "H5Screate_simple succeeded"); + + dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2 succeeded"); + + /* Close all file objects */ + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Sclose(filespace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Fclose(iof); + VRFY((ret >= 0), "H5Fclose succeeded"); + +#if 0 + /* Check 
that file of the correct size was created */ + file_size = h5_get_file_size(filename, fapl); + VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)"); +#endif + + /* + * Create >4GB HDF5 file + */ + iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((iof >= 0), "H5Fcreate succeeded"); + + /* Define dataspace for 4GB dataspace */ + file_dims[0] = 4; + file_dims[1] = 1024; + file_dims[2] = 1024; + file_dims[3] = 1024; + filespace = H5Screate_simple(4, file_dims, NULL); + VRFY((filespace >= 0), "H5Screate_simple succeeded"); + + dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2 succeeded"); + + /* Close all file objects */ + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Sclose(filespace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Fclose(iof); + VRFY((ret >= 0), "H5Fclose succeeded"); +#if 0 + /* Check that file of the correct size was created */ + file_size = h5_get_file_size(filename, fapl); + VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)"); +#endif + + /* + * Create >8GB HDF5 file + */ + iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((iof >= 0), "H5Fcreate succeeded"); + + /* Define dataspace for 8GB dataspace */ + file_dims[0] = 8; + file_dims[1] = 1024; + file_dims[2] = 1024; + file_dims[3] = 1024; + filespace = H5Screate_simple(4, file_dims, NULL); + VRFY((filespace >= 0), "H5Screate_simple succeeded"); + + dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2 succeeded"); + + /* Close all file objects */ + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Sclose(filespace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Fclose(iof); + VRFY((ret >= 0), "H5Fclose succeeded"); +#if 0 + /* Check that file of the correct size was created */ + file_size = h5_get_file_size(filename, fapl); + VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)"); +#endif + + /* Close fapl */ + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); +} + +/* Example of using PHDF5 to read a partial written dataset. The dataset does + * not have actual data written to the entire raw data area and relies on the + * default fill value of zeros to work correctly. 
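+ *
+ * In outline (a sketch of the shape used below, not additional test code):
+ * the dataset is (mpi_size + 1) x 6 x 7 x 8, each rank writes only row
+ * [mpi_rank], so row [mpi_size] is never written and must read back as the
+ * default fill value 0:
+ *
+ *     dset_dims[0] = (hsize_t)(mpi_size + 1);
+ *     req_start[0] = (hsize_t)mpi_rank;    each rank selects its own row
+ *     req_count[0] = 1;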
+ */ +void +dataset_fillvalue(void) +{ + int mpi_size, mpi_rank; /* MPI info */ + int err_num; /* Number of errors */ + hid_t iof, /* File ID */ + fapl, /* File access property list ID */ + dxpl, /* Data transfer property list ID */ + dataset, /* Dataset ID */ + memspace, /* Memory dataspace ID */ + filespace; /* Dataset's dataspace ID */ + char dname[] = "dataset"; /* Name of dataset */ + hsize_t dset_dims[4] = {0, 6, 7, 8}; + hsize_t req_start[4] = {0, 0, 0, 0}; + hsize_t req_count[4] = {1, 6, 7, 8}; + hsize_t dset_size; /* Dataset size */ + int *rdata, *wdata; /* Buffers for data to read and write */ + int *twdata, *trdata; /* Temporary pointer into buffer */ + int acc, i, ii, j, k, l; /* Local index variables */ + herr_t ret; /* Generic return value */ + const char *filename; +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + hbool_t prop_value; +#endif + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + filename = PARATESTFILE /* GetTestParameters() */; + + /* Set the dataset dimension to be one row more than number of processes */ + /* and calculate the actual dataset size. */ + dset_dims[0] = (hsize_t)(mpi_size + 1); + dset_size = dset_dims[0] * dset_dims[1] * dset_dims[2] * dset_dims[3]; + + /* Allocate space for the buffers */ + rdata = HDmalloc((size_t)(dset_size * sizeof(int))); + VRFY((rdata != NULL), "HDcalloc succeeded for read buffer"); + wdata = HDmalloc((size_t)(dset_size * sizeof(int))); + VRFY((wdata != NULL), "HDmalloc succeeded for write buffer"); + + fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl >= 0), "create_faccess_plist succeeded"); + + /* + * Create HDF5 file + */ + iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((iof >= 0), "H5Fcreate succeeded"); + + filespace = H5Screate_simple(4, dset_dims, NULL); + VRFY((filespace >= 0), "File H5Screate_simple succeeded"); + + dataset = H5Dcreate2(iof, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2 succeeded"); + + memspace = H5Screate_simple(4, dset_dims, NULL); + VRFY((memspace >= 0), "Memory H5Screate_simple succeeded"); + + /* + * Read dataset before any data is written. 
+ */ + + /* Create DXPL for I/O */ + dxpl = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl >= 0), "H5Pcreate succeeded"); + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF; + ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL, + NULL, NULL, NULL, NULL, NULL); + VRFY((ret >= 0), "testing property list inserted succeeded"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + for (ii = 0; ii < 2; ii++) { + + if (ii == 0) + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT); + else + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* set entire read buffer with the constant 2 */ + HDmemset(rdata, 2, (size_t)(dset_size * sizeof(int))); + + /* Read the entire dataset back */ + ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata); + VRFY((ret >= 0), "H5Dread succeeded"); + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = FALSE; + ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); + VRFY((ret >= 0), "testing property list get succeeded"); + if (ii == 0) + VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast"); + else + VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + /* Verify all data read are the fill value 0 */ + trdata = rdata; + err_num = 0; + for (i = 0; i < (int)dset_dims[0]; i++) + for (j = 0; j < (int)dset_dims[1]; j++) + for (k = 0; k < (int)dset_dims[2]; k++) + for (l = 0; l < (int)dset_dims[3]; l++, trdata++) + if (*trdata != 0) + if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) + HDprintf( + "Rank %d: Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", + mpi_rank, i, j, k, l, *trdata); + if (err_num > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("Rank %d: [more errors ...]\n", mpi_rank); + if (err_num) { + HDprintf("Rank %d: %d errors found in check_value\n", mpi_rank, err_num); + nerrors++; + } + } + + /* Barrier to ensure all processes have completed the above test. */ + MPI_Barrier(MPI_COMM_WORLD); + + /* + * Each process writes 1 row of data. Thus last row is not written. + */ + /* Create hyperslabs in memory and file dataspaces */ + req_start[0] = (hsize_t)mpi_rank; + ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL); + VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace"); + ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL); + VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace"); + + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* Fill write buffer with some values */ + twdata = wdata; + for (i = 0, acc = 0; i < (int)dset_dims[0]; i++) + for (j = 0; j < (int)dset_dims[1]; j++) + for (k = 0; k < (int)dset_dims[2]; k++) + for (l = 0; l < (int)dset_dims[3]; l++) + *twdata++ = acc++; + + /* Collectively write a hyperslab of data to the dataset */ + ret = H5Dwrite(dataset, H5T_NATIVE_INT, memspace, filespace, dxpl, wdata); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* Barrier here, to allow processes to sync */ + MPI_Barrier(MPI_COMM_WORLD); + + /* + * Read dataset after partial write. 
+ */ + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF; + ret = H5Pset(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); + VRFY((ret >= 0), " H5Pset succeeded"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + for (ii = 0; ii < 2; ii++) { + + if (ii == 0) + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT); + else + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* set entire read buffer with the constant 2 */ + HDmemset(rdata, 2, (size_t)(dset_size * sizeof(int))); + + /* Read the entire dataset back */ + ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata); + VRFY((ret >= 0), "H5Dread succeeded"); + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = FALSE; + ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); + VRFY((ret >= 0), "testing property list get succeeded"); + if (ii == 0) + VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast"); + else + VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + /* Verify correct data read */ + twdata = wdata; + trdata = rdata; + err_num = 0; + for (i = 0; i < (int)dset_dims[0]; i++) + for (j = 0; j < (int)dset_dims[1]; j++) + for (k = 0; k < (int)dset_dims[2]; k++) + for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++) + if (i < mpi_size) { + if (*twdata != *trdata) + if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) + HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", + i, j, k, l, *twdata, *trdata); + } /* end if */ + else { + if (*trdata != 0) + if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) + HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", + i, j, k, l, *trdata); + } /* end else */ + if (err_num > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("[more errors ...]\n"); + if (err_num) { + HDprintf("%d errors found in check_value\n", err_num); + nerrors++; + } + } + + /* Close all file objects */ + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Sclose(filespace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Fclose(iof); + VRFY((ret >= 0), "H5Fclose succeeded"); + + /* Close memory dataspace */ + ret = H5Sclose(memspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + + /* Close dxpl */ + ret = H5Pclose(dxpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* Close fapl */ + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* free the buffers */ + HDfree(rdata); + HDfree(wdata); +} + +/* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */ +void +collective_group_write_independent_group_read(void) +{ + collective_group_write(); + independent_group_read(); +} + +/* Write multiple groups with a chunked dataset in each group collectively. + * These groups and datasets are for testing independent read later. 
+ */ +void +collective_group_write(void) +{ + int mpi_rank, mpi_size, size; + int i, j, m; + char gname[64], dname[32]; + hid_t fid, gid, did, plist, dcpl, memspace, filespace; + DATATYPE *outme = NULL; + hsize_t chunk_origin[DIM]; + hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; + hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */ + herr_t ret1, ret2; +#if 0 + const H5Ptest_param_t *pt; +#endif + char *filename; + int ngroups; + +#if 0 + pt = GetTestParameters(); +#endif + /* filename = pt->name; */ filename = PARATESTFILE; + /* ngroups = pt->count; */ ngroups = NGROUPS; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + size = get_size(); + + chunk_size[0] = (hsize_t)(size / 2); + chunk_size[1] = (hsize_t)(size / 2); + + outme = HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE)); + VRFY((outme != NULL), "HDmalloc succeeded for outme"); + + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + VRFY((fid >= 0), "H5Fcreate"); + H5Pclose(plist); + + /* decide the hyperslab according to process number. */ + get_slab(chunk_origin, chunk_dims, count, file_dims, size); + + /* select hyperslab in memory and file spaces. These two operations are + * identical since the datasets are the same. */ + memspace = H5Screate_simple(DIM, file_dims, NULL); + ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + filespace = H5Screate_simple(DIM, file_dims, NULL); + ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + VRFY((memspace >= 0), "memspace"); + VRFY((filespace >= 0), "filespace"); + VRFY((ret1 == 0), "mgroup memspace selection"); + VRFY((ret2 == 0), "mgroup filespace selection"); + + dcpl = H5Pcreate(H5P_DATASET_CREATE); + ret1 = H5Pset_chunk(dcpl, 2, chunk_size); + VRFY((dcpl >= 0), "dataset creation property"); + VRFY((ret1 == 0), "set chunk for dataset creation property"); + + /* creates ngroups groups under the root group, writes chunked + * datasets in parallel. 
*/ + for (m = 0; m < ngroups; m++) { + HDsnprintf(gname, sizeof(gname), "group%d", m); + gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((gid > 0), gname); + + HDsnprintf(dname, sizeof(dname), "dataset%d", m); + did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((did > 0), dname); + + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + outme[(i * size) + j] = (i + j) * 1000 + mpi_rank; + + ret1 = H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme); + VRFY((ret1 == 0), "H5Dwrite"); + + ret1 = H5Dclose(did); + VRFY((ret1 == 0), "H5Dclose"); + + ret1 = H5Gclose(gid); + VRFY((ret1 == 0), "H5Gclose"); + +#ifdef BARRIER_CHECKS + if (!((m + 1) % 10)) { + HDprintf("created %d groups\n", m + 1); + MPI_Barrier(MPI_COMM_WORLD); + } +#endif /* BARRIER_CHECKS */ + } + + H5Pclose(dcpl); + H5Sclose(filespace); + H5Sclose(memspace); + + ret1 = H5Fclose(fid); + VRFY((ret1 == 0), "H5Fclose"); + + HDfree(outme); +} + +/* Let two sets of processes open and read different groups and chunked + * datasets independently. + */ +void +independent_group_read(void) +{ + int mpi_rank, m; + hid_t plist, fid; +#if 0 + const H5Ptest_param_t *pt; +#endif + char *filename; + int ngroups; + herr_t ret; + +#if 0 + pt = GetTestParameters(); +#endif + /* filename = pt->name; */ filename = PARATESTFILE; + /* ngroups = pt->count; */ ngroups = NGROUPS; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + H5Pset_all_coll_metadata_ops(plist, FALSE); + + fid = H5Fopen(filename, H5F_ACC_RDONLY, plist); + VRFY((fid > 0), "H5Fopen"); + H5Pclose(plist); + + /* open groups and read datasets. Odd number processes read even number + * groups from the end; even number processes read odd number groups + * from the beginning. */ + if (mpi_rank % 2 == 0) { + for (m = ngroups - 1; m == 0; m -= 2) + group_dataset_read(fid, mpi_rank, m); + } + else { + for (m = 0; m < ngroups; m += 2) + group_dataset_read(fid, mpi_rank, m); + } + + ret = H5Fclose(fid); + VRFY((ret == 0), "H5Fclose"); +} + +/* Open and read datasets and compare data + */ +static void +group_dataset_read(hid_t fid, int mpi_rank, int m) +{ + int ret, i, j, size; + char gname[64], dname[32]; + hid_t gid, did; + DATATYPE *outdata = NULL; + DATATYPE *indata = NULL; + + size = get_size(); + + indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE)); + VRFY((indata != NULL), "HDmalloc succeeded for indata"); + + outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE)); + VRFY((outdata != NULL), "HDmalloc succeeded for outdata"); + + /* open every group under root group. */ + HDsnprintf(gname, sizeof(gname), "group%d", m); + gid = H5Gopen2(fid, gname, H5P_DEFAULT); + VRFY((gid > 0), gname); + + /* check the data. 
*/ + HDsnprintf(dname, sizeof(dname), "dataset%d", m); + did = H5Dopen2(gid, dname, H5P_DEFAULT); + VRFY((did > 0), dname); + + H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, indata); + + /* this is the original value */ + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank; + + /* compare the original value(outdata) to the value in file(indata).*/ + ret = check_value(indata, outdata, size); + VRFY((ret == 0), "check the data"); + + ret = H5Dclose(did); + VRFY((ret == 0), "H5Dclose"); + ret = H5Gclose(gid); + VRFY((ret == 0), "H5Gclose"); + + HDfree(indata); + HDfree(outdata); +} + +/* + * Example of using PHDF5 to create multiple groups. Under the root group, + * it creates ngroups groups. Under the first group just created, it creates + * recursive subgroups of depth GROUP_DEPTH. In each created group, it + * generates NDATASETS datasets. Each process write a hyperslab of an array + * into the file. The structure is like + * + * root group + * | + * ---------------------------- ... ... ------------------------ + * | | | ... ... | | + * group0*+' group1*+' group2*+' ... ... group ngroups*+' + * | + * 1st_child_group*' + * | + * 2nd_child_group*' + * | + * : + * : + * | + * GROUP_DEPTHth_child_group*' + * + * * means the group has dataset(s). + * + means the group has attribute(s). + * ' means the datasets in the groups have attribute(s). + * + */ +void +multiple_group_write(void) +{ + int mpi_rank, mpi_size, size; + int m; + char gname[64]; + hid_t fid, gid, plist, memspace, filespace; + hsize_t chunk_origin[DIM]; + hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; + herr_t ret; +#if 0 + const H5Ptest_param_t *pt; +#endif + char *filename; + int ngroups; + +#if 0 + pt = GetTestParameters(); +#endif + /* filename = pt->name; */ filename = PARATESTFILE; + /* ngroups = pt->count; */ ngroups = NGROUPS; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, group, dataset, or attribute aren't supported with " + "this connector\n"); + fflush(stdout); + } + + return; + } + + size = get_size(); + + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + H5Pclose(plist); + + /* decide the hyperslab according to process number. */ + get_slab(chunk_origin, chunk_dims, count, file_dims, size); + + /* select hyperslab in memory and file spaces. These two operations are + * identical since the datasets are the same. */ + memspace = H5Screate_simple(DIM, file_dims, NULL); + VRFY((memspace >= 0), "memspace"); + ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + VRFY((ret >= 0), "mgroup memspace selection"); + + filespace = H5Screate_simple(DIM, file_dims, NULL); + VRFY((filespace >= 0), "filespace"); + ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + VRFY((ret >= 0), "mgroup filespace selection"); + + /* creates ngroups groups under the root group, writes datasets in + * parallel. 
*/ + for (m = 0; m < ngroups; m++) { + HDsnprintf(gname, sizeof(gname), "group%d", m); + gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((gid > 0), gname); + + /* create attribute for these groups. */ + write_attribute(gid, is_group, m); + + if (m != 0) + write_dataset(memspace, filespace, gid); + + H5Gclose(gid); + +#ifdef BARRIER_CHECKS + if (!((m + 1) % 10)) { + HDprintf("created %d groups\n", m + 1); + MPI_Barrier(MPI_COMM_WORLD); + } +#endif /* BARRIER_CHECKS */ + } + + /* recursively creates subgroups under the first group. */ + gid = H5Gopen2(fid, "group0", H5P_DEFAULT); + create_group_recursive(memspace, filespace, gid, 0); + ret = H5Gclose(gid); + VRFY((ret >= 0), "H5Gclose"); + + ret = H5Sclose(filespace); + VRFY((ret >= 0), "H5Sclose"); + ret = H5Sclose(memspace); + VRFY((ret >= 0), "H5Sclose"); + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose"); +} + +/* + * In a group, creates NDATASETS datasets. Each process writes a hyperslab + * of a data array to the file. + */ +static void +write_dataset(hid_t memspace, hid_t filespace, hid_t gid) +{ + int i, j, n, size; + int mpi_rank, mpi_size; + char dname[32]; + DATATYPE *outme = NULL; + hid_t did; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + size = get_size(); + + outme = HDmalloc((size_t)size * (size_t)size * sizeof(double)); + VRFY((outme != NULL), "HDmalloc succeeded for outme"); + + for (n = 0; n < NDATASET; n++) { + HDsnprintf(dname, sizeof(dname), "dataset%d", n); + did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((did > 0), dname); + + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + outme[(i * size) + j] = n * 1000 + mpi_rank; + + H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme); + + /* create attribute for these datasets.*/ + write_attribute(did, is_dset, n); + + H5Dclose(did); + } + HDfree(outme); +} + +/* + * Creates subgroups of depth GROUP_DEPTH recursively. Also writes datasets + * in parallel in each group. + */ +static void +create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter) +{ + hid_t child_gid; + int mpi_rank; + char gname[64]; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + +#ifdef BARRIER_CHECKS + if (!((counter + 1) % 10)) { + HDprintf("created %dth child groups\n", counter + 1); + MPI_Barrier(MPI_COMM_WORLD); + } +#endif /* BARRIER_CHECKS */ + + HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1); + child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((child_gid > 0), gname); + + /* write datasets in parallel. */ + write_dataset(memspace, filespace, gid); + + if (counter < GROUP_DEPTH) + create_group_recursive(memspace, filespace, child_gid, counter + 1); + + H5Gclose(child_gid); +} + +/* + * This function is to verify the data from multiple group testing. It opens + * every dataset in every group and check their correctness. 
+ */ +void +multiple_group_read(void) +{ + int mpi_rank, mpi_size, error_num, size; + int m; + char gname[64]; + hid_t plist, fid, gid, memspace, filespace; + hsize_t chunk_origin[DIM]; + hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; +#if 0 + const H5Ptest_param_t *pt; +#endif + char *filename; + int ngroups; + +#if 0 + pt = GetTestParameters(); +#endif + /* filename = pt->name; */ filename = PARATESTFILE; + /* ngroups = pt->count; */ ngroups = NGROUPS; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, group, dataset, or attribute aren't supported with " + "this connector\n"); + fflush(stdout); + } + + return; + } + + size = get_size(); + + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + fid = H5Fopen(filename, H5F_ACC_RDONLY, plist); + H5Pclose(plist); + + /* decide hyperslab for each process */ + get_slab(chunk_origin, chunk_dims, count, file_dims, size); + + /* select hyperslab for memory and file space */ + memspace = H5Screate_simple(DIM, file_dims, NULL); + H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + filespace = H5Screate_simple(DIM, file_dims, NULL); + H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + + /* open every group under root group. */ + for (m = 0; m < ngroups; m++) { + HDsnprintf(gname, sizeof(gname), "group%d", m); + gid = H5Gopen2(fid, gname, H5P_DEFAULT); + VRFY((gid > 0), gname); + + /* check the data. */ + if (m != 0) + if ((error_num = read_dataset(memspace, filespace, gid)) > 0) + nerrors += error_num; + + /* check attribute.*/ + error_num = 0; + if ((error_num = read_attribute(gid, is_group, m)) > 0) + nerrors += error_num; + + H5Gclose(gid); + +#ifdef BARRIER_CHECKS + if (!((m + 1) % 10)) + MPI_Barrier(MPI_COMM_WORLD); +#endif /* BARRIER_CHECKS */ + } + + /* open all the groups in vertical direction. */ + gid = H5Gopen2(fid, "group0", H5P_DEFAULT); + VRFY((gid > 0), "group0"); + recursive_read_group(memspace, filespace, gid, 0); + H5Gclose(gid); + + H5Sclose(filespace); + H5Sclose(memspace); + H5Fclose(fid); +} + +/* + * This function opens all the datasets in a certain, checks the data using + * dataset_vrfy function. 
+ */ +static int +read_dataset(hid_t memspace, hid_t filespace, hid_t gid) +{ + int i, j, n, mpi_rank, mpi_size, size, attr_errors = 0, vrfy_errors = 0; + char dname[32]; + DATATYPE *outdata = NULL, *indata = NULL; + hid_t did; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + size = get_size(); + + indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE)); + VRFY((indata != NULL), "HDmalloc succeeded for indata"); + + outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE)); + VRFY((outdata != NULL), "HDmalloc succeeded for outdata"); + + for (n = 0; n < NDATASET; n++) { + HDsnprintf(dname, sizeof(dname), "dataset%d", n); + did = H5Dopen2(gid, dname, H5P_DEFAULT); + VRFY((did > 0), dname); + + H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, indata); + + /* this is the original value */ + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) { + *outdata = n * 1000 + mpi_rank; + outdata++; + } + outdata -= size * size; + + /* compare the original value(outdata) to the value in file(indata).*/ + vrfy_errors = check_value(indata, outdata, size); + + /* check attribute.*/ + if ((attr_errors = read_attribute(did, is_dset, n)) > 0) + vrfy_errors += attr_errors; + + H5Dclose(did); + } + + HDfree(indata); + HDfree(outdata); + + return vrfy_errors; +} + +/* + * This recursive function opens all the groups in vertical direction and + * checks the data. + */ +static void +recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter) +{ + hid_t child_gid; + int mpi_rank, err_num = 0; + char gname[64]; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); +#ifdef BARRIER_CHECKS + if ((counter + 1) % 10) + MPI_Barrier(MPI_COMM_WORLD); +#endif /* BARRIER_CHECKS */ + + if ((err_num = read_dataset(memspace, filespace, gid))) + nerrors += err_num; + + if (counter < GROUP_DEPTH) { + HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1); + child_gid = H5Gopen2(gid, gname, H5P_DEFAULT); + VRFY((child_gid > 0), gname); + recursive_read_group(memspace, filespace, child_gid, counter + 1); + H5Gclose(child_gid); + } +} + +/* Create and write attribute for a group or a dataset. For groups, attribute + * is a scalar datum; for dataset, it is a one-dimensional array. + */ +static void +write_attribute(hid_t obj_id, int this_type, int num) +{ + hid_t sid, aid; + hsize_t dspace_dims[1] = {8}; + int i, mpi_rank, attr_data[8], dspace_rank = 1; + char attr_name[32]; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + if (this_type == is_group) { + HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num); + sid = H5Screate(H5S_SCALAR); + aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + H5Awrite(aid, H5T_NATIVE_INT, &num); + H5Aclose(aid); + H5Sclose(sid); + } /* end if */ + else if (this_type == is_dset) { + HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num); + for (i = 0; i < 8; i++) + attr_data[i] = i; + sid = H5Screate_simple(dspace_rank, dspace_dims, NULL); + aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); + H5Awrite(aid, H5T_NATIVE_INT, attr_data); + H5Aclose(aid); + H5Sclose(sid); + } /* end else-if */ +} + +/* Read and verify attribute for group or dataset. 
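
write_attribute() above and read_attribute() below are the two halves of a simple attribute round trip. Reduced to one self-contained helper, the pattern looks like the sketch below; attr_roundtrip is a hypothetical name, the 8-element payload mirrors the test's dataset attributes, and error checking is omitted.

#include <stdio.h>
#include "hdf5.h"

static int
attr_roundtrip(hid_t obj_id, int num)
{
    hsize_t dims[1] = {8};
    int     wdata[8], rdata[8], i, nerr = 0;
    hid_t   sid, aid;
    char    name[32];

    snprintf(name, sizeof(name), "Dataset Attribute %d", num);
    for (i = 0; i < 8; i++)
        wdata[i] = i;

    /* create the attribute and write the reference data */
    sid = H5Screate_simple(1, dims, NULL);
    aid = H5Acreate2(obj_id, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
    H5Awrite(aid, H5T_NATIVE_INT, wdata);
    H5Aclose(aid);
    H5Sclose(sid);

    /* reopen it by name and compare */
    aid = H5Aopen(obj_id, name, H5P_DEFAULT);
    H5Aread(aid, H5T_NATIVE_INT, rdata);
    H5Aclose(aid);

    for (i = 0; i < 8; i++)
        if (rdata[i] != wdata[i])
            nerr++;
    return nerr;
}
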
*/ +static int +read_attribute(hid_t obj_id, int this_type, int num) +{ + hid_t aid; + hsize_t group_block[2] = {1, 1}, dset_block[2] = {1, 8}; + int i, mpi_rank, in_num, in_data[8], out_data[8], vrfy_errors = 0; + char attr_name[32]; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + if (this_type == is_group) { + HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num); + aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT); + H5Aread(aid, H5T_NATIVE_INT, &in_num); + vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num); + H5Aclose(aid); + } + else if (this_type == is_dset) { + HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num); + for (i = 0; i < 8; i++) + out_data[i] = i; + aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT); + H5Aread(aid, H5T_NATIVE_INT, in_data); + vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data); + H5Aclose(aid); + } + + return vrfy_errors; +} + +/* This functions compares the original data with the read-in data for its + * hyperslab part only by process ID. + */ +static int +check_value(DATATYPE *indata, DATATYPE *outdata, int size) +{ + int mpi_rank, mpi_size, err_num = 0; + hsize_t i, j; + hsize_t chunk_origin[DIM]; + hsize_t chunk_dims[DIM], count[DIM]; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + get_slab(chunk_origin, chunk_dims, count, NULL, size); + + indata += chunk_origin[0] * (hsize_t)size; + outdata += chunk_origin[0] * (hsize_t)size; + for (i = chunk_origin[0]; i < (chunk_origin[0] + chunk_dims[0]); i++) + for (j = chunk_origin[1]; j < (chunk_origin[1] + chunk_dims[1]); j++) { + if (*indata != *outdata) + if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) + HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n", + (unsigned long)i, (unsigned long)j, (unsigned long)i, (unsigned long)j, *outdata, + *indata); + } + if (err_num > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("[more errors ...]\n"); + if (err_num) + HDprintf("%d errors found in check_value\n", err_num); + return err_num; +} + +/* Decide the portion of data chunk in dataset by process ID. + */ + +static void +get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t file_dims[], int size) +{ + int mpi_rank, mpi_size; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + if (chunk_origin != NULL) { + chunk_origin[0] = (hsize_t)mpi_rank * (hsize_t)(size / mpi_size); + chunk_origin[1] = 0; + } + if (chunk_dims != NULL) { + chunk_dims[0] = (hsize_t)(size / mpi_size); + chunk_dims[1] = (hsize_t)size; + } + if (file_dims != NULL) + file_dims[0] = file_dims[1] = (hsize_t)size; + if (count != NULL) + count[0] = count[1] = 1; +} + +/* + * This function is based on bug demonstration code provided by Thomas + * Guignon(thomas.guignon@ifp.fr), and is intended to verify the + * correctness of my fix for that bug. + * + * In essence, the bug appeared when at least one process attempted to + * write a point selection -- for which collective I/O is not supported, + * and at least one other attempted to write some other type of selection + * for which collective I/O is supported. + * + * Since the processes did not compare notes before performing the I/O, + * some would attempt collective I/O while others performed independent + * I/O. A hang resulted. + * + * This function reproduces this situation. At present the test hangs + * on failure. 
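
The rule this test exercises is that every rank on the file's communicator must make the same collective H5Dwrite() call; a rank that has nothing to contribute stays in the call with empty selections rather than skipping it. A minimal sketch of that rule follows; collective_write_all_ranks and have_data are illustrative names, not part of the patch.

#include "hdf5.h"

static void
collective_write_all_ranks(hid_t dset, hid_t memspace, hid_t filespace,
                           const int *buf, int have_data)
{
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    if (!have_data) {
        /* empty selections keep this rank inside the collective call */
        H5Sselect_none(memspace);
        H5Sselect_none(filespace);
    }

    /* every rank reaches this call, so the collective operation can complete */
    H5Dwrite(dset, H5T_NATIVE_INT, memspace, filespace, dxpl, buf);

    H5Pclose(dxpl);
}

With zero elements selected the buffer is never touched, so passing the same buf pointer on every rank is safe.
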
+ * JRM - 9/13/04 + */ + +#define N 4 + +void +io_mode_confusion(void) +{ + /* + * HDF5 APIs definitions + */ + + const int rank = 1; + const char *dataset_name = "IntArray"; + + hid_t file_id, dset_id; /* file and dataset identifiers */ + hid_t filespace, memspace; /* file and memory dataspace */ + /* identifiers */ + hsize_t dimsf[1]; /* dataset dimensions */ + int data[N] = {1}; /* pointer to data buffer to write */ + hsize_t coord[N] = {0L, 1L, 2L, 3L}; + hid_t plist_id; /* property list identifier */ + herr_t status; + + /* + * MPI variables + */ + + int mpi_size, mpi_rank; + + /* + * test bed related variables + */ + + const char *fcn_name = "io_mode_confusion"; + const hbool_t verbose = FALSE; +#if 0 + const H5Ptest_param_t *pt; +#endif + char *filename; + +#if 0 + pt = GetTestParameters(); +#endif + /* filename = pt->name; */ filename = PARATESTFILE; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + /* + * Set up file access property list with parallel I/O access + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name); + + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id != -1), "H5Pcreate() failed"); + + status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL); + VRFY((status >= 0), "H5Pset_fapl_mpio() failed"); + + /* + * Create a new file collectively and release property list identifier. + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Creating new file.\n", mpi_rank, fcn_name); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); + VRFY((file_id >= 0), "H5Fcreate() failed"); + + status = H5Pclose(plist_id); + VRFY((status >= 0), "H5Pclose() failed"); + + /* + * Create the dataspace for the dataset. + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n", mpi_rank, fcn_name); + + dimsf[0] = N; + filespace = H5Screate_simple(rank, dimsf, NULL); + VRFY((filespace >= 0), "H5Screate_simple() failed."); + + /* + * Create the dataset with default properties and close filespace. 
+ */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Creating the dataset, and closing filespace.\n", mpi_rank, fcn_name); + + dset_id = + H5Dcreate2(file_id, dataset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2() failed"); + + status = H5Sclose(filespace); + VRFY((status >= 0), "H5Sclose() failed"); + + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n", mpi_rank, fcn_name); + + memspace = H5Screate_simple(rank, dimsf, NULL); + VRFY((memspace >= 0), "H5Screate_simple() failed."); + + if (mpi_rank == 0) { + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n", mpi_rank, fcn_name); + + status = H5Sselect_all(memspace); + VRFY((status >= 0), "H5Sselect_all() failed"); + } + else { + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n", mpi_rank, fcn_name); + + status = H5Sselect_none(memspace); + VRFY((status >= 0), "H5Sselect_none() failed"); + } + + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name); + + MPI_Barrier(MPI_COMM_WORLD); + + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n", mpi_rank, fcn_name); + + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "H5Dget_space() failed"); + + /* select all */ + if (mpi_rank == 0) { + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n", mpi_rank, fcn_name); + + status = H5Sselect_elements(filespace, H5S_SELECT_SET, N, (const hsize_t *)&coord); + VRFY((status >= 0), "H5Sselect_elements() failed"); + } + else { /* select nothing */ + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n", mpi_rank, fcn_name); + + status = H5Sselect_none(filespace); + VRFY((status >= 0), "H5Sselect_none() failed"); + } + + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name); + + MPI_Barrier(MPI_COMM_WORLD); + + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Pcreate().\n", mpi_rank, fcn_name); + + plist_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((plist_id != -1), "H5Pcreate() failed"); + + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n", mpi_rank, fcn_name); + + status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); + VRFY((status >= 0), "H5Pset_dxpl_mpio() failed"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((status >= 0), "set independent IO collectively succeeded"); + } + + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n", mpi_rank, fcn_name); + + status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data); + + if (verbose) + HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n", mpi_rank, fcn_name, status); + VRFY((status >= 0), "H5Dwrite() failed"); + + /* + * Close/release resources. 
+ */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n", mpi_rank, fcn_name); + + status = H5Dclose(dset_id); + VRFY((status >= 0), "H5Dclose() failed"); + + status = H5Sclose(filespace); + VRFY((status >= 0), "H5Dclose() failed"); + + status = H5Sclose(memspace); + VRFY((status >= 0), "H5Sclose() failed"); + + status = H5Pclose(plist_id); + VRFY((status >= 0), "H5Pclose() failed"); + + status = H5Fclose(file_id); + VRFY((status >= 0), "H5Fclose() failed"); + + if (verbose) + HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); + + return; + +} /* io_mode_confusion() */ + +#undef N + +/* + * At present, the object header code maintains an image of its on disk + * representation, which is updates as necessary instead of generating on + * request. + * + * Prior to the fix that this test in designed to verify, the image of the + * on disk representation was only updated on flush -- not when the object + * header was marked clean. + * + * This worked perfectly well as long as all writes of a given object + * header were written from a single process. However, with the implementation + * of round robin metadata data writes in parallel HDF5, this is no longer + * the case -- it is possible for a given object header to be flushed from + * several different processes, with the object header simply being marked + * clean in all other processes on each flush. This resulted in NULL or + * out of data object header information being written to disk. + * + * To repair this, I modified the object header code to update its + * on disk image both on flush on when marked clean. + * + * This test is directed at verifying that the fix performs as expected. + * + * The test functions by creating a HDF5 file with several small datasets, + * and then flushing the file. This should result of at least one of + * the associated object headers being flushed by a process other than + * process 0. + * + * Then for each data set, add an attribute and flush the file again. + * + * Close the file and re-open it. + * + * Open the each of the data sets in turn. If all opens are successful, + * the test passes. Otherwise the test fails. + * + * Note that this test will probably become irrelevant shortly, when we + * land the journaling modifications on the trunk -- at which point all + * cache clients will have to construct on disk images on demand. + * + * JRM -- 10/13/10 + */ + +#define NUM_DATA_SETS 4 +#define LOCAL_DATA_SIZE 4 +#define LARGE_ATTR_SIZE 256 +/* Since all even and odd processes are split into writer and reader comm + * respectively, process 0 and 1 in COMM_WORLD become the root process of + * the writer and reader comm respectively. + */ +#define Writer_Root 0 +#define Reader_Root 1 +#define Reader_wait(mpi_err, xsteps) mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD) +#define Reader_result(mpi_err, xsteps_done) \ + mpi_err = MPI_Bcast(&xsteps_done, 1, MPI_INT, Reader_Root, MPI_COMM_WORLD) +#define Reader_check(mpi_err, xsteps, xsteps_done) \ + { \ + Reader_wait(mpi_err, xsteps); \ + Reader_result(mpi_err, xsteps_done); \ + } + +/* object names used by both rr_obj_hdr_flush_confusion and + * rr_obj_hdr_flush_confusion_reader. 
+ */ +const char *dataset_name[NUM_DATA_SETS] = {"dataset_0", "dataset_1", "dataset_2", "dataset_3"}; +const char *att_name[NUM_DATA_SETS] = {"attribute_0", "attribute_1", "attribute_2", "attribute_3"}; +const char *lg_att_name[NUM_DATA_SETS] = {"large_attribute_0", "large_attribute_1", "large_attribute_2", + "large_attribute_3"}; + +void +rr_obj_hdr_flush_confusion(void) +{ + /* MPI variables */ + /* private communicator size and rank */ + int mpi_size; + int mpi_rank; + int mrc; /* mpi error code */ + int is_reader; /* 1 for reader process; 0 for writer process. */ + MPI_Comm comm; + + /* test bed related variables */ + const char *fcn_name = "rr_obj_hdr_flush_confusion"; + const hbool_t verbose = FALSE; + + /* Create two new private communicators from MPI_COMM_WORLD. + * Even and odd ranked processes go to comm_writers and comm_readers + * respectively. + */ + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset, attribute, dataset more, attribute more, or " + "file flush aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + HDassert(mpi_size > 2); + + is_reader = mpi_rank % 2; + mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split"); + + /* The reader processes branches off to do reading + * while the writer processes continues to do writing + * Whenever writers finish one writing step, including a H5Fflush, + * they inform the readers, via MPI_COMM_WORLD, to verify. + * They will wait for the result from the readers before doing the next + * step. When all steps are done, they inform readers to end. 
+ */ + if (is_reader) + rr_obj_hdr_flush_confusion_reader(comm); + else + rr_obj_hdr_flush_confusion_writer(comm); + + MPI_Comm_free(&comm); + if (verbose) + HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); + + return; + +} /* rr_obj_hdr_flush_confusion() */ + +void +rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) +{ + int i; + int j; + hid_t file_id = -1; + hid_t fapl_id = -1; + hid_t dxpl_id = -1; + hid_t att_id[NUM_DATA_SETS]; + hid_t att_space[NUM_DATA_SETS]; + hid_t lg_att_id[NUM_DATA_SETS]; + hid_t lg_att_space[NUM_DATA_SETS]; + hid_t disk_space[NUM_DATA_SETS]; + hid_t mem_space[NUM_DATA_SETS]; + hid_t dataset[NUM_DATA_SETS]; + hsize_t att_size[1]; + hsize_t lg_att_size[1]; + hsize_t disk_count[1]; + hsize_t disk_size[1]; + hsize_t disk_start[1]; + hsize_t mem_count[1]; + hsize_t mem_size[1]; + hsize_t mem_start[1]; + herr_t err; + double data[LOCAL_DATA_SIZE]; + double att[LOCAL_DATA_SIZE]; + double lg_att[LARGE_ATTR_SIZE]; + + /* MPI variables */ + /* world communication size and rank */ + int mpi_world_size; + int mpi_world_rank; + /* private communicator size and rank */ + int mpi_size; + int mpi_rank; + int mrc; /* mpi error code */ + /* steps to verify and have been verified */ + int steps = 0; + int steps_done = 0; + + /* test bed related variables */ + const char *fcn_name = "rr_obj_hdr_flush_confusion_writer"; + const hbool_t verbose = FALSE; +#if 0 + const H5Ptest_param_t *pt; +#endif + char *filename; + + /* + * setup test bed related variables: + */ + +#if 0 + pt = (const H5Ptest_param_t *)GetTestParameters(); +#endif + /* filename = pt->name; */ filename = PARATESTFILE; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size); + MPI_Comm_rank(comm, &mpi_rank); + MPI_Comm_size(comm, &mpi_size); + + /* + * Set up file access property list with parallel I/O access + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name); + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed"); + + err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL); + VRFY((err >= 0), "H5Pset_fapl_mpio() failed"); + + /* + * Create a new file collectively and release property list identifier. + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n", mpi_rank, fcn_name, filename); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate() failed"); + + err = H5Pclose(fapl_id); + VRFY((err >= 0), "H5Pclose(fapl_id) failed"); + + /* + * Step 1: create the data sets and write data. 
+ */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Creating the datasets.\n", mpi_rank, fcn_name); + + disk_size[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_size); + mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE); + + for (i = 0; i < NUM_DATA_SETS; i++) { + + disk_space[i] = H5Screate_simple(1, disk_size, NULL); + VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n"); + + dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE, disk_space[i], H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT); + + VRFY((dataset[i] >= 0), "H5Dcreate(1) failed.\n"); + } + + /* + * setup data transfer property list + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n"); + + err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); + VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n"); + + /* + * write data to the data sets + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Writing datasets.\n", mpi_rank, fcn_name); + + disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE); + disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank); + mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE); + mem_start[0] = (hsize_t)(0); + + for (j = 0; j < LOCAL_DATA_SIZE; j++) { + data[j] = (double)(mpi_rank + 1); + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count, NULL); + VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n"); + mem_space[i] = H5Screate_simple(1, mem_size, NULL); + VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); + err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL); + VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n"); + err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id, data); + VRFY((err >= 0), "H5Dwrite(1) failed.\n"); + for (j = 0; j < LOCAL_DATA_SIZE; j++) + data[j] *= 10.0; + } + + /* + * close the data spaces + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name); + + for (i = 0; i < NUM_DATA_SETS; i++) { + err = H5Sclose(disk_space[i]); + VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n"); + err = H5Sclose(mem_space[i]); + VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n"); + } + + /* End of Step 1: create the data sets and write data. */ + + /* + * flush the metadata cache + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); + err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((err >= 0), "H5Fflush(1) failed.\n"); + + /* Tell the reader to check the file up to steps. 
*/ + steps++; + Reader_check(mrc, steps, steps_done); + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); + + /* + * Step 2: write attributes to each dataset + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: writing attributes.\n", mpi_rank, fcn_name); + + att_size[0] = (hsize_t)(LOCAL_DATA_SIZE); + for (j = 0; j < LOCAL_DATA_SIZE; j++) { + att[j] = (double)(j + 1); + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + att_space[i] = H5Screate_simple(1, att_size, NULL); + VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n"); + att_id[i] = + H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE, att_space[i], H5P_DEFAULT, H5P_DEFAULT); + VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n"); + err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att); + VRFY((err >= 0), "H5Awrite(1) failed.\n"); + for (j = 0; j < LOCAL_DATA_SIZE; j++) { + att[j] /= 10.0; + } + } + + /* + * close attribute IDs and spaces + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n", mpi_rank, fcn_name); + + for (i = 0; i < NUM_DATA_SETS; i++) { + err = H5Sclose(att_space[i]); + VRFY((err >= 0), "H5Sclose(att_space[i]) failed.\n"); + err = H5Aclose(att_id[i]); + VRFY((err >= 0), "H5Aclose(att_id[i]) failed.\n"); + } + + /* End of Step 2: write attributes to each dataset */ + + /* + * flush the metadata cache again + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); + err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((err >= 0), "H5Fflush(2) failed.\n"); + + /* Tell the reader to check the file up to steps. */ + steps++; + Reader_check(mrc, steps, steps_done); + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); + + /* + * Step 3: write large attributes to each dataset + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: writing large attributes.\n", mpi_rank, fcn_name); + + lg_att_size[0] = (hsize_t)(LARGE_ATTR_SIZE); + + for (j = 0; j < LARGE_ATTR_SIZE; j++) { + lg_att[j] = (double)(j + 1); + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + lg_att_space[i] = H5Screate_simple(1, lg_att_size, NULL); + VRFY((lg_att_space[i] >= 0), "H5Screate_simple(4) failed.\n"); + lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE, lg_att_space[i], H5P_DEFAULT, + H5P_DEFAULT); + VRFY((lg_att_id[i] >= 0), "H5Acreate(2) failed.\n"); + err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att); + VRFY((err >= 0), "H5Awrite(2) failed.\n"); + for (j = 0; j < LARGE_ATTR_SIZE; j++) { + lg_att[j] /= 10.0; + } + } + + /* Step 3: write large attributes to each dataset */ + + /* + * flush the metadata cache yet again to clean the object headers. + * + * This is an attempt to create a situation where we have dirty + * object header continuation chunks, but clean object headers + * to verify a speculative bug fix -- it doesn't seem to work, + * but I will leave the code in anyway, as the object header + * code is going to change a lot in the near future. + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); + err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((err >= 0), "H5Fflush(3) failed.\n"); + + /* Tell the reader to check the file up to steps. 
*/ + steps++; + Reader_check(mrc, steps, steps_done); + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); + + /* + * Step 4: write different large attributes to each dataset + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: writing different large attributes.\n", mpi_rank, fcn_name); + + for (j = 0; j < LARGE_ATTR_SIZE; j++) { + lg_att[j] = (double)(j + 2); + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att); + VRFY((err >= 0), "H5Awrite(2) failed.\n"); + for (j = 0; j < LARGE_ATTR_SIZE; j++) { + lg_att[j] /= 10.0; + } + } + + /* End of Step 4: write different large attributes to each dataset */ + + /* + * flush the metadata cache again + */ + if (verbose) + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); + err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((err >= 0), "H5Fflush(3) failed.\n"); + + /* Tell the reader to check the file up to steps. */ + steps++; + Reader_check(mrc, steps, steps_done); + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); + + /* Step 5: Close all objects and the file */ + + /* + * close large attribute IDs and spaces + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n", mpi_rank, fcn_name); + + for (i = 0; i < NUM_DATA_SETS; i++) { + + err = H5Sclose(lg_att_space[i]); + VRFY((err >= 0), "H5Sclose(lg_att_space[i]) failed.\n"); + err = H5Aclose(lg_att_id[i]); + VRFY((err >= 0), "H5Aclose(lg_att_id[i]) failed.\n"); + } + + /* + * close the data sets + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing datasets .\n", mpi_rank, fcn_name); + + for (i = 0; i < NUM_DATA_SETS; i++) { + err = H5Dclose(dataset[i]); + VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n"); + } + + /* + * close the data transfer property list. + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name); + + err = H5Pclose(dxpl_id); + VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n"); + + /* + * Close file. + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing file.\n", mpi_rank, fcn_name); + + err = H5Fclose(file_id); + VRFY((err >= 0), "H5Fclose(1) failed"); + + /* End of Step 5: Close all objects and the file */ + /* Tell the reader to check the file up to steps. */ + steps++; + Reader_check(mrc, steps, steps_done); + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); + + /* All done. Inform reader to end. 
*/ + steps = 0; + Reader_check(mrc, steps, steps_done); + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); + + if (verbose) + HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); + + return; + +} /* rr_obj_hdr_flush_confusion_writer() */ + +void +rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) +{ + int i; + int j; + hid_t file_id = -1; + hid_t fapl_id = -1; + hid_t dxpl_id = -1; + hid_t lg_att_id[NUM_DATA_SETS]; + hid_t lg_att_type[NUM_DATA_SETS]; + hid_t disk_space[NUM_DATA_SETS]; + hid_t mem_space[NUM_DATA_SETS]; + hid_t dataset[NUM_DATA_SETS]; + hsize_t disk_count[1]; + hsize_t disk_start[1]; + hsize_t mem_count[1]; + hsize_t mem_size[1]; + hsize_t mem_start[1]; + herr_t err; + htri_t tri_err; + double data[LOCAL_DATA_SIZE]; + double data_read[LOCAL_DATA_SIZE]; + double att[LOCAL_DATA_SIZE]; + double att_read[LOCAL_DATA_SIZE]; + double lg_att[LARGE_ATTR_SIZE]; + double lg_att_read[LARGE_ATTR_SIZE]; + + /* MPI variables */ + /* world communication size and rank */ + int mpi_world_size; + int mpi_world_rank; + /* private communicator size and rank */ + int mpi_size; + int mpi_rank; + int mrc; /* mpi error code */ + int steps = -1; /* How far (steps) to verify the file */ + int steps_done = -1; /* How far (steps) have been verified */ + + /* test bed related variables */ + const char *fcn_name = "rr_obj_hdr_flush_confusion_reader"; + const hbool_t verbose = FALSE; +#if 0 + const H5Ptest_param_t *pt; +#endif + char *filename; + + /* + * setup test bed related variables: + */ + +#if 0 + pt = (const H5Ptest_param_t *)GetTestParameters(); +#endif + /* filename = pt->name; */ filename = PARATESTFILE; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size); + MPI_Comm_rank(comm, &mpi_rank); + MPI_Comm_size(comm, &mpi_size); + + /* Repeatedly re-open the file and verify its contents until it is */ + /* told to end (when steps=0). */ + while (steps_done != 0) { + Reader_wait(mrc, steps); + VRFY((mrc >= 0), "Reader_wait failed"); + steps_done = 0; + + if (steps > 0) { + /* + * Set up file access property list with parallel I/O access + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name); + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed"); + err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL); + VRFY((err >= 0), "H5Pset_fapl_mpio() failed"); + + /* + * Create a new file collectively and release property list identifier. + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n", mpi_rank, fcn_name, filename); + + file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "H5Fopen() failed"); + err = H5Pclose(fapl_id); + VRFY((err >= 0), "H5Pclose(fapl_id) failed"); + +#if 1 + if (steps >= 1) { + /*=====================================================* + * Step 1: open the data sets and read data. 
+ *=====================================================*/ + + if (verbose) + HDfprintf(stdout, "%0d:%s: opening the datasets.\n", mpi_rank, fcn_name); + + for (i = 0; i < NUM_DATA_SETS; i++) { + dataset[i] = -1; + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT); + VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n"); + disk_space[i] = H5Dget_space(dataset[i]); + VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n"); + } + + /* + * setup data transfer property list + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n"); + err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); + VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n"); + + /* + * read data from the data sets + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name); + + disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE); + disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank); + + mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE); + + mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE); + mem_start[0] = (hsize_t)(0); + + /* set up expected data for verification */ + for (j = 0; j < LOCAL_DATA_SIZE; j++) { + data[j] = (double)(mpi_rank + 1); + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count, + NULL); + VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n"); + mem_space[i] = H5Screate_simple(1, mem_size, NULL); + VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); + err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL); + VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n"); + err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id, + data_read); + VRFY((err >= 0), "H5Dread(1) failed.\n"); + + /* compare read data with expected data */ + for (j = 0; j < LOCAL_DATA_SIZE; j++) + if (!H5_DBL_ABS_EQUAL(data_read[j], data[j])) { + HDfprintf(stdout, + "%0d:%s: Reading datasets value failed in " + "Dataset %d, at position %d: expect %f, got %f.\n", + mpi_rank, fcn_name, i, j, data[j], data_read[j]); + nerrors++; + } + for (j = 0; j < LOCAL_DATA_SIZE; j++) + data[j] *= 10.0; + } + + /* + * close the data spaces + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name); + + for (i = 0; i < NUM_DATA_SETS; i++) { + err = H5Sclose(disk_space[i]); + VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n"); + err = H5Sclose(mem_space[i]); + VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n"); + } + steps_done++; + } + /* End of Step 1: open the data sets and read data. 
*/ +#endif + +#if 1 + /*=====================================================* + * Step 2: reading attributes from each dataset + *=====================================================*/ + + if (steps >= 2) { + if (verbose) + HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name); + + for (j = 0; j < LOCAL_DATA_SIZE; j++) { + att[j] = (double)(j + 1); + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + hid_t att_id, att_type; + + att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT); + VRFY((att_id >= 0), "H5Aopen failed.\n"); + att_type = H5Aget_type(att_id); + VRFY((att_type >= 0), "H5Aget_type failed.\n"); + tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE); + VRFY((tri_err >= 0), "H5Tequal failed.\n"); + if (tri_err == 0) { + HDfprintf(stdout, "%0d:%s: Mismatched Attribute type of Dataset %d.\n", mpi_rank, + fcn_name, i); + nerrors++; + } + else { + /* should verify attribute size before H5Aread */ + err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read); + VRFY((err >= 0), "H5Aread failed.\n"); + /* compare read attribute data with expected data */ + for (j = 0; j < LOCAL_DATA_SIZE; j++) + if (!H5_DBL_ABS_EQUAL(att_read[j], att[j])) { + HDfprintf(stdout, + "%0d:%s: Mismatched attribute data read in Dataset %d, at position " + "%d: expect %f, got %f.\n", + mpi_rank, fcn_name, i, j, att[j], att_read[j]); + nerrors++; + } + for (j = 0; j < LOCAL_DATA_SIZE; j++) { + att[j] /= 10.0; + } + } + err = H5Aclose(att_id); + VRFY((err >= 0), "H5Aclose failed.\n"); + } + steps_done++; + } + /* End of Step 2: reading attributes from each dataset */ +#endif + +#if 1 + /*=====================================================* + * Step 3 or 4: read large attributes from each dataset. + * Step 4 has different attribute value from step 3. + *=====================================================*/ + + if (steps >= 3) { + if (verbose) + HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name); + + for (j = 0; j < LARGE_ATTR_SIZE; j++) { + lg_att[j] = (steps == 3) ? (double)(j + 1) : (double)(j + 2); + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT); + VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n"); + lg_att_type[i] = H5Aget_type(lg_att_id[i]); + VRFY((err >= 0), "H5Aget_type failed.\n"); + tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE); + VRFY((tri_err >= 0), "H5Tequal failed.\n"); + if (tri_err == 0) { + HDfprintf(stdout, "%0d:%s: Mismatched Large attribute type of Dataset %d.\n", + mpi_rank, fcn_name, i); + nerrors++; + } + else { + /* should verify large attribute size before H5Aread */ + err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read); + VRFY((err >= 0), "H5Aread failed.\n"); + /* compare read attribute data with expected data */ + for (j = 0; j < LARGE_ATTR_SIZE; j++) + if (!H5_DBL_ABS_EQUAL(lg_att_read[j], lg_att[j])) { + HDfprintf(stdout, + "%0d:%s: Mismatched large attribute data read in Dataset %d, at " + "position %d: expect %f, got %f.\n", + mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]); + nerrors++; + } + for (j = 0; j < LARGE_ATTR_SIZE; j++) { + + lg_att[j] /= 10.0; + } + } + err = H5Tclose(lg_att_type[i]); + VRFY((err >= 0), "H5Tclose failed.\n"); + err = H5Aclose(lg_att_id[i]); + VRFY((err >= 0), "H5Aclose failed.\n"); + } + /* Both step 3 and 4 use this same read checking code. */ + steps_done = (steps == 3) ? 
3 : 4; + } + + /* End of Step 3 or 4: read large attributes from each dataset */ +#endif + + /*=====================================================* + * Step 5: read all objects from the file + *=====================================================*/ + if (steps >= 5) { + /* nothing extra to verify. The file is closed normally. */ + /* Just increment steps_done */ + steps_done++; + } + + /* + * Close the data sets + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing datasets again.\n", mpi_rank, fcn_name); + + for (i = 0; i < NUM_DATA_SETS; i++) { + if (dataset[i] >= 0) { + err = H5Dclose(dataset[i]); + VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n"); + } + } + + /* + * close the data transfer property list. + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name); + + err = H5Pclose(dxpl_id); + VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n"); + + /* + * Close the file + */ + if (verbose) + HDfprintf(stdout, "%0d:%s: closing file again.\n", mpi_rank, fcn_name); + err = H5Fclose(file_id); + VRFY((err >= 0), "H5Fclose(1) failed"); + + } /* else if (steps_done==0) */ + Reader_result(mrc, steps_done); + } /* end while(1) */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); + + return; +} /* rr_obj_hdr_flush_confusion_reader() */ + +#undef NUM_DATA_SETS +#undef LOCAL_DATA_SIZE +#undef LARGE_ATTR_SIZE +#undef Reader_check +#undef Reader_wait +#undef Reader_result +#undef Writer_Root +#undef Reader_Root + +/* + * Test creating a chunked dataset in parallel in a file with an alignment set + * and an alignment threshold large enough to avoid aligning the chunks but + * small enough that the raw data aggregator will be aligned if it is treated as + * an object that must be aligned by the library + */ +#define CHUNK_SIZE 72 +#define NCHUNKS 32 +#define AGGR_SIZE 2048 +#define EXTRA_ALIGN 100 + +void +chunk_align_bug_1(void) +{ + int mpi_rank; + hid_t file_id, dset_id, fapl_id, dcpl_id, space_id; + hsize_t dims = CHUNK_SIZE * NCHUNKS, cdims = CHUNK_SIZE; +#if 0 + h5_stat_size_t file_size; + hsize_t align; +#endif + herr_t ret; + const char *filename; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + filename = (const char *)PARATESTFILE /* GetTestParameters() */; + + /* Create file without alignment */ + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + /* Close file */ + ret = H5Fclose(file_id); + VRFY((ret >= 0), "H5Fclose succeeded"); +#if 0 + /* Get file size */ + file_size = h5_get_file_size(filename, fapl_id); + VRFY((file_size >= 0), "h5_get_file_size succeeded"); + + /* Calculate alignment value, set to allow a chunk to squeak in between the + * original EOF and the aligned location of the aggregator. 
Add some space + * for the dataset metadata */ + align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN; +#endif + + /* Set aggregator size and alignment, disable metadata aggregator */ + HDassert(AGGR_SIZE > CHUNK_SIZE); + ret = H5Pset_small_data_block_size(fapl_id, AGGR_SIZE); + VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded"); + ret = H5Pset_meta_block_size(fapl_id, 0); + VRFY((ret >= 0), "H5Pset_meta_block_size succeeded"); +#if 0 + ret = H5Pset_alignment(fapl_id, CHUNK_SIZE + 1, align); + VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded"); +#endif + + /* Reopen file with new settings */ + file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "H5Fopen succeeded"); + + /* Create dataset */ + space_id = H5Screate_simple(1, &dims, NULL); + VRFY((space_id >= 0), "H5Screate_simple succeeded"); + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); + ret = H5Pset_chunk(dcpl_id, 1, &cdims); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_CHAR, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); + + /* Close ids */ + ret = H5Dclose(dset_id); + VRFY((dset_id >= 0), "H5Dclose succeeded"); + ret = H5Sclose(space_id); + VRFY((space_id >= 0), "H5Sclose succeeded"); + ret = H5Pclose(dcpl_id); + VRFY((dcpl_id >= 0), "H5Pclose succeeded"); + ret = H5Pclose(fapl_id); + VRFY((fapl_id >= 0), "H5Pclose succeeded"); + + /* Close file */ + ret = H5Fclose(file_id); + VRFY((ret >= 0), "H5Fclose succeeded"); + + return; +} /* end chunk_align_bug_1() */ + +/*============================================================================= + * End of t_mdset.c + *===========================================================================*/ diff --git a/testpar/API/t_ph5basic.c b/testpar/API/t_ph5basic.c new file mode 100644 index 00000000000..1639aff7531 --- /dev/null +++ b/testpar/API/t_ph5basic.c @@ -0,0 +1,192 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Test parallel HDF5 basic components + */ + +#include "hdf5.h" +#include "testphdf5.h" + +/*------------------------------------------------------------------------- + * Function: test_fapl_mpio_dup + * + * Purpose: Test if fapl_mpio property list keeps a duplicate of the + * communicator and INFO objects given when set; and returns + * duplicates of its components when H5Pget_fapl_mpio is called. 
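
In practice that behavior means the caller may release its own communicator and Info object as soon as the FAPL is set, and must release whatever H5Pget_fapl_mpio() hands back. A minimal usage sketch, separate from the test below; fapl_mpio_dup_usage is an illustrative name and error checking is omitted:

#include <mpi.h>
#include "hdf5.h"

static void
fapl_mpio_dup_usage(void)
{
    MPI_Comm comm, comm_back;
    MPI_Info info_back = MPI_INFO_NULL;
    hid_t    fapl = H5Pcreate(H5P_FILE_ACCESS);

    MPI_Comm_dup(MPI_COMM_WORLD, &comm);
    H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);
    MPI_Comm_free(&comm); /* safe: the FAPL keeps its own duplicate */

    H5Pget_fapl_mpio(fapl, &comm_back, &info_back);
    /* ... use comm_back ... */
    MPI_Comm_free(&comm_back); /* the caller owns the returned duplicates */
    if (MPI_INFO_NULL != info_back)
        MPI_Info_free(&info_back);

    H5Pclose(fapl);
}
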
+ * + * Return: Success: None + * Failure: Abort + * + * Programmer: Albert Cheng + * January 9, 2003 + * + *------------------------------------------------------------------------- + */ +void +test_fapl_mpio_dup(void) +{ + int mpi_size, mpi_rank; + MPI_Comm comm, comm_tmp; + int mpi_size_old, mpi_rank_old; + int mpi_size_tmp, mpi_rank_tmp; + MPI_Info info = MPI_INFO_NULL; + MPI_Info info_tmp = MPI_INFO_NULL; + int mrc; /* MPI return value */ + hid_t acc_pl; /* File access properties */ + herr_t ret; /* HDF5 return value */ + int nkeys, nkeys_tmp; + + if (VERBOSE_MED) + HDprintf("Verify fapl_mpio duplicates communicator and INFO objects\n"); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + if (VERBOSE_MED) + HDprintf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size); + + /* Create a new communicator that has the same processes as MPI_COMM_WORLD. + * Use MPI_Comm_split because it is simpler than MPI_Comm_create + */ + mrc = MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &comm); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split"); + MPI_Comm_size(comm, &mpi_size_old); + MPI_Comm_rank(comm, &mpi_rank_old); + if (VERBOSE_MED) + HDprintf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old); + + /* create a new INFO object with some trivial information. */ + mrc = MPI_Info_create(&info); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_create"); + mrc = MPI_Info_set(info, "hdf_info_name", "XYZ"); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_set"); + if (MPI_INFO_NULL != info) { + mrc = MPI_Info_get_nkeys(info, &nkeys); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); + } +#if 0 + if (VERBOSE_MED) + h5_dump_info_object(info); +#endif + + acc_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((acc_pl >= 0), "H5P_FILE_ACCESS"); + + ret = H5Pset_fapl_mpio(acc_pl, comm, info); + VRFY((ret >= 0), ""); + + /* Case 1: + * Free the created communicator and INFO object. + * Check if the access property list is still valid and can return + * valid communicator and INFO object. + */ + mrc = MPI_Comm_free(&comm); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); + if (MPI_INFO_NULL != info) { + mrc = MPI_Info_free(&info); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); + } + + ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp); + VRFY((ret >= 0), "H5Pget_fapl_mpio"); + MPI_Comm_size(comm_tmp, &mpi_size_tmp); + MPI_Comm_rank(comm_tmp, &mpi_rank_tmp); + if (VERBOSE_MED) + HDprintf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); + VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size"); + VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank"); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); + VRFY((nkeys_tmp == nkeys), "new and old nkeys equal"); + } +#if 0 + if (VERBOSE_MED) + h5_dump_info_object(info_tmp); +#endif + + /* Case 2: + * Free the retrieved communicator and INFO object. + * Check if the access property list is still valid and can return + * valid communicator and INFO object. + * Also verify the NULL argument option. + */ + mrc = MPI_Comm_free(&comm_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_free(&info_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); + } + + /* check NULL argument options. 
*/ + ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, NULL); + VRFY((ret >= 0), "H5Pget_fapl_mpio Comm only"); + mrc = MPI_Comm_free(&comm_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); + + ret = H5Pget_fapl_mpio(acc_pl, NULL, &info_tmp); + VRFY((ret >= 0), "H5Pget_fapl_mpio Info only"); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_free(&info_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); + } + + ret = H5Pget_fapl_mpio(acc_pl, NULL, NULL); + VRFY((ret >= 0), "H5Pget_fapl_mpio neither"); + + /* now get both and check validity too. */ + /* Do not free the returned objects which are used in the next case. */ + ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp); + VRFY((ret >= 0), "H5Pget_fapl_mpio"); + MPI_Comm_size(comm_tmp, &mpi_size_tmp); + MPI_Comm_rank(comm_tmp, &mpi_rank_tmp); + if (VERBOSE_MED) + HDprintf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); + VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size"); + VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank"); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); + VRFY((nkeys_tmp == nkeys), "new and old nkeys equal"); + } +#if 0 + if (VERBOSE_MED) + h5_dump_info_object(info_tmp); +#endif + + /* Case 3: + * Close the property list and verify the retrieved communicator and INFO + * object are still valid. + */ + H5Pclose(acc_pl); + MPI_Comm_size(comm_tmp, &mpi_size_tmp); + MPI_Comm_rank(comm_tmp, &mpi_rank_tmp); + if (VERBOSE_MED) + HDprintf("After Property list closed: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); + } +#if 0 + if (VERBOSE_MED) + h5_dump_info_object(info_tmp); +#endif + + /* clean up */ + mrc = MPI_Comm_free(&comm_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_free(&info_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); + } +} /* end test_fapl_mpio_dup() */ diff --git a/testpar/API/t_prop.c b/testpar/API/t_prop.c new file mode 100644 index 00000000000..36595010d52 --- /dev/null +++ b/testpar/API/t_prop.c @@ -0,0 +1,646 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Parallel tests for encoding/decoding plists sent between processes + */ + +#include "hdf5.h" +#include "testphdf5.h" + +#if 0 +#include "H5ACprivate.h" +#include "H5Pprivate.h" +#endif + +static int +test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc) +{ + MPI_Request req[2]; + MPI_Status status; + hid_t pl; /* Decoded property list */ + size_t buf_size = 0; + void *sbuf = NULL; + herr_t ret; /* Generic return value */ + + if (mpi_rank == 0) { + int send_size = 0; + + /* first call to encode returns only the size of the buffer needed */ + ret = H5Pencode2(orig_pl, NULL, &buf_size, H5P_DEFAULT); + VRFY((ret >= 0), "H5Pencode succeeded"); + + sbuf = (uint8_t *)HDmalloc(buf_size); + + ret = H5Pencode2(orig_pl, sbuf, &buf_size, H5P_DEFAULT); + VRFY((ret >= 0), "H5Pencode succeeded"); + + /* this is a temp fix to send this size_t */ + send_size = (int)buf_size; + + MPI_Isend(&send_size, 1, MPI_INT, recv_proc, 123, MPI_COMM_WORLD, &req[0]); + MPI_Isend(sbuf, send_size, MPI_BYTE, recv_proc, 124, MPI_COMM_WORLD, &req[1]); + } /* end if */ + + if (mpi_rank == recv_proc) { + int recv_size; + void *rbuf; + + MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status); + VRFY((recv_size >= 0), "MPI_Recv succeeded"); + buf_size = (size_t)recv_size; + rbuf = (uint8_t *)HDmalloc(buf_size); + MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status); + + pl = H5Pdecode(rbuf); + VRFY((pl >= 0), "H5Pdecode succeeded"); + + VRFY(H5Pequal(orig_pl, pl), "Property List Equal Succeeded"); + + ret = H5Pclose(pl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + if (NULL != rbuf) + HDfree(rbuf); + } /* end if */ + + if (0 == mpi_rank) { + /* gcc 11 complains about passing MPI_STATUSES_IGNORE as an MPI_Status + * array. See the discussion here: + * + * https://github.com/pmodels/mpich/issues/5687 + */ + /* H5_GCC_DIAG_OFF("stringop-overflow") */ + MPI_Waitall(2, req, MPI_STATUSES_IGNORE); + /* H5_GCC_DIAG_ON("stringop-overflow") */ + } + + if (NULL != sbuf) + HDfree(sbuf); + + MPI_Barrier(MPI_COMM_WORLD); + return 0; +} + +void +test_plist_ed(void) +{ + hid_t dcpl; /* dataset create prop. list */ + hid_t dapl; /* dataset access prop. list */ + hid_t dxpl; /* dataset transfer prop. list */ + hid_t gcpl; /* group create prop. list */ + hid_t lcpl; /* link create prop. list */ + hid_t lapl; /* link access prop. list */ + hid_t ocpypl; /* object copy prop. list */ + hid_t ocpl; /* object create prop. list */ + hid_t fapl; /* file access prop. list */ + hid_t fcpl; /* file create prop. list */ + hid_t strcpl; /* string create prop. list */ + hid_t acpl; /* attribute create prop. 
list */ + + int mpi_size, mpi_rank, recv_proc; + + hsize_t chunk_size = 16384; /* chunk size */ + double fill = 2.7; /* Fill value */ + size_t nslots = 521 * 2; + size_t nbytes = 1048576 * 10; + double w0 = 0.5; + unsigned max_compact; + unsigned min_dense; + hsize_t max_size[1]; /*data space maximum size */ + const char *c_to_f = "x+32"; + H5AC_cache_config_t my_cache_config = {H5AC__CURR_CACHE_CONFIG_VERSION, + TRUE, + FALSE, + FALSE, + "temp", + TRUE, + FALSE, + (2 * 2048 * 1024), + 0.3, + (64 * 1024 * 1024), + (4 * 1024 * 1024), + 60000, + H5C_incr__threshold, + 0.8, + 3.0, + TRUE, + (8 * 1024 * 1024), + H5C_flash_incr__add_space, + 2.0, + 0.25, + H5C_decr__age_out_with_threshold, + 0.997, + 0.8, + TRUE, + (3 * 1024 * 1024), + 3, + FALSE, + 0.2, + (256 * 2048), + 1 /* H5AC__DEFAULT_METADATA_WRITE_STRATEGY */}; + + herr_t ret; /* Generic return value */ + + if (VERBOSE_MED) + HDprintf("Encode/Decode DCPLs\n"); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + if (mpi_size == 1) + recv_proc = 0; + else + recv_proc = 1; + + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_chunk(dcpl, 1, &chunk_size); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); + VRFY((ret >= 0), "H5Pset_alloc_time succeeded"); + + ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill); + VRFY((ret >= 0), "set fill-value succeeded"); + + max_size[0] = 100; + ret = H5Pset_external(dcpl, "ext1.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); + VRFY((ret >= 0), "set external succeeded"); + ret = H5Pset_external(dcpl, "ext2.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); + VRFY((ret >= 0), "set external succeeded"); + ret = H5Pset_external(dcpl, "ext3.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); + VRFY((ret >= 0), "set external succeeded"); + ret = H5Pset_external(dcpl, "ext4.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); + VRFY((ret >= 0), "set external succeeded"); + + ret = test_encode_decode(dcpl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + ret = H5Pclose(dcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /******* ENCODE/DECODE DAPLS *****/ + dapl = H5Pcreate(H5P_DATASET_ACCESS); + VRFY((dapl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_chunk_cache(dapl, nslots, nbytes, w0); + VRFY((ret >= 0), "H5Pset_chunk_cache succeeded"); + + ret = test_encode_decode(dapl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + ret = H5Pclose(dapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /******* ENCODE/DECODE OCPLS *****/ + ocpl = H5Pcreate(H5P_OBJECT_CREATE); + VRFY((ocpl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_attr_creation_order(ocpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); + VRFY((ret >= 0), "H5Pset_attr_creation_order succeeded"); + + ret = H5Pset_attr_phase_change(ocpl, 110, 105); + VRFY((ret >= 0), "H5Pset_attr_phase_change succeeded"); + + ret = H5Pset_filter(ocpl, H5Z_FILTER_FLETCHER32, 0, (size_t)0, NULL); + VRFY((ret >= 0), "H5Pset_filter succeeded"); + + ret = test_encode_decode(ocpl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + ret = H5Pclose(ocpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /******* ENCODE/DECODE DXPLS *****/ + dxpl = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_btree_ratios(dxpl, 0.2, 0.6, 0.2); + VRFY((ret >= 
0), "H5Pset_btree_ratios succeeded"); + + ret = H5Pset_hyper_vector_size(dxpl, 5); + VRFY((ret >= 0), "H5Pset_hyper_vector_size succeeded"); + + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt succeeded"); + + ret = H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded"); + + ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 30); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded"); + + ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 40); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded"); + + ret = H5Pset_edc_check(dxpl, H5Z_DISABLE_EDC); + VRFY((ret >= 0), "H5Pset_edc_check succeeded"); + + ret = H5Pset_data_transform(dxpl, c_to_f); + VRFY((ret >= 0), "H5Pset_data_transform succeeded"); + + ret = test_encode_decode(dxpl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + ret = H5Pclose(dxpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /******* ENCODE/DECODE GCPLS *****/ + gcpl = H5Pcreate(H5P_GROUP_CREATE); + VRFY((gcpl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_local_heap_size_hint(gcpl, 256); + VRFY((ret >= 0), "H5Pset_local_heap_size_hint succeeded"); + + ret = H5Pset_link_phase_change(gcpl, 2, 2); + VRFY((ret >= 0), "H5Pset_link_phase_change succeeded"); + + /* Query the group creation properties */ + ret = H5Pget_link_phase_change(gcpl, &max_compact, &min_dense); + VRFY((ret >= 0), "H5Pget_est_link_info succeeded"); + + ret = H5Pset_est_link_info(gcpl, 3, 9); + VRFY((ret >= 0), "H5Pset_est_link_info succeeded"); + + ret = H5Pset_link_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); + VRFY((ret >= 0), "H5Pset_link_creation_order succeeded"); + + ret = test_encode_decode(gcpl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + ret = H5Pclose(gcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /******* ENCODE/DECODE LCPLS *****/ + lcpl = H5Pcreate(H5P_LINK_CREATE); + VRFY((lcpl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_create_intermediate_group(lcpl, TRUE); + VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded"); + + ret = test_encode_decode(lcpl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + ret = H5Pclose(lcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /******* ENCODE/DECODE LAPLS *****/ + lapl = H5Pcreate(H5P_LINK_ACCESS); + VRFY((lapl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_nlinks(lapl, (size_t)134); + VRFY((ret >= 0), "H5Pset_nlinks succeeded"); + + ret = H5Pset_elink_acc_flags(lapl, H5F_ACC_RDONLY); + VRFY((ret >= 0), "H5Pset_elink_acc_flags succeeded"); + + ret = H5Pset_elink_prefix(lapl, "/tmpasodiasod"); + VRFY((ret >= 0), "H5Pset_nlinks succeeded"); + + /* Create FAPL for the elink FAPL */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl >= 0), "H5Pcreate succeeded"); + ret = H5Pset_alignment(fapl, 2, 1024); + VRFY((ret >= 0), "H5Pset_alignment succeeded"); + + ret = H5Pset_elink_fapl(lapl, fapl); + VRFY((ret >= 0), "H5Pset_elink_fapl succeeded"); + + /* Close the elink's FAPL */ + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + ret = test_encode_decode(lapl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + ret = H5Pclose(lapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /******* ENCODE/DECODE OCPYPLS *****/ + ocpypl = 
H5Pcreate(H5P_OBJECT_COPY); + VRFY((ocpypl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_copy_object(ocpypl, H5O_COPY_EXPAND_EXT_LINK_FLAG); + VRFY((ret >= 0), "H5Pset_copy_object succeeded"); + + ret = H5Padd_merge_committed_dtype_path(ocpypl, "foo"); + VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded"); + + ret = H5Padd_merge_committed_dtype_path(ocpypl, "bar"); + VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded"); + + ret = test_encode_decode(ocpypl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + ret = H5Pclose(ocpypl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /******* ENCODE/DECODE FAPLS *****/ + fapl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_family_offset(fapl, 1024); + VRFY((ret >= 0), "H5Pset_family_offset succeeded"); + + ret = H5Pset_meta_block_size(fapl, 2098452); + VRFY((ret >= 0), "H5Pset_meta_block_size succeeded"); + + ret = H5Pset_sieve_buf_size(fapl, 1048576); + VRFY((ret >= 0), "H5Pset_sieve_buf_size succeeded"); + + ret = H5Pset_alignment(fapl, 2, 1024); + VRFY((ret >= 0), "H5Pset_alignment succeeded"); + + ret = H5Pset_cache(fapl, 1024, 128, 10485760, 0.3); + VRFY((ret >= 0), "H5Pset_cache succeeded"); + + ret = H5Pset_elink_file_cache_size(fapl, 10485760); + VRFY((ret >= 0), "H5Pset_elink_file_cache_size succeeded"); + + ret = H5Pset_gc_references(fapl, 1); + VRFY((ret >= 0), "H5Pset_gc_references succeeded"); + + ret = H5Pset_small_data_block_size(fapl, 2048); + VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded"); + + ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + VRFY((ret >= 0), "H5Pset_libver_bounds succeeded"); + + ret = H5Pset_fclose_degree(fapl, H5F_CLOSE_WEAK); + VRFY((ret >= 0), "H5Pset_fclose_degree succeeded"); + + ret = H5Pset_multi_type(fapl, H5FD_MEM_GHEAP); + VRFY((ret >= 0), "H5Pset_multi_type succeeded"); + + ret = H5Pset_mdc_config(fapl, &my_cache_config); + VRFY((ret >= 0), "H5Pset_mdc_config succeeded"); + + ret = test_encode_decode(fapl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /******* ENCODE/DECODE FCPLS *****/ + fcpl = H5Pcreate(H5P_FILE_CREATE); + VRFY((fcpl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_userblock(fcpl, 1024); + VRFY((ret >= 0), "H5Pset_userblock succeeded"); + + ret = H5Pset_istore_k(fcpl, 3); + VRFY((ret >= 0), "H5Pset_istore_k succeeded"); + + ret = H5Pset_sym_k(fcpl, 4, 5); + VRFY((ret >= 0), "H5Pset_sym_k succeeded"); + + ret = H5Pset_shared_mesg_nindexes(fcpl, 8); + VRFY((ret >= 0), "H5Pset_shared_mesg_nindexes succeeded"); + + ret = H5Pset_shared_mesg_index(fcpl, 1, H5O_SHMESG_SDSPACE_FLAG, 32); + VRFY((ret >= 0), "H5Pset_shared_mesg_index succeeded"); + + ret = H5Pset_shared_mesg_phase_change(fcpl, 60, 20); + VRFY((ret >= 0), "H5Pset_shared_mesg_phase_change succeeded"); + + ret = H5Pset_sizes(fcpl, 8, 4); + VRFY((ret >= 0), "H5Pset_sizes succeeded"); + + ret = test_encode_decode(fcpl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + ret = H5Pclose(fcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /******* ENCODE/DECODE STRCPLS *****/ + strcpl = H5Pcreate(H5P_STRING_CREATE); + VRFY((strcpl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_char_encoding(strcpl, H5T_CSET_UTF8); + VRFY((ret >= 0), "H5Pset_char_encoding succeeded"); + + ret = test_encode_decode(strcpl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + 
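/* Illustrative sketch (not part of this patch): every test_encode_decode() call
 * above exercises the same round trip -- serialize the property list with
 * H5Pencode2(), rebuild it with H5Pdecode(), and verify the copy with H5Pequal().
 * The helper's (plist, mpi_rank, recv_proc) arguments suggest that the real
 * version, defined earlier in this file, also ships the encoded buffer from
 * rank 0 to recv_proc with MPI_Send()/MPI_Recv() before decoding it there; the
 * hypothetical function below shows only the core single-process round trip.
 */
#if 0
static int
encode_decode_sketch(hid_t plist)
{
    size_t nalloc    = 0;
    void  *buf       = NULL;
    hid_t  decoded   = H5I_INVALID_HID;
    int    ret_value = -1;

    /* First call with a NULL buffer only reports the required size */
    if (H5Pencode2(plist, NULL, &nalloc, H5P_DEFAULT) < 0)
        return -1;

    if (NULL == (buf = HDmalloc(nalloc)))
        return -1;

    /* Second call actually serializes the property list into buf */
    if (H5Pencode2(plist, buf, &nalloc, H5P_DEFAULT) >= 0)
        if ((decoded = H5Pdecode(buf)) >= 0)
            /* the decoded list must compare equal to the original */
            ret_value = (H5Pequal(decoded, plist) > 0) ? 0 : -1;

    if (decoded >= 0)
        H5Pclose(decoded);
    HDfree(buf);

    return ret_value;
}
#endif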
ret = H5Pclose(strcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /******* ENCODE/DECODE ACPLS *****/ + acpl = H5Pcreate(H5P_ATTRIBUTE_CREATE); + VRFY((acpl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_char_encoding(acpl, H5T_CSET_UTF8); + VRFY((ret >= 0), "H5Pset_char_encoding succeeded"); + + ret = test_encode_decode(acpl, mpi_rank, recv_proc); + VRFY((ret >= 0), "test_encode_decode succeeded"); + + ret = H5Pclose(acpl); + VRFY((ret >= 0), "H5Pclose succeeded"); +} + +#if 0 +void +external_links(void) +{ + hid_t lcpl = H5I_INVALID_HID; /* link create prop. list */ + hid_t lapl = H5I_INVALID_HID; /* link access prop. list */ + hid_t fapl = H5I_INVALID_HID; /* file access prop. list */ + hid_t gapl = H5I_INVALID_HID; /* group access prop. list */ + hid_t fid = H5I_INVALID_HID; /* file id */ + hid_t group = H5I_INVALID_HID; /* group id */ + int mpi_size, mpi_rank; + + MPI_Comm comm; + int doIO; + int i, mrc; + + herr_t ret; /* Generic return value */ + htri_t tri_status; /* tri return value */ + + const char *filename = "HDF5test.h5"; + const char *filename_ext = "HDF5test_ext.h5"; + const char *group_path = "/Base/Block/Step"; + const char *link_name = "link"; /* external link */ + char link_path[50]; + + if (VERBOSE_MED) + HDprintf("Check external links\n"); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Check MPI communicator access properties are passed to + linked external files */ + + if (mpi_rank == 0) { + + lcpl = H5Pcreate(H5P_LINK_CREATE); + VRFY((lcpl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_create_intermediate_group(lcpl, 1); + VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded"); + + /* Create file to serve as target for external link.*/ + fid = H5Fcreate(filename_ext, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT); + VRFY((group >= 0), "H5Gcreate succeeded"); + + ret = H5Gclose(group); + VRFY((ret >= 0), "H5Gclose succeeded"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + + fapl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl >= 0), "H5Pcreate succeeded"); + + /* Create a new file using the file access property list. */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT); + VRFY((group >= 0), "H5Gcreate succeeded"); + + /* Create external links to the target files. */ + ret = H5Lcreate_external(filename_ext, group_path, group, link_name, H5P_DEFAULT, H5P_DEFAULT); + VRFY((ret >= 0), "H5Lcreate_external succeeded"); + + /* Close and release resources. */ + ret = H5Pclose(lcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Gclose(group); + VRFY((ret >= 0), "H5Gclose succeeded"); + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } + + MPI_Barrier(MPI_COMM_WORLD); + + /* + * For the first case, use all the processes. For the second case + * use a sub-communicator to verify the correct communicator is + * being used for the externally linked files. + * There is no way to determine if MPI info is being used for the + * externally linked files. 
+ */ + + for (i = 0; i < 2; i++) { + + comm = MPI_COMM_WORLD; + + if (i == 0) + doIO = 1; + else { + doIO = mpi_rank % 2; + mrc = MPI_Comm_split(MPI_COMM_WORLD, doIO, mpi_rank, &comm); + VRFY((mrc == MPI_SUCCESS), ""); + } + + if (doIO) { + fapl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl >= 0), "H5Pcreate succeeded"); + ret = H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL); + VRFY((fapl >= 0), "H5Pset_fapl_mpio succeeded"); + + fid = H5Fopen(filename, H5F_ACC_RDWR, fapl); + VRFY((fid >= 0), "H5Fopen succeeded"); + + /* test opening a group that is to an external link, the external linked + file should inherit the source file's access properties */ + HDsnprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name); + group = H5Gopen2(fid, link_path, H5P_DEFAULT); + VRFY((group >= 0), "H5Gopen succeeded"); + ret = H5Gclose(group); + VRFY((ret >= 0), "H5Gclose succeeded"); + + /* test opening a group that is external link by setting group + creation property */ + gapl = H5Pcreate(H5P_GROUP_ACCESS); + VRFY((gapl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_elink_fapl(gapl, fapl); + VRFY((ret >= 0), "H5Pset_elink_fapl succeeded"); + + group = H5Gopen2(fid, link_path, gapl); + VRFY((group >= 0), "H5Gopen succeeded"); + + ret = H5Gclose(group); + VRFY((ret >= 0), "H5Gclose succeeded"); + + ret = H5Pclose(gapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* test link APIs */ + lapl = H5Pcreate(H5P_LINK_ACCESS); + VRFY((lapl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_elink_fapl(lapl, fapl); + VRFY((ret >= 0), "H5Pset_elink_fapl succeeded"); + + tri_status = H5Lexists(fid, link_path, H5P_DEFAULT); + VRFY((tri_status == TRUE), "H5Lexists succeeded"); + + tri_status = H5Lexists(fid, link_path, lapl); + VRFY((tri_status == TRUE), "H5Lexists succeeded"); + + group = H5Oopen(fid, link_path, H5P_DEFAULT); + VRFY((group >= 0), "H5Oopen succeeded"); + + ret = H5Oclose(group); + VRFY((ret >= 0), "H5Oclose succeeded"); + + group = H5Oopen(fid, link_path, lapl); + VRFY((group >= 0), "H5Oopen succeeded"); + + ret = H5Oclose(group); + VRFY((ret >= 0), "H5Oclose succeeded"); + + ret = H5Pclose(lapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* close the remaining resources */ + + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } + + if (comm != MPI_COMM_WORLD) { + mrc = MPI_Comm_free(&comm); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded"); + } + } + + MPI_Barrier(MPI_COMM_WORLD); + + /* delete the test files */ + if (mpi_rank == 0) { + MPI_File_delete(filename, MPI_INFO_NULL); + MPI_File_delete(filename_ext, MPI_INFO_NULL); + } +} +#endif diff --git a/testpar/API/t_pshutdown.c b/testpar/API/t_pshutdown.c new file mode 100644 index 00000000000..48a8005677b --- /dev/null +++ b/testpar/API/t_pshutdown.c @@ -0,0 +1,150 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Programmer: Mohamad Chaarawi + * February 2015 + * + * Purpose: This test creates a file and a bunch of objects in the + * file and then calls MPI_Finalize without closing anything. The + * library should exercise the attribute callback destroy attached to + * MPI_COMM_SELF and terminate the HDF5 library closing all open + * objects. The t_prestart test will read back the file and make sure + * all created objects are there. + */ + +#include "hdf5.h" +#include "testphdf5.h" + +int nerrors = 0; /* errors count */ + +const char *FILENAME[] = {"shutdown.h5", NULL}; + +int +main(int argc, char **argv) +{ + hid_t file_id, dset_id, grp_id; + hid_t fapl, sid, mem_dataspace; + hsize_t dims[RANK], i; + herr_t ret; +#if 0 + char filename[1024]; +#endif + int mpi_size, mpi_rank; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + hsize_t start[RANK]; + hsize_t count[RANK]; + hsize_t stride[RANK]; + hsize_t block[RANK]; + DATATYPE *data_array = NULL; /* data buffer */ + + MPI_Init(&argc, &argv); + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + if (MAINPROCESS) { + printf("Testing %-62s", "proper shutdown of HDF5 library"); + fflush(stdout); + } + + /* Set up file access property list with parallel I/O access */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl >= 0), "H5Pcreate succeeded"); + + /* Get the capability flag of the VOL connector being used */ + ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g); + VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + HDprintf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + MPI_Finalize(); + return 0; + } + + ret = H5Pset_fapl_mpio(fapl, comm, info); + VRFY((ret >= 0), ""); + +#if 0 + h5_fixname(FILENAME[0], fapl, filename, sizeof filename); +#endif + file_id = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + grp_id = H5Gcreate2(file_id, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((grp_id >= 0), "H5Gcreate succeeded"); + + dims[0] = (hsize_t)ROW_FACTOR * (hsize_t)mpi_size; + dims[1] = (hsize_t)COL_FACTOR * (hsize_t)mpi_size; + sid = H5Screate_simple(RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + dset_id = H5Dcreate2(grp_id, "Dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate succeeded"); + + /* allocate memory for data buffer */ + data_array = (DATATYPE *)HDmalloc(dims[0] * dims[1] * sizeof(DATATYPE)); + VRFY((data_array != NULL), "data_array HDmalloc succeeded"); + + /* Each process takes a slabs of rows. 
*/ + block[0] = dims[0] / (hsize_t)mpi_size; + block[1] = dims[1]; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + + /* put some trivial data in the data_array */ + for (i = 0; i < dims[0] * dims[1]; i++) + data_array[i] = mpi_rank + 1; + + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* write data independently */ + ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* release data buffers */ + if (data_array) + HDfree(data_array); + + MPI_Finalize(); + + /* nerrors += GetTestNumErrs(); */ + + if (MAINPROCESS) { + if (0 == nerrors) { + puts(" PASSED"); + fflush(stdout); + } + else { + puts("*FAILED*"); + fflush(stdout); + } + } + + return (nerrors != 0); +} diff --git a/testpar/API/t_shapesame.c b/testpar/API/t_shapesame.c new file mode 100644 index 00000000000..340e89ecd9f --- /dev/null +++ b/testpar/API/t_shapesame.c @@ -0,0 +1,4516 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + This program will test independent and collective reads and writes between + selections of different rank that non-the-less are deemed as having the + same shape by H5Sselect_shape_same(). + */ + +#define H5S_FRIEND /*suppress error about including H5Spkg */ + +/* Define this macro to indicate that the testing APIs should be available */ +#define H5S_TESTING + +#if 0 +#include "H5Spkg.h" /* Dataspaces */ +#endif + +#include "hdf5.h" +#include "testphdf5.h" + +/* FILENAME and filenames must have the same number of names. + * Use PARATESTFILE in general and use a separated filename only if the file + * created in one test is accessed by a different test. + * filenames[0] is reserved as the file name for PARATESTFILE. + */ +#define NFILENAME 2 +const char *FILENAME[NFILENAME] = {"ShapeSameTest.h5", NULL}; +char filenames[NFILENAME][PATH_MAX]; +hid_t fapl; /* file access property list */ + +/* On Lustre (and perhaps other parallel file systems?), we have severe + * slow downs if two or more processes attempt to access the same file system + * block. To minimize this problem, we set alignment in the shape same tests + * to the default Lustre block size -- which greatly reduces contention in + * the chunked dataset case. 
+ */ + +#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024)) + +#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */ + +struct hs_dr_pio_test_vars_t { + int mpi_size; + int mpi_rank; + MPI_Comm mpi_comm; + MPI_Info mpi_info; + int test_num; + int edge_size; + int checker_edge_size; + int chunk_edge_size; + int small_rank; + int large_rank; + hid_t dset_type; + uint32_t *small_ds_buf_0; + uint32_t *small_ds_buf_1; + uint32_t *small_ds_buf_2; + uint32_t *small_ds_slice_buf; + uint32_t *large_ds_buf_0; + uint32_t *large_ds_buf_1; + uint32_t *large_ds_buf_2; + uint32_t *large_ds_slice_buf; + int small_ds_offset; + int large_ds_offset; + hid_t fid; /* HDF5 file ID */ + hid_t xfer_plist; + hid_t full_mem_small_ds_sid; + hid_t full_file_small_ds_sid; + hid_t mem_small_ds_sid; + hid_t file_small_ds_sid_0; + hid_t file_small_ds_sid_1; + hid_t small_ds_slice_sid; + hid_t full_mem_large_ds_sid; + hid_t full_file_large_ds_sid; + hid_t mem_large_ds_sid; + hid_t file_large_ds_sid_0; + hid_t file_large_ds_sid_1; + hid_t file_large_ds_process_slice_sid; + hid_t mem_large_ds_process_slice_sid; + hid_t large_ds_slice_sid; + hid_t small_dataset; /* Dataset ID */ + hid_t large_dataset; /* Dataset ID */ + size_t small_ds_size; + size_t small_ds_slice_size; + size_t large_ds_size; + size_t large_ds_slice_size; + hsize_t dims[PAR_SS_DR_MAX_RANK]; + hsize_t chunk_dims[PAR_SS_DR_MAX_RANK]; + hsize_t start[PAR_SS_DR_MAX_RANK]; + hsize_t stride[PAR_SS_DR_MAX_RANK]; + hsize_t count[PAR_SS_DR_MAX_RANK]; + hsize_t block[PAR_SS_DR_MAX_RANK]; + hsize_t *start_ptr; + hsize_t *stride_ptr; + hsize_t *count_ptr; + hsize_t *block_ptr; + int skips; + int max_skips; + int64_t total_tests; + int64_t tests_run; + int64_t tests_skipped; +}; + +/*------------------------------------------------------------------------- + * Function: hs_dr_pio_test__setup() + * + * Purpose: Do setup for tests of I/O to/from hyperslab selections of + * different rank in the parallel case. 
+ * + * Return: void + * + * Programmer: JRM -- 8/9/11 + * + *------------------------------------------------------------------------- + */ + +#define CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG 0 + +static void +hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker_edge_size, + const int chunk_edge_size, const int small_rank, const int large_rank, + const hbool_t use_collective_io, const hid_t dset_type, const int express_test, + struct hs_dr_pio_test_vars_t *tv_ptr) +{ +#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG + const char *fcnName = "hs_dr_pio_test__setup()"; +#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */ + const char *filename; + hbool_t mis_match = FALSE; + int i; + int mrc; + int mpi_rank; /* needed by the VRFY macro */ + uint32_t expected_value; + uint32_t *ptr_0; + uint32_t *ptr_1; + hid_t acc_tpl; /* File access templates */ + hid_t small_ds_dcpl_id = H5P_DEFAULT; + hid_t large_ds_dcpl_id = H5P_DEFAULT; + herr_t ret; /* Generic return value */ + + HDassert(edge_size >= 6); + HDassert(edge_size >= chunk_edge_size); + HDassert((chunk_edge_size == 0) || (chunk_edge_size >= 3)); + HDassert(1 < small_rank); + HDassert(small_rank < large_rank); + HDassert(large_rank <= PAR_SS_DR_MAX_RANK); + + tv_ptr->test_num = test_num; + tv_ptr->edge_size = edge_size; + tv_ptr->checker_edge_size = checker_edge_size; + tv_ptr->chunk_edge_size = chunk_edge_size; + tv_ptr->small_rank = small_rank; + tv_ptr->large_rank = large_rank; + tv_ptr->dset_type = dset_type; + + MPI_Comm_size(MPI_COMM_WORLD, &(tv_ptr->mpi_size)); + MPI_Comm_rank(MPI_COMM_WORLD, &(tv_ptr->mpi_rank)); + /* the VRFY() macro needs the local variable mpi_rank -- set it up now */ + mpi_rank = tv_ptr->mpi_rank; + + HDassert(tv_ptr->mpi_size >= 1); + + tv_ptr->mpi_comm = MPI_COMM_WORLD; + tv_ptr->mpi_info = MPI_INFO_NULL; + + for (i = 0; i < tv_ptr->small_rank - 1; i++) { + tv_ptr->small_ds_size *= (size_t)(tv_ptr->edge_size); + tv_ptr->small_ds_slice_size *= (size_t)(tv_ptr->edge_size); + } + tv_ptr->small_ds_size *= (size_t)(tv_ptr->mpi_size + 1); + + /* used by checker board tests only */ + tv_ptr->small_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->small_rank; + + HDassert(0 < tv_ptr->small_ds_offset); + HDassert(tv_ptr->small_ds_offset < PAR_SS_DR_MAX_RANK); + + for (i = 0; i < tv_ptr->large_rank - 1; i++) { + + tv_ptr->large_ds_size *= (size_t)(tv_ptr->edge_size); + tv_ptr->large_ds_slice_size *= (size_t)(tv_ptr->edge_size); + } + tv_ptr->large_ds_size *= (size_t)(tv_ptr->mpi_size + 1); + + /* used by checker board tests only */ + tv_ptr->large_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->large_rank; + + HDassert(0 <= tv_ptr->large_ds_offset); + HDassert(tv_ptr->large_ds_offset < PAR_SS_DR_MAX_RANK); + + /* set up the start, stride, count, and block pointers */ + /* used by contiguous tests only */ + tv_ptr->start_ptr = &(tv_ptr->start[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]); + tv_ptr->stride_ptr = &(tv_ptr->stride[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]); + tv_ptr->count_ptr = &(tv_ptr->count[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]); + tv_ptr->block_ptr = &(tv_ptr->block[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]); + + /* Allocate buffers */ + tv_ptr->small_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size); + VRFY((tv_ptr->small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded"); + + tv_ptr->small_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size); + VRFY((tv_ptr->small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded"); + + tv_ptr->small_ds_buf_2 = (uint32_t 
*)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size); + VRFY((tv_ptr->small_ds_buf_2 != NULL), "malloc of small_ds_buf_2 succeeded"); + + tv_ptr->small_ds_slice_buf = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_slice_size); + VRFY((tv_ptr->small_ds_slice_buf != NULL), "malloc of small_ds_slice_buf succeeded"); + + tv_ptr->large_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size); + VRFY((tv_ptr->large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded"); + + tv_ptr->large_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size); + VRFY((tv_ptr->large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded"); + + tv_ptr->large_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size); + VRFY((tv_ptr->large_ds_buf_2 != NULL), "malloc of large_ds_buf_2 succeeded"); + + tv_ptr->large_ds_slice_buf = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_slice_size); + VRFY((tv_ptr->large_ds_slice_buf != NULL), "malloc of large_ds_slice_buf succeeded"); + + /* initialize the buffers */ + + ptr_0 = tv_ptr->small_ds_buf_0; + for (i = 0; i < (int)(tv_ptr->small_ds_size); i++) + *ptr_0++ = (uint32_t)i; + HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); + HDmemset(tv_ptr->small_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); + + HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size); + + ptr_0 = tv_ptr->large_ds_buf_0; + for (i = 0; i < (int)(tv_ptr->large_ds_size); i++) + *ptr_0++ = (uint32_t)i; + HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); + HDmemset(tv_ptr->large_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); + + HDmemset(tv_ptr->large_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->large_ds_slice_size); + + filename = filenames[0]; /* (const char *)GetTestParameters(); */ + HDassert(filename != NULL); +#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG + if (MAINPROCESS) { + + HDfprintf(stdout, "%d: test num = %d.\n", tv_ptr->mpi_rank, tv_ptr->test_num); + HDfprintf(stdout, "%d: mpi_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->mpi_size); + HDfprintf(stdout, "%d: small/large rank = %d/%d, use_collective_io = %d.\n", tv_ptr->mpi_rank, + tv_ptr->small_rank, tv_ptr->large_rank, (int)use_collective_io); + HDfprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->edge_size, + tv_ptr->chunk_edge_size); + HDfprintf(stdout, "%d: checker_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->checker_edge_size); + HDfprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n", tv_ptr->mpi_rank, + (int)(tv_ptr->small_ds_size), (int)(tv_ptr->large_ds_size)); + HDfprintf(stdout, "%d: filename = %s.\n", tv_ptr->mpi_rank, filename); + } +#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */ + /* ---------------------------------------- + * CREATE AN HDF5 FILE WITH PARALLEL ACCESS + * ---------------------------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(tv_ptr->mpi_comm, tv_ptr->mpi_info, facc_type); + VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded"); + + /* set the alignment -- need it large so that we aren't always hitting the + * the same file system block. Do this only if express_test is greater + * than zero. 
+ */ + if (express_test > 0) { + + ret = H5Pset_alignment(acc_tpl, (hsize_t)0, SHAPE_SAME_TEST_ALIGNMENT); + VRFY((ret != FAIL), "H5Pset_alignment() succeeded"); + } + + /* create the file collectively */ + tv_ptr->fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((tv_ptr->fid >= 0), "H5Fcreate succeeded"); + + MESG("File opened."); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded"); + + /* setup dims: */ + tv_ptr->dims[0] = (hsize_t)(tv_ptr->mpi_size + 1); + tv_ptr->dims[1] = tv_ptr->dims[2] = tv_ptr->dims[3] = tv_ptr->dims[4] = (hsize_t)(tv_ptr->edge_size); + + /* Create small ds dataspaces */ + tv_ptr->full_mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded"); + + tv_ptr->full_file_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded"); + + tv_ptr->mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded"); + + tv_ptr->file_small_ds_sid_0 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->file_small_ds_sid_0 != 0), "H5Screate_simple() file_small_ds_sid_0 succeeded"); + + /* used by checker board tests only */ + tv_ptr->file_small_ds_sid_1 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->file_small_ds_sid_1 != 0), "H5Screate_simple() file_small_ds_sid_1 succeeded"); + + tv_ptr->small_ds_slice_sid = H5Screate_simple(tv_ptr->small_rank - 1, &(tv_ptr->dims[1]), NULL); + VRFY((tv_ptr->small_ds_slice_sid != 0), "H5Screate_simple() small_ds_slice_sid succeeded"); + + /* Create large ds dataspaces */ + tv_ptr->full_mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded"); + + tv_ptr->full_file_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->full_file_large_ds_sid != FAIL), "H5Screate_simple() full_file_large_ds_sid succeeded"); + + tv_ptr->mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->mem_large_ds_sid != FAIL), "H5Screate_simple() mem_large_ds_sid succeeded"); + + tv_ptr->file_large_ds_sid_0 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->file_large_ds_sid_0 != FAIL), "H5Screate_simple() file_large_ds_sid_0 succeeded"); + + /* used by checker board tests only */ + tv_ptr->file_large_ds_sid_1 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->file_large_ds_sid_1 != FAIL), "H5Screate_simple() file_large_ds_sid_1 succeeded"); + + tv_ptr->mem_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL), + "H5Screate_simple() mem_large_ds_process_slice_sid succeeded"); + + tv_ptr->file_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL), + "H5Screate_simple() file_large_ds_process_slice_sid succeeded"); + + tv_ptr->large_ds_slice_sid = H5Screate_simple(tv_ptr->large_rank - 1, &(tv_ptr->dims[1]), NULL); + VRFY((tv_ptr->large_ds_slice_sid != 0), "H5Screate_simple() large_ds_slice_sid succeeded"); + 
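/* Illustrative sketch (not part of this patch): the shape-same tests in this
 * file all rely on H5Sselect_shape_same() treating a slice through a
 * higher-rank dataspace and a fully selected lower-rank dataspace as
 * selections of the same shape. A minimal standalone example of that
 * property, using hypothetical fixed 10x10(x10) extents instead of the
 * edge_size-driven dataspaces created above:
 */
#if 0
    {
        hsize_t cube_dims[3]  = {10, 10, 10};
        hsize_t plane_dims[2] = {10, 10};
        hsize_t start[3]      = {3, 0, 0}; /* one 10x10 plane at index 3 */
        hsize_t count[3]      = {1, 10, 10};
        hid_t   cube_sid      = H5Screate_simple(3, cube_dims, NULL);
        hid_t   plane_sid     = H5Screate_simple(2, plane_dims, NULL);
        htri_t  same;

        /* select a 2-D plane inside the 3-D dataspace (stride/block default to 1) */
        H5Sselect_hyperslab(cube_sid, H5S_SELECT_SET, start, NULL, count, NULL);

        /* TRUE: a 1x10x10 slab and a fully selected 10x10 plane have the same
         * shape, so H5Dread()/H5Dwrite() may pair the two selections -- which
         * is exactly what the small/large dataset tests below do.
         */
        same = H5Sselect_shape_same(plane_sid, cube_sid);

        H5Sclose(plane_sid);
        H5Sclose(cube_sid);
    }
#endif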
+ /* if chunk edge size is greater than zero, set up the small and + * large data set creation property lists to specify chunked + * datasets. + */ + if (tv_ptr->chunk_edge_size > 0) { + + /* Under Lustre (and perhaps other parallel file systems?) we get + * locking delays when two or more processes attempt to access the + * same file system block. + * + * To minimize this problem, I have changed chunk_dims[0] + * from (mpi_size + 1) to just when any sort of express test is + * selected. Given the structure of the test, and assuming we + * set the alignment large enough, this avoids the contention + * issue by seeing to it that each chunk is only accessed by one + * process. + * + * One can argue as to whether this is a good thing to do in our + * tests, but for now it is necessary if we want the test to complete + * in a reasonable amount of time. + * + * JRM -- 9/16/10 + */ + + tv_ptr->chunk_dims[0] = 1; + + tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] = tv_ptr->chunk_dims[3] = tv_ptr->chunk_dims[4] = + (hsize_t)(tv_ptr->chunk_edge_size); + + small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded"); + + ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED); + VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded"); + + ret = H5Pset_chunk(small_ds_dcpl_id, tv_ptr->small_rank, tv_ptr->chunk_dims); + VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded"); + + large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded"); + + ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED); + VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded"); + + ret = H5Pset_chunk(large_ds_dcpl_id, tv_ptr->large_rank, tv_ptr->chunk_dims); + VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded"); + } + + /* create the small dataset */ + tv_ptr->small_dataset = + H5Dcreate2(tv_ptr->fid, "small_dataset", tv_ptr->dset_type, tv_ptr->file_small_ds_sid_0, H5P_DEFAULT, + small_ds_dcpl_id, H5P_DEFAULT); + VRFY((ret != FAIL), "H5Dcreate2() small_dataset succeeded"); + + /* create the large dataset */ + tv_ptr->large_dataset = + H5Dcreate2(tv_ptr->fid, "large_dataset", tv_ptr->dset_type, tv_ptr->file_large_ds_sid_0, H5P_DEFAULT, + large_ds_dcpl_id, H5P_DEFAULT); + VRFY((ret != FAIL), "H5Dcreate2() large_dataset succeeded"); + + /* setup xfer property list */ + tv_ptr->xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((tv_ptr->xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); + + if (use_collective_io) { + ret = H5Pset_dxpl_mpio(tv_ptr->xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + } + + /* setup selection to write initial data to the small and large data sets */ + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; + + for (i = 1; i < tv_ptr->large_rank; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + + /* setup selections for writing initial data to the small data set */ + ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); + + ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, 
tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); + + if (MAINPROCESS) { /* add an additional slice to the selections */ + + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size); + + ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded"); + + ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) succeeded"); + } + + /* write the initial value of the small data set to file */ + ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0); + + VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded"); + + /* sync with the other processes before checking data */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes"); + + /* read the small data set back to verify that it contains the + * expected data. Note that each process reads in the entire + * data set and verifies it. + */ + ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_small_ds_sid, + tv_ptr->full_file_small_ds_sid, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1); + VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded"); + + /* verify that the correct data was written to the small data set */ + expected_value = 0; + mis_match = FALSE; + ptr_1 = tv_ptr->small_ds_buf_1; + + i = 0; + for (i = 0; i < (int)(tv_ptr->small_ds_size); i++) { + + if (*ptr_1 != expected_value) { + + mis_match = TRUE; + } + ptr_1++; + expected_value++; + } + VRFY((mis_match == FALSE), "small ds init data good."); + + /* setup selections for writing initial data to the large data set */ + + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded"); + + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded"); + + /* In passing, setup the process slice dataspaces as well */ + + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start, + tv_ptr->stride, tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) succeeded"); + + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start, + tv_ptr->stride, tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) succeeded"); + + if (MAINPROCESS) { /* add an additional slice to the selections */ + + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size); + + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded"); + + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) succeeded"); + } + + /* write 
the initial value of the large data set to file */ + ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type, tv_ptr->mem_large_ds_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stderr); + VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded"); + + /* sync with the other processes before checking data */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes"); + + /* read the large data set back to verify that it contains the + * expected data. Note that each process reads in the entire + * data set. + */ + ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_large_ds_sid, + tv_ptr->full_file_large_ds_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); + VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded"); + + /* verify that the correct data was written to the large data set */ + expected_value = 0; + mis_match = FALSE; + ptr_1 = tv_ptr->large_ds_buf_1; + + i = 0; + for (i = 0; i < (int)(tv_ptr->large_ds_size); i++) { + + if (*ptr_1 != expected_value) { + + mis_match = TRUE; + } + ptr_1++; + expected_value++; + } + VRFY((mis_match == FALSE), "large ds init data good."); + + /* sync with the other processes before changing data */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync initial values check"); + + return; + +} /* hs_dr_pio_test__setup() */ + +/*------------------------------------------------------------------------- + * Function: hs_dr_pio_test__takedown() + * + * Purpose: Do takedown after tests of I/O to/from hyperslab selections + * of different rank in the parallel case. + * + * Return: void + * + * Programmer: JRM -- 9/18/09 + * + *------------------------------------------------------------------------- + */ + +#define HS_DR_PIO_TEST__TAKEDOWN__DEBUG 0 + +static void +hs_dr_pio_test__takedown(struct hs_dr_pio_test_vars_t *tv_ptr) +{ +#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG + const char *fcnName = "hs_dr_pio_test__takedown()"; +#endif /* HS_DR_PIO_TEST__TAKEDOWN__DEBUG */ + int mpi_rank; /* needed by the VRFY macro */ + herr_t ret; /* Generic return value */ + + /* initialize the local copy of mpi_rank */ + mpi_rank = tv_ptr->mpi_rank; + + /* Close property lists */ + if (tv_ptr->xfer_plist != H5P_DEFAULT) { + ret = H5Pclose(tv_ptr->xfer_plist); + VRFY((ret != FAIL), "H5Pclose(xfer_plist) succeeded"); + } + + /* Close dataspaces */ + ret = H5Sclose(tv_ptr->full_mem_small_ds_sid); + VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded"); + + ret = H5Sclose(tv_ptr->full_file_small_ds_sid); + VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded"); + + ret = H5Sclose(tv_ptr->mem_small_ds_sid); + VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded"); + + ret = H5Sclose(tv_ptr->file_small_ds_sid_0); + VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_0) succeeded"); + + ret = H5Sclose(tv_ptr->file_small_ds_sid_1); + VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_1) succeeded"); + + ret = H5Sclose(tv_ptr->small_ds_slice_sid); + VRFY((ret != FAIL), "H5Sclose(small_ds_slice_sid) succeeded"); + + ret = H5Sclose(tv_ptr->full_mem_large_ds_sid); + VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded"); + + ret = H5Sclose(tv_ptr->full_file_large_ds_sid); + VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded"); + + ret = H5Sclose(tv_ptr->mem_large_ds_sid); + VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded"); + + ret = 
H5Sclose(tv_ptr->file_large_ds_sid_0); + VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid_0) succeeded"); + + ret = H5Sclose(tv_ptr->file_large_ds_sid_1); + VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid_1) succeeded"); + + ret = H5Sclose(tv_ptr->mem_large_ds_process_slice_sid); + VRFY((ret != FAIL), "H5Sclose(mem_large_ds_process_slice_sid) succeeded"); + + ret = H5Sclose(tv_ptr->file_large_ds_process_slice_sid); + VRFY((ret != FAIL), "H5Sclose(file_large_ds_process_slice_sid) succeeded"); + + ret = H5Sclose(tv_ptr->large_ds_slice_sid); + VRFY((ret != FAIL), "H5Sclose(large_ds_slice_sid) succeeded"); + + /* Close Datasets */ + ret = H5Dclose(tv_ptr->small_dataset); + VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded"); + + ret = H5Dclose(tv_ptr->large_dataset); + VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded"); + + /* close the file collectively */ + MESG("about to close file."); + ret = H5Fclose(tv_ptr->fid); + VRFY((ret != FAIL), "file close succeeded"); + + /* Free memory buffers */ + + if (tv_ptr->small_ds_buf_0 != NULL) + HDfree(tv_ptr->small_ds_buf_0); + if (tv_ptr->small_ds_buf_1 != NULL) + HDfree(tv_ptr->small_ds_buf_1); + if (tv_ptr->small_ds_buf_2 != NULL) + HDfree(tv_ptr->small_ds_buf_2); + if (tv_ptr->small_ds_slice_buf != NULL) + HDfree(tv_ptr->small_ds_slice_buf); + + if (tv_ptr->large_ds_buf_0 != NULL) + HDfree(tv_ptr->large_ds_buf_0); + if (tv_ptr->large_ds_buf_1 != NULL) + HDfree(tv_ptr->large_ds_buf_1); + if (tv_ptr->large_ds_buf_2 != NULL) + HDfree(tv_ptr->large_ds_buf_2); + if (tv_ptr->large_ds_slice_buf != NULL) + HDfree(tv_ptr->large_ds_slice_buf); + + return; + +} /* hs_dr_pio_test__takedown() */ + +/*------------------------------------------------------------------------- + * Function: contig_hs_dr_pio_test__d2m_l2s() + * + * Purpose: Part one of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. + * + * Verify that we can read from disk correctly using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. + * + * In this function, we test this by reading small_rank - 1 + * slices from the on disk large cube, and verifying that the + * data read is correct. Verify that H5Sselect_shape_same() + * returns true on the memory and file selections. + * + * Return: void + * + * Programmer: JRM -- 9/10/11 + * + *------------------------------------------------------------------------- + */ + +#define CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0 + +static void +contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) +{ +#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG + const char *fcnName = "contig_hs_dr_pio_test__run_test()"; +#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ + hbool_t mis_match = FALSE; + int i, j, k, l; + size_t n; + int mpi_rank; /* needed by the VRFY macro */ + uint32_t expected_value; + uint32_t *ptr_1; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + /* initialize the local copy of mpi_rank */ + mpi_rank = tv_ptr->mpi_rank; + + /* We have already done a H5Sselect_all() on the dataspace + * small_ds_slice_sid in the initialization phase, so no need to + * call H5Sselect_all() again. + */ + + /* set up start, stride, count, and block -- note that we will + * change start[] so as to read slices of the large cube. 
+ */ + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { + + tv_ptr->block[i] = 1; + } + else { + + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + } + + /* zero out the buffer we will be reading into */ + HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size); + +#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s reading slices from big cube on disk into small cube slice.\n", fcnName); +#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ + + /* in serial versions of this test, we loop through all the dimensions + * of the large data set. However, in the parallel version, each + * process only works with that slice of the large cube indicated + * by its rank -- hence we set the most slowly changing index to + * mpi_rank, and don't iterate over it. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { + + i = tv_ptr->mpi_rank; + } + else { + + i = 0; + } + + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + * loop over it -- either we are setting i to mpi_rank, or + * we are setting it to zero. It will not change during the + * test. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { + + j = tv_ptr->mpi_rank; + } + else { + + j = 0; + } + + do { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { + + k = tv_ptr->mpi_rank; + } + else { + + k = 0; + } + + do { + /* since small rank >= 2 and large_rank > small_rank, we + * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 + * (baring major re-orgaization), this gives us: + * + * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 + * + * so no need to repeat the test in the outer loops -- + * just set l = 0. + */ + + l = 0; + do { + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ + + (tv_ptr->tests_skipped)++; + } + else { /* run the test */ + + tv_ptr->skips = 0; /* reset the skips counter */ + + /* we know that small_rank - 1 >= 1 and that + * large_rank > small_rank by the assertions at the head + * of this function. Thus no need for another inner loop. + */ + tv_ptr->start[0] = (hsize_t)i; + tv_ptr->start[1] = (hsize_t)j; + tv_ptr->start[2] = (hsize_t)k; + tv_ptr->start[3] = (hsize_t)l; + tv_ptr->start[4] = 0; + + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr, + tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); + VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_cube_sid) succeeded"); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. 
+ */ + check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); + + /* Read selection from disk */ +#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), + (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), + (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); + HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName, + H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid), + H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); +#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ + ret = + H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf); + VRFY((ret >= 0), "H5Dread() slice from large ds succeeded."); + + /* verify that expected data is retrieved */ + + mis_match = FALSE; + ptr_1 = tv_ptr->small_ds_slice_buf; + expected_value = + (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); + + for (n = 0; n < tv_ptr->small_ds_slice_size; n++) { + + if (*ptr_1 != expected_value) { + + mis_match = TRUE; + } + + *ptr_1 = 0; /* zero data for next use */ + + ptr_1++; + expected_value++; + } + + VRFY((mis_match == FALSE), "small slice read from large ds data good."); + + (tv_ptr->tests_run)++; + } + + l++; + + (tv_ptr->total_tests)++; + + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); + k++; + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); + j++; + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); + + return; + +} /* contig_hs_dr_pio_test__d2m_l2s() */ + +/*------------------------------------------------------------------------- + * Function: contig_hs_dr_pio_test__d2m_s2l() + * + * Purpose: Part two of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. + * + * Verify that we can read from disk correctly using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. + * + * In this function, we test this by reading slices of the + * on disk small data set into slices through the in memory + * large data set, and verify that the correct data (and + * only the correct data) is read. 
+ * + * Return: void + * + * Programmer: JRM -- 8/10/11 + * + *------------------------------------------------------------------------- + */ + +#define CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0 + +static void +contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) +{ +#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG + const char *fcnName = "contig_hs_dr_pio_test__d2m_s2l()"; +#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ + hbool_t mis_match = FALSE; + int i, j, k, l; + size_t n; + int mpi_rank; /* needed by the VRFY macro */ + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + /* initialize the local copy of mpi_rank */ + mpi_rank = tv_ptr->mpi_rank; + + /* Read slices of the on disk small data set into slices + * through the in memory large data set, and verify that the correct + * data (and only the correct data) is read. + */ + + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; + + for (i = 1; i < tv_ptr->large_rank; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + + ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); + +#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG + HDfprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName); +#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ + + /* zero out the in memory large ds */ + HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); + + /* set up start, stride, count, and block -- note that we will + * change start[] so as to read slices of the large cube. + */ + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { + + tv_ptr->block[i] = 1; + } + else { + + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + } + + /* in serial versions of this test, we loop through all the dimensions + * of the large data set that don't appear in the small data set. + * + * However, in the parallel version, each process only works with that + * slice of the large (and small) data set indicated by its rank -- hence + * we set the most slowly changing index to mpi_rank, and don't iterate + * over it. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { + + i = tv_ptr->mpi_rank; + } + else { + + i = 0; + } + + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + * loop over it -- either we are setting i to mpi_rank, or + * we are setting it to zero. It will not change during the + * test. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { + + j = tv_ptr->mpi_rank; + } + else { + + j = 0; + } + + do { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { + + k = tv_ptr->mpi_rank; + } + else { + + k = 0; + } + + do { + /* since small rank >= 2 and large_rank > small_rank, we + * have large_rank >= 3. 
Since PAR_SS_DR_MAX_RANK == 5 + * (baring major re-orgaization), this gives us: + * + * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 + * + * so no need to repeat the test in the outer loops -- + * just set l = 0. + */ + + l = 0; + do { + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ + + (tv_ptr->tests_skipped)++; + } + else { /* run the test */ + + tv_ptr->skips = 0; /* reset the skips counter */ + + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. + */ + tv_ptr->start[0] = (hsize_t)i; + tv_ptr->start[1] = (hsize_t)j; + tv_ptr->start[2] = (hsize_t)k; + tv_ptr->start[3] = (hsize_t)l; + tv_ptr->start[4] = 0; + + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr, + tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); + VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid) succeeded"); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. + */ + check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); + + /* Read selection from disk */ +#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), + (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), + (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, + H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid), + H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0)); +#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ + ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); + VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); + + /* verify that the expected data and only the + * expected data was read. 
+ */ + ptr_1 = tv_ptr->large_ds_buf_1; + expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); + start_index = + (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); + stop_index = start_index + tv_ptr->small_ds_slice_size - 1; + + HDassert(start_index < stop_index); + HDassert(stop_index <= tv_ptr->large_ds_size); + + for (n = 0; n < tv_ptr->large_ds_size; n++) { + + if ((n >= start_index) && (n <= stop_index)) { + + if (*ptr_1 != expected_value) { + + mis_match = TRUE; + } + expected_value++; + } + else { + + if (*ptr_1 != 0) { + + mis_match = TRUE; + } + } + /* zero out the value for the next pass */ + *ptr_1 = 0; + + ptr_1++; + } + + VRFY((mis_match == FALSE), "small slice read from large ds data good."); + + (tv_ptr->tests_run)++; + } + + l++; + + (tv_ptr->total_tests)++; + + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); + k++; + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); + j++; + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); + + return; + +} /* contig_hs_dr_pio_test__d2m_s2l() */ + +/*------------------------------------------------------------------------- + * Function: contig_hs_dr_pio_test__m2d_l2s() + * + * Purpose: Part three of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. + * + * Verify that we can write from memory to file using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. + * + * Do this by writing small_rank - 1 dimensional slices from + * the in memory large data set to the on disk small cube + * dataset. After each write, read the slice of the small + * dataset back from disk, and verify that it contains + * the expected data. Verify that H5Sselect_shape_same() + * returns true on the memory and file selections. + * + * Return: void + * + * Programmer: JRM -- 8/10/11 + * + *------------------------------------------------------------------------- + */ + +#define CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0 + +static void +contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) +{ +#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG + const char *fcnName = "contig_hs_dr_pio_test__m2d_l2s()"; +#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ + hbool_t mis_match = FALSE; + int i, j, k, l; + size_t n; + int mpi_rank; /* needed by the VRFY macro */ + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + /* initialize the local copy of mpi_rank */ + mpi_rank = tv_ptr->mpi_rank; + + /* now we go in the opposite direction, verifying that we can write + * from memory to file using selections of different rank that + * H5Sselect_shape_same() views as being of the same shape. + * + * Start by writing small_rank - 1 dimensional slices from the in memory large + * data set to the on disk small cube dataset. After each write, read the + * slice of the small dataset back from disk, and verify that it contains + * the expected data. Verify that H5Sselect_shape_same() returns true on + * the memory and file selections. 
+ */ + + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; + + for (i = 1; i < tv_ptr->large_rank; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + + ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); + + ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); + + /* set up start, stride, count, and block -- note that we will + * change start[] so as to read slices of the large cube. + */ + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { + + tv_ptr->block[i] = 1; + } + else { + + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + } + + /* zero out the in memory small ds */ + HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); + +#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG + HDfprintf(stdout, "%s writing slices from big ds to slices of small ds on disk.\n", fcnName); +#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ + + /* in serial versions of this test, we loop through all the dimensions + * of the large data set that don't appear in the small data set. + * + * However, in the parallel version, each process only works with that + * slice of the large (and small) data set indicated by its rank -- hence + * we set the most slowly changing index to mpi_rank, and don't iterate + * over it. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { + + i = tv_ptr->mpi_rank; + } + else { + + i = 0; + } + + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + * loop over it -- either we are setting i to mpi_rank, or + * we are setting it to zero. It will not change during the + * test. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { + + j = tv_ptr->mpi_rank; + } + else { + + j = 0; + } + + j = 0; + do { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { + + k = tv_ptr->mpi_rank; + } + else { + + k = 0; + } + + do { + /* since small rank >= 2 and large_rank > small_rank, we + * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 + * (baring major re-orgaization), this gives us: + * + * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 + * + * so no need to repeat the test in the outer loops -- + * just set l = 0. + */ + + l = 0; + do { + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ + + (tv_ptr->tests_skipped)++; + } + else { /* run the test */ + + tv_ptr->skips = 0; /* reset the skips counter */ + + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. 
+ */ + + /* zero out this rank's slice of the on disk small data set */ + ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2); + VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded."); + + /* select the portion of the in memory large cube from which we + * are going to write data. + */ + tv_ptr->start[0] = (hsize_t)i; + tv_ptr->start[1] = (hsize_t)j; + tv_ptr->start[2] = (hsize_t)k; + tv_ptr->start[3] = (hsize_t)l; + tv_ptr->start[4] = 0; + + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr, + tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); + VRFY((ret >= 0), "H5Sselect_hyperslab() mem_large_ds_sid succeeded."); + + /* verify that H5Sselect_shape_same() reports the in + * memory slice through the cube selection and the + * on disk full square selections as having the same shape. + */ + check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed."); + + /* write the slice from the in memory large data set to the + * slice of the on disk small dataset. */ +#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), + (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), + (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, + H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid), + H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0)); +#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ + ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0); + VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded."); + + /* read the on disk square into memory */ + ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1); + VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); + + /* verify that expected data is retrieved */ + + mis_match = FALSE; + ptr_1 = tv_ptr->small_ds_buf_1; + + expected_value = + (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); + + start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size; + stop_index = start_index + tv_ptr->small_ds_slice_size - 1; + + HDassert(start_index < stop_index); + HDassert(stop_index <= tv_ptr->small_ds_size); + + for (n = 0; n < tv_ptr->small_ds_size; n++) { + + if ((n >= start_index) && (n <= stop_index)) { + + if (*ptr_1 != expected_value) { + + mis_match = TRUE; + } + expected_value++; + } + else { + + if (*ptr_1 != 0) { + + mis_match = TRUE; + } + } + /* zero out the value for the next pass */ + *ptr_1 = 0; + + ptr_1++; + } + + VRFY((mis_match == FALSE), "small slice write from large ds data good."); + + (tv_ptr->tests_run)++; + } + + l++; + + (tv_ptr->total_tests)++; + + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); + k++; + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); + j++; + } while ((tv_ptr->large_rank > 4) && 
((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); + + return; + +} /* contig_hs_dr_pio_test__m2d_l2s() */ + +/*------------------------------------------------------------------------- + * Function: contig_hs_dr_pio_test__m2d_s2l() + * + * Purpose: Part four of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. + * + * Verify that we can write from memory to file using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. + * + * Do this by writing the contents of the process's slice of + * the in memory small data set to slices of the on disk + * large data set. After each write, read the process's + * slice of the large data set back into memory, and verify + * that it contains the expected data. + * + * Verify that H5Sselect_shape_same() returns true on the + * memory and file selections. + * + * Return: void + * + * Programmer: JRM -- 8/10/11 + * + *------------------------------------------------------------------------- + */ + +#define CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0 + +static void +contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) +{ +#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG + const char *fcnName = "contig_hs_dr_pio_test__m2d_s2l()"; +#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ + hbool_t mis_match = FALSE; + int i, j, k, l; + size_t n; + int mpi_rank; /* needed by the VRFY macro */ + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + /* initialize the local copy of mpi_rank */ + mpi_rank = tv_ptr->mpi_rank; + + /* Now write the contents of the process's slice of the in memory + * small data set to slices of the on disk large data set. After + * each write, read the process's slice of the large data set back + * into memory, and verify that it contains the expected data. + * Verify that H5Sselect_shape_same() returns true on the memory + * and file selections. + */ + + /* select the slice of the in memory small data set associated with + * the process's mpi rank. + */ + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; + + for (i = 1; i < tv_ptr->large_rank; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + + ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); + + /* set up start, stride, count, and block -- note that we will + * change start[] so as to write slices of the small data set to + * slices of the large data set. 
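+     *
+     * (Illustrative values only: with small_rank == 3 and edge_size == 6 the
+     * loop below produces block[] == {1, 1, 1, 6, 6} and
+     * stride[] == {12, 12, 12, 12, 12} with count[] all 1s, i.e. a single
+     * (small_rank - 1) dimensional 6 x 6 tile whose position is set later
+     * through start[].)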
+ */ + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { + + tv_ptr->block[i] = 1; + } + else { + + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + } + + /* zero out the in memory large ds */ + HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); + +#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG + HDfprintf(stdout, "%s writing process slices of small ds to slices of large ds on disk.\n", fcnName); +#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { + + i = tv_ptr->mpi_rank; + } + else { + + i = 0; + } + + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + * loop over it -- either we are setting i to mpi_rank, or + * we are setting it to zero. It will not change during the + * test. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { + + j = tv_ptr->mpi_rank; + } + else { + + j = 0; + } + + do { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { + + k = tv_ptr->mpi_rank; + } + else { + + k = 0; + } + + do { + /* since small rank >= 2 and large_rank > small_rank, we + * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 + * (baring major re-orgaization), this gives us: + * + * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 + * + * so no need to repeat the test in the outer loops -- + * just set l = 0. + */ + + l = 0; + do { + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ + + (tv_ptr->tests_skipped)++; + +#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG + tv_ptr->start[0] = (hsize_t)i; + tv_ptr->start[1] = (hsize_t)j; + tv_ptr->start[2] = (hsize_t)k; + tv_ptr->start[3] = (hsize_t)l; + tv_ptr->start[4] = 0; + + HDfprintf(stdout, "%s:%d: skipping test with start = %d %d %d %d %d.\n", fcnName, + (int)(tv_ptr->mpi_rank), (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), + (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, + H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid), + H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); +#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ + } + else { /* run the test */ + + tv_ptr->skips = 0; /* reset the skips counter */ + + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. + */ + + /* Zero out this processes slice of the on disk large data set. + * Note that this will leave one slice with its original data + * as there is one more slice than processes. + */ + ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->large_ds_slice_sid, + tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist, + tv_ptr->large_ds_buf_2); + VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded"); + + /* select the portion of the in memory large cube to which we + * are going to write data. 
+ */ + tv_ptr->start[0] = (hsize_t)i; + tv_ptr->start[1] = (hsize_t)j; + tv_ptr->start[2] = (hsize_t)k; + tv_ptr->start[3] = (hsize_t)l; + tv_ptr->start[4] = 0; + + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr, + tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); + VRFY((ret != FAIL), "H5Sselect_hyperslab() target large ds slice succeeded"); + + /* verify that H5Sselect_shape_same() reports the in + * memory small data set slice selection and the + * on disk slice through the large data set selection + * as having the same shape. + */ + check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_0); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); + + /* write the small data set slice from memory to the + * target slice of the disk data set + */ +#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), + (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), + (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, + H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid), + H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); +#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ + ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0); + VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded"); + + /* read this processes slice on the on disk large + * data set into memory. + */ + + ret = H5Dread( + tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_process_slice_sid, + tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); + VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded"); + + /* verify that the expected data and only the + * expected data was read. 
+ */ + ptr_1 = tv_ptr->large_ds_buf_1; + expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); + + start_index = + (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); + stop_index = start_index + tv_ptr->small_ds_slice_size - 1; + + HDassert(start_index < stop_index); + HDassert(stop_index < tv_ptr->large_ds_size); + + for (n = 0; n < tv_ptr->large_ds_size; n++) { + + if ((n >= start_index) && (n <= stop_index)) { + + if (*ptr_1 != expected_value) { + + mis_match = TRUE; + } + + expected_value++; + } + else { + + if (*ptr_1 != 0) { + + mis_match = TRUE; + } + } + /* zero out buffer for next test */ + *ptr_1 = 0; + ptr_1++; + } + + VRFY((mis_match == FALSE), "small ds slice write to large ds slice data good."); + + (tv_ptr->tests_run)++; + } + + l++; + + (tv_ptr->total_tests)++; + + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); + k++; + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); + j++; + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); + + return; + +} /* contig_hs_dr_pio_test__m2d_s2l() */ + +/*------------------------------------------------------------------------- + * Function: contig_hs_dr_pio_test__run_test() + * + * Purpose: Test I/O to/from hyperslab selections of different rank in + * the parallel. + * + * Return: void + * + * Programmer: JRM -- 9/18/09 + * + *------------------------------------------------------------------------- + */ + +#define CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0 + +static void +contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size, + const int small_rank, const int large_rank, const hbool_t use_collective_io, + const hid_t dset_type, int express_test, int *skips_ptr, int max_skips, + int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, + int mpi_rank) +{ +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + const char *fcnName = "contig_hs_dr_pio_test__run_test()"; +#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + struct hs_dr_pio_test_vars_t test_vars = { + /* int mpi_size = */ -1, + /* int mpi_rank = */ -1, + /* MPI_Comm mpi_comm = */ MPI_COMM_NULL, + /* MPI_Inf mpi_info = */ MPI_INFO_NULL, + /* int test_num = */ -1, + /* int edge_size = */ -1, + /* int checker_edge_size = */ -1, + /* int chunk_edge_size = */ -1, + /* int small_rank = */ -1, + /* int large_rank = */ -1, + /* hid_t dset_type = */ -1, + /* uint32_t * small_ds_buf_0 = */ NULL, + /* uint32_t * small_ds_buf_1 = */ NULL, + /* uint32_t * small_ds_buf_2 = */ NULL, + /* uint32_t * small_ds_slice_buf = */ NULL, + /* uint32_t * large_ds_buf_0 = */ NULL, + /* uint32_t * large_ds_buf_1 = */ NULL, + /* uint32_t * large_ds_buf_2 = */ NULL, + /* uint32_t * large_ds_slice_buf = */ NULL, + /* int small_ds_offset = */ -1, + /* int large_ds_offset = */ -1, + /* hid_t fid = */ -1, /* HDF5 file ID */ + /* hid_t xfer_plist = */ H5P_DEFAULT, + /* hid_t full_mem_small_ds_sid = */ -1, + /* hid_t full_file_small_ds_sid = */ -1, + /* hid_t mem_small_ds_sid = */ -1, + /* hid_t file_small_ds_sid_0 = */ -1, + /* hid_t file_small_ds_sid_1 = */ -1, + /* hid_t small_ds_slice_sid = */ -1, + /* hid_t full_mem_large_ds_sid = */ -1, + /* hid_t full_file_large_ds_sid = */ -1, + /* hid_t 
mem_large_ds_sid = */ -1, + /* hid_t file_large_ds_sid_0 = */ -1, + /* hid_t file_large_ds_sid_1 = */ -1, + /* hid_t file_large_ds_process_slice_sid = */ -1, + /* hid_t mem_large_ds_process_slice_sid = */ -1, + /* hid_t large_ds_slice_sid = */ -1, + /* hid_t small_dataset = */ -1, /* Dataset ID */ + /* hid_t large_dataset = */ -1, /* Dataset ID */ + /* size_t small_ds_size = */ 1, + /* size_t small_ds_slice_size = */ 1, + /* size_t large_ds_size = */ 1, + /* size_t large_ds_slice_size = */ 1, + /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t * start_ptr = */ NULL, + /* hsize_t * stride_ptr = */ NULL, + /* hsize_t * count_ptr = */ NULL, + /* hsize_t * block_ptr = */ NULL, + /* int skips = */ 0, + /* int max_skips = */ 0, + /* int64_t total_tests = */ 0, + /* int64_t tests_run = */ 0, + /* int64_t tests_skipped = */ 0}; + struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars; + + if (MAINPROCESS) + printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1), + small_rank, large_rank); + + hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size, small_rank, large_rank, use_collective_io, + dset_type, express_test, tv_ptr); + + /* initialize skips & max_skips */ + tv_ptr->skips = *skips_ptr; + tv_ptr->max_skips = max_skips; + +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank); + HDfprintf(stdout, "test %d: Initialization complete.\n", test_num); + } +#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + + /* first, verify that we can read from disk correctly using selections + * of different rank that H5Sselect_shape_same() views as being of the + * same shape. + * + * Start by reading small_rank - 1 dimensional slice from the on disk + * large cube, and verifying that the data read is correct. Verify that + * H5Sselect_shape_same() returns true on the memory and file selections. + */ + +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_l2s.\n", test_num); + } +#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + contig_hs_dr_pio_test__d2m_l2s(tv_ptr); + + /* Second, read slices of the on disk small data set into slices + * through the in memory large data set, and verify that the correct + * data (and only the correct data) is read. + */ + +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_s2l.\n", test_num); + } +#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + contig_hs_dr_pio_test__d2m_s2l(tv_ptr); + + /* now we go in the opposite direction, verifying that we can write + * from memory to file using selections of different rank that + * H5Sselect_shape_same() views as being of the same shape. + * + * Start by writing small_rank - 1 D slices from the in memory large data + * set to the on disk small cube dataset. After each write, read the + * slice of the small dataset back from disk, and verify that it contains + * the expected data. Verify that H5Sselect_shape_same() returns true on + * the memory and file selections. 
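+     *
+     * (The two write phases below -- contig_hs_dr_pio_test__m2d_l2s() and
+     * then contig_hs_dr_pio_test__m2d_s2l() -- exercise the same rank
+     * combinations as the two read phases above, but transfer data from
+     * memory to file instead.)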
+ */ + +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_l2s.\n", test_num); + } +#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + contig_hs_dr_pio_test__m2d_l2s(tv_ptr); + + /* Now write the contents of the process's slice of the in memory + * small data set to slices of the on disk large data set. After + * each write, read the process's slice of the large data set back + * into memory, and verify that it contains the expected data. + * Verify that H5Sselect_shape_same() returns true on the memory + * and file selections. + */ + +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_s2l.\n", test_num); + } +#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + contig_hs_dr_pio_test__m2d_s2l(tv_ptr); + +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n", + test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped), + (long long)(tv_ptr->total_tests)); + } +#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + + hs_dr_pio_test__takedown(tv_ptr); + +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: Takedown complete.\n", test_num); + } +#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + + *skips_ptr = tv_ptr->skips; + *total_tests_ptr += tv_ptr->total_tests; + *tests_run_ptr += tv_ptr->tests_run; + *tests_skipped_ptr += tv_ptr->tests_skipped; + + return; + +} /* contig_hs_dr_pio_test__run_test() */ + +/*------------------------------------------------------------------------- + * Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) + * + * Purpose: Test I/O to/from hyperslab selections of different rank in + * the parallel case. + * + * Return: void + * + * Programmer: JRM -- 9/18/09 + * + *------------------------------------------------------------------------- + */ + +#define CONTIG_HS_DR_PIO_TEST__DEBUG 0 + +static void +contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) +{ + int express_test; + int local_express_test; + int mpi_rank = -1; + int mpi_size; + int test_num = 0; + int edge_size; + int chunk_edge_size = 0; + int small_rank; + int large_rank; + int mpi_result; + int skips = 0; + int max_skips = 0; + /* The following table list the number of sub-tests skipped between + * each test that is actually executed as a function of the express + * test level. Note that any value in excess of 4880 will cause all + * sub tests to be skipped. + */ + int max_skips_tbl[4] = {0, 4, 64, 1024}; + hid_t dset_type = H5T_NATIVE_UINT; + int64_t total_tests = 0; + int64_t tests_run = 0; + int64_t tests_skipped = 0; + + HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); + + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + edge_size = (mpi_size > 6 ? 
mpi_size : 6); + + local_express_test = EXPRESS_MODE; /* GetTestExpress(); */ + + mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX, + MPI_COMM_WORLD); + + VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded"); + + if (local_express_test < 0) { + max_skips = max_skips_tbl[0]; + } + else if (local_express_test > 3) { + max_skips = max_skips_tbl[3]; + } + else { + max_skips = max_skips_tbl[local_express_test]; + } + + for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) { + + for (small_rank = 2; small_rank < large_rank; small_rank++) { + + switch (sstest_type) { + case IND_CONTIG: + /* contiguous data set, independent I/O */ + chunk_edge_size = 0; + + contig_hs_dr_pio_test__run_test( + test_num, edge_size, chunk_edge_size, small_rank, large_rank, FALSE, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); + test_num++; + break; + /* end of case IND_CONTIG */ + + case COL_CONTIG: + /* contiguous data set, collective I/O */ + chunk_edge_size = 0; + + contig_hs_dr_pio_test__run_test( + test_num, edge_size, chunk_edge_size, small_rank, large_rank, TRUE, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); + test_num++; + break; + /* end of case COL_CONTIG */ + + case IND_CHUNKED: + /* chunked data set, independent I/O */ + chunk_edge_size = 5; + + contig_hs_dr_pio_test__run_test( + test_num, edge_size, chunk_edge_size, small_rank, large_rank, FALSE, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); + test_num++; + break; + /* end of case IND_CHUNKED */ + + case COL_CHUNKED: + /* chunked data set, collective I/O */ + chunk_edge_size = 5; + + contig_hs_dr_pio_test__run_test( + test_num, edge_size, chunk_edge_size, small_rank, large_rank, TRUE, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); + test_num++; + break; + /* end of case COL_CHUNKED */ + + default: + VRFY((FALSE), "unknown test type"); + break; + + } /* end of switch(sstest_type) */ +#if CONTIG_HS_DR_PIO_TEST__DEBUG + if ((MAINPROCESS) && (tests_skipped > 0)) { + HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n", tests_run, tests_skipped, + total_tests); + } +#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */ + } + } + + if (MAINPROCESS) { + if (tests_skipped > 0) { + HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n", + tests_skipped, total_tests); + } + else + HDprintf("\n"); + } + + return; + +} /* contig_hs_dr_pio_test() */ + +/**************************************************************** +** +** ckrbrd_hs_dr_pio_test__slct_ckrbrd(): +** Given a dataspace of tgt_rank, and dimensions: +** +** (mpi_size + 1), edge_size, ... , edge_size +** +** edge_size, and a checker_edge_size, select a checker +** board selection of a sel_rank (sel_rank < tgt_rank) +** dimensional slice through the dataspace parallel to the +** sel_rank fastest changing indices, with origin (in the +** higher indices) as indicated by the start array. +** +** Note that this function, like all its relatives, is +** hard coded to presume a maximum dataspace rank of 5. +** While this maximum is declared as a constant, increasing +** it will require extensive coding in addition to changing +** the value of the constant. 
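+**
+**	A purely illustrative example: with edge_size == 6,
+**	checker_edge_size == 2, and sel_rank == 2, the selected slice is a
+**	6 x 6 plane covered by 2 x 2 checkers, and alternate checkers are
+**	selected in the usual checkerboard fashion, starting with the
+**	checker at the origin of the slice.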
+** +** JRM -- 10/8/09 +** +****************************************************************/ + +#define CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG 0 + +static void +ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank, + const int edge_size, const int checker_edge_size, const int sel_rank, + hsize_t sel_start[]) +{ +#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG + const char *fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():"; +#endif + hbool_t first_selection = TRUE; + int i, j, k, l, m; + int n_cube_offset; + int sel_offset; + const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */ + /* this changes */ + hsize_t base_count; + hsize_t offset_count; + hsize_t start[PAR_SS_DR_MAX_RANK]; + hsize_t stride[PAR_SS_DR_MAX_RANK]; + hsize_t count[PAR_SS_DR_MAX_RANK]; + hsize_t block[PAR_SS_DR_MAX_RANK]; + herr_t ret; /* Generic return value */ + + HDassert(edge_size >= 6); + HDassert(0 < checker_edge_size); + HDassert(checker_edge_size <= edge_size); + HDassert(0 < sel_rank); + HDassert(sel_rank <= tgt_rank); + HDassert(tgt_rank <= test_max_rank); + HDassert(test_max_rank <= PAR_SS_DR_MAX_RANK); + + sel_offset = test_max_rank - sel_rank; + HDassert(sel_offset >= 0); + + n_cube_offset = test_max_rank - tgt_rank; + HDassert(n_cube_offset >= 0); + HDassert(n_cube_offset <= sel_offset); + +#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG + HDfprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n", fcnName, mpi_rank, edge_size, + checker_edge_size); + HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset); + HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, n_cube_offset); +#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */ + + /* First, compute the base count (which assumes start == 0 + * for the associated offset) and offset_count (which + * assumes start == checker_edge_size for the associated + * offset). + * + * Note that the following computation depends on the C99 + * requirement that integer division discard any fraction + * (truncation towards zero) to function correctly. As we + * now require C99, this shouldn't be a problem, but noting + * it may save us some pain if we are ever obliged to support + * pre-C99 compilers again. + */ + + base_count = (hsize_t)(edge_size / (checker_edge_size * 2)); + + if ((edge_size % (checker_edge_size * 2)) > 0) { + + base_count++; + } + + offset_count = (hsize_t)((edge_size - checker_edge_size) / (checker_edge_size * 2)); + + if (((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0) { + + offset_count++; + } + + /* Now set up the stride and block arrays, and portions of the start + * and count arrays that will not be altered during the selection of + * the checker board. 
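+     *
+     * (Continuing the illustrative numbers above -- edge_size == 6,
+     * checker_edge_size == 2 -- the computation just above yields
+     * base_count == 2 (checker runs starting at offsets 0 and 4) and
+     * offset_count == 1 (the run starting at offset 2), while the loops
+     * below use stride == 4 and block == 2 in each checkered dimension.)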
+ */ + i = 0; + while (i < n_cube_offset) { + + /* these values should never be used */ + start[i] = 0; + stride[i] = 0; + count[i] = 0; + block[i] = 0; + + i++; + } + + while (i < sel_offset) { + + start[i] = sel_start[i]; + stride[i] = (hsize_t)(2 * edge_size); + count[i] = 1; + block[i] = 1; + + i++; + } + + while (i < test_max_rank) { + + stride[i] = (hsize_t)(2 * checker_edge_size); + block[i] = (hsize_t)checker_edge_size; + + i++; + } + + i = 0; + do { + if (0 >= sel_offset) { + + if (i == 0) { + + start[0] = 0; + count[0] = base_count; + } + else { + + start[0] = (hsize_t)checker_edge_size; + count[0] = offset_count; + } + } + + j = 0; + do { + if (1 >= sel_offset) { + + if (j == 0) { + + start[1] = 0; + count[1] = base_count; + } + else { + + start[1] = (hsize_t)checker_edge_size; + count[1] = offset_count; + } + } + + k = 0; + do { + if (2 >= sel_offset) { + + if (k == 0) { + + start[2] = 0; + count[2] = base_count; + } + else { + + start[2] = (hsize_t)checker_edge_size; + count[2] = offset_count; + } + } + + l = 0; + do { + if (3 >= sel_offset) { + + if (l == 0) { + + start[3] = 0; + count[3] = base_count; + } + else { + + start[3] = (hsize_t)checker_edge_size; + count[3] = offset_count; + } + } + + m = 0; + do { + if (4 >= sel_offset) { + + if (m == 0) { + + start[4] = 0; + count[4] = base_count; + } + else { + + start[4] = (hsize_t)checker_edge_size; + count[4] = offset_count; + } + } + + if (((i + j + k + l + m) % 2) == 0) { + +#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG + HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank, + (int)first_selection); + HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i, j, + k, l, m); + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)start[0], (int)start[1], (int)start[2], (int)start[3], + (int)start[4]); + HDfprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3], + (int)stride[4]); + HDfprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)count[0], (int)count[1], (int)count[2], (int)count[3], + (int)count[4]); + HDfprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)block[0], (int)block[1], (int)block[2], (int)block[3], + (int)block[4]); + HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank, + H5Sget_simple_extent_ndims(tgt_sid)); + HDfprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank, sel_rank); +#endif + + if (first_selection) { + + first_selection = FALSE; + + ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[n_cube_offset]), + &(stride[n_cube_offset]), &(count[n_cube_offset]), + &(block[n_cube_offset])); + + VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded"); + } + else { + + ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[n_cube_offset]), + &(stride[n_cube_offset]), &(count[n_cube_offset]), + &(block[n_cube_offset])); + + VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded"); + } + } + + m++; + + } while ((m <= 1) && (4 >= sel_offset)); + + l++; + + } while ((l <= 1) && (3 >= sel_offset)); + + k++; + + } while ((k <= 1) && (2 >= sel_offset)); + + j++; + + } while ((j <= 1) && (1 >= sel_offset)); + + i++; + + } while ((i <= 1) && (0 >= sel_offset)); + +#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(tgt_sid)); +#endif /* 
CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */ + + /* Clip the selection back to the dataspace proper. */ + + for (i = 0; i < test_max_rank; i++) { + + start[i] = 0; + stride[i] = (hsize_t)edge_size; + count[i] = 1; + block[i] = (hsize_t)edge_size; + } + + ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block); + + VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded"); + +#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(tgt_sid)); + HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank); +#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */ + + return; + +} /* ckrbrd_hs_dr_pio_test__slct_ckrbrd() */ + +/**************************************************************** +** +** ckrbrd_hs_dr_pio_test__verify_data(): +** +** Examine the supplied buffer to see if it contains the +** expected data. Return TRUE if it does, and FALSE +** otherwise. +** +** The supplied buffer is presumed to this process's slice +** of the target data set. Each such slice will be an +** n-cube of rank (rank -1) and the supplied edge_size with +** origin (mpi_rank, 0, ... , 0) in the target data set. +** +** Further, the buffer is presumed to be the result of reading +** or writing a checker board selection of an m (1 <= m < +** rank) dimensional slice through this processes slice +** of the target data set. Also, this slice must be parallel +** to the fastest changing indices. +** +** It is further presumed that the buffer was zeroed before +** the read/write, and that the full target data set (i.e. +** the buffer/data set for all processes) was initialized +** with the natural numbers listed in order from the origin +** along the fastest changing axis. +** +** Thus for a 20x10x10 dataset, the value stored in location +** (x, y, z) (assuming that z is the fastest changing index +** and x the slowest) is assumed to be: +** +** (10 * 10 * x) + (10 * y) + z +** +** Further, supposing that this is process 10, this process's +** slice of the dataset would be a 10 x 10 2-cube with origin +** (10, 0, 0) in the data set, and would be initialize (prior +** to the checkerboard selection) as follows: +** +** 1000, 1001, 1002, ... 1008, 1009 +** 1010, 1011, 1012, ... 1018, 1019 +** . . . . . +** . . . . . +** . . . . . +** 1090, 1091, 1092, ... 1098, 1099 +** +** In the case of a read from the processors slice of another +** data set of different rank, the values expected will have +** to be adjusted accordingly. This is done via the +** first_expected_val parameter. +** +** Finally, the function presumes that the first element +** of the buffer resides either at the origin of either +** a selected or an unselected checker. (Translation: +** if partial checkers appear in the buffer, they will +** intersect the edges of the n-cube opposite the origin.) 
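+**
+**	Informally: the routine walks the buffer in row-major order and, at
+**	offset n, expects the value first_expected_val + n inside selected
+**	checkers and zero everywhere else; each element is zeroed as it is
+**	visited so that the buffer can be reused by the next sub-test.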
+** +****************************************************************/ + +#define CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG 0 + +static hbool_t +ckrbrd_hs_dr_pio_test__verify_data(uint32_t *buf_ptr, const int rank, const int edge_size, + const int checker_edge_size, uint32_t first_expected_val, + hbool_t buf_starts_in_checker) +{ +#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG + const char *fcnName = "ckrbrd_hs_dr_pio_test__verify_data():"; +#endif + hbool_t good_data = TRUE; + hbool_t in_checker; + hbool_t start_in_checker[5]; + uint32_t expected_value; + uint32_t *val_ptr; + int i, j, k, l, m; /* to track position in n-cube */ + int v, w, x, y, z; /* to track position in checker */ + const int test_max_rank = 5; /* code changes needed if this is increased */ + + HDassert(buf_ptr != NULL); + HDassert(0 < rank); + HDassert(rank <= test_max_rank); + HDassert(edge_size >= 6); + HDassert(0 < checker_edge_size); + HDassert(checker_edge_size <= edge_size); + HDassert(test_max_rank <= PAR_SS_DR_MAX_RANK); + +#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG + + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank); + HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size); + HDfprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size); + HDfprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val); + HDfprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker); +} +#endif + +val_ptr = buf_ptr; +expected_value = first_expected_val; + +i = 0; +v = 0; +start_in_checker[0] = buf_starts_in_checker; +do { + if (v >= checker_edge_size) { + + start_in_checker[0] = !start_in_checker[0]; + v = 0; + } + + j = 0; + w = 0; + start_in_checker[1] = start_in_checker[0]; + do { + if (w >= checker_edge_size) { + + start_in_checker[1] = !start_in_checker[1]; + w = 0; + } + + k = 0; + x = 0; + start_in_checker[2] = start_in_checker[1]; + do { + if (x >= checker_edge_size) { + + start_in_checker[2] = !start_in_checker[2]; + x = 0; + } + + l = 0; + y = 0; + start_in_checker[3] = start_in_checker[2]; + do { + if (y >= checker_edge_size) { + + start_in_checker[3] = !start_in_checker[3]; + y = 0; + } + + m = 0; + z = 0; +#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG + HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m); +#endif + in_checker = start_in_checker[3]; + do { +#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG + HDfprintf(stdout, " %d", (int)(*val_ptr)); +#endif + if (z >= checker_edge_size) { + + in_checker = !in_checker; + z = 0; + } + + if (in_checker) { + + if (*val_ptr != expected_value) { + + good_data = FALSE; + } + + /* zero out buffer for re-use */ + *val_ptr = 0; + } + else if (*val_ptr != 0) { + + good_data = FALSE; + + /* zero out buffer for re-use */ + *val_ptr = 0; + } + + val_ptr++; + expected_value++; + m++; + z++; + + } while ((rank >= (test_max_rank - 4)) && (m < edge_size)); +#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG + HDfprintf(stdout, "\n"); +#endif + l++; + y++; + } while ((rank >= (test_max_rank - 3)) && (l < edge_size)); + k++; + x++; + } while ((rank >= (test_max_rank - 2)) && (k < edge_size)); + j++; + w++; + } while ((rank >= (test_max_rank - 1)) && (j < edge_size)); + i++; + v++; +} while ((rank >= test_max_rank) && (i < edge_size)); + +return (good_data); + +} /* ckrbrd_hs_dr_pio_test__verify_data() */ + +/*------------------------------------------------------------------------- + * 
Function: ckrbrd_hs_dr_pio_test__d2m_l2s() + * + * Purpose: Part one of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. + * + * Verify that we can read from disk correctly using checker + * board selections of different rank that + * H5Sselect_shape_same() views as being of the same shape. + * + * In this function, we test this by reading small_rank - 1 + * checker board slices from the on disk large cube, and + * verifying that the data read is correct. Verify that + * H5Sselect_shape_same() returns true on the memory and + * file selections. + * + * Return: void + * + * Programmer: JRM -- 9/15/11 + * + *------------------------------------------------------------------------- + */ + +#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0 + +static void +ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) +{ +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG + const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_l2s()"; + uint32_t *ptr_0; +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ + hbool_t data_ok = FALSE; + int i, j, k, l; + uint32_t expected_value; + int mpi_rank; /* needed by VRFY */ + hsize_t sel_start[PAR_SS_DR_MAX_RANK]; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + /* initialize the local copy of mpi_rank */ + mpi_rank = tv_ptr->mpi_rank; + + /* first, verify that we can read from disk correctly using selections + * of different rank that H5Sselect_shape_same() views as being of the + * same shape. + * + * Start by reading a (small_rank - 1)-D checker board slice from this + * processes slice of the on disk large data set, and verifying that the + * data read is correct. Verify that H5Sselect_shape_same() returns + * true on the memory and file selections. + * + * The first step is to set up the needed checker board selection in the + * in memory small small cube + */ + + sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; + sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->small_ds_slice_sid, tv_ptr->small_rank - 1, + tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, + sel_start); + + /* zero out the buffer we will be reading into */ + HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size); + +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s:%d: initial small_ds_slice_buf = ", fcnName, tv_ptr->mpi_rank); + ptr_0 = tv_ptr->small_ds_slice_buf; + for (i = 0; i < (int)(tv_ptr->small_ds_slice_size); i++) { + HDfprintf(stdout, "%d ", (int)(*ptr_0)); + ptr_0++; + } + HDfprintf(stdout, "\n"); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ + + /* set up start, stride, count, and block -- note that we will + * change start[] so as to read slices of the large cube. 
+ */ + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { + + tv_ptr->block[i] = 1; + } + else { + + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + } + +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s:%d: reading slice from big ds on disk into small ds slice.\n", fcnName, + tv_ptr->mpi_rank); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ + /* in serial versions of this test, we loop through all the dimensions + * of the large data set. However, in the parallel version, each + * process only works with that slice of the large cube indicated + * by its rank -- hence we set the most slowly changing index to + * mpi_rank, and don't iterate over it. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { + + i = tv_ptr->mpi_rank; + } + else { + + i = 0; + } + + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + * loop over it -- either we are setting i to mpi_rank, or + * we are setting it to zero. It will not change during the + * test. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { + + j = tv_ptr->mpi_rank; + } + else { + + j = 0; + } + + do { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { + + k = tv_ptr->mpi_rank; + } + else { + + k = 0; + } + + do { + /* since small rank >= 2 and large_rank > small_rank, we + * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 + * (baring major re-orgaization), this gives us: + * + * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 + * + * so no need to repeat the test in the outer loops -- + * just set l = 0. + */ + + l = 0; + do { + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ + + (tv_ptr->tests_skipped)++; + } + else { /* run the test */ + + tv_ptr->skips = 0; /* reset the skips counter */ + + /* we know that small_rank - 1 >= 1 and that + * large_rank > small_rank by the assertions at the head + * of this function. Thus no need for another inner loop. + */ + tv_ptr->start[0] = (hsize_t)i; + tv_ptr->start[1] = (hsize_t)j; + tv_ptr->start[2] = (hsize_t)k; + tv_ptr->start[3] = (hsize_t)l; + tv_ptr->start[4] = 0; + + HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd( + tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_0, tv_ptr->large_rank, tv_ptr->edge_size, + tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. 
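+                     *
+                     * Here the two selections are the checker board selection
+                     * of a (small_rank - 1) dimensional slice through the on
+                     * disk large dataset (file_large_ds_sid_0) and the
+                     * matching checker board selection of the in memory
+                     * small_ds_slice_sid dataspace set up near the top of
+                     * this function.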
+ */ + check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); + + /* Read selection from disk */ +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, + tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], + tv_ptr->start[4]); + HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName, + H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid), + H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ + + ret = + H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf); + VRFY((ret >= 0), "H5Dread() slice from large ds succeeded."); + +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, tv_ptr->mpi_rank); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ + + /* verify that expected data is retrieved */ + + expected_value = + (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); + + data_ok = ckrbrd_hs_dr_pio_test__verify_data( + tv_ptr->small_ds_slice_buf, tv_ptr->small_rank - 1, tv_ptr->edge_size, + tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE); + + VRFY((data_ok == TRUE), "small slice read from large ds data good."); + + (tv_ptr->tests_run)++; + } + + l++; + + (tv_ptr->total_tests)++; + + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); + k++; + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); + j++; + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); + + return; + +} /* ckrbrd_hs_dr_pio_test__d2m_l2s() */ + +/*------------------------------------------------------------------------- + * Function: ckrbrd_hs_dr_pio_test__d2m_s2l() + * + * Purpose: Part two of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. + * + * Verify that we can read from disk correctly using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. + * + * In this function, we test this by reading checker board + * slices of the on disk small data set into slices through + * the in memory large data set, and verify that the correct + * data (and only the correct data) is read. 
+ * + * Return: void + * + * Programmer: JRM -- 8/15/11 + * + *------------------------------------------------------------------------- + */ + +#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0 + +static void +ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) +{ +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG + const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_s2l()"; +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ + hbool_t data_ok = FALSE; + int i, j, k, l; + size_t u; + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + int mpi_rank; /* needed by VRFY */ + hsize_t sel_start[PAR_SS_DR_MAX_RANK]; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + /* initialize the local copy of mpi_rank */ + mpi_rank = tv_ptr->mpi_rank; + + /* similarly, read slices of the on disk small data set into slices + * through the in memory large data set, and verify that the correct + * data (and only the correct data) is read. + */ + + sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; + sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_0, tv_ptr->small_rank, + tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, + sel_start); + +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG + HDfprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ + + /* zero out the buffer we will be reading into */ + HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); + + /* set up start, stride, count, and block -- note that we will + * change start[] so as to read the slice of the small data set + * into different slices of the process slice of the large data + * set. + */ + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { + + tv_ptr->block[i] = 1; + } + else { + + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + } + + /* in serial versions of this test, we loop through all the dimensions + * of the large data set that don't appear in the small data set. + * + * However, in the parallel version, each process only works with that + * slice of the large (and small) data set indicated by its rank -- hence + * we set the most slowly changing index to mpi_rank, and don't iterate + * over it. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { + + i = tv_ptr->mpi_rank; + } + else { + + i = 0; + } + + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + * loop over it -- either we are setting i to mpi_rank, or + * we are setting it to zero. It will not change during the + * test. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { + + j = tv_ptr->mpi_rank; + } + else { + + j = 0; + } + + do { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { + + k = tv_ptr->mpi_rank; + } + else { + + k = 0; + } + + do { + /* since small rank >= 2 and large_rank > small_rank, we + * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 + * (baring major re-orgaization), this gives us: + * + * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 + * + * so no need to repeat the test in the outer loops -- + * just set l = 0. 
+ */ + + l = 0; + do { + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ + + (tv_ptr->tests_skipped)++; + } + else { /* run the test */ + + tv_ptr->skips = 0; /* reset the skips counter */ + + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. + */ + tv_ptr->start[0] = (hsize_t)i; + tv_ptr->start[1] = (hsize_t)j; + tv_ptr->start[2] = (hsize_t)k; + tv_ptr->start[3] = (hsize_t)l; + tv_ptr->start[4] = 0; + + HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd( + tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size, + tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. + */ + check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); + + /* Read selection from disk */ +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, + tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], + tv_ptr->start[4]); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, + H5Sget_simple_extent_ndims(tv_ptr->large_ds_slice_sid), + H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0)); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ + ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); + VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); + + /* verify that the expected data and only the + * expected data was read. 
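+                     *
+                     * The check below is made in three pieces: elements of
+                     * large_ds_buf_1 before start_index must all be zero, the
+                     * [start_index, stop_index] range must hold the expected
+                     * checker board pattern (verified via
+                     * ckrbrd_hs_dr_pio_test__verify_data()), and elements
+                     * after stop_index must again all be zero.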
+ */ + data_ok = TRUE; + ptr_1 = tv_ptr->large_ds_buf_1; + expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); + start_index = + (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); + stop_index = start_index + tv_ptr->small_ds_slice_size - 1; + +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG + { + int m, n; + + HDfprintf(stdout, "%s:%d: expected_value = %d.\n", fcnName, tv_ptr->mpi_rank, + expected_value); + HDfprintf(stdout, "%s:%d: start/stop index = %d/%d.\n", fcnName, tv_ptr->mpi_rank, + start_index, stop_index); + n = 0; + for (m = 0; (unsigned)m < tv_ptr->large_ds_size; m++) { + HDfprintf(stdout, "%d ", (int)(*ptr_1)); + ptr_1++; + n++; + if (n >= tv_ptr->edge_size) { + HDfprintf(stdout, "\n"); + n = 0; + } + } + HDfprintf(stdout, "\n"); + ptr_1 = tv_ptr->large_ds_buf_1; + } +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ + + HDassert(start_index < stop_index); + HDassert(stop_index <= tv_ptr->large_ds_size); + + for (u = 0; u < start_index; u++) { + + if (*ptr_1 != 0) { + + data_ok = FALSE; + } + + /* zero out the value for the next pass */ + *ptr_1 = 0; + + ptr_1++; + } + + VRFY((data_ok == TRUE), "slice read from small to large ds data good(1)."); + + data_ok = ckrbrd_hs_dr_pio_test__verify_data(ptr_1, tv_ptr->small_rank - 1, + tv_ptr->edge_size, tv_ptr->checker_edge_size, + expected_value, (hbool_t)TRUE); + + VRFY((data_ok == TRUE), "slice read from small to large ds data good(2)."); + + ptr_1 = tv_ptr->large_ds_buf_1 + stop_index + 1; + + for (u = stop_index + 1; u < tv_ptr->large_ds_size; u++) { + + if (*ptr_1 != 0) { + + data_ok = FALSE; + } + + /* zero out the value for the next pass */ + *ptr_1 = 0; + + ptr_1++; + } + + VRFY((data_ok == TRUE), "slice read from small to large ds data good(3)."); + + (tv_ptr->tests_run)++; + } + + l++; + + (tv_ptr->total_tests)++; + + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); + k++; + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); + j++; + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); + + return; + +} /* ckrbrd_hs_dr_pio_test__d2m_s2l() */ + +/*------------------------------------------------------------------------- + * Function: ckrbrd_hs_dr_pio_test__m2d_l2s() + * + * Purpose: Part three of a series of tests of I/O to/from checker + * board hyperslab selections of different rank in the + * parallel. + * + * Verify that we can write from memory to file using checker + * board selections of different rank that + * H5Sselect_shape_same() views as being of the same shape. + * + * Do this by writing small_rank - 1 dimensional checker + * board slices from the in memory large data set to the on + * disk small cube dataset. After each write, read the + * slice of the small dataset back from disk, and verify + * that it contains the expected data. Verify that + * H5Sselect_shape_same() returns true on the memory and + * file selections. 
+ * + * Return: void + * + * Programmer: JRM -- 8/15/11 + * + *------------------------------------------------------------------------- + */ + +#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0 + +static void +ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) +{ +#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG + const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_l2s()"; +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ + hbool_t data_ok = FALSE; + int i, j, k, l; + size_t u; + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + int mpi_rank; /* needed by VRFY */ + hsize_t sel_start[PAR_SS_DR_MAX_RANK]; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + /* initialize the local copy of mpi_rank */ + mpi_rank = tv_ptr->mpi_rank; + + /* now we go in the opposite direction, verifying that we can write + * from memory to file using selections of different rank that + * H5Sselect_shape_same() views as being of the same shape. + * + * Start by writing small_rank - 1 D slices from the in memory large data + * set to the on disk small dataset. After each write, read the slice of + * the small dataset back from disk, and verify that it contains the + * expected data. Verify that H5Sselect_shape_same() returns true on + * the memory and file selections. + */ + + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; + + for (i = 1; i < tv_ptr->large_rank; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + + ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); + + ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); + + sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; + sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_1, tv_ptr->small_rank, + tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, + sel_start); + + /* set up start, stride, count, and block -- note that we will + * change start[] so as to read slices of the large cube. + */ + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { + + tv_ptr->block[i] = 1; + } + else { + + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + } + + /* zero out the in memory small ds */ + HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); + +#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG + HDfprintf(stdout, + "%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n", + fcnName); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ + + /* in serial versions of this test, we loop through all the dimensions + * of the large data set that don't appear in the small data set. 
+ * + * However, in the parallel version, each process only works with that + * slice of the large (and small) data set indicated by its rank -- hence + * we set the most slowly changing index to mpi_rank, and don't iterate + * over it. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { + + i = tv_ptr->mpi_rank; + } + else { + + i = 0; + } + + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + * loop over it -- either we are setting i to mpi_rank, or + * we are setting it to zero. It will not change during the + * test. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { + + j = tv_ptr->mpi_rank; + } + else { + + j = 0; + } + + j = 0; + do { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { + + k = tv_ptr->mpi_rank; + } + else { + + k = 0; + } + + do { + /* since small rank >= 2 and large_rank > small_rank, we + * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 + * (baring major re-orgaization), this gives us: + * + * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 + * + * so no need to repeat the test in the outer loops -- + * just set l = 0. + */ + + l = 0; + do { + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ + + (tv_ptr->tests_skipped)++; + } + else { /* run the test */ + + tv_ptr->skips = 0; /* reset the skips counter */ + + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. + */ + + /* zero out this rank's slice of the on disk small data set */ + ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2); + VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded."); + + /* select the portion of the in memory large cube from which we + * are going to write data. + */ + tv_ptr->start[0] = (hsize_t)i; + tv_ptr->start[1] = (hsize_t)j; + tv_ptr->start[2] = (hsize_t)k; + tv_ptr->start[3] = (hsize_t)l; + tv_ptr->start[4] = 0; + + HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd( + tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size, + tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); + + /* verify that H5Sselect_shape_same() reports the in + * memory checkerboard selection of the slice through the + * large dataset and the checkerboard selection of the process + * slice of the small data set as having the same shape. + */ + check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_1, tv_ptr->mem_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed."); + + /* write the checker board selection of the slice from the in + * memory large data set to the slice of the on disk small + * dataset. 
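+                     *
+                     * (H5Dwrite() requires only that the memory and file
+                     * selections contain the same number of elements --
+                     * the ranks of the two dataspaces may differ, which
+                     * is exactly what is exercised here.)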
+ */ +#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, + tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], + tv_ptr->start[4]); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, + H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid), + H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_1)); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ + ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_small_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0); + VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded."); + + /* read the on disk process slice of the small dataset into memory */ + ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1); + VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); + + /* verify that expected data is retrieved */ + + expected_value = + (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); + + start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size; + stop_index = start_index + tv_ptr->small_ds_slice_size - 1; + + HDassert(start_index < stop_index); + HDassert(stop_index <= tv_ptr->small_ds_size); + + data_ok = TRUE; + + ptr_1 = tv_ptr->small_ds_buf_1; + for (u = 0; u < start_index; u++, ptr_1++) { + + if (*ptr_1 != 0) { + + data_ok = FALSE; + *ptr_1 = 0; + } + } + + data_ok &= ckrbrd_hs_dr_pio_test__verify_data( + tv_ptr->small_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size, + tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE); + + ptr_1 = tv_ptr->small_ds_buf_1; + for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) { + + if (*ptr_1 != 0) { + + data_ok = FALSE; + *ptr_1 = 0; + } + } + + VRFY((data_ok == TRUE), "large slice write slice to small slice data good."); + + (tv_ptr->tests_run)++; + } + + l++; + + (tv_ptr->total_tests)++; + + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); + k++; + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); + j++; + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); + + return; + +} /* ckrbrd_hs_dr_pio_test__m2d_l2s() */ + +/*------------------------------------------------------------------------- + * Function: ckrbrd_hs_dr_pio_test__m2d_s2l() + * + * Purpose: Part four of a series of tests of I/O to/from checker + * board hyperslab selections of different rank in the parallel. + * + * Verify that we can write from memory to file using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. + * + * Do this by writing checker board selections of the contents + * of the process's slice of the in memory small data set to + * slices of the on disk large data set. After each write, + * read the process's slice of the large data set back into + * memory, and verify that it contains the expected data. + * + * Verify that H5Sselect_shape_same() returns true on the + * memory and file selections. 
+ * + * Return: void + * + * Programmer: JRM -- 8/15/11 + * + *------------------------------------------------------------------------- + */ + +#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0 + +static void +ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) +{ +#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG + const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_s2l()"; +#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ + hbool_t data_ok = FALSE; + int i, j, k, l; + size_t u; + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + int mpi_rank; /* needed by VRFY */ + hsize_t sel_start[PAR_SS_DR_MAX_RANK]; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + /* initialize the local copy of mpi_rank */ + mpi_rank = tv_ptr->mpi_rank; + + /* Now write the contents of the process's slice of the in memory + * small data set to slices of the on disk large data set. After + * each write, read the process's slice of the large data set back + * into memory, and verify that it contains the expected data. + * Verify that H5Sselect_shape_same() returns true on the memory + * and file selections. + */ + + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; + + for (i = 1; i < tv_ptr->large_rank; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded"); + + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) succeeded"); + + /* setup a checkerboard selection of the slice of the in memory small + * data set associated with the process's mpi rank. + */ + + sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; + sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->mem_small_ds_sid, tv_ptr->small_rank, + tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, + sel_start); + + /* set up start, stride, count, and block -- note that we will + * change start[] so as to write checkerboard selections of slices + * of the small data set to slices of the large data set. 
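+     *
+     * (In H5Sselect_hyperslab() terms: start is the origin of the first
+     * block, stride the spacing between consecutive blocks, count the
+     * number of blocks, and block the extent of each block in a given
+     * dimension; with count == 1 the stride value has no effect.  For
+     * example, start = 0, stride = 2 * edge_size, count = 1 and
+     * block = edge_size select elements 0 .. edge_size - 1 of a
+     * dimension.)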
+ */ + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { + + tv_ptr->start[i] = 0; + tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { + + tv_ptr->block[i] = 1; + } + else { + + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + } + } + + /* zero out the in memory large ds */ + HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); + +#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG + HDfprintf(stdout, + "%s writing process checkerboard selections of slices of small ds to process slices of large " + "ds on disk.\n", + fcnName); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { + + i = tv_ptr->mpi_rank; + } + else { + + i = 0; + } + + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + * loop over it -- either we are setting i to mpi_rank, or + * we are setting it to zero. It will not change during the + * test. + */ + + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { + + j = tv_ptr->mpi_rank; + } + else { + + j = 0; + } + + do { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { + + k = tv_ptr->mpi_rank; + } + else { + + k = 0; + } + + do { + /* since small rank >= 2 and large_rank > small_rank, we + * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 + * (baring major re-orgaization), this gives us: + * + * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 + * + * so no need to repeat the test in the outer loops -- + * just set l = 0. + */ + + l = 0; + do { + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ + + (tv_ptr->tests_skipped)++; + } + else { /* run the test */ + + tv_ptr->skips = 0; /* reset the skips counter */ + + /* we know that small_rank >= 1 and that large_rank > small_rank + * by the assertions at the head of this function. Thus no + * need for another inner loop. + */ + + /* Zero out this processes slice of the on disk large data set. + * Note that this will leave one slice with its original data + * as there is one more slice than processes. + */ + ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_2); + VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded"); + + /* select the portion of the in memory large cube to which we + * are going to write data. + */ + tv_ptr->start[0] = (hsize_t)i; + tv_ptr->start[1] = (hsize_t)j; + tv_ptr->start[2] = (hsize_t)k; + tv_ptr->start[3] = (hsize_t)l; + tv_ptr->start[4] = 0; + + HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd( + tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_1, tv_ptr->large_rank, tv_ptr->edge_size, + tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); + + /* verify that H5Sselect_shape_same() reports the in + * memory small data set slice selection and the + * on disk slice through the large data set selection + * as having the same shape. 
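+                     *
+                     * (H5Sselect_shape_same() can report selections taken
+                     * from dataspaces of different rank as having the same
+                     * shape, since the extra dimensions of the larger
+                     * dataspace are restricted to a single coordinate
+                     * here.)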
+ */ + check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_1); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); + + /* write the small data set slice from memory to the + * target slice of the disk data set + */ +#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, + tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], + tv_ptr->start[4]); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, + H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid), + H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_1)); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ + ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_large_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0); + VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded"); + + /* read this processes slice on the on disk large + * data set into memory. + */ + + ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); + VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded"); + + /* verify that the expected data and only the + * expected data was read. + */ + expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); + + start_index = + (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); + stop_index = start_index + tv_ptr->small_ds_slice_size - 1; + + HDassert(start_index < stop_index); + HDassert(stop_index < tv_ptr->large_ds_size); + + data_ok = TRUE; + + ptr_1 = tv_ptr->large_ds_buf_1; + for (u = 0; u < start_index; u++, ptr_1++) { + + if (*ptr_1 != 0) { + + data_ok = FALSE; + *ptr_1 = 0; + } + } + + data_ok &= ckrbrd_hs_dr_pio_test__verify_data( + tv_ptr->large_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size, + tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE); + + ptr_1 = tv_ptr->large_ds_buf_1; + for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) { + + if (*ptr_1 != 0) { + + data_ok = FALSE; + *ptr_1 = 0; + } + } + + VRFY((data_ok == TRUE), "small ds cb slice write to large ds slice data good."); + + (tv_ptr->tests_run)++; + } + + l++; + + (tv_ptr->total_tests)++; + + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); + k++; + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); + j++; + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); + + return; + +} /* ckrbrd_hs_dr_pio_test__m2d_s2l() */ + +/*------------------------------------------------------------------------- + * Function: ckrbrd_hs_dr_pio_test__run_test() + * + * Purpose: Test I/O to/from checkerboard selections of hyperslabs of + * different rank in the parallel. 
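+ *              (Drives the four sub-tests above -- d2m_l2s, d2m_s2l,
+ *              m2d_l2s and m2d_s2l -- against a single pair of small and
+ *              large datasets created by hs_dr_pio_test__setup().)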
+ * + * Return: void + * + * Programmer: JRM -- 10/10/09 + * + *------------------------------------------------------------------------- + */ + +#define CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0 + +static void +ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int checker_edge_size, + const int chunk_edge_size, const int small_rank, const int large_rank, + const hbool_t use_collective_io, const hid_t dset_type, + const int express_test, int *skips_ptr, int max_skips, + int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, + int mpi_rank) + +{ +#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG + const char *fcnName = "ckrbrd_hs_dr_pio_test__run_test()"; +#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + struct hs_dr_pio_test_vars_t test_vars = { + /* int mpi_size = */ -1, + /* int mpi_rank = */ -1, + /* MPI_Comm mpi_comm = */ MPI_COMM_NULL, + /* MPI_Inf mpi_info = */ MPI_INFO_NULL, + /* int test_num = */ -1, + /* int edge_size = */ -1, + /* int checker_edge_size = */ -1, + /* int chunk_edge_size = */ -1, + /* int small_rank = */ -1, + /* int large_rank = */ -1, + /* hid_t dset_type = */ -1, + /* uint32_t * small_ds_buf_0 = */ NULL, + /* uint32_t * small_ds_buf_1 = */ NULL, + /* uint32_t * small_ds_buf_2 = */ NULL, + /* uint32_t * small_ds_slice_buf = */ NULL, + /* uint32_t * large_ds_buf_0 = */ NULL, + /* uint32_t * large_ds_buf_1 = */ NULL, + /* uint32_t * large_ds_buf_2 = */ NULL, + /* uint32_t * large_ds_slice_buf = */ NULL, + /* int small_ds_offset = */ -1, + /* int large_ds_offset = */ -1, + /* hid_t fid = */ -1, /* HDF5 file ID */ + /* hid_t xfer_plist = */ H5P_DEFAULT, + /* hid_t full_mem_small_ds_sid = */ -1, + /* hid_t full_file_small_ds_sid = */ -1, + /* hid_t mem_small_ds_sid = */ -1, + /* hid_t file_small_ds_sid_0 = */ -1, + /* hid_t file_small_ds_sid_1 = */ -1, + /* hid_t small_ds_slice_sid = */ -1, + /* hid_t full_mem_large_ds_sid = */ -1, + /* hid_t full_file_large_ds_sid = */ -1, + /* hid_t mem_large_ds_sid = */ -1, + /* hid_t file_large_ds_sid_0 = */ -1, + /* hid_t file_large_ds_sid_1 = */ -1, + /* hid_t file_large_ds_process_slice_sid = */ -1, + /* hid_t mem_large_ds_process_slice_sid = */ -1, + /* hid_t large_ds_slice_sid = */ -1, + /* hid_t small_dataset = */ -1, /* Dataset ID */ + /* hid_t large_dataset = */ -1, /* Dataset ID */ + /* size_t small_ds_size = */ 1, + /* size_t small_ds_slice_size = */ 1, + /* size_t large_ds_size = */ 1, + /* size_t large_ds_slice_size = */ 1, + /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t * start_ptr = */ NULL, + /* hsize_t * stride_ptr = */ NULL, + /* hsize_t * count_ptr = */ NULL, + /* hsize_t * block_ptr = */ NULL, + /* int skips = */ 0, + /* int max_skips = */ 0, + /* int64_t total_tests = */ 0, + /* int64_t tests_run = */ 0, + /* int64_t tests_skipped = */ 0}; + struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars; + + if (MAINPROCESS) + printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1), + small_rank, large_rank); + + hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, + use_collective_io, dset_type, express_test, tv_ptr); + + /* initialize skips & max_skips */ + 
tv_ptr->skips = *skips_ptr; + tv_ptr->max_skips = max_skips; + +#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank); + HDfprintf(stdout, "test %d: Initialization complete.\n", test_num); + } +#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + + /* first, verify that we can read from disk correctly using selections + * of different rank that H5Sselect_shape_same() views as being of the + * same shape. + * + * Start by reading a (small_rank - 1)-D slice from this processes slice + * of the on disk large data set, and verifying that the data read is + * correct. Verify that H5Sselect_shape_same() returns true on the + * memory and file selections. + * + * The first step is to set up the needed checker board selection in the + * in memory small small cube + */ + + ckrbrd_hs_dr_pio_test__d2m_l2s(tv_ptr); + + /* similarly, read slices of the on disk small data set into slices + * through the in memory large data set, and verify that the correct + * data (and only the correct data) is read. + */ + + ckrbrd_hs_dr_pio_test__d2m_s2l(tv_ptr); + + /* now we go in the opposite direction, verifying that we can write + * from memory to file using selections of different rank that + * H5Sselect_shape_same() views as being of the same shape. + * + * Start by writing small_rank - 1 D slices from the in memory large data + * set to the on disk small dataset. After each write, read the slice of + * the small dataset back from disk, and verify that it contains the + * expected data. Verify that H5Sselect_shape_same() returns true on + * the memory and file selections. + */ + + ckrbrd_hs_dr_pio_test__m2d_l2s(tv_ptr); + + /* Now write the contents of the process's slice of the in memory + * small data set to slices of the on disk large data set. After + * each write, read the process's slice of the large data set back + * into memory, and verify that it contains the expected data. + * Verify that H5Sselect_shape_same() returns true on the memory + * and file selections. + */ + + ckrbrd_hs_dr_pio_test__m2d_s2l(tv_ptr); + +#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n", + test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped), + (long long)(tv_ptr->total_tests)); + } +#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + + hs_dr_pio_test__takedown(tv_ptr); + +#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: Takedown complete.\n", test_num); + } +#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ + + *skips_ptr = tv_ptr->skips; + *total_tests_ptr += tv_ptr->total_tests; + *tests_run_ptr += tv_ptr->tests_run; + *tests_skipped_ptr += tv_ptr->tests_skipped; + + return; + +} /* ckrbrd_hs_dr_pio_test__run_test() */ + +/*------------------------------------------------------------------------- + * Function: ckrbrd_hs_dr_pio_test() + * + * Purpose: Test I/O to/from hyperslab selections of different rank in + * the parallel case. 
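+ *              (Iterates over every pair with 2 <= small_rank <
+ *              large_rank <= PAR_SS_DR_MAX_RANK and invokes
+ *              ckrbrd_hs_dr_pio_test__run_test() with the I/O mode and
+ *              storage layout selected by sstest_type.)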
+ * + * Return: void + * + * Programmer: JRM -- 9/18/09 + * + *------------------------------------------------------------------------- + */ + +static void +ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) +{ + int express_test; + int local_express_test; + int mpi_size = -1; + int mpi_rank = -1; + int test_num = 0; + int edge_size; + int checker_edge_size = 3; + int chunk_edge_size = 0; + int small_rank = 3; + int large_rank = 4; + int mpi_result; + hid_t dset_type = H5T_NATIVE_UINT; + int skips = 0; + int max_skips = 0; + /* The following table list the number of sub-tests skipped between + * each test that is actually executed as a function of the express + * test level. Note that any value in excess of 4880 will cause all + * sub tests to be skipped. + */ + int max_skips_tbl[4] = {0, 4, 64, 1024}; + int64_t total_tests = 0; + int64_t tests_run = 0; + int64_t tests_skipped = 0; + + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + edge_size = (mpi_size > 6 ? mpi_size : 6); + + local_express_test = EXPRESS_MODE; /* GetTestExpress(); */ + + HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); + + mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX, + MPI_COMM_WORLD); + + VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded"); + + if (local_express_test < 0) { + max_skips = max_skips_tbl[0]; + } + else if (local_express_test > 3) { + max_skips = max_skips_tbl[3]; + } + else { + max_skips = max_skips_tbl[local_express_test]; + } + +#if 0 + { + int DebugWait = 1; + + while (DebugWait) ; + } +#endif + + for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) { + + for (small_rank = 2; small_rank < large_rank; small_rank++) { + switch (sstest_type) { + case IND_CONTIG: + /* contiguous data set, independent I/O */ + chunk_edge_size = 0; + ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, + small_rank, large_rank, FALSE, dset_type, express_test, + &skips, max_skips, &total_tests, &tests_run, + &tests_skipped, mpi_rank); + test_num++; + break; + /* end of case IND_CONTIG */ + + case COL_CONTIG: + /* contiguous data set, collective I/O */ + chunk_edge_size = 0; + ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, + small_rank, large_rank, TRUE, dset_type, express_test, + &skips, max_skips, &total_tests, &tests_run, + &tests_skipped, mpi_rank); + test_num++; + break; + /* end of case COL_CONTIG */ + + case IND_CHUNKED: + /* chunked data set, independent I/O */ + chunk_edge_size = 5; + ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, + small_rank, large_rank, FALSE, dset_type, express_test, + &skips, max_skips, &total_tests, &tests_run, + &tests_skipped, mpi_rank); + test_num++; + break; + /* end of case IND_CHUNKED */ + + case COL_CHUNKED: + /* chunked data set, collective I/O */ + chunk_edge_size = 5; + ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, + small_rank, large_rank, TRUE, dset_type, express_test, + &skips, max_skips, &total_tests, &tests_run, + &tests_skipped, mpi_rank); + test_num++; + break; + /* end of case COL_CHUNKED */ + + default: + VRFY((FALSE), "unknown test type"); + break; + + } /* end of switch(sstest_type) */ +#if CONTIG_HS_DR_PIO_TEST__DEBUG + if ((MAINPROCESS) && (tests_skipped > 0)) { + HDfprintf(stdout, " run/skipped/total = %" PRId64 "/%" PRId64 "/%" PRId64 ".\n", + tests_run, tests_skipped, 
total_tests); + } +#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */ + } + } + + if (MAINPROCESS) { + if (tests_skipped > 0) { + HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n", + tests_skipped, total_tests); + } + else + HDprintf("\n"); + } + + return; + +} /* ckrbrd_hs_dr_pio_test() */ + +/* Main Body. Here for now, may have to move them to a separated file later. */ + +/* + * Main driver of the Parallel HDF5 tests + */ + +#include "testphdf5.h" + +#ifndef PATH_MAX +#define PATH_MAX 512 +#endif /* !PATH_MAX */ + +/* global variables */ +int dim0; +int dim1; +int chunkdim0; +int chunkdim1; +int nerrors = 0; /* errors count */ +int ndatasets = 300; /* number of datasets to create*/ +int ngroups = 512; /* number of groups to create in root + * group. */ +int facc_type = FACC_MPIO; /*Test file access type */ +int dxfer_coll_type = DXFER_COLLECTIVE_IO; + +H5E_auto2_t old_func; /* previous error handler */ +void *old_client_data; /* previous error handler arg.*/ + +/* other option flags */ + +#ifdef USE_PAUSE +/* pause the process for a moment to allow debugger to attach if desired. */ +/* Will pause more if greenlight file is not persent but will eventually */ +/* continue. */ +#include +#include + +void +pause_proc(void) +{ + + int pid; + h5_stat_t statbuf; + char greenlight[] = "go"; + int maxloop = 10; + int loops = 0; + int time_int = 10; + + /* mpi variables */ + int mpi_size, mpi_rank; + int mpi_namelen; + char mpi_name[MPI_MAX_PROCESSOR_NAME]; + + pid = getpid(); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Get_processor_name(mpi_name, &mpi_namelen); + + if (MAINPROCESS) + while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) { + if (!loops++) { + HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid, + pid); + } + HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight); + fflush(stdout); + HDsleep(time_int); + } + MPI_Barrier(MPI_COMM_WORLD); +} + +/* Use the Profile feature of MPI to call the pause_proc() */ +int +MPI_Init(int *argc, char ***argv) +{ + int ret_code; + ret_code = PMPI_Init(argc, argv); + pause_proc(); + return (ret_code); +} +#endif /* USE_PAUSE */ + +/* + * Show command usage + */ +static void +usage(void) +{ + HDprintf(" [-r] [-w] [-m] [-n] " + "[-o] [-f ] [-d ]\n"); + HDprintf("\t-m" + "\tset number of datasets for the multiple dataset test\n"); + HDprintf("\t-n" + "\tset number of groups for the multiple group test\n"); +#if 0 + HDprintf("\t-f \tfilename prefix\n"); +#endif + HDprintf("\t-2\t\tuse Split-file together with MPIO\n"); + HDprintf("\t-d \tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR, + COL_FACTOR); + HDprintf("\t-c \tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); + HDprintf("\n"); +} + +/* + * parse the command line options + */ +static int +parse_options(int argc, char **argv) +{ + int mpi_size, mpi_rank; /* mpi variables */ + + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* setup default chunk-size. 
Make sure sizes are > 0 */ + + chunkdim0 = (dim0 + 9) / 10; + chunkdim1 = (dim1 + 9) / 10; + + while (--argc) { + if (**(++argv) != '-') { + break; + } + else { + switch (*(*argv + 1)) { + case 'm': + ndatasets = atoi((*argv + 1) + 1); + if (ndatasets < 0) { + nerrors++; + return (1); + } + break; + case 'n': + ngroups = atoi((*argv + 1) + 1); + if (ngroups < 0) { + nerrors++; + return (1); + } + break; +#if 0 + case 'f': if (--argc < 1) { + nerrors++; + return(1); + } + if (**(++argv) == '-') { + nerrors++; + return(1); + } + paraprefix = *argv; + break; +#endif + case 'i': /* Collective MPI-IO access with independent IO */ + dxfer_coll_type = DXFER_INDEPENDENT_IO; + break; + case '2': /* Use the split-file driver with MPIO access */ + /* Can use $HDF5_METAPREFIX to define the */ + /* meta-file-prefix. */ + facc_type = FACC_MPIO | FACC_SPLIT; + break; + case 'd': /* dimensizes */ + if (--argc < 2) { + nerrors++; + return (1); + } + dim0 = atoi(*(++argv)) * mpi_size; + argc--; + dim1 = atoi(*(++argv)) * mpi_size; + /* set default chunkdim sizes too */ + chunkdim0 = (dim0 + 9) / 10; + chunkdim1 = (dim1 + 9) / 10; + break; + case 'c': /* chunk dimensions */ + if (--argc < 2) { + nerrors++; + return (1); + } + chunkdim0 = atoi(*(++argv)); + argc--; + chunkdim1 = atoi(*(++argv)); + break; + case 'h': /* print help message--return with nerrors set */ + return (1); + default: + HDprintf("Illegal option(%s)\n", *argv); + nerrors++; + return (1); + } + } + } /*while*/ + + /* check validity of dimension and chunk sizes */ + if (dim0 <= 0 || dim1 <= 0) { + HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1); + nerrors++; + return (1); + } + if (chunkdim0 <= 0 || chunkdim1 <= 0) { + HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); + nerrors++; + return (1); + } + + /* Make sure datasets can be divided into equal portions by the processes */ + if ((dim0 % mpi_size) || (dim1 % mpi_size)) { + if (MAINPROCESS) + HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size); + nerrors++; + return (1); + } + + /* compose the test filenames */ + { + int i, n; + + n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */ + + for (i = 0; i < n; i++) + strncpy(filenames[i], FILENAME[i], PATH_MAX); +#if 0 /* no support for VFDs right now */ + if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) { + HDprintf("h5_fixname failed\n"); + nerrors++; + return (1); + } +#endif + if (MAINPROCESS) { + HDprintf("Test filenames are:\n"); + for (i = 0; i < n; i++) + HDprintf(" %s\n", filenames[i]); + } + } + + return (0); +} + +/* + * Create the appropriate File access property list + */ +hid_t +create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) +{ + hid_t ret_pl = -1; + herr_t ret; /* generic return value */ + int mpi_rank; /* mpi variables */ + + /* need the rank for error checking macros */ + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + ret_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((ret_pl >= 0), "H5P_FILE_ACCESS"); + + if (l_facc_type == FACC_DEFAULT) + return (ret_pl); + + if (l_facc_type == FACC_MPIO) { + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(ret_pl, comm, info); + VRFY((ret >= 0), ""); + ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE); + VRFY((ret >= 0), ""); + ret = H5Pset_coll_metadata_write(ret_pl, TRUE); + VRFY((ret >= 0), ""); + return (ret_pl); + } + + if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { + hid_t mpio_pl; + + mpio_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((mpio_pl >= 0), ""); + 
/* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(mpio_pl, comm, info); + VRFY((ret >= 0), ""); + + /* setup file access template */ + ret_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((ret_pl >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); + VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); + H5Pclose(mpio_pl); + return (ret_pl); + } + + /* unknown file access types */ + return (ret_pl); +} + +/* Shape Same test using contiguous hyperslab using independent IO on contiguous datasets */ +static void +sscontig1(void) +{ + contig_hs_dr_pio_test(IND_CONTIG); +} + +/* Shape Same test using contiguous hyperslab using collective IO on contiguous datasets */ +static void +sscontig2(void) +{ + contig_hs_dr_pio_test(COL_CONTIG); +} + +/* Shape Same test using contiguous hyperslab using independent IO on chunked datasets */ +static void +sscontig3(void) +{ + contig_hs_dr_pio_test(IND_CHUNKED); +} + +/* Shape Same test using contiguous hyperslab using collective IO on chunked datasets */ +static void +sscontig4(void) +{ + contig_hs_dr_pio_test(COL_CHUNKED); +} + +/* Shape Same test using checker hyperslab using independent IO on contiguous datasets */ +static void +sschecker1(void) +{ + ckrbrd_hs_dr_pio_test(IND_CONTIG); +} + +/* Shape Same test using checker hyperslab using collective IO on contiguous datasets */ +static void +sschecker2(void) +{ + ckrbrd_hs_dr_pio_test(COL_CONTIG); +} + +/* Shape Same test using checker hyperslab using independent IO on chunked datasets */ +static void +sschecker3(void) +{ + ckrbrd_hs_dr_pio_test(IND_CHUNKED); +} + +/* Shape Same test using checker hyperslab using collective IO on chunked datasets */ +static void +sschecker4(void) +{ + ckrbrd_hs_dr_pio_test(COL_CHUNKED); +} + +int +main(int argc, char **argv) +{ + int mpi_size, mpi_rank; /* mpi variables */ + +#ifndef H5_HAVE_WIN32_API + /* Un-buffer the stdout and stderr */ + HDsetbuf(stderr, NULL); + HDsetbuf(stdout, NULL); +#endif + + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + dim0 = ROW_FACTOR * mpi_size; + dim1 = COL_FACTOR * mpi_size; + + if (MAINPROCESS) { + HDprintf("===================================\n"); + HDprintf("Shape Same Tests Start\n"); + HDprintf(" express_test = %d.\n", EXPRESS_MODE /* GetTestExpress() */); + HDprintf("===================================\n"); + } + + /* Attempt to turn off atexit post processing so that in case errors + * happen during the test and the process is aborted, it will not get + * hang in the atexit post processing in which it may try to make MPI + * calls. By then, MPI calls may not work. + */ + if (H5dont_atexit() < 0) { + if (MAINPROCESS) + HDprintf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank); + }; + H5open(); + /* h5_show_hostname(); */ + + fapl = H5Pcreate(H5P_FILE_ACCESS); + + /* Get the capability flag of the VOL connector being used */ + if (H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g) < 0) { + if (MAINPROCESS) + HDprintf("Failed to get the capability flag of the VOL connector being used\n"); + + MPI_Finalize(); + return 0; + } + + /* Make sure the connector supports the API functions being tested. 
This test only + * uses a few API functions, such as H5Fcreate/close/delete, H5Dcreate/write/read/close, + */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) + HDprintf("API functions for basic file and dataset aren't supported with this connector\n"); + + MPI_Finalize(); + return 0; + } + +#if 0 + HDmemset(filenames, 0, sizeof(filenames)); + for (int i = 0; i < NFILENAME; i++) { + if (NULL == (filenames[i] = HDmalloc(PATH_MAX))) { + HDprintf("couldn't allocate filename array\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } + } +#endif + + /* Initialize testing framework */ + /* TestInit(argv[0], usage, parse_options); */ + + if (parse_options(argc, argv)) { + usage(); + return 1; + } + + if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) { + HDprintf("===================================\n" + " Using Independent I/O with file set view to replace collective I/O \n" + "===================================\n"); + } + + /* Shape Same tests using contiguous hyperslab */ +#if 0 + AddTest("sscontig1", sscontig1, NULL, + "Cntg hslab, ind IO, cntg dsets", filenames[0]); + AddTest("sscontig2", sscontig2, NULL, + "Cntg hslab, col IO, cntg dsets", filenames[0]); + AddTest("sscontig3", sscontig3, NULL, + "Cntg hslab, ind IO, chnk dsets", filenames[0]); + AddTest("sscontig4", sscontig4, NULL, + "Cntg hslab, col IO, chnk dsets", filenames[0]); +#endif + if (MAINPROCESS) { + printf("Cntg hslab, ind IO, cntg dsets\n"); + fflush(stdout); + } + sscontig1(); + if (MAINPROCESS) { + printf("Cntg hslab, col IO, cntg dsets\n"); + fflush(stdout); + } + sscontig2(); + if (MAINPROCESS) { + printf("Cntg hslab, ind IO, chnk dsets\n"); + fflush(stdout); + } + sscontig3(); + if (MAINPROCESS) { + printf("Cntg hslab, col IO, chnk dsets\n"); + fflush(stdout); + } + sscontig4(); + + /* Shape Same tests using checker board hyperslab */ +#if 0 + AddTest("sschecker1", sschecker1, NULL, + "Check hslab, ind IO, cntg dsets", filenames[0]); + AddTest("sschecker2", sschecker2, NULL, + "Check hslab, col IO, cntg dsets", filenames[0]); + AddTest("sschecker3", sschecker3, NULL, + "Check hslab, ind IO, chnk dsets", filenames[0]); + AddTest("sschecker4", sschecker4, NULL, + "Check hslab, col IO, chnk dsets", filenames[0]); +#endif + if (MAINPROCESS) { + printf("Check hslab, ind IO, cntg dsets\n"); + fflush(stdout); + } + sschecker1(); + if (MAINPROCESS) { + printf("Check hslab, col IO, cntg dsets\n"); + fflush(stdout); + } + sschecker2(); + if (MAINPROCESS) { + printf("Check hslab, ind IO, chnk dsets\n"); + fflush(stdout); + } + sschecker3(); + if (MAINPROCESS) { + printf("Check hslab, col IO, chnk dsets\n"); + fflush(stdout); + } + sschecker4(); + + /* Display testing information */ + /* TestInfo(argv[0]); */ + + /* setup file access property list */ + H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); + + /* Parse command line arguments */ + /* TestParseCmdLine(argc, argv); */ + + /* Perform requested testing */ + /* PerformTests(); */ + + /* make sure all processes are finished before final report, cleanup + * and exit. 
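+     *
+     * (The barrier also keeps rank 0 from printing the summary and
+     * deleting the test file while other ranks may still be doing I/O.)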
+ */ + MPI_Barrier(MPI_COMM_WORLD); + + /* Display test summary, if requested */ + /* if (MAINPROCESS && GetTestSummary()) + TestSummary(); */ + + /* Clean up test files */ + /* h5_clean_files(FILENAME, fapl); */ + H5Fdelete(FILENAME[0], fapl); + H5Pclose(fapl); + + /* nerrors += GetTestNumErrs(); */ + + /* Gather errors from all processes */ + { + int temp; + MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); + nerrors = temp; + } + + if (MAINPROCESS) { /* only process 0 reports */ + HDprintf("===================================\n"); + if (nerrors) + HDprintf("***Shape Same tests detected %d errors***\n", nerrors); + else + HDprintf("Shape Same tests finished successfully\n"); + HDprintf("===================================\n"); + } + +#if 0 + for (int i = 0; i < NFILENAME; i++) { + HDfree(filenames[i]); + filenames[i] = NULL; + } +#endif + + /* close HDF5 library */ + H5close(); + + /* Release test infrastructure */ + /* TestShutdown(); */ + + MPI_Finalize(); + + /* cannot just return (nerrors) because exit code is limited to 1byte */ + return (nerrors != 0); +} diff --git a/testpar/API/t_span_tree.c b/testpar/API/t_span_tree.c new file mode 100644 index 00000000000..5aafb0bd7a2 --- /dev/null +++ b/testpar/API/t_span_tree.c @@ -0,0 +1,2622 @@ + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + This program will test irregular hyperslab selections with collective write and read. + The way to test whether collective write and read works is to use independent IO + output to verify the collective output. + + 1) We will write two datasets with the same hyperslab selection settings; + one in independent mode, + one in collective mode, + 2) We will read two datasets with the same hyperslab selection settings, + 1. independent read to read independent output, + independent read to read collecive output, + Compare the result, + If the result is the same, then collective write succeeds. + 2. collective read to read independent output, + independent read to read independent output, + Compare the result, + If the result is the same, then collective read succeeds. 
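+
+   In short: the independently written dataset serves as the reference.
+   Any mismatch between it and the collectively written dataset, or
+   between a collective read and an independent read of the same data,
+   is reported as a test failure.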
+ + */ + +#include "hdf5.h" +#if 0 +#include "H5private.h" +#endif +#include "testphdf5.h" + +#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0 + +static void coll_write_test(int chunk_factor); +static void coll_read_test(void); + +/*------------------------------------------------------------------------- + * Function: coll_irregular_cont_write + * + * Purpose: Wrapper to test the collectively irregular hyperslab write in + * contiguous storage + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * Dec 2nd, 2004 + * + *------------------------------------------------------------------------- + */ +void +coll_irregular_cont_write(void) +{ + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_write_test(0); +} + +/*------------------------------------------------------------------------- + * Function: coll_irregular_cont_read + * + * Purpose: Wrapper to test the collectively irregular hyperslab read in + * contiguous storage + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * Dec 2nd, 2004 + * + *------------------------------------------------------------------------- + */ +void +coll_irregular_cont_read(void) +{ + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_read_test(); +} + +/*------------------------------------------------------------------------- + * Function: coll_irregular_simple_chunk_write + * + * Purpose: Wrapper to test the collectively irregular hyperslab write in + * chunk storage(1 chunk) + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * Dec 2nd, 2004 + * + *------------------------------------------------------------------------- + */ +void +coll_irregular_simple_chunk_write(void) +{ + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_write_test(1); +} + +/*------------------------------------------------------------------------- + * Function: coll_irregular_simple_chunk_read + * + * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk + * storage(1 chunk) + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * Dec 2nd, 2004 + * + *------------------------------------------------------------------------- + */ +void 
+coll_irregular_simple_chunk_read(void) +{ + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_read_test(); +} + +/*------------------------------------------------------------------------- + * Function: coll_irregular_complex_chunk_write + * + * Purpose: Wrapper to test the collectively irregular hyperslab write in chunk + * storage(4 chunks) + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * Dec 2nd, 2004 + * + *------------------------------------------------------------------------- + */ +void +coll_irregular_complex_chunk_write(void) +{ + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_write_test(4); +} + +/*------------------------------------------------------------------------- + * Function: coll_irregular_complex_chunk_read + * + * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk + * storage(1 chunk) + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * Dec 2nd, 2004 + * + *------------------------------------------------------------------------- + */ +void +coll_irregular_complex_chunk_read(void) +{ + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + + coll_read_test(); +} + +/*------------------------------------------------------------------------- + * Function: coll_write_test + * + * Purpose: To test the collectively irregular hyperslab write in chunk + * storage + * Input: number of chunks on each dimension + * if number is equal to 0, contiguous storage + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * Dec 2nd, 2004 + * + *------------------------------------------------------------------------- + */ +void +coll_write_test(int chunk_factor) +{ + + const char *filename; + hid_t facc_plist, dxfer_plist, dcrt_plist; + hid_t file, datasetc, dataseti; /* File and dataset identifiers */ + hid_t mspaceid1, mspaceid, fspaceid, fspaceid1; /* Dataspace identifiers */ + + hsize_t mdim1[1]; /* Dimension size of the first dataset (in memory) */ + hsize_t fsdim[2]; /* Dimension sizes of the dataset (on disk) */ + hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we + * read selection from the dataset on the disk + */ + + hsize_t start[2]; /* Start of hyperslab */ + 
hsize_t stride[2]; /* Stride of hyperslab */ + hsize_t count[2]; /* Block count */ + hsize_t block[2]; /* Block sizes */ + hsize_t chunk_dims[2]; + + herr_t ret; + int i; + int fillvalue = 0; /* Fill value for the dataset */ + + int *matrix_out = NULL; + int *matrix_out1 = NULL; /* Buffer to read from the dataset */ + int *vector = NULL; + + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + /*set up MPI parameters */ + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + /* Obtain file name */ + filename = PARATESTFILE /* GetTestParameters() */; + + /* + * Buffers' initialization. + */ + + mdim1[0] = (hsize_t)(MSPACE1_DIM * mpi_size); + mdim[0] = MSPACE_DIM1; + mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size); + fsdim[0] = FSPACE_DIM1; + fsdim[1] = (hsize_t)(FSPACE_DIM2 * mpi_size); + + vector = (int *)HDmalloc(sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size); + matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size); + matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size); + + HDmemset(vector, 0, sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size); + vector[0] = vector[MSPACE1_DIM * mpi_size - 1] = -1; + for (i = 1; i < MSPACE1_DIM * mpi_size - 1; i++) + vector[i] = (int)i; + + /* Grab file access property list */ + facc_plist = create_faccess_plist(comm, info, facc_type); + VRFY((facc_plist >= 0), ""); + + /* + * Create a file. + */ + file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, facc_plist); + VRFY((file >= 0), "H5Fcreate succeeded"); + + /* + * Create property list for a dataset and set up fill values. + */ + dcrt_plist = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcrt_plist >= 0), ""); + + ret = H5Pset_fill_value(dcrt_plist, H5T_NATIVE_INT, &fillvalue); + VRFY((ret >= 0), "Fill value creation property list succeeded"); + + if (chunk_factor != 0) { + chunk_dims[0] = fsdim[0] / (hsize_t)chunk_factor; + chunk_dims[1] = fsdim[1] / (hsize_t)chunk_factor; + ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims); + VRFY((ret >= 0), "chunk creation property list succeeded"); + } + + /* + * + * Create dataspace for the first dataset in the disk. + * dim1 = 9 + * dim2 = 3600 + * + * + */ + fspaceid = H5Screate_simple(FSPACE_RANK, fsdim, NULL); + VRFY((fspaceid >= 0), "file dataspace created succeeded"); + + /* + * Create dataset in the file. Notice that creation + * property list dcrt_plist is used. 
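+     *
+     * (dcrt_plist carries the fill value and, when chunk_factor != 0,
+     * the chunk layout, so the collective and the independent datasets
+     * are created with identical storage settings.)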
+ */ + datasetc = + H5Dcreate2(file, "collect_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT); + VRFY((datasetc >= 0), "dataset created succeeded"); + + dataseti = + H5Dcreate2(file, "independ_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT); + VRFY((dataseti >= 0), "dataset created succeeded"); + + /* The First selection for FILE + * + * block (3,2) + * stride(4,3) + * count (1,768/mpi_size) + * start (0,1+768*3*mpi_rank/mpi_size) + * + */ + + start[0] = FHSTART0; + start[1] = (hsize_t)(FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1); + stride[0] = FHSTRIDE0; + stride[1] = FHSTRIDE1; + count[0] = FHCOUNT0; + count[1] = FHCOUNT1; + block[0] = FHBLOCK0; + block[1] = FHBLOCK1; + + ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* The Second selection for FILE + * + * block (3,768) + * stride (1,1) + * count (1,1) + * start (4,768*mpi_rank/mpi_size) + * + */ + + start[0] = SHSTART0; + start[1] = (hsize_t)(SHSTART1 + SHCOUNT1 * SHBLOCK1 * mpi_rank); + stride[0] = SHSTRIDE0; + stride[1] = SHSTRIDE1; + count[0] = SHCOUNT0; + count[1] = SHCOUNT1; + block[0] = SHBLOCK0; + block[1] = SHBLOCK1; + + ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Create dataspace for the first dataset in the memory + * dim1 = 27000 + * + */ + mspaceid1 = H5Screate_simple(MSPACE1_RANK, mdim1, NULL); + VRFY((mspaceid1 >= 0), "memory dataspace created succeeded"); + + /* + * Memory space is 1-D, this is a good test to check + * whether a span-tree derived datatype needs to be built. + * block 1 + * stride 1 + * count 6912/mpi_size + * start 1 + * + */ + start[0] = MHSTART0; + stride[0] = MHSTRIDE0; + count[0] = MHCOUNT0; + block[0] = MHBLOCK0; + + ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* independent write */ + ret = H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector); + VRFY((ret >= 0), "dataset independent write succeed"); + + dxfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxfer_plist >= 0), ""); + + ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "MPIO data transfer property list succeed"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* collective write */ + ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, dxfer_plist, vector); + VRFY((ret >= 0), "dataset collective write succeed"); + + ret = H5Sclose(mspaceid1); + VRFY((ret >= 0), ""); + + ret = H5Sclose(fspaceid); + VRFY((ret >= 0), ""); + + /* + * Close dataset. + */ + ret = H5Dclose(datasetc); + VRFY((ret >= 0), ""); + + ret = H5Dclose(dataseti); + VRFY((ret >= 0), ""); + + /* + * Close the file. + */ + ret = H5Fclose(file); + VRFY((ret >= 0), ""); + /* + * Close property list + */ + + ret = H5Pclose(facc_plist); + VRFY((ret >= 0), ""); + ret = H5Pclose(dxfer_plist); + VRFY((ret >= 0), ""); + ret = H5Pclose(dcrt_plist); + VRFY((ret >= 0), ""); + + /* + * Open the file. 
+ */ + + /*** + + For testing collective hyperslab selection write + In this test, we are using independent read to check + the correctedness of collective write compared with + independent write, + + In order to thoroughly test this feature, we choose + a different selection set for reading the data out. + + + ***/ + + /* Obtain file access property list with MPI-IO driver */ + facc_plist = create_faccess_plist(comm, info, facc_type); + VRFY((facc_plist >= 0), ""); + + file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist); + VRFY((file >= 0), "H5Fopen succeeded"); + + /* + * Open the dataset. + */ + datasetc = H5Dopen2(file, "collect_write", H5P_DEFAULT); + VRFY((datasetc >= 0), "H5Dopen2 succeeded"); + + dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT); + VRFY((dataseti >= 0), "H5Dopen2 succeeded"); + + /* + * Get dataspace of the open dataset. + */ + fspaceid = H5Dget_space(datasetc); + VRFY((fspaceid >= 0), "file dataspace obtained succeeded"); + + fspaceid1 = H5Dget_space(dataseti); + VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded"); + + /* The First selection for FILE to read + * + * block (1,1) + * stride(1.1) + * count (3,768/mpi_size) + * start (1,2+768*mpi_rank/mpi_size) + * + */ + start[0] = RFFHSTART0; + start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1); + block[0] = RFFHBLOCK0; + block[1] = RFFHBLOCK1; + stride[0] = RFFHSTRIDE0; + stride[1] = RFFHSTRIDE1; + count[0] = RFFHCOUNT0; + count[1] = RFFHCOUNT1; + + /* The first selection of the dataset generated by collective write */ + ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* The first selection of the dataset generated by independent write */ + ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* The Second selection for FILE to read + * + * block (1,1) + * stride(1.1) + * count (3,1536/mpi_size) + * start (2,4+1536*mpi_rank/mpi_size) + * + */ + + start[0] = RFSHSTART0; + start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank); + block[0] = RFSHBLOCK0; + block[1] = RFSHBLOCK1; + stride[0] = RFSHSTRIDE0; + stride[1] = RFSHSTRIDE0; + count[0] = RFSHCOUNT0; + count[1] = RFSHCOUNT1; + + /* The second selection of the dataset generated by collective write */ + ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* The second selection of the dataset generated by independent write */ + ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Create memory dataspace. + * rank = 2 + * mdim1 = 9 + * mdim2 = 3600 + * + */ + mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL); + + /* + * Select two hyperslabs in memory. Hyperslabs has the same + * size and shape as the selected hyperslabs for the file dataspace + * Only the starting point is different. 
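Each of the file selections above is built in two steps: a first hyperslab applied with H5S_SELECT_SET, then a second one merged in with H5S_SELECT_OR, so a single H5Dwrite() or H5Dread() transfers the whole irregular union. A stripped-down sketch of that pattern on a 10x10 dataspace (the offsets and block sizes here are made up for illustration):

    #include <stdio.h>
    #include "hdf5.h"

    int
    main(void)
    {
        hsize_t dims[2]   = {10, 10};
        hsize_t start1[2] = {0, 0}, count1[2] = {3, 4};   /* first block  */
        hsize_t start2[2] = {6, 5}, count2[2] = {2, 2};   /* second block */
        hid_t   space     = H5Screate_simple(2, dims, NULL);

        /* SET establishes the selection, OR unions a second region into it */
        H5Sselect_hyperslab(space, H5S_SELECT_SET, start1, NULL, count1, NULL);
        H5Sselect_hyperslab(space, H5S_SELECT_OR,  start2, NULL, count2, NULL);

        /* 3*4 + 2*2 = 16 elements selected (the regions do not overlap) */
        printf("selected points: %lld\n",
               (long long)H5Sget_select_npoints(space));

        H5Sclose(space);
        return 0;
    }

The memory-side selections in the test follow the same SET-then-OR sequence; only the starting coordinates differ.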
+ * The first selection + * block (1,1) + * stride(1.1) + * count (3,768/mpi_size) + * start (0,768*mpi_rank/mpi_size) + * + */ + + start[0] = RMFHSTART0; + start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1); + block[0] = RMFHBLOCK0; + block[1] = RMFHBLOCK1; + stride[0] = RMFHSTRIDE0; + stride[1] = RMFHSTRIDE1; + count[0] = RMFHCOUNT0; + count[1] = RMFHCOUNT1; + + ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Select two hyperslabs in memory. Hyperslabs has the same + * size and shape as the selected hyperslabs for the file dataspace + * Only the starting point is different. + * The second selection + * block (1,1) + * stride(1,1) + * count (3,1536/mpi_size) + * start (1,2+1536*mpi_rank/mpi_size) + * + */ + start[0] = RMSHSTART0; + start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1); + block[0] = RMSHBLOCK0; + block[1] = RMSHBLOCK1; + stride[0] = RMSHSTRIDE0; + stride[1] = RMSHSTRIDE1; + count[0] = RMSHCOUNT0; + count[1] = RMSHCOUNT1; + + ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Initialize data buffer. + */ + + HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + /* + * Read data back to the buffer matrix_out. + */ + + ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out); + VRFY((ret >= 0), "H5D independent read succeed"); + + ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out1); + VRFY((ret >= 0), "H5D independent read succeed"); + + ret = 0; + + for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) { + if (matrix_out[i] != matrix_out1[i]) + ret = -1; + if (ret < 0) + break; + } + + VRFY((ret >= 0), "H5D irregular collective write succeed"); + + /* + * Close memory file and memory dataspaces. + */ + ret = H5Sclose(mspaceid); + VRFY((ret >= 0), ""); + ret = H5Sclose(fspaceid); + VRFY((ret >= 0), ""); + + /* + * Close dataset. + */ + ret = H5Dclose(dataseti); + VRFY((ret >= 0), ""); + + ret = H5Dclose(datasetc); + VRFY((ret >= 0), ""); + + /* + * Close property list + */ + + ret = H5Pclose(facc_plist); + VRFY((ret >= 0), ""); + + /* + * Close the file. 
+ */ + ret = H5Fclose(file); + VRFY((ret >= 0), ""); + + if (vector) + HDfree(vector); + if (matrix_out) + HDfree(matrix_out); + if (matrix_out1) + HDfree(matrix_out1); + + return; +} + +/*------------------------------------------------------------------------- + * Function: coll_read_test + * + * Purpose: To test the collectively irregular hyperslab read in chunk + * storage + * Input: number of chunks on each dimension + * if number is equal to 0, contiguous storage + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * Dec 2nd, 2004 + * + *------------------------------------------------------------------------- + */ +static void +coll_read_test(void) +{ + + const char *filename; + hid_t facc_plist, dxfer_plist; + hid_t file, dataseti; /* File and dataset identifiers */ + hid_t mspaceid, fspaceid1; /* Dataspace identifiers */ + + /* Dimension sizes of the dataset (on disk) */ + hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we + * read selection from the dataset on the disk + */ + + hsize_t start[2]; /* Start of hyperslab */ + hsize_t stride[2]; /* Stride of hyperslab */ + hsize_t count[2]; /* Block count */ + hsize_t block[2]; /* Block sizes */ + herr_t ret; + + int i; + + int *matrix_out; + int *matrix_out1; /* Buffer to read from the dataset */ + + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + /*set up MPI parameters */ + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + /* Obtain file name */ + filename = PARATESTFILE /* GetTestParameters() */; + + /* Initialize the buffer */ + + mdim[0] = MSPACE_DIM1; + mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size); + matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + + /*** For testing collective hyperslab selection read ***/ + + /* Obtain file access property list */ + facc_plist = create_faccess_plist(comm, info, facc_type); + VRFY((facc_plist >= 0), ""); + + /* + * Open the file. + */ + file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist); + VRFY((file >= 0), "H5Fopen succeeded"); + + /* + * Open the dataset. + */ + dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT); + VRFY((dataseti >= 0), "H5Dopen2 succeeded"); + + /* + * Get dataspace of the open dataset. 
+ */ + fspaceid1 = H5Dget_space(dataseti); + VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded"); + + /* The First selection for FILE to read + * + * block (1,1) + * stride(1.1) + * count (3,768/mpi_size) + * start (1,2+768*mpi_rank/mpi_size) + * + */ + start[0] = RFFHSTART0; + start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1); + block[0] = RFFHBLOCK0; + block[1] = RFFHBLOCK1; + stride[0] = RFFHSTRIDE0; + stride[1] = RFFHSTRIDE1; + count[0] = RFFHCOUNT0; + count[1] = RFFHCOUNT1; + + ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* The Second selection for FILE to read + * + * block (1,1) + * stride(1.1) + * count (3,1536/mpi_size) + * start (2,4+1536*mpi_rank/mpi_size) + * + */ + start[0] = RFSHSTART0; + start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank); + block[0] = RFSHBLOCK0; + block[1] = RFSHBLOCK1; + stride[0] = RFSHSTRIDE0; + stride[1] = RFSHSTRIDE0; + count[0] = RFSHCOUNT0; + count[1] = RFSHCOUNT1; + + ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Create memory dataspace. + */ + mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL); + + /* + * Select two hyperslabs in memory. Hyperslabs has the same + * size and shape as the selected hyperslabs for the file dataspace. + * Only the starting point is different. + * The first selection + * block (1,1) + * stride(1.1) + * count (3,768/mpi_size) + * start (0,768*mpi_rank/mpi_size) + * + */ + + start[0] = RMFHSTART0; + start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1); + block[0] = RMFHBLOCK0; + block[1] = RMFHBLOCK1; + stride[0] = RMFHSTRIDE0; + stride[1] = RMFHSTRIDE1; + count[0] = RMFHCOUNT0; + count[1] = RMFHCOUNT1; + ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Select two hyperslabs in memory. Hyperslabs has the same + * size and shape as the selected hyperslabs for the file dataspace + * Only the starting point is different. + * The second selection + * block (1,1) + * stride(1,1) + * count (3,1536/mpi_size) + * start (1,2+1536*mpi_rank/mpi_size) + * + */ + start[0] = RMSHSTART0; + start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1); + block[0] = RMSHBLOCK0; + block[1] = RMSHBLOCK1; + stride[0] = RMSHSTRIDE0; + stride[1] = RMSHSTRIDE1; + count[0] = RMSHCOUNT0; + count[1] = RMSHCOUNT1; + ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Initialize data buffer. + */ + + HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + + /* + * Read data back to the buffer matrix_out. 
+ */ + + dxfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxfer_plist >= 0), ""); + + ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "MPIO data transfer property list succeed"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* Collective read */ + ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, dxfer_plist, matrix_out); + VRFY((ret >= 0), "H5D collecive read succeed"); + + ret = H5Pclose(dxfer_plist); + VRFY((ret >= 0), ""); + + /* Independent read */ + ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, H5P_DEFAULT, matrix_out1); + VRFY((ret >= 0), "H5D independent read succeed"); + + ret = 0; + for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) { + if (matrix_out[i] != matrix_out1[i]) + ret = -1; + if (ret < 0) + break; + } + VRFY((ret >= 0), "H5D contiguous irregular collective read succeed"); + + /* + * Free read buffers. + */ + HDfree(matrix_out); + HDfree(matrix_out1); + + /* + * Close memory file and memory dataspaces. + */ + ret = H5Sclose(mspaceid); + VRFY((ret >= 0), ""); + ret = H5Sclose(fspaceid1); + VRFY((ret >= 0), ""); + + /* + * Close dataset. + */ + ret = H5Dclose(dataseti); + VRFY((ret >= 0), ""); + + /* + * Close property list + */ + ret = H5Pclose(facc_plist); + VRFY((ret >= 0), ""); + + /* + * Close the file. + */ + ret = H5Fclose(file); + VRFY((ret >= 0), ""); + + return; +} + +/**************************************************************** +** +** lower_dim_size_comp_test__select_checker_board(): +** +** Given a dataspace of tgt_rank, and dimensions: +** +** (mpi_size + 1), edge_size, ... , edge_size +** +** edge_size, and a checker_edge_size, select a checker +** board selection of a sel_rank (sel_rank < tgt_rank) +** dimensional slice through the dataspace parallel to the +** sel_rank fastest changing indices, with origin (in the +** higher indices) as indicated by the start array. +** +** Note that this function, is hard coded to presume a +** maximum dataspace rank of 5. +** +** While this maximum is declared as a constant, increasing +** it will require extensive coding in addition to changing +** the value of the constant. 
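The read test above performs the same transfer twice: once through a transfer property list that requests collective MPI-IO (optionally downgraded to individual I/O inside the collective call when DXFER_INDEPENDENT_IO is selected), and once through the default independent path, then compares the two buffers element by element. A sketch of just the property-list part, assuming a parallel HDF5 build (the helper name is invented):

    #include "hdf5.h"

    /* Transfer property list for collective MPI-IO; if want_individual is
     * nonzero, keep the collective call but let each rank perform its own
     * I/O inside it. */
    hid_t
    make_xfer_plist(int want_individual)
    {
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
        if (want_individual)
            H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);

        return dxpl;   /* pass to H5Dread()/H5Dwrite(), then H5Pclose() */
    }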
+** +** JRM -- 11/11/09 +** +****************************************************************/ + +#define LDSCT_DS_RANK 5 +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG +#define LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK 0 +#endif + +#define LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG 0 + +static void +lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank, + const hsize_t dims[LDSCT_DS_RANK], const int checker_edge_size, + const int sel_rank, hsize_t sel_start[]) +{ +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + const char *fcnName = "lower_dim_size_comp_test__select_checker_board():"; +#endif + hbool_t first_selection = TRUE; + int i, j, k, l, m; + int ds_offset; + int sel_offset; + const int test_max_rank = LDSCT_DS_RANK; /* must update code if */ + /* this changes */ + hsize_t base_count; + hsize_t offset_count; + hsize_t start[LDSCT_DS_RANK]; + hsize_t stride[LDSCT_DS_RANK]; + hsize_t count[LDSCT_DS_RANK]; + hsize_t block[LDSCT_DS_RANK]; + herr_t ret; /* Generic return value */ + +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n", fcnName, mpi_rank, + (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4], checker_edge_size); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ + + HDassert(0 < checker_edge_size); + HDassert(0 < sel_rank); + HDassert(sel_rank <= tgt_rank); + HDassert(tgt_rank <= test_max_rank); + HDassert(test_max_rank <= LDSCT_DS_RANK); + + sel_offset = test_max_rank - sel_rank; + HDassert(sel_offset >= 0); + + ds_offset = test_max_rank - tgt_rank; + HDassert(ds_offset >= 0); + HDassert(ds_offset <= sel_offset); + + HDassert((hsize_t)checker_edge_size <= dims[sel_offset]); + HDassert(dims[sel_offset] == 10); + +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset); + HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, ds_offset); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ + + /* First, compute the base count (which assumes start == 0 + * for the associated offset) and offset_count (which + * assumes start == checker_edge_size for the associated + * offset). + * + * Note that the following computation depends on the C99 + * requirement that integer division discard any fraction + * (truncation towards zero) to function correctly. As we + * now require C99, this shouldn't be a problem, but noting + * it may save us some pain if we are ever obliged to support + * pre-C99 compilers again. 
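Concretely, for the edge size of 10 asserted above and a checker edge size of 3, the truncating integer division gives:

    base_count   = 10 / (2 * 3)       = 1, remainder 4 > 0  ->  base_count   = 2
    offset_count = (10 - 3) / (2 * 3) = 1, remainder 1 > 0  ->  offset_count = 2

so along the selected axis there are two checkers starting at offset 0 and two (the second one partial) starting at offset checker_edge_size.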
+ */ + + base_count = dims[sel_offset] / (hsize_t)(checker_edge_size * 2); + + if ((dims[sel_rank] % (hsize_t)(checker_edge_size * 2)) > 0) { + + base_count++; + } + + offset_count = + (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) / ((hsize_t)(checker_edge_size * 2))); + + if (((dims[sel_rank] - (hsize_t)checker_edge_size) % ((hsize_t)(checker_edge_size * 2))) > 0) { + + offset_count++; + } + +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n", fcnName, mpi_rank, base_count, + offset_count); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ + + /* Now set up the stride and block arrays, and portions of the start + * and count arrays that will not be altered during the selection of + * the checker board. + */ + i = 0; + while (i < ds_offset) { + + /* these values should never be used */ + start[i] = 0; + stride[i] = 0; + count[i] = 0; + block[i] = 0; + + i++; + } + + while (i < sel_offset) { + + start[i] = sel_start[i]; + stride[i] = 2 * dims[i]; + count[i] = 1; + block[i] = 1; + + i++; + } + + while (i < test_max_rank) { + + stride[i] = (hsize_t)(2 * checker_edge_size); + block[i] = (hsize_t)checker_edge_size; + + i++; + } + + i = 0; + do { + if (0 >= sel_offset) { + + if (i == 0) { + + start[0] = 0; + count[0] = base_count; + } + else { + + start[0] = (hsize_t)checker_edge_size; + count[0] = offset_count; + } + } + + j = 0; + do { + if (1 >= sel_offset) { + + if (j == 0) { + + start[1] = 0; + count[1] = base_count; + } + else { + + start[1] = (hsize_t)checker_edge_size; + count[1] = offset_count; + } + } + + k = 0; + do { + if (2 >= sel_offset) { + + if (k == 0) { + + start[2] = 0; + count[2] = base_count; + } + else { + + start[2] = (hsize_t)checker_edge_size; + count[2] = offset_count; + } + } + + l = 0; + do { + if (3 >= sel_offset) { + + if (l == 0) { + + start[3] = 0; + count[3] = base_count; + } + else { + + start[3] = (hsize_t)checker_edge_size; + count[3] = offset_count; + } + } + + m = 0; + do { + if (4 >= sel_offset) { + + if (m == 0) { + + start[4] = 0; + count[4] = base_count; + } + else { + + start[4] = (hsize_t)checker_edge_size; + count[4] = offset_count; + } + } + + if (((i + j + k + l + m) % 2) == 0) { + +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + + HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank, + (int)first_selection); + HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i, + j, k, l, m); + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)start[0], (int)start[1], (int)start[2], (int)start[3], + (int)start[4]); + HDfprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3], + (int)stride[4]); + HDfprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)count[0], (int)count[1], (int)count[2], (int)count[3], + (int)count[4]); + HDfprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)block[0], (int)block[1], (int)block[2], (int)block[3], + (int)block[4]); + HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank, + H5Sget_simple_extent_ndims(tgt_sid)); + HDfprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank, + sel_rank); + } +#endif + + if (first_selection) { + + first_selection = FALSE; + + 
ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[ds_offset]), + &(stride[ds_offset]), &(count[ds_offset]), + &(block[ds_offset])); + + VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded"); + } + else { + + ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[ds_offset]), + &(stride[ds_offset]), &(count[ds_offset]), + &(block[ds_offset])); + + VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded"); + } + } + + m++; + + } while ((m <= 1) && (4 >= sel_offset)); + + l++; + + } while ((l <= 1) && (3 >= sel_offset)); + + k++; + + } while ((k <= 1) && (2 >= sel_offset)); + + j++; + + } while ((j <= 1) && (1 >= sel_offset)); + + i++; + + } while ((i <= 1) && (0 >= sel_offset)); + +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(tgt_sid)); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ + + /* Clip the selection back to the dataspace proper. */ + + for (i = 0; i < test_max_rank; i++) { + + start[i] = 0; + stride[i] = dims[i]; + count[i] = 1; + block[i] = dims[i]; + } + + ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block); + + VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded"); + +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(tgt_sid)); + HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ + + return; + +} /* lower_dim_size_comp_test__select_checker_board() */ + +/**************************************************************** +** +** lower_dim_size_comp_test__verify_data(): +** +** Examine the supplied buffer to see if it contains the +** expected data. Return TRUE if it does, and FALSE +** otherwise. +** +** The supplied buffer is presumed to this process's slice +** of the target data set. Each such slice will be an +** n-cube of rank (rank -1) and the supplied edge_size with +** origin (mpi_rank, 0, ... , 0) in the target data set. +** +** Further, the buffer is presumed to be the result of reading +** or writing a checker board selection of an m (1 <= m < +** rank) dimensional slice through this processes slice +** of the target data set. Also, this slice must be parallel +** to the fastest changing indices. +** +** It is further presumed that the buffer was zeroed before +** the read/write, and that the full target data set (i.e. +** the buffer/data set for all processes) was initialized +** with the natural numbers listed in order from the origin +** along the fastest changing axis. +** +** Thus for a 20x10x10 dataset, the value stored in location +** (x, y, z) (assuming that z is the fastest changing index +** and x the slowest) is assumed to be: +** +** (10 * 10 * x) + (10 * y) + z +** +** Further, supposing that this is process 10, this process's +** slice of the dataset would be a 10 x 10 2-cube with origin +** (10, 0, 0) in the data set, and would be initialize (prior +** to the checkerboard selection) as follows: +** +** 1000, 1001, 1002, ... 1008, 1009 +** 1010, 1011, 1012, ... 1018, 1019 +** . . . . . +** . . . . . +** . . . . . +** 1090, 1091, 1092, ... 
1098, 1099 +** +** In the case of a read from the processors slice of another +** data set of different rank, the values expected will have +** to be adjusted accordingly. This is done via the +** first_expected_val parameter. +** +** Finally, the function presumes that the first element +** of the buffer resides either at the origin of either +** a selected or an unselected checker. (Translation: +** if partial checkers appear in the buffer, they will +** intersect the edges of the n-cube opposite the origin.) +** +****************************************************************/ + +#define LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG 0 + +static hbool_t +lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr, +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + const int mpi_rank, +#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ + const int rank, const int edge_size, const int checker_edge_size, + uint32_t first_expected_val, hbool_t buf_starts_in_checker) +{ +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + const char *fcnName = "lower_dim_size_comp_test__verify_data():"; +#endif + hbool_t good_data = TRUE; + hbool_t in_checker; + hbool_t start_in_checker[5]; + uint32_t expected_value; + uint32_t *val_ptr; + int i, j, k, l, m; /* to track position in n-cube */ + int v, w, x, y, z; /* to track position in checker */ + const int test_max_rank = 5; /* code changes needed if this is increased */ + + HDassert(buf_ptr != NULL); + HDassert(0 < rank); + HDassert(rank <= test_max_rank); + HDassert(edge_size >= 6); + HDassert(0 < checker_edge_size); + HDassert(checker_edge_size <= edge_size); + HDassert(test_max_rank <= LDSCT_DS_RANK); + +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank); + HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size); + HDfprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size); + HDfprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val); + HDfprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker); + } +#endif + + val_ptr = buf_ptr; + expected_value = first_expected_val; + + i = 0; + v = 0; + start_in_checker[0] = buf_starts_in_checker; + do { + if (v >= checker_edge_size) { + + start_in_checker[0] = !start_in_checker[0]; + v = 0; + } + + j = 0; + w = 0; + start_in_checker[1] = start_in_checker[0]; + do { + if (w >= checker_edge_size) { + + start_in_checker[1] = !start_in_checker[1]; + w = 0; + } + + k = 0; + x = 0; + start_in_checker[2] = start_in_checker[1]; + do { + if (x >= checker_edge_size) { + + start_in_checker[2] = !start_in_checker[2]; + x = 0; + } + + l = 0; + y = 0; + start_in_checker[3] = start_in_checker[2]; + do { + if (y >= checker_edge_size) { + + start_in_checker[3] = !start_in_checker[3]; + y = 0; + } + + m = 0; + z = 0; +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m); + } +#endif + in_checker = start_in_checker[3]; + do { +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, " %d", (int)(*val_ptr)); + } +#endif + if (z >= checker_edge_size) { + + in_checker = !in_checker; + z = 0; + } + + if (in_checker) { + + if (*val_ptr != expected_value) { + + good_data = FALSE; + } + + /* 
zero out buffer for re-use */ + *val_ptr = 0; + } + else if (*val_ptr != 0) { + + good_data = FALSE; + + /* zero out buffer for re-use */ + *val_ptr = 0; + } + + val_ptr++; + expected_value++; + m++; + z++; + + } while ((rank >= (test_max_rank - 4)) && (m < edge_size)); +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "\n"); + } +#endif + l++; + y++; + } while ((rank >= (test_max_rank - 3)) && (l < edge_size)); + k++; + x++; + } while ((rank >= (test_max_rank - 2)) && (k < edge_size)); + j++; + w++; + } while ((rank >= (test_max_rank - 1)) && (j < edge_size)); + i++; + v++; + } while ((rank >= test_max_rank) && (i < edge_size)); + + return (good_data); + +} /* lower_dim_size_comp_test__verify_data() */ + +/*------------------------------------------------------------------------- + * Function: lower_dim_size_comp_test__run_test() + * + * Purpose: Verify that a bug in the computation of the size of the + * lower dimensions of a dataspace in H5S_obtain_datatype() + * has been corrected. + * + * Return: void + * + * Programmer: JRM -- 11/11/09 + * + *------------------------------------------------------------------------- + */ + +#define LDSCT_DS_RANK 5 + +static void +lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_collective_io, + const hid_t dset_type) +{ +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + const char *fcnName = "lower_dim_size_comp_test__run_test()"; + int rank; + hsize_t dims[32]; + hsize_t max_dims[32]; +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + const char *filename; + hbool_t data_ok = FALSE; + hbool_t mis_match = FALSE; + int i; + int start_index; + int stop_index; + int mrc; + int mpi_rank; + int mpi_size; + MPI_Comm mpi_comm = MPI_COMM_NULL; + MPI_Info mpi_info = MPI_INFO_NULL; + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist = H5P_DEFAULT; + size_t small_ds_size; + size_t small_ds_slice_size; + size_t large_ds_size; +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + size_t large_ds_slice_size; +#endif + uint32_t expected_value; + uint32_t *small_ds_buf_0 = NULL; + uint32_t *small_ds_buf_1 = NULL; + uint32_t *large_ds_buf_0 = NULL; + uint32_t *large_ds_buf_1 = NULL; + uint32_t *ptr_0; + uint32_t *ptr_1; + hsize_t small_chunk_dims[LDSCT_DS_RANK]; + hsize_t large_chunk_dims[LDSCT_DS_RANK]; + hsize_t small_dims[LDSCT_DS_RANK]; + hsize_t large_dims[LDSCT_DS_RANK]; + hsize_t start[LDSCT_DS_RANK]; + hsize_t stride[LDSCT_DS_RANK]; + hsize_t count[LDSCT_DS_RANK]; + hsize_t block[LDSCT_DS_RANK]; + hsize_t small_sel_start[LDSCT_DS_RANK]; + hsize_t large_sel_start[LDSCT_DS_RANK]; + hid_t full_mem_small_ds_sid; + hid_t full_file_small_ds_sid; + hid_t mem_small_ds_sid; + hid_t file_small_ds_sid; + hid_t full_mem_large_ds_sid; + hid_t full_file_large_ds_sid; + hid_t mem_large_ds_sid; + hid_t file_large_ds_sid; + hid_t small_ds_dcpl_id = H5P_DEFAULT; + hid_t large_ds_dcpl_id = H5P_DEFAULT; + hid_t small_dataset; /* Dataset ID */ + hid_t large_dataset; /* Dataset ID */ + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ + + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + HDassert(mpi_size >= 1); + + mpi_comm = MPI_COMM_WORLD; + mpi_info = MPI_INFO_NULL; + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: chunk_edge_size = %d.\n", fcnName, 
mpi_rank, (int)chunk_edge_size); + HDfprintf(stdout, "%s:%d: use_collective_io = %d.\n", fcnName, mpi_rank, (int)use_collective_io); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + small_ds_size = (size_t)((mpi_size + 1) * 1 * 1 * 10 * 10); + small_ds_slice_size = (size_t)(1 * 1 * 10 * 10); + large_ds_size = (size_t)((mpi_size + 1) * 10 * 10 * 10 * 10); + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + large_ds_slice_size = (size_t)(10 * 10 * 10 * 10); + + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n", fcnName, mpi_rank, + (int)small_ds_size, (int)small_ds_slice_size); + HDfprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n", fcnName, mpi_rank, + (int)large_ds_size, (int)large_ds_slice_size); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + /* Allocate buffers */ + small_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size); + VRFY((small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded"); + + small_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size); + VRFY((small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded"); + + large_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size); + VRFY((large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded"); + + large_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size); + VRFY((large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded"); + + /* initialize the buffers */ + + ptr_0 = small_ds_buf_0; + ptr_1 = small_ds_buf_1; + + for (i = 0; i < (int)small_ds_size; i++) { + + *ptr_0 = (uint32_t)i; + *ptr_1 = 0; + + ptr_0++; + ptr_1++; + } + + ptr_0 = large_ds_buf_0; + ptr_1 = large_ds_buf_1; + + for (i = 0; i < (int)large_ds_size; i++) { + + *ptr_0 = (uint32_t)i; + *ptr_1 = 0; + + ptr_0++; + ptr_1++; + } + + /* get the file name */ + + filename = (const char *)PARATESTFILE /* GetTestParameters() */; + HDassert(filename != NULL); + + /* ---------------------------------------- + * CREATE AN HDF5 FILE WITH PARALLEL ACCESS + * ---------------------------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type); + VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded"); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + MESG("File opened."); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded"); + + /* setup dims: */ + small_dims[0] = (hsize_t)(mpi_size + 1); + small_dims[1] = 1; + small_dims[2] = 1; + small_dims[3] = 10; + small_dims[4] = 10; + + large_dims[0] = (hsize_t)(mpi_size + 1); + large_dims[1] = 10; + large_dims[2] = 10; + large_dims[3] = 10; + large_dims[4] = 10; + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)small_dims[0], + (int)small_dims[1], (int)small_dims[2], (int)small_dims[3], (int)small_dims[4]); + HDfprintf(stdout, "%s:%d: large_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)large_dims[0], + (int)large_dims[1], (int)large_dims[2], (int)large_dims[3], (int)large_dims[4]); + } +#endif + + /* create dataspaces */ + + full_mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL); + VRFY((full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded"); + + 
full_file_small_ds_sid = H5Screate_simple(5, small_dims, NULL); + VRFY((full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded"); + + mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL); + VRFY((mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded"); + + file_small_ds_sid = H5Screate_simple(5, small_dims, NULL); + VRFY((file_small_ds_sid != 0), "H5Screate_simple() file_small_ds_sid succeeded"); + + full_mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL); + VRFY((full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded"); + + full_file_large_ds_sid = H5Screate_simple(5, large_dims, NULL); + VRFY((full_file_large_ds_sid != 0), "H5Screate_simple() full_file_large_ds_sid succeeded"); + + mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL); + VRFY((mem_large_ds_sid != 0), "H5Screate_simple() mem_large_ds_sid succeeded"); + + file_large_ds_sid = H5Screate_simple(5, large_dims, NULL); + VRFY((file_large_ds_sid != 0), "H5Screate_simple() file_large_ds_sid succeeded"); + + /* Select the entire extent of the full small ds dataspaces */ + ret = H5Sselect_all(full_mem_small_ds_sid); + VRFY((ret != FAIL), "H5Sselect_all(full_mem_small_ds_sid) succeeded"); + + ret = H5Sselect_all(full_file_small_ds_sid); + VRFY((ret != FAIL), "H5Sselect_all(full_file_small_ds_sid) succeeded"); + + /* Select the entire extent of the full large ds dataspaces */ + ret = H5Sselect_all(full_mem_large_ds_sid); + VRFY((ret != FAIL), "H5Sselect_all(full_mem_large_ds_sid) succeeded"); + + ret = H5Sselect_all(full_file_large_ds_sid); + VRFY((ret != FAIL), "H5Sselect_all(full_file_large_ds_sid) succeeded"); + + /* if chunk edge size is greater than zero, set up the small and + * large data set creation property lists to specify chunked + * datasets. 
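When chunk_edge_size is positive the datasets are switched from the default contiguous layout to chunked storage, with unit-size chunks in the slow dimensions and chunk_edge_size in the fastest two (small dataset) or four (large dataset). A minimal sketch of the small-dataset variant (the helper name is invented; the test passes chunk_edge_size = 5):

    #include "hdf5.h"

    /* Chunked-layout dcpl for a rank-5 dataset: unit chunks in the slow
     * dimensions, chunk_edge_size in the two fastest. */
    hid_t
    make_chunked_dcpl(hsize_t chunk_edge_size)
    {
        hsize_t chunk[5] = {1, 1, 1, chunk_edge_size, chunk_edge_size};
        hid_t   dcpl     = H5Pcreate(H5P_DATASET_CREATE);

        H5Pset_layout(dcpl, H5D_CHUNKED);   /* implied by H5Pset_chunk();
                                             * kept here to mirror the test */
        H5Pset_chunk(dcpl, 5, chunk);

        return dcpl;   /* pass to H5Dcreate2(), then H5Pclose() */
    }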
+ */ + if (chunk_edge_size > 0) { + + small_chunk_dims[0] = (hsize_t)(1); + small_chunk_dims[1] = small_chunk_dims[2] = (hsize_t)1; + small_chunk_dims[3] = small_chunk_dims[4] = (hsize_t)chunk_edge_size; + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, + (int)small_chunk_dims[0], (int)small_chunk_dims[1], (int)small_chunk_dims[2], + (int)small_chunk_dims[3], (int)small_chunk_dims[4]); + } +#endif + + small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded"); + + ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED); + VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded"); + + ret = H5Pset_chunk(small_ds_dcpl_id, 5, small_chunk_dims); + VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded"); + + large_chunk_dims[0] = (hsize_t)(1); + large_chunk_dims[1] = large_chunk_dims[2] = large_chunk_dims[3] = large_chunk_dims[4] = + (hsize_t)chunk_edge_size; + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, + (int)large_chunk_dims[0], (int)large_chunk_dims[1], (int)large_chunk_dims[2], + (int)large_chunk_dims[3], (int)large_chunk_dims[4]); + } +#endif + + large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded"); + + ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED); + VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded"); + + ret = H5Pset_chunk(large_ds_dcpl_id, 5, large_chunk_dims); + VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded"); + } + + /* create the small dataset */ + small_dataset = H5Dcreate2(fid, "small_dataset", dset_type, file_small_ds_sid, H5P_DEFAULT, + small_ds_dcpl_id, H5P_DEFAULT); + VRFY((ret >= 0), "H5Dcreate2() small_dataset succeeded"); + + /* create the large dataset */ + large_dataset = H5Dcreate2(fid, "large_dataset", dset_type, file_large_ds_sid, H5P_DEFAULT, + large_ds_dcpl_id, H5P_DEFAULT); + VRFY((ret >= 0), "H5Dcreate2() large_dataset succeeded"); + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: small/large ds id = %d / %d.\n", fcnName, mpi_rank, (int)small_dataset, + (int)large_dataset); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + /* setup xfer property list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); + + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + if (!use_collective_io) { + + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() succeeded"); + } + + /* setup selection to write initial data to the small data sets */ + start[0] = (hsize_t)(mpi_rank + 1); + start[1] = start[2] = start[3] = start[4] = 0; + + stride[0] = (hsize_t)(2 * (mpi_size + 1)); + stride[1] = stride[2] = 2; + stride[3] = stride[4] = 2 * 10; + + count[0] = count[1] = count[2] = count[3] = count[4] = 1; + + block[0] = block[1] = block[2] = 1; + block[3] = block[4] = 10; + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: settings for small data 
set initialization.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], + (int)start[1], (int)start[2], (int)start[3], (int)start[4]); + HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], + (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); + HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], + (int)count[1], (int)count[2], (int)count[3], (int)count[4]); + HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], + (int)block[1], (int)block[2], (int)block[3], (int)block[4]); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + /* setup selections for writing initial data to the small data set */ + ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); + + ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) succeeded"); + + if (MAINPROCESS) { /* add an additional slice to the selections */ + + start[0] = 0; + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], + (int)start[1], (int)start[2], (int)start[3], (int)start[4]); + HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], + (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); + HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], + (int)count[1], (int)count[2], (int)count[3], (int)count[4]); + HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], + (int)block[1], (int)block[2], (int)block[3], (int)block[4]); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded"); + + ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) succeeded"); + } + + check = H5Sselect_valid(mem_small_ds_sid); + VRFY((check == TRUE), "H5Sselect_valid(mem_small_ds_sid) returns TRUE"); + + check = H5Sselect_valid(file_small_ds_sid); + VRFY((check == TRUE), "H5Sselect_valid(file_small_ds_sid) returns TRUE"); + + /* write the initial value of the small data set to file */ +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: writing init value of small ds to file.\n", fcnName, mpi_rank); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + ret = H5Dwrite(small_dataset, dset_type, mem_small_ds_sid, file_small_ds_sid, xfer_plist, small_ds_buf_0); + VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded"); + + /* sync with the other processes before reading data */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes"); + + /* read the small data set back to verify that it contains the + * expected data. Note that each process reads in the entire + * data set and verifies it. 
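That verify-after-write pattern, every rank reading back the complete dataset through a full-extent selection and checking the 0, 1, 2, ... sequence it was initialized with, reduces to something like the following (the helper name and parameters are illustrative; dxpl is assumed to be a collective transfer list as set up above):

    #include <stdint.h>
    #include <stdlib.h>
    #include "hdf5.h"

    /* Every rank reads the whole dataset and checks the 0, 1, 2, ... pattern. */
    int
    verify_whole_dataset(hid_t dset, hid_t dxpl, size_t n_elements)
    {
        uint32_t *buf = (uint32_t *)malloc(sizeof(uint32_t) * n_elements);
        int       ok  = (buf != NULL);

        /* H5S_ALL on both sides: the full extent in file and in memory */
        if (ok && H5Dread(dset, H5T_NATIVE_UINT32, H5S_ALL, H5S_ALL, dxpl, buf) < 0)
            ok = 0;

        for (size_t u = 0; ok && u < n_elements; u++)
            if (buf[u] != (uint32_t)u)
                ok = 0;

        free(buf);
        return ok;   /* 1 if everything matched */
    }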
+ */ + ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, full_mem_small_ds_sid, full_file_small_ds_sid, xfer_plist, + small_ds_buf_1); + VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded"); + + /* sync with the other processes before checking data */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes"); + + /* verify that the correct data was written to the small data set, + * and reset the buffer to zero in passing. + */ + expected_value = 0; + mis_match = FALSE; + ptr_1 = small_ds_buf_1; + + i = 0; + for (i = 0; i < (int)small_ds_size; i++) { + + if (*ptr_1 != expected_value) { + + mis_match = TRUE; + } + + *ptr_1 = (uint32_t)0; + + ptr_1++; + expected_value++; + } + VRFY((mis_match == FALSE), "small ds init data good."); + + /* setup selections for writing initial data to the large data set */ + start[0] = (hsize_t)(mpi_rank + 1); + start[1] = start[2] = start[3] = start[4] = (hsize_t)0; + + stride[0] = (hsize_t)(2 * (mpi_size + 1)); + stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10); + + count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1; + + block[0] = (hsize_t)1; + block[1] = block[2] = block[3] = block[4] = (hsize_t)10; + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: settings for large data set initialization.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], + (int)start[1], (int)start[2], (int)start[3], (int)start[4]); + HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], + (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); + HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], + (int)count[1], (int)count[2], (int)count[3], (int)count[4]); + HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], + (int)block[1], (int)block[2], (int)block[3], (int)block[4]); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded"); + + ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) succeeded"); + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(mem_large_ds_sid)); + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(file_large_ds_sid)); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + if (MAINPROCESS) { /* add an additional slice to the selections */ + + start[0] = (hsize_t)0; + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], + (int)start[1], (int)start[2], (int)start[3], (int)start[4]); + HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], + (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); + 
HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], + (int)count[1], (int)count[2], (int)count[3], (int)count[4]); + HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], + (int)block[1], (int)block[2], (int)block[3], (int)block[4]); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded"); + + ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) succeeded"); + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(mem_large_ds_sid)); + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(file_large_ds_sid)); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + } + + /* try clipping the selection back to the large dataspace proper */ + start[0] = start[1] = start[2] = start[3] = start[4] = (hsize_t)0; + + stride[0] = (hsize_t)(2 * (mpi_size + 1)); + stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10); + + count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1; + + block[0] = (hsize_t)(mpi_size + 1); + block[1] = block[2] = block[3] = block[4] = (hsize_t)10; + + ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_AND, start, stride, count, block); + VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid, and) succeeded"); + + ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_AND, start, stride, count, block); + VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_ds_sid, and) succeeded"); + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + + rank = H5Sget_simple_extent_dims(mem_large_ds_sid, dims, max_dims); + HDfprintf(stdout, "%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank, + (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]); + + rank = H5Sget_simple_extent_dims(file_large_ds_sid, dims, max_dims); + HDfprintf(stdout, "%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank, + (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + check = H5Sselect_valid(mem_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_valid(mem_large_ds_sid) returns TRUE"); + + check = H5Sselect_valid(file_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_valid(file_large_ds_sid) returns TRUE"); + + /* write the initial value of the large data set to file */ +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: writing init value of large ds to file.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s:%d: large_dataset = %d.\n", fcnName, mpi_rank, (int)large_dataset); + HDfprintf(stdout, "%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n", fcnName, mpi_rank, + (int)mem_large_ds_sid, (int)file_large_ds_sid); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + ret = H5Dwrite(large_dataset, dset_type, mem_large_ds_sid, file_large_ds_sid, xfer_plist, large_ds_buf_0); + + if (ret < 0) 
+ H5Eprint2(H5E_DEFAULT, stderr); + VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded"); + + /* sync with the other processes before checking data */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes"); + + /* read the large data set back to verify that it contains the + * expected data. Note that each process reads in the entire + * data set. + */ + ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, full_mem_large_ds_sid, full_file_large_ds_sid, xfer_plist, + large_ds_buf_1); + VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded"); + + /* verify that the correct data was written to the large data set. + * in passing, reset the buffer to zeros + */ + expected_value = 0; + mis_match = FALSE; + ptr_1 = large_ds_buf_1; + + i = 0; + for (i = 0; i < (int)large_ds_size; i++) { + + if (*ptr_1 != expected_value) { + + mis_match = TRUE; + } + + *ptr_1 = (uint32_t)0; + + ptr_1++; + expected_value++; + } + VRFY((mis_match == FALSE), "large ds init data good."); + + /***********************************/ + /***** INITIALIZATION COMPLETE *****/ + /***********************************/ + + /* read a checkerboard selection of the process slice of the + * small on disk data set into the process slice of the large + * in memory data set, and verify the data read. + */ + + small_sel_start[0] = (hsize_t)(mpi_rank + 1); + small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0; + + lower_dim_size_comp_test__select_checker_board(mpi_rank, file_small_ds_sid, + /* tgt_rank = */ 5, small_dims, + /* checker_edge_size = */ 3, + /* sel_rank */ 2, small_sel_start); + + expected_value = + (uint32_t)((small_sel_start[0] * small_dims[1] * small_dims[2] * small_dims[3] * small_dims[4]) + + (small_sel_start[1] * small_dims[2] * small_dims[3] * small_dims[4]) + + (small_sel_start[2] * small_dims[3] * small_dims[4]) + + (small_sel_start[3] * small_dims[4]) + (small_sel_start[4])); + + large_sel_start[0] = (hsize_t)(mpi_rank + 1); + large_sel_start[1] = 5; + large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0; + + lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_large_ds_sid, + /* tgt_rank = */ 5, large_dims, + /* checker_edge_size = */ 3, + /* sel_rank = */ 2, large_sel_start); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. 
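The expected_value computed above is simply the row-major linear index of the selection origin in the small dataset. With small_dims = {mpi_size + 1, 1, 1, 10, 10} and an origin of {mpi_rank + 1, 0, 0, 0, 0} it collapses to:

    expected_value = (mpi_rank + 1) * (1 * 1 * 10 * 10) = (mpi_rank + 1) * 100

which is the first of the 100 consecutive values written into that rank's 10 x 10 slice during initialization.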
+ */ + check = H5Sselect_shape_same(mem_large_ds_sid, file_small_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed (1)"); + + ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, mem_large_ds_sid, file_small_ds_sid, xfer_plist, + large_ds_buf_1); + + VRFY((ret >= 0), "H5Sread() slice from small ds succeeded."); + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + /* verify that expected data is retrieved */ + + data_ok = TRUE; + + start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) + + (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) + + (large_sel_start[2] * large_dims[3] * large_dims[4]) + + (large_sel_start[3] * large_dims[4]) + (large_sel_start[4])); + + stop_index = start_index + (int)small_ds_slice_size; + + HDassert(0 <= start_index); + HDassert(start_index < stop_index); + HDassert(stop_index <= (int)large_ds_size); + + ptr_1 = large_ds_buf_1; + + for (i = 0; i < start_index; i++) { + + if (*ptr_1 != (uint32_t)0) { + + data_ok = FALSE; + *ptr_1 = (uint32_t)0; + } + + ptr_1++; + } + + VRFY((data_ok == TRUE), "slice read from small ds data good(1)."); + + data_ok = lower_dim_size_comp_test__verify_data(ptr_1, +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + mpi_rank, +#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ + /* rank */ 2, + /* edge_size */ 10, + /* checker_edge_size */ 3, expected_value, + /* buf_starts_in_checker */ TRUE); + + VRFY((data_ok == TRUE), "slice read from small ds data good(2)."); + + data_ok = TRUE; + + ptr_1 += small_ds_slice_size; + + for (i = stop_index; i < (int)large_ds_size; i++) { + + if (*ptr_1 != (uint32_t)0) { + + data_ok = FALSE; + *ptr_1 = (uint32_t)0; + } + + ptr_1++; + } + + VRFY((data_ok == TRUE), "slice read from small ds data good(3)."); + + /* read a checkerboard selection of a slice of the process slice of + * the large on disk data set into the process slice of the small + * in memory data set, and verify the data read. + */ + + small_sel_start[0] = (hsize_t)(mpi_rank + 1); + small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0; + + lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_small_ds_sid, + /* tgt_rank = */ 5, small_dims, + /* checker_edge_size = */ 3, + /* sel_rank */ 2, small_sel_start); + + large_sel_start[0] = (hsize_t)(mpi_rank + 1); + large_sel_start[1] = 5; + large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0; + + lower_dim_size_comp_test__select_checker_board(mpi_rank, file_large_ds_sid, + /* tgt_rank = */ 5, large_dims, + /* checker_edge_size = */ 3, + /* sel_rank = */ 2, large_sel_start); + + /* verify that H5Sselect_shape_same() reports the two + * selections as having the same shape. 
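H5Sselect_shape_same() compares the shapes of the current selections rather than the full extents of their dataspaces, which is what allows the two-dimensional checkerboard selected in one dataset's dataspace to be paired with the matching checkerboard in the other's. A reduced illustration, selecting the same 4 x 6 block once in a 2-D dataspace and once as a singleton slice of a 3-D dataspace (the function name and extents are invented):

    #include "hdf5.h"

    /* Same 4x6 block selected in a 2-D space and as a slice of a 3-D space. */
    htri_t
    shapes_match(void)
    {
        hsize_t dims2[2]  = {10, 10},  dims3[3]  = {4, 10, 10};
        hsize_t start2[2] = {1, 2},    start3[3] = {3, 1, 2};
        hsize_t count2[2] = {4, 6},    count3[3] = {1, 4, 6};
        hid_t   sid2 = H5Screate_simple(2, dims2, NULL);
        hid_t   sid3 = H5Screate_simple(3, dims3, NULL);
        htri_t  same;

        H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start2, NULL, count2, NULL);
        H5Sselect_hyperslab(sid3, H5S_SELECT_SET, start3, NULL, count3, NULL);

        same = H5Sselect_shape_same(sid2, sid3);   /* expected: TRUE */

        H5Sclose(sid2);
        H5Sclose(sid3);
        return same;
    }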
+ */ + check = H5Sselect_shape_same(mem_small_ds_sid, file_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed (2)"); + + ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, mem_small_ds_sid, file_large_ds_sid, xfer_plist, + small_ds_buf_1); + + VRFY((ret >= 0), "H5Sread() slice from large ds succeeded."); + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ + + /* verify that expected data is retrieved */ + + data_ok = TRUE; + + expected_value = + (uint32_t)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) + + (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) + + (large_sel_start[2] * large_dims[3] * large_dims[4]) + + (large_sel_start[3] * large_dims[4]) + (large_sel_start[4])); + + start_index = (int)(mpi_rank + 1) * (int)small_ds_slice_size; + + stop_index = start_index + (int)small_ds_slice_size; + + HDassert(0 <= start_index); + HDassert(start_index < stop_index); + HDassert(stop_index <= (int)small_ds_size); + + ptr_1 = small_ds_buf_1; + + for (i = 0; i < start_index; i++) { + + if (*ptr_1 != (uint32_t)0) { + + data_ok = FALSE; + *ptr_1 = (uint32_t)0; + } + + ptr_1++; + } + + VRFY((data_ok == TRUE), "slice read from large ds data good(1)."); + + data_ok = lower_dim_size_comp_test__verify_data(ptr_1, +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + mpi_rank, +#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ + /* rank */ 2, + /* edge_size */ 10, + /* checker_edge_size */ 3, expected_value, + /* buf_starts_in_checker */ TRUE); + + VRFY((data_ok == TRUE), "slice read from large ds data good(2)."); + + data_ok = TRUE; + + ptr_1 += small_ds_slice_size; + + for (i = stop_index; i < (int)small_ds_size; i++) { + + if (*ptr_1 != (uint32_t)0) { + +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n", fcnName, mpi_rank, (int)i, + (int)(*ptr_1)); + } +#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ + + data_ok = FALSE; + *ptr_1 = (uint32_t)0; + } + + ptr_1++; + } + + VRFY((data_ok == TRUE), "slice read from large ds data good(3)."); + + /* Close dataspaces */ + ret = H5Sclose(full_mem_small_ds_sid); + VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded"); + + ret = H5Sclose(full_file_small_ds_sid); + VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded"); + + ret = H5Sclose(mem_small_ds_sid); + VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded"); + + ret = H5Sclose(file_small_ds_sid); + VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid) succeeded"); + + ret = H5Sclose(full_mem_large_ds_sid); + VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded"); + + ret = H5Sclose(full_file_large_ds_sid); + VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded"); + + ret = H5Sclose(mem_large_ds_sid); + VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded"); + + ret = H5Sclose(file_large_ds_sid); + VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid) succeeded"); + + /* Close Datasets */ + ret = H5Dclose(small_dataset); + VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded"); + + ret = H5Dclose(large_dataset); + VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded"); + + /* close the file collectively */ + MESG("about to close file."); + ret = H5Fclose(fid); + 
VRFY((ret != FAIL), "file close succeeded"); + + /* Free memory buffers */ + if (small_ds_buf_0 != NULL) + HDfree(small_ds_buf_0); + if (small_ds_buf_1 != NULL) + HDfree(small_ds_buf_1); + + if (large_ds_buf_0 != NULL) + HDfree(large_ds_buf_0); + if (large_ds_buf_1 != NULL) + HDfree(large_ds_buf_1); + + return; + +} /* lower_dim_size_comp_test__run_test() */ + +/*------------------------------------------------------------------------- + * Function: lower_dim_size_comp_test() + * + * Purpose: Test to see if an error in the computation of the size + * of the lower dimensions in H5S_obtain_datatype() has + * been corrected. + * + * Return: void + * + * Programmer: JRM -- 11/11/09 + * + *------------------------------------------------------------------------- + */ + +void +lower_dim_size_comp_test(void) +{ + /* const char *fcnName = "lower_dim_size_comp_test()"; */ + int chunk_edge_size = 0; + int use_collective_io; + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); + for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) { + chunk_edge_size = 0; + lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT); + + chunk_edge_size = 5; + lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT); + } /* end for */ + + return; +} /* lower_dim_size_comp_test() */ + +/*------------------------------------------------------------------------- + * Function: link_chunk_collective_io_test() + * + * Purpose: Test to verify that an error in MPI type management in + * H5D_link_chunk_collective_io() has been corrected. + * In this bug, we used to free MPI types regardless of + * whether they were basic or derived. + * + * This test is based on a bug report kindly provided by + * Rob Latham of the MPICH team and ANL. + * + * The basic thrust of the test is to cause a process + * to participate in a collective I/O in which it: + * + * 1) Reads or writes exactly one chunk, + * + * 2) Has no in memory buffer for any other chunk. + * + * The test differers from Rob Latham's bug report in + * that is runs with an arbitrary number of proceeses, + * and uses a 1 dimensional dataset. 
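The MPI type management error described in this header comment comes down to calling MPI_Type_free() on predefined (basic) datatype handles such as MPI_BYTE, which MPI forbids; only derived, committed types may be freed. A minimal sketch of the guard such code needs is shown below; the helper name and the type_is_derived flag are illustrative assumptions, not the actual H5D_link_chunk_collective_io() fix.

    #include <mpi.h>

    /* Illustrative helper: free an MPI datatype only if it was derived. */
    static void
    release_io_type(MPI_Datatype *type, int type_is_derived)
    {
        /* MPI_Type_free() is only legal on derived types; it resets *type to
         * MPI_DATATYPE_NULL on success.  Predefined handles are left alone. */
        if (type_is_derived && *type != MPI_DATATYPE_NULL)
            MPI_Type_free(type);
    }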
+ * + * Return: void + * + * Programmer: JRM -- 12/16/09 + * + *------------------------------------------------------------------------- + */ + +#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16 + +void +link_chunk_collective_io_test(void) +{ + /* const char *fcnName = "link_chunk_collective_io_test()"; */ + const char *filename; + hbool_t mis_match = FALSE; + int i; + int mrc; + int mpi_rank; + int mpi_size; + MPI_Comm mpi_comm = MPI_COMM_WORLD; + MPI_Info mpi_info = MPI_INFO_NULL; + hsize_t count[1] = {1}; + hsize_t stride[1] = {2 * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; + hsize_t block[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; + hsize_t start[1]; + hsize_t dims[1]; + hsize_t chunk_dims[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; + herr_t ret; /* Generic return value */ + hid_t file_id; + hid_t acc_tpl; + hid_t dset_id; + hid_t file_ds_sid; + hid_t write_mem_ds_sid; + hid_t read_mem_ds_sid; + hid_t ds_dcpl_id; + hid_t xfer_plist; + double diff; + double expected_value; + double local_data_written[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE]; + double local_data_read[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE]; + + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + HDassert(mpi_size > 0); + + /* get the file name */ + filename = (const char *)PARATESTFILE /* GetTestParameters() */; + HDassert(filename != NULL); + + /* setup file access template */ + acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type); + VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded"); + + /* create the file collectively */ + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + MESG("File opened."); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded"); + + /* setup dims */ + dims[0] = ((hsize_t)mpi_size) * ((hsize_t)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE)); + + /* setup mem and file dataspaces */ + write_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL); + VRFY((write_mem_ds_sid != 0), "H5Screate_simple() write_mem_ds_sid succeeded"); + + read_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL); + VRFY((read_mem_ds_sid != 0), "H5Screate_simple() read_mem_ds_sid succeeded"); + + file_ds_sid = H5Screate_simple(1, dims, NULL); + VRFY((file_ds_sid != 0), "H5Screate_simple() file_ds_sid succeeded"); + + /* setup data set creation property list */ + ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((ds_dcpl_id != FAIL), "H5Pcreate() ds_dcpl_id succeeded"); + + ret = H5Pset_layout(ds_dcpl_id, H5D_CHUNKED); + VRFY((ret != FAIL), "H5Pset_layout() ds_dcpl_id succeeded"); + + ret = H5Pset_chunk(ds_dcpl_id, 1, chunk_dims); + VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded"); + + /* create the data set */ + dset_id = + H5Dcreate2(file_id, "dataset", H5T_NATIVE_DOUBLE, file_ds_sid, H5P_DEFAULT, ds_dcpl_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2() dataset succeeded"); + + /* close the dataset creation property list */ + ret = H5Pclose(ds_dcpl_id); + VRFY((ret >= 0), "H5Pclose(ds_dcpl_id) succeeded"); + + /* setup local data */ + expected_value = 
(double)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE) * (double)(mpi_rank); + for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) { + + local_data_written[i] = expected_value; + local_data_read[i] = 0.0; + expected_value += 1.0; + } + + /* select the file and mem spaces */ + start[0] = (hsize_t)(mpi_rank * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE); + ret = H5Sselect_hyperslab(file_ds_sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_ds_sid, set) succeeded"); + + ret = H5Sselect_all(write_mem_ds_sid); + VRFY((ret != FAIL), "H5Sselect_all(mem_ds_sid) succeeded"); + + /* Note that we use NO SELECTION on the read memory dataspace */ + + /* setup xfer property list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); + + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* write the data set */ + ret = H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, write_mem_ds_sid, file_ds_sid, xfer_plist, local_data_written); + + VRFY((ret >= 0), "H5Dwrite() dataset initial write succeeded"); + + /* sync with the other processes before checking data */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after dataset write"); + + /* read this processes slice of the dataset back in */ + ret = H5Dread(dset_id, H5T_NATIVE_DOUBLE, read_mem_ds_sid, file_ds_sid, xfer_plist, local_data_read); + VRFY((ret >= 0), "H5Dread() dataset read succeeded"); + + /* close the xfer property list */ + ret = H5Pclose(xfer_plist); + VRFY((ret >= 0), "H5Pclose(xfer_plist) succeeded"); + + /* verify the data */ + mis_match = FALSE; + for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) { + + diff = local_data_written[i] - local_data_read[i]; + diff = fabs(diff); + + if (diff >= 0.001) { + + mis_match = TRUE; + } + } + VRFY((mis_match == FALSE), "dataset data good."); + + /* Close dataspaces */ + ret = H5Sclose(write_mem_ds_sid); + VRFY((ret != FAIL), "H5Sclose(write_mem_ds_sid) succeeded"); + + ret = H5Sclose(read_mem_ds_sid); + VRFY((ret != FAIL), "H5Sclose(read_mem_ds_sid) succeeded"); + + ret = H5Sclose(file_ds_sid); + VRFY((ret != FAIL), "H5Sclose(file_ds_sid) succeeded"); + + /* Close Dataset */ + ret = H5Dclose(dset_id); + VRFY((ret != FAIL), "H5Dclose(dset_id) succeeded"); + + /* close the file collectively */ + ret = H5Fclose(file_id); + VRFY((ret != FAIL), "file close succeeded"); + + return; + +} /* link_chunk_collective_io_test() */ diff --git a/testpar/API/testphdf5.c b/testpar/API/testphdf5.c new file mode 100644 index 00000000000..ec5dae222a6 --- /dev/null +++ b/testpar/API/testphdf5.c @@ -0,0 +1,1007 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Main driver of the Parallel HDF5 tests + */ + +#include "hdf5.h" +#include "testphdf5.h" + +#ifndef PATH_MAX +#define PATH_MAX 512 +#endif /* !PATH_MAX */ + +/* global variables */ +int dim0; +int dim1; +int chunkdim0; +int chunkdim1; +int nerrors = 0; /* errors count */ +int ndatasets = 300; /* number of datasets to create*/ +int ngroups = 512; /* number of groups to create in root + * group. */ +int facc_type = FACC_MPIO; /*Test file access type */ +int dxfer_coll_type = DXFER_COLLECTIVE_IO; + +H5E_auto2_t old_func; /* previous error handler */ +void *old_client_data; /* previous error handler arg.*/ + +/* other option flags */ + +/* FILENAME and filenames must have the same number of names. + * Use PARATESTFILE in general and use a separated filename only if the file + * created in one test is accessed by a different test. + * filenames[0] is reserved as the file name for PARATESTFILE. + */ +#define NFILENAME 2 +/* #define PARATESTFILE filenames[0] */ +const char *FILENAME[NFILENAME] = {"ParaTest.h5", NULL}; +char filenames[NFILENAME][PATH_MAX]; +hid_t fapl; /* file access property list */ + +#ifdef USE_PAUSE +/* pause the process for a moment to allow debugger to attach if desired. */ +/* Will pause more if greenlight file is not persent but will eventually */ +/* continue. */ +#include +#include + +void +pause_proc(void) +{ + + int pid; + h5_stat_t statbuf; + char greenlight[] = "go"; + int maxloop = 10; + int loops = 0; + int time_int = 10; + + /* mpi variables */ + int mpi_size, mpi_rank; + int mpi_namelen; + char mpi_name[MPI_MAX_PROCESSOR_NAME]; + + pid = getpid(); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Get_processor_name(mpi_name, &mpi_namelen); + + if (MAINPROCESS) + while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) { + if (!loops++) { + HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid, + pid); + } + HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight); + HDfflush(stdout); + HDsleep(time_int); + } + MPI_Barrier(MPI_COMM_WORLD); +} + +/* Use the Profile feature of MPI to call the pause_proc() */ +int +MPI_Init(int *argc, char ***argv) +{ + int ret_code; + ret_code = PMPI_Init(argc, argv); + pause_proc(); + return (ret_code); +} +#endif /* USE_PAUSE */ + +/* + * Show command usage + */ +static void +usage(void) +{ + HDprintf(" [-r] [-w] [-m] [-n] " + "[-o] [-f ] [-d ]\n"); + HDprintf("\t-m" + "\tset number of datasets for the multiple dataset test\n"); + HDprintf("\t-n" + "\tset number of groups for the multiple group test\n"); +#if 0 + HDprintf("\t-f \tfilename prefix\n"); +#endif + HDprintf("\t-2\t\tuse Split-file together with MPIO\n"); + HDprintf("\t-d \tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR, + COL_FACTOR); + HDprintf("\t-c \tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); + HDprintf("\n"); +} + +/* + * parse the command line options + */ +static int +parse_options(int argc, char **argv) +{ + int mpi_size, mpi_rank; /* mpi variables */ + + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* setup default chunk-size. 
Make sure sizes are > 0 */ + + chunkdim0 = (dim0 + 9) / 10; + chunkdim1 = (dim1 + 9) / 10; + + while (--argc) { + if (**(++argv) != '-') { + break; + } + else { + switch (*(*argv + 1)) { + case 'm': + ndatasets = atoi((*argv + 1) + 1); + if (ndatasets < 0) { + nerrors++; + return (1); + } + break; + case 'n': + ngroups = atoi((*argv + 1) + 1); + if (ngroups < 0) { + nerrors++; + return (1); + } + break; +#if 0 + case 'f': if (--argc < 1) { + nerrors++; + return(1); + } + if (**(++argv) == '-') { + nerrors++; + return(1); + } + paraprefix = *argv; + break; +#endif + case 'i': /* Collective MPI-IO access with independent IO */ + dxfer_coll_type = DXFER_INDEPENDENT_IO; + break; + case '2': /* Use the split-file driver with MPIO access */ + /* Can use $HDF5_METAPREFIX to define the */ + /* meta-file-prefix. */ + facc_type = FACC_MPIO | FACC_SPLIT; + break; + case 'd': /* dimensizes */ + if (--argc < 2) { + nerrors++; + return (1); + } + dim0 = atoi(*(++argv)) * mpi_size; + argc--; + dim1 = atoi(*(++argv)) * mpi_size; + /* set default chunkdim sizes too */ + chunkdim0 = (dim0 + 9) / 10; + chunkdim1 = (dim1 + 9) / 10; + break; + case 'c': /* chunk dimensions */ + if (--argc < 2) { + nerrors++; + return (1); + } + chunkdim0 = atoi(*(++argv)); + argc--; + chunkdim1 = atoi(*(++argv)); + break; + case 'h': /* print help message--return with nerrors set */ + return (1); + default: + HDprintf("Illegal option(%s)\n", *argv); + nerrors++; + return (1); + } + } + } /*while*/ + + /* check validity of dimension and chunk sizes */ + if (dim0 <= 0 || dim1 <= 0) { + HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1); + nerrors++; + return (1); + } + if (chunkdim0 <= 0 || chunkdim1 <= 0) { + HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); + nerrors++; + return (1); + } + + /* Make sure datasets can be divided into equal portions by the processes */ + if ((dim0 % mpi_size) || (dim1 % mpi_size)) { + if (MAINPROCESS) + HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size); + nerrors++; + return (1); + } + + /* compose the test filenames */ + { + int i, n; + + n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */ + + for (i = 0; i < n; i++) + strncpy(filenames[i], FILENAME[i], PATH_MAX); +#if 0 /* no support for VFDs right now */ + if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) { + HDprintf("h5_fixname failed\n"); + nerrors++; + return (1); + } +#endif + if (MAINPROCESS) { + HDprintf("Test filenames are:\n"); + for (i = 0; i < n; i++) + HDprintf(" %s\n", filenames[i]); + } + } + + return (0); +} + +/* + * Create the appropriate File access property list + */ +hid_t +create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) +{ + hid_t ret_pl = -1; + herr_t ret; /* generic return value */ + int mpi_rank; /* mpi variables */ + + /* need the rank for error checking macros */ + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + ret_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((ret_pl >= 0), "H5P_FILE_ACCESS"); + + if (l_facc_type == FACC_DEFAULT) + return (ret_pl); + + if (l_facc_type == FACC_MPIO) { + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(ret_pl, comm, info); + VRFY((ret >= 0), ""); + ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE); + VRFY((ret >= 0), ""); + ret = H5Pset_coll_metadata_write(ret_pl, TRUE); + VRFY((ret >= 0), ""); + return (ret_pl); + } + + if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { + hid_t mpio_pl; + + mpio_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((mpio_pl >= 0), ""); + 
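The branch that continues below routes both halves of a split file through MPI-IO: metadata is written through the mpio_pl list just created, raw data through the same list, and the two are combined with H5Pset_fapl_split() using the ".meta" and ".raw" extensions. A caller-side sketch of how a test would use the resulting access list follows; the file name and the assumption that MPI is already initialized are illustrative.

    hid_t split_fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO | FACC_SPLIT);
    hid_t fid        = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, split_fapl);

    /* ... collective dataset creation and I/O ... */

    H5Fclose(fid);          /* closes both underlying files, example.h5.meta and example.h5.raw */
    H5Pclose(split_fapl);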
/* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(mpio_pl, comm, info); + VRFY((ret >= 0), ""); + + /* setup file access template */ + ret_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((ret_pl >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); + VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); + H5Pclose(mpio_pl); + return (ret_pl); + } + + /* unknown file access types */ + return (ret_pl); +} + +int +main(int argc, char **argv) +{ + int mpi_size, mpi_rank; /* mpi variables */ + herr_t ret; + +#if 0 + H5Ptest_param_t ndsets_params, ngroups_params; + H5Ptest_param_t collngroups_params; + H5Ptest_param_t io_mode_confusion_params; + H5Ptest_param_t rr_obj_flush_confusion_params; +#endif + +#ifndef H5_HAVE_WIN32_API + /* Un-buffer the stdout and stderr */ + HDsetbuf(stderr, NULL); + HDsetbuf(stdout, NULL); +#endif + + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + dim0 = ROW_FACTOR * mpi_size; + dim1 = COL_FACTOR * mpi_size; + + if (MAINPROCESS) { + HDprintf("===================================\n"); + HDprintf("PHDF5 TESTS START\n"); + HDprintf("===================================\n"); + } + + /* Attempt to turn off atexit post processing so that in case errors + * happen during the test and the process is aborted, it will not get + * hang in the atexit post processing in which it may try to make MPI + * calls. By then, MPI calls may not work. + */ + if (H5dont_atexit() < 0) { + HDprintf("Failed to turn off atexit processing. Continue.\n"); + }; + H5open(); + /* h5_show_hostname(); */ + +#if 0 + HDmemset(filenames, 0, sizeof(filenames)); + for (int i = 0; i < NFILENAME; i++) { + if (NULL == (filenames[i] = HDmalloc(PATH_MAX))) { + HDprintf("couldn't allocate filename array\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } + } +#endif + + /* Set up file access property list with parallel I/O access */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl >= 0), "H5Pcreate succeeded"); + + vol_cap_flags_g = H5VL_CAP_FLAG_NONE; + + /* Get the capability flag of the VOL connector being used */ + ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g); + VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded"); + + /* Initialize testing framework */ + /* TestInit(argv[0], usage, parse_options); */ + + if (parse_options(argc, argv)) { + usage(); + return 1; + } + + /* Tests are generally arranged from least to most complexity... 
*/ +#if 0 + AddTest("mpiodup", test_fapl_mpio_dup, NULL, + "fapl_mpio duplicate", NULL); +#endif + + if (MAINPROCESS) { + printf("fapl_mpio duplicate\n"); + fflush(stdout); + } + test_fapl_mpio_dup(); + +#if 0 + AddTest("split", test_split_comm_access, NULL, + "dataset using split communicators", PARATESTFILE); + AddTest("props", test_file_properties, NULL, + "Coll Metadata file property settings", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("dataset using split communicators\n"); + fflush(stdout); + } + test_split_comm_access(); + + if (MAINPROCESS) { + printf("Coll Metadata file property settings\n"); + fflush(stdout); + } + test_file_properties(); + +#if 0 + AddTest("idsetw", dataset_writeInd, NULL, + "dataset independent write", PARATESTFILE); + AddTest("idsetr", dataset_readInd, NULL, + "dataset independent read", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("dataset independent write\n"); + fflush(stdout); + } + dataset_writeInd(); + if (MAINPROCESS) { + printf("dataset independent read\n"); + fflush(stdout); + } + dataset_readInd(); + +#if 0 + AddTest("cdsetw", dataset_writeAll, NULL, + "dataset collective write", PARATESTFILE); + AddTest("cdsetr", dataset_readAll, NULL, + "dataset collective read", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("dataset collective write\n"); + fflush(stdout); + } + dataset_writeAll(); + if (MAINPROCESS) { + printf("dataset collective read\n"); + fflush(stdout); + } + dataset_readAll(); + +#if 0 + AddTest("eidsetw", extend_writeInd, NULL, + "extendible dataset independent write", PARATESTFILE); + AddTest("eidsetr", extend_readInd, NULL, + "extendible dataset independent read", PARATESTFILE); + AddTest("ecdsetw", extend_writeAll, NULL, + "extendible dataset collective write", PARATESTFILE); + AddTest("ecdsetr", extend_readAll, NULL, + "extendible dataset collective read", PARATESTFILE); + AddTest("eidsetw2", extend_writeInd2, NULL, + "extendible dataset independent write #2", PARATESTFILE); + AddTest("selnone", none_selection_chunk, NULL, + "chunked dataset with none-selection", PARATESTFILE); + AddTest("calloc", test_chunk_alloc, NULL, + "parallel extend Chunked allocation on serial file", PARATESTFILE); + AddTest("fltread", test_filter_read, NULL, + "parallel read of dataset written serially with filters", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("extendible dataset independent write\n"); + fflush(stdout); + } + extend_writeInd(); + if (MAINPROCESS) { + printf("extendible dataset independent read\n"); + fflush(stdout); + } + extend_readInd(); + if (MAINPROCESS) { + printf("extendible dataset collective write\n"); + fflush(stdout); + } + extend_writeAll(); + if (MAINPROCESS) { + printf("extendible dataset collective read\n"); + fflush(stdout); + } + extend_readAll(); + if (MAINPROCESS) { + printf("extendible dataset independent write #2\n"); + fflush(stdout); + } + extend_writeInd2(); + if (MAINPROCESS) { + printf("chunked dataset with none-selection\n"); + fflush(stdout); + } + none_selection_chunk(); + if (MAINPROCESS) { + printf("parallel extend Chunked allocation on serial file\n"); + fflush(stdout); + } + test_chunk_alloc(); + if (MAINPROCESS) { + printf("parallel read of dataset written serially with filters\n"); + fflush(stdout); + } + test_filter_read(); + +#ifdef H5_HAVE_FILTER_DEFLATE +#if 0 + AddTest("cmpdsetr", compress_readAll, NULL, + "compressed dataset collective read", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("compressed dataset collective read\n"); + fflush(stdout); + } + 
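Because the AddTest()-based framework is disabled in this driver (the #if 0 blocks above), every test follows the same announce-then-run shape: rank 0 prints the description and flushes stdout, then all ranks call the test collectively, as compress_readAll() does just below. A helper of the following form could express that pattern; it is an illustrative sketch only and is not part of this patch (MAINPROCESS is the rank-0 check from testpar.h).

    static void
    announce_and_run(const char *description, void (*test_fn)(void))
    {
        if (MAINPROCESS) {            /* only rank 0 prints the test name */
            printf("%s\n", description);
            fflush(stdout);
        }
        test_fn();                    /* every rank participates in the test */
    }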
compress_readAll(); +#endif /* H5_HAVE_FILTER_DEFLATE */ + +#if 0 + AddTest("zerodsetr", zero_dim_dset, NULL, + "zero dim dset", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("zero dim dset\n"); + fflush(stdout); + } + zero_dim_dset(); + +#if 0 + ndsets_params.name = PARATESTFILE; + ndsets_params.count = ndatasets; + AddTest("ndsetw", multiple_dset_write, NULL, + "multiple datasets write", &ndsets_params); +#endif + + if (MAINPROCESS) { + printf("multiple datasets write\n"); + fflush(stdout); + } + multiple_dset_write(); + +#if 0 + ngroups_params.name = PARATESTFILE; + ngroups_params.count = ngroups; + AddTest("ngrpw", multiple_group_write, NULL, + "multiple groups write", &ngroups_params); + AddTest("ngrpr", multiple_group_read, NULL, + "multiple groups read", &ngroups_params); +#endif + + if (MAINPROCESS) { + printf("multiple groups write\n"); + fflush(stdout); + } + multiple_group_write(); + if (MAINPROCESS) { + printf("multiple groups read\n"); + fflush(stdout); + } + multiple_group_read(); + +#if 0 + AddTest("compact", compact_dataset, NULL, + "compact dataset test", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("compact dataset test\n"); + fflush(stdout); + } + compact_dataset(); + +#if 0 + collngroups_params.name = PARATESTFILE; + collngroups_params.count = ngroups; + /* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */ + AddTest("cngrpw-ingrpr", collective_group_write_independent_group_read, NULL, + "collective grp/dset write - independent grp/dset read", + &collngroups_params); +#ifndef H5_HAVE_WIN32_API + AddTest("bigdset", big_dataset, NULL, + "big dataset test", PARATESTFILE); +#else + HDprintf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n"); +#endif +#endif + + if (MAINPROCESS) { + printf("collective grp/dset write - independent grp/dset read\n"); + fflush(stdout); + } + collective_group_write_independent_group_read(); + if (MAINPROCESS) { + printf("big dataset test\n"); + fflush(stdout); + } + big_dataset(); + +#if 0 + AddTest("fill", dataset_fillvalue, NULL, + "dataset fill value", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("dataset fill value\n"); + fflush(stdout); + } + dataset_fillvalue(); + +#if 0 + AddTest("cchunk1", + coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE); + AddTest("cchunk2", + coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE); + AddTest("cchunk3", + coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE); + AddTest("cchunk4", + coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("simple collective chunk io\n"); + fflush(stdout); + } + coll_chunk1(); + if (MAINPROCESS) { + printf("noncontiguous collective chunk io\n"); + fflush(stdout); + } + coll_chunk2(); + if (MAINPROCESS) { + printf("multi-chunk collective chunk io\n"); + fflush(stdout); + } + coll_chunk3(); + if (MAINPROCESS) { + printf("collective chunk io with partial non-selection\n"); + fflush(stdout); + } + coll_chunk4(); + + if ((mpi_size < 3) && MAINPROCESS) { + HDprintf("Collective chunk IO optimization APIs "); + HDprintf("needs at least 3 processes to participate\n"); + HDprintf("Collective chunk IO API tests will be skipped \n"); + } + +#if 0 + AddTest((mpi_size <3)? "-cchunk5":"cchunk5" , + coll_chunk5,NULL, + "linked chunk collective IO without optimization",PARATESTFILE); + AddTest((mpi_size < 3)? 
"-cchunk6" : "cchunk6", + coll_chunk6,NULL, + "multi-chunk collective IO with direct request",PARATESTFILE); + AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7", + coll_chunk7,NULL, + "linked chunk collective IO with optimization",PARATESTFILE); + AddTest((mpi_size < 3)? "-cchunk8" : "cchunk8", + coll_chunk8,NULL, + "linked chunk collective IO transferring to multi-chunk",PARATESTFILE); + AddTest((mpi_size < 3)? "-cchunk9" : "cchunk9", + coll_chunk9,NULL, + "multiple chunk collective IO with optimization",PARATESTFILE); + AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10", + coll_chunk10,NULL, + "multiple chunk collective IO transferring to independent IO",PARATESTFILE); +#endif + + if (mpi_size >= 3) { + if (MAINPROCESS) { + printf("linked chunk collective IO without optimization\n"); + fflush(stdout); + } + coll_chunk5(); + if (MAINPROCESS) { + printf("multi-chunk collective IO with direct request\n"); + fflush(stdout); + } + coll_chunk6(); + if (MAINPROCESS) { + printf("linked chunk collective IO with optimization\n"); + fflush(stdout); + } + coll_chunk7(); + if (MAINPROCESS) { + printf("linked chunk collective IO transferring to multi-chunk\n"); + fflush(stdout); + } + coll_chunk8(); + if (MAINPROCESS) { + printf("multiple chunk collective IO with optimization\n"); + fflush(stdout); + } + coll_chunk9(); + if (MAINPROCESS) { + printf("multiple chunk collective IO transferring to independent IO\n"); + fflush(stdout); + } + coll_chunk10(); + } + +#if 0 + /* irregular collective IO tests*/ + AddTest("ccontw", + coll_irregular_cont_write,NULL, + "collective irregular contiguous write",PARATESTFILE); + AddTest("ccontr", + coll_irregular_cont_read,NULL, + "collective irregular contiguous read",PARATESTFILE); + AddTest("cschunkw", + coll_irregular_simple_chunk_write,NULL, + "collective irregular simple chunk write",PARATESTFILE); + AddTest("cschunkr", + coll_irregular_simple_chunk_read,NULL, + "collective irregular simple chunk read",PARATESTFILE); + AddTest("ccchunkw", + coll_irregular_complex_chunk_write,NULL, + "collective irregular complex chunk write",PARATESTFILE); + AddTest("ccchunkr", + coll_irregular_complex_chunk_read,NULL, + "collective irregular complex chunk read",PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("collective irregular contiguous write\n"); + fflush(stdout); + } + coll_irregular_cont_write(); + if (MAINPROCESS) { + printf("collective irregular contiguous read\n"); + fflush(stdout); + } + coll_irregular_cont_read(); + if (MAINPROCESS) { + printf("collective irregular simple chunk write\n"); + fflush(stdout); + } + coll_irregular_simple_chunk_write(); + if (MAINPROCESS) { + printf("collective irregular simple chunk read\n"); + fflush(stdout); + } + coll_irregular_simple_chunk_read(); + if (MAINPROCESS) { + printf("collective irregular complex chunk write\n"); + fflush(stdout); + } + coll_irregular_complex_chunk_write(); + if (MAINPROCESS) { + printf("collective irregular complex chunk read\n"); + fflush(stdout); + } + coll_irregular_complex_chunk_read(); + +#if 0 + AddTest("null", null_dataset, NULL, + "null dataset test", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("null dataset test\n"); + fflush(stdout); + } + null_dataset(); + +#if 0 + io_mode_confusion_params.name = PARATESTFILE; + io_mode_confusion_params.count = 0; /* value not used */ + + AddTest("I/Omodeconf", io_mode_confusion, NULL, + "I/O mode confusion test", + &io_mode_confusion_params); +#endif + + if (MAINPROCESS) { + printf("I/O mode confusion test\n"); + fflush(stdout); + } + 
io_mode_confusion(); + + if ((mpi_size < 3) && MAINPROCESS) { + HDprintf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n"); + HDprintf("rr_obj_hdr_flush_confusion test will be skipped \n"); + } + + if (mpi_size > 2) { +#if 0 + rr_obj_flush_confusion_params.name = PARATESTFILE; + rr_obj_flush_confusion_params.count = 0; /* value not used */ + AddTest("rrobjflushconf", rr_obj_hdr_flush_confusion, NULL, + "round robin object header flush confusion test", + &rr_obj_flush_confusion_params); +#endif + + if (MAINPROCESS) { + printf("round robin object header flush confusion test\n"); + fflush(stdout); + } + rr_obj_hdr_flush_confusion(); + } + +#if 0 + AddTest("alnbg1", + chunk_align_bug_1, NULL, + "Chunk allocation with alignment bug.", + PARATESTFILE); + + AddTest("tldsc", + lower_dim_size_comp_test, NULL, + "test lower dim size comp in span tree to mpi derived type", + PARATESTFILE); + + AddTest("lccio", + link_chunk_collective_io_test, NULL, + "test mpi derived type management", + PARATESTFILE); + + AddTest("actualio", actual_io_mode_tests, NULL, + "test actual io mode proprerty", + PARATESTFILE); + + AddTest("nocolcause", no_collective_cause_tests, NULL, + "test cause for broken collective io", + PARATESTFILE); + + AddTest("edpl", test_plist_ed, NULL, + "encode/decode Property Lists", NULL); +#endif + + if (MAINPROCESS) { + printf("Chunk allocation with alignment bug\n"); + fflush(stdout); + } + chunk_align_bug_1(); + if (MAINPROCESS) { + printf("test lower dim size comp in span tree to mpi derived type\n"); + fflush(stdout); + } + lower_dim_size_comp_test(); + if (MAINPROCESS) { + printf("test mpi derived type management\n"); + fflush(stdout); + } + link_chunk_collective_io_test(); + if (MAINPROCESS) { + printf("test actual io mode property - SKIPPED currently due to native-specific testing\n"); + fflush(stdout); + } + /* actual_io_mode_tests(); */ + if (MAINPROCESS) { + printf("test cause for broken collective io - SKIPPED currently due to native-specific testing\n"); + fflush(stdout); + } + /* no_collective_cause_tests(); */ + if (MAINPROCESS) { + printf("encode/decode Property Lists\n"); + fflush(stdout); + } + test_plist_ed(); + + if ((mpi_size < 2) && MAINPROCESS) { + HDprintf("File Image Ops daisy chain test needs at least 2 processes.\n"); + HDprintf("File Image Ops daisy chain test will be skipped \n"); + } + +#if 0 + AddTest((mpi_size < 2)? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL, + "file image ops daisy chain", NULL); +#endif + + if (mpi_size >= 2) { + if (MAINPROCESS) { + printf("file image ops daisy chain - SKIPPED currently due to native-specific testing\n"); + fflush(stdout); + } + /* file_image_daisy_chain_test(); */ + } + + if ((mpi_size < 2) && MAINPROCESS) { + HDprintf("Atomicity tests need at least 2 processes to participate\n"); + HDprintf("8 is more recommended.. 
Atomicity tests will be skipped \n"); + } + else if (facc_type != FACC_MPIO && MAINPROCESS) { + HDprintf("Atomicity tests will not work with a non MPIO VFD\n"); + } + else if (mpi_size >= 2 && facc_type == FACC_MPIO) { +#if 0 + AddTest("atomicity", dataset_atomicity, NULL, + "dataset atomic updates", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("dataset atomic updates - SKIPPED currently due to native-specific testing\n"); + fflush(stdout); + } + /* dataset_atomicity(); */ + } + +#if 0 + AddTest("denseattr", test_dense_attr, NULL, + "Store Dense Attributes", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("Store Dense Attributes\n"); + fflush(stdout); + } + test_dense_attr(); + +#if 0 + AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL, + "Collective Metadata read with some ranks having no selection", PARATESTFILE); + AddTest("MC_coll_MD_read", test_multi_chunk_io_addrmap_issue, NULL, + "Collective MD read with multi chunk I/O (H5D__chunk_addrmap)", PARATESTFILE); + AddTest("LC_coll_MD_read", test_link_chunk_io_sort_chunk_issue, NULL, + "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE); +#endif + + if (MAINPROCESS) { + printf("Collective Metadata read with some ranks having no selection\n"); + fflush(stdout); + } + test_partial_no_selection_coll_md_read(); + if (MAINPROCESS) { + printf("Collective MD read with multi chunk I/O\n"); + fflush(stdout); + } + test_multi_chunk_io_addrmap_issue(); + if (MAINPROCESS) { + printf("Collective MD read with link chunk I/O\n"); + fflush(stdout); + } + test_link_chunk_io_sort_chunk_issue(); + + /* Display testing information */ + /* TestInfo(argv[0]); */ + + /* setup file access property list */ + H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); + + /* Parse command line arguments */ + /* TestParseCmdLine(argc, argv); */ + + if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) { + HDprintf("===================================\n" + " Using Independent I/O with file set view to replace collective I/O \n" + "===================================\n"); + } + + /* Perform requested testing */ + /* PerformTests(); */ + + /* make sure all processes are finished before final report, cleanup + * and exit. 
+ */ + MPI_Barrier(MPI_COMM_WORLD); + + /* Display test summary, if requested */ + /* if (MAINPROCESS && GetTestSummary()) + TestSummary(); */ + + /* Clean up test files */ + /* h5_clean_files(FILENAME, fapl); */ + H5Fdelete(FILENAME[0], fapl); + H5Pclose(fapl); + + /* nerrors += GetTestNumErrs(); */ + + /* Gather errors from all processes */ + { + int temp; + MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); + nerrors = temp; + } + + if (MAINPROCESS) { /* only process 0 reports */ + HDprintf("===================================\n"); + if (nerrors) + HDprintf("***PHDF5 tests detected %d errors***\n", nerrors); + else + HDprintf("PHDF5 tests finished successfully\n"); + HDprintf("===================================\n"); + } + +#if 0 + for (int i = 0; i < NFILENAME; i++) { + HDfree(filenames[i]); + filenames[i] = NULL; + } +#endif + + /* close HDF5 library */ + H5close(); + + /* Release test infrastructure */ + /* TestShutdown(); */ + + /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ + MPI_Finalize(); + + /* cannot just return (nerrors) because exit code is limited to 1byte */ + return (nerrors != 0); +} diff --git a/testpar/API/testphdf5.h b/testpar/API/testphdf5.h new file mode 100644 index 00000000000..27d53e2eed1 --- /dev/null +++ b/testpar/API/testphdf5.h @@ -0,0 +1,343 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* common definitions used by all parallel hdf5 test programs. */ + +#ifndef PHDF5TEST_H +#define PHDF5TEST_H + +#include "H5private.h" +#include "testpar.h" +#include "H5_api_tests_disabled.h" + +/* + * Define parameters for various tests since we do not have access to + * passing parameters to tests via the testphdf5 test framework. + */ +#define PARATESTFILE "ParaTest.h5" +#define NDATASETS 300 +#define NGROUPS 256 + +/* Disable express testing by default */ +#define EXPRESS_MODE 0 + +enum H5TEST_COLL_CHUNK_API { + API_NONE = 0, + API_LINK_HARD, + API_MULTI_HARD, + API_LINK_TRUE, + API_LINK_FALSE, + API_MULTI_COLL, + API_MULTI_IND +}; + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +/* Constants definitions */ +#define DIM0 600 /* Default dataset sizes. 
*/ +#define DIM1 1200 /* Values are from a monitor pixel sizes */ +#define ROW_FACTOR 8 /* Nominal row factor for dataset size */ +#define COL_FACTOR 16 /* Nominal column factor for dataset size */ +#define RANK 2 +#define DATASETNAME1 "Data1" +#define DATASETNAME2 "Data2" +#define DATASETNAME3 "Data3" +#define DATASETNAME4 "Data4" +#define DATASETNAME5 "Data5" +#define DATASETNAME6 "Data6" +#define DATASETNAME7 "Data7" +#define DATASETNAME8 "Data8" +#define DATASETNAME9 "Data9" + +/* point selection order */ +#define IN_ORDER 1 +#define OUT_OF_ORDER 2 + +/* Hyperslab layout styles */ +#define BYROW 1 /* divide into slabs of rows */ +#define BYCOL 2 /* divide into blocks of columns */ +#define ZROW 3 /* same as BYCOL except process 0 gets 0 rows */ +#define ZCOL 4 /* same as BYCOL except process 0 gets 0 columns */ + +/* File_Access_type bits */ +#define FACC_DEFAULT 0x0 /* default */ +#define FACC_MPIO 0x1 /* MPIO */ +#define FACC_SPLIT 0x2 /* Split File */ + +#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/ +#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */ +/*Constants for collective chunk definitions */ +#define SPACE_DIM1 24 +#define SPACE_DIM2 4 +#define BYROW_CONT 1 +#define BYROW_DISCONT 2 +#define BYROW_SELECTNONE 3 +#define BYROW_SELECTUNBALANCE 4 +#define BYROW_SELECTINCHUNK 5 + +#define DIMO_NUM_CHUNK 4 +#define DIM1_NUM_CHUNK 2 +#define LINK_TRUE_NUM_CHUNK 2 +#define LINK_FALSE_NUM_CHUNK 6 +#define MULTI_TRUE_PERCENT 50 +#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_true" +#define LINK_FALSE_CHUNK_NAME "h5_link_chunk_false" +#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard" +#define MULTI_HARD_CHUNK_NAME "h5_multi_chunk_hard" +#define MULTI_COLL_CHUNK_NAME "h5_multi_chunk_coll" +#define MULTI_INDP_CHUNK_NAME "h5_multi_chunk_indp" + +#define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name" + +/*Constants for MPI derived data type generated from span tree */ + +#define MSPACE1_RANK 1 /* Rank of the first dataset in memory */ +#define MSPACE1_DIM 27000 /* Dataset size in memory */ +#define FSPACE_RANK 2 /* Dataset rank as it is stored in the file */ +#define FSPACE_DIM1 9 /* Dimension sizes of the dataset as it is stored in the file */ +#define FSPACE_DIM2 3600 +/* We will read dataset back from the file to the dataset in memory with these dataspace parameters. 
*/ +#define MSPACE_RANK 2 +#define MSPACE_DIM1 9 +#define MSPACE_DIM2 3600 +#define FHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/ +#define FHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/ +#define FHSTRIDE0 4 /* Stride of the first dimension of the first hyperslab selection*/ +#define FHSTRIDE1 3 /* Stride of the second dimension of the first hyperslab selection*/ +#define FHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/ +#define FHBLOCK1 2 /* Block of the second dimension of the first hyperslab selection*/ +#define FHSTART0 0 /* start of the first dimension of the first hyperslab selection*/ +#define FHSTART1 1 /* start of the second dimension of the first hyperslab selection*/ + +#define SHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/ +#define SHCOUNT1 1 /* Count of the second dimension of the first hyperslab selection*/ +#define SHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/ +#define SHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/ +#define SHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/ +#define SHBLOCK1 768 /* Block of the second dimension of the first hyperslab selection*/ +#define SHSTART0 4 /* start of the first dimension of the first hyperslab selection*/ +#define SHSTART1 0 /* start of the second dimension of the first hyperslab selection*/ + +#define MHCOUNT0 6912 /* Count of the first dimension of the first hyperslab selection*/ +#define MHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/ +#define MHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/ +#define MHSTART0 1 /* start of the first dimension of the first hyperslab selection*/ + +#define RFFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/ +#define RFFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/ +#define RFFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/ +#define RFFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/ +#define RFFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/ +#define RFFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/ +#define RFFHSTART0 1 /* start of the first dimension of the first hyperslab selection*/ +#define RFFHSTART1 2 /* start of the second dimension of the first hyperslab selection*/ + +#define RFSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/ +#define RFSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/ +#define RFSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/ +#define RFSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/ +#define RFSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/ +#define RFSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/ +#define RFSHSTART0 2 /* start of the first dimension of the first hyperslab selection*/ +#define RFSHSTART1 4 /* start of the second dimension of the first hyperslab selection*/ + +#define RMFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/ +#define RMFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/ +#define RMFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab 
selection*/ +#define RMFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/ +#define RMFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/ +#define RMFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/ +#define RMFHSTART0 0 /* start of the first dimension of the first hyperslab selection*/ +#define RMFHSTART1 0 /* start of the second dimension of the first hyperslab selection*/ + +#define RMSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/ +#define RMSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/ +#define RMSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/ +#define RMSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/ +#define RMSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/ +#define RMSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/ +#define RMSHSTART0 1 /* start of the first dimension of the first hyperslab selection*/ +#define RMSHSTART1 2 /* start of the second dimension of the first hyperslab selection*/ + +#define NPOINTS \ + 4 /* Number of points that will be selected \ + and overwritten */ + +/* Definitions of the selection mode for the test_actual_io_function. */ +#define TEST_ACTUAL_IO_NO_COLLECTIVE 0 +#define TEST_ACTUAL_IO_RESET 1 +#define TEST_ACTUAL_IO_MULTI_CHUNK_IND 2 +#define TEST_ACTUAL_IO_MULTI_CHUNK_COL 3 +#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX 4 +#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE 5 +#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND 6 +#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL 7 +#define TEST_ACTUAL_IO_LINK_CHUNK 8 +#define TEST_ACTUAL_IO_CONTIGUOUS 9 + +/* Definitions of the selection mode for the no_collective_cause_tests function. 
*/ +#define TEST_COLLECTIVE 0x001 +#define TEST_SET_INDEPENDENT 0x002 +#define TEST_DATATYPE_CONVERSION 0x004 +#define TEST_DATA_TRANSFORMS 0x008 +#define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x010 +#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x020 +#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x040 + +/* Don't erase these lines, they are put here for debugging purposes */ +/* +#define MSPACE1_RANK 1 +#define MSPACE1_DIM 50 +#define MSPACE2_RANK 1 +#define MSPACE2_DIM 4 +#define FSPACE_RANK 2 +#define FSPACE_DIM1 8 +#define FSPACE_DIM2 12 +#define MSPACE_RANK 2 +#define MSPACE_DIM1 8 +#define MSPACE_DIM2 9 +#define NPOINTS 4 +*/ /* end of debugging macro */ + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY +/* Collective chunk instrumentation properties */ +#define H5D_XFER_COLL_CHUNK_LINK_HARD_NAME "coll_chunk_link_hard" +#define H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME "coll_chunk_multi_hard" +#define H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME "coll_chunk_link_true" +#define H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME "coll_chunk_link_false" +#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME "coll_chunk_multi_coll" +#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME "coll_chunk_multi_ind" + +/* Definitions for all collective chunk instrumentation properties */ +#define H5D_XFER_COLL_CHUNK_SIZE sizeof(unsigned) +#define H5D_XFER_COLL_CHUNK_DEF 1 + +/* General collective I/O instrumentation properties */ +#define H5D_XFER_COLL_RANK0_BCAST_NAME "coll_rank0_bcast" + +/* Definitions for general collective I/O instrumentation properties */ +#define H5D_XFER_COLL_RANK0_BCAST_SIZE sizeof(hbool_t) +#define H5D_XFER_COLL_RANK0_BCAST_DEF FALSE +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + +/* type definitions */ +typedef struct H5Ptest_param_t /* holds extra test parameters */ +{ + char *name; + int count; +} H5Ptest_param_t; + +/* Dataset data type. Int's can be easily octo dumped. 
*/ +typedef int DATATYPE; + +/* Shape Same Tests Definitions */ +typedef enum { + IND_CONTIG, /* Independent IO on contiguous datasets */ + COL_CONTIG, /* Collective IO on contiguous datasets */ + IND_CHUNKED, /* Independent IO on chunked datasets */ + COL_CHUNKED /* Collective IO on chunked datasets */ +} ShapeSameTestMethods; + +/* Shared global variables */ +extern int dim0, dim1; /*Dataset dimensions */ +extern int chunkdim0, chunkdim1; /*Chunk dimensions */ +extern int nerrors; /*errors count */ +extern H5E_auto2_t old_func; /* previous error handler */ +extern void *old_client_data; /*previous error handler arg.*/ +extern int facc_type; /*Test file access type */ +extern int dxfer_coll_type; + +/* Test program prototypes */ +void test_plist_ed(void); +#if 0 +void external_links(void); +#endif +void zero_dim_dset(void); +void test_file_properties(void); +void test_delete(void); +void multiple_dset_write(void); +void multiple_group_write(void); +void multiple_group_read(void); +void collective_group_write_independent_group_read(void); +void collective_group_write(void); +void independent_group_read(void); +void test_fapl_mpio_dup(void); +void test_split_comm_access(void); +void test_page_buffer_access(void); +void dataset_atomicity(void); +void dataset_writeInd(void); +void dataset_writeAll(void); +void extend_writeInd(void); +void extend_writeInd2(void); +void extend_writeAll(void); +void dataset_readInd(void); +void dataset_readAll(void); +void extend_readInd(void); +void extend_readAll(void); +void none_selection_chunk(void); +void actual_io_mode_tests(void); +void no_collective_cause_tests(void); +void test_chunk_alloc(void); +void test_filter_read(void); +void compact_dataset(void); +void null_dataset(void); +void big_dataset(void); +void dataset_fillvalue(void); +void coll_chunk1(void); +void coll_chunk2(void); +void coll_chunk3(void); +void coll_chunk4(void); +void coll_chunk5(void); +void coll_chunk6(void); +void coll_chunk7(void); +void coll_chunk8(void); +void coll_chunk9(void); +void coll_chunk10(void); +void coll_irregular_cont_read(void); +void coll_irregular_cont_write(void); +void coll_irregular_simple_chunk_read(void); +void coll_irregular_simple_chunk_write(void); +void coll_irregular_complex_chunk_read(void); +void coll_irregular_complex_chunk_write(void); +void io_mode_confusion(void); +void rr_obj_hdr_flush_confusion(void); +void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm); +void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm); +void chunk_align_bug_1(void); +void lower_dim_size_comp_test(void); +void link_chunk_collective_io_test(void); +void contig_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type); +void checker_board_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type); +void file_image_daisy_chain_test(void); +#ifdef H5_HAVE_FILTER_DEFLATE +void compress_readAll(void); +#endif /* H5_HAVE_FILTER_DEFLATE */ +void test_dense_attr(void); +void test_partial_no_selection_coll_md_read(void); +void test_multi_chunk_io_addrmap_issue(void); +void test_link_chunk_io_sort_chunk_issue(void); +void test_collective_global_heap_write(void); + +/* commonly used prototypes */ +hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type); +MPI_Offset h5_mpi_get_file_size(const char *filename, MPI_Comm comm, MPI_Info info); +int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, + DATATYPE *original); +void point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, + 
hsize_t coords[], int order); +#endif /* PHDF5TEST_H */ diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt index 3a44fca7c1b..d34b8001267 100644 --- a/testpar/CMakeLists.txt +++ b/testpar/CMakeLists.txt @@ -104,10 +104,30 @@ set (H5P_TESTS t_vfd ) +set (HDF5_API_TESTS + attribute + dataset + datatype + file + group + link + misc + object +) + +if (HDF5_TEST_API_ENABLE_ASYNC) + set (HDF5_API_TESTS + ${HDF5_API_TESTS} + async + ) +endif () + foreach (h5_testp ${H5P_TESTS}) ADD_H5P_EXE(${h5_testp}) endforeach () +add_subdirectory (API) + if (HDF5_TEST_PARALLEL) include (CMakeTests.cmake) endif () From 75782097db2dabc8b341c2267e42e79f587ccd72 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 3 May 2023 11:45:16 -0500 Subject: [PATCH 194/231] Changes to isolate API Test option files and use fetchcontent (#2880) --- config/cmake/cacheinit.cmake | 11 + doxygen/dox/Overview.dox | 2 +- test/API/CMake/CheckAsan.cmake | 37 - test/API/CMake/CheckUbsan.cmake | 37 - test/API/CMakeLists.txt | 38 +- test/API/driver/CMakeLists.txt | 37 +- test/API/driver/kwsys/.clang-format | 22 - test/API/driver/kwsys/.hooks-config | 2 - test/API/driver/kwsys/Base64.c | 225 - test/API/driver/kwsys/Base64.h.in | 110 - test/API/driver/kwsys/CMakeLists.txt | 1260 ---- test/API/driver/kwsys/CONTRIBUTING.rst | 49 - test/API/driver/kwsys/CTestConfig.cmake | 9 - test/API/driver/kwsys/CTestCustom.cmake.in | 14 - .../API/driver/kwsys/CommandLineArguments.cxx | 768 --- .../driver/kwsys/CommandLineArguments.hxx.in | 270 - test/API/driver/kwsys/Configure.h.in | 89 - test/API/driver/kwsys/Configure.hxx.in | 65 - test/API/driver/kwsys/ConsoleBuf.hxx.in | 398 -- test/API/driver/kwsys/Copyright.txt | 38 - test/API/driver/kwsys/Directory.cxx | 236 - test/API/driver/kwsys/Directory.hxx.in | 72 - test/API/driver/kwsys/DynamicLoader.cxx | 495 -- test/API/driver/kwsys/DynamicLoader.hxx.in | 106 - test/API/driver/kwsys/Encoding.h.in | 69 - test/API/driver/kwsys/Encoding.hxx.in | 80 - test/API/driver/kwsys/EncodingC.c | 72 - test/API/driver/kwsys/EncodingCXX.cxx | 288 - test/API/driver/kwsys/ExtraTest.cmake.in | 1 - test/API/driver/kwsys/FStream.cxx | 55 - test/API/driver/kwsys/FStream.hxx.in | 278 - test/API/driver/kwsys/GitSetup/.gitattributes | 6 - test/API/driver/kwsys/GitSetup/LICENSE | 202 - test/API/driver/kwsys/GitSetup/NOTICE | 5 - test/API/driver/kwsys/GitSetup/README | 87 - test/API/driver/kwsys/GitSetup/config | 4 - test/API/driver/kwsys/GitSetup/config.sample | 32 - .../API/driver/kwsys/GitSetup/git-gerrit-push | 74 - .../API/driver/kwsys/GitSetup/git-gitlab-push | 177 - test/API/driver/kwsys/GitSetup/pre-commit | 26 - test/API/driver/kwsys/GitSetup/setup-aliases | 6 - test/API/driver/kwsys/GitSetup/setup-gerrit | 147 - test/API/driver/kwsys/GitSetup/setup-gitlab | 140 - test/API/driver/kwsys/GitSetup/setup-hooks | 64 - test/API/driver/kwsys/GitSetup/setup-ssh | 111 - test/API/driver/kwsys/GitSetup/setup-stage | 82 - test/API/driver/kwsys/GitSetup/setup-upstream | 104 - test/API/driver/kwsys/GitSetup/setup-user | 39 - test/API/driver/kwsys/GitSetup/tips | 55 - test/API/driver/kwsys/Glob.cxx | 448 -- test/API/driver/kwsys/Glob.hxx.in | 134 - test/API/driver/kwsys/IOStream.cxx | 255 - test/API/driver/kwsys/IOStream.hxx.in | 126 - test/API/driver/kwsys/MD5.c | 494 -- test/API/driver/kwsys/MD5.h.in | 97 - test/API/driver/kwsys/Process.h.in | 544 -- test/API/driver/kwsys/ProcessUNIX.c | 2920 --------- test/API/driver/kwsys/ProcessWin32.c | 2786 --------- 
test/API/driver/kwsys/README.rst | 37 - test/API/driver/kwsys/RegularExpression.cxx | 1218 ---- .../API/driver/kwsys/RegularExpression.hxx.in | 562 -- test/API/driver/kwsys/SetupForDevelopment.sh | 20 - test/API/driver/kwsys/SharedForward.h.in | 879 --- test/API/driver/kwsys/String.c | 100 - test/API/driver/kwsys/String.h.in | 57 - test/API/driver/kwsys/String.hxx.in | 65 - test/API/driver/kwsys/System.c | 236 - test/API/driver/kwsys/System.h.in | 60 - test/API/driver/kwsys/SystemInformation.cxx | 5466 ----------------- .../API/driver/kwsys/SystemInformation.hxx.in | 170 - test/API/driver/kwsys/SystemTools.cxx | 4703 -------------- test/API/driver/kwsys/SystemTools.hxx.in | 981 --- test/API/driver/kwsys/Terminal.c | 414 -- test/API/driver/kwsys/Terminal.h.in | 170 - test/API/driver/kwsys/clang-format.bash | 128 - test/API/driver/kwsys/hash_fun.hxx.in | 166 - test/API/driver/kwsys/hash_map.hxx.in | 423 -- test/API/driver/kwsys/hash_set.hxx.in | 392 -- test/API/driver/kwsys/hashtable.hxx.in | 995 --- test/API/driver/kwsys/kwsysHeaderDump.pl | 41 - .../API/driver/kwsys/kwsysPlatformTests.cmake | 216 - test/API/driver/kwsys/kwsysPlatformTestsC.c | 108 - .../driver/kwsys/kwsysPlatformTestsCXX.cxx | 335 - test/API/driver/kwsys/kwsysPrivate.h | 34 - .../driver/kwsys/testCommandLineArguments.cxx | 209 - .../kwsys/testCommandLineArguments1.cxx | 93 - test/API/driver/kwsys/testConfigure.cxx | 30 - test/API/driver/kwsys/testConsoleBuf.cxx | 782 --- test/API/driver/kwsys/testConsoleBuf.hxx | 17 - test/API/driver/kwsys/testConsoleBufChild.cxx | 55 - test/API/driver/kwsys/testDirectory.cxx | 110 - test/API/driver/kwsys/testDynamicLoader.cxx | 133 - test/API/driver/kwsys/testDynload.c | 13 - test/API/driver/kwsys/testDynloadImpl.c | 10 - test/API/driver/kwsys/testDynloadImpl.h | 15 - test/API/driver/kwsys/testDynloadUse.c | 15 - test/API/driver/kwsys/testEncode.c | 67 - test/API/driver/kwsys/testEncoding.cxx | 286 - test/API/driver/kwsys/testFStream.cxx | 113 - test/API/driver/kwsys/testFail.c | 24 - test/API/driver/kwsys/testHashSTL.cxx | 64 - test/API/driver/kwsys/testProcess.c | 728 --- test/API/driver/kwsys/testSharedForward.c.in | 27 - .../driver/kwsys/testSystemInformation.cxx | 106 - test/API/driver/kwsys/testSystemTools.bin | Bin 766 -> 0 bytes test/API/driver/kwsys/testSystemTools.cxx | 1128 ---- test/API/driver/kwsys/testSystemTools.h.in | 12 - test/API/driver/kwsys/testTerminal.c | 22 - test/API/driver/kwsys/update-gitsetup.bash | 20 - test/API/driver/kwsys/update-third-party.bash | 169 - test/CMakeLists.txt | 24 +- testpar/API/CMakeLists.txt | 27 +- testpar/CMakeLists.txt | 24 +- 113 files changed, 89 insertions(+), 36608 deletions(-) delete mode 100644 test/API/CMake/CheckAsan.cmake delete mode 100644 test/API/CMake/CheckUbsan.cmake delete mode 100644 test/API/driver/kwsys/.clang-format delete mode 100644 test/API/driver/kwsys/.hooks-config delete mode 100644 test/API/driver/kwsys/Base64.c delete mode 100644 test/API/driver/kwsys/Base64.h.in delete mode 100644 test/API/driver/kwsys/CMakeLists.txt delete mode 100644 test/API/driver/kwsys/CONTRIBUTING.rst delete mode 100644 test/API/driver/kwsys/CTestConfig.cmake delete mode 100644 test/API/driver/kwsys/CTestCustom.cmake.in delete mode 100644 test/API/driver/kwsys/CommandLineArguments.cxx delete mode 100644 test/API/driver/kwsys/CommandLineArguments.hxx.in delete mode 100644 test/API/driver/kwsys/Configure.h.in delete mode 100644 test/API/driver/kwsys/Configure.hxx.in delete mode 100644 test/API/driver/kwsys/ConsoleBuf.hxx.in delete mode 
100644 test/API/driver/kwsys/Copyright.txt delete mode 100644 test/API/driver/kwsys/Directory.cxx delete mode 100644 test/API/driver/kwsys/Directory.hxx.in delete mode 100644 test/API/driver/kwsys/DynamicLoader.cxx delete mode 100644 test/API/driver/kwsys/DynamicLoader.hxx.in delete mode 100644 test/API/driver/kwsys/Encoding.h.in delete mode 100644 test/API/driver/kwsys/Encoding.hxx.in delete mode 100644 test/API/driver/kwsys/EncodingC.c delete mode 100644 test/API/driver/kwsys/EncodingCXX.cxx delete mode 100644 test/API/driver/kwsys/ExtraTest.cmake.in delete mode 100644 test/API/driver/kwsys/FStream.cxx delete mode 100644 test/API/driver/kwsys/FStream.hxx.in delete mode 100644 test/API/driver/kwsys/GitSetup/.gitattributes delete mode 100644 test/API/driver/kwsys/GitSetup/LICENSE delete mode 100644 test/API/driver/kwsys/GitSetup/NOTICE delete mode 100644 test/API/driver/kwsys/GitSetup/README delete mode 100644 test/API/driver/kwsys/GitSetup/config delete mode 100644 test/API/driver/kwsys/GitSetup/config.sample delete mode 100644 test/API/driver/kwsys/GitSetup/git-gerrit-push delete mode 100644 test/API/driver/kwsys/GitSetup/git-gitlab-push delete mode 100644 test/API/driver/kwsys/GitSetup/pre-commit delete mode 100644 test/API/driver/kwsys/GitSetup/setup-aliases delete mode 100644 test/API/driver/kwsys/GitSetup/setup-gerrit delete mode 100644 test/API/driver/kwsys/GitSetup/setup-gitlab delete mode 100644 test/API/driver/kwsys/GitSetup/setup-hooks delete mode 100644 test/API/driver/kwsys/GitSetup/setup-ssh delete mode 100644 test/API/driver/kwsys/GitSetup/setup-stage delete mode 100644 test/API/driver/kwsys/GitSetup/setup-upstream delete mode 100644 test/API/driver/kwsys/GitSetup/setup-user delete mode 100644 test/API/driver/kwsys/GitSetup/tips delete mode 100644 test/API/driver/kwsys/Glob.cxx delete mode 100644 test/API/driver/kwsys/Glob.hxx.in delete mode 100644 test/API/driver/kwsys/IOStream.cxx delete mode 100644 test/API/driver/kwsys/IOStream.hxx.in delete mode 100644 test/API/driver/kwsys/MD5.c delete mode 100644 test/API/driver/kwsys/MD5.h.in delete mode 100644 test/API/driver/kwsys/Process.h.in delete mode 100644 test/API/driver/kwsys/ProcessUNIX.c delete mode 100644 test/API/driver/kwsys/ProcessWin32.c delete mode 100644 test/API/driver/kwsys/README.rst delete mode 100644 test/API/driver/kwsys/RegularExpression.cxx delete mode 100644 test/API/driver/kwsys/RegularExpression.hxx.in delete mode 100644 test/API/driver/kwsys/SetupForDevelopment.sh delete mode 100644 test/API/driver/kwsys/SharedForward.h.in delete mode 100644 test/API/driver/kwsys/String.c delete mode 100644 test/API/driver/kwsys/String.h.in delete mode 100644 test/API/driver/kwsys/String.hxx.in delete mode 100644 test/API/driver/kwsys/System.c delete mode 100644 test/API/driver/kwsys/System.h.in delete mode 100644 test/API/driver/kwsys/SystemInformation.cxx delete mode 100644 test/API/driver/kwsys/SystemInformation.hxx.in delete mode 100644 test/API/driver/kwsys/SystemTools.cxx delete mode 100644 test/API/driver/kwsys/SystemTools.hxx.in delete mode 100644 test/API/driver/kwsys/Terminal.c delete mode 100644 test/API/driver/kwsys/Terminal.h.in delete mode 100644 test/API/driver/kwsys/clang-format.bash delete mode 100644 test/API/driver/kwsys/hash_fun.hxx.in delete mode 100644 test/API/driver/kwsys/hash_map.hxx.in delete mode 100644 test/API/driver/kwsys/hash_set.hxx.in delete mode 100644 test/API/driver/kwsys/hashtable.hxx.in delete mode 100644 test/API/driver/kwsys/kwsysHeaderDump.pl delete mode 100644 
test/API/driver/kwsys/kwsysPlatformTests.cmake delete mode 100644 test/API/driver/kwsys/kwsysPlatformTestsC.c delete mode 100644 test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx delete mode 100644 test/API/driver/kwsys/kwsysPrivate.h delete mode 100644 test/API/driver/kwsys/testCommandLineArguments.cxx delete mode 100644 test/API/driver/kwsys/testCommandLineArguments1.cxx delete mode 100644 test/API/driver/kwsys/testConfigure.cxx delete mode 100644 test/API/driver/kwsys/testConsoleBuf.cxx delete mode 100644 test/API/driver/kwsys/testConsoleBuf.hxx delete mode 100644 test/API/driver/kwsys/testConsoleBufChild.cxx delete mode 100644 test/API/driver/kwsys/testDirectory.cxx delete mode 100644 test/API/driver/kwsys/testDynamicLoader.cxx delete mode 100644 test/API/driver/kwsys/testDynload.c delete mode 100644 test/API/driver/kwsys/testDynloadImpl.c delete mode 100644 test/API/driver/kwsys/testDynloadImpl.h delete mode 100644 test/API/driver/kwsys/testDynloadUse.c delete mode 100644 test/API/driver/kwsys/testEncode.c delete mode 100644 test/API/driver/kwsys/testEncoding.cxx delete mode 100644 test/API/driver/kwsys/testFStream.cxx delete mode 100644 test/API/driver/kwsys/testFail.c delete mode 100644 test/API/driver/kwsys/testHashSTL.cxx delete mode 100644 test/API/driver/kwsys/testProcess.c delete mode 100644 test/API/driver/kwsys/testSharedForward.c.in delete mode 100644 test/API/driver/kwsys/testSystemInformation.cxx delete mode 100644 test/API/driver/kwsys/testSystemTools.bin delete mode 100644 test/API/driver/kwsys/testSystemTools.cxx delete mode 100644 test/API/driver/kwsys/testSystemTools.h.in delete mode 100644 test/API/driver/kwsys/testTerminal.c delete mode 100644 test/API/driver/kwsys/update-gitsetup.bash delete mode 100644 test/API/driver/kwsys/update-third-party.bash diff --git a/config/cmake/cacheinit.cmake b/config/cmake/cacheinit.cmake index 44608915241..376b28f5c78 100644 --- a/config/cmake/cacheinit.cmake +++ b/config/cmake/cacheinit.cmake @@ -49,6 +49,10 @@ set (HDF5_MINGW_STATIC_GCC_LIBS ON CACHE BOOL "Statically link libgcc/libstdc++" set (HDF5_ALLOW_EXTERNAL_SUPPORT "TGZ" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE) set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) +######################## +# compression options +######################## + set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use HDF5_ZLib from compressed file" FORCE) set (ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.2.13" CACHE STRING "Use ZLIB from original location" FORCE) @@ -61,6 +65,13 @@ set (LIBAEC_TGZ_ORIGPATH "https://github.com/MathisRosenhauer/libaec/releases/do set (LIBAEC_TGZ_ORIGNAME "libaec-v1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original compressed file" FORCE) set (LIBAEC_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for LIBAEC FetchContent" FORCE) +######################## +# API test options +######################## +set (KWSYS_TGZ_ORIGPATH "https://gitlab.kitware.com/utils/kwsys/-/archive/master/kwsys-master.tar.gz" CACHE STRING "Use KWSYS from original location" FORCE) +set (KWSYS_TGZ_ORIGNAME "kwsys-master.tar.gz" CACHE STRING "Use KWSYS from original compressed file" FORCE) +set (KWSYS_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for KWSYS FetchContent" FORCE) + ######################## # filter plugin options ######################## diff --git a/doxygen/dox/Overview.dox b/doxygen/dox/Overview.dox index 
fb6231ce36a..32d0deaeaf6 100644 --- a/doxygen/dox/Overview.dox +++ b/doxygen/dox/Overview.dox @@ -23,7 +23,7 @@ documents cover a mix of tasks, concepts, and reference, to help a specific \par Versions Version-specific documentation (see the version in the title area) can be found here: - - HDF5 develop branch (this site) + - HDF5 develop branch (this site) - HDF5 1.14.x - HDF5 1.12.x - HDF5 1.10.x diff --git a/test/API/CMake/CheckAsan.cmake b/test/API/CMake/CheckAsan.cmake deleted file mode 100644 index 32f4b4535cb..00000000000 --- a/test/API/CMake/CheckAsan.cmake +++ /dev/null @@ -1,37 +0,0 @@ -set(ASAN_FLAG "-fsanitize=address") -set(ASAN_C_FLAGS "-O1 -g ${ASAN_FLAG} -fsanitize-address-use-after-scope -fno-omit-frame-pointer -fno-optimize-sibling-calls") -set(ASAN_CXX_FLAGS ${ASAN_C_FLAGS}) - -get_property(ASAN_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES) -foreach(lang ${ASAN_LANGUAGES}) - set(ASAN_${lang}_LANG_ENABLED 1) -endforeach() - -if(ASAN_C_LANG_ENABLED) - include(CheckCCompilerFlag) - set(CMAKE_REQUIRED_LINK_OPTIONS ${ASAN_FLAG}) - check_c_compiler_flag(${ASAN_FLAG} ASAN_C_FLAG_SUPPORTED) - if(NOT ASAN_C_FLAG_SUPPORTED) - message(STATUS "Asan flags are not supported by the C compiler.") - else() - if(NOT CMAKE_C_FLAGS_ASAN) - set(CMAKE_C_FLAGS_ASAN ${ASAN_C_FLAGS} CACHE STRING "Flags used by the C compiler during ASAN builds." FORCE) - endif() - endif() - unset(CMAKE_REQUIRED_LINK_OPTIONS) -endif() - -if(ASAN_CXX_LANG_ENABLED) - include(CheckCXXCompilerFlag) - set(CMAKE_REQUIRED_LINK_OPTIONS ${ASAN_FLAG}) - check_cxx_compiler_flag(${ASAN_FLAG} ASAN_CXX_FLAG_SUPPORTED) - if(NOT ASAN_CXX_FLAG_SUPPORTED) - message(STATUS "Asan flags are not supported by the CXX compiler.") - else() - if(NOT CMAKE_CXX_FLAGS_ASAN) - set(CMAKE_CXX_FLAGS_ASAN ${ASAN_CXX_FLAGS} CACHE STRING "Flags used by the CXX compiler during ASAN builds." FORCE) - endif() - endif() - unset(CMAKE_REQUIRED_LINK_OPTIONS) -endif() - diff --git a/test/API/CMake/CheckUbsan.cmake b/test/API/CMake/CheckUbsan.cmake deleted file mode 100644 index f2b9c2cf0c5..00000000000 --- a/test/API/CMake/CheckUbsan.cmake +++ /dev/null @@ -1,37 +0,0 @@ -set(UBSAN_FLAG "-fsanitize=undefined") -set(UBSAN_C_FLAGS "-O1 -g ${UBSAN_FLAG} -fno-omit-frame-pointer") -set(UBSAN_CXX_FLAGS ${UBSAN_C_FLAGS}) - -get_property(UBSAN_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES) -foreach(lang ${UBSAN_LANGUAGES}) - set(UBSAN_${lang}_LANG_ENABLED 1) -endforeach() - -if(UBSAN_C_LANG_ENABLED) - include(CheckCCompilerFlag) - set(CMAKE_REQUIRED_LINK_OPTIONS ${UBSAN_FLAG}) - check_c_compiler_flag(${UBSAN_FLAG} UBSAN_C_FLAG_SUPPORTED) - if(NOT UBSAN_C_FLAG_SUPPORTED) - message(STATUS "Ubsan flags are not supported by the C compiler.") - else() - if(NOT CMAKE_C_FLAGS_UBSAN) - set(CMAKE_C_FLAGS_UBSAN ${UBSAN_C_FLAGS} CACHE STRING "Flags used by the C compiler during UBSAN builds." FORCE) - endif() - endif() - unset(CMAKE_REQUIRED_LINK_OPTIONS) -endif() - -if(UBSAN_CXX_LANG_ENABLED) - include(CheckCXXCompilerFlag) - set(CMAKE_REQUIRED_LINK_OPTIONS ${UBSAN_FLAG}) - check_cxx_compiler_flag(${UBSAN_FLAG} UBSAN_CXX_FLAG_SUPPORTED) - if(NOT UBSAN_CXX_FLAG_SUPPORTED) - message(STATUS "Ubsan flags are not supported by the CXX compiler.") - else() - if(NOT CMAKE_CXX_FLAGS_UBSAN) - set(CMAKE_CXX_FLAGS_UBSAN ${UBSAN_CXX_FLAGS} CACHE STRING "Flags used by the CXX compiler during UBSAN builds." 
FORCE) - endif() - endif() - unset(CMAKE_REQUIRED_LINK_OPTIONS) -endif() - diff --git a/test/API/CMakeLists.txt b/test/API/CMakeLists.txt index d189d67d5ac..c2f95bdd4b8 100644 --- a/test/API/CMakeLists.txt +++ b/test/API/CMakeLists.txt @@ -9,26 +9,12 @@ # help@hdfgroup.org. # -#------------------------------------------------------------------------------ -# Set module path -#------------------------------------------------------------------------------ -set(HDF5_TEST_API_CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake") -set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${HDF5_TEST_API_CMAKE_MODULE_PATH}) - -# TODO: probably not necessary -#------------------------------------------------------------------------------ -# Setup CMake Environment -#------------------------------------------------------------------------------ -if (WIN32) - message("The HDF5 API test suite is currently not supported on this platform." FATAL_ERROR) -endif () +cmake_minimum_required (VERSION 3.18) +project (HDF5_TEST_API C) #------------------------------------------------------------------------------ # Setup testing configuration file #------------------------------------------------------------------------------ -if (HDF5_TEST_PARALLEL) - set (HDF5_TEST_API_HAVE_PARALLEL 1) -endif () if (HDF5_TEST_API_ENABLE_ASYNC) set (H5_API_TEST_HAVE_ASYNC 1) endif () @@ -46,9 +32,27 @@ if (HDF5_TEST_API_ENABLE_DRIVER) endif () #------------------------------------------------------------------------------ -# Setup for API tests +# Define for API tests #------------------------------------------------------------------------------ +set (HDF5_API_TESTS + attribute + dataset + datatype + file + group + link + misc + object +) + +if (HDF5_TEST_API_ENABLE_ASYNC) + set (HDF5_API_TESTS + ${HDF5_API_TESTS} + async + ) +endif () + # Ported HDF5 tests set (HDF5_API_TESTS_EXTRA testhdf5 diff --git a/test/API/driver/CMakeLists.txt b/test/API/driver/CMakeLists.txt index 2210068dc41..23ba0535b0f 100644 --- a/test/API/driver/CMakeLists.txt +++ b/test/API/driver/CMakeLists.txt @@ -1,17 +1,34 @@ -cmake_minimum_required(VERSION 2.8.12.2 FATAL_ERROR) +cmake_minimum_required (VERSION 3.18) project(H5_API_TEST_DRIVER CXX) -include(CheckAsan) -include(CheckUbsan) +if (NOT KWSYS_USE_LOCALCONTENT) + set (KWSYS_URL ${KWSYS_TGZ_ORIGPATH}/${KWSYS_TGZ_ORIGNAME}) +else () + set (KWSYS_URL ${TGZPATH}/${KWSYS_TGZ_ORIGNAME}) +endif () +# Only tgz files +FetchContent_Declare (KWSYS + URL ${KWSYS_URL} + URL_HASH "" +) +FetchContent_GetProperties(KWSYS) +if(NOT kwsys_POPULATED) + FetchContent_Populate(KWSYS) -set(CMAKE_CXX_STANDARD 11) + # Copy an additional/replacement files into the populated source + #file(COPY ${HDF_RESOURCES_DIR}/KWSYS/CMakeLists.txt DESTINATION ${hdf5_kwsys_SOURCE_DIR}) -set(KWSYS_NAMESPACE h5_api_test_sys) -set(KWSYS_USE_SystemTools 1) -set(KWSYS_USE_Process 1) -set(KWSYS_USE_RegularExpression 1) -add_subdirectory(kwsys) -include_directories(${CMAKE_CURRENT_BINARY_DIR}/kwsys) + set(CMAKE_CXX_STANDARD 11) + + set(KWSYS_NAMESPACE h5_api_test_sys) + set(KWSYS_USE_SystemTools 1) + set(KWSYS_USE_Process 1) + set(KWSYS_USE_RegularExpression 1) + + add_subdirectory(${hdf5_kwsysb_SOURCE_DIR} ${hdf5_kwsys_BINARY_DIR}) +endif() + +include_directories(${hdf5_kwsys_BINARY_DIR}) add_executable(h5_api_test_driver h5_api_test_driver.cxx) target_link_libraries(h5_api_test_driver h5_api_test_sys) diff --git a/test/API/driver/kwsys/.clang-format b/test/API/driver/kwsys/.clang-format deleted file mode 100644 index 588b79016b4..00000000000 --- 
a/test/API/driver/kwsys/.clang-format +++ /dev/null @@ -1,22 +0,0 @@ ---- -# This configuration requires clang-format version 6.0 exactly. -BasedOnStyle: Mozilla -AlignOperands: false -AllowShortFunctionsOnASingleLine: InlineOnly -AlwaysBreakAfterDefinitionReturnType: None -AlwaysBreakAfterReturnType: None -BinPackArguments: true -BinPackParameters: true -BraceWrapping: - AfterClass: true - AfterEnum: true - AfterFunction: true - AfterStruct: true - AfterUnion: true -BreakBeforeBraces: Custom -ColumnLimit: 79 -IndentPPDirectives: AfterHash -SortUsingDeclarations: false -SpaceAfterTemplateKeyword: true -Standard: Cpp03 -... diff --git a/test/API/driver/kwsys/.hooks-config b/test/API/driver/kwsys/.hooks-config deleted file mode 100644 index 739cdd268bb..00000000000 --- a/test/API/driver/kwsys/.hooks-config +++ /dev/null @@ -1,2 +0,0 @@ -[hooks "chain"] - pre-commit = GitSetup/pre-commit diff --git a/test/API/driver/kwsys/Base64.c b/test/API/driver/kwsys/Base64.c deleted file mode 100644 index bf876f2d5e1..00000000000 --- a/test/API/driver/kwsys/Base64.c +++ /dev/null @@ -1,225 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(Base64.h) - -/* Work-around CMake dependency scanning limitation. This must - duplicate the above list of headers. */ -#if 0 -# include "Base64.h.in" -#endif - -static const unsigned char kwsysBase64EncodeTable[65] = - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "abcdefghijklmnopqrstuvwxyz" - "0123456789+/"; - -static const unsigned char kwsysBase64DecodeTable[256] = { - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0x3E, 0xFF, 0xFF, 0xFF, 0x3F, 0x34, 0x35, 0x36, 0x37, - 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, - 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - /*------------------------------------*/ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF -}; - -static unsigned char kwsysBase64EncodeChar(int c) -{ - return kwsysBase64EncodeTable[(unsigned char)c]; -} - -static unsigned char kwsysBase64DecodeChar(unsigned char c) -{ - return kwsysBase64DecodeTable[c]; -} - -/* Encode 3 bytes into a 4 byte string. 
*/ -void kwsysBase64_Encode3(const unsigned char* src, unsigned char* dest) -{ - dest[0] = kwsysBase64EncodeChar((src[0] >> 2) & 0x3F); - dest[1] = - kwsysBase64EncodeChar(((src[0] << 4) & 0x30) | ((src[1] >> 4) & 0x0F)); - dest[2] = - kwsysBase64EncodeChar(((src[1] << 2) & 0x3C) | ((src[2] >> 6) & 0x03)); - dest[3] = kwsysBase64EncodeChar(src[2] & 0x3F); -} - -/* Encode 2 bytes into a 4 byte string. */ -void kwsysBase64_Encode2(const unsigned char* src, unsigned char* dest) -{ - dest[0] = kwsysBase64EncodeChar((src[0] >> 2) & 0x3F); - dest[1] = - kwsysBase64EncodeChar(((src[0] << 4) & 0x30) | ((src[1] >> 4) & 0x0F)); - dest[2] = kwsysBase64EncodeChar(((src[1] << 2) & 0x3C)); - dest[3] = '='; -} - -/* Encode 1 bytes into a 4 byte string. */ -void kwsysBase64_Encode1(const unsigned char* src, unsigned char* dest) -{ - dest[0] = kwsysBase64EncodeChar((src[0] >> 2) & 0x3F); - dest[1] = kwsysBase64EncodeChar(((src[0] << 4) & 0x30)); - dest[2] = '='; - dest[3] = '='; -} - -/* Encode 'length' bytes from the input buffer and store the - encoded stream into the output buffer. Return the length of the encoded - buffer (output). Note that the output buffer must be allocated by the caller - (length * 1.5 should be a safe estimate). If 'mark_end' is true than an - extra set of 4 bytes is added to the end of the stream if the input is a - multiple of 3 bytes. These bytes are invalid chars and therefore they will - stop the decoder thus enabling the caller to decode a stream without - actually knowing how much data to expect (if the input is not a multiple of - 3 bytes then the extra padding needed to complete the encode 4 bytes will - stop the decoding anyway). */ -size_t kwsysBase64_Encode(const unsigned char* input, size_t length, - unsigned char* output, int mark_end) -{ - const unsigned char* ptr = input; - const unsigned char* end = input + length; - unsigned char* optr = output; - - /* Encode complete triplet */ - - while ((end - ptr) >= 3) { - kwsysBase64_Encode3(ptr, optr); - ptr += 3; - optr += 4; - } - - /* Encodes a 2-byte ending into 3 bytes and 1 pad byte and writes. */ - - if (end - ptr == 2) { - kwsysBase64_Encode2(ptr, optr); - optr += 4; - } - - /* Encodes a 1-byte ending into 2 bytes and 2 pad bytes */ - - else if (end - ptr == 1) { - kwsysBase64_Encode1(ptr, optr); - optr += 4; - } - - /* Do we need to mark the end */ - - else if (mark_end) { - optr[0] = optr[1] = optr[2] = optr[3] = '='; - optr += 4; - } - - return (size_t)(optr - output); -} - -/* Decode 4 bytes into a 3 byte string. */ -int kwsysBase64_Decode3(const unsigned char* src, unsigned char* dest) -{ - unsigned char d0, d1, d2, d3; - - d0 = kwsysBase64DecodeChar(src[0]); - d1 = kwsysBase64DecodeChar(src[1]); - d2 = kwsysBase64DecodeChar(src[2]); - d3 = kwsysBase64DecodeChar(src[3]); - - /* Make sure all characters were valid */ - - if (d0 == 0xFF || d1 == 0xFF || d2 == 0xFF || d3 == 0xFF) { - return 0; - } - - /* Decode the 3 bytes */ - - dest[0] = (unsigned char)(((d0 << 2) & 0xFC) | ((d1 >> 4) & 0x03)); - dest[1] = (unsigned char)(((d1 << 4) & 0xF0) | ((d2 >> 2) & 0x0F)); - dest[2] = (unsigned char)(((d2 << 6) & 0xC0) | ((d3 >> 0) & 0x3F)); - - /* Return the number of bytes actually decoded */ - - if (src[2] == '=') { - return 1; - } - if (src[3] == '=') { - return 2; - } - return 3; -} - -/* Decode bytes from the input buffer and store the decoded stream - into the output buffer until 'length' bytes have been decoded. Return the - real length of the decoded stream (which should be equal to 'length'). 
Note - that the output buffer must be allocated by the caller. If - 'max_input_length' is not null, then it specifies the number of encoded - bytes that should be at most read from the input buffer. In that case the - 'length' parameter is ignored. This enables the caller to decode a stream - without actually knowing how much decoded data to expect (of course, the - buffer must be large enough). */ -size_t kwsysBase64_Decode(const unsigned char* input, size_t length, - unsigned char* output, size_t max_input_length) -{ - const unsigned char* ptr = input; - unsigned char* optr = output; - - /* Decode complete triplet */ - - if (max_input_length) { - const unsigned char* end = input + max_input_length; - while (ptr < end) { - int len = kwsysBase64_Decode3(ptr, optr); - optr += len; - if (len < 3) { - return (size_t)(optr - output); - } - ptr += 4; - } - } else { - unsigned char* oend = output + length; - while ((oend - optr) >= 3) { - int len = kwsysBase64_Decode3(ptr, optr); - optr += len; - if (len < 3) { - return (size_t)(optr - output); - } - ptr += 4; - } - - /* Decode the last triplet */ - - if (oend - optr == 2) { - unsigned char temp[3]; - int len = kwsysBase64_Decode3(ptr, temp); - if (len >= 2) { - optr[0] = temp[0]; - optr[1] = temp[1]; - optr += 2; - } else if (len > 0) { - optr[0] = temp[0]; - optr += 1; - } - } else if (oend - optr == 1) { - unsigned char temp[3]; - int len = kwsysBase64_Decode3(ptr, temp); - if (len > 0) { - optr[0] = temp[0]; - optr += 1; - } - } - } - - return (size_t)(optr - output); -} diff --git a/test/API/driver/kwsys/Base64.h.in b/test/API/driver/kwsys/Base64.h.in deleted file mode 100644 index 729f9729782..00000000000 --- a/test/API/driver/kwsys/Base64.h.in +++ /dev/null @@ -1,110 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_Base64_h -#define @KWSYS_NAMESPACE@_Base64_h - -#include <@KWSYS_NAMESPACE@/Configure.h> - -#include /* size_t */ - -/* Redefine all public interface symbol names to be in the proper - namespace. These macros are used internally to kwsys only, and are - not visible to user code. Use kwsysHeaderDump.pl to reproduce - these macros after making changes to the interface. */ -#if !defined(KWSYS_NAMESPACE) -# define kwsys_ns(x) @KWSYS_NAMESPACE@##x -# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT -#endif -#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# define kwsysBase64 kwsys_ns(Base64) -# define kwsysBase64_Decode kwsys_ns(Base64_Decode) -# define kwsysBase64_Decode3 kwsys_ns(Base64_Decode3) -# define kwsysBase64_Encode kwsys_ns(Base64_Encode) -# define kwsysBase64_Encode1 kwsys_ns(Base64_Encode1) -# define kwsysBase64_Encode2 kwsys_ns(Base64_Encode2) -# define kwsysBase64_Encode3 kwsys_ns(Base64_Encode3) -#endif - -#if defined(__cplusplus) -extern "C" { -#endif - -/** - * Encode 3 bytes into a 4 byte string. - */ -kwsysEXPORT void kwsysBase64_Encode3(const unsigned char* src, - unsigned char* dest); - -/** - * Encode 2 bytes into a 4 byte string. - */ -kwsysEXPORT void kwsysBase64_Encode2(const unsigned char* src, - unsigned char* dest); - -/** - * Encode 1 bytes into a 4 byte string. - */ -kwsysEXPORT void kwsysBase64_Encode1(const unsigned char* src, - unsigned char* dest); - -/** - * Encode 'length' bytes from the input buffer and store the encoded - * stream into the output buffer. Return the length of the encoded - * buffer (output). 
Note that the output buffer must be allocated by - * the caller (length * 1.5 should be a safe estimate). If 'mark_end' - * is true than an extra set of 4 bytes is added to the end of the - * stream if the input is a multiple of 3 bytes. These bytes are - * invalid chars and therefore they will stop the decoder thus - * enabling the caller to decode a stream without actually knowing how - * much data to expect (if the input is not a multiple of 3 bytes then - * the extra padding needed to complete the encode 4 bytes will stop - * the decoding anyway). - */ -kwsysEXPORT size_t kwsysBase64_Encode(const unsigned char* input, - size_t length, unsigned char* output, - int mark_end); - -/** - * Decode 4 bytes into a 3 byte string. Returns the number of bytes - * actually decoded. - */ -kwsysEXPORT int kwsysBase64_Decode3(const unsigned char* src, - unsigned char* dest); - -/** - * Decode bytes from the input buffer and store the decoded stream - * into the output buffer until 'length' bytes have been decoded. - * Return the real length of the decoded stream (which should be equal - * to 'length'). Note that the output buffer must be allocated by the - * caller. If 'max_input_length' is not null, then it specifies the - * number of encoded bytes that should be at most read from the input - * buffer. In that case the 'length' parameter is ignored. This - * enables the caller to decode a stream without actually knowing how - * much decoded data to expect (of course, the buffer must be large - * enough). - */ -kwsysEXPORT size_t kwsysBase64_Decode(const unsigned char* input, - size_t length, unsigned char* output, - size_t max_input_length); - -#if defined(__cplusplus) -} /* extern "C" */ -#endif - -/* If we are building a kwsys .c or .cxx file, let it use these macros. - Otherwise, undefine them to keep the namespace clean. */ -#if !defined(KWSYS_NAMESPACE) -# undef kwsys_ns -# undef kwsysEXPORT -# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# undef kwsysBase64 -# undef kwsysBase64_Decode -# undef kwsysBase64_Decode3 -# undef kwsysBase64_Encode -# undef kwsysBase64_Encode1 -# undef kwsysBase64_Encode2 -# undef kwsysBase64_Encode3 -# endif -#endif - -#endif diff --git a/test/API/driver/kwsys/CMakeLists.txt b/test/API/driver/kwsys/CMakeLists.txt deleted file mode 100644 index 09bcdb9430f..00000000000 --- a/test/API/driver/kwsys/CMakeLists.txt +++ /dev/null @@ -1,1260 +0,0 @@ -# Distributed under the OSI-approved BSD 3-Clause License. See accompanying -# file Copyright.txt or https://cmake.org/licensing#kwsys for details. - -# The Kitware System Library is intended to be included in other -# projects. It is completely configurable in that the library's -# namespace can be configured and the components that are included can -# be selected invididually. - -# Typical usage is to import the kwsys directory tree into a -# subdirectory under a parent project and enable the classes that will -# be used. All classes are disabled by default. The CMake listfile -# above this one configures the library as follows: -# -# SET(KWSYS_NAMESPACE foosys) -# SET(KWSYS_USE_Directory 1) # Enable Directory class. -# SUBDIRS(kwsys) -# -# Optional settings are as follows: -# -# KWSYS_HEADER_ROOT = The directory into which to generate the kwsys headers. -# A directory called "${KWSYS_NAMESPACE}" will be -# created under this root directory to hold the files. 
-# KWSYS_SPLIT_OBJECTS_FROM_INTERFACE -# = Instead of creating a single ${KWSYS_NAMESPACE} library -# target, create three separate targets: -# ${KWSYS_NAMESPACE} -# - An INTERFACE library only containing usage -# requirements. -# ${KWSYS_NAMESPACE}_objects -# - An OBJECT library for the built kwsys objects. -# Note: This is omitted from the install rules -# ${KWSYS_NAMESPACE}_private -# - An INTERFACE library combining both that is -# appropriate for use with PRIVATE linking in -# target_link_libraries. Because of how interface -# properties propagate, this target is not suitable -# for use with PUBLIC or INTERFACE linking. -# KWSYS_ALIAS_TARGET = The name of an alias target to create to the actual target. -# -# Example: -# -# SET(KWSYS_HEADER_ROOT ${PROJECT_BINARY_DIR}) -# INCLUDE_DIRECTORIES(${PROJECT_BINARY_DIR}) -# -# KWSYS_CXX_STANDARD = A value for CMAKE_CXX_STANDARD within KWSys. -# Set to empty string to use no default value. -# KWSYS_CXX_COMPILE_FEATURES = target_compile_features arguments for KWSys. -# -# Optional settings to setup install rules are as follows: -# -# KWSYS_INSTALL_BIN_DIR = The installation target directories into -# KWSYS_INSTALL_LIB_DIR which the libraries and headers from -# KWSYS_INSTALL_INCLUDE_DIR kwsys should be installed by a "make install". -# The values should be specified relative to -# the installation prefix and NOT start with '/'. -# KWSYS_INSTALL_DOC_DIR = The installation target directory for documentation -# such as copyright information. -# -# KWSYS_INSTALL_COMPONENT_NAME_RUNTIME = Name of runtime and development -# KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT installation components. -# If not given the install rules -# will not be in any component. -# -# KWSYS_INSTALL_EXPORT_NAME = The EXPORT option value for install(TARGETS) calls. -# -# Example: -# -# SET(KWSYS_INSTALL_BIN_DIR bin) -# SET(KWSYS_INSTALL_LIB_DIR lib) -# SET(KWSYS_INSTALL_INCLUDE_DIR include) -# SET(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME Runtime) -# SET(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT Development) - -# Once configured, kwsys should be used as follows from C or C++ code: -# -# #include -# ... -# foosys::Directory directory; -# - -# NOTE: This library is intended for internal use by Kitware-driven -# projects. In order to keep it simple no attempt will be made to -# maintain backward compatibility when changes are made to KWSys. -# When an incompatible change is made Kitware's projects that use -# KWSys will be fixed, but no notification will necessarily be sent to -# any outside mailing list and no documentation of the change will be -# written. - -CMAKE_MINIMUM_REQUIRED(VERSION 3.1 FATAL_ERROR) -FOREACH(p - CMP0056 # CMake 3.2, Honor link flags in try_compile() source-file signature. - CMP0063 # CMake 3.3, Honor visibility properties for all target types. - CMP0067 # CMake 3.8, Honor language standard in try_compile source-file signature. - CMP0069 # CMake 3.9, INTERPROCEDURAL_OPTIMIZATION is enforced when enabled. - ) - IF(POLICY ${p}) - CMAKE_POLICY(SET ${p} NEW) - ENDIF() -ENDFOREACH() - -#----------------------------------------------------------------------------- -# If a namespace is not specified, use "kwsys" and enable testing. -# This should be the case only when kwsys is not included inside -# another project and is being tested. -IF(NOT KWSYS_NAMESPACE) - SET(KWSYS_NAMESPACE "kwsys") - SET(KWSYS_STANDALONE 1) -ENDIF() - -#----------------------------------------------------------------------------- -# The project name is that of the specified namespace. 
-PROJECT(${KWSYS_NAMESPACE}) - -# Tell CMake how to follow dependencies of sources in this directory. -SET_PROPERTY(DIRECTORY - PROPERTY IMPLICIT_DEPENDS_INCLUDE_TRANSFORM - "KWSYS_HEADER(%)=<${KWSYS_NAMESPACE}/%>" - ) - -if(KWSYS_CXX_STANDARD) - set(CMAKE_CXX_STANDARD "${KWSYS_CXX_STANDARD}") -elseif(NOT DEFINED CMAKE_CXX_STANDARD AND NOT DEFINED KWSYS_CXX_STANDARD) - if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" - AND CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC" - AND CMAKE_CXX_COMPILER_FRONTEND_VARIANT STREQUAL "GNU" - ) - set(CMAKE_CXX_STANDARD 14) - else() - set(CMAKE_CXX_STANDARD 11) - endif() -endif() - -# Select library components. -IF(KWSYS_STANDALONE OR CMake_SOURCE_DIR) - SET(KWSYS_ENABLE_C 1) - # Enable all components. - SET(KWSYS_USE_Base64 1) - SET(KWSYS_USE_Directory 1) - SET(KWSYS_USE_DynamicLoader 1) - SET(KWSYS_USE_Encoding 1) - SET(KWSYS_USE_Glob 1) - SET(KWSYS_USE_MD5 1) - SET(KWSYS_USE_Process 1) - SET(KWSYS_USE_RegularExpression 1) - SET(KWSYS_USE_System 1) - SET(KWSYS_USE_SystemTools 1) - SET(KWSYS_USE_CommandLineArguments 1) - SET(KWSYS_USE_Terminal 1) - SET(KWSYS_USE_IOStream 1) - SET(KWSYS_USE_FStream 1) - SET(KWSYS_USE_String 1) - SET(KWSYS_USE_SystemInformation 1) - SET(KWSYS_USE_ConsoleBuf 1) -ENDIF() - -# Enforce component dependencies. -IF(KWSYS_USE_SystemTools) - SET(KWSYS_USE_Directory 1) - SET(KWSYS_USE_FStream 1) - SET(KWSYS_USE_Encoding 1) -ENDIF() -IF(KWSYS_USE_Glob) - SET(KWSYS_USE_Directory 1) - SET(KWSYS_USE_SystemTools 1) - SET(KWSYS_USE_RegularExpression 1) - SET(KWSYS_USE_FStream 1) - SET(KWSYS_USE_Encoding 1) -ENDIF() -IF(KWSYS_USE_Process) - SET(KWSYS_USE_System 1) - SET(KWSYS_USE_Encoding 1) -ENDIF() -IF(KWSYS_USE_SystemInformation) - SET(KWSYS_USE_Process 1) -ENDIF() -IF(KWSYS_USE_System) - SET(KWSYS_USE_Encoding 1) -ENDIF() -IF(KWSYS_USE_Directory) - SET(KWSYS_USE_Encoding 1) -ENDIF() -IF(KWSYS_USE_DynamicLoader) - SET(KWSYS_USE_Encoding 1) -ENDIF() -IF(KWSYS_USE_FStream) - SET(KWSYS_USE_Encoding 1) -ENDIF() -IF(KWSYS_USE_ConsoleBuf) - SET(KWSYS_USE_Encoding 1) -ENDIF() - -# Specify default 8 bit encoding for Windows -IF(NOT KWSYS_ENCODING_DEFAULT_CODEPAGE) - SET(KWSYS_ENCODING_DEFAULT_CODEPAGE CP_ACP) -ENDIF() - -# Enable testing if building standalone. -IF(KWSYS_STANDALONE) - INCLUDE(Dart) - MARK_AS_ADVANCED(BUILD_TESTING DART_ROOT TCL_TCLSH) - IF(BUILD_TESTING) - ENABLE_TESTING() - ENDIF() -ENDIF() - -# Choose default shared/static build if not specified. -IF(NOT DEFINED KWSYS_BUILD_SHARED) - SET(KWSYS_BUILD_SHARED ${BUILD_SHARED_LIBS}) -ENDIF() - -# Include helper macros. -INCLUDE(${CMAKE_CURRENT_SOURCE_DIR}/kwsysPlatformTests.cmake) -INCLUDE(CheckTypeSize) - -# Do full dependency headers. -INCLUDE_REGULAR_EXPRESSION("^.*$") - -# Use new KWSYS_INSTALL_*_DIR variable names to control installation. -# Take defaults from the old names. Note that there was no old name -# for the bin dir, so we take the old lib dir name so DLLs will be -# installed in a compatible way for old code. -IF(NOT KWSYS_INSTALL_INCLUDE_DIR) - STRING(REGEX REPLACE "^/" "" KWSYS_INSTALL_INCLUDE_DIR - "${KWSYS_HEADER_INSTALL_DIR}") -ENDIF() -IF(NOT KWSYS_INSTALL_LIB_DIR) - STRING(REGEX REPLACE "^/" "" KWSYS_INSTALL_LIB_DIR - "${KWSYS_LIBRARY_INSTALL_DIR}") -ENDIF() -IF(NOT KWSYS_INSTALL_BIN_DIR) - STRING(REGEX REPLACE "^/" "" KWSYS_INSTALL_BIN_DIR - "${KWSYS_LIBRARY_INSTALL_DIR}") -ENDIF() - -# Setup header install rules. 
-SET(KWSYS_INSTALL_INCLUDE_OPTIONS) -IF(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT) - SET(KWSYS_INSTALL_INCLUDE_OPTIONS ${KWSYS_INSTALL_INCLUDE_OPTIONS} - COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT} - ) -ENDIF() - -# Setup library install rules. -SET(KWSYS_INSTALL_LIBRARY_RULE) -SET(KWSYS_INSTALL_NAMELINK_RULE) -IF(KWSYS_INSTALL_LIB_DIR) - IF(KWSYS_INSTALL_EXPORT_NAME) - LIST(APPEND KWSYS_INSTALL_LIBRARY_RULE EXPORT ${KWSYS_INSTALL_EXPORT_NAME}) - ENDIF() - # Install the shared library to the lib directory. - SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} - LIBRARY DESTINATION ${KWSYS_INSTALL_LIB_DIR} NAMELINK_SKIP - ) - # Assign the shared library to the runtime component. - IF(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME) - SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} - COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_RUNTIME} - ) - ENDIF() - IF(KWSYS_BUILD_SHARED) - SET(KWSYS_INSTALL_NAMELINK_RULE ${KWSYS_INSTALL_NAMELINK_RULE} - LIBRARY DESTINATION ${KWSYS_INSTALL_LIB_DIR} NAMELINK_ONLY - ) - # Assign the namelink to the development component. - IF(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT) - SET(KWSYS_INSTALL_NAMELINK_RULE ${KWSYS_INSTALL_NAMELINK_RULE} - COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT} - ) - ENDIF() - ENDIF() - - # Install the archive to the lib directory. - SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} - ARCHIVE DESTINATION ${KWSYS_INSTALL_LIB_DIR} - ) - # Assign the archive to the development component. - IF(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT) - SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} - COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT} - ) - ENDIF() -ENDIF() -IF(KWSYS_INSTALL_BIN_DIR) - # Install the runtime library to the bin directory. - SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} - RUNTIME DESTINATION ${KWSYS_INSTALL_BIN_DIR} - ) - # Assign the runtime library to the runtime component. - IF(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME) - SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE} - COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_RUNTIME} - ) - ENDIF() -ENDIF() - -# Do not support old KWSYS_*a_INSTALL_DIR variable names. -SET(KWSYS_HEADER_INSTALL_DIR) -SET(KWSYS_LIBRARY_INSTALL_DIR) - -# Generated source files will need this header. -STRING(COMPARE EQUAL "${PROJECT_SOURCE_DIR}" "${PROJECT_BINARY_DIR}" - KWSYS_IN_SOURCE_BUILD) -IF(NOT KWSYS_IN_SOURCE_BUILD) - CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/kwsysPrivate.h - ${PROJECT_BINARY_DIR}/kwsysPrivate.h COPYONLY IMMEDIATE) -ENDIF() - -# Select plugin module file name convention. -IF(NOT KWSYS_DynamicLoader_PREFIX) - SET(KWSYS_DynamicLoader_PREFIX ${CMAKE_SHARED_MODULE_PREFIX}) -ENDIF() -IF(NOT KWSYS_DynamicLoader_SUFFIX) - SET(KWSYS_DynamicLoader_SUFFIX ${CMAKE_SHARED_MODULE_SUFFIX}) -ENDIF() - -#----------------------------------------------------------------------------- -# We require ANSI support from the C compiler. Add any needed flags. -IF(CMAKE_ANSI_CFLAGS) - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_ANSI_CFLAGS}") -ENDIF() - -#----------------------------------------------------------------------------- -# Adjust compiler flags for some platforms. 
-IF(NOT CMAKE_COMPILER_IS_GNUCXX) - IF(CMAKE_SYSTEM MATCHES "OSF1-V.*") - STRING(REGEX MATCH "-timplicit_local" - KWSYS_CXX_FLAGS_HAVE_IMPLICIT_LOCAL "${CMAKE_CXX_FLAGS}") - STRING(REGEX MATCH "-no_implicit_include" - KWSYS_CXX_FLAGS_HAVE_NO_IMPLICIT_INCLUDE "${CMAKE_CXX_FLAGS}") - IF(NOT KWSYS_CXX_FLAGS_HAVE_IMPLICIT_LOCAL) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -timplicit_local") - ENDIF() - IF(NOT KWSYS_CXX_FLAGS_HAVE_NO_IMPLICIT_INCLUDE) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -no_implicit_include") - ENDIF() - ENDIF() - IF(CMAKE_SYSTEM MATCHES "HP-UX") - SET(KWSYS_PLATFORM_CXX_TEST_EXTRA_FLAGS "+p") - IF(CMAKE_CXX_COMPILER_ID MATCHES "HP") - # it is known that version 3.85 fails and 6.25 works without these flags - IF(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4) - # use new C++ library and improved template support - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -AA +hpxstd98") - ENDIF() - ENDIF() - ENDIF() -ENDIF() -IF(KWSYS_STANDALONE) - IF(CMAKE_CXX_COMPILER_ID STREQUAL SunPro) - IF(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.13) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++03") - ELSE() - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -library=stlport4") - ENDIF() - ENDIF() -ENDIF() - -#----------------------------------------------------------------------------- -# Configure the standard library header wrappers based on compiler's -# capabilities and parent project's request. Enforce 0/1 as only -# possible values for configuration into Configure.hxx. - -# Check existence and uniqueness of long long and __int64. -KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_LONG_LONG - "Checking whether C++ compiler has 'long long'" DIRECT) -KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS___INT64 - "Checking whether C++ compiler has '__int64'" DIRECT) -IF(KWSYS_CXX_HAS___INT64) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_SAME_LONG_AND___INT64 - "Checking whether long and __int64 are the same type" DIRECT) - IF(KWSYS_CXX_HAS_LONG_LONG) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_SAME_LONG_LONG_AND___INT64 - "Checking whether long long and __int64 are the same type" DIRECT) - ENDIF() -ENDIF() - -# Enable the "long long" type if it is available. It is standard in -# C99 and C++03 but not in earlier standards. -IF(KWSYS_CXX_HAS_LONG_LONG) - SET(KWSYS_USE_LONG_LONG 1) -ELSE() - SET(KWSYS_USE_LONG_LONG 0) -ENDIF() - -# Enable the "__int64" type if it is available and unique. It is not -# standard. -SET(KWSYS_USE___INT64 0) -IF(KWSYS_CXX_HAS___INT64) - IF(NOT KWSYS_CXX_SAME_LONG_AND___INT64) - IF(NOT KWSYS_CXX_SAME_LONG_LONG_AND___INT64) - SET(KWSYS_USE___INT64 1) - ENDIF() - ENDIF() -ENDIF() - -IF(KWSYS_USE_Encoding) - # Look for type size helper macros. - KWSYS_PLATFORM_CXX_TEST(KWSYS_STL_HAS_WSTRING - "Checking whether wstring is available" DIRECT) -ENDIF() - -IF(KWSYS_USE_IOStream) - # Determine whether iostreams support long long. 
- IF(KWSYS_CXX_HAS_LONG_LONG) - KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_ISTREAM_LONG_LONG - "Checking if istream supports long long" DIRECT) - KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_OSTREAM_LONG_LONG - "Checking if ostream supports long long" DIRECT) - ELSE() - SET(KWSYS_IOS_HAS_ISTREAM_LONG_LONG 0) - SET(KWSYS_IOS_HAS_OSTREAM_LONG_LONG 0) - ENDIF() - IF(KWSYS_CXX_HAS___INT64) - KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_ISTREAM___INT64 - "Checking if istream supports __int64" DIRECT) - KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_OSTREAM___INT64 - "Checking if ostream supports __int64" DIRECT) - ELSE() - SET(KWSYS_IOS_HAS_ISTREAM___INT64 0) - SET(KWSYS_IOS_HAS_OSTREAM___INT64 0) - ENDIF() -ENDIF() - -IF(KWSYS_NAMESPACE MATCHES "^kwsys$") - SET(KWSYS_NAME_IS_KWSYS 1) -ELSE() - SET(KWSYS_NAME_IS_KWSYS 0) -ENDIF() - -IF(KWSYS_BUILD_SHARED) - SET(KWSYS_BUILD_SHARED 1) - SET(KWSYS_LIBRARY_TYPE SHARED) -ELSE() - SET(KWSYS_BUILD_SHARED 0) - SET(KWSYS_LIBRARY_TYPE STATIC) -ENDIF() - -if(NOT DEFINED KWSYS_BUILD_PIC) - set(KWSYS_BUILD_PIC 0) -endif() - -#----------------------------------------------------------------------------- -# Configure some implementation details. - -KWSYS_PLATFORM_C_TEST(KWSYS_C_HAS_PTRDIFF_T - "Checking whether C compiler has ptrdiff_t in stddef.h" DIRECT) -KWSYS_PLATFORM_C_TEST(KWSYS_C_HAS_SSIZE_T - "Checking whether C compiler has ssize_t in unistd.h" DIRECT) -IF(KWSYS_USE_Process) - KWSYS_PLATFORM_C_TEST(KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC - "Checking whether C compiler has clock_gettime" DIRECT) -ENDIF() - -SET_SOURCE_FILES_PROPERTIES(ProcessUNIX.c System.c PROPERTIES - COMPILE_FLAGS "-DKWSYS_C_HAS_PTRDIFF_T=${KWSYS_C_HAS_PTRDIFF_T} -DKWSYS_C_HAS_SSIZE_T=${KWSYS_C_HAS_SSIZE_T} -DKWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC=${KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC}" - ) - -IF(DEFINED KWSYS_PROCESS_USE_SELECT) - GET_PROPERTY(ProcessUNIX_FLAGS SOURCE ProcessUNIX.c PROPERTY COMPILE_FLAGS) - SET_PROPERTY(SOURCE ProcessUNIX.c PROPERTY COMPILE_FLAGS "${ProcessUNIX_FLAGS} -DKWSYSPE_USE_SELECT=${KWSYSPE_USE_SELECT}") -ENDIF() - -IF(KWSYS_USE_DynamicLoader) - GET_PROPERTY(KWSYS_SUPPORTS_SHARED_LIBS GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS) - IF(KWSYS_SUPPORTS_SHARED_LIBS) - SET(KWSYS_SUPPORTS_SHARED_LIBS 1) - ELSE() - SET(KWSYS_SUPPORTS_SHARED_LIBS 0) - ENDIF() - SET_PROPERTY(SOURCE DynamicLoader.cxx APPEND PROPERTY COMPILE_DEFINITIONS - KWSYS_SUPPORTS_SHARED_LIBS=${KWSYS_SUPPORTS_SHARED_LIBS}) -ENDIF() - -IF(KWSYS_USE_SystemTools) - if (NOT DEFINED KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP) - set(KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP 1) - endif () - if (KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP) - set(KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP 1) - else () - set(KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP 0) - endif () - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_SETENV - "Checking whether CXX compiler has setenv" DIRECT) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_UNSETENV - "Checking whether CXX compiler has unsetenv" DIRECT) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H - "Checking whether CXX compiler has environ in stdlib.h" DIRECT) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_UTIMES - "Checking whether CXX compiler has utimes" DIRECT) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_UTIMENSAT - "Checking whether CXX compiler has utimensat" DIRECT) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_STAT_HAS_ST_MTIM - "Checking whether CXX compiler struct stat has st_mtim member" DIRECT) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_STAT_HAS_ST_MTIMESPEC - "Checking whether CXX compiler struct stat has st_mtimespec member" DIRECT) - 
SET_PROPERTY(SOURCE SystemTools.cxx APPEND PROPERTY COMPILE_DEFINITIONS - KWSYS_CXX_HAS_SETENV=${KWSYS_CXX_HAS_SETENV} - KWSYS_CXX_HAS_UNSETENV=${KWSYS_CXX_HAS_UNSETENV} - KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H=${KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H} - KWSYS_CXX_HAS_UTIMES=${KWSYS_CXX_HAS_UTIMES} - KWSYS_CXX_HAS_UTIMENSAT=${KWSYS_CXX_HAS_UTIMENSAT} - KWSYS_CXX_STAT_HAS_ST_MTIM=${KWSYS_CXX_STAT_HAS_ST_MTIM} - KWSYS_CXX_STAT_HAS_ST_MTIMESPEC=${KWSYS_CXX_STAT_HAS_ST_MTIMESPEC} - ) - IF(NOT WIN32) - IF(KWSYS_STANDALONE) - OPTION(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES "If true, Windows paths will be supported on Unix as well" ON) - ENDIF() - IF(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES) - SET_PROPERTY(SOURCE SystemTools.cxx testSystemTools.cxx APPEND PROPERTY COMPILE_DEFINITIONS - KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES - ) - ENDIF() - ENDIF() - - # Disable getpwnam for static linux builds since it depends on shared glibc - GET_PROPERTY(SHARED_LIBS_SUPPORTED GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS) - IF(CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT SHARED_LIBS_SUPPORTED) - SET_PROPERTY(SOURCE SystemTools.cxx APPEND PROPERTY COMPILE_DEFINITIONS - HAVE_GETPWNAM=0 - ) - ENDIF() -ENDIF() - -IF(KWSYS_USE_SystemInformation) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS SIZEOF_VOID_P=${CMAKE_SIZEOF_VOID_P}) - IF(NOT CYGWIN) - INCLUDE(CheckIncludeFiles) - CHECK_INCLUDE_FILES("sys/types.h;ifaddrs.h" KWSYS_SYS_HAS_IFADDRS_H) - IF(KWSYS_SYS_HAS_IFADDRS_H) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_SYS_HAS_IFADDRS_H=1) - ENDIF() - ENDIF() - IF(WIN32) - INCLUDE(CheckSymbolExists) - SET(CMAKE_REQUIRED_LIBRARIES Psapi) - CHECK_SYMBOL_EXISTS(GetProcessMemoryInfo "windows.h;psapi.h" KWSYS_SYS_HAS_PSAPI) - UNSET(CMAKE_REQUIRED_LIBRARIES) - IF(KWSYS_SYS_HAS_PSAPI) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_SYS_HAS_PSAPI=1) - IF(MSVC70 OR MSVC71) - # Suppress LNK4089: all references to 'PSAPI.DLL' discarded by /OPT:REF - SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /IGNORE:4089") - ENDIF() - ENDIF() - ENDIF() - IF(CMAKE_SYSTEM MATCHES "HP-UX") - CHECK_INCLUDE_FILES("sys/mpctl.h" KWSYS_SYS_HAS_MPCTL_H) - IF(KWSYS_SYS_HAS_MPCTL_H) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_SYS_HAS_MPCTL_H=1) - ENDIF() - ENDIF() - IF(CMAKE_SYSTEM MATCHES "BSD") - CHECK_INCLUDE_FILES("machine/cpu.h" KWSYS_SYS_HAS_MACHINE_CPU_H) - IF(KWSYS_SYS_HAS_MACHINE_CPU_H) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_SYS_HAS_MACHINE_CPU_H=1) - ENDIF() - ENDIF() - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_RLIMIT64 - "Checking whether CXX compiler has rlimit64" DIRECT) - SET(KWSYS_PLATFORM_CXX_TEST_DEFINES) - IF(KWSYS_CXX_HAS_RLIMIT64) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_CXX_HAS_RLIMIT64=1) - ENDIF() - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_ATOL - "Checking whether CXX compiler has atol" DIRECT) - IF(KWSYS_CXX_HAS_ATOL) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_CXX_HAS_ATOL=1) - ENDIF() - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_ATOLL - "Checking whether CXX compiler has atoll" DIRECT) - IF(KWSYS_CXX_HAS_ATOLL) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_CXX_HAS_ATOLL=1) - ENDIF() - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS__ATOI64 - "Checking whether CXX compiler has _atoi64" DIRECT) - 
IF(KWSYS_CXX_HAS__ATOI64) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_CXX_HAS__ATOI64=1) - ENDIF() - IF(UNIX) - INCLUDE(CheckIncludeFileCXX) - # check for simple stack trace - # usually it's in libc but on FreeBSD - # it's in libexecinfo - FIND_LIBRARY(EXECINFO_LIB "execinfo") - MARK_AS_ADVANCED(EXECINFO_LIB) - IF (NOT EXECINFO_LIB) - SET(EXECINFO_LIB "") - ENDIF() - CHECK_INCLUDE_FILE_CXX("execinfo.h" KWSYS_CXX_HAS_EXECINFOH) - IF (KWSYS_CXX_HAS_EXECINFOH) - # we have the backtrace header check if it - # can be used with this compiler - SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES ${EXECINFO_LIB}) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_BACKTRACE - "Checking whether backtrace works with this C++ compiler" DIRECT) - SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES) - IF (KWSYS_CXX_HAS_BACKTRACE) - # backtrace is supported by this system and compiler. - # now check for the more advanced capabilities. - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE=1) - # check for symbol lookup using dladdr - CHECK_INCLUDE_FILE_CXX("dlfcn.h" KWSYS_CXX_HAS_DLFCNH) - IF (KWSYS_CXX_HAS_DLFCNH) - # we have symbol lookup libraries and headers - # check if they can be used with this compiler - SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES ${CMAKE_DL_LIBS}) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_DLADDR - "Checking whether dladdr works with this C++ compiler" DIRECT) - SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES) - IF (KWSYS_CXX_HAS_DLADDR) - # symbol lookup is supported by this system - # and compiler. - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP=1) - ENDIF() - ENDIF() - # c++ demangling support - # check for cxxabi headers - CHECK_INCLUDE_FILE_CXX("cxxabi.h" KWSYS_CXX_HAS_CXXABIH) - IF (KWSYS_CXX_HAS_CXXABIH) - # check if cxxabi can be used with this - # system and compiler. 
- KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_CXXABI - "Checking whether cxxabi works with this C++ compiler" DIRECT) - IF (KWSYS_CXX_HAS_CXXABI) - # c++ demangle using cxxabi is supported with - # this system and compiler - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE=1) - ENDIF() - ENDIF() - # basic backtrace works better with release build - # don't bother with advanced features for release - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS_DEBUG KWSYS_SYSTEMINFORMATION_HAS_DEBUG_BUILD=1) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS_RELWITHDEBINFO KWSYS_SYSTEMINFORMATION_HAS_DEBUG_BUILD=1) - ENDIF() - ENDIF() - ENDIF() - IF(BORLAND) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_BORLAND_ASM - "Checking whether Borland CXX compiler supports assembler instructions" DIRECT) - IF(KWSYS_CXX_HAS_BORLAND_ASM) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_CXX_HAS_BORLAND_ASM=1) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_BORLAND_ASM_CPUID - "Checking whether Borland CXX compiler supports CPUID assembler instruction" DIRECT) - IF(KWSYS_CXX_HAS_BORLAND_ASM_CPUID) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_CXX_HAS_BORLAND_ASM_CPUID=1) - ENDIF() - ENDIF() - ENDIF() - IF(KWSYS_USE___INT64) - SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_USE___INT64=1) - ENDIF() - IF(KWSYS_USE_LONG_LONG) - SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_USE_LONG_LONG=1) - ENDIF() - IF(KWSYS_IOS_HAS_OSTREAM_LONG_LONG) - SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_IOS_HAS_OSTREAM_LONG_LONG=1) - ENDIF() - IF(KWSYS_IOS_HAS_OSTREAM___INT64) - SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_IOS_HAS_OSTREAM___INT64=1) - ENDIF() - IF(KWSYS_BUILD_SHARED) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_BUILD_SHARED=1) - ENDIF() - - IF(UNIX AND NOT CYGWIN) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_GETLOADAVG - "Checking whether CXX compiler has getloadavg" DIRECT) - IF(KWSYS_CXX_HAS_GETLOADAVG) - SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY - COMPILE_DEFINITIONS KWSYS_CXX_HAS_GETLOADAVG=1) - ENDIF() - ENDIF() -ENDIF() - -IF(KWSYS_USE_FStream) - KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H - "Checking whether is available" DIRECT) -ENDIF() - -#----------------------------------------------------------------------------- -# Choose a directory for the generated headers. -IF(NOT KWSYS_HEADER_ROOT) - SET(KWSYS_HEADER_ROOT "${PROJECT_BINARY_DIR}") -ENDIF() -SET(KWSYS_HEADER_DIR "${KWSYS_HEADER_ROOT}/${KWSYS_NAMESPACE}") -INCLUDE_DIRECTORIES(${KWSYS_HEADER_ROOT}) - -#----------------------------------------------------------------------------- -IF(KWSYS_INSTALL_DOC_DIR) - # Assign the license to the runtime component since it must be - # distributed with binary forms of this software. - IF(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME) - SET(KWSYS_INSTALL_LICENSE_OPTIONS ${KWSYS_INSTALL_LICENSE_OPTIONS} - COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_RUNTIME} - ) - ENDIF() - - # Install the license under the documentation directory. 
- INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/Copyright.txt - DESTINATION ${KWSYS_INSTALL_DOC_DIR}/${KWSYS_NAMESPACE} - ${KWSYS_INSTALL_LICENSE_OPTIONS}) -ENDIF() - -#----------------------------------------------------------------------------- -# Build a list of classes and headers we need to implement the -# selected components. Initialize with required components. -SET(KWSYS_CLASSES) -SET(KWSYS_H_FILES Configure SharedForward) -SET(KWSYS_HXX_FILES Configure String) - -IF(NOT CMake_SOURCE_DIR) - SET(KWSYS_HXX_FILES ${KWSYS_HXX_FILES} - hashtable hash_fun hash_map hash_set - ) -ENDIF() - -# Add selected C++ classes. -SET(cppclasses - Directory DynamicLoader Encoding Glob RegularExpression SystemTools - CommandLineArguments IOStream FStream SystemInformation ConsoleBuf - ) -FOREACH(cpp ${cppclasses}) - IF(KWSYS_USE_${cpp}) - # Use the corresponding class. - SET(KWSYS_CLASSES ${KWSYS_CLASSES} ${cpp}) - - # Load component-specific CMake code. - IF(EXISTS ${PROJECT_SOURCE_DIR}/kwsys${cpp}.cmake) - INCLUDE(${PROJECT_SOURCE_DIR}/kwsys${cpp}.cmake) - ENDIF() - ENDIF() -ENDFOREACH() - -# Add selected C components. -FOREACH(c - Process Base64 Encoding MD5 Terminal System String - ) - IF(KWSYS_USE_${c}) - # Use the corresponding header file. - SET(KWSYS_H_FILES ${KWSYS_H_FILES} ${c}) - - # Load component-specific CMake code. - IF(EXISTS ${PROJECT_SOURCE_DIR}/kwsys${c}.cmake) - INCLUDE(${PROJECT_SOURCE_DIR}/kwsys${c}.cmake) - ENDIF() - ENDIF() -ENDFOREACH() - -#----------------------------------------------------------------------------- -# Build a list of sources for the library based on components that are -# included. -SET(KWSYS_C_SRCS) -SET(KWSYS_CXX_SRCS) - -# Add the proper sources for this platform's Process implementation. -IF(KWSYS_USE_Process) - IF(NOT UNIX) - # Use the Windows implementation. - SET(KWSYS_C_SRCS ${KWSYS_C_SRCS} ProcessWin32.c) - ELSE() - # Use the UNIX implementation. - SET(KWSYS_C_SRCS ${KWSYS_C_SRCS} ProcessUNIX.c) - ENDIF() -ENDIF() - -# Add selected C sources. -FOREACH(c Base64 Encoding MD5 Terminal System String) - IF(KWSYS_USE_${c}) - IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${c}C.c) - LIST(APPEND KWSYS_C_SRCS ${c}C.c) - ELSE() - LIST(APPEND KWSYS_C_SRCS ${c}.c) - ENDIF() - ENDIF() -ENDFOREACH() - -# Configure headers of C++ classes and construct the list of sources. -FOREACH(c ${KWSYS_CLASSES}) - # Add this source to the list of source files for the library. - IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${c}CXX.cxx) - LIST(APPEND KWSYS_CXX_SRCS ${c}CXX.cxx) - ELSEIF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${c}.cxx) - LIST(APPEND KWSYS_CXX_SRCS ${c}.cxx) - ENDIF() - - # Configure the header for this class. - CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/${c}.hxx.in ${KWSYS_HEADER_DIR}/${c}.hxx - @ONLY IMMEDIATE) - SET(KWSYS_CXX_SRCS ${KWSYS_CXX_SRCS} ${KWSYS_HEADER_DIR}/${c}.hxx) - - # Create an install target for the header. - IF(KWSYS_INSTALL_INCLUDE_DIR) - INSTALL(FILES ${KWSYS_HEADER_DIR}/${c}.hxx - DESTINATION ${KWSYS_INSTALL_INCLUDE_DIR}/${KWSYS_NAMESPACE} - ${KWSYS_INSTALL_INCLUDE_OPTIONS}) - ENDIF() -ENDFOREACH() - -# Configure C headers. -FOREACH(h ${KWSYS_H_FILES}) - # Configure the header into the given directory. - CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/${h}.h.in ${KWSYS_HEADER_DIR}/${h}.h - @ONLY IMMEDIATE) - SET(KWSYS_C_SRCS ${KWSYS_C_SRCS} ${KWSYS_HEADER_DIR}/${h}.h) - - # Create an install target for the header. 
- IF(KWSYS_INSTALL_INCLUDE_DIR) - INSTALL(FILES ${KWSYS_HEADER_DIR}/${h}.h - DESTINATION ${KWSYS_INSTALL_INCLUDE_DIR}/${KWSYS_NAMESPACE} - ${KWSYS_INSTALL_INCLUDE_OPTIONS}) - ENDIF() -ENDFOREACH() - -# Configure other C++ headers. -FOREACH(h ${KWSYS_HXX_FILES}) - # Configure the header into the given directory. - CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/${h}.hxx.in ${KWSYS_HEADER_DIR}/${h}.hxx - @ONLY IMMEDIATE) - SET(KWSYS_CXX_SRCS ${KWSYS_CXX_SRCS} ${KWSYS_HEADER_DIR}/${h}.hxx) - - # Create an install target for the header. - IF(KWSYS_INSTALL_INCLUDE_DIR) - INSTALL(FILES ${KWSYS_HEADER_DIR}/${h}.hxx - DESTINATION ${KWSYS_INSTALL_INCLUDE_DIR}/${KWSYS_NAMESPACE} - ${KWSYS_INSTALL_INCLUDE_OPTIONS}) - ENDIF() -ENDFOREACH() - -#----------------------------------------------------------------------------- -# Add the library with the configured name and list of sources. -IF(KWSYS_C_SRCS OR KWSYS_CXX_SRCS) - IF(KWSYS_SPLIT_OBJECTS_FROM_INTERFACE) - SET(KWSYS_TARGET_INTERFACE ${KWSYS_NAMESPACE}) - SET(KWSYS_TARGET_OBJECT ${KWSYS_NAMESPACE}_objects) - SET(KWSYS_TARGET_LINK ${KWSYS_NAMESPACE}_private) - SET(KWSYS_TARGET_INSTALL ${KWSYS_TARGET_INTERFACE} ${KWSYS_TARGET_LINK}) - SET(KWSYS_LINK_DEPENDENCY INTERFACE) - ADD_LIBRARY(${KWSYS_TARGET_OBJECT} OBJECT - ${KWSYS_C_SRCS} ${KWSYS_CXX_SRCS}) - IF(KWSYS_BUILD_SHARED OR KWSYS_BUILD_PIC) - SET_PROPERTY(TARGET ${KWSYS_TARGET_OBJECT} PROPERTY - POSITION_INDEPENDENT_CODE TRUE) - ENDIF() - ADD_LIBRARY(${KWSYS_TARGET_INTERFACE} INTERFACE) - ADD_LIBRARY(${KWSYS_TARGET_LINK} INTERFACE) - TARGET_LINK_LIBRARIES(${KWSYS_TARGET_LINK} INTERFACE - ${KWSYS_TARGET_INTERFACE}) - TARGET_SOURCES(${KWSYS_TARGET_LINK} INTERFACE - $) - target_compile_features(${KWSYS_TARGET_OBJECT} PRIVATE ${KWSYS_CXX_COMPILE_FEATURES}) - target_compile_features(${KWSYS_TARGET_INTERFACE} INTERFACE ${KWSYS_CXX_COMPILE_FEATURES}) - ELSE() - SET(KWSYS_TARGET_INTERFACE ${KWSYS_NAMESPACE}) - SET(KWSYS_TARGET_OBJECT ${KWSYS_NAMESPACE}) - SET(KWSYS_TARGET_LINK ${KWSYS_NAMESPACE}) - set(KWSYS_TARGET_INSTALL ${KWSYS_TARGET_LINK}) - SET(KWSYS_LINK_DEPENDENCY PUBLIC) - ADD_LIBRARY(${KWSYS_TARGET_INTERFACE} ${KWSYS_LIBRARY_TYPE} - ${KWSYS_C_SRCS} ${KWSYS_CXX_SRCS}) - target_compile_features(${KWSYS_TARGET_INTERFACE} PUBLIC ${KWSYS_CXX_COMPILE_FEATURES}) - ENDIF() - if (KWSYS_ALIAS_TARGET) - add_library(${KWSYS_ALIAS_TARGET} ALIAS ${KWSYS_TARGET_INTERFACE}) - endif () - SET_TARGET_PROPERTIES(${KWSYS_TARGET_OBJECT} PROPERTIES - C_CLANG_TIDY "" - CXX_CLANG_TIDY "" - C_INCLUDE_WHAT_YOU_USE "" - CXX_INCLUDE_WHAT_YOU_USE "" - LABELS "${KWSYS_LABELS_LIB}") - IF(KWSYS_USE_DynamicLoader) - IF(UNIX) - TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} - ${CMAKE_DL_LIBS}) - ENDIF() - ENDIF() - - IF(KWSYS_USE_SystemInformation) - IF(WIN32) - TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} ws2_32) - # link in dbghelp.dll for symbol lookup if MSVC 1800 or later - # Note that the dbghelp runtime is part of MS Windows OS - IF(MSVC_VERSION AND NOT MSVC_VERSION VERSION_LESS 1800) - TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} dbghelp) - ENDIF() - IF(KWSYS_SYS_HAS_PSAPI) - TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} - Psapi) - ENDIF() - ELSEIF(UNIX) - IF (EXECINFO_LIB AND KWSYS_CXX_HAS_BACKTRACE) - # backtrace on FreeBSD is not in libc - TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} - ${EXECINFO_LIB}) - ENDIF() - IF (KWSYS_CXX_HAS_DLADDR) - # for symbol lookup using dladdr - 
TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} - ${CMAKE_DL_LIBS}) - ENDIF() - IF (CMAKE_SYSTEM_NAME STREQUAL "SunOS") - TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} - socket) - ENDIF() - ENDIF() - ENDIF() - - # Apply user-defined target properties to the library. - IF(KWSYS_PROPERTIES_CXX) - SET_TARGET_PROPERTIES(${KWSYS_TARGET_INTERFACE} PROPERTIES - ${KWSYS_PROPERTIES_CXX}) - ENDIF() - - # Set up include usage requirement - IF(COMMAND TARGET_INCLUDE_DIRECTORIES) - TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_INTERFACE} INTERFACE - $) - IF(KWSYS_INSTALL_INCLUDE_DIR) - TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_INTERFACE} INTERFACE - $) - ENDIF() - ENDIF() - - # Create an install target for the library. - IF(KWSYS_INSTALL_LIBRARY_RULE) - INSTALL(TARGETS ${KWSYS_TARGET_INSTALL} ${KWSYS_INSTALL_LIBRARY_RULE}) - ENDIF() - IF(KWSYS_INSTALL_NAMELINK_RULE) - INSTALL(TARGETS ${KWSYS_TARGET_INSTALL} ${KWSYS_INSTALL_NAMELINK_RULE}) - ENDIF() -ENDIF() - -# Add a C-only library if requested. -IF(KWSYS_ENABLE_C AND KWSYS_C_SRCS) - IF(KWSYS_SPLIT_OBJECTS_FROM_INTERFACE) - SET(KWSYS_TARGET_C_INTERFACE ${KWSYS_NAMESPACE}_c) - SET(KWSYS_TARGET_C_OBJECT ${KWSYS_NAMESPACE}_c_objects) - SET(KWSYS_TARGET_C_LINK ${KWSYS_NAMESPACE}_c_private) - SET(KWSYS_TARGET_C_INSTALL - ${KWSYS_TARGET_C_INTERFACE} ${KWSYS_TARGET_C_LINK}) - SET(KWSYS_LINK_DEPENDENCY INTERFACE) - ADD_LIBRARY(${KWSYS_TARGET_C_OBJECT} OBJECT ${KWSYS_C_SRCS}) - IF(KWSYS_BUILD_SHARED OR KWSYS_BUILD_PIC) - SET_PROPERTY(TARGET ${KWSYS_TARGET_C_OBJECT} PROPERTY - POSITION_INDEPENDENT_CODE TRUE) - ENDIF() - ADD_LIBRARY(${KWSYS_TARGET_C_INTERFACE} INTERFACE) - ADD_LIBRARY(${KWSYS_TARGET_C_LINK} INTERFACE) - TARGET_LINK_LIBRARIES(${KWSYS_TARGET_C_LINK} INTERFACE - ${KWSYS_TARGET_C_INTERFACE}) - TARGET_SOURCES(${KWSYS_TARGET_C_LINK} INTERFACE - $) - ELSE() - SET(KWSYS_TARGET_C_INTERFACE ${KWSYS_NAMESPACE}_c) - SET(KWSYS_TARGET_C_OBJECT ${KWSYS_NAMESPACE}_c) - SET(KWSYS_TARGET_C_LINK ${KWSYS_NAMESPACE}_c) - SET(KWSYS_TARGET_C_INSTALL ${KWSYS_TARGET_C_LINK}) - SET(KWSYS_LINK_DEPENDENCY PUBLIC) - ADD_LIBRARY(${KWSYS_TARGET_C_INTERFACE} ${KWSYS_LIBRARY_TYPE} - ${KWSYS_C_SRCS}) - ENDIF() - SET_TARGET_PROPERTIES(${KWSYS_TARGET_C_OBJECT} PROPERTIES - LABELS "${KWSYS_LABELS_LIB}") - - # Apply user-defined target properties to the library. - IF(KWSYS_PROPERTIES_C) - SET_TARGET_PROPERTIES(${KWSYS_TARGET_C_INTERFACE} PROPERTIES - ${KWSYS_PROPERTIES_C}) - ENDIF() - - # Set up include usage requirement - IF(COMMAND TARGET_INCLUDE_DIRECTORIES) - TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_C_INTERFACE} INTERFACE - $) - IF(KWSYS_INSTALL_INCLUDE_DIR) - TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_C_INTERFACE} INTERFACE - $) - ENDIF() - ENDIF() - - # Create an install target for the library. - IF(KWSYS_INSTALL_LIBRARY_RULE) - INSTALL(TARGETS ${KWSYS_TARGET_C_INSTALL}) - ENDIF() -ENDIF() - -# For building kwsys itself, we use a macro defined on the command -# line to configure the namespace in the C and C++ source files. -ADD_DEFINITIONS("-DKWSYS_NAMESPACE=${KWSYS_NAMESPACE}") - -# Disable deprecation warnings for standard C functions. -IF(MSVC OR (WIN32 AND (CMAKE_C_COMPILER_ID STREQUAL "Intel" OR - (CMAKE_C_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC")))) - ADD_DEFINITIONS( - -D_CRT_NONSTDC_NO_DEPRECATE - -D_CRT_SECURE_NO_DEPRECATE - -D_CRT_SECURE_NO_WARNINGS - -D_SCL_SECURE_NO_DEPRECATE - ) -ENDIF() - -IF(WIN32) - # Help enforce the use of wide Windows apis. 
- ADD_DEFINITIONS(-DUNICODE -D_UNICODE) -ENDIF() - -IF(KWSYS_USE_String) - # Activate code in "String.c". See the comment in the source. - SET_SOURCE_FILES_PROPERTIES(String.c PROPERTIES - COMPILE_FLAGS "-DKWSYS_STRING_C") -ENDIF() - -IF(KWSYS_USE_Encoding) - # Set default 8 bit encoding in "EndcodingC.c". - SET_PROPERTY(SOURCE EncodingC.c EncodingCXX.cxx APPEND PROPERTY COMPILE_DEFINITIONS - KWSYS_ENCODING_DEFAULT_CODEPAGE=${KWSYS_ENCODING_DEFAULT_CODEPAGE}) -ENDIF() - -#----------------------------------------------------------------------------- -# Setup testing if not being built as part of another project. -IF(KWSYS_STANDALONE OR CMake_SOURCE_DIR) - IF(BUILD_TESTING) - # Compute the location of executables. - SET(EXEC_DIR "${CMAKE_CURRENT_BINARY_DIR}") - IF(EXECUTABLE_OUTPUT_PATH) - SET(EXEC_DIR "${EXECUTABLE_OUTPUT_PATH}") - ENDIF() - - # C tests - SET(KWSYS_C_TESTS - testEncode.c - testTerminal.c - ) - IF(KWSYS_STANDALONE) - SET(KWSYS_C_TESTS ${KWSYS_C_TESTS} testFail.c) - ENDIF() - CREATE_TEST_SOURCELIST( - KWSYS_C_TEST_SRCS ${KWSYS_NAMESPACE}TestsC.c - ${KWSYS_C_TESTS} - ) - ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestsC ${KWSYS_C_TEST_SRCS}) - SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsC PROPERTY LABELS ${KWSYS_LABELS_EXE}) - TARGET_LINK_LIBRARIES(${KWSYS_NAMESPACE}TestsC ${KWSYS_TARGET_C_LINK}) - FOREACH(testfile ${KWSYS_C_TESTS}) - get_filename_component(test "${testfile}" NAME_WE) - ADD_TEST(kwsys.${test} ${EXEC_DIR}/${KWSYS_NAMESPACE}TestsC ${test} ${KWSYS_TEST_ARGS_${test}}) - SET_PROPERTY(TEST kwsys.${test} PROPERTY LABELS ${KWSYS_LABELS_TEST}) - ENDFOREACH() - - # C++ tests - IF(NOT WATCOM AND NOT CMake_SOURCE_DIR) - SET(KWSYS_CXX_TESTS - testHashSTL.cxx - ) - ENDIF() - SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} - testConfigure.cxx - testSystemTools.cxx - testCommandLineArguments.cxx - testCommandLineArguments1.cxx - testDirectory.cxx - ) - IF(KWSYS_STL_HAS_WSTRING) - SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} - testEncoding.cxx - ) - ENDIF() - IF(KWSYS_USE_FStream) - SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} - testFStream.cxx - ) - ENDIF() - IF(KWSYS_USE_ConsoleBuf) - ADD_EXECUTABLE(testConsoleBufChild testConsoleBufChild.cxx) - SET_PROPERTY(TARGET testConsoleBufChild PROPERTY C_CLANG_TIDY "") - SET_PROPERTY(TARGET testConsoleBufChild PROPERTY CXX_CLANG_TIDY "") - SET_PROPERTY(TARGET testConsoleBufChild PROPERTY C_INCLUDE_WHAT_YOU_USE "") - SET_PROPERTY(TARGET testConsoleBufChild PROPERTY CXX_INCLUDE_WHAT_YOU_USE "") - SET_PROPERTY(TARGET testConsoleBufChild PROPERTY LABELS ${KWSYS_LABELS_EXE}) - TARGET_LINK_LIBRARIES(testConsoleBufChild ${KWSYS_TARGET_LINK}) - SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} - testConsoleBuf.cxx - ) - IF(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC" AND - CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "19.0.23506") - set_property(SOURCE testConsoleBuf.cxx testConsoleBufChild.cxx PROPERTY COMPILE_FLAGS /utf-8) - ENDIF() - SET_PROPERTY(SOURCE testConsoleBuf.cxx APPEND PROPERTY COMPILE_DEFINITIONS - KWSYS_ENCODING_DEFAULT_CODEPAGE=${KWSYS_ENCODING_DEFAULT_CODEPAGE}) - ENDIF() - IF(KWSYS_USE_SystemInformation) - SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} testSystemInformation.cxx) - ENDIF() - IF(KWSYS_USE_DynamicLoader) - SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} testDynamicLoader.cxx) - # If kwsys contains the DynamicLoader, need extra library - ADD_LIBRARY(${KWSYS_NAMESPACE}TestDynload MODULE testDynload.c) - SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestDynload PROPERTY LABELS ${KWSYS_LABELS_LIB}) - ADD_DEPENDENCIES(${KWSYS_NAMESPACE}TestDynload ${KWSYS_TARGET_INTERFACE}) - - if (WIN32) - 
# Windows tests supported flags. - add_library(${KWSYS_NAMESPACE}TestDynloadImpl SHARED testDynloadImpl.c) - set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadImpl PROPERTY LABELS ${KWSYS_LABELS_LIB}) - set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadImpl PROPERTY DEFINE_SYMBOL BUILDING_TestDynloadImpl) - set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadImpl PROPERTY RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/dynloaddir") - add_dependencies(${KWSYS_NAMESPACE}TestDynloadImpl ${KWSYS_TARGET_INTERFACE}) - add_library(${KWSYS_NAMESPACE}TestDynloadUse MODULE testDynloadUse.c) - set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadUse PROPERTY LABELS ${KWSYS_LABELS_LIB}) - set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadUse PROPERTY LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/dynloaddir") - add_dependencies(${KWSYS_NAMESPACE}TestDynloadUse ${KWSYS_TARGET_INTERFACE}) - target_link_libraries(${KWSYS_NAMESPACE}TestDynloadUse PRIVATE ${KWSYS_NAMESPACE}TestDynloadImpl) - endif () - ENDIF() - CREATE_TEST_SOURCELIST( - KWSYS_CXX_TEST_SRCS ${KWSYS_NAMESPACE}TestsCxx.cxx - ${KWSYS_CXX_TESTS} - ) - ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestsCxx ${KWSYS_CXX_TEST_SRCS}) - SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY C_CLANG_TIDY "") - SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY CXX_CLANG_TIDY "") - SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY C_INCLUDE_WHAT_YOU_USE "") - SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY CXX_INCLUDE_WHAT_YOU_USE "") - SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY LABELS ${KWSYS_LABELS_EXE}) - TARGET_LINK_LIBRARIES(${KWSYS_NAMESPACE}TestsCxx ${KWSYS_TARGET_LINK}) - - SET(TEST_SYSTEMTOOLS_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}") - SET(TEST_SYSTEMTOOLS_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}") - CONFIGURE_FILE( - ${PROJECT_SOURCE_DIR}/testSystemTools.h.in - ${PROJECT_BINARY_DIR}/testSystemTools.h) - INCLUDE_DIRECTORIES(${PROJECT_BINARY_DIR}) - - IF(CTEST_TEST_KWSYS) - CONFIGURE_FILE("${CMAKE_CURRENT_SOURCE_DIR}/ExtraTest.cmake.in" - "${CMAKE_CURRENT_BINARY_DIR}/ExtraTest.cmake") - SET_DIRECTORY_PROPERTIES(PROPERTIES TEST_INCLUDE_FILE "${CMAKE_CURRENT_BINARY_DIR}/ExtraTest.cmake") - ENDIF() - - SET(KWSYS_TEST_ARGS_testCommandLineArguments - --another-bool-variable - --long3=opt - --set-bool-arg1 - -SSS ken brad bill andy - --some-bool-variable=true - --some-double-variable12.5 - --some-int-variable 14 - "--some-string-variable=test string with space" - --some-multi-argument 5 1 8 3 7 1 3 9 7 1 - -N 12.5 -SS=andy -N 1.31 -N 22 - -SS=bill -BBtrue -SS=brad - -BBtrue - -BBfalse - -SS=ken - -A - -C=test - --long2 hello - ) - SET(KWSYS_TEST_ARGS_testCommandLineArguments1 - --ignored - -n 24 - --second-ignored - "-m=test value" - third-ignored - -p - some junk at the end - ) - FOREACH(testfile ${KWSYS_CXX_TESTS}) - get_filename_component(test "${testfile}" NAME_WE) - ADD_TEST(kwsys.${test} ${EXEC_DIR}/${KWSYS_NAMESPACE}TestsCxx ${test} ${KWSYS_TEST_ARGS_${test}}) - SET_PROPERTY(TEST kwsys.${test} PROPERTY LABELS ${KWSYS_LABELS_TEST}) - ENDFOREACH() - - # Process tests. 
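The KWSYS_TEST_ARGS_testCommandLineArguments list above feeds one token of each supported argument style to the test driver (boolean flags, "--opt val", "--optval", "--opt=val", and multi-value options). A rough sketch of how such options are registered with kwsys::CommandLineArguments follows; the option names are taken from that list, but the variable names and the exact type mapping are illustrative guesses (the authoritative version is testCommandLineArguments.cxx):

    #include <kwsys/CommandLineArguments.hxx>  // include prefix depends on KWSYS_NAMESPACE
    #include <string>
    #include <vector>

    int main(int argc, char* argv[])
    {
      typedef kwsys::CommandLineArguments argT;
      argT arg;
      arg.Initialize(argc, argv);

      bool flag = false;                 // --another-bool-variable
      int count = 0;                     // --some-int-variable 14
      double ratio = 0.0;                // --some-double-variable12.5
      std::string name;                  // --some-string-variable=...
      std::vector<int> values;           // --some-multi-argument 5 1 8 ...

      arg.AddBooleanArgument("--another-bool-variable", &flag, "Flag without a value");
      arg.AddArgument("--some-int-variable", argT::SPACE_ARGUMENT, &count, "Value after a space");
      arg.AddArgument("--some-double-variable", argT::CONCAT_ARGUMENT, &ratio, "Value glued to the option");
      arg.AddArgument("--some-string-variable", argT::EQUAL_ARGUMENT, &name, "Value after '='");
      arg.AddArgument("--some-multi-argument", argT::MULTI_ARGUMENT, &values, "All following values");

      return arg.Parse() ? 0 : 1;
    }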
- ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestProcess testProcess.c) - SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestProcess PROPERTY LABELS ${KWSYS_LABELS_EXE}) - TARGET_LINK_LIBRARIES(${KWSYS_NAMESPACE}TestProcess ${KWSYS_TARGET_C_LINK}) - IF(NOT CYGWIN) - SET(KWSYS_TEST_PROCESS_7 7) - ENDIF() - FOREACH(n 1 2 3 4 5 6 ${KWSYS_TEST_PROCESS_7} 9 10) - ADD_TEST(kwsys.testProcess-${n} ${EXEC_DIR}/${KWSYS_NAMESPACE}TestProcess ${n}) - SET_PROPERTY(TEST kwsys.testProcess-${n} PROPERTY LABELS ${KWSYS_LABELS_TEST}) - SET_TESTS_PROPERTIES(kwsys.testProcess-${n} PROPERTIES TIMEOUT 120) - ENDFOREACH() - - SET(testProcess_COMPILE_FLAGS "") - # Some Apple compilers produce bad optimizations in this source. - IF(APPLE AND CMAKE_C_COMPILER_ID MATCHES "^(GNU|LLVM)$") - SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -O0") - ELSEIF(CMAKE_C_COMPILER_ID STREQUAL "XL") - # Tell IBM XL not to warn about our test infinite loop - IF(CMAKE_SYSTEM MATCHES "Linux.*ppc64le" - AND CMAKE_C_COMPILER_VERSION VERSION_LESS "16.1.0" - AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS "13.1.1") - # v13.1.[1-6] on Linux ppc64le is clang based and does not accept - # the -qsuppress option, so just suppress all warnings. - SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -w") - ELSE() - SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -qsuppress=1500-010") - ENDIF() - ENDIF() - IF(CMAKE_C_FLAGS MATCHES "-fsanitize=") - SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -DCRASH_USING_ABORT") - ENDIF() - SET_PROPERTY(SOURCE testProcess.c PROPERTY COMPILE_FLAGS "${testProcess_COMPILE_FLAGS}") - - # Test SharedForward - CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/testSharedForward.c.in - ${PROJECT_BINARY_DIR}/testSharedForward.c @ONLY IMMEDIATE) - ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestSharedForward - ${PROJECT_BINARY_DIR}/testSharedForward.c) - SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestSharedForward PROPERTY LABELS ${KWSYS_LABELS_EXE}) - ADD_DEPENDENCIES(${KWSYS_NAMESPACE}TestSharedForward ${KWSYS_TARGET_C_LINK}) - ADD_TEST(kwsys.testSharedForward ${EXEC_DIR}/${KWSYS_NAMESPACE}TestSharedForward 1) - SET_PROPERTY(TEST kwsys.testSharedForward PROPERTY LABELS ${KWSYS_LABELS_TEST}) - - # Configure some test properties. - IF(KWSYS_STANDALONE) - # We expect test to fail - SET_TESTS_PROPERTIES(kwsys.testFail PROPERTIES WILL_FAIL ON) - GET_TEST_PROPERTY(kwsys.testFail WILL_FAIL wfv) - SET_TESTS_PROPERTIES(kwsys.testFail PROPERTIES MEASUREMENT "Some Key=Some Value") - MESSAGE(STATUS "GET_TEST_PROPERTY returned: ${wfv}") - ENDIF() - - # Set up ctest custom configuration file. - CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/CTestCustom.cmake.in - ${PROJECT_BINARY_DIR}/CTestCustom.cmake @ONLY) - - # Suppress known consistent failures on buggy systems. - IF(KWSYS_TEST_BOGUS_FAILURES) - SET_TESTS_PROPERTIES(${KWSYS_TEST_BOGUS_FAILURES} PROPERTIES WILL_FAIL ON) - ENDIF() - - ENDIF() -ENDIF() diff --git a/test/API/driver/kwsys/CONTRIBUTING.rst b/test/API/driver/kwsys/CONTRIBUTING.rst deleted file mode 100644 index 32e7b83c5b8..00000000000 --- a/test/API/driver/kwsys/CONTRIBUTING.rst +++ /dev/null @@ -1,49 +0,0 @@ -Contributing to KWSys -********************* - -Patches -======= - -KWSys is kept in its own Git repository and shared by several projects -via copies in their source trees. Changes to KWSys should not be made -directly in a host project, except perhaps in maintenance branches. - -KWSys uses `Kitware's GitLab Instance`_ to manage development and code review. -To contribute patches: - -#. 
Fork the upstream `KWSys Repository`_ into a personal account. -#. Base all new work on the upstream ``master`` branch. -#. Run ``./SetupForDevelopment.sh`` in new local work trees. -#. Create commits making incremental, distinct, logically complete changes. -#. Push a topic branch to a personal repository fork on GitLab. -#. Create a GitLab Merge Request targeting the upstream ``master`` branch. - -Once changes are reviewed, tested, and integrated to KWSys upstream then -copies of KWSys within dependent projects can be updated to get the changes. - -.. _`Kitware's GitLab Instance`: https://gitlab.kitware.com -.. _`KWSys Repository`: https://gitlab.kitware.com/utils/kwsys - -Code Style -========== - -We use `clang-format`_ version **6.0** to define our style for C++ code in -the KWSys source tree. See the `.clang-format`_ configuration file for -our style settings. Use the `clang-format.bash`_ script to format source -code. It automatically runs ``clang-format`` on the set of source files -for which we enforce style. The script also has options to format only -a subset of files, such as those that are locally modified. - -.. _`clang-format`: http://clang.llvm.org/docs/ClangFormat.html -.. _`.clang-format`: .clang-format -.. _`clang-format.bash`: clang-format.bash - -License -======= - -We do not require any formal copyright assignment or contributor license -agreement. Any contributions intentionally sent upstream are presumed -to be offered under terms of the OSI-approved BSD 3-clause License. -See `Copyright.txt`_ for details. - -.. _`Copyright.txt`: Copyright.txt diff --git a/test/API/driver/kwsys/CTestConfig.cmake b/test/API/driver/kwsys/CTestConfig.cmake deleted file mode 100644 index 1339ffc2ddc..00000000000 --- a/test/API/driver/kwsys/CTestConfig.cmake +++ /dev/null @@ -1,9 +0,0 @@ -# Distributed under the OSI-approved BSD 3-Clause License. See accompanying -# file Copyright.txt or https://cmake.org/licensing#kwsys for details. - -set(CTEST_PROJECT_NAME "KWSys") -set(CTEST_NIGHTLY_START_TIME "21:00:00 EDT") -set(CTEST_DROP_METHOD "http") -set(CTEST_DROP_SITE "open.cdash.org") -set(CTEST_DROP_LOCATION "/submit.php?project=KWSys") -set(CTEST_DROP_SITE_CDASH TRUE) diff --git a/test/API/driver/kwsys/CTestCustom.cmake.in b/test/API/driver/kwsys/CTestCustom.cmake.in deleted file mode 100644 index 760221b1244..00000000000 --- a/test/API/driver/kwsys/CTestCustom.cmake.in +++ /dev/null @@ -1,14 +0,0 @@ -# kwsys.testProcess-10 involves sending SIGINT to a child process, which then -# exits abnormally via a call to _exit(). (On Windows, a call to ExitProcess). -# Naturally, this results in plenty of memory being "leaked" by this child -# process - the memory check results are not meaningful in this case. -# -# kwsys.testProcess-9 also tests sending SIGINT to a child process. However, -# normal operation of that test involves the child process timing out, and the -# host process kills (SIGKILL) it as a result. Since it was SIGKILL'ed, the -# resulting memory leaks are not logged by valgrind anyway. Therefore, we -# don't have to exclude it. - -list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE - kwsys.testProcess-10 - ) diff --git a/test/API/driver/kwsys/CommandLineArguments.cxx b/test/API/driver/kwsys/CommandLineArguments.cxx deleted file mode 100644 index 3fd19556179..00000000000 --- a/test/API/driver/kwsys/CommandLineArguments.cxx +++ /dev/null @@ -1,768 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. 
See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(CommandLineArguments.hxx) - -#include KWSYS_HEADER(Configure.hxx) -#include KWSYS_HEADER(String.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "CommandLineArguments.hxx.in" -# include "Configure.hxx.in" -# include "String.hxx.in" -#endif - -#include -#include -#include -#include -#include - -#include -#include -#include - -#ifdef _MSC_VER -# pragma warning(disable : 4786) -#endif - -#if defined(__sgi) && !defined(__GNUC__) -# pragma set woff 1375 /* base class destructor not virtual */ -#endif - -#if 0 -# define CommandLineArguments_DEBUG(x) \ - std::cout << __LINE__ << " CLA: " << x << std::endl -#else -# define CommandLineArguments_DEBUG(x) -#endif - -namespace KWSYS_NAMESPACE { - -struct CommandLineArgumentsCallbackStructure -{ - const char* Argument; - int ArgumentType; - CommandLineArguments::CallbackType Callback; - void* CallData; - void* Variable; - int VariableType; - const char* Help; -}; - -class CommandLineArgumentsVectorOfStrings : public std::vector -{ -}; -class CommandLineArgumentsSetOfStrings : public std::set -{ -}; -class CommandLineArgumentsMapOfStrucs - : public std::map -{ -}; - -class CommandLineArgumentsInternal -{ -public: - CommandLineArgumentsInternal() - : UnknownArgumentCallback{ nullptr } - , ClientData{ nullptr } - , LastArgument{ 0 } - { - } - - typedef CommandLineArgumentsVectorOfStrings VectorOfStrings; - typedef CommandLineArgumentsMapOfStrucs CallbacksMap; - typedef kwsys::String String; - typedef CommandLineArgumentsSetOfStrings SetOfStrings; - - VectorOfStrings Argv; - String Argv0; - CallbacksMap Callbacks; - - CommandLineArguments::ErrorCallbackType UnknownArgumentCallback; - void* ClientData; - - VectorOfStrings::size_type LastArgument; - - VectorOfStrings UnusedArguments; -}; - -CommandLineArguments::CommandLineArguments() -{ - this->Internals = new CommandLineArguments::Internal; - this->Help = ""; - this->LineLength = 80; - this->StoreUnusedArgumentsFlag = false; -} - -CommandLineArguments::~CommandLineArguments() -{ - delete this->Internals; -} - -void CommandLineArguments::Initialize(int argc, const char* const argv[]) -{ - int cc; - - this->Initialize(); - this->Internals->Argv0 = argv[0]; - for (cc = 1; cc < argc; cc++) { - this->ProcessArgument(argv[cc]); - } -} - -void CommandLineArguments::Initialize(int argc, char* argv[]) -{ - this->Initialize(argc, static_cast(argv)); -} - -void CommandLineArguments::Initialize() -{ - this->Internals->Argv.clear(); - this->Internals->LastArgument = 0; -} - -void CommandLineArguments::ProcessArgument(const char* arg) -{ - this->Internals->Argv.push_back(arg); -} - -bool CommandLineArguments::GetMatchedArguments( - std::vector* matches, const std::string& arg) -{ - matches->clear(); - CommandLineArguments::Internal::CallbacksMap::iterator it; - - // Does the argument match to any we know about? 
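A note on the matching helper defined here: GetMatchedArguments() requires an exact match for NO_ARGUMENT and SPACE_ARGUMENT options and otherwise accepts any registered name that is a prefix of the supplied token, and Parse() then keeps the longest matching name. A standalone sketch of that selection rule (ignoring the per-type distinction for brevity):

    #include <string>
    #include <vector>

    // "Longest registered prefix wins": for the token "--some-bool-variable=true",
    // a registered "--some-bool-variable" would beat a shorter hypothetical
    // "--some-bool". An empty result means the token is unknown.
    std::string LongestMatchingOption(const std::vector<std::string>& registered,
                                      const std::string& token)
    {
      std::string best;
      for (const std::string& name : registered) {
        if (token.compare(0, name.size(), name) == 0 && name.size() > best.size()) {
          best = name;
        }
      }
      return best;
    }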
- for (it = this->Internals->Callbacks.begin(); - it != this->Internals->Callbacks.end(); it++) { - const CommandLineArguments::Internal::String& parg = it->first; - CommandLineArgumentsCallbackStructure* cs = &it->second; - if (cs->ArgumentType == CommandLineArguments::NO_ARGUMENT || - cs->ArgumentType == CommandLineArguments::SPACE_ARGUMENT) { - if (arg == parg) { - matches->push_back(parg); - } - } else if (arg.find(parg) == 0) { - matches->push_back(parg); - } - } - return !matches->empty(); -} - -int CommandLineArguments::Parse() -{ - std::vector::size_type cc; - std::vector matches; - if (this->StoreUnusedArgumentsFlag) { - this->Internals->UnusedArguments.clear(); - } - for (cc = 0; cc < this->Internals->Argv.size(); cc++) { - const std::string& arg = this->Internals->Argv[cc]; - CommandLineArguments_DEBUG("Process argument: " << arg); - this->Internals->LastArgument = cc; - if (this->GetMatchedArguments(&matches, arg)) { - // Ok, we found one or more arguments that match what user specified. - // Let's find the longest one. - CommandLineArguments::Internal::VectorOfStrings::size_type kk; - CommandLineArguments::Internal::VectorOfStrings::size_type maxidx = 0; - CommandLineArguments::Internal::String::size_type maxlen = 0; - for (kk = 0; kk < matches.size(); kk++) { - if (matches[kk].size() > maxlen) { - maxlen = matches[kk].size(); - maxidx = kk; - } - } - // So, the longest one is probably the right one. Now see if it has any - // additional value - CommandLineArgumentsCallbackStructure* cs = - &this->Internals->Callbacks[matches[maxidx]]; - const std::string& sarg = matches[maxidx]; - if (cs->Argument != sarg) { - abort(); - } - switch (cs->ArgumentType) { - case NO_ARGUMENT: - // No value - if (!this->PopulateVariable(cs, nullptr)) { - return 0; - } - break; - case SPACE_ARGUMENT: - if (cc == this->Internals->Argv.size() - 1) { - this->Internals->LastArgument--; - return 0; - } - CommandLineArguments_DEBUG("This is a space argument: " - << arg << " value: " - << this->Internals->Argv[cc + 1]); - // Value is the next argument - if (!this->PopulateVariable(cs, - this->Internals->Argv[cc + 1].c_str())) { - return 0; - } - cc++; - break; - case EQUAL_ARGUMENT: - if (arg.size() == sarg.size() || arg.at(sarg.size()) != '=') { - this->Internals->LastArgument--; - return 0; - } - // Value is everythng followed the '=' sign - if (!this->PopulateVariable(cs, arg.c_str() + sarg.size() + 1)) { - return 0; - } - break; - case CONCAT_ARGUMENT: - // Value is whatever follows the argument - if (!this->PopulateVariable(cs, arg.c_str() + sarg.size())) { - return 0; - } - break; - case MULTI_ARGUMENT: - // Suck in all the rest of the arguments - CommandLineArguments_DEBUG("This is a multi argument: " << arg); - for (cc++; cc < this->Internals->Argv.size(); ++cc) { - const std::string& marg = this->Internals->Argv[cc]; - CommandLineArguments_DEBUG( - " check multi argument value: " << marg); - if (this->GetMatchedArguments(&matches, marg)) { - CommandLineArguments_DEBUG("End of multi argument " - << arg << " with value: " << marg); - break; - } - CommandLineArguments_DEBUG( - " populate multi argument value: " << marg); - if (!this->PopulateVariable(cs, marg.c_str())) { - return 0; - } - } - if (cc != this->Internals->Argv.size()) { - CommandLineArguments_DEBUG("Again End of multi argument " << arg); - cc--; - continue; - } - break; - default: - std::cerr << "Got unknown argument type: \"" << cs->ArgumentType - << "\"" << std::endl; - this->Internals->LastArgument--; - return 0; - } - } else { - // 
Handle unknown arguments - if (this->Internals->UnknownArgumentCallback) { - if (!this->Internals->UnknownArgumentCallback( - arg.c_str(), this->Internals->ClientData)) { - this->Internals->LastArgument--; - return 0; - } - return 1; - } else if (this->StoreUnusedArgumentsFlag) { - CommandLineArguments_DEBUG("Store unused argument " << arg); - this->Internals->UnusedArguments.push_back(arg); - } else { - std::cerr << "Got unknown argument: \"" << arg << "\"" << std::endl; - this->Internals->LastArgument--; - return 0; - } - } - } - return 1; -} - -void CommandLineArguments::GetRemainingArguments(int* argc, char*** argv) -{ - CommandLineArguments::Internal::VectorOfStrings::size_type size = - this->Internals->Argv.size() - this->Internals->LastArgument + 1; - CommandLineArguments::Internal::VectorOfStrings::size_type cc; - - // Copy Argv0 as the first argument - char** args = new char*[size]; - args[0] = new char[this->Internals->Argv0.size() + 1]; - strcpy(args[0], this->Internals->Argv0.c_str()); - int cnt = 1; - - // Copy everything after the LastArgument, since that was not parsed. - for (cc = this->Internals->LastArgument + 1; - cc < this->Internals->Argv.size(); cc++) { - args[cnt] = new char[this->Internals->Argv[cc].size() + 1]; - strcpy(args[cnt], this->Internals->Argv[cc].c_str()); - cnt++; - } - *argc = cnt; - *argv = args; -} - -void CommandLineArguments::GetUnusedArguments(int* argc, char*** argv) -{ - CommandLineArguments::Internal::VectorOfStrings::size_type size = - this->Internals->UnusedArguments.size() + 1; - CommandLineArguments::Internal::VectorOfStrings::size_type cc; - - // Copy Argv0 as the first argument - char** args = new char*[size]; - args[0] = new char[this->Internals->Argv0.size() + 1]; - strcpy(args[0], this->Internals->Argv0.c_str()); - int cnt = 1; - - // Copy everything after the LastArgument, since that was not parsed. 
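Both GetRemainingArguments() above and GetUnusedArguments() here return a freshly allocated argv-style array with Argv0 copied into slot 0, so the caller must release it with DeleteRemainingArguments(). A minimal usage sketch, with the include prefix assumed:

    #include <kwsys/CommandLineArguments.hxx>  // prefix depends on KWSYS_NAMESPACE

    int main(int argc, char* argv[])
    {
      kwsys::CommandLineArguments arg;
      arg.Initialize(argc, argv);
      arg.StoreUnusedArguments(true);  // keep tokens that match no registered option
      if (!arg.Parse()) {
        return 1;
      }

      int uargc = 0;
      char** uargv = nullptr;
      arg.GetUnusedArguments(&uargc, &uargv);  // uargv[0] is argv0, the rest are unused tokens
      /* ... inspect uargv[1] .. uargv[uargc - 1] here ... */
      arg.DeleteRemainingArguments(uargc, &uargv);  // frees the copies made above
      return 0;
    }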
- for (cc = 0; cc < this->Internals->UnusedArguments.size(); cc++) { - kwsys::String& str = this->Internals->UnusedArguments[cc]; - args[cnt] = new char[str.size() + 1]; - strcpy(args[cnt], str.c_str()); - cnt++; - } - *argc = cnt; - *argv = args; -} - -void CommandLineArguments::DeleteRemainingArguments(int argc, char*** argv) -{ - int cc; - for (cc = 0; cc < argc; ++cc) { - delete[](*argv)[cc]; - } - delete[] * argv; -} - -void CommandLineArguments::AddCallback(const char* argument, - ArgumentTypeEnum type, - CallbackType callback, void* call_data, - const char* help) -{ - CommandLineArgumentsCallbackStructure s; - s.Argument = argument; - s.ArgumentType = type; - s.Callback = callback; - s.CallData = call_data; - s.VariableType = CommandLineArguments::NO_VARIABLE_TYPE; - s.Variable = nullptr; - s.Help = help; - - this->Internals->Callbacks[argument] = s; - this->GenerateHelp(); -} - -void CommandLineArguments::AddArgument(const char* argument, - ArgumentTypeEnum type, - VariableTypeEnum vtype, void* variable, - const char* help) -{ - CommandLineArgumentsCallbackStructure s; - s.Argument = argument; - s.ArgumentType = type; - s.Callback = nullptr; - s.CallData = nullptr; - s.VariableType = vtype; - s.Variable = variable; - s.Help = help; - - this->Internals->Callbacks[argument] = s; - this->GenerateHelp(); -} - -#define CommandLineArgumentsAddArgumentMacro(type, ctype) \ - void CommandLineArguments::AddArgument(const char* argument, \ - ArgumentTypeEnum type, \ - ctype* variable, const char* help) \ - { \ - this->AddArgument(argument, type, CommandLineArguments::type##_TYPE, \ - variable, help); \ - } - -/* clang-format off */ -CommandLineArgumentsAddArgumentMacro(BOOL, bool) -CommandLineArgumentsAddArgumentMacro(INT, int) -CommandLineArgumentsAddArgumentMacro(DOUBLE, double) -CommandLineArgumentsAddArgumentMacro(STRING, char*) -CommandLineArgumentsAddArgumentMacro(STL_STRING, std::string) - -CommandLineArgumentsAddArgumentMacro(VECTOR_BOOL, std::vector) -CommandLineArgumentsAddArgumentMacro(VECTOR_INT, std::vector) -CommandLineArgumentsAddArgumentMacro(VECTOR_DOUBLE, std::vector) -CommandLineArgumentsAddArgumentMacro(VECTOR_STRING, std::vector) -CommandLineArgumentsAddArgumentMacro(VECTOR_STL_STRING, - std::vector) -#ifdef HELP_CLANG_FORMAT -; -#endif -/* clang-format on */ - -#define CommandLineArgumentsAddBooleanArgumentMacro(type, ctype) \ - void CommandLineArguments::AddBooleanArgument( \ - const char* argument, ctype* variable, const char* help) \ - { \ - this->AddArgument(argument, CommandLineArguments::NO_ARGUMENT, \ - CommandLineArguments::type##_TYPE, variable, help); \ - } - -/* clang-format off */ -CommandLineArgumentsAddBooleanArgumentMacro(BOOL, bool) -CommandLineArgumentsAddBooleanArgumentMacro(INT, int) -CommandLineArgumentsAddBooleanArgumentMacro(DOUBLE, double) -CommandLineArgumentsAddBooleanArgumentMacro(STRING, char*) -CommandLineArgumentsAddBooleanArgumentMacro(STL_STRING, std::string) -#ifdef HELP_CLANG_FORMAT -; -#endif -/* clang-format on */ - -void CommandLineArguments::SetClientData(void* client_data) -{ - this->Internals->ClientData = client_data; -} - -void CommandLineArguments::SetUnknownArgumentCallback( - CommandLineArguments::ErrorCallbackType callback) -{ - this->Internals->UnknownArgumentCallback = callback; -} - -const char* CommandLineArguments::GetHelp(const char* arg) -{ - CommandLineArguments::Internal::CallbacksMap::iterator it = - this->Internals->Callbacks.find(arg); - if (it == this->Internals->Callbacks.end()) { - return nullptr; - } - - // 
Since several arguments may point to the same argument, find the one this - // one point to if this one is pointing to another argument. - CommandLineArgumentsCallbackStructure* cs = &(it->second); - for (;;) { - CommandLineArguments::Internal::CallbacksMap::iterator hit = - this->Internals->Callbacks.find(cs->Help); - if (hit == this->Internals->Callbacks.end()) { - break; - } - cs = &(hit->second); - } - return cs->Help; -} - -void CommandLineArguments::SetLineLength(unsigned int ll) -{ - if (ll < 9 || ll > 1000) { - return; - } - this->LineLength = ll; - this->GenerateHelp(); -} - -const char* CommandLineArguments::GetArgv0() -{ - return this->Internals->Argv0.c_str(); -} - -unsigned int CommandLineArguments::GetLastArgument() -{ - return static_cast(this->Internals->LastArgument + 1); -} - -void CommandLineArguments::GenerateHelp() -{ - std::ostringstream str; - - // Collapse all arguments into the map of vectors of all arguments that do - // the same thing. - CommandLineArguments::Internal::CallbacksMap::iterator it; - typedef std::map - MapArgs; - MapArgs mp; - MapArgs::iterator mpit, smpit; - for (it = this->Internals->Callbacks.begin(); - it != this->Internals->Callbacks.end(); it++) { - CommandLineArgumentsCallbackStructure* cs = &(it->second); - mpit = mp.find(cs->Help); - if (mpit != mp.end()) { - mpit->second.insert(it->first); - mp[it->first].insert(it->first); - } else { - mp[it->first].insert(it->first); - } - } - for (it = this->Internals->Callbacks.begin(); - it != this->Internals->Callbacks.end(); it++) { - CommandLineArgumentsCallbackStructure* cs = &(it->second); - mpit = mp.find(cs->Help); - if (mpit != mp.end()) { - mpit->second.insert(it->first); - smpit = mp.find(it->first); - CommandLineArguments::Internal::SetOfStrings::iterator sit; - for (sit = smpit->second.begin(); sit != smpit->second.end(); sit++) { - mpit->second.insert(*sit); - } - mp.erase(smpit); - } else { - mp[it->first].insert(it->first); - } - } - - // Find the length of the longest string - CommandLineArguments::Internal::String::size_type maxlen = 0; - for (mpit = mp.begin(); mpit != mp.end(); mpit++) { - CommandLineArguments::Internal::SetOfStrings::iterator sit; - for (sit = mpit->second.begin(); sit != mpit->second.end(); sit++) { - CommandLineArguments::Internal::String::size_type clen = sit->size(); - switch (this->Internals->Callbacks[*sit].ArgumentType) { - case CommandLineArguments::NO_ARGUMENT: - clen += 0; - break; - case CommandLineArguments::CONCAT_ARGUMENT: - clen += 3; - break; - case CommandLineArguments::SPACE_ARGUMENT: - clen += 4; - break; - case CommandLineArguments::EQUAL_ARGUMENT: - clen += 4; - break; - } - if (clen > maxlen) { - maxlen = clen; - } - } - } - - CommandLineArguments::Internal::String::size_type maxstrlen = maxlen; - maxlen += 4; // For the space before and after the option - - // Print help for each option - for (mpit = mp.begin(); mpit != mp.end(); mpit++) { - CommandLineArguments::Internal::SetOfStrings::iterator sit; - for (sit = mpit->second.begin(); sit != mpit->second.end(); sit++) { - str << std::endl; - std::string argument = *sit; - switch (this->Internals->Callbacks[*sit].ArgumentType) { - case CommandLineArguments::NO_ARGUMENT: - break; - case CommandLineArguments::CONCAT_ARGUMENT: - argument += "opt"; - break; - case CommandLineArguments::SPACE_ARGUMENT: - argument += " opt"; - break; - case CommandLineArguments::EQUAL_ARGUMENT: - argument += "=opt"; - break; - case CommandLineArguments::MULTI_ARGUMENT: - argument += " opt opt ..."; - break; - } - 
str << " " << argument.substr(0, maxstrlen) << " "; - } - const char* ptr = this->Internals->Callbacks[mpit->first].Help; - size_t len = strlen(ptr); - int cnt = 0; - while (len > 0) { - // If argument with help is longer than line length, split it on previous - // space (or tab) and continue on the next line - CommandLineArguments::Internal::String::size_type cc; - for (cc = 0; ptr[cc]; cc++) { - if (*ptr == ' ' || *ptr == '\t') { - ptr++; - len--; - } - } - if (cnt > 0) { - for (cc = 0; cc < maxlen; cc++) { - str << " "; - } - } - CommandLineArguments::Internal::String::size_type skip = len; - if (skip > this->LineLength - maxlen) { - skip = this->LineLength - maxlen; - for (cc = skip - 1; cc > 0; cc--) { - if (ptr[cc] == ' ' || ptr[cc] == '\t') { - break; - } - } - if (cc != 0) { - skip = cc; - } - } - str.write(ptr, static_cast(skip)); - str << std::endl; - ptr += skip; - len -= skip; - cnt++; - } - } - /* - // This can help debugging help string - str << endl; - unsigned int cc; - for ( cc = 0; cc < this->LineLength; cc ++ ) - { - str << cc % 10; - } - str << endl; - */ - this->Help = str.str(); -} - -void CommandLineArguments::PopulateVariable(bool* variable, - const std::string& value) -{ - if (value == "1" || value == "ON" || value == "on" || value == "On" || - value == "TRUE" || value == "true" || value == "True" || - value == "yes" || value == "Yes" || value == "YES") { - *variable = true; - } else { - *variable = false; - } -} - -void CommandLineArguments::PopulateVariable(int* variable, - const std::string& value) -{ - char* res = nullptr; - *variable = static_cast(strtol(value.c_str(), &res, 10)); - // if ( res && *res ) - // { - // Can handle non-int - // } -} - -void CommandLineArguments::PopulateVariable(double* variable, - const std::string& value) -{ - char* res = nullptr; - *variable = strtod(value.c_str(), &res); - // if ( res && *res ) - // { - // Can handle non-double - // } -} - -void CommandLineArguments::PopulateVariable(char** variable, - const std::string& value) -{ - delete[] * variable; - *variable = new char[value.size() + 1]; - strcpy(*variable, value.c_str()); -} - -void CommandLineArguments::PopulateVariable(std::string* variable, - const std::string& value) -{ - *variable = value; -} - -void CommandLineArguments::PopulateVariable(std::vector* variable, - const std::string& value) -{ - bool val = false; - if (value == "1" || value == "ON" || value == "on" || value == "On" || - value == "TRUE" || value == "true" || value == "True" || - value == "yes" || value == "Yes" || value == "YES") { - val = true; - } - variable->push_back(val); -} - -void CommandLineArguments::PopulateVariable(std::vector* variable, - const std::string& value) -{ - char* res = nullptr; - variable->push_back(static_cast(strtol(value.c_str(), &res, 10))); - // if ( res && *res ) - // { - // Can handle non-int - // } -} - -void CommandLineArguments::PopulateVariable(std::vector* variable, - const std::string& value) -{ - char* res = nullptr; - variable->push_back(strtod(value.c_str(), &res)); - // if ( res && *res ) - // { - // Can handle non-int - // } -} - -void CommandLineArguments::PopulateVariable(std::vector* variable, - const std::string& value) -{ - char* var = new char[value.size() + 1]; - strcpy(var, value.c_str()); - variable->push_back(var); -} - -void CommandLineArguments::PopulateVariable(std::vector* variable, - const std::string& value) -{ - variable->push_back(value); -} - -bool CommandLineArguments::PopulateVariable( - CommandLineArgumentsCallbackStructure* cs, const 
char* value) -{ - // Call the callback - if (cs->Callback) { - if (!cs->Callback(cs->Argument, value, cs->CallData)) { - this->Internals->LastArgument--; - return 0; - } - } - CommandLineArguments_DEBUG("Set argument: " << cs->Argument << " to " - << value); - if (cs->Variable) { - std::string var = "1"; - if (value) { - var = value; - } - switch (cs->VariableType) { - case CommandLineArguments::INT_TYPE: - this->PopulateVariable(static_cast(cs->Variable), var); - break; - case CommandLineArguments::DOUBLE_TYPE: - this->PopulateVariable(static_cast(cs->Variable), var); - break; - case CommandLineArguments::STRING_TYPE: - this->PopulateVariable(static_cast(cs->Variable), var); - break; - case CommandLineArguments::STL_STRING_TYPE: - this->PopulateVariable(static_cast(cs->Variable), var); - break; - case CommandLineArguments::BOOL_TYPE: - this->PopulateVariable(static_cast(cs->Variable), var); - break; - case CommandLineArguments::VECTOR_BOOL_TYPE: - this->PopulateVariable(static_cast*>(cs->Variable), - var); - break; - case CommandLineArguments::VECTOR_INT_TYPE: - this->PopulateVariable(static_cast*>(cs->Variable), - var); - break; - case CommandLineArguments::VECTOR_DOUBLE_TYPE: - this->PopulateVariable(static_cast*>(cs->Variable), - var); - break; - case CommandLineArguments::VECTOR_STRING_TYPE: - this->PopulateVariable(static_cast*>(cs->Variable), - var); - break; - case CommandLineArguments::VECTOR_STL_STRING_TYPE: - this->PopulateVariable( - static_cast*>(cs->Variable), var); - break; - default: - std::cerr << "Got unknown variable type: \"" << cs->VariableType - << "\"" << std::endl; - this->Internals->LastArgument--; - return 0; - } - } - return 1; -} - -} // namespace KWSYS_NAMESPACE diff --git a/test/API/driver/kwsys/CommandLineArguments.hxx.in b/test/API/driver/kwsys/CommandLineArguments.hxx.in deleted file mode 100644 index 7db90155640..00000000000 --- a/test/API/driver/kwsys/CommandLineArguments.hxx.in +++ /dev/null @@ -1,270 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_CommandLineArguments_hxx -#define @KWSYS_NAMESPACE@_CommandLineArguments_hxx - -#include <@KWSYS_NAMESPACE@/Configure.h> -#include <@KWSYS_NAMESPACE@/Configure.hxx> - -#include -#include - -namespace @KWSYS_NAMESPACE@ { - -class CommandLineArgumentsInternal; -struct CommandLineArgumentsCallbackStructure; - -/** \class CommandLineArguments - * \brief Command line arguments processing code. - * - * Find specified arguments with optional options and execute specified methods - * or set given variables. - * - * The two interfaces it knows are callback based and variable based. For - * callback based, you have to register callback for particular argument using - * AddCallback method. When that argument is passed, the callback will be - * called with argument, value, and call data. For boolean (NO_ARGUMENT) - * arguments, the value is "1". If the callback returns 0 the argument parsing - * will stop with an error. - * - * For the variable interface you associate variable with each argument. When - * the argument is specified, the variable is set to the specified value casted - * to the appropriate type. For boolean (NO_ARGUMENT), the value is "1". - * - * Both interfaces can be used at the same time. 
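The built-in example further down in this comment shows only the variable interface; a sketch of the callback interface described just above (AddCallback() together with SetUnknownArgumentCallback(), option name invented, include prefix assumed) would look roughly like this:

    #include <kwsys/CommandLineArguments.hxx>  // prefix depends on KWSYS_NAMESPACE
    #include <iostream>

    // CallbackType: for SPACE_ARGUMENT options "value" is the following token;
    // returning 0 makes Parse() stop with an error.
    static int OnLevel(const char* argument, const char* value, void* /*call_data*/)
    {
      std::cout << argument << " = " << (value ? value : "1") << std::endl;
      return 1;
    }

    // ErrorCallbackType: called for tokens that match no registered argument.
    // Returning 0 fails the parse; note that in this implementation a non-zero
    // return also ends Parse() immediately (see Parse() earlier in this patch).
    static int OnUnknown(const char* argument, void* /*client_data*/)
    {
      std::cerr << "unknown argument: " << argument << std::endl;
      return 1;
    }

    int main(int argc, char* argv[])
    {
      kwsys::CommandLineArguments arg;
      arg.Initialize(argc, argv);
      arg.AddCallback("--level", kwsys::CommandLineArguments::SPACE_ARGUMENT,
                      OnLevel, /*call_data=*/nullptr, "Set the level");
      arg.SetUnknownArgumentCallback(OnUnknown);
      return arg.Parse() ? 0 : 1;
    }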
- * - * Possible argument types are: - * NO_ARGUMENT - The argument takes no value : --A - * CONCAT_ARGUMENT - The argument takes value after no space : --Aval - * SPACE_ARGUMENT - The argument takes value after space : --A val - * EQUAL_ARGUMENT - The argument takes value after equal : --A=val - * MULTI_ARGUMENT - The argument takes values after space : --A val1 val2 - * val3 ... - * - * Example use: - * - * kwsys::CommandLineArguments arg; - * arg.Initialize(argc, argv); - * typedef kwsys::CommandLineArguments argT; - * arg.AddArgument("--something", argT::EQUAL_ARGUMENT, &some_variable, - * "This is help string for --something"); - * if ( !arg.Parse() ) - * { - * std::cerr << "Problem parsing arguments" << std::endl; - * res = 1; - * } - * - */ - -class @KWSYS_NAMESPACE@_EXPORT CommandLineArguments -{ -public: - CommandLineArguments(); - ~CommandLineArguments(); - - CommandLineArguments(const CommandLineArguments&) = delete; - CommandLineArguments& operator=(const CommandLineArguments&) = delete; - - /** - * Various argument types. - */ - enum ArgumentTypeEnum - { - NO_ARGUMENT, - CONCAT_ARGUMENT, - SPACE_ARGUMENT, - EQUAL_ARGUMENT, - MULTI_ARGUMENT - }; - - /** - * Various variable types. When using the variable interface, this specifies - * what type the variable is. - */ - enum VariableTypeEnum - { - NO_VARIABLE_TYPE = 0, // The variable is not specified - INT_TYPE, // The variable is integer (int) - BOOL_TYPE, // The variable is boolean (bool) - DOUBLE_TYPE, // The variable is float (double) - STRING_TYPE, // The variable is string (char*) - STL_STRING_TYPE, // The variable is string (char*) - VECTOR_INT_TYPE, // The variable is integer (int) - VECTOR_BOOL_TYPE, // The variable is boolean (bool) - VECTOR_DOUBLE_TYPE, // The variable is float (double) - VECTOR_STRING_TYPE, // The variable is string (char*) - VECTOR_STL_STRING_TYPE, // The variable is string (char*) - LAST_VARIABLE_TYPE - }; - - /** - * Prototypes for callbacks for callback interface. - */ - typedef int (*CallbackType)(const char* argument, const char* value, - void* call_data); - typedef int (*ErrorCallbackType)(const char* argument, void* client_data); - - /** - * Initialize internal data structures. This should be called before parsing. - */ - void Initialize(int argc, const char* const argv[]); - void Initialize(int argc, char* argv[]); - - /** - * Initialize internal data structure and pass arguments one by one. This is - * convenience method for use from scripting languages where argc and argv - * are not available. - */ - void Initialize(); - void ProcessArgument(const char* arg); - - /** - * This method will parse arguments and call appropriate methods. - */ - int Parse(); - - /** - * This method will add a callback for a specific argument. The arguments to - * it are argument, argument type, callback method, and call data. The - * argument help specifies the help string used with this option. The - * callback and call_data can be skipped. - */ - void AddCallback(const char* argument, ArgumentTypeEnum type, - CallbackType callback, void* call_data, const char* help); - - /** - * Add handler for argument which is going to set the variable to the - * specified value. If the argument is specified, the option is casted to the - * appropriate type. 
- */ - void AddArgument(const char* argument, ArgumentTypeEnum type, bool* variable, - const char* help); - void AddArgument(const char* argument, ArgumentTypeEnum type, int* variable, - const char* help); - void AddArgument(const char* argument, ArgumentTypeEnum type, - double* variable, const char* help); - void AddArgument(const char* argument, ArgumentTypeEnum type, - char** variable, const char* help); - void AddArgument(const char* argument, ArgumentTypeEnum type, - std::string* variable, const char* help); - - /** - * Add handler for argument which is going to set the variable to the - * specified value. If the argument is specified, the option is casted to the - * appropriate type. This will handle the multi argument values. - */ - void AddArgument(const char* argument, ArgumentTypeEnum type, - std::vector* variable, const char* help); - void AddArgument(const char* argument, ArgumentTypeEnum type, - std::vector* variable, const char* help); - void AddArgument(const char* argument, ArgumentTypeEnum type, - std::vector* variable, const char* help); - void AddArgument(const char* argument, ArgumentTypeEnum type, - std::vector* variable, const char* help); - void AddArgument(const char* argument, ArgumentTypeEnum type, - std::vector* variable, const char* help); - - /** - * Add handler for boolean argument. The argument does not take any option - * and if it is specified, the value of the variable is true/1, otherwise it - * is false/0. - */ - void AddBooleanArgument(const char* argument, bool* variable, - const char* help); - void AddBooleanArgument(const char* argument, int* variable, - const char* help); - void AddBooleanArgument(const char* argument, double* variable, - const char* help); - void AddBooleanArgument(const char* argument, char** variable, - const char* help); - void AddBooleanArgument(const char* argument, std::string* variable, - const char* help); - - /** - * Set the callbacks for error handling. - */ - void SetClientData(void* client_data); - void SetUnknownArgumentCallback(ErrorCallbackType callback); - - /** - * Get remaining arguments. It allocates space for argv, so you have to call - * delete[] on it. - */ - void GetRemainingArguments(int* argc, char*** argv); - void DeleteRemainingArguments(int argc, char*** argv); - - /** - * If StoreUnusedArguments is set to true, then all unknown arguments will be - * stored and the user can access the modified argc, argv without known - * arguments. - */ - void StoreUnusedArguments(bool val) { this->StoreUnusedArgumentsFlag = val; } - void GetUnusedArguments(int* argc, char*** argv); - - /** - * Return string containing help. If the argument is specified, only return - * help for that argument. - */ - const char* GetHelp() { return this->Help.c_str(); } - const char* GetHelp(const char* arg); - - /** - * Get / Set the help line length. This length is used when generating the - * help page. Default length is 80. - */ - void SetLineLength(unsigned int); - unsigned int GetLineLength(); - - /** - * Get the executable name (argv0). This is only available when using - * Initialize with argc/argv. - */ - const char* GetArgv0(); - - /** - * Get index of the last argument parsed. This is the last argument that was - * parsed ok in the original argc/argv list. - */ - unsigned int GetLastArgument(); - -protected: - void GenerateHelp(); - - //! 
This is internal method that registers variable with argument - void AddArgument(const char* argument, ArgumentTypeEnum type, - VariableTypeEnum vtype, void* variable, const char* help); - - bool GetMatchedArguments(std::vector* matches, - const std::string& arg); - - //! Populate individual variables - bool PopulateVariable(CommandLineArgumentsCallbackStructure* cs, - const char* value); - - //! Populate individual variables of type ... - void PopulateVariable(bool* variable, const std::string& value); - void PopulateVariable(int* variable, const std::string& value); - void PopulateVariable(double* variable, const std::string& value); - void PopulateVariable(char** variable, const std::string& value); - void PopulateVariable(std::string* variable, const std::string& value); - void PopulateVariable(std::vector* variable, const std::string& value); - void PopulateVariable(std::vector* variable, const std::string& value); - void PopulateVariable(std::vector* variable, - const std::string& value); - void PopulateVariable(std::vector* variable, - const std::string& value); - void PopulateVariable(std::vector* variable, - const std::string& value); - - typedef CommandLineArgumentsInternal Internal; - Internal* Internals; - std::string Help; - - unsigned int LineLength; - - bool StoreUnusedArgumentsFlag; -}; - -} // namespace @KWSYS_NAMESPACE@ - -#endif diff --git a/test/API/driver/kwsys/Configure.h.in b/test/API/driver/kwsys/Configure.h.in deleted file mode 100644 index 5323c57bebe..00000000000 --- a/test/API/driver/kwsys/Configure.h.in +++ /dev/null @@ -1,89 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_Configure_h -#define @KWSYS_NAMESPACE@_Configure_h - -/* If we are building a kwsys .c or .cxx file, let it use the kwsys - namespace. When not building a kwsys source file these macros are - temporarily defined inside the headers that use them. */ -#if defined(KWSYS_NAMESPACE) -# define kwsys_ns(x) @KWSYS_NAMESPACE@##x -# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT -#endif - -/* Disable some warnings inside kwsys source files. */ -#if defined(KWSYS_NAMESPACE) -# if defined(__BORLANDC__) -# pragma warn - 8027 /* function not inlined. */ -# endif -# if defined(__INTEL_COMPILER) -# pragma warning(disable : 1572) /* floating-point equality test */ -# endif -# if defined(__sgi) && !defined(__GNUC__) -# pragma set woff 3970 /* pointer to int conversion */ -# pragma set woff 3968 /* 64 bit conversion */ -# endif -#endif - -/* Whether kwsys namespace is "kwsys". */ -#define @KWSYS_NAMESPACE@_NAME_IS_KWSYS @KWSYS_NAME_IS_KWSYS@ - -/* Setup the export macro. */ -#if @KWSYS_BUILD_SHARED@ -# if defined(_WIN32) || defined(__CYGWIN__) -# if defined(@KWSYS_NAMESPACE@_EXPORTS) -# define @KWSYS_NAMESPACE@_EXPORT __declspec(dllexport) -# else -# define @KWSYS_NAMESPACE@_EXPORT __declspec(dllimport) -# endif -# elif __GNUC__ >= 4 -# define @KWSYS_NAMESPACE@_EXPORT __attribute__((visibility("default"))) -# else -# define @KWSYS_NAMESPACE@_EXPORT -# endif -#else -# define @KWSYS_NAMESPACE@_EXPORT -#endif - -/* Enable warnings that are off by default but are useful. */ -#if !defined(@KWSYS_NAMESPACE@_NO_WARNING_ENABLE) -# if defined(_MSC_VER) -# pragma warning(default : 4263) /* no override, call convention differs \ - */ -# endif -#endif - -/* Disable warnings that are on by default but occur in valid code. 
*/ -#if !defined(@KWSYS_NAMESPACE@_NO_WARNING_DISABLE) -# if defined(_MSC_VER) -# pragma warning(disable : 4097) /* typedef is synonym for class */ -# pragma warning(disable : 4127) /* conditional expression is constant */ -# pragma warning(disable : 4244) /* possible loss in conversion */ -# pragma warning(disable : 4251) /* missing DLL-interface */ -# pragma warning(disable : 4305) /* truncation from type1 to type2 */ -# pragma warning(disable : 4309) /* truncation of constant value */ -# pragma warning(disable : 4514) /* unreferenced inline function */ -# pragma warning(disable : 4706) /* assignment in conditional expression \ - */ -# pragma warning(disable : 4710) /* function not inlined */ -# pragma warning(disable : 4786) /* identifier truncated in debug info */ -# endif -# if defined(__BORLANDC__) && !defined(__cplusplus) -/* Code has no effect; raised by winnt.h in C (not C++) when ignoring an - unused parameter using "(param)" syntax (i.e. no cast to void). */ -# pragma warn - 8019 -# endif -#endif - -/* MSVC 6.0 in release mode will warn about code it produces with its - optimizer. Disable the warnings specifically for this - configuration. Real warnings will be revealed by a debug build or - by other compilers. */ -#if !defined(@KWSYS_NAMESPACE@_NO_WARNING_DISABLE_BOGUS) -# if defined(_MSC_VER) && (_MSC_VER < 1300) && defined(NDEBUG) -# pragma warning(disable : 4701) /* Variable may be used uninitialized. */ -# pragma warning(disable : 4702) /* Unreachable code. */ -# endif -#endif - -#endif diff --git a/test/API/driver/kwsys/Configure.hxx.in b/test/API/driver/kwsys/Configure.hxx.in deleted file mode 100644 index 29a2dd11e39..00000000000 --- a/test/API/driver/kwsys/Configure.hxx.in +++ /dev/null @@ -1,65 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_Configure_hxx -#define @KWSYS_NAMESPACE@_Configure_hxx - -/* Include C configuration. */ -#include <@KWSYS_NAMESPACE@/Configure.h> - -/* Whether wstring is available. */ -#define @KWSYS_NAMESPACE@_STL_HAS_WSTRING @KWSYS_STL_HAS_WSTRING@ -/* Whether is available. */ -#define @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H \ - @KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H@ -/* Whether the translation map is available or not. 
*/ -#define @KWSYS_NAMESPACE@_SYSTEMTOOLS_USE_TRANSLATION_MAP \ - @KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP@ - -#if defined(__SUNPRO_CC) && __SUNPRO_CC > 0x5130 && defined(__has_attribute) -# define @KWSYS_NAMESPACE@__has_cpp_attribute(x) __has_attribute(x) -#elif defined(__has_cpp_attribute) -# define @KWSYS_NAMESPACE@__has_cpp_attribute(x) __has_cpp_attribute(x) -#else -# define @KWSYS_NAMESPACE@__has_cpp_attribute(x) 0 -#endif - -#if __cplusplus >= 201103L -# define @KWSYS_NAMESPACE@_NULLPTR nullptr -#else -# define @KWSYS_NAMESPACE@_NULLPTR 0 -#endif - -#ifndef @KWSYS_NAMESPACE@_FALLTHROUGH -# if __cplusplus >= 201703L && \ - @KWSYS_NAMESPACE@__has_cpp_attribute(fallthrough) -# define @KWSYS_NAMESPACE@_FALLTHROUGH [[fallthrough]] -# elif __cplusplus >= 201103L && \ - @KWSYS_NAMESPACE@__has_cpp_attribute(gnu::fallthrough) -# define @KWSYS_NAMESPACE@_FALLTHROUGH [[gnu::fallthrough]] -# elif __cplusplus >= 201103L && \ - @KWSYS_NAMESPACE@__has_cpp_attribute(clang::fallthrough) -# define @KWSYS_NAMESPACE@_FALLTHROUGH [[clang::fallthrough]] -# endif -#endif -#ifndef @KWSYS_NAMESPACE@_FALLTHROUGH -# define @KWSYS_NAMESPACE@_FALLTHROUGH static_cast(0) -#endif - -#undef @KWSYS_NAMESPACE@__has_cpp_attribute - -/* If building a C++ file in kwsys itself, give the source file - access to the macros without a configured namespace. */ -#if defined(KWSYS_NAMESPACE) -# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# define kwsys @KWSYS_NAMESPACE@ -# endif -# define KWSYS_NAME_IS_KWSYS @KWSYS_NAMESPACE@_NAME_IS_KWSYS -# define KWSYS_STL_HAS_WSTRING @KWSYS_NAMESPACE@_STL_HAS_WSTRING -# define KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H \ - @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H -# define KWSYS_FALLTHROUGH @KWSYS_NAMESPACE@_FALLTHROUGH -# define KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP \ - @KWSYS_NAMESPACE@_SYSTEMTOOLS_USE_TRANSLATION_MAP -#endif - -#endif diff --git a/test/API/driver/kwsys/ConsoleBuf.hxx.in b/test/API/driver/kwsys/ConsoleBuf.hxx.in deleted file mode 100644 index 49dbdf7ea5f..00000000000 --- a/test/API/driver/kwsys/ConsoleBuf.hxx.in +++ /dev/null @@ -1,398 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_ConsoleBuf_hxx -#define @KWSYS_NAMESPACE@_ConsoleBuf_hxx - -#include <@KWSYS_NAMESPACE@/Configure.hxx> - -#include <@KWSYS_NAMESPACE@/Encoding.hxx> - -#include -#include -#include -#include -#include -#include - -#if defined(_WIN32) -# include -# if __cplusplus >= 201103L -# include -# endif -#endif - -namespace @KWSYS_NAMESPACE@ { -#if defined(_WIN32) - -template > -class BasicConsoleBuf : public std::basic_streambuf -{ -public: - typedef typename Traits::int_type int_type; - typedef typename Traits::char_type char_type; - - class Manager - { - public: - Manager(std::basic_ios& ios, const bool err = false) - : m_consolebuf(0) - { - m_ios = &ios; - try { - m_consolebuf = new BasicConsoleBuf(err); - m_streambuf = m_ios->rdbuf(m_consolebuf); - } catch (const std::runtime_error& ex) { - std::cerr << "Failed to create ConsoleBuf!" 
<< std::endl - << ex.what() << std::endl; - }; - } - - BasicConsoleBuf* GetConsoleBuf() { return m_consolebuf; } - - void SetUTF8Pipes() - { - if (m_consolebuf) { - m_consolebuf->input_pipe_codepage = CP_UTF8; - m_consolebuf->output_pipe_codepage = CP_UTF8; - m_consolebuf->activateCodepageChange(); - } - } - - ~Manager() - { - if (m_consolebuf) { - delete m_consolebuf; - m_ios->rdbuf(m_streambuf); - } - } - - private: - std::basic_ios* m_ios; - std::basic_streambuf* m_streambuf; - BasicConsoleBuf* m_consolebuf; - }; - - BasicConsoleBuf(const bool err = false) - : flush_on_newline(true) - , input_pipe_codepage(0) - , output_pipe_codepage(0) - , input_file_codepage(CP_UTF8) - , output_file_codepage(CP_UTF8) - , m_consolesCodepage(0) - { - m_hInput = ::GetStdHandle(STD_INPUT_HANDLE); - checkHandle(true, "STD_INPUT_HANDLE"); - if (!setActiveInputCodepage()) { - throw std::runtime_error("setActiveInputCodepage failed!"); - } - m_hOutput = err ? ::GetStdHandle(STD_ERROR_HANDLE) - : ::GetStdHandle(STD_OUTPUT_HANDLE); - checkHandle(false, err ? "STD_ERROR_HANDLE" : "STD_OUTPUT_HANDLE"); - if (!setActiveOutputCodepage()) { - throw std::runtime_error("setActiveOutputCodepage failed!"); - } - _setg(); - _setp(); - } - - ~BasicConsoleBuf() throw() { sync(); } - - bool activateCodepageChange() - { - return setActiveInputCodepage() && setActiveOutputCodepage(); - } - -protected: - virtual int sync() - { - bool success = true; - if (m_hInput && m_isConsoleInput && - ::FlushConsoleInputBuffer(m_hInput) == 0) { - success = false; - } - if (m_hOutput && !m_obuffer.empty()) { - const std::wstring wbuffer = getBuffer(m_obuffer); - if (m_isConsoleOutput) { - DWORD charsWritten; - success = - ::WriteConsoleW(m_hOutput, wbuffer.c_str(), (DWORD)wbuffer.size(), - &charsWritten, nullptr) == 0 - ? false - : true; - } else { - DWORD bytesWritten; - std::string buffer; - success = encodeOutputBuffer(wbuffer, buffer); - if (success) { - success = - ::WriteFile(m_hOutput, buffer.c_str(), (DWORD)buffer.size(), - &bytesWritten, nullptr) == 0 - ? false - : true; - } - } - } - m_ibuffer.clear(); - m_obuffer.clear(); - _setg(); - _setp(); - return success ? 
0 : -1; - } - - virtual int_type underflow() - { - if (this->gptr() >= this->egptr()) { - if (!m_hInput) { - _setg(true); - return Traits::eof(); - } - if (m_isConsoleInput) { - // ReadConsole doesn't tell if there's more input available - // don't support reading more characters than this - wchar_t wbuffer[8192]; - DWORD charsRead; - if (ReadConsoleW(m_hInput, wbuffer, - (sizeof(wbuffer) / sizeof(wbuffer[0])), &charsRead, - nullptr) == 0 || - charsRead == 0) { - _setg(true); - return Traits::eof(); - } - setBuffer(std::wstring(wbuffer, charsRead), m_ibuffer); - } else { - std::wstring wbuffer; - std::string strbuffer; - DWORD bytesRead; - LARGE_INTEGER size; - if (GetFileSizeEx(m_hInput, &size) == 0) { - _setg(true); - return Traits::eof(); - } - char* buffer = new char[size.LowPart]; - while (ReadFile(m_hInput, buffer, size.LowPart, &bytesRead, nullptr) == - 0) { - if (GetLastError() == ERROR_MORE_DATA) { - strbuffer += std::string(buffer, bytesRead); - continue; - } - _setg(true); - delete[] buffer; - return Traits::eof(); - } - if (bytesRead > 0) { - strbuffer += std::string(buffer, bytesRead); - } - delete[] buffer; - if (!decodeInputBuffer(strbuffer, wbuffer)) { - _setg(true); - return Traits::eof(); - } - setBuffer(wbuffer, m_ibuffer); - } - _setg(); - } - return Traits::to_int_type(*this->gptr()); - } - - virtual int_type overflow(int_type ch = Traits::eof()) - { - if (!Traits::eq_int_type(ch, Traits::eof())) { - char_type chr = Traits::to_char_type(ch); - m_obuffer += chr; - if ((flush_on_newline && Traits::eq(chr, '\n')) || - Traits::eq_int_type(ch, 0x00)) { - sync(); - } - return ch; - } - sync(); - return Traits::eof(); - } - -public: - bool flush_on_newline; - UINT input_pipe_codepage; - UINT output_pipe_codepage; - UINT input_file_codepage; - UINT output_file_codepage; - -private: - HANDLE m_hInput; - HANDLE m_hOutput; - std::basic_string m_ibuffer; - std::basic_string m_obuffer; - bool m_isConsoleInput; - bool m_isConsoleOutput; - UINT m_activeInputCodepage; - UINT m_activeOutputCodepage; - UINT m_consolesCodepage; - void checkHandle(bool input, std::string handleName) - { - if ((input && m_hInput == INVALID_HANDLE_VALUE) || - (!input && m_hOutput == INVALID_HANDLE_VALUE)) { - std::string errmsg = - "GetStdHandle(" + handleName + ") returned INVALID_HANDLE_VALUE"; -# if __cplusplus >= 201103L - throw std::system_error(::GetLastError(), std::system_category(), - errmsg); -# else - throw std::runtime_error(errmsg); -# endif - } - } - UINT getConsolesCodepage() - { - if (!m_consolesCodepage) { - m_consolesCodepage = GetConsoleCP(); - if (!m_consolesCodepage) { - m_consolesCodepage = GetACP(); - } - } - return m_consolesCodepage; - } - bool setActiveInputCodepage() - { - m_isConsoleInput = false; - switch (GetFileType(m_hInput)) { - case FILE_TYPE_DISK: - m_activeInputCodepage = input_file_codepage; - break; - case FILE_TYPE_CHAR: - // Check for actual console. - DWORD consoleMode; - m_isConsoleInput = - GetConsoleMode(m_hInput, &consoleMode) == 0 ? 
false : true; - if (m_isConsoleInput) { - break; - } - @KWSYS_NAMESPACE@_FALLTHROUGH; - case FILE_TYPE_PIPE: - m_activeInputCodepage = input_pipe_codepage; - break; - default: - return false; - } - if (!m_isConsoleInput && m_activeInputCodepage == 0) { - m_activeInputCodepage = getConsolesCodepage(); - } - return true; - } - bool setActiveOutputCodepage() - { - m_isConsoleOutput = false; - switch (GetFileType(m_hOutput)) { - case FILE_TYPE_DISK: - m_activeOutputCodepage = output_file_codepage; - break; - case FILE_TYPE_CHAR: - // Check for actual console. - DWORD consoleMode; - m_isConsoleOutput = - GetConsoleMode(m_hOutput, &consoleMode) == 0 ? false : true; - if (m_isConsoleOutput) { - break; - } - @KWSYS_NAMESPACE@_FALLTHROUGH; - case FILE_TYPE_PIPE: - m_activeOutputCodepage = output_pipe_codepage; - break; - default: - return false; - } - if (!m_isConsoleOutput && m_activeOutputCodepage == 0) { - m_activeOutputCodepage = getConsolesCodepage(); - } - return true; - } - void _setg(bool empty = false) - { - if (!empty) { - this->setg((char_type*)m_ibuffer.data(), (char_type*)m_ibuffer.data(), - (char_type*)m_ibuffer.data() + m_ibuffer.size()); - } else { - this->setg((char_type*)m_ibuffer.data(), - (char_type*)m_ibuffer.data() + m_ibuffer.size(), - (char_type*)m_ibuffer.data() + m_ibuffer.size()); - } - } - void _setp() - { - this->setp((char_type*)m_obuffer.data(), - (char_type*)m_obuffer.data() + m_obuffer.size()); - } - bool encodeOutputBuffer(const std::wstring wbuffer, std::string& buffer) - { - if (wbuffer.size() == 0) { - buffer = std::string(); - return true; - } - const int length = - WideCharToMultiByte(m_activeOutputCodepage, 0, wbuffer.c_str(), - (int)wbuffer.size(), nullptr, 0, nullptr, nullptr); - char* buf = new char[length]; - const bool success = - WideCharToMultiByte(m_activeOutputCodepage, 0, wbuffer.c_str(), - (int)wbuffer.size(), buf, length, nullptr, - nullptr) > 0 - ? true - : false; - buffer = std::string(buf, length); - delete[] buf; - return success; - } - bool decodeInputBuffer(const std::string buffer, std::wstring& wbuffer) - { - size_t length = buffer.length(); - if (length == 0) { - wbuffer = std::wstring(); - return true; - } - int actualCodepage = m_activeInputCodepage; - const char BOM_UTF8[] = { char(0xEF), char(0xBB), char(0xBF) }; - const char* data = buffer.data(); - const size_t BOMsize = sizeof(BOM_UTF8); - if (length >= BOMsize && std::memcmp(data, BOM_UTF8, BOMsize) == 0) { - // PowerShell uses UTF-8 with BOM for pipes - actualCodepage = CP_UTF8; - data += BOMsize; - length -= BOMsize; - } - const size_t wlength = static_cast(MultiByteToWideChar( - actualCodepage, 0, data, static_cast(length), nullptr, 0)); - wchar_t* wbuf = new wchar_t[wlength]; - const bool success = - MultiByteToWideChar(actualCodepage, 0, data, static_cast(length), - wbuf, static_cast(wlength)) > 0 - ? 
true - : false; - wbuffer = std::wstring(wbuf, wlength); - delete[] wbuf; - return success; - } - std::wstring getBuffer(const std::basic_string buffer) - { - return Encoding::ToWide(buffer); - } - std::wstring getBuffer(const std::basic_string buffer) - { - return buffer; - } - void setBuffer(const std::wstring wbuffer, std::basic_string& target) - { - target = Encoding::ToNarrow(wbuffer); - } - void setBuffer(const std::wstring wbuffer, - std::basic_string& target) - { - target = wbuffer; - } - -}; // BasicConsoleBuf class - -typedef BasicConsoleBuf ConsoleBuf; -typedef BasicConsoleBuf WConsoleBuf; - -#endif -} // KWSYS_NAMESPACE - -#endif diff --git a/test/API/driver/kwsys/Copyright.txt b/test/API/driver/kwsys/Copyright.txt deleted file mode 100644 index 33d7fb47266..00000000000 --- a/test/API/driver/kwsys/Copyright.txt +++ /dev/null @@ -1,38 +0,0 @@ -KWSys - Kitware System Library -Copyright 2000-2016 Kitware, Inc. and Contributors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -* Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -* Neither the name of Kitware, Inc. nor the names of Contributors - may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------------------------------------------------------------------- - -The following individuals and institutions are among the Contributors: - -* Insight Software Consortium - -See version control history for details of individual contributions. diff --git a/test/API/driver/kwsys/Directory.cxx b/test/API/driver/kwsys/Directory.cxx deleted file mode 100644 index e3791826be7..00000000000 --- a/test/API/driver/kwsys/Directory.cxx +++ /dev/null @@ -1,236 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(Directory.hxx) - -#include KWSYS_HEADER(Configure.hxx) - -#include KWSYS_HEADER(Encoding.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. 
-#if 0 -# include "Configure.hxx.in" -# include "Directory.hxx.in" -# include "Encoding.hxx.in" -#endif - -#include -#include - -namespace KWSYS_NAMESPACE { - -class DirectoryInternals -{ -public: - // Array of Files - std::vector Files; - - // Path to Open'ed directory - std::string Path; -}; - -Directory::Directory() -{ - this->Internal = new DirectoryInternals; -} - -Directory::~Directory() -{ - delete this->Internal; -} - -unsigned long Directory::GetNumberOfFiles() const -{ - return static_cast(this->Internal->Files.size()); -} - -const char* Directory::GetFile(unsigned long dindex) const -{ - if (dindex >= this->Internal->Files.size()) { - return nullptr; - } - return this->Internal->Files[dindex].c_str(); -} - -const char* Directory::GetPath() const -{ - return this->Internal->Path.c_str(); -} - -void Directory::Clear() -{ - this->Internal->Path.resize(0); - this->Internal->Files.clear(); -} - -} // namespace KWSYS_NAMESPACE - -// First Windows platforms - -#if defined(_WIN32) && !defined(__CYGWIN__) -# include - -# include -# include -# include -# include -# include -# include -# include -# include - -// Wide function names can vary depending on compiler: -# ifdef __BORLANDC__ -# define _wfindfirst_func __wfindfirst -# define _wfindnext_func __wfindnext -# else -# define _wfindfirst_func _wfindfirst -# define _wfindnext_func _wfindnext -# endif - -namespace KWSYS_NAMESPACE { - -bool Directory::Load(const std::string& name) -{ - this->Clear(); -# if (defined(_MSC_VER) && _MSC_VER < 1300) || defined(__BORLANDC__) - // Older Visual C++ and Embarcadero compilers. - long srchHandle; -# else // Newer Visual C++ - intptr_t srchHandle; -# endif - char* buf; - size_t n = name.size(); - if (name.back() == '/' || name.back() == '\\') { - buf = new char[n + 1 + 1]; - sprintf(buf, "%s*", name.c_str()); - } else { - // Make sure the slashes in the wildcard suffix are consistent with the - // rest of the path - buf = new char[n + 2 + 1]; - if (name.find('\\') != std::string::npos) { - sprintf(buf, "%s\\*", name.c_str()); - } else { - sprintf(buf, "%s/*", name.c_str()); - } - } - struct _wfinddata_t data; // data of current file - - // Now put them into the file array - srchHandle = _wfindfirst_func( - (wchar_t*)Encoding::ToWindowsExtendedPath(buf).c_str(), &data); - delete[] buf; - - if (srchHandle == -1) { - return 0; - } - - // Loop through names - do { - this->Internal->Files.push_back(Encoding::ToNarrow(data.name)); - } while (_wfindnext_func(srchHandle, &data) != -1); - this->Internal->Path = name; - return _findclose(srchHandle) != -1; -} - -unsigned long Directory::GetNumberOfFilesInDirectory(const std::string& name) -{ -# if (defined(_MSC_VER) && _MSC_VER < 1300) || defined(__BORLANDC__) - // Older Visual C++ and Embarcadero compilers. 
- long srchHandle; -# else // Newer Visual C++ - intptr_t srchHandle; -# endif - char* buf; - size_t n = name.size(); - if (name.back() == '/') { - buf = new char[n + 1 + 1]; - sprintf(buf, "%s*", name.c_str()); - } else { - buf = new char[n + 2 + 1]; - sprintf(buf, "%s/*", name.c_str()); - } - struct _wfinddata_t data; // data of current file - - // Now put them into the file array - srchHandle = - _wfindfirst_func((wchar_t*)Encoding::ToWide(buf).c_str(), &data); - delete[] buf; - - if (srchHandle == -1) { - return 0; - } - - // Loop through names - unsigned long count = 0; - do { - count++; - } while (_wfindnext_func(srchHandle, &data) != -1); - _findclose(srchHandle); - return count; -} - -} // namespace KWSYS_NAMESPACE - -#else - -// Now the POSIX style directory access - -# include - -# include - -// PGI with glibc has trouble with dirent and large file support: -// http://www.pgroup.com/userforum/viewtopic.php? -// p=1992&sid=f16167f51964f1a68fe5041b8eb213b6 -// Work around the problem by mapping dirent the same way as readdir. -# if defined(__PGI) && defined(__GLIBC__) -# define kwsys_dirent_readdir dirent -# define kwsys_dirent_readdir64 dirent64 -# define kwsys_dirent kwsys_dirent_lookup(readdir) -# define kwsys_dirent_lookup(x) kwsys_dirent_lookup_delay(x) -# define kwsys_dirent_lookup_delay(x) kwsys_dirent_##x -# else -# define kwsys_dirent dirent -# endif - -namespace KWSYS_NAMESPACE { - -bool Directory::Load(const std::string& name) -{ - this->Clear(); - - DIR* dir = opendir(name.c_str()); - - if (!dir) { - return 0; - } - - for (kwsys_dirent* d = readdir(dir); d; d = readdir(dir)) { - this->Internal->Files.push_back(d->d_name); - } - this->Internal->Path = name; - closedir(dir); - return 1; -} - -unsigned long Directory::GetNumberOfFilesInDirectory(const std::string& name) -{ - DIR* dir = opendir(name.c_str()); - - if (!dir) { - return 0; - } - - unsigned long count = 0; - for (kwsys_dirent* d = readdir(dir); d; d = readdir(dir)) { - count++; - } - closedir(dir); - return count; -} - -} // namespace KWSYS_NAMESPACE - -#endif diff --git a/test/API/driver/kwsys/Directory.hxx.in b/test/API/driver/kwsys/Directory.hxx.in deleted file mode 100644 index ad8c51b86e2..00000000000 --- a/test/API/driver/kwsys/Directory.hxx.in +++ /dev/null @@ -1,72 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_Directory_hxx -#define @KWSYS_NAMESPACE@_Directory_hxx - -#include <@KWSYS_NAMESPACE@/Configure.h> - -#include - -namespace @KWSYS_NAMESPACE@ { - -class DirectoryInternals; - -/** \class Directory - * \brief Portable directory/filename traversal. - * - * Directory provides a portable way of finding the names of the files - * in a system directory. - * - * Directory currently works with Windows and Unix operating systems. - */ -class @KWSYS_NAMESPACE@_EXPORT Directory -{ -public: - Directory(); - ~Directory(); - - /** - * Load the specified directory and load the names of the files - * in that directory. 0 is returned if the directory can not be - * opened, 1 if it is opened. - */ - bool Load(const std::string&); - - /** - * Return the number of files in the current directory. - */ - unsigned long GetNumberOfFiles() const; - - /** - * Return the number of files in the specified directory. - * A higher performance static method. 
- */ - static unsigned long GetNumberOfFilesInDirectory(const std::string&); - - /** - * Return the file at the given index, the indexing is 0 based - */ - const char* GetFile(unsigned long) const; - - /** - * Return the path to Open'ed directory - */ - const char* GetPath() const; - - /** - * Clear the internal structure. Used internally at beginning of Load(...) - * to clear the cache. - */ - void Clear(); - -private: - // Private implementation details. - DirectoryInternals* Internal; - - Directory(const Directory&); // Not implemented. - void operator=(const Directory&); // Not implemented. -}; // End Class: Directory - -} // namespace @KWSYS_NAMESPACE@ - -#endif diff --git a/test/API/driver/kwsys/DynamicLoader.cxx b/test/API/driver/kwsys/DynamicLoader.cxx deleted file mode 100644 index a4b864118ce..00000000000 --- a/test/API/driver/kwsys/DynamicLoader.cxx +++ /dev/null @@ -1,495 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#if defined(_WIN32) -# define NOMINMAX // hide min,max to not conflict with -#endif - -#include "kwsysPrivate.h" -#include KWSYS_HEADER(DynamicLoader.hxx) - -#include KWSYS_HEADER(Configure.hxx) -#include KWSYS_HEADER(Encoding.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "Configure.hxx.in" -# include "DynamicLoader.hxx.in" -#endif - -// This file actually contains several different implementations: -// * NOOP for environments without dynamic libs -// * HP machines which uses shl_load -// * Mac OS X 10.2.x and earlier which uses NSLinkModule -// * Windows which uses LoadLibrary -// * BeOS / Haiku -// * FreeMiNT for Atari -// * Default implementation for *NIX systems (including Mac OS X 10.3 and -// later) which use dlopen -// -// Each part of the ifdef contains a complete implementation for -// the static methods of DynamicLoader. - -#define CHECK_OPEN_FLAGS(var, supported, ret) \ - do { \ - /* Check for unknown flags. */ \ - if ((var & AllOpenFlags) != var) { \ - return ret; \ - } \ - \ - /* Check for unsupported flags. 
*/ \ - if ((var & (supported)) != var) { \ - return ret; \ - } \ - } while (0) - -namespace KWSYS_NAMESPACE { - -DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( - const std::string& libname) -{ - return DynamicLoader::OpenLibrary(libname, 0); -} -} - -#if !KWSYS_SUPPORTS_SHARED_LIBS -// Implementation for environments without dynamic libs -# include // for strerror() - -namespace KWSYS_NAMESPACE { - -DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( - const std::string& libname, int flags) -{ - return 0; -} - -int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) -{ - if (!lib) { - return 0; - } - - return 1; -} - -DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( - DynamicLoader::LibraryHandle lib, const std::string& sym) -{ - return 0; -} - -const char* DynamicLoader::LastError() -{ - return "General error"; -} - -} // namespace KWSYS_NAMESPACE - -#elif defined(__hpux) -// Implementation for HPUX machines -# include -# include - -namespace KWSYS_NAMESPACE { - -DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( - const std::string& libname, int flags) -{ - CHECK_OPEN_FLAGS(flags, 0, 0); - - return shl_load(libname.c_str(), BIND_DEFERRED | DYNAMIC_PATH, 0L); -} - -int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) -{ - if (!lib) { - return 0; - } - return !shl_unload(lib); -} - -DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( - DynamicLoader::LibraryHandle lib, const std::string& sym) -{ - void* addr; - int status; - - /* TYPE_PROCEDURE Look for a function or procedure. (This used to be default) - * TYPE_DATA Look for a symbol in the data segment (for example, - * variables). - * TYPE_UNDEFINED Look for any symbol. - */ - status = shl_findsym(&lib, sym.c_str(), TYPE_UNDEFINED, &addr); - void* result = (status < 0) ? (void*)0 : addr; - - // Hack to cast pointer-to-data to pointer-to-function. - return *reinterpret_cast(&result); -} - -const char* DynamicLoader::LastError() -{ - // TODO: Need implementation with errno/strerror - /* If successful, shl_findsym returns an integer (int) value zero. If - * shl_findsym cannot find sym, it returns -1 and sets errno to zero. - * If any other errors occur, shl_findsym returns -1 and sets errno to one - * of these values (defined in ): - * ENOEXEC - * A format error was detected in the specified library. - * ENOSYM - * A symbol on which sym depends could not be found. - * EINVAL - * The specified handle is invalid. 
- */ - - if (errno == ENOEXEC || errno == ENOSYM || errno == EINVAL) { - return strerror(errno); - } - // else - return 0; -} - -} // namespace KWSYS_NAMESPACE - -#elif defined(__APPLE__) && (MAC_OS_X_VERSION_MAX_ALLOWED < 1030) -// Implementation for Mac OS X 10.2.x and earlier -# include -# include // for strlen - -namespace KWSYS_NAMESPACE { - -DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( - const std::string& libname, int flags) -{ - CHECK_OPEN_FLAGS(flags, 0, 0); - - NSObjectFileImageReturnCode rc; - NSObjectFileImage image = 0; - - rc = NSCreateObjectFileImageFromFile(libname.c_str(), &image); - // rc == NSObjectFileImageInappropriateFile when trying to load a dylib file - if (rc != NSObjectFileImageSuccess) { - return 0; - } - NSModule handle = NSLinkModule(image, libname.c_str(), - NSLINKMODULE_OPTION_BINDNOW | - NSLINKMODULE_OPTION_RETURN_ON_ERROR); - NSDestroyObjectFileImage(image); - return handle; -} - -int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) -{ - // NSUNLINKMODULE_OPTION_KEEP_MEMORY_MAPPED - // With this option the memory for the module is not deallocated - // allowing pointers into the module to still be valid. - // You should use this option instead if your code experience some problems - // reported against Panther 10.3.9 (fixed in Tiger 10.4.2 and up) - bool success = NSUnLinkModule(lib, NSUNLINKMODULE_OPTION_NONE); - return success; -} - -DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( - DynamicLoader::LibraryHandle lib, const std::string& sym) -{ - void* result = 0; - // Need to prepend symbols with '_' on Apple-gcc compilers - std::string rsym = '_' + sym; - - NSSymbol symbol = NSLookupSymbolInModule(lib, rsym.c_str()); - if (symbol) { - result = NSAddressOfSymbol(symbol); - } - - // Hack to cast pointer-to-data to pointer-to-function. - return *reinterpret_cast(&result); -} - -const char* DynamicLoader::LastError() -{ - return 0; -} - -} // namespace KWSYS_NAMESPACE - -#elif defined(_WIN32) && !defined(__CYGWIN__) -// Implementation for Windows win32 code but not cygwin -# include - -# include - -namespace KWSYS_NAMESPACE { - -DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( - const std::string& libname, int flags) -{ - CHECK_OPEN_FLAGS(flags, SearchBesideLibrary, nullptr); - - DWORD llFlags = 0; - if (flags & SearchBesideLibrary) { - llFlags |= LOAD_WITH_ALTERED_SEARCH_PATH; - } - - return LoadLibraryExW(Encoding::ToWindowsExtendedPath(libname).c_str(), - nullptr, llFlags); -} - -int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) -{ - return (int)FreeLibrary(lib); -} - -DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( - DynamicLoader::LibraryHandle lib, const std::string& sym) -{ - // TODO: The calling convention affects the name of the symbol. We - // should have a tool to help get the symbol with the desired - // calling convention. Currently we assume cdecl. - // - // Borland: - // __cdecl = "_func" (default) - // __fastcall = "@_func" - // __stdcall = "func" - // - // Watcom: - // __cdecl = "_func" - // __fastcall = "@_func@X" - // __stdcall = "_func@X" - // __watcall = "func_" (default) - // - // MSVC: - // __cdecl = "func" (default) - // __fastcall = "@_func@X" - // __stdcall = "_func@X" - // - // Note that the "@X" part of the name above is the total size (in - // bytes) of the arguments on the stack. 
- void* result; -# if defined(__BORLANDC__) || defined(__WATCOMC__) - // Need to prepend symbols with '_' - std::string ssym = '_' + sym; - const char* rsym = ssym.c_str(); -# else - const char* rsym = sym.c_str(); -# endif - result = (void*)GetProcAddress(lib, rsym); -// Hack to cast pointer-to-data to pointer-to-function. -# ifdef __WATCOMC__ - return *(DynamicLoader::SymbolPointer*)(&result); -# else - return *reinterpret_cast(&result); -# endif -} - -# define DYNLOAD_ERROR_BUFFER_SIZE 1024 - -const char* DynamicLoader::LastError() -{ - wchar_t lpMsgBuf[DYNLOAD_ERROR_BUFFER_SIZE + 1]; - - DWORD error = GetLastError(); - DWORD length = FormatMessageW( - FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, nullptr, error, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language - lpMsgBuf, DYNLOAD_ERROR_BUFFER_SIZE, nullptr); - - static char str[DYNLOAD_ERROR_BUFFER_SIZE + 1]; - - if (length < 1) { - /* FormatMessage failed. Use a default message. */ - _snprintf(str, DYNLOAD_ERROR_BUFFER_SIZE, - "DynamicLoader encountered error 0x%X. " - "FormatMessage failed with error 0x%X", - error, GetLastError()); - return str; - } - - if (!WideCharToMultiByte(CP_UTF8, 0, lpMsgBuf, -1, str, - DYNLOAD_ERROR_BUFFER_SIZE, nullptr, nullptr)) { - /* WideCharToMultiByte failed. Use a default message. */ - _snprintf(str, DYNLOAD_ERROR_BUFFER_SIZE, - "DynamicLoader encountered error 0x%X. " - "WideCharToMultiByte failed with error 0x%X", - error, GetLastError()); - } - - return str; -} - -} // namespace KWSYS_NAMESPACE - -#elif defined(__BEOS__) -// Implementation for BeOS / Haiku -# include // for strerror() - -# include -# include - -namespace KWSYS_NAMESPACE { - -static image_id last_dynamic_err = B_OK; - -DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( - const std::string& libname, int flags) -{ - CHECK_OPEN_FLAGS(flags, 0, 0); - - // image_id's are integers, errors are negative. Add one just in case we - // get a valid image_id of zero (is that even possible?). - image_id rc = load_add_on(libname.c_str()); - if (rc < 0) { - last_dynamic_err = rc; - return 0; - } - - return rc + 1; -} - -int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) -{ - if (!lib) { - last_dynamic_err = B_BAD_VALUE; - return 0; - } else { - // The function dlclose() returns 0 on success, and non-zero on error. - status_t rc = unload_add_on(lib - 1); - if (rc != B_OK) { - last_dynamic_err = rc; - return 0; - } - } - - return 1; -} - -DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( - DynamicLoader::LibraryHandle lib, const std::string& sym) -{ - // Hack to cast pointer-to-data to pointer-to-function. - union - { - void* pvoid; - DynamicLoader::SymbolPointer psym; - } result; - - result.psym = nullptr; - - if (!lib) { - last_dynamic_err = B_BAD_VALUE; - } else { - // !!! FIXME: BeOS can do function-only lookups...does this ever - // !!! FIXME: actually _want_ a data symbol lookup, or was this union - // !!! FIXME: a leftover of dlsym()? (s/ANY/TEXT for functions only). 
- status_t rc = - get_image_symbol(lib - 1, sym.c_str(), B_SYMBOL_TYPE_ANY, &result.pvoid); - if (rc != B_OK) { - last_dynamic_err = rc; - result.psym = nullptr; - } - } - return result.psym; -} - -const char* DynamicLoader::LastError() -{ - const char* retval = strerror(last_dynamic_err); - last_dynamic_err = B_OK; - return retval; -} - -} // namespace KWSYS_NAMESPACE - -#elif defined(__MINT__) -// Implementation for FreeMiNT on Atari -# define _GNU_SOURCE /* for program_invocation_name */ -# include -# include -# include -# include - -namespace KWSYS_NAMESPACE { - -DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( - const std::string& libname, int flags) -{ - CHECK_OPEN_FLAGS(flags, 0, nullptr); - - char* name = (char*)calloc(1, libname.size() + 1); - dld_init(program_invocation_name); - strncpy(name, libname.c_str(), libname.size()); - dld_link(libname.c_str()); - return (void*)name; -} - -int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) -{ - dld_unlink_by_file((char*)lib, 0); - free(lib); - return 0; -} - -DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( - DynamicLoader::LibraryHandle lib, const std::string& sym) -{ - // Hack to cast pointer-to-data to pointer-to-function. - union - { - void* pvoid; - DynamicLoader::SymbolPointer psym; - } result; - result.pvoid = dld_get_symbol(sym.c_str()); - return result.psym; -} - -const char* DynamicLoader::LastError() -{ - return dld_strerror(dld_errno); -} - -} // namespace KWSYS_NAMESPACE - -#else -// Default implementation for *NIX systems (including Mac OS X 10.3 and -// later) which use dlopen -# include - -namespace KWSYS_NAMESPACE { - -DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary( - const std::string& libname, int flags) -{ - CHECK_OPEN_FLAGS(flags, 0, nullptr); - - return dlopen(libname.c_str(), RTLD_LAZY); -} - -int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib) -{ - if (lib) { - // The function dlclose() returns 0 on success, and non-zero on error. - return !dlclose(lib); - } - // else - return 0; -} - -DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress( - DynamicLoader::LibraryHandle lib, const std::string& sym) -{ - // Hack to cast pointer-to-data to pointer-to-function. - union - { - void* pvoid; - DynamicLoader::SymbolPointer psym; - } result; - result.pvoid = dlsym(lib, sym.c_str()); - return result.psym; -} - -const char* DynamicLoader::LastError() -{ - return dlerror(); -} - -} // namespace KWSYS_NAMESPACE -#endif diff --git a/test/API/driver/kwsys/DynamicLoader.hxx.in b/test/API/driver/kwsys/DynamicLoader.hxx.in deleted file mode 100644 index 539c7425980..00000000000 --- a/test/API/driver/kwsys/DynamicLoader.hxx.in +++ /dev/null @@ -1,106 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_DynamicLoader_hxx -#define @KWSYS_NAMESPACE@_DynamicLoader_hxx - -#include <@KWSYS_NAMESPACE@/Configure.hxx> - -#include - -#if defined(__hpux) -# include -#elif defined(_WIN32) && !defined(__CYGWIN__) -# include -#elif defined(__APPLE__) -# include -# if MAC_OS_X_VERSION_MAX_ALLOWED < 1030 -# include -# endif -#elif defined(__BEOS__) -# include -#endif - -namespace @KWSYS_NAMESPACE@ { -/** \class DynamicLoader - * \brief Portable loading of dynamic libraries or dll's. - * - * DynamicLoader provides a portable interface to loading dynamic - * libraries or dll's into a process. 
- * - * Directory currently works with Windows, Apple, HP-UX and Unix (POSIX) - * operating systems - * - * \warning dlopen on *nix system works the following way: - * If filename contains a slash ("/"), then it is interpreted as a (relative - * or absolute) pathname. Otherwise, the dynamic linker searches for the - * library as follows : see ld.so(8) for further details): - * Whereas this distinction does not exist on Win32. Therefore ideally you - * should be doing full path to guarantee to have a consistent way of dealing - * with dynamic loading of shared library. - * - * \warning the Cygwin implementation do not use the Win32 HMODULE. Put extra - * condition so that we can include the correct declaration (POSIX) - */ - -class @KWSYS_NAMESPACE@_EXPORT DynamicLoader -{ -public: -// Ugly stuff for library handles -// They are different on several different OS's -#if defined(__hpux) - typedef shl_t LibraryHandle; -#elif defined(_WIN32) && !defined(__CYGWIN__) - typedef HMODULE LibraryHandle; -#elif defined(__APPLE__) -# if MAC_OS_X_VERSION_MAX_ALLOWED < 1030 - typedef NSModule LibraryHandle; -# else - typedef void* LibraryHandle; -# endif -#elif defined(__BEOS__) - typedef image_id LibraryHandle; -#else // POSIX - typedef void* LibraryHandle; -#endif - - // Return type from DynamicLoader::GetSymbolAddress. - typedef void (*SymbolPointer)(); - - enum OpenFlags - { - // Search for dependent libraries beside the library being loaded. - // - // This is currently only supported on Windows. - SearchBesideLibrary = 0x00000001, - - AllOpenFlags = SearchBesideLibrary - }; - - /** Load a dynamic library into the current process. - * The returned LibraryHandle can be used to access the symbols in the - * library. The optional second argument is a set of flags to use when - * opening the library. If unrecognized or unsupported flags are specified, - * the library is not opened. */ - static LibraryHandle OpenLibrary(const std::string&); - static LibraryHandle OpenLibrary(const std::string&, int); - - /** Attempt to detach a dynamic library from the - * process. A value of true is returned if it is successful. */ - static int CloseLibrary(LibraryHandle); - - /** Find the address of the symbol in the given library. */ - static SymbolPointer GetSymbolAddress(LibraryHandle, const std::string&); - - /** Return the default module prefix for the current platform. */ - static const char* LibPrefix() { return "@KWSYS_DynamicLoader_PREFIX@"; } - - /** Return the default module suffix for the current platform. */ - static const char* LibExtension() { return "@KWSYS_DynamicLoader_SUFFIX@"; } - - /** Return the last error produced from a calls made on this class. */ - static const char* LastError(); -}; // End Class: DynamicLoader - -} // namespace @KWSYS_NAMESPACE@ - -#endif diff --git a/test/API/driver/kwsys/Encoding.h.in b/test/API/driver/kwsys/Encoding.h.in deleted file mode 100644 index 86a26692abc..00000000000 --- a/test/API/driver/kwsys/Encoding.h.in +++ /dev/null @@ -1,69 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_Encoding_h -#define @KWSYS_NAMESPACE@_Encoding_h - -#include <@KWSYS_NAMESPACE@/Configure.h> - -#include - -/* Redefine all public interface symbol names to be in the proper - namespace. These macros are used internally to kwsys only, and are - not visible to user code. 
Use kwsysHeaderDump.pl to reproduce - these macros after making changes to the interface. */ -#if !defined(KWSYS_NAMESPACE) -# define kwsys_ns(x) @KWSYS_NAMESPACE@##x -# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT -#endif -#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# define kwsysEncoding kwsys_ns(Encoding) -# define kwsysEncoding_mbstowcs kwsys_ns(Encoding_mbstowcs) -# define kwsysEncoding_DupToWide kwsys_ns(Encoding_DupToWide) -# define kwsysEncoding_wcstombs kwsys_ns(Encoding_wcstombs) -# define kwsysEncoding_DupToNarrow kwsys_ns(Encoding_DupToNarrow) -#endif - -#if defined(__cplusplus) -extern "C" { -#endif - -/* Convert a narrow string to a wide string. - On Windows, UTF-8 is assumed, and on other platforms, - the current locale is assumed. - */ -kwsysEXPORT size_t kwsysEncoding_mbstowcs(wchar_t* dest, const char* src, - size_t n); - -/* Convert a narrow string to a wide string. - This can return NULL if the conversion fails. */ -kwsysEXPORT wchar_t* kwsysEncoding_DupToWide(const char* src); - -/* Convert a wide string to a narrow string. - On Windows, UTF-8 is assumed, and on other platforms, - the current locale is assumed. */ -kwsysEXPORT size_t kwsysEncoding_wcstombs(char* dest, const wchar_t* src, - size_t n); - -/* Convert a wide string to a narrow string. - This can return NULL if the conversion fails. */ -kwsysEXPORT char* kwsysEncoding_DupToNarrow(const wchar_t* str); - -#if defined(__cplusplus) -} /* extern "C" */ -#endif - -/* If we are building a kwsys .c or .cxx file, let it use these macros. - Otherwise, undefine them to keep the namespace clean. */ -#if !defined(KWSYS_NAMESPACE) -# undef kwsys_ns -# undef kwsysEXPORT -# if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# undef kwsysEncoding -# undef kwsysEncoding_mbstowcs -# undef kwsysEncoding_DupToWide -# undef kwsysEncoding_wcstombs -# undef kwsysEncoding_DupToNarrow -# endif -#endif - -#endif diff --git a/test/API/driver/kwsys/Encoding.hxx.in b/test/API/driver/kwsys/Encoding.hxx.in deleted file mode 100644 index 75a2d4d0f99..00000000000 --- a/test/API/driver/kwsys/Encoding.hxx.in +++ /dev/null @@ -1,80 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_Encoding_hxx -#define @KWSYS_NAMESPACE@_Encoding_hxx - -#include <@KWSYS_NAMESPACE@/Configure.hxx> - -#include -#include - -namespace @KWSYS_NAMESPACE@ { -class @KWSYS_NAMESPACE@_EXPORT Encoding -{ -public: - // Container class for argc/argv. - class @KWSYS_NAMESPACE@_EXPORT CommandLineArguments - { - public: - // On Windows, get the program command line arguments - // in this Encoding module's 8 bit encoding. - // On other platforms the given argc/argv is used, and - // to be consistent, should be the argc/argv from main(). - static CommandLineArguments Main(int argc, char const* const* argv); - - // Construct CommandLineArguments with the given - // argc/argv. It is assumed that the string is already - // in the encoding used by this module. - CommandLineArguments(int argc, char const* const* argv); - - // Construct CommandLineArguments with the given - // argc and wide argv. This is useful if wmain() is used. 
- CommandLineArguments(int argc, wchar_t const* const* argv); - ~CommandLineArguments(); - CommandLineArguments(const CommandLineArguments&); - CommandLineArguments& operator=(const CommandLineArguments&); - - int argc() const; - char const* const* argv() const; - - protected: - std::vector argv_; - }; - - /** - * Convert between char and wchar_t - */ - -#if @KWSYS_NAMESPACE@_STL_HAS_WSTRING - - // Convert a narrow string to a wide string. - // On Windows, UTF-8 is assumed, and on other platforms, - // the current locale is assumed. - static std::wstring ToWide(const std::string& str); - static std::wstring ToWide(const char* str); - - // Convert a wide string to a narrow string. - // On Windows, UTF-8 is assumed, and on other platforms, - // the current locale is assumed. - static std::string ToNarrow(const std::wstring& str); - static std::string ToNarrow(const wchar_t* str); - -# if defined(_WIN32) - /** - * Convert the path to an extended length path to avoid MAX_PATH length - * limitations on Windows. If the input is a local path the result will be - * prefixed with \\?\; if the input is instead a network path, the result - * will be prefixed with \\?\UNC\. All output will also be converted to - * absolute paths with Windows-style backslashes. - **/ - static std::wstring ToWindowsExtendedPath(std::string const&); - static std::wstring ToWindowsExtendedPath(const char* source); - static std::wstring ToWindowsExtendedPath(std::wstring const& wsource); -# endif - -#endif // @KWSYS_NAMESPACE@_STL_HAS_WSTRING - -}; // class Encoding -} // namespace @KWSYS_NAMESPACE@ - -#endif diff --git a/test/API/driver/kwsys/EncodingC.c b/test/API/driver/kwsys/EncodingC.c deleted file mode 100644 index e12236afe5a..00000000000 --- a/test/API/driver/kwsys/EncodingC.c +++ /dev/null @@ -1,72 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(Encoding.h) - -/* Work-around CMake dependency scanning limitation. This must - duplicate the above list of headers. 
*/ -#if 0 -# include "Encoding.h.in" -#endif - -#include - -#ifdef _WIN32 -# include -#endif - -size_t kwsysEncoding_mbstowcs(wchar_t* dest, const char* str, size_t n) -{ - if (str == 0) { - return (size_t)-1; - } -#ifdef _WIN32 - return MultiByteToWideChar(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str, -1, dest, - (int)n) - - 1; -#else - return mbstowcs(dest, str, n); -#endif -} - -wchar_t* kwsysEncoding_DupToWide(const char* str) -{ - wchar_t* ret = NULL; - size_t length = kwsysEncoding_mbstowcs(NULL, str, 0) + 1; - if (length > 0) { - ret = (wchar_t*)malloc((length) * sizeof(wchar_t)); - if (ret) { - ret[0] = 0; - kwsysEncoding_mbstowcs(ret, str, length); - } - } - return ret; -} - -size_t kwsysEncoding_wcstombs(char* dest, const wchar_t* str, size_t n) -{ - if (str == 0) { - return (size_t)-1; - } -#ifdef _WIN32 - return WideCharToMultiByte(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str, -1, dest, - (int)n, NULL, NULL) - - 1; -#else - return wcstombs(dest, str, n); -#endif -} - -char* kwsysEncoding_DupToNarrow(const wchar_t* str) -{ - char* ret = NULL; - size_t length = kwsysEncoding_wcstombs(0, str, 0) + 1; - if (length > 0) { - ret = (char*)malloc(length); - if (ret) { - ret[0] = 0; - kwsysEncoding_wcstombs(ret, str, length); - } - } - return ret; -} diff --git a/test/API/driver/kwsys/EncodingCXX.cxx b/test/API/driver/kwsys/EncodingCXX.cxx deleted file mode 100644 index 5cad934ec37..00000000000 --- a/test/API/driver/kwsys/EncodingCXX.cxx +++ /dev/null @@ -1,288 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifdef __osf__ -# define _OSF_SOURCE -# define _POSIX_C_SOURCE 199506L -# define _XOPEN_SOURCE_EXTENDED -#endif - -#include "kwsysPrivate.h" -#include KWSYS_HEADER(Encoding.hxx) -#include KWSYS_HEADER(Encoding.h) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "Encoding.h.in" -# include "Encoding.hxx.in" -#endif - -#include -#include -#include - -#ifdef _MSC_VER -# pragma warning(disable : 4786) -#endif - -// Windows API. 
-#if defined(_WIN32) -# include - -# include -# include -#endif - -namespace KWSYS_NAMESPACE { - -Encoding::CommandLineArguments Encoding::CommandLineArguments::Main( - int argc, char const* const* argv) -{ -#ifdef _WIN32 - (void)argc; - (void)argv; - - int ac; - LPWSTR* w_av = CommandLineToArgvW(GetCommandLineW(), &ac); - - std::vector av1(ac); - std::vector av2(ac); - for (int i = 0; i < ac; i++) { - av1[i] = ToNarrow(w_av[i]); - av2[i] = av1[i].c_str(); - } - LocalFree(w_av); - return CommandLineArguments(ac, &av2[0]); -#else - return CommandLineArguments(argc, argv); -#endif -} - -Encoding::CommandLineArguments::CommandLineArguments(int ac, - char const* const* av) -{ - this->argv_.resize(ac + 1); - for (int i = 0; i < ac; i++) { - this->argv_[i] = strdup(av[i]); - } - this->argv_[ac] = nullptr; -} - -Encoding::CommandLineArguments::CommandLineArguments(int ac, - wchar_t const* const* av) -{ - this->argv_.resize(ac + 1); - for (int i = 0; i < ac; i++) { - this->argv_[i] = kwsysEncoding_DupToNarrow(av[i]); - } - this->argv_[ac] = nullptr; -} - -Encoding::CommandLineArguments::~CommandLineArguments() -{ - for (size_t i = 0; i < this->argv_.size(); i++) { - free(argv_[i]); - } -} - -Encoding::CommandLineArguments::CommandLineArguments( - const CommandLineArguments& other) -{ - this->argv_.resize(other.argv_.size()); - for (size_t i = 0; i < this->argv_.size(); i++) { - this->argv_[i] = other.argv_[i] ? strdup(other.argv_[i]) : nullptr; - } -} - -Encoding::CommandLineArguments& Encoding::CommandLineArguments::operator=( - const CommandLineArguments& other) -{ - if (this != &other) { - size_t i; - for (i = 0; i < this->argv_.size(); i++) { - free(this->argv_[i]); - } - - this->argv_.resize(other.argv_.size()); - for (i = 0; i < this->argv_.size(); i++) { - this->argv_[i] = other.argv_[i] ? 
strdup(other.argv_[i]) : nullptr; - } - } - - return *this; -} - -int Encoding::CommandLineArguments::argc() const -{ - return static_cast(this->argv_.size() - 1); -} - -char const* const* Encoding::CommandLineArguments::argv() const -{ - return &this->argv_[0]; -} - -#if KWSYS_STL_HAS_WSTRING - -std::wstring Encoding::ToWide(const std::string& str) -{ - std::wstring wstr; -# if defined(_WIN32) - const int wlength = - MultiByteToWideChar(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.data(), - int(str.size()), nullptr, 0); - if (wlength > 0) { - wchar_t* wdata = new wchar_t[wlength]; - int r = MultiByteToWideChar(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.data(), - int(str.size()), wdata, wlength); - if (r > 0) { - wstr = std::wstring(wdata, wlength); - } - delete[] wdata; - } -# else - size_t pos = 0; - size_t nullPos = 0; - do { - if (pos < str.size() && str.at(pos) != '\0') { - wstr += ToWide(str.c_str() + pos); - } - nullPos = str.find('\0', pos); - if (nullPos != std::string::npos) { - pos = nullPos + 1; - wstr += wchar_t('\0'); - } - } while (nullPos != std::string::npos); -# endif - return wstr; -} - -std::string Encoding::ToNarrow(const std::wstring& str) -{ - std::string nstr; -# if defined(_WIN32) - int length = - WideCharToMultiByte(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.c_str(), - int(str.size()), nullptr, 0, nullptr, nullptr); - if (length > 0) { - char* data = new char[length]; - int r = - WideCharToMultiByte(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.c_str(), - int(str.size()), data, length, nullptr, nullptr); - if (r > 0) { - nstr = std::string(data, length); - } - delete[] data; - } -# else - size_t pos = 0; - size_t nullPos = 0; - do { - if (pos < str.size() && str.at(pos) != '\0') { - nstr += ToNarrow(str.c_str() + pos); - } - nullPos = str.find(wchar_t('\0'), pos); - if (nullPos != std::string::npos) { - pos = nullPos + 1; - nstr += '\0'; - } - } while (nullPos != std::string::npos); -# endif - return nstr; -} - -std::wstring Encoding::ToWide(const char* cstr) -{ - std::wstring wstr; - size_t length = kwsysEncoding_mbstowcs(nullptr, cstr, 0) + 1; - if (length > 0) { - std::vector wchars(length); - if (kwsysEncoding_mbstowcs(&wchars[0], cstr, length) > 0) { - wstr = &wchars[0]; - } - } - return wstr; -} - -std::string Encoding::ToNarrow(const wchar_t* wcstr) -{ - std::string str; - size_t length = kwsysEncoding_wcstombs(nullptr, wcstr, 0) + 1; - if (length > 0) { - std::vector chars(length); - if (kwsysEncoding_wcstombs(&chars[0], wcstr, length) > 0) { - str = &chars[0]; - } - } - return str; -} - -# if defined(_WIN32) -// Convert local paths to UNC style paths -std::wstring Encoding::ToWindowsExtendedPath(std::string const& source) -{ - return ToWindowsExtendedPath(ToWide(source)); -} - -// Convert local paths to UNC style paths -std::wstring Encoding::ToWindowsExtendedPath(const char* source) -{ - return ToWindowsExtendedPath(ToWide(source)); -} - -// Convert local paths to UNC style paths -std::wstring Encoding::ToWindowsExtendedPath(std::wstring const& wsource) -{ - // Resolve any relative paths - DWORD wfull_len; - - /* The +3 is a workaround for a bug in some versions of GetFullPathNameW that - * won't return a large enough buffer size if the input is too small */ - wfull_len = GetFullPathNameW(wsource.c_str(), 0, nullptr, nullptr) + 3; - std::vector wfull(wfull_len); - GetFullPathNameW(wsource.c_str(), wfull_len, &wfull[0], nullptr); - - /* This should get the correct size without any extra padding from the - * previous size workaround. 
*/ - wfull_len = static_cast(wcslen(&wfull[0])); - - if (wfull_len >= 2 && isalpha(wfull[0]) && - wfull[1] == L':') { /* C:\Foo\bar\FooBar.txt */ - return L"\\\\?\\" + std::wstring(&wfull[0]); - } else if (wfull_len >= 2 && wfull[0] == L'\\' && - wfull[1] == L'\\') { /* Starts with \\ */ - if (wfull_len >= 4 && wfull[2] == L'?' && - wfull[3] == L'\\') { /* Starts with \\?\ */ - if (wfull_len >= 8 && wfull[4] == L'U' && wfull[5] == L'N' && - wfull[6] == L'C' && - wfull[7] == L'\\') { /* \\?\UNC\Foo\bar\FooBar.txt */ - return std::wstring(&wfull[0]); - } else if (wfull_len >= 6 && isalpha(wfull[4]) && - wfull[5] == L':') { /* \\?\C:\Foo\bar\FooBar.txt */ - return std::wstring(&wfull[0]); - } else if (wfull_len >= 5) { /* \\?\Foo\bar\FooBar.txt */ - return L"\\\\?\\UNC\\" + std::wstring(&wfull[4]); - } - } else if (wfull_len >= 4 && wfull[2] == L'.' && - wfull[3] == L'\\') { /* Starts with \\.\ a device name */ - if (wfull_len >= 6 && isalpha(wfull[4]) && - wfull[5] == L':') { /* \\.\C:\Foo\bar\FooBar.txt */ - return L"\\\\?\\" + std::wstring(&wfull[4]); - } else if (wfull_len >= - 5) { /* \\.\Foo\bar\ Device name is left unchanged */ - return std::wstring(&wfull[0]); - } - } else if (wfull_len >= 3) { /* \\Foo\bar\FooBar.txt */ - return L"\\\\?\\UNC\\" + std::wstring(&wfull[2]); - } - } - - // If this case has been reached, then the path is invalid. Leave it - // unchanged - return wsource; -} -# endif - -#endif // KWSYS_STL_HAS_WSTRING - -} // namespace KWSYS_NAMESPACE diff --git a/test/API/driver/kwsys/ExtraTest.cmake.in b/test/API/driver/kwsys/ExtraTest.cmake.in deleted file mode 100644 index e8c0a1cdb19..00000000000 --- a/test/API/driver/kwsys/ExtraTest.cmake.in +++ /dev/null @@ -1 +0,0 @@ -MESSAGE("*** This message is generated by message inside a file that is included in DartTestfile.txt ***") diff --git a/test/API/driver/kwsys/FStream.cxx b/test/API/driver/kwsys/FStream.cxx deleted file mode 100644 index 5e4133ac564..00000000000 --- a/test/API/driver/kwsys/FStream.cxx +++ /dev/null @@ -1,55 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(FStream.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. 
-#if 0 -# include "FStream.hxx.in" -#endif - -namespace KWSYS_NAMESPACE { -namespace FStream { - -BOM ReadBOM(std::istream& in) -{ - if (!in.good()) { - return BOM_None; - } - unsigned long orig = in.tellg(); - unsigned char bom[4]; - in.read(reinterpret_cast(bom), 2); - if (!in.good()) { - in.clear(); - in.seekg(orig); - return BOM_None; - } - if (bom[0] == 0xEF && bom[1] == 0xBB) { - in.read(reinterpret_cast(bom + 2), 1); - if (in.good() && bom[2] == 0xBF) { - return BOM_UTF8; - } - } else if (bom[0] == 0xFE && bom[1] == 0xFF) { - return BOM_UTF16BE; - } else if (bom[0] == 0x00 && bom[1] == 0x00) { - in.read(reinterpret_cast(bom + 2), 2); - if (in.good() && bom[2] == 0xFE && bom[3] == 0xFF) { - return BOM_UTF32BE; - } - } else if (bom[0] == 0xFF && bom[1] == 0xFE) { - unsigned long p = in.tellg(); - in.read(reinterpret_cast(bom + 2), 2); - if (in.good() && bom[2] == 0x00 && bom[3] == 0x00) { - return BOM_UTF32LE; - } - in.seekg(p); - return BOM_UTF16LE; - } - in.clear(); - in.seekg(orig); - return BOM_None; -} - -} // FStream namespace -} // KWSYS_NAMESPACE diff --git a/test/API/driver/kwsys/FStream.hxx.in b/test/API/driver/kwsys/FStream.hxx.in deleted file mode 100644 index d79bbdf16b9..00000000000 --- a/test/API/driver/kwsys/FStream.hxx.in +++ /dev/null @@ -1,278 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_FStream_hxx -#define @KWSYS_NAMESPACE@_FStream_hxx - -#include <@KWSYS_NAMESPACE@/Configure.hxx> - -#include <@KWSYS_NAMESPACE@/Encoding.hxx> - -#include -#if defined(_WIN32) -# if !defined(_MSC_VER) && @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H -# include -# endif -#endif - -namespace @KWSYS_NAMESPACE@ { -#if defined(_WIN32) && \ - (defined(_MSC_VER) || @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H) -# if defined(_NOEXCEPT) -# define @KWSYS_NAMESPACE@_FStream_NOEXCEPT _NOEXCEPT -# else -# define @KWSYS_NAMESPACE@_FStream_NOEXCEPT -# endif - -# if defined(_MSC_VER) - -template -class basic_filebuf : public std::basic_filebuf -{ -# if _MSC_VER >= 1400 -public: - typedef std::basic_filebuf my_base_type; - basic_filebuf* open(char const* s, std::ios_base::openmode mode) - { - const std::wstring wstr = Encoding::ToWindowsExtendedPath(s); - return static_cast(my_base_type::open(wstr.c_str(), mode)); - } -# endif -}; - -# else - -inline std::wstring getcmode(const std::ios_base::openmode mode) -{ - std::wstring cmode; - bool plus = false; - if (mode & std::ios_base::app) { - cmode += L"a"; - plus = mode & std::ios_base::in ? true : false; - } else if (mode & std::ios_base::trunc || - (mode & std::ios_base::out && (mode & std::ios_base::in) == 0)) { - cmode += L"w"; - plus = mode & std::ios_base::in ? true : false; - } else { - cmode += L"r"; - plus = mode & std::ios_base::out ? 
true : false; - } - if (plus) { - cmode += L"+"; - } - if (mode & std::ios_base::binary) { - cmode += L"b"; - } else { - cmode += L"t"; - } - return cmode; -}; - -# endif - -template > -class basic_efilebuf -{ -public: -# if defined(_MSC_VER) - typedef basic_filebuf internal_buffer_type; -# else - typedef __gnu_cxx::stdio_filebuf internal_buffer_type; -# endif - - basic_efilebuf() - : file_(0) - { - buf_ = 0; - } - - bool _open(char const* file_name, std::ios_base::openmode mode) - { - if (is_open() || file_) { - return false; - } -# if defined(_MSC_VER) - const bool success = buf_->open(file_name, mode) != 0; -# else - const std::wstring wstr = Encoding::ToWindowsExtendedPath(file_name); - bool success = false; - std::wstring cmode = getcmode(mode); - file_ = _wfopen(wstr.c_str(), cmode.c_str()); - if (file_) { - if (buf_) { - delete buf_; - } - buf_ = new internal_buffer_type(file_, mode); - success = true; - } -# endif - return success; - } - - bool is_open() - { - if (!buf_) { - return false; - } - return buf_->is_open(); - } - - bool is_open() const - { - if (!buf_) { - return false; - } - return buf_->is_open(); - } - - bool _close() - { - bool success = false; - if (buf_) { - success = buf_->close() != 0; -# if !defined(_MSC_VER) - if (file_) { - success = fclose(file_) == 0 ? success : false; - file_ = 0; - } -# endif - } - return success; - } - - static void _set_state(bool success, std::basic_ios* ios, - basic_efilebuf* efilebuf) - { -# if !defined(_MSC_VER) - ios->rdbuf(efilebuf->buf_); -# else - static_cast(efilebuf); -# endif - if (!success) { - ios->setstate(std::ios_base::failbit); - } else { - ios->clear(); - } - } - - ~basic_efilebuf() - { - if (buf_) { - delete buf_; - } - } - -protected: - internal_buffer_type* buf_; - FILE* file_; -}; - -template > -class basic_ifstream - : public std::basic_istream - , public basic_efilebuf -{ -public: - typedef typename basic_efilebuf::internal_buffer_type - internal_buffer_type; - typedef std::basic_istream internal_stream_type; - - basic_ifstream() - : internal_stream_type(new internal_buffer_type()) - { - this->buf_ = - static_cast(internal_stream_type::rdbuf()); - } - explicit basic_ifstream(char const* file_name, - std::ios_base::openmode mode = std::ios_base::in) - : internal_stream_type(new internal_buffer_type()) - { - this->buf_ = - static_cast(internal_stream_type::rdbuf()); - open(file_name, mode); - } - - void open(char const* file_name, - std::ios_base::openmode mode = std::ios_base::in) - { - mode = mode | std::ios_base::in; - this->_set_state(this->_open(file_name, mode), this, this); - } - - void close() { this->_set_state(this->_close(), this, this); } - - using basic_efilebuf::is_open; - - internal_buffer_type* rdbuf() const { return this->buf_; } - - ~basic_ifstream() @KWSYS_NAMESPACE@_FStream_NOEXCEPT { close(); } -}; - -template > -class basic_ofstream - : public std::basic_ostream - , public basic_efilebuf -{ - using basic_efilebuf::is_open; - -public: - typedef typename basic_efilebuf::internal_buffer_type - internal_buffer_type; - typedef std::basic_ostream internal_stream_type; - - basic_ofstream() - : internal_stream_type(new internal_buffer_type()) - { - this->buf_ = - static_cast(internal_stream_type::rdbuf()); - } - explicit basic_ofstream(char const* file_name, - std::ios_base::openmode mode = std::ios_base::out) - : internal_stream_type(new internal_buffer_type()) - { - this->buf_ = - static_cast(internal_stream_type::rdbuf()); - open(file_name, mode); - } - void open(char const* file_name, - 
std::ios_base::openmode mode = std::ios_base::out) - { - mode = mode | std::ios_base::out; - this->_set_state(this->_open(file_name, mode), this, this); - } - - void close() { this->_set_state(this->_close(), this, this); } - - internal_buffer_type* rdbuf() const { return this->buf_; } - - ~basic_ofstream() @KWSYS_NAMESPACE@_FStream_NOEXCEPT { close(); } -}; - -typedef basic_ifstream ifstream; -typedef basic_ofstream ofstream; - -# undef @KWSYS_NAMESPACE@_FStream_NOEXCEPT -#else -using std::ofstream; -using std::ifstream; -#endif - -namespace FStream { -enum BOM -{ - BOM_None, - BOM_UTF8, - BOM_UTF16BE, - BOM_UTF16LE, - BOM_UTF32BE, - BOM_UTF32LE -}; - -// Read a BOM, if one exists. -// If a BOM exists, the stream is advanced to after the BOM. -// This function requires a seekable stream (but not a relative -// seekable stream). -@KWSYS_NAMESPACE@_EXPORT BOM ReadBOM(std::istream& in); -} -} - -#endif diff --git a/test/API/driver/kwsys/GitSetup/.gitattributes b/test/API/driver/kwsys/GitSetup/.gitattributes deleted file mode 100644 index e96d1f8c503..00000000000 --- a/test/API/driver/kwsys/GitSetup/.gitattributes +++ /dev/null @@ -1,6 +0,0 @@ -.git* export-ignore - -config* eol=lf whitespace=indent-with-non-tab -git-* eol=lf whitespace=indent-with-non-tab -tips eol=lf whitespace=indent-with-non-tab -setup-* eol=lf whitespace=indent-with-non-tab diff --git a/test/API/driver/kwsys/GitSetup/LICENSE b/test/API/driver/kwsys/GitSetup/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/test/API/driver/kwsys/GitSetup/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/test/API/driver/kwsys/GitSetup/NOTICE b/test/API/driver/kwsys/GitSetup/NOTICE deleted file mode 100644 index 0d32c02eb69..00000000000 --- a/test/API/driver/kwsys/GitSetup/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Kitware Local Git Setup Scripts -Copyright 2010-2012 Kitware, Inc. - -This product includes software developed at Kitware, Inc. -(http://www.kitware.com/). diff --git a/test/API/driver/kwsys/GitSetup/README b/test/API/driver/kwsys/GitSetup/README deleted file mode 100644 index 2f9f1ec078d..00000000000 --- a/test/API/driver/kwsys/GitSetup/README +++ /dev/null @@ -1,87 +0,0 @@ -Kitware Local Git Setup Scripts - - -Introduction ------------- - -This is a collection of local Git development setup scripts meant for -inclusion in project source trees to aid their development workflow. -Project-specific information needed by the scripts may be configured -in a "config" file added next to them in the project. 
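As a rough illustration only (a sketch with placeholder URLs, not part of the deleted file; the values this project actually uses appear in the GitSetup/config hunk further below), such a "config" file is in git-config format and might contain:

	[hooks]
		url = https://host.example.com/utils/gitsetup.git
	[upstream]
		url = https://host.example.com/project/project.git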
- - -Import ------- - -A project may import these scripts into their source tree by -initializing a subtree merge. Bring up a Git prompt and set the -current working directory inside a clone of the target project. -Fetch the "setup" branch from the GitSetup repository: - - $ git fetch ../GitSetup setup:setup - -Prepare to merge the branch but place the content in a subdirectory. -Any prefix (with trailing '/') may be chosen so long as it is used -consistently within a project through the rest of these instructions: - - $ git merge -s ours --no-commit setup - $ git read-tree -u --prefix=Utilities/GitSetup/ setup - -Commit the merge with an informative message: - - $ git commit - ------------------------------------------------------------------------ - Merge branch 'setup' - - Add Utilities/GitSetup/ directory using subtree merge from - the general GitSetup repository "setup" branch. - ------------------------------------------------------------------------ - -Optionally add to the project ".gitattributes" file the line - - /Utilities/GitSetup export-ignore - -to exclude the GitSetup directory from inclusion by "git archive" -since it does not make sense in source tarballs. - - -Configuration -------------- - -Read the "Project configuration instructions" comment in each script. -Add a "config" file next to the scripts with desired configuration -(optionally copy and modify "config.sample"). For example, to -configure the "setup-hooks" script: - - $ git config -f Utilities/GitSetup/config hooks.url "$url" - -where "$url" is the project repository publishing the "hooks" branch. -When finished, add and commit the configuration file: - - $ git add Utilities/GitSetup/config - $ git commit - - -Update ------- - -A project may update these scripts from the GitSetup repository. -Bring up a Git prompt and set the current working directory inside a -clone of the target project. Fetch the "setup" branch from the -GitSetup repository: - - $ git fetch ../GitSetup setup:setup - -Merge the "setup" branch into the subtree: - - $ git merge -X subtree=Utilities/GitSetup setup - -where "Utilities/GitSetup" is the same prefix used during the import -setup, but without a trailing '/'. - - -License -------- - -Distributed under the Apache License 2.0. -See LICENSE and NOTICE for details. diff --git a/test/API/driver/kwsys/GitSetup/config b/test/API/driver/kwsys/GitSetup/config deleted file mode 100644 index cba4c146031..00000000000 --- a/test/API/driver/kwsys/GitSetup/config +++ /dev/null @@ -1,4 +0,0 @@ -[hooks] - url = https://gitlab.kitware.com/utils/gitsetup.git -[upstream] - url = https://gitlab.kitware.com/utils/kwsys.git diff --git a/test/API/driver/kwsys/GitSetup/config.sample b/test/API/driver/kwsys/GitSetup/config.sample deleted file mode 100644 index eeb468ba17b..00000000000 --- a/test/API/driver/kwsys/GitSetup/config.sample +++ /dev/null @@ -1,32 +0,0 @@ -# Kitware Local Git Setup Scripts - Sample Project Configuration -# -# Copy to "config" and edit as necessary. 
- -[hooks] - url = http://public.kitware.com/GitSetup.git - #branch = hooks - -[ssh] - host = public.kitware.com - key = id_git_public - request-url = https://www.kitware.com/Admin/SendPassword.cgi - -[stage] - #url = git://public.kitware.com/stage/Project.git - #pushurl = git@public.kitware.com:stage/Project.git - -[gerrit] - #project = Project - site = http://review.source.kitware.com - # pushurl placeholder "$username" is literal - pushurl = $username@review.source.kitware.com:Project - -[upstream] - url = git://public.kitware.com/Project.git - -[gitlab] - host = gitlab.kitware.com - group-path = group - group-name = Group - project-path = project - project-name = Project diff --git a/test/API/driver/kwsys/GitSetup/git-gerrit-push b/test/API/driver/kwsys/GitSetup/git-gerrit-push deleted file mode 100644 index b46f753eb22..00000000000 --- a/test/API/driver/kwsys/GitSetup/git-gerrit-push +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash -#============================================================================= -# Copyright 2010-2015 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -USAGE="[] [--no-topic] [--dry-run] [--]" -OPTIONS_SPEC= -SUBDIRECTORY_OK=Yes -. "$(git --exec-path)/git-sh-setup" - -#----------------------------------------------------------------------------- - -remote='' -refspecs='' -no_topic='' -dry_run='' - -# Parse the command line options. -while test $# != 0; do - case "$1" in - --no-topic) no_topic=1 ;; - --dry-run) dry_run=--dry-run ;; - --) shift; break ;; - -*) usage ;; - *) test -z "$remote" || usage ; remote="$1" ;; - esac - shift -done -test $# = 0 || usage - -# Default remote. -test -n "$remote" || remote="gerrit" - -if test -z "$no_topic"; then - # Identify and validate the topic branch name. - head="$(git symbolic-ref HEAD)" && topic="${head#refs/heads/}" || topic='' - if test -z "$topic" -o "$topic" = "master"; then - die 'Please name your topic: - git checkout -b descriptive-name' - fi - # The topic branch will be pushed by name. - refspecs="HEAD:refs/for/master/$topic $refspecs" -fi - -# Fetch the current upstream master branch head. -# This helps computation of a minimal pack to push. -echo "Fetching $remote master" -fetch_out=$(git fetch "$remote" master 2>&1) || die "$fetch_out" - -# Exit early if we have nothing to push. -if test -z "$refspecs"; then - echo 'Nothing to push!' - exit 0 -fi - -# Push. Save output and exit code. -echo "Pushing to $remote" -push_stdout=$(git push --porcelain $dry_run "$remote" $refspecs); push_exit=$? -echo "$push_stdout" - -# Reproduce the push exit code. 
-exit $push_exit diff --git a/test/API/driver/kwsys/GitSetup/git-gitlab-push b/test/API/driver/kwsys/GitSetup/git-gitlab-push deleted file mode 100644 index 768f8534ed7..00000000000 --- a/test/API/driver/kwsys/GitSetup/git-gitlab-push +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env bash -#============================================================================= -# Copyright 2010-2015 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -USAGE='[] [...] [--] - -OPTIONS - ---dry-run - Show what would be pushed without actually updating the destination - --f,--force - Force-push the topic HEAD to rewrite the destination branch - ---no-default - Do not push the default branch (e.g. master) - ---no-topic - Do not push the topic HEAD. -' -OPTIONS_SPEC= -SUBDIRECTORY_OK=Yes -. "$(git --exec-path)/git-sh-setup" - -egrep-q() { - egrep "$@" >/dev/null 2>/dev/null -} - -# Load the project configuration. -gitlab_upstream='' && -gitlab_configured='' && -config="${BASH_SOURCE%/*}/config" && -protocol=$(git config -f "$config" --get gitlab.protocol || - echo "https") && -host=$(git config -f "$config" --get gitlab.host) && -site=$(git config -f "$config" --get gitlab.site || - echo "$protocol://$host") && -group_path=$(git config -f "$config" --get gitlab.group-path) && -project_path=$(git config -f "$config" --get gitlab.project-path) && -gitlab_upstream="$site/$group_path/$project_path.git" && -gitlab_pushurl=$(git config --get remote.gitlab.pushurl || - git config --get remote.gitlab.url) && -gitlab_configured=1 - -#----------------------------------------------------------------------------- - -remote='' -refspecs='' -force='' -lease=false -lease_flag='' -no_topic='' -no_default='' -dry_run='' - -# Parse the command line options. -while test $# != 0; do - case "$1" in - -f|--force) force='+'; lease=true ;; - --no-topic) no_topic=1 ;; - --dry-run) dry_run=--dry-run ;; - --no-default) no_default=1 ;; - --) shift; break ;; - -*) usage ;; - *) test -z "$remote" || usage ; remote="$1" ;; - esac - shift -done -test $# = 0 || usage - -# Default remote. -test -n "$remote" || remote="gitlab" - -if test -z "$no_topic"; then - # Identify and validate the topic branch name. - head="$(git symbolic-ref HEAD)" && topic="${head#refs/heads/}" || topic='' - if test -z "$topic" -o "$topic" = "master"; then - die 'Please name your topic: - git checkout -b descriptive-name' - fi - - if $lease; then - have_ref=false - remoteref="refs/remotes/$remote/$topic" - if git rev-parse --verify -q "$remoteref"; then - have_ref=true - else - die "It seems that a local ref for the branch is -missing; forcing a push is dangerous and may overwrite -previous work. Fetch from the $remote remote first or -push without '-f' or '--force'." - fi - - have_lease_flag=false - if git push -h | egrep-q -e '--force-with-lease'; then - have_lease_flag=true - fi - - if $have_lease_flag && $have_ref; then - # Set the lease flag. 
- lease_flag="--force-with-lease=$topic:$remoteref" - # Clear the force string. - force='' - fi - fi - - # The topic branch will be pushed by name. - refspecs="${force}HEAD:refs/heads/$topic $refspecs" -fi - -# Fetch the current remote master branch head. -# This helps computation of a minimal pack to push. -echo "Fetching $remote master" -fetch_out=$(git fetch "$remote" master 2>&1) || die "$fetch_out" -gitlab_head=$(git rev-parse FETCH_HEAD) || exit - -# Fetch the current upstream master branch head. -if origin_fetchurl=$(git config --get remote.origin.url) && - test "$origin_fetchurl" = "$gitlab_upstream"; then - upstream_remote='origin' -else - upstream_remote="$gitlab_upstream" -fi -echo "Fetching $upstream_remote master" -fetch_out=$(git fetch "$upstream_remote" master 2>&1) || die "$fetch_out" -upstream_head=$(git rev-parse FETCH_HEAD) || exit - -# Add a refspec to keep the remote master up to date if possible. -if test -z "$no_default" && - base=$(git merge-base "$gitlab_head" "$upstream_head") && - test "$base" = "$gitlab_head"; then - refspecs="$upstream_head:refs/heads/master $refspecs" -fi - -# Exit early if we have nothing to push. -if test -z "$refspecs"; then - echo 'Nothing to push!' - exit 0 -fi - -# Push. Save output and exit code. -echo "Pushing to $remote" -push_config='-c advice.pushUpdateRejected=false' -push_stdout=$(git $push_config push $lease_flag --porcelain $dry_run "$remote" $refspecs); push_exit=$? -echo "$push_stdout" - -if test "$push_exit" -ne 0 && test -z "$force"; then - # Advise the user to fetch if needed. - if echo "$push_stdout" | egrep-q 'stale info'; then - echo " -You have pushed to your branch from another machine; you may be overwriting -commits unintentionally. Fetch from the $remote remote and check that you are -not pushing an outdated branch." - fi - - # Advise the user to force-push if needed. - if echo "$push_stdout" | egrep-q 'non-fast-forward'; then - echo ' -Add "-f" or "--force" to push a rewritten topic.' - fi -fi - -# Reproduce the push exit code. -exit $push_exit diff --git a/test/API/driver/kwsys/GitSetup/pre-commit b/test/API/driver/kwsys/GitSetup/pre-commit deleted file mode 100644 index 1f1d3f52959..00000000000 --- a/test/API/driver/kwsys/GitSetup/pre-commit +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -egrep-q() { - egrep "$@" >/dev/null 2>/dev/null -} - -die() { - echo 'pre-commit hook failure' 1>&2 - echo '-----------------------' 1>&2 - echo '' 1>&2 - echo "$@" 1>&2 - exit 1 -} - -#----------------------------------------------------------------------------- - -# Check that developmer setup is up-to-date. -lastSetupForDevelopment=$(git config --get hooks.SetupForDevelopment || echo 0) -eval $(grep '^SetupForDevelopment_VERSION=' "${BASH_SOURCE%/*}/../SetupForDevelopment.sh") -test -n "$SetupForDevelopment_VERSION" || SetupForDevelopment_VERSION=0 -if test $lastSetupForDevelopment -lt $SetupForDevelopment_VERSION; then - die 'Developer setup in this work tree is out of date. Please re-run - - ./SetupForDevelopment.sh -' -fi diff --git a/test/API/driver/kwsys/GitSetup/setup-aliases b/test/API/driver/kwsys/GitSetup/setup-aliases deleted file mode 100644 index 98810adcfe3..00000000000 --- a/test/API/driver/kwsys/GitSetup/setup-aliases +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash -echo "Adding 'git prepush' alias" && -git config alias.prepush 'log --graph --stat origin/master..' && -gerrit_disabled="KWSys no longer uses Gerrit. Please use GitLab." 
&& -git config alias.gerrit-push '!sh -c "echo '"${gerrit_disabled}"'"' && -true diff --git a/test/API/driver/kwsys/GitSetup/setup-gerrit b/test/API/driver/kwsys/GitSetup/setup-gerrit deleted file mode 100644 index 6d46e3ccf54..00000000000 --- a/test/API/driver/kwsys/GitSetup/setup-gerrit +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/env bash -#============================================================================= -# Copyright 2010-2012 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -# Run this script to set up the local Git repository to push to -# a Gerrit Code Review instance for this project. - -# Project configuration instructions: -# -# - Run a Gerrit Code Review server -# -# - Populate adjacent "config" file with: -# gerrit.site = Top Gerrit URL (not project-specific) -# gerrit.project = Name of project in Gerrit -# gerrit.pushurl = Review site push URL with "$username" placeholder -# gerrit.remote = Gerrit remote name, if not "gerrit" -# gerrit.url = Gerrit project URL, if not "$site/p/$project" -# optionally with "$username" placeholder - -die() { - echo 1>&2 "$@" ; exit 1 -} - -# Make sure we are inside the repository. -cd "${BASH_SOURCE%/*}" && - -# Load the project configuration. -site=$(git config -f config --get gerrit.site) && -project=$(git config -f config --get gerrit.project) && -remote=$(git config -f config --get gerrit.remote || - echo "gerrit") && -fetchurl_=$(git config -f config --get gerrit.url || - echo "$site/p/$project") && -pushurl_=$(git config -f config --get gerrit.pushurl || - git config -f config --get gerrit.url) || -die 'This project is not configured to use Gerrit.' - -# Get current gerrit push URL. -pushurl=$(git config --get remote."$remote".pushurl || - git config --get remote."$remote".url || echo '') && - -# Tell user about current configuration. -if test -n "$pushurl"; then - echo 'Remote "'"$remote"'" is currently configured to push to - - '"$pushurl"' -' && - read -ep 'Reconfigure Gerrit? [y/N]: ' ans && - if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then - setup=1 - else - setup='' - fi -else - echo 'Remote "'"$remote"'" is not yet configured. - -'"$project"' changes must be pushed to our Gerrit Code Review site: - - '"$site/p/$project"' - -Register a Gerrit account and select a username (used below). -You will need an OpenID: - - http://openid.net/get-an-openid/ -' && - read -ep 'Configure Gerrit? [Y/n]: ' ans && - if [ "$ans" == "n" ] || [ "$ans" == "N" ]; then - exit 0 - else - setup=1 - fi -fi && - -# Perform setup if necessary. -if test -n "$setup"; then - echo 'Sign-in to Gerrit to get/set your username at - - '"$site"'/#/settings - -Add your SSH public keys at - - '"$site"'/#/settings/ssh-keys -' && - read -ep "Gerrit username? 
[$USER]: " gu && - if test -z "$gu"; then - gu="$USER" - fi && - fetchurl="${fetchurl_/\$username/$gu}" && - if test -z "$pushurl"; then - git remote add "$remote" "$fetchurl" - else - git config remote."$remote".url "$fetchurl" - fi && - pushurl="${pushurl_/\$username/$gu}" && - if test "$pushurl" != "$fetchurl"; then - git config remote."$remote".pushurl "$pushurl" - fi && - echo 'Remote "'"$remote"'" is now configured to push to - - '"$pushurl"' -' -fi && - -# Optionally test Gerrit access. -if test -n "$pushurl"; then - read -ep 'Test access to Gerrit (SSH)? [y/N]: ' ans && - if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then - echo -n 'Testing Gerrit access by SSH...' - if git ls-remote --heads "$pushurl" >/dev/null; then - echo 'passed.' - else - echo 'failed.' && - die 'Could not access Gerrit. Add your SSH public keys at - - '"$site"'/#/settings/ssh-keys -' - fi - fi -fi && - -# Set up GerritId hook. -hook=$(git config --get hooks.GerritId || echo '') && -if test -z "$hook"; then - echo ' -Enabling GerritId hook to add a "Change-Id" footer to commit -messages for interaction with Gerrit. Run - - git config hooks.GerritId false - -to disable this feature (but you will be on your own).' && - git config hooks.GerritId true -else - echo 'GerritId hook already configured to "'"$hook"'".' -fi diff --git a/test/API/driver/kwsys/GitSetup/setup-gitlab b/test/API/driver/kwsys/GitSetup/setup-gitlab deleted file mode 100644 index 9c7574d1ea9..00000000000 --- a/test/API/driver/kwsys/GitSetup/setup-gitlab +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env bash -#============================================================================= -# Copyright 2010-2015 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -# Run this script to set up the local Git repository to push to -# a personal fork for this project in GitLab. - -# Project configuration instructions: -# -# - Run a GitLab server -# -# - Populate adjacent "config" file with: -# gitlab.protocol = Top GitLab protocol, if not 'https' -# gitlab.host = Top GitLab fully qualified host name -# gitlab.site = Top GitLab URL, if not "://" -# gitlab.group-name = Name of group containing project in GitLab -# gitlab.group-path = Path of group containing project in GitLab -# gitlab.project-name = Name of project within GitLab group -# gitlab.project-path = Path of project within GitLab group -# gitlab.url = GitLab push URL with "$username" placeholder, -# if not "/$username/.git" -# gitlab.pushurl = GitLab push URL with "$username" placeholder, -# if not "git@:$username/.git" -# gitlab.remote = GitLab remote name, if not "gitlab" - -die() { - echo 1>&2 "$@" ; exit 1 -} - -# Make sure we are inside the repository. -cd "${BASH_SOURCE%/*}" && - -# Load the project configuration. 
-protocol=$(git config -f config --get gitlab.protocol || - echo "https") && -host=$(git config -f config --get gitlab.host) && -site=$(git config -f config --get gitlab.site || - echo "$protocol://$host") && -group_path=$(git config -f config --get gitlab.group-path) && -group_name=$(git config -f config --get gitlab.group-name) && -project_name=$(git config -f config --get gitlab.project-name) && -project_path=$(git config -f config --get gitlab.project-path) && -pushurl_=$(git config -f config --get gitlab.pushurl || - echo "git@$host:\$username/$project_path.git") && -remote=$(git config -f config --get gitlab.remote || - echo "gitlab") && -fetchurl_=$(git config -f config --get gitlab.url || - echo "$site/\$username/$project_path.git") || -die 'This project is not configured to use GitLab.' - -# Get current gitlab push URL. -pushurl=$(git config --get remote."$remote".pushurl || - git config --get remote."$remote".url || echo '') && - -# Tell user about current configuration. -if test -n "$pushurl"; then - echo 'Remote "'"$remote"'" is currently configured to push to - - '"$pushurl"' -' && - read -ep 'Reconfigure GitLab? [y/N]: ' ans && - if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then - setup=1 - else - setup='' - fi -else - echo 'Remote "'"$remote"'" is not yet configured. -' && - read -ep 'Configure GitLab to contribute to '"$project_name"'? [Y/n]: ' ans && - if [ "$ans" == "n" ] || [ "$ans" == "N" ]; then - exit 0 - else - setup=1 - fi -fi && - -setup_instructions='Add your SSH public keys at - - '"$site"'/profile/keys - -Then visit the main repository at: - - '"$site/$group_path/$project_path"' - -and use the Fork button in the upper right. -' - -# Perform setup if necessary. -if test -n "$setup"; then - echo 'Sign-in to GitLab to get/set your username at - - '"$site/profile/account"' - -'"$setup_instructions" && - read -ep "GitLab username? [$USER]: " gu && - if test -z "$gu"; then - gu="$USER" - fi && - fetchurl="${fetchurl_/\$username/$gu}" && - if test -z "$pushurl"; then - git remote add "$remote" "$fetchurl" - else - git config remote."$remote".url "$fetchurl" - fi && - pushurl="${pushurl_/\$username/$gu}" && - git config remote."$remote".pushurl "$pushurl" && - echo 'Remote "'"$remote"'" is now configured to push to - - '"$pushurl"' -' -fi && - -# Optionally test GitLab access. -if test -n "$pushurl"; then - read -ep 'Test access to GitLab (SSH)? [y/N]: ' ans && - if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then - echo -n 'Testing GitLab access by SSH...' - if git ls-remote --heads "$pushurl" >/dev/null; then - echo 'passed.' - else - echo 'failed.' && - die 'Could not access your GitLab fork of this project. -'"$setup_instructions" - fi - fi -fi diff --git a/test/API/driver/kwsys/GitSetup/setup-hooks b/test/API/driver/kwsys/GitSetup/setup-hooks deleted file mode 100644 index ca07712d55a..00000000000 --- a/test/API/driver/kwsys/GitSetup/setup-hooks +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bash -#============================================================================= -# Copyright 2010-2012 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -# Run this script to set up local Git hooks for this project. - -# Project configuration instructions: -# -# - Publish a "hooks" branch in the project repository such that -# clones will have "refs/remotes/origin/hooks". -# -# - Populate adjacent "config" file with: -# hooks.url = Repository URL publishing "hooks" branch -# hooks.branch = Repository branch instead of "hooks" - -egrep-q() { - egrep "$@" >/dev/null 2>/dev/null -} - -die() { - echo 1>&2 "$@" ; exit 1 -} - -# Make sure we are inside the repository. -cd "${BASH_SOURCE%/*}" && - -# Select a hooks branch. -if url=$(git config --get hooks.url); then - # Fetch hooks from locally configured repository. - branch=$(git config hooks.branch || echo hooks) -elif git for-each-ref refs/remotes/origin/hooks 2>/dev/null | - egrep-q 'refs/remotes/origin/hooks$'; then - # Use hooks cloned from origin. - url=.. && branch=remotes/origin/hooks -elif url=$(git config -f config --get hooks.url); then - # Fetch hooks from project-configured repository. - branch=$(git config -f config hooks.branch || echo hooks) -else - die 'This project is not configured to install local hooks.' -fi && - -# Populate ".git/hooks". -echo 'Setting up git hooks...' && -git_dir=$(git rev-parse --git-dir) && -mkdir -p "$git_dir/hooks" && -cd "$git_dir/hooks" && -if ! test -e .git; then - git init -q || die 'Could not run git init for hooks.' -fi && -git fetch -q "$url" "$branch" && -git reset -q --hard FETCH_HEAD || die 'Failed to install hooks' diff --git a/test/API/driver/kwsys/GitSetup/setup-ssh b/test/API/driver/kwsys/GitSetup/setup-ssh deleted file mode 100644 index 8920a5bd338..00000000000 --- a/test/API/driver/kwsys/GitSetup/setup-ssh +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env bash -#============================================================================= -# Copyright 2010-2012 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -# Run this script to set up ssh push access to the repository host. - -# Project configuration instructions: -# -# - Populate adjacent "config" file with: -# ssh.host = Repository host name -# ssh.user = Username on host, if not "git" -# ssh.key = Local ssh key name -# ssh.request-url = Web page URL to request ssh access - -egrep-q() { - egrep "$@" >/dev/null 2>/dev/null -} - -die() { - echo 1>&2 "$@" ; exit 1 -} - -# Make sure we are inside the repository. -cd "${BASH_SOURCE%/*}" && - -# Load the project configuration. -host=$(git config -f config --get ssh.host) && -user=$(git config -f config --get ssh.user || echo git) && -key=$(git config -f config --get ssh.key) && -request_url=$(git config -f config --get ssh.request-url) || -die 'This project is not configured for ssh push access.' - -# Check for existing configuration. 
-if test -r ~/.ssh/config && - egrep-q 'Host[= ]'"${host//\./\\.}" ~/.ssh/config; then - echo 'Host "'"$host"'" is already in ~/.ssh/config' && - setup= && - question='Test' -else - echo 'Host "'"$host"'" not found in ~/.ssh/config' && - setup=1 && - question='Setup and test' -fi && - -# Ask the user whether to make changes. -echo '' && -read -ep "${question} push access by ssh to $user@$host? [y/N]: " access && -if test "$access" != "y" -a "$access" != "Y"; then - exit 0 -fi && - -# Setup host configuration if necessary. -if test -n "$setup"; then - if ! test -d ~/.ssh; then - mkdir -p ~/.ssh && - chmod 700 ~/.ssh - fi && - if ! test -f ~/.ssh/config; then - touch ~/.ssh/config && - chmod 600 ~/.ssh/config - fi && - ssh_config='Host='"$host"' - IdentityFile ~/.ssh/'"$key" && - echo "Adding to ~/.ssh/config: - -$ssh_config -" && - echo "$ssh_config" >> ~/.ssh/config && - if ! test -e ~/.ssh/"$key"; then - if test -f ~/.ssh/id_rsa; then - # Take care of the common case. - ln -s id_rsa ~/.ssh/"$key" - echo ' -Assuming ~/.ssh/id_rsa is the private key corresponding to the public key for - - '"$user@$host"' - -If this is incorrect place private key at "~/.ssh/'"$key"'".' - else - echo ' -Place the private key corresponding to the public key registered for - - '"$user@$host"' - -at "~/.ssh/'"$key"'".' - fi - read -e -n 1 -p 'Press any key to continue...' - fi -fi || exit 1 - -# Test access configuration. -echo 'Testing ssh push access to "'"$user@$host"'"...' && -if ! ssh "$user@$host" info; then - die 'No ssh push access to "'"$user@$host"'". You may need to request access at - - '"$request_url"' -' -fi diff --git a/test/API/driver/kwsys/GitSetup/setup-stage b/test/API/driver/kwsys/GitSetup/setup-stage deleted file mode 100644 index ce6ec457487..00000000000 --- a/test/API/driver/kwsys/GitSetup/setup-stage +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env bash -#============================================================================= -# Copyright 2010-2012 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -# Run this script to set up the topic stage for pushing changes. - -# Project configuration instructions: -# -# - Run a Topic Stage repository next to the main project repository. -# -# - Populate adjacent "config" file with: -# stage.url = Topic Stage repository URL -# stage.pushurl = Topic Stage push URL if not "$url" - -egrep-q() { - egrep "$@" >/dev/null 2>/dev/null -} - -die() { - echo 1>&2 "$@" ; exit 1 -} - -# Make sure we are inside the repository. -cd "${BASH_SOURCE%/*}" && - -# Load the project configuration. -fetchurl_=$(git config -f config --get stage.url) && -pushurl_=$(git config -f config --get stage.pushurl || echo "$fetchurl_") && -remote=$(git config -f config --get stage.remote || echo 'stage') || -die 'This project is not configured to use a topic stage.' - -# Get current stage push URL. 
-pushurl=$(git config --get remote."$remote".pushurl || - git config --get remote."$remote".url || echo '') && - -# Tell user about current configuration. -if test -n "$pushurl"; then - echo 'Remote "'"$remote"'" is currently configured to push to - - '"$pushurl"' -' && - read -ep 'Reconfigure Topic Stage? [y/N]: ' ans && - if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then - setup=1 - else - setup='' - fi -else - setup=1 -fi - -# Perform setup if necessary. -if test -n "$setup"; then - echo 'Setting up the topic stage...' && - fetchurl="${fetchurl_}" && - if test -z "$pushurl"; then - git remote add "$remote" "$fetchurl" - else - git config remote."$remote".url "$fetchurl" - fi && - pushurl="${pushurl_}" && - if test "$pushurl" != "$fetchurl"; then - git config remote."$remote".pushurl "$pushurl" - fi && - echo 'Remote "'"$remote"'" is now configured to push to - - '"$pushurl"' -' -fi || die 'Could not configure the topic stage remote.' diff --git a/test/API/driver/kwsys/GitSetup/setup-upstream b/test/API/driver/kwsys/GitSetup/setup-upstream deleted file mode 100644 index 92ce1dae5f3..00000000000 --- a/test/API/driver/kwsys/GitSetup/setup-upstream +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env bash -#============================================================================= -# Copyright 2010-2015 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -# Run this script to set up the local Git repository to use the -# preferred upstream repository URLs. - -# Project configuration instructions: -# -# - Populate adjacent "config" file with: -# upstream.url = Preferred fetch url for upstream remote -# upstream.remote = Preferred name for upstream remote, if not "origin" - -die() { - echo 1>&2 "$@" ; exit 1 -} - -# Make sure we are inside the repository. -cd "${BASH_SOURCE%/*}" && - -# Load the project configuration. -url=$(git config -f config --get upstream.url) && -remote=$(git config -f config --get upstream.remote || - echo 'origin') || -die 'This project is not configured to use a preferred upstream repository.' - -# Get current upstream URLs. -fetchurl=$(git config --get remote."$remote".url || echo '') && -pushurl=$(git config --get remote."$remote".pushurl || echo '') && - -if test "$fetchurl" = "$url"; then - echo 'Remote "'"$remote"'" already uses recommended upstream repository.' - exit 0 -fi - -upstream_recommend=' -We recommended configuring the "'"$remote"'" remote to fetch from upstream at - - '"$url"' -' - -# Tell user about current configuration. -if test -n "$fetchurl"; then - echo 'Remote "'"$remote"'" is currently configured to fetch from - - '"$fetchurl"' -' && - if test -n "$pushurl"; then - echo 'and push to - - '"$pushurl" - fi && - echo "$upstream_recommend" && - if test -n "$pushurl"; then - echo 'and to never push to it directly. -' - fi && - - read -ep 'Reconfigure "'"$remote"'" remote as recommended? 
[y/N]: ' ans && - if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then - setup=1 - else - setup='' - fi -else - echo 'Remote "'"$remote"'" is not yet configured.' && - echo "$upstream_recommend" && - read -ep 'Configure "'"$remote"'" remote as recommended? [Y/n]: ' ans && - if [ "$ans" == "n" ] || [ "$ans" == "N" ]; then - exit 0 - else - setup=1 - fi -fi && - -# Perform setup if necessary. -if test -n "$setup"; then - if test -z "$fetchurl"; then - git remote add "$remote" "$url" - else - git config remote."$remote".url "$url" && - if old=$(git config --get remote."$remote".pushurl); then - git config --unset remote."$remote".pushurl || - echo 'Warning: failed to unset remote.'"$remote"'.pushurl' - fi - fi && - echo 'Remote "'"$remote"'" is now configured to fetch from - - '"$url"' -' -fi diff --git a/test/API/driver/kwsys/GitSetup/setup-user b/test/API/driver/kwsys/GitSetup/setup-user deleted file mode 100644 index 1af439c45e4..00000000000 --- a/test/API/driver/kwsys/GitSetup/setup-user +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash -#============================================================================= -# Copyright 2010-2012 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -# Run this script to configure Git user info in this repository. - -# Project configuration instructions: NONE - -for (( ; ; )); do - user_name=$(git config user.name || echo '') && - user_email=$(git config user.email || echo '') && - if test -n "$user_name" -a -n "$user_email"; then - echo 'Your commits will record as Author: - - '"$user_name <$user_email>"' -' && - read -ep 'Is the author name and email address above correct? [Y/n] ' correct && - if test "$correct" != "n" -a "$correct" != "N"; then - break - fi - fi && - read -ep 'Enter your full name e.g. "John Doe": ' name && - read -ep 'Enter your email address e.g. "john@gmail.com": ' email && - git config user.name "$name" && - git config user.email "$email" -done diff --git a/test/API/driver/kwsys/GitSetup/tips b/test/API/driver/kwsys/GitSetup/tips deleted file mode 100644 index 784e1ed890d..00000000000 --- a/test/API/driver/kwsys/GitSetup/tips +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -#============================================================================= -# Copyright 2010-2012 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#============================================================================= - -# This script makes optional suggestions for working with Git. - -# Project configuration instructions: NONE - -egrep-q() { - egrep "$@" >/dev/null 2>/dev/null -} - -# Suggest color configuration. -if test -z "$(git config --get color.ui)"; then - echo ' -One may enable color output from Git commands with - - git config --global color.ui auto -' -fi - -# Suggest bash completion. -if ! bash -i -c 'echo $PS1' | egrep-q '__git_ps1'; then - echo ' -A dynamic, informative Git shell prompt can be obtained by sourcing -the git bash-completion script in your "~/.bashrc". Set the PS1 -environmental variable as suggested in the comments at the top of the -bash-completion script. You may need to install the bash-completion -package from your distribution to obtain it. -' -fi - -# Suggest merge tool. -if test -z "$(git config --get merge.tool)"; then - echo ' -One may configure Git to load a merge tool with - - git config merge.tool - -See "git help mergetool" for more information. -' -fi diff --git a/test/API/driver/kwsys/Glob.cxx b/test/API/driver/kwsys/Glob.cxx deleted file mode 100644 index 34bb0d0fe00..00000000000 --- a/test/API/driver/kwsys/Glob.cxx +++ /dev/null @@ -1,448 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(Glob.hxx) - -#include KWSYS_HEADER(Configure.hxx) - -#include KWSYS_HEADER(RegularExpression.hxx) -#include KWSYS_HEADER(SystemTools.hxx) -#include KWSYS_HEADER(Directory.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "Configure.hxx.in" -# include "Directory.hxx.in" -# include "Glob.hxx.in" -# include "RegularExpression.hxx.in" -# include "SystemTools.hxx.in" -#endif - -#include -#include -#include - -#include -#include -#include -namespace KWSYS_NAMESPACE { -#if defined(_WIN32) || defined(__APPLE__) || defined(__CYGWIN__) -// On Windows and Apple, no difference between lower and upper case -# define KWSYS_GLOB_CASE_INDEPENDENT -#endif - -#if defined(_WIN32) || defined(__CYGWIN__) -// Handle network paths -# define KWSYS_GLOB_SUPPORT_NETWORK_PATHS -#endif - -class GlobInternals -{ -public: - std::vector Files; - std::vector Expressions; -}; - -Glob::Glob() -{ - this->Internals = new GlobInternals; - this->Recurse = false; - this->Relative = ""; - - this->RecurseThroughSymlinks = true; - // RecurseThroughSymlinks is true by default for backwards compatibility, - // not because it's a good idea... - this->FollowedSymlinkCount = 0; - - // Keep separate variables for directory listing for back compatibility - this->ListDirs = true; - this->RecurseListDirs = false; -} - -Glob::~Glob() -{ - delete this->Internals; -} - -std::vector& Glob::GetFiles() -{ - return this->Internals->Files; -} - -std::string Glob::PatternToRegex(const std::string& pattern, - bool require_whole_string, bool preserve_case) -{ - // Incrementally build the regular expression from the pattern. - std::string regex = require_whole_string ? "^" : ""; - std::string::const_iterator pattern_first = pattern.begin(); - std::string::const_iterator pattern_last = pattern.end(); - for (std::string::const_iterator i = pattern_first; i != pattern_last; ++i) { - int c = *i; - if (c == '*') { - // A '*' (not between brackets) matches any string. 
- // We modify this to not match slashes since the original glob - // pattern documentation was meant for matching file name - // components separated by slashes. - regex += "[^/]*"; - } else if (c == '?') { - // A '?' (not between brackets) matches any single character. - // We modify this to not match slashes since the original glob - // pattern documentation was meant for matching file name - // components separated by slashes. - regex += "[^/]"; - } else if (c == '[') { - // Parse out the bracket expression. It begins just after the - // opening character. - std::string::const_iterator bracket_first = i + 1; - std::string::const_iterator bracket_last = bracket_first; - - // The first character may be complementation '!' or '^'. - if (bracket_last != pattern_last && - (*bracket_last == '!' || *bracket_last == '^')) { - ++bracket_last; - } - - // If the next character is a ']' it is included in the brackets - // because the bracket string may not be empty. - if (bracket_last != pattern_last && *bracket_last == ']') { - ++bracket_last; - } - - // Search for the closing ']'. - while (bracket_last != pattern_last && *bracket_last != ']') { - ++bracket_last; - } - - // Check whether we have a complete bracket string. - if (bracket_last == pattern_last) { - // The bracket string did not end, so it was opened simply by - // a '[' that is supposed to be matched literally. - regex += "\\["; - } else { - // Convert the bracket string to its regex equivalent. - std::string::const_iterator k = bracket_first; - - // Open the regex block. - regex += "["; - - // A regex range complement uses '^' instead of '!'. - if (k != bracket_last && *k == '!') { - regex += "^"; - ++k; - } - - // Convert the remaining characters. - for (; k != bracket_last; ++k) { - // Backslashes must be escaped. - if (*k == '\\') { - regex += "\\"; - } - - // Store this character. - regex += *k; - } - - // Close the regex block. - regex += "]"; - - // Jump to the end of the bracket string. - i = bracket_last; - } - } else { - // A single character matches itself. - int ch = c; - if (!(('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') || - ('0' <= ch && ch <= '9'))) { - // Escape the non-alphanumeric character. - regex += "\\"; - } -#if defined(KWSYS_GLOB_CASE_INDEPENDENT) - else { - // On case-insensitive systems file names are converted to lower - // case before matching. - if (!preserve_case) { - ch = tolower(ch); - } - } -#endif - (void)preserve_case; - // Store the character. - regex.append(1, static_cast(ch)); - } - } - - if (require_whole_string) { - regex += "$"; - } - return regex; -} - -bool Glob::RecurseDirectory(std::string::size_type start, - const std::string& dir, GlobMessages* messages) -{ - kwsys::Directory d; - if (!d.Load(dir)) { - return true; - } - unsigned long cc; - std::string realname; - std::string fname; - for (cc = 0; cc < d.GetNumberOfFiles(); cc++) { - fname = d.GetFile(cc); - if (fname == "." 
|| fname == "..") { - continue; - } - - if (start == 0) { - realname = dir + fname; - } else { - realname = dir + "/" + fname; - } - -#if defined(KWSYS_GLOB_CASE_INDEPENDENT) - // On Windows and Apple, no difference between lower and upper case - fname = kwsys::SystemTools::LowerCase(fname); -#endif - - bool isDir = kwsys::SystemTools::FileIsDirectory(realname); - bool isSymLink = kwsys::SystemTools::FileIsSymlink(realname); - - if (isDir && (!isSymLink || this->RecurseThroughSymlinks)) { - if (isSymLink) { - ++this->FollowedSymlinkCount; - std::string realPathErrorMessage; - std::string canonicalPath( - SystemTools::GetRealPath(dir, &realPathErrorMessage)); - - if (!realPathErrorMessage.empty()) { - if (messages) { - messages->push_back( - Message(Glob::error, - "Canonical path generation from path '" + dir + - "' failed! Reason: '" + realPathErrorMessage + "'")); - } - return false; - } - - if (std::find(this->VisitedSymlinks.begin(), - this->VisitedSymlinks.end(), - canonicalPath) == this->VisitedSymlinks.end()) { - if (this->RecurseListDirs) { - // symlinks are treated as directories - this->AddFile(this->Internals->Files, realname); - } - - this->VisitedSymlinks.push_back(canonicalPath); - if (!this->RecurseDirectory(start + 1, realname, messages)) { - this->VisitedSymlinks.pop_back(); - - return false; - } - this->VisitedSymlinks.pop_back(); - } - // else we have already visited this symlink - prevent cyclic recursion - else if (messages) { - std::string message; - for (std::vector::const_iterator pathIt = - std::find(this->VisitedSymlinks.begin(), - this->VisitedSymlinks.end(), canonicalPath); - pathIt != this->VisitedSymlinks.end(); ++pathIt) { - message += *pathIt + "\n"; - } - message += canonicalPath + "/" + fname; - messages->push_back(Message(Glob::cyclicRecursion, message)); - } - } else { - if (this->RecurseListDirs) { - this->AddFile(this->Internals->Files, realname); - } - if (!this->RecurseDirectory(start + 1, realname, messages)) { - return false; - } - } - } else { - if (!this->Internals->Expressions.empty() && - this->Internals->Expressions.back().find(fname)) { - this->AddFile(this->Internals->Files, realname); - } - } - } - - return true; -} - -void Glob::ProcessDirectory(std::string::size_type start, - const std::string& dir, GlobMessages* messages) -{ - // std::cout << "ProcessDirectory: " << dir << std::endl; - bool last = (start == this->Internals->Expressions.size() - 1); - if (last && this->Recurse) { - this->RecurseDirectory(start, dir, messages); - return; - } - - if (start >= this->Internals->Expressions.size()) { - return; - } - - kwsys::Directory d; - if (!d.Load(dir)) { - return; - } - unsigned long cc; - std::string realname; - std::string fname; - for (cc = 0; cc < d.GetNumberOfFiles(); cc++) { - fname = d.GetFile(cc); - if (fname == "." || fname == "..") { - continue; - } - - if (start == 0) { - realname = dir + fname; - } else { - realname = dir + "/" + fname; - } - -#if defined(KWSYS_GLOB_CASE_INDEPENDENT) - // On case-insensitive file systems convert to lower case for matching. 
- fname = kwsys::SystemTools::LowerCase(fname); -#endif - - // std::cout << "Look at file: " << fname << std::endl; - // std::cout << "Match: " - // << this->Internals->TextExpressions[start].c_str() << std::endl; - // std::cout << "Real name: " << realname << std::endl; - - if ((!last && !kwsys::SystemTools::FileIsDirectory(realname)) || - (!this->ListDirs && last && - kwsys::SystemTools::FileIsDirectory(realname))) { - continue; - } - - if (this->Internals->Expressions[start].find(fname)) { - if (last) { - this->AddFile(this->Internals->Files, realname); - } else { - this->ProcessDirectory(start + 1, realname, messages); - } - } - } -} - -bool Glob::FindFiles(const std::string& inexpr, GlobMessages* messages) -{ - std::string cexpr; - std::string::size_type cc; - std::string expr = inexpr; - - this->Internals->Expressions.clear(); - this->Internals->Files.clear(); - - if (!kwsys::SystemTools::FileIsFullPath(expr)) { - expr = kwsys::SystemTools::GetCurrentWorkingDirectory(); - expr += "/" + inexpr; - } - std::string fexpr = expr; - - std::string::size_type skip = 0; - std::string::size_type last_slash = 0; - for (cc = 0; cc < expr.size(); cc++) { - if (cc > 0 && expr[cc] == '/' && expr[cc - 1] != '\\') { - last_slash = cc; - } - if (cc > 0 && (expr[cc] == '[' || expr[cc] == '?' || expr[cc] == '*') && - expr[cc - 1] != '\\') { - break; - } - } - if (last_slash > 0) { - // std::cout << "I can skip: " << fexpr.substr(0, last_slash) - // << std::endl; - skip = last_slash; - } - if (skip == 0) { -#if defined(KWSYS_GLOB_SUPPORT_NETWORK_PATHS) - // Handle network paths - if (expr[0] == '/' && expr[1] == '/') { - int cnt = 0; - for (cc = 2; cc < expr.size(); cc++) { - if (expr[cc] == '/') { - cnt++; - if (cnt == 2) { - break; - } - } - } - skip = int(cc + 1); - } else -#endif - // Handle drive letters on Windows - if (expr[1] == ':' && expr[0] != '/') { - skip = 2; - } - } - - if (skip > 0) { - expr = expr.substr(skip); - } - - cexpr = ""; - for (cc = 0; cc < expr.size(); cc++) { - int ch = expr[cc]; - if (ch == '/') { - if (!cexpr.empty()) { - this->AddExpression(cexpr); - } - cexpr = ""; - } else { - cexpr.append(1, static_cast(ch)); - } - } - if (!cexpr.empty()) { - this->AddExpression(cexpr); - } - - // Handle network paths - if (skip > 0) { - this->ProcessDirectory(0, fexpr.substr(0, skip) + "/", messages); - } else { - this->ProcessDirectory(0, "/", messages); - } - return true; -} - -void Glob::AddExpression(const std::string& expr) -{ - this->Internals->Expressions.push_back( - kwsys::RegularExpression(this->PatternToRegex(expr))); -} - -void Glob::SetRelative(const char* dir) -{ - if (!dir) { - this->Relative = ""; - return; - } - this->Relative = dir; -} - -const char* Glob::GetRelative() -{ - if (this->Relative.empty()) { - return nullptr; - } - return this->Relative.c_str(); -} - -void Glob::AddFile(std::vector& files, const std::string& file) -{ - if (!this->Relative.empty()) { - files.push_back(kwsys::SystemTools::RelativePath(this->Relative, file)); - } else { - files.push_back(file); - } -} - -} // namespace KWSYS_NAMESPACE diff --git a/test/API/driver/kwsys/Glob.hxx.in b/test/API/driver/kwsys/Glob.hxx.in deleted file mode 100644 index 170766f4b1e..00000000000 --- a/test/API/driver/kwsys/Glob.hxx.in +++ /dev/null @@ -1,134 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/ -#ifndef @KWSYS_NAMESPACE@_Glob_hxx -#define @KWSYS_NAMESPACE@_Glob_hxx - -#include <@KWSYS_NAMESPACE@/Configure.h> -#include <@KWSYS_NAMESPACE@/Configure.hxx> - -#include -#include - -namespace @KWSYS_NAMESPACE@ { - -class GlobInternals; - -/** \class Glob - * \brief Portable globbing searches. - * - * Globbing expressions are much simpler than regular - * expressions. This class will search for files using - * globbing expressions. - * - * Finds all files that match a given globbing expression. - */ -class @KWSYS_NAMESPACE@_EXPORT Glob -{ -public: - enum MessageType - { - error, - cyclicRecursion - }; - - struct Message - { - MessageType type; - std::string content; - - Message(MessageType t, const std::string& c) - : type(t) - , content(c) - { - } - ~Message() = default; - Message(const Message& msg) = default; - Message& operator=(Message const& msg) = default; - }; - - typedef std::vector GlobMessages; - typedef std::vector::iterator GlobMessagesIterator; - -public: - Glob(); - ~Glob(); - - //! Find all files that match the pattern. - bool FindFiles(const std::string& inexpr, GlobMessages* messages = nullptr); - - //! Return the list of files that matched. - std::vector& GetFiles(); - - //! Set recurse to true to match subdirectories. - void RecurseOn() { this->SetRecurse(true); } - void RecurseOff() { this->SetRecurse(false); } - void SetRecurse(bool i) { this->Recurse = i; } - bool GetRecurse() { return this->Recurse; } - - //! Set recurse through symlinks to true if recursion should traverse the - // linked-to directories - void RecurseThroughSymlinksOn() { this->SetRecurseThroughSymlinks(true); } - void RecurseThroughSymlinksOff() { this->SetRecurseThroughSymlinks(false); } - void SetRecurseThroughSymlinks(bool i) { this->RecurseThroughSymlinks = i; } - bool GetRecurseThroughSymlinks() { return this->RecurseThroughSymlinks; } - - //! Get the number of symlinks followed through recursion - unsigned int GetFollowedSymlinkCount() { return this->FollowedSymlinkCount; } - - //! Set relative to true to only show relative path to files. - void SetRelative(const char* dir); - const char* GetRelative(); - - /** Convert the given globbing pattern to a regular expression. - There is no way to quote meta-characters. The - require_whole_string argument specifies whether the regex is - automatically surrounded by "^" and "$" to match the whole - string. This is on by default because patterns always match - whole strings, but may be disabled to support concatenating - expressions more easily (regex1|regex2|etc). */ - static std::string PatternToRegex(const std::string& pattern, - bool require_whole_string = true, - bool preserve_case = false); - - /** Getters and setters for enabling and disabling directory - listing in recursive and non recursive globbing mode. - If listing is enabled in recursive mode it also lists - directory symbolic links even if follow symlinks is enabled. */ - void SetListDirs(bool list) { this->ListDirs = list; } - bool GetListDirs() const { return this->ListDirs; } - void SetRecurseListDirs(bool list) { this->RecurseListDirs = list; } - bool GetRecurseListDirs() const { return this->RecurseListDirs; } - -protected: - //! Process directory - void ProcessDirectory(std::string::size_type start, const std::string& dir, - GlobMessages* messages); - - //! Process last directory, but only when recurse flags is on. 
That is - // effectively like saying: /path/to/file/**/file - bool RecurseDirectory(std::string::size_type start, const std::string& dir, - GlobMessages* messages); - - //! Add regular expression - void AddExpression(const std::string& expr); - - //! Add a file to the list - void AddFile(std::vector& files, const std::string& file); - - GlobInternals* Internals; - bool Recurse; - std::string Relative; - bool RecurseThroughSymlinks; - unsigned int FollowedSymlinkCount; - std::vector VisitedSymlinks; - bool ListDirs; - bool RecurseListDirs; - -private: - Glob(const Glob&); // Not implemented. - void operator=(const Glob&); // Not implemented. -}; - -} // namespace @KWSYS_NAMESPACE@ - -#endif diff --git a/test/API/driver/kwsys/IOStream.cxx b/test/API/driver/kwsys/IOStream.cxx deleted file mode 100644 index e21f87d4539..00000000000 --- a/test/API/driver/kwsys/IOStream.cxx +++ /dev/null @@ -1,255 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(Configure.hxx) - -// Include the streams library. -#include -#include KWSYS_HEADER(IOStream.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "Configure.hxx.in" -# include "IOStream.hxx.in" -#endif - -// Implement the rest of this file only if it is needed. -#if KWSYS_IOS_NEED_OPERATORS_LL - -# include // sscanf, sprintf -# include // memchr - -# if defined(_MAX_INT_DIG) -# define KWSYS_IOS_INT64_MAX_DIG _MAX_INT_DIG -# else -# define KWSYS_IOS_INT64_MAX_DIG 32 -# endif - -namespace KWSYS_NAMESPACE { - -// Scan an input stream for an integer value. -static int IOStreamScanStream(std::istream& is, char* buffer) -{ - // Prepare to write to buffer. - char* out = buffer; - char* end = buffer + KWSYS_IOS_INT64_MAX_DIG - 1; - - // Look for leading sign. - if (is.peek() == '+') { - *out++ = '+'; - is.ignore(); - } else if (is.peek() == '-') { - *out++ = '-'; - is.ignore(); - } - - // Determine the base. If not specified in the stream, try to - // detect it from the input. A leading 0x means hex, and a leading - // 0 alone means octal. - int base = 0; - int flags = is.flags() & std::ios_base::basefield; - if (flags == std::ios_base::oct) { - base = 8; - } else if (flags == std::ios_base::dec) { - base = 10; - } else if (flags == std::ios_base::hex) { - base = 16; - } - bool foundDigit = false; - bool foundNonZero = false; - if (is.peek() == '0') { - foundDigit = true; - is.ignore(); - if ((is.peek() == 'x' || is.peek() == 'X') && (base == 0 || base == 16)) { - base = 16; - foundDigit = false; - is.ignore(); - } else if (base == 0) { - base = 8; - } - } - - // Determine the range of digits allowed for this number. - const char* digits = "0123456789abcdefABCDEF"; - int maxDigitIndex = 10; - if (base == 8) { - maxDigitIndex = 8; - } else if (base == 16) { - maxDigitIndex = 10 + 6 + 6; - } - - // Scan until an invalid digit is found. - for (; is.peek() != EOF; is.ignore()) { - if (memchr(digits, *out = (char)is.peek(), maxDigitIndex) != 0) { - if ((foundNonZero || *out != '0') && out < end) { - ++out; - foundNonZero = true; - } - foundDigit = true; - } else { - break; - } - } - - // Correct the buffer contents for degenerate cases. - if (foundDigit && !foundNonZero) { - *out++ = '0'; - } else if (!foundDigit) { - out = buffer; - } - - // Terminate the string in the buffer. 
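The scanner above picks the numeric base from the stream's basefield flags or, when none is set, from a leading "0x"/"0" prefix. A simplified, standalone restatement of that detection rule (an illustration only, not the kwsys code itself):

    #include <string>

    // Base the scanner would choose for `text` when the stream gives no hint.
    static int DetectBase(const std::string& text)
    {
      std::string s = text;
      if (!s.empty() && (s[0] == '+' || s[0] == '-')) {
        s.erase(0, 1);                     // skip a leading sign
      }
      if (s.size() >= 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')) {
        return 16;                         // leading 0x / 0X selects hexadecimal
      }
      if (!s.empty() && s[0] == '0') {
        return 8;                          // a leading 0 alone selects octal
      }
      return 10;                           // otherwise decimal
    }

    // DetectBase("0x1f") == 16, DetectBase("017") == 8, DetectBase("-42") == 10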
- *out = '\0'; - - return base; -} - -// Read an integer value from an input stream. -template -std::istream& IOStreamScanTemplate(std::istream& is, T& value, char type) -{ - int state = std::ios_base::goodbit; - - // Skip leading whitespace. - std::istream::sentry okay(is); - - if (okay) { - try { - // Copy the string to a buffer and construct the format string. - char buffer[KWSYS_IOS_INT64_MAX_DIG]; -# if defined(_MSC_VER) - char format[] = "%I64_"; - const int typeIndex = 4; -# else - char format[] = "%ll_"; - const int typeIndex = 3; -# endif - switch (IOStreamScanStream(is, buffer)) { - case 8: - format[typeIndex] = 'o'; - break; - case 0: // Default to decimal if not told otherwise. - case 10: - format[typeIndex] = type; - break; - case 16: - format[typeIndex] = 'x'; - break; - }; - - // Use sscanf to parse the number from the buffer. - T result; - int success = (sscanf(buffer, format, &result) == 1) ? 1 : 0; - - // Set flags for resulting state. - if (is.peek() == EOF) { - state |= std::ios_base::eofbit; - } - if (!success) { - state |= std::ios_base::failbit; - } else { - value = result; - } - } catch (...) { - state |= std::ios_base::badbit; - } - } - - is.setstate(std::ios_base::iostate(state)); - return is; -} - -// Print an integer value to an output stream. -template -std::ostream& IOStreamPrintTemplate(std::ostream& os, T value, char type) -{ - std::ostream::sentry okay(os); - if (okay) { - try { - // Construct the format string. - char format[8]; - char* f = format; - *f++ = '%'; - if (os.flags() & std::ios_base::showpos) { - *f++ = '+'; - } - if (os.flags() & std::ios_base::showbase) { - *f++ = '#'; - } -# if defined(_MSC_VER) - *f++ = 'I'; - *f++ = '6'; - *f++ = '4'; -# else - *f++ = 'l'; - *f++ = 'l'; -# endif - long bflags = os.flags() & std::ios_base::basefield; - if (bflags == std::ios_base::oct) { - *f++ = 'o'; - } else if (bflags != std::ios_base::hex) { - *f++ = type; - } else if (os.flags() & std::ios_base::uppercase) { - *f++ = 'X'; - } else { - *f++ = 'x'; - } - *f = '\0'; - - // Use sprintf to print to a buffer and then write the - // buffer to the stream. - char buffer[2 * KWSYS_IOS_INT64_MAX_DIG]; - sprintf(buffer, format, value); - os << buffer; - } catch (...) { - os.clear(os.rdstate() | std::ios_base::badbit); - } - } - return os; -} - -# if !KWSYS_IOS_HAS_ISTREAM_LONG_LONG -// Implement input stream operator for IOStreamSLL. -std::istream& IOStreamScan(std::istream& is, IOStreamSLL& value) -{ - return IOStreamScanTemplate(is, value, 'd'); -} - -// Implement input stream operator for IOStreamULL. -std::istream& IOStreamScan(std::istream& is, IOStreamULL& value) -{ - return IOStreamScanTemplate(is, value, 'u'); -} -# endif - -# if !KWSYS_IOS_HAS_OSTREAM_LONG_LONG -// Implement output stream operator for IOStreamSLL. -std::ostream& IOStreamPrint(std::ostream& os, IOStreamSLL value) -{ - return IOStreamPrintTemplate(os, value, 'd'); -} - -// Implement output stream operator for IOStreamULL. -std::ostream& IOStreamPrint(std::ostream& os, IOStreamULL value) -{ - return IOStreamPrintTemplate(os, value, 'u'); -} -# endif - -} // namespace KWSYS_NAMESPACE - -#else - -namespace KWSYS_NAMESPACE { - -// Create one public symbol in this object file to avoid warnings from -// archivers. 
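These templates back the operator>> / operator<< overloads that IOStream.hxx.in supplies when the standard streams lack long long support. From the caller's side the overloads behave like the built-in operators; a sketch under the same "kwsys" namespace assumption (on modern toolchains the native operators are used and the kwsys fallbacks compile away):

    #include <kwsys/IOStream.hxx>
    #include <sstream>
    #include <iostream>

    int main()
    {
      long long value = 0;
      std::istringstream in("0x1f");
      in >> std::hex >> value;   // kwsys::IOStreamScan is used only when needed
      std::cout << std::showbase << std::hex << value << '\n';   // prints 0x1f
      return 0;
    }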
-void IOStreamSymbolToAvoidWarning(); -void IOStreamSymbolToAvoidWarning() -{ -} - -} // namespace KWSYS_NAMESPACE - -#endif // KWSYS_IOS_NEED_OPERATORS_LL diff --git a/test/API/driver/kwsys/IOStream.hxx.in b/test/API/driver/kwsys/IOStream.hxx.in deleted file mode 100644 index db8a23ef56d..00000000000 --- a/test/API/driver/kwsys/IOStream.hxx.in +++ /dev/null @@ -1,126 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_IOStream_hxx -#define @KWSYS_NAMESPACE@_IOStream_hxx - -#include - -/* Define these macros temporarily to keep the code readable. */ -#if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT -#endif - -/* Whether istream supports long long. */ -#define @KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG \ - @KWSYS_IOS_HAS_ISTREAM_LONG_LONG@ - -/* Whether ostream supports long long. */ -#define @KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG \ - @KWSYS_IOS_HAS_OSTREAM_LONG_LONG@ - -/* Determine whether we need to define the streaming operators for - long long or __int64. */ -#if @KWSYS_USE_LONG_LONG@ -# if !@KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG || \ - !@KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG -# define @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL 1 -namespace @KWSYS_NAMESPACE@ { -typedef long long IOStreamSLL; -typedef unsigned long long IOStreamULL; -} -# endif -#elif defined(_MSC_VER) && _MSC_VER < 1300 -# define @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL 1 -namespace @KWSYS_NAMESPACE@ { -typedef __int64 IOStreamSLL; -typedef unsigned __int64 IOStreamULL; -} -#endif -#if !defined(@KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL) -# define @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL 0 -#endif - -#if @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL -# if !@KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG - -/* Input stream operator implementation functions. */ -namespace @KWSYS_NAMESPACE@ { -kwsysEXPORT std::istream& IOStreamScan(std::istream&, IOStreamSLL&); -kwsysEXPORT std::istream& IOStreamScan(std::istream&, IOStreamULL&); -} - -/* Provide input stream operator for long long. */ -# if !defined(@KWSYS_NAMESPACE@_IOS_NO_ISTREAM_LONG_LONG) && \ - !defined(KWSYS_IOS_ISTREAM_LONG_LONG_DEFINED) -# define KWSYS_IOS_ISTREAM_LONG_LONG_DEFINED -# define @KWSYS_NAMESPACE@_IOS_ISTREAM_LONG_LONG_DEFINED -inline std::istream& operator>>(std::istream& is, - @KWSYS_NAMESPACE@::IOStreamSLL& value) -{ - return @KWSYS_NAMESPACE@::IOStreamScan(is, value); -} -# endif - -/* Provide input stream operator for unsigned long long. */ -# if !defined(@KWSYS_NAMESPACE@_IOS_NO_ISTREAM_UNSIGNED_LONG_LONG) && \ - !defined(KWSYS_IOS_ISTREAM_UNSIGNED_LONG_LONG_DEFINED) -# define KWSYS_IOS_ISTREAM_UNSIGNED_LONG_LONG_DEFINED -# define @KWSYS_NAMESPACE@_IOS_ISTREAM_UNSIGNED_LONG_LONG_DEFINED -inline std::istream& operator>>(std::istream& is, - @KWSYS_NAMESPACE@::IOStreamULL& value) -{ - return @KWSYS_NAMESPACE@::IOStreamScan(is, value); -} -# endif -# endif /* !@KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG */ - -# if !@KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG - -/* Output stream operator implementation functions. */ -namespace @KWSYS_NAMESPACE@ { -kwsysEXPORT std::ostream& IOStreamPrint(std::ostream&, IOStreamSLL); -kwsysEXPORT std::ostream& IOStreamPrint(std::ostream&, IOStreamULL); -} - -/* Provide output stream operator for long long. 
*/ -# if !defined(@KWSYS_NAMESPACE@_IOS_NO_OSTREAM_LONG_LONG) && \ - !defined(KWSYS_IOS_OSTREAM_LONG_LONG_DEFINED) -# define KWSYS_IOS_OSTREAM_LONG_LONG_DEFINED -# define @KWSYS_NAMESPACE@_IOS_OSTREAM_LONG_LONG_DEFINED -inline std::ostream& operator<<(std::ostream& os, - @KWSYS_NAMESPACE@::IOStreamSLL value) -{ - return @KWSYS_NAMESPACE@::IOStreamPrint(os, value); -} -# endif - -/* Provide output stream operator for unsigned long long. */ -# if !defined(@KWSYS_NAMESPACE@_IOS_NO_OSTREAM_UNSIGNED_LONG_LONG) && \ - !defined(KWSYS_IOS_OSTREAM_UNSIGNED_LONG_LONG_DEFINED) -# define KWSYS_IOS_OSTREAM_UNSIGNED_LONG_LONG_DEFINED -# define @KWSYS_NAMESPACE@_IOS_OSTREAM_UNSIGNED_LONG_LONG_DEFINED -inline std::ostream& operator<<(std::ostream& os, - @KWSYS_NAMESPACE@::IOStreamULL value) -{ - return @KWSYS_NAMESPACE@::IOStreamPrint(os, value); -} -# endif -# endif /* !@KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG */ -#endif /* @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL */ - -/* Undefine temporary macros. */ -#if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# undef kwsysEXPORT -#endif - -/* If building a C++ file in kwsys itself, give the source file - access to the macros without a configured namespace. */ -#if defined(KWSYS_NAMESPACE) -# define KWSYS_IOS_HAS_ISTREAM_LONG_LONG \ - @KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG -# define KWSYS_IOS_HAS_OSTREAM_LONG_LONG \ - @KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG -# define KWSYS_IOS_NEED_OPERATORS_LL @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL -#endif - -#endif diff --git a/test/API/driver/kwsys/MD5.c b/test/API/driver/kwsys/MD5.c deleted file mode 100644 index 97cf9ba68b1..00000000000 --- a/test/API/driver/kwsys/MD5.c +++ /dev/null @@ -1,494 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(MD5.h) - -/* Work-around CMake dependency scanning limitation. This must - duplicate the above list of headers. */ -#if 0 -# include "MD5.h.in" -#endif - -#include /* size_t */ -#include /* malloc, free */ -#include /* memcpy, strlen */ - -/* This MD5 implementation has been taken from a third party. Slight - modifications to the arrangement of the code have been made to put - it in a single source file instead of a separate header and - implementation file. */ - -#if defined(__clang__) && !defined(__INTEL_COMPILER) -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wcast-align" -#endif - -/* - Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - L. 
Peter Deutsch - ghost@aladdin.com - - */ -/* - Independent implementation of MD5 (RFC 1321). - - This code implements the MD5 Algorithm defined in RFC 1321, whose - text is available at - http://www.ietf.org/rfc/rfc1321.txt - The code is derived from the text of the RFC, including the test suite - (section A.5) but excluding the rest of Appendix A. It does not include - any code or documentation that is identified in the RFC as being - copyrighted. - - The original and principal author of md5.c is L. Peter Deutsch - . Other authors are noted in the change history - that follows (in reverse chronological order): - - 2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order - either statically or dynamically; added missing #include - in library. - 2002-03-11 lpd Corrected argument list for main(), and added int return - type, in test program and T value program. - 2002-02-21 lpd Added missing #include in test program. - 2000-07-03 lpd Patched to eliminate warnings about "constant is - unsigned in ANSI C, signed in traditional"; made test program - self-checking. - 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. - 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5). - 1999-05-03 lpd Original version. - */ - -/* - * This package supports both compile-time and run-time determination of CPU - * byte order. If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be - * compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is - * defined as non-zero, the code will be compiled to run only on big-endian - * CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to - * run on either big- or little-endian CPUs, but will run slightly less - * efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined. - */ - -typedef unsigned char md5_byte_t; /* 8-bit byte */ -typedef unsigned int md5_word_t; /* 32-bit word */ - -/* Define the state of the MD5 Algorithm. */ -typedef struct md5_state_s -{ - md5_word_t count[2]; /* message length in bits, lsw first */ - md5_word_t abcd[4]; /* digest buffer */ - md5_byte_t buf[64]; /* accumulate block */ -} md5_state_t; - -#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */ -#ifdef ARCH_IS_BIG_ENDIAN -# define BYTE_ORDER (ARCH_IS_BIG_ENDIAN ? 
1 : -1) -#else -# define BYTE_ORDER 0 -#endif - -#define T_MASK ((md5_word_t)~0) -#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87) -#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9) -#define T3 0x242070db -#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111) -#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050) -#define T6 0x4787c62a -#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec) -#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe) -#define T9 0x698098d8 -#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850) -#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e) -#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841) -#define T13 0x6b901122 -#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c) -#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71) -#define T16 0x49b40821 -#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d) -#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf) -#define T19 0x265e5a51 -#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855) -#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2) -#define T22 0x02441453 -#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e) -#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437) -#define T25 0x21e1cde6 -#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829) -#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278) -#define T28 0x455a14ed -#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa) -#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07) -#define T31 0x676f02d9 -#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375) -#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd) -#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e) -#define T35 0x6d9d6122 -#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3) -#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb) -#define T38 0x4bdecfa9 -#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f) -#define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f) -#define T41 0x289b7ec6 -#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805) -#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a) -#define T44 0x04881d05 -#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6) -#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a) -#define T47 0x1fa27cf8 -#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a) -#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb) -#define T50 0x432aff97 -#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58) -#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6) -#define T53 0x655b59c3 -#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d) -#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82) -#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e) -#define T57 0x6fa87e4f -#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f) -#define T59 /* 0xa3014314 */ (T_MASK ^ 0x5cfebceb) -#define T60 0x4e0811a1 -#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d) -#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca) -#define T63 0x2ad7d2bb -#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e) - -static void md5_process(md5_state_t* pms, const md5_byte_t* data /*[64]*/) -{ - md5_word_t a = pms->abcd[0], b = pms->abcd[1], c = pms->abcd[2], - d = pms->abcd[3]; - md5_word_t t; -#if BYTE_ORDER > 0 - /* Define storage only for big-endian CPUs. */ - md5_word_t X[16]; -#else - /* Define storage for little-endian or both types of CPUs. */ - md5_word_t xbuf[16]; - const md5_word_t* X; -#endif - - { -#if BYTE_ORDER == 0 - /* - * Determine dynamically whether this is a big-endian or - * little-endian machine, since we can use a more efficient - * algorithm on the latter. 
- */ - static const int w = 1; - - if (*((const md5_byte_t*)&w)) /* dynamic little-endian */ -#endif -#if BYTE_ORDER <= 0 /* little-endian */ - { - /* - * On little-endian machines, we can process properly aligned - * data without copying it. - */ - if (!((data - (const md5_byte_t*)0) & 3)) { - /* data are properly aligned */ - X = (const md5_word_t*)data; - } else { - /* not aligned */ - memcpy(xbuf, data, 64); - X = xbuf; - } - } -#endif -#if BYTE_ORDER == 0 - else /* dynamic big-endian */ -#endif -#if BYTE_ORDER >= 0 /* big-endian */ - { - /* - * On big-endian machines, we must arrange the bytes in the - * right order. - */ - const md5_byte_t* xp = data; - int i; - -# if BYTE_ORDER == 0 - X = xbuf; /* (dynamic only) */ -# else -# define xbuf X /* (static only) */ -# endif - for (i = 0; i < 16; ++i, xp += 4) - xbuf[i] = - (md5_word_t)(xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24)); - } -#endif - } - -#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) - -/* Round 1. */ -/* Let [abcd k s i] denote the operation - a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */ -#define F(x, y, z) (((x) & (y)) | (~(x) & (z))) -#define SET(a, b, c, d, k, s, Ti) \ - t = a + F(b, c, d) + X[k] + (Ti); \ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 0, 7, T1); - SET(d, a, b, c, 1, 12, T2); - SET(c, d, a, b, 2, 17, T3); - SET(b, c, d, a, 3, 22, T4); - SET(a, b, c, d, 4, 7, T5); - SET(d, a, b, c, 5, 12, T6); - SET(c, d, a, b, 6, 17, T7); - SET(b, c, d, a, 7, 22, T8); - SET(a, b, c, d, 8, 7, T9); - SET(d, a, b, c, 9, 12, T10); - SET(c, d, a, b, 10, 17, T11); - SET(b, c, d, a, 11, 22, T12); - SET(a, b, c, d, 12, 7, T13); - SET(d, a, b, c, 13, 12, T14); - SET(c, d, a, b, 14, 17, T15); - SET(b, c, d, a, 15, 22, T16); -#undef SET - -/* Round 2. */ -/* Let [abcd k s i] denote the operation - a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */ -#define G(x, y, z) (((x) & (z)) | ((y) & ~(z))) -#define SET(a, b, c, d, k, s, Ti) \ - t = a + G(b, c, d) + X[k] + (Ti); \ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 1, 5, T17); - SET(d, a, b, c, 6, 9, T18); - SET(c, d, a, b, 11, 14, T19); - SET(b, c, d, a, 0, 20, T20); - SET(a, b, c, d, 5, 5, T21); - SET(d, a, b, c, 10, 9, T22); - SET(c, d, a, b, 15, 14, T23); - SET(b, c, d, a, 4, 20, T24); - SET(a, b, c, d, 9, 5, T25); - SET(d, a, b, c, 14, 9, T26); - SET(c, d, a, b, 3, 14, T27); - SET(b, c, d, a, 8, 20, T28); - SET(a, b, c, d, 13, 5, T29); - SET(d, a, b, c, 2, 9, T30); - SET(c, d, a, b, 7, 14, T31); - SET(b, c, d, a, 12, 20, T32); -#undef SET - -/* Round 3. */ -/* Let [abcd k s t] denote the operation - a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */ -#define H(x, y, z) ((x) ^ (y) ^ (z)) -#define SET(a, b, c, d, k, s, Ti) \ - t = a + H(b, c, d) + X[k] + (Ti); \ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 5, 4, T33); - SET(d, a, b, c, 8, 11, T34); - SET(c, d, a, b, 11, 16, T35); - SET(b, c, d, a, 14, 23, T36); - SET(a, b, c, d, 1, 4, T37); - SET(d, a, b, c, 4, 11, T38); - SET(c, d, a, b, 7, 16, T39); - SET(b, c, d, a, 10, 23, T40); - SET(a, b, c, d, 13, 4, T41); - SET(d, a, b, c, 0, 11, T42); - SET(c, d, a, b, 3, 16, T43); - SET(b, c, d, a, 6, 23, T44); - SET(a, b, c, d, 9, 4, T45); - SET(d, a, b, c, 12, 11, T46); - SET(c, d, a, b, 15, 16, T47); - SET(b, c, d, a, 2, 23, T48); -#undef SET - -/* Round 4. */ -/* Let [abcd k s t] denote the operation - a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). 
*/ -#define I(x, y, z) ((y) ^ ((x) | ~(z))) -#define SET(a, b, c, d, k, s, Ti) \ - t = a + I(b, c, d) + X[k] + (Ti); \ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 0, 6, T49); - SET(d, a, b, c, 7, 10, T50); - SET(c, d, a, b, 14, 15, T51); - SET(b, c, d, a, 5, 21, T52); - SET(a, b, c, d, 12, 6, T53); - SET(d, a, b, c, 3, 10, T54); - SET(c, d, a, b, 10, 15, T55); - SET(b, c, d, a, 1, 21, T56); - SET(a, b, c, d, 8, 6, T57); - SET(d, a, b, c, 15, 10, T58); - SET(c, d, a, b, 6, 15, T59); - SET(b, c, d, a, 13, 21, T60); - SET(a, b, c, d, 4, 6, T61); - SET(d, a, b, c, 11, 10, T62); - SET(c, d, a, b, 2, 15, T63); - SET(b, c, d, a, 9, 21, T64); -#undef SET - - /* Then perform the following additions. (That is increment each - of the four registers by the value it had before this block - was started.) */ - pms->abcd[0] += a; - pms->abcd[1] += b; - pms->abcd[2] += c; - pms->abcd[3] += d; -} - -/* Initialize the algorithm. */ -static void md5_init(md5_state_t* pms) -{ - pms->count[0] = pms->count[1] = 0; - pms->abcd[0] = 0x67452301; - pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476; - pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301; - pms->abcd[3] = 0x10325476; -} - -/* Append a string to the message. */ -static void md5_append(md5_state_t* pms, const md5_byte_t* data, size_t nbytes) -{ - const md5_byte_t* p = data; - size_t left = nbytes; - size_t offset = (pms->count[0] >> 3) & 63; - md5_word_t nbits = (md5_word_t)(nbytes << 3); - - if (nbytes <= 0) - return; - - /* Update the message length. */ - pms->count[1] += (md5_word_t)(nbytes >> 29); - pms->count[0] += nbits; - if (pms->count[0] < nbits) - pms->count[1]++; - - /* Process an initial partial block. */ - if (offset) { - size_t copy = (offset + nbytes > 64 ? 64 - offset : nbytes); - - memcpy(pms->buf + offset, p, copy); - if (offset + copy < 64) - return; - p += copy; - left -= copy; - md5_process(pms, pms->buf); - } - - /* Process full blocks. */ - for (; left >= 64; p += 64, left -= 64) - md5_process(pms, p); - - /* Process a final partial block. */ - if (left) - memcpy(pms->buf, p, left); -} - -/* Finish the message and return the digest. */ -static void md5_finish(md5_state_t* pms, md5_byte_t digest[16]) -{ - static const md5_byte_t pad[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - md5_byte_t data[8]; - int i; - - /* Save the length before padding. */ - for (i = 0; i < 8; ++i) - data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3)); - /* Pad to 56 bytes mod 64. */ - md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1); - /* Append the length. */ - md5_append(pms, data, 8); - for (i = 0; i < 16; ++i) - digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3)); -} - -#if defined(__clang__) && !defined(__INTEL_COMPILER) -# pragma clang diagnostic pop -#endif - -/* Wrap up the MD5 state in our opaque structure. */ -struct kwsysMD5_s -{ - md5_state_t md5_state; -}; - -kwsysMD5* kwsysMD5_New(void) -{ - /* Allocate a process control structure. */ - kwsysMD5* md5 = (kwsysMD5*)malloc(sizeof(kwsysMD5)); - if (!md5) { - return 0; - } - return md5; -} - -void kwsysMD5_Delete(kwsysMD5* md5) -{ - /* Make sure we have an instance. */ - if (!md5) { - return; - } - - /* Free memory. 
*/ - free(md5); -} - -void kwsysMD5_Initialize(kwsysMD5* md5) -{ - md5_init(&md5->md5_state); -} - -void kwsysMD5_Append(kwsysMD5* md5, unsigned char const* data, int length) -{ - size_t dlen; - if (length < 0) { - dlen = strlen((char const*)data); - } else { - dlen = (size_t)length; - } - md5_append(&md5->md5_state, (md5_byte_t const*)data, dlen); -} - -void kwsysMD5_Finalize(kwsysMD5* md5, unsigned char digest[16]) -{ - md5_finish(&md5->md5_state, (md5_byte_t*)digest); -} - -void kwsysMD5_FinalizeHex(kwsysMD5* md5, char buffer[32]) -{ - unsigned char digest[16]; - kwsysMD5_Finalize(md5, digest); - kwsysMD5_DigestToHex(digest, buffer); -} - -void kwsysMD5_DigestToHex(unsigned char const digest[16], char buffer[32]) -{ - /* Map from 4-bit index to hexadecimal representation. */ - static char const hex[16] = { '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; - - /* Map each 4-bit block separately. */ - char* out = buffer; - int i; - for (i = 0; i < 16; ++i) { - *out++ = hex[digest[i] >> 4]; - *out++ = hex[digest[i] & 0xF]; - } -} diff --git a/test/API/driver/kwsys/MD5.h.in b/test/API/driver/kwsys/MD5.h.in deleted file mode 100644 index 7646f1297ab..00000000000 --- a/test/API/driver/kwsys/MD5.h.in +++ /dev/null @@ -1,97 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_MD5_h -#define @KWSYS_NAMESPACE@_MD5_h - -#include <@KWSYS_NAMESPACE@/Configure.h> - -/* Redefine all public interface symbol names to be in the proper - namespace. These macros are used internally to kwsys only, and are - not visible to user code. Use kwsysHeaderDump.pl to reproduce - these macros after making changes to the interface. */ -#if !defined(KWSYS_NAMESPACE) -# define kwsys_ns(x) @KWSYS_NAMESPACE@##x -# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT -#endif -#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# define kwsysMD5 kwsys_ns(MD5) -# define kwsysMD5_s kwsys_ns(MD5_s) -# define kwsysMD5_New kwsys_ns(MD5_New) -# define kwsysMD5_Delete kwsys_ns(MD5_Delete) -# define kwsysMD5_Initialize kwsys_ns(MD5_Initialize) -# define kwsysMD5_Append kwsys_ns(MD5_Append) -# define kwsysMD5_Finalize kwsys_ns(MD5_Finalize) -# define kwsysMD5_FinalizeHex kwsys_ns(MD5_FinalizeHex) -# define kwsysMD5_DigestToHex kwsys_ns(MD5_DigestToHex) -#endif - -#if defined(__cplusplus) -extern "C" { -#endif - -/** - * MD5 state data structure. - */ -typedef struct kwsysMD5_s kwsysMD5; - -/** - * Create a new MD5 instance. The returned instance is not initialized. - */ -kwsysEXPORT kwsysMD5* kwsysMD5_New(void); - -/** - * Delete an old MD5 instance. - */ -kwsysEXPORT void kwsysMD5_Delete(kwsysMD5* md5); - -/** - * Initialize a new MD5 digest. - */ -kwsysEXPORT void kwsysMD5_Initialize(kwsysMD5* md5); - -/** - * Append data to an MD5 digest. If the given length is negative, - * data will be read up to but not including a terminating null. - */ -kwsysEXPORT void kwsysMD5_Append(kwsysMD5* md5, unsigned char const* data, - int length); - -/** - * Finalize a MD5 digest and get the 16-byte hash value. - */ -kwsysEXPORT void kwsysMD5_Finalize(kwsysMD5* md5, unsigned char digest[16]); - -/** - * Finalize a MD5 digest and get the 32-bit hexadecimal representation. - */ -kwsysEXPORT void kwsysMD5_FinalizeHex(kwsysMD5* md5, char buffer[32]); - -/** - * Convert a MD5 digest 16-byte value to a 32-byte hexadecimal representation. 
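The header that follows exposes this implementation as a small C API. A minimal sketch of hashing a string to its hexadecimal digest with it, again assuming the configured namespace is "kwsys" so the public names keep the kwsysMD5_ prefix and the header installs as <kwsys/MD5.h>:

    #include <kwsys/MD5.h>
    #include <cstdio>

    int main()
    {
      kwsysMD5* md5 = kwsysMD5_New();
      kwsysMD5_Initialize(md5);

      // A negative length means "read up to the terminating null".
      kwsysMD5_Append(md5, (unsigned char const*)"hello world", -1);

      char hex[33] = { 0 };            // 32 hex characters plus our own terminator
      kwsysMD5_FinalizeHex(md5, hex);  // finalize and convert in one call
      kwsysMD5_Delete(md5);

      std::printf("%.32s\n", hex);
      return 0;
    }

Note that kwsysMD5_FinalizeHex fills exactly 32 characters and does not null-terminate, hence the extra byte and the bounded format specifier above.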
- */ -kwsysEXPORT void kwsysMD5_DigestToHex(unsigned char const digest[16], - char buffer[32]); - -#if defined(__cplusplus) -} /* extern "C" */ -#endif - -/* If we are building a kwsys .c or .cxx file, let it use these macros. - Otherwise, undefine them to keep the namespace clean. */ -#if !defined(KWSYS_NAMESPACE) -# undef kwsys_ns -# undef kwsysEXPORT -# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# undef kwsysMD5 -# undef kwsysMD5_s -# undef kwsysMD5_New -# undef kwsysMD5_Delete -# undef kwsysMD5_Initialize -# undef kwsysMD5_Append -# undef kwsysMD5_Finalize -# undef kwsysMD5_FinalizeHex -# undef kwsysMD5_DigestToHex -# endif -#endif - -#endif diff --git a/test/API/driver/kwsys/Process.h.in b/test/API/driver/kwsys/Process.h.in deleted file mode 100644 index 73ea9dbfcd9..00000000000 --- a/test/API/driver/kwsys/Process.h.in +++ /dev/null @@ -1,544 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_Process_h -#define @KWSYS_NAMESPACE@_Process_h - -#include <@KWSYS_NAMESPACE@/Configure.h> - -/* Redefine all public interface symbol names to be in the proper - namespace. These macros are used internally to kwsys only, and are - not visible to user code. Use kwsysHeaderDump.pl to reproduce - these macros after making changes to the interface. */ -#if !defined(KWSYS_NAMESPACE) -# define kwsys_ns(x) @KWSYS_NAMESPACE@##x -# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT -#endif -#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# define kwsysProcess kwsys_ns(Process) -# define kwsysProcess_s kwsys_ns(Process_s) -# define kwsysProcess_New kwsys_ns(Process_New) -# define kwsysProcess_Delete kwsys_ns(Process_Delete) -# define kwsysProcess_SetCommand kwsys_ns(Process_SetCommand) -# define kwsysProcess_AddCommand kwsys_ns(Process_AddCommand) -# define kwsysProcess_SetTimeout kwsys_ns(Process_SetTimeout) -# define kwsysProcess_SetWorkingDirectory \ - kwsys_ns(Process_SetWorkingDirectory) -# define kwsysProcess_SetPipeFile kwsys_ns(Process_SetPipeFile) -# define kwsysProcess_SetPipeNative kwsys_ns(Process_SetPipeNative) -# define kwsysProcess_SetPipeShared kwsys_ns(Process_SetPipeShared) -# define kwsysProcess_Option_Detach kwsys_ns(Process_Option_Detach) -# define kwsysProcess_Option_HideWindow kwsys_ns(Process_Option_HideWindow) -# define kwsysProcess_Option_MergeOutput kwsys_ns(Process_Option_MergeOutput) -# define kwsysProcess_Option_Verbatim kwsys_ns(Process_Option_Verbatim) -# define kwsysProcess_Option_CreateProcessGroup \ - kwsys_ns(Process_Option_CreateProcessGroup) -# define kwsysProcess_GetOption kwsys_ns(Process_GetOption) -# define kwsysProcess_SetOption kwsys_ns(Process_SetOption) -# define kwsysProcess_Option_e kwsys_ns(Process_Option_e) -# define kwsysProcess_State_Starting kwsys_ns(Process_State_Starting) -# define kwsysProcess_State_Error kwsys_ns(Process_State_Error) -# define kwsysProcess_State_Exception kwsys_ns(Process_State_Exception) -# define kwsysProcess_State_Executing kwsys_ns(Process_State_Executing) -# define kwsysProcess_State_Exited kwsys_ns(Process_State_Exited) -# define kwsysProcess_State_Expired kwsys_ns(Process_State_Expired) -# define kwsysProcess_State_Killed kwsys_ns(Process_State_Killed) -# define kwsysProcess_State_Disowned kwsys_ns(Process_State_Disowned) -# define kwsysProcess_State_e kwsys_ns(Process_State_e) -# define kwsysProcess_Exception_None kwsys_ns(Process_Exception_None) -# define kwsysProcess_Exception_Fault 
kwsys_ns(Process_Exception_Fault) -# define kwsysProcess_Exception_Illegal kwsys_ns(Process_Exception_Illegal) -# define kwsysProcess_Exception_Interrupt \ - kwsys_ns(Process_Exception_Interrupt) -# define kwsysProcess_Exception_Numerical \ - kwsys_ns(Process_Exception_Numerical) -# define kwsysProcess_Exception_Other kwsys_ns(Process_Exception_Other) -# define kwsysProcess_Exception_e kwsys_ns(Process_Exception_e) -# define kwsysProcess_GetState kwsys_ns(Process_GetState) -# define kwsysProcess_GetExitException kwsys_ns(Process_GetExitException) -# define kwsysProcess_GetExitCode kwsys_ns(Process_GetExitCode) -# define kwsysProcess_GetExitValue kwsys_ns(Process_GetExitValue) -# define kwsysProcess_GetErrorString kwsys_ns(Process_GetErrorString) -# define kwsysProcess_GetExceptionString kwsys_ns(Process_GetExceptionString) -# define kwsysProcess_GetStateByIndex kwsys_ns(Process_GetStateByIndex) -# define kwsysProcess_GetExitExceptionByIndex \ - kwsys_ns(Process_GetExitExceptionByIndex) -# define kwsysProcess_GetExitCodeByIndex kwsys_ns(Process_GetExitCodeByIndex) -# define kwsysProcess_GetExitValueByIndex \ - kwsys_ns(Process_GetExitValueByIndex) -# define kwsysProcess_GetExceptionStringByIndex \ - kwsys_ns(Process_GetExceptionStringByIndex) -# define kwsysProcess_GetExitCodeByIndex kwsys_ns(Process_GetExitCodeByIndex) -# define kwsysProcess_Execute kwsys_ns(Process_Execute) -# define kwsysProcess_Disown kwsys_ns(Process_Disown) -# define kwsysProcess_WaitForData kwsys_ns(Process_WaitForData) -# define kwsysProcess_Pipes_e kwsys_ns(Process_Pipes_e) -# define kwsysProcess_Pipe_None kwsys_ns(Process_Pipe_None) -# define kwsysProcess_Pipe_STDIN kwsys_ns(Process_Pipe_STDIN) -# define kwsysProcess_Pipe_STDOUT kwsys_ns(Process_Pipe_STDOUT) -# define kwsysProcess_Pipe_STDERR kwsys_ns(Process_Pipe_STDERR) -# define kwsysProcess_Pipe_Timeout kwsys_ns(Process_Pipe_Timeout) -# define kwsysProcess_Pipe_Handle kwsys_ns(Process_Pipe_Handle) -# define kwsysProcess_WaitForExit kwsys_ns(Process_WaitForExit) -# define kwsysProcess_Interrupt kwsys_ns(Process_Interrupt) -# define kwsysProcess_Kill kwsys_ns(Process_Kill) -# define kwsysProcess_KillPID kwsys_ns(Process_KillPID) -# define kwsysProcess_ResetStartTime kwsys_ns(Process_ResetStartTime) -#endif - -#if defined(__cplusplus) -extern "C" { -#endif - -/** - * Process control data structure. - */ -typedef struct kwsysProcess_s kwsysProcess; - -/* Platform-specific pipe handle type. */ -#if defined(_WIN32) && !defined(__CYGWIN__) -typedef void* kwsysProcess_Pipe_Handle; -#else -typedef int kwsysProcess_Pipe_Handle; -#endif - -/** - * Create a new Process instance. - */ -kwsysEXPORT kwsysProcess* kwsysProcess_New(void); - -/** - * Delete an existing Process instance. If the instance is currently - * executing a process, this blocks until the process terminates. - */ -kwsysEXPORT void kwsysProcess_Delete(kwsysProcess* cp); - -/** - * Set the command line to be executed. Argument is an array of - * pointers to the command and each argument. The array must end with - * a NULL pointer. Any previous command lines are removed. Returns - * 1 for success and 0 otherwise. - */ -kwsysEXPORT int kwsysProcess_SetCommand(kwsysProcess* cp, - char const* const* command); - -/** - * Add a command line to be executed. Argument is an array of - * pointers to the command and each argument. The array must end with - * a NULL pointer. If this is not the first command added, its - * standard input will be connected to the standard output of the - * previous command. 
Returns 1 for success and 0 otherwise. - */ -kwsysEXPORT int kwsysProcess_AddCommand(kwsysProcess* cp, - char const* const* command); - -/** - * Set the timeout in seconds for the child process. The timeout - * period begins when the child is executed. If the child has not - * terminated when the timeout expires, it will be killed. A - * non-positive (<= 0) value will disable the timeout. - */ -kwsysEXPORT void kwsysProcess_SetTimeout(kwsysProcess* cp, double timeout); - -/** - * Set the working directory for the child process. The working - * directory can be absolute or relative to the current directory. - * Returns 1 for success and 0 for failure. - */ -kwsysEXPORT int kwsysProcess_SetWorkingDirectory(kwsysProcess* cp, - const char* dir); - -/** - * Set the name of a file to be attached to the given pipe. Returns 1 - * for success and 0 for failure. - */ -kwsysEXPORT int kwsysProcess_SetPipeFile(kwsysProcess* cp, int pipe, - const char* file); - -/** - * Set whether the given pipe in the child is shared with the parent - * process. The default is no for Pipe_STDOUT and Pipe_STDERR and yes - * for Pipe_STDIN. - */ -kwsysEXPORT void kwsysProcess_SetPipeShared(kwsysProcess* cp, int pipe, - int shared); - -/** - * Specify a platform-specific native pipe for use as one of the child - * interface pipes. The native pipe is specified by an array of two - * descriptors or handles. The first entry in the array (index 0) - * should be the read end of the pipe. The second entry in the array - * (index 1) should be the write end of the pipe. If a null pointer - * is given the option will be disabled. - * - * For Pipe_STDIN the native pipe is connected to the first child in - * the pipeline as its stdin. After the children are created the - * write end of the pipe will be closed in the child process and the - * read end will be closed in the parent process. - * - * For Pipe_STDOUT and Pipe_STDERR the pipe is connected to the last - * child as its stdout or stderr. After the children are created the - * write end of the pipe will be closed in the parent process and the - * read end will be closed in the child process. - */ -kwsysEXPORT void kwsysProcess_SetPipeNative(kwsysProcess* cp, int pipe, - kwsysProcess_Pipe_Handle p[2]); - -/** - * Get/Set a possibly platform-specific option. Possible options are: - * - * kwsysProcess_Option_Detach = Whether to detach the process. - * 0 = No (default) - * 1 = Yes - * - * kwsysProcess_Option_HideWindow = Whether to hide window on Windows. - * 0 = No (default) - * 1 = Yes - * - * kwsysProcess_Option_MergeOutput = Whether to merge stdout/stderr. - * No content will be returned as stderr. - * Any actual stderr will be on stdout. - * 0 = No (default) - * 1 = Yes - * - * kwsysProcess_Option_Verbatim = Whether SetCommand and AddCommand - * should treat the first argument - * as a verbatim command line - * and ignore the rest of the arguments. - * 0 = No (default) - * 1 = Yes - * - * kwsysProcess_Option_CreateProcessGroup = Whether to place the process in a - * new process group. This is - * useful if you want to send Ctrl+C - * to the process. On UNIX, also - * places the process in a new - * session. 
- * 0 = No (default) - * 1 = Yes - */ -kwsysEXPORT int kwsysProcess_GetOption(kwsysProcess* cp, int optionId); -kwsysEXPORT void kwsysProcess_SetOption(kwsysProcess* cp, int optionId, - int value); -enum kwsysProcess_Option_e -{ - kwsysProcess_Option_HideWindow, - kwsysProcess_Option_Detach, - kwsysProcess_Option_MergeOutput, - kwsysProcess_Option_Verbatim, - kwsysProcess_Option_CreateProcessGroup -}; - -/** - * Get the current state of the Process instance. Possible states are: - * - * kwsysProcess_State_Starting = Execute has not yet been called. - * kwsysProcess_State_Error = Error administrating the child process. - * kwsysProcess_State_Exception = Child process exited abnormally. - * kwsysProcess_State_Executing = Child process is currently running. - * kwsysProcess_State_Exited = Child process exited normally. - * kwsysProcess_State_Expired = Child process's timeout expired. - * kwsysProcess_State_Killed = Child process terminated by Kill method. - * kwsysProcess_State_Disowned = Child is no longer managed by this object. - */ -kwsysEXPORT int kwsysProcess_GetState(kwsysProcess* cp); -enum kwsysProcess_State_e -{ - kwsysProcess_State_Starting, - kwsysProcess_State_Error, - kwsysProcess_State_Exception, - kwsysProcess_State_Executing, - kwsysProcess_State_Exited, - kwsysProcess_State_Expired, - kwsysProcess_State_Killed, - kwsysProcess_State_Disowned -}; - -/** - * When GetState returns "Exception", this method returns a - * platform-independent description of the exceptional behavior that - * caused the child to terminate abnormally. Possible exceptions are: - * - * kwsysProcess_Exception_None = No exceptional behavior occurred. - * kwsysProcess_Exception_Fault = Child crashed with a memory fault. - * kwsysProcess_Exception_Illegal = Child crashed with an illegal - * instruction. - * kwsysProcess_Exception_Interrupt = Child was interrupted by user - * (Cntl-C/Break). - * kwsysProcess_Exception_Numerical = Child crashed with a numerical - * exception. - * kwsysProcess_Exception_Other = Child terminated for another reason. - */ -kwsysEXPORT int kwsysProcess_GetExitException(kwsysProcess* cp); -enum kwsysProcess_Exception_e -{ - kwsysProcess_Exception_None, - kwsysProcess_Exception_Fault, - kwsysProcess_Exception_Illegal, - kwsysProcess_Exception_Interrupt, - kwsysProcess_Exception_Numerical, - kwsysProcess_Exception_Other -}; - -/** - * When GetState returns "Exited" or "Exception", this method returns - * the platform-specific raw exit code of the process. UNIX platforms - * should use WIFEXITED/WEXITSTATUS and WIFSIGNALED/WTERMSIG to access - * this value. Windows users should compare the value to the various - * EXCEPTION_* values. - * - * If GetState returns "Exited", use GetExitValue to get the - * platform-independent child return value. - */ -kwsysEXPORT int kwsysProcess_GetExitCode(kwsysProcess* cp); - -/** - * When GetState returns "Exited", this method returns the child's - * platform-independent exit code (such as the value returned by the - * child's main). - */ -kwsysEXPORT int kwsysProcess_GetExitValue(kwsysProcess* cp); - -/** - * When GetState returns "Error", this method returns a string - * describing the problem. Otherwise, it returns NULL. - */ -kwsysEXPORT const char* kwsysProcess_GetErrorString(kwsysProcess* cp); - -/** - * When GetState returns "Exception", this method returns a string - * describing the problem. Otherwise, it returns NULL. 
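Taken together, GetState, GetExitException, GetExitValue, GetErrorString, and GetExceptionString describe the outcome of a finished child. A small helper along these lines is the typical way callers interpret them (a sketch using only functions declared in this header):

    #include <kwsys/Process.h>
    #include <cstdio>

    // Summarize a process for which WaitForExit has already returned.
    static void ReportOutcome(kwsysProcess* cp)
    {
      switch (kwsysProcess_GetState(cp)) {
        case kwsysProcess_State_Exited:
          std::printf("exited with value %d\n", kwsysProcess_GetExitValue(cp));
          break;
        case kwsysProcess_State_Exception:
          std::printf("crashed: %s\n", kwsysProcess_GetExceptionString(cp));
          break;
        case kwsysProcess_State_Expired:
          std::printf("timed out\n");
          break;
        case kwsysProcess_State_Error:
          std::printf("could not run: %s\n", kwsysProcess_GetErrorString(cp));
          break;
        default:
          std::printf("starting, executing, killed, or disowned\n");
          break;
      }
    }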
- */ -kwsysEXPORT const char* kwsysProcess_GetExceptionString(kwsysProcess* cp); - -/** - * Get the current state of the Process instance. Possible states are: - * - * kwsysProcess_StateByIndex_Starting = Execute has not yet been called. - * kwsysProcess_StateByIndex_Exception = Child process exited abnormally. - * kwsysProcess_StateByIndex_Exited = Child process exited normally. - * kwsysProcess_StateByIndex_Error = Error getting the child return code. - */ -kwsysEXPORT int kwsysProcess_GetStateByIndex(kwsysProcess* cp, int idx); -enum kwsysProcess_StateByIndex_e -{ - kwsysProcess_StateByIndex_Starting = kwsysProcess_State_Starting, - kwsysProcess_StateByIndex_Exception = kwsysProcess_State_Exception, - kwsysProcess_StateByIndex_Exited = kwsysProcess_State_Exited, - kwsysProcess_StateByIndex_Error = kwsysProcess_State_Error -}; - -/** - * When GetState returns "Exception", this method returns a - * platform-independent description of the exceptional behavior that - * caused the child to terminate abnormally. Possible exceptions are: - * - * kwsysProcess_Exception_None = No exceptional behavior occurred. - * kwsysProcess_Exception_Fault = Child crashed with a memory fault. - * kwsysProcess_Exception_Illegal = Child crashed with an illegal - * instruction. - * kwsysProcess_Exception_Interrupt = Child was interrupted by user - * (Cntl-C/Break). - * kwsysProcess_Exception_Numerical = Child crashed with a numerical - * exception. - * kwsysProcess_Exception_Other = Child terminated for another reason. - */ -kwsysEXPORT int kwsysProcess_GetExitExceptionByIndex(kwsysProcess* cp, - int idx); - -/** - * When GetState returns "Exited" or "Exception", this method returns - * the platform-specific raw exit code of the process. UNIX platforms - * should use WIFEXITED/WEXITSTATUS and WIFSIGNALED/WTERMSIG to access - * this value. Windows users should compare the value to the various - * EXCEPTION_* values. - * - * If GetState returns "Exited", use GetExitValue to get the - * platform-independent child return value. - */ -kwsysEXPORT int kwsysProcess_GetExitCodeByIndex(kwsysProcess* cp, int idx); - -/** - * When GetState returns "Exited", this method returns the child's - * platform-independent exit code (such as the value returned by the - * child's main). - */ -kwsysEXPORT int kwsysProcess_GetExitValueByIndex(kwsysProcess* cp, int idx); - -/** - * When GetState returns "Exception", this method returns a string - * describing the problem. Otherwise, it returns NULL. - */ -kwsysEXPORT const char* kwsysProcess_GetExceptionStringByIndex( - kwsysProcess* cp, int idx); - -/** - * Start executing the child process. - */ -kwsysEXPORT void kwsysProcess_Execute(kwsysProcess* cp); - -/** - * Stop management of a detached child process. This closes any pipes - * being read. If the child was not created with the - * kwsysProcess_Option_Detach option, this method does nothing. This - * is because disowning a non-detached process will cause the child - * exit signal to be left unhandled until this process exits. - */ -kwsysEXPORT void kwsysProcess_Disown(kwsysProcess* cp); - -/** - * Block until data are available on a pipe, a timeout expires, or the - * child process terminates. Arguments are as follows: - * - * data = If data are read, the pointer to which this points is - * set to point to the data. - * length = If data are read, the integer to which this points is - * set to the length of the data read. - * timeout = Specifies the maximum time this call may block. 
Upon - * return after reading data, the time elapsed is subtracted - * from the timeout value. If this timeout expires, the - * value is set to 0. A NULL pointer passed for this argument - * indicates no timeout for the call. A negative or zero - * value passed for this argument may be used for polling - * and will always return immediately. - * - * Return value will be one of: - * - * Pipe_None = No more data will be available from the child process, - * ( == 0) or no process has been executed. WaitForExit should - * be called to wait for the process to terminate. - * Pipe_STDOUT = Data have been read from the child's stdout pipe. - * Pipe_STDERR = Data have been read from the child's stderr pipe. - * Pipe_Timeout = No data available within timeout specified for the - * call. Time elapsed has been subtracted from timeout - * argument. - */ -kwsysEXPORT int kwsysProcess_WaitForData(kwsysProcess* cp, char** data, - int* length, double* timeout); -enum kwsysProcess_Pipes_e -{ - kwsysProcess_Pipe_None, - kwsysProcess_Pipe_STDIN, - kwsysProcess_Pipe_STDOUT, - kwsysProcess_Pipe_STDERR, - kwsysProcess_Pipe_Timeout = 255 -}; - -/** - * Block until the child process terminates or the given timeout - * expires. If no process is running, returns immediately. The - * argument is: - * - * timeout = Specifies the maximum time this call may block. Upon - * returning due to child termination, the elapsed time - * is subtracted from the given value. A NULL pointer - * passed for this argument indicates no timeout for the - * call. - * - * Return value will be one of: - * - * 0 = Child did not terminate within timeout specified for - * the call. Time elapsed has been subtracted from timeout - * argument. - * 1 = Child has terminated or was not running. - */ -kwsysEXPORT int kwsysProcess_WaitForExit(kwsysProcess* cp, double* timeout); - -/** - * Interrupt the process group for the child process that is currently - * running by sending it the appropriate operating-system specific signal. - * The caller should call WaitForExit after this returns to wait for the - * child to terminate. - * - * WARNING: If you didn't specify kwsysProcess_Option_CreateProcessGroup, - * you will interrupt your own process group. - */ -kwsysEXPORT void kwsysProcess_Interrupt(kwsysProcess* cp); - -/** - * Forcefully terminate the child process that is currently running. - * The caller should call WaitForExit after this returns to wait for - * the child to terminate. - */ -kwsysEXPORT void kwsysProcess_Kill(kwsysProcess* cp); - -/** - * Same as kwsysProcess_Kill using process ID to locate process to - * terminate. - * @see kwsysProcess_Kill(kwsysProcess* cp) - */ -kwsysEXPORT void kwsysProcess_KillPID(unsigned long); - -/** - * Reset the start time of the child process to the current time. - */ -kwsysEXPORT void kwsysProcess_ResetStartTime(kwsysProcess* cp); - -#if defined(__cplusplus) -} /* extern "C" */ -#endif - -/* If we are building a kwsys .c or .cxx file, let it use these macros. - Otherwise, undefine them to keep the namespace clean. 
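The declarations above cover the whole lifecycle of a child process. A minimal end-to-end sketch under the same "kwsys" namespace assumption (the command line is only illustrative):

    #include <kwsys/Process.h>
    #include <cstdio>

    int main()
    {
      const char* cmd[] = { "/bin/ls", "-l", 0 };   // NULL-terminated argv

      kwsysProcess* cp = kwsysProcess_New();
      kwsysProcess_SetCommand(cp, cmd);
      kwsysProcess_SetTimeout(cp, 30.0);            // kill the child after 30 s
      kwsysProcess_Execute(cp);

      char* data = 0;
      int length = 0;
      // Drain stdout/stderr until the child closes its pipes; a NULL timeout
      // pointer means "block as long as necessary".
      while (kwsysProcess_WaitForData(cp, &data, &length, 0) !=
             kwsysProcess_Pipe_None) {
        std::fwrite(data, 1, (size_t)length, stdout);
      }

      kwsysProcess_WaitForExit(cp, 0);
      int ok = (kwsysProcess_GetState(cp) == kwsysProcess_State_Exited &&
                kwsysProcess_GetExitValue(cp) == 0);
      kwsysProcess_Delete(cp);
      return ok ? 0 : 1;
    }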
*/ -#if !defined(KWSYS_NAMESPACE) -# undef kwsys_ns -# undef kwsysEXPORT -# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# undef kwsysProcess -# undef kwsysProcess_s -# undef kwsysProcess_New -# undef kwsysProcess_Delete -# undef kwsysProcess_SetCommand -# undef kwsysProcess_AddCommand -# undef kwsysProcess_SetTimeout -# undef kwsysProcess_SetWorkingDirectory -# undef kwsysProcess_SetPipeFile -# undef kwsysProcess_SetPipeNative -# undef kwsysProcess_SetPipeShared -# undef kwsysProcess_Option_Detach -# undef kwsysProcess_Option_HideWindow -# undef kwsysProcess_Option_MergeOutput -# undef kwsysProcess_Option_Verbatim -# undef kwsysProcess_Option_CreateProcessGroup -# undef kwsysProcess_GetOption -# undef kwsysProcess_SetOption -# undef kwsysProcess_Option_e -# undef kwsysProcess_State_Starting -# undef kwsysProcess_State_Error -# undef kwsysProcess_State_Exception -# undef kwsysProcess_State_Executing -# undef kwsysProcess_State_Exited -# undef kwsysProcess_State_Expired -# undef kwsysProcess_State_Killed -# undef kwsysProcess_State_Disowned -# undef kwsysProcess_GetState -# undef kwsysProcess_State_e -# undef kwsysProcess_Exception_None -# undef kwsysProcess_Exception_Fault -# undef kwsysProcess_Exception_Illegal -# undef kwsysProcess_Exception_Interrupt -# undef kwsysProcess_Exception_Numerical -# undef kwsysProcess_Exception_Other -# undef kwsysProcess_GetExitException -# undef kwsysProcess_Exception_e -# undef kwsysProcess_GetExitCode -# undef kwsysProcess_GetExitValue -# undef kwsysProcess_GetErrorString -# undef kwsysProcess_GetExceptionString -# undef kwsysProcess_Execute -# undef kwsysProcess_Disown -# undef kwsysProcess_WaitForData -# undef kwsysProcess_Pipes_e -# undef kwsysProcess_Pipe_None -# undef kwsysProcess_Pipe_STDIN -# undef kwsysProcess_Pipe_STDOUT -# undef kwsysProcess_Pipe_STDERR -# undef kwsysProcess_Pipe_Timeout -# undef kwsysProcess_Pipe_Handle -# undef kwsysProcess_WaitForExit -# undef kwsysProcess_Interrupt -# undef kwsysProcess_Kill -# undef kwsysProcess_ResetStartTime -# endif -#endif - -#endif diff --git a/test/API/driver/kwsys/ProcessUNIX.c b/test/API/driver/kwsys/ProcessUNIX.c deleted file mode 100644 index 100eddcde7e..00000000000 --- a/test/API/driver/kwsys/ProcessUNIX.c +++ /dev/null @@ -1,2920 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(Process.h) -#include KWSYS_HEADER(System.h) - -/* Work-around CMake dependency scanning limitation. This must - duplicate the above list of headers. */ -#if 0 -# include "Process.h.in" -# include "System.h.in" -#endif - -/* - -Implementation for UNIX - -On UNIX, a child process is forked to exec the program. Three output -pipes are read by the parent process using a select call to block -until data are ready. Two of the pipes are stdout and stderr for the -child. The third is a special pipe populated by a signal handler to -indicate that a child has terminated. This is used in conjunction -with the timeout on the select call to implement a timeout for program -even when it closes stdout and stderr and at the same time avoiding -races. - -*/ - -/* - -TODO: - -We cannot create the pipeline of processes in suspended states. How -do we cleanup processes already started when one fails to load? Right -now we are just killing them, which is probably not the right thing to -do. 
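The implementation notes that follow describe the UNIX design: each child gets stdout and stderr pipes, and a third "signal" pipe is written from a SIGCHLD handler so that the blocking select() also wakes up on child termination. A stripped-down illustration of that self-pipe pattern, not the kwsys code itself:

    #include <csignal>
    #include <unistd.h>

    static int signalPipe[2];   // [0] read end watched by select(), [1] write end

    // SIGCHLD handler: write one byte so a blocking select() call wakes up.
    static void OnChildExit(int)
    {
      char byte = 1;
      (void)write(signalPipe[1], &byte, 1);   // write() is async-signal-safe
    }

    // Setup (error handling omitted): create the pipe, install the handler,
    // then add signalPipe[0] to the fd_set passed to select() alongside the
    // child's stdout and stderr read ends.
    static void InstallChildSignalPipe()
    {
      pipe(signalPipe);
      std::signal(SIGCHLD, OnChildExit);
    }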
- -*/ - -#if defined(__CYGWIN__) -/* Increase the file descriptor limit for select() before including - related system headers. (Default: 64) */ -# define FD_SETSIZE 16384 -#endif - -#include /* assert */ -#include /* isspace */ -#include /* DIR, dirent */ -#include /* errno */ -#include /* fcntl */ -#include /* sigaction */ -#include /* ptrdiff_t */ -#include /* snprintf */ -#include /* malloc, free */ -#include /* strdup, strerror, memset */ -#include /* open mode */ -#include /* struct timeval */ -#include /* pid_t, fd_set */ -#include /* waitpid */ -#include /* gettimeofday */ -#include /* pipe, close, fork, execvp, select, _exit */ - -#if defined(__VMS) -# define KWSYSPE_VMS_NONBLOCK , O_NONBLOCK -#else -# define KWSYSPE_VMS_NONBLOCK -#endif - -#if defined(KWSYS_C_HAS_PTRDIFF_T) && KWSYS_C_HAS_PTRDIFF_T -typedef ptrdiff_t kwsysProcess_ptrdiff_t; -#else -typedef int kwsysProcess_ptrdiff_t; -#endif - -#if defined(KWSYS_C_HAS_SSIZE_T) && KWSYS_C_HAS_SSIZE_T -typedef ssize_t kwsysProcess_ssize_t; -#else -typedef int kwsysProcess_ssize_t; -#endif - -#if defined(__BEOS__) && !defined(__ZETA__) -/* BeOS 5 doesn't have usleep(), but it has snooze(), which is identical. */ -# include -static inline void kwsysProcess_usleep(unsigned int msec) -{ - snooze(msec); -} -#else -# define kwsysProcess_usleep usleep -#endif - -/* - * BeOS's select() works like WinSock: it's for networking only, and - * doesn't work with Unix file handles...socket and file handles are - * different namespaces (the same descriptor means different things in - * each context!) - * - * So on Unix-like systems where select() is flakey, we'll set the - * pipes' file handles to be non-blocking and just poll them directly - * without select(). - */ -#if !defined(__BEOS__) && !defined(__VMS) && !defined(__MINT__) && \ - !defined(KWSYSPE_USE_SELECT) -# define KWSYSPE_USE_SELECT 1 -#endif - -/* Some platforms do not have siginfo on their signal handlers. */ -#if defined(SA_SIGINFO) && !defined(__BEOS__) -# define KWSYSPE_USE_SIGINFO 1 -#endif - -/* The number of pipes for the child's output. The standard stdout - and stderr pipes are the first two. One more pipe is used to - detect when the child process has terminated. The third pipe is - not given to the child process, so it cannot close it until it - terminates. */ -#define KWSYSPE_PIPE_COUNT 3 -#define KWSYSPE_PIPE_STDOUT 0 -#define KWSYSPE_PIPE_STDERR 1 -#define KWSYSPE_PIPE_SIGNAL 2 - -/* The maximum amount to read from a pipe at a time. */ -#define KWSYSPE_PIPE_BUFFER_SIZE 1024 - -/* Keep track of times using a signed representation. Switch to the - native (possibly unsigned) representation only when calling native - functions. 
*/ -typedef struct timeval kwsysProcessTimeNative; -typedef struct kwsysProcessTime_s kwsysProcessTime; -struct kwsysProcessTime_s -{ - long tv_sec; - long tv_usec; -}; - -typedef struct kwsysProcessCreateInformation_s -{ - int StdIn; - int StdOut; - int StdErr; - int ErrorPipe[2]; -} kwsysProcessCreateInformation; - -static void kwsysProcessVolatileFree(volatile void* p); -static int kwsysProcessInitialize(kwsysProcess* cp); -static void kwsysProcessCleanup(kwsysProcess* cp, int error); -static void kwsysProcessCleanupDescriptor(int* pfd); -static void kwsysProcessClosePipes(kwsysProcess* cp); -static int kwsysProcessSetNonBlocking(int fd); -static int kwsysProcessCreate(kwsysProcess* cp, int prIndex, - kwsysProcessCreateInformation* si); -static void kwsysProcessDestroy(kwsysProcess* cp); -static int kwsysProcessSetupOutputPipeFile(int* p, const char* name); -static int kwsysProcessSetupOutputPipeNative(int* p, int des[2]); -static int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout, - kwsysProcessTime* timeoutTime); -static int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime, - double* userTimeout, - kwsysProcessTimeNative* timeoutLength, - int zeroIsExpired); -static kwsysProcessTime kwsysProcessTimeGetCurrent(void); -static double kwsysProcessTimeToDouble(kwsysProcessTime t); -static kwsysProcessTime kwsysProcessTimeFromDouble(double d); -static int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2); -static kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1, - kwsysProcessTime in2); -static kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1, - kwsysProcessTime in2); -static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int sig, - int idx); -static void kwsysProcessChildErrorExit(int errorPipe); -static void kwsysProcessRestoreDefaultSignalHandlers(void); -static pid_t kwsysProcessFork(kwsysProcess* cp, - kwsysProcessCreateInformation* si); -static void kwsysProcessKill(pid_t process_id); -#if defined(__VMS) -static int kwsysProcessSetVMSFeature(const char* name, int value); -#endif -static int kwsysProcessesAdd(kwsysProcess* cp); -static void kwsysProcessesRemove(kwsysProcess* cp); -#if KWSYSPE_USE_SIGINFO -static void kwsysProcessesSignalHandler(int signum, siginfo_t* info, - void* ucontext); -#else -static void kwsysProcessesSignalHandler(int signum); -#endif - -/* A structure containing results data for each process. */ -typedef struct kwsysProcessResults_s kwsysProcessResults; -struct kwsysProcessResults_s -{ - /* The status of the child process. */ - int State; - - /* The exceptional behavior that terminated the process, if any. */ - int ExitException; - - /* The process exit code. */ - int ExitCode; - - /* The process return code, if any. */ - int ExitValue; - - /* Description for the ExitException. */ - char ExitExceptionString[KWSYSPE_PIPE_BUFFER_SIZE + 1]; -}; - -/* Structure containing data used to implement the child's execution. */ -struct kwsysProcess_s -{ - /* The command lines to execute. */ - char*** Commands; - volatile int NumberOfCommands; - - /* Descriptors for the read ends of the child's output pipes and - the signal pipe. */ - int PipeReadEnds[KWSYSPE_PIPE_COUNT]; - - /* Descriptors for the child's ends of the pipes. - Used temporarily during process creation. */ - int PipeChildStd[3]; - - /* Write descriptor for child termination signal pipe. */ - int SignalPipe; - - /* Buffer for pipe data. */ - char PipeBuffer[KWSYSPE_PIPE_BUFFER_SIZE]; - - /* Process IDs returned by the calls to fork. 
Everything is volatile - because the signal handler accesses them. You must be very careful - when reaping PIDs or modifying this array to avoid race conditions. */ - volatile pid_t* volatile ForkPIDs; - - /* Flag for whether the children were terminated by a failed select. */ - int SelectError; - - /* The timeout length. */ - double Timeout; - - /* The working directory for the process. */ - char* WorkingDirectory; - - /* Whether to create the child as a detached process. */ - int OptionDetach; - - /* Whether the child was created as a detached process. */ - int Detached; - - /* Whether to treat command lines as verbatim. */ - int Verbatim; - - /* Whether to merge stdout/stderr of the child. */ - int MergeOutput; - - /* Whether to create the process in a new process group. */ - volatile sig_atomic_t CreateProcessGroup; - - /* Time at which the child started. Negative for no timeout. */ - kwsysProcessTime StartTime; - - /* Time at which the child will timeout. Negative for no timeout. */ - kwsysProcessTime TimeoutTime; - - /* Flag for whether the timeout expired. */ - int TimeoutExpired; - - /* The number of pipes left open during execution. */ - int PipesLeft; - -#if KWSYSPE_USE_SELECT - /* File descriptor set for call to select. */ - fd_set PipeSet; -#endif - - /* The number of children still executing. */ - int CommandsLeft; - - /* The status of the process structure. Must be atomic because - the signal handler checks this to avoid a race. */ - volatile sig_atomic_t State; - - /* Whether the process was killed. */ - volatile sig_atomic_t Killed; - - /* Buffer for error message in case of failure. */ - char ErrorMessage[KWSYSPE_PIPE_BUFFER_SIZE + 1]; - - /* process results. */ - kwsysProcessResults* ProcessResults; - - /* The exit codes of each child process in the pipeline. */ - int* CommandExitCodes; - - /* Name of files to which stdin and stdout pipes are attached. */ - char* PipeFileSTDIN; - char* PipeFileSTDOUT; - char* PipeFileSTDERR; - - /* Whether each pipe is shared with the parent process. */ - int PipeSharedSTDIN; - int PipeSharedSTDOUT; - int PipeSharedSTDERR; - - /* Native pipes provided by the user. */ - int PipeNativeSTDIN[2]; - int PipeNativeSTDOUT[2]; - int PipeNativeSTDERR[2]; - - /* The real working directory of this process. */ - int RealWorkingDirectoryLength; - char* RealWorkingDirectory; -}; - -kwsysProcess* kwsysProcess_New(void) -{ - /* Allocate a process control structure. */ - kwsysProcess* cp = (kwsysProcess*)malloc(sizeof(kwsysProcess)); - if (!cp) { - return 0; - } - memset(cp, 0, sizeof(kwsysProcess)); - - /* Share stdin with the parent process by default. */ - cp->PipeSharedSTDIN = 1; - - /* No native pipes by default. */ - cp->PipeNativeSTDIN[0] = -1; - cp->PipeNativeSTDIN[1] = -1; - cp->PipeNativeSTDOUT[0] = -1; - cp->PipeNativeSTDOUT[1] = -1; - cp->PipeNativeSTDERR[0] = -1; - cp->PipeNativeSTDERR[1] = -1; - - /* Set initial status. */ - cp->State = kwsysProcess_State_Starting; - - return cp; -} - -void kwsysProcess_Delete(kwsysProcess* cp) -{ - /* Make sure we have an instance. */ - if (!cp) { - return; - } - - /* If the process is executing, wait for it to finish. */ - if (cp->State == kwsysProcess_State_Executing) { - if (cp->Detached) { - kwsysProcess_Disown(cp); - } else { - kwsysProcess_WaitForExit(cp, 0); - } - } - - /* Free memory. 
*/ - kwsysProcess_SetCommand(cp, 0); - kwsysProcess_SetWorkingDirectory(cp, 0); - kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDIN, 0); - kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDOUT, 0); - kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDERR, 0); - free(cp->CommandExitCodes); - free(cp->ProcessResults); - free(cp); -} - -int kwsysProcess_SetCommand(kwsysProcess* cp, char const* const* command) -{ - int i; - if (!cp) { - return 0; - } - for (i = 0; i < cp->NumberOfCommands; ++i) { - char** c = cp->Commands[i]; - while (*c) { - free(*c++); - } - free(cp->Commands[i]); - } - cp->NumberOfCommands = 0; - if (cp->Commands) { - free(cp->Commands); - cp->Commands = 0; - } - if (command) { - return kwsysProcess_AddCommand(cp, command); - } - return 1; -} - -int kwsysProcess_AddCommand(kwsysProcess* cp, char const* const* command) -{ - int newNumberOfCommands; - char*** newCommands; - - /* Make sure we have a command to add. */ - if (!cp || !command || !*command) { - return 0; - } - - /* Allocate a new array for command pointers. */ - newNumberOfCommands = cp->NumberOfCommands + 1; - if (!(newCommands = - (char***)malloc(sizeof(char**) * (size_t)(newNumberOfCommands)))) { - /* Out of memory. */ - return 0; - } - - /* Copy any existing commands into the new array. */ - { - int i; - for (i = 0; i < cp->NumberOfCommands; ++i) { - newCommands[i] = cp->Commands[i]; - } - } - - /* Add the new command. */ - if (cp->Verbatim) { - /* In order to run the given command line verbatim we need to - parse it. */ - newCommands[cp->NumberOfCommands] = - kwsysSystem_Parse_CommandForUnix(*command, 0); - if (!newCommands[cp->NumberOfCommands] || - !newCommands[cp->NumberOfCommands][0]) { - /* Out of memory or no command parsed. */ - free(newCommands); - return 0; - } - } else { - /* Copy each argument string individually. */ - char const* const* c = command; - kwsysProcess_ptrdiff_t n = 0; - kwsysProcess_ptrdiff_t i = 0; - while (*c++) - ; - n = c - command - 1; - newCommands[cp->NumberOfCommands] = - (char**)malloc((size_t)(n + 1) * sizeof(char*)); - if (!newCommands[cp->NumberOfCommands]) { - /* Out of memory. */ - free(newCommands); - return 0; - } - for (i = 0; i < n; ++i) { - assert(command[i]); /* Quiet Clang scan-build. */ - newCommands[cp->NumberOfCommands][i] = strdup(command[i]); - if (!newCommands[cp->NumberOfCommands][i]) { - break; - } - } - if (i < n) { - /* Out of memory. */ - for (; i > 0; --i) { - free(newCommands[cp->NumberOfCommands][i - 1]); - } - free(newCommands); - return 0; - } - newCommands[cp->NumberOfCommands][n] = 0; - } - - /* Successfully allocated new command array. Free the old array. */ - free(cp->Commands); - cp->Commands = newCommands; - cp->NumberOfCommands = newNumberOfCommands; - - return 1; -} - -void kwsysProcess_SetTimeout(kwsysProcess* cp, double timeout) -{ - if (!cp) { - return; - } - cp->Timeout = timeout; - if (cp->Timeout < 0) { - cp->Timeout = 0; - } - // Force recomputation of TimeoutTime. 
- cp->TimeoutTime.tv_sec = -1; -} - -int kwsysProcess_SetWorkingDirectory(kwsysProcess* cp, const char* dir) -{ - if (!cp) { - return 0; - } - if (cp->WorkingDirectory == dir) { - return 1; - } - if (cp->WorkingDirectory && dir && strcmp(cp->WorkingDirectory, dir) == 0) { - return 1; - } - if (cp->WorkingDirectory) { - free(cp->WorkingDirectory); - cp->WorkingDirectory = 0; - } - if (dir) { - cp->WorkingDirectory = strdup(dir); - if (!cp->WorkingDirectory) { - return 0; - } - } - return 1; -} - -int kwsysProcess_SetPipeFile(kwsysProcess* cp, int prPipe, const char* file) -{ - char** pfile; - if (!cp) { - return 0; - } - switch (prPipe) { - case kwsysProcess_Pipe_STDIN: - pfile = &cp->PipeFileSTDIN; - break; - case kwsysProcess_Pipe_STDOUT: - pfile = &cp->PipeFileSTDOUT; - break; - case kwsysProcess_Pipe_STDERR: - pfile = &cp->PipeFileSTDERR; - break; - default: - return 0; - } - if (*pfile) { - free(*pfile); - *pfile = 0; - } - if (file) { - *pfile = strdup(file); - if (!*pfile) { - return 0; - } - } - - /* If we are redirecting the pipe, do not share it or use a native - pipe. */ - if (*pfile) { - kwsysProcess_SetPipeNative(cp, prPipe, 0); - kwsysProcess_SetPipeShared(cp, prPipe, 0); - } - return 1; -} - -void kwsysProcess_SetPipeShared(kwsysProcess* cp, int prPipe, int shared) -{ - if (!cp) { - return; - } - - switch (prPipe) { - case kwsysProcess_Pipe_STDIN: - cp->PipeSharedSTDIN = shared ? 1 : 0; - break; - case kwsysProcess_Pipe_STDOUT: - cp->PipeSharedSTDOUT = shared ? 1 : 0; - break; - case kwsysProcess_Pipe_STDERR: - cp->PipeSharedSTDERR = shared ? 1 : 0; - break; - default: - return; - } - - /* If we are sharing the pipe, do not redirect it to a file or use a - native pipe. */ - if (shared) { - kwsysProcess_SetPipeFile(cp, prPipe, 0); - kwsysProcess_SetPipeNative(cp, prPipe, 0); - } -} - -void kwsysProcess_SetPipeNative(kwsysProcess* cp, int prPipe, int p[2]) -{ - int* pPipeNative = 0; - - if (!cp) { - return; - } - - switch (prPipe) { - case kwsysProcess_Pipe_STDIN: - pPipeNative = cp->PipeNativeSTDIN; - break; - case kwsysProcess_Pipe_STDOUT: - pPipeNative = cp->PipeNativeSTDOUT; - break; - case kwsysProcess_Pipe_STDERR: - pPipeNative = cp->PipeNativeSTDERR; - break; - default: - return; - } - - /* Copy the native pipe descriptors provided. */ - if (p) { - pPipeNative[0] = p[0]; - pPipeNative[1] = p[1]; - } else { - pPipeNative[0] = -1; - pPipeNative[1] = -1; - } - - /* If we are using a native pipe, do not share it or redirect it to - a file. */ - if (p) { - kwsysProcess_SetPipeFile(cp, prPipe, 0); - kwsysProcess_SetPipeShared(cp, prPipe, 0); - } -} - -int kwsysProcess_GetOption(kwsysProcess* cp, int optionId) -{ - if (!cp) { - return 0; - } - - switch (optionId) { - case kwsysProcess_Option_Detach: - return cp->OptionDetach; - case kwsysProcess_Option_MergeOutput: - return cp->MergeOutput; - case kwsysProcess_Option_Verbatim: - return cp->Verbatim; - case kwsysProcess_Option_CreateProcessGroup: - return cp->CreateProcessGroup; - default: - return 0; - } -} - -void kwsysProcess_SetOption(kwsysProcess* cp, int optionId, int value) -{ - if (!cp) { - return; - } - - switch (optionId) { - case kwsysProcess_Option_Detach: - cp->OptionDetach = value; - break; - case kwsysProcess_Option_MergeOutput: - cp->MergeOutput = value; - break; - case kwsysProcess_Option_Verbatim: - cp->Verbatim = value; - break; - case kwsysProcess_Option_CreateProcessGroup: - cp->CreateProcessGroup = value; - break; - default: - break; - } -} - -int kwsysProcess_GetState(kwsysProcess* cp) -{ - return cp ? 
cp->State : kwsysProcess_State_Error; -} - -int kwsysProcess_GetExitException(kwsysProcess* cp) -{ - return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) - ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitException - : kwsysProcess_Exception_Other; -} - -int kwsysProcess_GetExitCode(kwsysProcess* cp) -{ - return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) - ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitCode - : 0; -} - -int kwsysProcess_GetExitValue(kwsysProcess* cp) -{ - return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) - ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitValue - : -1; -} - -const char* kwsysProcess_GetErrorString(kwsysProcess* cp) -{ - if (!cp) { - return "Process management structure could not be allocated"; - } else if (cp->State == kwsysProcess_State_Error) { - return cp->ErrorMessage; - } - return "Success"; -} - -const char* kwsysProcess_GetExceptionString(kwsysProcess* cp) -{ - if (!(cp && cp->ProcessResults && (cp->NumberOfCommands > 0))) { - return "GetExceptionString called with NULL process management structure"; - } else if (cp->State == kwsysProcess_State_Exception) { - return cp->ProcessResults[cp->NumberOfCommands - 1].ExitExceptionString; - } - return "No exception"; -} - -/* the index should be in array bound. */ -#define KWSYSPE_IDX_CHK(RET) \ - if (!cp || idx >= cp->NumberOfCommands || idx < 0) { \ - return RET; \ - } - -int kwsysProcess_GetStateByIndex(kwsysProcess* cp, int idx) -{ - KWSYSPE_IDX_CHK(kwsysProcess_State_Error) - return cp->ProcessResults[idx].State; -} - -int kwsysProcess_GetExitExceptionByIndex(kwsysProcess* cp, int idx) -{ - KWSYSPE_IDX_CHK(kwsysProcess_Exception_Other) - return cp->ProcessResults[idx].ExitException; -} - -int kwsysProcess_GetExitValueByIndex(kwsysProcess* cp, int idx) -{ - KWSYSPE_IDX_CHK(-1) - return cp->ProcessResults[idx].ExitValue; -} - -int kwsysProcess_GetExitCodeByIndex(kwsysProcess* cp, int idx) -{ - KWSYSPE_IDX_CHK(-1) - return cp->CommandExitCodes[idx]; -} - -const char* kwsysProcess_GetExceptionStringByIndex(kwsysProcess* cp, int idx) -{ - KWSYSPE_IDX_CHK("GetExceptionString called with NULL process management " - "structure or index out of bound") - if (cp->ProcessResults[idx].State == kwsysProcess_StateByIndex_Exception) { - return cp->ProcessResults[idx].ExitExceptionString; - } - return "No exception"; -} - -#undef KWSYSPE_IDX_CHK - -void kwsysProcess_Execute(kwsysProcess* cp) -{ - int i; - - /* Do not execute a second copy simultaneously. */ - if (!cp || cp->State == kwsysProcess_State_Executing) { - return; - } - - /* Make sure we have something to run. */ - if (cp->NumberOfCommands < 1) { - strcpy(cp->ErrorMessage, "No command"); - cp->State = kwsysProcess_State_Error; - return; - } - - /* Initialize the control structure for a new process. */ - if (!kwsysProcessInitialize(cp)) { - strcpy(cp->ErrorMessage, "Out of memory"); - cp->State = kwsysProcess_State_Error; - return; - } - -#if defined(__VMS) - /* Make sure pipes behave like streams on VMS. */ - if (!kwsysProcessSetVMSFeature("DECC$STREAM_PIPE", 1)) { - kwsysProcessCleanup(cp, 1); - return; - } -#endif - - /* Save the real working directory of this process and change to - the working directory for the child processes. This is needed - to make pipe file paths evaluate correctly. 
*/ - if (cp->WorkingDirectory) { - int r; - if (!getcwd(cp->RealWorkingDirectory, - (size_t)(cp->RealWorkingDirectoryLength))) { - kwsysProcessCleanup(cp, 1); - return; - } - - /* Some platforms specify that the chdir call may be - interrupted. Repeat the call until it finishes. */ - while (((r = chdir(cp->WorkingDirectory)) < 0) && (errno == EINTR)) - ; - if (r < 0) { - kwsysProcessCleanup(cp, 1); - return; - } - } - - /* If not running a detached child, add this object to the global - set of process objects that wish to be notified when a child - exits. */ - if (!cp->OptionDetach) { - if (!kwsysProcessesAdd(cp)) { - kwsysProcessCleanup(cp, 1); - return; - } - } - - /* Setup the stdin pipe for the first process. */ - if (cp->PipeFileSTDIN) { - /* Open a file for the child's stdin to read. */ - cp->PipeChildStd[0] = open(cp->PipeFileSTDIN, O_RDONLY); - if (cp->PipeChildStd[0] < 0) { - kwsysProcessCleanup(cp, 1); - return; - } - - /* Set close-on-exec flag on the pipe's end. */ - if (fcntl(cp->PipeChildStd[0], F_SETFD, FD_CLOEXEC) < 0) { - kwsysProcessCleanup(cp, 1); - return; - } - } else if (cp->PipeSharedSTDIN) { - cp->PipeChildStd[0] = 0; - } else if (cp->PipeNativeSTDIN[0] >= 0) { - cp->PipeChildStd[0] = cp->PipeNativeSTDIN[0]; - - /* Set close-on-exec flag on the pipe's ends. The read end will - be dup2-ed into the stdin descriptor after the fork but before - the exec. */ - if ((fcntl(cp->PipeNativeSTDIN[0], F_SETFD, FD_CLOEXEC) < 0) || - (fcntl(cp->PipeNativeSTDIN[1], F_SETFD, FD_CLOEXEC) < 0)) { - kwsysProcessCleanup(cp, 1); - return; - } - } else { - cp->PipeChildStd[0] = -1; - } - - /* Create the output pipe for the last process. - We always create this so the pipe can be passed to select even if - it will report closed immediately. */ - { - /* Create the pipe. */ - int p[2]; - if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) { - kwsysProcessCleanup(cp, 1); - return; - } - - /* Store the pipe. */ - cp->PipeReadEnds[KWSYSPE_PIPE_STDOUT] = p[0]; - cp->PipeChildStd[1] = p[1]; - - /* Set close-on-exec flag on the pipe's ends. */ - if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) || - (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) { - kwsysProcessCleanup(cp, 1); - return; - } - - /* Set to non-blocking in case select lies, or for the polling - implementation. */ - if (!kwsysProcessSetNonBlocking(p[0])) { - kwsysProcessCleanup(cp, 1); - return; - } - } - - if (cp->PipeFileSTDOUT) { - /* Use a file for stdout. */ - if (!kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[1], - cp->PipeFileSTDOUT)) { - kwsysProcessCleanup(cp, 1); - return; - } - } else if (cp->PipeSharedSTDOUT) { - /* Use the parent stdout. */ - kwsysProcessCleanupDescriptor(&cp->PipeChildStd[1]); - cp->PipeChildStd[1] = 1; - } else if (cp->PipeNativeSTDOUT[1] >= 0) { - /* Use the given descriptor for stdout. */ - if (!kwsysProcessSetupOutputPipeNative(&cp->PipeChildStd[1], - cp->PipeNativeSTDOUT)) { - kwsysProcessCleanup(cp, 1); - return; - } - } - - /* Create stderr pipe to be shared by all processes in the pipeline. - We always create this so the pipe can be passed to select even if - it will report closed immediately. */ - { - /* Create the pipe. */ - int p[2]; - if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) { - kwsysProcessCleanup(cp, 1); - return; - } - - /* Store the pipe. */ - cp->PipeReadEnds[KWSYSPE_PIPE_STDERR] = p[0]; - cp->PipeChildStd[2] = p[1]; - - /* Set close-on-exec flag on the pipe's ends. 
*/ - if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) || - (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) { - kwsysProcessCleanup(cp, 1); - return; - } - - /* Set to non-blocking in case select lies, or for the polling - implementation. */ - if (!kwsysProcessSetNonBlocking(p[0])) { - kwsysProcessCleanup(cp, 1); - return; - } - } - - if (cp->PipeFileSTDERR) { - /* Use a file for stderr. */ - if (!kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[2], - cp->PipeFileSTDERR)) { - kwsysProcessCleanup(cp, 1); - return; - } - } else if (cp->PipeSharedSTDERR) { - /* Use the parent stderr. */ - kwsysProcessCleanupDescriptor(&cp->PipeChildStd[2]); - cp->PipeChildStd[2] = 2; - } else if (cp->PipeNativeSTDERR[1] >= 0) { - /* Use the given handle for stderr. */ - if (!kwsysProcessSetupOutputPipeNative(&cp->PipeChildStd[2], - cp->PipeNativeSTDERR)) { - kwsysProcessCleanup(cp, 1); - return; - } - } - - /* The timeout period starts now. */ - cp->StartTime = kwsysProcessTimeGetCurrent(); - cp->TimeoutTime.tv_sec = -1; - cp->TimeoutTime.tv_usec = -1; - - /* Create the pipeline of processes. */ - { - kwsysProcessCreateInformation si = { -1, -1, -1, { -1, -1 } }; - int nextStdIn = cp->PipeChildStd[0]; - for (i = 0; i < cp->NumberOfCommands; ++i) { - /* Setup the process's pipes. */ - si.StdIn = nextStdIn; - if (i == cp->NumberOfCommands - 1) { - nextStdIn = -1; - si.StdOut = cp->PipeChildStd[1]; - } else { - /* Create a pipe to sit between the children. */ - int p[2] = { -1, -1 }; - if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) { - if (nextStdIn != cp->PipeChildStd[0]) { - kwsysProcessCleanupDescriptor(&nextStdIn); - } - kwsysProcessCleanup(cp, 1); - return; - } - - /* Set close-on-exec flag on the pipe's ends. */ - if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) || - (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) { - close(p[0]); - close(p[1]); - if (nextStdIn != cp->PipeChildStd[0]) { - kwsysProcessCleanupDescriptor(&nextStdIn); - } - kwsysProcessCleanup(cp, 1); - return; - } - nextStdIn = p[0]; - si.StdOut = p[1]; - } - si.StdErr = cp->MergeOutput ? cp->PipeChildStd[1] : cp->PipeChildStd[2]; - - { - int res = kwsysProcessCreate(cp, i, &si); - - /* Close our copies of pipes used between children. */ - if (si.StdIn != cp->PipeChildStd[0]) { - kwsysProcessCleanupDescriptor(&si.StdIn); - } - if (si.StdOut != cp->PipeChildStd[1]) { - kwsysProcessCleanupDescriptor(&si.StdOut); - } - if (si.StdErr != cp->PipeChildStd[2] && !cp->MergeOutput) { - kwsysProcessCleanupDescriptor(&si.StdErr); - } - - if (!res) { - kwsysProcessCleanupDescriptor(&si.ErrorPipe[0]); - kwsysProcessCleanupDescriptor(&si.ErrorPipe[1]); - if (nextStdIn != cp->PipeChildStd[0]) { - kwsysProcessCleanupDescriptor(&nextStdIn); - } - kwsysProcessCleanup(cp, 1); - return; - } - } - } - } - - /* The parent process does not need the child's pipe ends. */ - for (i = 0; i < 3; ++i) { - kwsysProcessCleanupDescriptor(&cp->PipeChildStd[i]); - } - - /* Restore the working directory. */ - if (cp->RealWorkingDirectory) { - /* Some platforms specify that the chdir call may be - interrupted. Repeat the call until it finishes. */ - while ((chdir(cp->RealWorkingDirectory) < 0) && (errno == EINTR)) - ; - free(cp->RealWorkingDirectory); - cp->RealWorkingDirectory = 0; - } - - /* All the pipes are now open. */ - cp->PipesLeft = KWSYSPE_PIPE_COUNT; - - /* The process has now started. */ - cp->State = kwsysProcess_State_Executing; - cp->Detached = cp->OptionDetach; -} - -kwsysEXPORT void kwsysProcess_Disown(kwsysProcess* cp) -{ - /* Make sure a detached child process is running. 
*/ - if (!cp || !cp->Detached || cp->State != kwsysProcess_State_Executing || - cp->TimeoutExpired || cp->Killed) { - return; - } - - /* Close all the pipes safely. */ - kwsysProcessClosePipes(cp); - - /* We will not wait for exit, so cleanup now. */ - kwsysProcessCleanup(cp, 0); - - /* The process has been disowned. */ - cp->State = kwsysProcess_State_Disowned; -} - -typedef struct kwsysProcessWaitData_s -{ - int Expired; - int PipeId; - int User; - double* UserTimeout; - kwsysProcessTime TimeoutTime; -} kwsysProcessWaitData; -static int kwsysProcessWaitForPipe(kwsysProcess* cp, char** data, int* length, - kwsysProcessWaitData* wd); - -int kwsysProcess_WaitForData(kwsysProcess* cp, char** data, int* length, - double* userTimeout) -{ - kwsysProcessTime userStartTime = { 0, 0 }; - kwsysProcessWaitData wd = { 0, kwsysProcess_Pipe_None, 0, 0, { 0, 0 } }; - wd.UserTimeout = userTimeout; - /* Make sure we are executing a process. */ - if (!cp || cp->State != kwsysProcess_State_Executing || cp->Killed || - cp->TimeoutExpired) { - return kwsysProcess_Pipe_None; - } - - /* Record the time at which user timeout period starts. */ - if (userTimeout) { - userStartTime = kwsysProcessTimeGetCurrent(); - } - - /* Calculate the time at which a timeout will expire, and whether it - is the user or process timeout. */ - wd.User = kwsysProcessGetTimeoutTime(cp, userTimeout, &wd.TimeoutTime); - - /* Data can only be available when pipes are open. If the process - is not running, cp->PipesLeft will be 0. */ - while (cp->PipesLeft > 0 && - !kwsysProcessWaitForPipe(cp, data, length, &wd)) { - } - - /* Update the user timeout. */ - if (userTimeout) { - kwsysProcessTime userEndTime = kwsysProcessTimeGetCurrent(); - kwsysProcessTime difference = - kwsysProcessTimeSubtract(userEndTime, userStartTime); - double d = kwsysProcessTimeToDouble(difference); - *userTimeout -= d; - if (*userTimeout < 0) { - *userTimeout = 0; - } - } - - /* Check what happened. */ - if (wd.PipeId) { - /* Data are ready on a pipe. */ - return wd.PipeId; - } else if (wd.Expired) { - /* A timeout has expired. */ - if (wd.User) { - /* The user timeout has expired. It has no time left. */ - return kwsysProcess_Pipe_Timeout; - } else { - /* The process timeout has expired. Kill the children now. */ - kwsysProcess_Kill(cp); - cp->Killed = 0; - cp->TimeoutExpired = 1; - return kwsysProcess_Pipe_None; - } - } else { - /* No pipes are left open. */ - return kwsysProcess_Pipe_None; - } -} - -static int kwsysProcessWaitForPipe(kwsysProcess* cp, char** data, int* length, - kwsysProcessWaitData* wd) -{ - int i; - kwsysProcessTimeNative timeoutLength; - -#if KWSYSPE_USE_SELECT - int numReady = 0; - int max = -1; - kwsysProcessTimeNative* timeout = 0; - - /* Check for any open pipes with data reported ready by the last - call to select. According to "man select_tut" we must deal - with all descriptors reported by a call to select before - passing them to another select call. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - if (cp->PipeReadEnds[i] >= 0 && - FD_ISSET(cp->PipeReadEnds[i], &cp->PipeSet)) { - kwsysProcess_ssize_t n; - - /* We are handling this pipe now. Remove it from the set. */ - FD_CLR(cp->PipeReadEnds[i], &cp->PipeSet); - - /* The pipe is ready to read without blocking. Keep trying to - read until the operation is not interrupted. */ - while (((n = read(cp->PipeReadEnds[i], cp->PipeBuffer, - KWSYSPE_PIPE_BUFFER_SIZE)) < 0) && - (errno == EINTR)) - ; - if (n > 0) { - /* We have data on this pipe. 
*/ - if (i == KWSYSPE_PIPE_SIGNAL) { - /* A child process has terminated. */ - kwsysProcessDestroy(cp); - } else if (data && length) { - /* Report this data. */ - *data = cp->PipeBuffer; - *length = (int)(n); - switch (i) { - case KWSYSPE_PIPE_STDOUT: - wd->PipeId = kwsysProcess_Pipe_STDOUT; - break; - case KWSYSPE_PIPE_STDERR: - wd->PipeId = kwsysProcess_Pipe_STDERR; - break; - } - return 1; - } - } else if (n < 0 && errno == EAGAIN) { - /* No data are really ready. The select call lied. See the - "man select" page on Linux for cases when this occurs. */ - } else { - /* We are done reading from this pipe. */ - kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]); - --cp->PipesLeft; - } - } - } - - /* If we have data, break early. */ - if (wd->PipeId) { - return 1; - } - - /* Make sure the set is empty (it should always be empty here - anyway). */ - FD_ZERO(&cp->PipeSet); - - /* Setup a timeout if required. */ - if (wd->TimeoutTime.tv_sec < 0) { - timeout = 0; - } else { - timeout = &timeoutLength; - } - if (kwsysProcessGetTimeoutLeft( - &wd->TimeoutTime, wd->User ? wd->UserTimeout : 0, &timeoutLength, 0)) { - /* Timeout has already expired. */ - wd->Expired = 1; - return 1; - } - - /* Add the pipe reading ends that are still open. */ - max = -1; - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - if (cp->PipeReadEnds[i] >= 0) { - FD_SET(cp->PipeReadEnds[i], &cp->PipeSet); - if (cp->PipeReadEnds[i] > max) { - max = cp->PipeReadEnds[i]; - } - } - } - - /* Make sure we have a non-empty set. */ - if (max < 0) { - /* All pipes have closed. Child has terminated. */ - return 1; - } - - /* Run select to block until data are available. Repeat call - until it is not interrupted. */ - while (((numReady = select(max + 1, &cp->PipeSet, 0, 0, timeout)) < 0) && - (errno == EINTR)) - ; - - /* Check result of select. */ - if (numReady == 0) { - /* Select's timeout expired. */ - wd->Expired = 1; - return 1; - } else if (numReady < 0) { - /* Select returned an error. Leave the error description in the - pipe buffer. */ - strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE); - - /* Kill the children now. */ - kwsysProcess_Kill(cp); - cp->Killed = 0; - cp->SelectError = 1; - } - - return 0; -#else - /* Poll pipes for data since we do not have select. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - if (cp->PipeReadEnds[i] >= 0) { - const int fd = cp->PipeReadEnds[i]; - int n = read(fd, cp->PipeBuffer, KWSYSPE_PIPE_BUFFER_SIZE); - if (n > 0) { - /* We have data on this pipe. */ - if (i == KWSYSPE_PIPE_SIGNAL) { - /* A child process has terminated. */ - kwsysProcessDestroy(cp); - } else if (data && length) { - /* Report this data. */ - *data = cp->PipeBuffer; - *length = n; - switch (i) { - case KWSYSPE_PIPE_STDOUT: - wd->PipeId = kwsysProcess_Pipe_STDOUT; - break; - case KWSYSPE_PIPE_STDERR: - wd->PipeId = kwsysProcess_Pipe_STDERR; - break; - }; - } - return 1; - } else if (n == 0) /* EOF */ - { -/* We are done reading from this pipe. */ -# if defined(__VMS) - if (!cp->CommandsLeft) -# endif - { - kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]); - --cp->PipesLeft; - } - } else if (n < 0) /* error */ - { -# if defined(__VMS) - if (!cp->CommandsLeft) { - kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]); - --cp->PipesLeft; - } else -# endif - if ((errno != EINTR) && (errno != EAGAIN)) { - strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE); - /* Kill the children now. 
*/ - kwsysProcess_Kill(cp); - cp->Killed = 0; - cp->SelectError = 1; - return 1; - } - } - } - } - - /* If we have data, break early. */ - if (wd->PipeId) { - return 1; - } - - if (kwsysProcessGetTimeoutLeft( - &wd->TimeoutTime, wd->User ? wd->UserTimeout : 0, &timeoutLength, 1)) { - /* Timeout has already expired. */ - wd->Expired = 1; - return 1; - } - - /* Sleep a little, try again. */ - { - unsigned int msec = - ((timeoutLength.tv_sec * 1000) + (timeoutLength.tv_usec / 1000)); - if (msec > 100000) { - msec = 100000; /* do not sleep more than 100 milliseconds at a time */ - } - kwsysProcess_usleep(msec); - } - return 0; -#endif -} - -int kwsysProcess_WaitForExit(kwsysProcess* cp, double* userTimeout) -{ - int prPipe = 0; - - /* Make sure we are executing a process. */ - if (!cp || cp->State != kwsysProcess_State_Executing) { - return 1; - } - - /* Wait for all the pipes to close. Ignore all data. */ - while ((prPipe = kwsysProcess_WaitForData(cp, 0, 0, userTimeout)) > 0) { - if (prPipe == kwsysProcess_Pipe_Timeout) { - return 0; - } - } - - /* Check if there was an error in one of the waitpid calls. */ - if (cp->State == kwsysProcess_State_Error) { - /* The error message is already in its buffer. Tell - kwsysProcessCleanup to not create it. */ - kwsysProcessCleanup(cp, 0); - return 1; - } - - /* Check whether the child reported an error invoking the process. */ - if (cp->SelectError) { - /* The error message is already in its buffer. Tell - kwsysProcessCleanup to not create it. */ - kwsysProcessCleanup(cp, 0); - cp->State = kwsysProcess_State_Error; - return 1; - } - /* Determine the outcome. */ - if (cp->Killed) { - /* We killed the child. */ - cp->State = kwsysProcess_State_Killed; - } else if (cp->TimeoutExpired) { - /* The timeout expired. */ - cp->State = kwsysProcess_State_Expired; - } else { - /* The children exited. Report the outcome of the child processes. */ - for (prPipe = 0; prPipe < cp->NumberOfCommands; ++prPipe) { - cp->ProcessResults[prPipe].ExitCode = cp->CommandExitCodes[prPipe]; - if (WIFEXITED(cp->ProcessResults[prPipe].ExitCode)) { - /* The child exited normally. */ - cp->ProcessResults[prPipe].State = kwsysProcess_StateByIndex_Exited; - cp->ProcessResults[prPipe].ExitException = kwsysProcess_Exception_None; - cp->ProcessResults[prPipe].ExitValue = - (int)WEXITSTATUS(cp->ProcessResults[prPipe].ExitCode); - } else if (WIFSIGNALED(cp->ProcessResults[prPipe].ExitCode)) { - /* The child received an unhandled signal. */ - cp->ProcessResults[prPipe].State = kwsysProcess_State_Exception; - kwsysProcessSetExitExceptionByIndex( - cp, (int)WTERMSIG(cp->ProcessResults[prPipe].ExitCode), prPipe); - } else { - /* Error getting the child return code. */ - strcpy(cp->ProcessResults[prPipe].ExitExceptionString, - "Error getting child return code."); - cp->ProcessResults[prPipe].State = kwsysProcess_StateByIndex_Error; - } - } - /* support legacy state status value */ - cp->State = cp->ProcessResults[cp->NumberOfCommands - 1].State; - } - /* Normal cleanup. */ - kwsysProcessCleanup(cp, 0); - return 1; -} - -void kwsysProcess_Interrupt(kwsysProcess* cp) -{ - int i; - /* Make sure we are executing a process. */ - if (!cp || cp->State != kwsysProcess_State_Executing || cp->TimeoutExpired || - cp->Killed) { - return; - } - - /* Interrupt the children. */ - if (cp->CreateProcessGroup) { - if (cp->ForkPIDs) { - for (i = 0; i < cp->NumberOfCommands; ++i) { - /* Make sure the PID is still valid. */ - if (cp->ForkPIDs[i]) { - /* The user created a process group for this process. 
The group ID - is the process ID for the original process in the group. */ - kill(-cp->ForkPIDs[i], SIGINT); - } - } - } - } else { - /* No process group was created. Kill our own process group. - NOTE: While one could argue that we could call kill(cp->ForkPIDs[i], - SIGINT) as a way to still interrupt the process even though it's not in - a special group, this is not an option on Windows. Therefore, we kill - the current process group for consistency with Windows. */ - kill(0, SIGINT); - } -} - -void kwsysProcess_Kill(kwsysProcess* cp) -{ - int i; - - /* Make sure we are executing a process. */ - if (!cp || cp->State != kwsysProcess_State_Executing) { - return; - } - - /* First close the child exit report pipe write end to avoid causing a - SIGPIPE when the child terminates and our signal handler tries to - report it after we have already closed the read end. */ - kwsysProcessCleanupDescriptor(&cp->SignalPipe); - -#if !defined(__APPLE__) - /* Close all the pipe read ends. Do this before killing the - children because Cygwin has problems killing processes that are - blocking to wait for writing to their output pipes. */ - kwsysProcessClosePipes(cp); -#endif - - /* Kill the children. */ - cp->Killed = 1; - for (i = 0; i < cp->NumberOfCommands; ++i) { - int status; - if (cp->ForkPIDs[i]) { - /* Kill the child. */ - kwsysProcessKill(cp->ForkPIDs[i]); - - /* Reap the child. Keep trying until the call is not - interrupted. */ - while ((waitpid(cp->ForkPIDs[i], &status, 0) < 0) && (errno == EINTR)) - ; - } - } - -#if defined(__APPLE__) - /* Close all the pipe read ends. Do this after killing the - children because OS X has problems closing pipe read ends whose - pipes are full and still have an open write end. */ - kwsysProcessClosePipes(cp); -#endif - - cp->CommandsLeft = 0; -} - -/* Call the free() function with a pointer to volatile without causing - compiler warnings. */ -static void kwsysProcessVolatileFree(volatile void* p) -{ -/* clang has made it impossible to free memory that points to volatile - without first using special pragmas to disable a warning... */ -#if defined(__clang__) && !defined(__INTEL_COMPILER) -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wcast-qual" -#endif - free((void*)p); /* The cast will silence most compilers, but not clang. */ -#if defined(__clang__) && !defined(__INTEL_COMPILER) -# pragma clang diagnostic pop -#endif -} - -/* Initialize a process control structure for kwsysProcess_Execute. 
*/ -static int kwsysProcessInitialize(kwsysProcess* cp) -{ - int i; - volatile pid_t* oldForkPIDs; - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - cp->PipeReadEnds[i] = -1; - } - for (i = 0; i < 3; ++i) { - cp->PipeChildStd[i] = -1; - } - cp->SignalPipe = -1; - cp->SelectError = 0; - cp->StartTime.tv_sec = -1; - cp->StartTime.tv_usec = -1; - cp->TimeoutTime.tv_sec = -1; - cp->TimeoutTime.tv_usec = -1; - cp->TimeoutExpired = 0; - cp->PipesLeft = 0; - cp->CommandsLeft = 0; -#if KWSYSPE_USE_SELECT - FD_ZERO(&cp->PipeSet); -#endif - cp->State = kwsysProcess_State_Starting; - cp->Killed = 0; - cp->ErrorMessage[0] = 0; - - oldForkPIDs = cp->ForkPIDs; - cp->ForkPIDs = (volatile pid_t*)malloc(sizeof(volatile pid_t) * - (size_t)(cp->NumberOfCommands)); - kwsysProcessVolatileFree(oldForkPIDs); - if (!cp->ForkPIDs) { - return 0; - } - for (i = 0; i < cp->NumberOfCommands; ++i) { - cp->ForkPIDs[i] = 0; /* can't use memset due to volatile */ - } - - free(cp->CommandExitCodes); - cp->CommandExitCodes = - (int*)malloc(sizeof(int) * (size_t)(cp->NumberOfCommands)); - if (!cp->CommandExitCodes) { - return 0; - } - memset(cp->CommandExitCodes, 0, - sizeof(int) * (size_t)(cp->NumberOfCommands)); - - /* Allocate process result information for each process. */ - free(cp->ProcessResults); - cp->ProcessResults = (kwsysProcessResults*)malloc( - sizeof(kwsysProcessResults) * (size_t)(cp->NumberOfCommands)); - if (!cp->ProcessResults) { - return 0; - } - memset(cp->ProcessResults, 0, - sizeof(kwsysProcessResults) * (size_t)(cp->NumberOfCommands)); - for (i = 0; i < cp->NumberOfCommands; i++) { - cp->ProcessResults[i].ExitException = kwsysProcess_Exception_None; - cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Starting; - cp->ProcessResults[i].ExitCode = 1; - cp->ProcessResults[i].ExitValue = 1; - strcpy(cp->ProcessResults[i].ExitExceptionString, "No exception"); - } - - /* Allocate memory to save the real working directory. */ - if (cp->WorkingDirectory) { -#if defined(MAXPATHLEN) - cp->RealWorkingDirectoryLength = MAXPATHLEN; -#elif defined(PATH_MAX) - cp->RealWorkingDirectoryLength = PATH_MAX; -#else - cp->RealWorkingDirectoryLength = 4096; -#endif - cp->RealWorkingDirectory = - (char*)malloc((size_t)(cp->RealWorkingDirectoryLength)); - if (!cp->RealWorkingDirectory) { - return 0; - } - } - - return 1; -} - -/* Free all resources used by the given kwsysProcess instance that were - allocated by kwsysProcess_Execute. */ -static void kwsysProcessCleanup(kwsysProcess* cp, int error) -{ - int i; - - if (error) { - /* We are cleaning up due to an error. Report the error message - if one has not been provided already. */ - if (cp->ErrorMessage[0] == 0) { - strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE); - } - - /* Set the error state. */ - cp->State = kwsysProcess_State_Error; - - /* Kill any children already started. */ - if (cp->ForkPIDs) { - int status; - for (i = 0; i < cp->NumberOfCommands; ++i) { - if (cp->ForkPIDs[i]) { - /* Kill the child. */ - kwsysProcessKill(cp->ForkPIDs[i]); - - /* Reap the child. Keep trying until the call is not - interrupted. */ - while ((waitpid(cp->ForkPIDs[i], &status, 0) < 0) && - (errno == EINTR)) - ; - } - } - } - - /* Restore the working directory. */ - if (cp->RealWorkingDirectory) { - while ((chdir(cp->RealWorkingDirectory) < 0) && (errno == EINTR)) - ; - } - } - - /* If not creating a detached child, remove this object from the - global set of process objects that wish to be notified when a - child exits. 
*/ - if (!cp->OptionDetach) { - kwsysProcessesRemove(cp); - } - - /* Free memory. */ - if (cp->ForkPIDs) { - kwsysProcessVolatileFree(cp->ForkPIDs); - cp->ForkPIDs = 0; - } - if (cp->RealWorkingDirectory) { - free(cp->RealWorkingDirectory); - cp->RealWorkingDirectory = 0; - } - - /* Close pipe handles. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]); - } - for (i = 0; i < 3; ++i) { - kwsysProcessCleanupDescriptor(&cp->PipeChildStd[i]); - } -} - -/* Close the given file descriptor if it is open. Reset its value to -1. */ -static void kwsysProcessCleanupDescriptor(int* pfd) -{ - if (pfd && *pfd > 2) { - /* Keep trying to close until it is not interrupted by a - * signal. */ - while ((close(*pfd) < 0) && (errno == EINTR)) - ; - *pfd = -1; - } -} - -static void kwsysProcessClosePipes(kwsysProcess* cp) -{ - int i; - - /* Close any pipes that are still open. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - if (cp->PipeReadEnds[i] >= 0) { -#if KWSYSPE_USE_SELECT - /* If the pipe was reported by the last call to select, we must - read from it. This is needed to satisfy the suggestions from - "man select_tut" and is not needed for the polling - implementation. Ignore the data. */ - if (FD_ISSET(cp->PipeReadEnds[i], &cp->PipeSet)) { - /* We are handling this pipe now. Remove it from the set. */ - FD_CLR(cp->PipeReadEnds[i], &cp->PipeSet); - - /* The pipe is ready to read without blocking. Keep trying to - read until the operation is not interrupted. */ - while ((read(cp->PipeReadEnds[i], cp->PipeBuffer, - KWSYSPE_PIPE_BUFFER_SIZE) < 0) && - (errno == EINTR)) - ; - } -#endif - - /* We are done reading from this pipe. */ - kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]); - --cp->PipesLeft; - } - } -} - -static int kwsysProcessSetNonBlocking(int fd) -{ - int flags = fcntl(fd, F_GETFL); - if (flags >= 0) { - flags = fcntl(fd, F_SETFL, flags | O_NONBLOCK); - } - return flags >= 0; -} - -#if defined(__VMS) -int decc$set_child_standard_streams(int fd1, int fd2, int fd3); -#endif - -static int kwsysProcessCreate(kwsysProcess* cp, int prIndex, - kwsysProcessCreateInformation* si) -{ - sigset_t mask, old_mask; - int pgidPipe[2]; - char tmp; - ssize_t readRes; - - /* Create the error reporting pipe. */ - if (pipe(si->ErrorPipe) < 0) { - return 0; - } - - /* Create a pipe for detecting that the child process has created a process - group and session. */ - if (pipe(pgidPipe) < 0) { - kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); - kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]); - return 0; - } - - /* Set close-on-exec flag on the pipe's write end. */ - if (fcntl(si->ErrorPipe[1], F_SETFD, FD_CLOEXEC) < 0 || - fcntl(pgidPipe[1], F_SETFD, FD_CLOEXEC) < 0) { - kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); - kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]); - kwsysProcessCleanupDescriptor(&pgidPipe[0]); - kwsysProcessCleanupDescriptor(&pgidPipe[1]); - return 0; - } - - /* Block SIGINT / SIGTERM while we start. The purpose is so that our signal - handler doesn't get called from the child process after the fork and - before the exec, and subsequently start kill()'ing PIDs from ForkPIDs. 
*/ - sigemptyset(&mask); - sigaddset(&mask, SIGINT); - sigaddset(&mask, SIGTERM); - if (sigprocmask(SIG_BLOCK, &mask, &old_mask) < 0) { - kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); - kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]); - kwsysProcessCleanupDescriptor(&pgidPipe[0]); - kwsysProcessCleanupDescriptor(&pgidPipe[1]); - return 0; - } - -/* Fork off a child process. */ -#if defined(__VMS) - /* VMS needs vfork and execvp to be in the same function because - they use setjmp/longjmp to run the child startup code in the - parent! TODO: OptionDetach. Also - TODO: CreateProcessGroup. */ - cp->ForkPIDs[prIndex] = vfork(); -#else - cp->ForkPIDs[prIndex] = kwsysProcessFork(cp, si); -#endif - if (cp->ForkPIDs[prIndex] < 0) { - sigprocmask(SIG_SETMASK, &old_mask, 0); - kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); - kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]); - kwsysProcessCleanupDescriptor(&pgidPipe[0]); - kwsysProcessCleanupDescriptor(&pgidPipe[1]); - return 0; - } - - if (cp->ForkPIDs[prIndex] == 0) { -#if defined(__VMS) - /* Specify standard pipes for child process. */ - decc$set_child_standard_streams(si->StdIn, si->StdOut, si->StdErr); -#else - /* Close the read end of the error reporting / process group - setup pipe. */ - close(si->ErrorPipe[0]); - close(pgidPipe[0]); - - /* Setup the stdin, stdout, and stderr pipes. */ - if (si->StdIn > 0) { - dup2(si->StdIn, 0); - } else if (si->StdIn < 0) { - close(0); - } - if (si->StdOut != 1) { - dup2(si->StdOut, 1); - } - if (si->StdErr != 2) { - dup2(si->StdErr, 2); - } - - /* Clear the close-on-exec flag for stdin, stdout, and stderr. - All other pipe handles will be closed when exec succeeds. */ - fcntl(0, F_SETFD, 0); - fcntl(1, F_SETFD, 0); - fcntl(2, F_SETFD, 0); - - /* Restore all default signal handlers. */ - kwsysProcessRestoreDefaultSignalHandlers(); - - /* Now that we have restored default signal handling and created the - process group, restore mask. */ - sigprocmask(SIG_SETMASK, &old_mask, 0); - - /* Create new process group. We use setsid instead of setpgid to avoid - the child getting hung up on signals like SIGTTOU. (In the real world, - this has been observed where "git svn" ends up calling the "resize" - program which opens /dev/tty. */ - if (cp->CreateProcessGroup && setsid() < 0) { - kwsysProcessChildErrorExit(si->ErrorPipe[1]); - } -#endif - - /* Execute the real process. If successful, this does not return. */ - execvp(cp->Commands[prIndex][0], cp->Commands[prIndex]); - /* TODO: What does VMS do if the child fails to start? */ - /* TODO: On VMS, how do we put the process in a new group? */ - - /* Failure. Report error to parent and terminate. */ - kwsysProcessChildErrorExit(si->ErrorPipe[1]); - } - -#if defined(__VMS) - /* Restore the standard pipes of this process. */ - decc$set_child_standard_streams(0, 1, 2); -#endif - - /* We are done with the error reporting pipe and process group setup pipe - write end. */ - kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]); - kwsysProcessCleanupDescriptor(&pgidPipe[1]); - - /* Make sure the child is in the process group before we proceed. This - avoids race conditions with calls to the kill function that we make for - signalling process groups. */ - while ((readRes = read(pgidPipe[0], &tmp, 1)) > 0) - ; - if (readRes < 0) { - sigprocmask(SIG_SETMASK, &old_mask, 0); - kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); - kwsysProcessCleanupDescriptor(&pgidPipe[0]); - return 0; - } - kwsysProcessCleanupDescriptor(&pgidPipe[0]); - - /* Unmask signals. 
*/ - if (sigprocmask(SIG_SETMASK, &old_mask, 0) < 0) { - kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); - return 0; - } - - /* A child has been created. */ - ++cp->CommandsLeft; - - /* Block until the child's exec call succeeds and closes the error - pipe or writes data to the pipe to report an error. */ - { - kwsysProcess_ssize_t total = 0; - kwsysProcess_ssize_t n = 1; - /* Read the entire error message up to the length of our buffer. */ - while (total < KWSYSPE_PIPE_BUFFER_SIZE && n > 0) { - /* Keep trying to read until the operation is not interrupted. */ - while (((n = read(si->ErrorPipe[0], cp->ErrorMessage + total, - (size_t)(KWSYSPE_PIPE_BUFFER_SIZE - total))) < 0) && - (errno == EINTR)) - ; - if (n > 0) { - total += n; - } - } - - /* We are done with the error reporting pipe read end. */ - kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]); - - if (total > 0) { - /* The child failed to execute the process. */ - return 0; - } - } - - return 1; -} - -static void kwsysProcessDestroy(kwsysProcess* cp) -{ - /* A child process has terminated. Reap it if it is one handled by - this object. */ - int i; - /* Temporarily disable signals that access ForkPIDs. We don't want them to - read a reaped PID, and writes to ForkPIDs are not atomic. */ - sigset_t mask, old_mask; - sigemptyset(&mask); - sigaddset(&mask, SIGINT); - sigaddset(&mask, SIGTERM); - if (sigprocmask(SIG_BLOCK, &mask, &old_mask) < 0) { - return; - } - - for (i = 0; i < cp->NumberOfCommands; ++i) { - if (cp->ForkPIDs[i]) { - int result; - while (((result = waitpid(cp->ForkPIDs[i], &cp->CommandExitCodes[i], - WNOHANG)) < 0) && - (errno == EINTR)) - ; - if (result > 0) { - /* This child has terminated. */ - cp->ForkPIDs[i] = 0; - if (--cp->CommandsLeft == 0) { - /* All children have terminated. Close the signal pipe - write end so that no more notifications are sent to this - object. */ - kwsysProcessCleanupDescriptor(&cp->SignalPipe); - - /* TODO: Once the children have terminated, switch - WaitForData to use a non-blocking read to get the - rest of the data from the pipe. This is needed when - grandchildren keep the output pipes open. */ - } - } else if (result < 0 && cp->State != kwsysProcess_State_Error) { - /* Unexpected error. Report the first time this happens. */ - strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE); - cp->State = kwsysProcess_State_Error; - } - } - } - - /* Re-enable signals. */ - sigprocmask(SIG_SETMASK, &old_mask, 0); -} - -static int kwsysProcessSetupOutputPipeFile(int* p, const char* name) -{ - int fout; - if (!name) { - return 1; - } - - /* Close the existing descriptor. */ - kwsysProcessCleanupDescriptor(p); - - /* Open a file for the pipe to write. */ - if ((fout = open(name, O_WRONLY | O_CREAT | O_TRUNC, 0666)) < 0) { - return 0; - } - - /* Set close-on-exec flag on the pipe's end. */ - if (fcntl(fout, F_SETFD, FD_CLOEXEC) < 0) { - close(fout); - return 0; - } - - /* Assign the replacement descriptor. */ - *p = fout; - return 1; -} - -static int kwsysProcessSetupOutputPipeNative(int* p, int des[2]) -{ - /* Close the existing descriptor. */ - kwsysProcessCleanupDescriptor(p); - - /* Set close-on-exec flag on the pipe's ends. The proper end will - be dup2-ed into the standard descriptor number after fork but - before exec. */ - if ((fcntl(des[0], F_SETFD, FD_CLOEXEC) < 0) || - (fcntl(des[1], F_SETFD, FD_CLOEXEC) < 0)) { - return 0; - } - - /* Assign the replacement descriptor. 
*/ - *p = des[1]; - return 1; -} - -/* Get the time at which either the process or user timeout will - expire. Returns 1 if the user timeout is first, and 0 otherwise. */ -static int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout, - kwsysProcessTime* timeoutTime) -{ - /* The first time this is called, we need to calculate the time at - which the child will timeout. */ - if (cp->Timeout > 0 && cp->TimeoutTime.tv_sec < 0) { - kwsysProcessTime length = kwsysProcessTimeFromDouble(cp->Timeout); - cp->TimeoutTime = kwsysProcessTimeAdd(cp->StartTime, length); - } - - /* Start with process timeout. */ - *timeoutTime = cp->TimeoutTime; - - /* Check if the user timeout is earlier. */ - if (userTimeout) { - kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent(); - kwsysProcessTime userTimeoutLength = - kwsysProcessTimeFromDouble(*userTimeout); - kwsysProcessTime userTimeoutTime = - kwsysProcessTimeAdd(currentTime, userTimeoutLength); - if (timeoutTime->tv_sec < 0 || - kwsysProcessTimeLess(userTimeoutTime, *timeoutTime)) { - *timeoutTime = userTimeoutTime; - return 1; - } - } - return 0; -} - -/* Get the length of time before the given timeout time arrives. - Returns 1 if the time has already arrived, and 0 otherwise. */ -static int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime, - double* userTimeout, - kwsysProcessTimeNative* timeoutLength, - int zeroIsExpired) -{ - if (timeoutTime->tv_sec < 0) { - /* No timeout time has been requested. */ - return 0; - } else { - /* Calculate the remaining time. */ - kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent(); - kwsysProcessTime timeLeft = - kwsysProcessTimeSubtract(*timeoutTime, currentTime); - if (timeLeft.tv_sec < 0 && userTimeout && *userTimeout <= 0) { - /* Caller has explicitly requested a zero timeout. */ - timeLeft.tv_sec = 0; - timeLeft.tv_usec = 0; - } - - if (timeLeft.tv_sec < 0 || - (timeLeft.tv_sec == 0 && timeLeft.tv_usec == 0 && zeroIsExpired)) { - /* Timeout has already expired. */ - return 1; - } else { - /* There is some time left. 
*/
-      timeoutLength->tv_sec = timeLeft.tv_sec;
-      timeoutLength->tv_usec = timeLeft.tv_usec;
-      return 0;
-    }
-  }
-}
-
-static kwsysProcessTime kwsysProcessTimeGetCurrent(void)
-{
-  kwsysProcessTime current;
-  kwsysProcessTimeNative current_native;
-#if KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC
-  struct timespec current_timespec;
-  clock_gettime(CLOCK_MONOTONIC, &current_timespec);
-
-  current_native.tv_sec = current_timespec.tv_sec;
-  current_native.tv_usec = current_timespec.tv_nsec / 1000;
-#else
-  gettimeofday(&current_native, 0);
-#endif
-  current.tv_sec = (long)current_native.tv_sec;
-  current.tv_usec = (long)current_native.tv_usec;
-  return current;
-}
-
-static double kwsysProcessTimeToDouble(kwsysProcessTime t)
-{
-  return (double)t.tv_sec + (double)(t.tv_usec) * 0.000001;
-}
-
-static kwsysProcessTime kwsysProcessTimeFromDouble(double d)
-{
-  kwsysProcessTime t;
-  t.tv_sec = (long)d;
-  t.tv_usec = (long)((d - (double)(t.tv_sec)) * 1000000);
-  return t;
-}
-
-static int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2)
-{
-  return ((in1.tv_sec < in2.tv_sec) ||
-          ((in1.tv_sec == in2.tv_sec) && (in1.tv_usec < in2.tv_usec)));
-}
-
-static kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1,
-                                            kwsysProcessTime in2)
-{
-  kwsysProcessTime out;
-  out.tv_sec = in1.tv_sec + in2.tv_sec;
-  out.tv_usec = in1.tv_usec + in2.tv_usec;
-  if (out.tv_usec >= 1000000) {
-    out.tv_usec -= 1000000;
-    out.tv_sec += 1;
-  }
-  return out;
-}
-
-static kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1,
-                                                 kwsysProcessTime in2)
-{
-  kwsysProcessTime out;
-  out.tv_sec = in1.tv_sec - in2.tv_sec;
-  out.tv_usec = in1.tv_usec - in2.tv_usec;
-  if (out.tv_usec < 0) {
-    out.tv_usec += 1000000;
-    out.tv_sec -= 1;
-  }
-  return out;
-}
-
-#define KWSYSPE_CASE(type, str) \
-  cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_##type; \
-  strcpy(cp->ProcessResults[idx].ExitExceptionString, str)
-static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int sig,
-                                                int idx)
-{
-  switch (sig) {
-#ifdef SIGSEGV
-    case SIGSEGV:
-      KWSYSPE_CASE(Fault, "Segmentation fault");
-      break;
-#endif
-#ifdef SIGBUS
-# if !defined(SIGSEGV) || SIGBUS != SIGSEGV
-    case SIGBUS:
-      KWSYSPE_CASE(Fault, "Bus error");
-      break;
-# endif
-#endif
-#ifdef SIGFPE
-    case SIGFPE:
-      KWSYSPE_CASE(Numerical, "Floating-point exception");
-      break;
-#endif
-#ifdef SIGILL
-    case SIGILL:
-      KWSYSPE_CASE(Illegal, "Illegal instruction");
-      break;
-#endif
-#ifdef SIGINT
-    case SIGINT:
-      KWSYSPE_CASE(Interrupt, "User interrupt");
-      break;
-#endif
-#ifdef SIGABRT
-    case SIGABRT:
-      KWSYSPE_CASE(Other, "Child aborted");
-      break;
-#endif
-#ifdef SIGKILL
-    case SIGKILL:
-      KWSYSPE_CASE(Other, "Child killed");
-      break;
-#endif
-#ifdef SIGTERM
-    case SIGTERM:
-      KWSYSPE_CASE(Other, "Child terminated");
-      break;
-#endif
-#ifdef SIGHUP
-    case SIGHUP:
-      KWSYSPE_CASE(Other, "SIGHUP");
-      break;
-#endif
-#ifdef SIGQUIT
-    case SIGQUIT:
-      KWSYSPE_CASE(Other, "SIGQUIT");
-      break;
-#endif
-#ifdef SIGTRAP
-    case SIGTRAP:
-      KWSYSPE_CASE(Other, "SIGTRAP");
-      break;
-#endif
-#ifdef SIGIOT
-# if !defined(SIGABRT) || SIGIOT != SIGABRT
-    case SIGIOT:
-      KWSYSPE_CASE(Other, "SIGIOT");
-      break;
-# endif
-#endif
-#ifdef SIGUSR1
-    case SIGUSR1:
-      KWSYSPE_CASE(Other, "SIGUSR1");
-      break;
-#endif
-#ifdef SIGUSR2
-    case SIGUSR2:
-      KWSYSPE_CASE(Other, "SIGUSR2");
-      break;
-#endif
-#ifdef SIGPIPE
-    case SIGPIPE:
-      KWSYSPE_CASE(Other, "SIGPIPE");
-      break;
-#endif
-#ifdef SIGALRM
-    case SIGALRM:
-      KWSYSPE_CASE(Other, "SIGALRM");
-      break;
-#endif
-#ifdef SIGSTKFLT
-    case 
SIGSTKFLT: - KWSYSPE_CASE(Other, "SIGSTKFLT"); - break; -#endif -#ifdef SIGCHLD - case SIGCHLD: - KWSYSPE_CASE(Other, "SIGCHLD"); - break; -#elif defined(SIGCLD) - case SIGCLD: - KWSYSPE_CASE(Other, "SIGCLD"); - break; -#endif -#ifdef SIGCONT - case SIGCONT: - KWSYSPE_CASE(Other, "SIGCONT"); - break; -#endif -#ifdef SIGSTOP - case SIGSTOP: - KWSYSPE_CASE(Other, "SIGSTOP"); - break; -#endif -#ifdef SIGTSTP - case SIGTSTP: - KWSYSPE_CASE(Other, "SIGTSTP"); - break; -#endif -#ifdef SIGTTIN - case SIGTTIN: - KWSYSPE_CASE(Other, "SIGTTIN"); - break; -#endif -#ifdef SIGTTOU - case SIGTTOU: - KWSYSPE_CASE(Other, "SIGTTOU"); - break; -#endif -#ifdef SIGURG - case SIGURG: - KWSYSPE_CASE(Other, "SIGURG"); - break; -#endif -#ifdef SIGXCPU - case SIGXCPU: - KWSYSPE_CASE(Other, "SIGXCPU"); - break; -#endif -#ifdef SIGXFSZ - case SIGXFSZ: - KWSYSPE_CASE(Other, "SIGXFSZ"); - break; -#endif -#ifdef SIGVTALRM - case SIGVTALRM: - KWSYSPE_CASE(Other, "SIGVTALRM"); - break; -#endif -#ifdef SIGPROF - case SIGPROF: - KWSYSPE_CASE(Other, "SIGPROF"); - break; -#endif -#ifdef SIGWINCH - case SIGWINCH: - KWSYSPE_CASE(Other, "SIGWINCH"); - break; -#endif -#ifdef SIGPOLL - case SIGPOLL: - KWSYSPE_CASE(Other, "SIGPOLL"); - break; -#endif -#ifdef SIGIO -# if !defined(SIGPOLL) || SIGIO != SIGPOLL - case SIGIO: - KWSYSPE_CASE(Other, "SIGIO"); - break; -# endif -#endif -#ifdef SIGPWR - case SIGPWR: - KWSYSPE_CASE(Other, "SIGPWR"); - break; -#endif -#ifdef SIGSYS - case SIGSYS: - KWSYSPE_CASE(Other, "SIGSYS"); - break; -#endif -#ifdef SIGUNUSED -# if !defined(SIGSYS) || SIGUNUSED != SIGSYS - case SIGUNUSED: - KWSYSPE_CASE(Other, "SIGUNUSED"); - break; -# endif -#endif - default: - cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_Other; - sprintf(cp->ProcessResults[idx].ExitExceptionString, "Signal %d", sig); - break; - } -} -#undef KWSYSPE_CASE - -/* When the child process encounters an error before its program is - invoked, this is called to report the error to the parent and - exit. */ -static void kwsysProcessChildErrorExit(int errorPipe) -{ - /* Construct the error message. */ - char buffer[KWSYSPE_PIPE_BUFFER_SIZE]; - kwsysProcess_ssize_t result; - strncpy(buffer, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE); - buffer[KWSYSPE_PIPE_BUFFER_SIZE - 1] = '\0'; - - /* Report the error to the parent through the special pipe. */ - result = write(errorPipe, buffer, strlen(buffer)); - (void)result; - - /* Terminate without cleanup. */ - _exit(1); -} - -/* Restores all signal handlers to their default values. 
*/ -static void kwsysProcessRestoreDefaultSignalHandlers(void) -{ - struct sigaction act; - memset(&act, 0, sizeof(struct sigaction)); - act.sa_handler = SIG_DFL; -#ifdef SIGHUP - sigaction(SIGHUP, &act, 0); -#endif -#ifdef SIGINT - sigaction(SIGINT, &act, 0); -#endif -#ifdef SIGQUIT - sigaction(SIGQUIT, &act, 0); -#endif -#ifdef SIGILL - sigaction(SIGILL, &act, 0); -#endif -#ifdef SIGTRAP - sigaction(SIGTRAP, &act, 0); -#endif -#ifdef SIGABRT - sigaction(SIGABRT, &act, 0); -#endif -#ifdef SIGIOT - sigaction(SIGIOT, &act, 0); -#endif -#ifdef SIGBUS - sigaction(SIGBUS, &act, 0); -#endif -#ifdef SIGFPE - sigaction(SIGFPE, &act, 0); -#endif -#ifdef SIGUSR1 - sigaction(SIGUSR1, &act, 0); -#endif -#ifdef SIGSEGV - sigaction(SIGSEGV, &act, 0); -#endif -#ifdef SIGUSR2 - sigaction(SIGUSR2, &act, 0); -#endif -#ifdef SIGPIPE - sigaction(SIGPIPE, &act, 0); -#endif -#ifdef SIGALRM - sigaction(SIGALRM, &act, 0); -#endif -#ifdef SIGTERM - sigaction(SIGTERM, &act, 0); -#endif -#ifdef SIGSTKFLT - sigaction(SIGSTKFLT, &act, 0); -#endif -#ifdef SIGCLD - sigaction(SIGCLD, &act, 0); -#endif -#ifdef SIGCHLD - sigaction(SIGCHLD, &act, 0); -#endif -#ifdef SIGCONT - sigaction(SIGCONT, &act, 0); -#endif -#ifdef SIGTSTP - sigaction(SIGTSTP, &act, 0); -#endif -#ifdef SIGTTIN - sigaction(SIGTTIN, &act, 0); -#endif -#ifdef SIGTTOU - sigaction(SIGTTOU, &act, 0); -#endif -#ifdef SIGURG - sigaction(SIGURG, &act, 0); -#endif -#ifdef SIGXCPU - sigaction(SIGXCPU, &act, 0); -#endif -#ifdef SIGXFSZ - sigaction(SIGXFSZ, &act, 0); -#endif -#ifdef SIGVTALRM - sigaction(SIGVTALRM, &act, 0); -#endif -#ifdef SIGPROF - sigaction(SIGPROF, &act, 0); -#endif -#ifdef SIGWINCH - sigaction(SIGWINCH, &act, 0); -#endif -#ifdef SIGPOLL - sigaction(SIGPOLL, &act, 0); -#endif -#ifdef SIGIO - sigaction(SIGIO, &act, 0); -#endif -#ifdef SIGPWR - sigaction(SIGPWR, &act, 0); -#endif -#ifdef SIGSYS - sigaction(SIGSYS, &act, 0); -#endif -#ifdef SIGUNUSED - sigaction(SIGUNUSED, &act, 0); -#endif -} - -static void kwsysProcessExit(void) -{ - _exit(0); -} - -#if !defined(__VMS) -static pid_t kwsysProcessFork(kwsysProcess* cp, - kwsysProcessCreateInformation* si) -{ - /* Create a detached process if requested. */ - if (cp->OptionDetach) { - /* Create an intermediate process. */ - pid_t middle_pid = fork(); - if (middle_pid < 0) { - /* Fork failed. Return as if we were not detaching. */ - return middle_pid; - } else if (middle_pid == 0) { - /* This is the intermediate process. Create the real child. */ - pid_t child_pid = fork(); - if (child_pid == 0) { - /* This is the real child process. There is nothing to do here. */ - return 0; - } else { - /* Use the error pipe to report the pid to the real parent. */ - while ((write(si->ErrorPipe[1], &child_pid, sizeof(child_pid)) < 0) && - (errno == EINTR)) - ; - - /* Exit without cleanup. The parent holds all resources. */ - kwsysProcessExit(); - return 0; /* Never reached, but avoids SunCC warning. */ - } - } else { - /* This is the original parent process. The intermediate - process will use the error pipe to report the pid of the - detached child. */ - pid_t child_pid; - int status; - while ((read(si->ErrorPipe[0], &child_pid, sizeof(child_pid)) < 0) && - (errno == EINTR)) - ; - - /* Wait for the intermediate process to exit and clean it up. */ - while ((waitpid(middle_pid, &status, 0) < 0) && (errno == EINTR)) - ; - return child_pid; - } - } else { - /* Not creating a detached process. Use normal fork. */ - return fork(); - } -} -#endif - -/* We try to obtain process information by invoking the ps command. 
- Here we define the command to call on each platform and the - corresponding parsing format string. The parsing format should - have two integers to store: the pid and then the ppid. */ -#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(__OpenBSD__) || defined(__GLIBC__) || defined(__GNU__) -# define KWSYSPE_PS_COMMAND "ps axo pid,ppid" -# define KWSYSPE_PS_FORMAT "%d %d\n" -#elif defined(__sun) && (defined(__SVR4) || defined(__svr4__)) /* Solaris */ -# define KWSYSPE_PS_COMMAND "ps -e -o pid,ppid" -# define KWSYSPE_PS_FORMAT "%d %d\n" -#elif defined(__hpux) || defined(__sun__) || defined(__sgi) || \ - defined(_AIX) || defined(__sparc) -# define KWSYSPE_PS_COMMAND "ps -ef" -# define KWSYSPE_PS_FORMAT "%*s %d %d %*[^\n]\n" -#elif defined(__QNX__) -# define KWSYSPE_PS_COMMAND "ps -Af" -# define KWSYSPE_PS_FORMAT "%*d %d %d %*[^\n]\n" -#elif defined(__CYGWIN__) -# define KWSYSPE_PS_COMMAND "ps aux" -# define KWSYSPE_PS_FORMAT "%d %d %*[^\n]\n" -#endif - -void kwsysProcess_KillPID(unsigned long process_id) -{ - kwsysProcessKill((pid_t)process_id); -} - -static void kwsysProcessKill(pid_t process_id) -{ -#if defined(__linux__) || defined(__CYGWIN__) - DIR* procdir; -#endif - - /* Suspend the process to be sure it will not create more children. */ - kill(process_id, SIGSTOP); - -#if defined(__CYGWIN__) - /* Some Cygwin versions seem to need help here. Give up our time slice - so that the child can process SIGSTOP before we send SIGKILL. */ - usleep(1); -#endif - -/* Kill all children if we can find them. */ -#if defined(__linux__) || defined(__CYGWIN__) - /* First try using the /proc filesystem. */ - if ((procdir = opendir("/proc")) != NULL) { -# if defined(MAXPATHLEN) - char fname[MAXPATHLEN]; -# elif defined(PATH_MAX) - char fname[PATH_MAX]; -# else - char fname[4096]; -# endif - char buffer[KWSYSPE_PIPE_BUFFER_SIZE + 1]; - struct dirent* d; - - /* Each process has a directory in /proc whose name is the pid. - Within this directory is a file called stat that has the - following format: - - pid (command line) status ppid ... - - We want to get the ppid for all processes. Those that have - process_id as their parent should be recursively killed. */ - for (d = readdir(procdir); d; d = readdir(procdir)) { - int pid; - if (sscanf(d->d_name, "%d", &pid) == 1 && pid != 0) { - struct stat finfo; - sprintf(fname, "/proc/%d/stat", pid); - if (stat(fname, &finfo) == 0) { - FILE* f = fopen(fname, "r"); - if (f) { - size_t nread = fread(buffer, 1, KWSYSPE_PIPE_BUFFER_SIZE, f); - fclose(f); - buffer[nread] = '\0'; - if (nread > 0) { - const char* rparen = strrchr(buffer, ')'); - int ppid; - if (rparen && (sscanf(rparen + 1, "%*s %d", &ppid) == 1)) { - if (ppid == process_id) { - /* Recursively kill this child and its children. */ - kwsysProcessKill(pid); - } - } - } - } - } - } - } - closedir(procdir); - } else -#endif - { -#if defined(KWSYSPE_PS_COMMAND) - /* Try running "ps" to get the process information. */ - FILE* ps = popen(KWSYSPE_PS_COMMAND, "r"); - - /* Make sure the process started and provided a valid header. */ - if (ps && fscanf(ps, "%*[^\n]\n") != EOF) { - /* Look for processes whose parent is the process being killed. */ - int pid, ppid; - while (fscanf(ps, KWSYSPE_PS_FORMAT, &pid, &ppid) == 2) { - if (ppid == process_id) { - /* Recursively kill this child and its children. */ - kwsysProcessKill(pid); - } - } - } - - /* We are done with the ps process. */ - if (ps) { - pclose(ps); - } -#endif - } - - /* Kill the process. 
*/ - kill(process_id, SIGKILL); - -#if defined(__APPLE__) - /* On OS X 10.3 the above SIGSTOP occasionally prevents the SIGKILL - from working. Just in case, we resume the child and kill it - again. There is a small race condition in this obscure case. If - the child manages to fork again between these two signals, we - will not catch its children. */ - kill(process_id, SIGCONT); - kill(process_id, SIGKILL); -#endif -} - -#if defined(__VMS) -int decc$feature_get_index(const char* name); -int decc$feature_set_value(int index, int mode, int value); -static int kwsysProcessSetVMSFeature(const char* name, int value) -{ - int i; - errno = 0; - i = decc$feature_get_index(name); - return i >= 0 && (decc$feature_set_value(i, 1, value) >= 0 || errno == 0); -} -#endif - -/* Global set of executing processes for use by the signal handler. - This global instance will be zero-initialized by the compiler. */ -typedef struct kwsysProcessInstances_s -{ - int Count; - int Size; - kwsysProcess** Processes; -} kwsysProcessInstances; -static kwsysProcessInstances kwsysProcesses; - -/* The old SIGCHLD / SIGINT / SIGTERM handlers. */ -static struct sigaction kwsysProcessesOldSigChldAction; -static struct sigaction kwsysProcessesOldSigIntAction; -static struct sigaction kwsysProcessesOldSigTermAction; - -static void kwsysProcessesUpdate(kwsysProcessInstances* newProcesses) -{ - /* Block signals while we update the set of pipes to check. - TODO: sigprocmask is undefined for threaded apps. See - pthread_sigmask. */ - sigset_t newset; - sigset_t oldset; - sigemptyset(&newset); - sigaddset(&newset, SIGCHLD); - sigaddset(&newset, SIGINT); - sigaddset(&newset, SIGTERM); - sigprocmask(SIG_BLOCK, &newset, &oldset); - - /* Store the new set in that seen by the signal handler. */ - kwsysProcesses = *newProcesses; - - /* Restore the signal mask to the previous setting. */ - sigprocmask(SIG_SETMASK, &oldset, 0); -} - -static int kwsysProcessesAdd(kwsysProcess* cp) -{ - /* Create a pipe through which the signal handler can notify the - given process object that a child has exited. */ - { - /* Create the pipe. */ - int p[2]; - if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) { - return 0; - } - - /* Store the pipes now to be sure they are cleaned up later. */ - cp->PipeReadEnds[KWSYSPE_PIPE_SIGNAL] = p[0]; - cp->SignalPipe = p[1]; - - /* Switch the pipe to non-blocking mode so that reading a byte can - be an atomic test-and-set. */ - if (!kwsysProcessSetNonBlocking(p[0]) || - !kwsysProcessSetNonBlocking(p[1])) { - return 0; - } - - /* The children do not need this pipe. Set close-on-exec flag on - the pipe's ends. */ - if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) || - (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) { - return 0; - } - } - - /* Attempt to add the given signal pipe to the signal handler set. */ - { - - /* Make sure there is enough space for the new signal pipe. */ - kwsysProcessInstances oldProcesses = kwsysProcesses; - kwsysProcessInstances newProcesses = oldProcesses; - if (oldProcesses.Count == oldProcesses.Size) { - /* Start with enough space for a small number of process instances - and double the size each time more is needed. */ - newProcesses.Size = oldProcesses.Size ? oldProcesses.Size * 2 : 4; - - /* Try allocating the new block of memory. */ - if ((newProcesses.Processes = ((kwsysProcess**)malloc( - (size_t)(newProcesses.Size) * sizeof(kwsysProcess*))))) { - /* Copy the old pipe set to the new memory. 
*/
-      if (oldProcesses.Count > 0) {
-        memcpy(newProcesses.Processes, oldProcesses.Processes,
-               ((size_t)(oldProcesses.Count) * sizeof(kwsysProcess*)));
-      }
-    } else {
-      /* Failed to allocate memory for the new signal pipe set.  */
-      return 0;
-    }
-  }
-
-  /* Append the new signal pipe to the set.  */
-  newProcesses.Processes[newProcesses.Count++] = cp;
-
-  /* Store the new set in that seen by the signal handler.  */
-  kwsysProcessesUpdate(&newProcesses);
-
-  /* Free the original pipes if new ones were allocated.  */
-  if (newProcesses.Processes != oldProcesses.Processes) {
-    free(oldProcesses.Processes);
-  }
-
-  /* If this is the first process, enable the signal handler.  */
-  if (newProcesses.Count == 1) {
-    /* Install our handler for SIGCHLD.  Repeat call until it is not
-       interrupted.  */
-    struct sigaction newSigAction;
-    memset(&newSigAction, 0, sizeof(struct sigaction));
-#if KWSYSPE_USE_SIGINFO
-    newSigAction.sa_sigaction = kwsysProcessesSignalHandler;
-    newSigAction.sa_flags = SA_NOCLDSTOP | SA_SIGINFO;
-# ifdef SA_RESTART
-    newSigAction.sa_flags |= SA_RESTART;
-# endif
-#else
-    newSigAction.sa_handler = kwsysProcessesSignalHandler;
-    newSigAction.sa_flags = SA_NOCLDSTOP;
-#endif
-    sigemptyset(&newSigAction.sa_mask);
-    while ((sigaction(SIGCHLD, &newSigAction,
-                      &kwsysProcessesOldSigChldAction) < 0) &&
-           (errno == EINTR))
-      ;
-
-    /* Install our handler for SIGINT / SIGTERM.  Repeat call until
-       it is not interrupted.  */
-    sigemptyset(&newSigAction.sa_mask);
-    sigaddset(&newSigAction.sa_mask, SIGTERM);
-    while ((sigaction(SIGINT, &newSigAction,
-                      &kwsysProcessesOldSigIntAction) < 0) &&
-           (errno == EINTR))
-      ;
-
-    sigemptyset(&newSigAction.sa_mask);
-    sigaddset(&newSigAction.sa_mask, SIGINT);
-    while ((sigaction(SIGTERM, &newSigAction,
-                      &kwsysProcessesOldSigTermAction) < 0) &&
-           (errno == EINTR))
-      ;
-  }
-  }
-
-  return 1;
-}
-
-static void kwsysProcessesRemove(kwsysProcess* cp)
-{
-  /* Attempt to remove the given signal pipe from the signal handler set.  */
-  {
-    /* Find the given process in the set.  */
-    kwsysProcessInstances newProcesses = kwsysProcesses;
-    int i;
-    for (i = 0; i < newProcesses.Count; ++i) {
-      if (newProcesses.Processes[i] == cp) {
-        break;
-      }
-    }
-    if (i < newProcesses.Count) {
-      /* Remove the process from the set.  */
-      --newProcesses.Count;
-      for (; i < newProcesses.Count; ++i) {
-        newProcesses.Processes[i] = newProcesses.Processes[i + 1];
-      }
-
-      /* If this was the last process, disable the signal handler.  */
-      if (newProcesses.Count == 0) {
-        /* Restore the signal handlers.  Repeat call until it is not
-           interrupted.  */
-        while ((sigaction(SIGCHLD, &kwsysProcessesOldSigChldAction, 0) < 0) &&
-               (errno == EINTR))
-          ;
-        while ((sigaction(SIGINT, &kwsysProcessesOldSigIntAction, 0) < 0) &&
-               (errno == EINTR))
-          ;
-        while ((sigaction(SIGTERM, &kwsysProcessesOldSigTermAction, 0) < 0) &&
-               (errno == EINTR))
-          ;
-
-        /* Free the table of process pointers since it is now empty.
-           This is safe because the signal handler has been removed.  */
-        newProcesses.Size = 0;
-        free(newProcesses.Processes);
-        newProcesses.Processes = 0;
-      }
-
-      /* Store the new set in that seen by the signal handler.  */
-      kwsysProcessesUpdate(&newProcesses);
-    }
-  }
-
-  /* Close the pipe through which the signal handler may have notified
-     the given process object that a child has exited.  */
-  kwsysProcessCleanupDescriptor(&cp->SignalPipe);
-}
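The signal pipe wired up by kwsysProcessesAdd() above is the classic self-pipe trick: the SIGCHLD handler only performs an async-signal-safe write() of one byte to a non-blocking pipe, and the select()-based wait loop treats the read end as just another descriptor. A minimal standalone sketch of that pattern is below; it is illustrative only, and the names wake_pipe, on_sigchld, and setup_wake_pipe are not kwsys identifiers.

/* Self-pipe sketch (illustrative, not part of kwsys). */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int wake_pipe[2]; /* [0] read end polled by the wait loop, [1] write end */

static void on_sigchld(int sig)
{
  char byte = 1;
  (void)sig;
  (void)write(wake_pipe[1], &byte, 1); /* async-signal-safe wakeup */
}

static int setup_wake_pipe(void)
{
  int i;
  if (pipe(wake_pipe) < 0) {
    return 0;
  }
  for (i = 0; i < 2; ++i) {
    /* Non-blocking so reading a byte is an atomic test-and-clear;
       close-on-exec so children do not inherit the descriptors. */
    if (fcntl(wake_pipe[i], F_SETFL, O_NONBLOCK) < 0 ||
        fcntl(wake_pipe[i], F_SETFD, FD_CLOEXEC) < 0) {
      return 0;
    }
  }
  return signal(SIGCHLD, on_sigchld) != SIG_ERR;
}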
-
-static void kwsysProcessesSignalHandler(int signum
-#if KWSYSPE_USE_SIGINFO
-                                        ,
-                                        siginfo_t* info, void* ucontext
-#endif
-)
-{
-  int i, j, procStatus, old_errno = errno;
-#if KWSYSPE_USE_SIGINFO
-  (void)info;
-  (void)ucontext;
-#endif
-
-  /* Signal all process objects that a child has terminated.  */
-  switch (signum) {
-    case SIGCHLD:
-      for (i = 0; i < kwsysProcesses.Count; ++i) {
-        /* Set the pipe in a signalled state.  */
-        char buf = 1;
-        kwsysProcess* cp = kwsysProcesses.Processes[i];
-        kwsysProcess_ssize_t pipeStatus =
-          read(cp->PipeReadEnds[KWSYSPE_PIPE_SIGNAL], &buf, 1);
-        (void)pipeStatus;
-        pipeStatus = write(cp->SignalPipe, &buf, 1);
-        (void)pipeStatus;
-      }
-      break;
-    case SIGINT:
-    case SIGTERM:
-      /* Signal child processes that are running in new process groups.  */
-      for (i = 0; i < kwsysProcesses.Count; ++i) {
-        kwsysProcess* cp = kwsysProcesses.Processes[i];
-        /* Check Killed to avoid data race condition when killing.
-           Check State to avoid data race condition in kwsysProcessCleanup
-           when there is an error (it leaves a reaped PID).  */
-        if (cp->CreateProcessGroup && !cp->Killed &&
-            cp->State != kwsysProcess_State_Error && cp->ForkPIDs) {
-          for (j = 0; j < cp->NumberOfCommands; ++j) {
-            /* Make sure the PID is still valid.  */
-            if (cp->ForkPIDs[j]) {
-              /* The user created a process group for this process.  The group
-                 ID
-                 is the process ID for the original process in the group.  */
-              kill(-cp->ForkPIDs[j], SIGINT);
-            }
-          }
-        }
-      }
-
-      /* Wait for all processes to terminate.  */
-      while (wait(&procStatus) >= 0 || errno != ECHILD) {
-      }
-
-      /* Terminate the process, which is now in an inconsistent state
-         because we reaped all the PIDs that it may have been reaping
-         or may have reaped in the future.  Reraise the signal so that
-         the proper exit code is returned.  */
-      {
-        /* Install default signal handler.  */
-        struct sigaction defSigAction;
-        sigset_t unblockSet;
-        memset(&defSigAction, 0, sizeof(defSigAction));
-        defSigAction.sa_handler = SIG_DFL;
-        sigemptyset(&defSigAction.sa_mask);
-        while ((sigaction(signum, &defSigAction, 0) < 0) && (errno == EINTR))
-          ;
-        /* Unmask the signal.  */
-        sigemptyset(&unblockSet);
-        sigaddset(&unblockSet, signum);
-        sigprocmask(SIG_UNBLOCK, &unblockSet, 0);
-        /* Raise the signal again.  */
-        raise(signum);
-        /* We shouldn't get here... but if we do... */
-        _exit(1);
-      }
-      /* break omitted to silence unreachable code clang compiler warning.  */
-  }
-
-#if !KWSYSPE_USE_SIGINFO
-  /* Re-Install our handler.  Repeat call until it is not interrupted.  */
-  {
-    struct sigaction newSigAction;
-    struct sigaction* oldSigAction;
-    memset(&newSigAction, 0, sizeof(struct sigaction));
-    newSigAction.sa_handler = kwsysProcessesSignalHandler;
-    newSigAction.sa_flags = SA_NOCLDSTOP;
-    sigemptyset(&newSigAction.sa_mask);
-    switch (signum) {
-      case SIGCHLD:
-        oldSigAction = &kwsysProcessesOldSigChldAction;
-        break;
-      case SIGINT:
-        sigaddset(&newSigAction.sa_mask, SIGTERM);
-        oldSigAction = &kwsysProcessesOldSigIntAction;
-        break;
-      case SIGTERM:
-        sigaddset(&newSigAction.sa_mask, SIGINT);
-        oldSigAction = &kwsysProcessesOldSigTermAction;
-        break;
-      default:
-        return;
-    }
-    while ((sigaction(signum, &newSigAction, oldSigAction) < 0) &&
-           (errno == EINTR))
-      ;
-  }
-#endif
-
-  errno = old_errno;
-}
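For orientation while reading kwsysProcess_Execute() and the wait loops in the Windows port that follows below, here is a rough sketch of how a caller drives the kwsysProcess API. It is illustrative only: error handling is trimmed, run_child is a made-up name, and the exact include line depends on how the kwsys namespace is configured for the consuming project.

#include <kwsys/Process.h> /* header name depends on the kwsys namespace */
#include <stdio.h>

static int run_child(void)
{
  const char* cmd[] = { "/bin/echo", "hello", 0 };
  char* data;
  int length;
  int result = -1;
  kwsysProcess* kp = kwsysProcess_New();
  if (!kp) {
    return -1;
  }
  kwsysProcess_SetCommand(kp, cmd);
  kwsysProcess_SetTimeout(kp, 10.0); /* seconds */
  kwsysProcess_Execute(kp);

  /* Drain stdout/stderr until both pipes close, then reap the child. */
  while (kwsysProcess_WaitForData(kp, &data, &length, 0) > 0) {
    fwrite(data, 1, (size_t)length, stdout);
  }
  kwsysProcess_WaitForExit(kp, 0);

  if (kwsysProcess_GetState(kp) == kwsysProcess_State_Exited) {
    result = kwsysProcess_GetExitValue(kp);
  }
  kwsysProcess_Delete(kp);
  return result;
}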
-
-void kwsysProcess_ResetStartTime(kwsysProcess* cp)
-{
-  if (!cp) {
-    return;
-  }
-  /* Reset start time.  */
-  cp->StartTime = kwsysProcessTimeGetCurrent();
-}
diff --git a/test/API/driver/kwsys/ProcessWin32.c b/test/API/driver/kwsys/ProcessWin32.c
deleted file mode 100644
index a963862f060..00000000000
--- a/test/API/driver/kwsys/ProcessWin32.c
+++ /dev/null
@@ -1,2786 +0,0 @@
-/* Distributed under the OSI-approved BSD 3-Clause License.  See accompanying
-   file Copyright.txt or https://cmake.org/licensing#kwsys for details.  */
-#include "kwsysPrivate.h"
-#include KWSYS_HEADER(Process.h)
-#include KWSYS_HEADER(Encoding.h)
-
-/* Work-around CMake dependency scanning limitation.  This must
-   duplicate the above list of headers.  */
-#if 0
-# include "Encoding.h.in"
-# include "Process.h.in"
-#endif
-
-/*
-
-Implementation for Windows
-
-On windows, a thread is created to wait for data on each pipe.  The
-threads are synchronized with the main thread to simulate the use of
-a UNIX-style select system call.
-
-*/
-
-#ifdef _MSC_VER
-# pragma warning(push, 1)
-#endif
-#include <windows.h> /* Windows API */
-#if defined(_MSC_VER) && _MSC_VER >= 1800
-# define KWSYS_WINDOWS_DEPRECATED_GetVersionEx
-#endif
-#include <io.h>     /* _unlink */
-#include <stdio.h>  /* sprintf */
-#include <string.h> /* strlen, strdup */
-#ifdef __WATCOMC__
-# define _unlink unlink
-#endif
-
-#ifndef _MAX_FNAME
-# define _MAX_FNAME 4096
-#endif
-#ifndef _MAX_PATH
-# define _MAX_PATH 4096
-#endif
-
-#ifdef _MSC_VER
-# pragma warning(pop)
-# pragma warning(disable : 4514)
-# pragma warning(disable : 4706)
-#endif
-
-#if defined(__BORLANDC__)
-# pragma warn - 8004 /* assigned a value that is never used */
-# pragma warn - 8060 /* Assignment inside if() condition. */
-#endif
-
-/* There are pipes for the process pipeline's stdout and stderr.  */
-#define KWSYSPE_PIPE_COUNT 2
-#define KWSYSPE_PIPE_STDOUT 0
-#define KWSYSPE_PIPE_STDERR 1
-
-/* The maximum amount to read from a pipe at a time.  */
-#define KWSYSPE_PIPE_BUFFER_SIZE 1024
-
-/* Debug output macro.  */
-#if 0
-# define KWSYSPE_DEBUG(x)                                                     \
-    ((void*)cp == (void*)0x00226DE0                                           \
-       ? (fprintf(stderr, "%d/%p/%d ", (int)GetCurrentProcessId(), cp,        \
-                  __LINE__),                                                  \
-          fprintf x, fflush(stderr), 1)                                       \
-       : (1))
-#else
-# define KWSYSPE_DEBUG(x) (void)1
-#endif
-
-typedef LARGE_INTEGER kwsysProcessTime;
-
-typedef struct kwsysProcessCreateInformation_s
-{
-  /* Windows child startup control data.  */
-  STARTUPINFOW StartupInfo;
-
-  /* Original handles before making inherited duplicates.
*/ - HANDLE hStdInput; - HANDLE hStdOutput; - HANDLE hStdError; -} kwsysProcessCreateInformation; - -typedef struct kwsysProcessPipeData_s kwsysProcessPipeData; -static DWORD WINAPI kwsysProcessPipeThreadRead(LPVOID ptd); -static void kwsysProcessPipeThreadReadPipe(kwsysProcess* cp, - kwsysProcessPipeData* td); -static DWORD WINAPI kwsysProcessPipeThreadWake(LPVOID ptd); -static void kwsysProcessPipeThreadWakePipe(kwsysProcess* cp, - kwsysProcessPipeData* td); -static int kwsysProcessInitialize(kwsysProcess* cp); -static DWORD kwsysProcessCreate(kwsysProcess* cp, int index, - kwsysProcessCreateInformation* si); -static void kwsysProcessDestroy(kwsysProcess* cp, int event); -static DWORD kwsysProcessSetupOutputPipeFile(PHANDLE handle, const char* name); -static void kwsysProcessSetupSharedPipe(DWORD nStdHandle, PHANDLE handle); -static void kwsysProcessSetupPipeNative(HANDLE native, PHANDLE handle); -static void kwsysProcessCleanupHandle(PHANDLE h); -static void kwsysProcessCleanup(kwsysProcess* cp, DWORD error); -static void kwsysProcessCleanErrorMessage(kwsysProcess* cp); -static int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout, - kwsysProcessTime* timeoutTime); -static int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime, - double* userTimeout, - kwsysProcessTime* timeoutLength); -static kwsysProcessTime kwsysProcessTimeGetCurrent(void); -static DWORD kwsysProcessTimeToDWORD(kwsysProcessTime t); -static double kwsysProcessTimeToDouble(kwsysProcessTime t); -static kwsysProcessTime kwsysProcessTimeFromDouble(double d); -static int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2); -static kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1, - kwsysProcessTime in2); -static kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1, - kwsysProcessTime in2); -static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int code, - int idx); -static void kwsysProcessKillTree(int pid); -static void kwsysProcessDisablePipeThreads(kwsysProcess* cp); -static int kwsysProcessesInitialize(void); -static int kwsysTryEnterCreateProcessSection(void); -static void kwsysLeaveCreateProcessSection(void); -static int kwsysProcessesAdd(HANDLE hProcess, DWORD dwProcessId, - int newProcessGroup); -static void kwsysProcessesRemove(HANDLE hProcess); -static BOOL WINAPI kwsysCtrlHandler(DWORD dwCtrlType); - -/* A structure containing synchronization data for each thread. */ -typedef struct kwsysProcessPipeSync_s kwsysProcessPipeSync; -struct kwsysProcessPipeSync_s -{ - /* Handle to the thread. */ - HANDLE Thread; - - /* Semaphore indicating to the thread that a process has started. */ - HANDLE Ready; - - /* Semaphore indicating to the thread that it should begin work. */ - HANDLE Go; - - /* Semaphore indicating thread has reset for another process. */ - HANDLE Reset; -}; - -/* A structure containing data for each pipe's threads. */ -struct kwsysProcessPipeData_s -{ - /* ------------- Data managed per instance of kwsysProcess ------------- */ - - /* Synchronization data for reading thread. */ - kwsysProcessPipeSync Reader; - - /* Synchronization data for waking thread. */ - kwsysProcessPipeSync Waker; - - /* Index of this pipe. */ - int Index; - - /* The kwsysProcess instance owning this pipe. */ - kwsysProcess* Process; - - /* ------------- Data managed per call to Execute ------------- */ - - /* Buffer for data read in this pipe's thread. */ - char DataBuffer[KWSYSPE_PIPE_BUFFER_SIZE]; - - /* The length of the data stored in the buffer. 
*/ - DWORD DataLength; - - /* Whether the pipe has been closed. */ - int Closed; - - /* Handle for the read end of this pipe. */ - HANDLE Read; - - /* Handle for the write end of this pipe. */ - HANDLE Write; -}; - -/* A structure containing results data for each process. */ -typedef struct kwsysProcessResults_s kwsysProcessResults; -struct kwsysProcessResults_s -{ - /* The status of the process. */ - int State; - - /* The exceptional behavior that terminated the process, if any. */ - int ExitException; - - /* The process exit code. */ - DWORD ExitCode; - - /* The process return code, if any. */ - int ExitValue; - - /* Description for the ExitException. */ - char ExitExceptionString[KWSYSPE_PIPE_BUFFER_SIZE + 1]; -}; - -/* Structure containing data used to implement the child's execution. */ -struct kwsysProcess_s -{ - /* ------------- Data managed per instance of kwsysProcess ------------- */ - - /* The status of the process structure. */ - int State; - - /* The command lines to execute. */ - wchar_t** Commands; - int NumberOfCommands; - - /* The exit code of each command. */ - DWORD* CommandExitCodes; - - /* The working directory for the child process. */ - wchar_t* WorkingDirectory; - - /* Whether to create the child as a detached process. */ - int OptionDetach; - - /* Whether the child was created as a detached process. */ - int Detached; - - /* Whether to hide the child process's window. */ - int HideWindow; - - /* Whether to treat command lines as verbatim. */ - int Verbatim; - - /* Whether to merge stdout/stderr of the child. */ - int MergeOutput; - - /* Whether to create the process in a new process group. */ - int CreateProcessGroup; - - /* Mutex to protect the shared index used by threads to report data. */ - HANDLE SharedIndexMutex; - - /* Semaphore used by threads to signal data ready. */ - HANDLE Full; - - /* Whether we are currently deleting this kwsysProcess instance. */ - int Deleting; - - /* Data specific to each pipe and its thread. */ - kwsysProcessPipeData Pipe[KWSYSPE_PIPE_COUNT]; - - /* Name of files to which stdin and stdout pipes are attached. */ - char* PipeFileSTDIN; - char* PipeFileSTDOUT; - char* PipeFileSTDERR; - - /* Whether each pipe is shared with the parent process. */ - int PipeSharedSTDIN; - int PipeSharedSTDOUT; - int PipeSharedSTDERR; - - /* Native pipes provided by the user. */ - HANDLE PipeNativeSTDIN[2]; - HANDLE PipeNativeSTDOUT[2]; - HANDLE PipeNativeSTDERR[2]; - - /* ------------- Data managed per call to Execute ------------- */ - - /* Index of last pipe to report data, if any. */ - int CurrentIndex; - - /* Index shared by threads to report data. */ - int SharedIndex; - - /* The timeout length. */ - double Timeout; - - /* Time at which the child started. */ - kwsysProcessTime StartTime; - - /* Time at which the child will timeout. Negative for no timeout. */ - kwsysProcessTime TimeoutTime; - - /* Flag for whether the process was killed. */ - int Killed; - - /* Flag for whether the timeout expired. */ - int TimeoutExpired; - - /* Flag for whether the process has terminated. */ - int Terminated; - - /* The number of pipes still open during execution and while waiting - for pipes to close after process termination. */ - int PipesLeft; - - /* Buffer for error messages. */ - char ErrorMessage[KWSYSPE_PIPE_BUFFER_SIZE + 1]; - - /* process results. */ - kwsysProcessResults* ProcessResults; - - /* Windows process information data. */ - PROCESS_INFORMATION* ProcessInformation; - - /* Data and process termination events for which to wait. 
*/ - PHANDLE ProcessEvents; - int ProcessEventsLength; - - /* Real working directory of our own process. */ - DWORD RealWorkingDirectoryLength; - wchar_t* RealWorkingDirectory; - - /* Own handles for the child's ends of the pipes in the parent process. - Used temporarily during process creation. */ - HANDLE PipeChildStd[3]; -}; - -kwsysProcess* kwsysProcess_New(void) -{ - int i; - - /* Process control structure. */ - kwsysProcess* cp; - - /* Windows version number data. */ - OSVERSIONINFO osv; - - /* Initialize list of processes before we get any farther. It's especially - important that the console Ctrl handler be added BEFORE starting the - first process. This prevents the risk of an orphaned process being - started by the main thread while the default Ctrl handler is in - progress. */ - if (!kwsysProcessesInitialize()) { - return 0; - } - - /* Allocate a process control structure. */ - cp = (kwsysProcess*)malloc(sizeof(kwsysProcess)); - if (!cp) { - /* Could not allocate memory for the control structure. */ - return 0; - } - ZeroMemory(cp, sizeof(*cp)); - - /* Share stdin with the parent process by default. */ - cp->PipeSharedSTDIN = 1; - - /* Set initial status. */ - cp->State = kwsysProcess_State_Starting; - - /* Choose a method of running the child based on version of - windows. */ - ZeroMemory(&osv, sizeof(osv)); - osv.dwOSVersionInfoSize = sizeof(osv); -#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx -# pragma warning(push) -# ifdef __INTEL_COMPILER -# pragma warning(disable : 1478) -# elif defined __clang__ -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wdeprecated-declarations" -# else -# pragma warning(disable : 4996) -# endif -#endif - GetVersionEx(&osv); -#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx -# ifdef __clang__ -# pragma clang diagnostic pop -# else -# pragma warning(pop) -# endif -#endif - if (osv.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS) { - /* Win9x no longer supported. */ - kwsysProcess_Delete(cp); - return 0; - } - - /* Initially no thread owns the mutex. Initialize semaphore to 1. */ - if (!(cp->SharedIndexMutex = CreateSemaphore(0, 1, 1, 0))) { - kwsysProcess_Delete(cp); - return 0; - } - - /* Initially no data are available. Initialize semaphore to 0. */ - if (!(cp->Full = CreateSemaphore(0, 0, 1, 0))) { - kwsysProcess_Delete(cp); - return 0; - } - - /* Create the thread to read each pipe. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - DWORD dummy = 0; - - /* Assign the thread its index. */ - cp->Pipe[i].Index = i; - - /* Give the thread a pointer back to the kwsysProcess instance. */ - cp->Pipe[i].Process = cp; - - /* No process is yet running. Initialize semaphore to 0. */ - if (!(cp->Pipe[i].Reader.Ready = CreateSemaphore(0, 0, 1, 0))) { - kwsysProcess_Delete(cp); - return 0; - } - - /* The pipe is not yet reset. Initialize semaphore to 0. */ - if (!(cp->Pipe[i].Reader.Reset = CreateSemaphore(0, 0, 1, 0))) { - kwsysProcess_Delete(cp); - return 0; - } - - /* The thread's buffer is initially empty. Initialize semaphore to 1. */ - if (!(cp->Pipe[i].Reader.Go = CreateSemaphore(0, 1, 1, 0))) { - kwsysProcess_Delete(cp); - return 0; - } - - /* Create the reading thread. It will block immediately. The - thread will not make deeply nested calls, so we need only a - small stack. */ - if (!(cp->Pipe[i].Reader.Thread = CreateThread( - 0, 1024, kwsysProcessPipeThreadRead, &cp->Pipe[i], 0, &dummy))) { - kwsysProcess_Delete(cp); - return 0; - } - - /* No process is yet running. Initialize semaphore to 0. 
*/ - if (!(cp->Pipe[i].Waker.Ready = CreateSemaphore(0, 0, 1, 0))) { - kwsysProcess_Delete(cp); - return 0; - } - - /* The pipe is not yet reset. Initialize semaphore to 0. */ - if (!(cp->Pipe[i].Waker.Reset = CreateSemaphore(0, 0, 1, 0))) { - kwsysProcess_Delete(cp); - return 0; - } - - /* The waker should not wake immediately. Initialize semaphore to 0. */ - if (!(cp->Pipe[i].Waker.Go = CreateSemaphore(0, 0, 1, 0))) { - kwsysProcess_Delete(cp); - return 0; - } - - /* Create the waking thread. It will block immediately. The - thread will not make deeply nested calls, so we need only a - small stack. */ - if (!(cp->Pipe[i].Waker.Thread = CreateThread( - 0, 1024, kwsysProcessPipeThreadWake, &cp->Pipe[i], 0, &dummy))) { - kwsysProcess_Delete(cp); - return 0; - } - } - for (i = 0; i < 3; ++i) { - cp->PipeChildStd[i] = INVALID_HANDLE_VALUE; - } - - return cp; -} - -void kwsysProcess_Delete(kwsysProcess* cp) -{ - int i; - - /* Make sure we have an instance. */ - if (!cp) { - return; - } - - /* If the process is executing, wait for it to finish. */ - if (cp->State == kwsysProcess_State_Executing) { - if (cp->Detached) { - kwsysProcess_Disown(cp); - } else { - kwsysProcess_WaitForExit(cp, 0); - } - } - - /* We are deleting the kwsysProcess instance. */ - cp->Deleting = 1; - - /* Terminate each of the threads. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - /* Terminate this reading thread. */ - if (cp->Pipe[i].Reader.Thread) { - /* Signal the thread we are ready for it. It will terminate - immediately since Deleting is set. */ - ReleaseSemaphore(cp->Pipe[i].Reader.Ready, 1, 0); - - /* Wait for the thread to exit. */ - WaitForSingleObject(cp->Pipe[i].Reader.Thread, INFINITE); - - /* Close the handle to the thread. */ - kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Thread); - } - - /* Terminate this waking thread. */ - if (cp->Pipe[i].Waker.Thread) { - /* Signal the thread we are ready for it. It will terminate - immediately since Deleting is set. */ - ReleaseSemaphore(cp->Pipe[i].Waker.Ready, 1, 0); - - /* Wait for the thread to exit. */ - WaitForSingleObject(cp->Pipe[i].Waker.Thread, INFINITE); - - /* Close the handle to the thread. */ - kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Thread); - } - - /* Cleanup the pipe's semaphores. */ - kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Ready); - kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Go); - kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Reset); - kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Ready); - kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Go); - kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Reset); - } - - /* Close the shared semaphores. */ - kwsysProcessCleanupHandle(&cp->SharedIndexMutex); - kwsysProcessCleanupHandle(&cp->Full); - - /* Free memory. 
*/ - kwsysProcess_SetCommand(cp, 0); - kwsysProcess_SetWorkingDirectory(cp, 0); - kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDIN, 0); - kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDOUT, 0); - kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDERR, 0); - free(cp->CommandExitCodes); - free(cp->ProcessResults); - free(cp); -} - -int kwsysProcess_SetCommand(kwsysProcess* cp, char const* const* command) -{ - int i; - if (!cp) { - return 0; - } - for (i = 0; i < cp->NumberOfCommands; ++i) { - free(cp->Commands[i]); - } - cp->NumberOfCommands = 0; - if (cp->Commands) { - free(cp->Commands); - cp->Commands = 0; - } - if (command) { - return kwsysProcess_AddCommand(cp, command); - } - return 1; -} - -int kwsysProcess_AddCommand(kwsysProcess* cp, char const* const* command) -{ - int newNumberOfCommands; - wchar_t** newCommands; - - /* Make sure we have a command to add. */ - if (!cp || !command || !*command) { - return 0; - } - - /* Allocate a new array for command pointers. */ - newNumberOfCommands = cp->NumberOfCommands + 1; - if (!(newCommands = - (wchar_t**)malloc(sizeof(wchar_t*) * newNumberOfCommands))) { - /* Out of memory. */ - return 0; - } - - /* Copy any existing commands into the new array. */ - { - int i; - for (i = 0; i < cp->NumberOfCommands; ++i) { - newCommands[i] = cp->Commands[i]; - } - } - - if (cp->Verbatim) { - /* Copy the verbatim command line into the buffer. */ - newCommands[cp->NumberOfCommands] = kwsysEncoding_DupToWide(*command); - } else { - /* Encode the arguments so CommandLineToArgvW can decode - them from the command line string in the child. */ - char buffer[32768]; /* CreateProcess max command-line length. */ - char* end = buffer + sizeof(buffer); - char* out = buffer; - char const* const* a; - for (a = command; *a; ++a) { - int quote = !**a; /* Quote the empty string. */ - int slashes = 0; - char const* c; - if (a != command && out != end) { - *out++ = ' '; - } - for (c = *a; !quote && *c; ++c) { - quote = (*c == ' ' || *c == '\t'); - } - if (quote && out != end) { - *out++ = '"'; - } - for (c = *a; *c; ++c) { - if (*c == '\\') { - ++slashes; - } else { - if (*c == '"') { - // Add n+1 backslashes to total 2n+1 before internal '"'. - while (slashes-- >= 0 && out != end) { - *out++ = '\\'; - } - } - slashes = 0; - } - if (out != end) { - *out++ = *c; - } - } - if (quote) { - // Add n backslashes to total 2n before ending '"'. - while (slashes-- > 0 && out != end) { - *out++ = '\\'; - } - if (out != end) { - *out++ = '"'; - } - } - } - if (out != end) { - *out = '\0'; - newCommands[cp->NumberOfCommands] = kwsysEncoding_DupToWide(buffer); - } else { - newCommands[cp->NumberOfCommands] = 0; - } - } - if (!newCommands[cp->NumberOfCommands]) { - /* Out of memory or command line too long. */ - free(newCommands); - return 0; - } - - /* Save the new array of commands. */ - free(cp->Commands); - cp->Commands = newCommands; - cp->NumberOfCommands = newNumberOfCommands; - return 1; -} - -void kwsysProcess_SetTimeout(kwsysProcess* cp, double timeout) -{ - if (!cp) { - return; - } - cp->Timeout = timeout; - if (cp->Timeout < 0) { - cp->Timeout = 0; - } - // Force recomputation of TimeoutTime. - cp->TimeoutTime.QuadPart = -1; -} - -int kwsysProcess_SetWorkingDirectory(kwsysProcess* cp, const char* dir) -{ - if (!cp) { - return 0; - } - if (cp->WorkingDirectory) { - free(cp->WorkingDirectory); - cp->WorkingDirectory = 0; - } - if (dir && dir[0]) { - wchar_t* wdir = kwsysEncoding_DupToWide(dir); - /* We must convert the working directory to a full path. 
*/ - DWORD length = GetFullPathNameW(wdir, 0, 0, 0); - if (length > 0) { - wchar_t* work_dir = malloc(length * sizeof(wchar_t)); - if (!work_dir) { - free(wdir); - return 0; - } - if (!GetFullPathNameW(wdir, length, work_dir, 0)) { - free(work_dir); - free(wdir); - return 0; - } - cp->WorkingDirectory = work_dir; - } - free(wdir); - } - return 1; -} - -int kwsysProcess_SetPipeFile(kwsysProcess* cp, int pipe, const char* file) -{ - char** pfile; - if (!cp) { - return 0; - } - switch (pipe) { - case kwsysProcess_Pipe_STDIN: - pfile = &cp->PipeFileSTDIN; - break; - case kwsysProcess_Pipe_STDOUT: - pfile = &cp->PipeFileSTDOUT; - break; - case kwsysProcess_Pipe_STDERR: - pfile = &cp->PipeFileSTDERR; - break; - default: - return 0; - } - if (*pfile) { - free(*pfile); - *pfile = 0; - } - if (file) { - *pfile = strdup(file); - if (!*pfile) { - return 0; - } - } - - /* If we are redirecting the pipe, do not share it or use a native - pipe. */ - if (*pfile) { - kwsysProcess_SetPipeNative(cp, pipe, 0); - kwsysProcess_SetPipeShared(cp, pipe, 0); - } - - return 1; -} - -void kwsysProcess_SetPipeShared(kwsysProcess* cp, int pipe, int shared) -{ - if (!cp) { - return; - } - - switch (pipe) { - case kwsysProcess_Pipe_STDIN: - cp->PipeSharedSTDIN = shared ? 1 : 0; - break; - case kwsysProcess_Pipe_STDOUT: - cp->PipeSharedSTDOUT = shared ? 1 : 0; - break; - case kwsysProcess_Pipe_STDERR: - cp->PipeSharedSTDERR = shared ? 1 : 0; - break; - default: - return; - } - - /* If we are sharing the pipe, do not redirect it to a file or use a - native pipe. */ - if (shared) { - kwsysProcess_SetPipeFile(cp, pipe, 0); - kwsysProcess_SetPipeNative(cp, pipe, 0); - } -} - -void kwsysProcess_SetPipeNative(kwsysProcess* cp, int pipe, HANDLE p[2]) -{ - HANDLE* pPipeNative = 0; - - if (!cp) { - return; - } - - switch (pipe) { - case kwsysProcess_Pipe_STDIN: - pPipeNative = cp->PipeNativeSTDIN; - break; - case kwsysProcess_Pipe_STDOUT: - pPipeNative = cp->PipeNativeSTDOUT; - break; - case kwsysProcess_Pipe_STDERR: - pPipeNative = cp->PipeNativeSTDERR; - break; - default: - return; - } - - /* Copy the native pipe handles provided. */ - if (p) { - pPipeNative[0] = p[0]; - pPipeNative[1] = p[1]; - } else { - pPipeNative[0] = 0; - pPipeNative[1] = 0; - } - - /* If we are using a native pipe, do not share it or redirect it to - a file. */ - if (p) { - kwsysProcess_SetPipeFile(cp, pipe, 0); - kwsysProcess_SetPipeShared(cp, pipe, 0); - } -} - -int kwsysProcess_GetOption(kwsysProcess* cp, int optionId) -{ - if (!cp) { - return 0; - } - - switch (optionId) { - case kwsysProcess_Option_Detach: - return cp->OptionDetach; - case kwsysProcess_Option_HideWindow: - return cp->HideWindow; - case kwsysProcess_Option_MergeOutput: - return cp->MergeOutput; - case kwsysProcess_Option_Verbatim: - return cp->Verbatim; - case kwsysProcess_Option_CreateProcessGroup: - return cp->CreateProcessGroup; - default: - return 0; - } -} - -void kwsysProcess_SetOption(kwsysProcess* cp, int optionId, int value) -{ - if (!cp) { - return; - } - - switch (optionId) { - case kwsysProcess_Option_Detach: - cp->OptionDetach = value; - break; - case kwsysProcess_Option_HideWindow: - cp->HideWindow = value; - break; - case kwsysProcess_Option_MergeOutput: - cp->MergeOutput = value; - break; - case kwsysProcess_Option_Verbatim: - cp->Verbatim = value; - break; - case kwsysProcess_Option_CreateProcessGroup: - cp->CreateProcessGroup = value; - break; - default: - break; - } -} - -int kwsysProcess_GetState(kwsysProcess* cp) -{ - return cp ? 
cp->State : kwsysProcess_State_Error; -} - -int kwsysProcess_GetExitException(kwsysProcess* cp) -{ - return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) - ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitException - : kwsysProcess_Exception_Other; -} - -int kwsysProcess_GetExitValue(kwsysProcess* cp) -{ - return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) - ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitValue - : -1; -} - -int kwsysProcess_GetExitCode(kwsysProcess* cp) -{ - return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0)) - ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitCode - : 0; -} - -const char* kwsysProcess_GetErrorString(kwsysProcess* cp) -{ - if (!cp) { - return "Process management structure could not be allocated"; - } else if (cp->State == kwsysProcess_State_Error) { - return cp->ErrorMessage; - } - return "Success"; -} - -const char* kwsysProcess_GetExceptionString(kwsysProcess* cp) -{ - if (!(cp && cp->ProcessResults && (cp->NumberOfCommands > 0))) { - return "GetExceptionString called with NULL process management structure"; - } else if (cp->State == kwsysProcess_State_Exception) { - return cp->ProcessResults[cp->NumberOfCommands - 1].ExitExceptionString; - } - return "No exception"; -} - -/* the index should be in array bound. */ -#define KWSYSPE_IDX_CHK(RET) \ - if (!cp || idx >= cp->NumberOfCommands || idx < 0) { \ - KWSYSPE_DEBUG((stderr, "array index out of bound\n")); \ - return RET; \ - } - -int kwsysProcess_GetStateByIndex(kwsysProcess* cp, int idx) -{ - KWSYSPE_IDX_CHK(kwsysProcess_State_Error) - return cp->ProcessResults[idx].State; -} - -int kwsysProcess_GetExitExceptionByIndex(kwsysProcess* cp, int idx) -{ - KWSYSPE_IDX_CHK(kwsysProcess_Exception_Other) - return cp->ProcessResults[idx].ExitException; -} - -int kwsysProcess_GetExitValueByIndex(kwsysProcess* cp, int idx) -{ - KWSYSPE_IDX_CHK(-1) - return cp->ProcessResults[idx].ExitValue; -} - -int kwsysProcess_GetExitCodeByIndex(kwsysProcess* cp, int idx) -{ - KWSYSPE_IDX_CHK(-1) - return cp->CommandExitCodes[idx]; -} - -const char* kwsysProcess_GetExceptionStringByIndex(kwsysProcess* cp, int idx) -{ - KWSYSPE_IDX_CHK("GetExceptionString called with NULL process management " - "structure or index out of bound") - if (cp->ProcessResults[idx].State == kwsysProcess_StateByIndex_Exception) { - return cp->ProcessResults[idx].ExitExceptionString; - } - return "No exception"; -} - -#undef KWSYSPE_IDX_CHK - -void kwsysProcess_Execute(kwsysProcess* cp) -{ - int i; - - /* Do not execute a second time. */ - if (!cp || cp->State == kwsysProcess_State_Executing) { - return; - } - - /* Make sure we have something to run. */ - if (cp->NumberOfCommands < 1) { - strcpy(cp->ErrorMessage, "No command"); - cp->State = kwsysProcess_State_Error; - return; - } - - /* Initialize the control structure for a new process. */ - if (!kwsysProcessInitialize(cp)) { - strcpy(cp->ErrorMessage, "Out of memory"); - cp->State = kwsysProcess_State_Error; - return; - } - - /* Save the real working directory of this process and change to - the working directory for the child processes. This is needed - to make pipe file paths evaluate correctly. */ - if (cp->WorkingDirectory) { - if (!GetCurrentDirectoryW(cp->RealWorkingDirectoryLength, - cp->RealWorkingDirectory)) { - kwsysProcessCleanup(cp, GetLastError()); - return; - } - SetCurrentDirectoryW(cp->WorkingDirectory); - } - - /* Setup the stdin pipe for the first process. */ - if (cp->PipeFileSTDIN) { - /* Create a handle to read a file for stdin. 
*/ - wchar_t* wstdin = kwsysEncoding_DupToWide(cp->PipeFileSTDIN); - DWORD error; - cp->PipeChildStd[0] = - CreateFileW(wstdin, GENERIC_READ, FILE_SHARE_READ | FILE_SHARE_WRITE, 0, - OPEN_EXISTING, 0, 0); - error = GetLastError(); /* Check now in case free changes this. */ - free(wstdin); - if (cp->PipeChildStd[0] == INVALID_HANDLE_VALUE) { - kwsysProcessCleanup(cp, error); - return; - } - } else if (cp->PipeSharedSTDIN) { - /* Share this process's stdin with the child. */ - kwsysProcessSetupSharedPipe(STD_INPUT_HANDLE, &cp->PipeChildStd[0]); - } else if (cp->PipeNativeSTDIN[0]) { - /* Use the provided native pipe. */ - kwsysProcessSetupPipeNative(cp->PipeNativeSTDIN[0], &cp->PipeChildStd[0]); - } else { - /* Explicitly give the child no stdin. */ - cp->PipeChildStd[0] = INVALID_HANDLE_VALUE; - } - - /* Create the output pipe for the last process. - We always create this so the pipe thread can run even if we - do not end up giving the write end to the child below. */ - if (!CreatePipe(&cp->Pipe[KWSYSPE_PIPE_STDOUT].Read, - &cp->Pipe[KWSYSPE_PIPE_STDOUT].Write, 0, 0)) { - kwsysProcessCleanup(cp, GetLastError()); - return; - } - - if (cp->PipeFileSTDOUT) { - /* Use a file for stdout. */ - DWORD error = kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[1], - cp->PipeFileSTDOUT); - if (error) { - kwsysProcessCleanup(cp, error); - return; - } - } else if (cp->PipeSharedSTDOUT) { - /* Use the parent stdout. */ - kwsysProcessSetupSharedPipe(STD_OUTPUT_HANDLE, &cp->PipeChildStd[1]); - } else if (cp->PipeNativeSTDOUT[1]) { - /* Use the given handle for stdout. */ - kwsysProcessSetupPipeNative(cp->PipeNativeSTDOUT[1], &cp->PipeChildStd[1]); - } else { - /* Use our pipe for stdout. Duplicate the handle since our waker - thread will use the original. Do not make it inherited yet. */ - if (!DuplicateHandle(GetCurrentProcess(), - cp->Pipe[KWSYSPE_PIPE_STDOUT].Write, - GetCurrentProcess(), &cp->PipeChildStd[1], 0, FALSE, - DUPLICATE_SAME_ACCESS)) { - kwsysProcessCleanup(cp, GetLastError()); - return; - } - } - - /* Create stderr pipe to be shared by all processes in the pipeline. - We always create this so the pipe thread can run even if we do not - end up giving the write end to the child below. */ - if (!CreatePipe(&cp->Pipe[KWSYSPE_PIPE_STDERR].Read, - &cp->Pipe[KWSYSPE_PIPE_STDERR].Write, 0, 0)) { - kwsysProcessCleanup(cp, GetLastError()); - return; - } - - if (cp->PipeFileSTDERR) { - /* Use a file for stderr. */ - DWORD error = kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[2], - cp->PipeFileSTDERR); - if (error) { - kwsysProcessCleanup(cp, error); - return; - } - } else if (cp->PipeSharedSTDERR) { - /* Use the parent stderr. */ - kwsysProcessSetupSharedPipe(STD_ERROR_HANDLE, &cp->PipeChildStd[2]); - } else if (cp->PipeNativeSTDERR[1]) { - /* Use the given handle for stderr. */ - kwsysProcessSetupPipeNative(cp->PipeNativeSTDERR[1], &cp->PipeChildStd[2]); - } else { - /* Use our pipe for stderr. Duplicate the handle since our waker - thread will use the original. Do not make it inherited yet. */ - if (!DuplicateHandle(GetCurrentProcess(), - cp->Pipe[KWSYSPE_PIPE_STDERR].Write, - GetCurrentProcess(), &cp->PipeChildStd[2], 0, FALSE, - DUPLICATE_SAME_ACCESS)) { - kwsysProcessCleanup(cp, GetLastError()); - return; - } - } - - /* Create the pipeline of processes. */ - { - /* Child startup control data. */ - kwsysProcessCreateInformation si; - HANDLE nextStdInput = cp->PipeChildStd[0]; - - /* Initialize startup info data. 
*/ - ZeroMemory(&si, sizeof(si)); - si.StartupInfo.cb = sizeof(si.StartupInfo); - - /* Decide whether a child window should be shown. */ - si.StartupInfo.dwFlags |= STARTF_USESHOWWINDOW; - si.StartupInfo.wShowWindow = - (unsigned short)(cp->HideWindow ? SW_HIDE : SW_SHOWDEFAULT); - - /* Connect the child's output pipes to the threads. */ - si.StartupInfo.dwFlags |= STARTF_USESTDHANDLES; - - for (i = 0; i < cp->NumberOfCommands; ++i) { - /* Setup the process's pipes. */ - si.hStdInput = nextStdInput; - if (i == cp->NumberOfCommands - 1) { - /* The last child gets the overall stdout. */ - nextStdInput = INVALID_HANDLE_VALUE; - si.hStdOutput = cp->PipeChildStd[1]; - } else { - /* Create a pipe to sit between the children. */ - HANDLE p[2] = { INVALID_HANDLE_VALUE, INVALID_HANDLE_VALUE }; - if (!CreatePipe(&p[0], &p[1], 0, 0)) { - DWORD error = GetLastError(); - if (nextStdInput != cp->PipeChildStd[0]) { - kwsysProcessCleanupHandle(&nextStdInput); - } - kwsysProcessCleanup(cp, error); - return; - } - nextStdInput = p[0]; - si.hStdOutput = p[1]; - } - si.hStdError = - cp->MergeOutput ? cp->PipeChildStd[1] : cp->PipeChildStd[2]; - - { - DWORD error = kwsysProcessCreate(cp, i, &si); - - /* Close our copies of pipes used between children. */ - if (si.hStdInput != cp->PipeChildStd[0]) { - kwsysProcessCleanupHandle(&si.hStdInput); - } - if (si.hStdOutput != cp->PipeChildStd[1]) { - kwsysProcessCleanupHandle(&si.hStdOutput); - } - if (si.hStdError != cp->PipeChildStd[2] && !cp->MergeOutput) { - kwsysProcessCleanupHandle(&si.hStdError); - } - if (!error) { - cp->ProcessEvents[i + 1] = cp->ProcessInformation[i].hProcess; - } else { - if (nextStdInput != cp->PipeChildStd[0]) { - kwsysProcessCleanupHandle(&nextStdInput); - } - kwsysProcessCleanup(cp, error); - return; - } - } - } - } - - /* The parent process does not need the child's pipe ends. */ - for (i = 0; i < 3; ++i) { - kwsysProcessCleanupHandle(&cp->PipeChildStd[i]); - } - - /* Restore the working directory. */ - if (cp->RealWorkingDirectory) { - SetCurrentDirectoryW(cp->RealWorkingDirectory); - free(cp->RealWorkingDirectory); - cp->RealWorkingDirectory = 0; - } - - /* The timeout period starts now. */ - cp->StartTime = kwsysProcessTimeGetCurrent(); - cp->TimeoutTime = kwsysProcessTimeFromDouble(-1); - - /* All processes in the pipeline have been started in suspended - mode. Resume them all now. */ - for (i = 0; i < cp->NumberOfCommands; ++i) { - ResumeThread(cp->ProcessInformation[i].hThread); - } - - /* ---- It is no longer safe to call kwsysProcessCleanup. ----- */ - /* Tell the pipe threads that a process has started. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - ReleaseSemaphore(cp->Pipe[i].Reader.Ready, 1, 0); - ReleaseSemaphore(cp->Pipe[i].Waker.Ready, 1, 0); - } - - /* We don't care about the children's main threads. */ - for (i = 0; i < cp->NumberOfCommands; ++i) { - kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hThread); - } - - /* No pipe has reported data. */ - cp->CurrentIndex = KWSYSPE_PIPE_COUNT; - cp->PipesLeft = KWSYSPE_PIPE_COUNT; - - /* The process has now started. */ - cp->State = kwsysProcess_State_Executing; - cp->Detached = cp->OptionDetach; -} - -void kwsysProcess_Disown(kwsysProcess* cp) -{ - int i; - - /* Make sure we are executing a detached process. */ - if (!cp || !cp->Detached || cp->State != kwsysProcess_State_Executing || - cp->TimeoutExpired || cp->Killed || cp->Terminated) { - return; - } - - /* Disable the reading threads. 
*/ - kwsysProcessDisablePipeThreads(cp); - - /* Wait for all pipe threads to reset. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - WaitForSingleObject(cp->Pipe[i].Reader.Reset, INFINITE); - WaitForSingleObject(cp->Pipe[i].Waker.Reset, INFINITE); - } - - /* We will not wait for exit, so cleanup now. */ - kwsysProcessCleanup(cp, 0); - - /* The process has been disowned. */ - cp->State = kwsysProcess_State_Disowned; -} - -int kwsysProcess_WaitForData(kwsysProcess* cp, char** data, int* length, - double* userTimeout) -{ - kwsysProcessTime userStartTime; - kwsysProcessTime timeoutLength; - kwsysProcessTime timeoutTime; - DWORD timeout; - int user; - int done = 0; - int expired = 0; - int pipeId = kwsysProcess_Pipe_None; - DWORD w; - - /* Make sure we are executing a process. */ - if (!cp || cp->State != kwsysProcess_State_Executing || cp->Killed || - cp->TimeoutExpired) { - return kwsysProcess_Pipe_None; - } - - /* Record the time at which user timeout period starts. */ - userStartTime = kwsysProcessTimeGetCurrent(); - - /* Calculate the time at which a timeout will expire, and whether it - is the user or process timeout. */ - user = kwsysProcessGetTimeoutTime(cp, userTimeout, &timeoutTime); - - /* Loop until we have a reason to return. */ - while (!done && cp->PipesLeft > 0) { - /* If we previously got data from a thread, let it know we are - done with the data. */ - if (cp->CurrentIndex < KWSYSPE_PIPE_COUNT) { - KWSYSPE_DEBUG((stderr, "releasing reader %d\n", cp->CurrentIndex)); - ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0); - cp->CurrentIndex = KWSYSPE_PIPE_COUNT; - } - - /* Setup a timeout if required. */ - if (kwsysProcessGetTimeoutLeft(&timeoutTime, user ? userTimeout : 0, - &timeoutLength)) { - /* Timeout has already expired. */ - expired = 1; - break; - } - if (timeoutTime.QuadPart < 0) { - timeout = INFINITE; - } else { - timeout = kwsysProcessTimeToDWORD(timeoutLength); - } - - /* Wait for a pipe's thread to signal or a process to terminate. */ - w = WaitForMultipleObjects(cp->ProcessEventsLength, cp->ProcessEvents, 0, - timeout); - if (w == WAIT_TIMEOUT) { - /* Timeout has expired. */ - expired = 1; - done = 1; - } else if (w == WAIT_OBJECT_0) { - /* Save the index of the reporting thread and release the mutex. - The thread will block until we signal its Empty mutex. */ - cp->CurrentIndex = cp->SharedIndex; - ReleaseSemaphore(cp->SharedIndexMutex, 1, 0); - - /* Data are available or a pipe closed. */ - if (cp->Pipe[cp->CurrentIndex].Closed) { - /* The pipe closed at the write end. Close the read end and - inform the wakeup thread it is done with this process. */ - kwsysProcessCleanupHandle(&cp->Pipe[cp->CurrentIndex].Read); - ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Waker.Go, 1, 0); - KWSYSPE_DEBUG((stderr, "wakeup %d\n", cp->CurrentIndex)); - --cp->PipesLeft; - } else if (data && length) { - /* Report this data. */ - *data = cp->Pipe[cp->CurrentIndex].DataBuffer; - *length = cp->Pipe[cp->CurrentIndex].DataLength; - switch (cp->CurrentIndex) { - case KWSYSPE_PIPE_STDOUT: - pipeId = kwsysProcess_Pipe_STDOUT; - break; - case KWSYSPE_PIPE_STDERR: - pipeId = kwsysProcess_Pipe_STDERR; - break; - } - done = 1; - } - } else { - /* A process has terminated. */ - kwsysProcessDestroy(cp, w - WAIT_OBJECT_0); - } - } - - /* Update the user timeout. 
*/ - if (userTimeout) { - kwsysProcessTime userEndTime = kwsysProcessTimeGetCurrent(); - kwsysProcessTime difference = - kwsysProcessTimeSubtract(userEndTime, userStartTime); - double d = kwsysProcessTimeToDouble(difference); - *userTimeout -= d; - if (*userTimeout < 0) { - *userTimeout = 0; - } - } - - /* Check what happened. */ - if (pipeId) { - /* Data are ready on a pipe. */ - return pipeId; - } else if (expired) { - /* A timeout has expired. */ - if (user) { - /* The user timeout has expired. It has no time left. */ - return kwsysProcess_Pipe_Timeout; - } else { - /* The process timeout has expired. Kill the child now. */ - KWSYSPE_DEBUG((stderr, "killing child because timeout expired\n")); - kwsysProcess_Kill(cp); - cp->TimeoutExpired = 1; - cp->Killed = 0; - return kwsysProcess_Pipe_None; - } - } else { - /* The children have terminated and no more data are available. */ - return kwsysProcess_Pipe_None; - } -} - -int kwsysProcess_WaitForExit(kwsysProcess* cp, double* userTimeout) -{ - int i; - int pipe; - - /* Make sure we are executing a process. */ - if (!cp || cp->State != kwsysProcess_State_Executing) { - return 1; - } - - /* Wait for the process to terminate. Ignore all data. */ - while ((pipe = kwsysProcess_WaitForData(cp, 0, 0, userTimeout)) > 0) { - if (pipe == kwsysProcess_Pipe_Timeout) { - /* The user timeout has expired. */ - return 0; - } - } - - KWSYSPE_DEBUG((stderr, "no more data\n")); - - /* When the last pipe closes in WaitForData, the loop terminates - without releasing the pipe's thread. Release it now. */ - if (cp->CurrentIndex < KWSYSPE_PIPE_COUNT) { - KWSYSPE_DEBUG((stderr, "releasing reader %d\n", cp->CurrentIndex)); - ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0); - cp->CurrentIndex = KWSYSPE_PIPE_COUNT; - } - - /* Wait for all pipe threads to reset. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - KWSYSPE_DEBUG((stderr, "waiting reader reset %d\n", i)); - WaitForSingleObject(cp->Pipe[i].Reader.Reset, INFINITE); - KWSYSPE_DEBUG((stderr, "waiting waker reset %d\n", i)); - WaitForSingleObject(cp->Pipe[i].Waker.Reset, INFINITE); - } - - /* ---- It is now safe again to call kwsysProcessCleanup. ----- */ - /* Close all the pipes. */ - kwsysProcessCleanup(cp, 0); - - /* Determine the outcome. */ - if (cp->Killed) { - /* We killed the child. */ - cp->State = kwsysProcess_State_Killed; - } else if (cp->TimeoutExpired) { - /* The timeout expired. */ - cp->State = kwsysProcess_State_Expired; - } else { - /* The children exited. Report the outcome of the child processes. */ - for (i = 0; i < cp->NumberOfCommands; ++i) { - cp->ProcessResults[i].ExitCode = cp->CommandExitCodes[i]; - if ((cp->ProcessResults[i].ExitCode & 0xF0000000) == 0xC0000000) { - /* Child terminated due to exceptional behavior. */ - cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Exception; - cp->ProcessResults[i].ExitValue = 1; - kwsysProcessSetExitExceptionByIndex(cp, cp->ProcessResults[i].ExitCode, - i); - } else { - /* Child exited without exception. */ - cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Exited; - cp->ProcessResults[i].ExitException = kwsysProcess_Exception_None; - cp->ProcessResults[i].ExitValue = cp->ProcessResults[i].ExitCode; - } - } - /* support legacy state status value */ - cp->State = cp->ProcessResults[cp->NumberOfCommands - 1].State; - } - - return 1; -} - -void kwsysProcess_Interrupt(kwsysProcess* cp) -{ - int i; - /* Make sure we are executing a process. 
*/ - if (!cp || cp->State != kwsysProcess_State_Executing || cp->TimeoutExpired || - cp->Killed) { - KWSYSPE_DEBUG((stderr, "interrupt: child not executing\n")); - return; - } - - /* Skip actually interrupting the child if it has already terminated. */ - if (cp->Terminated) { - KWSYSPE_DEBUG((stderr, "interrupt: child already terminated\n")); - return; - } - - /* Interrupt the children. */ - if (cp->CreateProcessGroup) { - if (cp->ProcessInformation) { - for (i = 0; i < cp->NumberOfCommands; ++i) { - /* Make sure the process handle isn't closed (e.g. from disowning). */ - if (cp->ProcessInformation[i].hProcess) { - /* The user created a process group for this process. The group ID - is the process ID for the original process in the group. Note - that we have to use Ctrl+Break: Ctrl+C is not allowed for process - groups. */ - GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, - cp->ProcessInformation[i].dwProcessId); - } - } - } - } else { - /* No process group was created. Kill our own process group... */ - GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, 0); - } -} - -void kwsysProcess_Kill(kwsysProcess* cp) -{ - int i; - /* Make sure we are executing a process. */ - if (!cp || cp->State != kwsysProcess_State_Executing || cp->TimeoutExpired || - cp->Killed) { - KWSYSPE_DEBUG((stderr, "kill: child not executing\n")); - return; - } - - /* Disable the reading threads. */ - KWSYSPE_DEBUG((stderr, "kill: disabling pipe threads\n")); - kwsysProcessDisablePipeThreads(cp); - - /* Skip actually killing the child if it has already terminated. */ - if (cp->Terminated) { - KWSYSPE_DEBUG((stderr, "kill: child already terminated\n")); - return; - } - - /* Kill the children. */ - cp->Killed = 1; - for (i = 0; i < cp->NumberOfCommands; ++i) { - kwsysProcessKillTree(cp->ProcessInformation[i].dwProcessId); - /* Remove from global list of processes and close handles. */ - kwsysProcessesRemove(cp->ProcessInformation[i].hProcess); - kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hThread); - kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hProcess); - } - - /* We are killing the children and ignoring all data. Do not wait - for them to exit. */ -} - -void kwsysProcess_KillPID(unsigned long process_id) -{ - kwsysProcessKillTree((DWORD)process_id); -} - -/* - Function executed for each pipe's thread. Argument is a pointer to - the kwsysProcessPipeData instance for this thread. -*/ -DWORD WINAPI kwsysProcessPipeThreadRead(LPVOID ptd) -{ - kwsysProcessPipeData* td = (kwsysProcessPipeData*)ptd; - kwsysProcess* cp = td->Process; - - /* Wait for a process to be ready. */ - while ((WaitForSingleObject(td->Reader.Ready, INFINITE), !cp->Deleting)) { - /* Read output from the process for this thread's pipe. */ - kwsysProcessPipeThreadReadPipe(cp, td); - - /* Signal the main thread we have reset for a new process. */ - ReleaseSemaphore(td->Reader.Reset, 1, 0); - } - return 0; -} - -/* - Function called in each pipe's thread to handle data for one - execution of a subprocess. -*/ -void kwsysProcessPipeThreadReadPipe(kwsysProcess* cp, kwsysProcessPipeData* td) -{ - /* Wait for space in the thread's buffer. */ - while ((KWSYSPE_DEBUG((stderr, "wait for read %d\n", td->Index)), - WaitForSingleObject(td->Reader.Go, INFINITE), !td->Closed)) { - KWSYSPE_DEBUG((stderr, "reading %d\n", td->Index)); - - /* Read data from the pipe. This may block until data are available. 
*/ - if (!ReadFile(td->Read, td->DataBuffer, KWSYSPE_PIPE_BUFFER_SIZE, - &td->DataLength, 0)) { - if (GetLastError() != ERROR_BROKEN_PIPE) { - /* UNEXPECTED failure to read the pipe. */ - } - - /* The pipe closed. There are no more data to read. */ - td->Closed = 1; - KWSYSPE_DEBUG((stderr, "read closed %d\n", td->Index)); - } - - KWSYSPE_DEBUG((stderr, "read %d\n", td->Index)); - - /* Wait for our turn to be handled by the main thread. */ - WaitForSingleObject(cp->SharedIndexMutex, INFINITE); - - KWSYSPE_DEBUG((stderr, "reporting read %d\n", td->Index)); - - /* Tell the main thread we have something to report. */ - cp->SharedIndex = td->Index; - ReleaseSemaphore(cp->Full, 1, 0); - } - - /* We were signalled to exit with our buffer empty. Reset the - mutex for a new process. */ - KWSYSPE_DEBUG((stderr, "self releasing reader %d\n", td->Index)); - ReleaseSemaphore(td->Reader.Go, 1, 0); -} - -/* - Function executed for each pipe's thread. Argument is a pointer to - the kwsysProcessPipeData instance for this thread. -*/ -DWORD WINAPI kwsysProcessPipeThreadWake(LPVOID ptd) -{ - kwsysProcessPipeData* td = (kwsysProcessPipeData*)ptd; - kwsysProcess* cp = td->Process; - - /* Wait for a process to be ready. */ - while ((WaitForSingleObject(td->Waker.Ready, INFINITE), !cp->Deleting)) { - /* Wait for a possible wakeup. */ - kwsysProcessPipeThreadWakePipe(cp, td); - - /* Signal the main thread we have reset for a new process. */ - ReleaseSemaphore(td->Waker.Reset, 1, 0); - } - return 0; -} - -/* - Function called in each pipe's thread to handle reading thread - wakeup for one execution of a subprocess. -*/ -void kwsysProcessPipeThreadWakePipe(kwsysProcess* cp, kwsysProcessPipeData* td) -{ - (void)cp; - - /* Wait for a possible wake command. */ - KWSYSPE_DEBUG((stderr, "wait for wake %d\n", td->Index)); - WaitForSingleObject(td->Waker.Go, INFINITE); - KWSYSPE_DEBUG((stderr, "waking %d\n", td->Index)); - - /* If the pipe is not closed, we need to wake up the reading thread. */ - if (!td->Closed) { - DWORD dummy; - KWSYSPE_DEBUG((stderr, "waker %d writing byte\n", td->Index)); - WriteFile(td->Write, "", 1, &dummy, 0); - KWSYSPE_DEBUG((stderr, "waker %d wrote byte\n", td->Index)); - } -} - -/* Initialize a process control structure for kwsysProcess_Execute. */ -int kwsysProcessInitialize(kwsysProcess* cp) -{ - int i; - /* Reset internal status flags. */ - cp->TimeoutExpired = 0; - cp->Terminated = 0; - cp->Killed = 0; - - free(cp->ProcessResults); - /* Allocate process result information for each process. */ - cp->ProcessResults = (kwsysProcessResults*)malloc( - sizeof(kwsysProcessResults) * (cp->NumberOfCommands)); - if (!cp->ProcessResults) { - return 0; - } - ZeroMemory(cp->ProcessResults, - sizeof(kwsysProcessResults) * cp->NumberOfCommands); - for (i = 0; i < cp->NumberOfCommands; i++) { - cp->ProcessResults[i].ExitException = kwsysProcess_Exception_None; - cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Starting; - cp->ProcessResults[i].ExitCode = 1; - cp->ProcessResults[i].ExitValue = 1; - strcpy(cp->ProcessResults[i].ExitExceptionString, "No exception"); - } - - /* Allocate process information for each process. 
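/* A small self-contained sketch of the wakeup trick the Waker thread above
   relies on: a thread blocked in ReadFile on an anonymous pipe cannot be
   interrupted directly, but one spare byte written to the pipe's write end
   makes ReadFile return. All names here are local to the sketch. */
#include <stdio.h>
#include <windows.h>

static HANDLE readEnd;
static HANDLE writeEnd;

static DWORD WINAPI reader(LPVOID arg)
{
  char buf[16];
  DWORD n = 0;
  (void)arg;
  if (ReadFile(readEnd, buf, sizeof(buf), &n, 0)) {
    printf("reader returned with %lu byte(s)\n", (unsigned long)n);
  }
  return 0;
}

int main(void)
{
  HANDLE t;
  DWORD written = 0;
  if (!CreatePipe(&readEnd, &writeEnd, 0, 0)) {
    return 1;
  }
  t = CreateThread(0, 0, reader, 0, 0, 0);
  if (!t) {
    return 1;
  }
  Sleep(100);                              /* let the reader block in ReadFile */
  WriteFile(writeEnd, "", 1, &written, 0); /* the "wake" byte, as above */
  WaitForSingleObject(t, INFINITE);
  CloseHandle(t);
  CloseHandle(readEnd);
  CloseHandle(writeEnd);
  return 0;
}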
*/ - free(cp->ProcessInformation); - cp->ProcessInformation = (PROCESS_INFORMATION*)malloc( - sizeof(PROCESS_INFORMATION) * cp->NumberOfCommands); - if (!cp->ProcessInformation) { - return 0; - } - ZeroMemory(cp->ProcessInformation, - sizeof(PROCESS_INFORMATION) * cp->NumberOfCommands); - free(cp->CommandExitCodes); - cp->CommandExitCodes = (DWORD*)malloc(sizeof(DWORD) * cp->NumberOfCommands); - if (!cp->CommandExitCodes) { - return 0; - } - ZeroMemory(cp->CommandExitCodes, sizeof(DWORD) * cp->NumberOfCommands); - - /* Allocate event wait array. The first event is cp->Full, the rest - are the process termination events. */ - cp->ProcessEvents = - (PHANDLE)malloc(sizeof(HANDLE) * (cp->NumberOfCommands + 1)); - if (!cp->ProcessEvents) { - return 0; - } - ZeroMemory(cp->ProcessEvents, sizeof(HANDLE) * (cp->NumberOfCommands + 1)); - cp->ProcessEvents[0] = cp->Full; - cp->ProcessEventsLength = cp->NumberOfCommands + 1; - - /* Allocate space to save the real working directory of this process. */ - if (cp->WorkingDirectory) { - cp->RealWorkingDirectoryLength = GetCurrentDirectoryW(0, 0); - if (cp->RealWorkingDirectoryLength > 0) { - cp->RealWorkingDirectory = - malloc(cp->RealWorkingDirectoryLength * sizeof(wchar_t)); - if (!cp->RealWorkingDirectory) { - return 0; - } - } - } - { - for (i = 0; i < 3; ++i) { - cp->PipeChildStd[i] = INVALID_HANDLE_VALUE; - } - } - - return 1; -} - -static DWORD kwsysProcessCreateChildHandle(PHANDLE out, HANDLE in, int isStdIn) -{ - DWORD flags; - - /* Check whether the handle is valid for this process. */ - if (in != INVALID_HANDLE_VALUE && GetHandleInformation(in, &flags)) { - /* Use the handle as-is if it is already inherited. */ - if (flags & HANDLE_FLAG_INHERIT) { - *out = in; - return ERROR_SUCCESS; - } - - /* Create an inherited copy of this handle. */ - if (DuplicateHandle(GetCurrentProcess(), in, GetCurrentProcess(), out, 0, - TRUE, DUPLICATE_SAME_ACCESS)) { - return ERROR_SUCCESS; - } else { - return GetLastError(); - } - } else { - /* The given handle is not valid for this process. Some child - processes may break if they do not have a valid standard handle, - so open NUL to give to the child. */ - SECURITY_ATTRIBUTES sa; - ZeroMemory(&sa, sizeof(sa)); - sa.nLength = (DWORD)sizeof(sa); - sa.bInheritHandle = 1; - *out = CreateFileW( - L"NUL", - (isStdIn ? GENERIC_READ : (GENERIC_WRITE | FILE_READ_ATTRIBUTES)), - FILE_SHARE_READ | FILE_SHARE_WRITE, &sa, OPEN_EXISTING, 0, 0); - return (*out != INVALID_HANDLE_VALUE) ? ERROR_SUCCESS : GetLastError(); - } -} - -DWORD kwsysProcessCreate(kwsysProcess* cp, int index, - kwsysProcessCreateInformation* si) -{ - DWORD creationFlags; - DWORD error = ERROR_SUCCESS; - - /* Check if we are currently exiting. */ - if (!kwsysTryEnterCreateProcessSection()) { - /* The Ctrl handler is currently working on exiting our process. Rather - than return an error code, which could cause incorrect conclusions to be - reached by the caller, we simply hang. (For example, a CMake try_run - configure step might cause the project to configure wrong.) */ - Sleep(INFINITE); - } - - /* Create the child in a suspended state so we can wait until all - children have been created before running any one. */ - creationFlags = CREATE_SUSPENDED; - if (cp->CreateProcessGroup) { - creationFlags |= CREATE_NEW_PROCESS_GROUP; - } - - /* Create inherited copies of the handles. 
*/ - (error = kwsysProcessCreateChildHandle(&si->StartupInfo.hStdInput, - si->hStdInput, 1)) || - (error = kwsysProcessCreateChildHandle(&si->StartupInfo.hStdOutput, - si->hStdOutput, 0)) || - (error = kwsysProcessCreateChildHandle(&si->StartupInfo.hStdError, - si->hStdError, 0)) || - /* Create the process. */ - (!CreateProcessW(0, cp->Commands[index], 0, 0, TRUE, creationFlags, 0, 0, - &si->StartupInfo, &cp->ProcessInformation[index]) && - (error = GetLastError())); - - /* Close the inherited copies of the handles. */ - if (si->StartupInfo.hStdInput != si->hStdInput) { - kwsysProcessCleanupHandle(&si->StartupInfo.hStdInput); - } - if (si->StartupInfo.hStdOutput != si->hStdOutput) { - kwsysProcessCleanupHandle(&si->StartupInfo.hStdOutput); - } - if (si->StartupInfo.hStdError != si->hStdError) { - kwsysProcessCleanupHandle(&si->StartupInfo.hStdError); - } - - /* Add the process to the global list of processes. */ - if (!error && - !kwsysProcessesAdd(cp->ProcessInformation[index].hProcess, - cp->ProcessInformation[index].dwProcessId, - cp->CreateProcessGroup)) { - /* This failed for some reason. Kill the suspended process. */ - TerminateProcess(cp->ProcessInformation[index].hProcess, 1); - /* And clean up... */ - kwsysProcessCleanupHandle(&cp->ProcessInformation[index].hProcess); - kwsysProcessCleanupHandle(&cp->ProcessInformation[index].hThread); - strcpy(cp->ErrorMessage, "kwsysProcessesAdd function failed"); - error = ERROR_NOT_ENOUGH_MEMORY; /* Most likely reason. */ - } - - /* If the console Ctrl handler is waiting for us, this will release it... */ - kwsysLeaveCreateProcessSection(); - return error; -} - -void kwsysProcessDestroy(kwsysProcess* cp, int event) -{ - int i; - int index; - - /* Find the process index for the termination event. */ - for (index = 0; index < cp->NumberOfCommands; ++index) { - if (cp->ProcessInformation[index].hProcess == cp->ProcessEvents[event]) { - break; - } - } - - /* Check the exit code of the process. */ - GetExitCodeProcess(cp->ProcessInformation[index].hProcess, - &cp->CommandExitCodes[index]); - - /* Remove from global list of processes. */ - kwsysProcessesRemove(cp->ProcessInformation[index].hProcess); - - /* Close the process handle for the terminated process. */ - kwsysProcessCleanupHandle(&cp->ProcessInformation[index].hProcess); - - /* Remove the process from the available events. */ - cp->ProcessEventsLength -= 1; - for (i = event; i < cp->ProcessEventsLength; ++i) { - cp->ProcessEvents[i] = cp->ProcessEvents[i + 1]; - } - - /* Check if all processes have terminated. */ - if (cp->ProcessEventsLength == 1) { - cp->Terminated = 1; - - /* Close our copies of the pipe write handles so the pipe threads - can detect end-of-data. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - /* TODO: If the child created its own child (our grandchild) - which inherited a copy of the pipe write-end then the pipe - may not close and we will still need the waker write pipe. - However we still want to be able to detect end-of-data in the - normal case. The reader thread will have to switch to using - PeekNamedPipe to read the last bit of data from the pipe - without blocking. This is equivalent to using a non-blocking - read on posix. */ - KWSYSPE_DEBUG((stderr, "closing wakeup write %d\n", i)); - kwsysProcessCleanupHandle(&cp->Pipe[i].Write); - } - } -} - -DWORD kwsysProcessSetupOutputPipeFile(PHANDLE phandle, const char* name) -{ - HANDLE fout; - wchar_t* wname; - DWORD error; - if (!name) { - return ERROR_INVALID_PARAMETER; - } - - /* Close the existing handle. 
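/* A hedged sketch of the handle setup and CreateProcessW sequence performed
   above, stripped of the pipe machinery: run a child suspended with its
   stdout sent to a file, then resume it. "cmd.exe /c dir" and "out.txt" are
   placeholders; real std handles may need extra care to be inheritable. */
#include <windows.h>

int main(void)
{
  SECURITY_ATTRIBUTES sa;
  STARTUPINFOW si;
  PROCESS_INFORMATION pi;
  HANDLE out;
  wchar_t cmd[] = L"cmd.exe /c dir"; /* CreateProcessW may modify this buffer */

  ZeroMemory(&sa, sizeof(sa));
  sa.nLength = (DWORD)sizeof(sa);
  sa.bInheritHandle = TRUE; /* the child must be able to inherit the handle */
  out = CreateFileW(L"out.txt", GENERIC_WRITE, FILE_SHARE_READ, &sa,
                    CREATE_ALWAYS, 0, 0);
  if (out == INVALID_HANDLE_VALUE) {
    return 1;
  }

  ZeroMemory(&si, sizeof(si));
  si.cb = (DWORD)sizeof(si);
  si.dwFlags = STARTF_USESTDHANDLES;
  si.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
  si.hStdOutput = out;
  si.hStdError = GetStdHandle(STD_ERROR_HANDLE);

  /* Create suspended, as above, then resume once bookkeeping is done. */
  if (!CreateProcessW(0, cmd, 0, 0, TRUE, CREATE_SUSPENDED, 0, 0, &si, &pi)) {
    CloseHandle(out);
    return 1;
  }
  ResumeThread(pi.hThread);
  WaitForSingleObject(pi.hProcess, INFINITE);
  CloseHandle(pi.hThread);
  CloseHandle(pi.hProcess);
  CloseHandle(out);
  return 0;
}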
*/ - kwsysProcessCleanupHandle(phandle); - - /* Create a handle to write a file for the pipe. */ - wname = kwsysEncoding_DupToWide(name); - fout = - CreateFileW(wname, GENERIC_WRITE, FILE_SHARE_READ, 0, CREATE_ALWAYS, 0, 0); - error = GetLastError(); - free(wname); - if (fout == INVALID_HANDLE_VALUE) { - return error; - } - - /* Assign the replacement handle. */ - *phandle = fout; - return ERROR_SUCCESS; -} - -void kwsysProcessSetupSharedPipe(DWORD nStdHandle, PHANDLE handle) -{ - /* Close the existing handle. */ - kwsysProcessCleanupHandle(handle); - /* Store the new standard handle. */ - *handle = GetStdHandle(nStdHandle); -} - -void kwsysProcessSetupPipeNative(HANDLE native, PHANDLE handle) -{ - /* Close the existing handle. */ - kwsysProcessCleanupHandle(handle); - /* Store the new given handle. */ - *handle = native; -} - -/* Close the given handle if it is open. Reset its value to 0. */ -void kwsysProcessCleanupHandle(PHANDLE h) -{ - if (h && *h && *h != INVALID_HANDLE_VALUE && - *h != GetStdHandle(STD_INPUT_HANDLE) && - *h != GetStdHandle(STD_OUTPUT_HANDLE) && - *h != GetStdHandle(STD_ERROR_HANDLE)) { - CloseHandle(*h); - *h = INVALID_HANDLE_VALUE; - } -} - -/* Close all handles created by kwsysProcess_Execute. */ -void kwsysProcessCleanup(kwsysProcess* cp, DWORD error) -{ - int i; - /* If this is an error case, report the error. */ - if (error) { - /* Construct an error message if one has not been provided already. */ - if (cp->ErrorMessage[0] == 0) { - /* Format the error message. */ - wchar_t err_msg[KWSYSPE_PIPE_BUFFER_SIZE]; - DWORD length = FormatMessageW( - FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, 0, error, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), err_msg, - KWSYSPE_PIPE_BUFFER_SIZE, 0); - if (length < 1) { - /* FormatMessage failed. Use a default message. */ - _snprintf(cp->ErrorMessage, KWSYSPE_PIPE_BUFFER_SIZE, - "Process execution failed with error 0x%X. " - "FormatMessage failed with error 0x%X", - error, GetLastError()); - } - if (!WideCharToMultiByte(CP_UTF8, 0, err_msg, -1, cp->ErrorMessage, - KWSYSPE_PIPE_BUFFER_SIZE, NULL, NULL)) { - /* WideCharToMultiByte failed. Use a default message. */ - _snprintf(cp->ErrorMessage, KWSYSPE_PIPE_BUFFER_SIZE, - "Process execution failed with error 0x%X. " - "WideCharToMultiByte failed with error 0x%X", - error, GetLastError()); - } - } - - /* Remove trailing period and newline, if any. */ - kwsysProcessCleanErrorMessage(cp); - - /* Set the error state. */ - cp->State = kwsysProcess_State_Error; - - /* Cleanup any processes already started in a suspended state. */ - if (cp->ProcessInformation) { - for (i = 0; i < cp->NumberOfCommands; ++i) { - if (cp->ProcessInformation[i].hProcess) { - TerminateProcess(cp->ProcessInformation[i].hProcess, 255); - WaitForSingleObject(cp->ProcessInformation[i].hProcess, INFINITE); - } - } - for (i = 0; i < cp->NumberOfCommands; ++i) { - /* Remove from global list of processes and close handles. */ - kwsysProcessesRemove(cp->ProcessInformation[i].hProcess); - kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hThread); - kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hProcess); - } - } - - /* Restore the working directory. */ - if (cp->RealWorkingDirectory) { - SetCurrentDirectoryW(cp->RealWorkingDirectory); - } - } - - /* Free memory. 
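/* A standalone version of the error-message formatting done above: turn a
   Win32 error code into UTF-8 text with FormatMessageW followed by
   WideCharToMultiByte. The 1024-character buffers stand in for
   KWSYSPE_PIPE_BUFFER_SIZE; the helper name is local to this sketch. */
#include <stdio.h>
#include <windows.h>

static void win32_error_utf8(DWORD error, char* out, int outSize)
{
  wchar_t wmsg[1024];
  DWORD len =
    FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
                   0, error, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), wmsg,
                   1024, 0);
  if (len < 1 ||
      !WideCharToMultiByte(CP_UTF8, 0, wmsg, -1, out, outSize, NULL, NULL)) {
    snprintf(out, (size_t)outSize, "error 0x%lX", (unsigned long)error);
  }
}

int main(void)
{
  char msg[1024];
  win32_error_utf8(ERROR_FILE_NOT_FOUND, msg, (int)sizeof(msg));
  printf("%s\n", msg);
  return 0;
}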
*/ - if (cp->ProcessInformation) { - free(cp->ProcessInformation); - cp->ProcessInformation = 0; - } - if (cp->ProcessEvents) { - free(cp->ProcessEvents); - cp->ProcessEvents = 0; - } - if (cp->RealWorkingDirectory) { - free(cp->RealWorkingDirectory); - cp->RealWorkingDirectory = 0; - } - - /* Close each pipe. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - kwsysProcessCleanupHandle(&cp->Pipe[i].Write); - kwsysProcessCleanupHandle(&cp->Pipe[i].Read); - cp->Pipe[i].Closed = 0; - } - for (i = 0; i < 3; ++i) { - kwsysProcessCleanupHandle(&cp->PipeChildStd[i]); - } -} - -void kwsysProcessCleanErrorMessage(kwsysProcess* cp) -{ - /* Remove trailing period and newline, if any. */ - size_t length = strlen(cp->ErrorMessage); - if (cp->ErrorMessage[length - 1] == '\n') { - cp->ErrorMessage[length - 1] = 0; - --length; - if (length > 0 && cp->ErrorMessage[length - 1] == '\r') { - cp->ErrorMessage[length - 1] = 0; - --length; - } - } - if (length > 0 && cp->ErrorMessage[length - 1] == '.') { - cp->ErrorMessage[length - 1] = 0; - } -} - -/* Get the time at which either the process or user timeout will - expire. Returns 1 if the user timeout is first, and 0 otherwise. */ -int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout, - kwsysProcessTime* timeoutTime) -{ - /* The first time this is called, we need to calculate the time at - which the child will timeout. */ - if (cp->Timeout && cp->TimeoutTime.QuadPart < 0) { - kwsysProcessTime length = kwsysProcessTimeFromDouble(cp->Timeout); - cp->TimeoutTime = kwsysProcessTimeAdd(cp->StartTime, length); - } - - /* Start with process timeout. */ - *timeoutTime = cp->TimeoutTime; - - /* Check if the user timeout is earlier. */ - if (userTimeout) { - kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent(); - kwsysProcessTime userTimeoutLength = - kwsysProcessTimeFromDouble(*userTimeout); - kwsysProcessTime userTimeoutTime = - kwsysProcessTimeAdd(currentTime, userTimeoutLength); - if (timeoutTime->QuadPart < 0 || - kwsysProcessTimeLess(userTimeoutTime, *timeoutTime)) { - *timeoutTime = userTimeoutTime; - return 1; - } - } - return 0; -} - -/* Get the length of time before the given timeout time arrives. - Returns 1 if the time has already arrived, and 0 otherwise. */ -int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime, - double* userTimeout, - kwsysProcessTime* timeoutLength) -{ - if (timeoutTime->QuadPart < 0) { - /* No timeout time has been requested. */ - return 0; - } else { - /* Calculate the remaining time. */ - kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent(); - *timeoutLength = kwsysProcessTimeSubtract(*timeoutTime, currentTime); - - if (timeoutLength->QuadPart < 0 && userTimeout && *userTimeout <= 0) { - /* Caller has explicitly requested a zero timeout. */ - timeoutLength->QuadPart = 0; - } - - if (timeoutLength->QuadPart < 0) { - /* Timeout has already expired. */ - return 1; - } else { - /* There is some time left. 
*/ - return 0; - } - } -} - -kwsysProcessTime kwsysProcessTimeGetCurrent() -{ - kwsysProcessTime current; - FILETIME ft; - GetSystemTimeAsFileTime(&ft); - current.LowPart = ft.dwLowDateTime; - current.HighPart = ft.dwHighDateTime; - return current; -} - -DWORD kwsysProcessTimeToDWORD(kwsysProcessTime t) -{ - return (DWORD)(t.QuadPart * 0.0001); -} - -double kwsysProcessTimeToDouble(kwsysProcessTime t) -{ - return t.QuadPart * 0.0000001; -} - -kwsysProcessTime kwsysProcessTimeFromDouble(double d) -{ - kwsysProcessTime t; - t.QuadPart = (LONGLONG)(d * 10000000); - return t; -} - -int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2) -{ - return in1.QuadPart < in2.QuadPart; -} - -kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1, - kwsysProcessTime in2) -{ - kwsysProcessTime out; - out.QuadPart = in1.QuadPart + in2.QuadPart; - return out; -} - -kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1, - kwsysProcessTime in2) -{ - kwsysProcessTime out; - out.QuadPart = in1.QuadPart - in2.QuadPart; - return out; -} - -#define KWSYSPE_CASE(type, str) \ - cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_##type; \ - strcpy(cp->ProcessResults[idx].ExitExceptionString, str) -static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int code, - int idx) -{ - switch (code) { - case STATUS_CONTROL_C_EXIT: - KWSYSPE_CASE(Interrupt, "User interrupt"); - break; - - case STATUS_FLOAT_DENORMAL_OPERAND: - KWSYSPE_CASE(Numerical, "Floating-point exception (denormal operand)"); - break; - case STATUS_FLOAT_DIVIDE_BY_ZERO: - KWSYSPE_CASE(Numerical, "Divide-by-zero"); - break; - case STATUS_FLOAT_INEXACT_RESULT: - KWSYSPE_CASE(Numerical, "Floating-point exception (inexact result)"); - break; - case STATUS_FLOAT_INVALID_OPERATION: - KWSYSPE_CASE(Numerical, "Invalid floating-point operation"); - break; - case STATUS_FLOAT_OVERFLOW: - KWSYSPE_CASE(Numerical, "Floating-point overflow"); - break; - case STATUS_FLOAT_STACK_CHECK: - KWSYSPE_CASE(Numerical, "Floating-point stack check failed"); - break; - case STATUS_FLOAT_UNDERFLOW: - KWSYSPE_CASE(Numerical, "Floating-point underflow"); - break; -#ifdef STATUS_FLOAT_MULTIPLE_FAULTS - case STATUS_FLOAT_MULTIPLE_FAULTS: - KWSYSPE_CASE(Numerical, "Floating-point exception (multiple faults)"); - break; -#endif -#ifdef STATUS_FLOAT_MULTIPLE_TRAPS - case STATUS_FLOAT_MULTIPLE_TRAPS: - KWSYSPE_CASE(Numerical, "Floating-point exception (multiple traps)"); - break; -#endif - case STATUS_INTEGER_DIVIDE_BY_ZERO: - KWSYSPE_CASE(Numerical, "Integer divide-by-zero"); - break; - case STATUS_INTEGER_OVERFLOW: - KWSYSPE_CASE(Numerical, "Integer overflow"); - break; - - case STATUS_DATATYPE_MISALIGNMENT: - KWSYSPE_CASE(Fault, "Datatype misalignment"); - break; - case STATUS_ACCESS_VIOLATION: - KWSYSPE_CASE(Fault, "Access violation"); - break; - case STATUS_IN_PAGE_ERROR: - KWSYSPE_CASE(Fault, "In-page error"); - break; - case STATUS_INVALID_HANDLE: - KWSYSPE_CASE(Fault, "Invalid hanlde"); - break; - case STATUS_NONCONTINUABLE_EXCEPTION: - KWSYSPE_CASE(Fault, "Noncontinuable exception"); - break; - case STATUS_INVALID_DISPOSITION: - KWSYSPE_CASE(Fault, "Invalid disposition"); - break; - case STATUS_ARRAY_BOUNDS_EXCEEDED: - KWSYSPE_CASE(Fault, "Array bounds exceeded"); - break; - case STATUS_STACK_OVERFLOW: - KWSYSPE_CASE(Fault, "Stack overflow"); - break; - - case STATUS_ILLEGAL_INSTRUCTION: - KWSYSPE_CASE(Illegal, "Illegal instruction"); - break; - case STATUS_PRIVILEGED_INSTRUCTION: - KWSYSPE_CASE(Illegal, "Privileged instruction"); - 
break; - - case STATUS_NO_MEMORY: - default: - cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_Other; - _snprintf(cp->ProcessResults[idx].ExitExceptionString, - KWSYSPE_PIPE_BUFFER_SIZE, "Exit code 0x%x\n", code); - break; - } -} -#undef KWSYSPE_CASE - -typedef struct kwsysProcess_List_s kwsysProcess_List; -static kwsysProcess_List* kwsysProcess_List_New(void); -static void kwsysProcess_List_Delete(kwsysProcess_List* self); -static int kwsysProcess_List_Update(kwsysProcess_List* self); -static int kwsysProcess_List_NextProcess(kwsysProcess_List* self); -static int kwsysProcess_List_GetCurrentProcessId(kwsysProcess_List* self); -static int kwsysProcess_List_GetCurrentParentId(kwsysProcess_List* self); - -/* Windows NT 4 API definitions. */ -#define STATUS_INFO_LENGTH_MISMATCH ((NTSTATUS)0xC0000004L) -typedef LONG NTSTATUS; -typedef LONG KPRIORITY; -typedef struct _UNICODE_STRING UNICODE_STRING; -struct _UNICODE_STRING -{ - USHORT Length; - USHORT MaximumLength; - PWSTR Buffer; -}; - -/* The process information structure. Declare only enough to get - process identifiers. The rest may be ignored because we use the - NextEntryDelta to move through an array of instances. */ -typedef struct _SYSTEM_PROCESS_INFORMATION SYSTEM_PROCESS_INFORMATION; -typedef SYSTEM_PROCESS_INFORMATION* PSYSTEM_PROCESS_INFORMATION; -struct _SYSTEM_PROCESS_INFORMATION -{ - ULONG NextEntryDelta; - ULONG ThreadCount; - ULONG Reserved1[6]; - LARGE_INTEGER CreateTime; - LARGE_INTEGER UserTime; - LARGE_INTEGER KernelTime; - UNICODE_STRING ProcessName; - KPRIORITY BasePriority; - ULONG ProcessId; - ULONG InheritedFromProcessId; -}; - -/* Toolhelp32 API definitions. */ -#define TH32CS_SNAPPROCESS 0x00000002 -#if defined(_WIN64) -typedef unsigned __int64 ProcessULONG_PTR; -#else -typedef unsigned long ProcessULONG_PTR; -#endif -typedef struct tagPROCESSENTRY32 PROCESSENTRY32; -typedef PROCESSENTRY32* LPPROCESSENTRY32; -struct tagPROCESSENTRY32 -{ - DWORD dwSize; - DWORD cntUsage; - DWORD th32ProcessID; - ProcessULONG_PTR th32DefaultHeapID; - DWORD th32ModuleID; - DWORD cntThreads; - DWORD th32ParentProcessID; - LONG pcPriClassBase; - DWORD dwFlags; - char szExeFile[MAX_PATH]; -}; - -/* Windows API function types. */ -typedef HANDLE(WINAPI* CreateToolhelp32SnapshotType)(DWORD, DWORD); -typedef BOOL(WINAPI* Process32FirstType)(HANDLE, LPPROCESSENTRY32); -typedef BOOL(WINAPI* Process32NextType)(HANDLE, LPPROCESSENTRY32); -typedef NTSTATUS(WINAPI* ZwQuerySystemInformationType)(ULONG, PVOID, ULONG, - PULONG); - -static int kwsysProcess_List__New_NT4(kwsysProcess_List* self); -static int kwsysProcess_List__New_Snapshot(kwsysProcess_List* self); -static void kwsysProcess_List__Delete_NT4(kwsysProcess_List* self); -static void kwsysProcess_List__Delete_Snapshot(kwsysProcess_List* self); -static int kwsysProcess_List__Update_NT4(kwsysProcess_List* self); -static int kwsysProcess_List__Update_Snapshot(kwsysProcess_List* self); -static int kwsysProcess_List__Next_NT4(kwsysProcess_List* self); -static int kwsysProcess_List__Next_Snapshot(kwsysProcess_List* self); -static int kwsysProcess_List__GetProcessId_NT4(kwsysProcess_List* self); -static int kwsysProcess_List__GetProcessId_Snapshot(kwsysProcess_List* self); -static int kwsysProcess_List__GetParentId_NT4(kwsysProcess_List* self); -static int kwsysProcess_List__GetParentId_Snapshot(kwsysProcess_List* self); - -struct kwsysProcess_List_s -{ - /* Implementation switches at runtime based on version of Windows. 
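/* The (code & 0xF0000000) == 0xC0000000 test used by kwsysProcess_WaitForExit
   above keys off the NTSTATUS severity bits: every error-severity STATUS_*
   value handled by this switch begins with 0xC. A tiny self-contained check
   (function name local to the sketch): */
#include <stdio.h>
#include <windows.h>

static int exit_code_is_exception(DWORD code)
{
  return (code & 0xF0000000) == 0xC0000000;
}

int main(void)
{
  printf("%d\n", exit_code_is_exception(0));          /* plain exit code 0 */
  printf("%d\n", exit_code_is_exception(1));          /* plain exit code 1 */
  printf("%d\n", exit_code_is_exception(0xC0000005)); /* STATUS_ACCESS_VIOLATION */
  return 0;
}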
*/ - int NT4; - - /* Implementation functions and data for NT 4. */ - ZwQuerySystemInformationType P_ZwQuerySystemInformation; - char* Buffer; - int BufferSize; - PSYSTEM_PROCESS_INFORMATION CurrentInfo; - - /* Implementation functions and data for other Windows versions. */ - CreateToolhelp32SnapshotType P_CreateToolhelp32Snapshot; - Process32FirstType P_Process32First; - Process32NextType P_Process32Next; - HANDLE Snapshot; - PROCESSENTRY32 CurrentEntry; -}; - -static kwsysProcess_List* kwsysProcess_List_New(void) -{ - OSVERSIONINFO osv; - kwsysProcess_List* self; - - /* Allocate and initialize the list object. */ - if (!(self = (kwsysProcess_List*)malloc(sizeof(kwsysProcess_List)))) { - return 0; - } - memset(self, 0, sizeof(*self)); - - /* Select an implementation. */ - ZeroMemory(&osv, sizeof(osv)); - osv.dwOSVersionInfoSize = sizeof(osv); -#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx -# pragma warning(push) -# ifdef __INTEL_COMPILER -# pragma warning(disable : 1478) -# elif defined __clang__ -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wdeprecated-declarations" -# else -# pragma warning(disable : 4996) -# endif -#endif - GetVersionEx(&osv); -#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx -# ifdef __clang__ -# pragma clang diagnostic pop -# else -# pragma warning(pop) -# endif -#endif - self->NT4 = - (osv.dwPlatformId == VER_PLATFORM_WIN32_NT && osv.dwMajorVersion < 5) ? 1 - : 0; - - /* Initialize the selected implementation. */ - if (!(self->NT4 ? kwsysProcess_List__New_NT4(self) - : kwsysProcess_List__New_Snapshot(self))) { - kwsysProcess_List_Delete(self); - return 0; - } - - /* Update to the current set of processes. */ - if (!kwsysProcess_List_Update(self)) { - kwsysProcess_List_Delete(self); - return 0; - } - return self; -} - -static void kwsysProcess_List_Delete(kwsysProcess_List* self) -{ - if (self) { - if (self->NT4) { - kwsysProcess_List__Delete_NT4(self); - } else { - kwsysProcess_List__Delete_Snapshot(self); - } - free(self); - } -} - -static int kwsysProcess_List_Update(kwsysProcess_List* self) -{ - return self ? (self->NT4 ? kwsysProcess_List__Update_NT4(self) - : kwsysProcess_List__Update_Snapshot(self)) - : 0; -} - -static int kwsysProcess_List_GetCurrentProcessId(kwsysProcess_List* self) -{ - return self ? (self->NT4 ? kwsysProcess_List__GetProcessId_NT4(self) - : kwsysProcess_List__GetProcessId_Snapshot(self)) - : -1; -} - -static int kwsysProcess_List_GetCurrentParentId(kwsysProcess_List* self) -{ - return self ? (self->NT4 ? kwsysProcess_List__GetParentId_NT4(self) - : kwsysProcess_List__GetParentId_Snapshot(self)) - : -1; -} - -static int kwsysProcess_List_NextProcess(kwsysProcess_List* self) -{ - return (self ? (self->NT4 ? kwsysProcess_List__Next_NT4(self) - : kwsysProcess_List__Next_Snapshot(self)) - : 0); -} - -static int kwsysProcess_List__New_NT4(kwsysProcess_List* self) -{ - /* Get a handle to the NT runtime module that should already be - loaded in this program. This does not actually increment the - reference count to the module so we do not need to close the - handle. */ - HMODULE hNT = GetModuleHandleW(L"ntdll.dll"); - if (hNT) { - /* Get pointers to the needed API functions. */ - self->P_ZwQuerySystemInformation = - ((ZwQuerySystemInformationType)GetProcAddress( - hNT, "ZwQuerySystemInformation")); - } - if (!self->P_ZwQuerySystemInformation) { - return 0; - } - - /* Allocate an initial process information buffer. */ - self->BufferSize = 32768; - self->Buffer = (char*)malloc(self->BufferSize); - return self->Buffer ? 
1 : 0; -} - -static void kwsysProcess_List__Delete_NT4(kwsysProcess_List* self) -{ - /* Free the process information buffer. */ - free(self->Buffer); -} - -static int kwsysProcess_List__Update_NT4(kwsysProcess_List* self) -{ - self->CurrentInfo = 0; - for (;;) { - /* Query number 5 is for system process list. */ - NTSTATUS status = - self->P_ZwQuerySystemInformation(5, self->Buffer, self->BufferSize, 0); - if (status == STATUS_INFO_LENGTH_MISMATCH) { - /* The query requires a bigger buffer. */ - int newBufferSize = self->BufferSize * 2; - char* newBuffer = (char*)malloc(newBufferSize); - if (newBuffer) { - free(self->Buffer); - self->Buffer = newBuffer; - self->BufferSize = newBufferSize; - } else { - return 0; - } - } else if (status >= 0) { - /* The query succeeded. Initialize traversal of the process list. */ - self->CurrentInfo = (PSYSTEM_PROCESS_INFORMATION)self->Buffer; - return 1; - } else { - /* The query failed. */ - return 0; - } - } -} - -static int kwsysProcess_List__Next_NT4(kwsysProcess_List* self) -{ - if (self->CurrentInfo) { - if (self->CurrentInfo->NextEntryDelta > 0) { - self->CurrentInfo = - ((PSYSTEM_PROCESS_INFORMATION)((char*)self->CurrentInfo + - self->CurrentInfo->NextEntryDelta)); - return 1; - } - self->CurrentInfo = 0; - } - return 0; -} - -static int kwsysProcess_List__GetProcessId_NT4(kwsysProcess_List* self) -{ - return self->CurrentInfo ? self->CurrentInfo->ProcessId : -1; -} - -static int kwsysProcess_List__GetParentId_NT4(kwsysProcess_List* self) -{ - return self->CurrentInfo ? self->CurrentInfo->InheritedFromProcessId : -1; -} - -static int kwsysProcess_List__New_Snapshot(kwsysProcess_List* self) -{ - /* Get a handle to the Windows runtime module that should already be - loaded in this program. This does not actually increment the - reference count to the module so we do not need to close the - handle. */ - HMODULE hKernel = GetModuleHandleW(L"kernel32.dll"); - if (hKernel) { - self->P_CreateToolhelp32Snapshot = - ((CreateToolhelp32SnapshotType)GetProcAddress( - hKernel, "CreateToolhelp32Snapshot")); - self->P_Process32First = - ((Process32FirstType)GetProcAddress(hKernel, "Process32First")); - self->P_Process32Next = - ((Process32NextType)GetProcAddress(hKernel, "Process32Next")); - } - return (self->P_CreateToolhelp32Snapshot && self->P_Process32First && - self->P_Process32Next) - ? 1 - : 0; -} - -static void kwsysProcess_List__Delete_Snapshot(kwsysProcess_List* self) -{ - if (self->Snapshot) { - CloseHandle(self->Snapshot); - } -} - -static int kwsysProcess_List__Update_Snapshot(kwsysProcess_List* self) -{ - if (self->Snapshot) { - CloseHandle(self->Snapshot); - } - if (!(self->Snapshot = - self->P_CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0))) { - return 0; - } - ZeroMemory(&self->CurrentEntry, sizeof(self->CurrentEntry)); - self->CurrentEntry.dwSize = sizeof(self->CurrentEntry); - if (!self->P_Process32First(self->Snapshot, &self->CurrentEntry)) { - CloseHandle(self->Snapshot); - self->Snapshot = 0; - return 0; - } - return 1; -} - -static int kwsysProcess_List__Next_Snapshot(kwsysProcess_List* self) -{ - if (self->Snapshot) { - if (self->P_Process32Next(self->Snapshot, &self->CurrentEntry)) { - return 1; - } - CloseHandle(self->Snapshot); - self->Snapshot = 0; - } - return 0; -} - -static int kwsysProcess_List__GetProcessId_Snapshot(kwsysProcess_List* self) -{ - return self->Snapshot ? self->CurrentEntry.th32ProcessID : -1; -} - -static int kwsysProcess_List__GetParentId_Snapshot(kwsysProcess_List* self) -{ - return self->Snapshot ? 
self->CurrentEntry.th32ParentProcessID : -1; -} - -static void kwsysProcessKill(DWORD pid) -{ - HANDLE h = OpenProcess(PROCESS_TERMINATE, 0, pid); - if (h) { - TerminateProcess(h, 255); - WaitForSingleObject(h, INFINITE); - CloseHandle(h); - } -} - -static void kwsysProcessKillTree(int pid) -{ - kwsysProcess_List* plist = kwsysProcess_List_New(); - kwsysProcessKill(pid); - if (plist) { - do { - if (kwsysProcess_List_GetCurrentParentId(plist) == pid) { - int ppid = kwsysProcess_List_GetCurrentProcessId(plist); - kwsysProcessKillTree(ppid); - } - } while (kwsysProcess_List_NextProcess(plist)); - kwsysProcess_List_Delete(plist); - } -} - -static void kwsysProcessDisablePipeThreads(kwsysProcess* cp) -{ - int i; - - /* If data were just reported data, release the pipe's thread. */ - if (cp->CurrentIndex < KWSYSPE_PIPE_COUNT) { - KWSYSPE_DEBUG((stderr, "releasing reader %d\n", cp->CurrentIndex)); - ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0); - cp->CurrentIndex = KWSYSPE_PIPE_COUNT; - } - - /* Wakeup all reading threads that are not on closed pipes. */ - for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) { - /* The wakeup threads will write one byte to the pipe write ends. - If there are no data in the pipe then this is enough to wakeup - the reading threads. If there are already data in the pipe - this may block. We cannot use PeekNamedPipe to check whether - there are data because an outside process might still be - writing data if we are disowning it. Also, PeekNamedPipe will - block if checking a pipe on which the reading thread is - currently calling ReadPipe. Therefore we need a separate - thread to call WriteFile. If it blocks, that is okay because - it will unblock when we close the read end and break the pipe - below. */ - if (cp->Pipe[i].Read) { - KWSYSPE_DEBUG((stderr, "releasing waker %d\n", i)); - ReleaseSemaphore(cp->Pipe[i].Waker.Go, 1, 0); - } - } - - /* Tell pipe threads to reset until we run another process. */ - while (cp->PipesLeft > 0) { - /* The waking threads will cause all reading threads to report. - Wait for the next one and save its index. */ - KWSYSPE_DEBUG((stderr, "waiting for reader\n")); - WaitForSingleObject(cp->Full, INFINITE); - cp->CurrentIndex = cp->SharedIndex; - ReleaseSemaphore(cp->SharedIndexMutex, 1, 0); - KWSYSPE_DEBUG((stderr, "got reader %d\n", cp->CurrentIndex)); - - /* We are done reading this pipe. Close its read handle. */ - cp->Pipe[cp->CurrentIndex].Closed = 1; - kwsysProcessCleanupHandle(&cp->Pipe[cp->CurrentIndex].Read); - --cp->PipesLeft; - - /* Tell the reading thread we are done with the data. It will - reset immediately because the pipe is closed. */ - ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0); - } -} - -/* Global set of executing processes for use by the Ctrl handler. - This global instance will be zero-initialized by the compiler. - - Note that the console Ctrl handler runs on a background thread and so - everything it does must be thread safe. Here, we track the hProcess - HANDLEs directly instead of kwsysProcess instances, so that we don't have - to make kwsysProcess thread safe. */ -typedef struct kwsysProcessInstance_s -{ - HANDLE hProcess; - DWORD dwProcessId; - int NewProcessGroup; /* Whether the process was created in a new group. */ -} kwsysProcessInstance; - -typedef struct kwsysProcessInstances_s -{ - /* Whether we have initialized key fields below, like critical sections. */ - int Initialized; - - /* Ctrl handler runs on a different thread, so we must sync access. 
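/* kwsysProcessKillTree above resolves parent/child relationships through the
   process-list machinery, which loads the Toolhelp (or NT4) functions at run
   time so the code also works on very old Windows. On a modern toolchain the
   same idea can be written directly against <tlhelp32.h>; a hedged sketch
   (PID reuse makes any such walk inherently racy): */
#include <windows.h>

#include <tlhelp32.h>

static void kill_pid(DWORD pid)
{
  HANDLE h = OpenProcess(PROCESS_TERMINATE, FALSE, pid);
  if (h) {
    TerminateProcess(h, 255);
    WaitForSingleObject(h, INFINITE);
    CloseHandle(h);
  }
}

static void kill_tree(DWORD pid)
{
  HANDLE snap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
  PROCESSENTRY32 pe;
  kill_pid(pid); /* kill the parent first, as kwsysProcessKillTree does */
  if (snap == INVALID_HANDLE_VALUE) {
    return;
  }
  pe.dwSize = sizeof(pe);
  if (Process32First(snap, &pe)) {
    do {
      if (pe.th32ParentProcessID == pid) {
        kill_tree(pe.th32ProcessID); /* recurse into the children */
      }
    } while (Process32Next(snap, &pe));
  }
  CloseHandle(snap);
}

int main(void)
{
  /* Example (destructive): kill_tree(some_child_pid); */
  (void)kill_tree;
  return 0;
}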
*/ - CRITICAL_SECTION Lock; - - int Exiting; - size_t Count; - size_t Size; - kwsysProcessInstance* Processes; -} kwsysProcessInstances; -static kwsysProcessInstances kwsysProcesses; - -/* Initialize critial section and set up console Ctrl handler. You MUST call - this before using any other kwsysProcesses* functions below. */ -static int kwsysProcessesInitialize(void) -{ - /* Initialize everything if not done already. */ - if (!kwsysProcesses.Initialized) { - InitializeCriticalSection(&kwsysProcesses.Lock); - - /* Set up console ctrl handler. */ - if (!SetConsoleCtrlHandler(kwsysCtrlHandler, TRUE)) { - return 0; - } - - kwsysProcesses.Initialized = 1; - } - return 1; -} - -/* The Ctrl handler waits on the global list of processes. To prevent an - orphaned process, do not create a new process if the Ctrl handler is - already running. Do so by using this function to check if it is ok to - create a process. */ -static int kwsysTryEnterCreateProcessSection(void) -{ - /* Enter main critical section; this means creating a process and the Ctrl - handler are mutually exclusive. */ - EnterCriticalSection(&kwsysProcesses.Lock); - /* Indicate to the caller if they can create a process. */ - if (kwsysProcesses.Exiting) { - LeaveCriticalSection(&kwsysProcesses.Lock); - return 0; - } else { - return 1; - } -} - -/* Matching function on successful kwsysTryEnterCreateProcessSection return. - Make sure you called kwsysProcessesAdd if applicable before calling this.*/ -static void kwsysLeaveCreateProcessSection(void) -{ - LeaveCriticalSection(&kwsysProcesses.Lock); -} - -/* Add new process to global process list. The Ctrl handler will wait for - the process to exit before it returns. Do not close the process handle - until after calling kwsysProcessesRemove. The newProcessGroup parameter - must be set if the process was created with CREATE_NEW_PROCESS_GROUP. */ -static int kwsysProcessesAdd(HANDLE hProcess, DWORD dwProcessid, - int newProcessGroup) -{ - if (!kwsysProcessesInitialize() || !hProcess || - hProcess == INVALID_HANDLE_VALUE) { - return 0; - } - - /* Enter the critical section. */ - EnterCriticalSection(&kwsysProcesses.Lock); - - /* Make sure there is enough space for the new process handle. */ - if (kwsysProcesses.Count == kwsysProcesses.Size) { - size_t newSize; - kwsysProcessInstance* newArray; - /* Start with enough space for a small number of process handles - and double the size each time more is needed. */ - newSize = kwsysProcesses.Size ? kwsysProcesses.Size * 2 : 4; - - /* Try allocating the new block of memory. */ - if ((newArray = (kwsysProcessInstance*)malloc( - newSize * sizeof(kwsysProcessInstance)))) { - /* Copy the old process handles to the new memory. */ - if (kwsysProcesses.Count > 0) { - memcpy(newArray, kwsysProcesses.Processes, - kwsysProcesses.Count * sizeof(kwsysProcessInstance)); - } - } else { - /* Failed to allocate memory for the new process handle set. */ - LeaveCriticalSection(&kwsysProcesses.Lock); - return 0; - } - - /* Free original array. */ - free(kwsysProcesses.Processes); - - /* Update original structure with new allocation. */ - kwsysProcesses.Size = newSize; - kwsysProcesses.Processes = newArray; - } - - /* Append the new process information to the set. */ - kwsysProcesses.Processes[kwsysProcesses.Count].hProcess = hProcess; - kwsysProcesses.Processes[kwsysProcesses.Count].dwProcessId = dwProcessid; - kwsysProcesses.Processes[kwsysProcesses.Count++].NewProcessGroup = - newProcessGroup; - - /* Leave critical section and return success. 
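/* A minimal sketch of the pattern kwsysProcessesInitialize and
   kwsysCtrlHandler implement: one critical section shared between normal code
   and the console Ctrl handler, which runs on its own thread. This sketch
   handles the event itself (returns TRUE); the code above instead returns
   FALSE so the default handler still exits the process after the children
   have been waited on. Names are local to the sketch. */
#include <windows.h>

static CRITICAL_SECTION lock;
static volatile LONG exiting = 0;

static BOOL WINAPI ctrl_handler(DWORD type)
{
  (void)type;
  EnterCriticalSection(&lock); /* same lock the rest of the program uses */
  exiting = 1;
  LeaveCriticalSection(&lock);
  return TRUE; /* handled here; do not chain to the default handler */
}

int main(void)
{
  InitializeCriticalSection(&lock);
  if (!SetConsoleCtrlHandler(ctrl_handler, TRUE)) {
    return 1;
  }
  while (!exiting) { /* normal work would go here */
    Sleep(100);
  }
  return 0;
}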
*/ - LeaveCriticalSection(&kwsysProcesses.Lock); - - return 1; -} - -/* Removes process to global process list. */ -static void kwsysProcessesRemove(HANDLE hProcess) -{ - size_t i; - - if (!hProcess || hProcess == INVALID_HANDLE_VALUE) { - return; - } - - EnterCriticalSection(&kwsysProcesses.Lock); - - /* Find the given process in the set. */ - for (i = 0; i < kwsysProcesses.Count; ++i) { - if (kwsysProcesses.Processes[i].hProcess == hProcess) { - break; - } - } - if (i < kwsysProcesses.Count) { - /* Found it! Remove the process from the set. */ - --kwsysProcesses.Count; - for (; i < kwsysProcesses.Count; ++i) { - kwsysProcesses.Processes[i] = kwsysProcesses.Processes[i + 1]; - } - - /* If this was the last process, free the array. */ - if (kwsysProcesses.Count == 0) { - kwsysProcesses.Size = 0; - free(kwsysProcesses.Processes); - kwsysProcesses.Processes = 0; - } - } - - LeaveCriticalSection(&kwsysProcesses.Lock); -} - -static BOOL WINAPI kwsysCtrlHandler(DWORD dwCtrlType) -{ - size_t i; - (void)dwCtrlType; - /* Enter critical section. */ - EnterCriticalSection(&kwsysProcesses.Lock); - - /* Set flag indicating that we are exiting. */ - kwsysProcesses.Exiting = 1; - - /* If some of our processes were created in a new process group, we must - manually interrupt them. They won't otherwise receive a Ctrl+C/Break. */ - for (i = 0; i < kwsysProcesses.Count; ++i) { - if (kwsysProcesses.Processes[i].NewProcessGroup) { - DWORD groupId = kwsysProcesses.Processes[i].dwProcessId; - if (groupId) { - GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, groupId); - } - } - } - - /* Wait for each child process to exit. This is the key step that prevents - us from leaving several orphaned children processes running in the - background when the user presses Ctrl+C. */ - for (i = 0; i < kwsysProcesses.Count; ++i) { - WaitForSingleObject(kwsysProcesses.Processes[i].hProcess, INFINITE); - } - - /* Leave critical section. */ - LeaveCriticalSection(&kwsysProcesses.Lock); - - /* Continue on to default Ctrl handler (which calls ExitProcess). */ - return FALSE; -} - -void kwsysProcess_ResetStartTime(kwsysProcess* cp) -{ - if (!cp) { - return; - } - /* Reset start time. */ - cp->StartTime = kwsysProcessTimeGetCurrent(); -} diff --git a/test/API/driver/kwsys/README.rst b/test/API/driver/kwsys/README.rst deleted file mode 100644 index fc6b5902edc..00000000000 --- a/test/API/driver/kwsys/README.rst +++ /dev/null @@ -1,37 +0,0 @@ -KWSys -***** - -Introduction -============ - -KWSys is the Kitware System Library. It provides platform-independent -APIs to many common system features that are implemented differently on -every platform. This library is intended to be shared among many -projects at the source level, so it has a configurable namespace. -Each project should configure KWSys to use a namespace unique to itself. -See comments in `CMakeLists.txt`_ for details. - -.. _`CMakeLists.txt`: CMakeLists.txt - -License -======= - -KWSys is distributed under the OSI-approved BSD 3-clause License. -See `Copyright.txt`_ for details. - -.. _`Copyright.txt`: Copyright.txt - -Reporting Bugs -============== - -KWSys has no independent issue tracker. After encountering an issue -(bug) please submit a patch using the instructions for `Contributing`_. -Otherwise please report the issue to the tracker for the project that -hosts the copy of KWSys in which the problem was found. - -Contributing -============ - -See `CONTRIBUTING.rst`_ for instructions to contribute. - -.. 
_`CONTRIBUTING.rst`: CONTRIBUTING.rst diff --git a/test/API/driver/kwsys/RegularExpression.cxx b/test/API/driver/kwsys/RegularExpression.cxx deleted file mode 100644 index 5e6f8da5031..00000000000 --- a/test/API/driver/kwsys/RegularExpression.cxx +++ /dev/null @@ -1,1218 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -// -// Copyright (C) 1991 Texas Instruments Incorporated. -// -// Permission is granted to any individual or institution to use, copy, modify -// and distribute this software, provided that this complete copyright and -// permission notice is maintained, intact, in all copies and supporting -// documentation. -// -// Texas Instruments Incorporated provides this software "as is" without -// express or implied warranty. -// -// -// Created: MNF 06/13/89 Initial Design and Implementation -// Updated: LGO 08/09/89 Inherit from Generic -// Updated: MBN 09/07/89 Added conditional exception handling -// Updated: MBN 12/15/89 Sprinkled "const" qualifiers all over the place! -// Updated: DLS 03/22/91 New lite version -// - -#include "kwsysPrivate.h" -#include KWSYS_HEADER(RegularExpression.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "RegularExpression.hxx.in" -#endif - -#include -#include - -namespace KWSYS_NAMESPACE { - -// RegularExpression -- Copies the given regular expression. -RegularExpression::RegularExpression(const RegularExpression& rxp) -{ - if (!rxp.program) { - this->program = nullptr; - return; - } - int ind; - this->progsize = rxp.progsize; // Copy regular expression size - this->program = new char[this->progsize]; // Allocate storage - for (ind = this->progsize; ind-- != 0;) // Copy regular expression - this->program[ind] = rxp.program[ind]; - // Copy pointers into last successful "find" operation - this->regmatch = rxp.regmatch; - this->regmust = rxp.regmust; // Copy field - if (rxp.regmust != nullptr) { - char* dum = rxp.program; - ind = 0; - while (dum != rxp.regmust) { - ++dum; - ++ind; - } - this->regmust = this->program + ind; - } - this->regstart = rxp.regstart; // Copy starting index - this->reganch = rxp.reganch; // Copy remaining private data - this->regmlen = rxp.regmlen; // Copy remaining private data -} - -// operator= -- Copies the given regular expression. -RegularExpression& RegularExpression::operator=(const RegularExpression& rxp) -{ - if (this == &rxp) { - return *this; - } - if (!rxp.program) { - this->program = nullptr; - return *this; - } - int ind; - this->progsize = rxp.progsize; // Copy regular expression size - delete[] this->program; - this->program = new char[this->progsize]; // Allocate storage - for (ind = this->progsize; ind-- != 0;) // Copy regular expression - this->program[ind] = rxp.program[ind]; - // Copy pointers into last successful "find" operation - this->regmatch = rxp.regmatch; - this->regmust = rxp.regmust; // Copy field - if (rxp.regmust != nullptr) { - char* dum = rxp.program; - ind = 0; - while (dum != rxp.regmust) { - ++dum; - ++ind; - } - this->regmust = this->program + ind; - } - this->regstart = rxp.regstart; // Copy starting index - this->reganch = rxp.reganch; // Copy remaining private data - this->regmlen = rxp.regmlen; // Copy remaining private data - - return *this; -} - -// operator== -- Returns true if two regular expressions have the same -// compiled program for pattern matching. 
-bool RegularExpression::operator==(const RegularExpression& rxp) const -{ - if (this != &rxp) { // Same address? - int ind = this->progsize; // Get regular expression size - if (ind != rxp.progsize) // If different size regexp - return false; // Return failure - while (ind-- != 0) // Else while still characters - if (this->program[ind] != rxp.program[ind]) // If regexp are different - return false; // Return failure - } - return true; // Else same, return success -} - -// deep_equal -- Returns true if have the same compiled regular expressions -// and the same start and end pointers. - -bool RegularExpression::deep_equal(const RegularExpression& rxp) const -{ - int ind = this->progsize; // Get regular expression size - if (ind != rxp.progsize) // If different size regexp - return false; // Return failure - while (ind-- != 0) // Else while still characters - if (this->program[ind] != rxp.program[ind]) // If regexp are different - return false; // Return failure - // Else if same start/end ptrs, return true - return (this->regmatch.start() == rxp.regmatch.start() && - this->regmatch.end() == rxp.regmatch.end()); -} - -// The remaining code in this file is derived from the regular expression code -// whose copyright statement appears below. It has been changed to work -// with the class concepts of C++ and COOL. - -/* - * compile and find - * - * Copyright (c) 1986 by University of Toronto. - * Written by Henry Spencer. Not derived from licensed software. - * - * Permission is granted to anyone to use this software for any - * purpose on any computer system, and to redistribute it freely, - * subject to the following restrictions: - * - * 1. The author is not responsible for the consequences of use of - * this software, no matter how awful, even if they arise - * from defects in it. - * - * 2. The origin of this software must not be misrepresented, either - * by explicit claim or by omission. - * - * 3. Altered versions must be plainly marked as such, and must not - * be misrepresented as being the original software. - * - * Beware that some of this code is subtly aware of the way operator - * precedence is structured in regular expressions. Serious changes in - * regular-expression syntax might require a total rethink. - */ - -/* - * The "internal use only" fields in regexp.h are present to pass info from - * compile to execute that permits the execute phase to run lots faster on - * simple cases. They are: - * - * regstart char that must begin a match; '\0' if none obvious - * reganch is the match anchored (at beginning-of-line only)? - * regmust string (pointer into program) that match must include, or - * nullptr regmlen length of regmust string - * - * Regstart and reganch permit very fast decisions on suitable starting points - * for a match, cutting down the work a lot. Regmust permits fast rejection - * of lines that cannot possibly match. The regmust tests are costly enough - * that compile() supplies a regmust only if the r.e. contains something - * potentially expensive (at present, the only such thing detected is * or + - * at the start of the r.e., which can involve a lot of backup). Regmlen is - * supplied because the test in find() needs it and compile() is computing - * it anyway. - */ - -/* - * Structure for regexp "program". This is essentially a linear encoding - * of a nondeterministic finite-state machine (aka syntax charts or - * "railroad normal form" in parsing technology). Each node is an opcode - * plus a "next" pointer, possibly plus an operand. 
"Next" pointers of - * all nodes except BRANCH implement concatenation; a "next" pointer with - * a BRANCH on both ends of it is connecting two alternatives. (Here we - * have one of the subtle syntax dependencies: an individual BRANCH (as - * opposed to a collection of them) is never concatenated with anything - * because of operator precedence.) The operand of some types of node is - * a literal string; for others, it is a node leading into a sub-FSM. In - * particular, the operand of a BRANCH node is the first node of the branch. - * (NB this is *not* a tree structure: the tail of the branch connects - * to the thing following the set of BRANCHes.) The opcodes are: - */ - -// definition number opnd? meaning -#define END 0 // no End of program. -#define BOL 1 // no Match "" at beginning of line. -#define EOL 2 // no Match "" at end of line. -#define ANY 3 // no Match any one character. -#define ANYOF 4 // str Match any character in this string. -#define ANYBUT \ - 5 // str Match any character not in this - // string. -#define BRANCH \ - 6 // node Match this alternative, or the - // next... -#define BACK 7 // no Match "", "next" ptr points backward. -#define EXACTLY 8 // str Match this string. -#define NOTHING 9 // no Match empty string. -#define STAR \ - 10 // node Match this (simple) thing 0 or more - // times. -#define PLUS \ - 11 // node Match this (simple) thing 1 or more - // times. -#define OPEN \ - 20 // no Mark this point in input as start of - // #n. -// OPEN+1 is number 1, etc. -#define CLOSE 30 // no Analogous to OPEN. - -/* - * Opcode notes: - * - * BRANCH The set of branches constituting a single choice are hooked - * together with their "next" pointers, since precedence prevents - * anything being concatenated to any individual branch. The - * "next" pointer of the last BRANCH in a choice points to the - * thing following the whole choice. This is also where the - * final "next" pointer of each individual branch points; each - * branch starts with the operand node of a BRANCH node. - * - * BACK Normal "next" pointers all implicitly point forward; BACK - * exists to make loop structures possible. - * - * STAR,PLUS '?', and complex '*' and '+', are implemented as circular - * BRANCH structures using BACK. Simple cases (one character - * per match) are implemented with STAR and PLUS for speed - * and to minimize recursive plunges. - * - * OPEN,CLOSE ...are numbered at compile time. - */ - -/* - * A node is one char of opcode followed by two chars of "next" pointer. - * "Next" pointers are stored as two 8-bit pieces, high order first. The - * value is a positive offset from the opcode of the node containing it. - * An operand, if any, simply follows the node. (Note that much of the - * code generation knows about this implicit relationship.) - * - * Using two bytes for the "next" pointer is vast overkill for most things, - * but allows patterns to get big without disasters. - */ - -#define OP(p) (*(p)) -#define NEXT(p) (((*((p) + 1) & 0377) << 8) + (*((p) + 2) & 0377)) -#define OPERAND(p) ((p) + 3) - -const unsigned char MAGIC = 0234; -/* - * Utility definitions. - */ - -#define UCHARAT(p) (reinterpret_cast(p))[0] - -#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?') -#define META "^$.[()|?+*\\" - -/* - * Flags to be passed up and down. - */ -#define HASWIDTH 01 // Known never to match null string. -#define SIMPLE 02 // Simple enough to be STAR/PLUS operand. -#define SPSTART 04 // Starts with * or +. -#define WORST 0 // Worst case. 
- -///////////////////////////////////////////////////////////////////////// -// -// COMPILE AND ASSOCIATED FUNCTIONS -// -///////////////////////////////////////////////////////////////////////// - -/* - * Read only utility variables. - */ -static char regdummy; -static char* const regdummyptr = ®dummy; - -/* - * Utility class for RegularExpression::compile(). - */ -class RegExpCompile -{ -public: - const char* regparse; // Input-scan pointer. - int regnpar; // () count. - char* regcode; // Code-emit pointer; regdummyptr = don't. - long regsize; // Code size. - - char* reg(int, int*); - char* regbranch(int*); - char* regpiece(int*); - char* regatom(int*); - char* regnode(char); - void regc(char); - void reginsert(char, char*); - static void regtail(char*, const char*); - static void regoptail(char*, const char*); -}; - -static const char* regnext(const char*); -static char* regnext(char*); - -#ifdef STRCSPN -static int strcspn(); -#endif - -/* - * We can't allocate space until we know how big the compiled form will be, - * but we can't compile it (and thus know how big it is) until we've got a - * place to put the code. So we cheat: we compile it twice, once with code - * generation turned off and size counting turned on, and once "for real". - * This also means that we don't allocate space until we are sure that the - * thing really will compile successfully, and we never have to move the - * code and thus invalidate pointers into it. (Note that it has to be in - * one piece because free() must be able to free it all.) - * - * Beware that the optimization-preparation code in here knows about some - * of the structure of the compiled regexp. - */ - -// compile -- compile a regular expression into internal code -// for later pattern matching. - -bool RegularExpression::compile(const char* exp) -{ - const char* scan; - const char* longest; - int flags; - - if (exp == nullptr) { - // RAISE Error, SYM(RegularExpression), SYM(No_Expr), - printf("RegularExpression::compile(): No expression supplied.\n"); - return false; - } - - // First pass: determine size, legality. - RegExpCompile comp; - comp.regparse = exp; - comp.regnpar = 1; - comp.regsize = 0L; - comp.regcode = regdummyptr; - comp.regc(static_cast(MAGIC)); - if (!comp.reg(0, &flags)) { - printf("RegularExpression::compile(): Error in compile.\n"); - return false; - } - this->regmatch.clear(); - - // Small enough for pointer-storage convention? - if (comp.regsize >= 32767L) { // Probably could be 65535L. - // RAISE Error, SYM(RegularExpression), SYM(Expr_Too_Big), - printf("RegularExpression::compile(): Expression too big.\n"); - return false; - } - - // Allocate space. - //#ifndef _WIN32 - if (this->program != nullptr) - delete[] this->program; - //#endif - this->program = new char[comp.regsize]; - this->progsize = static_cast(comp.regsize); - - if (this->program == nullptr) { - // RAISE Error, SYM(RegularExpression), SYM(Out_Of_Memory), - printf("RegularExpression::compile(): Out of memory.\n"); - return false; - } - - // Second pass: emit code. - comp.regparse = exp; - comp.regnpar = 1; - comp.regcode = this->program; - comp.regc(static_cast(MAGIC)); - comp.reg(0, &flags); - - // Dig out information for optimizations. - this->regstart = '\0'; // Worst-case defaults. - this->reganch = 0; - this->regmust = nullptr; - this->regmlen = 0; - scan = this->program + 1; // First BRANCH. - if (OP(regnext(scan)) == END) { // Only one top-level choice. - scan = OPERAND(scan); - - // Starting-point info. 
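/* compile() above uses a classic measure-then-emit strategy: the first pass
   points regcode at regdummyptr and only accumulates regsize, the second pass
   emits real code into a buffer of exactly that size. The same pattern with
   snprintf, for comparison (names local to the sketch): */
#include <stdio.h>
#include <stdlib.h>

static char* format_message(const char* name, int value)
{
  int needed = snprintf(NULL, 0, "%s=%d", name, value); /* pass 1: size only */
  char* buf;
  if (needed < 0) {
    return NULL;
  }
  buf = (char*)malloc((size_t)needed + 1);
  if (!buf) {
    return NULL;
  }
  snprintf(buf, (size_t)needed + 1, "%s=%d", name, value); /* pass 2: emit */
  return buf;
}

int main(void)
{
  char* s = format_message("regsize", 42);
  if (s) {
    printf("%s\n", s);
    free(s);
  }
  return 0;
}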
- if (OP(scan) == EXACTLY) - this->regstart = *OPERAND(scan); - else if (OP(scan) == BOL) - this->reganch++; - - // - // If there's something expensive in the r.e., find the longest - // literal string that must appear and make it the regmust. Resolve - // ties in favor of later strings, since the regstart check works - // with the beginning of the r.e. and avoiding duplication - // strengthens checking. Not a strong reason, but sufficient in the - // absence of others. - // - if (flags & SPSTART) { - longest = nullptr; - size_t len = 0; - for (; scan != nullptr; scan = regnext(scan)) - if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) { - longest = OPERAND(scan); - len = strlen(OPERAND(scan)); - } - this->regmust = longest; - this->regmlen = len; - } - } - return true; -} - -/* - - reg - regular expression, i.e. main body or parenthesized thing - * - * Caller must absorb opening parenthesis. - * - * Combining parenthesis handling with the base level of regular expression - * is a trifle forced, but the need to tie the tails of the branches to what - * follows makes it hard to avoid. - */ -char* RegExpCompile::reg(int paren, int* flagp) -{ - char* ret; - char* br; - char* ender; - int parno = 0; - int flags; - - *flagp = HASWIDTH; // Tentatively. - - // Make an OPEN node, if parenthesized. - if (paren) { - if (regnpar >= RegularExpressionMatch::NSUBEXP) { - // RAISE Error, SYM(RegularExpression), SYM(Too_Many_Parens), - printf("RegularExpression::compile(): Too many parentheses.\n"); - return nullptr; - } - parno = regnpar; - regnpar++; - ret = regnode(static_cast(OPEN + parno)); - } else - ret = nullptr; - - // Pick up the branches, linking them together. - br = regbranch(&flags); - if (br == nullptr) - return (nullptr); - if (ret != nullptr) - regtail(ret, br); // OPEN -> first. - else - ret = br; - if (!(flags & HASWIDTH)) - *flagp &= ~HASWIDTH; - *flagp |= flags & SPSTART; - while (*regparse == '|') { - regparse++; - br = regbranch(&flags); - if (br == nullptr) - return (nullptr); - regtail(ret, br); // BRANCH -> BRANCH. - if (!(flags & HASWIDTH)) - *flagp &= ~HASWIDTH; - *flagp |= flags & SPSTART; - } - - // Make a closing node, and hook it on the end. - ender = regnode(static_cast((paren) ? CLOSE + parno : END)); - regtail(ret, ender); - - // Hook the tails of the branches to the closing node. - for (br = ret; br != nullptr; br = regnext(br)) - regoptail(br, ender); - - // Check for proper termination. - if (paren && *regparse++ != ')') { - // RAISE Error, SYM(RegularExpression), SYM(Unmatched_Parens), - printf("RegularExpression::compile(): Unmatched parentheses.\n"); - return nullptr; - } else if (!paren && *regparse != '\0') { - if (*regparse == ')') { - // RAISE Error, SYM(RegularExpression), SYM(Unmatched_Parens), - printf("RegularExpression::compile(): Unmatched parentheses.\n"); - return nullptr; - } else { - // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), - printf("RegularExpression::compile(): Internal error.\n"); - return nullptr; - } - // NOTREACHED - } - return (ret); -} - -/* - - regbranch - one alternative of an | operator - * - * Implements the concatenation operator. - */ -char* RegExpCompile::regbranch(int* flagp) -{ - char* ret; - char* chain; - char* latest; - int flags; - - *flagp = WORST; // Tentatively. 
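
/* [Editorial note -- not part of the original kwsys sources]  The branch
   handling in reg() above relies on the precedence rule that alternation
   binds more loosely than concatenation, so "ab|cd" means "(ab)|(cd)", never
   "a(b|c)d".  The claim can be checked against any regex engine; std::regex
   is used here purely as a stand-in and is unrelated to the kwsys engine
   being removed. */
#include <cstdio>
#include <regex>

int main()
{
  std::regex re("ab|cd");
  std::printf("%d %d %d\n",
              static_cast<int>(std::regex_match("ab", re)),   /* 1 */
              static_cast<int>(std::regex_match("cd", re)),   /* 1 */
              static_cast<int>(std::regex_match("abd", re))); /* 0 */
  return 0;
}
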
- - ret = regnode(BRANCH); - chain = nullptr; - while (*regparse != '\0' && *regparse != '|' && *regparse != ')') { - latest = regpiece(&flags); - if (latest == nullptr) - return (nullptr); - *flagp |= flags & HASWIDTH; - if (chain == nullptr) // First piece. - *flagp |= flags & SPSTART; - else - regtail(chain, latest); - chain = latest; - } - if (chain == nullptr) // Loop ran zero times. - regnode(NOTHING); - - return (ret); -} - -/* - - regpiece - something followed by possible [*+?] - * - * Note that the branching code sequences used for ? and the general cases - * of * and + are somewhat optimized: they use the same NOTHING node as - * both the endmarker for their branch list and the body of the last branch. - * It might seem that this node could be dispensed with entirely, but the - * endmarker role is not redundant. - */ -char* RegExpCompile::regpiece(int* flagp) -{ - char* ret; - char op; - char* next; - int flags; - - ret = regatom(&flags); - if (ret == nullptr) - return (nullptr); - - op = *regparse; - if (!ISMULT(op)) { - *flagp = flags; - return (ret); - } - - if (!(flags & HASWIDTH) && op != '?') { - // RAISE Error, SYM(RegularExpression), SYM(Empty_Operand), - printf("RegularExpression::compile() : *+ operand could be empty.\n"); - return nullptr; - } - *flagp = (op != '+') ? (WORST | SPSTART) : (WORST | HASWIDTH); - - if (op == '*' && (flags & SIMPLE)) - reginsert(STAR, ret); - else if (op == '*') { - // Emit x* as (x&|), where & means "self". - reginsert(BRANCH, ret); // Either x - regoptail(ret, regnode(BACK)); // and loop - regoptail(ret, ret); // back - regtail(ret, regnode(BRANCH)); // or - regtail(ret, regnode(NOTHING)); // null. - } else if (op == '+' && (flags & SIMPLE)) - reginsert(PLUS, ret); - else if (op == '+') { - // Emit x+ as x(&|), where & means "self". - next = regnode(BRANCH); // Either - regtail(ret, next); - regtail(regnode(BACK), ret); // loop back - regtail(next, regnode(BRANCH)); // or - regtail(ret, regnode(NOTHING)); // null. - } else if (op == '?') { - // Emit x? as (x|) - reginsert(BRANCH, ret); // Either x - regtail(ret, regnode(BRANCH)); // or - next = regnode(NOTHING); // null. - regtail(ret, next); - regoptail(ret, next); - } - regparse++; - if (ISMULT(*regparse)) { - // RAISE Error, SYM(RegularExpression), SYM(Nested_Operand), - printf("RegularExpression::compile(): Nested *?+.\n"); - return nullptr; - } - return (ret); -} - -/* - - regatom - the lowest level - * - * Optimization: gobbles an entire sequence of ordinary characters so that - * it can turn them into a single node, which is smaller to store and - * faster to run. Backslashed characters are exceptions, each becoming a - * separate node; the code is simpler that way and it's not worth fixing. - */ -char* RegExpCompile::regatom(int* flagp) -{ - char* ret; - int flags; - - *flagp = WORST; // Tentatively. - - switch (*regparse++) { - case '^': - ret = regnode(BOL); - break; - case '$': - ret = regnode(EOL); - break; - case '.': - ret = regnode(ANY); - *flagp |= HASWIDTH | SIMPLE; - break; - case '[': { - int rxpclass; - int rxpclassend; - - if (*regparse == '^') { // Complement of range. 
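
/* [Editorial note -- not part of the original kwsys sources]  regpiece()
   above emits "x?" as "(x|)": try the operand, and if it fails fall through
   to the empty alternative (NOTHING).  A character-level sketch of that idea,
   with hypothetical names: */
#include <cstdio>

/* Return how many characters of 's' are consumed when 'c' is optional. */
static int match_optional(char c, const char* s)
{
  if (*s == c)  /* first branch: the operand itself          */
    return 1;
  return 0;     /* second branch: the empty (NOTHING) branch */
}

int main()
{
  std::printf("%d %d\n",
              match_optional('r', "rat"),  /* 1: the 'r' is consumed      */
              match_optional('r', "at"));  /* 0: matches the empty branch */
  return 0;
}
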
- ret = regnode(ANYBUT); - regparse++; - } else - ret = regnode(ANYOF); - if (*regparse == ']' || *regparse == '-') - regc(*regparse++); - while (*regparse != '\0' && *regparse != ']') { - if (*regparse == '-') { - regparse++; - if (*regparse == ']' || *regparse == '\0') - regc('-'); - else { - rxpclass = UCHARAT(regparse - 2) + 1; - rxpclassend = UCHARAT(regparse); - if (rxpclass > rxpclassend + 1) { - // RAISE Error, SYM(RegularExpression), SYM(Invalid_Range), - printf("RegularExpression::compile(): Invalid range in [].\n"); - return nullptr; - } - for (; rxpclass <= rxpclassend; rxpclass++) - regc(static_cast(rxpclass)); - regparse++; - } - } else - regc(*regparse++); - } - regc('\0'); - if (*regparse != ']') { - // RAISE Error, SYM(RegularExpression), SYM(Unmatched_Bracket), - printf("RegularExpression::compile(): Unmatched [].\n"); - return nullptr; - } - regparse++; - *flagp |= HASWIDTH | SIMPLE; - } break; - case '(': - ret = reg(1, &flags); - if (ret == nullptr) - return (nullptr); - *flagp |= flags & (HASWIDTH | SPSTART); - break; - case '\0': - case '|': - case ')': - // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), - printf("RegularExpression::compile(): Internal error.\n"); // Never here - return nullptr; - case '?': - case '+': - case '*': - // RAISE Error, SYM(RegularExpression), SYM(No_Operand), - printf("RegularExpression::compile(): ?+* follows nothing.\n"); - return nullptr; - case '\\': - if (*regparse == '\0') { - // RAISE Error, SYM(RegularExpression), SYM(Trailing_Backslash), - printf("RegularExpression::compile(): Trailing backslash.\n"); - return nullptr; - } - ret = regnode(EXACTLY); - regc(*regparse++); - regc('\0'); - *flagp |= HASWIDTH | SIMPLE; - break; - default: { - int len; - char ender; - - regparse--; - len = int(strcspn(regparse, META)); - if (len <= 0) { - // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), - printf("RegularExpression::compile(): Internal error.\n"); - return nullptr; - } - ender = *(regparse + len); - if (len > 1 && ISMULT(ender)) - len--; // Back off clear of ?+* operand. - *flagp |= HASWIDTH; - if (len == 1) - *flagp |= SIMPLE; - ret = regnode(EXACTLY); - while (len > 0) { - regc(*regparse++); - len--; - } - regc('\0'); - } break; - } - return (ret); -} - -/* - - regnode - emit a node - Location. - */ -char* RegExpCompile::regnode(char op) -{ - char* ret; - char* ptr; - - ret = regcode; - if (ret == regdummyptr) { - regsize += 3; - return (ret); - } - - ptr = ret; - *ptr++ = op; - *ptr++ = '\0'; // Null "next" pointer. - *ptr++ = '\0'; - regcode = ptr; - - return (ret); -} - -/* - - regc - emit (if appropriate) a byte of code - */ -void RegExpCompile::regc(char b) -{ - if (regcode != regdummyptr) - *regcode++ = b; - else - regsize++; -} - -/* - - reginsert - insert an operator in front of already-emitted operand - * - * Means relocating the operand. - */ -void RegExpCompile::reginsert(char op, char* opnd) -{ - char* src; - char* dst; - char* place; - - if (regcode == regdummyptr) { - regsize += 3; - return; - } - - src = regcode; - regcode += 3; - dst = regcode; - while (src > opnd) - *--dst = *--src; - - place = opnd; // Op node, where operand used to be. - *place++ = op; - *place++ = '\0'; - *place = '\0'; -} - -/* - - regtail - set the next-pointer at the end of a node chain - */ -void RegExpCompile::regtail(char* p, const char* val) -{ - char* scan; - char* temp; - int offset; - - if (p == regdummyptr) - return; - - // Find last node. 
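
/* [Editorial note -- not part of the original kwsys sources]  reginsert()
   above makes room for a three-byte operator in front of an operand that has
   already been emitted by sliding the emitted bytes up.  A minimal sketch of
   that relocation, using std::memmove for the overlapping copy: */
#include <cstdio>
#include <cstring>

int main()
{
  char buf[16] = "OPERAND";
  std::size_t len = std::strlen(buf);

  std::memmove(buf + 3, buf, len + 1); /* slide operand (and its NUL) up 3 bytes */
  buf[0] = 10;                         /* e.g. the STAR opcode                   */
  buf[1] = '\0';                       /* null "next" pointer, high byte         */
  buf[2] = '\0';                       /*                      low byte          */

  std::printf("operand now at offset 3: %s\n", buf + 3); /* prints OPERAND */
  return 0;
}
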
- scan = p; - for (;;) { - temp = regnext(scan); - if (temp == nullptr) - break; - scan = temp; - } - - if (OP(scan) == BACK) - offset = int(scan - val); - else - offset = int(val - scan); - *(scan + 1) = static_cast((offset >> 8) & 0377); - *(scan + 2) = static_cast(offset & 0377); -} - -/* - - regoptail - regtail on operand of first argument; nop if operandless - */ -void RegExpCompile::regoptail(char* p, const char* val) -{ - // "Operandless" and "op != BRANCH" are synonymous in practice. - if (p == nullptr || p == regdummyptr || OP(p) != BRANCH) - return; - regtail(OPERAND(p), val); -} - -//////////////////////////////////////////////////////////////////////// -// -// find and friends -// -//////////////////////////////////////////////////////////////////////// - -/* - * Utility class for RegularExpression::find(). - */ -class RegExpFind -{ -public: - const char* reginput; // String-input pointer. - const char* regbol; // Beginning of input, for ^ check. - const char** regstartp; // Pointer to startp array. - const char** regendp; // Ditto for endp. - - int regtry(const char*, const char**, const char**, const char*); - int regmatch(const char*); - int regrepeat(const char*); -}; - -// find -- Matches the regular expression to the given string. -// Returns true if found, and sets start and end indexes accordingly. -bool RegularExpression::find(char const* string, - RegularExpressionMatch& rmatch) const -{ - const char* s; - - rmatch.clear(); - rmatch.searchstring = string; - - if (!this->program) { - return false; - } - - // Check validity of program. - if (UCHARAT(this->program) != MAGIC) { - // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), - printf( - "RegularExpression::find(): Compiled regular expression corrupted.\n"); - return false; - } - - // If there is a "must appear" string, look for it. - if (this->regmust != nullptr) { - s = string; - while ((s = strchr(s, this->regmust[0])) != nullptr) { - if (strncmp(s, this->regmust, this->regmlen) == 0) - break; // Found it. - s++; - } - if (s == nullptr) // Not present. - return false; - } - - RegExpFind regFind; - - // Mark beginning of line for ^ . - regFind.regbol = string; - - // Simplest case: anchored match need be tried only once. - if (this->reganch) - return ( - regFind.regtry(string, rmatch.startp, rmatch.endp, this->program) != 0); - - // Messy cases: unanchored match. - s = string; - if (this->regstart != '\0') - // We know what char it must start with. - while ((s = strchr(s, this->regstart)) != nullptr) { - if (regFind.regtry(s, rmatch.startp, rmatch.endp, this->program)) - return true; - s++; - } - else - // We don't -- general case. - do { - if (regFind.regtry(s, rmatch.startp, rmatch.endp, this->program)) - return true; - } while (*s++ != '\0'); - - // Failure. 
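
/* [Editorial note -- not part of the original kwsys sources]  find() above
   uses the "regmust" optimization: before running the matcher it scans for a
   literal that must occur in any match and gives up early if it is absent.
   A standalone sketch of that pre-scan: */
#include <cstdio>
#include <cstring>

static bool must_appear(const char* text, const char* must)
{
  std::size_t len = std::strlen(must);
  for (const char* s = text; (s = std::strchr(s, must[0])) != nullptr; ++s)
    if (std::strncmp(s, must, len) == 0)
      return true;  /* literal present: the full match is worth attempting */
  return false;     /* literal absent: the pattern cannot match at all     */
}

int main()
{
  std::printf("%d %d\n",
              static_cast<int>(must_appear("rep drepa qrepb", "repb")),  /* 1 */
              static_cast<int>(must_appear("no such literal", "repb"))); /* 0 */
  return 0;
}
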
- return false; -} - -/* - - regtry - try match at specific point - 0 failure, 1 success - */ -int RegExpFind::regtry(const char* string, const char** start, - const char** end, const char* prog) -{ - int i; - const char** sp1; - const char** ep; - - reginput = string; - regstartp = start; - regendp = end; - - sp1 = start; - ep = end; - for (i = RegularExpressionMatch::NSUBEXP; i > 0; i--) { - *sp1++ = nullptr; - *ep++ = nullptr; - } - if (regmatch(prog + 1)) { - start[0] = string; - end[0] = reginput; - return (1); - } else - return (0); -} - -/* - - regmatch - main matching routine - * - * Conceptually the strategy is simple: check to see whether the current - * node matches, call self recursively to see whether the rest matches, - * and then act accordingly. In practice we make some effort to avoid - * recursion, in particular by going through "ordinary" nodes (that don't - * need to know whether the rest of the match failed) by a loop instead of - * by recursion. - * 0 failure, 1 success - */ -int RegExpFind::regmatch(const char* prog) -{ - const char* scan; // Current node. - const char* next; // Next node. - - scan = prog; - - while (scan != nullptr) { - - next = regnext(scan); - - switch (OP(scan)) { - case BOL: - if (reginput != regbol) - return (0); - break; - case EOL: - if (*reginput != '\0') - return (0); - break; - case ANY: - if (*reginput == '\0') - return (0); - reginput++; - break; - case EXACTLY: { - size_t len; - const char* opnd; - - opnd = OPERAND(scan); - // Inline the first character, for speed. - if (*opnd != *reginput) - return (0); - len = strlen(opnd); - if (len > 1 && strncmp(opnd, reginput, len) != 0) - return (0); - reginput += len; - } break; - case ANYOF: - if (*reginput == '\0' || strchr(OPERAND(scan), *reginput) == nullptr) - return (0); - reginput++; - break; - case ANYBUT: - if (*reginput == '\0' || strchr(OPERAND(scan), *reginput) != nullptr) - return (0); - reginput++; - break; - case NOTHING: - break; - case BACK: - break; - case OPEN + 1: - case OPEN + 2: - case OPEN + 3: - case OPEN + 4: - case OPEN + 5: - case OPEN + 6: - case OPEN + 7: - case OPEN + 8: - case OPEN + 9: { - int no; - const char* save; - - no = OP(scan) - OPEN; - save = reginput; - - if (regmatch(next)) { - - // - // Don't set startp if some later invocation of the - // same parentheses already has. - // - if (regstartp[no] == nullptr) - regstartp[no] = save; - return (1); - } else - return (0); - } - // break; - case CLOSE + 1: - case CLOSE + 2: - case CLOSE + 3: - case CLOSE + 4: - case CLOSE + 5: - case CLOSE + 6: - case CLOSE + 7: - case CLOSE + 8: - case CLOSE + 9: { - int no; - const char* save; - - no = OP(scan) - CLOSE; - save = reginput; - - if (regmatch(next)) { - - // - // Don't set endp if some later invocation of the - // same parentheses already has. - // - if (regendp[no] == nullptr) - regendp[no] = save; - return (1); - } else - return (0); - } - // break; - case BRANCH: { - - const char* save; - - if (OP(next) != BRANCH) // No choice. - next = OPERAND(scan); // Avoid recursion. - else { - do { - save = reginput; - if (regmatch(OPERAND(scan))) - return (1); - reginput = save; - scan = regnext(scan); - } while (scan != nullptr && OP(scan) == BRANCH); - return (0); - // NOTREACHED - } - } break; - case STAR: - case PLUS: { - char nextch; - int no; - const char* save; - int min_no; - - // - // Lookahead to avoid useless match attempts when we know - // what character comes next. 
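
/* [Editorial note -- not part of the original kwsys sources]  The STAR/PLUS
   case beginning above (and continuing just below) matches as many
   repetitions as possible first, via regrepeat(), then backs off one
   character at a time until the rest of the pattern fits.  A simplified,
   character-level sketch of that back-off loop: */
#include <cstdio>
#include <cstring>

/* Match "c*" followed by the literal 'rest'; return 1 on success. */
static int star_then(char c, const char* rest, const char* input)
{
  int count = 0;
  while (input[count] == c)  /* greedy phase, like regrepeat() */
    ++count;
  while (count >= 0) {       /* back off until the tail fits   */
    if (std::strcmp(input + count, rest) == 0)
      return 1;
    --count;
  }
  return 0;
}

int main()
{
  std::printf("%d %d\n",
              star_then('r', "h ", "rrrh "),  /* 1: behaves like "r*h "             */
              star_then('r', "h ", "rrz "));  /* 0: no amount of backing off helps  */
  return 0;
}
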
- // - nextch = '\0'; - if (OP(next) == EXACTLY) - nextch = *OPERAND(next); - min_no = (OP(scan) == STAR) ? 0 : 1; - save = reginput; - no = regrepeat(OPERAND(scan)); - while (no >= min_no) { - // If it could work, try it. - if (nextch == '\0' || *reginput == nextch) - if (regmatch(next)) - return (1); - // Couldn't or didn't -- back up. - no--; - reginput = save + no; - } - return (0); - } - // break; - case END: - return (1); // Success! - - default: - // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), - printf( - "RegularExpression::find(): Internal error -- memory corrupted.\n"); - return 0; - } - scan = next; - } - - // - // We get here only if there's trouble -- normally "case END" is the - // terminating point. - // - // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), - printf("RegularExpression::find(): Internal error -- corrupted pointers.\n"); - return (0); -} - -/* - - regrepeat - repeatedly match something simple, report how many - */ -int RegExpFind::regrepeat(const char* p) -{ - int count = 0; - const char* scan; - const char* opnd; - - scan = reginput; - opnd = OPERAND(p); - switch (OP(p)) { - case ANY: - count = int(strlen(scan)); - scan += count; - break; - case EXACTLY: - while (*opnd == *scan) { - count++; - scan++; - } - break; - case ANYOF: - while (*scan != '\0' && strchr(opnd, *scan) != nullptr) { - count++; - scan++; - } - break; - case ANYBUT: - while (*scan != '\0' && strchr(opnd, *scan) == nullptr) { - count++; - scan++; - } - break; - default: // Oh dear. Called inappropriately. - // RAISE Error, SYM(RegularExpression), SYM(Internal_Error), - printf("cm RegularExpression::find(): Internal error.\n"); - return 0; - } - reginput = scan; - return (count); -} - -/* - - regnext - dig the "next" pointer out of a node - */ -static const char* regnext(const char* p) -{ - int offset; - - if (p == regdummyptr) - return (nullptr); - - offset = NEXT(p); - if (offset == 0) - return (nullptr); - - if (OP(p) == BACK) - return (p - offset); - else - return (p + offset); -} - -static char* regnext(char* p) -{ - int offset; - - if (p == regdummyptr) - return (nullptr); - - offset = NEXT(p); - if (offset == 0) - return (nullptr); - - if (OP(p) == BACK) - return (p - offset); - else - return (p + offset); -} - -} // namespace KWSYS_NAMESPACE diff --git a/test/API/driver/kwsys/RegularExpression.hxx.in b/test/API/driver/kwsys/RegularExpression.hxx.in deleted file mode 100644 index 0c2366b8421..00000000000 --- a/test/API/driver/kwsys/RegularExpression.hxx.in +++ /dev/null @@ -1,562 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -// Original Copyright notice: -// Copyright (C) 1991 Texas Instruments Incorporated. -// -// Permission is granted to any individual or institution to use, copy, modify, -// and distribute this software, provided that this complete copyright and -// permission notice is maintained, intact, in all copies and supporting -// documentation. -// -// Texas Instruments Incorporated provides this software "as is" without -// express or implied warranty. -// -// Created: MNF 06/13/89 Initial Design and Implementation -// Updated: LGO 08/09/89 Inherit from Generic -// Updated: MBN 09/07/89 Added conditional exception handling -// Updated: MBN 12/15/89 Sprinkled "const" qualifiers all over the place! 
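
/* [Editorial note -- not part of the original kwsys sources]  regnext() above
   decodes the two-byte, high-order-first "next" offset that regtail() and the
   NEXT() macro store after each opcode byte.  A self-contained round-trip of
   that encoding, with made-up opcode and offset values: */
#include <cstdio>

int main()
{
  unsigned char node[3];
  int offset = 517; /* hypothetical forward distance to the next node */

  node[0] = 8;                                                /* e.g. EXACTLY    */
  node[1] = static_cast<unsigned char>((offset >> 8) & 0377); /* high byte first */
  node[2] = static_cast<unsigned char>(offset & 0377);        /* low byte second */

  int decoded = ((node[1] & 0377) << 8) + (node[2] & 0377);
  std::printf("opcode=%d next=%d\n", node[0], decoded); /* opcode=8 next=517 */
  return 0;
}
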
-// Updated: DLS 03/22/91 New lite version -// - -#ifndef @KWSYS_NAMESPACE@_RegularExpression_hxx -#define @KWSYS_NAMESPACE@_RegularExpression_hxx - -#include <@KWSYS_NAMESPACE@/Configure.h> -#include <@KWSYS_NAMESPACE@/Configure.hxx> - -#include - -/* Disable useless Borland warnings. KWSys tries not to force things - on its includers, but there is no choice here. */ -#if defined(__BORLANDC__) -# pragma warn - 8027 /* function not inlined. */ -#endif - -namespace @KWSYS_NAMESPACE@ { - -// Forward declaration -class RegularExpression; - -/** \class RegularExpressionMatch - * \brief Stores the pattern matches of a RegularExpression - */ -class @KWSYS_NAMESPACE@_EXPORT RegularExpressionMatch -{ -public: - RegularExpressionMatch(); - - bool isValid() const; - void clear(); - - std::string::size_type start() const; - std::string::size_type end() const; - std::string::size_type start(int n) const; - std::string::size_type end(int n) const; - std::string match(int n) const; - - enum - { - NSUBEXP = 10 - }; - -private: - friend class RegularExpression; - const char* startp[NSUBEXP]; - const char* endp[NSUBEXP]; - const char* searchstring; -}; - -/** - * \brief Creates an invalid match object - */ -inline RegularExpressionMatch::RegularExpressionMatch() - : startp{} - , endp{} - , searchstring{} -{ -} - -/** - * \brief Returns true if the match pointers are valid - */ -inline bool RegularExpressionMatch::isValid() const -{ - return (this->startp[0] != nullptr); -} - -/** - * \brief Resets to the (invalid) construction state. - */ -inline void RegularExpressionMatch::clear() -{ - startp[0] = nullptr; - endp[0] = nullptr; - searchstring = nullptr; -} - -/** - * \brief Returns the start index of the full match. - */ -inline std::string::size_type RegularExpressionMatch::start() const -{ - return static_cast(this->startp[0] - searchstring); -} - -/** - * \brief Returns the end index of the full match. - */ -inline std::string::size_type RegularExpressionMatch::end() const -{ - return static_cast(this->endp[0] - searchstring); -} - -/** - * \brief Returns the start index of nth submatch. - * start(0) is the start of the full match. - */ -inline std::string::size_type RegularExpressionMatch::start(int n) const -{ - return static_cast(this->startp[n] - - this->searchstring); -} - -/** - * \brief Returns the end index of nth submatch. - * end(0) is the end of the full match. - */ -inline std::string::size_type RegularExpressionMatch::end(int n) const -{ - return static_cast(this->endp[n] - - this->searchstring); -} - -/** - * \brief Returns the nth submatch as a string. - */ -inline std::string RegularExpressionMatch::match(int n) const -{ - if (this->startp[n] == nullptr) { - return std::string(); - } else { - return std::string( - this->startp[n], - static_cast(this->endp[n] - this->startp[n])); - } -} - -/** \class RegularExpression - * \brief Implements pattern matching with regular expressions. - * - * This is the header file for the regular expression class. An object of - * this class contains a regular expression, in a special "compiled" format. - * This compiled format consists of several slots all kept as the objects - * private data. The RegularExpression class provides a convenient way to - * represent regular expressions. It makes it easy to search for the same - * regular expression in many different strings without having to compile a - * string to regular expression format more than necessary. - * - * This class implements pattern matching via regular expressions. 
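
/* [Editorial note -- not part of the original kwsys sources]  The
   RegularExpressionMatch accessors above derive indices and substrings from
   raw pointers into the searched string.  The arithmetic is just this; the
   match positions below are made up, as if the engine had matched "drepa": */
#include <cstdio>
#include <string>

int main()
{
  const char* searchstring = "rep drepa qrepb";
  const char* startp = searchstring + 4; /* pretend startp[0] points at 'd' */
  const char* endp = searchstring + 9;   /* and endp[0] one past the 'a'    */

  std::string::size_type start =
    static_cast<std::string::size_type>(startp - searchstring);
  std::string::size_type end =
    static_cast<std::string::size_type>(endp - searchstring);
  std::string text(startp, static_cast<std::string::size_type>(endp - startp));

  std::printf("start=%zu end=%zu match=%s\n", start, end, text.c_str());
  /* prints: start=4 end=9 match=drepa */
  return 0;
}
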
- * A regular expression allows a programmer to specify complex - * patterns that can be searched for and matched against the - * character string of a string object. In its simplest form, a - * regular expression is a sequence of characters used to - * search for exact character matches. However, many times the - * exact sequence to be found is not known, or only a match at - * the beginning or end of a string is desired. The RegularExpression regu- - * lar expression class implements regular expression pattern - * matching as is found and implemented in many UNIX commands - * and utilities. - * - * Example: The perl code - * - * $filename =~ m"([a-z]+)\.cc"; - * print $1; - * - * Is written as follows in C++ - * - * RegularExpression re("([a-z]+)\\.cc"); - * re.find(filename); - * cerr << re.match(1); - * - * - * The regular expression class provides a convenient mechanism - * for specifying and manipulating regular expressions. The - * regular expression object allows specification of such pat- - * terns by using the following regular expression metacharac- - * ters: - * - * ^ Matches at beginning of a line - * - * $ Matches at end of a line - * - * . Matches any single character - * - * [ ] Matches any character(s) inside the brackets - * - * [^ ] Matches any character(s) not inside the brackets - * - * - Matches any character in range on either side of a dash - * - * * Matches preceding pattern zero or more times - * - * + Matches preceding pattern one or more times - * - * ? Matches preceding pattern zero or once only - * - * () Saves a matched expression and uses it in a later match - * - * Note that more than one of these metacharacters can be used - * in a single regular expression in order to create complex - * search patterns. For example, the pattern [^ab1-9] says to - * match any character sequence that does not begin with the - * characters "ab" followed by numbers in the series one - * through nine. - * - * There are three constructors for RegularExpression. One just creates an - * empty RegularExpression object. Another creates a RegularExpression - * object and initializes it with a regular expression that is given in the - * form of a char*. The third takes a reference to a RegularExpression - * object as an argument and creates an object initialized with the - * information from the given RegularExpression object. - * - * The find member function finds the first occurrence of the regular - * expression of that object in the string given to find as an argument. Find - * returns a boolean, and if true, mutates the private data appropriately. - * Find sets pointers to the beginning and end of the thing last found, they - * are pointers into the actual string that was searched. The start and end - * member functions return indices into the searched string that correspond - * to the beginning and end pointers respectively. The compile member - * function takes a char* and puts the compiled version of the char* argument - * into the object's private data fields. The == and != operators only check - * the to see if the compiled regular expression is the same, and the - * deep_equal functions also checks to see if the start and end pointers are - * the same. The is_valid function returns false if program is set to - * nullptr, (i.e. there is no valid compiled expression). The set_invalid - * function sets the program to nullptr (Warning: this deletes the compiled - * expression). 
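
/* [Editorial note -- not part of the original kwsys sources]  The usage
   example documented above extracts the first parenthesized submatch from a
   file name.  Since this patch removes the kwsys class, the same idea is
   illustrated with std::regex; this is only a stand-in, not the kwsys API. */
#include <iostream>
#include <regex>
#include <string>

int main()
{
  std::string filename = "widget.cc"; /* hypothetical input */
  std::smatch m;
  if (std::regex_search(filename, m, std::regex("([a-z]+)\\.cc")))
    std::cout << m[1] << "\n"; /* prints "widget", like re.match(1) */
  return 0;
}
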
The following examples may help clarify regular expression - * usage: - * - * * The regular expression "^hello" matches a "hello" only at the - * beginning of a line. It would match "hello there" but not "hi, - * hello there". - * - * * The regular expression "long$" matches a "long" only at the end - * of a line. It would match "so long\0", but not "long ago". - * - * * The regular expression "t..t..g" will match anything that has a - * "t" then any two characters, another "t", any two characters and - * then a "g". It will match "testing", or "test again" but would - * not match "toasting" - * - * * The regular expression "[1-9ab]" matches any number one through - * nine, and the characters "a" and "b". It would match "hello 1" - * or "begin", but would not match "no-match". - * - * * The regular expression "[^1-9ab]" matches any character that is - * not a number one through nine, or an "a" or "b". It would NOT - * match "hello 1" or "begin", but would match "no-match". - * - * * The regular expression "br* " matches something that begins with - * a "b", is followed by zero or more "r"s, and ends in a space. It - * would match "brrrrr ", and "b ", but would not match "brrh ". - * - * * The regular expression "br+ " matches something that begins with - * a "b", is followed by one or more "r"s, and ends in a space. It - * would match "brrrrr ", and "br ", but would not match "b " or - * "brrh ". - * - * * The regular expression "br? " matches something that begins with - * a "b", is followed by zero or one "r"s, and ends in a space. It - * would match "br ", and "b ", but would not match "brrrr " or - * "brrh ". - * - * * The regular expression "(..p)b" matches something ending with pb - * and beginning with whatever the two characters before the first p - * encountered in the line were. It would find "repb" in "rep drepa - * qrepb". The regular expression "(..p)a" would find "repa qrepb" - * in "rep drepa qrepb" - * - * * The regular expression "d(..p)" matches something ending with p, - * beginning with d, and having two characters in between that are - * the same as the two characters before the first p encountered in - * the line. It would match "drepa qrepb" in "rep drepa qrepb". - * - * All methods of RegularExpression can be called simultaneously from - * different threads but only if each invocation uses an own instance of - * RegularExpression. - */ -class @KWSYS_NAMESPACE@_EXPORT RegularExpression -{ -public: - /** - * Instantiate RegularExpression with program=nullptr. - */ - inline RegularExpression(); - - /** - * Instantiate RegularExpression with compiled char*. - */ - inline RegularExpression(char const*); - - /** - * Instantiate RegularExpression as a copy of another regular expression. - */ - RegularExpression(RegularExpression const&); - - /** - * Instantiate RegularExpression with compiled string. - */ - inline RegularExpression(std::string const&); - - /** - * Destructor. - */ - inline ~RegularExpression(); - - /** - * Compile a regular expression into internal code - * for later pattern matching. - */ - bool compile(char const*); - - /** - * Compile a regular expression into internal code - * for later pattern matching. - */ - inline bool compile(std::string const&); - - /** - * Matches the regular expression to the given string. - * Returns true if found, and sets start and end indexes - * in the RegularExpressionMatch instance accordingly. - * - * This method is thread safe when called with different - * RegularExpressionMatch instances. 
- */ - bool find(char const*, RegularExpressionMatch&) const; - - /** - * Matches the regular expression to the given string. - * Returns true if found, and sets start and end indexes accordingly. - */ - inline bool find(char const*); - - /** - * Matches the regular expression to the given std string. - * Returns true if found, and sets start and end indexes accordingly. - */ - inline bool find(std::string const&); - - /** - * Match indices - */ - inline RegularExpressionMatch const& regMatch() const; - inline std::string::size_type start() const; - inline std::string::size_type end() const; - inline std::string::size_type start(int n) const; - inline std::string::size_type end(int n) const; - - /** - * Match strings - */ - inline std::string match(int n) const; - - /** - * Copy the given regular expression. - */ - RegularExpression& operator=(const RegularExpression& rxp); - - /** - * Returns true if two regular expressions have the same - * compiled program for pattern matching. - */ - bool operator==(RegularExpression const&) const; - - /** - * Returns true if two regular expressions have different - * compiled program for pattern matching. - */ - inline bool operator!=(RegularExpression const&) const; - - /** - * Returns true if have the same compiled regular expressions - * and the same start and end pointers. - */ - bool deep_equal(RegularExpression const&) const; - - /** - * True if the compiled regexp is valid. - */ - inline bool is_valid() const; - - /** - * Marks the regular expression as invalid. - */ - inline void set_invalid(); - -private: - RegularExpressionMatch regmatch; - char regstart; // Internal use only - char reganch; // Internal use only - const char* regmust; // Internal use only - std::string::size_type regmlen; // Internal use only - char* program; - int progsize; -}; - -/** - * Create an empty regular expression. - */ -inline RegularExpression::RegularExpression() - : regstart{} - , reganch{} - , regmust{} - , program{ nullptr } - , progsize{} -{ -} - -/** - * Creates a regular expression from string s, and - * compiles s. - */ -inline RegularExpression::RegularExpression(const char* s) - : regstart{} - , reganch{} - , regmust{} - , program{ nullptr } - , progsize{} -{ - if (s) { - this->compile(s); - } -} - -/** - * Creates a regular expression from string s, and - * compiles s. - */ -inline RegularExpression::RegularExpression(const std::string& s) - : regstart{} - , reganch{} - , regmust{} - , program{ nullptr } - , progsize{} -{ - this->compile(s); -} - -/** - * Destroys and frees space allocated for the regular expression. - */ -inline RegularExpression::~RegularExpression() -{ - //#ifndef _WIN32 - delete[] this->program; - //#endif -} - -/** - * Compile a regular expression into internal code - * for later pattern matching. - */ -inline bool RegularExpression::compile(std::string const& s) -{ - return this->compile(s.c_str()); -} - -/** - * Matches the regular expression to the given std string. - * Returns true if found, and sets start and end indexes accordingly. - */ -inline bool RegularExpression::find(const char* s) -{ - return this->find(s, this->regmatch); -} - -/** - * Matches the regular expression to the given std string. - * Returns true if found, and sets start and end indexes accordingly. 
- */ -inline bool RegularExpression::find(std::string const& s) -{ - return this->find(s.c_str()); -} - -/** - * Returns the internal match object - */ -inline RegularExpressionMatch const& RegularExpression::regMatch() const -{ - return this->regmatch; -} - -/** - * Returns the start index of the full match. - */ -inline std::string::size_type RegularExpression::start() const -{ - return regmatch.start(); -} - -/** - * Returns the end index of the full match. - */ -inline std::string::size_type RegularExpression::end() const -{ - return regmatch.end(); -} - -/** - * Return start index of nth submatch. start(0) is the start of the full match. - */ -inline std::string::size_type RegularExpression::start(int n) const -{ - return regmatch.start(n); -} - -/** - * Return end index of nth submatch. end(0) is the end of the full match. - */ -inline std::string::size_type RegularExpression::end(int n) const -{ - return regmatch.end(n); -} - -/** - * Return nth submatch as a string. - */ -inline std::string RegularExpression::match(int n) const -{ - return regmatch.match(n); -} - -/** - * Returns true if two regular expressions have different - * compiled program for pattern matching. - */ -inline bool RegularExpression::operator!=(const RegularExpression& r) const -{ - return (!(*this == r)); -} - -/** - * Returns true if a valid regular expression is compiled - * and ready for pattern matching. - */ -inline bool RegularExpression::is_valid() const -{ - return (this->program != nullptr); -} - -inline void RegularExpression::set_invalid() -{ - //#ifndef _WIN32 - delete[] this->program; - //#endif - this->program = nullptr; -} - -} // namespace @KWSYS_NAMESPACE@ - -#endif diff --git a/test/API/driver/kwsys/SetupForDevelopment.sh b/test/API/driver/kwsys/SetupForDevelopment.sh deleted file mode 100644 index c3a2b1655bd..00000000000 --- a/test/API/driver/kwsys/SetupForDevelopment.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -cd "${BASH_SOURCE%/*}" && -GitSetup/setup-user && echo && -GitSetup/setup-hooks && echo && -GitSetup/setup-aliases && echo && -GitSetup/setup-upstream && echo && -GitSetup/tips - -# Rebase master by default -git config rebase.stat true -git config branch.master.rebase true - -# Disable Gerrit hook explicitly so the commit-msg hook will -# not complain even if some gerrit remotes are still configured. -git config hooks.GerritId false - -# Record the version of this setup so Scripts/pre-commit can check it. -SetupForDevelopment_VERSION=2 -git config hooks.SetupForDevelopment ${SetupForDevelopment_VERSION} diff --git a/test/API/driver/kwsys/SharedForward.h.in b/test/API/driver/kwsys/SharedForward.h.in deleted file mode 100644 index 5716cd4f1e1..00000000000 --- a/test/API/driver/kwsys/SharedForward.h.in +++ /dev/null @@ -1,879 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_SharedForward_h -# define @KWSYS_NAMESPACE@_SharedForward_h - -/* - This header is used to create a forwarding executable sets up the - shared library search path and replaces itself with a real - executable. This is useful when creating installations on UNIX with - shared libraries that will run from any install directory. 
Typical - usage: - - #if defined(CMAKE_INTDIR) - # define CONFIG_DIR_PRE CMAKE_INTDIR "/" - # define CONFIG_DIR_POST "/" CMAKE_INTDIR - #else - # define CONFIG_DIR_PRE "" - # define CONFIG_DIR_POST "" - #endif - #define @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD "/path/to/foo-build/bin" - #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD "." CONFIG_DIR_POST - #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL "../lib/foo-1.2" - #define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD CONFIG_DIR_PRE "foo-real" - #define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL - "../lib/foo-1.2/foo-real" - #define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND "--command" - #define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT "--print" - #define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD "--ldd" - #if defined(CMAKE_INTDIR) - # define @KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME CMAKE_INTDIR - #endif - #include <@KWSYS_NAMESPACE@/SharedForward.h> - int main(int argc, char** argv) - { - return @KWSYS_NAMESPACE@_shared_forward_to_real(argc, argv); - } - - Specify search and executable paths relative to the forwarding - executable location or as full paths. Include no trailing slash. - In the case of a multi-configuration build, when CMAKE_INTDIR is - defined, the DIR_BUILD setting should point at the directory above - the executable (the one containing the per-configuration - subdirectory specified by CMAKE_INTDIR). Then PATH_BUILD entries - and EXE_BUILD should be specified relative to this location and use - CMAKE_INTDIR as necessary. In the above example imagine appending - the PATH_BUILD or EXE_BUILD setting to the DIR_BUILD setting. The - result should form a valid path with per-configuration subdirectory. - - Additional paths may be specified in the PATH_BUILD and PATH_INSTALL - variables by using comma-separated strings. For example: - - #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD \ - "." CONFIG_DIR_POST, "/path/to/bar-build" CONFIG_DIR_POST - #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL \ - "../lib/foo-1.2", "../lib/bar-4.5" - - See the comments below for specific explanations of each macro. -*/ - -/* Disable -Wcast-qual warnings since they are too hard to fix in a - cross-platform way. */ -# if defined(__clang__) && defined(__has_warning) -# if __has_warning("-Wcast-qual") -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wcast-qual" -# endif -# endif - -# if defined(__BORLANDC__) && !defined(__cplusplus) -/* Code has no effect; raised by winnt.h in C (not C++) when ignoring an - unused parameter using "(param)" syntax (i.e. no cast to void). */ -# pragma warn - 8019 -# endif - -/* Full path to the directory in which this executable is built. Do - not include a trailing slash. */ -# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD) -# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD" -# endif -# if !defined(KWSYS_SHARED_FORWARD_DIR_BUILD) -# define KWSYS_SHARED_FORWARD_DIR_BUILD \ - @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD -# endif - -/* Library search path for build tree. */ -# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD) -# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD" -# endif -# if !defined(KWSYS_SHARED_FORWARD_PATH_BUILD) -# define KWSYS_SHARED_FORWARD_PATH_BUILD \ - @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD -# endif - -/* Library search path for install tree. 
*/ -# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL) -# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL" -# endif -# if !defined(KWSYS_SHARED_FORWARD_PATH_INSTALL) -# define KWSYS_SHARED_FORWARD_PATH_INSTALL \ - @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL -# endif - -/* The real executable to which to forward in the build tree. */ -# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD) -# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD" -# endif -# if !defined(KWSYS_SHARED_FORWARD_EXE_BUILD) -# define KWSYS_SHARED_FORWARD_EXE_BUILD \ - @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD -# endif - -/* The real executable to which to forward in the install tree. */ -# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL) -# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL" -# endif -# if !defined(KWSYS_SHARED_FORWARD_EXE_INSTALL) -# define KWSYS_SHARED_FORWARD_EXE_INSTALL \ - @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL -# endif - -/* The configuration name with which this executable was built (Debug/Release). - */ -# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME) -# define KWSYS_SHARED_FORWARD_CONFIG_NAME \ - @KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME -# else -# undef KWSYS_SHARED_FORWARD_CONFIG_NAME -# endif - -/* Create command line option to replace executable. */ -# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND) -# if !defined(KWSYS_SHARED_FORWARD_OPTION_COMMAND) -# define KWSYS_SHARED_FORWARD_OPTION_COMMAND \ - @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND -# endif -# else -# undef KWSYS_SHARED_FORWARD_OPTION_COMMAND -# endif - -/* Create command line option to print environment setting and exit. */ -# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT) -# if !defined(KWSYS_SHARED_FORWARD_OPTION_PRINT) -# define KWSYS_SHARED_FORWARD_OPTION_PRINT \ - @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT -# endif -# else -# undef KWSYS_SHARED_FORWARD_OPTION_PRINT -# endif - -/* Create command line option to run ldd or equivalent. */ -# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD) -# if !defined(KWSYS_SHARED_FORWARD_OPTION_LDD) -# define KWSYS_SHARED_FORWARD_OPTION_LDD \ - @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD -# endif -# else -# undef KWSYS_SHARED_FORWARD_OPTION_LDD -# endif - -/* Include needed system headers. */ - -# include -# include -# include /* size_t */ -# include -# include -# include - -# if defined(_WIN32) && !defined(__CYGWIN__) -# include - -# include -# include -# define KWSYS_SHARED_FORWARD_ESCAPE_ARGV /* re-escape argv for execvp */ -# else -# include -# include -# endif - -/* Configuration for this platform. */ - -/* The path separator for this platform. */ -# if defined(_WIN32) && !defined(__CYGWIN__) -# define KWSYS_SHARED_FORWARD_PATH_SEP ';' -# define KWSYS_SHARED_FORWARD_PATH_SLASH '\\' -# else -# define KWSYS_SHARED_FORWARD_PATH_SEP ':' -# define KWSYS_SHARED_FORWARD_PATH_SLASH '/' -# endif -static const char kwsys_shared_forward_path_sep[2] = { - KWSYS_SHARED_FORWARD_PATH_SEP, 0 -}; -static const char kwsys_shared_forward_path_slash[2] = { - KWSYS_SHARED_FORWARD_PATH_SLASH, 0 -}; - -/* The maximum length of a file name. */ -# if defined(PATH_MAX) -# define KWSYS_SHARED_FORWARD_MAXPATH PATH_MAX -# elif defined(MAXPATHLEN) -# define KWSYS_SHARED_FORWARD_MAXPATH MAXPATHLEN -# else -# define KWSYS_SHARED_FORWARD_MAXPATH 16384 -# endif - -/* Select the environment variable holding the shared library runtime - search path for this platform and build configuration. 
Also select - ldd command equivalent. */ - -/* Linux */ -# if defined(__linux) -# define KWSYS_SHARED_FORWARD_LDD "ldd" -# define KWSYS_SHARED_FORWARD_LDD_N 1 -# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" - -/* FreeBSD */ -# elif defined(__FreeBSD__) -# define KWSYS_SHARED_FORWARD_LDD "ldd" -# define KWSYS_SHARED_FORWARD_LDD_N 1 -# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" - -/* OpenBSD */ -# elif defined(__OpenBSD__) -# define KWSYS_SHARED_FORWARD_LDD "ldd" -# define KWSYS_SHARED_FORWARD_LDD_N 1 -# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" - -/* OS X */ -# elif defined(__APPLE__) -# define KWSYS_SHARED_FORWARD_LDD "otool", "-L" -# define KWSYS_SHARED_FORWARD_LDD_N 2 -# define KWSYS_SHARED_FORWARD_LDPATH "DYLD_LIBRARY_PATH" - -/* AIX */ -# elif defined(_AIX) -# define KWSYS_SHARED_FORWARD_LDD "dump", "-H" -# define KWSYS_SHARED_FORWARD_LDD_N 2 -# define KWSYS_SHARED_FORWARD_LDPATH "LIBPATH" - -/* SUN */ -# elif defined(__sun) -# define KWSYS_SHARED_FORWARD_LDD "ldd" -# define KWSYS_SHARED_FORWARD_LDD_N 1 -# include -# if defined(_ILP32) -# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" -# elif defined(_LP64) -# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH_64" -# endif - -/* HP-UX */ -# elif defined(__hpux) -# define KWSYS_SHARED_FORWARD_LDD "chatr" -# define KWSYS_SHARED_FORWARD_LDD_N 1 -# if defined(__LP64__) -# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" -# else -# define KWSYS_SHARED_FORWARD_LDPATH "SHLIB_PATH" -# endif - -/* SGI MIPS */ -# elif defined(__sgi) && defined(_MIPS_SIM) -# define KWSYS_SHARED_FORWARD_LDD "ldd" -# define KWSYS_SHARED_FORWARD_LDD_N 1 -# if _MIPS_SIM == _ABIO32 -# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" -# elif _MIPS_SIM == _ABIN32 -# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARYN32_PATH" -# elif _MIPS_SIM == _ABI64 -# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY64_PATH" -# endif - -/* Cygwin */ -# elif defined(__CYGWIN__) -# define KWSYS_SHARED_FORWARD_LDD \ - "cygcheck" /* TODO: cygwin 1.7 has ldd \ - */ -# define KWSYS_SHARED_FORWARD_LDD_N 1 -# define KWSYS_SHARED_FORWARD_LDPATH "PATH" - -/* Windows */ -# elif defined(_WIN32) -# define KWSYS_SHARED_FORWARD_LDPATH "PATH" - -/* Guess on this unknown system. */ -# else -# define KWSYS_SHARED_FORWARD_LDD "ldd" -# define KWSYS_SHARED_FORWARD_LDD_N 1 -# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH" -# endif - -# ifdef KWSYS_SHARED_FORWARD_ESCAPE_ARGV -typedef struct kwsys_sf_arg_info_s -{ - const char* arg; - int size; - int quote; -} kwsys_sf_arg_info; - -static kwsys_sf_arg_info kwsys_sf_get_arg_info(const char* in) -{ - /* Initialize information. */ - kwsys_sf_arg_info info; - - /* String iterator. */ - const char* c; - - /* Keep track of how many backslashes have been encountered in a row. */ - int windows_backslashes = 0; - - /* Start with the length of the original argument, plus one for - either a terminating null or a separating space. */ - info.arg = in; - info.size = (int)strlen(in) + 1; - info.quote = 0; - - /* Scan the string for characters that require escaping or quoting. */ - for (c = in; *c; ++c) { - /* Check whether this character needs quotes. */ - if (strchr(" \t?'#&<>|^", *c)) { - info.quote = 1; - } - - /* On Windows only backslashes and double-quotes need escaping. */ - if (*c == '\\') { - /* Found a backslash. It may need to be escaped later. */ - ++windows_backslashes; - } else if (*c == '"') { - /* Found a double-quote. We need to escape it and all - immediately preceding backslashes. 
*/ - info.size += windows_backslashes + 1; - windows_backslashes = 0; - } else { - /* Found another character. This eliminates the possibility - that any immediately preceding backslashes will be - escaped. */ - windows_backslashes = 0; - } - } - - /* Check whether the argument needs surrounding quotes. */ - if (info.quote) { - /* Surrounding quotes are needed. Allocate space for them. */ - info.size += 2; - - /* We must escape all ending backslashes when quoting on windows. */ - info.size += windows_backslashes; - } - - return info; -} - -static char* kwsys_sf_get_arg(kwsys_sf_arg_info info, char* out) -{ - /* String iterator. */ - const char* c; - - /* Keep track of how many backslashes have been encountered in a row. */ - int windows_backslashes = 0; - - /* Whether the argument must be quoted. */ - if (info.quote) { - /* Add the opening quote for this argument. */ - *out++ = '"'; - } - - /* Scan the string for characters that require escaping or quoting. */ - for (c = info.arg; *c; ++c) { - /* On Windows only backslashes and double-quotes need escaping. */ - if (*c == '\\') { - /* Found a backslash. It may need to be escaped later. */ - ++windows_backslashes; - } else if (*c == '"') { - /* Found a double-quote. Escape all immediately preceding - backslashes. */ - while (windows_backslashes > 0) { - --windows_backslashes; - *out++ = '\\'; - } - - /* Add the backslash to escape the double-quote. */ - *out++ = '\\'; - } else { - /* We encountered a normal character. This eliminates any - escaping needed for preceding backslashes. */ - windows_backslashes = 0; - } - - /* Store this character. */ - *out++ = *c; - } - - if (info.quote) { - /* Add enough backslashes to escape any trailing ones. */ - while (windows_backslashes > 0) { - --windows_backslashes; - *out++ = '\\'; - } - - /* Add the closing quote for this argument. */ - *out++ = '"'; - } - - /* Store a terminating null without incrementing. */ - *out = 0; - - return out; -} -# endif - -/* Function to convert a logical or relative path to a physical full path. */ -static int kwsys_shared_forward_realpath(const char* in_path, char* out_path) -{ -# if defined(_WIN32) && !defined(__CYGWIN__) - /* Implementation for Windows. */ - DWORD n = - GetFullPathNameA(in_path, KWSYS_SHARED_FORWARD_MAXPATH, out_path, 0); - return n > 0 && n <= KWSYS_SHARED_FORWARD_MAXPATH; -# else - /* Implementation for UNIX. 
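
/* [Editorial note -- not part of the original kwsys sources]  The two helpers
   above implement Windows-style argument quoting: backslashes that precede a
   double quote (or the closing quote) are doubled, the quote itself is
   escaped, and the whole argument is wrapped in quotes.  A simplified sketch
   that always quotes, instead of first measuring the escaped size: */
#include <cstdio>
#include <string>

static std::string quote_arg(const std::string& in)
{
  std::string out = "\"";
  std::size_t backslashes = 0;
  for (char c : in) {
    if (c == '\\') {
      ++backslashes; /* may need doubling later */
      out += c;
    } else if (c == '"') {
      out.append(backslashes + 1, '\\'); /* double them, then escape the quote */
      out += '"';
      backslashes = 0;
    } else {
      backslashes = 0;
      out += c;
    }
  }
  out.append(backslashes, '\\'); /* escape any trailing backslashes */
  out += '"';
  return out;
}

int main()
{
  std::printf("%s\n", quote_arg("say \"hi\"").c_str()); /* "say \"hi\"" */
  std::printf("%s\n", quote_arg("C:\\dir\\").c_str());  /* "C:\dir\\"   */
  return 0;
}
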
*/ - return realpath(in_path, out_path) != 0; -# endif -} - -static int kwsys_shared_forward_samepath(const char* file1, const char* file2) -{ -# if defined(_WIN32) - int result = 0; - HANDLE h1 = CreateFileA(file1, GENERIC_READ, FILE_SHARE_READ, NULL, - OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); - HANDLE h2 = CreateFileA(file2, GENERIC_READ, FILE_SHARE_READ, NULL, - OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); - if (h1 != INVALID_HANDLE_VALUE && h2 != INVALID_HANDLE_VALUE) { - BY_HANDLE_FILE_INFORMATION fi1; - BY_HANDLE_FILE_INFORMATION fi2; - GetFileInformationByHandle(h1, &fi1); - GetFileInformationByHandle(h2, &fi2); - result = (fi1.dwVolumeSerialNumber == fi2.dwVolumeSerialNumber && - fi1.nFileIndexHigh == fi2.nFileIndexHigh && - fi1.nFileIndexLow == fi2.nFileIndexLow); - } - CloseHandle(h1); - CloseHandle(h2); - return result; -# else - struct stat fs1, fs2; - return (stat(file1, &fs1) == 0 && stat(file2, &fs2) == 0 && - memcmp(&fs2.st_dev, &fs1.st_dev, sizeof(fs1.st_dev)) == 0 && - memcmp(&fs2.st_ino, &fs1.st_ino, sizeof(fs1.st_ino)) == 0 && - fs2.st_size == fs1.st_size); -# endif -} - -/* Function to report a system error message. */ -static void kwsys_shared_forward_strerror(char* message) -{ -# if defined(_WIN32) && !defined(__CYGWIN__) - /* Implementation for Windows. */ - DWORD original = GetLastError(); - DWORD length = - FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, - 0, original, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - message, KWSYS_SHARED_FORWARD_MAXPATH, 0); - if (length < 1 || length > KWSYS_SHARED_FORWARD_MAXPATH) { - /* FormatMessage failed. Use a default message. */ - _snprintf(message, KWSYS_SHARED_FORWARD_MAXPATH, - "Error 0x%X (FormatMessage failed with error 0x%X)", original, - GetLastError()); - } -# else - /* Implementation for UNIX. */ - strcpy(message, strerror(errno)); -# endif -} - -/* Functions to execute a child process. */ -static void kwsys_shared_forward_execvp(const char* cmd, - char const* const* argv) -{ -# ifdef KWSYS_SHARED_FORWARD_ESCAPE_ARGV - /* Count the number of arguments. */ - int argc = 0; - { - char const* const* argvc; - for (argvc = argv; *argvc; ++argvc, ++argc) { - } - } - - /* Create the escaped arguments. */ - { - char** nargv = (char**)malloc((argc + 1) * sizeof(char*)); - int i; - for (i = 0; i < argc; ++i) { - kwsys_sf_arg_info info = kwsys_sf_get_arg_info(argv[i]); - nargv[i] = (char*)malloc(info.size); - kwsys_sf_get_arg(info, nargv[i]); - } - nargv[argc] = 0; - - /* Replace the command line to be used. */ - argv = (char const* const*)nargv; - } -# endif - -/* Invoke the child process. */ -# if defined(_MSC_VER) - _execvp(cmd, argv); -# elif defined(__MINGW32__) && !defined(__MINGW64__) - execvp(cmd, argv); -# else - execvp(cmd, (char* const*)argv); -# endif -} - -/* Function to get the directory containing the given file or directory. */ -static void kwsys_shared_forward_dirname(const char* begin, char* result) -{ - /* Find the location of the last slash. */ - int last_slash_index = -1; - const char* end = begin + strlen(begin); - for (; begin <= end && last_slash_index < 0; --end) { - if (*end == '/' || *end == '\\') { - last_slash_index = (int)(end - begin); - } - } - - /* Handle each case of the index of the last slash. */ - if (last_slash_index < 0) { - /* No slashes. */ - strcpy(result, "."); - } else if (last_slash_index == 0) { - /* Only one leading slash. 
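
/* [Editorial note -- not part of the original kwsys sources]  The UNIX branch
   of kwsys_shared_forward_samepath() above decides that two names refer to
   the same file when stat() reports the same device and inode.  A POSIX-only
   sketch of that check (the Windows branch uses file handles instead): */
#include <cstdio>
#include <sys/stat.h>

static int same_file(const char* a, const char* b)
{
  struct stat sa, sb;
  return stat(a, &sa) == 0 && stat(b, &sb) == 0 && sa.st_dev == sb.st_dev &&
         sa.st_ino == sb.st_ino;
}

int main()
{
  /* The same file reached through a redundant "./" component. */
  std::printf("%d\n", same_file("/bin/sh", "/bin/./sh")); /* 1 on typical systems */
  return 0;
}
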
*/ - strcpy(result, kwsys_shared_forward_path_slash); - } -# if defined(_WIN32) - else if (last_slash_index == 2 && begin[1] == ':') { - /* Only one leading drive letter and slash. */ - strncpy(result, begin, (size_t)last_slash_index); - result[last_slash_index] = KWSYS_SHARED_FORWARD_PATH_SLASH; - result[last_slash_index + 1] = 0; - } -# endif - else { - /* A non-leading slash. */ - strncpy(result, begin, (size_t)last_slash_index); - result[last_slash_index] = 0; - } -} - -/* Function to check if a file exists and is executable. */ -static int kwsys_shared_forward_is_executable(const char* f) -{ -# if defined(_MSC_VER) -# define KWSYS_SHARED_FORWARD_ACCESS _access -# else -# define KWSYS_SHARED_FORWARD_ACCESS access -# endif -# if defined(X_OK) -# define KWSYS_SHARED_FORWARD_ACCESS_OK X_OK -# else -# define KWSYS_SHARED_FORWARD_ACCESS_OK 04 -# endif - if (KWSYS_SHARED_FORWARD_ACCESS(f, KWSYS_SHARED_FORWARD_ACCESS_OK) == 0) { - return 1; - } else { - return 0; - } -} - -/* Function to locate the executable currently running. */ -static int kwsys_shared_forward_self_path(const char* argv0, char* result) -{ - /* Check whether argv0 has a slash. */ - int has_slash = 0; - const char* p = argv0; - for (; *p && !has_slash; ++p) { - if (*p == '/' || *p == '\\') { - has_slash = 1; - } - } - - if (has_slash) { - /* There is a slash. Use the dirname of the given location. */ - kwsys_shared_forward_dirname(argv0, result); - return 1; - } else { - /* There is no slash. Search the PATH for the executable. */ - const char* path = getenv("PATH"); - const char* begin = path; - const char* end = begin + (begin ? strlen(begin) : 0); - const char* first = begin; - while (first != end) { - /* Store the end of this path entry. */ - const char* last; - - /* Skip all path separators. */ - for (; *first && *first == KWSYS_SHARED_FORWARD_PATH_SEP; ++first) - ; - - /* Find the next separator. */ - for (last = first; *last && *last != KWSYS_SHARED_FORWARD_PATH_SEP; - ++last) - ; - - /* If we got a non-empty directory, look for the executable there. */ - if (first < last) { - /* Determine the length without trailing slash. */ - size_t length = (size_t)(last - first); - if (*(last - 1) == '/' || *(last - 1) == '\\') { - --length; - } - - /* Construct the name of the executable in this location. */ - strncpy(result, first, length); - result[length] = KWSYS_SHARED_FORWARD_PATH_SLASH; - strcpy(result + (length) + 1, argv0); - - /* Check if it exists and is executable. */ - if (kwsys_shared_forward_is_executable(result)) { - /* Found it. */ - result[length] = 0; - return 1; - } - } - - /* Move to the next directory in the path. */ - first = last; - } - } - - /* We could not find the executable. */ - return 0; -} - -/* Function to convert a specified path to a full path. If it is not - already full, it is taken relative to the self path. */ -static int kwsys_shared_forward_fullpath(const char* self_path, - const char* in_path, char* result, - const char* desc) -{ - /* Check the specified path type. */ - if (in_path[0] == '/') { - /* Already a full path. */ - strcpy(result, in_path); - } -# if defined(_WIN32) - else if (in_path[0] && in_path[1] == ':') { - /* Already a full path. */ - strcpy(result, in_path); - } -# endif - else { - /* Relative to self path. 
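
/* [Editorial note -- not part of the original kwsys sources]  A compact
   restatement of the dirname logic above using std::string instead of fixed
   buffers; the Windows drive-letter case handled by the original is omitted
   here for brevity. */
#include <cstdio>
#include <string>

static std::string dir_of(const std::string& path)
{
  std::string::size_type pos = path.find_last_of("/\\");
  if (pos == std::string::npos)
    return ".";               /* no slash at all: current directory */
  if (pos == 0)
    return "/";               /* only the leading slash             */
  return path.substr(0, pos); /* strip the last component           */
}

int main()
{
  std::printf("%s\n", dir_of("foo").c_str());          /* .        */
  std::printf("%s\n", dir_of("/foo").c_str());         /* /        */
  std::printf("%s\n", dir_of("/usr/bin/foo").c_str()); /* /usr/bin */
  return 0;
}
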
*/ - char temp_path[KWSYS_SHARED_FORWARD_MAXPATH]; - strcpy(temp_path, self_path); - strcat(temp_path, kwsys_shared_forward_path_slash); - strcat(temp_path, in_path); - if (!kwsys_shared_forward_realpath(temp_path, result)) { - if (desc) { - char msgbuf[KWSYS_SHARED_FORWARD_MAXPATH]; - kwsys_shared_forward_strerror(msgbuf); - fprintf(stderr, "Error converting %s \"%s\" to real path: %s\n", desc, - temp_path, msgbuf); - } - return 0; - } - } - return 1; -} - -/* Function to compute the library search path and executable name - based on the self path. */ -static int kwsys_shared_forward_get_settings(const char* self_path, - char* ldpath, char* exe) -{ - /* Possible search paths. */ - static const char* search_path_build[] = { KWSYS_SHARED_FORWARD_PATH_BUILD, - 0 }; - static const char* search_path_install[] = { - KWSYS_SHARED_FORWARD_PATH_INSTALL, 0 - }; - - /* Chosen paths. */ - const char** search_path; - const char* exe_path; - -/* Get the real name of the build and self paths. */ -# if defined(KWSYS_SHARED_FORWARD_CONFIG_NAME) - char build_path[] = - KWSYS_SHARED_FORWARD_DIR_BUILD "/" KWSYS_SHARED_FORWARD_CONFIG_NAME; - char self_path_logical[KWSYS_SHARED_FORWARD_MAXPATH]; -# else - char build_path[] = KWSYS_SHARED_FORWARD_DIR_BUILD; - const char* self_path_logical = self_path; -# endif - char build_path_real[KWSYS_SHARED_FORWARD_MAXPATH]; - char self_path_real[KWSYS_SHARED_FORWARD_MAXPATH]; - if (!kwsys_shared_forward_realpath(self_path, self_path_real)) { - char msgbuf[KWSYS_SHARED_FORWARD_MAXPATH]; - kwsys_shared_forward_strerror(msgbuf); - fprintf(stderr, "Error converting self path \"%s\" to real path: %s\n", - self_path, msgbuf); - return 0; - } - - /* Check whether we are running in the build tree or an install tree. */ - if (kwsys_shared_forward_realpath(build_path, build_path_real) && - kwsys_shared_forward_samepath(self_path_real, build_path_real)) { - /* Running in build tree. Use the build path and exe. */ - search_path = search_path_build; -# if defined(_WIN32) - exe_path = KWSYS_SHARED_FORWARD_EXE_BUILD ".exe"; -# else - exe_path = KWSYS_SHARED_FORWARD_EXE_BUILD; -# endif - -# if defined(KWSYS_SHARED_FORWARD_CONFIG_NAME) - /* Remove the configuration directory from self_path. */ - kwsys_shared_forward_dirname(self_path, self_path_logical); -# endif - } else { - /* Running in install tree. Use the install path and exe. */ - search_path = search_path_install; -# if defined(_WIN32) - exe_path = KWSYS_SHARED_FORWARD_EXE_INSTALL ".exe"; -# else - exe_path = KWSYS_SHARED_FORWARD_EXE_INSTALL; -# endif - -# if defined(KWSYS_SHARED_FORWARD_CONFIG_NAME) - /* Use the original self path directory. */ - strcpy(self_path_logical, self_path); -# endif - } - - /* Construct the runtime search path. */ - { - const char** dir; - for (dir = search_path; *dir; ++dir) { - /* Add separator between path components. */ - if (dir != search_path) { - strcat(ldpath, kwsys_shared_forward_path_sep); - } - - /* Add this path component. */ - if (!kwsys_shared_forward_fullpath(self_path_logical, *dir, - ldpath + strlen(ldpath), - "runtime path entry")) { - return 0; - } - } - } - - /* Construct the executable location. */ - if (!kwsys_shared_forward_fullpath(self_path_logical, exe_path, exe, - "executable file")) { - return 0; - } - return 1; -} - -/* Function to print why execution of a command line failed. 
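
/* [Editorial note -- not part of the original kwsys sources]  The search-path
   assembly above joins directories with the platform path separator into a
   single VAR=value string suitable for putenv().  A sketch with made-up
   directory names: */
#include <cstddef>
#include <cstdio>
#include <string>

int main()
{
#if defined(_WIN32) && !defined(__CYGWIN__)
  const char sep = ';';
#else
  const char sep = ':';
#endif
  const char* dirs[] = { "/opt/foo-1.2/lib", "/opt/bar-4.5/lib" }; /* hypothetical */
  std::string ldpath = "LD_LIBRARY_PATH=";
  for (std::size_t i = 0; i < sizeof(dirs) / sizeof(dirs[0]); ++i) {
    if (i)
      ldpath += sep; /* separator only between components */
    ldpath += dirs[i];
  }
  std::printf("%s\n", ldpath.c_str());
  /* prints: LD_LIBRARY_PATH=/opt/foo-1.2/lib:/opt/bar-4.5/lib */
  return 0;
}
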
*/ -static void kwsys_shared_forward_print_failure(char const* const* argv) -{ - char msg[KWSYS_SHARED_FORWARD_MAXPATH]; - char const* const* arg = argv; - kwsys_shared_forward_strerror(msg); - fprintf(stderr, "Error running"); - for (; *arg; ++arg) { - fprintf(stderr, " \"%s\"", *arg); - } - fprintf(stderr, ": %s\n", msg); -} - -/* Static storage space to store the updated environment variable. */ -static char kwsys_shared_forward_ldpath[65535] = - KWSYS_SHARED_FORWARD_LDPATH "="; - -/* Main driver function to be called from main. */ -static int @KWSYS_NAMESPACE@_shared_forward_to_real(int argc, char** argv_in) -{ - char const** argv = (char const**)argv_in; - /* Get the directory containing this executable. */ - char self_path[KWSYS_SHARED_FORWARD_MAXPATH]; - if (kwsys_shared_forward_self_path(argv[0], self_path)) { - /* Found this executable. Use it to get the library directory. */ - char exe[KWSYS_SHARED_FORWARD_MAXPATH]; - if (kwsys_shared_forward_get_settings(self_path, - kwsys_shared_forward_ldpath, exe)) { - /* Append the old runtime search path. */ - const char* old_ldpath = getenv(KWSYS_SHARED_FORWARD_LDPATH); - if (old_ldpath) { - strcat(kwsys_shared_forward_ldpath, kwsys_shared_forward_path_sep); - strcat(kwsys_shared_forward_ldpath, old_ldpath); - } - - /* Store the environment variable. */ - putenv(kwsys_shared_forward_ldpath); - -# if defined(KWSYS_SHARED_FORWARD_OPTION_COMMAND) - /* Look for the command line replacement option. */ - if (argc > 1 && - strcmp(argv[1], KWSYS_SHARED_FORWARD_OPTION_COMMAND) == 0) { - if (argc > 2) { - /* Use the command line given. */ - strcpy(exe, argv[2]); - argv += 2; - argc -= 2; - } else { - /* The option was not given an executable. */ - fprintf(stderr, - "Option " KWSYS_SHARED_FORWARD_OPTION_COMMAND - " must be followed by a command line.\n"); - return 1; - } - } -# endif - -# if defined(KWSYS_SHARED_FORWARD_OPTION_PRINT) - /* Look for the print command line option. */ - if (argc > 1 && - strcmp(argv[1], KWSYS_SHARED_FORWARD_OPTION_PRINT) == 0) { - fprintf(stdout, "%s\n", kwsys_shared_forward_ldpath); - fprintf(stdout, "%s\n", exe); - return 0; - } -# endif - -# if defined(KWSYS_SHARED_FORWARD_OPTION_LDD) - /* Look for the ldd command line option. */ - if (argc > 1 && strcmp(argv[1], KWSYS_SHARED_FORWARD_OPTION_LDD) == 0) { -# if defined(KWSYS_SHARED_FORWARD_LDD) - /* Use the named ldd-like executable and arguments. */ - char const* ldd_argv[] = { KWSYS_SHARED_FORWARD_LDD, 0, 0 }; - ldd_argv[KWSYS_SHARED_FORWARD_LDD_N] = exe; - kwsys_shared_forward_execvp(ldd_argv[0], ldd_argv); - - /* Report why execution failed. */ - kwsys_shared_forward_print_failure(ldd_argv); - return 1; -# else - /* We have no ldd-like executable available on this platform. */ - fprintf(stderr, "No ldd-like tool is known to this executable.\n"); - return 1; -# endif - } -# endif - - /* Replace this process with the real executable. */ - argv[0] = exe; - kwsys_shared_forward_execvp(argv[0], argv); - - /* Report why execution failed. */ - kwsys_shared_forward_print_failure(argv); - } else { - /* Could not convert self path to the library directory. */ - } - } else { - /* Could not find this executable. */ - fprintf(stderr, "Error locating executable \"%s\".\n", argv[0]); - } - - /* Avoid unused argument warning. */ - (void)argc; - - /* Exit with failure. */ - return 1; -} - -/* Restore warning stack. 
*/ -# if defined(__clang__) && defined(__has_warning) -# if __has_warning("-Wcast-qual") -# pragma clang diagnostic pop -# endif -# endif - -#else -# error "@KWSYS_NAMESPACE@/SharedForward.h should be included only once." -#endif diff --git a/test/API/driver/kwsys/String.c b/test/API/driver/kwsys/String.c deleted file mode 100644 index daf7ad1a0f5..00000000000 --- a/test/API/driver/kwsys/String.c +++ /dev/null @@ -1,100 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifdef KWSYS_STRING_C -/* -All code in this source file is conditionally compiled to work-around -template definition auto-search on VMS. Other source files in this -directory that use the stl string cause the compiler to load this -source to try to get the definition of the string template. This -condition blocks the compiler from seeing the symbols defined here. -*/ -# include "kwsysPrivate.h" -# include KWSYS_HEADER(String.h) - -/* Work-around CMake dependency scanning limitation. This must - duplicate the above list of headers. */ -# if 0 -# include "String.h.in" -# endif - -/* Select an implementation for strcasecmp. */ -# if defined(_MSC_VER) -# define KWSYS_STRING_USE_STRICMP -# include -# elif defined(__GNUC__) -# define KWSYS_STRING_USE_STRCASECMP -# include -# else -/* Table to convert upper case letters to lower case and leave all - other characters alone. */ -static char kwsysString_strcasecmp_tolower[] = { - '\000', '\001', '\002', '\003', '\004', '\005', '\006', '\007', '\010', - '\011', '\012', '\013', '\014', '\015', '\016', '\017', '\020', '\021', - '\022', '\023', '\024', '\025', '\026', '\027', '\030', '\031', '\032', - '\033', '\034', '\035', '\036', '\037', '\040', '\041', '\042', '\043', - '\044', '\045', '\046', '\047', '\050', '\051', '\052', '\053', '\054', - '\055', '\056', '\057', '\060', '\061', '\062', '\063', '\064', '\065', - '\066', '\067', '\070', '\071', '\072', '\073', '\074', '\075', '\076', - '\077', '\100', '\141', '\142', '\143', '\144', '\145', '\146', '\147', - '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157', '\160', - '\161', '\162', '\163', '\164', '\165', '\166', '\167', '\170', '\171', - '\172', '\133', '\134', '\135', '\136', '\137', '\140', '\141', '\142', - '\143', '\144', '\145', '\146', '\147', '\150', '\151', '\152', '\153', - '\154', '\155', '\156', '\157', '\160', '\161', '\162', '\163', '\164', - '\165', '\166', '\167', '\170', '\171', '\172', '\173', '\174', '\175', - '\176', '\177', '\200', '\201', '\202', '\203', '\204', '\205', '\206', - '\207', '\210', '\211', '\212', '\213', '\214', '\215', '\216', '\217', - '\220', '\221', '\222', '\223', '\224', '\225', '\226', '\227', '\230', - '\231', '\232', '\233', '\234', '\235', '\236', '\237', '\240', '\241', - '\242', '\243', '\244', '\245', '\246', '\247', '\250', '\251', '\252', - '\253', '\254', '\255', '\256', '\257', '\260', '\261', '\262', '\263', - '\264', '\265', '\266', '\267', '\270', '\271', '\272', '\273', '\274', - '\275', '\276', '\277', '\300', '\301', '\302', '\303', '\304', '\305', - '\306', '\307', '\310', '\311', '\312', '\313', '\314', '\315', '\316', - '\317', '\320', '\321', '\322', '\323', '\324', '\325', '\326', '\327', - '\330', '\331', '\332', '\333', '\334', '\335', '\336', '\337', '\340', - '\341', '\342', '\343', '\344', '\345', '\346', '\347', '\350', '\351', - '\352', '\353', '\354', '\355', '\356', '\357', '\360', '\361', '\362', - '\363', '\364', '\365', '\366', 
'\367', '\370', '\371', '\372', '\373', - '\374', '\375', '\376', '\377' -}; -# endif - -/*--------------------------------------------------------------------------*/ -int kwsysString_strcasecmp(const char* lhs, const char* rhs) -{ -# if defined(KWSYS_STRING_USE_STRICMP) - return _stricmp(lhs, rhs); -# elif defined(KWSYS_STRING_USE_STRCASECMP) - return strcasecmp(lhs, rhs); -# else - const char* const lower = kwsysString_strcasecmp_tolower; - unsigned char const* us1 = (unsigned char const*)lhs; - unsigned char const* us2 = (unsigned char const*)rhs; - int result; - while ((result = lower[*us1] - lower[*us2++], result == 0) && *us1++) { - } - return result; -# endif -} - -/*--------------------------------------------------------------------------*/ -int kwsysString_strncasecmp(const char* lhs, const char* rhs, size_t n) -{ -# if defined(KWSYS_STRING_USE_STRICMP) - return _strnicmp(lhs, rhs, n); -# elif defined(KWSYS_STRING_USE_STRCASECMP) - return strncasecmp(lhs, rhs, n); -# else - const char* const lower = kwsysString_strcasecmp_tolower; - unsigned char const* us1 = (unsigned char const*)lhs; - unsigned char const* us2 = (unsigned char const*)rhs; - int result = 0; - while (n && (result = lower[*us1] - lower[*us2++], result == 0) && *us1++) { - --n; - } - return result; -# endif -} - -#endif /* KWSYS_STRING_C */ diff --git a/test/API/driver/kwsys/String.h.in b/test/API/driver/kwsys/String.h.in deleted file mode 100644 index 7c9348af134..00000000000 --- a/test/API/driver/kwsys/String.h.in +++ /dev/null @@ -1,57 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_String_h -#define @KWSYS_NAMESPACE@_String_h - -#include <@KWSYS_NAMESPACE@/Configure.h> - -#include /* size_t */ - -/* Redefine all public interface symbol names to be in the proper - namespace. These macros are used internally to kwsys only, and are - not visible to user code. Use kwsysHeaderDump.pl to reproduce - these macros after making changes to the interface. */ -#if !defined(KWSYS_NAMESPACE) -# define kwsys_ns(x) @KWSYS_NAMESPACE@##x -# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT -#endif -#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# define kwsysString_strcasecmp kwsys_ns(String_strcasecmp) -# define kwsysString_strncasecmp kwsys_ns(String_strncasecmp) -#endif - -#if defined(__cplusplus) -extern "C" { -#endif - -/** - * Compare two strings ignoring the case of the characters. The - * integer returned is negative, zero, or positive if the first string - * is found to be less than, equal to, or greater than the second - * string, respectively. - */ -kwsysEXPORT int kwsysString_strcasecmp(const char* lhs, const char* rhs); - -/** - * Identical to String_strcasecmp except that only the first n - * characters are considered. - */ -kwsysEXPORT int kwsysString_strncasecmp(const char* lhs, const char* rhs, - size_t n); - -#if defined(__cplusplus) -} /* extern "C" */ -#endif - -/* If we are building a kwsys .c or .cxx file, let it use these macros. - Otherwise, undefine them to keep the namespace clean. 
*/ -#if !defined(KWSYS_NAMESPACE) -# undef kwsys_ns -# undef kwsysEXPORT -# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# undef kwsysString_strcasecmp -# undef kwsysString_strncasecmp -# endif -#endif - -#endif diff --git a/test/API/driver/kwsys/String.hxx.in b/test/API/driver/kwsys/String.hxx.in deleted file mode 100644 index db1cf22a93a..00000000000 --- a/test/API/driver/kwsys/String.hxx.in +++ /dev/null @@ -1,65 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_String_hxx -#define @KWSYS_NAMESPACE@_String_hxx - -#include - -namespace @KWSYS_NAMESPACE@ { - -/** \class String - * \brief Short-name version of the STL basic_string class template. - * - * The standard library "string" type is actually a typedef for - * "basic_string<..long argument list..>". This string class is - * simply a subclass of this type with the same interface so that the - * name is shorter in debugging symbols and error messages. - */ -class String : public std::string -{ - /** The original string type. */ - typedef std::string stl_string; - -public: - /** String member types. */ - typedef stl_string::value_type value_type; - typedef stl_string::pointer pointer; - typedef stl_string::reference reference; - typedef stl_string::const_reference const_reference; - typedef stl_string::size_type size_type; - typedef stl_string::difference_type difference_type; - typedef stl_string::iterator iterator; - typedef stl_string::const_iterator const_iterator; - typedef stl_string::reverse_iterator reverse_iterator; - typedef stl_string::const_reverse_iterator const_reverse_iterator; - - /** String constructors. */ - String() - : stl_string() - { - } - String(const value_type* s) - : stl_string(s) - { - } - String(const value_type* s, size_type n) - : stl_string(s, n) - { - } - String(const stl_string& s, size_type pos = 0, size_type n = npos) - : stl_string(s, pos, n) - { - } -}; // End Class: String - -#if defined(__WATCOMC__) -inline bool operator<(String const& l, String const& r) -{ - return (static_cast(l) < - static_cast(r)); -} -#endif - -} // namespace @KWSYS_NAMESPACE@ - -#endif diff --git a/test/API/driver/kwsys/System.c b/test/API/driver/kwsys/System.c deleted file mode 100644 index d43cc6fbbce..00000000000 --- a/test/API/driver/kwsys/System.c +++ /dev/null @@ -1,236 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(System.h) - -/* Work-around CMake dependency scanning limitation. This must - duplicate the above list of headers. */ -#if 0 -# include "System.h.in" -#endif - -#include /* isspace */ -#include /* ptrdiff_t */ -#include /* malloc, free */ -#include /* memcpy */ - -#include - -#if defined(KWSYS_C_HAS_PTRDIFF_T) && KWSYS_C_HAS_PTRDIFF_T -typedef ptrdiff_t kwsysSystem_ptrdiff_t; -#else -typedef int kwsysSystem_ptrdiff_t; -#endif - -static int kwsysSystem__AppendByte(char* local, char** begin, char** end, - int* size, char c) -{ - /* Allocate space for the character. 
*/ - if ((*end - *begin) >= *size) { - kwsysSystem_ptrdiff_t length = *end - *begin; - char* newBuffer = (char*)malloc((size_t)(*size * 2)); - if (!newBuffer) { - return 0; - } - memcpy(newBuffer, *begin, (size_t)(length) * sizeof(char)); - if (*begin != local) { - free(*begin); - } - *begin = newBuffer; - *end = *begin + length; - *size *= 2; - } - - /* Store the character. */ - *(*end)++ = c; - return 1; -} - -static int kwsysSystem__AppendArgument(char** local, char*** begin, - char*** end, int* size, char* arg_local, - char** arg_begin, char** arg_end, - int* arg_size) -{ - /* Append a null-terminator to the argument string. */ - if (!kwsysSystem__AppendByte(arg_local, arg_begin, arg_end, arg_size, - '\0')) { - return 0; - } - - /* Allocate space for the argument pointer. */ - if ((*end - *begin) >= *size) { - kwsysSystem_ptrdiff_t length = *end - *begin; - char** newPointers = (char**)malloc((size_t)(*size) * 2 * sizeof(char*)); - if (!newPointers) { - return 0; - } - memcpy(newPointers, *begin, (size_t)(length) * sizeof(char*)); - if (*begin != local) { - free(*begin); - } - *begin = newPointers; - *end = *begin + length; - *size *= 2; - } - - /* Allocate space for the argument string. */ - **end = (char*)malloc((size_t)(*arg_end - *arg_begin)); - if (!**end) { - return 0; - } - - /* Store the argument in the command array. */ - memcpy(**end, *arg_begin, (size_t)(*arg_end - *arg_begin)); - ++(*end); - - /* Reset the argument to be empty. */ - *arg_end = *arg_begin; - - return 1; -} - -#define KWSYSPE_LOCAL_BYTE_COUNT 1024 -#define KWSYSPE_LOCAL_ARGS_COUNT 32 -static char** kwsysSystem__ParseUnixCommand(const char* command, int flags) -{ - /* Create a buffer for argument pointers during parsing. */ - char* local_pointers[KWSYSPE_LOCAL_ARGS_COUNT]; - int pointers_size = KWSYSPE_LOCAL_ARGS_COUNT; - char** pointer_begin = local_pointers; - char** pointer_end = pointer_begin; - - /* Create a buffer for argument strings during parsing. */ - char local_buffer[KWSYSPE_LOCAL_BYTE_COUNT]; - int buffer_size = KWSYSPE_LOCAL_BYTE_COUNT; - char* buffer_begin = local_buffer; - char* buffer_end = buffer_begin; - - /* Parse the command string. Try to behave like a UNIX shell. */ - char** newCommand = 0; - const char* c = command; - int in_argument = 0; - int in_escape = 0; - int in_single = 0; - int in_double = 0; - int failed = 0; - for (; *c; ++c) { - if (in_escape) { - /* This character is escaped so do no special handling. */ - if (!in_argument) { - in_argument = 1; - } - if (!kwsysSystem__AppendByte(local_buffer, &buffer_begin, &buffer_end, - &buffer_size, *c)) { - failed = 1; - break; - } - in_escape = 0; - } else if (*c == '\\') { - /* The next character should be escaped. */ - in_escape = 1; - } else if (*c == '\'' && !in_double) { - /* Enter or exit single-quote state. */ - if (in_single) { - in_single = 0; - } else { - in_single = 1; - if (!in_argument) { - in_argument = 1; - } - } - } else if (*c == '"' && !in_single) { - /* Enter or exit double-quote state. */ - if (in_double) { - in_double = 0; - } else { - in_double = 1; - if (!in_argument) { - in_argument = 1; - } - } - } else if (isspace((unsigned char)*c)) { - if (in_argument) { - if (in_single || in_double) { - /* This space belongs to a quoted argument. */ - if (!kwsysSystem__AppendByte(local_buffer, &buffer_begin, - &buffer_end, &buffer_size, *c)) { - failed = 1; - break; - } - } else { - /* This argument has been terminated by whitespace. 
*/ - if (!kwsysSystem__AppendArgument( - local_pointers, &pointer_begin, &pointer_end, &pointers_size, - local_buffer, &buffer_begin, &buffer_end, &buffer_size)) { - failed = 1; - break; - } - in_argument = 0; - } - } - } else { - /* This character belong to an argument. */ - if (!in_argument) { - in_argument = 1; - } - if (!kwsysSystem__AppendByte(local_buffer, &buffer_begin, &buffer_end, - &buffer_size, *c)) { - failed = 1; - break; - } - } - } - - /* Finish the last argument. */ - if (in_argument) { - if (!kwsysSystem__AppendArgument( - local_pointers, &pointer_begin, &pointer_end, &pointers_size, - local_buffer, &buffer_begin, &buffer_end, &buffer_size)) { - failed = 1; - } - } - - /* If we still have memory allocate space for the new command - buffer. */ - if (!failed) { - kwsysSystem_ptrdiff_t n = pointer_end - pointer_begin; - newCommand = (char**)malloc((size_t)(n + 1) * sizeof(char*)); - } - - if (newCommand) { - /* Copy the arguments into the new command buffer. */ - kwsysSystem_ptrdiff_t n = pointer_end - pointer_begin; - memcpy(newCommand, pointer_begin, sizeof(char*) * (size_t)(n)); - newCommand[n] = 0; - } else { - /* Free arguments already allocated. */ - while (pointer_end != pointer_begin) { - free(*(--pointer_end)); - } - } - - /* Free temporary buffers. */ - if (pointer_begin != local_pointers) { - free(pointer_begin); - } - if (buffer_begin != local_buffer) { - free(buffer_begin); - } - - /* The flags argument is currently unused. */ - (void)flags; - - /* Return the final command buffer. */ - return newCommand; -} - -char** kwsysSystem_Parse_CommandForUnix(const char* command, int flags) -{ - /* Validate the flags. */ - if (flags != 0) { - return 0; - } - - /* Forward to our internal implementation. */ - return kwsysSystem__ParseUnixCommand(command, flags); -} diff --git a/test/API/driver/kwsys/System.h.in b/test/API/driver/kwsys/System.h.in deleted file mode 100644 index a9d4f5e690b..00000000000 --- a/test/API/driver/kwsys/System.h.in +++ /dev/null @@ -1,60 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_System_h -#define @KWSYS_NAMESPACE@_System_h - -#include <@KWSYS_NAMESPACE@/Configure.h> - -/* Redefine all public interface symbol names to be in the proper - namespace. These macros are used internally to kwsys only, and are - not visible to user code. Use kwsysHeaderDump.pl to reproduce - these macros after making changes to the interface. */ -#if !defined(KWSYS_NAMESPACE) -# define kwsys_ns(x) @KWSYS_NAMESPACE@##x -# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT -#endif -#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# define kwsysSystem_Parse_CommandForUnix \ - kwsys_ns(System_Parse_CommandForUnix) -#endif - -#if defined(__cplusplus) -extern "C" { -#endif - -/** - * Parse a unix-style command line string into separate arguments. - * - * On success, returns a pointer to an array of pointers to individual - * argument strings. Each string is null-terminated and the last - * entry in the array is a NULL pointer (just like argv). It is the - * caller's responsibility to free() the strings and the array of - * pointers to them. - * - * On failure, returns NULL. Failure occurs only on invalid flags or - * when memory cannot be allocated; never due to content of the input - * string. Missing close-quotes are treated as if the necessary - * closing quote appears. 
- * - * By default single- and double-quoted arguments are supported, and - * any character may be escaped by a backslash. The flags argument is - * reserved for future use, and must be zero (or the call will fail). - */ -kwsysEXPORT char** kwsysSystem_Parse_CommandForUnix(const char* command, - int flags); - -#if defined(__cplusplus) -} /* extern "C" */ -#endif - -/* If we are building a kwsys .c or .cxx file, let it use these macros. - Otherwise, undefine them to keep the namespace clean. */ -#if !defined(KWSYS_NAMESPACE) -# undef kwsys_ns -# undef kwsysEXPORT -# if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# undef kwsysSystem_Parse_CommandForUnix -# endif -#endif - -#endif diff --git a/test/API/driver/kwsys/SystemInformation.cxx b/test/API/driver/kwsys/SystemInformation.cxx deleted file mode 100644 index 6ec6e48ffb0..00000000000 --- a/test/API/driver/kwsys/SystemInformation.cxx +++ /dev/null @@ -1,5466 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#if defined(_WIN32) -# define NOMINMAX // use our min,max -# if !defined(_WIN32_WINNT) && defined(_MSC_VER) && _MSC_VER >= 1800 -# define _WIN32_WINNT 0x0600 // vista -# endif -# if !defined(_WIN32_WINNT) && !(defined(_MSC_VER) && _MSC_VER < 1300) -# define _WIN32_WINNT 0x0501 -# endif -# include // WSADATA, include before sys/types.h -#endif - -#if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE) -# define _GNU_SOURCE -#endif - -// TODO: -// We need an alternative implementation for many functions in this file -// when USE_ASM_INSTRUCTIONS gets defined as 0. -// -// Consider using these on Win32/Win64 for some of them: -// -// IsProcessorFeaturePresent -// http://msdn.microsoft.com/en-us/library/ms724482(VS.85).aspx -// -// GetProcessMemoryInfo -// http://msdn.microsoft.com/en-us/library/ms683219(VS.85).aspx - -#include "kwsysPrivate.h" -#include KWSYS_HEADER(SystemInformation.hxx) -#include KWSYS_HEADER(Process.h) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. 
-#if 0 -# include "Process.h.in" -# include "SystemInformation.hxx.in" -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(_WIN32) -# include -# if defined(_MSC_VER) && _MSC_VER >= 1800 -# define KWSYS_WINDOWS_DEPRECATED_GetVersionEx -# endif -# include -# if defined(KWSYS_SYS_HAS_PSAPI) -# include -# endif -# if !defined(siginfo_t) -typedef int siginfo_t; -# endif -#else -# include - -# include // extern int errno; -# include -# include -# include // getrlimit -# include -# include // int uname(struct utsname *buf); -# include -#endif - -#if defined(__CYGWIN__) && !defined(_WIN32) -# include -# undef _WIN32 -#endif - -#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \ - defined(__DragonFly__) -# include -# include -# include -# include -# include -# if defined(KWSYS_SYS_HAS_IFADDRS_H) -# include -# include -# define KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN -# endif -#endif - -#if defined(KWSYS_SYS_HAS_MACHINE_CPU_H) -# include -#endif - -#ifdef __APPLE__ -# include -# include -# include -# include -# include -# include -# include -# include -# if defined(KWSYS_SYS_HAS_IFADDRS_H) -# include -# include -# define KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN -# endif -# if !(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ - 0 >= 1050) -# undef KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE -# endif -#endif - -#if defined(__linux) || defined(__sun) || defined(_SCO_DS) || \ - defined(__GLIBC__) || defined(__GNU__) -# include -# include -# include -# if defined(KWSYS_SYS_HAS_IFADDRS_H) -# include -# include -# if defined(__LSB_VERSION__) -/* LSB has no getifaddrs */ -# elif defined(__ANDROID_API__) && __ANDROID_API__ < 24 -/* Android has no getifaddrs prior to API 24. */ -# else -# define KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN -# endif -# endif -# if defined(KWSYS_CXX_HAS_RLIMIT64) -typedef struct rlimit64 ResourceLimitType; -# define GetResourceLimit getrlimit64 -# else -typedef struct rlimit ResourceLimitType; -# define GetResourceLimit getrlimit -# endif -#elif defined(__hpux) -# include -# include -# if defined(KWSYS_SYS_HAS_MPCTL_H) -# include -# endif -#endif - -#ifdef __HAIKU__ -# include -#endif - -#if defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE) -# include -# if defined(KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE) -# include -# endif -# if defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP) -# include -# endif -#else -# undef KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE -# undef KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP -#endif - -#include // int isdigit(int c); -#include -#include -#include -#include - -#if defined(KWSYS_USE_LONG_LONG) -# if defined(KWSYS_IOS_HAS_OSTREAM_LONG_LONG) -# define iostreamLongLong(x) (x) -# else -# define iostreamLongLong(x) ((long)(x)) -# endif -#elif defined(KWSYS_USE___INT64) -# if defined(KWSYS_IOS_HAS_OSTREAM___INT64) -# define iostreamLongLong(x) (x) -# else -# define iostreamLongLong(x) ((long)(x)) -# endif -#else -# error "No Long Long" -#endif - -#if defined(KWSYS_CXX_HAS_ATOLL) -# define atoLongLong atoll -#else -# if defined(KWSYS_CXX_HAS__ATOI64) -# define atoLongLong _atoi64 -# elif defined(KWSYS_CXX_HAS_ATOL) -# define atoLongLong atol -# else -# define atoLongLong atoi -# endif -#endif - -#if defined(_MSC_VER) && (_MSC_VER >= 1300) && !defined(_WIN64) && \ - !defined(__clang__) -# define USE_ASM_INSTRUCTIONS 1 -#else -# define USE_ASM_INSTRUCTIONS 0 -#endif - -#if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(__clang__) -# include -# define USE_CPUID_INTRINSICS 1 
-#else -# define USE_CPUID_INTRINSICS 0 -#endif - -#if USE_ASM_INSTRUCTIONS || USE_CPUID_INTRINSICS || \ - defined(KWSYS_CXX_HAS_BORLAND_ASM_CPUID) -# define USE_CPUID 1 -#else -# define USE_CPUID 0 -#endif - -#if USE_CPUID - -# define CPUID_AWARE_COMPILER - -/** - * call CPUID instruction - * - * Will return false if the instruction failed. - */ -static bool call_cpuid(int select, int result[4]) -{ -# if USE_CPUID_INTRINSICS - __cpuid(result, select); - return true; -# else - int tmp[4]; -# if defined(_MSC_VER) - // Use SEH to determine CPUID presence - __try { - _asm { -# ifdef CPUID_AWARE_COMPILER - ; we must push/pop the registers <> writes to, as the - ; optimiser does not know about <>, and so does not expect - ; these registers to change. - push eax - push ebx - push ecx - push edx -# endif - ; <> - mov eax, select -# ifdef CPUID_AWARE_COMPILER - cpuid -# else - _asm _emit 0x0f - _asm _emit 0xa2 -# endif - mov tmp[0 * TYPE int], eax - mov tmp[1 * TYPE int], ebx - mov tmp[2 * TYPE int], ecx - mov tmp[3 * TYPE int], edx - -# ifdef CPUID_AWARE_COMPILER - pop edx - pop ecx - pop ebx - pop eax -# endif - } - } __except (1) { - return false; - } - - memcpy(result, tmp, sizeof(tmp)); -# elif defined(KWSYS_CXX_HAS_BORLAND_ASM_CPUID) - unsigned int a, b, c, d; - __asm { - mov EAX, select; - cpuid - mov a, EAX; - mov b, EBX; - mov c, ECX; - mov d, EDX; - } - - result[0] = a; - result[1] = b; - result[2] = c; - result[3] = d; -# endif - - // The cpuid instruction succeeded. - return true; -# endif -} -#endif - -namespace KWSYS_NAMESPACE { -template -T min(T a, T b) -{ - return a < b ? a : b; -} - -extern "C" { -typedef void (*SigAction)(int, siginfo_t*, void*); -} - -// Define SystemInformationImplementation class -typedef void (*DELAY_FUNC)(unsigned int uiMS); - -class SystemInformationImplementation -{ -public: - typedef SystemInformation::LongLong LongLong; - SystemInformationImplementation(); - ~SystemInformationImplementation(); - - const char* GetVendorString(); - const char* GetVendorID(); - std::string GetTypeID(); - std::string GetFamilyID(); - std::string GetModelID(); - std::string GetModelName(); - std::string GetSteppingCode(); - const char* GetExtendedProcessorName(); - const char* GetProcessorSerialNumber(); - int GetProcessorCacheSize(); - unsigned int GetLogicalProcessorsPerPhysical(); - float GetProcessorClockFrequency(); - int GetProcessorAPICID(); - int GetProcessorCacheXSize(long int); - bool DoesCPUSupportFeature(long int); - - const char* GetOSName(); - const char* GetHostname(); - int GetFullyQualifiedDomainName(std::string& fqdn); - const char* GetOSRelease(); - const char* GetOSVersion(); - const char* GetOSPlatform(); - - bool Is64Bits(); - - unsigned int GetNumberOfLogicalCPU(); // per physical cpu - unsigned int GetNumberOfPhysicalCPU(); - - bool DoesCPUSupportCPUID(); - - // Retrieve memory information in MiB. - size_t GetTotalVirtualMemory(); - size_t GetAvailableVirtualMemory(); - size_t GetTotalPhysicalMemory(); - size_t GetAvailablePhysicalMemory(); - - LongLong GetProcessId(); - - // Retrieve memory information in KiB. - LongLong GetHostMemoryTotal(); - LongLong GetHostMemoryAvailable(const char* envVarName); - LongLong GetHostMemoryUsed(); - - LongLong GetProcMemoryAvailable(const char* hostLimitEnvVarName, - const char* procLimitEnvVarName); - LongLong GetProcMemoryUsed(); - - double GetLoadAverage(); - - // enable/disable stack trace signal handler. 
- static void SetStackTraceOnError(int enable); - - // get current stack - static std::string GetProgramStack(int firstFrame, int wholePath); - - /** Run the different checks */ - void RunCPUCheck(); - void RunOSCheck(); - void RunMemoryCheck(); - -public: - typedef struct tagID - { - int Type; - int Family; - int Model; - int Revision; - int ExtendedFamily; - int ExtendedModel; - std::string ProcessorName; - std::string Vendor; - std::string SerialNumber; - std::string ModelName; - } ID; - - typedef struct tagCPUPowerManagement - { - bool HasVoltageID; - bool HasFrequencyID; - bool HasTempSenseDiode; - } CPUPowerManagement; - - typedef struct tagCPUExtendedFeatures - { - bool Has3DNow; - bool Has3DNowPlus; - bool SupportsMP; - bool HasMMXPlus; - bool HasSSEMMX; - unsigned int LogicalProcessorsPerPhysical; - int APIC_ID; - CPUPowerManagement PowerManagement; - } CPUExtendedFeatures; - - typedef struct CPUtagFeatures - { - bool HasFPU; - bool HasTSC; - bool HasMMX; - bool HasSSE; - bool HasSSEFP; - bool HasSSE2; - bool HasIA64; - bool HasAPIC; - bool HasCMOV; - bool HasMTRR; - bool HasACPI; - bool HasSerial; - bool HasThermal; - int CPUSpeed; - int L1CacheSize; - int L2CacheSize; - int L3CacheSize; - CPUExtendedFeatures ExtendedFeatures; - } CPUFeatures; - - enum Manufacturer - { - AMD, - Intel, - NSC, - UMC, - Cyrix, - NexGen, - IDT, - Rise, - Transmeta, - Sun, - IBM, - Motorola, - HP, - Hygon, - UnknownManufacturer - }; - -protected: - // For windows - bool RetrieveCPUFeatures(); - bool RetrieveCPUIdentity(); - bool RetrieveCPUCacheDetails(); - bool RetrieveClassicalCPUCacheDetails(); - bool RetrieveCPUClockSpeed(); - bool RetrieveClassicalCPUClockSpeed(); - bool RetrieveCPUExtendedLevelSupport(int); - bool RetrieveExtendedCPUFeatures(); - bool RetrieveProcessorSerialNumber(); - bool RetrieveCPUPowerManagement(); - bool RetrieveClassicalCPUIdentity(); - bool RetrieveExtendedCPUIdentity(); - - // Processor information - Manufacturer ChipManufacturer; - CPUFeatures Features; - ID ChipID; - float CPUSpeedInMHz; - unsigned int NumberOfLogicalCPU; - unsigned int NumberOfPhysicalCPU; - - void CPUCountWindows(); // For windows - unsigned char GetAPICId(); // For windows - bool IsSMTSupported(); - static LongLong GetCyclesDifference(DELAY_FUNC, unsigned int); // For windows - - // For Linux and Cygwin, /proc/cpuinfo formats are slightly different - bool RetreiveInformationFromCpuInfoFile(); - std::string ExtractValueFromCpuInfoFile(std::string buffer, const char* word, - size_t init = 0); - - bool QueryLinuxMemory(); - bool QueryCygwinMemory(); - - static void Delay(unsigned int); - static void DelayOverhead(unsigned int); - - void FindManufacturer(const std::string& family = ""); - - // For Mac - bool ParseSysCtl(); - int CallSwVers(const char* arg, std::string& ver); - void TrimNewline(std::string&); - std::string ExtractValueFromSysCtl(const char* word); - std::string SysCtlBuffer; - - // For Solaris - bool QuerySolarisMemory(); - bool QuerySolarisProcessor(); - std::string ParseValueFromKStat(const char* arguments); - std::string RunProcess(std::vector args); - - // For Haiku OS - bool QueryHaikuInfo(); - - // For QNX - bool QueryQNXMemory(); - bool QueryQNXProcessor(); - - // For OpenBSD, FreeBSD, NetBSD, DragonFly - bool QueryBSDMemory(); - bool QueryBSDProcessor(); - - // For HP-UX - bool QueryHPUXMemory(); - bool QueryHPUXProcessor(); - - // For Microsoft Windows - bool QueryWindowsMemory(); - - // For AIX - bool QueryAIXMemory(); - - bool QueryProcessorBySysconf(); - bool 
QueryProcessor(); - - // Evaluate the memory information. - bool QueryMemoryBySysconf(); - bool QueryMemory(); - size_t TotalVirtualMemory; - size_t AvailableVirtualMemory; - size_t TotalPhysicalMemory; - size_t AvailablePhysicalMemory; - - size_t CurrentPositionInFile; - - // Operating System information - bool QueryOSInformation(); - std::string OSName; - std::string Hostname; - std::string OSRelease; - std::string OSVersion; - std::string OSPlatform; - bool OSIs64Bit; -}; - -SystemInformation::SystemInformation() -{ - this->Implementation = new SystemInformationImplementation; -} - -SystemInformation::~SystemInformation() -{ - delete this->Implementation; -} - -const char* SystemInformation::GetVendorString() -{ - return this->Implementation->GetVendorString(); -} - -const char* SystemInformation::GetVendorID() -{ - return this->Implementation->GetVendorID(); -} - -std::string SystemInformation::GetTypeID() -{ - return this->Implementation->GetTypeID(); -} - -std::string SystemInformation::GetFamilyID() -{ - return this->Implementation->GetFamilyID(); -} - -std::string SystemInformation::GetModelID() -{ - return this->Implementation->GetModelID(); -} - -std::string SystemInformation::GetModelName() -{ - return this->Implementation->GetModelName(); -} - -std::string SystemInformation::GetSteppingCode() -{ - return this->Implementation->GetSteppingCode(); -} - -const char* SystemInformation::GetExtendedProcessorName() -{ - return this->Implementation->GetExtendedProcessorName(); -} - -const char* SystemInformation::GetProcessorSerialNumber() -{ - return this->Implementation->GetProcessorSerialNumber(); -} - -int SystemInformation::GetProcessorCacheSize() -{ - return this->Implementation->GetProcessorCacheSize(); -} - -unsigned int SystemInformation::GetLogicalProcessorsPerPhysical() -{ - return this->Implementation->GetLogicalProcessorsPerPhysical(); -} - -float SystemInformation::GetProcessorClockFrequency() -{ - return this->Implementation->GetProcessorClockFrequency(); -} - -int SystemInformation::GetProcessorAPICID() -{ - return this->Implementation->GetProcessorAPICID(); -} - -int SystemInformation::GetProcessorCacheXSize(long int l) -{ - return this->Implementation->GetProcessorCacheXSize(l); -} - -bool SystemInformation::DoesCPUSupportFeature(long int i) -{ - return this->Implementation->DoesCPUSupportFeature(i); -} - -std::string SystemInformation::GetCPUDescription() -{ - std::ostringstream oss; - oss << this->GetNumberOfPhysicalCPU() << " core "; - if (this->GetModelName().empty()) { - oss << this->GetProcessorClockFrequency() << " MHz " - << this->GetVendorString() << " " << this->GetExtendedProcessorName(); - } else { - oss << this->GetModelName(); - } - - // remove extra spaces - std::string tmp = oss.str(); - size_t pos; - while ((pos = tmp.find(" ")) != std::string::npos) { - tmp.replace(pos, 2, " "); - } - - return tmp; -} - -const char* SystemInformation::GetOSName() -{ - return this->Implementation->GetOSName(); -} - -const char* SystemInformation::GetHostname() -{ - return this->Implementation->GetHostname(); -} - -std::string SystemInformation::GetFullyQualifiedDomainName() -{ - std::string fqdn; - this->Implementation->GetFullyQualifiedDomainName(fqdn); - return fqdn; -} - -const char* SystemInformation::GetOSRelease() -{ - return this->Implementation->GetOSRelease(); -} - -const char* SystemInformation::GetOSVersion() -{ - return this->Implementation->GetOSVersion(); -} - -const char* SystemInformation::GetOSPlatform() -{ - return 
this->Implementation->GetOSPlatform(); -} - -int SystemInformation::GetOSIsWindows() -{ -#if defined(_WIN32) - return 1; -#else - return 0; -#endif -} - -int SystemInformation::GetOSIsLinux() -{ -#if defined(__linux) - return 1; -#else - return 0; -#endif -} - -int SystemInformation::GetOSIsApple() -{ -#if defined(__APPLE__) - return 1; -#else - return 0; -#endif -} - -std::string SystemInformation::GetOSDescription() -{ - std::ostringstream oss; - oss << this->GetOSName() << " " << this->GetOSRelease() << " " - << this->GetOSVersion(); - - return oss.str(); -} - -bool SystemInformation::Is64Bits() -{ - return this->Implementation->Is64Bits(); -} - -unsigned int SystemInformation::GetNumberOfLogicalCPU() // per physical cpu -{ - return this->Implementation->GetNumberOfLogicalCPU(); -} - -unsigned int SystemInformation::GetNumberOfPhysicalCPU() -{ - return this->Implementation->GetNumberOfPhysicalCPU(); -} - -bool SystemInformation::DoesCPUSupportCPUID() -{ - return this->Implementation->DoesCPUSupportCPUID(); -} - -// Retrieve memory information in MiB. -size_t SystemInformation::GetTotalVirtualMemory() -{ - return this->Implementation->GetTotalVirtualMemory(); -} - -size_t SystemInformation::GetAvailableVirtualMemory() -{ - return this->Implementation->GetAvailableVirtualMemory(); -} - -size_t SystemInformation::GetTotalPhysicalMemory() -{ - return this->Implementation->GetTotalPhysicalMemory(); -} - -size_t SystemInformation::GetAvailablePhysicalMemory() -{ - return this->Implementation->GetAvailablePhysicalMemory(); -} - -std::string SystemInformation::GetMemoryDescription( - const char* hostLimitEnvVarName, const char* procLimitEnvVarName) -{ - std::ostringstream oss; - oss << "Host Total: " << iostreamLongLong(this->GetHostMemoryTotal()) - << " KiB, Host Available: " - << iostreamLongLong(this->GetHostMemoryAvailable(hostLimitEnvVarName)) - << " KiB, Process Available: " - << iostreamLongLong(this->GetProcMemoryAvailable(hostLimitEnvVarName, - procLimitEnvVarName)) - << " KiB"; - return oss.str(); -} - -// host memory info in units of KiB. -SystemInformation::LongLong SystemInformation::GetHostMemoryTotal() -{ - return this->Implementation->GetHostMemoryTotal(); -} - -SystemInformation::LongLong SystemInformation::GetHostMemoryAvailable( - const char* hostLimitEnvVarName) -{ - return this->Implementation->GetHostMemoryAvailable(hostLimitEnvVarName); -} - -SystemInformation::LongLong SystemInformation::GetHostMemoryUsed() -{ - return this->Implementation->GetHostMemoryUsed(); -} - -// process memory info in units of KiB. 
-SystemInformation::LongLong SystemInformation::GetProcMemoryAvailable( - const char* hostLimitEnvVarName, const char* procLimitEnvVarName) -{ - return this->Implementation->GetProcMemoryAvailable(hostLimitEnvVarName, - procLimitEnvVarName); -} - -SystemInformation::LongLong SystemInformation::GetProcMemoryUsed() -{ - return this->Implementation->GetProcMemoryUsed(); -} - -double SystemInformation::GetLoadAverage() -{ - return this->Implementation->GetLoadAverage(); -} - -SystemInformation::LongLong SystemInformation::GetProcessId() -{ - return this->Implementation->GetProcessId(); -} - -void SystemInformation::SetStackTraceOnError(int enable) -{ - SystemInformationImplementation::SetStackTraceOnError(enable); -} - -std::string SystemInformation::GetProgramStack(int firstFrame, int wholePath) -{ - return SystemInformationImplementation::GetProgramStack(firstFrame, - wholePath); -} - -/** Run the different checks */ -void SystemInformation::RunCPUCheck() -{ - this->Implementation->RunCPUCheck(); -} - -void SystemInformation::RunOSCheck() -{ - this->Implementation->RunOSCheck(); -} - -void SystemInformation::RunMemoryCheck() -{ - this->Implementation->RunMemoryCheck(); -} - -// SystemInformationImplementation starts here - -#if USE_CPUID -# define STORE_TLBCACHE_INFO(x, y) x = (x < (y)) ? (y) : x -# define TLBCACHE_INFO_UNITS (15) -#endif - -#if USE_ASM_INSTRUCTIONS -# define CLASSICAL_CPU_FREQ_LOOP 10000000 -# define RDTSC_INSTRUCTION _asm _emit 0x0f _asm _emit 0x31 -#endif - -#define INITIAL_APIC_ID_BITS 0xFF000000 -// initial APIC ID for the processor this code is running on. -// Default value = 0xff if HT is not supported - -// Hide implementation details in an anonymous namespace. -namespace { -// ***************************************************************************** -#if defined(__linux) || defined(__APPLE__) -int LoadLines(FILE* file, std::vector& lines) -{ - // Load each line in the given file into a the vector. 
- int nRead = 0; - const int bufSize = 1024; - char buf[bufSize] = { '\0' }; - while (!feof(file) && !ferror(file)) { - errno = 0; - if (fgets(buf, bufSize, file) == nullptr) { - if (ferror(file) && (errno == EINTR)) { - clearerr(file); - } - continue; - } - char* pBuf = buf; - while (*pBuf) { - if (*pBuf == '\n') - *pBuf = '\0'; - pBuf += 1; - } - lines.push_back(buf); - ++nRead; - } - if (ferror(file)) { - return 0; - } - return nRead; -} - -# if defined(__linux) -// ***************************************************************************** -int LoadLines(const char* fileName, std::vector& lines) -{ - FILE* file = fopen(fileName, "r"); - if (file == 0) { - return 0; - } - int nRead = LoadLines(file, lines); - fclose(file); - return nRead; -} -# endif - -// **************************************************************************** -template -int NameValue(std::vector const& lines, std::string const& name, - T& value) -{ - size_t nLines = lines.size(); - for (size_t i = 0; i < nLines; ++i) { - size_t at = lines[i].find(name); - if (at == std::string::npos) { - continue; - } - std::istringstream is(lines[i].substr(at + name.size())); - is >> value; - return 0; - } - return -1; -} -#endif - -#if defined(__linux) -// **************************************************************************** -template -int GetFieldsFromFile(const char* fileName, const char** fieldNames, T* values) -{ - std::vector fields; - if (!LoadLines(fileName, fields)) { - return -1; - } - int i = 0; - while (fieldNames[i] != nullptr) { - int ierr = NameValue(fields, fieldNames[i], values[i]); - if (ierr) { - return -(i + 2); - } - i += 1; - } - return 0; -} - -// **************************************************************************** -template -int GetFieldFromFile(const char* fileName, const char* fieldName, T& value) -{ - const char* fieldNames[2] = { fieldName, nullptr }; - T values[1] = { T(0) }; - int ierr = GetFieldsFromFile(fileName, fieldNames, values); - if (ierr) { - return ierr; - } - value = values[0]; - return 0; -} -#endif - -// **************************************************************************** -#if defined(__APPLE__) -template -int GetFieldsFromCommand(const char* command, const char** fieldNames, - T* values) -{ - FILE* file = popen(command, "r"); - if (file == nullptr) { - return -1; - } - std::vector fields; - int nl = LoadLines(file, fields); - pclose(file); - if (nl == 0) { - return -1; - } - int i = 0; - while (fieldNames[i] != nullptr) { - int ierr = NameValue(fields, fieldNames[i], values[i]); - if (ierr) { - return -(i + 2); - } - i += 1; - } - return 0; -} -#endif - -// **************************************************************************** -#if !defined(_WIN32) && !defined(__MINGW32__) && !defined(__CYGWIN__) -void StacktraceSignalHandler(int sigNo, siginfo_t* sigInfo, - void* /*sigContext*/) -{ -# if defined(__linux) || defined(__APPLE__) - std::ostringstream oss; - oss << std::endl - << "=========================================================" - << std::endl - << "Process id " << getpid() << " "; - switch (sigNo) { - case SIGINT: - oss << "Caught SIGINT"; - break; - - case SIGTERM: - oss << "Caught SIGTERM"; - break; - - case SIGABRT: - oss << "Caught SIGABRT"; - break; - - case SIGFPE: - oss << "Caught SIGFPE at " << (sigInfo->si_addr == nullptr ? 
"0x" : "") - << sigInfo->si_addr << " "; - switch (sigInfo->si_code) { -# if defined(FPE_INTDIV) - case FPE_INTDIV: - oss << "integer division by zero"; - break; -# endif - -# if defined(FPE_INTOVF) - case FPE_INTOVF: - oss << "integer overflow"; - break; -# endif - - case FPE_FLTDIV: - oss << "floating point divide by zero"; - break; - - case FPE_FLTOVF: - oss << "floating point overflow"; - break; - - case FPE_FLTUND: - oss << "floating point underflow"; - break; - - case FPE_FLTRES: - oss << "floating point inexact result"; - break; - - case FPE_FLTINV: - oss << "floating point invalid operation"; - break; - -# if defined(FPE_FLTSUB) - case FPE_FLTSUB: - oss << "floating point subscript out of range"; - break; -# endif - - default: - oss << "code " << sigInfo->si_code; - break; - } - break; - - case SIGSEGV: - oss << "Caught SIGSEGV at " << (sigInfo->si_addr == nullptr ? "0x" : "") - << sigInfo->si_addr << " "; - switch (sigInfo->si_code) { - case SEGV_MAPERR: - oss << "address not mapped to object"; - break; - - case SEGV_ACCERR: - oss << "invalid permission for mapped object"; - break; - - default: - oss << "code " << sigInfo->si_code; - break; - } - break; - - case SIGBUS: - oss << "Caught SIGBUS at " << (sigInfo->si_addr == nullptr ? "0x" : "") - << sigInfo->si_addr << " "; - switch (sigInfo->si_code) { - case BUS_ADRALN: - oss << "invalid address alignment"; - break; - -# if defined(BUS_ADRERR) - case BUS_ADRERR: - oss << "nonexistent physical address"; - break; -# endif - -# if defined(BUS_OBJERR) - case BUS_OBJERR: - oss << "object-specific hardware error"; - break; -# endif - -# if defined(BUS_MCEERR_AR) - case BUS_MCEERR_AR: - oss << "Hardware memory error consumed on a machine check; action " - "required."; - break; -# endif - -# if defined(BUS_MCEERR_AO) - case BUS_MCEERR_AO: - oss << "Hardware memory error detected in process but not consumed; " - "action optional."; - break; -# endif - - default: - oss << "code " << sigInfo->si_code; - break; - } - break; - - case SIGILL: - oss << "Caught SIGILL at " << (sigInfo->si_addr == nullptr ? "0x" : "") - << sigInfo->si_addr << " "; - switch (sigInfo->si_code) { - case ILL_ILLOPC: - oss << "illegal opcode"; - break; - -# if defined(ILL_ILLOPN) - case ILL_ILLOPN: - oss << "illegal operand"; - break; -# endif - -# if defined(ILL_ILLADR) - case ILL_ILLADR: - oss << "illegal addressing mode."; - break; -# endif - - case ILL_ILLTRP: - oss << "illegal trap"; - break; - - case ILL_PRVOPC: - oss << "privileged opcode"; - break; - -# if defined(ILL_PRVREG) - case ILL_PRVREG: - oss << "privileged register"; - break; -# endif - -# if defined(ILL_COPROC) - case ILL_COPROC: - oss << "co-processor error"; - break; -# endif - -# if defined(ILL_BADSTK) - case ILL_BADSTK: - oss << "internal stack error"; - break; -# endif - - default: - oss << "code " << sigInfo->si_code; - break; - } - break; - - default: - oss << "Caught " << sigNo << " code " << sigInfo->si_code; - break; - } - oss << std::endl - << "Program Stack:" << std::endl - << SystemInformationImplementation::GetProgramStack(2, 0) - << "=========================================================" - << std::endl; - std::cerr << oss.str() << std::endl; - - // restore the previously registered handlers - // and abort - SystemInformationImplementation::SetStackTraceOnError(0); - abort(); -# else - // avoid warning C4100 - (void)sigNo; - (void)sigInfo; -# endif -} -#endif - -#if defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE) -# define safes(_arg) ((_arg) ? 
(_arg) : "???") - -// Description: -// A container for symbol properties. Each instance -// must be Initialized. -class SymbolProperties -{ -public: - SymbolProperties(); - - // Description: - // The SymbolProperties instance must be initialized by - // passing a stack address. - void Initialize(void* address); - - // Description: - // Get the symbol's stack address. - void* GetAddress() const { return this->Address; } - - // Description: - // If not set paths will be removed. eg, from a binary - // or source file. - void SetReportPath(int rp) { this->ReportPath = rp; } - - // Description: - // Set/Get the name of the binary file that the symbol - // is found in. - void SetBinary(const char* binary) { this->Binary = safes(binary); } - - std::string GetBinary() const; - - // Description: - // Set the name of the function that the symbol is found in. - // If c++ demangling is supported it will be demangled. - void SetFunction(const char* function) - { - this->Function = this->Demangle(function); - } - - std::string GetFunction() const { return this->Function; } - - // Description: - // Set/Get the name of the source file where the symbol - // is defined. - void SetSourceFile(const char* sourcefile) - { - this->SourceFile = safes(sourcefile); - } - - std::string GetSourceFile() const - { - return this->GetFileName(this->SourceFile); - } - - // Description: - // Set/Get the line number where the symbol is defined - void SetLineNumber(long linenumber) { this->LineNumber = linenumber; } - long GetLineNumber() const { return this->LineNumber; } - - // Description: - // Set the address where the binary image is mapped - // into memory. - void SetBinaryBaseAddress(void* address) - { - this->BinaryBaseAddress = address; - } - -private: - void* GetRealAddress() const - { - return (void*)((char*)this->Address - (char*)this->BinaryBaseAddress); - } - - std::string GetFileName(const std::string& path) const; - std::string Demangle(const char* symbol) const; - -private: - std::string Binary; - void* BinaryBaseAddress; - void* Address; - std::string SourceFile; - std::string Function; - long LineNumber; - int ReportPath; -}; - -std::ostream& operator<<(std::ostream& os, const SymbolProperties& sp) -{ -# if defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP) - os << std::hex << sp.GetAddress() << " : " << sp.GetFunction() << " [(" - << sp.GetBinary() << ") " << sp.GetSourceFile() << ":" << std::dec - << sp.GetLineNumber() << "]"; -# elif defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE) - void* addr = sp.GetAddress(); - char** syminfo = backtrace_symbols(&addr, 1); - os << safes(syminfo[0]); - free(syminfo); -# else - (void)os; - (void)sp; -# endif - return os; -} - -SymbolProperties::SymbolProperties() -{ - // not using an initializer list - // to avoid some PGI compiler warnings - this->SetBinary("???"); - this->SetBinaryBaseAddress(nullptr); - this->Address = nullptr; - this->SetSourceFile("???"); - this->SetFunction("???"); - this->SetLineNumber(-1); - this->SetReportPath(0); - // avoid PGI compiler warnings - this->GetRealAddress(); - this->GetFunction(); - this->GetSourceFile(); - this->GetLineNumber(); -} - -std::string SymbolProperties::GetFileName(const std::string& path) const -{ - std::string file(path); - if (!this->ReportPath) { - size_t at = file.rfind("/"); - if (at != std::string::npos) { - file = file.substr(at + 1); - } - } - return file; -} - -std::string SymbolProperties::GetBinary() const -{ -// only linux has proc fs -# if defined(__linux__) - if (this->Binary == "/proc/self/exe") { - 
std::string binary; - char buf[1024] = { '\0' }; - ssize_t ll = 0; - if ((ll = readlink("/proc/self/exe", buf, 1024)) > 0 && ll < 1024) { - buf[ll] = '\0'; - binary = buf; - } else { - binary = "/proc/self/exe"; - } - return this->GetFileName(binary); - } -# endif - return this->GetFileName(this->Binary); -} - -std::string SymbolProperties::Demangle(const char* symbol) const -{ - std::string result = safes(symbol); -# if defined(KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE) - int status = 0; - size_t bufferLen = 1024; - char* buffer = (char*)malloc(1024); - char* demangledSymbol = - abi::__cxa_demangle(symbol, buffer, &bufferLen, &status); - if (!status) { - result = demangledSymbol; - } - free(buffer); -# else - (void)symbol; -# endif - return result; -} - -void SymbolProperties::Initialize(void* address) -{ - this->Address = address; -# if defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP) - // first fallback option can demangle c++ functions - Dl_info info; - int ierr = dladdr(this->Address, &info); - if (ierr && info.dli_sname && info.dli_saddr) { - this->SetBinary(info.dli_fname); - this->SetFunction(info.dli_sname); - } -# else -// second fallback use builtin backtrace_symbols -// to decode the bactrace. -# endif -} -#endif // don't define this class if we're not using it - -#if defined(_WIN32) || defined(__CYGWIN__) -# define KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes -#endif -#if defined(_MSC_VER) && _MSC_VER < 1310 -# undef KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes -#endif -#if defined(KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes) -double calculateCPULoad(unsigned __int64 idleTicks, - unsigned __int64 totalTicks) -{ - static double previousLoad = -0.0; - static unsigned __int64 previousIdleTicks = 0; - static unsigned __int64 previousTotalTicks = 0; - - unsigned __int64 const idleTicksSinceLastTime = - idleTicks - previousIdleTicks; - unsigned __int64 const totalTicksSinceLastTime = - totalTicks - previousTotalTicks; - - double load; - if (previousTotalTicks == 0 || totalTicksSinceLastTime == 0) { - // No new information. Use previous result. - load = previousLoad; - } else { - // Calculate load since last time. - load = 1.0 - double(idleTicksSinceLastTime) / totalTicksSinceLastTime; - - // Smooth if possible. 
- if (previousLoad > 0) { - load = 0.25 * load + 0.75 * previousLoad; - } - } - - previousLoad = load; - previousIdleTicks = idleTicks; - previousTotalTicks = totalTicks; - - return load; -} - -unsigned __int64 fileTimeToUInt64(FILETIME const& ft) -{ - LARGE_INTEGER out; - out.HighPart = ft.dwHighDateTime; - out.LowPart = ft.dwLowDateTime; - return out.QuadPart; -} -#endif - -} // anonymous namespace - -SystemInformationImplementation::SystemInformationImplementation() -{ - this->TotalVirtualMemory = 0; - this->AvailableVirtualMemory = 0; - this->TotalPhysicalMemory = 0; - this->AvailablePhysicalMemory = 0; - this->CurrentPositionInFile = 0; - this->ChipManufacturer = UnknownManufacturer; - memset(&this->Features, 0, sizeof(CPUFeatures)); - this->ChipID.Type = 0; - this->ChipID.Family = 0; - this->ChipID.Model = 0; - this->ChipID.Revision = 0; - this->ChipID.ExtendedFamily = 0; - this->ChipID.ExtendedModel = 0; - this->CPUSpeedInMHz = 0; - this->NumberOfLogicalCPU = 0; - this->NumberOfPhysicalCPU = 0; - this->OSName = ""; - this->Hostname = ""; - this->OSRelease = ""; - this->OSVersion = ""; - this->OSPlatform = ""; - this->OSIs64Bit = (sizeof(void*) == 8); -} - -SystemInformationImplementation::~SystemInformationImplementation() -{ -} - -void SystemInformationImplementation::RunCPUCheck() -{ -#ifdef _WIN32 - // Check to see if this processor supports CPUID. - bool supportsCPUID = DoesCPUSupportCPUID(); - - if (supportsCPUID) { - // Retrieve the CPU details. - RetrieveCPUIdentity(); - this->FindManufacturer(); - RetrieveCPUFeatures(); - } - - // These two may be called without support for the CPUID instruction. - // (But if the instruction is there, they should be called *after* - // the above call to RetrieveCPUIdentity... that's why the two if - // blocks exist with the same "if (supportsCPUID)" logic... - // - if (!RetrieveCPUClockSpeed()) { - RetrieveClassicalCPUClockSpeed(); - } - - if (supportsCPUID) { - // Retrieve cache information. - if (!RetrieveCPUCacheDetails()) { - RetrieveClassicalCPUCacheDetails(); - } - - // Retrieve the extended CPU details. - if (!RetrieveExtendedCPUIdentity()) { - RetrieveClassicalCPUIdentity(); - } - - RetrieveExtendedCPUFeatures(); - RetrieveCPUPowerManagement(); - - // Now attempt to retrieve the serial number (if possible). 
- RetrieveProcessorSerialNumber(); - } - - this->CPUCountWindows(); - -#elif defined(__APPLE__) - this->ParseSysCtl(); -#elif defined(__SVR4) && defined(__sun) - this->QuerySolarisProcessor(); -#elif defined(__HAIKU__) - this->QueryHaikuInfo(); -#elif defined(__QNX__) - this->QueryQNXProcessor(); -#elif defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \ - defined(__DragonFly__) - this->QueryBSDProcessor(); -#elif defined(__hpux) - this->QueryHPUXProcessor(); -#elif defined(__linux) || defined(__CYGWIN__) - this->RetreiveInformationFromCpuInfoFile(); -#else - this->QueryProcessor(); -#endif -} - -void SystemInformationImplementation::RunOSCheck() -{ - this->QueryOSInformation(); -} - -void SystemInformationImplementation::RunMemoryCheck() -{ -#if defined(__APPLE__) - this->ParseSysCtl(); -#elif defined(__SVR4) && defined(__sun) - this->QuerySolarisMemory(); -#elif defined(__HAIKU__) - this->QueryHaikuInfo(); -#elif defined(__QNX__) - this->QueryQNXMemory(); -#elif defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \ - defined(__DragonFly__) - this->QueryBSDMemory(); -#elif defined(__CYGWIN__) - this->QueryCygwinMemory(); -#elif defined(_WIN32) - this->QueryWindowsMemory(); -#elif defined(__hpux) - this->QueryHPUXMemory(); -#elif defined(__linux) - this->QueryLinuxMemory(); -#elif defined(_AIX) - this->QueryAIXMemory(); -#else - this->QueryMemory(); -#endif -} - -/** Get the vendor string */ -const char* SystemInformationImplementation::GetVendorString() -{ - return this->ChipID.Vendor.c_str(); -} - -/** Get the OS Name */ -const char* SystemInformationImplementation::GetOSName() -{ - return this->OSName.c_str(); -} - -/** Get the hostname */ -const char* SystemInformationImplementation::GetHostname() -{ - if (this->Hostname.empty()) { - this->Hostname = "localhost"; -#if defined(_WIN32) - WORD wVersionRequested; - WSADATA wsaData; - char name[255]; - wVersionRequested = MAKEWORD(2, 0); - if (WSAStartup(wVersionRequested, &wsaData) == 0) { - gethostname(name, sizeof(name)); - WSACleanup(); - } - this->Hostname = name; -#else - struct utsname unameInfo; - int errorFlag = uname(&unameInfo); - if (errorFlag == 0) { - this->Hostname = unameInfo.nodename; - } -#endif - } - return this->Hostname.c_str(); -} - -/** Get the FQDN */ -int SystemInformationImplementation::GetFullyQualifiedDomainName( - std::string& fqdn) -{ - // in the event of absolute failure return localhost. - fqdn = "localhost"; - -#if defined(_WIN32) - int ierr; - // TODO - a more robust implementation for windows, see comments - // in unix implementation. - WSADATA wsaData; - WORD ver = MAKEWORD(2, 0); - ierr = WSAStartup(ver, &wsaData); - if (ierr) { - return -1; - } - - char base[256] = { '\0' }; - ierr = gethostname(base, 256); - if (ierr) { - WSACleanup(); - return -2; - } - fqdn = base; - - HOSTENT* hent = gethostbyname(base); - if (hent) { - fqdn = hent->h_name; - } - - WSACleanup(); - return 0; - -#elif defined(KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN) - // gethostname typical returns an alias for loopback interface - // we want the fully qualified domain name. Because there are - // any number of interfaces on this system we look for the - // first of these that contains the name returned by gethostname - // and is longer. failing that we return gethostname and indicate - // with a failure code. Return of a failure code is not necessarily - // an indication of an error. 
for instance gethostname may return - // the fully qualified domain name, or there may not be one if the - // system lives on a private network such as in the case of a cluster - // node. - - int ierr = 0; - char base[NI_MAXHOST]; - ierr = gethostname(base, NI_MAXHOST); - if (ierr) { - return -1; - } - size_t baseSize = strlen(base); - fqdn = base; - - struct ifaddrs* ifas; - struct ifaddrs* ifa; - ierr = getifaddrs(&ifas); - if (ierr) { - return -2; - } - - for (ifa = ifas; ifa != nullptr; ifa = ifa->ifa_next) { - int fam = ifa->ifa_addr ? ifa->ifa_addr->sa_family : -1; - // Skip Loopback interfaces - if (((fam == AF_INET) || (fam == AF_INET6)) && - !(ifa->ifa_flags & IFF_LOOPBACK)) { - char host[NI_MAXHOST] = { '\0' }; - - const size_t addrlen = (fam == AF_INET ? sizeof(struct sockaddr_in) - : sizeof(struct sockaddr_in6)); - - ierr = getnameinfo(ifa->ifa_addr, static_cast(addrlen), host, - NI_MAXHOST, nullptr, 0, NI_NAMEREQD); - if (ierr) { - // don't report the failure now since we may succeed on another - // interface. If all attempts fail then return the failure code. - ierr = -3; - continue; - } - - std::string candidate = host; - if ((candidate.find(base) != std::string::npos) && - baseSize < candidate.size()) { - // success, stop now. - ierr = 0; - fqdn = candidate; - break; - } - } - } - freeifaddrs(ifas); - - return ierr; -#else - /* TODO: Implement on more platforms. */ - fqdn = this->GetHostname(); - return -1; -#endif -} - -/** Get the OS release */ -const char* SystemInformationImplementation::GetOSRelease() -{ - return this->OSRelease.c_str(); -} - -/** Get the OS version */ -const char* SystemInformationImplementation::GetOSVersion() -{ - return this->OSVersion.c_str(); -} - -/** Get the OS platform */ -const char* SystemInformationImplementation::GetOSPlatform() -{ - return this->OSPlatform.c_str(); -} - -/** Get the vendor ID */ -const char* SystemInformationImplementation::GetVendorID() -{ - // Return the vendor ID. - switch (this->ChipManufacturer) { - case Intel: - return "Intel Corporation"; - case AMD: - return "Advanced Micro Devices"; - case NSC: - return "National Semiconductor"; - case Cyrix: - return "Cyrix Corp., VIA Inc."; - case NexGen: - return "NexGen Inc., Advanced Micro Devices"; - case IDT: - return "IDT\\Centaur, Via Inc."; - case UMC: - return "United Microelectronics Corp."; - case Rise: - return "Rise"; - case Transmeta: - return "Transmeta"; - case Sun: - return "Sun Microelectronics"; - case IBM: - return "IBM"; - case Motorola: - return "Motorola"; - case HP: - return "Hewlett-Packard"; - case Hygon: - return "Chengdu Haiguang IC Design Co., Ltd."; - case UnknownManufacturer: - default: - return "Unknown Manufacturer"; - } -} - -/** Return the type ID of the CPU */ -std::string SystemInformationImplementation::GetTypeID() -{ - std::ostringstream str; - str << this->ChipID.Type; - return str.str(); -} - -/** Return the family of the CPU present */ -std::string SystemInformationImplementation::GetFamilyID() -{ - std::ostringstream str; - str << this->ChipID.Family; - return str.str(); -} - -// Return the model of CPU present */ -std::string SystemInformationImplementation::GetModelID() -{ - std::ostringstream str; - str << this->ChipID.Model; - return str.str(); -} - -// Return the model name of CPU present */ -std::string SystemInformationImplementation::GetModelName() -{ - return this->ChipID.ModelName; -} - -/** Return the stepping code of the CPU present. 
*/ -std::string SystemInformationImplementation::GetSteppingCode() -{ - std::ostringstream str; - str << this->ChipID.Revision; - return str.str(); -} - -/** Return the stepping code of the CPU present. */ -const char* SystemInformationImplementation::GetExtendedProcessorName() -{ - return this->ChipID.ProcessorName.c_str(); -} - -/** Return the serial number of the processor - * in hexadecimal: xxxx-xxxx-xxxx-xxxx-xxxx-xxxx. */ -const char* SystemInformationImplementation::GetProcessorSerialNumber() -{ - return this->ChipID.SerialNumber.c_str(); -} - -/** Return the logical processors per physical */ -unsigned int SystemInformationImplementation::GetLogicalProcessorsPerPhysical() -{ - return this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical; -} - -/** Return the processor clock frequency. */ -float SystemInformationImplementation::GetProcessorClockFrequency() -{ - return this->CPUSpeedInMHz; -} - -/** Return the APIC ID. */ -int SystemInformationImplementation::GetProcessorAPICID() -{ - return this->Features.ExtendedFeatures.APIC_ID; -} - -/** Return the L1 cache size. */ -int SystemInformationImplementation::GetProcessorCacheSize() -{ - return this->Features.L1CacheSize; -} - -/** Return the chosen cache size. */ -int SystemInformationImplementation::GetProcessorCacheXSize(long int dwCacheID) -{ - switch (dwCacheID) { - case SystemInformation::CPU_FEATURE_L1CACHE: - return this->Features.L1CacheSize; - case SystemInformation::CPU_FEATURE_L2CACHE: - return this->Features.L2CacheSize; - case SystemInformation::CPU_FEATURE_L3CACHE: - return this->Features.L3CacheSize; - } - return -1; -} - -bool SystemInformationImplementation::DoesCPUSupportFeature(long int dwFeature) -{ - bool bHasFeature = false; - - // Check for MMX instructions. - if (((dwFeature & SystemInformation::CPU_FEATURE_MMX) != 0) && - this->Features.HasMMX) - bHasFeature = true; - - // Check for MMX+ instructions. - if (((dwFeature & SystemInformation::CPU_FEATURE_MMX_PLUS) != 0) && - this->Features.ExtendedFeatures.HasMMXPlus) - bHasFeature = true; - - // Check for SSE FP instructions. - if (((dwFeature & SystemInformation::CPU_FEATURE_SSE) != 0) && - this->Features.HasSSE) - bHasFeature = true; - - // Check for SSE FP instructions. - if (((dwFeature & SystemInformation::CPU_FEATURE_SSE_FP) != 0) && - this->Features.HasSSEFP) - bHasFeature = true; - - // Check for SSE MMX instructions. - if (((dwFeature & SystemInformation::CPU_FEATURE_SSE_MMX) != 0) && - this->Features.ExtendedFeatures.HasSSEMMX) - bHasFeature = true; - - // Check for SSE2 instructions. - if (((dwFeature & SystemInformation::CPU_FEATURE_SSE2) != 0) && - this->Features.HasSSE2) - bHasFeature = true; - - // Check for 3DNow! instructions. - if (((dwFeature & SystemInformation::CPU_FEATURE_AMD_3DNOW) != 0) && - this->Features.ExtendedFeatures.Has3DNow) - bHasFeature = true; - - // Check for 3DNow+ instructions. - if (((dwFeature & SystemInformation::CPU_FEATURE_AMD_3DNOW_PLUS) != 0) && - this->Features.ExtendedFeatures.Has3DNowPlus) - bHasFeature = true; - - // Check for IA64 instructions. - if (((dwFeature & SystemInformation::CPU_FEATURE_IA64) != 0) && - this->Features.HasIA64) - bHasFeature = true; - - // Check for MP capable. - if (((dwFeature & SystemInformation::CPU_FEATURE_MP_CAPABLE) != 0) && - this->Features.ExtendedFeatures.SupportsMP) - bHasFeature = true; - - // Check for a serial number for the processor. 
- if (((dwFeature & SystemInformation::CPU_FEATURE_SERIALNUMBER) != 0) && - this->Features.HasSerial) - bHasFeature = true; - - // Check for a local APIC in the processor. - if (((dwFeature & SystemInformation::CPU_FEATURE_APIC) != 0) && - this->Features.HasAPIC) - bHasFeature = true; - - // Check for CMOV instructions. - if (((dwFeature & SystemInformation::CPU_FEATURE_CMOV) != 0) && - this->Features.HasCMOV) - bHasFeature = true; - - // Check for MTRR instructions. - if (((dwFeature & SystemInformation::CPU_FEATURE_MTRR) != 0) && - this->Features.HasMTRR) - bHasFeature = true; - - // Check for L1 cache size. - if (((dwFeature & SystemInformation::CPU_FEATURE_L1CACHE) != 0) && - (this->Features.L1CacheSize != -1)) - bHasFeature = true; - - // Check for L2 cache size. - if (((dwFeature & SystemInformation::CPU_FEATURE_L2CACHE) != 0) && - (this->Features.L2CacheSize != -1)) - bHasFeature = true; - - // Check for L3 cache size. - if (((dwFeature & SystemInformation::CPU_FEATURE_L3CACHE) != 0) && - (this->Features.L3CacheSize != -1)) - bHasFeature = true; - - // Check for ACPI capability. - if (((dwFeature & SystemInformation::CPU_FEATURE_ACPI) != 0) && - this->Features.HasACPI) - bHasFeature = true; - - // Check for thermal monitor support. - if (((dwFeature & SystemInformation::CPU_FEATURE_THERMALMONITOR) != 0) && - this->Features.HasThermal) - bHasFeature = true; - - // Check for temperature sensing diode support. - if (((dwFeature & SystemInformation::CPU_FEATURE_TEMPSENSEDIODE) != 0) && - this->Features.ExtendedFeatures.PowerManagement.HasTempSenseDiode) - bHasFeature = true; - - // Check for frequency ID support. - if (((dwFeature & SystemInformation::CPU_FEATURE_FREQUENCYID) != 0) && - this->Features.ExtendedFeatures.PowerManagement.HasFrequencyID) - bHasFeature = true; - - // Check for voltage ID support. - if (((dwFeature & SystemInformation::CPU_FEATURE_VOLTAGEID_FREQUENCY) != - 0) && - this->Features.ExtendedFeatures.PowerManagement.HasVoltageID) - bHasFeature = true; - - // Check for FPU support. - if (((dwFeature & SystemInformation::CPU_FEATURE_FPU) != 0) && - this->Features.HasFPU) - bHasFeature = true; - - return bHasFeature; -} - -void SystemInformationImplementation::Delay(unsigned int uiMS) -{ -#ifdef _WIN32 - LARGE_INTEGER Frequency, StartCounter, EndCounter; - __int64 x; - - // Get the frequency of the high performance counter. - if (!QueryPerformanceFrequency(&Frequency)) - return; - x = Frequency.QuadPart / 1000 * uiMS; - - // Get the starting position of the counter. - QueryPerformanceCounter(&StartCounter); - - do { - // Get the ending position of the counter. - QueryPerformanceCounter(&EndCounter); - } while (EndCounter.QuadPart - StartCounter.QuadPart < x); -#endif - (void)uiMS; -} - -bool SystemInformationImplementation::DoesCPUSupportCPUID() -{ -#if USE_CPUID - int dummy[4] = { 0, 0, 0, 0 }; - -# if USE_ASM_INSTRUCTIONS - return call_cpuid(0, dummy); -# else - call_cpuid(0, dummy); - return dummy[0] || dummy[1] || dummy[2] || dummy[3]; -# endif -#else - // Assume no cpuid instruction. - return false; -#endif -} - -bool SystemInformationImplementation::RetrieveCPUFeatures() -{ -#if USE_CPUID - int cpuinfo[4] = { 0, 0, 0, 0 }; - - if (!call_cpuid(1, cpuinfo)) { - return false; - } - - // Retrieve the features of CPU present. 
- this->Features.HasFPU = - ((cpuinfo[3] & 0x00000001) != 0); // FPU Present --> Bit 0 - this->Features.HasTSC = - ((cpuinfo[3] & 0x00000010) != 0); // TSC Present --> Bit 4 - this->Features.HasAPIC = - ((cpuinfo[3] & 0x00000200) != 0); // APIC Present --> Bit 9 - this->Features.HasMTRR = - ((cpuinfo[3] & 0x00001000) != 0); // MTRR Present --> Bit 12 - this->Features.HasCMOV = - ((cpuinfo[3] & 0x00008000) != 0); // CMOV Present --> Bit 15 - this->Features.HasSerial = - ((cpuinfo[3] & 0x00040000) != 0); // Serial Present --> Bit 18 - this->Features.HasACPI = - ((cpuinfo[3] & 0x00400000) != 0); // ACPI Capable --> Bit 22 - this->Features.HasMMX = - ((cpuinfo[3] & 0x00800000) != 0); // MMX Present --> Bit 23 - this->Features.HasSSE = - ((cpuinfo[3] & 0x02000000) != 0); // SSE Present --> Bit 25 - this->Features.HasSSE2 = - ((cpuinfo[3] & 0x04000000) != 0); // SSE2 Present --> Bit 26 - this->Features.HasThermal = - ((cpuinfo[3] & 0x20000000) != 0); // Thermal Monitor Present --> Bit 29 - this->Features.HasIA64 = - ((cpuinfo[3] & 0x40000000) != 0); // IA64 Present --> Bit 30 - -# if USE_ASM_INSTRUCTIONS - // Retrieve extended SSE capabilities if SSE is available. - if (this->Features.HasSSE) { - - // Attempt to __try some SSE FP instructions. - __try { - // Perform: orps xmm0, xmm0 - _asm - { - _emit 0x0f - _emit 0x56 - _emit 0xc0 - } - - // SSE FP capable processor. - this->Features.HasSSEFP = true; - } __except (1) { - // bad instruction - processor or OS cannot handle SSE FP. - this->Features.HasSSEFP = false; - } - } else { - // Set the advanced SSE capabilities to not available. - this->Features.HasSSEFP = false; - } -# else - this->Features.HasSSEFP = false; -# endif - - // Retrieve Intel specific extended features. - if (this->ChipManufacturer == Intel) { - bool SupportsSMT = - ((cpuinfo[3] & 0x10000000) != 0); // Intel specific: SMT --> Bit 28 - - if ((SupportsSMT) && (this->Features.HasAPIC)) { - // Retrieve APIC information if there is one present. - this->Features.ExtendedFeatures.APIC_ID = - ((cpuinfo[1] & 0xFF000000) >> 24); - } - } - - return true; - -#else - return false; -#endif -} - -/** Find the manufacturer given the vendor id */ -void SystemInformationImplementation::FindManufacturer( - const std::string& family) -{ - if (this->ChipID.Vendor == "GenuineIntel") - this->ChipManufacturer = Intel; // Intel Corp. - else if (this->ChipID.Vendor == "UMC UMC UMC ") - this->ChipManufacturer = UMC; // United Microelectronics Corp. - else if (this->ChipID.Vendor == "AuthenticAMD") - this->ChipManufacturer = AMD; // Advanced Micro Devices - else if (this->ChipID.Vendor == "AMD ISBETTER") - this->ChipManufacturer = AMD; // Advanced Micro Devices (1994) - else if (this->ChipID.Vendor == "HygonGenuine") - this->ChipManufacturer = Hygon; // Chengdu Haiguang IC Design Co., Ltd. - else if (this->ChipID.Vendor == "CyrixInstead") - this->ChipManufacturer = Cyrix; // Cyrix Corp., VIA Inc. - else if (this->ChipID.Vendor == "NexGenDriven") - this->ChipManufacturer = NexGen; // NexGen Inc. 
(now AMD) - else if (this->ChipID.Vendor == "CentaurHauls") - this->ChipManufacturer = IDT; // IDT/Centaur (now VIA) - else if (this->ChipID.Vendor == "RiseRiseRise") - this->ChipManufacturer = Rise; // Rise - else if (this->ChipID.Vendor == "GenuineTMx86") - this->ChipManufacturer = Transmeta; // Transmeta - else if (this->ChipID.Vendor == "TransmetaCPU") - this->ChipManufacturer = Transmeta; // Transmeta - else if (this->ChipID.Vendor == "Geode By NSC") - this->ChipManufacturer = NSC; // National Semiconductor - else if (this->ChipID.Vendor == "Sun") - this->ChipManufacturer = Sun; // Sun Microelectronics - else if (this->ChipID.Vendor == "IBM") - this->ChipManufacturer = IBM; // IBM Microelectronics - else if (this->ChipID.Vendor == "Hewlett-Packard") - this->ChipManufacturer = HP; // Hewlett-Packard - else if (this->ChipID.Vendor == "Motorola") - this->ChipManufacturer = Motorola; // Motorola Microelectronics - else if (family.substr(0, 7) == "PA-RISC") - this->ChipManufacturer = HP; // Hewlett-Packard - else - this->ChipManufacturer = UnknownManufacturer; // Unknown manufacturer -} - -/** */ -bool SystemInformationImplementation::RetrieveCPUIdentity() -{ -#if USE_CPUID - int localCPUVendor[4]; - int localCPUSignature[4]; - - if (!call_cpuid(0, localCPUVendor)) { - return false; - } - if (!call_cpuid(1, localCPUSignature)) { - return false; - } - - // Process the returned information. - // ; eax = 0 --> eax: maximum value of CPUID instruction. - // ; ebx: part 1 of 3; CPU signature. - // ; edx: part 2 of 3; CPU signature. - // ; ecx: part 3 of 3; CPU signature. - char vbuf[13]; - memcpy(&(vbuf[0]), &(localCPUVendor[1]), sizeof(int)); - memcpy(&(vbuf[4]), &(localCPUVendor[3]), sizeof(int)); - memcpy(&(vbuf[8]), &(localCPUVendor[2]), sizeof(int)); - vbuf[12] = '\0'; - this->ChipID.Vendor = vbuf; - - // Retrieve the family of CPU present. - // ; eax = 1 --> eax: CPU ID - bits 31..16 - unused, bits 15..12 - type, - // bits 11..8 - family, bits 7..4 - model, bits 3..0 - mask revision - // ; ebx: 31..24 - default APIC ID, 23..16 - logical processor ID, - // 15..8 - CFLUSH chunk size , 7..0 - brand ID - // ; edx: CPU feature flags - this->ChipID.ExtendedFamily = - ((localCPUSignature[0] & 0x0FF00000) >> 20); // Bits 27..20 Used - this->ChipID.ExtendedModel = - ((localCPUSignature[0] & 0x000F0000) >> 16); // Bits 19..16 Used - this->ChipID.Type = - ((localCPUSignature[0] & 0x0000F000) >> 12); // Bits 15..12 Used - this->ChipID.Family = - ((localCPUSignature[0] & 0x00000F00) >> 8); // Bits 11..8 Used - this->ChipID.Model = - ((localCPUSignature[0] & 0x000000F0) >> 4); // Bits 7..4 Used - this->ChipID.Revision = - ((localCPUSignature[0] & 0x0000000F) >> 0); // Bits 3..0 Used - - return true; - -#else - return false; -#endif -} - -/** */ -bool SystemInformationImplementation::RetrieveCPUCacheDetails() -{ -#if USE_CPUID - int L1Cache[4] = { 0, 0, 0, 0 }; - int L2Cache[4] = { 0, 0, 0, 0 }; - - // Check to see if what we are about to do is supported... - if (RetrieveCPUExtendedLevelSupport(0x80000005)) { - if (!call_cpuid(0x80000005, L1Cache)) { - return false; - } - // Save the L1 data cache size (in KB) from ecx: bits 31..24 as well as - // data cache size from edx: bits 31..24. - this->Features.L1CacheSize = ((L1Cache[2] & 0xFF000000) >> 24); - this->Features.L1CacheSize += ((L1Cache[3] & 0xFF000000) >> 24); - } else { - // Store -1 to indicate the cache could not be queried. - this->Features.L1CacheSize = -1; - } - - // Check to see if what we are about to do is supported... 
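/*
 * The family/model/stepping fields decoded in RetrieveCPUIdentity() above
 * are stored separately.  A small sketch (not part of KWSys) of how they
 * are usually folded into the "display" family and model that vendor
 * documentation quotes: the extended family is added when the base family
 * is 0xF, and the extended model forms the high nibble for family 0x6 and
 * 0xF parts (Intel's rule; AMD only combines the model when the base
 * family is 0xF).
 */
static void ComputeDisplayIds(int family, int model, int extFamily,
                              int extModel, int& displayFamily,
                              int& displayModel)
{
  displayFamily = family;
  displayModel = model;
  if (family == 0xF) {
    displayFamily = family + extFamily;
  }
  if (family == 0x6 || family == 0xF) {
    displayModel = (extModel << 4) + model;
  }
}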
- if (RetrieveCPUExtendedLevelSupport(0x80000006)) { - if (!call_cpuid(0x80000006, L2Cache)) { - return false; - } - // Save the L2 unified cache size (in KB) from ecx: bits 31..16. - this->Features.L2CacheSize = ((L2Cache[2] & 0xFFFF0000) >> 16); - } else { - // Store -1 to indicate the cache could not be queried. - this->Features.L2CacheSize = -1; - } - - // Define L3 as being not present as we cannot test for it. - this->Features.L3CacheSize = -1; - -#endif - - // Return failure if we cannot detect either cache with this method. - return ((this->Features.L1CacheSize == -1) && - (this->Features.L2CacheSize == -1)) - ? false - : true; -} - -/** */ -bool SystemInformationImplementation::RetrieveClassicalCPUCacheDetails() -{ -#if USE_CPUID - int TLBCode = -1, TLBData = -1, L1Code = -1, L1Data = -1, L1Trace = -1, - L2Unified = -1, L3Unified = -1; - int TLBCacheData[4] = { 0, 0, 0, 0 }; - int TLBPassCounter = 0; - int TLBCacheUnit = 0; - - do { - if (!call_cpuid(2, TLBCacheData)) { - return false; - } - - int bob = ((TLBCacheData[0] & 0x00FF0000) >> 16); - (void)bob; - // Process the returned TLB and cache information. - for (int nCounter = 0; nCounter < TLBCACHE_INFO_UNITS; nCounter++) { - // First of all - decide which unit we are dealing with. - switch (nCounter) { - // eax: bits 8..15 : bits 16..23 : bits 24..31 - case 0: - TLBCacheUnit = ((TLBCacheData[0] & 0x0000FF00) >> 8); - break; - case 1: - TLBCacheUnit = ((TLBCacheData[0] & 0x00FF0000) >> 16); - break; - case 2: - TLBCacheUnit = ((TLBCacheData[0] & 0xFF000000) >> 24); - break; - - // ebx: bits 0..7 : bits 8..15 : bits 16..23 : bits 24..31 - case 3: - TLBCacheUnit = ((TLBCacheData[1] & 0x000000FF) >> 0); - break; - case 4: - TLBCacheUnit = ((TLBCacheData[1] & 0x0000FF00) >> 8); - break; - case 5: - TLBCacheUnit = ((TLBCacheData[1] & 0x00FF0000) >> 16); - break; - case 6: - TLBCacheUnit = ((TLBCacheData[1] & 0xFF000000) >> 24); - break; - - // ecx: bits 0..7 : bits 8..15 : bits 16..23 : bits 24..31 - case 7: - TLBCacheUnit = ((TLBCacheData[2] & 0x000000FF) >> 0); - break; - case 8: - TLBCacheUnit = ((TLBCacheData[2] & 0x0000FF00) >> 8); - break; - case 9: - TLBCacheUnit = ((TLBCacheData[2] & 0x00FF0000) >> 16); - break; - case 10: - TLBCacheUnit = ((TLBCacheData[2] & 0xFF000000) >> 24); - break; - - // edx: bits 0..7 : bits 8..15 : bits 16..23 : bits 24..31 - case 11: - TLBCacheUnit = ((TLBCacheData[3] & 0x000000FF) >> 0); - break; - case 12: - TLBCacheUnit = ((TLBCacheData[3] & 0x0000FF00) >> 8); - break; - case 13: - TLBCacheUnit = ((TLBCacheData[3] & 0x00FF0000) >> 16); - break; - case 14: - TLBCacheUnit = ((TLBCacheData[3] & 0xFF000000) >> 24); - break; - - // Default case - an error has occurred. - default: - return false; - } - - // Now process the resulting unit to see what it means.... 
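/*
 * The switch below decodes the legacy CPUID leaf 2 descriptor bytes.  For
 * comparison, a sketch of the deterministic cache parameters leaf 4, which
 * reports each cache level explicitly.  Assumes a GCC/Clang <cpuid.h> that
 * provides __get_cpuid_count and an Intel-style CPU; illustration only.
 */
#include <cpuid.h>
#include <cstdio>

static void PrintCachesFromLeaf4()
{
  for (unsigned int sub = 0;; ++sub) {
    unsigned int eax, ebx, ecx, edx;
    if (!__get_cpuid_count(4, sub, &eax, &ebx, &ecx, &edx)) {
      break; // leaf 4 not supported
    }
    unsigned int cacheType = eax & 0x1F; // 0 means no more caches
    if (cacheType == 0) {
      break;
    }
    unsigned int level = (eax >> 5) & 0x7;
    unsigned int ways = ((ebx >> 22) & 0x3FF) + 1;
    unsigned int partitions = ((ebx >> 12) & 0x3FF) + 1;
    unsigned int lineSize = (ebx & 0xFFF) + 1;
    unsigned int sets = ecx + 1;
    unsigned long long bytes =
      (unsigned long long)ways * partitions * lineSize * sets;
    std::printf("L%u %s cache: %llu KiB\n", level,
                cacheType == 1 ? "data" : (cacheType == 2 ? "code" : "unified"),
                bytes / 1024);
  }
}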
- switch (TLBCacheUnit) { - case 0x00: - break; - case 0x01: - STORE_TLBCACHE_INFO(TLBCode, 4); - break; - case 0x02: - STORE_TLBCACHE_INFO(TLBCode, 4096); - break; - case 0x03: - STORE_TLBCACHE_INFO(TLBData, 4); - break; - case 0x04: - STORE_TLBCACHE_INFO(TLBData, 4096); - break; - case 0x06: - STORE_TLBCACHE_INFO(L1Code, 8); - break; - case 0x08: - STORE_TLBCACHE_INFO(L1Code, 16); - break; - case 0x0a: - STORE_TLBCACHE_INFO(L1Data, 8); - break; - case 0x0c: - STORE_TLBCACHE_INFO(L1Data, 16); - break; - case 0x10: - STORE_TLBCACHE_INFO(L1Data, 16); - break; // <-- FIXME: IA-64 Only - case 0x15: - STORE_TLBCACHE_INFO(L1Code, 16); - break; // <-- FIXME: IA-64 Only - case 0x1a: - STORE_TLBCACHE_INFO(L2Unified, 96); - break; // <-- FIXME: IA-64 Only - case 0x22: - STORE_TLBCACHE_INFO(L3Unified, 512); - break; - case 0x23: - STORE_TLBCACHE_INFO(L3Unified, 1024); - break; - case 0x25: - STORE_TLBCACHE_INFO(L3Unified, 2048); - break; - case 0x29: - STORE_TLBCACHE_INFO(L3Unified, 4096); - break; - case 0x39: - STORE_TLBCACHE_INFO(L2Unified, 128); - break; - case 0x3c: - STORE_TLBCACHE_INFO(L2Unified, 256); - break; - case 0x40: - STORE_TLBCACHE_INFO(L2Unified, 0); - break; // <-- FIXME: No integrated L2 cache (P6 core) or L3 cache (P4 - // core). - case 0x41: - STORE_TLBCACHE_INFO(L2Unified, 128); - break; - case 0x42: - STORE_TLBCACHE_INFO(L2Unified, 256); - break; - case 0x43: - STORE_TLBCACHE_INFO(L2Unified, 512); - break; - case 0x44: - STORE_TLBCACHE_INFO(L2Unified, 1024); - break; - case 0x45: - STORE_TLBCACHE_INFO(L2Unified, 2048); - break; - case 0x50: - STORE_TLBCACHE_INFO(TLBCode, 4096); - break; - case 0x51: - STORE_TLBCACHE_INFO(TLBCode, 4096); - break; - case 0x52: - STORE_TLBCACHE_INFO(TLBCode, 4096); - break; - case 0x5b: - STORE_TLBCACHE_INFO(TLBData, 4096); - break; - case 0x5c: - STORE_TLBCACHE_INFO(TLBData, 4096); - break; - case 0x5d: - STORE_TLBCACHE_INFO(TLBData, 4096); - break; - case 0x66: - STORE_TLBCACHE_INFO(L1Data, 8); - break; - case 0x67: - STORE_TLBCACHE_INFO(L1Data, 16); - break; - case 0x68: - STORE_TLBCACHE_INFO(L1Data, 32); - break; - case 0x70: - STORE_TLBCACHE_INFO(L1Trace, 12); - break; - case 0x71: - STORE_TLBCACHE_INFO(L1Trace, 16); - break; - case 0x72: - STORE_TLBCACHE_INFO(L1Trace, 32); - break; - case 0x77: - STORE_TLBCACHE_INFO(L1Code, 16); - break; // <-- FIXME: IA-64 Only - case 0x79: - STORE_TLBCACHE_INFO(L2Unified, 128); - break; - case 0x7a: - STORE_TLBCACHE_INFO(L2Unified, 256); - break; - case 0x7b: - STORE_TLBCACHE_INFO(L2Unified, 512); - break; - case 0x7c: - STORE_TLBCACHE_INFO(L2Unified, 1024); - break; - case 0x7e: - STORE_TLBCACHE_INFO(L2Unified, 256); - break; - case 0x81: - STORE_TLBCACHE_INFO(L2Unified, 128); - break; - case 0x82: - STORE_TLBCACHE_INFO(L2Unified, 256); - break; - case 0x83: - STORE_TLBCACHE_INFO(L2Unified, 512); - break; - case 0x84: - STORE_TLBCACHE_INFO(L2Unified, 1024); - break; - case 0x85: - STORE_TLBCACHE_INFO(L2Unified, 2048); - break; - case 0x88: - STORE_TLBCACHE_INFO(L3Unified, 2048); - break; // <-- FIXME: IA-64 Only - case 0x89: - STORE_TLBCACHE_INFO(L3Unified, 4096); - break; // <-- FIXME: IA-64 Only - case 0x8a: - STORE_TLBCACHE_INFO(L3Unified, 8192); - break; // <-- FIXME: IA-64 Only - case 0x8d: - STORE_TLBCACHE_INFO(L3Unified, 3096); - break; // <-- FIXME: IA-64 Only - case 0x90: - STORE_TLBCACHE_INFO(TLBCode, 262144); - break; // <-- FIXME: IA-64 Only - case 0x96: - STORE_TLBCACHE_INFO(TLBCode, 262144); - break; // <-- FIXME: IA-64 Only - case 0x9b: - STORE_TLBCACHE_INFO(TLBCode, 262144); - break; // 
<-- FIXME: IA-64 Only - - // Default case - an error has occurred. - default: - return false; - } - } - - // Increment the TLB pass counter. - TLBPassCounter++; - } while ((TLBCacheData[0] & 0x000000FF) > TLBPassCounter); - - // Ok - we now have the maximum TLB, L1, L2, and L3 sizes... - if ((L1Code == -1) && (L1Data == -1) && (L1Trace == -1)) { - this->Features.L1CacheSize = -1; - } else if ((L1Code == -1) && (L1Data == -1) && (L1Trace != -1)) { - this->Features.L1CacheSize = L1Trace; - } else if ((L1Code != -1) && (L1Data == -1)) { - this->Features.L1CacheSize = L1Code; - } else if ((L1Code == -1) && (L1Data != -1)) { - this->Features.L1CacheSize = L1Data; - } else if ((L1Code != -1) && (L1Data != -1)) { - this->Features.L1CacheSize = L1Code + L1Data; - } else { - this->Features.L1CacheSize = -1; - } - - // Ok - we now have the maximum TLB, L1, L2, and L3 sizes... - if (L2Unified == -1) { - this->Features.L2CacheSize = -1; - } else { - this->Features.L2CacheSize = L2Unified; - } - - // Ok - we now have the maximum TLB, L1, L2, and L3 sizes... - if (L3Unified == -1) { - this->Features.L3CacheSize = -1; - } else { - this->Features.L3CacheSize = L3Unified; - } - - return true; - -#else - return false; -#endif -} - -/** */ -bool SystemInformationImplementation::RetrieveCPUClockSpeed() -{ - bool retrieved = false; - -#if defined(_WIN32) - unsigned int uiRepetitions = 1; - unsigned int uiMSecPerRepetition = 50; - __int64 i64Total = 0; - __int64 i64Overhead = 0; - - // Check if the TSC implementation works at all - if (this->Features.HasTSC && - GetCyclesDifference(SystemInformationImplementation::Delay, - uiMSecPerRepetition) > 0) { - for (unsigned int nCounter = 0; nCounter < uiRepetitions; nCounter++) { - i64Total += GetCyclesDifference(SystemInformationImplementation::Delay, - uiMSecPerRepetition); - i64Overhead += GetCyclesDifference( - SystemInformationImplementation::DelayOverhead, uiMSecPerRepetition); - } - - // Calculate the MHz speed. - i64Total -= i64Overhead; - i64Total /= uiRepetitions; - i64Total /= uiMSecPerRepetition; - i64Total /= 1000; - - // Save the CPU speed. - this->CPUSpeedInMHz = (float)i64Total; - - retrieved = true; - } - - // If RDTSC is not supported, we fallback to trying to read this value - // from the registry: - if (!retrieved) { - HKEY hKey = nullptr; - LONG err = - RegOpenKeyExW(HKEY_LOCAL_MACHINE, - L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0, - KEY_READ, &hKey); - - if (ERROR_SUCCESS == err) { - DWORD dwType = 0; - DWORD data = 0; - DWORD dwSize = sizeof(DWORD); - - err = - RegQueryValueExW(hKey, L"~MHz", 0, &dwType, (LPBYTE)&data, &dwSize); - - if (ERROR_SUCCESS == err) { - this->CPUSpeedInMHz = (float)data; - retrieved = true; - } - - RegCloseKey(hKey); - hKey = nullptr; - } - } -#endif - - return retrieved; -} - -/** */ -bool SystemInformationImplementation::RetrieveClassicalCPUClockSpeed() -{ -#if USE_ASM_INSTRUCTIONS - LARGE_INTEGER liStart, liEnd, liCountsPerSecond; - double dFrequency, dDifference; - - // Attempt to get a starting tick count. - QueryPerformanceCounter(&liStart); - - __try { - _asm { - mov eax, 0x80000000 - mov ebx, CLASSICAL_CPU_FREQ_LOOP - Timer_Loop: - bsf ecx,eax - dec ebx - jnz Timer_Loop - } - } __except (1) { - return false; - } - - // Attempt to get a starting tick count. - QueryPerformanceCounter(&liEnd); - - // Get the difference... NB: This is in seconds.... 
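/*
 * A portable sketch of the idea behind RetrieveCPUClockSpeed() above:
 * count how many time-stamp-counter ticks elapse over a known wall-clock
 * interval.  Uses the __rdtsc intrinsic and std::chrono rather than the
 * QueryPerformanceCounter-based Delay() helpers; note that on modern CPUs
 * the TSC ticks at a constant reference rate that can differ from the
 * current core frequency.  Illustration only.
 */
#include <chrono>
#include <thread>
#if defined(_MSC_VER)
#  include <intrin.h>
#else
#  include <x86intrin.h>
#endif

static double EstimateTscMHz()
{
  using steady = std::chrono::steady_clock;
  unsigned long long t0 = __rdtsc();
  steady::time_point w0 = steady::now();
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
  unsigned long long t1 = __rdtsc();
  steady::time_point w1 = steady::now();
  double seconds = std::chrono::duration<double>(w1 - w0).count();
  return (double)(t1 - t0) / seconds / 1e6; // ticks/second scaled to MHz
}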
- QueryPerformanceFrequency(&liCountsPerSecond); - dDifference = (((double)liEnd.QuadPart - (double)liStart.QuadPart) / - (double)liCountsPerSecond.QuadPart); - - // Calculate the clock speed. - if (this->ChipID.Family == 3) { - // 80386 processors.... Loop time is 115 cycles! - dFrequency = (((CLASSICAL_CPU_FREQ_LOOP * 115) / dDifference) / 1000000); - } else if (this->ChipID.Family == 4) { - // 80486 processors.... Loop time is 47 cycles! - dFrequency = (((CLASSICAL_CPU_FREQ_LOOP * 47) / dDifference) / 1000000); - } else if (this->ChipID.Family == 5) { - // Pentium processors.... Loop time is 43 cycles! - dFrequency = (((CLASSICAL_CPU_FREQ_LOOP * 43) / dDifference) / 1000000); - } - - // Save the clock speed. - this->Features.CPUSpeed = (int)dFrequency; - - return true; - -#else - return false; -#endif -} - -/** */ -bool SystemInformationImplementation::RetrieveCPUExtendedLevelSupport( - int CPULevelToCheck) -{ - int cpuinfo[4] = { 0, 0, 0, 0 }; - - // The extended CPUID is supported by various vendors starting with the - // following CPU models: - // - // Manufacturer & Chip Name | Family Model Revision - // - // AMD K6, K6-2 | 5 6 x - // Cyrix GXm, Cyrix III "Joshua" | 5 4 x - // IDT C6-2 | 5 8 x - // VIA Cyrix III | 6 5 x - // Transmeta Crusoe | 5 x x - // Intel Pentium 4 | f x x - // - - // We check to see if a supported processor is present... - if (this->ChipManufacturer == AMD) { - if (this->ChipID.Family < 5) - return false; - if ((this->ChipID.Family == 5) && (this->ChipID.Model < 6)) - return false; - } else if (this->ChipManufacturer == Cyrix) { - if (this->ChipID.Family < 5) - return false; - if ((this->ChipID.Family == 5) && (this->ChipID.Model < 4)) - return false; - if ((this->ChipID.Family == 6) && (this->ChipID.Model < 5)) - return false; - } else if (this->ChipManufacturer == IDT) { - if (this->ChipID.Family < 5) - return false; - if ((this->ChipID.Family == 5) && (this->ChipID.Model < 8)) - return false; - } else if (this->ChipManufacturer == Transmeta) { - if (this->ChipID.Family < 5) - return false; - } else if (this->ChipManufacturer == Intel) { - if (this->ChipID.Family < 0xf) { - return false; - } - } - -#if USE_CPUID - if (!call_cpuid(0x80000000, cpuinfo)) { - return false; - } -#endif - - // Now we have to check the level wanted vs level returned... - int nLevelWanted = (CPULevelToCheck & 0x7FFFFFFF); - int nLevelReturn = (cpuinfo[0] & 0x7FFFFFFF); - - // Check to see if the level provided is supported... - if (nLevelWanted > nLevelReturn) { - return false; - } - - return true; -} - -/** */ -bool SystemInformationImplementation::RetrieveExtendedCPUFeatures() -{ - - // Check that we are not using an Intel processor as it does not support - // this. - if (this->ChipManufacturer == Intel) { - return false; - } - - // Check to see if what we are about to do is supported... - if (!RetrieveCPUExtendedLevelSupport(static_cast(0x80000001))) { - return false; - } - -#if USE_CPUID - int localCPUExtendedFeatures[4] = { 0, 0, 0, 0 }; - - if (!call_cpuid(0x80000001, localCPUExtendedFeatures)) { - return false; - } - - // Retrieve the extended features of CPU present. - this->Features.ExtendedFeatures.Has3DNow = - ((localCPUExtendedFeatures[3] & 0x80000000) != - 0); // 3DNow Present --> Bit 31. - this->Features.ExtendedFeatures.Has3DNowPlus = - ((localCPUExtendedFeatures[3] & 0x40000000) != - 0); // 3DNow+ Present -- > Bit 30. - this->Features.ExtendedFeatures.HasSSEMMX = - ((localCPUExtendedFeatures[3] & 0x00400000) != - 0); // SSE MMX Present --> Bit 22. 
- this->Features.ExtendedFeatures.SupportsMP = - ((localCPUExtendedFeatures[3] & 0x00080000) != - 0); // MP Capable -- > Bit 19. - - // Retrieve AMD specific extended features. - if (this->ChipManufacturer == AMD || this->ChipManufacturer == Hygon) { - this->Features.ExtendedFeatures.HasMMXPlus = - ((localCPUExtendedFeatures[3] & 0x00400000) != - 0); // AMD specific: MMX-SSE --> Bit 22 - } - - // Retrieve Cyrix specific extended features. - if (this->ChipManufacturer == Cyrix) { - this->Features.ExtendedFeatures.HasMMXPlus = - ((localCPUExtendedFeatures[3] & 0x01000000) != - 0); // Cyrix specific: Extended MMX --> Bit 24 - } - - return true; - -#else - return false; -#endif -} - -/** */ -bool SystemInformationImplementation::RetrieveProcessorSerialNumber() -{ - // Check to see if the processor supports the processor serial number. - if (!this->Features.HasSerial) { - return false; - } - -#if USE_CPUID - int SerialNumber[4]; - - if (!call_cpuid(3, SerialNumber)) { - return false; - } - - // Process the returned information. - // ; eax = 3 --> ebx: top 32 bits are the processor signature bits --> NB: - // Transmeta only ?!? - // ; ecx: middle 32 bits are the processor signature bits - // ; edx: bottom 32 bits are the processor signature bits - char sn[128]; - sprintf(sn, "%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x", - ((SerialNumber[1] & 0xff000000) >> 24), - ((SerialNumber[1] & 0x00ff0000) >> 16), - ((SerialNumber[1] & 0x0000ff00) >> 8), - ((SerialNumber[1] & 0x000000ff) >> 0), - ((SerialNumber[2] & 0xff000000) >> 24), - ((SerialNumber[2] & 0x00ff0000) >> 16), - ((SerialNumber[2] & 0x0000ff00) >> 8), - ((SerialNumber[2] & 0x000000ff) >> 0), - ((SerialNumber[3] & 0xff000000) >> 24), - ((SerialNumber[3] & 0x00ff0000) >> 16), - ((SerialNumber[3] & 0x0000ff00) >> 8), - ((SerialNumber[3] & 0x000000ff) >> 0)); - this->ChipID.SerialNumber = sn; - return true; - -#else - return false; -#endif -} - -/** */ -bool SystemInformationImplementation::RetrieveCPUPowerManagement() -{ - // Check to see if what we are about to do is supported... - if (!RetrieveCPUExtendedLevelSupport(static_cast(0x80000007))) { - this->Features.ExtendedFeatures.PowerManagement.HasFrequencyID = false; - this->Features.ExtendedFeatures.PowerManagement.HasVoltageID = false; - this->Features.ExtendedFeatures.PowerManagement.HasTempSenseDiode = false; - return false; - } - -#if USE_CPUID - int localCPUPowerManagement[4] = { 0, 0, 0, 0 }; - - if (!call_cpuid(0x80000007, localCPUPowerManagement)) { - return false; - } - - // Check for the power management capabilities of the CPU. - this->Features.ExtendedFeatures.PowerManagement.HasTempSenseDiode = - ((localCPUPowerManagement[3] & 0x00000001) != 0); - this->Features.ExtendedFeatures.PowerManagement.HasFrequencyID = - ((localCPUPowerManagement[3] & 0x00000002) != 0); - this->Features.ExtendedFeatures.PowerManagement.HasVoltageID = - ((localCPUPowerManagement[3] & 0x00000004) != 0); - - return true; - -#else - return false; -#endif -} - -#if USE_CPUID -// Used only in USE_CPUID implementation below. -static void SystemInformationStripLeadingSpace(std::string& str) -{ - // Because some manufacturers have leading white space - we have to - // post-process the name. - std::string::size_type pos = str.find_first_not_of(" "); - if (pos != std::string::npos) { - str = str.substr(pos); - } -} -#endif - -/** */ -bool SystemInformationImplementation::RetrieveExtendedCPUIdentity() -{ - // Check to see if what we are about to do is supported... 
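/*
 * A compact sketch of the query performed below: extended leaves
 * 0x80000002..0x80000004 each return 16 bytes of the 48-byte, NUL-padded
 * processor brand string in EAX..EDX order.  Uses the GCC/Clang
 * __get_cpuid intrinsic instead of call_cpuid(); illustration only.
 */
#include <cpuid.h>
#include <cstring>

static bool GetBrandString(char out[49])
{
  unsigned int regs[4];
  if (!__get_cpuid(0x80000000u, &regs[0], &regs[1], &regs[2], &regs[3]) ||
      regs[0] < 0x80000004u) {
    return false; // brand string leaves not available
  }
  for (unsigned int i = 0; i < 3; ++i) {
    __get_cpuid(0x80000002u + i, &regs[0], &regs[1], &regs[2], &regs[3]);
    std::memcpy(out + 16 * i, regs, 16);
  }
  out[48] = '\0';
  return true;
}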
- if (!RetrieveCPUExtendedLevelSupport(static_cast(0x80000002))) - return false; - if (!RetrieveCPUExtendedLevelSupport(static_cast(0x80000003))) - return false; - if (!RetrieveCPUExtendedLevelSupport(static_cast(0x80000004))) - return false; - -#if USE_CPUID - int CPUExtendedIdentity[12]; - - if (!call_cpuid(0x80000002, CPUExtendedIdentity)) { - return false; - } - if (!call_cpuid(0x80000003, CPUExtendedIdentity + 4)) { - return false; - } - if (!call_cpuid(0x80000004, CPUExtendedIdentity + 8)) { - return false; - } - - // Process the returned information. - char nbuf[49]; - memcpy(&(nbuf[0]), &(CPUExtendedIdentity[0]), sizeof(int)); - memcpy(&(nbuf[4]), &(CPUExtendedIdentity[1]), sizeof(int)); - memcpy(&(nbuf[8]), &(CPUExtendedIdentity[2]), sizeof(int)); - memcpy(&(nbuf[12]), &(CPUExtendedIdentity[3]), sizeof(int)); - memcpy(&(nbuf[16]), &(CPUExtendedIdentity[4]), sizeof(int)); - memcpy(&(nbuf[20]), &(CPUExtendedIdentity[5]), sizeof(int)); - memcpy(&(nbuf[24]), &(CPUExtendedIdentity[6]), sizeof(int)); - memcpy(&(nbuf[28]), &(CPUExtendedIdentity[7]), sizeof(int)); - memcpy(&(nbuf[32]), &(CPUExtendedIdentity[8]), sizeof(int)); - memcpy(&(nbuf[36]), &(CPUExtendedIdentity[9]), sizeof(int)); - memcpy(&(nbuf[40]), &(CPUExtendedIdentity[10]), sizeof(int)); - memcpy(&(nbuf[44]), &(CPUExtendedIdentity[11]), sizeof(int)); - nbuf[48] = '\0'; - this->ChipID.ProcessorName = nbuf; - this->ChipID.ModelName = nbuf; - - // Because some manufacturers have leading white space - we have to - // post-process the name. - SystemInformationStripLeadingSpace(this->ChipID.ProcessorName); - return true; -#else - return false; -#endif -} - -/** */ -bool SystemInformationImplementation::RetrieveClassicalCPUIdentity() -{ - // Start by decided which manufacturer we are using.... - switch (this->ChipManufacturer) { - case Intel: - // Check the family / model / revision to determine the CPU ID. 
- switch (this->ChipID.Family) { - case 3: - this->ChipID.ProcessorName = "Newer i80386 family"; - break; - case 4: - switch (this->ChipID.Model) { - case 0: - this->ChipID.ProcessorName = "i80486DX-25/33"; - break; - case 1: - this->ChipID.ProcessorName = "i80486DX-50"; - break; - case 2: - this->ChipID.ProcessorName = "i80486SX"; - break; - case 3: - this->ChipID.ProcessorName = "i80486DX2"; - break; - case 4: - this->ChipID.ProcessorName = "i80486SL"; - break; - case 5: - this->ChipID.ProcessorName = "i80486SX2"; - break; - case 7: - this->ChipID.ProcessorName = "i80486DX2 WriteBack"; - break; - case 8: - this->ChipID.ProcessorName = "i80486DX4"; - break; - case 9: - this->ChipID.ProcessorName = "i80486DX4 WriteBack"; - break; - default: - this->ChipID.ProcessorName = "Unknown 80486 family"; - return false; - } - break; - case 5: - switch (this->ChipID.Model) { - case 0: - this->ChipID.ProcessorName = "P5 A-Step"; - break; - case 1: - this->ChipID.ProcessorName = "P5"; - break; - case 2: - this->ChipID.ProcessorName = "P54C"; - break; - case 3: - this->ChipID.ProcessorName = "P24T OverDrive"; - break; - case 4: - this->ChipID.ProcessorName = "P55C"; - break; - case 7: - this->ChipID.ProcessorName = "P54C"; - break; - case 8: - this->ChipID.ProcessorName = "P55C (0.25micron)"; - break; - default: - this->ChipID.ProcessorName = "Unknown Pentium family"; - return false; - } - break; - case 6: - switch (this->ChipID.Model) { - case 0: - this->ChipID.ProcessorName = "P6 A-Step"; - break; - case 1: - this->ChipID.ProcessorName = "P6"; - break; - case 3: - this->ChipID.ProcessorName = "Pentium II (0.28 micron)"; - break; - case 5: - this->ChipID.ProcessorName = "Pentium II (0.25 micron)"; - break; - case 6: - this->ChipID.ProcessorName = "Pentium II With On-Die L2 Cache"; - break; - case 7: - this->ChipID.ProcessorName = "Pentium III (0.25 micron)"; - break; - case 8: - this->ChipID.ProcessorName = - "Pentium III (0.18 micron) With 256 KB On-Die L2 Cache "; - break; - case 0xa: - this->ChipID.ProcessorName = - "Pentium III (0.18 micron) With 1 Or 2 MB On-Die L2 Cache "; - break; - case 0xb: - this->ChipID.ProcessorName = "Pentium III (0.13 micron) With " - "256 Or 512 KB On-Die L2 Cache "; - break; - case 23: - this->ChipID.ProcessorName = - "Intel(R) Core(TM)2 Duo CPU T9500 @ 2.60GHz"; - break; - default: - this->ChipID.ProcessorName = "Unknown P6 family"; - return false; - } - break; - case 7: - this->ChipID.ProcessorName = "Intel Merced (IA-64)"; - break; - case 0xf: - // Check the extended family bits... - switch (this->ChipID.ExtendedFamily) { - case 0: - switch (this->ChipID.Model) { - case 0: - this->ChipID.ProcessorName = "Pentium IV (0.18 micron)"; - break; - case 1: - this->ChipID.ProcessorName = "Pentium IV (0.18 micron)"; - break; - case 2: - this->ChipID.ProcessorName = "Pentium IV (0.13 micron)"; - break; - default: - this->ChipID.ProcessorName = "Unknown Pentium 4 family"; - return false; - } - break; - case 1: - this->ChipID.ProcessorName = "Intel McKinley (IA-64)"; - break; - default: - this->ChipID.ProcessorName = "Pentium"; - } - break; - default: - this->ChipID.ProcessorName = "Unknown Intel family"; - return false; - } - break; - - case AMD: - // Check the family / model / revision to determine the CPU ID. 
- switch (this->ChipID.Family) { - case 4: - switch (this->ChipID.Model) { - case 3: - this->ChipID.ProcessorName = "80486DX2"; - break; - case 7: - this->ChipID.ProcessorName = "80486DX2 WriteBack"; - break; - case 8: - this->ChipID.ProcessorName = "80486DX4"; - break; - case 9: - this->ChipID.ProcessorName = "80486DX4 WriteBack"; - break; - case 0xe: - this->ChipID.ProcessorName = "5x86"; - break; - case 0xf: - this->ChipID.ProcessorName = "5x86WB"; - break; - default: - this->ChipID.ProcessorName = "Unknown 80486 family"; - return false; - } - break; - case 5: - switch (this->ChipID.Model) { - case 0: - this->ChipID.ProcessorName = "SSA5 (PR75, PR90 = PR100)"; - break; - case 1: - this->ChipID.ProcessorName = "5k86 (PR120 = PR133)"; - break; - case 2: - this->ChipID.ProcessorName = "5k86 (PR166)"; - break; - case 3: - this->ChipID.ProcessorName = "5k86 (PR200)"; - break; - case 6: - this->ChipID.ProcessorName = "K6 (0.30 micron)"; - break; - case 7: - this->ChipID.ProcessorName = "K6 (0.25 micron)"; - break; - case 8: - this->ChipID.ProcessorName = "K6-2"; - break; - case 9: - this->ChipID.ProcessorName = "K6-III"; - break; - case 0xd: - this->ChipID.ProcessorName = "K6-2+ or K6-III+ (0.18 micron)"; - break; - default: - this->ChipID.ProcessorName = "Unknown 80586 family"; - return false; - } - break; - case 6: - switch (this->ChipID.Model) { - case 1: - this->ChipID.ProcessorName = "Athlon- (0.25 micron)"; - break; - case 2: - this->ChipID.ProcessorName = "Athlon- (0.18 micron)"; - break; - case 3: - this->ChipID.ProcessorName = "Duron- (SF core)"; - break; - case 4: - this->ChipID.ProcessorName = "Athlon- (Thunderbird core)"; - break; - case 6: - this->ChipID.ProcessorName = "Athlon- (Palomino core)"; - break; - case 7: - this->ChipID.ProcessorName = "Duron- (Morgan core)"; - break; - case 8: - if (this->Features.ExtendedFeatures.SupportsMP) - this->ChipID.ProcessorName = "Athlon - MP (Thoroughbred core)"; - else - this->ChipID.ProcessorName = "Athlon - XP (Thoroughbred core)"; - break; - default: - this->ChipID.ProcessorName = "Unknown K7 family"; - return false; - } - break; - default: - this->ChipID.ProcessorName = "Unknown AMD family"; - return false; - } - break; - - case Hygon: - this->ChipID.ProcessorName = "Unknown Hygon family"; - return false; - - case Transmeta: - switch (this->ChipID.Family) { - case 5: - switch (this->ChipID.Model) { - case 4: - this->ChipID.ProcessorName = "Crusoe TM3x00 and TM5x00"; - break; - default: - this->ChipID.ProcessorName = "Unknown Crusoe family"; - return false; - } - break; - default: - this->ChipID.ProcessorName = "Unknown Transmeta family"; - return false; - } - break; - - case Rise: - switch (this->ChipID.Family) { - case 5: - switch (this->ChipID.Model) { - case 0: - this->ChipID.ProcessorName = "mP6 (0.25 micron)"; - break; - case 2: - this->ChipID.ProcessorName = "mP6 (0.18 micron)"; - break; - default: - this->ChipID.ProcessorName = "Unknown Rise family"; - return false; - } - break; - default: - this->ChipID.ProcessorName = "Unknown Rise family"; - return false; - } - break; - - case UMC: - switch (this->ChipID.Family) { - case 4: - switch (this->ChipID.Model) { - case 1: - this->ChipID.ProcessorName = "U5D"; - break; - case 2: - this->ChipID.ProcessorName = "U5S"; - break; - default: - this->ChipID.ProcessorName = "Unknown UMC family"; - return false; - } - break; - default: - this->ChipID.ProcessorName = "Unknown UMC family"; - return false; - } - break; - - case IDT: - switch (this->ChipID.Family) { - case 5: - switch 
(this->ChipID.Model) { - case 4: - this->ChipID.ProcessorName = "C6"; - break; - case 8: - this->ChipID.ProcessorName = "C2"; - break; - case 9: - this->ChipID.ProcessorName = "C3"; - break; - default: - this->ChipID.ProcessorName = "Unknown IDT\\Centaur family"; - return false; - } - break; - case 6: - switch (this->ChipID.Model) { - case 6: - this->ChipID.ProcessorName = "VIA Cyrix III - Samuel"; - break; - default: - this->ChipID.ProcessorName = "Unknown IDT\\Centaur family"; - return false; - } - break; - default: - this->ChipID.ProcessorName = "Unknown IDT\\Centaur family"; - return false; - } - break; - - case Cyrix: - switch (this->ChipID.Family) { - case 4: - switch (this->ChipID.Model) { - case 4: - this->ChipID.ProcessorName = "MediaGX GX = GXm"; - break; - case 9: - this->ChipID.ProcessorName = "5x86"; - break; - default: - this->ChipID.ProcessorName = "Unknown Cx5x86 family"; - return false; - } - break; - case 5: - switch (this->ChipID.Model) { - case 2: - this->ChipID.ProcessorName = "Cx6x86"; - break; - case 4: - this->ChipID.ProcessorName = "MediaGX GXm"; - break; - default: - this->ChipID.ProcessorName = "Unknown Cx6x86 family"; - return false; - } - break; - case 6: - switch (this->ChipID.Model) { - case 0: - this->ChipID.ProcessorName = "6x86MX"; - break; - case 5: - this->ChipID.ProcessorName = "Cyrix M2 Core"; - break; - case 6: - this->ChipID.ProcessorName = "WinChip C5A Core"; - break; - case 7: - this->ChipID.ProcessorName = "WinChip C5B\\C5C Core"; - break; - case 8: - this->ChipID.ProcessorName = "WinChip C5C-T Core"; - break; - default: - this->ChipID.ProcessorName = "Unknown 6x86MX\\Cyrix III family"; - return false; - } - break; - default: - this->ChipID.ProcessorName = "Unknown Cyrix family"; - return false; - } - break; - - case NexGen: - switch (this->ChipID.Family) { - case 5: - switch (this->ChipID.Model) { - case 0: - this->ChipID.ProcessorName = "Nx586 or Nx586FPU"; - break; - default: - this->ChipID.ProcessorName = "Unknown NexGen family"; - return false; - } - break; - default: - this->ChipID.ProcessorName = "Unknown NexGen family"; - return false; - } - break; - - case NSC: - this->ChipID.ProcessorName = "Cx486SLC \\ DLC \\ Cx486S A-Step"; - break; - - case Sun: - case IBM: - case Motorola: - case HP: - case UnknownManufacturer: - default: - this->ChipID.ProcessorName = - "Unknown family"; // We cannot identify the processor. - return false; - } - - return true; -} - -/** Extract a value from the CPUInfo file */ -std::string SystemInformationImplementation::ExtractValueFromCpuInfoFile( - std::string buffer, const char* word, size_t init) -{ - size_t pos = buffer.find(word, init); - if (pos != std::string::npos) { - this->CurrentPositionInFile = pos; - pos = buffer.find(":", pos); - size_t pos2 = buffer.find("\n", pos); - if (pos != std::string::npos && pos2 != std::string::npos) { - // It may happen that the beginning matches, but this is still not the - // requested key. - // An example is looking for "cpu" when "cpu family" comes first. So we - // check that - // we have only spaces from here to pos, otherwise we search again. 
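/*
 * The same rule as the comment above, as a stand-alone helper (hypothetical,
 * not a KWSys function): accept the hit only if everything between the end
 * of the key and the ':' is whitespace, so a search for "cpu" does not stop
 * on "cpu family".
 */
#include <string>

static bool KeyMatchesHere(const std::string& buf, size_t keyPos,
                           size_t keyLen, size_t colonPos)
{
  for (size_t i = keyPos + keyLen; i < colonPos; ++i) {
    if (buf[i] != ' ' && buf[i] != '\t') {
      return false; // some other token precedes the ':', keep searching
    }
  }
  return true;
}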
- for (size_t i = this->CurrentPositionInFile + strlen(word); i < pos; - ++i) { - if (buffer[i] != ' ' && buffer[i] != '\t') { - return this->ExtractValueFromCpuInfoFile(buffer, word, pos2); - } - } - return buffer.substr(pos + 2, pos2 - pos - 2); - } - } - this->CurrentPositionInFile = std::string::npos; - return ""; -} - -/** Query for the cpu status */ -bool SystemInformationImplementation::RetreiveInformationFromCpuInfoFile() -{ - this->NumberOfLogicalCPU = 0; - this->NumberOfPhysicalCPU = 0; - std::string buffer; - - FILE* fd = fopen("/proc/cpuinfo", "r"); - if (!fd) { - std::cout << "Problem opening /proc/cpuinfo" << std::endl; - return false; - } - - size_t fileSize = 0; - while (!feof(fd)) { - buffer += static_cast(fgetc(fd)); - fileSize++; - } - fclose(fd); - buffer.resize(fileSize - 2); - // Number of logical CPUs (combination of multiple processors, multi-core - // and SMT) - size_t pos = buffer.find("processor\t"); - while (pos != std::string::npos) { - this->NumberOfLogicalCPU++; - pos = buffer.find("processor\t", pos + 1); - } - -#ifdef __linux - // Count sockets. - std::set PhysicalIDs; - std::string idc = this->ExtractValueFromCpuInfoFile(buffer, "physical id"); - while (this->CurrentPositionInFile != std::string::npos) { - int id = atoi(idc.c_str()); - PhysicalIDs.insert(id); - idc = this->ExtractValueFromCpuInfoFile(buffer, "physical id", - this->CurrentPositionInFile + 1); - } - uint64_t NumberOfSockets = PhysicalIDs.size(); - NumberOfSockets = std::max(NumberOfSockets, (uint64_t)1); - // Physical ids returned by Linux don't distinguish cores. - // We want to record the total number of cores in this->NumberOfPhysicalCPU - // (checking only the first proc) - std::string Cores = this->ExtractValueFromCpuInfoFile(buffer, "cpu cores"); - unsigned int NumberOfCoresPerSocket = (unsigned int)atoi(Cores.c_str()); - NumberOfCoresPerSocket = std::max(NumberOfCoresPerSocket, 1u); - this->NumberOfPhysicalCPU = - NumberOfCoresPerSocket * (unsigned int)NumberOfSockets; - -#else // __CYGWIN__ - // does not have "physical id" entries, neither "cpu cores" - // this has to be fixed for hyper-threading. - std::string cpucount = - this->ExtractValueFromCpuInfoFile(buffer, "cpu count"); - this->NumberOfPhysicalCPU = this->NumberOfLogicalCPU = - atoi(cpucount.c_str()); -#endif - // gotta have one, and if this is 0 then we get a / by 0n - // better to have a bad answer than a crash - if (this->NumberOfPhysicalCPU <= 0) { - this->NumberOfPhysicalCPU = 1; - } - // LogicalProcessorsPerPhysical>1 => SMT. 
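/*
 * A stand-alone sketch (Linux only, hypothetical helper) of the counting
 * done just above: every "processor" entry in /proc/cpuinfo is one logical
 * CPU and each distinct "physical id" is one socket; "cpu cores" (not read
 * here) then gives cores per socket.
 */
#include <fstream>
#include <set>
#include <sstream>
#include <string>

struct CpuCounts
{
  unsigned int Logical;
  unsigned int Sockets;
};

static CpuCounts CountCpusFromProcCpuinfo()
{
  CpuCounts counts = { 0, 0 };
  std::set<int> physicalIds;
  std::ifstream cpuinfo("/proc/cpuinfo");
  std::string line;
  while (std::getline(cpuinfo, line)) {
    if (line.compare(0, 9, "processor") == 0) {
      counts.Logical++;
    } else if (line.compare(0, 11, "physical id") == 0) {
      std::istringstream value(line.substr(line.find(':') + 1));
      int id = -1;
      value >> id;
      physicalIds.insert(id);
    }
  }
  counts.Sockets =
    physicalIds.empty() ? 1u : (unsigned int)physicalIds.size();
  return counts;
}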
- this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical = - this->NumberOfLogicalCPU / this->NumberOfPhysicalCPU; - - // CPU speed (checking only the first processor) - std::string CPUSpeed = this->ExtractValueFromCpuInfoFile(buffer, "cpu MHz"); - if (!CPUSpeed.empty()) { - this->CPUSpeedInMHz = static_cast(atof(CPUSpeed.c_str())); - } -#ifdef __linux - else { - // Linux Sparc: CPU speed is in Hz and encoded in hexadecimal - CPUSpeed = this->ExtractValueFromCpuInfoFile(buffer, "Cpu0ClkTck"); - this->CPUSpeedInMHz = - static_cast(strtoull(CPUSpeed.c_str(), 0, 16)) / 1000000.0f; - } -#endif - - // Chip family - std::string familyStr = - this->ExtractValueFromCpuInfoFile(buffer, "cpu family"); - if (familyStr.empty()) { - familyStr = this->ExtractValueFromCpuInfoFile(buffer, "CPU architecture"); - } - this->ChipID.Family = atoi(familyStr.c_str()); - - // Chip Vendor - this->ChipID.Vendor = this->ExtractValueFromCpuInfoFile(buffer, "vendor_id"); - this->FindManufacturer(familyStr); - - // second try for setting family - if (this->ChipID.Family == 0 && this->ChipManufacturer == HP) { - if (familyStr == "PA-RISC 1.1a") - this->ChipID.Family = 0x11a; - else if (familyStr == "PA-RISC 2.0") - this->ChipID.Family = 0x200; - // If you really get CMake to work on a machine not belonging to - // any of those families I owe you a dinner if you get it to - // contribute nightly builds regularly. - } - - // Chip Model - this->ChipID.Model = - atoi(this->ExtractValueFromCpuInfoFile(buffer, "model").c_str()); - if (!this->RetrieveClassicalCPUIdentity()) { - // Some platforms (e.g. PA-RISC) tell us their CPU name here. - // Note: x86 does not. - std::string cpuname = this->ExtractValueFromCpuInfoFile(buffer, "cpu"); - if (!cpuname.empty()) { - this->ChipID.ProcessorName = cpuname; - } - } - - // Chip revision - std::string cpurev = this->ExtractValueFromCpuInfoFile(buffer, "stepping"); - if (cpurev.empty()) { - cpurev = this->ExtractValueFromCpuInfoFile(buffer, "CPU revision"); - } - this->ChipID.Revision = atoi(cpurev.c_str()); - - // Chip Model Name - this->ChipID.ModelName = - this->ExtractValueFromCpuInfoFile(buffer, "model name"); - - // L1 Cache size - // Different architectures may show different names for the caches. - // Sum up everything we find. - std::vector cachename; - cachename.clear(); - - cachename.push_back("cache size"); // e.g. x86 - cachename.push_back("I-cache"); // e.g. PA-RISC - cachename.push_back("D-cache"); // e.g. 
PA-RISC - - this->Features.L1CacheSize = 0; - for (size_t index = 0; index < cachename.size(); index++) { - std::string cacheSize = - this->ExtractValueFromCpuInfoFile(buffer, cachename[index]); - if (!cacheSize.empty()) { - pos = cacheSize.find(" KB"); - if (pos != std::string::npos) { - cacheSize = cacheSize.substr(0, pos); - } - this->Features.L1CacheSize += atoi(cacheSize.c_str()); - } - } - - // processor feature flags (probably x86 specific) - std::string cpuflags = this->ExtractValueFromCpuInfoFile(buffer, "flags"); - if (!cpurev.empty()) { - // now we can match every flags as space + flag + space - cpuflags = " " + cpuflags + " "; - if ((cpuflags.find(" fpu ") != std::string::npos)) { - this->Features.HasFPU = true; - } - if ((cpuflags.find(" tsc ") != std::string::npos)) { - this->Features.HasTSC = true; - } - if ((cpuflags.find(" mmx ") != std::string::npos)) { - this->Features.HasMMX = true; - } - if ((cpuflags.find(" sse ") != std::string::npos)) { - this->Features.HasSSE = true; - } - if ((cpuflags.find(" sse2 ") != std::string::npos)) { - this->Features.HasSSE2 = true; - } - if ((cpuflags.find(" apic ") != std::string::npos)) { - this->Features.HasAPIC = true; - } - if ((cpuflags.find(" cmov ") != std::string::npos)) { - this->Features.HasCMOV = true; - } - if ((cpuflags.find(" mtrr ") != std::string::npos)) { - this->Features.HasMTRR = true; - } - if ((cpuflags.find(" acpi ") != std::string::npos)) { - this->Features.HasACPI = true; - } - if ((cpuflags.find(" 3dnow ") != std::string::npos)) { - this->Features.ExtendedFeatures.Has3DNow = true; - } - } - - return true; -} - -bool SystemInformationImplementation::QueryProcessorBySysconf() -{ -#if defined(_SC_NPROC_ONLN) && !defined(_SC_NPROCESSORS_ONLN) -// IRIX names this slightly different -# define _SC_NPROCESSORS_ONLN _SC_NPROC_ONLN -#endif - -#ifdef _SC_NPROCESSORS_ONLN - long c = sysconf(_SC_NPROCESSORS_ONLN); - if (c <= 0) { - return false; - } - - this->NumberOfPhysicalCPU = static_cast(c); - this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU; - - return true; -#else - return false; -#endif -} - -bool SystemInformationImplementation::QueryProcessor() -{ - return this->QueryProcessorBySysconf(); -} - -/** -Get total system RAM in units of KiB. -*/ -SystemInformation::LongLong -SystemInformationImplementation::GetHostMemoryTotal() -{ -#if defined(_WIN32) -# if defined(_MSC_VER) && _MSC_VER < 1300 - MEMORYSTATUS stat; - stat.dwLength = sizeof(stat); - GlobalMemoryStatus(&stat); - return stat.dwTotalPhys / 1024; -# else - MEMORYSTATUSEX statex; - statex.dwLength = sizeof(statex); - GlobalMemoryStatusEx(&statex); - return statex.ullTotalPhys / 1024; -# endif -#elif defined(__linux) - SystemInformation::LongLong memTotal = 0; - int ierr = GetFieldFromFile("/proc/meminfo", "MemTotal:", memTotal); - if (ierr) { - return -1; - } - return memTotal; -#elif defined(__APPLE__) - uint64_t mem; - size_t len = sizeof(mem); - int ierr = sysctlbyname("hw.memsize", &mem, &len, nullptr, 0); - if (ierr) { - return -1; - } - return mem / 1024; -#else - return 0; -#endif -} - -/** -Get total system RAM in units of KiB. This may differ from the -host total if a host-wide resource limit is applied. -*/ -SystemInformation::LongLong -SystemInformationImplementation::GetHostMemoryAvailable( - const char* hostLimitEnvVarName) -{ - SystemInformation::LongLong memTotal = this->GetHostMemoryTotal(); - - // the following mechanism is provided for systems that - // apply resource limits across groups of processes. 
- // this is of use on certain SMP systems (eg. SGI UV) - // where the host has a large amount of ram but a given user's - // access to it is severely restricted. The system will - // apply a limit across a set of processes. Units are in KiB. - if (hostLimitEnvVarName) { - const char* hostLimitEnvVarValue = getenv(hostLimitEnvVarName); - if (hostLimitEnvVarValue) { - SystemInformation::LongLong hostLimit = - atoLongLong(hostLimitEnvVarValue); - if (hostLimit > 0) { - memTotal = min(hostLimit, memTotal); - } - } - } - - return memTotal; -} - -/** -Get total system RAM in units of KiB. This may differ from the -host total if a per-process resource limit is applied. -*/ -SystemInformation::LongLong -SystemInformationImplementation::GetProcMemoryAvailable( - const char* hostLimitEnvVarName, const char* procLimitEnvVarName) -{ - SystemInformation::LongLong memAvail = - this->GetHostMemoryAvailable(hostLimitEnvVarName); - - // the following mechanism is provide for systems where rlimits - // are not employed. Units are in KiB. - if (procLimitEnvVarName) { - const char* procLimitEnvVarValue = getenv(procLimitEnvVarName); - if (procLimitEnvVarValue) { - SystemInformation::LongLong procLimit = - atoLongLong(procLimitEnvVarValue); - if (procLimit > 0) { - memAvail = min(procLimit, memAvail); - } - } - } - -#if defined(__linux) - int ierr; - ResourceLimitType rlim; - ierr = GetResourceLimit(RLIMIT_DATA, &rlim); - if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) { - memAvail = - min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail); - } - - ierr = GetResourceLimit(RLIMIT_AS, &rlim); - if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) { - memAvail = - min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail); - } -#elif defined(__APPLE__) - struct rlimit rlim; - int ierr; - ierr = getrlimit(RLIMIT_DATA, &rlim); - if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) { - memAvail = - min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail); - } - - ierr = getrlimit(RLIMIT_RSS, &rlim); - if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) { - memAvail = - min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail); - } -#endif - - return memAvail; -} - -/** -Get RAM used by all processes in the host, in units of KiB. 
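/*
 * A stand-alone sketch (hypothetical helper, not the KWSys GetFieldFromFile
 * used below) of the /proc/meminfo lookup behind GetHostMemoryTotal() and
 * GetHostMemoryUsed(): find a "Name: value kB" line and return the value,
 * which the kernel already reports in KiB.
 */
#include <fstream>
#include <sstream>
#include <string>

static long long ReadMeminfoField(const std::string& name) // e.g. "MemTotal:"
{
  std::ifstream meminfo("/proc/meminfo");
  std::string line;
  while (std::getline(meminfo, line)) {
    if (line.compare(0, name.size(), name) == 0) {
      std::istringstream rest(line.substr(name.size()));
      long long value = -1;
      rest >> value; // trailing "kB" unit is left in the stream
      return value;
    }
  }
  return -1; // field not present (e.g. MemAvailable on old kernels)
}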
-*/ -SystemInformation::LongLong -SystemInformationImplementation::GetHostMemoryUsed() -{ -#if defined(_WIN32) -# if defined(_MSC_VER) && _MSC_VER < 1300 - MEMORYSTATUS stat; - stat.dwLength = sizeof(stat); - GlobalMemoryStatus(&stat); - return (stat.dwTotalPhys - stat.dwAvailPhys) / 1024; -# else - MEMORYSTATUSEX statex; - statex.dwLength = sizeof(statex); - GlobalMemoryStatusEx(&statex); - return (statex.ullTotalPhys - statex.ullAvailPhys) / 1024; -# endif -#elif defined(__linux) - // First try to use MemAvailable, but it only works on newer kernels - const char* names2[3] = { "MemTotal:", "MemAvailable:", nullptr }; - SystemInformation::LongLong values2[2] = { SystemInformation::LongLong(0) }; - int ierr = GetFieldsFromFile("/proc/meminfo", names2, values2); - if (ierr) { - const char* names4[5] = { "MemTotal:", "MemFree:", "Buffers:", "Cached:", - nullptr }; - SystemInformation::LongLong values4[4] = { SystemInformation::LongLong( - 0) }; - ierr = GetFieldsFromFile("/proc/meminfo", names4, values4); - if (ierr) { - return ierr; - } - SystemInformation::LongLong& memTotal = values4[0]; - SystemInformation::LongLong& memFree = values4[1]; - SystemInformation::LongLong& memBuffers = values4[2]; - SystemInformation::LongLong& memCached = values4[3]; - return memTotal - memFree - memBuffers - memCached; - } - SystemInformation::LongLong& memTotal = values2[0]; - SystemInformation::LongLong& memAvail = values2[1]; - return memTotal - memAvail; -#elif defined(__APPLE__) - SystemInformation::LongLong psz = getpagesize(); - if (psz < 1) { - return -1; - } - const char* names[3] = { "Pages wired down:", "Pages active:", nullptr }; - SystemInformation::LongLong values[2] = { SystemInformation::LongLong(0) }; - int ierr = GetFieldsFromCommand("vm_stat", names, values); - if (ierr) { - return -1; - } - SystemInformation::LongLong& vmWired = values[0]; - SystemInformation::LongLong& vmActive = values[1]; - return ((vmActive + vmWired) * psz) / 1024; -#else - return 0; -#endif -} - -/** -Get system RAM used by the process associated with the given -process id in units of KiB. 
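/*
 * The Apple branch below shells out to `ps -o rss=`.  As a point of
 * comparison, a sketch of the in-process alternative via the Mach
 * task_info() call (assumes macOS and <mach/mach.h>; resident_size is in
 * bytes, hence the division to match the KiB convention used here).
 */
#include <mach/mach.h>

static long long ReadOwnRssKiBMach()
{
  mach_task_basic_info_data_t info;
  mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
  if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
                (task_info_t)&info, &count) != KERN_SUCCESS) {
    return -1;
  }
  return (long long)(info.resident_size / 1024);
}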
-*/ -SystemInformation::LongLong -SystemInformationImplementation::GetProcMemoryUsed() -{ -#if defined(_WIN32) && defined(KWSYS_SYS_HAS_PSAPI) - long pid = GetCurrentProcessId(); - HANDLE hProc; - hProc = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, false, pid); - if (hProc == 0) { - return -1; - } - PROCESS_MEMORY_COUNTERS pmc; - int ok = GetProcessMemoryInfo(hProc, &pmc, sizeof(pmc)); - CloseHandle(hProc); - if (!ok) { - return -2; - } - return pmc.WorkingSetSize / 1024; -#elif defined(__linux) - SystemInformation::LongLong memUsed = 0; - int ierr = GetFieldFromFile("/proc/self/status", "VmRSS:", memUsed); - if (ierr) { - return -1; - } - return memUsed; -#elif defined(__APPLE__) - SystemInformation::LongLong memUsed = 0; - pid_t pid = getpid(); - std::ostringstream oss; - oss << "ps -o rss= -p " << pid; - FILE* file = popen(oss.str().c_str(), "r"); - if (file == nullptr) { - return -1; - } - oss.str(""); - while (!feof(file) && !ferror(file)) { - char buf[256] = { '\0' }; - errno = 0; - size_t nRead = fread(buf, 1, 256, file); - if (ferror(file) && (errno == EINTR)) { - clearerr(file); - } - if (nRead) - oss << buf; - } - int ierr = ferror(file); - pclose(file); - if (ierr) { - return -2; - } - std::istringstream iss(oss.str()); - iss >> memUsed; - return memUsed; -#else - return 0; -#endif -} - -double SystemInformationImplementation::GetLoadAverage() -{ -#if defined(KWSYS_CXX_HAS_GETLOADAVG) - double loadavg[3] = { 0.0, 0.0, 0.0 }; - if (getloadavg(loadavg, 3) > 0) { - return loadavg[0]; - } - return -0.0; -#elif defined(KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes) - // Old windows.h headers do not provide GetSystemTimes. - typedef BOOL(WINAPI * GetSystemTimesType)(LPFILETIME, LPFILETIME, - LPFILETIME); - static GetSystemTimesType pGetSystemTimes = - (GetSystemTimesType)GetProcAddress(GetModuleHandleW(L"kernel32"), - "GetSystemTimes"); - FILETIME idleTime, kernelTime, userTime; - if (pGetSystemTimes && pGetSystemTimes(&idleTime, &kernelTime, &userTime)) { - unsigned __int64 const idleTicks = fileTimeToUInt64(idleTime); - unsigned __int64 const totalTicks = - fileTimeToUInt64(kernelTime) + fileTimeToUInt64(userTime); - return calculateCPULoad(idleTicks, totalTicks) * GetNumberOfPhysicalCPU(); - } - return -0.0; -#else - // Not implemented on this platform. - return -0.0; -#endif -} - -/** -Get the process id of the running process. -*/ -SystemInformation::LongLong SystemInformationImplementation::GetProcessId() -{ -#if defined(_WIN32) - return GetCurrentProcessId(); -#elif defined(__linux) || defined(__APPLE__) || defined(__OpenBSD__) || \ - defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) - return getpid(); -#else - return -1; -#endif -} - -/** - * Used in GetProgramStack(...) below - */ -#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0600 && defined(_MSC_VER) && \ - _MSC_VER >= 1800 -# define KWSYS_SYSTEMINFORMATION_HAS_DBGHELP -# define TRACE_MAX_STACK_FRAMES 1024 -# define TRACE_MAX_FUNCTION_NAME_LENGTH 1024 -# pragma warning(push) -# pragma warning(disable : 4091) /* 'typedef ': ignored on left of '' */ -# include "dbghelp.h" -# pragma warning(pop) -#endif - -/** -return current program stack in a string -demangle cxx symbols if possible. 
-*/ -std::string SystemInformationImplementation::GetProgramStack(int firstFrame, - int wholePath) -{ - std::ostringstream oss; - std::string programStack = ""; - -#ifdef KWSYS_SYSTEMINFORMATION_HAS_DBGHELP - (void)wholePath; - - void* stack[TRACE_MAX_STACK_FRAMES]; - HANDLE process = GetCurrentProcess(); - SymInitialize(process, nullptr, TRUE); - WORD numberOfFrames = - CaptureStackBackTrace(firstFrame, TRACE_MAX_STACK_FRAMES, stack, nullptr); - SYMBOL_INFO* symbol = static_cast( - malloc(sizeof(SYMBOL_INFO) + - (TRACE_MAX_FUNCTION_NAME_LENGTH - 1) * sizeof(TCHAR))); - symbol->MaxNameLen = TRACE_MAX_FUNCTION_NAME_LENGTH; - symbol->SizeOfStruct = sizeof(SYMBOL_INFO); - DWORD displacement; - IMAGEHLP_LINE64 line; - line.SizeOfStruct = sizeof(IMAGEHLP_LINE64); - for (int i = 0; i < numberOfFrames; i++) { - DWORD64 address = reinterpret_cast(stack[i]); - SymFromAddr(process, address, nullptr, symbol); - if (SymGetLineFromAddr64(process, address, &displacement, &line)) { - oss << " at " << symbol->Name << " in " << line.FileName << " line " - << line.LineNumber << std::endl; - } else { - oss << " at " << symbol->Name << std::endl; - } - } - free(symbol); - -#else - programStack += "" -# if !defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE) - "WARNING: The stack could not be examined " - "because backtrace is not supported.\n" -# elif !defined(KWSYS_SYSTEMINFORMATION_HAS_DEBUG_BUILD) - "WARNING: The stack trace will not use advanced " - "capabilities because this is a release build.\n" -# else -# if !defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP) - "WARNING: Function names will not be demangled " - "because dladdr is not available.\n" -# endif -# if !defined(KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE) - "WARNING: Function names will not be demangled " - "because cxxabi is not available.\n" -# endif -# endif - ; - -# if defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE) - void* stackSymbols[256]; - int nFrames = backtrace(stackSymbols, 256); - for (int i = firstFrame; i < nFrames; ++i) { - SymbolProperties symProps; - symProps.SetReportPath(wholePath); - symProps.Initialize(stackSymbols[i]); - oss << symProps << std::endl; - } -# else - (void)firstFrame; - (void)wholePath; -# endif -#endif - - programStack += oss.str(); - - return programStack; -} - -/** -when set print stack trace in response to common signals. 
-*/ -void SystemInformationImplementation::SetStackTraceOnError(int enable) -{ -#if !defined(_WIN32) && !defined(__MINGW32__) && !defined(__CYGWIN__) - static int saOrigValid = 0; - static struct sigaction saABRTOrig; - static struct sigaction saSEGVOrig; - static struct sigaction saTERMOrig; - static struct sigaction saINTOrig; - static struct sigaction saILLOrig; - static struct sigaction saBUSOrig; - static struct sigaction saFPEOrig; - - if (enable && !saOrigValid) { - // save the current actions - sigaction(SIGABRT, nullptr, &saABRTOrig); - sigaction(SIGSEGV, nullptr, &saSEGVOrig); - sigaction(SIGTERM, nullptr, &saTERMOrig); - sigaction(SIGINT, nullptr, &saINTOrig); - sigaction(SIGILL, nullptr, &saILLOrig); - sigaction(SIGBUS, nullptr, &saBUSOrig); - sigaction(SIGFPE, nullptr, &saFPEOrig); - - // enable read, disable write - saOrigValid = 1; - - // install ours - struct sigaction sa; - sa.sa_sigaction = (SigAction)StacktraceSignalHandler; - sa.sa_flags = SA_SIGINFO | SA_RESETHAND; -# ifdef SA_RESTART - sa.sa_flags |= SA_RESTART; -# endif - sigemptyset(&sa.sa_mask); - - sigaction(SIGABRT, &sa, nullptr); - sigaction(SIGSEGV, &sa, nullptr); - sigaction(SIGTERM, &sa, nullptr); - sigaction(SIGINT, &sa, nullptr); - sigaction(SIGILL, &sa, nullptr); - sigaction(SIGBUS, &sa, nullptr); - sigaction(SIGFPE, &sa, nullptr); - } else if (!enable && saOrigValid) { - // restore previous actions - sigaction(SIGABRT, &saABRTOrig, nullptr); - sigaction(SIGSEGV, &saSEGVOrig, nullptr); - sigaction(SIGTERM, &saTERMOrig, nullptr); - sigaction(SIGINT, &saINTOrig, nullptr); - sigaction(SIGILL, &saILLOrig, nullptr); - sigaction(SIGBUS, &saBUSOrig, nullptr); - sigaction(SIGFPE, &saFPEOrig, nullptr); - - // enable write, disable read - saOrigValid = 0; - } -#else - // avoid warning C4100 - (void)enable; -#endif -} - -bool SystemInformationImplementation::QueryWindowsMemory() -{ -#if defined(_WIN32) -# if defined(_MSC_VER) && _MSC_VER < 1300 - MEMORYSTATUS ms; - unsigned long tv, tp, av, ap; - ms.dwLength = sizeof(ms); - GlobalMemoryStatus(&ms); -# define MEM_VAL(value) dw##value -# else - MEMORYSTATUSEX ms; - DWORDLONG tv, tp, av, ap; - ms.dwLength = sizeof(ms); - if (0 == GlobalMemoryStatusEx(&ms)) { - return 0; - } -# define MEM_VAL(value) ull##value -# endif - tv = ms.MEM_VAL(TotalPageFile); - tp = ms.MEM_VAL(TotalPhys); - av = ms.MEM_VAL(AvailPageFile); - ap = ms.MEM_VAL(AvailPhys); - this->TotalVirtualMemory = tv >> 10 >> 10; - this->TotalPhysicalMemory = tp >> 10 >> 10; - this->AvailableVirtualMemory = av >> 10 >> 10; - this->AvailablePhysicalMemory = ap >> 10 >> 10; - return true; -#else - return false; -#endif -} - -bool SystemInformationImplementation::QueryLinuxMemory() -{ -#if defined(__linux) - unsigned long tv = 0; - unsigned long tp = 0; - unsigned long av = 0; - unsigned long ap = 0; - - char buffer[1024]; // for reading lines - - int linuxMajor = 0; - int linuxMinor = 0; - - // Find the Linux kernel version first - struct utsname unameInfo; - int errorFlag = uname(&unameInfo); - if (errorFlag != 0) { - std::cout << "Problem calling uname(): " << strerror(errno) << std::endl; - return false; - } - - if (strlen(unameInfo.release) >= 3) { - // release looks like "2.6.3-15mdk-i686-up-4GB" - char majorChar = unameInfo.release[0]; - char minorChar = unameInfo.release[2]; - - if (isdigit(majorChar)) { - linuxMajor = majorChar - '0'; - } - - if (isdigit(minorChar)) { - linuxMinor = minorChar - '0'; - } - } - - FILE* fd = fopen("/proc/meminfo", "r"); - if (!fd) { - std::cout << "Problem opening 
/proc/meminfo" << std::endl; - return false; - } - - if (linuxMajor >= 3 || ((linuxMajor >= 2) && (linuxMinor >= 6))) { - // new /proc/meminfo format since kernel 2.6.x - // Rigorously, this test should check from the developing version 2.5.x - // that introduced the new format... - - enum - { - mMemTotal, - mMemFree, - mBuffers, - mCached, - mSwapTotal, - mSwapFree - }; - const char* format[6] = { "MemTotal:%lu kB", "MemFree:%lu kB", - "Buffers:%lu kB", "Cached:%lu kB", - "SwapTotal:%lu kB", "SwapFree:%lu kB" }; - bool have[6] = { false, false, false, false, false, false }; - unsigned long value[6]; - int count = 0; - while (fgets(buffer, static_cast(sizeof(buffer)), fd)) { - for (int i = 0; i < 6; ++i) { - if (!have[i] && sscanf(buffer, format[i], &value[i]) == 1) { - have[i] = true; - ++count; - } - } - } - if (count == 6) { - this->TotalPhysicalMemory = value[mMemTotal] / 1024; - this->AvailablePhysicalMemory = - (value[mMemFree] + value[mBuffers] + value[mCached]) / 1024; - this->TotalVirtualMemory = value[mSwapTotal] / 1024; - this->AvailableVirtualMemory = value[mSwapFree] / 1024; - } else { - std::cout << "Problem parsing /proc/meminfo" << std::endl; - fclose(fd); - return false; - } - } else { - // /proc/meminfo format for kernel older than 2.6.x - - unsigned long temp; - unsigned long cachedMem; - unsigned long buffersMem; - // Skip "total: used:..." - char* r = fgets(buffer, static_cast(sizeof(buffer)), fd); - int status = 0; - if (r == buffer) { - status += fscanf(fd, "Mem: %lu %lu %lu %lu %lu %lu\n", &tp, &temp, &ap, - &temp, &buffersMem, &cachedMem); - } - if (status == 6) { - status += fscanf(fd, "Swap: %lu %lu %lu\n", &tv, &temp, &av); - } - if (status == 9) { - this->TotalVirtualMemory = tv >> 10 >> 10; - this->TotalPhysicalMemory = tp >> 10 >> 10; - this->AvailableVirtualMemory = av >> 10 >> 10; - this->AvailablePhysicalMemory = - (ap + buffersMem + cachedMem) >> 10 >> 10; - } else { - std::cout << "Problem parsing /proc/meminfo" << std::endl; - fclose(fd); - return false; - } - } - fclose(fd); - - return true; -#else - return false; -#endif -} - -bool SystemInformationImplementation::QueryCygwinMemory() -{ -#ifdef __CYGWIN__ - // _SC_PAGE_SIZE does return the mmap() granularity on Cygwin, - // see http://cygwin.com/ml/cygwin/2006-06/msg00350.html - // Therefore just use 4096 as the page size of Windows. - long m = sysconf(_SC_PHYS_PAGES); - if (m < 0) { - return false; - } - this->TotalPhysicalMemory = m >> 8; - return true; -#else - return false; -#endif -} - -bool SystemInformationImplementation::QueryAIXMemory() -{ -#if defined(_AIX) && defined(_SC_AIX_REALMEM) - long c = sysconf(_SC_AIX_REALMEM); - if (c <= 0) { - return false; - } - - this->TotalPhysicalMemory = c / 1024; - - return true; -#else - return false; -#endif -} - -bool SystemInformationImplementation::QueryMemoryBySysconf() -{ -#if defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE) - // Assume the mmap() granularity as returned by _SC_PAGESIZE is also - // the system page size. The only known system where this isn't true - // is Cygwin. 
- long p = sysconf(_SC_PHYS_PAGES); - long m = sysconf(_SC_PAGESIZE); - - if (p < 0 || m < 0) { - return false; - } - - // assume pagesize is a power of 2 and smaller 1 MiB - size_t pagediv = (1024 * 1024 / m); - - this->TotalPhysicalMemory = p; - this->TotalPhysicalMemory /= pagediv; - -# if defined(_SC_AVPHYS_PAGES) - p = sysconf(_SC_AVPHYS_PAGES); - if (p < 0) { - return false; - } - - this->AvailablePhysicalMemory = p; - this->AvailablePhysicalMemory /= pagediv; -# endif - - return true; -#else - return false; -#endif -} - -/** Query for the memory status */ -bool SystemInformationImplementation::QueryMemory() -{ - return this->QueryMemoryBySysconf(); -} - -/** */ -size_t SystemInformationImplementation::GetTotalVirtualMemory() -{ - return this->TotalVirtualMemory; -} - -/** */ -size_t SystemInformationImplementation::GetAvailableVirtualMemory() -{ - return this->AvailableVirtualMemory; -} - -size_t SystemInformationImplementation::GetTotalPhysicalMemory() -{ - return this->TotalPhysicalMemory; -} - -/** */ -size_t SystemInformationImplementation::GetAvailablePhysicalMemory() -{ - return this->AvailablePhysicalMemory; -} - -/** Get Cycle differences */ -SystemInformation::LongLong -SystemInformationImplementation::GetCyclesDifference(DELAY_FUNC DelayFunction, - unsigned int uiParameter) -{ -#if defined(_MSC_VER) && (_MSC_VER >= 1400) - unsigned __int64 stamp1, stamp2; - - stamp1 = __rdtsc(); - DelayFunction(uiParameter); - stamp2 = __rdtsc(); - - return stamp2 - stamp1; -#elif USE_ASM_INSTRUCTIONS - - unsigned int edx1, eax1; - unsigned int edx2, eax2; - - // Calculate the frequency of the CPU instructions. - __try { - _asm { - push uiParameter ; push parameter param - mov ebx, DelayFunction ; store func in ebx - - RDTSC_INSTRUCTION - - mov esi, eax ; esi = eax - mov edi, edx ; edi = edx - - call ebx ; call the delay functions - - RDTSC_INSTRUCTION - - pop ebx - - mov edx2, edx ; edx2 = edx - mov eax2, eax ; eax2 = eax - - mov edx1, edi ; edx2 = edi - mov eax1, esi ; eax2 = esi - } - } __except (1) { - return -1; - } - - return ((((__int64)edx2 << 32) + eax2) - (((__int64)edx1 << 32) + eax1)); - -#else - (void)DelayFunction; - (void)uiParameter; - return -1; -#endif -} - -/** Compute the delay overhead */ -void SystemInformationImplementation::DelayOverhead(unsigned int uiMS) -{ -#if defined(_WIN32) - LARGE_INTEGER Frequency, StartCounter, EndCounter; - __int64 x; - - // Get the frequency of the high performance counter. - if (!QueryPerformanceFrequency(&Frequency)) { - return; - } - x = Frequency.QuadPart / 1000 * uiMS; - - // Get the starting position of the counter. - QueryPerformanceCounter(&StartCounter); - - do { - // Get the ending position of the counter. - QueryPerformanceCounter(&EndCounter); - } while (EndCounter.QuadPart - StartCounter.QuadPart == x); -#endif - (void)uiMS; -} - -/** Works only for windows */ -bool SystemInformationImplementation::IsSMTSupported() -{ - return this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical > 1; -} - -/** Return the APIC Id. Works only for windows. */ -unsigned char SystemInformationImplementation::GetAPICId() -{ - int Regs[4] = { 0, 0, 0, 0 }; - -#if USE_CPUID - if (!this->IsSMTSupported()) { - return static_cast(-1); // HT not supported - } // Logical processor = 1 - call_cpuid(1, Regs); -#endif - - return static_cast((Regs[1] & INITIAL_APIC_ID_BITS) >> 24); -} - -/** Count the number of CPUs. Works only on windows. 
*/ -void SystemInformationImplementation::CPUCountWindows() -{ -#if defined(_WIN32) - this->NumberOfPhysicalCPU = 0; - this->NumberOfLogicalCPU = 0; - - typedef BOOL(WINAPI * GetLogicalProcessorInformationType)( - PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD); - static GetLogicalProcessorInformationType pGetLogicalProcessorInformation = - (GetLogicalProcessorInformationType)GetProcAddress( - GetModuleHandleW(L"kernel32"), "GetLogicalProcessorInformation"); - - if (!pGetLogicalProcessorInformation) { - // Fallback to approximate implementation on ancient Windows versions. - SYSTEM_INFO info; - ZeroMemory(&info, sizeof(info)); - GetSystemInfo(&info); - this->NumberOfPhysicalCPU = - static_cast(info.dwNumberOfProcessors); - this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU; - return; - } - - std::vector ProcInfo; - { - DWORD Length = 0; - DWORD rc = pGetLogicalProcessorInformation(nullptr, &Length); - assert(FALSE == rc); - (void)rc; // Silence unused variable warning in Borland C++ 5.81 - assert(GetLastError() == ERROR_INSUFFICIENT_BUFFER); - ProcInfo.resize(Length / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION)); - rc = pGetLogicalProcessorInformation(&ProcInfo[0], &Length); - assert(rc != FALSE); - (void)rc; // Silence unused variable warning in Borland C++ 5.81 - } - - typedef std::vector::iterator - pinfoIt_t; - for (pinfoIt_t it = ProcInfo.begin(); it != ProcInfo.end(); ++it) { - SYSTEM_LOGICAL_PROCESSOR_INFORMATION PInfo = *it; - if (PInfo.Relationship != RelationProcessorCore) { - continue; - } - - std::bitset::digits> ProcMask( - (unsigned long long)PInfo.ProcessorMask); - unsigned int count = (unsigned int)ProcMask.count(); - if (count == 0) { // I think this should never happen, but just to be safe. - continue; - } - this->NumberOfPhysicalCPU++; - this->NumberOfLogicalCPU += (unsigned int)count; - this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical = count; - } - this->NumberOfPhysicalCPU = std::max(1u, this->NumberOfPhysicalCPU); - this->NumberOfLogicalCPU = std::max(1u, this->NumberOfLogicalCPU); -#else -#endif -} - -/** Return the number of logical CPUs on the system */ -unsigned int SystemInformationImplementation::GetNumberOfLogicalCPU() -{ - return this->NumberOfLogicalCPU; -} - -/** Return the number of physical CPUs on the system */ -unsigned int SystemInformationImplementation::GetNumberOfPhysicalCPU() -{ - return this->NumberOfPhysicalCPU; -} - -/** For Mac use sysctlbyname calls to find system info */ -bool SystemInformationImplementation::ParseSysCtl() -{ -#if defined(__APPLE__) - char retBuf[128]; - int err = 0; - uint64_t value = 0; - size_t len = sizeof(value); - sysctlbyname("hw.memsize", &value, &len, nullptr, 0); - this->TotalPhysicalMemory = static_cast(value / 1048576); - - // Parse values for Mac - this->AvailablePhysicalMemory = 0; - vm_statistics_data_t vmstat; - mach_msg_type_number_t count = HOST_VM_INFO_COUNT; - if (host_statistics(mach_host_self(), HOST_VM_INFO, (host_info_t)&vmstat, - &count) == KERN_SUCCESS) { - len = sizeof(value); - err = sysctlbyname("hw.pagesize", &value, &len, nullptr, 0); - int64_t available_memory = - (vmstat.free_count + vmstat.inactive_count) * value; - this->AvailablePhysicalMemory = - static_cast(available_memory / 1048576); - } - -# ifdef VM_SWAPUSAGE - // Virtual memory. 
- int mib[2] = { CTL_VM, VM_SWAPUSAGE }; - unsigned int miblen = - static_cast(sizeof(mib) / sizeof(mib[0])); - struct xsw_usage swap; - len = sizeof(swap); - err = sysctl(mib, miblen, &swap, &len, nullptr, 0); - if (err == 0) { - this->AvailableVirtualMemory = - static_cast(swap.xsu_avail / 1048576); - this->TotalVirtualMemory = static_cast(swap.xsu_total / 1048576); - } -# else - this->AvailableVirtualMemory = 0; - this->TotalVirtualMemory = 0; -# endif - - // CPU Info - len = sizeof(this->NumberOfPhysicalCPU); - sysctlbyname("hw.physicalcpu", &this->NumberOfPhysicalCPU, &len, nullptr, 0); - len = sizeof(this->NumberOfLogicalCPU); - sysctlbyname("hw.logicalcpu", &this->NumberOfLogicalCPU, &len, nullptr, 0); - - int cores_per_package = 0; - len = sizeof(cores_per_package); - err = sysctlbyname("machdep.cpu.cores_per_package", &cores_per_package, &len, - nullptr, 0); - // That name was not found, default to 1 - this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical = - err != 0 ? 1 : static_cast(cores_per_package); - - len = sizeof(value); - sysctlbyname("hw.cpufrequency", &value, &len, nullptr, 0); - this->CPUSpeedInMHz = static_cast(value) / 1000000; - - // Chip family - len = sizeof(this->ChipID.Family); - // Seems only the intel chips will have this name so if this fails it is - // probably a PPC machine - err = - sysctlbyname("machdep.cpu.family", &this->ChipID.Family, &len, nullptr, 0); - if (err != 0) // Go back to names we know but are less descriptive - { - this->ChipID.Family = 0; - ::memset(retBuf, 0, 128); - len = 32; - err = sysctlbyname("hw.machine", &retBuf, &len, nullptr, 0); - std::string machineBuf(retBuf); - if (machineBuf.find_first_of("Power") != std::string::npos) { - this->ChipID.Vendor = "IBM"; - len = sizeof(this->ChipID.Family); - err = sysctlbyname("hw.cputype", &this->ChipID.Family, &len, nullptr, 0); - len = sizeof(this->ChipID.Model); - err = - sysctlbyname("hw.cpusubtype", &this->ChipID.Model, &len, nullptr, 0); - this->FindManufacturer(); - } - } else // Should be an Intel Chip. 
- { - len = sizeof(this->ChipID.Family); - err = sysctlbyname("machdep.cpu.family", &this->ChipID.Family, &len, - nullptr, 0); - - ::memset(retBuf, 0, 128); - len = 128; - err = sysctlbyname("machdep.cpu.vendor", retBuf, &len, nullptr, 0); - // Chip Vendor - this->ChipID.Vendor = retBuf; - this->FindManufacturer(); - - // Chip Model - len = sizeof(value); - err = sysctlbyname("machdep.cpu.model", &value, &len, nullptr, 0); - this->ChipID.Model = static_cast(value); - - // Chip Stepping - len = sizeof(value); - value = 0; - err = sysctlbyname("machdep.cpu.stepping", &value, &len, nullptr, 0); - if (!err) { - this->ChipID.Revision = static_cast(value); - } - - // feature string - char* buf = nullptr; - size_t allocSize = 128; - - err = 0; - len = 0; - - // sysctlbyname() will return with err==0 && len==0 if the buffer is too - // small - while (err == 0 && len == 0) { - delete[] buf; - allocSize *= 2; - buf = new char[allocSize]; - if (!buf) { - break; - } - buf[0] = ' '; - len = allocSize - 2; // keep space for leading and trailing space - err = sysctlbyname("machdep.cpu.features", buf + 1, &len, nullptr, 0); - } - if (!err && buf && len) { - // now we can match every flags as space + flag + space - buf[len + 1] = ' '; - std::string cpuflags(buf, len + 2); - - if ((cpuflags.find(" FPU ") != std::string::npos)) { - this->Features.HasFPU = true; - } - if ((cpuflags.find(" TSC ") != std::string::npos)) { - this->Features.HasTSC = true; - } - if ((cpuflags.find(" MMX ") != std::string::npos)) { - this->Features.HasMMX = true; - } - if ((cpuflags.find(" SSE ") != std::string::npos)) { - this->Features.HasSSE = true; - } - if ((cpuflags.find(" SSE2 ") != std::string::npos)) { - this->Features.HasSSE2 = true; - } - if ((cpuflags.find(" APIC ") != std::string::npos)) { - this->Features.HasAPIC = true; - } - if ((cpuflags.find(" CMOV ") != std::string::npos)) { - this->Features.HasCMOV = true; - } - if ((cpuflags.find(" MTRR ") != std::string::npos)) { - this->Features.HasMTRR = true; - } - if ((cpuflags.find(" ACPI ") != std::string::npos)) { - this->Features.HasACPI = true; - } - } - delete[] buf; - } - - // brand string - ::memset(retBuf, 0, sizeof(retBuf)); - len = sizeof(retBuf); - err = sysctlbyname("machdep.cpu.brand_string", retBuf, &len, nullptr, 0); - if (!err) { - this->ChipID.ProcessorName = retBuf; - this->ChipID.ModelName = retBuf; - } - - // Cache size - len = sizeof(value); - err = sysctlbyname("hw.l1icachesize", &value, &len, nullptr, 0); - this->Features.L1CacheSize = static_cast(value); - len = sizeof(value); - err = sysctlbyname("hw.l2cachesize", &value, &len, nullptr, 0); - this->Features.L2CacheSize = static_cast(value); - - return true; -#else - return false; -#endif -} - -/** Extract a value from sysctl command */ -std::string SystemInformationImplementation::ExtractValueFromSysCtl( - const char* word) -{ - size_t pos = this->SysCtlBuffer.find(word); - if (pos != std::string::npos) { - pos = this->SysCtlBuffer.find(": ", pos); - size_t pos2 = this->SysCtlBuffer.find("\n", pos); - if (pos != std::string::npos && pos2 != std::string::npos) { - return this->SysCtlBuffer.substr(pos + 2, pos2 - pos - 2); - } - } - return ""; -} - -/** Run a given process */ -std::string SystemInformationImplementation::RunProcess( - std::vector args) -{ - std::string buffer; - - // Run the application - kwsysProcess* gp = kwsysProcess_New(); - kwsysProcess_SetCommand(gp, args.data()); - kwsysProcess_SetOption(gp, kwsysProcess_Option_HideWindow, 1); - - kwsysProcess_Execute(gp); - - char* data = 
nullptr; - int length; - double timeout = 255; - int pipe; // pipe id as returned by kwsysProcess_WaitForData() - - while ((static_cast( - pipe = kwsysProcess_WaitForData(gp, &data, &length, &timeout)), - (pipe == kwsysProcess_Pipe_STDOUT || - pipe == kwsysProcess_Pipe_STDERR))) // wait for 1s - { - buffer.append(data, length); - } - kwsysProcess_WaitForExit(gp, nullptr); - - int result = 0; - switch (kwsysProcess_GetState(gp)) { - case kwsysProcess_State_Exited: { - result = kwsysProcess_GetExitValue(gp); - } break; - case kwsysProcess_State_Error: { - std::cerr << "Error: Could not run " << args[0] << ":\n"; - std::cerr << kwsysProcess_GetErrorString(gp) << "\n"; - } break; - case kwsysProcess_State_Exception: { - std::cerr << "Error: " << args[0] << " terminated with an exception: " - << kwsysProcess_GetExceptionString(gp) << "\n"; - } break; - case kwsysProcess_State_Starting: - case kwsysProcess_State_Executing: - case kwsysProcess_State_Expired: - case kwsysProcess_State_Killed: { - // Should not get here. - std::cerr << "Unexpected ending state after running " << args[0] - << std::endl; - } break; - } - kwsysProcess_Delete(gp); - if (result) { - std::cerr << "Error " << args[0] << " returned :" << result << "\n"; - } - return buffer; -} - -std::string SystemInformationImplementation::ParseValueFromKStat( - const char* arguments) -{ - std::vector args_string; - std::string command = arguments; - size_t start = std::string::npos; - size_t pos = command.find(' ', 0); - while (pos != std::string::npos) { - bool inQuotes = false; - // Check if we are between quotes - size_t b0 = command.find('"', 0); - size_t b1 = command.find('"', b0 + 1); - while (b0 != std::string::npos && b1 != std::string::npos && b1 > b0) { - if (pos > b0 && pos < b1) { - inQuotes = true; - break; - } - b0 = command.find('"', b1 + 1); - b1 = command.find('"', b0 + 1); - } - - if (!inQuotes) { - args_string.push_back(command.substr(start + 1, pos - start - 1)); - std::string& arg = args_string.back(); - - // Remove the quotes if any - arg.erase(std::remove(arg.begin(), arg.end(), '"'), arg.end()); - start = pos; - } - pos = command.find(' ', pos + 1); - } - args_string.push_back(command.substr(start + 1, command.size() - start - 1)); - - std::vector args; - args.reserve(3 + args_string.size()); - args.push_back("kstat"); - args.push_back("-p"); - for (size_t i = 0; i < args_string.size(); ++i) { - args.push_back(args_string[i].c_str()); - } - args.push_back(nullptr); - - std::string buffer = this->RunProcess(args); - - std::string value; - for (size_t i = buffer.size() - 1; i > 0; i--) { - if (buffer[i] == ' ' || buffer[i] == '\t') { - break; - } - if (buffer[i] != '\n' && buffer[i] != '\r') { - value.insert(0u, 1, buffer[i]); - } - } - return value; -} - -/** Querying for system information from Solaris */ -bool SystemInformationImplementation::QuerySolarisMemory() -{ -#if defined(__SVR4) && defined(__sun) -// Solaris allows querying this value by sysconf, but if this is -// a 32 bit process on a 64 bit host the returned memory will be -// limited to 4GiB. So if this is a 32 bit process or if the sysconf -// method fails use the kstat interface. 
-# if SIZEOF_VOID_P == 8 - if (this->QueryMemoryBySysconf()) { - return true; - } -# endif - - char* tail; - unsigned long totalMemory = - strtoul(this->ParseValueFromKStat("-s physmem").c_str(), &tail, 0); - this->TotalPhysicalMemory = totalMemory / 128; - - return true; -#else - return false; -#endif -} - -bool SystemInformationImplementation::QuerySolarisProcessor() -{ - if (!this->QueryProcessorBySysconf()) { - return false; - } - - // Parse values - this->CPUSpeedInMHz = static_cast( - atoi(this->ParseValueFromKStat("-s clock_MHz").c_str())); - - // Chip family - this->ChipID.Family = 0; - - // Chip Model - this->ChipID.ProcessorName = this->ParseValueFromKStat("-s cpu_type"); - this->ChipID.Model = 0; - - // Chip Vendor - if (this->ChipID.ProcessorName != "i386") { - this->ChipID.Vendor = "Sun"; - this->FindManufacturer(); - } - - return true; -} - -/** Querying for system information from Haiku OS */ -bool SystemInformationImplementation::QueryHaikuInfo() -{ -#if defined(__HAIKU__) - - // CPU count - system_info info; - get_system_info(&info); - this->NumberOfPhysicalCPU = info.cpu_count; - - // CPU speed - uint32 topologyNodeCount = 0; - cpu_topology_node_info* topology = 0; - get_cpu_topology_info(0, &topologyNodeCount); - if (topologyNodeCount != 0) - topology = new cpu_topology_node_info[topologyNodeCount]; - get_cpu_topology_info(topology, &topologyNodeCount); - - for (uint32 i = 0; i < topologyNodeCount; i++) { - if (topology[i].type == B_TOPOLOGY_CORE) { - this->CPUSpeedInMHz = - topology[i].data.core.default_frequency / 1000000.0f; - break; - } - } - - delete[] topology; - - // Physical Memory - this->TotalPhysicalMemory = (info.max_pages * B_PAGE_SIZE) / (1024 * 1024); - this->AvailablePhysicalMemory = this->TotalPhysicalMemory - - ((info.used_pages * B_PAGE_SIZE) / (1024 * 1024)); - - // NOTE: get_system_info_etc is currently a private call so just set to 0 - // until it becomes public - this->TotalVirtualMemory = 0; - this->AvailableVirtualMemory = 0; - - // Retrieve cpuid_info union for cpu 0 - cpuid_info cpu_info; - get_cpuid(&cpu_info, 0, 0); - - // Chip Vendor - // Use a temporary buffer so that we can add NULL termination to the string - char vbuf[13]; - strncpy(vbuf, cpu_info.eax_0.vendor_id, 12); - vbuf[12] = '\0'; - this->ChipID.Vendor = vbuf; - - this->FindManufacturer(); - - // Retrieve cpuid_info union for cpu 0 this time using a register value of 1 - get_cpuid(&cpu_info, 1, 0); - - this->NumberOfLogicalCPU = cpu_info.eax_1.logical_cpus; - - // Chip type - this->ChipID.Type = cpu_info.eax_1.type; - - // Chip family - this->ChipID.Family = cpu_info.eax_1.family; - - // Chip Model - this->ChipID.Model = cpu_info.eax_1.model; - - // Chip Revision - this->ChipID.Revision = cpu_info.eax_1.stepping; - - // Chip Extended Family - this->ChipID.ExtendedFamily = cpu_info.eax_1.extended_family; - - // Chip Extended Model - this->ChipID.ExtendedModel = cpu_info.eax_1.extended_model; - - // Get ChipID.ProcessorName from other information already gathered - this->RetrieveClassicalCPUIdentity(); - - // Cache size - this->Features.L1CacheSize = 0; - this->Features.L2CacheSize = 0; - - return true; - -#else - return false; -#endif -} - -bool SystemInformationImplementation::QueryQNXMemory() -{ -#if defined(__QNX__) - std::string buffer; - std::vector args; - args.clear(); - - args.push_back("showmem"); - args.push_back("-S"); - args.push_back(0); - buffer = this->RunProcess(args); - args.clear(); - - size_t pos = buffer.find("System RAM:"); - if (pos == std::string::npos) - 
return false; - pos = buffer.find(":", pos); - size_t pos2 = buffer.find("M (", pos); - if (pos2 == std::string::npos) - return false; - - pos++; - while (buffer[pos] == ' ') - pos++; - - this->TotalPhysicalMemory = atoi(buffer.substr(pos, pos2 - pos).c_str()); - return true; -#endif - return false; -} - -bool SystemInformationImplementation::QueryBSDMemory() -{ -#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \ - defined(__DragonFly__) - int ctrl[2] = { CTL_HW, HW_PHYSMEM }; -# if defined(HW_PHYSMEM64) - int64_t k; - ctrl[1] = HW_PHYSMEM64; -# else - int k; -# endif - size_t sz = sizeof(k); - - if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) { - return false; - } - - this->TotalPhysicalMemory = k >> 10 >> 10; - - return true; -#else - return false; -#endif -} - -bool SystemInformationImplementation::QueryQNXProcessor() -{ -#if defined(__QNX__) - // the output on my QNX 6.4.1 looks like this: - // Processor1: 686 Pentium II Stepping 3 2175MHz FPU - std::string buffer; - std::vector args; - args.clear(); - - args.push_back("pidin"); - args.push_back("info"); - args.push_back(0); - buffer = this->RunProcess(args); - args.clear(); - - size_t pos = buffer.find("Processor1:"); - if (pos == std::string::npos) - return false; - - size_t pos2 = buffer.find("MHz", pos); - if (pos2 == std::string::npos) - return false; - - size_t pos3 = pos2; - while (buffer[pos3] != ' ') - --pos3; - - this->CPUSpeedInMHz = atoi(buffer.substr(pos3 + 1, pos2 - pos3 - 1).c_str()); - - pos2 = buffer.find(" Stepping", pos); - if (pos2 != std::string::npos) { - pos2 = buffer.find(" ", pos2 + 1); - if (pos2 != std::string::npos && pos2 < pos3) { - this->ChipID.Revision = - atoi(buffer.substr(pos2 + 1, pos3 - pos2).c_str()); - } - } - - this->NumberOfPhysicalCPU = 0; - do { - pos = buffer.find("\nProcessor", pos + 1); - ++this->NumberOfPhysicalCPU; - } while (pos != std::string::npos); - this->NumberOfLogicalCPU = 1; - - return true; -#else - return false; -#endif -} - -bool SystemInformationImplementation::QueryBSDProcessor() -{ -#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \ - defined(__DragonFly__) - int k; - size_t sz = sizeof(k); - int ctrl[2] = { CTL_HW, HW_NCPU }; - - if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) { - return false; - } - - this->NumberOfPhysicalCPU = k; - this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU; - -# if defined(HW_CPUSPEED) - ctrl[1] = HW_CPUSPEED; - - if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) { - return false; - } - - this->CPUSpeedInMHz = (float)k; -# endif - -# if defined(CPU_SSE) - ctrl[0] = CTL_MACHDEP; - ctrl[1] = CPU_SSE; - - if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) { - return false; - } - - this->Features.HasSSE = (k > 0); -# endif - -# if defined(CPU_SSE2) - ctrl[0] = CTL_MACHDEP; - ctrl[1] = CPU_SSE2; - - if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) { - return false; - } - - this->Features.HasSSE2 = (k > 0); -# endif - -# if defined(CPU_CPUVENDOR) - ctrl[0] = CTL_MACHDEP; - ctrl[1] = CPU_CPUVENDOR; - char vbuf[25]; - ::memset(vbuf, 0, sizeof(vbuf)); - sz = sizeof(vbuf) - 1; - if (sysctl(ctrl, 2, vbuf, &sz, nullptr, 0) != 0) { - return false; - } - - this->ChipID.Vendor = vbuf; - this->FindManufacturer(); -# endif - - return true; -#else - return false; -#endif -} - -bool SystemInformationImplementation::QueryHPUXMemory() -{ -#if defined(__hpux) - unsigned long tv = 0; - unsigned long tp = 0; - unsigned long av = 0; - unsigned long ap = 0; - struct pst_static pst; - struct pst_dynamic pdy; - - unsigned long ps 
= 0; - if (pstat_getstatic(&pst, sizeof(pst), (size_t)1, 0) == -1) { - return false; - } - - ps = pst.page_size; - tp = pst.physical_memory * ps; - tv = (pst.physical_memory + pst.pst_maxmem) * ps; - if (pstat_getdynamic(&pdy, sizeof(pdy), (size_t)1, 0) == -1) { - return false; - } - - ap = tp - pdy.psd_rm * ps; - av = tv - pdy.psd_vm; - this->TotalVirtualMemory = tv >> 10 >> 10; - this->TotalPhysicalMemory = tp >> 10 >> 10; - this->AvailableVirtualMemory = av >> 10 >> 10; - this->AvailablePhysicalMemory = ap >> 10 >> 10; - return true; -#else - return false; -#endif -} - -bool SystemInformationImplementation::QueryHPUXProcessor() -{ -#if defined(__hpux) -# if defined(KWSYS_SYS_HAS_MPCTL_H) - int c = mpctl(MPC_GETNUMSPUS_SYS, 0, 0); - if (c <= 0) { - return false; - } - - this->NumberOfPhysicalCPU = c; - this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU; - - long t = sysconf(_SC_CPU_VERSION); - - if (t == -1) { - return false; - } - - switch (t) { - case CPU_PA_RISC1_0: - this->ChipID.Vendor = "Hewlett-Packard"; - this->ChipID.Family = 0x100; - break; - case CPU_PA_RISC1_1: - this->ChipID.Vendor = "Hewlett-Packard"; - this->ChipID.Family = 0x110; - break; - case CPU_PA_RISC2_0: - this->ChipID.Vendor = "Hewlett-Packard"; - this->ChipID.Family = 0x200; - break; -# if defined(CPU_HP_INTEL_EM_1_0) || defined(CPU_IA64_ARCHREV_0) -# ifdef CPU_HP_INTEL_EM_1_0 - case CPU_HP_INTEL_EM_1_0: -# endif -# ifdef CPU_IA64_ARCHREV_0 - case CPU_IA64_ARCHREV_0: -# endif - this->ChipID.Vendor = "GenuineIntel"; - this->Features.HasIA64 = true; - break; -# endif - default: - return false; - } - - this->FindManufacturer(); - - return true; -# else - return false; -# endif -#else - return false; -#endif -} - -/** Query the operating system information */ -bool SystemInformationImplementation::QueryOSInformation() -{ -#if defined(_WIN32) - - this->OSName = "Windows"; - - OSVERSIONINFOEXW osvi; - BOOL bIsWindows64Bit; - BOOL bOsVersionInfoEx; - char operatingSystem[256]; - - // Try calling GetVersionEx using the OSVERSIONINFOEX structure. - ZeroMemory(&osvi, sizeof(OSVERSIONINFOEXW)); - osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEXW); -# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx -# pragma warning(push) -# ifdef __INTEL_COMPILER -# pragma warning(disable : 1478) -# elif defined __clang__ -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wdeprecated-declarations" -# else -# pragma warning(disable : 4996) -# endif -# endif - bOsVersionInfoEx = GetVersionExW((OSVERSIONINFOW*)&osvi); - if (!bOsVersionInfoEx) { - osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOW); - if (!GetVersionExW((OSVERSIONINFOW*)&osvi)) { - return false; - } - } -# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx -# ifdef __clang__ -# pragma clang diagnostic pop -# else -# pragma warning(pop) -# endif -# endif - - switch (osvi.dwPlatformId) { - case VER_PLATFORM_WIN32_NT: - // Test for the product. - if (osvi.dwMajorVersion <= 4) { - this->OSRelease = "NT"; - } - if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 0) { - this->OSRelease = "2000"; - } - if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { - this->OSRelease = "XP"; - } - // XP Professional x64 - if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 2) { - this->OSRelease = "XP"; - } -# ifdef VER_NT_WORKSTATION - // Test for product type. 
- if (bOsVersionInfoEx) { - if (osvi.wProductType == VER_NT_WORKSTATION) { - if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 0) { - this->OSRelease = "Vista"; - } - if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 1) { - this->OSRelease = "7"; - } -// VER_SUITE_PERSONAL may not be defined -# ifdef VER_SUITE_PERSONAL - else { - if (osvi.wSuiteMask & VER_SUITE_PERSONAL) { - this->OSRelease += " Personal"; - } else { - this->OSRelease += " Professional"; - } - } -# endif - } else if (osvi.wProductType == VER_NT_SERVER) { - // Check for .NET Server instead of Windows XP. - if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { - this->OSRelease = ".NET"; - } - - // Continue with the type detection. - if (osvi.wSuiteMask & VER_SUITE_DATACENTER) { - this->OSRelease += " DataCenter Server"; - } else if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) { - this->OSRelease += " Advanced Server"; - } else { - this->OSRelease += " Server"; - } - } - - sprintf(operatingSystem, "%ls (Build %ld)", osvi.szCSDVersion, - osvi.dwBuildNumber & 0xFFFF); - this->OSVersion = operatingSystem; - } else -# endif // VER_NT_WORKSTATION - { - HKEY hKey; - wchar_t szProductType[80]; - DWORD dwBufLen; - - // Query the registry to retrieve information. - RegOpenKeyExW(HKEY_LOCAL_MACHINE, - L"SYSTEM\\CurrentControlSet\\Control\\ProductOptions", 0, - KEY_QUERY_VALUE, &hKey); - RegQueryValueExW(hKey, L"ProductType", nullptr, nullptr, - (LPBYTE)szProductType, &dwBufLen); - RegCloseKey(hKey); - - if (lstrcmpiW(L"WINNT", szProductType) == 0) { - this->OSRelease += " Professional"; - } - if (lstrcmpiW(L"LANMANNT", szProductType) == 0) { - // Decide between Windows 2000 Advanced Server and Windows .NET - // Enterprise Server. - if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { - this->OSRelease += " Standard Server"; - } else { - this->OSRelease += " Server"; - } - } - if (lstrcmpiW(L"SERVERNT", szProductType) == 0) { - // Decide between Windows 2000 Advanced Server and Windows .NET - // Enterprise Server. - if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { - this->OSRelease += " Enterprise Server"; - } else { - this->OSRelease += " Advanced Server"; - } - } - } - - // Display version, service pack (if any), and build number. - if (osvi.dwMajorVersion <= 4) { - // NB: NT 4.0 and earlier. - sprintf(operatingSystem, "version %ld.%ld %ls (Build %ld)", - osvi.dwMajorVersion, osvi.dwMinorVersion, osvi.szCSDVersion, - osvi.dwBuildNumber & 0xFFFF); - this->OSVersion = operatingSystem; - } else if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { - // Windows XP and .NET server. - typedef BOOL(CALLBACK * LPFNPROC)(HANDLE, BOOL*); - HINSTANCE hKernelDLL; - LPFNPROC DLLProc; - - // Load the Kernel32 DLL. - hKernelDLL = LoadLibraryW(L"kernel32"); - if (hKernelDLL != nullptr) { - // Only XP and .NET Server support IsWOW64Process so... Load - // dynamically! - DLLProc = (LPFNPROC)GetProcAddress(hKernelDLL, "IsWow64Process"); - - // If the function address is valid, call the function. - if (DLLProc != nullptr) - (DLLProc)(GetCurrentProcess(), &bIsWindows64Bit); - else - bIsWindows64Bit = false; - - // Free the DLL module. - FreeLibrary(hKernelDLL); - } - } else { - // Windows 2000 and everything else. - sprintf(operatingSystem, "%ls (Build %ld)", osvi.szCSDVersion, - osvi.dwBuildNumber & 0xFFFF); - this->OSVersion = operatingSystem; - } - break; - - case VER_PLATFORM_WIN32_WINDOWS: - // Test for the product. 
- if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 0) { - this->OSRelease = "95"; - if (osvi.szCSDVersion[1] == 'C') { - this->OSRelease += "OSR 2.5"; - } else if (osvi.szCSDVersion[1] == 'B') { - this->OSRelease += "OSR 2"; - } - } - - if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 10) { - this->OSRelease = "98"; - if (osvi.szCSDVersion[1] == 'A') { - this->OSRelease += "SE"; - } - } - - if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 90) { - this->OSRelease = "Me"; - } - break; - - case VER_PLATFORM_WIN32s: - this->OSRelease = "Win32s"; - break; - - default: - this->OSRelease = "Unknown"; - break; - } - - // Get the hostname - WORD wVersionRequested; - WSADATA wsaData; - char name[255]; - wVersionRequested = MAKEWORD(2, 0); - - if (WSAStartup(wVersionRequested, &wsaData) == 0) { - gethostname(name, sizeof(name)); - WSACleanup(); - } - this->Hostname = name; - - const char* arch = getenv("PROCESSOR_ARCHITECTURE"); - const char* wow64 = getenv("PROCESSOR_ARCHITEW6432"); - if (arch) { - this->OSPlatform = arch; - } - - if (wow64) { - // the PROCESSOR_ARCHITEW6432 is only defined when running 32bit programs - // on 64bit OS - this->OSIs64Bit = true; - } else if (arch) { - // all values other than x86 map to 64bit architectures - this->OSIs64Bit = (strncmp(arch, "x86", 3) != 0); - } - -#else - - struct utsname unameInfo; - int errorFlag = uname(&unameInfo); - if (errorFlag == 0) { - this->OSName = unameInfo.sysname; - this->Hostname = unameInfo.nodename; - this->OSRelease = unameInfo.release; - this->OSVersion = unameInfo.version; - this->OSPlatform = unameInfo.machine; - - // This is still insufficient to capture 64bit architecture such - // powerpc and possible mips and sparc - if (this->OSPlatform.find_first_of("64") != std::string::npos) { - this->OSIs64Bit = true; - } - } - -# ifdef __APPLE__ - this->OSName = "Unknown Apple OS"; - this->OSRelease = "Unknown product version"; - this->OSVersion = "Unknown build version"; - - this->CallSwVers("-productName", this->OSName); - this->CallSwVers("-productVersion", this->OSRelease); - this->CallSwVers("-buildVersion", this->OSVersion); -# endif - -#endif - - return true; -} - -int SystemInformationImplementation::CallSwVers(const char* arg, - std::string& ver) -{ -#ifdef __APPLE__ - std::vector args; - args.push_back("sw_vers"); - args.push_back(arg); - args.push_back(nullptr); - ver = this->RunProcess(args); - this->TrimNewline(ver); -#else - // avoid C4100 - (void)arg; - (void)ver; -#endif - return 0; -} - -void SystemInformationImplementation::TrimNewline(std::string& output) -{ - // remove \r - std::string::size_type pos = 0; - while ((pos = output.find("\r", pos)) != std::string::npos) { - output.erase(pos); - } - - // remove \n - pos = 0; - while ((pos = output.find("\n", pos)) != std::string::npos) { - output.erase(pos); - } -} - -/** Return true if the machine is 64 bits */ -bool SystemInformationImplementation::Is64Bits() -{ - return this->OSIs64Bit; -} -} diff --git a/test/API/driver/kwsys/SystemInformation.hxx.in b/test/API/driver/kwsys/SystemInformation.hxx.in deleted file mode 100644 index fc42e9dc72d..00000000000 --- a/test/API/driver/kwsys/SystemInformation.hxx.in +++ /dev/null @@ -1,170 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/ -#ifndef @KWSYS_NAMESPACE@_SystemInformation_h -#define @KWSYS_NAMESPACE@_SystemInformation_h - -#include <@KWSYS_NAMESPACE@/Configure.hxx> - -#include /* size_t */ -#include - -namespace @KWSYS_NAMESPACE@ { - -// forward declare the implementation class -class SystemInformationImplementation; - -class @KWSYS_NAMESPACE@_EXPORT SystemInformation -{ -#if @KWSYS_USE_LONG_LONG@ - typedef long long LongLong; -#elif @KWSYS_USE___INT64@ - typedef __int64 LongLong; -#else -# error "No Long Long" -#endif - friend class SystemInformationImplementation; - SystemInformationImplementation* Implementation; - -public: - // possible parameter values for DoesCPUSupportFeature() - static const long int CPU_FEATURE_MMX = 1 << 0; - static const long int CPU_FEATURE_MMX_PLUS = 1 << 1; - static const long int CPU_FEATURE_SSE = 1 << 2; - static const long int CPU_FEATURE_SSE2 = 1 << 3; - static const long int CPU_FEATURE_AMD_3DNOW = 1 << 4; - static const long int CPU_FEATURE_AMD_3DNOW_PLUS = 1 << 5; - static const long int CPU_FEATURE_IA64 = 1 << 6; - static const long int CPU_FEATURE_MP_CAPABLE = 1 << 7; - static const long int CPU_FEATURE_HYPERTHREAD = 1 << 8; - static const long int CPU_FEATURE_SERIALNUMBER = 1 << 9; - static const long int CPU_FEATURE_APIC = 1 << 10; - static const long int CPU_FEATURE_SSE_FP = 1 << 11; - static const long int CPU_FEATURE_SSE_MMX = 1 << 12; - static const long int CPU_FEATURE_CMOV = 1 << 13; - static const long int CPU_FEATURE_MTRR = 1 << 14; - static const long int CPU_FEATURE_L1CACHE = 1 << 15; - static const long int CPU_FEATURE_L2CACHE = 1 << 16; - static const long int CPU_FEATURE_L3CACHE = 1 << 17; - static const long int CPU_FEATURE_ACPI = 1 << 18; - static const long int CPU_FEATURE_THERMALMONITOR = 1 << 19; - static const long int CPU_FEATURE_TEMPSENSEDIODE = 1 << 20; - static const long int CPU_FEATURE_FREQUENCYID = 1 << 21; - static const long int CPU_FEATURE_VOLTAGEID_FREQUENCY = 1 << 22; - static const long int CPU_FEATURE_FPU = 1 << 23; - -public: - SystemInformation(); - ~SystemInformation(); - - SystemInformation(const SystemInformation&) = delete; - SystemInformation& operator=(const SystemInformation&) = delete; - - const char* GetVendorString(); - const char* GetVendorID(); - std::string GetTypeID(); - std::string GetFamilyID(); - std::string GetModelID(); - std::string GetModelName(); - std::string GetSteppingCode(); - const char* GetExtendedProcessorName(); - const char* GetProcessorSerialNumber(); - int GetProcessorCacheSize(); - unsigned int GetLogicalProcessorsPerPhysical(); - float GetProcessorClockFrequency(); - int GetProcessorAPICID(); - int GetProcessorCacheXSize(long int); - bool DoesCPUSupportFeature(long int); - - // returns an informative general description of the cpu - // on this system. - std::string GetCPUDescription(); - - const char* GetHostname(); - std::string GetFullyQualifiedDomainName(); - - const char* GetOSName(); - const char* GetOSRelease(); - const char* GetOSVersion(); - const char* GetOSPlatform(); - - int GetOSIsWindows(); - int GetOSIsLinux(); - int GetOSIsApple(); - - // returns an informative general description of the os - // on this system. - std::string GetOSDescription(); - - // returns if the operating system is 64bit or not. - bool Is64Bits(); - - unsigned int GetNumberOfLogicalCPU(); - unsigned int GetNumberOfPhysicalCPU(); - - bool DoesCPUSupportCPUID(); - - // Retrieve id of the current running process - LongLong GetProcessId(); - - // Retrieve memory information in MiB. 
- size_t GetTotalVirtualMemory(); - size_t GetAvailableVirtualMemory(); - size_t GetTotalPhysicalMemory(); - size_t GetAvailablePhysicalMemory(); - - // returns an informative general description if the installed and - // available ram on this system. See the GetHostMemoryTotal, and - // Get{Host,Proc}MemoryAvailable methods for more information. - std::string GetMemoryDescription(const char* hostLimitEnvVarName = nullptr, - const char* procLimitEnvVarName = nullptr); - - // Retrieve amount of physical memory installed on the system in KiB - // units. - LongLong GetHostMemoryTotal(); - - // Get total system RAM in units of KiB available colectivley to all - // processes in a process group. An example of a process group - // are the processes comprising an mpi program which is running in - // parallel. The amount of memory reported may differ from the host - // total if a host wide resource limit is applied. Such reource limits - // are reported to us via an application specified environment variable. - LongLong GetHostMemoryAvailable(const char* hostLimitEnvVarName = nullptr); - - // Get total system RAM in units of KiB available to this process. - // This may differ from the host available if a per-process resource - // limit is applied. per-process memory limits are applied on unix - // system via rlimit API. Resource limits that are not imposed via - // rlimit API may be reported to us via an application specified - // environment variable. - LongLong GetProcMemoryAvailable(const char* hostLimitEnvVarName = nullptr, - const char* procLimitEnvVarName = nullptr); - - // Get the system RAM used by all processes on the host, in units of KiB. - LongLong GetHostMemoryUsed(); - - // Get system RAM used by this process id in units of KiB. - LongLong GetProcMemoryUsed(); - - // Return the load average of the machine or -0.0 if it cannot - // be determined. - double GetLoadAverage(); - - // enable/disable stack trace signal handler. In order to - // produce an informative stack trace the application should - // be dynamically linked and compiled with debug symbols. - static void SetStackTraceOnError(int enable); - - // format and return the current program stack in a string. In - // order to produce an informative stack trace the application - // should be dynamically linked and compiled with debug symbols. - static std::string GetProgramStack(int firstFrame, int wholePath); - - /** Run the different checks */ - void RunCPUCheck(); - void RunOSCheck(); - void RunMemoryCheck(); -}; - -} // namespace @KWSYS_NAMESPACE@ - -#endif diff --git a/test/API/driver/kwsys/SystemTools.cxx b/test/API/driver/kwsys/SystemTools.cxx deleted file mode 100644 index ce4d6ef9505..00000000000 --- a/test/API/driver/kwsys/SystemTools.cxx +++ /dev/null @@ -1,4703 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/ -#ifdef __osf__ -# define _OSF_SOURCE -# define _POSIX_C_SOURCE 199506L -# define _XOPEN_SOURCE_EXTENDED -#endif - -#if defined(_WIN32) && \ - (defined(_MSC_VER) || defined(__WATCOMC__) || defined(__BORLANDC__) || \ - defined(__MINGW32__)) -# define KWSYS_WINDOWS_DIRS -#else -# if defined(__SUNPRO_CC) -# include -# endif -#endif - -#include "kwsysPrivate.h" -#include KWSYS_HEADER(RegularExpression.hxx) -#include KWSYS_HEADER(SystemTools.hxx) -#include KWSYS_HEADER(Directory.hxx) -#include KWSYS_HEADER(FStream.hxx) -#include KWSYS_HEADER(Encoding.h) -#include KWSYS_HEADER(Encoding.hxx) - -#include -#include -#include -#include -#include -#include - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "Directory.hxx.in" -# include "Encoding.hxx.in" -# include "FStream.hxx.in" -# include "RegularExpression.hxx.in" -# include "SystemTools.hxx.in" -#endif - -#ifdef _MSC_VER -# pragma warning(disable : 4786) -#endif - -#if defined(__sgi) && !defined(__GNUC__) -# pragma set woff 1375 /* base class destructor not virtual */ -#endif - -#include -#include -#ifdef __QNX__ -# include /* for malloc/free on QNX */ -#endif -#include -#include -#include -#include - -#if defined(_WIN32) && !defined(_MSC_VER) && defined(__GNUC__) -# include /* for strcasecmp */ -#endif - -#ifdef _MSC_VER -# define umask _umask // Note this is still umask on Borland -#endif - -// support for realpath call -#ifndef _WIN32 -# include -# include -# include -# include -# include -# include -# include -# ifndef __VMS -# include -# include -# endif -# include /* sigprocmask */ -#endif - -#ifdef __linux -# include -#endif - -// Windows API. -#if defined(_WIN32) -# include -# include -# ifndef INVALID_FILE_ATTRIBUTES -# define INVALID_FILE_ATTRIBUTES ((DWORD)-1) -# endif -# if defined(_MSC_VER) && _MSC_VER >= 1800 -# define KWSYS_WINDOWS_DEPRECATED_GetVersionEx -# endif -#elif defined(__CYGWIN__) -# include -# undef _WIN32 -#endif - -#if !KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H -extern char** environ; -#endif - -#ifdef __CYGWIN__ -# include -#endif - -// getpwnam doesn't exist on Windows and Cray Xt3/Catamount -// same for TIOCGWINSZ -#if defined(_WIN32) || defined(__LIBCATAMOUNT__) || \ - (defined(HAVE_GETPWNAM) && HAVE_GETPWNAM == 0) -# undef HAVE_GETPWNAM -# undef HAVE_TTY_INFO -#else -# define HAVE_GETPWNAM 1 -# define HAVE_TTY_INFO 1 -#endif - -#define VTK_URL_PROTOCOL_REGEX "([a-zA-Z0-9]*)://(.*)" -#define VTK_URL_REGEX \ - "([a-zA-Z0-9]*)://(([A-Za-z0-9]+)(:([^:@]+))?@)?([^:@/]+)(:([0-9]+))?/" \ - "(.+)?" - -#ifdef _MSC_VER -# include -#else -# include -#endif - -// This is a hack to prevent warnings about these functions being -// declared but not referenced. -#if defined(__sgi) && !defined(__GNUC__) -# include -namespace KWSYS_NAMESPACE { -class SystemToolsHack -{ -public: - enum - { - Ref1 = sizeof(cfgetospeed(0)), - Ref2 = sizeof(cfgetispeed(0)), - Ref3 = sizeof(tcgetattr(0, 0)), - Ref4 = sizeof(tcsetattr(0, 0, 0)), - Ref5 = sizeof(cfsetospeed(0, 0)), - Ref6 = sizeof(cfsetispeed(0, 0)) - }; -}; -} -#endif - -#if defined(_WIN32) && \ - (defined(_MSC_VER) || defined(__WATCOMC__) || defined(__BORLANDC__) || \ - defined(__MINGW32__)) -# include -# include -# define _unlink unlink -#endif - -/* The maximum length of a file name. 
*/ -#if defined(PATH_MAX) -# define KWSYS_SYSTEMTOOLS_MAXPATH PATH_MAX -#elif defined(MAXPATHLEN) -# define KWSYS_SYSTEMTOOLS_MAXPATH MAXPATHLEN -#else -# define KWSYS_SYSTEMTOOLS_MAXPATH 16384 -#endif -#if defined(__WATCOMC__) -# include -# define _mkdir mkdir -# define _rmdir rmdir -# define _getcwd getcwd -# define _chdir chdir -#endif - -#if defined(__BEOS__) && !defined(__ZETA__) -# include -# include - -// BeOS 5 doesn't have usleep(), but it has snooze(), which is identical. -static inline void usleep(unsigned int msec) -{ - ::snooze(msec); -} - -// BeOS 5 also doesn't have realpath(), but its C++ API offers something close. -static inline char* realpath(const char* path, char* resolved_path) -{ - const size_t maxlen = KWSYS_SYSTEMTOOLS_MAXPATH; - snprintf(resolved_path, maxlen, "%s", path); - BPath normalized(resolved_path, nullptr, true); - const char* resolved = normalized.Path(); - if (resolved != nullptr) // nullptr == No such file. - { - if (snprintf(resolved_path, maxlen, "%s", resolved) < maxlen) { - return resolved_path; - } - } - return nullptr; // something went wrong. -} -#endif - -#ifdef _WIN32 -static time_t windows_filetime_to_posix_time(const FILETIME& ft) -{ - LARGE_INTEGER date; - date.HighPart = ft.dwHighDateTime; - date.LowPart = ft.dwLowDateTime; - - // removes the diff between 1970 and 1601 - date.QuadPart -= ((LONGLONG)(369 * 365 + 89) * 24 * 3600 * 10000000); - - // converts back from 100-nanoseconds to seconds - return date.QuadPart / 10000000; -} -#endif - -#ifdef KWSYS_WINDOWS_DIRS -# include - -inline int Mkdir(const std::string& dir) -{ - return _wmkdir( - KWSYS_NAMESPACE::Encoding::ToWindowsExtendedPath(dir).c_str()); -} -inline int Rmdir(const std::string& dir) -{ - return _wrmdir( - KWSYS_NAMESPACE::Encoding::ToWindowsExtendedPath(dir).c_str()); -} -inline const char* Getcwd(char* buf, unsigned int len) -{ - std::vector w_buf(len); - if (_wgetcwd(&w_buf[0], len)) { - size_t nlen = kwsysEncoding_wcstombs(buf, &w_buf[0], len); - if (nlen == static_cast(-1)) { - return 0; - } - if (nlen < len) { - // make sure the drive letter is capital - if (nlen > 1 && buf[1] == ':') { - buf[0] = toupper(buf[0]); - } - return buf; - } - } - return 0; -} -inline int Chdir(const std::string& dir) -{ -# if defined(__BORLANDC__) - return chdir(dir.c_str()); -# else - return _wchdir(KWSYS_NAMESPACE::Encoding::ToWide(dir).c_str()); -# endif -} -inline void Realpath(const std::string& path, std::string& resolved_path, - std::string* errorMessage = 0) -{ - std::wstring tmp = KWSYS_NAMESPACE::Encoding::ToWide(path); - wchar_t* ptemp; - wchar_t fullpath[MAX_PATH]; - DWORD bufferLen = GetFullPathNameW( - tmp.c_str(), sizeof(fullpath) / sizeof(fullpath[0]), fullpath, &ptemp); - if (bufferLen < sizeof(fullpath) / sizeof(fullpath[0])) { - resolved_path = KWSYS_NAMESPACE::Encoding::ToNarrow(fullpath); - KWSYS_NAMESPACE::SystemTools::ConvertToUnixSlashes(resolved_path); - } else if (errorMessage) { - if (bufferLen) { - *errorMessage = "Destination path buffer size too small."; - } else if (unsigned int errorId = GetLastError()) { - LPSTR message = nullptr; - DWORD size = FormatMessageA( - FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - nullptr, errorId, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - (LPSTR)&message, 0, nullptr); - *errorMessage = std::string(message, size); - LocalFree(message); - } else { - *errorMessage = "Unknown error."; - } - - resolved_path = ""; - } else { - resolved_path = path; - } -} -#else -# include - -# 
include -# include -inline int Mkdir(const std::string& dir) -{ - return mkdir(dir.c_str(), 00777); -} -inline int Rmdir(const std::string& dir) -{ - return rmdir(dir.c_str()); -} -inline const char* Getcwd(char* buf, unsigned int len) -{ - return getcwd(buf, len); -} - -inline int Chdir(const std::string& dir) -{ - return chdir(dir.c_str()); -} -inline void Realpath(const std::string& path, std::string& resolved_path, - std::string* errorMessage = nullptr) -{ - char resolved_name[KWSYS_SYSTEMTOOLS_MAXPATH]; - - errno = 0; - char* ret = realpath(path.c_str(), resolved_name); - if (ret) { - resolved_path = ret; - } else if (errorMessage) { - if (errno) { - *errorMessage = strerror(errno); - } else { - *errorMessage = "Unknown error."; - } - - resolved_path = ""; - } else { - // if path resolution fails, return what was passed in - resolved_path = path; - } -} -#endif - -#if !defined(_WIN32) && defined(__COMO__) -// Hack for como strict mode to avoid defining _SVID_SOURCE or _BSD_SOURCE. -extern "C" { -extern FILE* popen(__const char* __command, __const char* __modes) __THROW; -extern int pclose(FILE* __stream) __THROW; -extern char* realpath(__const char* __restrict __name, - char* __restrict __resolved) __THROW; -extern char* strdup(__const char* __s) __THROW; -extern int putenv(char* __string) __THROW; -} -#endif - -namespace KWSYS_NAMESPACE { - -double SystemTools::GetTime(void) -{ -#if defined(_WIN32) && !defined(__CYGWIN__) - FILETIME ft; - GetSystemTimeAsFileTime(&ft); - return (429.4967296 * ft.dwHighDateTime + 0.0000001 * ft.dwLowDateTime - - 11644473600.0); -#else - struct timeval t; - gettimeofday(&t, nullptr); - return 1.0 * double(t.tv_sec) + 0.000001 * double(t.tv_usec); -#endif -} - -/* Type of character storing the environment. */ -#if defined(_WIN32) -typedef wchar_t envchar; -#else -typedef char envchar; -#endif - -/* Order by environment key only (VAR from VAR=VALUE). */ -struct kwsysEnvCompare -{ - bool operator()(const envchar* l, const envchar* r) const - { -#if defined(_WIN32) - const wchar_t* leq = wcschr(l, L'='); - const wchar_t* req = wcschr(r, L'='); - size_t llen = leq ? (leq - l) : wcslen(l); - size_t rlen = req ? (req - r) : wcslen(r); - if (llen == rlen) { - return wcsncmp(l, r, llen) < 0; - } else { - return wcscmp(l, r) < 0; - } -#else - const char* leq = strchr(l, '='); - const char* req = strchr(r, '='); - size_t llen = leq ? static_cast(leq - l) : strlen(l); - size_t rlen = req ? static_cast(req - r) : strlen(r); - if (llen == rlen) { - return strncmp(l, r, llen) < 0; - } else { - return strcmp(l, r) < 0; - } -#endif - } -}; - -class kwsysEnvSet : public std::set -{ -public: - class Free - { - const envchar* Env; - - public: - Free(const envchar* env) - : Env(env) - { - } - ~Free() { free(const_cast(this->Env)); } - - Free(const Free&) = delete; - Free& operator=(const Free&) = delete; - }; - - const envchar* Release(const envchar* env) - { - const envchar* old = nullptr; - iterator i = this->find(env); - if (i != this->end()) { - old = *i; - this->erase(i); - } - return old; - } -}; - -#ifdef _WIN32 -struct SystemToolsPathCaseCmp -{ - bool operator()(std::string const& l, std::string const& r) const - { -# ifdef _MSC_VER - return _stricmp(l.c_str(), r.c_str()) < 0; -# elif defined(__GNUC__) - return strcasecmp(l.c_str(), r.c_str()) < 0; -# else - return SystemTools::Strucmp(l.c_str(), r.c_str()) < 0; -# endif - } -}; -#endif - -/** - * SystemTools static variables singleton class. 
- */ -class SystemToolsStatic -{ -public: - typedef std::map StringMap; -#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP - /** - * Path translation table from dir to refdir - * Each time 'dir' will be found it will be replace by 'refdir' - */ - StringMap TranslationMap; -#endif -#ifdef _WIN32 - static std::string GetCasePathName(std::string const& pathIn); - static std::string GetActualCaseForPathCached(std::string const& path); - static const char* GetEnvBuffered(const char* key); - std::map PathCaseMap; - std::map EnvMap; -#endif -#ifdef __CYGWIN__ - StringMap Cyg2Win32Map; -#endif - - /** - * Actual implementation of ReplaceString. - */ - static void ReplaceString(std::string& source, const char* replace, - size_t replaceSize, const std::string& with); - - /** - * Actual implementation of FileIsFullPath. - */ - static bool FileIsFullPath(const char*, size_t); - - /** - * Find a filename (file or directory) in the system PATH, with - * optional extra paths. - */ - static std::string FindName( - const std::string& name, - const std::vector& path = std::vector(), - bool no_system_path = false); -}; - -#ifdef _WIN32 -std::string SystemToolsStatic::GetCasePathName(std::string const& pathIn) -{ - std::string casePath; - - // First check if the file is relative. We don't fix relative paths since the - // real case depends on the root directory and the given path fragment may - // have meaning elsewhere in the project. - if (!SystemTools::FileIsFullPath(pathIn)) { - // This looks unnecessary, but it allows for the return value optimization - // since all return paths return the same local variable. - casePath = pathIn; - return casePath; - } - - std::vector path_components; - SystemTools::SplitPath(pathIn, path_components); - - // Start with root component. - std::vector::size_type idx = 0; - casePath = path_components[idx++]; - // make sure drive letter is always upper case - if (casePath.size() > 1 && casePath[1] == ':') { - casePath[0] = toupper(casePath[0]); - } - const char* sep = ""; - - // If network path, fill casePath with server/share so FindFirstFile - // will work after that. Maybe someday call other APIs to get - // actual case of servers and shares. - if (path_components.size() > 2 && path_components[0] == "//") { - casePath += path_components[idx++]; - casePath += "/"; - casePath += path_components[idx++]; - sep = "/"; - } - - // Convert case of all components that exist. - bool converting = true; - for (; idx < path_components.size(); idx++) { - casePath += sep; - sep = "/"; - - if (converting) { - // If path component contains wildcards, we skip matching - // because these filenames are not allowed on windows, - // and we do not want to match a different file. 
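// A reduced sketch of the per-component case lookup used here: ask
// FindFirstFileW for the on-disk spelling of a single component and keep the
// caller's spelling when the lookup fails (or when wildcards force the skip
// described above). Assumes Windows, <windows.h>, and the kwsys Encoding
// helpers; the 'kwsys' namespace and header locations depend on how
// KWSYS_NAMESPACE is configured.
#include <string>
#include <windows.h>
#include <kwsys/Encoding.hxx>

static std::string OnDiskCase(const std::string& prefix,    // e.g. "C:/Windows/"
                              const std::string& component) // e.g. "system32"
{
  WIN32_FIND_DATAW findData;
  HANDLE hFind = ::FindFirstFileW(
    kwsys::Encoding::ToWide(prefix + component).c_str(), &findData);
  if (hFind == INVALID_HANDLE_VALUE) {
    return component;                    // not found: keep the given spelling
  }
  std::string actual = kwsys::Encoding::ToNarrow(findData.cFileName);
  ::FindClose(hFind);
  return actual;                         // e.g. "System32"
}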
- if (path_components[idx].find('*') != std::string::npos || - path_components[idx].find('?') != std::string::npos) { - converting = false; - } else { - std::string test_str = casePath; - test_str += path_components[idx]; - WIN32_FIND_DATAW findData; - HANDLE hFind = - ::FindFirstFileW(Encoding::ToWide(test_str).c_str(), &findData); - if (INVALID_HANDLE_VALUE != hFind) { - path_components[idx] = Encoding::ToNarrow(findData.cFileName); - ::FindClose(hFind); - } else { - converting = false; - } - } - } - - casePath += path_components[idx]; - } - return casePath; -} - -std::string SystemToolsStatic::GetActualCaseForPathCached(std::string const& p) -{ - // Check to see if actual case has already been called - // for this path, and the result is stored in the PathCaseMap - auto& pcm = SystemTools::Statics->PathCaseMap; - { - auto itr = pcm.find(p); - if (itr != pcm.end()) { - return itr->second; - } - } - std::string casePath = SystemToolsStatic::GetCasePathName(p); - if (casePath.size() <= MAX_PATH) { - pcm[p] = casePath; - } - return casePath; -} -#endif - -// adds the elements of the env variable path to the arg passed in -void SystemTools::GetPath(std::vector& path, const char* env) -{ - size_t const old_size = path.size(); -#if defined(_WIN32) && !defined(__CYGWIN__) - const char pathSep = ';'; -#else - const char pathSep = ':'; -#endif - if (!env) { - env = "PATH"; - } - std::string pathEnv; - if (!SystemTools::GetEnv(env, pathEnv)) { - return; - } - - // A hack to make the below algorithm work. - if (!pathEnv.empty() && pathEnv.back() != pathSep) { - pathEnv += pathSep; - } - std::string::size_type start = 0; - bool done = false; - while (!done) { - std::string::size_type endpos = pathEnv.find(pathSep, start); - if (endpos != std::string::npos) { - path.push_back(pathEnv.substr(start, endpos - start)); - start = endpos + 1; - } else { - done = true; - } - } - for (std::vector::iterator i = path.begin() + old_size; - i != path.end(); ++i) { - SystemTools::ConvertToUnixSlashes(*i); - } -} - -#if defined(_WIN32) -const char* SystemToolsStatic::GetEnvBuffered(const char* key) -{ - std::string env; - if (SystemTools::GetEnv(key, env)) { - std::string& menv = SystemTools::Statics->EnvMap[key]; - if (menv != env) { - menv = std::move(env); - } - return menv.c_str(); - } - return nullptr; -} -#endif - -const char* SystemTools::GetEnv(const char* key) -{ -#if defined(_WIN32) - return SystemToolsStatic::GetEnvBuffered(key); -#else - return getenv(key); -#endif -} - -const char* SystemTools::GetEnv(const std::string& key) -{ -#if defined(_WIN32) - return SystemToolsStatic::GetEnvBuffered(key.c_str()); -#else - return getenv(key.c_str()); -#endif -} - -bool SystemTools::GetEnv(const char* key, std::string& result) -{ -#if defined(_WIN32) - const std::wstring wkey = Encoding::ToWide(key); - const wchar_t* wv = _wgetenv(wkey.c_str()); - if (wv) { - result = Encoding::ToNarrow(wv); - return true; - } -#else - const char* v = getenv(key); - if (v) { - result = v; - return true; - } -#endif - return false; -} - -bool SystemTools::GetEnv(const std::string& key, std::string& result) -{ - return SystemTools::GetEnv(key.c_str(), result); -} - -bool SystemTools::HasEnv(const char* key) -{ -#if defined(_WIN32) - const std::wstring wkey = Encoding::ToWide(key); - const wchar_t* v = _wgetenv(wkey.c_str()); -#else - const char* v = getenv(key); -#endif - return v != nullptr; -} - -bool SystemTools::HasEnv(const std::string& key) -{ - return SystemTools::HasEnv(key.c_str()); -} - -#if KWSYS_CXX_HAS_UNSETENV -/* 
unsetenv("A") removes A from the environment. - On older platforms it returns void instead of int. */ -static int kwsysUnPutEnv(const std::string& env) -{ - size_t pos = env.find('='); - if (pos != std::string::npos) { - std::string name = env.substr(0, pos); - unsetenv(name.c_str()); - } else { - unsetenv(env.c_str()); - } - return 0; -} - -#elif defined(__CYGWIN__) || defined(__GLIBC__) -/* putenv("A") removes A from the environment. It must not put the - memory in the environment because it does not have any "=" syntax. */ -static int kwsysUnPutEnv(const std::string& env) -{ - int err = 0; - size_t pos = env.find('='); - size_t const len = pos == std::string::npos ? env.size() : pos; - size_t const sz = len + 1; - char local_buf[256]; - char* buf = sz > sizeof(local_buf) ? (char*)malloc(sz) : local_buf; - if (!buf) { - return -1; - } - strncpy(buf, env.c_str(), len); - buf[len] = 0; - if (putenv(buf) < 0 && errno != EINVAL) { - err = errno; - } - if (buf != local_buf) { - free(buf); - } - if (err) { - errno = err; - return -1; - } - return 0; -} - -#elif defined(_WIN32) -/* putenv("A=") places "A=" in the environment, which is as close to - removal as we can get with the putenv API. We have to leak the - most recent value placed in the environment for each variable name - on program exit in case exit routines access it. */ - -static kwsysEnvSet kwsysUnPutEnvSet; - -static int kwsysUnPutEnv(std::string const& env) -{ - std::wstring wEnv = Encoding::ToWide(env); - size_t const pos = wEnv.find('='); - size_t const len = pos == std::string::npos ? wEnv.size() : pos; - wEnv.resize(len + 1, L'='); - wchar_t* newEnv = _wcsdup(wEnv.c_str()); - if (!newEnv) { - return -1; - } - kwsysEnvSet::Free oldEnv(kwsysUnPutEnvSet.Release(newEnv)); - kwsysUnPutEnvSet.insert(newEnv); - return _wputenv(newEnv); -} - -#else -/* Manipulate the "environ" global directly. */ -static int kwsysUnPutEnv(const std::string& env) -{ - size_t pos = env.find('='); - size_t const len = pos == std::string::npos ? env.size() : pos; - int in = 0; - int out = 0; - while (environ[in]) { - if (strlen(environ[in]) > len && environ[in][len] == '=' && - strncmp(env.c_str(), environ[in], len) == 0) { - ++in; - } else { - environ[out++] = environ[in++]; - } - } - while (out < in) { - environ[out++] = 0; - } - return 0; -} -#endif - -#if KWSYS_CXX_HAS_SETENV - -/* setenv("A", "B", 1) will set A=B in the environment and makes its - own copies of the strings. */ -bool SystemTools::PutEnv(const std::string& env) -{ - size_t pos = env.find('='); - if (pos != std::string::npos) { - std::string name = env.substr(0, pos); - return setenv(name.c_str(), env.c_str() + pos + 1, 1) == 0; - } else { - return kwsysUnPutEnv(env) == 0; - } -} - -bool SystemTools::UnPutEnv(const std::string& env) -{ - return kwsysUnPutEnv(env) == 0; -} - -#else - -/* putenv("A=B") will set A=B in the environment. Most putenv implementations - put their argument directly in the environment. They never free the memory - on program exit. Keep an active set of pointers to memory we allocate and - pass to putenv, one per environment key. At program exit remove any - environment values that may still reference memory we allocated. Then free - the memory. This will not affect any environment values we never set. 
*/ - -# ifdef __INTEL_COMPILER -# pragma warning disable 444 /* base has non-virtual destructor */ -# endif - -class kwsysEnv : public kwsysEnvSet -{ -public: - ~kwsysEnv() - { - for (iterator i = this->begin(); i != this->end(); ++i) { -# if defined(_WIN32) - const std::string s = Encoding::ToNarrow(*i); - kwsysUnPutEnv(s); -# else - kwsysUnPutEnv(*i); -# endif - free(const_cast(*i)); - } - } - bool Put(const char* env) - { -# if defined(_WIN32) - const std::wstring wEnv = Encoding::ToWide(env); - wchar_t* newEnv = _wcsdup(wEnv.c_str()); -# else - char* newEnv = strdup(env); -# endif - Free oldEnv(this->Release(newEnv)); - this->insert(newEnv); -# if defined(_WIN32) - return _wputenv(newEnv) == 0; -# else - return putenv(newEnv) == 0; -# endif - } - bool UnPut(const char* env) - { -# if defined(_WIN32) - const std::wstring wEnv = Encoding::ToWide(env); - Free oldEnv(this->Release(wEnv.c_str())); -# else - Free oldEnv(this->Release(env)); -# endif - return kwsysUnPutEnv(env) == 0; - } -}; - -static kwsysEnv kwsysEnvInstance; - -bool SystemTools::PutEnv(const std::string& env) -{ - return kwsysEnvInstance.Put(env.c_str()); -} - -bool SystemTools::UnPutEnv(const std::string& env) -{ - return kwsysEnvInstance.UnPut(env.c_str()); -} - -#endif - -const char* SystemTools::GetExecutableExtension() -{ -#if defined(_WIN32) || defined(__CYGWIN__) || defined(__VMS) - return ".exe"; -#else - return ""; -#endif -} - -FILE* SystemTools::Fopen(const std::string& file, const char* mode) -{ -#ifdef _WIN32 - return _wfopen(Encoding::ToWindowsExtendedPath(file).c_str(), - Encoding::ToWide(mode).c_str()); -#else - return fopen(file.c_str(), mode); -#endif -} - -bool SystemTools::MakeDirectory(const char* path, const mode_t* mode) -{ - if (!path) { - return false; - } - return SystemTools::MakeDirectory(std::string(path), mode); -} - -bool SystemTools::MakeDirectory(const std::string& path, const mode_t* mode) -{ - if (SystemTools::PathExists(path)) { - return SystemTools::FileIsDirectory(path); - } - if (path.empty()) { - return false; - } - std::string dir = path; - SystemTools::ConvertToUnixSlashes(dir); - - std::string::size_type pos = 0; - std::string topdir; - while ((pos = dir.find('/', pos)) != std::string::npos) { - topdir = dir.substr(0, pos); - - if (Mkdir(topdir) == 0 && mode != nullptr) { - SystemTools::SetPermissions(topdir, *mode); - } - - ++pos; - } - topdir = dir; - if (Mkdir(topdir) != 0) { - // There is a bug in the Borland Run time library which makes MKDIR - // return EACCES when it should return EEXISTS - // if it is some other error besides directory exists - // then return false - if ((errno != EEXIST) -#ifdef __BORLANDC__ - && (errno != EACCES) -#endif - ) { - return false; - } - } else if (mode != nullptr) { - SystemTools::SetPermissions(topdir, *mode); - } - - return true; -} - -// replace replace with with as many times as it shows up in source. -// write the result into source. -void SystemTools::ReplaceString(std::string& source, - const std::string& replace, - const std::string& with) -{ - // do while hangs if replaceSize is 0 - if (replace.empty()) { - return; - } - - SystemToolsStatic::ReplaceString(source, replace.c_str(), replace.size(), - with); -} - -void SystemTools::ReplaceString(std::string& source, const char* replace, - const char* with) -{ - // do while hangs if replaceSize is 0 - if (!*replace) { - return; - } - - SystemToolsStatic::ReplaceString(source, replace, strlen(replace), - with ? 
with : ""); -} - -void SystemToolsStatic::ReplaceString(std::string& source, const char* replace, - size_t replaceSize, - const std::string& with) -{ - const char* src = source.c_str(); - char* searchPos = const_cast(strstr(src, replace)); - - // get out quick if string is not found - if (!searchPos) { - return; - } - - // perform replacements until done - char* orig = strdup(src); - char* currentPos = orig; - searchPos = searchPos - src + orig; - - // initialize the result - source.erase(source.begin(), source.end()); - do { - *searchPos = '\0'; - source += currentPos; - currentPos = searchPos + replaceSize; - // replace - source += with; - searchPos = strstr(currentPos, replace); - } while (searchPos); - - // copy any trailing text - source += currentPos; - free(orig); -} - -#if defined(_WIN32) && !defined(__CYGWIN__) - -# if defined(KEY_WOW64_32KEY) && defined(KEY_WOW64_64KEY) -# define KWSYS_ST_KEY_WOW64_32KEY KEY_WOW64_32KEY -# define KWSYS_ST_KEY_WOW64_64KEY KEY_WOW64_64KEY -# else -# define KWSYS_ST_KEY_WOW64_32KEY 0x0200 -# define KWSYS_ST_KEY_WOW64_64KEY 0x0100 -# endif - -static bool SystemToolsParseRegistryKey(const std::string& key, - HKEY& primaryKey, std::string& second, - std::string& valuename) -{ - std::string primary = key; - - size_t start = primary.find('\\'); - if (start == std::string::npos) { - return false; - } - - size_t valuenamepos = primary.find(';'); - if (valuenamepos != std::string::npos) { - valuename = primary.substr(valuenamepos + 1); - } - - second = primary.substr(start + 1, valuenamepos - start - 1); - primary = primary.substr(0, start); - - if (primary == "HKEY_CURRENT_USER") { - primaryKey = HKEY_CURRENT_USER; - } - if (primary == "HKEY_CURRENT_CONFIG") { - primaryKey = HKEY_CURRENT_CONFIG; - } - if (primary == "HKEY_CLASSES_ROOT") { - primaryKey = HKEY_CLASSES_ROOT; - } - if (primary == "HKEY_LOCAL_MACHINE") { - primaryKey = HKEY_LOCAL_MACHINE; - } - if (primary == "HKEY_USERS") { - primaryKey = HKEY_USERS; - } - - return true; -} - -static DWORD SystemToolsMakeRegistryMode(DWORD mode, - SystemTools::KeyWOW64 view) -{ - // only add the modes when on a system that supports Wow64. - static FARPROC wow64p = - GetProcAddress(GetModuleHandleW(L"kernel32"), "IsWow64Process"); - if (wow64p == nullptr) { - return mode; - } - - if (view == SystemTools::KeyWOW64_32) { - return mode | KWSYS_ST_KEY_WOW64_32KEY; - } else if (view == SystemTools::KeyWOW64_64) { - return mode | KWSYS_ST_KEY_WOW64_64KEY; - } - return mode; -} -#endif - -#if defined(_WIN32) && !defined(__CYGWIN__) -bool SystemTools::GetRegistrySubKeys(const std::string& key, - std::vector& subkeys, - KeyWOW64 view) -{ - HKEY primaryKey = HKEY_CURRENT_USER; - std::string second; - std::string valuename; - if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) { - return false; - } - - HKEY hKey; - if (RegOpenKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0, - SystemToolsMakeRegistryMode(KEY_READ, view), - &hKey) != ERROR_SUCCESS) { - return false; - } else { - wchar_t name[1024]; - DWORD dwNameSize = sizeof(name) / sizeof(name[0]); - - DWORD i = 0; - while (RegEnumKeyW(hKey, i, name, dwNameSize) == ERROR_SUCCESS) { - subkeys.push_back(Encoding::ToNarrow(name)); - ++i; - } - - RegCloseKey(hKey); - } - - return true; -} -#else -bool SystemTools::GetRegistrySubKeys(const std::string&, - std::vector&, KeyWOW64) -{ - return false; -} -#endif - -// Read a registry value. 
-// Example : -// HKEY_LOCAL_MACHINE\SOFTWARE\Python\PythonCore\2.1\InstallPath -// => will return the data of the "default" value of the key -// HKEY_LOCAL_MACHINE\SOFTWARE\Scriptics\Tcl\8.4;Root -// => will return the data of the "Root" value of the key - -#if defined(_WIN32) && !defined(__CYGWIN__) -bool SystemTools::ReadRegistryValue(const std::string& key, std::string& value, - KeyWOW64 view) -{ - bool valueset = false; - HKEY primaryKey = HKEY_CURRENT_USER; - std::string second; - std::string valuename; - if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) { - return false; - } - - HKEY hKey; - if (RegOpenKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0, - SystemToolsMakeRegistryMode(KEY_READ, view), - &hKey) != ERROR_SUCCESS) { - return false; - } else { - DWORD dwType, dwSize; - dwSize = 1023; - wchar_t data[1024]; - if (RegQueryValueExW(hKey, Encoding::ToWide(valuename).c_str(), nullptr, - &dwType, (BYTE*)data, &dwSize) == ERROR_SUCCESS) { - if (dwType == REG_SZ) { - value = Encoding::ToNarrow(data); - valueset = true; - } else if (dwType == REG_EXPAND_SZ) { - wchar_t expanded[1024]; - DWORD dwExpandedSize = sizeof(expanded) / sizeof(expanded[0]); - if (ExpandEnvironmentStringsW(data, expanded, dwExpandedSize)) { - value = Encoding::ToNarrow(expanded); - valueset = true; - } - } - } - - RegCloseKey(hKey); - } - - return valueset; -} -#else -bool SystemTools::ReadRegistryValue(const std::string&, std::string&, KeyWOW64) -{ - return false; -} -#endif - -// Write a registry value. -// Example : -// HKEY_LOCAL_MACHINE\SOFTWARE\Python\PythonCore\2.1\InstallPath -// => will set the data of the "default" value of the key -// HKEY_LOCAL_MACHINE\SOFTWARE\Scriptics\Tcl\8.4;Root -// => will set the data of the "Root" value of the key - -#if defined(_WIN32) && !defined(__CYGWIN__) -bool SystemTools::WriteRegistryValue(const std::string& key, - const std::string& value, KeyWOW64 view) -{ - HKEY primaryKey = HKEY_CURRENT_USER; - std::string second; - std::string valuename; - if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) { - return false; - } - - HKEY hKey; - DWORD dwDummy; - wchar_t lpClass[] = L""; - if (RegCreateKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0, lpClass, - REG_OPTION_NON_VOLATILE, - SystemToolsMakeRegistryMode(KEY_WRITE, view), nullptr, - &hKey, &dwDummy) != ERROR_SUCCESS) { - return false; - } - - std::wstring wvalue = Encoding::ToWide(value); - if (RegSetValueExW(hKey, Encoding::ToWide(valuename).c_str(), 0, REG_SZ, - (CONST BYTE*)wvalue.c_str(), - (DWORD)(sizeof(wchar_t) * (wvalue.size() + 1))) == - ERROR_SUCCESS) { - return true; - } - return false; -} -#else -bool SystemTools::WriteRegistryValue(const std::string&, const std::string&, - KeyWOW64) -{ - return false; -} -#endif - -// Delete a registry value. 
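// A minimal usage sketch for the registry helpers above (they simply return
// false on non-Windows builds). The "PRIMARY\\Sub\\Key;ValueName" string
// follows the examples in these comments; the key and value names below are
// hypothetical, and the 'kwsys' namespace/header names are assumed as in the
// earlier sketch.
#include <string>
#include <kwsys/SystemTools.hxx>

void RegistryRoundTrip()
{
  const std::string key =
    "HKEY_CURRENT_USER\\SOFTWARE\\MyProduct;InstallDir";  // hypothetical key

  // Write the "InstallDir" value, read it back through the 32-bit view,
  // then remove it again.
  kwsys::SystemTools::WriteRegistryValue(key, "C:/MyProduct",
                                         kwsys::SystemTools::KeyWOW64_32);
  std::string value;
  if (kwsys::SystemTools::ReadRegistryValue(key, value,
                                            kwsys::SystemTools::KeyWOW64_32)) {
    // value now holds "C:/MyProduct"; REG_EXPAND_SZ data is expanded first.
  }
  kwsys::SystemTools::DeleteRegistryValue(key, kwsys::SystemTools::KeyWOW64_32);
}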
-// Example : -// HKEY_LOCAL_MACHINE\SOFTWARE\Python\PythonCore\2.1\InstallPath -// => will delete the data of the "default" value of the key -// HKEY_LOCAL_MACHINE\SOFTWARE\Scriptics\Tcl\8.4;Root -// => will delete the data of the "Root" value of the key - -#if defined(_WIN32) && !defined(__CYGWIN__) -bool SystemTools::DeleteRegistryValue(const std::string& key, KeyWOW64 view) -{ - HKEY primaryKey = HKEY_CURRENT_USER; - std::string second; - std::string valuename; - if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) { - return false; - } - - HKEY hKey; - if (RegOpenKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0, - SystemToolsMakeRegistryMode(KEY_WRITE, view), - &hKey) != ERROR_SUCCESS) { - return false; - } else { - if (RegDeleteValue(hKey, (LPTSTR)valuename.c_str()) == ERROR_SUCCESS) { - RegCloseKey(hKey); - return true; - } - } - return false; -} -#else -bool SystemTools::DeleteRegistryValue(const std::string&, KeyWOW64) -{ - return false; -} -#endif - -bool SystemTools::SameFile(const std::string& file1, const std::string& file2) -{ -#ifdef _WIN32 - HANDLE hFile1, hFile2; - - hFile1 = - CreateFileW(Encoding::ToWide(file1).c_str(), GENERIC_READ, FILE_SHARE_READ, - nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr); - hFile2 = - CreateFileW(Encoding::ToWide(file2).c_str(), GENERIC_READ, FILE_SHARE_READ, - nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr); - if (hFile1 == INVALID_HANDLE_VALUE || hFile2 == INVALID_HANDLE_VALUE) { - if (hFile1 != INVALID_HANDLE_VALUE) { - CloseHandle(hFile1); - } - if (hFile2 != INVALID_HANDLE_VALUE) { - CloseHandle(hFile2); - } - return false; - } - - BY_HANDLE_FILE_INFORMATION fiBuf1; - BY_HANDLE_FILE_INFORMATION fiBuf2; - GetFileInformationByHandle(hFile1, &fiBuf1); - GetFileInformationByHandle(hFile2, &fiBuf2); - CloseHandle(hFile1); - CloseHandle(hFile2); - return (fiBuf1.dwVolumeSerialNumber == fiBuf2.dwVolumeSerialNumber && - fiBuf1.nFileIndexHigh == fiBuf2.nFileIndexHigh && - fiBuf1.nFileIndexLow == fiBuf2.nFileIndexLow); -#else - struct stat fileStat1, fileStat2; - if (stat(file1.c_str(), &fileStat1) == 0 && - stat(file2.c_str(), &fileStat2) == 0) { - // see if the files are the same file - // check the device inode and size - if (memcmp(&fileStat2.st_dev, &fileStat1.st_dev, - sizeof(fileStat1.st_dev)) == 0 && - memcmp(&fileStat2.st_ino, &fileStat1.st_ino, - sizeof(fileStat1.st_ino)) == 0 && - fileStat2.st_size == fileStat1.st_size) { - return true; - } - } - return false; -#endif -} - -bool SystemTools::PathExists(const std::string& path) -{ - if (path.empty()) { - return false; - } -#if defined(__CYGWIN__) - // Convert path to native windows path if possible. - char winpath[MAX_PATH]; - if (SystemTools::PathCygwinToWin32(path.c_str(), winpath)) { - return (GetFileAttributesA(winpath) != INVALID_FILE_ATTRIBUTES); - } - struct stat st; - return lstat(path.c_str(), &st) == 0; -#elif defined(_WIN32) - return (GetFileAttributesW(Encoding::ToWindowsExtendedPath(path).c_str()) != - INVALID_FILE_ATTRIBUTES); -#else - struct stat st; - return lstat(path.c_str(), &st) == 0; -#endif -} - -bool SystemTools::FileExists(const char* filename) -{ - if (!filename) { - return false; - } - return SystemTools::FileExists(std::string(filename)); -} - -bool SystemTools::FileExists(const std::string& filename) -{ - if (filename.empty()) { - return false; - } -#if defined(__CYGWIN__) - // Convert filename to native windows path if possible. 
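// A short sketch contrasting the existence checks in this file; the paths are
// hypothetical and the 'kwsys' namespace/header names are assumed as above.
#include <string>
#include <kwsys/SystemTools.hxx>

void ExistenceChecks()
{
  // PathExists() uses lstat()/GetFileAttributes(), so on POSIX a dangling
  // symlink still reports true; FileExists() additionally needs read access
  // (access(R_OK)) on most POSIX systems.
  bool onDisk   = kwsys::SystemTools::PathExists("build/stale-link");
  bool readable = kwsys::SystemTools::FileExists("build/CMakeCache.txt");

  // FileExists(name, true) is true only for non-directories.
  bool plainFile = kwsys::SystemTools::FileExists("build", true);  // false for a directory

  // SameFile() compares volume serial + file index on Windows and
  // device + inode + size on POSIX, so it also recognizes hard links.
  bool same = kwsys::SystemTools::SameFile("a.txt", "hardlink-to-a.txt");
  (void)onDisk; (void)readable; (void)plainFile; (void)same;
}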
- char winpath[MAX_PATH]; - if (SystemTools::PathCygwinToWin32(filename.c_str(), winpath)) { - return (GetFileAttributesA(winpath) != INVALID_FILE_ATTRIBUTES); - } - return access(filename.c_str(), R_OK) == 0; -#elif defined(_WIN32) - DWORD attr = - GetFileAttributesW(Encoding::ToWindowsExtendedPath(filename).c_str()); - if (attr == INVALID_FILE_ATTRIBUTES) { - return false; - } - - if (attr & FILE_ATTRIBUTE_REPARSE_POINT) { - // Using 0 instead of GENERIC_READ as it allows reading of file attributes - // even if we do not have permission to read the file itself - HANDLE handle = - CreateFileW(Encoding::ToWindowsExtendedPath(filename).c_str(), 0, 0, - nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr); - - if (handle == INVALID_HANDLE_VALUE) { - return false; - } - - CloseHandle(handle); - } - - return true; -#else -// SCO OpenServer 5.0.7/3.2's command has 711 permission. -# if defined(_SCO_DS) - return access(filename.c_str(), F_OK) == 0; -# else - return access(filename.c_str(), R_OK) == 0; -# endif -#endif -} - -bool SystemTools::FileExists(const char* filename, bool isFile) -{ - if (!filename) { - return false; - } - return SystemTools::FileExists(std::string(filename), isFile); -} - -bool SystemTools::FileExists(const std::string& filename, bool isFile) -{ - if (SystemTools::FileExists(filename)) { - // If isFile is set return not FileIsDirectory, - // so this will only be true if it is a file - return !isFile || !SystemTools::FileIsDirectory(filename); - } - return false; -} - -bool SystemTools::TestFileAccess(const char* filename, - TestFilePermissions permissions) -{ - if (!filename) { - return false; - } - return SystemTools::TestFileAccess(std::string(filename), permissions); -} - -bool SystemTools::TestFileAccess(const std::string& filename, - TestFilePermissions permissions) -{ - if (filename.empty()) { - return false; - } -#if defined(_WIN32) && !defined(__CYGWIN__) - // If execute set, change to read permission (all files on Windows - // are executable if they are readable). The CRT will always fail - // if you pass an execute bit. - if (permissions & TEST_FILE_EXECUTE) { - permissions &= ~TEST_FILE_EXECUTE; - permissions |= TEST_FILE_READ; - } - return _waccess(Encoding::ToWindowsExtendedPath(filename).c_str(), - permissions) == 0; -#else - return access(filename.c_str(), permissions) == 0; -#endif -} - -int SystemTools::Stat(const char* path, SystemTools::Stat_t* buf) -{ - if (!path) { - errno = EFAULT; - return -1; - } - return SystemTools::Stat(std::string(path), buf); -} - -int SystemTools::Stat(const std::string& path, SystemTools::Stat_t* buf) -{ - if (path.empty()) { - errno = ENOENT; - return -1; - } -#if defined(_WIN32) && !defined(__CYGWIN__) - // Ideally we should use Encoding::ToWindowsExtendedPath to support - // long paths, but _wstat64 rejects paths with '?' in them, thinking - // they are wildcards. 
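// A minimal sketch of the portable Stat() wrapper above; namespace/header
// names are assumed as in the earlier sketches. Stat_t maps to struct stat on
// POSIX and a 64-bit _stat structure on Windows, so st_size is not truncated
// for large files.
#include <iostream>
#include <string>
#include <kwsys/SystemTools.hxx>

void PrintSize(const std::string& path)
{
  kwsys::SystemTools::Stat_t st;
  if (kwsys::SystemTools::Stat(path, &st) == 0) {
    std::cout << path << ": " << st.st_size << " bytes\n";
  } else {
    std::cout << path << ": stat failed\n";  // errno is set (ENOENT, EFAULT, ...)
  }
}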
- std::wstring const& wpath = Encoding::ToWide(path); -# if defined(__BORLANDC__) - return _wstati64(wpath.c_str(), buf); -# else - return _wstat64(wpath.c_str(), buf); -# endif -#else - return stat(path.c_str(), buf); -#endif -} - -#ifdef __CYGWIN__ -bool SystemTools::PathCygwinToWin32(const char* path, char* win32_path) -{ - auto itr = SystemTools::Statics->Cyg2Win32Map.find(path); - if (itr != SystemTools::Statics->Cyg2Win32Map.end()) { - strncpy(win32_path, itr->second.c_str(), MAX_PATH); - } else { - if (cygwin_conv_path(CCP_POSIX_TO_WIN_A, path, win32_path, MAX_PATH) != - 0) { - win32_path[0] = 0; - } - SystemTools::Statics->Cyg2Win32Map.insert( - SystemToolsStatic::StringMap::value_type(path, win32_path)); - } - return win32_path[0] != 0; -} -#endif - -bool SystemTools::Touch(const std::string& filename, bool create) -{ - if (!SystemTools::PathExists(filename)) { - if (create) { - FILE* file = Fopen(filename, "a+b"); - if (file) { - fclose(file); - return true; - } - return false; - } else { - return true; - } - } -#if defined(_WIN32) && !defined(__CYGWIN__) - HANDLE h = CreateFileW(Encoding::ToWindowsExtendedPath(filename).c_str(), - FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, 0, - OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0); - if (!h) { - return false; - } - FILETIME mtime; - GetSystemTimeAsFileTime(&mtime); - if (!SetFileTime(h, 0, 0, &mtime)) { - CloseHandle(h); - return false; - } - CloseHandle(h); -#elif KWSYS_CXX_HAS_UTIMENSAT - // utimensat is only available on newer Unixes and macOS 10.13+ - if (utimensat(AT_FDCWD, filename.c_str(), nullptr, 0) < 0) { - return false; - } -#else - // fall back to utimes - if (utimes(filename.c_str(), nullptr) < 0) { - return false; - } -#endif - return true; -} - -bool SystemTools::FileTimeCompare(const std::string& f1, const std::string& f2, - int* result) -{ - // Default to same time. - *result = 0; -#if !defined(_WIN32) || defined(__CYGWIN__) - // POSIX version. Use stat function to get file modification time. - struct stat s1; - if (stat(f1.c_str(), &s1) != 0) { - return false; - } - struct stat s2; - if (stat(f2.c_str(), &s2) != 0) { - return false; - } -# if KWSYS_CXX_STAT_HAS_ST_MTIM - // Compare using nanosecond resolution. - if (s1.st_mtim.tv_sec < s2.st_mtim.tv_sec) { - *result = -1; - } else if (s1.st_mtim.tv_sec > s2.st_mtim.tv_sec) { - *result = 1; - } else if (s1.st_mtim.tv_nsec < s2.st_mtim.tv_nsec) { - *result = -1; - } else if (s1.st_mtim.tv_nsec > s2.st_mtim.tv_nsec) { - *result = 1; - } -# elif KWSYS_CXX_STAT_HAS_ST_MTIMESPEC - // Compare using nanosecond resolution. - if (s1.st_mtimespec.tv_sec < s2.st_mtimespec.tv_sec) { - *result = -1; - } else if (s1.st_mtimespec.tv_sec > s2.st_mtimespec.tv_sec) { - *result = 1; - } else if (s1.st_mtimespec.tv_nsec < s2.st_mtimespec.tv_nsec) { - *result = -1; - } else if (s1.st_mtimespec.tv_nsec > s2.st_mtimespec.tv_nsec) { - *result = 1; - } -# else - // Compare using 1 second resolution. - if (s1.st_mtime < s2.st_mtime) { - *result = -1; - } else if (s1.st_mtime > s2.st_mtime) { - *result = 1; - } -# endif -#else - // Windows version. Get the modification time from extended file attributes. - WIN32_FILE_ATTRIBUTE_DATA f1d; - WIN32_FILE_ATTRIBUTE_DATA f2d; - if (!GetFileAttributesExW(Encoding::ToWindowsExtendedPath(f1).c_str(), - GetFileExInfoStandard, &f1d)) { - return false; - } - if (!GetFileAttributesExW(Encoding::ToWindowsExtendedPath(f2).c_str(), - GetFileExInfoStandard, &f2d)) { - return false; - } - - // Compare the file times using resolution provided by system call. 
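// Usage sketch for the timestamp helpers above; the file names are
// hypothetical and namespace/header names are assumed as above.
#include <string>
#include <kwsys/SystemTools.hxx>

bool OutputIsOutOfDate(const std::string& output, const std::string& input)
{
  int result = 0;
  // result is -1 if 'output' is older than 'input', 0 if equal, +1 if newer;
  // the call returns false when either file cannot be stat'ed.
  if (!kwsys::SystemTools::FileTimeCompare(output, input, &result)) {
    return true;  // treat a missing file as out of date
  }
  return result < 0;
}

void MarkUpToDate(const std::string& stampFile)
{
  // Touch(create = true): create the file if it is missing, otherwise just
  // bump its modification time to "now".
  kwsys::SystemTools::Touch(stampFile, true);
}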
- *result = (int)CompareFileTime(&f1d.ftLastWriteTime, &f2d.ftLastWriteTime); -#endif - return true; -} - -// Return a capitalized string (i.e the first letter is uppercased, all other -// are lowercased) -std::string SystemTools::Capitalized(const std::string& s) -{ - std::string n; - if (s.empty()) { - return n; - } - n.resize(s.size()); - n[0] = static_cast(toupper(s[0])); - for (size_t i = 1; i < s.size(); i++) { - n[i] = static_cast(tolower(s[i])); - } - return n; -} - -// Return capitalized words -std::string SystemTools::CapitalizedWords(const std::string& s) -{ - std::string n(s); - for (size_t i = 0; i < s.size(); i++) { -#if defined(_MSC_VER) && defined(_MT) && defined(_DEBUG) - // MS has an assert that will fail if s[i] < 0; setting - // LC_CTYPE using setlocale() does *not* help. Painful. - if ((int)s[i] >= 0 && isalpha(s[i]) && - (i == 0 || ((int)s[i - 1] >= 0 && isspace(s[i - 1])))) -#else - if (isalpha(s[i]) && (i == 0 || isspace(s[i - 1]))) -#endif - { - n[i] = static_cast(toupper(s[i])); - } - } - return n; -} - -// Return uncapitalized words -std::string SystemTools::UnCapitalizedWords(const std::string& s) -{ - std::string n(s); - for (size_t i = 0; i < s.size(); i++) { -#if defined(_MSC_VER) && defined(_MT) && defined(_DEBUG) - // MS has an assert that will fail if s[i] < 0; setting - // LC_CTYPE using setlocale() does *not* help. Painful. - if ((int)s[i] >= 0 && isalpha(s[i]) && - (i == 0 || ((int)s[i - 1] >= 0 && isspace(s[i - 1])))) -#else - if (isalpha(s[i]) && (i == 0 || isspace(s[i - 1]))) -#endif - { - n[i] = static_cast(tolower(s[i])); - } - } - return n; -} - -// only works for words with at least two letters -std::string SystemTools::AddSpaceBetweenCapitalizedWords(const std::string& s) -{ - std::string n; - if (!s.empty()) { - n.reserve(s.size()); - n += s[0]; - for (size_t i = 1; i < s.size(); i++) { - if (isupper(s[i]) && !isspace(s[i - 1]) && !isupper(s[i - 1])) { - n += ' '; - } - n += s[i]; - } - } - return n; -} - -char* SystemTools::AppendStrings(const char* str1, const char* str2) -{ - if (!str1) { - return SystemTools::DuplicateString(str2); - } - if (!str2) { - return SystemTools::DuplicateString(str1); - } - size_t len1 = strlen(str1); - char* newstr = new char[len1 + strlen(str2) + 1]; - if (!newstr) { - return nullptr; - } - strcpy(newstr, str1); - strcat(newstr + len1, str2); - return newstr; -} - -char* SystemTools::AppendStrings(const char* str1, const char* str2, - const char* str3) -{ - if (!str1) { - return SystemTools::AppendStrings(str2, str3); - } - if (!str2) { - return SystemTools::AppendStrings(str1, str3); - } - if (!str3) { - return SystemTools::AppendStrings(str1, str2); - } - - size_t len1 = strlen(str1), len2 = strlen(str2); - char* newstr = new char[len1 + len2 + strlen(str3) + 1]; - if (!newstr) { - return nullptr; - } - strcpy(newstr, str1); - strcat(newstr + len1, str2); - strcat(newstr + len1 + len2, str3); - return newstr; -} - -// Return a lower case string -std::string SystemTools::LowerCase(const std::string& s) -{ - std::string n; - n.resize(s.size()); - for (size_t i = 0; i < s.size(); i++) { - n[i] = static_cast(tolower(s[i])); - } - return n; -} - -// Return a lower case string -std::string SystemTools::UpperCase(const std::string& s) -{ - std::string n; - n.resize(s.size()); - for (size_t i = 0; i < s.size(); i++) { - n[i] = static_cast(toupper(s[i])); - } - return n; -} - -// Count char in string -size_t SystemTools::CountChar(const char* str, char c) -{ - size_t count = 0; - - if (str) { - while (*str) { - if 
(*str == c) { - ++count; - } - ++str; - } - } - return count; -} - -// Remove chars in string -char* SystemTools::RemoveChars(const char* str, const char* toremove) -{ - if (!str) { - return nullptr; - } - char* clean_str = new char[strlen(str) + 1]; - char* ptr = clean_str; - while (*str) { - const char* str2 = toremove; - while (*str2 && *str != *str2) { - ++str2; - } - if (!*str2) { - *ptr++ = *str; - } - ++str; - } - *ptr = '\0'; - return clean_str; -} - -// Remove chars in string -char* SystemTools::RemoveCharsButUpperHex(const char* str) -{ - if (!str) { - return nullptr; - } - char* clean_str = new char[strlen(str) + 1]; - char* ptr = clean_str; - while (*str) { - if ((*str >= '0' && *str <= '9') || (*str >= 'A' && *str <= 'F')) { - *ptr++ = *str; - } - ++str; - } - *ptr = '\0'; - return clean_str; -} - -// Replace chars in string -char* SystemTools::ReplaceChars(char* str, const char* toreplace, - char replacement) -{ - if (str) { - char* ptr = str; - while (*ptr) { - const char* ptr2 = toreplace; - while (*ptr2) { - if (*ptr == *ptr2) { - *ptr = replacement; - } - ++ptr2; - } - ++ptr; - } - } - return str; -} - -// Returns if string starts with another string -bool SystemTools::StringStartsWith(const char* str1, const char* str2) -{ - if (!str1 || !str2) { - return false; - } - size_t len1 = strlen(str1), len2 = strlen(str2); - return len1 >= len2 && !strncmp(str1, str2, len2) ? true : false; -} - -// Returns if string starts with another string -bool SystemTools::StringStartsWith(const std::string& str1, const char* str2) -{ - if (!str2) { - return false; - } - size_t len1 = str1.size(), len2 = strlen(str2); - return len1 >= len2 && !strncmp(str1.c_str(), str2, len2) ? true : false; -} - -// Returns if string ends with another string -bool SystemTools::StringEndsWith(const char* str1, const char* str2) -{ - if (!str1 || !str2) { - return false; - } - size_t len1 = strlen(str1), len2 = strlen(str2); - return len1 >= len2 && !strncmp(str1 + (len1 - len2), str2, len2) ? true - : false; -} - -// Returns if string ends with another string -bool SystemTools::StringEndsWith(const std::string& str1, const char* str2) -{ - if (!str2) { - return false; - } - size_t len1 = str1.size(), len2 = strlen(str2); - return len1 >= len2 && !strncmp(str1.c_str() + (len1 - len2), str2, len2) - ? 
true - : false; -} - -// Returns a pointer to the last occurrence of str2 in str1 -const char* SystemTools::FindLastString(const char* str1, const char* str2) -{ - if (!str1 || !str2) { - return nullptr; - } - - size_t len1 = strlen(str1), len2 = strlen(str2); - if (len1 >= len2) { - const char* ptr = str1 + len1 - len2; - do { - if (!strncmp(ptr, str2, len2)) { - return ptr; - } - } while (ptr-- != str1); - } - - return nullptr; -} - -// Duplicate string -char* SystemTools::DuplicateString(const char* str) -{ - if (str) { - char* newstr = new char[strlen(str) + 1]; - return strcpy(newstr, str); - } - return nullptr; -} - -// Return a cropped string -std::string SystemTools::CropString(const std::string& s, size_t max_len) -{ - if (!s.size() || max_len == 0 || max_len >= s.size()) { - return s; - } - - std::string n; - n.reserve(max_len); - - size_t middle = max_len / 2; - - n += s.substr(0, middle); - n += s.substr(s.size() - (max_len - middle)); - - if (max_len > 2) { - n[middle] = '.'; - if (max_len > 3) { - n[middle - 1] = '.'; - if (max_len > 4) { - n[middle + 1] = '.'; - } - } - } - - return n; -} - -std::vector SystemTools::SplitString(const std::string& p, - char sep, bool isPath) -{ - std::string path = p; - std::vector paths; - if (path.empty()) { - return paths; - } - if (isPath && path[0] == '/') { - path.erase(path.begin()); - paths.push_back("/"); - } - std::string::size_type pos1 = 0; - std::string::size_type pos2 = path.find(sep, pos1 + 1); - while (pos2 != std::string::npos) { - paths.push_back(path.substr(pos1, pos2 - pos1)); - pos1 = pos2 + 1; - pos2 = path.find(sep, pos1 + 1); - } - paths.push_back(path.substr(pos1, pos2 - pos1)); - - return paths; -} - -int SystemTools::EstimateFormatLength(const char* format, va_list ap) -{ - if (!format) { - return 0; - } - - // Quick-hack attempt at estimating the length of the string. - // Should never under-estimate. - - // Start with the length of the format string itself. - - size_t length = strlen(format); - - // Increase the length for every argument in the format. - - const char* cur = format; - while (*cur) { - if (*cur++ == '%') { - // Skip "%%" since it doesn't correspond to a va_arg. - if (*cur != '%') { - while (!int(isalpha(*cur))) { - ++cur; - } - switch (*cur) { - case 's': { - // Check the length of the string. - char* s = va_arg(ap, char*); - if (s) { - length += strlen(s); - } - } break; - case 'e': - case 'f': - case 'g': { - // Assume the argument contributes no more than 64 characters. - length += 64; - - // Eat the argument. - static_cast(va_arg(ap, double)); - } break; - default: { - // Assume the argument contributes no more than 64 characters. - length += 64; - - // Eat the argument. - static_cast(va_arg(ap, int)); - } break; - } - } - - // Move past the characters just tested. 
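// A sketch of how EstimateFormatLength() is meant to be used: size a buffer
// generously before formatting. FormatString() is a hypothetical helper, and
// namespace/header names are assumed as above. Note the va_copy: the
// estimator consumes arguments from the va_list it is given.
#include <cstdarg>
#include <cstdio>
#include <string>
#include <vector>
#include <kwsys/SystemTools.hxx>

static std::string FormatString(const char* format, ...)
{
  va_list ap;
  va_start(ap, format);
  va_list apCopy;
  va_copy(apCopy, ap);  // keep 'ap' usable for the real formatting pass
  int estimate = kwsys::SystemTools::EstimateFormatLength(format, apCopy);
  va_end(apCopy);

  std::vector<char> buffer(static_cast<size_t>(estimate) + 1);
  std::vsnprintf(buffer.data(), buffer.size(), format, ap);
  va_end(ap);
  return std::string(buffer.data());
}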
- ++cur; - } - } - - return static_cast(length); -} - -std::string SystemTools::EscapeChars(const char* str, - const char* chars_to_escape, - char escape_char) -{ - std::string n; - if (str) { - if (!chars_to_escape || !*chars_to_escape) { - n.append(str); - } else { - n.reserve(strlen(str)); - while (*str) { - const char* ptr = chars_to_escape; - while (*ptr) { - if (*str == *ptr) { - n += escape_char; - break; - } - ++ptr; - } - n += *str; - ++str; - } - } - } - return n; -} - -#ifdef __VMS -static void ConvertVMSToUnix(std::string& path) -{ - std::string::size_type rootEnd = path.find(":["); - std::string::size_type pathEnd = path.find("]"); - if (rootEnd != std::string::npos) { - std::string root = path.substr(0, rootEnd); - std::string pathPart = path.substr(rootEnd + 2, pathEnd - rootEnd - 2); - const char* pathCString = pathPart.c_str(); - const char* pos0 = pathCString; - for (std::string::size_type pos = 0; *pos0; ++pos) { - if (*pos0 == '.') { - pathPart[pos] = '/'; - } - pos0++; - } - path = "/" + root + "/" + pathPart; - } -} -#endif - -// convert windows slashes to unix slashes -void SystemTools::ConvertToUnixSlashes(std::string& path) -{ - if (path.empty()) { - return; - } - - const char* pathCString = path.c_str(); - bool hasDoubleSlash = false; -#ifdef __VMS - ConvertVMSToUnix(path); -#else - const char* pos0 = pathCString; - for (std::string::size_type pos = 0; *pos0; ++pos) { - if (*pos0 == '\\') { - path[pos] = '/'; - } - - // Also, reuse the loop to check for slash followed by another slash - if (!hasDoubleSlash && *(pos0 + 1) == '/' && *(pos0 + 2) == '/') { -# ifdef _WIN32 - // However, on windows if the first characters are both slashes, - // then keep them that way, so that network paths can be handled. - if (pos > 0) { - hasDoubleSlash = true; - } -# else - hasDoubleSlash = true; -# endif - } - - pos0++; - } - - if (hasDoubleSlash) { - SystemTools::ReplaceString(path, "//", "/"); - } -#endif - - // remove any trailing slash - // if there is a tilda ~ then replace it with HOME - pathCString = path.c_str(); - if (pathCString[0] == '~' && - (pathCString[1] == '/' || pathCString[1] == '\0')) { - std::string homeEnv; - if (SystemTools::GetEnv("HOME", homeEnv)) { - path.replace(0, 1, homeEnv); - } - } -#ifdef HAVE_GETPWNAM - else if (pathCString[0] == '~') { - std::string::size_type idx = path.find_first_of("/\0"); - std::string user = path.substr(1, idx - 1); - passwd* pw = getpwnam(user.c_str()); - if (pw) { - path.replace(0, idx, pw->pw_dir); - } - } -#endif - // remove trailing slash if the path is more than - // a single / - pathCString = path.c_str(); - size_t size = path.size(); - if (size > 1 && path.back() == '/') { - // if it is c:/ then do not remove the trailing slash - if (!((size == 3 && pathCString[1] == ':'))) { - path.resize(size - 1); - } - } -} - -#ifdef _WIN32 -std::wstring SystemTools::ConvertToWindowsExtendedPath( - const std::string& source) -{ - return Encoding::ToWindowsExtendedPath(source); -} -#endif - -// change // to /, and escape any spaces in the path -std::string SystemTools::ConvertToUnixOutputPath(const std::string& path) -{ - std::string ret = path; - - // remove // except at the beginning might be a cygwin drive - std::string::size_type pos = 1; - while ((pos = ret.find("//", pos)) != std::string::npos) { - ret.erase(pos, 1); - } - // escape spaces and () in the path - if (ret.find_first_of(" ") != std::string::npos) { - std::string result; - char lastch = 1; - for (const char* ch = ret.c_str(); *ch != '\0'; ++ch) { - // if it is 
already escaped then don't try to escape it again - if ((*ch == ' ') && lastch != '\\') { - result += '\\'; - } - result += *ch; - lastch = *ch; - } - ret = result; - } - return ret; -} - -std::string SystemTools::ConvertToOutputPath(const std::string& path) -{ -#if defined(_WIN32) && !defined(__CYGWIN__) - return SystemTools::ConvertToWindowsOutputPath(path); -#else - return SystemTools::ConvertToUnixOutputPath(path); -#endif -} - -// remove double slashes not at the start -std::string SystemTools::ConvertToWindowsOutputPath(const std::string& path) -{ - std::string ret; - // make it big enough for all of path and double quotes - ret.reserve(path.size() + 3); - // put path into the string - ret = path; - std::string::size_type pos = 0; - // first convert all of the slashes - while ((pos = ret.find('/', pos)) != std::string::npos) { - ret[pos] = '\\'; - pos++; - } - // check for really small paths - if (ret.size() < 2) { - return ret; - } - // now clean up a bit and remove double slashes - // Only if it is not the first position in the path which is a network - // path on windows - pos = 1; // start at position 1 - if (ret[0] == '\"') { - pos = 2; // if the string is already quoted then start at 2 - if (ret.size() < 3) { - return ret; - } - } - while ((pos = ret.find("\\\\", pos)) != std::string::npos) { - ret.erase(pos, 1); - } - // now double quote the path if it has spaces in it - // and is not already double quoted - if (ret.find(' ') != std::string::npos && ret[0] != '\"') { - ret.insert(static_cast(0), - static_cast(1), '\"'); - ret.append(1, '\"'); - } - return ret; -} - -/** - * Append the filename from the path source to the directory name dir. - */ -static std::string FileInDir(const std::string& source, const std::string& dir) -{ - std::string new_destination = dir; - SystemTools::ConvertToUnixSlashes(new_destination); - return new_destination + '/' + SystemTools::GetFilenameName(source); -} - -bool SystemTools::CopyFileIfDifferent(const std::string& source, - const std::string& destination) -{ - // special check for a destination that is a directory - // FilesDiffer does not handle file to directory compare - if (SystemTools::FileIsDirectory(destination)) { - const std::string new_destination = FileInDir(source, destination); - return SystemTools::CopyFileIfDifferent(source, new_destination); - } - // source and destination are files so do a copy if they - // are different - if (SystemTools::FilesDiffer(source, destination)) { - return SystemTools::CopyFileAlways(source, destination); - } - // at this point the files must be the same so return true - return true; -} - -#define KWSYS_ST_BUFFER 4096 - -bool SystemTools::FilesDiffer(const std::string& source, - const std::string& destination) -{ - -#if defined(_WIN32) - WIN32_FILE_ATTRIBUTE_DATA statSource; - if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(source).c_str(), - GetFileExInfoStandard, &statSource) == 0) { - return true; - } - - WIN32_FILE_ATTRIBUTE_DATA statDestination; - if (GetFileAttributesExW( - Encoding::ToWindowsExtendedPath(destination).c_str(), - GetFileExInfoStandard, &statDestination) == 0) { - return true; - } - - if (statSource.nFileSizeHigh != statDestination.nFileSizeHigh || - statSource.nFileSizeLow != statDestination.nFileSizeLow) { - return true; - } - - if (statSource.nFileSizeHigh == 0 && statSource.nFileSizeLow == 0) { - return false; - } - off_t nleft = - ((__int64)statSource.nFileSizeHigh << 32) + statSource.nFileSizeLow; - -#else - - struct stat statSource; - if (stat(source.c_str(), 
&statSource) != 0) { - return true; - } - - struct stat statDestination; - if (stat(destination.c_str(), &statDestination) != 0) { - return true; - } - - if (statSource.st_size != statDestination.st_size) { - return true; - } - - if (statSource.st_size == 0) { - return false; - } - off_t nleft = statSource.st_size; -#endif - -#if defined(_WIN32) - kwsys::ifstream finSource(source.c_str(), (std::ios::binary | std::ios::in)); - kwsys::ifstream finDestination(destination.c_str(), - (std::ios::binary | std::ios::in)); -#else - kwsys::ifstream finSource(source.c_str()); - kwsys::ifstream finDestination(destination.c_str()); -#endif - if (!finSource || !finDestination) { - return true; - } - - // Compare the files a block at a time. - char source_buf[KWSYS_ST_BUFFER]; - char dest_buf[KWSYS_ST_BUFFER]; - while (nleft > 0) { - // Read a block from each file. - std::streamsize nnext = (nleft > KWSYS_ST_BUFFER) - ? KWSYS_ST_BUFFER - : static_cast(nleft); - finSource.read(source_buf, nnext); - finDestination.read(dest_buf, nnext); - - // If either failed to read assume they are different. - if (static_cast(finSource.gcount()) != nnext || - static_cast(finDestination.gcount()) != nnext) { - return true; - } - - // If this block differs the file differs. - if (memcmp(static_cast(source_buf), - static_cast(dest_buf), - static_cast(nnext)) != 0) { - return true; - } - - // Update the byte count remaining. - nleft -= nnext; - } - - // No differences found. - return false; -} - -bool SystemTools::TextFilesDiffer(const std::string& path1, - const std::string& path2) -{ - kwsys::ifstream if1(path1.c_str()); - kwsys::ifstream if2(path2.c_str()); - if (!if1 || !if2) { - return true; - } - - for (;;) { - std::string line1, line2; - bool hasData1 = GetLineFromStream(if1, line1); - bool hasData2 = GetLineFromStream(if2, line2); - if (hasData1 != hasData2) { - return true; - } - if (!hasData1) { - break; - } - if (line1 != line2) { - return true; - } - } - return false; -} - -/** - * Blockwise copy source to destination file - */ -static bool CopyFileContentBlockwise(const std::string& source, - const std::string& destination) -{ -// Open files -#if defined(_WIN32) - kwsys::ifstream fin( - Encoding::ToNarrow(Encoding::ToWindowsExtendedPath(source)).c_str(), - std::ios::in | std::ios::binary); -#else - kwsys::ifstream fin(source.c_str(), std::ios::in | std::ios::binary); -#endif - if (!fin) { - return false; - } - - // try and remove the destination file so that read only destination files - // can be written to. - // If the remove fails continue so that files in read only directories - // that do not allow file removal can be modified. - SystemTools::RemoveFile(destination); - -#if defined(_WIN32) - kwsys::ofstream fout( - Encoding::ToNarrow(Encoding::ToWindowsExtendedPath(destination)).c_str(), - std::ios::out | std::ios::trunc | std::ios::binary); -#else - kwsys::ofstream fout(destination.c_str(), - std::ios::out | std::ios::trunc | std::ios::binary); -#endif - if (!fout) { - return false; - } - - // This copy loop is very sensitive on certain platforms with - // slightly broken stream libraries (like HPUX). Normally, it is - // incorrect to not check the error condition on the fin.read() - // before using the data, but the fin.gcount() will be zero if an - // error occurred. Therefore, the loop should be safe everywhere. 
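// Usage sketch for the copy helpers defined above; the paths are hypothetical
// and namespace/header names are assumed as above.
#include <string>
#include <kwsys/SystemTools.hxx>

bool InstallHeader(const std::string& srcHeader, const std::string& destDir)
{
  // When the destination is a directory, the source file name is appended to
  // it. FilesDiffer() is consulted first, so an identical destination is left
  // untouched and keeps its timestamp; the call returns true on success or
  // when no copy was needed.
  return kwsys::SystemTools::CopyFileIfDifferent(srcHeader, destDir);
}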
- while (fin) { - const int bufferSize = 4096; - char buffer[bufferSize]; - - fin.read(buffer, bufferSize); - if (fin.gcount()) { - fout.write(buffer, fin.gcount()); - } else { - break; - } - } - - // Make sure the operating system has finished writing the file - // before closing it. This will ensure the file is finished before - // the check below. - fout.flush(); - - fin.close(); - fout.close(); - - if (!fout) { - return false; - } - - return true; -} - -/** - * Clone the source file to the destination file - * - * If available, the Linux FICLONE ioctl is used to create a check - * copy-on-write clone of the source file. - * - * The method returns false for the following cases: - * - The code has not been compiled on Linux or the ioctl was unknown - * - The source and destination is on different file systems - * - The underlying filesystem does not support file cloning - * - An unspecified error occurred - */ -static bool CloneFileContent(const std::string& source, - const std::string& destination) -{ -#if defined(__linux) && defined(FICLONE) - int in = open(source.c_str(), O_RDONLY); - if (in < 0) { - return false; - } - - SystemTools::RemoveFile(destination); - - int out = - open(destination.c_str(), O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR); - if (out < 0) { - close(in); - return false; - } - - int result = ioctl(out, FICLONE, in); - close(in); - close(out); - - if (result < 0) { - return false; - } - - return true; -#else - (void)source; - (void)destination; - return false; -#endif -} - -/** - * Copy a file named by "source" to the file named by "destination". - */ -bool SystemTools::CopyFileAlways(const std::string& source, - const std::string& destination) -{ - mode_t perm = 0; - bool perms = SystemTools::GetPermissions(source, perm); - std::string real_destination = destination; - - if (SystemTools::FileIsDirectory(source)) { - SystemTools::MakeDirectory(destination); - } else { - // If destination is a directory, try to create a file with the same - // name as the source in that directory. - - std::string destination_dir; - if (SystemTools::FileIsDirectory(destination)) { - destination_dir = real_destination; - SystemTools::ConvertToUnixSlashes(real_destination); - real_destination += '/'; - std::string source_name = source; - real_destination += SystemTools::GetFilenameName(source_name); - } else { - destination_dir = SystemTools::GetFilenamePath(destination); - } - // If files are the same do not copy - if (SystemTools::SameFile(source, real_destination)) { - return true; - } - - // Create destination directory - - SystemTools::MakeDirectory(destination_dir); - - if (!CloneFileContent(source, real_destination)) { - // if cloning did not succeed, fall back to blockwise copy - if (!CopyFileContentBlockwise(source, real_destination)) { - return false; - } - } - } - if (perms) { - if (!SystemTools::SetPermissions(real_destination, perm)) { - return false; - } - } - return true; -} - -bool SystemTools::CopyAFile(const std::string& source, - const std::string& destination, bool always) -{ - if (always) { - return SystemTools::CopyFileAlways(source, destination); - } else { - return SystemTools::CopyFileIfDifferent(source, destination); - } -} - -/** - * Copy a directory content from "source" directory to the directory named by - * "destination". 
- */ -bool SystemTools::CopyADirectory(const std::string& source, - const std::string& destination, bool always) -{ - Directory dir; - if (dir.Load(source) == 0) { - return false; - } - size_t fileNum; - if (!SystemTools::MakeDirectory(destination)) { - return false; - } - for (fileNum = 0; fileNum < dir.GetNumberOfFiles(); ++fileNum) { - if (strcmp(dir.GetFile(static_cast(fileNum)), ".") && - strcmp(dir.GetFile(static_cast(fileNum)), "..")) { - std::string fullPath = source; - fullPath += "/"; - fullPath += dir.GetFile(static_cast(fileNum)); - if (SystemTools::FileIsDirectory(fullPath)) { - std::string fullDestPath = destination; - fullDestPath += "/"; - fullDestPath += dir.GetFile(static_cast(fileNum)); - if (!SystemTools::CopyADirectory(fullPath, fullDestPath, always)) { - return false; - } - } else { - if (!SystemTools::CopyAFile(fullPath, destination, always)) { - return false; - } - } - } - } - - return true; -} - -// return size of file; also returns zero if no file exists -unsigned long SystemTools::FileLength(const std::string& filename) -{ - unsigned long length = 0; -#ifdef _WIN32 - WIN32_FILE_ATTRIBUTE_DATA fs; - if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(filename).c_str(), - GetFileExInfoStandard, &fs) != 0) { - /* To support the full 64-bit file size, use fs.nFileSizeHigh - * and fs.nFileSizeLow to construct the 64 bit size - - length = ((__int64)fs.nFileSizeHigh << 32) + fs.nFileSizeLow; - */ - length = static_cast(fs.nFileSizeLow); - } -#else - struct stat fs; - if (stat(filename.c_str(), &fs) == 0) { - length = static_cast(fs.st_size); - } -#endif - return length; -} - -int SystemTools::Strucmp(const char* l, const char* r) -{ - int lc; - int rc; - do { - lc = tolower(*l++); - rc = tolower(*r++); - } while (lc == rc && lc); - return lc - rc; -} - -// return file's modified time -long int SystemTools::ModifiedTime(const std::string& filename) -{ - long int mt = 0; -#ifdef _WIN32 - WIN32_FILE_ATTRIBUTE_DATA fs; - if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(filename).c_str(), - GetFileExInfoStandard, &fs) != 0) { - mt = windows_filetime_to_posix_time(fs.ftLastWriteTime); - } -#else - struct stat fs; - if (stat(filename.c_str(), &fs) == 0) { - mt = static_cast(fs.st_mtime); - } -#endif - return mt; -} - -// return file's creation time -long int SystemTools::CreationTime(const std::string& filename) -{ - long int ct = 0; -#ifdef _WIN32 - WIN32_FILE_ATTRIBUTE_DATA fs; - if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(filename).c_str(), - GetFileExInfoStandard, &fs) != 0) { - ct = windows_filetime_to_posix_time(fs.ftCreationTime); - } -#else - struct stat fs; - if (stat(filename.c_str(), &fs) == 0) { - ct = fs.st_ctime >= 0 ? static_cast(fs.st_ctime) : 0; - } -#endif - return ct; -} - -std::string SystemTools::GetLastSystemError() -{ - int e = errno; - return strerror(e); -} - -bool SystemTools::RemoveFile(const std::string& source) -{ -#ifdef _WIN32 - std::wstring const& ws = Encoding::ToWindowsExtendedPath(source); - if (DeleteFileW(ws.c_str())) { - return true; - } - DWORD err = GetLastError(); - if (err == ERROR_FILE_NOT_FOUND || err == ERROR_PATH_NOT_FOUND) { - return true; - } - if (err != ERROR_ACCESS_DENIED) { - return false; - } - /* The file may be read-only. Try adding write permission. 
*/ - mode_t mode; - if (!SystemTools::GetPermissions(source, mode) || - !SystemTools::SetPermissions(source, S_IWRITE)) { - SetLastError(err); - return false; - } - - const DWORD DIRECTORY_SOFT_LINK_ATTRS = - FILE_ATTRIBUTE_DIRECTORY | FILE_ATTRIBUTE_REPARSE_POINT; - DWORD attrs = GetFileAttributesW(ws.c_str()); - if (attrs != INVALID_FILE_ATTRIBUTES && - (attrs & DIRECTORY_SOFT_LINK_ATTRS) == DIRECTORY_SOFT_LINK_ATTRS && - RemoveDirectoryW(ws.c_str())) { - return true; - } - if (DeleteFileW(ws.c_str()) || GetLastError() == ERROR_FILE_NOT_FOUND || - GetLastError() == ERROR_PATH_NOT_FOUND) { - return true; - } - /* Try to restore the original permissions. */ - SystemTools::SetPermissions(source, mode); - SetLastError(err); - return false; -#else - return unlink(source.c_str()) == 0 || errno == ENOENT; -#endif -} - -bool SystemTools::RemoveADirectory(const std::string& source) -{ - // Add write permission to the directory so we can modify its - // content to remove files and directories from it. - mode_t mode; - if (SystemTools::GetPermissions(source, mode)) { -#if defined(_WIN32) && !defined(__CYGWIN__) - mode |= S_IWRITE; -#else - mode |= S_IWUSR; -#endif - SystemTools::SetPermissions(source, mode); - } - - Directory dir; - dir.Load(source); - size_t fileNum; - for (fileNum = 0; fileNum < dir.GetNumberOfFiles(); ++fileNum) { - if (strcmp(dir.GetFile(static_cast(fileNum)), ".") && - strcmp(dir.GetFile(static_cast(fileNum)), "..")) { - std::string fullPath = source; - fullPath += "/"; - fullPath += dir.GetFile(static_cast(fileNum)); - if (SystemTools::FileIsDirectory(fullPath) && - !SystemTools::FileIsSymlink(fullPath)) { - if (!SystemTools::RemoveADirectory(fullPath)) { - return false; - } - } else { - if (!SystemTools::RemoveFile(fullPath)) { - return false; - } - } - } - } - - return (Rmdir(source) == 0); -} - -/** - */ -size_t SystemTools::GetMaximumFilePathLength() -{ - return KWSYS_SYSTEMTOOLS_MAXPATH; -} - -/** - * Find the file the given name. Searches the given path and then - * the system search path. Returns the full path to the file if it is - * found. Otherwise, the empty string is returned. - */ -std::string SystemToolsStatic::FindName( - const std::string& name, const std::vector& userPaths, - bool no_system_path) -{ - // Add the system search path to our path first - std::vector path; - if (!no_system_path) { - SystemTools::GetPath(path, "CMAKE_FILE_PATH"); - SystemTools::GetPath(path); - } - // now add the additional paths - path.reserve(path.size() + userPaths.size()); - path.insert(path.end(), userPaths.begin(), userPaths.end()); - // now look for the file - std::string tryPath; - for (std::string const& p : path) { - tryPath = p; - if (tryPath.empty() || tryPath.back() != '/') { - tryPath += '/'; - } - tryPath += name; - if (SystemTools::FileExists(tryPath)) { - return tryPath; - } - } - // Couldn't find the file. - return ""; -} - -/** - * Find the file the given name. Searches the given path and then - * the system search path. Returns the full path to the file if it is - * found. Otherwise, the empty string is returned. - */ -std::string SystemTools::FindFile(const std::string& name, - const std::vector& userPaths, - bool no_system_path) -{ - std::string tryPath = - SystemToolsStatic::FindName(name, userPaths, no_system_path); - if (!tryPath.empty() && !SystemTools::FileIsDirectory(tryPath)) { - return SystemTools::CollapseFullPath(tryPath); - } - // Couldn't find the file. - return ""; -} - -/** - * Find the directory the given name. 
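RemoveADirectory() above is a classic depth-first delete: skip "." and "..", recurse into real directories but not through symlinks, unlink everything else, and finally rmdir the emptied directory. A compact POSIX sketch of that pattern (illustrative only; the removed code additionally adds write permission to the directory before touching its contents):

    #include <dirent.h>
    #include <sys/stat.h>
    #include <unistd.h>
    #include <cstring>
    #include <string>

    // Depth-first removal: unlink files and symlinks, recurse into real
    // directories, then remove the now-empty directory itself.
    static bool RemoveTree(const std::string& dir)
    {
      DIR* d = opendir(dir.c_str());
      if (d) {
        while (dirent* e = readdir(d)) {
          if (std::strcmp(e->d_name, ".") == 0 || std::strcmp(e->d_name, "..") == 0) {
            continue;
          }
          std::string full = dir + "/" + e->d_name;
          struct stat st;
          if (lstat(full.c_str(), &st) == 0 && S_ISDIR(st.st_mode)) {
            if (!RemoveTree(full)) {
              closedir(d);
              return false;
            }
          } else if (unlink(full.c_str()) != 0) {
            closedir(d);
            return false;
          }
        }
        closedir(d);
      }
      return rmdir(dir.c_str()) == 0;
    }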
Searches the given path and then - * the system search path. Returns the full path to the directory if it is - * found. Otherwise, the empty string is returned. - */ -std::string SystemTools::FindDirectory( - const std::string& name, const std::vector& userPaths, - bool no_system_path) -{ - std::string tryPath = - SystemToolsStatic::FindName(name, userPaths, no_system_path); - if (!tryPath.empty() && SystemTools::FileIsDirectory(tryPath)) { - return SystemTools::CollapseFullPath(tryPath); - } - // Couldn't find the file. - return ""; -} - -/** - * Find the executable with the given name. Searches the given path and then - * the system search path. Returns the full path to the executable if it is - * found. Otherwise, the empty string is returned. - */ -std::string SystemTools::FindProgram(const char* nameIn, - const std::vector& userPaths, - bool no_system_path) -{ - if (!nameIn || !*nameIn) { - return ""; - } - return SystemTools::FindProgram(std::string(nameIn), userPaths, - no_system_path); -} - -std::string SystemTools::FindProgram(const std::string& name, - const std::vector& userPaths, - bool no_system_path) -{ - std::string tryPath; - -#if defined(_WIN32) || defined(__CYGWIN__) || defined(__MINGW32__) - std::vector extensions; - // check to see if the name already has a .xxx at - // the end of it - // on windows try .com then .exe - if (name.size() <= 3 || name[name.size() - 4] != '.') { - extensions.emplace_back(".com"); - extensions.emplace_back(".exe"); - - // first try with extensions if the os supports them - for (std::string const& ext : extensions) { - tryPath = name; - tryPath += ext; - if (SystemTools::FileExists(tryPath, true)) { - return SystemTools::CollapseFullPath(tryPath); - } - } - } -#endif - - // now try just the name - if (SystemTools::FileExists(name, true)) { - return SystemTools::CollapseFullPath(name); - } - // now construct the path - std::vector path; - // Add the system search path to our path. - if (!no_system_path) { - SystemTools::GetPath(path); - } - // now add the additional paths - path.reserve(path.size() + userPaths.size()); - path.insert(path.end(), userPaths.begin(), userPaths.end()); - // Add a trailing slash to all paths to aid the search process. - for (std::string& p : path) { - if (p.empty() || p.back() != '/') { - p += '/'; - } - } - // Try each path - for (std::string& p : path) { -#ifdef _WIN32 - // Remove double quotes from the path on windows - SystemTools::ReplaceString(p, "\"", ""); -#endif -#if defined(_WIN32) || defined(__CYGWIN__) || defined(__MINGW32__) - // first try with extensions - for (std::string const& ext : extensions) { - tryPath = p; - tryPath += name; - tryPath += ext; - if (SystemTools::FileExists(tryPath, true)) { - return SystemTools::CollapseFullPath(tryPath); - } - } -#endif - // now try it without them - tryPath = p; - tryPath += name; - if (SystemTools::FileExists(tryPath, true)) { - return SystemTools::CollapseFullPath(tryPath); - } - } - // Couldn't find the program. - return ""; -} - -std::string SystemTools::FindProgram(const std::vector& names, - const std::vector& path, - bool noSystemPath) -{ - for (std::string const& name : names) { - // Try to find the program. - std::string result = SystemTools::FindProgram(name, path, noSystemPath); - if (!result.empty()) { - return result; - } - } - return ""; -} - -/** - * Find the library with the given name. Searches the given path and then - * the system search path. Returns the full path to the library if it is - * found. Otherwise, the empty string is returned. 
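FindProgram() above boils down to: try the name as given, then prepend each search directory (adding .com/.exe candidates on Windows) and test whether the result is an executable file. A POSIX-only sketch of that loop using access(X_OK); the colon separator and the empty-entry-means-current-directory rule are PATH conventions, and WhichProgram is an invented name, not KWSys API:

    #include <unistd.h>
    #include <cstdlib>
    #include <sstream>
    #include <string>

    // Walk the PATH environment variable and return the first candidate that
    // exists and is executable; empty string when nothing matches.
    static std::string WhichProgram(const std::string& name)
    {
      const char* path = std::getenv("PATH");
      if (!path) {
        return "";
      }
      std::istringstream dirs(path);
      std::string dir;
      while (std::getline(dirs, dir, ':')) {
        if (dir.empty()) {
          dir = "."; // an empty PATH entry conventionally means the cwd
        }
        std::string candidate = dir + "/" + name;
        if (access(candidate.c_str(), X_OK) == 0) {
          return candidate;
        }
      }
      return "";
    }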
- */ -std::string SystemTools::FindLibrary(const std::string& name, - const std::vector& userPaths) -{ - // See if the executable exists as written. - if (SystemTools::FileExists(name, true)) { - return SystemTools::CollapseFullPath(name); - } - - // Add the system search path to our path. - std::vector path; - SystemTools::GetPath(path); - // now add the additional paths - path.reserve(path.size() + userPaths.size()); - path.insert(path.end(), userPaths.begin(), userPaths.end()); - // Add a trailing slash to all paths to aid the search process. - for (std::string& p : path) { - if (p.empty() || p.back() != '/') { - p += '/'; - } - } - std::string tryPath; - for (std::string const& p : path) { -#if defined(__APPLE__) - tryPath = p; - tryPath += name; - tryPath += ".framework"; - if (SystemTools::FileIsDirectory(tryPath)) { - return SystemTools::CollapseFullPath(tryPath); - } -#endif -#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__MINGW32__) - tryPath = p; - tryPath += name; - tryPath += ".lib"; - if (SystemTools::FileExists(tryPath, true)) { - return SystemTools::CollapseFullPath(tryPath); - } -#else - tryPath = p; - tryPath += "lib"; - tryPath += name; - tryPath += ".so"; - if (SystemTools::FileExists(tryPath, true)) { - return SystemTools::CollapseFullPath(tryPath); - } - tryPath = p; - tryPath += "lib"; - tryPath += name; - tryPath += ".a"; - if (SystemTools::FileExists(tryPath, true)) { - return SystemTools::CollapseFullPath(tryPath); - } - tryPath = p; - tryPath += "lib"; - tryPath += name; - tryPath += ".sl"; - if (SystemTools::FileExists(tryPath, true)) { - return SystemTools::CollapseFullPath(tryPath); - } - tryPath = p; - tryPath += "lib"; - tryPath += name; - tryPath += ".dylib"; - if (SystemTools::FileExists(tryPath, true)) { - return SystemTools::CollapseFullPath(tryPath); - } - tryPath = p; - tryPath += "lib"; - tryPath += name; - tryPath += ".dll"; - if (SystemTools::FileExists(tryPath, true)) { - return SystemTools::CollapseFullPath(tryPath); - } -#endif - } - - // Couldn't find the library. - return ""; -} - -std::string SystemTools::GetRealPath(const std::string& path, - std::string* errorMessage) -{ - std::string ret; - Realpath(path, ret, errorMessage); - return ret; -} - -bool SystemTools::FileIsDirectory(const std::string& inName) -{ - if (inName.empty()) { - return false; - } - size_t length = inName.size(); - const char* name = inName.c_str(); - - // Remove any trailing slash from the name except in a root component. - char local_buffer[KWSYS_SYSTEMTOOLS_MAXPATH]; - std::string string_buffer; - size_t last = length - 1; - if (last > 0 && (name[last] == '/' || name[last] == '\\') && - strcmp(name, "/") != 0 && name[last - 1] != ':') { - if (last < sizeof(local_buffer)) { - memcpy(local_buffer, name, last); - local_buffer[last] = '\0'; - name = local_buffer; - } else { - string_buffer.append(name, last); - name = string_buffer.c_str(); - } - } - -// Now check the file node type. 
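FindLibrary() above probes each search directory for platform-specific spellings of the library name: name.framework on Apple, name.lib on Windows, and lib<name> with .so/.a/.sl/.dylib/.dll suffixes elsewhere. A small sketch of the Unix-style probing; the suffix subset and the ProbeLibrary name are illustrative, and existence is checked here simply by opening the file (which also requires read permission):

    #include <fstream>
    #include <string>
    #include <vector>

    // Probe "lib<name><suffix>" in each directory and return the first hit.
    static std::string ProbeLibrary(const std::string& name,
                                    const std::vector<std::string>& dirs)
    {
      const char* suffixes[] = { ".so", ".a", ".dylib", ".sl" };
      for (const std::string& d : dirs) {
        for (const char* suf : suffixes) {
          std::string candidate = d + "/lib" + name + suf;
          if (std::ifstream(candidate).good()) { // readable-existence check only
            return candidate;
          }
        }
      }
      return "";
    }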
-#if defined(_WIN32) - DWORD attr = - GetFileAttributesW(Encoding::ToWindowsExtendedPath(name).c_str()); - if (attr != INVALID_FILE_ATTRIBUTES) { - return (attr & FILE_ATTRIBUTE_DIRECTORY) != 0; -#else - struct stat fs; - if (stat(name, &fs) == 0) { - return S_ISDIR(fs.st_mode); -#endif - } else { - return false; - } -} - -bool SystemTools::FileIsSymlink(const std::string& name) -{ -#if defined(_WIN32) - std::wstring path = Encoding::ToWindowsExtendedPath(name); - DWORD attr = GetFileAttributesW(path.c_str()); - if (attr != INVALID_FILE_ATTRIBUTES) { - if ((attr & FILE_ATTRIBUTE_REPARSE_POINT) != 0) { - // FILE_ATTRIBUTE_REPARSE_POINT means: - // * a file or directory that has an associated reparse point, or - // * a file that is a symbolic link. - HANDLE hFile = CreateFileW( - path.c_str(), GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING, - FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, nullptr); - if (hFile == INVALID_HANDLE_VALUE) { - return false; - } - byte buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; - DWORD bytesReturned = 0; - if (!DeviceIoControl(hFile, FSCTL_GET_REPARSE_POINT, nullptr, 0, buffer, - MAXIMUM_REPARSE_DATA_BUFFER_SIZE, &bytesReturned, - nullptr)) { - CloseHandle(hFile); - // Since FILE_ATTRIBUTE_REPARSE_POINT is set this file must be - // a symbolic link if it is not a reparse point. - return GetLastError() == ERROR_NOT_A_REPARSE_POINT; - } - CloseHandle(hFile); - ULONG reparseTag = - reinterpret_cast(&buffer[0])->ReparseTag; - return (reparseTag == IO_REPARSE_TAG_SYMLINK) || - (reparseTag == IO_REPARSE_TAG_MOUNT_POINT); - } - return false; - } else { - return false; - } -#else - struct stat fs; - if (lstat(name.c_str(), &fs) == 0) { - return S_ISLNK(fs.st_mode); - } else { - return false; - } -#endif -} - -bool SystemTools::FileIsFIFO(const std::string& name) -{ -#if defined(_WIN32) - HANDLE hFile = - CreateFileW(Encoding::ToWide(name).c_str(), GENERIC_READ, FILE_SHARE_READ, - nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr); - if (hFile == INVALID_HANDLE_VALUE) { - return false; - } - const DWORD type = GetFileType(hFile); - CloseHandle(hFile); - return type == FILE_TYPE_PIPE; -#else - struct stat fs; - if (lstat(name.c_str(), &fs) == 0) { - return S_ISFIFO(fs.st_mode); - } else { - return false; - } -#endif -} - -#if defined(_WIN32) && !defined(__CYGWIN__) -bool SystemTools::CreateSymlink(const std::string&, const std::string&) -{ - return false; -} -#else -bool SystemTools::CreateSymlink(const std::string& origName, - const std::string& newName) -{ - return symlink(origName.c_str(), newName.c_str()) >= 0; -} -#endif - -#if defined(_WIN32) && !defined(__CYGWIN__) -bool SystemTools::ReadSymlink(const std::string&, std::string&) -{ - return false; -} -#else -bool SystemTools::ReadSymlink(const std::string& newName, - std::string& origName) -{ - char buf[KWSYS_SYSTEMTOOLS_MAXPATH + 1]; - int count = static_cast( - readlink(newName.c_str(), buf, KWSYS_SYSTEMTOOLS_MAXPATH)); - if (count >= 0) { - // Add null-terminator. 
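On POSIX the FileIsSymlink()/ReadSymlink() pair above comes down to lstat() plus readlink(); the Windows branch instead inspects reparse points as shown. A minimal sketch that combines the two POSIX calls, remembering that readlink() does not NUL-terminate its buffer (ReadLinkTarget is an invented name):

    #include <sys/stat.h>
    #include <unistd.h>
    #include <string>

    // Return true and fill `target` if `path` is a symbolic link.
    static bool ReadLinkTarget(const std::string& path, std::string& target)
    {
      struct stat st;
      if (lstat(path.c_str(), &st) != 0 || !S_ISLNK(st.st_mode)) {
        return false;
      }
      char buf[4096];
      ssize_t n = readlink(path.c_str(), buf, sizeof(buf));
      if (n < 0) {
        return false;
      }
      // readlink() reports the length instead of NUL-terminating.
      target.assign(buf, static_cast<size_t>(n));
      return true;
    }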
- buf[count] = 0; - origName = buf; - return true; - } else { - return false; - } -} -#endif - -int SystemTools::ChangeDirectory(const std::string& dir) -{ - return Chdir(dir); -} - -std::string SystemTools::GetCurrentWorkingDirectory(bool collapse) -{ - char buf[2048]; - const char* cwd = Getcwd(buf, 2048); - std::string path; - if (cwd) { - path = cwd; - } - if (collapse) { - return SystemTools::CollapseFullPath(path); - } - return path; -} - -std::string SystemTools::GetProgramPath(const std::string& in_name) -{ - std::string dir, file; - SystemTools::SplitProgramPath(in_name, dir, file); - return dir; -} - -bool SystemTools::SplitProgramPath(const std::string& in_name, - std::string& dir, std::string& file, bool) -{ - dir = in_name; - file = ""; - SystemTools::ConvertToUnixSlashes(dir); - - if (!SystemTools::FileIsDirectory(dir)) { - std::string::size_type slashPos = dir.rfind("/"); - if (slashPos != std::string::npos) { - file = dir.substr(slashPos + 1); - dir = dir.substr(0, slashPos); - } else { - file = dir; - dir = ""; - } - } - if (!(dir.empty()) && !SystemTools::FileIsDirectory(dir)) { - std::string oldDir = in_name; - SystemTools::ConvertToUnixSlashes(oldDir); - dir = in_name; - return false; - } - return true; -} - -bool SystemTools::FindProgramPath(const char* argv0, std::string& pathOut, - std::string& errorMsg, const char* exeName, - const char* buildDir, - const char* installPrefix) -{ - std::vector failures; - std::string self = argv0 ? argv0 : ""; - failures.push_back(self); - SystemTools::ConvertToUnixSlashes(self); - self = SystemTools::FindProgram(self); - if (!SystemTools::FileExists(self)) { - if (buildDir) { - std::string intdir = "."; -#ifdef CMAKE_INTDIR - intdir = CMAKE_INTDIR; -#endif - self = buildDir; - self += "/bin/"; - self += intdir; - self += "/"; - self += exeName; - self += SystemTools::GetExecutableExtension(); - } - } - if (installPrefix) { - if (!SystemTools::FileExists(self)) { - failures.push_back(self); - self = installPrefix; - self += "/bin/"; - self += exeName; - } - } - if (!SystemTools::FileExists(self)) { - failures.push_back(self); - std::ostringstream msg; - msg << "Can not find the command line program "; - if (exeName) { - msg << exeName; - } - msg << "\n"; - if (argv0) { - msg << " argv[0] = \"" << argv0 << "\"\n"; - } - msg << " Attempted paths:\n"; - for (std::string const& ff : failures) { - msg << " \"" << ff << "\"\n"; - } - errorMsg = msg.str(); - return false; - } - pathOut = self; - return true; -} - -std::string SystemTools::CollapseFullPath(const std::string& in_relative) -{ - return SystemTools::CollapseFullPath(in_relative, nullptr); -} - -#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP -void SystemTools::AddTranslationPath(const std::string& a, - const std::string& b) -{ - std::string path_a = a; - std::string path_b = b; - SystemTools::ConvertToUnixSlashes(path_a); - SystemTools::ConvertToUnixSlashes(path_b); - // First check this is a directory path, since we don't want the table to - // grow too fat - if (SystemTools::FileIsDirectory(path_a)) { - // Make sure the path is a full path and does not contain no '..' - // Ken--the following code is incorrect. .. 
can be in a valid path - // for example /home/martink/MyHubba...Hubba/Src - if (SystemTools::FileIsFullPath(path_b) && - path_b.find("..") == std::string::npos) { - // Before inserting make sure path ends with '/' - if (!path_a.empty() && path_a.back() != '/') { - path_a += '/'; - } - if (!path_b.empty() && path_b.back() != '/') { - path_b += '/'; - } - if (!(path_a == path_b)) { - SystemTools::Statics->TranslationMap.insert( - SystemToolsStatic::StringMap::value_type(std::move(path_a), - std::move(path_b))); - } - } - } -} - -void SystemTools::AddKeepPath(const std::string& dir) -{ - std::string cdir; - Realpath(SystemTools::CollapseFullPath(dir), cdir); - SystemTools::AddTranslationPath(cdir, dir); -} - -void SystemTools::CheckTranslationPath(std::string& path) -{ - // Do not translate paths that are too short to have meaningful - // translations. - if (path.size() < 2) { - return; - } - - // Always add a trailing slash before translation. It does not - // matter if this adds an extra slash, but we do not want to - // translate part of a directory (like the foo part of foo-dir). - path += '/'; - - // In case a file was specified we still have to go through this: - // Now convert any path found in the table back to the one desired: - for (auto const& pair : SystemTools::Statics->TranslationMap) { - // We need to check of the path is a substring of the other path - if (path.find(pair.first) == 0) { - path = path.replace(0, pair.first.size(), pair.second); - } - } - - // Remove the trailing slash we added before. - path.pop_back(); -} -#endif - -static void SystemToolsAppendComponents( - std::vector& out_components, - std::vector::iterator first, - std::vector::iterator last) -{ - static const std::string up = ".."; - static const std::string cur = "."; - for (std::vector::const_iterator i = first; i != last; ++i) { - if (*i == up) { - // Remove the previous component if possible. Ignore ../ components - // that try to go above the root. Keep ../ components if they are - // at the beginning of a relative path (base path is relative). - if (out_components.size() > 1 && out_components.back() != up) { - out_components.resize(out_components.size() - 1); - } else if (!out_components.empty() && out_components[0].empty()) { - out_components.emplace_back(std::move(*i)); - } - } else if (!i->empty() && *i != cur) { - out_components.emplace_back(std::move(*i)); - } - } -} - -std::string SystemTools::CollapseFullPath(const std::string& in_path, - const char* in_base) -{ - // Use the current working directory as a base path. - char buf[2048]; - const char* res_in_base = in_base; - if (!res_in_base) { - if (const char* cwd = Getcwd(buf, 2048)) { - res_in_base = cwd; - } else { - res_in_base = ""; - } - } - - return SystemTools::CollapseFullPath(in_path, std::string(res_in_base)); -} - -std::string SystemTools::CollapseFullPath(const std::string& in_path, - const std::string& in_base) -{ - // Collect the output path components. - std::vector out_components; - - // Split the input path components. - std::vector path_components; - SystemTools::SplitPath(in_path, path_components); - out_components.reserve(path_components.size()); - - // If the input path is relative, start with a base path. - if (path_components[0].empty()) { - std::vector base_components; - // Use the given base path. - SystemTools::SplitPath(in_base, base_components); - - // Append base path components to the output path. 
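SystemToolsAppendComponents() above encodes the whole ".."/"." collapsing rule used by CollapseFullPath(): ".." pops the previous component when possible, is dropped when it would climb above an absolute root, and is kept when the base path itself is relative. Restated as a self-contained helper (a sketch; as with SplitPath(), component 0 is assumed to be the root entry, empty for a relative path):

    #include <string>
    #include <vector>

    // Append `comps` onto `out`, collapsing "." and "..".
    static void AppendCollapsed(std::vector<std::string>& out,
                                const std::vector<std::string>& comps)
    {
      for (const std::string& c : comps) {
        if (c == "..") {
          if (out.size() > 1 && out.back() != "..") {
            out.pop_back();            // step back one directory
          } else if (!out.empty() && out[0].empty()) {
            out.push_back(c);          // keep ".." that climbs above a relative base
          }
          // otherwise: ignore ".." that would go above an absolute root
        } else if (!c.empty() && c != ".") {
          out.push_back(c);
        }
      }
    }

For example, appending {"a", "..", "b"} onto {"/"} leaves {"/", "b"}, which JoinPath() turns back into "/b".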
- out_components.push_back(base_components[0]); - SystemToolsAppendComponents(out_components, base_components.begin() + 1, - base_components.end()); - } - - // Append input path components to the output path. - SystemToolsAppendComponents(out_components, path_components.begin(), - path_components.end()); - - // Transform the path back to a string. - std::string newPath = SystemTools::JoinPath(out_components); - -#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP - // Update the translation table with this potentially new path. I am not - // sure why this line is here, it seems really questionable, but yet I - // would put good money that if I remove it something will break, basically - // from what I can see it created a mapping from the collapsed path, to be - // replaced by the input path, which almost completely does the opposite of - // this function, the only thing preventing this from happening a lot is - // that if the in_path has a .. in it, then it is not added to the - // translation table. So for most calls this either does nothing due to the - // .. or it adds a translation between identical paths as nothing was - // collapsed, so I am going to try to comment it out, and see what hits the - // fan, hopefully quickly. - // Commented out line below: - // SystemTools::AddTranslationPath(newPath, in_path); - - SystemTools::CheckTranslationPath(newPath); -#endif -#ifdef _WIN32 - newPath = SystemTools::Statics->GetActualCaseForPathCached(newPath); - SystemTools::ConvertToUnixSlashes(newPath); -#endif - // Return the reconstructed path. - return newPath; -} - -// compute the relative path from here to there -std::string SystemTools::RelativePath(const std::string& local, - const std::string& remote) -{ - if (!SystemTools::FileIsFullPath(local)) { - return ""; - } - if (!SystemTools::FileIsFullPath(remote)) { - return ""; - } - - std::string l = SystemTools::CollapseFullPath(local); - std::string r = SystemTools::CollapseFullPath(remote); - - // split up both paths into arrays of strings using / as a separator - std::vector localSplit = SystemTools::SplitString(l, '/', true); - std::vector remoteSplit = - SystemTools::SplitString(r, '/', true); - std::vector - commonPath; // store shared parts of path in this array - std::vector finalPath; // store the final relative path here - // count up how many matching directory names there are from the start - unsigned int sameCount = 0; - while (((sameCount <= (localSplit.size() - 1)) && - (sameCount <= (remoteSplit.size() - 1))) && -// for Windows and Apple do a case insensitive string compare -#if defined(_WIN32) || defined(__APPLE__) - SystemTools::Strucmp(localSplit[sameCount].c_str(), - remoteSplit[sameCount].c_str()) == 0 -#else - localSplit[sameCount] == remoteSplit[sameCount] -#endif - ) { - // put the common parts of the path into the commonPath array - commonPath.push_back(localSplit[sameCount]); - // erase the common parts of the path from the original path arrays - localSplit[sameCount] = ""; - remoteSplit[sameCount] = ""; - sameCount++; - } - - // If there is nothing in common at all then just return the full - // path. This is the case only on windows when the paths have - // different drive letters. On unix two full paths always at least - // have the root "/" in common so we will return a relative path - // that passes through the root directory. 
- if (sameCount == 0) { - return remote; - } - - // for each entry that is not common in the local path - // add a ../ to the finalpath array, this gets us out of the local - // path into the remote dir - for (std::string const& lp : localSplit) { - if (!lp.empty()) { - finalPath.emplace_back("../"); - } - } - // for each entry that is not common in the remote path add it - // to the final path. - for (std::string const& rp : remoteSplit) { - if (!rp.empty()) { - finalPath.push_back(rp); - } - } - std::string relativePath; // result string - // now turn the array of directories into a unix path by puttint / - // between each entry that does not already have one - for (std::string const& fp : finalPath) { - if (!relativePath.empty() && relativePath.back() != '/') { - relativePath += '/'; - } - relativePath += fp; - } - return relativePath; -} - -std::string SystemTools::GetActualCaseForPath(const std::string& p) -{ -#ifdef _WIN32 - return SystemToolsStatic::GetCasePathName(p); -#else - return p; -#endif -} - -const char* SystemTools::SplitPathRootComponent(const std::string& p, - std::string* root) -{ - // Identify the root component. - const char* c = p.c_str(); - if ((c[0] == '/' && c[1] == '/') || (c[0] == '\\' && c[1] == '\\')) { - // Network path. - if (root) { - *root = "//"; - } - c += 2; - } else if (c[0] == '/' || c[0] == '\\') { - // Unix path (or Windows path w/out drive letter). - if (root) { - *root = "/"; - } - c += 1; - } else if (c[0] && c[1] == ':' && (c[2] == '/' || c[2] == '\\')) { - // Windows path. - if (root) { - (*root) = "_:/"; - (*root)[0] = c[0]; - } - c += 3; - } else if (c[0] && c[1] == ':') { - // Path relative to a windows drive working directory. - if (root) { - (*root) = "_:"; - (*root)[0] = c[0]; - } - c += 2; - } else if (c[0] == '~') { - // Home directory. The returned root should always have a - // trailing slash so that appending components as - // c[0]c[1]/c[2]/... works. The remaining path returned should - // skip the first slash if it exists: - // - // "~" : root = "~/" , return "" - // "~/ : root = "~/" , return "" - // "~/x : root = "~/" , return "x" - // "~u" : root = "~u/", return "" - // "~u/" : root = "~u/", return "" - // "~u/x" : root = "~u/", return "x" - size_t n = 1; - while (c[n] && c[n] != '/') { - ++n; - } - if (root) { - root->assign(c, n); - *root += '/'; - } - if (c[n] == '/') { - ++n; - } - c += n; - } else { - // Relative path. - if (root) { - *root = ""; - } - } - - // Return the remaining path. - return c; -} - -void SystemTools::SplitPath(const std::string& p, - std::vector& components, - bool expand_home_dir) -{ - const char* c; - components.clear(); - - // Identify the root component. - { - std::string root; - c = SystemTools::SplitPathRootComponent(p, &root); - - // Expand home directory references if requested. - if (expand_home_dir && !root.empty() && root[0] == '~') { - std::string homedir; - root = root.substr(0, root.size() - 1); - if (root.size() == 1) { -#if defined(_WIN32) && !defined(__CYGWIN__) - if (!SystemTools::GetEnv("USERPROFILE", homedir)) -#endif - SystemTools::GetEnv("HOME", homedir); - } -#ifdef HAVE_GETPWNAM - else if (passwd* pw = getpwnam(root.c_str() + 1)) { - if (pw->pw_dir) { - homedir = pw->pw_dir; - } - } -#endif - if (!homedir.empty() && - (homedir.back() == '/' || homedir.back() == '\\')) { - homedir.resize(homedir.size() - 1); - } - SystemTools::SplitPath(homedir, components); - } else { - components.push_back(root); - } - } - - // Parse the remaining components. 
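RelativePath() above works by stripping the common leading components of the two collapsed paths, emitting one "../" per component left in the local path, and then appending what remains of the remote path. A compact sketch of the same algorithm for plain absolute POSIX paths, without the case-insensitive comparison used on Windows and macOS (MakeRelative is an invented name):

    #include <sstream>
    #include <string>
    #include <vector>

    static std::string MakeRelative(const std::string& local, const std::string& remote)
    {
      auto split = [](const std::string& p) {
        std::vector<std::string> out;
        std::istringstream ss(p);
        std::string c;
        while (std::getline(ss, c, '/')) {
          if (!c.empty()) {
            out.push_back(c);
          }
        }
        return out;
      };
      std::vector<std::string> l = split(local), r = split(remote);
      size_t same = 0;
      while (same < l.size() && same < r.size() && l[same] == r[same]) {
        ++same; // count shared leading components
      }
      std::string rel;
      for (size_t i = same; i < l.size(); ++i) {
        rel += "../"; // climb out of the unshared part of `local`
      }
      for (size_t i = same; i < r.size(); ++i) {
        rel += r[i] + (i + 1 < r.size() ? "/" : "");
      }
      return rel;
    }

    // MakeRelative("/usr/local/bin", "/usr/share/doc") == "../../share/doc"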
- const char* first = c; - const char* last = first; - for (; *last; ++last) { - if (*last == '/' || *last == '\\') { - // End of a component. Save it. - components.push_back(std::string(first, last)); - first = last + 1; - } - } - - // Save the last component unless there were no components. - if (last != c) { - components.push_back(std::string(first, last)); - } -} - -std::string SystemTools::JoinPath(const std::vector& components) -{ - return SystemTools::JoinPath(components.begin(), components.end()); -} - -std::string SystemTools::JoinPath( - std::vector::const_iterator first, - std::vector::const_iterator last) -{ - // Construct result in a single string. - std::string result; - size_t len = 0; - for (std::vector::const_iterator i = first; i != last; ++i) { - len += 1 + i->size(); - } - result.reserve(len); - - // The first two components do not add a slash. - if (first != last) { - result.append(*first++); - } - if (first != last) { - result.append(*first++); - } - - // All remaining components are always separated with a slash. - while (first != last) { - result.push_back('/'); - result.append((*first++)); - } - - // Return the concatenated result. - return result; -} - -bool SystemTools::ComparePath(const std::string& c1, const std::string& c2) -{ -#if defined(_WIN32) || defined(__APPLE__) -# ifdef _MSC_VER - return _stricmp(c1.c_str(), c2.c_str()) == 0; -# elif defined(__APPLE__) || defined(__GNUC__) - return strcasecmp(c1.c_str(), c2.c_str()) == 0; -# else - return SystemTools::Strucmp(c1.c_str(), c2.c_str()) == 0; -# endif -#else - return c1 == c2; -#endif -} - -bool SystemTools::Split(const std::string& str, - std::vector& lines, char separator) -{ - std::string data(str); - std::string::size_type lpos = 0; - while (lpos < data.length()) { - std::string::size_type rpos = data.find_first_of(separator, lpos); - if (rpos == std::string::npos) { - // String ends at end of string without a separator. - lines.push_back(data.substr(lpos)); - return false; - } else { - // String ends in a separator, remove the character. - lines.push_back(data.substr(lpos, rpos - lpos)); - } - lpos = rpos + 1; - } - return true; -} - -bool SystemTools::Split(const std::string& str, - std::vector& lines) -{ - std::string data(str); - std::string::size_type lpos = 0; - while (lpos < data.length()) { - std::string::size_type rpos = data.find_first_of('\n', lpos); - if (rpos == std::string::npos) { - // Line ends at end of string without a newline. - lines.push_back(data.substr(lpos)); - return false; - } - if ((rpos > lpos) && (data[rpos - 1] == '\r')) { - // Line ends in a "\r\n" pair, remove both characters. - lines.push_back(data.substr(lpos, (rpos - 1) - lpos)); - } else { - // Line ends in a "\n", remove the character. - lines.push_back(data.substr(lpos, rpos - lpos)); - } - lpos = rpos + 1; - } - return true; -} - -/** - * Return path of a full filename (no trailing slashes). - * Warning: returned path is converted to Unix slashes format. - */ -std::string SystemTools::GetFilenamePath(const std::string& filename) -{ - std::string fn = filename; - SystemTools::ConvertToUnixSlashes(fn); - - std::string::size_type slash_pos = fn.rfind("/"); - if (slash_pos != std::string::npos) { - std::string ret = fn.substr(0, slash_pos); - if (ret.size() == 2 && ret[1] == ':') { - return ret + '/'; - } - if (ret.empty()) { - return "/"; - } - return ret; - } else { - return ""; - } -} - -/** - * Return file name of a full filename (i.e. file name without path). 
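The newline-splitting Split() overload above tolerates both "\n" and "\r\n" line endings and reports, via its return value, whether the final line was newline-terminated. The same contract as a standalone sketch (SplitLines is an invented name):

    #include <string>
    #include <vector>

    // Split `text` into lines, stripping "\r\n" as well as "\n"; returns false
    // when the last line had no terminating newline.
    static bool SplitLines(const std::string& text, std::vector<std::string>& lines)
    {
      std::string::size_type lpos = 0;
      while (lpos < text.size()) {
        std::string::size_type rpos = text.find('\n', lpos);
        if (rpos == std::string::npos) {
          lines.push_back(text.substr(lpos));
          return false; // ended without a newline
        }
        std::string::size_type end =
          (rpos > lpos && text[rpos - 1] == '\r') ? rpos - 1 : rpos;
        lines.push_back(text.substr(lpos, end - lpos));
        lpos = rpos + 1;
      }
      return true;
    }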
- */ -std::string SystemTools::GetFilenameName(const std::string& filename) -{ -#if defined(_WIN32) || defined(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES) - const char* separators = "/\\"; -#else - char separators = '/'; -#endif - std::string::size_type slash_pos = filename.find_last_of(separators); - if (slash_pos != std::string::npos) { - return filename.substr(slash_pos + 1); - } else { - return filename; - } -} - -/** - * Return file extension of a full filename (dot included). - * Warning: this is the longest extension (for example: .tar.gz) - */ -std::string SystemTools::GetFilenameExtension(const std::string& filename) -{ - std::string name = SystemTools::GetFilenameName(filename); - std::string::size_type dot_pos = name.find('.'); - if (dot_pos != std::string::npos) { - return name.substr(dot_pos); - } else { - return ""; - } -} - -/** - * Return file extension of a full filename (dot included). - * Warning: this is the shortest extension (for example: .gz of .tar.gz) - */ -std::string SystemTools::GetFilenameLastExtension(const std::string& filename) -{ - std::string name = SystemTools::GetFilenameName(filename); - std::string::size_type dot_pos = name.rfind('.'); - if (dot_pos != std::string::npos) { - return name.substr(dot_pos); - } else { - return ""; - } -} - -/** - * Return file name without extension of a full filename (i.e. without path). - * Warning: it considers the longest extension (for example: .tar.gz) - */ -std::string SystemTools::GetFilenameWithoutExtension( - const std::string& filename) -{ - std::string name = SystemTools::GetFilenameName(filename); - std::string::size_type dot_pos = name.find('.'); - if (dot_pos != std::string::npos) { - return name.substr(0, dot_pos); - } else { - return name; - } -} - -/** - * Return file name without extension of a full filename (i.e. without path). - * Warning: it considers the last extension (for example: removes .gz - * from .tar.gz) - */ -std::string SystemTools::GetFilenameWithoutLastExtension( - const std::string& filename) -{ - std::string name = SystemTools::GetFilenameName(filename); - std::string::size_type dot_pos = name.rfind('.'); - if (dot_pos != std::string::npos) { - return name.substr(0, dot_pos); - } else { - return name; - } -} - -bool SystemTools::FileHasSignature(const char* filename, const char* signature, - long offset) -{ - if (!filename || !signature) { - return false; - } - - FILE* fp = Fopen(filename, "rb"); - if (!fp) { - return false; - } - - fseek(fp, offset, SEEK_SET); - - bool res = false; - size_t signature_len = strlen(signature); - char* buffer = new char[signature_len]; - - if (fread(buffer, 1, signature_len, fp) == signature_len) { - res = (!strncmp(buffer, signature, signature_len) ? 
true : false); - } - - delete[] buffer; - - fclose(fp); - return res; -} - -SystemTools::FileTypeEnum SystemTools::DetectFileType(const char* filename, - unsigned long length, - double percent_bin) -{ - if (!filename || percent_bin < 0) { - return SystemTools::FileTypeUnknown; - } - - if (SystemTools::FileIsDirectory(filename)) { - return SystemTools::FileTypeUnknown; - } - - FILE* fp = Fopen(filename, "rb"); - if (!fp) { - return SystemTools::FileTypeUnknown; - } - - // Allocate buffer and read bytes - - unsigned char* buffer = new unsigned char[length]; - size_t read_length = fread(buffer, 1, length, fp); - fclose(fp); - if (read_length == 0) { - delete[] buffer; - return SystemTools::FileTypeUnknown; - } - - // Loop over contents and count - - size_t text_count = 0; - - const unsigned char* ptr = buffer; - const unsigned char* buffer_end = buffer + read_length; - - while (ptr != buffer_end) { - if ((*ptr >= 0x20 && *ptr <= 0x7F) || *ptr == '\n' || *ptr == '\r' || - *ptr == '\t') { - text_count++; - } - ptr++; - } - - delete[] buffer; - - double current_percent_bin = (static_cast(read_length - text_count) / - static_cast(read_length)); - - if (current_percent_bin >= percent_bin) { - return SystemTools::FileTypeBinary; - } - - return SystemTools::FileTypeText; -} - -bool SystemTools::LocateFileInDir(const char* filename, const char* dir, - std::string& filename_found, - int try_filename_dirs) -{ - if (!filename || !dir) { - return false; - } - - // Get the basename of 'filename' - - std::string filename_base = SystemTools::GetFilenameName(filename); - - // Check if 'dir' is really a directory - // If win32 and matches something like C:, accept it as a dir - - std::string real_dir; - if (!SystemTools::FileIsDirectory(dir)) { -#if defined(_WIN32) - size_t dir_len = strlen(dir); - if (dir_len < 2 || dir[dir_len - 1] != ':') { -#endif - real_dir = SystemTools::GetFilenamePath(dir); - dir = real_dir.c_str(); -#if defined(_WIN32) - } -#endif - } - - // Try to find the file in 'dir' - - bool res = false; - if (!filename_base.empty() && dir) { - size_t dir_len = strlen(dir); - int need_slash = - (dir_len && dir[dir_len - 1] != '/' && dir[dir_len - 1] != '\\'); - - std::string temp = dir; - if (need_slash) { - temp += "/"; - } - temp += filename_base; - - if (SystemTools::FileExists(temp)) { - res = true; - filename_found = temp; - } - - // If not found, we can try harder by appending part of the file to - // to the directory to look inside. - // Example: if we were looking for /foo/bar/yo.txt in /d1/d2, then - // try to find yo.txt in /d1/d2/bar, then /d1/d2/foo/bar, etc. 
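DetectFileType() above is a sampling heuristic: read a bounded prefix of the file, count bytes that are printable ASCII or common whitespace, and call the file binary once the non-text fraction reaches the caller's threshold. A sketch of that heuristic; the sample size and threshold below are illustrative defaults, since in the removed code both are parameters of DetectFileType():

    #include <cstdio>
    #include <string>
    #include <vector>

    static bool LooksBinary(const std::string& path, size_t sample = 256,
                            double threshold = 0.05)
    {
      std::FILE* fp = std::fopen(path.c_str(), "rb");
      if (!fp) {
        return false;
      }
      std::vector<unsigned char> buf(sample);
      size_t n = std::fread(buf.data(), 1, sample, fp);
      std::fclose(fp);
      if (n == 0) {
        return false; // empty or unreadable: treat as text
      }
      size_t text = 0;
      for (size_t i = 0; i < n; ++i) {
        unsigned char c = buf[i];
        if ((c >= 0x20 && c <= 0x7F) || c == '\n' || c == '\r' || c == '\t') {
          ++text;
        }
      }
      return static_cast<double>(n - text) / static_cast<double>(n) >= threshold;
    }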
- - else if (try_filename_dirs) { - std::string filename_dir(filename); - std::string filename_dir_base; - std::string filename_dir_bases; - do { - filename_dir = SystemTools::GetFilenamePath(filename_dir); - filename_dir_base = SystemTools::GetFilenameName(filename_dir); -#if defined(_WIN32) - if (filename_dir_base.empty() || filename_dir_base.back() == ':') -#else - if (filename_dir_base.empty()) -#endif - { - break; - } - - filename_dir_bases = filename_dir_base + "/" + filename_dir_bases; - - temp = dir; - if (need_slash) { - temp += "/"; - } - temp += filename_dir_bases; - - res = SystemTools::LocateFileInDir(filename_base.c_str(), temp.c_str(), - filename_found, 0); - - } while (!res && !filename_dir_base.empty()); - } - } - - return res; -} - -bool SystemTools::FileIsFullPath(const std::string& in_name) -{ - return SystemToolsStatic::FileIsFullPath(in_name.c_str(), in_name.size()); -} - -bool SystemTools::FileIsFullPath(const char* in_name) -{ - return SystemToolsStatic::FileIsFullPath( - in_name, in_name[0] ? (in_name[1] ? 2 : 1) : 0); -} - -bool SystemToolsStatic::FileIsFullPath(const char* in_name, size_t len) -{ -#if defined(_WIN32) || defined(__CYGWIN__) - // On Windows, the name must be at least two characters long. - if (len < 2) { - return false; - } - if (in_name[1] == ':') { - return true; - } - if (in_name[0] == '\\') { - return true; - } -#else - // On UNIX, the name must be at least one character long. - if (len < 1) { - return false; - } -#endif -#if !defined(_WIN32) - if (in_name[0] == '~') { - return true; - } -#endif - // On UNIX, the name must begin in a '/'. - // On Windows, if the name begins in a '/', then it is a full - // network path. - if (in_name[0] == '/') { - return true; - } - return false; -} - -bool SystemTools::GetShortPath(const std::string& path, std::string& shortPath) -{ -#if defined(_WIN32) && !defined(__CYGWIN__) - std::string tempPath = path; // create a buffer - - // if the path passed in has quotes around it, first remove the quotes - if (!path.empty() && path[0] == '"' && path.back() == '"') { - tempPath = path.substr(1, path.length() - 2); - } - - std::wstring wtempPath = Encoding::ToWide(tempPath); - DWORD ret = GetShortPathNameW(wtempPath.c_str(), nullptr, 0); - std::vector buffer(ret); - if (ret != 0) { - ret = GetShortPathNameW(wtempPath.c_str(), &buffer[0], - static_cast(buffer.size())); - } - - if (ret == 0) { - return false; - } else { - shortPath = Encoding::ToNarrow(&buffer[0]); - return true; - } -#else - shortPath = path; - return true; -#endif -} - -std::string SystemTools::GetCurrentDateTime(const char* format) -{ - char buf[1024]; - time_t t; - time(&t); - strftime(buf, sizeof(buf), format, localtime(&t)); - return std::string(buf); -} - -std::string SystemTools::MakeCidentifier(const std::string& s) -{ - std::string str(s); - if (str.find_first_of("0123456789") == 0) { - str = "_" + str; - } - - std::string permited_chars("_" - "abcdefghijklmnopqrstuvwxyz" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "0123456789"); - std::string::size_type pos = 0; - while ((pos = str.find_first_not_of(permited_chars, pos)) != - std::string::npos) { - str[pos] = '_'; - } - return str; -} - -// Convenience function around std::getline which removes a trailing carriage -// return and can truncate the buffer as needed. Returns true -// if any data were read before the end-of-file was reached. -bool SystemTools::GetLineFromStream(std::istream& is, std::string& line, - bool* has_newline /* = 0 */, - long sizeLimit /* = -1 */) -{ - // Start with an empty line. 
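MakeCidentifier() above sanitizes a string into a C identifier by prefixing a leading digit with '_' and replacing every character outside [A-Za-z0-9_]. An effectively equivalent sketch using <cctype>, which matches the hand-written character list in the "C" locale; ToCIdentifier is an invented name:

    #include <cctype>
    #include <string>

    static std::string ToCIdentifier(std::string s)
    {
      if (!s.empty() && std::isdigit(static_cast<unsigned char>(s[0]))) {
        s.insert(s.begin(), '_'); // identifiers may not start with a digit
      }
      for (char& c : s) {
        if (!std::isalnum(static_cast<unsigned char>(c)) && c != '_') {
          c = '_';
        }
      }
      return s;
    }

    // ToCIdentifier("3rd party-lib") == "_3rd_party_lib"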
- line = ""; - - // Early short circuit return if stream is no good. Just return - // false and the empty line. (Probably means caller tried to - // create a file stream with a non-existent file name...) - // - if (!is) { - if (has_newline) { - *has_newline = false; - } - return false; - } - - std::getline(is, line); - bool haveData = !line.empty() || !is.eof(); - if (!line.empty()) { - // Avoid storing a carriage return character. - if (line.back() == '\r') { - line.resize(line.size() - 1); - } - - // if we read too much then truncate the buffer - if (sizeLimit >= 0 && line.size() >= static_cast(sizeLimit)) { - line.resize(sizeLimit); - } - } - - // Return the results. - if (has_newline) { - *has_newline = !is.eof(); - } - return haveData; -} - -int SystemTools::GetTerminalWidth() -{ - int width = -1; -#ifdef HAVE_TTY_INFO - struct winsize ws; - std::string columns; /* Unix98 environment variable */ - if (ioctl(1, TIOCGWINSZ, &ws) != -1 && ws.ws_col > 0 && ws.ws_row > 0) { - width = ws.ws_col; - } - if (!isatty(STDOUT_FILENO)) { - width = -1; - } - if (SystemTools::GetEnv("COLUMNS", columns) && !columns.empty()) { - long t; - char* endptr; - t = strtol(columns.c_str(), &endptr, 0); - if (endptr && !*endptr && (t > 0) && (t < 1000)) { - width = static_cast(t); - } - } - if (width < 9) { - width = -1; - } -#endif - return width; -} - -bool SystemTools::GetPermissions(const char* file, mode_t& mode) -{ - if (!file) { - return false; - } - return SystemTools::GetPermissions(std::string(file), mode); -} - -bool SystemTools::GetPermissions(const std::string& file, mode_t& mode) -{ -#if defined(_WIN32) - DWORD attr = - GetFileAttributesW(Encoding::ToWindowsExtendedPath(file).c_str()); - if (attr == INVALID_FILE_ATTRIBUTES) { - return false; - } - if ((attr & FILE_ATTRIBUTE_READONLY) != 0) { - mode = (_S_IREAD | (_S_IREAD >> 3) | (_S_IREAD >> 6)); - } else { - mode = (_S_IWRITE | (_S_IWRITE >> 3) | (_S_IWRITE >> 6)) | - (_S_IREAD | (_S_IREAD >> 3) | (_S_IREAD >> 6)); - } - if ((attr & FILE_ATTRIBUTE_DIRECTORY) != 0) { - mode |= S_IFDIR | (_S_IEXEC | (_S_IEXEC >> 3) | (_S_IEXEC >> 6)); - } else { - mode |= S_IFREG; - } - size_t dotPos = file.rfind('.'); - const char* ext = dotPos == std::string::npos ? 
0 : (file.c_str() + dotPos); - if (ext && - (Strucmp(ext, ".exe") == 0 || Strucmp(ext, ".com") == 0 || - Strucmp(ext, ".cmd") == 0 || Strucmp(ext, ".bat") == 0)) { - mode |= (_S_IEXEC | (_S_IEXEC >> 3) | (_S_IEXEC >> 6)); - } -#else - struct stat st; - if (stat(file.c_str(), &st) < 0) { - return false; - } - mode = st.st_mode; -#endif - return true; -} - -bool SystemTools::SetPermissions(const char* file, mode_t mode, - bool honor_umask) -{ - if (!file) { - return false; - } - return SystemTools::SetPermissions(std::string(file), mode, honor_umask); -} - -bool SystemTools::SetPermissions(const std::string& file, mode_t mode, - bool honor_umask) -{ - if (!SystemTools::PathExists(file)) { - return false; - } - if (honor_umask) { - mode_t currentMask = umask(0); - umask(currentMask); - mode &= ~currentMask; - } -#ifdef _WIN32 - if (_wchmod(Encoding::ToWindowsExtendedPath(file).c_str(), mode) < 0) -#else - if (chmod(file.c_str(), mode) < 0) -#endif - { - return false; - } - - return true; -} - -std::string SystemTools::GetParentDirectory(const std::string& fileOrDir) -{ - return SystemTools::GetFilenamePath(fileOrDir); -} - -bool SystemTools::IsSubDirectory(const std::string& cSubdir, - const std::string& cDir) -{ - if (cDir.empty()) { - return false; - } - std::string subdir = cSubdir; - std::string dir = cDir; - SystemTools::ConvertToUnixSlashes(subdir); - SystemTools::ConvertToUnixSlashes(dir); - if (subdir.size() <= dir.size() || dir.empty()) { - return false; - } - bool isRootPath = dir.back() == '/'; // like "/" or "C:/" - size_t expectedSlashPosition = isRootPath ? dir.size() - 1u : dir.size(); - if (subdir[expectedSlashPosition] != '/') { - return false; - } - std::string s = subdir.substr(0, dir.size()); - return SystemTools::ComparePath(s, dir); -} - -void SystemTools::Delay(unsigned int msec) -{ -#ifdef _WIN32 - Sleep(msec); -#else - // The sleep function gives 1 second resolution and the usleep - // function gives 1e-6 second resolution but on some platforms has a - // maximum sleep time of 1 second. This could be re-implemented to - // use select with masked signals or pselect to mask signals - // atomically. If select is given empty sets and zero as the max - // file descriptor but a non-zero timeout it can be used to block - // for a precise amount of time. - if (msec >= 1000) { - sleep(msec / 1000); - usleep((msec % 1000) * 1000); - } else { - usleep(msec * 1000); - } -#endif -} - -std::string SystemTools::GetOperatingSystemNameAndVersion() -{ - std::string res; - -#ifdef _WIN32 - char buffer[256]; - - OSVERSIONINFOEXA osvi; - BOOL bOsVersionInfoEx; - - ZeroMemory(&osvi, sizeof(osvi)); - osvi.dwOSVersionInfoSize = sizeof(osvi); - -# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx -# pragma warning(push) -# ifdef __INTEL_COMPILER -# pragma warning(disable : 1478) -# elif defined __clang__ -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wdeprecated-declarations" -# else -# pragma warning(disable : 4996) -# endif -# endif - bOsVersionInfoEx = GetVersionExA((OSVERSIONINFOA*)&osvi); - if (!bOsVersionInfoEx) { - return 0; - } -# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx -# ifdef __clang__ -# pragma clang diagnostic pop -# else -# pragma warning(pop) -# endif -# endif - - switch (osvi.dwPlatformId) { - // Test for the Windows NT product family. - - case VER_PLATFORM_WIN32_NT: - - // Test for the specific product family. 
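SetPermissions() above honors the process umask by reading it the only way POSIX allows, setting it and immediately restoring it, and then clearing those bits from the requested mode before chmod(). The same trick in isolation (ChmodHonoringUmask is an invented name; on Windows the removed code routes through _wchmod instead):

    #include <sys/stat.h>
    #include <string>

    static bool ChmodHonoringUmask(const std::string& path, mode_t mode)
    {
      mode_t mask = umask(0); // umask() has no read-only form: set, then restore
      umask(mask);
      return chmod(path.c_str(), mode & ~mask) == 0;
    }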
- if (osvi.dwMajorVersion == 10 && osvi.dwMinorVersion == 0) { - if (osvi.wProductType == VER_NT_WORKSTATION) { - res += "Microsoft Windows 10"; - } else { - res += "Microsoft Windows Server 2016 family"; - } - } - - if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 3) { - if (osvi.wProductType == VER_NT_WORKSTATION) { - res += "Microsoft Windows 8.1"; - } else { - res += "Microsoft Windows Server 2012 R2 family"; - } - } - - if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 2) { - if (osvi.wProductType == VER_NT_WORKSTATION) { - res += "Microsoft Windows 8"; - } else { - res += "Microsoft Windows Server 2012 family"; - } - } - - if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 1) { - if (osvi.wProductType == VER_NT_WORKSTATION) { - res += "Microsoft Windows 7"; - } else { - res += "Microsoft Windows Server 2008 R2 family"; - } - } - - if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 0) { - if (osvi.wProductType == VER_NT_WORKSTATION) { - res += "Microsoft Windows Vista"; - } else { - res += "Microsoft Windows Server 2008 family"; - } - } - - if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 2) { - res += "Microsoft Windows Server 2003 family"; - } - - if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) { - res += "Microsoft Windows XP"; - } - - if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 0) { - res += "Microsoft Windows 2000"; - } - - if (osvi.dwMajorVersion <= 4) { - res += "Microsoft Windows NT"; - } - - // Test for specific product on Windows NT 4.0 SP6 and later. - - if (bOsVersionInfoEx) { - // Test for the workstation type. - - if (osvi.wProductType == VER_NT_WORKSTATION) { - if (osvi.dwMajorVersion == 4) { - res += " Workstation 4.0"; - } else if (osvi.dwMajorVersion == 5) { - if (osvi.wSuiteMask & VER_SUITE_PERSONAL) { - res += " Home Edition"; - } else { - res += " Professional"; - } - } - } - - // Test for the server type. 
- - else if (osvi.wProductType == VER_NT_SERVER) { - if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 2) { - if (osvi.wSuiteMask & VER_SUITE_DATACENTER) { - res += " Datacenter Edition"; - } else if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) { - res += " Enterprise Edition"; - } else if (osvi.wSuiteMask == VER_SUITE_BLADE) { - res += " Web Edition"; - } else { - res += " Standard Edition"; - } - } - - else if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 0) { - if (osvi.wSuiteMask & VER_SUITE_DATACENTER) { - res += " Datacenter Server"; - } else if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) { - res += " Advanced Server"; - } else { - res += " Server"; - } - } - - else if (osvi.dwMajorVersion <= 4) // Windows NT 4.0 - { - if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) { - res += " Server 4.0, Enterprise Edition"; - } else { - res += " Server 4.0"; - } - } - } - } - - // Test for specific product on Windows NT 4.0 SP5 and earlier - - else { - HKEY hKey; -# define BUFSIZE 80 - wchar_t szProductType[BUFSIZE]; - DWORD dwBufLen = BUFSIZE; - LONG lRet; - - lRet = - RegOpenKeyExW(HKEY_LOCAL_MACHINE, - L"SYSTEM\\CurrentControlSet\\Control\\ProductOptions", - 0, KEY_QUERY_VALUE, &hKey); - if (lRet != ERROR_SUCCESS) { - return 0; - } - - lRet = RegQueryValueExW(hKey, L"ProductType", nullptr, nullptr, - (LPBYTE)szProductType, &dwBufLen); - - if ((lRet != ERROR_SUCCESS) || (dwBufLen > BUFSIZE)) { - return 0; - } - - RegCloseKey(hKey); - - if (lstrcmpiW(L"WINNT", szProductType) == 0) { - res += " Workstation"; - } - if (lstrcmpiW(L"LANMANNT", szProductType) == 0) { - res += " Server"; - } - if (lstrcmpiW(L"SERVERNT", szProductType) == 0) { - res += " Advanced Server"; - } - - res += " "; - sprintf(buffer, "%ld", osvi.dwMajorVersion); - res += buffer; - res += "."; - sprintf(buffer, "%ld", osvi.dwMinorVersion); - res += buffer; - } - - // Display service pack (if any) and build number. - - if (osvi.dwMajorVersion == 4 && - lstrcmpiA(osvi.szCSDVersion, "Service Pack 6") == 0) { - HKEY hKey; - LONG lRet; - - // Test for SP6 versus SP6a. - - lRet = RegOpenKeyExW( - HKEY_LOCAL_MACHINE, - L"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Hotfix\\Q246009", - 0, KEY_QUERY_VALUE, &hKey); - - if (lRet == ERROR_SUCCESS) { - res += " Service Pack 6a (Build "; - sprintf(buffer, "%ld", osvi.dwBuildNumber & 0xFFFF); - res += buffer; - res += ")"; - } else // Windows NT 4.0 prior to SP6a - { - res += " "; - res += osvi.szCSDVersion; - res += " (Build "; - sprintf(buffer, "%ld", osvi.dwBuildNumber & 0xFFFF); - res += buffer; - res += ")"; - } - - RegCloseKey(hKey); - } else // Windows NT 3.51 and earlier or Windows 2000 and later - { - res += " "; - res += osvi.szCSDVersion; - res += " (Build "; - sprintf(buffer, "%ld", osvi.dwBuildNumber & 0xFFFF); - res += buffer; - res += ")"; - } - - break; - - // Test for the Windows 95 product family. 
- - case VER_PLATFORM_WIN32_WINDOWS: - - if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 0) { - res += "Microsoft Windows 95"; - if (osvi.szCSDVersion[1] == 'C' || osvi.szCSDVersion[1] == 'B') { - res += " OSR2"; - } - } - - if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 10) { - res += "Microsoft Windows 98"; - if (osvi.szCSDVersion[1] == 'A') { - res += " SE"; - } - } - - if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 90) { - res += "Microsoft Windows Millennium Edition"; - } - break; - - case VER_PLATFORM_WIN32s: - - res += "Microsoft Win32s"; - break; - } -#endif - - return res; -} - -bool SystemTools::ParseURLProtocol(const std::string& URL, - std::string& protocol, - std::string& dataglom) -{ - // match 0 entire url - // match 1 protocol - // match 2 dataglom following protocol:// - kwsys::RegularExpression urlRe(VTK_URL_PROTOCOL_REGEX); - - if (!urlRe.find(URL)) - return false; - - protocol = urlRe.match(1); - dataglom = urlRe.match(2); - - return true; -} - -bool SystemTools::ParseURL(const std::string& URL, std::string& protocol, - std::string& username, std::string& password, - std::string& hostname, std::string& dataport, - std::string& database) -{ - kwsys::RegularExpression urlRe(VTK_URL_REGEX); - if (!urlRe.find(URL)) - return false; - - // match 0 URL - // match 1 protocol - // match 2 mangled user - // match 3 username - // match 4 mangled password - // match 5 password - // match 6 hostname - // match 7 mangled port - // match 8 dataport - // match 9 database name - - protocol = urlRe.match(1); - username = urlRe.match(3); - password = urlRe.match(5); - hostname = urlRe.match(6); - dataport = urlRe.match(8); - database = urlRe.match(9); - - return true; -} - -// These must NOT be initialized. Default initialization to zero is -// necessary. -static unsigned int SystemToolsManagerCount; -SystemToolsStatic* SystemTools::Statics; - -// SystemToolsManager manages the SystemTools singleton. -// SystemToolsManager should be included in any translation unit -// that will use SystemTools or that implements the singleton -// pattern. It makes sure that the SystemTools singleton is created -// before and destroyed after all other singletons in CMake. - -SystemToolsManager::SystemToolsManager() -{ - if (++SystemToolsManagerCount == 1) { - SystemTools::ClassInitialize(); - } -} - -SystemToolsManager::~SystemToolsManager() -{ - if (--SystemToolsManagerCount == 0) { - SystemTools::ClassFinalize(); - } -} - -#if defined(__VMS) -// On VMS we configure the run time C library to be more UNIX like. -// http://h71000.www7.hp.com/doc/732final/5763/5763pro_004.html -extern "C" int decc$feature_get_index(char* name); -extern "C" int decc$feature_set_value(int index, int mode, int value); -static int SetVMSFeature(char* name, int value) -{ - int i; - errno = 0; - i = decc$feature_get_index(name); - return i >= 0 && (decc$feature_set_value(i, 1, value) >= 0 || errno == 0); -} -#endif - -void SystemTools::ClassInitialize() -{ -#ifdef __VMS - SetVMSFeature("DECC$FILENAME_UNIX_ONLY", 1); -#endif - - // Create statics singleton instance - SystemTools::Statics = new SystemToolsStatic; - -#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP -// Add some special translation paths for unix. These are not added -// for windows because drive letters need to be maintained. Also, -// there are not sym-links and mount points on windows anyway. 
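ParseURL()/ParseURLProtocol() above delegate the actual matching to the VTK_URL_REGEX and VTK_URL_PROTOCOL_REGEX patterns, whose capture groups are documented in the comments. Those macros are defined elsewhere, so the following is only a much-simplified std::regex sketch with a smaller capture set (no user or password handling) to show the shape of the API; the pattern and ParseSimpleURL name are illustrative, not the KWSys ones:

    #include <regex>
    #include <string>

    static bool ParseSimpleURL(const std::string& url, std::string& protocol,
                               std::string& host, std::string& port, std::string& path)
    {
      // protocol://host[:port][/path]
      static const std::regex re(R"(^([a-z]+)://([^/:]+)(?::([0-9]+))?(?:/(.*))?$)");
      std::smatch m;
      if (!std::regex_match(url, m, re)) {
        return false;
      }
      protocol = m[1].str();
      host = m[2].str();
      port = m[3].str(); // empty when absent
      path = m[4].str(); // empty when absent
      return true;
    }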
-# if !defined(_WIN32) || defined(__CYGWIN__) - // The tmp path is frequently a logical path so always keep it: - SystemTools::AddKeepPath("/tmp/"); - - // If the current working directory is a logical path then keep the - // logical name. - std::string pwd_str; - if (SystemTools::GetEnv("PWD", pwd_str)) { - char buf[2048]; - if (const char* cwd = Getcwd(buf, 2048)) { - // The current working directory may be a logical path. Find - // the shortest logical path that still produces the correct - // physical path. - std::string cwd_changed; - std::string pwd_changed; - - // Test progressively shorter logical-to-physical mappings. - std::string cwd_str = cwd; - std::string pwd_path; - Realpath(pwd_str, pwd_path); - while (cwd_str == pwd_path && cwd_str != pwd_str) { - // The current pair of paths is a working logical mapping. - cwd_changed = cwd_str; - pwd_changed = pwd_str; - - // Strip off one directory level and see if the logical - // mapping still works. - pwd_str = SystemTools::GetFilenamePath(pwd_str); - cwd_str = SystemTools::GetFilenamePath(cwd_str); - Realpath(pwd_str, pwd_path); - } - - // Add the translation to keep the logical path name. - if (!cwd_changed.empty() && !pwd_changed.empty()) { - SystemTools::AddTranslationPath(cwd_changed, pwd_changed); - } - } - } -# endif -#endif -} - -void SystemTools::ClassFinalize() -{ - delete SystemTools::Statics; -} - -} // namespace KWSYS_NAMESPACE - -#if defined(_MSC_VER) && defined(_DEBUG) -# include -# include -# include -namespace KWSYS_NAMESPACE { - -static int SystemToolsDebugReport(int, char* message, int*) -{ - fprintf(stderr, "%s", message); - fflush(stderr); - return 1; // no further reporting required -} - -void SystemTools::EnableMSVCDebugHook() -{ - if (SystemTools::HasEnv("DART_TEST_FROM_DART") || - SystemTools::HasEnv("DASHBOARD_TEST_FROM_CTEST")) { - _CrtSetReportHook(SystemToolsDebugReport); - } -} - -} // namespace KWSYS_NAMESPACE -#else -namespace KWSYS_NAMESPACE { -void SystemTools::EnableMSVCDebugHook() -{ -} -} // namespace KWSYS_NAMESPACE -#endif diff --git a/test/API/driver/kwsys/SystemTools.hxx.in b/test/API/driver/kwsys/SystemTools.hxx.in deleted file mode 100644 index c4ab9d4f36a..00000000000 --- a/test/API/driver/kwsys/SystemTools.hxx.in +++ /dev/null @@ -1,981 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_SystemTools_hxx -#define @KWSYS_NAMESPACE@_SystemTools_hxx - -#include <@KWSYS_NAMESPACE@/Configure.hxx> - -#include -#include -#include -#include - -#include -// include sys/stat.h after sys/types.h -#include - -#if !defined(_WIN32) || defined(__CYGWIN__) -# include // For access permissions for use with access() -#endif - -// Required for va_list -#include -// Required for FILE* -#include -#if !defined(va_list) -// Some compilers move va_list into the std namespace and there is no way to -// tell that this has been done. Playing with things being included before or -// after stdarg.h does not solve things because we do not have control over -// what the user does. This hack solves this problem by moving va_list to our -// own namespace that is local for kwsys. 
-namespace std { -} // Required for platforms that do not have std namespace -namespace @KWSYS_NAMESPACE@_VA_LIST { -using namespace std; -typedef va_list hack_va_list; -} -namespace @KWSYS_NAMESPACE@ { -typedef @KWSYS_NAMESPACE@_VA_LIST::hack_va_list va_list; -} -#endif // va_list - -namespace @KWSYS_NAMESPACE@ { - -class SystemToolsStatic; - -/** \class SystemToolsManager - * \brief Use to make sure SystemTools is initialized before it is used - * and is the last static object destroyed - */ -class @KWSYS_NAMESPACE@_EXPORT SystemToolsManager -{ -public: - SystemToolsManager(); - ~SystemToolsManager(); - - SystemToolsManager(const SystemToolsManager&) = delete; - SystemToolsManager& operator=(const SystemToolsManager&) = delete; -}; - -// This instance will show up in any translation unit that uses -// SystemTools. It will make sure SystemTools is initialized -// before it is used and is the last static object destroyed. -static SystemToolsManager SystemToolsManagerInstance; - -// Flags for use with TestFileAccess. Use a typedef in case any operating -// system in the future needs a special type. These are flags that may be -// combined using the | operator. -typedef int TestFilePermissions; -#if defined(_WIN32) && !defined(__CYGWIN__) -// On Windows (VC and Borland), no system header defines these constants... -static const TestFilePermissions TEST_FILE_OK = 0; -static const TestFilePermissions TEST_FILE_READ = 4; -static const TestFilePermissions TEST_FILE_WRITE = 2; -static const TestFilePermissions TEST_FILE_EXECUTE = 1; -#else -// Standard POSIX constants -static const TestFilePermissions TEST_FILE_OK = F_OK; -static const TestFilePermissions TEST_FILE_READ = R_OK; -static const TestFilePermissions TEST_FILE_WRITE = W_OK; -static const TestFilePermissions TEST_FILE_EXECUTE = X_OK; -#endif - -/** \class SystemTools - * \brief A collection of useful platform-independent system functions. - */ -class @KWSYS_NAMESPACE@_EXPORT SystemTools -{ -public: - /** ----------------------------------------------------------------- - * String Manipulation Routines - * ----------------------------------------------------------------- - */ - - /** - * Replace symbols in str that are not valid in C identifiers as - * defined by the 1999 standard, ie. anything except [A-Za-z0-9_]. - * They are replaced with `_' and if the first character is a digit - * then an underscore is prepended. Note that this can produce - * identifiers that the standard reserves (_[A-Z].* and __.*). - */ - static std::string MakeCidentifier(const std::string& s); - - static std::string MakeCindentifier(const std::string& s) - { - return MakeCidentifier(s); - } - - /** - * Replace replace all occurrences of the string in the source string. - */ - static void ReplaceString(std::string& source, const char* replace, - const char* with); - static void ReplaceString(std::string& source, const std::string& replace, - const std::string& with); - - /** - * Return a capitalized string (i.e the first letter is uppercased, - * all other are lowercased). - */ - static std::string Capitalized(const std::string&); - - /** - * Return a 'capitalized words' string (i.e the first letter of each word - * is uppercased all other are left untouched though). - */ - static std::string CapitalizedWords(const std::string&); - - /** - * Return a 'uncapitalized words' string (i.e the first letter of each word - * is lowercased all other are left untouched though). 
- */ - static std::string UnCapitalizedWords(const std::string&); - - /** - * Return a lower case string - */ - static std::string LowerCase(const std::string&); - - /** - * Return a lower case string - */ - static std::string UpperCase(const std::string&); - - /** - * Count char in string - */ - static size_t CountChar(const char* str, char c); - - /** - * Remove some characters from a string. - * Return a pointer to the new resulting string (allocated with 'new') - */ - static char* RemoveChars(const char* str, const char* toremove); - - /** - * Remove remove all but 0->9, A->F characters from a string. - * Return a pointer to the new resulting string (allocated with 'new') - */ - static char* RemoveCharsButUpperHex(const char* str); - - /** - * Replace some characters by another character in a string (in-place) - * Return a pointer to string - */ - static char* ReplaceChars(char* str, const char* toreplace, - char replacement); - - /** - * Returns true if str1 starts (respectively ends) with str2 - */ - static bool StringStartsWith(const char* str1, const char* str2); - static bool StringStartsWith(const std::string& str1, const char* str2); - static bool StringEndsWith(const char* str1, const char* str2); - static bool StringEndsWith(const std::string& str1, const char* str2); - - /** - * Returns a pointer to the last occurrence of str2 in str1 - */ - static const char* FindLastString(const char* str1, const char* str2); - - /** - * Make a duplicate of the string similar to the strdup C function - * but use new to create the 'new' string, so one can use - * 'delete' to remove it. Returns 0 if the input is empty. - */ - static char* DuplicateString(const char* str); - - /** - * Return the string cropped to a given length by removing chars in the - * center of the string and replacing them with an ellipsis (...) - */ - static std::string CropString(const std::string&, size_t max_len); - - /** split a path by separator into an array of strings, default is /. - If isPath is true then the string is treated like a path and if - s starts with a / then the first element of the returned array will - be /, so /foo/bar will be [/, foo, bar] - */ - static std::vector SplitString(const std::string& s, - char separator = '/', - bool isPath = false); - /** - * Perform a case-independent string comparison - */ - static int Strucmp(const char* s1, const char* s2); - - /** - * Split a string on its newlines into multiple lines - * Return false only if the last line stored had no newline - */ - static bool Split(const std::string& s, std::vector& l); - static bool Split(const std::string& s, std::vector& l, - char separator); - - /** - * Return string with space added between capitalized words - * (i.e. EatMyShorts becomes Eat My Shorts ) - * (note that IEatShorts becomes IEat Shorts) - */ - static std::string AddSpaceBetweenCapitalizedWords(const std::string&); - - /** - * Append two or more strings and produce new one. - * Programmer must 'delete []' the resulting string, which was allocated - * with 'new'. - * Return 0 if inputs are empty or there was an error - */ - static char* AppendStrings(const char* str1, const char* str2); - static char* AppendStrings(const char* str1, const char* str2, - const char* str3); - - /** - * Estimate the length of the string that will be produced - * from printing the given format string and arguments. The - * returned length will always be at least as large as the string - * that will result from printing. 
- * WARNING: since va_arg is called to iterate of the argument list, - * you will not be able to use this 'ap' anymore from the beginning. - * It's up to you to call va_end though. - */ - static int EstimateFormatLength(const char* format, va_list ap); - - /** - * Escape specific characters in 'str'. - */ - static std::string EscapeChars(const char* str, const char* chars_to_escape, - char escape_char = '\\'); - - /** ----------------------------------------------------------------- - * Filename Manipulation Routines - * ----------------------------------------------------------------- - */ - - /** - * Replace Windows file system slashes with Unix-style slashes. - */ - static void ConvertToUnixSlashes(std::string& path); - -#ifdef _WIN32 - /** Calls Encoding::ToWindowsExtendedPath. */ - static std::wstring ConvertToWindowsExtendedPath(const std::string&); -#endif - - /** - * For windows this calls ConvertToWindowsOutputPath and for unix - * it calls ConvertToUnixOutputPath - */ - static std::string ConvertToOutputPath(const std::string&); - - /** - * Convert the path to a string that can be used in a unix makefile. - * double slashes are removed, and spaces are escaped. - */ - static std::string ConvertToUnixOutputPath(const std::string&); - - /** - * Convert the path to string that can be used in a windows project or - * makefile. Double slashes are removed if they are not at the start of - * the string, the slashes are converted to windows style backslashes, and - * if there are spaces in the string it is double quoted. - */ - static std::string ConvertToWindowsOutputPath(const std::string&); - - /** - * Return true if a path with the given name exists in the current directory. - */ - static bool PathExists(const std::string& path); - - /** - * Return true if a file exists in the current directory. - * If isFile = true, then make sure the file is a file and - * not a directory. If isFile = false, then return true - * if it is a file or a directory. Note that the file will - * also be checked for read access. (Currently, this check - * for read access is only done on POSIX systems.) - */ - static bool FileExists(const char* filename, bool isFile); - static bool FileExists(const std::string& filename, bool isFile); - static bool FileExists(const char* filename); - static bool FileExists(const std::string& filename); - - /** - * Test if a file exists and can be accessed with the requested - * permissions. Symbolic links are followed. Returns true if - * the access test was successful. - * - * On POSIX systems (including Cygwin), this maps to the access - * function. On Windows systems, all existing files are - * considered readable, and writable files are considered to - * have the read-only file attribute cleared. - */ - static bool TestFileAccess(const char* filename, - TestFilePermissions permissions); - static bool TestFileAccess(const std::string& filename, - TestFilePermissions permissions); -/** - * Cross platform wrapper for stat struct - */ -#if defined(_WIN32) && !defined(__CYGWIN__) -# if defined(__BORLANDC__) - typedef struct stati64 Stat_t; -# else - typedef struct _stat64 Stat_t; -# endif -#else - typedef struct stat Stat_t; -#endif - - /** - * Cross platform wrapper for stat system call - * - * On Windows this may not work for paths longer than 250 characters - * due to limitations of the underlying '_wstat64' call. - */ - static int Stat(const char* path, Stat_t* buf); - static int Stat(const std::string& path, Stat_t* buf); - -/** - * Converts Cygwin path to Win32 path. 
Uses dictionary container for - * caching and calls to cygwin_conv_to_win32_path from Cygwin dll - * for actual translation. Returns true on success, else false. - */ -#ifdef __CYGWIN__ - static bool PathCygwinToWin32(const char* path, char* win32_path); -#endif - - /** - * Return file length - */ - static unsigned long FileLength(const std::string& filename); - - /** - Change the modification time or create a file - */ - static bool Touch(const std::string& filename, bool create); - - /** - * Compare file modification times. - * Return true for successful comparison and false for error. - * When true is returned, result has -1, 0, +1 for - * f1 older, same, or newer than f2. - */ - static bool FileTimeCompare(const std::string& f1, const std::string& f2, - int* result); - - /** - * Get the file extension (including ".") needed for an executable - * on the current platform ("" for unix, ".exe" for Windows). - */ - static const char* GetExecutableExtension(); - - /** - * Given a path on a Windows machine, return the actual case of - * the path as it exists on disk. Path components that do not - * exist on disk are returned unchanged. Relative paths are always - * returned unchanged. Drive letters are always made upper case. - * This does nothing on non-Windows systems but return the path. - */ - static std::string GetActualCaseForPath(const std::string& path); - - /** - * Given the path to a program executable, get the directory part of - * the path with the file stripped off. If there is no directory - * part, the empty string is returned. - */ - static std::string GetProgramPath(const std::string&); - static bool SplitProgramPath(const std::string& in_name, std::string& dir, - std::string& file, bool errorReport = true); - - /** - * Given argv[0] for a unix program find the full path to a running - * executable. argv0 can be null for windows WinMain programs - * in this case GetModuleFileName will be used to find the path - * to the running executable. If argv0 is not a full path, - * then this will try to find the full path. If the path is not - * found false is returned, if found true is returned. An error - * message of the attempted paths is stored in errorMsg. - * exeName is the name of the executable. - * buildDir is a possibly null path to the build directory. - * installPrefix is a possibly null pointer to the install directory. - */ - static bool FindProgramPath(const char* argv0, std::string& pathOut, - std::string& errorMsg, - const char* exeName = nullptr, - const char* buildDir = nullptr, - const char* installPrefix = nullptr); - - /** - * Given a path to a file or directory, convert it to a full path. - * This collapses away relative paths relative to the cwd argument - * (which defaults to the current working directory). The full path - * is returned. - */ - static std::string CollapseFullPath(const std::string& in_relative); - static std::string CollapseFullPath(const std::string& in_relative, - const char* in_base); - static std::string CollapseFullPath(const std::string& in_relative, - const std::string& in_base); - - /** - * Get the real path for a given path, removing all symlinks. In - * the event of an error (non-existent path, permissions issue, - * etc.) the original path is returned if errorMessage pointer is - * nullptr. Otherwise empty string is returned and errorMessage - * contains error description. 
- */ - static std::string GetRealPath(const std::string& path, - std::string* errorMessage = nullptr); - - /** - * Split a path name into its root component and the rest of the - * path. The root component is one of the following: - * "/" = UNIX full path - * "c:/" = Windows full path (can be any drive letter) - * "c:" = Windows drive-letter relative path (can be any drive letter) - * "//" = Network path - * "~/" = Home path for current user - * "~u/" = Home path for user 'u' - * "" = Relative path - * - * A pointer to the rest of the path after the root component is - * returned. The root component is stored in the "root" string if - * given. - */ - static const char* SplitPathRootComponent(const std::string& p, - std::string* root = nullptr); - - /** - * Split a path name into its basic components. The first component - * always exists and is the root returned by SplitPathRootComponent. - * The remaining components form the path. If there is a trailing - * slash then the last component is the empty string. The - * components can be recombined as "c[0]c[1]/c[2]/.../c[n]" to - * produce the original path. Home directory references are - * automatically expanded if expand_home_dir is true and this - * platform supports them. - * - * This does *not* normalize the input path. All components are - * preserved, including empty ones. Typically callers should use - * this only on paths that have already been normalized. - */ - static void SplitPath(const std::string& p, - std::vector& components, - bool expand_home_dir = true); - - /** - * Join components of a path name into a single string. See - * SplitPath for the format of the components. - * - * This does *not* normalize the input path. All components are - * preserved, including empty ones. Typically callers should use - * this only on paths that have already been normalized. - */ - static std::string JoinPath(const std::vector& components); - static std::string JoinPath(std::vector::const_iterator first, - std::vector::const_iterator last); - - /** - * Compare a path or components of a path. - */ - static bool ComparePath(const std::string& c1, const std::string& c2); - - /** - * Return path of a full filename (no trailing slashes) - */ - static std::string GetFilenamePath(const std::string&); - - /** - * Return file name of a full filename (i.e. file name without path) - */ - static std::string GetFilenameName(const std::string&); - - /** - * Return longest file extension of a full filename (dot included) - */ - static std::string GetFilenameExtension(const std::string&); - - /** - * Return shortest file extension of a full filename (dot included) - */ - static std::string GetFilenameLastExtension(const std::string& filename); - - /** - * Return file name without extension of a full filename - */ - static std::string GetFilenameWithoutExtension(const std::string&); - - /** - * Return file name without its last (shortest) extension - */ - static std::string GetFilenameWithoutLastExtension(const std::string&); - - /** - * Return whether the path represents a full path (not relative) - */ - static bool FileIsFullPath(const std::string&); - static bool FileIsFullPath(const char*); - - /** - * For windows return the short path for the given path, - * Unix just a pass through - */ - static bool GetShortPath(const std::string& path, std::string& result); - - /** - * Read line from file. Make sure to read a full line and truncates it if - * requested via sizeLimit. Returns true if any data were read before the - * end-of-file was reached. 
If the has_newline argument is specified, it will - * be true when the line read had a newline character. - */ - static bool GetLineFromStream(std::istream& istr, std::string& line, - bool* has_newline = nullptr, - long sizeLimit = -1); - - /** - * Get the parent directory of the directory or file - */ - static std::string GetParentDirectory(const std::string& fileOrDir); - - /** - * Check if the given file or directory is in subdirectory of dir - */ - static bool IsSubDirectory(const std::string& fileOrDir, - const std::string& dir); - - /** ----------------------------------------------------------------- - * File Manipulation Routines - * ----------------------------------------------------------------- - */ - - /** - * Open a file considering unicode. - */ - static FILE* Fopen(const std::string& file, const char* mode); - -/** - * Visual C++ does not define mode_t (note that Borland does, however). - */ -#if defined(_MSC_VER) - typedef unsigned short mode_t; -#endif - - /** - * Make a new directory if it is not there. This function - * can make a full path even if none of the directories existed - * prior to calling this function. - */ - static bool MakeDirectory(const char* path, const mode_t* mode = nullptr); - static bool MakeDirectory(const std::string& path, - const mode_t* mode = nullptr); - - /** - * Copy the source file to the destination file only - * if the two files differ. - */ - static bool CopyFileIfDifferent(const std::string& source, - const std::string& destination); - - /** - * Compare the contents of two files. Return true if different - */ - static bool FilesDiffer(const std::string& source, - const std::string& destination); - - /** - * Compare the contents of two files, ignoring line ending differences. - * Return true if different - */ - static bool TextFilesDiffer(const std::string& path1, - const std::string& path2); - - /** - * Return true if the two files are the same file - */ - static bool SameFile(const std::string& file1, const std::string& file2); - - /** - * Copy a file. - */ - static bool CopyFileAlways(const std::string& source, - const std::string& destination); - - /** - * Copy a file. If the "always" argument is true the file is always - * copied. If it is false, the file is copied only if it is new or - * has changed. - */ - static bool CopyAFile(const std::string& source, - const std::string& destination, bool always = true); - - /** - * Copy content directory to another directory with all files and - * subdirectories. If the "always" argument is true all files are - * always copied. If it is false, only files that have changed or - * are new are copied. 
- */ - static bool CopyADirectory(const std::string& source, - const std::string& destination, - bool always = true); - - /** - * Remove a file - */ - static bool RemoveFile(const std::string& source); - - /** - * Remove a directory - */ - static bool RemoveADirectory(const std::string& source); - - /** - * Get the maximum full file path length - */ - static size_t GetMaximumFilePathLength(); - - /** - * Find a file in the system PATH, with optional extra paths - */ - static std::string FindFile( - const std::string& name, - const std::vector& path = std::vector(), - bool no_system_path = false); - - /** - * Find a directory in the system PATH, with optional extra paths - */ - static std::string FindDirectory( - const std::string& name, - const std::vector& path = std::vector(), - bool no_system_path = false); - - /** - * Find an executable in the system PATH, with optional extra paths - */ - static std::string FindProgram( - const char* name, - const std::vector& path = std::vector(), - bool no_system_path = false); - static std::string FindProgram( - const std::string& name, - const std::vector& path = std::vector(), - bool no_system_path = false); - static std::string FindProgram( - const std::vector& names, - const std::vector& path = std::vector(), - bool no_system_path = false); - - /** - * Find a library in the system PATH, with optional extra paths - */ - static std::string FindLibrary(const std::string& name, - const std::vector& path); - - /** - * Return true if the file is a directory - */ - static bool FileIsDirectory(const std::string& name); - - /** - * Return true if the file is a symlink - */ - static bool FileIsSymlink(const std::string& name); - - /** - * Return true if the file is a FIFO - */ - static bool FileIsFIFO(const std::string& name); - - /** - * Return true if the file has a given signature (first set of bytes) - */ - static bool FileHasSignature(const char* filename, const char* signature, - long offset = 0); - - /** - * Attempt to detect and return the type of a file. - * Up to 'length' bytes are read from the file, if more than 'percent_bin' % - * of the bytes are non-textual elements, the file is considered binary, - * otherwise textual. Textual elements are bytes in the ASCII [0x20, 0x7E] - * range, but also \\n, \\r, \\t. - * The algorithm is simplistic, and should probably check for usual file - * extensions, 'magic' signature, unicode, etc. - */ - enum FileTypeEnum - { - FileTypeUnknown, - FileTypeBinary, - FileTypeText - }; - static SystemTools::FileTypeEnum DetectFileType(const char* filename, - unsigned long length = 256, - double percent_bin = 0.05); - - /** - * Create a symbolic link if the platform supports it. Returns whether - * creation succeeded. - */ - static bool CreateSymlink(const std::string& origName, - const std::string& newName); - - /** - * Read the contents of a symbolic link. Returns whether reading - * succeeded. - */ - static bool ReadSymlink(const std::string& newName, std::string& origName); - - /** - * Try to locate the file 'filename' in the directory 'dir'. - * If 'filename' is a fully qualified filename, the basename of the file is - * used to check for its existence in 'dir'. - * If 'dir' is not a directory, GetFilenamePath() is called on 'dir' to - * get its directory first (thus, you can pass a filename as 'dir', as - * a convenience). - * 'filename_found' is assigned the fully qualified name/path of the file - * if it is found (not touched otherwise). 
- * If 'try_filename_dirs' is true, try to find the file using the - * components of its path, i.e. if we are looking for c:/foo/bar/bill.txt, - * first look for bill.txt in 'dir', then in 'dir'/bar, then in 'dir'/foo/bar - * etc. - * Return true if the file was found, false otherwise. - */ - static bool LocateFileInDir(const char* filename, const char* dir, - std::string& filename_found, - int try_filename_dirs = 0); - - /** compute the relative path from local to remote. local must - be a directory. remote can be a file or a directory. - Both remote and local must be full paths. Basically, if - you are in directory local and you want to access the file in remote - what is the relative path to do that. For example: - /a/b/c/d to /a/b/c1/d1 -> ../../c1/d1 - from /usr/src to /usr/src/test/blah/foo.cpp -> test/blah/foo.cpp - */ - static std::string RelativePath(const std::string& local, - const std::string& remote); - - /** - * Return file's modified time - */ - static long int ModifiedTime(const std::string& filename); - - /** - * Return file's creation time (Win32: works only for NTFS, not FAT) - */ - static long int CreationTime(const std::string& filename); - - /** - * Get and set permissions of the file. If honor_umask is set, the umask - * is queried and applied to the given permissions. Returns false if - * failure. - * - * WARNING: A non-thread-safe method is currently used to get the umask - * if a honor_umask parameter is set to true. - */ - static bool GetPermissions(const char* file, mode_t& mode); - static bool GetPermissions(const std::string& file, mode_t& mode); - static bool SetPermissions(const char* file, mode_t mode, - bool honor_umask = false); - static bool SetPermissions(const std::string& file, mode_t mode, - bool honor_umask = false); - - /** ----------------------------------------------------------------- - * Time Manipulation Routines - * ----------------------------------------------------------------- - */ - - /** Get current time in seconds since Posix Epoch (Jan 1, 1970). */ - static double GetTime(); - - /** - * Get current date/time - */ - static std::string GetCurrentDateTime(const char* format); - - /** ----------------------------------------------------------------- - * Registry Manipulation Routines - * ----------------------------------------------------------------- - */ - - /** - * Specify access to the 32-bit or 64-bit application view of - * registry values. The default is to match the currently running - * binary type. - */ - enum KeyWOW64 - { - KeyWOW64_Default, - KeyWOW64_32, - KeyWOW64_64 - }; - - /** - * Get a list of subkeys. - */ - static bool GetRegistrySubKeys(const std::string& key, - std::vector& subkeys, - KeyWOW64 view = KeyWOW64_Default); - - /** - * Read a registry value - */ - static bool ReadRegistryValue(const std::string& key, std::string& value, - KeyWOW64 view = KeyWOW64_Default); - - /** - * Write a registry value - */ - static bool WriteRegistryValue(const std::string& key, - const std::string& value, - KeyWOW64 view = KeyWOW64_Default); - - /** - * Delete a registry value - */ - static bool DeleteRegistryValue(const std::string& key, - KeyWOW64 view = KeyWOW64_Default); - - /** ----------------------------------------------------------------- - * Environment Manipulation Routines - * ----------------------------------------------------------------- - */ - - /** - * Add the paths from the environment variable PATH to the - * string vector passed in. If env is set then the value - * of env will be used instead of PATH. 
- */ - static void GetPath(std::vector& path, - const char* env = nullptr); - - /** - * Read an environment variable - */ - static const char* GetEnv(const char* key); - static const char* GetEnv(const std::string& key); - static bool GetEnv(const char* key, std::string& result); - static bool GetEnv(const std::string& key, std::string& result); - static bool HasEnv(const char* key); - static bool HasEnv(const std::string& key); - - /** Put a string into the environment - of the form var=value */ - static bool PutEnv(const std::string& env); - - /** Remove a string from the environment. - Input is of the form "var" or "var=value" (value is ignored). */ - static bool UnPutEnv(const std::string& env); - - /** - * Get current working directory CWD - */ - static std::string GetCurrentWorkingDirectory(bool collapse = true); - - /** - * Change directory to the directory specified - */ - static int ChangeDirectory(const std::string& dir); - - /** - * Get the result of strerror(errno) - */ - static std::string GetLastSystemError(); - - /** - * When building DEBUG with MSVC, this enables a hook that prevents - * error dialogs from popping up if the program is being run from - * DART. - */ - static void EnableMSVCDebugHook(); - - /** - * Get the width of the terminal window. The code may or may not work, so - * make sure you have some reasonable defaults prepared if the code returns - * some bogus size. - */ - static int GetTerminalWidth(); - -#if @KWSYS_NAMESPACE@_SYSTEMTOOLS_USE_TRANSLATION_MAP - /** - * Add an entry in the path translation table. - */ - static void AddTranslationPath(const std::string& dir, - const std::string& refdir); - - /** - * If dir is different after CollapseFullPath is called, - * Then insert it into the path translation table - */ - static void AddKeepPath(const std::string& dir); - - /** - * Update path by going through the Path Translation table; - */ - static void CheckTranslationPath(std::string& path); -#endif - - /** - * Delay the execution for a specified amount of time specified - * in milliseconds - */ - static void Delay(unsigned int msec); - - /** - * Get the operating system name and version - * This is implemented for Win32 only for the moment - */ - static std::string GetOperatingSystemNameAndVersion(); - - /** ----------------------------------------------------------------- - * URL Manipulation Routines - * ----------------------------------------------------------------- - */ - - /** - * Parse a character string : - * protocol://dataglom - * and fill protocol as appropriate. - * Return false if the URL does not have the required form, true otherwise. - */ - static bool ParseURLProtocol(const std::string& URL, std::string& protocol, - std::string& dataglom); - - /** - * Parse a string (a URL without protocol prefix) with the form: - * protocol://[[username[':'password]'@']hostname[':'dataport]]'/'[datapath] - * and fill protocol, username, password, hostname, dataport, and datapath - * when values are found. - * Return true if the string matches the format; false otherwise. - */ - static bool ParseURL(const std::string& URL, std::string& protocol, - std::string& username, std::string& password, - std::string& hostname, std::string& dataport, - std::string& datapath); - -private: - /** - * Allocate the stl map that serve as the Path Translation table. - */ - static void ClassInitialize(); - - /** - * Deallocate the stl map that serve as the Path Translation table. 
- */
- static void ClassFinalize();
-
- /**
- * This method prevents warning on SGI
- */
- SystemToolsManager* GetSystemToolsManager()
- {
- return &SystemToolsManagerInstance;
- }
-
- static SystemToolsStatic* Statics;
- friend class SystemToolsStatic;
- friend class SystemToolsManager;
-};
-
-} // namespace @KWSYS_NAMESPACE@
-
-#endif
diff --git a/test/API/driver/kwsys/Terminal.c b/test/API/driver/kwsys/Terminal.c
deleted file mode 100644
index 4dd246148c5..00000000000
--- a/test/API/driver/kwsys/Terminal.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
- file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
-#include "kwsysPrivate.h"
-#include KWSYS_HEADER(Terminal.h)
-
-/* Work-around CMake dependency scanning limitation. This must
- duplicate the above list of headers. */
-#if 0
-# include "Terminal.h.in"
-#endif
-
-/* Configure support for this platform. */
-#if defined(_WIN32) || defined(__CYGWIN__)
-# define KWSYS_TERMINAL_SUPPORT_CONSOLE
-#endif
-#if !defined(_WIN32)
-# define KWSYS_TERMINAL_ISATTY_WORKS
-#endif
-
-/* Include needed system APIs. */
-
-#include <stdarg.h> /* va_list */
-#include <stdlib.h> /* getenv */
-#include <string.h> /* strcmp */
-
-#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
-# include <io.h> /* _get_osfhandle */
-# include <windows.h> /* SetConsoleTextAttribute */
-#endif
-
-#if defined(KWSYS_TERMINAL_ISATTY_WORKS)
-# include <unistd.h> /* isatty */
-#else
-# include <sys/stat.h> /* fstat */
-#endif
-
-static int kwsysTerminalStreamIsVT100(FILE* stream, int default_vt100,
- int default_tty);
-static void kwsysTerminalSetVT100Color(FILE* stream, int color);
-#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
-static HANDLE kwsysTerminalGetStreamHandle(FILE* stream);
-static void kwsysTerminalSetConsoleColor(HANDLE hOut,
- CONSOLE_SCREEN_BUFFER_INFO* hOutInfo,
- FILE* stream, int color);
-#endif
-
-void kwsysTerminal_cfprintf(int color, FILE* stream, const char* format, ...)
-{
- /* Setup the stream with the given color if possible. */
- int pipeIsConsole = 0;
- int pipeIsVT100 = 0;
- int default_vt100 = color & kwsysTerminal_Color_AssumeVT100;
- int default_tty = color & kwsysTerminal_Color_AssumeTTY;
-#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
- CONSOLE_SCREEN_BUFFER_INFO hOutInfo;
- HANDLE hOut = kwsysTerminalGetStreamHandle(stream);
- if (GetConsoleScreenBufferInfo(hOut, &hOutInfo)) {
- pipeIsConsole = 1;
- kwsysTerminalSetConsoleColor(hOut, &hOutInfo, stream, color);
- }
-#endif
- if (!pipeIsConsole &&
- kwsysTerminalStreamIsVT100(stream, default_vt100, default_tty)) {
- pipeIsVT100 = 1;
- kwsysTerminalSetVT100Color(stream, color);
- }
-
- /* Format the text into the stream. */
- {
- va_list var_args;
- va_start(var_args, format);
- vfprintf(stream, format, var_args);
- va_end(var_args);
- }
-
-/* Restore the normal color state for the stream. */
-#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
- if (pipeIsConsole) {
- kwsysTerminalSetConsoleColor(hOut, &hOutInfo, stream,
- kwsysTerminal_Color_Normal);
- }
-#endif
- if (pipeIsVT100) {
- kwsysTerminalSetVT100Color(stream, kwsysTerminal_Color_Normal);
- }
-}
-
-/* Detect cases when a stream is definitely not interactive. */
-#if !defined(KWSYS_TERMINAL_ISATTY_WORKS)
-static int kwsysTerminalStreamIsNotInteractive(FILE* stream)
-{
- /* The given stream is definitely not interactive if it is a regular
- file. 
*/ - struct stat stream_stat; - if (fstat(fileno(stream), &stream_stat) == 0) { - if (stream_stat.st_mode & S_IFREG) { - return 1; - } - } - return 0; -} -#endif - -/* List of terminal names known to support VT100 color escape sequences. */ -static const char* kwsysTerminalVT100Names[] = { "Eterm", - "alacritty", - "alacritty-direct", - "ansi", - "color-xterm", - "con132x25", - "con132x30", - "con132x43", - "con132x60", - "con80x25", - "con80x28", - "con80x30", - "con80x43", - "con80x50", - "con80x60", - "cons25", - "console", - "cygwin", - "dtterm", - "eterm-color", - "gnome", - "gnome-256color", - "konsole", - "konsole-256color", - "kterm", - "linux", - "msys", - "linux-c", - "mach-color", - "mlterm", - "putty", - "putty-256color", - "rxvt", - "rxvt-256color", - "rxvt-cygwin", - "rxvt-cygwin-native", - "rxvt-unicode", - "rxvt-unicode-256color", - "screen", - "screen-256color", - "screen-256color-bce", - "screen-bce", - "screen-w", - "screen.linux", - "tmux", - "tmux-256color", - "vt100", - "xterm", - "xterm-16color", - "xterm-256color", - "xterm-88color", - "xterm-color", - "xterm-debian", - "xterm-kitty", - "xterm-termite", - 0 }; - -/* Detect whether a stream is displayed in a VT100-compatible terminal. */ -static int kwsysTerminalStreamIsVT100(FILE* stream, int default_vt100, - int default_tty) -{ - /* Force color according to http://bixense.com/clicolors/ convention. */ - { - const char* clicolor_force = getenv("CLICOLOR_FORCE"); - if (clicolor_force && *clicolor_force && - strcmp(clicolor_force, "0") != 0) { - return 1; - } - } - - /* If running inside emacs the terminal is not VT100. Some emacs - seem to claim the TERM is xterm even though they do not support - VT100 escapes. */ - { - const char* emacs = getenv("EMACS"); - if (emacs && *emacs == 't') { - return 0; - } - } - - /* Check for a valid terminal. */ - if (!default_vt100) { - const char** t = 0; - const char* term = getenv("TERM"); - if (term) { - for (t = kwsysTerminalVT100Names; *t && strcmp(term, *t) != 0; ++t) { - } - } - if (!(t && *t)) { - return 0; - } - } - -#if defined(KWSYS_TERMINAL_ISATTY_WORKS) - /* Make sure the stream is a tty. */ - (void)default_tty; - return isatty(fileno(stream)) ? 1 : 0; -#else - /* Check for cases in which the stream is definitely not a tty. */ - if (kwsysTerminalStreamIsNotInteractive(stream)) { - return 0; - } - - /* Use the provided default for whether this is a tty. */ - return default_tty; -#endif -} - -/* VT100 escape sequence strings. 
*/ -#if defined(__MVS__) -/* if building on z/OS (aka MVS), assume we are using EBCDIC */ -# define ESCAPE_CHAR "\47" -#else -# define ESCAPE_CHAR "\33" -#endif - -#define KWSYS_TERMINAL_VT100_NORMAL ESCAPE_CHAR "[0m" -#define KWSYS_TERMINAL_VT100_BOLD ESCAPE_CHAR "[1m" -#define KWSYS_TERMINAL_VT100_UNDERLINE ESCAPE_CHAR "[4m" -#define KWSYS_TERMINAL_VT100_BLINK ESCAPE_CHAR "[5m" -#define KWSYS_TERMINAL_VT100_INVERSE ESCAPE_CHAR "[7m" -#define KWSYS_TERMINAL_VT100_FOREGROUND_BLACK ESCAPE_CHAR "[30m" -#define KWSYS_TERMINAL_VT100_FOREGROUND_RED ESCAPE_CHAR "[31m" -#define KWSYS_TERMINAL_VT100_FOREGROUND_GREEN ESCAPE_CHAR "[32m" -#define KWSYS_TERMINAL_VT100_FOREGROUND_YELLOW ESCAPE_CHAR "[33m" -#define KWSYS_TERMINAL_VT100_FOREGROUND_BLUE ESCAPE_CHAR "[34m" -#define KWSYS_TERMINAL_VT100_FOREGROUND_MAGENTA ESCAPE_CHAR "[35m" -#define KWSYS_TERMINAL_VT100_FOREGROUND_CYAN ESCAPE_CHAR "[36m" -#define KWSYS_TERMINAL_VT100_FOREGROUND_WHITE ESCAPE_CHAR "[37m" -#define KWSYS_TERMINAL_VT100_BACKGROUND_BLACK ESCAPE_CHAR "[40m" -#define KWSYS_TERMINAL_VT100_BACKGROUND_RED ESCAPE_CHAR "[41m" -#define KWSYS_TERMINAL_VT100_BACKGROUND_GREEN ESCAPE_CHAR "[42m" -#define KWSYS_TERMINAL_VT100_BACKGROUND_YELLOW ESCAPE_CHAR "[43m" -#define KWSYS_TERMINAL_VT100_BACKGROUND_BLUE ESCAPE_CHAR "[44m" -#define KWSYS_TERMINAL_VT100_BACKGROUND_MAGENTA ESCAPE_CHAR "[45m" -#define KWSYS_TERMINAL_VT100_BACKGROUND_CYAN ESCAPE_CHAR "[46m" -#define KWSYS_TERMINAL_VT100_BACKGROUND_WHITE ESCAPE_CHAR "[47m" - -/* Write VT100 escape sequences to the stream for the given color. */ -static void kwsysTerminalSetVT100Color(FILE* stream, int color) -{ - if (color == kwsysTerminal_Color_Normal) { - fprintf(stream, KWSYS_TERMINAL_VT100_NORMAL); - return; - } - - switch (color & kwsysTerminal_Color_ForegroundMask) { - case kwsysTerminal_Color_Normal: - fprintf(stream, KWSYS_TERMINAL_VT100_NORMAL); - break; - case kwsysTerminal_Color_ForegroundBlack: - fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_BLACK); - break; - case kwsysTerminal_Color_ForegroundRed: - fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_RED); - break; - case kwsysTerminal_Color_ForegroundGreen: - fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_GREEN); - break; - case kwsysTerminal_Color_ForegroundYellow: - fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_YELLOW); - break; - case kwsysTerminal_Color_ForegroundBlue: - fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_BLUE); - break; - case kwsysTerminal_Color_ForegroundMagenta: - fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_MAGENTA); - break; - case kwsysTerminal_Color_ForegroundCyan: - fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_CYAN); - break; - case kwsysTerminal_Color_ForegroundWhite: - fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_WHITE); - break; - } - switch (color & kwsysTerminal_Color_BackgroundMask) { - case kwsysTerminal_Color_BackgroundBlack: - fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_BLACK); - break; - case kwsysTerminal_Color_BackgroundRed: - fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_RED); - break; - case kwsysTerminal_Color_BackgroundGreen: - fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_GREEN); - break; - case kwsysTerminal_Color_BackgroundYellow: - fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_YELLOW); - break; - case kwsysTerminal_Color_BackgroundBlue: - fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_BLUE); - break; - case kwsysTerminal_Color_BackgroundMagenta: - fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_MAGENTA); - break; - case kwsysTerminal_Color_BackgroundCyan: - 
fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_CYAN); - break; - case kwsysTerminal_Color_BackgroundWhite: - fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_WHITE); - break; - } - if (color & kwsysTerminal_Color_ForegroundBold) { - fprintf(stream, KWSYS_TERMINAL_VT100_BOLD); - } -} - -#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE) - -# define KWSYS_TERMINAL_MASK_FOREGROUND \ - (FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED | \ - FOREGROUND_INTENSITY) -# define KWSYS_TERMINAL_MASK_BACKGROUND \ - (BACKGROUND_BLUE | BACKGROUND_GREEN | BACKGROUND_RED | \ - BACKGROUND_INTENSITY) - -/* Get the Windows handle for a FILE stream. */ -static HANDLE kwsysTerminalGetStreamHandle(FILE* stream) -{ - /* Get the C-library file descriptor from the stream. */ - int fd = fileno(stream); - -# if defined(__CYGWIN__) - /* Cygwin seems to have an extra pipe level. If the file descriptor - corresponds to stdout or stderr then obtain the matching windows - handle directly. */ - if (fd == fileno(stdout)) { - return GetStdHandle(STD_OUTPUT_HANDLE); - } else if (fd == fileno(stderr)) { - return GetStdHandle(STD_ERROR_HANDLE); - } -# endif - - /* Get the underlying Windows handle for the descriptor. */ - return (HANDLE)_get_osfhandle(fd); -} - -/* Set color attributes in a Windows console. */ -static void kwsysTerminalSetConsoleColor(HANDLE hOut, - CONSOLE_SCREEN_BUFFER_INFO* hOutInfo, - FILE* stream, int color) -{ - WORD attributes = 0; - switch (color & kwsysTerminal_Color_ForegroundMask) { - case kwsysTerminal_Color_Normal: - attributes |= hOutInfo->wAttributes & KWSYS_TERMINAL_MASK_FOREGROUND; - break; - case kwsysTerminal_Color_ForegroundBlack: - attributes |= 0; - break; - case kwsysTerminal_Color_ForegroundRed: - attributes |= FOREGROUND_RED; - break; - case kwsysTerminal_Color_ForegroundGreen: - attributes |= FOREGROUND_GREEN; - break; - case kwsysTerminal_Color_ForegroundYellow: - attributes |= FOREGROUND_RED | FOREGROUND_GREEN; - break; - case kwsysTerminal_Color_ForegroundBlue: - attributes |= FOREGROUND_BLUE; - break; - case kwsysTerminal_Color_ForegroundMagenta: - attributes |= FOREGROUND_RED | FOREGROUND_BLUE; - break; - case kwsysTerminal_Color_ForegroundCyan: - attributes |= FOREGROUND_BLUE | FOREGROUND_GREEN; - break; - case kwsysTerminal_Color_ForegroundWhite: - attributes |= FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED; - break; - } - switch (color & kwsysTerminal_Color_BackgroundMask) { - case kwsysTerminal_Color_Normal: - attributes |= hOutInfo->wAttributes & KWSYS_TERMINAL_MASK_BACKGROUND; - break; - case kwsysTerminal_Color_BackgroundBlack: - attributes |= 0; - break; - case kwsysTerminal_Color_BackgroundRed: - attributes |= BACKGROUND_RED; - break; - case kwsysTerminal_Color_BackgroundGreen: - attributes |= BACKGROUND_GREEN; - break; - case kwsysTerminal_Color_BackgroundYellow: - attributes |= BACKGROUND_RED | BACKGROUND_GREEN; - break; - case kwsysTerminal_Color_BackgroundBlue: - attributes |= BACKGROUND_BLUE; - break; - case kwsysTerminal_Color_BackgroundMagenta: - attributes |= BACKGROUND_RED | BACKGROUND_BLUE; - break; - case kwsysTerminal_Color_BackgroundCyan: - attributes |= BACKGROUND_BLUE | BACKGROUND_GREEN; - break; - case kwsysTerminal_Color_BackgroundWhite: - attributes |= BACKGROUND_BLUE | BACKGROUND_GREEN | BACKGROUND_RED; - break; - } - if (color & kwsysTerminal_Color_ForegroundBold) { - attributes |= FOREGROUND_INTENSITY; - } - if (color & kwsysTerminal_Color_BackgroundBold) { - attributes |= BACKGROUND_INTENSITY; - } - fflush(stream); - SetConsoleTextAttribute(hOut, 
attributes); -} -#endif diff --git a/test/API/driver/kwsys/Terminal.h.in b/test/API/driver/kwsys/Terminal.h.in deleted file mode 100644 index 1a2c7452fa1..00000000000 --- a/test/API/driver/kwsys/Terminal.h.in +++ /dev/null @@ -1,170 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_Terminal_h -#define @KWSYS_NAMESPACE@_Terminal_h - -#include <@KWSYS_NAMESPACE@/Configure.h> - -#include /* For file stream type FILE. */ - -/* Redefine all public interface symbol names to be in the proper - namespace. These macros are used internally to kwsys only, and are - not visible to user code. Use kwsysHeaderDump.pl to reproduce - these macros after making changes to the interface. */ -#if !defined(KWSYS_NAMESPACE) -# define kwsys_ns(x) @KWSYS_NAMESPACE@##x -# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT -#endif -#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# define kwsysTerminal_cfprintf kwsys_ns(Terminal_cfprintf) -# define kwsysTerminal_Color_e kwsys_ns(Terminal_Color_e) -# define kwsysTerminal_Color_Normal kwsys_ns(Terminal_Color_Normal) -# define kwsysTerminal_Color_ForegroundBlack \ - kwsys_ns(Terminal_Color_ForegroundBlack) -# define kwsysTerminal_Color_ForegroundRed \ - kwsys_ns(Terminal_Color_ForegroundRed) -# define kwsysTerminal_Color_ForegroundGreen \ - kwsys_ns(Terminal_Color_ForegroundGreen) -# define kwsysTerminal_Color_ForegroundYellow \ - kwsys_ns(Terminal_Color_ForegroundYellow) -# define kwsysTerminal_Color_ForegroundBlue \ - kwsys_ns(Terminal_Color_ForegroundBlue) -# define kwsysTerminal_Color_ForegroundMagenta \ - kwsys_ns(Terminal_Color_ForegroundMagenta) -# define kwsysTerminal_Color_ForegroundCyan \ - kwsys_ns(Terminal_Color_ForegroundCyan) -# define kwsysTerminal_Color_ForegroundWhite \ - kwsys_ns(Terminal_Color_ForegroundWhite) -# define kwsysTerminal_Color_ForegroundMask \ - kwsys_ns(Terminal_Color_ForegroundMask) -# define kwsysTerminal_Color_BackgroundBlack \ - kwsys_ns(Terminal_Color_BackgroundBlack) -# define kwsysTerminal_Color_BackgroundRed \ - kwsys_ns(Terminal_Color_BackgroundRed) -# define kwsysTerminal_Color_BackgroundGreen \ - kwsys_ns(Terminal_Color_BackgroundGreen) -# define kwsysTerminal_Color_BackgroundYellow \ - kwsys_ns(Terminal_Color_BackgroundYellow) -# define kwsysTerminal_Color_BackgroundBlue \ - kwsys_ns(Terminal_Color_BackgroundBlue) -# define kwsysTerminal_Color_BackgroundMagenta \ - kwsys_ns(Terminal_Color_BackgroundMagenta) -# define kwsysTerminal_Color_BackgroundCyan \ - kwsys_ns(Terminal_Color_BackgroundCyan) -# define kwsysTerminal_Color_BackgroundWhite \ - kwsys_ns(Terminal_Color_BackgroundWhite) -# define kwsysTerminal_Color_BackgroundMask \ - kwsys_ns(Terminal_Color_BackgroundMask) -# define kwsysTerminal_Color_ForegroundBold \ - kwsys_ns(Terminal_Color_ForegroundBold) -# define kwsysTerminal_Color_BackgroundBold \ - kwsys_ns(Terminal_Color_BackgroundBold) -# define kwsysTerminal_Color_AssumeTTY kwsys_ns(Terminal_Color_AssumeTTY) -# define kwsysTerminal_Color_AssumeVT100 kwsys_ns(Terminal_Color_AssumeVT100) -# define kwsysTerminal_Color_AttributeMask \ - kwsys_ns(Terminal_Color_AttributeMask) -#endif - -#if defined(__cplusplus) -extern "C" { -#endif - -/** - * Write colored and formatted text to a stream. Color is used only - * for streams supporting it. The color specification is constructed - * by bitwise-OR-ing enumeration values. At most one foreground and - * one background value may be given. 
- * - * Whether the a stream supports color is usually automatically - * detected, but with two exceptions: - * - * - When the stream is displayed in a terminal supporting VT100 - * color but using an intermediate pipe for communication the - * detection of a tty fails. (This typically occurs for a shell - * running in an rxvt terminal in MSYS.) If the caller knows this - * to be the case, the attribute Color_AssumeTTY may be included in - * the color specification. - * - * - When the stream is displayed in a terminal whose TERM - * environment variable is not set or is set to a value that is not - * known to support VT100 colors. If the caller knows this to be - * the case, the attribute Color_AssumeVT100 may be included in the - * color specification. - */ -kwsysEXPORT void kwsysTerminal_cfprintf(int color, FILE* stream, - const char* format, ...); -enum kwsysTerminal_Color_e -{ - /* Normal Text */ - kwsysTerminal_Color_Normal = 0, - - /* Foreground Color */ - kwsysTerminal_Color_ForegroundBlack = 0x1, - kwsysTerminal_Color_ForegroundRed = 0x2, - kwsysTerminal_Color_ForegroundGreen = 0x3, - kwsysTerminal_Color_ForegroundYellow = 0x4, - kwsysTerminal_Color_ForegroundBlue = 0x5, - kwsysTerminal_Color_ForegroundMagenta = 0x6, - kwsysTerminal_Color_ForegroundCyan = 0x7, - kwsysTerminal_Color_ForegroundWhite = 0x8, - kwsysTerminal_Color_ForegroundMask = 0xF, - - /* Background Color */ - kwsysTerminal_Color_BackgroundBlack = 0x10, - kwsysTerminal_Color_BackgroundRed = 0x20, - kwsysTerminal_Color_BackgroundGreen = 0x30, - kwsysTerminal_Color_BackgroundYellow = 0x40, - kwsysTerminal_Color_BackgroundBlue = 0x50, - kwsysTerminal_Color_BackgroundMagenta = 0x60, - kwsysTerminal_Color_BackgroundCyan = 0x70, - kwsysTerminal_Color_BackgroundWhite = 0x80, - kwsysTerminal_Color_BackgroundMask = 0xF0, - - /* Attributes */ - kwsysTerminal_Color_ForegroundBold = 0x100, - kwsysTerminal_Color_BackgroundBold = 0x200, - kwsysTerminal_Color_AssumeTTY = 0x400, - kwsysTerminal_Color_AssumeVT100 = 0x800, - kwsysTerminal_Color_AttributeMask = 0xF00 -}; - -#if defined(__cplusplus) -} /* extern "C" */ -#endif - -/* If we are building a kwsys .c or .cxx file, let it use these macros. - Otherwise, undefine them to keep the namespace clean. 
*/ -#if !defined(KWSYS_NAMESPACE) -# undef kwsys_ns -# undef kwsysEXPORT -# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS -# undef kwsysTerminal_cfprintf -# undef kwsysTerminal_Color_e -# undef kwsysTerminal_Color_Normal -# undef kwsysTerminal_Color_ForegroundBlack -# undef kwsysTerminal_Color_ForegroundRed -# undef kwsysTerminal_Color_ForegroundGreen -# undef kwsysTerminal_Color_ForegroundYellow -# undef kwsysTerminal_Color_ForegroundBlue -# undef kwsysTerminal_Color_ForegroundMagenta -# undef kwsysTerminal_Color_ForegroundCyan -# undef kwsysTerminal_Color_ForegroundWhite -# undef kwsysTerminal_Color_ForegroundMask -# undef kwsysTerminal_Color_BackgroundBlack -# undef kwsysTerminal_Color_BackgroundRed -# undef kwsysTerminal_Color_BackgroundGreen -# undef kwsysTerminal_Color_BackgroundYellow -# undef kwsysTerminal_Color_BackgroundBlue -# undef kwsysTerminal_Color_BackgroundMagenta -# undef kwsysTerminal_Color_BackgroundCyan -# undef kwsysTerminal_Color_BackgroundWhite -# undef kwsysTerminal_Color_BackgroundMask -# undef kwsysTerminal_Color_ForegroundBold -# undef kwsysTerminal_Color_BackgroundBold -# undef kwsysTerminal_Color_AssumeTTY -# undef kwsysTerminal_Color_AssumeVT100 -# undef kwsysTerminal_Color_AttributeMask -# endif -#endif - -#endif diff --git a/test/API/driver/kwsys/clang-format.bash b/test/API/driver/kwsys/clang-format.bash deleted file mode 100644 index b0282abc8aa..00000000000 --- a/test/API/driver/kwsys/clang-format.bash +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/env bash -#============================================================================= -# Copyright 2015-2017 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -usage='usage: clang-format.bash [] [--] - - --help Print usage plus more detailed help. - - --clang-format Use given clang-format tool. - - --amend Filter files changed by HEAD. - --cached Filter files locally staged for commit. - --modified Filter files locally modified from HEAD. - --tracked Filter files tracked by Git. -' - -help="$usage"' -Example to format locally modified files: - - ./clang-format.bash --modified - -Example to format locally modified files staged for commit: - - ./clang-format.bash --cached - -Example to format files modified by the most recent commit: - - ./clang-format.bash --amend - -Example to format all files: - - ./clang-format.bash --tracked - -Example to format the current topic: - - git filter-branch \ - --tree-filter "./clang-format.bash --tracked" \ - master.. -' - -die() { - echo "$@" 1>&2; exit 1 -} - -#----------------------------------------------------------------------------- - -# Parse command-line arguments. 
-clang_format='' -mode='' -while test "$#" != 0; do - case "$1" in - --amend) mode="amend" ;; - --cached) mode="cached" ;; - --clang-format) shift; clang_format="$1" ;; - --help) echo "$help"; exit 0 ;; - --modified) mode="modified" ;; - --tracked) mode="tracked" ;; - --) shift ; break ;; - -*) die "$usage" ;; - *) break ;; - esac - shift -done -test "$#" = 0 || die "$usage" - -# Find a default tool. -tools=' - clang-format-6.0 - clang-format -' -if test "x$clang_format" = "x"; then - for tool in $tools; do - if type -p "$tool" >/dev/null; then - clang_format="$tool" - break - fi - done -fi - -# Verify that we have a tool. -if ! type -p "$clang_format" >/dev/null; then - echo "Unable to locate a 'clang-format' tool." - exit 1 -fi - -if ! "$clang_format" --version | grep 'clang-format version 6\.0' >/dev/null 2>/dev/null; then - echo "clang-format version 6.0 is required (exactly)" - exit 1 -fi - -# Select listing mode. -case "$mode" in - '') echo "$usage"; exit 0 ;; - amend) git_ls='git diff-tree --diff-filter=AM --name-only HEAD -r --no-commit-id' ;; - cached) git_ls='git diff-index --diff-filter=AM --name-only HEAD --cached' ;; - modified) git_ls='git diff-index --diff-filter=AM --name-only HEAD' ;; - tracked) git_ls='git ls-files' ;; - *) die "invalid mode: $mode" ;; -esac - -# List files as selected above. -list_files() { - $git_ls | - - # Select sources with our attribute. - git check-attr --stdin format.clang-format | - sed -n '/: format\.clang-format: set$/ {s/:[^:]*:[^:]*$//p}' -} - -# Transform configured sources to protect @SYMBOLS@. -list_files | xargs -d '\n' sed -i 's/@\(KWSYS_[A-Z0-9_]\+\)@/x\1x/g' -# Update sources in-place. -list_files | xargs -d '\n' "$clang_format" -i -# Transform configured sources to restore @SYMBOLS@. -list_files | xargs -d '\n' sed -i 's/x\(KWSYS_[A-Z0-9_]\+\)x/@\1@/g' diff --git a/test/API/driver/kwsys/hash_fun.hxx.in b/test/API/driver/kwsys/hash_fun.hxx.in deleted file mode 100644 index 8626c2aa2a1..00000000000 --- a/test/API/driver/kwsys/hash_fun.hxx.in +++ /dev/null @@ -1,166 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -/* - * Copyright (c) 1996 - * Silicon Graphics Computer Systems, Inc. - * - * Permission to use, copy, modify, distribute and sell this software - * and its documentation for any purpose is hereby granted without fee, - * provided that the above copyright notice appear in all copies and - * that both that copyright notice and this permission notice appear - * in supporting documentation. Silicon Graphics makes no - * representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied warranty. - * - * - * Copyright (c) 1994 - * Hewlett-Packard Company - * - * Permission to use, copy, modify, distribute and sell this software - * and its documentation for any purpose is hereby granted without fee, - * provided that the above copyright notice appear in all copies and - * that both that copyright notice and this permission notice appear - * in supporting documentation. Hewlett-Packard Company makes no - * representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied warranty. 
- * - */ -#ifndef @KWSYS_NAMESPACE@_hash_fun_hxx -#define @KWSYS_NAMESPACE@_hash_fun_hxx - -#include <@KWSYS_NAMESPACE@/Configure.hxx> - -#include // size_t -#include - -namespace @KWSYS_NAMESPACE@ { - -template -struct hash -{ -}; - -inline size_t _stl_hash_string(const char* __s) -{ - unsigned long __h = 0; - for (; *__s; ++__s) - __h = 5 * __h + *__s; - - return size_t(__h); -} - -template <> -struct hash -{ - size_t operator()(const char* __s) const { return _stl_hash_string(__s); } -}; - -template <> -struct hash -{ - size_t operator()(const char* __s) const { return _stl_hash_string(__s); } -}; - -template <> -struct hash -{ - size_t operator()(const std::string& __s) const - { - return _stl_hash_string(__s.c_str()); - } -}; - -#if !defined(__BORLANDC__) -template <> -struct hash -{ - size_t operator()(const std::string& __s) const - { - return _stl_hash_string(__s.c_str()); - } -}; -#endif - -template <> -struct hash -{ - size_t operator()(char __x) const { return __x; } -}; - -template <> -struct hash -{ - size_t operator()(unsigned char __x) const { return __x; } -}; - -template <> -struct hash -{ - size_t operator()(unsigned char __x) const { return __x; } -}; - -template <> -struct hash -{ - size_t operator()(short __x) const { return __x; } -}; - -template <> -struct hash -{ - size_t operator()(unsigned short __x) const { return __x; } -}; - -template <> -struct hash -{ - size_t operator()(int __x) const { return __x; } -}; - -template <> -struct hash -{ - size_t operator()(unsigned int __x) const { return __x; } -}; - -template <> -struct hash -{ - size_t operator()(long __x) const { return __x; } -}; - -template <> -struct hash -{ - size_t operator()(unsigned long __x) const { return __x; } -}; - -// use long long or __int64 -#if @KWSYS_USE_LONG_LONG@ -template <> -struct hash -{ - size_t operator()(long long __x) const { return __x; } -}; - -template <> -struct hash -{ - size_t operator()(unsigned long long __x) const { return __x; } -}; -#elif @KWSYS_USE___INT64@ -template <> -struct hash<__int64> -{ - size_t operator()(__int64 __x) const { return __x; } -}; -template <> -struct hash -{ - size_t operator()(unsigned __int64 __x) const { return __x; } -}; -#endif // use long long or __int64 - -} // namespace @KWSYS_NAMESPACE@ - -#endif diff --git a/test/API/driver/kwsys/hash_map.hxx.in b/test/API/driver/kwsys/hash_map.hxx.in deleted file mode 100644 index 5f04e9c1680..00000000000 --- a/test/API/driver/kwsys/hash_map.hxx.in +++ /dev/null @@ -1,423 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -/* - * Copyright (c) 1996 - * Silicon Graphics Computer Systems, Inc. - * - * Permission to use, copy, modify, distribute and sell this software - * and its documentation for any purpose is hereby granted without fee, - * provided that the above copyright notice appear in all copies and - * that both that copyright notice and this permission notice appear - * in supporting documentation. Silicon Graphics makes no - * representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied warranty. 
- * - * - * Copyright (c) 1994 - * Hewlett-Packard Company - * - * Permission to use, copy, modify, distribute and sell this software - * and its documentation for any purpose is hereby granted without fee, - * provided that the above copyright notice appear in all copies and - * that both that copyright notice and this permission notice appear - * in supporting documentation. Hewlett-Packard Company makes no - * representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied warranty. - * - */ -#ifndef @KWSYS_NAMESPACE@_hash_map_hxx -#define @KWSYS_NAMESPACE@_hash_map_hxx - -#include <@KWSYS_NAMESPACE@/hashtable.hxx> - -#include <@KWSYS_NAMESPACE@/hash_fun.hxx> - -#include // equal_to - -#if defined(_MSC_VER) -# pragma warning(push) -# pragma warning(disable : 4284) -# pragma warning(disable : 4786) -#endif - -#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32) -# pragma set woff 1174 -# pragma set woff 1375 -#endif - -namespace @KWSYS_NAMESPACE@ { - -// select1st is an extension: it is not part of the standard. -template -struct hash_select1st -{ - const T1& operator()(const std::pair& __x) const - { - return __x.first; - } -}; - -// Forward declaration of equality operator; needed for friend declaration. - -template , - class _EqualKey = std::equal_to<_Key>, - class _Alloc = std::allocator > -class hash_map; - -template -bool operator==(const hash_map<_Key, _Tp, _HashFn, _EqKey, _Alloc>&, - const hash_map<_Key, _Tp, _HashFn, _EqKey, _Alloc>&); - -template -class hash_map -{ -private: - typedef hashtable, _Key, _HashFcn, - hash_select1st, _EqualKey, _Alloc> - _Ht; - _Ht _M_ht; - -public: - typedef typename _Ht::key_type key_type; - typedef _Tp data_type; - typedef _Tp mapped_type; - typedef typename _Ht::value_type value_type; - typedef typename _Ht::hasher hasher; - typedef typename _Ht::key_equal key_equal; - - typedef typename _Ht::size_type size_type; - typedef typename _Ht::difference_type difference_type; - typedef typename _Ht::pointer pointer; - typedef typename _Ht::const_pointer const_pointer; - typedef typename _Ht::reference reference; - typedef typename _Ht::const_reference const_reference; - - typedef typename _Ht::iterator iterator; - typedef typename _Ht::const_iterator const_iterator; - - typedef typename _Ht::allocator_type allocator_type; - - hasher hash_funct() const { return _M_ht.hash_funct(); } - key_equal key_eq() const { return _M_ht.key_eq(); } - allocator_type get_allocator() const { return _M_ht.get_allocator(); } - -public: - hash_map() - : _M_ht(100, hasher(), key_equal(), allocator_type()) - { - } - explicit hash_map(size_type __n) - : _M_ht(__n, hasher(), key_equal(), allocator_type()) - { - } - hash_map(size_type __n, const hasher& __hf) - : _M_ht(__n, __hf, key_equal(), allocator_type()) - { - } - hash_map(size_type __n, const hasher& __hf, const key_equal& __eql, - const allocator_type& __a = allocator_type()) - : _M_ht(__n, __hf, __eql, __a) - { - } - - template - hash_map(_InputIterator __f, _InputIterator __l) - : _M_ht(100, hasher(), key_equal(), allocator_type()) - { - _M_ht.insert_unique(__f, __l); - } - template - hash_map(_InputIterator __f, _InputIterator __l, size_type __n) - : _M_ht(__n, hasher(), key_equal(), allocator_type()) - { - _M_ht.insert_unique(__f, __l); - } - template - hash_map(_InputIterator __f, _InputIterator __l, size_type __n, - const hasher& __hf) - : _M_ht(__n, __hf, key_equal(), allocator_type()) - { - _M_ht.insert_unique(__f, __l); - } - 
template - hash_map(_InputIterator __f, _InputIterator __l, size_type __n, - const hasher& __hf, const key_equal& __eql, - const allocator_type& __a = allocator_type()) - : _M_ht(__n, __hf, __eql, __a) - { - _M_ht.insert_unique(__f, __l); - } - -public: - size_type size() const { return _M_ht.size(); } - size_type max_size() const { return _M_ht.max_size(); } - bool empty() const { return _M_ht.empty(); } - void swap(hash_map& __hs) { _M_ht.swap(__hs._M_ht); } - - friend bool operator==<>(const hash_map&, const hash_map&); - - iterator begin() { return _M_ht.begin(); } - iterator end() { return _M_ht.end(); } - const_iterator begin() const { return _M_ht.begin(); } - const_iterator end() const { return _M_ht.end(); } - -public: - std::pair insert(const value_type& __obj) - { - return _M_ht.insert_unique(__obj); - } - template - void insert(_InputIterator __f, _InputIterator __l) - { - _M_ht.insert_unique(__f, __l); - } - std::pair insert_noresize(const value_type& __obj) - { - return _M_ht.insert_unique_noresize(__obj); - } - - iterator find(const key_type& __key) { return _M_ht.find(__key); } - const_iterator find(const key_type& __key) const - { - return _M_ht.find(__key); - } - - _Tp& operator[](const key_type& __key) - { - return _M_ht.find_or_insert(value_type(__key, _Tp())).second; - } - - size_type count(const key_type& __key) const { return _M_ht.count(__key); } - - std::pair equal_range(const key_type& __key) - { - return _M_ht.equal_range(__key); - } - std::pair equal_range( - const key_type& __key) const - { - return _M_ht.equal_range(__key); - } - - size_type erase(const key_type& __key) { return _M_ht.erase(__key); } - void erase(iterator __it) { _M_ht.erase(__it); } - void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); } - void clear() { _M_ht.clear(); } - - void resize(size_type __hint) { _M_ht.resize(__hint); } - size_type bucket_count() const { return _M_ht.bucket_count(); } - size_type max_bucket_count() const { return _M_ht.max_bucket_count(); } - size_type elems_in_bucket(size_type __n) const - { - return _M_ht.elems_in_bucket(__n); - } -}; - -template -bool operator==(const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1, - const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2) -{ - return __hm1._M_ht == __hm2._M_ht; -} - -template -inline bool operator!=( - const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1, - const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2) -{ - return !(__hm1 == __hm2); -} - -template -inline void swap(hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1, - hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2) -{ - __hm1.swap(__hm2); -} - -// Forward declaration of equality operator; needed for friend declaration. 
- -template , - class _EqualKey = std::equal_to<_Key>, - class _Alloc = std::allocator > -class hash_multimap; - -template -bool operator==(const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm1, - const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm2); - -template -class hash_multimap -{ -private: - typedef hashtable, _Key, _HashFcn, - hash_select1st, _EqualKey, _Alloc> - _Ht; - _Ht _M_ht; - -public: - typedef typename _Ht::key_type key_type; - typedef _Tp data_type; - typedef _Tp mapped_type; - typedef typename _Ht::value_type value_type; - typedef typename _Ht::hasher hasher; - typedef typename _Ht::key_equal key_equal; - - typedef typename _Ht::size_type size_type; - typedef typename _Ht::difference_type difference_type; - typedef typename _Ht::pointer pointer; - typedef typename _Ht::const_pointer const_pointer; - typedef typename _Ht::reference reference; - typedef typename _Ht::const_reference const_reference; - - typedef typename _Ht::iterator iterator; - typedef typename _Ht::const_iterator const_iterator; - - typedef typename _Ht::allocator_type allocator_type; - - hasher hash_funct() const { return _M_ht.hash_funct(); } - key_equal key_eq() const { return _M_ht.key_eq(); } - allocator_type get_allocator() const { return _M_ht.get_allocator(); } - -public: - hash_multimap() - : _M_ht(100, hasher(), key_equal(), allocator_type()) - { - } - explicit hash_multimap(size_type __n) - : _M_ht(__n, hasher(), key_equal(), allocator_type()) - { - } - hash_multimap(size_type __n, const hasher& __hf) - : _M_ht(__n, __hf, key_equal(), allocator_type()) - { - } - hash_multimap(size_type __n, const hasher& __hf, const key_equal& __eql, - const allocator_type& __a = allocator_type()) - : _M_ht(__n, __hf, __eql, __a) - { - } - - template - hash_multimap(_InputIterator __f, _InputIterator __l) - : _M_ht(100, hasher(), key_equal(), allocator_type()) - { - _M_ht.insert_equal(__f, __l); - } - template - hash_multimap(_InputIterator __f, _InputIterator __l, size_type __n) - : _M_ht(__n, hasher(), key_equal(), allocator_type()) - { - _M_ht.insert_equal(__f, __l); - } - template - hash_multimap(_InputIterator __f, _InputIterator __l, size_type __n, - const hasher& __hf) - : _M_ht(__n, __hf, key_equal(), allocator_type()) - { - _M_ht.insert_equal(__f, __l); - } - template - hash_multimap(_InputIterator __f, _InputIterator __l, size_type __n, - const hasher& __hf, const key_equal& __eql, - const allocator_type& __a = allocator_type()) - : _M_ht(__n, __hf, __eql, __a) - { - _M_ht.insert_equal(__f, __l); - } - -public: - size_type size() const { return _M_ht.size(); } - size_type max_size() const { return _M_ht.max_size(); } - bool empty() const { return _M_ht.empty(); } - void swap(hash_multimap& __hs) { _M_ht.swap(__hs._M_ht); } - - friend bool operator==<>(const hash_multimap&, const hash_multimap&); - - iterator begin() { return _M_ht.begin(); } - iterator end() { return _M_ht.end(); } - const_iterator begin() const { return _M_ht.begin(); } - const_iterator end() const { return _M_ht.end(); } - -public: - iterator insert(const value_type& __obj) - { - return _M_ht.insert_equal(__obj); - } - template - void insert(_InputIterator __f, _InputIterator __l) - { - _M_ht.insert_equal(__f, __l); - } - iterator insert_noresize(const value_type& __obj) - { - return _M_ht.insert_equal_noresize(__obj); - } - - iterator find(const key_type& __key) { return _M_ht.find(__key); } - const_iterator find(const key_type& __key) const - { - return _M_ht.find(__key); - } - - size_type count(const key_type& __key) 
const { return _M_ht.count(__key); } - - std::pair<iterator, iterator> equal_range(const key_type& __key) - { - return _M_ht.equal_range(__key); - } - std::pair<const_iterator, const_iterator> equal_range( - const key_type& __key) const - { - return _M_ht.equal_range(__key); - } - - size_type erase(const key_type& __key) { return _M_ht.erase(__key); } - void erase(iterator __it) { _M_ht.erase(__it); } - void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); } - void clear() { _M_ht.clear(); } - -public: - void resize(size_type __hint) { _M_ht.resize(__hint); } - size_type bucket_count() const { return _M_ht.bucket_count(); } - size_type max_bucket_count() const { return _M_ht.max_bucket_count(); } - size_type elems_in_bucket(size_type __n) const - { - return _M_ht.elems_in_bucket(__n); - } -}; - -template <class _Key, class _Tp, class _HF, class _EqKey, class _Alloc> -bool operator==(const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm1, - const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm2) -{ - return __hm1._M_ht == __hm2._M_ht; -} - -template <class _Key, class _Tp, class _HF, class _EqKey, class _Alloc> -inline bool operator!=( - const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm1, - const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm2) -{ - return !(__hm1 == __hm2); -} - -template <class _Key, class _Tp, class _HashFcn, class _EqlKey, class _Alloc> -inline void swap(hash_multimap<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1, - hash_multimap<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2) -{ - __hm1.swap(__hm2); -} - -} // namespace @KWSYS_NAMESPACE@ - -#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32) -# pragma reset woff 1174 -# pragma reset woff 1375 -#endif - -#if defined(_MSC_VER) -# pragma warning(pop) -#endif - -#endif diff --git a/test/API/driver/kwsys/hash_set.hxx.in b/test/API/driver/kwsys/hash_set.hxx.in deleted file mode 100644 index f4a37eebdb3..00000000000 --- a/test/API/driver/kwsys/hash_set.hxx.in +++ /dev/null @@ -1,392 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -/* - * Copyright (c) 1996 - * Silicon Graphics Computer Systems, Inc. - * - * Permission to use, copy, modify, distribute and sell this software - * and its documentation for any purpose is hereby granted without fee, - * provided that the above copyright notice appear in all copies and - * that both that copyright notice and this permission notice appear - * in supporting documentation. Silicon Graphics makes no - * representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied warranty. - * - * - * Copyright (c) 1994 - * Hewlett-Packard Company - * - * Permission to use, copy, modify, distribute and sell this software - * and its documentation for any purpose is hereby granted without fee, - * provided that the above copyright notice appear in all copies and - * that both that copyright notice and this permission notice appear - * in supporting documentation. Hewlett-Packard Company makes no - * representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied warranty.
- * - */ -#ifndef @KWSYS_NAMESPACE@_hash_set_hxx -#define @KWSYS_NAMESPACE@_hash_set_hxx - -#include <@KWSYS_NAMESPACE@/hashtable.hxx> - -#include <@KWSYS_NAMESPACE@/hash_fun.hxx> - -#include // equal_to - -#if defined(_MSC_VER) -# pragma warning(push) -# pragma warning(disable : 4284) -# pragma warning(disable : 4786) -#endif - -#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32) -# pragma set woff 1174 -# pragma set woff 1375 -#endif - -namespace @KWSYS_NAMESPACE@ { - -// identity is an extension: it is not part of the standard. -template -struct _Identity -{ - const _Tp& operator()(const _Tp& __x) const { return __x; } -}; - -// Forward declaration of equality operator; needed for friend declaration. - -template , - class _EqualKey = std::equal_to<_Value>, - class _Alloc = std::allocator > -class hash_set; - -template -bool operator==(const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs1, - const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs2); - -template -class hash_set -{ -private: - typedef hashtable<_Value, _Value, _HashFcn, _Identity<_Value>, _EqualKey, - _Alloc> - _Ht; - _Ht _M_ht; - -public: - typedef typename _Ht::key_type key_type; - typedef typename _Ht::value_type value_type; - typedef typename _Ht::hasher hasher; - typedef typename _Ht::key_equal key_equal; - - typedef typename _Ht::size_type size_type; - typedef typename _Ht::difference_type difference_type; - typedef typename _Ht::const_pointer pointer; - typedef typename _Ht::const_pointer const_pointer; - typedef typename _Ht::const_reference reference; - typedef typename _Ht::const_reference const_reference; - - typedef typename _Ht::const_iterator iterator; - typedef typename _Ht::const_iterator const_iterator; - - typedef typename _Ht::allocator_type allocator_type; - - hasher hash_funct() const { return _M_ht.hash_funct(); } - key_equal key_eq() const { return _M_ht.key_eq(); } - allocator_type get_allocator() const { return _M_ht.get_allocator(); } - -public: - hash_set() - : _M_ht(100, hasher(), key_equal(), allocator_type()) - { - } - explicit hash_set(size_type __n) - : _M_ht(__n, hasher(), key_equal(), allocator_type()) - { - } - hash_set(size_type __n, const hasher& __hf) - : _M_ht(__n, __hf, key_equal(), allocator_type()) - { - } - hash_set(size_type __n, const hasher& __hf, const key_equal& __eql, - const allocator_type& __a = allocator_type()) - : _M_ht(__n, __hf, __eql, __a) - { - } - - template - hash_set(_InputIterator __f, _InputIterator __l) - : _M_ht(100, hasher(), key_equal(), allocator_type()) - { - _M_ht.insert_unique(__f, __l); - } - template - hash_set(_InputIterator __f, _InputIterator __l, size_type __n) - : _M_ht(__n, hasher(), key_equal(), allocator_type()) - { - _M_ht.insert_unique(__f, __l); - } - template - hash_set(_InputIterator __f, _InputIterator __l, size_type __n, - const hasher& __hf) - : _M_ht(__n, __hf, key_equal(), allocator_type()) - { - _M_ht.insert_unique(__f, __l); - } - template - hash_set(_InputIterator __f, _InputIterator __l, size_type __n, - const hasher& __hf, const key_equal& __eql, - const allocator_type& __a = allocator_type()) - : _M_ht(__n, __hf, __eql, __a) - { - _M_ht.insert_unique(__f, __l); - } - -public: - size_type size() const { return _M_ht.size(); } - size_type max_size() const { return _M_ht.max_size(); } - bool empty() const { return _M_ht.empty(); } - void swap(hash_set& __hs) { _M_ht.swap(__hs._M_ht); } - - friend bool operator==<>(const hash_set&, const hash_set&); - - iterator begin() const { return _M_ht.begin(); } 
- iterator end() const { return _M_ht.end(); } - -public: - std::pair insert(const value_type& __obj) - { - typedef typename _Ht::iterator _Ht_iterator; - std::pair<_Ht_iterator, bool> __p = _M_ht.insert_unique(__obj); - return std::pair(__p.first, __p.second); - } - template - void insert(_InputIterator __f, _InputIterator __l) - { - _M_ht.insert_unique(__f, __l); - } - std::pair insert_noresize(const value_type& __obj) - { - typedef typename _Ht::iterator _Ht_iterator; - std::pair<_Ht_iterator, bool> __p = _M_ht.insert_unique_noresize(__obj); - return std::pair(__p.first, __p.second); - } - - iterator find(const key_type& __key) const { return _M_ht.find(__key); } - - size_type count(const key_type& __key) const { return _M_ht.count(__key); } - - std::pair equal_range(const key_type& __key) const - { - return _M_ht.equal_range(__key); - } - - size_type erase(const key_type& __key) { return _M_ht.erase(__key); } - void erase(iterator __it) { _M_ht.erase(__it); } - void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); } - void clear() { _M_ht.clear(); } - -public: - void resize(size_type __hint) { _M_ht.resize(__hint); } - size_type bucket_count() const { return _M_ht.bucket_count(); } - size_type max_bucket_count() const { return _M_ht.max_bucket_count(); } - size_type elems_in_bucket(size_type __n) const - { - return _M_ht.elems_in_bucket(__n); - } -}; - -template -bool operator==(const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs1, - const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs2) -{ - return __hs1._M_ht == __hs2._M_ht; -} - -template -inline bool operator!=( - const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs1, - const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs2) -{ - return !(__hs1 == __hs2); -} - -template -inline void swap(hash_set<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1, - hash_set<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2) -{ - __hs1.swap(__hs2); -} - -template , - class _EqualKey = std::equal_to<_Value>, - class _Alloc = std::allocator > -class hash_multiset; - -template -bool operator==(const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1, - const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2); - -template -class hash_multiset -{ -private: - typedef hashtable<_Value, _Value, _HashFcn, _Identity<_Value>, _EqualKey, - _Alloc> - _Ht; - _Ht _M_ht; - -public: - typedef typename _Ht::key_type key_type; - typedef typename _Ht::value_type value_type; - typedef typename _Ht::hasher hasher; - typedef typename _Ht::key_equal key_equal; - - typedef typename _Ht::size_type size_type; - typedef typename _Ht::difference_type difference_type; - typedef typename _Ht::const_pointer pointer; - typedef typename _Ht::const_pointer const_pointer; - typedef typename _Ht::const_reference reference; - typedef typename _Ht::const_reference const_reference; - - typedef typename _Ht::const_iterator iterator; - typedef typename _Ht::const_iterator const_iterator; - - typedef typename _Ht::allocator_type allocator_type; - - hasher hash_funct() const { return _M_ht.hash_funct(); } - key_equal key_eq() const { return _M_ht.key_eq(); } - allocator_type get_allocator() const { return _M_ht.get_allocator(); } - -public: - hash_multiset() - : _M_ht(100, hasher(), key_equal(), allocator_type()) - { - } - explicit hash_multiset(size_type __n) - : _M_ht(__n, hasher(), key_equal(), allocator_type()) - { - } - hash_multiset(size_type __n, const hasher& __hf) - : _M_ht(__n, __hf, key_equal(), allocator_type()) - { - } - hash_multiset(size_type __n, const 
hasher& __hf, const key_equal& __eql, - const allocator_type& __a = allocator_type()) - : _M_ht(__n, __hf, __eql, __a) - { - } - - template - hash_multiset(_InputIterator __f, _InputIterator __l) - : _M_ht(100, hasher(), key_equal(), allocator_type()) - { - _M_ht.insert_equal(__f, __l); - } - template - hash_multiset(_InputIterator __f, _InputIterator __l, size_type __n) - : _M_ht(__n, hasher(), key_equal(), allocator_type()) - { - _M_ht.insert_equal(__f, __l); - } - template - hash_multiset(_InputIterator __f, _InputIterator __l, size_type __n, - const hasher& __hf) - : _M_ht(__n, __hf, key_equal(), allocator_type()) - { - _M_ht.insert_equal(__f, __l); - } - template - hash_multiset(_InputIterator __f, _InputIterator __l, size_type __n, - const hasher& __hf, const key_equal& __eql, - const allocator_type& __a = allocator_type()) - : _M_ht(__n, __hf, __eql, __a) - { - _M_ht.insert_equal(__f, __l); - } - -public: - size_type size() const { return _M_ht.size(); } - size_type max_size() const { return _M_ht.max_size(); } - bool empty() const { return _M_ht.empty(); } - void swap(hash_multiset& hs) { _M_ht.swap(hs._M_ht); } - - friend bool operator==<>(const hash_multiset&, const hash_multiset&); - - iterator begin() const { return _M_ht.begin(); } - iterator end() const { return _M_ht.end(); } - -public: - iterator insert(const value_type& __obj) - { - return _M_ht.insert_equal(__obj); - } - template - void insert(_InputIterator __f, _InputIterator __l) - { - _M_ht.insert_equal(__f, __l); - } - iterator insert_noresize(const value_type& __obj) - { - return _M_ht.insert_equal_noresize(__obj); - } - - iterator find(const key_type& __key) const { return _M_ht.find(__key); } - - size_type count(const key_type& __key) const { return _M_ht.count(__key); } - - std::pair equal_range(const key_type& __key) const - { - return _M_ht.equal_range(__key); - } - - size_type erase(const key_type& __key) { return _M_ht.erase(__key); } - void erase(iterator __it) { _M_ht.erase(__it); } - void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); } - void clear() { _M_ht.clear(); } - -public: - void resize(size_type __hint) { _M_ht.resize(__hint); } - size_type bucket_count() const { return _M_ht.bucket_count(); } - size_type max_bucket_count() const { return _M_ht.max_bucket_count(); } - size_type elems_in_bucket(size_type __n) const - { - return _M_ht.elems_in_bucket(__n); - } -}; - -template -bool operator==(const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1, - const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2) -{ - return __hs1._M_ht == __hs2._M_ht; -} - -template -inline bool operator!=( - const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1, - const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2) -{ - return !(__hs1 == __hs2); -} - -template -inline void swap(hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1, - hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2) -{ - __hs1.swap(__hs2); -} - -} // namespace @KWSYS_NAMESPACE@ - -#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32) -# pragma reset woff 1174 -# pragma reset woff 1375 -#endif - -#if defined(_MSC_VER) -# pragma warning(pop) -#endif - -#endif diff --git a/test/API/driver/kwsys/hashtable.hxx.in b/test/API/driver/kwsys/hashtable.hxx.in deleted file mode 100644 index 8c4b0025f53..00000000000 --- a/test/API/driver/kwsys/hashtable.hxx.in +++ /dev/null @@ -1,995 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. 
See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -/* - * Copyright (c) 1996 - * Silicon Graphics Computer Systems, Inc. - * - * Permission to use, copy, modify, distribute and sell this software - * and its documentation for any purpose is hereby granted without fee, - * provided that the above copyright notice appear in all copies and - * that both that copyright notice and this permission notice appear - * in supporting documentation. Silicon Graphics makes no - * representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied warranty. - * - * - * Copyright (c) 1994 - * Hewlett-Packard Company - * - * Permission to use, copy, modify, distribute and sell this software - * and its documentation for any purpose is hereby granted without fee, - * provided that the above copyright notice appear in all copies and - * that both that copyright notice and this permission notice appear - * in supporting documentation. Hewlett-Packard Company makes no - * representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied warranty. - * - */ -#ifdef __BORLANDC__ -# pragma warn - 8027 /* 'for' not inlined. */ -# pragma warn - 8026 /* 'exception' not inlined. */ -#endif - -#ifndef @KWSYS_NAMESPACE@_hashtable_hxx -# define @KWSYS_NAMESPACE@_hashtable_hxx - -# include <@KWSYS_NAMESPACE@/Configure.hxx> - -# include <algorithm> // lower_bound -# include <iterator> // iterator_traits -# include <memory> // allocator -# include <stddef.h> // size_t -# include <utility> // pair -# include <vector> // vector - -# if defined(_MSC_VER) -# pragma warning(push) -# pragma warning(disable : 4284) -# pragma warning(disable : 4786) -# pragma warning(disable : 4512) /* no assignment operator for class */ -# endif -# if defined(__sgi) && !defined(__GNUC__) -# pragma set woff 3970 /* pointer to int conversion */ 3321 3968 -# endif - -// In C++11, clang will warn about using dynamic exception specifications -// as they are deprecated. But as this class is trying to faithfully -// mimic unordered_set and unordered_map, we want to keep the 'throw()' -// decorations below. So we suppress the warning.
-# if defined(__clang__) && defined(__has_warning) -# if __has_warning("-Wdeprecated") -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wdeprecated" -# endif -# endif - -namespace @KWSYS_NAMESPACE@ { - -template -struct _Hashtable_node -{ - _Hashtable_node* _M_next; - _Val _M_val; - void public_method_to_quiet_warning_about_all_methods_private(); - -private: - void operator=(_Hashtable_node<_Val> const&) = delete; -}; - -template > -class hashtable; - -template -struct _Hashtable_iterator; - -template -struct _Hashtable_const_iterator; - -template -struct _Hashtable_iterator -{ - typedef hashtable<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, _Alloc> - _Hashtable; - typedef _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, - _Alloc> - iterator; - typedef _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey, - _EqualKey, _Alloc> - const_iterator; - typedef _Hashtable_node<_Val> _Node; - - typedef std::forward_iterator_tag iterator_category; - typedef _Val value_type; - typedef ptrdiff_t difference_type; - typedef size_t size_type; - typedef _Val& reference; - typedef _Val* pointer; - - _Node* _M_cur; - _Hashtable* _M_ht; - - _Hashtable_iterator(_Node* __n, _Hashtable* __tab) - : _M_cur(__n) - , _M_ht(__tab) - { - } - _Hashtable_iterator() {} - reference operator*() const { return _M_cur->_M_val; } - pointer operator->() const { return &(operator*()); } - iterator& operator++(); - iterator operator++(int); - bool operator==(const iterator& __it) const { return _M_cur == __it._M_cur; } - bool operator!=(const iterator& __it) const { return _M_cur != __it._M_cur; } -}; - -template -struct _Hashtable_const_iterator -{ - typedef hashtable<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, _Alloc> - _Hashtable; - typedef _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, - _Alloc> - iterator; - typedef _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey, - _EqualKey, _Alloc> - const_iterator; - typedef _Hashtable_node<_Val> _Node; - - typedef std::forward_iterator_tag iterator_category; - typedef _Val value_type; - typedef ptrdiff_t difference_type; - typedef size_t size_type; - typedef const _Val& reference; - typedef const _Val* pointer; - - const _Node* _M_cur; - const _Hashtable* _M_ht; - - _Hashtable_const_iterator(const _Node* __n, const _Hashtable* __tab) - : _M_cur(__n) - , _M_ht(__tab) - { - } - _Hashtable_const_iterator() {} - _Hashtable_const_iterator(const iterator& __it) - : _M_cur(__it._M_cur) - , _M_ht(__it._M_ht) - { - } - reference operator*() const { return _M_cur->_M_val; } - pointer operator->() const { return &(operator*()); } - const_iterator& operator++(); - const_iterator operator++(int); - bool operator==(const const_iterator& __it) const - { - return _M_cur == __it._M_cur; - } - bool operator!=(const const_iterator& __it) const - { - return _M_cur != __it._M_cur; - } -}; - -// Note: assumes long is at least 32 bits. 
-enum -{ - _stl_num_primes = 31 -}; - -// create a function with a static local to that function that returns -// the static -static inline const unsigned long* get_stl_prime_list() -{ - - static const unsigned long _stl_prime_list[_stl_num_primes] = { - 5ul, 11ul, 23ul, 53ul, 97ul, - 193ul, 389ul, 769ul, 1543ul, 3079ul, - 6151ul, 12289ul, 24593ul, 49157ul, 98317ul, - 196613ul, 393241ul, 786433ul, 1572869ul, 3145739ul, - 6291469ul, 12582917ul, 25165843ul, 50331653ul, 100663319ul, - 201326611ul, 402653189ul, 805306457ul, 1610612741ul, 3221225473ul, - 4294967291ul - }; - - return &_stl_prime_list[0]; -} - -static inline size_t _stl_next_prime(size_t __n) -{ - const unsigned long* __first = get_stl_prime_list(); - const unsigned long* __last = get_stl_prime_list() + (int)_stl_num_primes; - const unsigned long* pos = std::lower_bound(__first, __last, __n); - return pos == __last ? *(__last - 1) : *pos; -} - -// Forward declaration of operator==. - -template -class hashtable; - -template -bool operator==(const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht1, - const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht2); - -// Hashtables handle allocators a bit differently than other containers -// do. If we're using standard-conforming allocators, then a hashtable -// unconditionally has a member variable to hold its allocator, even if -// it so happens that all instances of the allocator type are identical. -// This is because, for hashtables, this extra storage is negligible. -// Additionally, a base class wouldn't serve any other purposes; it -// wouldn't, for example, simplify the exception-handling code. - -template -class hashtable -{ -public: - typedef _Key key_type; - typedef _Val value_type; - typedef _HashFcn hasher; - typedef _EqualKey key_equal; - - typedef size_t size_type; - typedef ptrdiff_t difference_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - - hasher hash_funct() const { return _M_hash; } - key_equal key_eq() const { return _M_equals; } - -private: - typedef _Hashtable_node<_Val> _Node; - -public: - typedef typename _Alloc::template rebind<_Val>::other allocator_type; - allocator_type get_allocator() const { return _M_node_allocator; } - -private: - typedef - typename _Alloc::template rebind<_Node>::other _M_node_allocator_type; - typedef - typename _Alloc::template rebind<_Node*>::other _M_node_ptr_allocator_type; - typedef std::vector<_Node*, _M_node_ptr_allocator_type> _M_buckets_type; - -private: - _M_node_allocator_type _M_node_allocator; - hasher _M_hash; - key_equal _M_equals; - _ExtractKey _M_get_key; - _M_buckets_type _M_buckets; - size_type _M_num_elements; - - _Node* _M_get_node() { return _M_node_allocator.allocate(1); } - void _M_put_node(_Node* __p) { _M_node_allocator.deallocate(__p, 1); } - -public: - typedef _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, - _Alloc> - iterator; - typedef _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey, - _EqualKey, _Alloc> - const_iterator; - - friend struct _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey, - _EqualKey, _Alloc>; - friend struct _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey, - _EqualKey, _Alloc>; - -public: - hashtable(size_type __n, const _HashFcn& __hf, const _EqualKey& __eql, - const _ExtractKey& __ext, - const allocator_type& __a = allocator_type()) - : _M_node_allocator(__a) - , _M_hash(__hf) - , _M_equals(__eql) - , _M_get_key(__ext) - , 
_M_buckets(__a) - , _M_num_elements(0) - { - _M_initialize_buckets(__n); - } - - hashtable(size_type __n, const _HashFcn& __hf, const _EqualKey& __eql, - const allocator_type& __a = allocator_type()) - : _M_node_allocator(__a) - , _M_hash(__hf) - , _M_equals(__eql) - , _M_get_key(_ExtractKey()) - , _M_buckets(__a) - , _M_num_elements(0) - { - _M_initialize_buckets(__n); - } - - hashtable(const hashtable& __ht) - : _M_node_allocator(__ht.get_allocator()) - , _M_hash(__ht._M_hash) - , _M_equals(__ht._M_equals) - , _M_get_key(__ht._M_get_key) - , _M_buckets(__ht.get_allocator()) - , _M_num_elements(0) - { - _M_copy_from(__ht); - } - - hashtable& operator=(const hashtable& __ht) - { - if (&__ht != this) { - clear(); - _M_hash = __ht._M_hash; - _M_equals = __ht._M_equals; - _M_get_key = __ht._M_get_key; - _M_copy_from(__ht); - } - return *this; - } - - ~hashtable() { clear(); } - - size_type size() const { return _M_num_elements; } - size_type max_size() const { return size_type(-1); } - bool empty() const { return size() == 0; } - - void swap(hashtable& __ht) - { - std::swap(_M_hash, __ht._M_hash); - std::swap(_M_equals, __ht._M_equals); - std::swap(_M_get_key, __ht._M_get_key); - _M_buckets.swap(__ht._M_buckets); - std::swap(_M_num_elements, __ht._M_num_elements); - } - - iterator begin() - { - for (size_type __n = 0; __n < _M_buckets.size(); ++__n) - if (_M_buckets[__n]) - return iterator(_M_buckets[__n], this); - return end(); - } - - iterator end() { return iterator(nullptr, this); } - - const_iterator begin() const - { - for (size_type __n = 0; __n < _M_buckets.size(); ++__n) - if (_M_buckets[__n]) - return const_iterator(_M_buckets[__n], this); - return end(); - } - - const_iterator end() const { return const_iterator(nullptr, this); } - - friend bool operator==<>(const hashtable&, const hashtable&); - -public: - size_type bucket_count() const { return _M_buckets.size(); } - - size_type max_bucket_count() const - { - return get_stl_prime_list()[(int)_stl_num_primes - 1]; - } - - size_type elems_in_bucket(size_type __bucket) const - { - size_type __result = 0; - for (_Node* __cur = _M_buckets[__bucket]; __cur; __cur = __cur->_M_next) - __result += 1; - return __result; - } - - std::pair insert_unique(const value_type& __obj) - { - resize(_M_num_elements + 1); - return insert_unique_noresize(__obj); - } - - iterator insert_equal(const value_type& __obj) - { - resize(_M_num_elements + 1); - return insert_equal_noresize(__obj); - } - - std::pair insert_unique_noresize(const value_type& __obj); - iterator insert_equal_noresize(const value_type& __obj); - - template - void insert_unique(_InputIterator __f, _InputIterator __l) - { - insert_unique( - __f, __l, - typename std::iterator_traits<_InputIterator>::iterator_category()); - } - - template - void insert_equal(_InputIterator __f, _InputIterator __l) - { - insert_equal( - __f, __l, - typename std::iterator_traits<_InputIterator>::iterator_category()); - } - - template - void insert_unique(_InputIterator __f, _InputIterator __l, - std::input_iterator_tag) - { - for (; __f != __l; ++__f) - insert_unique(*__f); - } - - template - void insert_equal(_InputIterator __f, _InputIterator __l, - std::input_iterator_tag) - { - for (; __f != __l; ++__f) - insert_equal(*__f); - } - - template - void insert_unique(_ForwardIterator __f, _ForwardIterator __l, - std::forward_iterator_tag) - { - size_type __n = 0; - std::distance(__f, __l, __n); - resize(_M_num_elements + __n); - for (; __n > 0; --__n, ++__f) - insert_unique_noresize(*__f); - } - - template 
- void insert_equal(_ForwardIterator __f, _ForwardIterator __l, - std::forward_iterator_tag) - { - size_type __n = 0; - std::distance(__f, __l, __n); - resize(_M_num_elements + __n); - for (; __n > 0; --__n, ++__f) - insert_equal_noresize(*__f); - } - - reference find_or_insert(const value_type& __obj); - - iterator find(const key_type& __key) - { - size_type __n = _M_bkt_num_key(__key); - _Node* __first; - for (__first = _M_buckets[__n]; - __first && !_M_equals(_M_get_key(__first->_M_val), __key); - __first = __first->_M_next) { - } - return iterator(__first, this); - } - - const_iterator find(const key_type& __key) const - { - size_type __n = _M_bkt_num_key(__key); - const _Node* __first; - for (__first = _M_buckets[__n]; - __first && !_M_equals(_M_get_key(__first->_M_val), __key); - __first = __first->_M_next) { - } - return const_iterator(__first, this); - } - - size_type count(const key_type& __key) const - { - const size_type __n = _M_bkt_num_key(__key); - size_type __result = 0; - - for (const _Node* __cur = _M_buckets[__n]; __cur; __cur = __cur->_M_next) - if (_M_equals(_M_get_key(__cur->_M_val), __key)) - ++__result; - return __result; - } - - std::pair equal_range(const key_type& __key); - - std::pair equal_range( - const key_type& __key) const; - - size_type erase(const key_type& __key); - void erase(const iterator& __it); - void erase(iterator __first, iterator __last); - - void erase(const const_iterator& __it); - void erase(const_iterator __first, const_iterator __last); - - void resize(size_type __num_elements_hint); - void clear(); - -private: - size_type _M_next_size(size_type __n) const { return _stl_next_prime(__n); } - - void _M_initialize_buckets(size_type __n) - { - const size_type __n_buckets = _M_next_size(__n); - _M_buckets.reserve(__n_buckets); - _M_buckets.insert(_M_buckets.end(), __n_buckets, (_Node*)nullptr); - _M_num_elements = 0; - } - - size_type _M_bkt_num_key(const key_type& __key) const - { - return _M_bkt_num_key(__key, _M_buckets.size()); - } - - size_type _M_bkt_num(const value_type& __obj) const - { - return _M_bkt_num_key(_M_get_key(__obj)); - } - - size_type _M_bkt_num_key(const key_type& __key, size_t __n) const - { - return _M_hash(__key) % __n; - } - - size_type _M_bkt_num(const value_type& __obj, size_t __n) const - { - return _M_bkt_num_key(_M_get_key(__obj), __n); - } - - void construct(_Val* p, const _Val& v) { new (p) _Val(v); } - void destroy(_Val* p) - { - (void)p; - p->~_Val(); - } - - _Node* _M_new_node(const value_type& __obj) - { - _Node* __n = _M_get_node(); - __n->_M_next = nullptr; - try { - construct(&__n->_M_val, __obj); - return __n; - } catch (...) 
{ - _M_put_node(__n); - throw; - } - } - - void _M_delete_node(_Node* __n) - { - destroy(&__n->_M_val); - _M_put_node(__n); - } - - void _M_erase_bucket(const size_type __n, _Node* __first, _Node* __last); - void _M_erase_bucket(const size_type __n, _Node* __last); - - void _M_copy_from(const hashtable& __ht); -}; - -template -_Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>& -_Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++() -{ - const _Node* __old = _M_cur; - _M_cur = _M_cur->_M_next; - if (!_M_cur) { - size_type __bucket = _M_ht->_M_bkt_num(__old->_M_val); - while (!_M_cur && ++__bucket < _M_ht->_M_buckets.size()) - _M_cur = _M_ht->_M_buckets[__bucket]; - } - return *this; -} - -template -inline _Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All> -_Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++(int) -{ - iterator __tmp = *this; - ++*this; - return __tmp; -} - -template -_Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>& -_Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++() -{ - const _Node* __old = _M_cur; - _M_cur = _M_cur->_M_next; - if (!_M_cur) { - size_type __bucket = _M_ht->_M_bkt_num(__old->_M_val); - while (!_M_cur && ++__bucket < _M_ht->_M_buckets.size()) - _M_cur = _M_ht->_M_buckets[__bucket]; - } - return *this; -} - -template -inline _Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All> -_Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++(int) -{ - const_iterator __tmp = *this; - ++*this; - return __tmp; -} - -template -bool operator==(const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht1, - const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht2) -{ - typedef typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_Node _Node; - if (__ht1._M_buckets.size() != __ht2._M_buckets.size()) - return false; - for (int __n = 0; __n < __ht1._M_buckets.size(); ++__n) { - _Node* __cur1 = __ht1._M_buckets[__n]; - _Node* __cur2 = __ht2._M_buckets[__n]; - for (; __cur1 && __cur2 && __cur1->_M_val == __cur2->_M_val; - __cur1 = __cur1->_M_next, __cur2 = __cur2->_M_next) { - } - if (__cur1 || __cur2) - return false; - } - return true; -} - -template -inline bool operator!=(const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht1, - const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht2) -{ - return !(__ht1 == __ht2); -} - -template -inline void swap(hashtable<_Val, _Key, _HF, _Extract, _EqKey, _All>& __ht1, - hashtable<_Val, _Key, _HF, _Extract, _EqKey, _All>& __ht2) -{ - __ht1.swap(__ht2); -} - -template -std::pair::iterator, bool> -hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::insert_unique_noresize( - const value_type& __obj) -{ - const size_type __n = _M_bkt_num(__obj); - _Node* __first = _M_buckets[__n]; - - for (_Node* __cur = __first; __cur; __cur = __cur->_M_next) - if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj))) - return std::pair(iterator(__cur, this), false); - - _Node* __tmp = _M_new_node(__obj); - __tmp->_M_next = __first; - _M_buckets[__n] = __tmp; - ++_M_num_elements; - return std::pair(iterator(__tmp, this), true); -} - -template -typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::iterator -hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::insert_equal_noresize( - const value_type& __obj) -{ - const size_type __n = _M_bkt_num(__obj); - _Node* __first = _M_buckets[__n]; - - for (_Node* __cur = __first; __cur; __cur = __cur->_M_next) - if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj))) { - _Node* __tmp = _M_new_node(__obj); - __tmp->_M_next = 
__cur->_M_next; - __cur->_M_next = __tmp; - ++_M_num_elements; - return iterator(__tmp, this); - } - - _Node* __tmp = _M_new_node(__obj); - __tmp->_M_next = __first; - _M_buckets[__n] = __tmp; - ++_M_num_elements; - return iterator(__tmp, this); -} - -template -typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::reference -hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::find_or_insert( - const value_type& __obj) -{ - resize(_M_num_elements + 1); - - size_type __n = _M_bkt_num(__obj); - _Node* __first = _M_buckets[__n]; - - for (_Node* __cur = __first; __cur; __cur = __cur->_M_next) - if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj))) - return __cur->_M_val; - - _Node* __tmp = _M_new_node(__obj); - __tmp->_M_next = __first; - _M_buckets[__n] = __tmp; - ++_M_num_elements; - return __tmp->_M_val; -} - -template -std::pair::iterator, - typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::iterator> -hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::equal_range(const key_type& __key) -{ - typedef std::pair _Pii; - const size_type __n = _M_bkt_num_key(__key); - - for (_Node* __first = _M_buckets[__n]; __first; __first = __first->_M_next) - if (_M_equals(_M_get_key(__first->_M_val), __key)) { - for (_Node* __cur = __first->_M_next; __cur; __cur = __cur->_M_next) - if (!_M_equals(_M_get_key(__cur->_M_val), __key)) - return _Pii(iterator(__first, this), iterator(__cur, this)); - for (size_type __m = __n + 1; __m < _M_buckets.size(); ++__m) - if (_M_buckets[__m]) - return _Pii(iterator(__first, this), - iterator(_M_buckets[__m], this)); - return _Pii(iterator(__first, this), end()); - } - return _Pii(end(), end()); -} - -template -std::pair::const_iterator, - typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::const_iterator> -hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::equal_range( - const key_type& __key) const -{ - typedef std::pair _Pii; - const size_type __n = _M_bkt_num_key(__key); - - for (const _Node* __first = _M_buckets[__n]; __first; - __first = __first->_M_next) { - if (_M_equals(_M_get_key(__first->_M_val), __key)) { - for (const _Node* __cur = __first->_M_next; __cur; - __cur = __cur->_M_next) - if (!_M_equals(_M_get_key(__cur->_M_val), __key)) - return _Pii(const_iterator(__first, this), - const_iterator(__cur, this)); - for (size_type __m = __n + 1; __m < _M_buckets.size(); ++__m) - if (_M_buckets[__m]) - return _Pii(const_iterator(__first, this), - const_iterator(_M_buckets[__m], this)); - return _Pii(const_iterator(__first, this), end()); - } - } - return _Pii(end(), end()); -} - -template -typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::size_type -hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase(const key_type& __key) -{ - const size_type __n = _M_bkt_num_key(__key); - _Node* __first = _M_buckets[__n]; - size_type __erased = 0; - - if (__first) { - _Node* __cur = __first; - _Node* __next = __cur->_M_next; - while (__next) { - if (_M_equals(_M_get_key(__next->_M_val), __key)) { - __cur->_M_next = __next->_M_next; - _M_delete_node(__next); - __next = __cur->_M_next; - ++__erased; - --_M_num_elements; - } else { - __cur = __next; - __next = __cur->_M_next; - } - } - if (_M_equals(_M_get_key(__first->_M_val), __key)) { - _M_buckets[__n] = __first->_M_next; - _M_delete_node(__first); - ++__erased; - --_M_num_elements; - } - } - return __erased; -} - -template -void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase(const iterator& __it) -{ - _Node* __p = __it._M_cur; - if (__p) { - const size_type __n = _M_bkt_num(__p->_M_val); - _Node* __cur = _M_buckets[__n]; - - if (__cur == 
__p) { - _M_buckets[__n] = __cur->_M_next; - _M_delete_node(__cur); - --_M_num_elements; - } else { - _Node* __next = __cur->_M_next; - while (__next) { - if (__next == __p) { - __cur->_M_next = __next->_M_next; - _M_delete_node(__next); - --_M_num_elements; - break; - } else { - __cur = __next; - __next = __cur->_M_next; - } - } - } - } -} - -template -void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase(iterator __first, - iterator __last) -{ - size_type __f_bucket = - __first._M_cur ? _M_bkt_num(__first._M_cur->_M_val) : _M_buckets.size(); - size_type __l_bucket = - __last._M_cur ? _M_bkt_num(__last._M_cur->_M_val) : _M_buckets.size(); - - if (__first._M_cur == __last._M_cur) - return; - else if (__f_bucket == __l_bucket) - _M_erase_bucket(__f_bucket, __first._M_cur, __last._M_cur); - else { - _M_erase_bucket(__f_bucket, __first._M_cur, nullptr); - for (size_type __n = __f_bucket + 1; __n < __l_bucket; ++__n) - _M_erase_bucket(__n, nullptr); - if (__l_bucket != _M_buckets.size()) - _M_erase_bucket(__l_bucket, __last._M_cur); - } -} - -template -inline void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase( - const_iterator __first, const_iterator __last) -{ - erase(iterator(const_cast<_Node*>(__first._M_cur), - const_cast(__first._M_ht)), - iterator(const_cast<_Node*>(__last._M_cur), - const_cast(__last._M_ht))); -} - -template -inline void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase( - const const_iterator& __it) -{ - erase(iterator(const_cast<_Node*>(__it._M_cur), - const_cast(__it._M_ht))); -} - -template -void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::resize( - size_type __num_elements_hint) -{ - const size_type __old_n = _M_buckets.size(); - if (__num_elements_hint > __old_n) { - const size_type __n = _M_next_size(__num_elements_hint); - if (__n > __old_n) { - _M_buckets_type __tmp(__n, (_Node*)(nullptr), - _M_buckets.get_allocator()); - try { - for (size_type __bucket = 0; __bucket < __old_n; ++__bucket) { - _Node* __first = _M_buckets[__bucket]; - while (__first) { - size_type __new_bucket = _M_bkt_num(__first->_M_val, __n); - _M_buckets[__bucket] = __first->_M_next; - __first->_M_next = __tmp[__new_bucket]; - __tmp[__new_bucket] = __first; - __first = _M_buckets[__bucket]; - } - } - _M_buckets.swap(__tmp); - } catch (...) 
{ - for (size_type __bucket = 0; __bucket < __tmp.size(); ++__bucket) { - while (__tmp[__bucket]) { - _Node* __next = __tmp[__bucket]->_M_next; - _M_delete_node(__tmp[__bucket]); - __tmp[__bucket] = __next; - } - } - throw; - } - } - } -} - -template -void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_M_erase_bucket( - const size_type __n, _Node* __first, _Node* __last) -{ - _Node* __cur = _M_buckets[__n]; - if (__cur == __first) - _M_erase_bucket(__n, __last); - else { - _Node* __next; - for (__next = __cur->_M_next; __next != __first; - __cur = __next, __next = __cur->_M_next) - ; - while (__next != __last) { - __cur->_M_next = __next->_M_next; - _M_delete_node(__next); - __next = __cur->_M_next; - --_M_num_elements; - } - } -} - -template -void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_M_erase_bucket( - const size_type __n, _Node* __last) -{ - _Node* __cur = _M_buckets[__n]; - while (__cur != __last) { - _Node* __next = __cur->_M_next; - _M_delete_node(__cur); - __cur = __next; - _M_buckets[__n] = __cur; - --_M_num_elements; - } -} - -template -void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::clear() -{ - for (size_type __i = 0; __i < _M_buckets.size(); ++__i) { - _Node* __cur = _M_buckets[__i]; - while (__cur != nullptr) { - _Node* __next = __cur->_M_next; - _M_delete_node(__cur); - __cur = __next; - } - _M_buckets[__i] = nullptr; - } - _M_num_elements = 0; -} - -template -void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_M_copy_from( - const hashtable& __ht) -{ - _M_buckets.clear(); - _M_buckets.reserve(__ht._M_buckets.size()); - _M_buckets.insert(_M_buckets.end(), __ht._M_buckets.size(), (_Node*)nullptr); - try { - for (size_type __i = 0; __i < __ht._M_buckets.size(); ++__i) { - const _Node* __cur = __ht._M_buckets[__i]; - if (__cur) { - _Node* __copy = _M_new_node(__cur->_M_val); - _M_buckets[__i] = __copy; - - for (_Node* __next = __cur->_M_next; __next; - __cur = __next, __next = __cur->_M_next) { - __copy->_M_next = _M_new_node(__next->_M_val); - __copy = __copy->_M_next; - } - } - } - _M_num_elements = __ht._M_num_elements; - } catch (...) { - clear(); - throw; - } -} - -} // namespace @KWSYS_NAMESPACE@ - -// Undo warning suppression. -# if defined(__clang__) && defined(__has_warning) -# if __has_warning("-Wdeprecated") -# pragma clang diagnostic pop -# endif -# endif - -# if defined(_MSC_VER) -# pragma warning(pop) -# endif - -#endif diff --git a/test/API/driver/kwsys/kwsysHeaderDump.pl b/test/API/driver/kwsys/kwsysHeaderDump.pl deleted file mode 100644 index e3391e76232..00000000000 --- a/test/API/driver/kwsys/kwsysHeaderDump.pl +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/perl -# Distributed under the OSI-approved BSD 3-Clause License. See accompanying -# file Copyright.txt or https://cmake.org/licensing#kwsys for details. - -if ( $#ARGV+1 < 2 ) -{ - print "Usage: ./kwsysHeaderDump.pl
    \n"; - exit(1); -} - -$name = $ARGV[0]; -$max = 0; -open(INFILE, $ARGV[1]); -while (chomp ($line = )) -{ - if (($line !~ /^\#/) && - ($line =~ s/.*kwsys${name}_([A-Za-z0-9_]*).*/\1/) && - ($i{$line}++ < 1)) - { - push(@lines, "$line"); - if (length($line) > $max) - { - $max = length($line); - } - } -} -close(INFILE); - -$width = $max + 13; -print sprintf("#define %-${width}s kwsys_ns(${name})\n", "kwsys${name}"); -foreach $l (@lines) -{ - print sprintf("#define %-${width}s kwsys_ns(${name}_$l)\n", - "kwsys${name}_$l"); -} -print "\n"; -print sprintf("# undef kwsys${name}\n"); -foreach $l (@lines) -{ - print sprintf("# undef kwsys${name}_$l\n"); -} diff --git a/test/API/driver/kwsys/kwsysPlatformTests.cmake b/test/API/driver/kwsys/kwsysPlatformTests.cmake deleted file mode 100644 index 28d3f68e252..00000000000 --- a/test/API/driver/kwsys/kwsysPlatformTests.cmake +++ /dev/null @@ -1,216 +0,0 @@ -# Distributed under the OSI-approved BSD 3-Clause License. See accompanying -# file Copyright.txt or https://cmake.org/licensing#kwsys for details. - -SET(KWSYS_PLATFORM_TEST_FILE_C kwsysPlatformTestsC.c) -SET(KWSYS_PLATFORM_TEST_FILE_CXX kwsysPlatformTestsCXX.cxx) - -MACRO(KWSYS_PLATFORM_TEST lang var description invert) - IF(NOT DEFINED ${var}_COMPILED) - MESSAGE(STATUS "${description}") - set(maybe_cxx_standard "") - if(CMAKE_VERSION VERSION_LESS 3.8 AND CMAKE_CXX_STANDARD) - set(maybe_cxx_standard "-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}") - endif() - TRY_COMPILE(${var}_COMPILED - ${CMAKE_CURRENT_BINARY_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/${KWSYS_PLATFORM_TEST_FILE_${lang}} - COMPILE_DEFINITIONS -DTEST_${var} ${KWSYS_PLATFORM_TEST_DEFINES} ${KWSYS_PLATFORM_TEST_EXTRA_FLAGS} - CMAKE_FLAGS "-DLINK_LIBRARIES:STRING=${KWSYS_PLATFORM_TEST_LINK_LIBRARIES}" - ${maybe_cxx_standard} - OUTPUT_VARIABLE OUTPUT) - IF(${var}_COMPILED) - FILE(APPEND - ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log - "${description} compiled with the following output:\n${OUTPUT}\n\n") - ELSE() - FILE(APPEND - ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log - "${description} failed to compile with the following output:\n${OUTPUT}\n\n") - ENDIF() - IF(${invert} MATCHES INVERT) - IF(${var}_COMPILED) - MESSAGE(STATUS "${description} - no") - ELSE() - MESSAGE(STATUS "${description} - yes") - ENDIF() - ELSE() - IF(${var}_COMPILED) - MESSAGE(STATUS "${description} - yes") - ELSE() - MESSAGE(STATUS "${description} - no") - ENDIF() - ENDIF() - ENDIF() - IF(${invert} MATCHES INVERT) - IF(${var}_COMPILED) - SET(${var} 0) - ELSE() - SET(${var} 1) - ENDIF() - ELSE() - IF(${var}_COMPILED) - SET(${var} 1) - ELSE() - SET(${var} 0) - ENDIF() - ENDIF() -ENDMACRO() - -MACRO(KWSYS_PLATFORM_TEST_RUN lang var description invert) - IF(NOT DEFINED ${var}) - MESSAGE(STATUS "${description}") - TRY_RUN(${var} ${var}_COMPILED - ${CMAKE_CURRENT_BINARY_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/${KWSYS_PLATFORM_TEST_FILE_${lang}} - COMPILE_DEFINITIONS -DTEST_${var} ${KWSYS_PLATFORM_TEST_DEFINES} ${KWSYS_PLATFORM_TEST_EXTRA_FLAGS} - OUTPUT_VARIABLE OUTPUT) - - # Note that ${var} will be a 0 return value on success. 
- IF(${var}_COMPILED) - IF(${var}) - FILE(APPEND - ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log - "${description} compiled but failed to run with the following output:\n${OUTPUT}\n\n") - ELSE() - FILE(APPEND - ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log - "${description} compiled and ran with the following output:\n${OUTPUT}\n\n") - ENDIF() - ELSE() - FILE(APPEND - ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log - "${description} failed to compile with the following output:\n${OUTPUT}\n\n") - SET(${var} -1 CACHE INTERNAL "${description} failed to compile.") - ENDIF() - - IF(${invert} MATCHES INVERT) - IF(${var}_COMPILED) - IF(${var}) - MESSAGE(STATUS "${description} - yes") - ELSE() - MESSAGE(STATUS "${description} - no") - ENDIF() - ELSE() - MESSAGE(STATUS "${description} - failed to compile") - ENDIF() - ELSE() - IF(${var}_COMPILED) - IF(${var}) - MESSAGE(STATUS "${description} - no") - ELSE() - MESSAGE(STATUS "${description} - yes") - ENDIF() - ELSE() - MESSAGE(STATUS "${description} - failed to compile") - ENDIF() - ENDIF() - ENDIF() - - IF(${invert} MATCHES INVERT) - IF(${var}_COMPILED) - IF(${var}) - SET(${var} 1) - ELSE() - SET(${var} 0) - ENDIF() - ELSE() - SET(${var} 1) - ENDIF() - ELSE() - IF(${var}_COMPILED) - IF(${var}) - SET(${var} 0) - ELSE() - SET(${var} 1) - ENDIF() - ELSE() - SET(${var} 0) - ENDIF() - ENDIF() -ENDMACRO() - -MACRO(KWSYS_PLATFORM_C_TEST var description invert) - SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_C_TEST_DEFINES}) - SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_C_TEST_EXTRA_FLAGS}) - KWSYS_PLATFORM_TEST(C "${var}" "${description}" "${invert}") - SET(KWSYS_PLATFORM_TEST_DEFINES) - SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS) -ENDMACRO() - -MACRO(KWSYS_PLATFORM_C_TEST_RUN var description invert) - SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_C_TEST_DEFINES}) - SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_C_TEST_EXTRA_FLAGS}) - KWSYS_PLATFORM_TEST_RUN(C "${var}" "${description}" "${invert}") - SET(KWSYS_PLATFORM_TEST_DEFINES) - SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS) -ENDMACRO() - -MACRO(KWSYS_PLATFORM_CXX_TEST var description invert) - SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_CXX_TEST_DEFINES}) - SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_CXX_TEST_EXTRA_FLAGS}) - SET(KWSYS_PLATFORM_TEST_LINK_LIBRARIES ${KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES}) - KWSYS_PLATFORM_TEST(CXX "${var}" "${description}" "${invert}") - SET(KWSYS_PLATFORM_TEST_DEFINES) - SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS) - SET(KWSYS_PLATFORM_TEST_LINK_LIBRARIES) -ENDMACRO() - -MACRO(KWSYS_PLATFORM_CXX_TEST_RUN var description invert) - SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_CXX_TEST_DEFINES}) - SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_CXX_TEST_EXTRA_FLAGS}) - KWSYS_PLATFORM_TEST_RUN(CXX "${var}" "${description}" "${invert}") - SET(KWSYS_PLATFORM_TEST_DEFINES) - SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS) -ENDMACRO() - -#----------------------------------------------------------------------------- -# KWSYS_PLATFORM_INFO_TEST(lang var description) -# -# Compile test named by ${var} and store INFO strings extracted from binary. -MACRO(KWSYS_PLATFORM_INFO_TEST lang var description) - # We can implement this macro on CMake 2.6 and above. - IF("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.6) - SET(${var} "") - ELSE() - # Choose a location for the result binary. - SET(KWSYS_PLATFORM_INFO_FILE - ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/${var}.bin) - - # Compile the test binary. 
- IF(NOT EXISTS ${KWSYS_PLATFORM_INFO_FILE}) - MESSAGE(STATUS "${description}") - TRY_COMPILE(${var}_COMPILED - ${CMAKE_CURRENT_BINARY_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/${KWSYS_PLATFORM_TEST_FILE_${lang}} - COMPILE_DEFINITIONS -DTEST_${var} - ${KWSYS_PLATFORM_${lang}_TEST_DEFINES} - ${KWSYS_PLATFORM_${lang}_TEST_EXTRA_FLAGS} - OUTPUT_VARIABLE OUTPUT - COPY_FILE ${KWSYS_PLATFORM_INFO_FILE} - ) - IF(${var}_COMPILED) - FILE(APPEND - ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log - "${description} compiled with the following output:\n${OUTPUT}\n\n") - ELSE() - FILE(APPEND - ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log - "${description} failed to compile with the following output:\n${OUTPUT}\n\n") - ENDIF() - IF(${var}_COMPILED) - MESSAGE(STATUS "${description} - compiled") - ELSE() - MESSAGE(STATUS "${description} - failed") - ENDIF() - ENDIF() - - # Parse info strings out of the compiled binary. - IF(${var}_COMPILED) - FILE(STRINGS ${KWSYS_PLATFORM_INFO_FILE} ${var} REGEX "INFO:[A-Za-z0-9]+\\[[^]]*\\]") - ELSE() - SET(${var} "") - ENDIF() - - SET(KWSYS_PLATFORM_INFO_FILE) - ENDIF() -ENDMACRO() diff --git a/test/API/driver/kwsys/kwsysPlatformTestsC.c b/test/API/driver/kwsys/kwsysPlatformTestsC.c deleted file mode 100644 index b0cf7ad3b0c..00000000000 --- a/test/API/driver/kwsys/kwsysPlatformTestsC.c +++ /dev/null @@ -1,108 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -/* - Macros to define main() in a cross-platform way. - - Usage: - - int KWSYS_PLATFORM_TEST_C_MAIN() - { - return 0; - } - - int KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv) - { - (void)argc; (void)argv; - return 0; - } -*/ -#if defined(__CLASSIC_C__) -# define KWSYS_PLATFORM_TEST_C_MAIN() main() -# define KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv) \ - main(argc, argv) int argc; \ - char* argv[]; -#else -# define KWSYS_PLATFORM_TEST_C_MAIN() main(void) -# define KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv) \ - main(int argc, char* argv[]) -#endif - -#ifdef TEST_KWSYS_C_HAS_PTRDIFF_T -# include -int f(ptrdiff_t n) -{ - return n > 0; -} -int KWSYS_PLATFORM_TEST_C_MAIN() -{ - char* p = 0; - ptrdiff_t d = p - p; - (void)d; - return f(p - p); -} -#endif - -#ifdef TEST_KWSYS_C_HAS_SSIZE_T -# include -int f(ssize_t n) -{ - return (int)n; -} -int KWSYS_PLATFORM_TEST_C_MAIN() -{ - ssize_t n = 0; - return f(n); -} -#endif - -#ifdef TEST_KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC -# if defined(__APPLE__) -# include -# if MAC_OS_X_VERSION_MIN_REQUIRED < 101200 -# error "clock_gettime not available on macOS < 10.12" -# endif -# endif -# include -int KWSYS_PLATFORM_TEST_C_MAIN() -{ - struct timespec ts; - return clock_gettime(CLOCK_MONOTONIC, &ts); -} -#endif - -#ifdef TEST_KWSYS_C_TYPE_MACROS -char* info_macros = -# if defined(__SIZEOF_SHORT__) - "INFO:macro[__SIZEOF_SHORT__]\n" -# endif -# if defined(__SIZEOF_INT__) - "INFO:macro[__SIZEOF_INT__]\n" -# endif -# if defined(__SIZEOF_LONG__) - "INFO:macro[__SIZEOF_LONG__]\n" -# endif -# if defined(__SIZEOF_LONG_LONG__) - "INFO:macro[__SIZEOF_LONG_LONG__]\n" -# endif -# if defined(__SHORT_MAX__) - "INFO:macro[__SHORT_MAX__]\n" -# endif -# if defined(__INT_MAX__) - "INFO:macro[__INT_MAX__]\n" -# endif -# if defined(__LONG_MAX__) - "INFO:macro[__LONG_MAX__]\n" -# endif -# if defined(__LONG_LONG_MAX__) - "INFO:macro[__LONG_LONG_MAX__]\n" -# endif - ""; - -int KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv) -{ - int require = 0; - require += info_macros[argc]; - 
(void)argv; - return require; -} -#endif diff --git a/test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx b/test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx deleted file mode 100644 index cfd5666f304..00000000000 --- a/test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx +++ /dev/null @@ -1,335 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifdef TEST_KWSYS_CXX_HAS_CSTDIO -# include -int main() -{ - return 0; -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_LONG_LONG -long long f(long long n) -{ - return n; -} -int main() -{ - long long n = 0; - return static_cast(f(n)); -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS___INT64 -__int64 f(__int64 n) -{ - return n; -} -int main() -{ - __int64 n = 0; - return static_cast(f(n)); -} -#endif - -#ifdef TEST_KWSYS_CXX_STAT_HAS_ST_MTIM -# include - -# include -# include -int main() -{ - struct stat stat1; - (void)stat1.st_mtim.tv_sec; - (void)stat1.st_mtim.tv_nsec; - return 0; -} -#endif - -#ifdef TEST_KWSYS_CXX_STAT_HAS_ST_MTIMESPEC -# include - -# include -# include -int main() -{ - struct stat stat1; - (void)stat1.st_mtimespec.tv_sec; - (void)stat1.st_mtimespec.tv_nsec; - return 0; -} -#endif - -#ifdef TEST_KWSYS_CXX_SAME_LONG_AND___INT64 -void function(long**) -{ -} -int main() -{ - __int64** p = 0; - function(p); - return 0; -} -#endif - -#ifdef TEST_KWSYS_CXX_SAME_LONG_LONG_AND___INT64 -void function(long long**) -{ -} -int main() -{ - __int64** p = 0; - function(p); - return 0; -} -#endif - -#ifdef TEST_KWSYS_IOS_HAS_ISTREAM_LONG_LONG -# include -int test_istream(std::istream& is, long long& x) -{ - return (is >> x) ? 1 : 0; -} -int main() -{ - long long x = 0; - return test_istream(std::cin, x); -} -#endif - -#ifdef TEST_KWSYS_IOS_HAS_OSTREAM_LONG_LONG -# include -int test_ostream(std::ostream& os, long long x) -{ - return (os << x) ? 1 : 0; -} -int main() -{ - long long x = 0; - return test_ostream(std::cout, x); -} -#endif - -#ifdef TEST_KWSYS_IOS_HAS_ISTREAM___INT64 -# include -int test_istream(std::istream& is, __int64& x) -{ - return (is >> x) ? 1 : 0; -} -int main() -{ - __int64 x = 0; - return test_istream(std::cin, x); -} -#endif - -#ifdef TEST_KWSYS_IOS_HAS_OSTREAM___INT64 -# include -int test_ostream(std::ostream& os, __int64 x) -{ - return (os << x) ? 1 : 0; -} -int main() -{ - __int64 x = 0; - return test_ostream(std::cout, x); -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_SETENV -# include -int main() -{ - return setenv("A", "B", 1); -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_UNSETENV -# include -int main() -{ - unsetenv("A"); - return 0; -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H -# include -int main() -{ - char* e = environ[0]; - return e ? 
0 : 1; -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_GETLOADAVG -// Match feature definitions from SystemInformation.cxx -# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE) -# define _GNU_SOURCE -# endif -# include -int main() -{ - double loadavg[3] = { 0.0, 0.0, 0.0 }; - return getloadavg(loadavg, 3); -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_RLIMIT64 -# include -int main() -{ - struct rlimit64 rlim; - return getrlimit64(0, &rlim); -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_ATOLL -# include -int main() -{ - const char* str = "1024"; - return static_cast(atoll(str)); -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_ATOL -# include -int main() -{ - const char* str = "1024"; - return static_cast(atol(str)); -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS__ATOI64 -# include -int main() -{ - const char* str = "1024"; - return static_cast(_atoi64(str)); -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_UTIMES -# include -int main() -{ - struct timeval* current_time = 0; - return utimes("/example", current_time); -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_UTIMENSAT -# include -# include -# if defined(__APPLE__) -# include -# if MAC_OS_X_VERSION_MIN_REQUIRED < 101300 -# error "utimensat not available on macOS < 10.13" -# endif -# endif -int main() -{ - struct timespec times[2] = { { 0, UTIME_OMIT }, { 0, UTIME_NOW } }; - return utimensat(AT_FDCWD, "/example", times, AT_SYMLINK_NOFOLLOW); -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_BACKTRACE -# if defined(__PATHSCALE__) || defined(__PATHCC__) || \ - (defined(__LSB_VERSION__) && (__LSB_VERSION__ < 41)) -backtrace does not work with this compiler or os -# endif -# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE) -# define _GNU_SOURCE -# endif -# include -int main() -{ - void* stackSymbols[256]; - backtrace(stackSymbols, 256); - backtrace_symbols(&stackSymbols[0], 1); - return 0; -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_DLADDR -# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE) -# define _GNU_SOURCE -# endif -# include -int main() -{ - Dl_info info; - int ierr = dladdr((void*)main, &info); - return 0; -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_CXXABI -# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE) -# define _GNU_SOURCE -# endif -# if defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5130 && __linux && \ - __SUNPRO_CC_COMPAT == 'G' -# include -# endif -# include -int main() -{ - int status = 0; - size_t bufferLen = 512; - char buffer[512] = { '\0' }; - const char* function = "_ZN5kwsys17SystemInformation15GetProgramStackEii"; - char* demangledFunction = - abi::__cxa_demangle(function, buffer, &bufferLen, &status); - return status; -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_BORLAND_ASM -int main() -{ - int a = 1; - __asm { - xor EBX, EBX; - mov a, EBX; - } - - return a; -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_BORLAND_ASM_CPUID -int main() -{ - int a = 0; - __asm { - xor EAX, EAX; - cpuid; - mov a, EAX; - } - - return a; -} -#endif - -#ifdef TEST_KWSYS_STL_HAS_WSTRING -# include -void f(std::wstring*) -{ -} -int main() -{ - return 0; -} -#endif - -#ifdef TEST_KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H -# include -int main() -{ - return 0; -} -#endif diff --git a/test/API/driver/kwsys/kwsysPrivate.h b/test/API/driver/kwsys/kwsysPrivate.h deleted file mode 100644 index dd9c1277fb6..00000000000 --- a/test/API/driver/kwsys/kwsysPrivate.h +++ /dev/null @@ -1,34 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/
-#ifndef KWSYS_NAMESPACE
-# error "Do not include kwsysPrivate.h outside of kwsys c and cxx files."
-#endif
-
-#ifndef _kwsysPrivate_h
-# define _kwsysPrivate_h
-
-/*
-  Define KWSYS_HEADER macro to help the c and cxx files include kwsys
-  headers from the configured namespace directory.  The macro can be
-  used like this:
-
-    #include KWSYS_HEADER(Directory.hxx)
-    #include KWSYS_HEADER(std/vector)
-*/
-/* clang-format off */
-#define KWSYS_HEADER(x) KWSYS_HEADER0(KWSYS_NAMESPACE/x)
-/* clang-format on */
-# define KWSYS_HEADER0(x) KWSYS_HEADER1(x)
-# define KWSYS_HEADER1(x) <x>
-
-/*
-  Define KWSYS_NAMESPACE_STRING to be a string constant containing the
-  name configured for this instance of the kwsys library.
-*/
-# define KWSYS_NAMESPACE_STRING KWSYS_NAMESPACE_STRING0(KWSYS_NAMESPACE)
-# define KWSYS_NAMESPACE_STRING0(x) KWSYS_NAMESPACE_STRING1(x)
-# define KWSYS_NAMESPACE_STRING1(x) # x
-
-#else
-# error "kwsysPrivate.h included multiple times."
-#endif
diff --git a/test/API/driver/kwsys/testCommandLineArguments.cxx b/test/API/driver/kwsys/testCommandLineArguments.cxx
deleted file mode 100644
index 1778a9ba8bd..00000000000
--- a/test/API/driver/kwsys/testCommandLineArguments.cxx
+++ /dev/null
@@ -1,209 +0,0 @@
-/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
-   file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
-#include "kwsysPrivate.h"
-#include KWSYS_HEADER(CommandLineArguments.hxx)
-
-// Work-around CMake dependency scanning limitation. This must
-// duplicate the above list of headers.
-#if 0
-# include "CommandLineArguments.hxx.in"
-#endif
-
-#include <iostream>
-#include <vector>
-
-#include <stddef.h> /* size_t */
-#include <string.h> /* strcmp */
-
-static void* random_ptr = reinterpret_cast<void*>(0x123);
-
-static int argument(const char* arg, const char* value, void* call_data)
-{
-  std::cout << "Got argument: \"" << arg << "\" value: \""
-            << (value ?
value : "(null)") << "\"" << std::endl;
-  if (call_data != random_ptr) {
-    std::cerr << "Problem processing call_data" << std::endl;
-    return 0;
-  }
-  return 1;
-}
-
-static int unknown_argument(const char* argument, void* call_data)
-{
-  std::cout << "Got unknown argument: \"" << argument << "\"" << std::endl;
-  if (call_data != random_ptr) {
-    std::cerr << "Problem processing call_data" << std::endl;
-    return 0;
-  }
-  return 1;
-}
-
-static bool CompareTwoItemsOnList(bool i1, bool i2)
-{
-  return i1 == i2;
-}
-static bool CompareTwoItemsOnList(int i1, int i2)
-{
-  return i1 == i2;
-}
-static bool CompareTwoItemsOnList(double i1, double i2)
-{
-  return i1 == i2;
-}
-static bool CompareTwoItemsOnList(const char* i1, const char* i2)
-{
-  return strcmp(i1, i2) == 0;
-}
-static bool CompareTwoItemsOnList(const std::string& i1, const std::string& i2)
-{
-  return i1 == i2;
-}
-
-int testCommandLineArguments(int argc, char* argv[])
-{
-  // Example run: ./testCommandLineArguments --some-int-variable 4
-  // --another-bool-variable --some-bool-variable=yes
-  // --some-stl-string-variable=foobar --set-bool-arg1 --set-bool-arg2
-  // --some-string-variable=hello
-
-  int res = 0;
-  kwsys::CommandLineArguments arg;
-  arg.Initialize(argc, argv);
-
-  // For error handling
-  arg.SetClientData(random_ptr);
-  arg.SetUnknownArgumentCallback(unknown_argument);
-
-  int some_int_variable = 10;
-  double some_double_variable = 10.10;
-  char* some_string_variable = nullptr;
-  std::string some_stl_string_variable;
-  bool some_bool_variable = false;
-  bool some_bool_variable1 = false;
-  bool bool_arg1 = false;
-  int bool_arg2 = 0;
-
-  std::vector<int> numbers_argument;
-  int valid_numbers[] = { 5, 1, 8, 3, 7, 1, 3, 9, 7, 1 };
-
-  std::vector<double> doubles_argument;
-  double valid_doubles[] = { 12.5, 1.31, 22 };
-
-  std::vector<bool> bools_argument;
-  bool valid_bools[] = { true, true, false };
-
-  std::vector<const char*> strings_argument;
-  const char* valid_strings[] = { "andy", "bill", "brad", "ken" };
-
-  std::vector<std::string> stl_strings_argument;
-  std::string valid_stl_strings[] = { "ken", "brad", "bill", "andy" };
-
-  typedef kwsys::CommandLineArguments argT;
-
-  arg.AddArgument("--some-int-variable", argT::SPACE_ARGUMENT,
-                  &some_int_variable, "Set some random int variable");
-  arg.AddArgument("--some-double-variable", argT::CONCAT_ARGUMENT,
-                  &some_double_variable, "Set some random double variable");
-  arg.AddArgument("--some-string-variable", argT::EQUAL_ARGUMENT,
-                  &some_string_variable, "Set some random string variable");
-  arg.AddArgument("--some-stl-string-variable", argT::EQUAL_ARGUMENT,
-                  &some_stl_string_variable,
-                  "Set some random stl string variable");
-  arg.AddArgument("--some-bool-variable", argT::EQUAL_ARGUMENT,
-                  &some_bool_variable, "Set some random bool variable");
-  arg.AddArgument("--another-bool-variable", argT::NO_ARGUMENT,
-                  &some_bool_variable1, "Set some random bool variable 1");
-  arg.AddBooleanArgument("--set-bool-arg1", &bool_arg1,
-                         "Test AddBooleanArgument 1");
-  arg.AddBooleanArgument("--set-bool-arg2", &bool_arg2,
-                         "Test AddBooleanArgument 2");
-  arg.AddArgument("--some-multi-argument", argT::MULTI_ARGUMENT,
-                  &numbers_argument, "Some multiple values variable");
-  arg.AddArgument("-N", argT::SPACE_ARGUMENT, &doubles_argument,
-                  "Some explicit multiple values variable");
-  arg.AddArgument("-BB", argT::CONCAT_ARGUMENT, &bools_argument,
-                  "Some explicit multiple values variable");
-  arg.AddArgument("-SS", argT::EQUAL_ARGUMENT, &strings_argument,
-                  "Some explicit multiple values variable");
-  arg.AddArgument("-SSS",
argT::MULTI_ARGUMENT, &stl_strings_argument, - "Some explicit multiple values variable"); - - arg.AddCallback("-A", argT::NO_ARGUMENT, argument, random_ptr, - "Some option -A. This option has a multiline comment. It " - "should demonstrate how the code splits lines."); - arg.AddCallback("-B", argT::SPACE_ARGUMENT, argument, random_ptr, - "Option -B takes argument with space"); - arg.AddCallback("-C", argT::EQUAL_ARGUMENT, argument, random_ptr, - "Option -C takes argument after ="); - arg.AddCallback("-D", argT::CONCAT_ARGUMENT, argument, random_ptr, - "This option takes concatenated argument"); - arg.AddCallback("--long1", argT::NO_ARGUMENT, argument, random_ptr, "-A"); - arg.AddCallback("--long2", argT::SPACE_ARGUMENT, argument, random_ptr, "-B"); - arg.AddCallback("--long3", argT::EQUAL_ARGUMENT, argument, random_ptr, - "Same as -C but a bit different"); - arg.AddCallback("--long4", argT::CONCAT_ARGUMENT, argument, random_ptr, - "-C"); - - if (!arg.Parse()) { - std::cerr << "Problem parsing arguments" << std::endl; - res = 1; - } - std::cout << "Help: " << arg.GetHelp() << std::endl; - - std::cout << "Some int variable was set to: " << some_int_variable - << std::endl; - std::cout << "Some double variable was set to: " << some_double_variable - << std::endl; - if (some_string_variable && - strcmp(some_string_variable, "test string with space") == 0) { - std::cout << "Some string variable was set to: " << some_string_variable - << std::endl; - delete[] some_string_variable; - } else { - std::cerr << "Problem setting string variable" << std::endl; - res = 1; - } - size_t cc; -#define CompareTwoLists(list1, list_valid, lsize) \ - do { \ - if (list1.size() != lsize) { \ - std::cerr << "Problem setting " #list1 ". Size is: " << list1.size() \ - << " should be: " << lsize << std::endl; \ - res = 1; \ - } else { \ - std::cout << #list1 " argument set:"; \ - for (cc = 0; cc < lsize; ++cc) { \ - std::cout << " " << list1[cc]; \ - if (!CompareTwoItemsOnList(list1[cc], list_valid[cc])) { \ - std::cerr << "Problem setting " #list1 ". Value of " << cc \ - << " is: [" << list1[cc] << "] <> [" << list_valid[cc] \ - << "]" << std::endl; \ - res = 1; \ - break; \ - } \ - } \ - std::cout << std::endl; \ - } \ - } while (0) - CompareTwoLists(numbers_argument, valid_numbers, 10); - CompareTwoLists(doubles_argument, valid_doubles, 3); - CompareTwoLists(bools_argument, valid_bools, 3); - CompareTwoLists(strings_argument, valid_strings, 4); - CompareTwoLists(stl_strings_argument, valid_stl_strings, 4); - - std::cout << "Some STL String variable was set to: " - << some_stl_string_variable << std::endl; - std::cout << "Some bool variable was set to: " << some_bool_variable - << std::endl; - std::cout << "Some bool variable was set to: " << some_bool_variable1 - << std::endl; - std::cout << "bool_arg1 variable was set to: " << bool_arg1 << std::endl; - std::cout << "bool_arg2 variable was set to: " << bool_arg2 << std::endl; - std::cout << std::endl; - - for (cc = 0; cc < strings_argument.size(); ++cc) { - delete[] strings_argument[cc]; - strings_argument[cc] = nullptr; - } - return res; -} diff --git a/test/API/driver/kwsys/testCommandLineArguments1.cxx b/test/API/driver/kwsys/testCommandLineArguments1.cxx deleted file mode 100644 index 64561b1e3b4..00000000000 --- a/test/API/driver/kwsys/testCommandLineArguments1.cxx +++ /dev/null @@ -1,93 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(CommandLineArguments.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "CommandLineArguments.hxx.in" -#endif - -#include -#include - -#include /* assert */ -#include /* strcmp */ - -int testCommandLineArguments1(int argc, char* argv[]) -{ - kwsys::CommandLineArguments arg; - arg.Initialize(argc, argv); - - int n = 0; - char* m = nullptr; - std::string p; - int res = 0; - - typedef kwsys::CommandLineArguments argT; - arg.AddArgument("-n", argT::SPACE_ARGUMENT, &n, "Argument N"); - arg.AddArgument("-m", argT::EQUAL_ARGUMENT, &m, "Argument M"); - arg.AddBooleanArgument("-p", &p, "Argument P"); - - arg.StoreUnusedArguments(true); - - if (!arg.Parse()) { - std::cerr << "Problem parsing arguments" << std::endl; - res = 1; - } - if (n != 24) { - std::cout << "Problem setting N. Value of N: " << n << std::endl; - res = 1; - } - if (!m || strcmp(m, "test value") != 0) { - std::cout << "Problem setting M. Value of M: " << m << std::endl; - res = 1; - } - if (p != "1") { - std::cout << "Problem setting P. Value of P: " << p << std::endl; - res = 1; - } - std::cout << "Value of N: " << n << std::endl; - std::cout << "Value of M: " << m << std::endl; - std::cout << "Value of P: " << p << std::endl; - if (m) { - delete[] m; - } - - char** newArgv = nullptr; - int newArgc = 0; - arg.GetUnusedArguments(&newArgc, &newArgv); - int cc; - const char* valid_unused_args[9] = { nullptr, - "--ignored", - "--second-ignored", - "third-ignored", - "some", - "junk", - "at", - "the", - "end" }; - if (newArgc != 9) { - std::cerr << "Bad number of unused arguments: " << newArgc << std::endl; - res = 1; - } - for (cc = 0; cc < newArgc; ++cc) { - assert(newArgv[cc]); /* Quiet Clang scan-build. */ - std::cout << "Unused argument[" << cc << "] = [" << newArgv[cc] << "]" - << std::endl; - if (cc >= 9) { - std::cerr << "Too many unused arguments: " << cc << std::endl; - res = 1; - } else if (valid_unused_args[cc] && - strcmp(valid_unused_args[cc], newArgv[cc]) != 0) { - std::cerr << "Bad unused argument [" << cc << "] \"" << newArgv[cc] - << "\" should be: \"" << valid_unused_args[cc] << "\"" - << std::endl; - res = 1; - } - } - arg.DeleteRemainingArguments(newArgc, &newArgv); - - return res; -} diff --git a/test/API/driver/kwsys/testConfigure.cxx b/test/API/driver/kwsys/testConfigure.cxx deleted file mode 100644 index a3c2ed3aeda..00000000000 --- a/test/API/driver/kwsys/testConfigure.cxx +++ /dev/null @@ -1,30 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying -file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(Configure.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "Configure.hxx.in" -#endif - -static bool testFallthrough(int n) -{ - int r = 0; - switch (n) { - case 1: - ++r; - KWSYS_FALLTHROUGH; - default: - ++r; - } - return r == 2; -} - -int testConfigure(int, char* []) -{ - bool res = true; - res = testFallthrough(1) && res; - return res ? 0 : 1; -} diff --git a/test/API/driver/kwsys/testConsoleBuf.cxx b/test/API/driver/kwsys/testConsoleBuf.cxx deleted file mode 100644 index 4b7ddf053fc..00000000000 --- a/test/API/driver/kwsys/testConsoleBuf.cxx +++ /dev/null @@ -1,782 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. 
See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" - -// Ignore Windows version levels defined by command-line flags. This -// source needs access to all APIs available on the host in order for -// the test to run properly. The test binary is not installed anyway. -#undef _WIN32_WINNT -#undef NTDDI_VERSION - -#include KWSYS_HEADER(Encoding.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "Encoding.hxx.in" -#endif - -#if defined(_WIN32) - -# include -# include -# include -# include -# include -# include -# include - -# include "testConsoleBuf.hxx" - -# if defined(_MSC_VER) && _MSC_VER >= 1800 -# define KWSYS_WINDOWS_DEPRECATED_GetVersion -# endif -// يونيكود -static const WCHAR UnicodeInputTestString[] = - L"\u064A\u0648\u0646\u064A\u0643\u0648\u062F!"; -static UINT TestCodepage = KWSYS_ENCODING_DEFAULT_CODEPAGE; - -static const DWORD waitTimeout = 10 * 1000; -static STARTUPINFO startupInfo; -static PROCESS_INFORMATION processInfo; -static HANDLE beforeInputEvent; -static HANDLE afterOutputEvent; -static std::string encodedInputTestString; -static std::string encodedTestString; - -static void displayError(DWORD errorCode) -{ - std::cerr.setf(std::ios::hex, std::ios::basefield); - std::cerr << "Failed with error: 0x" << errorCode << "!" << std::endl; - LPWSTR message; - if (FormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER | - FORMAT_MESSAGE_FROM_SYSTEM, - nullptr, errorCode, 0, (LPWSTR)&message, 0, nullptr)) { - std::cerr << "Error message: " << kwsys::Encoding::ToNarrow(message) - << std::endl; - HeapFree(GetProcessHeap(), 0, message); - } else { - std::cerr << "FormatMessage() failed with error: 0x" << GetLastError() - << "!" 
<< std::endl; - } - std::cerr.unsetf(std::ios::hex); -} - -std::basic_streambuf* errstream(const char* unused) -{ - static_cast(unused); - return std::cerr.rdbuf(); -} - -std::basic_streambuf* errstream(const wchar_t* unused) -{ - static_cast(unused); - return std::wcerr.rdbuf(); -} - -template -static void dumpBuffers(const T* expected, const T* received, size_t size) -{ - std::basic_ostream err(errstream(expected)); - err << "Expected output: '" << std::basic_string(expected, size) << "'" - << std::endl; - if (err.fail()) { - err.clear(); - err << "--- Error while outputting ---" << std::endl; - } - err << "Received output: '" << std::basic_string(received, size) << "'" - << std::endl; - if (err.fail()) { - err.clear(); - err << "--- Error while outputting ---" << std::endl; - } - std::cerr << "Expected output | Received output" << std::endl; - for (size_t i = 0; i < size; i++) { - std::cerr << std::setbase(16) << std::setfill('0') << " " - << "0x" << std::setw(8) << static_cast(expected[i]) - << " | " - << "0x" << std::setw(8) - << static_cast(received[i]); - if (static_cast(expected[i]) != - static_cast(received[i])) { - std::cerr << " MISMATCH!"; - } - std::cerr << std::endl; - } - std::cerr << std::endl; -} - -static bool createProcess(HANDLE hIn, HANDLE hOut, HANDLE hErr) -{ - BOOL bInheritHandles = FALSE; - DWORD dwCreationFlags = 0; - memset(&processInfo, 0, sizeof(processInfo)); - memset(&startupInfo, 0, sizeof(startupInfo)); - startupInfo.cb = sizeof(startupInfo); - startupInfo.dwFlags = STARTF_USESHOWWINDOW; - startupInfo.wShowWindow = SW_HIDE; - if (hIn || hOut || hErr) { - startupInfo.dwFlags |= STARTF_USESTDHANDLES; - startupInfo.hStdInput = hIn; - startupInfo.hStdOutput = hOut; - startupInfo.hStdError = hErr; - bInheritHandles = TRUE; - } - - WCHAR cmd[MAX_PATH]; - if (GetModuleFileNameW(nullptr, cmd, MAX_PATH) == 0) { - std::cerr << "GetModuleFileName failed!" << std::endl; - return false; - } - WCHAR* p = cmd + wcslen(cmd); - while (p > cmd && *p != L'\\') - p--; - *(p + 1) = 0; - wcscat(cmd, cmdConsoleBufChild); - wcscat(cmd, L".exe"); - - bool success = - CreateProcessW(nullptr, // No module name (use command line) - cmd, // Command line - nullptr, // Process handle not inheritable - nullptr, // Thread handle not inheritable - bInheritHandles, // Set handle inheritance - dwCreationFlags, - nullptr, // Use parent's environment block - nullptr, // Use parent's starting directory - &startupInfo, // Pointer to STARTUPINFO structure - &processInfo) != - 0; // Pointer to PROCESS_INFORMATION structure - if (!success) { - DWORD lastError = GetLastError(); - std::cerr << "CreateProcess(" << kwsys::Encoding::ToNarrow(cmd) << ")" - << std::endl; - displayError(lastError); - } - return success; -} - -static void finishProcess(bool success) -{ - if (success) { - success = - WaitForSingleObject(processInfo.hProcess, waitTimeout) == WAIT_OBJECT_0; - }; - if (!success) { - TerminateProcess(processInfo.hProcess, 1); - } - CloseHandle(processInfo.hProcess); - CloseHandle(processInfo.hThread); -} - -static bool createPipe(PHANDLE readPipe, PHANDLE writePipe) -{ - SECURITY_ATTRIBUTES securityAttributes; - securityAttributes.nLength = sizeof(SECURITY_ATTRIBUTES); - securityAttributes.bInheritHandle = TRUE; - securityAttributes.lpSecurityDescriptor = nullptr; - return CreatePipe(readPipe, writePipe, &securityAttributes, 0) == 0 ? 
false - : true; -} - -static void finishPipe(HANDLE readPipe, HANDLE writePipe) -{ - if (readPipe != INVALID_HANDLE_VALUE) { - CloseHandle(readPipe); - } - if (writePipe != INVALID_HANDLE_VALUE) { - CloseHandle(writePipe); - } -} - -static HANDLE createFile(LPCWSTR fileName) -{ - SECURITY_ATTRIBUTES securityAttributes; - securityAttributes.nLength = sizeof(SECURITY_ATTRIBUTES); - securityAttributes.bInheritHandle = TRUE; - securityAttributes.lpSecurityDescriptor = nullptr; - - HANDLE file = - CreateFileW(fileName, GENERIC_READ | GENERIC_WRITE, - 0, // do not share - &securityAttributes, - CREATE_ALWAYS, // overwrite existing - FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE, - nullptr); // no template - if (file == INVALID_HANDLE_VALUE) { - DWORD lastError = GetLastError(); - std::cerr << "CreateFile(" << kwsys::Encoding::ToNarrow(fileName) << ")" - << std::endl; - displayError(lastError); - } - return file; -} - -static void finishFile(HANDLE file) -{ - if (file != INVALID_HANDLE_VALUE) { - CloseHandle(file); - } -} - -# ifndef MAPVK_VK_TO_VSC -# define MAPVK_VK_TO_VSC (0) -# endif - -static void writeInputKeyEvent(INPUT_RECORD inputBuffer[], WCHAR chr) -{ - inputBuffer[0].EventType = KEY_EVENT; - inputBuffer[0].Event.KeyEvent.bKeyDown = TRUE; - inputBuffer[0].Event.KeyEvent.wRepeatCount = 1; - SHORT keyCode = VkKeyScanW(chr); - if (keyCode == -1) { - // Character can't be entered with current keyboard layout - // Just set any, it doesn't really matter - keyCode = 'K'; - } - inputBuffer[0].Event.KeyEvent.wVirtualKeyCode = LOBYTE(keyCode); - inputBuffer[0].Event.KeyEvent.wVirtualScanCode = MapVirtualKey( - inputBuffer[0].Event.KeyEvent.wVirtualKeyCode, MAPVK_VK_TO_VSC); - inputBuffer[0].Event.KeyEvent.uChar.UnicodeChar = chr; - inputBuffer[0].Event.KeyEvent.dwControlKeyState = 0; - if ((HIBYTE(keyCode) & 1) == 1) { - inputBuffer[0].Event.KeyEvent.dwControlKeyState |= SHIFT_PRESSED; - } - if ((HIBYTE(keyCode) & 2) == 2) { - inputBuffer[0].Event.KeyEvent.dwControlKeyState |= RIGHT_CTRL_PRESSED; - } - if ((HIBYTE(keyCode) & 4) == 4) { - inputBuffer[0].Event.KeyEvent.dwControlKeyState |= RIGHT_ALT_PRESSED; - } - inputBuffer[1].EventType = inputBuffer[0].EventType; - inputBuffer[1].Event.KeyEvent.bKeyDown = FALSE; - inputBuffer[1].Event.KeyEvent.wRepeatCount = 1; - inputBuffer[1].Event.KeyEvent.wVirtualKeyCode = - inputBuffer[0].Event.KeyEvent.wVirtualKeyCode; - inputBuffer[1].Event.KeyEvent.wVirtualScanCode = - inputBuffer[0].Event.KeyEvent.wVirtualScanCode; - inputBuffer[1].Event.KeyEvent.uChar.UnicodeChar = - inputBuffer[0].Event.KeyEvent.uChar.UnicodeChar; - inputBuffer[1].Event.KeyEvent.dwControlKeyState = 0; -} - -static int testPipe() -{ - int didFail = 1; - HANDLE inPipeRead = INVALID_HANDLE_VALUE; - HANDLE inPipeWrite = INVALID_HANDLE_VALUE; - HANDLE outPipeRead = INVALID_HANDLE_VALUE; - HANDLE outPipeWrite = INVALID_HANDLE_VALUE; - HANDLE errPipeRead = INVALID_HANDLE_VALUE; - HANDLE errPipeWrite = INVALID_HANDLE_VALUE; - UINT currentCodepage = GetConsoleCP(); - char buffer[200]; - char buffer2[200]; - try { - if (!createPipe(&inPipeRead, &inPipeWrite) || - !createPipe(&outPipeRead, &outPipeWrite) || - !createPipe(&errPipeRead, &errPipeWrite)) { - throw std::runtime_error("createFile failed!"); - } - if (TestCodepage == CP_ACP) { - TestCodepage = GetACP(); - } - if (!SetConsoleCP(TestCodepage)) { - throw std::runtime_error("SetConsoleCP failed!"); - } - - DWORD bytesWritten = 0; - if (!WriteFile(inPipeWrite, encodedInputTestString.c_str(), - (DWORD)encodedInputTestString.size(), 
&bytesWritten, - nullptr) || - bytesWritten == 0) { - throw std::runtime_error("WriteFile failed!"); - } - - if (createProcess(inPipeRead, outPipeWrite, errPipeWrite)) { - try { - DWORD status; - if ((status = WaitForSingleObject(afterOutputEvent, waitTimeout)) != - WAIT_OBJECT_0) { - std::cerr.setf(std::ios::hex, std::ios::basefield); - std::cerr << "WaitForSingleObject returned unexpected status 0x" - << status << std::endl; - std::cerr.unsetf(std::ios::hex); - throw std::runtime_error("WaitForSingleObject failed!"); - } - DWORD bytesRead = 0; - if (!ReadFile(outPipeRead, buffer, sizeof(buffer), &bytesRead, - nullptr) || - bytesRead == 0) { - throw std::runtime_error("ReadFile#1 failed!"); - } - buffer[bytesRead] = 0; - if ((bytesRead < - encodedTestString.size() + 1 + encodedInputTestString.size() && - !ReadFile(outPipeRead, buffer + bytesRead, - sizeof(buffer) - bytesRead, &bytesRead, nullptr)) || - bytesRead == 0) { - throw std::runtime_error("ReadFile#2 failed!"); - } - if (memcmp(buffer, encodedTestString.c_str(), - encodedTestString.size()) == 0 && - memcmp(buffer + encodedTestString.size() + 1, - encodedInputTestString.c_str(), - encodedInputTestString.size()) == 0) { - bytesRead = 0; - if (!ReadFile(errPipeRead, buffer2, sizeof(buffer2), &bytesRead, - nullptr) || - bytesRead == 0) { - throw std::runtime_error("ReadFile#3 failed!"); - } - buffer2[bytesRead] = 0; - didFail = encodedTestString.compare(0, std::string::npos, buffer2, - encodedTestString.size()) == 0 - ? 0 - : 1; - } - if (didFail != 0) { - std::cerr << "Pipe's output didn't match expected output!" - << std::endl; - dumpBuffers(encodedTestString.c_str(), buffer, - encodedTestString.size()); - dumpBuffers(encodedInputTestString.c_str(), - buffer + encodedTestString.size() + 1, - encodedInputTestString.size()); - dumpBuffers(encodedTestString.c_str(), buffer2, - encodedTestString.size()); - } - } catch (const std::runtime_error& ex) { - DWORD lastError = GetLastError(); - std::cerr << "In function testPipe, line " << __LINE__ << ": " - << ex.what() << std::endl; - displayError(lastError); - } - finishProcess(didFail == 0); - } - } catch (const std::runtime_error& ex) { - DWORD lastError = GetLastError(); - std::cerr << "In function testPipe, line " << __LINE__ << ": " << ex.what() - << std::endl; - displayError(lastError); - } - finishPipe(inPipeRead, inPipeWrite); - finishPipe(outPipeRead, outPipeWrite); - finishPipe(errPipeRead, errPipeWrite); - SetConsoleCP(currentCodepage); - return didFail; -} - -static int testFile() -{ - int didFail = 1; - HANDLE inFile = INVALID_HANDLE_VALUE; - HANDLE outFile = INVALID_HANDLE_VALUE; - HANDLE errFile = INVALID_HANDLE_VALUE; - try { - if ((inFile = createFile(L"stdinFile.txt")) == INVALID_HANDLE_VALUE || - (outFile = createFile(L"stdoutFile.txt")) == INVALID_HANDLE_VALUE || - (errFile = createFile(L"stderrFile.txt")) == INVALID_HANDLE_VALUE) { - throw std::runtime_error("createFile failed!"); - } - DWORD bytesWritten = 0; - char buffer[200]; - char buffer2[200]; - - int length; - if ((length = WideCharToMultiByte(TestCodepage, 0, UnicodeInputTestString, - -1, buffer, sizeof(buffer), nullptr, - nullptr)) == 0) { - throw std::runtime_error("WideCharToMultiByte failed!"); - } - buffer[length - 1] = '\n'; - if (!WriteFile(inFile, buffer, length, &bytesWritten, nullptr) || - bytesWritten == 0) { - throw std::runtime_error("WriteFile failed!"); - } - if (SetFilePointer(inFile, 0, 0, FILE_BEGIN) == INVALID_SET_FILE_POINTER) { - throw std::runtime_error("SetFilePointer failed!"); - } - - 
if (createProcess(inFile, outFile, errFile)) { - DWORD bytesRead = 0; - try { - DWORD status; - if ((status = WaitForSingleObject(afterOutputEvent, waitTimeout)) != - WAIT_OBJECT_0) { - std::cerr.setf(std::ios::hex, std::ios::basefield); - std::cerr << "WaitForSingleObject returned unexpected status 0x" - << status << std::endl; - std::cerr.unsetf(std::ios::hex); - throw std::runtime_error("WaitForSingleObject failed!"); - } - if (SetFilePointer(outFile, 0, 0, FILE_BEGIN) == - INVALID_SET_FILE_POINTER) { - throw std::runtime_error("SetFilePointer#1 failed!"); - } - if (!ReadFile(outFile, buffer, sizeof(buffer), &bytesRead, nullptr) || - bytesRead == 0) { - throw std::runtime_error("ReadFile#1 failed!"); - } - buffer[bytesRead] = 0; - if (memcmp(buffer, encodedTestString.c_str(), - encodedTestString.size()) == 0 && - memcmp(buffer + encodedTestString.size() + 1, - encodedInputTestString.c_str(), - encodedInputTestString.size()) == 0) { - bytesRead = 0; - if (SetFilePointer(errFile, 0, 0, FILE_BEGIN) == - INVALID_SET_FILE_POINTER) { - throw std::runtime_error("SetFilePointer#2 failed!"); - } - - if (!ReadFile(errFile, buffer2, sizeof(buffer2), &bytesRead, - nullptr) || - bytesRead == 0) { - throw std::runtime_error("ReadFile#2 failed!"); - } - buffer2[bytesRead] = 0; - didFail = encodedTestString.compare(0, std::string::npos, buffer2, - encodedTestString.size()) == 0 - ? 0 - : 1; - } - if (didFail != 0) { - std::cerr << "File's output didn't match expected output!" - << std::endl; - dumpBuffers(encodedTestString.c_str(), buffer, - encodedTestString.size()); - dumpBuffers(encodedInputTestString.c_str(), - buffer + encodedTestString.size() + 1, - encodedInputTestString.size()); - dumpBuffers(encodedTestString.c_str(), buffer2, - encodedTestString.size()); - } - } catch (const std::runtime_error& ex) { - DWORD lastError = GetLastError(); - std::cerr << "In function testFile, line " << __LINE__ << ": " - << ex.what() << std::endl; - displayError(lastError); - } - finishProcess(didFail == 0); - } - } catch (const std::runtime_error& ex) { - DWORD lastError = GetLastError(); - std::cerr << "In function testFile, line " << __LINE__ << ": " << ex.what() - << std::endl; - displayError(lastError); - } - finishFile(inFile); - finishFile(outFile); - finishFile(errFile); - return didFail; -} - -# ifndef _WIN32_WINNT_VISTA -# define _WIN32_WINNT_VISTA 0x0600 -# endif - -static int testConsole() -{ - int didFail = 1; - HANDLE parentIn = GetStdHandle(STD_INPUT_HANDLE); - HANDLE parentOut = GetStdHandle(STD_OUTPUT_HANDLE); - HANDLE parentErr = GetStdHandle(STD_ERROR_HANDLE); - HANDLE hIn = parentIn; - HANDLE hOut = parentOut; - DWORD consoleMode; - bool newConsole = false; - bool forceNewConsole = false; - bool restoreConsole = false; - LPCWSTR TestFaceName = L"Lucida Console"; - const DWORD TestFontFamily = 0x00000036; - const DWORD TestFontSize = 0x000c0000; - HKEY hConsoleKey; - WCHAR FaceName[200]; - FaceName[0] = 0; - DWORD FaceNameSize = sizeof(FaceName); - DWORD FontFamily = TestFontFamily; - DWORD FontSize = TestFontSize; -# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersion -# pragma warning(push) -# ifdef __INTEL_COMPILER -# pragma warning(disable : 1478) -# elif defined __clang__ -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wdeprecated-declarations" -# else -# pragma warning(disable : 4996) -# endif -# endif - const bool isVistaOrGreater = - LOBYTE(LOWORD(GetVersion())) >= HIBYTE(_WIN32_WINNT_VISTA); -# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersion -# ifdef __clang__ -# pragma clang 
diagnostic pop -# else -# pragma warning(pop) -# endif -# endif - if (!isVistaOrGreater) { - if (RegOpenKeyExW(HKEY_CURRENT_USER, L"Console", 0, KEY_READ | KEY_WRITE, - &hConsoleKey) == ERROR_SUCCESS) { - DWORD dwordSize = sizeof(DWORD); - if (RegQueryValueExW(hConsoleKey, L"FontFamily", nullptr, nullptr, - (LPBYTE)&FontFamily, &dwordSize) == ERROR_SUCCESS) { - if (FontFamily != TestFontFamily) { - RegQueryValueExW(hConsoleKey, L"FaceName", nullptr, nullptr, - (LPBYTE)FaceName, &FaceNameSize); - RegQueryValueExW(hConsoleKey, L"FontSize", nullptr, nullptr, - (LPBYTE)&FontSize, &dwordSize); - - RegSetValueExW(hConsoleKey, L"FontFamily", 0, REG_DWORD, - (BYTE*)&TestFontFamily, sizeof(TestFontFamily)); - RegSetValueExW(hConsoleKey, L"FaceName", 0, REG_SZ, - (BYTE*)TestFaceName, - (DWORD)((wcslen(TestFaceName) + 1) * sizeof(WCHAR))); - RegSetValueExW(hConsoleKey, L"FontSize", 0, REG_DWORD, - (BYTE*)&TestFontSize, sizeof(TestFontSize)); - - restoreConsole = true; - forceNewConsole = true; - } - } else { - std::cerr << "RegGetValueW(FontFamily) failed!" << std::endl; - } - RegCloseKey(hConsoleKey); - } else { - std::cerr << "RegOpenKeyExW(HKEY_CURRENT_USER\\Console) failed!" - << std::endl; - } - } - if (forceNewConsole || GetConsoleMode(parentOut, &consoleMode) == 0) { - // Not a real console, let's create new one. - FreeConsole(); - if (!AllocConsole()) { - std::cerr << "AllocConsole failed!" << std::endl; - return didFail; - } - SECURITY_ATTRIBUTES securityAttributes; - securityAttributes.nLength = sizeof(SECURITY_ATTRIBUTES); - securityAttributes.bInheritHandle = TRUE; - securityAttributes.lpSecurityDescriptor = nullptr; - hIn = CreateFileW(L"CONIN$", GENERIC_READ | GENERIC_WRITE, - FILE_SHARE_READ | FILE_SHARE_WRITE, &securityAttributes, - OPEN_EXISTING, 0, nullptr); - if (hIn == INVALID_HANDLE_VALUE) { - DWORD lastError = GetLastError(); - std::cerr << "CreateFile(CONIN$)" << std::endl; - displayError(lastError); - } - hOut = CreateFileW(L"CONOUT$", GENERIC_READ | GENERIC_WRITE, - FILE_SHARE_READ | FILE_SHARE_WRITE, &securityAttributes, - OPEN_EXISTING, 0, nullptr); - if (hOut == INVALID_HANDLE_VALUE) { - DWORD lastError = GetLastError(); - std::cerr << "CreateFile(CONOUT$)" << std::endl; - displayError(lastError); - } - SetStdHandle(STD_INPUT_HANDLE, hIn); - SetStdHandle(STD_OUTPUT_HANDLE, hOut); - SetStdHandle(STD_ERROR_HANDLE, hOut); - newConsole = true; - } - -# if _WIN32_WINNT >= _WIN32_WINNT_VISTA - if (isVistaOrGreater) { - CONSOLE_FONT_INFOEX consoleFont; - memset(&consoleFont, 0, sizeof(consoleFont)); - consoleFont.cbSize = sizeof(consoleFont); - HMODULE kernel32 = LoadLibraryW(L"kernel32.dll"); - typedef BOOL(WINAPI * GetCurrentConsoleFontExFunc)( - HANDLE hConsoleOutput, BOOL bMaximumWindow, - PCONSOLE_FONT_INFOEX lpConsoleCurrentFontEx); - typedef BOOL(WINAPI * SetCurrentConsoleFontExFunc)( - HANDLE hConsoleOutput, BOOL bMaximumWindow, - PCONSOLE_FONT_INFOEX lpConsoleCurrentFontEx); - GetCurrentConsoleFontExFunc getConsoleFont = - (GetCurrentConsoleFontExFunc)GetProcAddress(kernel32, - "GetCurrentConsoleFontEx"); - SetCurrentConsoleFontExFunc setConsoleFont = - (SetCurrentConsoleFontExFunc)GetProcAddress(kernel32, - "SetCurrentConsoleFontEx"); - if (getConsoleFont(hOut, FALSE, &consoleFont)) { - if (consoleFont.FontFamily != TestFontFamily) { - consoleFont.FontFamily = TestFontFamily; - wcscpy(consoleFont.FaceName, TestFaceName); - if (!setConsoleFont(hOut, FALSE, &consoleFont)) { - std::cerr << "SetCurrentConsoleFontEx failed!" 
<< std::endl; - } - } - } else { - std::cerr << "GetCurrentConsoleFontEx failed!" << std::endl; - } - } else { -# endif - if (restoreConsole && - RegOpenKeyExW(HKEY_CURRENT_USER, L"Console", 0, KEY_WRITE, - &hConsoleKey) == ERROR_SUCCESS) { - RegSetValueExW(hConsoleKey, L"FontFamily", 0, REG_DWORD, - (BYTE*)&FontFamily, sizeof(FontFamily)); - if (FaceName[0] != 0) { - RegSetValueExW(hConsoleKey, L"FaceName", 0, REG_SZ, (BYTE*)FaceName, - FaceNameSize); - } else { - RegDeleteValueW(hConsoleKey, L"FaceName"); - } - RegSetValueExW(hConsoleKey, L"FontSize", 0, REG_DWORD, (BYTE*)&FontSize, - sizeof(FontSize)); - RegCloseKey(hConsoleKey); - } -# if _WIN32_WINNT >= _WIN32_WINNT_VISTA - } -# endif - - if (createProcess(nullptr, nullptr, nullptr)) { - try { - DWORD status; - if ((status = WaitForSingleObject(beforeInputEvent, waitTimeout)) != - WAIT_OBJECT_0) { - std::cerr.setf(std::ios::hex, std::ios::basefield); - std::cerr << "WaitForSingleObject returned unexpected status 0x" - << status << std::endl; - std::cerr.unsetf(std::ios::hex); - throw std::runtime_error("WaitForSingleObject#1 failed!"); - } - INPUT_RECORD inputBuffer[(sizeof(UnicodeInputTestString) / - sizeof(UnicodeInputTestString[0])) * - 2]; - memset(&inputBuffer, 0, sizeof(inputBuffer)); - unsigned int i; - for (i = 0; i < (sizeof(UnicodeInputTestString) / - sizeof(UnicodeInputTestString[0]) - - 1); - i++) { - writeInputKeyEvent(&inputBuffer[i * 2], UnicodeInputTestString[i]); - } - writeInputKeyEvent(&inputBuffer[i * 2], VK_RETURN); - DWORD eventsWritten = 0; - // We need to wait a bit before writing to console so child process have - // started waiting for input on stdin. - Sleep(300); - if (!WriteConsoleInputW(hIn, inputBuffer, - sizeof(inputBuffer) / sizeof(inputBuffer[0]), - &eventsWritten) || - eventsWritten == 0) { - throw std::runtime_error("WriteConsoleInput failed!"); - } - if ((status = WaitForSingleObject(afterOutputEvent, waitTimeout)) != - WAIT_OBJECT_0) { - std::cerr.setf(std::ios::hex, std::ios::basefield); - std::cerr << "WaitForSingleObject returned unexpected status 0x" - << status << std::endl; - std::cerr.unsetf(std::ios::hex); - throw std::runtime_error("WaitForSingleObject#2 failed!"); - } - CONSOLE_SCREEN_BUFFER_INFO screenBufferInfo; - if (!GetConsoleScreenBufferInfo(hOut, &screenBufferInfo)) { - throw std::runtime_error("GetConsoleScreenBufferInfo failed!"); - } - - COORD coord; - DWORD charsRead = 0; - coord.X = 0; - coord.Y = screenBufferInfo.dwCursorPosition.Y - 4; - WCHAR* outputBuffer = new WCHAR[screenBufferInfo.dwSize.X * 4]; - if (!ReadConsoleOutputCharacterW(hOut, outputBuffer, - screenBufferInfo.dwSize.X * 4, coord, - &charsRead) || - charsRead == 0) { - delete[] outputBuffer; - throw std::runtime_error("ReadConsoleOutputCharacter failed!"); - } - std::wstring wideTestString = kwsys::Encoding::ToWide(encodedTestString); - std::replace(wideTestString.begin(), wideTestString.end(), '\0', ' '); - std::wstring wideInputTestString = - kwsys::Encoding::ToWide(encodedInputTestString); - if (memcmp(outputBuffer, wideTestString.c_str(), - wideTestString.size() * sizeof(wchar_t)) == 0 && - memcmp(outputBuffer + screenBufferInfo.dwSize.X * 1, - wideTestString.c_str(), - wideTestString.size() * sizeof(wchar_t)) == 0 && - memcmp(outputBuffer + screenBufferInfo.dwSize.X * 2, - UnicodeInputTestString, - sizeof(UnicodeInputTestString) - sizeof(WCHAR)) == 0 && - memcmp(outputBuffer + screenBufferInfo.dwSize.X * 3, - wideInputTestString.c_str(), - (wideInputTestString.size() - 1) * sizeof(wchar_t)) == 0) { - 
didFail = 0; - } else { - std::cerr << "Console's output didn't match expected output!" - << std::endl; - dumpBuffers(wideTestString.c_str(), outputBuffer, - wideTestString.size()); - dumpBuffers(wideTestString.c_str(), - outputBuffer + screenBufferInfo.dwSize.X * 1, - wideTestString.size()); - dumpBuffers( - UnicodeInputTestString, outputBuffer + screenBufferInfo.dwSize.X * 2, - (sizeof(UnicodeInputTestString) - 1) / sizeof(WCHAR)); - dumpBuffers(wideInputTestString.c_str(), - outputBuffer + screenBufferInfo.dwSize.X * 3, - wideInputTestString.size() - 1); - } - delete[] outputBuffer; - } catch (const std::runtime_error& ex) { - DWORD lastError = GetLastError(); - std::cerr << "In function testConsole, line " << __LINE__ << ": " - << ex.what() << std::endl; - displayError(lastError); - } - finishProcess(didFail == 0); - } - if (newConsole) { - SetStdHandle(STD_INPUT_HANDLE, parentIn); - SetStdHandle(STD_OUTPUT_HANDLE, parentOut); - SetStdHandle(STD_ERROR_HANDLE, parentErr); - CloseHandle(hIn); - CloseHandle(hOut); - FreeConsole(); - } - return didFail; -} - -#endif - -int testConsoleBuf(int, char* []) -{ - int ret = 0; - -#if defined(_WIN32) - beforeInputEvent = CreateEventW(nullptr, - FALSE, // auto-reset event - FALSE, // initial state is nonsignaled - BeforeInputEventName); // object name - if (!beforeInputEvent) { - std::cerr << "CreateEvent#1 failed " << GetLastError() << std::endl; - return 1; - } - - afterOutputEvent = CreateEventW(nullptr, FALSE, FALSE, AfterOutputEventName); - if (!afterOutputEvent) { - std::cerr << "CreateEvent#2 failed " << GetLastError() << std::endl; - return 1; - } - - encodedTestString = kwsys::Encoding::ToNarrow(std::wstring( - UnicodeTestString, sizeof(UnicodeTestString) / sizeof(wchar_t) - 1)); - encodedInputTestString = kwsys::Encoding::ToNarrow( - std::wstring(UnicodeInputTestString, - sizeof(UnicodeInputTestString) / sizeof(wchar_t) - 1)); - encodedInputTestString += "\n"; - - ret |= testPipe(); - ret |= testFile(); - ret |= testConsole(); - - CloseHandle(beforeInputEvent); - CloseHandle(afterOutputEvent); -#endif - - return ret; -} diff --git a/test/API/driver/kwsys/testConsoleBuf.hxx b/test/API/driver/kwsys/testConsoleBuf.hxx deleted file mode 100644 index e93cb4f0a1c..00000000000 --- a/test/API/driver/kwsys/testConsoleBuf.hxx +++ /dev/null @@ -1,17 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef testConsoleBuf_hxx -#define testConsoleBuf_hxx - -static const wchar_t cmdConsoleBufChild[] = L"testConsoleBufChild"; - -static const wchar_t BeforeInputEventName[] = L"BeforeInputEvent"; -static const wchar_t AfterOutputEventName[] = L"AfterOutputEvent"; - -// यूनिकोड είναι здорово! -static const wchar_t UnicodeTestString[] = - L"\u092F\u0942\u0928\u093F\u0915\u094B\u0921 " - L"\u03B5\u03AF\u03BD\0\u03B1\u03B9 " - L"\u0437\u0434\u043E\u0440\u043E\u0432\u043E!"; - -#endif diff --git a/test/API/driver/kwsys/testConsoleBufChild.cxx b/test/API/driver/kwsys/testConsoleBufChild.cxx deleted file mode 100644 index 3c8fdc2e13b..00000000000 --- a/test/API/driver/kwsys/testConsoleBufChild.cxx +++ /dev/null @@ -1,55 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" - -#include KWSYS_HEADER(ConsoleBuf.hxx) -#include KWSYS_HEADER(Encoding.hxx) - -// Work-around CMake dependency scanning limitation. 
This must -// duplicate the above list of headers. -#if 0 -# include "ConsoleBuf.hxx.in" -# include "Encoding.hxx.in" -#endif - -#include - -#include "testConsoleBuf.hxx" - -int main(int argc, const char* argv[]) -{ -#if defined(_WIN32) - kwsys::ConsoleBuf::Manager out(std::cout); - kwsys::ConsoleBuf::Manager err(std::cerr, true); - kwsys::ConsoleBuf::Manager in(std::cin); - - if (argc > 1) { - std::cout << argv[1] << std::endl; - std::cerr << argv[1] << std::endl; - } else { - std::string str = kwsys::Encoding::ToNarrow(std::wstring( - UnicodeTestString, sizeof(UnicodeTestString) / sizeof(wchar_t) - 1)); - std::cout << str << std::endl; - std::cerr << str << std::endl; - } - - std::string input; - HANDLE event = OpenEventW(EVENT_MODIFY_STATE, FALSE, BeforeInputEventName); - if (event) { - SetEvent(event); - CloseHandle(event); - } - - std::cin >> input; - std::cout << input << std::endl; - event = OpenEventW(EVENT_MODIFY_STATE, FALSE, AfterOutputEventName); - if (event) { - SetEvent(event); - CloseHandle(event); - } -#else - static_cast(argc); - static_cast(argv); -#endif - return 0; -} diff --git a/test/API/driver/kwsys/testDirectory.cxx b/test/API/driver/kwsys/testDirectory.cxx deleted file mode 100644 index b1ab0c87270..00000000000 --- a/test/API/driver/kwsys/testDirectory.cxx +++ /dev/null @@ -1,110 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying -file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(Directory.hxx) -#include KWSYS_HEADER(Encoding.hxx) -#include KWSYS_HEADER(SystemTools.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "Directory.hxx.in" -# include "Encoding.hxx.in" -# include "SystemTools.hxx.in" -#endif - -#include -#include -#include - -#include - -int _doLongPathTest() -{ - using namespace kwsys; - static const int LONG_PATH_THRESHOLD = 512; - int res = 0; - std::string topdir(TEST_SYSTEMTOOLS_BINARY_DIR "/directory_testing/"); - std::stringstream testpathstrm; - std::string testdirpath; - std::string extendedtestdirpath; - - testpathstrm << topdir; - size_t pathlen = testpathstrm.str().length(); - testpathstrm.seekp(0, std::ios_base::end); - while (pathlen < LONG_PATH_THRESHOLD) { - testpathstrm << "0123456789/"; - pathlen = testpathstrm.str().length(); - } - - testdirpath = testpathstrm.str(); -#ifdef _WIN32 - extendedtestdirpath = - Encoding::ToNarrow(SystemTools::ConvertToWindowsExtendedPath(testdirpath)); -#else - extendedtestdirpath = testdirpath; -#endif - - if (SystemTools::MakeDirectory(extendedtestdirpath)) { - std::ofstream testfile1( - (extendedtestdirpath + "longfilepathtest1.txt").c_str()); - std::ofstream testfile2( - (extendedtestdirpath + "longfilepathtest2.txt").c_str()); - testfile1 << "foo"; - testfile2 << "bar"; - testfile1.close(); - testfile2.close(); - - Directory testdir; - // Set res to failure if the directory doesn't load - res += !testdir.Load(testdirpath); - // Increment res failure if the directory appears empty - res += testdir.GetNumberOfFiles() == 0; - // Increment res failures if the path has changed from - // what was provided. 
- res += testdirpath != testdir.GetPath(); - - SystemTools::RemoveADirectory(topdir); - } else { - std::cerr << "Failed to create directory with long path: " - << extendedtestdirpath << std::endl; - res += 1; - } - return res; -} - -int _copyDirectoryTest() -{ - using namespace kwsys; - const std::string source(TEST_SYSTEMTOOLS_BINARY_DIR - "/directory_testing/copyDirectoryTestSrc"); - if (SystemTools::PathExists(source)) { - std::cerr << source << " shouldn't exist before test" << std::endl; - return 1; - } - const std::string destination(TEST_SYSTEMTOOLS_BINARY_DIR - "/directory_testing/copyDirectoryTestDst"); - if (SystemTools::PathExists(destination)) { - std::cerr << destination << " shouldn't exist before test" << std::endl; - return 2; - } - const bool copysuccess = SystemTools::CopyADirectory(source, destination); - const bool destinationexists = SystemTools::PathExists(destination); - if (copysuccess) { - std::cerr << "CopyADirectory should have returned false" << std::endl; - SystemTools::RemoveADirectory(destination); - return 3; - } - if (destinationexists) { - std::cerr << "CopyADirectory returned false, but destination directory" - << " has been created" << std::endl; - SystemTools::RemoveADirectory(destination); - return 4; - } - return 0; -} - -int testDirectory(int, char* []) -{ - return _doLongPathTest() + _copyDirectoryTest(); -} diff --git a/test/API/driver/kwsys/testDynamicLoader.cxx b/test/API/driver/kwsys/testDynamicLoader.cxx deleted file mode 100644 index 2421ac0e154..00000000000 --- a/test/API/driver/kwsys/testDynamicLoader.cxx +++ /dev/null @@ -1,133 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" - -#include KWSYS_HEADER(DynamicLoader.hxx) - -#if defined(__BEOS__) || defined(__HAIKU__) -# include /* disable_debugger() API. */ -#endif - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "DynamicLoader.hxx.in" -#endif - -#include -#include - -// Include with <> instead of "" to avoid getting any in-source copy -// left on disk. -#include - -static std::string GetLibName(const char* lname, const char* subdir = nullptr) -{ - // Construct proper name of lib - std::string slname; - slname = EXECUTABLE_OUTPUT_PATH; - if (subdir) { - slname += "/"; - slname += subdir; - } -#ifdef CMAKE_INTDIR - slname += "/"; - slname += CMAKE_INTDIR; -#endif - slname += "/"; - slname += kwsys::DynamicLoader::LibPrefix(); - slname += lname; - slname += kwsys::DynamicLoader::LibExtension(); - - return slname; -} - -/* libname = Library name (proper prefix, proper extension) - * System = symbol to lookup in libname - * r1: should OpenLibrary succeed ? - * r2: should GetSymbolAddress succeed ? - * r3: should CloseLibrary succeed ? 
- */ -static int TestDynamicLoader(const char* libname, const char* symbol, int r1, - int r2, int r3, int flags = 0) -{ - std::cerr << "Testing: " << libname << std::endl; - kwsys::DynamicLoader::LibraryHandle l = - kwsys::DynamicLoader::OpenLibrary(libname, flags); - // If result is incompatible with expectation just fails (xor): - if ((r1 && !l) || (!r1 && l)) { - std::cerr << "OpenLibrary: " << kwsys::DynamicLoader::LastError() - << std::endl; - return 1; - } - kwsys::DynamicLoader::SymbolPointer f = - kwsys::DynamicLoader::GetSymbolAddress(l, symbol); - if ((r2 && !f) || (!r2 && f)) { - std::cerr << "GetSymbolAddress: " << kwsys::DynamicLoader::LastError() - << std::endl; - return 1; - } -#ifndef __APPLE__ - int s = kwsys::DynamicLoader::CloseLibrary(l); - if ((r3 && !s) || (!r3 && s)) { - std::cerr << "CloseLibrary: " << kwsys::DynamicLoader::LastError() - << std::endl; - return 1; - } -#else - (void)r3; -#endif - return 0; -} - -int testDynamicLoader(int argc, char* argv[]) -{ -#if defined(_WIN32) - SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); -#elif defined(__BEOS__) || defined(__HAIKU__) - disable_debugger(1); -#endif - int res = 0; - if (argc == 3) { - // User specify a libname and symbol to check. - res = TestDynamicLoader(argv[1], argv[2], 1, 1, 1); - return res; - } - -// dlopen() on Syllable before 11/22/2007 doesn't return 0 on error -#ifndef __SYLLABLE__ - // Make sure that inexistent lib is giving correct result - res += TestDynamicLoader("azerty_", "foo_bar", 0, 0, 0); - // Make sure that random binary file cannot be assimilated as dylib - res += TestDynamicLoader(TEST_SYSTEMTOOLS_SOURCE_DIR "/testSystemTools.bin", - "wp", 0, 0, 0); -#endif - -#ifdef __linux__ - // This one is actually fun to test, since dlopen is by default - // loaded...wonder why :) - res += TestDynamicLoader("foobar.lib", "dlopen", 0, 1, 0); - res += TestDynamicLoader("libdl.so", "dlopen", 1, 1, 1); - res += TestDynamicLoader("libdl.so", "TestDynamicLoader", 1, 0, 1); -#endif - // Now try on the generated library - std::string libname = GetLibName(KWSYS_NAMESPACE_STRING "TestDynload"); - res += TestDynamicLoader(libname.c_str(), "dummy", 1, 0, 1); - res += TestDynamicLoader(libname.c_str(), "TestDynamicLoaderSymbolPointer", - 1, 1, 1); - res += TestDynamicLoader(libname.c_str(), "_TestDynamicLoaderSymbolPointer", - 1, 0, 1); - res += TestDynamicLoader(libname.c_str(), "TestDynamicLoaderData", 1, 1, 1); - res += TestDynamicLoader(libname.c_str(), "_TestDynamicLoaderData", 1, 0, 1); - -#ifdef _WIN32 - libname = GetLibName(KWSYS_NAMESPACE_STRING "TestDynloadUse", "dynloaddir"); - res += TestDynamicLoader(libname.c_str(), "dummy", 0, 0, 0); - res += TestDynamicLoader(libname.c_str(), "TestLoad", 1, 1, 1, - kwsys::DynamicLoader::SearchBesideLibrary); - res += TestDynamicLoader(libname.c_str(), "_TestLoad", 1, 0, 1, - kwsys::DynamicLoader::SearchBesideLibrary); -#endif - - return res; -} diff --git a/test/API/driver/kwsys/testDynload.c b/test/API/driver/kwsys/testDynload.c deleted file mode 100644 index c49f747df43..00000000000 --- a/test/API/driver/kwsys/testDynload.c +++ /dev/null @@ -1,13 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/ -#ifdef _WIN32 -# define DL_EXPORT __declspec(dllexport) -#else -# define DL_EXPORT -#endif - -DL_EXPORT int TestDynamicLoaderData = 0; - -DL_EXPORT void TestDynamicLoaderSymbolPointer() -{ -} diff --git a/test/API/driver/kwsys/testDynloadImpl.c b/test/API/driver/kwsys/testDynloadImpl.c deleted file mode 100644 index 2b9069bc7a4..00000000000 --- a/test/API/driver/kwsys/testDynloadImpl.c +++ /dev/null @@ -1,10 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ - -#include "testDynloadImpl.h" - -int TestDynamicLoaderImplData = 0; - -void TestDynamicLoaderImplSymbolPointer() -{ -} diff --git a/test/API/driver/kwsys/testDynloadImpl.h b/test/API/driver/kwsys/testDynloadImpl.h deleted file mode 100644 index d0c9dfb756b..00000000000 --- a/test/API/driver/kwsys/testDynloadImpl.h +++ /dev/null @@ -1,15 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifdef _WIN32 -# ifdef BUILDING_TestDynloadImpl -# define DLIMPL_EXPORT __declspec(dllexport) -# else -# define DLIMPL_EXPORT __declspec(dllimport) -# endif -#else -# define DLIMPL_EXPORT -#endif - -DLIMPL_EXPORT int TestDynamicLoaderImplData; - -DLIMPL_EXPORT void TestDynamicLoaderImplSymbolPointer(); diff --git a/test/API/driver/kwsys/testDynloadUse.c b/test/API/driver/kwsys/testDynloadUse.c deleted file mode 100644 index 5402add6a20..00000000000 --- a/test/API/driver/kwsys/testDynloadUse.c +++ /dev/null @@ -1,15 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "testDynloadImpl.h" - -#ifdef _WIN32 -# define DL_EXPORT __declspec(dllexport) -#else -# define DL_EXPORT -#endif - -DL_EXPORT int TestLoad() -{ - TestDynamicLoaderImplSymbolPointer(); - return TestDynamicLoaderImplData; -} diff --git a/test/API/driver/kwsys/testEncode.c b/test/API/driver/kwsys/testEncode.c deleted file mode 100644 index b7b6dd8458f..00000000000 --- a/test/API/driver/kwsys/testEncode.c +++ /dev/null @@ -1,67 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(MD5.h) - -/* Work-around CMake dependency scanning limitation. This must - duplicate the above list of headers. */ -#if 0 -# include "MD5.h.in" -#endif - -#include -#include - -static const unsigned char testMD5input1[] = - " A quick brown fox jumps over the lazy dog.\n" - " This is sample text for MD5 sum input.\n"; -static const char testMD5output1[] = "8f146af46ed4f267921bb937d4d3500c"; - -static const int testMD5input2len = 28; -static const unsigned char testMD5input2[] = "the cow jumped over the moon"; -static const char testMD5output2[] = "a2ad137b746138fae4e5adca9c85d3ae"; - -static int testMD5_1(kwsysMD5* md5) -{ - char md5out[33]; - kwsysMD5_Initialize(md5); - kwsysMD5_Append(md5, testMD5input1, -1); - kwsysMD5_FinalizeHex(md5, md5out); - md5out[32] = 0; - printf("md5sum 1: expected [%s]\n" - " got [%s]\n", - testMD5output1, md5out); - return (strcmp(md5out, testMD5output1) != 0) ? 
1 : 0; -} - -static int testMD5_2(kwsysMD5* md5) -{ - unsigned char digest[16]; - char md5out[33]; - kwsysMD5_Initialize(md5); - kwsysMD5_Append(md5, testMD5input2, testMD5input2len); - kwsysMD5_Finalize(md5, digest); - kwsysMD5_DigestToHex(digest, md5out); - md5out[32] = 0; - printf("md5sum 2: expected [%s]\n" - " got [%s]\n", - testMD5output2, md5out); - return (strcmp(md5out, testMD5output2) != 0) ? 1 : 0; -} - -int testEncode(int argc, char* argv[]) -{ - int result = 0; - (void)argc; - (void)argv; - - /* Test MD5 digest. */ - { - kwsysMD5* md5 = kwsysMD5_New(); - result |= testMD5_1(md5); - result |= testMD5_2(md5); - kwsysMD5_Delete(md5); - } - - return result; -} diff --git a/test/API/driver/kwsys/testEncoding.cxx b/test/API/driver/kwsys/testEncoding.cxx deleted file mode 100644 index 988697bff8d..00000000000 --- a/test/API/driver/kwsys/testEncoding.cxx +++ /dev/null @@ -1,286 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" - -#if defined(_MSC_VER) -# pragma warning(disable : 4786) -#endif - -#include KWSYS_HEADER(Encoding.hxx) -#include KWSYS_HEADER(Encoding.h) - -#include <algorithm> -#include <iostream> -#include <locale.h> -#include <stdlib.h> -#include <string.h> - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "Encoding.h.in" -# include "Encoding.hxx.in" -#endif - -static const unsigned char helloWorldStrings[][32] = { - // English - { 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', 0 }, - // Japanese - { 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x93, 0xE3, 0x81, 0xAB, 0xE3, 0x81, - 0xA1, 0xE3, 0x81, 0xAF, 0xE4, 0xB8, 0x96, 0xE7, 0x95, 0x8C, 0 }, - // Arabic - { 0xD9, 0x85, 0xD8, 0xB1, 0xD8, 0xAD, 0xD8, 0xA8, 0xD8, 0xA7, 0x20, 0xD8, - 0xA7, 0xD9, 0x84, 0xD8, 0xB9, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x85, 0 }, - // Yiddish - { 0xD7, 0x94, 0xD7, 0xA2, 0xD7, 0x9C, 0xD7, 0x90, 0x20, 0xD7, - 0x95, 0xD7, 0x95, 0xD7, 0xA2, 0xD7, 0x9C, 0xD7, 0x98, 0 }, - // Russian - { 0xD0, 0xBF, 0xD1, 0x80, 0xD0, 0xB8, 0xD0, 0xB2, 0xD0, 0xB5, - 0xD1, 0x82, 0x20, 0xD0, 0xBC, 0xD0, 0xB8, 0xD1, 0x80, 0 }, - // Latin - { 0x4D, 0x75, 0x6E, 0x64, 0x75, 0x73, 0x20, 0x73, 0x61, 0x6C, 0x76, 0x65, - 0 }, - // Swahili - { 0x68, 0x75, 0x6A, 0x61, 0x6D, 0x62, 0x6F, 0x20, 0x44, 0x75, 0x6E, 0x69, - 0x61, 0 }, - // Icelandic - { 0x48, 0x61, 0x6C, 0x6C, 0xC3, 0xB3, 0x20, 0x68, 0x65, 0x69, 0x6D, 0x75, - 0x72, 0 }, - { 0 } -}; - -static int testHelloWorldEncoding() -{ - int ret = 0; - for (int i = 0; helloWorldStrings[i][0] != 0; i++) { - std::string str = reinterpret_cast<const char*>(helloWorldStrings[i]); - std::cout << str << std::endl; - std::wstring wstr = kwsys::Encoding::ToWide(str); - std::string str2 = kwsys::Encoding::ToNarrow(wstr); - wchar_t* c_wstr = kwsysEncoding_DupToWide(str.c_str()); - char* c_str2 = kwsysEncoding_DupToNarrow(c_wstr); - if (!wstr.empty() && (str != str2 || strcmp(c_str2, str.c_str()))) { - std::cout << "converted string was different: " << str2 << std::endl; - std::cout << "converted string was different: " << c_str2 << std::endl; - ret++; - } - free(c_wstr); - free(c_str2); - } - return ret; -} - -static int testRobustEncoding() -{ - // test that the conversion functions handle invalid - // unicode correctly/gracefully - - // we manipulate the format flags of stdout, remember - // the original state here to restore before return - std::ios::fmtflags const& flags = std::cout.flags(); - - int ret = 0; - char cstr[] = { (char)-1, 0 }; - // this 
conversion could fail - std::wstring wstr = kwsys::Encoding::ToWide(cstr); - - wstr = kwsys::Encoding::ToWide(nullptr); - if (wstr != L"") { - const wchar_t* wcstr = wstr.c_str(); - std::cout << "ToWide(NULL) returned"; - for (size_t i = 0; i < wstr.size(); i++) { - std::cout << " " << std::hex << (int)wcstr[i]; - } - std::cout << std::endl; - ret++; - } - wstr = kwsys::Encoding::ToWide(""); - if (wstr != L"") { - const wchar_t* wcstr = wstr.c_str(); - std::cout << "ToWide(\"\") returned"; - for (size_t i = 0; i < wstr.size(); i++) { - std::cout << " " << std::hex << (int)wcstr[i]; - } - std::cout << std::endl; - ret++; - } - -#ifdef _WIN32 - // 16 bit wchar_t - we make an invalid surrogate pair - wchar_t cwstr[] = { 0xD801, 0xDA00, 0 }; - // this conversion could fail - std::string win_str = kwsys::Encoding::ToNarrow(cwstr); -#endif - - std::string str = kwsys::Encoding::ToNarrow(nullptr); - if (str != "") { - std::cout << "ToNarrow(NULL) returned " << str << std::endl; - ret++; - } - - str = kwsys::Encoding::ToNarrow(L""); - if (wstr != L"") { - std::cout << "ToNarrow(\"\") returned " << str << std::endl; - ret++; - } - - std::cout.flags(flags); - return ret; -} - -static int testWithNulls() -{ - int ret = 0; - std::vector strings; - strings.push_back(std::string("ab") + '\0' + 'c'); - strings.push_back(std::string("d") + '\0' + '\0' + 'e'); - strings.push_back(std::string() + '\0' + 'f'); - strings.push_back(std::string() + '\0' + '\0' + "gh"); - strings.push_back(std::string("ij") + '\0'); - strings.push_back(std::string("k") + '\0' + '\0'); - strings.push_back(std::string("\0\0\0\0", 4) + "lmn" + - std::string("\0\0\0\0", 4)); - for (std::vector::iterator it = strings.begin(); - it != strings.end(); ++it) { - std::wstring wstr = kwsys::Encoding::ToWide(*it); - std::string str = kwsys::Encoding::ToNarrow(wstr); - std::string s(*it); - std::replace(s.begin(), s.end(), '\0', ' '); - std::cout << "'" << s << "' (" << it->size() << ")" << std::endl; - if (str != *it) { - std::replace(str.begin(), str.end(), '\0', ' '); - std::cout << "string with null was different: '" << str << "' (" - << str.size() << ")" << std::endl; - ret++; - } - } - return ret; -} - -static int testCommandLineArguments() -{ - int status = 0; - - char const* argv[2] = { "./app.exe", (char const*)helloWorldStrings[1] }; - - kwsys::Encoding::CommandLineArguments args(2, argv); - kwsys::Encoding::CommandLineArguments arg2 = - kwsys::Encoding::CommandLineArguments(args); - - char const* const* u8_argv = args.argv(); - for (int i = 0; i < args.argc(); i++) { - char const* u8_arg = u8_argv[i]; - if (strcmp(argv[i], u8_arg) != 0) { - std::cout << "argv[" << i << "] " << argv[i] << " != " << u8_arg - << std::endl; - status++; - } - } - - kwsys::Encoding::CommandLineArguments args3 = - kwsys::Encoding::CommandLineArguments::Main(2, argv); - - return status; -} - -static int testToWindowsExtendedPath() -{ -#ifdef _WIN32 - int ret = 0; - if (kwsys::Encoding::ToWindowsExtendedPath( - "L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") != - L"\\\\?\\L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") { - std::cout << "Problem with ToWindowsExtendedPath " - << "\"L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\"" - << std::endl; - ++ret; - } - - if (kwsys::Encoding::ToWindowsExtendedPath( - "L:/Local Mojo/Hex Power Pack/Iffy Voodoo") != - L"\\\\?\\L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") { - std::cout << "Problem with ToWindowsExtendedPath " - << "\"L:/Local Mojo/Hex Power Pack/Iffy Voodoo\"" << std::endl; - ++ret; - } - - if 
(kwsys::Encoding::ToWindowsExtendedPath( - "\\\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") != - L"\\\\?\\UNC\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") { - std::cout << "Problem with ToWindowsExtendedPath " - << "\"\\\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\"" - << std::endl; - ++ret; - } - - if (kwsys::Encoding::ToWindowsExtendedPath( - "//Foo/Local Mojo/Hex Power Pack/Iffy Voodoo") != - L"\\\\?\\UNC\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") { - std::cout << "Problem with ToWindowsExtendedPath " - << "\"//Foo/Local Mojo/Hex Power Pack/Iffy Voodoo\"" - << std::endl; - ++ret; - } - - if (kwsys::Encoding::ToWindowsExtendedPath("//") != L"//") { - std::cout << "Problem with ToWindowsExtendedPath " - << "\"//\"" << std::endl; - ++ret; - } - - if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\") != L"\\\\.\\") { - std::cout << "Problem with ToWindowsExtendedPath " - << "\"\\\\.\\\"" << std::endl; - ++ret; - } - - if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\X") != L"\\\\.\\X") { - std::cout << "Problem with ToWindowsExtendedPath " - << "\"\\\\.\\X\"" << std::endl; - ++ret; - } - - if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\X:") != L"\\\\?\\X:") { - std::cout << "Problem with ToWindowsExtendedPath " - << "\"\\\\.\\X:\"" << std::endl; - ++ret; - } - - if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\X:\\") != - L"\\\\?\\X:\\") { - std::cout << "Problem with ToWindowsExtendedPath " - << "\"\\\\.\\X:\\\"" << std::endl; - ++ret; - } - - if (kwsys::Encoding::ToWindowsExtendedPath("NUL") != L"\\\\.\\NUL") { - std::cout << "Problem with ToWindowsExtendedPath " - << "\"NUL\"" << std::endl; - ++ret; - } - - return ret; -#else - return 0; -#endif -} - -int testEncoding(int, char* []) -{ - const char* loc = setlocale(LC_ALL, ""); - if (loc) { - std::cout << "Locale: " << loc << std::endl; - } else { - std::cout << "Locale: None" << std::endl; - } - - int ret = 0; - - ret |= testHelloWorldEncoding(); - ret |= testRobustEncoding(); - ret |= testCommandLineArguments(); - ret |= testWithNulls(); - ret |= testToWindowsExtendedPath(); - - return ret; -} diff --git a/test/API/driver/kwsys/testFStream.cxx b/test/API/driver/kwsys/testFStream.cxx deleted file mode 100644 index 5009e988773..00000000000 --- a/test/API/driver/kwsys/testFStream.cxx +++ /dev/null @@ -1,113 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" - -#if defined(_MSC_VER) -# pragma warning(disable : 4786) -#endif - -#include KWSYS_HEADER(FStream.hxx) -#include -#ifdef __BORLANDC__ -# include /* memcmp */ -#endif - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. 
-#if 0 -# include "FStream.hxx.in" -#endif - -#include - -static int testNoFile() -{ - kwsys::ifstream in_file("NoSuchFile.txt"); - if (in_file) { - return 1; - } - - return 0; -} - -static const int num_test_files = 7; -static const int max_test_file_size = 45; - -static kwsys::FStream::BOM expected_bom[num_test_files] = { - kwsys::FStream::BOM_None, kwsys::FStream::BOM_None, - kwsys::FStream::BOM_UTF8, kwsys::FStream::BOM_UTF16LE, - kwsys::FStream::BOM_UTF16BE, kwsys::FStream::BOM_UTF32LE, - kwsys::FStream::BOM_UTF32BE -}; - -static unsigned char expected_bom_data[num_test_files][5] = { - { 0 }, - { 0 }, - { 3, 0xEF, 0xBB, 0xBF }, - { 2, 0xFF, 0xFE }, - { 2, 0xFE, 0xFF }, - { 4, 0xFF, 0xFE, 0x00, 0x00 }, - { 4, 0x00, 0x00, 0xFE, 0xFF }, -}; - -static unsigned char file_data[num_test_files][max_test_file_size] = { - { 1, 'H' }, - { 11, 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd' }, - { 11, 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd' }, - { 22, 0x48, 0x00, 0x65, 0x00, 0x6C, 0x00, 0x6C, 0x00, 0x6F, 0x00, 0x20, - 0x00, 0x57, 0x00, 0x6F, 0x00, 0x72, 0x00, 0x6C, 0x00, 0x64, 0x00 }, - { 22, 0x00, 0x48, 0x00, 0x65, 0x00, 0x6C, 0x00, 0x6C, 0x00, 0x6F, 0x00, - 0x20, 0x00, 0x57, 0x00, 0x6F, 0x00, 0x72, 0x00, 0x6C, 0x00, 0x64 }, - { 44, 0x48, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, - 0x00, 0x6C, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, - 0x00, 0x57, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00, - 0x00, 0x6C, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00 }, - { 44, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, - 0x6C, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, - 0x72, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x64 }, -}; - -static int testBOM() -{ - // test various encodings in binary mode - for (int i = 0; i < num_test_files; i++) { - { - kwsys::ofstream out("bom.txt", kwsys::ofstream::binary); - out.write(reinterpret_cast(expected_bom_data[i] + 1), - *expected_bom_data[i]); - out.write(reinterpret_cast(file_data[i] + 1), - file_data[i][0]); - } - - kwsys::ifstream in("bom.txt", kwsys::ofstream::binary); - kwsys::FStream::BOM bom = kwsys::FStream::ReadBOM(in); - if (bom != expected_bom[i]) { - std::cout << "Unexpected BOM " << i << std::endl; - return 1; - } - char data[max_test_file_size]; - in.read(data, file_data[i][0]); - if (!in.good()) { - std::cout << "Unable to read data " << i << std::endl; - return 1; - } - - if (memcmp(data, file_data[i] + 1, file_data[i][0]) != 0) { - std::cout << "Incorrect read data " << i << std::endl; - return 1; - } - } - - return 0; -} - -int testFStream(int, char* []) -{ - int ret = 0; - - ret |= testNoFile(); - ret |= testBOM(); - - return ret; -} diff --git a/test/API/driver/kwsys/testFail.c b/test/API/driver/kwsys/testFail.c deleted file mode 100644 index 82caeac37f4..00000000000 --- a/test/API/driver/kwsys/testFail.c +++ /dev/null @@ -1,24 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. 
*/ -#include <stdio.h> -#include <stdlib.h> -#include <string.h> - -int testFail(int argc, char* argv[]) -{ - char* env = getenv("DASHBOARD_TEST_FROM_CTEST"); - int oldCtest = 0; - if (env) { - if (strcmp(env, "1") == 0) { - oldCtest = 1; - } - printf("DASHBOARD_TEST_FROM_CTEST = %s\n", env); - } - printf("%s: This test intentionally fails\n", argv[0]); - if (oldCtest) { - printf("The version of ctest is not able to handle intentionally failing " - "tests, so pass.\n"); - return 0; - } - return argc; -} diff --git a/test/API/driver/kwsys/testHashSTL.cxx b/test/API/driver/kwsys/testHashSTL.cxx deleted file mode 100644 index 4ed2f899d1c..00000000000 --- a/test/API/driver/kwsys/testHashSTL.cxx +++ /dev/null @@ -1,64 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(hash_map.hxx) -#include KWSYS_HEADER(hash_set.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "hash_map.hxx.in" -# include "hash_set.hxx.in" -#endif - -#include <iostream> - -#if defined(_MSC_VER) -# pragma warning(disable : 4786) -#endif - -#if defined(__sgi) && !defined(__GNUC__) -# pragma set woff 1468 /* inline function cannot be explicitly instantiated \ - */ -#endif - -template class kwsys::hash_map<const char*, int>; -template class kwsys::hash_set<int>; - -static bool test_hash_map() -{ - typedef kwsys::hash_map<const char*, int> mtype; - mtype m; - const char* keys[] = { "hello", "world" }; - m[keys[0]] = 1; - m.insert(mtype::value_type(keys[1], 2)); - int sum = 0; - for (mtype::iterator mi = m.begin(); mi != m.end(); ++mi) { - std::cout << "Found entry [" << mi->first << "," << mi->second << "]" - << std::endl; - sum += mi->second; - } - return sum == 3; -} - -static bool test_hash_set() -{ - typedef kwsys::hash_set<int> stype; - stype s; - s.insert(1); - s.insert(2); - int sum = 0; - for (stype::iterator si = s.begin(); si != s.end(); ++si) { - std::cout << "Found entry [" << *si << "]" << std::endl; - sum += *si; - } - return sum == 3; -} - -int testHashSTL(int, char* []) -{ - bool result = true; - result = test_hash_map() && result; - result = test_hash_set() && result; - return result ? 0 : 1; -} diff --git a/test/API/driver/kwsys/testProcess.c b/test/API/driver/kwsys/testProcess.c deleted file mode 100644 index 39aaa23ba85..00000000000 --- a/test/API/driver/kwsys/testProcess.c +++ /dev/null @@ -1,728 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(Process.h) -#include KWSYS_HEADER(Encoding.h) - -/* Work-around CMake dependency scanning limitation. This must - duplicate the above list of headers. */ -#if 0 -# include "Encoding.h.in" -# include "Process.h.in" -#endif - -#include <assert.h> -#include <limits.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> - -#if defined(_WIN32) -# include <windows.h> -#else -# include <signal.h> -# include <unistd.h> -#endif - -#if defined(__BORLANDC__) -# pragma warn - 8060 /* possibly incorrect assignment */ -#endif - -/* Platform-specific sleep functions. */ - -#if defined(__BEOS__) && !defined(__ZETA__) -/* BeOS 5 doesn't have usleep(), but it has snooze(), which is identical. */ -# include <be/kernel/OS.h> -static inline void testProcess_usleep(unsigned int usec) -{ - snooze(usec); -} -#elif defined(_WIN32) -/* Windows can only sleep in millisecond intervals. 
*/ -static void testProcess_usleep(unsigned int usec) -{ - Sleep(usec / 1000); -} -#else -# define testProcess_usleep usleep -#endif - -#if defined(_WIN32) -static void testProcess_sleep(unsigned int sec) -{ - Sleep(sec * 1000); -} -#else -static void testProcess_sleep(unsigned int sec) -{ - sleep(sec); -} -#endif - -int runChild(const char* cmd[], int state, int exception, int value, int share, - int output, int delay, double timeout, int poll, int repeat, - int disown, int createNewGroup, unsigned int interruptDelay); - -static int test1(int argc, const char* argv[]) -{ - /* This is a very basic functional test of kwsysProcess. It is repeated - numerous times to verify that there are no resource leaks in kwsysProcess - that eventually lead to an error. Many versions of OS X will fail after - 256 leaked file handles, so 257 iterations seems to be a good test. On - the other hand, too many iterations will cause the test to time out - - especially if the test is instrumented with e.g. valgrind. - - If you have problems with this test timing out on your system, or want to - run more than 257 iterations, you can change the number of iterations by - setting the KWSYS_TEST_PROCESS_1_COUNT environment variable. */ - (void)argc; - (void)argv; - fprintf(stdout, "Output on stdout from test returning 0.\n"); - fprintf(stderr, "Output on stderr from test returning 0.\n"); - return 0; -} - -static int test2(int argc, const char* argv[]) -{ - (void)argc; - (void)argv; - fprintf(stdout, "Output on stdout from test returning 123.\n"); - fprintf(stderr, "Output on stderr from test returning 123.\n"); - return 123; -} - -static int test3(int argc, const char* argv[]) -{ - (void)argc; - (void)argv; - fprintf(stdout, "Output before sleep on stdout from timeout test.\n"); - fprintf(stderr, "Output before sleep on stderr from timeout test.\n"); - fflush(stdout); - fflush(stderr); - testProcess_sleep(15); - fprintf(stdout, "Output after sleep on stdout from timeout test.\n"); - fprintf(stderr, "Output after sleep on stderr from timeout test.\n"); - return 0; -} - -static int test4(int argc, const char* argv[]) -{ -#ifndef CRASH_USING_ABORT - /* Prepare a pointer to an invalid address. Don't use null, because - dereferencing null is undefined behaviour and compilers are free to - do whatever they want. ex: Clang will warn at compile time, or even - optimize away the write. We hope to 'outsmart' them by using - 'volatile' and a slightly larger address, based on a runtime value. */ - volatile int* invalidAddress = 0; - invalidAddress += argc ? 1 : 2; -#endif - -#if defined(_WIN32) - /* Avoid error diagnostic popups since we are crashing on purpose. */ - SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); -#elif defined(__BEOS__) || defined(__HAIKU__) - /* Avoid error diagnostic popups since we are crashing on purpose. */ - disable_debugger(1); -#endif - (void)argc; - (void)argv; - fprintf(stdout, "Output before crash on stdout from crash test.\n"); - fprintf(stderr, "Output before crash on stderr from crash test.\n"); - fflush(stdout); - fflush(stderr); -#ifdef CRASH_USING_ABORT - abort(); -#else - assert(invalidAddress); /* Quiet Clang scan-build. */ - /* Provoke deliberate crash by writing to the invalid address. 
*/ - *invalidAddress = 0; -#endif - fprintf(stdout, "Output after crash on stdout from crash test.\n"); - fprintf(stderr, "Output after crash on stderr from crash test.\n"); - return 0; -} - -static int test5(int argc, const char* argv[]) -{ - int r; - const char* cmd[4]; - (void)argc; - cmd[0] = argv[0]; - cmd[1] = "run"; - cmd[2] = "4"; - cmd[3] = 0; - fprintf(stdout, "Output on stdout before recursive test.\n"); - fprintf(stderr, "Output on stderr before recursive test.\n"); - fflush(stdout); - fflush(stderr); - r = runChild(cmd, kwsysProcess_State_Exception, -#ifdef CRASH_USING_ABORT - kwsysProcess_Exception_Other, -#else - kwsysProcess_Exception_Fault, -#endif - 1, 1, 1, 0, 15, 0, 1, 0, 0, 0); - fprintf(stdout, "Output on stdout after recursive test.\n"); - fprintf(stderr, "Output on stderr after recursive test.\n"); - fflush(stdout); - fflush(stderr); - return r; -} - -#define TEST6_SIZE (4096 * 2) -static void test6(int argc, const char* argv[]) -{ - int i; - char runaway[TEST6_SIZE + 1]; - (void)argc; - (void)argv; - for (i = 0; i < TEST6_SIZE; ++i) { - runaway[i] = '.'; - } - runaway[TEST6_SIZE] = '\n'; - - /* Generate huge amounts of output to test killing. */ - for (;;) { - fwrite(runaway, 1, TEST6_SIZE + 1, stdout); - fflush(stdout); - } -} - -/* Define MINPOLL to be one more than the number of times output is - written. Define MAXPOLL to be the largest number of times a loop - delaying 1/10th of a second should ever have to poll. */ -#define MINPOLL 5 -#define MAXPOLL 20 -static int test7(int argc, const char* argv[]) -{ - (void)argc; - (void)argv; - fprintf(stdout, "Output on stdout before sleep.\n"); - fprintf(stderr, "Output on stderr before sleep.\n"); - fflush(stdout); - fflush(stderr); - /* Sleep for 1 second. */ - testProcess_sleep(1); - fprintf(stdout, "Output on stdout after sleep.\n"); - fprintf(stderr, "Output on stderr after sleep.\n"); - fflush(stdout); - fflush(stderr); - return 0; -} - -static int test8(int argc, const char* argv[]) -{ - /* Create a disowned grandchild to test handling of processes - that exit before their children. */ - int r; - const char* cmd[4]; - (void)argc; - cmd[0] = argv[0]; - cmd[1] = "run"; - cmd[2] = "108"; - cmd[3] = 0; - fprintf(stdout, "Output on stdout before grandchild test.\n"); - fprintf(stderr, "Output on stderr before grandchild test.\n"); - fflush(stdout); - fflush(stderr); - r = runChild(cmd, kwsysProcess_State_Disowned, kwsysProcess_Exception_None, - 1, 1, 1, 0, 10, 0, 1, 1, 0, 0); - fprintf(stdout, "Output on stdout after grandchild test.\n"); - fprintf(stderr, "Output on stderr after grandchild test.\n"); - fflush(stdout); - fflush(stderr); - return r; -} - -static int test8_grandchild(int argc, const char* argv[]) -{ - (void)argc; - (void)argv; - fprintf(stdout, "Output on stdout from grandchild before sleep.\n"); - fprintf(stderr, "Output on stderr from grandchild before sleep.\n"); - fflush(stdout); - fflush(stderr); - /* TODO: Instead of closing pipes here leave them open to make sure - the grandparent can stop listening when the parent exits. This - part of the test cannot be enabled until the feature is - implemented. */ - fclose(stdout); - fclose(stderr); - testProcess_sleep(15); - return 0; -} - -static int test9(int argc, const char* argv[]) -{ - /* Test Ctrl+C behavior: the root test program will send a Ctrl+C to this - process. Here, we start a child process that sleeps for a long time - while ignoring signals. 
The test is successful if this process waits - for the child to return before exiting from the Ctrl+C handler. - - WARNING: This test will falsely pass if the share parameter of runChild - was set to 0 when invoking the test9 process. */ - int r; - const char* cmd[4]; - (void)argc; - cmd[0] = argv[0]; - cmd[1] = "run"; - cmd[2] = "109"; - cmd[3] = 0; - fprintf(stdout, "Output on stdout before grandchild test.\n"); - fprintf(stderr, "Output on stderr before grandchild test.\n"); - fflush(stdout); - fflush(stderr); - r = runChild(cmd, kwsysProcess_State_Exited, kwsysProcess_Exception_None, 0, - 1, 1, 0, 30, 0, 1, 0, 0, 0); - /* This sleep will avoid a race condition between this function exiting - normally and our Ctrl+C handler exiting abnormally after the process - exits. */ - testProcess_sleep(1); - fprintf(stdout, "Output on stdout after grandchild test.\n"); - fprintf(stderr, "Output on stderr after grandchild test.\n"); - fflush(stdout); - fflush(stderr); - return r; -} - -#if defined(_WIN32) -static BOOL WINAPI test9_grandchild_handler(DWORD dwCtrlType) -{ - /* Ignore all Ctrl+C/Break signals. We must use an actual handler function - instead of using SetConsoleCtrlHandler(NULL, TRUE) so that we can also - ignore Ctrl+Break in addition to Ctrl+C. */ - (void)dwCtrlType; - return TRUE; -} -#endif - -static int test9_grandchild(int argc, const char* argv[]) -{ - /* The grandchild just sleeps for a few seconds while ignoring signals. */ - (void)argc; - (void)argv; -#if defined(_WIN32) - if (!SetConsoleCtrlHandler(test9_grandchild_handler, TRUE)) { - return 1; - } -#else - struct sigaction sa; - memset(&sa, 0, sizeof(sa)); - sa.sa_handler = SIG_IGN; - sigemptyset(&sa.sa_mask); - if (sigaction(SIGINT, &sa, 0) < 0) { - return 1; - } -#endif - fprintf(stdout, "Output on stdout from grandchild before sleep.\n"); - fprintf(stderr, "Output on stderr from grandchild before sleep.\n"); - fflush(stdout); - fflush(stderr); - /* Sleep for 9 seconds. */ - testProcess_sleep(9); - fprintf(stdout, "Output on stdout from grandchild after sleep.\n"); - fprintf(stderr, "Output on stderr from grandchild after sleep.\n"); - fflush(stdout); - fflush(stderr); - return 0; -} - -static int test10(int argc, const char* argv[]) -{ - /* Test Ctrl+C behavior: the root test program will send a Ctrl+C to this - process. Here, we start a child process that sleeps for a long time and - processes signals normally. However, this grandchild is created in a new - process group - ensuring that Ctrl+C we receive is sent to our process - groups. We make sure it exits anyway. */ - int r; - const char* cmd[4]; - (void)argc; - cmd[0] = argv[0]; - cmd[1] = "run"; - cmd[2] = "110"; - cmd[3] = 0; - fprintf(stdout, "Output on stdout before grandchild test.\n"); - fprintf(stderr, "Output on stderr before grandchild test.\n"); - fflush(stdout); - fflush(stderr); - r = - runChild(cmd, kwsysProcess_State_Exception, - kwsysProcess_Exception_Interrupt, 0, 1, 1, 0, 30, 0, 1, 0, 1, 0); - fprintf(stdout, "Output on stdout after grandchild test.\n"); - fprintf(stderr, "Output on stderr after grandchild test.\n"); - fflush(stdout); - fflush(stderr); - return r; -} - -static int test10_grandchild(int argc, const char* argv[]) -{ - /* The grandchild just sleeps for a few seconds and handles signals. */ - (void)argc; - (void)argv; - fprintf(stdout, "Output on stdout from grandchild before sleep.\n"); - fprintf(stderr, "Output on stderr from grandchild before sleep.\n"); - fflush(stdout); - fflush(stderr); - /* Sleep for 6 seconds. 
*/ - testProcess_sleep(6); - fprintf(stdout, "Output on stdout from grandchild after sleep.\n"); - fprintf(stderr, "Output on stderr from grandchild after sleep.\n"); - fflush(stdout); - fflush(stderr); - return 0; -} - -static int runChild2(kwsysProcess* kp, const char* cmd[], int state, - int exception, int value, int share, int output, - int delay, double timeout, int poll, int disown, - int createNewGroup, unsigned int interruptDelay) -{ - int result = 0; - char* data = 0; - int length = 0; - double userTimeout = 0; - double* pUserTimeout = 0; - kwsysProcess_SetCommand(kp, cmd); - if (timeout >= 0) { - kwsysProcess_SetTimeout(kp, timeout); - } - if (share) { - kwsysProcess_SetPipeShared(kp, kwsysProcess_Pipe_STDOUT, 1); - kwsysProcess_SetPipeShared(kp, kwsysProcess_Pipe_STDERR, 1); - } - if (disown) { - kwsysProcess_SetOption(kp, kwsysProcess_Option_Detach, 1); - } - if (createNewGroup) { - kwsysProcess_SetOption(kp, kwsysProcess_Option_CreateProcessGroup, 1); - } - kwsysProcess_Execute(kp); - - if (poll) { - pUserTimeout = &userTimeout; - } - - if (interruptDelay) { - testProcess_sleep(interruptDelay); - kwsysProcess_Interrupt(kp); - } - - if (!share && !disown) { - int p; - while ((p = kwsysProcess_WaitForData(kp, &data, &length, pUserTimeout))) { - if (output) { - if (poll && p == kwsysProcess_Pipe_Timeout) { - fprintf(stdout, "WaitForData timeout reached.\n"); - fflush(stdout); - - /* Count the number of times we polled without getting data. - If it is excessive then kill the child and fail. */ - if (++poll >= MAXPOLL) { - fprintf(stdout, "Poll count reached limit %d.\n", MAXPOLL); - kwsysProcess_Kill(kp); - } - } else { - fwrite(data, 1, (size_t)length, stdout); - fflush(stdout); - } - } - if (poll) { - /* Delay to avoid busy loop during polling. */ - testProcess_usleep(100000); - } - if (delay) { -/* Purposely sleeping only on Win32 to let pipe fill up. */ -#if defined(_WIN32) - testProcess_usleep(100000); -#endif - } - } - } - - if (disown) { - kwsysProcess_Disown(kp); - } else { - kwsysProcess_WaitForExit(kp, 0); - } - - switch (kwsysProcess_GetState(kp)) { - case kwsysProcess_State_Starting: - printf("No process has been executed.\n"); - break; - case kwsysProcess_State_Executing: - printf("The process is still executing.\n"); - break; - case kwsysProcess_State_Expired: - printf("Child was killed when timeout expired.\n"); - break; - case kwsysProcess_State_Exited: - printf("Child exited with value = %d\n", kwsysProcess_GetExitValue(kp)); - result = ((exception != kwsysProcess_GetExitException(kp)) || - (value != kwsysProcess_GetExitValue(kp))); - break; - case kwsysProcess_State_Killed: - printf("Child was killed by parent.\n"); - break; - case kwsysProcess_State_Exception: - printf("Child terminated abnormally: %s\n", - kwsysProcess_GetExceptionString(kp)); - result = ((exception != kwsysProcess_GetExitException(kp)) || - (value != kwsysProcess_GetExitValue(kp))); - break; - case kwsysProcess_State_Disowned: - printf("Child was disowned.\n"); - break; - case kwsysProcess_State_Error: - printf("Error in administrating child process: [%s]\n", - kwsysProcess_GetErrorString(kp)); - break; - } - - if (result) { - if (exception != kwsysProcess_GetExitException(kp)) { - fprintf(stderr, - "Mismatch in exit exception. " - "Should have been %d, was %d.\n", - exception, kwsysProcess_GetExitException(kp)); - } - if (value != kwsysProcess_GetExitValue(kp)) { - fprintf(stderr, - "Mismatch in exit value. 
" - "Should have been %d, was %d.\n", - value, kwsysProcess_GetExitValue(kp)); - } - } - - if (kwsysProcess_GetState(kp) != state) { - fprintf(stderr, - "Mismatch in state. " - "Should have been %d, was %d.\n", - state, kwsysProcess_GetState(kp)); - result = 1; - } - - /* We should have polled more times than there were data if polling - was enabled. */ - if (poll && poll < MINPOLL) { - fprintf(stderr, "Poll count is %d, which is less than %d.\n", poll, - MINPOLL); - result = 1; - } - - return result; -} - -/** - * Runs a child process and blocks until it returns. Arguments as follows: - * - * cmd = Command line to run. - * state = Expected return value of kwsysProcess_GetState after exit. - * exception = Expected return value of kwsysProcess_GetExitException. - * value = Expected return value of kwsysProcess_GetExitValue. - * share = Whether to share stdout/stderr child pipes with our pipes - * by way of kwsysProcess_SetPipeShared. If false, new pipes - * are created. - * output = If !share && !disown, whether to write the child's stdout - * and stderr output to our stdout. - * delay = If !share && !disown, adds an additional short delay to - * the pipe loop to allow the pipes to fill up; Windows only. - * timeout = Non-zero to sets a timeout in seconds via - * kwsysProcess_SetTimeout. - * poll = If !share && !disown, we count the number of 0.1 second - * intervals where the child pipes had no new data. We fail - * if not in the bounds of MINPOLL/MAXPOLL. - * repeat = Number of times to run the process. - * disown = If set, the process is disowned. - * createNewGroup = If set, the process is created in a new process group. - * interruptDelay = If non-zero, number of seconds to delay before - * interrupting the process. Note that this delay will occur - * BEFORE any reading/polling of pipes occurs and before any - * detachment occurs. - */ -int runChild(const char* cmd[], int state, int exception, int value, int share, - int output, int delay, double timeout, int poll, int repeat, - int disown, int createNewGroup, unsigned int interruptDelay) -{ - int result = 1; - kwsysProcess* kp = kwsysProcess_New(); - if (!kp) { - fprintf(stderr, "kwsysProcess_New returned NULL!\n"); - return 1; - } - while (repeat-- > 0) { - result = runChild2(kp, cmd, state, exception, value, share, output, delay, - timeout, poll, disown, createNewGroup, interruptDelay); - if (result) { - break; - } - } - kwsysProcess_Delete(kp); - return result; -} - -int main(int argc, const char* argv[]) -{ - int n = 0; - -#ifdef _WIN32 - int i; - char new_args[10][_MAX_PATH]; - LPWSTR* w_av = CommandLineToArgvW(GetCommandLineW(), &argc); - for (i = 0; i < argc; i++) { - kwsysEncoding_wcstombs(new_args[i], w_av[i], _MAX_PATH); - argv[i] = new_args[i]; - } - LocalFree(w_av); -#endif - -#if 0 - { - HANDLE out = GetStdHandle(STD_OUTPUT_HANDLE); - DuplicateHandle(GetCurrentProcess(), out, - GetCurrentProcess(), &out, 0, FALSE, - DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE); - SetStdHandle(STD_OUTPUT_HANDLE, out); - } - { - HANDLE out = GetStdHandle(STD_ERROR_HANDLE); - DuplicateHandle(GetCurrentProcess(), out, - GetCurrentProcess(), &out, 0, FALSE, - DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE); - SetStdHandle(STD_ERROR_HANDLE, out); - } -#endif - if (argc == 2) { - n = atoi(argv[1]); - } else if (argc == 3 && strcmp(argv[1], "run") == 0) { - n = atoi(argv[2]); - } - /* Check arguments. */ - if (((n >= 1 && n <= 10) || n == 108 || n == 109 || n == 110) && argc == 3) { - /* This is the child process for a requested test number. 
*/ - switch (n) { - case 1: - return test1(argc, argv); - case 2: - return test2(argc, argv); - case 3: - return test3(argc, argv); - case 4: - return test4(argc, argv); - case 5: - return test5(argc, argv); - case 6: - test6(argc, argv); - return 0; - case 7: - return test7(argc, argv); - case 8: - return test8(argc, argv); - case 9: - return test9(argc, argv); - case 10: - return test10(argc, argv); - case 108: - return test8_grandchild(argc, argv); - case 109: - return test9_grandchild(argc, argv); - case 110: - return test10_grandchild(argc, argv); - } - fprintf(stderr, "Invalid test number %d.\n", n); - return 1; - } else if (n >= 1 && n <= 10) { - /* This is the parent process for a requested test number. */ - int states[10] = { - kwsysProcess_State_Exited, kwsysProcess_State_Exited, - kwsysProcess_State_Expired, kwsysProcess_State_Exception, - kwsysProcess_State_Exited, kwsysProcess_State_Expired, - kwsysProcess_State_Exited, kwsysProcess_State_Exited, - kwsysProcess_State_Expired, /* Ctrl+C handler test */ - kwsysProcess_State_Exception /* Process group test */ - }; - int exceptions[10] = { - kwsysProcess_Exception_None, kwsysProcess_Exception_None, - kwsysProcess_Exception_None, -#ifdef CRASH_USING_ABORT - kwsysProcess_Exception_Other, -#else - kwsysProcess_Exception_Fault, -#endif - kwsysProcess_Exception_None, kwsysProcess_Exception_None, - kwsysProcess_Exception_None, kwsysProcess_Exception_None, - kwsysProcess_Exception_None, kwsysProcess_Exception_Interrupt - }; - int values[10] = { 0, 123, 1, 1, 0, 0, 0, 0, 1, 1 }; - int shares[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 }; - int outputs[10] = { 1, 1, 1, 1, 1, 0, 1, 1, 1, 1 }; - int delays[10] = { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 }; - double timeouts[10] = { 10, 10, 10, 30, 30, 10, -1, 10, 6, 4 }; - int polls[10] = { 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 }; - int repeat[10] = { 257, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; - int createNewGroups[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 }; - unsigned int interruptDelays[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 2 }; - int r; - const char* cmd[4]; -#ifdef _WIN32 - char* argv0 = 0; -#endif - char* test1IterationsStr = getenv("KWSYS_TEST_PROCESS_1_COUNT"); - if (test1IterationsStr) { - long int test1Iterations = strtol(test1IterationsStr, 0, 10); - if (test1Iterations > 10 && test1Iterations != LONG_MAX) { - repeat[0] = (int)test1Iterations; - } - } -#ifdef _WIN32 - if (n == 0 && (argv0 = strdup(argv[0]))) { - /* Try converting to forward slashes to see if it works. */ - char* c; - for (c = argv0; *c; ++c) { - if (*c == '\\') { - *c = '/'; - } - } - cmd[0] = argv0; - } else { - cmd[0] = argv[0]; - } -#else - cmd[0] = argv[0]; -#endif - cmd[1] = "run"; - cmd[2] = argv[1]; - cmd[3] = 0; - fprintf(stdout, "Output on stdout before test %d.\n", n); - fprintf(stderr, "Output on stderr before test %d.\n", n); - fflush(stdout); - fflush(stderr); - r = runChild(cmd, states[n - 1], exceptions[n - 1], values[n - 1], - shares[n - 1], outputs[n - 1], delays[n - 1], timeouts[n - 1], - polls[n - 1], repeat[n - 1], 0, createNewGroups[n - 1], - interruptDelays[n - 1]); - fprintf(stdout, "Output on stdout after test %d.\n", n); - fprintf(stderr, "Output on stderr after test %d.\n", n); - fflush(stdout); - fflush(stderr); -#if defined(_WIN32) - free(argv0); -#endif - return r; - } else if (argc > 2 && strcmp(argv[1], "0") == 0) { - /* This is the special debugging test to run a given command - line. 
*/ - const char** cmd = argv + 2; - int state = kwsysProcess_State_Exited; - int exception = kwsysProcess_Exception_None; - int value = 0; - double timeout = 0; - int r = - runChild(cmd, state, exception, value, 0, 1, 0, timeout, 0, 1, 0, 0, 0); - return r; - } else { - /* Improper usage. */ - fprintf(stdout, "Usage: %s \n", argv[0]); - return 1; - } -} diff --git a/test/API/driver/kwsys/testSharedForward.c.in b/test/API/driver/kwsys/testSharedForward.c.in deleted file mode 100644 index b3eb4138289..00000000000 --- a/test/API/driver/kwsys/testSharedForward.c.in +++ /dev/null @@ -1,27 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#if defined(CMAKE_INTDIR) -# define CONFIG_DIR_PRE CMAKE_INTDIR "/" -# define CONFIG_DIR_POST "/" CMAKE_INTDIR -#else -# define CONFIG_DIR_PRE "" -# define CONFIG_DIR_POST "" -#endif -#define @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD "@EXEC_DIR@" -#define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD "." CONFIG_DIR_POST -#define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL 0 -#define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD \ - CONFIG_DIR_PRE "@KWSYS_NAMESPACE@TestProcess" -#define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL \ - "@KWSYS_NAMESPACE@TestProcess" -#define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND "--command" -#define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT "--print" -#define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD "--ldd" -#if defined(CMAKE_INTDIR) -# define @KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME CMAKE_INTDIR -#endif -#include <@KWSYS_NAMESPACE@/SharedForward.h> -int main(int argc, char** argv) -{ - return @KWSYS_NAMESPACE@_shared_forward_to_real(argc, argv); -} diff --git a/test/API/driver/kwsys/testSystemInformation.cxx b/test/API/driver/kwsys/testSystemInformation.cxx deleted file mode 100644 index 154517eae4b..00000000000 --- a/test/API/driver/kwsys/testSystemInformation.cxx +++ /dev/null @@ -1,106 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(SystemInformation.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. 
-#if 0 -# include "SystemInformation.hxx.in" -#endif - -#include - -#if defined(KWSYS_USE_LONG_LONG) -# if defined(KWSYS_IOS_HAS_OSTREAM_LONG_LONG) -# define iostreamLongLong(x) (x) -# else -# define iostreamLongLong(x) ((long)x) -# endif -#elif defined(KWSYS_USE___INT64) -# if defined(KWSYS_IOS_HAS_OSTREAM___INT64) -# define iostreamLongLong(x) (x) -# else -# define iostreamLongLong(x) ((long)x) -# endif -#else -# error "No Long Long" -#endif - -#define printMethod(info, m) std::cout << #m << ": " << info.m() << "\n" - -#define printMethod2(info, m, unit) \ - std::cout << #m << ": " << info.m() << " " << unit << "\n" - -#define printMethod3(info, m, unit) \ - std::cout << #m << ": " << iostreamLongLong(info.m) << " " << unit << "\n" - -int testSystemInformation(int, char* []) -{ - std::cout << "CTEST_FULL_OUTPUT\n"; // avoid truncation - - kwsys::SystemInformation info; - info.RunCPUCheck(); - info.RunOSCheck(); - info.RunMemoryCheck(); - printMethod(info, GetOSName); - printMethod(info, GetOSIsLinux); - printMethod(info, GetOSIsApple); - printMethod(info, GetOSIsWindows); - printMethod(info, GetHostname); - printMethod(info, GetFullyQualifiedDomainName); - printMethod(info, GetOSRelease); - printMethod(info, GetOSVersion); - printMethod(info, GetOSPlatform); - printMethod(info, Is64Bits); - printMethod(info, GetVendorString); - printMethod(info, GetVendorID); - printMethod(info, GetTypeID); - printMethod(info, GetFamilyID); - printMethod(info, GetModelID); - printMethod(info, GetExtendedProcessorName); - printMethod(info, GetSteppingCode); - printMethod(info, GetProcessorSerialNumber); - printMethod2(info, GetProcessorCacheSize, "KB"); - printMethod(info, GetLogicalProcessorsPerPhysical); - printMethod2(info, GetProcessorClockFrequency, "MHz"); - printMethod(info, GetNumberOfLogicalCPU); - printMethod(info, GetNumberOfPhysicalCPU); - printMethod(info, DoesCPUSupportCPUID); - printMethod(info, GetProcessorAPICID); - printMethod2(info, GetTotalVirtualMemory, "MB"); - printMethod2(info, GetAvailableVirtualMemory, "MB"); - printMethod2(info, GetTotalPhysicalMemory, "MB"); - printMethod2(info, GetAvailablePhysicalMemory, "MB"); - printMethod3(info, GetHostMemoryTotal(), "KiB"); - printMethod3(info, GetHostMemoryAvailable("KWSHL"), "KiB"); - printMethod3(info, GetProcMemoryAvailable("KWSHL", "KWSPL"), "KiB"); - printMethod3(info, GetHostMemoryUsed(), "KiB"); - printMethod3(info, GetProcMemoryUsed(), "KiB"); - printMethod(info, GetLoadAverage); - - for (long int i = 0; i <= 31; i++) { - if (info.DoesCPUSupportFeature(static_cast(1) << i)) { - std::cout << "CPU feature " << i << "\n"; - } - } - - /* test stack trace - */ - std::cout << "Program Stack:" << std::endl - << kwsys::SystemInformation::GetProgramStack(0, 0) << std::endl - << std::endl; - - /* test segv handler - info.SetStackTraceOnError(1); - double *d = (double*)100; - *d=0; - */ - - /* test abort handler - info.SetStackTraceOnError(1); - abort(); - */ - - return 0; -} diff --git a/test/API/driver/kwsys/testSystemTools.bin b/test/API/driver/kwsys/testSystemTools.bin deleted file mode 100644 index 961a4043b9b2785351ab26a33cfcb1f366c1391b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 766 zcmbV~J8r`;5JX3D078KQr%G#;xrK8icPgzTq*cc{r`QAV5H3@?bYXZsLq;FVX_BRe z%g5!cJ`hlGG|ej%-%r3B^E=g0A5?{B&Opc7@6oZyO4pUdnM;@%vkIOsxNAjmXiL+d z<7 zAIM?AiVDY~%?XgU=c3&OkPg?P^8282@1&KxNx}UnZQM`N*8ME)+%M9>{YuT^22-rR A-v9sr diff --git a/test/API/driver/kwsys/testSystemTools.cxx b/test/API/driver/kwsys/testSystemTools.cxx 
deleted file mode 100644 index 1f3a15b5912..00000000000 --- a/test/API/driver/kwsys/testSystemTools.cxx +++ /dev/null @@ -1,1128 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" - -#if defined(_MSC_VER) -# pragma warning(disable : 4786) -#endif - -#include KWSYS_HEADER(FStream.hxx) -#include KWSYS_HEADER(SystemTools.hxx) - -// Work-around CMake dependency scanning limitation. This must -// duplicate the above list of headers. -#if 0 -# include "FStream.hxx.in" -# include "SystemTools.hxx.in" -#endif - -// Include with <> instead of "" to avoid getting any in-source copy -// left on disk. -#include <testSystemTools.h> - -#include <iostream> -#include <sstream> -#include <stdlib.h> /* free */ -#include <string.h> /* strcmp */ -#if defined(_WIN32) && !defined(__CYGWIN__) -# include <io.h> /* _umask (MSVC) / umask (Borland) */ -# ifdef _MSC_VER -# define umask _umask // Note this is still umask on Borland -# endif -#endif -#include <sys/stat.h> /* umask (POSIX), _S_I* constants (Windows) */ -// Visual C++ does not define mode_t (note that Borland does, however). -#if defined(_MSC_VER) -typedef unsigned short mode_t; -#endif - -static const char* toUnixPaths[][2] = { - { "/usr/local/bin/passwd", "/usr/local/bin/passwd" }, - { "/usr/lo cal/bin/pa sswd", "/usr/lo cal/bin/pa sswd" }, - { "/usr/lo\\ cal/bin/pa\\ sswd", "/usr/lo/ cal/bin/pa/ sswd" }, - { "c:/usr/local/bin/passwd", "c:/usr/local/bin/passwd" }, - { "c:/usr/lo cal/bin/pa sswd", "c:/usr/lo cal/bin/pa sswd" }, - { "c:/usr/lo\\ cal/bin/pa\\ sswd", "c:/usr/lo/ cal/bin/pa/ sswd" }, - { "\\usr\\local\\bin\\passwd", "/usr/local/bin/passwd" }, - { "\\usr\\lo cal\\bin\\pa sswd", "/usr/lo cal/bin/pa sswd" }, - { "\\usr\\lo\\ cal\\bin\\pa\\ sswd", "/usr/lo/ cal/bin/pa/ sswd" }, - { "c:\\usr\\local\\bin\\passwd", "c:/usr/local/bin/passwd" }, - { "c:\\usr\\lo cal\\bin\\pa sswd", "c:/usr/lo cal/bin/pa sswd" }, - { "c:\\usr\\lo\\ cal\\bin\\pa\\ sswd", "c:/usr/lo/ cal/bin/pa/ sswd" }, - { "\\\\usr\\local\\bin\\passwd", "//usr/local/bin/passwd" }, - { "\\\\usr\\lo cal\\bin\\pa sswd", "//usr/lo cal/bin/pa sswd" }, - { "\\\\usr\\lo\\ cal\\bin\\pa\\ sswd", "//usr/lo/ cal/bin/pa/ sswd" }, - { nullptr, nullptr } -}; - -static bool CheckConvertToUnixSlashes(std::string const& input, - std::string const& output) -{ - std::string result = input; - kwsys::SystemTools::ConvertToUnixSlashes(result); - if (result != output) { - std::cerr << "Problem with ConvertToUnixSlashes - input: " << input - << " output: " << result << " expected: " << output << std::endl; - return false; - } - return true; -} - -static const char* checkEscapeChars[][4] = { - { "1 foo 2 bar 2", "12", "\\", "\\1 foo \\2 bar \\2" }, - { " {} ", "{}", "#", " #{#} " }, - { nullptr, nullptr, nullptr, nullptr } -}; - -static bool CheckEscapeChars(std::string const& input, - const char* chars_to_escape, char escape_char, - std::string const& output) -{ - std::string result = kwsys::SystemTools::EscapeChars( - input.c_str(), chars_to_escape, escape_char); - if (result != output) { - std::cerr << "Problem with CheckEscapeChars - input: " << input - << " output: " << result << " expected: " << output << std::endl; - return false; - } - return true; -} - -static bool CheckFileOperations() -{ - bool res = true; - const std::string testNonExistingFile(TEST_SYSTEMTOOLS_SOURCE_DIR - "/testSystemToolsNonExistingFile"); - const std::string testDotFile(TEST_SYSTEMTOOLS_SOURCE_DIR "/."); - const std::string testBinFile(TEST_SYSTEMTOOLS_SOURCE_DIR - 
"/testSystemTools.bin"); - const std::string testTxtFile(TEST_SYSTEMTOOLS_SOURCE_DIR - "/testSystemTools.cxx"); - const std::string testNewDir(TEST_SYSTEMTOOLS_BINARY_DIR - "/testSystemToolsNewDir"); - const std::string testNewFile(testNewDir + "/testNewFile.txt"); - - if (kwsys::SystemTools::DetectFileType(testNonExistingFile.c_str()) != - kwsys::SystemTools::FileTypeUnknown) { - std::cerr << "Problem with DetectFileType - failed to detect type of: " - << testNonExistingFile << std::endl; - res = false; - } - - if (kwsys::SystemTools::DetectFileType(testDotFile.c_str()) != - kwsys::SystemTools::FileTypeUnknown) { - std::cerr << "Problem with DetectFileType - failed to detect type of: " - << testDotFile << std::endl; - res = false; - } - - if (kwsys::SystemTools::DetectFileType(testBinFile.c_str()) != - kwsys::SystemTools::FileTypeBinary) { - std::cerr << "Problem with DetectFileType - failed to detect type of: " - << testBinFile << std::endl; - res = false; - } - - if (kwsys::SystemTools::DetectFileType(testTxtFile.c_str()) != - kwsys::SystemTools::FileTypeText) { - std::cerr << "Problem with DetectFileType - failed to detect type of: " - << testTxtFile << std::endl; - res = false; - } - - if (kwsys::SystemTools::FileLength(testBinFile) != 766) { - std::cerr << "Problem with FileLength - incorrect length for: " - << testBinFile << std::endl; - res = false; - } - - kwsys::SystemTools::Stat_t buf; - if (kwsys::SystemTools::Stat(testTxtFile.c_str(), &buf) != 0) { - std::cerr << "Problem with Stat - unable to stat text file: " - << testTxtFile << std::endl; - res = false; - } - - if (kwsys::SystemTools::Stat(testBinFile, &buf) != 0) { - std::cerr << "Problem with Stat - unable to stat bin file: " << testBinFile - << std::endl; - res = false; - } - - if (!kwsys::SystemTools::MakeDirectory(testNewDir)) { - std::cerr << "Problem with MakeDirectory for: " << testNewDir << std::endl; - res = false; - } - // calling it again should just return true - if (!kwsys::SystemTools::MakeDirectory(testNewDir)) { - std::cerr << "Problem with second call to MakeDirectory for: " - << testNewDir << std::endl; - res = false; - } - // calling with 0 pointer should return false - if (kwsys::SystemTools::MakeDirectory(nullptr)) { - std::cerr << "Problem with MakeDirectory(0)" << std::endl; - res = false; - } - // calling with an empty string should return false - if (kwsys::SystemTools::MakeDirectory(std::string())) { - std::cerr << "Problem with MakeDirectory(std::string())" << std::endl; - res = false; - } - // check existence - if (!kwsys::SystemTools::FileExists(testNewDir.c_str(), false)) { - std::cerr << "Problem with FileExists as C string and not file for: " - << testNewDir << std::endl; - res = false; - } - // check existence - if (!kwsys::SystemTools::PathExists(testNewDir)) { - std::cerr << "Problem with PathExists for: " << testNewDir << std::endl; - res = false; - } - // remove it - if (!kwsys::SystemTools::RemoveADirectory(testNewDir)) { - std::cerr << "Problem with RemoveADirectory for: " << testNewDir - << std::endl; - res = false; - } - // check existence - if (kwsys::SystemTools::FileExists(testNewDir.c_str(), false)) { - std::cerr << "After RemoveADirectory: " - << "Problem with FileExists as C string and not file for: " - << testNewDir << std::endl; - res = false; - } - // check existence - if (kwsys::SystemTools::PathExists(testNewDir)) { - std::cerr << "After RemoveADirectory: " - << "Problem with PathExists for: " << testNewDir << std::endl; - res = false; - } - // create it using the char* 
version - if (!kwsys::SystemTools::MakeDirectory(testNewDir.c_str())) { - std::cerr << "Problem with second call to MakeDirectory as C string for: " - << testNewDir << std::endl; - res = false; - } - - if (!kwsys::SystemTools::Touch(testNewFile, true)) { - std::cerr << "Problem with Touch for: " << testNewFile << std::endl; - res = false; - } - // calling MakeDirectory with something that is no file should fail - if (kwsys::SystemTools::MakeDirectory(testNewFile)) { - std::cerr << "Problem with to MakeDirectory for: " << testNewFile - << std::endl; - res = false; - } - - // calling with 0 pointer should return false - if (kwsys::SystemTools::FileExists(nullptr)) { - std::cerr << "Problem with FileExists(0)" << std::endl; - res = false; - } - if (kwsys::SystemTools::FileExists(nullptr, true)) { - std::cerr << "Problem with FileExists(0) as file" << std::endl; - res = false; - } - // calling with an empty string should return false - if (kwsys::SystemTools::FileExists(std::string())) { - std::cerr << "Problem with FileExists(std::string())" << std::endl; - res = false; - } - // FileExists(x, true) should return false on a directory - if (kwsys::SystemTools::FileExists(testNewDir, true)) { - std::cerr << "Problem with FileExists as file for: " << testNewDir - << std::endl; - res = false; - } - if (kwsys::SystemTools::FileExists(testNewDir.c_str(), true)) { - std::cerr << "Problem with FileExists as C string and file for: " - << testNewDir << std::endl; - res = false; - } - // FileExists(x, false) should return true even on a directory - if (!kwsys::SystemTools::FileExists(testNewDir, false)) { - std::cerr << "Problem with FileExists as not file for: " << testNewDir - << std::endl; - res = false; - } - if (!kwsys::SystemTools::FileExists(testNewDir.c_str(), false)) { - std::cerr << "Problem with FileExists as C string and not file for: " - << testNewDir << std::endl; - res = false; - } - // should work, was created as new file before - if (!kwsys::SystemTools::FileExists(testNewFile)) { - std::cerr << "Problem with FileExists for: " << testNewFile << std::endl; - res = false; - } - if (!kwsys::SystemTools::FileExists(testNewFile.c_str())) { - std::cerr << "Problem with FileExists as C string for: " << testNewFile - << std::endl; - res = false; - } - if (!kwsys::SystemTools::FileExists(testNewFile, true)) { - std::cerr << "Problem with FileExists as file for: " << testNewFile - << std::endl; - res = false; - } - if (!kwsys::SystemTools::FileExists(testNewFile.c_str(), true)) { - std::cerr << "Problem with FileExists as C string and file for: " - << testNewFile << std::endl; - res = false; - } - - // calling with an empty string should return false - if (kwsys::SystemTools::PathExists(std::string())) { - std::cerr << "Problem with PathExists(std::string())" << std::endl; - res = false; - } - // PathExists(x) should return true on a directory - if (!kwsys::SystemTools::PathExists(testNewDir)) { - std::cerr << "Problem with PathExists for: " << testNewDir << std::endl; - res = false; - } - // should work, was created as new file before - if (!kwsys::SystemTools::PathExists(testNewFile)) { - std::cerr << "Problem with PathExists for: " << testNewFile << std::endl; - res = false; - } - -// Reset umask -#if defined(_WIN32) && !defined(__CYGWIN__) - // NOTE: Windows doesn't support toggling _S_IREAD. - mode_t fullMask = _S_IWRITE; -#else - // On a normal POSIX platform, we can toggle all permissions. 
- mode_t fullMask = S_IRWXU | S_IRWXG | S_IRWXO; -#endif - mode_t orig_umask = umask(fullMask); - - // Test file permissions without umask - mode_t origPerm, thisPerm; - if (!kwsys::SystemTools::GetPermissions(testNewFile, origPerm)) { - std::cerr << "Problem with GetPermissions (1) for: " << testNewFile - << std::endl; - res = false; - } - - if (!kwsys::SystemTools::SetPermissions(testNewFile, 0)) { - std::cerr << "Problem with SetPermissions (1) for: " << testNewFile - << std::endl; - res = false; - } - - if (!kwsys::SystemTools::GetPermissions(testNewFile, thisPerm)) { - std::cerr << "Problem with GetPermissions (2) for: " << testNewFile - << std::endl; - res = false; - } - - if ((thisPerm & fullMask) != 0) { - std::cerr << "SetPermissions failed to set permissions (1) for: " - << testNewFile << ": actual = " << thisPerm - << "; expected = " << 0 << std::endl; - res = false; - } - - // While we're at it, check proper TestFileAccess functionality. - if (kwsys::SystemTools::TestFileAccess(testNewFile, - kwsys::TEST_FILE_WRITE)) { - std::cerr - << "TestFileAccess incorrectly indicated that this is a writable file:" - << testNewFile << std::endl; - res = false; - } - - if (!kwsys::SystemTools::TestFileAccess(testNewFile, kwsys::TEST_FILE_OK)) { - std::cerr - << "TestFileAccess incorrectly indicated that this file does not exist:" - << testNewFile << std::endl; - res = false; - } - - // Test restoring/setting full permissions. - if (!kwsys::SystemTools::SetPermissions(testNewFile, fullMask)) { - std::cerr << "Problem with SetPermissions (2) for: " << testNewFile - << std::endl; - res = false; - } - - if (!kwsys::SystemTools::GetPermissions(testNewFile, thisPerm)) { - std::cerr << "Problem with GetPermissions (3) for: " << testNewFile - << std::endl; - res = false; - } - - if ((thisPerm & fullMask) != fullMask) { - std::cerr << "SetPermissions failed to set permissions (2) for: " - << testNewFile << ": actual = " << thisPerm - << "; expected = " << fullMask << std::endl; - res = false; - } - - // Test setting file permissions while honoring umask - if (!kwsys::SystemTools::SetPermissions(testNewFile, fullMask, true)) { - std::cerr << "Problem with SetPermissions (3) for: " << testNewFile - << std::endl; - res = false; - } - - if (!kwsys::SystemTools::GetPermissions(testNewFile, thisPerm)) { - std::cerr << "Problem with GetPermissions (4) for: " << testNewFile - << std::endl; - res = false; - } - - if ((thisPerm & fullMask) != 0) { - std::cerr << "SetPermissions failed to honor umask for: " << testNewFile - << ": actual = " << thisPerm << "; expected = " << 0 - << std::endl; - res = false; - } - - // Restore umask - umask(orig_umask); - - // Restore file permissions - if (!kwsys::SystemTools::SetPermissions(testNewFile, origPerm)) { - std::cerr << "Problem with SetPermissions (4) for: " << testNewFile - << std::endl; - res = false; - } - - // Remove the test file - if (!kwsys::SystemTools::RemoveFile(testNewFile)) { - std::cerr << "Problem with RemoveFile: " << testNewFile << std::endl; - res = false; - } - - std::string const testFileMissing(testNewDir + "/testMissingFile.txt"); - if (!kwsys::SystemTools::RemoveFile(testFileMissing)) { - std::string const& msg = kwsys::SystemTools::GetLastSystemError(); - std::cerr << "RemoveFile(\"" << testFileMissing << "\") failed: " << msg - << "\n"; - res = false; - } - - std::string const testFileMissingDir(testNewDir + "/missing/file.txt"); - if (!kwsys::SystemTools::RemoveFile(testFileMissingDir)) { - std::string const& msg = 
kwsys::SystemTools::GetLastSystemError(); - std::cerr << "RemoveFile(\"" << testFileMissingDir << "\") failed: " << msg - << "\n"; - res = false; - } - - kwsys::SystemTools::Touch(testNewFile, true); - if (!kwsys::SystemTools::RemoveADirectory(testNewDir)) { - std::cerr << "Problem with RemoveADirectory for: " << testNewDir - << std::endl; - res = false; - } - -#ifdef KWSYS_TEST_SYSTEMTOOLS_LONG_PATHS - // Perform the same file and directory creation and deletion tests but - // with paths > 256 characters in length. - - const std::string testNewLongDir( - TEST_SYSTEMTOOLS_BINARY_DIR - "/" - "012345678901234567890123456789012345678901234567890123456789" - "012345678901234567890123456789012345678901234567890123456789" - "012345678901234567890123456789012345678901234567890123456789" - "012345678901234567890123456789012345678901234567890123456789" - "01234567890123"); - const std::string testNewLongFile( - testNewLongDir + - "/" - "012345678901234567890123456789012345678901234567890123456789" - "012345678901234567890123456789012345678901234567890123456789" - "012345678901234567890123456789012345678901234567890123456789" - "012345678901234567890123456789012345678901234567890123456789" - "0123456789.txt"); - - if (!kwsys::SystemTools::MakeDirectory(testNewLongDir)) { - std::cerr << "Problem with MakeDirectory for: " << testNewLongDir - << std::endl; - res = false; - } - - if (!kwsys::SystemTools::Touch(testNewLongFile.c_str(), true)) { - std::cerr << "Problem with Touch for: " << testNewLongFile << std::endl; - res = false; - } - - if (!kwsys::SystemTools::RemoveFile(testNewLongFile)) { - std::cerr << "Problem with RemoveFile: " << testNewLongFile << std::endl; - res = false; - } - - kwsys::SystemTools::Touch(testNewLongFile.c_str(), true); - if (!kwsys::SystemTools::RemoveADirectory(testNewLongDir)) { - std::cerr << "Problem with RemoveADirectory for: " << testNewLongDir - << std::endl; - res = false; - } -#endif - - return res; -} - -static bool CheckStringOperations() -{ - bool res = true; - - std::string test = "mary had a little lamb."; - if (kwsys::SystemTools::CapitalizedWords(test) != - "Mary Had A Little Lamb.") { - std::cerr << "Problem with CapitalizedWords " << '"' << test << '"' - << std::endl; - res = false; - } - - test = "Mary Had A Little Lamb."; - if (kwsys::SystemTools::UnCapitalizedWords(test) != - "mary had a little lamb.") { - std::cerr << "Problem with UnCapitalizedWords " << '"' << test << '"' - << std::endl; - res = false; - } - - test = "MaryHadTheLittleLamb."; - if (kwsys::SystemTools::AddSpaceBetweenCapitalizedWords(test) != - "Mary Had The Little Lamb.") { - std::cerr << "Problem with AddSpaceBetweenCapitalizedWords " << '"' << test - << '"' << std::endl; - res = false; - } - - char* cres = - kwsys::SystemTools::AppendStrings("Mary Had A", " Little Lamb."); - if (strcmp(cres, "Mary Had A Little Lamb.")) { - std::cerr << "Problem with AppendStrings " - << "\"Mary Had A\" \" Little Lamb.\"" << std::endl; - res = false; - } - delete[] cres; - - cres = kwsys::SystemTools::AppendStrings("Mary Had", " A ", "Little Lamb."); - if (strcmp(cres, "Mary Had A Little Lamb.")) { - std::cerr << "Problem with AppendStrings " - << "\"Mary Had\" \" A \" \"Little Lamb.\"" << std::endl; - res = false; - } - delete[] cres; - - if (kwsys::SystemTools::CountChar("Mary Had A Little Lamb.", 'a') != 3) { - std::cerr << "Problem with CountChar " - << "\"Mary Had A Little Lamb.\"" << std::endl; - res = false; - } - - cres = kwsys::SystemTools::RemoveChars("Mary Had A Little Lamb.", "aeiou"); - 
if (strcmp(cres, "Mry Hd A Lttl Lmb.")) { - std::cerr << "Problem with RemoveChars " - << "\"Mary Had A Little Lamb.\"" << std::endl; - res = false; - } - delete[] cres; - - cres = kwsys::SystemTools::RemoveCharsButUpperHex("Mary Had A Little Lamb."); - if (strcmp(cres, "A")) { - std::cerr << "Problem with RemoveCharsButUpperHex " - << "\"Mary Had A Little Lamb.\"" << std::endl; - res = false; - } - delete[] cres; - - char* cres2 = strdup("Mary Had A Little Lamb."); - kwsys::SystemTools::ReplaceChars(cres2, "aeiou", 'X'); - if (strcmp(cres2, "MXry HXd A LXttlX LXmb.")) { - std::cerr << "Problem with ReplaceChars " - << "\"Mary Had A Little Lamb.\"" << std::endl; - res = false; - } - free(cres2); - - if (!kwsys::SystemTools::StringStartsWith("Mary Had A Little Lamb.", - "Mary ")) { - std::cerr << "Problem with StringStartsWith " - << "\"Mary Had A Little Lamb.\"" << std::endl; - res = false; - } - - if (!kwsys::SystemTools::StringEndsWith("Mary Had A Little Lamb.", - " Lamb.")) { - std::cerr << "Problem with StringEndsWith " - << "\"Mary Had A Little Lamb.\"" << std::endl; - res = false; - } - - cres = kwsys::SystemTools::DuplicateString("Mary Had A Little Lamb."); - if (strcmp(cres, "Mary Had A Little Lamb.")) { - std::cerr << "Problem with DuplicateString " - << "\"Mary Had A Little Lamb.\"" << std::endl; - res = false; - } - delete[] cres; - - test = "Mary Had A Little Lamb."; - if (kwsys::SystemTools::CropString(test, 13) != "Mary ...Lamb.") { - std::cerr << "Problem with CropString " - << "\"Mary Had A Little Lamb.\"" << std::endl; - res = false; - } - - std::vector lines; - kwsys::SystemTools::Split("Mary Had A Little Lamb.", lines, ' '); - if (lines[0] != "Mary" || lines[1] != "Had" || lines[2] != "A" || - lines[3] != "Little" || lines[4] != "Lamb.") { - std::cerr << "Problem with Split " - << "\"Mary Had A Little Lamb.\"" << std::endl; - res = false; - } - - if (kwsys::SystemTools::ConvertToWindowsOutputPath( - "L://Local Mojo/Hex Power Pack/Iffy Voodoo") != - "\"L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\"") { - std::cerr << "Problem with ConvertToWindowsOutputPath " - << "\"L://Local Mojo/Hex Power Pack/Iffy Voodoo\"" << std::endl; - res = false; - } - - if (kwsys::SystemTools::ConvertToWindowsOutputPath( - "//grayson/Local Mojo/Hex Power Pack/Iffy Voodoo") != - "\"\\\\grayson\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\"") { - std::cerr << "Problem with ConvertToWindowsOutputPath " - << "\"//grayson/Local Mojo/Hex Power Pack/Iffy Voodoo\"" - << std::endl; - res = false; - } - - if (kwsys::SystemTools::ConvertToUnixOutputPath( - "//Local Mojo/Hex Power Pack/Iffy Voodoo") != - "//Local\\ Mojo/Hex\\ Power\\ Pack/Iffy\\ Voodoo") { - std::cerr << "Problem with ConvertToUnixOutputPath " - << "\"//Local Mojo/Hex Power Pack/Iffy Voodoo\"" << std::endl; - res = false; - } - - return res; -} - -static bool CheckPutEnv(const std::string& env, const char* name, - const char* value) -{ - if (!kwsys::SystemTools::PutEnv(env)) { - std::cerr << "PutEnv(\"" << env << "\") failed!" << std::endl; - return false; - } - std::string v = "(null)"; - kwsys::SystemTools::GetEnv(name, v); - if (v != value) { - std::cerr << "GetEnv(\"" << name << "\") returned \"" << v << "\", not \"" - << value << "\"!" << std::endl; - return false; - } - return true; -} - -static bool CheckUnPutEnv(const char* env, const char* name) -{ - if (!kwsys::SystemTools::UnPutEnv(env)) { - std::cerr << "UnPutEnv(\"" << env << "\") failed!" 
<< std::endl; - return false; - } - std::string v; - if (kwsys::SystemTools::GetEnv(name, v)) { - std::cerr << "GetEnv(\"" << name << "\") returned \"" << v - << "\", not (null)!" << std::endl; - return false; - } - return true; -} - -static bool CheckEnvironmentOperations() -{ - bool res = true; - res &= CheckPutEnv("A=B", "A", "B"); - res &= CheckPutEnv("B=C", "B", "C"); - res &= CheckPutEnv("C=D", "C", "D"); - res &= CheckPutEnv("D=E", "D", "E"); - res &= CheckUnPutEnv("A", "A"); - res &= CheckUnPutEnv("B=", "B"); - res &= CheckUnPutEnv("C=D", "C"); - /* Leave "D=E" in environment so a memory checker can test for leaks. */ - return res; -} - -static bool CheckRelativePath(const std::string& local, - const std::string& remote, - const std::string& expected) -{ - std::string result = kwsys::SystemTools::RelativePath(local, remote); - if (!kwsys::SystemTools::ComparePath(expected, result)) { - std::cerr << "RelativePath(" << local << ", " << remote << ") yielded " - << result << " instead of " << expected << std::endl; - return false; - } - return true; -} - -static bool CheckRelativePaths() -{ - bool res = true; - res &= CheckRelativePath("/usr/share", "/bin/bash", "../../bin/bash"); - res &= CheckRelativePath("/usr/./share/", "/bin/bash", "../../bin/bash"); - res &= CheckRelativePath("/usr//share/", "/bin/bash", "../../bin/bash"); - res &= - CheckRelativePath("/usr/share/../bin/", "/bin/bash", "../../bin/bash"); - res &= CheckRelativePath("/usr/share", "/usr/share//bin", "bin"); - return res; -} - -static bool CheckCollapsePath(const std::string& path, - const std::string& expected, - const char* base = nullptr) -{ - std::string result = kwsys::SystemTools::CollapseFullPath(path, base); - if (!kwsys::SystemTools::ComparePath(expected, result)) { - std::cerr << "CollapseFullPath(" << path << ") yielded " << result - << " instead of " << expected << std::endl; - return false; - } - return true; -} - -static bool CheckCollapsePath() -{ - bool res = true; - res &= CheckCollapsePath("/usr/share/*", "/usr/share/*"); - res &= CheckCollapsePath("C:/Windows/*", "C:/Windows/*"); - res &= CheckCollapsePath("/usr/share/../lib", "/usr/lib"); - res &= CheckCollapsePath("/usr/share/./lib", "/usr/share/lib"); - res &= CheckCollapsePath("/usr/share/../../lib", "/lib"); - res &= CheckCollapsePath("/usr/share/.././../lib", "/lib"); - res &= CheckCollapsePath("/../lib", "/lib"); - res &= CheckCollapsePath("/../lib/", "/lib"); - res &= CheckCollapsePath("/", "/"); - res &= CheckCollapsePath("C:/", "C:/"); - res &= CheckCollapsePath("C:/../", "C:/"); - res &= CheckCollapsePath("C:/../../", "C:/"); - res &= CheckCollapsePath("../b", "../../b", "../"); - res &= CheckCollapsePath("../a/../b", "../b", "../rel"); - res &= CheckCollapsePath("a/../b", "../rel/b", "../rel"); - return res; -} - -static std::string StringVectorToString(const std::vector& vec) -{ - std::stringstream ss; - ss << "vector("; - for (std::vector::const_iterator i = vec.begin(); - i != vec.end(); ++i) { - if (i != vec.begin()) { - ss << ", "; - } - ss << *i; - } - ss << ")"; - return ss.str(); -} - -static bool CheckGetPath() -{ - const char* envName = "S"; -#ifdef _WIN32 - const char* envValue = "C:\\Somewhere\\something;D:\\Temp"; -#else - const char* envValue = "/Somewhere/something:/tmp"; -#endif - const char* registryPath = "[HKEY_LOCAL_MACHINE\\SOFTWARE\\MyApp; MyKey]"; - - std::vector originalPaths; - originalPaths.push_back(registryPath); - - std::vector expectedPaths; - expectedPaths.push_back(registryPath); -#ifdef _WIN32 - 
expectedPaths.push_back("C:/Somewhere/something"); - expectedPaths.push_back("D:/Temp"); -#else - expectedPaths.push_back("/Somewhere/something"); - expectedPaths.push_back("/tmp"); -#endif - - bool res = true; - res &= CheckPutEnv(std::string(envName) + "=" + envValue, envName, envValue); - - std::vector paths = originalPaths; - kwsys::SystemTools::GetPath(paths, envName); - - if (paths != expectedPaths) { - std::cerr << "GetPath(" << StringVectorToString(originalPaths) << ", " - << envName << ") yielded " << StringVectorToString(paths) - << " instead of " << StringVectorToString(expectedPaths) - << std::endl; - res = false; - } - - res &= CheckUnPutEnv(envName, envName); - return res; -} - -static bool CheckGetFilenameName() -{ - const char* windowsFilepath = "C:\\somewhere\\something"; - const char* unixFilepath = "/somewhere/something"; - -#if defined(_WIN32) || defined(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES) - std::string expectedWindowsFilename = "something"; -#else - std::string expectedWindowsFilename = "C:\\somewhere\\something"; -#endif - std::string expectedUnixFilename = "something"; - - bool res = true; - std::string filename = kwsys::SystemTools::GetFilenameName(windowsFilepath); - if (filename != expectedWindowsFilename) { - std::cerr << "GetFilenameName(" << windowsFilepath << ") yielded " - << filename << " instead of " << expectedWindowsFilename - << std::endl; - res = false; - } - - filename = kwsys::SystemTools::GetFilenameName(unixFilepath); - if (filename != expectedUnixFilename) { - std::cerr << "GetFilenameName(" << unixFilepath << ") yielded " << filename - << " instead of " << expectedUnixFilename << std::endl; - res = false; - } - return res; -} - -static bool CheckFind() -{ - bool res = true; - const std::string testFindFileName("testFindFile.txt"); - const std::string testFindFile(TEST_SYSTEMTOOLS_BINARY_DIR "/" + - testFindFileName); - - if (!kwsys::SystemTools::Touch(testFindFile, true)) { - std::cerr << "Problem with Touch for: " << testFindFile << std::endl; - // abort here as the existence of the file only makes the test meaningful - return false; - } - - std::vector searchPaths; - searchPaths.push_back(TEST_SYSTEMTOOLS_BINARY_DIR); - if (kwsys::SystemTools::FindFile(testFindFileName, searchPaths, true) - .empty()) { - std::cerr << "Problem with FindFile without system paths for: " - << testFindFileName << std::endl; - res = false; - } - if (kwsys::SystemTools::FindFile(testFindFileName, searchPaths, false) - .empty()) { - std::cerr << "Problem with FindFile with system paths for: " - << testFindFileName << std::endl; - res = false; - } - - return res; -} - -static bool CheckIsSubDirectory() -{ - bool res = true; - - if (kwsys::SystemTools::IsSubDirectory("/foo", "/") == false) { - std::cerr << "Problem with IsSubDirectory (root - unix): " << std::endl; - res = false; - } - if (kwsys::SystemTools::IsSubDirectory("c:/foo", "c:/") == false) { - std::cerr << "Problem with IsSubDirectory (root - dos): " << std::endl; - res = false; - } - if (kwsys::SystemTools::IsSubDirectory("/foo/bar", "/foo") == false) { - std::cerr << "Problem with IsSubDirectory (deep): " << std::endl; - res = false; - } - if (kwsys::SystemTools::IsSubDirectory("/foo", "/foo") == true) { - std::cerr << "Problem with IsSubDirectory (identity): " << std::endl; - res = false; - } - if (kwsys::SystemTools::IsSubDirectory("/fooo", "/foo") == true) { - std::cerr << "Problem with IsSubDirectory (substring): " << std::endl; - res = false; - } - if (kwsys::SystemTools::IsSubDirectory("/foo/", 
"/foo") == true) { - std::cerr << "Problem with IsSubDirectory (prepended slash): " - << std::endl; - res = false; - } - - return res; -} - -static bool CheckGetLineFromStream() -{ - const std::string fileWithFiveCharsOnFirstLine(TEST_SYSTEMTOOLS_SOURCE_DIR - "/README.rst"); - - kwsys::ifstream file(fileWithFiveCharsOnFirstLine.c_str(), std::ios::in); - - if (!file) { - std::cerr << "Problem opening: " << fileWithFiveCharsOnFirstLine - << std::endl; - return false; - } - - std::string line; - bool has_newline = false; - bool result; - - file.seekg(0, std::ios::beg); - result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1); - if (!result || line.size() != 5) { - std::cerr << "First line does not have five characters: " << line.size() - << std::endl; - return false; - } - - file.seekg(0, std::ios::beg); - result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1); - if (!result || line.size() != 5) { - std::cerr << "First line does not have five characters after rewind: " - << line.size() << std::endl; - return false; - } - - bool ret = true; - - for (size_t size = 1; size <= 5; ++size) { - file.seekg(0, std::ios::beg); - result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, - static_cast(size)); - if (!result || line.size() != size) { - std::cerr << "Should have read " << size << " characters but got " - << line.size() << std::endl; - ret = false; - } - } - - return ret; -} - -static bool CheckGetLineFromStreamLongLine() -{ - const std::string fileWithLongLine("longlines.txt"); - std::string firstLine, secondLine; - // First line: large buffer, containing a carriage return for some reason. - firstLine.assign(2050, ' '); - firstLine += "\rfirst"; - secondLine.assign(2050, 'y'); - secondLine += "second"; - - // Create file with long lines. - { - kwsys::ofstream out(fileWithLongLine.c_str(), std::ios::binary); - if (!out) { - std::cerr << "Problem opening for write: " << fileWithLongLine - << std::endl; - return false; - } - out << firstLine << "\r\n\n" << secondLine << "\n"; - } - - kwsys::ifstream file(fileWithLongLine.c_str(), std::ios::binary); - if (!file) { - std::cerr << "Problem opening: " << fileWithLongLine << std::endl; - return false; - } - - std::string line; - bool has_newline = false; - bool result; - - // Read first line. - result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1); - if (!result || line != firstLine) { - std::cerr << "First line does not match, expected " << firstLine.size() - << " characters, got " << line.size() << std::endl; - return false; - } - if (!has_newline) { - std::cerr << "Expected new line to be read from first line" << std::endl; - return false; - } - - // Read empty line. - has_newline = false; - result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1); - if (!result || !line.empty()) { - std::cerr << "Expected successful read with an empty line, got " - << line.size() << " characters" << std::endl; - return false; - } - if (!has_newline) { - std::cerr << "Expected new line to be read for an empty line" << std::endl; - return false; - } - - // Read second line. 
- has_newline = false; - result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1); - if (!result || line != secondLine) { - std::cerr << "Second line does not match, expected " << secondLine.size() - << " characters, got " << line.size() << std::endl; - return false; - } - if (!has_newline) { - std::cerr << "Expected new line to be read from second line" << std::endl; - return false; - } - - return true; -} - -static bool writeFile(const char* fileName, const char* data) -{ - kwsys::ofstream out(fileName, std::ios::binary); - out << data; - if (!out) { - std::cerr << "Failed to write file: " << fileName << std::endl; - return false; - } - return true; -} - -static std::string readFile(const char* fileName) -{ - kwsys::ifstream in(fileName, std::ios::binary); - std::stringstream sstr; - sstr << in.rdbuf(); - std::string data = sstr.str(); - if (!in) { - std::cerr << "Failed to read file: " << fileName << std::endl; - return std::string(); - } - return data; -} - -struct -{ - const char* a; - const char* b; - bool differ; -} diff_test_cases[] = { { "one", "one", false }, - { "one", "two", true }, - { "", "", false }, - { "\n", "\r\n", false }, - { "one\n", "one\n", false }, - { "one\r\n", "one\n", false }, - { "one\n", "one", false }, - { "one\ntwo", "one\ntwo", false }, - { "one\ntwo", "one\r\ntwo", false } }; - -static bool CheckTextFilesDiffer() -{ - const int num_test_cases = - sizeof(diff_test_cases) / sizeof(diff_test_cases[0]); - for (int i = 0; i < num_test_cases; ++i) { - if (!writeFile("file_a", diff_test_cases[i].a) || - !writeFile("file_b", diff_test_cases[i].b)) { - return false; - } - if (kwsys::SystemTools::TextFilesDiffer("file_a", "file_b") != - diff_test_cases[i].differ) { - std::cerr << "Incorrect TextFilesDiffer result for test case " << i + 1 - << "." << std::endl; - return false; - } - } - - return true; -} - -static bool CheckCopyFileIfDifferent() -{ - bool ret = true; - const int num_test_cases = - sizeof(diff_test_cases) / sizeof(diff_test_cases[0]); - for (int i = 0; i < num_test_cases; ++i) { - if (!writeFile("file_a", diff_test_cases[i].a) || - !writeFile("file_b", diff_test_cases[i].b)) { - return false; - } - const char* cptarget = - i < 4 ? TEST_SYSTEMTOOLS_BINARY_DIR "/file_b" : "file_b"; - if (!kwsys::SystemTools::CopyFileIfDifferent("file_a", cptarget)) { - std::cerr << "CopyFileIfDifferent() returned false for test case " - << i + 1 << "." << std::endl; - ret = false; - continue; - } - std::string bdata = readFile("file_b"); - if (diff_test_cases[i].a != bdata) { - std::cerr << "Incorrect CopyFileIfDifferent file contents in test case " - << i + 1 << "." 
<< std::endl; - ret = false; - continue; - } - } - - return ret; -} - -int testSystemTools(int, char* []) -{ - bool res = true; - - int cc; - for (cc = 0; toUnixPaths[cc][0]; cc++) { - res &= CheckConvertToUnixSlashes(toUnixPaths[cc][0], toUnixPaths[cc][1]); - } - - // Special check for ~ - std::string output; - if (kwsys::SystemTools::GetEnv("HOME", output)) { - output += "/foo bar/lala"; - res &= CheckConvertToUnixSlashes("~/foo bar/lala", output); - } - - for (cc = 0; checkEscapeChars[cc][0]; cc++) { - res &= CheckEscapeChars(checkEscapeChars[cc][0], checkEscapeChars[cc][1], - *checkEscapeChars[cc][2], checkEscapeChars[cc][3]); - } - - res &= CheckFileOperations(); - - res &= CheckStringOperations(); - - res &= CheckEnvironmentOperations(); - - res &= CheckRelativePaths(); - - res &= CheckCollapsePath(); - - res &= CheckGetPath(); - - res &= CheckFind(); - - res &= CheckIsSubDirectory(); - - res &= CheckGetLineFromStream(); - - res &= CheckGetLineFromStreamLongLine(); - - res &= CheckGetFilenameName(); - - res &= CheckTextFilesDiffer(); - - res &= CheckCopyFileIfDifferent(); - - return res ? 0 : 1; -} diff --git a/test/API/driver/kwsys/testSystemTools.h.in b/test/API/driver/kwsys/testSystemTools.h.in deleted file mode 100644 index 022e36e2f44..00000000000 --- a/test/API/driver/kwsys/testSystemTools.h.in +++ /dev/null @@ -1,12 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#ifndef @KWSYS_NAMESPACE@_testSystemtools_h -#define @KWSYS_NAMESPACE@_testSystemtools_h - -#define EXECUTABLE_OUTPUT_PATH "@CMAKE_CURRENT_BINARY_DIR@" - -#define TEST_SYSTEMTOOLS_SOURCE_DIR "@TEST_SYSTEMTOOLS_SOURCE_DIR@" -#define TEST_SYSTEMTOOLS_BINARY_DIR "@TEST_SYSTEMTOOLS_BINARY_DIR@" -#cmakedefine KWSYS_TEST_SYSTEMTOOLS_LONG_PATHS - -#endif diff --git a/test/API/driver/kwsys/testTerminal.c b/test/API/driver/kwsys/testTerminal.c deleted file mode 100644 index 652830ccd91..00000000000 --- a/test/API/driver/kwsys/testTerminal.c +++ /dev/null @@ -1,22 +0,0 @@ -/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying - file Copyright.txt or https://cmake.org/licensing#kwsys for details. */ -#include "kwsysPrivate.h" -#include KWSYS_HEADER(Terminal.h) - -/* Work-around CMake dependency scanning limitation. This must - duplicate the above list of headers. */ -#if 0 -# include "Terminal.h.in" -#endif - -int testTerminal(int argc, char* argv[]) -{ - (void)argc; - (void)argv; - kwsysTerminal_cfprintf(kwsysTerminal_Color_ForegroundYellow | - kwsysTerminal_Color_BackgroundBlue | - kwsysTerminal_Color_AssumeTTY, - stdout, "Hello %s!", "World"); - fprintf(stdout, "\n"); - return 0; -} diff --git a/test/API/driver/kwsys/update-gitsetup.bash b/test/API/driver/kwsys/update-gitsetup.bash deleted file mode 100644 index aa83cb8079e..00000000000 --- a/test/API/driver/kwsys/update-gitsetup.bash +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -set -e -set -x -shopt -s dotglob - -readonly name="GitSetup" -readonly ownership="GitSetup Upstream " -readonly subtree="GitSetup" -readonly repo="https://gitlab.kitware.com/utils/gitsetup.git" -readonly tag="setup" -readonly shortlog=false -readonly paths=" -" - -extract_source () { - git_archive -} - -. 
"${BASH_SOURCE%/*}/update-third-party.bash" diff --git a/test/API/driver/kwsys/update-third-party.bash b/test/API/driver/kwsys/update-third-party.bash deleted file mode 100644 index 3b8358e0114..00000000000 --- a/test/API/driver/kwsys/update-third-party.bash +++ /dev/null @@ -1,169 +0,0 @@ -#============================================================================= -# Copyright 2015-2016 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -######################################################################## -# Script for updating third party packages. -# -# This script should be sourced in a project-specific script which sets -# the following variables: -# -# name -# The name of the project. -# ownership -# A git author name/email for the commits. -# subtree -# The location of the thirdparty package within the main source -# tree. -# repo -# The git repository to use as upstream. -# tag -# The tag, branch or commit hash to use for upstream. -# shortlog -# Optional. Set to 'true' to get a shortlog in the commit message. -# -# Additionally, an "extract_source" function must be defined. It will be -# run within the checkout of the project on the requested tag. It should -# should place the desired tree into $extractdir/$name-reduced. This -# directory will be used as the newest commit for the project. -# -# For convenience, the function may use the "git_archive" function which -# does a standard "git archive" extraction using the (optional) "paths" -# variable to only extract a subset of the source tree. 
-######################################################################## - -######################################################################## -# Utility functions -######################################################################## -git_archive () { - git archive --worktree-attributes --prefix="$name-reduced/" HEAD -- $paths | \ - tar -C "$extractdir" -x -} - -die () { - echo >&2 "$@" - exit 1 -} - -warn () { - echo >&2 "warning: $@" -} - -readonly regex_date='20[0-9][0-9]-[0-9][0-9]-[0-9][0-9]' -readonly basehash_regex="$name $regex_date ([0-9a-f]*)" -readonly basehash="$( git rev-list --author="$ownership" --grep="$basehash_regex" -n 1 HEAD )" -readonly upstream_old_short="$( git cat-file commit "$basehash" | sed -n '/'"$basehash_regex"'/ {s/.*(//;s/)//;p}' | egrep '^[0-9a-f]+$' )" - -######################################################################## -# Sanity checking -######################################################################## -[ -n "$name" ] || \ - die "'name' is empty" -[ -n "$ownership" ] || \ - die "'ownership' is empty" -[ -n "$subtree" ] || \ - die "'subtree' is empty" -[ -n "$repo" ] || \ - die "'repo' is empty" -[ -n "$tag" ] || \ - die "'tag' is empty" -[ -n "$basehash" ] || \ - warn "'basehash' is empty; performing initial import" -readonly do_shortlog="${shortlog-false}" - -readonly workdir="$PWD/work" -readonly upstreamdir="$workdir/upstream" -readonly extractdir="$workdir/extract" - -[ -d "$workdir" ] && \ - die "error: workdir '$workdir' already exists" - -trap "rm -rf '$workdir'" EXIT - -# Get upstream -git clone "$repo" "$upstreamdir" - -if [ -n "$basehash" ]; then - # Use the existing package's history - git worktree add "$extractdir" "$basehash" - # Clear out the working tree - pushd "$extractdir" - git ls-files | xargs rm -v - find . -type d -empty -delete - popd -else - # Create a repo to hold this package's history - mkdir -p "$extractdir" - git -C "$extractdir" init -fi - -# Extract the subset of upstream we care about -pushd "$upstreamdir" -git checkout "$tag" -readonly upstream_hash="$( git rev-parse HEAD )" -readonly upstream_hash_short="$( git rev-parse --short=8 "$upstream_hash" )" -readonly upstream_datetime="$( git rev-list "$upstream_hash" --format='%ci' -n 1 | grep -e "^$regex_date" )" -readonly upstream_date="$( echo "$upstream_datetime" | grep -o -e "$regex_date" )" -if $do_shortlog && [ -n "$basehash" ]; then - readonly commit_shortlog=" - -Upstream Shortlog ------------------ - -$( git shortlog --no-merges --abbrev=8 --format='%h %s' "$upstream_old_short".."$upstream_hash" )" -else - readonly commit_shortlog="" -fi -extract_source || \ - die "failed to extract source" -popd - -[ -d "$extractdir/$name-reduced" ] || \ - die "expected directory to extract does not exist" -readonly commit_summary="$name $upstream_date ($upstream_hash_short)" - -# Commit the subset -pushd "$extractdir" -mv -v "$name-reduced/"* . -rmdir "$name-reduced/" -git add -A . 
-git commit -n --author="$ownership" --date="$upstream_datetime" -F - <<-EOF -$commit_summary - -Code extracted from: - - $repo - -at commit $upstream_hash ($tag).$commit_shortlog -EOF -git branch -f "upstream-$name" -popd - -# Merge the subset into this repository -if [ -n "$basehash" ]; then - git merge --log -s recursive "-Xsubtree=$subtree/" --no-commit "upstream-$name" -else - unrelated_histories_flag="" - if git merge --help | grep -q -e allow-unrelated-histories; then - unrelated_histories_flag="--allow-unrelated-histories " - fi - readonly unrelated_histories_flag - - git fetch "$extractdir" "upstream-$name:upstream-$name" - git merge --log -s ours --no-commit $unrelated_histories_flag "upstream-$name" - git read-tree -u --prefix="$subtree/" "upstream-$name" -fi -git commit --no-edit -git branch -d "upstream-$name" diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index d52beb01d46..fe52cd33091 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -395,24 +395,6 @@ if (HDF5_BUILD_UTILS) set (H5_TESTS ${H5_TESTS} mirror_vfd) endif () -set (HDF5_API_TESTS - attribute - dataset - datatype - file - group - link - misc - object -) - -if (HDF5_TEST_API_ENABLE_ASYNC) - set (HDF5_API_TESTS - ${HDF5_API_TESTS} - async - ) -endif () - macro (ADD_H5_EXE file) add_executable (${file} ${HDF5_TEST_SOURCE_DIR}/${file}.c) target_include_directories (${file} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};${HDF5_TEST_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") @@ -897,8 +879,10 @@ if (HDF5_ENABLE_FORMATTERS) clang_format (HDF5_TEST_use_disable_mdc_flushes_FORMAT use_disable_mdc_flushes) endif () -add_subdirectory (API) - if (HDF5_TEST_SERIAL) include (CMakeTests.cmake) endif () + +if (HDF5_TEST_API) + add_subdirectory (API) +endif () diff --git a/testpar/API/CMakeLists.txt b/testpar/API/CMakeLists.txt index 5eb69c4d58c..e907078783b 100644 --- a/testpar/API/CMakeLists.txt +++ b/testpar/API/CMakeLists.txt @@ -9,16 +9,31 @@ # help@hdfgroup.org. 
# -#------------------------------------------------------------------------------ -# Set module path -#------------------------------------------------------------------------------ -set(HDF5_TEST_API_CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake") -set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${HDF5_TEST_API_CMAKE_MODULE_PATH}) +cmake_minimum_required (VERSION 3.18) +project (HDF5_TEST_PAR_API C) #------------------------------------------------------------------------------ -# Setup for API tests +# Define for API tests #------------------------------------------------------------------------------ +set (HDF5_API_TESTS + attribute + dataset + datatype + file + group + link + misc + object +) + +if (HDF5_TEST_API_ENABLE_ASYNC) + set (HDF5_API_TESTS + ${HDF5_API_TESTS} + async + ) +endif () + # Ported HDF5 tests set (HDF5_API_PAR_TESTS_EXTRA t_bigio diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt index d34b8001267..6bb5fa6f77e 100644 --- a/testpar/CMakeLists.txt +++ b/testpar/CMakeLists.txt @@ -104,30 +104,14 @@ set (H5P_TESTS t_vfd ) -set (HDF5_API_TESTS - attribute - dataset - datatype - file - group - link - misc - object -) - -if (HDF5_TEST_API_ENABLE_ASYNC) - set (HDF5_API_TESTS - ${HDF5_API_TESTS} - async - ) -endif () - foreach (h5_testp ${H5P_TESTS}) ADD_H5P_EXE(${h5_testp}) endforeach () -add_subdirectory (API) - if (HDF5_TEST_PARALLEL) include (CMakeTests.cmake) endif () + +if (HDF5_TEST_API) + add_subdirectory (API) +endif () From 195a491a6be76ee729c76b94e7e870fae72f6543 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Wed, 3 May 2023 11:46:31 -0500 Subject: [PATCH 195/231] Minor tidying of API tests files (#2878) * Minor tidying of API tests files * Remove old API test configuration setting --- test/API/CMakeLists.txt | 1 + test/API/README.md | 15 ++++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/test/API/CMakeLists.txt b/test/API/CMakeLists.txt index c2f95bdd4b8..dd2bca224f2 100644 --- a/test/API/CMakeLists.txt +++ b/test/API/CMakeLists.txt @@ -9,6 +9,7 @@ # help@hdfgroup.org. # + cmake_minimum_required (VERSION 3.18) project (HDF5_TEST_API C) diff --git a/test/API/README.md b/test/API/README.md index d57472da04a..aec6eaa0fa4 100644 --- a/test/API/README.md +++ b/test/API/README.md @@ -1,12 +1,13 @@ # HDF5 API Tests -This directory contains several test applications that exercise [HDF5](https://github.com/HDFGroup/hdf5)'s +This directory contains several test applications that exercise HDF5's public API and serve as regression tests for HDF5 [VOL Connectors](https://portal.hdfgroup.org/display/HDF5/Virtual+Object+Layer). ## Build Process and options -These HDF5 API tests are enabled and built by default, but can be disabled if desired. -The following build options are available to influence how the API tests get built: +These HDF5 API tests are disabled by default, but can be enabled by passing the +`-DHDF5_TEST_API=ON` option to CMake. The following build options are available +to influence how the API tests get built: ### CMake @@ -39,9 +40,9 @@ Currently unsupported ### Usage -These API tests currently only support usage with HDF5 VOL connectors that can be loaded dynamically -as a plugin. For information on how to build a VOL connector in this manner, refer to section 2.3 of -the [HDF5 VOL Connector Author Guide](https://portal.hdfgroup.org/display/HDF5/HDF5+VOL+Connector+Authors+Guide?preview=/53610813/59903039/vol_connector_author_guide.pdf). 
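As a concrete illustration of the build-and-run flow described above, a minimal sketch (the directory paths and the connector name are placeholders; `-DHDF5_TEST_API=ON` and `-DHDF5_TEST_API_ENABLE_ASYNC=ON` are the options introduced by this patch set, and `HDF5_PLUGIN_PATH`/`HDF5_VOL_CONNECTOR` are the standard HDF5 environment variables for locating and selecting a dynamically loaded VOL connector):

    # Configure and build HDF5 with the API tests enabled
    cmake -DHDF5_TEST_API=ON -DHDF5_TEST_API_ENABLE_ASYNC=ON /path/to/hdf5/source
    cmake --build . --parallel

    # Tell HDF5 where to find the VOL connector plugin and which connector
    # to use, then run the test suite
    export HDF5_PLUGIN_PATH=/path/to/vol/connector/build
    export HDF5_VOL_CONNECTOR="my_connector"      # placeholder connector name
    ctest --output-on-failure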
+These API tests currently only support usage with the native HDF5 VOL connector and HDF5 VOL +connectors that can be loaded dynamically as a plugin. For information on how to build a VOL +connector in this manner, refer to section 2.3 of the [HDF5 VOL Connector Author Guide](https://portal.hdfgroup.org/display/HDF5/HDF5+VOL+Connector+Authors+Guide?preview=/53610813/59903039/vol_connector_author_guide.pdf). TODO: section on building VOL connectors alongside HDF5 for use with tests @@ -64,7 +65,7 @@ and use it when running tests. If HDF5 is unable to locate or load the VOL conne will fall back to running the tests with the native HDF5 VOL connector and an error similar to the following will appear in the test output: - HDF5-DIAG: Error detected in HDF5 (1.13.0) MPI-process 0: + HDF5-DIAG: Error detected in HDF5 (X.XX.X) MPI-process 0: #000: /home/user/git/hdf5/src/H5.c line 1010 in H5open(): library initialization failed major: Function entry/exit minor: Unable to initialize object From 0b3ca564ef8de08d6eba76c18b2a250ecf740619 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Wed, 3 May 2023 11:46:59 -0500 Subject: [PATCH 196/231] fixed args in execvp for h5fuse (#2885) --- testpar/t_subfiling_vfd.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c index f827aa5a823..08419239f03 100644 --- a/testpar/t_subfiling_vfd.c +++ b/testpar/t_subfiling_vfd.c @@ -1888,7 +1888,7 @@ test_subfiling_h5fuse(void) if (pid == 0) { char *tmp_filename; - char *args[6]; + char *args[7]; tmp_filename = HDmalloc(PATH_MAX); VRFY(tmp_filename, "HDmalloc succeeded"); @@ -1900,9 +1900,10 @@ test_subfiling_h5fuse(void) args[0] = HDstrdup("env"); args[1] = HDstrdup("sh"); args[2] = HDstrdup("h5fuse.sh"); - args[3] = HDstrdup("-q -f"); - args[4] = tmp_filename; - args[5] = NULL; + args[3] = HDstrdup("-q"); + args[4] = HDstrdup("-f"); + args[5] = tmp_filename; + args[6] = NULL; /* Call h5fuse script from MPI rank 0 */ HDexecvp("env", args); From 6e810828e3fabeb06b4f936c3b2adda02418bde4 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Wed, 3 May 2023 11:47:17 -0500 Subject: [PATCH 197/231] Fix Autotools internal-debug=all builds (#2886) --- src/H5Ocache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/H5Ocache.c b/src/H5Ocache.c index 72261faa805..66b092a01ec 100644 --- a/src/H5Ocache.c +++ b/src/H5Ocache.c @@ -534,7 +534,7 @@ H5O__cache_notify(H5AC_notify_action_t action, void *_thing) for (u = 0; u < oh->nmesgs; u++) if (oh->mesg[u].chunkno == 0) oh->mesg[u].dirty = FALSE; -#ifdef H5O_DEBUG +#ifndef NDEBUG /* Reset the number of messages dirtied by decoding */ oh->ndecode_dirtied = 0; #endif From dc4f682cd0bfcaacd418629594bf0eff3144418f Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Wed, 3 May 2023 15:24:14 -0500 Subject: [PATCH 198/231] Remove references to old MANIFEST file (#2890) --- CONTRIBUTING.md | 1 - fortran/src/README | 1 - 2 files changed, 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 687e9819008..a16e845ac12 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -125,7 +125,6 @@ Please make sure that you check the items applicable to your pull request: * [ ] Does the new feature require a change to an existing API? See "API Compatibility Macros" document (https://portal.hdfgroup.org/display/HDF5/API+Compatibility+Macros) * Documentation * [ ] Was the change described in the release_docs/RELEASE.txt file? - * [ ] Was MANIFEST updated if new files had been added to the source? 
* [ ] Was the new function documented in the corresponding public header file using [Doxygen](https://docs.hdfgroup.org/hdf5/develop/_r_m_t.html)? * [ ] Was new functionality documented for the HDF5 community (the level of documentation depends on the feature; ask us what would be appropriate) * Testing diff --git a/fortran/src/README b/fortran/src/README index f9316b5c009..f73a59a4b98 100644 --- a/fortran/src/README +++ b/fortran/src/README @@ -130,5 +130,4 @@ Procedure for adding a new file to the repository Add the name of the file to the: (1) Makefile.am located in the same directory as the newfile (2) CMakeLists.txt located in the same directory as the newfile - (3) MANIFEST located in the top level directory From cafa0d86d3861d83b55997d549e16e23a2f43f14 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 3 May 2023 16:09:26 -0500 Subject: [PATCH 199/231] Add missing terminator (#2888) --- doxygen/dox/Overview.dox | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doxygen/dox/Overview.dox b/doxygen/dox/Overview.dox index 32d0deaeaf6..54cc638d9d6 100644 --- a/doxygen/dox/Overview.dox +++ b/doxygen/dox/Overview.dox @@ -23,7 +23,7 @@ documents cover a mix of tasks, concepts, and reference, to help a specific \par Versions Version-specific documentation (see the version in the title area) can be found here: - - HDF5 develop branch (this site) + - HDF5 develop branch (this site) - HDF5 1.14.x - HDF5 1.12.x - HDF5 1.10.x From e1c07af0a97742ad54cde4227d122536bf49f350 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Wed, 3 May 2023 16:11:38 -0500 Subject: [PATCH 200/231] Add RELEASE.txt entry for API tests (#2889) --- release_docs/RELEASE.txt | 42 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index b3aa5b64471..d0ac38f568d 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -47,6 +47,48 @@ New Features Configuration: ------------- + - Added new CMake options for building and running HDF5 API tests + (Experimental) + + HDF5 API tests are an experimental feature, primarily targeted + toward HDF5 VOL connector authors, that is currently being developed. + These tests exercise the HDF5 API and are being integrated back + into the HDF5 library from the HDF5 VOL tests repository + (https://github.com/HDFGroup/vol-tests). To support this feature, + the following new options have been added to CMake: + + * HDF5_TEST_API: ON/OFF (Default: OFF) + + Controls whether the HDF5 API tests will be built. These tests + will only be run during testing of HDF5 if the HDF5_TEST_SERIAL + (for serial tests) and HDF5_TEST_PARALLEL (for parallel tests) + options are enabled. + + * HDF5_TEST_API_INSTALL: ON/OFF (Default: OFF) + + Controls whether the HDF5 API test executables will be installed + on the system alongside the HDF5 library. This option is currently + not functional. + + * HDF5_TEST_API_ENABLE_ASYNC: ON/OFF (Default: OFF) + + Controls whether the HDF5 Async API tests will be built. These + tests will only be run if the VOL connector used supports Async + operations. + + * HDF5_TEST_API_ENABLE_DRIVER: ON/OFF (Default: OFF) + + Controls whether to build the HDF5 API test driver program. This + test driver program is useful for VOL connectors that use a + client/server model where the server needs to be up and running + before the VOL connector can function. This option is currently + not functional. 
+ + * HDF5_TEST_API_SERVER: String (Default: "") + + Used to specify a path to the server executable that the test + driver program should execute. + - Added support for CMake presets file. CMake supports two main files, CMakePresets.json and CMakeUserPresets.json, From 41be79bed5f4e736a46d171552107b2d3c75319f Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Wed, 3 May 2023 22:03:58 -0500 Subject: [PATCH 201/231] Force lowercase Fortran module file names (#2891) * fixed args in execvp for h5fuse * Force lowercase Fortran module file names for Cray compilers --- config/cmake/HDF5UseFortran.cmake | 6 ++++++ release_docs/RELEASE.txt | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/config/cmake/HDF5UseFortran.cmake b/config/cmake/HDF5UseFortran.cmake index aae707853fd..3e058adbf83 100644 --- a/config/cmake/HDF5UseFortran.cmake +++ b/config/cmake/HDF5UseFortran.cmake @@ -17,6 +17,12 @@ enable_language (Fortran) set (HDF_PREFIX "H5") + +# Force lowercase Fortran module file names +if (CMAKE_Fortran_COMPILER_ID STREQUAL "Cray") + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -ef") +endif () + include (CheckFortranFunctionExists) include (CheckFortranSourceRuns) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index d0ac38f568d..574d1679d9a 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -443,6 +443,12 @@ Bug Fixes since HDF5-1.14.0 release Fixes GitHub issues #1546 and #2259 + - Force lowercase Fortran module file names + + The Cray Fortran compiler uses uppercase Fortran module file names, which + caused CMake installs to fail. A compiler option was added to use lowercase + instead. + Tools ----- From 0e9eaa82b043b967eb629f6d8e4cb5f4883bc87c Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 5 May 2023 08:28:59 -0700 Subject: [PATCH 202/231] Remove H5TB discussion (#2899) --- src/H5Cpkg.h | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 056c1810a17..7bd7087da87 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -3003,16 +3003,6 @@ typedef struct H5C_tag_info_t { * * Catchall structure for all variables specific to an instance of the cache. * - * While the individual fields of the structure are discussed below, the - * following overview may be helpful. - * - * Entries in the cache are stored in an instance of H5TB_TREE, indexed on - * the entry's disk address. While the H5TB_TREE is less efficient than - * hash table, it keeps the entries in address sorted order. As flushes - * in parallel mode are more efficient if they are issued in increasing - * address order, this is a significant benefit. Also the H5TB_TREE code - * was readily available, which reduced development time. - * * While the cache was designed with multiple replacement policies in mind, * at present only a modified form of LRU is supported. 
* From f7d3728d31a882c8a5d4ac347769d90ffa5f8e32 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 5 May 2023 09:06:41 -0700 Subject: [PATCH 203/231] Remove 1.8 badge from README.md (#2895) --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 489d3910f88..b30fd3da043 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,6 @@ HDF5 version 1.15.0 currently under development [![1.14 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_14&label=1.14)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_14) [![1.12 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_12&label=1.12)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_12) [![1.10 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_10&label=1.10)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_10) -[![1.8 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_8&label=1.8)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_8) [![BSD](https://img.shields.io/badge/License-BSD-blue.svg)](https://github.com/HDFGroup/hdf5/blob/develop/COPYING) *Please refer to the release_docs/INSTALL file for installation instructions.* From 92a080526f28e6e709f51988bf2187fcd460a281 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Fri, 5 May 2023 11:07:16 -0500 Subject: [PATCH 204/231] remove obsolete SZIP find module (#2901) --- config/cmake/FindSZIP.cmake | 127 ------------------------------------ release_docs/RELEASE.txt | 2 + 2 files changed, 2 insertions(+), 127 deletions(-) delete mode 100644 config/cmake/FindSZIP.cmake diff --git a/config/cmake/FindSZIP.cmake b/config/cmake/FindSZIP.cmake deleted file mode 100644 index b96a7326201..00000000000 --- a/config/cmake/FindSZIP.cmake +++ /dev/null @@ -1,127 +0,0 @@ -# -# Copyright by The HDF Group. -# All rights reserved. -# -# This file is part of HDF5. The full HDF5 copyright notice, including -# terms governing use, modification, and redistribution, is contained in -# the COPYING file, which can be found at the root of the source code -# distribution tree, or in https://www.hdfgroup.org/licenses. -# If you do not have access to either file, you may request a copy from -# help@hdfgroup.org. -# -######################################################################### - -# - Derived from the FindTiff.cmake and FindJPEG.cmake that is included with cmake -# FindSZIP - -# Find the native SZIP includes and library - -# Imported targets -################## - -# This module defines the following :prop_tgt:`IMPORTED` targets: -# -# SZIP::SZIP -# The SZIP library, if found. -# -# Result variables -################### - -# This module will set the following variables in your project: - -# SZIP_FOUND, true if the SZIP headers and libraries were found. -# SZIP_INCLUDE_DIR, the directory containing the SZIP headers. -# SZIP_INCLUDE_DIRS, the directory containing the SZIP headers. -# SZIP_LIBRARIES, libraries to link against to use SZIP. - -# Cache variables -################# - -# The following variables may also be set: - -# SZIP_LIBRARY, where to find the SZIP library. 
-# SZIP_LIBRARY_DEBUG - Debug version of SZIP library -# SZIP_LIBRARY_RELEASE - Release Version of SZIP library - -# message (STATUS "Finding SZIP library and headers..." ) -######################################################################### - - -find_path(SZIP_INCLUDE_DIR szlib.h) - -set(szip_names ${SZIP_NAMES} sz szip szip-static libsz libszip libszip-static) -foreach(name ${szip_names}) - list (APPEND szip_names_debug "${name}d") -endforeach() - -if(NOT SZIP_LIBRARY) - find_library(SZIP_LIBRARY_RELEASE NAMES ${szip_names}) - find_library(SZIP_LIBRARY_DEBUG NAMES ${szip_names_debug}) - include(SelectLibraryConfigurations) - select_library_configurations(SZIP) - mark_as_advanced(SZIP_LIBRARY_RELEASE SZIP_LIBRARY_DEBUG) -endif() -unset(szip_names) -unset(szip_names_debug) - -if(SZIP_INCLUDE_DIR AND EXISTS "${SZIP_INCLUDE_DIR}/SZconfig.h") - file(STRINGS "${SZIP_INCLUDE_DIR}/SZconfig.h" szip_version_str - REGEX "^#define[\t ]+SZIP_PACKAGE_VERSION[\t ]+.*") - - string(REGEX REPLACE "^#define[\t ]+SZIP_PACKAGE_VERSION[\t ]+([0-9]+).*" - "\\1" SZIP_VERSION "${szip_version_str}") - unset(szip_version_str) -endif() - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(SZIP - REQUIRED_VARS SZIP_LIBRARY SZIP_INCLUDE_DIR - VERSION_VAR SZIP_VERSION) - -if(SZIP_FOUND) - set(SZIP_LIBRARIES ${SZIP_LIBRARY}) - set(SZIP_INCLUDE_DIRS "${SZIP_INCLUDE_DIR}") - - if(NOT TARGET SZIP::SZIP) - add_library(SZIP::SZIP UNKNOWN IMPORTED) - if(SZIP_INCLUDE_DIRS) - set_target_properties(SZIP::SZIP PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${SZIP_INCLUDE_DIRS}") - endif() - if(EXISTS "${SZIP_LIBRARY}") - set_target_properties(SZIP::SZIP PROPERTIES - IMPORTED_LINK_INTERFACE_LANGUAGES "C" - IMPORTED_LOCATION "${SZIP_LIBRARY}") - endif() - if(EXISTS "${SZIP_LIBRARY_RELEASE}") - set_property(TARGET SZIP::SZIP APPEND PROPERTY - IMPORTED_CONFIGURATIONS RELEASE) - set_target_properties(SZIP::SZIP PROPERTIES - IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "C" - IMPORTED_LOCATION_RELEASE "${SZIP_LIBRARY_RELEASE}") - endif() - if(EXISTS "${SZIP_LIBRARY_DEBUG}") - set_property(TARGET SZIP::SZIP APPEND PROPERTY - IMPORTED_CONFIGURATIONS DEBUG) - set_target_properties(SZIP::SZIP PROPERTIES - IMPORTED_LINK_INTERFACE_LANGUAGES_DEBUG "C" - IMPORTED_LOCATION_DEBUG "${SZIP_LIBRARY_DEBUG}") - endif() - endif() -endif() - -mark_as_advanced(SZIP_LIBRARY SZIP_INCLUDE_DIR) - -# Report the results. -if (NOT SZIP_FOUND) - set (SZIP_DIR_MESSAGE - "SZip was not found. Make sure SZIP_LIBRARY and SZIP_INCLUDE_DIR are set or set the SZIP_INSTALL environment variable." - ) - if (NOT SZIP_FIND_QUIETLY) - message (VERBOSE "${SZIP_DIR_MESSAGE}") - else () - if (SZIP_FIND_REQUIRED) - message (FATAL_ERROR "SZip was NOT found and is Required by this project") - endif () - endif () -endif () diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 574d1679d9a..4b13d09be4d 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -103,6 +103,8 @@ New Features LIBAEC library has been used in HDF5 binaries as the szip library of choice for a few years. We are removing the options for using the old SZIP library. + Also removed the config/cmake/FindSZIP.cmake file. 
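For reference, a hedged sketch of how a CMake build would now pick up szip compression through libaec rather than the removed find module (the HDF5_ENABLE_SZIP_SUPPORT option is the existing CMake switch for szip support; the libaec install prefix and the assumption that the installation provides its own CMake package files are illustrative only):

    # Build against an existing libaec installation; the removed
    # config/cmake/FindSZIP.cmake module is no longer involved.
    cmake -DHDF5_ENABLE_SZIP_SUPPORT=ON \
          -DCMAKE_PREFIX_PATH=/opt/libaec \
          /path/to/hdf5/source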
+ - Enabled instrumentation of the library by default in CMake for parallel debug builds From 4dcfa1f00f0bfa940c2e93124182826464833dde Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 5 May 2023 09:17:00 -0700 Subject: [PATCH 205/231] Move the 1.10.11 release to September (#2898) --- doc/img/release-schedule.plantuml | 4 ++-- doc/img/release-schedule.png | Bin 15473 -> 15625 bytes 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/img/release-schedule.plantuml b/doc/img/release-schedule.plantuml index 7bb7452275d..f2a7ddf2dd9 100644 --- a/doc/img/release-schedule.plantuml +++ b/doc/img/release-schedule.plantuml @@ -13,10 +13,10 @@ Project starts 2022-01-01 [1.8.23] happens 2023-01-31 [1.8] is colored in #F76969 -[1.10] starts 2022-01-01 and lasts 109 weeks +[1.10] starts 2022-01-01 and lasts 91 weeks [1.10.9] happens 2022-05-31 [1.10.10] happens 2023-03-31 -[1.10.11] happens 2024-01-31 +[1.10.11] happens 2023-09-30 [1.10.10] displays on same row as [1.10.9] [1.10.11] displays on same row as [1.10.9] [1.10] is colored in #F6DD60 diff --git a/doc/img/release-schedule.png b/doc/img/release-schedule.png index e504dac39f77526a8527ac417c12a775ae5387d5..4b7e50125e61d21324718c057297fef707174bf7 100644 GIT binary patch literal 15625 zcmd73bySsG`!>1=1qA63kxuCl>5!0;luiMWP`VcqB1m^hBaO6xAhi@}q(h{U?rzrN z%!PZu``zF9oo|e9ob$(F?6G}T&1XI{?)$p0>zW~IDss1P-Ms~YKyE9@%V$7TxxItuD~lH{5IGAw3nx<-i{~_Eo-|f2E)LGZTwD&XOzm7; zZC`Pk+uOSJbyGtisI=Cax-Nge4?zL#c%;=ND%MPO zKl8}wliLj?3P*p(J@H#mCkNVUcZ0Mwyh=hamQG4hv%7GLDafr=OEzGL2Bq(-<;+evHaBz_1=TN|5l_OUyUZUP5f;A>Vjd* z-!AhUOBnMRF}fm;!)FYJIm4v(yCRG;$2kJ&&=kFwvZI995h~sykwMCcjZiD0pK!sE znsb@PUo84=9ttH@^-r=1_S#?0W z7wy;+v>v_IJUb|%wKJ2S@a8ukVDi<|rwtN6wQg+WDpuJ2@%%lHp33pNWuh2-v$6-@ zE{5L!o@G(Zy%nMRWOW&3%-(refHc_7sahS|)aJ>1eLw1v+JRH-E-8js>|IiE2t?Rc zLFSRBhw)ZA)@x1K&sQSPNHL=!3N!(-GMLdkZz|fNAK^bI*HnA-d<9KBeK4pFHx;`Z zm+$nQp2G`7PzdR;imod0;%kf7XSQ9=V^#Jq zd?I~5z7#63oBlWqOWj6~0;>}IeZ3i_k6fVO%7TXU`pS<3KkUytq`|V+7tgR;>8>v- zZkQ2VUyP$xVqRaodZUYeeeqe69l38H5QG1*8Ogkkg{2rdDs>80k`MK__P>^m+$8#N z>lUQ21wCFF0?{HdVq?*G+taG0W26Q1f#XsNi=MCM1mRI9!Ywe^a7&A|OJhs5OT6jm z=~MY%J~V9!Fz<*gf(Jo0*f7Qxd09ihz9|mjb`tOrze~K(GH{e(mzoQkOQ{SVnI3=~9HiOyM!# zxAgBad(rJnX9G?aFJ3S)FsPmRB367yeC~qB0OKQRg~gHzfqZ1G*X9nD{WWiLb*=;Y zQuGeti@5Y`dh738Op>=!T05i2m6MZW5!BMsQuMT7Uv=}YfZgj^?~`;P=XSxd4=Wwi zzOTRE7EoIP10dTs`Po*)dHKih-)45xOzz|h<9N({ARLnOfwOdLxE_jf~hMUW6^(fN)*=mQLDHv)>l8@y`rTw zH&Z?>FE>~8Y%z|JkrR zWdUwN60SM~GKyTTANfm2!gWP-Yv#T9zDjcp5@JpxVAIUAV=<=r;$s=2yT$*#*#Rej-R^Ds(Za~y*!A{E3|8Dd3 zJY9;_Ij`oe7JKaWe0MXJy923ESLCoez$=mMN6^HoiYPs!oVtb~L_rU4nd zZq`GuWFGT(_j=-7w$G;BXTcLkj?9C##Fz?}O_%ycgzZ!tFA{O0&Sqx0FQ8Du*7v!+N&6bZ}x4 z5)R|W_a-Xux!)M*bwq!xQq=H=>UW%PCC9~mPoJ!|9#($ZPJDj0l1a_?0*8OJQ2p(? z*YW1s&jLz@)6ce(iHV8Jdp>nP5`5aS--cJEO469^%?DBE?5n5hqS zbsk$2HF8l@TJ6Fo3`L5{?$fSh30#JC4q#V0SZ<$zEfn3du-_R&FP4;7TBUj;)?%Oj zh2vS=_Pg5@`$~vA^N7YHp=JL&w$Fm(fF}Rc9DAW7hFO_UW%r7T7h0LZKQO zgw}#oJ!nPvWMpzx%1TN(^M|~Ae8c6cH{9fPl+BlR2A-q|#u${s=A?YenvV5fxVPSg&MO>oy>3onUJ062+_pOi-1_gzb%WrQn z;OCm}1?-Jyo4pl`hqa#4Xc+a29{dQzCVP&77#7N9I#XQ%yCZWWp&0WrI~c?Q5V=Q6 z4Nfw=w$J*nE>$;V2N_*l@^ZLd z8t64~$;zn@JsTU3Cti-7tD#h4{%CD&-JFGG-txVeob|v~Nf%B?6$fD-Ovc_q$J4V> z9h1ELvxAij%KzL(p5~hc1FP@lUWfne4Y7LI-%j? 
z#r=V7P3YM3D+r&uAu=z%VRYrFK=Iai;rQt8?(TzaAq0AR3e9TVjuitw*&Qzox_ua< zI=O5tjbz}n{_LS|>s8Nlt%km)EH?)?Hw4!=RMS~;H&OuE}d3}~IbK}$~ zd12w%sbB+W@7?bu-(8EnXX_L5w{yKZmi0-#UpzdYfNjOk32I!TJpf0b1>4)(6=Y;LtIU!`7EJ8711sYRnb~+Mh`CUC;@mJA!SqWJ78L!ZzE?ZH5UeQ z%Lt@fG}me%H}>$0^F?eVWk&GoUypQvy&QEBGz!`q6j;c5{62a&A>xL#2;XJUHO`A? b#IwV*j~Xu49oDr($O|$O3gWq Date: Fri, 5 May 2023 12:52:25 -0500 Subject: [PATCH 206/231] Added summary report to test big IO (#2908) --- testpar/t_bigio.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c index ad669d81776..bca52b1110f 100644 --- a/testpar/t_bigio.c +++ b/testpar/t_bigio.c @@ -1168,11 +1168,7 @@ single_rank_independent_io(void) H5Dclose(dset_id); H5Fclose(file_id); - H5E_BEGIN_TRY - { - H5Fdelete(FILENAME[1], fapl_id); - } - H5E_END_TRY; + H5Fdelete(FILENAME[1], fapl_id); H5Pclose(fapl_id); } @@ -1912,20 +1908,30 @@ main(int argc, char **argv) H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL); - H5E_BEGIN_TRY - { - H5Fdelete(FILENAME[0], fapl_id); - H5Fdelete(FILENAME[1], fapl_id); - } - H5E_END_TRY; + if (H5Fdelete(FILENAME[0], fapl_id) < 0) + nerrors++; H5Pclose(fapl_id); } + /* Gather errors from all ranks */ + MPI_Allreduce(MPI_IN_PLACE, &nerrors, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); + + if (mpi_rank_g == 0) { + printf("\n==================================================\n"); + if (nerrors) + printf("***Parallel big IO tests detected %d errors***\n", nerrors); + else + printf("Parallel big IO tests finished with no errors\n"); + printf("==================================================\n"); + } + /* close HDF5 library */ H5close(); + /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ MPI_Finalize(); - return 0; + /* cannot just return (nerrors) because exit code is limited to 1 byte */ + return (nerrors != 0); } From a39517a6139b4d06f3ee4cfd5637d5b5b31e4042 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Fri, 5 May 2023 15:34:04 -0500 Subject: [PATCH 207/231] Fix some compile failures in API tests (#2913) --- testpar/API/t_dset.c | 13 ------------- testpar/API/t_filter_read.c | 13 ------------- testpar/API/t_shapesame.c | 10 ++++------ 3 files changed, 4 insertions(+), 32 deletions(-) diff --git a/testpar/API/t_dset.c b/testpar/API/t_dset.c index d00524364dd..56a3f68fdf6 100644 --- a/testpar/API/t_dset.c +++ b/testpar/API/t_dset.c @@ -2568,19 +2568,6 @@ extend_readAll(void) } #ifdef H5_HAVE_FILTER_DEFLATE -static const char * -h5_rmprefix(const char *filename) -{ - const char *ret_ptr; - - if ((ret_ptr = HDstrstr(filename, ":")) == NULL) - ret_ptr = filename; - else - ret_ptr++; - - return (ret_ptr); -} - /* * Example of using the parallel HDF5 library to read a compressed * dataset in an HDF5 file with collective parallel access support. 
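Aside: the summary-report change to t_bigio.c above combines three ideas worth calling out: per-rank error counts are merged with a max-reduction, only rank 0 prints the summary, and the process exit status is clamped to 0/1 because an exit code carries only one byte. Below is a minimal standalone sketch of that idiom; the test body is a hypothetical placeholder and only the MPI calls mirror the patch.

#include <mpi.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
    int nerrors  = 0;
    int mpi_rank = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* ... per-rank test work goes here, incrementing nerrors on failure ... */

    /* Merge error counts so every rank sees the worst result */
    MPI_Allreduce(MPI_IN_PLACE, &nerrors, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);

    if (mpi_rank == 0) {
        if (nerrors)
            printf("*** tests detected %d errors ***\n", nerrors);
        else
            printf("tests finished with no errors\n");
    }

    MPI_Finalize();

    /* The exit status holds only one byte, so return a 0/1 flag, not the raw count */
    return (nerrors != 0);
}
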
diff --git a/testpar/API/t_filter_read.c b/testpar/API/t_filter_read.c index f32c21b4392..64b69b20a1b 100644 --- a/testpar/API/t_filter_read.c +++ b/testpar/API/t_filter_read.c @@ -36,19 +36,6 @@ static int mpi_size, mpi_rank; #define HS_DIM1 200 #define HS_DIM2 100 -const char * -h5_rmprefix(const char *filename) -{ - const char *ret_ptr; - - if ((ret_ptr = HDstrstr(filename, ":")) == NULL) - ret_ptr = filename; - else - ret_ptr++; - - return (ret_ptr); -} - #ifdef H5_HAVE_FILTER_SZIP /*------------------------------------------------------------------------- diff --git a/testpar/API/t_shapesame.c b/testpar/API/t_shapesame.c index 340e89ecd9f..2a029abbf2d 100644 --- a/testpar/API/t_shapesame.c +++ b/testpar/API/t_shapesame.c @@ -28,6 +28,10 @@ #include "hdf5.h" #include "testphdf5.h" +#ifndef PATH_MAX +#define PATH_MAX 512 +#endif + /* FILENAME and filenames must have the same number of names. * Use PARATESTFILE in general and use a separated filename only if the file * created in one test is accessed by a different test. @@ -3967,12 +3971,6 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) * Main driver of the Parallel HDF5 tests */ -#include "testphdf5.h" - -#ifndef PATH_MAX -#define PATH_MAX 512 -#endif /* !PATH_MAX */ - /* global variables */ int dim0; int dim1; From 2b9516782bc607195b19718cc5c6d3c756642e5c Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Fri, 5 May 2023 15:34:17 -0500 Subject: [PATCH 208/231] Fix mirror VFD test timeouts (#2915) * Close & re-open socket when looping for timeouts * Committing clang-format changes --------- Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- test/mirror_vfd.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/test/mirror_vfd.c b/test/mirror_vfd.c index b4f4c2c185e..489a9c03aa4 100644 --- a/test/mirror_vfd.c +++ b/test/mirror_vfd.c @@ -1268,7 +1268,6 @@ create_mirroring_split_fapl(const char *basename, struct mirrortest_filenames *n return ret_value; error: - HDfree(splitter_config); H5E_BEGIN_TRY { H5Pclose(splitter_config->wo_fapl_id); @@ -1276,6 +1275,7 @@ create_mirroring_split_fapl(const char *basename, struct mirrortest_filenames *n H5Pclose(ret_value); } H5E_END_TRY; + HDfree(splitter_config); return H5I_INVALID_HID; } /* end create_mirroring_split_fapl() */ @@ -2328,7 +2328,7 @@ static int confirm_server(struct mt_opts *opts) { char mybuf[16]; - int live_socket; + int live_socket = -1; struct sockaddr_in target_addr; unsigned attempt = 0; @@ -2349,9 +2349,24 @@ confirm_server(struct mt_opts *opts) HDprintf("ERROR connect() (%d)\n%s\n", errno, HDstrerror(errno)); return -1; } + + /* Close socket during sleep() */ + if (HDclose(live_socket) < 0) { + HDprintf("ERROR close() can't close socket\n"); + return -1; + } + live_socket = -1; + attempt++; HDsleep(1); HDprintf("attempt #%u: ERROR connect() (%d)\n%s\n", attempt, errno, HDstrerror(errno)); + + /* Re-open socket for retry */ + live_socket = HDsocket(AF_INET, SOCK_STREAM, 0); + if (live_socket < 0) { + HDprintf("ERROR socket()\n"); + return -1; + } } else break; From 51c38aa528a0db4f030814ee293a39802ca08cf5 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Fri, 5 May 2023 15:57:40 -0500 Subject: [PATCH 209/231] Fix CMake generator expression syntax (#2909) --- c++/src/CMakeLists.txt | 4 ++-- c++/test/CMakeLists.txt | 2 +- config/cmake/HDFMacros.cmake | 6 ++--- fortran/examples/CMakeLists.txt | 4 ++-- fortran/src/CMakeLists.txt | 4 ++-- 
fortran/test/CMakeLists.txt | 24 +++++++++---------- release_docs/RELEASE.txt | 13 ++++++++++ src/CMakeLists.txt | 22 ++++++++--------- test/API/CMakeLists.txt | 4 ++-- test/CMakeLists.txt | 42 ++++++++++++++++----------------- testpar/API/CMakeLists.txt | 4 ++-- testpar/CMakeLists.txt | 8 +++---- 12 files changed, 75 insertions(+), 62 deletions(-) diff --git a/c++/src/CMakeLists.txt b/c++/src/CMakeLists.txt index ab1baed3f80..1ba90ce8a23 100644 --- a/c++/src/CMakeLists.txt +++ b/c++/src/CMakeLists.txt @@ -85,7 +85,7 @@ if (BUILD_STATIC_LIBS) ) target_compile_options(${HDF5_CPP_LIB_TARGET} PRIVATE "${HDF5_CMAKE_CXX_FLAGS}") target_compile_definitions(${HDF5_CPP_LIB_TARGET} - PRIVATE $<$:MPICH_SKIP_MPICXX;MPICH_IGNORE_CXX_SEEK># Parallel/MPI, prevent spurious cpp/cxx warnings + PRIVATE "$<$:MPICH_SKIP_MPICXX;MPICH_IGNORE_CXX_SEEK>"# Parallel/MPI, prevent spurious cpp/cxx warnings ) TARGET_C_PROPERTIES (${HDF5_CPP_LIB_TARGET} STATIC) target_link_libraries (${HDF5_CPP_LIB_TARGET} PUBLIC ${HDF5_LIB_TARGET}) @@ -104,7 +104,7 @@ if (BUILD_SHARED_LIBS) target_compile_options(${HDF5_CPP_LIBSH_TARGET} PRIVATE "${HDF5_CMAKE_CXX_FLAGS}") target_compile_definitions(${HDF5_CPP_LIBSH_TARGET} PUBLIC "H5_BUILT_AS_DYNAMIC_LIB" - PRIVATE $<$:MPICH_SKIP_MPICXX;MPICH_IGNORE_CXX_SEEK># Parallel/MPI, prevent spurious cpp/cxx warnings + PRIVATE "$<$:MPICH_SKIP_MPICXX;MPICH_IGNORE_CXX_SEEK>"# Parallel/MPI, prevent spurious cpp/cxx warnings ) TARGET_C_PROPERTIES (${HDF5_CPP_LIBSH_TARGET} SHARED) target_link_libraries (${HDF5_CPP_LIBSH_TARGET} diff --git a/c++/test/CMakeLists.txt b/c++/test/CMakeLists.txt index 55aade27786..308bc3b10a0 100644 --- a/c++/test/CMakeLists.txt +++ b/c++/test/CMakeLists.txt @@ -41,7 +41,7 @@ add_executable (cpp_testhdf5 ${CPP_TEST_SOURCES} ${HDF5_CPP_TEST_SOURCE_DIR}/h5c target_include_directories (cpp_testhdf5 PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};${HDF5_TEST_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") target_compile_options(cpp_testhdf5 PRIVATE "${HDF5_CMAKE_CXX_FLAGS}") target_compile_definitions(cpp_testhdf5 - PRIVATE $<$:MPICH_SKIP_MPICXX;MPICH_IGNORE_CXX_SEEK># Parallel/MPI, prevent spurious cpp/cxx warnings + PRIVATE "$<$:MPICH_SKIP_MPICXX;MPICH_IGNORE_CXX_SEEK>"# Parallel/MPI, prevent spurious cpp/cxx warnings ) if (NOT BUILD_SHARED_LIBS) TARGET_C_PROPERTIES (cpp_testhdf5 STATIC) diff --git a/config/cmake/HDFMacros.cmake b/config/cmake/HDFMacros.cmake index e0e122031b6..be9ceafded7 100644 --- a/config/cmake/HDFMacros.cmake +++ b/config/cmake/HDFMacros.cmake @@ -77,7 +77,7 @@ macro (INSTALL_TARGET_PDB libtarget targetdestination targetcomponent) if (${target_type} MATCHES "SHARED") set (targetfilename $) else () - get_property (target_name TARGET ${libtarget} PROPERTY $,$>,OUTPUT_NAME_DEBUG,OUTPUT_NAME_RELWITHDEBINFO>) + get_property (target_name TARGET ${libtarget} PROPERTY "$,$>,OUTPUT_NAME_DEBUG,OUTPUT_NAME_RELWITHDEBINFO>") set (targetfilename $/${target_name}.pdb) endif () install ( @@ -213,8 +213,8 @@ endmacro () #------------------------------------------------------------------------------- macro (TARGET_C_PROPERTIES wintarget libtype) target_compile_options(${wintarget} PRIVATE - $<$:${WIN_COMPILE_FLAGS}> - $<$:${WIN_COMPILE_FLAGS}> + "$<$:${WIN_COMPILE_FLAGS}>" + "$<$:${WIN_COMPILE_FLAGS}>" ) if(MSVC) set_property(TARGET ${wintarget} APPEND PROPERTY LINK_FLAGS "${WIN_LINK_FLAGS}") diff --git a/fortran/examples/CMakeLists.txt b/fortran/examples/CMakeLists.txt index e7edf40bae5..3a16c23a0c1 100644 --- a/fortran/examples/CMakeLists.txt +++ 
b/fortran/examples/CMakeLists.txt @@ -137,7 +137,7 @@ if (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND) PRIVATE ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} - $<$:${MPI_Fortran_LIBRARIES}> + "$<$:${MPI_Fortran_LIBRARIES}>" ) set_target_properties (f90_ex_ph5example PROPERTIES LINKER_LANGUAGE Fortran @@ -158,7 +158,7 @@ if (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND) PRIVATE ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} - $<$:${MPI_Fortran_LIBRARIES}> + "$<$:${MPI_Fortran_LIBRARIES}>" ) set_target_properties (f90_ex_ph5example PROPERTIES LINKER_LANGUAGE Fortran diff --git a/fortran/src/CMakeLists.txt b/fortran/src/CMakeLists.txt index 5cfdeadc3fd..fbe946843c1 100644 --- a/fortran/src/CMakeLists.txt +++ b/fortran/src/CMakeLists.txt @@ -339,7 +339,7 @@ if (BUILD_STATIC_LIBS) PUBLIC ${HDF5_F90_C_LIB_TARGET} PRIVATE ${LINK_Fortran_LIBS} - $<$:${MPI_Fortran_LIBRARIES}> + "$<$:${MPI_Fortran_LIBRARIES}>" ) # set_property(TARGET ${HDF5_F90_LIB_TARGET} APPEND PROPERTY LINK_FLAGS $<$:"-SUBSYSTEM:CONSOLE">) # set_property(TARGET ${HDF5_F90_LIB_TARGET} APPEND PROPERTY LINK_FLAGS $<$:${WIN_LINK_FLAGS}>) @@ -372,7 +372,7 @@ if (BUILD_SHARED_LIBS) ) target_link_libraries (${HDF5_F90_LIBSH_TARGET} PUBLIC ${HDF5_F90_C_LIBSH_TARGET} - PRIVATE ${LINK_Fortran_LIBS} $<$:${MPI_Fortran_LIBRARIES}> + PRIVATE ${LINK_Fortran_LIBS} "$<$:${MPI_Fortran_LIBRARIES}>" ) # set_property(TARGET ${HDF5_F90_LIBSH_TARGET} APPEND PROPERTY LINK_FLAGS $<$:"-SUBSYSTEM:CONSOLE">) # set_property(TARGET ${HDF5_F90_LIBSH_TARGET} APPEND PROPERTY LINK_FLAGS $<$:${WIN_LINK_FLAGS}>) diff --git a/fortran/test/CMakeLists.txt b/fortran/test/CMakeLists.txt index e8854274d4f..67c8b7568ef 100644 --- a/fortran/test/CMakeLists.txt +++ b/fortran/test/CMakeLists.txt @@ -224,7 +224,7 @@ if(MSVC) endif() if (NOT BUILD_SHARED_LIBS) target_include_directories (testhdf5_fortran PRIVATE "${CMAKE_Fortran_MODULE_DIRECTORY}/static;${HDF5_F90_BINARY_DIR};${HDF5_F90_BINARY_DIR}/static") - target_link_libraries (testhdf5_fortran PRIVATE ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} $<$:ws2_32.lib>) + target_link_libraries (testhdf5_fortran PRIVATE ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$:ws2_32.lib>") set_target_properties (testhdf5_fortran PROPERTIES LINKER_LANGUAGE Fortran FOLDER test/fortran @@ -233,7 +233,7 @@ if (NOT BUILD_SHARED_LIBS) add_dependencies (testhdf5_fortran ${HDF5_F90_TEST_LIB_TARGET}) else () target_include_directories (testhdf5_fortran PRIVATE "${CMAKE_Fortran_MODULE_DIRECTORY}/shared;${HDF5_F90_BINARY_DIR};${HDF5_F90_BINARY_DIR}/shared") - target_link_libraries (testhdf5_fortran PRIVATE ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} $<$:ws2_32.lib>) + target_link_libraries (testhdf5_fortran PRIVATE ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$:ws2_32.lib>") set_target_properties (testhdf5_fortran PROPERTIES LINKER_LANGUAGE Fortran FOLDER test/fortran @@ -263,7 +263,7 @@ if(MSVC) endif() if (NOT BUILD_SHARED_LIBS) target_include_directories (testhdf5_fortran_1_8 PRIVATE "${CMAKE_Fortran_MODULE_DIRECTORY}/static;${HDF5_F90_BINARY_DIR};${HDF5_F90_BINARY_DIR}/static") - target_link_libraries (testhdf5_fortran_1_8 PRIVATE ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} $<$:ws2_32.lib>) + target_link_libraries (testhdf5_fortran_1_8 PRIVATE ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$:ws2_32.lib>") set_target_properties (testhdf5_fortran_1_8 PROPERTIES LINKER_LANGUAGE Fortran FOLDER 
test/fortran @@ -272,7 +272,7 @@ if (NOT BUILD_SHARED_LIBS) add_dependencies (testhdf5_fortran_1_8 ${HDF5_F90_TEST_LIB_TARGET}) else () target_include_directories (testhdf5_fortran_1_8 PRIVATE "${CMAKE_Fortran_MODULE_DIRECTORY}/shared;${HDF5_F90_BINARY_DIR};${HDF5_F90_BINARY_DIR}/shared") - target_link_libraries (testhdf5_fortran_1_8 PRIVATE ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} $<$:ws2_32.lib>) + target_link_libraries (testhdf5_fortran_1_8 PRIVATE ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$:ws2_32.lib>") set_target_properties (testhdf5_fortran_1_8 PROPERTIES LINKER_LANGUAGE Fortran FOLDER test/fortran @@ -304,7 +304,7 @@ if(MSVC) endif() if (NOT BUILD_SHARED_LIBS) target_include_directories (fortranlib_test_F03 PRIVATE "${CMAKE_Fortran_MODULE_DIRECTORY}/static;${HDF5_F90_BINARY_DIR};${HDF5_F90_BINARY_DIR}/static") - target_link_libraries (fortranlib_test_F03 PRIVATE ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} $<$:ws2_32.lib>) + target_link_libraries (fortranlib_test_F03 PRIVATE ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$:ws2_32.lib>") set_target_properties (fortranlib_test_F03 PROPERTIES LINKER_LANGUAGE Fortran FOLDER test/fortran @@ -313,7 +313,7 @@ if (NOT BUILD_SHARED_LIBS) add_dependencies (fortranlib_test_F03 ${HDF5_F90_TEST_LIB_TARGET}) else () target_include_directories (fortranlib_test_F03 PRIVATE "${CMAKE_Fortran_MODULE_DIRECTORY}/shared;${HDF5_F90_BINARY_DIR};${HDF5_F90_BINARY_DIR}/shared") - target_link_libraries (fortranlib_test_F03 PRIVATE ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} $<$:ws2_32.lib>) + target_link_libraries (fortranlib_test_F03 PRIVATE ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$:ws2_32.lib>") set_target_properties (fortranlib_test_F03 PROPERTIES LINKER_LANGUAGE Fortran FOLDER test/fortran @@ -336,7 +336,7 @@ if(MSVC) endif() if (NOT BUILD_SHARED_LIBS) target_include_directories (fflush1 PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/static) - target_link_libraries (fflush1 PRIVATE ${HDF5_F90_LIB_TARGET} ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} $<$:ws2_32.lib>) + target_link_libraries (fflush1 PRIVATE ${HDF5_F90_LIB_TARGET} ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$:ws2_32.lib>") set_target_properties (fflush1 PROPERTIES LINKER_LANGUAGE Fortran FOLDER test/fortran @@ -345,7 +345,7 @@ if (NOT BUILD_SHARED_LIBS) add_dependencies (fflush1 ${HDF5_F90_TEST_LIB_TARGET}) else () target_include_directories (fflush1 PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/shared) - target_link_libraries (fflush1 PRIVATE ${HDF5_F90_LIBSH_TARGET} ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} $<$:ws2_32.lib>) + target_link_libraries (fflush1 PRIVATE ${HDF5_F90_LIBSH_TARGET} ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$:ws2_32.lib>") set_target_properties (fflush1 PROPERTIES LINKER_LANGUAGE Fortran FOLDER test/fortran @@ -368,7 +368,7 @@ if(MSVC) endif() if (NOT BUILD_SHARED_LIBS) target_include_directories (fflush2 PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/static) - target_link_libraries (fflush2 PRIVATE ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} $<$:ws2_32.lib>) + target_link_libraries (fflush2 PRIVATE ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$:ws2_32.lib>") set_target_properties (fflush2 PROPERTIES LINKER_LANGUAGE Fortran FOLDER test/fortran @@ -377,7 +377,7 @@ if (NOT BUILD_SHARED_LIBS) add_dependencies 
(fflush2 ${HDF5_F90_TEST_LIB_TARGET}) else () target_include_directories (fflush2 PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/shared) - target_link_libraries (fflush2 PRIVATE ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} $<$:ws2_32.lib>) + target_link_libraries (fflush2 PRIVATE ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$:ws2_32.lib>") set_target_properties (fflush2 PROPERTIES LINKER_LANGUAGE Fortran FOLDER test/fortran @@ -400,7 +400,7 @@ if(MSVC) endif() if (NOT BUILD_SHARED_LIBS) target_include_directories (vol_connector PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/static) - target_link_libraries (vol_connector PRIVATE ${HDF5_F90_LIB_TARGET} ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} $<$:ws2_32.lib>) + target_link_libraries (vol_connector PRIVATE ${HDF5_F90_LIB_TARGET} ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$:ws2_32.lib>") set_target_properties (vol_connector PROPERTIES LINKER_LANGUAGE Fortran FOLDER test/fortran @@ -409,7 +409,7 @@ if (NOT BUILD_SHARED_LIBS) add_dependencies (vol_connector ${HDF5_F90_TEST_LIB_TARGET}) else () target_include_directories (vol_connector PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/shared) - target_link_libraries (vol_connector PRIVATE ${HDF5_F90_LIBSH_TARGET} ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} $<$:ws2_32.lib>) + target_link_libraries (vol_connector PRIVATE ${HDF5_F90_LIBSH_TARGET} ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$:ws2_32.lib>") set_target_properties (vol_connector PROPERTIES LINKER_LANGUAGE Fortran FOLDER test/fortran diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 4b13d09be4d..8d3111eb480 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -414,6 +414,19 @@ Bug Fixes since HDF5-1.14.0 release Configuration ------------- + - Fixed syntax of generator expressions used by CMake + + Add quotes around the generator expression should allow CMake to + correctly parse the expression. Generator expressions are typically + parsed after command arguments. If a generator expression contains + spaces, new lines, semicolons or other characters that may be + interpreted as command argument separators, the whole expression + should be surrounded by quotes when passed to a command. Failure to + do so may result in the expression being split and it may no longer + be recognized as a generator expression. 
+ + Fixes GitHub issue #2906 + - Fixed improper include of Subfiling VFD build directory With the release of the Subfiling Virtual File Driver feature, compiler diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index e002e4f98ae..4edcc98c662 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1238,18 +1238,18 @@ if (BUILD_STATIC_LIBS) ${HDF_EXTRA_C_FLAGS} ${HDF_EXTRA_FLAGS} PRIVATE - $<$:H5_DEBUG_API> # Enable tracing of the API - $<$:${HDF5_DEBUG_APIS}> - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:H5_DEBUG_API>" # Enable tracing of the API + "$<$:${HDF5_DEBUG_APIS}>" + "$<$:${HDF5_DEVELOPER_DEFS}>" ) TARGET_C_PROPERTIES (${HDF5_LIB_TARGET} STATIC) target_link_libraries (${HDF5_LIB_TARGET} PRIVATE ${LINK_LIBS} ${LINK_COMP_LIBS} - PUBLIC $<$>:${CMAKE_DL_LIBS}> "$<$:MPI::MPI_C>" + PUBLIC "$<$>:${CMAKE_DL_LIBS}>" "$<$:MPI::MPI_C>" ) if (NOT WIN32) target_link_libraries (${HDF5_LIB_TARGET} - PRIVATE $<$,$>:Threads::Threads> + PRIVATE "$<$,$>:Threads::Threads>" ) endif () set_global_variable (HDF5_LIBRARIES_TO_EXPORT ${HDF5_LIB_TARGET}) @@ -1280,16 +1280,16 @@ if (BUILD_SHARED_LIBS) ${HDF_EXTRA_C_FLAGS} ${HDF_EXTRA_FLAGS} PRIVATE - $<$:H5_HAVE_THREADSAFE> - $<$:H5_DEBUG_API> # Enable tracing of the API - $<$:${HDF5_DEBUG_APIS}> - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:H5_HAVE_THREADSAFE>" + "$<$:H5_DEBUG_API>" # Enable tracing of the API + "$<$:${HDF5_DEBUG_APIS}>" + "$<$:${HDF5_DEVELOPER_DEFS}>" ) TARGET_C_PROPERTIES (${HDF5_LIBSH_TARGET} SHARED) target_link_libraries (${HDF5_LIBSH_TARGET} PRIVATE ${LINK_LIBS} ${LINK_COMP_LIBS} - $<$,$>:Threads::Threads> - PUBLIC $<$>:${CMAKE_DL_LIBS}> "$<$:MPI::MPI_C>" + "$<$,$>:Threads::Threads>" + PUBLIC "$<$>:${CMAKE_DL_LIBS}>" "$<$:MPI::MPI_C>" ) set_global_variable (HDF5_LIBRARIES_TO_EXPORT "${HDF5_LIBRARIES_TO_EXPORT};${HDF5_LIBSH_TARGET}") H5_SET_LIB_OPTIONS (${HDF5_LIBSH_TARGET} ${HDF5_LIB_NAME} SHARED "LIB") diff --git a/test/API/CMakeLists.txt b/test/API/CMakeLists.txt index dd2bca224f2..606aa9cda5a 100644 --- a/test/API/CMakeLists.txt +++ b/test/API/CMakeLists.txt @@ -134,7 +134,7 @@ target_compile_options ( target_compile_definitions ( h5_api_test PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) if (NOT BUILD_SHARED_LIBS) TARGET_C_PROPERTIES (h5_api_test STATIC) @@ -214,7 +214,7 @@ foreach (api_test_extra ${HDF5_API_TESTS_EXTRA}) target_compile_definitions ( h5_api_test_${api_test_extra} PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) if (NOT BUILD_SHARED_LIBS) TARGET_C_PROPERTIES (h5_api_test_${api_test_extra} STATIC) diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index fe52cd33091..210ff22a424 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -37,12 +37,12 @@ if (BUILD_STATIC_LIBS) target_compile_definitions(${HDF5_TEST_LIB_TARGET} PRIVATE "H5_TEST_EXPRESS_LEVEL_DEFAULT=${H5_TEST_EXPRESS_LEVEL_DEFAULT}" - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) TARGET_C_PROPERTIES (${HDF5_TEST_LIB_TARGET} STATIC) target_link_libraries (${HDF5_TEST_LIB_TARGET} PUBLIC ${LINK_LIBS} ${HDF5_LIB_TARGET} - PRIVATE $<$,$>:ws2_32.lib> + PRIVATE "$<$,$>:ws2_32.lib>" ) if (MINGW) target_link_libraries (${HDF5_TEST_LIB_TARGET} PRIVATE "wsock32.lib") @@ -63,12 +63,12 @@ if (BUILD_SHARED_LIBS) "H5_BUILT_AS_DYNAMIC_LIB" PRIVATE "H5_TEST_EXPRESS_LEVEL_DEFAULT=${H5_TEST_EXPRESS_LEVEL_DEFAULT}" - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) TARGET_C_PROPERTIES (${HDF5_TEST_LIBSH_TARGET} SHARED) target_link_libraries (${HDF5_TEST_LIBSH_TARGET} PUBLIC ${LINK_LIBS} 
${HDF5_LIBSH_TARGET} - PRIVATE $<$,$>:ws2_32.lib> + PRIVATE "$<$,$>:ws2_32.lib>" ) if (MINGW) target_link_libraries (${HDF5_TEST_LIBSH_TARGET} PRIVATE "wsock32.lib") @@ -401,7 +401,7 @@ macro (ADD_H5_EXE file) target_compile_options(${file} PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(${file} PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) if (NOT BUILD_SHARED_LIBS) TARGET_C_PROPERTIES (${file} STATIC) @@ -445,7 +445,7 @@ add_executable (chunk_info ${HDF5_TEST_SOURCE_DIR}/chunk_info.c) target_compile_options(chunk_info PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(chunk_info PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (chunk_info PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};${HDF5_TEST_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") if (NOT BUILD_SHARED_LIBS) @@ -469,7 +469,7 @@ add_executable (direct_chunk ${HDF5_TEST_SOURCE_DIR}/direct_chunk.c) target_compile_options(direct_chunk PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(direct_chunk PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (direct_chunk PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};${HDF5_TEST_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") if (NOT BUILD_SHARED_LIBS) @@ -494,7 +494,7 @@ add_executable (testhdf5 ${testhdf5_SOURCES}) target_compile_options(testhdf5 PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(testhdf5 PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (testhdf5 PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") if (NOT BUILD_SHARED_LIBS) @@ -518,7 +518,7 @@ add_executable (cache_image ${cache_image_SOURCES}) target_compile_options(cache_image PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(cache_image PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (cache_image PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") if (NOT BUILD_SHARED_LIBS) @@ -542,7 +542,7 @@ add_executable (ttsafe ${ttsafe_SOURCES}) target_compile_options(ttsafe PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(ttsafe PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (ttsafe PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") if (NOT BUILD_SHARED_LIBS) @@ -550,12 +550,12 @@ if (NOT BUILD_SHARED_LIBS) target_link_libraries (ttsafe PRIVATE ${HDF5_TEST_LIB_TARGET}) if (NOT WIN32) target_link_libraries (ttsafe - PRIVATE $<$:Threads::Threads> + PRIVATE "$<$:Threads::Threads>" ) endif () else () TARGET_C_PROPERTIES (ttsafe SHARED) - target_link_libraries (ttsafe PRIVATE ${HDF5_TEST_LIBSH_TARGET} $<$:Threads::Threads>) + target_link_libraries (ttsafe PRIVATE ${HDF5_TEST_LIBSH_TARGET} "$<$:Threads::Threads>") endif () set_target_properties (ttsafe PROPERTIES FOLDER test) @@ -572,7 +572,7 @@ add_executable (thread_id ${HDF5_TEST_SOURCE_DIR}/thread_id.c) target_compile_options(thread_id PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(thread_id PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (thread_id PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") if (NOT BUILD_SHARED_LIBS) @@ -580,12 +580,12 @@ if (NOT BUILD_SHARED_LIBS) target_link_libraries (thread_id PRIVATE ${HDF5_TEST_LIB_TARGET}) if 
(NOT WIN32) target_link_libraries (thread_id - PRIVATE $<$:Threads::Threads> + PRIVATE "$<$:Threads::Threads>" ) endif () else () TARGET_C_PROPERTIES (thread_id SHARED) - target_link_libraries (thread_id PRIVATE ${HDF5_TEST_LIBSH_TARGET} $<$:Threads::Threads>) + target_link_libraries (thread_id PRIVATE ${HDF5_TEST_LIBSH_TARGET} "$<$:Threads::Threads>") endif () set_target_properties (thread_id PROPERTIES FOLDER test) @@ -682,7 +682,7 @@ macro (ADD_H5_VDS_EXE file) target_compile_options(${file} PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(${file} PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) if (NOT BUILD_SHARED_LIBS) TARGET_C_PROPERTIES (${file} STATIC) @@ -712,7 +712,7 @@ add_executable (accum_swmr_reader ${HDF5_TEST_SOURCE_DIR}/accum_swmr_reader.c) target_compile_options(accum_swmr_reader PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(accum_swmr_reader PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (accum_swmr_reader PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") if (NOT BUILD_SHARED_LIBS) @@ -786,7 +786,7 @@ add_executable (use_append_chunk ${use_append_chunk_SOURCES}) target_compile_options(use_append_chunk PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(use_append_chunk PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (use_append_chunk PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") if (NOT BUILD_SHARED_LIBS) @@ -811,7 +811,7 @@ if (HDF5_BUILD_UTILS) # requires mirror server target_compile_options(use_append_chunk_mirror PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(use_append_chunk_mirror PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (use_append_chunk_mirror PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") if (NOT BUILD_SHARED_LIBS) @@ -836,7 +836,7 @@ add_executable (use_append_mchunks ${use_append_mchunks_SOURCES}) target_compile_options(use_append_mchunks PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(use_append_mchunks PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (use_append_mchunks PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") if (NOT BUILD_SHARED_LIBS) @@ -860,7 +860,7 @@ add_executable (use_disable_mdc_flushes ${use_disable_mdc_flushes_SOURCES}) target_compile_options(use_disable_mdc_flushes PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(use_disable_mdc_flushes PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (use_disable_mdc_flushes PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") if (NOT BUILD_SHARED_LIBS) diff --git a/testpar/API/CMakeLists.txt b/testpar/API/CMakeLists.txt index e907078783b..f893f0c5855 100644 --- a/testpar/API/CMakeLists.txt +++ b/testpar/API/CMakeLists.txt @@ -94,7 +94,7 @@ target_compile_options ( target_compile_definitions ( h5_api_test_parallel PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) if (NOT BUILD_SHARED_LIBS) TARGET_C_PROPERTIES (h5_api_test_parallel STATIC) @@ -173,7 +173,7 @@ foreach (api_test_extra ${HDF5_API_PAR_TESTS_EXTRA}) target_compile_definitions ( h5_api_test_parallel_${api_test_extra} PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) if (NOT 
BUILD_SHARED_LIBS) TARGET_C_PROPERTIES (h5_api_test_parallel_${api_test_extra} STATIC) diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt index 6bb5fa6f77e..7894cffa1da 100644 --- a/testpar/CMakeLists.txt +++ b/testpar/CMakeLists.txt @@ -26,7 +26,7 @@ add_executable (testphdf5 ${testphdf5_SOURCES}) target_compile_options(testphdf5 PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(testphdf5 PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (testphdf5 PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>" @@ -56,7 +56,7 @@ macro (ADD_H5P_EXE file) target_compile_options(${file} PRIVATE "${HDF5_CMAKE_C_FLAGS}") target_compile_definitions(${file} PRIVATE - $<$:${HDF5_DEVELOPER_DEFS}> + "$<$:${HDF5_DEVELOPER_DEFS}>" ) target_include_directories (${file} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>" @@ -65,13 +65,13 @@ macro (ADD_H5P_EXE file) TARGET_C_PROPERTIES (${file} STATIC) target_link_libraries (${file} PRIVATE ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$:MPI::MPI_C>" - $<$,$>:ws2_32.lib> + "$<$,$>:ws2_32.lib>" ) else () TARGET_C_PROPERTIES (${file} SHARED) target_link_libraries (${file} PRIVATE ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$:MPI::MPI_C>" - $<$,$>:ws2_32.lib> + "$<$,$>:ws2_32.lib>" ) endif () set_target_properties (${file} PROPERTIES FOLDER test/par) From 1326cba7c1fabd74b51a6a5566dca7e59da3b272 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sat, 6 May 2023 15:35:55 -0700 Subject: [PATCH 210/231] Add Doxygen for H5S_ALL, _PLIST, and _BLOCK (#2921) Fixes #688 --- src/H5Spublic.h | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/src/H5Spublic.h b/src/H5Spublic.h index bd5a82cc627..d177e161778 100644 --- a/src/H5Spublic.h +++ b/src/H5Spublic.h @@ -21,9 +21,30 @@ #include "H5Ipublic.h" /* Define special dataspaces for dataset I/O operations */ -#define H5S_ALL 0 /* (hid_t) */ -#define H5S_BLOCK 1 /* (hid_t) */ -#define H5S_PLIST 2 /* (hid_t) */ + +/** + * Used with @ref H5Dread and @ref H5Dwrite to indicate that the entire + * dataspace will be selected. In the case of a file dataspace, this means + * that the entire file dataspace, as defined by the dataset's dimensions, + * will be selected. In the case of a memory dataspace, this means that + * the specified file dataspace will also be used for the memory dataspace. + * Used in place of a file or memory dataspace @ref hid_t value. + */ +#define H5S_ALL 0 + +/** + * Indicates that the buffer provided in a call to @ref H5Dread or @ref H5Dwrite + * is a single contiguous block of memory, with the same number of elements + * as the file dataspace. Used in place of a memory dataspace @ref hid_t value. + */ +#define H5S_BLOCK 1 + +/** + * Used with @ref H5Dread and @ref H5Dwrite to indicate that the file dataspace + * selection was set via @ref H5Pset_dataset_io_hyperslab_selection calls. + * Used in place of a file dataspace @ref hid_t value. 
+ */ +#define H5S_PLIST 2 #define H5S_UNLIMITED HSIZE_UNDEF /**< Value for 'unlimited' dimensions */ From 6257939ed2dbd9f9887775e57fb6da0e3fb0b173 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sat, 6 May 2023 15:36:14 -0700 Subject: [PATCH 211/231] Add Doxygen for H5ES_NONE _WAIT_NONE/FOREVER (#2922) --- src/H5ESpublic.h | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/src/H5ESpublic.h b/src/H5ESpublic.h index f2c7cb518cb..b66ef21d4d2 100644 --- a/src/H5ESpublic.h +++ b/src/H5ESpublic.h @@ -18,23 +18,26 @@ #define H5ESpublic_H /* Public headers needed by this file */ -#include "H5public.h" /* Generic Functions */ +#include "H5public.h" /*****************/ /* Public Macros */ /*****************/ -/* Default value for "no event set" / synchronous execution */ -#define H5ES_NONE 0 /* (hid_t) */ +/** + * Default value for "no event set" / synchronous execution. Used in + * place of a @ref hid_t identifier. + */ +#define H5ES_NONE 0 /* Special "wait" timeout values */ -#define H5ES_WAIT_FOREVER (UINT64_MAX) /* Wait until all operations complete */ -#define H5ES_WAIT_NONE \ - (0) /* Don't wait for operations to complete, \ - * just check their status. \ - * (this allows H5ESwait to behave \ - * like a 'test' operation) \ - */ +#define H5ES_WAIT_FOREVER (UINT64_MAX) /**< Wait until all operations complete */ + +/** + * Don't wait for operations to complete, just check their status. + * (This allows @ref H5ESwait to behave like a 'test' operation) + */ +#define H5ES_WAIT_NONE (0) /*******************/ /* Public Typedefs */ From c84422c7035cac6d2484d0114093467fa4b92832 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sat, 6 May 2023 18:12:32 -0700 Subject: [PATCH 212/231] Public header cleanup (#2923) Removes unnecessary headers and adds missing headers, ensuring that headers can be included independently and in any order. Fixes #2789 --- src/H5ACpublic.h | 9 +++------ src/H5Apublic.h | 8 ++++---- src/H5Cpublic.h | 9 +++------ src/H5Dpublic.h | 7 ++----- src/H5ESpublic.h | 4 ++-- src/H5Epublic.h | 7 +++---- src/H5FDpublic.h | 6 +++--- src/H5Fpublic.h | 7 +++---- src/H5Gpublic.h | 14 ++++---------- src/H5Ipublic.h | 3 +-- src/H5Lpublic.h | 11 ++++------- src/H5MMpublic.h | 3 +-- src/H5Mpublic.h | 8 ++++---- src/H5Opublic.h | 10 +++------- src/H5PLpublic.h | 3 +-- src/H5Ppublic.h | 17 +++++++---------- src/H5Rpublic.h | 8 ++++---- src/H5Spublic.h | 5 ++--- src/H5Tpublic.h | 5 ++--- src/H5VLpublic.h | 5 ++--- src/H5Zpublic.h | 3 +-- 21 files changed, 59 insertions(+), 93 deletions(-) diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h index 5dc65c7a191..532cc80ed15 100644 --- a/src/H5ACpublic.h +++ b/src/H5ACpublic.h @@ -13,19 +13,16 @@ /*------------------------------------------------------------------------- * * Created: H5ACpublic.h - * Jul 10 1997 - * Robb Matzke * - * Purpose: Public include file for cache functions. 
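Aside on the H5S_ALL / H5S_BLOCK documentation added in PATCH 210 above: a minimal sketch of how these values are passed directly to H5Dread in place of dataspace identifiers. The file handle, dataset name, and buffer size are illustrative assumptions, not taken from the patch.

#include "hdf5.h"

/* Read an entire 1-D dataset of 100 ints without creating any dataspace
 * objects; the dataset name "data" and the element count are illustrative */
static herr_t
read_whole_dataset(hid_t file_id)
{
    int    buf[100];
    herr_t status;
    hid_t  dset_id = H5Dopen2(file_id, "data", H5P_DEFAULT);

    if (dset_id < 0)
        return -1;

    /* H5S_ALL for both the memory and file dataspace selects the full extent;
     * per the documentation above, H5S_BLOCK could replace the memory argument
     * when buf is one contiguous block matching the file selection's size */
    status = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);

    if (H5Dclose(dset_id) < 0)
        status = -1;

    return status;
}
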
+ * Purpose: Public include file for cache functions * *------------------------------------------------------------------------- */ #ifndef H5ACpublic_H #define H5ACpublic_H -/* Public headers needed by this file */ -#include "H5public.h" -#include "H5Cpublic.h" +#include "H5public.h" /* Generic Functions */ +#include "H5Cpublic.h" /* Cache */ /**************************************************************************** * diff --git a/src/H5Apublic.h b/src/H5Apublic.h index 92df94a51ed..de6851d5a02 100644 --- a/src/H5Apublic.h +++ b/src/H5Apublic.h @@ -16,10 +16,10 @@ #ifndef H5Apublic_H #define H5Apublic_H -/* Public headers needed by this file */ -#include "H5Ipublic.h" /* IDs */ -#include "H5Opublic.h" /* Object Headers */ -#include "H5Tpublic.h" /* Datatypes */ +#include "H5public.h" /* Generic Functions */ +#include "H5Ipublic.h" /* Identifiers */ +#include "H5Opublic.h" /* Object Headers */ +#include "H5Tpublic.h" /* Datatypes */ //! /** diff --git a/src/H5Cpublic.h b/src/H5Cpublic.h index 501a99b8a2f..69c86cdef9b 100644 --- a/src/H5Cpublic.h +++ b/src/H5Cpublic.h @@ -12,19 +12,16 @@ /*------------------------------------------------------------------------- * - * Created: H5Cpublic.h - * June 4, 2005 - * John Mainzer + * Created: H5Cpublic.h * - * Purpose: Public include file for cache functions. + * Purpose: Public header file for cache functions * *------------------------------------------------------------------------- */ #ifndef H5Cpublic_H #define H5Cpublic_H -/* Public headers needed by this file */ -#include "H5public.h" +#include "H5public.h" /* Generic Functions */ enum H5C_cache_incr_mode { H5C_incr__off, diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index 4315c7b9a5a..0dbd6493cc8 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -16,11 +16,8 @@ #ifndef H5Dpublic_H #define H5Dpublic_H -/* System headers needed by this file */ - -/* Public headers needed by this file */ -#include "H5public.h" -#include "H5Ipublic.h" +#include "H5public.h" /* Generic Functions */ +#include "H5Ipublic.h" /* Identifiers */ /*****************/ /* Public Macros */ diff --git a/src/H5ESpublic.h b/src/H5ESpublic.h index b66ef21d4d2..b5f2af48f76 100644 --- a/src/H5ESpublic.h +++ b/src/H5ESpublic.h @@ -17,8 +17,8 @@ #ifndef H5ESpublic_H #define H5ESpublic_H -/* Public headers needed by this file */ -#include "H5public.h" +#include "H5public.h" /* Generic Functions */ +#include "H5Ipublic.h" /* Identifiers */ /*****************/ /* Public Macros */ diff --git a/src/H5Epublic.h b/src/H5Epublic.h index c02049a6e3c..50ae941ea2b 100644 --- a/src/H5Epublic.h +++ b/src/H5Epublic.h @@ -16,11 +16,10 @@ #ifndef H5Epublic_H #define H5Epublic_H -#include /*FILE arg of H5Eprint() */ +#include /* FILE arg of H5Eprint() */ -/* Public headers needed by this file */ -#include "H5public.h" -#include "H5Ipublic.h" +#include "H5public.h" /* Generic Functions */ +#include "H5Ipublic.h" /* Identifiers */ /* Value for the default error stack */ #define H5E_DEFAULT 0 /* (hid_t) */ diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h index 891b3485bfd..2a131ddaf59 100644 --- a/src/H5FDpublic.h +++ b/src/H5FDpublic.h @@ -13,9 +13,9 @@ #ifndef H5FDpublic_H #define H5FDpublic_H -/* Public headers needed by this file */ -#include "H5public.h" /* Generic Functions */ -#include "H5Fpublic.h" /* Files */ +#include "H5public.h" /* Generic Functions */ +#include "H5Fpublic.h" /* Files */ +#include "H5Ipublic.h" /* Identifiers */ /*****************/ /* Public Macros */ diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h index 
59f8dec268a..ffcde3c833e 100644 --- a/src/H5Fpublic.h +++ b/src/H5Fpublic.h @@ -16,10 +16,9 @@ #ifndef H5Fpublic_H #define H5Fpublic_H -/* Public header files needed by this file */ -#include "H5public.h" -#include "H5ACpublic.h" -#include "H5Ipublic.h" +#include "H5public.h" /* Generic Functions */ +#include "H5ACpublic.h" /* Metadata Cache */ +#include "H5Ipublic.h" /* Identifiers */ /* When this header is included from a private header, don't make calls to H5check() */ #undef H5CHECK diff --git a/src/H5Gpublic.h b/src/H5Gpublic.h index 0e0a58b1986..2609fb061c9 100644 --- a/src/H5Gpublic.h +++ b/src/H5Gpublic.h @@ -13,8 +13,6 @@ /*------------------------------------------------------------------------- * * Created: H5Gpublic.h - * Jul 11 1997 - * Robb Matzke * * Purpose: Public declarations for the H5G package * @@ -23,14 +21,10 @@ #ifndef H5Gpublic_H #define H5Gpublic_H -/* System headers needed by this file */ -#include - -/* Public headers needed by this file */ -#include "H5public.h" /* Generic Functions */ -#include "H5Lpublic.h" /* Links */ -#include "H5Opublic.h" /* Object headers */ -#include "H5Tpublic.h" /* Datatypes */ +#include "H5public.h" /* Generic Functions */ +#include "H5Ipublic.h" /* Identifiers */ +#include "H5Lpublic.h" /* Links */ +#include "H5Opublic.h" /* Object Headers */ /*****************/ /* Public Macros */ diff --git a/src/H5Ipublic.h b/src/H5Ipublic.h index 69b2450644e..5f8fc280280 100644 --- a/src/H5Ipublic.h +++ b/src/H5Ipublic.h @@ -17,8 +17,7 @@ #ifndef H5Ipublic_H #define H5Ipublic_H -/* Public headers needed by this file */ -#include "H5public.h" +#include "H5public.h" /* Generic Functions */ /** * Library type values. diff --git a/src/H5Lpublic.h b/src/H5Lpublic.h index 653bf2738cb..fb46ad8208f 100644 --- a/src/H5Lpublic.h +++ b/src/H5Lpublic.h @@ -13,8 +13,6 @@ /*------------------------------------------------------------------------- * * Created: H5Lpublic.h - * Dec 1 2005 - * James Laird * * Purpose: Public declarations for the H5L package (links) * @@ -23,11 +21,10 @@ #ifndef H5Lpublic_H #define H5Lpublic_H -/* Public headers needed by this file */ -#include "H5public.h" /* Generic Functions */ -#include "H5Ipublic.h" /* IDs */ -#include "H5Opublic.h" /* Object Headers */ -#include "H5Tpublic.h" /* Datatypes */ +#include "H5public.h" /* Generic Functions */ +#include "H5Ipublic.h" /* Identifiers */ +#include "H5Opublic.h" /* Object Headers */ +#include "H5Tpublic.h" /* Datatypes */ /*****************/ /* Public Macros */ diff --git a/src/H5MMpublic.h b/src/H5MMpublic.h index 778c6e389e3..62172c9ea09 100644 --- a/src/H5MMpublic.h +++ b/src/H5MMpublic.h @@ -22,8 +22,7 @@ #ifndef H5MMpublic_H #define H5MMpublic_H -/* Public headers needed by this file */ -#include "H5public.h" +#include "H5public.h" /* Generic Functions */ /* These typedefs are currently used for VL datatype allocation/freeing */ //! 
diff --git a/src/H5Mpublic.h b/src/H5Mpublic.h index f5874e8ea78..af6aa49d4ee 100644 --- a/src/H5Mpublic.h +++ b/src/H5Mpublic.h @@ -19,11 +19,11 @@ #ifndef H5Mpublic_H #define H5Mpublic_H -/* System headers needed by this file */ +#include "H5public.h" /* Generic Functions */ +#include "H5Ipublic.h" /* Identifiers */ +#include "H5VLpublic.h" /* Virtual Object Layer */ -/* Public headers needed by this file */ -#include "H5public.h" -#include "H5Ipublic.h" +/* Exposes VOL connector types, so it needs the connector header */ #include "H5VLconnector.h" /*****************/ diff --git a/src/H5Opublic.h b/src/H5Opublic.h index 6fba5085948..d880a66730c 100644 --- a/src/H5Opublic.h +++ b/src/H5Opublic.h @@ -13,21 +13,17 @@ /*------------------------------------------------------------------------- * * Created: H5Opublic.h - * Aug 5 1997 - * Robb Matzke * * Purpose: Public declarations for the H5O (object header) - * package. + * package * *------------------------------------------------------------------------- */ #ifndef H5Opublic_H #define H5Opublic_H -/* Public headers needed by this file */ -#include "H5public.h" /* Generic Functions */ -#include "H5Ipublic.h" /* IDs */ -#include "H5Lpublic.h" /* Links */ +#include "H5public.h" /* Generic Functions */ +#include "H5Ipublic.h" /* Identifiers */ /*****************/ /* Public Macros */ diff --git a/src/H5PLpublic.h b/src/H5PLpublic.h index c53053b1538..ca996977485 100644 --- a/src/H5PLpublic.h +++ b/src/H5PLpublic.h @@ -17,8 +17,7 @@ #ifndef H5PLpublic_H #define H5PLpublic_H -/* Public headers needed by this file */ -#include "H5public.h" /* Generic Functions */ +#include "H5public.h" /* Generic Functions */ /*******************/ /* Public Typedefs */ diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index a08119d5d69..2817551c46e 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -17,21 +17,18 @@ #ifndef H5Ppublic_H #define H5Ppublic_H -/* System headers needed by this file */ - -/* Public headers needed by this file */ -#include "H5public.h" -#include "H5ACpublic.h" /* Metadata cache */ +#include "H5public.h" /* Generic Functions */ +#include "H5ACpublic.h" /* Metadata Cache */ #include "H5Dpublic.h" /* Datasets */ #include "H5Fpublic.h" /* Files */ -#include "H5FDpublic.h" /* File drivers */ -#include "H5Ipublic.h" /* ID management */ +#include "H5FDpublic.h" /* (Virtual) File Drivers */ +#include "H5Ipublic.h" /* Identifiers */ #include "H5Lpublic.h" /* Links */ -#include "H5MMpublic.h" /* Memory management */ -#include "H5Opublic.h" /* Object headers */ +#include "H5MMpublic.h" /* Memory Management */ +#include "H5Opublic.h" /* Object Headers */ #include "H5Spublic.h" /* Dataspaces */ #include "H5Tpublic.h" /* Datatypes */ -#include "H5Zpublic.h" /* Data filters */ +#include "H5Zpublic.h" /* Data Filters */ /*****************/ /* Public Macros */ diff --git a/src/H5Rpublic.h b/src/H5Rpublic.h index dfeffda1600..1e447247390 100644 --- a/src/H5Rpublic.h +++ b/src/H5Rpublic.h @@ -16,10 +16,10 @@ #ifndef H5Rpublic_H #define H5Rpublic_H -/* Public headers needed by this file */ -#include "H5public.h" -#include "H5Gpublic.h" -#include "H5Ipublic.h" +#include "H5public.h" /* Generic Functions */ +#include "H5Gpublic.h" /* Groups */ +#include "H5Ipublic.h" /* Identifiers */ +#include "H5Opublic.h" /* Object Headers */ /*****************/ /* Public Macros */ diff --git a/src/H5Spublic.h b/src/H5Spublic.h index d177e161778..c6179f3a8d0 100644 --- a/src/H5Spublic.h +++ b/src/H5Spublic.h @@ -16,9 +16,8 @@ #ifndef H5Spublic_H #define H5Spublic_H -/* 
Public headers needed by this file */ -#include "H5public.h" -#include "H5Ipublic.h" +#include "H5public.h" /* Generic Functions */ +#include "H5Ipublic.h" /* Identifiers */ /* Define special dataspaces for dataset I/O operations */ diff --git a/src/H5Tpublic.h b/src/H5Tpublic.h index 92ec134efa6..c28266acdb9 100644 --- a/src/H5Tpublic.h +++ b/src/H5Tpublic.h @@ -16,9 +16,8 @@ #ifndef H5Tpublic_H #define H5Tpublic_H -/* Public headers needed by this file */ -#include "H5public.h" -#include "H5Ipublic.h" +#include "H5public.h" /* Generic Functions */ +#include "H5Ipublic.h" /* Identifiers */ #define HOFFSET(S, M) (offsetof(S, M)) diff --git a/src/H5VLpublic.h b/src/H5VLpublic.h index 09b31afc101..25566fc3591 100644 --- a/src/H5VLpublic.h +++ b/src/H5VLpublic.h @@ -17,9 +17,8 @@ #ifndef H5VLpublic_H #define H5VLpublic_H -/* Public headers needed by this file */ -#include "H5public.h" /* Generic Functions */ -#include "H5Ipublic.h" /* IDs */ +#include "H5public.h" /* Generic Functions */ +#include "H5Ipublic.h" /* Identifiers */ /*****************/ /* Public Macros */ diff --git a/src/H5Zpublic.h b/src/H5Zpublic.h index 8a059870c99..6d9b9914228 100644 --- a/src/H5Zpublic.h +++ b/src/H5Zpublic.h @@ -13,8 +13,7 @@ #ifndef H5Zpublic_H #define H5Zpublic_H -/* Public headers needed by this file */ -#include "H5public.h" +#include "H5public.h" /* Generic Functions */ /** * \brief Filter identifiers From 42d9246eab2800d81e2cedee4fd9599c5715fb18 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Mon, 8 May 2023 13:12:41 -0500 Subject: [PATCH 213/231] Reduce code duplication in macros (#2914) * Reduce code duplication in macros Signed-off-by: Quincey Koziol * "insert" -> "remove" in error string Signed-off-by: Quincey Koziol * Remove unused macro also Signed-off-by: Quincey Koziol --------- Signed-off-by: Quincey Koziol Signed-off-by: Quincey Koziol --- src/H5Cpkg.h | 665 ++++++++++----------------------------------------- 1 file changed, 126 insertions(+), 539 deletions(-) diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 7bd7087da87..cfc7bcfc3f3 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -13,16 +13,9 @@ /* * Programmer: John Mainzer -- 10/12/04 * - * Purpose: This file contains declarations which are normally visible - * only within the H5C package. - * - * Source files outside the H5C package should include - * H5Cprivate.h instead. - * - * The one exception to this rule is test/cache.c. The test - * code is easier to write if it can look at the cache's - * internal data structures. Indeed, this is the main - * reason why this file was created. + * Purpose: This file contains declarations which are visible only within + * the H5C package. Source files outside the H5C package should + * include H5Cprivate.h instead. */ /* clang-format off */ @@ -82,424 +75,151 @@ #ifdef H5C_DO_SANITY_CHECKS -#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ -if ( ( (head_ptr) == NULL ) || \ - ( (tail_ptr) == NULL ) || \ - ( (entry_ptr) == NULL ) || \ - ( (len) <= 0 ) || \ - ( (list_size) < (entry_ptr)->size ) || \ - ( ( (entry_ptr)->prev == NULL ) && ( (head_ptr) != (entry_ptr) ) ) || \ - ( ( (entry_ptr)->next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ - ( ( (len) == 1 ) && \ - ( ! 
( ( (head_ptr) == (entry_ptr) ) && \ - ( (tail_ptr) == (entry_ptr) ) && \ - ( (entry_ptr)->next == NULL ) && \ - ( (entry_ptr)->prev == NULL ) && \ - ( (list_size) == (entry_ptr)->size ) \ - ) \ - ) \ - ) \ +#define H5C__GEN_DLL_PRE_REMOVE_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ +if (((head_ptr) == NULL) || ((tail_ptr) == NULL) || \ + ((entry_ptr) == NULL) || ((len) <= 0) || \ + ((list_size) < (entry_ptr)->size) || \ + ((entry_ptr)->list_prev == NULL && (head_ptr) != (entry_ptr)) || \ + ((entry_ptr)->list_next == NULL && (tail_ptr) != (entry_ptr)) || \ + ((len) == 1 && \ + !((head_ptr) == (entry_ptr) && (tail_ptr) == (entry_ptr) && \ + (entry_ptr)->list_next == NULL && (entry_ptr)->list_prev == NULL && \ + (list_size) == (entry_ptr)->size \ + ) \ + ) \ ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre remove SC failed") \ -} - -#define H5C__DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ -if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (head_ptr) != (tail_ptr) ) \ - ) || \ - ( (len) < 0 ) || \ - ( (list_size) < 0 ) || \ - ( ( (len) == 1 ) && \ - ( ( (head_ptr) != (tail_ptr) ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (list_size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL sanity check failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre remove SC failed") \ } -#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ -if ( ( (entry_ptr) == NULL ) || \ - ( (entry_ptr)->next != NULL ) || \ - ( (entry_ptr)->prev != NULL ) || \ - ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (head_ptr) != (tail_ptr) ) \ - ) || \ - ( ( (len) == 1 ) && \ - ( ( (head_ptr) != (tail_ptr) ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (list_size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \ +#define H5C__GEN_DLL_PRE_INSERT_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ +if ((entry_ptr) == NULL || (entry_ptr)->list_next != NULL || (entry_ptr)->list_prev != NULL || \ + (((head_ptr) == NULL || (tail_ptr) == NULL) && (head_ptr) != (tail_ptr)) || \ + ( (len) == 0 && \ + ((list_size) > 0 || \ + (head_ptr) != NULL || (tail_ptr) != NULL \ ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre insert SC failed") \ + ) || \ + ((len) == 1 && \ + ((head_ptr) != (tail_ptr) || \ + (head_ptr) == NULL || (head_ptr)->size != (list_size) \ + ) \ + ) || \ + ((len) >= 1 && \ + ((head_ptr) == NULL || (head_ptr)->list_prev != NULL || \ + (tail_ptr) == NULL || (tail_ptr)->list_next != NULL \ + ) \ + ) \ + ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre insert SC failed") \ } -#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ -if ( ( (dll_len) <= 0 ) || \ - ( (dll_size) <= 0 ) || \ - ( (old_size) <= 0 ) || \ - ( (old_size) > (dll_size) ) || \ - ( (new_size) <= 0 ) || \ - ( ( (dll_len) == 1 ) && ( (old_size) != (dll_size) ) ) ) { \ +#define H5C__GEN_DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ +if ((dll_len) <= 0 || (dll_size) <= 0 || (old_size) <= 0 || \ + (old_size) > (dll_size) || (new_size) <= 0 || \ + ((dll_len) == 
1 && (old_size) != (dll_size)) \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre size update SC failed") \ } -#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ -if ( ( (new_size) > (dll_size) ) || \ - ( ( (dll_len) == 1 ) && ( (new_size) != (dll_size) ) ) ) { \ +#define H5C__GEN_DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ +if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL post size update SC failed") \ } - -#else /* H5C_DO_SANITY_CHECKS */ - -#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) -#define H5C__DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) -#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) -#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) -#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) - -#endif /* H5C_DO_SANITY_CHECKS */ - - -#define H5C__DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ -{ \ - H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ - fail_val) \ - if ( (head_ptr) == NULL ) \ - { \ - (head_ptr) = (entry_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - else \ - { \ - (tail_ptr)->next = (entry_ptr); \ - (entry_ptr)->prev = (tail_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - (len)++; \ - (list_size) += (entry_ptr)->size; \ -} /* H5C__DLL_APPEND() */ - -#define H5C__DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ -{ \ - H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ - fail_val) \ - if ( (head_ptr) == NULL ) \ - { \ - (head_ptr) = (entry_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - else \ - { \ - (head_ptr)->prev = (entry_ptr); \ - (entry_ptr)->next = (head_ptr); \ - (head_ptr) = (entry_ptr); \ - } \ - (len)++; \ - (list_size) += (entry_ptr)->size; \ -} /* H5C__DLL_PREPEND() */ - -#define H5C__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ -{ \ - H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ - fail_val) \ - { \ - if ( (head_ptr) == (entry_ptr) ) \ - { \ - (head_ptr) = (entry_ptr)->next; \ - if ( (head_ptr) != NULL ) \ - (head_ptr)->prev = NULL; \ - } \ - else \ - (entry_ptr)->prev->next = (entry_ptr)->next; \ - if ( (tail_ptr) == (entry_ptr) ) \ - { \ - (tail_ptr) = (entry_ptr)->prev; \ - if ( (tail_ptr) != NULL ) \ - (tail_ptr)->next = NULL; \ - } \ - else \ - (entry_ptr)->next->prev = (entry_ptr)->prev; \ - (entry_ptr)->next = NULL; \ - (entry_ptr)->prev = NULL; \ - (len)--; \ - (list_size) -= (entry_ptr)->size; \ - } \ -} /* H5C__DLL_REMOVE() */ - -#define H5C__DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size, fail_val) \ -{ \ - H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ - (dll_size) -= (old_size); \ - (dll_size) += (new_size); \ - H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ -} /* H5C__DLL_UPDATE_FOR_SIZE_CHANGE() */ - -#ifdef H5C_DO_SANITY_CHECKS - -#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) \ -if ( ( (hd_ptr) == NULL ) || \ - ( (tail_ptr) == NULL ) || \ - ( (entry_ptr) == NULL ) || \ - ( (len) <= 0 ) || \ - ( (list_size) < (entry_ptr)->size ) || \ - ( ( (list_size) == (entry_ptr)->size ) && ( ! 
( (len) == 1 ) ) ) || \ - ( ( (entry_ptr)->aux_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \ - ( ( (entry_ptr)->aux_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ - ( ( (len) == 1 ) && \ - ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \ - ( (entry_ptr)->aux_next == NULL ) && \ - ( (entry_ptr)->aux_prev == NULL ) && \ - ( (list_size) == (entry_ptr)->size ) \ - ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "aux DLL pre remove SC failed") \ -} - -#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ -if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (head_ptr) != (tail_ptr) ) \ - ) || \ - ( (len) < 0 ) || \ - ( (list_size) < 0 ) || \ - ( ( (len) == 1 ) && \ - ( ( (head_ptr) != (tail_ptr) ) || ( (list_size) <= 0 ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (list_size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (head_ptr) == NULL ) || ( (head_ptr)->aux_prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "AUX DLL sanity check failed") \ -} - -#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) \ -if ( ( (entry_ptr) == NULL ) || \ - ( (entry_ptr)->aux_next != NULL ) || \ - ( (entry_ptr)->aux_prev != NULL ) || \ - ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (hd_ptr) != (tail_ptr) ) \ - ) || \ - ( ( (len) == 1 ) && \ - ( ( (hd_ptr) != (tail_ptr) ) || ( (list_size) <= 0 ) || \ - ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (list_size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->aux_prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "AUX DLL pre insert SC failed") \ -} - #else /* H5C_DO_SANITY_CHECKS */ - -#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) -#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) -#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) - +#define H5C__GEN_DLL_PRE_REMOVE_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__GEN_DLL_PRE_INSERT_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__GEN_DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) +#define H5C__GEN_DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) #endif /* H5C_DO_SANITY_CHECKS */ - -#define H5C__AUX_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val)\ +#define H5C__GEN_DLL_APPEND(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ - H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ - fail_val) \ - if ( (head_ptr) == NULL ) \ - { \ + H5C__GEN_DLL_PRE_INSERT_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ + if ((head_ptr) == NULL) { \ (head_ptr) = (entry_ptr); \ (tail_ptr) = (entry_ptr); \ } \ - else \ - { \ - (tail_ptr)->aux_next = (entry_ptr); \ - (entry_ptr)->aux_prev = (tail_ptr); \ + else { \ + (tail_ptr)->list_next = (entry_ptr); \ + (entry_ptr)->list_prev = (tail_ptr); \ (tail_ptr) = (entry_ptr); \ } \ (len)++; \ - (list_size) += entry_ptr->size; \ -} /* H5C__AUX_DLL_APPEND() */ - -#define H5C__AUX_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) 
\ -{ \ - H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ - if ( (head_ptr) == NULL ) \ - { \ - (head_ptr) = (entry_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - else \ - { \ - (head_ptr)->aux_prev = (entry_ptr); \ - (entry_ptr)->aux_next = (head_ptr); \ - (head_ptr) = (entry_ptr); \ - } \ - (len)++; \ - (list_size) += entry_ptr->size; \ -} /* H5C__AUX_DLL_PREPEND() */ - -#define H5C__AUX_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ -{ \ - H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ - { \ - if ( (head_ptr) == (entry_ptr) ) \ - { \ - (head_ptr) = (entry_ptr)->aux_next; \ - if ( (head_ptr) != NULL ) \ - (head_ptr)->aux_prev = NULL; \ - } \ - else \ - (entry_ptr)->aux_prev->aux_next = (entry_ptr)->aux_next; \ - if ( (tail_ptr) == (entry_ptr) ) \ - { \ - (tail_ptr) = (entry_ptr)->aux_prev; \ - if ( (tail_ptr) != NULL ) \ - (tail_ptr)->aux_next = NULL; \ - } \ - else \ - (entry_ptr)->aux_next->aux_prev = (entry_ptr)->aux_prev; \ - entry_ptr->aux_next = NULL; \ - entry_ptr->aux_prev = NULL; \ - (len)--; \ - (list_size) -= entry_ptr->size; \ - } \ -} /* H5C__AUX_DLL_REMOVE() */ - -#ifdef H5C_DO_SANITY_CHECKS - -#define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) \ -if ( ( (hd_ptr) == NULL ) || \ - ( (tail_ptr) == NULL ) || \ - ( (entry_ptr) == NULL ) || \ - ( (len) <= 0 ) || \ - ( (list_size) < (entry_ptr)->size ) || \ - ( ( (list_size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ - ( ( (entry_ptr)->il_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \ - ( ( (entry_ptr)->il_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ - ( ( (len) == 1 ) && \ - ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \ - ( (entry_ptr)->il_next == NULL ) && \ - ( (entry_ptr)->il_prev == NULL ) && \ - ( (list_size) == (entry_ptr)->size ) \ - ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "il DLL pre remove SC failed") \ -} - -#define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) \ -if ( ( (entry_ptr) == NULL ) || \ - ( (entry_ptr)->il_next != NULL ) || \ - ( (entry_ptr)->il_prev != NULL ) || \ - ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (hd_ptr) != (tail_ptr) ) \ - ) || \ - ( ( (len) == 1 ) && \ - ( ( (hd_ptr) != (tail_ptr) ) || ( (list_size) <= 0 ) || \ - ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (list_size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->il_prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->il_next != NULL ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "IL DLL pre insert SC failed") \ -} - -#define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ -if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (head_ptr) != (tail_ptr) ) \ - ) || \ - ( ( (len) == 1 ) && \ - ( ( (head_ptr) != (tail_ptr) ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (list_size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (head_ptr) == NULL ) || ( (head_ptr)->il_prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->il_next != NULL ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "IL DLL sanity check failed") \ -} + (list_size) += (entry_ptr)->size; \ +} /* H5C__GEN_DLL_APPEND() */ -#else /* H5C_DO_SANITY_CHECKS */ +#define H5C__GEN_DLL_PREPEND(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ +{ \ 
+ H5C__GEN_DLL_PRE_INSERT_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ + if ((head_ptr) == NULL) { \ + (head_ptr) = (entry_ptr); \ + (tail_ptr) = (entry_ptr); \ + } \ + else { \ + (head_ptr)->list_prev = (entry_ptr); \ + (entry_ptr)->list_next = (head_ptr); \ + (head_ptr) = (entry_ptr); \ + } \ + (len)++; \ + (list_size) += (entry_ptr)->size; \ +} /* H5C__GEN_DLL_PREPEND() */ -#define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) -#define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) -#define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__GEN_DLL_REMOVE(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ +{ \ + H5C__GEN_DLL_PRE_REMOVE_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ + if ((head_ptr) == (entry_ptr)) { \ + (head_ptr) = (entry_ptr)->list_next; \ + if ((head_ptr) != NULL) \ + (head_ptr)->list_prev = NULL; \ + } \ + else \ + (entry_ptr)->list_prev->list_next = (entry_ptr)->list_next; \ + if ((tail_ptr) == (entry_ptr)) { \ + (tail_ptr) = (entry_ptr)->list_prev; \ + if ((tail_ptr) != NULL) \ + (tail_ptr)->list_next = NULL; \ + } \ + else \ + (entry_ptr)->list_next->list_prev = (entry_ptr)->list_prev; \ + (entry_ptr)->list_next = NULL; \ + (entry_ptr)->list_prev = NULL; \ + (len)--; \ + (list_size) -= (entry_ptr)->size; \ +} /* H5C__GEN_DLL_REMOVE() */ + +#define H5C__GEN_DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size, fail_val) \ +{ \ + H5C__GEN_DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ + (dll_size) -= (old_size); \ + (dll_size) += (new_size); \ + H5C__GEN_DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ +} /* H5C__GEN_DLL_UPDATE_FOR_SIZE_CHANGE() */ -#endif /* H5C_DO_SANITY_CHECKS */ +/* Macros that modify the LRU/protected/pinned lists */ +#define H5C__DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ + H5C__GEN_DLL_APPEND(entry_ptr, next, prev, head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ + H5C__GEN_DLL_PREPEND(entry_ptr, next, prev, head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ + H5C__GEN_DLL_REMOVE(entry_ptr, next, prev, head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size, fail_val) \ + H5C__GEN_DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size, fail_val) -#define H5C__IL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val)\ -{ \ - H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ - fail_val) \ - if ( (head_ptr) == NULL ) \ - { \ - (head_ptr) = (entry_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - else \ - { \ - (tail_ptr)->il_next = (entry_ptr); \ - (entry_ptr)->il_prev = (tail_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - (len)++; \ - (list_size) += entry_ptr->size; \ - H5C__IL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ -} /* H5C__IL_DLL_APPEND() */ +/* Macros that modify the "auxiliary" LRU list */ +#define H5C__AUX_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ + H5C__GEN_DLL_APPEND(entry_ptr, aux_next, aux_prev, head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__AUX_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ + 
H5C__GEN_DLL_PREPEND(entry_ptr, aux_next, aux_prev, head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__AUX_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ + H5C__GEN_DLL_REMOVE(entry_ptr, aux_next, aux_prev, head_ptr, tail_ptr, len, list_size, fail_val) -#define H5C__IL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ -{ \ - H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ - { \ - if ( (head_ptr) == (entry_ptr) ) \ - { \ - (head_ptr) = (entry_ptr)->il_next; \ - if ( (head_ptr) != NULL ) \ - (head_ptr)->il_prev = NULL; \ - } \ - else \ - (entry_ptr)->il_prev->il_next = (entry_ptr)->il_next; \ - if ( (tail_ptr) == (entry_ptr) ) \ - { \ - (tail_ptr) = (entry_ptr)->il_prev; \ - if ( (tail_ptr) != NULL ) \ - (tail_ptr)->il_next = NULL; \ - } \ - else \ - (entry_ptr)->il_next->il_prev = (entry_ptr)->il_prev; \ - entry_ptr->il_next = NULL; \ - entry_ptr->il_prev = NULL; \ - (len)--; \ - (list_size) -= entry_ptr->size; \ - } \ - H5C__IL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ -} /* H5C__IL_DLL_REMOVE() */ +/* Macros that modify the "index" list */ +#define H5C__IL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ + H5C__GEN_DLL_APPEND(entry_ptr, il_next, il_prev, head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__IL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ + H5C__GEN_DLL_REMOVE(entry_ptr, il_next, il_prev, head_ptr, tail_ptr, len, list_size, fail_val) /*********************************************************************** @@ -2718,144 +2438,11 @@ if ( ( (cache_ptr)->index_size != \ #ifdef H5_HAVE_PARALLEL -#ifdef H5C_DO_SANITY_CHECKS - -#define H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) \ -if ( ( (hd_ptr) == NULL ) || \ - ( (tail_ptr) == NULL ) || \ - ( (entry_ptr) == NULL ) || \ - ( (len) <= 0 ) || \ - ( (list_size) < (entry_ptr)->size ) || \ - ( ( (list_size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ - ( ( (entry_ptr)->coll_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \ - ( ( (entry_ptr)->coll_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) ||\ - ( ( (len) == 1 ) && \ - ( ! 
( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \ - ( (entry_ptr)->coll_next == NULL ) && \ - ( (entry_ptr)->coll_prev == NULL ) && \ - ( (list_size) == (entry_ptr)->size ) \ - ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "coll DLL pre remove SC failed") \ -} - -#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) \ -if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (head_ptr) != (tail_ptr) ) \ - ) || \ - ( (len) < 0 ) || \ - ( (list_size) < 0 ) || \ - ( ( (len) == 1 ) && \ - ( ( (head_ptr) != (tail_ptr) ) || ( (list_size) <= 0 ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (list_size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (head_ptr) == NULL ) || ( (head_ptr)->coll_prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "COLL DLL sanity check failed")\ -} - -#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val)\ -if ( ( (entry_ptr) == NULL ) || \ - ( (entry_ptr)->coll_next != NULL ) || \ - ( (entry_ptr)->coll_prev != NULL ) || \ - ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (hd_ptr) != (tail_ptr) ) \ - ) || \ - ( ( (len) == 1 ) && \ - ( ( (hd_ptr) != (tail_ptr) ) || ( (list_size) <= 0 ) || \ - ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (list_size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->coll_prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "COLL DLL pre insert SC failed") \ -} - -#else /* H5C_DO_SANITY_CHECKS */ - -#define H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) -#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, list_size, fail_val) -#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, list_size, fail_val) - -#endif /* H5C_DO_SANITY_CHECKS */ - - -#define H5C__COLL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ -{ \ - H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, \ - fail_val) \ - if ( (head_ptr) == NULL ) \ - { \ - (head_ptr) = (entry_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - else \ - { \ - (tail_ptr)->coll_next = (entry_ptr); \ - (entry_ptr)->coll_prev = (tail_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - (len)++; \ - (list_size) += entry_ptr->size; \ -} /* H5C__COLL_DLL_APPEND() */ - -#define H5C__COLL_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ -{ \ - H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val)\ - if ( (head_ptr) == NULL ) \ - { \ - (head_ptr) = (entry_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - else \ - { \ - (head_ptr)->coll_prev = (entry_ptr); \ - (entry_ptr)->coll_next = (head_ptr); \ - (head_ptr) = (entry_ptr); \ - } \ - (len)++; \ - (list_size) += entry_ptr->size; \ -} /* H5C__COLL_DLL_PREPEND() */ - -#define H5C__COLL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ -{ \ - H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val)\ - { \ - if ( (head_ptr) == (entry_ptr) ) \ - { \ - (head_ptr) = (entry_ptr)->coll_next; \ - if ( (head_ptr) != NULL ) \ - (head_ptr)->coll_prev = NULL; \ - } \ - else \ - { \ - (entry_ptr)->coll_prev->coll_next = (entry_ptr)->coll_next; \ - } \ - if ( (tail_ptr) == (entry_ptr) ) \ - { \ - (tail_ptr) = (entry_ptr)->coll_prev; \ 
- if ( (tail_ptr) != NULL ) \ - (tail_ptr)->coll_next = NULL; \ - } \ - else \ - (entry_ptr)->coll_next->coll_prev = (entry_ptr)->coll_prev; \ - entry_ptr->coll_next = NULL; \ - entry_ptr->coll_prev = NULL; \ - (len)--; \ - (list_size) -= entry_ptr->size; \ - } \ -} /* H5C__COLL_DLL_REMOVE() */ +/* Macros that modify the "collective I/O" LRU list */ +#define H5C__COLL_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ + H5C__GEN_DLL_PREPEND(entry_ptr, coll_next, coll_prev, head_ptr, tail_ptr, len, list_size, fail_val) +#define H5C__COLL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, list_size, fail_val) \ + H5C__GEN_DLL_REMOVE(entry_ptr, coll_next, coll_prev, head_ptr, tail_ptr, len, list_size, fail_val) /*------------------------------------------------------------------------- From c6437f7bae227691c6b346f59084cd6a75d122bc Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Tue, 9 May 2023 11:11:29 -0500 Subject: [PATCH 214/231] Remove unused variable warning in H5C.c (#2844) --- src/H5C.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/H5C.c b/src/H5C.c index 8a97e47ea4d..a0f80f14616 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -6628,8 +6628,8 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) { /* Skip "dirty" corked entries. */ - ++num_corked_entries; - didnt_flush_entry = TRUE; + num_corked_entries = num_corked_entries + 1; + didnt_flush_entry = TRUE; } else if ((entry_ptr->type->id != H5AC_EPOCH_MARKER_ID) && !entry_ptr->flush_in_progress && !entry_ptr->prefetched_dirty) { From f5b0a7b2e177ec3f186af41838fbb071dd42ad1b Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Tue, 9 May 2023 15:19:18 -0500 Subject: [PATCH 215/231] More code duplication reduction (#2930) * Add failure value where it's missing from 1+ macros. Clean up whitespace / continuation characters ('\'). Made hash-table macros generic for use in both the package header and test header. Remove duplicated copy & pasted macros (by hoisting difference into #ifdef'd macro). Updated and re-flowed comments to read better. Also clean up a few compiler warnings in production builds. 
Signed-off-by: Quincey Koziol * Committing clang-format changes * Remove unused variable warning in H5C.c (#2844) * Remove trailing /* NDEBUG */ comment from #endif's * Committing clang-format changes --------- Signed-off-by: Quincey Koziol Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- src/H5C.c | 58 +- src/H5Cimage.c | 110 +- src/H5Cmpio.c | 14 +- src/H5Cpkg.h | 3551 +++++++++++++++++-------------------------- test/cache_common.h | 50 +- 5 files changed, 1536 insertions(+), 2247 deletions(-) diff --git a/src/H5C.c b/src/H5C.c index a0f80f14616..b80ed985023 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -127,7 +127,7 @@ static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t add #ifndef NDEBUG static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_entry_t *base_entry); -#endif /* NDEBUG */ +#endif /*********************/ /* Package Variables */ @@ -410,7 +410,7 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id, #ifndef NDEBUG cache_ptr->get_entry_ptr_from_addr_counter = 0; -#endif /* NDEBUG */ +#endif /* Set return value */ ret_value = cache_ptr; @@ -454,9 +454,9 @@ void H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, #ifndef NDEBUG int32_t version, -#else /* NDEBUG */ +#else int32_t H5_ATTR_UNUSED version, -#endif /* NDEBUG */ +#endif double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size, size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size) { @@ -732,7 +732,7 @@ H5C_dest(H5F_t *f) #endif /* H5C_DO_SANITY_CHECKS */ cache_ptr->magic = 0; -#endif /* NDEBUG */ +#endif cache_ptr = H5FL_FREE(H5C_t, cache_ptr); @@ -1216,7 +1216,7 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u entry_ptr->prefetched_dirty = FALSE; #ifndef NDEBUG /* debugging field */ entry_ptr->serialization_count = 0; -#endif /* NDEBUG */ +#endif /* initialize tag list fields */ entry_ptr->tl_next = NULL; @@ -1500,7 +1500,7 @@ H5C_mark_entry_clean(void *_thing) if (was_dirty) H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL) if (entry_ptr->in_slist) - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE) + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) /* Update stats for entry being marked clean */ H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) @@ -1703,7 +1703,7 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd if (entry_ptr->in_slist) { HDassert(cache_ptr->slist_ptr); - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE) + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) } /* end if */ } /* end if */ @@ -2747,7 +2747,7 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_sli node_ptr = H5SL_first(cache_ptr->slist_ptr); while (node_ptr != NULL) { entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE); + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) node_ptr = H5SL_first(cache_ptr->slist_ptr); } } @@ -3435,8 +3435,8 @@ H5C_create_flush_dependency(void *parent_thing, void *child_thing) for (u = 0; u < child_entry->flush_dep_nparents; u++) HDassert(child_entry->flush_dep_parent[u] != parent_entry); - } /* end block */ -#endif /* NDEBUG */ + } /* end block */ +#endif /* More sanity checks */ if (child_entry == parent_entry) @@ -3529,7 +3529,7 @@ H5C_create_flush_dependency(void *parent_thing, void *child_thing) 
HDassert(child_entry->flush_dep_parent_nalloc > 0); #ifndef NDEBUG H5C__assert_flush_dep_nocycle(parent_entry, child_entry); -#endif /* NDEBUG */ +#endif done: FUNC_LEAVE_NOAPI(ret_value) @@ -4859,7 +4859,7 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) HDassert(cache_ptr->pl_size == 0); HDassert(cache_ptr->LRU_list_len == 0); HDassert(cache_ptr->LRU_list_size == 0); -#endif /* NDEBUG */ +#endif done: FUNC_LEAVE_NOAPI(ret_value) @@ -5884,7 +5884,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL) if (entry_ptr->in_slist && del_from_slist_on_destroy) - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL) #ifdef H5_HAVE_PARALLEL /* Check for collective read access flag */ @@ -5916,7 +5916,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * Hence no differentiation between them. */ H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL) - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL) /* mark the entry as clean and update the index for * entry clean. Also, call the clear callback @@ -6005,7 +6005,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) entry_ptr->type->image_len((void *)entry_ptr, &curr_len); HDassert(curr_len == entry_ptr->size); } -#endif /* NDEBUG */ +#endif /* If the file space free size callback is defined, use * it to get the size of the block of file space to free. @@ -6508,7 +6508,7 @@ H5C__load_entry(H5F_t *f, entry->prefetched_dirty = FALSE; #ifndef NDEBUG /* debugging field */ entry->serialization_count = 0; -#endif /* NDEBUG */ +#endif /* initialize tag list fields */ entry->tl_next = NULL; @@ -6579,8 +6579,10 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) H5C_cache_entry_t *entry_ptr; H5C_cache_entry_t *prev_ptr; H5C_cache_entry_t *next_ptr; - uint32_t num_corked_entries = 0; - herr_t ret_value = SUCCEED; /* Return value */ +#ifndef NDEBUG + uint32_t num_corked_entries = 0; +#endif + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -6628,8 +6630,10 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) { /* Skip "dirty" corked entries. */ - num_corked_entries = num_corked_entries + 1; - didnt_flush_entry = TRUE; +#ifndef NDEBUG + ++num_corked_entries; +#endif + didnt_flush_entry = TRUE; } else if ((entry_ptr->type->id != H5AC_EPOCH_MARKER_ID) && !entry_ptr->flush_in_progress && !entry_ptr->prefetched_dirty) { @@ -7475,7 +7479,7 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_en FUNC_LEAVE_NOAPI_VOID } /* H5C__assert_flush_dep_nocycle() */ -#endif /* NDEBUG */ +#endif /*------------------------------------------------------------------------- * Function: H5C__serialize_cache @@ -7587,7 +7591,7 @@ H5C__serialize_cache(H5F_t *f) scan_ptr = scan_ptr->il_next; } /* end while */ } /* end block */ -#endif /* NDEBUG */ +#endif /* set cache_ptr->serialization_in_progress to TRUE, and back * to FALSE at the end of the function. 
Must maintain this flag @@ -7653,7 +7657,7 @@ H5C__serialize_cache(H5F_t *f) scan_ptr = scan_ptr->il_next; } /* end while */ } /* end block */ -#endif /* NDEBUG */ +#endif done: cache_ptr->serialization_in_progress = FALSE; @@ -7830,7 +7834,7 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring) #ifndef NDEBUG /* Increment serialization counter (to detect multiple serializations) */ entry_ptr->serialization_count++; -#endif /* NDEBUG */ +#endif } /* end if */ } /* end if */ @@ -7899,7 +7903,7 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring) #ifndef NDEBUG /* Increment serialization counter (to detect multiple serializations) */ entry_ptr->serialization_count++; -#endif /* NDEBUG */ +#endif } /* end if */ } /* end if */ else { @@ -8119,7 +8123,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) if (entry_ptr->addr == old_addr) { /* Delete the entry from the hash table and the slist */ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL); - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE); + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) /* Update the entry for its new address */ entry_ptr->addr = new_addr; diff --git a/src/H5Cimage.c b/src/H5Cimage.c index b8f46f11422..6d211a4f56d 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -137,7 +137,7 @@ static herr_t H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, c #ifndef NDEBUG /* only used in assertions */ static herr_t H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint8_t **buf, unsigned entry_num); -#endif /* NDEBUG */ /* only used in assertions */ +#endif static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr, H5C_cache_entry_t **fd_children); static herr_t H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t **buf); @@ -401,8 +401,8 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr) fake_cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_xfree(fake_cache_ptr->image_entries); fake_cache_ptr = (H5C_t *)H5MM_xfree(fake_cache_ptr); - } /* end block */ -#endif /* NDEBUG */ + } /* end block */ +#endif done: FUNC_LEAVE_NOAPI(ret_value) @@ -702,7 +702,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t ds_entry_ptr->prefetched_dirty = pf_entry_ptr->prefetched_dirty; #ifndef NDEBUG /* debugging field */ ds_entry_ptr->serialization_count = 0; -#endif /* NDEBUG */ +#endif H5C__RESET_CACHE_ENTRY_STATS(ds_entry_ptr); @@ -746,7 +746,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t H5C__SEARCH_INDEX(cache_ptr, addr, pf_entry_ptr, FAIL); HDassert(NULL == pf_entry_ptr); -#endif /* NDEBUG */ +#endif /* Insert the deserialized entry into the cache. 
*/ H5C__INSERT_IN_INDEX(cache_ptr, ds_entry_ptr, FAIL) @@ -797,7 +797,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t } /* end while */ HDassert(found); } -#endif /* NDEBUG */ +#endif if (H5C_create_flush_dependency(ds_entry_ptr, fd_children[i]) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore child flush dependency") @@ -2007,7 +2007,7 @@ H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint done: FUNC_LEAVE_NOAPI(ret_value) } /* H5C__decode_cache_image_entry() */ -#endif /* NDEBUG */ +#endif /*------------------------------------------------------------------------- * Function: H5C__destroy_pf_entry_child_flush_deps() @@ -2039,10 +2039,12 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_e H5C_cache_entry_t **fd_children) { H5C_cache_entry_t *entry_ptr; - unsigned entries_visited = 0; - int fd_children_found = 0; - hbool_t found; - herr_t ret_value = SUCCEED; /* Return value */ +#ifndef NDEBUG + unsigned entries_visited = 0; +#endif + int fd_children_found = 0; + hbool_t found; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -2119,11 +2121,13 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_e u++; } /* end while */ HDassert(found); -#endif /* NDEBUG */ +#endif } /* end if */ } /* end if */ +#ifndef NDEBUG entries_visited++; +#endif entry_ptr = entry_ptr->il_next; } /* end while */ @@ -2374,12 +2378,14 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) { H5C_cache_entry_t *entry_ptr; H5C_cache_entry_t *parent_ptr; - unsigned entries_removed_from_image = 0; - unsigned external_parent_fd_refs_removed = 0; - unsigned external_child_fd_refs_removed = 0; - hbool_t done = FALSE; - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; +#ifndef NDEBUG + unsigned entries_removed_from_image = 0; + unsigned external_parent_fd_refs_removed = 0; + unsigned external_child_fd_refs_removed = 0; +#endif + hbool_t done = FALSE; + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE @@ -2415,7 +2421,9 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) entry_ptr->include_in_image) { /* Must remove child from image -- only do this once */ +#ifndef NDEBUG entries_removed_from_image++; +#endif entry_ptr->include_in_image = FALSE; } /* end if */ } /* for */ @@ -2458,7 +2466,9 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) parent_ptr->fd_dirty_child_count--; } /* end if */ +#ifndef NDEBUG external_child_fd_refs_removed++; +#endif } /* end if */ } /* for */ } /* end if */ @@ -2483,7 +2493,9 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) HDassert(parent_ptr->addr == entry_ptr->fd_parent_addrs[u]); entry_ptr->fd_parent_addrs[u] = HADDR_UNDEF; +#ifndef NDEBUG external_parent_fd_refs_removed++; +#endif } /* end if */ } /* for */ @@ -2650,10 +2662,12 @@ static herr_t H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr) { H5C_cache_entry_t *entry_ptr; - H5C_image_entry_t *image_entries = NULL; - uint32_t entries_visited = 0; - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + H5C_image_entry_t *image_entries = NULL; +#ifndef NDEBUG + uint32_t entries_visited = 0; +#endif + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -2736,7 +2750,9 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t 
*cache_ptr) HDassert(u <= cache_ptr->num_entries_in_image); } /* end if */ +#ifndef NDEBUG entries_visited++; +#endif entry_ptr = entry_ptr->il_next; } /* end while */ @@ -2789,14 +2805,16 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) { H5C_cache_entry_t *entry_ptr; hbool_t include_in_image; - unsigned entries_visited = 0; - int lru_rank = 1; - uint32_t num_entries_tentatively_in_image = 0; - uint32_t num_entries_in_image = 0; - size_t image_len; - size_t entry_header_len; - size_t fd_parents_list_len; - herr_t ret_value = SUCCEED; /* Return value */ + int lru_rank = 1; +#ifndef NDEBUG + unsigned entries_visited = 0; + uint32_t num_entries_tentatively_in_image = 0; +#endif + uint32_t num_entries_in_image = 0; + size_t image_len; + size_t entry_header_len; + size_t fd_parents_list_len; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -2897,10 +2915,14 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) entry_ptr->fd_dirty_child_count = entry_ptr->flush_dep_ndirty_children; } /* end if */ +#ifndef NDEBUG num_entries_tentatively_in_image++; +#endif } /* end if */ +#ifndef NDEBUG entries_visited++; +#endif entry_ptr = entry_ptr->il_next; } /* end while */ HDassert(entries_visited == cache_ptr->index_len); @@ -2931,12 +2953,14 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) if (H5C__prep_for_file_close__compute_fd_heights(cache_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "computation of flush dependency heights failed?!?") - /* At this point, all entries that will appear in the cache - * image should be marked correctly. Compute the size of the - * cache image. - */ + /* At this point, all entries that will appear in the cache + * image should be marked correctly. Compute the size of the + * cache image. + */ +#ifndef NDEBUG entries_visited = 0; - entry_ptr = cache_ptr->il_head; +#endif + entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); @@ -2950,7 +2974,9 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) num_entries_in_image++; } /* end if */ +#ifndef NDEBUG entries_visited++; +#endif entry_ptr = entry_ptr->il_next; } /* end while */ HDassert(entries_visited == cache_ptr->index_len); @@ -2968,7 +2994,9 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) #endif cache_ptr->num_entries_in_image = num_entries_in_image; - entries_visited = 0; +#ifndef NDEBUG + entries_visited = 0; +#endif /* Now scan the LRU list to set the lru_rank fields of all entries * on the LRU. @@ -3001,7 +3029,9 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) lru_rank++; } /* end else-if */ +#ifndef NDEBUG entries_visited++; +#endif entry_ptr = entry_ptr->next; } /* end while */ HDassert(entries_visited == cache_ptr->LRU_list_len); @@ -3185,8 +3215,8 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) * we add code to store and restore adaptive resize status. */ HDassert(lru_rank_holes <= H5C__MAX_EPOCH_MARKERS); - } /* end block */ -#endif /* NDEBUG */ + } /* end block */ +#endif /* Check to see if the cache is oversize, and evict entries as * necessary to remain within limits. 
@@ -3237,7 +3267,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b hbool_t in_lru = FALSE; hbool_t is_fd_parent = FALSE; hbool_t is_fd_child = FALSE; -#endif /* NDEBUG */ /* only used in assertions */ +#endif const uint8_t *p; hbool_t file_is_rw; H5C_cache_entry_t *ret_value = NULL; /* Return value */ @@ -3274,7 +3304,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b is_fd_parent = TRUE; if (flags & H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG) is_fd_child = TRUE; -#endif /* NDEBUG */ /* only used in assertions */ +#endif /* Force dirty entries to clean if the file read only -- must do * this as otherwise the cache will attempt to write them on file diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index a92ac104770..5822746cc9c 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -159,10 +159,12 @@ herr_t H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, haddr_t *candidates_list_ptr, int mpi_rank, int mpi_size) { - unsigned first_entry_to_flush; - unsigned last_entry_to_flush; - unsigned total_entries_to_clear = 0; - unsigned total_entries_to_flush = 0; + unsigned first_entry_to_flush; + unsigned last_entry_to_flush; +#ifndef NDEBUG + unsigned total_entries_to_clear = 0; + unsigned total_entries_to_flush = 0; +#endif unsigned *candidate_assignment_table = NULL; unsigned entries_to_flush[H5C_RING_NTYPES]; unsigned entries_to_clear[H5C_RING_NTYPES]; @@ -316,12 +318,16 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha * markings. */ if (u >= first_entry_to_flush && u <= last_entry_to_flush) { +#ifndef NDEBUG total_entries_to_flush++; +#endif entries_to_flush[entry_ptr->ring]++; entry_ptr->flush_immediately = TRUE; } /* end if */ else { +#ifndef NDEBUG total_entries_to_clear++; +#endif entries_to_clear[entry_ptr->ring]++; entry_ptr->clear_on_unprotect = TRUE; } /* end else */ diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index cfc7bcfc3f3..2cad464743d 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -76,41 +76,37 @@ #ifdef H5C_DO_SANITY_CHECKS #define H5C__GEN_DLL_PRE_REMOVE_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ -if (((head_ptr) == NULL) || ((tail_ptr) == NULL) || \ - ((entry_ptr) == NULL) || ((len) <= 0) || \ - ((list_size) < (entry_ptr)->size) || \ - ((entry_ptr)->list_prev == NULL && (head_ptr) != (entry_ptr)) || \ - ((entry_ptr)->list_next == NULL && (tail_ptr) != (entry_ptr)) || \ +if ((head_ptr) == NULL || (tail_ptr) == NULL || \ + (entry_ptr) == NULL || (len) <= 0 || \ + (list_size) < (entry_ptr)->size || \ + ((entry_ptr)->list_prev == NULL && (head_ptr) != (entry_ptr)) || \ + ((entry_ptr)->list_next == NULL && (tail_ptr) != (entry_ptr)) || \ ((len) == 1 && \ !((head_ptr) == (entry_ptr) && (tail_ptr) == (entry_ptr) && \ - (entry_ptr)->list_next == NULL && (entry_ptr)->list_prev == NULL && \ + (entry_ptr)->list_next == NULL && (entry_ptr)->list_prev == NULL && \ (list_size) == (entry_ptr)->size \ ) \ ) \ ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre remove SC failed") \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre remove SC failed") \ } #define H5C__GEN_DLL_PRE_INSERT_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ if ((entry_ptr) == NULL || (entry_ptr)->list_next != NULL || (entry_ptr)->list_prev != NULL || \ - (((head_ptr) == NULL || (tail_ptr) == NULL) && (head_ptr) != (tail_ptr)) || \ - ( (len) == 0 && \ - ((list_size) > 0 || \ - (head_ptr) != NULL || (tail_ptr) != 
NULL \ - ) \ - ) || \ - ((len) == 1 && \ - ((head_ptr) != (tail_ptr) || \ - (head_ptr) == NULL || (head_ptr)->size != (list_size) \ - ) \ - ) || \ - ((len) >= 1 && \ - ((head_ptr) == NULL || (head_ptr)->list_prev != NULL || \ - (tail_ptr) == NULL || (tail_ptr)->list_next != NULL \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre insert SC failed") \ + (((head_ptr) == NULL || (tail_ptr) == NULL) && (head_ptr) != (tail_ptr)) || \ + ((len) == 0 && \ + ((list_size) > 0 || (head_ptr) != NULL || (tail_ptr) != NULL) \ + ) || \ + ((len) == 1 && \ + ((head_ptr) != (tail_ptr) || (head_ptr) == NULL || \ + (head_ptr)->size != (list_size)) \ + ) || \ + ((len) >= 1 && \ + ((head_ptr) == NULL || (head_ptr)->list_prev != NULL || \ + (tail_ptr) == NULL || (tail_ptr)->list_next != NULL) \ + ) \ + ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre insert SC failed") \ } #define H5C__GEN_DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ @@ -122,8 +118,7 @@ if ((dll_len) <= 0 || (dll_size) <= 0 || (old_size) <= 0 || \ } #define H5C__GEN_DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ -if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \ - ) { \ +if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size))) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL post size update SC failed") \ } #else /* H5C_DO_SANITY_CHECKS */ @@ -146,7 +141,7 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \ (tail_ptr) = (entry_ptr); \ } \ (len)++; \ - (list_size) += (entry_ptr)->size; \ + (list_size) += (entry_ptr)->size; \ } /* H5C__GEN_DLL_APPEND() */ #define H5C__GEN_DLL_PREPEND(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ @@ -157,43 +152,43 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \ (tail_ptr) = (entry_ptr); \ } \ else { \ - (head_ptr)->list_prev = (entry_ptr); \ - (entry_ptr)->list_next = (head_ptr); \ + (head_ptr)->list_prev = (entry_ptr); \ + (entry_ptr)->list_next = (head_ptr); \ (head_ptr) = (entry_ptr); \ } \ (len)++; \ - (list_size) += (entry_ptr)->size; \ + (list_size) += (entry_ptr)->size; \ } /* H5C__GEN_DLL_PREPEND() */ #define H5C__GEN_DLL_REMOVE(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ { \ H5C__GEN_DLL_PRE_REMOVE_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \ if ((head_ptr) == (entry_ptr)) { \ - (head_ptr) = (entry_ptr)->list_next; \ + (head_ptr) = (entry_ptr)->list_next; \ if ((head_ptr) != NULL) \ - (head_ptr)->list_prev = NULL; \ + (head_ptr)->list_prev = NULL; \ } \ else \ - (entry_ptr)->list_prev->list_next = (entry_ptr)->list_next; \ + (entry_ptr)->list_prev->list_next = (entry_ptr)->list_next; \ if ((tail_ptr) == (entry_ptr)) { \ - (tail_ptr) = (entry_ptr)->list_prev; \ + (tail_ptr) = (entry_ptr)->list_prev; \ if ((tail_ptr) != NULL) \ - (tail_ptr)->list_next = NULL; \ + (tail_ptr)->list_next = NULL; \ } \ else \ - (entry_ptr)->list_next->list_prev = (entry_ptr)->list_prev; \ - (entry_ptr)->list_next = NULL; \ - (entry_ptr)->list_prev = NULL; \ + (entry_ptr)->list_next->list_prev = (entry_ptr)->list_prev; \ + (entry_ptr)->list_next = NULL; \ + (entry_ptr)->list_prev = NULL; \ (len)--; \ - (list_size) -= (entry_ptr)->size; \ + (list_size) -= (entry_ptr)->size; \ } /* H5C__GEN_DLL_REMOVE() */ #define H5C__GEN_DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size, fail_val) \ { \ - 
H5C__GEN_DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ + H5C__GEN_DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ (dll_size) -= (old_size); \ (dll_size) += (new_size); \ - H5C__GEN_DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ + H5C__GEN_DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \ } /* H5C__GEN_DLL_UPDATE_FOR_SIZE_CHANGE() */ @@ -236,7 +231,7 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \ ***********************************************************************/ #define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \ - (cache_ptr)->cache_accesses++; \ + (cache_ptr)->cache_accesses++; \ if (hit) \ (cache_ptr)->cache_hits++; @@ -263,27 +258,27 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \ if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; -#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) \ - if ((cache_ptr)->flush_in_progress) \ +#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) \ + if ((cache_ptr)->flush_in_progress) \ (cache_ptr)->cache_flush_moves[(entry_ptr)->type->id]++; \ - if ((entry_ptr)->flush_in_progress) \ + if ((entry_ptr)->flush_in_progress) \ (cache_ptr)->entry_flush_moves[(entry_ptr)->type->id]++; \ (cache_ptr)->moves[(entry_ptr)->type->id]++; \ (cache_ptr)->entries_relocated_counter++; -#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)\ - if ((cache_ptr)->flush_in_progress) \ +#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \ + if ((cache_ptr)->flush_in_progress) \ (cache_ptr)->cache_flush_size_changes[(entry_ptr)->type->id]++; \ - if ((entry_ptr)->flush_in_progress) \ + if ((entry_ptr)->flush_in_progress) \ (cache_ptr)->entry_flush_size_changes[(entry_ptr)->type->id]++; \ if ((entry_ptr)->size < (new_size)) { \ (cache_ptr)->size_increases[(entry_ptr)->type->id]++; \ - H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ + H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ if ((cache_ptr)->slist_size > (cache_ptr)->max_slist_size) \ - (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ + (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ if ((cache_ptr)->pl_size > (cache_ptr)->max_pl_size) \ - (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ - } else if ((entry_ptr)->size > (new_size)) \ + (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ + } else if ((entry_ptr)->size > (new_size)) \ (cache_ptr)->size_decreases[(entry_ptr)->type->id]++; #define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \ @@ -292,13 +287,13 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \ #define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \ (cache_ptr)->total_ht_deletions++; -#define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) \ - if (success) { \ - (cache_ptr)->successful_ht_searches++; \ - (cache_ptr)->total_successful_ht_search_depth += depth; \ - } else { \ - (cache_ptr)->failed_ht_searches++; \ - (cache_ptr)->total_failed_ht_search_depth += depth; \ +#define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) \ + if (success) { \ + (cache_ptr)->successful_ht_searches++; \ + (cache_ptr)->total_successful_ht_search_depth += depth; \ + } else { \ + (cache_ptr)->failed_ht_searches++; \ + (cache_ptr)->total_failed_ht_search_depth += depth; \ } #define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \ @@ -316,35 +311,35 @@ if 
((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \ #if H5C_COLLECT_CACHE_ENTRY_STATS #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \ -{ \ - (entry_ptr)->accesses = 0; \ - (entry_ptr)->clears = 0; \ - (entry_ptr)->flushes = 0; \ - (entry_ptr)->pins = 0; \ +{ \ + (entry_ptr)->accesses = 0; \ + (entry_ptr)->clears = 0; \ + (entry_ptr)->flushes = 0; \ + (entry_ptr)->pins = 0; \ } -#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ -{ \ +#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ +{ \ (cache_ptr)->clears[(entry_ptr)->type->id]++; \ - if((entry_ptr)->is_pinned) \ + if((entry_ptr)->is_pinned) \ (cache_ptr)->pinned_clears[(entry_ptr)->type->id]++; \ - (entry_ptr)->clears++; \ + (entry_ptr)->clears++; \ } -#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ -{ \ +#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ +{ \ (cache_ptr)->flushes[(entry_ptr)->type->id]++; \ - if((entry_ptr)->is_pinned) \ + if((entry_ptr)->is_pinned) \ (cache_ptr)->pinned_flushes[(entry_ptr)->type->id]++; \ - (entry_ptr)->flushes++; \ + (entry_ptr)->flushes++; \ } #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \ { \ - if (take_ownership) \ - (cache_ptr)->take_ownerships[(entry_ptr)->type->id]++; \ + if (take_ownership) \ + (cache_ptr)->take_ownerships[(entry_ptr)->type->id]++; \ else \ - (cache_ptr)->evictions[(entry_ptr)->type->id]++; \ + (cache_ptr)->evictions[(entry_ptr)->type->id]++; \ if ((entry_ptr)->accesses > (cache_ptr)->max_accesses[(entry_ptr)->type->id]) \ (cache_ptr)->max_accesses[(entry_ptr)->type->id] = (entry_ptr)->accesses; \ if ((entry_ptr)->accesses < (cache_ptr)->min_accesses[(entry_ptr)->type->id]) \ @@ -353,68 +348,68 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \ (cache_ptr)->max_clears[(entry_ptr)->type->id] = (entry_ptr)->clears; \ if ((entry_ptr)->flushes > (cache_ptr)->max_flushes[(entry_ptr)->type->id]) \ (cache_ptr)->max_flushes[(entry_ptr)->type->id] = (entry_ptr)->flushes; \ - if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \ - (cache_ptr)->max_size[(entry_ptr)->type->id] = (entry_ptr)->size; \ - if ((entry_ptr)->pins > (cache_ptr)->max_pins[(entry_ptr)->type->id]) \ - (cache_ptr)->max_pins[(entry_ptr)->type->id] = (entry_ptr)->pins; \ + if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \ + (cache_ptr)->max_size[(entry_ptr)->type->id] = (entry_ptr)->size; \ + if ((entry_ptr)->pins > (cache_ptr)->max_pins[(entry_ptr)->type->id]) \ + (cache_ptr)->max_pins[(entry_ptr)->type->id] = (entry_ptr)->pins; \ } -#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ -{ \ - (cache_ptr)->insertions[(entry_ptr)->type->id]++; \ - if ((entry_ptr)->is_pinned) { \ - (cache_ptr)->pinned_insertions[(entry_ptr)->type->id]++; \ - (cache_ptr)->pins[(entry_ptr)->type->id]++; \ - (entry_ptr)->pins++; \ - if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \ - (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \ - (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ - } \ - if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \ - (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ - H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ((cache_ptr)->slist_len > (cache_ptr)->max_slist_len) \ - (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ - if ((cache_ptr)->slist_size > (cache_ptr)->max_slist_size) \ - (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ +#define 
H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ +{ \ + (cache_ptr)->insertions[(entry_ptr)->type->id]++; \ + if ((entry_ptr)->is_pinned) { \ + (cache_ptr)->pinned_insertions[(entry_ptr)->type->id]++; \ + (cache_ptr)->pins[(entry_ptr)->type->id]++; \ + (entry_ptr)->pins++; \ + if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \ + (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ + if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \ + (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ + } \ + if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \ + (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ + H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ + if ((cache_ptr)->slist_len > (cache_ptr)->max_slist_len) \ + (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ + if ((cache_ptr)->slist_size > (cache_ptr)->max_slist_size) \ + (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \ (cache_ptr)->max_size[(entry_ptr)->type->id] = (entry_ptr)->size; \ - (cache_ptr)->entries_inserted_counter++; \ + (cache_ptr)->entries_inserted_counter++; \ } -#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ -{ \ +#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ +{ \ if (hit) \ (cache_ptr)->hits[(entry_ptr)->type->id]++; \ - else \ + else \ (cache_ptr)->misses[(entry_ptr)->type->id]++; \ - if (!(entry_ptr)->is_read_only) \ + if (!(entry_ptr)->is_read_only) \ (cache_ptr)->write_protects[(entry_ptr)->type->id]++; \ else { \ (cache_ptr)->read_protects[(entry_ptr)->type->id]++; \ - if ((entry_ptr)->ro_ref_count > (cache_ptr)->max_read_protects[(entry_ptr)->type->id]) \ - (cache_ptr)->max_read_protects[(entry_ptr)->type->id] = (entry_ptr)->ro_ref_count; \ - } \ + if ((entry_ptr)->ro_ref_count > (cache_ptr)->max_read_protects[(entry_ptr)->type->id]) \ + (cache_ptr)->max_read_protects[(entry_ptr)->type->id] = (entry_ptr)->ro_ref_count; \ + } \ if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \ - (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ - H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ + (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ + H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ if ((cache_ptr)->pl_len > (cache_ptr)->max_pl_len) \ - (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ + (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ if ((cache_ptr)->pl_size > (cache_ptr)->max_pl_size) \ - (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ - if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \ + (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ + if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \ (cache_ptr)->max_size[(entry_ptr)->type->id] = (entry_ptr)->size; \ (entry_ptr)->accesses++; \ } #define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ { \ - (cache_ptr)->pins[(entry_ptr)->type->id]++; \ + (cache_ptr)->pins[(entry_ptr)->type->id]++; \ (entry_ptr)->pins++; \ - if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \ + if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \ + if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ } @@ -422,80 +417,80 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \ #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) -#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ -{ \ +#define 
H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ +{ \ (cache_ptr)->clears[(entry_ptr)->type->id]++; \ - if((entry_ptr)->is_pinned) \ + if((entry_ptr)->is_pinned) \ (cache_ptr)->pinned_clears[(entry_ptr)->type->id]++; \ } -#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ -{ \ +#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ +{ \ (cache_ptr)->flushes[(entry_ptr)->type->id]++; \ - if ((entry_ptr)->is_pinned) \ + if ((entry_ptr)->is_pinned) \ (cache_ptr)->pinned_flushes[(entry_ptr)->type->id]++; \ } #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \ -{ \ - if (take_ownership) \ - (cache_ptr)->take_ownerships[(entry_ptr)->type->id]++; \ - else \ - (cache_ptr)->evictions[(entry_ptr)->type->id]++; \ +{ \ + if (take_ownership) \ + (cache_ptr)->take_ownerships[(entry_ptr)->type->id]++; \ + else \ + (cache_ptr)->evictions[(entry_ptr)->type->id]++; \ } -#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ -{ \ - (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \ - if ( (entry_ptr)->is_pinned ) { \ - (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \ - ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ - if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ - (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ - (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ - } \ - if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ - (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ - H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ - (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ - if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ - (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ - (cache_ptr)->entries_inserted_counter++; \ +#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ +{ \ + (cache_ptr)->insertions[(entry_ptr)->type->id]++; \ + if ((entry_ptr)->is_pinned) { \ + (cache_ptr)->pinned_insertions[(entry_ptr)->type->id]++; \ + (cache_ptr)->pins[(entry_ptr)->type->id]++; \ + if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \ + (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ + if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \ + (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ + } \ + if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \ + (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ + H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ + if ((cache_ptr)->slist_len > (cache_ptr)->max_slist_len) \ + (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ + if ((cache_ptr)->slist_size > (cache_ptr)->max_slist_size) \ + (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ + (cache_ptr)->entries_inserted_counter++; \ } -#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ -{ \ - if ( hit ) \ - ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \ - else \ - ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \ - if ( ! 
((entry_ptr)->is_read_only) ) \ - ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \ - else { \ - ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \ - if ( ((entry_ptr)->ro_ref_count) > \ - ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \ - ((entry_ptr)->ro_ref_count); \ - } \ - if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ - (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ - H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \ - (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ - if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ - (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ +#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ +{ \ + if (hit) \ + (cache_ptr)->hits[(entry_ptr)->type->id]++; \ + else \ + (cache_ptr)->misses[(entry_ptr)->type->id]++; \ + if (!(entry_ptr)->is_read_only) \ + (cache_ptr)->write_protects[(entry_ptr)->type->id]++; \ + else { \ + (cache_ptr)->read_protects[(entry_ptr)->type->id]++; \ + if ((entry_ptr)->ro_ref_count > \ + (cache_ptr)->max_read_protects[(entry_ptr)->type->id]) \ + (cache_ptr)->max_read_protects[(entry_ptr)->type->id] = \ + (entry_ptr)->ro_ref_count; \ + } \ + if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \ + (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ + H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ + if ((cache_ptr)->pl_len > (cache_ptr)->max_pl_len) \ + (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ + if ((cache_ptr)->pl_size > (cache_ptr)->max_pl_size) \ + (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ } -#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ -{ \ - ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ - if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ - (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ - (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ +#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ +{ \ + (cache_ptr)->pins[(entry_ptr)->type->id]++; \ + if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \ + (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ + if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \ + (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ } #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ @@ -531,318 +526,267 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \ * The following macros handle searches, insertions, and deletion in * the hash table. * - * When modifying these macros, remember to modify the similar macros - * in tst/cache.c - * ***********************************************************************/ -/* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It mut be a power of two. 
*/ - -#define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3) - +#define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3) #define H5C__HASH_FCN(x) (int)((unsigned)((x) & H5C__HASH_MASK) >> 3) +#define H5C__POST_HT_SHIFT_TO_FRONT_SC_CMP(cache_ptr, entry_ptr, k) \ +((cache_ptr) == NULL || (cache_ptr)->index[k] != (entry_ptr) || \ + (entry_ptr)->ht_prev != NULL \ +) +#define H5C__PRE_HT_SEARCH_SC_CMP(cache_ptr, entry_addr) \ +((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ + (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + !H5F_addr_defined(entry_addr) || \ + H5C__HASH_FCN(entry_addr) < 0 || \ + H5C__HASH_FCN(entry_addr) >= H5C__HASH_TABLE_LEN \ +) +#define H5C__POST_SUC_HT_SEARCH_SC_CMP(cache_ptr, entry_ptr, k) \ +((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ + (cache_ptr)->index_len < 1 || \ + (entry_ptr) == NULL || \ + (cache_ptr)->index_size < (entry_ptr)->size || \ + (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + (entry_ptr)->size <= 0 || \ + (cache_ptr)->index[k] == NULL || \ + ((cache_ptr)->index[k] != (entry_ptr) && (entry_ptr)->ht_prev == NULL) || \ + ((cache_ptr)->index[k] == (entry_ptr) && (entry_ptr)->ht_prev != NULL) || \ + ((entry_ptr)->ht_prev != NULL && (entry_ptr)->ht_prev->ht_next != (entry_ptr)) || \ + ((entry_ptr)->ht_next != NULL && (entry_ptr)->ht_next->ht_prev != (entry_ptr)) \ +) + #ifdef H5C_DO_SANITY_CHECKS #define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (entry_ptr) == NULL ) || \ - ( ! H5F_addr_defined((entry_ptr)->addr) ) || \ - ( (entry_ptr)->ht_next != NULL ) || \ - ( (entry_ptr)->ht_prev != NULL ) || \ - ( (entry_ptr)->size <= 0 ) || \ - ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \ - ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + \ - (cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \ - ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \ - ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \ - (cache_ptr)->index_len ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \ - (cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ +if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ + (entry_ptr) == NULL || !H5F_addr_defined((entry_ptr)->addr) || \ + (entry_ptr)->ht_next != NULL || (entry_ptr)->ht_prev != NULL || \ + (entry_ptr)->size <= 0 || \ + H5C__HASH_FCN((entry_ptr)->addr) < 0 || \ + H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN || \ + (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \ + (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \ + (entry_ptr)->ring <= H5C_RING_UNDEFINED || \ + (entry_ptr)->ring >= H5C_RING_NTYPES || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ - (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || 
\ - ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ - ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \ + (cache_ptr)->index_len != (cache_ptr)->il_len || \ + (cache_ptr)->index_size != (cache_ptr)->il_size \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT insert SC failed") \ } #define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + \ - (cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] == 0 ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \ - (cache_ptr)->index_len ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \ - (cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ +if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ + (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \ + (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] == 0 || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ - (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \ - ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ - ( (cache_ptr)->index_size != (cache_ptr)->il_size) ) { \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \ + (cache_ptr)->index_len != (cache_ptr)->il_len || \ + (cache_ptr)->index_size != (cache_ptr)->il_size \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT insert SC failed") \ } -#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (cache_ptr)->index_len < 1 ) || \ - ( (entry_ptr) == NULL ) || \ - ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ - ( ! 
H5F_addr_defined((entry_ptr)->addr) ) || \ - ( (entry_ptr)->size <= 0 ) || \ - ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \ - ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \ - ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \ - == NULL ) || \ - ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \ - != (entry_ptr) ) && \ - ( (entry_ptr)->ht_prev == NULL ) ) || \ - ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] == \ - (entry_ptr) ) && \ - ( (entry_ptr)->ht_prev != NULL ) ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + \ - (cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \ - ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \ - ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \ - (cache_ptr)->index_len ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] < \ - (entry_ptr)->size ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \ - (cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ +#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ +if ( (cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ + (cache_ptr)->index_len < 1 || \ + (entry_ptr) == NULL || \ + (cache_ptr)->index_size < (entry_ptr)->size || \ + !H5F_addr_defined((entry_ptr)->addr) || \ + (entry_ptr)->size <= 0 || \ + H5C__HASH_FCN((entry_ptr)->addr) < 0 || \ + H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN || \ + (cache_ptr)->index[H5C__HASH_FCN((entry_ptr)->addr)] == NULL || \ + ((cache_ptr)->index[H5C__HASH_FCN((entry_ptr)->addr)] != (entry_ptr) && \ + (entry_ptr)->ht_prev == NULL) || \ + ((cache_ptr)->index[H5C__HASH_FCN((entry_ptr)->addr)] == (entry_ptr) && \ + (entry_ptr)->ht_prev != NULL) || \ + (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \ + (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \ + (entry_ptr)->ring <= H5C_RING_UNDEFINED || \ + (entry_ptr)->ring >= H5C_RING_NTYPES || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] < (entry_ptr)->size || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ - (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \ - ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ - ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \ + (cache_ptr)->index_len != (cache_ptr)->il_len || \ + (cache_ptr)->index_size != (cache_ptr)->il_size \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT remove SC failed") \ } -#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (entry_ptr) == NULL ) || \ - ( ! 
H5F_addr_defined((entry_ptr)->addr) ) || \ - ( (entry_ptr)->size <= 0 ) || \ - ( (entry_ptr)->ht_prev != NULL ) || \ - ( (entry_ptr)->ht_prev != NULL ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + \ - (cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \ - (cache_ptr)->index_len ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \ - (cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ - ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ - (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \ - ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ - ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \ +#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ +if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ + (entry_ptr) == NULL || !H5F_addr_defined((entry_ptr)->addr) || \ + (entry_ptr)->size <= 0 || \ + (entry_ptr)->ht_next != NULL || \ + (entry_ptr)->ht_prev != NULL || \ + (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \ + (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ + ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \ + (cache_ptr)->index_len != (cache_ptr)->il_len || \ + (cache_ptr)->index_size != (cache_ptr)->il_size \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT remove SC failed") \ } -/* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */ -#define H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ - ( ! 
H5F_addr_defined(entry_addr) ) || \ - ( H5C__HASH_FCN(entry_addr) < 0 ) || \ - ( H5C__HASH_FCN(entry_addr) >= H5C__HASH_TABLE_LEN ) ) { \ +#define H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) \ +if (H5C__PRE_HT_SEARCH_SC_CMP(cache_ptr, entry_addr)) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT search SC failed") \ } -/* (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK) */ #define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (cache_ptr)->index_len < 1 ) || \ - ( (entry_ptr) == NULL ) || \ - ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ - ( (entry_ptr)->size <= 0 ) || \ - ( ((cache_ptr)->index)[k] == NULL ) || \ - ( ( ((cache_ptr)->index)[k] != (entry_ptr) ) && \ - ( (entry_ptr)->ht_prev == NULL ) ) || \ - ( ( ((cache_ptr)->index)[k] == (entry_ptr) ) && \ - ( (entry_ptr)->ht_prev != NULL ) ) || \ - ( ( (entry_ptr)->ht_prev != NULL ) && \ - ( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \ - ( ( (entry_ptr)->ht_next != NULL ) && \ - ( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \ +if(H5C__POST_SUC_HT_SEARCH_SC_CMP(cache_ptr, entry_ptr, k)) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post successful HT search SC failed") \ } -/* (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK) */ -#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( ((cache_ptr)->index)[k] != (entry_ptr) ) || \ - ( (entry_ptr)->ht_prev != NULL ) ) { \ +#define H5C__POST_HT_SHIFT_TO_FRONT_SC(cache_ptr, entry_ptr, k, fail_val) \ +if(H5C__POST_HT_SHIFT_TO_FRONT_SC_CMP(cache_ptr, entry_ptr, k)) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT shift to front SC failed") \ } -#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->index_len <= 0 ) || \ - ( (cache_ptr)->index_size <= 0 ) || \ - ( (new_size) <= 0 ) || \ - ( (old_size) > (cache_ptr)->index_size ) || \ - ( ( (cache_ptr)->index_len == 1 ) && \ - ( (cache_ptr)->index_size != (old_size) ) ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + \ - (cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \ - ( ( !( was_clean ) || \ - ( (cache_ptr)->clean_index_size < (old_size) ) ) && \ - ( ( (was_clean) ) || \ - ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) || \ - ( (entry_ptr) == NULL ) || \ - ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \ - ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \ - (cache_ptr)->index_len ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \ - (cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ - ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ - (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \ - ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ - ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \ +#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ + entry_ptr, was_clean, fail_val) \ +if 
((cache_ptr) == NULL || \ + (cache_ptr)->index_len <= 0 || (cache_ptr)->index_size <= 0 || \ + (new_size) <= 0 || (old_size) > (cache_ptr)->index_size || \ + ((cache_ptr)->index_len == 1 && (cache_ptr)->index_size != (old_size)) || \ + (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \ + (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \ + ((!(was_clean) || (cache_ptr)->clean_index_size < (old_size)) && \ + ((was_clean) || (cache_ptr)->dirty_index_size < (old_size))) || \ + (entry_ptr) == NULL || \ + (entry_ptr)->ring <= H5C_RING_UNDEFINED || \ + (entry_ptr)->ring >= H5C_RING_NTYPES || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ + ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \ + (cache_ptr)->index_len != (cache_ptr)->il_len || \ + (cache_ptr)->index_size != (cache_ptr)->il_size \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT entry size change SC failed") \ } -#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->index_len <= 0 ) || \ - ( (cache_ptr)->index_size <= 0 ) || \ - ( (new_size) > (cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + \ - (cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \ - ( ( !((entry_ptr)->is_dirty ) || \ - ( (cache_ptr)->dirty_index_size < (new_size) ) ) && \ - ( ( ((entry_ptr)->is_dirty) ) || \ - ( (cache_ptr)->clean_index_size < (new_size) ) ) ) || \ - ( ( (cache_ptr)->index_len == 1 ) && \ - ( (cache_ptr)->index_size != (new_size) ) ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \ - (cache_ptr)->index_len ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \ - (cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ - ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ - (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \ - ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \ - ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \ +#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ + entry_ptr, fail_val) \ +if ((cache_ptr) == NULL || \ + (cache_ptr)->index_len <= 0 || (cache_ptr)->index_size <= 0 || \ + (new_size) > (cache_ptr)->index_size || \ + (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \ + (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \ + ((!((entry_ptr)->is_dirty ) || (cache_ptr)->dirty_index_size < (new_size)) && \ + ((entry_ptr)->is_dirty || (cache_ptr)->clean_index_size < (new_size)) \ + ) || \ + ((cache_ptr)->index_len == 1 && (cache_ptr)->index_size != (new_size)) || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ + 
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \ + (cache_ptr)->index_len != (cache_ptr)->il_len || \ + (cache_ptr)->index_size != (cache_ptr)->il_size \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT entry size change SC failed") \ } -#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \ -if ( \ - ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (cache_ptr)->index_len <= 0 ) || \ - ( (entry_ptr) == NULL ) || \ - ( (entry_ptr)->is_dirty != FALSE ) || \ - ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ - ( (cache_ptr)->dirty_index_size < (entry_ptr)->size ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \ - ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \ - ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \ - (cache_ptr)->index_len ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \ - (cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ +#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \ +if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ + (cache_ptr)->index_len <= 0 || \ + (entry_ptr) == NULL || (entry_ptr)->is_dirty != FALSE || \ + (cache_ptr)->index_size < (entry_ptr)->size || \ + (cache_ptr)->dirty_index_size < (entry_ptr)->size || \ + (cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) || \ + (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) || \ + (entry_ptr)->ring <= H5C_RING_UNDEFINED || \ + (entry_ptr)->ring >= H5C_RING_NTYPES || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ - (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT update for entry clean SC failed") \ } -#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \ -if ( \ - ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (cache_ptr)->index_len <= 0 ) || \ - ( (entry_ptr) == NULL ) || \ - ( (entry_ptr)->is_dirty != TRUE ) || \ - ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ - ( (cache_ptr)->clean_index_size < (entry_ptr)->size ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \ - ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \ - ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \ - (cache_ptr)->index_len ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \ - 
(cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ +#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \ +if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ + (cache_ptr)->index_len <= 0 || \ + (entry_ptr) == NULL || (entry_ptr)->is_dirty != TRUE || \ + (cache_ptr)->index_size < (entry_ptr)->size || \ + (cache_ptr)->clean_index_size < (entry_ptr)->size || \ + (cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \ + (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \ + (entry_ptr)->ring <= H5C_RING_UNDEFINED || \ + (entry_ptr)->ring >= H5C_RING_NTYPES || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ - (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT update for entry dirty SC failed") \ } -#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \ -if ( ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \ - (cache_ptr)->index_len ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \ - (cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ - ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ - (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \ +#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \ +if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \ + (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ + ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT update for entry clean SC failed") \ } -#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \ -if ( ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \ - ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \ - ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \ - (cache_ptr)->index_len ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \ - (cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ - ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ - (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \ +#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \ +if ((cache_ptr)->index_size 
!= ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ + (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \ + (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \ + ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ + ) { \ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT update for entry dirty SC failed") \ } @@ -854,13 +798,11 @@ if ( ( (cache_ptr)->index_size != \ #define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) #define H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) #define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) -#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) +#define H5C__POST_HT_SHIFT_TO_FRONT_SC(cache_ptr, entry_ptr, k, fail_val) #define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) #define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) -#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean, fail_val) -#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr, fail_val) +#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, entry_ptr, was_clean, fail_val) +#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, entry_ptr, fail_val) #define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) #define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) @@ -872,24 +814,21 @@ if ( ( (cache_ptr)->index_size != \ int k; \ H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \ k = H5C__HASH_FCN((entry_ptr)->addr); \ - if(((cache_ptr)->index)[k] != NULL) { \ - (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \ + if((cache_ptr)->index[k] != NULL) { \ + (entry_ptr)->ht_next = (cache_ptr)->index[k]; \ (entry_ptr)->ht_next->ht_prev = (entry_ptr); \ } \ - ((cache_ptr)->index)[k] = (entry_ptr); \ + (cache_ptr)->index[k] = (entry_ptr); \ (cache_ptr)->index_len++; \ (cache_ptr)->index_size += (entry_ptr)->size; \ - ((cache_ptr)->index_ring_len[(entry_ptr)->ring])++; \ - ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) \ - += (entry_ptr)->size; \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring]++; \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] += (entry_ptr)->size; \ if((entry_ptr)->is_dirty) { \ (cache_ptr)->dirty_index_size += (entry_ptr)->size; \ - ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ - += (entry_ptr)->size; \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] += (entry_ptr)->size; \ } else { \ (cache_ptr)->clean_index_size += (entry_ptr)->size; \ - ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \ - += (entry_ptr)->size; \ + (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] += (entry_ptr)->size; \ } \ if((entry_ptr)->flush_me_last) { \ (cache_ptr)->num_last_entries++; \ @@ -905,29 +844,26 @@ if ( ( (cache_ptr)->index_size != \ #define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, fail_val) \ { \ int k; \ - H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ + H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ k = H5C__HASH_FCN((entry_ptr)->addr); \ if((entry_ptr)->ht_next) \ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ if((entry_ptr)->ht_prev) \ 
(entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ - if(((cache_ptr)->index)[k] == (entry_ptr)) \ - ((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \ + if((cache_ptr)->index[k] == (entry_ptr)) \ + (cache_ptr)->index[k] = (entry_ptr)->ht_next; \ (entry_ptr)->ht_next = NULL; \ (entry_ptr)->ht_prev = NULL; \ (cache_ptr)->index_len--; \ (cache_ptr)->index_size -= (entry_ptr)->size; \ - ((cache_ptr)->index_ring_len[(entry_ptr)->ring])--; \ - ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) \ - -= (entry_ptr)->size; \ + (cache_ptr)->index_ring_len[(entry_ptr)->ring]--; \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] -= (entry_ptr)->size; \ if((entry_ptr)->is_dirty) { \ (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \ - ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ - -= (entry_ptr)->size; \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] -= (entry_ptr)->size; \ } else { \ (cache_ptr)->clean_index_size -= (entry_ptr)->size; \ - ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \ - -= (entry_ptr)->size; \ + (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] -= (entry_ptr)->size; \ } \ if((entry_ptr)->flush_me_last) { \ (cache_ptr)->num_last_entries--; \ @@ -937,209 +873,170 @@ if ( ( (cache_ptr)->index_size != \ (cache_ptr)->il_tail, (cache_ptr)->il_len, \ (cache_ptr)->il_size, fail_val) \ H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \ - H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ + H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ } -#define H5C__SEARCH_INDEX(cache_ptr, entry_addr, entry_ptr, fail_val) \ -{ \ - int k; \ - int depth = 0; \ - H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) \ - k = H5C__HASH_FCN(entry_addr); \ - entry_ptr = ((cache_ptr)->index)[k]; \ - while(entry_ptr) { \ - if(H5F_addr_eq(entry_addr, (entry_ptr)->addr)) { \ - H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \ - if(entry_ptr != ((cache_ptr)->index)[k]) { \ - if((entry_ptr)->ht_next) \ - (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ - HDassert((entry_ptr)->ht_prev != NULL); \ - (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ - ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \ - (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \ - (entry_ptr)->ht_prev = NULL; \ - ((cache_ptr)->index)[k] = (entry_ptr); \ - H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \ - } \ - break; \ - } \ - (entry_ptr) = (entry_ptr)->ht_next; \ - (depth)++; \ - } \ - H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \ +#define H5C__SEARCH_INDEX(cache_ptr, entry_addr, entry_ptr, fail_val) \ +{ \ + int k; \ + int depth = 0; \ + H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) \ + k = H5C__HASH_FCN(entry_addr); \ + (entry_ptr) = (cache_ptr)->index[k]; \ + while(entry_ptr) { \ + if(H5F_addr_eq(entry_addr, (entry_ptr)->addr)) { \ + H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \ + if((entry_ptr) != (cache_ptr)->index[k]) { \ + if((entry_ptr)->ht_next) \ + (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ + HDassert((entry_ptr)->ht_prev != NULL); \ + (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ + (cache_ptr)->index[k]->ht_prev = (entry_ptr); \ + (entry_ptr)->ht_next = (cache_ptr)->index[k]; \ + (entry_ptr)->ht_prev = NULL; \ + (cache_ptr)->index[k] = (entry_ptr); \ + H5C__POST_HT_SHIFT_TO_FRONT_SC(cache_ptr, entry_ptr, k, fail_val) \ + } \ + break; \ + } \ + (entry_ptr) = (entry_ptr)->ht_next; \ + (depth)++; \ + } \ + H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, ((entry_ptr) != 
NULL), depth) \ } -#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, fail_val) \ -{ \ +#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, fail_val) \ +{ \ H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \ - (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \ - ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ - -= (entry_ptr)->size; \ - (cache_ptr)->clean_index_size += (entry_ptr)->size; \ - ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \ - += (entry_ptr)->size; \ + (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] -= (entry_ptr)->size; \ + (cache_ptr)->clean_index_size += (entry_ptr)->size; \ + (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] += (entry_ptr)->size; \ H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \ } -#define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, fail_val) \ -{ \ +#define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, fail_val) \ +{ \ H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \ - (cache_ptr)->clean_index_size -= (entry_ptr)->size; \ - ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \ - -= (entry_ptr)->size; \ - (cache_ptr)->dirty_index_size += (entry_ptr)->size; \ - ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \ - += (entry_ptr)->size; \ + (cache_ptr)->clean_index_size -= (entry_ptr)->size; \ + (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] -= (entry_ptr)->size; \ + (cache_ptr)->dirty_index_size += (entry_ptr)->size; \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] += (entry_ptr)->size; \ H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \ } #define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean, fail_val) \ + entry_ptr, was_clean, fail_val) \ { \ H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean, fail_val) \ + entry_ptr, was_clean, fail_val) \ (cache_ptr)->index_size -= (old_size); \ (cache_ptr)->index_size += (new_size); \ - ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) -= (old_size); \ - ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) += (new_size); \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] -= (old_size); \ + (cache_ptr)->index_ring_size[(entry_ptr)->ring] += (new_size); \ if(was_clean) { \ (cache_ptr)->clean_index_size -= (old_size); \ - ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring])-= (old_size); \ + (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] -= (old_size); \ } else { \ - (cache_ptr)->dirty_index_size -= (old_size); \ - ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])-= (old_size); \ + (cache_ptr)->dirty_index_size -= (old_size); \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] -= (old_size); \ } \ if((entry_ptr)->is_dirty) { \ (cache_ptr)->dirty_index_size += (new_size); \ - ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])+= (new_size); \ + (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] += (new_size); \ } else { \ - (cache_ptr)->clean_index_size += (new_size); \ - ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring])+= (new_size); \ + (cache_ptr)->clean_index_size += (new_size); \ + (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] += (new_size); \ } \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->il_len, \ (cache_ptr)->il_size, \ - (old_size), (new_size), (fail_val)) \ + (old_size), (new_size), (fail_val)) \ H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, 
old_size, new_size, \ - entry_ptr, fail_val) \ + entry_ptr, fail_val) \ } /************************************************************************** * - * Skip list insertion and deletion macros: + * Skip list modification macros * **************************************************************************/ -/*------------------------------------------------------------------------- - * - * Macro: H5C__INSERT_ENTRY_IN_SLIST - * - * Purpose: Insert the specified instance of H5C_cache_entry_t into - * the skip list in the specified instance of H5C_t. Update - * the associated length and size fields. - * - * Return: N/A - * - * Programmer: John Mainzer, 5/10/04 - * - *------------------------------------------------------------------------- - */ - -/* NOTE: The H5C__INSERT_ENTRY_IN_SLIST() macro is set up so that - * - * H5C_DO_SANITY_CHECKS - * - * and - * - * H5C_DO_SLIST_SANITY_CHECKS - * - * can be selected independently. This is easy to miss as the - * two #defines are easy to confuse. - */ - #ifdef H5C_DO_SLIST_SANITY_CHECKS -#define H5C_ENTRY_IN_SLIST(cache_ptr, entry_ptr) \ +#define H5C__ENTRY_IN_SLIST(cache_ptr, entry_ptr) \ H5C__entry_in_skip_list((cache_ptr), (entry_ptr)) #else /* H5C_DO_SLIST_SANITY_CHECKS */ -#define H5C_ENTRY_IN_SLIST(cache_ptr, entry_ptr) FALSE +#define H5C__ENTRY_IN_SLIST(cache_ptr, entry_ptr) FALSE #endif /* H5C_DO_SLIST_SANITY_CHECKS */ #ifdef H5C_DO_SANITY_CHECKS -#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - \ - if ( (cache_ptr)->slist_enabled ) { \ - \ - HDassert( (entry_ptr) ); \ - HDassert( (entry_ptr)->size > 0 ); \ - HDassert( H5F_addr_defined((entry_ptr)->addr) ); \ - HDassert( !((entry_ptr)->in_slist) ); \ - HDassert( ! 
H5C_ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \ - HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \ - HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \ - HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ - (cache_ptr)->slist_len ); \ - HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \ - (cache_ptr)->slist_size ); \ - \ - if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, \ - &((entry_ptr)->addr)) < 0) \ - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \ - "can't insert entry in skip list") \ - \ - (entry_ptr)->in_slist = TRUE; \ - (cache_ptr)->slist_changed = TRUE; \ - (cache_ptr)->slist_len++; \ - (cache_ptr)->slist_size += (entry_ptr)->size; \ - ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \ - ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;\ - (cache_ptr)->slist_len_increase++; \ - (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \ - \ - HDassert( (cache_ptr)->slist_len > 0 ); \ - HDassert( (cache_ptr)->slist_size > 0 ); \ - \ - } else { /* slist disabled */ \ - \ - HDassert( (cache_ptr)->slist_len == 0 ); \ - HDassert( (cache_ptr)->slist_size == 0 ); \ - } \ -} /* H5C__INSERT_ENTRY_IN_SLIST */ +#define H5C__SLIST_INSERT_ENTRY_SC(cache_ptr, entry_ptr) \ +{ \ + (cache_ptr)->slist_len_increase++; \ + (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \ +} /* H5C__SLIST_INSERT_ENTRY_SC() */ +#define H5C__SLIST_REMOVE_ENTRY_SC(cache_ptr, entry_ptr) \ +{ \ + (cache_ptr)->slist_len_increase--; \ + (cache_ptr)->slist_size_increase -= (int64_t)((entry_ptr)->size); \ +} /* H5C__SLIST_REMOVE_ENTRY_SC() */ +#define H5C__SLIST_UPDATE_FOR_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \ +{ \ + (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \ + (cache_ptr)->slist_size_increase += (int64_t)(new_size); \ +} /* H5C__SLIST_UPDATE_FOR_ENTRY_SIZE_CHANGE_SC() */ #else /* H5C_DO_SANITY_CHECKS */ +#define H5C__SLIST_INSERT_ENTRY_SC(cache_ptr, entry_ptr) +#define H5C__SLIST_REMOVE_ENTRY_SC(cache_ptr, entry_ptr) +#define H5C__SLIST_UPDATE_FOR_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) + +#endif /* H5C_DO_SANITY_CHECKS */ + + +/*------------------------------------------------------------------------- + * + * Macro: H5C__INSERT_ENTRY_IN_SLIST + * + * Purpose: Insert a cache entry into a cache's skip list. Updates + * the associated length and size fields. + * + * Note: This macro is set up so that H5C_DO_SANITY_CHECKS and + * H5C_DO_SLIST_SANITY_CHECKS can be selected independently. + * + * Programmer: John Mainzer, 5/10/04 + * + *------------------------------------------------------------------------- + */ + #define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - \ - if ( (cache_ptr)->slist_enabled ) { \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ \ - HDassert( (entry_ptr) ); \ - HDassert( (entry_ptr)->size > 0 ); \ - HDassert( ! 
H5C_ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
-        HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
-        HDassert( !((entry_ptr)->in_slist) ); \
-        HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
-        HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
-        HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
-                  (cache_ptr)->slist_len ); \
-        HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
-                  (cache_ptr)->slist_size ); \
-        HDassert( (cache_ptr)->slist_ptr ); \
+    if((cache_ptr)->slist_enabled) { \
+        HDassert(entry_ptr); \
+        HDassert((entry_ptr)->size > 0); \
+        HDassert(H5F_addr_defined((entry_ptr)->addr)); \
+        HDassert(!(entry_ptr)->in_slist); \
+        HDassert(!H5C__ENTRY_IN_SLIST((cache_ptr), (entry_ptr))); \
+        HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \
+        HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \
+        HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+                 (cache_ptr)->slist_len); \
+        HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+                 (cache_ptr)->slist_size); \
+        HDassert((cache_ptr)->slist_ptr); \
 \
-        if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, \
-                         &((entry_ptr)->addr)) < 0) \
-            HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
-                        "can't insert entry in skip list") \
+        if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &((entry_ptr)->addr)) < 0) \
+            HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
 \
         (entry_ptr)->in_slist = TRUE; \
         (cache_ptr)->slist_changed = TRUE; \
@@ -1147,288 +1044,160 @@ if ( ( (cache_ptr)->index_size != \
         (cache_ptr)->slist_size += (entry_ptr)->size; \
         ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
         ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;\
+        H5C__SLIST_INSERT_ENTRY_SC(cache_ptr, entry_ptr) \
 \
-    HDassert( (cache_ptr)->slist_len > 0 ); \
-    HDassert( (cache_ptr)->slist_size > 0 ); \
-    \
+        HDassert((cache_ptr)->slist_len > 0); \
+        HDassert((cache_ptr)->slist_size > 0); \
     } else { /* slist disabled */ \
-        \
-        HDassert( (cache_ptr)->slist_len == 0 ); \
-        HDassert( (cache_ptr)->slist_size == 0 ); \
+        HDassert((cache_ptr)->slist_len == 0); \
+        HDassert((cache_ptr)->slist_size == 0); \
     } \
 } /* H5C__INSERT_ENTRY_IN_SLIST */
-#endif /* H5C_DO_SANITY_CHECKS */
-
 /*-------------------------------------------------------------------------
 *
- * Function:    H5C__REMOVE_ENTRY_FROM_SLIST
+ * Macro:       H5C__REMOVE_ENTRY_FROM_SLIST
 *
- * Purpose:     Remove the specified instance of H5C_cache_entry_t from the
- *              index skip list in the specified instance of H5C_t.  Update
+ * Purpose:     Remove a cache entry from a cache's skip list.  Updates
 *              the associated length and size fields.
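
A quick illustration of the refactoring pattern used in the skip-list macros above: the H5C_DO_SANITY_CHECKS-only bookkeeping now lives in the H5C__SLIST_*_SC helper macros, which expand to nothing when the checks are disabled, so H5C__INSERT_ENTRY_IN_SLIST and H5C__REMOVE_ENTRY_FROM_SLIST each need only one definition instead of two parallel ones. A minimal, self-contained sketch of that idiom (hypothetical MY_*/my_* names, not HDF5 code) could look like this:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for H5C_DO_SANITY_CHECKS; set to 0 to compile the checks away */
#define MY_DO_SANITY_CHECKS 1

#if MY_DO_SANITY_CHECKS
/* Bookkeeping that only exists while sanity checking is enabled */
#define MY_SLIST_INSERT_SC(c) ((c)->len_increase++)
#else
/* Expands to a harmless no-op when sanity checking is disabled */
#define MY_SLIST_INSERT_SC(c) ((void)0)
#endif

typedef struct {
    int len;          /* stand-in for slist_len          */
    int len_increase; /* stand-in for slist_len_increase */
} my_cache_t;

/* Single definition of the "real" macro; the sanity-only work is delegated */
#define MY_INSERT_ENTRY_IN_SLIST(c) \
    do {                            \
        (c)->len++;                 \
        MY_SLIST_INSERT_SC(c);      \
    } while (0)

int main(void)
{
    my_cache_t cache = {0, 0};

    MY_INSERT_ENTRY_IN_SLIST(&cache);
    assert(cache.len == 1);
    printf("len = %d, len_increase = %d\n", cache.len, cache.len_increase);
    return 0;
}

Flipping MY_DO_SANITY_CHECKS to 0 compiles the extra bookkeeping away without touching the caller, which is the same property the new _SC helpers give the real macros.
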
* - * Return: N/A - * * Programmer: John Mainzer, 5/10/04 * *------------------------------------------------------------------------- */ -#ifdef H5C_DO_SANITY_CHECKS -#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - \ - if ( (cache_ptr)->slist_enabled ) { \ - \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - HDassert( (entry_ptr)->in_slist ); \ - HDassert( (cache_ptr)->slist_ptr ); \ - HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \ - HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \ - HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ - (cache_ptr)->slist_len ); \ - HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \ - (cache_ptr)->slist_size ); \ - HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \ - \ - if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \ - != (entry_ptr) ) \ - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \ - "can't delete entry from skip list") \ - \ - HDassert( (cache_ptr)->slist_len > 0 ); \ - if(!(during_flush)) \ - (cache_ptr)->slist_changed = TRUE; \ - (cache_ptr)->slist_len--; \ - HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \ - (cache_ptr)->slist_size -= (entry_ptr)->size; \ - ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \ - HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] >= \ - (entry_ptr)->size ); \ - ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\ - (cache_ptr)->slist_len_increase--; \ - (cache_ptr)->slist_size_increase -= (int64_t)((entry_ptr)->size); \ - (entry_ptr)->in_slist = FALSE; \ - \ - } else { /* slist disabled */ \ - \ - HDassert( (cache_ptr)->slist_len == 0 ); \ - HDassert( (cache_ptr)->slist_size == 0 ); \ - } \ -} /* H5C__REMOVE_ENTRY_FROM_SLIST */ - -#else /* H5C_DO_SANITY_CHECKS */ - -#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \ +#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - \ - if ( (cache_ptr)->slist_enabled ) { \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->in_slist ); \ - HDassert( (cache_ptr)->slist_ptr ); \ - HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \ - HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \ - HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ - (cache_ptr)->slist_len ); \ - HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \ - (cache_ptr)->slist_size ); \ + if((cache_ptr)->slist_enabled) { \ + HDassert(entry_ptr); \ + HDassert(!(entry_ptr)->is_read_only); \ + HDassert((entry_ptr)->ro_ref_count == 0); \ + HDassert((entry_ptr)->size > 0); \ + HDassert((entry_ptr)->in_slist); \ + HDassert((cache_ptr)->slist_ptr); \ + HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \ + HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \ + HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ + (cache_ptr)->slist_len); \ + HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \ + (cache_ptr)->slist_size); \ + HDassert((cache_ptr)->slist_size >= (entry_ptr)->size); \ \ - if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) 
\ - != (entry_ptr) ) \ - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \ - "can't delete entry from skip list") \ + if(H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) != (entry_ptr) ) \ + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't delete entry from skip list") \ \ - HDassert( (cache_ptr)->slist_len > 0 ); \ + HDassert((cache_ptr)->slist_len > 0); \ if(!(during_flush)) \ (cache_ptr)->slist_changed = TRUE; \ (cache_ptr)->slist_len--; \ - HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \ + HDassert((cache_ptr)->slist_size >= (entry_ptr)->size); \ (cache_ptr)->slist_size -= (entry_ptr)->size; \ - ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \ - HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] >= \ - (entry_ptr)->size ); \ + (cache_ptr)->slist_ring_len[(entry_ptr)->ring]--; \ + HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] >= (entry_ptr)->size); \ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\ + H5C__SLIST_REMOVE_ENTRY_SC(cache_ptr, entry_ptr) \ (entry_ptr)->in_slist = FALSE; \ - \ } else { /* slist disabled */ \ - \ - HDassert( (cache_ptr)->slist_len == 0 ); \ - HDassert( (cache_ptr)->slist_size == 0 ); \ + HDassert((cache_ptr)->slist_len == 0); \ + HDassert((cache_ptr)->slist_size == 0); \ } \ } /* H5C__REMOVE_ENTRY_FROM_SLIST */ -#endif /* H5C_DO_SANITY_CHECKS */ - /*------------------------------------------------------------------------- * - * Function: H5C__UPDATE_SLIST_FOR_SIZE_CHANGE + * Macro: H5C__UPDATE_SLIST_FOR_SIZE_CHANGE * * Purpose: Update cache_ptr->slist_size for a change in the size of * and entry in the slist. * - * Return: N/A - * * Programmer: John Mainzer, 9/07/05 * *------------------------------------------------------------------------- */ -#ifdef H5C_DO_SANITY_CHECKS - #define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ \ - if ( (cache_ptr)->slist_enabled ) { \ - \ - HDassert( (old_size) > 0 ); \ - HDassert( (new_size) > 0 ); \ - HDassert( (old_size) <= (cache_ptr)->slist_size ); \ - HDassert( (cache_ptr)->slist_len > 0 ); \ - HDassert( ((cache_ptr)->slist_len > 1) || \ - ( (cache_ptr)->slist_size == (old_size) ) ); \ - HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \ - HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \ - HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ - (cache_ptr)->slist_len ); \ - HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \ - (cache_ptr)->slist_size ); \ + if((cache_ptr)->slist_enabled) { \ + HDassert((old_size) > 0); \ + HDassert((new_size) > 0); \ + HDassert((old_size) <= (cache_ptr)->slist_size); \ + HDassert((cache_ptr)->slist_len > 0); \ + HDassert((cache_ptr)->slist_len > 1 || \ + (cache_ptr)->slist_size == (old_size)); \ + HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \ + HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \ + HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ + (cache_ptr)->slist_len); \ + HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \ + (cache_ptr)->slist_size); \ \ (cache_ptr)->slist_size -= (old_size); \ (cache_ptr)->slist_size += (new_size); \ \ - HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] \ - >= (old_size) ); \ - \ - ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \ - ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \ + 
HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] >= (old_size)); \ \ - (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \ - (cache_ptr)->slist_size_increase += (int64_t)(new_size); \ + (cache_ptr)->slist_ring_size[(entry_ptr)->ring] -= (old_size); \ + (cache_ptr)->slist_ring_size[(entry_ptr)->ring] += (new_size); \ \ - HDassert( (new_size) <= (cache_ptr)->slist_size ); \ - HDassert( ( (cache_ptr)->slist_len > 1 ) || \ - ( (cache_ptr)->slist_size == (new_size) ) ); \ + H5C__SLIST_UPDATE_FOR_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \ \ + HDassert((new_size) <= (cache_ptr)->slist_size); \ + HDassert((cache_ptr)->slist_len > 1 || \ + (cache_ptr)->slist_size == (new_size)); \ } else { /* slist disabled */ \ - \ - HDassert( (cache_ptr)->slist_len == 0 ); \ - HDassert( (cache_ptr)->slist_size == 0 ); \ + HDassert((cache_ptr)->slist_len == 0); \ + HDassert((cache_ptr)->slist_size == 0); \ } \ } /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */ -#else /* H5C_DO_SANITY_CHECKS */ - -#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - \ - if ( (cache_ptr)->slist_enabled ) { \ - \ - HDassert( (old_size) > 0 ); \ - HDassert( (new_size) > 0 ); \ - HDassert( (old_size) <= (cache_ptr)->slist_size ); \ - HDassert( (cache_ptr)->slist_len > 0 ); \ - HDassert( ((cache_ptr)->slist_len > 1) || \ - ( (cache_ptr)->slist_size == (old_size) ) ); \ - HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \ - HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \ - HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ - (cache_ptr)->slist_len ); \ - HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \ - (cache_ptr)->slist_size ); \ - \ - (cache_ptr)->slist_size -= (old_size); \ - (cache_ptr)->slist_size += (new_size); \ - \ - HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] >= \ - (old_size) ); \ - ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \ - ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \ - \ - HDassert( (new_size) <= (cache_ptr)->slist_size ); \ - HDassert( ( (cache_ptr)->slist_len > 1 ) || \ - ( (cache_ptr)->slist_size == (new_size) ) ); \ - \ - } else { /* slist disabled */ \ - \ - HDassert( (cache_ptr)->slist_len == 0 ); \ - HDassert( (cache_ptr)->slist_size == 0 ); \ - } \ -} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */ - -#endif /* H5C_DO_SANITY_CHECKS */ - /************************************************************************** * - * Replacement policy update macros: + * Replacement policy update macros * **************************************************************************/ -/*------------------------------------------------------------------------- - * - * Macro: H5C__UPDATE_RP_FOR_EVICTION - * - * Purpose: Update the replacement policy data structures for an - * eviction of the specified cache entry. - * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. - * - * Return: Non-negative on success/Negative on failure. 
- * - * Programmer: John Mainzer, 5/10/04 - * - *------------------------------------------------------------------------- - */ - #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS -#define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \ +#define H5C__UPDATE_RP_FOR_EVICTION_CD_LRU(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( !((entry_ptr)->is_pinned) ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list. */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ /* If the entry is clean when it is evicted, it should be on the \ * clean LRU list, if it was dirty, it should be on the dirty LRU list. \ * Remove it from the appropriate list according to the value of the \ * dirty flag. \ */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ + if((entry_ptr)->is_dirty) { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ +} /* H5C__UPDATE_RP_FOR_EVICTION_CD_LRU() */ +#define H5C__UPDATE_RP_FOR_FLUSH_CD_LRU(cache_ptr, entry_ptr, fail_val) \ +{ \ + /* An entry being flushed or cleared, may not be dirty. Use the \ + * dirty flag to infer whether the entry is on the clean or dirty \ + * LRU list, and remove it. Then insert it at the head of the \ + * clean LRU list. \ + * \ + * The function presumes that a dirty entry will be either cleared \ + * or flushed shortly, so it is OK if we put a dirty entry on the \ + * clean LRU list. \ + */ \ + if((entry_ptr)->is_dirty) { \ H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ (cache_ptr)->dLRU_tail_ptr, \ (cache_ptr)->dLRU_list_len, \ @@ -1440,73 +1209,225 @@ if ( ( (cache_ptr)->index_size != \ (cache_ptr)->cLRU_list_size, (fail_val)) \ } \ \ -} /* H5C__UPDATE_RP_FOR_EVICTION */ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ +} /* H5C__UPDATE_RP_FOR_FLUSH_CD_LRU() */ +#define H5C__UPDATE_RP_FOR_INSERT_APPEND_CD_LRU(cache_ptr, entry_ptr, fail_val) \ +{ \ + /* Insert the entry at the _tail_ of the clean or dirty LRU list as \ + * appropriate. \ + */ \ + if((entry_ptr)->is_dirty) { \ + H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ +} /* H5C__UPDATE_RP_FOR_INSERT_APPEND_CD_LRU() */ +#define H5C__UPDATE_RP_FOR_INSERTION_CD_LRU(cache_ptr, entry_ptr, fail_val) \ +{ \ + /* Insert the entry at the head of the clean or dirty LRU list as \ + * appropriate. 
\ + */ \ + if((entry_ptr)->is_dirty) { \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ +} /* H5C__UPDATE_RP_FOR_INSERTION_CD_LRU() */ +#define H5C__UPDATE_RP_FOR_PROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val) \ +{ \ + /* Remove the entry from the clean or dirty LRU list as appropriate */ \ + if((entry_ptr)->is_dirty) { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ +} /* H5C__UPDATE_RP_FOR_PROTECT_CD_LRU() */ +#define H5C__UPDATE_RP_FOR_MOVE_CD_LRU(cache_ptr, entry_ptr, was_dirty, fail_val) \ +{ \ + /* Remove the entry from either the clean or dirty LRU list as \ + * indicated by the was_dirty parameter \ + */ \ + if(was_dirty) { \ + H5C__AUX_DLL_REMOVE((entry_ptr), \ + (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + \ + } else { \ + H5C__AUX_DLL_REMOVE((entry_ptr), \ + (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ + \ + /* Insert the entry at the head of either the clean or dirty \ + * LRU list as appropriate. \ + */ \ + if((entry_ptr)->is_dirty) { \ + H5C__AUX_DLL_PREPEND((entry_ptr), \ + (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_PREPEND((entry_ptr), \ + (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ +} /* H5C__UPDATE_RP_FOR_MOVE_CD_LRU() */ +#define H5C__UPDATE_RP_FOR_SIZE_CHANGE_CD_LRU(cache_ptr, entry_ptr, new_size, fail_val) \ +{ \ + /* Update the size of the clean or dirty LRU list as \ + * appropriate. \ + */ \ + if((entry_ptr)->is_dirty) { \ + H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, \ + (entry_ptr)->size, \ + (new_size), (fail_val)) \ + \ + } else { \ + H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, \ + (entry_ptr)->size, \ + (new_size), (fail_val)) \ + } \ +} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE_CD_LRU() */ +#define H5C__UPDATE_RP_FOR_UNPIN_CD_LRU(cache_ptr, entry_ptr, fail_val) \ +{ \ + /* Insert the entry at the head of either the clean \ + * or dirty LRU list as appropriate. 
\ + */ \ + if((entry_ptr)->is_dirty) { \ + H5C__AUX_DLL_PREPEND((entry_ptr), \ + (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_PREPEND((entry_ptr), \ + (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ +} /* H5C__UPDATE_RP_FOR_UNPIN_CD_LRU() */ +#define H5C__UPDATE_RP_FOR_UNPROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val) \ +{ \ + /* Insert the entry at the head of either the clean or \ + * dirty LRU list as appropriate. \ + */ \ + if((entry_ptr)->is_dirty) { \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ +} /* H5C__UPDATE_RP_FOR_UNPROTECT_CD_LRU() */ #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ +#define H5C__UPDATE_RP_FOR_EVICTION_CD_LRU(cache_ptr, entry_ptr, fail_val) +#define H5C__UPDATE_RP_FOR_FLUSH_CD_LRU(cache_ptr, entry_ptr, fail_val) +#define H5C__UPDATE_RP_FOR_INSERT_APPEND_CD_LRU(cache_ptr, entry_ptr, fail_val) +#define H5C__UPDATE_RP_FOR_INSERTION_CD_LRU(cache_ptr, entry_ptr, fail_val) +#define H5C__UPDATE_RP_FOR_PROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val) +#define H5C__UPDATE_RP_FOR_MOVE_CD_LRU(cache_ptr, entry_ptr, was_dirty, fail_val) +#define H5C__UPDATE_RP_FOR_SIZE_CHANGE_CD_LRU(cache_ptr, entry_ptr, new_size, fail_val) +#define H5C__UPDATE_RP_FOR_UNPIN_CD_LRU(cache_ptr, entry_ptr, fail_val) +#define H5C__UPDATE_RP_FOR_UNPROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val) + +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + +/*------------------------------------------------------------------------- + * + * Macro: H5C__UPDATE_RP_FOR_EVICTION + * + * Purpose: Update the replacement policy data structures for an + * eviction of the specified cache entry. + * + * Programmer: John Mainzer, 5/10/04 + * + *------------------------------------------------------------------------- + */ + #define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( !((entry_ptr)->is_pinned) ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list. 
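As an illustration of the conditional-compilation idiom this refactoring relies on: when H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS is disabled, the *_CD_LRU helper macros above expand to nothing, so the main replacement-policy macros can invoke them unconditionally instead of carrying duplicated #if/#else bodies. The minimal, self-contained sketch below shows the same idiom with hypothetical names (MAINTAIN_AUX_LIST, UPDATE_AUX_LIST, and evict are not taken from the HDF5 sources):

#include <stdio.h>

#define MAINTAIN_AUX_LIST 1

#if MAINTAIN_AUX_LIST
#define UPDATE_AUX_LIST(msg) printf("auxiliary list updated for %s\n", (msg));
#else
#define UPDATE_AUX_LIST(msg) /* expands to nothing when the feature is compiled out */
#endif

static void
evict(const char *name)
{
    printf("evicting %s from the main LRU list\n", name);

    /* Invoked unconditionally; the call disappears entirely when
     * MAINTAIN_AUX_LIST is 0, just as the *_CD_LRU helpers do.
     */
    UPDATE_AUX_LIST(name)
}

int
main(void)
{
    evict("entry A");
    return 0;
}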
*/ \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ + HDassert(entry_ptr); \ + HDassert(!(entry_ptr)->is_protected); \ + HDassert(!(entry_ptr)->is_read_only); \ + HDassert((entry_ptr)->ro_ref_count == 0); \ + HDassert(!(entry_ptr)->is_pinned); \ + HDassert((entry_ptr)->size > 0); \ \ + /* Remove the entry from the LRU list */ \ H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ (cache_ptr)->LRU_list_size, (fail_val)) \ \ + /* Remove the entry from the clean & dirty LRU lists, if enabled */ \ + H5C__UPDATE_RP_FOR_EVICTION_CD_LRU(cache_ptr, entry_ptr, fail_val) \ } /* H5C__UPDATE_RP_FOR_EVICTION */ -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - /*------------------------------------------------------------------------- * - * Macro: H5C__UPDATE_RP_FOR_FLUSH + * Macro: H5C__UPDATE_RP_FOR_FLUSH * * Purpose: Update the replacement policy data structures for a flush - * of the specified cache entry. - * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. - * - * Return: N/A + * of the specified cache entry. * * Programmer: John Mainzer, 5/6/04 * *------------------------------------------------------------------------- */ -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - #define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ + HDassert(entry_ptr); \ + HDassert(!(entry_ptr)->is_protected); \ + HDassert(!(entry_ptr)->is_read_only); \ + HDassert((entry_ptr)->ro_ref_count == 0); \ + HDassert((entry_ptr)->size > 0); \ \ - if ( ! ((entry_ptr)->is_pinned) ) { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the \ - * head. \ + if(!(entry_ptr)->is_pinned) { \ + /* Remove the entry from its location in the LRU list \ + * and re-insert it at the head of the list. \ */ \ - \ H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ (cache_ptr)->LRU_tail_ptr, \ (cache_ptr)->LRU_list_len, \ @@ -1517,722 +1438,289 @@ if ( ( (cache_ptr)->index_size != \ (cache_ptr)->LRU_list_len, \ (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* since the entry is being flushed or cleared, one would think \ - * that it must be dirty -- but that need not be the case. Use the \ - * dirty flag to infer whether the entry is on the clean or dirty \ - * LRU list, and remove it. Then insert it at the head of the \ - * clean LRU list. \ - * \ - * The function presumes that a dirty entry will be either cleared \ - * or flushed shortly, so it is OK if we put a dirty entry on the \ - * clean LRU list. 
\ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - } else { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ + /* Maintain the clean & dirty LRU lists, if enabled */ \ + H5C__UPDATE_RP_FOR_FLUSH_CD_LRU(cache_ptr, entry_ptr, fail_val) \ } \ } /* H5C__UPDATE_RP_FOR_FLUSH */ -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( ! ((entry_ptr)->is_pinned) ) { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the \ - * head. \ - */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - } \ -} /* H5C__UPDATE_RP_FOR_FLUSH */ - -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - /*------------------------------------------------------------------------- * - * Macro: H5C__UPDATE_RP_FOR_INSERT_APPEND + * Macro: H5C__UPDATE_RP_FOR_INSERT_APPEND * * Purpose: Update the replacement policy data structures for an - * insertion of the specified cache entry. - * - * Unlike H5C__UPDATE_RP_FOR_INSERTION below, mark the - * new entry as the LEAST recently used entry, not the - * most recently used. + * insertion of the specified cache entry. * - * For now at least, this macro should only be used in - * the reconstruction of the metadata cache from a cache - * image block. - * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. - * - * Return: N/A + * Unlike H5C__UPDATE_RP_FOR_INSERTION below, insert a non-pinned + * new entry as the LEAST recently used entry, not the + * most recently used. 
* * Programmer: John Mainzer, 8/15/15 * *------------------------------------------------------------------------- */ -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, \ - (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the tail of the LRU list. */ \ - \ - H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* insert the entry at the tail of the clean or dirty LRU list as \ - * appropriate. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - } else { \ - H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ - /* End modified LRU specific code. */ \ - } \ -} - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - #define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ + HDassert(entry_ptr); \ + HDassert(!(entry_ptr)->is_protected); \ + HDassert(!(entry_ptr)->is_read_only); \ + HDassert((entry_ptr)->ro_ref_count == 0 ); \ + HDassert((entry_ptr)->size > 0 ); \ \ + if((entry_ptr)->is_pinned) { \ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ (cache_ptr)->pel_tail_ptr, \ (cache_ptr)->pel_len, \ (cache_ptr)->pel_size, (fail_val)) \ - \ } else { \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the tail of the LRU list. */ \ - \ + /* Insert the entry at the tail of the LRU list. */ \ H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ (cache_ptr)->LRU_tail_ptr, \ (cache_ptr)->LRU_list_len, \ (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* End modified LRU specific code. */ \ + /* Maintain the clean & dirty LRU lists, if enabled */ \ + H5C__UPDATE_RP_FOR_INSERT_APPEND_CD_LRU(cache_ptr, entry_ptr, fail_val) \ } \ } -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - /*------------------------------------------------------------------------- * - * Macro: H5C__UPDATE_RP_FOR_INSERTION + * Macro: H5C__UPDATE_RP_FOR_INSERTION * * Purpose: Update the replacement policy data structures for an - * insertion of the specified cache entry. - * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. 
If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. - * - * Return: N/A + * insertion of the specified cache entry. * * Programmer: John Mainzer, 5/17/04 * *------------------------------------------------------------------------- */ -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, \ - (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. */ \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* insert the entry at the head of the clean or dirty LRU list as \ - * appropriate. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - } else { \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ - /* End modified LRU specific code. */ \ - } \ -} - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, \ - (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. */ \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - } \ +#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ + HDassert(entry_ptr); \ + HDassert(!(entry_ptr)->is_protected); \ + HDassert(!(entry_ptr)->is_read_only); \ + HDassert((entry_ptr)->ro_ref_count == 0); \ + HDassert((entry_ptr)->size > 0); \ + \ + if((entry_ptr)->is_pinned) { \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, \ + (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + \ + } else { \ + /* Insert the entry at the head of the LRU list. 
*/ \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* Maintain the clean & dirty LRU lists, if enabled */ \ + H5C__UPDATE_RP_FOR_INSERTION_CD_LRU(cache_ptr, entry_ptr, fail_val) \ + } \ } -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - /*------------------------------------------------------------------------- * - * Macro: H5C__UPDATE_RP_FOR_PROTECT + * Macro: H5C__UPDATE_RP_FOR_PROTECT * * Purpose: Update the replacement policy data structures for a - * protect of the specified cache entry. + * protect of the specified cache entry. * - * To do this, unlink the specified entry from any data - * structures used by the replacement policy, and add the - * entry to the protected list. - * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. - * - * Return: N/A + * To do this, unlink the specified entry from any data + * structures used by the replacement policy (or the pinned list, + * which is outside of the replacement policy), and add the + * entry to the protected list. * * Programmer: John Mainzer, 5/17/04 * *------------------------------------------------------------------------- */ -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, \ - (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list. */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* Similarly, remove the entry from the clean or dirty LRU list \ - * as appropriate. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - \ - } else { \ - \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ - /* End modified LRU specific code. */ \ - } \ - \ - /* Regardless of the replacement policy, or whether the entry is \ - * pinned, now add the entry to the protected list. 
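A small standalone sketch may help make the list bookkeeping above concrete. The H5C__DLL_* macros maintain doubly linked lists together with a length and an aggregate byte count, and protecting an entry amounts to unlinking it from the LRU list and placing it on the protected list so the replacement policy no longer sees it. The types and functions below are hypothetical simplifications, not the actual macros (head insertion is used for the protected list purely for brevity; the real macros can insert at either end):

#include <stdio.h>
#include <stddef.h>

typedef struct entry_t {
    const char     *name;
    size_t          size;
    struct entry_t *prev, *next;
} entry_t;

typedef struct list_t {
    entry_t *head, *tail;
    int      len;
    size_t   size;                       /* aggregate bytes of all entries on the list */
} list_t;

/* Unlink an entry and update the list's length and byte count */
static void
list_remove(list_t *l, entry_t *e)
{
    if (e->prev) e->prev->next = e->next; else l->head = e->next;
    if (e->next) e->next->prev = e->prev; else l->tail = e->prev;
    e->prev = e->next = NULL;
    l->len--;
    l->size -= e->size;
}

/* Insert an entry at the head of a list */
static void
list_prepend(list_t *l, entry_t *e)
{
    e->prev = NULL;
    e->next = l->head;
    if (l->head) l->head->prev = e; else l->tail = e;
    l->head = e;
    l->len++;
    l->size += e->size;
}

int
main(void)
{
    entry_t a = {"A", 100, NULL, NULL};
    entry_t b = {"B", 200, NULL, NULL};
    list_t  lru  = {NULL, NULL, 0, 0};
    list_t  prot = {NULL, NULL, 0, 0};

    list_prepend(&lru, &a);              /* LRU: A */
    list_prepend(&lru, &b);              /* LRU: B, A */

    /* "Protect" A: remove it from the LRU list, add it to the protected list */
    list_remove(&lru, &a);
    list_prepend(&prot, &a);

    printf("LRU: len=%d size=%zu  protected: len=%d size=%zu\n",
           lru.len, lru.size, prot.len, prot.size);
    return 0;
}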
\ - */ \ - \ - H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \ - (cache_ptr)->pl_tail_ptr, \ - (cache_ptr)->pl_len, \ - (cache_ptr)->pl_size, (fail_val)) \ -} /* H5C__UPDATE_RP_FOR_PROTECT */ - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - #define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ + HDassert(entry_ptr); \ + HDassert(!(entry_ptr)->is_protected); \ + HDassert(!(entry_ptr)->is_read_only); \ + HDassert((entry_ptr)->ro_ref_count == 0); \ + HDassert((entry_ptr)->size > 0); \ \ + if((entry_ptr)->is_pinned) { \ H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ (cache_ptr)->pel_tail_ptr, \ (cache_ptr)->pel_len, \ (cache_ptr)->pel_size, (fail_val)) \ - \ } else { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list. */ \ - \ + /* Remove the entry from the LRU list. */ \ H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ (cache_ptr)->LRU_tail_ptr, \ (cache_ptr)->LRU_list_len, \ (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* End modified LRU specific code. */ \ + /* Maintain the clean & dirty LRU lists, if enabled */ \ + H5C__UPDATE_RP_FOR_PROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val) \ } \ \ - /* Regardless of the replacement policy, or whether the entry is \ - * pinned, now add the entry to the protected list. \ + /* Regardless of whether the entry is pinned, add it to the protected \ + * list. \ */ \ - \ H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \ (cache_ptr)->pl_tail_ptr, \ (cache_ptr)->pl_len, \ (cache_ptr)->pl_size, (fail_val)) \ } /* H5C__UPDATE_RP_FOR_PROTECT */ -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - /*------------------------------------------------------------------------- * - * Macro: H5C__UPDATE_RP_FOR_MOVE + * Macro: H5C__UPDATE_RP_FOR_MOVE * * Purpose: Update the replacement policy data structures for a - * move of the specified cache entry. - * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. - * - * Return: N/A + * move of the specified cache entry. * * Programmer: John Mainzer, 5/17/04 * *------------------------------------------------------------------------- */ -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \ +#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ + HDassert(entry_ptr); \ + HDassert(!(entry_ptr)->is_read_only); \ + HDassert((entry_ptr)->ro_ref_count == 0); \ + HDassert((entry_ptr)->size > 0); \ \ - if ( ! ( (entry_ptr)->is_pinned ) && ! 
( ((entry_ptr)->is_protected ) ) ) {\ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the head. \ + if(!(entry_ptr)->is_pinned && !(entry_ptr)->is_protected) { \ + /* Remove the entry from the LRU list, and re-insert it at the head. \ */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ \ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ (cache_ptr)->LRU_tail_ptr, \ (cache_ptr)->LRU_list_len, \ (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* remove the entry from either the clean or dirty LUR list as \ - * indicated by the was_dirty parameter \ - */ \ - if ( was_dirty ) { \ - \ - H5C__AUX_DLL_REMOVE((entry_ptr), \ - (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, \ - (fail_val)) \ - \ - } else { \ - \ - H5C__AUX_DLL_REMOVE((entry_ptr), \ - (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, \ - (fail_val)) \ - } \ - \ - /* insert the entry at the head of either the clean or dirty \ - * LRU list as appropriate. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), \ - (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, \ - (fail_val)) \ - \ - } else { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), \ - (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, \ - (fail_val)) \ - } \ - \ - /* End modified LRU specific code. */ \ - } \ -} /* H5C__UPDATE_RP_FOR_MOVE */ - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( ! ( (entry_ptr)->is_pinned ) && ! ( ((entry_ptr)->is_protected ) ) ) {\ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the head. \ - */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - } \ + /* Maintain the clean & dirty LRU lists, if enabled */ \ + H5C__UPDATE_RP_FOR_MOVE_CD_LRU(cache_ptr, entry_ptr, was_dirty, fail_val) \ + } \ } /* H5C__UPDATE_RP_FOR_MOVE */ -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - /*------------------------------------------------------------------------- * - * Macro: H5C__UPDATE_RP_FOR_SIZE_CHANGE + * Macro: H5C__UPDATE_RP_FOR_SIZE_CHANGE * * Purpose: Update the replacement policy data structures for a - * size change of the specified cache entry. - * - * To do this, determine if the entry is pinned. 
If it is, - * update the size of the pinned entry list. + * size change of the specified cache entry. * - * If it isn't pinned, the entry must handled by the - * replacement policy. Update the appropriate replacement - * policy data structures. + * To do this, determine if the entry is pinned. If it is, + * update the size of the pinned entry list. * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. + * If it isn't pinned, the entry must handled by the + * replacement policy. Update the appropriate replacement + * policy data structures. * - * Return: N/A + * If the entry is accessed with collective operations for + * parallel I/O, update that list. * * Programmer: John Mainzer, 8/23/06 * *------------------------------------------------------------------------- */ -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS +#ifdef H5_HAVE_PARALLEL + +#define H5C__UPDATE_RP_FOR_SIZE_CHANGE_COLL(cache_ptr, entry_ptr, new_size, fail_val) \ +{ \ + if((entry_ptr)->coll_access) { \ + H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->coll_list_len, \ + (cache_ptr)->coll_list_size, \ + (entry_ptr)->size, \ + (new_size), (fail_val)); \ + \ + } \ +} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE_COLL() */ + +#else /* H5_HAVE_PARALLEL */ + +#define H5C__UPDATE_RP_FOR_SIZE_CHANGE_COLL(cache_ptr, entry_ptr, new_size, fail_val) + +#endif /* H5_HAVE_PARALLEL */ #define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - HDassert( new_size > 0 ); \ - \ - if ( (entry_ptr)->coll_access ) { \ - \ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->coll_list_len, \ - (cache_ptr)->coll_list_size, \ - (entry_ptr)->size, \ - (new_size), (fail_val)); \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ + HDassert(entry_ptr); \ + HDassert(!(entry_ptr)->is_protected); \ + HDassert(!(entry_ptr)->is_read_only); \ + HDassert((entry_ptr)->ro_ref_count == 0); \ + HDassert((entry_ptr)->size > 0 ); \ + HDassert(new_size > 0 ); \ \ - } \ - \ - if ( (entry_ptr)->is_pinned ) { \ + /* Maintain the collective access list, if enabled */ \ + H5C__UPDATE_RP_FOR_SIZE_CHANGE_COLL(cache_ptr, entry_ptr, new_size, fail_val) \ \ + if((entry_ptr)->is_pinned) { \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \ (cache_ptr)->pel_size, \ (entry_ptr)->size, \ - (new_size), (fail_val)); \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* Update the size of the LRU list */ \ - \ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, \ - (entry_ptr)->size, \ - (new_size), (fail_val)); \ - \ - /* Similarly, update the size of the clean or dirty LRU list as \ - * appropriate. At present, the entry must be clean, but that \ - * could change. 
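In the size-change case in particular, the per-list bookkeeping reduces to shifting each affected list's aggregate byte count by the difference between the old and new entry sizes. The snippet below is a hedged stand-in for what H5C__DLL_UPDATE_FOR_SIZE_CHANGE accomplishes; the helper name and the numbers are illustrative only:

#include <stdio.h>
#include <stddef.h>

/* Adjust a list's aggregate byte count when one of its entries changes size.
 * The list length is unchanged; the total simply moves by (new_size - old_size).
 */
static void
update_for_size_change(size_t *list_size, size_t old_size, size_t new_size)
{
    *list_size -= old_size;
    *list_size += new_size;
}

int
main(void)
{
    size_t lru_list_size = 4096;                      /* bytes currently on the LRU list */

    update_for_size_change(&lru_list_size, 512, 768); /* an entry grew from 512 to 768 bytes */

    printf("LRU list size is now %zu bytes\n", lru_list_size);    /* prints 4352 */
    return 0;
}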
\ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, \ - (entry_ptr)->size, \ - (new_size), (fail_val)); \ - \ - } else { \ - \ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, \ - (entry_ptr)->size, \ - (new_size), (fail_val)); \ - } \ - \ - /* End modified LRU specific code. */ \ - } \ - \ -} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */ - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - HDassert( new_size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, \ - (entry_ptr)->size, \ - (new_size), (fail_val)); \ - \ + (new_size), (fail_val)) \ } else { \ - \ - /* modified LRU specific code */ \ - \ /* Update the size of the LRU list */ \ - \ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \ (cache_ptr)->LRU_list_size, \ (entry_ptr)->size, \ - (new_size), (fail_val)); \ + (new_size), (fail_val)) \ \ - /* End modified LRU specific code. */ \ + /* Maintain the clean & dirty LRU lists, if enabled */ \ + H5C__UPDATE_RP_FOR_SIZE_CHANGE_CD_LRU(cache_ptr, entry_ptr, new_size, fail_val) \ } \ - \ } /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */ -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - /*------------------------------------------------------------------------- * - * Macro: H5C__UPDATE_RP_FOR_UNPIN + * Macro: H5C__UPDATE_RP_FOR_UNPIN * * Purpose: Update the replacement policy data structures for an - * unpin of the specified cache entry. - * - * To do this, unlink the specified entry from the protected - * entry list, and re-insert it in the data structures used - * by the current replacement policy. + * unpin of the specified cache entry. * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the macro - * should switch on the current policy and act accordingly. - * - * Return: N/A + * To do this, unlink the specified entry from the pinned + * entry list, and re-insert it in the data structures used + * by the current replacement policy. 
* * Programmer: John Mainzer, 3/22/06 * *------------------------------------------------------------------------- */ -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - #define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->is_pinned); \ - HDassert( (entry_ptr)->size > 0 ); \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ + HDassert(entry_ptr); \ + HDassert(!(entry_ptr)->is_protected); \ + HDassert(!(entry_ptr)->is_read_only); \ + HDassert((entry_ptr)->ro_ref_count == 0 ); \ + HDassert((entry_ptr)->is_pinned); \ + HDassert((entry_ptr)->size > 0); \ \ /* Regardless of the replacement policy, remove the entry from the \ * pinned entry list. \ @@ -2241,169 +1729,40 @@ if ( ( (cache_ptr)->index_size != \ (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \ (cache_ptr)->pel_size, (fail_val)) \ \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. */ \ - \ + /* Insert the entry at the head of the LRU list. */ \ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ (cache_ptr)->LRU_tail_ptr, \ (cache_ptr)->LRU_list_len, \ (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* Similarly, insert the entry at the head of either the clean \ - * or dirty LRU list as appropriate. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), \ - (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, \ - (fail_val)) \ - \ - } else { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), \ - (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, \ - (fail_val)) \ - } \ - \ - /* End modified LRU specific code. */ \ - \ -} /* H5C__UPDATE_RP_FOR_UNPIN */ - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->is_pinned); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - /* Regardless of the replacement policy, remove the entry from the \ - * pinned entry list. \ - */ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. */ \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. 
*/ \ - \ + /* Maintain the clean & dirty LRU lists, if enabled */ \ + H5C__UPDATE_RP_FOR_UNPIN_CD_LRU(cache_ptr, entry_ptr, fail_val) \ } /* H5C__UPDATE_RP_FOR_UNPIN */ -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - /*------------------------------------------------------------------------- * - * Macro: H5C__UPDATE_RP_FOR_UNPROTECT + * Macro: H5C__UPDATE_RP_FOR_UNPROTECT * * Purpose: Update the replacement policy data structures for an - * unprotect of the specified cache entry. - * - * To do this, unlink the specified entry from the protected - * list, and re-insert it in the data structures used by the - * current replacement policy. + * unprotect of the specified cache entry. * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. - * - * Return: N/A + * To do this, unlink the specified entry from the protected + * list, and re-insert it in the data structures used by the + * current replacement policy. * * Programmer: John Mainzer, 5/19/04 * *------------------------------------------------------------------------- */ -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( (entry_ptr)->is_protected); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - /* Regardless of the replacement policy, remove the entry from the \ - * protected list. \ - */ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \ - (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ - (cache_ptr)->pl_size, (fail_val)) \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, \ - (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. */ \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* Similarly, insert the entry at the head of either the clean or \ - * dirty LRU list as appropriate. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - \ - } else { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ - /* End modified LRU specific code. */ \ - } \ - \ -} /* H5C__UPDATE_RP_FOR_UNPROTECT */ - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - #define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( (entry_ptr)->is_protected); \ - HDassert( (entry_ptr)->size > 0 ); \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ + HDassert(entry_ptr); \ + HDassert((entry_ptr)->is_protected); \ + HDassert((entry_ptr)->size > 0); \ \ /* Regardless of the replacement policy, remove the entry from the \ * protected list. 
\ @@ -2412,29 +1771,23 @@ if ( ( (cache_ptr)->index_size != \ (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ (cache_ptr)->pl_size, (fail_val)) \ \ - if ( (entry_ptr)->is_pinned ) { \ - \ + if((entry_ptr)->is_pinned) { \ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ (cache_ptr)->pel_tail_ptr, \ (cache_ptr)->pel_len, \ (cache_ptr)->pel_size, (fail_val)) \ - \ } else { \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. */ \ - \ + /* Insert the entry at the head of the LRU list. */ \ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ (cache_ptr)->LRU_tail_ptr, \ (cache_ptr)->LRU_list_len, \ (cache_ptr)->LRU_list_size, (fail_val)) \ \ - /* End modified LRU specific code. */ \ + /* Maintain the clean & dirty LRU lists, if enabled */ \ + H5C__UPDATE_RP_FOR_UNPROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val) \ } \ } /* H5C__UPDATE_RP_FOR_UNPROTECT */ -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ #ifdef H5_HAVE_PARALLEL @@ -2447,12 +1800,10 @@ if ( ( (cache_ptr)->index_size != \ /*------------------------------------------------------------------------- * - * Macro: H5C__INSERT_IN_COLL_LIST + * Macro: H5C__INSERT_IN_COLL_LIST * * Purpose: Insert entry into collective entries list * - * Return: N/A - * * Programmer: Mohamad Chaarawi * *------------------------------------------------------------------------- @@ -2460,29 +1811,24 @@ if ( ( (cache_ptr)->index_size != \ #define H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - \ - /* insert the entry at the head of the list. */ \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert(entry_ptr); \ \ + /* Insert the entry at the head of the list. */ \ H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr, \ (cache_ptr)->coll_tail_ptr, \ (cache_ptr)->coll_list_len, \ - (cache_ptr)->coll_list_size, \ - (fail_val)) \ - \ + (cache_ptr)->coll_list_size, (fail_val)) \ } /* H5C__INSERT_IN_COLL_LIST */ /*------------------------------------------------------------------------- * - * Macro: H5C__REMOVE_FROM_COLL_LIST + * Macro: H5C__REMOVE_FROM_COLL_LIST * * Purpose: Remove entry from collective entries list * - * Return: N/A - * * Programmer: Mohamad Chaarawi * *------------------------------------------------------------------------- @@ -2490,29 +1836,24 @@ if ( ( (cache_ptr)->index_size != \ #define H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - \ - /* remove the entry from the list. */ \ + HDassert(cache_ptr); \ + HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ + HDassert(entry_ptr); \ \ + /* Remove the entry from the list. 
*/ \ H5C__COLL_DLL_REMOVE((entry_ptr), (cache_ptr)->coll_head_ptr, \ (cache_ptr)->coll_tail_ptr, \ (cache_ptr)->coll_list_len, \ - (cache_ptr)->coll_list_size, \ - (fail_val)) \ - \ + (cache_ptr)->coll_list_size, (fail_val)) \ } /* H5C__REMOVE_FROM_COLL_LIST */ /*------------------------------------------------------------------------- * - * Macro: H5C__MOVE_TO_TOP_IN_COLL_LIST + * Macro: H5C__MOVE_TO_TOP_IN_COLL_LIST * * Purpose: Update entry position in collective entries list * - * Return: N/A - * * Programmer: Mohamad Chaarawi * *------------------------------------------------------------------------- @@ -2520,22 +1861,20 @@ if ( ( (cache_ptr)->index_size != \ #define H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, fail_val) \ { \ - HDassert((cache_ptr)); \ + HDassert(cache_ptr); \ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ - HDassert((entry_ptr)); \ + HDassert(entry_ptr); \ \ /* Remove entry and insert at the head of the list. */ \ H5C__COLL_DLL_REMOVE((entry_ptr), (cache_ptr)->coll_head_ptr, \ (cache_ptr)->coll_tail_ptr, \ (cache_ptr)->coll_list_len, \ - (cache_ptr)->coll_list_size, \ - (fail_val)) \ + (cache_ptr)->coll_list_size, (fail_val)) \ \ H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr, \ (cache_ptr)->coll_tail_ptr, \ (cache_ptr)->coll_list_len, \ - (cache_ptr)->coll_list_size, \ - (fail_val)) \ + (cache_ptr)->coll_list_size, (fail_val)) \ \ } /* H5C__MOVE_TO_TOP_IN_COLL_LIST */ #endif /* H5_HAVE_PARALLEL */ @@ -2580,7 +1919,7 @@ typedef struct H5C_tag_info_t { hbool_t corked; /* Whether this object is corked */ /* Hash table fields */ - UT_hash_handle hh; /* Hash table handle (must be LAST) */ + UT_hash_handle hh; /* Hash table handle (must be LAST) */ } H5C_tag_info_t; @@ -2588,7 +1927,7 @@ typedef struct H5C_tag_info_t { * * structure H5C_t * - * Catchall structure for all variables specific to an instance of the cache. + * Structure for all information specific to an instance of the cache. * * While the cache was designed with multiple replacement policies in mind, * at present only a modified form of LRU is supported. @@ -2598,13 +1937,11 @@ typedef struct H5C_tag_info_t { * is used to track dirty entries. * * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. - * This field is used to validate pointers to instances of - * H5C_t. + * This field is used to validate pointers to instances of H5C_t. * - * flush_in_progress: Boolean flag indicating whether a flush is in - * progress. + * flush_in_progress: Boolean flag indicating whether a flush is in progress. * - * log_info: Information used by the MDC logging functionality. + * log_info: Information used by the cache logging functionality. * Described in H5Clog.h. * * aux_ptr: Pointer to void used to allow wrapper code to associate @@ -2613,29 +1950,27 @@ typedef struct H5C_tag_info_t { * * max_type_id: Integer field containing the maximum type id number assigned * to a type of entry in the cache. All type ids from 0 to - * max_type_id inclusive must be defined. The names of the - * types are stored in the type_name_table discussed below, and - * indexed by the ids. + * max_type_id inclusive must be defined. * * class_table_ptr: Pointer to an array of H5C_class_t of length - * max_type_id + 1. Entry classes for the cache. + * max_type_id + 1. Entry classes for the cache. * * max_cache_size: Nominal maximum number of bytes that may be stored in the - * cache. 
This value should be viewed as a soft limit, as the - * cache can exceed this value under the following circumstances: + * cache. This value should be viewed as a soft limit, as the + * cache can exceed this value under the following circumstances: * - * a) All entries in the cache are protected, and the cache is - * asked to insert a new entry. In this case the new entry - * will be created. If this causes the cache to exceed - * max_cache_size, it will do so. The cache will attempt - * to reduce its size as entries are unprotected. + * a) All entries in the cache are protected, and the cache is + * asked to insert a new entry. In this case the new entry + * will be created. If this causes the cache to exceed + * max_cache_size, it will do so. The cache will attempt + * to reduce its size as entries are unprotected. * - * b) When running in parallel mode, the cache may not be + * b) When running in parallel mode, the cache may not be * permitted to flush a dirty entry in response to a read. * If there are no clean entries available to evict, the * cache will exceed its maximum size. Again the cache - * will attempt to reduce its size to the max_cache_size - * limit on the next cache write. + * will attempt to reduce its size to the max_cache_size + * limit on the next cache write. * * c) When an entry increases in size, the cache may exceed * the max_cache_size limit until the next time the cache @@ -2646,16 +1981,15 @@ typedef struct H5C_tag_info_t { * field is set to true. * * min_clean_size: Nominal minimum number of clean bytes in the cache. - * The cache attempts to maintain this number of bytes of - * clean data so as to avoid case b) above. Again, this is - * a soft limit. + * The cache attempts to maintain this number of bytes of clean + * data so as to avoid case b) above. Again, this is a soft limit. * * close_warning_received: Boolean flag indicating that a file closing * warning has been received. * * - * In addition to the call back functions required for each entry, the - * cache requires the following call back functions for this instance of + * In addition to the callback functions required for each entry's class, + * the cache requires the following callback functions for an instance of * the cache as a whole: * * check_write_permitted: In certain applications, the cache may not @@ -2690,7 +2024,7 @@ typedef struct H5C_tag_info_t { * following fields support that index. * * We sometimes need to visit all entries in the cache, they are stored in - * the index list. + * an index list. * * The index list is maintained by the same macros that maintain the * index, and must have the same length and size as the index proper. @@ -2699,13 +2033,13 @@ typedef struct H5C_tag_info_t { * the cache. * * index_size: Number of bytes of cache entries currently stored in the - * hash table used to index the cache. + * hash table used to index the cache. * - * This value should not be mistaken for footprint of the - * cache in memory. The average cache entry is small, and - * the cache has a considerable overhead. Multiplying the - * index_size by three should yield a conservative estimate - * of the cache's memory footprint. + * This value should not be mistaken for footprint of the + * cache in memory. The average cache entry is small, and + * the cache has a considerable overhead. Multiplying the + * index_size by three should yield a conservative estimate + * of the cache's memory footprint. 
* * index_ring_len: Array of integer of length H5C_RING_NTYPES used to * maintain a count of entries in the index by ring. Note @@ -2718,7 +2052,7 @@ typedef struct H5C_tag_info_t { * equal the value stored in index_size above. * * clean_index_size: Number of bytes of clean entries currently stored in - * the hash table. Note that the index_size field (above) + * the hash table. Note that the index_size field (above) * is also the sum of the sizes of all entries in the cache. * Thus we should have the invariant that clean_index_size + * dirty_index_size == index_size. @@ -2735,7 +2069,7 @@ typedef struct H5C_tag_info_t { * must equal the value stored in clean_index_size above. * * dirty_index_size: Number of bytes of dirty entries currently stored in - * the hash table. Note that the index_size field (above) + * the hash table. Note that the index_size field (above) * is also the sum of the sizes of all entries in the cache. * Thus we should have the invariant that clean_index_size + * dirty_index_size == index_size. @@ -2745,7 +2079,7 @@ typedef struct H5C_tag_info_t { * index by ring. Note that the sum of all cells in this array * must equal the value stored in dirty_index_size above. * - * index: Array of pointer to H5C_cache_entry_t of size + * index: Array of pointers to H5C_cache_entry_t of size * H5C__HASH_TABLE_LEN. At present, this value is a power * of two, not the usual prime number. * @@ -2760,7 +2094,7 @@ typedef struct H5C_tag_info_t { * changing the H5C__HASH_FCN macro and the deletion of the * H5C__HASH_MASK #define. No other changes should be required. * - * il_len: Number of entries on the index list. + * il_len: Number of entries on the index list. * * This must always be equal to index_len. As such, this * field is redundant. However, the existing linked list @@ -2768,7 +2102,7 @@ typedef struct H5C_tag_info_t { * this field exists primarily to avoid adding complexity to * these macros. * - * il_size: Number of bytes of cache entries currently stored in the + * il_size: Number of bytes of cache entries currently stored in the * index list. * * This must always be equal to index_size. As such, this @@ -2777,17 +2111,17 @@ typedef struct H5C_tag_info_t { * this field exists primarily to avoid adding complexity to * these macros. * - * il_head: Pointer to the head of the doubly linked list of entries in - * the index list. Note that cache entries on this list are + * il_head: Pointer to the head of the doubly linked list of entries in + * the index list. Note that cache entries on this list are * linked by their il_next and il_prev fields. * - * This field is NULL if the index is empty. + * This field is NULL if the index is empty. * - * il_tail: Pointer to the tail of the doubly linked list of entries in - * the index list. Note that cache entries on this list are - * linked by their il_next and il_prev fields. + * il_tail: Pointer to the tail of the doubly linked list of entries in + * the index list. Note that cache entries on this list are + * linked by their il_next and il_prev fields. * - * This field is NULL if the index is empty. + * This field is NULL if the index is empty. * * * It is possible that an entry may be removed from the cache as the result @@ -2799,7 +2133,7 @@ typedef struct H5C_tag_info_t { * * The following fields are maintained to facilitate this. 
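/* Editorial note -- illustrative sketch only, not part of this patch.  It
 * shows how a list scan might consult the two fields introduced above and
 * described next (entries_removed_counter and last_entry_removed_ptr) to
 * decide whether its saved "next" pointer can still be trusted.  The helper
 * name is hypothetical; the test mirrors the field descriptions below.
 */
static hbool_t
H5C__example_scan_must_restart(const H5C_t *cache_ptr, const H5C_cache_entry_t *next_entry_ptr)
{
    /* More than one removal since the counter was zeroed, or exactly one
     * removal that took out the entry we were about to visit, means the
     * scan can no longer trust its notion of "next" and must restart.
     */
    return (hbool_t)((cache_ptr->entries_removed_counter > 1) ||
                     (cache_ptr->entries_removed_counter == 1 &&
                      cache_ptr->last_entry_removed_ptr == next_entry_ptr));
}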
* - * entries_removed_counter: Counter that is incremented each time an + * entries_removed_counter: Counter that is incremented each time an * entry is removed from the cache by any means (eviction, * expungement, or take ownership at this point in time). * Functions that perform scans on lists may set this field @@ -2807,7 +2141,7 @@ typedef struct H5C_tag_info_t { * Unexpected changes to the counter indicate that an entry * was removed from the cache as a side effect of the flush. * - * last_entry_removed_ptr: Pointer to the instance of H5C_cache_entry_t + * last_entry_removed_ptr: Pointer to the instance of H5C_cache_entry_t * which contained the last entry to be removed from the cache, * or NULL if there either is no such entry, or if a function * performing a scan of a list has set this field to NULL prior @@ -2817,8 +2151,7 @@ typedef struct H5C_tag_info_t { * maintained to allow functions that perform scans of lists * to compare this pointer with their pointers to next, thus * allowing them to avoid unnecessary restarts of scans if the - * pointers don't match, and if entries_removed_counter is - * one. + * pointers don't match, and if entries_removed_counter is one. * * entry_watched_for_removal: Pointer to an instance of H5C_cache_entry_t * which contains the 'next' entry for an iteration. Removing @@ -2853,50 +2186,47 @@ typedef struct H5C_tag_info_t { * the next flush or close. * * slist_enabled: Boolean flag used to control operation of the skip - * list. If this filed is FALSE, operations on the - * slist are no-ops, and the slist must be empty. If - * it is TRUE, operations on the slist proceed as usual, - * and all dirty entries in the metadata cache must be - * listed in the slist. + * list. If this filed is FALSE, operations on the slist are + * no-ops, and the slist must be empty. If it is TRUE, + * operations on the skip list proceed as usual, and all dirty + * entries in the metadata cache must be listed in the skip list. * * slist_changed: Boolean flag used to indicate whether the contents of - * the slist has changed since the last time this flag was + * the skip list has changed since the last time this flag was * reset. This is used in the cache flush code to detect * conditions in which pre-serialize or serialize callbacks - * have modified the slist -- which obliges us to restart - * the scan of the slist from the beginning. + * have modified the skip list -- which obliges us to restart + * the scan of the skip list from the beginning. * - * slist_len: Number of entries currently in the skip list - * used to maintain a sorted list of dirty entries in the - * cache. + * slist_len: Number of entries currently in the skip list. Used to + * maintain a sorted list of dirty entries in the cache. * - * slist_size: Number of bytes of cache entries currently stored in the - * skip list used to maintain a sorted list of - * dirty entries in the cache. + * slist_size: Number of bytes of cache entries currently stored in the + * skip list used to maintain a sorted list of dirty entries in + * the cache. * * slist_ring_len: Array of integer of length H5C_RING_NTYPES used to - * maintain a count of entries in the slist by ring. Note + * maintain a count of entries in the skip list by ring. Note * that the sum of all the cells in this array must equal * the value stored in slist_len above. * * slist_ring_size: Array of size_t of length H5C_RING_NTYPES used to - * maintain the sum of the sizes of all entries in the - * slist by ring. 
Note that the sum of all cells in this - * array must equal the value stored in slist_size above. + * maintain the sum of the sizes of all entries in the skip list + * by ring. Note that the sum of all cells in this array must + * equal the value stored in slist_size above. * - * slist_ptr: pointer to the instance of H5SL_t used maintain a sorted - * list of dirty entries in the cache. This sorted list has - * two uses: + * slist_ptr: Pointer to the instance of H5SL_t used maintain a sorted + * list of dirty entries in the cache. This sorted list has + * two uses: * - * a) It allows us to flush dirty entries in increasing address - * order, which results in significant savings. + * a) It allows us to flush dirty entries in increasing address + * order, which results in significant savings. * - * b) It facilitates checking for adjacent dirty entries when - * attempting to evict entries from the cache. + * b) It facilitates checking for adjacent dirty entries when + * attempting to evict entries from the cache. * * num_last_entries: The number of entries in the cache that can only be - * flushed after all other entries in the cache have - * been flushed. + * flushed after all other entries in the cache have been flushed. * * Note: At this time, the this field will only be applied to * two types of entries: the superblock and the file driver info @@ -2909,11 +2239,11 @@ typedef struct H5C_tag_info_t { * compiled in when H5C_DO_SANITY_CHECKS is TRUE. * * slist_len_increase: Number of entries that have been added to the - * slist since the last time this field was set to zero. + * skip list since the last time this field was set to zero. * Note that this value can be negative. * * slist_size_increase: Total size of all entries that have been added - * to the slist since the last time this field was set to + * to the skip list since the last time this field was set to * zero. Note that this value can be negative. * * Cache entries belonging to a particular object are "tagged" with that @@ -2921,71 +2251,69 @@ typedef struct H5C_tag_info_t { * * The following fields are maintained to facilitate this. * - * tag_list: A collection to track entries that belong to an object. - * Each H5C_tag_info_t struct on the tag list corresponds to - * a particular object in the file. Tagged entries can be - * flushed or evicted as a group, or corked to prevent entries - * from being evicted from the cache. + * tag_list: A collection to track entries that belong to an object. + * Each H5C_tag_info_t struct on the tag list corresponds to a + * particular object in the file. Tagged entries can be flushed + * or evicted as a group, or corked to prevent entries from being + * evicted from the cache. * - * "Global" entries, like the superblock and the file's - * freelist, as well as shared entries like global - * heaps and shared object header messages, are not tagged. + * "Global" entries, like the superblock and the file's freelist, + * as well as shared entries like global heaps and shared object + * header messages, are not tagged. * - * ignore_tags: Boolean flag to disable tag validation during entry insertion. + * ignore_tags: Boolean flag to disable tag validation during entry insertion. * * num_objs_corked: Unsigned integer field containing the number of objects - * that are "corked". The "corked" status of an object is - * found by searching the "tag_list". This field is added - * for optimization so that the skip list search on "tag_list" - * can be skipped if this field is zero, i.e. 
no "corked" - * objects. + * that are "corked". The "corked" status of an object is found by + * searching the "tag_list". This field is added for optimization + * so that the skip list search on "tag_list" can be skipped if this + * field is zero, i.e. no "corked" objects. * * When a cache entry is protected, it must be removed from the LRU - * list(s) as it cannot be either flushed or evicted until it is unprotected. + * list(s), as it cannot be either flushed or evicted until it is unprotected. * The following fields are used to implement the protected list (pl). * - * pl_len: Number of entries currently residing on the protected list. + * pl_len: Number of entries currently residing on the protected list. * - * pl_size: Number of bytes of cache entries currently residing on the - * protected list. + * pl_size: Number of bytes of cache entries currently residing on the + * protected list. * * pl_head_ptr: Pointer to the head of the doubly linked list of protected - * entries. Note that cache entries on this list are linked - * by their next and prev fields. + * entries. Note that cache entries on this list are linked + * by their next and prev fields. * - * This field is NULL if the list is empty. + * This field is NULL if the list is empty. * * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected - * entries. Note that cache entries on this list are linked - * by their next and prev fields. + * entries. Note that cache entries on this list are linked + * by their next and prev fields. * - * This field is NULL if the list is empty. + * This field is NULL if the list is empty. * * * For very frequently used entries, the protect/unprotect overhead can - * become burdensome. To avoid this overhead, the cache - * allows entries to be "pinned". A pinned entry is similar to a - * protected entry, in the sense that it cannot be evicted, and that - * the entry can be modified at any time. + * become burdensome. To avoid this overhead, the cache allows entries to + * be "pinned". A pinned entry is similar to a protected entry, in the + * sense that it cannot be evicted, and that the entry can be modified at + * any time. * * Pinning an entry has the following implications: * - * 1) A pinned entry cannot be evicted. Thus unprotected - * pinned entries reside in the pinned entry list, instead - * of the LRU list(s) (or other lists maintained by the current - * replacement policy code). + * 1) A pinned entry cannot be evicted. Thus unprotected pinned + * entries reside in the pinned entry list, instead of the LRU + * list(s) or other lists maintained by the current replacement + * policy code. * * 2) A pinned entry can be accessed or modified at any time. * This places an additional burden on the associated pre-serialize - * and serialize callbacks, which must ensure the entry is in - * a consistent state before creating an image of it. + * and serialize callbacks, which must ensure the entry is in + * a consistent state before creating an image of it. * * 3) A pinned entry can be marked as dirty (and possibly * change size) while it is unprotected. * * 4) The flush-destroy code must allow pinned entries to - * be unpinned (and possibly unprotected) during the - * flush. + * be unpinned (and possibly unprotected) during the flush. 
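/* Editorial note -- illustrative sketch only, not part of this patch.  It
 * restates implication 1) above: protected entries reside on the protected
 * list, unprotected pinned entries reside on the pinned entry list, and all
 * other entries are left to the replacement policy (the LRU list(s)).  The
 * enum and helper are hypothetical; is_protected and is_pinned are fields
 * of H5C_cache_entry_t.
 */
typedef enum H5C_example_list_t {
    H5C_EXAMPLE_ON_PL,  /* protected list */
    H5C_EXAMPLE_ON_PEL, /* pinned entry list */
    H5C_EXAMPLE_ON_LRU  /* LRU list(s) */
} H5C_example_list_t;

static H5C_example_list_t
H5C__example_expected_list(const H5C_cache_entry_t *entry_ptr)
{
    if (entry_ptr->is_protected)
        return H5C_EXAMPLE_ON_PL;
    else if (entry_ptr->is_pinned)
        return H5C_EXAMPLE_ON_PEL;
    else
        return H5C_EXAMPLE_ON_LRU;
}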
* * Since pinned entries cannot be evicted, they must be kept on a pinned * entry list (pel), instead of being entrusted to the replacement policy @@ -2993,23 +2321,22 @@ typedef struct H5C_tag_info_t { * * Maintaining the pinned entry list requires the following fields: * - * pel_len: Number of entries currently residing on the pinned - * entry list. + * pel_len: Number of entries currently residing on the pinned entry list. * - * pel_size: Number of bytes of cache entries currently residing on + * pel_size: Number of bytes of cache entries currently residing on * the pinned entry list. * * pel_head_ptr: Pointer to the head of the doubly linked list of pinned * but not protected entries. Note that cache entries on * this list are linked by their next and prev fields. * - * This field is NULL if the list is empty. + * This field is NULL if the list is empty. * * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned * but not protected entries. Note that cache entries on * this list are linked by their next and prev fields. * - * This field is NULL if the list is empty. + * This field is NULL if the list is empty. * * * The cache must have a replacement policy, and the fields supporting this @@ -3033,10 +2360,9 @@ typedef struct H5C_tag_info_t { * * When reading in parallel mode, we evict from the clean LRU list only. * This implies that we must try to ensure that the clean LRU list is - * reasonably well stocked at all times. - * - * We attempt to do this by trying to flush enough entries on each write - * to keep the cLRU_list_size >= min_clean_size. + * reasonably well stocked at all times. We attempt to do this by trying + * to flush enough entries on each write to keep the cLRU_list_size >= + * min_clean_size. * * Even if we start with a completely clean cache, a sequence of protects * without unprotects can empty the clean LRU list. In this case, the @@ -3050,14 +2376,14 @@ typedef struct H5C_tag_info_t { * * LRU_list_len: Number of cache entries currently on the LRU list. * - * Observe that LRU_list_len + pl_len + pel_len must always - * equal index_len. + * The LRU_list_len + pl_len + pel_len must always + * equal index_len. * * LRU_list_size: Number of bytes of cache entries currently residing on the * LRU list. * - * Observe that LRU_list_size + pl_size + pel_size must always - * equal index_size. + * The LRU_list_size + pl_size + pel_size must always + * equal index_size. * * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache * entries on this list are linked by their next and prev fields. @@ -3071,13 +2397,13 @@ typedef struct H5C_tag_info_t { * * cLRU_list_len: Number of cache entries currently on the clean LRU list. * - * Observe that cLRU_list_len + dLRU_list_len must always + * The cLRU_list_len + dLRU_list_len must always * equal LRU_list_len. * * cLRU_list_size: Number of bytes of cache entries currently residing on * the clean LRU list. * - * Observe that cLRU_list_size + dLRU_list_size must always + * The cLRU_list_size + dLRU_list_size must always * equal LRU_list_size. * * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list. @@ -3094,12 +2420,12 @@ typedef struct H5C_tag_info_t { * * dLRU_list_len: Number of cache entries currently on the dirty LRU list. * - * Observe that cLRU_list_len + dLRU_list_len must always + * The cLRU_list_len + dLRU_list_len must always * equal LRU_list_len. * * dLRU_list_size: Number of cache entries currently on the dirty LRU list. 
* - * Observe that cLRU_list_len + dLRU_list_len must always + * The cLRU_list_len + dLRU_list_len must always * equal LRU_list_len. * * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list. @@ -3117,8 +2443,8 @@ typedef struct H5C_tag_info_t { * * Automatic cache size adjustment: * - * While the default cache size is adequate for most cases, we can run into - * cases where the default is too small. Ideally, we will let the user + * While the default cache size is adequate for many cases, there are + * cases where the default is too small. Ideally, the user should * adjust the cache size as required. However, this is not possible in all * cases, so the cache has automatic cache size adjustment code. * @@ -3126,36 +2452,32 @@ typedef struct H5C_tag_info_t { * the structure described below: * * size_increase_possible: Depending on the configuration data given - * in the resize_ctl field, it may or may not be possible - * to increase the size of the cache. Rather than test for - * all the ways this can happen, we simply set this flag when - * we receive a new configuration. + * in the resize_ctl field, it may or may not be possible to + * increase the size of the cache. Rather than test for all the + * ways this can happen, we simply set this flag when we receive + * a new configuration. * * flash_size_increase_possible: Depending on the configuration data given - * in the resize_ctl field, it may or may not be possible - * for a flash size increase to occur. We set this flag - * whenever we receive a new configuration so as to avoid - * repeated calculations. + * in the resize_ctl field, it may or may not be possible for a + * flash size increase to occur. We set this flag whenever we + * receive a new configuration so as to avoid repeated calculations. * * flash_size_increase_threshold: If a flash cache size increase is possible, - * this field is used to store the minimum size of a new entry - * or size increase needed to trigger a flash cache size - * increase. Note that this field must be updated whenever - * the size of the cache is changed. + * this field is used to store the minimum size of a new entry or size + * increase needed to trigger a flash cache size increase. Note that + * this field must be updated whenever the size of the cache is changed. * - * size_decrease_possible: Depending on the configuration data given - * in the resize_ctl field, it may or may not be possible - * to decrease the size of the cache. Rather than test for - * all the ways this can happen, we simply set this flag when - * we receive a new configuration. + * size_decrease_possible: Depending on the configuration data given in the + * resize_ctl field, it may or may not be possible to decrease the + * size of the cache. Rather than test for all the ways this can + * happen, we simply set this flag when we receive a new configuration. * * resize_enabled: This is another convenience flag which is set whenever - * a new set of values for resize_ctl are provided. Very - * simply: + * a new set of values for resize_ctl are provided. Very simply: * * resize_enabled = size_increase_possible || size_decrease_possible; * - * cache_full: Boolean flag used to keep track of whether the cache is + * cache_full: Boolean flag used to keep track of whether the cache is * full, so we can refrain from increasing the size of a * cache which hasn't used up the space allotted to it. * @@ -3177,18 +2499,16 @@ typedef struct H5C_tag_info_t { * occur. 
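/* Editorial note -- illustrative sketch only, not part of this patch.  It
 * shows how the convenience flags described above could be derived when a
 * new configuration is accepted.  The helper name is hypothetical; the
 * resize_enabled relationship is quoted from the comment above.
 */
static void
H5C__example_update_resize_flags(H5C_t *cache_ptr, hbool_t increase_possible, hbool_t decrease_possible)
{
    cache_ptr->size_increase_possible = increase_possible;
    cache_ptr->size_decrease_possible = decrease_possible;

    /* resize_enabled = size_increase_possible || size_decrease_possible */
    cache_ptr->resize_enabled = increase_possible || decrease_possible;
}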
* * msic_in_progress: As the metadata cache has become re-entrant, and as - * the free space manager code has become more tightly - * integrated with the metadata cache, it is possible that - * a call to H5C_insert_entry() may trigger a call to - * H5C_make_space_in_cache(), which, via H5C__flush_single_entry() - * and client callbacks, may trigger an infinite regression - * of calls to H5C_make_space_in_cache(). - * - * The msic_in_progress boolean flag is used to detect this, - * and prevent the infinite regression that would otherwise - * occur. - * - * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration + * the free space manager code has become more tightly integrated + * with the metadata cache, it is possible that a call to + * H5C_insert_entry() may trigger a call to H5C_make_space_in_cache(), + * which, via H5C__flush_single_entry() and client callbacks, may + * trigger an infinite regression of calls to H5C_make_space_in_cache(). + * + * The msic_in_progress boolean flag is used to detect this, + * and prevent the infinite regression that would otherwise occur. + * + * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration * data for automatic cache resizing. * * epoch_markers_active: Integer field containing the number of epoch @@ -3221,27 +2541,24 @@ typedef struct H5C_tag_info_t { * in the ring buffer. * * epoch_markers: Array of instances of H5C_cache_entry_t of length - * H5C__MAX_EPOCH_MARKERS. The entries are used as markers - * in the LRU list to identify cache entries that haven't - * been accessed for some (small) specified number of - * epochs. These entries (if any) can then be evicted and - * the cache size reduced -- ideally without evicting any - * of the current working set. Needless to say, the epoch - * length and the number of epochs before an unused entry - * must be chosen so that all, or almost all, the working - * set will be accessed before the limit. - * - * Epoch markers only appear in the LRU list, never in - * the index or slist. While they are of type - * H5C__EPOCH_MARKER_TYPE, and have associated class - * functions, these functions should never be called. - * - * The addr fields of these instances of H5C_cache_entry_t - * are set to the index of the instance in the epoch_markers - * array, the size is set to 0, and the type field points - * to the constant structure epoch_marker_class defined - * in H5C.c. The next and prev fields are used as usual - * to link the entry into the LRU list. + * H5C__MAX_EPOCH_MARKERS. The entries are used as markers in the + * LRU list to identify cache entries that haven't been accessed for + * some (small) specified number of epochs. These entries (if any) + * can then be evicted and the cache size reduced -- ideally without + * evicting any of the current working set. Needless to say, the epoch + * length and the number of epochs before an unused entry must be + * chosen so that all, or almost all, the working set will be accessed + * before the limit. + * + * Epoch markers only appear in the LRU list, never in the index or + * skip list. While they are of type H5C__EPOCH_MARKER_TYPE, and have + * associated class functions, these functions should never be called. + * + * The addr fields of these instances of H5C_cache_entry_t are set to + * the index of the instance in the epoch_markers array, the size is + * set to 0, and the type field points to the constant structure + * epoch_marker_class defined in H5Cepoch.c. 
The next and prev fields + * are used as usual to link the entry into the LRU list. * * All other fields are unused. * @@ -3252,36 +2569,33 @@ typedef struct H5C_tag_info_t { * simple cache hit rate computation regardless of whether statistics * collection is enabled. The following fields support this capability. * - * cache_hits: Number of cache hits since the last time the cache hit - * rate statistics were reset. Note that when automatic cache - * re-sizing is enabled, this field will be reset every automatic - * resize epoch. + * cache_hits: Number of cache hits since the last time the cache hit rate + * statistics were reset. Note that when automatic cache re-sizing + * is enabled, this field will be reset every automatic resize epoch. * * cache_accesses: Number of times the cache has been accessed while - * since the last since the last time the cache hit rate statistics - * were reset. Note that when automatic cache re-sizing is enabled, - * this field will be reset every automatic resize epoch. + * since the last since the last time the cache hit rate statistics + * were reset. Note that when automatic cache re-sizing is enabled, + * this field will be reset every automatic resize epoch. * * * Metadata cache image management related fields. * - * image_ctl: Instance of H5C_cache_image_ctl_t containing configuration - * data for generation of a cache image on file close. + * image_ctl: Instance of H5C_cache_image_ctl_t containing configuration + * data for generation of a cache image on file close. * - * serialization_in_progress: Boolean field that is set to TRUE iff - * the cache is in the process of being serialized. This - * field is needed to support the H5C_serialization_in_progress() - * call, which is in turn required for sanity checks in some - * cache clients. + * serialization_in_progress: Boolean field that is set to TRUE iff + * the cache is in the process of being serialized. This field is + * needed to support the H5C_serialization_in_progress() call, which + * is in turn required for sanity checks in some cache clients. * - * load_image: Boolean flag indicating that the metadata cache image - * superblock extension message exists and should be - * read, and the image block read and decoded on the next - * call to H5C_protect(). + * load_image: Boolean flag indicating that the metadata cache image + * superblock extension message exists and should be read, and the + * image block read and decoded on the next call to H5C_protect(). * - * image_loaded: Boolean flag indicating that the metadata cache has + * image_loaded: Boolean flag indicating that the metadata cache has * loaded the metadata cache image as directed by the - * MDC cache image superblock extension message. + * cache image superblock extension message. * * delete_image: Boolean flag indicating whether the metadata cache image * superblock message should be deleted and the cache image @@ -3290,20 +2604,18 @@ typedef struct H5C_tag_info_t { * This flag should be set to TRUE iff the file is opened * R/W and there is a cache image to be read. * - * image_addr: haddr_t containing the base address of the on disk - * metadata cache image, or HADDR_UNDEF if that value is - * undefined. Note that this field is used both in the - * construction and write, and the read and decode of - * metadata cache image blocks. + * image_addr: The base address of the on-disk metadata cache image, or + * HADDR_UNDEF if that value is undefined. 
Note that this field + * is used both in the construction and write, and the read and + * decode of metadata cache image blocks. * - * image_len: hsize_t containing the size of the on disk metadata cache - * image, or zero if that value is undefined. Note that this - * field is used both in the construction and write, and the - * read and decode of metadata cache image blocks. + * image_len: The size of the on disk metadata cache image, or zero if that + * value is undefined. Note that this field is used both in the + * construction and write, and the read and decode of metadata cache + * image blocks. * - * image_data_len: size_t containing the number of bytes of data in the - * on disk metadata cache image, or zero if that value is - * undefined. + * image_data_len: The number of bytes of data in the on disk metadata + * cache image, or zero if that value is undefined. * * In most cases, this value is the same as the image_len * above. It exists to allow for metadata cache image blocks @@ -3349,11 +2661,11 @@ typedef struct H5C_tag_info_t { * The following fields are used assemble the cache image prior to * writing it to disk. * - * num_entries_in_image: Unsigned integer field containing the number of entries - * to be copied into the metadata cache image. Note that - * this value will be less than the number of entries in - * the cache, and the superblock and its related entries - * are not written to the metadata cache image. + * num_entries_in_image: Unsigned integer field containing the number of + * entries to be copied into the metadata cache image. Note that + * this value will be less than the number of entries in the cache, + * and the superblock and its related entries are not written to the + * metadata cache image. * * image_entries: Pointer to a dynamically allocated array of instance of * H5C_image_entry_t of length num_entries_in_image, or NULL @@ -3363,19 +2675,19 @@ typedef struct H5C_tag_info_t { * * image_buffer: Pointer to the dynamically allocated buffer of length * image_len in which the metadata cache image is assembled, - * or NULL if that buffer does not exist. + * or NULL if that buffer does not exist. * * * Free Space Manager Related fields: * - * The free space managers must be informed when we are about to close - * or flush the file so that they order themselves accordingly. This used - * to be done much later in the close process, but with cache image and + * The free space managers for the file must be informed when we are about to + * close or flush the file so that they order themselves accordingly. This + * used to be done much later in the close process, but with cache image and * page buffering, this is no longer viable, as we must finalize the on * disk image of all metadata much sooner. * * This is handled by the H5MF_settle_raw_data_fsm() and - * H5MF_settle_meta_data_FSM() routines. As these calls are expensive, + * H5MF_settle_meta_data_fsm() routines. As these calls are expensive, * the following fields are used to track whether the target free space * managers are clean. * @@ -3409,151 +2721,122 @@ typedef struct H5C_tag_info_t { * below. The first set are collected only when H5C_COLLECT_CACHE_STATS * is true. * - * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type id + * hits: Array to record the number of times an entry with type id * equal to the array index has been in cache when requested in * the current epoch. 
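/* Editorial note -- illustrative sketch only, not part of this patch.  It
 * shows how the per-type statistics arrays in this section are indexed:
 * each array has H5C__MAX_NUM_TYPE_IDS + 1 cells, one per entry type id.
 * The macro name is hypothetical and assumes H5C_COLLECT_CACHE_STATS is
 * enabled so that the hits and misses arrays exist.
 */
#define H5C__EXAMPLE_UPDATE_HIT_MISS_STATS(cache_ptr, type_id, hit)                                          \
    do {                                                                                                     \
        HDassert((type_id) >= 0 && (type_id) <= H5C__MAX_NUM_TYPE_IDS);                                      \
        if (hit)                                                                                             \
            ((cache_ptr)->hits[(type_id)])++;                                                                \
        else                                                                                                 \
            ((cache_ptr)->misses[(type_id)])++;                                                              \
    } while (0)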
* - * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type id + * misses: Array to record the number of times an entry with type id * equal to the array index has not been in cache when * requested in the current epoch. * - * write_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The - * cells are used to record the number of times an entry with + * write_protects: Array to record the number of times an entry with * type id equal to the array index has been write protected * in the current epoch. * * Observe that (hits + misses) = (write_protects + read_protects). * - * read_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The - * cells are used to record the number of times an entry with + * read_protects: Array to record the number of times an entry with * type id equal to the array index has been read protected in * the current epoch. * - * Observe that (hits + misses) = (write_protects + read_protects). + * Observe that (hits + misses) = (write_protects + read_protects). * - * max_read_protects: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to maximum number of simultaneous read + * max_read_protects: Array to maximum number of simultaneous read * protects on any entry with type id equal to the array index * in the current epoch. * - * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type + * insertions: Array to record the number of times an entry with type * id equal to the array index has been inserted into the * cache in the current epoch. * - * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to record the number of times an entry + * pinned_insertions: Array to record the number of times an entry * with type id equal to the array index has been inserted * pinned into the cache in the current epoch. * - * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times a dirty entry with type - * id equal to the array index has been cleared in the current - * epoch. + * clears: Array to record the number of times a dirty entry with type + * id equal to the array index has been cleared in the current epoch. * - * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type id + * flushes: Array to record the number of times an entry with type id * equal to the array index has been written to disk in the - * current epoch. + * current epoch. * - * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type id + * evictions: Array to record the number of times an entry with type id * equal to the array index has been evicted from the cache in * the current epoch. * - * take_ownerships: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The - * cells are used to record the number of times an entry with + * take_ownerships: Array to record the number of times an entry with * type id equal to the array index has been removed from the * cache via the H5C__TAKE_OWNERSHIP_FLAG in the current epoch. * - * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type - * id equal to the array index has been moved in the current - * epoch. 
+ * moves: Array to record the number of times an entry with type + * id equal to the array index has been moved in the current epoch. * - * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to record the number of times an entry + * entry_flush_moves: Array to record the number of times an entry * with type id equal to the array index has been moved * during its pre-serialize callback in the current epoch. * - * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to record the number of times an entry + * cache_flush_moves: Array to record the number of times an entry * with type id equal to the array index has been moved * during a cache flush in the current epoch. * - * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type - * id equal to the array index has been pinned in the current - * epoch. + * pins: Array to record the number of times an entry with type + * id equal to the array index has been pinned in the current epoch. * - * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type - * id equal to the array index has been unpinned in the current - * epoch. + * unpins: Array to record the number of times an entry with type + * id equal to the array index has been unpinned in the current epoch. * - * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type + * dirty_pins: Array to record the number of times an entry with type * id equal to the array index has been marked dirty while pinned * in the current epoch. * - * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The - * cells are used to record the number of times an entry + * pinned_flushes: Array to record the number of times an entry * with type id equal to the array index has been flushed while * pinned in the current epoch. * - * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The - * cells are used to record the number of times an entry + * pinned_clears: Array to record the number of times an entry * with type id equal to the array index has been cleared while * pinned in the current epoch. * - * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to record the number of times an entry + * size_increases: Array to record the number of times an entry * with type id equal to the array index has increased in * size in the current epoch. * - * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to record the number of times an entry + * size_decreases: Array to record the number of times an entry * with type id equal to the array index has decreased in * size in the current epoch. * - * entry_flush_size_changes: Array of int64 of length - * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record - * the number of times an entry with type id equal to the - * array index has changed size while in its pre-serialize - * callback. + * entry_flush_size_changes: Array to record the number of times an entry + * with type id equal to the array index has changed size while in + * its pre-serialize callback. * - * cache_flush_size_changes: Array of int64 of length - * H5C__MAX_NUM_TYPE_IDS + 1. 
The cells are used to record - * the number of times an entry with type id equal to the - * array index has changed size during a cache flush + * cache_flush_size_changes: Array to record the number of times an entry + * with type id equal to the array index has changed size during a + * cache flush * * total_ht_insertions: Number of times entries have been inserted into the * hash table in the current epoch. * * total_ht_deletions: Number of times entries have been deleted from the - * hash table in the current epoch. - * - * successful_ht_searches: int64 containing the total number of successful - * searches of the hash table in the current epoch. + * hash table in the current epoch. * - * total_successful_ht_search_depth: int64 containing the total number of - * entries other than the targets examined in successful - * searches of the hash table in the current epoch. + * successful_ht_searches: The total number of successful searches of the + * hash table in the current epoch. * - * failed_ht_searches: int64 containing the total number of unsuccessful - * searches of the hash table in the current epoch. + * total_successful_ht_search_depth: The total number of entries other than + * the targets examined in successful searches of the hash table in + * the current epoch. * - * total_failed_ht_search_depth: int64 containing the total number of - * entries examined in unsuccessful searches of the hash + * failed_ht_searches: The total number of unsuccessful searches of the hash * table in the current epoch. * - * max_index_len: Largest value attained by the index_len field in the - * current epoch. + * total_failed_ht_search_depth: The total number of entries examined in + * unsuccessful searches of the hash table in the current epoch. * - * max_index_size: Largest value attained by the index_size field in the - * current epoch. + * max_index_len: Largest value attained by the index_len field in the + * current epoch. + * + * max_index_size: Largest value attained by the index_size field in the + * current epoch. * * max_clean_index_size: Largest value attained by the clean_index_size field * in the current epoch. @@ -3561,75 +2844,73 @@ typedef struct H5C_tag_info_t { * max_dirty_index_size: Largest value attained by the dirty_index_size field * in the current epoch. * - * max_slist_len: Largest value attained by the slist_len field in the - * current epoch. + * max_slist_len: Largest value attained by the slist_len field in the + * current epoch. * - * max_slist_size: Largest value attained by the slist_size field in the - * current epoch. + * max_slist_size: Largest value attained by the slist_size field in the + * current epoch. * * max_pl_len: Largest value attained by the pl_len field in the - * current epoch. + * current epoch. * * max_pl_size: Largest value attained by the pl_size field in the - * current epoch. + * current epoch. * * max_pel_len: Largest value attained by the pel_len field in the - * current epoch. + * current epoch. * * max_pel_size: Largest value attained by the pel_size field in the - * current epoch. + * current epoch. * * calls_to_msic: Total number of calls to H5C__make_space_in_cache * * total_entries_skipped_in_msic: Number of clean entries skipped while - * enforcing the min_clean_fraction in H5C__make_space_in_cache(). + * enforcing the min_clean_fraction in H5C__make_space_in_cache(). * * total_dirty_pf_entries_skipped_in_msic: Number of dirty prefetched entries - * skipped in H5C__make_space_in_cache(). 
Note that this can - * only occur when a file is opened R/O with a cache image - * containing dirty entries. + * skipped in H5C__make_space_in_cache(). Note that this can + * only occur when a file is opened R/O with a cache image + * containing dirty entries. * * total_entries_scanned_in_msic: Number of clean entries skipped while - * enforcing the min_clean_fraction in H5C__make_space_in_cache(). + * enforcing the min_clean_fraction in H5C__make_space_in_cache(). * * max_entries_skipped_in_msic: Maximum number of clean entries skipped - * in any one call to H5C__make_space_in_cache(). + * in any one call to H5C__make_space_in_cache(). * * max_dirty_pf_entries_skipped_in_msic: Maximum number of dirty prefetched - * entries skipped in any one call to H5C__make_space_in_cache(). - * Note that this can only occur when the file is opened - * R/O with a cache image containing dirty entries. + * entries skipped in any one call to H5C__make_space_in_cache(). + * Note that this can only occur when the file is opened + * R/O with a cache image containing dirty entries. * * max_entries_scanned_in_msic: Maximum number of entries scanned over - * in any one call to H5C__make_space_in_cache(). + * in any one call to H5C__make_space_in_cache(). * * entries_scanned_to_make_space: Number of entries scanned only when looking - * for entries to evict in order to make space in cache. + * for entries to evict in order to make space in cache. * * * The following fields track statistics on cache images. * - * images_created: Integer field containing the number of cache images - * created since the last time statistics were reset. + * images_created: The number of cache images created since the last + * time statistics were reset. * * At present, this field must always be either 0 or 1. * Further, since cache images are only created at file * close, this field should only be set at that time. * - * images_read: Integer field containing the number of cache images - * read from file. Note that reading an image is different - * from loading it -- reading the image means just that, - * while loading the image refers to decoding it and loading - * it into the metadata cache. + * images_read: The number of cache images read from file. Note that + * reading an image is different from loading it -- reading the + * image means just that, while loading the image refers to decoding + * it and loading it into the metadata cache. * - * In the serial case, image_read should always equal - * images_loaded. However, in the parallel case, the - * image should only be read by process 0. All other - * processes should receive the cache image via a broadcast - * from process 0. + * In the serial case, image_read should always equal images_loaded. + * However, in the parallel case, the image should only be read by + * process 0. All other processes should receive the cache image via + * a broadcast from process 0. * - * images_loaded: Integer field containing the number of cache images - * loaded since the last time statistics were reset. + * images_loaded: The number of cache images loaded since the last time + * statistics were reset. * * At present, this field must always be either 0 or 1. * Further, since cache images are only loaded at the @@ -3649,25 +2930,24 @@ typedef struct H5C_tag_info_t { * of prefetched entries are tracked in the flushes and evictions arrays * discussed above. * - * prefetches: Number of prefetched entries that are loaded to the cache. 
+ * prefetches: Number of prefetched entries that are loaded to the cache. * - * dirty_prefetches: Number of dirty prefetched entries that are loaded + * dirty_prefetches: Number of dirty prefetched entries that are loaded * into the cache. * - * prefetch_hits: Number of prefetched entries that are actually used. + * prefetch_hits: Number of prefetched entries that are actually used. * * - * Entries may move, load, dirty, and delete - * other entries in their pre_serialize and serialize callbacks, there is - * code to restart scans of lists so as to avoid - * improper behavior if the next entry in the list is the target of one on - * these operations. + * Entries may move, load, dirty, and delete other entries in their + * pre_serialize and serialize callbacks, there is code to restart scans of + * lists so as to avoid improper behavior if the next entry in the list is + * the target of one on these operations. * * The following fields are use to count such occurrences. They are used * both in tests (to verify that the scan has been restarted), and to * obtain estimates of how frequently these restarts occur. * - * slist_scan_restarts: Number of times a scan of the slist (that contains + * slist_scan_restarts: Number of times a scan of the skip list (that contains * calls to H5C__flush_single_entry()) has been restarted to * avoid potential issues with change of status of the next * entry in the scan. @@ -3689,42 +2969,35 @@ typedef struct H5C_tag_info_t { * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS * and H5C_COLLECT_CACHE_ENTRY_STATS are true. * - * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the maximum number of times any single + * max_accesses: Array to record the maximum number of times any single * entry with type id equal to the array index has been * accessed in the current epoch. * - * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the minimum number of times any single + * min_accesses: Array to record the minimum number of times any single * entry with type id equal to the array index has been * accessed in the current epoch. * - * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the maximum number of times any single + * max_clears: Array to record the maximum number of times any single * entry with type id equal to the array index has been cleared * in the current epoch. * - * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the maximum number of times any single + * max_flushes: Array to record the maximum number of times any single * entry with type id equal to the array index has been * flushed in the current epoch. * - * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the maximum size of any single entry + * max_size: Array to record the maximum size of any single entry * with type id equal to the array index that has resided in * the cache in the current epoch. * - * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the maximum number of times that any single + * max_pins: Array to record the maximum number of times that any single * entry with type id equal to the array index that has been * marked as pinned in the cache in the current epoch. * * * Fields supporting testing: * - * prefix Array of char used to prefix debugging output. 
The - * field is intended to allow marking of output of with - * the processes mpi rank. + * prefix: Array of char used to prefix debugging output. The field is + * intended to allow marking of output of with the processes mpi rank. * * get_entry_ptr_from_addr_counter: Counter used to track the number of * times the H5C_get_entry_ptr_from_addr() function has been @@ -3735,238 +3008,238 @@ typedef struct H5C_tag_info_t { struct H5C_t { uint32_t magic; - hbool_t flush_in_progress; - H5C_log_info_t *log_info; - void * aux_ptr; - int32_t max_type_id; - const H5C_class_t * const *class_table_ptr; - size_t max_cache_size; - size_t min_clean_size; - H5C_write_permitted_func_t check_write_permitted; - hbool_t write_permitted; - H5C_log_flush_func_t log_flush; - hbool_t evictions_enabled; - hbool_t close_warning_received; + hbool_t flush_in_progress; + H5C_log_info_t * log_info; + void * aux_ptr; + int32_t max_type_id; + const H5C_class_t * const *class_table_ptr; + size_t max_cache_size; + size_t min_clean_size; + H5C_write_permitted_func_t check_write_permitted; + hbool_t write_permitted; + H5C_log_flush_func_t log_flush; + hbool_t evictions_enabled; + hbool_t close_warning_received; /* Fields for maintaining the [hash table] index of entries */ - uint32_t index_len; - size_t index_size; + uint32_t index_len; + size_t index_size; uint32_t index_ring_len[H5C_RING_NTYPES]; - size_t index_ring_size[H5C_RING_NTYPES]; - size_t clean_index_size; - size_t clean_index_ring_size[H5C_RING_NTYPES]; - size_t dirty_index_size; - size_t dirty_index_ring_size[H5C_RING_NTYPES]; - H5C_cache_entry_t * index[H5C__HASH_TABLE_LEN]; - uint32_t il_len; - size_t il_size; - H5C_cache_entry_t * il_head; - H5C_cache_entry_t * il_tail; + size_t index_ring_size[H5C_RING_NTYPES]; + size_t clean_index_size; + size_t clean_index_ring_size[H5C_RING_NTYPES]; + size_t dirty_index_size; + size_t dirty_index_ring_size[H5C_RING_NTYPES]; + H5C_cache_entry_t * index[H5C__HASH_TABLE_LEN]; + uint32_t il_len; + size_t il_size; + H5C_cache_entry_t * il_head; + H5C_cache_entry_t * il_tail; /* Fields to detect entries removed during scans */ - int64_t entries_removed_counter; - H5C_cache_entry_t * last_entry_removed_ptr; - H5C_cache_entry_t * entry_watched_for_removal; + int64_t entries_removed_counter; + H5C_cache_entry_t * last_entry_removed_ptr; + H5C_cache_entry_t * entry_watched_for_removal; /* Fields for maintaining list of in-order entries, for flushing */ - hbool_t slist_enabled; - hbool_t slist_changed; - uint32_t slist_len; - size_t slist_size; + hbool_t slist_enabled; + hbool_t slist_changed; + uint32_t slist_len; + size_t slist_size; uint32_t slist_ring_len[H5C_RING_NTYPES]; - size_t slist_ring_size[H5C_RING_NTYPES]; - H5SL_t * slist_ptr; - uint32_t num_last_entries; + size_t slist_ring_size[H5C_RING_NTYPES]; + H5SL_t * slist_ptr; + uint32_t num_last_entries; #ifdef H5C_DO_SANITY_CHECKS - int32_t slist_len_increase; - int64_t slist_size_increase; + int32_t slist_len_increase; + int64_t slist_size_increase; #endif /* H5C_DO_SANITY_CHECKS */ /* Fields for maintaining list of tagged entries */ - H5C_tag_info_t * tag_list; - hbool_t ignore_tags; - uint32_t num_objs_corked; + H5C_tag_info_t * tag_list; + hbool_t ignore_tags; + uint32_t num_objs_corked; /* Fields for tracking protected entries */ - uint32_t pl_len; - size_t pl_size; - H5C_cache_entry_t * pl_head_ptr; - H5C_cache_entry_t * pl_tail_ptr; + uint32_t pl_len; + size_t pl_size; + H5C_cache_entry_t * pl_head_ptr; + H5C_cache_entry_t * pl_tail_ptr; /* Fields for tracking pinned 
entries */ - uint32_t pel_len; - size_t pel_size; - H5C_cache_entry_t * pel_head_ptr; - H5C_cache_entry_t * pel_tail_ptr; + uint32_t pel_len; + size_t pel_size; + H5C_cache_entry_t * pel_head_ptr; + H5C_cache_entry_t * pel_tail_ptr; /* Fields for complete LRU list of entries */ - uint32_t LRU_list_len; - size_t LRU_list_size; - H5C_cache_entry_t * LRU_head_ptr; - H5C_cache_entry_t * LRU_tail_ptr; + uint32_t LRU_list_len; + size_t LRU_list_size; + H5C_cache_entry_t * LRU_head_ptr; + H5C_cache_entry_t * LRU_tail_ptr; #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS /* Fields for clean LRU list of entries */ - uint32_t cLRU_list_len; - size_t cLRU_list_size; - H5C_cache_entry_t * cLRU_head_ptr; - H5C_cache_entry_t * cLRU_tail_ptr; + uint32_t cLRU_list_len; + size_t cLRU_list_size; + H5C_cache_entry_t * cLRU_head_ptr; + H5C_cache_entry_t * cLRU_tail_ptr; /* Fields for dirty LRU list of entries */ - uint32_t dLRU_list_len; - size_t dLRU_list_size; - H5C_cache_entry_t * dLRU_head_ptr; - H5C_cache_entry_t * dLRU_tail_ptr; + uint32_t dLRU_list_len; + size_t dLRU_list_size; + H5C_cache_entry_t * dLRU_head_ptr; + H5C_cache_entry_t * dLRU_tail_ptr; #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ #ifdef H5_HAVE_PARALLEL /* Fields for collective metadata reads */ - uint32_t coll_list_len; - size_t coll_list_size; - H5C_cache_entry_t * coll_head_ptr; - H5C_cache_entry_t * coll_tail_ptr; + uint32_t coll_list_len; + size_t coll_list_size; + H5C_cache_entry_t * coll_head_ptr; + H5C_cache_entry_t * coll_tail_ptr; /* Fields for collective metadata writes */ - H5SL_t * coll_write_list; + H5SL_t * coll_write_list; #endif /* H5_HAVE_PARALLEL */ /* Fields for automatic cache size adjustment */ - hbool_t size_increase_possible; - hbool_t flash_size_increase_possible; - size_t flash_size_increase_threshold; - hbool_t size_decrease_possible; - hbool_t resize_enabled; - hbool_t cache_full; - hbool_t size_decreased; - hbool_t resize_in_progress; - hbool_t msic_in_progress; - H5C_auto_size_ctl_t resize_ctl; + hbool_t size_increase_possible; + hbool_t flash_size_increase_possible; + size_t flash_size_increase_threshold; + hbool_t size_decrease_possible; + hbool_t resize_enabled; + hbool_t cache_full; + hbool_t size_decreased; + hbool_t resize_in_progress; + hbool_t msic_in_progress; + H5C_auto_size_ctl_t resize_ctl; /* Fields for epoch markers used in automatic cache size adjustment */ - int32_t epoch_markers_active; - hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS]; - int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1]; - int32_t epoch_marker_ringbuf_first; - int32_t epoch_marker_ringbuf_last; - int32_t epoch_marker_ringbuf_size; - H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS]; + int32_t epoch_markers_active; + hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS]; + int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1]; + int32_t epoch_marker_ringbuf_first; + int32_t epoch_marker_ringbuf_last; + int32_t epoch_marker_ringbuf_size; + H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS]; /* Fields for cache hit rate collection */ - int64_t cache_hits; - int64_t cache_accesses; + int64_t cache_hits; + int64_t cache_accesses; /* fields supporting generation of a cache image on file close */ - H5C_cache_image_ctl_t image_ctl; - hbool_t serialization_in_progress; - hbool_t load_image; - hbool_t image_loaded; - hbool_t delete_image; + H5C_cache_image_ctl_t image_ctl; + hbool_t serialization_in_progress; + hbool_t load_image; + hbool_t image_loaded; + hbool_t delete_image; haddr_t image_addr; - hsize_t 
image_len; - hsize_t image_data_len; - int64_t entries_loaded_counter; - int64_t entries_inserted_counter; - int64_t entries_relocated_counter; - int64_t entry_fd_height_change_counter; + hsize_t image_len; + hsize_t image_data_len; + int64_t entries_loaded_counter; + int64_t entries_inserted_counter; + int64_t entries_relocated_counter; + int64_t entry_fd_height_change_counter; uint32_t num_entries_in_image; - H5C_image_entry_t * image_entries; - void * image_buffer; + H5C_image_entry_t * image_entries; + void * image_buffer; /* Free Space Manager Related fields */ hbool_t rdfsm_settled; - hbool_t mdfsm_settled; + hbool_t mdfsm_settled; #if H5C_COLLECT_CACHE_STATS /* stats fields */ - int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t write_protects[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t read_protects[H5C__MAX_NUM_TYPE_IDS + 1]; - int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t take_ownerships[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t write_protects[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t read_protects[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t take_ownerships[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1]; /* Fields for hash table operations */ - int64_t total_ht_insertions; - int64_t total_ht_deletions; - int64_t successful_ht_searches; - int64_t total_successful_ht_search_depth; - int64_t failed_ht_searches; - int64_t total_failed_ht_search_depth; - uint32_t max_index_len; - size_t max_index_size; - size_t max_clean_index_size; - size_t max_dirty_index_size; + int64_t total_ht_insertions; + int64_t total_ht_deletions; + int64_t successful_ht_searches; + int64_t total_successful_ht_search_depth; + 
int64_t failed_ht_searches; + int64_t total_failed_ht_search_depth; + uint32_t max_index_len; + size_t max_index_size; + size_t max_clean_index_size; + size_t max_dirty_index_size; /* Fields for in-order skip list */ - uint32_t max_slist_len; - size_t max_slist_size; + uint32_t max_slist_len; + size_t max_slist_size; /* Fields for protected entry list */ - uint32_t max_pl_len; - size_t max_pl_size; + uint32_t max_pl_len; + size_t max_pl_size; /* Fields for pinned entry list */ - uint32_t max_pel_len; - size_t max_pel_size; + uint32_t max_pel_len; + size_t max_pel_size; /* Fields for tracking 'make space in cache' (msic) operations */ - int64_t calls_to_msic; - int64_t total_entries_skipped_in_msic; - int64_t total_dirty_pf_entries_skipped_in_msic; - int64_t total_entries_scanned_in_msic; - int32_t max_entries_skipped_in_msic; - int32_t max_dirty_pf_entries_skipped_in_msic; - int32_t max_entries_scanned_in_msic; - int64_t entries_scanned_to_make_space; + int64_t calls_to_msic; + int64_t total_entries_skipped_in_msic; + int64_t total_dirty_pf_entries_skipped_in_msic; + int64_t total_entries_scanned_in_msic; + int32_t max_entries_skipped_in_msic; + int32_t max_dirty_pf_entries_skipped_in_msic; + int32_t max_entries_scanned_in_msic; + int64_t entries_scanned_to_make_space; /* Fields for tracking skip list scan restarts */ - int64_t slist_scan_restarts; - int64_t LRU_scan_restarts; - int64_t index_scan_restarts; + int64_t slist_scan_restarts; + int64_t LRU_scan_restarts; + int64_t index_scan_restarts; /* Fields for tracking cache image operations */ - int32_t images_created; - int32_t images_read; - int32_t images_loaded; - hsize_t last_image_size; + int32_t images_created; + int32_t images_read; + int32_t images_loaded; + hsize_t last_image_size; /* Fields for tracking prefetched entries */ - int64_t prefetches; - int64_t dirty_prefetches; - int64_t prefetch_hits; + int64_t prefetches; + int64_t dirty_prefetches; + int64_t prefetch_hits; #if H5C_COLLECT_CACHE_ENTRY_STATS - int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1]; - int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1]; - int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1]; - int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1]; - size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1]; - int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1]; + size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1]; #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ #endif /* H5C_COLLECT_CACHE_STATS */ - char prefix[H5C__PREFIX_LEN]; + char prefix[H5C__PREFIX_LEN]; #ifndef NDEBUG - int64_t get_entry_ptr_from_addr_counter; -#endif /* NDEBUG */ + int64_t get_entry_ptr_from_addr_counter; +#endif }; /* H5C_t */ diff --git a/test/cache_common.h b/test/cache_common.h index bdeeded0dca..e91099cf47f 100644 --- a/test/cache_common.h +++ b/test/cache_common.h @@ -402,37 +402,18 @@ typedef struct test_entry_t { unsigned verify_ct; /* Count the # of checksum verification for an entry */ } test_entry_t; -/* The following are cut down test versions of the hash table manipulation - * macros from H5Cpkg.h, which have been further modified to avoid references - * to the error reporting macros. Needless to say, these macros must be - * updated as necessary. 
- */ - -#define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3) - #define H5C_TEST__PRE_HT_SEARCH_SC(cache_ptr, Addr) \ - if (((cache_ptr) == NULL) || ((cache_ptr)->magic != H5C__H5C_T_MAGIC) || \ - ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \ - (!H5F_addr_defined(Addr)) || (H5C__HASH_FCN(Addr) < 0) || \ - (H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN)) { \ + if (H5C__PRE_HT_SEARCH_SC_CMP(cache_ptr, Addr)) { \ HDfprintf(stdout, "Pre HT search SC failed.\n"); \ } #define H5C_TEST__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k) \ - if (((cache_ptr) == NULL) || ((cache_ptr)->magic != H5C__H5C_T_MAGIC) || ((cache_ptr)->index_len < 1) || \ - ((entry_ptr) == NULL) || ((cache_ptr)->index_size < (entry_ptr)->size) || \ - ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \ - ((entry_ptr)->size <= 0) || (((cache_ptr)->index)[k] == NULL) || \ - ((((cache_ptr)->index)[k] != (entry_ptr)) && ((entry_ptr)->ht_prev == NULL)) || \ - ((((cache_ptr)->index)[k] == (entry_ptr)) && ((entry_ptr)->ht_prev != NULL)) || \ - (((entry_ptr)->ht_prev != NULL) && ((entry_ptr)->ht_prev->ht_next != (entry_ptr))) || \ - (((entry_ptr)->ht_next != NULL) && ((entry_ptr)->ht_next->ht_prev != (entry_ptr)))) { \ + if (H5C__POST_SUC_HT_SEARCH_SC_CMP(cache_ptr, entry_ptr, k)) { \ HDfprintf(stdout, "Post successful HT search SC failed.\n"); \ } -#define H5C_TEST__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k) \ - if (((cache_ptr) == NULL) || (((cache_ptr)->index)[k] != (entry_ptr)) || \ - ((entry_ptr)->ht_prev != NULL)) { \ +#define H5C_TEST__POST_HT_SHIFT_TO_FRONT_SC(cache_ptr, entry_ptr, k) \ + if (H5C__POST_HT_SHIFT_TO_FRONT_SC_CMP(cache_ptr, entry_ptr, k)) { \ HDfprintf(stdout, "Post HT shift to front failed.\n"); \ } @@ -440,21 +421,21 @@ typedef struct test_entry_t { { \ int k; \ H5C_TEST__PRE_HT_SEARCH_SC(cache_ptr, Addr) \ - k = H5C__HASH_FCN(Addr); \ - entry_ptr = ((cache_ptr)->index)[k]; \ + k = H5C__HASH_FCN(Addr); \ + (entry_ptr) = (cache_ptr)->index[k]; \ while (entry_ptr) { \ if (H5F_addr_eq(Addr, (entry_ptr)->addr)) { \ H5C_TEST__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k) \ - if (entry_ptr != ((cache_ptr)->index)[k]) { \ + if ((entry_ptr) != (cache_ptr)->index[k]) { \ if ((entry_ptr)->ht_next) \ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ HDassert((entry_ptr)->ht_prev != NULL); \ - (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ - ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \ - (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \ - (entry_ptr)->ht_prev = NULL; \ - ((cache_ptr)->index)[k] = (entry_ptr); \ - H5C_TEST__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k) \ + (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ + (cache_ptr)->index[k]->ht_prev = (entry_ptr); \ + (entry_ptr)->ht_next = (cache_ptr)->index[k]; \ + (entry_ptr)->ht_prev = NULL; \ + (cache_ptr)->index[k] = (entry_ptr); \ + H5C_TEST__POST_HT_SHIFT_TO_FRONT_SC(cache_ptr, entry_ptr, k) \ } \ break; \ } \ @@ -568,11 +549,6 @@ H5TEST_DLL void add_flush_op(int target_type, int target_idx, int op_code, int t H5TEST_DLL void addr_to_type_and_index(haddr_t addr, int32_t *type_ptr, int32_t *index_ptr); -#if 0 /* keep this for a while -- it may be useful */ -H5TEST_DLL haddr_t type_and_index_to_addr(int32_t type, - int32_t idx); -#endif - H5TEST_DLL void dirty_entry(H5F_t *file_ptr, int32_t type, int32_t idx, hbool_t dirty_pin); H5TEST_DLL void expunge_entry(H5F_t *file_ptr, int32_t type, int32_t idx); From 
ed560a72e8c5334d68973dbe6630b7953eed56cc Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Thu, 11 May 2023 10:18:54 -0500 Subject: [PATCH 216/231] Update action uses version (#2937) --- .github/workflows/clang-format-fix.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/clang-format-fix.yml b/.github/workflows/clang-format-fix.yml index feaa3d0014e..00d23529cbd 100644 --- a/.github/workflows/clang-format-fix.yml +++ b/.github/workflows/clang-format-fix.yml @@ -27,7 +27,7 @@ jobs: inplace: True style: file exclude: './config ./hl/src/H5LTanalyze.c ./hl/src/H5LTparse.c ./hl/src/H5LTparse.h ./src/H5Epubgen.h ./src/H5Einit.h ./src/H5Eterm.h ./src/H5Edefin.h ./src/H5version.h ./src/H5overflow.h' - - uses: EndBug/add-and-commit@v7 + - uses: EndBug/add-and-commit@v9 with: author_name: github-actions author_email: 41898282+github-actions[bot]@users.noreply.github.com From a083d045aeeab80a8bb1d17857299b864571206b Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Thu, 11 May 2023 17:04:09 -0700 Subject: [PATCH 217/231] Bump Autoconf version to 2.71 (#2944) Required for building with Intel's oneAPI --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index 1675bee56fb..88bad6f6d8c 100644 --- a/configure.ac +++ b/configure.ac @@ -13,7 +13,7 @@ ## ---------------------------------------------------------------------- ## Initialize configure. ## -AC_PREREQ([2.69]) +AC_PREREQ([2.71]) ## AC_INIT takes the name of the package, the version number, and an ## email address to report bugs. AC_CONFIG_SRCDIR takes a unique file From 21b70fe93e3a8340d904dc253ae05b0c45000e3f Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Fri, 12 May 2023 14:52:11 -0500 Subject: [PATCH 218/231] Move functions into more focused source code modules (#2936) * Move functions into more focused source code modules, along with a small # of directly secondary effects. No actual changes to the contents of any moved routines. 
Signed-off-by: Quincey Koziol * Committing clang-format changes --------- Signed-off-by: Quincey Koziol Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- src/CMakeLists.txt | 2 + src/H5C.c | 7841 +++------------------------------------ src/H5Cdbg.c | 483 ++- src/H5Centry.c | 4363 ++++++++++++++++++++++ src/H5Cimage.c | 588 +-- src/H5Cint.c | 2578 +++++++++++++ src/H5Clog.c | 4 +- src/H5Clog.h | 4 +- src/H5Clog_json.c | 8 +- src/H5Clog_trace.c | 8 +- src/H5Cmpio.c | 4 - src/H5Cpkg.h | 29 +- src/H5Cprivate.h | 2 - src/H5Cquery.c | 1 - src/H5Ctag.c | 66 +- src/Makefile.am | 3 +- test/cache_image.c | 4 +- testpar/t_cache_image.c | 4 +- 18 files changed, 8029 insertions(+), 7963 deletions(-) create mode 100644 src/H5Centry.c create mode 100644 src/H5Cint.c diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 4edcc98c662..fc21eacaafa 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -78,8 +78,10 @@ IDE_GENERATED_PROPERTIES ("H5B2" "${H5B2_HDRS}" "${H5B2_SOURCES}" ) set (H5C_SOURCES ${HDF5_SRC_DIR}/H5C.c ${HDF5_SRC_DIR}/H5Cdbg.c + ${HDF5_SRC_DIR}/H5Centry.c ${HDF5_SRC_DIR}/H5Cepoch.c ${HDF5_SRC_DIR}/H5Cimage.c + ${HDF5_SRC_DIR}/H5Cint.c ${HDF5_SRC_DIR}/H5Clog.c ${HDF5_SRC_DIR}/H5Clog_json.c ${HDF5_SRC_DIR}/H5Clog_trace.c diff --git a/src/H5C.c b/src/H5C.c index b80ed985023..348e6435325 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -61,74 +61,26 @@ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Cpkg.h" /* Cache */ -#include "H5CXprivate.h" /* API Contexts */ #include "H5Eprivate.h" /* Error handling */ #include "H5Fpkg.h" /* Files */ #include "H5FLprivate.h" /* Free Lists */ -#include "H5Iprivate.h" /* IDs */ #include "H5MFprivate.h" /* File memory management */ #include "H5MMprivate.h" /* Memory management */ -#include "H5Pprivate.h" /* Property lists */ /****************/ /* Local Macros */ /****************/ -#if H5C_DO_MEMORY_SANITY_CHECKS -#define H5C_IMAGE_EXTRA_SPACE 8 -#define H5C_IMAGE_SANITY_VALUE "DeadBeef" -#else /* H5C_DO_MEMORY_SANITY_CHECKS */ -#define H5C_IMAGE_EXTRA_SPACE 0 -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ /******************/ /* Local Typedefs */ /******************/ -/* Alias for pointer to cache entry, for use when allocating sequences of them */ -typedef H5C_cache_entry_t *H5C_cache_entry_ptr_t; - /********************/ /* Local Prototypes */ /********************/ -static herr_t H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); -static herr_t H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp); -static herr_t H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp); -static herr_t H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted); -static herr_t H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr, - size_t *new_max_cache_size_ptr, hbool_t write_permitted); -static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr); -static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted); -static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr); -static herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr); -static herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr); -static herr_t H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size); -static 
herr_t H5C__flush_invalidate_cache(H5F_t *f, unsigned flags); -static herr_t H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags); -static herr_t H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags); -static void *H5C__load_entry(H5F_t *f, -#ifdef H5_HAVE_PARALLEL - hbool_t coll_access, -#endif /* H5_HAVE_PARALLEL */ - const H5C_class_t *type, haddr_t addr, void *udata); - -static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry); -static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry); -static herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry); -static herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry); - -static herr_t H5C__serialize_ring(H5F_t *f, H5C_ring_t ring); -static herr_t H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); -static herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); -static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, - hbool_t actual); - -#ifndef NDEBUG -static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, - const H5C_cache_entry_t *base_entry); -#endif - /*********************/ /* Package Variables */ /*********************/ @@ -147,9 +99,6 @@ H5FL_DEFINE(H5C_tag_info_t); /* Declare a free list to manage the H5C_t struct */ H5FL_DEFINE_STATIC(H5C_t); -/* Declare a free list to manage arrays of cache entries */ -H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t); - /*------------------------------------------------------------------------- * Function: H5C_create * @@ -436,137 +385,6 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id, } /* H5C_create() */ /*------------------------------------------------------------------------- - * Function: H5C_def_auto_resize_rpt_fcn - * - * Purpose: Print results of a automatic cache resize. - * - * This function should only be used where HDprintf() behaves - * well -- i.e. not on Windows. - * - * Return: void - * - * Programmer: John Mainzer - * 10/27/04 - * - *------------------------------------------------------------------------- - */ -void -H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, -#ifndef NDEBUG - int32_t version, -#else - int32_t H5_ATTR_UNUSED version, -#endif - double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size, - size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size) -{ - HDassert(cache_ptr != NULL); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(version == H5C__CURR_AUTO_RESIZE_RPT_FCN_VER); - - switch (status) { - case in_spec: - HDfprintf(stdout, "%sAuto cache resize -- no change. 
(hit rate = %lf)\n", cache_ptr->prefix, - hit_rate); - break; - - case increase: - HDassert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold); - HDassert(old_max_cache_size < new_max_cache_size); - - HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", - cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); - HDfprintf(stdout, "%scache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, - old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); - break; - - case flash_increase: - HDassert(old_max_cache_size < new_max_cache_size); - - HDfprintf(stdout, "%sflash cache resize(%d) -- size threshold = %zu.\n", cache_ptr->prefix, - (int)(cache_ptr->resize_ctl.flash_incr_mode), cache_ptr->flash_size_increase_threshold); - HDfprintf(stdout, "%s cache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, - old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); - break; - - case decrease: - HDassert(old_max_cache_size > new_max_cache_size); - - switch (cache_ptr->resize_ctl.decr_mode) { - case H5C_decr__off: - HDfprintf(stdout, "%sAuto cache resize -- decrease off. HR = %lf\n", cache_ptr->prefix, - hit_rate); - break; - - case H5C_decr__threshold: - HDassert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold); - - HDfprintf(stdout, "%sAuto cache resize -- decrease by threshold. HR = %lf > %6.5lf\n", - cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold); - HDfprintf(stdout, "%sout of bounds high (%6.5lf).\n", cache_ptr->prefix, - cache_ptr->resize_ctl.upper_hr_threshold); - break; - - case H5C_decr__age_out: - HDfprintf(stdout, "%sAuto cache resize -- decrease by ageout. HR = %lf\n", - cache_ptr->prefix, hit_rate); - break; - - case H5C_decr__age_out_with_threshold: - HDassert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold); - - HDfprintf(stdout, - "%sAuto cache resize -- decrease by ageout with threshold. HR = %lf > %6.5lf\n", - cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold); - break; - - default: - HDfprintf(stdout, "%sAuto cache resize -- decrease by unknown mode. 
HR = %lf\n", - cache_ptr->prefix, hit_rate); - } - - HDfprintf(stdout, "%s cache size decreased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, - old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); - break; - - case at_max_size: - HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", - cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); - HDfprintf(stdout, "%s cache already at maximum size so no change.\n", cache_ptr->prefix); - break; - - case at_min_size: - HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n", cache_ptr->prefix, - hit_rate); - HDfprintf(stdout, "%s cache already at minimum size.\n", cache_ptr->prefix); - break; - - case increase_disabled: - HDfprintf(stdout, "%sAuto cache resize -- increase disabled -- HR = %lf.", cache_ptr->prefix, - hit_rate); - break; - - case decrease_disabled: - HDfprintf(stdout, "%sAuto cache resize -- decrease disabled -- HR = %lf.\n", cache_ptr->prefix, - hit_rate); - break; - - case not_full: - HDassert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold); - - HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", - cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); - HDfprintf(stdout, "%s cache not full so no increase in size.\n", cache_ptr->prefix); - break; - - default: - HDfprintf(stdout, "%sAuto cache resize -- unknown status code.\n", cache_ptr->prefix); - break; - } -} /* H5C_def_auto_resize_rpt_fcn() */ - -/*------------------------------------------------------------------------- - * * Function: H5C_prep_for_file_close * * Purpose: This function should be called just prior to the cache @@ -687,7 +505,7 @@ H5C_dest(H5F_t *f) HDassert(cache_ptr->close_warning_received); #if H5AC_DUMP_IMAGE_STATS_ON_CLOSE - if (H5C_image_stats(cache_ptr, TRUE) < 0) + if (H5C__image_stats(cache_ptr, TRUE) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't display cache image stats") #endif /* H5AC_DUMP_IMAGE_STATS_ON_CLOSE */ @@ -788,81 +606,6 @@ H5C_evict(H5F_t *f) FUNC_LEAVE_NOAPI(ret_value) } /* H5C_evict() */ -/*------------------------------------------------------------------------- - * Function: H5C_expunge_entry - * - * Purpose: Expunge an entry from the cache without writing it to disk - * even if it is dirty. The entry may not be either pinned or - * protected. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 6/29/06 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flags) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *entry_ptr = NULL; - unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG); - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - HDassert(f); - HDassert(f->shared); - cache_ptr = f->shared->cache; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(type); - HDassert(H5F_addr_defined(addr)); - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* Look for entry in cache */ - H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL) - if ((entry_ptr == NULL) || (entry_ptr->type != type)) - /* the target doesn't exist in the cache, so we are done. 
*/ - HGOTO_DONE(SUCCEED) - - HDassert(entry_ptr->addr == addr); - HDassert(entry_ptr->type == type); - - /* Check for entry being pinned or protected */ - if (entry_ptr->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected") - if (entry_ptr->is_pinned) - HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned") - - /* If we get this far, call H5C__flush_single_entry() with the - * H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG. - * This will clear the entry, and then delete it from the cache. - */ - - /* Pass along 'free file space' flag */ - flush_flags |= (flags & H5C__FREE_FILE_SPACE_FLAG); - - /* Delete the entry from the skip list on destroy */ - flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; - - if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry") - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_expunge_entry() */ - /*------------------------------------------------------------------------- * Function: H5C_flush_cache * @@ -1059,7240 +802,694 @@ H5C_flush_to_min_clean(H5F_t *f) } /* H5C_flush_to_min_clean() */ /*------------------------------------------------------------------------- - * Function: H5C_insert_entry + * Function: H5C_reset_cache_hit_rate_stats() * - * Purpose: Adds the specified thing to the cache. The thing need not - * exist on disk yet, but it must have an address and disk - * space reserved. + * Purpose: Reset the cache hit rate computation fields. * - * Return: Non-negative on success/Negative on failure + * Return: SUCCEED on success, and FAIL on failure. * - * Programmer: John Mainzer - * 6/2/04 + * Programmer: John Mainzer, 10/5/04 * *------------------------------------------------------------------------- */ herr_t -H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags) +H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr) { - H5C_t *cache_ptr; - H5AC_ring_t ring = H5C_RING_UNDEFINED; - hbool_t insert_pinned; - hbool_t flush_last; -#ifdef H5_HAVE_PARALLEL - hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */ -#endif /* H5_HAVE_PARALLEL */ - hbool_t set_flush_marker; - hbool_t write_permitted = TRUE; - size_t empty_space; - H5C_cache_entry_t *entry_ptr = NULL; - H5C_cache_entry_t *test_entry_ptr; - hbool_t entry_tagged = FALSE; - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) - HDassert(f); - HDassert(f->shared); - - cache_ptr = f->shared->cache; - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(type); - HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); - HDassert(type->image_len); - HDassert(H5F_addr_defined(addr)); - HDassert(thing); - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - /* no need to verify that entry is not already in the index as */ - /* we already make that check below. 
*/ - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0); - insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0); - flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0); - - /* Get the ring type from the API context */ - ring = H5CX_get_ring(); - - entry_ptr = (H5C_cache_entry_t *)thing; - - /* verify that the new entry isn't already in the hash table -- scream - * and die if it is. - */ - - H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) - - if (test_entry_ptr != NULL) { - if (test_entry_ptr == entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache") - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache") - } /* end if */ - - entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; - entry_ptr->cache_ptr = cache_ptr; - entry_ptr->addr = addr; - entry_ptr->type = type; - - entry_ptr->image_ptr = NULL; - entry_ptr->image_up_to_date = FALSE; - - entry_ptr->is_protected = FALSE; - entry_ptr->is_read_only = FALSE; - entry_ptr->ro_ref_count = 0; - - entry_ptr->is_pinned = insert_pinned; - entry_ptr->pinned_from_client = insert_pinned; - entry_ptr->pinned_from_cache = FALSE; - entry_ptr->flush_me_last = flush_last; - - /* newly inserted entries are assumed to be dirty */ - entry_ptr->is_dirty = TRUE; - - /* not protected, so can't be dirtied */ - entry_ptr->dirtied = FALSE; - - /* Retrieve the size of the thing */ - if ((type->image_len)(thing, &(entry_ptr->size)) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "can't get size of thing") - HDassert(entry_ptr->size > 0 && entry_ptr->size < H5C_MAX_ENTRY_SIZE); - - entry_ptr->in_slist = FALSE; - -#ifdef H5_HAVE_PARALLEL - entry_ptr->clear_on_unprotect = FALSE; - entry_ptr->flush_immediately = FALSE; -#endif /* H5_HAVE_PARALLEL */ - - entry_ptr->flush_in_progress = FALSE; - entry_ptr->destroy_in_progress = FALSE; - - entry_ptr->ring = ring; - - /* Initialize flush dependency fields */ - entry_ptr->flush_dep_parent = NULL; - entry_ptr->flush_dep_nparents = 0; - entry_ptr->flush_dep_parent_nalloc = 0; - entry_ptr->flush_dep_nchildren = 0; - entry_ptr->flush_dep_ndirty_children = 0; - entry_ptr->flush_dep_nunser_children = 0; - - entry_ptr->ht_next = NULL; - entry_ptr->ht_prev = NULL; - entry_ptr->il_next = NULL; - entry_ptr->il_prev = NULL; - - entry_ptr->next = NULL; - entry_ptr->prev = NULL; - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - entry_ptr->aux_next = NULL; - entry_ptr->aux_prev = NULL; -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#ifdef H5_HAVE_PARALLEL - entry_ptr->coll_next = NULL; - entry_ptr->coll_prev = NULL; -#endif /* H5_HAVE_PARALLEL */ - - /* initialize cache image related fields */ - entry_ptr->include_in_image = FALSE; - entry_ptr->lru_rank = 0; - entry_ptr->image_dirty = FALSE; - entry_ptr->fd_parent_count = 0; - entry_ptr->fd_parent_addrs = NULL; - entry_ptr->fd_child_count = 0; - entry_ptr->fd_dirty_child_count = 0; - entry_ptr->image_fd_height = 0; - entry_ptr->prefetched = FALSE; - entry_ptr->prefetch_type_id = 0; - entry_ptr->age = 0; - entry_ptr->prefetched_dirty = FALSE; -#ifndef NDEBUG /* debugging field */ - entry_ptr->serialization_count = 0; -#endif - - /* initialize tag list fields */ - entry_ptr->tl_next = NULL; - entry_ptr->tl_prev = NULL; - entry_ptr->tag_info = 
NULL; - - /* Apply tag to newly inserted entry */ - if (H5C__tag_entry(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry") - entry_tagged = TRUE; - - H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) - - if (cache_ptr->flash_size_increase_possible && - (entry_ptr->size > cache_ptr->flash_size_increase_threshold)) - if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed") - - if (cache_ptr->index_size >= cache_ptr->max_cache_size) - empty_space = 0; - else - empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - - if (cache_ptr->evictions_enabled && - (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) || - (((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)))) { - size_t space_needed; - - if (empty_space <= entry_ptr->size) - cache_ptr->cache_full = TRUE; - - if (cache_ptr->check_write_permitted != NULL) { - if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted") - } /* end if */ - else - write_permitted = cache_ptr->write_permitted; - - HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE); - space_needed = entry_ptr->size; - if (space_needed > cache_ptr->max_cache_size) - space_needed = cache_ptr->max_cache_size; - - /* Note that space_needed is just the amount of space that - * needed to insert the new entry without exceeding the cache - * size limit. The subsequent call to H5C__make_space_in_cache() - * may evict the entries required to free more or less space - * depending on conditions. It MAY be less if the cache is - * currently undersized, or more if the cache is oversized. - * - * The cache can exceed its maximum size limit via the following - * mechanisms: - * - * First, it is possible for the cache to grow without - * bound as long as entries are protected and not unprotected. - * - * Second, when writes are not permitted it is also possible - * for the cache to grow without bound. - * - * Finally, we usually don't check to see if the cache is - * oversized at the end of an unprotect. As a result, it is - * possible to have a vastly oversized cache with no protected - * entries as long as all the protects precede the unprotects. - */ - - if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed") - } /* end if */ - - H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL) - - /* New entries are presumed to be dirty */ - HDassert(entry_ptr->is_dirty); - entry_ptr->flush_marker = set_flush_marker; - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL) - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* If the entry's type has a 'notify' callback send a 'after insertion' - * notice now that the entry is fully integrated into the cache. 
- */ - if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_INSERT, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry inserted into cache") - - H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) + if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry") -#ifdef H5_HAVE_PARALLEL - if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) - coll_access = H5F_get_coll_metadata_reads(f); - - entry_ptr->coll_access = coll_access; - if (coll_access) { - H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, FAIL) - - /* Make sure the size of the collective entries in the cache remain in check */ - if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) { - if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) { - if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries") - } /* end if */ - } /* end if */ - else { - if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) { - if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries") - } /* end if */ - } /* end else */ - } /* end if */ -#endif + cache_ptr->cache_hits = 0; + cache_ptr->cache_accesses = 0; done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - if (ret_value < 0 && entry_tagged) - if (H5C__untag_entry(cache_ptr, entry_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list") - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_insert_entry() */ +} /* H5C_reset_cache_hit_rate_stats() */ /*------------------------------------------------------------------------- - * Function: H5C_mark_entry_dirty - * - * Purpose: Mark a pinned or protected entry as dirty. The target entry - * MUST be either pinned or protected, and MAY be both. + * Function: H5C_set_cache_auto_resize_config * - * In the protected case, this call is the functional - * equivalent of setting the H5C__DIRTIED_FLAG on an unprotect - * call. + * Purpose: Set the cache automatic resize configuration to the + * provided values if they are in range, and fail if they + * are not. * - * In the pinned but not protected case, if the entry is not - * already dirty, the function places function marks the entry - * dirty and places it on the skip list. + * If the new configuration enables automatic cache resizing, + * coerce the cache max size and min clean size into agreement + * with the new policy and re-set the full cache hit rate + * stats. * - * Return: Non-negative on success/Negative on failure + * Return: SUCCEED on success, and FAIL on failure. 
* * Programmer: John Mainzer - * 5/15/06 + * 10/8/04 * *------------------------------------------------------------------------- */ herr_t -H5C_mark_entry_dirty(void *thing) +H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr) { - H5C_t *cache_ptr; - H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; - herr_t ret_value = SUCCEED; /* Return value */ + size_t new_max_cache_size; + size_t new_min_clean_size; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) - /* Sanity checks */ - HDassert(entry_ptr); - HDassert(H5F_addr_defined(entry_ptr->addr)); - cache_ptr = entry_ptr->cache_ptr; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry") + if (config_ptr == NULL) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry") + if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown config version") - if (entry_ptr->is_protected) { - HDassert(!((entry_ptr)->is_read_only)); + /* check general configuration section of the config: */ + if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_GENERAL) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in general configuration fields of new config") - /* set the dirtied flag */ - entry_ptr->dirtied = TRUE; + /* check size increase control fields of the config: */ + if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INCREMENT) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size increase control fields of new config") - /* reset image_up_to_date */ - if (entry_ptr->image_up_to_date) { - entry_ptr->image_up_to_date = FALSE; + /* check size decrease control fields of the config: */ + if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_DECREMENT) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size decrease control fields of new config") - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - } /* end if */ - else if (entry_ptr->is_pinned) { - hbool_t was_clean; /* Whether the entry was previously clean */ - hbool_t image_was_up_to_date; + /* check for conflicts between size increase and size decrease controls: */ + if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "conflicting threshold fields in new config") - /* Remember previous dirty status */ - was_clean = !entry_ptr->is_dirty; + /* will set the increase possible fields to FALSE later if needed */ + cache_ptr->size_increase_possible = TRUE; + cache_ptr->flash_size_increase_possible = TRUE; + cache_ptr->size_decrease_possible = TRUE; - /* Check if image is up to date */ - image_was_up_to_date = entry_ptr->image_up_to_date; + switch (config_ptr->incr_mode) { + case H5C_incr__off: + cache_ptr->size_increase_possible = FALSE; + break; - /* Mark the entry as dirty if it isn't already */ - entry_ptr->is_dirty = TRUE; - entry_ptr->image_up_to_date = FALSE; + case H5C_incr__threshold: + if ((config_ptr->lower_hr_threshold <= 0.0) || (config_ptr->increment <= 1.0) || + ((config_ptr->apply_max_increment) && (config_ptr->max_increment <= 0))) + cache_ptr->size_increase_possible = FALSE; + break; - /* Modify cache 
data structures */ - if (was_clean) - H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL) - if (!entry_ptr->in_slist) - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?") + } /* end switch */ - /* Update stats for entry being marked dirty */ - H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) + /* logically, this is where configuration for flash cache size increases + * should go. However, this configuration depends on max_cache_size, so + * we wait until the end of the function, when this field is set. + */ - /* Check for entry changing status and do notifications, etc. */ - if (was_clean) { - /* If the entry's type has a 'notify' callback send a 'entry dirtied' - * notice now that the entry is fully integrated into the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set") - - /* Propagate the dirty flag up the flush dependency chain if appropriate */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") - } /* end if */ - if (image_was_up_to_date) - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is neither pinned nor protected??") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_mark_entry_dirty() */ - -/*------------------------------------------------------------------------- - * Function: H5C_mark_entry_clean - * - * Purpose: Mark a pinned entry as clean. The target entry MUST be pinned. - * - * If the entry is not - * already clean, the function places function marks the entry - * clean and removes it from the skip list. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 7/23/16 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_mark_entry_clean(void *_thing) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_thing; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - HDassert(entry_ptr); - HDassert(H5F_addr_defined(entry_ptr->addr)); - cache_ptr = entry_ptr->cache_ptr; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - /* Operate on pinned entry */ - if (entry_ptr->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "entry is protected") - else if (entry_ptr->is_pinned) { - hbool_t was_dirty; /* Whether the entry was previously dirty */ - - /* Remember previous dirty status */ - was_dirty = entry_ptr->is_dirty; - - /* Mark the entry as clean if it isn't already */ - entry_ptr->is_dirty = FALSE; - - /* Also reset the 'flush_marker' flag, since the entry shouldn't be flushed now */ - entry_ptr->flush_marker = FALSE; - - /* Modify cache data structures */ - if (was_dirty) - H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL) - if (entry_ptr->in_slist) - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) - - /* Update stats for entry being marked clean */ - H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) - - /* Check for entry changing status and do notifications, etc. */ - if (was_dirty) { - /* If the entry's type has a 'notify' callback send a 'entry cleaned' - * notice now that the entry is fully integrated into the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify client about entry dirty flag cleared") - - /* Propagate the clean up the flush dependency chain, if appropriate */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_clean(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean") - } /* end if */ - } /* end if */ - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Entry is not pinned??") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_mark_entry_clean() */ - -/*------------------------------------------------------------------------- - * Function: H5C_mark_entry_unserialized - * - * Purpose: Mark a pinned or protected entry as unserialized. The target - * entry MUST be either pinned or protected, and MAY be both. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 12/23/16 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_mark_entry_unserialized(void *thing) -{ - H5C_cache_entry_t *entry = (H5C_cache_entry_t *)thing; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - HDassert(entry); - HDassert(H5F_addr_defined(entry->addr)); - - if (entry->is_protected || entry->is_pinned) { - HDassert(!entry->is_read_only); - - /* Reset image_up_to_date */ - if (entry->image_up_to_date) { - entry->image_up_to_date = FALSE; - - if (entry->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - } /* end if */ - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL, - "Entry to unserialize is neither pinned nor protected??") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_mark_entry_unserialized() */ - -/*------------------------------------------------------------------------- - * Function: H5C_mark_entry_serialized - * - * Purpose: Mark a pinned entry as serialized. The target entry MUST be - * pinned. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 12/23/16 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_mark_entry_serialized(void *_thing) -{ - H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_thing; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - HDassert(entry); - HDassert(H5F_addr_defined(entry->addr)); - - /* Operate on pinned entry */ - if (entry->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "entry is protected") - else if (entry->is_pinned) { - /* Check for entry changing status and do notifications, etc. */ - if (!entry->image_up_to_date) { - /* Set the image_up_to_date flag */ - entry->image_up_to_date = TRUE; - - /* Propagate the serialize up the flush dependency chain, if appropriate */ - if (entry->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_serialized(entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, - "Can't propagate flush dep serialize") - } /* end if */ - } /* end if */ - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "Entry is not pinned??") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_mark_entry_serialized() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C_move_entry - * - * Purpose: Use this function to notify the cache that an entry's - * file address changed. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 6/2/04 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, haddr_t new_addr) -{ - H5C_cache_entry_t *entry_ptr = NULL; - H5C_cache_entry_t *test_entry_ptr = NULL; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) + switch (config_ptr->decr_mode) { + case H5C_decr__off: + cache_ptr->size_decrease_possible = FALSE; + break; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(type); - HDassert(H5F_addr_defined(old_addr)); - HDassert(H5F_addr_defined(new_addr)); - HDassert(H5F_addr_ne(old_addr, new_addr)); + case H5C_decr__threshold: + if (config_ptr->upper_hr_threshold >= 1.0 || config_ptr->decrement >= 1.0 || + (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) + cache_ptr->size_decrease_possible = FALSE; + break; -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + case H5C_decr__age_out: + if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || + (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) + cache_ptr->size_decrease_possible = FALSE; + break; - H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL) + case H5C_decr__age_out_with_threshold: + if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || + (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0) || + config_ptr->upper_hr_threshold >= 1.0) + cache_ptr->size_decrease_possible = FALSE; + break; - if (entry_ptr == NULL || entry_ptr->type != type) - /* the old item doesn't exist in the cache, so we are done. */ - HGOTO_DONE(SUCCEED) + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?") + } /* end switch */ - HDassert(entry_ptr->addr == old_addr); - HDassert(entry_ptr->type == type); + if (config_ptr->max_size == config_ptr->min_size) { + cache_ptr->size_increase_possible = FALSE; + cache_ptr->flash_size_increase_possible = FALSE; + cache_ptr->size_decrease_possible = FALSE; + } /* end if */ - /* Check for R/W status, otherwise error */ - /* (Moving a R/O entry would mark it dirty, which shouldn't - * happen. QAK - 2016/12/02) + /* flash_size_increase_possible is intentionally omitted from the + * following: */ - if (entry_ptr->is_read_only) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move R/O entry") - - H5C__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL) - - if (test_entry_ptr != NULL) { /* we are hosed */ - if (test_entry_ptr->type == type) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "target already moved & reinserted???") - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "new address already in use?") - } /* end if */ + cache_ptr->resize_enabled = cache_ptr->size_increase_possible || cache_ptr->size_decrease_possible; + cache_ptr->resize_ctl = *config_ptr; - /* If we get this far we have work to do. Remove *entry_ptr from - * the hash table (and skip list if necessary), change its address to the - * new address, mark it as dirty (if it isn't already) and then re-insert. 
- * - * Update the replacement policy for a hit to avoid an eviction before - * the moved entry is touched. Update stats for a move. - * - * Note that we do not check the size of the cache, or evict anything. - * Since this is a simple re-name, cache size should be unaffected. + /* Resize the cache to the supplied initial value if requested, or as + * necessary to force it within the bounds of the current automatic + * cache resizing configuration. * - * Check to see if the target entry is in the process of being destroyed - * before we delete from the index, etc. If it is, all we do is - * change the addr. If the entry is only in the process of being flushed, - * don't mark it as dirty either, lest we confuse the flush call back. + * Note that the min_clean_fraction may have changed, so we + * go through the exercise even if the current size is within + * range and an initial size has not been provided. */ - if (!entry_ptr->destroy_in_progress) { - H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL) - - if (entry_ptr->in_slist) { - HDassert(cache_ptr->slist_ptr); - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) - } /* end if */ - } /* end if */ - - entry_ptr->addr = new_addr; - - if (!entry_ptr->destroy_in_progress) { - hbool_t was_dirty; /* Whether the entry was previously dirty */ - - /* Remember previous dirty status */ - was_dirty = entry_ptr->is_dirty; - - /* Mark the entry as dirty if it isn't already */ - entry_ptr->is_dirty = TRUE; - - /* This shouldn't be needed, but it keeps the test code happy */ - if (entry_ptr->image_up_to_date) { - entry_ptr->image_up_to_date = FALSE; - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - - /* Modify cache data structures */ - H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL) - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - - /* Skip some actions if we're in the middle of flushing the entry */ - if (!entry_ptr->flush_in_progress) { - /* Update the replacement policy for the entry */ - H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL) - - /* Check for entry changing status and do notifications, etc. */ - if (!was_dirty) { - /* If the entry's type has a 'notify' callback send a 'entry dirtied' - * notice now that the entry is fully integrated into the cache. 
- */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify client about entry dirty flag set") - - /* Propagate the dirty flag up the flush dependency chain if appropriate */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, - "Can't propagate flush dep dirty flag") - } /* end if */ - } /* end if */ - } /* end if */ - - H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_move_entry() */ - -/*------------------------------------------------------------------------- - * Function: H5C_resize_entry - * - * Purpose: Resize a pinned or protected entry. - * - * Resizing an entry dirties it, so if the entry is not - * already dirty, the function places the entry on the - * skip list. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 7/5/06 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_resize_entry(void *thing, size_t new_size) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - HDassert(entry_ptr); - HDassert(H5F_addr_defined(entry_ptr->addr)); - cache_ptr = entry_ptr->cache_ptr; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - /* Check for usage errors */ - if (new_size <= 0) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive") - if (!(entry_ptr->is_pinned || entry_ptr->is_protected)) - HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??") - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* update for change in entry size if necessary */ - if (entry_ptr->size != new_size) { - hbool_t was_clean; - - /* make note of whether the entry was clean to begin with */ - was_clean = !entry_ptr->is_dirty; - - /* mark the entry as dirty if it isn't already */ - entry_ptr->is_dirty = TRUE; - - /* Reset the image up-to-date status */ - if (entry_ptr->image_up_to_date) { - entry_ptr->image_up_to_date = FALSE; - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - - /* Release the current image */ - if (entry_ptr->image_ptr) - entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); - - /* do a flash cache size increase if appropriate */ - if (cache_ptr->flash_size_increase_possible) { - if (new_size > entry_ptr->size) { - size_t size_increase; - - size_increase = new_size - entry_ptr->size; - if (size_increase >= cache_ptr->flash_size_increase_threshold) - if (H5C__flash_increase_cache_size(cache_ptr, entry_ptr->size, new_size) < 0) - HGOTO_ERROR(H5E_CACHE, 
H5E_CANTRESIZE, FAIL, "flash cache increase failed") - } - } - - /* update the pinned and/or protected entry list */ - if (entry_ptr->is_pinned) - H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pel_len, cache_ptr->pel_size, entry_ptr->size, - new_size, FAIL) - if (entry_ptr->is_protected) - H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pl_len, cache_ptr->pl_size, entry_ptr->size, new_size, - FAIL) - -#ifdef H5_HAVE_PARALLEL - if (entry_ptr->coll_access) - H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->coll_list_len, cache_ptr->coll_list_size, - entry_ptr->size, new_size, FAIL) -#endif /* H5_HAVE_PARALLEL */ - - /* update statistics just before changing the entry size */ - H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size); - - /* update the hash table */ - H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size, entry_ptr, was_clean, FAIL); - - /* if the entry is in the skip list, update that too */ - if (entry_ptr->in_slist) - H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size); - - /* finally, update the entry size proper */ - entry_ptr->size = new_size; - - if (!entry_ptr->in_slist) - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - - if (entry_ptr->is_pinned) - H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) - - /* Check for entry changing status and do notifications, etc. */ - if (was_clean) { - /* If the entry's type has a 'notify' callback send a 'entry dirtied' - * notice now that the entry is fully integrated into the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set") - - /* Propagate the dirty flag up the flush dependency chain if appropriate */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") - } /* end if */ - } /* end if */ - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_resize_entry() */ - -/*------------------------------------------------------------------------- - * Function: H5C_pin_protected_entry() - * - * Purpose: Pin a protected cache entry. The entry must be protected - * at the time of call, and must be unpinned. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 4/26/06 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_pin_protected_entry(void *thing) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; /* Pointer to entry to pin */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - HDassert(entry_ptr); - HDassert(H5F_addr_defined(entry_ptr->addr)); - cache_ptr = entry_ptr->cache_ptr; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* Only protected entries can be pinned */ - if (!entry_ptr->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry isn't protected") - - /* Pin the entry from a client */ - if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_pin_protected_entry() */ - -/*------------------------------------------------------------------------- - * Function: H5C_protect - * - * Purpose: If the target entry is not in the cache, load it. If - * necessary, attempt to evict one or more entries to keep - * the cache within its maximum size. - * - * Mark the target entry as protected, and return its address - * to the caller. The caller must call H5C_unprotect() when - * finished with the entry. - * - * While it is protected, the entry may not be either evicted - * or flushed -- nor may it be accessed by another call to - * H5C_protect. Any attempt to do so will result in a failure. 
- * - * Return: Success: Ptr to the desired entry - * Failure: NULL - * - * Programmer: John Mainzer - 6/2/04 - * - *------------------------------------------------------------------------- - */ -void * -H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsigned flags) -{ - H5C_t *cache_ptr; - H5AC_ring_t ring = H5C_RING_UNDEFINED; - hbool_t hit; - hbool_t have_write_permitted = FALSE; - hbool_t read_only = FALSE; - hbool_t flush_last; -#ifdef H5_HAVE_PARALLEL - hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */ -#endif /* H5_HAVE_PARALLEL */ - hbool_t write_permitted = FALSE; - hbool_t was_loaded = FALSE; /* Whether the entry was loaded as a result of the protect */ - size_t empty_space; - void *thing; - H5C_cache_entry_t *entry_ptr; - void *ret_value = NULL; /* Return value */ - - FUNC_ENTER_NOAPI(NULL) - - /* check args */ - HDassert(f); - HDassert(f->shared); - - cache_ptr = f->shared->cache; - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(type); - HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); - HDassert(H5F_addr_defined(addr)); - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* Load the cache image, if requested */ - if (cache_ptr->load_image) { - cache_ptr->load_image = FALSE; - if (H5C__load_cache_image(f) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't load cache image") - } /* end if */ - - read_only = ((flags & H5C__READ_ONLY_FLAG) != 0); - flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0); - - /* Get the ring type from the API context */ - ring = H5CX_get_ring(); - -#ifdef H5_HAVE_PARALLEL - if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) - coll_access = H5F_get_coll_metadata_reads(f); -#endif /* H5_HAVE_PARALLEL */ - - /* first check to see if the target is in cache */ - H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL) - - if (entry_ptr != NULL) { - if (entry_ptr->ring != ring) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry") - - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - - if (entry_ptr->prefetched) { - /* This call removes the prefetched entry from the cache, - * and replaces it with an entry deserialized from the - * image of the prefetched entry. - */ - if (H5C__deserialize_prefetched_entry(f, cache_ptr, &entry_ptr, type, addr, udata) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry") - - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(!entry_ptr->prefetched); - HDassert(entry_ptr->addr == addr); - } /* end if */ - - /* Check for trying to load the wrong type of entry from an address */ - if (entry_ptr->type != type) - HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type") - -#ifdef H5_HAVE_PARALLEL - /* If this is a collective metadata read, the entry is not marked as - * collective, and is clean, it is possible that other processes will - * not have it in its cache and will expect a bcast of the entry from - * process 0. So process 0 will bcast the entry to all other ranks. - * Ranks that _do_ have the entry in their cache still have to - * participate in the bcast. 
- */ - if (coll_access) { - if (!entry_ptr->is_dirty && !entry_ptr->coll_access) { - MPI_Comm comm; /* File MPI Communicator */ - int mpi_code; /* MPI error code */ - int buf_size; - - if (MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f))) - HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed") - - if (entry_ptr->image_ptr == NULL) { - int mpi_rank; - - if ((mpi_rank = H5F_mpi_get_rank(f)) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank") - - if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, - "memory allocation failed for on disk image buffer") -#if H5C_DO_MEMORY_SANITY_CHECKS - H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, - H5C_IMAGE_EXTRA_SPACE); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - if (0 == mpi_rank && H5C__generate_image(f, cache_ptr, entry_ptr) < 0) - /* If image generation fails, push an error but - * still participate in the following MPI_Bcast - */ - HDONE_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image") - } /* end if */ - HDassert(entry_ptr->image_ptr); - - H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t); - if (MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm))) - HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) - - /* Mark the entry as collective and insert into the collective list */ - entry_ptr->coll_access = TRUE; - H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL) - } /* end if */ - else if (entry_ptr->coll_access) - H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - -#ifdef H5C_DO_TAGGING_SANITY_CHECKS - { - /* Verify tag value */ - if (cache_ptr->ignore_tags != TRUE) { - haddr_t tag; /* Tag value */ - - /* The entry is already in the cache, but make sure that the tag value - * is still legal. This will ensure that had the entry NOT been in the - * cache, tagging was still set up correctly and it would have received - * a legal tag value after getting loaded from disk. - */ - - /* Get the tag */ - tag = H5CX_get_tag(); - - if (H5C_verify_tag(entry_ptr->type->id, tag) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed") - } /* end if */ - } -#endif - - hit = TRUE; - thing = (void *)entry_ptr; - } - else { - /* must try to load the entry from disk. */ - hit = FALSE; - if (NULL == (thing = H5C__load_entry(f, -#ifdef H5_HAVE_PARALLEL - coll_access, -#endif /* H5_HAVE_PARALLEL */ - type, addr, udata))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry") - - entry_ptr = (H5C_cache_entry_t *)thing; - cache_ptr->entries_loaded_counter++; - - entry_ptr->ring = ring; -#ifdef H5_HAVE_PARALLEL - if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access) - H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL) -#endif /* H5_HAVE_PARALLEL */ - - /* Apply tag to newly protected entry */ - if (H5C__tag_entry(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry") - - /* If the entry is very large, and we are configured to allow it, - * we may wish to perform a flash cache size increase. 
- */ - if (cache_ptr->flash_size_increase_possible && - (entry_ptr->size > cache_ptr->flash_size_increase_threshold)) - if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed") - - if (cache_ptr->index_size >= cache_ptr->max_cache_size) - empty_space = 0; - else - empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - - /* try to free up if necceary and if evictions are permitted. Note - * that if evictions are enabled, we will call H5C__make_space_in_cache() - * regardless if the min_free_space requirement is not met. - */ - if (cache_ptr->evictions_enabled && - (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) || - ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size))) { - - size_t space_needed; - - if (empty_space <= entry_ptr->size) - cache_ptr->cache_full = TRUE; - - if (cache_ptr->check_write_permitted != NULL) { - if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1") - else - have_write_permitted = TRUE; - } /* end if */ - else { - write_permitted = cache_ptr->write_permitted; - have_write_permitted = TRUE; - } /* end else */ - - HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE); - space_needed = entry_ptr->size; - if (space_needed > cache_ptr->max_cache_size) - space_needed = cache_ptr->max_cache_size; - - /* Note that space_needed is just the amount of space that - * needed to insert the new entry without exceeding the cache - * size limit. The subsequent call to H5C__make_space_in_cache() - * may evict the entries required to free more or less space - * depending on conditions. It MAY be less if the cache is - * currently undersized, or more if the cache is oversized. - * - * The cache can exceed its maximum size limit via the following - * mechanisms: - * - * First, it is possible for the cache to grow without - * bound as long as entries are protected and not unprotected. - * - * Second, when writes are not permitted it is also possible - * for the cache to grow without bound. - * - * Third, the user may choose to disable evictions -- causing - * the cache to grow without bound until evictions are - * re-enabled. - * - * Finally, we usually don't check to see if the cache is - * oversized at the end of an unprotect. As a result, it is - * possible to have a vastly oversized cache with no protected - * entries as long as all the protects precede the unprotects. - */ - if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed") - } /* end if */ - - /* Insert the entry in the hash table. - * - * ******************************************* - * - * Set the flush_me_last field - * of the newly loaded entry before inserting it into the - * index. Must do this, as the index tracked the number of - * entries with the flush_last field set, but assumes that - * the field will not change after insertion into the index. - * - * Note that this means that the H5C__FLUSH_LAST_FLAG flag - * is ignored if the entry is already in cache. - */ - entry_ptr->flush_me_last = flush_last; - - H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, NULL) - if (entry_ptr->is_dirty && !entry_ptr->in_slist) - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, NULL) - - /* insert the entry in the data structures used by the replacement - * policy. 
We are just going to take it out again when we update - * the replacement policy for a protect, but this simplifies the - * code. If we do this often enough, we may want to optimize this. - */ - H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL) - - /* Record that the entry was loaded, to trigger a notify callback later */ - /* (After the entry is fully added to the cache) */ - was_loaded = TRUE; - } /* end else */ - - HDassert(entry_ptr->addr == addr); - HDassert(entry_ptr->type == type); - - if (entry_ptr->is_protected) { - if (read_only && entry_ptr->is_read_only) { - HDassert(entry_ptr->ro_ref_count > 0); - (entry_ptr->ro_ref_count)++; - } /* end if */ - else - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?") - } /* end if */ - else { - H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL) - - entry_ptr->is_protected = TRUE; - if (read_only) { - entry_ptr->is_read_only = TRUE; - entry_ptr->ro_ref_count = 1; - } /* end if */ - entry_ptr->dirtied = FALSE; - } /* end else */ - - H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) - H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) - - ret_value = thing; - - if (cache_ptr->evictions_enabled && - (cache_ptr->size_decreased || - (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)))) { - - if (!have_write_permitted) { - if (cache_ptr->check_write_permitted != NULL) { - if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted") - else - have_write_permitted = TRUE; - } - else { - write_permitted = cache_ptr->write_permitted; - have_write_permitted = TRUE; - } - } - - if (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)) - if (H5C__auto_adjust_cache_size(f, write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed") - - if (cache_ptr->size_decreased) { - cache_ptr->size_decreased = FALSE; - - /* check to see if the cache is now oversized due to the cache - * size reduction. If it is, try to evict enough entries to - * bring the cache size down to the current maximum cache size. - * - * Also, if the min_clean_size requirement is not met, we - * should also call H5C__make_space_in_cache() to bring us - * into compliance. - */ - if (cache_ptr->index_size >= cache_ptr->max_cache_size) - empty_space = 0; - else - empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - - if ((cache_ptr->index_size > cache_ptr->max_cache_size) || - ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)) { - - if (cache_ptr->index_size > cache_ptr->max_cache_size) - cache_ptr->cache_full = TRUE; - - if (H5C__make_space_in_cache(f, (size_t)0, write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed") - } - } /* end if */ - } - - /* If we loaded the entry and the entry's type has a 'notify' callback, send - * an 'after load' notice now that the entry is fully integrated into - * the cache and protected. We must wait until it is protected so it is not - * evicted during the notify callback. - */ - if (was_loaded) - /* If the entry's type has a 'notify' callback send a 'after load' - * notice now that the entry is fully integrated into the cache. 
- */ - if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, NULL, - "can't notify client about entry inserted into cache") - -#ifdef H5_HAVE_PARALLEL - /* Make sure the size of the collective entries in the cache remain in check */ - if (coll_access) { - if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) { - if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) - if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries") - } /* end if */ - else { - if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) - if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries") - } /* end else */ - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_protect() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C_reset_cache_hit_rate_stats() - * - * Purpose: Reset the cache hit rate computation fields. - * - * Return: SUCCEED on success, and FAIL on failure. - * - * Programmer: John Mainzer, 10/5/04 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry") - - cache_ptr->cache_hits = 0; - cache_ptr->cache_accesses = 0; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_reset_cache_hit_rate_stats() */ - -/*------------------------------------------------------------------------- - * Function: H5C_set_cache_auto_resize_config - * - * Purpose: Set the cache automatic resize configuration to the - * provided values if they are in range, and fail if they - * are not. - * - * If the new configuration enables automatic cache resizing, - * coerce the cache max size and min clean size into agreement - * with the new policy and re-set the full cache hit rate - * stats. - * - * Return: SUCCEED on success, and FAIL on failure. 
- * - * Programmer: John Mainzer - * 10/8/04 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr) -{ - size_t new_max_cache_size; - size_t new_min_clean_size; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry") - if (config_ptr == NULL) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry") - if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown config version") - - /* check general configuration section of the config: */ - if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_GENERAL) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in general configuration fields of new config") - - /* check size increase control fields of the config: */ - if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INCREMENT) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size increase control fields of new config") - - /* check size decrease control fields of the config: */ - if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_DECREMENT) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size decrease control fields of new config") - - /* check for conflicts between size increase and size decrease controls: */ - if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "conflicting threshold fields in new config") - - /* will set the increase possible fields to FALSE later if needed */ - cache_ptr->size_increase_possible = TRUE; - cache_ptr->flash_size_increase_possible = TRUE; - cache_ptr->size_decrease_possible = TRUE; - - switch (config_ptr->incr_mode) { - case H5C_incr__off: - cache_ptr->size_increase_possible = FALSE; - break; - - case H5C_incr__threshold: - if ((config_ptr->lower_hr_threshold <= 0.0) || (config_ptr->increment <= 1.0) || - ((config_ptr->apply_max_increment) && (config_ptr->max_increment <= 0))) - cache_ptr->size_increase_possible = FALSE; - break; - - default: /* should be unreachable */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?") - } /* end switch */ - - /* logically, this is where configuration for flash cache size increases - * should go. However, this configuration depends on max_cache_size, so - * we wait until the end of the function, when this field is set. 
- */ - - switch (config_ptr->decr_mode) { - case H5C_decr__off: - cache_ptr->size_decrease_possible = FALSE; - break; - - case H5C_decr__threshold: - if (config_ptr->upper_hr_threshold >= 1.0 || config_ptr->decrement >= 1.0 || - (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) - cache_ptr->size_decrease_possible = FALSE; - break; - - case H5C_decr__age_out: - if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || - (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0)) - cache_ptr->size_decrease_possible = FALSE; - break; - - case H5C_decr__age_out_with_threshold: - if ((config_ptr->apply_empty_reserve && config_ptr->empty_reserve >= 1.0) || - (config_ptr->apply_max_decrement && config_ptr->max_decrement <= 0) || - config_ptr->upper_hr_threshold >= 1.0) - cache_ptr->size_decrease_possible = FALSE; - break; - - default: /* should be unreachable */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?") - } /* end switch */ - - if (config_ptr->max_size == config_ptr->min_size) { - cache_ptr->size_increase_possible = FALSE; - cache_ptr->flash_size_increase_possible = FALSE; - cache_ptr->size_decrease_possible = FALSE; - } /* end if */ - - /* flash_size_increase_possible is intentionally omitted from the - * following: - */ - cache_ptr->resize_enabled = cache_ptr->size_increase_possible || cache_ptr->size_decrease_possible; - cache_ptr->resize_ctl = *config_ptr; - - /* Resize the cache to the supplied initial value if requested, or as - * necessary to force it within the bounds of the current automatic - * cache resizing configuration. - * - * Note that the min_clean_fraction may have changed, so we - * go through the exercise even if the current size is within - * range and an initial size has not been provided. - */ - if (cache_ptr->resize_ctl.set_initial_size) - new_max_cache_size = cache_ptr->resize_ctl.initial_size; - else if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.max_size) - new_max_cache_size = cache_ptr->resize_ctl.max_size; - else if (cache_ptr->max_cache_size < cache_ptr->resize_ctl.min_size) - new_max_cache_size = cache_ptr->resize_ctl.min_size; - else - new_max_cache_size = cache_ptr->max_cache_size; - - new_min_clean_size = (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction)); - - /* since new_min_clean_size is of type size_t, we have - * - * ( 0 <= new_min_clean_size ) - * - * by definition. - */ - HDassert(new_min_clean_size <= new_max_cache_size); - HDassert(cache_ptr->resize_ctl.min_size <= new_max_cache_size); - HDassert(new_max_cache_size <= cache_ptr->resize_ctl.max_size); - - if (new_max_cache_size < cache_ptr->max_cache_size) - cache_ptr->size_decreased = TRUE; - - cache_ptr->max_cache_size = new_max_cache_size; - cache_ptr->min_clean_size = new_min_clean_size; - - if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) - /* this should be impossible... 
*/ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed") - - /* remove excess epoch markers if any */ - if ((config_ptr->decr_mode == H5C_decr__age_out_with_threshold) || - (config_ptr->decr_mode == H5C_decr__age_out)) { - if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) - if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers") - } /* end if */ - else if (cache_ptr->epoch_markers_active > 0) { - if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers") - } - - /* configure flash size increase facility. We wait until the - * end of the function, as we need the max_cache_size set before - * we start to keep things simple. - * - * If we haven't already ruled out flash cache size increases above, - * go ahead and configure it. - */ - if (cache_ptr->flash_size_increase_possible) { - switch (config_ptr->flash_incr_mode) { - case H5C_flash_incr__off: - cache_ptr->flash_size_increase_possible = FALSE; - break; - - case H5C_flash_incr__add_space: - cache_ptr->flash_size_increase_possible = TRUE; - cache_ptr->flash_size_increase_threshold = - (size_t)(((double)(cache_ptr->max_cache_size)) * (cache_ptr->resize_ctl.flash_threshold)); - break; - - default: /* should be unreachable */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") - break; - } /* end switch */ - } /* end if */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_set_cache_auto_resize_config() */ - -/*------------------------------------------------------------------------- - * Function: H5C_set_evictions_enabled() - * - * Purpose: Set cache_ptr->evictions_enabled to the value of the - * evictions enabled parameter. - * - * Return: SUCCEED on success, and FAIL on failure. - * - * Programmer: John Mainzer - * 7/27/07 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry") - - /* There is no fundamental reason why we should not permit - * evictions to be disabled while automatic resize is enabled. - * However, allowing it would greatly complicate testing - * the feature. Hence the following: - */ - if ((evictions_enabled != TRUE) && ((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) || - (cache_ptr->resize_ctl.decr_mode != H5C_decr__off))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't disable evictions when auto resize enabled") - - cache_ptr->evictions_enabled = evictions_enabled; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_set_evictions_enabled() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C_set_slist_enabled() - * - * Purpose: Enable or disable the slist as directed. - * - * The slist (skip list) is an address ordered list of - * dirty entries in the metadata cache. However, this - * list is only needed during flush and close, where we - * use it to write entries in more or less increasing - * address order. - * - * This function sets up and enables further operations - * on the slist, or disable the slist. 
This in turn - * allows us to avoid the overhead of maintaining the - * slist when it is not needed. - * - * - * If the slist_enabled parameter is TRUE, the function - * - * 1) Verifies that the slist is empty. - * - * 2) Scans the index list, and inserts all dirty entries - * into the slist. - * - * 3) Sets cache_ptr->slist_enabled = TRUE. - * - * Note that the clear_slist parameter is ignored if - * the slist_enabed parameter is TRUE. - * - * - * If the slist_enabled_parameter is FALSE, the function - * shuts down the slist. - * - * Normally the slist will be empty at this point, however - * that need not be the case if H5C_flush_cache() has been - * called with the H5C__FLUSH_MARKED_ENTRIES_FLAG. - * - * Thus shutdown proceeds as follows: - * - * 1) Test to see if the slist is empty. If it is, proceed - * to step 3. - * - * 2) Test to see if the clear_slist parameter is TRUE. - * - * If it is, remove all entries from the slist. - * - * If it isn't, throw an error. - * - * 3) set cache_ptr->slist_enabled = FALSE. - * - * Return: SUCCEED on success, and FAIL on failure. - * - * Programmer: John Mainzer - * 5/1/20 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_slist) -{ - H5C_cache_entry_t *entry_ptr; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry") - - if (slist_enabled) { - if (cache_ptr->slist_enabled) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already enabled?") - if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?") - - /* set cache_ptr->slist_enabled to TRUE so that the slist - * maintenance macros will be enabled. - */ - cache_ptr->slist_enabled = TRUE; - - /* scan the index list and insert all dirty entries in the slist */ - entry_ptr = cache_ptr->il_head; - while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - if (entry_ptr->is_dirty) - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - entry_ptr = entry_ptr->il_next; - } - - /* we don't maintain a dirty index len, so we can't do a cross - * check against it. Note that there is no point in cross checking - * against the dirty LRU size, as the dirty LRU may not be maintained, - * and in any case, there is no requirement that all dirty entries - * will reside on the dirty LRU. 
- */ - HDassert(cache_ptr->dirty_index_size == cache_ptr->slist_size); - } - else { /* take down the skip list */ - if (!cache_ptr->slist_enabled) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already disabled?") - - if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) { - if (clear_slist) { - H5SL_node_t *node_ptr; - - node_ptr = H5SL_first(cache_ptr->slist_ptr); - while (node_ptr != NULL) { - entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) - node_ptr = H5SL_first(cache_ptr->slist_ptr); - } - } - else - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?") - } - - cache_ptr->slist_enabled = FALSE; - - HDassert(0 == cache_ptr->slist_len); - HDassert(0 == cache_ptr->slist_size); - } - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_set_slist_enabled() */ - -/*------------------------------------------------------------------------- - * Function: H5C_unpin_entry() - * - * Purpose: Unpin a cache entry. The entry can be either protected or - * unprotected at the time of call, but must be pinned. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 3/22/06 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_unpin_entry(void *_entry_ptr) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_entry_ptr; /* Pointer to entry to unpin */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity check */ - HDassert(entry_ptr); - cache_ptr = entry_ptr->cache_ptr; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* Unpin the entry */ - if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry from client") - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_unpin_entry() */ - -/*------------------------------------------------------------------------- - * Function: H5C_unprotect - * - * Purpose: Undo an H5C_protect() call -- specifically, mark the - * entry as unprotected, remove it from the protected list, - * and give it back to the replacement policy. - * - * The TYPE and ADDR arguments must be the same as those in - * the corresponding call to H5C_protect() and the THING - * argument must be the value returned by that call to - * H5C_protect(). - * - * Return: Non-negative on success/Negative on failure - * - * If the deleted flag is TRUE, simply remove the target entry - * from the cache, clear it, and free it without writing it to - * disk. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 6/2/04 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) -{ - H5C_t *cache_ptr; - hbool_t deleted; - hbool_t dirtied; - hbool_t set_flush_marker; - hbool_t pin_entry; - hbool_t unpin_entry; - hbool_t free_file_space; - hbool_t take_ownership; - hbool_t was_clean; -#ifdef H5_HAVE_PARALLEL - hbool_t clear_entry = FALSE; -#endif /* H5_HAVE_PARALLEL */ - H5C_cache_entry_t *entry_ptr; - H5C_cache_entry_t *test_entry_ptr; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - deleted = ((flags & H5C__DELETED_FLAG) != 0); - dirtied = ((flags & H5C__DIRTIED_FLAG) != 0); - set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0); - pin_entry = ((flags & H5C__PIN_ENTRY_FLAG) != 0); - unpin_entry = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0); - free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0); - take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0); - - HDassert(f); - HDassert(f->shared); - - cache_ptr = f->shared->cache; - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(H5F_addr_defined(addr)); - HDassert(thing); - HDassert(!(pin_entry && unpin_entry)); - - /* deleted flag must accompany free_file_space */ - HDassert((!free_file_space) || (deleted)); - - /* deleted flag must accompany take_ownership */ - HDassert((!take_ownership) || (deleted)); - - /* can't have both free_file_space & take_ownership */ - HDassert(!(free_file_space && take_ownership)); - - entry_ptr = (H5C_cache_entry_t *)thing; - HDassert(entry_ptr->addr == addr); - - /* also set the dirtied variable if the dirtied field is set in - * the entry. - */ - dirtied |= entry_ptr->dirtied; - was_clean = !(entry_ptr->is_dirty); - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - /* if the entry has multiple read only protects, just decrement - * the ro_ref_counter. Don't actually unprotect until the ref count - * drops to zero. - */ - if (entry_ptr->ro_ref_count > 1) { - /* Sanity check */ - HDassert(entry_ptr->is_protected); - HDassert(entry_ptr->is_read_only); - - if (dirtied) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??") - - /* Reduce the RO ref count */ - (entry_ptr->ro_ref_count)--; - - /* Pin or unpin the entry as requested. 
*/ - if (pin_entry) { - /* Pin the entry from a client */ - if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") - } - else if (unpin_entry) { - /* Unpin the entry from a client */ - if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client") - } /* end if */ - } - else { - if (entry_ptr->is_read_only) { - /* Sanity check */ - HDassert(entry_ptr->ro_ref_count == 1); - - if (dirtied) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??") - - entry_ptr->is_read_only = FALSE; - entry_ptr->ro_ref_count = 0; - } /* end if */ - -#ifdef H5_HAVE_PARALLEL - /* When the H5C code is used to implement the metadata cache in the - * PHDF5 case, only the cache on process 0 is allowed to write to file. - * All the other metadata caches must hold dirty entries until they - * are told that the entries are clean. - * - * The clear_on_unprotect flag in the H5C_cache_entry_t structure - * exists to deal with the case in which an entry is protected when - * its cache receives word that the entry is now clean. In this case, - * the clear_on_unprotect flag is set, and the entry is flushed with - * the H5C__FLUSH_CLEAR_ONLY_FLAG. - * - * All this is a bit awkward, but until the metadata cache entries - * are contiguous, with only one dirty flag, we have to let the supplied - * functions deal with the resetting the is_dirty flag. - */ - if (entry_ptr->clear_on_unprotect) { - /* Sanity check */ - HDassert(entry_ptr->is_dirty); - - entry_ptr->clear_on_unprotect = FALSE; - if (!dirtied) - clear_entry = TRUE; - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - - if (!entry_ptr->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??") - - /* Mark the entry as dirty if appropriate */ - entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied); - if (dirtied && entry_ptr->image_up_to_date) { - entry_ptr->image_up_to_date = FALSE; - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "Can't propagate serialization status to fd parents") - } /* end if */ - - /* Check for newly dirtied entry */ - if (was_clean && entry_ptr->is_dirty) { - /* Update index for newly dirtied entry */ - H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL) - - /* If the entry's type has a 'notify' callback send a - * 'entry dirtied' notice now that the entry is fully - * integrated into the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set") - - /* Propagate the flush dep dirty flag up the flush dependency chain - * if appropriate - */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") - } /* end if */ - /* Check for newly clean entry */ - else if (!was_clean && !entry_ptr->is_dirty) { - - /* If the entry's type has a 'notify' callback send a - * 'entry cleaned' notice now that the entry is fully - * integrated into the cache. 
- */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify client about entry dirty flag cleared") - - /* Propagate the flush dep clean flag up the flush dependency chain - * if appropriate - */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_clean(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") - } /* end else-if */ - - /* Pin or unpin the entry as requested. */ - if (pin_entry) { - /* Pin the entry from a client */ - if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") - } - else if (unpin_entry) { - /* Unpin the entry from a client */ - if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client") - } /* end if */ - - /* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on - * the pinned entry list if entry_ptr->is_pinned is TRUE. - */ - H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL) - - entry_ptr->is_protected = FALSE; - - /* if the entry is dirty, 'or' its flush_marker with the set flush flag, - * and then add it to the skip list if it isn't there already. - */ - if (entry_ptr->is_dirty) { - entry_ptr->flush_marker |= set_flush_marker; - if (!entry_ptr->in_slist) - /* this is a no-op if cache_ptr->slist_enabled is FALSE */ - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - } /* end if */ - - /* This implementation of the "deleted" option is a bit inefficient, as - * we re-insert the entry to be deleted into the replacement policy - * data structures, only to remove them again. Depending on how often - * we do this, we may want to optimize a bit. - */ - if (deleted) { - unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__FLUSH_INVALIDATE_FLAG); - - /* verify that the target entry is in the cache. */ - H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) - - if (test_entry_ptr == NULL) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?") - else if (test_entry_ptr != entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, - "hash table contains multiple entries for addr?!?") - - /* Set the 'free file space' flag for the flush, if needed */ - if (free_file_space) - flush_flags |= H5C__FREE_FILE_SPACE_FLAG; - - /* Set the "take ownership" flag for the flush, if needed */ - if (take_ownership) - flush_flags |= H5C__TAKE_OWNERSHIP_FLAG; - - /* Delete the entry from the skip list on destroy */ - flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; - - HDassert((!cache_ptr->slist_enabled) || (((!was_clean) || dirtied) == (entry_ptr->in_slist))); - - if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry") - } /* end if */ -#ifdef H5_HAVE_PARALLEL - else if (clear_entry) { - /* Verify that the target entry is in the cache. 
*/ - H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) - - if (test_entry_ptr == NULL) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?") - else if (test_entry_ptr != entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, - "hash table contains multiple entries for addr?!?") - - if (H5C__flush_single_entry(f, entry_ptr, - H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry") - } /* end else if */ -#endif /* H5_HAVE_PARALLEL */ - } - - H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) - -done: -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_unprotect() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C_unsettle_entry_ring - * - * Purpose: Advise the metadata cache that the specified entry's free space - * manager ring is no longer settled (if it was on entry). - * - * If the target free space manager ring is already - * unsettled, do nothing, and return SUCCEED. - * - * If the target free space manager ring is settled, and - * we are not in the process of a file shutdown, mark - * the ring as unsettled, and return SUCCEED. - * - * If the target free space manager is settled, and we - * are in the process of a file shutdown, post an error - * message, and return FAIL. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * January 3, 2017 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_unsettle_entry_ring(void *_entry) -{ - H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */ - H5C_t *cache; /* Cache for file */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - HDassert(entry); - HDassert(entry->ring != H5C_RING_UNDEFINED); - HDassert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) || - (H5C_RING_MDFSM == entry->ring)); - cache = entry->cache_ptr; - HDassert(cache); - HDassert(cache->magic == H5C__H5C_T_MAGIC); - - switch (entry->ring) { - case H5C_RING_USER: - /* Do nothing */ - break; - - case H5C_RING_RDFSM: - if (cache->rdfsm_settled) { - if (cache->flush_in_progress || cache->close_warning_received) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle") - cache->rdfsm_settled = FALSE; - } /* end if */ - break; - - case H5C_RING_MDFSM: - if (cache->mdfsm_settled) { - if (cache->flush_in_progress || cache->close_warning_received) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle") - cache->mdfsm_settled = FALSE; - } /* end if */ - break; - - default: - HDassert(FALSE); /* this should be un-reachable */ - break; - } /* end switch */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_unsettle_entry_ring() */ - -/*------------------------------------------------------------------------- - * Function: H5C_unsettle_ring() - * - * Purpose: Advise the metadata cache that the specified free space - * manager ring is no longer settled (if it was on entry). - * - * If the target free space manager ring is already - * unsettled, do nothing, and return SUCCEED. 
- * - * If the target free space manager ring is settled, and - * we are not in the process of a file shutdown, mark - * the ring as unsettled, and return SUCCEED. - * - * If the target free space manager is settled, and we - * are in the process of a file shutdown, post an error - * message, and return FAIL. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 10/15/16 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_unsettle_ring(H5F_t *f, H5C_ring_t ring) -{ - H5C_t *cache_ptr; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - HDassert(f); - HDassert(f->shared); - HDassert(f->shared->cache); - HDassert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring)); - cache_ptr = f->shared->cache; - HDassert(H5C__H5C_T_MAGIC == cache_ptr->magic); - - switch (ring) { - case H5C_RING_RDFSM: - if (cache_ptr->rdfsm_settled) { - if (cache_ptr->close_warning_received) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle") - cache_ptr->rdfsm_settled = FALSE; - } /* end if */ - break; - - case H5C_RING_MDFSM: - if (cache_ptr->mdfsm_settled) { - if (cache_ptr->close_warning_received) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle") - cache_ptr->mdfsm_settled = FALSE; - } /* end if */ - break; - - default: - HDassert(FALSE); /* this should be un-reachable */ - break; - } /* end switch */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_unsettle_ring() */ - -/*------------------------------------------------------------------------- - * Function: H5C_validate_resize_config() - * - * Purpose: Run a sanity check on the specified sections of the - * provided instance of struct H5C_auto_size_ctl_t. - * - * Do nothing and return SUCCEED if no errors are detected, - * and flag an error and return FAIL otherwise. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 3/23/05 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - if (config_ptr == NULL) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry") - - if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version") - - if ((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) { - if (config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big") - if (config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small") - if (config_ptr->min_size > config_ptr->max_size) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size") - if (config_ptr->set_initial_size && ((config_ptr->initial_size < config_ptr->min_size) || - (config_ptr->initial_size > config_ptr->max_size))) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, - "initial_size must be in the interval [min_size, max_size]") - if ((config_ptr->min_clean_fraction < 0.0) || (config_ptr->min_clean_fraction > 1.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_clean_fraction must be in the interval [0.0, 1.0]") - if (config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small") - if (config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big") - } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */ - - if ((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) { - if ((config_ptr->incr_mode != H5C_incr__off) && (config_ptr->incr_mode != H5C_incr__threshold)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode") - - if (config_ptr->incr_mode == H5C_incr__threshold) { - if ((config_ptr->lower_hr_threshold < 0.0) || (config_ptr->lower_hr_threshold > 1.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, - "lower_hr_threshold must be in the range [0.0, 1.0]") - if (config_ptr->increment < 1.0) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "increment must be greater than or equal to 1.0") - - /* no need to check max_increment, as it is a size_t, - * and thus must be non-negative. 
- */ - } /* H5C_incr__threshold */ - - switch (config_ptr->flash_incr_mode) { - case H5C_flash_incr__off: - /* nothing to do here */ - break; - - case H5C_flash_incr__add_space: - if ((config_ptr->flash_multiple < 0.1) || (config_ptr->flash_multiple > 10.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, - "flash_multiple must be in the range [0.1, 10.0]") - if ((config_ptr->flash_threshold < 0.1) || (config_ptr->flash_threshold > 1.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, - "flash_threshold must be in the range [0.1, 1.0]") - break; - - default: - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode") - break; - } /* end switch */ - } /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */ - - if ((tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0) { - if ((config_ptr->decr_mode != H5C_decr__off) && (config_ptr->decr_mode != H5C_decr__threshold) && - (config_ptr->decr_mode != H5C_decr__age_out) && - (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode") - - if (config_ptr->decr_mode == H5C_decr__threshold) { - if (config_ptr->upper_hr_threshold > 1.0) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0") - if ((config_ptr->decrement > 1.0) || (config_ptr->decrement < 0.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]") - - /* no need to check max_decrement as it is a size_t - * and thus must be non-negative. - */ - } /* H5C_decr__threshold */ - - if ((config_ptr->decr_mode == H5C_decr__age_out) || - (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) { - if (config_ptr->epochs_before_eviction < 1) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive") - if (config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big") - if (config_ptr->apply_empty_reserve && - (config_ptr->empty_reserve > 1.0 || config_ptr->empty_reserve < 0.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty_reserve must be in the interval [0.0, 1.0]") - - /* no need to check max_decrement as it is a size_t - * and thus must be non-negative. - */ - } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */ - - if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) - if ((config_ptr->upper_hr_threshold > 1.0) || (config_ptr->upper_hr_threshold < 0.0)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, - "upper_hr_threshold must be in the interval [0.0, 1.0]") - } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */ - - if ((tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0) { - if ((config_ptr->incr_mode == H5C_incr__threshold) && - ((config_ptr->decr_mode == H5C_decr__threshold) || - (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) && - (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config") - } /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_validate_resize_config() */ - -/*------------------------------------------------------------------------- - * Function: H5C_create_flush_dependency() - * - * Purpose: Initiates a parent<->child entry flush dependency. The parent - * entry must be pinned or protected at the time of call, and must - * have all dependencies removed before the cache can shut down. 
- * - * Note: Flush dependencies in the cache indicate that a child entry - * must be flushed to the file before its parent. (This is - * currently used to implement Single-Writer/Multiple-Reader (SWMR) - * I/O access for data structures in the file). - * - * Creating a flush dependency between two entries will also pin - * the parent entry. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 3/05/09 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_create_flush_dependency(void *parent_thing, void *child_thing) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */ - H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - HDassert(parent_entry); - HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(H5F_addr_defined(parent_entry->addr)); - HDassert(child_entry); - HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(H5F_addr_defined(child_entry->addr)); - cache_ptr = parent_entry->cache_ptr; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(cache_ptr == child_entry->cache_ptr); -#ifndef NDEBUG - /* Make sure the parent is not already a parent */ - { - unsigned u; - - for (u = 0; u < child_entry->flush_dep_nparents; u++) - HDassert(child_entry->flush_dep_parent[u] != parent_entry); - } /* end block */ -#endif - - /* More sanity checks */ - if (child_entry == parent_entry) - HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself") - if (!(parent_entry->is_protected || parent_entry->is_pinned)) - HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected") - - /* Check for parent not pinned */ - if (!parent_entry->is_pinned) { - /* Sanity check */ - HDassert(parent_entry->flush_dep_nchildren == 0); - HDassert(!parent_entry->pinned_from_client); - HDassert(!parent_entry->pinned_from_cache); - - /* Pin the parent entry */ - parent_entry->is_pinned = TRUE; - H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry) - } /* end else */ - - /* Mark the entry as pinned from the cache's action (possibly redundantly) */ - parent_entry->pinned_from_cache = TRUE; - - /* Check if we need to resize the child's parent array */ - if (child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) { - if (child_entry->flush_dep_parent_nalloc == 0) { - /* Array does not exist yet, allocate it */ - HDassert(!child_entry->flush_dep_parent); - - if (NULL == (child_entry->flush_dep_parent = - H5FL_SEQ_MALLOC(H5C_cache_entry_ptr_t, H5C_FLUSH_DEP_PARENT_INIT))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, - "memory allocation failed for flush dependency parent list") - child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT; - } /* end if */ - else { - /* Resize existing array */ - HDassert(child_entry->flush_dep_parent); - - if (NULL == (child_entry->flush_dep_parent = - H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent, - 2 * child_entry->flush_dep_parent_nalloc))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, - "memory allocation failed for flush dependency parent list") - child_entry->flush_dep_parent_nalloc *= 2; - } /* end else */ - cache_ptr->entry_fd_height_change_counter++; - } /* end if */ - - /* Add the dependency to the 
child's parent array */
- child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
- child_entry->flush_dep_nparents++;
-
- /* Increment parent's number of children */
- parent_entry->flush_dep_nchildren++;
-
- /* Adjust the number of dirty children */
- if (child_entry->is_dirty) {
- /* Sanity check */
- HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
-
- parent_entry->flush_dep_ndirty_children++;
-
- /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag set")
- } /* end if */
-
- /* adjust the parent's number of unserialized children. Note
- * that it is possible for an entry to be clean and unserialized.
- */
- if (!child_entry->image_up_to_date) {
- HDassert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);
-
- parent_entry->flush_dep_nunser_children++;
-
- /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag reset")
- } /* end if */
-
- /* Post-conditions, for successful operation */
- HDassert(parent_entry->is_pinned);
- HDassert(parent_entry->flush_dep_nchildren > 0);
- HDassert(child_entry->flush_dep_parent);
- HDassert(child_entry->flush_dep_nparents > 0);
- HDassert(child_entry->flush_dep_parent_nalloc > 0);
-#ifndef NDEBUG
- H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
-#endif
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_create_flush_dependency() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C_destroy_flush_dependency()
- *
- * Purpose: Terminates a parent <-> child entry flush dependency. The
- * parent entry must be pinned. 
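H5C_create_flush_dependency() above grows the child's flush_dep_parent array geometrically: it starts at H5C_FLUSH_DEP_PARENT_INIT slots and doubles whenever it fills. Below is a minimal standalone sketch of that grow-by-doubling policy only; it uses plain realloc() rather than the H5FL free-list allocators, and parent_list_t / parent_list_add() are illustrative names, not HDF5 API.

    #include <stdlib.h>

    #define PARENT_INIT 1 /* stands in for H5C_FLUSH_DEP_PARENT_INIT */

    typedef struct {
        void   **parents;  /* dynamically sized array of parent pointers */
        unsigned nparents; /* slots in use */
        unsigned nalloc;   /* slots allocated */
    } parent_list_t;

    /* Append a parent pointer, doubling the array when it is full.
     * Returns 0 on success, -1 on allocation failure.
     */
    static int
    parent_list_add(parent_list_t *pl, void *parent)
    {
        if (pl->nparents >= pl->nalloc) {
            unsigned new_nalloc = (pl->nalloc == 0) ? PARENT_INIT : 2 * pl->nalloc;
            void   **tmp        = realloc(pl->parents, new_nalloc * sizeof(void *));

            if (tmp == NULL)
                return -1;
            pl->parents = tmp;
            pl->nalloc  = new_nalloc;
        }
        pl->parents[pl->nparents++] = parent;
        return 0;
    }

The matching shrink policy in H5C_destroy_flush_dependency() below frees the array when the last parent is removed and otherwise shrinks it by a factor of four once it is no more than a quarter full.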
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 3/05/09 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_destroy_flush_dependency(void *parent_thing, void *child_thing) -{ - H5C_t *cache_ptr; - H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */ - H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - HDassert(parent_entry); - HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(H5F_addr_defined(parent_entry->addr)); - HDassert(child_entry); - HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(H5F_addr_defined(child_entry->addr)); - cache_ptr = parent_entry->cache_ptr; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(cache_ptr == child_entry->cache_ptr); - - /* Usage checks */ - if (!parent_entry->is_pinned) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned") - if (NULL == child_entry->flush_dep_parent) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, - "Child entry doesn't have a flush dependency parent array") - if (0 == parent_entry->flush_dep_nchildren) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, - "Parent entry flush dependency ref. count has no child dependencies") - - /* Search for parent in child's parent array. This is a linear search - * because we do not expect large numbers of parents. If this changes, we - * may wish to change the parent array to a skip list */ - for (u = 0; u < child_entry->flush_dep_nparents; u++) - if (child_entry->flush_dep_parent[u] == parent_entry) - break; - if (u == child_entry->flush_dep_nparents) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, - "Parent entry isn't a flush dependency parent for child entry") - - /* Remove parent entry from child's parent array */ - if (u < (child_entry->flush_dep_nparents - 1)) - HDmemmove(&child_entry->flush_dep_parent[u], &child_entry->flush_dep_parent[u + 1], - (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0])); - child_entry->flush_dep_nparents--; - - /* Adjust parent entry's nchildren and unpin parent if it goes to zero */ - parent_entry->flush_dep_nchildren--; - if (0 == parent_entry->flush_dep_nchildren) { - /* Sanity check */ - HDassert(parent_entry->pinned_from_cache); - - /* Check if we should unpin parent entry now */ - if (!parent_entry->pinned_from_client) - if (H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry") - - /* Mark the entry as unpinned from the cache's action */ - parent_entry->pinned_from_cache = FALSE; - } /* end if */ - - /* Adjust parent entry's ndirty_children */ - if (child_entry->is_dirty) { - /* Sanity check */ - HDassert(parent_entry->flush_dep_ndirty_children > 0); - - parent_entry->flush_dep_ndirty_children--; - - /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */ - if (parent_entry->type->notify && - (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify parent about child entry dirty flag reset") - } /* end if */ - - /* adjust parent entry's number of unserialized children */ - if (!child_entry->image_up_to_date) { - 
HDassert(parent_entry->flush_dep_nunser_children > 0); - - parent_entry->flush_dep_nunser_children--; - - /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */ - if (parent_entry->type->notify && - (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify parent about child entry serialized flag set") - } /* end if */ - - /* Shrink or free the parent array if appropriate */ - if (child_entry->flush_dep_nparents == 0) { - child_entry->flush_dep_parent = H5FL_SEQ_FREE(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent); - child_entry->flush_dep_parent_nalloc = 0; - } /* end if */ - else if (child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT && - child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) { - if (NULL == (child_entry->flush_dep_parent = - H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent, - child_entry->flush_dep_parent_nalloc / 4))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, - "memory allocation failed for flush dependency parent list") - child_entry->flush_dep_parent_nalloc /= 4; - } /* end if */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_destroy_flush_dependency() */ - -/*************************************************************************/ -/**************************** Private Functions: *************************/ -/*************************************************************************/ - -/*------------------------------------------------------------------------- - * Function: H5C__pin_entry_from_client() - * - * Purpose: Internal routine to pin a cache entry from a client action. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 3/26/09 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__pin_entry_from_client(H5C_t -#if !H5C_COLLECT_CACHE_STATS - H5_ATTR_UNUSED -#endif - *cache_ptr, - H5C_cache_entry_t *entry_ptr) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - HDassert(cache_ptr); - HDassert(entry_ptr); - HDassert(entry_ptr->is_protected); - - /* Check if the entry is already pinned */ - if (entry_ptr->is_pinned) { - /* Check if the entry was pinned through an explicit pin from a client */ - if (entry_ptr->pinned_from_client) - HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned") - } /* end if */ - else { - entry_ptr->is_pinned = TRUE; - - H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) - } /* end else */ - - /* Mark that the entry was pinned through an explicit pin from a client */ - entry_ptr->pinned_from_client = TRUE; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__pin_entry_from_client() */ - -/*------------------------------------------------------------------------- - * Function: H5C__unpin_entry_real() - * - * Purpose: Internal routine to unpin a cache entry. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 1/6/18 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp) -{ - herr_t ret_value = SUCCEED; /* Return value */ - -#ifdef H5C_DO_SANITY_CHECKS - FUNC_ENTER_PACKAGE -#else - FUNC_ENTER_PACKAGE_NOERR -#endif - - /* Sanity checking */ - HDassert(cache_ptr); - HDassert(entry_ptr); - HDassert(entry_ptr->is_pinned); - - /* If requested, update the replacement policy if the entry is not protected */ - if (update_rp && !entry_ptr->is_protected) - H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL) - - /* Unpin the entry now */ - entry_ptr->is_pinned = FALSE; - - /* Update the stats for an unpin operation */ - H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) - -#ifdef H5C_DO_SANITY_CHECKS -done: -#endif - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__unpin_entry_real() */ - -/*------------------------------------------------------------------------- - * Function: H5C__unpin_entry_from_client() - * - * Purpose: Internal routine to unpin a cache entry from a client action. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * 3/24/09 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checking */ - HDassert(cache_ptr); - HDassert(entry_ptr); - - /* Error checking (should be sanity checks?) */ - if (!entry_ptr->is_pinned) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned") - if (!entry_ptr->pinned_from_client) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client") - - /* Check if the entry is not pinned from a flush dependency */ - if (!entry_ptr->pinned_from_cache) - if (H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry") - - /* Mark the entry as explicitly unpinned by the client */ - entry_ptr->pinned_from_client = FALSE; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__unpin_entry_from_client() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__auto_adjust_cache_size - * - * Purpose: Obtain the current full cache hit rate, and compare it - * with the hit rate thresholds for modifying cache size. - * If one of the thresholds has been crossed, adjusts the - * size of the cache accordingly. - * - * The function then resets the full cache hit rate - * statistics, and exits. - * - * Return: Non-negative on success/Negative on failure or if there was - * an attempt to flush a protected item. 
- *
- *
- * Programmer: John Mainzer, 10/7/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted)
-{
- H5C_t *cache_ptr = f->shared->cache;
- hbool_t reentrant_call = FALSE;
- hbool_t inserted_epoch_marker = FALSE;
- size_t new_max_cache_size = 0;
- size_t old_max_cache_size = 0;
- size_t new_min_clean_size = 0;
- size_t old_min_clean_size = 0;
- double hit_rate;
- enum H5C_resize_status status = in_spec; /* will change if needed */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length);
- HDassert(0.0 <= cache_ptr->resize_ctl.min_clean_fraction);
- HDassert(cache_ptr->resize_ctl.min_clean_fraction <= 100.0);
-
- /* check to see if cache_ptr->resize_in_progress is TRUE. If it is, this
- * is a re-entrant call via a client callback called in the resize
- * process. To avoid an infinite recursion, set reentrant_call to
- * TRUE, and goto done.
- */
- if (cache_ptr->resize_in_progress) {
- reentrant_call = TRUE;
- HGOTO_DONE(SUCCEED)
- } /* end if */
-
- cache_ptr->resize_in_progress = TRUE;
-
- if (!cache_ptr->resize_enabled)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
-
- HDassert((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) ||
- (cache_ptr->resize_ctl.decr_mode != H5C_decr__off));
-
- if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
-
- HDassert((0.0 <= hit_rate) && (hit_rate <= 1.0));
-
- switch (cache_ptr->resize_ctl.incr_mode) {
- case H5C_incr__off:
- if (cache_ptr->size_increase_possible)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
- break;
-
- case H5C_incr__threshold:
- if (hit_rate < cache_ptr->resize_ctl.lower_hr_threshold) {
- if (!cache_ptr->size_increase_possible)
- status = increase_disabled;
- else if (cache_ptr->max_cache_size >= cache_ptr->resize_ctl.max_size) {
- HDassert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.max_size);
- status = at_max_size;
- }
- else if (!cache_ptr->cache_full)
- status = not_full;
- else {
- new_max_cache_size =
- (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.increment);
-
- /* clip to max size if necessary */
- if (new_max_cache_size > cache_ptr->resize_ctl.max_size)
- new_max_cache_size = cache_ptr->resize_ctl.max_size;
-
- /* clip to max increment if necessary */
- if (cache_ptr->resize_ctl.apply_max_increment &&
- ((cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment) <
- new_max_cache_size))
- new_max_cache_size = cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment;
-
- status = increase;
- }
- }
- break;
-
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
- }
-
- /* If the decr_mode is either age out or age out with threshold, we
- * must run the marker maintenance code, whether we run the size
- * reduction code or not. We do this in two places -- here we
- * insert a new marker if the number of active epoch markers
- * is less than the current epochs before eviction, and after
- * the ageout call, we cycle the markers.
- *
- * However, we can't call the ageout code or cycle the markers
- * unless there was a full complement of markers in place on
- * entry. The inserted_epoch_marker flag is used to track this. 
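The H5C_incr__threshold branch above scales the current maximum cache size by the configured increment and then clips the result twice, first to the absolute maximum size and then to the maximum single-step increment. A standalone sketch of that arithmetic follows; scaled_increase() is an illustrative name, not an HDF5 routine.

    #include <stddef.h>

    /* Compute an increased cache size: scale by `increment`, clip to the
     * configured maximum size, then clip to the maximum allowed step.
     */
    static size_t
    scaled_increase(size_t cur_size, double increment, size_t max_size,
                    int apply_max_increment, size_t max_increment)
    {
        size_t new_size = (size_t)((double)cur_size * increment);

        if (new_size > max_size)
            new_size = max_size;
        if (apply_max_increment && (cur_size + max_increment) < new_size)
            new_size = cur_size + max_increment;

        return new_size;
    }

For example, with cur_size of 4 MiB, increment of 2.0, and max_increment of 2 MiB, the scaled 8 MiB is clipped back to 6 MiB. The decrement branch later in the routine mirrors this with min_size and max_decrement.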
- */ - - if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || - (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) && - (cache_ptr->epoch_markers_active < cache_ptr->resize_ctl.epochs_before_eviction)) { - - if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker") - - inserted_epoch_marker = TRUE; - } - - /* don't run the cache size decrease code unless the cache size - * increase code is disabled, or the size increase code sees no need - * for action. In either case, status == in_spec at this point. - */ - - if (status == in_spec) { - switch (cache_ptr->resize_ctl.decr_mode) { - case H5C_decr__off: - break; - - case H5C_decr__threshold: - if (hit_rate > cache_ptr->resize_ctl.upper_hr_threshold) { - if (!cache_ptr->size_decrease_possible) - status = decrease_disabled; - else if (cache_ptr->max_cache_size <= cache_ptr->resize_ctl.min_size) { - HDassert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.min_size); - status = at_min_size; - } - else { - new_max_cache_size = - (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.decrement); - - /* clip to min size if necessary */ - if (new_max_cache_size < cache_ptr->resize_ctl.min_size) - new_max_cache_size = cache_ptr->resize_ctl.min_size; - - /* clip to max decrement if necessary */ - if (cache_ptr->resize_ctl.apply_max_decrement && - ((cache_ptr->resize_ctl.max_decrement + new_max_cache_size) < - cache_ptr->max_cache_size)) - new_max_cache_size = - cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement; - - status = decrease; - } - } - break; - - case H5C_decr__age_out_with_threshold: - case H5C_decr__age_out: - if (!inserted_epoch_marker) { - if (!cache_ptr->size_decrease_possible) - status = decrease_disabled; - else { - if (H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size, - write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed") - } /* end else */ - } /* end if */ - break; - - default: - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode") - } - } - - /* cycle the epoch markers here if appropriate */ - if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || - (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) && - !inserted_epoch_marker) - /* move last epoch marker to the head of the LRU list */ - if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker") - - if ((status == increase) || (status == decrease)) { - old_max_cache_size = cache_ptr->max_cache_size; - old_min_clean_size = cache_ptr->min_clean_size; - - new_min_clean_size = - (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction)); - - /* new_min_clean_size is of size_t, and thus must be non-negative. - * Hence we have - * - * ( 0 <= new_min_clean_size ). - * - * by definition. 
- */ - HDassert(new_min_clean_size <= new_max_cache_size); - HDassert(cache_ptr->resize_ctl.min_size <= new_max_cache_size); - HDassert(new_max_cache_size <= cache_ptr->resize_ctl.max_size); - - cache_ptr->max_cache_size = new_max_cache_size; - cache_ptr->min_clean_size = new_min_clean_size; - - if (status == increase) - cache_ptr->cache_full = FALSE; - else if (status == decrease) - cache_ptr->size_decreased = TRUE; - - /* update flash cache size increase fields as appropriate */ - if (cache_ptr->flash_size_increase_possible) { - switch (cache_ptr->resize_ctl.flash_incr_mode) { - case H5C_flash_incr__off: - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, - "flash_size_increase_possible but H5C_flash_incr__off?!") - break; - - case H5C_flash_incr__add_space: - cache_ptr->flash_size_increase_threshold = - (size_t)(((double)(cache_ptr->max_cache_size)) * - (cache_ptr->resize_ctl.flash_threshold)); - break; - - default: /* should be unreachable */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") - break; - } - } - } - - if (cache_ptr->resize_ctl.rpt_fcn != NULL) - (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status, - old_max_cache_size, new_max_cache_size, old_min_clean_size, - new_min_clean_size); - - if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) - /* this should be impossible... */ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed") - -done: - /* Sanity checks */ - HDassert(cache_ptr->resize_in_progress); - if (!reentrant_call) - cache_ptr->resize_in_progress = FALSE; - HDassert((!reentrant_call) || (cache_ptr->resize_in_progress)); - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__auto_adjust_cache_size() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__autoadjust__ageout - * - * Purpose: Implement the ageout automatic cache size decrement - * algorithm. Note that while this code evicts aged out - * entries, the code does not change the maximum cache size. - * Instead, the function simply computes the new value (if - * any change is indicated) and reports this value in - * *new_max_cache_size_ptr. - * - * Return: Non-negative on success/Negative on failure or if there was - * an attempt to flush a protected item. - * - * - * Programmer: John Mainzer, 11/18/04 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr, - size_t *new_max_cache_size_ptr, hbool_t write_permitted) -{ - H5C_t *cache_ptr = f->shared->cache; - size_t test_size; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - HDassert(f); - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert((status_ptr) && (*status_ptr == in_spec)); - HDassert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0)); - - /* remove excess epoch markers if any */ - if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) - if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers") - - if ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || - ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold) && - (hit_rate >= cache_ptr->resize_ctl.upper_hr_threshold))) { - - if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.min_size) { - /* evict aged out cache entries if appropriate... 
*/ - if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries") - - /* ... and then reduce cache size if appropriate */ - if (cache_ptr->index_size < cache_ptr->max_cache_size) { - if (cache_ptr->resize_ctl.apply_empty_reserve) { - test_size = - (size_t)(((double)cache_ptr->index_size) / (1 - cache_ptr->resize_ctl.empty_reserve)); - if (test_size < cache_ptr->max_cache_size) { - *status_ptr = decrease; - *new_max_cache_size_ptr = test_size; - } - } - else { - *status_ptr = decrease; - *new_max_cache_size_ptr = cache_ptr->index_size; - } - - if (*status_ptr == decrease) { - /* clip to min size if necessary */ - if (*new_max_cache_size_ptr < cache_ptr->resize_ctl.min_size) - *new_max_cache_size_ptr = cache_ptr->resize_ctl.min_size; - - /* clip to max decrement if necessary */ - if ((cache_ptr->resize_ctl.apply_max_decrement) && - ((cache_ptr->resize_ctl.max_decrement + *new_max_cache_size_ptr) < - cache_ptr->max_cache_size)) - *new_max_cache_size_ptr = - cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement; - } - } - } - else - *status_ptr = at_min_size; - } - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__autoadjust__ageout() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__autoadjust__ageout__cycle_epoch_marker - * - * Purpose: Remove the oldest epoch marker from the LRU list, - * and reinsert it at the head of the LRU list. Also - * remove the epoch marker's index from the head of the - * ring buffer, and re-insert it at the tail of the ring - * buffer. - * - * Return: SUCCEED on success/FAIL on failure. - * - * Programmer: John Mainzer, 11/22/04 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr) -{ - int i; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - if (cache_ptr->epoch_markers_active <= 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?") - - /* remove the last marker from both the ring buffer and the LRU list */ - i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first]; - cache_ptr->epoch_marker_ringbuf_first = - (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); - if (cache_ptr->epoch_marker_ringbuf_size <= 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") - - cache_ptr->epoch_marker_ringbuf_size -= 1; - if (cache_ptr->epoch_marker_active[i] != TRUE) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") - - H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr, - (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL)) - - /* now, re-insert it at the head of the LRU list, and at the tail of - * the ring buffer. 
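The epoch-marker bookkeeping here treats epoch_marker_ringbuf as a fixed-capacity ring buffer: indices advance modulo (H5C__MAX_EPOCH_MARKERS + 1), a marker index is appended at the tail, and the oldest one is taken from the head. The sketch below shows just that index arithmetic under the convention, assumed for this sketch, that `first` leads `last` by one when the buffer is empty; ring_buf_t and the rb_* helpers are illustrative names, not HDF5 API.

    #define RB_CAPACITY 10 /* stands in for H5C__MAX_EPOCH_MARKERS */

    typedef struct {
        int      slots[RB_CAPACITY + 1];
        unsigned first; /* index of the oldest element */
        unsigned last;  /* index of the newest element */
        unsigned size;
    } ring_buf_t;

    static void
    rb_init(ring_buf_t *rb)
    {
        rb->first = 1; /* empty-buffer convention: first leads last by one */
        rb->last  = 0;
        rb->size  = 0;
    }

    /* Append at the tail, as when a marker index is (re-)inserted. */
    static int
    rb_push_tail(ring_buf_t *rb, int value)
    {
        if (rb->size >= RB_CAPACITY)
            return -1; /* would overflow */
        rb->last            = (rb->last + 1) % (RB_CAPACITY + 1);
        rb->slots[rb->last] = value;
        rb->size++;
        return 0;
    }

    /* Remove from the head, as when the oldest marker index is taken. */
    static int
    rb_pop_head(ring_buf_t *rb, int *value_out)
    {
        if (rb->size == 0)
            return -1; /* underflow */
        *value_out = rb->slots[rb->first];
        rb->first  = (rb->first + 1) % (RB_CAPACITY + 1);
        rb->size--;
        return 0;
    }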
- */
- HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i);
- HDassert(cache_ptr->epoch_markers[i].next == NULL);
- HDassert(cache_ptr->epoch_markers[i].prev == NULL);
-
- cache_ptr->epoch_marker_ringbuf_last =
- (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_last] = i;
- if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
-
- cache_ptr->epoch_marker_ringbuf_size += 1;
-
- H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr,
- cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL)
-done:
-
- FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5C__autoadjust__ageout__cycle_epoch_marker() */
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__evict_aged_out_entries
- *
- * Purpose: Evict clean entries in the cache that haven't
- * been accessed for at least
- * cache_ptr->resize_ctl.epochs_before_eviction epochs,
- * and flush dirty entries that haven't been accessed for
- * that amount of time.
- *
- * Depending on configuration, the function will either
- * flush or evict all such entries, or all such entries it
- * encounters until it has freed the maximum amount of space
- * allowed under the maximum decrement.
- *
- * If we are running in parallel mode, writes may not be
- * permitted. If so, the function simply skips any dirty
- * entries it may encounter.
- *
- * The function makes no attempt to maintain the minimum
- * clean size, as there is no guarantee that the cache size
- * will be changed.
- *
- * If there is no cache size change, the minimum clean size
- * constraint will be met through a combination of clean
- * entries and free space in the cache.
- *
- * If there is a cache size reduction, the minimum clean size
- * will be re-calculated, and will be enforced the next time
- * we have to make space in the cache.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 11/22/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted)
-{
- H5C_t *cache_ptr = f->shared->cache;
- size_t eviction_size_limit;
- size_t bytes_evicted = 0;
- hbool_t prev_is_dirty = FALSE;
- hbool_t restart_scan;
- H5C_cache_entry_t *entry_ptr;
- H5C_cache_entry_t *next_ptr;
- H5C_cache_entry_t *prev_ptr;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- /* if there is a limit on the amount that the cache size can be decreased
- * in any one round of the cache size reduction algorithm, load that
- * limit into eviction_size_limit. Otherwise, set eviction_size_limit
- * to the equivalent of infinity. The current size of the index will
- * do nicely.
- */
- if (cache_ptr->resize_ctl.apply_max_decrement)
- eviction_size_limit = cache_ptr->resize_ctl.max_decrement;
- else
- eviction_size_limit = cache_ptr->index_size; /* i.e. 
infinity */
-
- if (write_permitted) {
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
- while (entry_ptr != NULL && entry_ptr->type->id != H5AC_EPOCH_MARKER_ID &&
- bytes_evicted < eviction_size_limit) {
- hbool_t skipping_entry = FALSE;
-
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert((entry_ptr->ro_ref_count) == 0);
-
- next_ptr = entry_ptr->next;
- prev_ptr = entry_ptr->prev;
-
- if (prev_ptr != NULL)
- prev_is_dirty = prev_ptr->is_dirty;
-
- if (entry_ptr->is_dirty) {
- HDassert(!entry_ptr->prefetched_dirty);
-
- /* dirty corked entry is skipped */
- if (entry_ptr->tag_info && entry_ptr->tag_info->corked)
- skipping_entry = TRUE;
- else {
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
-
- if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
-
- if (cache_ptr->entries_removed_counter > 1 ||
- cache_ptr->last_entry_removed_ptr == prev_ptr)
- restart_scan = TRUE;
- } /* end else */
- } /* end if */
- else if (!entry_ptr->prefetched_dirty) {
- bytes_evicted += entry_ptr->size;
-
- if (H5C__flush_single_entry(
- f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- } /* end else-if */
- else {
- HDassert(!entry_ptr->is_dirty);
- HDassert(entry_ptr->prefetched_dirty);
-
- skipping_entry = TRUE;
- } /* end else */
-
- if (prev_ptr != NULL) {
- if (skipping_entry)
- entry_ptr = prev_ptr;
- else if (restart_scan || (prev_ptr->is_dirty != prev_is_dirty) ||
- (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) {
- /* Something has happened to the LRU -- start over
- * from the tail.
- */
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
-
- H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
- } /* end else-if */
- else
- entry_ptr = prev_ptr;
- } /* end if */
- else
- entry_ptr = NULL;
- } /* end while */
-
- /* for now at least, don't bother to maintain the minimum clean size,
- * as the cache should now be less than its maximum size. Due to
- * the vagaries of the cache size reduction algorithm, we may not
- * reduce the size of the cache.
- *
- * If we do, we will calculate a new minimum clean size, which will
- * be enforced the next time we try to make space in the cache.
- *
- * If we don't, no action is necessary, as we have just evicted and/or
- * flushed a bunch of entries and therefore the sum of the clean
- * and free space in the cache must be greater than or equal to the
- * min clean space requirement (assuming that requirement was met on
- * entry).
- */
- } /* end if */
- else /* ! write_permitted */ {
- /* Since we are not allowed to write, all we can do is evict
- * any clean entries that we may encounter before we either
- * hit the eviction size limit, or encounter the epoch marker.
- *
- * If we are operating read only, this isn't an issue, as there
- * will not be any dirty entries. 
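Both branches of this routine walk the LRU list from the tail toward the head while possibly evicting the entry they are standing on, so the predecessor is saved before the entry can be freed. The pattern in isolation looks like the minimal sketch below; lru_list_t, lru_unlink(), and evict_clean_from_tail() are illustrative names, not HDF5 API.

    #include <stdlib.h>

    typedef struct lru_node_t {
        struct lru_node_t *prev;
        struct lru_node_t *next;
        int                is_clean; /* only clean nodes may be evicted here */
    } lru_node_t;

    typedef struct {
        lru_node_t *head;
        lru_node_t *tail;
    } lru_list_t;

    static void
    lru_unlink(lru_list_t *lru, lru_node_t *node)
    {
        if (node->prev) node->prev->next = node->next;
        else            lru->head        = node->next;
        if (node->next) node->next->prev = node->prev;
        else            lru->tail        = node->prev;
        node->prev = node->next = NULL;
    }

    /* Walk from the tail (least recently used) toward the head, evicting
     * clean nodes.  The predecessor is captured *before* the current node
     * is unlinked and freed, so the scan survives the removal -- the same
     * precaution the loop above takes with prev_ptr.
     */
    static void
    evict_clean_from_tail(lru_list_t *lru)
    {
        lru_node_t *node = lru->tail;

        while (node != NULL) {
            lru_node_t *prev = node->prev; /* capture before any removal */

            if (node->is_clean) {
                lru_unlink(lru, node);
                free(node);
            }
            node = prev;
        }
    }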
- * - * If we are operating in R/W mode, all the dirty entries we - * skip will be flushed the next time we attempt to make space - * when writes are permitted. This may have some local - * performance implications, but it shouldn't cause any net - * slowdown. - */ - HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS); - entry_ptr = cache_ptr->LRU_tail_ptr; - while (entry_ptr != NULL && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) && - (bytes_evicted < eviction_size_limit)) { - HDassert(!(entry_ptr->is_protected)); - - prev_ptr = entry_ptr->prev; - - if (!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty)) - if (H5C__flush_single_entry( - f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry") - - /* just skip the entry if it is dirty, as we can't do - * anything with it now since we can't write. - * - * Since all entries are clean, serialize() will not be called, - * and thus we needn't test to see if the LRU has been changed - * out from under us. - */ - entry_ptr = prev_ptr; - } /* end while */ - } /* end else */ - - if (cache_ptr->index_size < cache_ptr->max_cache_size) - cache_ptr->cache_full = FALSE; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__autoadjust__ageout__evict_aged_out_entries() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__autoadjust__ageout__insert_new_marker - * - * Purpose: Find an unused marker cache entry, mark it as used, and - * insert it at the head of the LRU list. Also add the - * marker's index in the epoch_markers array. - * - * Return: SUCCEED on success/FAIL on failure. - * - * Programmer: John Mainzer, 11/19/04 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr) -{ - int i; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - if (cache_ptr->epoch_markers_active >= cache_ptr->resize_ctl.epochs_before_eviction) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers") - - /* find an unused marker */ - i = 0; - while ((cache_ptr->epoch_marker_active)[i] && i < H5C__MAX_EPOCH_MARKERS) - i++; - if (i >= H5C__MAX_EPOCH_MARKERS) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker") - - HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i); - HDassert(((cache_ptr->epoch_markers)[i]).next == NULL); - HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL); - - (cache_ptr->epoch_marker_active)[i] = TRUE; - - cache_ptr->epoch_marker_ringbuf_last = - (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1); - (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i; - if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow") - - cache_ptr->epoch_marker_ringbuf_size += 1; - - H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, - cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) - - cache_ptr->epoch_markers_active += 1; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__autoadjust__ageout__insert_new_marker() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__autoadjust__ageout__remove_all_markers - * - * Purpose: Remove all 
epoch markers from the LRU list and mark them - * as inactive. - * - * Return: SUCCEED on success/FAIL on failure. - * - * Programmer: John Mainzer, 11/22/04 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr) -{ - int ring_buf_index; - int i; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - while (cache_ptr->epoch_markers_active > 0) { - /* get the index of the last epoch marker in the LRU list - * and remove it from the ring buffer. - */ - - ring_buf_index = cache_ptr->epoch_marker_ringbuf_first; - i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index]; - - cache_ptr->epoch_marker_ringbuf_first = - (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); - - if (cache_ptr->epoch_marker_ringbuf_size <= 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") - cache_ptr->epoch_marker_ringbuf_size -= 1; - - if (cache_ptr->epoch_marker_active[i] != TRUE) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") - - /* remove the epoch marker from the LRU list */ - H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, - cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) - - /* mark the epoch marker as unused. */ - cache_ptr->epoch_marker_active[i] = FALSE; - - HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); - HDassert(cache_ptr->epoch_markers[i].next == NULL); - HDassert(cache_ptr->epoch_markers[i].prev == NULL); - - /* decrement the number of active epoch markers */ - cache_ptr->epoch_markers_active -= 1; - - HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size); - } - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__autoadjust__ageout__remove_all_markers() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__autoadjust__ageout__remove_excess_markers - * - * Purpose: Remove epoch markers from the end of the LRU list and - * mark them as inactive until the number of active markers - * equals the current value of - * cache_ptr->resize_ctl.epochs_before_eviction. - * - * Return: SUCCEED on success/FAIL on failure. - * - * Programmer: John Mainzer, 11/19/04 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr) -{ - int ring_buf_index; - int i; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - if (cache_ptr->epoch_markers_active <= cache_ptr->resize_ctl.epochs_before_eviction) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry") - - while (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) { - /* get the index of the last epoch marker in the LRU list - * and remove it from the ring buffer. 
- */ - ring_buf_index = cache_ptr->epoch_marker_ringbuf_first; - i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index]; - - cache_ptr->epoch_marker_ringbuf_first = - (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); - - if (cache_ptr->epoch_marker_ringbuf_size <= 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") - cache_ptr->epoch_marker_ringbuf_size -= 1; - - if (cache_ptr->epoch_marker_active[i] != TRUE) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") - - /* remove the epoch marker from the LRU list */ - H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, - cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) - - /* mark the epoch marker as unused. */ - cache_ptr->epoch_marker_active[i] = FALSE; - - HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); - HDassert(cache_ptr->epoch_markers[i].next == NULL); - HDassert(cache_ptr->epoch_markers[i].prev == NULL); - - /* decrement the number of active epoch markers */ - cache_ptr->epoch_markers_active -= 1; - - HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size); - } - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__autoadjust__ageout__remove_excess_markers() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__flash_increase_cache_size - * - * Purpose: If there is not at least new_entry_size - old_entry_size - * bytes of free space in the cache and the current - * max_cache_size is less than cache_ptr->resize_ctl.max_size, - * perform a flash increase in the cache size and then reset - * the full cache hit rate statistics, and exit. - * - * Return: Non-negative on success/Negative on failure. - * - * Programmer: John Mainzer, 12/31/07 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size) -{ - size_t new_max_cache_size = 0; - size_t old_max_cache_size = 0; - size_t new_min_clean_size = 0; - size_t old_min_clean_size = 0; - size_t space_needed; - enum H5C_resize_status status = flash_increase; /* may change */ - double hit_rate; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(cache_ptr->flash_size_increase_possible); - HDassert(new_entry_size > cache_ptr->flash_size_increase_threshold); - HDassert(old_entry_size < new_entry_size); - - if (old_entry_size >= new_entry_size) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size") - - space_needed = new_entry_size - old_entry_size; - if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) && - (cache_ptr->max_cache_size < cache_ptr->resize_ctl.max_size)) { - switch (cache_ptr->resize_ctl.flash_incr_mode) { - case H5C_flash_incr__off: - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, - "flash_size_increase_possible but H5C_flash_incr__off?!") - break; - - case H5C_flash_incr__add_space: - if (cache_ptr->index_size < cache_ptr->max_cache_size) { - HDassert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed); - space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size; - } - space_needed = (size_t)(((double)space_needed) * cache_ptr->resize_ctl.flash_multiple); - new_max_cache_size = cache_ptr->max_cache_size + space_needed; - break; - - default: /* should be unreachable */ - 
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
- break;
- }
-
- if (new_max_cache_size > cache_ptr->resize_ctl.max_size)
- new_max_cache_size = cache_ptr->resize_ctl.max_size;
- HDassert(new_max_cache_size > cache_ptr->max_cache_size);
-
- new_min_clean_size = (size_t)((double)new_max_cache_size * cache_ptr->resize_ctl.min_clean_fraction);
- HDassert(new_min_clean_size <= new_max_cache_size);
-
- old_max_cache_size = cache_ptr->max_cache_size;
- old_min_clean_size = cache_ptr->min_clean_size;
-
- cache_ptr->max_cache_size = new_max_cache_size;
- cache_ptr->min_clean_size = new_min_clean_size;
-
- /* update flash cache size increase fields as appropriate */
- HDassert(cache_ptr->flash_size_increase_possible);
-
- switch (cache_ptr->resize_ctl.flash_incr_mode) {
- case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flash_size_increase_possible but H5C_flash_incr__off?!")
- break;
-
- case H5C_flash_incr__add_space:
- cache_ptr->flash_size_increase_threshold =
- (size_t)((double)cache_ptr->max_cache_size * cache_ptr->resize_ctl.flash_threshold);
- break;
-
- default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
- break;
- }
-
- /* note that we don't cycle the epoch markers. We can
- * argue either way as to whether we should, but for now
- * we don't.
- */
-
- if (cache_ptr->resize_ctl.rpt_fcn != NULL) {
- /* get the hit rate for the reporting function. Should still
- * be good as we haven't reset the hit rate statistics.
- */
- if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
-
- (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status,
- old_max_cache_size, new_max_cache_size, old_min_clean_size,
- new_min_clean_size);
- }
-
- if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
- /* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
- }
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flash_increase_cache_size() */
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__flush_invalidate_cache
- *
- * Purpose: Flush and destroy the entries contained in the target
- * cache.
- *
- * If the cache contains protected entries, the function will
- * fail, as protected entries cannot be either flushed or
- * destroyed. However all unprotected entries should be
- * flushed and destroyed before the function returns failure.
- *
- * While pinned entries can usually be flushed, they cannot
- * be destroyed. However, they should be unpinned when all
- * the entries that reference them have been destroyed (thus
- * reducing the pinned entry's reference count to 0, allowing
- * it to be unpinned).
- *
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
- * (including the pinned dirty entries where permitted) and
- * destroying all unpinned entries. This process is repeated
- * until either the cache is empty, or the number of pinned
- * entries stops decreasing on each pass.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected. 
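The H5C_flash_incr__add_space branch of H5C__flash_increase_cache_size() above first credits any free space already in the cache against the space the growing entry needs, scales what is left by flash_multiple, and clips the result to the configured maximum. A standalone sketch of that calculation follows; flash_add_space() is an illustrative name, not an HDF5 routine, and the underflow guard is added here for safety.

    #include <stddef.h>

    /* Compute the new maximum cache size for an "add space" flash increase. */
    static size_t
    flash_add_space(size_t max_cache_size, size_t index_size, size_t space_needed,
                    double flash_multiple, size_t configured_max)
    {
        size_t new_max;

        /* credit whatever free space the cache already has */
        if (index_size < max_cache_size) {
            size_t free_space = max_cache_size - index_size;

            space_needed = (space_needed > free_space) ? (space_needed - free_space) : 0;
        }

        /* over-allocate by the configured multiple, then clip to the absolute maximum */
        space_needed = (size_t)((double)space_needed * flash_multiple);
        new_max      = max_cache_size + space_needed;
        if (new_max > configured_max)
            new_max = configured_max;

        return new_max;
    }

The flash_size_increase_threshold is then recomputed as flash_threshold times the new maximum, so later oversized entries are judged against the enlarged cache.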
- * - * Programmer: John Mainzer - * 3/24/05 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) -{ - H5C_t *cache_ptr; - H5C_ring_t ring; - herr_t ret_value = SUCCEED; - - FUNC_ENTER_PACKAGE - - HDassert(f); - HDassert(f->shared); - cache_ptr = f->shared->cache; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(cache_ptr->slist_ptr); - HDassert(cache_ptr->slist_enabled); - -#ifdef H5C_DO_SANITY_CHECKS - { - int32_t i; - uint32_t index_len = 0; - uint32_t slist_len = 0; - size_t index_size = (size_t)0; - size_t clean_index_size = (size_t)0; - size_t dirty_index_size = (size_t)0; - size_t slist_size = (size_t)0; - - HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0); - HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0); - HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - - for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) { - index_len += cache_ptr->index_ring_len[i]; - index_size += cache_ptr->index_ring_size[i]; - clean_index_size += cache_ptr->clean_index_ring_size[i]; - dirty_index_size += cache_ptr->dirty_index_ring_size[i]; - - slist_len += cache_ptr->slist_ring_len[i]; - slist_size += cache_ptr->slist_ring_size[i]; - } /* end for */ - - HDassert(cache_ptr->index_len == index_len); - HDassert(cache_ptr->index_size == index_size); - HDassert(cache_ptr->clean_index_size == clean_index_size); - HDassert(cache_ptr->dirty_index_size == dirty_index_size); - HDassert(cache_ptr->slist_len == slist_len); - HDassert(cache_ptr->slist_size == slist_size); - } -#endif /* H5C_DO_SANITY_CHECKS */ - - /* remove ageout markers if present */ - if (cache_ptr->epoch_markers_active > 0) - if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers") - - /* flush invalidate each ring, starting from the outermost ring and - * working inward. 
- */
- ring = H5C_RING_USER;
- while (ring < H5C_RING_NTYPES) {
- if (H5C__flush_invalidate_ring(f, ring, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
- ring++;
- } /* end while */
-
-#ifndef NDEBUG
- /* Invariants, after destroying all entries in the hash table */
- if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
- HDassert(cache_ptr->index_size == 0);
- HDassert(cache_ptr->clean_index_size == 0);
- HDassert(cache_ptr->pel_len == 0);
- HDassert(cache_ptr->pel_size == 0);
- } /* end if */
- else {
- H5C_cache_entry_t *entry_ptr; /* Cache entry */
- unsigned u; /* Local index variable */
-
- /* All rings except ring 4 should be empty now */
- /* (Ring 4 has the superblock) */
- for (u = H5C_RING_USER; u < H5C_RING_SB; u++) {
- HDassert(cache_ptr->index_ring_len[u] == 0);
- HDassert(cache_ptr->index_ring_size[u] == 0);
- HDassert(cache_ptr->clean_index_ring_size[u] == 0);
- } /* end for */
-
- /* Check that any remaining pinned entries are in the superblock ring */
- entry_ptr = cache_ptr->pel_head_ptr;
- while (entry_ptr) {
- /* Check ring */
- HDassert(entry_ptr->ring == H5C_RING_SB);
-
- /* Advance to next entry in pinned entry list */
- entry_ptr = entry_ptr->next;
- } /* end while */
- } /* end else */
-
- HDassert(cache_ptr->dirty_index_size == 0);
- HDassert(cache_ptr->slist_len == 0);
- HDassert(cache_ptr->slist_size == 0);
- HDassert(cache_ptr->pl_len == 0);
- HDassert(cache_ptr->pl_size == 0);
- HDassert(cache_ptr->LRU_list_len == 0);
- HDassert(cache_ptr->LRU_list_size == 0);
-#endif
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flush_invalidate_cache() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C__flush_invalidate_ring
- *
- * Purpose: Flush and destroy the entries contained in the target
- * cache and ring.
- *
- * If the ring contains protected entries, the function will
- * fail, as protected entries cannot be either flushed or
- * destroyed. However all unprotected entries should be
- * flushed and destroyed before the function returns failure.
- *
- * While pinned entries can usually be flushed, they cannot
- * be destroyed. However, they should be unpinned when all
- * the entries that reference them have been destroyed (thus
- * reducing the pinned entry's reference count to 0, allowing
- * it to be unpinned).
- *
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
- * (including the pinned dirty entries where permitted) and
- * destroying all unpinned entries. This process is repeated
- * until either the cache is empty, or the number of pinned
- * entries stops decreasing on each pass.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the cache flushing
- * entries in flush dependency order.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected. 
- *
- * Programmer: John Mainzer
- * 9/1/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
-{
- H5C_t *cache_ptr;
- hbool_t restart_slist_scan;
- uint32_t protected_entries = 0;
- int32_t i;
- uint32_t cur_ring_pel_len;
- uint32_t old_ring_pel_len;
- unsigned cooked_flags;
- unsigned evict_flags;
- H5SL_node_t *node_ptr = NULL;
- H5C_cache_entry_t *entry_ptr = NULL;
- H5C_cache_entry_t *next_entry_ptr = NULL;
-#ifdef H5C_DO_SANITY_CHECKS
- uint32_t initial_slist_len = 0;
- size_t initial_slist_size = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_PACKAGE
-
- HDassert(f);
- HDassert(f->shared);
-
- cache_ptr = f->shared->cache;
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_enabled);
- HDassert(cache_ptr->slist_ptr);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
-
- HDassert(cache_ptr->epoch_markers_active == 0);
-
- /* Filter out the flags that are not relevant to the flush/invalidate.
- */
- cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
- evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
-
- /* The flush procedure here is a bit strange.
- *
- * In the outer while loop we make at least one pass through the
- * cache, and then repeat until either all the pinned entries in
- * the ring unpin themselves, or until the number of pinned entries
- * in the ring stops declining. In this latter case, we scream and die.
- *
- * Since the fractal heap can dirty, resize, and/or move entries
- * in its flush callback, it is possible that the cache will still
- * contain dirty entries at this point. If so, we must make more
- * passes through the skip list to allow it to empty.
- *
- * Further, since clean entries can be dirtied, resized, and/or moved
- * as the result of a flush callback (either the entry's own, or that
- * for some other cache entry), we can no longer promise to flush
- * the cache entries in increasing address order.
- *
- * Instead, we make a pass through
- * the skip list, and then a pass through the "clean" entries, and
- * then repeat as needed. Thus it is quite possible that an
- * entry will be evicted from the cache only to be re-loaded later
- * in the flush process.
- *
- * The bottom line is that entries will probably be flushed in close
- * to increasing address order, but there are no guarantees.
- */
-
- /* compute the number of pinned entries in this ring */
- entry_ptr = cache_ptr->pel_head_ptr;
- cur_ring_pel_len = 0;
- while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
- if (entry_ptr->ring == ring)
- cur_ring_pel_len++;
-
- entry_ptr = entry_ptr->next;
- } /* end while */
- old_ring_pel_len = cur_ring_pel_len;
-
- while (cache_ptr->index_ring_len[ring] > 0) {
- /* first, try to flush-destroy any dirty entries. Do this by
- * making a scan through the slist. Note that new dirty entries
- * may be created by the flush callbacks. Thus it is possible
- * that the slist will not be empty after we finish the scan.
- */
-
-#ifdef H5C_DO_SANITY_CHECKS
- /* Depending on circumstances, H5C__flush_single_entry() will
- * remove dirty entries from the slist as it flushes them.
- * Thus for sanity checks we must make note of the initial
- * slist length and size before we do any flushes. 
- */ - initial_slist_len = cache_ptr->slist_len; - initial_slist_size = cache_ptr->slist_size; - - /* There is also the possibility that entries will be - * dirtied, resized, moved, and/or removed from the cache - * as the result of calls to the flush callbacks. We use - * the slist_len_increase and slist_size_increase increase - * fields in struct H5C_t to track these changes for purpose - * of sanity checking. - * - * To this end, we must zero these fields before we start - * the pass through the slist. - */ - cache_ptr->slist_len_increase = 0; - cache_ptr->slist_size_increase = 0; -#endif /* H5C_DO_SANITY_CHECKS */ - - /* Set the cache_ptr->slist_changed to false. - * - * This flag is set to TRUE by H5C__flush_single_entry if the slist - * is modified by a pre_serialize, serialize, or notify callback. - * - * H5C__flush_invalidate_ring() uses this flag to detect any - * modifications to the slist that might corrupt the scan of - * the slist -- and restart the scan in this event. - */ - cache_ptr->slist_changed = FALSE; - - /* this done, start the scan of the slist */ - restart_slist_scan = TRUE; - while (restart_slist_scan || (node_ptr != NULL)) { - if (restart_slist_scan) { - restart_slist_scan = FALSE; - - /* Start at beginning of skip list */ - node_ptr = H5SL_first(cache_ptr->slist_ptr); - if (node_ptr == NULL) - /* the slist is empty -- break out of inner loop */ - break; - - /* Get cache entry for this node */ - next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - if (NULL == next_entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") - - HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(next_entry_ptr->is_dirty); - HDassert(next_entry_ptr->in_slist); - HDassert(next_entry_ptr->ring >= ring); - } /* end if */ - - entry_ptr = next_entry_ptr; - - /* It is possible that entries will be dirtied, resized, - * flushed, or removed from the cache via the take ownership - * flag as the result of pre_serialize or serialized callbacks. - * - * This in turn can corrupt the scan through the slist. - * - * We test for slist modifications in the pre_serialize - * and serialize callbacks, and restart the scan of the - * slist if we find them. However, best we do some extra - * sanity checking just in case. - */ - HDassert(entry_ptr != NULL); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(entry_ptr->in_slist); - HDassert(entry_ptr->is_dirty); - HDassert(entry_ptr->ring >= ring); - - /* increment node pointer now, before we delete its target - * from the slist. - */ - node_ptr = H5SL_next(node_ptr); - if (node_ptr != NULL) { - next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - if (NULL == next_entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") - - HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(next_entry_ptr->is_dirty); - HDassert(next_entry_ptr->in_slist); - HDassert(next_entry_ptr->ring >= ring); - HDassert(entry_ptr != next_entry_ptr); - } /* end if */ - else - next_entry_ptr = NULL; - - /* Note that we now remove nodes from the slist as we flush - * the associated entries, instead of leaving them there - * until we are done, and then destroying all nodes in - * the slist. - * - * While this optimization used to be easy, with the possibility - * of new entries being added to the slist in the midst of the - * flush, we must keep the slist in canonical form at all - * times. 
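The scan above advances node_ptr before flushing the current entry and starts over from the head of the skip list whenever a callback reports, through cache_ptr->slist_changed, that it modified the list. Reduced to its essentials, the pattern looks like the sketch below; node_t, visit(), and scan_with_restart() are placeholders for this sketch, not HDF5 names.

    typedef struct node_t {
        struct node_t *next;
        /* ... payload ... */
    } node_t;

    /* visit() processes one node; it returns 1 if it modified the list in a
     * way that could invalidate the scan, 0 if not, and -1 on error.  It is
     * only declared here, as a stand-in for the flush machinery.
     */
    extern int visit(node_t *node);

    static int
    scan_with_restart(node_t **head)
    {
        int     restart = 1;
        node_t *node    = NULL;

        while (restart || node != NULL) {
            node_t *next;
            int     rc;

            if (restart) {
                restart = 0;
                node    = *head; /* start over from the beginning */
                if (node == NULL)
                    break;
            }

            next = node->next; /* advance before visiting; visit may consume node */

            rc = visit(node);
            if (rc < 0)
                return -1;
            if (rc > 0)
                restart = 1; /* the scan may be corrupted: restart it */

            node = next;
        }
        return 0;
    }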
- */ - if (((!entry_ptr->flush_me_last) || - ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) && - (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) { - if (entry_ptr->is_protected) { - /* We have major problems -- but lets flush - * everything we can before we flag an error. - */ - protected_entries++; - } /* end if */ - else if (entry_ptr->is_pinned) { - if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed") - - if (cache_ptr->slist_changed) { - /* The slist has been modified by something - * other than the simple removal of the - * of the flushed entry after the flush. - * - * This has the potential to corrupt the - * scan through the slist, so restart it. - */ - restart_slist_scan = TRUE; - cache_ptr->slist_changed = FALSE; - H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr); - } /* end if */ - } /* end else-if */ - else { - if (H5C__flush_single_entry(f, entry_ptr, - (cooked_flags | H5C__DURING_FLUSH_FLAG | - H5C__FLUSH_INVALIDATE_FLAG | - H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed") - - if (cache_ptr->slist_changed) { - /* The slist has been modified by something - * other than the simple removal of the - * of the flushed entry after the flush. - * - * This has the potential to corrupt the - * scan through the slist, so restart it. - */ - restart_slist_scan = TRUE; - cache_ptr->slist_changed = FALSE; - H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) - } /* end if */ - } /* end else */ - } /* end if */ - } /* end while loop scanning skip list */ - -#ifdef H5C_DO_SANITY_CHECKS - /* It is possible that entries were added to the slist during - * the scan, either before or after scan pointer. The following - * asserts take this into account. - * - * Don't bother with the sanity checks if node_ptr != NULL, as - * in this case we broke out of the loop because it got changed - * out from under us. - */ - - if (node_ptr == NULL) { - HDassert(cache_ptr->slist_len == - (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase)); - HDassert(cache_ptr->slist_size == - (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase)); - } /* end if */ -#endif /* H5C_DO_SANITY_CHECKS */ - - /* Since we are doing a destroy, we must make a pass through - * the hash table and try to flush - destroy all entries that - * remain. - * - * It used to be that all entries remaining in the cache at - * this point had to be clean, but with the fractal heap mods - * this may not be the case. If so, we will flush entries out - * in increasing address order. - * - * Writes to disk are possible here. - */ - - /* Reset the counters so that we can detect insertions, loads, - * and moves caused by the pre_serialize and serialize calls. 
- */ - cache_ptr->entries_loaded_counter = 0; - cache_ptr->entries_inserted_counter = 0; - cache_ptr->entries_relocated_counter = 0; - - next_entry_ptr = cache_ptr->il_head; - while (next_entry_ptr != NULL) { - entry_ptr = next_entry_ptr; - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(entry_ptr->ring >= ring); - - next_entry_ptr = entry_ptr->il_next; - HDassert((next_entry_ptr == NULL) || (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC)); - - if (((!entry_ptr->flush_me_last) || - (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) && - (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) { - - if (entry_ptr->is_protected) { - /* we have major problems -- but lets flush and - * destroy everything we can before we flag an - * error. - */ - protected_entries++; - - if (!entry_ptr->in_slist) - HDassert(!(entry_ptr->is_dirty)); - } /* end if */ - else if (!entry_ptr->is_pinned) { - /* if *entry_ptr is dirty, it is possible - * that one or more other entries may be - * either removed from the cache, loaded - * into the cache, or moved to a new location - * in the file as a side effect of the flush. - * - * It's also possible that removing a clean - * entry will remove the last child of a proxy - * entry, allowing it to be removed also and - * invalidating the next_entry_ptr. - * - * If either of these happen, and one of the target - * or proxy entries happens to be the next entry in - * the hash bucket, we could either find ourselves - * either scanning a non-existent entry, scanning - * through a different bucket, or skipping an entry. - * - * Neither of these are good, so restart the - * the scan at the head of the hash bucket - * after the flush if we detect that the next_entry_ptr - * becomes invalid. - * - * This is not as inefficient at it might seem, - * as hash buckets typically have at most two - * or three entries. - */ - cache_ptr->entry_watched_for_removal = next_entry_ptr; - if (H5C__flush_single_entry(f, entry_ptr, - (cooked_flags | H5C__DURING_FLUSH_FLAG | - H5C__FLUSH_INVALIDATE_FLAG | - H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed") - - /* Restart the index list scan if necessary. Must - * do this if the next entry is evicted, and also if - * one or more entries are inserted, loaded, or moved - * as these operations can result in part of the scan - * being skipped -- which can cause a spurious failure - * if this results in the size of the pinned entry - * failing to decline during the pass. - */ - if (((NULL != next_entry_ptr) && (NULL == cache_ptr->entry_watched_for_removal)) || - (cache_ptr->entries_loaded_counter > 0) || - (cache_ptr->entries_inserted_counter > 0) || - (cache_ptr->entries_relocated_counter > 0)) { - - next_entry_ptr = cache_ptr->il_head; - - cache_ptr->entries_loaded_counter = 0; - cache_ptr->entries_inserted_counter = 0; - cache_ptr->entries_relocated_counter = 0; - - H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr) - } /* end if */ - else - cache_ptr->entry_watched_for_removal = NULL; - } /* end if */ - } /* end if */ - } /* end for loop scanning hash table */ - - /* We can't do anything if entries are pinned. The - * hope is that the entries will be unpinned as the - * result of destroys of entries that reference them. - * - * We detect this by noting the change in the number - * of pinned entries from pass to pass. If it stops - * shrinking before it hits zero, we scream and die. 
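The pass-to-pass termination rule described above is easy to get wrong, so here is a minimal standalone sketch of just that rule, using a hypothetical pinned counter and a make_pass() stand-in instead of the real cache structures: keep making passes while the pinned count keeps shrinking, and give up once it stops declining before it reaches zero.

#include <stdio.h>

/* Hypothetical stand-ins for the cache state -- not HDF5 API. */
static unsigned pinned_count = 5;

static void
make_pass(void)
{
    /* pretend each pass manages to unpin one entry */
    if (pinned_count > 0)
        pinned_count--;
}

static int
flush_until_unpinned(void)
{
    unsigned cur_len = pinned_count;
    unsigned old_len;

    while (cur_len > 0) {
        make_pass();            /* may unpin entries as a side effect */
        old_len = cur_len;
        cur_len = pinned_count;
        if (cur_len >= old_len) /* count stopped shrinking: scream and die */
            return -1;
    }
    return 0;
}

int
main(void)
{
    printf("%s\n", flush_until_unpinned() == 0 ? "all entries unpinned" : "stuck");
    return 0;
}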
- */ - old_ring_pel_len = cur_ring_pel_len; - entry_ptr = cache_ptr->pel_head_ptr; - cur_ring_pel_len = 0; - - while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(entry_ptr->ring >= ring); - - if (entry_ptr->ring == ring) - cur_ring_pel_len++; - - entry_ptr = entry_ptr->next; - } /* end while */ - - /* Check if the number of pinned entries in the ring is positive, and - * it is not declining. Scream and die if so. - */ - if ((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) { - /* Don't error if allowed to have pinned entries remaining */ - if (evict_flags) - HGOTO_DONE(TRUE) - - HGOTO_ERROR( - H5E_CACHE, H5E_CANTFLUSH, FAIL, - "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", - (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring) - } /* end if */ - - HDassert(protected_entries == cache_ptr->pl_len); - - if ((protected_entries > 0) && (protected_entries == cache_ptr->index_len)) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, - "Only protected entries left in cache, protected_entries = %d", - (int)protected_entries) - } /* main while loop */ - - /* Invariants, after destroying all entries in the ring */ - for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) { - HDassert(cache_ptr->index_ring_len[i] == 0); - HDassert(cache_ptr->index_ring_size[i] == (size_t)0); - HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0); - HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0); - - HDassert(cache_ptr->slist_ring_len[i] == 0); - HDassert(cache_ptr->slist_ring_size[i] == (size_t)0); - } /* end for */ - - HDassert(protected_entries <= cache_ptr->pl_len); - - if (protected_entries > 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries") - else if (cur_ring_pel_len > 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__flush_invalidate_ring() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__flush_ring - * - * Purpose: Flush the entries contained in the specified cache and - * ring. All entries in rings outside the specified ring - * must have been flushed on entry. - * - * If the cache contains protected entries in the specified - * ring, the function will fail, as protected entries cannot - * be flushed. However all unprotected entries in the target - * ring should be flushed before the function returns failure. - * - * If flush dependencies appear in the target ring, the - * function makes repeated passes through the slist flushing - * entries in flush dependency order. - * - * Return: Non-negative on success/Negative on failure or if there was - * a request to flush all items and something was protected. 
- * - * Programmer: John Mainzer - * 9/1/15 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) -{ - H5C_t *cache_ptr = f->shared->cache; - hbool_t flushed_entries_last_pass; - hbool_t flush_marked_entries; - hbool_t ignore_protected; - hbool_t tried_to_flush_protected_entry = FALSE; - hbool_t restart_slist_scan; - uint32_t protected_entries = 0; - H5SL_node_t *node_ptr = NULL; - H5C_cache_entry_t *entry_ptr = NULL; - H5C_cache_entry_t *next_entry_ptr = NULL; -#ifdef H5C_DO_SANITY_CHECKS - uint32_t initial_slist_len = 0; - size_t initial_slist_size = 0; -#endif /* H5C_DO_SANITY_CHECKS */ - int i; - herr_t ret_value = SUCCEED; - - FUNC_ENTER_PACKAGE - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(cache_ptr->slist_enabled); - HDassert(cache_ptr->slist_ptr); - HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0); - HDassert(ring > H5C_RING_UNDEFINED); - HDassert(ring < H5C_RING_NTYPES); - -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - - ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0); - flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0); - - if (!flush_marked_entries) - for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++) - HDassert(cache_ptr->slist_ring_len[i] == 0); - - HDassert(cache_ptr->flush_in_progress); - - /* When we are only flushing marked entries, the slist will usually - * still contain entries when we have flushed everything we should. - * Thus we track whether we have flushed any entries in the last - * pass, and terminate if we haven't. - */ - flushed_entries_last_pass = TRUE; - - /* Set the cache_ptr->slist_changed to false. - * - * This flag is set to TRUE by H5C__flush_single_entry if the - * slist is modified by a pre_serialize, serialize, or notify callback. - * H5C_flush_cache uses this flag to detect any modifications - * to the slist that might corrupt the scan of the slist -- and - * restart the scan in this event. - */ - cache_ptr->slist_changed = FALSE; - - while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) && (flushed_entries_last_pass)) { - flushed_entries_last_pass = FALSE; - -#ifdef H5C_DO_SANITY_CHECKS - /* For sanity checking, try to verify that the skip list has - * the expected size and number of entries at the end of each - * internal while loop (see below). - * - * Doing this get a bit tricky, as depending on flags, we may - * or may not flush all the entries in the slist. - * - * To make things more entertaining, with the advent of the - * fractal heap, the entry serialize callback can cause entries - * to be dirtied, resized, and/or moved. Also, the - * pre_serialize callback can result in an entry being - * removed from the cache via the take ownership flag. - * - * To deal with this, we first make note of the initial - * skip list length and size: - */ - initial_slist_len = cache_ptr->slist_len; - initial_slist_size = cache_ptr->slist_size; - - /* As mentioned above, there is the possibility that - * entries will be dirtied, resized, flushed, or removed - * from the cache via the take ownership flag during - * our pass through the skip list. 
To capture the number - * of entries added, and the skip list size delta, - * zero the slist_len_increase and slist_size_increase of - * the cache's instance of H5C_t. These fields will be - * updated elsewhere to account for slist insertions and/or - * dirty entry size changes. - */ - cache_ptr->slist_len_increase = 0; - cache_ptr->slist_size_increase = 0; - - /* at the end of the loop, use these values to compute the - * expected slist length and size and compare this with the - * value recorded in the cache's instance of H5C_t. - */ -#endif /* H5C_DO_SANITY_CHECKS */ - - restart_slist_scan = TRUE; - while ((restart_slist_scan) || (node_ptr != NULL)) { - if (restart_slist_scan) { - restart_slist_scan = FALSE; - - /* Start at beginning of skip list */ - node_ptr = H5SL_first(cache_ptr->slist_ptr); - if (node_ptr == NULL) - /* the slist is empty -- break out of inner loop */ - break; - - /* Get cache entry for this node */ - next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - if (NULL == next_entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") - - HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(next_entry_ptr->is_dirty); - HDassert(next_entry_ptr->in_slist); - } /* end if */ - - entry_ptr = next_entry_ptr; - - /* With the advent of the fractal heap, the free space - * manager, and the version 3 cache, it is possible - * that the pre-serialize or serialize callback will - * dirty, resize, or take ownership of other entries - * in the cache. - * - * To deal with this, there is code to detect any - * change in the skip list not directly under the control - * of this function. If such modifications are detected, - * we must re-start the scan of the skip list to avoid - * the possibility that the target of the next_entry_ptr - * may have been flushed or deleted from the cache. - * - * To verify that all such possibilities have been dealt - * with, we do a bit of extra sanity checking on - * entry_ptr. - */ - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(entry_ptr->in_slist); - HDassert(entry_ptr->is_dirty); - - if (!flush_marked_entries || entry_ptr->flush_marker) - HDassert(entry_ptr->ring >= ring); - - /* Advance node pointer now, before we delete its target - * from the slist. - */ - node_ptr = H5SL_next(node_ptr); - if (node_ptr != NULL) { - next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - if (NULL == next_entry_ptr) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") - - HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(next_entry_ptr->is_dirty); - HDassert(next_entry_ptr->in_slist); - - if (!flush_marked_entries || next_entry_ptr->flush_marker) - HDassert(next_entry_ptr->ring >= ring); - - HDassert(entry_ptr != next_entry_ptr); - } /* end if */ - else - next_entry_ptr = NULL; - - if ((!flush_marked_entries || entry_ptr->flush_marker) && - ((!entry_ptr->flush_me_last) || - ((entry_ptr->flush_me_last) && ((cache_ptr->num_last_entries >= cache_ptr->slist_len) || - (flush_marked_entries && entry_ptr->flush_marker)))) && - ((entry_ptr->flush_dep_nchildren == 0) || (entry_ptr->flush_dep_ndirty_children == 0)) && - (entry_ptr->ring == ring)) { - - HDassert(entry_ptr->flush_dep_nunser_children == 0); - - if (entry_ptr->is_protected) { - /* we probably have major problems -- but lets - * flush everything we can before we decide - * whether to flag an error. 
- */ - tried_to_flush_protected_entry = TRUE; - protected_entries++; - } /* end if */ - else { - if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry") - - if (cache_ptr->slist_changed) { - /* The slist has been modified by something - * other than the simple removal of the - * of the flushed entry after the flush. - * - * This has the potential to corrupt the - * scan through the slist, so restart it. - */ - restart_slist_scan = TRUE; - cache_ptr->slist_changed = FALSE; - H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) - } /* end if */ - - flushed_entries_last_pass = TRUE; - } /* end else */ - } /* end if */ - } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */ - -#ifdef H5C_DO_SANITY_CHECKS - /* Verify that the slist size and length are as expected. */ - HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) == - cache_ptr->slist_len); - HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) == - cache_ptr->slist_size); -#endif /* H5C_DO_SANITY_CHECKS */ - } /* while */ - - HDassert(protected_entries <= cache_ptr->pl_len); - - if (((cache_ptr->pl_len > 0) && !ignore_protected) || tried_to_flush_protected_entry) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items") - -#ifdef H5C_DO_SANITY_CHECKS - if (!flush_marked_entries) { - HDassert(cache_ptr->slist_ring_len[ring] == 0); - HDassert(cache_ptr->slist_ring_size[ring] == 0); - } /* end if */ -#endif /* H5C_DO_SANITY_CHECKS */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__flush_ring() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__flush_single_entry - * - * Purpose: Flush or clear (and evict if requested) the cache entry - * with the specified address and type. If the type is NULL, - * any unprotected entry at the specified address will be - * flushed (and possibly evicted). - * - * Attempts to flush a protected entry will result in an - * error. - * - * If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will - * be cleared and not flushed, and the call can't be part of a - * sequence of flushes. - * - * The function does nothing silently if there is no entry - * at the supplied address, or if the entry found has the - * wrong type. - * - * Return: Non-negative on success/Negative on failure or if there was - * an attempt to flush a protected item. 
- * - * Programmer: John Mainzer, 5/5/04 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) -{ - H5C_t *cache_ptr; /* Cache for file */ - hbool_t destroy; /* external flag */ - hbool_t clear_only; /* external flag */ - hbool_t free_file_space; /* external flag */ - hbool_t take_ownership; /* external flag */ - hbool_t del_from_slist_on_destroy; /* external flag */ - hbool_t during_flush; /* external flag */ - hbool_t write_entry; /* internal flag */ - hbool_t destroy_entry; /* internal flag */ - hbool_t generate_image; /* internal flag */ - hbool_t update_page_buffer; /* internal flag */ - hbool_t was_dirty; - hbool_t suppress_image_entry_writes = FALSE; - hbool_t suppress_image_entry_frees = FALSE; - haddr_t entry_addr = HADDR_UNDEF; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - HDassert(f); - cache_ptr = f->shared->cache; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(entry_ptr); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(entry_ptr->ring != H5C_RING_UNDEFINED); - HDassert(entry_ptr->type); - - /* setup external flags from the flags parameter */ - destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0); - clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0); - free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0); - take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0); - del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0); - during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0); - generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0); - update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0); - - /* Set the flag for destroying the entry, based on the 'take ownership' - * and 'destroy' flags - */ - if (take_ownership) - destroy_entry = FALSE; - else - destroy_entry = destroy; - - /* we will write the entry to disk if it exists, is dirty, and if the - * clear only flag is not set. - */ - if (entry_ptr->is_dirty && !clear_only) - write_entry = TRUE; - else - write_entry = FALSE; - - /* if we have received close warning, and we have been instructed to - * generate a metadata cache image, and we have actually constructed - * the entry images, set suppress_image_entry_frees to TRUE. - * - * Set suppress_image_entry_writes to TRUE if indicated by the - * image_ctl flags. 
- */ - if (cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image && - cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries != NULL) { - - /* Sanity checks */ - HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image)); - HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image)); - HDassert((!clear_only) || !(entry_ptr->include_in_image)); - HDassert((!take_ownership) || !(entry_ptr->include_in_image)); - HDassert((!free_file_space) || !(entry_ptr->include_in_image)); - - suppress_image_entry_frees = TRUE; - - if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES) - suppress_image_entry_writes = TRUE; - } /* end if */ - - /* run initial sanity checks */ -#ifdef H5C_DO_SANITY_CHECKS - if (cache_ptr->slist_enabled) { - if (entry_ptr->in_slist) { - HDassert(entry_ptr->is_dirty); - if (entry_ptr->flush_marker && !entry_ptr->is_dirty) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks") - } /* end if */ - else { - HDassert(!entry_ptr->is_dirty); - HDassert(!entry_ptr->flush_marker); - if (entry_ptr->is_dirty || entry_ptr->flush_marker) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks") - } /* end else */ - } - else { /* slist is disabled */ - HDassert(!entry_ptr->in_slist); - if (!entry_ptr->is_dirty) - if (entry_ptr->flush_marker) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?") - } -#endif /* H5C_DO_SANITY_CHECKS */ - - if (entry_ptr->is_protected) - /* Attempt to flush a protected entry -- scream and die. */ - HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry") - - /* Set entry_ptr->flush_in_progress = TRUE and set - * entry_ptr->flush_marker = FALSE - * - * We will set flush_in_progress back to FALSE at the end if the - * entry still exists at that point. - */ - entry_ptr->flush_in_progress = TRUE; - entry_ptr->flush_marker = FALSE; - - /* Preserve current dirty state for later */ - was_dirty = entry_ptr->is_dirty; - - /* The entry is dirty, and we are doing a flush, a flush destroy or have - * been requested to generate an image. In those cases, serialize the - * entry. - */ - if (write_entry || generate_image) { - HDassert(entry_ptr->is_dirty); - if (NULL == entry_ptr->image_ptr) { - if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, - "memory allocation failed for on disk image buffer") - -#if H5C_DO_MEMORY_SANITY_CHECKS - H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, - H5C_IMAGE_EXTRA_SPACE); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - - } /* end if */ - - if (!entry_ptr->image_up_to_date) { - /* Sanity check */ - HDassert(!entry_ptr->prefetched); - - /* Generate the entry's image */ - if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image") - } /* end if ( ! (entry_ptr->image_up_to_date) ) */ - } /* end if */ - - /* Finally, write the image to disk. - * - * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the - * in the entry's type, we silently skip the write. This - * flag should only be used in test code. 
- */ - if (write_entry) { - HDassert(entry_ptr->is_dirty); - -#ifdef H5C_DO_SANITY_CHECKS - if (cache_ptr->check_write_permitted && !cache_ptr->write_permitted) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!") -#endif /* H5C_DO_SANITY_CHECKS */ - - /* Write the image to disk unless the write is suppressed. - * - * This happens if both suppress_image_entry_writes and - * entry_ptr->include_in_image are TRUE, or if the - * H5AC__CLASS_SKIP_WRITES is set in the entry's type. This - * flag should only be used in test code - */ - if ((!suppress_image_entry_writes || !entry_ptr->include_in_image) && - ((entry_ptr->type->flags & H5C__CLASS_SKIP_WRITES) == 0)) { - H5FD_mem_t mem_type = H5FD_MEM_DEFAULT; - -#ifdef H5_HAVE_PARALLEL - if (cache_ptr->coll_write_list) { - if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item") - } /* end if */ - else { -#endif /* H5_HAVE_PARALLEL */ - if (entry_ptr->prefetched) { - HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); - mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type; - } /* end if */ - else - mem_type = entry_ptr->type->mem_type; - - if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file") -#ifdef H5_HAVE_PARALLEL - } -#endif /* H5_HAVE_PARALLEL */ - } /* end if */ - - /* if the entry has a notify callback, notify it that we have - * just flushed the entry. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush") - } /* if ( write_entry ) */ - - /* At this point, all pre-serialize and serialize calls have been - * made if it was appropriate to make them. Similarly, the entry - * has been written to disk if desired. - * - * Thus it is now safe to update the cache data structures for the - * flush. - */ - - /* start by updating the statistics */ - if (clear_only) { - /* only log a clear if the entry was dirty */ - if (was_dirty) - H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) - } - else if (write_entry) { - HDassert(was_dirty); - - /* only log a flush if we actually wrote to disk */ - H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) - } /* end else if */ - - /* Note that the algorithm below is (very) similar to the set of operations - * in H5C_remove_entry() and should be kept in sync with changes - * to that code. - QAK, 2016/11/30 - */ - - /* Update the cache internal data structures. */ - if (destroy) { - /* Sanity checks */ - if (take_ownership) - HDassert(!destroy_entry); - else - HDassert(destroy_entry); - - HDassert(!entry_ptr->is_pinned); - - /* Update stats, while entry is still in the cache */ - H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) - - /* If the entry's type has a 'notify' callback and the entry is about - * to be removed from the cache, send a 'before eviction' notice while - * the entry is still fully integrated in the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict") - - /* Update the cache internal data structures as appropriate - * for a destroy. 
Specifically: - * - * 1) Delete it from the index - * - * 2) Delete it from the skip list if requested. - * - * 3) Delete it from the collective read access list. - * - * 4) Update the replacement policy for eviction - * - * 5) Remove it from the tag list for this object - * - * Finally, if the destroy_entry flag is set, discard the - * entry. - */ - H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL) - - if (entry_ptr->in_slist && del_from_slist_on_destroy) - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL) - -#ifdef H5_HAVE_PARALLEL - /* Check for collective read access flag */ - if (entry_ptr->coll_access) { - entry_ptr->coll_access = FALSE; - H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - - H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL) - - /* Remove entry from tag list */ - if (H5C__untag_entry(cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list") - - /* verify that the entry is no longer part of any flush dependencies */ - HDassert(entry_ptr->flush_dep_nparents == 0); - HDassert(entry_ptr->flush_dep_nchildren == 0); - } /* end if */ - else { - HDassert(clear_only || write_entry); - HDassert(entry_ptr->is_dirty); - HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist)); - - /* We are either doing a flush or a clear. - * - * A clear and a flush are the same from the point of - * view of the replacement policy and the slist. - * Hence no differentiation between them. - */ - H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL) - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL) - - /* mark the entry as clean and update the index for - * entry clean. Also, call the clear callback - * if defined. - */ - entry_ptr->is_dirty = FALSE; - - H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL); - - /* Check for entry changing status and do notifications, etc. */ - if (was_dirty) { - /* If the entry's type has a 'notify' callback send a - * 'entry cleaned' notice now that the entry is fully - * integrated into the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify client about entry dirty flag cleared") - - /* Propagate the clean flag up the flush dependency chain - * if appropriate - */ - if (entry_ptr->flush_dep_ndirty_children != 0) - HDassert(entry_ptr->flush_dep_ndirty_children == 0); - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_clean(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag") - } /* end if */ - } /* end else */ - - /* reset the flush_in progress flag */ - entry_ptr->flush_in_progress = FALSE; - - /* capture the cache entry address for the log_flush call at the - * end before the entry_ptr gets freed - */ - entry_addr = entry_ptr->addr; - - /* Internal cache data structures should now be up to date, and - * consistent with the status of the entry. - * - * Now discard the entry if appropriate. - */ - if (destroy) { - /* Sanity check */ - HDassert(0 == entry_ptr->flush_dep_nparents); - - /* if both suppress_image_entry_frees and entry_ptr->include_in_image - * are true, simply set entry_ptr->image_ptr to NULL, as we have - * another pointer to the buffer in an instance of H5C_image_entry_t - * in cache_ptr->image_entries. - * - * Otherwise, free the buffer if it exists. 
- */ - if (suppress_image_entry_frees && entry_ptr->include_in_image) - entry_ptr->image_ptr = NULL; - else if (entry_ptr->image_ptr != NULL) - entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); - - /* If the entry is not a prefetched entry, verify that the flush - * dependency parents addresses array has been transferred. - * - * If the entry is prefetched, the free_isr routine will dispose of - * the flush dependency parents addresses array if necessary. - */ - if (!entry_ptr->prefetched) { - HDassert(0 == entry_ptr->fd_parent_count); - HDassert(NULL == entry_ptr->fd_parent_addrs); - } /* end if */ - - /* Check whether we should free the space in the file that - * the entry occupies - */ - if (free_file_space) { - hsize_t fsf_size; - - /* Sanity checks */ - HDassert(H5F_addr_defined(entry_ptr->addr)); - HDassert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr)); -#ifndef NDEBUG - { - size_t curr_len; - - /* Get the actual image size for the thing again */ - entry_ptr->type->image_len((void *)entry_ptr, &curr_len); - HDassert(curr_len == entry_ptr->size); - } -#endif - - /* If the file space free size callback is defined, use - * it to get the size of the block of file space to free. - * Otherwise use entry_ptr->size. - */ - if (entry_ptr->type->fsf_size) { - if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size") - } /* end if */ - else /* no file space free size callback -- use entry size */ - fsf_size = entry_ptr->size; - - /* Release the space on disk */ - if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry") - } /* end if ( free_file_space ) */ - - /* Reset the pointer to the cache the entry is within. -QAK */ - entry_ptr->cache_ptr = NULL; - - /* increment entries_removed_counter and set - * last_entry_removed_ptr. As we are likely abuut to - * free the entry, recall that last_entry_removed_ptr - * must NEVER be dereferenced. - * - * Recall that these fields are maintained to allow functions - * that perform scans of lists of entries to detect the - * unexpected removal of entries (via expunge, eviction, - * or take ownership at present), so that they can re-start - * their scans if necessary. - * - * Also check if the entry we are watching for removal is being - * removed (usually the 'next' entry for an iteration) and reset - * it to indicate that it was removed. - */ - cache_ptr->entries_removed_counter++; - cache_ptr->last_entry_removed_ptr = entry_ptr; - - if (entry_ptr == cache_ptr->entry_watched_for_removal) - cache_ptr->entry_watched_for_removal = NULL; - - /* Check for actually destroying the entry in memory */ - /* (As opposed to taking ownership of it) */ - if (destroy_entry) { - if (entry_ptr->is_dirty) { - /* Reset dirty flag */ - entry_ptr->is_dirty = FALSE; - - /* If the entry's type has a 'notify' callback send a - * 'entry cleaned' notice now that the entry is fully - * integrated into the cache. - */ - if (entry_ptr->type->notify && - (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify client about entry dirty flag cleared") - } /* end if */ - - /* we are about to discard the in core representation -- - * set the magic field to bad magic so we can detect a - * freed entry if we see one. 
- */ - entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; - - /* verify that the image has been freed */ - HDassert(entry_ptr->image_ptr == NULL); - - if (entry_ptr->type->free_icr((void *)entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed") - } /* end if */ - else { - HDassert(take_ownership); - - /* Client is taking ownership of the entry. Set bad magic here too - * so the cache will choke unless the entry is re-inserted properly - */ - entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; - } /* end else */ - } /* if (destroy) */ - - /* Check if we have to update the page buffer with cleared entries - * so it doesn't go out of date - */ - if (update_page_buffer) { - /* Sanity check */ - HDassert(!destroy); - HDassert(entry_ptr->image_ptr); - - if (f->shared->page_buf && (f->shared->page_buf->page_size >= entry_ptr->size)) - if (H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size, - entry_ptr->image_ptr) > 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache") - } /* end if */ - - if (cache_ptr->log_flush) - if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed") - -done: - HDassert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress)); - HDassert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty)); - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__flush_single_entry() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__verify_len_eoa - * - * Purpose: Verify that 'len' does not exceed eoa when 'actual' is - * false i.e. 'len" is the initial speculative length from - * get_load_size callback with null image pointer. - * If exceed, adjust 'len' accordingly. - * - * Verify that 'len' should not exceed eoa when 'actual' is - * true i.e. 'len' is the actual length from get_load_size - * callback with non-null image pointer. - * If exceed, return error. - * - * Return: FAIL if error is detected, SUCCEED otherwise. - * - * Programmer: Vailin Choi - * 9/6/15 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, hbool_t actual) -{ - H5FD_mem_t cooked_type; /* Modified type, accounting for switching global heaps */ - haddr_t eoa; /* End-of-allocation in the file */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces - * type to H5FD_MEM_DRAW via its call to H5F__accum_read(). - * Thus we do the same for purposes of computing the EOA - * for sanity checks. - */ - cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ? 
H5FD_MEM_DRAW : type->mem_type; - - /* Get the file's end-of-allocation value */ - eoa = H5F_get_eoa(f, cooked_type); - if (!H5F_addr_defined(eoa)) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file") - - /* Check for bad address in general */ - if (H5F_addr_gt(addr, eoa)) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation") - - /* Check if the amount of data to read will be past the EOA */ - if (H5F_addr_gt((addr + *len), eoa)) { - if (actual) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA") - else - /* Trim down the length of the metadata */ - *len = (size_t)(eoa - addr); - } /* end if */ - - if (*len <= 0) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__verify_len_eoa() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__load_entry - * - * Purpose: Attempt to load the entry at the specified disk address - * and with the specified type into memory. If successful. - * return the in memory address of the entry. Return NULL - * on failure. - * - * Note that this function simply loads the entry into - * core. It does not insert it into the cache. - * - * Return: Non-NULL on success / NULL on failure. - * - * Programmer: John Mainzer, 5/18/04 - * - *------------------------------------------------------------------------- - */ -static void * -H5C__load_entry(H5F_t *f, -#ifdef H5_HAVE_PARALLEL - hbool_t coll_access, -#endif /* H5_HAVE_PARALLEL */ - const H5C_class_t *type, haddr_t addr, void *udata) -{ - hbool_t dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */ - uint8_t *image = NULL; /* Buffer for disk image */ - void *thing = NULL; /* Pointer to thing loaded */ - H5C_cache_entry_t *entry = NULL; /* Alias for thing loaded, as cache entry */ - size_t len; /* Size of image in file */ -#ifdef H5_HAVE_PARALLEL - int mpi_rank = 0; /* MPI process rank */ - MPI_Comm comm = MPI_COMM_NULL; /* File MPI Communicator */ - int mpi_code; /* MPI error code */ -#endif /* H5_HAVE_PARALLEL */ - void *ret_value = NULL; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - HDassert(f); - HDassert(f->shared); - HDassert(f->shared->cache); - HDassert(type); - HDassert(H5F_addr_defined(addr)); - HDassert(type->get_initial_load_size); - if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) - HDassert(type->get_final_load_size); - else - HDassert(NULL == type->get_final_load_size); - HDassert(type->deserialize); - - /* Can't see how skip reads could be usefully combined with - * the speculative read flag. Hence disallow. 
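As a worked illustration of the EOA check in H5C__verify_len_eoa() above, here is a minimal standalone sketch with made-up numbers (a 64-byte speculative read at address 1000 in a file whose end-of-allocation is 1024); the function and argument names are illustrative only, not HDF5 API.

#include <stdio.h>

static int
clamp_len_to_eoa(unsigned long addr, unsigned long eoa, unsigned long *len, int actual)
{
    if (addr > eoa)
        return -1;              /* object starts past end of allocation */

    if (addr + *len > eoa) {
        if (actual)
            return -1;          /* an actual length may never exceed EOA */
        *len = eoa - addr;      /* trim the speculative length */
    }

    return (*len > 0) ? 0 : -1; /* a zero length after trimming is an error */
}

int
main(void)
{
    unsigned long len = 64;

    if (clamp_len_to_eoa(1000, 1024, &len, 0) == 0)
        printf("speculative len trimmed to %lu\n", len); /* prints 24 */
    return 0;
}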
- */ - HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG))); - - /* Call the get_initial_load_size callback, to retrieve the initial size of image */ - if (type->get_initial_load_size(udata, &len) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size") - HDassert(len > 0); - - /* Check for possible speculative read off the end of the file */ - if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) - if (H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA") - - /* Allocate the buffer for reading the on-disk entry image */ - if (NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer") -#if H5C_DO_MEMORY_SANITY_CHECKS - H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - -#ifdef H5_HAVE_PARALLEL - if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) { - if ((mpi_rank = H5F_mpi_get_rank(f)) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank") - if ((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL) - HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed") - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - - /* Get the on-disk entry image */ - if (0 == (type->flags & H5C__CLASS_SKIP_READS)) { - unsigned tries, max_tries; /* The # of read attempts */ - unsigned retries; /* The # of retries */ - htri_t chk_ret; /* return from verify_chksum callback */ - size_t actual_len = len; /* The actual length, after speculative reads have been resolved */ - uint64_t nanosec = 1; /* # of nanoseconds to sleep between retries */ - void *new_image; /* Pointer to image */ - hbool_t len_changed = TRUE; /* Whether to re-check speculative entries */ - - /* Get the # of read attempts */ - max_tries = tries = H5F_GET_READ_ATTEMPTS(f); - - /* - * This do/while loop performs the following till the metadata checksum - * is correct or the file's number of allowed read attempts are reached. 
- * --read the metadata - * --determine the actual size of the metadata - * --perform checksum verification - */ - do { - if (actual_len != len) { - if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()") - image = (uint8_t *)new_image; -#if H5C_DO_MEMORY_SANITY_CHECKS - H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - } /* end if */ - -#ifdef H5_HAVE_PARALLEL - if (!coll_access || 0 == mpi_rank) { -#endif /* H5_HAVE_PARALLEL */ - if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) { -#ifdef H5_HAVE_PARALLEL - if (coll_access) { - /* Push an error, but still participate in following MPI_Bcast */ - HDmemset(image, 0, len); - HDONE_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*") - } - else -#endif - HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*") - } - -#ifdef H5_HAVE_PARALLEL - } /* end if */ - /* if the collective metadata read optimization is turned on, - * bcast the metadata read from process 0 to all ranks in the file - * communicator - */ - if (coll_access) { - int buf_size; - - H5_CHECKED_ASSIGN(buf_size, int, len, size_t); - if (MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm))) - HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - - /* If the entry could be read speculatively and the length is still - * changing, check for updating the actual size - */ - if ((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && len_changed) { - /* Retrieve the actual length */ - actual_len = len; - if (type->get_final_load_size(image, len, udata, &actual_len) < 0) - continue; /* Transfer control to while() and count towards retries */ - - /* Check for the length changing */ - if (actual_len != len) { - /* Verify that the length isn't past the EOA for the file */ - if (H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA") - - /* Expand buffer to new size */ - if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()") - image = (uint8_t *)new_image; -#if H5C_DO_MEMORY_SANITY_CHECKS - H5MM_memcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - - if (actual_len > len) { -#ifdef H5_HAVE_PARALLEL - if (!coll_access || 0 == mpi_rank) { -#endif /* H5_HAVE_PARALLEL */ - /* If the thing's image needs to be bigger for a speculatively - * loaded thing, go get the on-disk image again (the extra portion). 
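The "read a guess, learn the real size, then fetch only the missing tail" pattern described above can be shown in isolation. The sketch below stands in for H5F_block_read() and the get_final_load_size callback with a fake in-memory file whose first byte encodes the true object length; that layout and all names here are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A fake 64-byte "file"; byte 0 holds the object's true length (invented). */
static const uint8_t fake_file[64] = {40, 1, 2, 3};

static size_t
fake_read(uint8_t *dst, size_t off, size_t len)
{
    if (off + len > sizeof(fake_file))
        return 0;
    memcpy(dst, fake_file + off, len);
    return len;
}

int
main(void)
{
    size_t   len   = 16;                /* initial speculative guess */
    uint8_t *image = malloc(len);

    if (image == NULL || fake_read(image, 0, len) != len)
        return 1;

    /* "get_final_load_size": the real length was in the bytes just read */
    size_t actual = image[0];

    if (actual > len) {
        uint8_t *bigger = realloc(image, actual);

        if (bigger == NULL)
            return 1;
        image = bigger;

        /* go get only the extra portion, as the cache code does below */
        if (fake_read(image + len, len, actual - len) != actual - len)
            return 1;
    }

    printf("loaded %zu bytes after guessing %zu\n", actual, len);
    free(image);
    return 0;
}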
- */ - if (H5F_block_read(f, type->mem_type, addr + len, actual_len - len, image + len) < - 0) { -#ifdef H5_HAVE_PARALLEL - if (coll_access) { - /* Push an error, but still participate in following MPI_Bcast */ - HDmemset(image + len, 0, actual_len - len); - HDONE_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image") - } - else -#endif - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image") - } - -#ifdef H5_HAVE_PARALLEL - } - /* If the collective metadata read optimization is turned on, - * Bcast the metadata read from process 0 to all ranks in the file - * communicator */ - if (coll_access) { - int buf_size; - - H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t); - if (MPI_SUCCESS != - (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm))) - HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - } /* end if */ - } /* end if (actual_len != len) */ - else { - /* The length has stabilized */ - len_changed = FALSE; - - /* Set the final length */ - len = actual_len; - } /* else */ - } /* end if */ - - /* If there's no way to verify the checksum for a piece of metadata - * (usually because there's no checksum in the file), leave now - */ - if (type->verify_chksum == NULL) - break; - - /* Verify the checksum for the metadata image */ - if ((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback") - if (chk_ret == TRUE) - break; - - /* Sleep for some time */ - H5_nanosleep(nanosec); - nanosec *= 2; /* Double the sleep time next time */ - } while (--tries); - - /* Check for too many tries */ - if (tries == 0) - HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "incorrect metadata checksum after all read attempts") - - /* Calculate and track the # of retries */ - retries = max_tries - tries; - if (retries) /* Does not track 0 retry */ - if (H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", retries) - - /* Set the final length (in case it wasn't set earlier) */ - len = actual_len; - } /* end if !H5C__CLASS_SKIP_READS */ - - /* Deserialize the on-disk image into the native memory form */ - if (NULL == (thing = type->deserialize(image, len, udata, &dirty))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image") - - entry = (H5C_cache_entry_t *)thing; - - /* In general, an entry should be clean just after it is loaded. - * - * However, when this code is used in the metadata cache, it is - * possible that object headers will be dirty at this point, as - * the deserialize function will alter object headers if necessary to - * fix an old bug. - * - * In the following assert: - * - * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) ); - * - * note that type ids 5 & 6 are associated with object headers in the - * metadata cache. - * - * When we get to using H5C for other purposes, we may wish to - * tighten up the assert so that the loophole only applies to the - * metadata cache. 
- */ - - HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6)); - - entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; - entry->cache_ptr = f->shared->cache; - entry->addr = addr; - entry->size = len; - HDassert(entry->size < H5C_MAX_ENTRY_SIZE); - entry->image_ptr = image; - entry->image_up_to_date = !dirty; - entry->type = type; - entry->is_dirty = dirty; - entry->dirtied = FALSE; - entry->is_protected = FALSE; - entry->is_read_only = FALSE; - entry->ro_ref_count = 0; - entry->is_pinned = FALSE; - entry->in_slist = FALSE; - entry->flush_marker = FALSE; -#ifdef H5_HAVE_PARALLEL - entry->clear_on_unprotect = FALSE; - entry->flush_immediately = FALSE; - entry->coll_access = coll_access; -#endif /* H5_HAVE_PARALLEL */ - entry->flush_in_progress = FALSE; - entry->destroy_in_progress = FALSE; - - entry->ring = H5C_RING_UNDEFINED; - - /* Initialize flush dependency fields */ - entry->flush_dep_parent = NULL; - entry->flush_dep_nparents = 0; - entry->flush_dep_parent_nalloc = 0; - entry->flush_dep_nchildren = 0; - entry->flush_dep_ndirty_children = 0; - entry->flush_dep_nunser_children = 0; - entry->ht_next = NULL; - entry->ht_prev = NULL; - entry->il_next = NULL; - entry->il_prev = NULL; - - entry->next = NULL; - entry->prev = NULL; - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - entry->aux_next = NULL; - entry->aux_prev = NULL; -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#ifdef H5_HAVE_PARALLEL - entry->coll_next = NULL; - entry->coll_prev = NULL; -#endif /* H5_HAVE_PARALLEL */ - - /* initialize cache image related fields */ - entry->include_in_image = FALSE; - entry->lru_rank = 0; - entry->image_dirty = FALSE; - entry->fd_parent_count = 0; - entry->fd_parent_addrs = NULL; - entry->fd_child_count = 0; - entry->fd_dirty_child_count = 0; - entry->image_fd_height = 0; - entry->prefetched = FALSE; - entry->prefetch_type_id = 0; - entry->age = 0; - entry->prefetched_dirty = FALSE; -#ifndef NDEBUG /* debugging field */ - entry->serialization_count = 0; -#endif - - /* initialize tag list fields */ - entry->tl_next = NULL; - entry->tl_prev = NULL; - entry->tag_info = NULL; - - H5C__RESET_CACHE_ENTRY_STATS(entry); - - ret_value = thing; - -done: - /* Cleanup on error */ - if (NULL == ret_value) { - /* Release resources */ - if (thing && type->free_icr(thing) < 0) - HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed") - if (image) - image = (uint8_t *)H5MM_xfree(image); - } /* end if */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__load_entry() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__make_space_in_cache - * - * Purpose: Attempt to evict cache entries until the index_size - * is at least needed_space below max_cache_size. - * - * In passing, also attempt to bring cLRU_list_size to a - * value greater than min_clean_size. - * - * Depending on circumstances, both of these goals may - * be impossible, as in parallel mode, we must avoid generating - * a write as part of a read (to avoid deadlock in collective - * I/O), and in all cases, it is possible (though hopefully - * highly unlikely) that the protected list may exceed the - * maximum size of the cache. - * - * Thus the function simply does its best, returning success - * unless an error is encountered. - * - * Observe that this function cannot occasion a read. - * - * Return: Non-negative on success/Negative on failure. 
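Before the implementation below, here is a minimal standalone sketch of the two stopping conditions the purpose note describes, with made-up sizes; need_more_space() is an illustrative helper, not part of the library, and simply mirrors the while-loop condition in H5C__make_space_in_cache().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool
need_more_space(size_t index_size, size_t clean_index_size, size_t space_needed,
                size_t max_cache_size, size_t min_clean_size)
{
    size_t empty_space = (index_size >= max_cache_size) ? 0 : max_cache_size - index_size;

    /* keep evicting while the requested space does not fit, or while the
     * clean portion of the cache is still below the minimum clean size
     */
    return (index_size + space_needed > max_cache_size) ||
           (empty_space + clean_index_size < min_clean_size);
}

int
main(void)
{
    /* a 1 MiB cache that is 3/4 full, with 1/4 MiB of it clean,
     * being asked for another 1/2 MiB of space
     */
    printf("need more space: %d\n",
           need_more_space(768 * 1024, 256 * 1024, 512 * 1024,
                           1024 * 1024, 512 * 1024)); /* prints 1 */
    return 0;
}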
- * - * Programmer: John Mainzer, 5/14/04 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) -{ - H5C_t *cache_ptr = f->shared->cache; -#if H5C_COLLECT_CACHE_STATS - int32_t clean_entries_skipped = 0; - int32_t dirty_pf_entries_skipped = 0; - int32_t total_entries_scanned = 0; -#endif /* H5C_COLLECT_CACHE_STATS */ - uint32_t entries_examined = 0; - uint32_t initial_list_len; - size_t empty_space; - hbool_t reentrant_call = FALSE; - hbool_t prev_is_dirty = FALSE; - hbool_t didnt_flush_entry = FALSE; - hbool_t restart_scan; - H5C_cache_entry_t *entry_ptr; - H5C_cache_entry_t *prev_ptr; - H5C_cache_entry_t *next_ptr; -#ifndef NDEBUG - uint32_t num_corked_entries = 0; -#endif - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - HDassert(f); - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size)); - - /* check to see if cache_ptr->msic_in_progress is TRUE. If it, this - * is a re-entrant call via a client callback called in the make - * space in cache process. To avoid an infinite recursion, set - * reentrant_call to TRUE, and goto done. - */ - if (cache_ptr->msic_in_progress) { - reentrant_call = TRUE; - HGOTO_DONE(SUCCEED); - } /* end if */ - - cache_ptr->msic_in_progress = TRUE; - - if (write_permitted) { - restart_scan = FALSE; - initial_list_len = cache_ptr->LRU_list_len; - entry_ptr = cache_ptr->LRU_tail_ptr; - - if (cache_ptr->index_size >= cache_ptr->max_cache_size) - empty_space = 0; - else - empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - - while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) || - ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) && - (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(!(entry_ptr->is_protected)); - HDassert(!(entry_ptr->is_read_only)); - HDassert((entry_ptr->ro_ref_count) == 0); - - next_ptr = entry_ptr->next; - prev_ptr = entry_ptr->prev; - - if (prev_ptr != NULL) - prev_is_dirty = prev_ptr->is_dirty; - - if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) { - /* Skip "dirty" corked entries. */ -#ifndef NDEBUG - ++num_corked_entries; -#endif - didnt_flush_entry = TRUE; - } - else if ((entry_ptr->type->id != H5AC_EPOCH_MARKER_ID) && !entry_ptr->flush_in_progress && - !entry_ptr->prefetched_dirty) { - didnt_flush_entry = FALSE; - if (entry_ptr->is_dirty) { -#if H5C_COLLECT_CACHE_STATS - if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) - cache_ptr->entries_scanned_to_make_space++; -#endif /* H5C_COLLECT_CACHE_STATS */ - - /* reset entries_removed_counter and - * last_entry_removed_ptr prior to the call to - * H5C__flush_single_entry() so that we can spot - * unexpected removals of entries from the cache, - * and set the restart_scan flag if proceeding - * would be likely to cause us to scan an entry - * that is no longer in the cache. 
- */ - cache_ptr->entries_removed_counter = 0; - cache_ptr->last_entry_removed_ptr = NULL; - - if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") - - if ((cache_ptr->entries_removed_counter > 1) || - (cache_ptr->last_entry_removed_ptr == prev_ptr)) - - restart_scan = TRUE; - } - else if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size -#ifdef H5_HAVE_PARALLEL - && !(entry_ptr->coll_access) -#endif /* H5_HAVE_PARALLEL */ - ) { -#if H5C_COLLECT_CACHE_STATS - cache_ptr->entries_scanned_to_make_space++; -#endif /* H5C_COLLECT_CACHE_STATS */ - - if (H5C__flush_single_entry(f, entry_ptr, - H5C__FLUSH_INVALIDATE_FLAG | - H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") - } - else { - /* We have enough space so don't flush clean entry. */ -#if H5C_COLLECT_CACHE_STATS - clean_entries_skipped++; -#endif /* H5C_COLLECT_CACHE_STATS */ - didnt_flush_entry = TRUE; - } - -#if H5C_COLLECT_CACHE_STATS - total_entries_scanned++; -#endif /* H5C_COLLECT_CACHE_STATS */ - } - else { - - /* Skip epoch markers, entries that are in the process - * of being flushed, and entries marked as prefetched_dirty - * (occurs in the R/O case only). - */ - didnt_flush_entry = TRUE; - -#if H5C_COLLECT_CACHE_STATS - if (entry_ptr->prefetched_dirty) - dirty_pf_entries_skipped++; -#endif /* H5C_COLLECT_CACHE_STATS */ - } - - if (prev_ptr != NULL) { - if (didnt_flush_entry) - /* epoch markers don't get flushed, and we don't touch - * entries that are in the process of being flushed. - * Hence no need for sanity checks, as we haven't - * flushed anything. Thus just set entry_ptr to prev_ptr - * and go on. - */ - entry_ptr = prev_ptr; - else if (restart_scan || prev_ptr->is_dirty != prev_is_dirty || prev_ptr->next != next_ptr || - prev_ptr->is_protected || prev_ptr->is_pinned) { - /* something has happened to the LRU -- start over - * from the tail. 
- */ - restart_scan = FALSE; - entry_ptr = cache_ptr->LRU_tail_ptr; - H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) - } - else - entry_ptr = prev_ptr; - } - else - entry_ptr = NULL; - - entries_examined++; - - if (cache_ptr->index_size >= cache_ptr->max_cache_size) - empty_space = 0; - else - empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - - HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size)); - } - -#if H5C_COLLECT_CACHE_STATS - cache_ptr->calls_to_msic++; - - cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped; - cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped; - cache_ptr->total_entries_scanned_in_msic += total_entries_scanned; - - if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic) - cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped; - - if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic) - cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped; - - if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic) - cache_ptr->max_entries_scanned_in_msic = total_entries_scanned; -#endif /* H5C_COLLECT_CACHE_STATS */ - - /* NEED: work on a better assert for corked entries */ - HDassert((entries_examined > (2 * initial_list_len)) || - ((cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) > - cache_ptr->max_cache_size) || - ((cache_ptr->clean_index_size + empty_space) >= cache_ptr->min_clean_size) || - ((num_corked_entries))); -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - - HDassert((entries_examined > (2 * initial_list_len)) || - (cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size)); - HDassert((entries_examined > (2 * initial_list_len)) || - (cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size)); - -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - } - else { - HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS); - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - initial_list_len = cache_ptr->cLRU_list_len; - entry_ptr = cache_ptr->cLRU_tail_ptr; - - while (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) && - (entries_examined <= initial_list_len) && (entry_ptr != NULL)) { - HDassert(!(entry_ptr->is_protected)); - HDassert(!(entry_ptr->is_read_only)); - HDassert((entry_ptr->ro_ref_count) == 0); - HDassert(!(entry_ptr->is_dirty)); - - prev_ptr = entry_ptr->aux_prev; - - if (!entry_ptr->prefetched_dirty -#ifdef H5_HAVE_PARALLEL - && !entry_ptr->coll_access -#endif /* H5_HAVE_PARALLEL */ - ) { - if (H5C__flush_single_entry( - f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") - } /* end if */ - - /* we are scanning the clean LRU, so the serialize function - * will not be called on any entry -- thus there is no - * concern about the list being modified out from under - * this function. - */ - - entry_ptr = prev_ptr; - entries_examined++; - } -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - } - -done: - /* Sanity checks */ - HDassert(cache_ptr->msic_in_progress); - if (!reentrant_call) - cache_ptr->msic_in_progress = FALSE; - HDassert((!reentrant_call) || (cache_ptr->msic_in_progress)); - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__make_space_in_cache() */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__validate_lru_list - * - * Purpose: Debugging function that scans the LRU list for errors. 
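The loop condition in H5C__make_space_in_cache() above is easier to see with concrete numbers. The following stand-alone toy (invented sizes, no real HDF5 types) restates the two tests that keep the eviction scan running: the incoming entry must fit under max_cache_size, and empty plus clean space must cover min_clean_size.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Invented example values -- not taken from the library defaults */
        size_t max_cache_size   = 1024;  /* cache size limit                 */
        size_t min_clean_size   = 256;   /* required clean/empty space       */
        size_t index_size       = 1000;  /* bytes currently in the cache     */
        size_t clean_index_size = 100;   /* clean bytes currently in cache   */
        size_t space_needed     = 200;   /* size of the entry being inserted */

        size_t empty_space =
            (index_size >= max_cache_size) ? 0 : max_cache_size - index_size;

        bool need_room  = (index_size + space_needed) > max_cache_size;
        bool need_clean = (empty_space + clean_index_size) < min_clean_size;

        /* With these numbers both tests hold (1200 > 1024 and 124 < 256),
         * so the scan would keep flushing/evicting from the LRU tail.
         */
        printf("keep scanning: %s\n", (need_room || need_clean) ? "yes" : "no");
        return 0;
    }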
- * - * If an error is detected, the function generates a - * diagnostic and returns FAIL. If no error is detected, - * the function returns SUCCEED. - * - * Return: FAIL if error is detected, SUCCEED otherwise. - * - * Programmer: John Mainzer, 7/14/05 - * - *------------------------------------------------------------------------- - */ -#ifdef H5C_DO_EXTREME_SANITY_CHECKS -herr_t -H5C__validate_lru_list(H5C_t *cache_ptr) -{ - int32_t len = 0; - size_t size = 0; - H5C_cache_entry_t *entry_ptr = NULL; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) && - (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list head/tail check failed") - - if ((cache_ptr->LRU_list_len == 1) && - ((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) || - (cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed") - - if ((cache_ptr->LRU_list_len >= 1) && - ((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) || - (cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed") - - entry_ptr = cache_ptr->LRU_head_ptr; - while (entry_ptr != NULL) { - if ((entry_ptr != cache_ptr->LRU_head_ptr) && - ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") - - if ((entry_ptr != cache_ptr->LRU_tail_ptr) && - ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") - - if (entry_ptr->is_pinned || entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields") - - len++; - size += entry_ptr->size; - entry_ptr = entry_ptr->next; - } - - if ((cache_ptr->LRU_list_len != (uint32_t)len) || (cache_ptr->LRU_list_size != size)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list length/size check failed") - -done: - if (ret_value != SUCCEED) - HDassert(0); - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__validate_lru_list() */ -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__validate_pinned_entry_list - * - * Purpose: Debugging function that scans the pinned entry list for - * errors. - * - * If an error is detected, the function generates a - * diagnostic and returns FAIL. If no error is detected, - * the function returns SUCCEED. - * - * Return: FAIL if error is detected, SUCCEED otherwise. 
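The checks in H5C__validate_lru_list() above, and in the pinned and protected list validators that follow, are ordinary doubly linked list invariants: consistent head/tail, matching prev/next back pointers, and length/size totals that agree with a full walk. A stand-alone toy version, with an invented node type in place of H5C_cache_entry_t:

    #include <assert.h>
    #include <stddef.h>

    struct node {
        struct node *prev, *next;
        size_t       size;
    };

    struct list {
        struct node *head, *tail;
        size_t       len, size;
    };

    /* Return 0 if the list passes the same style of checks the cache
     * validators perform, -1 on the first violation found.
     */
    static int
    validate_list(const struct list *l)
    {
        size_t             len = 0, size = 0;
        const struct node *n;

        /* head and tail must be both NULL or both non-NULL */
        if ((l->head == NULL || l->tail == NULL) && l->head != l->tail)
            return -1;

        for (n = l->head; n != NULL; n = n->next) {
            if (n != l->head && (n->prev == NULL || n->prev->next != n))
                return -1;              /* broken prev/next linkage */
            if (n != l->tail && (n->next == NULL || n->next->prev != n))
                return -1;
            len++;
            size += n->size;
        }

        /* running totals must match the bookkeeping fields */
        return (len == l->len && size == l->size) ? 0 : -1;
    }

    int
    main(void)
    {
        struct node a = {NULL, NULL, 10};
        struct node b = {&a, NULL, 20};
        struct list l = {&a, &b, 2, 30};

        a.next = &b;
        assert(validate_list(&l) == 0);

        l.size = 99;                    /* corrupt the size total */
        assert(validate_list(&l) != 0);
        return 0;
    }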
- * - * Programmer: John Mainzer, 4/25/14 - * - *------------------------------------------------------------------------- - */ -#ifdef H5C_DO_EXTREME_SANITY_CHECKS -herr_t -H5C__validate_pinned_entry_list(H5C_t *cache_ptr) -{ - int32_t len = 0; - size_t size = 0; - H5C_cache_entry_t *entry_ptr = NULL; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) && - (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list head/tail check failed") - - if ((cache_ptr->pel_len == 1) && - ((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) || - (cache_ptr->pel_head_ptr->size != cache_ptr->pel_size))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed") - - if ((cache_ptr->pel_len >= 1) && - ((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) || - (cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed") - - entry_ptr = cache_ptr->pel_head_ptr; - while (entry_ptr != NULL) { - if ((entry_ptr != cache_ptr->pel_head_ptr) && - ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") - - if ((entry_ptr != cache_ptr->pel_tail_ptr) && - ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") - - if (!entry_ptr->is_pinned) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list contains unpinned entry") - - if (!(entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields") - - len++; - size += entry_ptr->size; - entry_ptr = entry_ptr->next; - } - - if ((cache_ptr->pel_len != (uint32_t)len) || (cache_ptr->pel_size != size)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list length/size check failed") - -done: - if (ret_value != SUCCEED) - HDassert(0); - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__validate_pinned_entry_list() */ -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__validate_protected_entry_list - * - * Purpose: Debugging function that scans the protected entry list for - * errors. - * - * If an error is detected, the function generates a - * diagnostic and returns FAIL. If no error is detected, - * the function returns SUCCEED. - * - * Return: FAIL if error is detected, SUCCEED otherwise. 
- * - * Programmer: John Mainzer, 4/25/14 - * - *------------------------------------------------------------------------- - */ -#ifdef H5C_DO_EXTREME_SANITY_CHECKS -herr_t -H5C__validate_protected_entry_list(H5C_t *cache_ptr) -{ - int32_t len = 0; - size_t size = 0; - H5C_cache_entry_t *entry_ptr = NULL; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - if (((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL)) && - (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list head/tail check failed") - - if ((cache_ptr->pl_len == 1) && - ((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) || - (cache_ptr->pl_head_ptr->size != cache_ptr->pl_size))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed") - - if ((cache_ptr->pl_len >= 1) && - ((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) || - (cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed") - - entry_ptr = cache_ptr->pl_head_ptr; - while (entry_ptr != NULL) { - if ((entry_ptr != cache_ptr->pl_head_ptr) && - ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") - - if ((entry_ptr != cache_ptr->pl_tail_ptr) && - ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") - - if (!entry_ptr->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list contains unprotected entry") - - if (entry_ptr->is_read_only && (entry_ptr->ro_ref_count <= 0)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "read-only entry has non-positive ref count") - - len++; - size += entry_ptr->size; - entry_ptr = entry_ptr->next; - } - - if ((cache_ptr->pl_len != (uint32_t)len) || (cache_ptr->pl_size != size)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list length/size check failed") - -done: - if (ret_value != SUCCEED) - HDassert(0); - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__validate_protected_entry_list() */ -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__entry_in_skip_list - * - * Purpose: Debugging function that scans skip list to see if it - * is in present. We need this, as it is possible for - * an entry to be in the skip list twice. - * - * Return: FALSE if the entry is not in the skip list, and TRUE - * if it is. 
- * - * Programmer: John Mainzer, 11/1/14 - * - *------------------------------------------------------------------------- - */ -#ifdef H5C_DO_SLIST_SANITY_CHECKS -hbool_t -H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr) -{ - H5SL_node_t *node_ptr; - hbool_t in_slist; - hbool_t ret_value; - - FUNC_ENTER_PACKAGE - - /* Assertions */ - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(cache_ptr->slist_ptr); - - node_ptr = H5SL_first(cache_ptr->slist_ptr); - in_slist = FALSE; - while ((node_ptr != NULL) && (!in_slist)) { - H5C_cache_entry_t *entry_ptr; - - entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); - - HDassert(entry_ptr); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(entry_ptr->is_dirty); - HDassert(entry_ptr->in_slist); - - if (entry_ptr == target_ptr) - in_slist = TRUE; - else - node_ptr = H5SL_next(node_ptr); - } - - /* Set return value */ - ret_value = in_slist; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__entry_in_skip_list() */ -#endif /* H5C_DO_SLIST_SANITY_CHECKS */ - -/*------------------------------------------------------------------------- - * - * Function: H5C__flush_marked_entries - * - * Purpose: Flushes all marked entries in the cache. - * - * Return: FAIL if error is detected, SUCCEED otherwise. - * - * Programmer: Mike McGreevy - * November 3, 2010 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C__flush_marked_entries(H5F_t *f) -{ - herr_t ret_value = SUCCEED; - - FUNC_ENTER_PACKAGE - - /* Assertions */ - HDassert(f != NULL); - - /* Enable the slist, as it is needed in the flush */ - if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed") - - /* Flush all marked entries */ - if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache") - - /* Disable the slist. Set the clear_slist parameter to TRUE - * since we called H5C_flush_cache() with the - * H5C__FLUSH_MARKED_ENTRIES_FLAG. 
- */ - if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__flush_marked_entries */ - -/*------------------------------------------------------------------------- - * - * Function: H5C_cork - * - * Purpose: To cork/uncork/get cork status of an object depending on "action": - * H5C__SET_CORK: - * To cork the object - * Return error if the object is already corked - * H5C__UNCORK: - * To uncork the object - * Return error if the object is not corked - * H5C__GET_CORKED: - * To retrieve the cork status of an object in - * the parameter "corked" - * - * Return: Success: Non-negative - * Failure: Negative - * - * Programmer: Vailin Choi - * January 2014 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked) -{ - H5C_tag_info_t *tag_info = NULL; - herr_t ret_value = SUCCEED; - - FUNC_ENTER_NOAPI_NOINIT - - /* Assertions */ - HDassert(cache_ptr != NULL); - HDassert(H5F_addr_defined(obj_addr)); - HDassert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED); - - /* Search the list of corked object addresses in the cache */ - HASH_FIND(hh, cache_ptr->tag_list, &obj_addr, sizeof(haddr_t), tag_info); - - if (H5C__GET_CORKED == action) { - HDassert(corked); - if (tag_info != NULL && tag_info->corked) - *corked = TRUE; - else - *corked = FALSE; - } - else { - /* Sanity check */ - HDassert(H5C__SET_CORK == action || H5C__UNCORK == action); - - /* Perform appropriate action */ - if (H5C__SET_CORK == action) { - /* Check if this is the first entry for this tagged object */ - if (NULL == tag_info) { - /* Allocate new tag info struct */ - if (NULL == (tag_info = H5FL_CALLOC(H5C_tag_info_t))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate tag info for cache entry") - - /* Set the tag for all entries */ - tag_info->tag = obj_addr; - - /* Insert tag info into hash table */ - HASH_ADD(hh, cache_ptr->tag_list, tag, sizeof(haddr_t), tag_info); - } - else { - /* Check for object already corked */ - if (tag_info->corked) - HGOTO_ERROR(H5E_CACHE, H5E_CANTCORK, FAIL, "object already corked") - HDassert(tag_info->entry_cnt > 0 && tag_info->head); - } - - /* Set the corked status for the entire object */ - tag_info->corked = TRUE; - cache_ptr->num_objs_corked++; - } - else { - /* Sanity check */ - if (NULL == tag_info) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "tag info pointer is NULL") - - /* Check for already uncorked */ - if (!tag_info->corked) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "object already uncorked") - - /* Set the corked status for the entire object */ - tag_info->corked = FALSE; - cache_ptr->num_objs_corked--; - - /* Remove the tag info from the tag list, if there's no more entries with this tag */ - if (0 == tag_info->entry_cnt) { - /* Sanity check */ - HDassert(NULL == tag_info->head); - - HASH_DELETE(hh, cache_ptr->tag_list, tag_info); - - /* Release the tag info */ - tag_info = H5FL_FREE(H5C_tag_info_t, tag_info); - } - else - HDassert(NULL != tag_info->head); - } - } - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_cork() */ - -/*------------------------------------------------------------------------- - * Function: H5C__mark_flush_dep_dirty() - * - * Purpose: Recursively propagate the flush_dep_ndirty_children flag - * up the dependency chain in response to entry either - * becoming dirty or having its 
flush_dep_ndirty_children - * increased from 0. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Neil Fortner - * 11/13/12 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry) -{ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - HDassert(entry); - - /* Iterate over the parent entries, if any */ - for (u = 0; u < entry->flush_dep_nparents; u++) { - /* Sanity check */ - HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children < - entry->flush_dep_parent[u]->flush_dep_nchildren); - - /* Adjust the parent's number of dirty children */ - entry->flush_dep_parent[u]->flush_dep_ndirty_children++; - - /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */ - if (entry->flush_dep_parent[u]->type->notify && - (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, - entry->flush_dep_parent[u]) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify parent about child entry dirty flag set") - } /* end for */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__mark_flush_dep_dirty() */ - -/*------------------------------------------------------------------------- - * Function: H5C__mark_flush_dep_clean() - * - * Purpose: Recursively propagate the flush_dep_ndirty_children flag - * up the dependency chain in response to entry either - * becoming clean or having its flush_dep_ndirty_children - * reduced to 0. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Neil Fortner - * 11/13/12 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry) -{ - int i; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - HDassert(entry); - - /* Iterate over the parent entries, if any */ - /* Note reverse iteration order, in case the callback removes the flush - * dependency - QAK, 2017/08/12 - */ - for (i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) { - /* Sanity check */ - HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0); - - /* Adjust the parent's number of dirty children */ - entry->flush_dep_parent[i]->flush_dep_ndirty_children--; - - /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */ - if (entry->flush_dep_parent[i]->type->notify && - (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, - entry->flush_dep_parent[i]) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify parent about child entry dirty flag reset") - } /* end for */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__mark_flush_dep_clean() */ - -/*------------------------------------------------------------------------- - * Function: H5C__mark_flush_dep_serialized() - * - * Purpose: Decrement the flush_dep_nunser_children fields of all the - * target entry's flush dependency parents in response to - * the target entry becoming serialized. 
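Applications never call H5C_cork() above directly; as far as the released public API goes, the cork/uncork actions are normally reached through the per-object metadata-flush controls (H5Odisable_mdc_flushes() / H5Oenable_mdc_flushes() / H5Oare_mdc_flushes_disabled(), available since 1.10). A minimal usage sketch, with placeholder file and dataset names and error checking omitted:

    #include "hdf5.h"

    static void
    cork_example(void)
    {
        hid_t   file_id = H5Fopen("example.h5", H5F_ACC_RDWR, H5P_DEFAULT);
        hid_t   dset_id = H5Dopen2(file_id, "/dset", H5P_DEFAULT);
        hbool_t corked  = 0;

        /* "Cork" the dataset: cache entries tagged with this object stay
         * in the metadata cache (no flush/evict) until it is uncorked.
         */
        H5Odisable_mdc_flushes(dset_id);
        H5Oare_mdc_flushes_disabled(dset_id, &corked);  /* corked is now TRUE */

        /* ... metadata-heavy operations on the dataset ... */

        H5Oenable_mdc_flushes(dset_id);                 /* uncork */

        H5Dclose(dset_id);
        H5Fclose(file_id);
    }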
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 8/30/16 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr) -{ - int i; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - HDassert(entry_ptr); - - /* Iterate over the parent entries, if any */ - /* Note reverse iteration order, in case the callback removes the flush - * dependency - QAK, 2017/08/12 - */ - for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) { - /* Sanity checks */ - HDassert(entry_ptr->flush_dep_parent); - HDassert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0); - - /* decrement the parents number of unserialized children */ - entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--; - - /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */ - if (entry_ptr->flush_dep_parent[i]->type->notify && - (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, - entry_ptr->flush_dep_parent[i]) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify parent about child entry serialized flag set") - } /* end for */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__mark_flush_dep_serialized() */ - -/*------------------------------------------------------------------------- - * Function: H5C__mark_flush_dep_unserialized() - * - * Purpose: Increment the flush_dep_nunser_children fields of all the - * target entry's flush dependency parents in response to - * the target entry becoming unserialized. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 8/30/16 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr) -{ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - HDassert(entry_ptr); - - /* Iterate over the parent entries, if any */ - for (u = 0; u < entry_ptr->flush_dep_nparents; u++) { - /* Sanity check */ - HDassert(entry_ptr->flush_dep_parent); - HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children < - entry_ptr->flush_dep_parent[u]->flush_dep_nchildren); - - /* increment parents number of usserialized children */ - entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++; - - /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */ - if (entry_ptr->flush_dep_parent[u]->type->notify && - (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, - entry_ptr->flush_dep_parent[u]) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, - "can't notify parent about child entry serialized flag reset") - } /* end for */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__mark_flush_dep_unserialized() */ - -#ifndef NDEBUG -/*------------------------------------------------------------------------- - * Function: H5C__assert_flush_dep_nocycle() - * - * Purpose: Assert recursively that base_entry is not the same as - * entry, and perform the same assertion on all of entry's - * flush dependency parents. 
This is used to detect cycles - * created by flush dependencies. - * - * Return: void - * - * Programmer: Neil Fortner - * 12/10/12 - * - *------------------------------------------------------------------------- - */ -static void -H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_entry_t *base_entry) -{ - unsigned u; /* Local index variable */ - - FUNC_ENTER_PACKAGE_NOERR - - /* Sanity checks */ - HDassert(entry); - HDassert(base_entry); - - /* Make sure the entries are not the same */ - HDassert(base_entry != entry); - - /* Iterate over entry's parents (if any) */ - for (u = 0; u < entry->flush_dep_nparents; u++) - H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry); - - FUNC_LEAVE_NOAPI_VOID -} /* H5C__assert_flush_dep_nocycle() */ -#endif - -/*------------------------------------------------------------------------- - * Function: H5C__serialize_cache - * - * Purpose: Serialize (i.e. construct an on disk image) for all entries - * in the metadata cache including clean entries. - * - * Note that flush dependencies and "flush me last" flags - * must be observed in the serialization process. - * - * Note also that entries may be loaded, flushed, evicted, - * expunged, relocated, resized, or removed from the cache - * during this process, just as these actions may occur during - * a regular flush. - * - * However, we are given that the cache will contain no protected - * entries on entry to this routine (although entries may be - * briefly protected and then unprotected during the serialize - * process). - * - * The objective of this routine is serialize all entries and - * to force all entries into their actual locations on disk. - * - * The initial need for this routine is to settle all entries - * in the cache prior to construction of the metadata cache - * image so that the size of the cache image can be calculated. - * - * Return: Non-negative on success/Negative on failure or if there was - * a request to flush all items and something was protected. 
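Since the stated purpose of H5C__serialize_cache() is to settle the cache before a metadata cache image is constructed, it may help to recall how an application requests that image in the first place: through the file-access property list. The sketch below assumes the public H5Pset_mdc_image_config() call and the H5AC_cache_image_config_t field names from the 1.10+ headers; treat those names as assumptions rather than something this patch defines.

    #include "hdf5.h"

    /* Build a FAPL that asks for a metadata cache image at file close;
     * writing that image is what ultimately exercises H5C__serialize_cache().
     */
    static hid_t
    fapl_with_cache_image(void)
    {
        H5AC_cache_image_config_t img_cfg;
        hid_t                     fapl_id = H5Pcreate(H5P_FILE_ACCESS);

        if (fapl_id < 0)
            return H5I_INVALID_HID;

        img_cfg.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
        img_cfg.generate_image     = 1;  /* request an image at close */
        img_cfg.save_resize_status = 0;
        img_cfg.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

        if (H5Pset_mdc_image_config(fapl_id, &img_cfg) < 0) {
            H5Pclose(fapl_id);
            return H5I_INVALID_HID;
        }

        return fapl_id;  /* pass to H5Fcreate()/H5Fopen() */
    }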
- * - * Programmer: John Mainzer - * 7/22/15 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C__serialize_cache(H5F_t *f) -{ -#ifdef H5C_DO_SANITY_CHECKS - int i; - uint32_t index_len = 0; - size_t index_size = (size_t)0; - size_t clean_index_size = (size_t)0; - size_t dirty_index_size = (size_t)0; - size_t slist_size = (size_t)0; - uint32_t slist_len = 0; -#endif /* H5C_DO_SANITY_CHECKS */ - H5C_ring_t ring; - H5C_t *cache_ptr; - herr_t ret_value = SUCCEED; - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - HDassert(f); - HDassert(f->shared); - cache_ptr = f->shared->cache; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(cache_ptr->slist_ptr); - -#ifdef H5C_DO_SANITY_CHECKS - HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0); - HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0); - HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0); - - for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) { - index_len += cache_ptr->index_ring_len[i]; - index_size += cache_ptr->index_ring_size[i]; - clean_index_size += cache_ptr->clean_index_ring_size[i]; - dirty_index_size += cache_ptr->dirty_index_ring_size[i]; - - slist_len += cache_ptr->slist_ring_len[i]; - slist_size += cache_ptr->slist_ring_size[i]; - } /* end for */ - - HDassert(cache_ptr->index_len == index_len); - HDassert(cache_ptr->index_size == index_size); - HDassert(cache_ptr->clean_index_size == clean_index_size); - HDassert(cache_ptr->dirty_index_size == dirty_index_size); - HDassert(cache_ptr->slist_len == slist_len); - HDassert(cache_ptr->slist_size == slist_size); -#endif /* H5C_DO_SANITY_CHECKS */ + if (cache_ptr->resize_ctl.set_initial_size) + new_max_cache_size = cache_ptr->resize_ctl.initial_size; + else if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.max_size) + new_max_cache_size = cache_ptr->resize_ctl.max_size; + else if (cache_ptr->max_cache_size < cache_ptr->resize_ctl.min_size) + new_max_cache_size = cache_ptr->resize_ctl.min_size; + else + new_max_cache_size = cache_ptr->max_cache_size; -#ifdef H5C_DO_EXTREME_SANITY_CHECKS - if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || - H5C__validate_lru_list(cache_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") -#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + new_min_clean_size = (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction)); -#ifndef NDEBUG - /* if this is a debug build, set the serialization_count field of - * each entry in the cache to zero before we start the serialization. - * This allows us to detect the case in which any entry is serialized - * more than once (a performance issues), and more importantly, the - * case is which any flush dependency parent is serializes more than - * once (a correctness issue). + /* since new_min_clean_size is of type size_t, we have + * + * ( 0 <= new_min_clean_size ) + * + * by definition. 
*/ - { - H5C_cache_entry_t *scan_ptr = NULL; + HDassert(new_min_clean_size <= new_max_cache_size); + HDassert(cache_ptr->resize_ctl.min_size <= new_max_cache_size); + HDassert(new_max_cache_size <= cache_ptr->resize_ctl.max_size); - scan_ptr = cache_ptr->il_head; - while (scan_ptr != NULL) { - HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - scan_ptr->serialization_count = 0; - scan_ptr = scan_ptr->il_next; - } /* end while */ - } /* end block */ -#endif + if (new_max_cache_size < cache_ptr->max_cache_size) + cache_ptr->size_decreased = TRUE; - /* set cache_ptr->serialization_in_progress to TRUE, and back - * to FALSE at the end of the function. Must maintain this flag - * to support H5C_get_serialization_in_progress(), which is in - * turn required to support sanity checking in some cache - * clients. - */ - HDassert(!cache_ptr->serialization_in_progress); - cache_ptr->serialization_in_progress = TRUE; + cache_ptr->max_cache_size = new_max_cache_size; + cache_ptr->min_clean_size = new_min_clean_size; - /* Serialize each ring, starting from the outermost ring and - * working inward. - */ - ring = H5C_RING_USER; - while (ring < H5C_RING_NTYPES) { - HDassert(cache_ptr->close_warning_received); - switch (ring) { - case H5C_RING_USER: - break; + if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) + /* this should be impossible... */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed") - case H5C_RING_RDFSM: - /* Settle raw data FSM */ - if (!cache_ptr->rdfsm_settled) - if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed") - break; + /* remove excess epoch markers if any */ + if ((config_ptr->decr_mode == H5C_decr__age_out_with_threshold) || + (config_ptr->decr_mode == H5C_decr__age_out)) { + if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) + if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers") + } /* end if */ + else if (cache_ptr->epoch_markers_active > 0) { + if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers") + } - case H5C_RING_MDFSM: - /* Settle metadata FSM */ - if (!cache_ptr->mdfsm_settled) - if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed") + /* configure flash size increase facility. We wait until the + * end of the function, as we need the max_cache_size set before + * we start to keep things simple. + * + * If we haven't already ruled out flash cache size increases above, + * go ahead and configure it. 
+ */ + if (cache_ptr->flash_size_increase_possible) { + switch (config_ptr->flash_incr_mode) { + case H5C_flash_incr__off: + cache_ptr->flash_size_increase_possible = FALSE; break; - case H5C_RING_SBE: - case H5C_RING_SB: + case H5C_flash_incr__add_space: + cache_ptr->flash_size_increase_possible = TRUE; + cache_ptr->flash_size_increase_threshold = + (size_t)(((double)(cache_ptr->max_cache_size)) * (cache_ptr->resize_ctl.flash_threshold)); break; - default: - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!") + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") break; } /* end switch */ + } /* end if */ - if (H5C__serialize_ring(f, ring) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed") +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_set_cache_auto_resize_config() */ - ring++; - } /* end while */ +/*------------------------------------------------------------------------- + * Function: H5C_set_evictions_enabled() + * + * Purpose: Set cache_ptr->evictions_enabled to the value of the + * evictions enabled parameter. + * + * Return: SUCCEED on success, and FAIL on failure. + * + * Programmer: John Mainzer + * 7/27/07 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled) +{ + herr_t ret_value = SUCCEED; /* Return value */ -#ifndef NDEBUG - /* Verify that no entry has been serialized more than once. - * FD parents with multiple serializations should have been caught - * elsewhere, so no specific check for them here. - */ - { - H5C_cache_entry_t *scan_ptr = NULL; + FUNC_ENTER_NOAPI(FAIL) - scan_ptr = cache_ptr->il_head; - while (scan_ptr != NULL) { - HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(scan_ptr->serialization_count <= 1); + if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry") - scan_ptr = scan_ptr->il_next; - } /* end while */ - } /* end block */ -#endif + /* There is no fundamental reason why we should not permit + * evictions to be disabled while automatic resize is enabled. + * However, allowing it would greatly complicate testing + * the feature. Hence the following: + */ + if ((evictions_enabled != TRUE) && ((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) || + (cache_ptr->resize_ctl.decr_mode != H5C_decr__off))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't disable evictions when auto resize enabled") + + cache_ptr->evictions_enabled = evictions_enabled; done: - cache_ptr->serialization_in_progress = FALSE; FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__serialize_cache() */ +} /* H5C_set_evictions_enabled() */ /*------------------------------------------------------------------------- - * Function: H5C__serialize_ring + * Function: H5C_set_slist_enabled() + * + * Purpose: Enable or disable the slist as directed. + * + * The slist (skip list) is an address ordered list of + * dirty entries in the metadata cache. However, this + * list is only needed during flush and close, where we + * use it to write entries in more or less increasing + * address order. + * + * This function sets up and enables further operations + * on the slist, or disable the slist. This in turn + * allows us to avoid the overhead of maintaining the + * slist when it is not needed. + * * - * Purpose: Serialize the entries contained in the specified cache and - * ring. 
All entries in rings outside the specified ring - * must have been serialized on entry. + * If the slist_enabled parameter is TRUE, the function * - * If the cache contains protected entries in the specified - * ring, the function will fail, as protected entries cannot - * be serialized. However all unprotected entries in the - * target ring should be serialized before the function - * returns failure. + * 1) Verifies that the slist is empty. * - * If flush dependencies appear in the target ring, the - * function makes repeated passes through the index list - * serializing entries in flush dependency order. + * 2) Scans the index list, and inserts all dirty entries + * into the slist. * - * All entries outside the H5C_RING_SBE are marked for - * inclusion in the cache image. Entries in H5C_RING_SBE - * and below are marked for exclusion from the image. + * 3) Sets cache_ptr->slist_enabled = TRUE. * - * Return: Non-negative on success/Negative on failure or if there was - * a request to flush all items and something was protected. + * Note that the clear_slist parameter is ignored if + * the slist_enabed parameter is TRUE. + * + * + * If the slist_enabled_parameter is FALSE, the function + * shuts down the slist. + * + * Normally the slist will be empty at this point, however + * that need not be the case if H5C_flush_cache() has been + * called with the H5C__FLUSH_MARKED_ENTRIES_FLAG. + * + * Thus shutdown proceeds as follows: + * + * 1) Test to see if the slist is empty. If it is, proceed + * to step 3. + * + * 2) Test to see if the clear_slist parameter is TRUE. + * + * If it is, remove all entries from the slist. + * + * If it isn't, throw an error. + * + * 3) set cache_ptr->slist_enabled = FALSE. + * + * Return: SUCCEED on success, and FAIL on failure. * * Programmer: John Mainzer - * 9/11/15 + * 5/1/20 * *------------------------------------------------------------------------- */ -static herr_t -H5C__serialize_ring(H5F_t *f, H5C_ring_t ring) +herr_t +H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_slist) { - hbool_t done = FALSE; - H5C_t *cache_ptr; H5C_cache_entry_t *entry_ptr; - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_PACKAGE + FUNC_ENTER_NOAPI(FAIL) - /* Sanity checks */ - HDassert(f); - HDassert(f->shared); - cache_ptr = f->shared->cache; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(ring > H5C_RING_UNDEFINED); - HDassert(ring < H5C_RING_NTYPES); + if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry") - HDassert(cache_ptr->serialization_in_progress); + if (slist_enabled) { + if (cache_ptr->slist_enabled) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already enabled?") + if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?") - /* The objective here is to serialize all entries in the cache ring - * in flush dependency order. - * - * The basic algorithm is to scan the cache index list looking for - * unserialized entries that are either not in a flush dependency - * relationship, or which have no unserialized children. Any such - * entry is serialized and its flush dependency parents (if any) are - * informed -- allowing them to decrement their userialized child counts. 
- * - * However, this algorithm is complicated by the ability - * of client serialization callbacks to perform operations on - * on the cache which can result in the insertion, deletion, - * relocation, resize, dirty, flush, eviction, or removal (via the - * take ownership flag) of entries. Changes in the flush dependency - * structure are also possible. - * - * On the other hand, the algorithm is simplified by the fact that - * we are serializing, not flushing. Thus, as long as all entries - * are serialized correctly, it doesn't matter if we have to go back - * and serialize an entry a second time. - * - * These possible actions result in the following modifications to - * the basic algorithm: - * - * 1) In the event of an entry expunge, eviction or removal, we must - * restart the scan as it is possible that the next entry in our - * scan is no longer in the cache. Were we to examine this entry, - * we would be accessing deallocated memory. - * - * 2) A resize, dirty, or insertion of an entry may result in the - * the increment of a flush dependency parent's dirty and/or - * unserialized child count. In the context of serializing the - * the cache, this is a non-issue, as even if we have already - * serialized the parent, it will be marked dirty and its image - * marked out of date if appropriate when the child is serialized. - * - * However, this is a major issue for a flush, as were this to happen - * in a flush, it would violate the invariant that the flush dependency - * feature is intended to enforce. As the metadata cache has no - * control over the behavior of cache clients, it has no way of - * preventing this behaviour. However, it should detect it if at all - * possible. - * - * Do this by maintaining a count of the number of times each entry is - * serialized during a cache serialization. If any flush dependency - * parent is serialized more than once, throw an assertion failure. - * - * 3) An entry relocation will typically change the location of the - * entry in the index list. This shouldn't cause problems as we - * will scan the index list until we make a complete pass without - * finding anything to serialize -- making relocations of either - * the current or next entries irrelevant. - * - * Note that since a relocation may result in our skipping part of - * the index list, we must always do at least one more pass through - * the index list after an entry relocation. - * - * 4) Changes in the flush dependency structure are possible on - * entry insertion, load, expunge, evict, or remove. Destruction - * of a flush dependency has no effect, as it can only relax the - * flush dependencies. Creation of a flush dependency can create - * an unserialized child of a flush dependency parent where all - * flush dependency children were previously serialized. Should - * this child dirty the flush dependency parent when it is serialized, - * the parent will be re-serialized. - * - * Per the discussion of 2) above, this is a non issue for cache - * serialization, and a major problem for cache flush. Using the - * same detection mechanism, throw an assertion failure if this - * condition appears. - * - * Observe that either eviction or removal of entries as a result of - * a serialization is not a problem as long as the flush dependency - * tree does not change beyond the removal of a leaf. - */ - while (!done) { - /* Reset the counters so that we can detect insertions, loads, - * moves, and flush dependency height changes caused by the pre_serialize - * and serialize callbacks. 
+ /* set cache_ptr->slist_enabled to TRUE so that the slist + * maintenance macros will be enabled. */ - cache_ptr->entries_loaded_counter = 0; - cache_ptr->entries_inserted_counter = 0; - cache_ptr->entries_relocated_counter = 0; + cache_ptr->slist_enabled = TRUE; - done = TRUE; /* set to FALSE if any activity in inner loop */ + /* scan the index list and insert all dirty entries in the slist */ entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + if (entry_ptr->is_dirty) + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) + entry_ptr = entry_ptr->il_next; + } - /* Verify that either the entry is already serialized, or - * that it is assigned to either the target or an inner - * ring. - */ - HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date)); - - /* Skip flush me last entries or inner ring entries */ - if (!entry_ptr->flush_me_last && entry_ptr->ring == ring) { - - /* if we encounter an unserialized entry in the current - * ring that is not marked flush me last, we are not done. - */ - if (!entry_ptr->image_up_to_date) - done = FALSE; - - /* Serialize the entry if its image is not up to date - * and it has no unserialized flush dependency children. - */ - if (!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) { - HDassert(entry_ptr->serialization_count == 0); - - /* Serialize the entry */ - if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed") - - HDassert(entry_ptr->flush_dep_nunser_children == 0); - HDassert(entry_ptr->serialization_count == 0); - -#ifndef NDEBUG - /* Increment serialization counter (to detect multiple serializations) */ - entry_ptr->serialization_count++; -#endif - } /* end if */ - } /* end if */ - - /* Check for the cache being perturbed during the entry serialize */ - if ((cache_ptr->entries_loaded_counter > 0) || (cache_ptr->entries_inserted_counter > 0) || - (cache_ptr->entries_relocated_counter > 0)) { - -#if H5C_COLLECT_CACHE_STATS - H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr); -#endif /* H5C_COLLECT_CACHE_STATS */ + /* we don't maintain a dirty index len, so we can't do a cross + * check against it. Note that there is no point in cross checking + * against the dirty LRU size, as the dirty LRU may not be maintained, + * and in any case, there is no requirement that all dirty entries + * will reside on the dirty LRU. + */ + HDassert(cache_ptr->dirty_index_size == cache_ptr->slist_size); + } + else { /* take down the skip list */ + if (!cache_ptr->slist_enabled) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already disabled?") - /* Reset the counters */ - cache_ptr->entries_loaded_counter = 0; - cache_ptr->entries_inserted_counter = 0; - cache_ptr->entries_relocated_counter = 0; + if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) { + if (clear_slist) { + H5SL_node_t *node_ptr; - /* Restart scan */ - entry_ptr = cache_ptr->il_head; - } /* end if */ + node_ptr = H5SL_first(cache_ptr->slist_ptr); + while (node_ptr != NULL) { + entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) + node_ptr = H5SL_first(cache_ptr->slist_ptr); + } + } else - /* Advance to next entry */ - entry_ptr = entry_ptr->il_next; - } /* while ( entry_ptr != NULL ) */ - } /* while ( ! 
done ) */ - - /* Reset the counters so that we can detect insertions, loads, - * moves, and flush dependency height changes caused by the pre_serialize - * and serialize callbacks. - */ - cache_ptr->entries_loaded_counter = 0; - cache_ptr->entries_inserted_counter = 0; - cache_ptr->entries_relocated_counter = 0; - - /* At this point, all entries not marked "flush me last" and in - * the current ring or outside it should be serialized and have up - * to date images. Scan the index list again to serialize the - * "flush me last" entries (if they are in the current ring) and to - * verify that all other entries have up to date images. - */ - entry_ptr = cache_ptr->il_head; - while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(entry_ptr->ring > H5C_RING_UNDEFINED); - HDassert(entry_ptr->ring < H5C_RING_NTYPES); - HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date)); - - if (entry_ptr->ring == ring) { - if (entry_ptr->flush_me_last) { - if (!entry_ptr->image_up_to_date) { - HDassert(entry_ptr->serialization_count == 0); - HDassert(entry_ptr->flush_dep_nunser_children == 0); - - /* Serialize the entry */ - if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed") - - /* Check for the cache changing */ - if ((cache_ptr->entries_loaded_counter > 0) || - (cache_ptr->entries_inserted_counter > 0) || - (cache_ptr->entries_relocated_counter > 0)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, - "flush_me_last entry serialization triggered restart") - - HDassert(entry_ptr->flush_dep_nunser_children == 0); - HDassert(entry_ptr->serialization_count == 0); -#ifndef NDEBUG - /* Increment serialization counter (to detect multiple serializations) */ - entry_ptr->serialization_count++; -#endif - } /* end if */ - } /* end if */ - else { - HDassert(entry_ptr->image_up_to_date); - HDassert(entry_ptr->serialization_count <= 1); - HDassert(entry_ptr->flush_dep_nunser_children == 0); - } /* end else */ - } /* if ( entry_ptr->ring == ring ) */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?") + } + + cache_ptr->slist_enabled = FALSE; - entry_ptr = entry_ptr->il_next; - } /* while ( entry_ptr != NULL ) */ + HDassert(0 == cache_ptr->slist_len); + HDassert(0 == cache_ptr->slist_size); + } done: - HDassert(cache_ptr->serialization_in_progress); FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__serialize_ring() */ +} /* H5C_set_slist_enabled() */ /*------------------------------------------------------------------------- - * Function: H5C__serialize_single_entry + * Function: H5C_unsettle_ring() + * + * Purpose: Advise the metadata cache that the specified free space + * manager ring is no longer settled (if it was on entry). + * + * If the target free space manager ring is already + * unsettled, do nothing, and return SUCCEED. * - * Purpose: Serialize the cache entry pointed to by the entry_ptr - * parameter. + * If the target free space manager ring is settled, and + * we are not in the process of a file shutdown, mark + * the ring as unsettled, and return SUCCEED. + * + * If the target free space manager is settled, and we + * are in the process of a file shutdown, post an error + * message, and return FAIL. 
* * Return: Non-negative on success/Negative on failure * - * Programmer: John Mainzer, 7/24/15 + * Programmer: John Mainzer + * 10/15/16 * *------------------------------------------------------------------------- */ -static herr_t -H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) +herr_t +H5C_unsettle_ring(H5F_t *f, H5C_ring_t ring) { + H5C_t *cache_ptr; herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_PACKAGE + FUNC_ENTER_NOAPI(FAIL) /* Sanity checks */ HDassert(f); - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(entry_ptr); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(!entry_ptr->prefetched); - HDassert(!entry_ptr->image_up_to_date); - HDassert(entry_ptr->is_dirty); - HDassert(!entry_ptr->is_protected); - HDassert(!entry_ptr->flush_in_progress); - HDassert(entry_ptr->type); - - /* Set entry_ptr->flush_in_progress to TRUE so the target entry - * will not be evicted out from under us. Must set it back to FALSE - * when we are done. - */ - entry_ptr->flush_in_progress = TRUE; - - /* Allocate buffer for the entry image if required. */ - if (NULL == entry_ptr->image_ptr) { - HDassert(entry_ptr->size > 0); - if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer") -#if H5C_DO_MEMORY_SANITY_CHECKS - H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + image_size, H5C_IMAGE_SANITY_VALUE, - H5C_IMAGE_EXTRA_SPACE); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - } /* end if */ + HDassert(f->shared); + HDassert(f->shared->cache); + HDassert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring)); + cache_ptr = f->shared->cache; + HDassert(H5C__H5C_T_MAGIC == cache_ptr->magic); + + switch (ring) { + case H5C_RING_RDFSM: + if (cache_ptr->rdfsm_settled) { + if (cache_ptr->close_warning_received) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle") + cache_ptr->rdfsm_settled = FALSE; + } /* end if */ + break; - /* Generate image for entry */ - if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry") + case H5C_RING_MDFSM: + if (cache_ptr->mdfsm_settled) { + if (cache_ptr->close_warning_received) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle") + cache_ptr->mdfsm_settled = FALSE; + } /* end if */ + break; - /* Reset the flush_in progress flag */ - entry_ptr->flush_in_progress = FALSE; + default: + HDassert(FALSE); /* this should be un-reachable */ + break; + } /* end switch */ done: - HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress)); - HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date)); FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__serialize_single_entry() */ +} /* H5C_unsettle_ring() */ /*------------------------------------------------------------------------- - * Function: H5C__generate_image - * - * Purpose: Serialize an entry and generate its image. + * Function: H5C_validate_resize_config() * - * Note: This may cause the entry to be re-sized and/or moved in - * the cache. + * Purpose: Run a sanity check on the specified sections of the + * provided instance of struct H5C_auto_size_ctl_t. 
* - * As we will not update the metadata cache's data structures - * until we we finish the write, we must touch up these - * data structures for size and location changes even if we - * are about to delete the entry from the cache (i.e. on a - * flush destroy). + * Do nothing and return SUCCEED if no errors are detected, + * and flag an error and return FAIL otherwise. * * Return: Non-negative on success/Negative on failure * - * Programmer: Mohamad Chaarawi - * 2/10/16 + * Programmer: John Mainzer + * 3/23/05 * *------------------------------------------------------------------------- */ -static herr_t -H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) +herr_t +H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests) { - haddr_t new_addr = HADDR_UNDEF; - haddr_t old_addr = HADDR_UNDEF; - size_t new_len = 0; - unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET; - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_PACKAGE + FUNC_ENTER_NOAPI(FAIL) - /* Sanity check */ - HDassert(f); - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(entry_ptr); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(!entry_ptr->image_up_to_date); - HDassert(entry_ptr->is_dirty); - HDassert(!entry_ptr->is_protected); - HDassert(entry_ptr->type); - - /* make note of the entry's current address */ - old_addr = entry_ptr->addr; - - /* Call client's pre-serialize callback, if there's one */ - if ((entry_ptr->type->pre_serialize) && - ((entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size, &new_addr, - &new_len, &serialize_flags) < 0)) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry") - - /* Check for any flags set in the pre-serialize callback */ - if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) { - /* Check for unexpected flags from serialize callback */ - if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG)) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)") + if (config_ptr == NULL) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry") -#ifdef H5_HAVE_PARALLEL - /* In the parallel case, resizes and moves in - * the serialize operation can cause problems. - * If they occur, scream and die. - * - * At present, in the parallel case, the aux_ptr - * will only be set if there is more than one - * process. Thus we can use this to detect - * the parallel case. - * - * This works for now, but if we start using the - * aux_ptr for other purposes, we will have to - * change this test accordingly. - * - * NB: While this test detects entryies that attempt - * to resize or move themselves during a flush - * in the parallel case, it will not detect an - * entry that dirties, resizes, and/or moves - * other entries during its flush. 
- */ - if (cache_ptr->aux_ptr != NULL) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case") -#endif + if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version") - /* If required, resize the buffer and update the entry and the cache - * data structures - */ - if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) { - /* Sanity check */ - HDassert(new_len > 0); - - /* Allocate a new image buffer */ - if (NULL == - (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, - "memory allocation failed for on disk image buffer") - -#if H5C_DO_MEMORY_SANITY_CHECKS - H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE, - H5C_IMAGE_EXTRA_SPACE); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - - /* Update statistics for resizing the entry */ - H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len); - - /* Update the hash table for the size change */ - H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr, - !entry_ptr->is_dirty, FAIL); - - /* The entry can't be protected since we are in the process of - * flushing it. Thus we must update the replacement policy data - * structures for the size change. The macro deals with the pinned - * case. + if ((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) { + if (config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big") + if (config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small") + if (config_ptr->min_size > config_ptr->max_size) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size") + if (config_ptr->set_initial_size && ((config_ptr->initial_size < config_ptr->min_size) || + (config_ptr->initial_size > config_ptr->max_size))) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "initial_size must be in the interval [min_size, max_size]") + if ((config_ptr->min_clean_fraction < 0.0) || (config_ptr->min_clean_fraction > 1.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_clean_fraction must be in the interval [0.0, 1.0]") + if (config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small") + if (config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big") + } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */ + + if ((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) { + if ((config_ptr->incr_mode != H5C_incr__off) && (config_ptr->incr_mode != H5C_incr__threshold)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode") + + if (config_ptr->incr_mode == H5C_incr__threshold) { + if ((config_ptr->lower_hr_threshold < 0.0) || (config_ptr->lower_hr_threshold > 1.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "lower_hr_threshold must be in the range [0.0, 1.0]") + if (config_ptr->increment < 1.0) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "increment must be greater than or equal to 1.0") + + /* no need to check max_increment, as it is a size_t, + * and thus must be non-negative. 
*/ - H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len, FAIL); + } /* H5C_incr__threshold */ + + switch (config_ptr->flash_incr_mode) { + case H5C_flash_incr__off: + /* nothing to do here */ + break; + + case H5C_flash_incr__add_space: + if ((config_ptr->flash_multiple < 0.1) || (config_ptr->flash_multiple > 10.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "flash_multiple must be in the range [0.1, 10.0]") + if ((config_ptr->flash_threshold < 0.1) || (config_ptr->flash_threshold > 1.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "flash_threshold must be in the range [0.1, 1.0]") + break; + + default: + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode") + break; + } /* end switch */ + } /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */ + + if ((tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0) { + if ((config_ptr->decr_mode != H5C_decr__off) && (config_ptr->decr_mode != H5C_decr__threshold) && + (config_ptr->decr_mode != H5C_decr__age_out) && + (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode") + + if (config_ptr->decr_mode == H5C_decr__threshold) { + if (config_ptr->upper_hr_threshold > 1.0) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0") + if ((config_ptr->decrement > 1.0) || (config_ptr->decrement < 0.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]") - /* As we haven't updated the cache data structures for - * for the flush or flush destroy yet, the entry should - * be in the slist if the slist is enabled. Since - * H5C__UPDATE_SLIST_FOR_SIZE_CHANGE() is a no-op if the - * slist is enabled, call it un-conditionally. + /* no need to check max_decrement as it is a size_t + * and thus must be non-negative. */ - HDassert(entry_ptr->is_dirty); - HDassert((entry_ptr->in_slist) || (!cache_ptr->slist_enabled)); + } /* H5C_decr__threshold */ - H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len); + if ((config_ptr->decr_mode == H5C_decr__age_out) || + (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) { + if (config_ptr->epochs_before_eviction < 1) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive") + if (config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big") + if (config_ptr->apply_empty_reserve && + (config_ptr->empty_reserve > 1.0 || config_ptr->empty_reserve < 0.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty_reserve must be in the interval [0.0, 1.0]") - /* Finally, update the entry for its new size */ - entry_ptr->size = new_len; - } /* end if */ + /* no need to check max_decrement as it is a size_t + * and thus must be non-negative. 
+ */ + } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */ - /* If required, udate the entry and the cache data structures - * for a move - */ - if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) { - /* Update stats and entries relocated counter */ - H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) - - /* We must update cache data structures for the change in address */ - if (entry_ptr->addr == old_addr) { - /* Delete the entry from the hash table and the slist */ - H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL); - H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) - - /* Update the entry for its new address */ - entry_ptr->addr = new_addr; - - /* And then reinsert in the index and slist */ - H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL); - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL); - } /* end if */ - else /* move is already done for us -- just do sanity checks */ - HDassert(entry_ptr->addr == new_addr); - } /* end if */ - } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */ - - /* Serialize object into buffer */ - if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry") - -#if H5C_DO_MEMORY_SANITY_CHECKS - HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, - H5C_IMAGE_EXTRA_SPACE)); -#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ - - entry_ptr->image_up_to_date = TRUE; - - /* Propagate the fact that the entry is serialized up the - * flush dependency chain if appropriate. Since the image must - * have been out of date for this function to have been called - * (see assertion on entry), no need to check that -- only check - * for flush dependency parents. - */ - HDassert(entry_ptr->flush_dep_nunser_children == 0); + if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) + if ((config_ptr->upper_hr_threshold > 1.0) || (config_ptr->upper_hr_threshold < 0.0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "upper_hr_threshold must be in the interval [0.0, 1.0]") + } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */ - if (entry_ptr->flush_dep_nparents > 0) - if (H5C__mark_flush_dep_serialized(entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents") + if ((tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0) { + if ((config_ptr->incr_mode == H5C_incr__threshold) && + ((config_ptr->decr_mode == H5C_decr__threshold) || + (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) && + (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config") + } /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */ done: FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__generate_image */ +} /* H5C_validate_resize_config() */ /*------------------------------------------------------------------------- + * Function: H5C_cork * - * Function: H5C_remove_entry - * - * Purpose: Remove an entry from the cache. Must be not protected, pinned, - * dirty, involved in flush dependencies, etc. 
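/*
 * Illustrative sketch (editorial aside, not part of this patch): the rules
 * enforced by H5C_validate_resize_config() above are what an application
 * ultimately has to satisfy when tuning the metadata cache through the
 * public H5Pset_mdc_config() / H5AC_cache_config_t interface.  The values
 * below are just one configuration that passes those checks
 * (min_size <= initial_size <= max_size, thresholds in [0.0, 1.0],
 * lower_hr_threshold < upper_hr_threshold, flash_multiple in [0.1, 10.0]).
 */
#include "hdf5.h"

static hid_t
fapl_with_auto_resize(void)
{
    H5AC_cache_config_t cfg;
    hid_t               fapl = H5Pcreate(H5P_FILE_ACCESS);

    if (fapl < 0)
        return H5I_INVALID_HID;

    /* The version field must be set before querying the current config */
    cfg.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    if (H5Pget_mdc_config(fapl, &cfg) < 0)
        return H5I_INVALID_HID;

    /* General limits: min_size <= initial_size <= max_size */
    cfg.set_initial_size   = 1;
    cfg.min_size           = 1 * 1024 * 1024;
    cfg.initial_size       = 4 * 1024 * 1024;
    cfg.max_size           = 32 * 1024 * 1024;
    cfg.min_clean_fraction = 0.3; /* must lie in [0.0, 1.0] */

    /* Increase by threshold: lower_hr_threshold in [0.0, 1.0], increment >= 1.0 */
    cfg.incr_mode          = H5C_incr__threshold;
    cfg.lower_hr_threshold = 0.9;
    cfg.increment          = 2.0;

    /* Flash increase: flash_multiple in [0.1, 10.0], flash_threshold in [0.1, 1.0] */
    cfg.flash_incr_mode = H5C_flash_incr__add_space;
    cfg.flash_multiple  = 1.0;
    cfg.flash_threshold = 0.25;

    /* Decrease by age-out with threshold: upper_hr_threshold must stay in
     * [0.0, 1.0] and above lower_hr_threshold (the "interactions" check) */
    cfg.decr_mode              = H5C_decr__age_out_with_threshold;
    cfg.upper_hr_threshold     = 0.999;
    cfg.epochs_before_eviction = 3; /* >= 1 and <= the max epoch markers */

    /* H5Pset_mdc_config() fails if the combination is rejected by the
     * validation logic above */
    if (H5Pset_mdc_config(fapl, &cfg) < 0)
        return H5I_INVALID_HID;

    return fapl;
}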
+ * Purpose: To cork/uncork/get cork status of an object depending on "action": + * H5C__SET_CORK: + * To cork the object + * Return error if the object is already corked + * H5C__UNCORK: + * To uncork the object + * Return error if the object is not corked + * H5C__GET_CORKED: + * To retrieve the cork status of an object in + * the parameter "corked" * - * Return: Non-negative on success/Negative on failure + * Return: Success: Non-negative + * Failure: Negative * - * Programmer: Quincey Koziol - * September 17, 2016 + * Programmer: Vailin Choi + * January 2014 * *------------------------------------------------------------------------- */ herr_t -H5C_remove_entry(void *_entry) +H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked) { - H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */ - H5C_t *cache; /* Cache for file */ - herr_t ret_value = SUCCEED; /* Return value */ + H5C_tag_info_t *tag_info = NULL; + herr_t ret_value = SUCCEED; - FUNC_ENTER_NOAPI(FAIL) + FUNC_ENTER_NOAPI_NOINIT - /* Sanity checks */ - HDassert(entry); - HDassert(entry->ring != H5C_RING_UNDEFINED); - cache = entry->cache_ptr; - HDassert(cache); - HDassert(cache->magic == H5C__H5C_T_MAGIC); - - /* Check for error conditions */ - if (entry->is_dirty) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache") - if (entry->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache") - if (entry->is_pinned) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache") - /* NOTE: If these two errors are getting tripped because the entry is - * in a flush dependency with a freedspace entry, move the checks - * after the "before evict" message is sent, and add the - * "child being evicted" message to the "before evict" notify - * section below. QAK - 2017/08/03 - */ - if (entry->flush_dep_nparents > 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, - "can't remove entry with flush dependency parents from cache") - if (entry->flush_dep_nchildren > 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, - "can't remove entry with flush dependency children from cache") - - /* Additional internal cache consistency checks */ - HDassert(!entry->in_slist); - HDassert(!entry->flush_marker); - HDassert(!entry->flush_in_progress); - - /* Note that the algorithm below is (very) similar to the set of operations - * in H5C__flush_single_entry() and should be kept in sync with changes - * to that code. - QAK, 2016/11/30 - */ + /* Assertions */ + HDassert(cache_ptr != NULL); + HDassert(H5F_addr_defined(obj_addr)); + HDassert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED); - /* Update stats, as if we are "destroying" and taking ownership of the entry */ - H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE) + /* Search the list of corked object addresses in the cache */ + HASH_FIND(hh, cache_ptr->tag_list, &obj_addr, sizeof(haddr_t), tag_info); - /* If the entry's type has a 'notify' callback, send a 'before eviction' - * notice while the entry is still fully integrated in the cache. - */ - if (entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict") - - /* Update the cache internal data structures as appropriate for a destroy. 
- * Specifically: - * 1) Delete it from the index - * 2) Delete it from the collective read access list - * 3) Update the replacement policy for eviction - * 4) Remove it from the tag list for this object - */ + if (H5C__GET_CORKED == action) { + HDassert(corked); + if (tag_info != NULL && tag_info->corked) + *corked = TRUE; + else + *corked = FALSE; + } + else { + /* Sanity check */ + HDassert(H5C__SET_CORK == action || H5C__UNCORK == action); - H5C__DELETE_FROM_INDEX(cache, entry, FAIL) + /* Perform appropriate action */ + if (H5C__SET_CORK == action) { + /* Check if this is the first entry for this tagged object */ + if (NULL == tag_info) { + /* Allocate new tag info struct */ + if (NULL == (tag_info = H5FL_CALLOC(H5C_tag_info_t))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate tag info for cache entry") -#ifdef H5_HAVE_PARALLEL - /* Check for collective read access flag */ - if (entry->coll_access) { - entry->coll_access = FALSE; - H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ + /* Set the tag for all entries */ + tag_info->tag = obj_addr; - H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL) + /* Insert tag info into hash table */ + HASH_ADD(hh, cache_ptr->tag_list, tag, sizeof(haddr_t), tag_info); + } + else { + /* Check for object already corked */ + if (tag_info->corked) + HGOTO_ERROR(H5E_CACHE, H5E_CANTCORK, FAIL, "object already corked") + HDassert(tag_info->entry_cnt > 0 && tag_info->head); + } - /* Remove entry from tag list */ - if (H5C__untag_entry(cache, entry) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list") + /* Set the corked status for the entire object */ + tag_info->corked = TRUE; + cache_ptr->num_objs_corked++; + } + else { + /* Sanity check */ + if (NULL == tag_info) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "tag info pointer is NULL") - /* Increment entries_removed_counter and set last_entry_removed_ptr. - * As we me be about to free the entry, recall that last_entry_removed_ptr - * must NEVER be dereferenced. - * - * Recall that these fields are maintained to allow functions that perform - * scans of lists of entries to detect the unexpected removal of entries - * (via expunge, eviction, or take ownership at present), so that they can - * re-start their scans if necessary. - * - * Also check if the entry we are watching for removal is being - * removed (usually the 'next' entry for an iteration) and reset - * it to indicate that it was removed. - */ - cache->entries_removed_counter++; - cache->last_entry_removed_ptr = entry; - if (entry == cache->entry_watched_for_removal) - cache->entry_watched_for_removal = NULL; + /* Check for already uncorked */ + if (!tag_info->corked) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "object already uncorked") - /* Internal cache data structures should now be up to date, and - * consistent with the status of the entry. - * - * Now clean up internal cache fields if appropriate. 
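/*
 * Illustrative sketch (editorial aside, not part of this patch): H5C_cork()
 * above is the internal end of the public "cork" calls on object
 * identifiers.  The dataset name below is hypothetical; the H5O calls are
 * existing public API which, as I read the call chain, reach H5C_cork()
 * with H5C__SET_CORK, H5C__GET_CORKED and H5C__UNCORK respectively.
 */
#include "hdf5.h"

static herr_t
cork_dataset_example(hid_t file_id)
{
    hbool_t corked = 0;
    hid_t   dset   = H5Dopen2(file_id, "/some_dataset", H5P_DEFAULT);

    if (dset < 0)
        return -1;

    /* Cork: keep this object's metadata from being flushed or evicted */
    if (H5Odisable_mdc_flushes(dset) < 0)
        return -1;

    /* Query the cork status (the H5C__GET_CORKED path) */
    if (H5Oare_mdc_flushes_disabled(dset, &corked) < 0)
        return -1;

    /* ... perform a burst of metadata updates on the dataset ... */

    /* Uncork: allow normal flushing/eviction again */
    if (H5Oenable_mdc_flushes(dset) < 0)
        return -1;

    return H5Dclose(dset);
}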
- */ + /* Set the corked status for the entire object */ + tag_info->corked = FALSE; + cache_ptr->num_objs_corked--; - /* Free the buffer for the on disk image */ - if (entry->image_ptr != NULL) - entry->image_ptr = H5MM_xfree(entry->image_ptr); + /* Remove the tag info from the tag list, if there's no more entries with this tag */ + if (0 == tag_info->entry_cnt) { + /* Sanity check */ + HDassert(NULL == tag_info->head); - /* Reset the pointer to the cache the entry is within */ - entry->cache_ptr = NULL; + HASH_DELETE(hh, cache_ptr->tag_list, tag_info); - /* Client is taking ownership of the entry. Set bad magic here so the - * cache will choke unless the entry is re-inserted properly - */ - entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; + /* Release the tag info */ + tag_info = H5FL_FREE(H5C_tag_info_t, tag_info); + } + else + HDassert(NULL != tag_info->head); + } + } done: FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__remove_entry() */ +} /* H5C_cork() */ diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c index ed95bcda691..b7c05d02f31 100644 --- a/src/H5Cdbg.c +++ b/src/H5Cdbg.c @@ -247,7 +247,6 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name) #endif /* NDEBUG */ /*------------------------------------------------------------------------- - * * Function: H5C_dump_cache_skip_list * * Purpose: Debugging routine that prints a summary of the contents of @@ -697,7 +696,6 @@ H5C_stats(H5C_t *cache_ptr, const char *cache_name, } /* H5C_stats() */ /*------------------------------------------------------------------------- - * * Function: H5C_stats__reset * * Purpose: Reset the stats fields to their initial values. @@ -713,7 +711,7 @@ void H5C_stats__reset(H5C_t *cache_ptr) #else /* NDEBUG */ #if H5C_COLLECT_CACHE_STATS -H5C_stats__reset(H5C_t *cache_ptr) +H5C_stats__reset(H5C_t *cache_ptr) #else /* H5C_COLLECT_CACHE_STATS */ H5C_stats__reset(H5C_t H5_ATTR_UNUSED *cache_ptr) #endif /* H5C_COLLECT_CACHE_STATS */ @@ -884,7 +882,6 @@ H5C_flush_dependency_exists(H5C_t *cache_ptr, haddr_t parent_addr, haddr_t child #endif /* NDEBUG */ /*------------------------------------------------------------------------- - * * Function: H5C_validate_index_list * * Purpose: Debugging function that scans the index list for errors. @@ -1000,7 +997,6 @@ H5C_validate_index_list(H5C_t *cache_ptr) #endif /* NDEBUG */ /*------------------------------------------------------------------------- - * * Function: H5C_get_entry_ptr_from_addr() * * Purpose: Debugging function that attempts to look up an entry in the @@ -1097,7 +1093,6 @@ H5C_get_serialization_in_progress(const H5C_t *cache_ptr) #endif /* NDEBUG */ /*------------------------------------------------------------------------- - * * Function: H5C_cache_is_clean() * * Purpose: Debugging function that verifies that all rings in the @@ -1141,7 +1136,6 @@ H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring) #endif /* NDEBUG */ /*------------------------------------------------------------------------- - * * Function: H5C_verify_entry_type() * * Purpose: Debugging function that attempts to look up an entry in the @@ -1203,3 +1197,478 @@ H5C_verify_entry_type(H5C_t *cache_ptr, haddr_t addr, const H5C_class_t *expecte FUNC_LEAVE_NOAPI(ret_value) } /* H5C_verify_entry_type() */ #endif /* NDEBUG */ + +/*------------------------------------------------------------------------- + * Function: H5C_def_auto_resize_rpt_fcn + * + * Purpose: Print results of a automatic cache resize. + * + * This function should only be used where HDprintf() behaves + * well -- i.e. 
not on Windows. + * + * Return: void + * + * Programmer: John Mainzer + * 10/27/04 + * + *------------------------------------------------------------------------- + */ +void +H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, +#ifndef NDEBUG + int32_t version, +#else + int32_t H5_ATTR_UNUSED version, +#endif + double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size, + size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size) +{ + HDassert(cache_ptr != NULL); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(version == H5C__CURR_AUTO_RESIZE_RPT_FCN_VER); + + switch (status) { + case in_spec: + HDfprintf(stdout, "%sAuto cache resize -- no change. (hit rate = %lf)\n", cache_ptr->prefix, + hit_rate); + break; + + case increase: + HDassert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold); + HDassert(old_max_cache_size < new_max_cache_size); + + HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); + HDfprintf(stdout, "%scache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, + old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); + break; + + case flash_increase: + HDassert(old_max_cache_size < new_max_cache_size); + + HDfprintf(stdout, "%sflash cache resize(%d) -- size threshold = %zu.\n", cache_ptr->prefix, + (int)(cache_ptr->resize_ctl.flash_incr_mode), cache_ptr->flash_size_increase_threshold); + HDfprintf(stdout, "%s cache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, + old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); + break; + + case decrease: + HDassert(old_max_cache_size > new_max_cache_size); + + switch (cache_ptr->resize_ctl.decr_mode) { + case H5C_decr__off: + HDfprintf(stdout, "%sAuto cache resize -- decrease off. HR = %lf\n", cache_ptr->prefix, + hit_rate); + break; + + case H5C_decr__threshold: + HDassert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold); + + HDfprintf(stdout, "%sAuto cache resize -- decrease by threshold. HR = %lf > %6.5lf\n", + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold); + HDfprintf(stdout, "%sout of bounds high (%6.5lf).\n", cache_ptr->prefix, + cache_ptr->resize_ctl.upper_hr_threshold); + break; + + case H5C_decr__age_out: + HDfprintf(stdout, "%sAuto cache resize -- decrease by ageout. HR = %lf\n", + cache_ptr->prefix, hit_rate); + break; + + case H5C_decr__age_out_with_threshold: + HDassert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold); + + HDfprintf(stdout, + "%sAuto cache resize -- decrease by ageout with threshold. HR = %lf > %6.5lf\n", + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold); + break; + + default: + HDfprintf(stdout, "%sAuto cache resize -- decrease by unknown mode. 
HR = %lf\n", + cache_ptr->prefix, hit_rate); + } + + HDfprintf(stdout, "%s cache size decreased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix, + old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size); + break; + + case at_max_size: + HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); + HDfprintf(stdout, "%s cache already at maximum size so no change.\n", cache_ptr->prefix); + break; + + case at_min_size: + HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n", cache_ptr->prefix, + hit_rate); + HDfprintf(stdout, "%s cache already at minimum size.\n", cache_ptr->prefix); + break; + + case increase_disabled: + HDfprintf(stdout, "%sAuto cache resize -- increase disabled -- HR = %lf.", cache_ptr->prefix, + hit_rate); + break; + + case decrease_disabled: + HDfprintf(stdout, "%sAuto cache resize -- decrease disabled -- HR = %lf.\n", cache_ptr->prefix, + hit_rate); + break; + + case not_full: + HDassert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold); + + HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n", + cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold); + HDfprintf(stdout, "%s cache not full so no increase in size.\n", cache_ptr->prefix); + break; + + default: + HDfprintf(stdout, "%sAuto cache resize -- unknown status code.\n", cache_ptr->prefix); + break; + } +} /* H5C_def_auto_resize_rpt_fcn() */ + +/*------------------------------------------------------------------------- + * Function: H5C__validate_lru_list + * + * Purpose: Debugging function that scans the LRU list for errors. + * + * If an error is detected, the function generates a + * diagnostic and returns FAIL. If no error is detected, + * the function returns SUCCEED. + * + * Return: FAIL if error is detected, SUCCEED otherwise. 
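/*
 * Illustrative sketch (editorial aside, not part of this patch): the report
 * text produced by H5C_def_auto_resize_rpt_fcn() above is only emitted when
 * reporting is switched on in the cache configuration.  A minimal way to do
 * that from application code, assuming an existing file access property
 * list 'fapl':
 */
#include "hdf5.h"

static herr_t
enable_auto_resize_report(hid_t fapl)
{
    H5AC_cache_config_t cfg;

    cfg.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    if (H5Pget_mdc_config(fapl, &cfg) < 0)
        return -1;

    /* Print auto-resize decisions to stdout via the default report function */
    cfg.rpt_fcn_enabled = 1;

    return H5Pset_mdc_config(fapl, &cfg);
}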
+ * + * Programmer: John Mainzer, 7/14/05 + * + *------------------------------------------------------------------------- + */ +#ifdef H5C_DO_EXTREME_SANITY_CHECKS +herr_t +H5C__validate_lru_list(H5C_t *cache_ptr) +{ + int32_t len = 0; + size_t size = 0; + H5C_cache_entry_t *entry_ptr = NULL; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) && + (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list head/tail check failed") + + if ((cache_ptr->LRU_list_len == 1) && + ((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) || + (cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed") + + if ((cache_ptr->LRU_list_len >= 1) && + ((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) || + (cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed") + + entry_ptr = cache_ptr->LRU_head_ptr; + while (entry_ptr != NULL) { + if ((entry_ptr != cache_ptr->LRU_head_ptr) && + ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") + + if ((entry_ptr != cache_ptr->LRU_tail_ptr) && + ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") + + if (entry_ptr->is_pinned || entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields") + + len++; + size += entry_ptr->size; + entry_ptr = entry_ptr->next; + } + + if ((cache_ptr->LRU_list_len != (uint32_t)len) || (cache_ptr->LRU_list_size != size)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list length/size check failed") + +done: + if (ret_value != SUCCEED) + HDassert(0); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__validate_lru_list() */ +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + +/*------------------------------------------------------------------------- + * Function: H5C__validate_pinned_entry_list + * + * Purpose: Debugging function that scans the pinned entry list for + * errors. + * + * If an error is detected, the function generates a + * diagnostic and returns FAIL. If no error is detected, + * the function returns SUCCEED. + * + * Return: FAIL if error is detected, SUCCEED otherwise. 
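/*
 * Illustrative sketch (editorial aside, not part of this patch): the LRU,
 * pinned and protected list validators in this file all check the same
 * doubly-linked list invariants -- consistent head/tail pointers, symmetric
 * prev/next links, and accumulated length/size matching the cached totals.
 * The standalone version below uses hypothetical node_t/list_t types to
 * show the pattern.
 */
#include <stddef.h>

typedef struct node_t {
    struct node_t *prev, *next;
    size_t         size;
} node_t;

typedef struct list_t {
    node_t  *head, *tail;
    unsigned len;  /* cached element count */
    size_t   size; /* cached total size    */
} list_t;

static int
validate_list(const list_t *l)
{
    unsigned      len  = 0;
    size_t        size = 0;
    const node_t *n;

    /* head and tail must be both NULL or both non-NULL */
    if ((l->head == NULL || l->tail == NULL) && l->head != l->tail)
        return -1;

    /* the end nodes must not point outside the list */
    if (l->len >= 1 && (l->head == NULL || l->head->prev != NULL || l->tail == NULL ||
                        l->tail->next != NULL))
        return -1;

    for (n = l->head; n != NULL; n = n->next) {
        /* prev/next pointers must be mutually consistent */
        if (n != l->head && (n->prev == NULL || n->prev->next != n))
            return -1;
        if (n != l->tail && (n->next == NULL || n->next->prev != n))
            return -1;

        len++;
        size += n->size;
    }

    /* the walk must agree with the cached totals */
    return (len == l->len && size == l->size) ? 0 : -1;
}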
+ * + * Programmer: John Mainzer, 4/25/14 + * + *------------------------------------------------------------------------- + */ +#ifdef H5C_DO_EXTREME_SANITY_CHECKS +herr_t +H5C__validate_pinned_entry_list(H5C_t *cache_ptr) +{ + int32_t len = 0; + size_t size = 0; + H5C_cache_entry_t *entry_ptr = NULL; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) && + (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list head/tail check failed") + + if ((cache_ptr->pel_len == 1) && + ((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) || + (cache_ptr->pel_head_ptr->size != cache_ptr->pel_size))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed") + + if ((cache_ptr->pel_len >= 1) && + ((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) || + (cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed") + + entry_ptr = cache_ptr->pel_head_ptr; + while (entry_ptr != NULL) { + if ((entry_ptr != cache_ptr->pel_head_ptr) && + ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") + + if ((entry_ptr != cache_ptr->pel_tail_ptr) && + ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") + + if (!entry_ptr->is_pinned) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list contains unpinned entry") + + if (!(entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields") + + len++; + size += entry_ptr->size; + entry_ptr = entry_ptr->next; + } + + if ((cache_ptr->pel_len != (uint32_t)len) || (cache_ptr->pel_size != size)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list length/size check failed") + +done: + if (ret_value != SUCCEED) + HDassert(0); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__validate_pinned_entry_list() */ +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + +/*------------------------------------------------------------------------- + * Function: H5C__validate_protected_entry_list + * + * Purpose: Debugging function that scans the protected entry list for + * errors. + * + * If an error is detected, the function generates a + * diagnostic and returns FAIL. If no error is detected, + * the function returns SUCCEED. + * + * Return: FAIL if error is detected, SUCCEED otherwise. 
+ * + * Programmer: John Mainzer, 4/25/14 + * + *------------------------------------------------------------------------- + */ +#ifdef H5C_DO_EXTREME_SANITY_CHECKS +herr_t +H5C__validate_protected_entry_list(H5C_t *cache_ptr) +{ + int32_t len = 0; + size_t size = 0; + H5C_cache_entry_t *entry_ptr = NULL; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + if (((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL)) && + (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list head/tail check failed") + + if ((cache_ptr->pl_len == 1) && + ((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) || + (cache_ptr->pl_head_ptr->size != cache_ptr->pl_size))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed") + + if ((cache_ptr->pl_len >= 1) && + ((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) || + (cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed") + + entry_ptr = cache_ptr->pl_head_ptr; + while (entry_ptr != NULL) { + if ((entry_ptr != cache_ptr->pl_head_ptr) && + ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") + + if ((entry_ptr != cache_ptr->pl_tail_ptr) && + ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers") + + if (!entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list contains unprotected entry") + + if (entry_ptr->is_read_only && (entry_ptr->ro_ref_count <= 0)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "read-only entry has non-positive ref count") + + len++; + size += entry_ptr->size; + entry_ptr = entry_ptr->next; + } + + if ((cache_ptr->pl_len != (uint32_t)len) || (cache_ptr->pl_size != size)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list length/size check failed") + +done: + if (ret_value != SUCCEED) + HDassert(0); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__validate_protected_entry_list() */ +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + +/*------------------------------------------------------------------------- + * Function: H5C__entry_in_skip_list + * + * Purpose: Debugging function that scans skip list to see if it + * is in present. We need this, as it is possible for + * an entry to be in the skip list twice. + * + * Return: FALSE if the entry is not in the skip list, and TRUE + * if it is. 
+ * + * Programmer: John Mainzer, 11/1/14 + * + *------------------------------------------------------------------------- + */ +#ifdef H5C_DO_SLIST_SANITY_CHECKS +hbool_t +H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr) +{ + H5SL_node_t *node_ptr; + hbool_t in_slist; + hbool_t ret_value; + + FUNC_ENTER_PACKAGE + + /* Assertions */ + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(cache_ptr->slist_ptr); + + node_ptr = H5SL_first(cache_ptr->slist_ptr); + in_slist = FALSE; + while ((node_ptr != NULL) && (!in_slist)) { + H5C_cache_entry_t *entry_ptr; + + entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + + HDassert(entry_ptr); + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(entry_ptr->is_dirty); + HDassert(entry_ptr->in_slist); + + if (entry_ptr == target_ptr) + in_slist = TRUE; + else + node_ptr = H5SL_next(node_ptr); + } + + /* Set return value */ + ret_value = in_slist; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__entry_in_skip_list() */ +#endif /* H5C_DO_SLIST_SANITY_CHECKS */ + +/*------------------------------------------------------------------------- + * Function: H5C__image_stats + * + * Purpose: Prints statistics specific to the cache image. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 10/26/15 + * + *------------------------------------------------------------------------- + */ +herr_t +#if H5C_COLLECT_CACHE_STATS +H5C__image_stats(H5C_t *cache_ptr, hbool_t print_header) +#else /* H5C_COLLECT_CACHE_STATS */ +H5C__image_stats(H5C_t *cache_ptr, hbool_t H5_ATTR_UNUSED print_header) +#endif /* H5C_COLLECT_CACHE_STATS */ +{ +#if H5C_COLLECT_CACHE_STATS + int i; + int64_t total_hits = 0; + int64_t total_misses = 0; + double hit_rate; + double prefetch_use_rate; +#endif /* H5C_COLLECT_CACHE_STATS */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + if (!cache_ptr || cache_ptr->magic != H5C__H5C_T_MAGIC) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr") + +#if H5C_COLLECT_CACHE_STATS + for (i = 0; i <= cache_ptr->max_type_id; i++) { + total_hits += cache_ptr->hits[i]; + total_misses += cache_ptr->misses[i]; + } /* end for */ + + if ((total_hits > 0) || (total_misses > 0)) + hit_rate = 100.0 * ((double)(total_hits)) / ((double)(total_hits + total_misses)); + else + hit_rate = 0.0; + + if (cache_ptr->prefetches > 0) + prefetch_use_rate = 100.0 * ((double)(cache_ptr->prefetch_hits)) / ((double)(cache_ptr->prefetches)); + else + prefetch_use_rate = 0.0; + + if (print_header) { + HDfprintf(stdout, "\nhit prefetches prefetch image pf hit\n"); + HDfprintf(stdout, "rate: total: dirty: hits: flshs: evct: size: rate:\n"); + } /* end if */ + + HDfprintf(stdout, "%3.1lf %5lld %5lld %5lld %5lld %5lld %5lld %3.1lf\n", hit_rate, + (long long)(cache_ptr->prefetches), (long long)(cache_ptr->dirty_prefetches), + (long long)(cache_ptr->prefetch_hits), + (long long)(cache_ptr->flushes[H5AC_PREFETCHED_ENTRY_ID]), + (long long)(cache_ptr->evictions[H5AC_PREFETCHED_ENTRY_ID]), + (long long)(cache_ptr->last_image_size), prefetch_use_rate); +#endif /* H5C_COLLECT_CACHE_STATS */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__image_stats() */ diff --git a/src/H5Centry.c b/src/H5Centry.c new file mode 100644 index 00000000000..e5735614a99 --- /dev/null +++ b/src/H5Centry.c @@ -0,0 +1,4363 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. 
* + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*------------------------------------------------------------------------- + * + * Created: H5Centry.c + * May 8 2023 + * Quincey Koziol + * + * Purpose: Routines which operate on cache entries. + * + *------------------------------------------------------------------------- + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5Cmodule.h" /* This source code file is part of the H5C module */ +#define H5F_FRIEND /* suppress error about including H5Fpkg */ + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5Cpkg.h" /* Cache */ +#include "H5CXprivate.h" /* API Contexts */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5Fpkg.h" /* Files */ +#include "H5MFprivate.h" /* File memory management */ +#include "H5MMprivate.h" /* Memory management */ + +/****************/ +/* Local Macros */ +/****************/ +#if H5C_DO_MEMORY_SANITY_CHECKS +#define H5C_IMAGE_EXTRA_SPACE 8 +#define H5C_IMAGE_SANITY_VALUE "DeadBeef" +#else /* H5C_DO_MEMORY_SANITY_CHECKS */ +#define H5C_IMAGE_EXTRA_SPACE 0 +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + +/******************/ +/* Local Typedefs */ +/******************/ + +/* Alias for pointer to cache entry, for use when allocating sequences of them */ +typedef H5C_cache_entry_t *H5C_cache_entry_ptr_t; + +/********************/ +/* Local Prototypes */ +/********************/ +static herr_t H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); +static herr_t H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp); +static herr_t H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp); +static herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); +static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, + hbool_t actual); +static void *H5C__load_entry(H5F_t *f, +#ifdef H5_HAVE_PARALLEL + hbool_t coll_access, +#endif /* H5_HAVE_PARALLEL */ + const H5C_class_t *type, haddr_t addr, void *udata); +static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry); +static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry); +static herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry); +static herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry); +#ifndef NDEBUG +static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, + const H5C_cache_entry_t *base_entry); +#endif +static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr, + H5C_cache_entry_t **fd_children); +static herr_t H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t **entry_ptr_ptr, + const H5C_class_t *type, haddr_t addr, void *udata); + +/*********************/ +/* Package Variables */ +/*********************/ + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/*******************/ +/* Local Variables */ 
+/*******************/ + +/* Declare a free list to manage arrays of cache entries */ +H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t); + +/*------------------------------------------------------------------------- + * Function: H5C__pin_entry_from_client() + * + * Purpose: Internal routine to pin a cache entry from a client action. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * 3/26/09 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__pin_entry_from_client(H5C_t +#if !H5C_COLLECT_CACHE_STATS + H5_ATTR_UNUSED +#endif + *cache_ptr, + H5C_cache_entry_t *entry_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + HDassert(cache_ptr); + HDassert(entry_ptr); + HDassert(entry_ptr->is_protected); + + /* Check if the entry is already pinned */ + if (entry_ptr->is_pinned) { + /* Check if the entry was pinned through an explicit pin from a client */ + if (entry_ptr->pinned_from_client) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned") + } /* end if */ + else { + entry_ptr->is_pinned = TRUE; + + H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) + } /* end else */ + + /* Mark that the entry was pinned through an explicit pin from a client */ + entry_ptr->pinned_from_client = TRUE; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__pin_entry_from_client() */ + +/*------------------------------------------------------------------------- + * Function: H5C__unpin_entry_real() + * + * Purpose: Internal routine to unpin a cache entry. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * 1/6/18 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp) +{ + herr_t ret_value = SUCCEED; /* Return value */ + +#ifdef H5C_DO_SANITY_CHECKS + FUNC_ENTER_PACKAGE +#else + FUNC_ENTER_PACKAGE_NOERR +#endif + + /* Sanity checking */ + HDassert(cache_ptr); + HDassert(entry_ptr); + HDassert(entry_ptr->is_pinned); + + /* If requested, update the replacement policy if the entry is not protected */ + if (update_rp && !entry_ptr->is_protected) + H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL) + + /* Unpin the entry now */ + entry_ptr->is_pinned = FALSE; + + /* Update the stats for an unpin operation */ + H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) + +#ifdef H5C_DO_SANITY_CHECKS +done: +#endif + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__unpin_entry_real() */ + +/*------------------------------------------------------------------------- + * Function: H5C__unpin_entry_from_client() + * + * Purpose: Internal routine to unpin a cache entry from a client action. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * 3/24/09 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checking */ + HDassert(cache_ptr); + HDassert(entry_ptr); + + /* Error checking (should be sanity checks?) 
*/ + if (!entry_ptr->is_pinned) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned") + if (!entry_ptr->pinned_from_client) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client") + + /* Check if the entry is not pinned from a flush dependency */ + if (!entry_ptr->pinned_from_cache) + if (H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry") + + /* Mark the entry as explicitly unpinned by the client */ + entry_ptr->pinned_from_client = FALSE; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__unpin_entry_from_client() */ + +/*------------------------------------------------------------------------- + * Function: H5C__generate_image + * + * Purpose: Serialize an entry and generate its image. + * + * Note: This may cause the entry to be re-sized and/or moved in + * the cache. + * + * As we will not update the metadata cache's data structures + * until we we finish the write, we must touch up these + * data structures for size and location changes even if we + * are about to delete the entry from the cache (i.e. on a + * flush destroy). + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Mohamad Chaarawi + * 2/10/16 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) +{ + haddr_t new_addr = HADDR_UNDEF; + haddr_t old_addr = HADDR_UNDEF; + size_t new_len = 0; + unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + /* Sanity check */ + HDassert(f); + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(entry_ptr); + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(!entry_ptr->image_up_to_date); + HDassert(entry_ptr->is_dirty); + HDassert(!entry_ptr->is_protected); + HDassert(entry_ptr->type); + + /* make note of the entry's current address */ + old_addr = entry_ptr->addr; + + /* Call client's pre-serialize callback, if there's one */ + if ((entry_ptr->type->pre_serialize) && + ((entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size, &new_addr, + &new_len, &serialize_flags) < 0)) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry") + + /* Check for any flags set in the pre-serialize callback */ + if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) { + /* Check for unexpected flags from serialize callback */ + if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG)) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)") + +#ifdef H5_HAVE_PARALLEL + /* In the parallel case, resizes and moves in + * the serialize operation can cause problems. + * If they occur, scream and die. + * + * At present, in the parallel case, the aux_ptr + * will only be set if there is more than one + * process. Thus we can use this to detect + * the parallel case. + * + * This works for now, but if we start using the + * aux_ptr for other purposes, we will have to + * change this test accordingly. + * + * NB: While this test detects entryies that attempt + * to resize or move themselves during a flush + * in the parallel case, it will not detect an + * entry that dirties, resizes, and/or moves + * other entries during its flush. 
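/*
 * Illustrative sketch (editorial aside, not part of this patch): the
 * pre-serialize protocol described above lets a cache client report a
 * resize or move before its image is written.  The callback below is
 * hypothetical -- its parameter list simply mirrors the call made from
 * H5C__generate_image(), and my_entry_t / encoded_size are invented for the
 * example.  Real clients register such a callback through H5C_class_t
 * (H5Cprivate.h), which is also where the H5C__SERIALIZE_* flags live.
 */
typedef struct my_entry_t {
    H5C_cache_entry_t cache_info; /* cache bookkeeping comes first for cache clients */
    size_t            encoded_size;
} my_entry_t;

static herr_t
my_pre_serialize(H5F_t *f, void *thing, haddr_t addr, size_t len, haddr_t *new_addr, size_t *new_len,
                 unsigned *flags_ptr)
{
    my_entry_t *entry = (my_entry_t *)thing;

    (void)f;
    (void)addr;
    (void)new_addr;

    *flags_ptr = H5C__SERIALIZE_NO_FLAGS_SET;

    /* If the encoded form no longer fits the current image, ask the cache
     * to resize the buffer; the cache then updates the index, slist and
     * replacement policy structures before calling 'serialize'. */
    if (entry->encoded_size > len) {
        *new_len = entry->encoded_size;
        *flags_ptr |= H5C__SERIALIZE_RESIZED_FLAG;
    }

    /* A relocation would set *new_addr and H5C__SERIALIZE_MOVED_FLAG
     * instead -- not shown here, and disallowed in the parallel case as
     * noted above. */

    return SUCCEED;
}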
+ */ + if (cache_ptr->aux_ptr != NULL) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case") +#endif + + /* If required, resize the buffer and update the entry and the cache + * data structures + */ + if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) { + /* Sanity check */ + HDassert(new_len > 0); + + /* Allocate a new image buffer */ + if (NULL == + (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, + "memory allocation failed for on disk image buffer") + +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE, + H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + + /* Update statistics for resizing the entry */ + H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len); + + /* Update the hash table for the size change */ + H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr, + !entry_ptr->is_dirty, FAIL); + + /* The entry can't be protected since we are in the process of + * flushing it. Thus we must update the replacement policy data + * structures for the size change. The macro deals with the pinned + * case. + */ + H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len, FAIL); + + /* As we haven't updated the cache data structures for + * for the flush or flush destroy yet, the entry should + * be in the slist if the slist is enabled. Since + * H5C__UPDATE_SLIST_FOR_SIZE_CHANGE() is a no-op if the + * slist is enabled, call it un-conditionally. + */ + HDassert(entry_ptr->is_dirty); + HDassert((entry_ptr->in_slist) || (!cache_ptr->slist_enabled)); + + H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len); + + /* Finally, update the entry for its new size */ + entry_ptr->size = new_len; + } /* end if */ + + /* If required, udate the entry and the cache data structures + * for a move + */ + if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) { + /* Update stats and entries relocated counter */ + H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) + + /* We must update cache data structures for the change in address */ + if (entry_ptr->addr == old_addr) { + /* Delete the entry from the hash table and the slist */ + H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL); + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) + + /* Update the entry for its new address */ + entry_ptr->addr = new_addr; + + /* And then reinsert in the index and slist */ + H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL); + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL); + } /* end if */ + else /* move is already done for us -- just do sanity checks */ + HDassert(entry_ptr->addr == new_addr); + } /* end if */ + } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */ + + /* Serialize object into buffer */ + if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry") + +#if H5C_DO_MEMORY_SANITY_CHECKS + HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, + H5C_IMAGE_EXTRA_SPACE)); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + + entry_ptr->image_up_to_date = TRUE; + + /* Propagate the fact that the entry is serialized up the + * flush dependency chain if appropriate. 
Since the image must + * have been out of date for this function to have been called + * (see assertion on entry), no need to check that -- only check + * for flush dependency parents. + */ + HDassert(entry_ptr->flush_dep_nunser_children == 0); + + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_serialized(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__generate_image */ + +/*------------------------------------------------------------------------- + * Function: H5C__flush_single_entry + * + * Purpose: Flush or clear (and evict if requested) the cache entry + * with the specified address and type. If the type is NULL, + * any unprotected entry at the specified address will be + * flushed (and possibly evicted). + * + * Attempts to flush a protected entry will result in an + * error. + * + * If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will + * be cleared and not flushed, and the call can't be part of a + * sequence of flushes. + * + * The function does nothing silently if there is no entry + * at the supplied address, or if the entry found has the + * wrong type. + * + * Return: Non-negative on success/Negative on failure or if there was + * an attempt to flush a protected item. + * + * Programmer: John Mainzer, 5/5/04 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) +{ + H5C_t *cache_ptr; /* Cache for file */ + hbool_t destroy; /* external flag */ + hbool_t clear_only; /* external flag */ + hbool_t free_file_space; /* external flag */ + hbool_t take_ownership; /* external flag */ + hbool_t del_from_slist_on_destroy; /* external flag */ + hbool_t during_flush; /* external flag */ + hbool_t write_entry; /* internal flag */ + hbool_t destroy_entry; /* internal flag */ + hbool_t generate_image; /* internal flag */ + hbool_t update_page_buffer; /* internal flag */ + hbool_t was_dirty; + hbool_t suppress_image_entry_writes = FALSE; + hbool_t suppress_image_entry_frees = FALSE; + haddr_t entry_addr = HADDR_UNDEF; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(f); + cache_ptr = f->shared->cache; + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(entry_ptr); + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(entry_ptr->ring != H5C_RING_UNDEFINED); + HDassert(entry_ptr->type); + + /* setup external flags from the flags parameter */ + destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0); + clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0); + free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0); + take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0); + del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0); + during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0); + generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0); + update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0); + + /* Set the flag for destroying the entry, based on the 'take ownership' + * and 'destroy' flags + */ + if (take_ownership) + destroy_entry = FALSE; + else + destroy_entry = destroy; + + /* we will write the entry to disk if it exists, is dirty, and if the + * clear only flag is not set. 
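/*
 * Illustrative sketch (editorial aside, not part of this patch): the 'flags'
 * argument decoded above combines orthogonal behaviors.  Some combinations a
 * caller might pass (sketch only -- the real call sites live elsewhere in
 * H5C and may add flags such as H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG):
 *
 *   flush in place:        H5C__NO_FLAGS_SET
 *   flush and evict:       H5C__FLUSH_INVALIDATE_FLAG
 *   evict without writing: H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG
 *   hand entry to caller:  H5C__FLUSH_INVALIDATE_FLAG | H5C__TAKE_OWNERSHIP_FLAG
 *
 * For example, discarding a known-stale entry without touching the file:
 */
if (H5C__flush_single_entry(f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG) < 0)
    HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear and evict entry")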
+ */ + if (entry_ptr->is_dirty && !clear_only) + write_entry = TRUE; + else + write_entry = FALSE; + + /* if we have received close warning, and we have been instructed to + * generate a metadata cache image, and we have actually constructed + * the entry images, set suppress_image_entry_frees to TRUE. + * + * Set suppress_image_entry_writes to TRUE if indicated by the + * image_ctl flags. + */ + if (cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image && + cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries != NULL) { + + /* Sanity checks */ + HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image)); + HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image)); + HDassert((!clear_only) || !(entry_ptr->include_in_image)); + HDassert((!take_ownership) || !(entry_ptr->include_in_image)); + HDassert((!free_file_space) || !(entry_ptr->include_in_image)); + + suppress_image_entry_frees = TRUE; + + if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES) + suppress_image_entry_writes = TRUE; + } /* end if */ + + /* run initial sanity checks */ +#ifdef H5C_DO_SANITY_CHECKS + if (cache_ptr->slist_enabled) { + if (entry_ptr->in_slist) { + HDassert(entry_ptr->is_dirty); + if (entry_ptr->flush_marker && !entry_ptr->is_dirty) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks") + } /* end if */ + else { + HDassert(!entry_ptr->is_dirty); + HDassert(!entry_ptr->flush_marker); + if (entry_ptr->is_dirty || entry_ptr->flush_marker) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks") + } /* end else */ + } + else { /* slist is disabled */ + HDassert(!entry_ptr->in_slist); + if (!entry_ptr->is_dirty) + if (entry_ptr->flush_marker) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?") + } +#endif /* H5C_DO_SANITY_CHECKS */ + + if (entry_ptr->is_protected) + /* Attempt to flush a protected entry -- scream and die. */ + HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry") + + /* Set entry_ptr->flush_in_progress = TRUE and set + * entry_ptr->flush_marker = FALSE + * + * We will set flush_in_progress back to FALSE at the end if the + * entry still exists at that point. + */ + entry_ptr->flush_in_progress = TRUE; + entry_ptr->flush_marker = FALSE; + + /* Preserve current dirty state for later */ + was_dirty = entry_ptr->is_dirty; + + /* The entry is dirty, and we are doing a flush, a flush destroy or have + * been requested to generate an image. In those cases, serialize the + * entry. + */ + if (write_entry || generate_image) { + HDassert(entry_ptr->is_dirty); + if (NULL == entry_ptr->image_ptr) { + if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, + "memory allocation failed for on disk image buffer") + +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, + H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + + } /* end if */ + + if (!entry_ptr->image_up_to_date) { + /* Sanity check */ + HDassert(!entry_ptr->prefetched); + + /* Generate the entry's image */ + if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image") + } /* end if ( ! (entry_ptr->image_up_to_date) ) */ + } /* end if */ + + /* Finally, write the image to disk. 
+ * + * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the + * in the entry's type, we silently skip the write. This + * flag should only be used in test code. + */ + if (write_entry) { + HDassert(entry_ptr->is_dirty); + +#ifdef H5C_DO_SANITY_CHECKS + if (cache_ptr->check_write_permitted && !cache_ptr->write_permitted) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!") +#endif /* H5C_DO_SANITY_CHECKS */ + + /* Write the image to disk unless the write is suppressed. + * + * This happens if both suppress_image_entry_writes and + * entry_ptr->include_in_image are TRUE, or if the + * H5AC__CLASS_SKIP_WRITES is set in the entry's type. This + * flag should only be used in test code + */ + if ((!suppress_image_entry_writes || !entry_ptr->include_in_image) && + ((entry_ptr->type->flags & H5C__CLASS_SKIP_WRITES) == 0)) { + H5FD_mem_t mem_type = H5FD_MEM_DEFAULT; + +#ifdef H5_HAVE_PARALLEL + if (cache_ptr->coll_write_list) { + if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item") + } /* end if */ + else { +#endif /* H5_HAVE_PARALLEL */ + if (entry_ptr->prefetched) { + HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); + mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type; + } /* end if */ + else + mem_type = entry_ptr->type->mem_type; + + if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file") +#ifdef H5_HAVE_PARALLEL + } +#endif /* H5_HAVE_PARALLEL */ + } /* end if */ + + /* if the entry has a notify callback, notify it that we have + * just flushed the entry. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush") + } /* if ( write_entry ) */ + + /* At this point, all pre-serialize and serialize calls have been + * made if it was appropriate to make them. Similarly, the entry + * has been written to disk if desired. + * + * Thus it is now safe to update the cache data structures for the + * flush. + */ + + /* start by updating the statistics */ + if (clear_only) { + /* only log a clear if the entry was dirty */ + if (was_dirty) + H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) + } + else if (write_entry) { + HDassert(was_dirty); + + /* only log a flush if we actually wrote to disk */ + H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) + } /* end else if */ + + /* Note that the algorithm below is (very) similar to the set of operations + * in H5C_remove_entry() and should be kept in sync with changes + * to that code. - QAK, 2016/11/30 + */ + + /* Update the cache internal data structures. */ + if (destroy) { + /* Sanity checks */ + if (take_ownership) + HDassert(!destroy_entry); + else + HDassert(destroy_entry); + + HDassert(!entry_ptr->is_pinned); + + /* Update stats, while entry is still in the cache */ + H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) + + /* If the entry's type has a 'notify' callback and the entry is about + * to be removed from the cache, send a 'before eviction' notice while + * the entry is still fully integrated in the cache. 
+ */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict") + + /* Update the cache internal data structures as appropriate + * for a destroy. Specifically: + * + * 1) Delete it from the index + * + * 2) Delete it from the skip list if requested. + * + * 3) Delete it from the collective read access list. + * + * 4) Update the replacement policy for eviction + * + * 5) Remove it from the tag list for this object + * + * Finally, if the destroy_entry flag is set, discard the + * entry. + */ + H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL) + + if (entry_ptr->in_slist && del_from_slist_on_destroy) + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL) + +#ifdef H5_HAVE_PARALLEL + /* Check for collective read access flag */ + if (entry_ptr->coll_access) { + entry_ptr->coll_access = FALSE; + H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL) + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + + H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL) + + /* Remove entry from tag list */ + if (H5C__untag_entry(cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list") + + /* verify that the entry is no longer part of any flush dependencies */ + HDassert(entry_ptr->flush_dep_nparents == 0); + HDassert(entry_ptr->flush_dep_nchildren == 0); + } /* end if */ + else { + HDassert(clear_only || write_entry); + HDassert(entry_ptr->is_dirty); + HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist)); + + /* We are either doing a flush or a clear. + * + * A clear and a flush are the same from the point of + * view of the replacement policy and the slist. + * Hence no differentiation between them. + */ + H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL) + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL) + + /* mark the entry as clean and update the index for + * entry clean. Also, call the clear callback + * if defined. + */ + entry_ptr->is_dirty = FALSE; + + H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL); + + /* Check for entry changing status and do notifications, etc. */ + if (was_dirty) { + /* If the entry's type has a 'notify' callback send a + * 'entry cleaned' notice now that the entry is fully + * integrated into the cache. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify client about entry dirty flag cleared") + + /* Propagate the clean flag up the flush dependency chain + * if appropriate + */ + if (entry_ptr->flush_dep_ndirty_children != 0) + HDassert(entry_ptr->flush_dep_ndirty_children == 0); + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_clean(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag") + } /* end if */ + } /* end else */ + + /* reset the flush_in progress flag */ + entry_ptr->flush_in_progress = FALSE; + + /* capture the cache entry address for the log_flush call at the + * end before the entry_ptr gets freed + */ + entry_addr = entry_ptr->addr; + + /* Internal cache data structures should now be up to date, and + * consistent with the status of the entry. + * + * Now discard the entry if appropriate. 
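/*
 * Illustrative sketch (editorial aside, not part of this patch): the
 * 'notify' hook invoked above (H5C_NOTIFY_ACTION_AFTER_FLUSH, _BEFORE_EVICT
 * and _ENTRY_CLEANED) is how a client tracks what the cache is doing to its
 * entry.  The callback below is hypothetical; my_entry_t and its fields are
 * invented, and the action/typedef names are assumed to come from
 * H5Cprivate.h, where the real client classes are declared.
 */
static herr_t
my_notify(H5C_notify_action_t action, void *thing)
{
    my_entry_t *entry = (my_entry_t *)thing;

    switch (action) {
        case H5C_NOTIFY_ACTION_AFTER_FLUSH:
            entry->on_disk_up_to_date = TRUE; /* the image was just written */
            break;

        case H5C_NOTIFY_ACTION_BEFORE_EVICT:
            /* still fully linked into the cache here -- last chance to tear
             * down any references other entries hold to this one */
            break;

        case H5C_NOTIFY_ACTION_ENTRY_CLEANED:
            entry->dirty_shadow_state = FALSE; /* mirror the dirty flag reset */
            break;

        default:
            /* other notify actions are ignored in this sketch */
            break;
    }

    return SUCCEED;
}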
+     */
+    if (destroy) {
+        /* Sanity check */
+        HDassert(0 == entry_ptr->flush_dep_nparents);
+
+        /* if both suppress_image_entry_frees and entry_ptr->include_in_image
+         * are true, simply set entry_ptr->image_ptr to NULL, as we have
+         * another pointer to the buffer in an instance of H5C_image_entry_t
+         * in cache_ptr->image_entries.
+         *
+         * Otherwise, free the buffer if it exists.
+         */
+        if (suppress_image_entry_frees && entry_ptr->include_in_image)
+            entry_ptr->image_ptr = NULL;
+        else if (entry_ptr->image_ptr != NULL)
+            entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+
+        /* If the entry is not a prefetched entry, verify that the flush
+         * dependency parents addresses array has been transferred.
+         *
+         * If the entry is prefetched, the free_icr routine will dispose of
+         * the flush dependency parents addresses array if necessary.
+         */
+        if (!entry_ptr->prefetched) {
+            HDassert(0 == entry_ptr->fd_parent_count);
+            HDassert(NULL == entry_ptr->fd_parent_addrs);
+        } /* end if */
+
+        /* Check whether we should free the space in the file that
+         * the entry occupies
+         */
+        if (free_file_space) {
+            hsize_t fsf_size;
+
+            /* Sanity checks */
+            HDassert(H5F_addr_defined(entry_ptr->addr));
+            HDassert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr));
+#ifndef NDEBUG
+            {
+                size_t curr_len;
+
+                /* Get the actual image size for the thing again */
+                entry_ptr->type->image_len((void *)entry_ptr, &curr_len);
+                HDassert(curr_len == entry_ptr->size);
+            }
+#endif
+
+            /* If the file space free size callback is defined, use
+             * it to get the size of the block of file space to free.
+             * Otherwise use entry_ptr->size.
+             */
+            if (entry_ptr->type->fsf_size) {
+                if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size")
+            } /* end if */
+            else /* no file space free size callback -- use entry size */
+                fsf_size = entry_ptr->size;
+
+            /* Release the space on disk */
+            if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry")
+        } /* end if ( free_file_space ) */
+
+        /* Reset the pointer to the cache the entry is within. -QAK */
+        entry_ptr->cache_ptr = NULL;
+
+        /* increment entries_removed_counter and set
+         * last_entry_removed_ptr.  As we are likely about to
+         * free the entry, recall that last_entry_removed_ptr
+         * must NEVER be dereferenced.
+         *
+         * Recall that these fields are maintained to allow functions
+         * that perform scans of lists of entries to detect the
+         * unexpected removal of entries (via expunge, eviction,
+         * or take ownership at present), so that they can re-start
+         * their scans if necessary.
+         *
+         * Also check if the entry we are watching for removal is being
+         * removed (usually the 'next' entry for an iteration) and reset
+         * it to indicate that it was removed.
+         */
+        cache_ptr->entries_removed_counter++;
+        cache_ptr->last_entry_removed_ptr = entry_ptr;
+
+        if (entry_ptr == cache_ptr->entry_watched_for_removal)
+            cache_ptr->entry_watched_for_removal = NULL;
+
+        /* Check for actually destroying the entry in memory */
+        /* (As opposed to taking ownership of it) */
+        if (destroy_entry) {
+            if (entry_ptr->is_dirty) {
+                /* Reset dirty flag */
+                entry_ptr->is_dirty = FALSE;
+
+                /* If the entry's type has a 'notify' callback send an
+                 * 'entry cleaned' notice now that the entry is fully
+                 * integrated into the cache.
+ */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify client about entry dirty flag cleared") + } /* end if */ + + /* we are about to discard the in core representation -- + * set the magic field to bad magic so we can detect a + * freed entry if we see one. + */ + entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; + + /* verify that the image has been freed */ + HDassert(entry_ptr->image_ptr == NULL); + + if (entry_ptr->type->free_icr((void *)entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed") + } /* end if */ + else { + HDassert(take_ownership); + + /* Client is taking ownership of the entry. Set bad magic here too + * so the cache will choke unless the entry is re-inserted properly + */ + entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; + } /* end else */ + } /* if (destroy) */ + + /* Check if we have to update the page buffer with cleared entries + * so it doesn't go out of date + */ + if (update_page_buffer) { + /* Sanity check */ + HDassert(!destroy); + HDassert(entry_ptr->image_ptr); + + if (f->shared->page_buf && (f->shared->page_buf->page_size >= entry_ptr->size)) + if (H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size, + entry_ptr->image_ptr) > 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache") + } /* end if */ + + if (cache_ptr->log_flush) + if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed") + +done: + HDassert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress)); + HDassert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty)); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__flush_single_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C__verify_len_eoa + * + * Purpose: Verify that 'len' does not exceed eoa when 'actual' is + * false i.e. 'len" is the initial speculative length from + * get_load_size callback with null image pointer. + * If exceed, adjust 'len' accordingly. + * + * Verify that 'len' should not exceed eoa when 'actual' is + * true i.e. 'len' is the actual length from get_load_size + * callback with non-null image pointer. + * If exceed, return error. + * + * Return: FAIL if error is detected, SUCCEED otherwise. + * + * Programmer: Vailin Choi + * 9/6/15 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, hbool_t actual) +{ + H5FD_mem_t cooked_type; /* Modified type, accounting for switching global heaps */ + haddr_t eoa; /* End-of-allocation in the file */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces + * type to H5FD_MEM_DRAW via its call to H5F__accum_read(). + * Thus we do the same for purposes of computing the EOA + * for sanity checks. + */ + cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ? 
H5FD_MEM_DRAW : type->mem_type; + + /* Get the file's end-of-allocation value */ + eoa = H5F_get_eoa(f, cooked_type); + if (!H5F_addr_defined(eoa)) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file") + + /* Check for bad address in general */ + if (H5F_addr_gt(addr, eoa)) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation") + + /* Check if the amount of data to read will be past the EOA */ + if (H5F_addr_gt((addr + *len), eoa)) { + if (actual) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA") + else + /* Trim down the length of the metadata */ + *len = (size_t)(eoa - addr); + } /* end if */ + + if (*len <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__verify_len_eoa() */ + +/*------------------------------------------------------------------------- + * Function: H5C__load_entry + * + * Purpose: Attempt to load the entry at the specified disk address + * and with the specified type into memory. If successful. + * return the in memory address of the entry. Return NULL + * on failure. + * + * Note that this function simply loads the entry into + * core. It does not insert it into the cache. + * + * Return: Non-NULL on success / NULL on failure. + * + * Programmer: John Mainzer, 5/18/04 + * + *------------------------------------------------------------------------- + */ +void * +H5C__load_entry(H5F_t *f, +#ifdef H5_HAVE_PARALLEL + hbool_t coll_access, +#endif /* H5_HAVE_PARALLEL */ + const H5C_class_t *type, haddr_t addr, void *udata) +{ + hbool_t dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */ + uint8_t *image = NULL; /* Buffer for disk image */ + void *thing = NULL; /* Pointer to thing loaded */ + H5C_cache_entry_t *entry = NULL; /* Alias for thing loaded, as cache entry */ + size_t len; /* Size of image in file */ +#ifdef H5_HAVE_PARALLEL + int mpi_rank = 0; /* MPI process rank */ + MPI_Comm comm = MPI_COMM_NULL; /* File MPI Communicator */ + int mpi_code; /* MPI error code */ +#endif /* H5_HAVE_PARALLEL */ + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + HDassert(f); + HDassert(f->shared); + HDassert(f->shared->cache); + HDassert(type); + HDassert(H5F_addr_defined(addr)); + HDassert(type->get_initial_load_size); + if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) + HDassert(type->get_final_load_size); + else + HDassert(NULL == type->get_final_load_size); + HDassert(type->deserialize); + + /* Can't see how skip reads could be usefully combined with + * the speculative read flag. Hence disallow. 
+ */ + HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG))); + + /* Call the get_initial_load_size callback, to retrieve the initial size of image */ + if (type->get_initial_load_size(udata, &len) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size") + HDassert(len > 0); + + /* Check for possible speculative read off the end of the file */ + if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) + if (H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA") + + /* Allocate the buffer for reading the on-disk entry image */ + if (NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer") +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + +#ifdef H5_HAVE_PARALLEL + if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) { + if ((mpi_rank = H5F_mpi_get_rank(f)) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank") + if ((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed") + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + + /* Get the on-disk entry image */ + if (0 == (type->flags & H5C__CLASS_SKIP_READS)) { + unsigned tries, max_tries; /* The # of read attempts */ + unsigned retries; /* The # of retries */ + htri_t chk_ret; /* return from verify_chksum callback */ + size_t actual_len = len; /* The actual length, after speculative reads have been resolved */ + uint64_t nanosec = 1; /* # of nanoseconds to sleep between retries */ + void *new_image; /* Pointer to image */ + hbool_t len_changed = TRUE; /* Whether to re-check speculative entries */ + + /* Get the # of read attempts */ + max_tries = tries = H5F_GET_READ_ATTEMPTS(f); + + /* + * This do/while loop performs the following till the metadata checksum + * is correct or the file's number of allowed read attempts are reached. 
+ * --read the metadata + * --determine the actual size of the metadata + * --perform checksum verification + */ + do { + if (actual_len != len) { + if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()") + image = (uint8_t *)new_image; +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + } /* end if */ + +#ifdef H5_HAVE_PARALLEL + if (!coll_access || 0 == mpi_rank) { +#endif /* H5_HAVE_PARALLEL */ + if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) { +#ifdef H5_HAVE_PARALLEL + if (coll_access) { + /* Push an error, but still participate in following MPI_Bcast */ + HDmemset(image, 0, len); + HDONE_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*") + } + else +#endif + HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*") + } + +#ifdef H5_HAVE_PARALLEL + } /* end if */ + /* if the collective metadata read optimization is turned on, + * bcast the metadata read from process 0 to all ranks in the file + * communicator + */ + if (coll_access) { + int buf_size; + + H5_CHECKED_ASSIGN(buf_size, int, len, size_t); + if (MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm))) + HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + + /* If the entry could be read speculatively and the length is still + * changing, check for updating the actual size + */ + if ((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && len_changed) { + /* Retrieve the actual length */ + actual_len = len; + if (type->get_final_load_size(image, len, udata, &actual_len) < 0) + continue; /* Transfer control to while() and count towards retries */ + + /* Check for the length changing */ + if (actual_len != len) { + /* Verify that the length isn't past the EOA for the file */ + if (H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA") + + /* Expand buffer to new size */ + if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()") + image = (uint8_t *)new_image; +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + + if (actual_len > len) { +#ifdef H5_HAVE_PARALLEL + if (!coll_access || 0 == mpi_rank) { +#endif /* H5_HAVE_PARALLEL */ + /* If the thing's image needs to be bigger for a speculatively + * loaded thing, go get the on-disk image again (the extra portion). 
+ */ + if (H5F_block_read(f, type->mem_type, addr + len, actual_len - len, image + len) < + 0) { +#ifdef H5_HAVE_PARALLEL + if (coll_access) { + /* Push an error, but still participate in following MPI_Bcast */ + HDmemset(image + len, 0, actual_len - len); + HDONE_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image") + } + else +#endif + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image") + } + +#ifdef H5_HAVE_PARALLEL + } + /* If the collective metadata read optimization is turned on, + * Bcast the metadata read from process 0 to all ranks in the file + * communicator */ + if (coll_access) { + int buf_size; + + H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t); + if (MPI_SUCCESS != + (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm))) + HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + } /* end if */ + } /* end if (actual_len != len) */ + else { + /* The length has stabilized */ + len_changed = FALSE; + + /* Set the final length */ + len = actual_len; + } /* else */ + } /* end if */ + + /* If there's no way to verify the checksum for a piece of metadata + * (usually because there's no checksum in the file), leave now + */ + if (type->verify_chksum == NULL) + break; + + /* Verify the checksum for the metadata image */ + if ((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback") + if (chk_ret == TRUE) + break; + + /* Sleep for some time */ + H5_nanosleep(nanosec); + nanosec *= 2; /* Double the sleep time next time */ + } while (--tries); + + /* Check for too many tries */ + if (tries == 0) + HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "incorrect metadata checksum after all read attempts") + + /* Calculate and track the # of retries */ + retries = max_tries - tries; + if (retries) /* Does not track 0 retry */ + if (H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", retries) + + /* Set the final length (in case it wasn't set earlier) */ + len = actual_len; + } /* end if !H5C__CLASS_SKIP_READS */ + + /* Deserialize the on-disk image into the native memory form */ + if (NULL == (thing = type->deserialize(image, len, udata, &dirty))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image") + + entry = (H5C_cache_entry_t *)thing; + + /* In general, an entry should be clean just after it is loaded. + * + * However, when this code is used in the metadata cache, it is + * possible that object headers will be dirty at this point, as + * the deserialize function will alter object headers if necessary to + * fix an old bug. + * + * In the following assert: + * + * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) ); + * + * note that type ids 5 & 6 are associated with object headers in the + * metadata cache. + * + * When we get to using H5C for other purposes, we may wish to + * tighten up the assert so that the loophole only applies to the + * metadata cache. 
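+     *
+     * (In the current H5AC class table, ids 5 and 6 are expected to
+     * correspond to H5AC_OHDR_ID and H5AC_OHDR_CHK_ID, i.e. object
+     * headers and object header continuation chunks.)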
+ */ + + HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6)); + + entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; + entry->cache_ptr = f->shared->cache; + entry->addr = addr; + entry->size = len; + HDassert(entry->size < H5C_MAX_ENTRY_SIZE); + entry->image_ptr = image; + entry->image_up_to_date = !dirty; + entry->type = type; + entry->is_dirty = dirty; + entry->dirtied = FALSE; + entry->is_protected = FALSE; + entry->is_read_only = FALSE; + entry->ro_ref_count = 0; + entry->is_pinned = FALSE; + entry->in_slist = FALSE; + entry->flush_marker = FALSE; +#ifdef H5_HAVE_PARALLEL + entry->clear_on_unprotect = FALSE; + entry->flush_immediately = FALSE; + entry->coll_access = coll_access; +#endif /* H5_HAVE_PARALLEL */ + entry->flush_in_progress = FALSE; + entry->destroy_in_progress = FALSE; + + entry->ring = H5C_RING_UNDEFINED; + + /* Initialize flush dependency fields */ + entry->flush_dep_parent = NULL; + entry->flush_dep_nparents = 0; + entry->flush_dep_parent_nalloc = 0; + entry->flush_dep_nchildren = 0; + entry->flush_dep_ndirty_children = 0; + entry->flush_dep_nunser_children = 0; + entry->ht_next = NULL; + entry->ht_prev = NULL; + entry->il_next = NULL; + entry->il_prev = NULL; + + entry->next = NULL; + entry->prev = NULL; + +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + entry->aux_next = NULL; + entry->aux_prev = NULL; +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + +#ifdef H5_HAVE_PARALLEL + entry->coll_next = NULL; + entry->coll_prev = NULL; +#endif /* H5_HAVE_PARALLEL */ + + /* initialize cache image related fields */ + entry->include_in_image = FALSE; + entry->lru_rank = 0; + entry->image_dirty = FALSE; + entry->fd_parent_count = 0; + entry->fd_parent_addrs = NULL; + entry->fd_child_count = 0; + entry->fd_dirty_child_count = 0; + entry->image_fd_height = 0; + entry->prefetched = FALSE; + entry->prefetch_type_id = 0; + entry->age = 0; + entry->prefetched_dirty = FALSE; +#ifndef NDEBUG /* debugging field */ + entry->serialization_count = 0; +#endif + + /* initialize tag list fields */ + entry->tl_next = NULL; + entry->tl_prev = NULL; + entry->tag_info = NULL; + + H5C__RESET_CACHE_ENTRY_STATS(entry); + + ret_value = thing; + +done: + /* Cleanup on error */ + if (NULL == ret_value) { + /* Release resources */ + if (thing && type->free_icr(thing) < 0) + HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed") + if (image) + image = (uint8_t *)H5MM_xfree(image); + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__load_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C__mark_flush_dep_dirty() + * + * Purpose: Recursively propagate the flush_dep_ndirty_children flag + * up the dependency chain in response to entry either + * becoming dirty or having its flush_dep_ndirty_children + * increased from 0. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * 11/13/12 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry) +{ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + HDassert(entry); + + /* Iterate over the parent entries, if any */ + for (u = 0; u < entry->flush_dep_nparents; u++) { + /* Sanity check */ + HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children < + entry->flush_dep_parent[u]->flush_dep_nchildren); + + /* Adjust the parent's number of dirty children */ + entry->flush_dep_parent[u]->flush_dep_ndirty_children++; + + /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */ + if (entry->flush_dep_parent[u]->type->notify && + (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, + entry->flush_dep_parent[u]) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry dirty flag set") + } /* end for */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__mark_flush_dep_dirty() */ + +/*------------------------------------------------------------------------- + * Function: H5C__mark_flush_dep_clean() + * + * Purpose: Recursively propagate the flush_dep_ndirty_children flag + * up the dependency chain in response to entry either + * becoming clean or having its flush_dep_ndirty_children + * reduced to 0. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * 11/13/12 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry) +{ + int i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + HDassert(entry); + + /* Iterate over the parent entries, if any */ + /* Note reverse iteration order, in case the callback removes the flush + * dependency - QAK, 2017/08/12 + */ + for (i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) { + /* Sanity check */ + HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0); + + /* Adjust the parent's number of dirty children */ + entry->flush_dep_parent[i]->flush_dep_ndirty_children--; + + /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */ + if (entry->flush_dep_parent[i]->type->notify && + (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, + entry->flush_dep_parent[i]) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry dirty flag reset") + } /* end for */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__mark_flush_dep_clean() */ + +/*------------------------------------------------------------------------- + * Function: H5C__mark_flush_dep_serialized() + * + * Purpose: Decrement the flush_dep_nunser_children fields of all the + * target entry's flush dependency parents in response to + * the target entry becoming serialized. 
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  John Mainzer
+ *              8/30/16
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr)
+{
+    int    i;                   /* Local index variable */
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_PACKAGE
+
+    /* Sanity checks */
+    HDassert(entry_ptr);
+
+    /* Iterate over the parent entries, if any */
+    /* Note reverse iteration order, in case the callback removes the flush
+     * dependency - QAK, 2017/08/12
+     */
+    for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) {
+        /* Sanity checks */
+        HDassert(entry_ptr->flush_dep_parent);
+        HDassert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+        HDassert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0);
+
+        /* decrement the parent's number of unserialized children */
+        entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--;
+
+        /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
+        if (entry_ptr->flush_dep_parent[i]->type->notify &&
+            (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED,
+                                                           entry_ptr->flush_dep_parent[i]) < 0)
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+                        "can't notify parent about child entry serialized flag set")
+    } /* end for */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_serialized() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5C__mark_flush_dep_unserialized()
+ *
+ * Purpose:     Increment the flush_dep_nunser_children fields of all the
+ *              target entry's flush dependency parents in response to
+ *              the target entry becoming unserialized.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  John Mainzer
+ *              8/30/16
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr)
+{
+    unsigned u;                   /* Local index variable */
+    herr_t   ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_PACKAGE
+
+    /* Sanity checks */
+    HDassert(entry_ptr);
+
+    /* Iterate over the parent entries, if any */
+    for (u = 0; u < entry_ptr->flush_dep_nparents; u++) {
+        /* Sanity check */
+        HDassert(entry_ptr->flush_dep_parent);
+        HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+        HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
+                 entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);
+
+        /* increment the parent's number of unserialized children */
+        entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++;
+
+        /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
+        if (entry_ptr->flush_dep_parent[u]->type->notify &&
+            (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED,
+                                                           entry_ptr->flush_dep_parent[u]) < 0)
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+                        "can't notify parent about child entry serialized flag reset")
+    } /* end for */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_unserialized() */
+
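The flush-dependency helpers above (together with H5C__mark_flush_dep_dirty() and H5C__mark_flush_dep_clean() earlier in this hunk) all follow the same counter-propagation pattern: a child state change adjusts a per-parent counter that stays bounded by the parent's child count. A minimal standalone sketch of that pattern, using hypothetical toy_* names rather than the real H5C types:

    #include <assert.h>
    #include <stdbool.h>

    typedef struct toy_entry_t {
        bool                 is_dirty;
        unsigned             nchildren;       /* total flush-dep children */
        unsigned             ndirty_children; /* children currently dirty */
        unsigned             nparents;
        struct toy_entry_t **parents;
    } toy_entry_t;

    /* clean -> dirty transition: bump each parent's dirty-child counter,
     * mirroring the increment done in H5C__mark_flush_dep_dirty(); the
     * clean direction simply decrements the same counter.
     */
    static void
    toy_mark_dirty(toy_entry_t *child)
    {
        if (!child->is_dirty) {
            child->is_dirty = true;
            for (unsigned u = 0; u < child->nparents; u++) {
                assert(child->parents[u]->ndirty_children < child->parents[u]->nchildren);
                child->parents[u]->ndirty_children++;
            }
        }
    }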
+#ifndef NDEBUG
+/*-------------------------------------------------------------------------
+ * Function:    H5C__assert_flush_dep_nocycle()
+ *
+ * Purpose:     Assert recursively that base_entry is not the same as
+ *              entry, and perform the same assertion on all of entry's
+ *              flush dependency parents.  This is used to detect cycles
+ *              created by flush dependencies.
+ *
+ * Return:      void
+ *
+ * Programmer:  Neil Fortner
+ *              12/10/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_entry_t *base_entry)
+{
+    unsigned u; /* Local index variable */
+
+    FUNC_ENTER_PACKAGE_NOERR
+
+    /* Sanity checks */
+    HDassert(entry);
+    HDassert(base_entry);
+
+    /* Make sure the entries are not the same */
+    HDassert(base_entry != entry);
+
+    /* Iterate over entry's parents (if any) */
+    for (u = 0; u < entry->flush_dep_nparents; u++)
+        H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry);
+
+    FUNC_LEAVE_NOAPI_VOID
+} /* H5C__assert_flush_dep_nocycle() */
+#endif
+
+/*-------------------------------------------------------------------------
+ * Function:    H5C__serialize_single_entry
+ *
+ * Purpose:     Serialize the cache entry pointed to by the entry_ptr
+ *              parameter.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  John Mainzer, 7/24/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_PACKAGE
+
+    /* Sanity checks */
+    HDassert(f);
+    HDassert(cache_ptr);
+    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+    HDassert(entry_ptr);
+    HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+    HDassert(!entry_ptr->prefetched);
+    HDassert(!entry_ptr->image_up_to_date);
+    HDassert(entry_ptr->is_dirty);
+    HDassert(!entry_ptr->is_protected);
+    HDassert(!entry_ptr->flush_in_progress);
+    HDassert(entry_ptr->type);
+
+    /* Set entry_ptr->flush_in_progress to TRUE so the target entry
+     * will not be evicted out from under us.  Must set it back to FALSE
+     * when we are done.
+     */
+    entry_ptr->flush_in_progress = TRUE;
+
+    /* Allocate buffer for the entry image if required. */
+    if (NULL == entry_ptr->image_ptr) {
+        HDassert(entry_ptr->size > 0);
+        if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
+#if H5C_DO_MEMORY_SANITY_CHECKS
+        H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
+                    H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+    } /* end if */
+
+    /* Generate image for entry */
+    if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
+
+    /* Reset the flush_in_progress flag */
+    entry_ptr->flush_in_progress = FALSE;
+
+done:
+    HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
+    HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_single_entry() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5C__destroy_pf_entry_child_flush_deps()
+ *
+ * Purpose:     Destroy all flush dependencies in which the supplied
+ *              prefetched entry is the parent.  Note that the children
+ *              in these flush dependencies must be prefetched entries as
+ *              well.
+ *
+ *              As this action is part of the process of transferring all
+ *              such flush dependencies to the deserialized version of the
+ *              prefetched entry, ensure that the data necessary to complete
+ *              the transfer is retained.
+ * + * Note: The current implementation of this function is + * quite inefficient -- mostly due to the current + * implementation of flush dependencies. This should + * be fixed at some point. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 8/11/15 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr, + H5C_cache_entry_t **fd_children) +{ + H5C_cache_entry_t *entry_ptr; +#ifndef NDEBUG + unsigned entries_visited = 0; +#endif + int fd_children_found = 0; + hbool_t found; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(pf_entry_ptr); + HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(pf_entry_ptr->type); + HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); + HDassert(pf_entry_ptr->prefetched); + HDassert(pf_entry_ptr->fd_child_count > 0); + HDassert(fd_children); + + /* Scan each entry on the index list */ + entry_ptr = cache_ptr->il_head; + while (entry_ptr != NULL) { + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + + /* Here we look at entry_ptr->flush_dep_nparents and not + * entry_ptr->fd_parent_count as it is possible that some + * or all of the prefetched flush dependency child relationships + * have already been destroyed. + */ + if (entry_ptr->prefetched && (entry_ptr->flush_dep_nparents > 0)) { + unsigned u; /* Local index variable */ + + /* Re-init */ + u = 0; + found = FALSE; + + /* Sanity checks */ + HDassert(entry_ptr->type); + HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); + HDassert(entry_ptr->fd_parent_count >= entry_ptr->flush_dep_nparents); + HDassert(entry_ptr->fd_parent_addrs); + HDassert(entry_ptr->flush_dep_parent); + + /* Look for correct entry */ + while (!found && (u < entry_ptr->fd_parent_count)) { + /* Sanity check entry */ + HDassert(entry_ptr->flush_dep_parent[u]); + HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + + /* Correct entry? */ + if (pf_entry_ptr == entry_ptr->flush_dep_parent[u]) + found = TRUE; + + u++; + } /* end while */ + + if (found) { + HDassert(NULL == fd_children[fd_children_found]); + + /* Remove flush dependency */ + fd_children[fd_children_found] = entry_ptr; + fd_children_found++; + if (H5C_destroy_flush_dependency(pf_entry_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, + "can't destroy pf entry child flush dependency") + +#ifndef NDEBUG + /* Sanity check -- verify that the address of the parent + * appears in entry_ptr->fd_parent_addrs. Must do a search, + * as with flush dependency creates and destroys, + * entry_ptr->fd_parent_addrs and entry_ptr->flush_dep_parent + * can list parents in different order. 
+             */
+            found = FALSE;
+            u     = 0;
+            while (!found && u < entry_ptr->fd_parent_count) {
+                if (pf_entry_ptr->addr == entry_ptr->fd_parent_addrs[u])
+                    found = TRUE;
+                u++;
+            } /* end while */
+            HDassert(found);
+#endif
+            } /* end if */
+        } /* end if */
+
+#ifndef NDEBUG
+        entries_visited++;
+#endif
+        entry_ptr = entry_ptr->il_next;
+    } /* end while */
+
+    /* Post-op sanity checks */
+    HDassert(NULL == fd_children[fd_children_found]);
+    HDassert((unsigned)fd_children_found == pf_entry_ptr->fd_child_count);
+    HDassert(entries_visited == cache_ptr->index_len);
+    HDassert(!pf_entry_ptr->is_pinned);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__destroy_pf_entry_child_flush_deps() */
+
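H5C__deserialize_prefetched_entry() below passes this routine an fd_children array that it allocates with fd_child_count + 1 zero-filled slots, so the array can be filled densely here and later walked as a NULL-terminated list when the flush dependencies are recreated. A short standalone sketch of that allocation and iteration pattern, with hypothetical child_t and visit names rather than the H5C API:

    #include <stdlib.h>

    typedef struct child_t child_t; /* opaque in this sketch */

    static child_t **
    alloc_child_list(size_t count)
    {
        /* one extra, zero-initialized slot serves as the NULL terminator */
        return (child_t **)calloc(count + 1, sizeof(child_t *));
    }

    static void
    visit_children(child_t **children, void (*visit)(child_t *))
    {
        /* walk until the terminating NULL slot is reached */
        for (size_t i = 0; children[i] != NULL; i++)
            visit(children[i]);
    }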
+/*-------------------------------------------------------------------------
+ * Function:    H5C__deserialize_prefetched_entry()
+ *
+ * Purpose:     Deserialize the supplied prefetched entry, and return
+ *              a pointer to the deserialized entry in *entry_ptr_ptr.
+ *              If successful, remove the prefetched entry from the cache,
+ *              and free it.  Insert the deserialized entry into the cache.
+ *
+ *              Note that the on disk image of the entry is not freed --
+ *              a pointer to it is stored in the deserialized entry's
+ *              image_ptr field, and its image_up_to_date field is set to
+ *              TRUE unless the entry is dirtied by the deserialize call.
+ *
+ *              If the prefetched entry is a flush dependency child,
+ *              destroy that flush dependency prior to calling the
+ *              deserialize callback.  If appropriate, the flush dependency
+ *              relationship will be recreated by the cache client.
+ *
+ *              If the prefetched entry is a flush dependency parent,
+ *              destroy the flush dependency relationship with all its
+ *              children.  As all these children must be prefetched entries,
+ *              recreate these flush dependency relationships with the
+ *              deserialized entry after it is inserted in the cache.
+ *
+ *              Since deserializing a prefetched entry is semantically
+ *              equivalent to a load, issue an entry loaded notification
+ *              if the notify callback is defined.
+ *
+ * Return:      SUCCEED on success, and FAIL on failure.
+ *
+ *              Note that *entry_ptr_ptr is undefined on failure.
+ *
+ * Programmer:  John Mainzer, 8/10/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t **entry_ptr_ptr,
+                                  const H5C_class_t *type, haddr_t addr, void *udata)
+{
+    hbool_t dirty = FALSE;                  /* Flag indicating whether thing was
+                                             * dirtied during deserialize
+                                             */
+    size_t              len;                /* Size of image in file */
+    void               *thing = NULL;       /* Pointer to thing loaded */
+    H5C_cache_entry_t  *pf_entry_ptr;       /* pointer to the prefetched entry */
+                                            /* supplied in *entry_ptr_ptr.     */
+    H5C_cache_entry_t  *ds_entry_ptr;       /* Alias for thing loaded, as cache
+                                             * entry
+                                             */
+    H5C_cache_entry_t **fd_children = NULL; /* Pointer to a dynamically        */
+                                            /* allocated array of pointers to  */
+                                            /* the flush dependency children of*/
+                                            /* the prefetched entry, or NULL if*/
+                                            /* that array does not exist.      */
+    unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG);
+    int      i;
+    herr_t   ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_PACKAGE
+
+    /* sanity checks */
+    HDassert(f);
+    HDassert(f->shared);
+    HDassert(f->shared->cache);
+    HDassert(f->shared->cache == cache_ptr);
+    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+    HDassert(entry_ptr_ptr);
+    HDassert(*entry_ptr_ptr);
+    pf_entry_ptr = *entry_ptr_ptr;
+    HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+    HDassert(pf_entry_ptr->type);
+    HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+    HDassert(pf_entry_ptr->prefetched);
+    HDassert(pf_entry_ptr->image_up_to_date);
+    HDassert(pf_entry_ptr->image_ptr);
+    HDassert(pf_entry_ptr->size > 0);
+    HDassert(pf_entry_ptr->addr == addr);
+    HDassert(type);
+    HDassert(type->id == pf_entry_ptr->prefetch_type_id);
+    HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type);
+
+    /* verify absence of prohibited or unsupported type flag combinations */
+    HDassert(!(type->flags & H5C__CLASS_SKIP_READS));
+
+    /* Can't see how skip reads could be usefully combined with
+     * the speculative read flag.  Hence disallow.
+     */
+    HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
+    HDassert(H5F_addr_defined(addr));
+    HDassert(type->get_initial_load_size);
+    HDassert(type->deserialize);
+
+    /* if *pf_entry_ptr is a flush dependency child, destroy all such
+     * relationships now.  The client will restore the relationship(s) with
+     * the deserialized entry if appropriate.
+     */
+    HDassert(pf_entry_ptr->fd_parent_count == pf_entry_ptr->flush_dep_nparents);
+    for (i = (int)(pf_entry_ptr->fd_parent_count) - 1; i >= 0; i--) {
+        HDassert(pf_entry_ptr->flush_dep_parent);
+        HDassert(pf_entry_ptr->flush_dep_parent[i]);
+        HDassert(pf_entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+        HDassert(pf_entry_ptr->flush_dep_parent[i]->flush_dep_nchildren > 0);
+        HDassert(pf_entry_ptr->fd_parent_addrs);
+        HDassert(pf_entry_ptr->flush_dep_parent[i]->addr == pf_entry_ptr->fd_parent_addrs[i]);
+
+        if (H5C_destroy_flush_dependency(pf_entry_ptr->flush_dep_parent[i], pf_entry_ptr) < 0)
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry parent flush dependency")
+
+        pf_entry_ptr->fd_parent_addrs[i] = HADDR_UNDEF;
+    } /* end for */
+    HDassert(pf_entry_ptr->flush_dep_nparents == 0);
+
+    /* If *pf_entry_ptr is a flush dependency parent, destroy its flush
+     * dependency relationships with all its children (which must be
+     * prefetched entries as well).
+     *
+     * These flush dependency relationships will have to be restored
+     * after the deserialized entry is inserted into the cache in order
+     * to transfer these relationships to the new entry.  Hence save the
+     * pointers to the flush dependency children of *pf_entry_ptr for later
+     * use.
+ */ + if (pf_entry_ptr->fd_child_count > 0) { + if (NULL == (fd_children = (H5C_cache_entry_t **)H5MM_calloc( + sizeof(H5C_cache_entry_t **) * (size_t)(pf_entry_ptr->fd_child_count + 1)))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd child ptr array") + + if (H5C__destroy_pf_entry_child_flush_deps(cache_ptr, pf_entry_ptr, fd_children) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, + "can't destroy pf entry child flush dependency(s).") + } /* end if */ + + /* Since the size of the on disk image is known exactly, there is + * no need for either a call to the get_initial_load_size() callback, + * or retries if the H5C__CLASS_SPECULATIVE_LOAD_FLAG flag is set. + * Similarly, there is no need to clamp possible reads beyond + * EOF. + */ + len = pf_entry_ptr->size; + + /* Deserialize the prefetched on-disk image of the entry into the + * native memory form + */ + if (NULL == (thing = type->deserialize(pf_entry_ptr->image_ptr, len, udata, &dirty))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't deserialize image") + ds_entry_ptr = (H5C_cache_entry_t *)thing; + + /* In general, an entry should be clean just after it is loaded. + * + * However, when this code is used in the metadata cache, it is + * possible that object headers will be dirty at this point, as + * the deserialize function will alter object headers if necessary to + * fix an old bug. + * + * In the following assert: + * + * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) ); + * + * note that type ids 5 & 6 are associated with object headers in the + * metadata cache. + * + * When we get to using H5C for other purposes, we may wish to + * tighten up the assert so that the loophole only applies to the + * metadata cache. + * + * Note that at present, dirty can't be set to true with prefetched + * entries. However this may change, so include this functionality + * against that possibility. + * + * Also, note that it is possible for a prefetched entry to be dirty -- + * hence the value assigned to ds_entry_ptr->is_dirty below. 
+     */
+
+    HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6));
+
+    ds_entry_ptr->magic     = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+    ds_entry_ptr->cache_ptr = f->shared->cache;
+    ds_entry_ptr->addr      = addr;
+    ds_entry_ptr->size      = len;
+    HDassert(ds_entry_ptr->size < H5C_MAX_ENTRY_SIZE);
+    ds_entry_ptr->image_ptr        = pf_entry_ptr->image_ptr;
+    ds_entry_ptr->image_up_to_date = !dirty;
+    ds_entry_ptr->type             = type;
+    ds_entry_ptr->is_dirty         = dirty | pf_entry_ptr->is_dirty;
+    ds_entry_ptr->dirtied          = FALSE;
+    ds_entry_ptr->is_protected     = FALSE;
+    ds_entry_ptr->is_read_only     = FALSE;
+    ds_entry_ptr->ro_ref_count     = 0;
+    ds_entry_ptr->is_pinned        = FALSE;
+    ds_entry_ptr->in_slist         = FALSE;
+    ds_entry_ptr->flush_marker     = FALSE;
+#ifdef H5_HAVE_PARALLEL
+    ds_entry_ptr->clear_on_unprotect = FALSE;
+    ds_entry_ptr->flush_immediately  = FALSE;
+    ds_entry_ptr->coll_access        = FALSE;
+#endif /* H5_HAVE_PARALLEL */
+    ds_entry_ptr->flush_in_progress   = FALSE;
+    ds_entry_ptr->destroy_in_progress = FALSE;
+
+    ds_entry_ptr->ring = pf_entry_ptr->ring;
+
+    /* Initialize flush dependency height fields */
+    ds_entry_ptr->flush_dep_parent          = NULL;
+    ds_entry_ptr->flush_dep_nparents        = 0;
+    ds_entry_ptr->flush_dep_parent_nalloc   = 0;
+    ds_entry_ptr->flush_dep_nchildren       = 0;
+    ds_entry_ptr->flush_dep_ndirty_children = 0;
+    ds_entry_ptr->flush_dep_nunser_children = 0;
+
+    /* Initialize fields supporting the hash table: */
+    ds_entry_ptr->ht_next = NULL;
+    ds_entry_ptr->ht_prev = NULL;
+    ds_entry_ptr->il_next = NULL;
+    ds_entry_ptr->il_prev = NULL;
+
+    /* Initialize fields supporting replacement policies: */
+    ds_entry_ptr->next = NULL;
+    ds_entry_ptr->prev = NULL;
+#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+    ds_entry_ptr->aux_next = NULL;
+    ds_entry_ptr->aux_prev = NULL;
+#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+#ifdef H5_HAVE_PARALLEL
+    pf_entry_ptr->coll_next = NULL;
+    pf_entry_ptr->coll_prev = NULL;
+#endif /* H5_HAVE_PARALLEL */
+
+    /* Initialize cache image related fields */
+    ds_entry_ptr->include_in_image     = FALSE;
+    ds_entry_ptr->lru_rank             = 0;
+    ds_entry_ptr->image_dirty          = FALSE;
+    ds_entry_ptr->fd_parent_count      = 0;
+    ds_entry_ptr->fd_parent_addrs      = NULL;
+    ds_entry_ptr->fd_child_count       = pf_entry_ptr->fd_child_count;
+    ds_entry_ptr->fd_dirty_child_count = 0;
+    ds_entry_ptr->image_fd_height      = 0;
+    ds_entry_ptr->prefetched           = FALSE;
+    ds_entry_ptr->prefetch_type_id     = 0;
+    ds_entry_ptr->age                  = 0;
+    ds_entry_ptr->prefetched_dirty     = pf_entry_ptr->prefetched_dirty;
+#ifndef NDEBUG /* debugging field */
+    ds_entry_ptr->serialization_count = 0;
+#endif
+
+    H5C__RESET_CACHE_ENTRY_STATS(ds_entry_ptr);
+
+    /* Apply the tag to the newly deserialized entry */
+    if (H5C__tag_entry(cache_ptr, ds_entry_ptr) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
+
+    /* We have successfully deserialized the prefetched entry.
+     *
+     * Before we return a pointer to the deserialized entry, we must remove
+     * the prefetched entry from the cache, discard it, and replace it with
+     * the deserialized entry.  Note that we do not free the prefetched
+     * entry's image, as that has been transferred to the deserialized
+     * entry.
+     *
+     * Also note that we have not yet restored any flush dependencies.  This
+     * must wait until the deserialized entry is inserted in the cache.
+     *
+     * To delete the prefetched entry from the cache:
+     *
+     *  1) Set pf_entry_ptr->image_ptr to NULL.  Since we have already
+     *     transferred the buffer containing the image to *ds_entry_ptr,
+     *     this is not a memory leak.
+     *
+     *  2) Call H5C__flush_single_entry() with the H5C__FLUSH_INVALIDATE_FLAG
+     *     and H5C__FLUSH_CLEAR_ONLY_FLAG flags set.
+     */
+    pf_entry_ptr->image_ptr = NULL;
+
+    if (pf_entry_ptr->is_dirty) {
+        HDassert(((cache_ptr->slist_enabled) && (pf_entry_ptr->in_slist)) ||
+                 ((!cache_ptr->slist_enabled) && (!pf_entry_ptr->in_slist)));
+
+        flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
+    } /* end if */
+
+    if (H5C__flush_single_entry(f, pf_entry_ptr, flush_flags) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge prefetched entry")
+
+#ifndef NDEBUG /* verify deletion */
+    H5C__SEARCH_INDEX(cache_ptr, addr, pf_entry_ptr, FAIL);
+
+    HDassert(NULL == pf_entry_ptr);
+#endif
+
+    /* Insert the deserialized entry into the cache.  */
+    H5C__INSERT_IN_INDEX(cache_ptr, ds_entry_ptr, FAIL)
+
+    HDassert(!ds_entry_ptr->in_slist);
+    if (ds_entry_ptr->is_dirty)
+        H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, ds_entry_ptr, FAIL)
+
+    H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, ds_entry_ptr, FAIL)
+
+    /* Deserializing a prefetched entry is the conceptual equivalent of
+     * loading it from file.  If the deserialized entry has a notify callback,
+     * send an "after load" notice now that the deserialized entry is fully
+     * integrated into the cache.
+     */
+    if (ds_entry_ptr->type->notify &&
+        (ds_entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, ds_entry_ptr) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry loaded into cache")
+
+    /* Restore flush dependencies with the flush dependency children
+     * of the prefetched entry.  Note that we must protect *ds_entry_ptr
+     * before the call to avoid triggering sanity check failures, and
+     * then unprotect it afterwards.
+     */
+    i = 0;
+    if (fd_children != NULL) {
+        H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, ds_entry_ptr, FAIL)
+        ds_entry_ptr->is_protected = TRUE;
+        while (fd_children[i] != NULL) {
+            /* Sanity checks */
+            HDassert((fd_children[i])->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+            HDassert((fd_children[i])->prefetched);
+            HDassert((fd_children[i])->fd_parent_count > 0);
+            HDassert((fd_children[i])->fd_parent_addrs);
+
+#ifndef NDEBUG
+            {
+                int     j;
+                hbool_t found;
+
+                j     = 0;
+                found = FALSE;
+                while ((j < (int)((fd_children[i])->fd_parent_count)) && (!found)) {
+                    if ((fd_children[i])->fd_parent_addrs[j] == ds_entry_ptr->addr)
+                        found = TRUE;
+
+                    j++;
+                } /* end while */
+                HDassert(found);
+            }
+#endif
+
+            if (H5C_create_flush_dependency(ds_entry_ptr, fd_children[i]) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore child flush dependency")
+
+            i++;
+        } /* end while */
+
+        H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, ds_entry_ptr, FAIL);
+        ds_entry_ptr->is_protected = FALSE;
+    } /* end if ( fd_children != NULL ) */
+    HDassert((unsigned)i == ds_entry_ptr->fd_child_count);
+
+    ds_entry_ptr->fd_child_count = 0;
+    H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr)
+
+    /* finally, pass ds_entry_ptr back to the caller */
+    *entry_ptr_ptr = ds_entry_ptr;
+
+done:
+    if (fd_children)
+        fd_children = (H5C_cache_entry_t **)H5MM_xfree((void *)fd_children);
+
+    /* Release resources on error */
+    if (FAIL == ret_value)
+        if (thing && type->free_icr(thing) < 0)
+            HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__deserialize_prefetched_entry() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5C_insert_entry
+ *
+ * Purpose:     Adds the specified thing to the cache.
The thing need not + * exist on disk yet, but it must have an address and disk + * space reserved. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 6/2/04 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags) +{ + H5C_t *cache_ptr; + H5AC_ring_t ring = H5C_RING_UNDEFINED; + hbool_t insert_pinned; + hbool_t flush_last; +#ifdef H5_HAVE_PARALLEL + hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */ +#endif /* H5_HAVE_PARALLEL */ + hbool_t set_flush_marker; + hbool_t write_permitted = TRUE; + size_t empty_space; + H5C_cache_entry_t *entry_ptr = NULL; + H5C_cache_entry_t *test_entry_ptr; + hbool_t entry_tagged = FALSE; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + HDassert(f); + HDassert(f->shared); + + cache_ptr = f->shared->cache; + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(type); + HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); + HDassert(type->image_len); + HDassert(H5F_addr_defined(addr)); + HDassert(thing); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + /* no need to verify that entry is not already in the index as */ + /* we already make that check below. */ + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0); + insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0); + flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0); + + /* Get the ring type from the API context */ + ring = H5CX_get_ring(); + + entry_ptr = (H5C_cache_entry_t *)thing; + + /* verify that the new entry isn't already in the hash table -- scream + * and die if it is. 
+ */ + + H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) + + if (test_entry_ptr != NULL) { + if (test_entry_ptr == entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache") + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache") + } /* end if */ + + entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; + entry_ptr->cache_ptr = cache_ptr; + entry_ptr->addr = addr; + entry_ptr->type = type; + + entry_ptr->image_ptr = NULL; + entry_ptr->image_up_to_date = FALSE; + + entry_ptr->is_protected = FALSE; + entry_ptr->is_read_only = FALSE; + entry_ptr->ro_ref_count = 0; + + entry_ptr->is_pinned = insert_pinned; + entry_ptr->pinned_from_client = insert_pinned; + entry_ptr->pinned_from_cache = FALSE; + entry_ptr->flush_me_last = flush_last; + + /* newly inserted entries are assumed to be dirty */ + entry_ptr->is_dirty = TRUE; + + /* not protected, so can't be dirtied */ + entry_ptr->dirtied = FALSE; + + /* Retrieve the size of the thing */ + if ((type->image_len)(thing, &(entry_ptr->size)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "can't get size of thing") + HDassert(entry_ptr->size > 0 && entry_ptr->size < H5C_MAX_ENTRY_SIZE); + + entry_ptr->in_slist = FALSE; + +#ifdef H5_HAVE_PARALLEL + entry_ptr->clear_on_unprotect = FALSE; + entry_ptr->flush_immediately = FALSE; +#endif /* H5_HAVE_PARALLEL */ + + entry_ptr->flush_in_progress = FALSE; + entry_ptr->destroy_in_progress = FALSE; + + entry_ptr->ring = ring; + + /* Initialize flush dependency fields */ + entry_ptr->flush_dep_parent = NULL; + entry_ptr->flush_dep_nparents = 0; + entry_ptr->flush_dep_parent_nalloc = 0; + entry_ptr->flush_dep_nchildren = 0; + entry_ptr->flush_dep_ndirty_children = 0; + entry_ptr->flush_dep_nunser_children = 0; + + entry_ptr->ht_next = NULL; + entry_ptr->ht_prev = NULL; + entry_ptr->il_next = NULL; + entry_ptr->il_prev = NULL; + + entry_ptr->next = NULL; + entry_ptr->prev = NULL; + +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + entry_ptr->aux_next = NULL; + entry_ptr->aux_prev = NULL; +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + +#ifdef H5_HAVE_PARALLEL + entry_ptr->coll_next = NULL; + entry_ptr->coll_prev = NULL; +#endif /* H5_HAVE_PARALLEL */ + + /* initialize cache image related fields */ + entry_ptr->include_in_image = FALSE; + entry_ptr->lru_rank = 0; + entry_ptr->image_dirty = FALSE; + entry_ptr->fd_parent_count = 0; + entry_ptr->fd_parent_addrs = NULL; + entry_ptr->fd_child_count = 0; + entry_ptr->fd_dirty_child_count = 0; + entry_ptr->image_fd_height = 0; + entry_ptr->prefetched = FALSE; + entry_ptr->prefetch_type_id = 0; + entry_ptr->age = 0; + entry_ptr->prefetched_dirty = FALSE; +#ifndef NDEBUG /* debugging field */ + entry_ptr->serialization_count = 0; +#endif + + /* initialize tag list fields */ + entry_ptr->tl_next = NULL; + entry_ptr->tl_prev = NULL; + entry_ptr->tag_info = NULL; + + /* Apply tag to newly inserted entry */ + if (H5C__tag_entry(cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry") + entry_tagged = TRUE; + + H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) + + if (cache_ptr->flash_size_increase_possible && + (entry_ptr->size > cache_ptr->flash_size_increase_threshold)) + if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed") + + if (cache_ptr->index_size >= cache_ptr->max_cache_size) + empty_space = 0; + else + empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; + + if 
(cache_ptr->evictions_enabled && + (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) || + (((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)))) { + size_t space_needed; + + if (empty_space <= entry_ptr->size) + cache_ptr->cache_full = TRUE; + + if (cache_ptr->check_write_permitted != NULL) { + if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted") + } /* end if */ + else + write_permitted = cache_ptr->write_permitted; + + HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE); + space_needed = entry_ptr->size; + if (space_needed > cache_ptr->max_cache_size) + space_needed = cache_ptr->max_cache_size; + + /* Note that space_needed is just the amount of space that + * needed to insert the new entry without exceeding the cache + * size limit. The subsequent call to H5C__make_space_in_cache() + * may evict the entries required to free more or less space + * depending on conditions. It MAY be less if the cache is + * currently undersized, or more if the cache is oversized. + * + * The cache can exceed its maximum size limit via the following + * mechanisms: + * + * First, it is possible for the cache to grow without + * bound as long as entries are protected and not unprotected. + * + * Second, when writes are not permitted it is also possible + * for the cache to grow without bound. + * + * Finally, we usually don't check to see if the cache is + * oversized at the end of an unprotect. As a result, it is + * possible to have a vastly oversized cache with no protected + * entries as long as all the protects precede the unprotects. + */ + + if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed") + } /* end if */ + + H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL) + + /* New entries are presumed to be dirty */ + HDassert(entry_ptr->is_dirty); + entry_ptr->flush_marker = set_flush_marker; + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) + H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL) + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + /* If the entry's type has a 'notify' callback send a 'after insertion' + * notice now that the entry is fully integrated into the cache. 
+     */
+    if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_INSERT, entry_ptr) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry inserted into cache")
+
+    H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
+
+#ifdef H5_HAVE_PARALLEL
+    if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
+        coll_access = H5F_get_coll_metadata_reads(f);
+
+    entry_ptr->coll_access = coll_access;
+    if (coll_access) {
+        H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, FAIL)
+
+        /* Make sure the size of the collective entries in the cache remains in check */
+        if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) {
+            if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) {
+                if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
+            } /* end if */
+        } /* end if */
+        else {
+            if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) {
+                if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
+            } /* end if */
+        } /* end else */
+    } /* end if */
+#endif
+
+done:
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+    if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+        H5C__validate_lru_list(cache_ptr) < 0)
+        HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+    if (ret_value < 0 && entry_tagged)
+        if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
+            HDONE_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_insert_entry() */
+
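The eviction trigger used in H5C_insert_entry() above is easier to follow when the arithmetic is pulled out on its own. A simplified standalone restatement (hypothetical function name; it omits the evictions_enabled gate and the flash-size-increase path):

    #include <stdbool.h>
    #include <stddef.h>

    static bool
    insert_needs_space(size_t index_size, size_t clean_index_size, size_t max_cache_size,
                       size_t min_clean_size, size_t entry_size)
    {
        size_t empty_space = (index_size >= max_cache_size) ? 0 : max_cache_size - index_size;

        /* make space if the new entry would overflow the cache, or if free
         * space plus clean (cheaply evictable) entries would fall below the
         * configured minimum clean size
         */
        return (index_size + entry_size > max_cache_size) ||
               (empty_space + clean_index_size < min_clean_size);
    }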
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  John Mainzer
+ *              5/15/06
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_mark_entry_dirty(void *thing)
+{
+    H5C_t             *cache_ptr;
+    H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing;
+    herr_t             ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(entry_ptr);
+    HDassert(H5F_addr_defined(entry_ptr->addr));
+    cache_ptr = entry_ptr->cache_ptr;
+    HDassert(cache_ptr);
+    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+    if (entry_ptr->is_protected) {
+        HDassert(!((entry_ptr)->is_read_only));
+
+        /* set the dirtied flag */
+        entry_ptr->dirtied = TRUE;
+
+        /* reset image_up_to_date */
+        if (entry_ptr->image_up_to_date) {
+            entry_ptr->image_up_to_date = FALSE;
+
+            if (entry_ptr->flush_dep_nparents > 0)
+                if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+                                "Can't propagate serialization status to fd parents")
+        } /* end if */
+    } /* end if */
+    else if (entry_ptr->is_pinned) {
+        hbool_t was_clean; /* Whether the entry was previously clean */
+        hbool_t image_was_up_to_date;
+
+        /* Remember previous dirty status */
+        was_clean = !entry_ptr->is_dirty;
+
+        /* Check if image is up to date */
+        image_was_up_to_date = entry_ptr->image_up_to_date;
+
+        /* Mark the entry as dirty if it isn't already */
+        entry_ptr->is_dirty         = TRUE;
+        entry_ptr->image_up_to_date = FALSE;
+
+        /* Modify cache data structures */
+        if (was_clean)
+            H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL)
+        if (!entry_ptr->in_slist)
+            H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+
+        /* Update stats for entry being marked dirty */
+        H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
+
+        /* Check for entry changing status and do notifications, etc. */
+        if (was_clean) {
+            /* If the entry's type has a 'notify' callback send a 'entry dirtied'
+             * notice now that the entry is fully integrated into the cache.
+             */
+            if (entry_ptr->type->notify &&
+                (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
+
+            /* Propagate the dirty flag up the flush dependency chain if appropriate */
+            if (entry_ptr->flush_dep_nparents > 0)
+                if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+        } /* end if */
+        if (image_was_up_to_date)
+            if (entry_ptr->flush_dep_nparents > 0)
+                if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+                                "Can't propagate serialization status to fd parents")
+    } /* end if */
+    else
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is neither pinned nor protected??")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_mark_entry_dirty() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5C_mark_entry_clean
+ *
+ * Purpose:     Mark a pinned entry as clean. The target entry MUST be pinned.
+ *
+ *              If the entry is not already clean, the function marks the
+ *              entry clean and removes it from the skip list.
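+ *
+ *              Note that, as the code below shows, marking an entry clean
+ *              also resets its flush_marker flag, since a clean entry has
+ *              nothing left to flush.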
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * 7/23/16 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_mark_entry_clean(void *_thing) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(entry_ptr); + HDassert(H5F_addr_defined(entry_ptr->addr)); + cache_ptr = entry_ptr->cache_ptr; + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + /* Operate on pinned entry */ + if (entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "entry is protected") + else if (entry_ptr->is_pinned) { + hbool_t was_dirty; /* Whether the entry was previously dirty */ + + /* Remember previous dirty status */ + was_dirty = entry_ptr->is_dirty; + + /* Mark the entry as clean if it isn't already */ + entry_ptr->is_dirty = FALSE; + + /* Also reset the 'flush_marker' flag, since the entry shouldn't be flushed now */ + entry_ptr->flush_marker = FALSE; + + /* Modify cache data structures */ + if (was_dirty) + H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL) + if (entry_ptr->in_slist) + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) + + /* Update stats for entry being marked clean */ + H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) + + /* Check for entry changing status and do notifications, etc. */ + if (was_dirty) { + /* If the entry's type has a 'notify' callback send a 'entry cleaned' + * notice now that the entry is fully integrated into the cache. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify client about entry dirty flag cleared") + + /* Propagate the clean up the flush dependency chain, if appropriate */ + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_clean(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean") + } /* end if */ + } /* end if */ + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Entry is not pinned??") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_mark_entry_clean() */ + +/*------------------------------------------------------------------------- + * Function: H5C_mark_entry_unserialized + * + * Purpose: Mark a pinned or protected entry as unserialized. The target + * entry MUST be either pinned or protected, and MAY be both. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * 12/23/16 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_mark_entry_unserialized(void *thing) +{ + H5C_cache_entry_t *entry = (H5C_cache_entry_t *)thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(entry); + HDassert(H5F_addr_defined(entry->addr)); + + if (entry->is_protected || entry->is_pinned) { + HDassert(!entry->is_read_only); + + /* Reset image_up_to_date */ + if (entry->image_up_to_date) { + entry->image_up_to_date = FALSE; + + if (entry->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_unserialized(entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, + "Can't propagate serialization status to fd parents") + } /* end if */ + } /* end if */ + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL, + "Entry to unserialize is neither pinned nor protected??") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_mark_entry_unserialized() */ + +/*------------------------------------------------------------------------- + * Function: H5C_mark_entry_serialized + * + * Purpose: Mark a pinned entry as serialized. The target entry MUST be + * pinned. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * 12/23/16 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_mark_entry_serialized(void *_thing) +{ + H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(entry); + HDassert(H5F_addr_defined(entry->addr)); + + /* Operate on pinned entry */ + if (entry->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "entry is protected") + else if (entry->is_pinned) { + /* Check for entry changing status and do notifications, etc. */ + if (!entry->image_up_to_date) { + /* Set the image_up_to_date flag */ + entry->image_up_to_date = TRUE; + + /* Propagate the serialize up the flush dependency chain, if appropriate */ + if (entry->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_serialized(entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, + "Can't propagate flush dep serialize") + } /* end if */ + } /* end if */ + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "Entry is not pinned??") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_mark_entry_serialized() */ + +/*------------------------------------------------------------------------- + * Function: H5C_move_entry + * + * Purpose: Use this function to notify the cache that an entry's + * file address changed. 
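+ *
+ *              A minimal, hypothetical usage sketch (`old_addr` and
+ *              `new_addr` are placeholders; in practice the new address
+ *              comes from the file space allocation code):
+ *
+ *                  if (H5C_move_entry(f->shared->cache, type, old_addr, new_addr) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move entry")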
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 6/2/04 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, haddr_t new_addr) +{ + H5C_cache_entry_t *entry_ptr = NULL; + H5C_cache_entry_t *test_entry_ptr = NULL; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(type); + HDassert(H5F_addr_defined(old_addr)); + HDassert(H5F_addr_defined(new_addr)); + HDassert(H5F_addr_ne(old_addr, new_addr)); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL) + + if (entry_ptr == NULL || entry_ptr->type != type) + /* the old item doesn't exist in the cache, so we are done. */ + HGOTO_DONE(SUCCEED) + + HDassert(entry_ptr->addr == old_addr); + HDassert(entry_ptr->type == type); + + /* Check for R/W status, otherwise error */ + /* (Moving a R/O entry would mark it dirty, which shouldn't + * happen. QAK - 2016/12/02) + */ + if (entry_ptr->is_read_only) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move R/O entry") + + H5C__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL) + + if (test_entry_ptr != NULL) { /* we are hosed */ + if (test_entry_ptr->type == type) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "target already moved & reinserted???") + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "new address already in use?") + } /* end if */ + + /* If we get this far we have work to do. Remove *entry_ptr from + * the hash table (and skip list if necessary), change its address to the + * new address, mark it as dirty (if it isn't already) and then re-insert. + * + * Update the replacement policy for a hit to avoid an eviction before + * the moved entry is touched. Update stats for a move. + * + * Note that we do not check the size of the cache, or evict anything. + * Since this is a simple re-name, cache size should be unaffected. + * + * Check to see if the target entry is in the process of being destroyed + * before we delete from the index, etc. If it is, all we do is + * change the addr. If the entry is only in the process of being flushed, + * don't mark it as dirty either, lest we confuse the flush call back. 
+ */ + if (!entry_ptr->destroy_in_progress) { + H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL) + + if (entry_ptr->in_slist) { + HDassert(cache_ptr->slist_ptr); + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL) + } /* end if */ + } /* end if */ + + entry_ptr->addr = new_addr; + + if (!entry_ptr->destroy_in_progress) { + hbool_t was_dirty; /* Whether the entry was previously dirty */ + + /* Remember previous dirty status */ + was_dirty = entry_ptr->is_dirty; + + /* Mark the entry as dirty if it isn't already */ + entry_ptr->is_dirty = TRUE; + + /* This shouldn't be needed, but it keeps the test code happy */ + if (entry_ptr->image_up_to_date) { + entry_ptr->image_up_to_date = FALSE; + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "Can't propagate serialization status to fd parents") + } /* end if */ + + /* Modify cache data structures */ + H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL) + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) + + /* Skip some actions if we're in the middle of flushing the entry */ + if (!entry_ptr->flush_in_progress) { + /* Update the replacement policy for the entry */ + H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL) + + /* Check for entry changing status and do notifications, etc. */ + if (!was_dirty) { + /* If the entry's type has a 'notify' callback send a 'entry dirtied' + * notice now that the entry is fully integrated into the cache. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify client about entry dirty flag set") + + /* Propagate the dirty flag up the flush dependency chain if appropriate */ + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, + "Can't propagate flush dep dirty flag") + } /* end if */ + } /* end if */ + } /* end if */ + + H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_move_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C_resize_entry + * + * Purpose: Resize a pinned or protected entry. + * + * Resizing an entry dirties it, so if the entry is not + * already dirty, the function places the entry on the + * skip list. 
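+ *
+ *              A minimal, hypothetical usage sketch (`entry` and `new_size`
+ *              are placeholders for a client's entry pointer and the size
+ *              of its updated on-disk image):
+ *
+ *                  if (H5C_resize_entry(entry, new_size) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "can't resize entry")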
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 7/5/06 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_resize_entry(void *thing, size_t new_size) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(entry_ptr); + HDassert(H5F_addr_defined(entry_ptr->addr)); + cache_ptr = entry_ptr->cache_ptr; + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + /* Check for usage errors */ + if (new_size <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive") + if (!(entry_ptr->is_pinned || entry_ptr->is_protected)) + HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??") + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + /* update for change in entry size if necessary */ + if (entry_ptr->size != new_size) { + hbool_t was_clean; + + /* make note of whether the entry was clean to begin with */ + was_clean = !entry_ptr->is_dirty; + + /* mark the entry as dirty if it isn't already */ + entry_ptr->is_dirty = TRUE; + + /* Reset the image up-to-date status */ + if (entry_ptr->image_up_to_date) { + entry_ptr->image_up_to_date = FALSE; + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "Can't propagate serialization status to fd parents") + } /* end if */ + + /* Release the current image */ + if (entry_ptr->image_ptr) + entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); + + /* do a flash cache size increase if appropriate */ + if (cache_ptr->flash_size_increase_possible) { + if (new_size > entry_ptr->size) { + size_t size_increase; + + size_increase = new_size - entry_ptr->size; + if (size_increase >= cache_ptr->flash_size_increase_threshold) + if (H5C__flash_increase_cache_size(cache_ptr, entry_ptr->size, new_size) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "flash cache increase failed") + } + } + + /* update the pinned and/or protected entry list */ + if (entry_ptr->is_pinned) + H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pel_len, cache_ptr->pel_size, entry_ptr->size, + new_size, FAIL) + if (entry_ptr->is_protected) + H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pl_len, cache_ptr->pl_size, entry_ptr->size, new_size, + FAIL) + +#ifdef H5_HAVE_PARALLEL + if (entry_ptr->coll_access) + H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->coll_list_len, cache_ptr->coll_list_size, + entry_ptr->size, new_size, FAIL) +#endif /* H5_HAVE_PARALLEL */ + + /* update statistics just before changing the entry size */ + H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size); + + /* update the hash table */ + H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size, entry_ptr, was_clean, FAIL); + + /* if the entry is in the skip list, update that too */ + if (entry_ptr->in_slist) + H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size); + + /* finally, update the entry size proper */ + entry_ptr->size = new_size; + + if (!entry_ptr->in_slist) + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) + + if (entry_ptr->is_pinned) + 
H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) + + /* Check for entry changing status and do notifications, etc. */ + if (was_clean) { + /* If the entry's type has a 'notify' callback send a 'entry dirtied' + * notice now that the entry is fully integrated into the cache. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set") + + /* Propagate the dirty flag up the flush dependency chain if appropriate */ + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") + } /* end if */ + } /* end if */ + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_resize_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C_pin_protected_entry() + * + * Purpose: Pin a protected cache entry. The entry must be protected + * at the time of call, and must be unpinned. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 4/26/06 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_pin_protected_entry(void *thing) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; /* Pointer to entry to pin */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(entry_ptr); + HDassert(H5F_addr_defined(entry_ptr->addr)); + cache_ptr = entry_ptr->cache_ptr; + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + /* Only protected entries can be pinned */ + if (!entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry isn't protected") + + /* Pin the entry from a client */ + if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_pin_protected_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C_protect + * + * Purpose: If the target entry is not in the cache, load it. If + * necessary, attempt to evict one or more entries to keep + * the cache within its maximum size. + * + * Mark the target entry as protected, and return its address + * to the caller. The caller must call H5C_unprotect() when + * finished with the entry. 
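+ *
+ *              A minimal, hypothetical sketch of the protect/unprotect
+ *              pairing (`type`, `addr`, and `udata` are placeholders for a
+ *              client's class, entry address, and callback data):
+ *
+ *                  if (NULL == (thing = H5C_protect(f, type, addr, udata, H5C__READ_ONLY_FLAG)))
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "can't protect entry")
+ *                  ... examine the entry via thing ...
+ *                  if (H5C_unprotect(f, addr, thing, H5C__NO_FLAGS_SET) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't unprotect entry")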
+ * + * While it is protected, the entry may not be either evicted + * or flushed -- nor may it be accessed by another call to + * H5C_protect. Any attempt to do so will result in a failure. + * + * Return: Success: Ptr to the desired entry + * Failure: NULL + * + * Programmer: John Mainzer - 6/2/04 + * + *------------------------------------------------------------------------- + */ +void * +H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsigned flags) +{ + H5C_t *cache_ptr; + H5AC_ring_t ring = H5C_RING_UNDEFINED; + hbool_t hit; + hbool_t have_write_permitted = FALSE; + hbool_t read_only = FALSE; + hbool_t flush_last; +#ifdef H5_HAVE_PARALLEL + hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */ +#endif /* H5_HAVE_PARALLEL */ + hbool_t write_permitted = FALSE; + hbool_t was_loaded = FALSE; /* Whether the entry was loaded as a result of the protect */ + size_t empty_space; + void *thing; + H5C_cache_entry_t *entry_ptr; + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_NOAPI(NULL) + + /* check args */ + HDassert(f); + HDassert(f->shared); + + cache_ptr = f->shared->cache; + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(type); + HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); + HDassert(H5F_addr_defined(addr)); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + /* Load the cache image, if requested */ + if (cache_ptr->load_image) { + cache_ptr->load_image = FALSE; + if (H5C__load_cache_image(f) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't load cache image") + } /* end if */ + + read_only = ((flags & H5C__READ_ONLY_FLAG) != 0); + flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0); + + /* Get the ring type from the API context */ + ring = H5CX_get_ring(); + +#ifdef H5_HAVE_PARALLEL + if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) + coll_access = H5F_get_coll_metadata_reads(f); +#endif /* H5_HAVE_PARALLEL */ + + /* first check to see if the target is in cache */ + H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL) + + if (entry_ptr != NULL) { + if (entry_ptr->ring != ring) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry") + + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + + if (entry_ptr->prefetched) { + /* This call removes the prefetched entry from the cache, + * and replaces it with an entry deserialized from the + * image of the prefetched entry. + */ + if (H5C__deserialize_prefetched_entry(f, cache_ptr, &entry_ptr, type, addr, udata) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry") + + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(!entry_ptr->prefetched); + HDassert(entry_ptr->addr == addr); + } /* end if */ + + /* Check for trying to load the wrong type of entry from an address */ + if (entry_ptr->type != type) + HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type") + +#ifdef H5_HAVE_PARALLEL + /* If this is a collective metadata read, the entry is not marked as + * collective, and is clean, it is possible that other processes will + * not have it in its cache and will expect a bcast of the entry from + * process 0. 
So process 0 will bcast the entry to all other ranks. + * Ranks that _do_ have the entry in their cache still have to + * participate in the bcast. + */ + if (coll_access) { + if (!entry_ptr->is_dirty && !entry_ptr->coll_access) { + MPI_Comm comm; /* File MPI Communicator */ + int mpi_code; /* MPI error code */ + int buf_size; + + if (MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f))) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed") + + if (entry_ptr->image_ptr == NULL) { + int mpi_rank; + + if ((mpi_rank = H5F_mpi_get_rank(f)) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank") + + if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, + "memory allocation failed for on disk image buffer") +#if H5C_DO_MEMORY_SANITY_CHECKS + H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, + H5C_IMAGE_EXTRA_SPACE); +#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ + if (0 == mpi_rank && H5C__generate_image(f, cache_ptr, entry_ptr) < 0) + /* If image generation fails, push an error but + * still participate in the following MPI_Bcast + */ + HDONE_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image") + } /* end if */ + HDassert(entry_ptr->image_ptr); + + H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t); + if (MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm))) + HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) + + /* Mark the entry as collective and insert into the collective list */ + entry_ptr->coll_access = TRUE; + H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL) + } /* end if */ + else if (entry_ptr->coll_access) + H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL) + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + +#ifdef H5C_DO_TAGGING_SANITY_CHECKS + { + /* Verify tag value */ + if (cache_ptr->ignore_tags != TRUE) { + haddr_t tag; /* Tag value */ + + /* The entry is already in the cache, but make sure that the tag value + * is still legal. This will ensure that had the entry NOT been in the + * cache, tagging was still set up correctly and it would have received + * a legal tag value after getting loaded from disk. + */ + + /* Get the tag */ + tag = H5CX_get_tag(); + + if (H5C_verify_tag(entry_ptr->type->id, tag) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed") + } /* end if */ + } +#endif + + hit = TRUE; + thing = (void *)entry_ptr; + } + else { + /* must try to load the entry from disk. */ + hit = FALSE; + if (NULL == (thing = H5C__load_entry(f, +#ifdef H5_HAVE_PARALLEL + coll_access, +#endif /* H5_HAVE_PARALLEL */ + type, addr, udata))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry") + + entry_ptr = (H5C_cache_entry_t *)thing; + cache_ptr->entries_loaded_counter++; + + entry_ptr->ring = ring; +#ifdef H5_HAVE_PARALLEL + if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access) + H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL) +#endif /* H5_HAVE_PARALLEL */ + + /* Apply tag to newly protected entry */ + if (H5C__tag_entry(cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry") + + /* If the entry is very large, and we are configured to allow it, + * we may wish to perform a flash cache size increase. 
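+         *
+         * (Illustration with made-up numbers: if flash_size_increase_threshold
+         * is 1 MB and the newly loaded entry is 2 MB, the call below grows
+         * the maximum cache size before the usual eviction logic runs.)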
+         */
+        if (cache_ptr->flash_size_increase_possible &&
+            (entry_ptr->size > cache_ptr->flash_size_increase_threshold))
+            if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed")
+
+        if (cache_ptr->index_size >= cache_ptr->max_cache_size)
+            empty_space = 0;
+        else
+            empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
+        /* try to free up space if necessary and if evictions are permitted.  Note
+         * that if evictions are enabled, we will call H5C__make_space_in_cache()
+         * whenever the minimum clean space requirement is not met, even if the
+         * cache is not full.
+         */
+        if (cache_ptr->evictions_enabled &&
+            (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) ||
+             ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size))) {
+
+            size_t space_needed;
+
+            if (empty_space <= entry_ptr->size)
+                cache_ptr->cache_full = TRUE;
+
+            if (cache_ptr->check_write_permitted != NULL) {
+                if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1")
+                else
+                    have_write_permitted = TRUE;
+            } /* end if */
+            else {
+                write_permitted      = cache_ptr->write_permitted;
+                have_write_permitted = TRUE;
+            } /* end else */
+
+            HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
+            space_needed = entry_ptr->size;
+            if (space_needed > cache_ptr->max_cache_size)
+                space_needed = cache_ptr->max_cache_size;
+
+            /* Note that space_needed is just the amount of space that is
+             * needed to insert the new entry without exceeding the cache
+             * size limit.  The subsequent call to H5C__make_space_in_cache()
+             * may evict the entries required to free more or less space
+             * depending on conditions.  It MAY be less if the cache is
+             * currently undersized, or more if the cache is oversized.
+             *
+             * The cache can exceed its maximum size limit via the following
+             * mechanisms:
+             *
+             * First, it is possible for the cache to grow without
+             * bound as long as entries are protected and not unprotected.
+             *
+             * Second, when writes are not permitted it is also possible
+             * for the cache to grow without bound.
+             *
+             * Third, the user may choose to disable evictions -- causing
+             * the cache to grow without bound until evictions are
+             * re-enabled.
+             *
+             * Finally, we usually don't check to see if the cache is
+             * oversized at the end of an unprotect.  As a result, it is
+             * possible to have a vastly oversized cache with no protected
+             * entries as long as all the protects precede the unprotects.
+             */
+            if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
+        } /* end if */
+
+        /* Insert the entry in the hash table.
+         *
+         * *******************************************
+         *
+         * Set the flush_me_last field
+         * of the newly loaded entry before inserting it into the
+         * index.  Must do this, as the index tracks the number of
+         * entries with the flush_me_last field set, but assumes that
+         * the field will not change after insertion into the index.
+         *
+         * Note that this means that the H5C__FLUSH_LAST_FLAG flag
+         * is ignored if the entry is already in cache.
+         */
+        entry_ptr->flush_me_last = flush_last;
+
+        H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, NULL)
+        if (entry_ptr->is_dirty && !entry_ptr->in_slist)
+            H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, NULL)
+
+        /* insert the entry in the data structures used by the replacement
+         * policy.
We are just going to take it out again when we update + * the replacement policy for a protect, but this simplifies the + * code. If we do this often enough, we may want to optimize this. + */ + H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL) + + /* Record that the entry was loaded, to trigger a notify callback later */ + /* (After the entry is fully added to the cache) */ + was_loaded = TRUE; + } /* end else */ + + HDassert(entry_ptr->addr == addr); + HDassert(entry_ptr->type == type); + + if (entry_ptr->is_protected) { + if (read_only && entry_ptr->is_read_only) { + HDassert(entry_ptr->ro_ref_count > 0); + (entry_ptr->ro_ref_count)++; + } /* end if */ + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?") + } /* end if */ + else { + H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL) + + entry_ptr->is_protected = TRUE; + if (read_only) { + entry_ptr->is_read_only = TRUE; + entry_ptr->ro_ref_count = 1; + } /* end if */ + entry_ptr->dirtied = FALSE; + } /* end else */ + + H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) + H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) + + ret_value = thing; + + if (cache_ptr->evictions_enabled && + (cache_ptr->size_decreased || + (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)))) { + + if (!have_write_permitted) { + if (cache_ptr->check_write_permitted != NULL) { + if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted") + else + have_write_permitted = TRUE; + } + else { + write_permitted = cache_ptr->write_permitted; + have_write_permitted = TRUE; + } + } + + if (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)) + if (H5C__auto_adjust_cache_size(f, write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed") + + if (cache_ptr->size_decreased) { + cache_ptr->size_decreased = FALSE; + + /* check to see if the cache is now oversized due to the cache + * size reduction. If it is, try to evict enough entries to + * bring the cache size down to the current maximum cache size. + * + * Also, if the min_clean_size requirement is not met, we + * should also call H5C__make_space_in_cache() to bring us + * into compliance. + */ + if (cache_ptr->index_size >= cache_ptr->max_cache_size) + empty_space = 0; + else + empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; + + if ((cache_ptr->index_size > cache_ptr->max_cache_size) || + ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)) { + + if (cache_ptr->index_size > cache_ptr->max_cache_size) + cache_ptr->cache_full = TRUE; + + if (H5C__make_space_in_cache(f, (size_t)0, write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed") + } + } /* end if */ + } + + /* If we loaded the entry and the entry's type has a 'notify' callback, send + * an 'after load' notice now that the entry is fully integrated into + * the cache and protected. We must wait until it is protected so it is not + * evicted during the notify callback. + */ + if (was_loaded) + /* If the entry's type has a 'notify' callback send a 'after load' + * notice now that the entry is fully integrated into the cache. 
+ */ + if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, NULL, + "can't notify client about entry inserted into cache") + +#ifdef H5_HAVE_PARALLEL + /* Make sure the size of the collective entries in the cache remain in check */ + if (coll_access) { + if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) { + if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) + if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries") + } /* end if */ + else { + if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) + if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries") + } /* end else */ + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_protect() */ + +/*------------------------------------------------------------------------- + * Function: H5C_unpin_entry() + * + * Purpose: Unpin a cache entry. The entry can be either protected or + * unprotected at the time of call, but must be pinned. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 3/22/06 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_unpin_entry(void *_entry_ptr) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_entry_ptr; /* Pointer to entry to unpin */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + HDassert(entry_ptr); + cache_ptr = entry_ptr->cache_ptr; + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + /* Unpin the entry */ + if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, TRUE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry from client") + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_unpin_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C_unprotect + * + * Purpose: Undo an H5C_protect() call -- specifically, mark the + * entry as unprotected, remove it from the protected list, + * and give it back to the replacement policy. + * + * The TYPE and ADDR arguments must be the same as those in + * the corresponding call to H5C_protect() and the THING + * argument must be the value returned by that call to + * H5C_protect(). 
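+ *
+ *              A hypothetical example of a legal flags combination (as the
+ *              asserts below show, H5C__FREE_FILE_SPACE_FLAG and
+ *              H5C__TAKE_OWNERSHIP_FLAG are only meaningful together with
+ *              H5C__DELETED_FLAG, and are mutually exclusive):
+ *
+ *                  if (H5C_unprotect(f, addr, thing,
+ *                                    H5C__DELETED_FLAG | H5C__FREE_FILE_SPACE_FLAG) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't unprotect entry")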
+ *
+ *              If the deleted flag is TRUE, simply remove the target entry
+ *              from the cache, clear it, and free it without writing it to
+ *              disk.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  John Mainzer
+ *              6/2/04
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
+{
+    H5C_t  *cache_ptr;
+    hbool_t deleted;
+    hbool_t dirtied;
+    hbool_t set_flush_marker;
+    hbool_t pin_entry;
+    hbool_t unpin_entry;
+    hbool_t free_file_space;
+    hbool_t take_ownership;
+    hbool_t was_clean;
+#ifdef H5_HAVE_PARALLEL
+    hbool_t clear_entry = FALSE;
+#endif /* H5_HAVE_PARALLEL */
+    H5C_cache_entry_t *entry_ptr;
+    H5C_cache_entry_t *test_entry_ptr;
+    herr_t             ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    deleted          = ((flags & H5C__DELETED_FLAG) != 0);
+    dirtied          = ((flags & H5C__DIRTIED_FLAG) != 0);
+    set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
+    pin_entry        = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
+    unpin_entry      = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0);
+    free_file_space  = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
+    take_ownership   = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
+
+    HDassert(f);
+    HDassert(f->shared);
+
+    cache_ptr = f->shared->cache;
+
+    HDassert(cache_ptr);
+    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+    HDassert(H5F_addr_defined(addr));
+    HDassert(thing);
+    HDassert(!(pin_entry && unpin_entry));
+
+    /* deleted flag must accompany free_file_space */
+    HDassert((!free_file_space) || (deleted));
+
+    /* deleted flag must accompany take_ownership */
+    HDassert((!take_ownership) || (deleted));
+
+    /* can't have both free_file_space & take_ownership */
+    HDassert(!(free_file_space && take_ownership));
+
+    entry_ptr = (H5C_cache_entry_t *)thing;
+    HDassert(entry_ptr->addr == addr);
+
+    /* also set the dirtied variable if the dirtied field is set in
+     * the entry.
+     */
+    dirtied |= entry_ptr->dirtied;
+    was_clean = !(entry_ptr->is_dirty);
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+    if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+        H5C__validate_lru_list(cache_ptr) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+    /* if the entry has multiple read only protects, just decrement
+     * the ro_ref_counter.  Don't actually unprotect until the ref count
+     * drops to zero.
+     */
+    if (entry_ptr->ro_ref_count > 1) {
+        /* Sanity check */
+        HDassert(entry_ptr->is_protected);
+        HDassert(entry_ptr->is_read_only);
+
+        if (dirtied)
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")
+
+        /* Reduce the RO ref count */
+        (entry_ptr->ro_ref_count)--;
+
+        /* Pin or unpin the entry as requested.
*/ + if (pin_entry) { + /* Pin the entry from a client */ + if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") + } + else if (unpin_entry) { + /* Unpin the entry from a client */ + if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client") + } /* end if */ + } + else { + if (entry_ptr->is_read_only) { + /* Sanity check */ + HDassert(entry_ptr->ro_ref_count == 1); + + if (dirtied) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??") + + entry_ptr->is_read_only = FALSE; + entry_ptr->ro_ref_count = 0; + } /* end if */ + +#ifdef H5_HAVE_PARALLEL + /* When the H5C code is used to implement the metadata cache in the + * PHDF5 case, only the cache on process 0 is allowed to write to file. + * All the other metadata caches must hold dirty entries until they + * are told that the entries are clean. + * + * The clear_on_unprotect flag in the H5C_cache_entry_t structure + * exists to deal with the case in which an entry is protected when + * its cache receives word that the entry is now clean. In this case, + * the clear_on_unprotect flag is set, and the entry is flushed with + * the H5C__FLUSH_CLEAR_ONLY_FLAG. + * + * All this is a bit awkward, but until the metadata cache entries + * are contiguous, with only one dirty flag, we have to let the supplied + * functions deal with the resetting the is_dirty flag. + */ + if (entry_ptr->clear_on_unprotect) { + /* Sanity check */ + HDassert(entry_ptr->is_dirty); + + entry_ptr->clear_on_unprotect = FALSE; + if (!dirtied) + clear_entry = TRUE; + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + + if (!entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??") + + /* Mark the entry as dirty if appropriate */ + entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied); + if (dirtied && entry_ptr->image_up_to_date) { + entry_ptr->image_up_to_date = FALSE; + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "Can't propagate serialization status to fd parents") + } /* end if */ + + /* Check for newly dirtied entry */ + if (was_clean && entry_ptr->is_dirty) { + /* Update index for newly dirtied entry */ + H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL) + + /* If the entry's type has a 'notify' callback send a + * 'entry dirtied' notice now that the entry is fully + * integrated into the cache. + */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set") + + /* Propagate the flush dep dirty flag up the flush dependency chain + * if appropriate + */ + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_dirty(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") + } /* end if */ + /* Check for newly clean entry */ + else if (!was_clean && !entry_ptr->is_dirty) { + + /* If the entry's type has a 'notify' callback send a + * 'entry cleaned' notice now that the entry is fully + * integrated into the cache. 
+ */ + if (entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify client about entry dirty flag cleared") + + /* Propagate the flush dep clean flag up the flush dependency chain + * if appropriate + */ + if (entry_ptr->flush_dep_nparents > 0) + if (H5C__mark_flush_dep_clean(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") + } /* end else-if */ + + /* Pin or unpin the entry as requested. */ + if (pin_entry) { + /* Pin the entry from a client */ + if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") + } + else if (unpin_entry) { + /* Unpin the entry from a client */ + if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client") + } /* end if */ + + /* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on + * the pinned entry list if entry_ptr->is_pinned is TRUE. + */ + H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL) + + entry_ptr->is_protected = FALSE; + + /* if the entry is dirty, 'or' its flush_marker with the set flush flag, + * and then add it to the skip list if it isn't there already. + */ + if (entry_ptr->is_dirty) { + entry_ptr->flush_marker |= set_flush_marker; + if (!entry_ptr->in_slist) + /* this is a no-op if cache_ptr->slist_enabled is FALSE */ + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) + } /* end if */ + + /* This implementation of the "deleted" option is a bit inefficient, as + * we re-insert the entry to be deleted into the replacement policy + * data structures, only to remove them again. Depending on how often + * we do this, we may want to optimize a bit. + */ + if (deleted) { + unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__FLUSH_INVALIDATE_FLAG); + + /* verify that the target entry is in the cache. */ + H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) + + if (test_entry_ptr == NULL) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?") + else if (test_entry_ptr != entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, + "hash table contains multiple entries for addr?!?") + + /* Set the 'free file space' flag for the flush, if needed */ + if (free_file_space) + flush_flags |= H5C__FREE_FILE_SPACE_FLAG; + + /* Set the "take ownership" flag for the flush, if needed */ + if (take_ownership) + flush_flags |= H5C__TAKE_OWNERSHIP_FLAG; + + /* Delete the entry from the skip list on destroy */ + flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; + + HDassert((!cache_ptr->slist_enabled) || (((!was_clean) || dirtied) == (entry_ptr->in_slist))); + + if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry") + } /* end if */ +#ifdef H5_HAVE_PARALLEL + else if (clear_entry) { + /* Verify that the target entry is in the cache. 
*/ + H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) + + if (test_entry_ptr == NULL) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?") + else if (test_entry_ptr != entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, + "hash table contains multiple entries for addr?!?") + + if (H5C__flush_single_entry(f, entry_ptr, + H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry") + } /* end else if */ +#endif /* H5_HAVE_PARALLEL */ + } + + H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_unprotect() */ + +/*------------------------------------------------------------------------- + * Function: H5C_unsettle_entry_ring + * + * Purpose: Advise the metadata cache that the specified entry's free space + * manager ring is no longer settled (if it was on entry). + * + * If the target free space manager ring is already + * unsettled, do nothing, and return SUCCEED. + * + * If the target free space manager ring is settled, and + * we are not in the process of a file shutdown, mark + * the ring as unsettled, and return SUCCEED. + * + * If the target free space manager is settled, and we + * are in the process of a file shutdown, post an error + * message, and return FAIL. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * January 3, 2017 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_unsettle_entry_ring(void *_entry) +{ + H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */ + H5C_t *cache; /* Cache for file */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(entry); + HDassert(entry->ring != H5C_RING_UNDEFINED); + HDassert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) || + (H5C_RING_MDFSM == entry->ring)); + cache = entry->cache_ptr; + HDassert(cache); + HDassert(cache->magic == H5C__H5C_T_MAGIC); + + switch (entry->ring) { + case H5C_RING_USER: + /* Do nothing */ + break; + + case H5C_RING_RDFSM: + if (cache->rdfsm_settled) { + if (cache->flush_in_progress || cache->close_warning_received) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle") + cache->rdfsm_settled = FALSE; + } /* end if */ + break; + + case H5C_RING_MDFSM: + if (cache->mdfsm_settled) { + if (cache->flush_in_progress || cache->close_warning_received) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle") + cache->mdfsm_settled = FALSE; + } /* end if */ + break; + + default: + HDassert(FALSE); /* this should be un-reachable */ + break; + } /* end switch */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_unsettle_entry_ring() */ + +/*------------------------------------------------------------------------- + * Function: H5C_create_flush_dependency() + * + * Purpose: Initiates a parent<->child entry flush dependency. The parent + * entry must be pinned or protected at the time of call, and must + * have all dependencies removed before the cache can shut down. 
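+ *
+ *              A minimal, hypothetical sketch (`header` and `node` stand in
+ *              for a client's parent and child entries, e.g. an index
+ *              header and one of its nodes):
+ *
+ *                  if (H5C_create_flush_dependency(header, node) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "can't create flush dependency")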
+ * + * Note: Flush dependencies in the cache indicate that a child entry + * must be flushed to the file before its parent. (This is + * currently used to implement Single-Writer/Multiple-Reader (SWMR) + * I/O access for data structures in the file). + * + * Creating a flush dependency between two entries will also pin + * the parent entry. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * 3/05/09 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_create_flush_dependency(void *parent_thing, void *child_thing) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */ + H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(parent_entry); + HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(H5F_addr_defined(parent_entry->addr)); + HDassert(child_entry); + HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(H5F_addr_defined(child_entry->addr)); + cache_ptr = parent_entry->cache_ptr; + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(cache_ptr == child_entry->cache_ptr); +#ifndef NDEBUG + /* Make sure the parent is not already a parent */ + { + unsigned u; + + for (u = 0; u < child_entry->flush_dep_nparents; u++) + HDassert(child_entry->flush_dep_parent[u] != parent_entry); + } /* end block */ +#endif + + /* More sanity checks */ + if (child_entry == parent_entry) + HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself") + if (!(parent_entry->is_protected || parent_entry->is_pinned)) + HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected") + + /* Check for parent not pinned */ + if (!parent_entry->is_pinned) { + /* Sanity check */ + HDassert(parent_entry->flush_dep_nchildren == 0); + HDassert(!parent_entry->pinned_from_client); + HDassert(!parent_entry->pinned_from_cache); + + /* Pin the parent entry */ + parent_entry->is_pinned = TRUE; + H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry) + } /* end else */ + + /* Mark the entry as pinned from the cache's action (possibly redundantly) */ + parent_entry->pinned_from_cache = TRUE; + + /* Check if we need to resize the child's parent array */ + if (child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) { + if (child_entry->flush_dep_parent_nalloc == 0) { + /* Array does not exist yet, allocate it */ + HDassert(!child_entry->flush_dep_parent); + + if (NULL == (child_entry->flush_dep_parent = + H5FL_SEQ_MALLOC(H5C_cache_entry_ptr_t, H5C_FLUSH_DEP_PARENT_INIT))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "memory allocation failed for flush dependency parent list") + child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT; + } /* end if */ + else { + /* Resize existing array */ + HDassert(child_entry->flush_dep_parent); + + if (NULL == (child_entry->flush_dep_parent = + H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent, + 2 * child_entry->flush_dep_parent_nalloc))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "memory allocation failed for flush dependency parent list") + child_entry->flush_dep_parent_nalloc *= 2; + } /* end else */ + cache_ptr->entry_fd_height_change_counter++; + } /* end if */ + + /* Add the dependency to the 
child's parent array */
+    child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
+    child_entry->flush_dep_nparents++;
+
+    /* Increment parent's number of children */
+    parent_entry->flush_dep_nchildren++;
+
+    /* Adjust the number of dirty children */
+    if (child_entry->is_dirty) {
+        /* Sanity check */
+        HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
+
+        parent_entry->flush_dep_ndirty_children++;
+
+        /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
+        if (parent_entry->type->notify &&
+            (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0)
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+                        "can't notify parent about child entry dirty flag set")
+    } /* end if */
+
+    /* adjust the parent's number of unserialized children.  Note
+     * that it is possible for an entry to be clean and unserialized.
+     */
+    if (!child_entry->image_up_to_date) {
+        HDassert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);
+
+        parent_entry->flush_dep_nunser_children++;
+
+        /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
+        if (parent_entry->type->notify &&
+            (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+                        "can't notify parent about child entry serialized flag reset")
+    } /* end if */
+
+    /* Post-conditions, for successful operation */
+    HDassert(parent_entry->is_pinned);
+    HDassert(parent_entry->flush_dep_nchildren > 0);
+    HDassert(child_entry->flush_dep_parent);
+    HDassert(child_entry->flush_dep_nparents > 0);
+    HDassert(child_entry->flush_dep_parent_nalloc > 0);
+#ifndef NDEBUG
+    H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
+#endif
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_create_flush_dependency() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5C_destroy_flush_dependency()
+ *
+ * Purpose:     Terminates a parent<->child entry flush dependency.  The
+ *              parent entry must be pinned.
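+ *
+ *              Each call to H5C_create_flush_dependency() is expected to be
+ *              matched by a call like the (hypothetical) sketch below before
+ *              the child is evicted or the cache is shut down:
+ *
+ *                  if (H5C_destroy_flush_dependency(header, node) < 0)
+ *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy flush dependency")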
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * 3/05/09 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_destroy_flush_dependency(void *parent_thing, void *child_thing) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */ + H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(parent_entry); + HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(H5F_addr_defined(parent_entry->addr)); + HDassert(child_entry); + HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(H5F_addr_defined(child_entry->addr)); + cache_ptr = parent_entry->cache_ptr; + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(cache_ptr == child_entry->cache_ptr); + + /* Usage checks */ + if (!parent_entry->is_pinned) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned") + if (NULL == child_entry->flush_dep_parent) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, + "Child entry doesn't have a flush dependency parent array") + if (0 == parent_entry->flush_dep_nchildren) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, + "Parent entry flush dependency ref. count has no child dependencies") + + /* Search for parent in child's parent array. This is a linear search + * because we do not expect large numbers of parents. If this changes, we + * may wish to change the parent array to a skip list */ + for (u = 0; u < child_entry->flush_dep_nparents; u++) + if (child_entry->flush_dep_parent[u] == parent_entry) + break; + if (u == child_entry->flush_dep_nparents) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, + "Parent entry isn't a flush dependency parent for child entry") + + /* Remove parent entry from child's parent array */ + if (u < (child_entry->flush_dep_nparents - 1)) + HDmemmove(&child_entry->flush_dep_parent[u], &child_entry->flush_dep_parent[u + 1], + (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0])); + child_entry->flush_dep_nparents--; + + /* Adjust parent entry's nchildren and unpin parent if it goes to zero */ + parent_entry->flush_dep_nchildren--; + if (0 == parent_entry->flush_dep_nchildren) { + /* Sanity check */ + HDassert(parent_entry->pinned_from_cache); + + /* Check if we should unpin parent entry now */ + if (!parent_entry->pinned_from_client) + if (H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry") + + /* Mark the entry as unpinned from the cache's action */ + parent_entry->pinned_from_cache = FALSE; + } /* end if */ + + /* Adjust parent entry's ndirty_children */ + if (child_entry->is_dirty) { + /* Sanity check */ + HDassert(parent_entry->flush_dep_ndirty_children > 0); + + parent_entry->flush_dep_ndirty_children--; + + /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */ + if (parent_entry->type->notify && + (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry dirty flag reset") + } /* end if */ + + /* adjust parent entry's number of unserialized children */ + if (!child_entry->image_up_to_date) { + 
HDassert(parent_entry->flush_dep_nunser_children > 0); + + parent_entry->flush_dep_nunser_children--; + + /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */ + if (parent_entry->type->notify && + (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, + "can't notify parent about child entry serialized flag set") + } /* end if */ + + /* Shrink or free the parent array if appropriate */ + if (child_entry->flush_dep_nparents == 0) { + child_entry->flush_dep_parent = H5FL_SEQ_FREE(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent); + child_entry->flush_dep_parent_nalloc = 0; + } /* end if */ + else if (child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT && + child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) { + if (NULL == (child_entry->flush_dep_parent = + H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent, + child_entry->flush_dep_parent_nalloc / 4))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "memory allocation failed for flush dependency parent list") + child_entry->flush_dep_parent_nalloc /= 4; + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_destroy_flush_dependency() */ + +/*------------------------------------------------------------------------- + * Function: H5C_expunge_entry + * + * Purpose: Expunge an entry from the cache without writing it to disk + * even if it is dirty. The entry may not be either pinned or + * protected. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 6/29/06 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flags) +{ + H5C_t *cache_ptr; + H5C_cache_entry_t *entry_ptr = NULL; + unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG); + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + HDassert(f); + HDassert(f->shared); + cache_ptr = f->shared->cache; + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(type); + HDassert(H5F_addr_defined(addr)); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + /* Look for entry in cache */ + H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL) + if ((entry_ptr == NULL) || (entry_ptr->type != type)) + /* the target doesn't exist in the cache, so we are done. */ + HGOTO_DONE(SUCCEED) + + HDassert(entry_ptr->addr == addr); + HDassert(entry_ptr->type == type); + + /* Check for entry being pinned or protected */ + if (entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected") + if (entry_ptr->is_pinned) + HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned") + + /* If we get this far, call H5C__flush_single_entry() with the + * H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG. + * This will clear the entry, and then delete it from the cache. 
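+     * (The CLEAR_ONLY flag is what keeps a dirty entry from being written
+     * to the file on the way out.)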
+ */ + + /* Pass along 'free file space' flag */ + flush_flags |= (flags & H5C__FREE_FILE_SPACE_FLAG); + + /* Delete the entry from the skip list on destroy */ + flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; + + if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry") + +done: +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_lru_list(cache_ptr) < 0) + HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_expunge_entry() */ + +/*------------------------------------------------------------------------- + * Function: H5C_remove_entry + * + * Purpose: Remove an entry from the cache. Must be not protected, pinned, + * dirty, involved in flush dependencies, etc. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_remove_entry(void *_entry) +{ + H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */ + H5C_t *cache; /* Cache for file */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(entry); + HDassert(entry->ring != H5C_RING_UNDEFINED); + cache = entry->cache_ptr; + HDassert(cache); + HDassert(cache->magic == H5C__H5C_T_MAGIC); + + /* Check for error conditions */ + if (entry->is_dirty) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache") + if (entry->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache") + if (entry->is_pinned) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache") + /* NOTE: If these two errors are getting tripped because the entry is + * in a flush dependency with a freedspace entry, move the checks + * after the "before evict" message is sent, and add the + * "child being evicted" message to the "before evict" notify + * section below. QAK - 2017/08/03 + */ + if (entry->flush_dep_nparents > 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, + "can't remove entry with flush dependency parents from cache") + if (entry->flush_dep_nchildren > 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, + "can't remove entry with flush dependency children from cache") + + /* Additional internal cache consistency checks */ + HDassert(!entry->in_slist); + HDassert(!entry->flush_marker); + HDassert(!entry->flush_in_progress); + + /* Note that the algorithm below is (very) similar to the set of operations + * in H5C__flush_single_entry() and should be kept in sync with changes + * to that code. - QAK, 2016/11/30 + */ + + /* Update stats, as if we are "destroying" and taking ownership of the entry */ + H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE) + + /* If the entry's type has a 'notify' callback, send a 'before eviction' + * notice while the entry is still fully integrated in the cache. + */ + if (entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict") + + /* Update the cache internal data structures as appropriate for a destroy. 
+     * Specifically:
+     *  1) Delete it from the index
+     *  2) Delete it from the collective read access list
+     *  3) Update the replacement policy for eviction
+     *  4) Remove it from the tag list for this object
+     */
+
+    H5C__DELETE_FROM_INDEX(cache, entry, FAIL)
+
+#ifdef H5_HAVE_PARALLEL
+    /* Check for collective read access flag */
+    if (entry->coll_access) {
+        entry->coll_access = FALSE;
+        H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
+    } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
+    H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)
+
+    /* Remove entry from tag list */
+    if (H5C__untag_entry(cache, entry) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
+
+    /* Increment entries_removed_counter and set last_entry_removed_ptr.
+     * As we may be about to free the entry, recall that last_entry_removed_ptr
+     * must NEVER be dereferenced.
+     *
+     * Recall that these fields are maintained to allow functions that perform
+     * scans of lists of entries to detect the unexpected removal of entries
+     * (via expunge, eviction, or take ownership at present), so that they can
+     * re-start their scans if necessary.
+     *
+     * Also check if the entry we are watching for removal is being
+     * removed (usually the 'next' entry for an iteration) and reset
+     * it to indicate that it was removed.
+     */
+    cache->entries_removed_counter++;
+    cache->last_entry_removed_ptr = entry;
+    if (entry == cache->entry_watched_for_removal)
+        cache->entry_watched_for_removal = NULL;
+
+    /* Internal cache data structures should now be up to date, and
+     * consistent with the status of the entry.
+     *
+     * Now clean up internal cache fields if appropriate.
+     */
+
+    /* Free the buffer for the on disk image */
+    if (entry->image_ptr != NULL)
+        entry->image_ptr = H5MM_xfree(entry->image_ptr);
+
+    /* Reset the pointer to the cache the entry is within */
+    entry->cache_ptr = NULL;
+
+    /* Client is taking ownership of the entry.
Set bad magic here so the + * cache will choke unless the entry is re-inserted properly + */ + entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__remove_entry() */ diff --git a/src/H5Cimage.c b/src/H5Cimage.c index 6d211a4f56d..df60d00a73f 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -105,21 +105,11 @@ (cache_ptr)->images_loaded++; \ (cache_ptr)->last_image_size = (cache_ptr)->image_len; \ } -#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \ -{ \ - (cache_ptr)->prefetches++; \ - if (dirty) \ - (cache_ptr)->dirty_prefetches++; \ -} -#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \ - (cache_ptr)->prefetch_hits++; /* clang-format on */ #else /* H5C_COLLECT_CACHE_STATS */ #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) -#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) -#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) #endif /* H5C_COLLECT_CACHE_STATS */ /******************/ @@ -138,8 +128,6 @@ static herr_t H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, c static herr_t H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint8_t **buf, unsigned entry_num); #endif -static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr, - H5C_cache_entry_t **fd_children); static herr_t H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t **buf); static herr_t H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigned entry_num); static herr_t H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr); @@ -171,7 +159,6 @@ H5FL_DEFINE(H5C_cache_entry_t); /*******************/ /*------------------------------------------------------------------------- - * * Function: H5C_cache_image_pending() * * Purpose: Tests to see if the load of a metadata cache image @@ -459,375 +446,6 @@ H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr) FUNC_LEAVE_NOAPI(ret_value) } /* H5C__generate_cache_image() */ -/*------------------------------------------------------------------------- - * Function: H5C__deserialize_prefetched_entry() - * - * Purpose: Deserialize the supplied prefetched entry entry, and return - * a pointer to the deserialized entry in *entry_ptr_ptr. - * If successful, remove the prefetched entry from the cache, - * and free it. Insert the deserialized entry into the cache. - * - * Note that the on disk image of the entry is not freed -- - * a pointer to it is stored in the deserialized entries' - * image_ptr field, and its image_up_to_date field is set to - * TRUE unless the entry is dirtied by the deserialize call. - * - * If the prefetched entry is a flush dependency child, - * destroy that flush dependency prior to calling the - * deserialize callback. If appropriate, the flush dependency - * relationship will be recreated by the cache client. - * - * If the prefetched entry is a flush dependency parent, - * destroy the flush dependency relationship with all its - * children. As all these children must be prefetched entries, - * recreate these flush dependency relationships with - * deserialized entry after it is inserted in the cache. - * - * Since deserializing a prefetched entry is semantically - * equivalent to a load, issue an entry loaded nofification - * if the notify callback is defined. - * - * Return: SUCCEED on success, and FAIL on failure. 
- * - * Note that *entry_ptr_ptr is undefined on failure. - * - * Programmer: John Mainzer, 8/10/15 - * - *------------------------------------------------------------------------- - */ -herr_t -H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t **entry_ptr_ptr, - const H5C_class_t *type, haddr_t addr, void *udata) -{ - hbool_t dirty = FALSE; /* Flag indicating whether thing was - * dirtied during deserialize - */ - size_t len; /* Size of image in file */ - void *thing = NULL; /* Pointer to thing loaded */ - H5C_cache_entry_t *pf_entry_ptr; /* pointer to the prefetched entry */ - /* supplied in *entry_ptr_ptr. */ - H5C_cache_entry_t *ds_entry_ptr; /* Alias for thing loaded, as cache - * entry - */ - H5C_cache_entry_t **fd_children = NULL; /* Pointer to a dynamically */ - /* allocated array of pointers to */ - /* the flush dependency children of */ - /* the prefetched entry, or NULL if */ - /* that array does not exist. */ - unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG); - int i; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* sanity checks */ - HDassert(f); - HDassert(f->shared); - HDassert(f->shared->cache); - HDassert(f->shared->cache == cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(entry_ptr_ptr); - HDassert(*entry_ptr_ptr); - pf_entry_ptr = *entry_ptr_ptr; - HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(pf_entry_ptr->type); - HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); - HDassert(pf_entry_ptr->prefetched); - HDassert(pf_entry_ptr->image_up_to_date); - HDassert(pf_entry_ptr->image_ptr); - HDassert(pf_entry_ptr->size > 0); - HDassert(pf_entry_ptr->addr == addr); - HDassert(type); - HDassert(type->id == pf_entry_ptr->prefetch_type_id); - HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); - - /* verify absence of prohibited or unsupported type flag combinations */ - HDassert(!(type->flags & H5C__CLASS_SKIP_READS)); - - /* Can't see how skip reads could be usefully combined with - * either the speculative read flag. Hence disallow. - */ - HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG))); - HDassert(H5F_addr_defined(addr)); - HDassert(type->get_initial_load_size); - HDassert(type->deserialize); - - /* if *pf_entry_ptr is a flush dependency child, destroy all such - * relationships now. The client will restore the relationship(s) with - * the deserialized entry if appropriate. 
- */ - HDassert(pf_entry_ptr->fd_parent_count == pf_entry_ptr->flush_dep_nparents); - for (i = (int)(pf_entry_ptr->fd_parent_count) - 1; i >= 0; i--) { - HDassert(pf_entry_ptr->flush_dep_parent); - HDassert(pf_entry_ptr->flush_dep_parent[i]); - HDassert(pf_entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(pf_entry_ptr->flush_dep_parent[i]->flush_dep_nchildren > 0); - HDassert(pf_entry_ptr->fd_parent_addrs); - HDassert(pf_entry_ptr->flush_dep_parent[i]->addr == pf_entry_ptr->fd_parent_addrs[i]); - - if (H5C_destroy_flush_dependency(pf_entry_ptr->flush_dep_parent[i], pf_entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry parent flush dependency") - - pf_entry_ptr->fd_parent_addrs[i] = HADDR_UNDEF; - } /* end for */ - HDassert(pf_entry_ptr->flush_dep_nparents == 0); - - /* If *pf_entry_ptr is a flush dependency parent, destroy its flush - * dependency relationships with all its children (which must be - * prefetched entries as well). - * - * These flush dependency relationships will have to be restored - * after the deserialized entry is inserted into the cache in order - * to transfer these relationships to the new entry. Hence save the - * pointers to the flush dependency children of *pf_enty_ptr for later - * use. - */ - if (pf_entry_ptr->fd_child_count > 0) { - if (NULL == (fd_children = (H5C_cache_entry_t **)H5MM_calloc( - sizeof(H5C_cache_entry_t **) * (size_t)(pf_entry_ptr->fd_child_count + 1)))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd child ptr array") - - if (H5C__destroy_pf_entry_child_flush_deps(cache_ptr, pf_entry_ptr, fd_children) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, - "can't destroy pf entry child flush dependency(s).") - } /* end if */ - - /* Since the size of the on disk image is known exactly, there is - * no need for either a call to the get_initial_load_size() callback, - * or retries if the H5C__CLASS_SPECULATIVE_LOAD_FLAG flag is set. - * Similarly, there is no need to clamp possible reads beyond - * EOF. - */ - len = pf_entry_ptr->size; - - /* Deserialize the prefetched on-disk image of the entry into the - * native memory form - */ - if (NULL == (thing = type->deserialize(pf_entry_ptr->image_ptr, len, udata, &dirty))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't deserialize image") - ds_entry_ptr = (H5C_cache_entry_t *)thing; - - /* In general, an entry should be clean just after it is loaded. - * - * However, when this code is used in the metadata cache, it is - * possible that object headers will be dirty at this point, as - * the deserialize function will alter object headers if necessary to - * fix an old bug. - * - * In the following assert: - * - * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) ); - * - * note that type ids 5 & 6 are associated with object headers in the - * metadata cache. - * - * When we get to using H5C for other purposes, we may wish to - * tighten up the assert so that the loophole only applies to the - * metadata cache. - * - * Note that at present, dirty can't be set to true with prefetched - * entries. However this may change, so include this functionality - * against that possibility. - * - * Also, note that it is possible for a prefetched entry to be dirty -- - * hence the value assigned to ds_entry_ptr->is_dirty below. 
- */ - - HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6)); - - ds_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; - ds_entry_ptr->cache_ptr = f->shared->cache; - ds_entry_ptr->addr = addr; - ds_entry_ptr->size = len; - HDassert(ds_entry_ptr->size < H5C_MAX_ENTRY_SIZE); - ds_entry_ptr->image_ptr = pf_entry_ptr->image_ptr; - ds_entry_ptr->image_up_to_date = !dirty; - ds_entry_ptr->type = type; - ds_entry_ptr->is_dirty = dirty | pf_entry_ptr->is_dirty; - ds_entry_ptr->dirtied = FALSE; - ds_entry_ptr->is_protected = FALSE; - ds_entry_ptr->is_read_only = FALSE; - ds_entry_ptr->ro_ref_count = 0; - ds_entry_ptr->is_pinned = FALSE; - ds_entry_ptr->in_slist = FALSE; - ds_entry_ptr->flush_marker = FALSE; -#ifdef H5_HAVE_PARALLEL - ds_entry_ptr->clear_on_unprotect = FALSE; - ds_entry_ptr->flush_immediately = FALSE; - ds_entry_ptr->coll_access = FALSE; -#endif /* H5_HAVE_PARALLEL */ - ds_entry_ptr->flush_in_progress = FALSE; - ds_entry_ptr->destroy_in_progress = FALSE; - - ds_entry_ptr->ring = pf_entry_ptr->ring; - - /* Initialize flush dependency height fields */ - ds_entry_ptr->flush_dep_parent = NULL; - ds_entry_ptr->flush_dep_nparents = 0; - ds_entry_ptr->flush_dep_parent_nalloc = 0; - ds_entry_ptr->flush_dep_nchildren = 0; - ds_entry_ptr->flush_dep_ndirty_children = 0; - ds_entry_ptr->flush_dep_nunser_children = 0; - - /* Initialize fields supporting the hash table: */ - ds_entry_ptr->ht_next = NULL; - ds_entry_ptr->ht_prev = NULL; - ds_entry_ptr->il_next = NULL; - ds_entry_ptr->il_prev = NULL; - - /* Initialize fields supporting replacement policies: */ - ds_entry_ptr->next = NULL; - ds_entry_ptr->prev = NULL; -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - ds_entry_ptr->aux_next = NULL; - ds_entry_ptr->aux_prev = NULL; -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#ifdef H5_HAVE_PARALLEL - pf_entry_ptr->coll_next = NULL; - pf_entry_ptr->coll_prev = NULL; -#endif /* H5_HAVE_PARALLEL */ - - /* Initialize cache image related fields */ - ds_entry_ptr->include_in_image = FALSE; - ds_entry_ptr->lru_rank = 0; - ds_entry_ptr->image_dirty = FALSE; - ds_entry_ptr->fd_parent_count = 0; - ds_entry_ptr->fd_parent_addrs = NULL; - ds_entry_ptr->fd_child_count = pf_entry_ptr->fd_child_count; - ds_entry_ptr->fd_dirty_child_count = 0; - ds_entry_ptr->image_fd_height = 0; - ds_entry_ptr->prefetched = FALSE; - ds_entry_ptr->prefetch_type_id = 0; - ds_entry_ptr->age = 0; - ds_entry_ptr->prefetched_dirty = pf_entry_ptr->prefetched_dirty; -#ifndef NDEBUG /* debugging field */ - ds_entry_ptr->serialization_count = 0; -#endif - - H5C__RESET_CACHE_ENTRY_STATS(ds_entry_ptr); - - /* Apply to to the newly deserialized entry */ - if (H5C__tag_entry(cache_ptr, ds_entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry") - - /* We have successfully deserialized the prefetched entry. - * - * Before we return a pointer to the deserialized entry, we must remove - * the prefetched entry from the cache, discard it, and replace it with - * the deserialized entry. Note that we do not free the prefetched - * entries image, as that has been transferred to the deserialized - * entry. - * - * Also note that we have not yet restored any flush dependencies. This - * must wait until the deserialized entry is inserted in the cache. - * - * To delete the prefetched entry from the cache: - * - * 1) Set pf_entry_ptr->image_ptr to NULL. Since we have already - * transferred the buffer containing the image to *ds_entry_ptr, - * this is not a memory leak. 
- * - * 2) Call H5C__flush_single_entry() with the H5C__FLUSH_INVALIDATE_FLAG - * and H5C__FLUSH_CLEAR_ONLY_FLAG flags set. - */ - pf_entry_ptr->image_ptr = NULL; - - if (pf_entry_ptr->is_dirty) { - HDassert(((cache_ptr->slist_enabled) && (pf_entry_ptr->in_slist)) || - ((!cache_ptr->slist_enabled) && (!pf_entry_ptr->in_slist))); - - flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; - } /* end if */ - - if (H5C__flush_single_entry(f, pf_entry_ptr, flush_flags) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge prefetched entry") - -#ifndef NDEGUG /* verify deletion */ - H5C__SEARCH_INDEX(cache_ptr, addr, pf_entry_ptr, FAIL); - - HDassert(NULL == pf_entry_ptr); -#endif - - /* Insert the deserialized entry into the cache. */ - H5C__INSERT_IN_INDEX(cache_ptr, ds_entry_ptr, FAIL) - - HDassert(!ds_entry_ptr->in_slist); - if (ds_entry_ptr->is_dirty) - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, ds_entry_ptr, FAIL) - - H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, ds_entry_ptr, FAIL) - - /* Deserializing a prefetched entry is the conceptual equivalent of - * loading it from file. If the deserialized entry has a notify callback, - * send an "after load" notice now that the deserialized entry is fully - * integrated into the cache. - */ - if (ds_entry_ptr->type->notify && - (ds_entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, ds_entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry loaded into cache") - - /* Restore flush dependencies with the flush dependency children of - * of the prefetched entry. Note that we must protect *ds_entry_ptr - * before the call to avoid triggering sanity check failures, and - * then unprotect it afterwards. - */ - i = 0; - if (fd_children != NULL) { - H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, ds_entry_ptr, FAIL) - ds_entry_ptr->is_protected = TRUE; - while (fd_children[i] != NULL) { - /* Sanity checks */ - HDassert((fd_children[i])->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert((fd_children[i])->prefetched); - HDassert((fd_children[i])->fd_parent_count > 0); - HDassert((fd_children[i])->fd_parent_addrs); - -#ifndef NDEBUG - { - int j; - hbool_t found; - - j = 0; - found = FALSE; - while ((j < (int)((fd_children[i])->fd_parent_count)) && (!found)) { - if ((fd_children[i])->fd_parent_addrs[j] == ds_entry_ptr->addr) - found = TRUE; - - j++; - } /* end while */ - HDassert(found); - } -#endif - - if (H5C_create_flush_dependency(ds_entry_ptr, fd_children[i]) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore child flush dependency") - - i++; - } /* end while */ - - H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, ds_entry_ptr, FAIL); - ds_entry_ptr->is_protected = FALSE; - } /* end if ( fd_children != NULL ) */ - HDassert((unsigned)i == ds_entry_ptr->fd_child_count); - - ds_entry_ptr->fd_child_count = 0; - H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) - - /* finally, pass ds_entry_ptr back to the caller */ - *entry_ptr_ptr = ds_entry_ptr; - -done: - if (fd_children) - fd_children = (H5C_cache_entry_t **)H5MM_xfree((void *)fd_children); - - /* Release resources on error */ - if (FAIL == ret_value) - if (thing && type->free_icr(thing) < 0) - HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C__deserialize_prefetched_entry() */ - /*------------------------------------------------------------------------- * Function: H5C__free_image_entries_array * @@ -896,7 +514,7 @@ H5C__free_image_entries_array(H5C_t *cache_ptr) } /* H5C__free_image_entries_array() */ 
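
The flush dependency calls above (H5C_create_flush_dependency(), H5C_destroy_flush_dependency(), and the restoration loop in the removed H5C__deserialize_prefetched_entry()) maintain a small set of per-entry counters with fixed invariants: a parent stays pinned while it has children, and its dirty-child count can never exceed its child count. The standalone C sketch below models only that bookkeeping; fd_entry_t, fd_create() and fd_destroy() are illustrative names rather than HDF5 identifiers, and the real routines additionally manage the flush_dep_parent array, the unserialized-children count, client 'notify' callbacks, and the pinned_from_client/pinned_from_cache distinction.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for a few H5C_cache_entry_t fields; illustrative only */
    typedef struct fd_entry_t {
        bool     is_dirty;        /* entry holds unwritten changes */
        bool     is_pinned;       /* parents stay pinned while they have children */
        unsigned nchildren;       /* number of flush dependency children */
        unsigned ndirty_children; /* how many of those children are dirty */
    } fd_entry_t;

    /* Register 'child' as a flush dependency child of 'parent' */
    static void
    fd_create(fd_entry_t *parent, fd_entry_t *child)
    {
        parent->is_pinned = true; /* parent must stay cached while it has children */
        parent->nchildren++;
        if (child->is_dirty)
            parent->ndirty_children++;
        assert(parent->ndirty_children <= parent->nchildren);
    }

    /* Tear the dependency down again; unpin the parent when its last child goes */
    static void
    fd_destroy(fd_entry_t *parent, fd_entry_t *child)
    {
        assert(parent->nchildren > 0);
        parent->nchildren--;
        if (child->is_dirty) {
            assert(parent->ndirty_children > 0);
            parent->ndirty_children--;
        }
        if (parent->nchildren == 0)
            parent->is_pinned = false;
    }

    int
    main(void)
    {
        fd_entry_t parent = {.is_dirty = false, .is_pinned = false, .nchildren = 0, .ndirty_children = 0};
        fd_entry_t child  = {.is_dirty = true, .is_pinned = false, .nchildren = 0, .ndirty_children = 0};

        fd_create(&parent, &child);
        printf("after create:  pinned=%d nchildren=%u ndirty=%u\n", parent.is_pinned, parent.nchildren,
               parent.ndirty_children);

        fd_destroy(&parent, &child);
        printf("after destroy: pinned=%d nchildren=%u ndirty=%u\n", parent.is_pinned, parent.nchildren,
               parent.ndirty_children);

        return 0;
    }

Running it prints the counter state after each call, which mirrors the pre- and post-conditions asserted in the patch.
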
/*------------------------------------------------------------------------- - * Function: H5C_get_cache_image_config + * Function: H5C__get_cache_image_config * * Purpose: Copy the current configuration for cache image generation * on file close into the instance of H5C_cache_image_ctl_t @@ -910,11 +528,11 @@ H5C__free_image_entries_array(H5C_t *cache_ptr) *------------------------------------------------------------------------- */ herr_t -H5C_get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr) +H5C__get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr) { herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(FAIL) + FUNC_ENTER_PACKAGE if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry") @@ -925,73 +543,7 @@ H5C_get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config done: FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_get_cache_image_config() */ - -/*------------------------------------------------------------------------- - * Function: H5C_image_stats - * - * Purpose: Prints statistics specific to the cache image. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 10/26/15 - * - *------------------------------------------------------------------------- - */ -herr_t -#if H5C_COLLECT_CACHE_STATS -H5C_image_stats(H5C_t *cache_ptr, hbool_t print_header) -#else /* H5C_COLLECT_CACHE_STATS */ -H5C_image_stats(H5C_t *cache_ptr, hbool_t H5_ATTR_UNUSED print_header) -#endif /* H5C_COLLECT_CACHE_STATS */ -{ -#if H5C_COLLECT_CACHE_STATS - int i; - int64_t total_hits = 0; - int64_t total_misses = 0; - double hit_rate; - double prefetch_use_rate; -#endif /* H5C_COLLECT_CACHE_STATS */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - if (!cache_ptr || cache_ptr->magic != H5C__H5C_T_MAGIC) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr") - -#if H5C_COLLECT_CACHE_STATS - for (i = 0; i <= cache_ptr->max_type_id; i++) { - total_hits += cache_ptr->hits[i]; - total_misses += cache_ptr->misses[i]; - } /* end for */ - - if ((total_hits > 0) || (total_misses > 0)) - hit_rate = 100.0 * ((double)(total_hits)) / ((double)(total_hits + total_misses)); - else - hit_rate = 0.0; - - if (cache_ptr->prefetches > 0) - prefetch_use_rate = 100.0 * ((double)(cache_ptr->prefetch_hits)) / ((double)(cache_ptr->prefetches)); - else - prefetch_use_rate = 0.0; - - if (print_header) { - HDfprintf(stdout, "\nhit prefetches prefetch image pf hit\n"); - HDfprintf(stdout, "rate: total: dirty: hits: flshs: evct: size: rate:\n"); - } /* end if */ - - HDfprintf(stdout, "%3.1lf %5lld %5lld %5lld %5lld %5lld %5lld %3.1lf\n", hit_rate, - (long long)(cache_ptr->prefetches), (long long)(cache_ptr->dirty_prefetches), - (long long)(cache_ptr->prefetch_hits), - (long long)(cache_ptr->flushes[H5AC_PREFETCHED_ENTRY_ID]), - (long long)(cache_ptr->evictions[H5AC_PREFETCHED_ENTRY_ID]), - (long long)(cache_ptr->last_image_size), prefetch_use_rate); -#endif /* H5C_COLLECT_CACHE_STATS */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_image_stats() */ +} /* H5C__get_cache_image_config() */ /*------------------------------------------------------------------------- * Function: H5C__read_cache_image @@ -2009,138 +1561,6 @@ H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint } /* H5C__decode_cache_image_entry() */ #endif 
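
The statistics dropped from H5Cimage.c above (H5C__UPDATE_STATS_FOR_PREFETCH, H5C__UPDATE_STATS_FOR_PREFETCH_HIT, and the removed H5C_image_stats()) boil down to two guarded percentages: a hit rate over hits plus misses, and a prefetch use rate over total prefetches. The standalone sketch below reproduces just that arithmetic with made-up counter values; in the library the counters live in the H5C_t statistics fields and are only collected when H5C_COLLECT_CACHE_STATS is defined.

    #include <stdio.h>

    int
    main(void)
    {
        /* Example counter values only; not taken from a real cache */
        long long total_hits    = 900;
        long long total_misses  = 100;
        long long prefetches    = 40;
        long long prefetch_hits = 25;
        double    hit_rate;
        double    prefetch_use_rate;

        /* Guard both divisions against zero denominators, as the original code does */
        if (total_hits > 0 || total_misses > 0)
            hit_rate = 100.0 * (double)total_hits / (double)(total_hits + total_misses);
        else
            hit_rate = 0.0;

        if (prefetches > 0)
            prefetch_use_rate = 100.0 * (double)prefetch_hits / (double)prefetches;
        else
            prefetch_use_rate = 0.0;

        printf("hit rate: %3.1f%%  prefetch use rate: %3.1f%%\n", hit_rate, prefetch_use_rate);

        return 0;
    }

With the example values it prints a 90.0% hit rate and a 62.5% prefetch use rate.
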
-/*------------------------------------------------------------------------- - * Function: H5C__destroy_pf_entry_child_flush_deps() - * - * Purpose: Destroy all flush dependencies in this the supplied - * prefetched entry is the parent. Note that the children - * in these flush dependencies must be prefetched entries as - * well. - * - * As this action is part of the process of transferring all - * such flush dependencies to the deserialized version of the - * prefetched entry, ensure that the data necessary to complete - * the transfer is retained. - * - * Note: The current implementation of this function is - * quite inefficient -- mostly due to the current - * implementation of flush dependencies. This should - * be fixed at some point. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: John Mainzer - * 8/11/15 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr, - H5C_cache_entry_t **fd_children) -{ - H5C_cache_entry_t *entry_ptr; -#ifndef NDEBUG - unsigned entries_visited = 0; -#endif - int fd_children_found = 0; - hbool_t found; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity checks */ - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - HDassert(pf_entry_ptr); - HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(pf_entry_ptr->type); - HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); - HDassert(pf_entry_ptr->prefetched); - HDassert(pf_entry_ptr->fd_child_count > 0); - HDassert(fd_children); - - /* Scan each entry on the index list */ - entry_ptr = cache_ptr->il_head; - while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - - /* Here we look at entry_ptr->flush_dep_nparents and not - * entry_ptr->fd_parent_count as it is possible that some - * or all of the prefetched flush dependency child relationships - * have already been destroyed. - */ - if (entry_ptr->prefetched && (entry_ptr->flush_dep_nparents > 0)) { - unsigned u; /* Local index variable */ - - /* Re-init */ - u = 0; - found = FALSE; - - /* Sanity checks */ - HDassert(entry_ptr->type); - HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); - HDassert(entry_ptr->fd_parent_count >= entry_ptr->flush_dep_nparents); - HDassert(entry_ptr->fd_parent_addrs); - HDassert(entry_ptr->flush_dep_parent); - - /* Look for correct entry */ - while (!found && (u < entry_ptr->fd_parent_count)) { - /* Sanity check entry */ - HDassert(entry_ptr->flush_dep_parent[u]); - HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - - /* Correct entry? */ - if (pf_entry_ptr == entry_ptr->flush_dep_parent[u]) - found = TRUE; - - u++; - } /* end while */ - - if (found) { - HDassert(NULL == fd_children[fd_children_found]); - - /* Remove flush dependency */ - fd_children[fd_children_found] = entry_ptr; - fd_children_found++; - if (H5C_destroy_flush_dependency(pf_entry_ptr, entry_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, - "can't destroy pf entry child flush dependency") - -#ifndef NDEBUG - /* Sanity check -- verify that the address of the parent - * appears in entry_ptr->fd_parent_addrs. Must do a search, - * as with flush dependency creates and destroys, - * entry_ptr->fd_parent_addrs and entry_ptr->flush_dep_parent - * can list parents in different order. 
-                     */
-                    found = FALSE;
-                    u     = 0;
-                    while (!found && u < entry_ptr->fd_parent_count) {
-                        if (pf_entry_ptr->addr == entry_ptr->fd_parent_addrs[u])
-                            found = TRUE;
-                        u++;
-                    } /* end while */
-                    HDassert(found);
-#endif
-                } /* end if */
-            }     /* end if */
-
-#ifndef NDEBUG
-        entries_visited++;
-#endif
-        entry_ptr = entry_ptr->il_next;
-    } /* end while */
-
-    /* Post-op sanity checks */
-    HDassert(NULL == fd_children[fd_children_found]);
-    HDassert((unsigned)fd_children_found == pf_entry_ptr->fd_child_count);
-    HDassert(entries_visited == cache_ptr->index_len);
-    HDassert(!pf_entry_ptr->is_pinned);
-
-done:
-    FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__destroy_pf_entry_child_flush_deps() */
-
 /*-------------------------------------------------------------------------
  * Function:    H5C__encode_cache_image_header()
  *
diff --git a/src/H5Cint.c b/src/H5Cint.c
new file mode 100644
index 00000000000..5200ac6d103
--- /dev/null
+++ b/src/H5Cint.c
@@ -0,0 +1,2578 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group.                                               *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * This file is part of HDF5.  The full HDF5 copyright notice, including     *
+ * terms governing use, modification, and redistribution, is contained in    *
+ * the COPYING file, which can be found at the root of the source code       *
+ * distribution tree, or in https://www.hdfgroup.org/licenses.               *
+ * If you do not have access to either file, you may request a copy from     *
+ * help@hdfgroup.org.                                                        *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created:     H5Cint.c
+ *              May 8 2023
+ *              Quincey Koziol
+ *
+ * Purpose:     Internal routines for the cache.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Cmodule.h" /* This source code file is part of the H5C module */
+#define H5F_FRIEND     /* suppress error about including H5Fpkg */
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h"   /* Generic Functions                        */
+#include "H5Cpkg.h"      /* Cache                                    */
+#include "H5Eprivate.h"  /* Error handling                           */
+#include "H5Fpkg.h"      /* Files                                    */
+#include "H5MFprivate.h" /* File memory management                   */
+
+/****************/
+/* Local Macros */
+/****************/
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Prototypes */
+/********************/
+static herr_t H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr,
+                                      size_t *new_max_cache_size_ptr, hbool_t write_permitted);
+static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr);
+static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted);
+static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr);
+static herr_t H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);
+static herr_t H5C__serialize_ring(H5F_t *f, H5C_ring_t ring);
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/*-------------------------------------------------------------------------
+ * Function:    H5C__auto_adjust_cache_size
+ *
+ * Purpose:     Obtain the current full cache hit rate, and compare it
+ *              with the
hit rate thresholds for modifying cache size. + * If one of the thresholds has been crossed, adjusts the + * size of the cache accordingly. + * + * The function then resets the full cache hit rate + * statistics, and exits. + * + * Return: Non-negative on success/Negative on failure or if there was + * an attempt to flush a protected item. + * + * + * Programmer: John Mainzer, 10/7/04 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) +{ + H5C_t *cache_ptr = f->shared->cache; + hbool_t reentrant_call = FALSE; + hbool_t inserted_epoch_marker = FALSE; + size_t new_max_cache_size = 0; + size_t old_max_cache_size = 0; + size_t new_min_clean_size = 0; + size_t old_min_clean_size = 0; + double hit_rate; + enum H5C_resize_status status = in_spec; /* will change if needed */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(f); + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length); + HDassert(0.0 <= cache_ptr->resize_ctl.min_clean_fraction); + HDassert(cache_ptr->resize_ctl.min_clean_fraction <= 100.0); + + /* check to see if cache_ptr->resize_in_progress is TRUE. If it, this + * is a re-entrant call via a client callback called in the resize + * process. To avoid an infinite recursion, set reentrant_call to + * TRUE, and goto done. + */ + if (cache_ptr->resize_in_progress) { + reentrant_call = TRUE; + HGOTO_DONE(SUCCEED) + } /* end if */ + + cache_ptr->resize_in_progress = TRUE; + + if (!cache_ptr->resize_enabled) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled") + + HDassert((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) || + (cache_ptr->resize_ctl.decr_mode != H5C_decr__off)); + + if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate") + + HDassert((0.0 <= hit_rate) && (hit_rate <= 1.0)); + + switch (cache_ptr->resize_ctl.incr_mode) { + case H5C_incr__off: + if (cache_ptr->size_increase_possible) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?") + break; + + case H5C_incr__threshold: + if (hit_rate < cache_ptr->resize_ctl.lower_hr_threshold) { + if (!cache_ptr->size_increase_possible) + status = increase_disabled; + else if (cache_ptr->max_cache_size >= cache_ptr->resize_ctl.max_size) { + HDassert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.max_size); + status = at_max_size; + } + else if (!cache_ptr->cache_full) + status = not_full; + else { + new_max_cache_size = + (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.increment); + + /* clip to max size if necessary */ + if (new_max_cache_size > cache_ptr->resize_ctl.max_size) + new_max_cache_size = cache_ptr->resize_ctl.max_size; + + /* clip to max increment if necessary */ + if (cache_ptr->resize_ctl.apply_max_increment && + ((cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment) < + new_max_cache_size)) + new_max_cache_size = cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment; + + status = increase; + } + } + break; + + default: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode") + } + + /* If the decr_mode is either age out or age out with threshold, we + * must run the marker maintenance code, whether we run the size + * reduction code or not. 
We do this in two places -- here we + * insert a new marker if the number of active epoch markers is + * is less than the current epochs before eviction, and after + * the ageout call, we cycle the markers. + * + * However, we can't call the ageout code or cycle the markers + * unless there was a full complement of markers in place on + * entry. The inserted_epoch_marker flag is used to track this. + */ + + if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || + (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) && + (cache_ptr->epoch_markers_active < cache_ptr->resize_ctl.epochs_before_eviction)) { + + if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker") + + inserted_epoch_marker = TRUE; + } + + /* don't run the cache size decrease code unless the cache size + * increase code is disabled, or the size increase code sees no need + * for action. In either case, status == in_spec at this point. + */ + + if (status == in_spec) { + switch (cache_ptr->resize_ctl.decr_mode) { + case H5C_decr__off: + break; + + case H5C_decr__threshold: + if (hit_rate > cache_ptr->resize_ctl.upper_hr_threshold) { + if (!cache_ptr->size_decrease_possible) + status = decrease_disabled; + else if (cache_ptr->max_cache_size <= cache_ptr->resize_ctl.min_size) { + HDassert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.min_size); + status = at_min_size; + } + else { + new_max_cache_size = + (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.decrement); + + /* clip to min size if necessary */ + if (new_max_cache_size < cache_ptr->resize_ctl.min_size) + new_max_cache_size = cache_ptr->resize_ctl.min_size; + + /* clip to max decrement if necessary */ + if (cache_ptr->resize_ctl.apply_max_decrement && + ((cache_ptr->resize_ctl.max_decrement + new_max_cache_size) < + cache_ptr->max_cache_size)) + new_max_cache_size = + cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement; + + status = decrease; + } + } + break; + + case H5C_decr__age_out_with_threshold: + case H5C_decr__age_out: + if (!inserted_epoch_marker) { + if (!cache_ptr->size_decrease_possible) + status = decrease_disabled; + else { + if (H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size, + write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed") + } /* end else */ + } /* end if */ + break; + + default: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode") + } + } + + /* cycle the epoch markers here if appropriate */ + if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || + (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) && + !inserted_epoch_marker) + /* move last epoch marker to the head of the LRU list */ + if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker") + + if ((status == increase) || (status == decrease)) { + old_max_cache_size = cache_ptr->max_cache_size; + old_min_clean_size = cache_ptr->min_clean_size; + + new_min_clean_size = + (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction)); + + /* new_min_clean_size is of size_t, and thus must be non-negative. + * Hence we have + * + * ( 0 <= new_min_clean_size ). + * + * by definition. 
+ */ + HDassert(new_min_clean_size <= new_max_cache_size); + HDassert(cache_ptr->resize_ctl.min_size <= new_max_cache_size); + HDassert(new_max_cache_size <= cache_ptr->resize_ctl.max_size); + + cache_ptr->max_cache_size = new_max_cache_size; + cache_ptr->min_clean_size = new_min_clean_size; + + if (status == increase) + cache_ptr->cache_full = FALSE; + else if (status == decrease) + cache_ptr->size_decreased = TRUE; + + /* update flash cache size increase fields as appropriate */ + if (cache_ptr->flash_size_increase_possible) { + switch (cache_ptr->resize_ctl.flash_incr_mode) { + case H5C_flash_incr__off: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, + "flash_size_increase_possible but H5C_flash_incr__off?!") + break; + + case H5C_flash_incr__add_space: + cache_ptr->flash_size_increase_threshold = + (size_t)(((double)(cache_ptr->max_cache_size)) * + (cache_ptr->resize_ctl.flash_threshold)); + break; + + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") + break; + } + } + } + + if (cache_ptr->resize_ctl.rpt_fcn != NULL) + (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status, + old_max_cache_size, new_max_cache_size, old_min_clean_size, + new_min_clean_size); + + if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) + /* this should be impossible... */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed") + +done: + /* Sanity checks */ + HDassert(cache_ptr->resize_in_progress); + if (!reentrant_call) + cache_ptr->resize_in_progress = FALSE; + HDassert((!reentrant_call) || (cache_ptr->resize_in_progress)); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__auto_adjust_cache_size() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout + * + * Purpose: Implement the ageout automatic cache size decrement + * algorithm. Note that while this code evicts aged out + * entries, the code does not change the maximum cache size. + * Instead, the function simply computes the new value (if + * any change is indicated) and reports this value in + * *new_max_cache_size_ptr. + * + * Return: Non-negative on success/Negative on failure or if there was + * an attempt to flush a protected item. + * + * + * Programmer: John Mainzer, 11/18/04 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr, + size_t *new_max_cache_size_ptr, hbool_t write_permitted) +{ + H5C_t *cache_ptr = f->shared->cache; + size_t test_size; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(f); + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert((status_ptr) && (*status_ptr == in_spec)); + HDassert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0)); + + /* remove excess epoch markers if any */ + if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) + if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers") + + if ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) || + ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold) && + (hit_rate >= cache_ptr->resize_ctl.upper_hr_threshold))) { + + if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.min_size) { + /* evict aged out cache entries if appropriate... 
*/ + if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries") + + /* ... and then reduce cache size if appropriate */ + if (cache_ptr->index_size < cache_ptr->max_cache_size) { + if (cache_ptr->resize_ctl.apply_empty_reserve) { + test_size = + (size_t)(((double)cache_ptr->index_size) / (1 - cache_ptr->resize_ctl.empty_reserve)); + if (test_size < cache_ptr->max_cache_size) { + *status_ptr = decrease; + *new_max_cache_size_ptr = test_size; + } + } + else { + *status_ptr = decrease; + *new_max_cache_size_ptr = cache_ptr->index_size; + } + + if (*status_ptr == decrease) { + /* clip to min size if necessary */ + if (*new_max_cache_size_ptr < cache_ptr->resize_ctl.min_size) + *new_max_cache_size_ptr = cache_ptr->resize_ctl.min_size; + + /* clip to max decrement if necessary */ + if ((cache_ptr->resize_ctl.apply_max_decrement) && + ((cache_ptr->resize_ctl.max_decrement + *new_max_cache_size_ptr) < + cache_ptr->max_cache_size)) + *new_max_cache_size_ptr = + cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement; + } + } + } + else + *status_ptr = at_min_size; + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__autoadjust__ageout() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout__cycle_epoch_marker + * + * Purpose: Remove the oldest epoch marker from the LRU list, + * and reinsert it at the head of the LRU list. Also + * remove the epoch marker's index from the head of the + * ring buffer, and re-insert it at the tail of the ring + * buffer. + * + * Return: SUCCEED on success/FAIL on failure. + * + * Programmer: John Mainzer, 11/22/04 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr) +{ + int i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + if (cache_ptr->epoch_markers_active <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?") + + /* remove the last marker from both the ring buffer and the LRU list */ + i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first]; + cache_ptr->epoch_marker_ringbuf_first = + (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); + if (cache_ptr->epoch_marker_ringbuf_size <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") + + cache_ptr->epoch_marker_ringbuf_size -= 1; + if (cache_ptr->epoch_marker_active[i] != TRUE) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") + + H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr, + (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL)) + + /* now, re-insert it at the head of the LRU list, and at the tail of + * the ring buffer. 
+ */ + HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); + HDassert(cache_ptr->epoch_markers[i].next == NULL); + HDassert(cache_ptr->epoch_markers[i].prev == NULL); + + cache_ptr->epoch_marker_ringbuf_last = + (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1); + cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_last] = i; + if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow") + + cache_ptr->epoch_marker_ringbuf_size += 1; + + H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C__autoadjust__ageout__cycle_epoch_marker() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout__evict_aged_out_entries + * + * Purpose: Evict clean entries in the cache that haven't + * been accessed for at least + * cache_ptr->resize_ctl.epochs_before_eviction epochs, + * and flush dirty entries that haven't been accessed for + * that amount of time. + * + * Depending on configuration, the function will either + * flush or evict all such entries, or all such entries it + * encounters until it has freed the maximum amount of space + * allowed under the maximum decrement. + * + * If we are running in parallel mode, writes may not be + * permitted. If so, the function simply skips any dirty + * entries it may encounter. + * + * The function makes no attempt to maintain the minimum + * clean size, as there is no guarantee that the cache size + * will be changed. + * + * If there is no cache size change, the minimum clean size + * constraint will be met through a combination of clean + * entries and free space in the cache. + * + * If there is a cache size reduction, the minimum clean size + * will be re-calculated, and will be enforced the next time + * we have to make space in the cache. + * + * Return: Non-negative on success/Negative on failure. + * + * Programmer: John Mainzer, 11/22/04 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted) +{ + H5C_t *cache_ptr = f->shared->cache; + size_t eviction_size_limit; + size_t bytes_evicted = 0; + hbool_t prev_is_dirty = FALSE; + hbool_t restart_scan; + H5C_cache_entry_t *entry_ptr; + H5C_cache_entry_t *next_ptr; + H5C_cache_entry_t *prev_ptr; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(f); + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + /* if there is a limit on the amount that the cache size can be decrease + * in any one round of the cache size reduction algorithm, load that + * limit into eviction_size_limit. Otherwise, set eviction_size_limit + * to the equivalent of infinity. The current size of the index will + * do nicely. + */ + if (cache_ptr->resize_ctl.apply_max_decrement) + eviction_size_limit = cache_ptr->resize_ctl.max_decrement; + else + eviction_size_limit = cache_ptr->index_size; /* i.e. 
infinity */ + + if (write_permitted) { + restart_scan = FALSE; + entry_ptr = cache_ptr->LRU_tail_ptr; + while (entry_ptr != NULL && entry_ptr->type->id != H5AC_EPOCH_MARKER_ID && + bytes_evicted < eviction_size_limit) { + hbool_t skipping_entry = FALSE; + + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(!(entry_ptr->is_protected)); + HDassert(!(entry_ptr->is_read_only)); + HDassert((entry_ptr->ro_ref_count) == 0); + + next_ptr = entry_ptr->next; + prev_ptr = entry_ptr->prev; + + if (prev_ptr != NULL) + prev_is_dirty = prev_ptr->is_dirty; + + if (entry_ptr->is_dirty) { + HDassert(!entry_ptr->prefetched_dirty); + + /* dirty corked entry is skipped */ + if (entry_ptr->tag_info && entry_ptr->tag_info->corked) + skipping_entry = TRUE; + else { + /* reset entries_removed_counter and + * last_entry_removed_ptr prior to the call to + * H5C__flush_single_entry() so that we can spot + * unexpected removals of entries from the cache, + * and set the restart_scan flag if proceeding + * would be likely to cause us to scan an entry + * that is no longer in the cache. + */ + cache_ptr->entries_removed_counter = 0; + cache_ptr->last_entry_removed_ptr = NULL; + + if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") + + if (cache_ptr->entries_removed_counter > 1 || + cache_ptr->last_entry_removed_ptr == prev_ptr) + restart_scan = TRUE; + } /* end else */ + } /* end if */ + else if (!entry_ptr->prefetched_dirty) { + bytes_evicted += entry_ptr->size; + + if (H5C__flush_single_entry( + f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") + } /* end else-if */ + else { + HDassert(!entry_ptr->is_dirty); + HDassert(entry_ptr->prefetched_dirty); + + skipping_entry = TRUE; + } /* end else */ + + if (prev_ptr != NULL) { + if (skipping_entry) + entry_ptr = prev_ptr; + else if (restart_scan || (prev_ptr->is_dirty != prev_is_dirty) || + (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) { + /* Something has happened to the LRU -- start over + * from the tail. + */ + restart_scan = FALSE; + entry_ptr = cache_ptr->LRU_tail_ptr; + + H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) + } /* end else-if */ + else + entry_ptr = prev_ptr; + } /* end if */ + else + entry_ptr = NULL; + } /* end while */ + + /* for now at least, don't bother to maintain the minimum clean size, + * as the cache should now be less than its maximum size. Due to + * the vaguries of the cache size reduction algorithm, we may not + * reduce the size of the cache. + * + * If we do, we will calculate a new minimum clean size, which will + * be enforced the next time we try to make space in the cache. + * + * If we don't, no action is necessary, as we have just evicted and/or + * or flushed a bunch of entries and therefore the sum of the clean + * and free space in the cache must be greater than or equal to the + * min clean space requirement (assuming that requirement was met on + * entry). + */ + } /* end if */ + else /* ! write_permitted */ { + /* Since we are not allowed to write, all we can do is evict + * any clean entries that we may encounter before we either + * hit the eviction size limit, or encounter the epoch marker. + * + * If we are operating read only, this isn't an issue, as there + * will not be any dirty entries. 
+ * + * If we are operating in R/W mode, all the dirty entries we + * skip will be flushed the next time we attempt to make space + * when writes are permitted. This may have some local + * performance implications, but it shouldn't cause any net + * slowdown. + */ + HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS); + entry_ptr = cache_ptr->LRU_tail_ptr; + while (entry_ptr != NULL && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) && + (bytes_evicted < eviction_size_limit)) { + HDassert(!(entry_ptr->is_protected)); + + prev_ptr = entry_ptr->prev; + + if (!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty)) + if (H5C__flush_single_entry( + f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry") + + /* just skip the entry if it is dirty, as we can't do + * anything with it now since we can't write. + * + * Since all entries are clean, serialize() will not be called, + * and thus we needn't test to see if the LRU has been changed + * out from under us. + */ + entry_ptr = prev_ptr; + } /* end while */ + } /* end else */ + + if (cache_ptr->index_size < cache_ptr->max_cache_size) + cache_ptr->cache_full = FALSE; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__autoadjust__ageout__evict_aged_out_entries() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout__insert_new_marker + * + * Purpose: Find an unused marker cache entry, mark it as used, and + * insert it at the head of the LRU list. Also add the + * marker's index in the epoch_markers array. + * + * Return: SUCCEED on success/FAIL on failure. + * + * Programmer: John Mainzer, 11/19/04 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr) +{ + int i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + if (cache_ptr->epoch_markers_active >= cache_ptr->resize_ctl.epochs_before_eviction) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers") + + /* find an unused marker */ + i = 0; + while ((cache_ptr->epoch_marker_active)[i] && i < H5C__MAX_EPOCH_MARKERS) + i++; + if (i >= H5C__MAX_EPOCH_MARKERS) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker") + + HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i); + HDassert(((cache_ptr->epoch_markers)[i]).next == NULL); + HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL); + + (cache_ptr->epoch_marker_active)[i] = TRUE; + + cache_ptr->epoch_marker_ringbuf_last = + (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1); + (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i; + if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow") + + cache_ptr->epoch_marker_ringbuf_size += 1; + + H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) + + cache_ptr->epoch_markers_active += 1; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__autoadjust__ageout__insert_new_marker() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout__remove_all_markers + * + * Purpose: Remove all epoch 
markers from the LRU list and mark them + * as inactive. + * + * Return: SUCCEED on success/FAIL on failure. + * + * Programmer: John Mainzer, 11/22/04 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr) +{ + int ring_buf_index; + int i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + while (cache_ptr->epoch_markers_active > 0) { + /* get the index of the last epoch marker in the LRU list + * and remove it from the ring buffer. + */ + + ring_buf_index = cache_ptr->epoch_marker_ringbuf_first; + i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index]; + + cache_ptr->epoch_marker_ringbuf_first = + (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); + + if (cache_ptr->epoch_marker_ringbuf_size <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") + cache_ptr->epoch_marker_ringbuf_size -= 1; + + if (cache_ptr->epoch_marker_active[i] != TRUE) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") + + /* remove the epoch marker from the LRU list */ + H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) + + /* mark the epoch marker as unused. */ + cache_ptr->epoch_marker_active[i] = FALSE; + + HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); + HDassert(cache_ptr->epoch_markers[i].next == NULL); + HDassert(cache_ptr->epoch_markers[i].prev == NULL); + + /* decrement the number of active epoch markers */ + cache_ptr->epoch_markers_active -= 1; + + HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size); + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__autoadjust__ageout__remove_all_markers() */ + +/*------------------------------------------------------------------------- + * Function: H5C__autoadjust__ageout__remove_excess_markers + * + * Purpose: Remove epoch markers from the end of the LRU list and + * mark them as inactive until the number of active markers + * equals the current value of + * cache_ptr->resize_ctl.epochs_before_eviction. + * + * Return: SUCCEED on success/FAIL on failure. + * + * Programmer: John Mainzer, 11/19/04 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr) +{ + int ring_buf_index; + int i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + if (cache_ptr->epoch_markers_active <= cache_ptr->resize_ctl.epochs_before_eviction) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry") + + while (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) { + /* get the index of the last epoch marker in the LRU list + * and remove it from the ring buffer. 
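+         *
+         * The ring buffer is used as a FIFO: markers are enqueued at
+         * epoch_marker_ringbuf_last by
+         * H5C__autoadjust__ageout__insert_new_marker() and dequeued at
+         * epoch_marker_ringbuf_first here, so the oldest marker is always
+         * removed first.  The dequeue below amounts to:
+         *
+         *     i     = ringbuf[first];
+         *     first = (first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+         *     size -= 1;
+         *
+         * where ringbuf, first, and size stand for the cache's
+         * epoch_marker_ringbuf, epoch_marker_ringbuf_first, and
+         * epoch_marker_ringbuf_size fields.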
+ */ + ring_buf_index = cache_ptr->epoch_marker_ringbuf_first; + i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index]; + + cache_ptr->epoch_marker_ringbuf_first = + (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1); + + if (cache_ptr->epoch_marker_ringbuf_size <= 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow") + cache_ptr->epoch_marker_ringbuf_size -= 1; + + if (cache_ptr->epoch_marker_active[i] != TRUE) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") + + /* remove the epoch marker from the LRU list */ + H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr, + cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL) + + /* mark the epoch marker as unused. */ + cache_ptr->epoch_marker_active[i] = FALSE; + + HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i); + HDassert(cache_ptr->epoch_markers[i].next == NULL); + HDassert(cache_ptr->epoch_markers[i].prev == NULL); + + /* decrement the number of active epoch markers */ + cache_ptr->epoch_markers_active -= 1; + + HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size); + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__autoadjust__ageout__remove_excess_markers() */ + +/*------------------------------------------------------------------------- + * Function: H5C__flash_increase_cache_size + * + * Purpose: If there is not at least new_entry_size - old_entry_size + * bytes of free space in the cache and the current + * max_cache_size is less than cache_ptr->resize_ctl.max_size, + * perform a flash increase in the cache size and then reset + * the full cache hit rate statistics, and exit. + * + * Return: Non-negative on success/Negative on failure. + * + * Programmer: John Mainzer, 12/31/07 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size) +{ + size_t new_max_cache_size = 0; + size_t old_max_cache_size = 0; + size_t new_min_clean_size = 0; + size_t old_min_clean_size = 0; + size_t space_needed; + enum H5C_resize_status status = flash_increase; /* may change */ + double hit_rate; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(cache_ptr->flash_size_increase_possible); + HDassert(new_entry_size > cache_ptr->flash_size_increase_threshold); + HDassert(old_entry_size < new_entry_size); + + if (old_entry_size >= new_entry_size) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size") + + space_needed = new_entry_size - old_entry_size; + if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) && + (cache_ptr->max_cache_size < cache_ptr->resize_ctl.max_size)) { + switch (cache_ptr->resize_ctl.flash_incr_mode) { + case H5C_flash_incr__off: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, + "flash_size_increase_possible but H5C_flash_incr__off?!") + break; + + case H5C_flash_incr__add_space: + if (cache_ptr->index_size < cache_ptr->max_cache_size) { + HDassert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed); + space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size; + } + space_needed = (size_t)(((double)space_needed) * cache_ptr->resize_ctl.flash_multiple); + new_max_cache_size = cache_ptr->max_cache_size + space_needed; + break; + + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, 
H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") + break; + } + + if (new_max_cache_size > cache_ptr->resize_ctl.max_size) + new_max_cache_size = cache_ptr->resize_ctl.max_size; + HDassert(new_max_cache_size > cache_ptr->max_cache_size); + + new_min_clean_size = (size_t)((double)new_max_cache_size * cache_ptr->resize_ctl.min_clean_fraction); + HDassert(new_min_clean_size <= new_max_cache_size); + + old_max_cache_size = cache_ptr->max_cache_size; + old_min_clean_size = cache_ptr->min_clean_size; + + cache_ptr->max_cache_size = new_max_cache_size; + cache_ptr->min_clean_size = new_min_clean_size; + + /* update flash cache size increase fields as appropriate */ + HDassert(cache_ptr->flash_size_increase_possible); + + switch (cache_ptr->resize_ctl.flash_incr_mode) { + case H5C_flash_incr__off: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, + "flash_size_increase_possible but H5C_flash_incr__off?!") + break; + + case H5C_flash_incr__add_space: + cache_ptr->flash_size_increase_threshold = + (size_t)((double)cache_ptr->max_cache_size * cache_ptr->resize_ctl.flash_threshold); + break; + + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?") + break; + } + + /* note that we don't cycle the epoch markers. We can + * argue either way as to whether we should, but for now + * we don't. + */ + + if (cache_ptr->resize_ctl.rpt_fcn != NULL) { + /* get the hit rate for the reporting function. Should still + * be good as we haven't reset the hit rate statistics. + */ + if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate") + + (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status, + old_max_cache_size, new_max_cache_size, old_min_clean_size, + new_min_clean_size); + } + + if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0) + /* this should be impossible... */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed") + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__flash_increase_cache_size() */ + +/*------------------------------------------------------------------------- + * Function: H5C__flush_invalidate_cache + * + * Purpose: Flush and destroy the entries contained in the target + * cache. + * + * If the cache contains protected entries, the function will + * fail, as protected entries cannot be either flushed or + * destroyed. However all unprotected entries should be + * flushed and destroyed before the function returns failure. + * + * While pinned entries can usually be flushed, they cannot + * be destroyed. However, they should be unpinned when all + * the entries that reference them have been destroyed (thus + * reduding the pinned entry's reference count to 0, allowing + * it to be unpinned). + * + * If pinned entries are present, the function makes repeated + * passes through the cache, flushing all dirty entries + * (including the pinned dirty entries where permitted) and + * destroying all unpinned entries. This process is repeated + * until either the cache is empty, or the number of pinned + * entries stops decreasing on each pass. + * + * Return: Non-negative on success/Negative on failure or if there was + * a request to flush all items and something was protected. 
+ * + * Programmer: John Mainzer + * 3/24/05 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) +{ + H5C_t *cache_ptr; + H5C_ring_t ring; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + HDassert(f); + HDassert(f->shared); + cache_ptr = f->shared->cache; + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(cache_ptr->slist_ptr); + HDassert(cache_ptr->slist_enabled); + +#ifdef H5C_DO_SANITY_CHECKS + { + int32_t i; + uint32_t index_len = 0; + uint32_t slist_len = 0; + size_t index_size = (size_t)0; + size_t clean_index_size = (size_t)0; + size_t dirty_index_size = (size_t)0; + size_t slist_size = (size_t)0; + + HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0); + HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0); + HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + + for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) { + index_len += cache_ptr->index_ring_len[i]; + index_size += cache_ptr->index_ring_size[i]; + clean_index_size += cache_ptr->clean_index_ring_size[i]; + dirty_index_size += cache_ptr->dirty_index_ring_size[i]; + + slist_len += cache_ptr->slist_ring_len[i]; + slist_size += cache_ptr->slist_ring_size[i]; + } /* end for */ + + HDassert(cache_ptr->index_len == index_len); + HDassert(cache_ptr->index_size == index_size); + HDassert(cache_ptr->clean_index_size == clean_index_size); + HDassert(cache_ptr->dirty_index_size == dirty_index_size); + HDassert(cache_ptr->slist_len == slist_len); + HDassert(cache_ptr->slist_size == slist_size); + } +#endif /* H5C_DO_SANITY_CHECKS */ + + /* remove ageout markers if present */ + if (cache_ptr->epoch_markers_active > 0) + if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers") + + /* flush invalidate each ring, starting from the outermost ring and + * working inward. 
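+     *
+     * In terms of the H5C_ring_t values, the loop below simply walks the
+     * rings in increasing order:
+     *
+     *     H5C_RING_USER -> H5C_RING_RDFSM -> H5C_RING_MDFSM
+     *                   -> H5C_RING_SBE   -> H5C_RING_SB
+     *
+     * so ordinary user-level metadata is flushed and destroyed first, and
+     * the superblock ring goes last.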
+ */ + ring = H5C_RING_USER; + while (ring < H5C_RING_NTYPES) { + if (H5C__flush_invalidate_ring(f, ring, flags) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed") + ring++; + } /* end while */ + +#ifndef NDEBUG + /* Invariants, after destroying all entries in the hash table */ + if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) { + HDassert(cache_ptr->index_size == 0); + HDassert(cache_ptr->clean_index_size == 0); + HDassert(cache_ptr->pel_len == 0); + HDassert(cache_ptr->pel_size == 0); + } /* end if */ + else { + H5C_cache_entry_t *entry_ptr; /* Cache entry */ + unsigned u; /* Local index variable */ + + /* All rings except ring 4 should be empty now */ + /* (Ring 4 has the superblock) */ + for (u = H5C_RING_USER; u < H5C_RING_SB; u++) { + HDassert(cache_ptr->index_ring_len[u] == 0); + HDassert(cache_ptr->index_ring_size[u] == 0); + HDassert(cache_ptr->clean_index_ring_size[u] == 0); + } /* end for */ + + /* Check that any remaining pinned entries are in the superblock ring */ + entry_ptr = cache_ptr->pel_head_ptr; + while (entry_ptr) { + /* Check ring */ + HDassert(entry_ptr->ring == H5C_RING_SB); + + /* Advance to next entry in pinned entry list */ + entry_ptr = entry_ptr->next; + } /* end while */ + } /* end else */ + + HDassert(cache_ptr->dirty_index_size == 0); + HDassert(cache_ptr->slist_len == 0); + HDassert(cache_ptr->slist_size == 0); + HDassert(cache_ptr->pl_len == 0); + HDassert(cache_ptr->pl_size == 0); + HDassert(cache_ptr->LRU_list_len == 0); + HDassert(cache_ptr->LRU_list_size == 0); +#endif + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__flush_invalidate_cache() */ + +/*------------------------------------------------------------------------- + * Function: H5C__flush_invalidate_ring + * + * Purpose: Flush and destroy the entries contained in the target + * cache and ring. + * + * If the ring contains protected entries, the function will + * fail, as protected entries cannot be either flushed or + * destroyed. However all unprotected entries should be + * flushed and destroyed before the function returns failure. + * + * While pinned entries can usually be flushed, they cannot + * be destroyed. However, they should be unpinned when all + * the entries that reference them have been destroyed (thus + * reduding the pinned entry's reference count to 0, allowing + * it to be unpinned). + * + * If pinned entries are present, the function makes repeated + * passes through the cache, flushing all dirty entries + * (including the pinned dirty entries where permitted) and + * destroying all unpinned entries. This process is repeated + * until either the cache is empty, or the number of pinned + * entries stops decreasing on each pass. + * + * If flush dependencies appear in the target ring, the + * function makes repeated passes through the cache flushing + * entries in flush dependency order. + * + * Return: Non-negative on success/Negative on failure or if there was + * a request to flush all items and something was protected. 
+ * + * Programmer: John Mainzer + * 9/1/15 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) +{ + H5C_t *cache_ptr; + hbool_t restart_slist_scan; + uint32_t protected_entries = 0; + int32_t i; + uint32_t cur_ring_pel_len; + uint32_t old_ring_pel_len; + unsigned cooked_flags; + unsigned evict_flags; + H5SL_node_t *node_ptr = NULL; + H5C_cache_entry_t *entry_ptr = NULL; + H5C_cache_entry_t *next_entry_ptr = NULL; +#ifdef H5C_DO_SANITY_CHECKS + uint32_t initial_slist_len = 0; + size_t initial_slist_size = 0; +#endif /* H5C_DO_SANITY_CHECKS */ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + HDassert(f); + HDassert(f->shared); + + cache_ptr = f->shared->cache; + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(cache_ptr->slist_enabled); + HDassert(cache_ptr->slist_ptr); + HDassert(ring > H5C_RING_UNDEFINED); + HDassert(ring < H5C_RING_NTYPES); + + HDassert(cache_ptr->epoch_markers_active == 0); + + /* Filter out the flags that are not relevant to the flush/invalidate. + */ + cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG; + evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG; + + /* The flush procedure here is a bit strange. + * + * In the outer while loop we make at least one pass through the + * cache, and then repeat until either all the pinned entries in + * the ring unpin themselves, or until the number of pinned entries + * in the ring stops declining. In this later case, we scream and die. + * + * Since the fractal heap can dirty, resize, and/or move entries + * in is flush callback, it is possible that the cache will still + * contain dirty entries at this point. If so, we must make more + * passes through the skip list to allow it to empty. + * + * Further, since clean entries can be dirtied, resized, and/or moved + * as the result of a flush call back (either the entries own, or that + * for some other cache entry), we can no longer promise to flush + * the cache entries in increasing address order. + * + * Instead, we make a pass through + * the skip list, and then a pass through the "clean" entries, and + * then repeating as needed. Thus it is quite possible that an + * entry will be evicted from the cache only to be re-loaded later + * in the flush process. + * + * The bottom line is that entries will probably be flushed in close + * to increasing address order, but there are no guarantees. + */ + + /* compute the number of pinned entries in this ring */ + entry_ptr = cache_ptr->pel_head_ptr; + cur_ring_pel_len = 0; + while (entry_ptr != NULL) { + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(entry_ptr->ring >= ring); + if (entry_ptr->ring == ring) + cur_ring_pel_len++; + + entry_ptr = entry_ptr->next; + } /* end while */ + old_ring_pel_len = cur_ring_pel_len; + + while (cache_ptr->index_ring_len[ring] > 0) { + /* first, try to flush-destroy any dirty entries. Do this by + * making a scan through the slist. Note that new dirty entries + * may be created by the flush call backs. Thus it is possible + * that the slist will not be empty after we finish the scan. + */ + +#ifdef H5C_DO_SANITY_CHECKS + /* Depending on circumstances, H5C__flush_single_entry() will + * remove dirty entries from the slist as it flushes them. + * Thus for sanity checks we must make note of the initial + * slist length and size before we do any flushes. 
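+         *
+         * These initial values are compared against the live counts once the
+         * skip list scan below ends with node_ptr == NULL; the invariant
+         * being verified is essentially:
+         *
+         *     cache_ptr->slist_len  == initial_slist_len  + slist_len_increase
+         *     cache_ptr->slist_size == initial_slist_size + slist_size_increase
+         *
+         * with slist_len_increase / slist_size_increase updated elsewhere to
+         * account for slist insertions and dirty entry size changes made by
+         * the flush callbacks.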
+ */ + initial_slist_len = cache_ptr->slist_len; + initial_slist_size = cache_ptr->slist_size; + + /* There is also the possibility that entries will be + * dirtied, resized, moved, and/or removed from the cache + * as the result of calls to the flush callbacks. We use + * the slist_len_increase and slist_size_increase increase + * fields in struct H5C_t to track these changes for purpose + * of sanity checking. + * + * To this end, we must zero these fields before we start + * the pass through the slist. + */ + cache_ptr->slist_len_increase = 0; + cache_ptr->slist_size_increase = 0; +#endif /* H5C_DO_SANITY_CHECKS */ + + /* Set the cache_ptr->slist_changed to false. + * + * This flag is set to TRUE by H5C__flush_single_entry if the slist + * is modified by a pre_serialize, serialize, or notify callback. + * + * H5C__flush_invalidate_ring() uses this flag to detect any + * modifications to the slist that might corrupt the scan of + * the slist -- and restart the scan in this event. + */ + cache_ptr->slist_changed = FALSE; + + /* this done, start the scan of the slist */ + restart_slist_scan = TRUE; + while (restart_slist_scan || (node_ptr != NULL)) { + if (restart_slist_scan) { + restart_slist_scan = FALSE; + + /* Start at beginning of skip list */ + node_ptr = H5SL_first(cache_ptr->slist_ptr); + if (node_ptr == NULL) + /* the slist is empty -- break out of inner loop */ + break; + + /* Get cache entry for this node */ + next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + if (NULL == next_entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") + + HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(next_entry_ptr->is_dirty); + HDassert(next_entry_ptr->in_slist); + HDassert(next_entry_ptr->ring >= ring); + } /* end if */ + + entry_ptr = next_entry_ptr; + + /* It is possible that entries will be dirtied, resized, + * flushed, or removed from the cache via the take ownership + * flag as the result of pre_serialize or serialized callbacks. + * + * This in turn can corrupt the scan through the slist. + * + * We test for slist modifications in the pre_serialize + * and serialize callbacks, and restart the scan of the + * slist if we find them. However, best we do some extra + * sanity checking just in case. + */ + HDassert(entry_ptr != NULL); + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(entry_ptr->in_slist); + HDassert(entry_ptr->is_dirty); + HDassert(entry_ptr->ring >= ring); + + /* increment node pointer now, before we delete its target + * from the slist. + */ + node_ptr = H5SL_next(node_ptr); + if (node_ptr != NULL) { + next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + if (NULL == next_entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") + + HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(next_entry_ptr->is_dirty); + HDassert(next_entry_ptr->in_slist); + HDassert(next_entry_ptr->ring >= ring); + HDassert(entry_ptr != next_entry_ptr); + } /* end if */ + else + next_entry_ptr = NULL; + + /* Note that we now remove nodes from the slist as we flush + * the associated entries, instead of leaving them there + * until we are done, and then destroying all nodes in + * the slist. + * + * While this optimization used to be easy, with the possibility + * of new entries being added to the slist in the midst of the + * flush, we must keep the slist in canonical form at all + * times. 
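+             *
+             * The test below selects the entries that are safe to flush and
+             * destroy on this pass; informally it requires that
+             *
+             *     entry_ptr->ring == ring                 (right ring)
+             *  && entry_ptr->flush_dep_nchildren == 0     (no flush dependency children)
+             *  && (!entry_ptr->flush_me_last ||
+             *      cache_ptr->num_last_entries >= cache_ptr->slist_len)
+             *                                             (not "flush me last", unless,
+             *                                              roughly, only such entries
+             *                                              remain in the slist)
+             *
+             * Everything else is left for a later pass of the enclosing loop.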
+ */ + if (((!entry_ptr->flush_me_last) || + ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) && + (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) { + if (entry_ptr->is_protected) { + /* We have major problems -- but lets flush + * everything we can before we flag an error. + */ + protected_entries++; + } /* end if */ + else if (entry_ptr->is_pinned) { + if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed") + + if (cache_ptr->slist_changed) { + /* The slist has been modified by something + * other than the simple removal of the + * of the flushed entry after the flush. + * + * This has the potential to corrupt the + * scan through the slist, so restart it. + */ + restart_slist_scan = TRUE; + cache_ptr->slist_changed = FALSE; + H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr); + } /* end if */ + } /* end else-if */ + else { + if (H5C__flush_single_entry(f, entry_ptr, + (cooked_flags | H5C__DURING_FLUSH_FLAG | + H5C__FLUSH_INVALIDATE_FLAG | + H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed") + + if (cache_ptr->slist_changed) { + /* The slist has been modified by something + * other than the simple removal of the + * of the flushed entry after the flush. + * + * This has the potential to corrupt the + * scan through the slist, so restart it. + */ + restart_slist_scan = TRUE; + cache_ptr->slist_changed = FALSE; + H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) + } /* end if */ + } /* end else */ + } /* end if */ + } /* end while loop scanning skip list */ + +#ifdef H5C_DO_SANITY_CHECKS + /* It is possible that entries were added to the slist during + * the scan, either before or after scan pointer. The following + * asserts take this into account. + * + * Don't bother with the sanity checks if node_ptr != NULL, as + * in this case we broke out of the loop because it got changed + * out from under us. + */ + + if (node_ptr == NULL) { + HDassert(cache_ptr->slist_len == + (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase)); + HDassert(cache_ptr->slist_size == + (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase)); + } /* end if */ +#endif /* H5C_DO_SANITY_CHECKS */ + + /* Since we are doing a destroy, we must make a pass through + * the hash table and try to flush - destroy all entries that + * remain. + * + * It used to be that all entries remaining in the cache at + * this point had to be clean, but with the fractal heap mods + * this may not be the case. If so, we will flush entries out + * in increasing address order. + * + * Writes to disk are possible here. + */ + + /* Reset the counters so that we can detect insertions, loads, + * and moves caused by the pre_serialize and serialize calls. 
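+         *
+         * If any of the following holds after a flush/destroy in the scan
+         * below, the index list scan is restarted from cache_ptr->il_head:
+         *
+         *     - the entry being watched for removal (the intended next
+         *       entry) was in fact removed,
+         *     - entries_loaded_counter    > 0,
+         *     - entries_inserted_counter  > 0, or
+         *     - entries_relocated_counter > 0
+         *
+         * since any of these can invalidate next_entry_ptr or cause part of
+         * the index list to be skipped.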
+ */ + cache_ptr->entries_loaded_counter = 0; + cache_ptr->entries_inserted_counter = 0; + cache_ptr->entries_relocated_counter = 0; + + next_entry_ptr = cache_ptr->il_head; + while (next_entry_ptr != NULL) { + entry_ptr = next_entry_ptr; + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(entry_ptr->ring >= ring); + + next_entry_ptr = entry_ptr->il_next; + HDassert((next_entry_ptr == NULL) || (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC)); + + if (((!entry_ptr->flush_me_last) || + (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) && + (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) { + + if (entry_ptr->is_protected) { + /* we have major problems -- but lets flush and + * destroy everything we can before we flag an + * error. + */ + protected_entries++; + + if (!entry_ptr->in_slist) + HDassert(!(entry_ptr->is_dirty)); + } /* end if */ + else if (!entry_ptr->is_pinned) { + /* if *entry_ptr is dirty, it is possible + * that one or more other entries may be + * either removed from the cache, loaded + * into the cache, or moved to a new location + * in the file as a side effect of the flush. + * + * It's also possible that removing a clean + * entry will remove the last child of a proxy + * entry, allowing it to be removed also and + * invalidating the next_entry_ptr. + * + * If either of these happen, and one of the target + * or proxy entries happens to be the next entry in + * the hash bucket, we could either find ourselves + * either scanning a non-existent entry, scanning + * through a different bucket, or skipping an entry. + * + * Neither of these are good, so restart the + * the scan at the head of the hash bucket + * after the flush if we detect that the next_entry_ptr + * becomes invalid. + * + * This is not as inefficient at it might seem, + * as hash buckets typically have at most two + * or three entries. + */ + cache_ptr->entry_watched_for_removal = next_entry_ptr; + if (H5C__flush_single_entry(f, entry_ptr, + (cooked_flags | H5C__DURING_FLUSH_FLAG | + H5C__FLUSH_INVALIDATE_FLAG | + H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed") + + /* Restart the index list scan if necessary. Must + * do this if the next entry is evicted, and also if + * one or more entries are inserted, loaded, or moved + * as these operations can result in part of the scan + * being skipped -- which can cause a spurious failure + * if this results in the size of the pinned entry + * failing to decline during the pass. + */ + if (((NULL != next_entry_ptr) && (NULL == cache_ptr->entry_watched_for_removal)) || + (cache_ptr->entries_loaded_counter > 0) || + (cache_ptr->entries_inserted_counter > 0) || + (cache_ptr->entries_relocated_counter > 0)) { + + next_entry_ptr = cache_ptr->il_head; + + cache_ptr->entries_loaded_counter = 0; + cache_ptr->entries_inserted_counter = 0; + cache_ptr->entries_relocated_counter = 0; + + H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr) + } /* end if */ + else + cache_ptr->entry_watched_for_removal = NULL; + } /* end if */ + } /* end if */ + } /* end for loop scanning hash table */ + + /* We can't do anything if entries are pinned. The + * hope is that the entries will be unpinned as the + * result of destroys of entries that reference them. + * + * We detect this by noting the change in the number + * of pinned entries from pass to pass. If it stops + * shrinking before it hits zero, we scream and die. 
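+         *
+         * Concretely, after recounting the pinned entries in this ring, the
+         * code below fails out when
+         *
+         *     cur_ring_pel_len > 0 && cur_ring_pel_len >= old_ring_pel_len
+         *
+         * unless the H5C__EVICT_ALLOW_LAST_PINS_FLAG was passed in, in which
+         * case it returns early and leaves the remaining pinned entries in
+         * place.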
+ */ + old_ring_pel_len = cur_ring_pel_len; + entry_ptr = cache_ptr->pel_head_ptr; + cur_ring_pel_len = 0; + + while (entry_ptr != NULL) { + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(entry_ptr->ring >= ring); + + if (entry_ptr->ring == ring) + cur_ring_pel_len++; + + entry_ptr = entry_ptr->next; + } /* end while */ + + /* Check if the number of pinned entries in the ring is positive, and + * it is not declining. Scream and die if so. + */ + if ((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) { + /* Don't error if allowed to have pinned entries remaining */ + if (evict_flags) + HGOTO_DONE(TRUE) + + HGOTO_ERROR( + H5E_CACHE, H5E_CANTFLUSH, FAIL, + "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", + (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring) + } /* end if */ + + HDassert(protected_entries == cache_ptr->pl_len); + + if ((protected_entries > 0) && (protected_entries == cache_ptr->index_len)) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, + "Only protected entries left in cache, protected_entries = %d", + (int)protected_entries) + } /* main while loop */ + + /* Invariants, after destroying all entries in the ring */ + for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) { + HDassert(cache_ptr->index_ring_len[i] == 0); + HDassert(cache_ptr->index_ring_size[i] == (size_t)0); + HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0); + HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0); + + HDassert(cache_ptr->slist_ring_len[i] == 0); + HDassert(cache_ptr->slist_ring_size[i] == (size_t)0); + } /* end for */ + + HDassert(protected_entries <= cache_ptr->pl_len); + + if (protected_entries > 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries") + else if (cur_ring_pel_len > 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__flush_invalidate_ring() */ + +/*------------------------------------------------------------------------- + * Function: H5C__flush_ring + * + * Purpose: Flush the entries contained in the specified cache and + * ring. All entries in rings outside the specified ring + * must have been flushed on entry. + * + * If the cache contains protected entries in the specified + * ring, the function will fail, as protected entries cannot + * be flushed. However all unprotected entries in the target + * ring should be flushed before the function returns failure. + * + * If flush dependencies appear in the target ring, the + * function makes repeated passes through the slist flushing + * entries in flush dependency order. + * + * Return: Non-negative on success/Negative on failure or if there was + * a request to flush all items and something was protected. 
+ * + * Programmer: John Mainzer + * 9/1/15 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) +{ + H5C_t *cache_ptr = f->shared->cache; + hbool_t flushed_entries_last_pass; + hbool_t flush_marked_entries; + hbool_t ignore_protected; + hbool_t tried_to_flush_protected_entry = FALSE; + hbool_t restart_slist_scan; + uint32_t protected_entries = 0; + H5SL_node_t *node_ptr = NULL; + H5C_cache_entry_t *entry_ptr = NULL; + H5C_cache_entry_t *next_entry_ptr = NULL; +#ifdef H5C_DO_SANITY_CHECKS + uint32_t initial_slist_len = 0; + size_t initial_slist_size = 0; +#endif /* H5C_DO_SANITY_CHECKS */ + int i; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(cache_ptr->slist_enabled); + HDassert(cache_ptr->slist_ptr); + HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0); + HDassert(ring > H5C_RING_UNDEFINED); + HDassert(ring < H5C_RING_NTYPES); + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + + ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0); + flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0); + + if (!flush_marked_entries) + for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++) + HDassert(cache_ptr->slist_ring_len[i] == 0); + + HDassert(cache_ptr->flush_in_progress); + + /* When we are only flushing marked entries, the slist will usually + * still contain entries when we have flushed everything we should. + * Thus we track whether we have flushed any entries in the last + * pass, and terminate if we haven't. + */ + flushed_entries_last_pass = TRUE; + + /* Set the cache_ptr->slist_changed to false. + * + * This flag is set to TRUE by H5C__flush_single_entry if the + * slist is modified by a pre_serialize, serialize, or notify callback. + * H5C_flush_cache uses this flag to detect any modifications + * to the slist that might corrupt the scan of the slist -- and + * restart the scan in this event. + */ + cache_ptr->slist_changed = FALSE; + + while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) && (flushed_entries_last_pass)) { + flushed_entries_last_pass = FALSE; + +#ifdef H5C_DO_SANITY_CHECKS + /* For sanity checking, try to verify that the skip list has + * the expected size and number of entries at the end of each + * internal while loop (see below). + * + * Doing this get a bit tricky, as depending on flags, we may + * or may not flush all the entries in the slist. + * + * To make things more entertaining, with the advent of the + * fractal heap, the entry serialize callback can cause entries + * to be dirtied, resized, and/or moved. Also, the + * pre_serialize callback can result in an entry being + * removed from the cache via the take ownership flag. + * + * To deal with this, we first make note of the initial + * skip list length and size: + */ + initial_slist_len = cache_ptr->slist_len; + initial_slist_size = cache_ptr->slist_size; + + /* As mentioned above, there is the possibility that + * entries will be dirtied, resized, flushed, or removed + * from the cache via the take ownership flag during + * our pass through the skip list. 
To capture the number + * of entries added, and the skip list size delta, + * zero the slist_len_increase and slist_size_increase of + * the cache's instance of H5C_t. These fields will be + * updated elsewhere to account for slist insertions and/or + * dirty entry size changes. + */ + cache_ptr->slist_len_increase = 0; + cache_ptr->slist_size_increase = 0; + + /* at the end of the loop, use these values to compute the + * expected slist length and size and compare this with the + * value recorded in the cache's instance of H5C_t. + */ +#endif /* H5C_DO_SANITY_CHECKS */ + + restart_slist_scan = TRUE; + while ((restart_slist_scan) || (node_ptr != NULL)) { + if (restart_slist_scan) { + restart_slist_scan = FALSE; + + /* Start at beginning of skip list */ + node_ptr = H5SL_first(cache_ptr->slist_ptr); + if (node_ptr == NULL) + /* the slist is empty -- break out of inner loop */ + break; + + /* Get cache entry for this node */ + next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + if (NULL == next_entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") + + HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(next_entry_ptr->is_dirty); + HDassert(next_entry_ptr->in_slist); + } /* end if */ + + entry_ptr = next_entry_ptr; + + /* With the advent of the fractal heap, the free space + * manager, and the version 3 cache, it is possible + * that the pre-serialize or serialize callback will + * dirty, resize, or take ownership of other entries + * in the cache. + * + * To deal with this, there is code to detect any + * change in the skip list not directly under the control + * of this function. If such modifications are detected, + * we must re-start the scan of the skip list to avoid + * the possibility that the target of the next_entry_ptr + * may have been flushed or deleted from the cache. + * + * To verify that all such possibilities have been dealt + * with, we do a bit of extra sanity checking on + * entry_ptr. + */ + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(entry_ptr->in_slist); + HDassert(entry_ptr->is_dirty); + + if (!flush_marked_entries || entry_ptr->flush_marker) + HDassert(entry_ptr->ring >= ring); + + /* Advance node pointer now, before we delete its target + * from the slist. + */ + node_ptr = H5SL_next(node_ptr); + if (node_ptr != NULL) { + next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); + if (NULL == next_entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") + + HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(next_entry_ptr->is_dirty); + HDassert(next_entry_ptr->in_slist); + + if (!flush_marked_entries || next_entry_ptr->flush_marker) + HDassert(next_entry_ptr->ring >= ring); + + HDassert(entry_ptr != next_entry_ptr); + } /* end if */ + else + next_entry_ptr = NULL; + + if ((!flush_marked_entries || entry_ptr->flush_marker) && + ((!entry_ptr->flush_me_last) || + ((entry_ptr->flush_me_last) && ((cache_ptr->num_last_entries >= cache_ptr->slist_len) || + (flush_marked_entries && entry_ptr->flush_marker)))) && + ((entry_ptr->flush_dep_nchildren == 0) || (entry_ptr->flush_dep_ndirty_children == 0)) && + (entry_ptr->ring == ring)) { + + HDassert(entry_ptr->flush_dep_nunser_children == 0); + + if (entry_ptr->is_protected) { + /* we probably have major problems -- but lets + * flush everything we can before we decide + * whether to flag an error. 
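+                     *
+                     * The decision is made after the scan: the function fails
+                     * with "cache has protected items" if
+                     *
+                     *     (cache_ptr->pl_len > 0 && !ignore_protected) ||
+                     *     tried_to_flush_protected_entry
+                     *
+                     * so protected entries are only tolerated when the caller
+                     * passed H5C__FLUSH_IGNORE_PROTECTED_FLAG and none of
+                     * them actually needed to be flushed.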
+ */ + tried_to_flush_protected_entry = TRUE; + protected_entries++; + } /* end if */ + else { + if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry") + + if (cache_ptr->slist_changed) { + /* The slist has been modified by something + * other than the simple removal of the + * of the flushed entry after the flush. + * + * This has the potential to corrupt the + * scan through the slist, so restart it. + */ + restart_slist_scan = TRUE; + cache_ptr->slist_changed = FALSE; + H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) + } /* end if */ + + flushed_entries_last_pass = TRUE; + } /* end else */ + } /* end if */ + } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */ + +#ifdef H5C_DO_SANITY_CHECKS + /* Verify that the slist size and length are as expected. */ + HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) == + cache_ptr->slist_len); + HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) == + cache_ptr->slist_size); +#endif /* H5C_DO_SANITY_CHECKS */ + } /* while */ + + HDassert(protected_entries <= cache_ptr->pl_len); + + if (((cache_ptr->pl_len > 0) && !ignore_protected) || tried_to_flush_protected_entry) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items") + +#ifdef H5C_DO_SANITY_CHECKS + if (!flush_marked_entries) { + HDassert(cache_ptr->slist_ring_len[ring] == 0); + HDassert(cache_ptr->slist_ring_size[ring] == 0); + } /* end if */ +#endif /* H5C_DO_SANITY_CHECKS */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__flush_ring() */ + +/*------------------------------------------------------------------------- + * Function: H5C__make_space_in_cache + * + * Purpose: Attempt to evict cache entries until the index_size + * is at least needed_space below max_cache_size. + * + * In passing, also attempt to bring cLRU_list_size to a + * value greater than min_clean_size. + * + * Depending on circumstances, both of these goals may + * be impossible, as in parallel mode, we must avoid generating + * a write as part of a read (to avoid deadlock in collective + * I/O), and in all cases, it is possible (though hopefully + * highly unlikely) that the protected list may exceed the + * maximum size of the cache. + * + * Thus the function simply does its best, returning success + * unless an error is encountered. + * + * Observe that this function cannot occasion a read. + * + * Return: Non-negative on success/Negative on failure. 
+ * + * Programmer: John Mainzer, 5/14/04 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) +{ + H5C_t *cache_ptr = f->shared->cache; +#if H5C_COLLECT_CACHE_STATS + int32_t clean_entries_skipped = 0; + int32_t dirty_pf_entries_skipped = 0; + int32_t total_entries_scanned = 0; +#endif /* H5C_COLLECT_CACHE_STATS */ + uint32_t entries_examined = 0; + uint32_t initial_list_len; + size_t empty_space; + hbool_t reentrant_call = FALSE; + hbool_t prev_is_dirty = FALSE; + hbool_t didnt_flush_entry = FALSE; + hbool_t restart_scan; + H5C_cache_entry_t *entry_ptr; + H5C_cache_entry_t *prev_ptr; + H5C_cache_entry_t *next_ptr; +#ifndef NDEBUG + uint32_t num_corked_entries = 0; +#endif + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + HDassert(f); + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size)); + + /* check to see if cache_ptr->msic_in_progress is TRUE. If it, this + * is a re-entrant call via a client callback called in the make + * space in cache process. To avoid an infinite recursion, set + * reentrant_call to TRUE, and goto done. + */ + if (cache_ptr->msic_in_progress) { + reentrant_call = TRUE; + HGOTO_DONE(SUCCEED); + } /* end if */ + + cache_ptr->msic_in_progress = TRUE; + + if (write_permitted) { + restart_scan = FALSE; + initial_list_len = cache_ptr->LRU_list_len; + entry_ptr = cache_ptr->LRU_tail_ptr; + + if (cache_ptr->index_size >= cache_ptr->max_cache_size) + empty_space = 0; + else + empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; + + while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) || + ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) && + (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) { + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(!(entry_ptr->is_protected)); + HDassert(!(entry_ptr->is_read_only)); + HDassert((entry_ptr->ro_ref_count) == 0); + + next_ptr = entry_ptr->next; + prev_ptr = entry_ptr->prev; + + if (prev_ptr != NULL) + prev_is_dirty = prev_ptr->is_dirty; + + if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) { + /* Skip "dirty" corked entries. */ +#ifndef NDEBUG + ++num_corked_entries; +#endif + didnt_flush_entry = TRUE; + } + else if ((entry_ptr->type->id != H5AC_EPOCH_MARKER_ID) && !entry_ptr->flush_in_progress && + !entry_ptr->prefetched_dirty) { + didnt_flush_entry = FALSE; + if (entry_ptr->is_dirty) { +#if H5C_COLLECT_CACHE_STATS + if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) + cache_ptr->entries_scanned_to_make_space++; +#endif /* H5C_COLLECT_CACHE_STATS */ + + /* reset entries_removed_counter and + * last_entry_removed_ptr prior to the call to + * H5C__flush_single_entry() so that we can spot + * unexpected removals of entries from the cache, + * and set the restart_scan flag if proceeding + * would be likely to cause us to scan an entry + * that is no longer in the cache. 
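+                     *
+                     * The same bookkeeping is used by the ageout eviction
+                     * scan in
+                     * H5C__autoadjust__ageout__evict_aged_out_entries():
+                     * after the flush, restart_scan is set when
+                     *
+                     *     cache_ptr->entries_removed_counter > 1 ||
+                     *     cache_ptr->last_entry_removed_ptr == prev_ptr
+                     *
+                     * i.e. when unexpected removals are detected, or when
+                     * prev_ptr itself is the entry that was removed.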
+ */ + cache_ptr->entries_removed_counter = 0; + cache_ptr->last_entry_removed_ptr = NULL; + + if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") + + if ((cache_ptr->entries_removed_counter > 1) || + (cache_ptr->last_entry_removed_ptr == prev_ptr)) + + restart_scan = TRUE; + } + else if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size +#ifdef H5_HAVE_PARALLEL + && !(entry_ptr->coll_access) +#endif /* H5_HAVE_PARALLEL */ + ) { +#if H5C_COLLECT_CACHE_STATS + cache_ptr->entries_scanned_to_make_space++; +#endif /* H5C_COLLECT_CACHE_STATS */ + + if (H5C__flush_single_entry(f, entry_ptr, + H5C__FLUSH_INVALIDATE_FLAG | + H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") + } + else { + /* We have enough space so don't flush clean entry. */ +#if H5C_COLLECT_CACHE_STATS + clean_entries_skipped++; +#endif /* H5C_COLLECT_CACHE_STATS */ + didnt_flush_entry = TRUE; + } + +#if H5C_COLLECT_CACHE_STATS + total_entries_scanned++; +#endif /* H5C_COLLECT_CACHE_STATS */ + } + else { + + /* Skip epoch markers, entries that are in the process + * of being flushed, and entries marked as prefetched_dirty + * (occurs in the R/O case only). + */ + didnt_flush_entry = TRUE; + +#if H5C_COLLECT_CACHE_STATS + if (entry_ptr->prefetched_dirty) + dirty_pf_entries_skipped++; +#endif /* H5C_COLLECT_CACHE_STATS */ + } + + if (prev_ptr != NULL) { + if (didnt_flush_entry) + /* epoch markers don't get flushed, and we don't touch + * entries that are in the process of being flushed. + * Hence no need for sanity checks, as we haven't + * flushed anything. Thus just set entry_ptr to prev_ptr + * and go on. + */ + entry_ptr = prev_ptr; + else if (restart_scan || prev_ptr->is_dirty != prev_is_dirty || prev_ptr->next != next_ptr || + prev_ptr->is_protected || prev_ptr->is_pinned) { + /* something has happened to the LRU -- start over + * from the tail. 
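+                     *
+                     * "Something" here is any of the conditions tested above:
+                     * an explicit restart request (restart_scan), a change in
+                     * prev_ptr's dirty state, prev_ptr no longer being linked
+                     * to next_ptr, or prev_ptr having become protected or
+                     * pinned -- all signs that the LRU was modified behind
+                     * our back by a flush callback.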
+ */ + restart_scan = FALSE; + entry_ptr = cache_ptr->LRU_tail_ptr; + H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) + } + else + entry_ptr = prev_ptr; + } + else + entry_ptr = NULL; + + entries_examined++; + + if (cache_ptr->index_size >= cache_ptr->max_cache_size) + empty_space = 0; + else + empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; + + HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size)); + } + +#if H5C_COLLECT_CACHE_STATS + cache_ptr->calls_to_msic++; + + cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped; + cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped; + cache_ptr->total_entries_scanned_in_msic += total_entries_scanned; + + if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic) + cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped; + + if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic) + cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped; + + if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic) + cache_ptr->max_entries_scanned_in_msic = total_entries_scanned; +#endif /* H5C_COLLECT_CACHE_STATS */ + + /* NEED: work on a better assert for corked entries */ + HDassert((entries_examined > (2 * initial_list_len)) || + ((cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) > + cache_ptr->max_cache_size) || + ((cache_ptr->clean_index_size + empty_space) >= cache_ptr->min_clean_size) || + ((num_corked_entries))); +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + + HDassert((entries_examined > (2 * initial_list_len)) || + (cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size)); + HDassert((entries_examined > (2 * initial_list_len)) || + (cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size)); + +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + } + else { + HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS); + +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + initial_list_len = cache_ptr->cLRU_list_len; + entry_ptr = cache_ptr->cLRU_tail_ptr; + + while (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) && + (entries_examined <= initial_list_len) && (entry_ptr != NULL)) { + HDassert(!(entry_ptr->is_protected)); + HDassert(!(entry_ptr->is_read_only)); + HDassert((entry_ptr->ro_ref_count) == 0); + HDassert(!(entry_ptr->is_dirty)); + + prev_ptr = entry_ptr->aux_prev; + + if (!entry_ptr->prefetched_dirty +#ifdef H5_HAVE_PARALLEL + && !entry_ptr->coll_access +#endif /* H5_HAVE_PARALLEL */ + ) { + if (H5C__flush_single_entry( + f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") + } /* end if */ + + /* we are scanning the clean LRU, so the serialize function + * will not be called on any entry -- thus there is no + * concern about the list being modified out from under + * this function. + */ + + entry_ptr = prev_ptr; + entries_examined++; + } +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + } + +done: + /* Sanity checks */ + HDassert(cache_ptr->msic_in_progress); + if (!reentrant_call) + cache_ptr->msic_in_progress = FALSE; + HDassert((!reentrant_call) || (cache_ptr->msic_in_progress)); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__make_space_in_cache() */ + +/*------------------------------------------------------------------------- + * Function: H5C__serialize_cache + * + * Purpose: Serialize (i.e. 
construct an on disk image) for all entries + * in the metadata cache including clean entries. + * + * Note that flush dependencies and "flush me last" flags + * must be observed in the serialization process. + * + * Note also that entries may be loaded, flushed, evicted, + * expunged, relocated, resized, or removed from the cache + * during this process, just as these actions may occur during + * a regular flush. + * + * However, we are given that the cache will contain no protected + * entries on entry to this routine (although entries may be + * briefly protected and then unprotected during the serialize + * process). + * + * The objective of this routine is serialize all entries and + * to force all entries into their actual locations on disk. + * + * The initial need for this routine is to settle all entries + * in the cache prior to construction of the metadata cache + * image so that the size of the cache image can be calculated. + * + * Return: Non-negative on success/Negative on failure or if there was + * a request to flush all items and something was protected. + * + * Programmer: John Mainzer + * 7/22/15 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C__serialize_cache(H5F_t *f) +{ +#ifdef H5C_DO_SANITY_CHECKS + int i; + uint32_t index_len = 0; + size_t index_size = (size_t)0; + size_t clean_index_size = (size_t)0; + size_t dirty_index_size = (size_t)0; + size_t slist_size = (size_t)0; + uint32_t slist_len = 0; +#endif /* H5C_DO_SANITY_CHECKS */ + H5C_ring_t ring; + H5C_t *cache_ptr; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + HDassert(f); + HDassert(f->shared); + cache_ptr = f->shared->cache; + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(cache_ptr->slist_ptr); + +#ifdef H5C_DO_SANITY_CHECKS + HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0); + HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0); + HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0); + + for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) { + index_len += cache_ptr->index_ring_len[i]; + index_size += cache_ptr->index_ring_size[i]; + clean_index_size += cache_ptr->clean_index_ring_size[i]; + dirty_index_size += cache_ptr->dirty_index_ring_size[i]; + + slist_len += cache_ptr->slist_ring_len[i]; + slist_size += cache_ptr->slist_ring_size[i]; + } /* end for */ + + HDassert(cache_ptr->index_len == index_len); + HDassert(cache_ptr->index_size == index_size); + HDassert(cache_ptr->clean_index_size == clean_index_size); + HDassert(cache_ptr->dirty_index_size == dirty_index_size); + HDassert(cache_ptr->slist_len == slist_len); + HDassert(cache_ptr->slist_size == slist_size); +#endif /* H5C_DO_SANITY_CHECKS */ + +#ifdef H5C_DO_EXTREME_SANITY_CHECKS + if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || + H5C__validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry") +#endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + +#ifndef NDEBUG + /* if this is a debug build, set the serialization_count field of + * each entry in the cache to zero before we start the serialization. 
+ * This allows us to detect the case in which any entry is serialized + * more than once (a performance issues), and more importantly, the + * case is which any flush dependency parent is serializes more than + * once (a correctness issue). + */ + { + H5C_cache_entry_t *scan_ptr = NULL; + + scan_ptr = cache_ptr->il_head; + while (scan_ptr != NULL) { + HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + scan_ptr->serialization_count = 0; + scan_ptr = scan_ptr->il_next; + } /* end while */ + } /* end block */ +#endif + + /* set cache_ptr->serialization_in_progress to TRUE, and back + * to FALSE at the end of the function. Must maintain this flag + * to support H5C_get_serialization_in_progress(), which is in + * turn required to support sanity checking in some cache + * clients. + */ + HDassert(!cache_ptr->serialization_in_progress); + cache_ptr->serialization_in_progress = TRUE; + + /* Serialize each ring, starting from the outermost ring and + * working inward. + */ + ring = H5C_RING_USER; + while (ring < H5C_RING_NTYPES) { + HDassert(cache_ptr->close_warning_received); + switch (ring) { + case H5C_RING_USER: + break; + + case H5C_RING_RDFSM: + /* Settle raw data FSM */ + if (!cache_ptr->rdfsm_settled) + if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed") + break; + + case H5C_RING_MDFSM: + /* Settle metadata FSM */ + if (!cache_ptr->mdfsm_settled) + if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed") + break; + + case H5C_RING_SBE: + case H5C_RING_SB: + break; + + default: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!") + break; + } /* end switch */ + + if (H5C__serialize_ring(f, ring) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed") + + ring++; + } /* end while */ + +#ifndef NDEBUG + /* Verify that no entry has been serialized more than once. + * FD parents with multiple serializations should have been caught + * elsewhere, so no specific check for them here. + */ + { + H5C_cache_entry_t *scan_ptr = NULL; + + scan_ptr = cache_ptr->il_head; + while (scan_ptr != NULL) { + HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(scan_ptr->serialization_count <= 1); + + scan_ptr = scan_ptr->il_next; + } /* end while */ + } /* end block */ +#endif + +done: + cache_ptr->serialization_in_progress = FALSE; + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__serialize_cache() */ + +/*------------------------------------------------------------------------- + * Function: H5C__serialize_ring + * + * Purpose: Serialize the entries contained in the specified cache and + * ring. All entries in rings outside the specified ring + * must have been serialized on entry. + * + * If the cache contains protected entries in the specified + * ring, the function will fail, as protected entries cannot + * be serialized. However all unprotected entries in the + * target ring should be serialized before the function + * returns failure. + * + * If flush dependencies appear in the target ring, the + * function makes repeated passes through the index list + * serializing entries in flush dependency order. + * + * All entries outside the H5C_RING_SBE are marked for + * inclusion in the cache image. Entries in H5C_RING_SBE + * and below are marked for exclusion from the image. 
+ *
+ * Return:      Non-negative on success/Negative on failure or if there was
+ *              a request to flush all items and something was protected.
+ *
+ * Programmer:  John Mainzer
+ *              9/11/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
+{
+    hbool_t            done = FALSE;
+    H5C_t             *cache_ptr;
+    H5C_cache_entry_t *entry_ptr;
+    herr_t             ret_value = SUCCEED;
+
+    FUNC_ENTER_PACKAGE
+
+    /* Sanity checks */
+    HDassert(f);
+    HDassert(f->shared);
+    cache_ptr = f->shared->cache;
+    HDassert(cache_ptr);
+    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+    HDassert(ring > H5C_RING_UNDEFINED);
+    HDassert(ring < H5C_RING_NTYPES);
+
+    HDassert(cache_ptr->serialization_in_progress);
+
+    /* The objective here is to serialize all entries in the cache ring
+     * in flush dependency order.
+     *
+     * The basic algorithm is to scan the cache index list looking for
+     * unserialized entries that are either not in a flush dependency
+     * relationship, or which have no unserialized children. Any such
+     * entry is serialized and its flush dependency parents (if any) are
+     * informed -- allowing them to decrement their unserialized child counts.
+     *
+     * However, this algorithm is complicated by the ability
+     * of client serialization callbacks to perform operations
+     * on the cache which can result in the insertion, deletion,
+     * relocation, resize, dirty, flush, eviction, or removal (via the
+     * take ownership flag) of entries. Changes in the flush dependency
+     * structure are also possible.
+     *
+     * On the other hand, the algorithm is simplified by the fact that
+     * we are serializing, not flushing. Thus, as long as all entries
+     * are serialized correctly, it doesn't matter if we have to go back
+     * and serialize an entry a second time.
+     *
+     * These possible actions result in the following modifications to
+     * the basic algorithm:
+     *
+     * 1) In the event of an entry expunge, eviction or removal, we must
+     *    restart the scan as it is possible that the next entry in our
+     *    scan is no longer in the cache. Were we to examine this entry,
+     *    we would be accessing deallocated memory.
+     *
+     * 2) A resize, dirty, or insertion of an entry may result in the
+     *    increment of a flush dependency parent's dirty and/or
+     *    unserialized child count. In the context of serializing the
+     *    cache, this is a non-issue, as even if we have already
+     *    serialized the parent, it will be marked dirty and its image
+     *    marked out of date if appropriate when the child is serialized.
+     *
+     *    However, this is a major issue for a flush, as were this to happen
+     *    in a flush, it would violate the invariant that the flush dependency
+     *    feature is intended to enforce. As the metadata cache has no
+     *    control over the behavior of cache clients, it has no way of
+     *    preventing this behaviour. However, it should detect it if at all
+     *    possible.
+     *
+     *    Do this by maintaining a count of the number of times each entry is
+     *    serialized during a cache serialization. If any flush dependency
+     *    parent is serialized more than once, throw an assertion failure.
+     *
+     * 3) An entry relocation will typically change the location of the
+     *    entry in the index list. This shouldn't cause problems as we
+     *    will scan the index list until we make a complete pass without
+     *    finding anything to serialize -- making relocations of either
+     *    the current or next entries irrelevant.
+ * + * Note that since a relocation may result in our skipping part of + * the index list, we must always do at least one more pass through + * the index list after an entry relocation. + * + * 4) Changes in the flush dependency structure are possible on + * entry insertion, load, expunge, evict, or remove. Destruction + * of a flush dependency has no effect, as it can only relax the + * flush dependencies. Creation of a flush dependency can create + * an unserialized child of a flush dependency parent where all + * flush dependency children were previously serialized. Should + * this child dirty the flush dependency parent when it is serialized, + * the parent will be re-serialized. + * + * Per the discussion of 2) above, this is a non issue for cache + * serialization, and a major problem for cache flush. Using the + * same detection mechanism, throw an assertion failure if this + * condition appears. + * + * Observe that either eviction or removal of entries as a result of + * a serialization is not a problem as long as the flush dependency + * tree does not change beyond the removal of a leaf. + */ + while (!done) { + /* Reset the counters so that we can detect insertions, loads, + * moves, and flush dependency height changes caused by the pre_serialize + * and serialize callbacks. + */ + cache_ptr->entries_loaded_counter = 0; + cache_ptr->entries_inserted_counter = 0; + cache_ptr->entries_relocated_counter = 0; + + done = TRUE; /* set to FALSE if any activity in inner loop */ + entry_ptr = cache_ptr->il_head; + while (entry_ptr != NULL) { + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + + /* Verify that either the entry is already serialized, or + * that it is assigned to either the target or an inner + * ring. + */ + HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date)); + + /* Skip flush me last entries or inner ring entries */ + if (!entry_ptr->flush_me_last && entry_ptr->ring == ring) { + + /* if we encounter an unserialized entry in the current + * ring that is not marked flush me last, we are not done. + */ + if (!entry_ptr->image_up_to_date) + done = FALSE; + + /* Serialize the entry if its image is not up to date + * and it has no unserialized flush dependency children. + */ + if (!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) { + HDassert(entry_ptr->serialization_count == 0); + + /* Serialize the entry */ + if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed") + + HDassert(entry_ptr->flush_dep_nunser_children == 0); + HDassert(entry_ptr->serialization_count == 0); + +#ifndef NDEBUG + /* Increment serialization counter (to detect multiple serializations) */ + entry_ptr->serialization_count++; +#endif + } /* end if */ + } /* end if */ + + /* Check for the cache being perturbed during the entry serialize */ + if ((cache_ptr->entries_loaded_counter > 0) || (cache_ptr->entries_inserted_counter > 0) || + (cache_ptr->entries_relocated_counter > 0)) { + +#if H5C_COLLECT_CACHE_STATS + H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr); +#endif /* H5C_COLLECT_CACHE_STATS */ + + /* Reset the counters */ + cache_ptr->entries_loaded_counter = 0; + cache_ptr->entries_inserted_counter = 0; + cache_ptr->entries_relocated_counter = 0; + + /* Restart scan */ + entry_ptr = cache_ptr->il_head; + } /* end if */ + else + /* Advance to next entry */ + entry_ptr = entry_ptr->il_next; + } /* while ( entry_ptr != NULL ) */ + } /* while ( ! 
done ) */ + + /* Reset the counters so that we can detect insertions, loads, + * moves, and flush dependency height changes caused by the pre_serialize + * and serialize callbacks. + */ + cache_ptr->entries_loaded_counter = 0; + cache_ptr->entries_inserted_counter = 0; + cache_ptr->entries_relocated_counter = 0; + + /* At this point, all entries not marked "flush me last" and in + * the current ring or outside it should be serialized and have up + * to date images. Scan the index list again to serialize the + * "flush me last" entries (if they are in the current ring) and to + * verify that all other entries have up to date images. + */ + entry_ptr = cache_ptr->il_head; + while (entry_ptr != NULL) { + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(entry_ptr->ring > H5C_RING_UNDEFINED); + HDassert(entry_ptr->ring < H5C_RING_NTYPES); + HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date)); + + if (entry_ptr->ring == ring) { + if (entry_ptr->flush_me_last) { + if (!entry_ptr->image_up_to_date) { + HDassert(entry_ptr->serialization_count == 0); + HDassert(entry_ptr->flush_dep_nunser_children == 0); + + /* Serialize the entry */ + if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed") + + /* Check for the cache changing */ + if ((cache_ptr->entries_loaded_counter > 0) || + (cache_ptr->entries_inserted_counter > 0) || + (cache_ptr->entries_relocated_counter > 0)) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, + "flush_me_last entry serialization triggered restart") + + HDassert(entry_ptr->flush_dep_nunser_children == 0); + HDassert(entry_ptr->serialization_count == 0); +#ifndef NDEBUG + /* Increment serialization counter (to detect multiple serializations) */ + entry_ptr->serialization_count++; +#endif + } /* end if */ + } /* end if */ + else { + HDassert(entry_ptr->image_up_to_date); + HDassert(entry_ptr->serialization_count <= 1); + HDassert(entry_ptr->flush_dep_nunser_children == 0); + } /* end else */ + } /* if ( entry_ptr->ring == ring ) */ + + entry_ptr = entry_ptr->il_next; + } /* while ( entry_ptr != NULL ) */ + +done: + HDassert(cache_ptr->serialization_in_progress); + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__serialize_ring() */ diff --git a/src/H5Clog.c b/src/H5Clog.c index 25b3cf01c1a..b0eff4d8e93 100644 --- a/src/H5Clog.c +++ b/src/H5Clog.c @@ -100,11 +100,11 @@ H5C_log_set_up(H5C_t *cache, const char log_location[], H5C_log_style_t style, h /* Set up logging */ if (H5C_LOG_STYLE_JSON == style) { - if (H5C_log_json_set_up(cache->log_info, log_location, mpi_rank) < 0) + if (H5C__log_json_set_up(cache->log_info, log_location, mpi_rank) < 0) HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to set up json logging") } else if (H5C_LOG_STYLE_TRACE == style) { - if (H5C_log_trace_set_up(cache->log_info, log_location, mpi_rank) < 0) + if (H5C__log_trace_set_up(cache->log_info, log_location, mpi_rank) < 0) HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to set up trace logging") } else diff --git a/src/H5Clog.h b/src/H5Clog.h index b3636c6c176..b8ea5eda3db 100644 --- a/src/H5Clog.h +++ b/src/H5Clog.h @@ -132,7 +132,7 @@ H5_DLL herr_t H5C_log_write_remove_entry_msg(H5C_t *cache, const H5C_cache_entry herr_t fxn_ret_value); /* Logging-specific setup functions */ -H5_DLL herr_t H5C_log_json_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank); -H5_DLL herr_t H5C_log_trace_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank); +H5_DLL 
herr_t H5C__log_json_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank); +H5_DLL herr_t H5C__log_trace_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank); #endif /* H5Clog_H */ diff --git a/src/H5Clog_json.c b/src/H5Clog_json.c index ad27c94aea3..afb81c582cb 100644 --- a/src/H5Clog_json.c +++ b/src/H5Clog_json.c @@ -181,7 +181,7 @@ H5C__json_write_log_message(H5C_log_json_udata_t *json_udata) } /* H5C__json_write_log_message() */ /*------------------------------------------------------------------------- - * Function: H5C_log_json_set_up + * Function: H5C__log_json_set_up * * Purpose: Setup for metadata cache logging. * @@ -205,14 +205,14 @@ H5C__json_write_log_message(H5C_log_json_udata_t *json_udata) *------------------------------------------------------------------------- */ herr_t -H5C_log_json_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank) +H5C__log_json_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank) { H5C_log_json_udata_t *json_udata = NULL; char *file_name = NULL; size_t n_chars; herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(FAIL) + FUNC_ENTER_PACKAGE /* Sanity checks */ HDassert(log_info); @@ -271,7 +271,7 @@ H5C_log_json_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi } FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_log_json_set_up() */ +} /* H5C__log_json_set_up() */ /*------------------------------------------------------------------------- * Function: H5C__json_tear_down_logging diff --git a/src/H5Clog_trace.c b/src/H5Clog_trace.c index 74f33ed85f5..b095e877d7e 100644 --- a/src/H5Clog_trace.c +++ b/src/H5Clog_trace.c @@ -176,7 +176,7 @@ H5C__trace_write_log_message(H5C_log_trace_udata_t *trace_udata) } /* H5C__trace_write_log_message() */ /*------------------------------------------------------------------------- - * Function: H5C_log_trace_set_up + * Function: H5C__log_trace_set_up * * Purpose: Setup for metadata cache logging. 
* @@ -200,14 +200,14 @@ H5C__trace_write_log_message(H5C_log_trace_udata_t *trace_udata) *------------------------------------------------------------------------- */ herr_t -H5C_log_trace_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank) +H5C__log_trace_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank) { H5C_log_trace_udata_t *trace_udata = NULL; char *file_name = NULL; size_t n_chars; herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(FAIL) + FUNC_ENTER_PACKAGE /* Sanity checks */ HDassert(log_info); @@ -269,7 +269,7 @@ H5C_log_trace_set_up(H5C_log_info_t *log_info, const char log_location[], int mp } FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_log_trace_set_up() */ +} /* H5C__log_trace_set_up() */ /*------------------------------------------------------------------------- * Function: H5C__trace_tear_down_logging diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index 5822746cc9c..47f9b1fae8a 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -397,7 +397,6 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha } /* H5C_apply_candidate_list() */ /*------------------------------------------------------------------------- - * * Function: H5C_construct_candidate_list__clean_cache * * Purpose: Construct the list of entries that should be flushed to @@ -597,7 +596,6 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr) } /* H5C_construct_candidate_list__min_clean() */ /*------------------------------------------------------------------------- - * * Function: H5C_mark_entries_as_clean * * Purpose: When the H5C code is used to implement the metadata caches @@ -827,7 +825,6 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr } /* H5C_mark_entries_as_clean() */ /*------------------------------------------------------------------------- - * * Function: H5C_clear_coll_entries * * Purpose: Clear half or the entire list of collective entries and @@ -879,7 +876,6 @@ H5C_clear_coll_entries(H5C_t *cache_ptr, hbool_t partial) } /* H5C_clear_coll_entries */ /*------------------------------------------------------------------------- - * * Function: H5C__collective_write * * Purpose: Perform a collective write of a list of metadata entries. 
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 2cad464743d..d9203b8b151 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -299,6 +299,16 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size))) { \ #define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \ (cache_ptr)->unpins[(entry_ptr)->type->id]++; +#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \ +{ \ + (cache_ptr)->prefetches++; \ + if (dirty) \ + (cache_ptr)->dirty_prefetches++; \ +} + +#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \ + (cache_ptr)->prefetch_hits++; + #define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) \ (cache_ptr)->slist_scan_restarts++; @@ -512,6 +522,8 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size))) { \ #define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) #define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) #define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) +#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) +#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) #define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) #define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) #define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr) @@ -3256,19 +3268,22 @@ typedef int (*H5C_tag_iter_cb_t)(H5C_cache_entry_t *entry, void *ctx); /* Package Private Prototypes */ /******************************/ H5_DLL herr_t H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated); -H5_DLL herr_t H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t * cache_ptr, - H5C_cache_entry_t** entry_ptr_ptr, const H5C_class_t * type, haddr_t addr, - void * udata); /* General routines */ +H5_DLL herr_t H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted); +H5_DLL herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr); +H5_DLL herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr); +H5_DLL herr_t H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size); +H5_DLL herr_t H5C__flush_invalidate_cache(H5F_t *f, unsigned flags); +H5_DLL herr_t H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags); H5_DLL herr_t H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags); H5_DLL herr_t H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr); H5_DLL herr_t H5C__load_cache_image(H5F_t *f); H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, size_t space_needed, hbool_t write_permitted); -H5_DLL herr_t H5C__flush_marked_entries(H5F_t * f); H5_DLL herr_t H5C__serialize_cache(H5F_t *f); +H5_DLL herr_t H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr); H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global, H5C_tag_iter_cb_t cb, void *cb_ctx); @@ -3276,10 +3291,14 @@ H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_ H5_DLL herr_t H5C__tag_entry(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr); H5_DLL herr_t H5C__untag_entry(H5C_t *cache, H5C_cache_entry_t *entry); +/* Routines for operating on cache images */ +H5_DLL herr_t H5C__get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr); +H5_DLL herr_t H5C__image_stats(H5C_t *cache_ptr, hbool_t print_header); + +/* Debugging routines */ #ifdef H5C_DO_SLIST_SANITY_CHECKS H5_DLL hbool_t H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr); #endif - #ifdef H5C_DO_EXTREME_SANITY_CHECKS H5_DLL herr_t H5C__validate_lru_list(H5C_t *cache_ptr); H5_DLL herr_t 
H5C__validate_pinned_entry_list(H5C_t *cache_ptr); diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index a5330ce664d..ed52ed27451 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -2215,7 +2215,6 @@ herr_t H5C_verify_tag(int id, haddr_t tag); #endif H5_DLL herr_t H5C_flush_to_min_clean(H5F_t *f); H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr); -H5_DLL herr_t H5C_get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr); H5_DLL herr_t H5C_get_cache_size(const H5C_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr, size_t *cur_size_ptr, uint32_t *cur_num_entries_ptr); H5_DLL herr_t H5C_get_cache_flush_in_progress(const H5C_t *cache_ptr, hbool_t *flush_in_progress_ptr); @@ -2226,7 +2225,6 @@ H5_DLL herr_t H5C_get_entry_status(const H5F_t *f, haddr_t addr, size_t *size_pt hbool_t *is_flush_dep_child_ptr, hbool_t *image_up_to_date_ptr); H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t *cache_ptr, hbool_t *evictions_enabled_ptr); H5_DLL void *H5C_get_aux_ptr(const H5C_t *cache_ptr); -H5_DLL herr_t H5C_image_stats(H5C_t *cache_ptr, hbool_t print_header); H5_DLL herr_t H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags); H5_DLL herr_t H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, hsize_t len, hbool_t rw); diff --git a/src/H5Cquery.c b/src/H5Cquery.c index db73d65133f..6325d1f6d26 100644 --- a/src/H5Cquery.c +++ b/src/H5Cquery.c @@ -207,7 +207,6 @@ H5C_get_cache_hit_rate(const H5C_t *cache_ptr, double *hit_rate_ptr) } /* H5C_get_cache_hit_rate() */ /*------------------------------------------------------------------------- - * * Function: H5C_get_entry_status * * Purpose: This function is used to determine whether the cache diff --git a/src/H5Ctag.c b/src/H5Ctag.c index 10089e4949c..d821e280db7 100644 --- a/src/H5Ctag.c +++ b/src/H5Ctag.c @@ -38,8 +38,6 @@ #include "H5CXprivate.h" /* API Contexts */ #include "H5Eprivate.h" /* Error handling */ #include "H5Fpkg.h" /* Files */ -#include "H5Iprivate.h" /* IDs */ -#include "H5Pprivate.h" /* Property lists */ /****************/ /* Local Macros */ @@ -81,7 +79,9 @@ typedef struct { /********************/ /* Local Prototypes */ /********************/ +static herr_t H5C__iter_tagged_entries_real(H5C_t *cache, haddr_t tag, H5C_tag_iter_cb_t cb, void *cb_ctx); static herr_t H5C__mark_tagged_entries(H5C_t *cache, haddr_t tag); +static herr_t H5C__flush_marked_entries(H5F_t *f); /*********************/ /* Package Variables */ @@ -99,7 +99,6 @@ H5FL_EXTERN(H5C_tag_info_t); /*******************/ /*------------------------------------------------------------------------- - * * Function: H5C_ignore_tags * * Purpose: Override all assertion frameworks associated with making @@ -136,7 +135,6 @@ H5C_ignore_tags(H5C_t *cache) } /* H5C_ignore_tags */ /*------------------------------------------------------------------------- - * * Function: H5C_get_ignore_tags * * Purpose: Retrieve the 'ignore_tags' field for the cache @@ -148,7 +146,7 @@ H5C_ignore_tags(H5C_t *cache) * *------------------------------------------------------------------------- */ -hbool_t +H5_ATTR_PURE hbool_t H5C_get_ignore_tags(const H5C_t *cache) { FUNC_ENTER_NOAPI_NOERR @@ -162,7 +160,6 @@ H5C_get_ignore_tags(const H5C_t *cache) } /* H5C_get_ignore_tags */ /*------------------------------------------------------------------------- - * * Function: H5C_get_num_objs_corked * * Purpose: Retrieve the 'num_objs_corked' field for the cache @@ 
-173,7 +170,7 @@ H5C_get_ignore_tags(const H5C_t *cache) * *------------------------------------------------------------------------- */ -uint32_t +H5_ATTR_PURE uint32_t H5C_get_num_objs_corked(const H5C_t *cache) { FUNC_ENTER_NOAPI_NOERR @@ -187,7 +184,6 @@ H5C_get_num_objs_corked(const H5C_t *cache) } /* H5C_get_num_objs_corked */ /*------------------------------------------------------------------------- - * * Function: H5C__tag_entry * * Purpose: Tags an entry with the provided tag (contained in the API context). @@ -274,7 +270,6 @@ H5C__tag_entry(H5C_t *cache, H5C_cache_entry_t *entry) } /* H5C__tag_entry */ /*------------------------------------------------------------------------- - * * Function: H5C__untag_entry * * Purpose: Removes an entry from a tag list, possibly removing the tag @@ -333,7 +328,6 @@ H5C__untag_entry(H5C_t *cache, H5C_cache_entry_t *entry) } /* H5C__untag_entry */ /*------------------------------------------------------------------------- - * * Function: H5C__iter_tagged_entries_real * * Purpose: Iterate over tagged entries, making a callback for matches @@ -390,7 +384,6 @@ H5C__iter_tagged_entries_real(H5C_t *cache, haddr_t tag, H5C_tag_iter_cb_t cb, v } /* H5C__iter_tagged_entries_real() */ /*------------------------------------------------------------------------- - * * Function: H5C__iter_tagged_entries * * Purpose: Iterate over tagged entries, making a callback for matches @@ -434,7 +427,6 @@ H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global, H5C_ta } /* H5C__iter_tagged_entries() */ /*------------------------------------------------------------------------- - * * Function: H5C__evict_tagged_entries_cb * * Purpose: Callback for evicting tagged entries @@ -485,7 +477,6 @@ H5C__evict_tagged_entries_cb(H5C_cache_entry_t *entry, void *_ctx) } /* H5C__evict_tagged_entries_cb() */ /*------------------------------------------------------------------------- - * * Function: H5C_evict_tagged_entries * * Purpose: Evicts all entries with the specified tag from cache @@ -564,7 +555,6 @@ H5C_evict_tagged_entries(H5F_t *f, haddr_t tag, hbool_t match_global) } /* H5C_evict_tagged_entries() */ /*------------------------------------------------------------------------- - * * Function: H5C__mark_tagged_entries_cb * * Purpose: Callback to set the flush marker on dirty entries in the cache @@ -594,7 +584,6 @@ H5C__mark_tagged_entries_cb(H5C_cache_entry_t *entry, void H5_ATTR_UNUSED *_ctx) } /* H5C__mark_tagged_entries_cb() */ /*------------------------------------------------------------------------- - * * Function: H5C__mark_tagged_entries * * Purpose: Set the flush marker on dirty entries in the cache that have @@ -629,10 +618,50 @@ H5C__mark_tagged_entries(H5C_t *cache, haddr_t tag) FUNC_LEAVE_NOAPI(ret_value) } /* H5C__mark_tagged_entries() */ +/*------------------------------------------------------------------------- + * Function: H5C__flush_marked_entries + * + * Purpose: Flushes all marked entries in the cache. + * + * Return: FAIL if error is detected, SUCCEED otherwise. 
+ * + * Programmer: Mike McGreevy + * November 3, 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5C__flush_marked_entries(H5F_t *f) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + /* Assertions */ + HDassert(f != NULL); + + /* Enable the slist, as it is needed in the flush */ + if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed") + + /* Flush all marked entries */ + if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache") + + /* Disable the slist. Set the clear_slist parameter to TRUE + * since we called H5C_flush_cache() with the + * H5C__FLUSH_MARKED_ENTRIES_FLAG. + */ + if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__flush_marked_entries */ + #ifdef H5C_DO_TAGGING_SANITY_CHECKS /*------------------------------------------------------------------------- - * * Function: H5C_verify_tag * * Purpose: Performs sanity checking on an entrytype/tag pair. @@ -703,7 +732,6 @@ H5C_verify_tag(int id, haddr_t tag) #endif /*------------------------------------------------------------------------- - * * Function: H5C_flush_tagged_entries * * Purpose: Flushes all entries with the specified tag to disk. @@ -744,7 +772,6 @@ H5C_flush_tagged_entries(H5F_t *f, haddr_t tag) } /* H5C_flush_tagged_entries */ /*------------------------------------------------------------------------- - * * Function: H5C_retag_entries * * Purpose: Searches through cache index for all entries with the @@ -786,7 +813,6 @@ H5C_retag_entries(H5C_t *cache, haddr_t src_tag, haddr_t dest_tag) } /* H5C_retag_entries() */ /*------------------------------------------------------------------------- - * * Function: H5C__expunge_tag_type_metadata_cb * * Purpose: Expunge from the cache entries associated @@ -822,7 +848,6 @@ H5C__expunge_tag_type_metadata_cb(H5C_cache_entry_t *entry, void *_ctx) } /* H5C__expunge_tag_type_metadata_cb() */ /*------------------------------------------------------------------------- - * * Function: H5C_expunge_tag_type_metadata * * Purpose: Search and expunge from the cache entries associated @@ -866,7 +891,6 @@ H5C_expunge_tag_type_metadata(H5F_t *f, haddr_t tag, int type_id, unsigned flags } /* H5C_expunge_tag_type_metadata() */ /*------------------------------------------------------------------------- - * * Function: H5C_get_tag() * * Purpose: Get the tag for a metadata cache entry. 
diff --git a/src/Makefile.am b/src/Makefile.am index 5650c38e3d8..a1c21e3984a 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -43,7 +43,8 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5lib_settings.c H5system.c \ H5B.c H5Bcache.c H5Bdbg.c \ H5B2.c H5B2cache.c H5B2dbg.c H5B2hdr.c H5B2int.c H5B2internal.c \ H5B2leaf.c H5B2stat.c H5B2test.c \ - H5C.c H5Cdbg.c H5Cepoch.c H5Cimage.c H5Clog.c H5Clog_json.c H5Clog_trace.c \ + H5C.c H5Cdbg.c H5Centry.c H5Cepoch.c H5Cimage.c H5Cint.c \ + H5Clog.c H5Clog_json.c H5Clog_trace.c \ H5Cprefetched.c H5Cquery.c H5Ctag.c H5Ctest.c \ H5CS.c \ H5CX.c \ diff --git a/test/cache_image.c b/test/cache_image.c index 8c996d5cc7a..c3c0b497965 100644 --- a/test/cache_image.c +++ b/test/cache_image.c @@ -728,10 +728,10 @@ open_hdf5_file(hbool_t create_file, hbool_t mdci_sbem_expected, hbool_t read_onl */ if (pass) { - if (H5C_get_cache_image_config(cache_ptr, &image_ctl) < 0) { + if (H5C__get_cache_image_config(cache_ptr, &image_ctl) < 0) { pass = FALSE; - failure_mssg = "error returned by H5C_get_cache_image_config()."; + failure_mssg = "error returned by H5C__get_cache_image_config()."; } } diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c index 1e556d984ee..86f1696bd52 100644 --- a/testpar/t_cache_image.c +++ b/testpar/t_cache_image.c @@ -1135,10 +1135,10 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons */ if (pass) { - if (H5C_get_cache_image_config(cache_ptr, &image_ctl) < 0) { + if (H5C__get_cache_image_config(cache_ptr, &image_ctl) < 0) { pass = FALSE; - failure_mssg = "error returned by H5C_get_cache_image_config()."; + failure_mssg = "error returned by H5C__get_cache_image_config()."; } } From 07c1d3af0f3c0f99caaef0443d5a22c68bd550f2 Mon Sep 17 00:00:00 2001 From: mattjala <124107509+mattjala@users.noreply.github.com> Date: Fri, 12 May 2023 15:22:55 -0500 Subject: [PATCH 219/231] Prevent buffer overrun in H5S_select_deserialize (#2931) * Prevent buffer overrun in H5S_select_deserialize The call to H5S_select_deserialize from H5S_decode doesn't have the buffer size available to it, so to allow decoding there I set it to assume a max size buffer for now. Making the buffer size known in H5S_decode could be done by modifying the external API's H5Sdecode, or splitting H5Sdecode into two functions using a macro (similar to H5Sencode), with the macro taking one argument and assuming a max buffer size. * Conditional buffer check in H5S_select_deserialize Moved and renamed a macro for only checking buffer overflow when buffer size is known from H5Odtype.c to H5private.h, so it can be used throughout the library. Also silenced some build warnings about types. 
--- src/H5Odtype.c | 67 ++++++++++++++++++---------------------- src/H5Olayout.c | 20 ++++++++++-- src/H5Rint.c | 19 +++++++++--- src/H5S.c | 5 +-- src/H5Sall.c | 18 ++++++----- src/H5Shyper.c | 80 ++++++++++++++++++++++++++++++++++++++---------- src/H5Snone.c | 17 ++++++---- src/H5Spkg.h | 3 +- src/H5Spoint.c | 76 +++++++++++++++++++++++++++++++++++---------- src/H5Sprivate.h | 4 +-- src/H5Sselect.c | 19 +++++++----- src/H5private.h | 9 ++++++ 12 files changed, 233 insertions(+), 104 deletions(-) diff --git a/src/H5Odtype.c b/src/H5Odtype.c index 977e4b189ff..cab2eb263cc 100644 --- a/src/H5Odtype.c +++ b/src/H5Odtype.c @@ -24,15 +24,6 @@ #include "H5Tpkg.h" /* Datatypes */ #include "H5VMprivate.h" /* Vectors and arrays */ -/* Variant boundary-checking macro, used here since H5Tdecode() doesn't take a - * size parameter so we need to ignore the bounds checks. - * - * This is a separate macro since we don't want to inflict that behavior on - * the rest of the library. - */ -#define H5_DTYPE_IS_BUFFER_OVERFLOW(skip, ptr, size, buffer_end) \ - (skip ? FALSE : ((ptr) + (size)-1) > (buffer_end)) - /* PRIVATE PROTOTYPES */ static herr_t H5O__dtype_encode(H5F_t *f, uint8_t *p, const void *mesg); static void *H5O__dtype_decode(H5F_t *f, H5O_t *open_oh, unsigned mesg_flags, unsigned *ioflags, @@ -146,7 +137,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t * that case is impossible. * * Instead of using our normal H5_IS_BUFFER_OVERFLOW macro, use - * H5_DTYPE_IS_BUFFER_OVERFLOW, which will skip the check when the + * H5_IS_KNOWN_BUFFER_OVERFLOW, which will skip the check when the * we're decoding a buffer from H5Tconvert(). * * Even if this is fixed at some point in the future, as long as we @@ -155,7 +146,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t */ /* Version, class & flags */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(*pp, flags); version = (flags >> 4) & 0x0f; @@ -166,7 +157,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t flags >>= 8; /* Size */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(*pp, dt->shared->size); @@ -183,7 +174,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t dt->shared->u.atomic.lsb_pad = (flags & 0x2) ? H5T_PAD_ONE : H5T_PAD_ZERO; dt->shared->u.atomic.msb_pad = (flags & 0x4) ? H5T_PAD_ONE : H5T_PAD_ZERO; dt->shared->u.atomic.u.i.sign = (flags & 0x8) ? 
H5T_SGN_2 : H5T_SGN_NONE; - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 2 + 2, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 2 + 2, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(*pp, dt->shared->u.atomic.offset); UINT16DECODE(*pp, dt->shared->u.atomic.prec); @@ -224,26 +215,26 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t } dt->shared->u.atomic.u.f.sign = (flags >> 8) & 0xff; - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 2 + 2, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 2 + 2, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(*pp, dt->shared->u.atomic.offset); UINT16DECODE(*pp, dt->shared->u.atomic.prec); - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 1 + 1, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 1 + 1, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); dt->shared->u.atomic.u.f.epos = *(*pp)++; dt->shared->u.atomic.u.f.esize = *(*pp)++; if (dt->shared->u.atomic.u.f.esize == 0) HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "exponent size can't be zero") - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 1 + 1, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 1 + 1, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); dt->shared->u.atomic.u.f.mpos = *(*pp)++; dt->shared->u.atomic.u.f.msize = *(*pp)++; if (dt->shared->u.atomic.u.f.msize == 0) HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "mantissa size can't be zero") - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(*pp, dt->shared->u.atomic.u.f.ebias); break; @@ -253,7 +244,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t * Time datatypes... */ dt->shared->u.atomic.order = (flags & 0x1) ? H5T_ORDER_BE : H5T_ORDER_LE; - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 2, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 2, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(*pp, dt->shared->u.atomic.prec); break; @@ -279,7 +270,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t dt->shared->u.atomic.order = (flags & 0x1) ? H5T_ORDER_BE : H5T_ORDER_LE; dt->shared->u.atomic.lsb_pad = (flags & 0x2) ? H5T_PAD_ONE : H5T_PAD_ZERO; dt->shared->u.atomic.msb_pad = (flags & 0x4) ? 
H5T_PAD_ONE : H5T_PAD_ZERO; - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 2 + 2, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 2 + 2, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(*pp, dt->shared->u.atomic.offset); UINT16DECODE(*pp, dt->shared->u.atomic.prec); @@ -300,7 +291,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t if (NULL == (dt->shared->u.opaque.tag = (char *)H5MM_malloc(z + 1))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed") - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, z, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, z, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); H5MM_memcpy(dt->shared->u.opaque.tag, *pp, z); dt->shared->u.opaque.tag[z] = '\0'; @@ -361,7 +352,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t actual_name_length = HDstrlen((const char *)*pp); } - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, actual_name_length, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, actual_name_length, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); /* Decode the field name */ @@ -373,14 +364,14 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* Version 3 of the datatype message eliminated the padding to multiple of 8 bytes */ if (version >= H5O_DTYPE_VERSION_3) { /* Advance past name, including null terminator */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, actual_name_length + 1, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, actual_name_length + 1, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); *pp += actual_name_length + 1; } else { /* Advance multiple of 8 w/ null terminator */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, ((actual_name_length + 8) / 8) * 8, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, ((actual_name_length + 8) / 8) * 8, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); *pp += ((actual_name_length + 8) / 8) * 8; @@ -389,14 +380,14 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* Decode the field offset */ /* (starting with version 3 of the datatype message, use the minimum # of bytes required) */ if (version >= H5O_DTYPE_VERSION_3) { - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, offset_nbytes, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, offset_nbytes, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE_VAR(*pp, dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset, offset_nbytes) } else { - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(*pp, dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset) @@ -407,7 +398,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t * use the separate array datatypes. 
*/ if (version == H5O_DTYPE_VERSION_1) { /* Decode the number of dimensions */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 1, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 1, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); ndims = *(*pp)++; @@ -420,25 +411,25 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t } /* Skip reserved bytes */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 3, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 3, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); *pp += 3; /* Skip dimension permutation */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); *pp += 4; /* Skip reserved bytes */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); *pp += 4; /* Decode array dimension sizes */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, (4 * 4), p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, (4 * 4), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); for (int i = 0; i < 4; i++) @@ -657,7 +648,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t actual_name_length = HDstrlen((const char *)*pp); } - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, actual_name_length, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, actual_name_length, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); if (NULL == (dt->shared->u.enumer.name[dt->shared->u.enumer.nmembs] = H5MM_xstrdup((const char *)*pp))) @@ -666,14 +657,14 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* Version 3 of the datatype message eliminated the padding to multiple of 8 bytes */ if (version >= H5O_DTYPE_VERSION_3) { /* Advance past name, including null terminator */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, actual_name_length + 1, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, actual_name_length + 1, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); *pp += actual_name_length + 1; } else { /* Advance multiple of 8 w/ null terminator */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, ((actual_name_length + 8) / 8) * 8, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, ((actual_name_length + 8) / 8) * 8, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); *pp += ((actual_name_length + 8) / 8) * 8; @@ -683,7 +674,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "incorrect number of enum members decoded"); /* Values */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, nmembs * dt->shared->parent->shared->size, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, nmembs * dt->shared->parent->shared->size, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); H5MM_memcpy(dt->shared->u.enumer.value, *pp, nmembs * dt->shared->parent->shared->size); *pp += nmembs * dt->shared->parent->shared->size; @@ -723,7 +714,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t * Array datatypes... 
*/ /* Decode the number of dimensions */ - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 1, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 1, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); dt->shared->u.array.ndims = *(*pp)++; @@ -733,14 +724,14 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* Skip reserved bytes, if version has them */ if (version < H5O_DTYPE_VERSION_3) { - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, 3, p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 3, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); *pp += 3; } /* Decode array dimension sizes & compute number of elements */ dt->shared->u.array.nelem = 1; - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, (dt->shared->u.array.ndims * 4), p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, (dt->shared->u.array.ndims * 4), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); for (unsigned u = 0; u < dt->shared->u.array.ndims; u++) { UINT32DECODE(*pp, dt->shared->u.array.dim[u]); @@ -749,7 +740,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* Skip array dimension permutations, if version has them */ if (version < H5O_DTYPE_VERSION_3) { - if (H5_DTYPE_IS_BUFFER_OVERFLOW(skip, *pp, (dt->shared->u.array.ndims * 4), p_end)) + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, (dt->shared->u.array.ndims * 4), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); *pp += dt->shared->u.array.ndims * 4; } diff --git a/src/H5Olayout.c b/src/H5Olayout.c index f784f246ca7..645ad73c5c1 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -634,13 +634,27 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU heap_block_p += tmp_size; /* Source selection */ - if (H5S_SELECT_DESERIALIZE(&mesg->storage.u.virt.list[i].source_select, - &heap_block_p) < 0) + avail_buffer_space = heap_block_p_end - heap_block_p + 1; + + if (avail_buffer_space <= 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, NULL, + "buffer overflow while decoding layout") + + if (H5S_SELECT_DESERIALIZE(&mesg->storage.u.virt.list[i].source_select, &heap_block_p, + (size_t)(avail_buffer_space)) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, NULL, "can't decode source space selection") /* Virtual selection */ + + /* Buffer space must be updated after previous deserialization */ + avail_buffer_space = heap_block_p_end - heap_block_p + 1; + + if (avail_buffer_space <= 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, NULL, + "buffer overflow while decoding layout") + if (H5S_SELECT_DESERIALIZE(&mesg->storage.u.virt.list[i].source_dset.virtual_select, - &heap_block_p) < 0) + &heap_block_p, (size_t)(avail_buffer_space)) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, NULL, "can't decode virtual space selection") diff --git a/src/H5Rint.c b/src/H5Rint.c index 4b3d692a8d5..7b22150c4ec 100644 --- a/src/H5Rint.c +++ b/src/H5Rint.c @@ -1177,6 +1177,7 @@ static herr_t H5R__decode_region(const unsigned char *buf, size_t *nbytes, H5S_t **space_ptr) { const uint8_t *p = (const uint8_t *)buf; + const uint8_t *p_end = p + *nbytes - 1; size_t buf_size = 0; unsigned rank; H5S_t *space; @@ -1209,7 +1210,11 @@ H5R__decode_region(const unsigned char *buf, size_t *nbytes, H5S_t **space_ptr) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTDECODE, FAIL, "Buffer size is too small") if (H5S_set_extent_simple(space, rank, NULL, NULL) < 0) 
HGOTO_ERROR(H5E_REFERENCE, H5E_CANTSET, FAIL, "can't set extent rank for selection") - if (H5S_SELECT_DESERIALIZE(&space, &p) < 0) + + if (p - 1 > p_end) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTDECODE, FAIL, "Ran off end of buffer while decoding") + + if (H5S_SELECT_DESERIALIZE(&space, &p, (size_t)(p_end - p + 1)) < 0) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTDECODE, FAIL, "can't deserialize selection") *nbytes = buf_size; @@ -1472,7 +1477,8 @@ H5R__decode_token_region_compat(H5F_t *f, const unsigned char *buf, size_t *nbyt unsigned char *data = NULL; H5O_token_t token = {0}; size_t data_size; - const uint8_t *p; + const uint8_t *p = NULL; + const uint8_t *p_end = NULL; H5S_t *space = NULL; herr_t ret_value = SUCCEED; @@ -1488,7 +1494,8 @@ H5R__decode_token_region_compat(H5F_t *f, const unsigned char *buf, size_t *nbyt HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier") /* Get object address */ - p = (const uint8_t *)data; + p = (const uint8_t *)data; + p_end = p + data_size - 1; H5MM_memcpy(&token, p, token_size); p += token_size; @@ -1508,7 +1515,11 @@ H5R__decode_token_region_compat(H5F_t *f, const unsigned char *buf, size_t *nbyt HGOTO_ERROR(H5E_REFERENCE, H5E_NOTFOUND, FAIL, "not found") /* Unserialize the selection */ - if (H5S_SELECT_DESERIALIZE(&space, &p) < 0) + + if (p - 1 >= p_end) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTDECODE, FAIL, "Ran off end of buffer while deserializing") + + if (H5S_SELECT_DESERIALIZE(&space, &p, (size_t)(p_end - p + 1)) < 0) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTDECODE, FAIL, "can't deserialize selection") *space_ptr = space; diff --git a/src/H5S.c b/src/H5S.c index 22de41f97fa..c938a714c9b 100644 --- a/src/H5S.c +++ b/src/H5S.c @@ -1655,9 +1655,10 @@ H5S_decode(const unsigned char **p) if (H5S_select_all(ds, FALSE) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, NULL, "unable to set all selection") - /* Decode the select part of dataspace. I believe this part always exists. */ + /* Decode the select part of dataspace. + * Because size of buffer is unknown, assume arbitrarily large buffer to allow decoding. 
*/ *p = pp; - if (H5S_SELECT_DESERIALIZE(&ds, p) < 0) + if (H5S_SELECT_DESERIALIZE(&ds, p, SIZE_MAX) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDECODE, NULL, "can't decode space selection") /* Set return value */ diff --git a/src/H5Sall.c b/src/H5Sall.c index 20c9a20a332..7f5633facff 100644 --- a/src/H5Sall.c +++ b/src/H5Sall.c @@ -50,7 +50,7 @@ static herr_t H5S__all_release(H5S_t *space); static htri_t H5S__all_is_valid(const H5S_t *space); static hssize_t H5S__all_serial_size(H5S_t *space); static herr_t H5S__all_serialize(H5S_t *space, uint8_t **p); -static herr_t H5S__all_deserialize(H5S_t **space, const uint8_t **p); +static herr_t H5S__all_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, hbool_t skip); static herr_t H5S__all_bounds(const H5S_t *space, hsize_t *start, hsize_t *end); static herr_t H5S__all_offset(const H5S_t *space, hsize_t *off); static int H5S__all_unlim_dim(const H5S_t *space); @@ -637,13 +637,13 @@ H5S__all_serialize(H5S_t *space, uint8_t **p) REVISION LOG --------------------------------------------------------------------------*/ static herr_t -H5S__all_deserialize(H5S_t **space, const uint8_t **p) +H5S__all_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, hbool_t skip) { - uint32_t version; /* Version number */ - H5S_t *tmp_space = NULL; /* Pointer to actual dataspace to use, - either *space or a newly allocated one */ - herr_t ret_value = SUCCEED; /* return value */ - + uint32_t version; /* Version number */ + H5S_t *tmp_space = NULL; /* Pointer to actual dataspace to use, + either *space or a newly allocated one */ + herr_t ret_value = SUCCEED; /* return value */ + const uint8_t *p_end = *p + p_size - 1; /* Pointer to last valid byte in buffer */ FUNC_ENTER_PACKAGE HDassert(p); @@ -663,12 +663,16 @@ H5S__all_deserialize(H5S_t **space, const uint8_t **p) tmp_space = *space; /* Decode version */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *p, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection version") UINT32DECODE(*p, version); if (version < H5S_ALL_VERSION_1 || version > H5S_ALL_VERSION_LATEST) HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "bad version number for all selection") /* Skip over the remainder of the header */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *p, 8, p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding header") *p += 8; /* Change to "all" selection */ diff --git a/src/H5Shyper.c b/src/H5Shyper.c index e749ee91d9d..9f567d32b2b 100644 --- a/src/H5Shyper.c +++ b/src/H5Shyper.c @@ -175,7 +175,7 @@ static htri_t H5S__hyper_is_valid(const H5S_t *space); static hsize_t H5S__hyper_span_nblocks(H5S_hyper_span_info_t *spans); static hssize_t H5S__hyper_serial_size(H5S_t *space); static herr_t H5S__hyper_serialize(H5S_t *space, uint8_t **p); -static herr_t H5S__hyper_deserialize(H5S_t **space, const uint8_t **p); +static herr_t H5S__hyper_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, hbool_t skip); static herr_t H5S__hyper_bounds(const H5S_t *space, hsize_t *start, hsize_t *end); static herr_t H5S__hyper_offset(const H5S_t *space, hsize_t *offset); static int H5S__hyper_unlim_dim(const H5S_t *space); @@ -4239,21 +4239,21 @@ H5S__hyper_serialize(H5S_t *space, uint8_t **p) REVISION LOG --------------------------------------------------------------------------*/ static herr_t -H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) +H5S__hyper_deserialize(H5S_t **space, const uint8_t **p, const size_t 
p_size, hbool_t skip) { - H5S_t *tmp_space = NULL; /* Pointer to actual dataspace to use, - either *space or a newly allocated one */ - hsize_t dims[H5S_MAX_RANK]; /* Dimension sizes */ - hsize_t start[H5S_MAX_RANK]; /* hyperslab start information */ - hsize_t block[H5S_MAX_RANK]; /* hyperslab block information */ - uint32_t version; /* Version number */ - uint8_t flags = 0; /* Flags */ - uint8_t enc_size = 0; /* Encoded size of selection info */ - unsigned rank; /* rank of points */ - const uint8_t *pp; /* Local pointer for decoding */ - unsigned u; /* Local counting variable */ - herr_t ret_value = FAIL; /* return value */ - + H5S_t *tmp_space = NULL; /* Pointer to actual dataspace to use, + either *space or a newly allocated one */ + hsize_t dims[H5S_MAX_RANK]; /* Dimension sizes */ + hsize_t start[H5S_MAX_RANK]; /* hyperslab start information */ + hsize_t block[H5S_MAX_RANK]; /* hyperslab block information */ + uint32_t version; /* Version number */ + uint8_t flags = 0; /* Flags */ + uint8_t enc_size = 0; /* Encoded size of selection info */ + unsigned rank; /* rank of points */ + const uint8_t *pp; /* Local pointer for decoding */ + unsigned u; /* Local counting variable */ + herr_t ret_value = FAIL; /* return value */ + const uint8_t *p_end = *p + p_size - 1; /* Pointer to last valid byte in buffer */ FUNC_ENTER_PACKAGE /* Check args */ @@ -4274,6 +4274,8 @@ H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) tmp_space = *space; /* Decode version */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection version") UINT32DECODE(pp, version); if (version < H5S_HYPER_VERSION_1 || version > H5S_HYPER_VERSION_LATEST) @@ -4281,13 +4283,22 @@ H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) if (version >= (uint32_t)H5S_HYPER_VERSION_2) { /* Decode flags */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, 1, p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection flags") flags = *(pp)++; - if (version >= (uint32_t)H5S_HYPER_VERSION_3) + if (version >= (uint32_t)H5S_HYPER_VERSION_3) { /* decode size of offset info */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, 1, p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding selection encoding size") enc_size = *(pp)++; + } else { /* Skip over the remainder of the header */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, 4, p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding selection header") pp += 4; enc_size = H5S_SELECT_INFO_ENC_SIZE_8; } /* end else */ @@ -4298,6 +4309,8 @@ H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) } else { /* Skip over the remainder of the header */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, 8, p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection header") pp += 8; enc_size = H5S_SELECT_INFO_ENC_SIZE_4; } /* end else */ @@ -4307,6 +4320,8 @@ H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTLOAD, FAIL, "unknown size of point/offset info for selection") /* Decode the rank of the point selection */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection rank") UINT32DECODE(pp, rank); if (!*space) { @@ -4333,6 +4348,10 @@ H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) switch (enc_size) { case 
H5S_SELECT_INFO_ENC_SIZE_2: for (u = 0; u < tmp_space->extent.rank; u++) { + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, 4 * sizeof(uint16_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding selection ranks") + UINT16DECODE(pp, start[u]); UINT16DECODE(pp, stride[u]); @@ -4348,6 +4367,10 @@ H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) case H5S_SELECT_INFO_ENC_SIZE_4: for (u = 0; u < tmp_space->extent.rank; u++) { + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, 4 * sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding selection ranks") + UINT32DECODE(pp, start[u]); UINT32DECODE(pp, stride[u]); @@ -4363,6 +4386,10 @@ H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) case H5S_SELECT_INFO_ENC_SIZE_8: for (u = 0; u < tmp_space->extent.rank; u++) { + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, 4 * sizeof(uint64_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding selection ranks") + UINT64DECODE(pp, start[u]); UINT64DECODE(pp, stride[u]); @@ -4398,14 +4425,23 @@ H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) /* Decode the number of blocks */ switch (enc_size) { case H5S_SELECT_INFO_ENC_SIZE_2: + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, sizeof(uint16_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding number of selection blocks") UINT16DECODE(pp, num_elem); break; case H5S_SELECT_INFO_ENC_SIZE_4: + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding number of selection blocks") UINT32DECODE(pp, num_elem); break; case H5S_SELECT_INFO_ENC_SIZE_8: + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, sizeof(uint64_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding number of selection blocks") UINT64DECODE(pp, num_elem); break; @@ -4422,6 +4458,10 @@ H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) /* Decode the starting and ending points */ switch (enc_size) { case H5S_SELECT_INFO_ENC_SIZE_2: + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, rank * 2 * sizeof(uint16_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding selection coordinates") + for (tstart = start, v = 0; v < rank; v++, tstart++) UINT16DECODE(pp, *tstart); for (tend = end, v = 0; v < rank; v++, tend++) @@ -4429,6 +4469,10 @@ H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) break; case H5S_SELECT_INFO_ENC_SIZE_4: + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, rank * 2 * sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding selection coordinates") + for (tstart = start, v = 0; v < rank; v++, tstart++) UINT32DECODE(pp, *tstart); for (tend = end, v = 0; v < rank; v++, tend++) @@ -4436,6 +4480,10 @@ H5S__hyper_deserialize(H5S_t **space, const uint8_t **p) break; case H5S_SELECT_INFO_ENC_SIZE_8: + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, rank * 2 * sizeof(uint64_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding selection coordinates") + for (tstart = start, v = 0; v < rank; v++, tstart++) UINT64DECODE(pp, *tstart); for (tend = end, v = 0; v < rank; v++, tend++) diff --git a/src/H5Snone.c b/src/H5Snone.c index b32ac124e1a..c6e862c434f 100644 --- a/src/H5Snone.c +++ b/src/H5Snone.c @@ -50,7 +50,7 @@ static herr_t H5S__none_release(H5S_t *space); static htri_t 
H5S__none_is_valid(const H5S_t *space); static hssize_t H5S__none_serial_size(H5S_t *space); static herr_t H5S__none_serialize(H5S_t *space, uint8_t **p); -static herr_t H5S__none_deserialize(H5S_t **space, const uint8_t **p); +static herr_t H5S__none_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, hbool_t skip); static herr_t H5S__none_bounds(const H5S_t *space, hsize_t *start, hsize_t *end); static herr_t H5S__none_offset(const H5S_t *space, hsize_t *off); static int H5S__none_unlim_dim(const H5S_t *space); @@ -593,12 +593,13 @@ H5S__none_serialize(H5S_t *space, uint8_t **p) REVISION LOG --------------------------------------------------------------------------*/ static herr_t -H5S__none_deserialize(H5S_t **space, const uint8_t **p) +H5S__none_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, hbool_t skip) { - H5S_t *tmp_space = NULL; /* Pointer to actual dataspace to use, - either *space or a newly allocated one */ - uint32_t version; /* Version number */ - herr_t ret_value = SUCCEED; /* return value */ + H5S_t *tmp_space = NULL; /* Pointer to actual dataspace to use, + either *space or a newly allocated one */ + uint32_t version; /* Version number */ + herr_t ret_value = SUCCEED; /* return value */ + const uint8_t *p_end = *p + p_size - 1; /* Pointer to last valid byte in buffer */ FUNC_ENTER_PACKAGE @@ -618,12 +619,16 @@ H5S__none_deserialize(H5S_t **space, const uint8_t **p) tmp_space = *space; /* Decode version */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *p, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection version") UINT32DECODE(*p, version); if (version < H5S_NONE_VERSION_1 || version > H5S_NONE_VERSION_LATEST) HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "bad version number for none selection") /* Skip over the remainder of the header */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *p, 8, p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection header") *p += 8; /* Change to "none" selection */ diff --git a/src/H5Spkg.h b/src/H5Spkg.h index 1163484a73d..4367a4d3cdd 100644 --- a/src/H5Spkg.h +++ b/src/H5Spkg.h @@ -242,7 +242,8 @@ typedef hssize_t (*H5S_sel_serial_size_func_t)(H5S_t *space); /* Method to store current selection in "serialized" form (a byte sequence suitable for storing on disk) */ typedef herr_t (*H5S_sel_serialize_func_t)(H5S_t *space, uint8_t **p); /* Method to create selection from "serialized" form (a byte sequence suitable for storing on disk) */ -typedef herr_t (*H5S_sel_deserialize_func_t)(H5S_t **space, const uint8_t **p); +typedef herr_t (*H5S_sel_deserialize_func_t)(H5S_t **space, const uint8_t **p, const size_t p_size, + hbool_t skip); /* Method to determine smallest n-D bounding box containing the current selection */ typedef herr_t (*H5S_sel_bounds_func_t)(const H5S_t *space, hsize_t *start, hsize_t *end); /* Method to determine linear offset of initial element in selection within dataspace */ diff --git a/src/H5Spoint.c b/src/H5Spoint.c index 9cac3c4fdd0..b393c8d5199 100644 --- a/src/H5Spoint.c +++ b/src/H5Spoint.c @@ -60,7 +60,7 @@ static herr_t H5S__point_release(H5S_t *space); static htri_t H5S__point_is_valid(const H5S_t *space); static hssize_t H5S__point_serial_size(H5S_t *space); static herr_t H5S__point_serialize(H5S_t *space, uint8_t **p); -static herr_t H5S__point_deserialize(H5S_t **space, const uint8_t **p); +static herr_t H5S__point_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, 
hbool_t skip); static herr_t H5S__point_bounds(const H5S_t *space, hsize_t *start, hsize_t *end); static herr_t H5S__point_offset(const H5S_t *space, hsize_t *off); static int H5S__point_unlim_dim(const H5S_t *space); @@ -1352,20 +1352,20 @@ H5S__point_serialize(H5S_t *space, uint8_t **p) REVISION LOG --------------------------------------------------------------------------*/ static herr_t -H5S__point_deserialize(H5S_t **space, const uint8_t **p) +H5S__point_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size, hbool_t skip) { - H5S_t *tmp_space = NULL; /* Pointer to actual dataspace to use, - either *space or a newly allocated one */ - hsize_t dims[H5S_MAX_RANK]; /* Dimension sizes */ - uint32_t version; /* Version number */ - uint8_t enc_size = 0; /* Encoded size of selection info */ - hsize_t *coord = NULL, *tcoord; /* Pointer to array of elements */ - const uint8_t *pp; /* Local pointer for decoding */ - uint64_t num_elem = 0; /* Number of elements in selection */ - unsigned rank; /* Rank of points */ - unsigned i, j; /* local counting variables */ - herr_t ret_value = SUCCEED; /* Return value */ - + H5S_t *tmp_space = NULL; /* Pointer to actual dataspace to use, + either *space or a newly allocated one */ + hsize_t dims[H5S_MAX_RANK]; /* Dimension sizes */ + uint32_t version; /* Version number */ + uint8_t enc_size = 0; /* Encoded size of selection info */ + hsize_t *coord = NULL, *tcoord; /* Pointer to array of elements */ + const uint8_t *pp; /* Local pointer for decoding */ + uint64_t num_elem = 0; /* Number of elements in selection */ + unsigned rank; /* Rank of points */ + unsigned i, j; /* local counting variables */ + herr_t ret_value = SUCCEED; /* Return value */ + const uint8_t *p_end = *p + p_size - 1; /* Pointer to last valid byte in buffer */ FUNC_ENTER_PACKAGE /* Check args */ @@ -1386,16 +1386,23 @@ H5S__point_deserialize(H5S_t **space, const uint8_t **p) tmp_space = *space; /* Decode version */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection version") UINT32DECODE(pp, version); if (version < H5S_POINT_VERSION_1 || version > H5S_POINT_VERSION_LATEST) HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "bad version number for point selection") - if (version >= (uint32_t)H5S_POINT_VERSION_2) + if (version >= (uint32_t)H5S_POINT_VERSION_2) { /* Decode size of point info */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, 1, p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding point info") enc_size = *(pp)++; + } else { /* Skip over the remainder of the header */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, 8, p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection headers") pp += 8; enc_size = H5S_SELECT_INFO_ENC_SIZE_4; } @@ -1405,6 +1412,8 @@ H5S__point_deserialize(H5S_t **space, const uint8_t **p) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTLOAD, FAIL, "unknown size of point/offset info for selection") /* Decode the rank of the point selection */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection rank") UINT32DECODE(pp, rank); if (!*space) { @@ -1422,12 +1431,24 @@ H5S__point_deserialize(H5S_t **space, const uint8_t **p) /* decode the number of points */ switch (enc_size) { case H5S_SELECT_INFO_ENC_SIZE_2: + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, sizeof(uint16_t), p_end)) + 
HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding number of points") + UINT16DECODE(pp, num_elem); break; case H5S_SELECT_INFO_ENC_SIZE_4: + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding number of points") + UINT32DECODE(pp, num_elem); break; case H5S_SELECT_INFO_ENC_SIZE_8: + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, sizeof(uint64_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, + "buffer overflow while decoding number of points") + UINT64DECODE(pp, num_elem); break; default: @@ -1439,6 +1460,29 @@ H5S__point_deserialize(H5S_t **space, const uint8_t **p) if (NULL == (coord = (hsize_t *)H5MM_malloc(num_elem * rank * sizeof(hsize_t)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate coordinate information") + /* Determine necessary size of buffer for coordinates */ + size_t enc_type_size = 0; + + switch (enc_size) { + case H5S_SELECT_INFO_ENC_SIZE_2: + enc_type_size = sizeof(uint16_t); + break; + case H5S_SELECT_INFO_ENC_SIZE_4: + enc_type_size = sizeof(uint32_t); + break; + case H5S_SELECT_INFO_ENC_SIZE_8: + enc_type_size = sizeof(uint64_t); + break; + default: + HGOTO_ERROR(H5E_DATASPACE, H5E_UNSUPPORTED, FAIL, "unknown point info size") + break; + } + + size_t coordinate_buffer_requirement = num_elem * rank * enc_type_size; + + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, pp, coordinate_buffer_requirement, p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection coordinates") + /* Retrieve the coordinates from the buffer */ for (tcoord = coord, i = 0; i < num_elem; i++) for (j = 0; j < (unsigned)rank; j++, tcoord++) @@ -1446,11 +1490,9 @@ H5S__point_deserialize(H5S_t **space, const uint8_t **p) case H5S_SELECT_INFO_ENC_SIZE_2: UINT16DECODE(pp, *tcoord); break; - case H5S_SELECT_INFO_ENC_SIZE_4: UINT32DECODE(pp, *tcoord); break; - case H5S_SELECT_INFO_ENC_SIZE_8: UINT64DECODE(pp, *tcoord); break; diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h index 4303eee3995..ac46d9e362f 100644 --- a/src/H5Sprivate.h +++ b/src/H5Sprivate.h @@ -190,7 +190,7 @@ typedef struct H5S_sel_iter_op_t { #define H5S_SELECT_SHAPE_SAME(S1, S2) (H5S_select_shape_same(S1, S2)) #define H5S_SELECT_INTERSECT_BLOCK(S, START, END) (H5S_select_intersect_block(S, START, END)) #define H5S_SELECT_RELEASE(S) (H5S_select_release(S)) -#define H5S_SELECT_DESERIALIZE(S, BUF) (H5S_select_deserialize(S, BUF)) +#define H5S_SELECT_DESERIALIZE(S, BUF, BUF_SIZE) (H5S_select_deserialize(S, BUF, BUF_SIZE)) /* Forward declaration of structs used below */ struct H5O_t; @@ -229,7 +229,7 @@ H5_DLL htri_t H5S_extent_equal(const H5S_t *ds1, const H5S_t *ds2); H5_DLL herr_t H5S_extent_copy(H5S_t *dst, const H5S_t *src); /* Operations on selections */ -H5_DLL herr_t H5S_select_deserialize(H5S_t **space, const uint8_t **p); +H5_DLL herr_t H5S_select_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size); H5_DLL H5S_sel_type H5S_get_select_type(const H5S_t *space); H5_DLL herr_t H5S_select_iterate(void *buf, const H5T_t *type, H5S_t *space, const H5S_sel_iter_op_t *op, void *op_data); diff --git a/src/H5Sselect.c b/src/H5Sselect.c index 02889f71707..28c30ea830e 100644 --- a/src/H5Sselect.c +++ b/src/H5Sselect.c @@ -521,11 +521,12 @@ H5S_select_valid(const H5S_t *space) REVISION LOG --------------------------------------------------------------------------*/ herr_t -H5S_select_deserialize(H5S_t **space, const uint8_t **p) 
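The p_size and skip arguments threaded through the deserialize callbacks in this hunk exist so that every decode step can be bounds-checked before the read pointer advances: p_end marks the last valid byte of the serialized buffer, and a p_size of SIZE_MAX disables the checks for callers that cannot know the buffer length. As a minimal illustration of the pattern (not part of the patch itself; decode_example() is a hypothetical wrapper around the library's internal decode macros, and it assumes herr_t, hbool_t, UINT32DECODE, and the H5_IS_KNOWN_BUFFER_OVERFLOW macro added to H5private.h later in this patch):

/* Sketch only: how the guarded-decode pattern introduced here is meant
 * to be used.  'p_size' is the number of valid bytes at '*p'.
 */
static herr_t
decode_example(const uint8_t **p, size_t p_size, uint32_t *version /*out*/)
{
    const uint8_t *p_end = *p + p_size - 1;                     /* last valid byte in the buffer */
    hbool_t        skip  = (p_size == SIZE_MAX ? TRUE : FALSE); /* unknown size => skip the checks */

    /* Refuse to decode if fewer than 4 bytes remain in the buffer */
    if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *p, sizeof(uint32_t), p_end))
        return FAIL;

    /* Safe to advance: the bytes are known to lie inside the buffer */
    UINT32DECODE(*p, *version);

    return SUCCEED;
}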
+H5S_select_deserialize(H5S_t **space, const uint8_t **p, const size_t p_size) { - uint32_t sel_type; /* Pointer to the selection type */ - herr_t ret_value = FAIL; /* Return value */ - + uint32_t sel_type; /* Pointer to the selection type */ + herr_t ret_value = FAIL; /* Return value */ + const uint8_t *p_end = *p + p_size - 1; /* Pointer to last valid byte in buffer */ + hbool_t skip = (p_size == SIZE_MAX ? TRUE : FALSE); /* If p_size is unknown, skip buffer checks */ FUNC_ENTER_NOAPI(FAIL) HDassert(space); @@ -533,24 +534,26 @@ H5S_select_deserialize(H5S_t **space, const uint8_t **p) /* Selection-type specific coding is moved to the callbacks. */ /* Decode selection type */ + if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *p, sizeof(uint32_t), p_end)) + HGOTO_ERROR(H5E_DATASPACE, H5E_OVERFLOW, FAIL, "buffer overflow while decoding selection type") UINT32DECODE(*p, sel_type); /* Make routine for selection type */ switch (sel_type) { case H5S_SEL_POINTS: /* Sequence of points selected */ - ret_value = (*H5S_sel_point->deserialize)(space, p); + ret_value = (*H5S_sel_point->deserialize)(space, p, p_size - sizeof(uint32_t), skip); break; case H5S_SEL_HYPERSLABS: /* Hyperslab selection defined */ - ret_value = (*H5S_sel_hyper->deserialize)(space, p); + ret_value = (*H5S_sel_hyper->deserialize)(space, p, p_size - sizeof(uint32_t), skip); break; case H5S_SEL_ALL: /* Entire extent selected */ - ret_value = (*H5S_sel_all->deserialize)(space, p); + ret_value = (*H5S_sel_all->deserialize)(space, p, p_size - sizeof(uint32_t), skip); break; case H5S_SEL_NONE: /* Nothing selected */ - ret_value = (*H5S_sel_none->deserialize)(space, p); + ret_value = (*H5S_sel_none->deserialize)(space, p, p_size - sizeof(uint32_t), skip); break; default: diff --git a/src/H5private.h b/src/H5private.h index eac2cbaae41..91a47806aa1 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -327,6 +327,15 @@ */ #define H5_IS_BUFFER_OVERFLOW(ptr, size, buffer_end) (((ptr) + (size)-1) > (buffer_end)) +/* Variant of H5_IS_BUFFER_OVERFLOW, used with functions such as H5Tdecode() + * that don't take a size parameter, where we need to skip the bounds checks. + * + * This is a separate macro since we don't want to inflict that behavior on + * the entire library. + */ +#define H5_IS_KNOWN_BUFFER_OVERFLOW(skip, ptr, size, buffer_end) \ + (skip ? FALSE : ((ptr) + (size)-1) > (buffer_end)) + /* * HDF Boolean type. */ From 27375a45494e2de554844dad2ac45dceeef46458 Mon Sep 17 00:00:00 2001 From: mattjala <124107509+mattjala@users.noreply.github.com> Date: Sun, 14 May 2023 22:09:58 -0500 Subject: [PATCH 220/231] Warn about changing collections during iteration (#2950) Resolves #92 --- src/H5Apublic.h | 9 +++++++++ src/H5Dpublic.h | 2 ++ src/H5Gpublic.h | 5 ++++- src/H5Ipublic.h | 3 +++ src/H5Lpublic.h | 25 ++++++++++++------------- src/H5Mpublic.h | 6 ++++++ src/H5Ppublic.h | 2 +- 7 files changed, 37 insertions(+), 15 deletions(-) diff --git a/src/H5Apublic.h b/src/H5Apublic.h index de6851d5a02..6e7ca7ed9ef 100644 --- a/src/H5Apublic.h +++ b/src/H5Apublic.h @@ -664,6 +664,9 @@ H5_DLL hid_t H5Aget_type(hid_t attr_id); * * \note This function is also available through the H5Aiterate() macro. * + * \warning Adding or removing attributes to the object during iteration + * will lead to undefined behavior. + * * \since 1.8.0 * */ @@ -721,6 +724,9 @@ H5_DLL herr_t H5Aiterate2(hid_t loc_id, H5_index_t idx_type, H5_iter_order_t ord * information regarding the properties of links required to access * the object, \p obj_name. 
* + * \warning Adding or removing attributes to the object during iteration + * will lead to undefined behavior. + * * \since 1.8.0 * */ @@ -1188,6 +1194,9 @@ H5_DLL int H5Aget_num_attrs(hid_t loc_id); * \p op, is returned in \p idx. If \p idx is the null pointer, * then all attributes are processed. * + * \warning Adding or removing attributes to the object during iteration + * will lead to undefined behavior. + * * \version 1.8.0 The function \p H5Aiterate was renamed to H5Aiterate1() * and deprecated in this release. * \since 1.0.0 diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index 0dbd6493cc8..4ede564c992 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -1286,6 +1286,8 @@ H5_DLL herr_t H5Dread_chunk(hid_t dset_id, hid_t dxpl_id, const hsize_t *offset, * be restarted at the point of exit; a second H5Diterate() * call will always restart at the beginning. * + * \warning Modifying the selection of \p space_id during iteration + * will lead to undefined behavior. * * \since 1.10.2 * diff --git a/src/H5Gpublic.h b/src/H5Gpublic.h index 2609fb061c9..28cfcc45737 100644 --- a/src/H5Gpublic.h +++ b/src/H5Gpublic.h @@ -1002,7 +1002,10 @@ H5_DLL int H5Gget_comment(hid_t loc_id, const char *name, size_t bufsize, char * * examine the members of \c subgroup_a. When recursive iteration is * required, the application must handle the recursion, explicitly * calling H5Giterate() on discovered subgroups. - + * + * \warning Adding or removing members to the group during iteration + * will lead to undefined behavior. + * * \version 1.8.0 Function deprecated in this release. * */ diff --git a/src/H5Ipublic.h b/src/H5Ipublic.h index 5f8fc280280..6f250713339 100644 --- a/src/H5Ipublic.h +++ b/src/H5Ipublic.h @@ -588,6 +588,9 @@ H5_DLL void *H5Isearch(H5I_type_t type, H5I_search_func_t func, void *key); * to continue, as long as there are other identifiers remaining in * type. * + * \warning Adding or removing members of the identifier type during iteration + * will lead to undefined behavior. + * * \since 1.12.0 * */ diff --git a/src/H5Lpublic.h b/src/H5Lpublic.h index fb46ad8208f..001614810fb 100644 --- a/src/H5Lpublic.h +++ b/src/H5Lpublic.h @@ -980,6 +980,12 @@ H5_DLL herr_t H5Literate_async(hid_t group_id, H5_index_t idx_type, H5_iter_orde * passed in by the application with a starting point and returned by * the library with the point at which the iteration stopped. * + * \warning H5Literate_by_name2() assumes that the membership of the group being + * iterated over remains unchanged through the iteration; if any of the + * links in the group change during the iteration, the function’s + * behavior is undefined. Note, however, that objects pointed to by the + * links can be modified. + * * \note H5Literate_by_name2() is not recursive. In particular, if a member of * \p group_name is found to be a group, call it \c subgroup_a, * H5Literate_by_name2() does not examine the members of \c subgroup_a. @@ -987,12 +993,6 @@ H5_DLL herr_t H5Literate_async(hid_t group_id, H5_index_t idx_type, H5_iter_orde * recursion, explicitly calling H5Literate_by_name2() on discovered * subgroups. * - * \note H5Literate_by_name2() assumes that the membership of the group being - * iterated over remains unchanged through the iteration; if any of the - * links in the group change during the iteration, the function’s - * behavior is undefined. Note, however, that objects pointed to by the - * links can be modified. 
- * * \note H5Literate_by_name2() is the same as H5Literate2(), except that * H5Literate2() always proceeds in alphanumeric order. * @@ -1670,7 +1670,6 @@ H5_DLL herr_t H5Lget_info_by_idx1(hid_t loc_id, const char *group_name, H5_index * This does not limit the ability to change link destinations * while iterating, but caution is advised. * - * * \version 1.12.0 Function was deprecated in this release. * \since 1.8.0 * @@ -1727,6 +1726,12 @@ H5_DLL herr_t H5Literate1(hid_t grp_id, H5_index_t idx_type, H5_iter_order_t ord * passed in by the application with a starting point and returned by * the library with the point at which the iteration stopped. * + * \warning H5Literate_by_name1() assumes that the membership of the group being + * iterated over remains unchanged through the iteration; if any of the + * links in the group change during the iteration, the function’s + * behavior is undefined. Note, however, that objects pointed to by the + * links can be modified. + * * \note H5Literate_by_name1() is not recursive. In particular, if a member of * \p group_name is found to be a group, call it \c subgroup_a, * H5Literate_by_name1() does not examine the members of \c subgroup_a. @@ -1734,12 +1739,6 @@ H5_DLL herr_t H5Literate1(hid_t grp_id, H5_index_t idx_type, H5_iter_order_t ord * recursion, explicitly calling H5Literate_by_name1() on discovered * subgroups. * - * \note H5Literate_by_name1() assumes that the membership of the group being - * iterated over remains unchanged through the iteration; if any of the - * links in the group change during the iteration, the function’s - * behavior is undefined. Note, however, that objects pointed to by the - * links can be modified. - * * \note H5Literate_by_name1() is the same as H5Giterate(), except that * H5Giterate() always proceeds in lexicographic order. * diff --git a/src/H5Mpublic.h b/src/H5Mpublic.h index af6aa49d4ee..48625a55382 100644 --- a/src/H5Mpublic.h +++ b/src/H5Mpublic.h @@ -542,6 +542,9 @@ H5_DLL herr_t H5Mexists(hid_t map_id, hid_t key_mem_type_id, const void *key, hb * * Any further options can be specified through the property list \p dxpl_id. * + * \warning Adding or removing key-value pairs to the map during iteration + * will lead to undefined behavior. + * * \since 1.12.0 * */ @@ -583,6 +586,9 @@ H5_DLL herr_t H5Miterate(hid_t map_id, hsize_t *idx, hid_t key_mem_type_id, H5M_ * * Any further options can be specified through the property list \p dxpl_id. * + * \warning Adding or removing key-value pairs to the map during iteration + * will lead to undefined behavior. + * * \since 1.12.0 * */ diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index 2817551c46e..53a455c78ff 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -1459,7 +1459,7 @@ H5_DLL htri_t H5Pisa_class(hid_t plist_id, hid_t pclass_id); * and the pointer to the operator data passed in to H5Piterate(), * \p iter_data. * - * H5Piterate() assumes that the properties in the object + * \warning H5Piterate() assumes that the properties in the object * identified by \p id remain unchanged through the iteration. * If the membership changes during the iteration, the function's * behavior is undefined. 
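The warnings added throughout the patch above all describe the same hazard: these iterators walk a live index, so adding or removing members of the collection from inside the callback invalidates the traversal. The usual workaround is to record the intended changes during the iteration and apply them only after the iterator has returned. A minimal sketch of that collect-then-modify pattern for link iteration, built on the public H5Literate2() and H5Ldelete() calls (the name_list struct and the collect_link_name()/delete_all_links() helpers are illustrative only, not library API):

#include <hdf5.h>
#include <stdlib.h>
#include <string.h>

/* Collects link names during iteration; the group itself is only
 * modified after H5Literate2() has returned. */
struct name_list {
    char  **names;
    size_t  count;
    size_t  capacity;
};

static herr_t
collect_link_name(hid_t group, const char *name, const H5L_info2_t *info, void *op_data)
{
    struct name_list *list = (struct name_list *)op_data;
    size_t            len  = strlen(name) + 1;

    (void)group;
    (void)info;

    if (list->count == list->capacity) {
        size_t  new_cap = list->capacity ? 2 * list->capacity : 8;
        char  **tmp     = (char **)realloc(list->names, new_cap * sizeof(char *));

        if (NULL == tmp)
            return -1;          /* a negative return aborts the iteration with an error */
        list->names    = tmp;
        list->capacity = new_cap;
    }
    if (NULL == (list->names[list->count] = (char *)malloc(len)))
        return -1;
    memcpy(list->names[list->count], name, len);
    list->count++;

    return 0;                   /* zero continues the iteration */
}

/* Remove every link in group_id without touching the group mid-iteration. */
static herr_t
delete_all_links(hid_t group_id)
{
    struct name_list list = {NULL, 0, 0};
    herr_t           ret  = 0;
    size_t           i;

    if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_NATIVE, NULL, collect_link_name, &list) < 0)
        ret = -1;

    /* The iteration is over; it is now safe to modify the group. */
    for (i = 0; i < list.count; i++) {
        if (ret >= 0 && H5Ldelete(group_id, list.names[i], H5P_DEFAULT) < 0)
            ret = -1;
        free(list.names[i]);
    }
    free(list.names);

    return ret;
}

The same collect-then-modify approach carries over to H5Aiterate2(), H5Miterate(), and the other iterators covered by these warnings: defer the corresponding delete or create calls until the iteration has finished.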
From 0c396aae20f058defe341791e785dee54781fda4 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Sun, 14 May 2023 22:12:28 -0500 Subject: [PATCH 221/231] Fix warning in cache_common.c (#2952) --- test/cache_common.c | 142 ++++++++++++++++++++++++-------------------- 1 file changed, 77 insertions(+), 65 deletions(-) diff --git a/test/cache_common.c b/test/cache_common.c index d4bab3b7fbd..e647abf0896 100644 --- a/test/cache_common.c +++ b/test/cache_common.c @@ -24,6 +24,7 @@ hbool_t pass = TRUE; /* set to false on error */ const char *failure_mssg = NULL; +static char tmp_msg_buf[256]; static test_entry_t *pico_entries = NULL, *orig_pico_entries = NULL; static test_entry_t *nano_entries = NULL, *orig_nano_entries = NULL; @@ -2316,8 +2317,7 @@ verify_clean(void) void verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_entry_status expected[]) { - char msg[256]; - int i; + int i; i = 0; while ((pass) && (i < num_entries)) { @@ -2329,16 +2329,16 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if ((!expected[i].in_cache) && ((expected[i].is_protected) || (expected[i].is_pinned))) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d: Contradictory data in expected[%d].\n", tag, i); - failure_mssg = msg; + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "%d: Contradictory data in expected[%d].\n", tag, i); + failure_mssg = tmp_msg_buf; } if ((!expected[i].in_cache) && (expected[i].is_dirty) && (!entry_ptr->expunged)) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d: expected[%d] specs non-expunged, dirty, non-resident.\n", tag, - i); - failure_mssg = msg; + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d: expected[%d] specs non-expunged, dirty, non-resident.\n", tag, i); + failure_mssg = tmp_msg_buf; } if (pass) { @@ -2348,10 +2348,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (in_cache != expected[i].in_cache) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) in cache actual/expected = %d/%d.\n", tag, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) in cache actual/expected = %d/%d.\n", tag, (int)expected[i].entry_type, (int)expected[i].entry_index, (int)in_cache, (int)expected[i].in_cache); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2360,10 +2361,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (entry_ptr->size != expected[i].size) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) size actual/expected = %ld/%ld.\n", tag, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) size actual/expected = %ld/%ld.\n", tag, (int)expected[i].entry_type, (int)expected[i].entry_index, (long)(entry_ptr->size), (long)expected[i].size); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2372,10 +2374,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (entry_ptr->header.size != expected[i].size) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) header size actual/expected = %ld/%ld.\n", - tag, (int)expected[i].entry_type, (int)expected[i].entry_index, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) header size actual/expected = %ld/%ld.\n", tag, + (int)expected[i].entry_type, (int)expected[i].entry_index, (long)(entry_ptr->header.size), (long)expected[i].size); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2384,10 +2387,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int 
num_entries, struct expected_ if (entry_ptr->at_main_addr != expected[i].at_main_addr) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) at main addr actual/expected = %d/%d.\n", tag, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) at main addr actual/expected = %d/%d.\n", tag, (int)expected[i].entry_type, (int)expected[i].entry_index, (int)(entry_ptr->at_main_addr), (int)expected[i].at_main_addr); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2396,10 +2400,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (entry_ptr->is_dirty != expected[i].is_dirty) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) is_dirty actual/expected = %d/%d.\n", tag, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) is_dirty actual/expected = %d/%d.\n", tag, (int)expected[i].entry_type, (int)expected[i].entry_index, (int)(entry_ptr->is_dirty), (int)expected[i].is_dirty); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2408,10 +2413,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (entry_ptr->header.is_dirty != expected[i].is_dirty) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) header is_dirty actual/expected = %d/%d.\n", - tag, (int)expected[i].entry_type, (int)expected[i].entry_index, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) header is_dirty actual/expected = %d/%d.\n", tag, + (int)expected[i].entry_type, (int)expected[i].entry_index, (int)(entry_ptr->header.is_dirty), (int)expected[i].is_dirty); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2420,10 +2426,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (entry_ptr->is_protected != expected[i].is_protected) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) is_protected actual/expected = %d/%d.\n", tag, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) is_protected actual/expected = %d/%d.\n", tag, (int)expected[i].entry_type, (int)expected[i].entry_index, (int)(entry_ptr->is_protected), (int)expected[i].is_protected); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2432,11 +2439,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (entry_ptr->header.is_protected != expected[i].is_protected) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "%d entry (%d, %d) header is_protected actual/expected = %d/%d.\n", tag, (int)expected[i].entry_type, (int)expected[i].entry_index, (int)(entry_ptr->header.is_protected), (int)expected[i].is_protected); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2445,10 +2452,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (entry_ptr->is_pinned != expected[i].is_pinned) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) is_pinned actual/expected = %d/%d.\n", tag, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) is_pinned actual/expected = %d/%d.\n", tag, (int)expected[i].entry_type, (int)expected[i].entry_index, (int)(entry_ptr->is_pinned), (int)expected[i].is_pinned); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2457,10 +2465,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (entry_ptr->is_corked != expected[i].is_corked) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) is_corked 
actual/expected = %d/%d.\n", tag, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) is_corked actual/expected = %d/%d.\n", tag, (int)expected[i].entry_type, (int)expected[i].entry_index, (int)(entry_ptr->is_corked), (int)expected[i].is_corked); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2469,10 +2478,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (entry_ptr->header.is_pinned != expected[i].is_pinned) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) header is_pinned actual/expected = %d/%d.\n", - tag, (int)expected[i].entry_type, (int)expected[i].entry_index, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) header is_pinned actual/expected = %d/%d.\n", tag, + (int)expected[i].entry_type, (int)expected[i].entry_index, (int)(entry_ptr->header.is_pinned), (int)expected[i].is_pinned); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2483,13 +2493,13 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ (entry_ptr->destroyed != expected[i].destroyed)) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "%d entry (%d,%d) deserialized = %d(%d), serialized = %d(%d), dest = %d(%d)\n", tag, (int)expected[i].entry_type, (int)expected[i].entry_index, (int)(entry_ptr->deserialized), (int)(expected[i].deserialized), (int)(entry_ptr->serialized), (int)(expected[i].serialized), (int)(entry_ptr->destroyed), (int)(expected[i].destroyed)); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } } @@ -2499,20 +2509,21 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (pass) { if (entry_ptr->flush_dep_npar != expected[i].flush_dep_npar) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) flush_dep_npar actual/expected = %u/%u.\n", - tag, expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_dep_npar, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) flush_dep_npar actual/expected = %u/%u.\n", tag, + expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_dep_npar, expected[i].flush_dep_npar); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } /* end if */ } /* end if */ if ((pass) && (in_cache)) { if (entry_ptr->header.flush_dep_nparents != expected[i].flush_dep_npar) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "%d entry (%d, %d) header flush_dep_nparents actual/expected = %u/%u.\n", tag, expected[i].entry_type, expected[i].entry_index, entry_ptr->header.flush_dep_nparents, expected[i].flush_dep_npar); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } /* end if */ } /* end if */ @@ -2522,11 +2533,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ for (u = 0; u < entry_ptr->flush_dep_npar; u++) { if (entry_ptr->flush_dep_par_type[u] != expected[i].flush_dep_par_type[u]) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "%d entry (%d, %d) flush_dep_par_type[%u] actual/expected = %d/%d.\n", tag, expected[i].entry_type, expected[i].entry_index, u, entry_ptr->flush_dep_par_type[u], expected[i].flush_dep_par_type[u]); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } /* end if */ } /* end for */ } /* end if */ @@ -2534,11 +2545,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ for (u = 0; u < entry_ptr->flush_dep_npar; u++) { if (entry_ptr->flush_dep_par_idx[u] 
!= expected[i].flush_dep_par_idx[u]) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "%d entry (%d, %d) flush_dep_par_idx[%u] actual/expected = %d/%d.\n", tag, expected[i].entry_type, expected[i].entry_index, u, entry_ptr->flush_dep_par_idx[u], expected[i].flush_dep_par_idx[u]); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } /* end if */ } /* end for */ } /* end if */ @@ -2547,40 +2558,41 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (pass) { if (entry_ptr->flush_dep_nchd != expected[i].flush_dep_nchd) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) flush_dep_nchd actual/expected = %u/%u.\n", - tag, expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_dep_nchd, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) flush_dep_nchd actual/expected = %u/%u.\n", tag, + expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_dep_nchd, expected[i].flush_dep_nchd); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } /* end if */ } /* end if */ if ((pass) && (in_cache)) { if (entry_ptr->header.flush_dep_nchildren != expected[i].flush_dep_nchd) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "%d entry (%d, %d) header flush_dep_nchildren actual/expected = %u/%u.\n", tag, expected[i].entry_type, expected[i].entry_index, entry_ptr->header.flush_dep_nchildren, expected[i].flush_dep_nchd); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } /* end if */ } /* end if */ if (pass) { if (entry_ptr->flush_dep_ndirty_chd != expected[i].flush_dep_ndirty_chd) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "%d entry (%d, %d) flush_dep_ndirty_chd actual/expected = %u/%u.\n", tag, expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_dep_ndirty_chd, expected[i].flush_dep_ndirty_chd); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } /* end if */ } /* end if */ if ((pass) && (in_cache)) { if (entry_ptr->header.flush_dep_ndirty_children != expected[i].flush_dep_ndirty_chd) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "%d entry (%d, %d) header flush_dep_ndirty_children actual/expected = %u/%u.\n", tag, expected[i].entry_type, expected[i].entry_index, entry_ptr->header.flush_dep_ndirty_children, expected[i].flush_dep_ndirty_chd); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } /* end if */ } /* end if */ @@ -2588,10 +2600,11 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_ if (pass) { if (expected[i].flush_order >= 0 && entry_ptr->flush_order != (unsigned)expected[i].flush_order) { pass = FALSE; - HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) flush_order actual/expected = %u/%d.\n", tag, + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), + "%d entry (%d, %d) flush_order actual/expected = %u/%d.\n", tag, expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_order, expected[i].flush_order); - failure_mssg = msg; + failure_mssg = tmp_msg_buf; } /* end if */ } /* end if */ @@ -5228,7 +5241,6 @@ resize_configs_are_equal(const H5C_auto_size_ctl_t *a, const H5C_auto_size_ctl_t void validate_mdc_config(hid_t file_id, H5AC_cache_config_t *ext_config_ptr, hbool_t compare_init, int test_num) { - static char msg[256]; H5F_t *file_ptr = NULL; H5C_t *cache_ptr = NULL; H5AC_cache_config_t scratch; @@ -5244,8 +5256,8 @@ validate_mdc_config(hid_t file_id, H5AC_cache_config_t 
*ext_config_ptr, hbool_t if (file_ptr == NULL) { pass = FALSE; - HDsnprintf(msg, (size_t)128, "Can't get file_ptr #%d.", test_num); - failure_mssg = msg; + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "Can't get file_ptr #%d.", test_num); + failure_mssg = tmp_msg_buf; } else { @@ -5260,8 +5272,8 @@ validate_mdc_config(hid_t file_id, H5AC_cache_config_t *ext_config_ptr, hbool_t (cache_ptr->resize_ctl.version != H5C__CURR_AUTO_SIZE_CTL_VER)) { pass = FALSE; - HDsnprintf(msg, (size_t)128, "Can't access cache resize_ctl #%d.", test_num); - failure_mssg = msg; + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "Can't access cache resize_ctl #%d.", test_num); + failure_mssg = tmp_msg_buf; } } @@ -5271,8 +5283,8 @@ validate_mdc_config(hid_t file_id, H5AC_cache_config_t *ext_config_ptr, hbool_t if (!resize_configs_are_equal(&int_config, &cache_ptr->resize_ctl, compare_init)) { pass = FALSE; - HDsnprintf(msg, (size_t)128, "Unexpected internal config #%d.", test_num); - failure_mssg = msg; + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "Unexpected internal config #%d.", test_num); + failure_mssg = tmp_msg_buf; } } @@ -5284,8 +5296,8 @@ validate_mdc_config(hid_t file_id, H5AC_cache_config_t *ext_config_ptr, hbool_t if (H5Fget_mdc_config(file_id, &scratch) < 0) { pass = FALSE; - HDsnprintf(msg, (size_t)128, "H5Fget_mdc_config() failed #%d.", test_num); - failure_mssg = msg; + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "H5Fget_mdc_config() failed #%d.", test_num); + failure_mssg = tmp_msg_buf; } } @@ -5305,8 +5317,8 @@ validate_mdc_config(hid_t file_id, H5AC_cache_config_t *ext_config_ptr, hbool_t if (!CACHE_CONFIGS_EQUAL((*ext_config_ptr), scratch, FALSE, compare_init)) { pass = FALSE; - HDsnprintf(msg, (size_t)128, "Unexpected external config #%d.", test_num); - failure_mssg = msg; + HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "Unexpected external config #%d.", test_num); + failure_mssg = tmp_msg_buf; } } From 51488a4e129ce67a3b040d36a93974c9cd417ac5 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Mon, 15 May 2023 13:29:51 -0500 Subject: [PATCH 222/231] addresses gfortran issue https://gcc.gnu.org/bugzilla/show_bug.cgi?id=109861 (#2957) --- fortran/src/H5Aff.F90 | 2 +- fortran/src/H5Pff.F90 | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/fortran/src/H5Aff.F90 b/fortran/src/H5Aff.F90 index 2f1e6d0ef14..fab86bd34a3 100644 --- a/fortran/src/H5Aff.F90 +++ b/fortran/src/H5Aff.F90 @@ -2086,7 +2086,7 @@ SUBROUTINE h5aread_async_f(attr_id, mem_type_id, buf, es_id, hdferr, file, func, IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(HID_T), INTENT(IN) :: mem_type_id - TYPE(C_PTR) , INTENT(OUT) :: buf + TYPE(C_PTR) , INTENT(INOUT) :: buf INTEGER(HID_T), INTENT(IN) :: es_id INTEGER , INTENT(OUT) :: hdferr TYPE(C_PTR), OPTIONAL :: file diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90 index 49917dd6411..098a6c31717 100644 --- a/fortran/src/H5Pff.F90 +++ b/fortran/src/H5Pff.F90 @@ -4825,7 +4825,7 @@ SUBROUTINE h5pget_ptr(prp_id, name, value, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: prp_id CHARACTER(LEN=*), INTENT(IN) :: name - TYPE(C_PTR), INTENT(OUT) :: value + TYPE(C_PTR), INTENT(INOUT) :: value INTEGER, INTENT(OUT) :: hdferr INTEGER :: name_len @@ -5090,19 +5090,19 @@ END SUBROUTINE h5pset_file_image_f !! 
SUBROUTINE h5pget_file_image_f(fapl_id, buf_ptr, buf_len_ptr, hdferr) IMPLICIT NONE - INTEGER(HID_T) , INTENT(IN) :: fapl_id - TYPE(C_PTR) , INTENT(OUT), DIMENSION(*) :: buf_ptr - INTEGER(SIZE_T), INTENT(OUT) :: buf_len_ptr - INTEGER , INTENT(OUT) :: hdferr + INTEGER(HID_T) , INTENT(IN) :: fapl_id + TYPE(C_PTR) , INTENT(INOUT), DIMENSION(*) :: buf_ptr + INTEGER(SIZE_T), INTENT(OUT) :: buf_len_ptr + INTEGER , INTENT(OUT) :: hdferr INTERFACE INTEGER FUNCTION h5pget_file_image_c(fapl_id, buf_ptr, buf_len_ptr) & BIND(C, NAME='h5pget_file_image_c') IMPORT :: c_ptr IMPORT :: HID_T, SIZE_T - INTEGER(HID_T), INTENT(IN) :: fapl_id - TYPE(C_PTR), DIMENSION(*), INTENT(OUT) :: buf_ptr - INTEGER(SIZE_T), INTENT(OUT) :: buf_len_ptr + INTEGER(HID_T) :: fapl_id + TYPE(C_PTR), DIMENSION(*) :: buf_ptr + INTEGER(SIZE_T) :: buf_len_ptr END FUNCTION h5pget_file_image_c END INTERFACE From 7c6f81723e54efd64518f0540274b5ff8e1558a9 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Mon, 15 May 2023 13:54:58 -0500 Subject: [PATCH 223/231] Remove unnecessary fields from cache structs (#2951) * Remove unnecessary 'magic' field from cache structs Signed-off-by: Quincey Koziol * Committing clang-format changes --------- Signed-off-by: Quincey Koziol Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- src/H5AC.c | 30 ++------------------- src/H5ACmpio.c | 23 ---------------- src/H5ACpkg.h | 30 +++------------------ src/H5C.c | 30 +++++---------------- src/H5Cdbg.c | 37 +++---------------------- src/H5Centry.c | 59 ---------------------------------------- src/H5Cimage.c | 64 ++++---------------------------------------- src/H5Cint.c | 33 ----------------------- src/H5Cmpio.c | 9 ------- src/H5Cpkg.h | 43 ++++++----------------------- src/H5Cprefetched.c | 3 --- src/H5Cprivate.h | 32 ---------------------- src/H5Cquery.c | 22 +++++---------- src/H5Ctag.c | 10 ------- src/H5EAcache.c | 1 - src/H5FAcache.c | 1 - src/H5FScache.c | 20 -------------- src/H5Fsuper.c | 1 - src/H5Fsuper_cache.c | 13 --------- src/H5Gcache.c | 7 ----- src/H5HFcache.c | 37 ------------------------- src/H5HGcache.c | 7 ----- src/H5HLcache.c | 14 ---------- src/H5Oalloc.c | 2 -- src/H5Ocache.c | 18 ------------- src/H5SMcache.c | 14 ---------- test/cache.c | 18 ------------- test/cache_api.c | 6 ++--- test/cache_common.c | 27 +------------------ test/cache_image.c | 1 - testpar/t_cache.c | 23 ++-------------- testpar/t_file.c | 3 --- 32 files changed, 41 insertions(+), 597 deletions(-) diff --git a/src/H5AC.c b/src/H5AC.c index 2b4c29770a2..178e59ec31e 100644 --- a/src/H5AC.c +++ b/src/H5AC.c @@ -278,7 +278,6 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co if (NULL == (aux_ptr = H5FL_CALLOC(H5AC_aux_t))) HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxiliary structure") - aux_ptr->magic = H5AC__H5AC_AUX_T_MAGIC; aux_ptr->mpi_comm = mpi_comm; aux_ptr->mpi_rank = mpi_rank; aux_ptr->mpi_size = mpi_size; @@ -392,8 +391,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co H5SL_close(aux_ptr->c_slist_ptr); if (aux_ptr->candidate_slist_ptr != NULL) H5SL_close(aux_ptr->candidate_slist_ptr); - aux_ptr->magic = 0; - aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr); + aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr); } /* end if */ } /* end if */ #endif /* H5_HAVE_PARALLEL */ @@ -458,9 +456,6 @@ H5AC_dest(H5F_t *f) aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache); if (aux_ptr) { - /* Sanity check */ - HDassert(aux_ptr->magic == 
H5AC__H5AC_AUX_T_MAGIC); - /* If the file was opened R/W, attempt to flush all entries * from rank 0 & Bcast clean list to other ranks. * @@ -516,8 +511,7 @@ H5AC_dest(H5F_t *f) H5SL_close(aux_ptr->candidate_slist_ptr); } /* end if */ - aux_ptr->magic = 0; - aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr); + aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr); } /* end if */ #endif /* H5_HAVE_PARALLEL */ @@ -1688,15 +1682,6 @@ H5AC_get_cache_auto_resize_config(const H5AC_t *cache_ptr, H5AC_cache_config_t * if ((cache_ptr == NULL) || (config_ptr == NULL) || (config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION)) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or config_ptr on entry") -#ifdef H5_HAVE_PARALLEL - { - H5AC_aux_t *aux_ptr; - - aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); - if ((aux_ptr != NULL) && (aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad aux_ptr on entry") - } -#endif /* H5_HAVE_PARALLEL */ /* Retrieve the configuration */ if (H5C_get_cache_auto_resize_config((const H5C_t *)cache_ptr, &internal_config) < 0) @@ -1887,15 +1872,6 @@ H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, const H5AC_cache_config_t * if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad cache_ptr on entry") -#ifdef H5_HAVE_PARALLEL - { - H5AC_aux_t *aux_ptr; - - aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); - if ((aux_ptr != NULL) && (aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC)) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad aux_ptr on entry") - } -#endif /* H5_HAVE_PARALLEL */ /* Validate external configuration */ if (H5AC_validate_config(config_ptr) != SUCCEED) @@ -2123,8 +2099,6 @@ H5AC__check_if_write_permitted(const H5F_t HDassert(f->shared->cache != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache); if (aux_ptr != NULL) { - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); - if ((aux_ptr->mpi_rank == 0) || (aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED)) write_permitted = aux_ptr->write_permitted; diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c index 3299a30fcf6..0ef59551319 100644 --- a/src/H5ACmpio.c +++ b/src/H5ACmpio.c @@ -146,7 +146,6 @@ H5AC__set_sync_point_done_callback(H5C_t *cache_ptr, H5AC_sync_point_done_cb_t s HDassert(cache_ptr); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); aux_ptr->sync_point_done = sync_point_done; @@ -178,7 +177,6 @@ H5AC__set_write_done_callback(H5C_t *cache_ptr, H5AC_write_done_cb_t write_done) HDassert(cache_ptr); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); aux_ptr->write_done = write_done; @@ -216,7 +214,6 @@ H5AC_add_candidate(H5AC_t *cache_ptr, haddr_t addr) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); HDassert(aux_ptr->candidate_slist_ptr != NULL); @@ -278,7 +275,6 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr, had HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->mpi_rank == 0); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); HDassert(aux_ptr->candidate_slist_ptr != 
NULL); @@ -411,7 +407,6 @@ H5AC__broadcast_clean_list(H5AC_t *cache_ptr) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->mpi_rank == 0); HDassert(aux_ptr->c_slist_ptr != NULL); @@ -497,7 +492,6 @@ H5AC__construct_candidate_list(H5AC_t *cache_ptr, H5AC_aux_t H5_ATTR_NDEBUG_UNUS /* Sanity checks */ HDassert(cache_ptr != NULL); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); HDassert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE) || (aux_ptr->mpi_rank == 0)); HDassert(aux_ptr->d_slist_ptr != NULL); @@ -612,7 +606,6 @@ H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, unsigned *num_entri HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); HDassert(aux_ptr->candidate_slist_ptr != NULL); HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) > 0); @@ -687,7 +680,6 @@ H5AC__log_deleted_entry(const H5AC_info_t *entry_ptr) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->mpi_rank == 0); HDassert(aux_ptr->d_slist_ptr != NULL); HDassert(aux_ptr->c_slist_ptr != NULL); @@ -740,7 +732,6 @@ H5AC__log_dirtied_entry(const H5AC_info_t *entry_ptr) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); if (aux_ptr->mpi_rank == 0) { H5AC_slist_entry_t *slist_entry_ptr; @@ -817,7 +808,6 @@ H5AC__log_cleaned_entry(const H5AC_info_t *entry_ptr) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); if (aux_ptr->mpi_rank == 0) { H5AC_slist_entry_t *slist_entry_ptr; @@ -878,7 +868,6 @@ H5AC__log_flushed_entry(H5C_t *cache_ptr, haddr_t addr, hbool_t was_dirty, unsig HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->mpi_rank == 0); HDassert(aux_ptr->c_slist_ptr != NULL); @@ -947,7 +936,6 @@ H5AC__log_inserted_entry(const H5AC_info_t *entry_ptr) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); if (aux_ptr->mpi_rank == 0) { H5AC_slist_entry_t *slist_entry_ptr; @@ -1049,7 +1037,6 @@ H5AC__log_moved_entry(const H5F_t *f, haddr_t old_addr, haddr_t new_addr) HDassert(cache_ptr); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); /* get entry status, size, etc here */ if (H5C_get_entry_status(f, old_addr, &entry_size, &entry_in_cache, &entry_dirty, NULL, NULL, NULL, NULL, @@ -1228,7 +1215,6 @@ H5AC__propagate_and_apply_candidate_list(H5F_t *f) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); /* to prevent "messages 
from the future" we must synchronize all @@ -1391,7 +1377,6 @@ H5AC__propagate_flushed_and_still_clean_entries_list(H5F_t *f) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY); if (aux_ptr->mpi_rank == 0) { @@ -1514,7 +1499,6 @@ H5AC__receive_and_apply_clean_list(H5F_t *f) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->mpi_rank != 0); /* Retrieve the clean list from process 0 */ @@ -1572,7 +1556,6 @@ H5AC__receive_candidate_list(const H5AC_t *cache_ptr, unsigned *num_entries_ptr, HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->mpi_rank != 0); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); HDassert(num_entries_ptr != NULL); @@ -1655,7 +1638,6 @@ H5AC__rsp__dist_md_write__flush(H5F_t *f) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); /* first construct the candidate list -- initially, this will be in the @@ -1799,7 +1781,6 @@ H5AC__rsp__dist_md_write__flush_to_min_clean(H5F_t *f) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); /* Query if evictions are allowed */ @@ -1878,7 +1859,6 @@ H5AC__rsp__p0_only__flush(H5F_t *f) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY); /* To prevent "messages from the future" we must @@ -1989,7 +1969,6 @@ H5AC__rsp__p0_only__flush_to_min_clean(H5F_t *f) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY); /* Query if evictions are allowed */ @@ -2103,7 +2082,6 @@ H5AC__run_sync_point(H5F_t *f, int sync_point_op) HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) || (sync_point_op == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED)); @@ -2232,7 +2210,6 @@ H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates, haddr_t *ca HDassert(cache_ptr != NULL); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); HDassert(aux_ptr != NULL); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); HDassert(aux_ptr->mpi_rank == 0); HDassert(num_candidates > 0); diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h index beb7ba7347b..535eabd5ff7 100644 --- a/src/H5ACpkg.h +++ b/src/H5ACpkg.h @@ -165,10 +165,6 @@ 
H5FL_EXTERN(H5AC_aux_t); * * JRM -- 1/6/15 * - * magic: Unsigned 32 bit integer always set to - * H5AC__H5AC_AUX_T_MAGIC. This field is used to validate - * pointers to instances of H5AC_aux_t. - * * mpi_comm: MPI communicator associated with the file for which the * cache has been created. * @@ -350,52 +346,34 @@ H5FL_EXTERN(H5AC_aux_t); #ifdef H5_HAVE_PARALLEL -#define H5AC__H5AC_AUX_T_MAGIC (unsigned)0x00D0A01 - typedef struct H5AC_aux_t { - uint32_t magic; - MPI_Comm mpi_comm; - - int mpi_rank; - - int mpi_size; + int mpi_rank; + int mpi_size; hbool_t write_permitted; - - size_t dirty_bytes_threshold; - - size_t dirty_bytes; - + size_t dirty_bytes_threshold; + size_t dirty_bytes; int32_t metadata_write_strategy; #ifdef H5AC_DEBUG_DIRTY_BYTES_CREATION - unsigned dirty_bytes_propagations; - size_t unprotect_dirty_bytes; unsigned unprotect_dirty_bytes_updates; - size_t insert_dirty_bytes; unsigned insert_dirty_bytes_updates; - size_t move_dirty_bytes; unsigned move_dirty_bytes_updates; - #endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */ H5SL_t *d_slist_ptr; - H5SL_t *c_slist_ptr; - H5SL_t *candidate_slist_ptr; void (*write_done)(void); - void (*sync_point_done)(unsigned num_writes, haddr_t *written_entries_tbl); unsigned p0_image_len; - } H5AC_aux_t; /* struct H5AC_aux_t */ /* Typedefs for debugging function pointers */ diff --git a/src/H5C.c b/src/H5C.c index 348e6435325..edf946c9ec9 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -158,8 +158,6 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id, * the fields. */ - cache_ptr->magic = H5C__H5C_T_MAGIC; - cache_ptr->flush_in_progress = FALSE; if (NULL == (cache_ptr->log_info = (H5C_log_info_t *)H5MM_calloc(sizeof(H5C_log_info_t)))) @@ -313,9 +311,8 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id, /* Set non-zero/FALSE/NULL fields for epoch markers */ for (i = 0; i < H5C__MAX_EPOCH_MARKERS; i++) { - ((cache_ptr->epoch_markers)[i]).magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; - ((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i; - ((cache_ptr->epoch_markers)[i]).type = H5AC_EPOCH_MARKER; + ((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i; + ((cache_ptr->epoch_markers)[i]).type = H5AC_EPOCH_MARKER; } /* Initialize cache image generation on file close related fields. @@ -376,8 +373,7 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id, if (cache_ptr->log_info != NULL) H5MM_xfree(cache_ptr->log_info); - cache_ptr->magic = 0; - cache_ptr = H5FL_FREE(H5C_t, cache_ptr); + cache_ptr = H5FL_FREE(H5C_t, cache_ptr); } } @@ -413,7 +409,6 @@ H5C_prep_for_file_close(H5F_t *f) HDassert(f->shared->cache); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* It is possible to receive the close warning more than once */ if (cache_ptr->close_warning_received) @@ -501,7 +496,6 @@ H5C_dest(H5F_t *f) /* Sanity check */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->close_warning_received); #if H5AC_DUMP_IMAGE_STATS_ON_CLOSE @@ -542,16 +536,12 @@ H5C_dest(H5F_t *f) if (cache_ptr->log_info != NULL) H5MM_xfree(cache_ptr->log_info); -#ifndef NDEBUG #ifdef H5C_DO_SANITY_CHECKS if (cache_ptr->get_entry_ptr_from_addr_counter > 0) HDfprintf(stdout, "*** %" PRId64 " calls to H5C_get_entry_ptr_from_add(). 
***\n", cache_ptr->get_entry_ptr_from_addr_counter); #endif /* H5C_DO_SANITY_CHECKS */ - cache_ptr->magic = 0; -#endif - cache_ptr = H5FL_FREE(H5C_t, cache_ptr); done: @@ -648,7 +638,6 @@ H5C_flush_cache(H5F_t *f, unsigned flags) HDassert(f->shared); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->slist_ptr); #ifdef H5C_DO_SANITY_CHECKS @@ -778,11 +767,8 @@ H5C_flush_to_min_clean(H5F_t *f) HDassert(f); HDassert(f->shared); - cache_ptr = f->shared->cache; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); if (cache_ptr->check_write_permitted != NULL) { if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0) @@ -819,7 +805,7 @@ H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr) FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry") cache_ptr->cache_hits = 0; @@ -857,7 +843,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_p FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry") if (config_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry") @@ -1045,7 +1031,7 @@ H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled) FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry") /* There is no fundamental reason why we should not permit @@ -1128,7 +1114,7 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_sli FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry") if (slist_enabled) { @@ -1145,7 +1131,6 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_sli /* scan the index list and insert all dirty entries in the slist */ entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); if (entry_ptr->is_dirty) H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) entry_ptr = entry_ptr->il_next; @@ -1226,7 +1211,6 @@ H5C_unsettle_ring(H5F_t *f, H5C_ring_t ring) HDassert(f->shared->cache); HDassert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring)); cache_ptr = f->shared->cache; - HDassert(H5C__H5C_T_MAGIC == cache_ptr->magic); switch (ring) { case H5C_RING_RDFSM: diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c index b7c05d02f31..c32417ebb50 100644 --- a/src/H5Cdbg.c +++ b/src/H5Cdbg.c @@ -86,7 +86,6 @@ H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name) /* Sanity check */ HDassert(cache_ptr != NULL); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_name != NULL); /* First, create a skip list */ @@ -101,7 +100,6 @@ H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name) entry_ptr = cache_ptr->index[i]; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); if (H5SL_insert(slist_ptr, entry_ptr, &(entry_ptr->addr)) < 0) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't insert entry in skip list") @@ -131,8 +129,6 @@ H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name) i = 0; entry_ptr = (H5C_cache_entry_t *)H5SL_remove_first(slist_ptr); while 
(entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - /* Print entry */ HDfprintf(stdout, "%s%5d ", cache_ptr->prefix, i); HDfprintf(stdout, " 0x%16llx ", (long long)(entry_ptr->addr)); @@ -193,7 +189,6 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name) /* Sanity check */ HDassert(cache_ptr != NULL); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_name != NULL); HDfprintf(stdout, "\n\nDump of metadata cache LRU \"%s\"\n", cache_name); @@ -218,8 +213,6 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name) entry_ptr = cache_ptr->LRU_head_ptr; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - /* Print entry */ HDfprintf(stdout, "%s%5d ", cache_ptr->prefix, i); HDfprintf(stdout, " 0x%16llx ", (long long)(entry_ptr->addr)); @@ -272,7 +265,6 @@ H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn) FUNC_ENTER_NOAPI_NOERR HDassert(cache_ptr != NULL); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(calling_fcn != NULL); HDfprintf(stdout, "\n\nDumping metadata cache skip list from %s.\n", calling_fcn); @@ -296,7 +288,6 @@ H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn) entry_ptr = NULL; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDfprintf(stdout, "%s%d 0x%016llx %4lld %d/%d %d %s\n", cache_ptr->prefix, i, (long long)(entry_ptr->addr), (long long)(entry_ptr->size), (int)(entry_ptr->is_protected), (int)(entry_ptr->is_pinned), (int)(entry_ptr->is_dirty), @@ -340,8 +331,7 @@ H5C_set_prefix(H5C_t *cache_ptr, char *prefix) FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC) || (prefix == NULL) || - (HDstrlen(prefix) >= H5C__PREFIX_LEN)) + if (cache_ptr == NULL || prefix == NULL || HDstrlen(prefix) >= H5C__PREFIX_LEN) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry") HDstrncpy(&(cache_ptr->prefix[0]), prefix, (size_t)(H5C__PREFIX_LEN)); @@ -415,12 +405,7 @@ H5C_stats(H5C_t *cache_ptr, const char *cache_name, FUNC_ENTER_NOAPI(FAIL) - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - - /* This would normally be an assert, but we need to use an HGOTO_ERROR - * call to shut up the compiler. 
- */ - if ((NULL == cache_ptr) || (cache_ptr->magic != H5C__H5C_T_MAGIC) || (NULL == cache_name)) + if (NULL == cache_ptr || NULL == cache_name) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or cache_name") #if H5C_COLLECT_CACHE_STATS @@ -722,7 +707,6 @@ H5C_stats__reset(H5C_t H5_ATTR_UNUSED *cache_ptr) #endif /* H5C_COLLECT_CACHE_STATS */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); #if H5C_COLLECT_CACHE_STATS for (i = 0; i <= cache_ptr->max_type_id; i++) { @@ -846,7 +830,6 @@ H5C_flush_dependency_exists(H5C_t *cache_ptr, haddr_t parent_addr, haddr_t child /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(H5F_addr_defined(parent_addr)); HDassert(H5F_addr_defined(child_addr)); HDassert(fd_exists_ptr); @@ -855,9 +838,6 @@ H5C_flush_dependency_exists(H5C_t *cache_ptr, haddr_t parent_addr, haddr_t child H5C__SEARCH_INDEX(cache_ptr, child_addr, child_ptr, FAIL) if (parent_ptr && child_ptr) { - HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(child_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - if (child_ptr->flush_dep_nparents > 0) { unsigned u; /* Local index variable */ @@ -916,7 +896,6 @@ H5C_validate_index_list(H5C_t *cache_ptr) /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); for (i = 0; i < H5C_RING_NTYPES; i++) { index_ring_len[i] = 0; @@ -1042,7 +1021,6 @@ H5C_get_entry_ptr_from_addr(H5C_t *cache_ptr, haddr_t addr, void **entry_ptr_ptr /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(H5F_addr_defined(addr)); HDassert(entry_ptr_ptr); @@ -1086,7 +1064,6 @@ H5C_get_serialization_in_progress(const H5C_t *cache_ptr) /* Sanity check */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); FUNC_LEAVE_NOAPI(cache_ptr->serialization_in_progress) } /* H5C_get_serialization_in_progress() */ @@ -1119,7 +1096,6 @@ H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring) /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(inner_ring >= H5C_RING_USER); HDassert(inner_ring <= H5C_RING_SB); @@ -1171,7 +1147,6 @@ H5C_verify_entry_type(H5C_t *cache_ptr, haddr_t addr, const H5C_class_t *expecte /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(H5F_addr_defined(addr)); HDassert(expected_type); HDassert(in_cache_ptr); @@ -1224,7 +1199,6 @@ H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size) { HDassert(cache_ptr != NULL); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(version == H5C__CURR_AUTO_RESIZE_RPT_FCN_VER); switch (status) { @@ -1355,7 +1329,6 @@ H5C__validate_lru_list(H5C_t *cache_ptr) FUNC_ENTER_PACKAGE HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) && (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr)) @@ -1428,7 +1401,6 @@ H5C__validate_pinned_entry_list(H5C_t *cache_ptr) FUNC_ENTER_PACKAGE HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) && (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr)) @@ -1504,7 +1476,6 @@ H5C__validate_protected_entry_list(H5C_t *cache_ptr) FUNC_ENTER_PACKAGE HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); if (((cache_ptr->pl_head_ptr == NULL) 
|| (cache_ptr->pl_tail_ptr == NULL)) && (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr)) @@ -1578,7 +1549,6 @@ H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr) /* Assertions */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->slist_ptr); node_ptr = H5SL_first(cache_ptr->slist_ptr); @@ -1589,7 +1559,6 @@ H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr) entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr); HDassert(entry_ptr); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->is_dirty); HDassert(entry_ptr->in_slist); @@ -1637,7 +1606,7 @@ H5C__image_stats(H5C_t *cache_ptr, hbool_t H5_ATTR_UNUSED print_header) FUNC_ENTER_NOAPI(FAIL) - if (!cache_ptr || cache_ptr->magic != H5C__H5C_T_MAGIC) + if (NULL == cache_ptr) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr") #if H5C_COLLECT_CACHE_STATS diff --git a/src/H5Centry.c b/src/H5Centry.c index e5735614a99..2d55a6bb015 100644 --- a/src/H5Centry.c +++ b/src/H5Centry.c @@ -266,9 +266,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) /* Sanity check */ HDassert(f); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(entry_ptr); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(!entry_ptr->image_up_to_date); HDassert(entry_ptr->is_dirty); HDassert(!entry_ptr->is_protected); @@ -463,9 +461,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) HDassert(f); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(entry_ptr); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring != H5C_RING_UNDEFINED); HDassert(entry_ptr->type); @@ -885,12 +881,6 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) "can't notify client about entry dirty flag cleared") } /* end if */ - /* we are about to discard the in core representation -- - * set the magic field to bad magic so we can detect a - * freed entry if we see one. - */ - entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; - /* verify that the image has been freed */ HDassert(entry_ptr->image_ptr == NULL); @@ -899,11 +889,6 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) } /* end if */ else { HDassert(take_ownership); - - /* Client is taking ownership of the entry. 
Set bad magic here too - * so the cache will choke unless the entry is re-inserted properly - */ - entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; } /* end else */ } /* if (destroy) */ @@ -1261,7 +1246,6 @@ H5C__load_entry(H5F_t *f, HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6)); - entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; entry->cache_ptr = f->shared->cache; entry->addr = addr; entry->size = len; @@ -1479,7 +1463,6 @@ H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr) for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) { /* Sanity checks */ HDassert(entry_ptr->flush_dep_parent); - HDassert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0); /* decrement the parents number of unserialized children */ @@ -1526,7 +1509,6 @@ H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr) for (u = 0; u < entry_ptr->flush_dep_nparents; u++) { /* Sanity check */ HDassert(entry_ptr->flush_dep_parent); - HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children < entry_ptr->flush_dep_parent[u]->flush_dep_nchildren); @@ -1605,9 +1587,7 @@ H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry /* Sanity checks */ HDassert(f); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(entry_ptr); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(!entry_ptr->prefetched); HDassert(!entry_ptr->image_up_to_date); HDassert(entry_ptr->is_dirty); @@ -1686,9 +1666,7 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_e /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(pf_entry_ptr); - HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(pf_entry_ptr->type); HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); HDassert(pf_entry_ptr->prefetched); @@ -1698,8 +1676,6 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_e /* Scan each entry on the index list */ entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - /* Here we look at entry_ptr->flush_dep_nparents and not * entry_ptr->fd_parent_count as it is possible that some * or all of the prefetched flush dependency child relationships @@ -1723,7 +1699,6 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_e while (!found && (u < entry_ptr->fd_parent_count)) { /* Sanity check entry */ HDassert(entry_ptr->flush_dep_parent[u]); - HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); /* Correct entry? 
*/ if (pf_entry_ptr == entry_ptr->flush_dep_parent[u]) @@ -1843,11 +1818,9 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t HDassert(f->shared); HDassert(f->shared->cache); HDassert(f->shared->cache == cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(entry_ptr_ptr); HDassert(*entry_ptr_ptr); pf_entry_ptr = *entry_ptr_ptr; - HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(pf_entry_ptr->type); HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID); HDassert(pf_entry_ptr->prefetched); @@ -1878,7 +1851,6 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t for (i = (int)(pf_entry_ptr->fd_parent_count) - 1; i >= 0; i--) { HDassert(pf_entry_ptr->flush_dep_parent); HDassert(pf_entry_ptr->flush_dep_parent[i]); - HDassert(pf_entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(pf_entry_ptr->flush_dep_parent[i]->flush_dep_nchildren > 0); HDassert(pf_entry_ptr->fd_parent_addrs); HDassert(pf_entry_ptr->flush_dep_parent[i]->addr == pf_entry_ptr->fd_parent_addrs[i]); @@ -1953,7 +1925,6 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6)); - ds_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; ds_entry_ptr->cache_ptr = f->shared->cache; ds_entry_ptr->addr = addr; ds_entry_ptr->size = len; @@ -2095,7 +2066,6 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t ds_entry_ptr->is_protected = TRUE; while (fd_children[i] != NULL) { /* Sanity checks */ - HDassert((fd_children[i])->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert((fd_children[i])->prefetched); HDassert((fd_children[i])->fd_parent_count > 0); HDassert((fd_children[i])->fd_parent_addrs); @@ -2186,7 +2156,6 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(type); HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); HDassert(type->image_len); @@ -2223,7 +2192,6 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache") } /* end if */ - entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; entry_ptr->cache_ptr = cache_ptr; entry_ptr->addr = addr; entry_ptr->type = type; @@ -2469,7 +2437,6 @@ H5C_mark_entry_dirty(void *thing) HDassert(H5F_addr_defined(entry_ptr->addr)); cache_ptr = entry_ptr->cache_ptr; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); if (entry_ptr->is_protected) { HDassert(!((entry_ptr)->is_read_only)); @@ -2567,7 +2534,6 @@ H5C_mark_entry_clean(void *_thing) HDassert(H5F_addr_defined(entry_ptr->addr)); cache_ptr = entry_ptr->cache_ptr; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* Operate on pinned entry */ if (entry_ptr->is_protected) @@ -2733,7 +2699,6 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd FUNC_ENTER_NOAPI(FAIL) HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(type); HDassert(H5F_addr_defined(old_addr)); HDassert(H5F_addr_defined(new_addr)); @@ -2884,7 +2849,6 @@ H5C_resize_entry(void *thing, size_t new_size) HDassert(H5F_addr_defined(entry_ptr->addr)); cache_ptr = entry_ptr->cache_ptr; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* Check for usage errors */ if 
(new_size <= 0) @@ -3017,7 +2981,6 @@ H5C_pin_protected_entry(void *thing) HDassert(H5F_addr_defined(entry_ptr->addr)); cache_ptr = entry_ptr->cache_ptr; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); #ifdef H5C_DO_EXTREME_SANITY_CHECKS if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || @@ -3089,11 +3052,8 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign /* check args */ HDassert(f); HDassert(f->shared); - cache_ptr = f->shared->cache; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(type); HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type); HDassert(H5F_addr_defined(addr)); @@ -3129,8 +3089,6 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign if (entry_ptr->ring != ring) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry") - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - if (entry_ptr->prefetched) { /* This call removes the prefetched entry from the cache, * and replaces it with an entry deserialized from the @@ -3139,7 +3097,6 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign if (H5C__deserialize_prefetched_entry(f, cache_ptr, &entry_ptr, type, addr, udata) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry") - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(!entry_ptr->prefetched); HDassert(entry_ptr->addr == addr); } /* end if */ @@ -3489,7 +3446,6 @@ H5C_unpin_entry(void *_entry_ptr) HDassert(entry_ptr); cache_ptr = entry_ptr->cache_ptr; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); #ifdef H5C_DO_EXTREME_SANITY_CHECKS if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 || @@ -3571,7 +3527,6 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(H5F_addr_defined(addr)); HDassert(thing); HDassert(!(pin_entry && unpin_entry)); @@ -3850,7 +3805,6 @@ H5C_unsettle_entry_ring(void *_entry) (H5C_RING_MDFSM == entry->ring)); cache = entry->cache_ptr; HDassert(cache); - HDassert(cache->magic == H5C__H5C_T_MAGIC); switch (entry->ring) { case H5C_RING_USER: @@ -3916,14 +3870,11 @@ H5C_create_flush_dependency(void *parent_thing, void *child_thing) /* Sanity checks */ HDassert(parent_entry); - HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(H5F_addr_defined(parent_entry->addr)); HDassert(child_entry); - HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(H5F_addr_defined(child_entry->addr)); cache_ptr = parent_entry->cache_ptr; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr == child_entry->cache_ptr); #ifndef NDEBUG /* Make sure the parent is not already a parent */ @@ -4058,14 +4009,11 @@ H5C_destroy_flush_dependency(void *parent_thing, void *child_thing) /* Sanity checks */ HDassert(parent_entry); - HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(H5F_addr_defined(parent_entry->addr)); HDassert(child_entry); - HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(H5F_addr_defined(child_entry->addr)); cache_ptr = parent_entry->cache_ptr; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr == 
child_entry->cache_ptr); /* Usage checks */ @@ -4183,7 +4131,6 @@ H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flag HDassert(f->shared); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(type); HDassert(H5F_addr_defined(addr)); @@ -4257,7 +4204,6 @@ H5C_remove_entry(void *_entry) HDassert(entry->ring != H5C_RING_UNDEFINED); cache = entry->cache_ptr; HDassert(cache); - HDassert(cache->magic == H5C__H5C_T_MAGIC); /* Check for error conditions */ if (entry->is_dirty) @@ -4353,11 +4299,6 @@ H5C_remove_entry(void *_entry) /* Reset the pointer to the cache the entry is within */ entry->cache_ptr = NULL; - /* Client is taking ownership of the entry. Set bad magic here so the - * cache will choke unless the entry is re-inserted properly - */ - entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; - done: FUNC_LEAVE_NOAPI(ret_value) } /* H5C__remove_entry() */ diff --git a/src/H5Cimage.c b/src/H5Cimage.c index df60d00a73f..f7696e1a393 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -73,10 +73,6 @@ #define H5C__MDCI_MAX_FD_CHILDREN USHRT_MAX #define H5C__MDCI_MAX_FD_PARENTS USHRT_MAX -/* Values for image entry magic field */ -#define H5C_IMAGE_ENTRY_T_MAGIC 0x005CAC08 -#define H5C_IMAGE_ENTRY_T_BAD_MAGIC 0xBeefDead - /* Maximum ring allowed in image */ #define H5C_MAX_RING_IN_IMAGE H5C_RING_MDFSM @@ -183,7 +179,6 @@ H5C_cache_image_pending(const H5C_t *cache_ptr) /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); ret_value = (cache_ptr->load_image && !cache_ptr->image_loaded); @@ -224,7 +219,6 @@ H5C_cache_image_status(H5F_t *f, hbool_t *load_ci_ptr, hbool_t *write_ci_ptr) HDassert(f->shared); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(load_ci_ptr); HDassert(write_ci_ptr); @@ -265,7 +259,6 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr) HDassert(f->shared); HDassert(cache_ptr == f->shared->cache); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->close_warning_received); HDassert(cache_ptr->image_ctl.generate_image); HDassert(cache_ptr->num_entries_in_image > 0); @@ -311,7 +304,6 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr) fake_cache_ptr = (H5C_t *)H5MM_malloc(sizeof(H5C_t)); HDassert(fake_cache_ptr); - fake_cache_ptr->magic = H5C__H5C_T_MAGIC; /* needed for sanity checks */ fake_cache_ptr->image_len = cache_ptr->image_len; @@ -327,7 +319,6 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr) HDassert(fake_cache_ptr->image_entries); for (u = 0; u < fake_cache_ptr->num_entries_in_image; u++) { - fake_cache_ptr->image_entries[u].magic = H5C_IMAGE_ENTRY_T_MAGIC; fake_cache_ptr->image_entries[u].image_ptr = NULL; /* touch up f->shared->cache to satisfy sanity checks... 
*/ @@ -420,7 +411,6 @@ H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr) HDassert(f->shared); HDassert(cache_ptr == f->shared->cache); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* Construct cache image */ if (H5C__construct_cache_image_buffer(f, cache_ptr) < 0) @@ -470,7 +460,6 @@ H5C__free_image_entries_array(H5C_t *cache_ptr) /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->close_warning_received); HDassert(cache_ptr->image_ctl.generate_image); HDassert(cache_ptr->index_len == 0); @@ -487,7 +476,6 @@ H5C__free_image_entries_array(H5C_t *cache_ptr) /* Sanity checks */ HDassert(ie_ptr); - HDassert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC); HDassert(ie_ptr->image_ptr); /* Free the parent addrs array if appropriate */ @@ -501,9 +489,6 @@ H5C__free_image_entries_array(H5C_t *cache_ptr) /* Free the image */ ie_ptr->image_ptr = H5MM_xfree(ie_ptr->image_ptr); - - /* Set magic field to bad magic so we can detect freed entries */ - ie_ptr->magic = H5C_IMAGE_ENTRY_T_BAD_MAGIC; } /* end for */ /* Free the image entries array */ @@ -534,7 +519,7 @@ H5C__get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *confi FUNC_ENTER_PACKAGE - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry") if (config_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad config_ptr on entry") @@ -577,8 +562,7 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr) H5AC_aux_t *aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr; int mpi_result; - if ((NULL == aux_ptr) || (aux_ptr->mpi_rank == 0)) { - HDassert((NULL == aux_ptr) || (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC)); + if (NULL == aux_ptr || aux_ptr->mpi_rank == 0) { #endif /* H5_HAVE_PARALLEL */ /* Read the buffer (if serial access, or rank 0 of parallel access) */ @@ -642,7 +626,6 @@ H5C__load_cache_image(H5F_t *f) HDassert(f->shared); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* If the image address is defined, load the image, decode it, * and insert its contents into the metadata cache. 
@@ -737,7 +720,6 @@ H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, hsize_t len, hbool_ HDassert(f->shared); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* Set information needed to load cache image */ cache_ptr->image_addr = addr; @@ -866,7 +848,6 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) HDassert(f->shared->cache); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(image_generated); /* If the file is opened and closed without any access to @@ -1149,7 +1130,7 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_image_ctl HDassert(f->shared->cache == f->shared->cache); /* Check arguments */ - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry") /* Validate the config: */ @@ -1347,7 +1328,6 @@ H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, const uint8_t * /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(buf); HDassert(*buf); @@ -1447,13 +1427,11 @@ H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint HDassert(f->shared); HDassert(cache_ptr == f->shared->cache); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(buf); HDassert(*buf); HDassert(entry_num < cache_ptr->num_entries_in_image); ie_ptr = &(cache_ptr->image_entries[entry_num]); HDassert(ie_ptr); - HDassert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC); /* Get pointer to buffer */ p = *buf; @@ -1588,7 +1566,6 @@ H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t * /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->close_warning_received); HDassert(cache_ptr->image_ctl.generate_image); HDassert(cache_ptr->index_len == 0); @@ -1667,7 +1644,6 @@ H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigne HDassert(f->shared); HDassert(cache_ptr == f->shared->cache); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->close_warning_received); HDassert(cache_ptr->image_ctl.generate_image); HDassert(cache_ptr->index_len == 0); @@ -1675,7 +1651,6 @@ H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigne HDassert(*buf); HDassert(entry_num < cache_ptr->num_entries_in_image); ie_ptr = &(cache_ptr->image_entries[entry_num]); - HDassert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC); /* Get pointer to buffer to encode into */ p = *buf; @@ -1811,7 +1786,6 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) /* sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* Remove from the cache image all dirty entries that are * flush dependency children of dirty entries that are not in the @@ -1825,8 +1799,6 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) done = TRUE; entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - /* Should this entry be in the image */ if (entry_ptr->image_dirty && entry_ptr->include_in_image && (entry_ptr->fd_parent_count > 0)) { HDassert(entry_ptr->flush_dep_parent != NULL); @@ -1834,7 +1806,6 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) parent_ptr = entry_ptr->flush_dep_parent[u]; /* Sanity 
check parent */ - HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring == parent_ptr->ring); if (parent_ptr->is_dirty && !parent_ptr->include_in_image && @@ -1873,7 +1844,6 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) parent_ptr = entry_ptr->flush_dep_parent[u]; /* Sanity check parent */ - HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring == parent_ptr->ring); if (parent_ptr->include_in_image) { @@ -1902,7 +1872,6 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) parent_ptr = entry_ptr->flush_dep_parent[u]; /* Sanity check parent */ - HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring == parent_ptr->ring); if (!parent_ptr->include_in_image) { @@ -1971,7 +1940,6 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) for (u = 0; u < entry_ptr->fd_parent_count; u++) { parent_ptr = entry_ptr->flush_dep_parent[u]; - HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); if (parent_ptr->include_in_image && parent_ptr->image_fd_height <= 0) H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, 1); } /* end for */ @@ -2039,7 +2007,6 @@ H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_t *entry_ptr, /* Sanity checks */ HDassert(entry_ptr); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->include_in_image); HDassert((entry_ptr->image_fd_height == 0) || (entry_ptr->image_fd_height < fd_height)); HDassert(((fd_height == 0) && (entry_ptr->fd_child_count == 0)) || @@ -2054,7 +2021,6 @@ H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_t *entry_ptr, H5C_cache_entry_t *parent_ptr; parent_ptr = entry_ptr->flush_dep_parent[u]; - HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); if (parent_ptr->include_in_image && parent_ptr->image_fd_height <= fd_height) H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, fd_height + 1); @@ -2093,7 +2059,6 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr) /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->close_warning_received); HDassert(cache_ptr->pl_len == 0); HDassert(cache_ptr->num_entries_in_image > 0); @@ -2106,7 +2071,6 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr) /* Initialize (non-zero/NULL/FALSE) fields */ for (u = 0; u <= cache_ptr->num_entries_in_image; u++) { - image_entries[u].magic = H5C_IMAGE_ENTRY_T_MAGIC; image_entries[u].addr = HADDR_UNDEF; image_entries[u].ring = H5C_RING_UNDEFINED; image_entries[u].type_id = -1; @@ -2116,8 +2080,6 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr) u = 0; entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - if (entry_ptr->include_in_image) { /* Since we have already serialized the cache, the following * should hold. 
@@ -2243,7 +2205,6 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) HDassert(f->shared); HDassert(f->shared->sblock); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->close_warning_received); HDassert(cache_ptr->pl_len == 0); @@ -2256,8 +2217,6 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) /* Scan each entry on the index list */ entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - /* Since we have already serialized the cache, the following * should hold. */ @@ -2382,8 +2341,6 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) #endif entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - if (entry_ptr->include_in_image) { if (entry_ptr->fd_parent_count > 0) fd_parents_list_len = (size_t)(H5F_SIZEOF_ADDR(f) * entry_ptr->fd_parent_count); @@ -2430,7 +2387,6 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) */ entry_ptr = cache_ptr->LRU_head_ptr; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->type != NULL); /* to avoid confusion, don't set lru_rank on epoch markers. @@ -2495,7 +2451,6 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) HDassert(f->shared); HDassert(cache_ptr == f->shared->cache); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->image_buffer); HDassert(cache_ptr->image_len > 0); @@ -2553,7 +2508,6 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "fd parent not in cache?!?") /* Sanity checks */ - HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(parent_ptr->addr == pf_entry_ptr->fd_parent_addrs[v]); HDassert(parent_ptr->lru_rank == -1); @@ -2579,7 +2533,6 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) */ pf_entry_ptr = cache_ptr->il_head; while (pf_entry_ptr != NULL) { - HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert((pf_entry_ptr->prefetched && pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY) || (!pf_entry_ptr->prefetched && pf_entry_ptr->type != H5AC_PREFETCHED_ENTRY)); if (pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY) @@ -2588,7 +2541,6 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) for (v = 0; v < pf_entry_ptr->fd_parent_count; v++) { parent_ptr = pf_entry_ptr->flush_dep_parent[v]; HDassert(parent_ptr); - HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(pf_entry_ptr->fd_parent_addrs); HDassert(pf_entry_ptr->fd_parent_addrs[v] == parent_ptr->addr); HDassert(parent_ptr->flush_dep_nchildren > 0); @@ -2613,7 +2565,6 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) i = -1; entry_ptr = cache_ptr->LRU_head_ptr; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->type != NULL); if (entry_ptr->prefetched) { @@ -2696,7 +2647,6 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b /* Sanity checks */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->num_entries_in_image > 0); HDassert(buf && *buf); @@ -2813,7 +2763,6 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b /* Initialize the rest of the fields in the prefetched entry */ /* (Only need to set 
non-zero/NULL/FALSE fields, due to calloc() above) */ - pf_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; pf_entry_ptr->cache_ptr = cache_ptr; pf_entry_ptr->image_up_to_date = TRUE; pf_entry_ptr->type = H5AC_PREFETCHED_ENTRY; @@ -2870,7 +2819,6 @@ H5C__write_cache_image_superblock_msg(H5F_t *f, hbool_t create) HDassert(f->shared->cache); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->close_warning_received); /* Write data into the metadata cache image superblock extension message. @@ -2882,8 +2830,7 @@ H5C__write_cache_image_superblock_msg(H5F_t *f, hbool_t create) if (cache_ptr->aux_ptr) { /* we have multiple processes */ H5AC_aux_t *aux_ptr; - aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr; - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); + aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr; mdci_msg.size = aux_ptr->p0_image_len; } /* end if */ else @@ -2930,8 +2877,7 @@ H5C__write_cache_image(H5F_t *f, const H5C_t *cache_ptr) { H5AC_aux_t *aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr; - if ((NULL == aux_ptr) || (aux_ptr->mpi_rank == 0)) { - HDassert((NULL == aux_ptr) || (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC)); + if (NULL == aux_ptr || aux_ptr->mpi_rank == 0) { #endif /* H5_HAVE_PARALLEL */ /* Write the buffer (if serial access, or rank 0 for parallel access) */ diff --git a/src/H5Cint.c b/src/H5Cint.c index 5200ac6d103..f67636743fe 100644 --- a/src/H5Cint.c +++ b/src/H5Cint.c @@ -105,7 +105,6 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) HDassert(f); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length); HDassert(0.0 <= cache_ptr->resize_ctl.min_clean_fraction); HDassert(cache_ptr->resize_ctl.min_clean_fraction <= 100.0); @@ -354,7 +353,6 @@ H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *statu HDassert(f); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert((status_ptr) && (*status_ptr == in_spec)); HDassert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0)); @@ -433,7 +431,6 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr) FUNC_ENTER_PACKAGE HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); if (cache_ptr->epoch_markers_active <= 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?") @@ -528,7 +525,6 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitte HDassert(f); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* if there is a limit on the amount that the cache size can be decrease * in any one round of the cache size reduction algorithm, load that @@ -548,7 +544,6 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitte bytes_evicted < eviction_size_limit) { hbool_t skipping_entry = FALSE; - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(!(entry_ptr->is_protected)); HDassert(!(entry_ptr->is_read_only)); HDassert((entry_ptr->ro_ref_count) == 0); @@ -701,7 +696,6 @@ H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr) FUNC_ENTER_PACKAGE HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); if (cache_ptr->epoch_markers_active >= cache_ptr->resize_ctl.epochs_before_eviction) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers") @@ -758,7 +752,6 @@ H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr) 
FUNC_ENTER_PACKAGE HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); while (cache_ptr->epoch_markers_active > 0) { /* get the index of the last epoch marker in the LRU list @@ -823,7 +816,6 @@ H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr) FUNC_ENTER_PACKAGE HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); if (cache_ptr->epoch_markers_active <= cache_ptr->resize_ctl.epochs_before_eviction) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry") @@ -896,7 +888,6 @@ H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t n FUNC_ENTER_PACKAGE HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->flash_size_increase_possible); HDassert(new_entry_size > cache_ptr->flash_size_increase_threshold); HDassert(old_entry_size < new_entry_size); @@ -1030,7 +1021,6 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) HDassert(f->shared); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->slist_ptr); HDassert(cache_ptr->slist_enabled); @@ -1189,11 +1179,8 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) HDassert(f); HDassert(f->shared); - cache_ptr = f->shared->cache; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->slist_enabled); HDassert(cache_ptr->slist_ptr); HDassert(ring > H5C_RING_UNDEFINED); @@ -1237,7 +1224,6 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) entry_ptr = cache_ptr->pel_head_ptr; cur_ring_pel_len = 0; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring >= ring); if (entry_ptr->ring == ring) cur_ring_pel_len++; @@ -1304,7 +1290,6 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) if (NULL == next_entry_ptr) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") - HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(next_entry_ptr->is_dirty); HDassert(next_entry_ptr->in_slist); HDassert(next_entry_ptr->ring >= ring); @@ -1324,7 +1309,6 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) * sanity checking just in case. 
*/ HDassert(entry_ptr != NULL); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->in_slist); HDassert(entry_ptr->is_dirty); HDassert(entry_ptr->ring >= ring); @@ -1338,7 +1322,6 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) if (NULL == next_entry_ptr) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") - HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(next_entry_ptr->is_dirty); HDassert(next_entry_ptr->in_slist); HDassert(next_entry_ptr->ring >= ring); @@ -1446,11 +1429,9 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) next_entry_ptr = cache_ptr->il_head; while (next_entry_ptr != NULL) { entry_ptr = next_entry_ptr; - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring >= ring); next_entry_ptr = entry_ptr->il_next; - HDassert((next_entry_ptr == NULL) || (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC)); if (((!entry_ptr->flush_me_last) || (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) && @@ -1540,7 +1521,6 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) cur_ring_pel_len = 0; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring >= ring); if (entry_ptr->ring == ring) @@ -1640,7 +1620,6 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) FUNC_ENTER_PACKAGE HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->slist_enabled); HDassert(cache_ptr->slist_ptr); HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0); @@ -1737,7 +1716,6 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) if (NULL == next_entry_ptr) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") - HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(next_entry_ptr->is_dirty); HDassert(next_entry_ptr->in_slist); } /* end if */ @@ -1761,7 +1739,6 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) * with, we do a bit of extra sanity checking on * entry_ptr. */ - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->in_slist); HDassert(entry_ptr->is_dirty); @@ -1777,7 +1754,6 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) if (NULL == next_entry_ptr) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!") - HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(next_entry_ptr->is_dirty); HDassert(next_entry_ptr->in_slist); @@ -1909,7 +1885,6 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) /* Sanity checks */ HDassert(f); HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size)); /* check to see if cache_ptr->msic_in_progress is TRUE. 
If it, this @@ -1937,7 +1912,6 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) || ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) && (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(!(entry_ptr->is_protected)); HDassert(!(entry_ptr->is_read_only)); HDassert((entry_ptr->ro_ref_count) == 0); @@ -2194,7 +2168,6 @@ H5C__serialize_cache(H5F_t *f) HDassert(f->shared); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->slist_ptr); #ifdef H5C_DO_SANITY_CHECKS @@ -2242,7 +2215,6 @@ H5C__serialize_cache(H5F_t *f) scan_ptr = cache_ptr->il_head; while (scan_ptr != NULL) { - HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); scan_ptr->serialization_count = 0; scan_ptr = scan_ptr->il_next; } /* end while */ @@ -2307,7 +2279,6 @@ H5C__serialize_cache(H5F_t *f) scan_ptr = cache_ptr->il_head; while (scan_ptr != NULL) { - HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(scan_ptr->serialization_count <= 1); scan_ptr = scan_ptr->il_next; @@ -2364,7 +2335,6 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring) HDassert(f->shared); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(ring > H5C_RING_UNDEFINED); HDassert(ring < H5C_RING_NTYPES); @@ -2457,8 +2427,6 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring) done = TRUE; /* set to FALSE if any activity in inner loop */ entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - /* Verify that either the entry is already serialized, or * that it is assigned to either the target or an inner * ring. @@ -2532,7 +2500,6 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring) */ entry_ptr = cache_ptr->il_head; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring > H5C_RING_UNDEFINED); HDassert(entry_ptr->ring < H5C_RING_NTYPES); HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date)); diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index 47f9b1fae8a..1c12fba3385 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -184,7 +184,6 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha /* Sanity checks */ HDassert(cache_ptr != NULL); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(num_candidates > 0); HDassert((!cache_ptr->slist_enabled) || (num_candidates <= cache_ptr->slist_len)); HDassert(candidates_list_ptr != NULL); @@ -306,7 +305,6 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry is protected?!?!?") /* Sanity checks */ - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->ring >= H5C_RING_USER); HDassert(entry_ptr->ring <= H5C_RING_SB); HDassert(!entry_ptr->flush_immediately); @@ -422,7 +420,6 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr) FUNC_ENTER_NOAPI(FAIL) HDassert(cache_ptr != NULL); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* As a sanity check, set space needed to the dirty_index_size. 
This * should be the sum total of the sizes of all the dirty entries @@ -533,7 +530,6 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr) FUNC_ENTER_NOAPI(FAIL) HDassert(cache_ptr != NULL); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); /* compute the number of bytes (if any) that must be flushed to get the * cache back within its min clean constraints. @@ -652,7 +648,6 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr HDassert(f->shared); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(ce_array_len > 0); HDassert(ce_array_ptr != NULL); @@ -1055,11 +1050,8 @@ H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES HDassert(f); HDassert(f->shared); - cache_ptr = f->shared->cache; - HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->slist_ptr); HDassert(entries_to_flush[H5C_RING_UNDEFINED] == 0); @@ -1178,7 +1170,6 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu HDassert(f->shared); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(cache_ptr->slist_ptr); HDassert(ring > H5C_RING_UNDEFINED); HDassert(ring < H5C_RING_NTYPES); diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index d9203b8b151..cb488c9539a 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -42,11 +42,8 @@ /* Number of epoch markers active */ #define H5C__MAX_EPOCH_MARKERS 10 - /* Cache configuration settings */ #define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */ -#define H5C__H5C_T_MAGIC 0x005CAC0E - /* Initial allocated size of the "flush_dep_parent" array */ #define H5C_FLUSH_DEP_PARENT_INIT 8 @@ -548,7 +545,7 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size))) { \ (entry_ptr)->ht_prev != NULL \ ) #define H5C__PRE_HT_SEARCH_SC_CMP(cache_ptr, entry_addr) \ -((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ +((cache_ptr) == NULL || \ (cache_ptr)->index_size != \ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ !H5F_addr_defined(entry_addr) || \ @@ -556,8 +553,7 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size))) { \ H5C__HASH_FCN(entry_addr) >= H5C__HASH_TABLE_LEN \ ) #define H5C__POST_SUC_HT_SEARCH_SC_CMP(cache_ptr, entry_ptr, k) \ -((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ - (cache_ptr)->index_len < 1 || \ +((cache_ptr) == NULL || (cache_ptr)->index_len < 1 || \ (entry_ptr) == NULL || \ (cache_ptr)->index_size < (entry_ptr)->size || \ (cache_ptr)->index_size != \ @@ -573,7 +569,7 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size))) { \ #ifdef H5C_DO_SANITY_CHECKS #define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \ -if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ +if ((cache_ptr) == NULL || \ (entry_ptr) == NULL || !H5F_addr_defined((entry_ptr)->addr) || \ (entry_ptr)->ht_next != NULL || (entry_ptr)->ht_prev != NULL || \ (entry_ptr)->size <= 0 || \ @@ -597,7 +593,7 @@ if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ } #define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \ -if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ +if ((cache_ptr) == NULL || \ (cache_ptr)->index_size != \ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \ (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \ @@ -615,8 +611,7 @@ if 
((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ } #define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ -if ( (cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ - (cache_ptr)->index_len < 1 || \ +if ( (cache_ptr) == NULL || (cache_ptr)->index_len < 1 || \ (entry_ptr) == NULL || \ (cache_ptr)->index_size < (entry_ptr)->size || \ !H5F_addr_defined((entry_ptr)->addr) || \ @@ -648,7 +643,7 @@ if ( (cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ } #define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \ -if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ +if ((cache_ptr) == NULL || \ (entry_ptr) == NULL || !H5F_addr_defined((entry_ptr)->addr) || \ (entry_ptr)->size <= 0 || \ (entry_ptr)->ht_next != NULL || \ @@ -735,8 +730,7 @@ if ((cache_ptr) == NULL || \ } #define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \ -if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ - (cache_ptr)->index_len <= 0 || \ +if ((cache_ptr) == NULL || (cache_ptr)->index_len <= 0 || \ (entry_ptr) == NULL || (entry_ptr)->is_dirty != FALSE || \ (cache_ptr)->index_size < (entry_ptr)->size || \ (cache_ptr)->dirty_index_size < (entry_ptr)->size || \ @@ -756,8 +750,7 @@ if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ } #define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \ -if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \ - (cache_ptr)->index_len <= 0 || \ +if ((cache_ptr) == NULL || (cache_ptr)->index_len <= 0 || \ (entry_ptr) == NULL || (entry_ptr)->is_dirty != TRUE || \ (cache_ptr)->index_size < (entry_ptr)->size || \ (cache_ptr)->clean_index_size < (entry_ptr)->size || \ @@ -1031,7 +1024,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ \ if((cache_ptr)->slist_enabled) { \ HDassert(entry_ptr); \ @@ -1082,7 +1074,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ \ if((cache_ptr)->slist_enabled) { \ HDassert(entry_ptr); \ @@ -1135,7 +1126,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ \ if((cache_ptr)->slist_enabled) { \ HDassert((old_size) > 0); \ @@ -1396,7 +1386,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ HDassert(entry_ptr); \ HDassert(!(entry_ptr)->is_protected); \ HDassert(!(entry_ptr)->is_read_only); \ @@ -1429,7 +1418,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ HDassert(entry_ptr); \ HDassert(!(entry_ptr)->is_protected); \ HDassert(!(entry_ptr)->is_read_only); \ @@ -1475,7 +1463,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir 
#define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ HDassert(entry_ptr); \ HDassert(!(entry_ptr)->is_protected); \ HDassert(!(entry_ptr)->is_read_only); \ @@ -1515,7 +1502,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ HDassert(entry_ptr); \ HDassert(!(entry_ptr)->is_protected); \ HDassert(!(entry_ptr)->is_read_only); \ @@ -1561,7 +1547,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ HDassert(entry_ptr); \ HDassert(!(entry_ptr)->is_protected); \ HDassert(!(entry_ptr)->is_read_only); \ @@ -1609,7 +1594,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ HDassert(entry_ptr); \ HDassert(!(entry_ptr)->is_read_only); \ HDassert((entry_ptr)->ro_ref_count == 0); \ @@ -1678,7 +1662,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ HDassert(entry_ptr); \ HDassert(!(entry_ptr)->is_protected); \ HDassert(!(entry_ptr)->is_read_only); \ @@ -1726,7 +1709,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ HDassert(entry_ptr); \ HDassert(!(entry_ptr)->is_protected); \ HDassert(!(entry_ptr)->is_read_only); \ @@ -1771,7 +1753,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ HDassert(entry_ptr); \ HDassert((entry_ptr)->is_protected); \ HDassert((entry_ptr)->size > 0); \ @@ -1824,7 +1805,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert(entry_ptr); \ \ /* Insert the entry at the head of the list. */ \ @@ -1849,7 +1829,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ HDassert(entry_ptr); \ \ /* Remove the entry from the list. */ \ @@ -1874,7 +1853,6 @@ if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dir #define H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, fail_val) \ { \ HDassert(cache_ptr); \ - HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \ HDassert(entry_ptr); \ \ /* Remove entry and insert at the head of the list. 
*/ \ @@ -1948,9 +1926,6 @@ typedef struct H5C_tag_info_t { * advantages of flushing entries in increasing address order, a skip list * is used to track dirty entries. * - * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. - * This field is used to validate pointers to instances of H5C_t. - * * flush_in_progress: Boolean flag indicating whether a flush is in progress. * * log_info: Information used by the cache logging functionality. @@ -3017,9 +2992,7 @@ typedef struct H5C_tag_info_t { * NDEBUG is not #defined. * ****************************************************************************/ - struct H5C_t { - uint32_t magic; hbool_t flush_in_progress; H5C_log_info_t * log_info; void * aux_ptr; diff --git a/src/H5Cprefetched.c b/src/H5Cprefetched.c index 8ce94192aa5..dc7c33b373f 100644 --- a/src/H5Cprefetched.c +++ b/src/H5Cprefetched.c @@ -211,7 +211,6 @@ H5C__prefetched_entry_notify(H5C_notify_action_t action, void *_thing) /* Sanity checks */ HDassert(entry_ptr); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->prefetched); switch (action) { @@ -235,7 +234,6 @@ H5C__prefetched_entry_notify(H5C_notify_action_t action, void *_thing) HDassert(entry_ptr->flush_dep_parent); parent_ptr = entry_ptr->flush_dep_parent[u]; HDassert(parent_ptr); - HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(parent_ptr->flush_dep_nchildren > 0); /* Destroy flush dependency with flush dependency parent */ @@ -289,7 +287,6 @@ H5C__prefetched_entry_free_icr(void *_thing) /* Sanity checks */ HDassert(entry_ptr); - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(entry_ptr->prefetched); /* Release array for flush dependency parent addresses */ diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index ed52ed27451..914c57913cc 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -87,10 +87,6 @@ #define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024)) #define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024)) -/* Values for cache entry magic field */ -#define H5C__H5C_CACHE_ENTRY_T_MAGIC 0x005CAC0A -#define H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC 0xDeadBeef - /* Cache configuration validation definitions */ #define H5C_RESIZE_CFG__VALIDATE_GENERAL 0x1 #define H5C_RESIZE_CFG__VALIDATE_INCREMENT 0x2 @@ -976,25 +972,6 @@ typedef int H5C_ring_t; * * The fields of this structure are discussed individually below: * - * magic: Unsigned 32 bit integer that must always be set to - * H5C__H5C_CACHE_ENTRY_T_MAGIC when the entry is valid. - * The field must be set to H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC - * just before the entry is freed. - * - * This is necessary, as the LRU list can be changed out - * from under H5C__make_space_in_cache() by the serialize - * callback which may change the size of an existing entry, - * and/or load a new entry while serializing the target entry. - * - * This in turn can cause a recursive call to - * H5C__make_space_in_cache() which may either flush or evict - * the next entry that the first invocation of that function - * was about to examine. - * - * The magic field allows H5C__make_space_in_cache() to - * detect this case, and re-start its scan from the bottom - * of the LRU when this situation occurs. - * * cache_ptr: Pointer to the cache that this entry is contained within. * * addr: Base address of the cache entry on disk. 
@@ -1570,7 +1547,6 @@ typedef int H5C_ring_t; * ****************************************************************************/ typedef struct H5C_cache_entry_t { - uint32_t magic; H5C_t *cache_ptr; haddr_t addr; size_t size; @@ -1668,11 +1644,6 @@ typedef struct H5C_cache_entry_t { * * The fields of this structure are discussed individually below: * - * magic: Unsigned 32 bit integer that must always be set to - * H5C_IMAGE_ENTRY_T_MAGIC when the entry is valid. - * The field must be set to H5C_IMAGE_ENTRY_T_BAD_MAGIC - * just before the entry is freed. - * * addr: Base address of the cache entry on disk. * * size: Length of the cache entry on disk in bytes. @@ -1796,11 +1767,8 @@ typedef struct H5C_cache_entry_t { * callbacks must be used to update this image before it is * written to disk * - * ****************************************************************************/ - typedef struct H5C_image_entry_t { - uint32_t magic; haddr_t addr; size_t size; H5C_ring_t ring; diff --git a/src/H5Cquery.c b/src/H5Cquery.c index 6325d1f6d26..d342b7219fd 100644 --- a/src/H5Cquery.c +++ b/src/H5Cquery.c @@ -82,7 +82,7 @@ H5C_get_cache_auto_resize_config(const H5C_t *cache_ptr, H5C_auto_size_ctl_t *co FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") if (config_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad config_ptr on entry.") @@ -120,7 +120,7 @@ H5C_get_cache_size(const H5C_t *cache_ptr, size_t *max_size_ptr, size_t *min_cle FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") if (max_size_ptr != NULL) @@ -156,7 +156,7 @@ H5C_get_cache_flush_in_progress(const H5C_t *cache_ptr, hbool_t *flush_in_progre FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") if (flush_in_progress_ptr != NULL) @@ -189,7 +189,7 @@ H5C_get_cache_hit_rate(const H5C_t *cache_ptr, double *hit_rate_ptr) FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") if (hit_rate_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad hit_rate_ptr on entry.") @@ -242,18 +242,12 @@ H5C_get_entry_status(const H5F_t *f, haddr_t addr, size_t *size_ptr, hbool_t *in /* Sanity checks */ HDassert(f); HDassert(f->shared); - cache_ptr = f->shared->cache; - HDassert(cache_ptr != NULL); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(H5F_addr_defined(addr)); HDassert(in_cache_ptr != NULL); - /* this test duplicates two of the above asserts, but we need an - * invocation of HGOTO_ERROR to keep the compiler happy. 
- */ - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL) @@ -308,7 +302,7 @@ H5C_get_evictions_enabled(const H5C_t *cache_ptr, hbool_t *evictions_enabled_ptr FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") if (evictions_enabled_ptr == NULL) @@ -342,7 +336,6 @@ H5C_get_aux_ptr(const H5C_t *cache_ptr) /* Check arguments */ HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); FUNC_LEAVE_NOAPI(cache_ptr->aux_ptr) } /* H5C_get_aux_ptr() */ @@ -376,7 +369,6 @@ H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring) HDassert(f->shared); cache_ptr = f->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(H5F_addr_defined(addr)); /* Locate the entry at the address */ @@ -409,7 +401,7 @@ H5C_get_mdc_image_info(const H5C_t *cache_ptr, haddr_t *image_addr, hsize_t *ima FUNC_ENTER_NOAPI(FAIL) - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) + if (cache_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry") if (image_addr) diff --git a/src/H5Ctag.c b/src/H5Ctag.c index d821e280db7..a040a501d92 100644 --- a/src/H5Ctag.c +++ b/src/H5Ctag.c @@ -126,7 +126,6 @@ H5C_ignore_tags(H5C_t *cache) /* Assertions */ HDassert(cache != NULL); - HDassert(cache->magic == H5C__H5C_T_MAGIC); /* Set variable to ignore tag values upon assignment */ cache->ignore_tags = TRUE; @@ -153,7 +152,6 @@ H5C_get_ignore_tags(const H5C_t *cache) /* Sanity checks */ HDassert(cache); - HDassert(cache->magic == H5C__H5C_T_MAGIC); /* Return ignore tag value */ FUNC_LEAVE_NOAPI(cache->ignore_tags) @@ -177,7 +175,6 @@ H5C_get_num_objs_corked(const H5C_t *cache) /* Sanity checks */ HDassert(cache); - HDassert(cache->magic == H5C__H5C_T_MAGIC); /* Return value for num_objs_corked */ FUNC_LEAVE_NOAPI(cache->num_objs_corked) @@ -210,7 +207,6 @@ H5C__tag_entry(H5C_t *cache, H5C_cache_entry_t *entry) /* Assertions */ HDassert(cache != NULL); HDassert(entry != NULL); - HDassert(cache->magic == H5C__H5C_T_MAGIC); /* Get the tag */ tag = H5CX_get_tag(); @@ -293,7 +289,6 @@ H5C__untag_entry(H5C_t *cache, H5C_cache_entry_t *entry) /* Assertions */ HDassert(cache != NULL); HDassert(entry != NULL); - HDassert(cache->magic == H5C__H5C_T_MAGIC); /* Get the entry's tag info struct */ if (NULL != (tag_info = entry->tag_info)) { @@ -350,7 +345,6 @@ H5C__iter_tagged_entries_real(H5C_t *cache, haddr_t tag, H5C_tag_iter_cb_t cb, v /* Sanity checks */ HDassert(cache != NULL); - HDassert(cache->magic == H5C__H5C_T_MAGIC); /* Search the list of tagged object addresses in the cache */ HASH_FIND(hh, cache->tag_list, &tag, sizeof(haddr_t), tag_info); @@ -405,7 +399,6 @@ H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global, H5C_ta /* Sanity checks */ HDassert(cache != NULL); - HDassert(cache->magic == H5C__H5C_T_MAGIC); /* Iterate over the entries for this tag */ if (H5C__iter_tagged_entries_real(cache, tag, cb, cb_ctx) < 0) @@ -503,7 +496,6 @@ H5C_evict_tagged_entries(H5F_t *f, haddr_t tag, hbool_t match_global) HDassert(f->shared); cache = f->shared->cache; /* Get cache pointer */ HDassert(cache != NULL); - HDassert(cache->magic == H5C__H5C_T_MAGIC); /* Construct context for iterator callbacks */ ctx.f = f; @@ -606,7 +598,6 @@ 
H5C__mark_tagged_entries(H5C_t *cache, haddr_t tag) /* Sanity check */ HDassert(cache); - HDassert(cache->magic == H5C__H5C_T_MAGIC); /* Iterate through hash table entries, marking those with specified tag, as * well as any major global entries which should always be flushed @@ -875,7 +866,6 @@ H5C_expunge_tag_type_metadata(H5F_t *f, haddr_t tag, int type_id, unsigned flags HDassert(f->shared); cache = f->shared->cache; /* Get cache pointer */ HDassert(cache != NULL); - HDassert(cache->magic == H5C__H5C_T_MAGIC); /* Construct context for iterator callbacks */ ctx.f = f; diff --git a/src/H5EAcache.c b/src/H5EAcache.c index 968a9332e50..eccd3986162 100644 --- a/src/H5EAcache.c +++ b/src/H5EAcache.c @@ -1900,7 +1900,6 @@ H5EA__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size) /* Check arguments */ HDassert(dblock); - HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(dblock->cache_info.type == H5AC_EARRAY_DBLOCK); HDassert(fsf_size); diff --git a/src/H5FAcache.c b/src/H5FAcache.c index fdd56ff91f7..aa088e52448 100644 --- a/src/H5FAcache.c +++ b/src/H5FAcache.c @@ -972,7 +972,6 @@ H5FA__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size) /* Check arguments */ HDassert(dblock); - HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(dblock->cache_info.type == H5AC_FARRAY_DBLOCK); HDassert(fsf_size); diff --git a/src/H5FScache.c b/src/H5FScache.c index 313439ef37b..6967555421b 100644 --- a/src/H5FScache.c +++ b/src/H5FScache.c @@ -343,7 +343,6 @@ H5FS__cache_hdr_image_len(const void *_thing, size_t *image_len) /* Check arguments */ HDassert(fspace); - HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR); HDassert(image_len); @@ -394,7 +393,6 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t H5_AT /* Sanity check */ HDassert(f); HDassert(fspace); - HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR); HDassert(H5F_addr_defined(addr)); HDassert(new_addr); @@ -693,7 +691,6 @@ H5FS__cache_hdr_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBUG_UN HDassert(f); HDassert(image); HDassert(fspace); - HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR); HDassert(fspace->hdr_size == len); @@ -833,10 +830,6 @@ H5FS__cache_hdr_notify(H5AC_notify_action_t action, void *_thing) * * Purpose: Destroys a free space header in memory. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). 
- * * Return: Success: SUCCEED * Failure: FAIL * @@ -855,7 +848,6 @@ H5FS__cache_hdr_free_icr(void *_thing) /* Sanity checks */ HDassert(fspace); - HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR); /* We should not still be holding on to the free space section info */ @@ -1124,10 +1116,8 @@ H5FS__cache_sinfo_image_len(const void *_thing, size_t *image_len) /* Sanity checks */ HDassert(sinfo); - HDassert(sinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sinfo->cache_info.type == H5AC_FSPACE_SINFO); HDassert(sinfo->fspace); - HDassert(sinfo->fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sinfo->fspace->cache_info.type == H5AC_FSPACE_HDR); HDassert(image_len); @@ -1167,10 +1157,8 @@ H5FS__cache_sinfo_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t H5_ /* Sanity checks */ HDassert(f); HDassert(sinfo); - HDassert(sinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sinfo->cache_info.type == H5AC_FSPACE_SINFO); fspace = sinfo->fspace; - HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR); HDassert(fspace->cache_info.is_pinned); HDassert(H5F_addr_defined(addr)); @@ -1253,9 +1241,7 @@ H5FS__cache_sinfo_serialize(const H5F_t *f, void *_image, size_t len, void *_thi HDassert(f); HDassert(image); HDassert(sinfo); - HDassert(sinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sinfo->cache_info.type == H5AC_FSPACE_SINFO); - HDassert(sinfo->fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sinfo->fspace->cache_info.type == H5AC_FSPACE_HDR); HDassert(sinfo->fspace->cache_info.is_pinned); HDassert(sinfo->fspace->sect_size == len); @@ -1373,10 +1359,6 @@ H5FS__cache_sinfo_notify(H5AC_notify_action_t action, void *_thing) * Purpose: Free the memory used for the in core representation of the * free space manager section info. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). 
- * * Return: Success: SUCCEED * Failure: FAIL * @@ -1395,9 +1377,7 @@ H5FS__cache_sinfo_free_icr(void *_thing) /* Sanity checks */ HDassert(sinfo); - HDassert(sinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(sinfo->cache_info.type == H5AC_FSPACE_SINFO); - HDassert(sinfo->fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sinfo->fspace->cache_info.type == H5AC_FSPACE_HDR); HDassert(sinfo->fspace->cache_info.is_pinned); diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c index d590119574c..04e343494a8 100644 --- a/src/H5Fsuper.c +++ b/src/H5Fsuper.c @@ -248,7 +248,6 @@ H5F__update_super_ext_driver_msg(H5F_t *f) HDassert(f->shared); sblock = f->shared->sblock; HDassert(sblock); - HDassert(sblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sblock->cache_info.type == H5AC_SUPERBLOCK); /* Update the driver information message in the superblock extension diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c index 7dbaf22fae1..700e5d6744f 100644 --- a/src/H5Fsuper_cache.c +++ b/src/H5Fsuper_cache.c @@ -633,7 +633,6 @@ H5F__cache_superblock_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR HDassert(sblock); - HDassert(sblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sblock->cache_info.type == H5AC_SUPERBLOCK); HDassert(image_len); @@ -780,10 +779,6 @@ H5F__cache_superblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNU * Purpose: Destroy/release an "in core representation" of a data * structure * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). - * * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ @@ -796,7 +791,6 @@ H5F__cache_superblock_free_icr(void *_thing) FUNC_ENTER_PACKAGE HDassert(sblock); - HDassert(sblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(sblock->cache_info.type == H5AC_SUPERBLOCK); /* Destroy superblock */ @@ -933,7 +927,6 @@ H5F__cache_drvrinfo_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR HDassert(drvinfo); - HDassert(drvinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(drvinfo->cache_info.type == H5AC_DRVRINFO); HDassert(image_len); @@ -965,7 +958,6 @@ H5F__cache_drvrinfo_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBU HDassert(f); HDassert(image); HDassert(drvinfo); - HDassert(drvinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(drvinfo->cache_info.type == H5AC_DRVRINFO); HDassert(len == (size_t)(H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo->len)); @@ -1001,10 +993,6 @@ H5F__cache_drvrinfo_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBU * Purpose: Destroy/release an "in core representation" of a data * structure * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). 
- * * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ @@ -1016,7 +1004,6 @@ H5F__cache_drvrinfo_free_icr(void *_thing) FUNC_ENTER_PACKAGE_NOERR HDassert(drvinfo); - HDassert(drvinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(drvinfo->cache_info.type == H5AC_DRVRINFO); /* Destroy driver info message */ diff --git a/src/H5Gcache.c b/src/H5Gcache.c index e088fd81b70..60a0f2837a3 100644 --- a/src/H5Gcache.c +++ b/src/H5Gcache.c @@ -216,7 +216,6 @@ H5G__cache_node_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR HDassert(sym); - HDassert(sym->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sym->cache_info.type == H5AC_SNODE); HDassert(image_len); @@ -248,7 +247,6 @@ H5G__cache_node_serialize(const H5F_t *f, void *_image, size_t len, void *_thing HDassert(f); HDassert(image); HDassert(sym); - HDassert(sym->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(sym->cache_info.type == H5AC_SNODE); HDassert(len == sym->node_size); @@ -281,10 +279,6 @@ H5G__cache_node_serialize(const H5F_t *f, void *_image, size_t len, void *_thing * * Purpose: Destroy a symbol table node in memory * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). - * * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ @@ -297,7 +291,6 @@ H5G__cache_node_free_icr(void *_thing) FUNC_ENTER_PACKAGE HDassert(sym); - HDassert(sym->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(sym->cache_info.type == H5AC_SNODE); /* Destroy symbol table node */ diff --git a/src/H5HFcache.c b/src/H5HFcache.c index ab01c1e7aa6..be93f3b0900 100644 --- a/src/H5HFcache.c +++ b/src/H5HFcache.c @@ -606,7 +606,6 @@ H5HF__cache_hdr_image_len(const void *_thing, size_t *image_len) /* Sanity checks */ HDassert(hdr); - HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR); HDassert(image_len); @@ -648,7 +647,6 @@ H5HF__cache_hdr_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t len, /* Sanity checks */ HDassert(f); HDassert(hdr); - HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR); HDassert(H5F_addr_defined(addr)); HDassert(addr == hdr->heap_addr); @@ -736,7 +734,6 @@ H5HF__cache_hdr_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBUG_UN HDassert(f); HDassert(image); HDassert(hdr); - HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR); HDassert(len == hdr->heap_size); @@ -828,10 +825,6 @@ H5HF__cache_hdr_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBUG_UN * This routine also does not free the file space that may * be allocated to the header. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). 
- * * Return: Success: SUCCEED * Failure: FAIL * @@ -850,7 +843,6 @@ H5HF__cache_hdr_free_icr(void *_thing) /* Sanity checks */ HDassert(hdr); - HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR); HDassert(hdr->rc == 0); @@ -1147,7 +1139,6 @@ H5HF__cache_iblock_image_len(const void *_thing, size_t *image_len) /* Sanity checks */ HDassert(iblock); - HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK); HDassert(image_len); @@ -1189,7 +1180,6 @@ H5HF__cache_iblock_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t H5 /* Sanity checks */ HDassert(f); HDassert(iblock); - HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK); HDassert(iblock->cache_info.size == iblock->size); HDassert(H5F_addr_defined(addr)); @@ -1199,7 +1189,6 @@ H5HF__cache_iblock_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t H5 HDassert(flags); hdr = iblock->hdr; HDassert(hdr); - HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR); #ifndef NDEBUG @@ -1318,7 +1307,6 @@ H5HF__cache_iblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBUG HDassert(f); HDassert(image); HDassert(iblock); - HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK); HDassert(iblock->cache_info.size == iblock->size); HDassert(len == iblock->size); @@ -1431,7 +1419,6 @@ H5HF__cache_iblock_notify(H5AC_notify_action_t action, void *_thing) /* Sanity checks */ HDassert(iblock); - HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK); HDassert(iblock->hdr); @@ -1508,10 +1495,6 @@ H5HF__cache_iblock_notify(H5AC_notify_action_t action, void *_thing) * Purpose: Unlink the supplied instance of H5HF_indirect_t from the * fractal heap and free its memory. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). 
- * * Return: Success: SUCCEED * Failure: FAIL * @@ -1530,7 +1513,6 @@ H5HF__cache_iblock_free_icr(void *thing) /* Sanity checks */ HDassert(iblock); - HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK); HDassert(iblock->rc == 0); HDassert(iblock->hdr); @@ -1757,7 +1739,6 @@ H5HF__cache_dblock_deserialize(const void *_image, size_t len, void *_udata, hbo HDassert(par_info); hdr = par_info->hdr; HDassert(hdr); - HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR); HDassert(dirty); @@ -1928,7 +1909,6 @@ H5HF__cache_dblock_image_len(const void *_thing, size_t *image_len) /* Sanity checks */ HDassert(dblock); - HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(dblock->cache_info.type == H5AC_FHEAP_DBLOCK); HDassert(image_len); @@ -2071,7 +2051,6 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t le /* Sanity checks */ HDassert(f); HDassert(dblock); - HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(dblock->cache_info.type == H5AC_FHEAP_DBLOCK); HDassert(dblock->write_buf == NULL); HDassert(dblock->write_size == 0); @@ -2089,7 +2068,6 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t le hdr->f = (H5F_t *)f; HDassert(hdr); - HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR); if (dblock->parent) { @@ -2102,7 +2080,6 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t le par_iblock = dblock->parent; par_entry = dblock->par_entry; - HDassert(par_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(par_iblock->cache_info.type == H5AC_FHEAP_IBLOCK); HDassert(H5F_addr_eq(par_iblock->ents[par_entry].addr, addr)); } /* end if */ @@ -2437,7 +2414,6 @@ H5HF__cache_dblock_serialize(const H5F_t H5_ATTR_NDEBUG_UNUSED *f, void *image, HDassert(image); HDassert(len > 0); HDassert(dblock); - HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(dblock->cache_info.type == H5AC_FHEAP_DBLOCK); HDassert((dblock->blk != dblock->write_buf) || (dblock->cache_info.size == dblock->size)); HDassert(dblock->write_buf); @@ -2485,7 +2461,6 @@ H5HF__cache_dblock_notify(H5AC_notify_action_t action, void *_thing) /* Sanity checks */ HDassert(dblock); - HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(dblock->cache_info.type == H5AC_FHEAP_DBLOCK); HDassert(dblock->hdr); @@ -2532,10 +2507,6 @@ H5HF__cache_dblock_notify(H5AC_notify_action_t action, void *_thing) * Purpose: Free the in core memory allocated to the supplied direct * block. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). 
- * * Return: Success: SUCCEED * Failure: FAIL * @@ -2554,7 +2525,6 @@ H5HF__cache_dblock_free_icr(void *_thing) /* Sanity checks */ HDassert(dblock); - HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(dblock->cache_info.type == H5AC_FHEAP_DBLOCK); /* Destroy fractal heap direct block */ @@ -2589,7 +2559,6 @@ H5HF__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size) /* Sanity checks */ HDassert(dblock); - HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(dblock->cache_info.type == H5AC_FHEAP_DBLOCK); HDassert(dblock->file_size > 0); HDassert(fsf_size); @@ -2677,7 +2646,6 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr, hbool_t *fd_ /* Sanity checks */ HDassert(f); HDassert(hdr); - HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR); HDassert(fd_clean); HDassert(clean); @@ -2884,7 +2852,6 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr, hbool_t *fd_ * in memory for the duration of the call. Do some sanity checks, * and then call H5HF__cache_verify_iblock_descendants_clean(). */ - HDassert(root_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(root_iblock->cache_info.type == H5AC_FHEAP_IBLOCK); if (H5HF__cache_verify_iblock_descendants_clean(f, hdr->heap_addr, root_iblock, @@ -3042,7 +3009,6 @@ H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, haddr_t fd_parent_addr, H5 HDassert(f); HDassert(H5F_addr_defined(fd_parent_addr)); HDassert(iblock); - HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK); HDassert(iblock_status); HDassert(fd_clean); @@ -3161,7 +3127,6 @@ H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, haddr_t fd_parent_addr, H5HF_ HDassert(f); HDassert(H5F_addr_defined(fd_parent_addr)); HDassert(iblock); - HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK); HDassert(fd_clean); HDassert(*fd_clean); @@ -3326,7 +3291,6 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr, H5 HDassert(f); HDassert(H5F_addr_defined(fd_parent_addr)); HDassert(iblock); - HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK); HDassert(fd_clean); HDassert(*fd_clean); @@ -3479,7 +3443,6 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr, H5 * that we have the correct one. 
*/ HDassert(child_iblock); - HDassert(child_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(child_iblock->cache_info.type == H5AC_FHEAP_IBLOCK); HDassert(child_iblock->addr == child_iblock_addr); diff --git a/src/H5HGcache.c b/src/H5HGcache.c index bbfae7cdee9..4422fb96854 100644 --- a/src/H5HGcache.c +++ b/src/H5HGcache.c @@ -411,7 +411,6 @@ H5HG__cache_heap_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR HDassert(heap); - HDassert(heap->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(heap->cache_info.type == H5AC_GHEAP); HDassert(heap->size >= H5HG_MINSIZE); HDassert(image_len); @@ -442,7 +441,6 @@ H5HG__cache_heap_serialize(const H5F_t H5_ATTR_NDEBUG_UNUSED *f, void *image, si HDassert(f); HDassert(image); HDassert(heap); - HDassert(heap->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(heap->cache_info.type == H5AC_GHEAP); HDassert(heap->size == len); HDassert(heap->chunk); @@ -458,10 +456,6 @@ H5HG__cache_heap_serialize(const H5F_t H5_ATTR_NDEBUG_UNUSED *f, void *image, si * * Purpose: Free the in memory representation of the supplied global heap. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). - * * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ @@ -474,7 +468,6 @@ H5HG__cache_heap_free_icr(void *_thing) FUNC_ENTER_PACKAGE HDassert(heap); - HDassert(heap->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(heap->cache_info.type == H5AC_GHEAP); /* Destroy global heap collection */ diff --git a/src/H5HLcache.c b/src/H5HLcache.c index c04efb6b3ad..56082f0b757 100644 --- a/src/H5HLcache.c +++ b/src/H5HLcache.c @@ -505,7 +505,6 @@ H5HL__cache_prefix_image_len(const void *_thing, size_t *image_len) /* Check arguments */ HDassert(prfx); - HDassert(prfx->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(prfx->cache_info.type == H5AC_LHEAP_PRFX); HDassert(image_len); @@ -551,7 +550,6 @@ H5HL__cache_prefix_serialize(const H5_ATTR_NDEBUG_UNUSED H5F_t *f, void *_image, HDassert(f); HDassert(image); HDassert(prfx); - HDassert(prfx->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(prfx->cache_info.type == H5AC_LHEAP_PRFX); HDassert(H5F_addr_eq(prfx->cache_info.addr, prfx->heap->prfx_addr)); HDassert(prfx->heap); @@ -626,10 +624,6 @@ H5HL__cache_prefix_serialize(const H5_ATTR_NDEBUG_UNUSED H5F_t *f, void *_image, * from a failed speculative load attempt. See comments below for * details. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). 
- * * Return: Success: SUCCEED * Failure: FAIL * @@ -648,7 +642,6 @@ H5HL__cache_prefix_free_icr(void *_thing) /* Check arguments */ HDassert(prfx); - HDassert(prfx->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(prfx->cache_info.type == H5AC_LHEAP_PRFX); HDassert(H5F_addr_eq(prfx->cache_info.addr, prfx->heap->prfx_addr)); @@ -778,7 +771,6 @@ H5HL__cache_datablock_image_len(const void *_thing, size_t *image_len) /* Check arguments */ HDassert(dblk); - HDassert(dblk->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(dblk->cache_info.type == H5AC_LHEAP_DBLK); HDassert(dblk->heap); HDassert(dblk->heap->dblk_size > 0); @@ -816,7 +808,6 @@ H5HL__cache_datablock_serialize(const H5F_t H5_ATTR_NDEBUG_UNUSED *f, void *imag HDassert(f); HDassert(image); HDassert(dblk); - HDassert(dblk->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(dblk->cache_info.type == H5AC_LHEAP_DBLK); HDassert(dblk->heap); heap = dblk->heap; @@ -909,10 +900,6 @@ H5HL__cache_datablock_notify(H5C_notify_action_t action, void *_thing) * * Purpose: Free the in memory representation of the supplied local heap data block. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). - * * Return: Success: SUCCEED * Failure: FAIL * @@ -931,7 +918,6 @@ H5HL__cache_datablock_free_icr(void *_thing) /* Check arguments */ HDassert(dblk); - HDassert(dblk->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(dblk->cache_info.type == H5AC_LHEAP_DBLK); /* Destroy the data block */ diff --git a/src/H5Oalloc.c b/src/H5Oalloc.c index 84846788bbf..5a95afe8061 100644 --- a/src/H5Oalloc.c +++ b/src/H5Oalloc.c @@ -1758,8 +1758,6 @@ H5O__move_msgs_forward(H5F_t *f, H5O_t *oh) if (0 != null_msg->chunkno) { /* Sanity checks */ HDassert(null_chk_mdc_obj); - HDassert(((H5C_cache_entry_t *)null_chk_mdc_obj)->magic == - H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(((H5C_cache_entry_t *)null_chk_mdc_obj)->type); HDassert(((H5C_cache_entry_t *)null_chk_mdc_obj)->type->id == H5AC_OHDR_CHK_ID); diff --git a/src/H5Ocache.c b/src/H5Ocache.c index 66b092a01ec..c6898e54c82 100644 --- a/src/H5Ocache.c +++ b/src/H5Ocache.c @@ -344,7 +344,6 @@ H5O__cache_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR HDassert(oh); - HDassert(oh->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(oh->cache_info.type == H5AC_OHDR); HDassert(image_len); @@ -375,7 +374,6 @@ H5O__cache_serialize(const H5F_t *f, void *image, size_t len, void *_thing) HDassert(f); HDassert(image); HDassert(oh); - HDassert(oh->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(oh->cache_info.type == H5AC_OHDR); HDassert(oh->chunk[0].size == len); #ifdef H5O_DEBUG @@ -568,10 +566,6 @@ H5O__cache_notify(H5AC_notify_action_t action, void *_thing) * * Purpose: Free the in core representation of the supplied object header. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). 
- * * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ @@ -584,7 +578,6 @@ H5O__cache_free_icr(void *_thing) FUNC_ENTER_PACKAGE HDassert(oh); - HDassert(oh->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(oh->cache_info.type == H5AC_OHDR); /* Destroy object header */ @@ -749,7 +742,6 @@ H5O__cache_chk_image_len(const void *_thing, size_t *image_len) FUNC_ENTER_PACKAGE_NOERR HDassert(chk_proxy); - HDassert(chk_proxy->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(chk_proxy->cache_info.type == H5AC_OHDR_CHK); HDassert(chk_proxy->oh); HDassert(image_len); @@ -781,7 +773,6 @@ H5O__cache_chk_serialize(const H5F_t *f, void *image, size_t len, void *_thing) HDassert(f); HDassert(image); HDassert(chk_proxy); - HDassert(chk_proxy->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(chk_proxy->cache_info.type == H5AC_OHDR_CHK); HDassert(chk_proxy->oh); HDassert(chk_proxy->oh->chunk[chk_proxy->chunkno].size == len); @@ -825,8 +816,6 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) /* Add flush dependency on chunk with continuation, if one exists */ if (chk_proxy->fd_parent) { /* Sanity checks */ - HDassert(((H5C_cache_entry_t *)(chk_proxy->fd_parent))->magic == - H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(((H5C_cache_entry_t *)(chk_proxy->fd_parent))->type); HDassert((((H5C_cache_entry_t *)(chk_proxy->fd_parent))->type->id == H5AC_OHDR_ID) || (((H5C_cache_entry_t *)(chk_proxy->fd_parent))->type->id == H5AC_OHDR_CHK_ID)); @@ -883,8 +872,6 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) /* Remove flush dependency on parent object header chunk, if one is set */ if (chk_proxy->fd_parent) { /* Sanity checks */ - HDassert(((H5C_cache_entry_t *)(chk_proxy->fd_parent))->magic == - H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(((H5C_cache_entry_t *)(chk_proxy->fd_parent))->type); HDassert((((H5C_cache_entry_t *)(chk_proxy->fd_parent))->type->id == H5AC_OHDR_ID) || (((H5C_cache_entry_t *)(chk_proxy->fd_parent))->type->id == H5AC_OHDR_CHK_ID)); @@ -919,10 +906,6 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) * Purpose: Free the in core memory associated with the supplied object * header continuation chunk. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). 
- * * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ @@ -935,7 +918,6 @@ H5O__cache_chk_free_icr(void *_thing) FUNC_ENTER_PACKAGE HDassert(chk_proxy); - HDassert(chk_proxy->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(chk_proxy->cache_info.type == H5AC_OHDR_CHK); /* Destroy object header chunk proxy */ diff --git a/src/H5SMcache.c b/src/H5SMcache.c index 64d34c87a43..30676f1cdfb 100644 --- a/src/H5SMcache.c +++ b/src/H5SMcache.c @@ -324,7 +324,6 @@ H5SM__cache_table_image_len(const void *_thing, size_t *image_len) /* Check arguments */ HDassert(table); - HDassert(table->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(table->cache_info.type == H5AC_SOHM_TABLE); HDassert(image_len); @@ -361,7 +360,6 @@ H5SM__cache_table_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBUG_ HDassert(f); HDassert(image); HDassert(table); - HDassert(table->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(table->cache_info.type == H5AC_SOHM_TABLE); HDassert(table->table_size == len); @@ -423,10 +421,6 @@ H5SM__cache_table_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBUG_ * * Purpose: Free memory used by the SOHM table. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). - * * Return: Success: SUCCEED * Failure: FAIL * @@ -445,7 +439,6 @@ H5SM__cache_table_free_icr(void *_thing) /* Check arguments */ HDassert(table); - HDassert(table->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(table->cache_info.type == H5AC_SOHM_TABLE); /* Destroy Shared Object Header Message table */ @@ -639,7 +632,6 @@ H5SM__cache_list_image_len(const void *_thing, size_t *image_len) /* Check arguments */ HDassert(list); - HDassert(list->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(list->cache_info.type == H5AC_SOHM_LIST); HDassert(list->header); HDassert(image_len); @@ -680,7 +672,6 @@ H5SM__cache_list_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBUG_U HDassert(f); HDassert(image); HDassert(list); - HDassert(list->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(list->cache_info.type == H5AC_SOHM_LIST); HDassert(list->header); HDassert(list->header->list_size == len); @@ -727,10 +718,6 @@ H5SM__cache_list_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_NDEBUG_U * * Purpose: Free all memory used by the list. * - * Note: The metadata cache sets the object's cache_info.magic to - * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr - * callback (checked in assert). 
- * * Return: Success: SUCCEED * Failure: FAIL * @@ -749,7 +736,6 @@ H5SM__cache_list_free_icr(void *_thing) /* Check arguments */ HDassert(list); - HDassert(list->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(list->cache_info.type == H5AC_SOHM_LIST); /* Destroy Shared Object Header Message list */ diff --git a/test/cache.c b/test/cache.c index 517c0b55e92..203c09a8db9 100644 --- a/test/cache.c +++ b/test/cache.c @@ -24793,17 +24793,6 @@ check_auto_cache_resize_input_errs(unsigned paged) } } - if (pass) { - - result = H5C_get_cache_auto_resize_config((const H5C_t *)&test_auto_size_ctl, &test_auto_size_ctl); - - if (result != FAIL) { - - pass = FALSE; - failure_mssg = "H5C_get_cache_auto_resize_config accepted bad cache_ptr.\n"; - } - } - if (pass) { result = H5C_get_cache_auto_resize_config(cache_ptr, NULL); @@ -33918,13 +33907,6 @@ setup_cache(size_t max_cache_size, size_t min_clean_size, unsigned paged) if (verbose) HDfprintf(stdout, "%s: H5C_create() failed.\n", __func__); } - else if (cache_ptr->magic != H5C__H5C_T_MAGIC) { - pass = FALSE; - failure_mssg = "Bad cache_ptr magic."; - - if (verbose) - HDfprintf(stdout, "%s: Bad cache_ptr magic.\n", __func__); - } } if (show_progress) /* 7 */ diff --git a/test/cache_api.c b/test/cache_api.c index a34a6ca723b..f40782cd6ee 100644 --- a/test/cache_api.c +++ b/test/cache_api.c @@ -241,8 +241,7 @@ check_fapl_mdc_api_calls(unsigned paged, hid_t fcpl_id) /* verify that we can access the internal version of the cache config */ if (pass) { - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC) || - (cache_ptr->resize_ctl.version != H5C__CURR_AUTO_SIZE_CTL_VER)) { + if (cache_ptr == NULL || cache_ptr->resize_ctl.version != H5C__CURR_AUTO_SIZE_CTL_VER) { pass = FALSE; failure_mssg = "Can't access cache resize_ctl.\n"; @@ -385,8 +384,7 @@ check_fapl_mdc_api_calls(unsigned paged, hid_t fcpl_id) /* verify that we can access the internal version of the cache config */ if (pass) { - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC) || - (cache_ptr->resize_ctl.version != H5C__CURR_AUTO_SIZE_CTL_VER)) { + if (cache_ptr == NULL || cache_ptr->resize_ctl.version != H5C__CURR_AUTO_SIZE_CTL_VER) { pass = FALSE; failure_mssg = "Can't access cache resize_ctl.\n"; diff --git a/test/cache_common.c b/test/cache_common.c index e647abf0896..d755e1ede81 100644 --- a/test/cache_common.c +++ b/test/cache_common.c @@ -1335,9 +1335,7 @@ free_icr(test_entry_t *entry, int32_t H5_ATTR_NDEBUG_UNUSED entry_type) HDassert(entry == &(entries[entry->type][entry->index])); HDassert(entry == entry->self); HDassert(entry->cache_ptr != NULL); - HDassert(entry->cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert((entry->header.destroy_in_progress) || (entry->header.addr == entry->addr)); - HDassert(entry->header.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC); HDassert(entry->header.size == entry->size); HDassert((entry->type == VARIABLE_ENTRY_TYPE) || (entry->size == entry_sizes[entry->type])); HDassert(entry->header.tl_next == NULL); @@ -1675,7 +1673,6 @@ execute_flush_op(H5F_t *file_ptr, struct test_entry_t *entry_ptr, struct flush_o HDassert(file_ptr); cache_ptr = file_ptr->shared->cache; HDassert(cache_ptr != NULL); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(entry_ptr != NULL); HDassert(entry_ptr == entry_ptr->self); HDassert(entry_ptr->header.addr == entry_ptr->addr); @@ -4973,16 +4970,6 @@ check_and_validate_cache_hit_rate(hid_t file_id, double *hit_rate_ptr, hbool_t d } } - /* verify that we can access the cache data 
structure */ - if (pass) { - - if (cache_ptr->magic != H5C__H5C_T_MAGIC) { - - pass = FALSE; - failure_mssg = "Can't access cache resize_ctl."; - } - } - /* compare the cache's internal configuration with the expected value */ if (pass) { @@ -5102,16 +5089,6 @@ check_and_validate_cache_size(hid_t file_id, size_t *max_size_ptr, size_t *min_c } } - /* verify that we can access the cache data structure */ - if (pass) { - - if (cache_ptr->magic != H5C__H5C_T_MAGIC) { - - pass = FALSE; - failure_mssg = "Can't access cache data structure."; - } - } - /* compare the cache's internal configuration with the expected value */ if (pass) { @@ -5268,8 +5245,7 @@ validate_mdc_config(hid_t file_id, H5AC_cache_config_t *ext_config_ptr, hbool_t /* verify that we can access the internal version of the cache config */ if (pass) { - if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC) || - (cache_ptr->resize_ctl.version != H5C__CURR_AUTO_SIZE_CTL_VER)) { + if (cache_ptr == NULL || cache_ptr->resize_ctl.version != H5C__CURR_AUTO_SIZE_CTL_VER) { pass = FALSE; HDsnprintf(tmp_msg_buf, sizeof(tmp_msg_buf), "Can't access cache resize_ctl #%d.", test_num); @@ -5352,7 +5328,6 @@ dump_LRU(H5F_t * file_ptr) H5C_t *cache_ptr = file_ptr->shared->cache; HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); entry_ptr = cache_ptr->LRU_head_ptr; diff --git a/test/cache_image.c b/test/cache_image.c index c3c0b497965..797745485bc 100644 --- a/test/cache_image.c +++ b/test/cache_image.c @@ -7659,7 +7659,6 @@ evict_on_close_test(hbool_t H5_ATTR_PARALLEL_UNUSED single_file_vfd) if (verbose) { HDassert(cache_ptr); - HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDfprintf(stdout, "index size / index dirty size = %lld / %lld\n", (long long)(cache_ptr->index_size), (long long)(cache_ptr->dirty_index_size)); diff --git a/testpar/t_cache.c b/testpar/t_cache.c index 4d3aff53f03..2507c7e7590 100644 --- a/testpar/t_cache.c +++ b/testpar/t_cache.c @@ -2293,13 +2293,11 @@ datum_serialize(const H5F_t *f, void H5_ATTR_NDEBUG_UNUSED *image_ptr, size_t le HDassert(f); HDassert(f->shared); HDassert(f->shared->cache); - HDassert(f->shared->cache->magic == H5C__H5C_T_MAGIC); HDassert(f->shared->cache->aux_ptr); aux_ptr = (H5AC_aux_t *)(f->shared->cache->aux_ptr); HDassert(aux_ptr); - HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); entry_ptr->aux_ptr = aux_ptr; @@ -2526,7 +2524,6 @@ datum_notify(H5C_notify_action_t action, void *thing) } HDassert(entry_ptr->aux_ptr); - HDassert(entry_ptr->aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); aux_ptr = entry_ptr->aux_ptr; entry_ptr->aux_ptr = NULL; @@ -2898,8 +2895,7 @@ insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags) aux_ptr = ((H5AC_aux_t *)(cache_ptr->aux_ptr)); - if (!((aux_ptr != NULL) && (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC) && - (aux_ptr->dirty_bytes == 0))) { + if (!(aux_ptr != NULL && aux_ptr->dirty_bytes == 0)) { nerrors++; if (verbose) { @@ -3525,8 +3521,7 @@ move_entry(H5F_t *file_ptr, int32_t old_idx, int32_t new_idx) aux_ptr = ((H5AC_aux_t *)(file_ptr->shared->cache->aux_ptr)); - if (!((aux_ptr != NULL) && (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC) && - (aux_ptr->dirty_bytes == 0))) { + if (!(aux_ptr != NULL && aux_ptr->dirty_bytes == 0)) { nerrors++; if (verbose) { @@ -3750,12 +3745,6 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n", world_mpi_rank, __func__); } } - else if (cache_ptr->magic != H5C__H5C_T_MAGIC) { - nerrors++; - if 
(verbose) { - HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n", world_mpi_rank, __func__); - } - } else { cache_ptr->ignore_tags = TRUE; *fid_ptr = fid; @@ -3805,14 +3794,6 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n", world_mpi_rank, __func__); } } - else if (((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic != H5AC__H5AC_AUX_T_MAGIC) { - - nerrors++; - if (verbose) { - HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC.\n", - world_mpi_rank, __func__); - } - } else if (((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy != metadata_write_strategy) { nerrors++; diff --git a/testpar/t_file.c b/testpar/t_file.c index 90ae22d098d..716379bc985 100644 --- a/testpar/t_file.c +++ b/testpar/t_file.c @@ -467,7 +467,6 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str VRFY((f != NULL), ""); cache_ptr = f->shared->cache; - VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), ""); cache_ptr->ignore_tags = TRUE; H5C_stats__reset(cache_ptr); @@ -634,7 +633,6 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t VRFY((f != NULL), ""); cache_ptr = f->shared->cache; - VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), ""); MPI_Barrier(MPI_COMM_WORLD); @@ -713,7 +711,6 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t entry_ptr = cache_ptr->index[i]; while (entry_ptr != NULL) { - HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); HDassert(entry_ptr->is_dirty == FALSE); if (!entry_ptr->is_pinned && !entry_ptr->is_protected) { From c421f0f6aa1592864e75b29894dafbf51512995c Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 17 May 2023 10:30:45 -0500 Subject: [PATCH 224/231] Change UBSAN for undefined instaed of address (#2964) --- src/H5detect.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/H5detect.c b/src/H5detect.c index daf7708ac61..0f3be9c719c 100644 --- a/src/H5detect.c +++ b/src/H5detect.c @@ -48,8 +48,8 @@ static const char *FileHeader = "\n\ #include "H5Rpublic.h" #if defined(__has_attribute) -#if __has_attribute(no_sanitize_address) -#define HDF_NO_UBSAN __attribute__((no_sanitize_address)) +#if __has_attribute(no_sanitize) +#define HDF_NO_UBSAN __attribute__((no_sanitize("undefined"))) #else #define HDF_NO_UBSAN #endif From 664b37870f80709ca6914d26d5f5875941fb3585 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Wed, 17 May 2023 10:34:10 -0500 Subject: [PATCH 225/231] added missing H5ES function DLLs (#2969) --- fortran/src/H5ESff.F90 | 12 ++++++++++++ fortran/src/hdf5_fortrandll.def.in | 9 +++++++++ 2 files changed, 21 insertions(+) diff --git a/fortran/src/H5ESff.F90 b/fortran/src/H5ESff.F90 index 5b19a514955..520257c2048 100644 --- a/fortran/src/H5ESff.F90 +++ b/fortran/src/H5ESff.F90 @@ -18,6 +18,18 @@ ! help@hdfgroup.org. * ! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ! +! NOTES +! _____ __ __ _____ ____ _____ _______ _ _ _______ +! |_ _| \/ | __ \ / __ \| __ \__ __|/\ | \ | |__ __| +! **** | | | \ / | |__) | | | | |__) | | | / \ | \| | | | **** +! **** | | | |\/| | ___/| | | | _ / | | / /\ \ | . ` | | | **** +! **** _| |_| | | | | | |__| | | \ \ | |/ ____ \| |\ | | | **** +! |_____|_| |_|_| \____/|_| \_\ |_/_/ \_\_| \_| |_| +! +! If you add a new H5ES function to the module you must add the function name +! 
to the Windows dll file 'hdf5_fortrandll.def.in' in the fortran/src directory. +! This is needed for Windows based operating systems. +! MODULE H5ES diff --git a/fortran/src/hdf5_fortrandll.def.in b/fortran/src/hdf5_fortrandll.def.in index 47196336a36..80de79b0dd2 100644 --- a/fortran/src/hdf5_fortrandll.def.in +++ b/fortran/src/hdf5_fortrandll.def.in @@ -107,6 +107,15 @@ H5E_mp_H5EPRINT_F H5E_mp_H5EGET_MAJOR_F H5E_mp_H5EGET_MINOR_F H5E_mp_H5ESET_AUTO_F +; H5ES +H5ES_mp_H5ESCREATE_F +H5ES_mp_H5ESGET_COUNT_F +H5ES_mp_H5ESGET_OP_COUNTER_F +H5ES_mp_H5ESWAIT_F +H5ES_mp_H5ESCANCEL_F +H5ES_mp_H5ESGET_ERR_STATUS_F +H5ES_mp_H5ESGET_ERR_COUNT_F +H5ES_mp_H5ESCLOSE_F ; H5F H5F_mp_H5FCREATE_F H5F_mp_H5FCREATE_ASYNC_F From 752ce095a3e6cdb02f20cd05689724a0b01bc410 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Wed, 17 May 2023 10:38:31 -0500 Subject: [PATCH 226/231] Updated INTENT to IN for C_PTR types dummy args. (#2961) * Changed to INTENT(IN) for C_PTR dummy args since it refers to whether C_PTR can be changed and does not relate to the INTENT state of the target. --- fortran/src/H5Aff.F90 | 76 +++++++++++++++++++++---------------------- fortran/src/H5Dff.F90 | 56 +++++++++++++++---------------- fortran/src/H5Fff.F90 | 20 ++++++------ fortran/src/H5Gff.F90 | 24 +++++++------- fortran/src/H5Lff.F90 | 26 +++++++-------- fortran/src/H5Off.F90 | 30 ++++++++--------- fortran/src/H5Pff.F90 | 20 ++++++------ fortran/src/H5Tff.F90 | 14 ++++---- 8 files changed, 133 insertions(+), 133 deletions(-) diff --git a/fortran/src/H5Aff.F90 b/fortran/src/H5Aff.F90 index fab86bd34a3..8159731fe55 100644 --- a/fortran/src/H5Aff.F90 +++ b/fortran/src/H5Aff.F90 @@ -229,8 +229,8 @@ SUBROUTINE h5acreate_async_f(loc_id, name, type_id, space_id, attr_id, es_id, & INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T) , INTENT(IN), OPTIONAL :: acpl_id INTEGER(HID_T) , INTENT(IN), OPTIONAL :: aapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: acpl_id_default @@ -622,8 +622,8 @@ SUBROUTINE h5aclose_async_f(attr_id, es_id, hdferr, file, func, line) INTEGER(HID_T), INTENT(IN) :: attr_id INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -797,8 +797,8 @@ SUBROUTINE h5arename_by_name_async_f(loc_id, obj_name, old_attr_name, new_attr_n INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: lapl_id_default @@ -908,8 +908,8 @@ SUBROUTINE h5aopen_async_f(obj_id, attr_name, attr_id, es_id, hdferr, aapl_id, f INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN) , OPTIONAL :: line INTEGER(HID_T) :: aapl_id_default @@ -1178,8 +1178,8 @@ SUBROUTINE h5aopen_by_idx_async_f(loc_id, obj_name, idx_type, order, n, attr_id, INTEGER, INTENT(OUT) :: 
hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN) , OPTIONAL :: line INTEGER(HID_T) :: aapl_id_default @@ -1534,8 +1534,8 @@ SUBROUTINE h5acreate_by_name_async_f(loc_id, obj_name, attr_name, type_id, space INTEGER(HID_T), INTENT(IN), OPTIONAL :: acpl_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: acpl_id_default @@ -1657,11 +1657,11 @@ SUBROUTINE h5aexists_async_f(obj_id, attr_name, attr_exists, es_id, hdferr, file IMPLICIT NONE INTEGER(HID_T) , INTENT(IN) :: obj_id CHARACTER(LEN=*), INTENT(IN) :: attr_name - TYPE(C_PTR) , INTENT(INOUT) :: attr_exists + TYPE(C_PTR) , INTENT(IN) :: attr_exists INTEGER(HID_T) , INTENT(IN) :: es_id INTEGER , INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line CHARACTER(LEN=LEN_TRIM(attr_name)+1,KIND=C_CHAR) :: c_attr_name @@ -1774,12 +1774,12 @@ SUBROUTINE h5aexists_by_name_async_f(loc_id, obj_name, attr_name, attr_exists, e INTEGER (HID_T), INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: obj_name CHARACTER(LEN=*), INTENT(IN) :: attr_name - TYPE(C_PTR) , INTENT(INOUT) :: attr_exists + TYPE(C_PTR) , INTENT(IN) :: attr_exists INTEGER (HID_T), INTENT(IN) :: es_id INTEGER , INTENT(OUT) :: hdferr INTEGER (HID_T), INTENT(IN) , OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN) , OPTIONAL :: line INTEGER(HID_T) :: lapl_id_default @@ -1912,8 +1912,8 @@ SUBROUTINE h5aopen_by_name_async_f(loc_id, obj_name, attr_name, attr_id, es_id, INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: aapl_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: aapl_id_default @@ -2026,8 +2026,8 @@ SUBROUTINE h5arename_async_f(loc_id, old_attr_name, new_attr_name, es_id, hdferr CHARACTER(LEN=*), INTENT(IN) :: new_attr_name INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -2084,13 +2084,13 @@ END SUBROUTINE h5arename_async_f SUBROUTINE h5aread_async_f(attr_id, mem_type_id, buf, es_id, hdferr, file, func, line) IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: attr_id - INTEGER(HID_T), INTENT(IN) :: mem_type_id - TYPE(C_PTR) , INTENT(INOUT) :: buf - INTEGER(HID_T), INTENT(IN) :: es_id - INTEGER , INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + INTEGER(HID_T), INTENT(IN) :: attr_id + INTEGER(HID_T), INTENT(IN) :: mem_type_id + TYPE(C_PTR) , INTENT(IN) :: buf + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER , 
INTENT(OUT) :: hdferr + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -2140,11 +2140,11 @@ END SUBROUTINE h5aread_async_f SUBROUTINE h5awrite_async_f(attr_id, mem_type_id, buf, es_id, hdferr, file, func, line) IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: attr_id - INTEGER(HID_T), INTENT(IN) :: mem_type_id - TYPE(C_PTR) , INTENT(IN) :: buf - INTEGER(HID_T), INTENT(IN) :: es_id - INTEGER , INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN) :: attr_id + INTEGER(HID_T), INTENT(IN) :: mem_type_id + TYPE(C_PTR) , INTENT(IN) :: buf + INTEGER(HID_T), INTENT(IN) :: es_id + INTEGER , INTENT(OUT) :: hdferr TYPE(C_PTR), OPTIONAL :: file TYPE(C_PTR), OPTIONAL :: func INTEGER , INTENT(IN), OPTIONAL :: line @@ -2239,10 +2239,10 @@ END SUBROUTINE h5awrite_f !! See C API: @ref H5Aread() !! SUBROUTINE h5aread_f(attr_id, memtype_id, buf, hdferr) - INTEGER(HID_T), INTENT(IN) :: attr_id - INTEGER(HID_T), INTENT(IN) :: memtype_id - TYPE(C_PTR) , INTENT(INOUT) :: buf - INTEGER , INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN) :: attr_id + INTEGER(HID_T), INTENT(IN) :: memtype_id + TYPE(C_PTR) , INTENT(IN) :: buf + INTEGER , INTENT(OUT) :: hdferr END SUBROUTINE h5aread_f !> diff --git a/fortran/src/H5Dff.F90 b/fortran/src/H5Dff.F90 index 06034ac4d19..21f13fd1c6a 100644 --- a/fortran/src/H5Dff.F90 +++ b/fortran/src/H5Dff.F90 @@ -311,8 +311,8 @@ SUBROUTINE h5dcreate_async_f(loc_id, name, type_id, space_id, dset_id, es_id, & INTEGER(HID_T), INTENT(IN), OPTIONAL :: dcpl_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: dapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: lcpl_id_default @@ -439,8 +439,8 @@ SUBROUTINE h5dopen_async_f(loc_id, name, dset_id, es_id, hdferr, dapl_id, file, INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: dapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: dapl_id_default @@ -528,8 +528,8 @@ SUBROUTINE h5dclose_async_f(dset_id, es_id, hdferr, file, func, line) INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -673,8 +673,8 @@ SUBROUTINE h5dset_extent_async_f(dataset_id, fsize, es_id, hdferr, file, func, l INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: fsize INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -1377,8 +1377,8 @@ SUBROUTINE h5dget_space_async_f(dataset_id, dataspace_id, es_id, hdferr, file, f INTEGER(HID_T), INTENT(OUT) :: dataspace_id INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: 
file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -1455,11 +1455,11 @@ END SUBROUTINE h5dget_access_plist_f !! SUBROUTINE h5dvlen_reclaim_f(type_id, space_id, plist_id, buf, hdferr) IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: type_id - INTEGER(HID_T), INTENT(IN) :: space_id - INTEGER(HID_T), INTENT(IN) :: plist_id - TYPE(C_PTR) , INTENT(INOUT) :: buf - INTEGER , INTENT(OUT) :: hdferr + INTEGER(HID_T), INTENT(IN) :: type_id + INTEGER(HID_T), INTENT(IN) :: space_id + INTEGER(HID_T), INTENT(IN) :: plist_id + TYPE(C_PTR) , INTENT(IN) :: buf + INTEGER , INTENT(OUT) :: hdferr INTERFACE INTEGER FUNCTION h5dvlen_reclaim_c(type_id, space_id, plist_id, buf) BIND(C, NAME='h5dvlen_reclaim_c') @@ -1502,14 +1502,14 @@ SUBROUTINE h5dread_async_f(dset_id, mem_type_id, buf, es_id, hdferr, & IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: mem_type_id - TYPE(C_PTR), INTENT(INOUT) :: buf + TYPE(C_PTR), INTENT(IN) :: buf INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -1575,8 +1575,8 @@ SUBROUTINE h5dwrite_async_f(dset_id, mem_type_id, buf, es_id, hdferr, & INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -1662,7 +1662,7 @@ SUBROUTINE h5dread_f(dset_id, mem_type_id, buf, hdferr, mem_space_id, file_space IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: mem_type_id - TYPE(C_PTR), INTENT(INOUT) :: buf + TYPE(C_PTR), INTENT(IN) :: buf INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id @@ -1773,9 +1773,9 @@ END SUBROUTINE h5dfill_f SUBROUTINE h5dfill_f(fill_value, fill_type_id, buf, buf_type_id, space_id, hdferr) USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE - TYPE(C_PTR) :: fill_value + TYPE(C_PTR), INTENT(IN) :: fill_value INTEGER(HID_T), INTENT(IN) :: fill_type_id - TYPE(C_PTR) :: buf + TYPE(C_PTR), INTENT(IN) :: buf INTEGER(HID_T), INTENT(IN) :: buf_type_id INTEGER(HID_T), INTENT(IN) :: space_id END SUBROUTINE h5dfill_f @@ -2101,7 +2101,7 @@ SUBROUTINE h5dread_ptr(dset_id, mem_type_id, buf, hdferr, & IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: mem_type_id - TYPE(C_PTR), INTENT(INOUT) :: buf + TYPE(C_PTR), INTENT(IN) :: buf INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: mem_space_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: file_space_id @@ -2126,9 +2126,9 @@ END SUBROUTINE h5dread_ptr SUBROUTINE h5dfill_ptr(fill_value, fill_type_id, buf, buf_type_id, space_id, hdferr) IMPLICIT NONE - TYPE(C_PTR) :: fill_value + TYPE(C_PTR) , INTENT(IN) :: fill_value INTEGER(HID_T), INTENT(IN) :: fill_type_id - TYPE(C_PTR) :: buf + TYPE(C_PTR) , INTENT(IN) :: buf INTEGER(HID_T), 
INTENT(IN) :: buf_type_id INTEGER(HID_T), INTENT(IN) :: space_id INTEGER, INTENT(OUT) :: hdferr @@ -2288,7 +2288,7 @@ SUBROUTINE h5dread_multi_f(count, dset_id, mem_type_id, mem_space_id, file_space INTEGER(HID_T), INTENT(IN), DIMENSION(*) :: mem_type_id INTEGER(HID_T), INTENT(IN), DIMENSION(*) :: mem_space_id INTEGER(HID_T), INTENT(IN), DIMENSION(*) :: file_space_id - TYPE(C_PTR), DIMENSION(*) :: buf + TYPE(C_PTR), INTENT(IN), DIMENSION(*) :: buf INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp @@ -2339,7 +2339,7 @@ SUBROUTINE h5dwrite_multi_f(count, dset_id, mem_type_id, mem_space_id, file_spac INTEGER(HID_T), INTENT(IN), DIMENSION(*) :: mem_type_id INTEGER(HID_T), INTENT(IN), DIMENSION(*) :: mem_space_id INTEGER(HID_T), INTENT(IN), DIMENSION(*) :: file_space_id - TYPE(C_PTR), DIMENSION(*) :: buf + TYPE(C_PTR), INTENT(IN), DIMENSION(*) :: buf INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: xfer_prp diff --git a/fortran/src/H5Fff.F90 b/fortran/src/H5Fff.F90 index f1a0d2acb11..d7db4b89def 100644 --- a/fortran/src/H5Fff.F90 +++ b/fortran/src/H5Fff.F90 @@ -147,8 +147,8 @@ SUBROUTINE h5fcreate_async_f(name, access_flags, file_id, es_id, hdferr, & INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: creation_prp INTEGER(HID_T), INTENT(IN), OPTIONAL :: access_prp - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: creation_prp_default @@ -248,8 +248,8 @@ SUBROUTINE h5fflush_async_f(object_id, scope, es_id, hdferr, file, func, line) INTEGER, INTENT(IN) :: scope INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -436,8 +436,8 @@ SUBROUTINE h5fopen_async_f(name, access_flags, file_id, es_id, hdferr, & INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: access_prp - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: access_prp_default @@ -529,8 +529,8 @@ SUBROUTINE h5freopen_async_f(file_id, ret_file_id, es_id, hdferr, file, func, li INTEGER(HID_T), INTENT(OUT) :: ret_file_id INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -737,8 +737,8 @@ SUBROUTINE h5fclose_async_f(file_id, es_id, hdferr, file, func, line) INTEGER(HID_T), INTENT(IN) :: file_id INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR diff --git a/fortran/src/H5Gff.F90 b/fortran/src/H5Gff.F90 index 655c226a464..f961299e11d 100644 --- a/fortran/src/H5Gff.F90 +++ b/fortran/src/H5Gff.F90 @@ -287,8 +287,8 @@ SUBROUTINE h5gcreate_async_f(loc_id, name, grp_id, es_id, 
hdferr, & INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lcpl_id INTEGER(HID_T) , INTENT(IN), OPTIONAL :: gcpl_id INTEGER(HID_T) , INTENT(IN), OPTIONAL :: gapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: lcpl_id_default @@ -438,8 +438,8 @@ SUBROUTINE h5gopen_async_f(loc_id, name, grp_id, es_id, hdferr, & INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: gapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: gapl_id_default @@ -522,8 +522,8 @@ SUBROUTINE h5gclose_async_f(grp_id, es_id, hdferr, file, func, line) INTEGER(HID_T), INTENT(IN) :: grp_id INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -1092,8 +1092,8 @@ SUBROUTINE h5gget_info_async_f(loc_id, ginfo, es_id, hdferr, file, func, line) TYPE(H5G_info_t), INTENT(OUT), TARGET :: ginfo INTEGER(HID_T) , INTENT(IN) :: es_id INTEGER , INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: ptr @@ -1256,8 +1256,8 @@ SUBROUTINE h5gget_info_by_idx_async_f(loc_id, group_name, idx_type, order, n, gi INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: lapl_id_default @@ -1430,8 +1430,8 @@ SUBROUTINE h5gget_info_by_name_async_f(loc_id, name, ginfo, es_id, hdferr, & INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: lapl_id_default diff --git a/fortran/src/H5Lff.F90 b/fortran/src/H5Lff.F90 index 9111144657b..790a65d7432 100644 --- a/fortran/src/H5Lff.F90 +++ b/fortran/src/H5Lff.F90 @@ -196,8 +196,8 @@ SUBROUTINE h5ldelete_async_f(loc_id, name, es_id, hdferr, lapl_id, file, func, l INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: lapl_id_default @@ -318,8 +318,8 @@ SUBROUTINE h5lcreate_soft_async_f(target_path, link_loc_id, link_name, es_id, hd INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line 
INTEGER(HID_T) :: lcpl_id_default @@ -451,8 +451,8 @@ SUBROUTINE h5lcreate_hard_async_f(obj_loc_id, obj_name, link_loc_id, link_name, INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: lcpl_id INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: lcpl_id_default @@ -664,8 +664,8 @@ SUBROUTINE h5ldelete_by_idx_async_f(loc_id, group_name, index_field, order, n, e INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: lapl_id_default @@ -779,12 +779,12 @@ SUBROUTINE h5lexists_async_f(loc_id, name, link_exists, es_id, hdferr, lapl_id, IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: name - TYPE(C_PTR) , INTENT(INOUT) :: link_exists + TYPE(C_PTR) , INTENT(IN) :: link_exists INTEGER(HID_T), INTENT(IN) :: es_id INTEGER, INTENT(OUT) :: hdferr INTEGER(HID_T), INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: lapl_id_default @@ -1388,8 +1388,8 @@ SUBROUTINE h5literate_async_f(group_id, idx_type, order, idx, op, op_data, retur INTEGER , INTENT(OUT) :: return_value INTEGER(HID_T) , INTENT(IN) :: es_id INTEGER , INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR diff --git a/fortran/src/H5Off.F90 b/fortran/src/H5Off.F90 index 215f6e86cfa..7810b6e45fe 100644 --- a/fortran/src/H5Off.F90 +++ b/fortran/src/H5Off.F90 @@ -283,8 +283,8 @@ SUBROUTINE h5oopen_async_f(loc_id, name, obj_id, es_id, hdferr, lapl_id, file, f INTEGER(HID_T) , INTENT(IN) :: es_id INTEGER , INTENT(OUT) :: hdferr INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: lapl_id_default @@ -368,8 +368,8 @@ SUBROUTINE h5oclose_async_f(object_id, es_id, hdferr, file, func, line) INTEGER(HID_T), INTENT(IN) :: object_id INTEGER(HID_T), INTENT(IN) :: es_id INTEGER , INTENT(OUT) :: hdferr - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line TYPE(C_PTR) :: file_default = C_NULL_PTR @@ -516,8 +516,8 @@ SUBROUTINE h5ocopy_async_f(src_loc_id, src_name, dst_loc_id, dst_name, es_id, hd INTEGER , INTENT(OUT) :: hdferr INTEGER(HID_T) , INTENT(IN), OPTIONAL :: ocpypl_id INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lcpl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: ocpypl_id_default, lcpl_id_default @@ -867,8 +867,8 @@ SUBROUTINE h5oopen_by_idx_async_f(loc_id, 
group_name, index_type, order, n, obj_ INTEGER(HID_T) , INTENT(IN) :: es_id INTEGER , INTENT(OUT) :: hdferr INTEGER(HID_T) , INTENT(IN), OPTIONAL :: lapl_id - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN), OPTIONAL :: line INTEGER(HID_T) :: lapl_id_default @@ -1026,8 +1026,8 @@ SUBROUTINE h5ovisit_f(object_id, index_type, order, op, op_data, return_value, h INTEGER, INTENT(IN) :: index_type INTEGER, INTENT(IN) :: order - TYPE(C_FUNPTR):: op - TYPE(C_PTR) :: op_data + TYPE(C_FUNPTR), INTENT(IN) :: op + TYPE(C_PTR), INTENT(IN) :: op_data INTEGER, INTENT(OUT) :: return_value INTEGER, INTENT(OUT) :: hdferr INTEGER, INTENT(IN), OPTIONAL :: fields @@ -1133,13 +1133,13 @@ SUBROUTINE h5oget_info_by_name_async_f(loc_id, name, object_info, es_id, hdferr, IMPLICIT NONE INTEGER(HID_T) , INTENT(IN) :: loc_id CHARACTER(LEN=*), INTENT(IN) :: name - TYPE(C_PTR) , INTENT(INOUT) :: object_info + TYPE(C_PTR) , INTENT(IN) :: object_info INTEGER(HID_T) , INTENT(IN) :: es_id INTEGER , INTENT(OUT) :: hdferr INTEGER(HID_T) , INTENT(IN) , OPTIONAL :: lapl_id INTEGER , INTENT(IN) , OPTIONAL :: fields - TYPE(C_PTR), OPTIONAL :: file - TYPE(C_PTR), OPTIONAL :: func + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: file + TYPE(C_PTR), OPTIONAL, INTENT(IN) :: func INTEGER , INTENT(IN) , OPTIONAL :: line CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name @@ -1326,8 +1326,8 @@ SUBROUTINE h5ovisit_by_name_f(loc_id, object_name, index_type, order, op, op_dat INTEGER , INTENT(IN) :: index_type INTEGER , INTENT(IN) :: order - TYPE(C_FUNPTR) :: op - TYPE(C_PTR) :: op_data + TYPE(C_FUNPTR) , INTENT(IN) :: op + TYPE(C_PTR) , INTENT(IN) :: op_data INTEGER , INTENT(OUT) :: return_value INTEGER , INTENT(OUT) :: hdferr INTEGER(HID_T) , INTENT(IN) , OPTIONAL :: lapl_id diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90 index 098a6c31717..5ccc3677d00 100644 --- a/fortran/src/H5Pff.F90 +++ b/fortran/src/H5Pff.F90 @@ -4457,7 +4457,7 @@ END SUBROUTINE h5pset_f SUBROUTINE h5pget_f(prp_id, name, value, hdferr) INTEGER(HID_T) , INTENT(IN) :: prp_id CHARACTER(LEN=*), INTENT(IN) :: name - TYPE(C_PTR) , INTENT(OUT) :: value + TYPE(C_PTR) , INTENT(IN) :: value INTEGER , INTENT(OUT) :: hdferr END SUBROUTINE h5pget_f @@ -4675,7 +4675,7 @@ SUBROUTINE h5pget_fill_value_ptr(prp_id, type_id, fillvalue, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: prp_id INTEGER(HID_T), INTENT(IN) :: type_id - TYPE(C_PTR) :: fillvalue + TYPE(C_PTR) , INTENT(IN) :: fillvalue INTEGER , INTENT(OUT) :: hdferr hdferr = h5pget_fill_value_c(prp_id, type_id, fillvalue) @@ -4825,7 +4825,7 @@ SUBROUTINE h5pget_ptr(prp_id, name, value, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: prp_id CHARACTER(LEN=*), INTENT(IN) :: name - TYPE(C_PTR), INTENT(INOUT) :: value + TYPE(C_PTR), INTENT(IN) :: value INTEGER, INTENT(OUT) :: hdferr INTEGER :: name_len @@ -4998,8 +4998,8 @@ SUBROUTINE h5pcreate_class_f(parent, name, class, hdferr, create, create_data, & CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(HID_T) , INTENT(OUT) :: class INTEGER , INTENT(OUT) :: hdferr - TYPE(C_PTR) , OPTIONAL :: create_data, copy_data, close_data - TYPE(C_FUNPTR) , OPTIONAL :: create, copy, close + TYPE(C_PTR) , OPTIONAL, INTENT(IN) :: create_data, copy_data, close_data + TYPE(C_FUNPTR) , OPTIONAL, INTENT(IN) :: create, copy, close INTEGER :: name_len TYPE(C_PTR) :: create_data_default, copy_data_default, close_data_default TYPE(C_FUNPTR) :: create_default, copy_default, 
close_default @@ -5090,10 +5090,10 @@ END SUBROUTINE h5pset_file_image_f !! SUBROUTINE h5pget_file_image_f(fapl_id, buf_ptr, buf_len_ptr, hdferr) IMPLICIT NONE - INTEGER(HID_T) , INTENT(IN) :: fapl_id - TYPE(C_PTR) , INTENT(INOUT), DIMENSION(*) :: buf_ptr - INTEGER(SIZE_T), INTENT(OUT) :: buf_len_ptr - INTEGER , INTENT(OUT) :: hdferr + INTEGER(HID_T) , INTENT(IN) :: fapl_id + TYPE(C_PTR) , INTENT(IN), DIMENSION(*) :: buf_ptr + INTEGER(SIZE_T), INTENT(OUT) :: buf_len_ptr + INTEGER , INTENT(OUT) :: hdferr INTERFACE INTEGER FUNCTION h5pget_file_image_c(fapl_id, buf_ptr, buf_len_ptr) & @@ -6114,7 +6114,7 @@ SUBROUTINE h5pset_vol_f(plist_id, new_vol_id, hdferr, new_vol_info) INTEGER(HID_T) , INTENT(IN) :: plist_id INTEGER(HID_T) , INTENT(IN) :: new_vol_id INTEGER , INTENT(OUT) :: hdferr - TYPE(C_PTR) , OPTIONAL :: new_vol_info + TYPE(C_PTR) , INTENT(IN), OPTIONAL :: new_vol_info TYPE(C_PTR) :: new_vol_info_default diff --git a/fortran/src/H5Tff.F90 b/fortran/src/H5Tff.F90 index 84b96541947..29550b783b4 100644 --- a/fortran/src/H5Tff.F90 +++ b/fortran/src/H5Tff.F90 @@ -2040,13 +2040,13 @@ END SUBROUTINE h5tget_native_type_f !! SUBROUTINE h5tconvert_f(src_id, dst_id, nelmts, buf, hdferr, background, plist_id) IMPLICIT NONE - INTEGER(HID_T) , INTENT(IN) :: src_id - INTEGER(HID_T) , INTENT(IN) :: dst_id - INTEGER(SIZE_T), INTENT(IN) :: nelmts - TYPE(C_PTR) , INTENT(INOUT) :: buf - INTEGER , INTENT(OUT) :: hdferr - TYPE(C_PTR) , INTENT(INOUT), OPTIONAL :: background - INTEGER(HID_T) , INTENT(IN) , OPTIONAL :: plist_id + INTEGER(HID_T) , INTENT(IN) :: src_id + INTEGER(HID_T) , INTENT(IN) :: dst_id + INTEGER(SIZE_T), INTENT(IN) :: nelmts + TYPE(C_PTR) , INTENT(IN) :: buf + INTEGER , INTENT(OUT) :: hdferr + TYPE(C_PTR) , INTENT(IN), OPTIONAL :: background + INTEGER(HID_T) , INTENT(IN), OPTIONAL :: plist_id INTEGER(HID_T) :: plist_id_default TYPE(C_PTR) :: background_default From dcd1b42796bab8424cbc4bde21817e8edff98b21 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 17 May 2023 10:41:01 -0500 Subject: [PATCH 227/231] Add h5copy help test and verify consistency (#2902) * Add autotools test for help --- tools/src/h5import/h5import.c | 2 +- tools/src/h5stat/h5stat.c | 2 +- tools/src/misc/h5delete.c | 2 +- tools/test/h5copy/CMakeTests.cmake | 32 ++++++++++++ tools/test/h5copy/testfiles/h5copy_help1.ddl | 49 +++++++++++++++++++ tools/test/h5copy/testfiles/h5copy_help2.ddl | 49 +++++++++++++++++++ tools/test/h5copy/testh5copy.sh.in | 42 ++++++++++++++++ tools/test/h5stat/testfiles/h5stat_help1.ddl | 2 +- tools/test/h5stat/testfiles/h5stat_help2.ddl | 2 +- tools/test/h5stat/testfiles/h5stat_nofile.ddl | 2 +- 10 files changed, 178 insertions(+), 6 deletions(-) create mode 100644 tools/test/h5copy/testfiles/h5copy_help1.ddl create mode 100644 tools/test/h5copy/testfiles/h5copy_help2.ddl diff --git a/tools/src/h5import/h5import.c b/tools/src/h5import/h5import.c index e63b6a8c186..5a2fae3765c 100644 --- a/tools/src/h5import/h5import.c +++ b/tools/src/h5import/h5import.c @@ -5097,7 +5097,7 @@ help(char *name) void usage(char *name) { - (void)HDfprintf(stdout, "\nUsage:\t%s -h[elp], OR\n", name); + (void)HDfprintf(stdout, "\nusage:\t%s -h[elp], OR\n", name); (void)HDfprintf(stdout, "\t%s -c[onfig] \ [ -c[config] ...] 
-o[utfile] \n\n", name); diff --git a/tools/src/h5stat/h5stat.c b/tools/src/h5stat/h5stat.c index 04d17236629..05e0be82a4b 100644 --- a/tools/src/h5stat/h5stat.c +++ b/tools/src/h5stat/h5stat.c @@ -208,7 +208,7 @@ static void usage(const char *prog) { HDfflush(stdout); - HDfprintf(stdout, "Usage: %s [OPTIONS] file\n", prog); + HDfprintf(stdout, "usage: %s [OPTIONS] file\n", prog); HDfprintf(stdout, "\n"); HDfprintf(stdout, " ERROR\n"); HDfprintf(stdout, " --enable-error-stack Prints messages from the HDF5 error stack as they occur\n"); diff --git a/tools/src/misc/h5delete.c b/tools/src/misc/h5delete.c index 20e81a18bd2..ef5b25fc558 100644 --- a/tools/src/misc/h5delete.c +++ b/tools/src/misc/h5delete.c @@ -25,7 +25,7 @@ static void usage(void); static void usage(void) { - HDfprintf(stderr, "Usage: h5delete [-f] \n"); + HDfprintf(stderr, "usage: h5delete [-f] \n"); } int diff --git a/tools/test/h5copy/CMakeTests.cmake b/tools/test/h5copy/CMakeTests.cmake index 05288b44b66..b47000a3193 100644 --- a/tools/test/h5copy/CMakeTests.cmake +++ b/tools/test/h5copy/CMakeTests.cmake @@ -35,6 +35,8 @@ ${HDF5_TOOLS_TEST_H5COPY_SOURCE_DIR}/testfiles/tudfilter.h5_ERR.txt ${HDF5_TOOLS_TEST_H5COPY_SOURCE_DIR}/testfiles/h5copy_plugin_fail_ERR.out.h5.txt ${HDF5_TOOLS_TEST_H5COPY_SOURCE_DIR}/testfiles/h5copy_plugin_test.out.h5.txt + ${HDF5_TOOLS_TEST_H5COPY_SOURCE_DIR}/testfiles/h5copy_help1.ddl + ${HDF5_TOOLS_TEST_H5COPY_SOURCE_DIR}/testfiles/h5copy_help2.ddl ) file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles") @@ -393,6 +395,32 @@ endif () endmacro () + macro (ADD_SIMPLE_TEST resultfile resultcode) + # If using memchecker add tests without using scripts + if (HDF5_ENABLE_USING_MEMCHECKER) + add_test (NAME H5COPY-${resultfile} COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $ ${ARGN}) + if (${resultcode}) + set_tests_properties (H5COPY-${resultfile} PROPERTIES WILL_FAIL "true") + endif () + else (HDF5_ENABLE_USING_MEMCHECKER) + add_test ( + NAME H5COPY-${resultfile} + COMMAND "${CMAKE_COMMAND}" + -D "TEST_EMULATOR=${CMAKE_CROSSCOMPILING_EMULATOR}" + -D "TEST_PROGRAM=$" + -D "TEST_ARGS=${ARGN}" + -D "TEST_FOLDER=${PROJECT_BINARY_DIR}" + -D "TEST_OUTPUT=./testfiles/${resultfile}.out" + -D "TEST_EXPECT=${resultcode}" + -D "TEST_REFERENCE=./testfiles/${resultfile}.ddl" + -P "${HDF_RESOURCES_DIR}/runTest.cmake" + ) + endif () + set_tests_properties (H5COPY-${resultfile} PROPERTIES + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}" + ) + endmacro () + ############################################################################## ############################################################################## ### T H E T E S T S ### @@ -420,6 +448,10 @@ set (USE_FILTER_SZIP "true") endif () +# Test for help flag + ADD_SIMPLE_TEST (h5copy_help1 0 -h) + ADD_SIMPLE_TEST (h5copy_help2 0 --help) + # "Test copying various forms of datasets" ADD_H5_TEST (simple 0 ${HDF_FILE1}.h5 -v -s simple -d simple) ADD_H5_TEST (chunk 0 ${HDF_FILE1}.h5 -v -s chunk -d chunk) diff --git a/tools/test/h5copy/testfiles/h5copy_help1.ddl b/tools/test/h5copy/testfiles/h5copy_help1.ddl new file mode 100644 index 00000000000..418faea77d6 --- /dev/null +++ b/tools/test/h5copy/testfiles/h5copy_help1.ddl @@ -0,0 +1,49 @@ + +usage: h5copy [OPTIONS] [OBJECTS...] + OBJECTS + -i, --input input file name + -o, --output output file name + -s, --source source object name + -d, --destination destination object name + ERROR + --enable-error-stack Prints messages from the HDF5 error stack as they occur. + Optional value 2 also prints file open errors. 
+ OPTIONS + -h, --help Print a usage message and exit + -p, --parents No error if existing, make parent groups as needed + -v, --verbose Print information about OBJECTS and OPTIONS + -V, --version Print version number and exit + -f, --flag Flag type + + Flag type is one of the following strings: + + shallow Copy only immediate members for groups + + soft Expand soft links into new objects + + ext Expand external links into new objects + + ref Copy references and any referenced objects, i.e., objects + that the references point to. + Referenced objects are copied in addition to the objects + specified on the command line and reference datasets are + populated with correct reference values. Copies of referenced + datasets outside the copy range specified on the command line + will normally have a different name from the original. + (Default:Without this option, reference value(s) in any + reference datasets are set to NULL and referenced objects are + not copied unless they are otherwise within the copy range + specified on the command line.) + + noattr Copy object without copying attributes + + allflags Switches all flags from the default to the non-default setting + + These flag types correspond to the following API symbols + + H5O_COPY_SHALLOW_HIERARCHY_FLAG + H5O_COPY_EXPAND_SOFT_LINK_FLAG + H5O_COPY_EXPAND_EXT_LINK_FLAG + H5O_COPY_EXPAND_REFERENCE_FLAG + H5O_COPY_WITHOUT_ATTR_FLAG + H5O_COPY_ALL diff --git a/tools/test/h5copy/testfiles/h5copy_help2.ddl b/tools/test/h5copy/testfiles/h5copy_help2.ddl new file mode 100644 index 00000000000..418faea77d6 --- /dev/null +++ b/tools/test/h5copy/testfiles/h5copy_help2.ddl @@ -0,0 +1,49 @@ + +usage: h5copy [OPTIONS] [OBJECTS...] + OBJECTS + -i, --input input file name + -o, --output output file name + -s, --source source object name + -d, --destination destination object name + ERROR + --enable-error-stack Prints messages from the HDF5 error stack as they occur. + Optional value 2 also prints file open errors. + OPTIONS + -h, --help Print a usage message and exit + -p, --parents No error if existing, make parent groups as needed + -v, --verbose Print information about OBJECTS and OPTIONS + -V, --version Print version number and exit + -f, --flag Flag type + + Flag type is one of the following strings: + + shallow Copy only immediate members for groups + + soft Expand soft links into new objects + + ext Expand external links into new objects + + ref Copy references and any referenced objects, i.e., objects + that the references point to. + Referenced objects are copied in addition to the objects + specified on the command line and reference datasets are + populated with correct reference values. Copies of referenced + datasets outside the copy range specified on the command line + will normally have a different name from the original. + (Default:Without this option, reference value(s) in any + reference datasets are set to NULL and referenced objects are + not copied unless they are otherwise within the copy range + specified on the command line.) 
+ + noattr Copy object without copying attributes + + allflags Switches all flags from the default to the non-default setting + + These flag types correspond to the following API symbols + + H5O_COPY_SHALLOW_HIERARCHY_FLAG + H5O_COPY_EXPAND_SOFT_LINK_FLAG + H5O_COPY_EXPAND_EXT_LINK_FLAG + H5O_COPY_EXPAND_REFERENCE_FLAG + H5O_COPY_WITHOUT_ATTR_FLAG + H5O_COPY_ALL diff --git a/tools/test/h5copy/testh5copy.sh.in b/tools/test/h5copy/testh5copy.sh.in index 7587c0e684f..1f64d15f037 100644 --- a/tools/test/h5copy/testh5copy.sh.in +++ b/tools/test/h5copy/testh5copy.sh.in @@ -60,6 +60,8 @@ $SRC_H5COPY_TESTFILES/h5copy_extlinks_trg.h5 LIST_OTHER_TEST_FILES=" $SRC_H5COPY_TESTFILES/h5copy_misc1.out $SRC_H5COPY_TESTFILES/h5copy_misc1.err +$SRC_H5COPY_TESTFILES/h5copy_help1.ddl +$SRC_H5COPY_TESTFILES/h5copy_help2.ddl " H5COPY=../../src/h5copy/h5copy # The tool name @@ -483,6 +485,42 @@ H5DIFFTEST_FAIL() fi } +# ADD_HELP_TEST +TOOLTEST_HELP() { + + expect="$TESTDIR/$1" + actual="$TESTDIR/`basename $1 .ddl`.out" + actual_err="$TESTDIR/`basename $1 .ddl`.err" + shift + + # Run test. + TESTING $H5COPY $@ + ( + cd $TESTDIR + $RUNSERIAL $H5COPY_BIN "$@" + ) >$actual 2>$actual_err + + if [ ! -f $expectdata ]; then + # Create the expect data file if it doesn't yet exist. + echo " CREATED" + cp $actual $expect-CREATED + echo " Expected output (*.ddl) missing" + nerrors="`expr $nerrors + 1`" + elif $CMP $expect $actual; then + echo " PASSED" + else + echo "*FAILED*" + echo " Expected output (*.ddl) differs from actual output (*.out)" + nerrors="`expr $nerrors + 1`" + fi + + # Clean up output file + if test -z "$HDF5_NOCLEANUP"; then + rm -f $actual $actual_err + fi + +} + # Copy single datasets of various forms from one group to another, # adding object copied to the destination file each time # @@ -607,6 +645,10 @@ fi # prepare for test COPY_TESTFILES_TO_TESTDIR +# check help text +TOOLTEST_HELP h5copy_help1.ddl -h +TOOLTEST_HELP h5copy_help2.ddl --help + # Start tests COPY_OBJECTS COPY_REFERENCES diff --git a/tools/test/h5stat/testfiles/h5stat_help1.ddl b/tools/test/h5stat/testfiles/h5stat_help1.ddl index 1f65f0d1914..54d6a318eda 100644 --- a/tools/test/h5stat/testfiles/h5stat_help1.ddl +++ b/tools/test/h5stat/testfiles/h5stat_help1.ddl @@ -1,4 +1,4 @@ -Usage: h5stat [OPTIONS] file +usage: h5stat [OPTIONS] file ERROR --enable-error-stack Prints messages from the HDF5 error stack as they occur diff --git a/tools/test/h5stat/testfiles/h5stat_help2.ddl b/tools/test/h5stat/testfiles/h5stat_help2.ddl index 1f65f0d1914..54d6a318eda 100644 --- a/tools/test/h5stat/testfiles/h5stat_help2.ddl +++ b/tools/test/h5stat/testfiles/h5stat_help2.ddl @@ -1,4 +1,4 @@ -Usage: h5stat [OPTIONS] file +usage: h5stat [OPTIONS] file ERROR --enable-error-stack Prints messages from the HDF5 error stack as they occur diff --git a/tools/test/h5stat/testfiles/h5stat_nofile.ddl b/tools/test/h5stat/testfiles/h5stat_nofile.ddl index 1f65f0d1914..54d6a318eda 100644 --- a/tools/test/h5stat/testfiles/h5stat_nofile.ddl +++ b/tools/test/h5stat/testfiles/h5stat_nofile.ddl @@ -1,4 +1,4 @@ -Usage: h5stat [OPTIONS] file +usage: h5stat [OPTIONS] file ERROR --enable-error-stack Prints messages from the HDF5 error stack as they occur From 369b87cd9ade443f79c35857c73bc8add45d3907 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Wed, 17 May 2023 14:11:36 -0500 Subject: [PATCH 228/231] fixed spacing --- fortran/test/tf.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fortran/test/tf.F90 b/fortran/test/tf.F90 index 
73f43bc3016..61451af1f0e 100644 --- a/fortran/test/tf.F90 +++ b/fortran/test/tf.F90 @@ -69,9 +69,11 @@ SUBROUTINE write_test_header(title_header) CHARACTER(LEN=*), INTENT(IN) :: title_header ! test name INTEGER, PARAMETER :: width = TAB_SPACE+10 - CHARACTER(LEN=2*width) ::title_centered =" " + CHARACTER(LEN=2*width) ::title_centered INTEGER :: len, i + title_centered(:) = " " + len=LEN_TRIM(title_header) title_centered(1:3) ="| |" title_centered((width-len)/2:(width-len)/2+len) = TRIM(title_header) From b4f7cc3d556d15eee419b16fa2fbb9cc411f7641 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Thu, 18 May 2023 12:30:47 -0500 Subject: [PATCH 229/231] Fixed value checking error with gfortran 4.8 --- fortran/test/tH5P_F03.F90 | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/fortran/test/tH5P_F03.F90 b/fortran/test/tH5P_F03.F90 index 0875b81af13..f15a471e7ce 100644 --- a/fortran/test/tH5P_F03.F90 +++ b/fortran/test/tH5P_F03.F90 @@ -532,6 +532,7 @@ SUBROUTINE external_test_offset(cleanup,total_error) INTEGER(hid_t) :: dset=-1 ! dataset INTEGER(hid_t) :: grp=-1 ! group to emit diagnostics INTEGER(size_t) :: i, j ! miscellaneous counters + INTEGER :: k CHARACTER(LEN=180) :: filename ! file names INTEGER, DIMENSION(1:25) :: part INTEGER, DIMENSION(1:100), TARGET :: whole ! raw data buffers @@ -598,8 +599,9 @@ SUBROUTINE external_test_offset(cleanup,total_error) CALL h5dread_f(dset, H5T_NATIVE_INTEGER, f_ptr, error, mem_space_id=space, file_space_id=space) CALL check("h5dread_f", error, total_error) - DO i = 1, 100 - IF(whole(i) .NE. i-1)THEN + DO k = 1, 100 + CALL verify("h5dread_f", whole(k), k-1, error) + IF(error .NE. 0)THEN WRITE(*,*) "Incorrect value(s) read." total_error = total_error + 1 EXIT @@ -619,8 +621,10 @@ SUBROUTINE external_test_offset(cleanup,total_error) CALL h5sclose_f(hs_space, error) CALL check("h5sclose_f", error, total_error) - DO i = INT(hs_start(1))+1, INT(hs_start(1)+hs_count(1)) - IF(whole(i) .NE. i-1)THEN + + DO k = INT(hs_start(1))+1, INT(hs_start(1)+hs_count(1)) + CALL verify("h5dread_f", whole(k), k-1, error) + IF(error .NE. 0)THEN WRITE(*,*) "Incorrect value(s) read." 
total_error = total_error + 1 EXIT From ae3751e7cf7215e4b6417fbcbafd8440e98288f2 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Thu, 18 May 2023 13:59:06 -0500 Subject: [PATCH 230/231] resolved issue with gfortran 4.8 --- fortran/src/H5Off.F90 | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/fortran/src/H5Off.F90 b/fortran/src/H5Off.F90 index 7810b6e45fe..0b512818620 100644 --- a/fortran/src/H5Off.F90 +++ b/fortran/src/H5Off.F90 @@ -1027,7 +1027,7 @@ SUBROUTINE h5ovisit_f(object_id, index_type, order, op, op_data, return_value, h INTEGER, INTENT(IN) :: order TYPE(C_FUNPTR), INTENT(IN) :: op - TYPE(C_PTR), INTENT(IN) :: op_data + TYPE(C_PTR), INTENT(IN) :: op_data INTEGER, INTENT(OUT) :: return_value INTEGER, INTENT(OUT) :: hdferr INTEGER, INTENT(IN), OPTIONAL :: fields @@ -1039,12 +1039,12 @@ INTEGER FUNCTION h5ovisit_c(object_id, index_type, order, op, op_data, fields) & IMPORT :: C_FUNPTR, C_PTR IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: object_id - INTEGER, INTENT(IN) :: index_type - INTEGER, INTENT(IN) :: order + INTEGER(HID_T):: object_id + INTEGER :: index_type + INTEGER, :: order TYPE(C_FUNPTR), VALUE :: op TYPE(C_PTR), VALUE :: op_data - INTEGER, INTENT(IN) :: fields + INTEGER :: fields END FUNCTION h5ovisit_c END INTERFACE @@ -1343,15 +1343,15 @@ INTEGER FUNCTION h5ovisit_by_name_c(loc_id, object_name, namelen, index_type, or IMPORT :: C_CHAR, C_PTR, C_FUNPTR IMPORT :: HID_T, SIZE_T IMPLICIT NONE - INTEGER(HID_T) , INTENT(IN) :: loc_id - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: object_name - INTEGER(SIZE_T) :: namelen - INTEGER , INTENT(IN) :: index_type - INTEGER , INTENT(IN) :: order - TYPE(C_FUNPTR) , VALUE :: op - TYPE(C_PTR) , VALUE :: op_data - INTEGER(HID_T) , INTENT(IN) :: lapl_id - INTEGER , INTENT(IN) :: fields + INTEGER(HID_T) :: loc_id + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: object_name + INTEGER(SIZE_T) :: namelen + INTEGER :: index_type + INTEGER :: order + TYPE(C_FUNPTR), VALUE :: op + TYPE(C_PTR) , VALUE :: op_data + INTEGER(HID_T) :: lapl_id + INTEGER :: fields END FUNCTION h5ovisit_by_name_c END INTERFACE From cfbda63be368d5844b29b5d18655d84976b1b0bf Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Thu, 18 May 2023 14:08:16 -0500 Subject: [PATCH 231/231] fixed typo --- fortran/src/H5Off.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fortran/src/H5Off.F90 b/fortran/src/H5Off.F90 index 0b512818620..84a15902c19 100644 --- a/fortran/src/H5Off.F90 +++ b/fortran/src/H5Off.F90 @@ -1041,7 +1041,7 @@ INTEGER FUNCTION h5ovisit_c(object_id, index_type, order, op, op_data, fields) & IMPLICIT NONE INTEGER(HID_T):: object_id INTEGER :: index_type - INTEGER, :: order + INTEGER :: order TYPE(C_FUNPTR), VALUE :: op TYPE(C_PTR), VALUE :: op_data INTEGER :: fields
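
Taken together, patch 230 (drop the INTENT attributes from the BIND(C) dummy arguments) and patch 231 (remove the stray comma that patch 230 left after INTEGER) leave the h5ovisit_c interface in fortran/src/H5Off.F90 reading roughly as below. This is a sketch reconstructed from the two hunks above; the INTERFACE/END INTERFACE wrapper, the BIND(C, NAME='h5ovisit_c') continuation line, and the exact indentation are assumed from the unchanged surrounding context rather than shown in the diffs.

      INTERFACE
         INTEGER FUNCTION h5ovisit_c(object_id, index_type, order, op, op_data, fields) &
              BIND(C, NAME='h5ovisit_c')
           IMPORT :: C_FUNPTR, C_PTR
           IMPORT :: HID_T
           IMPLICIT NONE
           ! No INTENT attributes on the dummies (the change made for gfortran 4.8);
           ! only the callback pointer and its user data are passed by VALUE.
           INTEGER(HID_T) :: object_id
           INTEGER        :: index_type
           INTEGER        :: order
           TYPE(C_FUNPTR), VALUE :: op
           TYPE(C_PTR),    VALUE :: op_data
           INTEGER        :: fields
         END FUNCTION h5ovisit_c
      END INTERFACE

Patch 230 applies the same pattern to the h5ovisit_by_name_c interface in the same file; that block required no follow-up fix in patch 231.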